From 36b7c06975ca41c17a78fe4c0641709e40bac7b3 Mon Sep 17 00:00:00 2001
From: Andy Polyakov <appro@openssl.org>
Date: Mon, 22 Jan 2007 20:33:46 +0000
Subject: [PATCH] SHA1 for ARMv4 and Thumb.

---
 crypto/sha/asm/sha1-armv4-large.pl | 223 +++++++++++++++++++++++++
 crypto/sha/asm/sha1-thumb.pl       | 255 +++++++++++++++++++++++++++++
 2 files changed, 478 insertions(+)
 create mode 100644 crypto/sha/asm/sha1-armv4-large.pl
 create mode 100644 crypto/sha/asm/sha1-thumb.pl

diff --git a/crypto/sha/asm/sha1-armv4-large.pl b/crypto/sha/asm/sha1-armv4-large.pl
new file mode 100644
index 000000000..9df9af4c9
--- /dev/null
+++ b/crypto/sha/asm/sha1-armv4-large.pl
@@ -0,0 +1,223 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# sha1_block procedure for ARMv4.
+#
+# January 2007.
+
+# Size/performance trade-off
+# ====================================================================
+# impl		size in bytes	comp cycles[*]	measured performance
+# ====================================================================
+# thumb		304		3212		4420
+# armv4-small	392/+29%	1958/+64%	2290/+93%
+# armv4-compact	740/+89%	1552/+26%	1910/+20%
+# armv4-large	1420/+92%	1307/+19%	1630/+17%
+# full unroll	~5100/+260%	~1260/+4%	~1600/+2%
+# ====================================================================
+# thumb		= same as 'small' but in Thumb instructions[**] and
+#		  with recurring code in two private functions;
+# small		= detached Xload/update, loops are folded;
+# compact	= detached Xload/update, 5x unroll;
+# large		= interleaved Xload/update, 5x unroll;
+# full unroll	= interleaved Xload/update, full unroll, estimated[!];
+#
+# [*]	Manually counted instructions in "grand" loop body. Measured
+#	performance is affected by prologue and epilogue overhead,
+#	i-cache availability, branch penalties, etc.
+# [**]	While each Thumb instruction is half the size of an ARM one,
+#	the Thumb set is not as diverse: e.g., there are only two
+#	arithmetic instructions with 3 arguments, there is no [fixed]
+#	rotate, and addressing modes are limited. As a result it takes
+#	more instructions to do the same job in Thumb, so the code
+#	never ends up half the size and is always slower.
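+#
+# For reference, each round generated below is the textbook SHA-1
+# step (a C-like sketch, not part of the emitted code; F and K vary
+# with the round number):
+#
+#	T = ROL(A,5) + F(B,C,D) + E + K[i] + X[i];
+#	E = D; D = C; C = ROL(B,30); B = A; A = T;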
+
+$ctx="r0";
+$inp="r1";
+$len="r2";
+$a="r3";
+$b="r4";
+$c="r5";
+$d="r6";
+$e="r7";
+$K="r8";
+$t0="r10";
+$t1="r11";
+$t2="r12";
+$Xi="r14";
+@V=($a,$b,$c,$d,$e);
+
+# One could optimize this for aligned access on big-endian
+# architectures, but the code's endian neutrality makes it too
+# pretty to touch:-)
+sub Xload {
+my ($a,$b,$c,$d,$e)=@_;
+$code.=<<___;
+	ldrb	$t0,[$inp],#4
+	ldrb	$t1,[$inp,#-3]
+	ldrb	$t2,[$inp,#-2]
+	add	$e,$K,$e,ror#2			@ E+=K_00_19
+	orr	$t0,$t1,$t0,lsl#8
+	ldrb	$t1,[$inp,#-1]
+	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
+	orr	$t0,$t2,$t0,lsl#8
+	orr	$t0,$t1,$t0,lsl#8
+	add	$e,$e,$t0			@ E+=X[i]
+	str	$t0,[$Xi,#-4]!
+___
+}
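+# Xupdate computes the standard message-schedule recurrence
+# X[i] = ROL(X[i-3]^X[i-8]^X[i-14]^X[i-16],1): the loads below pick
+# the four older words off the downward-growing X[] stack frame, and
+# ror#31 is the rotate-left-by-1.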
+sub Xupdate {
+my ($a,$b,$c,$d,$e)=@_;
+$code.=<<___;
+	ldr	$t0,[$Xi,#15*4]
+	ldr	$t1,[$Xi,#13*4]
+	ldr	$t2,[$Xi,#7*4]
+	add	$e,$K,$e,ror#2			@ E+=K_xx_xx
+	eor	$t0,$t0,$t1
+	ldr	$t1,[$Xi,#2*4]
+	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
+	eor	$t0,$t0,$t2
+	eor	$t0,$t0,$t1
+	mov	$t0,$t0,ror#31
+	add	$e,$e,$t0			@ E+=X[i]
+	str	$t0,[$Xi,#-4]!
+___
+}
+
+sub BODY_00_15 {
+my ($a,$b,$c,$d,$e)=@_;
+	&Xload(@_);
+$code.=<<___;
+	eor	$t1,$c,$d
+	and	$t1,$b,$t1,ror#2
+	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
+	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
+___
+}
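+# Note: the eor/and/eor sequence computes F_00_19 as (B&(C^D))^D, a
+# bitwise-select identity equivalent to the canonical (B&C)|(~B&D).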
+
+sub BODY_16_19 {
+my ($a,$b,$c,$d,$e)=@_;
+	&Xupdate(@_);
+$code.=<<___;
+	eor	$t1,$c,$d
+	and	$t1,$b,$t1,ror#2
+	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
+	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
+___
+}
+
+sub BODY_20_39 {
+my ($a,$b,$c,$d,$e)=@_;
+	&Xupdate(@_);
+$code.=<<___;
+	eor	$t1,$c,$d
+	eor	$t1,$b,$t1,ror#2		@ F_20_39(B,C,D)
+	add	$e,$e,$t1			@ E+=F_20_39(B,C,D)
+___
+}
+
+sub BODY_40_59 {
+my ($a,$b,$c,$d,$e)=@_;
+	&Xupdate(@_);
+$code.=<<___;
+	and	$t1,$b,$c,ror#2
+	orr	$t2,$b,$c,ror#2
+	and	$t2,$t2,$d,ror#2
+	orr	$t1,$t1,$t2			@ F_40_59(B,C,D)
+	add	$e,$e,$t1			@ E+=F_40_59(B,C,D)
+___
+}
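+# Note: F_40_59 is computed as (B&C)|((B|C)&D), the same majority
+# function as the canonical (B&C)|(B&D)|(C&D) with one operation
+# fewer.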
+
+$code=<<___;
+.text
+
+.global	sha1_block_data_order
+.type	sha1_block_data_order,%function
+
+.align	2
+sha1_block_data_order:
+	stmdb	sp!,{r4-r12,lr}
+	add	$len,$inp,$len,lsl#6	@ $len to point at the end of $inp
+	ldmia	$ctx,{$a,$b,$c,$d,$e}
+.Lloop:
+	ldr	$K,.LK_00_19
+	mov	$Xi,sp
+	sub	sp,sp,#15*4
+	mov	$c,$c,ror#30
+	mov	$d,$d,ror#30
+	mov	$e,$e,ror#30		@ [6]
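+	@ C, D and E are kept rotated left by 2 (hence ror#30): the
+	@ per-round C=ROR(B,2) step then costs nothing, the round
+	@ bodies read them through ror#2, and .L_done compensates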
+.L_00_15:
+___
+for($i=0;$i<5;$i++) {
+	&BODY_00_15(@V);	unshift(@V,pop(@V));
+}
+$code.=<<___;
+	teq	$Xi,sp
+	bne	.L_00_15		@ [((11+4)*5+2)*3]
+___
+	&BODY_00_15(@V);	unshift(@V,pop(@V));
+	&BODY_16_19(@V);	unshift(@V,pop(@V));
+	&BODY_16_19(@V);	unshift(@V,pop(@V));
+	&BODY_16_19(@V);	unshift(@V,pop(@V));
+	&BODY_16_19(@V);	unshift(@V,pop(@V));
+$code.=<<___;
+
+	ldr	$K,.LK_20_39		@ [+15+16*4]
+	sub	sp,sp,#25*4
+	cmn	sp,#0			@ [+3], clear carry to denote 20_39
+.L_20_39_or_60_79:
+___
+for($i=0;$i<5;$i++) {
+	&BODY_20_39(@V);	unshift(@V,pop(@V));
+}
+$code.=<<___;
+	teq	$Xi,sp			@ preserve carry
+	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
+	bcs	.L_done			@ [+((12+3)*5+2)*4], spare 300 bytes
+
+	ldr	$K,.LK_40_59
+	sub	sp,sp,#20*4		@ [+2]
+.L_40_59:
+___
+for($i=0;$i<5;$i++) {
+	&BODY_40_59(@V);	unshift(@V,pop(@V));
+}
+$code.=<<___;
+	teq	$Xi,sp
+	bne	.L_40_59		@ [+((12+5)*5+2)*4]
+
+	ldr	$K,.LK_60_79
+	sub	sp,sp,#20*4
+	cmp	sp,#0			@ set carry to denote 60_79
+	b	.L_20_39_or_60_79	@ [+4], spare 300 bytes
+.L_done:
+	add	sp,sp,#80*4		@ "deallocate" stack frame
+	ldmia	$ctx,{$K,$t0,$t1,$t2,$Xi}
+	add	$a,$K,$a
+	add	$b,$t0,$b
+	add	$c,$t1,$c,ror#2
+	add	$d,$t2,$d,ror#2
+	add	$e,$Xi,$e,ror#2
+	stmia	$ctx,{$a,$b,$c,$d,$e}
+	teq	$inp,$len
+	bne	.Lloop			@ [+18], total 1307
+
+	ldmia	sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+.align	2
+.LK_00_19:	.word	0x5a827999
+.LK_20_39:	.word	0x6ed9eba1
+.LK_40_59:	.word	0x8f1bbcdc
+.LK_60_79:	.word	0xca62c1d6
+.size	sha1_block_data_order,.-sha1_block_data_order
+.asciz	"SHA1 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+print $code;
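+
+# Note: the generated assembly is written to STDOUT, so a build step
+# would typically capture it with something like
+# `perl sha1-armv4-large.pl > sha1-armv4-large.S`.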
diff --git a/crypto/sha/asm/sha1-thumb.pl b/crypto/sha/asm/sha1-thumb.pl
new file mode 100644
index 000000000..f025001b2
--- /dev/null
+++ b/crypto/sha/asm/sha1-thumb.pl
@@ -0,0 +1,255 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# sha1_block for Thumb.
+#
+# January 2007.
+#
+# The code is of no direct interest to OpenSSL, because of its low
+# performance. Its purpose is to establish a _size_ benchmark. A
+# pretty useless one, I must say, because the 30% or 88 bytes larger
+# ARMv4 code [available on demand] is almost _twice_ as fast. It
+# should also be noted that in-lining .Lcommon and .Lrotate improves
+# performance by over 40%, while the code grows by only 10% or 32
+# bytes. But once again, the goal was to establish a _size_ benchmark,
+# not performance.
+
+$inline=0;
+#$cheat_on_binutils=1;
+
+$t0="r0";
+$t1="r1";
+$t2="r2";
+$a="r3";
+$b="r4";
+$c="r5";
+$d="r6";
+$e="r7";
+$K="r8";	# "upper" registers can be used in add/sub and mov insns
+$ctx="r9";
+$inp="r10";
+$len="r11";
+$Xi="r12";
+
+sub common {
+<<___;
+	sub	$t0,#4
+	ldr	$t1,[$t0]
+	add	$e,$K			@ E+=K_xx_xx
+	lsl	$t2,$a,#5
+	add	$t2,$e
+	lsr	$e,$a,#27
+	add	$t2,$e			@ E+=ROR(A,27)
+	add	$t2,$t1			@ E+=X[i]
+___
+}
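+# Thumb-1 has no rotate-by-constant, so ROL(A,5) above is pieced
+# together from lsl and lsr; and because the two halves share no bits,
+# plain add can accumulate them in place of orr.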
+sub rotate {
+<<___;
+	mov	$e,$d			@ E=D
+	mov	$d,$c			@ D=C
+	lsl	$c,$b,#30
+	lsr	$b,$b,#2
+	orr	$c,$b			@ C=ROR(B,2)
+	mov	$b,$a			@ B=A
+	add	$a,$t2,$t1		@ A=E+F_xx_xx(B,C,D)
+___
+}
+
+sub BODY_00_19 {
+$code.=$inline?&common():"\tbl	.Lcommon\n";
+$code.=<<___;
+	mov	$t1,$c
+	eor	$t1,$d
+	and	$t1,$b
+	eor	$t1,$d			@ F_00_19(B,C,D)
+___
+$code.=$inline?&rotate():"\tbl	.Lrotate\n";
+}
+
+sub BODY_20_39 {
+$code.=$inline?&common():"\tbl	.Lcommon\n";
+$code.=<<___;
+	mov	$t1,$b
+	eor	$t1,$c
+	eor	$t1,$d			@ F_20_39(B,C,D)
+___
+$code.=$inline?&rotate():"\tbl	.Lrotate\n";
+}
+
+sub BODY_40_59 {
+$code.=$inline?&common():"\tbl	.Lcommon\n";
+$code.=<<___;
+	mov	$t1,$b
+	and	$t1,$c
+	mov	$e,$b
+	orr	$e,$c
+	and	$e,$d
+	orr	$t1,$e			@ F_40_59(B,C,D)
+___
+$code.=$inline?&rotate():"\tbl	.Lrotate\n";
+}
+
+$code=<<___;
+.text
+.code	16
+
+.global	sha1_block_data_order
+.type	sha1_block_data_order,%function
+
+.align	2
+sha1_block_data_order:
+___
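+# The $cheat_on_binutils block below is a workaround: if the toolchain
+# can't be relied on to handle ARM-to-Thumb interworking for this
+# symbol, it emits a short ARM-state stub that switches to Thumb by
+# hand.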
+if ($cheat_on_binutils) {
+$code.=<<___;
+.code	32
+	add	r3,pc,#1
+	bx	r3			@ switch to Thumb ISA
+.code	16
+___
+}
+$code.=<<___;
+	push	{r4-r7}
+	mov	r3,r8
+	mov	r4,r9
+	mov	r5,r10
+	mov	r6,r11
+	mov	r7,r12
+	push	{r3-r7,lr}
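+	@ Thumb-1 push covers only r0-r7 and lr, so the high registers
+	@ r8-r12 are staged through low ones to be saved here (and
+	@ restored the same way at .Lexit)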
+	lsl	r2,#6
+	mov	$ctx,r0			@ save context
+	mov	$inp,r1			@ save inp
+	mov	$len,r2			@ save len
+	add	$len,$inp		@ $len to point at inp end
+
+.Lloop:
+	mov	$Xi,sp
+	mov	$t2,sp
+	sub	$t2,#16*4		@ [3]
+.LXload:
+	ldrb	$a,[$t1,#0]		@ $t1 is r1 and holds inp
+	ldrb	$b,[$t1,#1]
+	ldrb	$c,[$t1,#2]
+	ldrb	$d,[$t1,#3]
+	lsl	$a,#24
+	lsl	$b,#16
+	lsl	$c,#8
+	orr	$a,$b
+	orr	$a,$c
+	orr	$a,$d
+	add	$t1,#4
+	push	{$a}
+	cmp	sp,$t2
+	bne	.LXload			@ [+14*16]
+
+	mov	$inp,$t1		@ update $inp
+	sub	$t2,#32*4
+	sub	$t2,#32*4
+	mov	$e,#31			@ [+4]
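+	@ (the 64-word drop above is split in two because Thumb-1
+	@ immediates top out at 255; 31 is a rotate amount: Thumb ror
+	@ takes only a register operand, and ror by 31 below is the
+	@ schedule's rotate-left-by-1)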
+.LXupdate:
+	ldr	$a,[sp,#15*4]
+	ldr	$b,[sp,#13*4]
+	ldr	$c,[sp,#7*4]
+	ldr	$d,[sp,#2*4]
+	eor	$a,$b
+	eor	$a,$c
+	eor	$a,$d
+	ror	$a,$e
+	push	{$a}
+	cmp	sp,$t2
+	bne	.LXupdate		@ [+(11+1)*64]
+
+	ldmia	$t0!,{$a,$b,$c,$d,$e}	@ $t0 is r0 and holds ctx
+	mov	$t0,$Xi
+
+	ldr	$t2,.LK_00_19
+	mov	$t1,$t0
+	sub	$t1,#20*4
+	mov	$Xi,$t1
+	mov	$K,$t2			@ [+7+4]
+.L_00_19:
+___
+	&BODY_00_19();
+$code.=<<___;
+	cmp	$Xi,$t0
+	bne	.L_00_19		@ [+(2+9+4+2+8+2)*20]
+
+	ldr	$t2,.LK_20_39
+	mov	$t1,$t0
+	sub	$t1,#20*4
+	mov	$Xi,$t1
+	mov	$K,$t2			@ [+5]
+.L_20_39_or_60_79:
+___
+	&BODY_20_39();
+$code.=<<___;
+	cmp	$Xi,$t0
+	bne	.L_20_39_or_60_79	@ [+(2+9+3+2+8+2)*20*2]
+	cmp	sp,$t0
+	beq	.Ldone			@ [+2]
+
+	ldr	$t2,.LK_40_59
+	mov	$t1,$t0
+	sub	$t1,#20*4
+	mov	$Xi,$t1
+	mov	$K,$t2			@ [+5]
+.L_40_59:
+___
+	&BODY_40_59();
+$code.=<<___;
+	cmp	$Xi,$t0
+	bne	.L_40_59		@ [+(2+9+6+2+8+2)*20]
+
+	ldr	$t2,.LK_60_79
+	mov	$Xi,sp
+	mov	$K,$t2
+	b	.L_20_39_or_60_79	@ [+4]
+.Ldone:
+	mov	$t0,$ctx
+	ldr	$t1,[$t0,#0]
+	ldr	$t2,[$t0,#4]
+	add	$a,$t1
+	ldr	$t1,[$t0,#8]
+	add	$b,$t2
+	ldr	$t2,[$t0,#12]
+	add	$c,$t1
+	ldr	$t1,[$t0,#16]
+	add	$d,$t2
+	add	$e,$t1
+	stmia	$t0!,{$a,$b,$c,$d,$e}	@ [+20]
+
+	add	sp,#80*4		@ deallocate stack frame
+	mov	$t0,$ctx		@ restore ctx
+	mov	$t1,$inp		@ restore inp
+	cmp	$t1,$len
+	beq	.Lexit
+	b	.Lloop			@ [+6] total 3212 cycles
+.Lexit:
+	pop	{r2-r7}
+	mov	r8,r2
+	mov	r9,r3
+	mov	r10,r4
+	mov	r11,r5
+	mov	r12,r6
+	mov	lr,r7
+	pop	{r4-r7}
+	bx	lr
+.align	2
+___
+$code.=".Lcommon:\n".&common()."\tmov	pc,lr\n" if (!$inline);
+$code.=".Lrotate:\n".&rotate()."\tmov	pc,lr\n" if (!$inline);
+$code.=<<___;
+.align	2
+.LK_00_19:	.word	0x5a827999
+.LK_20_39:	.word	0x6ed9eba1
+.LK_40_59:	.word	0x8f1bbcdc
+.LK_60_79:	.word	0xca62c1d6
+.size	sha1_block_data_order,.-sha1_block_data_order
+.asciz	"SHA1 block transform for Thumb, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+print $code;