ARM assembly pack: SHA update from master.

Author: Andy Polyakov
Date:   2013-12-09 23:53:42 +01:00
Parent: b76310ba74
Commit: 422c8c36e5

2 changed files with 124 additions and 80 deletions

crypto/sha/asm/sha256-armv4.pl

@@ -21,15 +21,15 @@
 # February 2011.
 #
 # Profiler-assisted and platform-specific optimization resulted in 16%
-# improvement on Cortex A8 core and ~17 cycles per processed byte.
+# improvement on Cortex A8 core and ~16.4 cycles per processed byte.
 
 while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
 open STDOUT,">$output";
 
 $ctx="r0";	$t0="r0";
-$inp="r1";	$t3="r1";
+$inp="r1";	$t4="r1";
 $len="r2";	$t1="r2";
-$T1="r3";
+$T1="r3";	$t3="r3";
 $A="r4";
 $B="r5";
 $C="r6";
@@ -52,71 +52,90 @@ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
 $code.=<<___ if ($i<16);
 #if __ARM_ARCH__>=7
-	ldr	$T1,[$inp],#4
+	@ ldr	$t1,[$inp],#4			@ $i
+# if $i==15
+	str	$inp,[sp,#17*4]			@ make room for $t4
+# endif
+	mov	$t0,$e,ror#$Sigma1[0]
+	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
+	rev	$t1,$t1
+	eor	$t0,$t0,$e,ror#$Sigma1[1]
 #else
-	ldrb	$T1,[$inp,#3]			@ $i
+	@ ldrb	$t1,[$inp,#3]			@ $i
+	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
 	ldrb	$t2,[$inp,#2]
-	ldrb	$t1,[$inp,#1]
-	ldrb	$t0,[$inp],#4
-	orr	$T1,$T1,$t2,lsl#8
-	orr	$T1,$T1,$t1,lsl#16
-	orr	$T1,$T1,$t0,lsl#24
+	ldrb	$t0,[$inp,#1]
+	orr	$t1,$t1,$t2,lsl#8
+	ldrb	$t2,[$inp],#4
+	orr	$t1,$t1,$t0,lsl#16
+# if $i==15
+	str	$inp,[sp,#17*4]			@ make room for $t4
+# endif
+	mov	$t0,$e,ror#$Sigma1[0]
+	orr	$t1,$t1,$t2,lsl#24
+	eor	$t0,$t0,$e,ror#$Sigma1[1]
 #endif
 ___
 $code.=<<___;
-	mov	$t0,$e,ror#$Sigma1[0]
 	ldr	$t2,[$Ktbl],#4			@ *K256++
-	eor	$t0,$t0,$e,ror#$Sigma1[1]
+	add	$h,$h,$t1			@ h+=X[i]
+	str	$t1,[sp,#`$i%16`*4]
 	eor	$t1,$f,$g
-#if $i>=16
-	add	$T1,$T1,$t3			@ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
-	rev	$T1,$T1
-#endif
-#if $i==15
-	str	$inp,[sp,#17*4]			@ leave room for $t3
-#endif
 	eor	$t0,$t0,$e,ror#$Sigma1[2]	@ Sigma1(e)
 	and	$t1,$t1,$e
-	str	$T1,[sp,#`$i%16`*4]
-	add	$T1,$T1,$t0
+	add	$h,$h,$t0			@ h+=Sigma1(e)
 	eor	$t1,$t1,$g			@ Ch(e,f,g)
-	add	$T1,$T1,$h
-	mov	$h,$a,ror#$Sigma0[0]
-	add	$T1,$T1,$t1
-	eor	$h,$h,$a,ror#$Sigma0[1]
-	add	$T1,$T1,$t2
-	eor	$h,$h,$a,ror#$Sigma0[2]		@ Sigma0(a)
-#if $i>=15
-	ldr	$t3,[sp,#`($i+2)%16`*4]		@ from BODY_16_xx
-#endif
-	orr	$t0,$a,$b
-	and	$t1,$a,$b
-	and	$t0,$t0,$c
-	add	$h,$h,$T1
-	orr	$t0,$t0,$t1			@ Maj(a,b,c)
-	add	$d,$d,$T1
-	add	$h,$h,$t0
+	add	$h,$h,$t2			@ h+=K256[i]
+	mov	$t0,$a,ror#$Sigma0[0]
+	add	$h,$h,$t1			@ h+=Ch(e,f,g)
+#if $i==31
+	and	$t2,$t2,#0xff
+	cmp	$t2,#0xf2			@ done?
+#endif
+#if $i<15
+# if __ARM_ARCH__>=7
+	ldr	$t1,[$inp],#4			@ prefetch
+# else
+	ldrb	$t1,[$inp,#3]
+# endif
+	eor	$t2,$a,$b			@ a^b, b^c in next round
+#else
+	ldr	$t1,[sp,#`($i+2)%16`*4]		@ from future BODY_16_xx
+	eor	$t2,$a,$b			@ a^b, b^c in next round
+	ldr	$t4,[sp,#`($i+15)%16`*4]	@ from future BODY_16_xx
+#endif
+	eor	$t0,$t0,$a,ror#$Sigma0[1]
+	and	$t3,$t3,$t2			@ (b^c)&=(a^b)
+	add	$d,$d,$h			@ d+=h
+	eor	$t0,$t0,$a,ror#$Sigma0[2]	@ Sigma0(a)
+	eor	$t3,$t3,$b			@ Maj(a,b,c)
+	add	$h,$h,$t0			@ h+=Sigma0(a)
+	@ add	$h,$h,$t3			@ h+=Maj(a,b,c)
 ___
+	($t2,$t3)=($t3,$t2);
 }
 
 sub BODY_16_XX {
 my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
 
 $code.=<<___;
-	@ ldr	$t3,[sp,#`($i+1)%16`*4]		@ $i
-	ldr	$t2,[sp,#`($i+14)%16`*4]
-	mov	$t0,$t3,ror#$sigma0[0]
-	ldr	$T1,[sp,#`($i+0)%16`*4]
-	eor	$t0,$t0,$t3,ror#$sigma0[1]
-	ldr	$t1,[sp,#`($i+9)%16`*4]
-	eor	$t0,$t0,$t3,lsr#$sigma0[2]	@ sigma0(X[i+1])
-	mov	$t3,$t2,ror#$sigma1[0]
-	add	$T1,$T1,$t0
-	eor	$t3,$t3,$t2,ror#$sigma1[1]
-	add	$T1,$T1,$t1
-	eor	$t3,$t3,$t2,lsr#$sigma1[2]	@ sigma1(X[i+14])
-	@ add	$T1,$T1,$t3
+	@ ldr	$t1,[sp,#`($i+1)%16`*4]		@ $i
+	@ ldr	$t4,[sp,#`($i+14)%16`*4]
+	mov	$t0,$t1,ror#$sigma0[0]
+	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
+	mov	$t2,$t4,ror#$sigma1[0]
+	eor	$t0,$t0,$t1,ror#$sigma0[1]
+	eor	$t2,$t2,$t4,ror#$sigma1[1]
+	eor	$t0,$t0,$t1,lsr#$sigma0[2]	@ sigma0(X[i+1])
+	ldr	$t1,[sp,#`($i+0)%16`*4]
+	eor	$t2,$t2,$t4,lsr#$sigma1[2]	@ sigma1(X[i+14])
+	ldr	$t4,[sp,#`($i+9)%16`*4]
+
+	add	$t2,$t2,$t0
+	mov	$t0,$e,ror#$Sigma1[0]		@ from BODY_00_15
+	add	$t1,$t1,$t2
+	eor	$t0,$t0,$e,ror#$Sigma1[1]	@ from BODY_00_15
+	add	$t1,$t1,$t4			@ X[i]
 ___
 &BODY_00_15(@_);
 }
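The integer rewrite above hinges on two scheduling tricks visible in the comments. Maj(a,b,c) is folded from the old four-instruction orr/and form into ((a^b)&(b^c))^b: each round computes a^b (which becomes the next round's b^c, since next round's b and c are this round's a and b), and the final h+=Maj add is deferred into the following round's "add $a,$a,$t2 ... from the past", which is also why ($t2,$t3) are swapped after every round. A standalone sanity check of the identity (my sketch, not part of the commit):

#!/usr/bin/env perl
# Verify the folded Maj used by the new BODY_00_15:
# Maj(a,b,c) = ((a^b) & (b^c)) ^ b, vs the classic (a&b)|(a&c)|(b&c).
use strict;
use warnings;

for (1 .. 10_000) {
	my ($a, $b, $c) = map { int rand(2**32) } 1 .. 3;
	my $classic = ($a & $b) | ($a & $c) | ($b & $c);
	my $folded  = (($a ^ $b) & ($b ^ $c)) ^ $b;
	die "Maj mismatch\n" if $classic != $folded;
}
print "folded Maj matches on 10000 random triples\n";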
@@ -158,35 +177,41 @@ sha256_block_data_order:
 	sub	$Ktbl,r3,#256		@ K256
 	sub	sp,sp,#16*4		@ alloca(X[16])
 .Loop:
+# if __ARM_ARCH__>=7
+	ldr	$t1,[$inp],#4
+# else
+	ldrb	$t1,[$inp,#3]
+# endif
+	eor	$t3,$B,$C		@ magic
+	eor	$t2,$t2,$t2
 ___
 for($i=0;$i<16;$i++)	{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
 $code.=".Lrounds_16_xx:\n";
 for (;$i<32;$i++)	{ &BODY_16_XX($i,@V); unshift(@V,pop(@V)); }
 $code.=<<___;
-	and	$t2,$t2,#0xff
-	cmp	$t2,#0xf2
+	ldreq	$t3,[sp,#16*4]		@ pull ctx
 	bne	.Lrounds_16_xx
 
-	ldr	$T1,[sp,#16*4]		@ pull ctx
-	ldr	$t0,[$T1,#0]
-	ldr	$t1,[$T1,#4]
-	ldr	$t2,[$T1,#8]
+	add	$A,$A,$t2		@ h+=Maj(a,b,c) from the past
+	ldr	$t0,[$t3,#0]
+	ldr	$t1,[$t3,#4]
+	ldr	$t2,[$t3,#8]
 	add	$A,$A,$t0
-	ldr	$t0,[$T1,#12]
+	ldr	$t0,[$t3,#12]
 	add	$B,$B,$t1
-	ldr	$t1,[$T1,#16]
+	ldr	$t1,[$t3,#16]
 	add	$C,$C,$t2
-	ldr	$t2,[$T1,#20]
+	ldr	$t2,[$t3,#20]
 	add	$D,$D,$t0
-	ldr	$t0,[$T1,#24]
+	ldr	$t0,[$t3,#24]
 	add	$E,$E,$t1
-	ldr	$t1,[$T1,#28]
+	ldr	$t1,[$t3,#28]
 	add	$F,$F,$t2
 	ldr	$inp,[sp,#17*4]		@ pull inp
 	ldr	$t2,[sp,#18*4]		@ pull inp+len
 	add	$G,$G,$t0
 	add	$H,$H,$t1
-	stmia	$T1,{$A,$B,$C,$D,$E,$F,$G,$H}
+	stmia	$t3,{$A,$B,$C,$D,$E,$F,$G,$H}
 	cmp	$inp,$t2
 	sub	$Ktbl,$Ktbl,#256	@ rewind Ktbl
 	bne	.Loop
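Worth spelling out the "cmp $t2,#0xf2 @ done?" test that the old end-of-loop and/cmp pair turned into: at the generated round-31 slot, $t2 still holds the K256 word just fetched, and only in the final pass of .Lrounds_16_xx is that word 0xc67178f2, the 64th SHA-256 round constant (the earlier passes see 0x14292967 and 0x106aa070 there). A tiny model of the check, assuming nothing beyond the public K256 table:

#!/usr/bin/env perl
# Model of the loop terminator: the low byte of the K256 word loaded in
# the body's last generated round identifies the final pass. The values
# are the standard SHA-256 round constants for rounds 31, 47 and 63.
use strict;
use warnings;

my @k_at_slot_31 = (0x14292967, 0x106aa070, 0xc67178f2);
for my $pass (0 .. 2) {
	my $k = $k_at_slot_31[$pass];
	printf "pass %d: K=0x%08x, low byte 0x%02x -> %s\n",
	    $pass + 1, $k, $k & 0xff, (($k & 0xff) == 0xf2 ? "done" : "loop");
}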

crypto/sha/asm/sha512-armv4.pl

@@ -1,7 +1,7 @@
 #!/usr/bin/env perl
 
 # ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
 # project. The module is, however, dual licensed under OpenSSL and
 # CRYPTOGAMS licenses depending on where you obtain it. For further
 # details see http://www.openssl.org/~appro/cryptogams/.
@@ -26,7 +26,24 @@
 # March 2011.
 #
 # Add NEON implementation. On Cortex A8 it was measured to process
-# one byte in 25.5 cycles or 47% faster than integer-only code.
+# one byte in 23.3 cycles or ~60% faster than integer-only code.
+#
+# August 2012.
+#
+# Improve NEON performance by 12% on Snapdragon S4. In absolute
+# terms it's 22.6 cycles per byte, which is a disappointing result.
+# Technical writers asserted that the 3-way S4 pipeline can sustain
+# multiple NEON instructions per cycle, but dual NEON issue could
+# not be observed, and for NEON-only sequences IPC(*) was found to
+# be limited to 1 :-( 0.33 and 0.66 were measured for sequences with
+# ILPs(*) of 1 and 2 respectively. This in turn means that you can
+# even find yourself striving, as I did here, to achieve an IPC
+# adequate to the one delivered by Cortex A8 [for reference, it's
+# 0.5 for ILP of 1, and 1 for higher ILPs].
+#
+# (*) ILP, instruction-level parallelism, is how many instructions
+#     *can* execute at the same time. IPC, instructions per cycle,
+#     indicates how many instructions actually execute.
 
 # Byte order [in]dependence. =========================================
 #
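To put the IPC figures from the new comment into cycle terms (a back-of-envelope aid, not from the commit): IPC is the reciprocal of average cycles per instruction, so 0.33 at ILP 1 means a dependent NEON instruction effectively costs about 3 cycles on S4, against 2 cycles (IPC 0.5) on Cortex A8:

#!/usr/bin/env perl
# Convert the measured IPC numbers into effective cycles per instruction.
use strict;
use warnings;

my @m = (
	[ 'Snapdragon S4, ILP=1', 0.33 ],
	[ 'Snapdragon S4, ILP=2', 0.66 ],
	[ 'Cortex A8,     ILP=1', 0.5  ],
	[ 'Cortex A8,     ILP>1', 1.0  ],
);
printf "%-22s IPC=%.2f -> %.1f cycles/insn\n", $_->[0], $_->[1], 1/$_->[1]
    for @m;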
@@ -457,40 +474,40 @@ $code.=<<___ if ($i<16 || $i&1);
 	vld1.64		{@X[$i%16]},[$inp]!	@ handles unaligned
 #endif
 	vshr.u64	$t1,$e,#@Sigma1[1]
+#if $i>0
+	vadd.i64	$a,$Maj			@ h+=Maj from the past
+#endif
 	vshr.u64	$t2,$e,#@Sigma1[2]
 ___
 $code.=<<___;
 	vld1.64		{$K},[$Ktbl,:64]!	@ K[i++]
 	vsli.64		$t0,$e,#`64-@Sigma1[0]`
 	vsli.64		$t1,$e,#`64-@Sigma1[1]`
+	vmov		$Ch,$e
 	vsli.64		$t2,$e,#`64-@Sigma1[2]`
 #if $i<16 && defined(__ARMEL__)
 	vrev64.8	@X[$i],@X[$i]
 #endif
-	vadd.i64	$T1,$K,$h
-	veor		$Ch,$f,$g
-	veor		$t0,$t1
-	vand		$Ch,$e
-	veor		$t0,$t2			@ Sigma1(e)
-	veor		$Ch,$g			@ Ch(e,f,g)
-	vadd.i64	$T1,$t0
+	veor		$t1,$t0
+	vbsl		$Ch,$f,$g		@ Ch(e,f,g)
 	vshr.u64	$t0,$a,#@Sigma0[0]
-	vadd.i64	$T1,$Ch
+	veor		$t2,$t1			@ Sigma1(e)
+	vadd.i64	$T1,$Ch,$h
 	vshr.u64	$t1,$a,#@Sigma0[1]
-	vshr.u64	$t2,$a,#@Sigma0[2]
 	vsli.64		$t0,$a,#`64-@Sigma0[0]`
+	vadd.i64	$T1,$t2
+	vshr.u64	$t2,$a,#@Sigma0[2]
+	vadd.i64	$K,@X[$i%16]
 	vsli.64		$t1,$a,#`64-@Sigma0[1]`
+	veor		$Maj,$a,$b
 	vsli.64		$t2,$a,#`64-@Sigma0[2]`
-	vadd.i64	$T1,@X[$i%16]
-	vorr		$Maj,$a,$c
-	vand		$Ch,$a,$c
 	veor		$h,$t0,$t1
-	vand		$Maj,$b
+	vadd.i64	$T1,$K
+	vbsl		$Maj,$c,$b		@ Maj(a,b,c)
 	veor		$h,$t2			@ Sigma0(a)
-	vorr		$Maj,$Ch		@ Maj(a,b,c)
-	vadd.i64	$h,$T1
 	vadd.i64	$d,$T1
-	vadd.i64	$h,$Maj
+	vadd.i64	$Maj,$T1
+	@ vadd.i64	$h,$Maj
 ___
 }
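The NEON rewrite leans on vbsl (bitwise select): with a mask already in the destination, vbsl d,x,y yields (d&x)|(~d&y) per bit. With e as the mask over (f,g) that is exactly Ch(e,f,g), and with a^b as the mask over (c,b) it is Maj(a,b,c), replacing the old veor/vand/vorr chains. The h+=Maj add is again deferred: it accumulates into the $Maj register via "vadd.i64 $Maj,$T1" and is caught up by the "$a,$Maj" add at the top of the next round (and flushed after the last round, per the hunks below). A model of the two selection identities (my sketch, not part of the commit):

#!/usr/bin/env perl
# Model NEON vbsl: the destination holds the mask m; the result is
# (m & x) | (~m & y), selecting x where m is 1 and y where m is 0.
use strict;
use warnings;

sub vbsl { my ($m, $x, $y) = @_; (($m & $x) | (~$m & $y)) & 0xffffffff }

for (1 .. 10_000) {
	my ($a, $b, $c, $e, $f, $g) = map { int rand(2**32) } 1 .. 6;
	die "Ch mismatch\n"
	    if vbsl($e, $f, $g) != ((($e & $f) ^ (~$e & $g)) & 0xffffffff);
	die "Maj mismatch\n"
	    if vbsl($a ^ $b, $c, $b) != (($a & $b) | ($a & $c) | ($b & $c));
}
print "vbsl forms of Ch and Maj verified\n";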
@@ -508,6 +525,7 @@ $i /= 2;
 $code.=<<___;
 	vshr.u64	$t0,@X[($i+7)%8],#@sigma1[0]
 	vshr.u64	$t1,@X[($i+7)%8],#@sigma1[1]
+	vadd.i64	@_[0],d30			@ h+=Maj from the past
 	vshr.u64	$s1,@X[($i+7)%8],#@sigma1[2]
 	vsli.64		$t0,@X[($i+7)%8],#`64-@sigma1[0]`
 	vext.8		$s0,@X[$i%8],@X[($i+1)%8],#8	@ X[i+1]
@@ -554,6 +572,7 @@ for(;$i<32;$i++)	{ &NEON_16_79($i,@V); unshift(@V,pop(@V)); }
 $code.=<<___;
 	bne	.L16_79_neon
 
+	vadd.i64	$A,d30		@ h+=Maj from the past
 	vldmia	$ctx,{d24-d31}		@ load context to temp
 	vadd.i64	q8,q12		@ vectorized accumulate
 	vadd.i64	q9,q13