Rename amd64 modules to x86_64 and update RC4 implementation.

Andy Polyakov 2005-05-03 15:42:05 +00:00
parent 4b45051902
commit 5f1841cdca
7 changed files with 248 additions and 170 deletions

Configure

@@ -118,7 +118,7 @@ my $x86_elf_asm="x86cpuid-elf.o:bn86-elf.o co86-elf.o:dx86-elf.o yx86-elf.o:ax86
my $x86_coff_asm="x86cpuid-cof.o:bn86-cof.o co86-cof.o:dx86-cof.o yx86-cof.o:ax86-cof.o:bx86-cof.o:mx86-cof.o:sx86-cof.o s512sse2-cof.o:cx86-cof.o:rx86-cof.o:rm86-cof.o:r586-cof.o";
my $x86_out_asm="x86cpuid-out.o:bn86-out.o co86-out.o:dx86-out.o yx86-out.o:ax86-out.o:bx86-out.o:mx86-out.o:sx86-out.o s512sse2-out.o:cx86-out.o:rx86-out.o:rm86-out.o:r586-out.o";
my $x86_64_asm="amd64cpuid.o:x86_64-gcc.o:::::::rc4-amd64.o::";
my $x86_64_asm="x86_64cpuid.o:x86_64-gcc.o:::::::rc4-x86_64.o::";
my $ia64_asm=":ia64.o::aes_core.o aes_cbc.o aes-ia64.o:::sha1-ia64.o sha256-ia64.o sha512-ia64.o::rc4-ia64.o::";
my $no_asm="::::::::::";
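Each colon-separated field in these strings names the assembler object for one algorithm, in the same order as the per-target variables dumped in TABLE below (cpuid, bn, des, aes, bf, md5, sha1, cast, rc4, rmd160, rc5). A minimal Perl sketch of that unpacking, with the field order inferred from the TABLE entries and the variable names chosen here for illustration (this is not the literal Configure code):

# split the asm string into per-algorithm object slots;
# the limit of -1 keeps the empty trailing fields
my $x86_64_asm = "x86_64cpuid.o:x86_64-gcc.o:::::::rc4-x86_64.o::";
my @fields = qw(cpuid bn des aes bf md5 sha1 cast rc4 rmd160 rc5);
my %obj;
@obj{@fields} = split(/:/, $x86_64_asm, -1);
printf "\$%s_obj = %s\n", $_, $obj{$_} foreach @fields;
# prints, among the empty slots: $cpuid_obj = x86_64cpuid.o,
# $bn_obj = x86_64-gcc.o and $rc4_obj = rc4-x86_64.o, matching TABLE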

TABLE (12 changed lines)

@@ -278,7 +278,7 @@ $thread_cflag = -pthread -D_THREAD_SAFE -D_REENTRANT
$sys_id =
$lflags =
$bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
-$cpuid_obj = amd64cpuid.o
+$cpuid_obj = x86_64cpuid.o
$bn_obj = x86_64-gcc.o
$des_obj =
$aes_obj =
@@ -286,7 +286,7 @@ $bf_obj =
$md5_obj =
$sha1_obj =
$cast_obj =
-$rc4_obj = rc4-amd64.o
+$rc4_obj = rc4-x86_64.o
$rmd160_obj =
$rc5_obj =
$dso_scheme = dlfcn
@@ -3086,7 +3086,7 @@ $thread_cflag = -D_REENTRANT
$sys_id =
$lflags = -ldl
$bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK BF_PTR2 DES_INT DES_UNROLL
-$cpuid_obj = amd64cpuid.o
+$cpuid_obj = x86_64cpuid.o
$bn_obj = x86_64-gcc.o
$des_obj =
$aes_obj =
@@ -3094,7 +3094,7 @@ $bf_obj =
$md5_obj =
$sha1_obj =
$cast_obj =
-$rc4_obj = rc4-amd64.o
+$rc4_obj = rc4-x86_64.o
$rmd160_obj =
$rc5_obj =
$dso_scheme = dlfcn
@@ -3896,7 +3896,7 @@ $thread_cflag = -D_REENTRANT
$sys_id =
$lflags = -lsocket -lnsl -ldl
$bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK BF_PTR2 DES_INT DES_UNROLL
-$cpuid_obj = amd64cpuid.o
+$cpuid_obj = x86_64cpuid.o
$bn_obj = x86_64-gcc.o
$des_obj =
$aes_obj =
@@ -3904,7 +3904,7 @@ $bf_obj =
$md5_obj =
$sha1_obj =
$cast_obj =
-$rc4_obj = rc4-amd64.o
+$rc4_obj = rc4-x86_64.o
$rmd160_obj =
$rc5_obj =
$dso_scheme = dlfcn


@@ -70,8 +70,8 @@ x86cpuid-cof.s: x86cpuid.pl perlasm/x86asm.pl
x86cpuid-out.s: x86cpuid.pl perlasm/x86asm.pl
$(PERL) x86cpuid.pl a.out $(CFLAGS) $(PROCESSOR) > $@
-amd64cpuid.s: amd64cpuid.pl
-$(PERL) amd64cpuid.pl $@
+x86_64cpuid.s: x86_64cpuid.pl
+$(PERL) x86_64cpuid.pl $@
ia64cpuid.s: ia64cpuid.S
$(CC) $(CFLAGS) -E ia64cpuid.S > $@


@@ -58,7 +58,7 @@ rx86-cof.s: asm/rc4-586.pl ../perlasm/x86asm.pl
rx86-out.s: asm/rc4-586.pl ../perlasm/x86asm.pl
(cd asm; $(PERL) rc4-586.pl a.out $(CFLAGS) > ../$@)
-rc4-amd64.s: asm/rc4-amd64.pl; $(PERL) asm/rc4-amd64.pl $@
+rc4-x86_64.s: asm/rc4-x86_64.pl; $(PERL) asm/rc4-x86_64.pl $@
rc4-ia64.s: asm/rc4-ia64.S
$(CC) $(CFLAGS) -E asm/rc4-ia64.S > $@
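The renamed rule simply hands the target file name to the generator; the script itself pipes its output through perlasm/x86_64-xlate.pl (see its 'open STDOUT' line below). A rough Perl equivalent of the rc4-x86_64.s recipe, for illustration only:

# what "$(PERL) asm/rc4-x86_64.pl $@" amounts to when run from crypto/rc4
system($^X, "asm/rc4-x86_64.pl", "rc4-x86_64.s") == 0
    or die "rc4-x86_64.pl failed: $?\n";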

crypto/rc4/asm/rc4-amd64.pl (deleted file, 160 lines)

@@ -1,160 +0,0 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. Rights for redistribution and usage in source and binary
# forms are granted according to the OpenSSL license.
# ====================================================================
#
# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
# "hand-coded assembler"] doesn't stand for the whole improvement
# coefficient. It turned out that eliminating RC4_CHAR from config
# line results in ~40% improvement (yes, even for C implementation).
# Presumably it has everything to do with AMD cache architecture and
# RAW or whatever penalties. Once again! The module *requires* config
# line *without* RC4_CHAR! As for the coding "secret," I bet on partial-
# register arithmetic. For example, instead of 'inc %r8; and $255,%r8'
# I simply use 'inc %r8b'. Even though the optimization manual discourages
# operating on partial registers, it turned out to be the best bet.
# At least for AMD... How IA32E would perform remains to be seen...
# As was shown by Marc Bevand, reordering a couple of load operations
# results in an even higher performance gain of 3.3x:-) At least on
# Opteron... For reference, 1x in this case is RC4_CHAR C code
# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
# The latter means that if you want to *estimate* what to expect from
# *your* CPU, multiply 54 by 3.3 and by the clock frequency in GHz.
# Intel P4 EM64T core was found to run the AMD64 code really slow...
# The only way to achieve comparable performance on P4 is to keep
# RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to
# compose blended code that would perform even within a 30% margin
# on both AMD and Intel platforms, I implement both cases. See
# rc4_skey.c for further details... This applies to 0.9.8 and later;
# in the 0.9.7 context the RC4_CHAR code path is never engaged and
# ~70 bytes of code remain redundant.
$output=shift;
open STDOUT,"| $^X ../perlasm/x86_64-xlate.pl $output";
$dat="%rdi"; # arg1
$len="%rsi"; # arg2
$inp="%rdx"; # arg3
$out="%rcx"; # arg4
$XX="%r10";
$TX="%r8";
$YY="%r11";
$TY="%r9";
$code=<<___;
.text
.globl RC4
.type RC4,\@function,4
.align 16
RC4: or $len,$len
jne .Lentry
ret
.Lentry:
add \$8,$dat
movl -8($dat),$XX#d
movl -4($dat),$YY#d
cmpl \$-1,256($dat)
je .LRC4_CHAR
test \$-8,$len
jz .Lloop1
.align 16
.Lloop8:
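# (each pass through .Lloop8 produces eight keystream bytes, gathered into
#  %rax one byte at a time via the ror's, then XORed against eight input
#  bytes with a single 64-bit load and store at the bottom of the loop)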
inc $XX#b
movl ($dat,$XX,4),$TX#d
add $TX#b,$YY#b
movl ($dat,$YY,4),$TY#d
movl $TX#d,($dat,$YY,4)
movl $TY#d,($dat,$XX,4)
add $TX#b,$TY#b
inc $XX#b
movl ($dat,$XX,4),$TX#d
movb ($dat,$TY,4),%al
___
for ($i=1;$i<=6;$i++) {
$code.=<<___;
add $TX#b,$YY#b
ror \$8,%rax
movl ($dat,$YY,4),$TY#d
movl $TX#d,($dat,$YY,4)
movl $TY#d,($dat,$XX,4)
add $TX#b,$TY#b
inc $XX#b
movl ($dat,$XX,4),$TX#d
movb ($dat,$TY,4),%al
___
}
$code.=<<___;
add $TX#b,$YY#b
ror \$8,%rax
movl ($dat,$YY,4),$TY#d
movl $TX#d,($dat,$YY,4)
movl $TY#d,($dat,$XX,4)
sub \$8,$len
add $TY#b,$TX#b
movb ($dat,$TX,4),%al
ror \$8,%rax
add \$8,$inp
add \$8,$out
xor -8($inp),%rax
mov %rax,-8($out)
test \$-8,$len
jnz .Lloop8
cmp \$0,$len
jne .Lloop1
.Lexit:
movl $XX#d,-8($dat)
movl $YY#d,-4($dat)
ret
.align 16
.Lloop1:
movzb ($inp),%eax
inc $XX#b
movl ($dat,$XX,4),$TX#d
add $TX#b,$YY#b
movl ($dat,$YY,4),$TY#d
movl $TX#d,($dat,$YY,4)
movl $TY#d,($dat,$XX,4)
add $TY#b,$TX#b
movl ($dat,$TX,4),$TY#d
xor $TY,%rax
inc $inp
movb %al,($out)
inc $out
dec $len
jnz .Lloop1
jmp .Lexit
.align 16
.LRC4_CHAR:
add \$1,$XX#b
movzb ($dat,$XX),$TX#d
add $TX#b,$YY#b
movzb ($dat,$YY),$TY#d
movb $TX#b,($dat,$YY)
movb $TY#b,($dat,$XX)
add $TX#b,$TY#b
movzb ($dat,$TY),$TY#d
xorb ($inp),$TY#b
movb $TY#b,($out)
lea 1($inp),$inp
lea 1($out),$out
sub \$1,$len
jnz .LRC4_CHAR
jmp .Lexit
.size RC4,.-RC4
___
$code =~ s/#([bwd])/$1/gm;
print $code;
close STDOUT;
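Both the deleted module above and its 238-line replacement below generate the standard RC4 keystream loop (PRGA) over the key's 256-entry state array; the .Lloop1 sequence follows it step for step. A hedged Perl reference of that RC4_INT path, with names chosen here for illustration (key setup itself is RC4_set_key's job in rc4_skey.c, which the comments point to):

# one-byte-at-a-time RC4 PRGA, mirroring the .Lloop1 code path:
# $i and $y are the counters the assembly keeps at -8($dat) and -4($dat),
# @$S is the 256-entry state array it indexes as ($dat,reg,4)
sub rc4_ref {
    my ($S, $i, $y, $in) = @_;
    my $out = '';
    foreach my $c (unpack("C*", $in)) {
        $i = ($i + 1) & 0xff;
        my $tx = $S->[$i];
        $y = ($y + $tx) & 0xff;
        my $ty = $S->[$y];
        $S->[$y] = $tx;                    # swap S[i] and S[y]
        $S->[$i] = $ty;
        $out .= chr($c ^ $S->[($tx + $ty) & 0xff]);
    }
    return ($out, $i, $y);                 # updated counters go back into the key
}

The unrolled .Lloop8 path and the RC4_CHAR path compute exactly the same thing; they differ only in how many bytes are processed per iteration and in the element width of the state array.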

crypto/rc4/asm/rc4-x86_64.pl (new executable file, 238 lines)

@@ -0,0 +1,238 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. Rights for redistribution and usage in source and binary
# forms are granted according to the OpenSSL license.
# ====================================================================
#
# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
# "hand-coded assembler"] doesn't stand for the whole improvement
# coefficient. It turned out that eliminating RC4_CHAR from config
# line results in ~40% improvement (yes, even for C implementation).
# Presumably it has everything to do with AMD cache architecture and
# RAW or whatever penalties. Once again! The module *requires* config
# line *without* RC4_CHAR! As for the coding "secret," I bet on partial-
# register arithmetic. For example, instead of 'inc %r8; and $255,%r8'
# I simply use 'inc %r8b'. Even though the optimization manual discourages
# operating on partial registers, it turned out to be the best bet.
# At least for AMD... How IA32E would perform remains to be seen...
# As was shown by Marc Bevand, reordering a couple of load operations
# results in an even higher performance gain of 3.3x:-) At least on
# Opteron... For reference, 1x in this case is RC4_CHAR C code
# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
# The latter means that if you want to *estimate* what to expect from
# *your* Opteron, multiply 54 by 3.3 and by the clock frequency in GHz.
# Intel P4 EM64T core was found to run the AMD64 code really slow...
# The only way to achieve comparable performance on P4 was to keep
# RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to
# compose blended code that would perform even within a 30% margin
# on both AMD and Intel platforms, I implement both cases. See
# rc4_skey.c for further details...
# The P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing
# those with add/sub results in a 50% performance improvement of the
# folded loop...
# As was shown by Zou Nanhai, loop unrolling can improve Intel EM64T
# performance by >30% [unlike the P4 32-bit case, that is]. But this is
# provided that loads are reordered even more aggressively! Both code
# paths, AMD64 and EM64T, reorder loads in essentially the same manner
# as my IA-64 implementation. On Opteron this resulted in a modest 5%
# improvement [I had to test it], while final Intel P4 performance
# now achieves a respectable 432MBps on a 2.8GHz processor, for reference.
# If executed on a Xeon, the current RC4_CHAR code path is 2.7x faster than
# the RC4_INT code path, while on an Opteron it is only 25% slower than
# the latter...
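# For illustration of the estimate above (2.2GHz is an assumed clock, not a
# figure from this note): an Opteron at 2.2GHz would be expected to deliver
# roughly 54 * 3.3 * 2.2 ~ 392MBps on this code path.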
$output=shift;
open STDOUT,"| $^X ../perlasm/x86_64-xlate.pl $output";
$dat="%rdi"; # arg1
$len="%rsi"; # arg2
$inp="%rdx"; # arg3
$out="%rcx"; # arg4
@XX=("%r8","%r10");
@TX=("%r9","%r11");
$YY="%r12";
$TY="%r13";
$code=<<___;
.text
.globl RC4
.type RC4,\@function,4
.align 16
RC4: or $len,$len
jne .Lentry
ret
.Lentry:
push %r12
push %r13
add \$8,$dat
movl -8($dat),$XX[0]#d
movl -4($dat),$YY#d
cmpl \$-1,256($dat)
je .LRC4_CHAR
inc $XX[0]#b
movl ($dat,$XX[0],4),$TX[0]#d
test \$-8,$len
jz .Lloop1
jmp .Lloop8
.align 16
.Lloop8:
___
for ($i=0;$i<8;$i++) {
$code.=<<___;
add $TX[0]#b,$YY#b
mov $XX[0],$XX[1]
movl ($dat,$YY,4),$TY#d
ror \$8,%rax # ror is redundant when $i=0
inc $XX[1]#b
movl ($dat,$XX[1],4),$TX[1]#d
cmp $XX[1],$YY
movl $TX[0]#d,($dat,$YY,4)
cmove $TX[0],$TX[1]
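# (the load of $TX[1] above was issued before the store to ($dat,$YY,4);
#  if $YY happens to equal $XX[1], that preloaded value is stale and the
#  conditional move above refreshes it with the value just stored)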
movl $TY#d,($dat,$XX[0],4)
add $TX[0]#b,$TY#b
movb ($dat,$TY,4),%al
___
push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
}
$code.=<<___;
ror \$8,%rax
sub \$8,$len
xor ($inp),%rax
add \$8,$inp
mov %rax,($out)
add \$8,$out
test \$-8,$len
jnz .Lloop8
cmp \$0,$len
jne .Lloop1
___
$code.=<<___;
.Lexit:
sub \$1,$XX[0]#b
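# ($XX[0] was pre-incremented for the next iteration's preload, so step it
#  back before writing the x counter back into the key structure)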
movl $XX[0]#d,-8($dat)
movl $YY#d,-4($dat)
pop %r13
pop %r12
ret
.align 16
.Lloop1:
add $TX[0]#b,$YY#b
movl ($dat,$YY,4),$TY#d
movl $TX[0]#d,($dat,$YY,4)
movl $TY#d,($dat,$XX[0],4)
add $TY#b,$TX[0]#b
inc $XX[0]#b
movl ($dat,$TX[0],4),$TY#d
movl ($dat,$XX[0],4),$TX[0]#d
xorb ($inp),$TY#b
inc $inp
movb $TY#b,($out)
inc $out
dec $len
jnz .Lloop1
jmp .Lexit
.align 16
.LRC4_CHAR:
add \$1,$XX[0]#b
movzb ($dat,$XX[0]),$TX[0]#d
test \$-8,$len
jz .Lcloop1
push %rbx
jmp .Lcloop8
.align 16
.Lcloop8:
mov ($inp),%eax
mov 4($inp),%ebx
___
# unroll 2x4-wise, because 64-bit rotates kill Intel P4...
for ($i=0;$i<4;$i++) {
$code.=<<___;
add $TX[0]#b,$YY#b
lea 1($XX[0]),$XX[1]
movzb ($dat,$YY),$TY#d
movzb $XX[1]#b,$XX[1]#d
movzb ($dat,$XX[1]),$TX[1]#d
movb $TX[0]#b,($dat,$YY)
cmp $XX[1],$YY
movb $TY#b,($dat,$XX[0])
jne .Lcmov$i # Intel cmov is sloooow...
mov $TX[0],$TX[1]
.Lcmov$i:
add $TX[0]#b,$TY#b
xor ($dat,$TY),%al
ror \$8,%eax
___
push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
}
for ($i=4;$i<8;$i++) {
$code.=<<___;
add $TX[0]#b,$YY#b
lea 1($XX[0]),$XX[1]
movzb ($dat,$YY),$TY#d
movzb $XX[1]#b,$XX[1]
movzb ($dat,$XX[1]),$TX[1]#d
movb $TX[0]#b,($dat,$YY)
cmp $XX[1],$YY
movb $TY#b,($dat,$XX[0])
jne .Lcmov$i # Intel cmov is sloooow...
mov $TX[0],$TX[1]
.Lcmov$i:
add $TX[0]#b,$TY#b
xor ($dat,$TY),%bl
ror \$8,%ebx
___
push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
}
$code.=<<___;
lea -8($len),$len
mov %eax,($out)
lea 8($inp),$inp
mov %ebx,4($out)
lea 8($out),$out
test \$-8,$len
jnz .Lcloop8
pop %rbx
cmp \$0,$len
jne .Lcloop1
jmp .Lexit
___
$code.=<<___;
.align 16
.Lcloop1:
add $TX[0]#b,$YY#b
movzb ($dat,$YY),$TY#d
movb $TX[0]#b,($dat,$YY)
movb $TY#b,($dat,$XX[0])
add $TX[0]#b,$TY#b
add \$1,$XX[0]#b
movzb ($dat,$TY),$TY#d
movzb ($dat,$XX[0]),$TX[0]#d
xorb ($inp),$TY#b
lea 1($inp),$inp
movb $TY#b,($out)
lea 1($out),$out
sub \$1,$len
jnz .Lcloop1
jmp .Lexit
.size RC4,.-RC4
___
$code =~ s/#([bwd])/$1/gm;
print $code;
close STDOUT;
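The '#b', '#w' and '#d' suffixes used throughout both generators are a private notation for partial registers: interpolation first produces the 64-bit register name, then the final substitution glues the size letter onto it, which is exactly the 'inc %r8b'-style partial-register trick the header comments describe. A standalone Perl sketch of the same mechanism, illustrative only:

# demonstrate the "#b/#w/#d" notation and the s/#([bwd])/$1/ pass
my $XX = "%r10";
my $code = "inc\t$XX#b\n" . "movl\t(%rdi,$XX,4),%r8#d\n";
$code =~ s/#([bwd])/$1/gm;   # same substitution the modules apply before printing
print $code;
# prints:
#   inc     %r10b
#   movl    (%rdi,%r10,4),%r8d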