Backport of rc4-x86_64 from HEAD.

parent 3f2f0c8892
commit 3380c4561e
@@ -177,11 +177,11 @@ my %table=(
# actually recommend to consider using gcc shared build even with vendor
# compiler:-)
# <appro@fy.chalmers.se>
"solaris64-x86_64-gcc","gcc:-m64 -O3 -Wall -DL_ENDIAN -DMD32_REG_T=int::-D_REENTRANT::-lsocket -lnsl -ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK BF_PTR2 DES_INT DES_UNROLL:asm/x86_64-gcc.o::::::asm/rc4-amd64.o:::dlfcn:solaris-shared:-fPIC:-m64 -shared -static-libgcc:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"solaris64-x86_64-gcc","gcc:-m64 -O3 -Wall -DL_ENDIAN -DMD32_REG_T=int::-D_REENTRANT::-lsocket -lnsl -ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK RC4_CHAR BF_PTR2 DES_INT DES_UNROLL:asm/x86_64-gcc.o::::::asm/rc4-x86_64.o:::dlfcn:solaris-shared:-fPIC:-m64 -shared -static-libgcc:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",

#### Solaris x86 with Sun C setups
"solaris-x86-cc","cc:-fast -O -Xa::-D_REENTRANT::-lsocket -lnsl -ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_PTR DES_UNROLL BF_PTR::::::::::dlfcn:solaris-shared:-KPIC:-G -dy -z text:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"solaris64-x86_64-cc","cc:-fast -xarch=amd64 -xstrconst -Xa -DL_ENDIAN::-D_REENTRANT::-lsocket -lnsl -ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK BF_PTR2 DES_INT DES_UNROLL::::::::::dlfcn:solaris-shared:-KPIC:-xarch=amd64 -G -dy -z text:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"solaris64-x86_64-cc","cc:-fast -xarch=amd64 -xstrconst -Xa -DL_ENDIAN::-D_REENTRANT::-lsocket -lnsl -ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK RC4_CHAR BF_PTR2 DES_INT DES_UNROLL::::::::::dlfcn:solaris-shared:-KPIC:-xarch=amd64 -G -dy -z text:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",

#### SPARC Solaris with GNU C setups
"solaris-sparcv7-gcc","gcc:-O3 -fomit-frame-pointer -Wall -DB_ENDIAN -DBN_DIV2W::-D_REENTRANT::-lsocket -lnsl -ldl:BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR::::::::::dlfcn:solaris-shared:-fPIC:-shared:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
@@ -411,8 +411,7 @@ my %table=(
"linux-s390x", "gcc:-DB_ENDIAN -DTERMIO -DNO_ASM -O3 -fomit-frame-pointer -Wall::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG::::::::::dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"linux-ia64", "gcc:-DL_ENDIAN -DTERMIO -O3 -fomit-frame-pointer -Wall::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK:asm/ia64.o::::asm/sha1-ia64.o::asm/rc4-ia64.o:::dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"linux-ia64-ecc", "ecc:-DL_ENDIAN -DTERMIO -O2 -Wall -no_cpprt::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK:asm/ia64.o::::asm/sha1-ia64.o::asm/rc4-ia64.o:::dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"linux-x86_64", "gcc:-m64 -DL_ENDIAN -DTERMIO -O3 -Wall -DMD32_REG_T=int::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK BF_PTR2 DES_INT DES_UNROLL:asm/x86_64-gcc.o::::::asm/rc4-amd64.o:::dlfcn:linux-shared:-fPIC:-m64:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"linux-em64t", "gcc:-m64 -DL_ENDIAN -DTERMIO -O3 -Wall -DMD32_REG_T=int::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK RC4_CHAR BF_PTR2 DES_INT DES_UNROLL:asm/x86_64-gcc.o:::::::::dlfcn:linux-shared:-fPIC:-m64:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"linux-x86_64", "gcc:-m64 -DL_ENDIAN -DTERMIO -O3 -Wall -DMD32_REG_T=int::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK RC4_CHAR BF_PTR2 DES_INT DES_UNROLL:asm/x86_64-gcc.o::::::asm/rc4-x86_64.o:::dlfcn:linux-shared:-fPIC:-m64:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"NetBSD-sparc", "gcc:-DTERMIOS -O3 -fomit-frame-pointer -mv8 -Wall -DB_ENDIAN::(unknown):::BN_LLONG MD2_CHAR RC4_INDEX DES_UNROLL::::::::::dlfcn:bsd-gcc-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"NetBSD-m68", "gcc:-DTERMIOS -O3 -fomit-frame-pointer -Wall -DB_ENDIAN::(unknown):::BN_LLONG MD2_CHAR RC4_INDEX DES_UNROLL::::::::::dlfcn:bsd-gcc-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
"NetBSD-x86", "gcc:-DTERMIOS -O3 -fomit-frame-pointer -m486 -Wall::(unknown):::BN_LLONG ${x86_gcc_des} ${x86_gcc_opts}::::::::::dlfcn:bsd-gcc-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)",
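Each Configure entry above is one colon-separated string; the two fields this commit touches are the cipher option list (bn_ops, where RC4_CHAR is added) and the RC4 assembler object (rc4_obj, switched from asm/rc4-amd64.o to asm/rc4-x86_64.o). Below is a minimal Perl sketch of how those fields can be picked out of an entry, with the field positions inferred from the TABLE dump further down in this commit; it is an illustration only, not part of the commit.

    #!/usr/bin/env perl
    # Illustration only: split a Configure table entry and show the two
    # fields this commit changes (option list and RC4 assembler object).
    # Field positions are inferred from the TABLE dump below, not taken
    # from Configure itself.
    my $entry = "gcc:-m64 -O3 -Wall -DL_ENDIAN -DMD32_REG_T=int::-D_REENTRANT::"
              . "-lsocket -lnsl -ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK RC4_CHAR BF_PTR2 DES_INT DES_UNROLL:"
              . "asm/x86_64-gcc.o::::::asm/rc4-x86_64.o:::dlfcn:solaris-shared:-fPIC:"
              . "-m64 -shared -static-libgcc:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)";
    my @f = split /:/, $entry, -1;
    print "bn_ops  = $f[6]\n";   # SIXTY_FOUR_BIT_LONG RC4_CHUNK RC4_CHAR ...
    print "rc4_obj = $f[13]\n";  # asm/rc4-x86_64.o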

TABLE
@@ -3075,31 +3075,6 @@ $shared_extension = .so.$(SHLIB_MAJOR).$(SHLIB_MINOR)
$ranlib =
$arflags =

*** linux-em64t
$cc = gcc
$cflags = -m64 -DL_ENDIAN -DTERMIO -O3 -Wall -DMD32_REG_T=int
$unistd =
$thread_cflag = -D_REENTRANT
$sys_id =
$lflags = -ldl
$bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK RC4_CHAR BF_PTR2 DES_INT DES_UNROLL
$bn_obj = asm/x86_64-gcc.o
$des_obj =
$bf_obj =
$md5_obj =
$sha1_obj =
$cast_obj =
$rc4_obj =
$rmd160_obj =
$rc5_obj =
$dso_scheme = dlfcn
$shared_target= linux-shared
$shared_cflag = -fPIC
$shared_ldflag = -m64
$shared_extension = .so.$(SHLIB_MAJOR).$(SHLIB_MINOR)
$ranlib =
$arflags =

*** linux-ia64
$cc = gcc
$cflags = -DL_ENDIAN -DTERMIO -O3 -fomit-frame-pointer -Wall
@@ -3507,14 +3482,14 @@ $unistd =
$thread_cflag = -D_REENTRANT
$sys_id =
$lflags = -ldl
$bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK BF_PTR2 DES_INT DES_UNROLL
$bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK RC4_CHAR BF_PTR2 DES_INT DES_UNROLL
$bn_obj = asm/x86_64-gcc.o
$des_obj =
$bf_obj =
$md5_obj =
$sha1_obj =
$cast_obj =
$rc4_obj = asm/rc4-amd64.o
$rc4_obj = asm/rc4-x86_64.o
$rmd160_obj =
$rc5_obj =
$dso_scheme = dlfcn
@@ -4182,7 +4157,7 @@ $unistd =
$thread_cflag = -D_REENTRANT
$sys_id =
$lflags = -lsocket -lnsl -ldl
$bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK BF_PTR2 DES_INT DES_UNROLL
$bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK RC4_CHAR BF_PTR2 DES_INT DES_UNROLL
$bn_obj =
$des_obj =
$bf_obj =
@@ -4207,14 +4182,14 @@ $unistd =
$thread_cflag = -D_REENTRANT
$sys_id =
$lflags = -lsocket -lnsl -ldl
$bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK BF_PTR2 DES_INT DES_UNROLL
$bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK RC4_CHAR BF_PTR2 DES_INT DES_UNROLL
$bn_obj = asm/x86_64-gcc.o
$des_obj =
$bf_obj =
$md5_obj =
$sha1_obj =
$cast_obj =
$rc4_obj = asm/rc4-amd64.o
$rc4_obj = asm/rc4-x86_64.o
$rmd160_obj =
$rc5_obj =
$dso_scheme = dlfcn
@@ -66,7 +66,7 @@ asm/rx86bsdi.o: asm/rx86unix.cpp
asm/rx86unix.cpp: asm/rc4-586.pl ../perlasm/x86asm.pl
(cd asm; $(PERL) rc4-586.pl cpp >rx86unix.cpp)

asm/rc4-amd64.s: asm/rc4-amd64.pl; $(PERL) asm/rc4-amd64.pl $@
asm/rc4-x86_64.s: asm/rc4-x86_64.pl; $(PERL) asm/rc4-x86_64.pl $@

asm/rc4-ia64.s: asm/rc4-ia64.S
$(CC) $(CFLAGS) -E asm/rc4-ia64.S > $@
@@ -1,227 +0,0 @@
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. Rights for redistribution and usage in source and binary
# forms are granted according to the OpenSSL license.
# ====================================================================
#
# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
# "hand-coded assembler"] doesn't stand for the whole improvement
# coefficient. It turned out that eliminating RC4_CHAR from config
# line results in ~40% improvement (yes, even for C implementation).
# Presumably it has everything to do with AMD cache architecture and
# RAW or whatever penalties. Once again! The module *requires* config
# line *without* RC4_CHAR! As for coding "secret," I bet on partial
# register arithmetics. For example instead of 'inc %r8; and $255,%r8'
# I simply 'inc %r8b'. Even though optimization manual discourages
# to operate on partial registers, it turned out to be the best bet.
# At least for AMD... How IA32E would perform remains to be seen...

# As was shown by Marc Bevand reordering of couple of load operations
# results in even higher performance gain of 3.3x:-) At least on
# Opteron... For reference, 1x in this case is RC4_CHAR C-code
# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
# Latter means that if you want to *estimate* what to expect from
# *your* CPU, then multiply 54 by 3.3 and clock frequency in GHz.

# Intel P4 EM64T core was found to run the AMD64 code really slow...
# The only way to achieve comparable performance on P4 is to keep
# RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to
# compose blended code, which would perform even within 30% marginal
# on either AMD and Intel platforms, I implement both cases. See
# rc4_skey.c for further details... This applies to 0.9.8 and later.
# In 0.9.7 context RC4_CHAR codepath is never engaged and ~70 bytes
# of code remain redundant.

$output=shift;

$win64a=1 if ($output =~ /win64a.[s|asm]/);

open STDOUT,">$output" || die "can't open $output: $!";

if (defined($win64a)) {
$dat="%rcx"; # arg1
$len="%rdx"; # arg2
$inp="%rsi"; # r8, arg3 moves here
$out="%rdi"; # r9, arg4 moves here
} else {
$dat="%rdi"; # arg1
$len="%rsi"; # arg2
$inp="%rdx"; # arg3
$out="%rcx"; # arg4
}

$XX="%r10";
$TX="%r8";
$YY="%r11";
$TY="%r9";

sub PTR() {
my $ret=shift;
if (defined($win64a)) {
$ret =~ s/\[([\S]+)\+([\S]+)\]/[$2+$1]/g; # [%rN+%rM*4]->[%rM*4+%rN]
$ret =~ s/:([^\[]+)\[([^\]]+)\]/:[$2+$1]/g; # :off[ea]->:[ea+off]
} else {
$ret =~ s/[\+\*]/,/g; # [%rN+%rM*4]->[%rN,%rM,4]
$ret =~ s/\[([^\]]+)\]/($1)/g; # [%rN]->(%rN)
}
$ret;
}

$code=<<___ if (!defined($win64a));
.text

.globl RC4
.type RC4,\@function
.align 16
RC4: or $len,$len
jne .Lentry
repret
.Lentry:
___
$code=<<___ if (defined($win64a));
_TEXT SEGMENT
PUBLIC RC4
ALIGN 16
RC4 PROC
or $len,$len
jne .Lentry
repret
.Lentry:
push %rdi
push %rsi
sub \$40,%rsp
mov %r8,$inp
mov %r9,$out
___
$code.=<<___;
add \$8,$dat
movl `&PTR("DWORD:-8[$dat]")`,$XX#d
movl `&PTR("DWORD:-4[$dat]")`,$YY#d
cmpl \$-1,`&PTR("DWORD:256[$dat]")`
je .LRC4_CHAR
test \$-8,$len
jz .Lloop1
.align 16
.Lloop8:
inc $XX#b
movl `&PTR("DWORD:[$dat+$XX*4]")`,$TX#d
add $TX#b,$YY#b
movl `&PTR("DWORD:[$dat+$YY*4]")`,$TY#d
movl $TX#d,`&PTR("DWORD:[$dat+$YY*4]")`
movl $TY#d,`&PTR("DWORD:[$dat+$XX*4]")`
add $TX#b,$TY#b
inc $XX#b
movl `&PTR("DWORD:[$dat+$XX*4]")`,$TX#d
movb `&PTR("BYTE:[$dat+$TY*4]")`,%al
___
for ($i=1;$i<=6;$i++) {
$code.=<<___;
add $TX#b,$YY#b
ror \$8,%rax
movl `&PTR("DWORD:[$dat+$YY*4]")`,$TY#d
movl $TX#d,`&PTR("DWORD:[$dat+$YY*4]")`
movl $TY#d,`&PTR("DWORD:[$dat+$XX*4]")`
add $TX#b,$TY#b
inc $XX#b
movl `&PTR("DWORD:[$dat+$XX*4]")`,$TX#d
movb `&PTR("BYTE:[$dat+$TY*4]")`,%al
___
}
$code.=<<___;
add $TX#b,$YY#b
ror \$8,%rax
movl `&PTR("DWORD:[$dat+$YY*4]")`,$TY#d
movl $TX#d,`&PTR("DWORD:[$dat+$YY*4]")`
movl $TY#d,`&PTR("DWORD:[$dat+$XX*4]")`
sub \$8,$len
add $TY#b,$TX#b
movb `&PTR("BYTE:[$dat+$TX*4]")`,%al
ror \$8,%rax
add \$8,$inp
add \$8,$out

xor `&PTR("QWORD:-8[$inp]")`,%rax
mov %rax,`&PTR("QWORD:-8[$out]")`

test \$-8,$len
jnz .Lloop8
cmp \$0,$len
jne .Lloop1
.Lexit:
movl $XX#d,`&PTR("DWORD:-8[$dat]")`
movl $YY#d,`&PTR("DWORD:-4[$dat]")`
___
$code.=<<___ if (defined($win64a));
add \$40,%rsp
pop %rsi
pop %rdi
___
$code.=<<___;
repret
.align 16
.Lloop1:
movzb `&PTR("BYTE:[$inp]")`,%eax
inc $XX#b
movl `&PTR("DWORD:[$dat+$XX*4]")`,$TX#d
add $TX#b,$YY#b
movl `&PTR("DWORD:[$dat+$YY*4]")`,$TY#d
movl $TX#d,`&PTR("DWORD:[$dat+$YY*4]")`
movl $TY#d,`&PTR("DWORD:[$dat+$XX*4]")`
add $TY#b,$TX#b
movl `&PTR("DWORD:[$dat+$TX*4]")`,$TY#d
xor $TY,%rax
inc $inp
movb %al,`&PTR("BYTE:[$out]")`
inc $out
dec $len
jnz .Lloop1
jmp .Lexit

.align 16
.LRC4_CHAR:
inc $XX#b
movzb `&PTR("BYTE:[$dat+$XX]")`,$TX#d
add $TX#b,$YY#b
movzb `&PTR("BYTE:[$dat+$YY]")`,$TY#d
movb $TX#b,`&PTR("BYTE:[$dat+$YY]")`
movb $TY#b,`&PTR("BYTE:[$dat+$XX]")`
add $TX#b,$TY#b
movzb `&PTR("BYTE:[$dat+$TY]")`,$TY#d
xorb `&PTR("BYTE:[$inp]")`,$TY#b
movb $TY#b,`&PTR("BYTE:[$out]")`
inc $inp
inc $out
dec $len
jnz .LRC4_CHAR
jmp .Lexit
___
$code.=<<___ if (defined($win64a));
RC4 ENDP
_TEXT ENDS
END
___
$code.=<<___ if (!defined($win64a));
.size RC4,.-RC4
___

$code =~ s/#([bwd])/$1/gm;
$code =~ s/\`([^\`]*)\`/eval $1/gem;

if (defined($win64a)) {
$code =~ s/\.align/ALIGN/gm;
$code =~ s/[\$%]//gm;
$code =~ s/\.L/\$L/gm;
$code =~ s/([\w]+)([\s]+)([\S]+),([\S]+)/$1$2$4,$3/gm;
$code =~ s/([QD]*WORD|BYTE):/$1 PTR/gm;
$code =~ s/mov[bwlq]/mov/gm;
$code =~ s/movzb/movzx/gm;
$code =~ s/repret/DB\t0F3h,0C3h/gm;
$code =~ s/cmpl/cmp/gm;
$code =~ s/xorb/xor/gm;
} else {
$code =~ s/([QD]*WORD|BYTE)://gm;
$code =~ s/repret/.byte\t0xF3,0xC3/gm;
}
print $code;
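The PTR() helper in the removed rc4-amd64.pl above rewrites a pseudo operand such as "DWORD:[$dat+$XX*4]" into MASM operand order for the win64a output, or into AT&T syntax otherwise, and the substitutions at the end of the script then expand or strip the size tag. Below is a standalone sketch of the default (non-win64a) branch applied to one operand; it is an illustration only, not part of the commit.

    #!/usr/bin/env perl
    # Standalone illustration of the non-win64a branch of PTR() above,
    # applied to a single operand.  Not part of the commit.
    my $ret = "DWORD:[%rdi+%r10*4]";   # "DWORD:[$dat+$XX*4]" after interpolation
    $ret =~ s/[\+\*]/,/g;              # [%rN+%rM*4] -> [%rN,%rM,4]
    $ret =~ s/\[([^\]]+)\]/($1)/g;     # [ea] -> (ea)
    $ret =~ s/([QD]*WORD|BYTE)://g;    # drop the size tag, as the final pass does
    print "$ret\n";                    # prints: (%rdi,%r10,4)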
@@ -6,51 +6,25 @@
# forms are granted according to the OpenSSL license.
# ====================================================================
#
# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
# "hand-coded assembler"] doesn't stand for the whole improvement
# coefficient. It turned out that eliminating RC4_CHAR from config
# line results in ~40% improvement (yes, even for C implementation).
# Presumably it has everything to do with AMD cache architecture and
# RAW or whatever penalties. Once again! The module *requires* config
# line *without* RC4_CHAR! As for coding "secret," I bet on partial
# register arithmetics. For example instead of 'inc %r8; and $255,%r8'
# I simply 'inc %r8b'. Even though optimization manual discourages
# to operate on partial registers, it turned out to be the best bet.
# At least for AMD... How IA32E would perform remains to be seen...

# As was shown by Marc Bevand reordering of couple of load operations
# results in even higher performance gain of 3.3x:-) At least on
# Opteron... For reference, 1x in this case is RC4_CHAR C-code
# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
# Latter means that if you want to *estimate* what to expect from
# *your* Opteron, then multiply 54 by 3.3 and clock frequency in GHz.

# Intel P4 EM64T core was found to run the AMD64 code really slow...
# The only way to achieve comparable performance on P4 was to keep
# RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to
# compose blended code, which would perform even within 30% marginal
# on either AMD and Intel platforms, I implement both cases. See
# rc4_skey.c for further details...

# P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing
# those with add/sub results in 50% performance improvement of folded
# loop...

# As was shown by Zou Nanhai loop unrolling can improve Intel EM64T
# performance by >30% [unlike P4 32-bit case that is]. But this is
# provided that loads are reordered even more aggressively! Both code
# pathes, AMD64 and EM64T, reorder loads in essentially same manner
# as my IA-64 implementation. On Opteron this resulted in modest 5%
# improvement [I had to test it], while final Intel P4 performance
# achieves respectful 432MBps on 2.8GHz processor now. For reference.
# If executed on Xeon, current RC4_CHAR code-path is 2.7x faster than
# RC4_INT code-path. While if executed on Opteron, it's only 25%
# slower than the RC4_INT one [meaning that if CPU µ-arch detection
# is not implemented, then this final RC4_CHAR code-path should be
# preferred, as it provides better *all-round* performance].
# Unlike 0.9.7f this code expects RC4_CHAR back in config line! See
# commentary section in corresponding script in development branch
# for background information about this option carousel. For those
# who don't have energy to figure out these gory details, here is
# basis in form of performance matrix relative to the original
# 0.9.7e C code-base:
#
#         0.9.7e  0.9.7f  this
# AMD64   1x      3.3x    2.4x
# EM64T   1x      0.8x    1.5x
#
# In other words idea is to trade -25% AMD64 performance to compensate
# for deterioration and gain +90% on EM64T core. Development branch
# maintains best performance for either target, i.e. 3.3x for AMD64
# and 1.5x for EM64T.

$output=shift;
open STDOUT,"| $^X ../perlasm/x86_64-xlate.pl $output";

open STDOUT,">$output" || die "can't open $output: $!";

$dat="%rdi"; # arg1
$len="%rsi"; # arg2
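The comment block above also gives a rule of thumb for estimating throughput: roughly 54 MBps per GHz for the RC4_CHAR C code, times the quoted 3.3x gain on Opteron, times the clock frequency. A back-of-envelope sketch of that arithmetic follows; the clock figure is a made-up example, not a measurement.

    #!/usr/bin/env perl
    # Back-of-envelope estimate from the comment above: ~54 MBps per GHz
    # for the RC4_CHAR C code, times the quoted 3.3x gain, times the clock.
    # The 2.2 GHz figure is a hypothetical example, not a measurement.
    my $mbps_per_ghz = 54;
    my $gain         = 3.3;
    my $clock_ghz    = 2.2;
    printf "estimated Opteron RC4 throughput: ~%.0f MBps\n",
           $mbps_per_ghz * $gain * $clock_ghz;   # ~392 MBps at 2.2 GHz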
@@ -62,99 +36,29 @@ $out="%rcx"; # arg4
$YY="%r12";
$TY="%r13";

$code=<<___;
$code=<<___;;
.text

.globl RC4
.type RC4,\@function,4
.type RC4,\@function
.align 16
RC4: or $len,$len
jne .Lentry
ret
repret
.Lentry:
push %r12
push %r13

add \$8,$dat
movl -8($dat),$XX[0]#d
movl -4($dat),$YY#d
cmpl \$-1,256($dat)
je .LRC4_CHAR
inc $XX[0]#b
movl ($dat,$XX[0],4),$TX[0]#d
test \$-8,$len
jz .Lloop1
jmp .Lloop8
.align 16
.Lloop8:
___
for ($i=0;$i<8;$i++) {
$code.=<<___;
add $TX[0]#b,$YY#b
mov $XX[0],$XX[1]
movl ($dat,$YY,4),$TY#d
ror \$8,%rax # ror is redundant when $i=0
inc $XX[1]#b
movl ($dat,$XX[1],4),$TX[1]#d
cmp $XX[1],$YY
movl $TX[0]#d,($dat,$YY,4)
cmove $TX[0],$TX[1]
movl $TY#d,($dat,$XX[0],4)
add $TX[0]#b,$TY#b
movb ($dat,$TY,4),%al
___
push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
}
$code.=<<___;
ror \$8,%rax
sub \$8,$len
add \$2,$dat
movzb -2($dat),$XX[0]#d
movzb -1($dat),$YY#d

xor ($inp),%rax
add \$8,$inp
mov %rax,($out)
add \$8,$out

test \$-8,$len
jnz .Lloop8
cmp \$0,$len
jne .Lloop1
___
$code.=<<___;
.Lexit:
sub \$1,$XX[0]#b
movl $XX[0]#d,-8($dat)
movl $YY#d,-4($dat)

pop %r13
pop %r12
ret
.align 16
.Lloop1:
add $TX[0]#b,$YY#b
movl ($dat,$YY,4),$TY#d
movl $TX[0]#d,($dat,$YY,4)
movl $TY#d,($dat,$XX[0],4)
add $TY#b,$TX[0]#b
inc $XX[0]#b
movl ($dat,$TX[0],4),$TY#d
movl ($dat,$XX[0],4),$TX[0]#d
xorb ($inp),$TY#b
inc $inp
movb $TY#b,($out)
inc $out
dec $len
jnz .Lloop1
jmp .Lexit

.align 16
.LRC4_CHAR:
add \$1,$XX[0]#b
movzb ($dat,$XX[0]),$TX[0]#d
test \$-8,$len
jz .Lcloop1
push %rbx
jmp .Lcloop8
.align 16
.align 16 # incidentally aligned already
.Lcloop8:
mov ($inp),%eax
mov 4($inp),%ebx
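The unrolled .Lloop8 above performs the standard RC4 output round eight times per iteration, keeping the i and j indices in byte registers so they wrap at 256 for free (the partial-register trick described in the header comment). For orientation, here is the same round in plain Perl; it is a reference model only (key setup omitted) and not part of the commit.

    #!/usr/bin/env perl
    # Plain-Perl reference model of one RC4 output byte, i.e. the round the
    # unrolled .Lloop8 above performs eight times per iteration.
    # Illustration only; not part of the commit.
    sub rc4_byte {
        my ($S, $x, $y, $in) = @_;      # $S: 256-entry state, $x/$y: indices
        $x = ($x + 1) & 0xff;           # the 'inc/add ...#b' wrap-around
        my $tx = $S->[$x];
        $y = ($y + $tx) & 0xff;         # add TX to YY, byte-sized
        my $ty = $S->[$y];
        @{$S}[$x, $y] = ($ty, $tx);     # swap S[x] and S[y]
        my $out = $in ^ $S->[($tx + $ty) & 0xff];
        return ($out, $x, $y);
    }

    # tiny usage example with an identity-initialised state (key setup omitted)
    my @S = (0 .. 255);
    my ($o, $x, $y) = rc4_byte(\@S, 0, 0, ord('A'));
    printf "output byte: 0x%02x\n", $o;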
@@ -210,9 +114,15 @@ $code.=<<___;
pop %rbx
cmp \$0,$len
jne .Lcloop1
jmp .Lexit
___
$code.=<<___;
.Lexit:
sub \$1,$XX[0]#b
movb $XX[0]#b,-2($dat)
movb $YY#b,-1($dat)

pop %r13
pop %r12
repret

.align 16
.Lcloop1:
add $TX[0]#b,$YY#b
@@ -235,6 +145,6 @@ ___

$code =~ s/#([bwd])/$1/gm;

print $code;
$code =~ s/repret/.byte\t0xF3,0xC3/gm;

close STDOUT;
print $code;
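Both scripts write operands with a '#b'/'#d' suffix and rely on the final s/#([bwd])/$1/ pass to fold the suffix into the real sub-register name (for example %r10#d becomes %r10d), while 'repret' is expanded into the two-byte 0xF3,0xC3 return sequence. A one-line illustration of the suffix rewrite; not part of the commit.

    #!/usr/bin/env perl
    # Illustration of the '#([bwd])' pass used by both scripts: the pseudo
    # suffix is folded into the register name.  Not part of the commit.
    my $line = "movl\t-8(%rdi),%r10#d";
    $line =~ s/#([bwd])/$1/gm;
    print "$line\n";   # prints: movl    -8(%rdi),%r10d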