bionic/libc/arch-x86/generic/string/swab.S
/* $OpenBSD: swab.S,v 1.3 2005/08/07 11:30:38 espie Exp $ */
/*
 * Written by J.T. Conklin <jtc@netbsd.org>.
 * Public domain.
 */

#include <private/bionic_asm.h>

/*
 * On the i486, this code is negligibly faster than the code generated
 * by gcc at about half the size.  If my i386 databook is correct, it
 * should be considerably faster than the gcc code on an i386.
 */
ENTRY(swab)
        pushl   %esi
        pushl   %edi
        movl    12(%esp),%esi           # source pointer
        movl    16(%esp),%edi           # destination pointer
        movl    20(%esp),%ecx           # byte count

        cld                             # set direction forward

        shrl    $1,%ecx                 # byte count -> 16-bit word count
        testl   $7,%ecx                 # copy first group of 1 to 7 words
        jz      L2                      # while swapping alternate bytes.
        .align  2,0x90
L1:     lodsw
        rorw    $8,%ax
        stosw
        decl    %ecx
        testl   $7,%ecx
        jnz     L1

L2:     shrl    $3,%ecx                 # copy remainder 8 words at a time
        jz      L4                      # while swapping alternate bytes.
        .align  2,0x90
L3:     lodsw
        rorw    $8,%ax
        stosw
        lodsw
        rorw    $8,%ax
        stosw
        lodsw
        rorw    $8,%ax
        stosw
        lodsw
        rorw    $8,%ax
        stosw
        lodsw
        rorw    $8,%ax
        stosw
        lodsw
        rorw    $8,%ax
        stosw
        lodsw
        rorw    $8,%ax
        stosw
        lodsw
        rorw    $8,%ax
        stosw
        decl    %ecx
        jnz     L3

L4:     popl    %edi
        popl    %esi
        ret
END(swab)
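
For reference, swab(3) copies n bytes from the source buffer to the destination while exchanging each pair of adjacent bytes; the routine above does exactly that, handling a remainder of 1 to 7 words in the L1 loop and then copying eight 16-bit words per iteration in the unrolled L3 loop. Below is a minimal C sketch of the same behavior, assuming only the standard swab(3) contract; the name swab_ref and the handling of odd or negative counts are illustrative, not part of bionic.

#include <stdint.h>
#include <sys/types.h>

/*
 * Reference (unoptimized) equivalent of swab(3): copy n bytes from
 * src to dst, swapping each adjacent pair of bytes.  Like the
 * shrl $1,%ecx above, any trailing odd byte is simply ignored, and a
 * negative n copies nothing.  Illustrative sketch only.
 */
static void swab_ref(const void *src, void *dst, ssize_t n) {
    const uint8_t *s = src;
    uint8_t *d = dst;
    for (ssize_t i = 0; i + 1 < n; i += 2) {
        d[i] = s[i + 1];          /* odd byte moves to the even slot */
        d[i + 1] = s[i];          /* even byte moves to the odd slot */
    }
}

/* Example: swab_ref("badc", buf, 4) leaves "abcd" in buf. */

The assembly gains over a loop like this mainly through lodsw/rorw/stosw and the eight-way unrolling, which mattered on the i386/i486-class parts mentioned in the comment at the top of the file.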