Break bionic implementations into arch versions. DO NOT MERGE

Move arch-specific code for arm, mips, and x86 into separate
makefiles.
In addition, add different arm cpu-variant versions of memcpy/memset.

Bug: 8005082

(cherry picked from commit acdde8c1cf)

Change-Id: I0108d432af9f6283ae99adfc92a3399e5ab3e31d
Christopher Ferris 2013-02-26 01:30:00 -08:00
parent 04766565d3
commit 7c83a1ed81
16 changed files with 1467 additions and 82 deletions

libc/Android.mk
@@ -364,29 +364,6 @@ libc_upstream_netbsd_src_files := \
# =========================================================
ifeq ($(TARGET_ARCH),arm)
libc_common_src_files += \
arch-arm/bionic/abort_arm.S \
arch-arm/bionic/atomics_arm.c \
arch-arm/bionic/clone.S \
arch-arm/bionic/eabi.c \
arch-arm/bionic/_exit_with_stack_teardown.S \
arch-arm/bionic/ffs.S \
arch-arm/bionic/futex_arm.S \
arch-arm/bionic/__get_sp.S \
arch-arm/bionic/kill.S \
arch-arm/bionic/libgcc_compat.c \
arch-arm/bionic/memcmp16.S \
arch-arm/bionic/memcmp.S \
arch-arm/bionic/memcpy.S \
arch-arm/bionic/memset.S \
arch-arm/bionic/_setjmp.S \
arch-arm/bionic/setjmp.S \
arch-arm/bionic/sigsetjmp.S \
arch-arm/bionic/strcmp.S \
arch-arm/bionic/strcpy.S \
arch-arm/bionic/strlen.c.arm \
arch-arm/bionic/syscall.S \
arch-arm/bionic/tgkill.S \
arch-arm/bionic/tkill.S \
bionic/memmove.c.arm \
bionic/socketcalls.c \
string/bcopy.c \
@@ -406,38 +383,10 @@ libc_static_common_src_files += \
bionic/pthread_create.cpp.arm \
bionic/pthread_key.cpp.arm \
# these are used by the static and dynamic versions of the libc
# respectively
libc_arch_static_src_files := \
arch-arm/bionic/exidx_static.c
libc_arch_dynamic_src_files := \
arch-arm/bionic/exidx_dynamic.c
endif # arm
ifeq ($(TARGET_ARCH),x86)
libc_common_src_files += \
arch-x86/bionic/clone.S \
arch-x86/bionic/_exit_with_stack_teardown.S \
arch-x86/bionic/futex_x86.S \
arch-x86/bionic/__get_sp.S \
arch-x86/bionic/__get_tls.c \
arch-x86/bionic/_setjmp.S \
arch-x86/bionic/setjmp.S \
arch-x86/bionic/__set_tls.c \
arch-x86/bionic/sigsetjmp.S \
arch-x86/bionic/syscall.S \
arch-x86/bionic/vfork.S \
arch-x86/string/bcopy_wrapper.S \
arch-x86/string/bzero_wrapper.S \
arch-x86/string/ffs.S \
arch-x86/string/memcmp_wrapper.S \
arch-x86/string/memcpy_wrapper.S \
arch-x86/string/memmove_wrapper.S \
arch-x86/string/memset_wrapper.S \
arch-x86/string/strcmp_wrapper.S \
arch-x86/string/strlen_wrapper.S \
arch-x86/string/strncmp_wrapper.S \
bionic/pthread-atfork.c \
bionic/pthread-rwlocks.c \
bionic/pthread-timers.c \
@@ -449,36 +398,9 @@ libc_static_common_src_files += \
bionic/pthread_create.cpp \
bionic/pthread_key.cpp \
libc_arch_static_src_files := \
bionic/dl_iterate_phdr_static.c
libc_arch_dynamic_src_files :=
endif # x86
ifeq ($(TARGET_ARCH),mips)
libc_common_src_files += \
arch-mips/bionic/__get_sp.S \
arch-mips/bionic/__get_tls.c \
arch-mips/bionic/__set_tls.c \
arch-mips/bionic/_exit_with_stack_teardown.S \
arch-mips/bionic/_setjmp.S \
arch-mips/bionic/futex_mips.S \
arch-mips/bionic/bzero.S \
arch-mips/bionic/cacheflush.c \
arch-mips/bionic/clone.S \
arch-mips/bionic/ffs.S \
arch-mips/bionic/memcmp16.S \
arch-mips/bionic/memmove.c \
arch-mips/bionic/pipe.S \
arch-mips/bionic/setjmp.S \
arch-mips/bionic/sigsetjmp.S \
arch-mips/bionic/vfork.S
libc_common_src_files += \
arch-mips/string/memset.S \
arch-mips/string/memcpy.S \
arch-mips/string/mips_strlen.c
libc_common_src_files += \
bionic/memcmp.c \
string/bcopy.c \
@@ -497,12 +419,42 @@ libc_static_common_src_files += \
bionic/pthread_create.cpp \
bionic/pthread_key.cpp \
libc_arch_static_src_files := \
bionic/dl_iterate_phdr_static.c
libc_arch_dynamic_src_files :=
endif # mips
ifeq ($(strip $(TARGET_CPU_VARIANT)),)
$(warning TARGET_CPU_VARIANT is not defined)
endif
###########################################################
## Add cpu specific source files.
##
## This can be called multiple times, but it will only add
## the first source file for each unique $(1).
## $(1): Unique identifier to identify the cpu variant
## implementation.
## $(2): Cpu specific source file.
###########################################################
define libc-add-cpu-variant-src
$(if $(filter true,$(_LIBC_ARCH_CPU_VARIANT_HAS_$(1))), \
, \
$(eval _LIBC_ARCH_CPU_VARIANT_HAS_$(1) := true) \
$(eval _LIBC_ARCH_CPU_VARIANT_SRC_FILE.$(1) := $(2)) \
$(eval _LIBC_ARCH_CPU_VARIANT_SRC_FILES += $(2)) \
)
endef
_LIBC_ARCH_COMMON_SRC_FILES :=
_LIBC_ARCH_CPU_VARIANT_SRC_FILES :=
_LIBC_ARCH_STATIC_SRC_FILES :=
_LIBC_ARCH_DYNAMIC_SRC_FILES :=
include bionic/libc/arch-$(TARGET_ARCH)/$(TARGET_ARCH).mk
libc_common_src_files += $(_LIBC_ARCH_COMMON_SRC_FILES)
libc_common_src_files += $(_LIBC_ARCH_CPU_VARIANT_SRC_FILES)
libc_arch_static_src_files := $(_LIBC_ARCH_STATIC_SRC_FILES)
libc_arch_dynamic_src_files := $(_LIBC_ARCH_DYNAMIC_SRC_FILES)
# Define some common cflags
# ========================================================
libc_common_cflags := \
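As a reading aid, here is a minimal sketch of the macro's first-caller-wins behavior, using the exact calls added later in this commit (the trace comments are mine):

# cortex-a15.mk registers its optimized sources first...
$(call libc-add-cpu-variant-src,MEMCPY,arch-arm/cortex-a15/bionic/memcpy.S)
$(call libc-add-cpu-variant-src,MEMSET,arch-arm/cortex-a15/bionic/memset.S)
# ...and only then pulls in the fallbacks.
include bionic/libc/arch-arm/generic/generic.mk
# generic.mk's own MEMCPY/MEMSET calls are now no-ops, because
# _LIBC_ARCH_CPU_VARIANT_HAS_MEMCPY and _LIBC_ARCH_CPU_VARIANT_HAS_MEMSET
# are already "true", so _LIBC_ARCH_CPU_VARIANT_SRC_FILES ends up holding
# only the cortex-a15 implementations.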

libc/arch-arm/arm.mk Normal file
@@ -0,0 +1,36 @@
_LIBC_ARCH_COMMON_SRC_FILES := \
arch-arm/bionic/abort_arm.S \
arch-arm/bionic/atomics_arm.c \
arch-arm/bionic/clone.S \
arch-arm/bionic/eabi.c \
arch-arm/bionic/_exit_with_stack_teardown.S \
arch-arm/bionic/ffs.S \
arch-arm/bionic/futex_arm.S \
arch-arm/bionic/__get_sp.S \
arch-arm/bionic/kill.S \
arch-arm/bionic/libgcc_compat.c \
arch-arm/bionic/memcmp16.S \
arch-arm/bionic/memcmp.S \
arch-arm/bionic/_setjmp.S \
arch-arm/bionic/setjmp.S \
arch-arm/bionic/sigsetjmp.S \
arch-arm/bionic/strcmp.S \
arch-arm/bionic/strcpy.S \
arch-arm/bionic/strlen.c.arm \
arch-arm/bionic/syscall.S \
arch-arm/bionic/tgkill.S \
arch-arm/bionic/tkill.S
# These are used by the static and dynamic versions of the libc
# respectively.
_LIBC_ARCH_STATIC_SRC_FILES := \
arch-arm/bionic/exidx_static.c
_LIBC_ARCH_DYNAMIC_SRC_FILES := \
arch-arm/bionic/exidx_dynamic.c
ifeq ($(strip $(wildcard bionic/libc/arch-arm/$(TARGET_CPU_VARIANT)/$(TARGET_CPU_VARIANT).mk)),)
$(error "TARGET_CPU_VARIANT not set or set to an unknown value. Possible values are cortex-a9, cortex-a15, krait. Use generic for devices that do not have a CPU similar to any of the supported cpu variants.")
endif
include bionic/libc/arch-arm/$(TARGET_CPU_VARIANT)/$(TARGET_CPU_VARIANT).mk
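For context, TARGET_CPU_VARIANT normally comes from the device's board configuration. A hypothetical BoardConfig.mk fragment (the device path is invented for illustration) might read:

# device/acme/example/BoardConfig.mk (hypothetical)
TARGET_ARCH := arm
# one of: cortex-a9, cortex-a15, krait, generic
TARGET_CPU_VARIANT := krait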

libc/arch-arm/cortex-a15/bionic/memcpy.S Normal file
@@ -0,0 +1,146 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Assumes neon instructions and a cache line size of 64 bytes. */
#include <machine/cpu-features.h>
#include <machine/asm.h>
/*
* This code assumes it is running on a processor that supports all arm v7
* instructions, that supports neon instructions, and that has a 64 byte
* cache line.
*/
.text
.fpu neon
#define CACHE_LINE_SIZE 64
ENTRY(memcpy)
.save {r0, lr}
/* start preloading as early as possible */
pld [r1, #(CACHE_LINE_SIZE*0)]
stmfd sp!, {r0, lr}
pld [r1, #(CACHE_LINE_SIZE*1)]
/* do we have at least 16-bytes to copy (needed for alignment below) */
cmp r2, #16
blo 5f
/* align destination to cache-line for the write-buffer */
rsb r3, r0, #0
ands r3, r3, #0xF
beq 0f
/* copy up to 15-bytes (count in r3) */
sub r2, r2, r3
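/* lsl #31 moves bit 0 of r3 into N and bit 1 into C, so the MI
 * store below copies one byte and the pair of CS stores copy two.
 */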
movs ip, r3, lsl #31
ldrmib lr, [r1], #1
strmib lr, [r0], #1
ldrcsb ip, [r1], #1
ldrcsb lr, [r1], #1
strcsb ip, [r0], #1
strcsb lr, [r0], #1
movs ip, r3, lsl #29
bge 1f
// copies 4 bytes, destination 32-bits aligned
vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0, :32]!
1: bcc 2f
// copies 8 bytes, destination 64-bits aligned
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0, :64]!
2:
0: /* preload immediately the next cache line, which we may need */
pld [r1, #(CACHE_LINE_SIZE*0)]
pld [r1, #(CACHE_LINE_SIZE*1)]
/* make sure we have at least 64 bytes to copy */
subs r2, r2, #64
blo 2f
/* Preload all the cache lines we need.
* NOTE: The number of pld below depends on CACHE_LINE_SIZE,
* ideally we would increase the distance in the main loop to
* avoid the goofy code below. In practice this doesn't seem to make
* a big difference.
* NOTE: The value CACHE_LINE_SIZE * 4 was chosen through
* experimentation.
*/
pld [r1, #(CACHE_LINE_SIZE*2)]
pld [r1, #(CACHE_LINE_SIZE*3)]
pld [r1, #(CACHE_LINE_SIZE*4)]
1: /* The main loop copies 64 bytes at a time */
vld1.8 {d0 - d3}, [r1]!
vld1.8 {d4 - d7}, [r1]!
pld [r1, #(CACHE_LINE_SIZE*4)]
subs r2, r2, #64
vst1.8 {d0 - d3}, [r0, :128]!
vst1.8 {d4 - d7}, [r0, :128]!
bhs 1b
2: /* fix-up the remaining count and make sure we have >= 32 bytes left */
add r2, r2, #64
subs r2, r2, #32
blo 4f
3: /* 32 bytes at a time. These cache lines were already preloaded */
vld1.8 {d0 - d3}, [r1]!
subs r2, r2, #32
vst1.8 {d0 - d3}, [r0, :128]!
bhs 3b
4: /* less than 32 left */
add r2, r2, #32
tst r2, #0x10
beq 5f
// copies 16 bytes, 128-bits aligned
vld1.8 {d0, d1}, [r1]!
vst1.8 {d0, d1}, [r0, :128]!
5: /* copy up to 15-bytes (count in r2) */
movs ip, r2, lsl #29
bcc 1f
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0]!
1: bge 2f
vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0]!
2: movs ip, r2, lsl #31
ldrmib r3, [r1], #1
ldrcsb ip, [r1], #1
ldrcsb lr, [r1], #1
strmib r3, [r0], #1
strcsb ip, [r0], #1
strcsb lr, [r0], #1
ldmfd sp!, {r0, lr}
bx lr
END(memcpy)

libc/arch-arm/cortex-a15/bionic/memset.S Normal file
@@ -0,0 +1,106 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
#include <machine/asm.h>
/*
* Optimized memset() for ARM.
*
* memset() returns its first argument.
*/
.fpu neon
ENTRY(bzero)
mov r2, r1
mov r1, #0
// Fall through to memset...
END(bzero)
ENTRY(memset)
.save {r0}
stmfd sp!, {r0}
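/* duplicate the fill byte into all 16 lanes of q0 */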
vdup.8 q0, r1
/* do we have at least 16-bytes to write (needed for alignment below) */
cmp r2, #16
blo 3f
/* align destination to 16 bytes for the write-buffer */
rsb r3, r0, #0
ands r3, r3, #0xF
beq 2f
/* write up to 15-bytes (count in r3) */
sub r2, r2, r3
movs ip, r3, lsl #31
strmib r1, [r0], #1
strcsb r1, [r0], #1
strcsb r1, [r0], #1
movs ip, r3, lsl #29
bge 1f
// writes 4 bytes, 32-bits aligned
vst1.32 {d0[0]}, [r0, :32]!
1: bcc 2f
// writes 8 bytes, 64-bits aligned
vst1.8 {d0}, [r0, :64]!
2:
/* make sure we have at least 32 bytes to write */
subs r2, r2, #32
blo 2f
vmov q1, q0
1: /* The main loop writes 32 bytes at a time */
subs r2, r2, #32
vst1.8 {d0 - d3}, [r0, :128]!
bhs 1b
2: /* less than 32 left */
add r2, r2, #32
tst r2, #0x10
beq 3f
// writes 16 bytes, 128-bits aligned
vst1.8 {d0, d1}, [r0, :128]!
3: /* write up to 15-bytes (count in r2) */
movs ip, r2, lsl #29
bcc 1f
vst1.8 {d0}, [r0]!
1: bge 2f
vst1.32 {d0[0]}, [r0]!
2: movs ip, r2, lsl #31
strmib r1, [r0], #1
strcsb r1, [r0], #1
strcsb r1, [r0], #1
ldmfd sp!, {r0}
bx lr
END(memset)

libc/arch-arm/cortex-a15/cortex-a15.mk Normal file
@@ -0,0 +1,4 @@
$(call libc-add-cpu-variant-src,MEMCPY,arch-arm/cortex-a15/bionic/memcpy.S)
$(call libc-add-cpu-variant-src,MEMSET,arch-arm/cortex-a15/bionic/memset.S)
include bionic/libc/arch-arm/generic/generic.mk
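Note the ordering: the variant registers its own MEMCPY/MEMSET sources before including generic.mk, so the generic implementations act purely as fallbacks for any identifier the variant did not claim.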

libc/arch-arm/cortex-a9/bionic/memcpy.S Normal file
@@ -0,0 +1,211 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
#include <machine/asm.h>
/*
* This code assumes it is running on a processor that supports all arm v7
* instructions, that supports neon instructions, and that has a 32 byte
* cache line.
*/
.text
.fpu neon
#define CACHE_LINE_SIZE 32
ENTRY(memcpy)
.save {r0, lr}
/* start preloading as early as possible */
pld [r1, #(CACHE_LINE_SIZE * 0)]
stmfd sp!, {r0, lr}
pld [r1, #(CACHE_LINE_SIZE * 2)]
// Check that the count is at least 16 bytes (needed for the alignment code below).
cmp r2, #16
blo 5f
/* check if buffers are aligned. If so, run arm-only version */
eor r3, r0, r1
ands r3, r3, #0x3
beq 11f
/* Check the upper size limit for Neon unaligned memory access in memcpy */
cmp r2, #224
blo 3f
/* align destination to 16 bytes for the write-buffer */
rsb r3, r0, #0
ands r3, r3, #0xF
beq 3f
/* copy up to 15-bytes (count in r3) */
sub r2, r2, r3
movs ip, r3, lsl #31
ldrmib lr, [r1], #1
strmib lr, [r0], #1
ldrcsb ip, [r1], #1
ldrcsb lr, [r1], #1
strcsb ip, [r0], #1
strcsb lr, [r0], #1
movs ip, r3, lsl #29
bge 1f
// copies 4 bytes, destination 32-bits aligned
vld1.32 {d0[0]}, [r1]!
vst1.32 {d0[0]}, [r0, :32]!
1: bcc 2f
// copies 8 bytes, destination 64-bits aligned
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0, :64]!
2:
/* preload immediately the next cache line, which we may need */
pld [r1, #(CACHE_LINE_SIZE * 0)]
pld [r1, #(CACHE_LINE_SIZE * 2)]
3:
/* make sure we have at least 64 bytes to copy */
subs r2, r2, #64
blo 2f
/* preload all the cache lines we need */
pld [r1, #(CACHE_LINE_SIZE * 4)]
pld [r1, #(CACHE_LINE_SIZE * 6)]
1: /* The main loop copies 64 bytes at a time */
vld1.8 {d0 - d3}, [r1]!
vld1.8 {d4 - d7}, [r1]!
pld [r1, #(CACHE_LINE_SIZE * 6)]
subs r2, r2, #64
vst1.8 {d0 - d3}, [r0]!
vst1.8 {d4 - d7}, [r0]!
bhs 1b
2: /* fix-up the remaining count and make sure we have >= 32 bytes left */
add r2, r2, #64
subs r2, r2, #32
blo 4f
3: /* 32 bytes at a time. These cache lines were already preloaded */
vld1.8 {d0 - d3}, [r1]!
subs r2, r2, #32
vst1.8 {d0 - d3}, [r0]!
bhs 3b
4: /* less than 32 left */
add r2, r2, #32
tst r2, #0x10
beq 5f
// copies 16 bytes, 128-bits aligned
vld1.8 {d0, d1}, [r1]!
vst1.8 {d0, d1}, [r0]!
5: /* copy up to 15-bytes (count in r2) */
movs ip, r2, lsl #29
bcc 1f
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0]!
1: bge 2f
vld1.32 {d0[0]}, [r1]!
vst1.32 {d0[0]}, [r0]!
2: movs ip, r2, lsl #31
ldrmib r3, [r1], #1
ldrcsb ip, [r1], #1
ldrcsb lr, [r1], #1
strmib r3, [r0], #1
strcsb ip, [r0], #1
strcsb lr, [r0], #1
ldmfd sp!, {r0, lr}
bx lr
11:
/* Simple arm-only copy loop to handle aligned copy operations */
stmfd sp!, {r4, r5, r6, r7, r8}
pld [r1, #(CACHE_LINE_SIZE * 4)]
/* Check alignment */
rsb r3, r1, #0
ands r3, #3
beq 2f
/* align source to 32 bits. We need to insert 2 instructions between
* a ldr[b|h] and str[b|h] because byte and half-word instructions
* stall 2 cycles.
*/
movs r12, r3, lsl #31
sub r2, r2, r3 /* we know that r3 <= r2 because r2 >= 4 */
ldrmib r3, [r1], #1
ldrcsb r4, [r1], #1
ldrcsb r5, [r1], #1
strmib r3, [r0], #1
strcsb r4, [r0], #1
strcsb r5, [r0], #1
2:
subs r2, r2, #64
blt 4f
3: /* Main copy loop, copying 64 bytes at a time */
pld [r1, #(CACHE_LINE_SIZE * 8)]
ldmia r1!, {r3, r4, r5, r6, r7, r8, r12, lr}
stmia r0!, {r3, r4, r5, r6, r7, r8, r12, lr}
ldmia r1!, {r3, r4, r5, r6, r7, r8, r12, lr}
stmia r0!, {r3, r4, r5, r6, r7, r8, r12, lr}
subs r2, r2, #64
bge 3b
4: /* Check if there are > 32 bytes left */
adds r2, r2, #64
subs r2, r2, #32
blt 5f
/* Copy 32 bytes */
ldmia r1!, {r3, r4, r5, r6, r7, r8, r12, lr}
stmia r0!, {r3, r4, r5, r6, r7, r8, r12, lr}
subs r2, #32
5: /* Handle any remaining bytes */
adds r2, #32
beq 6f
movs r12, r2, lsl #28
ldmcsia r1!, {r3, r4, r5, r6} /* 16 bytes */
ldmmiia r1!, {r7, r8} /* 8 bytes */
stmcsia r0!, {r3, r4, r5, r6}
stmmiia r0!, {r7, r8}
movs r12, r2, lsl #30
ldrcs r3, [r1], #4 /* 4 bytes */
ldrmih r4, [r1], #2 /* 2 bytes */
strcs r3, [r0], #4
strmih r4, [r0], #2
tst r2, #0x1
ldrneb r3, [r1] /* last byte */
strneb r3, [r0]
6:
ldmfd sp!, {r4, r5, r6, r7, r8}
ldmfd sp!, {r0, pc}
END(memcpy)

libc/arch-arm/cortex-a9/bionic/memset.S Normal file
@@ -0,0 +1,152 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
#include <machine/asm.h>
/*
* This code assumes it is running on a processor that supports all arm v7
* instructions and that supports neon instructions.
*/
.fpu neon
ENTRY(bzero)
mov r2, r1
mov r1, #0
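// Fall through to memset...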
END(bzero)
/* memset() returns its first argument. */
ENTRY(memset)
// The neon memset only wins for sizes less than 132 bytes.
cmp r2, #132
bhi 11f
.save {r0}
stmfd sp!, {r0}
vdup.8 q0, r1
/* make sure we have at least 32 bytes to write */
subs r2, r2, #32
blo 2f
vmov q1, q0
1: /* The main loop writes 32 bytes at a time */
subs r2, r2, #32
vst1.8 {d0 - d3}, [r0]!
bhs 1b
2: /* less than 32 left */
add r2, r2, #32
tst r2, #0x10
beq 3f
// writes 16 bytes, 128-bits aligned
vst1.8 {d0, d1}, [r0]!
3: /* write up to 15-bytes (count in r2) */
movs ip, r2, lsl #29
bcc 1f
vst1.8 {d0}, [r0]!
1: bge 2f
vst1.32 {d0[0]}, [r0]!
2: movs ip, r2, lsl #31
strmib r1, [r0], #1
strcsb r1, [r0], #1
strcsb r1, [r0], #1
ldmfd sp!, {r0}
bx lr
11:
/* compute the offset to align the destination
* offset = (4-(dst&3))&3 = -dst & 3
*/
.save {r0, r4-r7, lr}
stmfd sp!, {r0, r4-r7, lr}
rsb r3, r0, #0
ands r3, r3, #3
cmp r3, r2
movhi r3, r2
/* splat r1 */
mov r1, r1, lsl #24
orr r1, r1, r1, lsr #8
orr r1, r1, r1, lsr #16
movs r12, r3, lsl #31
strcsb r1, [r0], #1 /* can't use strh (alignment unknown) */
strcsb r1, [r0], #1
strmib r1, [r0], #1
subs r2, r2, r3
ldmlsfd sp!, {r0, r4-r7, lr} /* return */
bxls lr
/* align the destination to a cache-line */
mov r12, r1
mov lr, r1
mov r4, r1
mov r5, r1
mov r6, r1
mov r7, r1
rsb r3, r0, #0
ands r3, r3, #0x1C
beq 3f
cmp r3, r2
andhi r3, r2, #0x1C
sub r2, r2, r3
/* conditionally writes 0 to 7 words (length in r3) */
movs r3, r3, lsl #28
stmcsia r0!, {r1, lr}
stmcsia r0!, {r1, lr}
stmmiia r0!, {r1, lr}
movs r3, r3, lsl #2
strcs r1, [r0], #4
3:
subs r2, r2, #32
mov r3, r1
bmi 2f
1: subs r2, r2, #32
stmia r0!, {r1,r3,r4,r5,r6,r7,r12,lr}
bhs 1b
2: add r2, r2, #32
/* conditionally stores 0 to 31 bytes */
movs r2, r2, lsl #28
stmcsia r0!, {r1,r3,r12,lr}
stmmiia r0!, {r1, lr}
movs r2, r2, lsl #2
strcs r1, [r0], #4
strmih r1, [r0], #2
movs r2, r2, lsl #2
strcsb r1, [r0]
ldmfd sp!, {r0, r4-r7, lr}
bx lr
END(memset)

libc/arch-arm/cortex-a9/cortex-a9.mk Normal file
@@ -0,0 +1,4 @@
$(call libc-add-cpu-variant-src,MEMCPY,arch-arm/cortex-a9/bionic/memcpy.S)
$(call libc-add-cpu-variant-src,MEMSET,arch-arm/cortex-a9/bionic/memset.S)
include bionic/libc/arch-arm/generic/generic.mk

libc/arch-arm/generic/bionic/memcpy.S Normal file
@@ -0,0 +1,380 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
#include <machine/asm.h>
/*
* Optimized memcpy() for ARM.
*
* note that memcpy() always returns the destination pointer,
* so we have to preserve R0.
*/
ENTRY(memcpy)
/* The stack must always be 64-bit aligned to be compliant with the
* ARM ABI. Since we have to save R0, we might as well save R4
* which we can use for better pipelining of the reads below
*/
.save {r0, r4, lr}
stmfd sp!, {r0, r4, lr}
/* Making room for r5-r11 which will be spilled later */
.pad #28
sub sp, sp, #28
// preload the destination because we'll align it to a cache line
// with small writes. Also start the source "pump".
PLD (r0, #0)
PLD (r1, #0)
PLD (r1, #32)
/* it simplifies things to take care of len<4 early */
cmp r2, #4
blo copy_last_3_and_return
/* compute the offset to align the source
* offset = (4-(src&3))&3 = -src & 3
*/
rsb r3, r1, #0
ands r3, r3, #3
beq src_aligned
/* align source to 32 bits. We need to insert 2 instructions between
* a ldr[b|h] and str[b|h] because byte and half-word instructions
* stall 2 cycles.
*/
movs r12, r3, lsl #31
sub r2, r2, r3 /* we know that r3 <= r2 because r2 >= 4 */
ldrmib r3, [r1], #1
ldrcsb r4, [r1], #1
ldrcsb r12,[r1], #1
strmib r3, [r0], #1
strcsb r4, [r0], #1
strcsb r12,[r0], #1
src_aligned:
/* see if src and dst are aligned together (congruent) */
eor r12, r0, r1
tst r12, #3
bne non_congruent
/* Use post-increment mode for stm to spill r5-r11 to reserved stack
* frame. Don't update sp.
*/
stmea sp, {r5-r11}
/* align the destination to a cache-line */
rsb r3, r0, #0
ands r3, r3, #0x1C
beq congruent_aligned32
cmp r3, r2
andhi r3, r2, #0x1C
/* conditionally copies 0 to 7 words (length in r3) */
movs r12, r3, lsl #28
ldmcsia r1!, {r4, r5, r6, r7} /* 16 bytes */
ldmmiia r1!, {r8, r9} /* 8 bytes */
stmcsia r0!, {r4, r5, r6, r7}
stmmiia r0!, {r8, r9}
tst r3, #0x4
ldrne r10,[r1], #4 /* 4 bytes */
strne r10,[r0], #4
sub r2, r2, r3
congruent_aligned32:
/*
* here source is aligned to 32 bytes.
*/
cached_aligned32:
subs r2, r2, #32
blo less_than_32_left
/*
* We preload a cache-line up to 64 bytes ahead. On the 926, this will
* stall only until the requested word is fetched, but the linefill
* continues in the background.
* While the linefill is going, we write our previous cache-line
* into the write-buffer (which should have some free space).
* When the linefill is done, the writebuffer will
* start dumping its content into memory
*
* While all this is going, we then load a full cache line into
* 8 registers, this cache line should be in the cache by now
* (or partly in the cache).
*
* This code should work well regardless of the source/dest alignment.
*
*/
// Align the preload register to a cache-line because the cpu does
// "critical word first" (the first word requested is loaded first).
bic r12, r1, #0x1F
add r12, r12, #64
1: ldmia r1!, { r4-r11 }
PLD (r12, #64)
subs r2, r2, #32
// NOTE: if r12 is more than 64 ahead of r1, the following ldrhi
// for ARM9 preload will not be safely guarded by the preceding subs.
// When it is safely guarded the only possibility to have SIGSEGV here
// is because the caller overstates the length.
ldrhi r3, [r12], #32 /* cheap ARM9 preload */
stmia r0!, { r4-r11 }
bhs 1b
add r2, r2, #32
less_than_32_left:
/*
* less than 32 bytes left at this point (length in r2)
*/
/* skip all this if there is nothing to do, which should
* be the common case (when it does run, the code below takes
* about 16 cycles)
*/
tst r2, #0x1F
beq 1f
/* conditionally copies 0 to 31 bytes */
movs r12, r2, lsl #28
ldmcsia r1!, {r4, r5, r6, r7} /* 16 bytes */
ldmmiia r1!, {r8, r9} /* 8 bytes */
stmcsia r0!, {r4, r5, r6, r7}
stmmiia r0!, {r8, r9}
movs r12, r2, lsl #30
ldrcs r3, [r1], #4 /* 4 bytes */
ldrmih r4, [r1], #2 /* 2 bytes */
strcs r3, [r0], #4
strmih r4, [r0], #2
tst r2, #0x1
ldrneb r3, [r1] /* last byte */
strneb r3, [r0]
/* we're done! restore everything and return */
1: ldmfd sp!, {r5-r11}
ldmfd sp!, {r0, r4, lr}
bx lr
/********************************************************************/
non_congruent:
/*
* here source is aligned to 4 bytes
* but destination is not.
*
* in the code below r2 is the number of bytes read
* (the number of bytes written is always smaller, because we have
* partial words in the shift queue)
*/
cmp r2, #4
blo copy_last_3_and_return
/* Use post-increment mode for stm to spill r5-r11 to reserved stack
* frame. Don't update sp.
*/
stmea sp, {r5-r11}
/* compute shifts needed to align src to dest */
rsb r5, r0, #0
and r5, r5, #3 /* r5 = # bytes in partial words */
mov r12, r5, lsl #3 /* r12 = right */
rsb lr, r12, #32 /* lr = left */
/* read the first word */
ldr r3, [r1], #4
sub r2, r2, #4
/* write a partial word (0 to 3 bytes), such that destination
* becomes aligned to 32 bits (r5 = nb of words to copy for alignment)
*/
movs r5, r5, lsl #31
strmib r3, [r0], #1
movmi r3, r3, lsr #8
strcsb r3, [r0], #1
movcs r3, r3, lsr #8
strcsb r3, [r0], #1
movcs r3, r3, lsr #8
cmp r2, #4
blo partial_word_tail
/* Align destination to 32 bytes (cache line boundary) */
1: tst r0, #0x1c
beq 2f
ldr r5, [r1], #4
sub r2, r2, #4
orr r4, r3, r5, lsl lr
mov r3, r5, lsr r12
str r4, [r0], #4
cmp r2, #4
bhs 1b
blo partial_word_tail
/* copy 32 bytes at a time */
2: subs r2, r2, #32
blo less_than_thirtytwo
/* Use immediate mode for the shifts, because there is an extra cycle
* for register shifts, which could account for up to 50% of
* performance hit.
*/
cmp r12, #24
beq loop24
cmp r12, #8
beq loop8
loop16:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
PLD (r1, #64)
subs r2, r2, #32
ldrhs r12, [r1], #4
orr r3, r3, r4, lsl #16
mov r4, r4, lsr #16
orr r4, r4, r5, lsl #16
mov r5, r5, lsr #16
orr r5, r5, r6, lsl #16
mov r6, r6, lsr #16
orr r6, r6, r7, lsl #16
mov r7, r7, lsr #16
orr r7, r7, r8, lsl #16
mov r8, r8, lsr #16
orr r8, r8, r9, lsl #16
mov r9, r9, lsr #16
orr r9, r9, r10, lsl #16
mov r10, r10, lsr #16
orr r10, r10, r11, lsl #16
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #16
bhs 1b
b less_than_thirtytwo
loop8:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
PLD (r1, #64)
subs r2, r2, #32
ldrhs r12, [r1], #4
orr r3, r3, r4, lsl #24
mov r4, r4, lsr #8
orr r4, r4, r5, lsl #24
mov r5, r5, lsr #8
orr r5, r5, r6, lsl #24
mov r6, r6, lsr #8
orr r6, r6, r7, lsl #24
mov r7, r7, lsr #8
orr r7, r7, r8, lsl #24
mov r8, r8, lsr #8
orr r8, r8, r9, lsl #24
mov r9, r9, lsr #8
orr r9, r9, r10, lsl #24
mov r10, r10, lsr #8
orr r10, r10, r11, lsl #24
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #8
bhs 1b
b less_than_thirtytwo
loop24:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
PLD (r1, #64)
subs r2, r2, #32
ldrhs r12, [r1], #4
orr r3, r3, r4, lsl #8
mov r4, r4, lsr #24
orr r4, r4, r5, lsl #8
mov r5, r5, lsr #24
orr r5, r5, r6, lsl #8
mov r6, r6, lsr #24
orr r6, r6, r7, lsl #8
mov r7, r7, lsr #24
orr r7, r7, r8, lsl #8
mov r8, r8, lsr #24
orr r8, r8, r9, lsl #8
mov r9, r9, lsr #24
orr r9, r9, r10, lsl #8
mov r10, r10, lsr #24
orr r10, r10, r11, lsl #8
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #24
bhs 1b
less_than_thirtytwo:
/* copy the last 0 to 31 bytes of the source */
rsb r12, lr, #32 /* we corrupted r12, recompute it */
add r2, r2, #32
cmp r2, #4
blo partial_word_tail
1: ldr r5, [r1], #4
sub r2, r2, #4
orr r4, r3, r5, lsl lr
mov r3, r5, lsr r12
str r4, [r0], #4
cmp r2, #4
bhs 1b
partial_word_tail:
/* we have a partial word in the input buffer */
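/* r3 holds lr/8 leftover bytes; lsl #(31-3) moves that byte count
 * into N (store one byte) and C (store two bytes) for the
 * conditional stores below.
 */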
movs r5, lr, lsl #(31-3)
strmib r3, [r0], #1
movmi r3, r3, lsr #8
strcsb r3, [r0], #1
movcs r3, r3, lsr #8
strcsb r3, [r0], #1
/* Refill spilled registers from the stack. Don't update sp. */
ldmfd sp, {r5-r11}
copy_last_3_and_return:
movs r2, r2, lsl #31 /* copy remaining 0, 1, 2 or 3 bytes */
ldrmib r2, [r1], #1
ldrcsb r3, [r1], #1
ldrcsb r12,[r1]
strmib r2, [r0], #1
strcsb r3, [r0], #1
strcsb r12,[r0]
/* we're done! restore sp and spilled registers and return */
add sp, sp, #28
ldmfd sp!, {r0, r4, lr}
bx lr
END(memcpy)

libc/arch-arm/generic/bionic/memset.S Normal file
@@ -0,0 +1,109 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/asm.h>
/*
* Optimized memset() for ARM.
*
* memset() returns its first argument.
*/
ENTRY(bzero)
mov r2, r1
mov r1, #0
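/* fall through to memset() */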
END(bzero)
ENTRY(memset)
/* compute the offset to align the destination
* offset = (4-(dst&3))&3 = -dst & 3
*/
.save {r0, r4-r7, lr}
stmfd sp!, {r0, r4-r7, lr}
rsb r3, r0, #0
ands r3, r3, #3
cmp r3, r2
movhi r3, r2
/* splat r1 */
mov r1, r1, lsl #24
orr r1, r1, r1, lsr #8
orr r1, r1, r1, lsr #16
movs r12, r3, lsl #31
strcsb r1, [r0], #1 /* can't use strh (alignment unknown) */
strcsb r1, [r0], #1
strmib r1, [r0], #1
subs r2, r2, r3
ldmlsfd sp!, {r0, r4-r7, lr} /* return */
bxls lr
/* align the destination to a cache-line */
mov r12, r1
mov lr, r1
mov r4, r1
mov r5, r1
mov r6, r1
mov r7, r1
rsb r3, r0, #0
ands r3, r3, #0x1C
beq 3f
cmp r3, r2
andhi r3, r2, #0x1C
sub r2, r2, r3
/* conditionally writes 0 to 7 words (length in r3) */
movs r3, r3, lsl #28
stmcsia r0!, {r1, lr}
stmcsia r0!, {r1, lr}
stmmiia r0!, {r1, lr}
movs r3, r3, lsl #2
strcs r1, [r0], #4
3:
subs r2, r2, #32
mov r3, r1
bmi 2f
1: subs r2, r2, #32
stmia r0!, {r1,r3,r4,r5,r6,r7,r12,lr}
bhs 1b
2: add r2, r2, #32
/* conditionally stores 0 to 31 bytes */
movs r2, r2, lsl #28
stmcsia r0!, {r1,r3,r12,lr}
stmmiia r0!, {r1, lr}
movs r2, r2, lsl #2
strcs r1, [r0], #4
strmih r1, [r0], #2
movs r2, r2, lsl #2
strcsb r1, [r0]
ldmfd sp!, {r0, r4-r7, lr}
bx lr
END(memset)

libc/arch-arm/generic/generic.mk Normal file
@@ -0,0 +1,2 @@
$(call libc-add-cpu-variant-src,MEMCPY,arch-arm/generic/bionic/memcpy.S)
$(call libc-add-cpu-variant-src,MEMSET,arch-arm/generic/bionic/memset.S)
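This file does double duty: each cpu-variant makefile includes it as its fallback tail, and it is also selected directly when TARGET_CPU_VARIANT is set to generic (see the error message in arm.mk).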

libc/arch-arm/krait/bionic/memcpy.S Normal file
@@ -0,0 +1,146 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Assumes neon instructions and a cache line size of 32 bytes. */
#include <machine/cpu-features.h>
#include <machine/asm.h>
/*
* This code assumes it is running on a processor that supports all arm v7
* instructions, that supports neon instructions, and that has a 32 byte
* cache line.
*/
.text
.fpu neon
#define CACHE_LINE_SIZE 32
ENTRY(memcpy)
.save {r0, lr}
/* start preloading as early as possible */
pld [r1, #(CACHE_LINE_SIZE*0)]
stmfd sp!, {r0, lr}
pld [r1, #(CACHE_LINE_SIZE*2)]
/* do we have at least 16-bytes to copy (needed for alignment below) */
cmp r2, #16
blo 5f
/* align destination to cache-line for the write-buffer */
rsb r3, r0, #0
ands r3, r3, #0xF
beq 0f
/* copy up to 15-bytes (count in r3) */
sub r2, r2, r3
movs ip, r3, lsl #31
ldrmib lr, [r1], #1
strmib lr, [r0], #1
ldrcsb ip, [r1], #1
ldrcsb lr, [r1], #1
strcsb ip, [r0], #1
strcsb lr, [r0], #1
movs ip, r3, lsl #29
bge 1f
// copies 4 bytes, destination 32-bits aligned
vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0, :32]!
1: bcc 2f
// copies 8 bytes, destination 64-bits aligned
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0, :64]!
2:
0: /* preload immediately the next cache line, which we may need */
pld [r1, #(CACHE_LINE_SIZE*0)]
pld [r1, #(CACHE_LINE_SIZE*2)]
/* make sure we have at least 64 bytes to copy */
subs r2, r2, #64
blo 2f
/* Preload all the cache lines we need.
* NOTE: The number of pld below depends on CACHE_LINE_SIZE,
* ideally we would increase the distance in the main loop to
* avoid the goofy code below. In practice this doesn't seem to make
* a big difference.
* NOTE: The value CACHE_LINE_SIZE * 8 was chosen through
* experimentation.
*/
pld [r1, #(CACHE_LINE_SIZE*4)]
pld [r1, #(CACHE_LINE_SIZE*6)]
pld [r1, #(CACHE_LINE_SIZE*8)]
1: /* The main loop copies 64 bytes at a time */
vld1.8 {d0 - d3}, [r1]!
vld1.8 {d4 - d7}, [r1]!
pld [r1, #(CACHE_LINE_SIZE*8)]
subs r2, r2, #64
vst1.8 {d0 - d3}, [r0, :128]!
vst1.8 {d4 - d7}, [r0, :128]!
bhs 1b
2: /* fix-up the remaining count and make sure we have >= 32 bytes left */
add r2, r2, #64
subs r2, r2, #32
blo 4f
3: /* 32 bytes at a time. These cache lines were already preloaded */
vld1.8 {d0 - d3}, [r1]!
subs r2, r2, #32
vst1.8 {d0 - d3}, [r0, :128]!
bhs 3b
4: /* less than 32 left */
add r2, r2, #32
tst r2, #0x10
beq 5f
// copies 16 bytes, 128-bits aligned
vld1.8 {d0, d1}, [r1]!
vst1.8 {d0, d1}, [r0, :128]!
5: /* copy up to 15-bytes (count in r2) */
movs ip, r2, lsl #29
bcc 1f
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0]!
1: bge 2f
vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0]!
2: movs ip, r2, lsl #31
ldrmib r3, [r1], #1
ldrcsb ip, [r1], #1
ldrcsb lr, [r1], #1
strmib r3, [r0], #1
strcsb ip, [r0], #1
strcsb lr, [r0], #1
ldmfd sp!, {r0, lr}
bx lr
END(memcpy)

libc/arch-arm/krait/bionic/memset.S Normal file
@@ -0,0 +1,81 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/cpu-features.h>
#include <machine/asm.h>
/*
* This code assumes it is running on a processor that supports all arm v7
* instructions, that supports neon instructions, and that supports
* unaligned neon instruction accesses to memory.
*/
.fpu neon
ENTRY(bzero)
mov r2, r1
mov r1, #0
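// Fall through to memset...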
END(bzero)
/* memset() returns its first argument. */
ENTRY(memset)
.save {r0}
stmfd sp!, {r0}
vdup.8 q0, r1
/* make sure we have at least 32 bytes to write */
subs r2, r2, #32
blo 2f
vmov q1, q0
1: /* The main loop writes 32 bytes at a time */
subs r2, r2, #32
vst1.8 {d0 - d3}, [r0]!
bhs 1b
2: /* less than 32 left */
add r2, r2, #32
tst r2, #0x10
beq 3f
// writes 16 bytes, 128-bits aligned
vst1.8 {d0, d1}, [r0]!
3: /* write up to 15-bytes (count in r2) */
movs ip, r2, lsl #29
bcc 1f
vst1.8 {d0}, [r0]!
1: bge 2f
vst1.32 {d0[0]}, [r0]!
2: movs ip, r2, lsl #31
strmib r1, [r0], #1
strcsb r1, [r0], #1
strcsb r1, [r0], #1
ldmfd sp!, {r0}
bx lr
END(memset)

libc/arch-arm/krait/krait.mk Normal file
@@ -0,0 +1,4 @@
$(call libc-add-cpu-variant-src,MEMCPY,arch-arm/krait/bionic/memcpy.S)
$(call libc-add-cpu-variant-src,MEMSET,arch-arm/krait/bionic/memset.S)
include bionic/libc/arch-arm/generic/generic.mk

libc/arch-mips/mips.mk Normal file
@@ -0,0 +1,25 @@
_LIBC_ARCH_COMMON_SRC_FILES := \
arch-mips/bionic/__get_sp.S \
arch-mips/bionic/__get_tls.c \
arch-mips/bionic/__set_tls.c \
arch-mips/bionic/_exit_with_stack_teardown.S \
arch-mips/bionic/_setjmp.S \
arch-mips/bionic/futex_mips.S \
arch-mips/bionic/bzero.S \
arch-mips/bionic/cacheflush.c \
arch-mips/bionic/clone.S \
arch-mips/bionic/ffs.S \
arch-mips/bionic/memcmp16.S \
arch-mips/bionic/memmove.c \
arch-mips/bionic/pipe.S \
arch-mips/bionic/setjmp.S \
arch-mips/bionic/sigsetjmp.S \
arch-mips/bionic/vfork.S \
arch-mips/string/memset.S \
arch-mips/string/memcpy.S \
arch-mips/string/mips_strlen.c
_LIBC_ARCH_STATIC_SRC_FILES := \
bionic/dl_iterate_phdr_static.c
_LIBC_ARCH_DYNAMIC_SRC_FILES :=

libc/arch-x86/x86.mk Normal file
@@ -0,0 +1,27 @@
_LIBC_ARCH_COMMON_SRC_FILES := \
arch-x86/bionic/clone.S \
arch-x86/bionic/_exit_with_stack_teardown.S \
arch-x86/bionic/futex_x86.S \
arch-x86/bionic/__get_sp.S \
arch-x86/bionic/__get_tls.c \
arch-x86/bionic/_setjmp.S \
arch-x86/bionic/setjmp.S \
arch-x86/bionic/__set_tls.c \
arch-x86/bionic/sigsetjmp.S \
arch-x86/bionic/syscall.S \
arch-x86/bionic/vfork.S \
arch-x86/string/bcopy_wrapper.S \
arch-x86/string/bzero_wrapper.S \
arch-x86/string/ffs.S \
arch-x86/string/memcmp_wrapper.S \
arch-x86/string/memcpy_wrapper.S \
arch-x86/string/memmove_wrapper.S \
arch-x86/string/memset_wrapper.S \
arch-x86/string/strcmp_wrapper.S \
arch-x86/string/strlen_wrapper.S \
arch-x86/string/strncmp_wrapper.S
_LIBC_ARCH_STATIC_SRC_FILES := \
bionic/dl_iterate_phdr_static.c
_LIBC_ARCH_DYNAMIC_SRC_FILES :=