Remove the unnecessary generic-neon code.

Bug: 18156619
(cherry picked from commit 2169e17482)

Change-Id: I4a7f5bb9ad4c27b274f3a3c86c1617ca0578b98f
Christopher Ferris 2014-09-23 18:31:45 -07:00
parent 1ea31a2901
commit 43dc3a9aae
3 changed files with 1 addition and 193 deletions


@@ -52,7 +52,7 @@ ifeq ($(strip $(TARGET_CPU_VARIANT)),)
 endif
 cpu_variant_mk := $(LOCAL_PATH)/arch-arm64/$(TARGET_CPU_VARIANT)/$(TARGET_CPU_VARIANT).mk
 ifeq ($(wildcard $(cpu_variant_mk)),)
-$(error "TARGET_CPU_VARIANT not set or set to an unknown value. Possible values are generic, generic-neon, denver64. Use generic for devices that do not have a CPU similar to any of the supported cpu variants.")
+$(error "TARGET_CPU_VARIANT not set or set to an unknown value. Possible values are generic, denver64. Use generic for devices that do not have a CPU similar to any of the supported cpu variants.")
 endif
 include $(cpu_variant_mk)
 libc_common_additional_dependencies += $(cpu_variant_mk)


@@ -1,179 +0,0 @@
/* Copyright (c) 2012, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Assumptions:
*
* ARMv8-a, AArch64
* Unaligned accesses
*
*/
#include <private/bionic_asm.h>
#define dstin x0
#define src x1
#define count x2
#define tmp1 x3
#define tmp1w w3
#define tmp2 x4
#define tmp2w w4
#define tmp3 x5
#define tmp3w w5
#define dst x6
#define A_l x7
#define A_h x8
#define B_l x9
#define B_h x10
#define C_l x11
#define C_h x12
#define D_l x13
#define D_h x14
#define QA_l q0
#define QA_h q1
#define QB_l q2
#define QB_h q3
ENTRY(memcpy)
        mov     dst, dstin
        cmp     count, #64
        b.ge    .Lcpy_not_short
        cmp     count, #15
        b.le    .Ltail15tiny

        /* Deal with small copies quickly by dropping straight into the
         * exit block. */
.Ltail63:
        /* Copy up to 48 bytes of data. At this point we only need the
         * bottom 6 bits of count to be accurate. */
        ands    tmp1, count, #0x30
        b.eq    .Ltail15
        add     dst, dst, tmp1
        add     src, src, tmp1
        cmp     tmp1w, #0x20
        b.eq    1f
        b.lt    2f
        ldp     A_l, A_h, [src, #-48]
        stp     A_l, A_h, [dst, #-48]
1:
        ldp     A_l, A_h, [src, #-32]
        stp     A_l, A_h, [dst, #-32]
2:
        ldp     A_l, A_h, [src, #-16]
        stp     A_l, A_h, [dst, #-16]

.Ltail15:
        ands    count, count, #15
        beq     1f
        add     src, src, count
        ldp     A_l, A_h, [src, #-16]
        add     dst, dst, count
        stp     A_l, A_h, [dst, #-16]
1:
        ret

.Ltail15tiny:
        /* Copy up to 15 bytes of data. Does not assume additional data
         * being copied. */
        tbz     count, #3, 1f
        ldr     tmp1, [src], #8
        str     tmp1, [dst], #8
1:
        tbz     count, #2, 1f
        ldr     tmp1w, [src], #4
        str     tmp1w, [dst], #4
1:
        tbz     count, #1, 1f
        ldrh    tmp1w, [src], #2
        strh    tmp1w, [dst], #2
1:
        tbz     count, #0, 1f
        ldrb    tmp1w, [src]
        strb    tmp1w, [dst]
1:
        ret

.Lcpy_not_short:
        /* We don't much care about the alignment of DST, but we want SRC
         * to be 128-bit (16 byte) aligned so that we don't cross cache line
         * boundaries on both loads and stores. */
        neg     tmp2, src
        ands    tmp2, tmp2, #15         /* Bytes to reach alignment. */
        b.eq    2f
        sub     count, count, tmp2
        /* Copy more data than needed; it's faster than jumping
         * around copying sub-Quadword quantities. We know that
         * it can't overrun. */
        ldp     A_l, A_h, [src]
        add     src, src, tmp2
        stp     A_l, A_h, [dst]
        add     dst, dst, tmp2
        /* There may be less than 63 bytes to go now. */
        cmp     count, #63
        b.le    .Ltail63
2:
        subs    count, count, #128
        b.ge    .Lcpy_body_large
        /* Less than 128 bytes to copy, so handle 64 here and then jump
         * to the tail. */
        ldp     QA_l, QA_h, [src]
        ldp     QB_l, QB_h, [src, #32]
        stp     QA_l, QA_h, [dst]
        stp     QB_l, QB_h, [dst, #32]
        tst     count, #0x3f
        add     src, src, #64
        add     dst, dst, #64
        b.ne    .Ltail63
        ret

        /* Critical loop. Start at a new cache line boundary. Assuming
         * 64 bytes per line this ensures the entire loop is in one line. */
        .p2align 6
.Lcpy_body_large:
        /* There are at least 128 bytes to copy. */
        ldp     QA_l, QA_h, [src, #0]
        sub     dst, dst, #32           /* Pre-bias. */
        ldp     QB_l, QB_h, [src, #32]! /* src += 64 - Pre-bias. */
1:
        stp     QA_l, QA_h, [dst, #32]
        ldp     QA_l, QA_h, [src, #32]
        stp     QB_l, QB_h, [dst, #64]!
        ldp     QB_l, QB_h, [src, #64]!
        subs    count, count, #64
        b.ge    1b
        stp     QA_l, QA_h, [dst, #32]
        stp     QB_l, QB_h, [dst, #64]
        add     src, src, #32
        add     dst, dst, #64 + 32
        tst     count, #0x3f
        b.ne    .Ltail63
        ret
END(memcpy)
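
For readers who do not follow AArch64 assembly, the comments in the deleted routine describe the strategy: copies under 16 bytes drop into the bit-test exit block, copies under 64 bytes go through the 16-byte tail blocks, and larger copies first align the source to 16 bytes (deliberately copying a few bytes twice, which cannot overrun because at least 64 bytes remain) before entering the wide main loop. Below is a rough, illustrative C sketch of that strategy; sketch_memcpy and copy16 are made-up names, this is not the bionic implementation, and it collapses the separate 64-127 byte and 128-and-up paths into one loop.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Illustrative only: a C sketch of the deleted routine's copy strategy. */
static void copy16(unsigned char *dst, const unsigned char *src) {
    /* One 16-byte block; a fixed-size memcpy like this typically compiles
     * to a pair of 8-byte loads/stores or a single 16-byte SIMD move. */
    memcpy(dst, src, 16);
}

void *sketch_memcpy(void *dstin, const void *srcin, size_t count) {
    unsigned char *dst = dstin;
    const unsigned char *src = srcin;

    if (count < 16) {
        /* Tiny copies: peel off 8/4/2/1 bytes by testing the bits of count,
         * as the .Ltail15tiny block does with tbz. */
        if (count & 8) { memcpy(dst, src, 8); dst += 8; src += 8; }
        if (count & 4) { memcpy(dst, src, 4); dst += 4; src += 4; }
        if (count & 2) { memcpy(dst, src, 2); dst += 2; src += 2; }
        if (count & 1) { *dst = *src; }
        return dstin;
    }

    if (count >= 64) {
        /* Copy one unaligned 16-byte block, then advance both pointers so
         * that src becomes 16-byte aligned. A few bytes are copied twice,
         * but this cannot overrun because at least 64 bytes remain. */
        size_t skew = (size_t)(-(uintptr_t)src & 15);
        if (skew != 0) {
            copy16(dst, src);
            dst += skew;
            src += skew;
            count -= skew;
        }
        /* Main loop: 64 bytes per iteration from the now-aligned source. */
        while (count >= 64) {
            copy16(dst, src);
            copy16(dst + 16, src + 16);
            copy16(dst + 32, src + 32);
            copy16(dst + 48, src + 48);
            dst += 64;
            src += 64;
            count -= 64;
        }
    }

    /* Tail: whole 16-byte blocks, then one final block that deliberately
     * overlaps bytes already written (safe because the original count was
     * at least 16, so the overlapping block stays inside the buffers). */
    while (count >= 16) {
        copy16(dst, src);
        dst += 16;
        src += 16;
        count -= 16;
    }
    if (count != 0) {
        copy16(dst + count - 16, src + count - 16);
    }
    return dstin;
}

As with memcpy itself, the sketch assumes the buffers do not overlap; the final overlapping 16-byte block only rewrites bytes already stored with the same values. Only the source is aligned, which, as the .Lcpy_not_short comment notes, keeps the wide loads from straddling cache lines even when the destination stays unaligned.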


@@ -1,13 +0,0 @@
libc_bionic_src_files_arm64 += \
    arch-arm64/generic/bionic/memchr.S \
    arch-arm64/generic/bionic/memcmp.S \
    arch-arm64/generic/bionic/memmove.S \
    arch-arm64/generic/bionic/memset.S \
    arch-arm64/generic/bionic/stpcpy.S \
    arch-arm64/generic/bionic/strchr.S \
    arch-arm64/generic/bionic/strcmp.S \
    arch-arm64/generic/bionic/strcpy.S \
    arch-arm64/generic/bionic/strlen.S \
    arch-arm64/generic/bionic/strncmp.S \
    arch-arm64/generic/bionic/strnlen.S \
    arch-arm64/generic-neon/bionic/memcpy.S \