From 0ef7a8fd31a7ee9571c54c17e1f42cba4cfb44ec Mon Sep 17 00:00:00 2001
From: Shu Zhang
Date: Mon, 17 Mar 2014 15:15:32 +0800
Subject: [PATCH] add 64-bit bionic implementation for denver arch

Add a 64-bit bionic implementation for denver. memcpy/memset are
optimized specifically for denver; the generic versions of the other
routines are used.

Change-Id: I44a830e07b82b2986001d73d1540b4080aaa839b
---
 libc/arch-arm64/arm64.mk                 |   2 +-
 libc/arch-arm64/denver64/bionic/memcpy.S | 205 +++++++++++++++++
 libc/arch-arm64/denver64/bionic/memset.S | 271 +++++++++++++++++++++++
 libc/arch-arm64/denver64/denver64.mk     |  10 +
 4 files changed, 487 insertions(+), 1 deletion(-)
 create mode 100644 libc/arch-arm64/denver64/bionic/memcpy.S
 create mode 100644 libc/arch-arm64/denver64/bionic/memset.S
 create mode 100644 libc/arch-arm64/denver64/denver64.mk

diff --git a/libc/arch-arm64/arm64.mk b/libc/arch-arm64/arm64.mk
index 2c90cd6f2..8920f5fbb 100644
--- a/libc/arch-arm64/arm64.mk
+++ b/libc/arch-arm64/arm64.mk
@@ -58,7 +58,7 @@ ifeq ($(strip $(TARGET_CPU_VARIANT)),)
 endif
 cpu_variant_mk := $(LOCAL_PATH)/arch-arm64/$(TARGET_CPU_VARIANT)/$(TARGET_CPU_VARIANT).mk
 ifeq ($(wildcard $(cpu_variant_mk)),)
-$(error "TARGET_CPU_VARIANT not set or set to an unknown value. Possible values are generic, generic-neon. Use generic for devices that do not have a CPU similar to any of the supported cpu variants.")
+$(error "TARGET_CPU_VARIANT not set or set to an unknown value. Possible values are generic, generic-neon, denver64. Use generic for devices that do not have a CPU similar to any of the supported cpu variants.")
 endif
 include $(cpu_variant_mk)
 libc_common_additional_dependencies += $(cpu_variank_mk)
diff --git a/libc/arch-arm64/denver64/bionic/memcpy.S b/libc/arch-arm64/denver64/bionic/memcpy.S
new file mode 100644
index 000000000..700f0d011
--- /dev/null
+++ b/libc/arch-arm64/denver64/bionic/memcpy.S
@@ -0,0 +1,205 @@
+/* Copyright (c) 2012, Linaro Limited
+   All rights reserved.
+   Copyright (c) 2014, NVIDIA Corporation. All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are met:
+       * Redistributions of source code must retain the above copyright
+         notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above copyright
+         notice, this list of conditions and the following disclaimer in the
+         documentation and/or other materials provided with the distribution.
+       * Neither the name of the Linaro nor the
+         names of its contributors may be used to endorse or promote products
+         derived from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/* Assumptions:
+ *
+ * denver, ARMv8-a, AArch64
+ * Unaligned accesses
+ *
+ */
+
+#include <private/bionic_asm.h>
+
+#define dstin	x0
+#define src	x1
+#define count	x2
+#define tmp1	x3
+#define tmp1w	w3
+#define tmp2	x4
+#define tmp2w	w4
+#define tmp3	x5
+#define tmp3w	w5
+#define dst	x6
+
+#define A_l	x7
+#define A_h	x8
+#define B_l	x9
+#define B_h	x10
+#define C_l	x11
+#define C_h	x12
+#define D_l	x13
+#define D_h	x14
+
+#define QA_l	q0
+#define QA_h	q1
+#define QB_l	q2
+#define QB_h	q3
+
+ENTRY(memcpy)
+
+	mov	dst, dstin
+	cmp	count, #64
+	b.ge	.Lcpy_not_short
+	cmp	count, #15
+	b.le	.Ltail15tiny
+
+	/* Deal with small copies quickly by dropping straight into the
+	 * exit block.  */
+.Ltail63:
+	/* Copy up to 48 bytes of data.  At this point we only need the
+	 * bottom 6 bits of count to be accurate.  */
+	ands	tmp1, count, #0x30
+	b.eq	.Ltail15
+	add	dst, dst, tmp1
+	add	src, src, tmp1
+	cmp	tmp1w, #0x20
+	b.eq	1f
+	b.lt	2f
+	ldp	A_l, A_h, [src, #-48]
+	stp	A_l, A_h, [dst, #-48]
+1:
+	ldp	A_l, A_h, [src, #-32]
+	stp	A_l, A_h, [dst, #-32]
+2:
+	ldp	A_l, A_h, [src, #-16]
+	stp	A_l, A_h, [dst, #-16]
+
+.Ltail15:
+	ands	count, count, #15
+	beq	1f
+	add	src, src, count
+	ldp	A_l, A_h, [src, #-16]
+	add	dst, dst, count
+	stp	A_l, A_h, [dst, #-16]
+1:
+	ret
+
+.Ltail15tiny:
+	/* Copy up to 15 bytes of data.  Does not assume additional data
+	   being copied.  */
+	tbz	count, #3, 1f
+	ldr	tmp1, [src], #8
+	str	tmp1, [dst], #8
+1:
+	tbz	count, #2, 1f
+	ldr	tmp1w, [src], #4
+	str	tmp1w, [dst], #4
+1:
+	tbz	count, #1, 1f
+	ldrh	tmp1w, [src], #2
+	strh	tmp1w, [dst], #2
+1:
+	tbz	count, #0, 1f
+	ldrb	tmp1w, [src]
+	strb	tmp1w, [dst]
+1:
+	ret
+
+.Lcpy_not_short:
+	/* We don't much care about the alignment of DST, but we want SRC
+	 * to be 128-bit (16 byte) aligned so that we don't cross cache line
+	 * boundaries on both loads and stores.  */
+	neg	tmp2, src
+	ands	tmp2, tmp2, #15		/* Bytes to reach alignment.  */
+	b.eq	2f
+	sub	count, count, tmp2
+	/* Copy more data than needed; it's faster than jumping
+	 * around copying sub-Quadword quantities.  We know that
+	 * it can't overrun.  */
+	ldp	A_l, A_h, [src]
+	add	src, src, tmp2
+	stp	A_l, A_h, [dst]
+	add	dst, dst, tmp2
+	/* There may be less than 63 bytes to go now.  */
+	cmp	count, #63
+	b.le	.Ltail63
+2:
+	subs	count, count, #128
+	b.ge	.Lcpy_body_large
+	/* Less than 128 bytes to copy, so handle 64 here and then jump
+	 * to the tail.  */
+	ldp	QA_l, QA_h, [src]
+	ldp	QB_l, QB_h, [src, #32]
+	stp	QA_l, QA_h, [dst]
+	stp	QB_l, QB_h, [dst, #32]
+	tst	count, #0x3f
+	add	src, src, #64
+	add	dst, dst, #64
+	b.ne	.Ltail63
+	ret
+
+	/* Critical loop.  Start at a new cache line boundary.  Assuming
+	 * 64 bytes per line this ensures the entire loop is in one line.  */
+	.p2align 6
+.Lcpy_body_large:
+	cmp	count, 65536
+	bhi	.Lcpy_body_huge
+	/* There are at least 128 bytes to copy.  */
+	ldp	QA_l, QA_h, [src, #0]
+	sub	dst, dst, #32		/* Pre-bias.  */
+	ldp	QB_l, QB_h, [src, #32]!	/* src += 64 - Pre-bias.  */
+1:
+	stp	QA_l, QA_h, [dst, #32]
+	ldp	QA_l, QA_h, [src, #32]
+	stp	QB_l, QB_h, [dst, #64]!
+	ldp	QB_l, QB_h, [src, #64]!
+
+	subs	count, count, #64
+	b.ge	1b
+
+	stp	QA_l, QA_h, [dst, #32]
+	stp	QB_l, QB_h, [dst, #64]
+	add	src, src, #32
+	add	dst, dst, #64 + 32
+	tst	count, #0x3f
+	b.ne	.Ltail63
+	ret
+.Lcpy_body_huge:
+	/* There are at least 128 bytes to copy.  */
+	ldp	QA_l, QA_h, [src, #0]
+	sub	dst, dst, #32		/* Pre-bias.  */
+	ldp	QB_l, QB_h, [src, #32]!
+1:
+	stnp	QA_l, QA_h, [dst, #32]
+	stnp	QB_l, QB_h, [dst, #64]
+	ldp	QA_l, QA_h, [src, #32]
+	ldp	QB_l, QB_h, [src, #64]!
+	add	dst, dst, #64
+
+	subs	count, count, #64
+	b.ge	1b
+
+	stnp	QA_l, QA_h, [dst, #32]
+	stnp	QB_l, QB_h, [dst, #64]
+	add	src, src, #32
+	add	dst, dst, #64 + 32
+	tst	count, #0x3f
+	b.ne	.Ltail63
+	ret
+
+END(memcpy)
diff --git a/libc/arch-arm64/denver64/bionic/memset.S b/libc/arch-arm64/denver64/bionic/memset.S
new file mode 100644
index 000000000..9127d89f1
--- /dev/null
+++ b/libc/arch-arm64/denver64/bionic/memset.S
@@ -0,0 +1,271 @@
+/* Copyright (c) 2012, Linaro Limited
+   All rights reserved.
+   Copyright (c) 2014, NVIDIA Corporation. All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are met:
+       * Redistributions of source code must retain the above copyright
+         notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above copyright
+         notice, this list of conditions and the following disclaimer in the
+         documentation and/or other materials provided with the distribution.
+       * Neither the name of the Linaro nor the
+         names of its contributors may be used to endorse or promote products
+         derived from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/* Assumptions:
+ *
+ * denver, ARMv8-a, AArch64
+ * Unaligned accesses
+ *
+ */
+
+#include <private/bionic_asm.h>
+
+/* By default we assume that the DC instruction can be used to zero
+   data blocks more efficiently.  In some circumstances this might be
+   unsafe, for example in an asymmetric multiprocessor environment with
+   different DC clear lengths (neither the upper nor lower lengths are
+   safe to use).  The feature can be disabled by defining DONT_USE_DC.
+
+   If code may be run in a virtualized environment, then define
+   MAYBE_VIRT.  This will cause the code to cache the system register
+   values rather than re-reading them each call.  */
+
+#define dstin		x0
+#define val		w1
+#define count		x2
+#define tmp1		x3
+#define tmp1w		w3
+#define tmp2		x4
+#define tmp2w		w4
+#define zva_len_x	x5
+#define zva_len		w5
+#define zva_bits_x	x6
+
+#define A_l		x7
+#define A_lw		w7
+#define dst		x8
+#define tmp3w		w9
+
+#define QA_l		q0
+
+ENTRY(memset)
+
+	mov	dst, dstin		/* Preserve return value.  */
+	ands	A_lw, val, #255
+#ifndef DONT_USE_DC
+	b.eq	.Lzero_mem
+#endif
+	orr	A_lw, A_lw, A_lw, lsl #8
+	orr	A_lw, A_lw, A_lw, lsl #16
+	orr	A_l, A_l, A_l, lsl #32
+.Ltail_maybe_long:
+	cmp	count, #256
+	b.ge	.Lnot_short
+.Ltail_maybe_tiny:
+	cmp	count, #15
+	b.le	.Ltail15tiny
+.Ltail255:
+	ands	tmp1, count, #0xC0
+	b.eq	.Ltail63
+	dup	v0.4s, A_lw
+	cmp	tmp1w, #0x80
+	b.eq	1f
+	b.lt	2f
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+1:
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+2:
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+.Ltail63:
+	ands	tmp1, count, #0x30
+	b.eq	.Ltail15
+	add	dst, dst, tmp1
+	cmp	tmp1w, #0x20
+	b.eq	1f
+	b.lt	2f
+	stp	A_l, A_l, [dst, #-48]
+1:
+	stp	A_l, A_l, [dst, #-32]
+2:
+	stp	A_l, A_l, [dst, #-16]
+
+.Ltail15:
+	and	count, count, #15
+	add	dst, dst, count
+	stp	A_l, A_l, [dst, #-16]	/* Repeat some/all of last store.  */
+	ret
+
+.Ltail15tiny:
+	/* Set up to 15 bytes.  Does not assume earlier memory
+	   being set.  */
+	tbz	count, #3, 1f
+	str	A_l, [dst], #8
+1:
+	tbz	count, #2, 1f
+	str	A_lw, [dst], #4
+1:
+	tbz	count, #1, 1f
+	strh	A_lw, [dst], #2
+1:
+	tbz	count, #0, 1f
+	strb	A_lw, [dst]
+1:
+	ret
+
+	/* Critical loop.  Start at a new cache line boundary.  Assuming
+	 * 64 bytes per line, this ensures the entire loop is in one line.  */
+	.p2align 6
+.Lnot_short:
+	dup	v0.4s, A_lw
+	neg	tmp2, dst
+	ands	tmp2, tmp2, #15
+	b.eq	2f
+	/* Bring DST to 128-bit (16-byte) alignment.  We know that there's
+	 * more than that to set, so we simply store 16 bytes and advance by
+	 * the amount required to reach alignment.  */
+	sub	count, count, tmp2
+	stp	A_l, A_l, [dst]
+	add	dst, dst, tmp2
+	/* There may be less than 63 bytes to go now.  */
+	cmp	count, #255
+	b.le	.Ltail255
+2:
+	cmp	count, #2097152
+	b.gt	3f
+1:
+	sub	count, count, #256
+2:
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+	subs	count, count, #256
+	b.ge	2b
+	tst	count, #0xff
+	b.ne	.Ltail255
+	ret
+3:
+	sub	count, count, #64
+4:
+	subs	count, count, #64
+	stnp	QA_l, QA_l, [dst]
+	stnp	QA_l, QA_l, [dst, #32]
+	add	dst, dst, #64
+	b.ge	4b
+	tst	count, #0x3f
+	b.ne	.Ltail63
+	ret
+
+#ifndef DONT_USE_DC
+	/* For zeroing memory, check to see if we can use the ZVA feature to
+	 * zero entire 'cache' lines.  */
+.Lzero_mem:
+	mov	A_l, #0
+	cmp	count, #63
+	b.le	.Ltail_maybe_tiny
+	neg	tmp2, dst
+	ands	tmp2, tmp2, #15
+	b.eq	1f
+	sub	count, count, tmp2
+	stp	A_l, A_l, [dst]
+	add	dst, dst, tmp2
+	cmp	count, #63
+	b.le	.Ltail63
+1:
+	/* For zeroing small amounts of memory, it's not worth setting up
+	 * the line-clear code.  */
+	cmp	count, #128
+	b.lt	.Lnot_short
+#ifdef MAYBE_VIRT
+	/* For efficiency when virtualized, we cache the ZVA capability.  */
+	adrp	tmp2, .Lcache_clear
+	ldr	zva_len, [tmp2, #:lo12:.Lcache_clear]
+	tbnz	zva_len, #31, .Lnot_short
+	cbnz	zva_len, .Lzero_by_line
+	mrs	tmp1, dczid_el0
+	tbz	tmp1, #4, 1f
+	/* ZVA not available.  Remember this for next time.  */
+	mov	zva_len, #~0
+	str	zva_len, [tmp2, #:lo12:.Lcache_clear]
+	b	.Lnot_short
+1:
+	mov	tmp3w, #4
+	and	zva_len, tmp1w, #15	/* Safety: other bits reserved.  */
+	lsl	zva_len, tmp3w, zva_len
+	str	zva_len, [tmp2, #:lo12:.Lcache_clear]
+#else
+	mrs	tmp1, dczid_el0
+	tbnz	tmp1, #4, .Lnot_short
+	mov	tmp3w, #4
+	and	zva_len, tmp1w, #15	/* Safety: other bits reserved.  */
+	lsl	zva_len, tmp3w, zva_len
+#endif
+
+.Lzero_by_line:
+	/* Compute how far we need to go to become suitably aligned.  We're
+	 * already at quad-word alignment.  */
+	cmp	count, zva_len_x
+	b.lt	.Lnot_short		/* Not enough to reach alignment.  */
+	sub	zva_bits_x, zva_len_x, #1
+	neg	tmp2, dst
+	ands	tmp2, tmp2, zva_bits_x
+	b.eq	1f			/* Already aligned.  */
+	/* Not aligned, check that there's enough to copy after alignment.  */
+	sub	tmp1, count, tmp2
+	cmp	tmp1, #64
+	ccmp	tmp1, zva_len_x, #8, ge	/* NZCV=0b1000 */
+	b.lt	.Lnot_short
+	/* We know that there's at least 64 bytes to zero and that it's safe
+	 * to overrun by 64 bytes.  */
+	mov	count, tmp1
+2:
+	stp	A_l, A_l, [dst]
+	stp	A_l, A_l, [dst, #16]
+	stp	A_l, A_l, [dst, #32]
+	subs	tmp2, tmp2, #64
+	stp	A_l, A_l, [dst, #48]
+	add	dst, dst, #64
+	b.ge	2b
+	/* We've overrun a bit, so adjust dst downwards.  */
+	add	dst, dst, tmp2
+1:
+	sub	count, count, zva_len_x
+3:
+	dc	zva, dst
+	add	dst, dst, zva_len_x
+	subs	count, count, zva_len_x
+	b.ge	3b
+	ands	count, count, zva_bits_x
+	b.ne	.Ltail_maybe_long
+	ret
+END(memset)
+
+#ifdef MAYBE_VIRT
+	.bss
+	.p2align 2
+.Lcache_clear:
+	.space 4
+#endif
+#endif /* DONT_USE_DC */
diff --git a/libc/arch-arm64/denver64/denver64.mk b/libc/arch-arm64/denver64/denver64.mk
new file mode 100644
index 000000000..36146bfa8
--- /dev/null
+++ b/libc/arch-arm64/denver64/denver64.mk
@@ -0,0 +1,10 @@
+libc_bionic_src_files_arm64 += \
+    arch-arm64/generic/bionic/memcmp.S \
+    arch-arm64/denver64/bionic/memcpy.S \
+    arch-arm64/generic/bionic/memmove.S \
+    arch-arm64/denver64/bionic/memset.S \
+    arch-arm64/generic/bionic/strcmp.S \
+    arch-arm64/generic/bionic/strlen.S \
+    arch-arm64/generic/bionic/strncmp.S \
+    arch-arm64/generic/bionic/strnlen.S \
+    arch-arm64/generic/bionic/wmemmove.S
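
A minimal user-space sanity check for the routines above, assuming an AArch64 build linked against a bionic built with TARGET_CPU_VARIANT := denver64 so that memset/memcpy resolve to the new implementations (the file name zva_check.c and the helper read_dczid_el0 are illustrative, not part of this change). It reads DCZID_EL0 the same way the .Lzero_mem path does: bits [3:0] hold log2 of the DC ZVA block size in 4-byte words, so the block size in bytes is 4 << BS, and bit 4 (DZP) marks the feature as prohibited, in which case memset falls back to the store-pair loops. It then cross-checks memset/memcpy against reference buffers over a range of lengths and misalignments.

/* zva_check.c -- illustrative sketch, not part of the patch. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read DCZID_EL0; the register is architecturally readable from EL0. */
static uint64_t read_dczid_el0(void) {
  uint64_t v;
  __asm__("mrs %0, dczid_el0" : "=r"(v));
  return v;
}

int main(void) {
  uint64_t dczid = read_dczid_el0();
  unsigned bs = (unsigned)(dczid & 0xf);        /* log2(block size in words) */
  unsigned dzp = (unsigned)((dczid >> 4) & 1);  /* 1 = DC ZVA prohibited     */

  /* Same computation as the assembly: zva_len = 4 << BS bytes. */
  printf("DC ZVA %s, block size %u bytes\n",
         dzp ? "prohibited" : "allowed", 4u << bs);

  /* Cross-check memset/memcpy for a range of lengths and misalignments,
   * covering the tail, store-pair and (for zeroing) DC ZVA paths. */
  static unsigned char src[4096], dst[4096], ref[4096];
  for (size_t i = 0; i < sizeof(src); i++) src[i] = (unsigned char)i;

  for (size_t len = 0; len <= 2048; len++) {
    for (size_t off = 0; off < 16; off++) {
      memset(ref, 0xa5, sizeof(ref));
      memset(dst, 0xa5, sizeof(dst));
      memset(dst + off, 0, len);               /* exercises the zeroing path */
      for (size_t i = 0; i < len; i++) ref[off + i] = 0;
      if (memcmp(dst, ref, sizeof(ref)) != 0) {
        printf("memset mismatch: len=%zu off=%zu\n", len, off);
        return 1;
      }
      memcpy(dst + off, src, len);
      if (memcmp(dst + off, src, len) != 0) {
        printf("memcpy mismatch: len=%zu off=%zu\n", len, off);
        return 1;
      }
    }
  }
  printf("memset/memcpy cross-checks passed\n");
  return 0;
}

If DZP is set (for example when a hypervisor traps DC ZVA), the tbnz on bit 4 sends memset to .Lnot_short, so the cross-check remains valid either way.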