Compare commits

2 Commits

Yabin Cui    492a0bf212    2015-06-02 22:02:29 +00:00
    Merge "Make sys_resource test more robust." into lollipop-cts-dev

Yabin Cui    e7ece90b50    2015-06-02 14:40:26 -07:00
    Make sys_resource test more robust.
    Bug: 19482626

    "ulimit -c xxx" command may run before bionic-unit-tests.
    Make sure sys_resource test fails gently in that case.

    Change-Id: Ic3b5ed8b20acba56df8c5ef082c88e5050e761aa
    (cherry picked from commit 4853f40f3f)
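The fix described in the commit message amounts to not assuming what RLIMIT_CORE is when the test starts, since an earlier "ulimit -c xxx" may already have lowered it. A minimal sketch of that kind of check, in gtest-style C++ (illustrative only; the test name and structure here are assumptions, not the actual patch):

    #include <sys/resource.h>
    #include <gtest/gtest.h>

    // Sketch: round-trip RLIMIT_CORE relative to whatever limit is already in
    // place, instead of asserting a fixed value that "ulimit -c" could break.
    TEST(sys_resource, rlimit_core_roundtrip) {
      rlimit original;
      ASSERT_EQ(0, getrlimit(RLIMIT_CORE, &original));

      rlimit desired = original;
      desired.rlim_cur = original.rlim_max;  // soft <= hard always holds here
      ASSERT_EQ(0, setrlimit(RLIMIT_CORE, &desired));

      rlimit check;
      ASSERT_EQ(0, getrlimit(RLIMIT_CORE, &check));
      EXPECT_EQ(desired.rlim_cur, check.rlim_cur);

      ASSERT_EQ(0, setrlimit(RLIMIT_CORE, &original));  // restore prior limit
    }

Written this way, the test still exercises getrlimit/setrlimit but fails gently (or simply adapts) when the starting limit is not the default.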
137 changed files with 3637 additions and 5448 deletions

View File

@@ -60,8 +60,6 @@ libc_common_src_files := \
bionic/siginterrupt.c \
bionic/sigsetmask.c \
bionic/system_properties_compat.c \
stdio/findfp.c \
stdio/fread.c \
stdio/snprintf.c\
stdio/sprintf.c \
@@ -391,12 +389,14 @@ libc_upstream_openbsd_src_files := \
upstream-openbsd/lib/libc/stdio/fgetwc.c \
upstream-openbsd/lib/libc/stdio/fgetws.c \
upstream-openbsd/lib/libc/stdio/fileno.c \
upstream-openbsd/lib/libc/stdio/findfp.c \
upstream-openbsd/lib/libc/stdio/fprintf.c \
upstream-openbsd/lib/libc/stdio/fpurge.c \
upstream-openbsd/lib/libc/stdio/fputc.c \
upstream-openbsd/lib/libc/stdio/fputs.c \
upstream-openbsd/lib/libc/stdio/fputwc.c \
upstream-openbsd/lib/libc/stdio/fputws.c \
upstream-openbsd/lib/libc/stdio/fread.c \
upstream-openbsd/lib/libc/stdio/freopen.c \
upstream-openbsd/lib/libc/stdio/fscanf.c \
upstream-openbsd/lib/libc/stdio/fseek.c \
@@ -503,10 +503,7 @@ ifneq ($(TARGET_USES_LOGD),false)
libc_common_cflags += -DTARGET_USES_LOGD
endif
use_clang := $(USE_CLANG_PLATFORM_BUILD)
ifeq ($(use_clang),)
use_clang := false
endif
use_clang := false
# Try to catch typical 32-bit assumptions that break with 64-bit pointers.
libc_common_cflags += \
@@ -692,7 +689,6 @@ LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_SYSTEM_SHARED_LIBRARIES :=
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_freebsd_src_files))
include $(BUILD_STATIC_LIBRARY)
@@ -737,13 +733,6 @@ include $(BUILD_STATIC_LIBRARY)
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(libc_upstream_openbsd_src_files)
ifneq (,$(filter $(TARGET_ARCH),x86 x86_64))
# Clang has wrong long double size or LDBL_MANT_DIG, http://b/17163651.
LOCAL_CLANG := false
else
LOCAL_CLANG := $(use_clang)
endif
LOCAL_CFLAGS := \
$(libc_common_cflags) \
-Wno-sign-compare -Wno-uninitialized -Wno-unused-parameter \
@@ -757,11 +746,11 @@ LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags)
LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_MODULE := libc_openbsd
LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_SYSTEM_SHARED_LIBRARIES :=
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_openbsd_src_files))
include $(BUILD_STATIC_LIBRARY)
@@ -776,13 +765,6 @@ include $(CLEAR_VARS)
LOCAL_SRC_FILES_32 := $(libc_upstream_openbsd_gdtoa_src_files_32)
LOCAL_SRC_FILES_64 := $(libc_upstream_openbsd_gdtoa_src_files_64)
ifneq (,$(filter $(TARGET_ARCH),x86 x86_64))
# Clang has wrong long double size or LDBL_MANT_DIG, http://b/17163651.
LOCAL_CLANG := false
else
LOCAL_CLANG := $(use_clang)
endif
LOCAL_CFLAGS := \
$(libc_common_cflags) \
-Wno-sign-compare -Wno-uninitialized \
@@ -796,6 +778,7 @@ LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags)
LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_MODULE := libc_gdtoa
LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_SYSTEM_SHARED_LIBRARIES :=
@@ -813,11 +796,6 @@ LOCAL_SRC_FILES := $(libc_bionic_src_files)
LOCAL_CFLAGS := $(libc_common_cflags) \
-Wframe-larger-than=2048 \
ifeq ($(TARGET_ARCH),x86_64)
# Clang assembler has problem with ssse3-strcmp-slm.S, http://b/17302991
LOCAL_CLANG_ASFLAGS += -no-integrated-as
endif
LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags)
LOCAL_C_INCLUDES := $(libc_common_c_includes)
@@ -1166,7 +1144,6 @@ libstdcxx_common_src_files := \
include $(CLEAR_VARS)
LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_CFLAGS := $(libc_common_cflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags)
LOCAL_SRC_FILES := $(libstdcxx_common_src_files)
LOCAL_MODULE:= libstdc++
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
@@ -1179,7 +1156,6 @@ include $(BUILD_SHARED_LIBRARY)
include $(CLEAR_VARS)
LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_CFLAGS := $(libc_common_cflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags)
LOCAL_SRC_FILES := $(libstdcxx_common_src_files)
LOCAL_MODULE:= libstdc++
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk

View File

@@ -1,35 +1,22 @@
# 32-bit arm.
# arm specific configs
#
# Various kinds of LP32 cruft.
#
libc_bionic_src_files_arm += \
bionic/mmap.cpp \
libc_common_src_files_arm += \
# These are used by the 32-bit targets, but not the 64-bit ones.
libc_common_src_files_arm := \
bionic/legacy_32_bit_support.cpp \
bionic/ndk_cruft.cpp \
bionic/time64.c \
libc_netbsd_src_files_arm += \
upstream-netbsd/common/lib/libc/hash/sha1/sha1.c \
libc_openbsd_src_files_arm += \
upstream-openbsd/lib/libc/stdio/putw.c \
#
# Default implementations of functions that are commonly optimized.
#
# These are shared by all the 32-bit targets, but not the 64-bit ones.
libc_bionic_src_files_arm := \
bionic/mmap.cpp
libc_bionic_src_files_arm += \
libc_common_src_files_arm += \
bionic/memchr.c \
bionic/memrchr.c \
bionic/strchr.cpp \
bionic/strnlen.c \
bionic/strrchr.cpp \
libc_freebsd_src_files_arm += \
upstream-freebsd/lib/libc/string/wcscat.c \
upstream-freebsd/lib/libc/string/wcschr.c \
upstream-freebsd/lib/libc/string/wcscmp.c \
@@ -38,9 +25,8 @@ libc_freebsd_src_files_arm += \
upstream-freebsd/lib/libc/string/wcsrchr.c \
upstream-freebsd/lib/libc/string/wmemcmp.c \
upstream-freebsd/lib/libc/string/wmemmove.c \
libc_openbsd_src_files_arm += \
upstream-openbsd/lib/libc/string/bcopy.c \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/stpncpy.c \
upstream-openbsd/lib/libc/string/strlcat.c \
upstream-openbsd/lib/libc/string/strlcpy.c \
@@ -48,10 +34,20 @@ libc_openbsd_src_files_arm += \
upstream-openbsd/lib/libc/string/strncmp.c \
upstream-openbsd/lib/libc/string/strncpy.c \
# The C++ fortify function implementations for which there is an
# arm assembler version.
#
# Inherently architecture-specific code.
#
# Fortify implementations of libc functions.
# libc_common_src_files_arm +=
# bionic/__memcpy_chk.cpp \
# bionic/__memset_chk.cpp \
# bionic/__strcpy_chk.cpp \
# bionic/__strcat_chk.cpp \
libc_common_cflags_arm := -DSOFTFLOAT
##########################################
### CPU specific source files
libc_bionic_src_files_arm += \
arch-arm/bionic/abort_arm.S \
arch-arm/bionic/atomics_arm.c \
@@ -59,7 +55,6 @@ libc_bionic_src_files_arm += \
arch-arm/bionic/_exit_with_stack_teardown.S \
arch-arm/bionic/libgcc_compat.c \
arch-arm/bionic/memcmp.S \
arch-arm/bionic/__restore.S \
arch-arm/bionic/_setjmp.S \
arch-arm/bionic/setjmp.S \
arch-arm/bionic/sigsetjmp.S \
@@ -68,6 +63,9 @@ libc_bionic_src_files_arm += \
libc_arch_static_src_files_arm := arch-arm/bionic/exidx_static.c
libc_arch_dynamic_src_files_arm := arch-arm/bionic/exidx_dynamic.c
libc_netbsd_src_files_arm := \
upstream-netbsd/common/lib/libc/hash/sha1/sha1.c \
## CPU variant specific source files
ifeq ($(strip $(TARGET_$(my_2nd_arch_prefix)CPU_VARIANT)),)
$(warning TARGET_$(my_2nd_arch_prefix)ARCH is arm, but TARGET_$(my_2nd_arch_prefix)CPU_VARIANT is not defined)

View File

@@ -1,61 +0,0 @@
/*
* Copyright (C) 2014 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <private/bionic_asm.h>
// gdb is smart enough to unwind through signal frames with just the regular
// CFI information but libgcc and libunwind both need extra help. We do this
// by using .fnstart/.fnend and inserting a nop before both __restore and
// __restore_rt (but covered by the .fnstart/.fnend) so that although they're
// not inside the functions from objdump's point of view, an unwinder that
// blindly looks at the previous instruction (but is then smart enough to check
// the DWARF information to find out where it landed) gets the right answer.
// We need to place .fnstart ourselves (but we may as well keep the free .fnend).
#undef __bionic_asm_custom_entry
#define __bionic_asm_custom_entry(f)
.fnstart
.save {r0-r15}
.pad #32
nop
ENTRY_PRIVATE(__restore)
// This function must have exactly this instruction sequence.
mov r7, #__NR_sigreturn
swi #0
END(__restore)
.fnstart
.save {r0-r15}
.pad #160
nop
ENTRY_PRIVATE(__restore_rt)
// This function must have exactly this instruction sequence.
mov r7, #__NR_rt_sigreturn
swi #0
END(__restore_rt)

View File

@@ -40,10 +40,12 @@
ENTRY(__strcat_chk)
pld [r0, #0]
push {r0, lr}
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
push {r4, r5}
.save {r4, r5}
.cfi_adjust_cfa_offset 8
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
@@ -193,6 +195,9 @@ END(__strcat_chk)
#include "memcpy_base.S"
ENTRY_PRIVATE(__strcat_chk_failed)
.save {r0, lr}
.save {r4, r5}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4

View File

@@ -39,6 +39,7 @@
ENTRY(__strcpy_chk)
pld [r0, #0]
push {r0, lr}
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
@@ -160,6 +161,7 @@ END(__strcpy_chk)
#include "memcpy_base.S"
ENTRY_PRIVATE(__strcpy_chk_failed)
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4

View File

@@ -72,6 +72,7 @@ END(__memcpy_chk)
ENTRY(memcpy)
pld [r1, #64]
push {r0, lr}
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
@@ -84,6 +85,7 @@ END(memcpy)
ENTRY_PRIVATE(__memcpy_chk_fail)
// Preserve lr for backtrace.
push {lr}
.save {lr}
.cfi_def_cfa_offset 4
.cfi_rel_offset lr, 0

View File

@@ -54,6 +54,7 @@
*/
ENTRY_PRIVATE(MEMCPY_BASE)
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
@@ -171,6 +172,7 @@ ENTRY_PRIVATE(MEMCPY_BASE)
END(MEMCPY_BASE)
ENTRY_PRIVATE(MEMCPY_BASE_ALIGNED)
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
@@ -179,14 +181,17 @@ ENTRY_PRIVATE(MEMCPY_BASE_ALIGNED)
// i.e., not keeping the stack looking like users expect
// (highest numbered register at highest address).
strd r4, r5, [sp, #-8]!
.save {r4, r5}
.cfi_adjust_cfa_offset 8
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
strd r6, r7, [sp, #-8]!
.save {r6, r7}
.cfi_adjust_cfa_offset 8
.cfi_rel_offset r6, 0
.cfi_rel_offset r7, 0
strd r8, r9, [sp, #-8]!
.save {r8, r9}
.cfi_adjust_cfa_offset 8
.cfi_rel_offset r8, 0
.cfi_rel_offset r9, 4

View File

@@ -44,6 +44,7 @@ ENTRY(__memset_chk)
bls .L_done
// Preserve lr for backtrace.
.save {lr}
push {lr}
.cfi_def_cfa_offset 4
.cfi_rel_offset lr, 0
@@ -67,6 +68,7 @@ ENTRY(bzero)
END(bzero)
ENTRY(memset)
.save {r0}
stmfd sp!, {r0}
.cfi_def_cfa_offset 4
.cfi_rel_offset r0, 0

View File

@@ -168,6 +168,7 @@ ENTRY(strcmp)
bne .L_do_align
/* Fast path. */
.save {r4-r7}
init
.L_doubleword_aligned:

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2014 The Android Open Source Project
* Copyright (C) 2013 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -25,6 +25,427 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Copyright (c) 2013 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define STRCPY
#include "string_copy.S"
#include <private/bionic_asm.h>
.syntax unified
.thumb
.thumb_func
.macro m_push
push {r0, r4, r5, lr}
.endm // m_push
.macro m_pop
pop {r0, r4, r5, pc}
.endm // m_pop
.macro m_copy_byte reg, cmd, label
ldrb \reg, [r1], #1
strb \reg, [r0], #1
\cmd \reg, \label
.endm // m_copy_byte
ENTRY(strcpy)
// For short copies, hard-code checking the first 8 bytes since this
// new code doesn't win until after about 8 bytes.
m_push
m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r5, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r5, cmd=cbnz, label=strcpy_continue
strcpy_finish:
m_pop
strcpy_continue:
pld [r1, #0]
ands r3, r0, #7
beq strcpy_check_src_align
// Align to a double word (64 bits).
rsb r3, r3, #8
lsls ip, r3, #31
beq strcpy_align_to_32
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, strcpy_complete
strcpy_align_to_32:
bcc strcpy_align_to_64
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, strcpy_complete
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, strcpy_complete
strcpy_align_to_64:
tst r3, #4
beq strcpy_check_src_align
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne strcpy_zero_in_first_register
str r2, [r0], #4
strcpy_check_src_align:
// At this point dst is aligned to a double word, check if src
// is also aligned to a double word.
ands r3, r1, #7
bne strcpy_unaligned_copy
.p2align 2
strcpy_mainloop:
ldrd r2, r3, [r1], #8
pld [r1, #64]
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne strcpy_zero_in_first_register
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8
b strcpy_mainloop
strcpy_complete:
m_pop
strcpy_zero_in_first_register:
lsls lr, ip, #17
bne strcpy_copy1byte
bcs strcpy_copy2bytes
lsls ip, ip, #1
bne strcpy_copy3bytes
strcpy_copy4bytes:
// Copy 4 bytes to the destination.
str r2, [r0]
m_pop
strcpy_copy1byte:
strb r2, [r0]
m_pop
strcpy_copy2bytes:
strh r2, [r0]
m_pop
strcpy_copy3bytes:
strh r2, [r0], #2
lsr r2, #16
strb r2, [r0]
m_pop
strcpy_zero_in_second_register:
lsls lr, ip, #17
bne strcpy_copy5bytes
bcs strcpy_copy6bytes
lsls ip, ip, #1
bne strcpy_copy7bytes
// Copy 8 bytes to the destination.
strd r2, r3, [r0]
m_pop
strcpy_copy5bytes:
str r2, [r0], #4
strb r3, [r0]
m_pop
strcpy_copy6bytes:
str r2, [r0], #4
strh r3, [r0]
m_pop
strcpy_copy7bytes:
str r2, [r0], #4
strh r3, [r0], #2
lsr r3, #16
strb r3, [r0]
m_pop
strcpy_unaligned_copy:
// Dst is aligned to a double word, while src is at an unknown alignment.
// There are 7 different versions of the unaligned copy code
// to prevent overreading the src. The mainloop of every single version
// will store 64 bits per loop. The difference is how much of src can
// be read without potentially crossing a page boundary.
tbb [pc, r3]
strcpy_unaligned_branchtable:
.byte 0
.byte ((strcpy_unalign7 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign6 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign5 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign4 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign3 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign2 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign1 - strcpy_unaligned_branchtable)/2)
.p2align 2
// Can read 7 bytes before possibly crossing a page.
strcpy_unalign7:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne strcpy_zero_in_first_register
ldrb r3, [r1]
cbz r3, strcpy_unalign7_copy5bytes
ldrb r4, [r1, #1]
cbz r4, strcpy_unalign7_copy6bytes
ldrb r5, [r1, #2]
cbz r5, strcpy_unalign7_copy7bytes
ldr r3, [r1], #4
pld [r1, #64]
lsrs ip, r3, #24
strd r2, r3, [r0], #8
beq strcpy_unalign_return
b strcpy_unalign7
strcpy_unalign7_copy5bytes:
str r2, [r0], #4
strb r3, [r0]
strcpy_unalign_return:
m_pop
strcpy_unalign7_copy6bytes:
str r2, [r0], #4
strb r3, [r0], #1
strb r4, [r0], #1
m_pop
strcpy_unalign7_copy7bytes:
str r2, [r0], #4
strb r3, [r0], #1
strb r4, [r0], #1
strb r5, [r0], #1
m_pop
.p2align 2
// Can read 6 bytes before possibly crossing a page.
strcpy_unalign6:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne strcpy_zero_in_first_register
ldrb r4, [r1]
cbz r4, strcpy_unalign_copy5bytes
ldrb r5, [r1, #1]
cbz r5, strcpy_unalign_copy6bytes
ldr r3, [r1], #4
pld [r1, #64]
tst r3, #0xff0000
beq strcpy_copy7bytes
lsrs ip, r3, #24
strd r2, r3, [r0], #8
beq strcpy_unalign_return
b strcpy_unalign6
.p2align 2
// Can read 5 bytes before possibly crossing a page.
strcpy_unalign5:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne strcpy_zero_in_first_register
ldrb r4, [r1]
cbz r4, strcpy_unalign_copy5bytes
ldr r3, [r1], #4
pld [r1, #64]
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8
b strcpy_unalign5
strcpy_unalign_copy5bytes:
str r2, [r0], #4
strb r4, [r0]
m_pop
strcpy_unalign_copy6bytes:
str r2, [r0], #4
strb r4, [r0], #1
strb r5, [r0]
m_pop
.p2align 2
// Can read 4 bytes before possibly crossing a page.
strcpy_unalign4:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne strcpy_zero_in_first_register
ldr r3, [r1], #4
pld [r1, #64]
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8
b strcpy_unalign4
.p2align 2
// Can read 3 bytes before possibly crossing a page.
strcpy_unalign3:
ldrb r2, [r1]
cbz r2, strcpy_unalign3_copy1byte
ldrb r3, [r1, #1]
cbz r3, strcpy_unalign3_copy2bytes
ldrb r4, [r1, #2]
cbz r4, strcpy_unalign3_copy3bytes
ldr r2, [r1], #4
ldr r3, [r1], #4
pld [r1, #64]
lsrs lr, r2, #24
beq strcpy_copy4bytes
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8
b strcpy_unalign3
strcpy_unalign3_copy1byte:
strb r2, [r0]
m_pop
strcpy_unalign3_copy2bytes:
strb r2, [r0], #1
strb r3, [r0]
m_pop
strcpy_unalign3_copy3bytes:
strb r2, [r0], #1
strb r3, [r0], #1
strb r4, [r0]
m_pop
.p2align 2
// Can read 2 bytes before possibly crossing a page.
strcpy_unalign2:
ldrb r2, [r1]
cbz r2, strcpy_unalign_copy1byte
ldrb r4, [r1, #1]
cbz r4, strcpy_unalign_copy2bytes
ldr r2, [r1], #4
ldr r3, [r1], #4
pld [r1, #64]
tst r2, #0xff0000
beq strcpy_copy3bytes
lsrs ip, r2, #24
beq strcpy_copy4bytes
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8
b strcpy_unalign2
.p2align 2
// Can read 1 byte before possibly crossing a page.
strcpy_unalign1:
ldrb r2, [r1]
cbz r2, strcpy_unalign_copy1byte
ldr r2, [r1], #4
ldr r3, [r1], #4
pld [r1, #64]
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne strcpy_zero_in_first_register
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne strcpy_zero_in_second_register
strd r2, r3, [r0], #8
b strcpy_unalign1
strcpy_unalign_copy1byte:
strb r2, [r0]
m_pop
strcpy_unalign_copy2bytes:
strb r2, [r0], #1
strb r4, [r0]
m_pop
END(strcpy)
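Throughout the copy loops above, the recurring sequence "sub ip, rN, #0x01010101" / "bic ip, ip, rN" / "ands ip, ip, #0x80808080" is a word-at-a-time test for a terminating NUL byte. The same check rendered in C++, as a sketch for clarity (this helper is not part of the change):

    #include <cstdint>

    // Word-at-a-time NUL detection: (w - 0x01010101) & ~w & 0x80808080 is
    // non-zero exactly when some byte of w is zero, matching the assembly's
    // sub/bic/ands sequence against the same two constants.
    static inline bool word_has_zero_byte(uint32_t w) {
      return ((w - 0x01010101u) & ~w & 0x80808080u) != 0;
    }

When the test fires, the lsls/bcs cascade in strcpy_zero_in_first_register and strcpy_zero_in_second_register narrows down which byte was zero so only the valid prefix of the word is stored.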

View File

@@ -1,513 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Copyright (c) 2013 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if !defined(STPCPY) && !defined(STRCPY)
#error "Either STPCPY or STRCPY must be defined."
#endif
#include <private/bionic_asm.h>
.syntax unified
.thumb
.thumb_func
#if defined(STPCPY)
.macro m_push
push {r4, r5, lr}
.cfi_def_cfa_offset 12
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
.cfi_rel_offset lr, 8
.endm // m_push
#else
.macro m_push
push {r0, r4, r5, lr}
.cfi_def_cfa_offset 16
.cfi_rel_offset r0, 0
.cfi_rel_offset r4, 4
.cfi_rel_offset r5, 8
.cfi_rel_offset lr, 12
.endm // m_push
#endif
#if defined(STPCPY)
.macro m_pop
pop {r4, r5, pc}
.endm // m_pop
#else
.macro m_pop
pop {r0, r4, r5, pc}
.endm // m_pop
#endif
.macro m_copy_byte reg, cmd, label
ldrb \reg, [r1], #1
strb \reg, [r0], #1
\cmd \reg, \label
.endm // m_copy_byte
#if defined(STPCPY)
ENTRY(stpcpy)
#else
ENTRY(strcpy)
#endif
// For short copies, hard-code checking the first 8 bytes since this
// new code doesn't win until after about 8 bytes.
m_push
m_copy_byte reg=r2, cmd=cbz, label=.Lstringcopy_finish
m_copy_byte reg=r3, cmd=cbz, label=.Lstringcopy_finish
m_copy_byte reg=r4, cmd=cbz, label=.Lstringcopy_finish
m_copy_byte reg=r5, cmd=cbz, label=.Lstringcopy_finish
m_copy_byte reg=r2, cmd=cbz, label=.Lstringcopy_finish
m_copy_byte reg=r3, cmd=cbz, label=.Lstringcopy_finish
m_copy_byte reg=r4, cmd=cbz, label=.Lstringcopy_finish
m_copy_byte reg=r5, cmd=cbnz, label=.Lstringcopy_continue
.Lstringcopy_finish:
#if defined(STPCPY)
sub r0, r0, #1
#endif
m_pop
.Lstringcopy_continue:
pld [r1, #0]
ands r3, r0, #7
beq .Lstringcopy_check_src_align
// Align to a double word (64 bits).
rsb r3, r3, #8
lsls ip, r3, #31
beq .Lstringcopy_align_to_32
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, .Lstringcopy_complete
.Lstringcopy_align_to_32:
bcc .Lstringcopy_align_to_64
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, .Lstringcopy_complete
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, .Lstringcopy_complete
.Lstringcopy_align_to_64:
tst r3, #4
beq .Lstringcopy_check_src_align
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_first_register
str r2, [r0], #4
.Lstringcopy_check_src_align:
// At this point dst is aligned to a double word, check if src
// is also aligned to a double word.
ands r3, r1, #7
bne .Lstringcopy_unaligned_copy
.p2align 2
.Lstringcopy_mainloop:
ldrd r2, r3, [r1], #8
pld [r1, #64]
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_first_register
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_second_register
strd r2, r3, [r0], #8
b .Lstringcopy_mainloop
.Lstringcopy_complete:
#if defined(STPCPY)
sub r0, r0, #1
#endif
m_pop
.Lstringcopy_zero_in_first_register:
lsls lr, ip, #17
bne .Lstringcopy_copy1byte
bcs .Lstringcopy_copy2bytes
lsls ip, ip, #1
bne .Lstringcopy_copy3bytes
.Lstringcopy_copy4bytes:
// Copy 4 bytes to the destination.
#if defined(STPCPY)
str r2, [r0], #3
#else
str r2, [r0]
#endif
m_pop
.Lstringcopy_copy1byte:
strb r2, [r0]
m_pop
.Lstringcopy_copy2bytes:
#if defined(STPCPY)
strh r2, [r0], #1
#else
strh r2, [r0]
#endif
m_pop
.Lstringcopy_copy3bytes:
strh r2, [r0], #2
lsr r2, #16
strb r2, [r0]
m_pop
.Lstringcopy_zero_in_second_register:
lsls lr, ip, #17
bne .Lstringcopy_copy5bytes
bcs .Lstringcopy_copy6bytes
lsls ip, ip, #1
bne .Lstringcopy_copy7bytes
// Copy 8 bytes to the destination.
strd r2, r3, [r0]
#if defined(STPCPY)
add r0, r0, #7
#endif
m_pop
.Lstringcopy_copy5bytes:
str r2, [r0], #4
strb r3, [r0]
m_pop
.Lstringcopy_copy6bytes:
str r2, [r0], #4
#if defined(STPCPY)
strh r3, [r0], #1
#else
strh r3, [r0]
#endif
m_pop
.Lstringcopy_copy7bytes:
str r2, [r0], #4
strh r3, [r0], #2
lsr r3, #16
strb r3, [r0]
m_pop
.Lstringcopy_unaligned_copy:
// Dst is aligned to a double word, while src is at an unknown alignment.
// There are 7 different versions of the unaligned copy code
// to prevent overreading the src. The mainloop of every single version
// will store 64 bits per loop. The difference is how much of src can
// be read without potentially crossing a page boundary.
tbb [pc, r3]
.Lstringcopy_unaligned_branchtable:
.byte 0
.byte ((.Lstringcopy_unalign7 - .Lstringcopy_unaligned_branchtable)/2)
.byte ((.Lstringcopy_unalign6 - .Lstringcopy_unaligned_branchtable)/2)
.byte ((.Lstringcopy_unalign5 - .Lstringcopy_unaligned_branchtable)/2)
.byte ((.Lstringcopy_unalign4 - .Lstringcopy_unaligned_branchtable)/2)
.byte ((.Lstringcopy_unalign3 - .Lstringcopy_unaligned_branchtable)/2)
.byte ((.Lstringcopy_unalign2 - .Lstringcopy_unaligned_branchtable)/2)
.byte ((.Lstringcopy_unalign1 - .Lstringcopy_unaligned_branchtable)/2)
.p2align 2
// Can read 7 bytes before possibly crossing a page.
.Lstringcopy_unalign7:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_first_register
ldrb r3, [r1]
cbz r3, .Lstringcopy_unalign7_copy5bytes
ldrb r4, [r1, #1]
cbz r4, .Lstringcopy_unalign7_copy6bytes
ldrb r5, [r1, #2]
cbz r5, .Lstringcopy_unalign7_copy7bytes
ldr r3, [r1], #4
pld [r1, #64]
lsrs ip, r3, #24
strd r2, r3, [r0], #8
#if defined(STPCPY)
beq .Lstringcopy_finish
#else
beq .Lstringcopy_unalign_return
#endif
b .Lstringcopy_unalign7
.Lstringcopy_unalign7_copy5bytes:
str r2, [r0], #4
strb r3, [r0]
.Lstringcopy_unalign_return:
m_pop
.Lstringcopy_unalign7_copy6bytes:
str r2, [r0], #4
strb r3, [r0], #1
strb r4, [r0]
m_pop
.Lstringcopy_unalign7_copy7bytes:
str r2, [r0], #4
strb r3, [r0], #1
strb r4, [r0], #1
strb r5, [r0]
m_pop
.p2align 2
// Can read 6 bytes before possibly crossing a page.
.Lstringcopy_unalign6:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_first_register
ldrb r4, [r1]
cbz r4, .Lstringcopy_unalign_copy5bytes
ldrb r5, [r1, #1]
cbz r5, .Lstringcopy_unalign_copy6bytes
ldr r3, [r1], #4
pld [r1, #64]
tst r3, #0xff0000
beq .Lstringcopy_copy7bytes
lsrs ip, r3, #24
strd r2, r3, [r0], #8
#if defined(STPCPY)
beq .Lstringcopy_finish
#else
beq .Lstringcopy_unalign_return
#endif
b .Lstringcopy_unalign6
.p2align 2
// Can read 5 bytes before possibly crossing a page.
.Lstringcopy_unalign5:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_first_register
ldrb r4, [r1]
cbz r4, .Lstringcopy_unalign_copy5bytes
ldr r3, [r1], #4
pld [r1, #64]
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_second_register
strd r2, r3, [r0], #8
b .Lstringcopy_unalign5
.Lstringcopy_unalign_copy5bytes:
str r2, [r0], #4
strb r4, [r0]
m_pop
.Lstringcopy_unalign_copy6bytes:
str r2, [r0], #4
strb r4, [r0], #1
strb r5, [r0]
m_pop
.p2align 2
// Can read 4 bytes before possibly crossing a page.
.Lstringcopy_unalign4:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_first_register
ldr r3, [r1], #4
pld [r1, #64]
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_second_register
strd r2, r3, [r0], #8
b .Lstringcopy_unalign4
.p2align 2
// Can read 3 bytes before possibly crossing a page.
.Lstringcopy_unalign3:
ldrb r2, [r1]
cbz r2, .Lstringcopy_unalign3_copy1byte
ldrb r3, [r1, #1]
cbz r3, .Lstringcopy_unalign3_copy2bytes
ldrb r4, [r1, #2]
cbz r4, .Lstringcopy_unalign3_copy3bytes
ldr r2, [r1], #4
ldr r3, [r1], #4
pld [r1, #64]
lsrs lr, r2, #24
beq .Lstringcopy_copy4bytes
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_second_register
strd r2, r3, [r0], #8
b .Lstringcopy_unalign3
.Lstringcopy_unalign3_copy1byte:
strb r2, [r0]
m_pop
.Lstringcopy_unalign3_copy2bytes:
strb r2, [r0], #1
strb r3, [r0]
m_pop
.Lstringcopy_unalign3_copy3bytes:
strb r2, [r0], #1
strb r3, [r0], #1
strb r4, [r0]
m_pop
.p2align 2
// Can read 2 bytes before possibly crossing a page.
.Lstringcopy_unalign2:
ldrb r2, [r1]
cbz r2, .Lstringcopy_unalign_copy1byte
ldrb r4, [r1, #1]
cbz r4, .Lstringcopy_unalign_copy2bytes
ldr r2, [r1], #4
ldr r3, [r1], #4
pld [r1, #64]
tst r2, #0xff0000
beq .Lstringcopy_copy3bytes
lsrs ip, r2, #24
beq .Lstringcopy_copy4bytes
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_second_register
strd r2, r3, [r0], #8
b .Lstringcopy_unalign2
.p2align 2
// Can read 1 byte before possibly crossing a page.
.Lstringcopy_unalign1:
ldrb r2, [r1]
cbz r2, .Lstringcopy_unalign_copy1byte
ldr r2, [r1], #4
ldr r3, [r1], #4
pld [r1, #64]
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_first_register
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_second_register
strd r2, r3, [r0], #8
b .Lstringcopy_unalign1
.Lstringcopy_unalign_copy1byte:
strb r2, [r0]
m_pop
.Lstringcopy_unalign_copy2bytes:
strb r2, [r0], #1
strb r4, [r0]
m_pop
#if defined(STPCPY)
END(stpcpy)
#else
END(strcpy)
#endif

View File

@@ -1,11 +1,10 @@
libc_bionic_src_files_arm += \
arch-arm/cortex-a15/bionic/memcpy.S \
arch-arm/cortex-a15/bionic/memset.S \
arch-arm/cortex-a15/bionic/stpcpy.S \
arch-arm/cortex-a15/bionic/strcat.S \
arch-arm/cortex-a15/bionic/__strcat_chk.S \
arch-arm/cortex-a15/bionic/strcmp.S \
arch-arm/cortex-a15/bionic/strcpy.S \
arch-arm/cortex-a15/bionic/__strcpy_chk.S \
arch-arm/cortex-a15/bionic/strlen.S \
arch-arm/cortex-a15/bionic/__strcat_chk.S \
arch-arm/cortex-a15/bionic/__strcpy_chk.S \
bionic/memmove.c \

View File

@@ -40,10 +40,12 @@
ENTRY(__strcat_chk)
pld [r0, #0]
push {r0, lr}
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
push {r4, r5}
.save {r4, r5}
.cfi_adjust_cfa_offset 8
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
@@ -197,6 +199,8 @@ END(__strcat_chk)
#include "memcpy_base.S"
ENTRY_PRIVATE(__strcat_chk_fail)
.save {r0, lr}
.save {r4, r5}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4

View File

@@ -39,6 +39,7 @@
ENTRY(__strcpy_chk)
pld [r0, #0]
push {r0, lr}
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
@@ -164,6 +165,7 @@ END(__strcpy_chk)
#include "memcpy_base.S"
ENTRY_PRIVATE(__strcpy_chk_fail)
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4

View File

@@ -50,6 +50,7 @@ END(__memcpy_chk)
ENTRY(memcpy)
pld [r1, #0]
stmfd sp!, {r0, lr}
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
@@ -63,6 +64,7 @@ END(memcpy)
ENTRY_PRIVATE(__memcpy_chk_fail)
// Preserve lr for backtrace.
push {lr}
.save {lr}
.cfi_def_cfa_offset 4
.cfi_rel_offset lr, 0

View File

@@ -33,6 +33,7 @@
*/
ENTRY_PRIVATE(MEMCPY_BASE)
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
@@ -138,12 +139,14 @@ ENTRY_PRIVATE(MEMCPY_BASE)
END(MEMCPY_BASE)
ENTRY_PRIVATE(MEMCPY_BASE_ALIGNED)
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
/* Simple arm-only copy loop to handle aligned copy operations */
stmfd sp!, {r4-r8}
.save {r4-r8}
.cfi_adjust_cfa_offset 20
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4

View File

@@ -42,6 +42,7 @@ ENTRY(__memset_chk)
// Preserve lr for backtrace.
push {lr}
.save {lr}
.cfi_def_cfa_offset 4
.cfi_rel_offset lr, 0
@@ -71,6 +72,7 @@ ENTRY(memset)
bhi __memset_large_copy
stmfd sp!, {r0}
.save {r0}
.cfi_def_cfa_offset 4
.cfi_rel_offset r0, 0
@@ -112,6 +114,7 @@ ENTRY_PRIVATE(__memset_large_copy)
* offset = (4-(src&3))&3 = -src & 3
*/
stmfd sp!, {r0, r4-r7, lr}
.save {r0, r4-r7, lr}
.cfi_def_cfa_offset 24
.cfi_rel_offset r0, 0
.cfi_rel_offset r4, 4

View File

@@ -168,6 +168,7 @@ ENTRY(strcmp)
bne .L_do_align
/* Fast path. */
.save {r4-r7}
init
.L_doubleword_aligned:

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2014 The Android Open Source Project
* Copyright (C) 2013 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -25,6 +25,432 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Copyright (c) 2013 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define STRCPY
#include "string_copy.S"
#include <private/bionic_asm.h>
.syntax unified
.thumb
.thumb_func
.macro m_push
push {r0, r4, r5, lr}
.endm // m_push
.macro m_ret inst
\inst {r0, r4, r5, pc}
.endm // m_ret
.macro m_copy_byte reg, cmd, label
ldrb \reg, [r1], #1
strb \reg, [r0], #1
\cmd \reg, \label
.endm // m_copy_byte
ENTRY(strcpy)
// Unroll the first 8 bytes that will be copied.
m_push
m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r5, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
m_copy_byte reg=r5, cmd=cbnz, label=strcpy_continue
strcpy_finish:
m_ret inst=pop
strcpy_continue:
pld [r1, #0]
ands r3, r0, #7
bne strcpy_align_dst
strcpy_check_src_align:
// At this point dst is aligned to a double word, check if src
// is also aligned to a double word.
ands r3, r1, #7
bne strcpy_unaligned_copy
.p2align 2
strcpy_mainloop:
ldmia r1!, {r2, r3}
pld [r1, #64]
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne strcpy_zero_in_first_register
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne strcpy_zero_in_second_register
stmia r0!, {r2, r3}
b strcpy_mainloop
strcpy_zero_in_first_register:
lsls lr, ip, #17
itt ne
strbne r2, [r0]
m_ret inst=popne
itt cs
strhcs r2, [r0]
m_ret inst=popcs
lsls ip, ip, #1
itt eq
streq r2, [r0]
m_ret inst=popeq
strh r2, [r0], #2
lsr r3, r2, #16
strb r3, [r0]
m_ret inst=pop
strcpy_zero_in_second_register:
lsls lr, ip, #17
ittt ne
stmiane r0!, {r2}
strbne r3, [r0]
m_ret inst=popne
ittt cs
strcs r2, [r0], #4
strhcs r3, [r0]
m_ret inst=popcs
lsls ip, ip, #1
itt eq
stmiaeq r0, {r2, r3}
m_ret inst=popeq
stmia r0!, {r2}
strh r3, [r0], #2
lsr r4, r3, #16
strb r4, [r0]
m_ret inst=pop
strcpy_align_dst:
// Align to a double word (64 bits).
rsb r3, r3, #8
lsls ip, r3, #31
beq strcpy_align_to_32
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, strcpy_complete
strcpy_align_to_32:
bcc strcpy_align_to_64
ldrb r4, [r1], #1
strb r4, [r0], #1
cmp r4, #0
it eq
m_ret inst=popeq
ldrb r5, [r1], #1
strb r5, [r0], #1
cmp r5, #0
it eq
m_ret inst=popeq
strcpy_align_to_64:
tst r3, #4
beq strcpy_check_src_align
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne strcpy_zero_in_first_register
stmia r0!, {r2}
b strcpy_check_src_align
strcpy_complete:
m_ret inst=pop
strcpy_unaligned_copy:
// Dst is aligned to a double word, while src is at an unknown alignment.
// There are 7 different versions of the unaligned copy code
// to prevent overreading the src. The mainloop of every single version
// will store 64 bits per loop. The difference is how much of src can
// be read without potentially crossing a page boundary.
tbb [pc, r3]
strcpy_unaligned_branchtable:
.byte 0
.byte ((strcpy_unalign7 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign6 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign5 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign4 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign3 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign2 - strcpy_unaligned_branchtable)/2)
.byte ((strcpy_unalign1 - strcpy_unaligned_branchtable)/2)
.p2align 2
// Can read 7 bytes before possibly crossing a page.
strcpy_unalign7:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne strcpy_zero_in_first_register
ldrb r3, [r1]
cbz r3, strcpy_unalign7_copy5bytes
ldrb r4, [r1, #1]
cbz r4, strcpy_unalign7_copy6bytes
ldrb r5, [r1, #2]
cbz r5, strcpy_unalign7_copy7bytes
ldr r3, [r1], #4
pld [r1, #64]
lsrs ip, r3, #24
stmia r0!, {r2, r3}
beq strcpy_unalign_return
b strcpy_unalign7
strcpy_unalign7_copy5bytes:
stmia r0!, {r2}
strb r3, [r0]
strcpy_unalign_return:
m_ret inst=pop
strcpy_unalign7_copy6bytes:
stmia r0!, {r2}
strb r3, [r0], #1
strb r4, [r0], #1
m_ret inst=pop
strcpy_unalign7_copy7bytes:
stmia r0!, {r2}
strb r3, [r0], #1
strb r4, [r0], #1
strb r5, [r0], #1
m_ret inst=pop
.p2align 2
// Can read 6 bytes before possibly crossing a page.
strcpy_unalign6:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne strcpy_zero_in_first_register
ldrb r4, [r1]
cbz r4, strcpy_unalign_copy5bytes
ldrb r5, [r1, #1]
cbz r5, strcpy_unalign_copy6bytes
ldr r3, [r1], #4
pld [r1, #64]
tst r3, #0xff0000
beq strcpy_unalign6_copy7bytes
lsrs ip, r3, #24
stmia r0!, {r2, r3}
beq strcpy_unalign_return
b strcpy_unalign6
strcpy_unalign6_copy7bytes:
stmia r0!, {r2}
strh r3, [r0], #2
lsr r3, #16
strb r3, [r0]
m_ret inst=pop
.p2align 2
// Can read 5 bytes before possibly crossing a page.
strcpy_unalign5:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne strcpy_zero_in_first_register
ldrb r4, [r1]
cbz r4, strcpy_unalign_copy5bytes
ldr r3, [r1], #4
pld [r1, #64]
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne strcpy_zero_in_second_register
stmia r0!, {r2, r3}
b strcpy_unalign5
strcpy_unalign_copy5bytes:
stmia r0!, {r2}
strb r4, [r0]
m_ret inst=pop
strcpy_unalign_copy6bytes:
stmia r0!, {r2}
strb r4, [r0], #1
strb r5, [r0]
m_ret inst=pop
.p2align 2
// Can read 4 bytes before possibly crossing a page.
strcpy_unalign4:
ldmia r1!, {r2}
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne strcpy_zero_in_first_register
ldmia r1!, {r3}
pld [r1, #64]
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne strcpy_zero_in_second_register
stmia r0!, {r2, r3}
b strcpy_unalign4
.p2align 2
// Can read 3 bytes before possibly crossing a page.
strcpy_unalign3:
ldrb r2, [r1]
cbz r2, strcpy_unalign3_copy1byte
ldrb r3, [r1, #1]
cbz r3, strcpy_unalign3_copy2bytes
ldrb r4, [r1, #2]
cbz r4, strcpy_unalign3_copy3bytes
ldr r2, [r1], #4
ldr r3, [r1], #4
pld [r1, #64]
lsrs lr, r2, #24
beq strcpy_unalign_copy4bytes
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne strcpy_zero_in_second_register
stmia r0!, {r2, r3}
b strcpy_unalign3
strcpy_unalign3_copy1byte:
strb r2, [r0]
m_ret inst=pop
strcpy_unalign3_copy2bytes:
strb r2, [r0], #1
strb r3, [r0]
m_ret inst=pop
strcpy_unalign3_copy3bytes:
strb r2, [r0], #1
strb r3, [r0], #1
strb r4, [r0]
m_ret inst=pop
.p2align 2
// Can read 2 bytes before possibly crossing a page.
strcpy_unalign2:
ldrb r2, [r1]
cbz r2, strcpy_unalign_copy1byte
ldrb r3, [r1, #1]
cbz r3, strcpy_unalign_copy2bytes
ldr r2, [r1], #4
ldr r3, [r1], #4
pld [r1, #64]
tst r2, #0xff0000
beq strcpy_unalign_copy3bytes
lsrs ip, r2, #24
beq strcpy_unalign_copy4bytes
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne strcpy_zero_in_second_register
stmia r0!, {r2, r3}
b strcpy_unalign2
.p2align 2
// Can read 1 byte before possibly crossing a page.
strcpy_unalign1:
ldrb r2, [r1]
cbz r2, strcpy_unalign_copy1byte
ldr r2, [r1], #4
ldr r3, [r1], #4
pld [r1, #64]
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne strcpy_zero_in_first_register
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne strcpy_zero_in_second_register
stmia r0!, {r2, r3}
b strcpy_unalign1
strcpy_unalign_copy1byte:
strb r2, [r0]
m_ret inst=pop
strcpy_unalign_copy2bytes:
strb r2, [r0], #1
strb r3, [r0]
m_ret inst=pop
strcpy_unalign_copy3bytes:
strh r2, [r0], #2
lsr r2, #16
strb r2, [r0]
m_ret inst=pop
strcpy_unalign_copy4bytes:
stmia r0, {r2}
m_ret inst=pop
END(strcpy)

View File

@@ -1,535 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Copyright (c) 2013 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if !defined(STPCPY) && !defined(STRCPY)
#error "Either STPCPY or STRCPY must be defined."
#endif
#include <private/bionic_asm.h>
.syntax unified
.thumb
.thumb_func
#if defined(STPCPY)
.macro m_push
push {r4, r5, lr}
.cfi_def_cfa_offset 12
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
.cfi_rel_offset lr, 8
.endm // m_push
#else
.macro m_push
push {r0, r4, r5, lr}
.cfi_def_cfa_offset 16
.cfi_rel_offset r0, 0
.cfi_rel_offset r4, 4
.cfi_rel_offset r5, 8
.cfi_rel_offset lr, 12
.endm // m_push
#endif
#if defined(STPCPY)
.macro m_ret inst
\inst {r4, r5, pc}
.endm // m_ret
#else
.macro m_ret inst
\inst {r0, r4, r5, pc}
.endm // m_ret
#endif
.macro m_copy_byte reg, cmd, label
ldrb \reg, [r1], #1
strb \reg, [r0], #1
\cmd \reg, \label
.endm // m_copy_byte
#if defined(STPCPY)
ENTRY(stpcpy)
#else
ENTRY(strcpy)
#endif
// Unroll the first 8 bytes that will be copied.
m_push
m_copy_byte reg=r2, cmd=cbz, label=.Lstringcopy_finish
m_copy_byte reg=r3, cmd=cbz, label=.Lstringcopy_finish
m_copy_byte reg=r4, cmd=cbz, label=.Lstringcopy_finish
m_copy_byte reg=r5, cmd=cbz, label=.Lstringcopy_finish
m_copy_byte reg=r2, cmd=cbz, label=.Lstringcopy_finish
m_copy_byte reg=r3, cmd=cbz, label=.Lstringcopy_finish
m_copy_byte reg=r4, cmd=cbz, label=.Lstringcopy_finish
m_copy_byte reg=r5, cmd=cbnz, label=.Lstringcopy_continue
.Lstringcopy_finish:
#if defined(STPCPY)
sub r0, r0, #1
#endif
m_ret inst=pop
.Lstringcopy_continue:
pld [r1, #0]
ands r3, r0, #7
bne .Lstringcopy_align_dst
.Lstringcopy_check_src_align:
// At this point dst is aligned to a double word, check if src
// is also aligned to a double word.
ands r3, r1, #7
bne .Lstringcopy_unaligned_copy
.p2align 2
.Lstringcopy_mainloop:
ldmia r1!, {r2, r3}
pld [r1, #64]
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_first_register
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_second_register
stmia r0!, {r2, r3}
b .Lstringcopy_mainloop
.Lstringcopy_zero_in_first_register:
lsls lr, ip, #17
itt ne
strbne r2, [r0]
m_ret inst=popne
itt cs
#if defined(STPCPY)
strhcs r2, [r0], #1
#else
strhcs r2, [r0]
#endif
m_ret inst=popcs
lsls ip, ip, #1
itt eq
#if defined(STPCPY)
streq r2, [r0], #3
#else
streq r2, [r0]
#endif
m_ret inst=popeq
strh r2, [r0], #2
lsr r3, r2, #16
strb r3, [r0]
m_ret inst=pop
.Lstringcopy_zero_in_second_register:
lsls lr, ip, #17
ittt ne
stmiane r0!, {r2}
strbne r3, [r0]
m_ret inst=popne
ittt cs
strcs r2, [r0], #4
#if defined(STPCPY)
strhcs r3, [r0], #1
#else
strhcs r3, [r0]
#endif
m_ret inst=popcs
lsls ip, ip, #1
#if defined(STPCPY)
ittt eq
#else
itt eq
#endif
stmiaeq r0, {r2, r3}
#if defined(STPCPY)
addeq r0, r0, #7
#endif
m_ret inst=popeq
stmia r0!, {r2}
strh r3, [r0], #2
lsr r4, r3, #16
strb r4, [r0]
m_ret inst=pop
.Lstringcopy_align_dst:
// Align to a double word (64 bits).
rsb r3, r3, #8
lsls ip, r3, #31
beq .Lstringcopy_align_to_32
ldrb r2, [r1], #1
strb r2, [r0], #1
cbz r2, .Lstringcopy_complete
.Lstringcopy_align_to_32:
bcc .Lstringcopy_align_to_64
ldrb r4, [r1], #1
strb r4, [r0], #1
cmp r4, #0
#if defined(STPCPY)
itt eq
subeq r0, r0, #1
#else
it eq
#endif
m_ret inst=popeq
ldrb r5, [r1], #1
strb r5, [r0], #1
cmp r5, #0
#if defined(STPCPY)
itt eq
subeq r0, r0, #1
#else
it eq
#endif
m_ret inst=popeq
.Lstringcopy_align_to_64:
tst r3, #4
beq .Lstringcopy_check_src_align
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_first_register
stmia r0!, {r2}
b .Lstringcopy_check_src_align
.Lstringcopy_complete:
#if defined(STPCPY)
sub r0, r0, #1
#endif
m_ret inst=pop
.Lstringcopy_unaligned_copy:
// Dst is aligned to a double word, while src is at an unknown alignment.
// There are 7 different versions of the unaligned copy code
// to prevent overreading the src. The mainloop of every single version
// will store 64 bits per loop. The difference is how much of src can
// be read without potentially crossing a page boundary.
tbb [pc, r3]
.Lstringcopy_unaligned_branchtable:
.byte 0
.byte ((.Lstringcopy_unalign7 - .Lstringcopy_unaligned_branchtable)/2)
.byte ((.Lstringcopy_unalign6 - .Lstringcopy_unaligned_branchtable)/2)
.byte ((.Lstringcopy_unalign5 - .Lstringcopy_unaligned_branchtable)/2)
.byte ((.Lstringcopy_unalign4 - .Lstringcopy_unaligned_branchtable)/2)
.byte ((.Lstringcopy_unalign3 - .Lstringcopy_unaligned_branchtable)/2)
.byte ((.Lstringcopy_unalign2 - .Lstringcopy_unaligned_branchtable)/2)
.byte ((.Lstringcopy_unalign1 - .Lstringcopy_unaligned_branchtable)/2)
.p2align 2
// Can read 7 bytes before possibly crossing a page.
.Lstringcopy_unalign7:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_first_register
ldrb r3, [r1]
cbz r3, .Lstringcopy_unalign7_copy5bytes
ldrb r4, [r1, #1]
cbz r4, .Lstringcopy_unalign7_copy6bytes
ldrb r5, [r1, #2]
cbz r5, .Lstringcopy_unalign7_copy7bytes
ldr r3, [r1], #4
pld [r1, #64]
lsrs ip, r3, #24
stmia r0!, {r2, r3}
#if defined(STPCPY)
beq .Lstringcopy_finish
#else
beq .Lstringcopy_unalign_return
#endif
b .Lstringcopy_unalign7
.Lstringcopy_unalign7_copy5bytes:
stmia r0!, {r2}
strb r3, [r0]
.Lstringcopy_unalign_return:
m_ret inst=pop
.Lstringcopy_unalign7_copy6bytes:
stmia r0!, {r2}
strb r3, [r0], #1
strb r4, [r0]
m_ret inst=pop
.Lstringcopy_unalign7_copy7bytes:
stmia r0!, {r2}
strb r3, [r0], #1
strb r4, [r0], #1
strb r5, [r0]
m_ret inst=pop
.p2align 2
// Can read 6 bytes before possibly crossing a page.
.Lstringcopy_unalign6:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_first_register
ldrb r4, [r1]
cbz r4, .Lstringcopy_unalign_copy5bytes
ldrb r5, [r1, #1]
cbz r5, .Lstringcopy_unalign_copy6bytes
ldr r3, [r1], #4
pld [r1, #64]
tst r3, #0xff0000
beq .Lstringcopy_unalign6_copy7bytes
lsrs ip, r3, #24
stmia r0!, {r2, r3}
#if defined(STPCPY)
beq .Lstringcopy_finish
#else
beq .Lstringcopy_unalign_return
#endif
b .Lstringcopy_unalign6
.Lstringcopy_unalign6_copy7bytes:
stmia r0!, {r2}
strh r3, [r0], #2
lsr r3, #16
strb r3, [r0]
m_ret inst=pop
.p2align 2
// Can read 5 bytes before possibly crossing a page.
.Lstringcopy_unalign5:
ldr r2, [r1], #4
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_first_register
ldrb r4, [r1]
cbz r4, .Lstringcopy_unalign_copy5bytes
ldr r3, [r1], #4
pld [r1, #64]
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_second_register
stmia r0!, {r2, r3}
b .Lstringcopy_unalign5
.Lstringcopy_unalign_copy5bytes:
stmia r0!, {r2}
strb r4, [r0]
m_ret inst=pop
.Lstringcopy_unalign_copy6bytes:
stmia r0!, {r2}
strb r4, [r0], #1
strb r5, [r0]
m_ret inst=pop
.p2align 2
// Can read 4 bytes before possibly crossing a page.
.Lstringcopy_unalign4:
ldmia r1!, {r2}
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_first_register
ldmia r1!, {r3}
pld [r1, #64]
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_second_register
stmia r0!, {r2, r3}
b .Lstringcopy_unalign4
.p2align 2
// Can read 3 bytes before possibly crossing a page.
.Lstringcopy_unalign3:
ldrb r2, [r1]
cbz r2, .Lstringcopy_unalign3_copy1byte
ldrb r3, [r1, #1]
cbz r3, .Lstringcopy_unalign3_copy2bytes
ldrb r4, [r1, #2]
cbz r4, .Lstringcopy_unalign3_copy3bytes
ldr r2, [r1], #4
ldr r3, [r1], #4
pld [r1, #64]
lsrs lr, r2, #24
beq .Lstringcopy_unalign_copy4bytes
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_second_register
stmia r0!, {r2, r3}
b .Lstringcopy_unalign3
.Lstringcopy_unalign3_copy1byte:
strb r2, [r0]
m_ret inst=pop
.Lstringcopy_unalign3_copy2bytes:
strb r2, [r0], #1
strb r3, [r0]
m_ret inst=pop
.Lstringcopy_unalign3_copy3bytes:
strb r2, [r0], #1
strb r3, [r0], #1
strb r4, [r0]
m_ret inst=pop
.p2align 2
// Can read 2 bytes before possibly crossing a page.
.Lstringcopy_unalign2:
ldrb r2, [r1]
cbz r2, .Lstringcopy_unalign_copy1byte
ldrb r3, [r1, #1]
cbz r3, .Lstringcopy_unalign_copy2bytes
ldr r2, [r1], #4
ldr r3, [r1], #4
pld [r1, #64]
tst r2, #0xff0000
beq .Lstringcopy_unalign_copy3bytes
lsrs ip, r2, #24
beq .Lstringcopy_unalign_copy4bytes
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_second_register
stmia r0!, {r2, r3}
b .Lstringcopy_unalign2
.p2align 2
// Can read 1 byte before possibly crossing a page.
.Lstringcopy_unalign1:
ldrb r2, [r1]
cbz r2, .Lstringcopy_unalign_copy1byte
ldr r2, [r1], #4
ldr r3, [r1], #4
pld [r1, #64]
sub ip, r2, #0x01010101
bic ip, ip, r2
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_first_register
sub ip, r3, #0x01010101
bic ip, ip, r3
ands ip, ip, #0x80808080
bne .Lstringcopy_zero_in_second_register
stmia r0!, {r2, r3}
b .Lstringcopy_unalign1
.Lstringcopy_unalign_copy1byte:
strb r2, [r0]
m_ret inst=pop
.Lstringcopy_unalign_copy2bytes:
strb r2, [r0], #1
strb r3, [r0]
m_ret inst=pop
.Lstringcopy_unalign_copy3bytes:
strh r2, [r0], #2
lsr r2, #16
strb r2, [r0]
m_ret inst=pop
.Lstringcopy_unalign_copy4bytes:
stmia r0, {r2}
#if defined(STPCPY)
add r0, r0, #3
#endif
m_ret inst=pop
#if defined(STPCPY)
END(stpcpy)
#else
END(strcpy)
#endif
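Each unaligned loop above finds a terminating NUL with the same word-at-a-time test: subtract 0x01010101, clear the bits already set in the source word, and mask with 0x80808080. A minimal C sketch of that check, added here purely as an illustration (it is not part of the diff):

#include <stdint.h>

/* Returns non-zero when any byte of w is 0x00: (w - 0x01010101) borrows into
 * the top bit of every byte that was zero, and masking with ~w discards bytes
 * whose own top bit was already set. */
static int word_has_zero_byte(uint32_t w) {
    return ((w - 0x01010101u) & ~w & 0x80808080u) != 0;
}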

View File

@@ -1,11 +1,10 @@
libc_bionic_src_files_arm += \
arch-arm/cortex-a9/bionic/memcpy.S \
arch-arm/cortex-a9/bionic/memset.S \
arch-arm/cortex-a9/bionic/stpcpy.S \
arch-arm/cortex-a9/bionic/strcat.S \
arch-arm/cortex-a9/bionic/__strcat_chk.S \
arch-arm/cortex-a9/bionic/strcmp.S \
arch-arm/cortex-a9/bionic/strcpy.S \
arch-arm/cortex-a9/bionic/__strcpy_chk.S \
arch-arm/cortex-a9/bionic/strlen.S \
arch-arm/cortex-a9/bionic/__strcat_chk.S \
arch-arm/cortex-a9/bionic/__strcpy_chk.S \
bionic/memmove.c \

View File

@@ -40,10 +40,12 @@
ENTRY(__strcat_chk)
pld [r0, #0]
push {r0, lr}
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
push {r4, r5}
.save {r4, r5}
.cfi_adjust_cfa_offset 8
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
@@ -193,6 +195,9 @@ END(__strcat_chk)
#include "memcpy_base.S"
ENTRY_PRIVATE(__strcat_chk_failed)
.save {r0, lr}
.save {r4, r5}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4

View File

@@ -39,6 +39,7 @@
ENTRY(__strcpy_chk)
pld [r0, #0]
push {r0, lr}
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
@@ -160,6 +161,7 @@ END(__strcpy_chk)
#include "memcpy_base.S"
ENTRY_PRIVATE(__strcpy_chk_failed)
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4

View File

@@ -72,6 +72,7 @@ END(__memcpy_chk)
ENTRY(memcpy)
pld [r1, #64]
push {r0, lr}
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
@@ -84,6 +85,7 @@ END(memcpy)
ENTRY_PRIVATE(__memcpy_chk_fail)
// Preserve lr for backtrace.
push {lr}
.save {lr}
.cfi_def_cfa_offset 4
.cfi_rel_offset lr, 0

View File

@@ -7,8 +7,7 @@ libc_bionic_src_files_arm += \
# Use cortex-a15 versions of strcat/strcpy/strlen.
libc_bionic_src_files_arm += \
arch-arm/cortex-a15/bionic/stpcpy.S \
arch-arm/cortex-a15/bionic/strcat.S \
arch-arm/cortex-a15/bionic/strcmp.S \
arch-arm/cortex-a15/bionic/strcpy.S \
arch-arm/cortex-a15/bionic/strlen.S \
arch-arm/cortex-a15/bionic/strcmp.S \

View File

@@ -39,7 +39,7 @@
ENTRY(__memcpy_chk)
cmp r2, r3
bhi __memcpy_chk_fail
bgt fortify_check_failed
// Fall through to memcpy...
END(__memcpy_chk)
@@ -49,14 +49,11 @@ ENTRY(memcpy)
* ARM ABI. Since we have to save R0, we might as well save R4
* which we can use for better pipelining of the reads below
*/
.save {r0, r4, lr}
stmfd sp!, {r0, r4, lr}
.cfi_def_cfa_offset 12
.cfi_rel_offset r0, 0
.cfi_rel_offset r4, 4
.cfi_rel_offset lr, 8
/* Making room for r5-r11 which will be spilled later */
.pad #28
sub sp, sp, #28
.cfi_adjust_cfa_offset 28
// preload the destination because we'll align it to a cache line
// with small writes. Also start the source "pump".
@@ -66,14 +63,14 @@ ENTRY(memcpy)
/* it simplifies things to take care of len<4 early */
cmp r2, #4
blo .Lcopy_last_3_and_return
blo copy_last_3_and_return
/* compute the offset to align the source
* offset = (4-(src&3))&3 = -src & 3
*/
rsb r3, r1, #0
ands r3, r3, #3
beq .Lsrc_aligned
beq src_aligned
/* align source to 32 bits. We need to insert 2 instructions between
* a ldr[b|h] and str[b|h] because byte and half-word instructions
@@ -88,12 +85,12 @@ ENTRY(memcpy)
strcsb r4, [r0], #1
strcsb r12,[r0], #1
.Lsrc_aligned:
src_aligned:
/* see if src and dst are aligned together (congruent) */
eor r12, r0, r1
tst r12, #3
bne .Lnon_congruent
bne non_congruent
/* Use post-incriment mode for stm to spill r5-r11 to reserved stack
* frame. Don't update sp.
@@ -103,7 +100,7 @@ ENTRY(memcpy)
/* align the destination to a cache-line */
rsb r3, r0, #0
ands r3, r3, #0x1C
beq .Lcongruent_aligned32
beq congruent_aligned32
cmp r3, r2
andhi r3, r2, #0x1C
@@ -118,14 +115,14 @@ ENTRY(memcpy)
strne r10,[r0], #4
sub r2, r2, r3
.Lcongruent_aligned32:
congruent_aligned32:
/*
* here source is aligned to 32 bytes.
*/
.Lcached_aligned32:
cached_aligned32:
subs r2, r2, #32
blo .Lless_than_32_left
blo less_than_32_left
/*
* We preload a cache-line up to 64 bytes ahead. On the 926, this will
@@ -163,7 +160,10 @@ ENTRY(memcpy)
add r2, r2, #32
.Lless_than_32_left:
less_than_32_left:
/*
* less than 32 bytes left at this point (length in r2)
*/
@@ -197,7 +197,7 @@ ENTRY(memcpy)
/********************************************************************/
.Lnon_congruent:
non_congruent:
/*
* here source is aligned to 4 bytes
* but destination is not.
@@ -207,9 +207,9 @@ ENTRY(memcpy)
* partial words in the shift queue)
*/
cmp r2, #4
blo .Lcopy_last_3_and_return
blo copy_last_3_and_return
/* Use post-increment mode for stm to spill r5-r11 to reserved stack
/* Use post-incriment mode for stm to spill r5-r11 to reserved stack
* frame. Don't update sp.
*/
stmea sp, {r5-r11}
@@ -236,7 +236,7 @@ ENTRY(memcpy)
movcs r3, r3, lsr #8
cmp r2, #4
blo .Lpartial_word_tail
blo partial_word_tail
/* Align destination to 32 bytes (cache line boundary) */
1: tst r0, #0x1c
@@ -248,11 +248,11 @@ ENTRY(memcpy)
str r4, [r0], #4
cmp r2, #4
bhs 1b
blo .Lpartial_word_tail
blo partial_word_tail
/* copy 32 bytes at a time */
2: subs r2, r2, #32
blo .Lless_than_thirtytwo
blo less_than_thirtytwo
/* Use immediate mode for the shifts, because there is an extra cycle
* for register shifts, which could account for up to 50% of
@@ -260,11 +260,11 @@ ENTRY(memcpy)
*/
cmp r12, #24
beq .Lloop24
beq loop24
cmp r12, #8
beq .Lloop8
beq loop8
.Lloop16:
loop16:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
@@ -289,9 +289,9 @@ ENTRY(memcpy)
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #16
bhs 1b
b .Lless_than_thirtytwo
b less_than_thirtytwo
.Lloop8:
loop8:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
@@ -316,9 +316,9 @@ ENTRY(memcpy)
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #8
bhs 1b
b .Lless_than_thirtytwo
b less_than_thirtytwo
.Lloop24:
loop24:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
@@ -345,12 +345,12 @@ ENTRY(memcpy)
bhs 1b
.Lless_than_thirtytwo:
less_than_thirtytwo:
/* copy the last 0 to 31 bytes of the source */
rsb r12, lr, #32 /* we corrupted r12, recompute it */
add r2, r2, #32
cmp r2, #4
blo .Lpartial_word_tail
blo partial_word_tail
1: ldr r5, [r1], #4
sub r2, r2, #4
@@ -360,7 +360,7 @@ ENTRY(memcpy)
cmp r2, #4
bhs 1b
.Lpartial_word_tail:
partial_word_tail:
/* we have a partial word in the input buffer */
movs r5, lr, lsl #(31-3)
strmib r3, [r0], #1
@@ -372,7 +372,7 @@ ENTRY(memcpy)
/* Refill spilled registers from the stack. Don't update sp. */
ldmfd sp, {r5-r11}
.Lcopy_last_3_and_return:
copy_last_3_and_return:
movs r2, r2, lsl #31 /* copy remaining 0, 1, 2 or 3 bytes */
ldrmib r2, [r1], #1
ldrcsb r3, [r1], #1
@@ -385,15 +385,9 @@ ENTRY(memcpy)
add sp, sp, #28
ldmfd sp!, {r0, r4, lr}
bx lr
END(memcpy)
// Only reached when the __memcpy_chk check fails.
ENTRY_PRIVATE(__memcpy_chk_fail)
// Preserve lr for backtrace.
push {lr}
.cfi_def_cfa_offset 4
.cfi_rel_offset lr, 0
fortify_check_failed:
ldr r0, error_message
ldr r1, error_code
1:
@@ -403,7 +397,7 @@ error_code:
.word BIONIC_EVENT_MEMCPY_BUFFER_OVERFLOW
error_message:
.word error_string-(1b+8)
END(__memcpy_chk_fail)
END(memcpy)
.data
error_string:

View File

@@ -7,5 +7,4 @@ libc_bionic_src_files_arm += \
bionic/memmove.c \
bionic/__strcat_chk.cpp \
bionic/__strcpy_chk.cpp \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/strcat.c \

View File

@@ -40,10 +40,12 @@
ENTRY(__strcat_chk)
pld [r0, #0]
push {r0, lr}
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
push {r4, r5}
.save {r4, r5}
.cfi_adjust_cfa_offset 8
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
@@ -192,6 +194,8 @@ END(__strcat_chk)
#include "memcpy_base.S"
ENTRY_PRIVATE(__strcat_chk_failed)
.save {r0, lr}
.save {r4, r5}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4

View File

@@ -39,6 +39,7 @@
ENTRY(__strcpy_chk)
pld [r0, #0]
push {r0, lr}
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
@@ -160,6 +161,7 @@ END(__strcpy_chk)
#include "memcpy_base.S"
ENTRY_PRIVATE(__strcpy_chk_failed)
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4

View File

@@ -53,6 +53,7 @@ END(__memcpy_chk)
ENTRY(memcpy)
pld [r1, #64]
stmfd sp!, {r0, lr}
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4
@@ -65,6 +66,7 @@ END(memcpy)
ENTRY_PRIVATE(__memcpy_chk_fail)
// Preserve lr for backtrace.
push {lr}
.save {lr}
.cfi_def_cfa_offset 4
.cfi_rel_offset lr, 0

View File

@@ -36,6 +36,7 @@
// Assumes neon instructions and a cache line size of 32 bytes.
ENTRY_PRIVATE(MEMCPY_BASE)
.save {r0, lr}
.cfi_def_cfa_offset 8
.cfi_rel_offset r0, 0
.cfi_rel_offset lr, 4

View File

@@ -43,6 +43,7 @@ ENTRY(__memset_chk)
bls .L_done
// Preserve lr for backtrace.
.save {lr}
push {lr}
.cfi_def_cfa_offset 4
.cfi_rel_offset lr, 0
@@ -68,6 +69,7 @@ END(bzero)
/* memset() returns its first argument. */
ENTRY(memset)
.save {r0}
stmfd sp!, {r0}
.cfi_def_cfa_offset 4
.cfi_rel_offset r0, 0

View File

@@ -168,6 +168,7 @@ ENTRY(strcmp)
bne .L_do_align
/* Fast path. */
.save {r4-r7}
init
.L_doubleword_aligned:

View File

@@ -7,7 +7,6 @@ libc_bionic_src_files_arm += \
# Use cortex-a15 versions of strcat/strcpy/strlen and standard memmove
libc_bionic_src_files_arm += \
arch-arm/cortex-a15/bionic/stpcpy.S \
arch-arm/cortex-a15/bionic/strcat.S \
arch-arm/cortex-a15/bionic/strcpy.S \
arch-arm/cortex-a15/bionic/strlen.S \

View File

@@ -1,17 +1,8 @@
# 64-bit arm.
# arm64 specific configs
#
# Default implementations of functions that are commonly optimized.
#
libc_bionic_src_files_arm64 += \
bionic/__memset_chk.cpp \
bionic/__strcpy_chk.cpp \
bionic/__strcat_chk.cpp \
libc_common_src_files_arm64 := \
bionic/memrchr.c \
bionic/strrchr.cpp \
libc_freebsd_src_files_arm64 += \
upstream-freebsd/lib/libc/string/wcscat.c \
upstream-freebsd/lib/libc/string/wcschr.c \
upstream-freebsd/lib/libc/string/wcscmp.c \
@@ -19,8 +10,6 @@ libc_freebsd_src_files_arm64 += \
upstream-freebsd/lib/libc/string/wcslen.c \
upstream-freebsd/lib/libc/string/wcsrchr.c \
upstream-freebsd/lib/libc/string/wmemcmp.c \
libc_openbsd_src_files_arm64 += \
upstream-openbsd/lib/libc/string/stpncpy.c \
upstream-openbsd/lib/libc/string/strcat.c \
upstream-openbsd/lib/libc/string/strlcat.c \
@@ -28,13 +17,19 @@ libc_openbsd_src_files_arm64 += \
upstream-openbsd/lib/libc/string/strncat.c \
upstream-openbsd/lib/libc/string/strncpy.c \
#
# Inherently architecture-specific code.
#
# Fortify implementations of libc functions.
libc_common_src_files_arm64 += \
bionic/__memcpy_chk.cpp \
bionic/__memset_chk.cpp \
bionic/__strcpy_chk.cpp \
bionic/__strcat_chk.cpp \
libc_bionic_src_files_arm64 += \
##########################################
### CPU specific source files
libc_bionic_src_files_arm64 := \
arch-arm64/bionic/__bionic_clone.S \
arch-arm64/bionic/_exit_with_stack_teardown.S \
arch-arm64/bionic/__rt_sigreturn.S \
arch-arm64/bionic/_setjmp.S \
arch-arm64/bionic/setjmp.S \
arch-arm64/bionic/__set_tls.c \
@@ -58,7 +53,7 @@ ifeq ($(strip $(TARGET_CPU_VARIANT)),)
endif
cpu_variant_mk := $(LOCAL_PATH)/arch-arm64/$(TARGET_CPU_VARIANT)/$(TARGET_CPU_VARIANT).mk
ifeq ($(wildcard $(cpu_variant_mk)),)
$(error "TARGET_CPU_VARIANT not set or set to an unknown value. Possible values are generic, denver64. Use generic for devices that do not have a CPU similar to any of the supported cpu variants.")
$(error "TARGET_CPU_VARIANT not set or set to an unknown value. Possible values are generic, generic-neon, denver64. Use generic for devices that do not have a CPU similar to any of the supported cpu variants.")
endif
include $(cpu_variant_mk)
libc_common_additional_dependencies += $(cpu_variant_mk)

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2014 The Android Open Source Project
* Copyright (C) 2013 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -26,5 +26,9 @@
* SUCH DAMAGE.
*/
#define STPCPY
#include "string_copy.S"
#include <private/bionic_asm.h>
ENTRY_PRIVATE(__rt_sigreturn)
mov x8, __NR_rt_sigreturn
svc #0
END(__rt_sigreturn)

View File

@@ -1,63 +1,205 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
/* Copyright (c) 2012, Linaro Limited
All rights reserved.
Copyright (c) 2014, NVIDIA Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Assumptions:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* denver, ARMv8-a, AArch64
* Unaligned accesses
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
// Prototype: void *memcpy (void *dst, const void *src, size_t count).
#include <private/bionic_asm.h>
#include <private/libc_events.h>
ENTRY(__memcpy_chk)
cmp x2, x3
b.hi __memcpy_chk_fail
#define dstin x0
#define src x1
#define count x2
#define tmp1 x3
#define tmp1w w3
#define tmp2 x4
#define tmp2w w4
#define tmp3 x5
#define tmp3w w5
#define dst x6
// Fall through to memcpy...
END(__memcpy_chk)
#define A_l x7
#define A_h x8
#define B_l x9
#define B_h x10
#define C_l x11
#define C_h x12
#define D_l x13
#define D_h x14
#define QA_l q0
#define QA_h q1
#define QB_l q2
#define QB_h q3
ENTRY(memcpy)
#include "memcpy_base.S"
mov dst, dstin
cmp count, #64
b.ge .Lcpy_not_short
cmp count, #15
b.le .Ltail15tiny
/* Deal with small copies quickly by dropping straight into the
* exit block. */
.Ltail63:
/* Copy up to 48 bytes of data. At this point we only need the
* bottom 6 bits of count to be accurate. */
ands tmp1, count, #0x30
b.eq .Ltail15
add dst, dst, tmp1
add src, src, tmp1
cmp tmp1w, #0x20
b.eq 1f
b.lt 2f
ldp A_l, A_h, [src, #-48]
stp A_l, A_h, [dst, #-48]
1:
ldp A_l, A_h, [src, #-32]
stp A_l, A_h, [dst, #-32]
2:
ldp A_l, A_h, [src, #-16]
stp A_l, A_h, [dst, #-16]
.Ltail15:
ands count, count, #15
beq 1f
add src, src, count
ldp A_l, A_h, [src, #-16]
add dst, dst, count
stp A_l, A_h, [dst, #-16]
1:
ret
.Ltail15tiny:
/* Copy up to 15 bytes of data. Does not assume additional data
being copied. */
tbz count, #3, 1f
ldr tmp1, [src], #8
str tmp1, [dst], #8
1:
tbz count, #2, 1f
ldr tmp1w, [src], #4
str tmp1w, [dst], #4
1:
tbz count, #1, 1f
ldrh tmp1w, [src], #2
strh tmp1w, [dst], #2
1:
tbz count, #0, 1f
ldrb tmp1w, [src]
strb tmp1w, [dst]
1:
ret
.Lcpy_not_short:
/* We don't much care about the alignment of DST, but we want SRC
* to be 128-bit (16 byte) aligned so that we don't cross cache line
* boundaries on both loads and stores. */
neg tmp2, src
ands tmp2, tmp2, #15 /* Bytes to reach alignment. */
b.eq 2f
sub count, count, tmp2
/* Copy more data than needed; it's faster than jumping
* around copying sub-Quadword quantities. We know that
* it can't overrun. */
ldp A_l, A_h, [src]
add src, src, tmp2
stp A_l, A_h, [dst]
add dst, dst, tmp2
/* There may be less than 63 bytes to go now. */
cmp count, #63
b.le .Ltail63
2:
subs count, count, #128
b.ge .Lcpy_body_large
/* Less than 128 bytes to copy, so handle 64 here and then jump
* to the tail. */
ldp QA_l, QA_h, [src]
ldp QB_l, QB_h, [src, #32]
stp QA_l, QA_h, [dst]
stp QB_l, QB_h, [dst, #32]
tst count, #0x3f
add src, src, #64
add dst, dst, #64
b.ne .Ltail63
ret
/* Critical loop. Start at a new cache line boundary. Assuming
* 64 bytes per line this ensures the entire loop is in one line. */
.p2align 6
.Lcpy_body_large:
cmp count, 65536
bhi .Lcpy_body_huge
/* There are at least 128 bytes to copy. */
ldp QA_l, QA_h, [src, #0]
sub dst, dst, #32 /* Pre-bias. */
ldp QB_l, QB_h, [src, #32]! /* src += 64 - Pre-bias. */
1:
stp QA_l, QA_h, [dst, #32]
ldp QA_l, QA_h, [src, #32]
stp QB_l, QB_h, [dst, #64]!
ldp QB_l, QB_h, [src, #64]!
subs count, count, #64
b.ge 1b
stp QA_l, QA_h, [dst, #32]
stp QB_l, QB_h, [dst, #64]
add src, src, #32
add dst, dst, #64 + 32
tst count, #0x3f
b.ne .Ltail63
ret
.Lcpy_body_huge:
/* There are at least 128 bytes to copy. */
ldp QA_l, QA_h, [src, #0]
sub dst, dst, #32 /* Pre-bias. */
ldp QB_l, QB_h, [src, #32]!
1:
stnp QA_l, QA_h, [dst, #32]
stnp QB_l, QB_h, [dst, #64]
ldp QA_l, QA_h, [src, #32]
ldp QB_l, QB_h, [src, #64]!
add dst, dst, #64
subs count, count, #64
b.ge 1b
stnp QA_l, QA_h, [dst, #32]
stnp QB_l, QB_h, [dst, #64]
add src, src, #32
add dst, dst, #64 + 32
tst count, #0x3f
b.ne .Ltail63
ret
END(memcpy)
ENTRY_PRIVATE(__memcpy_chk_fail)
// Preserve for accurate backtrace.
stp x29, x30, [sp, -16]!
.cfi_def_cfa_offset 16
.cfi_rel_offset x29, 0
.cfi_rel_offset x30, 8
adrp x0, error_string
add x0, x0, :lo12:error_string
ldr x1, error_code
bl __fortify_chk_fail
error_code:
.word BIONIC_EVENT_MEMCPY_BUFFER_OVERFLOW
END(__memcpy_chk_fail)
.data
.align 2
error_string:
.string "memcpy: prevented write past end of buffer"
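For readers unfamiliar with the FORTIFY entry points, the __memcpy_chk above compares the requested copy length against the destination size the compiler recorded at the call site and only falls through to memcpy when it fits; otherwise it branches to __memcpy_chk_fail, which passes an event code and the error string to __fortify_chk_fail. A rough C rendering, offered as a sketch rather than the real implementation:

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical C equivalent of the assembly check above; abort() stands in
 * for the real __fortify_chk_fail("memcpy: prevented write past end of buffer"). */
void* memcpy_chk_sketch(void* dst, const void* src, size_t count, size_t dst_len) {
    if (count > dst_len) {
        abort();
    }
    return memcpy(dst, src, count);
}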

View File

@@ -1,199 +0,0 @@
/* Copyright (c) 2012, Linaro Limited
All rights reserved.
Copyright (c) 2014, NVIDIA Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Assumptions:
*
* denver, ARMv8-a, AArch64
* Unaligned accesses
*
*/
#define dstin x0
#define src x1
#define count x2
#define tmp1 x3
#define tmp1w w3
#define tmp2 x4
#define tmp2w w4
#define tmp3 x5
#define tmp3w w5
#define dst x6
#define A_l x7
#define A_h x8
#define B_l x9
#define B_h x10
#define C_l x11
#define C_h x12
#define D_l x13
#define D_h x14
#define QA_l q0
#define QA_h q1
#define QB_l q2
#define QB_h q3
mov dst, dstin
cmp count, #64
b.ge .Lcpy_not_short
cmp count, #15
b.le .Ltail15tiny
/* Deal with small copies quickly by dropping straight into the
* exit block. */
.Ltail63:
/* Copy up to 48 bytes of data. At this point we only need the
* bottom 6 bits of count to be accurate. */
ands tmp1, count, #0x30
b.eq .Ltail15
add dst, dst, tmp1
add src, src, tmp1
cmp tmp1w, #0x20
b.eq 1f
b.lt 2f
ldp A_l, A_h, [src, #-48]
stp A_l, A_h, [dst, #-48]
1:
ldp A_l, A_h, [src, #-32]
stp A_l, A_h, [dst, #-32]
2:
ldp A_l, A_h, [src, #-16]
stp A_l, A_h, [dst, #-16]
.Ltail15:
ands count, count, #15
beq 1f
add src, src, count
ldp A_l, A_h, [src, #-16]
add dst, dst, count
stp A_l, A_h, [dst, #-16]
1:
ret
.Ltail15tiny:
/* Copy up to 15 bytes of data. Does not assume additional data
being copied. */
tbz count, #3, 1f
ldr tmp1, [src], #8
str tmp1, [dst], #8
1:
tbz count, #2, 1f
ldr tmp1w, [src], #4
str tmp1w, [dst], #4
1:
tbz count, #1, 1f
ldrh tmp1w, [src], #2
strh tmp1w, [dst], #2
1:
tbz count, #0, 1f
ldrb tmp1w, [src]
strb tmp1w, [dst]
1:
ret
.Lcpy_not_short:
/* We don't much care about the alignment of DST, but we want SRC
* to be 128-bit (16 byte) aligned so that we don't cross cache line
* boundaries on both loads and stores. */
neg tmp2, src
ands tmp2, tmp2, #15 /* Bytes to reach alignment. */
b.eq 2f
sub count, count, tmp2
/* Copy more data than needed; it's faster than jumping
* around copying sub-Quadword quantities. We know that
* it can't overrun. */
ldp A_l, A_h, [src]
add src, src, tmp2
stp A_l, A_h, [dst]
add dst, dst, tmp2
/* There may be less than 63 bytes to go now. */
cmp count, #63
b.le .Ltail63
2:
subs count, count, #128
b.ge .Lcpy_body_large
/* Less than 128 bytes to copy, so handle 64 here and then jump
* to the tail. */
ldp QA_l, QA_h, [src]
ldp QB_l, QB_h, [src, #32]
stp QA_l, QA_h, [dst]
stp QB_l, QB_h, [dst, #32]
tst count, #0x3f
add src, src, #64
add dst, dst, #64
b.ne .Ltail63
ret
/* Critical loop. Start at a new cache line boundary. Assuming
* 64 bytes per line this ensures the entire loop is in one line. */
.p2align 6
.Lcpy_body_large:
cmp count, 65536
bhi .Lcpy_body_huge
/* There are at least 128 bytes to copy. */
ldp QA_l, QA_h, [src, #0]
sub dst, dst, #32 /* Pre-bias. */
ldp QB_l, QB_h, [src, #32]! /* src += 64 - Pre-bias. */
1:
stp QA_l, QA_h, [dst, #32]
ldp QA_l, QA_h, [src, #32]
stp QB_l, QB_h, [dst, #64]!
ldp QB_l, QB_h, [src, #64]!
subs count, count, #64
b.ge 1b
stp QA_l, QA_h, [dst, #32]
stp QB_l, QB_h, [dst, #64]
add src, src, #32
add dst, dst, #64 + 32
tst count, #0x3f
b.ne .Ltail63
ret
.Lcpy_body_huge:
/* There are at least 128 bytes to copy. */
ldp QA_l, QA_h, [src, #0]
sub dst, dst, #32 /* Pre-bias. */
ldp QB_l, QB_h, [src, #32]!
1:
stnp QA_l, QA_h, [dst, #32]
stnp QB_l, QB_h, [dst, #64]
ldp QA_l, QA_h, [src, #32]
ldp QB_l, QB_h, [src, #64]!
add dst, dst, #64
subs count, count, #64
b.ge 1b
stnp QA_l, QA_h, [dst, #32]
stnp QB_l, QB_h, [dst, #64]
add src, src, #32
add dst, dst, #64 + 32
tst count, #0x3f
b.ne .Ltail63
ret

View File

@@ -32,6 +32,8 @@
*
*/
#include <private/bionic_asm.h>
#define dstin x0
#define src x1
#define count x2
@@ -52,6 +54,13 @@
#define D_l x13
#define D_h x14
#define QA_l q0
#define QA_h q1
#define QB_l q2
#define QB_h q3
ENTRY(memcpy)
mov dst, dstin
cmp count, #64
b.ge .Lcpy_not_short
@@ -133,14 +142,10 @@
b.ge .Lcpy_body_large
/* Less than 128 bytes to copy, so handle 64 here and then jump
* to the tail. */
ldp A_l, A_h, [src]
ldp B_l, B_h, [src, #16]
ldp C_l, C_h, [src, #32]
ldp D_l, D_h, [src, #48]
stp A_l, A_h, [dst]
stp B_l, B_h, [dst, #16]
stp C_l, C_h, [dst, #32]
stp D_l, D_h, [dst, #48]
ldp QA_l, QA_h, [src]
ldp QB_l, QB_h, [src, #32]
stp QA_l, QA_h, [dst]
stp QB_l, QB_h, [dst, #32]
tst count, #0x3f
add src, src, #64
add dst, dst, #64
@@ -152,28 +157,23 @@
.p2align 6
.Lcpy_body_large:
/* There are at least 128 bytes to copy. */
ldp A_l, A_h, [src, #0]
sub dst, dst, #16 /* Pre-bias. */
ldp B_l, B_h, [src, #16]
ldp C_l, C_h, [src, #32]
ldp D_l, D_h, [src, #48]! /* src += 64 - Pre-bias. */
ldp QA_l, QA_h, [src, #0]
sub dst, dst, #32 /* Pre-bias. */
ldp QB_l, QB_h, [src, #32]! /* src += 64 - Pre-bias. */
1:
stp A_l, A_h, [dst, #16]
ldp A_l, A_h, [src, #16]
stp B_l, B_h, [dst, #32]
ldp B_l, B_h, [src, #32]
stp C_l, C_h, [dst, #48]
ldp C_l, C_h, [src, #48]
stp D_l, D_h, [dst, #64]!
ldp D_l, D_h, [src, #64]!
stp QA_l, QA_h, [dst, #32]
ldp QA_l, QA_h, [src, #32]
stp QB_l, QB_h, [dst, #64]!
ldp QB_l, QB_h, [src, #64]!
subs count, count, #64
b.ge 1b
stp A_l, A_h, [dst, #16]
stp B_l, B_h, [dst, #32]
stp C_l, C_h, [dst, #48]
stp D_l, D_h, [dst, #64]
add src, src, #16
add dst, dst, #64 + 16
stp QA_l, QA_h, [dst, #32]
stp QB_l, QB_h, [dst, #64]
add src, src, #32
add dst, dst, #64 + 32
tst count, #0x3f
b.ne .Ltail63
ret
END(memcpy)

View File

@@ -0,0 +1,13 @@
libc_bionic_src_files_arm64 += \
arch-arm64/generic/bionic/memchr.S \
arch-arm64/generic/bionic/memcmp.S \
arch-arm64/generic/bionic/memmove.S \
arch-arm64/generic/bionic/memset.S \
arch-arm64/generic/bionic/stpcpy.S \
arch-arm64/generic/bionic/strchr.S \
arch-arm64/generic/bionic/strcmp.S \
arch-arm64/generic/bionic/strcpy.S \
arch-arm64/generic/bionic/strlen.S \
arch-arm64/generic/bionic/strncmp.S \
arch-arm64/generic/bionic/strnlen.S \
arch-arm64/generic-neon/bionic/memcpy.S \

View File

@@ -1,63 +1,184 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
/* Copyright (c) 2012, Linaro Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Linaro nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Assumptions:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* ARMv8-a, AArch64
* Unaligned accesses
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
// Prototype: void *memcpy (void *dst, const void *src, size_t count).
#include <private/bionic_asm.h>
#include <private/libc_events.h>
ENTRY(__memcpy_chk)
cmp x2, x3
b.hi __memcpy_chk_fail
#define dstin x0
#define src x1
#define count x2
#define tmp1 x3
#define tmp1w w3
#define tmp2 x4
#define tmp2w w4
#define tmp3 x5
#define tmp3w w5
#define dst x6
// Fall through to memcpy...
END(__memcpy_chk)
#define A_l x7
#define A_h x8
#define B_l x9
#define B_h x10
#define C_l x11
#define C_h x12
#define D_l x13
#define D_h x14
ENTRY(memcpy)
#include "memcpy_base.S"
mov dst, dstin
cmp count, #64
b.ge .Lcpy_not_short
cmp count, #15
b.le .Ltail15tiny
/* Deal with small copies quickly by dropping straight into the
* exit block. */
.Ltail63:
/* Copy up to 48 bytes of data. At this point we only need the
* bottom 6 bits of count to be accurate. */
ands tmp1, count, #0x30
b.eq .Ltail15
add dst, dst, tmp1
add src, src, tmp1
cmp tmp1w, #0x20
b.eq 1f
b.lt 2f
ldp A_l, A_h, [src, #-48]
stp A_l, A_h, [dst, #-48]
1:
ldp A_l, A_h, [src, #-32]
stp A_l, A_h, [dst, #-32]
2:
ldp A_l, A_h, [src, #-16]
stp A_l, A_h, [dst, #-16]
.Ltail15:
ands count, count, #15
beq 1f
add src, src, count
ldp A_l, A_h, [src, #-16]
add dst, dst, count
stp A_l, A_h, [dst, #-16]
1:
ret
.Ltail15tiny:
/* Copy up to 15 bytes of data. Does not assume additional data
being copied. */
tbz count, #3, 1f
ldr tmp1, [src], #8
str tmp1, [dst], #8
1:
tbz count, #2, 1f
ldr tmp1w, [src], #4
str tmp1w, [dst], #4
1:
tbz count, #1, 1f
ldrh tmp1w, [src], #2
strh tmp1w, [dst], #2
1:
tbz count, #0, 1f
ldrb tmp1w, [src]
strb tmp1w, [dst]
1:
ret
.Lcpy_not_short:
/* We don't much care about the alignment of DST, but we want SRC
* to be 128-bit (16 byte) aligned so that we don't cross cache line
* boundaries on both loads and stores. */
neg tmp2, src
ands tmp2, tmp2, #15 /* Bytes to reach alignment. */
b.eq 2f
sub count, count, tmp2
/* Copy more data than needed; it's faster than jumping
* around copying sub-Quadword quantities. We know that
* it can't overrun. */
ldp A_l, A_h, [src]
add src, src, tmp2
stp A_l, A_h, [dst]
add dst, dst, tmp2
/* There may be less than 63 bytes to go now. */
cmp count, #63
b.le .Ltail63
2:
subs count, count, #128
b.ge .Lcpy_body_large
/* Less than 128 bytes to copy, so handle 64 here and then jump
* to the tail. */
ldp A_l, A_h, [src]
ldp B_l, B_h, [src, #16]
ldp C_l, C_h, [src, #32]
ldp D_l, D_h, [src, #48]
stp A_l, A_h, [dst]
stp B_l, B_h, [dst, #16]
stp C_l, C_h, [dst, #32]
stp D_l, D_h, [dst, #48]
tst count, #0x3f
add src, src, #64
add dst, dst, #64
b.ne .Ltail63
ret
/* Critical loop. Start at a new cache line boundary. Assuming
* 64 bytes per line this ensures the entire loop is in one line. */
.p2align 6
.Lcpy_body_large:
/* There are at least 128 bytes to copy. */
ldp A_l, A_h, [src, #0]
sub dst, dst, #16 /* Pre-bias. */
ldp B_l, B_h, [src, #16]
ldp C_l, C_h, [src, #32]
ldp D_l, D_h, [src, #48]! /* src += 64 - Pre-bias. */
1:
stp A_l, A_h, [dst, #16]
ldp A_l, A_h, [src, #16]
stp B_l, B_h, [dst, #32]
ldp B_l, B_h, [src, #32]
stp C_l, C_h, [dst, #48]
ldp C_l, C_h, [src, #48]
stp D_l, D_h, [dst, #64]!
ldp D_l, D_h, [src, #64]!
subs count, count, #64
b.ge 1b
stp A_l, A_h, [dst, #16]
stp B_l, B_h, [dst, #32]
stp C_l, C_h, [dst, #48]
stp D_l, D_h, [dst, #64]
add src, src, #16
add dst, dst, #64 + 16
tst count, #0x3f
b.ne .Ltail63
ret
END(memcpy)
ENTRY_PRIVATE(__memcpy_chk_fail)
// Preserve for accurate backtrace.
stp x29, x30, [sp, -16]!
.cfi_def_cfa_offset 16
.cfi_rel_offset x29, 0
.cfi_rel_offset x30, 8
adrp x0, error_string
add x0, x0, :lo12:error_string
ldr x1, error_code
bl __fortify_chk_fail
error_code:
.word BIONIC_EVENT_MEMCPY_BUFFER_OVERFLOW
END(__memcpy_chk_fail)
.data
.align 2
error_string:
.string "memcpy: prevented write past end of buffer"

View File

@@ -99,7 +99,6 @@
#define R_AARCH64_RELATIVE 1027 /* Adjust by program base. */
#define R_AARCH64_TLS_TPREL64 1030
#define R_AARCH64_TLS_DTPREL32 1031
#define R_AARCH64_IRELATIVE 1032
#define R_TYPE(name) __CONCAT(R_AARCH64_,name)

View File

@@ -1,32 +1,17 @@
# 32-bit mips.
# mips specific configs
#
# Various kinds of LP32 cruft.
#
libc_bionic_src_files_mips += \
bionic/mmap.cpp \
libc_common_src_files_mips += \
# These are shared by all the 32-bit targets, but not the 64-bit ones.
libc_common_src_files_mips := \
bionic/legacy_32_bit_support.cpp \
bionic/ndk_cruft.cpp \
bionic/time64.c \
libc_netbsd_src_files_mips += \
upstream-netbsd/common/lib/libc/hash/sha1/sha1.c \
libc_openbsd_src_files_mips += \
upstream-openbsd/lib/libc/stdio/putw.c \
#
# Default implementations of functions that are commonly optimized.
#
# These are shared by all the 32-bit targets, but not the 64-bit ones.
libc_bionic_src_files_mips += \
bionic/__memcpy_chk.cpp \
bionic/__memset_chk.cpp \
bionic/__strcpy_chk.cpp \
bionic/__strcat_chk.cpp \
bionic/mmap.cpp
libc_common_src_files_mips += \
bionic/memchr.c \
bionic/memcmp.c \
bionic/memmove.c \
@@ -34,8 +19,6 @@ libc_bionic_src_files_mips += \
bionic/strchr.cpp \
bionic/strnlen.c \
bionic/strrchr.cpp \
libc_freebsd_src_files_mips += \
upstream-freebsd/lib/libc/string/wcscat.c \
upstream-freebsd/lib/libc/string/wcschr.c \
upstream-freebsd/lib/libc/string/wcscmp.c \
@@ -44,8 +27,6 @@ libc_freebsd_src_files_mips += \
upstream-freebsd/lib/libc/string/wcsrchr.c \
upstream-freebsd/lib/libc/string/wmemcmp.c \
upstream-freebsd/lib/libc/string/wmemmove.c \
libc_openbsd_src_files_mips += \
upstream-openbsd/lib/libc/string/bcopy.c \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/stpncpy.c \
@@ -58,10 +39,16 @@ libc_openbsd_src_files_mips += \
upstream-openbsd/lib/libc/string/strncmp.c \
upstream-openbsd/lib/libc/string/strncpy.c \
#
# Inherently architecture-specific code.
#
# Fortify implementations of libc functions.
libc_common_src_files_mips += \
bionic/__memcpy_chk.cpp \
bionic/__memset_chk.cpp \
bionic/__strcpy_chk.cpp \
bionic/__strcat_chk.cpp \
##########################################
### CPU specific source files
libc_bionic_src_files_mips += \
arch-mips/bionic/__bionic_clone.S \
arch-mips/bionic/bzero.S \
@@ -82,13 +69,14 @@ libc_bionic_src_files_mips += \
else
libc_bionic_src_files_mips += \
bionic/memcpy.cpp \
bionic/memset.c \
libc_openbsd_src_files_mips += \
upstream-openbsd/lib/libc/string/strlen.c \
bionic/memset.c
libc_common_src_files_mips += \
upstream-openbsd/lib/libc/string/strlen.c
endif
libc_netbsd_src_files_mips := \
upstream-netbsd/common/lib/libc/hash/sha1/sha1.c \
libc_crt_target_cflags_mips := \
$($(my_2nd_arch_prefix)TARGET_GLOBAL_CFLAGS) \
-I$(LOCAL_PATH)/arch-mips/include

View File

@@ -1,25 +1,13 @@
# 64-bit mips.
# mips64 specific configs
#
# Default implementations of functions that are commonly optimized.
#
libc_bionic_src_files_mips64 += \
bionic/__memcpy_chk.cpp \
bionic/__memset_chk.cpp \
bionic/__strcpy_chk.cpp \
bionic/__strcat_chk.cpp \
libc_common_src_files_mips64 := \
bionic/memchr.c \
bionic/memcmp.c \
bionic/memcpy.cpp \
bionic/memmove.c \
bionic/memrchr.c \
bionic/memset.c \
bionic/strchr.cpp \
bionic/strnlen.c \
bionic/strrchr.cpp \
libc_freebsd_src_files_mips64 += \
upstream-freebsd/lib/libc/string/wcscat.c \
upstream-freebsd/lib/libc/string/wcschr.c \
upstream-freebsd/lib/libc/string/wcscmp.c \
@@ -28,8 +16,6 @@ libc_freebsd_src_files_mips64 += \
upstream-freebsd/lib/libc/string/wcsrchr.c \
upstream-freebsd/lib/libc/string/wmemcmp.c \
upstream-freebsd/lib/libc/string/wmemmove.c \
libc_openbsd_src_files_mips64 += \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/stpncpy.c \
upstream-openbsd/lib/libc/string/strcat.c \
@@ -42,11 +28,17 @@ libc_openbsd_src_files_mips64 += \
upstream-openbsd/lib/libc/string/strncmp.c \
upstream-openbsd/lib/libc/string/strncpy.c \
#
# Inherently architecture-specific code.
#
# Fortify implementations of libc functions.
libc_common_src_files_mips64 += \
bionic/__memcpy_chk.cpp \
bionic/__memset_chk.cpp \
bionic/__strcpy_chk.cpp \
bionic/__strcat_chk.cpp \
libc_bionic_src_files_mips64 += \
##########################################
### CPU specific source files
libc_bionic_src_files_mips64 := \
arch-mips64/bionic/__bionic_clone.S \
arch-mips64/bionic/_exit_with_stack_teardown.S \
arch-mips64/bionic/__get_sp.S \
@@ -56,18 +48,25 @@ libc_bionic_src_files_mips64 += \
arch-mips64/bionic/syscall.S \
arch-mips64/bionic/vfork.S \
# FIXME TODO
## libc_bionic_src_files_mips64 += arch-mips64/string/memcpy.S
## libc_bionic_src_files_mips64 += arch-mips64/string/memset.S
libc_bionic_src_files_mips64 += bionic/memcpy.cpp
libc_bionic_src_files_mips64 += bionic/memset.c
libc_crt_target_cflags_mips64 := \
$($(my_2nd_arch_prefix)TARGET_GLOBAL_CFLAGS) \
-I$(LOCAL_PATH)/arch-mips64/include \
-I$(LOCAL_PATH)/arch-mips64/include
libc_crt_target_crtbegin_file_mips64 := \
$(LOCAL_PATH)/arch-mips64/bionic/crtbegin.c \
$(LOCAL_PATH)/arch-mips64/bionic/crtbegin.c
libc_crt_target_crtbegin_so_file_mips64 := \
$(LOCAL_PATH)/arch-common/bionic/crtbegin_so.c \
$(LOCAL_PATH)/arch-common/bionic/crtbegin_so.c
libc_crt_target_so_cflags_mips64 := \
-fPIC \
-fPIC
libc_crt_target_ldflags_mips64 := \
-melf64ltsmip \
-melf64ltsmip

View File

@@ -1,135 +0,0 @@
/*
* Copyright (C) 2014 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <private/bionic_asm.h>
// DWARF constants.
#define DW_CFA_def_cfa_expression 0x0f
#define DW_CFA_expression 0x10
#define DW_EH_PE_pcrel 0x10
#define DW_EH_PE_sdata4 0x0b
#define DW_OP_breg4 0x74
#define DW_OP_deref 0x06
// Offsets into struct sigcontext.
#define OFFSET_EDI 16
#define OFFSET_ESI 20
#define OFFSET_EBP 24
#define OFFSET_ESP 28
#define OFFSET_EBX 32
#define OFFSET_EDX 36
#define OFFSET_ECX 40
#define OFFSET_EAX 44
#define OFFSET_EIP 56
// Non-standard DWARF constants for the x86 registers.
#define DW_x86_REG_EAX 0
#define DW_x86_REG_ECX 1
#define DW_x86_REG_EDX 2
#define DW_x86_REG_EBX 3
#define DW_x86_REG_EBP 5
#define DW_x86_REG_ESI 6
#define DW_x86_REG_EDI 7
#define DW_x86_REG_EIP 8
#define cfi_signal_frame_start(f) \
.section .eh_frame,"a",@progbits; \
.L ## f ## _START_EH_FRAME: \
.long 2f - 1f; /* CIE length. */ \
1:.long 0; /* CIE ID. */ \
.byte 1; /* Version. */ \
.string "zRS"; /* Augmentation string. */ \
.uleb128 1; /* Code alignment factor. */ \
.sleb128 -4; /* Data alignment factor. */ \
.uleb128 DW_x86_REG_EIP; /* Return address register. */ \
.uleb128 1; /* 1 byte of augmentation data. */ \
.byte (DW_EH_PE_pcrel|DW_EH_PE_sdata4); /* FDE encoding. */ \
.align 8; \
2: \
.long .L ## f ## _END_FDE - .L ## f ## _START_FDE; /* FDE length. */ \
.L ## f ## _START_FDE: \
.long .L ## f ## _START_FDE - .L ## f ## _START_EH_FRAME; /* CIE location. */ \
.long (.L ## f ## _START - 1) - .; /* pcrel start address (see FDE encoding above). */ \
.long .L ## f ## _END - (.L ## f ## _START - 1); /* Function this FDE applies to. */ \
.uleb128 0; /* FDE augmentation length. */ \
#define cfi_signal_frame_end(f) \
.L ## f ## _END_FDE: \
#define cfi_def_cfa(offset) \
.byte DW_CFA_def_cfa_expression; \
.uleb128 2f-1f; \
1:.byte DW_OP_breg4; \
.sleb128 offset; \
.byte DW_OP_deref; \
2: \
#define cfi_offset(reg_number,offset) \
.byte DW_CFA_expression; \
.uleb128 reg_number; \
.uleb128 2f-1f; \
1:.byte DW_OP_breg4; \
.sleb128 offset; \
2: \
ENTRY_PRIVATE(__restore)
.L__restore_START:
popl %eax
movl $__NR_sigreturn, %eax
int $0x80
.L__restore_END:
END(__restore)
cfi_signal_frame_start(__restore)
cfi_def_cfa(OFFSET_ESP + 4)
cfi_offset(DW_x86_REG_EDI, OFFSET_EDI + 4)
cfi_offset(DW_x86_REG_ESI, OFFSET_ESI + 4)
cfi_offset(DW_x86_REG_EBP, OFFSET_EBP + 4)
cfi_offset(DW_x86_REG_EBX, OFFSET_EBX + 4)
cfi_offset(DW_x86_REG_EDX, OFFSET_EDX + 4)
cfi_offset(DW_x86_REG_ECX, OFFSET_ECX + 4)
cfi_offset(DW_x86_REG_EAX, OFFSET_EAX + 4)
cfi_offset(DW_x86_REG_EIP, OFFSET_EIP + 4)
cfi_signal_frame_end(__restore)
ENTRY_PRIVATE(__restore_rt)
.L__restore_rt_START:
movl $__NR_rt_sigreturn, %eax
int $0x80
.L__restore_rt_END:
END(__restore_rt)
cfi_signal_frame_start(__restore_rt)
cfi_def_cfa(OFFSET_ESP + 160)
cfi_offset(DW_x86_REG_EDI, OFFSET_EDI + 160)
cfi_offset(DW_x86_REG_ESI, OFFSET_ESI + 160)
cfi_offset(DW_x86_REG_EBP, OFFSET_EBP + 160)
cfi_offset(DW_x86_REG_EBX, OFFSET_EBX + 160)
cfi_offset(DW_x86_REG_EDX, OFFSET_EDX + 160)
cfi_offset(DW_x86_REG_ECX, OFFSET_ECX + 160)
cfi_offset(DW_x86_REG_EAX, OFFSET_EAX + 160)
cfi_offset(DW_x86_REG_EIP, OFFSET_EIP + 160)
cfi_signal_frame_end(__restore_rt)

View File

@@ -59,6 +59,5 @@
#define R_386_TLS_GOTDESC 39
#define R_386_TLS_DESC_CALL 40
#define R_386_TLS_DESC 41
#define R_386_IRELATIVE 42
#define R_TYPE(name) __CONCAT(R_386_,name)

View File

@@ -1,45 +1,31 @@
# 32-bit x86.
# x86 specific configs
#
# Various kinds of LP32 cruft.
#
libc_bionic_src_files_x86 += \
bionic/mmap.cpp \
libc_common_src_files_x86 += \
# These are shared by all the 32-bit targets, but not the 64-bit ones.
libc_common_src_files_x86 := \
bionic/legacy_32_bit_support.cpp \
bionic/ndk_cruft.cpp \
bionic/time64.c \
libc_netbsd_src_files_x86 += \
upstream-netbsd/common/lib/libc/hash/sha1/sha1.c \
libc_openbsd_src_files_x86 += \
upstream-openbsd/lib/libc/stdio/putw.c \
#
# Default implementations of functions that are commonly optimized.
#
# Fortify implementations of libc functions.
libc_common_src_files_x86 += \
bionic/__memcpy_chk.cpp \
bionic/__memset_chk.cpp \
bionic/__strcpy_chk.cpp \
bionic/__strcat_chk.cpp \
libc_freebsd_src_files_x86 += \
upstream-freebsd/lib/libc/string/wmemmove.c \
#
# Inherently architecture-specific functions.
#
# These are shared by all the 32-bit targets, but not the 64-bit ones.
libc_bionic_src_files_x86 := \
bionic/mmap.cpp
##########################################
### CPU specific source files
libc_bionic_src_files_x86 += \
arch-x86/bionic/__bionic_clone.S \
arch-x86/bionic/_exit_with_stack_teardown.S \
arch-x86/bionic/libgcc_compat.c \
arch-x86/bionic/__restore.S \
arch-x86/bionic/_setjmp.S \
arch-x86/bionic/setjmp.S \
arch-x86/bionic/__set_tls.c \
@@ -55,6 +41,9 @@ endif
include $(arch_variant_mk)
libc_common_additional_dependencies += $(arch_variant_mk)
libc_netbsd_src_files_x86 := \
upstream-netbsd/common/lib/libc/hash/sha1/sha1.c \
arch_variant_mk :=
libc_crt_target_cflags_x86 := \

View File

@@ -1,143 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <private/bionic_asm.h>
// DWARF constants.
#define DW_CFA_def_cfa_expression 0x0f
#define DW_CFA_expression 0x10
#define DW_EH_PE_pcrel 0x10
#define DW_EH_PE_sdata4 0x0b
#define DW_OP_breg4 0x74
#define DW_OP_breg7 0x77
#define DW_OP_deref 0x06
// Offsets into struct ucontext_t of uc_mcontext.gregs[x].
#define OFFSET_R8 40
#define OFFSET_R9 48
#define OFFSET_R10 56
#define OFFSET_R11 64
#define OFFSET_R12 72
#define OFFSET_R13 80
#define OFFSET_R14 88
#define OFFSET_R15 96
#define OFFSET_RDI 104
#define OFFSET_RSI 112
#define OFFSET_RBP 120
#define OFFSET_RSP 160
#define OFFSET_RBX 128
#define OFFSET_RDX 136
#define OFFSET_RAX 144
#define OFFSET_RCX 152
#define OFFSET_RIP 168
// Non-standard DWARF constants for the x86-64 registers.
#define DW_x86_64_RAX 0
#define DW_x86_64_RDX 1
#define DW_x86_64_RCX 2
#define DW_x86_64_RBX 3
#define DW_x86_64_RSI 4
#define DW_x86_64_RDI 5
#define DW_x86_64_RBP 6
#define DW_x86_64_RSP 7
#define DW_x86_64_R8 8
#define DW_x86_64_R9 9
#define DW_x86_64_R10 10
#define DW_x86_64_R11 11
#define DW_x86_64_R12 12
#define DW_x86_64_R13 13
#define DW_x86_64_R14 14
#define DW_x86_64_R15 15
#define DW_x86_64_RIP 16
#define cfi_signal_frame_start(f) \
.section .eh_frame,"a",@progbits; \
.L ## f ## _START_EH_FRAME: \
.long 2f - 1f; /* CIE length. */ \
1:.long 0; /* CIE ID. */ \
.byte 1; /* Version. */ \
.string "zRS"; /* Augmentation string. */ \
.uleb128 1; /* Code alignment factor. */ \
.sleb128 -8; /* Data alignment factor. */ \
.uleb128 DW_x86_64_RIP; /* Return address register. */ \
.uleb128 1; /* 1 byte of augmentation data. */ \
.byte (DW_EH_PE_pcrel | DW_EH_PE_sdata4); /* FDE encoding. */ \
.align 8; \
2: \
.long .L ## f ## _END_FDE - .L ## f ## _START_FDE; /* FDE length. */ \
.L ## f ## _START_FDE: \
.long .L ## f ## _START_FDE - .L ## f ## _START_EH_FRAME; /* CIE location. */ \
.long (.L ## f ## _START - 1) - .; /* pcrel start address (see FDE encoding above). */ \
.long .L ## f ## _END - (.L ## f ## _START - 1); /* Function this FDE applies to. */ \
.uleb128 0; /* FDE augmentation length. */ \
#define cfi_signal_frame_end(f) \
.L ## f ## _END_FDE: \
#define cfi_def_cfa(offset) \
.byte DW_CFA_def_cfa_expression; \
.uleb128 2f-1f; \
1:.byte DW_OP_breg7; \
.sleb128 offset; \
.byte DW_OP_deref; \
2: \
#define cfi_offset(reg_number,offset) \
.byte DW_CFA_expression; \
.uleb128 reg_number; \
.uleb128 2f-1f; \
1:.byte DW_OP_breg7; \
.sleb128 offset; \
2: \
ENTRY_PRIVATE(__restore_rt)
.L__restore_rt_START:
mov $__NR_rt_sigreturn, %rax
syscall
.L__restore_rt_END:
END(__restore_rt)
cfi_signal_frame_start(__restore_rt)
cfi_def_cfa(OFFSET_RSP)
cfi_offset(DW_x86_64_R8, OFFSET_R8)
cfi_offset(DW_x86_64_R9, OFFSET_R9)
cfi_offset(DW_x86_64_R10, OFFSET_R10)
cfi_offset(DW_x86_64_R11, OFFSET_R11)
cfi_offset(DW_x86_64_R12, OFFSET_R12)
cfi_offset(DW_x86_64_R13, OFFSET_R13)
cfi_offset(DW_x86_64_R14, OFFSET_R14)
cfi_offset(DW_x86_64_R15, OFFSET_R15)
cfi_offset(DW_x86_64_RDI, OFFSET_RDI)
cfi_offset(DW_x86_64_RSI, OFFSET_RSI)
cfi_offset(DW_x86_64_RBP, OFFSET_RBP)
cfi_offset(DW_x86_64_RSP, OFFSET_RSP)
cfi_offset(DW_x86_64_RBX, OFFSET_RBX)
cfi_offset(DW_x86_64_RDX, OFFSET_RDX)
cfi_offset(DW_x86_64_RAX, OFFSET_RAX)
cfi_offset(DW_x86_64_RCX, OFFSET_RCX)
cfi_offset(DW_x86_64_RIP, OFFSET_RIP)
cfi_signal_frame_end(__restore_rt)

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2014 The Android Open Source Project
* Copyright (C) 2013 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -26,5 +26,9 @@
* SUCH DAMAGE.
*/
#define STPCPY
#include "string_copy.S"
#include <private/bionic_asm.h>
ENTRY_PRIVATE(__rt_sigreturn)
movl $__NR_rt_sigreturn, %eax
syscall
END(__rt_sigreturn)

View File

@@ -46,8 +46,6 @@
#define R_X86_64_GOTTPOFF 22
#define R_X86_64_TPOFF32 23
#define R_X86_64_IRELATIVE 37
#define R_TYPE(name) __CONCAT(R_X86_64_,name)
#else /* !__i386__ */

View File

@@ -1,21 +1,11 @@
# 64-bit x86.
# x86_64 specific configs
#
# Default implementations of functions that are commonly optimized.
#
libc_bionic_src_files_x86_64 += \
bionic/__memcpy_chk.cpp \
bionic/__memset_chk.cpp \
bionic/__strcpy_chk.cpp \
bionic/__strcat_chk.cpp \
libc_common_src_files_x86_64 := \
bionic/memchr.c \
bionic/memrchr.c \
bionic/strchr.cpp \
bionic/strnlen.c \
bionic/strrchr.cpp \
libc_freebsd_src_files_x86_64 += \
upstream-freebsd/lib/libc/string/wcscat.c \
upstream-freebsd/lib/libc/string/wcschr.c \
upstream-freebsd/lib/libc/string/wcscmp.c \
@@ -24,19 +14,23 @@ libc_freebsd_src_files_x86_64 += \
upstream-freebsd/lib/libc/string/wcsrchr.c \
upstream-freebsd/lib/libc/string/wmemcmp.c \
upstream-freebsd/lib/libc/string/wmemmove.c \
libc_openbsd_src_files_x86_64 += \
upstream-openbsd/lib/libc/string/strlcat.c \
upstream-openbsd/lib/libc/string/strlcpy.c \
#
# Inherently architecture-specific code.
#
# Fortify implementations of libc functions.
libc_common_src_files_x86_64 += \
bionic/__memcpy_chk.cpp \
bionic/__memset_chk.cpp \
bionic/__strcpy_chk.cpp \
bionic/__strcat_chk.cpp \
libc_bionic_src_files_x86_64 += \
##########################################
### CPU specific source files
libc_bionic_src_files_x86_64 := \
arch-x86_64/bionic/__bionic_clone.S \
arch-x86_64/bionic/_exit_with_stack_teardown.S \
arch-x86_64/bionic/__restore_rt.S \
arch-x86_64/bionic/__rt_sigreturn.S \
arch-x86_64/bionic/_setjmp.S \
arch-x86_64/bionic/setjmp.S \
arch-x86_64/bionic/__set_tls.c \
@@ -44,10 +38,6 @@ libc_bionic_src_files_x86_64 += \
arch-x86_64/bionic/syscall.S \
arch-x86_64/bionic/vfork.S \
#
# Optimized memory/string functions.
#
libc_bionic_src_files_x86_64 += \
arch-x86_64/string/sse2-memcpy-slm.S \
arch-x86_64/string/sse2-memmove-slm.S \
@@ -65,15 +55,15 @@ libc_bionic_src_files_x86_64 += \
libc_crt_target_cflags_x86_64 += \
-m64 \
-I$(LOCAL_PATH)/arch-x86_64/include \
-I$(LOCAL_PATH)/arch-x86_64/include
libc_crt_target_ldflags_x86_64 := -melf_x86_64 \
libc_crt_target_ldflags_x86_64 := -melf_x86_64
libc_crt_target_crtbegin_file_x86_64 := \
$(LOCAL_PATH)/arch-common/bionic/crtbegin.c \
$(LOCAL_PATH)/arch-common/bionic/crtbegin.c
libc_crt_target_crtbegin_so_file_x86_64 := \
$(LOCAL_PATH)/arch-common/bionic/crtbegin_so.c \
$(LOCAL_PATH)/arch-common/bionic/crtbegin_so.c
libc_crt_target_so_cflags_x86_64 := \
-fPIC \
-fPIC

View File

@@ -43,8 +43,8 @@
* This fgets check is called if _FORTIFY_SOURCE is defined and
* greater than 0.
*/
char* __fgets_chk(char* dest, int supplied_size, FILE* stream,
size_t dest_len_from_compiler) {
extern "C" char* __fgets_chk(char* dest, int supplied_size,
FILE* stream, size_t dest_len_from_compiler) {
if (supplied_size < 0) {
__fortify_chk_fail("fgets: buffer size < 0", 0);
}
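As a caller-side illustration (an assumption about the usual FORTIFY header rewriting, not something this hunk shows), a fortified build routes a plain fgets call through the checked entry point together with the buffer size the compiler can prove:

#include <stdio.h>

static char buf[16];

void read_line(FILE* stream) {
    /* With _FORTIFY_SOURCE enabled, the headers effectively turn this call into
     * __fgets_chk(buf, sizeof(buf), stream, __builtin_object_size(buf, 0)). */
    fgets(buf, sizeof(buf), stream);
}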

View File

@@ -32,9 +32,9 @@
#include <sys/socket.h>
#include "private/libc_logging.h"
ssize_t __recvfrom_chk(int socket, void* buf, size_t len, size_t buflen,
int flags, const struct sockaddr* src_addr,
socklen_t* addrlen) {
extern "C"
ssize_t __recvfrom_chk(int socket, void* buf, size_t len, size_t buflen, unsigned int flags,
const struct sockaddr* src_addr, socklen_t* addrlen) {
if (__predict_false(len > buflen)) {
__fortify_chk_fail("recvfrom: prevented write past end of buffer", 0);
}

View File

@@ -62,11 +62,6 @@ int dl_iterate_phdr(int (*cb)(struct dl_phdr_info* info, size_t size, void* data
// Try the VDSO if that didn't work.
ElfW(Ehdr)* ehdr_vdso = reinterpret_cast<ElfW(Ehdr)*>(getauxval(AT_SYSINFO_EHDR));
if (ehdr_vdso == nullptr) {
// There is no VDSO, so there's nowhere left to look.
return rc;
}
struct dl_phdr_info vdso_info;
vdso_info.dlpi_addr = 0;
vdso_info.dlpi_name = NULL;

View File

@@ -36,20 +36,12 @@
// struct __sfileext (see fileext.h).
void flockfile(FILE* fp) {
if (!__sdidinit) {
__sinit();
}
if (fp != NULL) {
pthread_mutex_lock(&_FLOCK(fp));
}
}
int ftrylockfile(FILE* fp) {
if (!__sdidinit) {
__sinit();
}
// The specification for ftrylockfile() says it returns 0 on success,
// or non-zero on error. So return an errno code directly on error.
if (fp == NULL) {
@@ -60,10 +52,6 @@ int ftrylockfile(FILE* fp) {
}
void funlockfile(FILE* fp) {
if (!__sdidinit) {
__sinit();
}
if (fp != NULL) {
pthread_mutex_unlock(&_FLOCK(fp));
}
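A short usage sketch (not from the diff) of the locking trio above, which is exactly the pattern these functions exist to support:

#include <stdio.h>

/* Hold the stream lock across several unlocked writes so the whole record is
 * emitted atomically with respect to other threads using the same FILE. */
void write_record(FILE* fp, const char* s) {
    flockfile(fp);
    while (*s != '\0') {
        putc_unlocked(*s++, fp);
    }
    putc_unlocked('\n', fp);
    funlockfile(fp);
}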

View File

@@ -62,7 +62,6 @@ struct PosixTimer {
pthread_t callback_thread;
void (*callback)(sigval_t);
sigval_t callback_argument;
volatile bool armed;
};
static __kernel_timer_t to_kernel_timer_id(timer_t timer) {
@@ -84,7 +83,7 @@ static void* __timer_thread_start(void* arg) {
continue;
}
if (si.si_code == SI_TIMER && timer->armed) {
if (si.si_code == SI_TIMER) {
// This signal was sent because a timer fired, so call the callback.
timer->callback(timer->callback_argument);
} else if (si.si_code == SI_TKILL) {
@@ -96,9 +95,6 @@ static void* __timer_thread_start(void* arg) {
}
static void __timer_thread_stop(PosixTimer* timer) {
// Immediately mark the timer as disarmed so even if some events
// continue to happen, the callback won't be called.
timer->armed = false;
pthread_kill(timer->callback_thread, TIMER_SIGNAL);
}
@@ -125,7 +121,6 @@ int timer_create(clockid_t clock_id, sigevent* evp, timer_t* timer_id) {
// Otherwise, this must be SIGEV_THREAD timer...
timer->callback = evp->sigev_notify_function;
timer->callback_argument = evp->sigev_value;
timer->armed = false;
// Check arguments that the kernel doesn't care about but we do.
if (timer->callback == NULL) {
@@ -205,18 +200,7 @@ int timer_gettime(timer_t id, itimerspec* ts) {
// http://pubs.opengroup.org/onlinepubs/9699919799/functions/timer_getoverrun.html
int timer_settime(timer_t id, int flags, const itimerspec* ts, itimerspec* ots) {
PosixTimer* timer= reinterpret_cast<PosixTimer*>(id);
int rc = __timer_settime(timer->kernel_timer_id, flags, ts, ots);
if (rc == 0) {
// Mark the timer as either being armed or disarmed. This avoids the
// callback being called after the disarm for SIGEV_THREAD timers only.
if (ts->it_value.tv_sec != 0 || ts->it_value.tv_nsec != 0) {
timer->armed = true;
} else {
timer->armed = false;
}
}
return rc;
return __timer_settime(to_kernel_timer_id(id), flags, ts, ots);
}
// http://pubs.opengroup.org/onlinepubs/9699919799/functions/timer_getoverrun.html
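For context, a caller-side sketch (illustration only, with error handling elided) of the SIGEV_THREAD path these PosixTimer changes affect; the callback runs on the helper thread that timer_create() starts above:

#include <signal.h>
#include <string.h>
#include <time.h>

static void on_tick(sigval_t value) {
    (void) value;  /* runs on the timer's callback thread */
}

timer_t make_timer(void) {
    struct sigevent ev;
    memset(&ev, 0, sizeof(ev));
    ev.sigev_notify = SIGEV_THREAD;
    ev.sigev_notify_function = on_tick;
    ev.sigev_value.sival_int = 42;
    timer_t t;
    timer_create(CLOCK_MONOTONIC, &ev, &t);  /* return value check omitted */
    return t;
}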

View File

@@ -31,7 +31,6 @@
#include <inttypes.h>
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>
#include "private/bionic_string_utils.h"
#include "private/ErrnoRestorer.h"
@@ -127,12 +126,8 @@ static int __pthread_attr_getstack_main_thread(void** stack_base, size_t* stack_
stack_limit.rlim_cur = 8 * 1024 * 1024;
}
// It shouldn't matter which thread we are because we're just looking for "[stack]", but
// valgrind seems to mess with the stack enough that the kernel will report "[stack:pid]"
// instead if you look in /proc/self/maps, so we need to look in /proc/pid/task/pid/maps.
char path[64];
snprintf(path, sizeof(path), "/proc/self/task/%d/maps", getpid());
FILE* fp = fopen(path, "re");
// It doesn't matter which thread we are; we're just looking for "[stack]".
FILE* fp = fopen("/proc/self/maps", "re");
if (fp == NULL) {
return errno;
}
@@ -148,7 +143,7 @@ static int __pthread_attr_getstack_main_thread(void** stack_base, size_t* stack_
}
}
}
__libc_fatal("No [stack] line found in \"%s\"!", path);
__libc_fatal("No [stack] line found in /proc/self/maps!");
}
int pthread_attr_getstack(const pthread_attr_t* attr, void** stack_base, size_t* stack_size) {
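Note (standalone sketch, not bionic's implementation): the change above boils down to scanning /proc/self/maps for the line tagged [stack]. Doing the same from application code looks like this:

#include <cinttypes>
#include <cstdio>
#include <cstring>

int main() {
  std::FILE* fp = std::fopen("/proc/self/maps", "re");
  if (fp == nullptr) return 1;
  char line[BUFSIZ];
  while (std::fgets(line, sizeof(line), fp) != nullptr) {
    if (std::strstr(line, "[stack]") == nullptr) continue;
    uintptr_t lo = 0, hi = 0;
    // Each maps line starts with "start-end perms offset dev inode path".
    if (std::sscanf(line, "%" SCNxPTR "-%" SCNxPTR, &lo, &hi) == 2) {
      std::printf("main thread stack: %p-%p (%zu bytes)\n",
                  reinterpret_cast<void*>(lo), reinterpret_cast<void*>(hi),
                  static_cast<size_t>(hi - lo));
    }
    break;
  }
  std::fclose(fp);
  return 0;
}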

View File

@@ -28,32 +28,27 @@
#include <signal.h>
extern "C" void __restore_rt(void);
extern "C" void __restore(void);
#if defined(__LP64__)
#if __LP64__
extern "C" void __rt_sigreturn(void);
extern "C" int __rt_sigaction(int, const struct __kernel_sigaction*, struct __kernel_sigaction*, size_t);
#else
extern "C" int __sigaction(int, const struct sigaction*, struct sigaction*);
#endif
int sigaction(int signal, const struct sigaction* bionic_new_action, struct sigaction* bionic_old_action) {
#if __LP64__
__kernel_sigaction kernel_new_action;
if (bionic_new_action != NULL) {
kernel_new_action.sa_flags = bionic_new_action->sa_flags;
kernel_new_action.sa_handler = bionic_new_action->sa_handler;
kernel_new_action.sa_mask = bionic_new_action->sa_mask;
#if defined(SA_RESTORER)
#ifdef SA_RESTORER
kernel_new_action.sa_restorer = bionic_new_action->sa_restorer;
#if defined(__aarch64__)
// arm64 has sa_restorer, but unwinding works best if you just let the
// kernel supply the default restorer from [vdso]. gdb doesn't care, but
// libgcc needs the nop that the kernel includes before the actual code.
// (We could add that ourselves, but why bother?)
#else
if (!(kernel_new_action.sa_flags & SA_RESTORER)) {
kernel_new_action.sa_flags |= SA_RESTORER;
kernel_new_action.sa_restorer = &__restore_rt;
kernel_new_action.sa_restorer = &__rt_sigreturn;
}
#endif
#endif
}
@@ -67,36 +62,19 @@ int sigaction(int signal, const struct sigaction* bionic_new_action, struct siga
bionic_old_action->sa_flags = kernel_old_action.sa_flags;
bionic_old_action->sa_handler = kernel_old_action.sa_handler;
bionic_old_action->sa_mask = kernel_old_action.sa_mask;
#if defined(SA_RESTORER)
#ifdef SA_RESTORER
bionic_old_action->sa_restorer = kernel_old_action.sa_restorer;
if (bionic_old_action->sa_restorer == &__rt_sigreturn) {
bionic_old_action->sa_flags &= ~SA_RESTORER;
}
#endif
}
return result;
}
#else
extern "C" int __sigaction(int, const struct sigaction*, struct sigaction*);
int sigaction(int signal, const struct sigaction* bionic_new_action, struct sigaction* bionic_old_action) {
// The 32-bit ABI is broken. struct sigaction includes a too-small sigset_t,
// so we have to use sigaction(2) rather than rt_sigaction(2).
struct sigaction kernel_new_action;
if (bionic_new_action != NULL) {
kernel_new_action.sa_flags = bionic_new_action->sa_flags;
kernel_new_action.sa_handler = bionic_new_action->sa_handler;
kernel_new_action.sa_mask = bionic_new_action->sa_mask;
#if defined(SA_RESTORER)
kernel_new_action.sa_restorer = bionic_new_action->sa_restorer;
if (!(kernel_new_action.sa_flags & SA_RESTORER)) {
kernel_new_action.sa_flags |= SA_RESTORER;
kernel_new_action.sa_restorer = (kernel_new_action.sa_flags & SA_SIGINFO) ? &__restore_rt : &__restore;
}
// The 32-bit ABI is broken. struct sigaction includes a too-small sigset_t.
// TODO: if we also had correct struct sigaction definitions available, we could copy in and out.
return __sigaction(signal, bionic_new_action, bionic_old_action);
#endif
}
return __sigaction(signal, (bionic_new_action != NULL) ? &kernel_new_action : NULL, bionic_old_action);
}
#endif
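Note (usage sketch only): callers of the sigaction() wrapper above never touch sa_restorer; libc fills it in where the ABI needs one. A typical SA_SIGINFO caller looks like:

#include <csignal>
#include <cstdio>
#include <unistd.h>

// SA_SIGINFO handler: stick to async-signal-safe calls such as write().
static void on_signal(int, siginfo_t*, void*) {
  const char msg[] = "caught SIGINT\n";
  ssize_t rc = write(STDERR_FILENO, msg, sizeof(msg) - 1);
  (void)rc;
}

int main() {
  struct sigaction sa = {};
  sa.sa_sigaction = on_signal;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;
  sigemptyset(&sa.sa_mask);
  if (sigaction(SIGINT, &sa, nullptr) == -1) return 1;
  std::puts("press Ctrl-C...");
  pause();  // returns once the handler has run
  return 0;
}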

View File

@@ -150,7 +150,7 @@ static int __sysconf_monotonic_clock() {
return (rc == -1) ? -1 : _POSIX_VERSION;
}
long sysconf(int name) {
int sysconf(int name) {
switch (name) {
#ifdef _POSIX_ARG_MAX
case _SC_ARG_MAX: return _POSIX_ARG_MAX;

View File

@@ -475,8 +475,8 @@ static const prop_info *find_property(prop_bt *const trie, const char *name,
static int send_prop_msg(const prop_msg *msg)
{
const int fd = socket(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0);
if (fd == -1) {
const int fd = socket(AF_LOCAL, SOCK_STREAM, 0);
if (fd < 0) {
return -1;
}

View File

@@ -0,0 +1,38 @@
/* $NetBSD: __dn_comp.c,v 1.4 2005/09/13 01:44:10 christos Exp $ */
/*
* written by matthew green, 22/04/97.
* public domain.
*/
#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: __dn_comp.c,v 1.4 2005/09/13 01:44:10 christos Exp $");
#endif /* LIBC_SCCS and not lint */
#if defined(__indr_reference)
__indr_reference(__dn_comp,dn_comp)
#else
#include <sys/types.h>
#include <netinet/in.h>
#ifdef ANDROID_CHANGES
#include "resolv_private.h"
#else
#include <resolv.h>
#endif
/* XXX THIS IS A MESS! SEE <resolv.h> XXX */
#undef dn_comp
int dn_comp(const char *, u_char *, int, u_char **, u_char **);
int
dn_comp(const char *exp_dn, u_char *comp_dn, u_char **dnptrs,
u_char **lastdnptr, int length)
{
return __dn_comp(exp_dn, comp_dn, length, dnptrs, lastdnptr);
}
#endif

View File

@@ -0,0 +1,33 @@
/* $NetBSD: __res_close.c,v 1.4 2005/09/13 01:44:10 christos Exp $ */
/*
* written by matthew green, 22/04/97.
* public domain.
*/
#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: __res_close.c,v 1.4 2005/09/13 01:44:10 christos Exp $");
#endif /* LIBC_SCCS and not lint */
#if defined(__indr_reference)
__indr_reference(__res_close, res_close)
#else
#include <sys/types.h>
#include <netinet/in.h>
#include "resolv_private.h"
/* XXX THIS IS A MESS! SEE <resolv.h> XXX */
#undef res_close
void res_close(void);
void
res_close(void)
{
__res_close();
}
#endif

View File

@@ -0,0 +1,37 @@
/* $NetBSD: __res_send.c,v 1.4 2005/09/13 01:44:10 christos Exp $ */
/*
* written by matthew green, 22/04/97.
* public domain.
*/
#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: __res_send.c,v 1.4 2005/09/13 01:44:10 christos Exp $");
#endif
#if defined(__indr_reference)
__indr_reference(__res_send, res_send)
#else
#include <sys/types.h>
#include <netinet/in.h>
#ifdef ANDROID_CHANGES
#include "resolv_private.h"
#else
#include <resolv.h>
#endif
/* XXX THIS IS A MESS! SEE <resolv.h> XXX */
#undef res_send
int res_send(const u_char *, int, u_char *, int);
int
res_send(const u_char *buf, int buflen, u_char *ans, int anssiz)
{
return __res_send(buf, buflen, ans, anssiz);
}
#endif

View File

@@ -402,10 +402,6 @@ res_nsend(res_state statp,
}
if (statp->nscount == 0) {
// We have no nameservers configured, so there's no point trying.
// Tell the cache the query failed, or any retries and anyone else asking the same
// question will block for PENDING_REQUEST_TIMEOUT seconds instead of failing fast.
_resolv_cache_query_failed(statp->netid, buf, buflen);
errno = ESRCH;
return (-1);
}

View File

@@ -54,19 +54,12 @@ enum {
*/
ANDROID_DLEXT_USE_LIBRARY_FD = 0x10,
/* If opening a library using library_fd read it starting at library_fd_offset.
* This flag is only valid when ANDROID_DLEXT_USE_LIBRARY_FD is set.
*/
ANDROID_DLEXT_USE_LIBRARY_FD_OFFSET = 0x20,
/* Mask of valid bits */
ANDROID_DLEXT_VALID_FLAG_BITS = ANDROID_DLEXT_RESERVED_ADDRESS |
ANDROID_DLEXT_RESERVED_ADDRESS_HINT |
ANDROID_DLEXT_WRITE_RELRO |
ANDROID_DLEXT_USE_RELRO |
ANDROID_DLEXT_USE_LIBRARY_FD |
ANDROID_DLEXT_USE_LIBRARY_FD_OFFSET,
ANDROID_DLEXT_USE_LIBRARY_FD,
};
typedef struct {
@@ -75,7 +68,6 @@ typedef struct {
size_t reserved_size;
int relro_fd;
int library_fd;
off64_t library_fd_offset;
} android_dlextinfo;
extern void* android_dlopen_ext(const char* filename, int flag, const android_dlextinfo* extinfo);

View File

@@ -25,7 +25,6 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _ARPA_INET_H_
#define _ARPA_INET_H_
@@ -35,6 +34,8 @@
__BEGIN_DECLS
typedef uint32_t in_addr_t;
in_addr_t inet_addr(const char*);
int inet_aton(const char*, struct in_addr*);
in_addr_t inet_lnaof(struct in_addr);

View File

@@ -518,8 +518,9 @@ typedef enum __ns_cert_types {
(cp) += NS_INT32SZ; \
} while (/*CONSTCOND*/0)
#if !defined(__LP64__)
/* Annoyingly, LP32 shipped with __ names. */
/*
* ANSI C identifier hiding for bind's lib/nameser.
*/
#define ns_msg_getflag __ns_msg_getflag
#define ns_get16 __ns_get16
#define ns_get32 __ns_get32
@@ -563,73 +564,101 @@ typedef enum __ns_cert_types {
#define ns_subdomain __ns_subdomain
#define ns_makecanon __ns_makecanon
#define ns_samename __ns_samename
#endif
#define ns_newmsg_init __ns_newmsg_init
#define ns_newmsg_copy __ns_newmsg_copy
#define ns_newmsg_id __ns_newmsg_id
#define ns_newmsg_flag __ns_newmsg_flag
#define ns_newmsg_q __ns_newmsg_q
#define ns_newmsg_rr __ns_newmsg_rr
#define ns_newmsg_done __ns_newmsg_done
#define ns_rdata_unpack __ns_rdata_unpack
#define ns_rdata_equal __ns_rdata_equal
#define ns_rdata_refers __ns_rdata_refers
__BEGIN_DECLS
int ns_msg_getflag(ns_msg, int) __LIBC_ABI_PUBLIC__;
uint16_t ns_get16(const u_char *) __LIBC_ABI_PUBLIC__;
uint32_t ns_get32(const u_char *) __LIBC_ABI_PUBLIC__;
void ns_put16(uint16_t, u_char *) __LIBC_ABI_PUBLIC__;
void ns_put32(uint32_t, u_char *) __LIBC_ABI_PUBLIC__;
int ns_initparse(const u_char *, int, ns_msg *) __LIBC_ABI_PUBLIC__;
int ns_skiprr(const u_char *, const u_char *, ns_sect, int) __LIBC_ABI_PUBLIC__;
int ns_parserr(ns_msg *, ns_sect, int, ns_rr *) __LIBC_ABI_PUBLIC__;
int ns_parserr2(ns_msg *, ns_sect, int, ns_rr2 *) __LIBC_HIDDEN__;
int ns_msg_getflag(ns_msg, int);
uint16_t ns_get16(const u_char *);
uint32_t ns_get32(const u_char *);
void ns_put16(uint16_t, u_char *);
void ns_put32(uint32_t, u_char *);
int ns_initparse(const u_char *, int, ns_msg *);
int ns_skiprr(const u_char *, const u_char *, ns_sect, int);
int ns_parserr(ns_msg *, ns_sect, int, ns_rr *);
int ns_parserr2(ns_msg *, ns_sect, int, ns_rr2 *);
int ns_sprintrr(const ns_msg *, const ns_rr *,
const char *, const char *, char *, size_t) __LIBC_ABI_PUBLIC__;
const char *, const char *, char *, size_t);
int ns_sprintrrf(const u_char *, size_t, const char *,
ns_class, ns_type, u_long, const u_char *,
size_t, const char *, const char *,
char *, size_t) __LIBC_ABI_PUBLIC__;
int ns_format_ttl(u_long, char *, size_t) __LIBC_ABI_PUBLIC__;
int ns_parse_ttl(const char *, u_long *) __LIBC_ABI_PUBLIC__;
uint32_t ns_datetosecs(const char *cp, int *errp) __LIBC_ABI_PUBLIC__;
int ns_name_ntol(const u_char *, u_char *, size_t) __LIBC_ABI_PUBLIC__;
int ns_name_ntop(const u_char *, char *, size_t) __LIBC_ABI_PUBLIC__;
int ns_name_pton(const char *, u_char *, size_t) __LIBC_ABI_PUBLIC__;
int ns_name_pton2(const char *, u_char *, size_t, size_t *) __LIBC_HIDDEN__;
char *, size_t);
int ns_format_ttl(u_long, char *, size_t);
int ns_parse_ttl(const char *, u_long *);
uint32_t ns_datetosecs(const char *cp, int *errp);
int ns_name_ntol(const u_char *, u_char *, size_t);
int ns_name_ntop(const u_char *, char *, size_t);
int ns_name_pton(const char *, u_char *, size_t);
int ns_name_pton2(const char *, u_char *, size_t, size_t *);
int ns_name_unpack(const u_char *, const u_char *,
const u_char *, u_char *, size_t) __LIBC_ABI_PUBLIC__;
const u_char *, u_char *, size_t);
int ns_name_unpack2(const u_char *, const u_char *,
const u_char *, u_char *, size_t,
size_t *) __LIBC_HIDDEN__;
size_t *);
int ns_name_pack(const u_char *, u_char *, int,
const u_char **, const u_char **) __LIBC_ABI_PUBLIC__;
const u_char **, const u_char **);
int ns_name_uncompress(const u_char *, const u_char *,
const u_char *, char *, size_t) __LIBC_ABI_PUBLIC__;
const u_char *, char *, size_t);
int ns_name_compress(const char *, u_char *, size_t,
const u_char **, const u_char **) __LIBC_ABI_PUBLIC__;
int ns_name_skip(const u_char **, const u_char *) __LIBC_ABI_PUBLIC__;
const u_char **, const u_char **);
int ns_name_skip(const u_char **, const u_char *);
void ns_name_rollback(const u_char *, const u_char **,
const u_char **) __LIBC_ABI_PUBLIC__;
const u_char **);
int ns_sign(u_char *, int *, int, int, void *,
const u_char *, int, u_char *, int *, time_t) __LIBC_ABI_PUBLIC__;
const u_char *, int, u_char *, int *, time_t);
int ns_sign2(u_char *, int *, int, int, void *,
const u_char *, int, u_char *, int *, time_t,
u_char **, u_char **) __LIBC_ABI_PUBLIC__;
ssize_t ns_name_length(ns_nname_ct, size_t) __LIBC_HIDDEN__;
int ns_name_eq(ns_nname_ct, size_t, ns_nname_ct, size_t) __LIBC_HIDDEN__;
int ns_name_owned(ns_namemap_ct, int, ns_namemap_ct, int) __LIBC_HIDDEN__;
int ns_name_map(ns_nname_ct, size_t, ns_namemap_t, int) __LIBC_HIDDEN__;
int ns_name_labels(ns_nname_ct, size_t) __LIBC_HIDDEN__;
u_char **, u_char **);
ssize_t ns_name_length(ns_nname_ct, size_t);
int ns_name_eq(ns_nname_ct, size_t, ns_nname_ct, size_t);
int ns_name_owned(ns_namemap_ct, int, ns_namemap_ct, int);
int ns_name_map(ns_nname_ct, size_t, ns_namemap_t, int);
int ns_name_labels(ns_nname_ct, size_t);
int ns_sign_tcp(u_char *, int *, int, int,
ns_tcp_tsig_state *, int) __LIBC_ABI_PUBLIC__;
ns_tcp_tsig_state *, int);
int ns_sign_tcp2(u_char *, int *, int, int,
ns_tcp_tsig_state *, int,
u_char **, u_char **) __LIBC_ABI_PUBLIC__;
u_char **, u_char **);
int ns_sign_tcp_init(void *, const u_char *, int,
ns_tcp_tsig_state *) __LIBC_ABI_PUBLIC__;
u_char *ns_find_tsig(u_char *, u_char *) __LIBC_ABI_PUBLIC__;
ns_tcp_tsig_state *);
u_char *ns_find_tsig(u_char *, u_char *);
int ns_verify(u_char *, int *, void *,
const u_char *, int, u_char *, int *,
time_t *, int) __LIBC_ABI_PUBLIC__;
time_t *, int);
int ns_verify_tcp(u_char *, int *, ns_tcp_tsig_state *, int);
int ns_verify_tcp_init(void *, const u_char *, int,
ns_tcp_tsig_state *) __LIBC_ABI_PUBLIC__;
int ns_samedomain(const char *, const char *) __LIBC_ABI_PUBLIC__;
int ns_subdomain(const char *, const char *) __LIBC_ABI_PUBLIC__;
int ns_makecanon(const char *, char *, size_t) __LIBC_ABI_PUBLIC__;
int ns_samename(const char *, const char *) __LIBC_ABI_PUBLIC__;
ns_tcp_tsig_state *);
int ns_samedomain(const char *, const char *);
int ns_subdomain(const char *, const char *);
int ns_makecanon(const char *, char *, size_t);
int ns_samename(const char *, const char *);
int ns_newmsg_init(u_char *buffer, size_t bufsiz, ns_newmsg *);
int ns_newmsg_copy(ns_newmsg *, ns_msg *);
void ns_newmsg_id(ns_newmsg *handle, uint16_t id);
void ns_newmsg_flag(ns_newmsg *handle, ns_flag flag, u_int value);
int ns_newmsg_q(ns_newmsg *handle, ns_nname_ct qname,
ns_type qtype, ns_class qclass);
int ns_newmsg_rr(ns_newmsg *handle, ns_sect sect,
ns_nname_ct name, ns_type type,
ns_class rr_class, uint32_t ttl,
uint16_t rdlen, const u_char *rdata);
size_t ns_newmsg_done(ns_newmsg *handle);
ssize_t ns_rdata_unpack(const u_char *, const u_char *, ns_type,
const u_char *, size_t, u_char *, size_t);
int ns_rdata_equal(ns_type,
const u_char *, size_t,
const u_char *, size_t);
int ns_rdata_refers(ns_type,
const u_char *, size_t,
const u_char *);
__END_DECLS
#ifdef BIND_4_COMPAT
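Note (hedged usage sketch; standalone, link with -lresolv on glibc, error handling trimmed): the ns_* declarations above form the DNS message parsing API. Driving it with res_query() looks like:

#include <netinet/in.h>
#include <resolv.h>
#include <arpa/nameser.h>
#include <cstdio>

int main() {
  unsigned char answer[4096];
  // Ask for the A records of an arbitrary name, then walk the answer section.
  int len = res_query("example.com", ns_c_in, ns_t_a, answer, sizeof(answer));
  if (len < 0) return 1;

  ns_msg msg;
  if (ns_initparse(answer, len, &msg) < 0) return 1;

  for (int i = 0; i < ns_msg_count(msg, ns_s_an); ++i) {
    ns_rr rr;
    if (ns_parserr(&msg, ns_s_an, i, &rr) != 0) continue;
    std::printf("%s type=%d ttl=%u rdlen=%d\n",
                ns_rr_name(rr), ns_rr_type(rr), ns_rr_ttl(rr), ns_rr_rdlen(rr));
  }
  return 0;
}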

View File

@@ -54,35 +54,6 @@ typedef struct {
#define DF_BIND_NOW 0x00000008
#define DF_STATIC_TLS 0x00000010
#define DF_1_NOW 0x00000001 // Perform complete relocation processing.
#define DF_1_GLOBAL 0x00000002 // implies RTLD_GLOBAL
#define DF_1_GROUP 0x00000004
#define DF_1_NODELETE 0x00000008 // implies RTLD_NODELETE
#define DF_1_LOADFLTR 0x00000010
#define DF_1_INITFIRST 0x00000020
#define DF_1_NOOPEN 0x00000040 // Object can not be used with dlopen(3)
#define DF_1_ORIGIN 0x00000080
#define DF_1_DIRECT 0x00000100
#define DF_1_TRANS 0x00000200
#define DF_1_INTERPOSE 0x00000400
#define DF_1_NODEFLIB 0x00000800
#define DF_1_NODUMP 0x00001000 // Object cannot be dumped with dldump(3)
#define DF_1_CONFALT 0x00002000
#define DF_1_ENDFILTEE 0x00004000
#define DF_1_DISPRELDNE 0x00008000
#define DF_1_DISPRELPND 0x00010000
#define DF_1_NODIRECT 0x00020000
#define DF_1_IGNMULDEF 0x00040000 // Internal use
#define DF_1_NOKSYMS 0x00080000 // Internal use
#define DF_1_NOHDR 0x00100000 // Internal use
#define DF_1_EDITED 0x00200000
#define DF_1_NORELOC 0x00400000 // Internal use
#define DF_1_SYMINTPOSE 0x00800000
#define DF_1_GLOBAUDIT 0x01000000
#define DF_1_SINGLETON 0x02000000
#define DF_1_STUB 0x04000000
#define DF_1_PIE 0x08000000
#define DT_BIND_NOW 24
#define DT_INIT_ARRAY 25
#define DT_FINI_ARRAY 26
@@ -98,15 +69,14 @@ typedef struct {
#define PT_GNU_RELRO 0x6474e552
#define STB_LOOS 10
#define STB_HIOS 12
#define STB_LOPROC 13
#define STB_HIPROC 15
#define STB_LOOS 10
#define STB_HIOS 12
#define STB_LOPROC 13
#define STB_HIPROC 15
#define STT_GNU_IFUNC 10
#define STT_LOOS 10
#define STT_HIOS 12
#define STT_LOPROC 13
#define STT_HIPROC 15
#define STT_LOOS 10
#define STT_HIOS 12
#define STT_LOPROC 13
#define STT_HIPROC 15
#endif /* _ELF_H */

View File

@@ -81,15 +81,15 @@ extern ssize_t tee(int, int, size_t, unsigned int);
extern int unlinkat(int, const char*, int);
extern ssize_t vmsplice(int, const struct iovec*, size_t, unsigned int);
#if defined(__BIONIC_FORTIFY)
extern int __open_2(const char*, int);
extern int __open_real(const char*, int, ...) __RENAME(open);
extern int __open_real(const char*, int, ...) __asm__(__USER_LABEL_PREFIX__ "open");
extern int __openat_2(int, const char*, int);
extern int __openat_real(int, const char*, int, ...) __RENAME(openat);
extern int __openat_real(int, const char*, int, ...) __asm__(__USER_LABEL_PREFIX__ "openat");
__errordecl(__creat_missing_mode, "called with O_CREAT, but missing mode");
__errordecl(__creat_too_many_args, "too many arguments");
#if defined(__BIONIC_FORTIFY)
#if !defined(__clang__)
__BIONIC_FORTIFY_INLINE

View File

@@ -25,7 +25,6 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _NETINET_IN_H_
#define _NETINET_IN_H_
@@ -44,9 +43,6 @@ __BEGIN_DECLS
#define INET_ADDRSTRLEN 16
typedef uint16_t in_port_t;
typedef uint32_t in_addr_t;
extern int bindresvport (int sd, struct sockaddr_in *sin);
static const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;

View File

@@ -48,9 +48,6 @@
# include <linux/signal.h>
#endif
#include <sys/ucontext.h>
#define __BIONIC_HAVE_UCONTEXT_T
__BEGIN_DECLS
typedef int sig_atomic_t;

View File

@@ -33,7 +33,7 @@
#include <sys/cdefs.h>
#if defined(__cplusplus) && __cplusplus >= 201103L && defined(_USING_LIBCXX)
#if defined(__cplusplus) && defined(_USING_LIBCXX)
# ifdef __clang__
# if __has_feature(cxx_atomic)
# define _STDATOMIC_HAVE_ATOMIC
@@ -89,7 +89,6 @@ using std::atomic_signal_fence;
using std::memory_order;
using std::memory_order_relaxed;
using std::memory_order_consume;
using std::memory_order_acquire;
using std::memory_order_release;
using std::memory_order_acq_rel;
using std::memory_order_seq_cst;

View File

@@ -47,8 +47,6 @@
#define __need_NULL
#include <stddef.h>
__BEGIN_DECLS
#define _FSTDIO /* Define for new stdio with functions. */
typedef off_t fpos_t; /* stdio file position type */
@@ -138,7 +136,9 @@ typedef struct __sFILE {
fpos_t _offset; /* current lseek offset */
} FILE;
__BEGIN_DECLS
extern FILE __sF[];
__END_DECLS
#define __SLBF 0x0001 /* line buffered */
#define __SNBF 0x0002 /* unbuffered */
@@ -208,6 +208,7 @@ extern FILE __sF[];
/*
* Functions defined in ANSI C standard.
*/
__BEGIN_DECLS
void clearerr(FILE *);
int fclose(FILE *);
int feof(FILE *);
@@ -295,12 +296,16 @@ int vsscanf(const char * __restrict, const char * __restrict, __va_list)
__scanflike(2, 0);
#endif /* __ISO_C_VISIBLE >= 1999 || __BSD_VISIBLE */
__END_DECLS
/*
* Functions defined in POSIX 1003.1.
*/
#if __BSD_VISIBLE || __POSIX_VISIBLE || __XPG_VISIBLE
#define L_ctermid 1024 /* size for ctermid(); PATH_MAX */
__BEGIN_DECLS
FILE *fdopen(int, const char *);
int fileno(FILE *);
@@ -324,12 +329,15 @@ int putc_unlocked(int, FILE *);
int putchar_unlocked(int);
#endif /* __POSIX_VISIBLE >= 199506 */
__END_DECLS
#endif /* __BSD_VISIBLE || __POSIX_VISIBLE || __XPG_VISIBLE */
/*
* Routines that are purely local.
*/
#if __BSD_VISIBLE
__BEGIN_DECLS
int asprintf(char ** __restrict, const char * __restrict, ...)
__printflike(2, 3);
char *fgetln(FILE * __restrict, size_t * __restrict);
@@ -339,27 +347,26 @@ int setlinebuf(FILE *);
int vasprintf(char ** __restrict, const char * __restrict,
__va_list)
__printflike(2, 0);
__END_DECLS
/*
* Stdio function-access interface.
*/
__BEGIN_DECLS
FILE *funopen(const void *,
int (*)(void *, char *, int),
int (*)(void *, const char *, int),
fpos_t (*)(void *, fpos_t, int),
int (*)(void *));
__END_DECLS
#define fropen(cookie, fn) funopen(cookie, fn, 0, 0, 0)
#define fwopen(cookie, fn) funopen(cookie, 0, fn, 0, 0)
#endif /* __BSD_VISIBLE */
extern char* __fgets_chk(char*, int, FILE*, size_t);
extern char* __fgets_real(char*, int, FILE*) __RENAME(fgets);
__errordecl(__fgets_too_big_error, "fgets called with size bigger than buffer");
__errordecl(__fgets_too_small_error, "fgets called with size less than zero");
#if defined(__BIONIC_FORTIFY)
__BEGIN_DECLS
__BIONIC_FORTIFY_INLINE
__printflike(3, 0)
int vsnprintf(char *dest, size_t size, const char *format, __va_list ap)
@@ -404,6 +411,11 @@ int sprintf(char *dest, const char *format, ...)
}
#endif
extern char* __fgets_chk(char*, int, FILE*, size_t);
extern char* __fgets_real(char*, int, FILE*) __asm__(__USER_LABEL_PREFIX__ "fgets");
__errordecl(__fgets_too_big_error, "fgets called with size bigger than buffer");
__errordecl(__fgets_too_small_error, "fgets called with size less than zero");
#if !defined(__clang__)
__BIONIC_FORTIFY_INLINE
@@ -438,8 +450,8 @@ char *fgets(char* dest, int size, FILE* stream) {
#endif /* !defined(__clang__) */
#endif /* defined(__BIONIC_FORTIFY) */
__END_DECLS
#endif /* defined(__BIONIC_FORTIFY) */
#endif /* _STDIO_H_ */

View File

@@ -92,18 +92,16 @@ extern size_t strxfrm(char* __restrict, const char* __restrict, size_t);
extern int strcoll_l(const char *, const char *, locale_t) __purefunc;
extern size_t strxfrm_l(char* __restrict, const char* __restrict, size_t, locale_t);
extern char* __stpncpy_chk2(char* __restrict, const char* __restrict, size_t, size_t, size_t);
extern char* __strncpy_chk2(char* __restrict, const char* __restrict, size_t, size_t, size_t);
extern size_t __strlcpy_real(char* __restrict, const char* __restrict, size_t) __RENAME(strlcpy);
extern size_t __strlcpy_chk(char *, const char *, size_t, size_t);
extern size_t __strlcat_real(char* __restrict, const char* __restrict, size_t) __RENAME(strlcat);
extern size_t __strlcat_chk(char* __restrict, const char* __restrict, size_t, size_t);
#if defined(__BIONIC_FORTIFY)
__BIONIC_FORTIFY_INLINE
void* memcpy(void* __restrict dest, const void* __restrict src, size_t copy_amount) {
return __builtin___memcpy_chk(dest, src, copy_amount, __bos0(dest));
char *d = (char *) dest;
const char *s = (const char *) src;
size_t s_len = __bos0(s);
size_t d_len = __bos0(d);
return __builtin___memcpy_chk(dest, src, copy_amount, d_len);
}
__BIONIC_FORTIFY_INLINE
@@ -121,6 +119,8 @@ char* strcpy(char* __restrict dest, const char* __restrict src) {
return __builtin___strcpy_chk(dest, src, __bos(dest));
}
extern char* __stpncpy_chk2(char* __restrict, const char* __restrict, size_t, size_t, size_t);
__BIONIC_FORTIFY_INLINE
char* stpncpy(char* __restrict dest, const char* __restrict src, size_t n) {
size_t bos_dest = __bos(dest);
@@ -142,6 +142,8 @@ char* stpncpy(char* __restrict dest, const char* __restrict src, size_t n) {
return __stpncpy_chk2(dest, src, n, bos_dest, bos_src);
}
extern char* __strncpy_chk2(char* __restrict, const char* __restrict, size_t, size_t, size_t);
__BIONIC_FORTIFY_INLINE
char* strncpy(char* __restrict dest, const char* __restrict src, size_t n) {
size_t bos_dest = __bos(dest);
@@ -178,6 +180,10 @@ void* memset(void *s, int c, size_t n) {
return __builtin___memset_chk(s, c, n, __bos0(s));
}
extern size_t __strlcpy_real(char* __restrict, const char* __restrict, size_t)
__asm__(__USER_LABEL_PREFIX__ "strlcpy");
extern size_t __strlcpy_chk(char *, const char *, size_t, size_t);
__BIONIC_FORTIFY_INLINE
size_t strlcpy(char* __restrict dest, const char* __restrict src, size_t size) {
size_t bos = __bos(dest);
@@ -198,6 +204,10 @@ size_t strlcpy(char* __restrict dest, const char* __restrict src, size_t size) {
return __strlcpy_chk(dest, src, size, bos);
}
extern size_t __strlcat_real(char* __restrict, const char* __restrict, size_t)
__asm__(__USER_LABEL_PREFIX__ "strlcat");
extern size_t __strlcat_chk(char* __restrict, const char* __restrict, size_t, size_t);
__BIONIC_FORTIFY_INLINE
size_t strlcat(char* __restrict dest, const char* __restrict src, size_t size) {
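Note (illustrative, not bionic code): every fortified wrapper above leans on __builtin_object_size and a *_chk primitive. The sketch below shows what the builtin reports and how the checked copy consumes it:

#include <cstdio>

int main() {
  char buf[32];
  // Type 0 reports the largest size the compiler can prove for the object;
  // (size_t)-1 means "unknown", which is what __BIONIC_FORTIFY_UNKNOWN_SIZE encodes.
  std::printf("known object size: %zu\n", __builtin_object_size(buf, 0));

  // The checked primitive aborts at run time if the copy would exceed the
  // size passed as the last argument.
  __builtin___memcpy_chk(buf, "hello", 6, __builtin_object_size(buf, 0));
  std::printf("%s\n", buf);
  return 0;
}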

View File

@@ -81,7 +81,6 @@ struct prop_msg
#define PROP_PATH_RAMDISK_DEFAULT "/default.prop"
#define PROP_PATH_SYSTEM_BUILD "/system/build.prop"
#define PROP_PATH_SYSTEM_DEFAULT "/system/default.prop"
#define PROP_PATH_VENDOR_BUILD "/vendor/build.prop"
#define PROP_PATH_LOCAL_OVERRIDE "/data/local.prop"
#define PROP_PATH_FACTORY "/factory/factory.prop"

View File

@@ -53,9 +53,6 @@
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
/*
@@ -270,6 +267,20 @@
#endif /* NO_KERNEL_RCSIDS */
#endif /* _KERNEL */
#if !defined(_STANDALONE) && !defined(_KERNEL)
#ifdef __GNUC__
#define __RENAME(x) ___RENAME(x)
#else
#ifdef __lint__
#define __RENAME(x) __symbolrename(x)
#else
#error "No function renaming possible"
#endif /* __lint__ */
#endif /* __GNUC__ */
#else /* _STANDALONE || _KERNEL */
#define __RENAME(x) no renaming in kernel or standalone environment
#endif
/*
* A barrier to stop the optimizer from moving code or assume live
* register values. This is gcc specific, the version is more or less
@@ -348,15 +359,60 @@
#endif
/*
* Some BSD source needs these macros.
* Originally they embedded the rcs versions of each source file
* in the generated binary. We strip strings during build anyway.
* Macros for manipulating "link sets". Link sets are arrays of pointers
* to objects, which are gathered up by the linker.
*
* Object format-specific code has provided us with the following macros:
*
* __link_set_add_text(set, sym)
* Add a reference to the .text symbol `sym' to `set'.
*
* __link_set_add_rodata(set, sym)
* Add a reference to the .rodata symbol `sym' to `set'.
*
* __link_set_add_data(set, sym)
* Add a reference to the .data symbol `sym' to `set'.
*
* __link_set_add_bss(set, sym)
* Add a reference to the .bss symbol `sym' to `set'.
*
* __link_set_decl(set, ptype)
* Provide an extern declaration of the set `set', which
* contains an array of the pointer type `ptype'. This
* macro must be used by any code which wishes to reference
* the elements of a link set.
*
* __link_set_start(set)
* This points to the first slot in the link set.
*
* __link_set_end(set)
* This points to the (non-existent) slot after the last
* entry in the link set.
*
* __link_set_count(set)
* Count the number of entries in link set `set'.
*
* In addition, we provide the following macros for accessing link sets:
*
* __link_set_foreach(pvar, set)
* Iterate over the link set `set'. Because a link set is
* an array of pointers, pvar must be declared as "type **pvar",
* and the actual entry accessed as "*pvar".
*
* __link_set_entry(set, idx)
* Access the link set entry at index `idx' from set `set'.
*/
#define __IDSTRING(_prefix,_s) /* nothing */
#define __COPYRIGHT(_s) /* nothing */
#define __FBSDID(_s) /* nothing */
#define __RCSID(_s) /* nothing */
#define __SCCSID(_s) /* nothing */
#define __link_set_foreach(pvar, set) \
for (pvar = __link_set_start(set); pvar < __link_set_end(set); pvar++)
#define __link_set_entry(set, idx) (__link_set_begin(set)[idx])
/*
* Some of the FreeBSD sources used in Bionic need this.
* Originally, this was used to embed the rcs versions of each source file
* in the generated binary. We certainly don't want this in Bionic.
*/
#define __FBSDID(s) /* nothing */
/*-
* The following definitions are an extension of the behavior originally
@@ -514,28 +570,11 @@
#endif
#define __bos0(s) __builtin_object_size((s), 0)
#if __GNUC_PREREQ(4,3) || __has_attribute(__artificial__)
#define __BIONIC_FORTIFY_INLINE extern __inline__ __always_inline __attribute__((gnu_inline)) __attribute__((__artificial__))
#else
#define __BIONIC_FORTIFY_INLINE extern __inline__ __always_inline __attribute__((gnu_inline))
#endif
#define __BIONIC_FORTIFY_INLINE \
extern __inline__ \
__attribute__ ((always_inline)) \
__attribute__ ((gnu_inline))
#endif
#define __BIONIC_FORTIFY_UNKNOWN_SIZE ((size_t) -1)
/* Used to tag non-static symbols that are private and never exposed by the shared library. */
#define __LIBC_HIDDEN__ __attribute__((visibility("hidden")))
/* Like __LIBC_HIDDEN__, but preserves binary compatibility for LP32. */
#ifdef __LP64__
#define __LIBC64_HIDDEN__ __LIBC_HIDDEN__
#else
#define __LIBC64_HIDDEN__ __LIBC_ABI_PUBLIC__
#endif
/* Used to tag non-static symbols that are public and exposed by the shared library. */
#define __LIBC_ABI_PUBLIC__ __attribute__((visibility ("default")))
/* Used to rename functions so that the compiler emits a call to 'x' rather than the function this was applied to. */
#define __RENAME(x) __asm__(#x)
#endif /* !_SYS_CDEFS_H_ */
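Note (standalone sketch, names are mine; assumes a platform with an empty user-label prefix, as on Linux/Android): the __RENAME macro restored at the end of this hunk is just a GCC asm label, so calls to the declared alias are emitted against the real symbol:

#include <unistd.h>
#include <cstring>

// Same idea as bionic's __RENAME(x): the declaration is linked as the symbol
// named in the asm label rather than as its own (possibly mangled) name.
#define MY_RENAME(x) __asm__(#x)

// Calls to checked_write() become calls to the libc symbol "write".
extern ssize_t checked_write(int fd, const void* buf, size_t count) MY_RENAME(write);

int main() {
  const char msg[] = "routed through the renamed declaration\n";
  return checked_write(STDOUT_FILENO, msg, std::strlen(msg)) > 0 ? 0 : 1;
}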

View File

@@ -30,13 +30,27 @@
#ifndef _SYS_CDEFS_ELF_H_
#define _SYS_CDEFS_ELF_H_
#define __strong_alias(alias, sym) \
__asm__(".global " #alias "\n" \
#alias " = " #sym);
#ifdef __LEADING_UNDERSCORE
#define _C_LABEL(x) __CONCAT(_,x)
#define _C_LABEL_STRING(x) "_"x
#else
#define _C_LABEL(x) x
#define _C_LABEL_STRING(x) x
#endif
#define __weak_alias(alias,sym) \
__asm__(".weak " #alias "\n" \
#alias " = " #sym);
#define ___RENAME(x) __asm__(___STRING(_C_LABEL(x)))
#define __indr_reference(sym,alias) /* nada, since we do weak refs */
#define __strong_alias(alias,sym) \
__asm__(".global " _C_LABEL_STRING(#alias) "\n" \
_C_LABEL_STRING(#alias) " = " _C_LABEL_STRING(#sym));
#define __weak_alias(alias,sym) \
__asm__(".weak " _C_LABEL_STRING(#alias) "\n" \
_C_LABEL_STRING(#alias) " = " _C_LABEL_STRING(#sym));
#define __weak_extern(sym) \
__asm__(".weak " _C_LABEL_STRING(#sym));
/* We use __warnattr instead of __warn_references.
* TODO: remove this and put an empty definition in one of the upstream-* compatibility headers.
@@ -44,4 +58,74 @@
#define __warn_references(sym,msg) \
/*__asm__(".section .gnu.warning." #sym "\n\t.ascii \"" msg "\"\n\t.text");*/
#define __SECTIONSTRING(_sec, _str) \
__asm__(".section " #_sec "\n\t.asciz \"" _str "\"\n\t.previous")
/* Used to tag non-static symbols that are private and never exposed by the shared library. */
#define __LIBC_HIDDEN__ __attribute__((visibility ("hidden")))
/* Like __LIBC_HIDDEN__, but preserves binary compatibility for LP32. */
#ifdef __LP64__
#define __LIBC64_HIDDEN__ __LIBC_HIDDEN__
#else
#define __LIBC64_HIDDEN__ __LIBC_ABI_PUBLIC__
#endif
/* Used to tag non-static symbols that are public and exposed by the shared library. */
#define __LIBC_ABI_PUBLIC__ __attribute__((visibility ("default")))
#define __IDSTRING(_n,_s) __SECTIONSTRING(.ident,_s)
#define __RCSID(_s) __IDSTRING(rcsid,_s)
#define __SCCSID(_s)
#define __SCCSID2(_s)
#if 0 /* XXX userland __COPYRIGHTs have \ns in them */
#define __COPYRIGHT(_s) __SECTIONSTRING(.copyright,_s)
#else
#define __COPYRIGHT(_s) \
static const char copyright[] \
__attribute__((__unused__,__section__(".copyright"))) = _s
#endif
#define __KERNEL_RCSID(_n, _s) __RCSID(_s)
#define __KERNEL_SCCSID(_n, _s)
#if 0 /* XXX see above */
#define __KERNEL_COPYRIGHT(_n, _s) __COPYRIGHT(_s)
#else
#define __KERNEL_COPYRIGHT(_n, _s) __SECTIONSTRING(.copyright, _s)
#endif
#ifndef __lint__
#define __link_set_make_entry(set, sym) \
static void const * const __link_set_##set##_sym_##sym \
__section("link_set_" #set) __used = &sym
#define __link_set_make_entry2(set, sym, n) \
static void const * const __link_set_##set##_sym_##sym##_##n \
__section("link_set_" #set) __used = &sym[n]
#else
#define __link_set_make_entry(set, sym) \
extern void const * const __link_set_##set##_sym_##sym
#define __link_set_make_entry2(set, sym, n) \
extern void const * const __link_set_##set##_sym_##sym##_##n
#endif /* __lint__ */
#define __link_set_add_text(set, sym) __link_set_make_entry(set, sym)
#define __link_set_add_rodata(set, sym) __link_set_make_entry(set, sym)
#define __link_set_add_data(set, sym) __link_set_make_entry(set, sym)
#define __link_set_add_bss(set, sym) __link_set_make_entry(set, sym)
#define __link_set_add_text2(set, sym, n) __link_set_make_entry2(set, sym, n)
#define __link_set_add_rodata2(set, sym, n) __link_set_make_entry2(set, sym, n)
#define __link_set_add_data2(set, sym, n) __link_set_make_entry2(set, sym, n)
#define __link_set_add_bss2(set, sym, n) __link_set_make_entry2(set, sym, n)
#define __link_set_decl(set, ptype) \
extern ptype * const __start_link_set_##set[]; \
extern ptype * const __stop_link_set_##set[] \
#define __link_set_start(set) (__start_link_set_##set)
#define __link_set_end(set) (__stop_link_set_##set)
#define __link_set_count(set) \
(__link_set_end(set) - __link_set_start(set))
#endif /* !_SYS_CDEFS_ELF_H_ */
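Note (hedged standalone sketch, my own names): the link-set macros above boil down to placing pointers in a named section and letting the linker provide __start_/__stop_ bounds. Spelled out without the macros:

#include <cstdio>

static int first = 1;
static int second = 2;

// Each entry is a pointer dropped into the named section; 'used' keeps the
// otherwise-unreferenced statics from being discarded.
__attribute__((used, section("link_set_demo")))
static void* const demo_entry_first = &first;
__attribute__((used, section("link_set_demo")))
static void* const demo_entry_second = &second;

// The linker synthesizes __start_<name>/__stop_<name> for any section whose
// name is a valid C identifier; __link_set_start/_end rely on exactly that.
extern "C" void* const __start_link_set_demo[];
extern "C" void* const __stop_link_set_demo[];

int main() {
  for (void* const* p = __start_link_set_demo; p < __stop_link_set_demo; ++p) {
    std::printf("entry -> %d\n", *static_cast<const int*>(*p));
  }
  return 0;
}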

View File

@@ -51,11 +51,10 @@ typedef struct {
#define FD_ZERO(set) (memset(set, 0, sizeof(*(fd_set*)(set))))
#if defined(__BIONIC_FORTIFY)
extern void __FD_CLR_chk(int, fd_set*, size_t);
extern void __FD_SET_chk(int, fd_set*, size_t);
extern int __FD_ISSET_chk(int, fd_set*, size_t);
#if defined(__BIONIC_FORTIFY)
#define FD_CLR(fd, set) __FD_CLR_chk(fd, set, __bos(set))
#define FD_SET(fd, set) __FD_SET_chk(fd, set, __bos(set))
#define FD_ISSET(fd, set) __FD_ISSET_chk(fd, set, __bos(set))

View File

@@ -291,11 +291,11 @@ extern ssize_t recv(int, void*, size_t, int);
__socketcall ssize_t sendto(int, const void*, size_t, int, const struct sockaddr*, socklen_t);
__socketcall ssize_t recvfrom(int, void*, size_t, int, const struct sockaddr*, socklen_t*);
#if defined(__BIONIC_FORTIFY)
__errordecl(__recvfrom_error, "recvfrom called with size bigger than buffer");
extern ssize_t __recvfrom_chk(int, void*, size_t, size_t, int, const struct sockaddr*, socklen_t*);
extern ssize_t __recvfrom_real(int, void*, size_t, int, const struct sockaddr*, socklen_t*) __RENAME(recvfrom);
#if defined(__BIONIC_FORTIFY)
extern ssize_t __recvfrom_real(int, void*, size_t, int, const struct sockaddr*, socklen_t*)
__asm__(__USER_LABEL_PREFIX__ "recvfrom");
__BIONIC_FORTIFY_INLINE
ssize_t recvfrom(int fd, void* buf, size_t len, int flags, const struct sockaddr* src_addr, socklen_t* addr_len) {

View File

@@ -159,12 +159,12 @@ extern int stat64(const char*, struct stat64*);
extern int mknod(const char*, mode_t, dev_t);
extern mode_t umask(mode_t);
extern mode_t __umask_chk(mode_t);
extern mode_t __umask_real(mode_t) __RENAME(umask);
__errordecl(__umask_invalid_mode, "umask called with invalid mode");
#if defined(__BIONIC_FORTIFY)
extern mode_t __umask_chk(mode_t);
extern mode_t __umask_real(mode_t) __asm__(__USER_LABEL_PREFIX__ "umask");
__errordecl(__umask_invalid_mode, "umask called with invalid mode");
__BIONIC_FORTIFY_INLINE
mode_t umask(mode_t mode) {
#if !defined(__clang__)

View File

@@ -129,7 +129,7 @@ __BEGIN_DECLS
#define _SC_AVPHYS_PAGES 0x0063
#define _SC_MONOTONIC_CLOCK 0x0064
long sysconf(int);
extern int sysconf(int name);
__END_DECLS

View File

@@ -68,9 +68,11 @@ typedef struct ucontext {
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
sigset_t uc_sigmask;
// Android has a wrong (smaller) sigset_t on ARM.
uint32_t __padding_rt_sigset;
union {
sigset_t bionic;
uint32_t kernel[2];
} uc_sigmask;
// The kernel adds extra padding after uc_sigmask to match glibc sigset_t on ARM.
char __padding[120];
unsigned long uc_regspace[128] __attribute__((__aligned__(8)));
@@ -78,10 +80,6 @@ typedef struct ucontext {
#elif defined(__aarch64__)
#define NGREG 34 /* x0..x30 + sp + pc + pstate */
typedef unsigned long greg_t;
typedef greg_t gregset_t[NGREG];
#include <asm/sigcontext.h>
typedef struct sigcontext mcontext_t;
@@ -154,9 +152,11 @@ typedef struct ucontext {
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
sigset_t uc_sigmask;
// Android has a wrong (smaller) sigset_t on x86.
uint32_t __padding_rt_sigset;
union {
sigset_t bionic;
uint32_t kernel[2];
} uc_sigmask;
struct _libc_fpstate __fpregs_mem;
} ucontext_t;

View File

@@ -91,7 +91,7 @@ struct user {
unsigned long start_stack;
long int signal;
int reserved;
struct user_regs_struct* u_ar0;
unsigned long u_ar0;
struct user_fpregs_struct* u_fpstate;
unsigned long magic;
char u_comm[32];
@@ -108,7 +108,7 @@ struct user_fpregs_struct {
__u64 rip;
__u64 rdp;
__u32 mxcsr;
__u32 mxcr_mask;
__u32 mxcsr_mask;
__u32 st_space[32];
__u32 xmm_space[64];
__u32 padding[24];
@@ -155,7 +155,7 @@ struct user {
long int signal;
int reserved;
int pad1;
struct user_regs_struct* u_ar0;
unsigned long u_ar0;
struct user_fpregs_struct* u_fpstate;
unsigned long magic;
char u_comm[32];
@@ -175,7 +175,7 @@ struct user {
unsigned long start_data;
unsigned long start_stack;
long int signal;
void* u_ar0;
unsigned long u_ar0;
unsigned long magic;
char u_comm[32];
};

View File

@@ -177,7 +177,7 @@ extern int acct(const char* filepath);
int getpagesize(void);
long sysconf(int);
extern int sysconf(int name);
extern int daemon(int, int);
@@ -197,12 +197,12 @@ extern int tcsetpgrp(int fd, pid_t _pid);
} while (_rc == -1 && errno == EINTR); \
_rc; })
#if defined(__BIONIC_FORTIFY)
extern ssize_t __read_chk(int, void*, size_t, size_t);
__errordecl(__read_dest_size_error, "read called with size bigger than destination");
__errordecl(__read_count_toobig_error, "read called with count > SSIZE_MAX");
extern ssize_t __read_real(int, void*, size_t) __RENAME(read);
#if defined(__BIONIC_FORTIFY)
extern ssize_t __read_real(int, void*, size_t)
__asm__(__USER_LABEL_PREFIX__ "read");
__BIONIC_FORTIFY_INLINE
ssize_t read(int fd, void* buf, size_t count) {

View File

@@ -14,10 +14,8 @@
* limitations under the License.
*/
#ifndef _SCOPE_GUARD_H
#define _SCOPE_GUARD_H
#include "private/bionic_macros.h"
#ifndef SCOPE_GUARD_H
#define SCOPE_GUARD_H
// TODO: include explicit std::move when it becomes available
template<typename F>
@@ -42,12 +40,14 @@ class ScopeGuard {
F f_;
bool active_;
DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeGuard);
ScopeGuard() = delete;
ScopeGuard(const ScopeGuard&) = delete;
ScopeGuard& operator=(const ScopeGuard&) = delete;
};
template<typename T>
ScopeGuard<T> make_scope_guard(T f) {
ScopeGuard<T> create_scope_guard(T f) {
return ScopeGuard<T>(f);
}
#endif // _SCOPE_GUARD_H
#endif // SCOPE_GUARD_H
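Note (self-contained re-implementation for illustration; not the header above): the scope-guard pattern runs a cleanup functor when control leaves the scope, which is what make_scope_guard/create_scope_guard hand back:

#include <cstdio>

// Minimal scope guard: run the functor on destruction unless disable() was called.
template <typename F>
class ScopeGuard {
 public:
  explicit ScopeGuard(F f) : f_(f), active_(true) {}
  ScopeGuard(ScopeGuard&& other) : f_(other.f_), active_(other.active_) {
    other.active_ = false;
  }
  ~ScopeGuard() { if (active_) f_(); }
  void disable() { active_ = false; }

  ScopeGuard(const ScopeGuard&) = delete;
  ScopeGuard& operator=(const ScopeGuard&) = delete;

 private:
  F f_;
  bool active_;
};

template <typename F>
ScopeGuard<F> make_scope_guard(F f) {
  return ScopeGuard<F>(f);
}

int main() {
  std::FILE* fp = std::fopen("/proc/self/maps", "re");
  if (fp == nullptr) return 1;
  auto closer = make_scope_guard([fp]() { std::fclose(fp); });

  char line[256];
  if (std::fgets(line, sizeof(line), fp) != nullptr) {
    std::fputs(line, stdout);
  }
  return 0;  // closer runs here, closing fp on every return path
}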

View File

@@ -1,140 +0,0 @@
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef UNIQUE_PTR_H_included
#define UNIQUE_PTR_H_included
// Default deleter for pointer types.
template <typename T>
struct DefaultDelete {
enum { type_must_be_complete = sizeof(T) };
DefaultDelete() {}
void operator()(T* p) const {
delete p;
}
};
// Default deleter for array types.
template <typename T>
struct DefaultDelete<T[]> {
enum { type_must_be_complete = sizeof(T) };
void operator()(T* p) const {
delete[] p;
}
};
// A smart pointer that deletes the given pointer on destruction.
// Equivalent to C++0x's std::unique_ptr (a combination of boost::scoped_ptr
// and boost::scoped_array).
// Named to be in keeping with Android style but also to avoid
// collision with any other implementation, until we can switch over
// to unique_ptr.
// Use thus:
// UniquePtr<C> c(new C);
template <typename T, typename D = DefaultDelete<T> >
class UniquePtr {
public:
// Construct a new UniquePtr, taking ownership of the given raw pointer.
explicit UniquePtr(T* ptr = nullptr) : mPtr(ptr) { }
UniquePtr(UniquePtr<T, D>&& that) {
mPtr = that.mPtr;
that.mPtr = nullptr;
}
~UniquePtr() {
reset();
}
// Accessors.
T& operator*() const { return *mPtr; }
T* operator->() const { return mPtr; }
T* get() const { return mPtr; }
// Returns the raw pointer and hands over ownership to the caller.
// The pointer will not be deleted by UniquePtr.
T* release() __attribute__((warn_unused_result)) {
T* result = mPtr;
mPtr = nullptr;
return result;
}
// Takes ownership of the given raw pointer.
// If this smart pointer previously owned a different raw pointer, that
// raw pointer will be freed.
void reset(T* ptr = nullptr) {
if (ptr != mPtr) {
D()(mPtr);
mPtr = ptr;
}
}
private:
// The raw pointer.
T* mPtr;
// Comparing unique pointers is probably a mistake, since they're unique.
template <typename T2> bool operator==(const UniquePtr<T2>& p) const = delete;
template <typename T2> bool operator!=(const UniquePtr<T2>& p) const = delete;
// Disallow copy and assignment.
UniquePtr(const UniquePtr&) = delete;
void operator=(const UniquePtr&) = delete;
};
// Partial specialization for array types. Like std::unique_ptr, this removes
// operator* and operator-> but adds operator[].
template <typename T, typename D>
class UniquePtr<T[], D> {
public:
explicit UniquePtr(T* ptr = NULL) : mPtr(ptr) {
}
UniquePtr(UniquePtr<T, D>&& that) {
mPtr = that.mPtr;
that.mPtr = nullptr;
}
~UniquePtr() {
reset();
}
T& operator[](size_t i) const {
return mPtr[i];
}
T* get() const { return mPtr; }
T* release() __attribute__((warn_unused_result)) {
T* result = mPtr;
mPtr = NULL;
return result;
}
void reset(T* ptr = NULL) {
if (ptr != mPtr) {
D()(mPtr);
mPtr = ptr;
}
}
private:
T* mPtr;
// Disallow copy and assignment.
UniquePtr(const UniquePtr&) = delete;
void operator=(const UniquePtr&) = delete;
};
#endif // UNIQUE_PTR_H_included

View File

@@ -20,8 +20,8 @@
// DISALLOW_COPY_AND_ASSIGN disallows the copy and operator= functions.
// It goes in the private: declarations in a class.
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&) = delete; \
void operator=(const TypeName&) = delete
TypeName(const TypeName&); \
void operator=(const TypeName&)
// A macro to disallow all the implicit constructors, namely the
// default constructor, copy constructor and operator= functions.
@@ -30,7 +30,7 @@
// that wants to prevent anyone from instantiating it. This is
// especially useful for classes containing only static methods.
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
TypeName() = delete; \
TypeName(); \
DISALLOW_COPY_AND_ASSIGN(TypeName)
#define BIONIC_ALIGN(value, alignment) \

View File

@@ -34,8 +34,6 @@
#include <pthread.h>
__BEGIN_DECLS
/*
* file extension
*/
@@ -65,6 +63,4 @@ do { \
_FILEEXT_INIT(f); \
} while (0)
__END_DECLS
#endif /* _FILEEXT_H_ */

View File

@@ -32,10 +32,6 @@
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__BEGIN_DECLS
/*
* The first few FILEs are statically allocated; others are dynamically
* allocated and linked in via this glue structure.
@@ -48,5 +44,3 @@ struct glue {
/* This was referenced by a couple of different pieces of middleware and the Crystax NDK. */
__LIBC64_HIDDEN__ extern struct glue __sglue;
__END_DECLS

View File

@@ -41,8 +41,6 @@
#include "wcio.h"
#include "fileext.h"
__BEGIN_DECLS
/*
* Android <= KitKat had getc/putc macros in <stdio.h> that referred
* to __srget/__swbuf, so those symbols need to be public for LP32
@@ -139,5 +137,3 @@ extern int __sfvwrite(FILE *, struct __suio *);
wint_t __fputwc_unlock(wchar_t wc, FILE *fp);
#pragma GCC visibility pop
__END_DECLS

View File

@@ -32,10 +32,6 @@
#ifndef _WCIO_H_
#define _WCIO_H_
#include <sys/cdefs.h>
__BEGIN_DECLS
/* minimal requirement of SUSv2 */
#define WCIO_UNGETWC_BUFSIZE 1
@@ -82,6 +78,4 @@ do {\
#define WCIO_INIT(fp) \
memset(&(_EXT(fp)->_wcio), 0, sizeof(struct wchar_io_data))
__END_DECLS
#endif /*_WCIO_H_*/

View File

@@ -132,15 +132,9 @@ public class ZoneCompactor {
throw new RuntimeException("zone filename too long: " + zoneName.length());
}
// Follow the chain of links to work out where the real data for this zone lives.
String actualZoneName = zoneName;
while (links.get(actualZoneName) != null) {
actualZoneName = links.get(actualZoneName);
}
f.write(toAscii(new byte[MAXNAME], zoneName));
f.writeInt(offsets.get(actualZoneName));
f.writeInt(lengths.get(actualZoneName));
f.writeInt(offsets.get(zoneName));
f.writeInt(lengths.get(zoneName));
f.writeInt(0); // Used to be raw GMT offset. No longer used.
}

View File

@@ -13,11 +13,8 @@ import sys
import tarfile
import tempfile
regions = ['africa', 'antarctica', 'asia', 'australasia',
'etcetera', 'europe', 'northamerica', 'southamerica',
# These two deliberately come last so they override what came
# before (and each other).
'backward', 'backzone' ]
regions = ['africa', 'antarctica', 'asia', 'australasia', 'backward',
'etcetera', 'europe', 'northamerica', 'southamerica']
def CheckDirExists(dir, dirname):
if not os.path.isdir(dir):
@@ -52,16 +49,16 @@ def WriteSetupFile():
fields = line.split()
if fields:
if fields[0] == 'Link':
links.append('%s %s %s' % (fields[0], fields[1], fields[2]))
links.append('%s %s %s\n' % (fields[0], fields[1], fields[2]))
zones.append(fields[2])
elif fields[0] == 'Zone':
zones.append(fields[1])
zones.sort()
setup = open('setup', 'w')
for link in sorted(set(links)):
setup.write('%s\n' % link)
for zone in sorted(set(zones)):
for link in links:
setup.write(link)
for zone in zones:
setup.write('%s\n' % zone)
setup.close()
@@ -117,37 +114,27 @@ def BuildIcuToolsAndData(data_filename):
# Build the ICU tools.
print 'Configuring ICU tools...'
subprocess.check_call(['%s/runConfigureICU' % icu_dir, 'Linux'])
print 'Making ICU tools...'
subprocess.check_call(['make', '-j32'])
# Run the ICU tools.
os.chdir('tools/tzcode')
# The tz2icu tool only picks up icuregions and icuzones if they are in the CWD
for icu_data_file in [ 'icuregions', 'icuzones']:
icu_data_file_source = '%s/tools/tzcode/%s' % (icu_dir, icu_data_file)
icu_data_file_symlink = './%s' % icu_data_file
os.symlink(icu_data_file_source, icu_data_file_symlink)
shutil.copyfile('%s/%s' % (original_working_dir, data_filename), data_filename)
print 'Making ICU data...'
# The Makefile assumes the existence of the bin directory.
os.mkdir('%s/bin' % icu_working_dir)
subprocess.check_call(['make'])
# Copy the source file to its ultimate destination.
# Copy the output files to their ultimate destination.
icu_txt_data_dir = '%s/data/misc' % icu_dir
print 'Copying zoneinfo64.txt to %s ...' % icu_txt_data_dir
shutil.copy('zoneinfo64.txt', icu_txt_data_dir)
# Regenerate the .dat file.
os.chdir(icu_working_dir)
subprocess.check_call(['make', '-j32'])
# Copy the .dat file to its ultimate destination.
icu_dat_data_dir = '%s/stubdata' % icu_dir
datfiles = glob.glob('data/out/tmp/icudt??l.dat')
if len(datfiles) != 1:
print 'ERROR: Unexpectedly found %d .dat files (%s). Halting.' % (len(datfiles), datfiles)
sys.exit(1)
datfile = datfiles[0]
print 'Copying %s to %s ...' % (datfile, icu_dat_data_dir)
shutil.copy(datfile, icu_dat_data_dir)
@@ -175,10 +162,9 @@ def BuildBionicToolsAndData(data_filename):
print 'Calling zic(1)...'
os.mkdir('data')
zic_inputs = [ 'extracted/%s' % x for x in regions ]
zic_cmd = ['zic', '-d', 'data' ]
zic_cmd.extend(zic_inputs)
subprocess.check_call(zic_cmd)
for region in regions:
if region != 'backward':
subprocess.check_call(['zic', '-d', 'data', 'extracted/%s' % region])
WriteSetupFile()

View File

@@ -2252,11 +2252,14 @@ static int __bionic_open_tzdata_path(const char* path_prefix_variable, const cha
}
static int __bionic_open_tzdata(const char* olson_id, int* data_size) {
int fd = __bionic_open_tzdata_path("ANDROID_ROOT", "/usr/share/zoneinfo/tzdata", olson_id, data_size);
if (fd == -2) {
// The first thing that 'recovery' does is try to format the current time. It doesn't have
// any tzdata available, so we must not abort here --- doing so breaks the recovery image!
fprintf(stderr, "%s: couldn't find any tzdata when looking for %s!\n", __FUNCTION__, olson_id);
int fd = __bionic_open_tzdata_path("ANDROID_DATA", "/misc/zoneinfo/tzdata", olson_id, data_size);
if (fd < 0) {
fd = __bionic_open_tzdata_path("ANDROID_ROOT", "/usr/share/zoneinfo/tzdata", olson_id, data_size);
if (fd == -2) {
// The first thing that 'recovery' does is try to format the current time. It doesn't have
// any tzdata available, so we must not abort here --- doing so breaks the recovery image!
fprintf(stderr, "%s: couldn't find any tzdata when looking for %s!\n", __FUNCTION__, olson_id);
}
}
return fd;
}

View File

@@ -3526,9 +3526,7 @@ static struct mallinfo internal_mallinfo(mstate m) {
nm.arena = sum;
nm.ordblks = nfree;
nm.hblkhd = m->footprint - sum;
/* BEGIN android-changed: usmblks set to footprint from max_footprint */
nm.usmblks = m->footprint;
/* END android-changed */
nm.usmblks = m->max_footprint;
nm.uordblks = m->footprint - mfree;
nm.fordblks = mfree;
nm.keepcost = m->topsize;

View File

@@ -44,17 +44,15 @@
#define ALIGNBYTES (sizeof(uintptr_t) - 1)
#define ALIGN(p) (((uintptr_t)(p) + ALIGNBYTES) &~ ALIGNBYTES)
#undef stdin
#undef stdout
#undef stderr
int __sdidinit;
#define NDYNAMIC 10 /* add ten more whenever necessary */
#define std(flags, file) \
{0,0,0,flags,file,{0},0,__sF+file,__sclose,__sread,__sseek,__swrite, \
{(unsigned char *)(__sFext+file), 0},NULL,0,{0},{0},{0},0,0}
{0,0,0,flags,file,{0,0},0,__sF+file,__sclose,__sread,__sseek,__swrite, \
{(unsigned char *)(__sFext+file), 0},NULL,0,{0,0,0},{0},{0,0},0,0}
/* p r w flags file _bf z cookie close read seek write
ext */
/* the usual - (stdin + stdout + stderr) */
static FILE usual[FOPEN_MAX - 3];
@@ -69,9 +67,6 @@ FILE __sF[3] = {
std(__SWR, STDOUT_FILENO), /* stdout */
std(__SWR|__SNBF, STDERR_FILENO) /* stderr */
};
FILE* stdin = &__sF[0];
FILE* stdout = &__sF[1];
FILE* stderr = &__sF[2];
struct glue __sglue = { &uglue, 3, __sF };
static struct glue *
@@ -170,26 +165,17 @@ void
__sinit(void)
{
_THREAD_PRIVATE_MUTEX(__sinit_mutex);
int i;
_THREAD_PRIVATE_MUTEX_LOCK(__sinit_mutex);
if (__sdidinit) {
/* bail out if caller lost the race */
_THREAD_PRIVATE_MUTEX_UNLOCK(__sinit_mutex);
return;
}
/* Initialize stdin/stdout/stderr (for the recursive mutex). http://b/18208568. */
for (size_t i = 0; i < 3; ++i) {
_FILEEXT_SETUP(__sF+i, __sFext+i);
}
/* Initialize the pre-allocated (but initially unused) streams. */
for (size_t i = 0; i < FOPEN_MAX - 3; ++i) {
if (__sdidinit)
goto out; /* bail out if caller lost the race */
for (i = 0; i < FOPEN_MAX - 3; i++) {
_FILEEXT_SETUP(usual+i, usualext+i);
}
/* make sure we clean up on exit */
__atexit_register_cleanup(_cleanup); /* conservative */
__sdidinit = 1;
out:
_THREAD_PRIVATE_MUTEX_UNLOCK(__sinit_mutex);
}

View File

@@ -68,23 +68,7 @@ fread(void *buf, size_t size, size_t count, FILE *fp)
fp->_r = 0;
total = resid;
p = buf;
// BEGIN android-added
// Avoid pathological behavior on unbuffered files. OpenBSD
// will loop reading one byte then memcpying one byte!
if ((fp->_flags & __SNBF) != 0) {
// We know if we're unbuffered that our buffer is empty, so
// we can just read directly.
while (resid > 0 && (r = (*fp->_read)(fp->_cookie, p, resid)) > 0) {
p += r;
resid -= r;
}
FUNLOCKFILE(fp);
return ((total - resid) / size);
}
// END android-added
while (resid > (size_t)(r = fp->_r)) {
while (resid > (r = fp->_r)) {
(void)memcpy((void *)p, (void *)fp->_p, (size_t)r);
fp->_p += r;
/* fp->_r = 0 ... done in __srefill */

Binary file not shown.

View File

@@ -8,7 +8,6 @@ LOCAL_SRC_FILES:= \
linker.cpp \
linker_allocator.cpp \
linker_environ.cpp \
linker_libc_support.c \
linker_phdr.cpp \
rt.cpp \

View File

@@ -162,12 +162,12 @@ static void log_signal_summary(int signum, const siginfo_t* info) {
thread_name[MAX_TASK_NAME_LEN] = 0;
}
// "info" will be null if the siginfo_t information was not available.
// "info" will be NULL if the siginfo_t information was not available.
// Many signals don't have an address or a code.
char code_desc[32]; // ", code -6"
char addr_desc[32]; // ", fault addr 0x1234"
addr_desc[0] = code_desc[0] = 0;
if (info != nullptr) {
if (info != NULL) {
// For a rethrown signal, this si_code will be right and the one debuggerd shows will
// always be SI_TKILL.
__libc_format_buffer(code_desc, sizeof(code_desc), ", code %d", info->si_code);
@@ -198,7 +198,7 @@ static bool have_siginfo(int signum) {
}
bool result = (old_action.sa_flags & SA_SIGINFO) != 0;
if (sigaction(signum, &old_action, nullptr) == -1) {
if (sigaction(signum, &old_action, NULL) == -1) {
__libc_format_log(ANDROID_LOG_WARN, "libc", "Restore failed in test for SA_SIGINFO: %s",
strerror(errno));
}
@@ -215,7 +215,7 @@ static void send_debuggerd_packet(siginfo_t* info) {
return;
}
int s = socket_abstract_client(DEBUGGER_SOCKET_NAME, SOCK_STREAM | SOCK_CLOEXEC);
int s = socket_abstract_client(DEBUGGER_SOCKET_NAME, SOCK_STREAM);
if (s == -1) {
__libc_format_log(ANDROID_LOG_FATAL, "libc", "Unable to open connection to debuggerd: %s",
strerror(errno));
@@ -230,7 +230,7 @@ static void send_debuggerd_packet(siginfo_t* info) {
msg.action = DEBUGGER_ACTION_CRASH;
msg.tid = gettid();
msg.abort_msg_address = reinterpret_cast<uintptr_t>(g_abort_message);
msg.original_si_code = (info != nullptr) ? info->si_code : 0;
msg.original_si_code = (info != NULL) ? info->si_code : 0;
int ret = TEMP_FAILURE_RETRY(write(s, &msg, sizeof(msg)));
if (ret == sizeof(msg)) {
char debuggerd_ack;
@@ -255,7 +255,7 @@ static void debuggerd_signal_handler(int signal_number, siginfo_t* info, void*)
// It's possible somebody cleared the SA_SIGINFO flag, which would mean
// our "info" arg holds an undefined value.
if (!have_siginfo(signal_number)) {
info = nullptr;
info = NULL;
}
log_signal_summary(signal_number, info);
@@ -296,14 +296,14 @@ __LIBC_HIDDEN__ void debuggerd_init() {
// Use the alternate signal stack if available so we can catch stack overflows.
action.sa_flags |= SA_ONSTACK;
sigaction(SIGABRT, &action, nullptr);
sigaction(SIGBUS, &action, nullptr);
sigaction(SIGFPE, &action, nullptr);
sigaction(SIGILL, &action, nullptr);
sigaction(SIGPIPE, &action, nullptr);
sigaction(SIGSEGV, &action, nullptr);
sigaction(SIGABRT, &action, NULL);
sigaction(SIGBUS, &action, NULL);
sigaction(SIGFPE, &action, NULL);
sigaction(SIGILL, &action, NULL);
sigaction(SIGPIPE, &action, NULL);
sigaction(SIGSEGV, &action, NULL);
#if defined(SIGSTKFLT)
sigaction(SIGSTKFLT, &action, nullptr);
sigaction(SIGSTKFLT, &action, NULL);
#endif
sigaction(SIGTRAP, &action, nullptr);
sigaction(SIGTRAP, &action, NULL);
}

Some files were not shown because too many files have changed in this diff.