Merge remote-tracking branch 'aosp/master' into HEAD

Colin Cross
2015-03-16 16:54:58 -07:00
309 changed files with 243115 additions and 2602 deletions


@@ -85,7 +85,7 @@ libc_common_src_files += \
bionic/__vsnprintf_chk.cpp \
bionic/__vsprintf_chk.cpp \
libc_bionic_src_files := \
libc_bionic_ndk_src_files := \
bionic/abort.cpp \
bionic/accept.cpp \
bionic/accept4.cpp \
@@ -116,16 +116,14 @@ libc_bionic_src_files := \
bionic/error.cpp \
bionic/eventfd_read.cpp \
bionic/eventfd_write.cpp \
bionic/faccessat.cpp \
bionic/fchmod.cpp \
bionic/fchmodat.cpp \
bionic/ffs.cpp \
bionic/flockfile.cpp \
bionic/fork.cpp \
bionic/fpclassify.cpp \
bionic/futimens.cpp \
bionic/getauxval.cpp \
bionic/getcwd.cpp \
bionic/getentropy_linux.c \
bionic/gethostname.cpp \
bionic/getpgrp.cpp \
bionic/getpid.cpp \
@@ -146,6 +144,7 @@ libc_bionic_src_files := \
bionic/mbrtoc16.cpp \
bionic/mbrtoc32.cpp \
bionic/mbstate.cpp \
bionic/mempcpy.cpp \
bionic/mkdir.cpp \
bionic/mkfifo.cpp \
bionic/mknod.cpp \
@@ -160,27 +159,6 @@ libc_bionic_src_files := \
bionic/posix_fallocate.cpp \
bionic/posix_madvise.cpp \
bionic/posix_timers.cpp \
bionic/pthread_atfork.cpp \
bionic/pthread_attr.cpp \
bionic/pthread_cond.cpp \
bionic/pthread_create.cpp \
bionic/pthread_detach.cpp \
bionic/pthread_equal.cpp \
bionic/pthread_exit.cpp \
bionic/pthread_getcpuclockid.cpp \
bionic/pthread_getschedparam.cpp \
bionic/pthread_gettid_np.cpp \
bionic/pthread_internals.cpp \
bionic/pthread_join.cpp \
bionic/pthread_key.cpp \
bionic/pthread_kill.cpp \
bionic/pthread_mutex.cpp \
bionic/pthread_once.cpp \
bionic/pthread_rwlock.cpp \
bionic/pthread_self.cpp \
bionic/pthread_setname_np.cpp \
bionic/pthread_setschedparam.cpp \
bionic/pthread_sigmask.cpp \
bionic/ptrace.cpp \
bionic/pty.cpp \
bionic/raise.cpp \
@@ -223,7 +201,6 @@ libc_bionic_src_files := \
bionic/strtold.cpp \
bionic/stubs.cpp \
bionic/symlink.cpp \
bionic/sysconf.cpp \
bionic/sysinfo.cpp \
bionic/syslog.cpp \
bionic/sys_siglist.c \
@@ -236,10 +213,27 @@ libc_bionic_src_files := \
bionic/umount.cpp \
bionic/unlink.cpp \
bionic/utimes.cpp \
bionic/vdso.cpp \
bionic/wait.cpp \
bionic/wchar.cpp \
bionic/wctype.cpp \
bionic/wmempcpy.cpp \
libc_bionic_src_files :=
# The fork implementation depends on pthread data, so we can't include it in
# libc_ndk.a.
libc_bionic_src_files += bionic/fork.cpp
# The data that backs getauxval is initialized in the libc init functions which
# are invoked by the linker. If this file is included in libc_ndk.a, only one of
# the copies of the global data will be initialized, resulting in nullptr
# dereferences.
libc_bionic_src_files += bionic/getauxval.cpp
# These three require getauxval, which isn't available on older platforms.
libc_bionic_src_files += bionic/getentropy_linux.c
libc_bionic_src_files += bionic/sysconf.cpp
libc_bionic_src_files += bionic/vdso.cpp
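# The getauxval hazard described above can be sketched in a few lines. This is
# a hypothetical illustration (not code from the commit): if a file with
# libc-initialized global data were linked into both libc_ndk.a and libc.so,
# the statically linked copy would never be initialized.
#
#   static unsigned long* g_auxv = nullptr;  // one copy per library linking this file
#
#   // Invoked (via the libc init functions) only for the copy inside libc.so:
#   void __libc_init_auxv_sketch(unsigned long* auxv) { g_auxv = auxv; }
#
#   unsigned long getauxval_sketch(unsigned long type) {
#     // The copy linked from libc_ndk.a never sees the init call, so g_auxv
#     // is still nullptr here and the first dereference crashes.
#     for (unsigned long* v = g_auxv; v[0] != 0 /* AT_NULL */; v += 2) {
#       if (v[0] == type) return v[1];
#     }
#     return 0;
#   }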
libc_cxa_src_files := \
bionic/__cxa_guard.cpp \
@@ -337,10 +331,13 @@ libc_upstream_openbsd_gdtoa_src_files_64 := \
$(libc_upstream_openbsd_gdtoa_src_files) \
upstream-openbsd/lib/libc/gdtoa/strtorQ.c \
# These two depend on getentropy_linux.cpp, which isn't in libc_ndk.a.
libc_upstream_openbsd_src_files := \
upstream-openbsd/lib/libc/compat-43/killpg.c \
upstream-openbsd/lib/libc/crypt/arc4random.c \
upstream-openbsd/lib/libc/crypt/arc4random_uniform.c \
libc_upstream_openbsd_ndk_src_files := \
upstream-openbsd/lib/libc/compat-43/killpg.c \
upstream-openbsd/lib/libc/gen/alarm.c \
upstream-openbsd/lib/libc/gen/ctype_.c \
upstream-openbsd/lib/libc/gen/daemon.c \
@@ -513,6 +510,29 @@ libc_upstream_openbsd_src_files := \
upstream-openbsd/lib/libc/string/wcsstr.c \
upstream-openbsd/lib/libc/string/wcswidth.c \
libc_pthread_src_files := \
bionic/pthread_atfork.cpp \
bionic/pthread_attr.cpp \
bionic/pthread_cond.cpp \
bionic/pthread_create.cpp \
bionic/pthread_detach.cpp \
bionic/pthread_equal.cpp \
bionic/pthread_exit.cpp \
bionic/pthread_getcpuclockid.cpp \
bionic/pthread_getschedparam.cpp \
bionic/pthread_gettid_np.cpp \
bionic/pthread_internals.cpp \
bionic/pthread_join.cpp \
bionic/pthread_key.cpp \
bionic/pthread_kill.cpp \
bionic/pthread_mutex.cpp \
bionic/pthread_once.cpp \
bionic/pthread_rwlock.cpp \
bionic/pthread_self.cpp \
bionic/pthread_setname_np.cpp \
bionic/pthread_setschedparam.cpp \
bionic/pthread_sigmask.cpp \
libc_arch_static_src_files := \
bionic/dl_iterate_phdr_static.cpp \
@@ -785,6 +805,51 @@ $(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_netbsd_src_files
include $(BUILD_STATIC_LIBRARY)
# ========================================================
# libc_openbsd_ndk.a - upstream OpenBSD C library code
# that can be safely included in the libc_ndk.a (doesn't
# contain any troublesome global data or constructors).
# ========================================================
#
# These files are built with the openbsd-compat.h header file
# automatically included.
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(libc_upstream_openbsd_ndk_src_files)
ifneq (,$(filter $(TARGET_ARCH),x86 x86_64))
# Clang has wrong long double size or LDBL_MANT_DIG, http://b/17163651.
LOCAL_CLANG := false
else
LOCAL_CLANG := $(use_clang)
endif
LOCAL_CFLAGS := \
$(libc_common_cflags) \
-Wno-sign-compare \
-Wno-uninitialized \
-Wno-unused-parameter \
-include openbsd-compat.h \
LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags)
LOCAL_C_INCLUDES := $(libc_common_c_includes) \
$(LOCAL_PATH)/private \
$(LOCAL_PATH)/upstream-openbsd/android/include \
$(LOCAL_PATH)/upstream-openbsd/lib/libc/include \
$(LOCAL_PATH)/upstream-openbsd/lib/libc/gdtoa/ \
LOCAL_MODULE := libc_openbsd_ndk
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
include $(BUILD_STATIC_LIBRARY)
# ========================================================
# libc_openbsd.a - upstream OpenBSD C library code
# ========================================================
@@ -899,11 +964,80 @@ LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
include $(BUILD_STATIC_LIBRARY)
# ========================================================
# libc_bionic_ndk.a - The portions of libc_bionic that can
# be safely used in libc_ndk.a (no troublesome global data
# or constructors).
# ========================================================
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(libc_bionic_ndk_src_files)
LOCAL_CFLAGS := $(libc_common_cflags) \
-Wframe-larger-than=2048 \
# ssse3-strcmp-slm.S does not compile with Clang.
LOCAL_CLANG_ASFLAGS_x86_64 += -no-integrated-as
# memcpy.S, memchr.S, etc. do not compile with Clang.
LOCAL_CLANG_ASFLAGS_arm += -no-integrated-as
LOCAL_CLANG_ASFLAGS_arm64 += -no-integrated-as
LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags) -Wold-style-cast
LOCAL_C_INCLUDES := $(libc_common_c_includes) bionic/libstdc++/include
LOCAL_MODULE := libc_bionic_ndk
LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_bionic_src_files))
include $(BUILD_STATIC_LIBRARY)
# ========================================================
# libc_pthread.a - pthreads parts that previously lived in
# libc_bionic.a. Relocated to their own library because
# they can't be included in libc_ndk.a (as the layout of
# pthread_t has changed over the years and has ABI
# compatibility issues).
# ========================================================
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(libc_pthread_src_files)
LOCAL_CFLAGS := $(libc_common_cflags) \
-Wframe-larger-than=2048 \
# ssse3-strcmp-slm.S does not compile with Clang.
LOCAL_CLANG_ASFLAGS_x86_64 += -no-integrated-as
# memcpy.S, memchr.S, etc. do not compile with Clang.
LOCAL_CLANG_ASFLAGS_arm += -no-integrated-as
LOCAL_CLANG_ASFLAGS_arm64 += -no-integrated-as
LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags) -Wold-style-cast
LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_MODULE := libc_pthread
LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CXX_STL := none
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
include $(BUILD_STATIC_LIBRARY)
# ========================================================
# libc_cxa.a - Things traditionally in libstdc++
# ========================================================
@@ -992,9 +1126,56 @@ include $(BUILD_STATIC_LIBRARY)
include $(CLEAR_VARS)
LOCAL_MODULE := libc_ndk
LOCAL_WHOLE_STATIC_LIBRARIES := libc_syscalls libm
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_CLANG := $(use_clang)
LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CFLAGS := $(libc_common_cflags) -fvisibility=hidden -O0
LOCAL_CPPFLAGS := $(libc_common_cppflags)
LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_ADDRESS_SANITIZER := false
LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
LOCAL_SYSTEM_SHARED_LIBRARIES :=
LOCAL_SRC_FILES := \
$(libc_common_src_files) \
$(libc_arch_dynamic_src_files) \
$(libc_ndk_stub_src_files) \
bionic/malloc_debug_common.cpp \
LOCAL_SRC_FILES_arm += \
arch-common/bionic/crtbegin_so.c \
arch-arm/bionic/atexit_legacy.c \
arch-common/bionic/crtend_so.S \
LOCAL_CFLAGS := $(libc_common_cflags) \
-DLIBC_STATIC \
LOCAL_WHOLE_STATIC_LIBRARIES := \
libc_bionic_ndk \
libc_cxa \
libc_freebsd \
libc_gdtoa \
libc_malloc \
libc_netbsd \
libc_openbsd_ndk \
libc_stack_protector \
libc_syscalls \
libc_tzcode \
libm \
LOCAL_WHOLE_STATIC_LIBRARIES_arm := libc_aeabi
LOCAL_CXX_STL := none
ifneq ($(MALLOC_IMPL),dlmalloc)
LOCAL_WHOLE_STATIC_LIBRARIES += libjemalloc
endif
$(eval $(call patch-up-arch-specific-flags,LOCAL_CFLAGS,libc_common_cflags))
$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_common_src_files))
$(eval $(call patch-up-arch-specific-flags,LOCAL_SRC_FILES,libc_arch_dynamic_src_files))
$(eval $(call patch-up-arch-specific-flags,LOCAL_ASFLAGS,LOCAL_CFLAGS))
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
include $(BUILD_STATIC_LIBRARY)
# ========================================================
@@ -1013,6 +1194,7 @@ LOCAL_CLANG := $(use_clang)
LOCAL_ADDITIONAL_DEPENDENCIES := $(libc_common_additional_dependencies)
LOCAL_WHOLE_STATIC_LIBRARIES := \
libc_bionic \
libc_bionic_ndk \
libc_cxa \
libc_dns \
libc_freebsd \
@@ -1020,6 +1202,8 @@ LOCAL_WHOLE_STATIC_LIBRARIES := \
libc_malloc \
libc_netbsd \
libc_openbsd \
libc_openbsd_ndk \
libc_pthread \
libc_stack_protector \
libc_syscalls \
libc_tzcode \
@@ -1140,6 +1324,10 @@ include $(CLEAR_VARS)
LOCAL_CFLAGS := $(libc_common_cflags)
LOCAL_CONLYFLAGS := $(libc_common_conlyflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags)
# TODO: This is to work around b/19059885. Remove after root cause is fixed
LOCAL_LDFLAGS_arm := -Wl,--hash-style=sysv
LOCAL_C_INCLUDES := $(libc_common_c_includes)
LOCAL_SRC_FILES := \
$(libc_arch_dynamic_src_files) \
@@ -1290,6 +1478,10 @@ include $(CLEAR_VARS)
LOCAL_C_INCLUDES := $(libc_common_c_includes) bionic/libstdc++/include
LOCAL_CFLAGS := $(libc_common_cflags)
LOCAL_CPPFLAGS := $(libc_common_cppflags)
# TODO: This is to work around b/19059885. Remove after root cause is fixed
LOCAL_LDFLAGS_arm := -Wl,--hash-style=both
LOCAL_SRC_FILES := $(libstdcxx_common_src_files)
LOCAL_MODULE:= libstdc++
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk


@@ -130,7 +130,7 @@ int fremovexattr(int, const char*) all
int __getdents64:getdents64(unsigned int, struct dirent*, unsigned int) arm,arm64,mips,mips64,x86,x86_64
int __openat:openat(int, const char*, int, mode_t) all
int faccessat(int, const char*, int, int) all
int ___faccessat:faccessat(int, const char*, int) all
int ___fchmodat:fchmodat(int, const char*, mode_t) all
int fchownat(int, const char*, uid_t, gid_t, int) all
int fstatat64|fstatat:fstatat64(int, const char*, struct stat*, int) arm,mips,x86


@@ -5,8 +5,6 @@
#
libc_bionic_src_files_arm += \
bionic/memchr.c \
bionic/memrchr.c \
bionic/strchr.cpp \
bionic/strnlen.c \
bionic/strrchr.cpp \
@@ -22,6 +20,8 @@ libc_freebsd_src_files_arm += \
upstream-freebsd/lib/libc/string/wmemmove.c \
libc_openbsd_src_files_arm += \
upstream-openbsd/lib/libc/string/memchr.c \
upstream-openbsd/lib/libc/string/memrchr.c \
upstream-openbsd/lib/libc/string/stpncpy.c \
upstream-openbsd/lib/libc/string/strlcat.c \
upstream-openbsd/lib/libc/string/strlcpy.c \


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(faccessat)
ENTRY(___faccessat)
mov ip, r7
ldr r7, =__NR_faccessat
swi #0
@@ -11,4 +11,5 @@ ENTRY(faccessat)
bxls lr
neg r0, r0
b __set_errno_internal
END(faccessat)
END(___faccessat)
.hidden ___faccessat


@@ -8,7 +8,6 @@ libc_bionic_src_files_arm64 += \
bionic/__memset_chk.cpp \
bionic/__strcpy_chk.cpp \
bionic/__strcat_chk.cpp \
bionic/memrchr.c \
bionic/strrchr.cpp \
libc_freebsd_src_files_arm64 += \
@@ -21,6 +20,7 @@ libc_freebsd_src_files_arm64 += \
upstream-freebsd/lib/libc/string/wmemcmp.c \
libc_openbsd_src_files_arm64 += \
upstream-openbsd/lib/libc/string/memrchr.c \
upstream-openbsd/lib/libc/string/stpncpy.c \
upstream-openbsd/lib/libc/string/strcat.c \
upstream-openbsd/lib/libc/string/strlcat.c \


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(faccessat)
ENTRY(___faccessat)
mov x8, __NR_faccessat
svc #0
@@ -11,4 +11,5 @@ ENTRY(faccessat)
b.hi __set_errno_internal
ret
END(faccessat)
END(___faccessat)
.hidden ___faccessat


@@ -1,3 +1,30 @@
/*
* Copyright (C) 2014-2015 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Copyright (c) 2001-2002 Opsycon AB (www.opsycon.se / www.opsycon.com)
*
@@ -94,23 +121,31 @@
#include <private/bionic_asm.h>
#include <machine/setjmp.h>
/* On Mips32, jmpbuf begins with optional 4-byte filler so that
* all saved FP regs are aligned on 8-byte boundary, despite this whole
* struct being mis-declared to users as an array of (4-byte) longs.
* All the following offsets are then from the rounded-up base addr
/* jmpbuf is declared to users as an array of longs, which is only
* 4-byte aligned in 32-bit builds. The Mips jmpbuf begins with a
* dynamically-sized 0- or 4-byte unused filler so that double-prec FP regs
* are saved to 8-byte-aligned mem cells.
* All the following jmpbuf offsets are from the rounded-DOWN base addr.
*/
/* Fields of same size on all MIPS abis: */
#define SC_MAGIC (0*4) /* 4 bytes, identify jmpbuf */
#define SC_MASK (1*4) /* 4 bytes, saved signal mask */
#define SC_FPSR (2*4) /* 4 bytes, floating point control/status reg */
/* filler2 (3*4) 4 bytes, pad to 8-byte boundary */
/* field: byte offset: size: */
/* dynam filler (0*4) 0-4 bytes of rounddown filler, DON'T TOUCH!!
often overlays user storage!! */
#define SC_MAGIC_OFFSET (1*4) /* 4 bytes, identify jmpbuf, first actual field */
#define SC_FLAG_OFFSET (2*4) /* 4 bytes, savesigs flag */
#define SC_FPSR_OFFSET (3*4) /* 4 bytes, floating point control/status reg */
/* following fields are 8-byte aligned */
#define SC_MASK_OFFSET (4*4) /* 16 bytes, mips32/mips64 version of sigset_t */
#define SC_SPARE_OFFSET (8*4) /* 8 bytes, reserved for future uses */
/* Registers that are 4-byte on mips32 o32, and 8-byte on mips64 n64 abi */
#define SC_REGS_SAVED 12 /* ra,gp,sp,s0-s8 */
#define SC_REGS (4*4) /* SC_REGS_SAVED*REGSZ bytes */
#define SC_REGS_OFFSET (10*4) /* SC_REGS_BYTES */
#define SC_REGS_SAVED 12 /*regs*/ /* ra,s0-s8,gp,sp */
#define SC_REGS_BYTES (SC_REGS_SAVED*REGSZ)
#define SC_REGS SC_REGS_OFFSET
/* Floating pt registers are 8-bytes on all abis,
/* Double floating pt registers are 8-bytes on all abis,
* but the number of saved fp regs varies for o32/n32 versus n64 abis:
*/
@@ -120,22 +155,20 @@
#define SC_FPREGS_SAVED 6 /* even fp regs f20,f22,f24,f26,f28,f30 */
#endif
#define SC_FPREGS (SC_REGS + SC_REGS_SAVED*REGSZ) /* SC_FPREGS_SAVED*REGSZ_FP bytes */
#define SC_FPREGS_OFFSET (SC_REGS_OFFSET + SC_REGS_BYTES) /* SC_FPREGS_BYTES */
#define SC_FPREGS_BYTES (SC_FPREGS_SAVED*REGSZ_FP)
#define SC_FPREGS SC_FPREGS_OFFSET
#define SC_BYTES (SC_FPREGS + SC_FPREGS_SAVED*REGSZ_FP)
#define SC_LONGS (SC_BYTES/REGSZ)
#define SC_TOTAL_BYTES (SC_FPREGS_OFFSET + SC_FPREGS_BYTES)
#define SC_TOTAL_LONGS (SC_TOTAL_BYTES/REGSZ)
#ifdef __LP64__
/* SC_LONGS is 22, so _JBLEN should be 22 or larger */
#else
/* SC_LONGS is 28, but must also allocate dynamic-roundup filler.
so _JBLEN should be 29 or larger */
#if SC_TOTAL_LONGS > _JBLEN
#error _JBLEN is too small
#endif
/*
* _setjmp, _longjmp (restoring signal state)
*
* GPOFF and FRAMESIZE must be the same for both _setjmp and _longjmp!
* GPOFF and FRAMESIZE must be the same for all setjmp/longjmp routines
*
*/
@@ -145,30 +178,33 @@ A0OFF= FRAMESZ-3*REGSZ
GPOFF= FRAMESZ-2*REGSZ
RAOFF= FRAMESZ-1*REGSZ
NON_LEAF(setjmp, FRAMESZ, ra)
NON_LEAF(sigsetjmp, FRAMESZ, ra)
.mask 0x80000000, RAOFF
PTR_SUBU sp, FRAMESZ # allocate stack frame
SETUP_GP64(GPOFF, setjmp)
SETUP_GP64(GPOFF, sigsetjmp)
SAVE_GP(GPOFF)
.set reorder
setjmp_common:
#ifndef __LP64__
addiu a0, 7 # roundup jmpbuf addr to 8-byte boundary
li t0, ~7
and a0, t0
li t0, ~7
and a0, t0 # round jmpbuf addr DOWN to 8-byte boundary
#endif
sw a1, SC_FLAG_OFFSET(a0) # save savesigs flag
beqz a1, 1f # do saving of signal mask?
REG_S ra, RAOFF(sp) # save state
REG_S ra, RAOFF(sp) # spill state
REG_S a0, A0OFF(sp)
move a0, zero # get current signal mask
jal sigblock
# call sigprocmask(int how ignored, sigset_t* null, sigset_t* SC_MASK(a0)):
LA a2, SC_MASK_OFFSET(a0) # gets current signal mask
li a0, 0 # how; ignored when new mask is null
li a1, 0 # null new mask
jal sigprocmask # get current signal mask
REG_L a0, A0OFF(sp)
REG_L ra, RAOFF(sp)
REG_S v0, SC_MASK(a0) # save sc_mask = sigblock(0)
1:
li v0, 0xACEDBADE # sigcontext magic number
sw v0, SC_MAGIC(a0)
sw v0, SC_MAGIC_OFFSET(a0)
# callee-saved long-sized regs:
REG_S ra, SC_REGS+0*REGSZ(a0)
REG_S s0, SC_REGS+1*REGSZ(a0)
@@ -181,9 +217,9 @@ NON_LEAF(setjmp, FRAMESZ, ra)
REG_S s7, SC_REGS+8*REGSZ(a0)
REG_S s8, SC_REGS+9*REGSZ(a0)
REG_L v0, GPOFF(sp)
REG_S v0, SC_REGS+10*REGSZ(a0)
REG_S v0, SC_REGS+10*REGSZ(a0) # save gp
PTR_ADDU v0, sp, FRAMESZ
REG_S v0, SC_REGS+11*REGSZ(a0)
REG_S v0, SC_REGS+11*REGSZ(a0) # save orig sp
cfc1 v0, $31
@@ -199,7 +235,7 @@ NON_LEAF(setjmp, FRAMESZ, ra)
s.d $f31, SC_FPREGS+7*REGSZ_FP(a0)
#else
# callee-saved fp regs on mips o32 ABI are
# the even-numbered fp regs $f20,$f22,...$f30
# the even-numbered double fp regs $f20,$f22,...$f30
s.d $f20, SC_FPREGS+0*REGSZ_FP(a0)
s.d $f22, SC_FPREGS+1*REGSZ_FP(a0)
s.d $f24, SC_FPREGS+2*REGSZ_FP(a0)
@@ -207,37 +243,68 @@ NON_LEAF(setjmp, FRAMESZ, ra)
s.d $f28, SC_FPREGS+4*REGSZ_FP(a0)
s.d $f30, SC_FPREGS+5*REGSZ_FP(a0)
#endif
sw v0, SC_FPSR(a0)
sw v0, SC_FPSR_OFFSET(a0)
move v0, zero
RESTORE_GP64
PTR_ADDU sp, FRAMESZ
j ra
END(setjmp)
END(sigsetjmp)
NON_LEAF(longjmp, FRAMESZ, ra)
# Alternate entry points:
NON_LEAF(setjmp, FRAMESZ, ra)
.mask 0x80000000, RAOFF
PTR_SUBU sp, FRAMESZ
SETUP_GP64(GPOFF, longjmp)
SETUP_GP64(GPOFF, setjmp) # can't share sigsetjmp's gp code
SAVE_GP(GPOFF)
.set reorder
li a1, 1 # save/restore signals state
b setjmp_common # tail call
END(setjmp)
NON_LEAF(_setjmp, FRAMESZ, ra)
.mask 0x80000000, RAOFF
PTR_SUBU sp, FRAMESZ
SETUP_GP64(GPOFF, _setjmp) # can't share sigsetjmp's gp code
SAVE_GP(GPOFF)
.set reorder
li a1, 0 # don't save/restore signals
b setjmp_common # tail call
END(_setjmp)
NON_LEAF(siglongjmp, FRAMESZ, ra)
.mask 0x80000000, RAOFF
PTR_SUBU sp, FRAMESZ
SETUP_GP64(GPOFF, siglongjmp)
SAVE_GP(GPOFF)
.set reorder
#ifndef __LP64__
addiu a0, 7 # roundup jmpbuf addr to 8-byte boundary
li t0, ~7
and a0, t0
li t0, ~7
and a0, t0 # round jmpbuf addr DOWN to 8-byte boundary
#endif
lw v0, SC_MAGIC_OFFSET(a0)
li t0, 0xACEDBADE
bne v0, t0, longjmp_botch # jump if error
REG_S a1, A1OFF(sp)
lw t0, SC_FLAG_OFFSET(a0) # get savesigs flag
beqz t0, 1f # restore signal mask?
REG_S a1, A1OFF(sp) # temp spill
REG_S a0, A0OFF(sp)
lw a0, SC_MASK(a0)
jal sigsetmask
# call sigprocmask(int how SIG_SETMASK, sigset_t* SC_MASK(a0), sigset_t* null):
LA a1, SC_MASK_OFFSET(a0) # signals being restored
li a0, 3 # mips SIG_SETMASK
li a2, 0 # null
jal sigprocmask # restore signal mask
REG_L a0, A0OFF(sp)
REG_L a1, A1OFF(sp)
lw v0, SC_MAGIC(a0)
li t0, 0xACEDBADE
bne v0, t0, longjmp_botch # jump if error
1:
# callee-saved long-sized regs:
REG_L ra, SC_REGS+0*REGSZ(a0)
REG_L s0, SC_REGS+1*REGSZ(a0)
@@ -252,8 +319,8 @@ NON_LEAF(longjmp, FRAMESZ, ra)
REG_L gp, SC_REGS+10*REGSZ(a0)
REG_L sp, SC_REGS+11*REGSZ(a0)
lw v0, SC_FPSR(a0)
ctc1 v0, $31
lw v0, SC_FPSR_OFFSET(a0)
ctc1 v0, $31 # restore old fr mode before fp values
#ifdef __LP64__
# callee-saved fp regs on mips n64 ABI are $f24..$f31
l.d $f24, SC_FPREGS+0*REGSZ_FP(a0)
@@ -266,7 +333,7 @@ NON_LEAF(longjmp, FRAMESZ, ra)
l.d $f31, SC_FPREGS+7*REGSZ_FP(a0)
#else
# callee-saved fp regs on mips o32 ABI are
# the even-numbered fp regs $f20,$f22,...$f30
# the even-numbered double fp regs $f20,$f22,...$f30
l.d $f20, SC_FPREGS+0*REGSZ_FP(a0)
l.d $f22, SC_FPREGS+1*REGSZ_FP(a0)
l.d $f24, SC_FPREGS+2*REGSZ_FP(a0)
@@ -278,192 +345,19 @@ NON_LEAF(longjmp, FRAMESZ, ra)
li a1, 1 # never return 0!
1:
move v0, a1
j ra
j ra # return to setjmp call site
longjmp_botch:
jal longjmperror
jal abort
RESTORE_GP64
PTR_ADDU sp, FRAMESZ
END(longjmp)
/*
* _setjmp, _longjmp (not restoring signal state)
*
* GPOFF and FRAMESIZE must be the same for both _setjmp and _longjmp!
*
*/
FRAMESZ= MKFSIZ(0,4)
GPOFF= FRAMESZ-2*REGSZ
LEAF(_setjmp, FRAMESZ)
PTR_SUBU sp, FRAMESZ
SETUP_GP64(GPOFF, _setjmp)
SAVE_GP(GPOFF)
.set reorder
#ifndef __LP64__
addiu a0, 7 # roundup jmpbuf addr to 8-byte boundary
li t0, ~7
and a0, t0
#endif
# SC_MASK is unused here
li v0, 0xACEDBADE # sigcontext magic number
sw v0, SC_MAGIC(a0)
# callee-saved long-sized regs:
REG_S ra, SC_REGS+0*REGSZ(a0)
REG_S s0, SC_REGS+1*REGSZ(a0)
REG_S s1, SC_REGS+2*REGSZ(a0)
REG_S s2, SC_REGS+3*REGSZ(a0)
REG_S s3, SC_REGS+4*REGSZ(a0)
REG_S s4, SC_REGS+5*REGSZ(a0)
REG_S s5, SC_REGS+6*REGSZ(a0)
REG_S s6, SC_REGS+7*REGSZ(a0)
REG_S s7, SC_REGS+8*REGSZ(a0)
REG_S s8, SC_REGS+9*REGSZ(a0)
REG_L v0, GPOFF(sp)
REG_S v0, SC_REGS+10*REGSZ(a0)
PTR_ADDU v0, sp, FRAMESZ
REG_S v0, SC_REGS+11*REGSZ(a0)
cfc1 v0, $31
#ifdef __LP64__
# callee-saved fp regs on mips n64 ABI are $f24..$f31
s.d $f24, SC_FPREGS+0*REGSZ_FP(a0)
s.d $f25, SC_FPREGS+1*REGSZ_FP(a0)
s.d $f26, SC_FPREGS+2*REGSZ_FP(a0)
s.d $f27, SC_FPREGS+3*REGSZ_FP(a0)
s.d $f28, SC_FPREGS+4*REGSZ_FP(a0)
s.d $f29, SC_FPREGS+5*REGSZ_FP(a0)
s.d $f30, SC_FPREGS+6*REGSZ_FP(a0)
s.d $f31, SC_FPREGS+7*REGSZ_FP(a0)
#else
# callee-saved fp regs on mips o32 ABI are
# the even-numbered fp regs $f20,$f22,...$f30
s.d $f20, SC_FPREGS+0*REGSZ_FP(a0)
s.d $f22, SC_FPREGS+1*REGSZ_FP(a0)
s.d $f24, SC_FPREGS+2*REGSZ_FP(a0)
s.d $f26, SC_FPREGS+3*REGSZ_FP(a0)
s.d $f28, SC_FPREGS+4*REGSZ_FP(a0)
s.d $f30, SC_FPREGS+5*REGSZ_FP(a0)
#endif
sw v0, SC_FPSR(a0)
move v0, zero
RESTORE_GP64
PTR_ADDU sp, FRAMESZ
j ra
END(_setjmp)
LEAF(_longjmp, FRAMESZ)
PTR_SUBU sp, FRAMESZ
SETUP_GP64(GPOFF, _longjmp)
SAVE_GP(GPOFF)
.set reorder
#ifndef __LP64__
addiu a0, 7 # roundup jmpbuf addr to 8-byte boundary
li t0, ~7
and a0, t0
#endif
# SC_MASK is unused here
lw v0, SC_MAGIC(a0)
li t0, 0xACEDBADE
bne v0, t0, _longjmp_botch # jump if error
# callee-saved long-sized regs:
REG_L ra, SC_REGS+0*REGSZ(a0)
REG_L s0, SC_REGS+1*REGSZ(a0)
REG_L s1, SC_REGS+2*REGSZ(a0)
REG_L s2, SC_REGS+3*REGSZ(a0)
REG_L s3, SC_REGS+4*REGSZ(a0)
REG_L s4, SC_REGS+5*REGSZ(a0)
REG_L s5, SC_REGS+6*REGSZ(a0)
REG_L s6, SC_REGS+7*REGSZ(a0)
REG_L s7, SC_REGS+8*REGSZ(a0)
REG_L s8, SC_REGS+9*REGSZ(a0)
REG_L gp, SC_REGS+10*REGSZ(a0)
REG_L sp, SC_REGS+11*REGSZ(a0)
lw v0, SC_FPSR(a0)
ctc1 v0, $31
#ifdef __LP64__
# callee-saved fp regs on mips n64 ABI are $f24..$f31
l.d $f24, SC_FPREGS+0*REGSZ_FP(a0)
l.d $f25, SC_FPREGS+1*REGSZ_FP(a0)
l.d $f26, SC_FPREGS+2*REGSZ_FP(a0)
l.d $f27, SC_FPREGS+3*REGSZ_FP(a0)
l.d $f28, SC_FPREGS+4*REGSZ_FP(a0)
l.d $f29, SC_FPREGS+5*REGSZ_FP(a0)
l.d $f30, SC_FPREGS+6*REGSZ_FP(a0)
l.d $f31, SC_FPREGS+7*REGSZ_FP(a0)
#else
# callee-saved fp regs on mips o32 ABI are
# the even-numbered fp regs $f20,$f22,...$f30
l.d $f20, SC_FPREGS+0*REGSZ_FP(a0)
l.d $f22, SC_FPREGS+1*REGSZ_FP(a0)
l.d $f24, SC_FPREGS+2*REGSZ_FP(a0)
l.d $f26, SC_FPREGS+3*REGSZ_FP(a0)
l.d $f28, SC_FPREGS+4*REGSZ_FP(a0)
l.d $f30, SC_FPREGS+5*REGSZ_FP(a0)
#endif
bne a1, zero, 1f
li a1, 1 # never return 0!
1:
move v0, a1
j ra
_longjmp_botch:
jal longjmperror
jal abort
RESTORE_GP64
PTR_ADDU sp, FRAMESZ
END(_longjmp)
/*
* trampolines for sigsetjmp and siglongjmp save and restore mask.
*
*/
FRAMESZ= MKFSIZ(1,1)
GPOFF= FRAMESZ-2*REGSZ
LEAF(sigsetjmp, FRAMESZ)
PTR_SUBU sp, FRAMESZ
SETUP_GP64(GPOFF, sigsetjmp)
.set reorder
sw a1, _JBLEN*REGSZ(a0) # save "savemask"
bne a1, 0x0, 1f # do saving of signal mask?
LA t9, _setjmp
RESTORE_GP64
PTR_ADDU sp, FRAMESZ
jr t9
1: LA t9, setjmp
RESTORE_GP64
PTR_ADDU sp, FRAMESZ
jr t9
END(sigsetjmp)
LEAF(siglongjmp, FRAMESZ)
PTR_SUBU sp, FRAMESZ
SETUP_GP64(GPOFF, siglongjmp)
.set reorder
lw t0, _JBLEN*REGSZ(a0) # get "savemask"
bne t0, 0x0, 1f # restore signal mask?
LA t9, _longjmp
RESTORE_GP64
PTR_ADDU sp, FRAMESZ
jr t9
1:
LA t9, longjmp
RESTORE_GP64
PTR_ADDU sp, FRAMESZ
jr t9
END(siglongjmp)
.globl longjmp
.type longjmp, @function
.equ longjmp, siglongjmp # alias for siglongjmp
.globl _longjmp
.type _longjmp, @function
.equ _longjmp, siglongjmp # alias for siglongjmp


@@ -6,9 +6,10 @@
#define _MIPS_SETJMP_H_
#ifdef __LP64__
#define _JBLEN 22 /* size, in 8-byte longs, of a mips64 jmp_buf */
#define _JBLEN 25 /* size, in 8-byte longs, of a mips64 jmp_buf/sigjmp_buf */
#else
#define _JBLEN 29 /* size, in 4-byte longs, of a mips32 jmp_buf */
#define _JBLEN 157 /* historical size, in 4-byte longs, of a mips32 jmp_buf */
/* actual used size is 34 */
#endif
#endif /* !_MIPS_SETJMP_H_ */


@@ -10,8 +10,6 @@ libc_bionic_src_files_mips += \
bionic/__memset_chk.cpp \
bionic/__strcpy_chk.cpp \
bionic/__strcat_chk.cpp \
bionic/memchr.c \
bionic/memrchr.c \
bionic/strchr.cpp \
bionic/strnlen.c \
bionic/strrchr.cpp \
@@ -27,7 +25,9 @@ libc_freebsd_src_files_mips += \
upstream-freebsd/lib/libc/string/wmemmove.c \
libc_openbsd_src_files_mips += \
upstream-openbsd/lib/libc/string/memchr.c \
upstream-openbsd/lib/libc/string/memmove.c \
upstream-openbsd/lib/libc/string/memrchr.c \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/stpncpy.c \
upstream-openbsd/lib/libc/string/strcat.c \


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(faccessat)
ENTRY(___faccessat)
.set noreorder
.cpload t9
li v0, __NR_faccessat
@@ -16,4 +16,5 @@ ENTRY(faccessat)
j t9
nop
.set reorder
END(faccessat)
END(___faccessat)
.hidden ___faccessat


@@ -29,7 +29,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <asm/unistd.h>
#include <unistd.h>
struct kernel_stat {
unsigned int st_dev;


@@ -9,8 +9,6 @@ libc_bionic_src_files_mips64 += \
bionic/__memset_chk.cpp \
bionic/__strcpy_chk.cpp \
bionic/__strcat_chk.cpp \
bionic/memchr.c \
bionic/memrchr.c \
bionic/strchr.cpp \
bionic/strnlen.c \
bionic/strrchr.cpp \
@@ -30,7 +28,9 @@ libc_freebsd_src_files_mips64 += \
upstream-freebsd/lib/libc/string/wmemmove.c \
libc_openbsd_src_files_mips64 += \
upstream-openbsd/lib/libc/string/memchr.c \
upstream-openbsd/lib/libc/string/memmove.c \
upstream-openbsd/lib/libc/string/memrchr.c \
upstream-openbsd/lib/libc/string/stpcpy.c \
upstream-openbsd/lib/libc/string/stpncpy.c \
upstream-openbsd/lib/libc/string/strcat.c \


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(faccessat)
ENTRY(___faccessat)
.set push
.set noreorder
li v0, __NR_faccessat
@@ -22,4 +22,5 @@ ENTRY(faccessat)
j t9
move ra, t0
.set pop
END(faccessat)
END(___faccessat)
.hidden ___faccessat


@@ -2,7 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(faccessat)
ENTRY(___faccessat)
pushl %ebx
.cfi_def_cfa_offset 8
.cfi_rel_offset ebx, 0
@@ -12,13 +12,9 @@ ENTRY(faccessat)
pushl %edx
.cfi_adjust_cfa_offset 4
.cfi_rel_offset edx, 0
pushl %esi
.cfi_adjust_cfa_offset 4
.cfi_rel_offset esi, 0
mov 20(%esp), %ebx
mov 24(%esp), %ecx
mov 28(%esp), %edx
mov 32(%esp), %esi
mov 16(%esp), %ebx
mov 20(%esp), %ecx
mov 24(%esp), %edx
movl $__NR_faccessat, %eax
int $0x80
cmpl $-MAX_ERRNO, %eax
@@ -28,9 +24,9 @@ ENTRY(faccessat)
call __set_errno_internal
addl $4, %esp
1:
popl %esi
popl %edx
popl %ecx
popl %ebx
ret
END(faccessat)
END(___faccessat)
.hidden ___faccessat


@@ -2,8 +2,7 @@
#include <private/bionic_asm.h>
ENTRY(faccessat)
movq %rcx, %r10
ENTRY(___faccessat)
movl $__NR_faccessat, %eax
syscall
cmpq $-MAX_ERRNO, %rax
@@ -13,4 +12,5 @@ ENTRY(faccessat)
call __set_errno_internal
1:
ret
END(faccessat)
END(___faccessat)
.hidden ___faccessat


@@ -9,8 +9,6 @@ libc_bionic_src_files_x86_64 += \
bionic/__memset_chk.cpp \
bionic/__strcpy_chk.cpp \
bionic/__strcat_chk.cpp \
bionic/memchr.c \
bionic/memrchr.c \
bionic/strchr.cpp \
bionic/strnlen.c \
bionic/strrchr.cpp \
@@ -25,6 +23,10 @@ libc_freebsd_src_files_x86_64 += \
upstream-freebsd/lib/libc/string/wmemcmp.c \
upstream-freebsd/lib/libc/string/wmemmove.c \
libc_openbsd_src_files_x86_64 += \
upstream-openbsd/lib/libc/string/memchr.c \
upstream-openbsd/lib/libc/string/memrchr.c \
#
# Inherently architecture-specific code.
#


@@ -26,8 +26,19 @@
* SUCH DAMAGE.
*/
#include <fcntl.h>
#include <unistd.h>
int dup2(int old_fd, int new_fd) {
// If old_fd is equal to new_fd and a valid file descriptor, dup2 returns
// old_fd without closing it. This is not true of dup3, so we have to
// handle this case ourselves.
if (old_fd == new_fd) {
if (fcntl(old_fd, F_GETFD) == -1) {
return -1;
}
return old_fd;
}
return dup3(old_fd, new_fd, 0);
}
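
// A quick way to see why the old_fd == new_fd case needs special handling
// (hypothetical test code, not part of the commit): POSIX requires
// dup2(fd, fd) to return fd when fd is valid, but dup3(fd, fd, 0) fails
// with EINVAL, so the wrapper cannot simply forward every call.
//
//   #include <errno.h>
//   #include <unistd.h>
//
//   void dup2_edge_case(int fd) {  // assume fd is a valid descriptor
//     int rc = dup2(fd, fd);   // rc == fd: success, fd left open
//     rc = dup3(fd, fd, 0);    // rc == -1 with errno == EINVAL
//     (void) rc;
//   }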

libc/bionic/faccessat.cpp (new file, 59 lines)

@@ -0,0 +1,59 @@
/*
* Copyright (C) 2015 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
extern "C" int ___faccessat(int, const char*, int);
int faccessat(int dirfd, const char* pathname, int mode, int flags) {
// "The mode specifies the accessibility check(s) to be performed,
// and is either the value F_OK, or a mask consisting of the
// bitwise OR of one or more of R_OK, W_OK, and X_OK."
if ((mode != F_OK) && ((mode & ~(R_OK | W_OK | X_OK)) != 0) &&
((mode & (R_OK | W_OK | X_OK)) == 0)) {
errno = EINVAL;
return -1;
}
if (flags != 0) {
// We deliberately don't support AT_SYMLINK_NOFOLLOW, a glibc
// only feature which is error prone and dangerous.
//
// AT_EACCESS isn't supported either. Android doesn't have setuid
// programs, and never runs code with euid!=uid. It could be
// implemented in an expensive way, following the model at
// https://gitlab.com/bminor/musl/commit/0a05eace163cee9b08571d2ff9d90f5e82d9c228
// but not worth it.
errno = EINVAL;
return -1;
}
return ___faccessat(dirfd, pathname, mode);
}
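
// Two illustrative calls against the wrapper above (hypothetical usage, not
// part of the commit):
//
//   #include <fcntl.h>
//   #include <unistd.h>
//
//   void faccessat_usage() {
//     // Forwarded to the kernel via the hidden ___faccessat stub:
//     faccessat(AT_FDCWD, "/system/bin/sh", R_OK | X_OK, 0);
//     // Rejected by the wrapper with EINVAL; never reaches the kernel:
//     faccessat(AT_FDCWD, "/system/bin/sh", R_OK, AT_SYMLINK_NOFOLLOW);
//   }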


@@ -88,7 +88,6 @@ void __libc_init_tls(KernelArgumentBlock& args) {
// The main thread has no mmap allocated space for stack or pthread_internal_t.
main_thread.mmap_size = 0;
pthread_attr_init(&main_thread.attr);
main_thread.attr.flags = PTHREAD_ATTR_FLAG_MAIN_THREAD;
main_thread.attr.guard_size = 0; // The main thread has no guard page.
main_thread.attr.stack_size = 0; // User code should never see this; we'll compute it when asked.
// TODO: the main thread's sched_policy and sched_priority need to be queried.


@@ -58,15 +58,18 @@ struct __locale_t {
DISALLOW_COPY_AND_ASSIGN(__locale_t);
};
size_t __ctype_get_mb_cur_max() {
locale_t l = uselocale(NULL);
if (l == LC_GLOBAL_LOCALE) {
return __bionic_current_locale_is_utf8 ? 4 : 1;
} else {
return l->mb_cur_max;
}
}
static pthread_once_t g_locale_once = PTHREAD_ONCE_INIT;
static lconv g_locale;
// We don't use pthread_once for this so that we know when the resource (a TLS slot) will be taken.
static pthread_key_t g_uselocale_key;
__attribute__((constructor)) static void __bionic_tls_uselocale_key_init() {
pthread_key_create(&g_uselocale_key, NULL);
}
static void __locale_init() {
g_locale.decimal_point = const_cast<char*>(".");
@@ -97,15 +100,6 @@ static void __locale_init() {
g_locale.int_n_sign_posn = CHAR_MAX;
}
size_t __ctype_get_mb_cur_max() {
locale_t l = reinterpret_cast<locale_t>(pthread_getspecific(g_uselocale_key));
if (l == nullptr || l == LC_GLOBAL_LOCALE) {
return __bionic_current_locale_is_utf8 ? 4 : 1;
} else {
return l->mb_cur_max;
}
}
static bool __is_supported_locale(const char* locale) {
return (strcmp(locale, "") == 0 ||
strcmp(locale, "C") == 0 ||
@@ -162,7 +156,16 @@ char* setlocale(int category, const char* locale_name) {
return const_cast<char*>(__bionic_current_locale_is_utf8 ? "C.UTF-8" : "C");
}
// We can't use a constructor to create g_uselocale_key, because it may be used in constructors.
static pthread_once_t g_uselocale_once = PTHREAD_ONCE_INIT;
static pthread_key_t g_uselocale_key;
static void g_uselocale_key_init() {
pthread_key_create(&g_uselocale_key, NULL);
}
locale_t uselocale(locale_t new_locale) {
pthread_once(&g_uselocale_once, g_uselocale_key_init);
locale_t old_locale = static_cast<locale_t>(pthread_getspecific(g_uselocale_key));
// If this is the first call to uselocale(3) on this thread, we return LC_GLOBAL_LOCALE.
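
// The hunk above moves key creation from a static constructor to
// pthread_once, since uselocale(3) may itself run inside constructors. The
// idiom in isolation (a sketch, not the bionic code):
//
//   #include <pthread.h>
//
//   static pthread_once_t g_once = PTHREAD_ONCE_INIT;
//   static pthread_key_t g_key;
//
//   static void key_init() { pthread_key_create(&g_key, NULL); }
//
//   void* get_thread_value() {
//     pthread_once(&g_once, key_init);  // safe even when called from a constructor
//     return pthread_getspecific(g_key);
//   }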


@@ -1,5 +1,5 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* Copyright (C) 2015 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -25,22 +25,9 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <stddef.h>
#include <string.h>
void *memchr(const void *s, int c, size_t n)
{
const unsigned char* p = s;
const unsigned char* end = p + n;
for (;;) {
if (p >= end || p[0] == c) break; p++;
if (p >= end || p[0] == c) break; p++;
if (p >= end || p[0] == c) break; p++;
if (p >= end || p[0] == c) break; p++;
}
if (p >= end)
return NULL;
else
return (void*) p;
void* mempcpy(void* dst, const void* src, size_t n) {
return reinterpret_cast<char*>(memcpy(dst, src, n)) + n;
}
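
// mempcpy returns a pointer just past the copied bytes, which makes chained
// concatenation straightforward (hypothetical usage, not part of the commit):
//
//   #include <string.h>
//
//   void join_path(char* out, const char* dir, const char* name) {
//     // Caller guarantees out is large enough for "dir/name" plus the NUL.
//     char* p = reinterpret_cast<char*>(mempcpy(out, dir, strlen(dir)));
//     *p++ = '/';
//     p = reinterpret_cast<char*>(mempcpy(p, name, strlen(name)));
//     *p = '\0';
//   }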


@@ -26,11 +26,11 @@
* SUCH DAMAGE.
*/
// This file perpetuates the mistakes of the past, but only for 32-bit targets.
#if !defined(__LP64__)
// This file perpetuates the mistakes of the past.
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <signal.h>
@@ -45,6 +45,11 @@
#include <unistd.h>
#include <wchar.h>
#include "private/libc_logging.h"
// This part is only for 32-bit targets.
#if !defined(__LP64__)
// These were accidentally declared in <unistd.h> because we stupidly used to inline
// getpagesize() and __getpageshift(). Needed for backwards compatibility with old NDK apps.
extern "C" {
@@ -341,4 +346,18 @@ extern "C" void* dlmalloc(size_t size) {
return malloc(size);
}
#endif
#endif // !defined(__LP64__)
// This is never implemented in bionic, only needed for ABI compatibility with the NDK.
extern "C" char* getusershell() {
return NULL;
}
// This is never implemented in bionic, only needed for ABI compatibility with the NDK.
extern "C" void setusershell() { }
// This is never implemented in bionic, only needed for ABI compatibility with the NDK.
extern "C" void endusershell() { }
// This is never implemented in bionic, only needed for ABI compatibility with the NDK.
extern "C" void endpwent() { }


@@ -152,9 +152,6 @@ static int __pthread_attr_getstack_main_thread(void** stack_base, size_t* stack_
}
int pthread_attr_getstack(const pthread_attr_t* attr, void** stack_base, size_t* stack_size) {
if ((attr->flags & PTHREAD_ATTR_FLAG_MAIN_THREAD) != 0) {
return __pthread_attr_getstack_main_thread(stack_base, stack_size);
}
*stack_base = attr->stack_base;
*stack_size = attr->stack_size;
return 0;
@@ -171,7 +168,13 @@ int pthread_attr_getguardsize(const pthread_attr_t* attr, size_t* guard_size) {
}
int pthread_getattr_np(pthread_t t, pthread_attr_t* attr) {
*attr = reinterpret_cast<pthread_internal_t*>(t)->attr;
pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(t);
*attr = thread->attr;
// The main thread's stack information is not stored in thread->attr, and we need to
// collect that at runtime.
if (thread->tid == getpid()) {
return __pthread_attr_getstack_main_thread(&attr->stack_base, &attr->stack_size);
}
return 0;
}


@@ -41,6 +41,13 @@
#include "private/bionic_time_conversions.h"
#include "private/bionic_tls.h"
// XXX *technically* there is a race condition that could allow
// XXX a signal to be missed. If thread A is preempted in _wait()
// XXX after unlocking the mutex and before waiting, and if other
// XXX threads call signal or broadcast UINT_MAX/2 times (exactly),
// XXX before thread A is scheduled again and calls futex_wait(),
// XXX then the signal will be lost.
// We use one bit in pthread_condattr_t (long) values as the 'shared' flag
// and one bit for the clock type (CLOCK_REALTIME is ((clockid_t) 1), and
// CLOCK_MONOTONIC is ((clockid_t) 0).). The rest of the bits are a counter.
@@ -57,7 +64,6 @@
#define COND_GET_CLOCK(c) (((c) & COND_CLOCK_MASK) >> 1)
#define COND_SET_CLOCK(attr, c) ((attr) | (c << 1))
int pthread_condattr_init(pthread_condattr_t* attr) {
*attr = 0;
*attr |= PTHREAD_PROCESS_PRIVATE;
@@ -98,47 +104,50 @@ int pthread_condattr_destroy(pthread_condattr_t* attr) {
return 0;
}
static inline atomic_uint* COND_TO_ATOMIC_POINTER(pthread_cond_t* cond) {
static_assert(sizeof(atomic_uint) == sizeof(cond->value),
"cond->value should actually be atomic_uint in implementation.");
struct pthread_cond_internal_t {
atomic_uint state;
// We prefer casting to atomic_uint instead of declaring cond->value to be atomic_uint directly.
// Because using the second method pollutes pthread.h, and causes an error when compiling libcxx.
return reinterpret_cast<atomic_uint*>(&cond->value);
bool process_shared() {
return COND_IS_SHARED(atomic_load_explicit(&state, memory_order_relaxed));
}
int get_clock() {
return COND_GET_CLOCK(atomic_load_explicit(&state, memory_order_relaxed));
}
#if defined(__LP64__)
char __reserved[44];
#endif
};
static pthread_cond_internal_t* __get_internal_cond(pthread_cond_t* cond_interface) {
static_assert(sizeof(pthread_cond_t) == sizeof(pthread_cond_internal_t),
"pthread_cond_t should actually be pthread_cond_internal_t in implementation.");
return reinterpret_cast<pthread_cond_internal_t*>(cond_interface);
}
// XXX *technically* there is a race condition that could allow
// XXX a signal to be missed. If thread A is preempted in _wait()
// XXX after unlocking the mutex and before waiting, and if other
// XXX threads call signal or broadcast UINT_MAX/2 times (exactly),
// XXX before thread A is scheduled again and calls futex_wait(),
// XXX then the signal will be lost.
int pthread_cond_init(pthread_cond_t* cond, const pthread_condattr_t* attr) {
atomic_uint* cond_value_ptr = COND_TO_ATOMIC_POINTER(cond);
unsigned int init_value = 0;
int pthread_cond_init(pthread_cond_t* cond_interface, const pthread_condattr_t* attr) {
pthread_cond_internal_t* cond = __get_internal_cond(cond_interface);
unsigned int init_state = 0;
if (attr != NULL) {
init_value = (*attr & COND_FLAGS_MASK);
init_state = (*attr & COND_FLAGS_MASK);
}
atomic_init(cond_value_ptr, init_value);
atomic_init(&cond->state, init_state);
return 0;
}
int pthread_cond_destroy(pthread_cond_t* cond) {
atomic_uint* cond_value_ptr = COND_TO_ATOMIC_POINTER(cond);
atomic_store_explicit(cond_value_ptr, 0xdeadc04d, memory_order_relaxed);
int pthread_cond_destroy(pthread_cond_t* cond_interface) {
pthread_cond_internal_t* cond = __get_internal_cond(cond_interface);
atomic_store_explicit(&cond->state, 0xdeadc04d, memory_order_relaxed);
return 0;
}
// This function is used by pthread_cond_broadcast and
// pthread_cond_signal to atomically decrement the counter
// then wake up thread_count threads.
static int __pthread_cond_pulse(atomic_uint* cond_value_ptr, int thread_count) {
unsigned int old_value = atomic_load_explicit(cond_value_ptr, memory_order_relaxed);
bool shared = COND_IS_SHARED(old_value);
static int __pthread_cond_pulse(pthread_cond_internal_t* cond, int thread_count) {
// We don't use a release/seq_cst fence here. Because pthread_cond_wait/signal can't be
// used as a method for memory synchronization by itself. It should always be used with
// pthread mutexes. Note that Spurious wakeups from pthread_cond_wait/timedwait may occur,
@@ -149,20 +158,18 @@ static int __pthread_cond_pulse(atomic_uint* cond_value_ptr, int thread_count) {
// synchronization. And it doesn't help even if we use any fence here.
// The increase of value should leave flags alone, even if the value overflows.
atomic_fetch_add_explicit(cond_value_ptr, COND_COUNTER_STEP, memory_order_relaxed);
atomic_fetch_add_explicit(&cond->state, COND_COUNTER_STEP, memory_order_relaxed);
__futex_wake_ex(cond_value_ptr, shared, thread_count);
__futex_wake_ex(&cond->state, cond->process_shared(), thread_count);
return 0;
}
__LIBC_HIDDEN__
int __pthread_cond_timedwait_relative(atomic_uint* cond_value_ptr, pthread_mutex_t* mutex,
const timespec* reltime) {
unsigned int old_value = atomic_load_explicit(cond_value_ptr, memory_order_relaxed);
bool shared = COND_IS_SHARED(old_value);
static int __pthread_cond_timedwait_relative(pthread_cond_internal_t* cond, pthread_mutex_t* mutex,
const timespec* rel_timeout_or_null) {
unsigned int old_state = atomic_load_explicit(&cond->state, memory_order_relaxed);
pthread_mutex_unlock(mutex);
int status = __futex_wait_ex(cond_value_ptr, shared, old_value, reltime);
int status = __futex_wait_ex(&cond->state, cond->process_shared(), old_state, rel_timeout_or_null);
pthread_mutex_lock(mutex);
if (status == -ETIMEDOUT) {
@@ -171,67 +178,68 @@ int __pthread_cond_timedwait_relative(atomic_uint* cond_value_ptr, pthread_mutex
return 0;
}
__LIBC_HIDDEN__
int __pthread_cond_timedwait(atomic_uint* cond_value_ptr, pthread_mutex_t* mutex,
const timespec* abs_ts, clockid_t clock) {
static int __pthread_cond_timedwait(pthread_cond_internal_t* cond, pthread_mutex_t* mutex,
const timespec* abs_timeout_or_null, clockid_t clock) {
timespec ts;
timespec* tsp;
timespec* rel_timeout = NULL;
if (abs_ts != NULL) {
if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
if (abs_timeout_or_null != NULL) {
rel_timeout = &ts;
if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, clock)) {
return ETIMEDOUT;
}
tsp = &ts;
} else {
tsp = NULL;
}
return __pthread_cond_timedwait_relative(cond_value_ptr, mutex, tsp);
return __pthread_cond_timedwait_relative(cond, mutex, rel_timeout);
}
int pthread_cond_broadcast(pthread_cond_t* cond) {
atomic_uint* cond_value_ptr = COND_TO_ATOMIC_POINTER(cond);
return __pthread_cond_pulse(cond_value_ptr, INT_MAX);
int pthread_cond_broadcast(pthread_cond_t* cond_interface) {
return __pthread_cond_pulse(__get_internal_cond(cond_interface), INT_MAX);
}
int pthread_cond_signal(pthread_cond_t* cond) {
atomic_uint* cond_value_ptr = COND_TO_ATOMIC_POINTER(cond);
return __pthread_cond_pulse(cond_value_ptr, 1);
int pthread_cond_signal(pthread_cond_t* cond_interface) {
return __pthread_cond_pulse(__get_internal_cond(cond_interface), 1);
}
int pthread_cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex) {
atomic_uint* cond_value_ptr = COND_TO_ATOMIC_POINTER(cond);
return __pthread_cond_timedwait(cond_value_ptr, mutex, NULL,
COND_GET_CLOCK(atomic_load_explicit(cond_value_ptr, memory_order_relaxed)));
int pthread_cond_wait(pthread_cond_t* cond_interface, pthread_mutex_t* mutex) {
pthread_cond_internal_t* cond = __get_internal_cond(cond_interface);
return __pthread_cond_timedwait(cond, mutex, NULL, cond->get_clock());
}
int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t * mutex, const timespec *abstime) {
atomic_uint* cond_value_ptr = COND_TO_ATOMIC_POINTER(cond);
return __pthread_cond_timedwait(cond_value_ptr, mutex, abstime,
COND_GET_CLOCK(atomic_load_explicit(cond_value_ptr, memory_order_relaxed)));
int pthread_cond_timedwait(pthread_cond_t *cond_interface, pthread_mutex_t * mutex,
const timespec *abstime) {
pthread_cond_internal_t* cond = __get_internal_cond(cond_interface);
return __pthread_cond_timedwait(cond, mutex, abstime, cond->get_clock());
}
#if !defined(__LP64__)
// TODO: this exists only for backward binary compatibility on 32 bit platforms.
extern "C" int pthread_cond_timedwait_monotonic(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* abstime) {
atomic_uint* cond_value_ptr = COND_TO_ATOMIC_POINTER(cond);
return __pthread_cond_timedwait(cond_value_ptr, mutex, abstime, CLOCK_MONOTONIC);
extern "C" int pthread_cond_timedwait_monotonic(pthread_cond_t* cond_interface,
pthread_mutex_t* mutex,
const timespec* abs_timeout) {
return __pthread_cond_timedwait(__get_internal_cond(cond_interface), mutex, abs_timeout,
CLOCK_MONOTONIC);
}
extern "C" int pthread_cond_timedwait_monotonic_np(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* abstime) {
atomic_uint* cond_value_ptr = COND_TO_ATOMIC_POINTER(cond);
return __pthread_cond_timedwait(cond_value_ptr, mutex, abstime, CLOCK_MONOTONIC);
extern "C" int pthread_cond_timedwait_monotonic_np(pthread_cond_t* cond_interface,
pthread_mutex_t* mutex,
const timespec* abs_timeout) {
return pthread_cond_timedwait_monotonic(cond_interface, mutex, abs_timeout);
}
extern "C" int pthread_cond_timedwait_relative_np(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* reltime) {
atomic_uint* cond_value_ptr = COND_TO_ATOMIC_POINTER(cond);
return __pthread_cond_timedwait_relative(cond_value_ptr, mutex, reltime);
extern "C" int pthread_cond_timedwait_relative_np(pthread_cond_t* cond_interface,
pthread_mutex_t* mutex,
const timespec* rel_timeout) {
return __pthread_cond_timedwait_relative(__get_internal_cond(cond_interface), mutex, rel_timeout);
}
extern "C" int pthread_cond_timeout_np(pthread_cond_t* cond, pthread_mutex_t* mutex, unsigned ms) {
extern "C" int pthread_cond_timeout_np(pthread_cond_t* cond_interface,
pthread_mutex_t* mutex, unsigned ms) {
timespec ts;
timespec_from_ms(ts, ms);
atomic_uint* cond_value_ptr = COND_TO_ATOMIC_POINTER(cond);
return __pthread_cond_timedwait_relative(cond_value_ptr, mutex, &ts);
return pthread_cond_timedwait_relative_np(cond_interface, mutex, &ts);
}
#endif // !defined(__LP64__)
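
// The comments above note that condition variables synchronize only through
// their associated mutex and that spurious wakeups may occur; the standard
// usage pattern therefore always re-checks the predicate in a loop
// (hypothetical usage, not part of the commit):
//
//   #include <pthread.h>
//
//   pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
//   pthread_cond_t g_cond = PTHREAD_COND_INITIALIZER;
//   bool g_ready = false;
//
//   void wait_until_ready() {
//     pthread_mutex_lock(&g_lock);
//     while (!g_ready) {                      // tolerate spurious wakeups
//       pthread_cond_wait(&g_cond, &g_lock);  // atomically unlocks, waits, relocks
//     }
//     pthread_mutex_unlock(&g_lock);
//   }
//
//   void signal_ready() {
//     pthread_mutex_lock(&g_lock);
//     g_ready = true;                         // write the predicate under the mutex...
//     pthread_mutex_unlock(&g_lock);
//     pthread_cond_signal(&g_cond);           // ...then wake a waiter
//   }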


@@ -56,7 +56,8 @@ void __init_tls(pthread_internal_t* thread) {
if (thread->mmap_size == 0) {
// If the TLS area was not allocated by mmap(), it may not have been cleared to zero.
// So assume the worst and zero the TLS area.
memset(&thread->tls[0], 0, BIONIC_TLS_SLOTS * sizeof(void*));
memset(thread->tls, 0, sizeof(thread->tls));
memset(thread->key_data, 0, sizeof(thread->key_data));
}
// Slot 0 must point to itself. The x86 Linux kernel reads the TLS from %fs:0.
@@ -155,7 +156,7 @@ static int __allocate_thread(pthread_attr_t* attr, pthread_internal_t** threadp,
}
// Mapped space(or user allocated stack) is used for:
// thread_internal_t (including tls array)
// thread_internal_t
// thread stack (including guard page)
stack_top -= sizeof(pthread_internal_t);
pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(stack_top);


@@ -41,8 +41,10 @@
/* Did the thread exit without freeing pthread_internal_t? */
#define PTHREAD_ATTR_FLAG_ZOMBIE 0x00000004
/* Is this the main thread? */
#define PTHREAD_ATTR_FLAG_MAIN_THREAD 0x80000000
struct pthread_key_data_t {
uintptr_t seq; // Use uintptr_t just for alignment, as we use pointer below.
void* data;
};
struct pthread_internal_t {
struct pthread_internal_t* next;
@@ -86,6 +88,8 @@ struct pthread_internal_t {
void* tls[BIONIC_TLS_SLOTS];
pthread_key_data_t key_data[BIONIC_PTHREAD_KEY_COUNT];
/*
* The dynamic linker implements dlerror(3), which makes it hard for us to implement this
* per-thread buffer by simply using malloc(3) and free(3).


@@ -28,175 +28,98 @@
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include "private/bionic_tls.h"
#include "pthread_internal.h"
/* A technical note regarding our thread-local-storage (TLS) implementation:
*
* There can be up to BIONIC_TLS_SLOTS independent TLS keys in a given process,
* The keys below TLS_SLOT_FIRST_USER_SLOT are reserved for Bionic to hold
* special thread-specific variables like errno or a pointer to
* the current thread's descriptor. These entries cannot be accessed through
* pthread_getspecific() / pthread_setspecific() or pthread_key_delete()
*
* The 'tls_map_t' type defined below implements a shared global map of
* currently created/allocated TLS keys and the destructors associated
* with them.
*
* The global TLS map simply contains a bitmap of allocated keys, and
* an array of destructors.
*
* Each thread has a TLS area that is a simple array of BIONIC_TLS_SLOTS void*
* pointers. the TLS area of the main thread is stack-allocated in
* __libc_init_common, while the TLS area of other threads is placed at
* the top of their stack in pthread_create.
*
* When pthread_key_delete() is called it will erase the key's bitmap bit
* and its destructor, and will also clear the key data in the TLS area of
* all created threads. As mandated by Posix, it is the responsibility of
* the caller of pthread_key_delete() to properly reclaim the objects that
* were pointed to by these data fields (either before or after the call).
*/
#define TLSMAP_BITS 32
#define TLSMAP_WORDS ((BIONIC_TLS_SLOTS+TLSMAP_BITS-1)/TLSMAP_BITS)
#define TLSMAP_WORD(m,k) (m).map[(k)/TLSMAP_BITS]
#define TLSMAP_MASK(k) (1U << ((k)&(TLSMAP_BITS-1)))
static inline bool IsValidUserKey(pthread_key_t key) {
return (key >= TLS_SLOT_FIRST_USER_SLOT && key < BIONIC_TLS_SLOTS);
}
typedef void (*key_destructor_t)(void*);
struct tls_map_t {
bool is_initialized;
#define SEQ_KEY_IN_USE_BIT 0
/* bitmap of allocated keys */
uint32_t map[TLSMAP_WORDS];
#define SEQ_INCREMENT_STEP (1 << SEQ_KEY_IN_USE_BIT)
key_destructor_t key_destructors[BIONIC_TLS_SLOTS];
// pthread_key_internal_t records the use of each pthread key slot:
// seq records the state of the slot.
// bit 0 is 1 when the key is in use, 0 when it is unused. Each time we create or delete the
// pthread key in the slot, we increase the seq by 1 (which inverts bit 0). The reason to use
// a sequence number instead of a boolean value here is that when the key slot is deleted and
// reused for a new key, pthread_getspecific will not return stale data.
// key_destructor records the destructor called at thread exit.
struct pthread_key_internal_t {
atomic_uintptr_t seq;
atomic_uintptr_t key_destructor;
};
class ScopedTlsMapAccess {
public:
ScopedTlsMapAccess() {
Lock();
static pthread_key_internal_t key_map[BIONIC_PTHREAD_KEY_COUNT];
// If this is the first time the TLS map has been accessed,
// mark the slots belonging to well-known keys as being in use.
// This isn't currently necessary because the well-known keys
// can only be accessed directly by bionic itself, do not have
// destructors, and all the functions that touch the TLS map
// start after the maximum well-known slot.
if (!s_tls_map_.is_initialized) {
for (pthread_key_t key = 0; key < TLS_SLOT_FIRST_USER_SLOT; ++key) {
SetInUse(key, NULL);
}
s_tls_map_.is_initialized = true;
}
}
static inline bool SeqOfKeyInUse(uintptr_t seq) {
return seq & (1 << SEQ_KEY_IN_USE_BIT);
}
~ScopedTlsMapAccess() {
Unlock();
}
int CreateKey(pthread_key_t* result, void (*key_destructor)(void*)) {
// Take the first unallocated key.
for (int key = 0; key < BIONIC_TLS_SLOTS; ++key) {
if (!IsInUse(key)) {
SetInUse(key, key_destructor);
*result = key;
return 0;
}
}
// We hit PTHREAD_KEYS_MAX. POSIX says EAGAIN for this case.
return EAGAIN;
}
void DeleteKey(pthread_key_t key) {
TLSMAP_WORD(s_tls_map_, key) &= ~TLSMAP_MASK(key);
s_tls_map_.key_destructors[key] = NULL;
}
bool IsInUse(pthread_key_t key) {
return (TLSMAP_WORD(s_tls_map_, key) & TLSMAP_MASK(key)) != 0;
}
void SetInUse(pthread_key_t key, void (*key_destructor)(void*)) {
TLSMAP_WORD(s_tls_map_, key) |= TLSMAP_MASK(key);
s_tls_map_.key_destructors[key] = key_destructor;
}
// Called from pthread_exit() to remove all TLS key data
// from this thread's TLS area. This must call the destructor of all keys
// that have a non-NULL data value and a non-NULL destructor.
void CleanAll() {
void** tls = __get_tls();
// Because destructors can do funky things like deleting/creating other
// keys, we need to implement this in a loop.
for (int rounds = PTHREAD_DESTRUCTOR_ITERATIONS; rounds > 0; --rounds) {
size_t called_destructor_count = 0;
for (int key = 0; key < BIONIC_TLS_SLOTS; ++key) {
if (IsInUse(key)) {
void* data = tls[key];
void (*key_destructor)(void*) = s_tls_map_.key_destructors[key];
if (data != NULL && key_destructor != NULL) {
// we need to clear the key data now, this will prevent the
// destructor (or a later one) from seeing the old value if
// it calls pthread_getspecific() for some odd reason
// we do not do this if 'key_destructor == NULL' just in case another
// destructor function might be responsible for manually
// releasing the corresponding data.
tls[key] = NULL;
// because the destructor is free to call pthread_key_create
// and/or pthread_key_delete, we need to temporarily unlock
// the TLS map
Unlock();
(*key_destructor)(data);
Lock();
++called_destructor_count;
}
}
}
// If we didn't call any destructors, there is no need to check the TLS data again.
if (called_destructor_count == 0) {
break;
}
}
}
private:
static tls_map_t s_tls_map_;
static pthread_mutex_t s_tls_map_lock_;
void Lock() {
pthread_mutex_lock(&s_tls_map_lock_);
}
void Unlock() {
pthread_mutex_unlock(&s_tls_map_lock_);
}
};
__LIBC_HIDDEN__ tls_map_t ScopedTlsMapAccess::s_tls_map_;
__LIBC_HIDDEN__ pthread_mutex_t ScopedTlsMapAccess::s_tls_map_lock_;
static inline bool KeyInValidRange(pthread_key_t key) {
return key >= 0 && key < BIONIC_PTHREAD_KEY_COUNT;
}
// Called from pthread_exit() to remove all pthread keys. This must call the destructor of
// all keys that have a non-NULL data value and a non-NULL destructor.
__LIBC_HIDDEN__ void pthread_key_clean_all() {
ScopedTlsMapAccess tls_map;
tls_map.CleanAll();
// Because destructors can do funky things like deleting/creating other keys,
// we need to implement this in a loop.
pthread_key_data_t* key_data = __get_thread()->key_data;
for (size_t rounds = PTHREAD_DESTRUCTOR_ITERATIONS; rounds > 0; --rounds) {
size_t called_destructor_count = 0;
for (size_t i = 0; i < BIONIC_PTHREAD_KEY_COUNT; ++i) {
uintptr_t seq = atomic_load_explicit(&key_map[i].seq, memory_order_relaxed);
if (SeqOfKeyInUse(seq) && seq == key_data[i].seq && key_data[i].data != NULL) {
// Other threads may be calling pthread_key_delete/pthread_key_create while the current
// thread is exiting, so we need to ensure we read the right key_destructor.
// We can rely on a user-established happens-before relationship between the creation and
// use of a pthread key to ensure that we're not getting an earlier key_destructor.
// To avoid using the key_destructor of the newly created key in the same slot, we need to
// recheck the sequence number after reading key_destructor. As a result, we either see the
// right key_destructor, or the sequence number must have changed when we reread it below.
key_destructor_t key_destructor = reinterpret_cast<key_destructor_t>(
atomic_load_explicit(&key_map[i].key_destructor, memory_order_relaxed));
if (key_destructor == NULL) {
continue;
}
atomic_thread_fence(memory_order_acquire);
if (atomic_load_explicit(&key_map[i].seq, memory_order_relaxed) != seq) {
continue;
}
// We need to clear the key data now, this will prevent the destructor (or a later one)
// from seeing the old value if it calls pthread_getspecific().
// We don't do this if 'key_destructor == NULL' just in case another destructor
// function is responsible for manually releasing the corresponding data.
void* data = key_data[i].data;
key_data[i].data = NULL;
(*key_destructor)(data);
++called_destructor_count;
}
}
// If we didn't call any destructors, there is no need to check the pthread keys again.
if (called_destructor_count == 0) {
break;
}
}
}
int pthread_key_create(pthread_key_t* key, void (*key_destructor)(void*)) {
ScopedTlsMapAccess tls_map;
return tls_map.CreateKey(key, key_destructor);
for (size_t i = 0; i < BIONIC_PTHREAD_KEY_COUNT; ++i) {
uintptr_t seq = atomic_load_explicit(&key_map[i].seq, memory_order_relaxed);
while (!SeqOfKeyInUse(seq)) {
if (atomic_compare_exchange_weak(&key_map[i].seq, &seq, seq + SEQ_INCREMENT_STEP)) {
atomic_store(&key_map[i].key_destructor, reinterpret_cast<uintptr_t>(key_destructor));
*key = i;
return 0;
}
}
}
return EAGAIN;
}
// Deletes a pthread_key_t. Note that the standard mandates that this does
@@ -204,42 +127,44 @@ int pthread_key_create(pthread_key_t* key, void (*key_destructor)(void*)) {
// responsibility of the caller to properly dispose of the corresponding data
// and resources, using any means it finds suitable.
int pthread_key_delete(pthread_key_t key) {
ScopedTlsMapAccess tls_map;
if (!IsValidUserKey(key) || !tls_map.IsInUse(key)) {
if (!KeyInValidRange(key)) {
return EINVAL;
}
// Clear value in all threads.
pthread_mutex_lock(&g_thread_list_lock);
for (pthread_internal_t* t = g_thread_list; t != NULL; t = t->next) {
t->tls[key] = NULL;
// Increase seq to invalidate values in all threads.
uintptr_t seq = atomic_load_explicit(&key_map[key].seq, memory_order_relaxed);
if (SeqOfKeyInUse(seq)) {
if (atomic_compare_exchange_strong(&key_map[key].seq, &seq, seq + SEQ_INCREMENT_STEP)) {
return 0;
}
}
tls_map.DeleteKey(key);
pthread_mutex_unlock(&g_thread_list_lock);
return 0;
return EINVAL;
}
void* pthread_getspecific(pthread_key_t key) {
if (!IsValidUserKey(key)) {
if (!KeyInValidRange(key)) {
return NULL;
}
// For performance reasons, we do not lock/unlock the global TLS map
// to check that the key is properly allocated. If the key was not
// allocated, the value read from the TLS should always be NULL
// due to pthread_key_delete() clearing the values for all threads.
return __get_tls()[key];
uintptr_t seq = atomic_load_explicit(&key_map[key].seq, memory_order_relaxed);
pthread_key_data_t* data = &(__get_thread()->key_data[key]);
// It is the user's responsibility to synchronize between the creation and use of pthread keys,
// so we use memory_order_relaxed when checking the sequence number.
if (__predict_true(SeqOfKeyInUse(seq) && data->seq == seq)) {
return data->data;
}
data->data = NULL;
return NULL;
}
int pthread_setspecific(pthread_key_t key, const void* ptr) {
ScopedTlsMapAccess tls_map;
if (!IsValidUserKey(key) || !tls_map.IsInUse(key)) {
if (!KeyInValidRange(key)) {
return EINVAL;
}
__get_tls()[key] = const_cast<void*>(ptr);
return 0;
uintptr_t seq = atomic_load_explicit(&key_map[key].seq, memory_order_relaxed);
if (SeqOfKeyInUse(seq)) {
pthread_key_data_t* data = &(__get_thread()->key_data[key]);
data->seq = seq;
data->data = const_cast<void*>(ptr);
return 0;
}
return EINVAL;
}
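// Usage sketch (illustrative, not part of this patch; all names hypothetical):
// a lazily allocated per-thread buffer keyed by a pthread_key_t, freed
// automatically at thread exit by the destructor registered at creation.
//
//   static pthread_key_t g_buffer_key;
//
//   static void free_buffer(void* p) {
//     free(p);
//   }
//
//   static void init_buffer_key() {  // call once, e.g. from a constructor
//     pthread_key_create(&g_buffer_key, free_buffer);
//   }
//
//   static char* get_buffer() {
//     char* buf = static_cast<char*>(pthread_getspecific(g_buffer_key));
//     if (buf == NULL) {
//       buf = static_cast<char*>(calloc(1, 4096));
//       pthread_setspecific(g_buffer_key, buf);
//     }
//     return buf;
//   }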

View File

@@ -27,6 +27,7 @@
*/
#include <errno.h>
#include <stdatomic.h>
#include "pthread_internal.h"
#include "private/bionic_futex.h"
@@ -52,11 +53,6 @@
* - This implementation will return EDEADLK in "write after write" and "read after
* write" cases and will deadlock in write after read case.
*
* TODO: VERY CAREFULLY convert this to use C++11 atomics when possible. All volatile
* members of pthread_rwlock_t should be converted to atomics<> and __sync_bool_compare_and_swap
* should be changed to compare_exchange_strong accompanied by the proper ordering
 * constraints (comments have been added with the intended ordering across the code).
*
* TODO: As it stands now, pending_readers and pending_writers could be merged into a
* a single waiters variable. Keeping them separate adds a bit of clarity and keeps
* the door open for a writer-biased implementation.
@@ -66,18 +62,6 @@
#define RWLOCKATTR_DEFAULT 0
#define RWLOCKATTR_SHARED_MASK 0x0010
static inline bool rwlock_is_shared(const pthread_rwlock_t* rwlock) {
return rwlock->attr == PTHREAD_PROCESS_SHARED;
}
static bool timespec_from_absolute(timespec* rel_timeout, const timespec* abs_timeout) {
if (abs_timeout != NULL) {
if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout, CLOCK_REALTIME)) {
return false;
}
}
return true;
}
int pthread_rwlockattr_init(pthread_rwlockattr_t* attr) {
*attr = PTHREAD_PROCESS_PRIVATE;
@@ -105,8 +89,36 @@ int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* attr, int* pshared
return 0;
}
int pthread_rwlock_init(pthread_rwlock_t* rwlock, const pthread_rwlockattr_t* attr) {
if (attr != NULL) {
struct pthread_rwlock_internal_t {
atomic_int state; // 0=unlock, -1=writer lock, +n=reader lock
atomic_int writer_thread_id;
atomic_uint pending_readers;
atomic_uint pending_writers;
int32_t attr;
bool process_shared() const {
return attr == PTHREAD_PROCESS_SHARED;
}
#if defined(__LP64__)
char __reserved[36];
#else
char __reserved[20];
#endif
};
static inline pthread_rwlock_internal_t* __get_internal_rwlock(pthread_rwlock_t* rwlock_interface) {
static_assert(sizeof(pthread_rwlock_t) == sizeof(pthread_rwlock_internal_t),
"pthread_rwlock_t should actually be pthread_rwlock_internal_t in implementation.");
return reinterpret_cast<pthread_rwlock_internal_t*>(rwlock_interface);
}
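// Illustrative sketch (not part of this patch; hypothetical names) of the
// "opaque public type backed by an internal struct" pattern used here: the
// public type only fixes size and alignment, the implementation reinterprets
// it, and the static_assert catches any size drift at compile time.
//
//   struct example_lock_t { char __private[40]; } __attribute__((aligned(8)));
//   struct example_lock_internal_t {
//     atomic_int state;
//     char __reserved[36];
//   };
//   static_assert(sizeof(example_lock_t) == sizeof(example_lock_internal_t),
//                 "public and internal lock types must match in size");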
int pthread_rwlock_init(pthread_rwlock_t* rwlock_interface, const pthread_rwlockattr_t* attr) {
pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
if (__predict_true(attr == NULL)) {
rwlock->attr = 0;
} else {
switch (*attr) {
case PTHREAD_PROCESS_SHARED:
case PTHREAD_PROCESS_PRIVATE:
@@ -117,165 +129,214 @@ int pthread_rwlock_init(pthread_rwlock_t* rwlock, const pthread_rwlockattr_t* at
}
}
rwlock->state = 0;
rwlock->pending_readers = 0;
rwlock->pending_writers = 0;
rwlock->writer_thread_id = 0;
atomic_init(&rwlock->state, 0);
atomic_init(&rwlock->writer_thread_id, 0);
atomic_init(&rwlock->pending_readers, 0);
atomic_init(&rwlock->pending_writers, 0);
return 0;
}
int pthread_rwlock_destroy(pthread_rwlock_t* rwlock) {
if (rwlock->state != 0) {
int pthread_rwlock_destroy(pthread_rwlock_t* rwlock_interface) {
pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
if (atomic_load_explicit(&rwlock->state, memory_order_relaxed) != 0) {
return EBUSY;
}
return 0;
}
static int __pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
if (__predict_false(__get_thread()->tid == rwlock->writer_thread_id)) {
static int __pthread_rwlock_timedrdlock(pthread_rwlock_internal_t* rwlock,
const timespec* abs_timeout_or_null) {
if (__predict_false(__get_thread()->tid == atomic_load_explicit(&rwlock->writer_thread_id,
memory_order_relaxed))) {
return EDEADLK;
}
timespec ts;
timespec* rel_timeout = (abs_timeout == NULL) ? NULL : &ts;
bool done = false;
do {
// This is actually a racy read, as there's nothing that guarantees the atomicity of integer
// reads / writes. However, in practice this "never" happens, so until we switch to C++11 this
// should work fine. The same applies in the other places this idiom is used.
int32_t cur_state = rwlock->state; // C++11 relaxed atomic read
if (__predict_true(cur_state >= 0)) {
// Add as an extra reader.
done = __sync_bool_compare_and_swap(&rwlock->state, cur_state, cur_state + 1); // C++11 memory_order_acquire
} else {
if (!timespec_from_absolute(rel_timeout, abs_timeout)) {
return ETIMEDOUT;
while (true) {
int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
if (__predict_true(old_state >= 0)) {
if (atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, old_state + 1,
memory_order_acquire, memory_order_relaxed)) {
return 0;
}
// Owner holds it in write mode, hang up.
// To avoid losing wake ups, the pending_readers update and the state read should be
// sequentially consistent. (This is currently enforced by __sync_fetch_and_add, which creates a full barrier.)
__sync_fetch_and_add(&rwlock->pending_readers, 1); // C++11 memory_order_relaxed (if the futex_wait ensures the ordering)
int ret = __futex_wait_ex(&rwlock->state, rwlock_is_shared(rwlock), cur_state, rel_timeout);
__sync_fetch_and_sub(&rwlock->pending_readers, 1); // C++11 memory_order_relaxed
} else {
timespec ts;
timespec* rel_timeout = NULL;
if (abs_timeout_or_null != NULL) {
rel_timeout = &ts;
if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, CLOCK_REALTIME)) {
return ETIMEDOUT;
}
}
// To avoid losing wake ups, the pending_readers increment should be observed before
// futex_wait by all threads. A seq_cst fence instead of a seq_cst operation is used
// here, because only a seq_cst fence can ensure sequential consistency for the
// non-atomic operations in futex_wait.
atomic_fetch_add_explicit(&rwlock->pending_readers, 1, memory_order_relaxed);
atomic_thread_fence(memory_order_seq_cst);
int ret = __futex_wait_ex(&rwlock->state, rwlock->process_shared(), old_state,
rel_timeout);
atomic_fetch_sub_explicit(&rwlock->pending_readers, 1, memory_order_relaxed);
if (ret == -ETIMEDOUT) {
return ETIMEDOUT;
}
}
} while (!done);
return 0;
}
}
static int __pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
int tid = __get_thread()->tid;
if (__predict_false(tid == rwlock->writer_thread_id)) {
static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock,
const timespec* abs_timeout_or_null) {
if (__predict_false(__get_thread()->tid == atomic_load_explicit(&rwlock->writer_thread_id,
memory_order_relaxed))) {
return EDEADLK;
}
timespec ts;
timespec* rel_timeout = (abs_timeout == NULL) ? NULL : &ts;
bool done = false;
do {
int32_t cur_state = rwlock->state;
if (__predict_true(cur_state == 0)) {
// Change state from 0 to -1.
done = __sync_bool_compare_and_swap(&rwlock->state, 0 /* cur state */, -1 /* new state */); // C++11 memory_order_acquire
} else {
if (!timespec_from_absolute(rel_timeout, abs_timeout)) {
return ETIMEDOUT;
while (true) {
int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
if (__predict_true(old_state == 0)) {
if (atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, -1,
memory_order_acquire, memory_order_relaxed)) {
// writer_thread_id is protected by the rwlock and can only be modified in the rwlock's
// write owner thread. Other threads may read it for EDEADLK error checking; an atomic
// operation is safe enough for that.
atomic_store_explicit(&rwlock->writer_thread_id, __get_thread()->tid, memory_order_relaxed);
return 0;
}
// Failed to acquire, hang up.
// To avoid losing wake ups, the pending_writers update and the state read should be
// sequentially consistent. (This is currently enforced by __sync_fetch_and_add, which creates a full barrier.)
__sync_fetch_and_add(&rwlock->pending_writers, 1); // C++11 memory_order_relaxed (if the futex_wait ensures the ordering)
int ret = __futex_wait_ex(&rwlock->state, rwlock_is_shared(rwlock), cur_state, rel_timeout);
__sync_fetch_and_sub(&rwlock->pending_writers, 1); // C++11 memory_order_relaxed
} else {
timespec ts;
timespec* rel_timeout = NULL;
if (abs_timeout_or_null != NULL) {
rel_timeout = &ts;
if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, CLOCK_REALTIME)) {
return ETIMEDOUT;
}
}
// To avoid losing wake ups, the pending_writers increment should be observed before
// futex_wait by all threads. A seq_cst fence instead of a seq_cst operation is used
// here, because only a seq_cst fence can ensure sequential consistency for the
// non-atomic operations in futex_wait.
atomic_fetch_add_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);
atomic_thread_fence(memory_order_seq_cst);
int ret = __futex_wait_ex(&rwlock->state, rwlock->process_shared(), old_state,
rel_timeout);
atomic_fetch_sub_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);
if (ret == -ETIMEDOUT) {
return ETIMEDOUT;
}
}
} while (!done);
rwlock->writer_thread_id = tid;
return 0;
}
}
int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock) {
int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock_interface) {
pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
return __pthread_rwlock_timedrdlock(rwlock, NULL);
}
int pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
int pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock_interface, const timespec* abs_timeout) {
pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
return __pthread_rwlock_timedrdlock(rwlock, abs_timeout);
}
int pthread_rwlock_tryrdlock(pthread_rwlock_t* rwlock) {
int32_t cur_state = rwlock->state;
if ((cur_state >= 0) &&
__sync_bool_compare_and_swap(&rwlock->state, cur_state, cur_state + 1)) { // C++11 memory_order_acquire
return 0;
int pthread_rwlock_tryrdlock(pthread_rwlock_t* rwlock_interface) {
pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
while (old_state >= 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state,
old_state + 1, memory_order_acquire, memory_order_relaxed)) {
}
return EBUSY;
return (old_state >= 0) ? 0 : EBUSY;
}
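// Note on the retry loop above (illustrative restatement, not part of this
// patch): on failure, atomic_compare_exchange_weak_explicit reloads the
// current value into old_state, so the loop keeps retrying while readers hold
// the lock (old_state >= 0), absorbs spurious weak-CAS failures, and only
// reports EBUSY once a writer is observed (old_state < 0). The general idiom,
// with hypothetical pred()/next():
//
//   int expected = atomic_load_explicit(&v, memory_order_relaxed);
//   while (pred(expected) &&
//          !atomic_compare_exchange_weak_explicit(&v, &expected, next(expected),
//                                                 memory_order_acquire,
//                                                 memory_order_relaxed)) {
//     // expected was refreshed by the failed CAS; re-evaluate pred.
//   }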
int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock) {
int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock_interface) {
pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
return __pthread_rwlock_timedwrlock(rwlock, NULL);
}
int pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
int pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock_interface, const timespec* abs_timeout) {
pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
return __pthread_rwlock_timedwrlock(rwlock, abs_timeout);
}
int pthread_rwlock_trywrlock(pthread_rwlock_t* rwlock) {
int tid = __get_thread()->tid;
int32_t cur_state = rwlock->state;
if ((cur_state == 0) &&
__sync_bool_compare_and_swap(&rwlock->state, 0 /* cur state */, -1 /* new state */)) { // C++11 memory_order_acquire
rwlock->writer_thread_id = tid;
int pthread_rwlock_trywrlock(pthread_rwlock_t* rwlock_interface) {
pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
while (old_state == 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, -1,
memory_order_acquire, memory_order_relaxed)) {
}
if (old_state == 0) {
atomic_store_explicit(&rwlock->writer_thread_id, __get_thread()->tid, memory_order_relaxed);
return 0;
}
return EBUSY;
}
int pthread_rwlock_unlock(pthread_rwlock_t* rwlock) {
int tid = __get_thread()->tid;
bool done = false;
do {
int32_t cur_state = rwlock->state;
if (cur_state == 0) {
int pthread_rwlock_unlock(pthread_rwlock_t* rwlock_interface) {
pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);
int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
if (__predict_false(old_state == 0)) {
return EPERM;
} else if (old_state == -1) {
if (atomic_load_explicit(&rwlock->writer_thread_id, memory_order_relaxed) != __get_thread()->tid) {
return EPERM;
}
if (cur_state == -1) {
if (rwlock->writer_thread_id != tid) {
return EPERM;
}
// We're no longer the owner.
rwlock->writer_thread_id = 0;
// Change state from -1 to 0.
// We use __sync_bool_compare_and_swap to achieve sequential consistency of the state store and
// the following pendingX loads. A simple store with memory_order_release semantics
// is not enough to guarantee that the pendingX loads are not reordered before the
// store (which may lead to a lost wakeup).
__sync_bool_compare_and_swap(&rwlock->state, -1 /* cur state */, 0 /* new state */); // C++11 maybe memory_order_seq_cst?
// We're no longer the owner.
atomic_store_explicit(&rwlock->writer_thread_id, 0, memory_order_relaxed);
// Change state from -1 to 0.
atomic_store_explicit(&rwlock->state, 0, memory_order_release);
// Wake any waiters.
if (__predict_false(rwlock->pending_readers > 0 || rwlock->pending_writers > 0)) {
__futex_wake_ex(&rwlock->state, rwlock_is_shared(rwlock), INT_MAX);
}
done = true;
} else { // cur_state > 0
// Reduce state by 1.
// See the comment above on why we need __sync_bool_compare_and_swap.
done = __sync_bool_compare_and_swap(&rwlock->state, cur_state, cur_state - 1); // C++11 maybe memory_order_seq_cst?
if (done && (cur_state - 1) == 0) {
// There are no more readers, wake any waiters.
if (__predict_false(rwlock->pending_readers > 0 || rwlock->pending_writers > 0)) {
__futex_wake_ex(&rwlock->state, rwlock_is_shared(rwlock), INT_MAX);
}
}
} else { // old_state > 0
// Reduce state by 1.
while (old_state > 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state,
old_state - 1, memory_order_release, memory_order_relaxed)) {
}
} while (!done);
if (old_state <= 0) {
return EPERM;
} else if (old_state > 1) {
return 0;
}
// old_state == 1 means we are the last reader to unlock, so we have to wake up any waiters.
}
// If there are waiters, wake them up.
// To avoid losing wake ups, the update of state should be observed before reading
// pending_readers/pending_writers by all threads. Use read locking as an example:
// read locking thread unlocking thread
// pending_readers++; state = 0;
// seq_cst fence seq_cst fence
// read state for futex_wait read pending_readers for futex_wake
//
// So when locking and unlocking threads are running in parallel, we will not get
// in a situation that the locking thread reads state as negative and needs to wait,
// while the unlocking thread reads pending_readers as zero and doesn't need to wake up waiters.
atomic_thread_fence(memory_order_seq_cst);
if (__predict_false(atomic_load_explicit(&rwlock->pending_readers, memory_order_relaxed) > 0 ||
atomic_load_explicit(&rwlock->pending_writers, memory_order_relaxed) > 0)) {
__futex_wake_ex(&rwlock->state, rwlock->process_shared(), INT_MAX);
}
return 0;
}
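// Usage sketch (illustrative, not part of this patch; hypothetical names):
// multiple readers may hold the lock concurrently; the last reader out, or a
// writer unlocking, wakes any pending waiters as described above.
//
//   static pthread_rwlock_t g_table_lock = PTHREAD_RWLOCK_INITIALIZER;
//
//   int lookup() {
//     pthread_rwlock_rdlock(&g_table_lock);
//     // ... read the shared table ...
//     return pthread_rwlock_unlock(&g_table_lock);
//   }
//
//   int update() {
//     pthread_rwlock_wrlock(&g_table_lock);
//     // ... mutate the shared table ...
//     return pthread_rwlock_unlock(&g_table_lock);
//   }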

View File

@@ -446,31 +446,6 @@ protoent* getprotobynumber(int /*proto*/) {
return NULL;
}
static void unimplemented_stub(const char* function) {
const char* fmt = "%s(3) is not implemented on Android\n";
__libc_format_log(ANDROID_LOG_WARN, "libc", fmt, function);
fprintf(stderr, fmt, function);
}
#define UNIMPLEMENTED unimplemented_stub(__PRETTY_FUNCTION__)
void endpwent() {
UNIMPLEMENTED;
}
char* getusershell() {
UNIMPLEMENTED;
return NULL;
}
void setusershell() {
UNIMPLEMENTED;
}
void endusershell() {
UNIMPLEMENTED;
}
// Portable code should use sysconf(_SC_PAGE_SIZE) directly instead.
int getpagesize() {
// We don't use sysconf(3) here because that drags in stdio, which makes static binaries fat.

View File

@@ -33,6 +33,7 @@
#include <pthread.h>
#include <stdio.h> // For FOPEN_MAX.
#include <sys/auxv.h>
#include <sys/resource.h>
#include <sys/sysconf.h>
#include <sys/sysinfo.h>
#include <time.h>
@@ -50,6 +51,12 @@ static bool __sysconf_has_clock(clockid_t clock_id) {
return clock_getres(clock_id, NULL) == 0;
}
static long __sysconf_rlimit(int resource) {
rlimit rl;
getrlimit(resource, &rl);
return rl.rlim_cur;
}
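// Illustrative effect of the rlimit-backed cases below (not part of this
// patch; values hypothetical): sysconf now tracks the soft limit, so a
// change made via setrlimit is visible immediately.
//
//   rlimit rl = { 4096, 4096 };
//   setrlimit(RLIMIT_NOFILE, &rl);
//   long open_max = sysconf(_SC_OPEN_MAX);  // 4096, no longer the OPEN_MAX constant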
long sysconf(int name) {
switch (name) {
case _SC_ARG_MAX: return ARG_MAX;
@@ -57,13 +64,13 @@ long sysconf(int name) {
case _SC_BC_DIM_MAX: return _POSIX2_BC_DIM_MAX; // Minimum requirement.
case _SC_BC_SCALE_MAX: return _POSIX2_BC_SCALE_MAX; // Minimum requirement.
case _SC_BC_STRING_MAX: return _POSIX2_BC_STRING_MAX; // Minimum requirement.
case _SC_CHILD_MAX: return CHILD_MAX;
case _SC_CHILD_MAX: return __sysconf_rlimit(RLIMIT_NPROC);
case _SC_CLK_TCK: return static_cast<long>(getauxval(AT_CLKTCK));
case _SC_COLL_WEIGHTS_MAX: return _POSIX2_COLL_WEIGHTS_MAX; // Minimum requirement.
case _SC_EXPR_NEST_MAX: return _POSIX2_EXPR_NEST_MAX; // Minimum requirement.
case _SC_LINE_MAX: return _POSIX2_LINE_MAX; // Minimum requirement.
case _SC_NGROUPS_MAX: return NGROUPS_MAX;
case _SC_OPEN_MAX: return OPEN_MAX;
case _SC_OPEN_MAX: return __sysconf_rlimit(RLIMIT_NOFILE);
case _SC_PASS_MAX: return PASS_MAX;
case _SC_2_C_BIND: return _POSIX2_C_BIND;
case _SC_2_C_DEV: return _POSIX2_C_DEV;

View File

@@ -51,7 +51,6 @@
#include <sys/_system_properties.h>
#include <sys/system_properties.h>
#include "private/bionic_atomic_inline.h"
#include "private/bionic_futex.h"
#include "private/bionic_macros.h"
@@ -80,22 +79,26 @@ struct prop_bt {
uint8_t namelen;
uint8_t reserved[3];
// TODO: The following fields should be declared as atomic_uint32_t.
// They should be assigned to with release semantics, instead of using
// explicit fences. Unfortunately, the read accesses are generally
// followed by more dependent read accesses, and the dependence
// is assumed to enforce memory ordering. Which it does on supported
// hardware. This technically should use memory_order_consume, if
// that worked as intended.
// The property trie is updated only by the (single-threaded) init process, which provides
// the property service, and it can be read by multiple threads at the same time.
// As the property trie is not protected by locks, we use atomic_uint_least32_t types for the
// left, right, children "pointers" in the trie node. To make sure readers who see a
// change of these "pointers" also notice the change to the prop_bt structure contents they
// point to, we always use a release-consume ordering pair when accessing these "pointers".
// prop "points" to a prop_info structure if there is a property associated with the trie node.
// Its situation is similar to that of the left, right, children "pointers", so we use
// atomic_uint_least32_t and release-consume ordering to protect it as well.
// We should also avoid rereading these fields redundantly, since not
// all processor implementations ensure that multiple loads from the
// same field are carried out in the right order.
volatile uint32_t prop;
atomic_uint_least32_t prop;
volatile uint32_t left;
volatile uint32_t right;
atomic_uint_least32_t left;
atomic_uint_least32_t right;
volatile uint32_t children;
atomic_uint_least32_t children;
char name[0];
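// Illustrative sketch (not part of this patch; parent is hypothetical) of the
// release-consume pair described above, reduced to a single "pointer" field:
//
//   // Writer (the single-threaded init process): construct first, publish second.
//   uint_least32_t new_offset;
//   prop_bt* child = new_prop_bt(name, namelen, &new_offset);
//   atomic_store_explicit(&parent->left, new_offset, memory_order_release);
//
//   // Reader (any thread): the consume load carries a dependency into the
//   // dereference, so the fully constructed node is observed.
//   uint_least32_t off = atomic_load_explicit(&parent->left, memory_order_consume);
//   if (off != 0) {
//     prop_bt* node = reinterpret_cast<prop_bt*>(to_prop_obj(off));
//     // ... node->name and node->namelen are now safe to read ...
//   }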
@@ -103,8 +106,6 @@ struct prop_bt {
this->namelen = name_length;
memcpy(this->name, name, name_length);
this->name[name_length] = '\0';
ANDROID_MEMBAR_FULL(); // TODO: Instead use a release store
// for subsequent pointer assignment.
}
private:
@@ -143,8 +144,6 @@ struct prop_info {
atomic_init(&this->serial, valuelen << 24);
memcpy(this->value, value, valuelen);
this->value[valuelen] = '\0';
ANDROID_MEMBAR_FULL(); // TODO: Instead use a release store
// for subsequent point assignment.
}
private:
DISALLOW_COPY_AND_ASSIGN(prop_info);
@@ -291,10 +290,10 @@ static int map_prop_area()
return map_result;
}
static void *allocate_obj(const size_t size, uint32_t *const off)
static void *allocate_obj(const size_t size, uint_least32_t *const off)
{
prop_area *pa = __system_property_area__;
const size_t aligned = BIONIC_ALIGN(size, sizeof(uint32_t));
const size_t aligned = BIONIC_ALIGN(size, sizeof(uint_least32_t));
if (pa->bytes_used + aligned > pa_data_size) {
return NULL;
}
@@ -304,12 +303,12 @@ static void *allocate_obj(const size_t size, uint32_t *const off)
return pa->data + *off;
}
static prop_bt *new_prop_bt(const char *name, uint8_t namelen, uint32_t *const off)
static prop_bt *new_prop_bt(const char *name, uint8_t namelen, uint_least32_t *const off)
{
uint32_t new_offset;
void *const offset = allocate_obj(sizeof(prop_bt) + namelen + 1, &new_offset);
if (offset) {
prop_bt* bt = new(offset) prop_bt(name, namelen);
uint_least32_t new_offset;
void *const p = allocate_obj(sizeof(prop_bt) + namelen + 1, &new_offset);
if (p != NULL) {
prop_bt* bt = new(p) prop_bt(name, namelen);
*off = new_offset;
return bt;
}
@@ -318,20 +317,20 @@ static prop_bt *new_prop_bt(const char *name, uint8_t namelen, uint32_t *const o
}
static prop_info *new_prop_info(const char *name, uint8_t namelen,
const char *value, uint8_t valuelen, uint32_t *const off)
const char *value, uint8_t valuelen, uint_least32_t *const off)
{
uint32_t off_tmp;
void* const offset = allocate_obj(sizeof(prop_info) + namelen + 1, &off_tmp);
if (offset) {
prop_info* info = new(offset) prop_info(name, namelen, value, valuelen);
*off = off_tmp;
uint_least32_t new_offset;
void* const p = allocate_obj(sizeof(prop_info) + namelen + 1, &new_offset);
if (p != NULL) {
prop_info* info = new(p) prop_info(name, namelen, value, valuelen);
*off = new_offset;
return info;
}
return NULL;
}
static void *to_prop_obj(const uint32_t off)
static void *to_prop_obj(uint_least32_t off)
{
if (off > pa_data_size)
return NULL;
@@ -341,7 +340,17 @@ static void *to_prop_obj(const uint32_t off)
return (__system_property_area__->data + off);
}
static prop_bt *root_node()
static inline prop_bt *to_prop_bt(atomic_uint_least32_t* off_p) {
uint_least32_t off = atomic_load_explicit(off_p, memory_order_consume);
return reinterpret_cast<prop_bt*>(to_prop_obj(off));
}
static inline prop_info *to_prop_info(atomic_uint_least32_t* off_p) {
uint_least32_t off = atomic_load_explicit(off_p, memory_order_consume);
return reinterpret_cast<prop_info*>(to_prop_obj(off));
}
static inline prop_bt *root_node()
{
return reinterpret_cast<prop_bt*>(to_prop_obj(0));
}
@@ -373,36 +382,34 @@ static prop_bt *find_prop_bt(prop_bt *const bt, const char *name,
}
if (ret < 0) {
if (current->left) {
current = reinterpret_cast<prop_bt*>(to_prop_obj(current->left));
uint_least32_t left_offset = atomic_load_explicit(&current->left, memory_order_relaxed);
if (left_offset != 0) {
current = to_prop_bt(&current->left);
} else {
if (!alloc_if_needed) {
return NULL;
}
// Note that there isn't a race condition here. "clients" never
// reach this code-path since it's only the (single-threaded) server
// that allocates new nodes. Though "bt->left" is volatile, it can't
// have changed since it was last read.
uint32_t new_offset = 0;
uint_least32_t new_offset;
prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset);
if (new_bt) {
current->left = new_offset;
atomic_store_explicit(&current->left, new_offset, memory_order_release);
}
return new_bt;
}
} else {
if (current->right) {
current = reinterpret_cast<prop_bt*>(to_prop_obj(current->right));
uint_least32_t right_offset = atomic_load_explicit(&current->right, memory_order_relaxed);
if (right_offset != 0) {
current = to_prop_bt(&current->right);
} else {
if (!alloc_if_needed) {
return NULL;
}
uint32_t new_offset;
uint_least32_t new_offset;
prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset);
if (new_bt) {
current->right = new_offset;
atomic_store_explicit(&current->right, new_offset, memory_order_release);
}
return new_bt;
}
@@ -429,13 +436,14 @@ static const prop_info *find_property(prop_bt *const trie, const char *name,
}
prop_bt* root = NULL;
if (current->children) {
root = reinterpret_cast<prop_bt*>(to_prop_obj(current->children));
uint_least32_t children_offset = atomic_load_explicit(&current->children, memory_order_relaxed);
if (children_offset != 0) {
root = to_prop_bt(&current->children);
} else if (alloc_if_needed) {
uint32_t new_bt_offset;
root = new_prop_bt(remaining_name, substr_size, &new_bt_offset);
uint_least32_t new_offset;
root = new_prop_bt(remaining_name, substr_size, &new_offset);
if (root) {
current->children = new_bt_offset;
atomic_store_explicit(&current->children, new_offset, memory_order_release);
}
}
@@ -454,13 +462,14 @@ static const prop_info *find_property(prop_bt *const trie, const char *name,
remaining_name = sep + 1;
}
if (current->prop) {
return reinterpret_cast<prop_info*>(to_prop_obj(current->prop));
uint_least32_t prop_offset = atomic_load_explicit(&current->prop, memory_order_relaxed);
if (prop_offset != 0) {
return to_prop_info(&current->prop);
} else if (alloc_if_needed) {
uint32_t new_info_offset;
prop_info* new_info = new_prop_info(name, namelen, value, valuelen, &new_info_offset);
uint_least32_t new_offset;
prop_info* new_info = new_prop_info(name, namelen, value, valuelen, &new_offset);
if (new_info) {
current->prop = new_info_offset;
atomic_store_explicit(&current->prop, new_offset, memory_order_release);
}
return new_info;
@@ -534,31 +543,34 @@ static void find_nth_fn(const prop_info *pi, void *ptr)
cookie->count++;
}
static int foreach_property(const uint32_t off,
static int foreach_property(prop_bt *const trie,
void (*propfn)(const prop_info *pi, void *cookie), void *cookie)
{
prop_bt *trie = reinterpret_cast<prop_bt*>(to_prop_obj(off));
if (!trie)
return -1;
if (trie->left) {
const int err = foreach_property(trie->left, propfn, cookie);
uint_least32_t left_offset = atomic_load_explicit(&trie->left, memory_order_relaxed);
if (left_offset != 0) {
const int err = foreach_property(to_prop_bt(&trie->left), propfn, cookie);
if (err < 0)
return -1;
}
if (trie->prop) {
prop_info *info = reinterpret_cast<prop_info*>(to_prop_obj(trie->prop));
uint_least32_t prop_offset = atomic_load_explicit(&trie->prop, memory_order_relaxed);
if (prop_offset != 0) {
prop_info *info = to_prop_info(&trie->prop);
if (!info)
return -1;
propfn(info, cookie);
}
if (trie->children) {
const int err = foreach_property(trie->children, propfn, cookie);
uint_least32_t children_offset = atomic_load_explicit(&trie->children, memory_order_relaxed);
if (children_offset != 0) {
const int err = foreach_property(to_prop_bt(&trie->children), propfn, cookie);
if (err < 0)
return -1;
}
if (trie->right) {
const int err = foreach_property(trie->right, propfn, cookie);
uint_least32_t right_offset = atomic_load_explicit(&trie->right, memory_order_relaxed);
if (right_offset != 0) {
const int err = foreach_property(to_prop_bt(&trie->right), propfn, cookie);
if (err < 0)
return -1;
}
@@ -766,5 +778,5 @@ int __system_property_foreach(void (*propfn)(const prop_info *pi, void *cookie),
return __system_property_foreach_compat(propfn, cookie);
}
return foreach_property(0, propfn, cookie);
return foreach_property(root_node(), propfn, cookie);
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* Copyright (C) 2015 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -25,23 +25,9 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <stddef.h>
#include <string.h>
void *memrchr(const void *s, int c, size_t n)
{
if (n > 0) {
const char* p = (const char*) s;
const char* q = p + n;
#include <wchar.h>
while (1) {
q--; if (q < p || q[0] == (char) c) break;
q--; if (q < p || q[0] == (char) c) break;
q--; if (q < p || q[0] == (char) c) break;
q--; if (q < p || q[0] == (char) c) break;
}
if (q >= p)
return (void*)q;
}
return NULL;
wchar_t* wmempcpy(wchar_t* dst, const wchar_t* src, size_t n) {
return wmemcpy(dst, src, n) + n;
}
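// Usage sketch (illustrative, not part of this patch): unlike wmemcpy,
// wmempcpy returns a pointer one past the last wide character written,
// which makes chained concatenation cheap.
//
//   wchar_t buf[8];
//   wchar_t* p = buf;
//   p = wmempcpy(p, L"foo", 3);  // p == buf + 3
//   p = wmempcpy(p, L"bar", 3);  // p == buf + 6
//   *p = L'\0';                  // buf now holds L"foobar"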

View File

@@ -402,6 +402,10 @@ res_nsend(res_state statp,
}
if (statp->nscount == 0) {
// We have no nameservers configured, so there's no point trying.
// Tell the cache the query failed, or any retries and anyone else asking the same
// question will block for PENDING_REQUEST_TIMEOUT seconds instead of failing fast.
_resolv_cache_query_failed(statp->netid, buf, buflen);
errno = ESRCH;
return (-1);
}

View File

@@ -39,6 +39,8 @@
#define _REALLY_INCLUDE_SYS__SYSTEM_PROPERTIES_H_
#include <sys/_system_properties.h>
#include "private/ThreadLocalBuffer.h"
/* Set to 1 to enable debug traces */
#define DEBUG 0
@@ -50,8 +52,6 @@
# define D(...) do{}while(0)
#endif
static pthread_key_t _res_key;
typedef struct {
int _h_errno;
// TODO: Have one __res_state per network so we don't have to repopulate frequently.
@@ -105,12 +105,7 @@ _res_thread_free( void* _rt )
free(rt);
}
__attribute__((constructor))
static void
_res_init_key( void )
{
pthread_key_create( &_res_key, _res_thread_free );
}
BIONIC_PTHREAD_KEY_WITH_CONSTRUCTOR(_res_key, _res_thread_free);
static _res_thread*
_res_thread_get(void)

View File

@@ -94,6 +94,13 @@ typedef struct {
#define DT_PREINIT_ARRAY 32
#define DT_PREINIT_ARRAYSZ 33
/* Android compressed rel/rela sections */
#define DT_ANDROID_REL (DT_LOOS + 2)
#define DT_ANDROID_RELSZ (DT_LOOS + 3)
#define DT_ANDROID_RELA (DT_LOOS + 4)
#define DT_ANDROID_RELASZ (DT_LOOS + 5)
/* gnu hash entry */
#define DT_GNU_HASH 0x6ffffef5
@@ -106,6 +113,9 @@ typedef struct {
#define STB_LOPROC 13
#define STB_HIPROC 15
#define SHT_LOOS 0x60000000
#define SHT_HIOS 0x6fffffff
#define STT_GNU_IFUNC 10
#define STT_LOOS 10
#define STT_HIOS 12
@@ -115,4 +125,8 @@ typedef struct {
/* The kernel uses NT_PRFPREG but glibc also offers NT_FPREGSET */
#define NT_FPREGSET NT_PRFPREG
#define ELF_NOTE_GNU "GNU"
#define NT_GNU_BUILD_ID 3
#endif /* _ELF_H */

View File

@@ -59,22 +59,29 @@ __BEGIN_DECLS
extern int creat(const char*, mode_t);
extern int creat64(const char*, mode_t);
extern int fallocate64(int, int, off64_t, off64_t);
extern int fallocate(int, int, off_t, off_t);
extern int fcntl(int, int, ...);
extern int openat(int, const char*, int, ...);
extern int openat64(int, const char*, int, ...);
extern int open(const char*, int, ...);
extern int open64(const char*, int, ...);
extern int posix_fadvise64(int, off64_t, off64_t, int);
extern int posix_fadvise(int, off_t, off_t, int);
extern int posix_fallocate64(int, off64_t, off64_t);
extern int posix_fallocate(int, off_t, off_t);
extern ssize_t splice(int, off64_t*, int, off64_t*, size_t, unsigned int);
extern ssize_t tee(int, int, size_t, unsigned int);
extern int unlinkat(int, const char*, int);
extern ssize_t vmsplice(int, const struct iovec*, size_t, unsigned int);
#if defined(__USE_FILE_OFFSET64)
extern int fallocate(int, int, off_t, off_t) __RENAME(fallocate64);
extern int posix_fadvise(int, off_t, off_t, int) __RENAME(posix_fadvise64);
extern int posix_fallocate(int, off_t, off_t) __RENAME(posix_fallocate64);
#else
extern int fallocate(int, int, off_t, off_t);
extern int posix_fadvise(int, off_t, off_t, int);
extern int posix_fallocate(int, off_t, off_t);
#endif
extern int fallocate64(int, int, off64_t, off64_t);
extern int posix_fadvise64(int, off64_t, off64_t, int);
extern int posix_fallocate64(int, off64_t, off64_t);
extern int __open_2(const char*, int);
extern int __open_real(const char*, int, ...) __RENAME(open);
extern int __openat_2(int, const char*, int);

View File

@@ -54,9 +54,9 @@ __BEGIN_DECLS
struct group *getgrgid(gid_t);
struct group *getgrnam(const char *);
#if __POSIX_VISIBLE >= 200112 || __XPG_VISIBLE
struct group *getgrent(void);
void setgrent(void);
void endgrent(void);
struct group *getgrent(void) __attribute__((deprecated("getgrent is meaningless on Android")));
void setgrent(void) __attribute__((deprecated("setgrent is meaningless on Android")));
void endgrent(void) __attribute__((deprecated("endgrent is meaningless on Android")));
int getgrgid_r(gid_t, struct group *, char *,
size_t, struct group **);
int getgrnam_r(const char *, struct group *, char *,

View File

@@ -39,7 +39,6 @@
#define _PATH_CONSOLE "/dev/console"
#define _PATH_DEVNULL "/dev/null"
#define _PATH_KLOG "/proc/kmsg"
#define _PATH_MEM "/dev/mem"
#define _PATH_MOUNTED "/proc/mounts"
#define _PATH_TTY "/dev/tty"

View File

@@ -73,13 +73,14 @@ enum {
};
typedef struct {
unsigned int value;
#ifdef __LP64__
char __reserved[44];
#if defined(__LP64__)
char __private[48];
#else
char __private[4];
#endif
} pthread_cond_t;
} pthread_cond_t __attribute__((aligned(8)));
#define PTHREAD_COND_INITIALIZER {0 __RESERVED_INITIALIZER}
#define PTHREAD_COND_INITIALIZER { { 0 } }
typedef long pthread_mutexattr_t;
typedef long pthread_condattr_t;
@@ -87,28 +88,14 @@ typedef long pthread_condattr_t;
typedef long pthread_rwlockattr_t;
typedef struct {
#if !defined(__LP64__)
pthread_mutex_t __unused_lock;
pthread_cond_t __unused_cond;
#endif
volatile int32_t state; // 0=unlock, -1=writer lock, +n=reader lock
volatile int32_t writer_thread_id;
volatile int32_t pending_readers;
volatile int32_t pending_writers;
int32_t attr;
#ifdef __LP64__
char __reserved[36];
#if defined(__LP64__)
char __private[56];
#else
char __reserved[12];
char __private[40];
#endif
} pthread_rwlock_t __attribute__((aligned(8)));
} pthread_rwlock_t;
#ifdef __LP64__
#define PTHREAD_RWLOCK_INITIALIZER { 0, 0, 0, 0, 0, { 0 } }
#else
#define PTHREAD_RWLOCK_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0, 0, 0, 0, { 0 } }
#endif
#define PTHREAD_RWLOCK_INITIALIZER { { 0 } }
typedef int pthread_key_t;

View File

@@ -119,10 +119,6 @@ struct passwd* getpwuid(uid_t);
int getpwnam_r(const char*, struct passwd*, char*, size_t, struct passwd**);
int getpwuid_r(uid_t, struct passwd*, char*, size_t, struct passwd**);
void endpwent(void);
struct passwd* getpwent(void);
int setpwent(void);
__END_DECLS
#endif

View File

@@ -57,8 +57,6 @@
__BEGIN_DECLS
#define _FSTDIO /* Define for new stdio with functions. */
typedef off_t fpos_t; /* stdio file position type */
/*
@@ -266,27 +264,38 @@ int vdprintf(int, const char * __restrict, __va_list) __printflike(2, 0);
#ifndef __AUDIT__
#if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112L
char* gets(char*) __warnattr("gets is very unsafe; consider using fgets");
char* gets(char*) __attribute__((deprecated("gets is unsafe, use fgets instead")));
#endif
int sprintf(char* __restrict, const char* __restrict, ...)
__printflike(2, 3) __warnattr("sprintf is often misused; please use snprintf");
char* tmpnam(char*) __warnattr("tmpnam possibly used unsafely; consider using mkstemp");
int vsprintf(char* __restrict, const char* __restrict, __va_list)
__printflike(2, 0) __warnattr("vsprintf is often misused; please use vsnprintf");
char* tmpnam(char*) __attribute__((deprecated("tmpnam is unsafe, use mkstemp or tmpfile instead")));
#if __XPG_VISIBLE
char* tempnam(const char*, const char*)
__warnattr("tempnam possibly used unsafely; consider using mkstemp");
__attribute__((deprecated("tempnam is unsafe, use mkstemp or tmpfile instead")));
#endif
#endif
extern int rename(const char*, const char*);
extern int renameat(int, const char*, int, const char*);
#if defined(__USE_FILE_OFFSET64)
/* Not possible. */
int fgetpos(FILE * __restrict, fpos_t * __restrict)
__attribute__((__error__("not available with _FILE_OFFSET_BITS=64")));
int fsetpos(FILE *, const fpos_t *)
__attribute__((__error__("not available with _FILE_OFFSET_BITS=64")));
int fseeko(FILE *, off_t, int)
__attribute__((__error__("not available with _FILE_OFFSET_BITS=64")));
off_t ftello(FILE *)
__attribute__((__error__("not available with _FILE_OFFSET_BITS=64")));
#else
int fgetpos(FILE * __restrict, fpos_t * __restrict);
int fsetpos(FILE *, const fpos_t *);
int fseeko(FILE *, off_t, int);
off_t ftello(FILE *);
#endif
#if __ISO_C_VISIBLE >= 1999 || __BSD_VISIBLE
int snprintf(char * __restrict, size_t, const char * __restrict, ...)

View File

@@ -58,7 +58,7 @@ extern int unsetenv(const char*);
extern int clearenv(void);
extern char* mkdtemp(char*);
extern char* mktemp(char*) __warnattr("mktemp possibly used unsafely; consider using mkstemp");
extern char* mktemp(char*) __attribute__((deprecated("mktemp is unsafe, use mkstemp or tmpfile instead")));
extern int mkostemp64(char*, int);
extern int mkostemp(char*, int);

View File

@@ -44,6 +44,9 @@ extern void* memchr(const void *, int, size_t) __purefunc;
extern void* memrchr(const void *, int, size_t) __purefunc;
extern int memcmp(const void *, const void *, size_t) __purefunc;
extern void* memcpy(void* __restrict, const void* __restrict, size_t);
#if defined(__USE_GNU)
extern void* mempcpy(void* __restrict, const void* __restrict, size_t);
#endif
extern void* memmove(void *, const void *, size_t);
extern void* memset(void *, int, size_t);
extern void* memmem(const void *, size_t, const void *, size_t) __purefunc;

View File

@@ -335,13 +335,15 @@
#endif
#if __GNUC_PREREQ(4, 3)
#define __errordecl(name, msg) extern void name(void) __attribute__((__error__(msg)))
#define __errorattr(msg) __attribute__((__error__(msg)))
#define __warnattr(msg) __attribute__((__warning__(msg)))
#else
#define __errordecl(name, msg) extern void name(void)
#define __errorattr(msg)
#define __warnattr(msg)
#endif
#define __errordecl(name, msg) extern void name(void) __errorattr(msg)
/*
* Some BSD source needs these macros.
* Originally they embedded the rcs versions of each source file
@@ -378,6 +380,15 @@
# define __USE_BSD 1
#endif
/*
* _FILE_OFFSET_BITS 64 support.
*/
#if !defined(__LP64__) && defined(_FILE_OFFSET_BITS)
#if _FILE_OFFSET_BITS == 64
#define __USE_FILE_OFFSET64 1
#endif
#endif
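/*
 * Illustrative sketch (not part of this patch): headers elsewhere in this
 * change consume __USE_FILE_OFFSET64 to redirect off_t-based functions to
 * their 64-bit variants on LP32, e.g. in <unistd.h>:
 *
 *   #if defined(__USE_FILE_OFFSET64)
 *   extern off_t lseek(int, off_t, int) __RENAME(lseek64);
 *   #else
 *   extern off_t lseek(int, off_t, int);
 *   #endif
 */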
/*-
* POSIX.1 requires that the macros we test be defined before any standard
* header file is included.

View File

@@ -25,14 +25,19 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _SYS_FILE_H_
#define _SYS_FILE_H_
#include <sys/cdefs.h>
#include <sys/types.h>
/* ANDROID: needed for flock() */
#include <unistd.h>
#include <fcntl.h>
__BEGIN_DECLS
int flock(int, int);
__END_DECLS
#endif /* _SYS_FILE_H_ */

View File

@@ -49,8 +49,13 @@ __BEGIN_DECLS
#define POSIX_MADV_WILLNEED MADV_WILLNEED
#define POSIX_MADV_DONTNEED MADV_DONTNEED
#if defined(__USE_FILE_OFFSET64)
extern void* mmap(void*, size_t, int, int, int, off_t) __RENAME(mmap64);
#else
extern void* mmap(void*, size_t, int, int, int, off_t);
#endif
extern void* mmap64(void*, size_t, int, int, int, off64_t);
extern int munmap(void*, size_t);
extern int msync(const void*, size_t, int);
extern int mprotect(const void*, size_t, int);

View File

@@ -25,6 +25,7 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _SYS_MOUNT_H
#define _SYS_MOUNT_H
@@ -35,9 +36,10 @@
__BEGIN_DECLS
/* umount2 flags. */
#define MNT_FORCE 1 /* Forcibly unmount */
#define MNT_DETACH 2 /* Detach from tree only */
#define MNT_EXPIRE 4 /* Mark for expiry */
#define MNT_FORCE 1
#define MNT_DETACH 2
#define MNT_EXPIRE 4
#define UMOUNT_NOFOLLOW 8
extern int mount(const char*, const char*, const char*, unsigned long, const void*);
extern int umount(const char*);

View File

@@ -36,6 +36,10 @@
__BEGIN_DECLS
/* The kernel header doesn't have these, but POSIX does. */
#define RLIM_SAVED_CUR RLIM_INFINITY
#define RLIM_SAVED_MAX RLIM_INFINITY
typedef unsigned long rlim_t;
extern int getrlimit(int, struct rlimit*);

View File

@@ -34,7 +34,11 @@
__BEGIN_DECLS
#if defined(__USE_FILE_OFFSET64)
extern ssize_t sendfile(int out_fd, int in_fd, off_t* offset, size_t count) __RENAME(sendfile64);
#else
extern ssize_t sendfile(int out_fd, int in_fd, off_t* offset, size_t count);
#endif
extern ssize_t sendfile64(int out_fd, int in_fd, off64_t* offset, size_t count);
__END_DECLS

View File

@@ -25,20 +25,13 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _SYS_SYSCALL_H_
#define _SYS_SYSCALL_H_
#include <errno.h>
#include <sys/cdefs.h>
#include <sys/types.h>
#include <asm/unistd.h>
#include <asm/unistd.h> /* Linux kernel __NR_* names. */
#include <sys/glibc-syscalls.h> /* glibc-compatible SYS_* aliases. */
#include <sys/glibc-syscalls.h> /* glibc-compatible SYS_* aliases for our __NR_* names. */
__BEGIN_DECLS
long syscall(long number, ...);
__END_DECLS
/* The syscall function itself is declared in <unistd.h>, not here. */
#endif /* _SYS_SYSCALL_H_ */

View File

@@ -90,16 +90,14 @@ typedef uint64_t dev_t;
typedef __kernel_time_t __time_t;
typedef __time_t time_t;
/* This historical accident means that we had a 32-bit off_t on 32-bit architectures. */
#if !defined(__LP64__)
typedef __kernel_off_t off_t;
typedef __kernel_loff_t loff_t;
#if defined(__USE_FILE_OFFSET64) || defined(__LP64__)
typedef int64_t off_t;
typedef off_t loff_t;
typedef loff_t off64_t;
#else
/* We could re-use the LP32 definitions, but that would mean that although off_t and loff_t/off64_t
* would be the same size, they wouldn't actually be the same type, which can lead to warnings. */
/* This historical accident means that we had a 32-bit off_t on 32-bit architectures. */
typedef __kernel_off_t off_t;
typedef off_t loff_t;
typedef __kernel_loff_t loff_t;
typedef loff_t off64_t;
#endif

View File

@@ -47,6 +47,7 @@ __BEGIN_DECLS
#define LOG_PRIMASK 7
#define LOG_PRI(x) ((x) & LOG_PRIMASK)
#define LOG_MAKEPRI(fac, pri) ((fac) | (pri))
/* Facilities are currently ignored on Android. */
#define LOG_KERN 0000

View File

@@ -116,10 +116,6 @@ extern int setresgid(gid_t, gid_t, gid_t);
extern int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid);
extern int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid);
extern char* getlogin(void);
extern char* getusershell(void);
extern void setusershell(void);
extern void endusershell(void);
extern long fpathconf(int, int);
extern long pathconf(const char*, int);
@@ -146,32 +142,40 @@ extern int chown(const char *, uid_t, gid_t);
extern int fchown(int, uid_t, gid_t);
extern int fchownat(int, const char*, uid_t, gid_t, int);
extern int lchown(const char *, uid_t, gid_t);
extern int truncate(const char *, off_t);
extern int truncate64(const char *, off64_t);
extern char *getcwd(char *, size_t);
extern int sync(void);
extern int close(int);
extern off_t lseek(int, off_t, int);
extern off64_t lseek64(int, off64_t, int);
extern ssize_t read(int, void *, size_t);
extern ssize_t write(int, const void *, size_t);
extern ssize_t pread(int, void *, size_t, off_t);
extern ssize_t pread64(int, void *, size_t, off64_t);
extern ssize_t pwrite(int, const void *, size_t, off_t);
extern ssize_t pwrite64(int, const void *, size_t, off64_t);
extern int dup(int);
extern int dup2(int, int);
extern int dup3(int, int, int);
extern int fcntl(int, int, ...);
extern int ioctl(int, int, ...);
extern int flock(int, int);
extern int fsync(int);
extern int fdatasync(int);
#if defined(__USE_FILE_OFFSET64)
extern int truncate(const char *, off_t) __RENAME(truncate64);
extern off_t lseek(int, off_t, int) __RENAME(lseek64);
extern ssize_t pread(int, void *, size_t, off_t) __RENAME(pread64);
extern ssize_t pwrite(int, const void *, size_t, off_t) __RENAME(pwrite64);
extern int ftruncate(int, off_t) __RENAME(ftruncate64);
#else
extern int truncate(const char *, off_t);
extern off_t lseek(int, off_t, int);
extern ssize_t pread(int, void *, size_t, off_t);
extern ssize_t pwrite(int, const void *, size_t, off_t);
extern int ftruncate(int, off_t);
#endif
extern int truncate64(const char *, off64_t);
extern off64_t lseek64(int, off64_t, int);
extern ssize_t pread64(int, void *, size_t, off64_t);
extern ssize_t pwrite64(int, const void *, size_t, off64_t);
extern int ftruncate64(int, off64_t);
extern int pause(void);
@@ -200,6 +204,8 @@ int getpagesize(void);
long sysconf(int);
long syscall(long number, ...);
extern int daemon(int, int);
#if defined(__arm__) || (defined(__mips__) && !defined(__LP64__))

View File

@@ -151,6 +151,9 @@ extern int wcwidth(wchar_t);
extern wchar_t *wmemchr(const wchar_t *, wchar_t, size_t);
extern int wmemcmp(const wchar_t *, const wchar_t *, size_t);
extern wchar_t *wmemcpy(wchar_t *, const wchar_t *, size_t);
#if defined(__USE_GNU)
extern wchar_t *wmempcpy(wchar_t *, const wchar_t *, size_t);
#endif
extern wchar_t *wmemmove(wchar_t *, const wchar_t *, size_t);
extern wchar_t *wmemset(wchar_t *, wchar_t, size_t);
extern int wprintf(const wchar_t *, ...);

View File

@@ -38,15 +38,17 @@
// We used to use pthread_once to initialize the keys, but life is more predictable
// if we allocate them all up front when the C library starts up, via __constructor__.
#define BIONIC_PTHREAD_KEY_WITH_CONSTRUCTOR(key_name, key_destructor) \
static pthread_key_t key_name; \
__attribute__((constructor)) static void __bionic_tls_ ## key_name ## _key_init() { \
pthread_key_create(&key_name, key_destructor); \
}
#define GLOBAL_INIT_THREAD_LOCAL_BUFFER(name) \
static pthread_key_t __bionic_tls_ ## name ## _key; \
static void __bionic_tls_ ## name ## _key_destroy(void* buffer) { \
free(buffer); \
} \
__attribute__((constructor)) static void __bionic_tls_ ## name ## _key_init() { \
pthread_key_create(&__bionic_tls_ ## name ## _key, __bionic_tls_ ## name ## _key_destroy); \
}
BIONIC_PTHREAD_KEY_WITH_CONSTRUCTOR(__bionic_tls_ ## name ## _key, __bionic_tls_ ## name ## _key_destroy)
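// Illustrative expansion (not part of this patch): for a hypothetical key,
// BIONIC_PTHREAD_KEY_WITH_CONSTRUCTOR(foo_key, foo_free) produces
//
//   static pthread_key_t foo_key;
//   __attribute__((constructor)) static void __bionic_tls_foo_key_key_init() {
//     pthread_key_create(&foo_key, foo_free);
//   }
//
// so the key is created before main() runs and before any threads exist.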
// Leaves "name_tls_buffer" and "name_tls_buffer_size" defined and initialized.
#define LOCAL_INIT_THREAD_LOCAL_BUFFER(type, name, byte_count) \

View File

@@ -57,4 +57,8 @@
ENTRY(f); \
.hidden f \
#define ALIAS_SYMBOL(alias, original) \
.globl alias; \
.equ alias, original
#endif /* _PRIVATE_BIONIC_ASM_H_ */

View File

@@ -1,74 +0,0 @@
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BIONIC_ATOMIC_ARM_H
#define BIONIC_ATOMIC_ARM_H
__ATOMIC_INLINE__ void __bionic_memory_barrier() {
__asm__ __volatile__ ( "dmb ish" : : : "memory" );
}
/* Compare-and-swap, without any explicit barriers. Note that this function
* returns 0 on success, and 1 on failure. The opposite convention is typically
* used on other platforms.
*/
__ATOMIC_INLINE__ int __bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr) {
int32_t prev, status;
do {
__asm__ __volatile__ (
"ldrex %0, [%3]\n"
"mov %1, #0\n"
"teq %0, %4\n"
#ifdef __thumb2__
"it eq\n"
#endif
"strexeq %1, %5, [%3]"
: "=&r" (prev), "=&r" (status), "+m"(*ptr)
: "r" (ptr), "Ir" (old_value), "r" (new_value)
: "cc");
} while (__builtin_expect(status != 0, 0));
return prev != old_value;
}
/* Swap, without any explicit barriers. */
__ATOMIC_INLINE__ int32_t __bionic_swap(int32_t new_value, volatile int32_t* ptr) {
int32_t prev, status;
do {
__asm__ __volatile__ (
"ldrex %0, [%3]\n"
"strex %1, %4, [%3]"
: "=&r" (prev), "=&r" (status), "+m" (*ptr)
: "r" (ptr), "r" (new_value)
: "cc");
} while (__builtin_expect(status != 0, 0));
return prev;
}
/* Atomic decrement, without explicit barriers. */
__ATOMIC_INLINE__ int32_t __bionic_atomic_dec(volatile int32_t* ptr) {
int32_t prev, tmp, status;
do {
__asm__ __volatile__ (
"ldrex %0, [%4]\n"
"sub %1, %0, #1\n"
"strex %2, %1, [%4]"
: "=&r" (prev), "=&r" (tmp), "=&r" (status), "+m"(*ptr)
: "r" (ptr)
: "cc");
} while (__builtin_expect(status != 0, 0));
return prev;
}
#endif /* SYS_ATOMICS_ARM_H */

View File

@@ -1,72 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BIONIC_ATOMIC_AARCH64_H
#define BIONIC_ATOMIC_AARCH64_H
/* For ARMv8, we can use the 'dmb' instruction directly */
__ATOMIC_INLINE__ void __bionic_memory_barrier() {
__asm__ __volatile__ ( "dmb ish" : : : "memory" );
}
/* Compare-and-swap, without any explicit barriers. Note that this function
* returns 0 on success, and 1 on failure. The opposite convention is typically
* used on other platforms.
*/
__ATOMIC_INLINE__ int __bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr) {
int32_t tmp, oldval;
__asm__ __volatile__ (
"// atomic_cmpxchg\n"
"1: ldxr %w1, [%3]\n"
" cmp %w1, %w4\n"
" b.ne 2f\n"
" stxr %w0, %w5, [%3]\n"
" cbnz %w0, 1b\n"
"2:"
: "=&r" (tmp), "=&r" (oldval), "+o"(*ptr)
: "r" (ptr), "Ir" (old_value), "r" (new_value)
: "cc", "memory");
return oldval != old_value;
}
/* Swap, without any explicit barriers. */
__ATOMIC_INLINE__ int32_t __bionic_swap(int32_t new_value, volatile int32_t* ptr) {
int32_t prev, status;
__asm__ __volatile__ (
"// atomic_swap\n"
"1: ldxr %w0, [%3]\n"
" stxr %w1, %w4, [%3]\n"
" cbnz %w1, 1b\n"
: "=&r" (prev), "=&r" (status), "+o" (*ptr)
: "r" (ptr), "r" (new_value)
: "cc", "memory");
return prev;
}
/* Atomic decrement, without explicit barriers. */
__ATOMIC_INLINE__ int32_t __bionic_atomic_dec(volatile int32_t* ptr) {
int32_t prev, tmp, status;
__asm__ __volatile__ (
"1: ldxr %w0, [%4]\n"
" sub %w1, %w0, #1\n"
" stxr %w2, %w1, [%4]\n"
" cbnz %w2, 1b"
: "=&r" (prev), "=&r" (tmp), "=&r" (status), "+m"(*ptr)
: "r" (ptr)
: "cc", "memory");
return prev;
}
#endif /* BIONIC_ATOMICS_AARCH64_H */

View File

@@ -1,50 +0,0 @@
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BIONIC_ATOMIC_GCC_BUILTIN_H
#define BIONIC_ATOMIC_GCC_BUILTIN_H
/*
* This header file is used by default if we don't have optimized atomic
* routines for a given platform. See bionic_atomic_arm.h and
* bionic_atomic_x86.h for examples.
*
* Note that the GCC builtins include barriers that aren't present in
* the architecture-specific assembler versions.
*/
__ATOMIC_INLINE__ void __bionic_memory_barrier() {
__sync_synchronize();
}
__ATOMIC_INLINE__ int __bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr) {
/* We must return 0 on success. */
return __sync_val_compare_and_swap(ptr, old_value, new_value) != old_value;
}
__ATOMIC_INLINE__ int32_t __bionic_swap(int32_t new_value, volatile int32_t* ptr) {
int32_t old_value;
do {
old_value = *ptr;
} while (__sync_val_compare_and_swap(ptr, old_value, new_value) != old_value);
return old_value;
}
__ATOMIC_INLINE__ int32_t __bionic_atomic_dec(volatile int32_t* ptr) {
/* We must return the old value. */
return __sync_fetch_and_add(ptr, -1);
}
#endif /* BIONIC_ATOMIC_GCC_BUILTIN_H */

View File

@@ -1,61 +0,0 @@
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BIONIC_ATOMIC_INLINE_H
#define BIONIC_ATOMIC_INLINE_H
/*
* Inline declarations and macros for some special-purpose atomic
* operations. These are intended for rare circumstances where a
* memory barrier needs to be issued inline rather than as a function
* call.
*
* Macros defined in this header:
*
* void ANDROID_MEMBAR_FULL()
* Full memory barrier. Provides a compiler reordering barrier, and
* on SMP systems emits an appropriate instruction.
*/
#ifdef __cplusplus
extern "C" {
#endif
/* Define __ATOMIC_INLINE__ to control the inlining of all the atomic
 * functions declared here. For a slight performance boost, we want
 * all of them to be always_inline.
 */
#define __ATOMIC_INLINE__ static __inline__ __attribute__((always_inline))
#if defined(__arm__)
# include "bionic_atomic_arm.h"
#elif defined(__aarch64__)
# include "bionic_atomic_arm64.h"
#elif defined(__i386__)
# include "bionic_atomic_x86.h"
#elif defined(__mips__)
# include "bionic_atomic_mips.h"
#else
# include "bionic_atomic_gcc_builtin.h"
#endif
#define ANDROID_MEMBAR_FULL __bionic_memory_barrier
#ifdef __cplusplus
} // extern "C"
#endif
#endif // BIONIC_ATOMIC_INLINE_H
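For a sense of where ANDROID_MEMBAR_FULL is meant to be issued inline, here is an illustrative publish/consume pattern (not from bionic; note both sides need the barrier, and the names are invented):

static int32_t g_payload;
static volatile int32_t g_ready = 0;

static void publish(int32_t value) {
  g_payload = value;
  ANDROID_MEMBAR_FULL();  /* keep the payload store from reordering past the flag store */
  g_ready = 1;
}

static int try_consume(int32_t* out) {
  if (g_ready == 0) return 0;
  ANDROID_MEMBAR_FULL();  /* keep the flag read from reordering past the payload read */
  *out = g_payload;
  return 1;
}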

View File

@@ -1,71 +0,0 @@
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BIONIC_ATOMIC_MIPS_H
#define BIONIC_ATOMIC_MIPS_H
/* Define a full memory barrier; this is only needed if we build the
 * platform for a multi-core device.
 */
__ATOMIC_INLINE__ void __bionic_memory_barrier() {
__asm__ __volatile__ ( "sync" : : : "memory" );
}
/* Compare-and-swap, without any explicit barriers. Note that this function
* returns 0 on success, and 1 on failure. The opposite convention is typically
* used on other platforms.
*/
__ATOMIC_INLINE__ int __bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr) {
int32_t prev, status;
__asm__ __volatile__ ("1: move %[status], %[new_value] \n"
" ll %[prev], 0(%[ptr]) \n"
" bne %[old_value], %[prev], 2f \n"
" sc %[status], 0(%[ptr]) \n"
" beqz %[status], 1b \n"
"2: \n"
: [prev]"=&r"(prev), [status]"=&r"(status), "+m"(*ptr)
: [new_value]"r"(new_value), [old_value]"r"(old_value), [ptr]"r"(ptr)
: "memory");
return prev != old_value;
}
/* Swap, without any explicit barriers. */
__ATOMIC_INLINE__ int32_t __bionic_swap(int32_t new_value, volatile int32_t* ptr) {
int32_t prev, status;
__asm__ __volatile__ ("1: move %[status], %[new_value] \n"
" ll %[prev], 0(%[ptr]) \n"
" sc %[status], 0(%[ptr]) \n"
" beqz %[status], 1b \n"
: [prev]"=&r"(prev), [status]"=&r"(status), "+m"(*ptr)
: [ptr]"r"(ptr), [new_value]"r"(new_value)
: "memory");
return prev;
}
/* Atomic decrement, without explicit barriers. */
__ATOMIC_INLINE__ int32_t __bionic_atomic_dec(volatile int32_t* ptr) {
int32_t prev, status;
__asm__ __volatile__ ("1: ll %[prev], 0(%[ptr]) \n"
" addiu %[status], %[prev], -1 \n"
" sc %[status], 0(%[ptr]) \n"
" beqz %[status], 1b \n"
: [prev]"=&r" (prev), [status]"=&r"(status), "+m" (*ptr)
: [ptr]"r"(ptr)
: "memory");
return prev;
}
#endif /* BIONIC_ATOMIC_MIPS_H */

View File

@@ -1,58 +0,0 @@
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BIONIC_ATOMIC_X86_H
#define BIONIC_ATOMIC_X86_H
/* Define a full memory barrier; this is only needed if we build the
 * platform for a multi-core device.
 */
__ATOMIC_INLINE__ void __bionic_memory_barrier() {
__asm__ __volatile__ ( "mfence" : : : "memory" );
}
/* Compare-and-swap, without any explicit barriers. Note that this function
* returns 0 on success, and 1 on failure. The opposite convention is typically
* used on other platforms.
*/
__ATOMIC_INLINE__ int __bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr) {
int32_t prev;
__asm__ __volatile__ ("lock; cmpxchgl %1, %2"
: "=a" (prev)
: "q" (new_value), "m" (*ptr), "0" (old_value)
: "memory");
return prev != old_value;
}
/* Swap, without any explicit barriers. */
__ATOMIC_INLINE__ int32_t __bionic_swap(int32_t new_value, volatile int32_t *ptr) {
__asm__ __volatile__ ("xchgl %1, %0"
: "=r" (new_value)
: "m" (*ptr), "0" (new_value)
: "memory");
return new_value;
}
/* Atomic decrement, without explicit barriers. */
__ATOMIC_INLINE__ int32_t __bionic_atomic_dec(volatile int32_t* ptr) {
int increment = -1;
__asm__ __volatile__ ("lock; xaddl %0, %1"
: "+r" (increment), "+m" (*ptr)
: : "memory");
/* increment now holds the old value of *ptr */
return increment;
}
#endif /* BIONIC_ATOMIC_X86_H */

View File

@@ -34,6 +34,7 @@
#include <stddef.h>
#include <sys/cdefs.h>
#include <sys/syscall.h>
#include <unistd.h>
__BEGIN_DECLS

View File

@@ -67,18 +67,18 @@ enum {
TLS_SLOT_STACK_GUARD = 5, // GCC requires this specific slot for x86.
TLS_SLOT_DLERROR,
TLS_SLOT_FIRST_USER_SLOT // Must come last!
BIONIC_TLS_SLOTS // Must come last!
};
/*
* There are two kinds of slot used internally by bionic --- there are the well-known slots
* enumerated above, and then there are those that are allocated during startup by calls to
* pthread_key_create; grep for GLOBAL_INIT_THREAD_LOCAL_BUFFER to find those. We need to manually
* maintain that second number, but pthread_test will fail if we forget.
* Following are current pthread keys used internally:
* Bionic uses some pthread keys internally. All pthread keys used internally
* should be created in constructors, except for keys that may be used in or before constructors.
* We need to manually maintain the count of pthread keys used internally, but
* pthread_test should fail if we forget.
* Following are current pthread keys used internally by libc:
* basename libc (GLOBAL_INIT_THREAD_LOCAL_BUFFER)
* dirname libc (GLOBAL_INIT_THREAD_LOCAL_BUFFER)
* uselocale libc
* uselocale libc (can be used in constructors)
* getmntent_mntent libc (GLOBAL_INIT_THREAD_LOCAL_BUFFER)
* getmntent_strings libc (GLOBAL_INIT_THREAD_LOCAL_BUFFER)
* ptsname libc (GLOBAL_INIT_THREAD_LOCAL_BUFFER)
@@ -87,29 +87,30 @@ enum {
* strsignal libc (GLOBAL_INIT_THREAD_LOCAL_BUFFER)
* passwd libc (GLOBAL_INIT_THREAD_LOCAL_BUFFER)
* group libc (GLOBAL_INIT_THREAD_LOCAL_BUFFER)
* _res_key libc
* _res_key libc (BIONIC_PTHREAD_KEY_WITH_CONSTRUCTOR)
*/
#define LIBC_PTHREAD_KEY_RESERVED_COUNT 12
#if defined(USE_JEMALLOC)
/* Following are current pthread keys used internally by jemalloc:
* je_thread_allocated_tsd jemalloc
* je_arenas_tsd jemalloc
* je_tcache_tsd jemalloc
* je_tcache_enabled_tsd jemalloc
* je_quarantine_tsd jemalloc
*
*/
#define LIBC_TLS_RESERVED_SLOTS 12
#if defined(USE_JEMALLOC)
/* jemalloc uses 5 keys for itself. */
#define BIONIC_TLS_RESERVED_SLOTS (LIBC_TLS_RESERVED_SLOTS + 5)
#define JEMALLOC_PTHREAD_KEY_RESERVED_COUNT 5
#define BIONIC_PTHREAD_KEY_RESERVED_COUNT (LIBC_PTHREAD_KEY_RESERVED_COUNT + JEMALLOC_PTHREAD_KEY_RESERVED_COUNT)
#else
#define BIONIC_TLS_RESERVED_SLOTS LIBC_TLS_RESERVED_SLOTS
#define BIONIC_PTHREAD_KEY_RESERVED_COUNT LIBC_PTHREAD_KEY_RESERVED_COUNT
#endif
/*
* Maximum number of elements in the TLS array.
* This includes space for pthread keys and our own internal slots.
* Maximum number of pthread keys allocated.
* This includes pthread keys used internally and externally.
*/
#define BIONIC_TLS_SLOTS (PTHREAD_KEYS_MAX + TLS_SLOT_FIRST_USER_SLOT + BIONIC_TLS_RESERVED_SLOTS)
#define BIONIC_PTHREAD_KEY_COUNT (BIONIC_PTHREAD_KEY_RESERVED_COUNT + PTHREAD_KEYS_MAX)
__END_DECLS
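To make the new counting concrete (a worked example, assuming USE_JEMALLOC is defined and PTHREAD_KEYS_MAX is the POSIX minimum of 128, which bionic's <limits.h> provides): the reserved count is 12 libc keys plus 5 jemalloc keys, i.e. 17, so BIONIC_PTHREAD_KEY_COUNT is 17 + 128 = 145 key slots in total. A build-time check along these lines would catch a mismatch; the include path is hypothetical:

#include <limits.h>          /* PTHREAD_KEYS_MAX */
#include "bionic_tls.h"      /* hypothetical include of the header above */

_Static_assert(BIONIC_PTHREAD_KEY_COUNT ==
               BIONIC_PTHREAD_KEY_RESERVED_COUNT + PTHREAD_KEYS_MAX,
               "key table must cover internal and user keys");
#if defined(USE_JEMALLOC)
_Static_assert(BIONIC_PTHREAD_KEY_RESERVED_COUNT == 12 + 5,
               "expected 12 libc + 5 jemalloc reserved keys");
#endif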

View File

@@ -102,6 +102,12 @@ fread(void *buf, size_t size, size_t count, FILE *fp)
* avoid copying it through the buffer?
*/
if (total > (size_t) fp->_bf._size) {
/*
* Make sure that fseek doesn't think it can
* reuse the buffer since we are going to read
* directly from the file descriptor.
*/
fp->_flags |= __SMOD;
break;
}
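The failure mode the new comment guards against, seen from a hypothetical caller (file name and sizes invented): a read larger than the stdio buffer goes straight to the file descriptor, so a later fseek must not assume the buffer still mirrors the stream position.

#include <stdio.h>

int main() {
  FILE* fp = fopen("/tmp/example.dat", "r");   /* hypothetical input file */
  if (fp == NULL) return 1;
  char big[64 * 1024];
  char small[16];
  setvbuf(fp, NULL, _IOFBF, 512);              /* deliberately small stdio buffer */
  size_t n1 = fread(big, 1, sizeof(big), fp);  /* exceeds the buffer: bypasses it, sets __SMOD */
  fseek(fp, 0, SEEK_SET);                      /* __SMOD stops fseek reusing stale buffer state */
  size_t n2 = fread(small, 1, sizeof(small), fp);  /* correctly refills from the fd */
  printf("read %zu then %zu bytes\n", n1, n2);
  fclose(fp);
  return 0;
}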

View File

@@ -0,0 +1,48 @@
/* $OpenBSD: memchr.c,v 1.7 2005/08/08 08:05:37 espie Exp $ */
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Chris Torek.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <string.h>
void *
memchr(const void *s, int c, size_t n)
{
if (n != 0) {
const unsigned char *p = s;
do {
if (*p++ == (unsigned char)c)
return ((void *)(p - 1));
} while (--n != 0);
}
return (NULL);
}

View File

@@ -0,0 +1,38 @@
/* $OpenBSD: memrchr.c,v 1.2 2007/11/27 16:22:12 martynas Exp $ */
/*
* Copyright (c) 2007 Todd C. Miller <Todd.Miller@courtesan.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <string.h>
/*
* Reverse memchr()
* Find the last occurrence of 'c' in the buffer 's' of size 'n'.
*/
void *
memrchr(const void *s, int c, size_t n)
{
const unsigned char *cp;
if (n != 0) {
cp = (unsigned char *)s + n;
do {
if (*(--cp) == (unsigned char)c)
return((void *)cp);
} while (--n != 0);
}
return(NULL);
}
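The pair is handy for one-pass path splitting; a small illustrative use follows (on bionic both are declared in <string.h>; elsewhere memrchr may need _GNU_SOURCE):

#define _GNU_SOURCE   /* for memrchr on glibc; harmless on bionic */
#include <stdio.h>
#include <string.h>

int main() {
  const char path[] = "/system/lib/libc.so";
  size_t len = sizeof(path) - 1;
  const char* first = memchr(path, '/', len);   /* first '/': offset 0 */
  const char* last = memrchr(path, '/', len);   /* last '/': just before "libc.so" */
  /* Both are non-NULL here because path starts with '/'. */
  printf("first slash at %zu, basename \"%s\"\n",
         (size_t)(first - path), last + 1);     /* prints: first slash at 0, basename "libc.so" */
  return 0;
}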

Binary file not shown.