Merge remote-tracking branch 'goog/master' into merge-from-master

commit 79e7c0a98e
@@ -43,7 +43,6 @@ libc_common_src_files := \
    bionic/err.c \
    bionic/ether_aton.c \
    bionic/ether_ntoa.c \
    bionic/fdprintf.c \
    bionic/ftime.c \
    bionic/fts.c \
    bionic/getdtablesize.c \
@@ -55,7 +54,6 @@ libc_common_src_files := \
    bionic/initgroups.c \
    bionic/ioctl.c \
    bionic/isatty.c \
    bionic/issetugid.c \
    bionic/md5.c \
    bionic/memmem.c \
    bionic/pathconf.c \
@@ -257,7 +255,6 @@ libc_upstream_freebsd_src_files := \
    upstream-freebsd/lib/libc/string/wcstok.c \
    upstream-freebsd/lib/libc/string/wmemchr.c \
    upstream-freebsd/lib/libc/string/wmemcpy.c \
    upstream-freebsd/lib/libc/string/wmemmove.c \
    upstream-freebsd/lib/libc/string/wmemset.c \

libc_upstream_netbsd_src_files := \
@@ -371,6 +368,7 @@ libc_upstream_openbsd_src_files := \
    upstream-openbsd/lib/libc/net/ntohs.c \
    upstream-openbsd/lib/libc/stdio/asprintf.c \
    upstream-openbsd/lib/libc/stdio/clrerr.c \
    upstream-openbsd/lib/libc/stdio/dprintf.c \
    upstream-openbsd/lib/libc/stdio/fdopen.c \
    upstream-openbsd/lib/libc/stdio/feof.c \
    upstream-openbsd/lib/libc/stdio/ferror.c \
@@ -435,6 +433,7 @@ libc_upstream_openbsd_src_files := \
    upstream-openbsd/lib/libc/stdio/ungetc.c \
    upstream-openbsd/lib/libc/stdio/ungetwc.c \
    upstream-openbsd/lib/libc/stdio/vasprintf.c \
    upstream-openbsd/lib/libc/stdio/vdprintf.c \
    upstream-openbsd/lib/libc/stdio/vfprintf.c \
    upstream-openbsd/lib/libc/stdio/vfscanf.c \
    upstream-openbsd/lib/libc/stdio/vfwprintf.c \
@@ -58,7 +58,6 @@ int setresgid:setresgid32(gid_t, gid_t, gid_t) arm,x86
int setresgid:setresgid(gid_t, gid_t, gid_t) arm64,mips,mips64,x86_64
void* __brk:brk(void*) all
int kill(pid_t, int) all
int tkill(pid_t tid, int sig) all
int tgkill(pid_t tgid, pid_t tid, int sig) all
int __ptrace:ptrace(int request, int pid, void* addr, void* data) all

@@ -306,8 +305,6 @@ int eventfd:eventfd2(unsigned int, int) all
void _exit|_Exit:exit_group(int) all
void __exit:exit(int) all

int futex(void*, int, int, void*, void*, int) all

int inotify_init1(int) all
int inotify_add_watch(int, const char*, unsigned int) all
int inotify_rm_watch(int, unsigned int) all
@@ -24,6 +24,7 @@ libc_common_src_files_arm += \
    upstream-freebsd/lib/libc/string/wcslen.c \
    upstream-freebsd/lib/libc/string/wcsrchr.c \
    upstream-freebsd/lib/libc/string/wmemcmp.c \
    upstream-freebsd/lib/libc/string/wmemmove.c \
    upstream-openbsd/lib/libc/string/bcopy.c \
    upstream-openbsd/lib/libc/string/stpcpy.c \
    upstream-openbsd/lib/libc/string/stpncpy.c \
@@ -53,7 +54,6 @@ libc_bionic_src_files_arm += \
    arch-arm/bionic/__bionic_clone.S \
    arch-arm/bionic/eabi.c \
    arch-arm/bionic/_exit_with_stack_teardown.S \
    arch-arm/bionic/futex_arm.S \
    arch-arm/bionic/__get_sp.S \
    arch-arm/bionic/libgcc_compat.c \
    arch-arm/bionic/memcmp16.S \
@@ -54,19 +54,14 @@ ENTRY(__bionic_clone)

    # In the parent, reload saved registers then either return or set errno.
    ldmfd   sp!, {r4, r5, r6, r7}
    .cfi_def_cfa_offset 0
    cmn     r0, #(MAX_ERRNO + 1)
    bxls    lr
    neg     r0, r0
    b       __set_errno

1:  # The child.
    # Re-add the unwind directives that were reset from above.
    .cfi_def_cfa_offset 16
    .cfi_rel_offset r4, 0
    .cfi_rel_offset r5, 4
    .cfi_rel_offset r6, 8
    .cfi_rel_offset r7, 12
    # Setting lr to 0 will make the unwinder stop at __bionic_clone_entry
    mov     lr, #0
    ldr     r0, [sp, #-4]
    ldr     r1, [sp, #-8]
    b       __bionic_clone_entry
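For reference, the child side of the stub branches into a C helper that simply runs the cloned function and exits with its result. A rough sketch in C (the real __bionic_clone_entry lives elsewhere in bionic's C sources; treat the body below as an approximation, not the actual implementation):

/* Sketch only: the child calls the user's function and exits with its result. */
#include <unistd.h>

void __bionic_clone_entry_sketch(int (*fn)(void*), void* arg) {
  int status = fn(arg);   /* r0/r1 were loaded from the child stack by the stub */
  _exit(status);          /* never returns */
}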
@@ -1,38 +0,0 @@
/* Copyright (C) 2008 The Android Open Source Project. All rights reserved.
 * (BSD-style license text omitted.) */

#include <private/bionic_asm.h>

// int __futex_syscall4(volatile void* ftx, int op, int val, const struct timespec* timeout)
ENTRY_PRIVATE(__futex_syscall4)
    mov     ip, r7
    ldr     r7, =__NR_futex
    swi     #0
    mov     r7, ip
    bx      lr
END(__futex_syscall4)
@@ -1,22 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */

#include <private/bionic_asm.h>

ENTRY(futex)
    mov     ip, sp
    stmfd   sp!, {r4, r5, r6, r7}
    .cfi_def_cfa_offset 16
    .cfi_rel_offset r4, 0
    .cfi_rel_offset r5, 4
    .cfi_rel_offset r6, 8
    .cfi_rel_offset r7, 12
    ldmfd   ip, {r4, r5, r6}
    ldr     r7, =__NR_futex
    swi     #0
    ldmfd   sp!, {r4, r5, r6, r7}
    .cfi_def_cfa_offset 0
    cmn     r0, #(MAX_ERRNO + 1)
    bxls    lr
    neg     r0, r0
    b       __set_errno
END(futex)
@@ -1,14 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */

#include <private/bionic_asm.h>

ENTRY(tkill)
    mov     ip, r7
    ldr     r7, =__NR_tkill
    swi     #0
    mov     r7, ip
    cmn     r0, #(MAX_ERRNO + 1)
    bxls    lr
    neg     r0, r0
    b       __set_errno
END(tkill)
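All of the removed stubs above follow the same return convention: the kernel reports failure by returning a negative errno in the range [-MAX_ERRNO, -1], and the stub converts that into the libc convention of setting errno and returning -1. A rough C equivalent of the cmn/bxls/neg/b __set_errno tail, for illustration only (names follow bionic; this is not the generated code):

/* Illustrative sketch of the stub's error handling. */
#define MAX_ERRNO 4095

extern int __set_errno(int n);   /* bionic helper: sets errno = n and returns -1 */

static long finish_syscall(long kernel_result) {
  if ((unsigned long)kernel_result >= (unsigned long)-MAX_ERRNO) {
    /* Result is in [-MAX_ERRNO, -1]: a failure. */
    return __set_errno((int)-kernel_result);
  }
  return kernel_result;  /* success: return the value unchanged */
}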
@@ -14,7 +14,6 @@ libc_common_src_files_arm64 := \
    upstream-freebsd/lib/libc/string/wcslen.c \
    upstream-freebsd/lib/libc/string/wcsrchr.c \
    upstream-freebsd/lib/libc/string/wmemcmp.c \
    upstream-openbsd/lib/libc/string/bcopy.c \
    upstream-openbsd/lib/libc/string/stpcpy.c \
    upstream-openbsd/lib/libc/string/stpncpy.c \
    upstream-openbsd/lib/libc/string/strcat.c \
@@ -35,9 +34,7 @@ libc_common_src_files_arm64 += \
### CPU specific source files
libc_bionic_src_files_arm64 := \
    arch-arm64/bionic/__bionic_clone.S \
    arch-arm64/bionic/bzero_arm64.c \
    arch-arm64/bionic/_exit_with_stack_teardown.S \
    arch-arm64/bionic/futex_arm64.S \
    arch-arm64/bionic/__get_sp.S \
    arch-arm64/bionic/__rt_sigreturn.S \
    arch-arm64/bionic/_setjmp.S \
@@ -61,9 +61,9 @@ ENTRY(__bionic_clone)

.L_bc_child:
    # We're in the child now. Set the end of the frame record chain...
    .cfi_undefined x29
    .cfi_undefined x30
    mov     x29, xzr
    # Setting x30 to 0 will make the unwinder stop at __bionic_clone_entry
    mov     x30, xzr
    # ...and call __bionic_clone_entry with the 'fn' and 'arg' we stored on the child stack.
    ldp     x0, x1, [sp, #-16]
    b       __bionic_clone_entry
@@ -1,33 +0,0 @@
/* Copyright (C) 2013 The Android Open Source Project. All rights reserved.
 * (BSD-style license text omitted.) */

#include <string.h>

void bzero(void* s, size_t n) {
  memset(s, '\0', n);
}
@@ -1,47 +0,0 @@
/* Copyright (C) 2013 The Android Open Source Project. All rights reserved.
 * (BSD-style license text omitted.) */

#include <private/bionic_asm.h>

// int __futex_syscall4(volatile void* ftx, int op, int val, const struct timespec* timeout)
ENTRY_PRIVATE(__futex_syscall4)
    stp     x29, x30, [sp, #-16]!
    .cfi_def_cfa_offset 16
    .cfi_rel_offset x29, 0
    .cfi_rel_offset x30, 8
    mov     x29, sp

    mov     x8, __NR_futex
    svc     #0

    ldp     x29, x30, [sp], #16
    .cfi_def_cfa_offset 0
    .cfi_restore x29
    .cfi_restore x30
    ret
END(__futex_syscall4)
libc/arch-arm64/generic/bionic/bcopy.S (new file, 30 lines)
@@ -0,0 +1,30 @@
/* Copyright (c) 2014, Linaro Limited. All rights reserved.
 * (BSD-style license text omitted.) */

#define BCOPY
#include "memmove.S"
#undef BCOPY
libc/arch-arm64/generic/bionic/bzero.S (new file, 30 lines)
@@ -0,0 +1,30 @@
/* Copyright (c) 2014, Linaro Limited. All rights reserved.
 * (BSD-style license text omitted.) */

#define BZERO
#include "memset.S"
#undef BZERO
@@ -29,11 +29,16 @@
 *
 * ARMv8-a, AArch64
 * Unaligned accesses
 * wchar_t is 4 bytes
 */

#include <private/bionic_asm.h>

/* Parameters and result. */
#ifdef BCOPY
#define origdstin x1
#define origsrc x0
#endif
#define dstin x0
#define src x1
#define count x2
@@ -54,7 +59,18 @@
#define D_l x13
#define D_h x14

#ifdef BCOPY
ENTRY(bcopy)
    /* Swap src and dst so that a branch to memcpy doesn't cause issues. */
    mov     tmp1, origsrc
    mov     origsrc, origdstin
    mov     origdstin, tmp1
#elif defined(WMEMMOVE)
ENTRY(wmemmove)
    lsl     count, count, #2
#else
ENTRY(memmove)
#endif
    cmp     dstin, src
    b.lo    .Ldownwards
    add     tmp1, src, count
@@ -316,4 +332,10 @@ ENTRY(memmove)
    tst     count, #0x3f
    b.ne    .Ltail63down
    ret
#ifdef BCOPY
END(bcopy)
#elif defined(WMEMMOVE)
END(wmemmove)
#else
END(memmove)
#endif
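The new bcopy.S, bzero.S, and wmemmove.S files are thin wrappers: each defines a macro and re-includes the generic memmove.S or memset.S source, which then assembles under a different entry-point name with a small extra prologue (bcopy swaps its first two arguments, wmemmove scales the count from wchar_t units to bytes). In C terms, the aliases built from memmove.S behave roughly as follows (sketch only; the real code stays in assembly):

#include <string.h>
#include <wchar.h>

/* Sketch of the extra work each alias does before the shared memmove body. */
void bcopy_sketch(const void* src, void* dst, size_t n) {
  memmove(dst, src, n);                     /* bcopy: first two arguments swapped */
}

wchar_t* wmemmove_sketch(wchar_t* dst, const wchar_t* src, size_t n) {
  memmove(dst, src, n * sizeof(wchar_t));   /* count scaled; wchar_t is 4 bytes here */
  return dst;
}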
@@ -38,15 +38,19 @@
   data blocks more efficiently. In some circumstances this might be
   unsafe, for example in an asymmetric multiprocessor environment with
   different DC clear lengths (neither the upper nor lower lengths are
   safe to use). The feature can be disabled by defining DONT_USE_DC.
   safe to use).

   If code may be run in a virtualized environment, then define
   MAYBE_VIRT. This will cause the code to cache the system register
   values rather than re-reading them each call. */

#define dstin x0
#define val w1
#ifdef BZERO
#define count x1
#else
#define count x2
#endif
#define val w1
#define tmp1 x3
#define tmp1w w3
#define tmp2 x4
@@ -60,13 +64,18 @@
#define dst x8
#define tmp3w w9

#ifdef BZERO
ENTRY(bzero)
#else
ENTRY(memset)
#endif

    mov     dst, dstin      /* Preserve return value. */
    ands    A_lw, val, #255
#ifndef DONT_USE_DC
    b.eq    .Lzero_mem
#ifdef BZERO
    b       .Lzero_mem
#endif
    ands    A_lw, val, #255
    b.eq    .Lzero_mem
    orr     A_lw, A_lw, A_lw, lsl #8
    orr     A_lw, A_lw, A_lw, lsl #16
    orr     A_l, A_l, A_l, lsl #32
@@ -143,7 +152,6 @@ ENTRY(memset)
    b.ne    .Ltail63
    ret

#ifndef DONT_USE_DC
/* For zeroing memory, check to see if we can use the ZVA feature to
 * zero entire 'cache' lines. */
.Lzero_mem:
@@ -225,7 +233,11 @@ ENTRY(memset)
    ands    count, count, zva_bits_x
    b.ne    .Ltail_maybe_long
    ret
#ifdef BZERO
END(bzero)
#else
END(memset)
#endif

#ifdef MAYBE_VIRT
    .bss
@@ -233,4 +245,3 @@ END(memset)
.Lcache_clear:
    .space 4
#endif
#endif /* DONT_USE_DC */
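Note the register remapping in the memset.S hunk above: under BZERO the length is taken from x1 rather than x2, because bzero(void* s, size_t n) passes its length as the second argument while memset(void* s, int c, size_t n) passes it as the third, with the fill value fixed at zero. A minimal C sketch of that mapping (illustrative, not the assembly):

#include <string.h>

/* bzero's (s, n) maps onto memset's (s, c, n) with c == 0 and the length
 * moving from the second argument register (x1) to the third (x2). */
void bzero_mapping_sketch(void* s, size_t n) {
  memset(s, 0, n);
}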
libc/arch-arm64/generic/bionic/wmemmove.S (new file, 30 lines)
@@ -0,0 +1,30 @@
/* Copyright (c) 2014, Linaro Limited. All rights reserved.
 * (BSD-style license text omitted.) */

#define WMEMMOVE
#include "memmove.S"
#undef WMEMMOVE
@@ -1,4 +1,6 @@
libc_bionic_src_files_arm64 += \
    arch-arm64/generic/bionic/bcopy.S \
    arch-arm64/generic/bionic/bzero.S \
    arch-arm64/generic/bionic/memcmp.S \
    arch-arm64/generic/bionic/memcpy.S \
    arch-arm64/generic/bionic/memmove.S \
@@ -7,3 +9,4 @@ libc_bionic_src_files_arm64 += \
    arch-arm64/generic/bionic/strlen.S \
    arch-arm64/generic/bionic/strncmp.S \
    arch-arm64/generic/bionic/strnlen.S \
    arch-arm64/generic/bionic/wmemmove.S
@@ -1,25 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */

#include <private/bionic_asm.h>

ENTRY(futex)
    stp     x29, x30, [sp, #-16]!
    .cfi_def_cfa_offset 16
    .cfi_rel_offset x29, 0
    .cfi_rel_offset x30, 8
    mov     x29, sp

    mov     x8, __NR_futex
    svc     #0

    ldp     x29, x30, [sp], #16
    .cfi_def_cfa_offset 0
    .cfi_restore x29
    .cfi_restore x30

    cmn     x0, #(MAX_ERRNO + 1)
    cneg    x0, x0, hi
    b.hi    __set_errno

    ret
END(futex)
@@ -1,25 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */

#include <private/bionic_asm.h>

ENTRY(tkill)
    stp     x29, x30, [sp, #-16]!
    .cfi_def_cfa_offset 16
    .cfi_rel_offset x29, 0
    .cfi_rel_offset x30, 8
    mov     x29, sp

    mov     x8, __NR_tkill
    svc     #0

    ldp     x29, x30, [sp], #16
    .cfi_def_cfa_offset 0
    .cfi_restore x29
    .cfi_restore x30

    cmn     x0, #(MAX_ERRNO + 1)
    cneg    x0, x0, hi
    b.hi    __set_errno

    ret
END(tkill)
@@ -54,6 +54,9 @@ ENTRY(__bionic_clone)
    j       ra

.L__thread_start_bc:
    # Clear return address in child so we don't unwind further.
    li      ra, 0

    lw      a0, 0(sp)    # fn
    lw      a1, 4(sp)    # arg

@@ -1,50 +0,0 @@
/* Copyright (C) 2008 The Android Open Source Project. All rights reserved.
 * (BSD-style license text omitted.) */

#include <private/bionic_asm.h>

// int __futex_syscall4(volatile void* ftx, int op, int val, const struct timespec* timeout)
ENTRY_PRIVATE(__futex_syscall4)
    subu    sp, 4*6
    sw      $0, 20(sp)      /* val3 */
    sw      $0, 16(sp)      /* addr2 */
    # move  a3, a3          /* timespec */
    # move  a2, a2          /* val */
    # li    a1, a1          /* op */
    # move  a0, a0          /* ftx */
    li      v0, __NR_futex
    syscall
    .set noreorder
    bnez    a3, 1f          /* Check for error */
    neg     v0              /* Negate error number if it's valid */
    move    v0, $0          /* Otherwise return 0 */
1:
    .set reorder
    addu    sp, 4*6
    j       ra
END(__futex_syscall4)
@@ -26,6 +26,7 @@ libc_common_src_files_mips += \
    upstream-freebsd/lib/libc/string/wcslen.c \
    upstream-freebsd/lib/libc/string/wcsrchr.c \
    upstream-freebsd/lib/libc/string/wmemcmp.c \
    upstream-freebsd/lib/libc/string/wmemmove.c \
    upstream-openbsd/lib/libc/string/bcopy.c \
    upstream-openbsd/lib/libc/string/stpcpy.c \
    upstream-openbsd/lib/libc/string/stpncpy.c \
@@ -58,7 +59,6 @@ libc_bionic_src_files_mips += \
    arch-mips/bionic/bzero.S \
    arch-mips/bionic/cacheflush.cpp \
    arch-mips/bionic/_exit_with_stack_teardown.S \
    arch-mips/bionic/futex_mips.S \
    arch-mips/bionic/__get_sp.S \
    arch-mips/bionic/memcmp16.S \
    arch-mips/bionic/_setjmp.S \
@@ -1,19 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */

#include <private/bionic_asm.h>

ENTRY(futex)
    .set noreorder
    .cpload t9
    li      v0, __NR_futex
    syscall
    bnez    a3, 1f
    move    a0, v0
    j       ra
    nop
1:
    la      t9, __set_errno
    j       t9
    nop
    .set reorder
END(futex)
@@ -1,19 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */

#include <private/bionic_asm.h>

ENTRY(tkill)
    .set noreorder
    .cpload t9
    li      v0, __NR_tkill
    syscall
    bnez    a3, 1f
    move    a0, v0
    j       ra
    nop
1:
    la      t9, __set_errno
    j       t9
    nop
    .set reorder
END(tkill)
@@ -75,6 +75,9 @@ LEAF(__bionic_clone, FRAMESZ)
    j       ra

.L__thread_start_bc:
    # Clear return address in child so we don't unwind further.
    li      ra, 0

    # void __bionic_clone_entry(int (*func)(void*), void *arg)
    PTR_L   a0, FRAME_FN(sp)     # fn
    PTR_L   a1, FRAME_ARG(sp)    # arg

@@ -1,64 +0,0 @@
/* Copyright (C) 2008 The Android Open Source Project. All rights reserved.
 * (BSD-style license text omitted.) */

#include <private/bionic_asm.h>

#if (_MIPS_SIM == _ABIO32) || (_MIPS_SIM == _ABI32)
FRAMESZ = MKFSIZ(NARGSAVE+2,0)
FRAME_A4 = 4*REGSZ
FRAME_A5 = 5*REGSZ
#else
FRAMESZ = 0
#endif

// int __futex_syscall4(volatile void* ftx, int op, int val, const struct timespec* timeout)
LEAF(__futex_syscall4,FRAMESZ)
#if (_MIPS_SIM == _ABIO32) || (_MIPS_SIM == _ABI32)
    PTR_SUBU sp, FRAMESZ
    REG_S   $0, FRAME_A5(sp)    /* val3 */
    REG_S   $0, FRAME_A4(sp)    /* addr2 */
#else
    move    a5, $0              /* val3 */
    move    a4, $0              /* addr2 */
#endif
    # move  a3, a3              /* timespec */
    # move  a2, a2              /* val */
    # move  a1, a1              /* op */
    # move  a0, a0              /* ftx */
    LI      v0, __NR_futex
    syscall
    neg     v0                  /* Negate errno */
    bnez    a3, 1f              /* Check for error */
    move    v0, $0              /* Return 0 if no error */
1:
#if (_MIPS_SIM == _ABIO32) || (_MIPS_SIM == _ABI32)
    PTR_ADDU sp, FRAMESZ
#endif
    j       ra
END(__futex_syscall4)

.hidden __futex_syscall4
@@ -16,6 +16,7 @@ libc_common_src_files_mips64 := \
    upstream-freebsd/lib/libc/string/wcslen.c \
    upstream-freebsd/lib/libc/string/wcsrchr.c \
    upstream-freebsd/lib/libc/string/wmemcmp.c \
    upstream-freebsd/lib/libc/string/wmemmove.c \
    upstream-openbsd/lib/libc/string/bcopy.c \
    upstream-openbsd/lib/libc/string/stpcpy.c \
    upstream-openbsd/lib/libc/string/stpncpy.c \
@@ -43,7 +44,6 @@ libc_bionic_src_files_mips64 := \
    arch-mips64/bionic/__bionic_clone.S \
    arch-mips64/bionic/bzero.S \
    arch-mips64/bionic/_exit_with_stack_teardown.S \
    arch-mips64/bionic/futex_mips.S \
    arch-mips64/bionic/__get_sp.S \
    arch-mips64/bionic/getdents.cpp \
    arch-mips64/bionic/memcmp16.S \
@@ -1,25 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */

#include <private/bionic_asm.h>

ENTRY(futex)
    .set push
    .set noreorder
    li      v0, __NR_futex
    syscall
    bnez    a3, 1f
    move    a0, v0
    j       ra
    nop
1:
    move    t0, ra
    bal     2f
    nop
2:
    .cpsetup ra, t1, 2b
    LA      t9, __set_errno
    .cpreturn
    j       t9
    move    ra, t0
    .set pop
END(futex)
@@ -1,25 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */

#include <private/bionic_asm.h>

ENTRY(tkill)
    .set push
    .set noreorder
    li      v0, __NR_tkill
    syscall
    bnez    a3, 1f
    move    a0, v0
    j       ra
    nop
1:
    move    t0, ra
    bal     2f
    nop
2:
    .cpsetup ra, t1, 2b
    LA      t9, __set_errno
    .cpreturn
    j       t9
    move    ra, t0
    .set pop
END(tkill)
@@ -1,16 +0,0 @@
#include <private/bionic_asm.h>

// int __futex_syscall4(volatile void* ftx, int op, int val, const struct timespec* timeout)
ENTRY_PRIVATE(__futex_syscall4)
    pushl   %ebx
    pushl   %esi
    movl    12(%esp), %ebx    /* ftx */
    movl    16(%esp), %ecx    /* op */
    movl    20(%esp), %edx    /* val */
    movl    24(%esp), %esi    /* timeout */
    movl    $__NR_futex, %eax
    int     $0x80
    popl    %esi
    popl    %ebx
    ret
END(__futex_syscall4)
@@ -1,42 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */

#include <private/bionic_asm.h>

ENTRY(futex)
    pushl   %ebx
    pushl   %ecx
    pushl   %edx
    pushl   %esi
    pushl   %edi
    pushl   %ebp
    .cfi_def_cfa_offset 24
    .cfi_rel_offset ebx, 0
    .cfi_rel_offset ecx, 4
    .cfi_rel_offset edx, 8
    .cfi_rel_offset esi, 12
    .cfi_rel_offset edi, 16
    .cfi_rel_offset ebp, 20
    mov     28(%esp), %ebx
    mov     32(%esp), %ecx
    mov     36(%esp), %edx
    mov     40(%esp), %esi
    mov     44(%esp), %edi
    mov     48(%esp), %ebp
    movl    $__NR_futex, %eax
    int     $0x80
    cmpl    $-MAX_ERRNO, %eax
    jb      1f
    negl    %eax
    pushl   %eax
    call    __set_errno
    addl    $4, %esp
    orl     $-1, %eax
1:
    popl    %ebp
    popl    %edi
    popl    %esi
    popl    %edx
    popl    %ecx
    popl    %ebx
    ret
END(futex)
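The 32-bit x86 stub above has to marshal all six futex arguments from the stack into ebx, ecx, edx, esi, edi and ebp before `int $0x80`, which is why it saves and restores so many registers. Code that only has the public headers available can reach the same system call through syscall(2); a hedged example using the kernel's futex argument order (illustration only, not bionic's internal helpers):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Wait until *addr changes away from expected_value (no timeout). */
static long futex_wait_example(volatile int* addr, int expected_value) {
  return syscall(SYS_futex, addr, FUTEX_WAIT, expected_value,
                 (void*)0 /* timeout */, (void*)0 /* uaddr2 */, 0 /* val3 */);
}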
@@ -1,26 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */

#include <private/bionic_asm.h>

ENTRY(tkill)
    pushl   %ebx
    pushl   %ecx
    .cfi_def_cfa_offset 8
    .cfi_rel_offset ebx, 0
    .cfi_rel_offset ecx, 4
    mov     12(%esp), %ebx
    mov     16(%esp), %ecx
    movl    $__NR_tkill, %eax
    int     $0x80
    cmpl    $-MAX_ERRNO, %eax
    jb      1f
    negl    %eax
    pushl   %eax
    call    __set_errno
    addl    $4, %esp
    orl     $-1, %eax
1:
    popl    %ecx
    popl    %ebx
    ret
END(tkill)
@@ -12,6 +12,7 @@ libc_common_src_files_x86 += \
    bionic/__memset_chk.cpp \
    bionic/__strcpy_chk.cpp \
    bionic/__strcat_chk.cpp \
    upstream-freebsd/lib/libc/string/wmemmove.c \


# These are shared by all the 32-bit targets, but not the 64-bit ones.
@@ -23,7 +24,6 @@ libc_bionic_src_files_x86 := \
libc_bionic_src_files_x86 += \
    arch-x86/bionic/__bionic_clone.S \
    arch-x86/bionic/_exit_with_stack_teardown.S \
    arch-x86/bionic/futex_x86.S \
    arch-x86/bionic/__get_sp.S \
    arch-x86/bionic/_setjmp.S \
    arch-x86/bionic/setjmp.S \
@@ -1,37 +0,0 @@
/* Copyright (C) 2013 The Android Open Source Project. All rights reserved.
 * (BSD-style license text omitted.) */

#include <private/bionic_asm.h>

// int __futex_syscall4(volatile void* ftx, int op, int val, const struct timespec* timeout)
ENTRY_PRIVATE(__futex_syscall4)
    mov     %rcx, %r10    /* timeout */
    mov     $__NR_futex, %eax
    syscall
    ret
END(__futex_syscall4)
@@ -1,17 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */

#include <private/bionic_asm.h>

ENTRY(futex)
    movq    %rcx, %r10
    movl    $__NR_futex, %eax
    syscall
    cmpq    $-MAX_ERRNO, %rax
    jb      1f
    negl    %eax
    movl    %eax, %edi
    call    __set_errno
    orq     $-1, %rax
1:
    ret
END(futex)
@@ -1,16 +0,0 @@
/* Generated by gensyscalls.py. Do not edit. */

#include <private/bionic_asm.h>

ENTRY(tkill)
    movl    $__NR_tkill, %eax
    syscall
    cmpq    $-MAX_ERRNO, %rax
    jb      1f
    negl    %eax
    movl    %eax, %edi
    call    __set_errno
    orq     $-1, %rax
1:
    ret
END(tkill)
@@ -14,6 +14,7 @@ libc_common_src_files_x86_64 := \
    upstream-freebsd/lib/libc/string/wcslen.c \
    upstream-freebsd/lib/libc/string/wcsrchr.c \
    upstream-freebsd/lib/libc/string/wmemcmp.c \
    upstream-freebsd/lib/libc/string/wmemmove.c \
    upstream-openbsd/lib/libc/string/strlcat.c \
    upstream-openbsd/lib/libc/string/strlcpy.c \

@@ -30,7 +31,6 @@ libc_common_src_files_x86_64 += \
libc_bionic_src_files_x86_64 := \
    arch-x86_64/bionic/__bionic_clone.S \
    arch-x86_64/bionic/_exit_with_stack_teardown.S \
    arch-x86_64/bionic/futex_x86_64.S \
    arch-x86_64/bionic/__get_sp.S \
    arch-x86_64/bionic/__rt_sigreturn.S \
    arch-x86_64/bionic/_setjmp.S \
@@ -56,19 +56,19 @@ void* sbrk(ptrdiff_t increment) {
  }

  // Avoid overflow.
  intptr_t old_brk = reinterpret_cast<intptr_t>(__bionic_brk);
  if ((increment > 0 && INTPTR_MAX - increment > old_brk) ||
      (increment < 0 && (increment == PTRDIFF_MIN || old_brk < -increment))) {
  uintptr_t old_brk = reinterpret_cast<uintptr_t>(__bionic_brk);
  if ((increment > 0 && static_cast<uintptr_t>(increment) > (UINTPTR_MAX - old_brk)) ||
      (increment < 0 && static_cast<uintptr_t>(-increment) > old_brk)) {
    errno = ENOMEM;
    return reinterpret_cast<void*>(-1);
  }

  void* desired_brk = reinterpret_cast<void*>(old_brk + increment);
  __bionic_brk = __brk(desired_brk);

  if (__bionic_brk < desired_brk) {
    errno = ENOMEM;
    return reinterpret_cast<void*>(-1);
  }

  return reinterpret_cast<void*>(old_brk);
}
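The sbrk() hunk above replaces a check based on signed intptr_t arithmetic (which appears to have had its comparison inverted and risked signed overflow) with arithmetic on uintptr_t, where wraparound is well defined. The new conditions read as a range check on the current program break; a standalone sketch of the same logic, written to avoid negating a possibly minimal signed value:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Returns true if moving the program break by 'increment' would wrap
 * around the address space; mirrors the new unsigned checks in sbrk(). */
static bool brk_step_would_overflow(uintptr_t old_brk, ptrdiff_t increment) {
  if (increment > 0) {
    return (uintptr_t)increment > (UINTPTR_MAX - old_brk);
  }
  if (increment < 0) {
    /* Magnitude computed with well-defined unsigned arithmetic. */
    uintptr_t decrement = 0u - (uintptr_t)increment;
    return decrement > old_brk;
  }
  return false;
}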
@@ -1,58 +0,0 @@
/* Copyright (C) 2010 The Android Open Source Project. All rights reserved.
 * (BSD-style license text omitted.) */

#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <unistd.h>

int vfdprintf(int fd, const char * __restrict format, __va_list ap)
{
    char *buf = 0;
    int ret;
    ret = vasprintf(&buf, format, ap);
    if (ret < 0)
        goto end;

    ret = write(fd, buf, ret);
    free(buf);
end:
    return ret;
}

int fdprintf(int fd, const char * __restrict format, ...)
{
    __va_list ap;
    int ret;

    va_start(ap, format);
    ret = vfdprintf(fd, format, ap);
    va_end(ap);

    return ret;
}
@@ -1,35 +0,0 @@
/* Copyright (C) 2008 The Android Open Source Project. All rights reserved.
 * (BSD-style license text omitted.) */
#include <unistd.h>

int issetugid(void)
{
    /* for Bionic, this is sufficient */
    return 0;
}
@@ -32,8 +32,10 @@
#include <ctype.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
@@ -184,4 +186,39 @@ extern "C" intmax_t strntoimax(const char* nptr, char** endptr, int base, size_t
  return (intmax_t) strntoumax(nptr, endptr, base, n);
}

// POSIX calls this dprintf, but LP32 Android had fdprintf instead.
extern "C" int fdprintf(int fd, const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  int rc = vdprintf(fd, fmt, ap);
  va_end(ap);
  return rc;
}

// POSIX calls this vdprintf, but LP32 Android had fdprintf instead.
extern "C" int vfdprintf(int fd, const char* fmt, va_list ap) {
  return vdprintf(fd, fmt, ap);
}

#define __futex_wake __real_futex_wake
#define __futex_wait __real_futex_wait
#include "private/bionic_futex.h"
#undef __futex_wake
#undef __futex_wait

// This used to be in <sys/atomics.h>.
extern "C" int __futex_wake(volatile void* ftx, int count) {
  return __real_futex_wake(ftx, count);
}

// This used to be in <sys/atomics.h>.
extern "C" int __futex_wait(volatile void* ftx, int value, const struct timespec* timeout) {
  return __real_futex_wait(ftx, value, timeout);
}

// Unity's libmono uses this.
extern "C" int tkill(pid_t tid, int sig) {
  return syscall(__NR_tkill, tid, sig);
}

#endif
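These compatibility shims mean the symbols removed from libc proper earlier in this change (fdprintf, vfdprintf, the old <sys/atomics.h> futex helpers, tkill) remain exported for existing LP32 NDK binaries, while new code is expected to use the POSIX names. A caller of the legacy API migrates one-for-one; for example (illustrative only):

#include <stdio.h>

/* Old LP32-only spelling, still satisfied by the ndk_cruft shim:
 *   fdprintf(fd, "pid=%d\n", pid);
 * Portable POSIX spelling that new code should prefer: */
static void log_pid(int fd, int pid) {
  dprintf(fd, "pid=%d\n", pid);
}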
@@ -30,7 +30,6 @@

#include <errno.h>
#include <limits.h>
#include <sys/atomics.h>
#include <sys/mman.h>
#include <time.h>
#include <unistd.h>
@@ -144,10 +144,8 @@ static int __pthread_start(void* arg) {
  // notify gdb about this thread before we start doing anything.
  // This also provides the memory barrier needed to ensure that all memory
  // accesses previously made by the creating thread are visible to us.
  pthread_mutex_t* start_mutex = (pthread_mutex_t*) &thread->tls[TLS_SLOT_START_MUTEX];
  pthread_mutex_lock(start_mutex);
  pthread_mutex_destroy(start_mutex);
  thread->tls[TLS_SLOT_START_MUTEX] = NULL;
  pthread_mutex_lock(&thread->startup_handshake_mutex);
  pthread_mutex_destroy(&thread->startup_handshake_mutex);

  __init_alternate_signal_stack(thread);

@@ -204,7 +202,8 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
  // The child stack is the same address, just growing in the opposite direction.
  // At offsets >= 0, we have the TLS slots.
  // At offsets < 0, we have the child stack.
  thread->tls = (void**)((uint8_t*)(thread->attr.stack_base) + thread->attr.stack_size - BIONIC_TLS_SLOTS * sizeof(void*));
  thread->tls = reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(thread->attr.stack_base) +
                                         thread->attr.stack_size - BIONIC_TLS_SLOTS * sizeof(void*));
  void* child_stack = thread->tls;
  __init_tls(thread);

@@ -214,9 +213,8 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
  // This also provides the memory barrier we need to ensure that all
  // memory accesses previously performed by this thread are visible to
  // the new thread.
  pthread_mutex_t* start_mutex = (pthread_mutex_t*) &thread->tls[TLS_SLOT_START_MUTEX];
  pthread_mutex_init(start_mutex, NULL);
  pthread_mutex_lock(start_mutex);
  pthread_mutex_init(&thread->startup_handshake_mutex, NULL);
  pthread_mutex_lock(&thread->startup_handshake_mutex);

  thread->start_routine = start_routine;
  thread->start_routine_arg = arg;
@@ -237,7 +235,7 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
  // We don't have to unlock the mutex at all because clone(2) failed so there's no child waiting to
  // be unblocked, but we're about to unmap the memory the mutex is stored in, so this serves as a
  // reminder that you can't rewrite this function to use a ScopedPthreadMutexLocker.
  pthread_mutex_unlock(start_mutex);
  pthread_mutex_unlock(&thread->startup_handshake_mutex);
  if ((thread->attr.flags & PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK) == 0) {
    munmap(thread->attr.stack_base, thread->attr.stack_size);
  }
@@ -252,7 +250,7 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
  // Letting the thread run is the easiest way to clean up its resources.
  thread->attr.flags |= PTHREAD_ATTR_FLAG_DETACHED;
  thread->start_routine = __do_nothing;
  pthread_mutex_unlock(start_mutex);
  pthread_mutex_unlock(&thread->startup_handshake_mutex);
  return init_errno;
}

@@ -264,7 +262,7 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,

  // Publish the pthread_t and unlock the mutex to let the new thread start running.
  *thread_out = reinterpret_cast<pthread_t>(thread);
  pthread_mutex_unlock(start_mutex);
  pthread_mutex_unlock(&thread->startup_handshake_mutex);

  return 0;
}
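The hunks above move the creator/child handshake from a pthread_mutex_t stored in a TLS slot (TLS_SLOT_START_MUTEX) to a startup_handshake_mutex member of pthread_internal_t; the protocol itself is unchanged. A self-contained illustration of that handshake pattern (generic pthreads, not bionic's internals):

#include <pthread.h>
#include <stdio.h>

/* The creator holds a locked mutex while it finishes initializing shared
 * state; the new thread's first action is to lock that mutex, so it cannot
 * run ahead of the initialization. */
static pthread_mutex_t startup_handshake_mutex = PTHREAD_MUTEX_INITIALIZER;
static int shared_state;

static void* worker(void* arg) {
  (void)arg;
  pthread_mutex_lock(&startup_handshake_mutex);    /* blocks until creator unlocks */
  pthread_mutex_unlock(&startup_handshake_mutex);
  printf("worker sees shared_state = %d\n", shared_state);
  return NULL;
}

int main(void) {
  pthread_mutex_lock(&startup_handshake_mutex);    /* hold the gate closed */
  pthread_t t;
  pthread_create(&t, NULL, worker, NULL);
  shared_state = 42;                               /* finish initialization */
  pthread_mutex_unlock(&startup_handshake_mutex);  /* open the gate */
  pthread_join(t, NULL);
  return 0;
}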
@@ -48,6 +48,8 @@ struct pthread_internal_t {

  void* alternate_signal_stack;

  pthread_mutex_t startup_handshake_mutex;

  /*
   * The dynamic linker implements dlerror(3), which makes it hard for us to implement this
   * per-thread buffer by simply using malloc(3) and free(3).
@@ -30,7 +30,6 @@

#include <errno.h>
#include <limits.h>
#include <sys/atomics.h>
#include <sys/mman.h>
#include <unistd.h>
||||
@ -306,9 +305,7 @@ int pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attr)
|
||||
* "type" value is zero, so the only bits that will be set are the ones in
|
||||
* the lock state field.
|
||||
*/
|
||||
static __inline__ void
|
||||
_normal_lock(pthread_mutex_t* mutex, int shared)
|
||||
{
|
||||
static inline void _normal_lock(pthread_mutex_t* mutex, int shared) {
|
||||
/* convenience shortcuts */
|
||||
const int unlocked = shared | MUTEX_STATE_BITS_UNLOCKED;
|
||||
const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
|
||||
@ -336,8 +333,9 @@ _normal_lock(pthread_mutex_t* mutex, int shared)
|
||||
* that the mutex is in state 2 when we go to sleep on it, which
|
||||
* guarantees a wake-up call.
|
||||
*/
|
||||
while (__bionic_swap(locked_contended, &mutex->value) != unlocked)
|
||||
__futex_wait_ex(&mutex->value, shared, locked_contended, 0);
|
||||
while (__bionic_swap(locked_contended, &mutex->value) != unlocked) {
|
||||
__futex_wait_ex(&mutex->value, shared, locked_contended, NULL);
|
||||
}
|
||||
}
|
||||
ANDROID_MEMBAR_FULL();
|
||||
}
|
||||
@ -346,9 +344,7 @@ _normal_lock(pthread_mutex_t* mutex, int shared)
|
||||
* Release a non-recursive mutex. The caller is responsible for determining
|
||||
* that we are in fact the owner of this lock.
|
||||
*/
|
||||
static __inline__ void
|
||||
_normal_unlock(pthread_mutex_t* mutex, int shared)
|
||||
{
|
||||
static inline void _normal_unlock(pthread_mutex_t* mutex, int shared) {
|
||||
ANDROID_MEMBAR_FULL();
|
||||
|
||||
/*
|
||||
@ -410,9 +406,7 @@ _normal_unlock(pthread_mutex_t* mutex, int shared)
|
||||
* mvalue is the current mutex value (already loaded)
|
||||
* mutex pointers to the mutex.
|
||||
*/
|
||||
static __inline__ __attribute__((always_inline)) int
|
||||
_recursive_increment(pthread_mutex_t* mutex, int mvalue, int mtype)
|
||||
{
|
||||
static inline __always_inline int _recursive_increment(pthread_mutex_t* mutex, int mvalue, int mtype) {
|
||||
if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
|
||||
/* trying to re-lock a mutex we already acquired */
|
||||
return EDEADLK;
|
||||
@ -452,7 +446,7 @@ int pthread_mutex_lock(pthread_mutex_t* mutex) {
|
||||
mtype = (mvalue & MUTEX_TYPE_MASK);
|
||||
shared = (mvalue & MUTEX_SHARED_MASK);
|
||||
|
||||
/* Handle normal case first */
|
||||
/* Handle non-recursive case first */
|
||||
if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) ) {
|
||||
_normal_lock(mutex, shared);
|
||||
return 0;
|
||||
|
@@ -26,9 +26,11 @@
 * SUCH DAMAGE.
 */

#include "pthread_internal.h"
#include <errno.h>

#include "pthread_internal.h"
#include "private/bionic_futex.h"

/* Technical note:
 *
 * Possible states of a read/write lock:
@@ -40,265 +42,239 @@
 * Additionally:
 * - trying to get the write-lock while there are any readers blocks
 * - trying to get the read-lock while there is a writer blocks
 * - a single thread can acquire the lock multiple times in the same mode
 * - a single thread can acquire the lock multiple times in read mode
 *
 * - Posix states that behavior is undefined it a thread tries to acquire
 *   the lock in two distinct modes (e.g. write after read, or read after write).
 * - Posix states that behavior is undefined (may deadlock) if a thread tries
 *   to acquire the lock
 *     - in write mode while already holding the lock (whether in read or write mode)
 *     - in read mode while already holding the lock in write mode.
 * - This implementation will return EDEADLK in "write after write" and "read after
 *   write" cases and will deadlock in write after read case.
 *
 * - This implementation tries to avoid writer starvation by making the readers
 *   block as soon as there is a waiting writer on the lock. However, it cannot
 *   completely eliminate it: each time the lock is unlocked, all waiting threads
 *   are woken and battle for it, which one gets it depends on the kernel scheduler
 *   and is semi-random.
 * TODO: VERY CAREFULLY convert this to use C++11 atomics when possible. All volatile
 * members of pthread_rwlock_t should be converted to atomics<> and __sync_bool_compare_and_swap
 * should be changed to compare_exchange_strong accompanied by the proper ordering
 * constraints (comments have been added with the intending ordering across the code).
 *
 * TODO: As it stands now, pending_readers and pending_writers could be merged into a
 * a single waiters variable. Keeping them separate adds a bit of clarity and keeps
 * the door open for a writer-biased implementation.
 *
 */
|
||||
#define RWLOCKATTR_DEFAULT 0
|
||||
#define RWLOCKATTR_SHARED_MASK 0x0010
|
||||
#define RWLOCKATTR_DEFAULT 0
|
||||
#define RWLOCKATTR_SHARED_MASK 0x0010
|
||||
|
||||
extern pthread_internal_t* __get_thread(void);
|
||||
|
||||
int pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
|
||||
{
|
||||
*attr = PTHREAD_PROCESS_PRIVATE;
|
||||
return 0;
|
||||
static inline bool rwlock_is_shared(const pthread_rwlock_t* rwlock) {
|
||||
return rwlock->attr == PTHREAD_PROCESS_SHARED;
|
||||
}
|
||||
|
||||
int pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
|
||||
{
|
||||
*attr = -1;
|
||||
return 0;
|
||||
static bool timespec_from_absolute(timespec* rel_timeout, const timespec* abs_timeout) {
|
||||
if (abs_timeout != NULL) {
|
||||
if (__timespec_from_absolute(rel_timeout, abs_timeout, CLOCK_REALTIME) < 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
|
||||
{
|
||||
switch (pshared) {
|
||||
int pthread_rwlockattr_init(pthread_rwlockattr_t* attr) {
|
||||
*attr = PTHREAD_PROCESS_PRIVATE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_rwlockattr_destroy(pthread_rwlockattr_t* attr) {
|
||||
*attr = -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_rwlockattr_setpshared(pthread_rwlockattr_t* attr, int pshared) {
|
||||
switch (pshared) {
|
||||
case PTHREAD_PROCESS_PRIVATE:
|
||||
case PTHREAD_PROCESS_SHARED:
|
||||
*attr = pshared;
|
||||
return 0;
|
||||
*attr = pshared;
|
||||
return 0;
|
||||
default:
|
||||
return EINVAL;
|
||||
}
|
||||
return EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* attr, int* pshared) {
|
||||
*pshared = *attr;
|
||||
return 0;
|
||||
*pshared = *attr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
|
||||
{
|
||||
pthread_mutexattr_t* lock_attr = NULL;
|
||||
pthread_condattr_t* cond_attr = NULL;
|
||||
pthread_mutexattr_t lock_attr0;
|
||||
pthread_condattr_t cond_attr0;
|
||||
int ret;
|
||||
|
||||
if (attr && *attr == PTHREAD_PROCESS_SHARED) {
|
||||
lock_attr = &lock_attr0;
|
||||
pthread_mutexattr_init(lock_attr);
|
||||
pthread_mutexattr_setpshared(lock_attr, PTHREAD_PROCESS_SHARED);
|
||||
|
||||
cond_attr = &cond_attr0;
|
||||
pthread_condattr_init(cond_attr);
|
||||
pthread_condattr_setpshared(cond_attr, PTHREAD_PROCESS_SHARED);
|
||||
int pthread_rwlock_init(pthread_rwlock_t* rwlock, const pthread_rwlockattr_t* attr) {
|
||||
if (attr != NULL) {
|
||||
switch (*attr) {
|
||||
case PTHREAD_PROCESS_SHARED:
|
||||
case PTHREAD_PROCESS_PRIVATE:
|
||||
rwlock->attr= *attr;
|
||||
break;
|
||||
default:
|
||||
return EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
ret = pthread_mutex_init(&rwlock->lock, lock_attr);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
rwlock->state = 0;
|
||||
rwlock->pending_readers = 0;
|
||||
rwlock->pending_writers = 0;
|
||||
rwlock->writer_thread_id = 0;
|
||||
|
||||
ret = pthread_cond_init(&rwlock->cond, cond_attr);
|
||||
if (ret != 0) {
|
||||
pthread_mutex_destroy(&rwlock->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
rwlock->numLocks = 0;
|
||||
rwlock->pendingReaders = 0;
|
||||
rwlock->pendingWriters = 0;
|
||||
rwlock->writerThreadId = 0;
|
||||
|
||||
return 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
  if (rwlock->numLocks > 0)
    return EBUSY;

  pthread_cond_destroy(&rwlock->cond);
  pthread_mutex_destroy(&rwlock->lock);
  return 0;
}

/* Returns TRUE iff we can acquire a read lock. */
static __inline__ int read_precondition(pthread_rwlock_t* rwlock, int tid)
{
  /* We can't have the lock if any writer is waiting for it (writer bias).
   * This tries to avoid starvation when there are multiple readers racing.
   */
  if (rwlock->pendingWriters > 0)
    return 0;

  /* We can have the lock if there is no writer, or if we write-own it */
  /* The second test avoids a self-dead lock in case of buggy code. */
  if (rwlock->writerThreadId == 0 || rwlock->writerThreadId == tid)
    return 1;

  /* Otherwise, we can't have it */
  return 0;
}

/* returns TRUE iff we can acquire a write lock. */
static __inline__ int write_precondition(pthread_rwlock_t* rwlock, int tid)
{
  /* We can get the lock if nobody has it */
  if (rwlock->numLocks == 0)
    return 1;

  /* Or if we already own it */
  if (rwlock->writerThreadId == tid)
    return 1;

  /* Otherwise, not */
  return 0;
}

/* This function is used to waken any waiting thread contending
 * for the lock. One of them should be able to grab it after
 * that.
 */
static void _pthread_rwlock_pulse(pthread_rwlock_t *rwlock)
{
  if (rwlock->pendingReaders > 0 || rwlock->pendingWriters > 0)
    pthread_cond_broadcast(&rwlock->cond);

int pthread_rwlock_destroy(pthread_rwlock_t* rwlock) {
  if (rwlock->state != 0) {
    return EBUSY;
  }
  return 0;
}

static int __pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
  int ret = 0;

  pthread_mutex_lock(&rwlock->lock);
  int tid = __get_thread()->tid;
  if (__predict_false(!read_precondition(rwlock, tid))) {
    rwlock->pendingReaders += 1;
    do {
      ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
    } while (ret == 0 && !read_precondition(rwlock, tid));
    rwlock->pendingReaders -= 1;
    if (ret != 0) {
      goto EXIT;
    }
  if (__predict_false(__get_thread()->tid == rwlock->writer_thread_id)) {
    return EDEADLK;
  }
  ++rwlock->numLocks;
 EXIT:
  pthread_mutex_unlock(&rwlock->lock);
  return ret;

  timespec ts;
  timespec* rel_timeout = (abs_timeout == NULL) ? NULL : &ts;
  bool done = false;
  do {
    // This is actually a race read as there's nothing that guarantees the atomicity of integer
    // reads / writes. However, in practice this "never" happens so until we switch to C++11 this
    // should work fine. The same applies in the other places this idiom is used.
    int32_t cur_state = rwlock->state;  // C++11 relaxed atomic read
    if (__predict_true(cur_state >= 0)) {
      // Add as an extra reader.
      done = __sync_bool_compare_and_swap(&rwlock->state, cur_state, cur_state + 1);  // C++11 memory_order_acquire
    } else {
      if (!timespec_from_absolute(rel_timeout, abs_timeout)) {
        return ETIMEDOUT;
      }
      // Owner holds it in write mode, hang up.
      // To avoid losing wake ups the pending_readers update and the state read should be
      // sequentially consistent. (currently enforced by __sync_fetch_and_add which creates a full barrier)
      __sync_fetch_and_add(&rwlock->pending_readers, 1);  // C++11 memory_order_relaxed (if the futex_wait ensures the ordering)
      int ret = __futex_wait_ex(&rwlock->state, rwlock_is_shared(rwlock), cur_state, rel_timeout);
      __sync_fetch_and_sub(&rwlock->pending_readers, 1);  // C++11 memory_order_relaxed
      if (ret == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
    }
  } while (!done);

  return 0;
}

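The comments above note that the __sync_* builtins stand in for C++11/C11 atomics until the toolchain catches up (this same change adds <stdatomic.h>). As a hedged sketch, the acquire-CAS reader path could later be expressed like this; illustrative only, not the code in this diff:

#include <stdatomic.h>
#include <stdbool.h>

// Try to add one reader to a state word where >= 0 means "n readers hold it"
// and -1 means "a writer holds it". Returns true on success; on failure the
// caller would futex-wait on the state word as the code above does.
static bool try_add_reader(atomic_int* state) {
  int cur = atomic_load_explicit(state, memory_order_relaxed);
  if (cur < 0) {
    return false;
  }
  return atomic_compare_exchange_weak_explicit(state, &cur, cur + 1,
                                               memory_order_acquire,
                                               memory_order_relaxed);
}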
static int __pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
  int ret = 0;

  pthread_mutex_lock(&rwlock->lock);
  int tid = __get_thread()->tid;
  if (__predict_false(!write_precondition(rwlock, tid))) {
    // If we can't read yet, wait until the rwlock is unlocked
    // and try again. Increment pendingReaders to get the
    // cond broadcast when that happens.
    rwlock->pendingWriters += 1;
    do {
      ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
    } while (ret == 0 && !write_precondition(rwlock, tid));
    rwlock->pendingWriters -= 1;
    if (ret != 0) {
      goto EXIT;
    }
  if (__predict_false(tid == rwlock->writer_thread_id)) {
    return EDEADLK;
  }
  ++rwlock->numLocks;
  rwlock->writerThreadId = tid;
 EXIT:
  pthread_mutex_unlock(&rwlock->lock);
  return ret;

  timespec ts;
  timespec* rel_timeout = (abs_timeout == NULL) ? NULL : &ts;
  bool done = false;
  do {
    int32_t cur_state = rwlock->state;
    if (__predict_true(cur_state == 0)) {
      // Change state from 0 to -1.
      done = __sync_bool_compare_and_swap(&rwlock->state, 0 /* cur state */, -1 /* new state */);  // C++11 memory_order_acquire
    } else {
      if (!timespec_from_absolute(rel_timeout, abs_timeout)) {
        return ETIMEDOUT;
      }
      // Failed to acquire, hang up.
      // To avoid losing wake ups the pending_writers update and the state read should be
      // sequentially consistent. (currently enforced by __sync_fetch_and_add which creates a full barrier)
      __sync_fetch_and_add(&rwlock->pending_writers, 1);  // C++11 memory_order_relaxed (if the futex_wait ensures the ordering)
      int ret = __futex_wait_ex(&rwlock->state, rwlock_is_shared(rwlock), cur_state, rel_timeout);
      __sync_fetch_and_sub(&rwlock->pending_writers, 1);  // C++11 memory_order_relaxed
      if (ret == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
    }
  } while (!done);

  rwlock->writer_thread_id = tid;
  return 0;
}

int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock) {
  return __pthread_rwlock_timedrdlock(rwlock, NULL);
}

int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
  int ret = 0;

  pthread_mutex_lock(&rwlock->lock);
  if (__predict_false(!read_precondition(rwlock, __get_thread()->tid)))
    ret = EBUSY;
  else
    ++rwlock->numLocks;
  pthread_mutex_unlock(&rwlock->lock);

  return ret;
}

int pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
  return __pthread_rwlock_timedrdlock(rwlock, abs_timeout);
}

int pthread_rwlock_tryrdlock(pthread_rwlock_t* rwlock) {
  int32_t cur_state = rwlock->state;
  if ((cur_state >= 0) &&
      __sync_bool_compare_and_swap(&rwlock->state, cur_state, cur_state + 1)) {  // C++11 memory_order_acquire
    return 0;
  }
  return EBUSY;
}

int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock) {
  return __pthread_rwlock_timedwrlock(rwlock, NULL);
}

int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
  int ret = 0;

  pthread_mutex_lock(&rwlock->lock);
  int tid = __get_thread()->tid;
  if (__predict_false(!write_precondition(rwlock, tid))) {
    ret = EBUSY;
  } else {
    ++rwlock->numLocks;
    rwlock->writerThreadId = tid;
  }
  pthread_mutex_unlock(&rwlock->lock);
  return ret;
}

int pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
  return __pthread_rwlock_timedwrlock(rwlock, abs_timeout);
}

int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
  int ret = 0;

  pthread_mutex_lock(&rwlock->lock);

  /* The lock must be held */
  if (rwlock->numLocks == 0) {
    ret = EPERM;
    goto EXIT;
  }

  /* If it has only readers, writerThreadId is 0 */
  if (rwlock->writerThreadId == 0) {
    if (--rwlock->numLocks == 0)
      _pthread_rwlock_pulse(rwlock);
  }
  /* Otherwise, it has only a single writer, which
   * must be ourselves.
   */
  else {
    if (rwlock->writerThreadId != __get_thread()->tid) {
      ret = EPERM;
      goto EXIT;
    }
    if (--rwlock->numLocks == 0) {
      rwlock->writerThreadId = 0;
      _pthread_rwlock_pulse(rwlock);
    }
  }
 EXIT:
  pthread_mutex_unlock(&rwlock->lock);
  return ret;

int pthread_rwlock_trywrlock(pthread_rwlock_t* rwlock) {
  int tid = __get_thread()->tid;
  int32_t cur_state = rwlock->state;
  if ((cur_state == 0) &&
      __sync_bool_compare_and_swap(&rwlock->state, 0 /* cur state */, -1 /* new state */)) {  // C++11 memory_order_acquire
    rwlock->writer_thread_id = tid;
    return 0;
  }
  return EBUSY;
}

int pthread_rwlock_unlock(pthread_rwlock_t* rwlock) {
  int tid = __get_thread()->tid;
  bool done = false;
  do {
    int32_t cur_state = rwlock->state;
    if (cur_state == 0) {
      return EPERM;
    }
    if (cur_state == -1) {
      if (rwlock->writer_thread_id != tid) {
        return EPERM;
      }
      // We're no longer the owner.
      rwlock->writer_thread_id = 0;
      // Change state from -1 to 0.
      // We use __sync_bool_compare_and_swap to achieve sequential consistency of the state store and
      // the following pendingX loads. A simple store with memory_order_release semantics
      // is not enough to guarantee that the pendingX loads are not reordered before the
      // store (which may lead to a lost wakeup).
      __sync_bool_compare_and_swap( &rwlock->state, -1 /* cur state*/, 0 /* new state */);  // C++11 maybe memory_order_seq_cst?

      // Wake any waiters.
      if (__predict_false(rwlock->pending_readers > 0 || rwlock->pending_writers > 0)) {
        __futex_wake_ex(&rwlock->state, rwlock_is_shared(rwlock), INT_MAX);
      }
      done = true;
    } else {  // cur_state > 0
      // Reduce state by 1.
      // See the comment above on why we need __sync_bool_compare_and_swap.
      done = __sync_bool_compare_and_swap(&rwlock->state, cur_state, cur_state - 1);  // C++11 maybe memory_order_seq_cst?
      if (done && (cur_state - 1) == 0) {
        // There are no more readers, wake any waiters.
        if (__predict_false(rwlock->pending_readers > 0 || rwlock->pending_writers > 0)) {
          __futex_wake_ex(&rwlock->state, rwlock_is_shared(rwlock), INT_MAX);
        }
      }
    }
  } while (!done);

  return 0;
}

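To summarize the new implementation above: the whole lock is driven by the single state word (0 = unlocked, -1 = writer holds it, +n = n readers hold it), with writer_thread_id used only for the EDEADLK/EPERM checks. A worked trace for a simple sequence, as a usage sketch:

#include <pthread.h>

static pthread_rwlock_t g_lock = PTHREAD_RWLOCK_INITIALIZER;

// Trace of g_lock's internal state word for this sequence:
void rwlock_state_trace(void) {
  pthread_rwlock_rdlock(&g_lock);    // state:  0 -> +1
  pthread_rwlock_rdlock(&g_lock);    // state: +1 -> +2 (readers share)
  pthread_rwlock_unlock(&g_lock);    // state: +2 -> +1
  pthread_rwlock_unlock(&g_lock);    // state: +1 ->  0, pending waiters are futex-woken
  pthread_rwlock_wrlock(&g_lock);    // state:  0 -> -1, writer_thread_id = calling tid
  pthread_rwlock_unlock(&g_lock);    // state: -1 ->  0, writer_thread_id cleared
}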
@ -28,7 +28,6 @@
#include <semaphore.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/atomics.h>
#include <time.h>
#include <limits.h>

@ -94,16 +94,28 @@ typedef long pthread_condattr_t;
typedef long pthread_rwlockattr_t;

typedef struct {
  pthread_mutex_t lock;
  pthread_cond_t cond;
  int numLocks;
  int writerThreadId;
  int pendingReaders;
  int pendingWriters;
  void* __reserved[4];
#if !defined(__LP64__)
  pthread_mutex_t __unused_lock;
  pthread_cond_t __unused_cond;
#endif
  volatile int32_t state; // 0=unlock, -1=writer lock, +n=reader lock
  volatile int32_t writer_thread_id;
  volatile int32_t pending_readers;
  volatile int32_t pending_writers;
  int32_t attr;
#ifdef __LP64__
  char __reserved[36];
#else
  char __reserved[12];
#endif

} pthread_rwlock_t;

#define PTHREAD_RWLOCK_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0, 0, 0, { NULL, NULL, NULL, NULL } }
#ifdef __LP64__
#define PTHREAD_RWLOCK_INITIALIZER { 0, 0, 0, 0, 0, { 0 } }
#else
#define PTHREAD_RWLOCK_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0, 0, 0, 0, { 0 } }
#endif

typedef int pthread_key_t;
typedef long pthread_t;
|
||||
|
420
libc/include/stdatomic.h
Normal file
@ -0,0 +1,420 @@
|
||||
/*-
|
||||
* Copyright (c) 2011 Ed Schouten <ed@FreeBSD.org>
|
||||
* David Chisnall <theraven@FreeBSD.org>
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* $FreeBSD$
|
||||
*/
|
||||
|
||||
#ifndef _STDATOMIC_H_
|
||||
#define _STDATOMIC_H_
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
#include <sys/types.h>
|
||||
#include <stdbool.h>
|
||||
|
||||
#if __has_extension(c_atomic) || __has_extension(cxx_atomic)
|
||||
#define __CLANG_ATOMICS
|
||||
#elif __GNUC_PREREQ__(4, 7)
|
||||
#define __GNUC_ATOMICS
|
||||
#elif defined(__GNUC__)
|
||||
#define __SYNC_ATOMICS
|
||||
#else
|
||||
#error "stdatomic.h does not support your compiler"
|
||||
#endif
|
||||
|
||||
/*
|
||||
* 7.17.1 Atomic lock-free macros.
|
||||
*/
|
||||
|
||||
#ifdef __GCC_ATOMIC_BOOL_LOCK_FREE
|
||||
#define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE
|
||||
#endif
|
||||
#ifdef __GCC_ATOMIC_CHAR_LOCK_FREE
|
||||
#define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
|
||||
#endif
|
||||
#ifdef __GCC_ATOMIC_CHAR16_T_LOCK_FREE
|
||||
#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE
|
||||
#endif
|
||||
#ifdef __GCC_ATOMIC_CHAR32_T_LOCK_FREE
|
||||
#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE
|
||||
#endif
|
||||
#ifdef __GCC_ATOMIC_WCHAR_T_LOCK_FREE
|
||||
#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE
|
||||
#endif
|
||||
#ifdef __GCC_ATOMIC_SHORT_LOCK_FREE
|
||||
#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
|
||||
#endif
|
||||
#ifdef __GCC_ATOMIC_INT_LOCK_FREE
|
||||
#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
|
||||
#endif
|
||||
#ifdef __GCC_ATOMIC_LONG_LOCK_FREE
|
||||
#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
|
||||
#endif
|
||||
#ifdef __GCC_ATOMIC_LLONG_LOCK_FREE
|
||||
#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
|
||||
#endif
|
||||
#ifdef __GCC_ATOMIC_POINTER_LOCK_FREE
|
||||
#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
|
||||
#endif
|
||||
|
||||
/*
|
||||
* 7.17.2 Initialization.
|
||||
*/
|
||||
|
||||
#if defined(__CLANG_ATOMICS)
|
||||
#define ATOMIC_VAR_INIT(value) (value)
|
||||
#define atomic_init(obj, value) __c11_atomic_init(obj, value)
|
||||
#else
|
||||
#define ATOMIC_VAR_INIT(value) { .__val = (value) }
|
||||
#define atomic_init(obj, value) ((void)((obj)->__val = (value)))
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Clang and recent GCC both provide predefined macros for the memory
|
||||
* orderings. If we are using a compiler that doesn't define them, use the
|
||||
* clang values - these will be ignored in the fallback path.
|
||||
*/
|
||||
|
||||
#ifndef __ATOMIC_RELAXED
|
||||
#define __ATOMIC_RELAXED 0
|
||||
#endif
|
||||
#ifndef __ATOMIC_CONSUME
|
||||
#define __ATOMIC_CONSUME 1
|
||||
#endif
|
||||
#ifndef __ATOMIC_ACQUIRE
|
||||
#define __ATOMIC_ACQUIRE 2
|
||||
#endif
|
||||
#ifndef __ATOMIC_RELEASE
|
||||
#define __ATOMIC_RELEASE 3
|
||||
#endif
|
||||
#ifndef __ATOMIC_ACQ_REL
|
||||
#define __ATOMIC_ACQ_REL 4
|
||||
#endif
|
||||
#ifndef __ATOMIC_SEQ_CST
|
||||
#define __ATOMIC_SEQ_CST 5
|
||||
#endif
|
||||
|
||||
/*
|
||||
* 7.17.3 Order and consistency.
|
||||
*
|
||||
* The memory_order_* constants that denote the barrier behaviour of the
|
||||
* atomic operations.
|
||||
*/
|
||||
|
||||
typedef enum {
|
||||
memory_order_relaxed = __ATOMIC_RELAXED,
|
||||
memory_order_consume = __ATOMIC_CONSUME,
|
||||
memory_order_acquire = __ATOMIC_ACQUIRE,
|
||||
memory_order_release = __ATOMIC_RELEASE,
|
||||
memory_order_acq_rel = __ATOMIC_ACQ_REL,
|
||||
memory_order_seq_cst = __ATOMIC_SEQ_CST
|
||||
} memory_order;
|
||||
|
||||
/*
|
||||
* 7.17.4 Fences.
|
||||
*/
|
||||
|
||||
static __inline void
|
||||
atomic_thread_fence(memory_order __order __unused)
|
||||
{
|
||||
|
||||
#ifdef __CLANG_ATOMICS
|
||||
__c11_atomic_thread_fence(__order);
|
||||
#elif defined(__GNUC_ATOMICS)
|
||||
__atomic_thread_fence(__order);
|
||||
#else
|
||||
__sync_synchronize();
|
||||
#endif
|
||||
}
|
||||
|
||||
static __inline void
|
||||
atomic_signal_fence(memory_order __order __unused)
|
||||
{
|
||||
|
||||
#ifdef __CLANG_ATOMICS
|
||||
__c11_atomic_signal_fence(__order);
|
||||
#elif defined(__GNUC_ATOMICS)
|
||||
__atomic_signal_fence(__order);
|
||||
#else
|
||||
__asm volatile ("" ::: "memory");
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* 7.17.5 Lock-free property.
|
||||
*/
|
||||
|
||||
#if defined(_KERNEL)
|
||||
/* Atomics in kernelspace are always lock-free. */
|
||||
#define atomic_is_lock_free(obj) \
|
||||
((void)(obj), (_Bool)1)
|
||||
#elif defined(__CLANG_ATOMICS)
|
||||
#define atomic_is_lock_free(obj) \
|
||||
__atomic_is_lock_free(sizeof(*(obj)), obj)
|
||||
#elif defined(__GNUC_ATOMICS)
|
||||
#define atomic_is_lock_free(obj) \
|
||||
__atomic_is_lock_free(sizeof((obj)->__val), &(obj)->__val)
|
||||
#else
|
||||
#define atomic_is_lock_free(obj) \
|
||||
((void)(obj), sizeof((obj)->__val) <= sizeof(void *))
|
||||
#endif
|
||||
|
||||
/*
|
||||
* 7.17.6 Atomic integer types.
|
||||
*/
|
||||
|
||||
#if !__has_extension(c_atomic) && !__has_extension(cxx_atomic)
|
||||
/*
|
||||
* No native support for _Atomic(). Place object in structure to prevent
|
||||
* most forms of direct non-atomic access.
|
||||
*/
|
||||
#define _Atomic(T) struct { T volatile __val; }
|
||||
#endif
|
||||
|
||||
typedef _Atomic(bool) atomic_bool;
|
||||
typedef _Atomic(char) atomic_char;
|
||||
typedef _Atomic(signed char) atomic_schar;
|
||||
typedef _Atomic(unsigned char) atomic_uchar;
|
||||
typedef _Atomic(short) atomic_short;
|
||||
typedef _Atomic(unsigned short) atomic_ushort;
|
||||
typedef _Atomic(int) atomic_int;
|
||||
typedef _Atomic(unsigned int) atomic_uint;
|
||||
typedef _Atomic(long) atomic_long;
|
||||
typedef _Atomic(unsigned long) atomic_ulong;
|
||||
typedef _Atomic(long long) atomic_llong;
|
||||
typedef _Atomic(unsigned long long) atomic_ullong;
|
||||
typedef _Atomic(char16_t) atomic_char16_t;
|
||||
typedef _Atomic(char32_t) atomic_char32_t;
|
||||
typedef _Atomic(wchar_t) atomic_wchar_t;
|
||||
typedef _Atomic(int_least8_t) atomic_int_least8_t;
|
||||
typedef _Atomic(uint_least8_t) atomic_uint_least8_t;
|
||||
typedef _Atomic(int_least16_t) atomic_int_least16_t;
|
||||
typedef _Atomic(uint_least16_t) atomic_uint_least16_t;
|
||||
typedef _Atomic(int_least32_t) atomic_int_least32_t;
|
||||
typedef _Atomic(uint_least32_t) atomic_uint_least32_t;
|
||||
typedef _Atomic(int_least64_t) atomic_int_least64_t;
|
||||
typedef _Atomic(uint_least64_t) atomic_uint_least64_t;
|
||||
typedef _Atomic(int_fast8_t) atomic_int_fast8_t;
|
||||
typedef _Atomic(uint_fast8_t) atomic_uint_fast8_t;
|
||||
typedef _Atomic(int_fast16_t) atomic_int_fast16_t;
|
||||
typedef _Atomic(uint_fast16_t) atomic_uint_fast16_t;
|
||||
typedef _Atomic(int_fast32_t) atomic_int_fast32_t;
|
||||
typedef _Atomic(uint_fast32_t) atomic_uint_fast32_t;
|
||||
typedef _Atomic(int_fast64_t) atomic_int_fast64_t;
|
||||
typedef _Atomic(uint_fast64_t) atomic_uint_fast64_t;
|
||||
typedef _Atomic(intptr_t) atomic_intptr_t;
|
||||
typedef _Atomic(uintptr_t) atomic_uintptr_t;
|
||||
typedef _Atomic(size_t) atomic_size_t;
|
||||
typedef _Atomic(ptrdiff_t) atomic_ptrdiff_t;
|
||||
typedef _Atomic(intmax_t) atomic_intmax_t;
|
||||
typedef _Atomic(uintmax_t) atomic_uintmax_t;
|
||||
|
||||
/*
|
||||
* 7.17.7 Operations on atomic types.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Compiler-specific operations.
|
||||
*/
|
||||
|
||||
#if defined(__CLANG_ATOMICS)
|
||||
#define atomic_compare_exchange_strong_explicit(object, expected, \
|
||||
desired, success, failure) \
|
||||
__c11_atomic_compare_exchange_strong(object, expected, desired, \
|
||||
success, failure)
|
||||
#define atomic_compare_exchange_weak_explicit(object, expected, \
|
||||
desired, success, failure) \
|
||||
__c11_atomic_compare_exchange_weak(object, expected, desired, \
|
||||
success, failure)
|
||||
#define atomic_exchange_explicit(object, desired, order) \
|
||||
__c11_atomic_exchange(object, desired, order)
|
||||
#define atomic_fetch_add_explicit(object, operand, order) \
|
||||
__c11_atomic_fetch_add(object, operand, order)
|
||||
#define atomic_fetch_and_explicit(object, operand, order) \
|
||||
__c11_atomic_fetch_and(object, operand, order)
|
||||
#define atomic_fetch_or_explicit(object, operand, order) \
|
||||
__c11_atomic_fetch_or(object, operand, order)
|
||||
#define atomic_fetch_sub_explicit(object, operand, order) \
|
||||
__c11_atomic_fetch_sub(object, operand, order)
|
||||
#define atomic_fetch_xor_explicit(object, operand, order) \
|
||||
__c11_atomic_fetch_xor(object, operand, order)
|
||||
#define atomic_load_explicit(object, order) \
|
||||
__c11_atomic_load(object, order)
|
||||
#define atomic_store_explicit(object, desired, order) \
|
||||
__c11_atomic_store(object, desired, order)
|
||||
#elif defined(__GNUC_ATOMICS)
|
||||
#define atomic_compare_exchange_strong_explicit(object, expected, \
|
||||
desired, success, failure) \
|
||||
__atomic_compare_exchange_n(&(object)->__val, expected, \
|
||||
desired, 0, success, failure)
|
||||
#define atomic_compare_exchange_weak_explicit(object, expected, \
|
||||
desired, success, failure) \
|
||||
__atomic_compare_exchange_n(&(object)->__val, expected, \
|
||||
desired, 1, success, failure)
|
||||
#define atomic_exchange_explicit(object, desired, order) \
|
||||
__atomic_exchange_n(&(object)->__val, desired, order)
|
||||
#define atomic_fetch_add_explicit(object, operand, order) \
|
||||
__atomic_fetch_add(&(object)->__val, operand, order)
|
||||
#define atomic_fetch_and_explicit(object, operand, order) \
|
||||
__atomic_fetch_and(&(object)->__val, operand, order)
|
||||
#define atomic_fetch_or_explicit(object, operand, order) \
|
||||
__atomic_fetch_or(&(object)->__val, operand, order)
|
||||
#define atomic_fetch_sub_explicit(object, operand, order) \
|
||||
__atomic_fetch_sub(&(object)->__val, operand, order)
|
||||
#define atomic_fetch_xor_explicit(object, operand, order) \
|
||||
__atomic_fetch_xor(&(object)->__val, operand, order)
|
||||
#define atomic_load_explicit(object, order) \
|
||||
__atomic_load_n(&(object)->__val, order)
|
||||
#define atomic_store_explicit(object, desired, order) \
|
||||
__atomic_store_n(&(object)->__val, desired, order)
|
||||
#else
|
||||
#define __atomic_apply_stride(object, operand) \
|
||||
(((__typeof__((object)->__val))0) + (operand))
|
||||
#define atomic_compare_exchange_strong_explicit(object, expected, \
|
||||
desired, success, failure) __extension__ ({ \
|
||||
__typeof__(expected) __ep = (expected); \
|
||||
__typeof__(*__ep) __e = *__ep; \
|
||||
(void)(success); (void)(failure); \
|
||||
(bool)((*__ep = __sync_val_compare_and_swap(&(object)->__val, \
|
||||
__e, desired)) == __e); \
|
||||
})
|
||||
#define atomic_compare_exchange_weak_explicit(object, expected, \
|
||||
desired, success, failure) \
|
||||
atomic_compare_exchange_strong_explicit(object, expected, \
|
||||
desired, success, failure)
|
||||
#if __has_builtin(__sync_swap)
|
||||
/* Clang provides a full-barrier atomic exchange - use it if available. */
|
||||
#define atomic_exchange_explicit(object, desired, order) \
|
||||
((void)(order), __sync_swap(&(object)->__val, desired))
|
||||
#else
|
||||
/*
|
||||
* __sync_lock_test_and_set() is only an acquire barrier in theory (although in
|
||||
* practice it is usually a full barrier) so we need an explicit barrier before
|
||||
* it.
|
||||
*/
|
||||
#define atomic_exchange_explicit(object, desired, order) \
|
||||
__extension__ ({ \
|
||||
__typeof__(object) __o = (object); \
|
||||
__typeof__(desired) __d = (desired); \
|
||||
(void)(order); \
|
||||
__sync_synchronize(); \
|
||||
__sync_lock_test_and_set(&(__o)->__val, __d); \
|
||||
})
|
||||
#endif
|
||||
#define atomic_fetch_add_explicit(object, operand, order) \
|
||||
((void)(order), __sync_fetch_and_add(&(object)->__val, \
|
||||
__atomic_apply_stride(object, operand)))
|
||||
#define atomic_fetch_and_explicit(object, operand, order) \
|
||||
((void)(order), __sync_fetch_and_and(&(object)->__val, operand))
|
||||
#define atomic_fetch_or_explicit(object, operand, order) \
|
||||
((void)(order), __sync_fetch_and_or(&(object)->__val, operand))
|
||||
#define atomic_fetch_sub_explicit(object, operand, order) \
|
||||
((void)(order), __sync_fetch_and_sub(&(object)->__val, \
|
||||
__atomic_apply_stride(object, operand)))
|
||||
#define atomic_fetch_xor_explicit(object, operand, order) \
|
||||
((void)(order), __sync_fetch_and_xor(&(object)->__val, operand))
|
||||
#define atomic_load_explicit(object, order) \
|
||||
((void)(order), __sync_fetch_and_add(&(object)->__val, 0))
|
||||
#define atomic_store_explicit(object, desired, order) \
|
||||
((void)atomic_exchange_explicit(object, desired, order))
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Convenience functions.
|
||||
*
|
||||
* Don't provide these in kernel space. In kernel space, we should be
|
||||
* disciplined enough to always provide explicit barriers.
|
||||
*/
|
||||
|
||||
#ifndef _KERNEL
|
||||
#define atomic_compare_exchange_strong(object, expected, desired) \
|
||||
atomic_compare_exchange_strong_explicit(object, expected, \
|
||||
desired, memory_order_seq_cst, memory_order_seq_cst)
|
||||
#define atomic_compare_exchange_weak(object, expected, desired) \
|
||||
atomic_compare_exchange_weak_explicit(object, expected, \
|
||||
desired, memory_order_seq_cst, memory_order_seq_cst)
|
||||
#define atomic_exchange(object, desired) \
|
||||
atomic_exchange_explicit(object, desired, memory_order_seq_cst)
|
||||
#define atomic_fetch_add(object, operand) \
|
||||
atomic_fetch_add_explicit(object, operand, memory_order_seq_cst)
|
||||
#define atomic_fetch_and(object, operand) \
|
||||
atomic_fetch_and_explicit(object, operand, memory_order_seq_cst)
|
||||
#define atomic_fetch_or(object, operand) \
|
||||
atomic_fetch_or_explicit(object, operand, memory_order_seq_cst)
|
||||
#define atomic_fetch_sub(object, operand) \
|
||||
atomic_fetch_sub_explicit(object, operand, memory_order_seq_cst)
|
||||
#define atomic_fetch_xor(object, operand) \
|
||||
atomic_fetch_xor_explicit(object, operand, memory_order_seq_cst)
|
||||
#define atomic_load(object) \
|
||||
atomic_load_explicit(object, memory_order_seq_cst)
|
||||
#define atomic_store(object, desired) \
|
||||
atomic_store_explicit(object, desired, memory_order_seq_cst)
|
||||
#endif /* !_KERNEL */
|
||||
|
||||
/*
|
||||
* 7.17.8 Atomic flag type and operations.
|
||||
*
|
||||
* XXX: Assume atomic_bool can be used as an atomic_flag. Is there some
|
||||
* kind of compiler built-in type we could use?
|
||||
*/
|
||||
|
||||
typedef struct {
|
||||
atomic_bool __flag;
|
||||
} atomic_flag;
|
||||
|
||||
#define ATOMIC_FLAG_INIT { ATOMIC_VAR_INIT(0) }
|
||||
|
||||
static __inline bool
|
||||
atomic_flag_test_and_set_explicit(volatile atomic_flag *__object,
|
||||
memory_order __order)
|
||||
{
|
||||
return (atomic_exchange_explicit(&__object->__flag, 1, __order));
|
||||
}
|
||||
|
||||
static __inline void
|
||||
atomic_flag_clear_explicit(volatile atomic_flag *__object, memory_order __order)
|
||||
{
|
||||
|
||||
atomic_store_explicit(&__object->__flag, 0, __order);
|
||||
}
|
||||
|
||||
#ifndef _KERNEL
|
||||
static __inline bool
|
||||
atomic_flag_test_and_set(volatile atomic_flag *__object)
|
||||
{
|
||||
|
||||
return (atomic_flag_test_and_set_explicit(__object,
|
||||
memory_order_seq_cst));
|
||||
}
|
||||
|
||||
static __inline void
|
||||
atomic_flag_clear(volatile atomic_flag *__object)
|
||||
{
|
||||
|
||||
atomic_flag_clear_explicit(__object, memory_order_seq_cst);
|
||||
}
|
||||
#endif /* !_KERNEL */
|
||||
|
||||
#endif /* !_STDATOMIC_H_ */
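The atomic_flag operations defined at the end of this header are enough to build a minimal spinlock; a hedged sketch (illustrative, not part of the header):

#include <stdatomic.h>

typedef struct {
  atomic_flag locked;
} example_spinlock_t;

#define EXAMPLE_SPINLOCK_INIT { ATOMIC_FLAG_INIT }

static inline void example_spinlock_lock(example_spinlock_t* l) {
  // Spin until the previous value was clear; acquire ordering makes the
  // critical section observe everything published before the last unlock.
  while (atomic_flag_test_and_set_explicit(&l->locked, memory_order_acquire)) {
  }
}

static inline void example_spinlock_unlock(example_spinlock_t* l) {
  atomic_flag_clear_explicit(&l->locked, memory_order_release);
}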
|
@ -250,6 +250,9 @@ int vfprintf(FILE * __restrict, const char * __restrict, __va_list)
int vprintf(const char * __restrict, __va_list)
    __printflike(1, 0);

int dprintf(int, const char * __restrict, ...) __printflike(2, 3);
int vdprintf(int, const char * __restrict, __va_list) __printflike(2, 0);

#ifndef __AUDIT__
char* gets(char*) __warnattr("gets is very unsafe; consider using fgets");
int sprintf(char* __restrict, const char* __restrict, ...)
@ -359,21 +362,6 @@ __END_DECLS
#define fwopen(cookie, fn) funopen(cookie, 0, fn, 0, 0)
#endif /* __BSD_VISIBLE */

#ifdef _GNU_SOURCE
/*
 * glibc defines dprintf(int, const char*, ...), which is poorly named
 * and likely to conflict with locally defined debugging printfs
 * fdprintf is a better name, and some programs that use fdprintf use a
 * #define fdprintf dprintf for compatibility
 */
__BEGIN_DECLS
int fdprintf(int, const char*, ...)
    __printflike(2, 3);
int vfdprintf(int, const char*, __va_list)
    __printflike(2, 0);
__END_DECLS
#endif /* _GNU_SOURCE */

#if defined(__BIONIC_FORTIFY)

__BEGIN_DECLS
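With the bionic-only fdprintf/vfdprintf gone, callers use the POSIX dprintf/vdprintf declared above. A quick usage sketch (illustrative only):

#include <stdio.h>

// Write formatted output directly to a file descriptor, no FILE* needed.
static void log_to_fd(int fd, int value) {
  dprintf(fd, "value=%d\n", value);
}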
|
||||
|
@ -1,80 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2008 The Android Open Source Project
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
||||
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
||||
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
||||
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
||||
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
|
||||
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
|
||||
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
#ifndef _SYS_ATOMICS_H
|
||||
#define _SYS_ATOMICS_H
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
#include <sys/time.h>
|
||||
|
||||
__BEGIN_DECLS
|
||||
|
||||
/* Note: atomic operations that were exported by the C library didn't
|
||||
* provide any memory barriers, which created potential issues on
|
||||
* multi-core devices. We now define them as inlined calls to
|
||||
* GCC sync builtins, which always provide a full barrier.
|
||||
*
|
||||
* NOTE: The C library still exports atomic functions by the same
|
||||
* name to ensure ABI stability for existing NDK machine code.
|
||||
*
|
||||
* If you are an NDK developer, we encourage you to rebuild your
|
||||
* unmodified sources against this header as soon as possible.
|
||||
*/
|
||||
#define __ATOMIC_INLINE__ static __inline__ __attribute__((always_inline))
|
||||
|
||||
__ATOMIC_INLINE__ int
|
||||
__atomic_cmpxchg(int old_value, int new_value, volatile int* ptr)
|
||||
{
|
||||
/* We must return 0 on success */
|
||||
return __sync_val_compare_and_swap(ptr, old_value, new_value) != old_value;
|
||||
}
|
||||
|
||||
__ATOMIC_INLINE__ int
|
||||
__atomic_swap(int new_value, volatile int *ptr)
|
||||
{
|
||||
int old_value;
|
||||
do {
|
||||
old_value = *ptr;
|
||||
} while (__sync_val_compare_and_swap(ptr, old_value, new_value) != old_value);
|
||||
return old_value;
|
||||
}
|
||||
|
||||
__ATOMIC_INLINE__ int
|
||||
__atomic_dec(volatile int *ptr)
|
||||
{
|
||||
return __sync_fetch_and_sub (ptr, 1);
|
||||
}
|
||||
|
||||
__ATOMIC_INLINE__ int
|
||||
__atomic_inc(volatile int *ptr)
|
||||
{
|
||||
return __sync_fetch_and_add (ptr, 1);
|
||||
}
|
||||
|
||||
__END_DECLS
|
||||
|
||||
#endif /* _SYS_ATOMICS_H */
|
@ -37,6 +37,24 @@
|
||||
#ifndef _SYS_CDEFS_H_
|
||||
#define _SYS_CDEFS_H_
|
||||
|
||||
/*
|
||||
* Testing against Clang-specific extensions.
|
||||
*/
|
||||
|
||||
#ifndef __has_extension
|
||||
#define __has_extension __has_feature
|
||||
#endif
|
||||
#ifndef __has_feature
|
||||
#define __has_feature(x) 0
|
||||
#endif
|
||||
#ifndef __has_include
|
||||
#define __has_include(x) 0
|
||||
#endif
|
||||
#ifndef __has_builtin
|
||||
#define __has_builtin(x) 0
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
* Macro to test if we're using a GNU C compiler of a specific vintage
|
||||
* or later, for e.g. features that appeared in a particular version
|
||||
|
@ -60,8 +60,6 @@ enum {
|
||||
typedef int greg_t;
|
||||
typedef greg_t gregset_t[NGREG];
|
||||
|
||||
/* TODO: fpregset_t. */
|
||||
|
||||
#include <asm/sigcontext.h>
|
||||
typedef struct sigcontext mcontext_t;
|
||||
|
||||
@ -70,15 +68,18 @@ typedef struct ucontext {
|
||||
struct ucontext* uc_link;
|
||||
stack_t uc_stack;
|
||||
mcontext_t uc_mcontext;
|
||||
sigset_t uc_sigmask;
|
||||
char __padding[128 - sizeof(sigset_t)];
|
||||
// Android has a wrong (smaller) sigset_t on ARM.
|
||||
union {
|
||||
sigset_t bionic;
|
||||
uint32_t kernel[2];
|
||||
} uc_sigmask;
|
||||
// The kernel adds extra padding after uc_sigmask to match glibc sigset_t on ARM.
|
||||
char __padding[120];
|
||||
unsigned long uc_regspace[128] __attribute__((__aligned__(8)));
|
||||
} ucontext_t;
|
||||
|
||||
#elif defined(__aarch64__)
|
||||
|
||||
/* TODO: gregset_t and fpregset_t. */
|
||||
|
||||
#include <asm/sigcontext.h>
|
||||
typedef struct sigcontext mcontext_t;
|
||||
|
||||
@ -87,6 +88,7 @@ typedef struct ucontext {
|
||||
struct ucontext *uc_link;
|
||||
stack_t uc_stack;
|
||||
sigset_t uc_sigmask;
|
||||
// The kernel adds extra padding after uc_sigmask to match glibc sigset_t on ARM64.
|
||||
char __padding[128 - sizeof(sigset_t)];
|
||||
mcontext_t uc_mcontext;
|
||||
} ucontext_t;
|
||||
@ -150,8 +152,11 @@ typedef struct ucontext {
|
||||
struct ucontext* uc_link;
|
||||
stack_t uc_stack;
|
||||
mcontext_t uc_mcontext;
|
||||
sigset_t uc_sigmask;
|
||||
char __padding[128 - sizeof(sigset_t)];
|
||||
// Android has a wrong (smaller) sigset_t on x86.
|
||||
union {
|
||||
sigset_t bionic;
|
||||
uint32_t kernel[2];
|
||||
} uc_sigmask;
|
||||
struct _libc_fpstate __fpregs_mem;
|
||||
} ucontext_t;
|
||||
|
||||
@ -278,7 +283,6 @@ typedef struct ucontext {
|
||||
stack_t uc_stack;
|
||||
mcontext_t uc_mcontext;
|
||||
sigset_t uc_sigmask;
|
||||
char __padding[128 - sizeof(sigset_t)];
|
||||
struct _libc_fpstate __fpregs_mem;
|
||||
} ucontext_t;
|
||||
|
||||
|
@ -92,7 +92,6 @@ extern int setresuid(uid_t, uid_t, uid_t);
extern int setresgid(gid_t, gid_t, gid_t);
extern int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid);
extern int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid);
extern int issetugid(void);
extern char* getlogin(void);
extern char* getusershell(void);
extern void setusershell(void);
@ -207,7 +206,7 @@ extern int setdomainname(const char *, size_t);

/* Used to retry syscalls that can return EINTR. */
#define TEMP_FAILURE_RETRY(exp) ({ \
    typeof (exp) _rc; \
    __typeof__(exp) _rc; \
    do { \
        _rc = (exp); \
    } while (_rc == -1 && errno == EINTR); \
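A usage sketch for the TEMP_FAILURE_RETRY macro above (illustrative only):

#include <errno.h>
#include <unistd.h>

// Retry the read if it is interrupted by a signal (EINTR) instead of
// surfacing the spurious failure to the caller.
static ssize_t read_retrying(int fd, void* buf, size_t len) {
  return TEMP_FAILURE_RETRY(read(fd, buf, len));
}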
|
||||
|
@ -28,31 +28,42 @@
#ifndef _BIONIC_FUTEX_H
#define _BIONIC_FUTEX_H

#include <errno.h>
#include <linux/futex.h>
#include <sys/cdefs.h>
#include <stdbool.h>
#include <stddef.h>
#include <sys/cdefs.h>
#include <sys/syscall.h>

__BEGIN_DECLS

struct timespec;

extern int __futex_syscall4(volatile void* ftx, int op, int value, const struct timespec* timeout);
static inline __always_inline int __futex(volatile void* ftx, int op, int value, const struct timespec* timeout) {
  // Our generated syscall assembler sets errno, but our callers (pthread functions) don't want to.
  int saved_errno = errno;
  int result = syscall(__NR_futex, ftx, op, value, timeout);
  if (__predict_false(result == -1)) {
    result = -errno;
    errno = saved_errno;
  }
  return result;
}

static inline int __futex_wake(volatile void* ftx, int count) {
  return __futex_syscall4(ftx, FUTEX_WAKE, count, NULL);
  return __futex(ftx, FUTEX_WAKE, count, NULL);
}

static inline int __futex_wake_ex(volatile void* ftx, bool shared, int count) {
  return __futex_syscall4(ftx, shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE, count, NULL);
  return __futex(ftx, shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE, count, NULL);
}

static inline int __futex_wait(volatile void* ftx, int value, const struct timespec* timeout) {
  return __futex_syscall4(ftx, FUTEX_WAIT, value, timeout);
  return __futex(ftx, FUTEX_WAIT, value, timeout);
}

static inline int __futex_wait_ex(volatile void* ftx, bool shared, int value, const struct timespec* timeout) {
  return __futex_syscall4(ftx, shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE, value, timeout);
  return __futex(ftx, shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE, value, timeout);
}

__END_DECLS
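A minimal sketch of how the wrappers above are meant to be used by callers inside the C library (the helper names here are illustrative; only __futex_wait_ex/__futex_wake_ex come from this header, and they return a negative errno value rather than setting errno):

#include <limits.h>    /* INT_MAX */
#include <stdbool.h>

// Block until *word stops holding 'expected' (or the timeout expires,
// if timeout is non-NULL). 'false' selects a process-private futex.
static bool example_wait_for_change(volatile int* word, int expected, const struct timespec* timeout) {
  int rc = __futex_wait_ex(word, false, expected, timeout);
  return rc != -ETIMEDOUT;
}

// Wake every thread currently blocked on 'word'.
static void example_wake_all(volatile int* word) {
  __futex_wake_ex(word, false, INT_MAX);
}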
|
||||
|
@ -46,32 +46,27 @@ __BEGIN_DECLS
|
||||
** pre-allocated slot directly for performance reason).
|
||||
**/
|
||||
|
||||
/* Well-known TLS slots. What data goes in which slot is arbitrary unless otherwise noted. */
|
||||
// Well-known TLS slots. What data goes in which slot is arbitrary unless otherwise noted.
|
||||
enum {
|
||||
TLS_SLOT_SELF = 0, /* The kernel requires this specific slot for x86. */
|
||||
TLS_SLOT_SELF = 0, // The kernel requires this specific slot for x86.
|
||||
TLS_SLOT_THREAD_ID,
|
||||
TLS_SLOT_ERRNO,
|
||||
|
||||
/* This slot in the child's TLS is used to synchronize the parent and child
|
||||
* during thread initialization. The child finishes with this mutex before
|
||||
* running any code that can set errno, so we can reuse the errno slot. */
|
||||
TLS_SLOT_START_MUTEX = TLS_SLOT_ERRNO,
|
||||
|
||||
/* These two aren't used by bionic itself, but allow the graphics code to
|
||||
* access TLS directly rather than using the pthread API. */
|
||||
// These two aren't used by bionic itself, but allow the graphics code to
|
||||
// access TLS directly rather than using the pthread API.
|
||||
TLS_SLOT_OPENGL_API = 3,
|
||||
TLS_SLOT_OPENGL = 4,
|
||||
|
||||
/* This slot is only used to pass information from the dynamic linker to
|
||||
* libc.so when the C library is loaded in to memory. The C runtime init
|
||||
* function will then clear it. Since its use is extremely temporary,
|
||||
* we reuse an existing location that isn't needed during libc startup. */
|
||||
// This slot is only used to pass information from the dynamic linker to
|
||||
// libc.so when the C library is loaded in to memory. The C runtime init
|
||||
// function will then clear it. Since its use is extremely temporary,
|
||||
// we reuse an existing location that isn't needed during libc startup.
|
||||
TLS_SLOT_BIONIC_PREINIT = TLS_SLOT_OPENGL_API,
|
||||
|
||||
TLS_SLOT_STACK_GUARD = 5, /* GCC requires this specific slot for x86. */
|
||||
TLS_SLOT_STACK_GUARD = 5, // GCC requires this specific slot for x86.
|
||||
TLS_SLOT_DLERROR,
|
||||
|
||||
TLS_SLOT_FIRST_USER_SLOT /* Must come last! */
|
||||
TLS_SLOT_FIRST_USER_SLOT // Must come last!
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -42,4 +42,8 @@ struct glue {
|
||||
FILE *iobs;
|
||||
};
|
||||
|
||||
#pragma GCC visibility push(hidden)
|
||||
|
||||
extern struct glue __sglue;
|
||||
|
||||
#pragma GCC visibility pop
|
||||
|
@ -41,10 +41,33 @@
|
||||
#include "wcio.h"
|
||||
#include "fileext.h"
|
||||
|
||||
#if defined(__LP64__)
|
||||
/*
|
||||
* Android <= KitKat had getc/putc macros in <stdio.h> that referred
|
||||
* to __srget/__swbuf, so those symbols need to be public for LP32
|
||||
* but can be hidden for LP64.
|
||||
*/
|
||||
__LIBC_HIDDEN__ int __srget(FILE*);
|
||||
__LIBC_HIDDEN__ int __swbuf(int, FILE*);
|
||||
|
||||
/*
|
||||
* The NDK apparently includes an android_support.a library that
|
||||
* refers to __srefill in its copy of the vsnprintf implementation.
|
||||
*/
|
||||
/* TODO(LP64): __LIBC_HIDDEN__ int __srefill(FILE*);*/
|
||||
/* http://b/15291317: the LP64 NDK needs to be fixed to remove that cruft. */
|
||||
__LIBC_ABI_PUBLIC__ int __srefill(FILE*);
|
||||
#else
|
||||
__LIBC_ABI_PUBLIC__ int __srget(FILE*);
|
||||
__LIBC_ABI_PUBLIC__ int __swbuf(int, FILE*);
|
||||
__LIBC_ABI_PUBLIC__ int __srefill(FILE*);
|
||||
#endif
|
||||
|
||||
#pragma GCC visibility push(hidden)
|
||||
|
||||
int __sflush(FILE *);
|
||||
int __sflush_locked(FILE *);
|
||||
FILE *__sfp(void);
|
||||
int __srefill(FILE *);
|
||||
int __sread(void *, char *, int);
|
||||
int __swrite(void *, const char *, int);
|
||||
fpos_t __sseek(void *, fpos_t, int);
|
||||
@ -102,10 +125,8 @@ extern int __sdidinit;
|
||||
#define NO_PRINTF_PERCENT_N
|
||||
|
||||
/* OpenBSD exposes these in <stdio.h>, but we only want them exposed to the implementation. */
|
||||
__BEGIN_DECLS
|
||||
int __srget(FILE*);
|
||||
int __swbuf(int, FILE*);
|
||||
__END_DECLS
|
||||
#define __sfeof(p) (((p)->_flags & __SEOF) != 0)
|
||||
#define __sferror(p) (((p)->_flags & __SERR) != 0)
|
||||
#define __sclearerr(p) ((void)((p)->_flags &= ~(__SERR|__SEOF)))
|
||||
@ -118,3 +139,10 @@ static __inline int __sputc(int _c, FILE* _p) {
|
||||
return (__swbuf(_c, _p));
|
||||
}
|
||||
}
|
||||
|
||||
/* OpenBSD declares these in fvwrite.h but we want to ensure they're hidden. */
|
||||
struct __suio;
|
||||
extern int __sfvwrite(FILE *, struct __suio *);
|
||||
wint_t __fputwc_unlock(wchar_t wc, FILE *fp);
|
||||
|
||||
#pragma GCC visibility pop
|
||||
|
@ -51,10 +51,18 @@ bionic = GetSymbolsFromAndroidSo('libc.so', 'libm.so')
|
||||
|
||||
# bionic includes various BSD symbols to ease porting other BSD-licensed code.
|
||||
bsd_stuff = set([
|
||||
'basename_r',
|
||||
'dirname_r',
|
||||
'fgetln',
|
||||
'fpurge',
|
||||
'funopen',
|
||||
'gamma_r',
|
||||
'gammaf_r',
|
||||
'getprogname',
|
||||
'setprogname',
|
||||
'strlcat',
|
||||
'strlcpy',
|
||||
'sys_signame',
|
||||
'wcslcat',
|
||||
'wcslcpy'
|
||||
])
|
||||
|
@ -31,4 +31,7 @@
|
||||
#define _X _CTYPE_X
|
||||
#define _B _CTYPE_B
|
||||
|
||||
/* OpenBSD has this, but we can't really implement it correctly on Linux. */
|
||||
#define issetugid() 0
|
||||
|
||||
#endif
|
||||
|
48
libc/upstream-openbsd/lib/libc/stdio/dprintf.c
Normal file
@ -0,0 +1,48 @@
|
||||
/* $OpenBSD: dprintf.c,v 1.1 2013/01/30 00:08:13 brad Exp $ */
|
||||
/* $FreeBSD: src/lib/libc/stdio/dprintf.c,v 1.2 2012/11/17 01:49:39 svnexp Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 2009 David Schultz <das@FreeBSD.org>
|
||||
* All rights reserved.
|
||||
*
|
||||
* Copyright (c) 2011 The FreeBSD Foundation
|
||||
* All rights reserved.
|
||||
* Portions of this software were developed by David Chisnall
|
||||
* under sponsorship from the FreeBSD Foundation.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdarg.h>
|
||||
|
||||
int
|
||||
dprintf(int fd, const char * __restrict fmt, ...)
|
||||
{
|
||||
va_list ap;
|
||||
int ret;
|
||||
|
||||
va_start(ap, fmt);
|
||||
ret = vdprintf(fd, fmt, ap);
|
||||
va_end(ap);
|
||||
return ret;
|
||||
}
|
73
libc/upstream-openbsd/lib/libc/stdio/vdprintf.c
Normal file
@ -0,0 +1,73 @@
|
||||
/* $OpenBSD: vdprintf.c,v 1.1 2013/01/30 00:08:13 brad Exp $ */
|
||||
/* $FreeBSD: src/lib/libc/stdio/vdprintf.c,v 1.4 2012/11/17 01:49:40 svnexp Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 2009 David Schultz <das@FreeBSD.org>
|
||||
* All rights reserved.
|
||||
*
|
||||
* Copyright (c) 2011 The FreeBSD Foundation
|
||||
* All rights reserved.
|
||||
* Portions of this software were developed by David Chisnall
|
||||
* under sponsorship from the FreeBSD Foundation.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <errno.h>
|
||||
#include <stdarg.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "local.h"
|
||||
|
||||
static int
|
||||
__dwrite(void *cookie, const char *buf, int n)
|
||||
{
|
||||
int *fdp = cookie;
|
||||
return (write(*fdp, buf, n));
|
||||
}
|
||||
|
||||
int
|
||||
vdprintf(int fd, const char * __restrict fmt, va_list ap)
|
||||
{
|
||||
FILE f;
|
||||
struct __sfileext fext;
|
||||
unsigned char buf[BUFSIZ];
|
||||
int ret;
|
||||
|
||||
_FILEEXT_SETUP(&f, &fext);
|
||||
|
||||
f._p = buf;
|
||||
f._w = sizeof(buf);
|
||||
f._flags = __SWR;
|
||||
f._file = -1;
|
||||
f._bf._base = buf;
|
||||
f._bf._size = sizeof(buf);
|
||||
f._cookie = &fd;
|
||||
f._write = __dwrite;
|
||||
|
||||
if ((ret = __vfprintf(&f, fmt, ap)) < 0)
|
||||
return ret;
|
||||
|
||||
return fflush(&f) ? EOF : ret;
|
||||
}
|
Binary file not shown.
@ -241,6 +241,11 @@ libm_common_cflags := \
|
||||
-Wno-unknown-pragmas \
|
||||
-fvisibility=hidden \
|
||||
|
||||
# Workaround the GCC "(long)fn -> lfn" optimization bug which will result in
|
||||
# self recursions for lrint, lrintf, and lrintl.
|
||||
# BUG: 14225968
|
||||
libm_common_cflags += -fno-builtin-rint -fno-builtin-rintf -fno-builtin-rintl
|
||||
|
||||
libm_common_includes := $(LOCAL_PATH)/upstream-freebsd/lib/msun/src/
|
||||
|
||||
libm_ld_includes := $(LOCAL_PATH)/upstream-freebsd/lib/msun/ld128/
|
||||
@ -270,10 +275,8 @@ LOCAL_SRC_FILES_x86 := i387/fenv.c
|
||||
LOCAL_C_INCLUDES_x86_64 := $(libm_ld_includes)
|
||||
LOCAL_SRC_FILES_x86_64 := amd64/fenv.c $(libm_ld_src_files)
|
||||
|
||||
LOCAL_CFLAGS_mips := -fno-builtin-rintf -fno-builtin-rint
|
||||
LOCAL_SRC_FILES_mips := mips/fenv.c
|
||||
|
||||
LOCAL_CFLAGS_mips64 := -fno-builtin-rintf -fno-builtin-rint
|
||||
LOCAL_C_INCLUDES_mips64 := $(libm_ld_includes)
|
||||
LOCAL_SRC_FILES_mips64 := mips/fenv.c $(libm_ld_src_files)
|
||||
|
||||
|
@ -89,10 +89,13 @@ void* dlopen(const char* filename, int flags) {
void* dlsym(void* handle, const char* symbol) {
  ScopedPthreadMutexLocker locker(&g_dl_mutex);

#if !defined(__LP64__)
  if (handle == NULL) {
    __bionic_format_dlerror("dlsym library handle is null", NULL);
    return NULL;
  }
#endif

  if (symbol == NULL) {
    __bionic_format_dlerror("dlsym symbol name is null", NULL);
    return NULL;
@ -100,9 +103,9 @@ void* dlsym(void* handle, const char* symbol) {

  soinfo* found = NULL;
  ElfW(Sym)* sym = NULL;
  if (handle == RTLD_DEFAULT) {
  if (handle == RTLD_DEFAULT || handle == (void*)0xffffffffL) {
    sym = dlsym_linear_lookup(symbol, &found, NULL);
  } else if (handle == RTLD_NEXT || handle == (void*)0xffffffffL) {
  } else if (handle == RTLD_NEXT || handle == (void*)0xfffffffeL) {
    void* caller_addr = __builtin_return_address(0);
    soinfo* si = find_containing_library(caller_addr);
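For context, the two special handles handled above are the ones applications pass to dlsym(); a small caller-side sketch (illustrative only, results may be NULL depending on what is loaded):

#include <dlfcn.h>
#include <stdio.h>

// RTLD_DEFAULT searches the global namespace; RTLD_NEXT searches the objects
// loaded after the caller (useful for interposing wrappers).
static void dlsym_handle_examples(void) {
  void* global_malloc = dlsym(RTLD_DEFAULT, "malloc");
  void* next_malloc = dlsym(RTLD_NEXT, "malloc");
  printf("default=%p next=%p\n", global_malloc, next_malloc);
}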
|
||||
|
||||
|
@ -34,7 +34,6 @@
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/atomics.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/stat.h>
|
||||
#include <unistd.h>
|
||||
@ -841,9 +840,6 @@ soinfo* do_dlopen(const char* name, int flags, soinfo* caller, const android_dle
|
||||
soinfo* si = find_library(name, flags, extinfo);
|
||||
if (si != NULL) {
|
||||
si->CallConstructors();
|
||||
if (caller != NULL) {
|
||||
caller->add_child(si);
|
||||
}
|
||||
}
|
||||
protect_data(PROT_READ);
|
||||
return si;
|
||||
|
@ -87,6 +87,7 @@ libBionicStandardTests_src_files := \
|
||||
stack_protector_test.cpp \
|
||||
stack_unwinding_test.cpp \
|
||||
stack_unwinding_test_impl.c \
|
||||
stdatomic_test.cpp \
|
||||
stdint_test.cpp \
|
||||
stdio_test.cpp \
|
||||
stdlib_test.cpp \
|
||||
|
@ -101,13 +101,12 @@ TEST(dlfcn, dlsym_failures) {
|
||||
|
||||
void* sym;
|
||||
|
||||
// NULL handle.
|
||||
#if defined(__BIONIC__) && !defined(__LP64__)
|
||||
// RTLD_DEFAULT in lp32 bionic is not (void*)0
|
||||
// so it can be distinguished from the NULL handle.
|
||||
sym = dlsym(NULL, "test");
|
||||
ASSERT_TRUE(sym == NULL);
|
||||
#if defined(__BIONIC__)
|
||||
ASSERT_SUBSTR("dlsym library handle is null", dlerror());
|
||||
#else
|
||||
ASSERT_SUBSTR("undefined symbol: test", dlerror()); // glibc isn't specific about the failure.
|
||||
#endif
|
||||
|
||||
// NULL symbol name.
|
||||
|
@ -551,12 +551,49 @@ TEST(pthread, pthread_rwlock_smoke) {
|
||||
pthread_rwlock_t l;
|
||||
ASSERT_EQ(0, pthread_rwlock_init(&l, NULL));
|
||||
|
||||
// Single read lock
|
||||
ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
|
||||
ASSERT_EQ(0, pthread_rwlock_unlock(&l));
|
||||
|
||||
// Multiple read lock
|
||||
ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
|
||||
ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
|
||||
ASSERT_EQ(0, pthread_rwlock_unlock(&l));
|
||||
ASSERT_EQ(0, pthread_rwlock_unlock(&l));
|
||||
|
||||
// Write lock
|
||||
ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
|
||||
ASSERT_EQ(0, pthread_rwlock_unlock(&l));
|
||||
|
||||
// Try writer lock
|
||||
ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
|
||||
ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
|
||||
ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
|
||||
ASSERT_EQ(0, pthread_rwlock_unlock(&l));
|
||||
|
||||
// Try reader lock
|
||||
ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
|
||||
ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
|
||||
ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
|
||||
ASSERT_EQ(0, pthread_rwlock_unlock(&l));
|
||||
ASSERT_EQ(0, pthread_rwlock_unlock(&l));
|
||||
|
||||
// Try writer lock after unlock
|
||||
ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
|
||||
ASSERT_EQ(0, pthread_rwlock_unlock(&l));
|
||||
|
||||
#ifdef __BIONIC__
|
||||
// EDEADLK in "read after write"
|
||||
ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
|
||||
ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
|
||||
ASSERT_EQ(0, pthread_rwlock_unlock(&l));
|
||||
|
||||
// EDEADLK in "write after write"
|
||||
ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
|
||||
ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
|
||||
ASSERT_EQ(0, pthread_rwlock_unlock(&l));
|
||||
#endif
|
||||
|
||||
ASSERT_EQ(0, pthread_rwlock_destroy(&l));
|
||||
}
|
||||
|
||||
|
165
tests/stdatomic_test.cpp
Normal file
@ -0,0 +1,165 @@
|
||||
/*
|
||||
* Copyright (C) 2014 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#if !defined(__GLIBC__) /* TODO: fix our prebuilt toolchains! */
|
||||
|
||||
#include <stdatomic.h>
|
||||
|
||||
TEST(stdatomic, LOCK_FREE) {
|
||||
ASSERT_TRUE(ATOMIC_BOOL_LOCK_FREE);
|
||||
ASSERT_TRUE(ATOMIC_CHAR16_T_LOCK_FREE);
|
||||
ASSERT_TRUE(ATOMIC_CHAR32_T_LOCK_FREE);
|
||||
ASSERT_TRUE(ATOMIC_CHAR_LOCK_FREE);
|
||||
ASSERT_TRUE(ATOMIC_INT_LOCK_FREE);
|
||||
ASSERT_TRUE(ATOMIC_LLONG_LOCK_FREE);
|
||||
ASSERT_TRUE(ATOMIC_LONG_LOCK_FREE);
|
||||
ASSERT_TRUE(ATOMIC_POINTER_LOCK_FREE);
|
||||
ASSERT_TRUE(ATOMIC_SHORT_LOCK_FREE);
|
||||
ASSERT_TRUE(ATOMIC_WCHAR_T_LOCK_FREE);
|
||||
}
|
||||
|
||||
TEST(stdatomic, init) {
|
||||
atomic_int v = ATOMIC_VAR_INIT(123);
|
||||
ASSERT_EQ(123, atomic_load(&v));
|
||||
|
||||
atomic_init(&v, 456);
|
||||
ASSERT_EQ(456, atomic_load(&v));
|
||||
|
||||
atomic_flag f = ATOMIC_FLAG_INIT;
|
||||
ASSERT_FALSE(atomic_flag_test_and_set(&f));
|
||||
}
TEST(stdatomic, atomic_thread_fence) {
  atomic_thread_fence(memory_order_relaxed);
  atomic_thread_fence(memory_order_consume);
  atomic_thread_fence(memory_order_acquire);
  atomic_thread_fence(memory_order_release);
  atomic_thread_fence(memory_order_acq_rel);
  atomic_thread_fence(memory_order_seq_cst);
}

TEST(stdatomic, atomic_signal_fence) {
  atomic_signal_fence(memory_order_relaxed);
  atomic_signal_fence(memory_order_consume);
  atomic_signal_fence(memory_order_acquire);
  atomic_signal_fence(memory_order_release);
  atomic_signal_fence(memory_order_acq_rel);
  atomic_signal_fence(memory_order_seq_cst);
}
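
// The C standard does not guarantee that atomic_is_lock_free() returns true for
// any particular type; these assertions assume it holds for char- and
// intmax_t-sized atomics on the architectures bionic targets.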
TEST(stdatomic, atomic_is_lock_free) {
  atomic_char small;
  atomic_intmax_t big;
  ASSERT_TRUE(atomic_is_lock_free(&small));
  ASSERT_TRUE(atomic_is_lock_free(&big));
}
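
// ATOMIC_FLAG_INIT leaves the flag clear, so the first test_and_set returns the
// previous state (false) and later calls return true until the flag is cleared.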
TEST(stdatomic, atomic_flag) {
  atomic_flag f = ATOMIC_FLAG_INIT;
  ASSERT_FALSE(atomic_flag_test_and_set(&f));
  ASSERT_TRUE(atomic_flag_test_and_set(&f));

  atomic_flag_clear(&f);

  ASSERT_FALSE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));
  ASSERT_TRUE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));

  atomic_flag_clear_explicit(&f, memory_order_relaxed);
  ASSERT_FALSE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));
}

TEST(stdatomic, atomic_store) {
  atomic_int i;
  atomic_store(&i, 123);
  ASSERT_EQ(123, atomic_load(&i));
  atomic_store_explicit(&i, 123, memory_order_relaxed);
  ASSERT_EQ(123, atomic_load_explicit(&i, memory_order_relaxed));
}

TEST(stdatomic, atomic_exchange) {
  atomic_int i;
  atomic_store(&i, 123);
  ASSERT_EQ(123, atomic_exchange(&i, 456));
  ASSERT_EQ(456, atomic_exchange_explicit(&i, 123, memory_order_relaxed));
}
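
// compare_exchange_strong fails only on a real value mismatch and writes the
// observed value back into 'expected'; the _weak variants may additionally fail
// spuriously, so the ASSERT_TRUEs below assume no spurious failure occurs in
// this uncontended, single-threaded case.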
TEST(stdatomic, atomic_compare_exchange) {
  atomic_int i;
  int expected;

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_strong(&i, &expected, 456));
  ASSERT_FALSE(atomic_compare_exchange_strong(&i, &expected, 456));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_strong_explicit(&i, &expected, 456, memory_order_relaxed, memory_order_relaxed));
  ASSERT_FALSE(atomic_compare_exchange_strong_explicit(&i, &expected, 456, memory_order_relaxed, memory_order_relaxed));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_weak(&i, &expected, 456));
  ASSERT_FALSE(atomic_compare_exchange_weak(&i, &expected, 456));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_weak_explicit(&i, &expected, 456, memory_order_relaxed, memory_order_relaxed));
  ASSERT_FALSE(atomic_compare_exchange_weak_explicit(&i, &expected, 456, memory_order_relaxed, memory_order_relaxed));
  ASSERT_EQ(456, expected);
}
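
// Each atomic_fetch_* call returns the value held before the operation; the
// final atomic_load in each test checks the accumulated result.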
TEST(stdatomic, atomic_fetch_add) {
  atomic_int i = ATOMIC_VAR_INIT(123);
  ASSERT_EQ(123, atomic_fetch_add(&i, 1));
  ASSERT_EQ(124, atomic_fetch_add_explicit(&i, 1, memory_order_relaxed));
  ASSERT_EQ(125, atomic_load(&i));
}

TEST(stdatomic, atomic_fetch_sub) {
  atomic_int i = ATOMIC_VAR_INIT(123);
  ASSERT_EQ(123, atomic_fetch_sub(&i, 1));
  ASSERT_EQ(122, atomic_fetch_sub_explicit(&i, 1, memory_order_relaxed));
  ASSERT_EQ(121, atomic_load(&i));
}

TEST(stdatomic, atomic_fetch_or) {
  atomic_int i = ATOMIC_VAR_INIT(0x100);
  ASSERT_EQ(0x100, atomic_fetch_or(&i, 0x020));
  ASSERT_EQ(0x120, atomic_fetch_or_explicit(&i, 0x003, memory_order_relaxed));
  ASSERT_EQ(0x123, atomic_load(&i));
}

TEST(stdatomic, atomic_fetch_xor) {
  atomic_int i = ATOMIC_VAR_INIT(0x100);
  ASSERT_EQ(0x100, atomic_fetch_xor(&i, 0x120));
  ASSERT_EQ(0x020, atomic_fetch_xor_explicit(&i, 0x103, memory_order_relaxed));
  ASSERT_EQ(0x123, atomic_load(&i));
}

TEST(stdatomic, atomic_fetch_and) {
  atomic_int i = ATOMIC_VAR_INIT(0x123);
  ASSERT_EQ(0x123, atomic_fetch_and(&i, 0x00f));
  ASSERT_EQ(0x003, atomic_fetch_and_explicit(&i, 0x2, memory_order_relaxed));
  ASSERT_EQ(0x002, atomic_load(&i));
}

#endif

@ -54,6 +54,24 @@ TEST(stdio, tmpfile_fileno_fprintf_rewind_fgets) {
  fclose(fp);
}
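
// dprintf(3) is the file-descriptor counterpart of fprintf(3): it writes
// formatted output directly to an open fd rather than to a FILE* stream.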
TEST(stdio, dprintf) {
  TemporaryFile tf;

  int rc = dprintf(tf.fd, "hello\n");
  ASSERT_EQ(rc, 6);

  lseek(tf.fd, 0, SEEK_SET);
  FILE* tfile = fdopen(tf.fd, "r");
  ASSERT_TRUE(tfile != NULL);

  char buf[7];
  ASSERT_EQ(buf, fgets(buf, sizeof(buf), tfile));
  ASSERT_STREQ("hello\n", buf);
  // Make sure there isn't anything else in the file.
  ASSERT_EQ(NULL, fgets(buf, sizeof(buf), tfile));
  fclose(tfile);
}

TEST(stdio, getdelim) {
  FILE* fp = tmpfile();
  ASSERT_TRUE(fp != NULL);

@ -56,21 +56,82 @@ TEST(unistd, brk_ENOMEM) {
  ASSERT_EQ(ENOMEM, errno);
}
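
// The overflow limits differ because the increment argument of sbrk() is
// presumably intptr_t on glibc but ptrdiff_t on bionic, as the macro choice
// below suggests.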
#if defined(__GLIBC__)
#define SBRK_MIN INTPTR_MIN
#define SBRK_MAX INTPTR_MAX
#else
#define SBRK_MIN PTRDIFF_MIN
#define SBRK_MAX PTRDIFF_MAX
#endif

TEST(unistd, sbrk_ENOMEM) {
  intptr_t current_brk = reinterpret_cast<intptr_t>(get_brk());
#if defined(__BIONIC__) && !defined(__LP64__)
  // There is no way to guarantee that all overflow conditions can be tested
  // without manipulating the underlying values of the current break.
  extern void* __bionic_brk;

  class ScopedBrk {
   public:
    ScopedBrk() : saved_brk_(__bionic_brk) {}
    virtual ~ScopedBrk() { __bionic_brk = saved_brk_; }

   private:
    void* saved_brk_;
  };

  ScopedBrk scope_brk;

  // Set the current break to a point that will cause an overflow.
  __bionic_brk = reinterpret_cast<void*>(static_cast<uintptr_t>(PTRDIFF_MAX) + 2);

  // Can't increase by so much that we'd overflow.
  ASSERT_EQ(reinterpret_cast<void*>(-1), sbrk(PTRDIFF_MAX));
  ASSERT_EQ(ENOMEM, errno);

  // Can't reduce by more than the current break.
  ASSERT_EQ(reinterpret_cast<void*>(-1), sbrk(-(current_brk + 1)));
  ASSERT_EQ(ENOMEM, errno);
  // Set the current break to a point that will cause an overflow.
  __bionic_brk = reinterpret_cast<void*>(static_cast<uintptr_t>(PTRDIFF_MAX));

#if defined(__BIONIC__)
  // The maximum negative value is an interesting special case that glibc gets wrong.
  ASSERT_EQ(reinterpret_cast<void*>(-1), sbrk(PTRDIFF_MIN));
  ASSERT_EQ(ENOMEM, errno);

  __bionic_brk = reinterpret_cast<void*>(static_cast<uintptr_t>(PTRDIFF_MAX) - 1);

  ASSERT_EQ(reinterpret_cast<void*>(-1), sbrk(PTRDIFF_MIN + 1));
  ASSERT_EQ(ENOMEM, errno);
#endif
#else
  class ScopedBrk {
   public:
    ScopedBrk() : saved_brk_(get_brk()) {}
    virtual ~ScopedBrk() { brk(saved_brk_); }

   private:
    void* saved_brk_;
  };

  ScopedBrk scope_brk;

  uintptr_t cur_brk = reinterpret_cast<uintptr_t>(get_brk());
  if (cur_brk < static_cast<uintptr_t>(-(SBRK_MIN+1))) {
    // Do the overflow test for a max negative increment.
    ASSERT_EQ(reinterpret_cast<void*>(-1), sbrk(SBRK_MIN));
#if defined(__BIONIC__)
    // GLIBC does not set errno in overflow case.
    ASSERT_EQ(ENOMEM, errno);
#endif
  }

  uintptr_t overflow_brk = static_cast<uintptr_t>(SBRK_MAX) + 2;
  if (cur_brk < overflow_brk) {
    // Try and move the value to PTRDIFF_MAX + 2.
    cur_brk = reinterpret_cast<uintptr_t>(sbrk(overflow_brk));
  }
  if (cur_brk >= overflow_brk) {
    ASSERT_EQ(reinterpret_cast<void*>(-1), sbrk(SBRK_MAX));
#if defined(__BIONIC__)
    // GLIBC does not set errno in overflow case.
    ASSERT_EQ(ENOMEM, errno);
#endif
  }
#endif
}

@ -442,3 +442,14 @@ TEST(wchar, wcsftime) {
  EXPECT_EQ(24U, wcsftime(buf, sizeof(buf), L"%c", &t));
  EXPECT_STREQ(L"Sun Mar 10 00:00:00 2100", buf);
}
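
// Unlike wmemcpy, wmemmove must handle overlapping source and destination
// ranges; the second copy below shifts the string within the same buffer.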
TEST(wchar, wmemmove) {
  const wchar_t const_wstr[] = L"This is a test of something or other.....";
  wchar_t* wstr = new wchar_t[sizeof(const_wstr)];

  wmemmove(wstr, const_wstr, sizeof(const_wstr)/sizeof(wchar_t));
  EXPECT_STREQ(const_wstr, wstr);

  wmemmove(wstr+5, wstr, sizeof(const_wstr)/sizeof(wchar_t) - 6);
  EXPECT_STREQ(L"This This is a test of something or other", wstr);
}