From c3f114037dbf028896310609fd28cf2b3da99c4d Mon Sep 17 00:00:00 2001 From: Elliott Hughes Date: Wed, 30 Oct 2013 14:40:09 -0700 Subject: [PATCH] fixes and pthread cleanup. was missing nonnull attributes, noreturn on pthread_exit, and had incorrect cv qualifiers for several standard functions. I've also marked the non-standard stuff (where I count glibc rather than POSIX as "standard") so we can revisit this cruft for LP64 and try to ensure we're compatible with glibc. I've also broken out the pthread_cond* functions into a new file. I've made the remaining pthread files (plus ptrace) part of the bionic code and fixed all the warnings. I've added a few more smoke tests for chunks of untested pthread functionality. We no longer need the libc_static_common_src_files hack for any of the pthread implementation because we long since stripped out the rest of the armv5 support, and this hack was just to ensure that __get_tls in libc.a went via the kernel if necessary. This patch also finishes the job of breaking up the pthread.c monolith, and adds a handful of new tests. Change-Id: Idc0ae7f5d8aa65989598acd4c01a874fe21582c7 --- libc/Android.mk | 23 +- libc/bionic/pthread-atfork.c | 126 ----- libc/bionic/pthread_atfork.cpp | 120 +++++ libc/bionic/pthread_attr.cpp | 23 +- libc/bionic/pthread_cond.cpp | 214 +++++++++ libc/bionic/pthread_create.cpp | 3 +- libc/bionic/pthread_exit.cpp | 135 ++++++ libc/bionic/pthread_internal.h | 30 +- libc/bionic/pthread_internals.cpp | 46 +- libc/bionic/{pthread.c => pthread_mutex.cpp} | 441 +----------------- libc/bionic/pthread_once.cpp | 100 ++++ .../{pthread-rwlocks.c => pthread_rwlock.cpp} | 125 ++--- libc/bionic/pthread_setschedparam.cpp | 2 +- libc/bionic/{ptrace.c => ptrace.cpp} | 47 +- .../{thread_atexit.c => thread_atexit.cpp} | 19 +- libc/bionic/{pthread-timers.c => timer.cpp} | 99 ++-- libc/include/pthread.h | 343 ++++++-------- libc/private/bionic_futex.h | 6 +- libc/private/bionic_pthread.h | 1 - tests/pthread_test.cpp | 62 +++ 20 files changed, 1006 insertions(+), 959 deletions(-) delete mode 100644 libc/bionic/pthread-atfork.c create mode 100644 libc/bionic/pthread_atfork.cpp create mode 100644 libc/bionic/pthread_cond.cpp create mode 100644 libc/bionic/pthread_exit.cpp rename libc/bionic/{pthread.c => pthread_mutex.cpp} (68%) create mode 100644 libc/bionic/pthread_once.cpp rename libc/bionic/{pthread-rwlocks.c => pthread_rwlock.cpp} (78%) rename libc/bionic/{ptrace.c => ptrace.cpp} (71%) rename libc/bionic/{thread_atexit.c => thread_atexit.cpp} (82%) rename libc/bionic/{pthread-timers.c => timer.cpp} (87%) diff --git a/libc/Android.mk b/libc/Android.mk index 116e08f19..e860d17f9 100644 --- a/libc/Android.mk +++ b/libc/Android.mk @@ -133,7 +133,6 @@ libc_common_src_files := \ bionic/system_properties_compat.c \ bionic/tcgetpgrp.c \ bionic/tcsetpgrp.c \ - bionic/thread_atexit.c \ bionic/time64.c \ bionic/umount.c \ bionic/unlockpt.c \ @@ -242,18 +241,27 @@ libc_bionic_src_files := \ bionic/pause.cpp \ bionic/pipe.cpp \ bionic/poll.cpp \ + bionic/pthread_atfork.cpp \ bionic/pthread_attr.cpp \ + bionic/pthread_cond.cpp \ + bionic/pthread_create.cpp \ bionic/pthread_detach.cpp \ bionic/pthread_equal.cpp \ + bionic/pthread_exit.cpp \ bionic/pthread_getcpuclockid.cpp \ bionic/pthread_getschedparam.cpp \ bionic/pthread_internals.cpp \ bionic/pthread_join.cpp \ + bionic/pthread_key.cpp \ bionic/pthread_kill.cpp \ + bionic/pthread_mutex.cpp \ + bionic/pthread_once.cpp \ + bionic/pthread_rwlock.cpp \ bionic/pthread_self.cpp \ bionic/pthread_setname_np.cpp \ 
bionic/pthread_setschedparam.cpp \ bionic/pthread_sigmask.cpp \ + bionic/ptrace.cpp \ bionic/raise.cpp \ bionic/readlink.cpp \ bionic/rename.cpp \ @@ -285,8 +293,10 @@ libc_bionic_src_files := \ bionic/stubs.cpp \ bionic/symlink.cpp \ bionic/sysconf.cpp \ + bionic/thread_atexit.cpp \ bionic/tdestroy.cpp \ bionic/__thread_entry.cpp \ + bionic/timer.cpp \ bionic/tmpfile.cpp \ bionic/unlink.cpp \ bionic/utimes.cpp \ @@ -414,17 +424,6 @@ libc_upstream_netbsd_src_files := \ upstream-netbsd/libc/string/strxfrm.c \ upstream-netbsd/libc/unistd/killpg.c \ -libc_common_src_files += \ - bionic/pthread-atfork.c \ - bionic/pthread-rwlocks.c \ - bionic/pthread-timers.c \ - bionic/ptrace.c \ - -libc_static_common_src_files += \ - bionic/pthread.c \ - bionic/pthread_create.cpp \ - bionic/pthread_key.cpp \ - # Architecture specific source files go here # ========================================================= ifeq ($(TARGET_ARCH),arm) diff --git a/libc/bionic/pthread-atfork.c b/libc/bionic/pthread-atfork.c deleted file mode 100644 index 42420dc4c..000000000 --- a/libc/bionic/pthread-atfork.c +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright (C) 2008 The Android Open Source Project - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS - * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED - * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT - * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ -#include -#include -#include -#include - -static pthread_mutex_t handler_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER; - -struct atfork_t -{ - CIRCLEQ_ENTRY(atfork_t) entries; - - void (*prepare)(void); - void (*child)(void); - void (*parent)(void); -}; -static CIRCLEQ_HEAD(atfork_head_t, atfork_t) atfork_head = \ - CIRCLEQ_HEAD_INITIALIZER(atfork_head); - -void __bionic_atfork_run_prepare() -{ - struct atfork_t *cursor; - - /* We will lock this here, and unlock it in the parent and child functions. - * This ensures that nobody can modify the handler array between the calls - * to the prepare and parent/child handlers. - * - * TODO: If a handler mucks with the list, it could cause problems. Right - * now it's ok because all they can do is add new items to the end - * of the list, but if/when we implement cleanup in dlclose() things - * will get more interesting... - */ - pthread_mutex_lock(&handler_mutex); - - /* Call pthread_atfork() prepare handlers. 
Posix states that the prepare - * handlers should be called in the reverse order of the parent/child - * handlers, so we iterate backwards. - */ - for (cursor = atfork_head.cqh_last; - cursor != (void*)&atfork_head; - cursor = cursor->entries.cqe_prev) { - if (cursor->prepare != NULL) { - cursor->prepare(); - } - } -} - -void __bionic_atfork_run_child() -{ - struct atfork_t *cursor; - pthread_mutexattr_t attr; - - /* Call pthread_atfork() child handlers */ - for (cursor = atfork_head.cqh_first; - cursor != (void*)&atfork_head; - cursor = cursor->entries.cqe_next) { - if (cursor->child != NULL) { - cursor->child(); - } - } - - pthread_mutexattr_init(&attr); - pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); - pthread_mutex_init(&handler_mutex, &attr); -} - -void __bionic_atfork_run_parent() -{ - struct atfork_t *cursor; - - /* Call pthread_atfork() parent handlers */ - for (cursor = atfork_head.cqh_first; - cursor != (void*)&atfork_head; - cursor = cursor->entries.cqe_next) { - if (cursor->parent != NULL) { - cursor->parent(); - } - } - - pthread_mutex_unlock(&handler_mutex); -} - -int pthread_atfork(void (*prepare)(void), void (*parent)(void), void(*child)(void)) -{ - struct atfork_t *entry = malloc(sizeof(struct atfork_t)); - - if (entry == NULL) { - return ENOMEM; - } - - entry->prepare = prepare; - entry->parent = parent; - entry->child = child; - - pthread_mutex_lock(&handler_mutex); - CIRCLEQ_INSERT_TAIL(&atfork_head, entry, entries); - pthread_mutex_unlock(&handler_mutex); - - return 0; -} diff --git a/libc/bionic/pthread_atfork.cpp b/libc/bionic/pthread_atfork.cpp new file mode 100644 index 000000000..5bf63fb41 --- /dev/null +++ b/libc/bionic/pthread_atfork.cpp @@ -0,0 +1,120 @@ +/* + * Copyright (C) 2008 The Android Open Source Project + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +#include + +static pthread_mutex_t gAtForkListMutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER; + +struct atfork_t { + atfork_t* next; + atfork_t* prev; + + void (*prepare)(void); + void (*child)(void); + void (*parent)(void); +}; + +struct atfork_list_t { + atfork_t* first; + atfork_t* last; +}; + +static atfork_list_t gAtForkList = { NULL, NULL }; + +void __bionic_atfork_run_prepare() { + // We will lock this here, and unlock it in the parent and child functions. + // This ensures that nobody can modify the handler array between the calls + // to the prepare and parent/child handlers. + // + // TODO: If a handler mucks with the list, it could cause problems. Right + // now it's ok because all they can do is add new items to the end + // of the list, but if/when we implement cleanup in dlclose() things + // will get more interesting... + pthread_mutex_lock(&gAtForkListMutex); + + // Call pthread_atfork() prepare handlers. POSIX states that the prepare + // handlers should be called in the reverse order of the parent/child + // handlers, so we iterate backwards. + for (atfork_t* it = gAtForkList.last; it != NULL; it = it->prev) { + if (it->prepare != NULL) { + it->prepare(); + } + } +} + +void __bionic_atfork_run_child() { + for (atfork_t* it = gAtForkList.first; it != NULL; it = it->next) { + if (it->child != NULL) { + it->child(); + } + } + + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init(&gAtForkListMutex, &attr); +} + +void __bionic_atfork_run_parent() { + for (atfork_t* it = gAtForkList.first; it != NULL; it = it->next) { + if (it->parent != NULL) { + it->parent(); + } + } + + pthread_mutex_unlock(&gAtForkListMutex); +} + +int pthread_atfork(void (*prepare)(void), void (*parent)(void), void(*child)(void)) { + atfork_t* entry = reinterpret_cast(malloc(sizeof(atfork_t))); + if (entry == NULL) { + return ENOMEM; + } + + entry->prepare = prepare; + entry->parent = parent; + entry->child = child; + + pthread_mutex_lock(&gAtForkListMutex); + + // Append 'entry' to the list. + entry->next = NULL; + entry->prev = gAtForkList.last; + if (entry->prev != NULL) { + entry->prev->next = entry; + } + if (gAtForkList.first == NULL) { + gAtForkList.first = entry; + } + gAtForkList.last = entry; + + pthread_mutex_unlock(&gAtForkListMutex); + + return 0; +} diff --git a/libc/bionic/pthread_attr.cpp b/libc/bionic/pthread_attr.cpp index dfb740ddf..fdf296591 100644 --- a/libc/bionic/pthread_attr.cpp +++ b/libc/bionic/pthread_attr.cpp @@ -56,7 +56,7 @@ int pthread_attr_setdetachstate(pthread_attr_t* attr, int state) { return 0; } -int pthread_attr_getdetachstate(pthread_attr_t const* attr, int* state) { +int pthread_attr_getdetachstate(const pthread_attr_t* attr, int* state) { *state = (attr->flags & PTHREAD_ATTR_FLAG_DETACHED) ? 
PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE; return 0; } @@ -66,17 +66,17 @@ int pthread_attr_setschedpolicy(pthread_attr_t* attr, int policy) { return 0; } -int pthread_attr_getschedpolicy(pthread_attr_t const* attr, int* policy) { +int pthread_attr_getschedpolicy(const pthread_attr_t* attr, int* policy) { *policy = attr->sched_policy; return 0; } -int pthread_attr_setschedparam(pthread_attr_t * attr, struct sched_param const* param) { +int pthread_attr_setschedparam(pthread_attr_t* attr, const sched_param* param) { attr->sched_priority = param->sched_priority; return 0; } -int pthread_attr_getschedparam(pthread_attr_t const* attr, struct sched_param* param) { +int pthread_attr_getschedparam(const pthread_attr_t* attr, sched_param* param) { param->sched_priority = attr->sched_priority; return 0; } @@ -89,7 +89,7 @@ int pthread_attr_setstacksize(pthread_attr_t* attr, size_t stack_size) { return 0; } -int pthread_attr_getstacksize(pthread_attr_t const* attr, size_t* stack_size) { +int pthread_attr_getstacksize(const pthread_attr_t* attr, size_t* stack_size) { *stack_size = attr->stack_size; return 0; } @@ -100,7 +100,7 @@ int pthread_attr_setstackaddr(pthread_attr_t*, void*) { return ENOSYS; } -int pthread_attr_getstackaddr(pthread_attr_t const* attr, void** stack_addr) { +int pthread_attr_getstackaddr(const pthread_attr_t* attr, void** stack_addr) { // This was removed from POSIX.1-2008. // Needed for ABI compatibility with the NDK. *stack_addr = (char*)attr->stack_base + attr->stack_size; @@ -119,7 +119,7 @@ int pthread_attr_setstack(pthread_attr_t* attr, void* stack_base, size_t stack_s return 0; } -int pthread_attr_getstack(pthread_attr_t const* attr, void** stack_base, size_t* stack_size) { +int pthread_attr_getstack(const pthread_attr_t* attr, void** stack_base, size_t* stack_size) { *stack_base = attr->stack_base; *stack_size = attr->stack_size; return 0; @@ -130,7 +130,7 @@ int pthread_attr_setguardsize(pthread_attr_t* attr, size_t guard_size) { return 0; } -int pthread_attr_getguardsize(pthread_attr_t const* attr, size_t* guard_size) { +int pthread_attr_getguardsize(const pthread_attr_t* attr, size_t* guard_size) { *guard_size = attr->guard_size; return 0; } @@ -141,7 +141,7 @@ int pthread_getattr_np(pthread_t thid, pthread_attr_t* attr) { return 0; } -int pthread_attr_setscope(pthread_attr_t* , int scope) { +int pthread_attr_setscope(pthread_attr_t*, int scope) { if (scope == PTHREAD_SCOPE_SYSTEM) { return 0; } @@ -151,6 +151,7 @@ int pthread_attr_setscope(pthread_attr_t* , int scope) { return EINVAL; } -int pthread_attr_getscope(pthread_attr_t const*) { - return PTHREAD_SCOPE_SYSTEM; +int pthread_attr_getscope(const pthread_attr_t*, int* scope) { + *scope = PTHREAD_SCOPE_SYSTEM; + return 0; } diff --git a/libc/bionic/pthread_cond.cpp b/libc/bionic/pthread_cond.cpp new file mode 100644 index 000000000..abd453e8f --- /dev/null +++ b/libc/bionic/pthread_cond.cpp @@ -0,0 +1,214 @@ +/* + * Copyright (C) 2008 The Android Open Source Project + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include + +#include +#include +#include +#include +#include + +#include "pthread_internal.h" + +#include "private/bionic_atomic_inline.h" +#include "private/bionic_futex.h" +#include "private/bionic_pthread.h" +#include "private/bionic_time_conversions.h" +#include "private/bionic_tls.h" +#include "private/thread_private.h" + +int pthread_condattr_init(pthread_condattr_t* attr) { + if (attr == NULL) { + return EINVAL; + } + *attr = PTHREAD_PROCESS_PRIVATE; + return 0; +} + +int pthread_condattr_getpshared(const pthread_condattr_t* attr, int* pshared) { + if (attr == NULL || pshared == NULL) { + return EINVAL; + } + *pshared = *attr; + return 0; +} + +int pthread_condattr_setpshared(pthread_condattr_t* attr, int pshared) { + if (attr == NULL) { + return EINVAL; + } + if (pshared != PTHREAD_PROCESS_SHARED && pshared != PTHREAD_PROCESS_PRIVATE) { + return EINVAL; + } + *attr = pshared; + return 0; +} + +int pthread_condattr_destroy(pthread_condattr_t* attr) { + if (attr == NULL) { + return EINVAL; + } + *attr = 0xdeada11d; + return 0; +} + +// We use one bit in condition variable values as the 'shared' flag +// The rest is a counter. +#define COND_SHARED_MASK 0x0001 +#define COND_COUNTER_INCREMENT 0x0002 +#define COND_COUNTER_MASK (~COND_SHARED_MASK) + +#define COND_IS_SHARED(c) (((c)->value & COND_SHARED_MASK) != 0) + +// XXX *technically* there is a race condition that could allow +// XXX a signal to be missed. If thread A is preempted in _wait() +// XXX after unlocking the mutex and before waiting, and if other +// XXX threads call signal or broadcast UINT_MAX/2 times (exactly), +// XXX before thread A is scheduled again and calls futex_wait(), +// XXX then the signal will be lost. + +int pthread_cond_init(pthread_cond_t* cond, const pthread_condattr_t* attr) { + if (cond == NULL) { + return EINVAL; + } + + cond->value = 0; + + if (attr != NULL && *attr == PTHREAD_PROCESS_SHARED) { + cond->value |= COND_SHARED_MASK; + } + + return 0; +} + +int pthread_cond_destroy(pthread_cond_t* cond) { + if (cond == NULL) { + return EINVAL; + } + + cond->value = 0xdeadc04d; + return 0; +} + +// This function is used by pthread_cond_broadcast and +// pthread_cond_signal to atomically decrement the counter +// then wake up 'counter' threads. 
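For context (not part of this patch): the pshared attribute handled above is what sets the COND_SHARED_MASK bit in pthread_cond_init(), which in turn decides whether the futex calls use the _PRIVATE variants. A minimal, hedged usage sketch (the helper name is hypothetical):

    #include <pthread.h>

    // Illustrative only: initialize a condition variable intended for
    // cross-process use. pthread_cond_init() records the pshared choice in the
    // low bit of cond->value (COND_SHARED_MASK); the futex wake/wait calls then
    // pick FUTEX_WAKE/FUTEX_WAIT rather than the _PRIVATE variants.
    static int make_process_shared_cond(pthread_cond_t* cond) {
      pthread_condattr_t attr;
      pthread_condattr_init(&attr);
      pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
      int rc = pthread_cond_init(cond, &attr);
      pthread_condattr_destroy(&attr);
      return rc;
    }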
+static int __pthread_cond_pulse(pthread_cond_t* cond, int counter) { + if (__predict_false(cond == NULL)) { + return EINVAL; + } + + long flags = (cond->value & ~COND_COUNTER_MASK); + while (true) { + long old_value = cond->value; + long new_value = ((old_value - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK) | flags; + if (__bionic_cmpxchg(old_value, new_value, &cond->value) == 0) { + break; + } + } + + // Ensure that all memory accesses previously made by this thread are + // visible to the woken thread(s). On the other side, the "wait" + // code will issue any necessary barriers when locking the mutex. + // + // This may not strictly be necessary -- if the caller follows + // recommended practice and holds the mutex before signaling the cond + // var, the mutex ops will provide correct semantics. If they don't + // hold the mutex, they're subject to race conditions anyway. + ANDROID_MEMBAR_FULL(); + + __futex_wake_ex(&cond->value, COND_IS_SHARED(cond), counter); + return 0; +} + +__LIBC_HIDDEN__ +int __pthread_cond_timedwait_relative(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* reltime) { + int old_value = cond->value; + + pthread_mutex_unlock(mutex); + int status = __futex_wait_ex(&cond->value, COND_IS_SHARED(cond), old_value, reltime); + pthread_mutex_lock(mutex); + + if (status == (-ETIMEDOUT)) { + return ETIMEDOUT; + } + return 0; +} + +__LIBC_HIDDEN__ +int __pthread_cond_timedwait(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* abstime, clockid_t clock) { + timespec ts; + timespec* tsp; + + if (abstime != NULL) { + if (__timespec_to_absolute(&ts, abstime, clock) < 0) { + return ETIMEDOUT; + } + tsp = &ts; + } else { + tsp = NULL; + } + + return __pthread_cond_timedwait_relative(cond, mutex, tsp); +} + +int pthread_cond_broadcast(pthread_cond_t* cond) { + return __pthread_cond_pulse(cond, INT_MAX); +} + +int pthread_cond_signal(pthread_cond_t* cond) { + return __pthread_cond_pulse(cond, 1); +} + +int pthread_cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex) { + return __pthread_cond_timedwait(cond, mutex, NULL, CLOCK_REALTIME); +} + +int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t * mutex, const timespec *abstime) { + return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_REALTIME); +} + +// TODO: this exists only for backward binary compatibility. +int pthread_cond_timedwait_monotonic(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* abstime) { + return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC); +} + +int pthread_cond_timedwait_monotonic_np(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* abstime) { + return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC); +} + +int pthread_cond_timedwait_relative_np(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* reltime) { + return __pthread_cond_timedwait_relative(cond, mutex, reltime); +} + +int pthread_cond_timeout_np(pthread_cond_t* cond, pthread_mutex_t* mutex, unsigned ms) { + timespec ts; + timespec_from_ms(ts, ms); + return __pthread_cond_timedwait_relative(cond, mutex, &ts); +} diff --git a/libc/bionic/pthread_create.cpp b/libc/bionic/pthread_create.cpp index f0ee22242..21533108f 100644 --- a/libc/bionic/pthread_create.cpp +++ b/libc/bionic/pthread_create.cpp @@ -82,13 +82,12 @@ void __init_tls(pthread_internal_t* thread) { } } -__LIBC_ABI_PRIVATE__ int _init_thread(pthread_internal_t* thread, bool add_to_thread_list) { int error = 0; // Set the scheduling policy/priority of the thread. 
if (thread->attr.sched_policy != SCHED_NORMAL) { - struct sched_param param; + sched_param param; param.sched_priority = thread->attr.sched_priority; if (sched_setscheduler(thread->tid, thread->attr.sched_policy, ¶m) == -1) { #if __LP64__ diff --git a/libc/bionic/pthread_exit.cpp b/libc/bionic/pthread_exit.cpp new file mode 100644 index 000000000..aa9bd3832 --- /dev/null +++ b/libc/bionic/pthread_exit.cpp @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2008 The Android Open Source Project + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include + +#include +#include + +#include "pthread_internal.h" + +extern "C" void _exit_with_stack_teardown(void*, size_t, int); +extern "C" void __exit(int); + +/* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support C++ exceptions + * and thread cancelation + */ + +void __pthread_cleanup_push(__pthread_cleanup_t* c, __pthread_cleanup_func_t routine, void* arg) { + pthread_internal_t* thread = __get_thread(); + c->__cleanup_routine = routine; + c->__cleanup_arg = arg; + c->__cleanup_prev = thread->cleanup_stack; + thread->cleanup_stack = c; +} + +void __pthread_cleanup_pop(__pthread_cleanup_t* c, int execute) { + pthread_internal_t* thread = __get_thread(); + thread->cleanup_stack = c->__cleanup_prev; + if (execute) { + c->__cleanup_routine(c->__cleanup_arg); + } +} + +void pthread_exit(void* retval) { + pthread_internal_t* thread = __get_thread(); + + // Call the cleanup handlers first. + while (thread->cleanup_stack) { + __pthread_cleanup_t* c = thread->cleanup_stack; + thread->cleanup_stack = c->__cleanup_prev; + c->__cleanup_routine(c->__cleanup_arg); + } + + // Call the TLS destructors. It is important to do that before removing this + // thread from the global list. This will ensure that if someone else deletes + // a TLS key, the corresponding value will be set to NULL in this thread's TLS + // space (see pthread_key_delete). + pthread_key_clean_all(); + + if (thread->alternate_signal_stack != NULL) { + // Tell the kernel to stop using the alternate signal stack. + stack_t ss; + ss.ss_sp = NULL; + ss.ss_flags = SS_DISABLE; + sigaltstack(&ss, NULL); + + // Free it. 
+ munmap(thread->alternate_signal_stack, SIGSTKSZ); + thread->alternate_signal_stack = NULL; + } + + // Keep track of what we need to know about the stack before we lose the pthread_internal_t. + void* stack_base = thread->attr.stack_base; + size_t stack_size = thread->attr.stack_size; + bool user_allocated_stack = ((thread->attr.flags & PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK) != 0); + + // If the thread is detached, destroy the pthread_internal_t, + // otherwise keep it in memory and signal any joiners. + pthread_mutex_lock(&gThreadListLock); + if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) { + _pthread_internal_remove_locked(thread); + } else { + // Make sure that the pthread_internal_t doesn't have stale pointers to a stack that + // will be unmapped after the exit call below. + if (!user_allocated_stack) { + thread->attr.stack_base = NULL; + thread->attr.stack_size = 0; + thread->tls = NULL; + } + + // Indicate that the thread has exited for joining threads. + thread->attr.flags |= PTHREAD_ATTR_FLAG_ZOMBIE; + thread->return_value = retval; + + // Signal the joining thread if present. + if (thread->attr.flags & PTHREAD_ATTR_FLAG_JOINED) { + pthread_cond_signal(&thread->join_cond); + } + } + pthread_mutex_unlock(&gThreadListLock); + + if (user_allocated_stack) { + // Cleaning up this thread's stack is the creator's responsibility, not ours. + __exit(0); + } else { + // We need to munmap the stack we're running on before calling exit. + // That's not something we can do in C. + + // We don't want to take a signal after we've unmapped the stack. + // That's one last thing we can handle in C. + sigset_t mask; + sigfillset(&mask); + sigprocmask(SIG_SETMASK, &mask, NULL); + + _exit_with_stack_teardown(stack_base, stack_size, 0); + } + + /* NOTREACHED, but we told the compiler this function is noreturn, and it doesn't believe us. 
*/ + abort(); +} diff --git a/libc/bionic/pthread_internal.h b/libc/bionic/pthread_internal.h index ce8b410bd..8cca83aa9 100644 --- a/libc/bionic/pthread_internal.h +++ b/libc/bionic/pthread_internal.h @@ -29,13 +29,8 @@ #define _PTHREAD_INTERNAL_H_ #include -#include -#include -__BEGIN_DECLS - -typedef struct pthread_internal_t -{ +struct pthread_internal_t { struct pthread_internal_t* next; struct pthread_internal_t* prev; pthread_attr_t attr; @@ -55,12 +50,12 @@ typedef struct pthread_internal_t */ #define __BIONIC_DLERROR_BUFFER_SIZE 512 char dlerror_buffer[__BIONIC_DLERROR_BUFFER_SIZE]; -} pthread_internal_t; +}; -int _init_thread(pthread_internal_t* thread, bool add_to_thread_list); -void __init_tls(pthread_internal_t* thread); -void _pthread_internal_add(pthread_internal_t* thread); -pthread_internal_t* __get_thread(void); +__LIBC_HIDDEN__ int _init_thread(pthread_internal_t* thread, bool add_to_thread_list); +__LIBC_HIDDEN__ void __init_tls(pthread_internal_t* thread); +__LIBC_HIDDEN__ void _pthread_internal_add(pthread_internal_t* thread); +__LIBC_HIDDEN__ pthread_internal_t* __get_thread(void); __LIBC_HIDDEN__ void pthread_key_clean_all(void); __LIBC_HIDDEN__ void _pthread_internal_remove_locked(pthread_internal_t* thread); @@ -91,12 +86,13 @@ __LIBC_HIDDEN__ void _pthread_internal_remove_locked(pthread_internal_t* thread) __LIBC_HIDDEN__ extern pthread_internal_t* gThreadList; __LIBC_HIDDEN__ extern pthread_mutex_t gThreadListLock; -/* needed by fork.c */ -extern void __timer_table_start_stop(int stop); -extern void __bionic_atfork_run_prepare(); -extern void __bionic_atfork_run_child(); -extern void __bionic_atfork_run_parent(); +__LIBC_HIDDEN__ int __timespec_to_absolute(timespec*, const timespec*, clockid_t); -__END_DECLS +/* needed by fork.c */ +__LIBC_HIDDEN__ extern void __timer_table_start_stop(int); +__LIBC_HIDDEN__ extern void __bionic_atfork_run_prepare(); +__LIBC_HIDDEN__ extern void __bionic_atfork_run_child(); +__LIBC_HIDDEN__ extern void __bionic_atfork_run_parent(); +__LIBC_HIDDEN__ extern int __pthread_settid(pthread_t, pid_t); #endif /* _PTHREAD_INTERNAL_H_ */ diff --git a/libc/bionic/pthread_internals.cpp b/libc/bionic/pthread_internals.cpp index 01ecd5f6b..4b1f6efb9 100644 --- a/libc/bionic/pthread_internals.cpp +++ b/libc/bionic/pthread_internals.cpp @@ -28,11 +28,13 @@ #include "pthread_internal.h" +#include "private/bionic_futex.h" +#include "private/bionic_pthread.h" #include "private/bionic_tls.h" #include "private/ScopedPthreadMutexLocker.h" -__LIBC_HIDDEN__ pthread_internal_t* gThreadList = NULL; -__LIBC_HIDDEN__ pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER; +pthread_internal_t* gThreadList = NULL; +pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER; void _pthread_internal_remove_locked(pthread_internal_t* thread) { if (thread->next != NULL) { @@ -51,7 +53,7 @@ void _pthread_internal_remove_locked(pthread_internal_t* thread) { } } -__LIBC_ABI_PRIVATE__ void _pthread_internal_add(pthread_internal_t* thread) { +void _pthread_internal_add(pthread_internal_t* thread) { ScopedPthreadMutexLocker locker(&gThreadListLock); // We insert at the head. 
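The head-insertion lines of _pthread_internal_add() are unchanged context and so do not appear in the hunk below. As a rough sketch only (not copied from the tree), insertion at the head of a doubly-linked list using the next/prev fields declared in pthread_internal_t, with gThreadListLock assumed held, looks like this:

    // Illustrative sketch, not taken from this patch: push 'thread' onto the
    // front of the doubly-linked thread list. The caller must hold the list lock.
    static void thread_list_push_head(pthread_internal_t** head, pthread_internal_t* thread) {
      thread->prev = NULL;
      thread->next = *head;
      if (thread->next != NULL) {
        thread->next->prev = thread;
      }
      *head = thread;
    }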
@@ -63,6 +65,42 @@ __LIBC_ABI_PRIVATE__ void _pthread_internal_add(pthread_internal_t* thread) { gThreadList = thread; } -__LIBC_ABI_PRIVATE__ pthread_internal_t* __get_thread(void) { +pthread_internal_t* __get_thread(void) { return reinterpret_cast(__get_tls()[TLS_SLOT_THREAD_ID]); } + +pid_t __pthread_gettid(pthread_t t) { + return reinterpret_cast(t)->tid; +} + +int __pthread_settid(pthread_t t, pid_t tid) { + if (t == 0) { + return EINVAL; + } + reinterpret_cast(t)->tid = tid; + return 0; +} + +// Initialize 'ts' with the difference between 'abstime' and the current time +// according to 'clock'. Returns -1 if abstime already expired, or 0 otherwise. +int __timespec_to_absolute(timespec* ts, const timespec* abstime, clockid_t clock) { + clock_gettime(clock, ts); + ts->tv_sec = abstime->tv_sec - ts->tv_sec; + ts->tv_nsec = abstime->tv_nsec - ts->tv_nsec; + if (ts->tv_nsec < 0) { + ts->tv_sec--; + ts->tv_nsec += 1000000000; + } + if ((ts->tv_nsec < 0) || (ts->tv_sec < 0)) { + return -1; + } + return 0; +} + +int __futex_wake_ex(volatile void* ftx, int pshared, int val) { + return __futex_syscall3(ftx, pshared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE, val); +} + +int __futex_wait_ex(volatile void* ftx, int pshared, int val, const timespec* timeout) { + return __futex_syscall4(ftx, pshared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE, val, timeout); +} diff --git a/libc/bionic/pthread.c b/libc/bionic/pthread_mutex.cpp similarity index 68% rename from libc/bionic/pthread.c rename to libc/bionic/pthread_mutex.cpp index aa300e9e9..6b8215118 100644 --- a/libc/bionic/pthread.c +++ b/libc/bionic/pthread_mutex.cpp @@ -45,119 +45,6 @@ extern void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex); extern void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex); -extern void _exit_with_stack_teardown(void * stackBase, size_t stackSize, int status); -extern void __exit(int status); - -int __futex_wake_ex(volatile void *ftx, int pshared, int val) -{ - return __futex_syscall3(ftx, pshared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE, val); -} - -int __futex_wait_ex(volatile void *ftx, int pshared, int val, const struct timespec *timeout) -{ - return __futex_syscall4(ftx, pshared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE, val, timeout); -} - -/* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support C++ exceptions - * and thread cancelation - */ - -void __pthread_cleanup_push( __pthread_cleanup_t* c, - __pthread_cleanup_func_t routine, - void* arg ) -{ - pthread_internal_t* thread = __get_thread(); - - c->__cleanup_routine = routine; - c->__cleanup_arg = arg; - c->__cleanup_prev = thread->cleanup_stack; - thread->cleanup_stack = c; -} - -void __pthread_cleanup_pop( __pthread_cleanup_t* c, int execute ) -{ - pthread_internal_t* thread = __get_thread(); - - thread->cleanup_stack = c->__cleanup_prev; - if (execute) - c->__cleanup_routine(c->__cleanup_arg); -} - -void pthread_exit(void* retval) { - pthread_internal_t* thread = __get_thread(); - - // Call the cleanup handlers first. - while (thread->cleanup_stack) { - __pthread_cleanup_t* c = thread->cleanup_stack; - thread->cleanup_stack = c->__cleanup_prev; - c->__cleanup_routine(c->__cleanup_arg); - } - - // Call the TLS destructors. It is important to do that before removing this - // thread from the global list. This will ensure that if someone else deletes - // a TLS key, the corresponding value will be set to NULL in this thread's TLS - // space (see pthread_key_delete). 
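For reference (not part of this patch): the timed waits above take an absolute deadline, and __timespec_to_absolute() converts it back into the relative timeout the futex syscall expects. A hedged sketch of how a caller might build such a deadline (the helper name is hypothetical):

    #include <time.h>

    // Illustrative only: build an absolute CLOCK_REALTIME deadline 'ms'
    // milliseconds from now, suitable for pthread_cond_timedwait();
    // __timespec_to_absolute() later turns it back into a relative timeout
    // for __futex_wait_ex().
    static void deadline_from_now_ms(timespec* abstime, unsigned ms) {
      clock_gettime(CLOCK_REALTIME, abstime);
      abstime->tv_sec += ms / 1000;
      abstime->tv_nsec += (ms % 1000) * 1000000;
      if (abstime->tv_nsec >= 1000000000) {
        abstime->tv_sec += 1;
        abstime->tv_nsec -= 1000000000;
      }
    }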
- pthread_key_clean_all(); - - if (thread->alternate_signal_stack != NULL) { - // Tell the kernel to stop using the alternate signal stack. - stack_t ss; - ss.ss_sp = NULL; - ss.ss_flags = SS_DISABLE; - sigaltstack(&ss, NULL); - - // Free it. - munmap(thread->alternate_signal_stack, SIGSTKSZ); - thread->alternate_signal_stack = NULL; - } - - // Keep track of what we need to know about the stack before we lose the pthread_internal_t. - void* stack_base = thread->attr.stack_base; - size_t stack_size = thread->attr.stack_size; - bool user_allocated_stack = ((thread->attr.flags & PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK) != 0); - - // If the thread is detached, destroy the pthread_internal_t, - // otherwise keep it in memory and signal any joiners. - pthread_mutex_lock(&gThreadListLock); - if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) { - _pthread_internal_remove_locked(thread); - } else { - // Make sure that the thread struct doesn't have stale pointers to a stack that - // will be unmapped after the exit call below. - if (!user_allocated_stack) { - thread->attr.stack_base = NULL; - thread->attr.stack_size = 0; - thread->tls = NULL; - } - - // Indicate that the thread has exited for joining threads. - thread->attr.flags |= PTHREAD_ATTR_FLAG_ZOMBIE; - thread->return_value = retval; - - // Signal the joining thread if present. - if (thread->attr.flags & PTHREAD_ATTR_FLAG_JOINED) { - pthread_cond_signal(&thread->join_cond); - } - } - pthread_mutex_unlock(&gThreadListLock); - - if (user_allocated_stack) { - // Cleaning up this thread's stack is the creator's responsibility, not ours. - __exit(0); - } else { - // We need to munmap the stack we're running on before calling exit. - // That's not something we can do in C. - - // We don't want to take a signal after we've unmapped the stack. - // That's one last thing we can handle in C. - sigset_t mask; - sigfillset(&mask); - sigprocmask(SIG_SETMASK, &mask, NULL); - - _exit_with_stack_teardown(stack_base, stack_size, 0); - } -} - /* a mutex is implemented as a 32-bit integer holding the following fields * * bits: name description @@ -387,8 +274,7 @@ int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared) return EINVAL; } -int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared) -{ +int pthread_mutexattr_getpshared(const pthread_mutexattr_t* attr, int* pshared) { if (!attr || !pshared) return EINVAL; @@ -802,31 +688,10 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex) return err; } -/* initialize 'ts' with the difference between 'abstime' and the current time - * according to 'clock'. Returns -1 if abstime already expired, or 0 otherwise. - */ -static int -__timespec_to_absolute(struct timespec* ts, const struct timespec* abstime, clockid_t clock) -{ - clock_gettime(clock, ts); - ts->tv_sec = abstime->tv_sec - ts->tv_sec; - ts->tv_nsec = abstime->tv_nsec - ts->tv_nsec; - if (ts->tv_nsec < 0) { - ts->tv_sec--; - ts->tv_nsec += 1000000000; - } - if ((ts->tv_nsec < 0) || (ts->tv_sec < 0)) - return -1; - - return 0; -} - /* initialize 'abstime' to the current time according to 'clock' plus 'msecs' * milliseconds. 
*/ -static void -__timespec_to_relative_msec(struct timespec* abstime, unsigned msecs, clockid_t clock) -{ +static void __timespec_to_relative_msec(timespec* abstime, unsigned msecs, clockid_t clock) { clock_gettime(clock, abstime); abstime->tv_sec += msecs/1000; abstime->tv_nsec += (msecs%1000)*1000000; @@ -840,8 +705,8 @@ __LIBC_HIDDEN__ int pthread_mutex_lock_timeout_np_impl(pthread_mutex_t *mutex, unsigned msecs) { clockid_t clock = CLOCK_MONOTONIC; - struct timespec abstime; - struct timespec ts; + timespec abstime; + timespec ts; int mvalue, mtype, tid, shared; /* compute absolute expiration time */ @@ -900,7 +765,7 @@ int pthread_mutex_lock_timeout_np_impl(pthread_mutex_t *mutex, unsigned msecs) } for (;;) { - struct timespec ts; + timespec ts; /* if the value is 'unlocked', try to acquire it directly */ /* NOTE: put state to 2 since we know there is contention */ @@ -977,299 +842,3 @@ int pthread_mutex_destroy(pthread_mutex_t *mutex) mutex->value = 0xdead10cc; return 0; } - - - -int pthread_condattr_init(pthread_condattr_t *attr) -{ - if (attr == NULL) - return EINVAL; - - *attr = PTHREAD_PROCESS_PRIVATE; - return 0; -} - -int pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared) -{ - if (attr == NULL || pshared == NULL) - return EINVAL; - - *pshared = *attr; - return 0; -} - -int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared) -{ - if (attr == NULL) - return EINVAL; - - if (pshared != PTHREAD_PROCESS_SHARED && - pshared != PTHREAD_PROCESS_PRIVATE) - return EINVAL; - - *attr = pshared; - return 0; -} - -int pthread_condattr_destroy(pthread_condattr_t *attr) -{ - if (attr == NULL) - return EINVAL; - - *attr = 0xdeada11d; - return 0; -} - -/* We use one bit in condition variable values as the 'shared' flag - * The rest is a counter. - */ -#define COND_SHARED_MASK 0x0001 -#define COND_COUNTER_INCREMENT 0x0002 -#define COND_COUNTER_MASK (~COND_SHARED_MASK) - -#define COND_IS_SHARED(c) (((c)->value & COND_SHARED_MASK) != 0) - -/* XXX *technically* there is a race condition that could allow - * XXX a signal to be missed. If thread A is preempted in _wait() - * XXX after unlocking the mutex and before waiting, and if other - * XXX threads call signal or broadcast UINT_MAX/2 times (exactly), - * XXX before thread A is scheduled again and calls futex_wait(), - * XXX then the signal will be lost. - */ - -int pthread_cond_init(pthread_cond_t *cond, - const pthread_condattr_t *attr) -{ - if (cond == NULL) - return EINVAL; - - cond->value = 0; - - if (attr != NULL && *attr == PTHREAD_PROCESS_SHARED) - cond->value |= COND_SHARED_MASK; - - return 0; -} - -int pthread_cond_destroy(pthread_cond_t *cond) -{ - if (cond == NULL) - return EINVAL; - - cond->value = 0xdeadc04d; - return 0; -} - -/* This function is used by pthread_cond_broadcast and - * pthread_cond_signal to atomically decrement the counter - * then wake-up 'counter' threads. - */ -static int -__pthread_cond_pulse(pthread_cond_t *cond, int counter) -{ - long flags; - - if (__predict_false(cond == NULL)) - return EINVAL; - - flags = (cond->value & ~COND_COUNTER_MASK); - for (;;) { - long oldval = cond->value; - long newval = ((oldval - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK) - | flags; - if (__bionic_cmpxchg(oldval, newval, &cond->value) == 0) - break; - } - - /* - * Ensure that all memory accesses previously made by this thread are - * visible to the woken thread(s). On the other side, the "wait" - * code will issue any necessary barriers when locking the mutex. 
- * - * This may not strictly be necessary -- if the caller follows - * recommended practice and holds the mutex before signaling the cond - * var, the mutex ops will provide correct semantics. If they don't - * hold the mutex, they're subject to race conditions anyway. - */ - ANDROID_MEMBAR_FULL(); - - __futex_wake_ex(&cond->value, COND_IS_SHARED(cond), counter); - return 0; -} - -int pthread_cond_broadcast(pthread_cond_t *cond) -{ - return __pthread_cond_pulse(cond, INT_MAX); -} - -int pthread_cond_signal(pthread_cond_t *cond) -{ - return __pthread_cond_pulse(cond, 1); -} - -int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) -{ - return pthread_cond_timedwait(cond, mutex, NULL); -} - -int __pthread_cond_timedwait_relative(pthread_cond_t *cond, - pthread_mutex_t * mutex, - const struct timespec *reltime) -{ - int status; - int oldvalue = cond->value; - - pthread_mutex_unlock(mutex); - status = __futex_wait_ex(&cond->value, COND_IS_SHARED(cond), oldvalue, reltime); - pthread_mutex_lock(mutex); - - if (status == (-ETIMEDOUT)) return ETIMEDOUT; - return 0; -} - -int __pthread_cond_timedwait(pthread_cond_t *cond, - pthread_mutex_t * mutex, - const struct timespec *abstime, - clockid_t clock) -{ - struct timespec ts; - struct timespec * tsp; - - if (abstime != NULL) { - if (__timespec_to_absolute(&ts, abstime, clock) < 0) - return ETIMEDOUT; - tsp = &ts; - } else { - tsp = NULL; - } - - return __pthread_cond_timedwait_relative(cond, mutex, tsp); -} - -int pthread_cond_timedwait(pthread_cond_t *cond, - pthread_mutex_t * mutex, - const struct timespec *abstime) -{ - return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_REALTIME); -} - - -/* this one exists only for backward binary compatibility */ -int pthread_cond_timedwait_monotonic(pthread_cond_t *cond, - pthread_mutex_t * mutex, - const struct timespec *abstime) -{ - return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC); -} - -int pthread_cond_timedwait_monotonic_np(pthread_cond_t *cond, - pthread_mutex_t * mutex, - const struct timespec *abstime) -{ - return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC); -} - -int pthread_cond_timedwait_relative_np(pthread_cond_t *cond, - pthread_mutex_t * mutex, - const struct timespec *reltime) -{ - return __pthread_cond_timedwait_relative(cond, mutex, reltime); -} - -int pthread_cond_timeout_np(pthread_cond_t *cond, - pthread_mutex_t * mutex, - unsigned ms) -{ - struct timespec ts; - timespec_from_ms(ts, ms); - return __pthread_cond_timedwait_relative(cond, mutex, &ts); -} - - -/* NOTE: this implementation doesn't support a init function that throws a C++ exception - * or calls fork() - */ -int pthread_once( pthread_once_t* once_control, void (*init_routine)(void) ) -{ - volatile pthread_once_t* ocptr = once_control; - - /* PTHREAD_ONCE_INIT is 0, we use the following bit flags - * - * bit 0 set -> initialization is under way - * bit 1 set -> initialization is complete - */ -#define ONCE_INITIALIZING (1 << 0) -#define ONCE_COMPLETED (1 << 1) - - /* First check if the once is already initialized. This will be the common - * case and we want to make this as fast as possible. Note that this still - * requires a load_acquire operation here to ensure that all the - * stores performed by the initialization function are observable on - * this CPU after we exit. - */ - if (__predict_true((*ocptr & ONCE_COMPLETED) != 0)) { - ANDROID_MEMBAR_FULL(); - return 0; - } - - for (;;) { - /* Try to atomically set the INITIALIZING flag. 
- * This requires a cmpxchg loop, and we may need - * to exit prematurely if we detect that - * COMPLETED is now set. - */ - int32_t oldval, newval; - - do { - oldval = *ocptr; - if ((oldval & ONCE_COMPLETED) != 0) - break; - - newval = oldval | ONCE_INITIALIZING; - } while (__bionic_cmpxchg(oldval, newval, ocptr) != 0); - - if ((oldval & ONCE_COMPLETED) != 0) { - /* We detected that COMPLETED was set while in our loop */ - ANDROID_MEMBAR_FULL(); - return 0; - } - - if ((oldval & ONCE_INITIALIZING) == 0) { - /* We got there first, we can jump out of the loop to - * handle the initialization */ - break; - } - - /* Another thread is running the initialization and hasn't completed - * yet, so wait for it, then try again. */ - __futex_wait_ex(ocptr, 0, oldval, NULL); - } - - /* call the initialization function. */ - (*init_routine)(); - - /* Do a store_release indicating that initialization is complete */ - ANDROID_MEMBAR_FULL(); - *ocptr = ONCE_COMPLETED; - - /* Wake up any waiters, if any */ - __futex_wake_ex(ocptr, 0, INT_MAX); - - return 0; -} - -pid_t __pthread_gettid(pthread_t thid) { - pthread_internal_t* thread = (pthread_internal_t*) thid; - return thread->tid; -} - -int __pthread_settid(pthread_t thid, pid_t tid) { - if (thid == 0) { - return EINVAL; - } - - pthread_internal_t* thread = (pthread_internal_t*) thid; - thread->tid = tid; - - return 0; -} diff --git a/libc/bionic/pthread_once.cpp b/libc/bionic/pthread_once.cpp new file mode 100644 index 000000000..6d9d7d17b --- /dev/null +++ b/libc/bionic/pthread_once.cpp @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2008 The Android Open Source Project + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include + +#include "private/bionic_atomic_inline.h" +#include "private/bionic_futex.h" + +#define ONCE_INITIALIZING (1 << 0) +#define ONCE_COMPLETED (1 << 1) + +/* NOTE: this implementation doesn't support a init function that throws a C++ exception + * or calls fork() + */ +int pthread_once(pthread_once_t* once_control, void (*init_routine)(void)) { + volatile pthread_once_t* once_control_ptr = once_control; + + // PTHREAD_ONCE_INIT is 0, we use the following bit flags + // bit 0 set -> initialization is under way + // bit 1 set -> initialization is complete + + // First check if the once is already initialized. This will be the common + // case and we want to make this as fast as possible. Note that this still + // requires a load_acquire operation here to ensure that all the + // stores performed by the initialization function are observable on + // this CPU after we exit. + if (__predict_true((*once_control_ptr & ONCE_COMPLETED) != 0)) { + ANDROID_MEMBAR_FULL(); + return 0; + } + + while (true) { + // Try to atomically set the INITIALIZING flag. + // This requires a cmpxchg loop, and we may need + // to exit prematurely if we detect that + // COMPLETED is now set. + int32_t old_value, new_value; + + do { + old_value = *once_control_ptr; + if ((old_value & ONCE_COMPLETED) != 0) { + break; + } + + new_value = old_value | ONCE_INITIALIZING; + } while (__bionic_cmpxchg(old_value, new_value, once_control_ptr) != 0); + + if ((old_value & ONCE_COMPLETED) != 0) { + // We detected that COMPLETED was set while in our loop. + ANDROID_MEMBAR_FULL(); + return 0; + } + + if ((old_value & ONCE_INITIALIZING) == 0) { + // We got there first, we can jump out of the loop to handle the initialization. + break; + } + + // Another thread is running the initialization and hasn't completed + // yet, so wait for it, then try again. + __futex_wait_ex(once_control_ptr, 0, old_value, NULL); + } + + // Call the initialization function. + (*init_routine)(); + + // Do a store_release indicating that initialization is complete. + ANDROID_MEMBAR_FULL(); + *once_control_ptr = ONCE_COMPLETED; + + // Wake up any waiters, if any. 
+ __futex_wake_ex(once_control_ptr, 0, INT_MAX); + + return 0; +} diff --git a/libc/bionic/pthread-rwlocks.c b/libc/bionic/pthread_rwlock.cpp similarity index 78% rename from libc/bionic/pthread-rwlocks.c rename to libc/bionic/pthread_rwlock.cpp index 59e224872..0182ef307 100644 --- a/libc/bionic/pthread-rwlocks.c +++ b/libc/bionic/pthread_rwlock.cpp @@ -91,8 +91,7 @@ int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared) } } -int pthread_rwlockattr_getpshared(pthread_rwlockattr_t *attr, int *pshared) -{ +int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* attr, int* pshared) { if (!attr || !pshared) return EINVAL; @@ -195,10 +194,62 @@ static void _pthread_rwlock_pulse(pthread_rwlock_t *rwlock) pthread_cond_broadcast(&rwlock->cond); } +static int __pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) { + int ret = 0; -int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock) -{ - return pthread_rwlock_timedrdlock(rwlock, NULL); + if (rwlock == NULL) { + return EINVAL; + } + + pthread_mutex_lock(&rwlock->lock); + int tid = __get_thread()->tid; + if (__predict_false(!read_precondition(rwlock, tid))) { + rwlock->pendingReaders += 1; + do { + ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout); + } while (ret == 0 && !read_precondition(rwlock, tid)); + rwlock->pendingReaders -= 1; + if (ret != 0) { + goto EXIT; + } + } + ++rwlock->numLocks; +EXIT: + pthread_mutex_unlock(&rwlock->lock); + return ret; +} + +static int __pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) { + int ret = 0; + + if (rwlock == NULL) { + return EINVAL; + } + + pthread_mutex_lock(&rwlock->lock); + int tid = __get_thread()->tid; + if (__predict_false(!write_precondition(rwlock, tid))) { + // If we can't read yet, wait until the rwlock is unlocked + // and try again. Increment pendingReaders to get the + // cond broadcast when that happens. 
+ rwlock->pendingWriters += 1; + do { + ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout); + } while (ret == 0 && !write_precondition(rwlock, tid)); + rwlock->pendingWriters -= 1; + if (ret != 0) { + goto EXIT; + } + } + ++rwlock->numLocks; + rwlock->writerThreadId = tid; +EXIT: + pthread_mutex_unlock(&rwlock->lock); + return ret; +} + +int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock) { + return __pthread_rwlock_timedrdlock(rwlock, NULL); } int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock) @@ -212,40 +263,18 @@ int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock) if (__predict_false(!read_precondition(rwlock, __get_thread()->tid))) ret = EBUSY; else - rwlock->numLocks ++; + ++rwlock->numLocks; pthread_mutex_unlock(&rwlock->lock); return ret; } -int pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout) -{ - int ret = 0; - - if (rwlock == NULL) - return EINVAL; - - pthread_mutex_lock(&rwlock->lock); - int tid = __get_thread()->tid; - if (__predict_false(!read_precondition(rwlock, tid))) { - rwlock->pendingReaders += 1; - do { - ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout); - } while (ret == 0 && !read_precondition(rwlock, tid)); - rwlock->pendingReaders -= 1; - if (ret != 0) - goto EXIT; - } - rwlock->numLocks ++; -EXIT: - pthread_mutex_unlock(&rwlock->lock); - return ret; +int pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) { + return __pthread_rwlock_timedrdlock(rwlock, abs_timeout); } - -int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock) -{ - return pthread_rwlock_timedwrlock(rwlock, NULL); +int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock) { + return __pthread_rwlock_timedwrlock(rwlock, NULL); } int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock) @@ -260,43 +289,17 @@ int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock) if (__predict_false(!write_precondition(rwlock, tid))) { ret = EBUSY; } else { - rwlock->numLocks ++; + ++rwlock->numLocks; rwlock->writerThreadId = tid; } pthread_mutex_unlock(&rwlock->lock); return ret; } -int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout) -{ - int ret = 0; - - if (rwlock == NULL) - return EINVAL; - - pthread_mutex_lock(&rwlock->lock); - int tid = __get_thread()->tid; - if (__predict_false(!write_precondition(rwlock, tid))) { - /* If we can't read yet, wait until the rwlock is unlocked - * and try again. Increment pendingReaders to get the - * cond broadcast when that happens. 
- */ - rwlock->pendingWriters += 1; - do { - ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout); - } while (ret == 0 && !write_precondition(rwlock, tid)); - rwlock->pendingWriters -= 1; - if (ret != 0) - goto EXIT; - } - rwlock->numLocks ++; - rwlock->writerThreadId = tid; -EXIT: - pthread_mutex_unlock(&rwlock->lock); - return ret; +int pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) { + return __pthread_rwlock_timedwrlock(rwlock, abs_timeout); } - int pthread_rwlock_unlock(pthread_rwlock_t *rwlock) { int ret = 0; diff --git a/libc/bionic/pthread_setschedparam.cpp b/libc/bionic/pthread_setschedparam.cpp index 55ec79180..419cc6f81 100644 --- a/libc/bionic/pthread_setschedparam.cpp +++ b/libc/bionic/pthread_setschedparam.cpp @@ -31,7 +31,7 @@ #include "private/ErrnoRestorer.h" #include "pthread_accessor.h" -int pthread_setschedparam(pthread_t t, int policy, struct sched_param const* param) { +int pthread_setschedparam(pthread_t t, int policy, const sched_param* param) { ErrnoRestorer errno_restorer; pthread_accessor thread(t); diff --git a/libc/bionic/ptrace.c b/libc/bionic/ptrace.cpp similarity index 71% rename from libc/bionic/ptrace.c rename to libc/bionic/ptrace.cpp index 0bb1acd78..c0fd5ded3 100644 --- a/libc/bionic/ptrace.c +++ b/libc/bionic/ptrace.cpp @@ -25,33 +25,31 @@ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ + #include #include -extern long __ptrace(int request, pid_t pid, void *addr, void *data); +extern "C" long __ptrace(int request, pid_t pid, void* addr, void* data); -long ptrace(int request, pid_t pid, void * addr, void * data) -{ - switch (request) { - case PTRACE_PEEKUSR: - case PTRACE_PEEKTEXT: - case PTRACE_PEEKDATA: - { - long word; - long ret; - - ret = __ptrace(request, pid, addr, &word); - if (ret == 0) { - return word; - } else { - // __ptrace will set errno for us - return -1; - } - } - - default: - return __ptrace(request, pid, addr, data); +long ptrace(int request, pid_t pid, void* addr, void* data) { + switch (request) { + case PTRACE_PEEKUSR: + case PTRACE_PEEKTEXT: + case PTRACE_PEEKDATA: + { + long word; + long ret = __ptrace(request, pid, addr, &word); + if (ret == 0) { + return word; + } else { + // __ptrace already set errno for us. 
+ return -1; + } } + + default: + return __ptrace(request, pid, addr, data); + } } /*
@@ -63,6 +61,7 @@ long ptrace(int request, pid_t pid, void * addr, void * data) #define ATTRIBUTES __attribute__((noinline)) #endif -void ATTRIBUTES _thread_created_hook(pid_t thread_id) -{ +extern "C" void _thread_created_hook(pid_t) ATTRIBUTES; + +void _thread_created_hook(pid_t) { }
diff --git a/libc/bionic/thread_atexit.c b/libc/bionic/thread_atexit.cpp
similarity index 82%
rename from libc/bionic/thread_atexit.c
rename to libc/bionic/thread_atexit.cpp
index dc4a5a0ff..cad65d37d 100644
--- a/libc/bionic/thread_atexit.c
+++ b/libc/bionic/thread_atexit.cpp
@@ -29,17 +29,18 @@ /* some simple glue used to make the BSD atexit code happy */ #include -#include "pthread_internal.h" -static pthread_mutex_t gAtExitLock = PTHREAD_MUTEX_INITIALIZER; +static pthread_mutex_t gAtExitLock = PTHREAD_MUTEX_INITIALIZER; -void _thread_atexit_lock( void ) -{ - pthread_mutex_lock( &gAtExitLock ); +__BEGIN_DECLS +__LIBC_HIDDEN__ void _thread_atexit_lock(); +__LIBC_HIDDEN__ void _thread_atexit_unlock(); +__END_DECLS + +void _thread_atexit_lock() { + pthread_mutex_lock(&gAtExitLock); } -void _thread_atexit_unlock( void ) -{ - pthread_mutex_unlock( &gAtExitLock ); +void _thread_atexit_unlock() { + pthread_mutex_unlock(&gAtExitLock); } -
diff --git a/libc/bionic/pthread-timers.c b/libc/bionic/timer.cpp
similarity index 87%
rename from libc/bionic/pthread-timers.c
rename to libc/bionic/timer.cpp
index d81bfef22..76619f312 100644
--- a/libc/bionic/pthread-timers.c
+++ b/libc/bionic/timer.cpp
@@ -33,11 +33,8 @@ #include #include -extern int __pthread_cond_timedwait(pthread_cond_t*, pthread_mutex_t*, const struct timespec*, - clockid_t); - -extern int __pthread_cond_timedwait_relative(pthread_cond_t*, pthread_mutex_t*, - const struct timespec*); +extern int __pthread_cond_timedwait(pthread_cond_t*, pthread_mutex_t*, const timespec*, clockid_t); +extern int __pthread_cond_timedwait_relative(pthread_cond_t*, pthread_mutex_t*, const timespec*); // Normal (i.e. non-SIGEV_THREAD) timers are created directly by the kernel // and are passed as is to/from the caller.
@@ -110,8 +107,8 @@ struct thr_timer { pthread_cond_t cond; /* signal a state change to thread */ int volatile done; /* set by timer_delete */ int volatile stopped; /* set by _start_stop() */ - struct timespec volatile expires; /* next expiration time, or 0 */ - struct timespec volatile period; /* reload value, or 0 */ + timespec volatile expires; /* next expiration time, or 0 */ + timespec volatile period; /* reload value, or 0 */ int volatile overruns; /* current number of overruns */ };
@@ -240,7 +237,7 @@ static pthread_once_t __timer_table_once = PTHREAD_ONCE_INIT; static thr_timer_table_t* __timer_table; static void __timer_table_init(void) { - __timer_table = calloc(1, sizeof(*__timer_table)); + __timer_table = reinterpret_cast<thr_timer_table_t*>(calloc(1, sizeof(*__timer_table))); if (__timer_table != NULL) { thr_timer_table_init(__timer_table); }
@@ -258,7 +255,7 @@ static thr_timer_table_t* __timer_table_get(void) { ** requirements: the timers of fork child processes must be ** disarmed but not deleted. **/ -__LIBC_HIDDEN__ void __timer_table_start_stop(int stop) { +void __timer_table_start_stop(int stop) { // We access __timer_table directly so we don't create it if it doesn't yet exist.
thr_timer_table_start_stop(__timer_table, stop); } @@ -286,7 +283,7 @@ thr_timer_unlock( thr_timer_t* t ) } -static __inline__ void timespec_add(struct timespec* a, const struct timespec* b) { +static __inline__ void timespec_add(timespec* a, const timespec* b) { a->tv_sec += b->tv_sec; a->tv_nsec += b->tv_nsec; if (a->tv_nsec >= 1000000000) { @@ -295,7 +292,7 @@ static __inline__ void timespec_add(struct timespec* a, const struct timespec* b } } -static __inline__ void timespec_sub(struct timespec* a, const struct timespec* b) { +static __inline__ void timespec_sub(timespec* a, const timespec* b) { a->tv_sec -= b->tv_sec; a->tv_nsec -= b->tv_nsec; if (a->tv_nsec < 0) { @@ -304,15 +301,15 @@ static __inline__ void timespec_sub(struct timespec* a, const struct timespec* b } } -static __inline__ void timespec_zero(struct timespec* a) { +static __inline__ void timespec_zero(timespec* a) { a->tv_sec = a->tv_nsec = 0; } -static __inline__ int timespec_is_zero(const struct timespec* a) { +static __inline__ int timespec_is_zero(const timespec* a) { return (a->tv_sec == 0 && a->tv_nsec == 0); } -static __inline__ int timespec_cmp(const struct timespec* a, const struct timespec* b) { +static __inline__ int timespec_cmp(const timespec* a, const timespec* b) { if (a->tv_sec < b->tv_sec) return -1; if (a->tv_sec > b->tv_sec) return +1; if (a->tv_nsec < b->tv_nsec) return -1; @@ -320,7 +317,7 @@ static __inline__ int timespec_cmp(const struct timespec* a, const struct timesp return 0; } -static __inline__ int timespec_cmp0(const struct timespec* a) { +static __inline__ int timespec_cmp0(const timespec* a) { if (a->tv_sec < 0) return -1; if (a->tv_sec > 0) return +1; if (a->tv_nsec < 0) return -1; @@ -330,15 +327,15 @@ static __inline__ int timespec_cmp0(const struct timespec* a) { /** POSIX TIMERS APIs */ -extern int __timer_create(clockid_t, struct sigevent*, timer_t*); -extern int __timer_delete(timer_t); -extern int __timer_gettime(timer_t, struct itimerspec*); -extern int __timer_settime(timer_t, int, const struct itimerspec*, struct itimerspec*); -extern int __timer_getoverrun(timer_t); +extern "C" int __timer_create(clockid_t, sigevent*, timer_t*); +extern "C" int __timer_delete(timer_t); +extern "C" int __timer_gettime(timer_t, itimerspec*); +extern "C" int __timer_settime(timer_t, int, const itimerspec*, itimerspec*); +extern "C" int __timer_getoverrun(timer_t); static void* timer_thread_start(void*); -int timer_create(clockid_t clock_id, struct sigevent* evp, timer_t* timer_id) { +int timer_create(clockid_t clock_id, sigevent* evp, timer_t* timer_id) { // If not a SIGEV_THREAD timer, the kernel can handle it without our help. if (__predict_true(evp == NULL || evp->sigev_notify != SIGEV_THREAD)) { return __timer_create(clock_id, evp, timer_id); @@ -351,7 +348,7 @@ int timer_create(clockid_t clock_id, struct sigevent* evp, timer_t* timer_id) { } // Check that the clock id is supported by the kernel. 
- struct timespec dummy; + timespec dummy; if (clock_gettime(clock_id, &dummy) < 0 && errno == EINVAL) { return -1; }
@@ -435,34 +432,26 @@ timer_delete( timer_t id ) /* return the relative time until the next expiration, or 0 if * the timer is disarmed */ -static void -timer_gettime_internal( thr_timer_t* timer, - struct itimerspec* spec) -{ - struct timespec diff; +static void timer_gettime_internal(thr_timer_t* timer, itimerspec* spec) { + timespec diff = const_cast<timespec&>(timer->expires); + if (!timespec_is_zero(&diff)) { + timespec now; - diff = timer->expires; - if (!timespec_is_zero(&diff)) - { - struct timespec now; + clock_gettime(timer->clock, &now); + timespec_sub(&diff, &now); - clock_gettime( timer->clock, &now ); - timespec_sub(&diff, &now); - - /* in case of overrun, return 0 */ - if (timespec_cmp0(&diff) < 0) { - timespec_zero(&diff); - } + /* in case of overrun, return 0 */ + if (timespec_cmp0(&diff) < 0) { + timespec_zero(&diff); } + } - spec->it_value = diff; - spec->it_interval = timer->period; + spec->it_value = diff; + spec->it_interval = const_cast<timespec&>(timer->period); } -int -timer_gettime( timer_t id, struct itimerspec* ospec ) -{ +int timer_gettime(timer_t id, itimerspec* ospec) { if (ospec == NULL) { errno = EINVAL; return -1;
@@ -486,11 +475,7 @@ timer_gettime( timer_t id, struct itimerspec* ospec ) int -timer_settime( timer_t id, - int flags, - const struct itimerspec* spec, - struct itimerspec* ospec ) -{ +timer_settime(timer_t id, int flags, const itimerspec* spec, itimerspec* ospec) { if (spec == NULL) { errno = EINVAL; return -1;
@@ -500,7 +485,7 @@ timer_settime( timer_t id, return __timer_settime( id, flags, spec, ospec ); } else { thr_timer_t* timer = thr_timer_from_id(id); - struct timespec expires, now; + timespec expires, now; if (timer == NULL) { errno = EINVAL;
@@ -526,8 +511,8 @@ timer_settime( timer_t id, expires = now; } } - timer->expires = expires; - timer->period = spec->it_interval; + const_cast<timespec&>(timer->expires) = expires; + const_cast<timespec&>(timer->period) = spec->it_interval; thr_timer_unlock( timer ); /* signal the change to the thread */
@@ -561,7 +546,7 @@ timer_getoverrun(timer_t id) static void* timer_thread_start(void* arg) { - thr_timer_t* timer = arg; + thr_timer_t* timer = reinterpret_cast<thr_timer_t*>(arg); thr_timer_lock(timer);
@@ -572,8 +557,8 @@ static void* timer_thread_start(void* arg) { // We loop until timer->done is set in timer_delete(). while (!timer->done) { - struct timespec expires = timer->expires; - struct timespec period = timer->period; + timespec expires = const_cast<timespec&>(timer->expires); + timespec period = const_cast<timespec&>(timer->period); // If the timer is stopped or disarmed, wait indefinitely // for a state change from timer_settime/_delete/_start_stop.
@@ -584,13 +569,13 @@ static void* timer_thread_start(void* arg) { // Otherwise, we need to do a timed wait until either a // state change of the timer expiration time. - struct timespec now; + timespec now; clock_gettime(timer->clock, &now); if (timespec_cmp(&expires, &now) > 0) { // Cool, there was no overrun, so compute the // relative timeout as 'expires - now', then wait. - struct timespec diff = expires; + timespec diff = expires; timespec_sub(&diff, &now); int ret = __pthread_cond_timedwait_relative(&timer->cond, &timer->mutex, &diff);
@@ -627,7 +612,7 @@ static void* timer_thread_start(void* arg) { } else { timespec_zero(&expires); } - timer->expires = expires; + const_cast<timespec&>(timer->expires) = expires; // Now call the timer callback function.
Release the // lock to allow the function to modify the timer setting diff --git a/libc/include/pthread.h b/libc/include/pthread.h index dbdee7044..c5380be9d 100644 --- a/libc/include/pthread.h +++ b/libc/include/pthread.h @@ -25,6 +25,7 @@ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ + #ifndef _PTHREAD_H_ #define _PTHREAD_H_ @@ -34,12 +35,8 @@ #include #include -/* - * Types - */ -typedef struct -{ - int volatile value; +typedef struct { + int volatile value; } pthread_mutex_t; #define __PTHREAD_MUTEX_INIT_VALUE 0 @@ -61,241 +58,167 @@ enum { PTHREAD_MUTEX_DEFAULT = PTHREAD_MUTEX_NORMAL }; - - -typedef struct -{ - int volatile value; +typedef struct { + int volatile value; } pthread_cond_t; -typedef struct -{ - uint32_t flags; - void * stack_base; - size_t stack_size; - size_t guard_size; - int32_t sched_policy; - int32_t sched_priority; +#define PTHREAD_COND_INITIALIZER {0} + +typedef struct { + uint32_t flags; + void* stack_base; + size_t stack_size; + size_t guard_size; + int32_t sched_policy; + int32_t sched_priority; } pthread_attr_t; typedef long pthread_mutexattr_t; typedef long pthread_condattr_t; +typedef int pthread_rwlockattr_t; + +typedef struct { + pthread_mutex_t lock; + pthread_cond_t cond; + int numLocks; + int writerThreadId; + int pendingReaders; + int pendingWriters; + void* reserved[4]; /* for future extensibility */ +} pthread_rwlock_t; + +#define PTHREAD_RWLOCK_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0, 0, 0, { NULL, NULL, NULL, NULL } } + typedef int pthread_key_t; typedef long pthread_t; -typedef volatile int pthread_once_t; +typedef volatile int pthread_once_t; -/* - * Defines - */ -#define PTHREAD_COND_INITIALIZER {0} +#define PTHREAD_ONCE_INIT 0 #define PTHREAD_STACK_MIN (2 * PAGE_SIZE) #define PTHREAD_CREATE_DETACHED 0x00000001 #define PTHREAD_CREATE_JOINABLE 0x00000000 -#define PTHREAD_ONCE_INIT 0 - #define PTHREAD_PROCESS_PRIVATE 0 #define PTHREAD_PROCESS_SHARED 1 #define PTHREAD_SCOPE_SYSTEM 0 #define PTHREAD_SCOPE_PROCESS 1 -/* - * Prototypes - */ -#ifdef __cplusplus -extern "C" { -#endif +__BEGIN_DECLS -int pthread_attr_init(pthread_attr_t * attr); -int pthread_attr_destroy(pthread_attr_t * attr); +int pthread_atfork(void (*)(void), void (*)(void), void(*)(void)); -int pthread_attr_setdetachstate(pthread_attr_t * attr, int state); -int pthread_attr_getdetachstate(pthread_attr_t const * attr, int * state); +int pthread_attr_destroy(pthread_attr_t*) __nonnull((1)); +int pthread_attr_getdetachstate(const pthread_attr_t*, int*) __nonnull((1, 2)); +int pthread_attr_getguardsize(const pthread_attr_t*, size_t*) __nonnull((1, 2)); +int pthread_attr_getschedparam(const pthread_attr_t*, struct sched_param*) __nonnull((1, 2)); +int pthread_attr_getschedpolicy(const pthread_attr_t*, int*) __nonnull((1, 2)); +int pthread_attr_getscope(const pthread_attr_t*, int*) __nonnull((1, 2)); +int pthread_attr_getstack(const pthread_attr_t*, void**, size_t*) __nonnull((1, 2, 3)); +int pthread_attr_getstacksize(const pthread_attr_t*, size_t*) __nonnull((1, 2)); +int pthread_attr_init(pthread_attr_t*) __nonnull((1)); +int pthread_attr_setdetachstate(pthread_attr_t*, int) __nonnull((1)); +int pthread_attr_setguardsize(pthread_attr_t*, size_t) __nonnull((1)); +int pthread_attr_setschedparam(pthread_attr_t*, const struct sched_param*) __nonnull((1, 2)); +int pthread_attr_setschedpolicy(pthread_attr_t*, int) __nonnull((1)); +int pthread_attr_setscope(pthread_attr_t*, int) __nonnull((1)); +int 
pthread_attr_setstack(pthread_attr_t*, void*, size_t) __nonnull((1)); +int pthread_attr_setstacksize(pthread_attr_t * attr, size_t stack_size) __nonnull((1)); -int pthread_attr_setschedpolicy(pthread_attr_t * attr, int policy); -int pthread_attr_getschedpolicy(pthread_attr_t const * attr, int * policy); +int pthread_condattr_destroy(pthread_condattr_t*) __nonnull((1)); +int pthread_condattr_getpshared(const pthread_condattr_t*, int*) __nonnull((1, 2)); +int pthread_condattr_init(pthread_condattr_t*) __nonnull((1)); +int pthread_condattr_setpshared(pthread_condattr_t*, int) __nonnull((1)); -int pthread_attr_setschedparam(pthread_attr_t * attr, struct sched_param const * param); -int pthread_attr_getschedparam(pthread_attr_t const * attr, struct sched_param * param); +int pthread_cond_broadcast(pthread_cond_t*) __nonnull((1)); +int pthread_cond_destroy(pthread_cond_t*) __nonnull((1)); +int pthread_cond_init(pthread_cond_t*, const pthread_condattr_t*) __nonnull((1)); +int pthread_cond_signal(pthread_cond_t*) __nonnull((1)); +int pthread_cond_timedwait(pthread_cond_t*, pthread_mutex_t*, const struct timespec*) __nonnull((1, 2, 3)); +int pthread_cond_wait(pthread_cond_t*, pthread_mutex_t*) __nonnull((1, 2)); -int pthread_attr_setstacksize(pthread_attr_t * attr, size_t stack_size); -int pthread_attr_getstacksize(pthread_attr_t const * attr, size_t * stack_size); +int pthread_create(pthread_t*, pthread_attr_t const*, void *(*)(void*), void*) __nonnull((1, 3)); +int pthread_detach(pthread_t); +void pthread_exit(void*) __noreturn; -int pthread_attr_setstackaddr(pthread_attr_t * attr, void * stackaddr); -int pthread_attr_getstackaddr(pthread_attr_t const * attr, void ** stackaddr); +int pthread_equal(pthread_t, pthread_t); -int pthread_attr_setstack(pthread_attr_t * attr, void * stackaddr, size_t stack_size); -int pthread_attr_getstack(pthread_attr_t const * attr, void ** stackaddr, size_t * stack_size); +int pthread_getattr_np(pthread_t, pthread_attr_t*) __nonnull((2)); -int pthread_attr_setguardsize(pthread_attr_t * attr, size_t guard_size); -int pthread_attr_getguardsize(pthread_attr_t const * attr, size_t * guard_size); +int pthread_getcpuclockid(pthread_t, clockid_t*) __nonnull((2)); -int pthread_attr_setscope(pthread_attr_t *attr, int scope); -int pthread_attr_getscope(pthread_attr_t const *attr); +int pthread_getschedparam(pthread_t, int*, struct sched_param*) __nonnull((2, 3)); -int pthread_getattr_np(pthread_t thid, pthread_attr_t * attr); +void* pthread_getspecific(pthread_key_t); -int pthread_create(pthread_t *thread, pthread_attr_t const * attr, - void *(*start_routine)(void *), void * arg); -void pthread_exit(void * retval); -int pthread_join(pthread_t thid, void ** ret_val); -int pthread_detach(pthread_t thid); +int pthread_join(pthread_t, void**); + +int pthread_key_create(pthread_key_t*, void (*)(void*)) __nonnull((1)); +int pthread_key_delete(pthread_key_t); + +int pthread_kill(pthread_t, int); + +int pthread_mutexattr_destroy(pthread_mutexattr_t*) __nonnull((1)); +int pthread_mutexattr_getpshared(const pthread_mutexattr_t*, int*) __nonnull((1, 2)); +int pthread_mutexattr_gettype(const pthread_mutexattr_t*, int*) __nonnull((1, 2)); +int pthread_mutexattr_init(pthread_mutexattr_t*) __nonnull((1)); +int pthread_mutexattr_setpshared(pthread_mutexattr_t*, int) __nonnull((1)); +int pthread_mutexattr_settype(pthread_mutexattr_t*, int) __nonnull((1)); + +int pthread_mutex_destroy(pthread_mutex_t*) __nonnull((1)); +int pthread_mutex_init(pthread_mutex_t*, const pthread_mutexattr_t*) 
__nonnull((1)); +int pthread_mutex_lock(pthread_mutex_t*) __nonnull((1)); +int pthread_mutex_timedlock(pthread_mutex_t*, struct timespec*) __nonnull((1, 2)); +int pthread_mutex_trylock(pthread_mutex_t*) __nonnull((1)); +int pthread_mutex_unlock(pthread_mutex_t*) __nonnull((1)); + +int pthread_once(pthread_once_t*, void (*)(void)) __nonnull((1, 2)); + +int pthread_rwlockattr_destroy(pthread_rwlockattr_t*) __nonnull((1)); +int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t*, int*) __nonnull((1, 2)); +int pthread_rwlockattr_init(pthread_rwlockattr_t*) __nonnull((1)); +int pthread_rwlockattr_setpshared(pthread_rwlockattr_t*, int) __nonnull((1)); + +int pthread_rwlock_destroy(pthread_rwlock_t*) __nonnull((1)); +int pthread_rwlock_init(pthread_rwlock_t*, const pthread_rwlockattr_t*) __nonnull((1)); +int pthread_rwlock_rdlock(pthread_rwlock_t*) __nonnull((1)); +int pthread_rwlock_timedrdlock(pthread_rwlock_t*, const struct timespec*) __nonnull((1, 2)); +int pthread_rwlock_timedwrlock(pthread_rwlock_t*, const struct timespec*) __nonnull((1, 2)); +int pthread_rwlock_tryrdlock(pthread_rwlock_t*) __nonnull((1)); +int pthread_rwlock_trywrlock(pthread_rwlock_t*) __nonnull((1)); +int pthread_rwlock_unlock(pthread_rwlock_t *rwlock) __nonnull((1)); +int pthread_rwlock_wrlock(pthread_rwlock_t*) __nonnull((1)); pthread_t pthread_self(void); -int pthread_equal(pthread_t one, pthread_t two); -int pthread_getschedparam(pthread_t thid, int * policy, - struct sched_param * param); -int pthread_setschedparam(pthread_t thid, int policy, - struct sched_param const * param); +int pthread_setname_np(pthread_t, const char*) __nonnull((2)); -int pthread_mutexattr_init(pthread_mutexattr_t *attr); -int pthread_mutexattr_destroy(pthread_mutexattr_t *attr); -int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type); -int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type); -int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared); -int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared); +int pthread_setschedparam(pthread_t, int, const struct sched_param*) __nonnull((3)); -int pthread_mutex_init(pthread_mutex_t *mutex, - const pthread_mutexattr_t *attr); -int pthread_mutex_destroy(pthread_mutex_t *mutex); -int pthread_mutex_lock(pthread_mutex_t *mutex); -int pthread_mutex_unlock(pthread_mutex_t *mutex); -int pthread_mutex_trylock(pthread_mutex_t *mutex); -int pthread_mutex_timedlock(pthread_mutex_t *mutex, struct timespec* ts); +int pthread_setspecific(pthread_key_t, const void*); -int pthread_condattr_init(pthread_condattr_t *attr); -int pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared); -int pthread_condattr_setpshared(pthread_condattr_t* attr, int pshared); -int pthread_condattr_destroy(pthread_condattr_t *attr); +int pthread_sigmask(int, const sigset_t*, sigset_t*); -int pthread_cond_init(pthread_cond_t *cond, - const pthread_condattr_t *attr); -int pthread_cond_destroy(pthread_cond_t *cond); -int pthread_cond_broadcast(pthread_cond_t *cond); -int pthread_cond_signal(pthread_cond_t *cond); -int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex); -int pthread_cond_timedwait(pthread_cond_t *cond, - pthread_mutex_t * mutex, - const struct timespec *abstime); - -/* BIONIC: same as pthread_cond_timedwait, except the 'abstime' given refers - * to the CLOCK_MONOTONIC clock instead, to avoid any problems when - * the wall-clock time is changed brutally - */ -int pthread_cond_timedwait_monotonic_np(pthread_cond_t *cond, - 
pthread_mutex_t *mutex, - const struct timespec *abstime); - -/* BIONIC: DEPRECATED. same as pthread_cond_timedwait_monotonic_np() - * unfortunately pthread_cond_timedwait_monotonic has shipped already - */ -int pthread_cond_timedwait_monotonic(pthread_cond_t *cond, - pthread_mutex_t *mutex, - const struct timespec *abstime); - -#define HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC 1 - -/* BIONIC: same as pthread_cond_timedwait, except the 'reltime' given refers - * is relative to the current time. - */ -int pthread_cond_timedwait_relative_np(pthread_cond_t *cond, - pthread_mutex_t *mutex, - const struct timespec *reltime); - -#define HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE 1 - - - -int pthread_cond_timeout_np(pthread_cond_t *cond, - pthread_mutex_t * mutex, - unsigned msecs); - -/* same as pthread_mutex_lock(), but will wait up to 'msecs' milli-seconds - * before returning. same return values than pthread_mutex_trylock though, i.e. - * returns EBUSY if the lock could not be acquired after the timeout - * expired. - */ -int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs); - -/* read-write lock support */ - -typedef int pthread_rwlockattr_t; - -typedef struct { - pthread_mutex_t lock; - pthread_cond_t cond; - int numLocks; - int writerThreadId; - int pendingReaders; - int pendingWriters; - void* reserved[4]; /* for future extensibility */ -} pthread_rwlock_t; - -#define PTHREAD_RWLOCK_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0, 0, 0, { NULL, NULL, NULL, NULL } } - -int pthread_rwlockattr_init(pthread_rwlockattr_t *attr); -int pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr); -int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared); -int pthread_rwlockattr_getpshared(pthread_rwlockattr_t *attr, int *pshared); - -int pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr); -int pthread_rwlock_destroy(pthread_rwlock_t *rwlock); - -int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock); -int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock); -int pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout); - -int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock); -int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock); -int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout); - -int pthread_rwlock_unlock(pthread_rwlock_t *rwlock); - - -int pthread_key_create(pthread_key_t *key, void (*destructor_function)(void *)); -int pthread_key_delete (pthread_key_t); -int pthread_setspecific(pthread_key_t key, const void *value); -void *pthread_getspecific(pthread_key_t key); - -int pthread_kill(pthread_t tid, int sig); -int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset); - -int pthread_getcpuclockid(pthread_t tid, clockid_t *clockid); - -int pthread_once(pthread_once_t *once_control, void (*init_routine)(void)); - -int pthread_setname_np(pthread_t thid, const char *thname); - -int pthread_atfork(void (*prepare)(void), void (*parent)(void), void(*child)(void)); - -typedef void (*__pthread_cleanup_func_t)(void*); +typedef void (*__pthread_cleanup_func_t)(void*); typedef struct __pthread_cleanup_t { - struct __pthread_cleanup_t* __cleanup_prev; - __pthread_cleanup_func_t __cleanup_routine; - void* __cleanup_arg; + struct __pthread_cleanup_t* __cleanup_prev; + __pthread_cleanup_func_t __cleanup_routine; + void* __cleanup_arg; } __pthread_cleanup_t; -extern void __pthread_cleanup_push(__pthread_cleanup_t* c, - __pthread_cleanup_func_t routine, - 
void* arg); - -extern void __pthread_cleanup_pop(__pthread_cleanup_t* c, - int execute); +extern void __pthread_cleanup_push(__pthread_cleanup_t* c, __pthread_cleanup_func_t, void*); +extern void __pthread_cleanup_pop(__pthread_cleanup_t*, int); /* Believe or not, the definitions of pthread_cleanup_push and * pthread_cleanup_pop below are correct. Posix states that these * can be implemented as macros that might introduce opening and * closing braces, and that using setjmp/longjmp/return/break/continue - * between them results in undefined behaviour. - * - * And indeed, GLibc and other C libraries use a similar definition + * between them results in undefined behavior. */ #define pthread_cleanup_push(routine, arg) \ do { \ @@ -304,10 +227,40 @@ extern void __pthread_cleanup_pop(__pthread_cleanup_t* c, #define pthread_cleanup_pop(execute) \ __pthread_cleanup_pop( &__cleanup, (execute)); \ - } while (0); + } while (0); \ -#ifdef __cplusplus -} /* extern "C" */ -#endif + +#if !defined(__LP64__) + +/* Deprecated by POSIX. TODO: support for LP64 but add deprecated attribute instead? */ +int pthread_attr_getstackaddr(const pthread_attr_t*, void**) __nonnull((1, 2)); /* deprecated */ +int pthread_attr_setstackaddr(pthread_attr_t*, void*) __nonnull((1)); /* deprecated */ + +/* Bionic additions that are deprecated even in the 32-bit ABI. */ +int pthread_cond_timedwait_monotonic_np(pthread_cond_t*, pthread_mutex_t*, const struct timespec*); +int pthread_cond_timedwait_monotonic(pthread_cond_t*, pthread_mutex_t*, const struct timespec*); +#define HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC 1 + +/* + * Like pthread_cond_timedwait except 'reltime' is relative to the current time. + * TODO: not like glibc; include in LP64? + */ +int pthread_cond_timedwait_relative_np(pthread_cond_t*, pthread_mutex_t*, const struct timespec*); +#define HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE 1 + +/* TODO: not like glibc; include in LP64? */ +int pthread_cond_timeout_np(pthread_cond_t*, pthread_mutex_t*, unsigned); + +/* Like pthread_mutex_lock(), but will wait up to 'msecs' milli-seconds + * before returning. Same return values as pthread_mutex_trylock though, i.e. + * returns EBUSY if the lock could not be acquired after the timeout expired. + * + * TODO: replace with pthread_mutex_timedlock_np for LP64. + */ +int pthread_mutex_lock_timeout_np(pthread_mutex_t*, unsigned); + +#endif /* !defined(__LP64__) */ + +__END_DECLS #endif /* _PTHREAD_H_ */ diff --git a/libc/private/bionic_futex.h b/libc/private/bionic_futex.h index 69658b1e3..bfc3520f7 100644 --- a/libc/private/bionic_futex.h +++ b/libc/private/bionic_futex.h @@ -51,11 +51,11 @@ extern int __futex_syscall4(volatile void *ftx, int op, int val, const struct ti #define FUTEX_WAKE_PRIVATE (FUTEX_WAKE|FUTEX_PRIVATE_FLAG) #endif -/* Like __futex_wait/wake, but take an additionnal 'pshared' argument. +/* Like __futex_wait/wake, but take an additional 'pshared' argument. * when non-0, this will use normal futexes. Otherwise, private futexes. 
*/ -extern int __futex_wake_ex(volatile void *ftx, int pshared, int val); -extern int __futex_wait_ex(volatile void *ftx, int pshared, int val, const struct timespec *timeout); +extern int __futex_wake_ex(volatile void *ftx, int pshared, int val); +extern int __futex_wait_ex(volatile void *ftx, int pshared, int val, const struct timespec *timeout); __END_DECLS diff --git a/libc/private/bionic_pthread.h b/libc/private/bionic_pthread.h index 28d6ad83e..07bcbd4b0 100644 --- a/libc/private/bionic_pthread.h +++ b/libc/private/bionic_pthread.h @@ -35,7 +35,6 @@ __BEGIN_DECLS /* Internal, not an NDK API */ extern pid_t __pthread_gettid(pthread_t thid); -extern int __pthread_settid(pthread_t thid, pid_t tid); __END_DECLS diff --git a/tests/pthread_test.cpp b/tests/pthread_test.cpp index a03232ff4..42bd2b94f 100644 --- a/tests/pthread_test.cpp +++ b/tests/pthread_test.cpp @@ -464,3 +464,65 @@ TEST(pthread, pthread_attr_setstacksize) { ASSERT_EQ(GetActualStackSize(attributes), 32*1024U); #endif } + +TEST(pthread, pthread_rwlock_smoke) { + pthread_rwlock_t l; + ASSERT_EQ(0, pthread_rwlock_init(&l, NULL)); + + ASSERT_EQ(0, pthread_rwlock_rdlock(&l)); + ASSERT_EQ(0, pthread_rwlock_unlock(&l)); + + ASSERT_EQ(0, pthread_rwlock_wrlock(&l)); + ASSERT_EQ(0, pthread_rwlock_unlock(&l)); + + ASSERT_EQ(0, pthread_rwlock_destroy(&l)); +} + +static int gOnceFnCallCount = 0; +static void OnceFn() { + ++gOnceFnCallCount; +} + +TEST(pthread, pthread_once_smoke) { + pthread_once_t once_control = PTHREAD_ONCE_INIT; + ASSERT_EQ(0, pthread_once(&once_control, OnceFn)); + ASSERT_EQ(0, pthread_once(&once_control, OnceFn)); + ASSERT_EQ(1, gOnceFnCallCount); +} + +static int gAtForkPrepareCalls = 0; +static void AtForkPrepare1() { gAtForkPrepareCalls = (gAtForkPrepareCalls << 4) | 1; } +static void AtForkPrepare2() { gAtForkPrepareCalls = (gAtForkPrepareCalls << 4) | 2; } +static int gAtForkParentCalls = 0; +static void AtForkParent1() { gAtForkParentCalls = (gAtForkParentCalls << 4) | 1; } +static void AtForkParent2() { gAtForkParentCalls = (gAtForkParentCalls << 4) | 2; } +static int gAtForkChildCalls = 0; +static void AtForkChild1() { gAtForkChildCalls = (gAtForkChildCalls << 4) | 1; } +static void AtForkChild2() { gAtForkChildCalls = (gAtForkChildCalls << 4) | 2; } + +TEST(pthread, pthread_atfork) { + ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1)); + ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2)); + + int pid = fork(); + ASSERT_NE(-1, pid) << strerror(errno); + + // Child and parent calls are made in the order they were registered. + if (pid == 0) { + ASSERT_EQ(0x12, gAtForkChildCalls); + _exit(0); + } + ASSERT_EQ(0x12, gAtForkParentCalls); + + // Prepare calls are made in the reverse order. + ASSERT_EQ(0x21, gAtForkPrepareCalls); +} + +TEST(pthread, pthread_attr_getscope) { + pthread_attr_t attr; + ASSERT_EQ(0, pthread_attr_init(&attr)); + + int scope; + ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope)); + ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope); +}
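
The rdlock/wrlock entry points above now funnel into the shared __pthread_rwlock_timedrdlock/__pthread_rwlock_timedwrlock helpers, with a NULL abs_timeout meaning "block indefinitely". For reference, a minimal caller-side sketch of the timed write-lock path; this is illustrative only, not part of the patch, and assumes the POSIX CLOCK_REALTIME-based absolute deadline:

    #include <pthread.h>
    #include <time.h>

    static pthread_rwlock_t g_rwlock = PTHREAD_RWLOCK_INITIALIZER;

    // Try to take the write lock, giving up roughly one second from now.
    static int LockForWriteWithTimeout() {
      timespec abs_timeout;                         // absolute deadline, not a duration
      clock_gettime(CLOCK_REALTIME, &abs_timeout);
      abs_timeout.tv_sec += 1;

      int result = pthread_rwlock_timedwrlock(&g_rwlock, &abs_timeout);
      if (result != 0) {
        return result;  // typically ETIMEDOUT: readers or another writer held on too long
      }
      // ... write-side critical section ...
      return pthread_rwlock_unlock(&g_rwlock);
    }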
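
The rewritten ptrace() wrapper keeps the kernel calling convention hidden: for PTRACE_PEEK* requests it passes a local word to __ptrace and returns either that word or -1 with errno set. Because -1 is also a valid peeked value, callers use the usual errno idiom; a sketch with hypothetical pid/address arguments, not taken from this patch:

    #include <errno.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    // Returns 0 on success and stores the peeked word in *out.
    static int PeekWord(pid_t pid, void* addr, long* out) {
      errno = 0;
      long word = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
      if (word == -1 && errno != 0) {
        return -1;  // a real failure, not a word that happens to be -1
      }
      *out = word;
      return 0;
    }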
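
timer.cpp only interposes on SIGEV_THREAD timers; everything else goes straight to the kernel via __timer_create and friends. A hedged sketch of the kind of caller that ends up on the pthread-backed path (the names here are illustrative, not from the patch):

    #include <signal.h>
    #include <time.h>

    static void OnTimerExpiry(sigval) {
      // Runs on the helper thread that the C library starts for SIGEV_THREAD timers.
    }

    static int CreateThreadTimer(timer_t* out_timer) {
      sigevent se = {};
      se.sigev_notify = SIGEV_THREAD;
      se.sigev_notify_function = OnTimerExpiry;
      return timer_create(CLOCK_MONOTONIC, &se, out_timer);  // 0 on success, -1 + errno on failure
    }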
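
The pthread.h comment about pthread_cleanup_push/pthread_cleanup_pop is worth illustrating: because the macros may expand to an opening and a closing brace, they must be paired in the same lexical scope and must not be jumped over with return, break, continue, or setjmp/longjmp. A minimal pairing, illustrative only:

    #include <pthread.h>

    static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

    static void UnlockMutex(void* arg) {
      pthread_mutex_unlock(static_cast<pthread_mutex_t*>(arg));
    }

    static void* Worker(void*) {
      pthread_mutex_lock(&g_lock);
      pthread_cleanup_push(UnlockMutex, &g_lock);  // may open a brace here...
      // ... work that might call pthread_exit(); the handler still releases g_lock ...
      pthread_cleanup_pop(1);                      // ...closed here; non-zero runs the handler now
      return NULL;
    }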