diff --git a/libc/bionic/__cxa_guard.cpp b/libc/bionic/__cxa_guard.cpp index 5b34b584b..97284d5b0 100644 --- a/libc/bionic/__cxa_guard.cpp +++ b/libc/bionic/__cxa_guard.cpp @@ -109,7 +109,7 @@ extern "C" int __cxa_guard_acquire(_guard_t* gv) { } } - __futex_wait_ex(&gv->state, false, CONSTRUCTION_UNDERWAY_WITH_WAITER, NULL); + __futex_wait_ex(&gv->state, false, CONSTRUCTION_UNDERWAY_WITH_WAITER, false, nullptr); old_value = atomic_load_explicit(&gv->state, memory_order_relaxed); } } diff --git a/libc/bionic/bionic_time_conversions.cpp b/libc/bionic/bionic_time_conversions.cpp index 75e8d4986..f3ca46a73 100644 --- a/libc/bionic/bionic_time_conversions.cpp +++ b/libc/bionic/bionic_time_conversions.cpp @@ -52,18 +52,12 @@ void timeval_from_timespec(timeval& tv, const timespec& ts) { tv.tv_usec = ts.tv_nsec / 1000; } -// Initializes 'ts' with the difference between 'abs_ts' and the current time -// according to 'clock'. Returns false if abstime already expired, true otherwise. -bool timespec_from_absolute_timespec(timespec& ts, const timespec& abs_ts, clockid_t clock) { - clock_gettime(clock, &ts); - ts.tv_sec = abs_ts.tv_sec - ts.tv_sec; - ts.tv_nsec = abs_ts.tv_nsec - ts.tv_nsec; - if (ts.tv_nsec < 0) { - ts.tv_sec--; - ts.tv_nsec += NS_PER_S; +void absolute_timespec_from_timespec(timespec& abs_ts, const timespec& ts, clockid_t clock) { + clock_gettime(clock, &abs_ts); + abs_ts.tv_sec += ts.tv_sec; + abs_ts.tv_nsec += ts.tv_nsec; + if (abs_ts.tv_nsec >= NS_PER_S) { + abs_ts.tv_nsec -= NS_PER_S; + abs_ts.tv_sec++; } - if (ts.tv_nsec < 0 || ts.tv_sec < 0) { - return false; - } - return true; } diff --git a/libc/bionic/pthread_barrier.cpp b/libc/bionic/pthread_barrier.cpp index 3227dafa9..1bcd12a14 100644 --- a/libc/bionic/pthread_barrier.cpp +++ b/libc/bionic/pthread_barrier.cpp @@ -118,7 +118,7 @@ int pthread_barrier_wait(pthread_barrier_t* barrier_interface) { // threads have left the barrier. Use acquire operation here to synchronize with // the last thread leaving the previous cycle, so we can read correct wait_count below. while(atomic_load_explicit(&barrier->state, memory_order_acquire) == RELEASE) { - __futex_wait_ex(&barrier->state, barrier->pshared, RELEASE, nullptr); + __futex_wait_ex(&barrier->state, barrier->pshared, RELEASE, false, nullptr); } uint32_t prev_wait_count = atomic_load_explicit(&barrier->wait_count, memory_order_relaxed); @@ -152,7 +152,7 @@ int pthread_barrier_wait(pthread_barrier_t* barrier_interface) { // Use acquire operation here to synchronize between the last thread entering the // barrier with all threads leaving the barrier. while (atomic_load_explicit(&barrier->state, memory_order_acquire) == WAIT) { - __futex_wait_ex(&barrier->state, barrier->pshared, WAIT, nullptr); + __futex_wait_ex(&barrier->state, barrier->pshared, WAIT, false, nullptr); } } // Use release operation here to make it not reordered with previous operations. @@ -173,7 +173,7 @@ int pthread_barrier_destroy(pthread_barrier_t* barrier_interface) { // Use acquire operation here to synchronize with the last thread leaving the barrier. // So we can read correct wait_count below. 
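Note: the bionic_time_conversions.cpp hunk above inverts the old helper. Instead of turning an absolute deadline into a remaining-time value, absolute_timespec_from_timespec() adds a relative interval to the current time on the given clock. A minimal caller-side sketch, assuming only the signature shown in the hunk (deadline_in_ms() itself is a hypothetical name, not part of the patch):

  // Sketch: build an absolute CLOCK_MONOTONIC deadline from a relative
  // millisecond count, using the helper declared in bionic_time_conversions.h.
  #include <time.h>

  static void deadline_in_ms(timespec& abs_ts, unsigned ms) {
    timespec rel;
    rel.tv_sec = ms / 1000;
    rel.tv_nsec = (ms % 1000) * 1000000;
    absolute_timespec_from_timespec(abs_ts, rel, CLOCK_MONOTONIC);
  }

This is exactly the shape pthread_mutex_lock_timeout_np() takes further down in the patch.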
while (atomic_load_explicit(&barrier->state, memory_order_acquire) == RELEASE) { - __futex_wait_ex(&barrier->state, barrier->pshared, RELEASE, nullptr); + __futex_wait_ex(&barrier->state, barrier->pshared, RELEASE, false, nullptr); } if (atomic_load_explicit(&barrier->wait_count, memory_order_relaxed) != 0) { return EBUSY; diff --git a/libc/bionic/pthread_cond.cpp b/libc/bionic/pthread_cond.cpp index 4a69da558..adbce07f1 100644 --- a/libc/bionic/pthread_cond.cpp +++ b/libc/bionic/pthread_cond.cpp @@ -111,8 +111,8 @@ struct pthread_cond_internal_t { return COND_IS_SHARED(atomic_load_explicit(&state, memory_order_relaxed)); } - int get_clock() { - return COND_GET_CLOCK(atomic_load_explicit(&state, memory_order_relaxed)); + bool use_realtime_clock() { + return COND_GET_CLOCK(atomic_load_explicit(&state, memory_order_relaxed)) == CLOCK_REALTIME; } #if defined(__LP64__) @@ -170,12 +170,17 @@ static int __pthread_cond_pulse(pthread_cond_internal_t* cond, int thread_count) return 0; } -static int __pthread_cond_timedwait_relative(pthread_cond_internal_t* cond, pthread_mutex_t* mutex, - const timespec* rel_timeout_or_null) { - unsigned int old_state = atomic_load_explicit(&cond->state, memory_order_relaxed); +static int __pthread_cond_timedwait(pthread_cond_internal_t* cond, pthread_mutex_t* mutex, + bool use_realtime_clock, const timespec* abs_timeout_or_null) { + int result = check_timespec(abs_timeout_or_null); + if (result != 0) { + return result; + } + unsigned int old_state = atomic_load_explicit(&cond->state, memory_order_relaxed); pthread_mutex_unlock(mutex); - int status = __futex_wait_ex(&cond->state, cond->process_shared(), old_state, rel_timeout_or_null); + int status = __futex_wait_ex(&cond->state, cond->process_shared(), old_state, + use_realtime_clock, abs_timeout_or_null); pthread_mutex_lock(mutex); if (status == -ETIMEDOUT) { @@ -184,21 +189,6 @@ static int __pthread_cond_timedwait_relative(pthread_cond_internal_t* cond, pthr return 0; } -static int __pthread_cond_timedwait(pthread_cond_internal_t* cond, pthread_mutex_t* mutex, - const timespec* abs_timeout_or_null, clockid_t clock) { - timespec ts; - timespec* rel_timeout = NULL; - - if (abs_timeout_or_null != NULL) { - rel_timeout = &ts; - if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, clock)) { - return ETIMEDOUT; - } - } - - return __pthread_cond_timedwait_relative(cond, mutex, rel_timeout); -} - int pthread_cond_broadcast(pthread_cond_t* cond_interface) { return __pthread_cond_pulse(__get_internal_cond(cond_interface), INT_MAX); } @@ -209,14 +199,14 @@ int pthread_cond_signal(pthread_cond_t* cond_interface) { int pthread_cond_wait(pthread_cond_t* cond_interface, pthread_mutex_t* mutex) { pthread_cond_internal_t* cond = __get_internal_cond(cond_interface); - return __pthread_cond_timedwait(cond, mutex, NULL, cond->get_clock()); + return __pthread_cond_timedwait(cond, mutex, false, nullptr); } int pthread_cond_timedwait(pthread_cond_t *cond_interface, pthread_mutex_t * mutex, const timespec *abstime) { pthread_cond_internal_t* cond = __get_internal_cond(cond_interface); - return __pthread_cond_timedwait(cond, mutex, abstime, cond->get_clock()); + return __pthread_cond_timedwait(cond, mutex, cond->use_realtime_clock(), abstime); } #if !defined(__LP64__) @@ -225,8 +215,7 @@ extern "C" int pthread_cond_timedwait_monotonic(pthread_cond_t* cond_interface, pthread_mutex_t* mutex, const timespec* abs_timeout) { - return __pthread_cond_timedwait(__get_internal_cond(cond_interface), mutex, abs_timeout, - 
CLOCK_MONOTONIC); + return __pthread_cond_timedwait(__get_internal_cond(cond_interface), mutex, false, abs_timeout); } extern "C" int pthread_cond_timedwait_monotonic_np(pthread_cond_t* cond_interface, @@ -238,8 +227,13 @@ extern "C" int pthread_cond_timedwait_monotonic_np(pthread_cond_t* cond_interfac extern "C" int pthread_cond_timedwait_relative_np(pthread_cond_t* cond_interface, pthread_mutex_t* mutex, const timespec* rel_timeout) { - - return __pthread_cond_timedwait_relative(__get_internal_cond(cond_interface), mutex, rel_timeout); + timespec ts; + timespec* abs_timeout = nullptr; + if (rel_timeout != nullptr) { + absolute_timespec_from_timespec(ts, *rel_timeout, CLOCK_REALTIME); + abs_timeout = &ts; + } + return __pthread_cond_timedwait(__get_internal_cond(cond_interface), mutex, true, abs_timeout); } extern "C" int pthread_cond_timeout_np(pthread_cond_t* cond_interface, diff --git a/libc/bionic/pthread_mutex.cpp b/libc/bionic/pthread_mutex.cpp index 851fc3d99..23dc3b0b7 100644 --- a/libc/bionic/pthread_mutex.cpp +++ b/libc/bionic/pthread_mutex.cpp @@ -296,11 +296,15 @@ static inline __always_inline int __pthread_normal_mutex_trylock(pthread_mutex_i */ static inline __always_inline int __pthread_normal_mutex_lock(pthread_mutex_internal_t* mutex, uint16_t shared, - const timespec* abs_timeout_or_null, - clockid_t clock) { + bool use_realtime_clock, + const timespec* abs_timeout_or_null) { if (__predict_true(__pthread_normal_mutex_trylock(mutex, shared) == 0)) { return 0; } + int result = check_timespec(abs_timeout_or_null); + if (result != 0) { + return result; + } ScopedTrace trace("Contending for pthread mutex"); @@ -317,15 +321,8 @@ static inline __always_inline int __pthread_normal_mutex_lock(pthread_mutex_inte // made by other threads visible to the current CPU. while (atomic_exchange_explicit(&mutex->state, locked_contended, memory_order_acquire) != unlocked) { - timespec ts; - timespec* rel_timeout = NULL; - if (abs_timeout_or_null != NULL) { - rel_timeout = &ts; - if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, clock)) { - return ETIMEDOUT; - } - } - if (__futex_wait_ex(&mutex->state, shared, locked_contended, rel_timeout) == -ETIMEDOUT) { + if (__futex_wait_ex(&mutex->state, shared, locked_contended, use_realtime_clock, + abs_timeout_or_null) == -ETIMEDOUT) { return ETIMEDOUT; } } @@ -396,14 +393,15 @@ static inline __always_inline int __recursive_or_errorcheck_mutex_wait( pthread_mutex_internal_t* mutex, uint16_t shared, uint16_t old_state, - const timespec* rel_timeout) { + bool use_realtime_clock, + const timespec* abs_timeout) { // __futex_wait always waits on a 32-bit value. But state is 16-bit. For a normal mutex, the owner_tid // field in mutex is not used. On 64-bit devices, the __pad field in mutex is not used. // But when a recursive or errorcheck mutex is used on 32-bit devices, we need to add the // owner_tid value in the value argument for __futex_wait, otherwise we may always get EAGAIN error. #if defined(__LP64__) - return __futex_wait_ex(&mutex->state, shared, old_state, rel_timeout); + return __futex_wait_ex(&mutex->state, shared, old_state, use_realtime_clock, abs_timeout); #else // This implementation works only when the layout of pthread_mutex_internal_t matches below expectation. 
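Note: in __pthread_normal_mutex_lock() above, the deadline is now validated once, right after the fast-path trylock fails, instead of being re-derived from the clock on every pass through the wait loop. The same ordering (trylock, check_timespec, sleep on the absolute deadline) recurs in the condvar, rwlock, and semaphore hunks. A condensed sketch of the pattern; LockWord, try_lock_fast() and sleep_on_futex() are hypothetical stand-ins, only check_timespec() comes from this patch:

  // Pattern sketch, not the real implementation.
  static int timed_lock_pattern(LockWord* word, bool use_realtime_clock,
                                const timespec* abs_timeout) {
    if (try_lock_fast(word)) {
      return 0;                                  // uncontended: timeout never inspected
    }
    int result = check_timespec(abs_timeout);    // EINVAL, ETIMEDOUT, or 0
    if (result != 0) {
      return result;
    }
    return sleep_on_futex(word, use_realtime_clock, abs_timeout);  // 0 or ETIMEDOUT
  }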
@@ -412,19 +410,21 @@ static inline __always_inline int __recursive_or_errorcheck_mutex_wait( static_assert(offsetof(pthread_mutex_internal_t, owner_tid) == 2, ""); uint32_t owner_tid = atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed); - return __futex_wait_ex(&mutex->state, shared, (owner_tid << 16) | old_state, rel_timeout); + return __futex_wait_ex(&mutex->state, shared, (owner_tid << 16) | old_state, + use_realtime_clock, abs_timeout); #endif } static int __pthread_mutex_lock_with_timeout(pthread_mutex_internal_t* mutex, - const timespec* abs_timeout_or_null, clockid_t clock) { + bool use_realtime_clock, + const timespec* abs_timeout_or_null) { uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed); uint16_t mtype = (old_state & MUTEX_TYPE_MASK); uint16_t shared = (old_state & MUTEX_SHARED_MASK); // Handle common case first. if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) ) { - return __pthread_normal_mutex_lock(mutex, shared, abs_timeout_or_null, clock); + return __pthread_normal_mutex_lock(mutex, shared, use_realtime_clock, abs_timeout_or_null); } // Do we already own this recursive or error-check mutex? @@ -484,16 +484,13 @@ static int __pthread_mutex_lock_with_timeout(pthread_mutex_internal_t* mutex, old_state = new_state; } - // We are in locked_contended state, sleep until someone wakes us up. - timespec ts; - timespec* rel_timeout = NULL; - if (abs_timeout_or_null != NULL) { - rel_timeout = &ts; - if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, clock)) { - return ETIMEDOUT; - } + int result = check_timespec(abs_timeout_or_null); + if (result != 0) { + return result; } - if (__recursive_or_errorcheck_mutex_wait(mutex, shared, old_state, rel_timeout) == -ETIMEDOUT) { + // We are in locked_contended state, sleep until someone wakes us up. 
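Note: the 32-bit branch of __recursive_or_errorcheck_mutex_wait() keeps the (owner_tid << 16) | old_state trick. The sketch below spells out the layout assumption that expression encodes; the struct is a hypothetical mirror for illustration, and the little-endian reading is my interpretation (the real code only asserts the offsets):

  // The futex word starts at &mutex->state; on a little-endian target the
  // 32-bit value read from that address is state | (owner_tid << 16), so the
  // expected value passed to FUTEX_WAIT must reproduce both halves.
  #include <stdint.h>
  struct mutex_word_sketch {   // hypothetical mirror of the asserted layout
    uint16_t state;            // offset 0
    uint16_t owner_tid;        // offset 2, per the static_assert above
  };
  static_assert(sizeof(mutex_word_sketch) == 4, "futex needs a full 32-bit word");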
+ if (__recursive_or_errorcheck_mutex_wait(mutex, shared, old_state, use_realtime_clock, + abs_timeout_or_null) == -ETIMEDOUT) { return ETIMEDOUT; } old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed); @@ -518,7 +515,7 @@ int pthread_mutex_lock(pthread_mutex_t* mutex_interface) { return 0; } } - return __pthread_mutex_lock_with_timeout(mutex, NULL, 0); + return __pthread_mutex_lock_with_timeout(mutex, false, nullptr); } int pthread_mutex_unlock(pthread_mutex_t* mutex_interface) { @@ -613,17 +610,12 @@ int pthread_mutex_trylock(pthread_mutex_t* mutex_interface) { #if !defined(__LP64__) extern "C" int pthread_mutex_lock_timeout_np(pthread_mutex_t* mutex_interface, unsigned ms) { + timespec ts; + timespec_from_ms(ts, ms); timespec abs_timeout; - clock_gettime(CLOCK_MONOTONIC, &abs_timeout); - abs_timeout.tv_sec += ms / 1000; - abs_timeout.tv_nsec += (ms % 1000) * 1000000; - if (abs_timeout.tv_nsec >= NS_PER_S) { - abs_timeout.tv_sec++; - abs_timeout.tv_nsec -= NS_PER_S; - } - + absolute_timespec_from_timespec(abs_timeout, ts, CLOCK_MONOTONIC); int error = __pthread_mutex_lock_with_timeout(__get_internal_mutex(mutex_interface), - &abs_timeout, CLOCK_MONOTONIC); + false, &abs_timeout); if (error == ETIMEDOUT) { error = EBUSY; } @@ -633,7 +625,7 @@ extern "C" int pthread_mutex_lock_timeout_np(pthread_mutex_t* mutex_interface, u int pthread_mutex_timedlock(pthread_mutex_t* mutex_interface, const timespec* abs_timeout) { return __pthread_mutex_lock_with_timeout(__get_internal_mutex(mutex_interface), - abs_timeout, CLOCK_REALTIME); + true, abs_timeout); } int pthread_mutex_destroy(pthread_mutex_t* mutex_interface) { diff --git a/libc/bionic/pthread_once.cpp b/libc/bionic/pthread_once.cpp index 7688a23c4..f48eadcd3 100644 --- a/libc/bionic/pthread_once.cpp +++ b/libc/bionic/pthread_once.cpp @@ -79,7 +79,7 @@ int pthread_once(pthread_once_t* once_control, void (*init_routine)(void)) { } // The initialization is underway, wait for its finish. 
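Note: pthread_mutex_lock_timeout_np() above now chains the two conversion helpers and keeps its historical CLOCK_MONOTONIC semantics (use_realtime_clock == false), while pthread_mutex_timedlock() passes true because POSIX defines its timeout against CLOCK_REALTIME. A sketch of the conversion chain, assuming timespec_from_ms() fills the fields from a millisecond count as declared in bionic_time_conversions.h:

  // 1500ms relative timeout -> absolute CLOCK_MONOTONIC deadline.
  timespec rel;
  timespec_from_ms(rel, 1500);                               // rel == {1, 500000000}
  timespec abs_timeout;
  absolute_timespec_from_timespec(abs_timeout, rel, CLOCK_MONOTONIC);
  // __pthread_mutex_lock_with_timeout(mutex, false /* monotonic */, &abs_timeout);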
- __futex_wait_ex(once_control_ptr, 0, old_value, NULL); + __futex_wait_ex(once_control_ptr, 0, old_value, false, nullptr); old_value = atomic_load_explicit(once_control_ptr, memory_order_acquire); } } diff --git a/libc/bionic/pthread_rwlock.cpp b/libc/bionic/pthread_rwlock.cpp index 934210eb4..b1c48c8f2 100644 --- a/libc/bionic/pthread_rwlock.cpp +++ b/libc/bionic/pthread_rwlock.cpp @@ -294,9 +294,13 @@ static int __pthread_rwlock_timedrdlock(pthread_rwlock_internal_t* rwlock, } while (true) { - int ret = __pthread_rwlock_tryrdlock(rwlock); - if (ret == 0 || ret == EAGAIN) { - return ret; + int result = __pthread_rwlock_tryrdlock(rwlock); + if (result == 0 || result == EAGAIN) { + return result; + } + result = check_timespec(abs_timeout_or_null); + if (result != 0) { + return result; } int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed); @@ -304,16 +308,6 @@ static int __pthread_rwlock_timedrdlock(pthread_rwlock_internal_t* rwlock, continue; } - timespec ts; - timespec* rel_timeout = NULL; - - if (abs_timeout_or_null != NULL) { - rel_timeout = &ts; - if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, CLOCK_REALTIME)) { - return ETIMEDOUT; - } - } - rwlock->pending_lock.lock(); rwlock->pending_reader_count++; @@ -327,10 +321,10 @@ static int __pthread_rwlock_timedrdlock(pthread_rwlock_internal_t* rwlock, int old_serial = rwlock->pending_reader_wakeup_serial; rwlock->pending_lock.unlock(); - int futex_ret = 0; + int futex_result = 0; if (!__can_acquire_read_lock(old_state, rwlock->writer_nonrecursive_preferred)) { - futex_ret = __futex_wait_ex(&rwlock->pending_reader_wakeup_serial, rwlock->pshared, - old_serial, rel_timeout); + futex_result = __futex_wait_ex(&rwlock->pending_reader_wakeup_serial, rwlock->pshared, + old_serial, true, abs_timeout_or_null); } rwlock->pending_lock.lock(); @@ -341,7 +335,7 @@ static int __pthread_rwlock_timedrdlock(pthread_rwlock_internal_t* rwlock, } rwlock->pending_lock.unlock(); - if (futex_ret == -ETIMEDOUT) { + if (futex_result == -ETIMEDOUT) { return ETIMEDOUT; } } @@ -372,9 +366,13 @@ static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock, return EDEADLK; } while (true) { - int ret = __pthread_rwlock_trywrlock(rwlock); - if (ret == 0) { - return ret; + int result = __pthread_rwlock_trywrlock(rwlock); + if (result == 0) { + return result; + } + result = check_timespec(abs_timeout_or_null); + if (result != 0) { + return result; } int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed); @@ -382,16 +380,6 @@ static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock, continue; } - timespec ts; - timespec* rel_timeout = NULL; - - if (abs_timeout_or_null != NULL) { - rel_timeout = &ts; - if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, CLOCK_REALTIME)) { - return ETIMEDOUT; - } - } - rwlock->pending_lock.lock(); rwlock->pending_writer_count++; @@ -401,10 +389,10 @@ static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock, int old_serial = rwlock->pending_writer_wakeup_serial; rwlock->pending_lock.unlock(); - int futex_ret = 0; + int futex_result = 0; if (!__can_acquire_write_lock(old_state)) { - futex_ret = __futex_wait_ex(&rwlock->pending_writer_wakeup_serial, rwlock->pshared, - old_serial, rel_timeout); + futex_result = __futex_wait_ex(&rwlock->pending_writer_wakeup_serial, rwlock->pshared, + old_serial, true, abs_timeout_or_null); } rwlock->pending_lock.lock(); @@ -415,7 +403,7 @@ static int 
__pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock, } rwlock->pending_lock.unlock(); - if (futex_ret == -ETIMEDOUT) { + if (futex_result == -ETIMEDOUT) { return ETIMEDOUT; } } @@ -427,7 +415,7 @@ int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock_interface) { if (__predict_true(__pthread_rwlock_tryrdlock(rwlock) == 0)) { return 0; } - return __pthread_rwlock_timedrdlock(rwlock, NULL); + return __pthread_rwlock_timedrdlock(rwlock, nullptr); } int pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock_interface, const timespec* abs_timeout) { @@ -446,7 +434,7 @@ int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock_interface) { if (__predict_true(__pthread_rwlock_trywrlock(rwlock) == 0)) { return 0; } - return __pthread_rwlock_timedwrlock(rwlock, NULL); + return __pthread_rwlock_timedwrlock(rwlock, nullptr); } int pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock_interface, const timespec* abs_timeout) { diff --git a/libc/bionic/semaphore.cpp b/libc/bionic/semaphore.cpp index ff84443e1..79b5d6372 100644 --- a/libc/bionic/semaphore.cpp +++ b/libc/bionic/semaphore.cpp @@ -220,7 +220,7 @@ int sem_wait(sem_t* sem) { return 0; } - __futex_wait_ex(sem_count_ptr, shared, shared | SEMCOUNT_MINUS_ONE, NULL); + __futex_wait_ex(sem_count_ptr, shared, shared | SEMCOUNT_MINUS_ONE, false, nullptr); } } @@ -235,36 +235,29 @@ int sem_timedwait(sem_t* sem, const timespec* abs_timeout) { } // Check it as per POSIX. - if (abs_timeout == NULL || abs_timeout->tv_sec < 0 || abs_timeout->tv_nsec < 0 || abs_timeout->tv_nsec >= NS_PER_S) { - errno = EINVAL; + int result = check_timespec(abs_timeout); + if (result != 0) { + errno = result; return -1; } unsigned int shared = SEM_GET_SHARED(sem_count_ptr); while (true) { - // POSIX mandates CLOCK_REALTIME here. - timespec ts; - if (!timespec_from_absolute_timespec(ts, *abs_timeout, CLOCK_REALTIME)) { - errno = ETIMEDOUT; - return -1; - } - // Try to grab the semaphore. If the value was 0, this will also change it to -1. if (__sem_dec(sem_count_ptr) > 0) { - break; + return 0; } // Contention detected. Wait for a wakeup event. - int ret = __futex_wait_ex(sem_count_ptr, shared, shared | SEMCOUNT_MINUS_ONE, &ts); + int result = __futex_wait_ex(sem_count_ptr, shared, shared | SEMCOUNT_MINUS_ONE, true, abs_timeout); // Return in case of timeout or interrupt. - if (ret == -ETIMEDOUT || ret == -EINTR) { - errno = -ret; + if (result == -ETIMEDOUT || result == -EINTR) { + errno = -result; return -1; } } - return 0; } int sem_post(sem_t* sem) { diff --git a/libc/private/bionic_futex.h b/libc/private/bionic_futex.h index 401577ab8..946d9dd1f 100644 --- a/libc/private/bionic_futex.h +++ b/libc/private/bionic_futex.h @@ -40,10 +40,12 @@ __BEGIN_DECLS struct timespec; -static inline __always_inline int __futex(volatile void* ftx, int op, int value, const struct timespec* timeout) { +static inline __always_inline int __futex(volatile void* ftx, int op, int value, + const struct timespec* timeout, + int bitset) { // Our generated syscall assembler sets errno, but our callers (pthread functions) don't want to. 
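Note: the rwlock and semaphore hunks above delete the same per-iteration dance that the mutex and condvar code dropped earlier: recompute a relative remainder from CLOCK_REALTIME on every wakeup, then hand it to a relative-timeout futex wait. The removed helper did roughly the following (reconstructed from the '-' lines above, caller-level error handling folded in):

  // Shape of the removed per-iteration conversion.
  timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  ts.tv_sec  = abs_timeout->tv_sec  - ts.tv_sec;
  ts.tv_nsec = abs_timeout->tv_nsec - ts.tv_nsec;
  if (ts.tv_nsec < 0) { ts.tv_sec--; ts.tv_nsec += NS_PER_S; }
  if (ts.tv_sec < 0 || ts.tv_nsec < 0) return ETIMEDOUT;
  // ...then a relative FUTEX_WAIT on CLOCK_MONOTONIC.

Besides the extra clock_gettime() on every spurious wakeup, any delay between reading the clock and entering the kernel quietly stretches the total wait, and a CLOCK_REALTIME deadline stops tracking wall-clock adjustments once it has been converted. Passing the absolute deadline straight to the futex removes both problems.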
int saved_errno = errno; - int result = syscall(__NR_futex, ftx, op, value, timeout); + int result = syscall(__NR_futex, ftx, op, value, timeout, NULL, bitset); if (__predict_false(result == -1)) { result = -errno; errno = saved_errno; @@ -52,19 +54,22 @@ static inline __always_inline int __futex(volatile void* ftx, int op, int value, } static inline int __futex_wake(volatile void* ftx, int count) { - return __futex(ftx, FUTEX_WAKE, count, NULL); + return __futex(ftx, FUTEX_WAKE, count, NULL, 0); } static inline int __futex_wake_ex(volatile void* ftx, bool shared, int count) { - return __futex(ftx, shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE, count, NULL); + return __futex(ftx, shared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE, count, NULL, 0); } static inline int __futex_wait(volatile void* ftx, int value, const struct timespec* timeout) { - return __futex(ftx, FUTEX_WAIT, value, timeout); + return __futex(ftx, FUTEX_WAIT, value, timeout, 0); } -static inline int __futex_wait_ex(volatile void* ftx, bool shared, int value, const struct timespec* timeout) { - return __futex(ftx, shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE, value, timeout); +static inline int __futex_wait_ex(volatile void* ftx, bool shared, int value, + bool use_realtime_clock, const struct timespec* abs_timeout) { + return __futex(ftx, (shared ? FUTEX_WAIT_BITSET : FUTEX_WAIT_BITSET_PRIVATE) | + (use_realtime_clock ? FUTEX_CLOCK_REALTIME : 0), value, abs_timeout, + FUTEX_BITSET_MATCH_ANY); } __END_DECLS diff --git a/libc/private/bionic_lock.h b/libc/private/bionic_lock.h index a36d1edd3..eb732745d 100644 --- a/libc/private/bionic_lock.h +++ b/libc/private/bionic_lock.h @@ -57,7 +57,7 @@ class Lock { } while (atomic_exchange_explicit(&state, LockedWithWaiter, memory_order_acquire) != Unlocked) { // TODO: As the critical section is brief, it is a better choice to spin a few times befor sleeping. 
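Note: the bionic_futex.h hunk above is the heart of the change. Per futex(2), FUTEX_WAIT takes a relative timeout measured against CLOCK_MONOTONIC, whereas FUTEX_WAIT_BITSET takes an absolute timeout, measured against CLOCK_MONOTONIC by default or against CLOCK_REALTIME when FUTEX_CLOCK_REALTIME is or'd into the op; val3 carries the wakeup bit mask, and FUTEX_BITSET_MATCH_ANY keeps the existing FUTEX_WAKE paths working unchanged. A stripped-down sketch of the same call outside the bionic wrappers (futex_wait_abs() is a hypothetical name; the constants are standard kernel UAPI):

  #include <linux/futex.h>
  #include <sys/syscall.h>
  #include <time.h>
  #include <unistd.h>

  // Sleep until *word != expected, or until the absolute deadline passes.
  static int futex_wait_abs(volatile int* word, int expected,
                            bool use_realtime_clock, const timespec* abs_deadline) {
    int op = FUTEX_WAIT_BITSET_PRIVATE | (use_realtime_clock ? FUTEX_CLOCK_REALTIME : 0);
    // args: uaddr, op, val, timeout, uaddr2 (unused here), val3 (wakeup bit mask)
    return syscall(__NR_futex, word, op, expected, abs_deadline, nullptr,
                   FUTEX_BITSET_MATCH_ANY);
  }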
- __futex_wait_ex(&state, process_shared, LockedWithWaiter, NULL); + __futex_wait_ex(&state, process_shared, LockedWithWaiter, false, nullptr); } return; } diff --git a/libc/private/bionic_time_conversions.h b/libc/private/bionic_time_conversions.h index cf0046ae9..294c29a79 100644 --- a/libc/private/bionic_time_conversions.h +++ b/libc/private/bionic_time_conversions.h @@ -29,9 +29,12 @@ #ifndef _BIONIC_TIME_CONVERSIONS_H #define _BIONIC_TIME_CONVERSIONS_H +#include #include #include +#include "private/bionic_constants.h" + __BEGIN_DECLS __LIBC_HIDDEN__ bool timespec_from_timeval(timespec& ts, const timeval& tv); @@ -39,8 +42,21 @@ __LIBC_HIDDEN__ void timespec_from_ms(timespec& ts, const int ms); __LIBC_HIDDEN__ void timeval_from_timespec(timeval& tv, const timespec& ts); -__LIBC_HIDDEN__ bool timespec_from_absolute_timespec(timespec& ts, const timespec& abs_ts, clockid_t clock); +__LIBC_HIDDEN__ void absolute_timespec_from_timespec(timespec& abs_ts, const timespec& ts, + clockid_t clock); __END_DECLS +static inline int check_timespec(const timespec* ts) { + if (ts != nullptr) { + if (ts->tv_nsec < 0 || ts->tv_nsec >= NS_PER_S) { + return EINVAL; + } + if (ts->tv_sec < 0) { + return ETIMEDOUT; + } + } + return 0; +} + #endif diff --git a/tests/pthread_test.cpp b/tests/pthread_test.cpp index e62518ac8..974652ef3 100755 --- a/tests/pthread_test.cpp +++ b/tests/pthread_test.cpp @@ -36,6 +36,7 @@ #include #include +#include "private/bionic_constants.h" #include "private/bionic_macros.h" #include "private/ScopeGuard.h" #include "BionicDeathTest.h" @@ -744,35 +745,41 @@ struct RwlockWakeupHelperArg { LOCK_INITIALIZED, LOCK_WAITING, LOCK_RELEASED, - LOCK_ACCESSED + LOCK_ACCESSED, + LOCK_TIMEDOUT, }; std::atomic progress; std::atomic tid; + std::function trylock_function; + std::function lock_function; + std::function timed_lock_function; }; -static void pthread_rwlock_reader_wakeup_writer_helper(RwlockWakeupHelperArg* arg) { +static void pthread_rwlock_wakeup_helper(RwlockWakeupHelperArg* arg) { arg->tid = gettid(); ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress); arg->progress = RwlockWakeupHelperArg::LOCK_WAITING; - ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&arg->lock)); - ASSERT_EQ(0, pthread_rwlock_wrlock(&arg->lock)); + ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock)); + ASSERT_EQ(0, arg->lock_function(&arg->lock)); ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress); ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock)); arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED; } -TEST(pthread, pthread_rwlock_reader_wakeup_writer) { +static void test_pthread_rwlock_reader_wakeup_writer(std::function lock_function) { RwlockWakeupHelperArg wakeup_arg; ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL)); ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock)); wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED; wakeup_arg.tid = 0; + wakeup_arg.trylock_function = pthread_rwlock_trywrlock; + wakeup_arg.lock_function = lock_function; pthread_t thread; ASSERT_EQ(0, pthread_create(&thread, NULL, - reinterpret_cast(pthread_rwlock_reader_wakeup_writer_helper), &wakeup_arg)); + reinterpret_cast(pthread_rwlock_wakeup_helper), &wakeup_arg)); WaitUntilThreadSleep(wakeup_arg.tid); ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress); @@ -784,29 +791,31 @@ TEST(pthread, pthread_rwlock_reader_wakeup_writer) { ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock)); } -static void pthread_rwlock_writer_wakeup_reader_helper(RwlockWakeupHelperArg* arg) { - 
arg->tid = gettid(); - ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress); - arg->progress = RwlockWakeupHelperArg::LOCK_WAITING; - - ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&arg->lock)); - ASSERT_EQ(0, pthread_rwlock_rdlock(&arg->lock)); - ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress); - ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock)); - - arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED; +TEST(pthread, pthread_rwlock_reader_wakeup_writer) { + test_pthread_rwlock_reader_wakeup_writer(pthread_rwlock_wrlock); } -TEST(pthread, pthread_rwlock_writer_wakeup_reader) { +TEST(pthread, pthread_rwlock_reader_wakeup_writer_timedwait) { + timespec ts; + ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); + ts.tv_sec += 1; + test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) { + return pthread_rwlock_timedwrlock(lock, &ts); + }); +} + +static void test_pthread_rwlock_writer_wakeup_reader(std::function lock_function) { RwlockWakeupHelperArg wakeup_arg; ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, NULL)); ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock)); wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED; wakeup_arg.tid = 0; + wakeup_arg.trylock_function = pthread_rwlock_tryrdlock; + wakeup_arg.lock_function = lock_function; pthread_t thread; ASSERT_EQ(0, pthread_create(&thread, NULL, - reinterpret_cast(pthread_rwlock_writer_wakeup_reader_helper), &wakeup_arg)); + reinterpret_cast(pthread_rwlock_wakeup_helper), &wakeup_arg)); WaitUntilThreadSleep(wakeup_arg.tid); ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress); @@ -818,6 +827,85 @@ TEST(pthread, pthread_rwlock_writer_wakeup_reader) { ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock)); } +TEST(pthread, pthread_rwlock_writer_wakeup_reader) { + test_pthread_rwlock_writer_wakeup_reader(pthread_rwlock_rdlock); +} + +TEST(pthread, pthread_rwlock_writer_wakeup_reader_timedwait) { + timespec ts; + ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); + ts.tv_sec += 1; + test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) { + return pthread_rwlock_timedrdlock(lock, &ts); + }); +} + +static void pthread_rwlock_wakeup_timeout_helper(RwlockWakeupHelperArg* arg) { + arg->tid = gettid(); + ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress); + arg->progress = RwlockWakeupHelperArg::LOCK_WAITING; + + ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock)); + + timespec ts; + ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); + ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts)); + ts.tv_nsec = -1; + ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts)); + ts.tv_nsec = NS_PER_S; + ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts)); + ts.tv_nsec = NS_PER_S - 1; + ts.tv_sec = -1; + ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts)); + ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); + ts.tv_sec += 1; + ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts)); + ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, arg->progress); + arg->progress = RwlockWakeupHelperArg::LOCK_TIMEDOUT; +} + +TEST(pthread, pthread_rwlock_timedrdlock_timeout) { + RwlockWakeupHelperArg wakeup_arg; + ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr)); + ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock)); + wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED; + wakeup_arg.tid = 0; + wakeup_arg.trylock_function = pthread_rwlock_tryrdlock; + wakeup_arg.timed_lock_function = pthread_rwlock_timedrdlock; + + 
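Note: the boundary values in pthread_rwlock_wakeup_timeout_helper() above map one-to-one onto check_timespec() from bionic_time_conversions.h. A summary sketch of that classification (no new behavior, just the helper spelled out):

  // check_timespec() classification:
  //   tv_nsec < 0 or tv_nsec >= NS_PER_S  -> EINVAL
  //   tv_sec < 0                          -> ETIMEDOUT
  //   nullptr or anything else            -> 0
  timespec ts;
  ts.tv_sec = -1;
  ts.tv_nsec = NS_PER_S - 1;
  int result = check_timespec(&ts);   // ETIMEDOUT, without ever entering the kernel
  // A well-formed deadline that is merely in the past passes check_timespec()
  // and then times out inside the futex wait itself.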
pthread_t thread; + ASSERT_EQ(0, pthread_create(&thread, nullptr, + reinterpret_cast(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg)); + WaitUntilThreadSleep(wakeup_arg.tid); + ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress); + + ASSERT_EQ(0, pthread_join(thread, nullptr)); + ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress); + ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock)); + ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock)); +} + +TEST(pthread, pthread_rwlock_timedwrlock_timeout) { + RwlockWakeupHelperArg wakeup_arg; + ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr)); + ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock)); + wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED; + wakeup_arg.tid = 0; + wakeup_arg.trylock_function = pthread_rwlock_trywrlock; + wakeup_arg.timed_lock_function = pthread_rwlock_timedwrlock; + + pthread_t thread; + ASSERT_EQ(0, pthread_create(&thread, nullptr, + reinterpret_cast(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg)); + WaitUntilThreadSleep(wakeup_arg.tid); + ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress); + + ASSERT_EQ(0, pthread_join(thread, nullptr)); + ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress); + ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock)); + ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock)); +} + class RwlockKindTestHelper { private: struct ThreadArg { @@ -1062,36 +1150,44 @@ class pthread_CondWakeupTest : public ::testing::Test { }; std::atomic progress; pthread_t thread; + std::function wait_function; protected: - virtual void SetUp() { - ASSERT_EQ(0, pthread_mutex_init(&mutex, NULL)); - ASSERT_EQ(0, pthread_cond_init(&cond, NULL)); + void SetUp() override { + ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr)); + } + + void InitCond(clockid_t clock=CLOCK_REALTIME) { + pthread_condattr_t attr; + ASSERT_EQ(0, pthread_condattr_init(&attr)); + ASSERT_EQ(0, pthread_condattr_setclock(&attr, clock)); + ASSERT_EQ(0, pthread_cond_init(&cond, &attr)); + ASSERT_EQ(0, pthread_condattr_destroy(&attr)); + } + + void StartWaitingThread(std::function wait_function) { progress = INITIALIZED; - ASSERT_EQ(0, - pthread_create(&thread, NULL, reinterpret_cast(WaitThreadFn), this)); - } - - virtual void TearDown() { - ASSERT_EQ(0, pthread_join(thread, NULL)); - ASSERT_EQ(FINISHED, progress); - ASSERT_EQ(0, pthread_cond_destroy(&cond)); - ASSERT_EQ(0, pthread_mutex_destroy(&mutex)); - } - - void SleepUntilProgress(Progress expected_progress) { - while (progress != expected_progress) { + this->wait_function = wait_function; + ASSERT_EQ(0, pthread_create(&thread, NULL, reinterpret_cast(WaitThreadFn), this)); + while (progress != WAITING) { usleep(5000); } usleep(5000); } + void TearDown() override { + ASSERT_EQ(0, pthread_join(thread, nullptr)); + ASSERT_EQ(FINISHED, progress); + ASSERT_EQ(0, pthread_cond_destroy(&cond)); + ASSERT_EQ(0, pthread_mutex_destroy(&mutex)); + } + private: static void WaitThreadFn(pthread_CondWakeupTest* test) { ASSERT_EQ(0, pthread_mutex_lock(&test->mutex)); test->progress = WAITING; while (test->progress == WAITING) { - ASSERT_EQ(0, pthread_cond_wait(&test->cond, &test->mutex)); + ASSERT_EQ(0, test->wait_function(&test->cond, &test->mutex)); } ASSERT_EQ(SIGNALED, test->progress); test->progress = FINISHED; @@ -1099,39 +1195,65 @@ class pthread_CondWakeupTest : public ::testing::Test { } }; -TEST_F(pthread_CondWakeupTest, signal) { - SleepUntilProgress(WAITING); +TEST_F(pthread_CondWakeupTest, 
signal_wait) { + InitCond(); + StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) { + return pthread_cond_wait(cond, mutex); + }); progress = SIGNALED; - pthread_cond_signal(&cond); + ASSERT_EQ(0, pthread_cond_signal(&cond)); } -TEST_F(pthread_CondWakeupTest, broadcast) { - SleepUntilProgress(WAITING); +TEST_F(pthread_CondWakeupTest, broadcast_wait) { + InitCond(); + StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) { + return pthread_cond_wait(cond, mutex); + }); progress = SIGNALED; - pthread_cond_broadcast(&cond); + ASSERT_EQ(0, pthread_cond_broadcast(&cond)); } -TEST(pthread, pthread_mutex_timedlock) { - pthread_mutex_t m; - ASSERT_EQ(0, pthread_mutex_init(&m, NULL)); - - // If the mutex is already locked, pthread_mutex_timedlock should time out. - ASSERT_EQ(0, pthread_mutex_lock(&m)); - +TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_REALTIME) { + InitCond(CLOCK_REALTIME); timespec ts; ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); - ts.tv_nsec += 1; - ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts)); + ts.tv_sec += 1; + StartWaitingThread([&](pthread_cond_t* cond, pthread_mutex_t* mutex) { + return pthread_cond_timedwait(cond, mutex, &ts); + }); + progress = SIGNALED; + ASSERT_EQ(0, pthread_cond_signal(&cond)); +} - // If the mutex is unlocked, pthread_mutex_timedlock should succeed. - ASSERT_EQ(0, pthread_mutex_unlock(&m)); +TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_MONOTONIC) { + InitCond(CLOCK_MONOTONIC); + timespec ts; + ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts)); + ts.tv_sec += 1; + StartWaitingThread([&](pthread_cond_t* cond, pthread_mutex_t* mutex) { + return pthread_cond_timedwait(cond, mutex, &ts); + }); + progress = SIGNALED; + ASSERT_EQ(0, pthread_cond_signal(&cond)); +} +TEST(pthread, pthread_cond_timedwait_timeout) { + pthread_mutex_t mutex; + ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr)); + pthread_cond_t cond; + ASSERT_EQ(0, pthread_cond_init(&cond, nullptr)); + ASSERT_EQ(0, pthread_mutex_lock(&mutex)); + timespec ts; ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); - ts.tv_nsec += 1; - ASSERT_EQ(0, pthread_mutex_timedlock(&m, &ts)); - - ASSERT_EQ(0, pthread_mutex_unlock(&m)); - ASSERT_EQ(0, pthread_mutex_destroy(&m)); + ASSERT_EQ(ETIMEDOUT, pthread_cond_timedwait(&cond, &mutex, &ts)); + ts.tv_nsec = -1; + ASSERT_EQ(EINVAL, pthread_cond_timedwait(&cond, &mutex, &ts)); + ts.tv_nsec = NS_PER_S; + ASSERT_EQ(EINVAL, pthread_cond_timedwait(&cond, &mutex, &ts)); + ts.tv_nsec = NS_PER_S - 1; + ts.tv_sec = -1; + ASSERT_EQ(ETIMEDOUT, pthread_cond_timedwait(&cond, &mutex, &ts)); + ASSERT_EQ(0, pthread_mutex_unlock(&mutex)); } TEST(pthread, pthread_attr_getstack__main_thread) { @@ -1552,6 +1674,35 @@ TEST(pthread, pthread_mutex_owner_tid_limit) { #endif } +TEST(pthread, pthread_mutex_timedlock) { + pthread_mutex_t m; + ASSERT_EQ(0, pthread_mutex_init(&m, nullptr)); + + // If the mutex is already locked, pthread_mutex_timedlock should time out. + ASSERT_EQ(0, pthread_mutex_lock(&m)); + + timespec ts; + ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); + ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts)); + ts.tv_nsec = -1; + ASSERT_EQ(EINVAL, pthread_mutex_timedlock(&m, &ts)); + ts.tv_nsec = NS_PER_S; + ASSERT_EQ(EINVAL, pthread_mutex_timedlock(&m, &ts)); + ts.tv_nsec = NS_PER_S - 1; + ts.tv_sec = -1; + ASSERT_EQ(ETIMEDOUT, pthread_mutex_timedlock(&m, &ts)); + + // If the mutex is unlocked, pthread_mutex_timedlock should succeed. 
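Note: stepping back to the pthread_CondWakeupTest changes above, InitCond(CLOCK_MONOTONIC) is what makes cond->use_realtime_clock() return false, so the futex wait is issued without FUTEX_CLOCK_REALTIME and the kernel compares the deadline against CLOCK_MONOTONIC. At the public API level that corresponds to the following sketch (standard POSIX calls, mirroring the test):

  pthread_condattr_t attr;
  pthread_condattr_init(&attr);
  pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);   // default is CLOCK_REALTIME
  pthread_cond_t cond;
  pthread_cond_init(&cond, &attr);
  pthread_condattr_destroy(&attr);

  timespec deadline;
  clock_gettime(CLOCK_MONOTONIC, &deadline);           // deadline on the condvar's clock
  deadline.tv_sec += 1;
  // pthread_cond_timedwait(&cond, &mutex, &deadline) now interprets the deadline
  // on CLOCK_MONOTONIC, unaffected by wall-clock adjustments.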
+ ASSERT_EQ(0, pthread_mutex_unlock(&m)); + + ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts)); + ts.tv_sec += 1; + ASSERT_EQ(0, pthread_mutex_timedlock(&m, &ts)); + + ASSERT_EQ(0, pthread_mutex_unlock(&m)); + ASSERT_EQ(0, pthread_mutex_destroy(&m)); +} + class StrictAlignmentAllocator { public: void* allocate(size_t size, size_t alignment) { @@ -1749,13 +1900,13 @@ void BarrierOrderingTestHelper(BarrierOrderingTestHelperArg* arg) { const size_t ITERATION_COUNT = 10000; for (size_t i = 1; i <= ITERATION_COUNT; ++i) { arg->array[arg->id] = i; - int ret = pthread_barrier_wait(arg->barrier); - ASSERT_TRUE(ret == 0 || ret == PTHREAD_BARRIER_SERIAL_THREAD); + int result = pthread_barrier_wait(arg->barrier); + ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD); for (size_t j = 0; j < arg->array_length; ++j) { ASSERT_EQ(i, arg->array[j]); } - ret = pthread_barrier_wait(arg->barrier); - ASSERT_TRUE(ret == 0 || ret == PTHREAD_BARRIER_SERIAL_THREAD); + result = pthread_barrier_wait(arg->barrier); + ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD); } } diff --git a/tests/semaphore_test.cpp b/tests/semaphore_test.cpp index e517f818f..b65bfb859 100644 --- a/tests/semaphore_test.cpp +++ b/tests/semaphore_test.cpp @@ -117,6 +117,16 @@ TEST(semaphore, sem_timedwait) { ts.tv_nsec = -1; ASSERT_EQ(-1, sem_timedwait(&s, &ts)); ASSERT_EQ(EINVAL, errno); + errno = 0; + ts.tv_nsec = NS_PER_S; + ASSERT_EQ(-1, sem_timedwait(&s, &ts)); + ASSERT_EQ(EINVAL, errno); + + errno = 0; + ts.tv_nsec = NS_PER_S - 1; + ts.tv_sec = -1; + ASSERT_EQ(-1, sem_timedwait(&s, &ts)); + ASSERT_EQ(ETIMEDOUT, errno); ASSERT_EQ(0, sem_destroy(&s)); }
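Note: the new semaphore assertions pin down the errno-based contract: unlike the pthread_* functions, sem_timedwait() reports EINVAL and ETIMEDOUT through errno with a -1 return, which is why the semaphore.cpp hunk copies check_timespec()'s result into errno. A caller-side sketch mirroring the test:

  #include <errno.h>
  #include <semaphore.h>
  #include <time.h>

  sem_t s;
  sem_init(&s, 0, 0);                      // count 0, not shared

  timespec ts;
  ts.tv_sec = -1;
  ts.tv_nsec = NS_PER_S - 1;
  errno = 0;
  int result = sem_timedwait(&s, &ts);     // result == -1, errno == ETIMEDOUT

  ts.tv_nsec = NS_PER_S;
  errno = 0;
  result = sem_timedwait(&s, &ts);         // result == -1, errno == EINVAL

  sem_destroy(&s);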