am 5490bebd: Merge "Remove duplication in pthread_mutex.cpp."
* commit '5490bebd7cdd4406780358f590391b75ab8a7d84': Remove duplication in pthread_mutex.cpp.
commit ca7ac7d36a
@@ -237,7 +237,7 @@ int pthread_mutexattr_getpshared(const pthread_mutexattr_t* attr, int* pshared)
     return 0;
 }
 
-static inline atomic_int* MUTEX_TO_ATOMIC_POINTER(pthread_mutex_t* mutex) {
+static inline atomic_int* get_mutex_value_pointer(pthread_mutex_t* mutex) {
     static_assert(sizeof(atomic_int) == sizeof(mutex->value),
                   "mutex->value should actually be atomic_int in implementation.");
 
@@ -247,7 +247,7 @@ static inline atomic_int* MUTEX_TO_ATOMIC_POINTER(pthread_mutex_t* mutex) {
 }
 
 int pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attr) {
-    atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
+    atomic_int* mutex_value_ptr = get_mutex_value_pointer(mutex);
 
     if (__predict_true(attr == NULL)) {
         atomic_init(mutex_value_ptr, MUTEX_TYPE_BITS_NORMAL);
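Note: the renamed helper is the single place that reinterprets the public mutex field as an atomic, and the static_assert guards that reinterpretation. A minimal sketch of the same pattern, with an invented struct name rather than bionic's real pthread_mutex_t:

#include <atomic>

struct fake_mutex_t {        // hypothetical stand-in for the public mutex type
  int value;                 // plain int in the ABI-visible struct
};

// The implementation treats that storage as an atomic int; the static_assert
// documents and enforces the size assumption the cast relies on.
static inline std::atomic<int>* value_pointer(fake_mutex_t* m) {
  static_assert(sizeof(std::atomic<int>) == sizeof(m->value),
                "value must be layout-compatible with an atomic int");
  return reinterpret_cast<std::atomic<int>*>(&m->value);
}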
@@ -277,6 +277,19 @@ int pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attr)
     return 0;
 }
 
+static inline int __pthread_normal_mutex_trylock(atomic_int* mutex_value_ptr, int shared) {
+    const int unlocked = shared | MUTEX_STATE_BITS_UNLOCKED;
+    const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
+
+    int mvalue = unlocked;
+    if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue,
+                                                               locked_uncontended,
+                                                               memory_order_acquire,
+                                                               memory_order_relaxed))) {
+        return 0;
+    }
+    return EBUSY;
+}
 
 /*
  * Lock a mutex of type NORMAL.
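Note: the new __pthread_normal_mutex_trylock is a single strong compare-exchange from "unlocked" to "locked uncontended", acquire on success and relaxed on failure. A self-contained sketch of the same idea with std::atomic (names invented for the example, not bionic's internals):

#include <atomic>
#include <cerrno>

static int normal_trylock_sketch(std::atomic<int>& state, int unlocked, int locked) {
  int expected = unlocked;
  if (state.compare_exchange_strong(expected, locked,
                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
    return 0;    // lock taken; the previous owner's writes are now visible
  }
  return EBUSY;  // someone else holds it; `expected` now holds the observed value
}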
@@ -290,25 +303,17 @@ int pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attr)
  * "type" value is zero, so the only bits that will be set are the ones in
  * the lock state field.
  */
-static inline void _normal_mutex_lock(atomic_int* mutex_value_ptr, int shared) {
-    /* convenience shortcuts */
-    const int unlocked = shared | MUTEX_STATE_BITS_UNLOCKED;
-    const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
-
-    // The common case is an unlocked mutex, so we begin by trying to
-    // change the lock's state from unlocked to locked_uncontended.
-    // If exchanged successfully, An acquire fence is required to make
-    // all memory accesses made by other threads visible in current CPU.
-    int mvalue = unlocked;
-    if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue,
-                                                               locked_uncontended,
-                                                               memory_order_acquire,
-                                                               memory_order_relaxed))) {
-        return;
+static inline int __pthread_normal_mutex_lock(atomic_int* mutex_value_ptr, int shared,
+                                              const timespec* abs_timeout_or_null, clockid_t clock) {
+    if (__predict_true(__pthread_normal_mutex_trylock(mutex_value_ptr, shared) == 0)) {
+        return 0;
     }
 
     ScopedTrace trace("Contending for pthread mutex");
 
+    const int unlocked = shared | MUTEX_STATE_BITS_UNLOCKED;
+    const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
+
     // We want to go to sleep until the mutex is available, which requires
     // promoting it to locked_contended. We need to swap in the new state
     // value and then wait until somebody wakes us up.
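Note: for the contended path, the lock word is swapped to locked_contended and the thread sleeps until an exchange returns "unlocked". A simplified, spin-based sketch of that loop (the real code sleeps on a futex via __futex_wait_ex instead of yielding; names here are illustrative):

#include <atomic>
#include <thread>

// Stand-in for a futex-style wait: sleep only while *state still equals `expected`.
static void wait_on(std::atomic<int>* state, int expected) {
  while (state->load(std::memory_order_relaxed) == expected) {
    std::this_thread::yield();
  }
}

static void contended_lock_sketch(std::atomic<int>& state, int unlocked, int locked_contended) {
  // Whoever sees "unlocked" come back from the exchange owns the lock; the
  // acquire order makes the previous owner's writes visible to us.
  while (state.exchange(locked_contended, std::memory_order_acquire) != unlocked) {
    wait_on(&state, locked_contended);
  }
}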
@@ -316,20 +321,29 @@ static inline void _normal_mutex_lock(atomic_int* mutex_value_ptr, int shared) {
     // If it returns unlocked, we have acquired the lock, otherwise another
     // thread still holds the lock and we should wait again.
     // If lock is acquired, an acquire fence is needed to make all memory accesses
-    // made by other threads visible in current CPU.
-    const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
+    // made by other threads visible to the current CPU.
     while (atomic_exchange_explicit(mutex_value_ptr, locked_contended,
                                     memory_order_acquire) != unlocked) {
-        __futex_wait_ex(mutex_value_ptr, shared, locked_contended, NULL);
+        timespec ts;
+        timespec* rel_timeout = NULL;
+        if (abs_timeout_or_null != NULL) {
+            rel_timeout = &ts;
+            if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, clock)) {
+                return ETIMEDOUT;
+            }
+        }
+        if (__futex_wait_ex(mutex_value_ptr, shared, locked_contended, rel_timeout) == -ETIMEDOUT) {
+            return ETIMEDOUT;
+        }
     }
+    return 0;
 }
 
 /*
  * Release a mutex of type NORMAL. The caller is responsible for determining
  * that we are in fact the owner of this lock.
  */
-static inline void _normal_mutex_unlock(atomic_int* mutex_value_ptr, int shared) {
+static inline void __pthread_normal_mutex_unlock(atomic_int* mutex_value_ptr, int shared) {
     const int unlocked = shared | MUTEX_STATE_BITS_UNLOCKED;
     const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
 
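Note: __futex_wait_ex takes a relative timeout, so the absolute deadline is re-converted on every iteration of the wait loop. bionic's timespec_from_absolute_timespec is not shown in this diff; the sketch below only illustrates what such a conversion typically does and is not that helper's implementation:

#include <ctime>

// Returns false when the absolute deadline has already passed.
static bool abs_to_rel_sketch(timespec& rel, const timespec& abs, clockid_t clock) {
  timespec now;
  clock_gettime(clock, &now);
  rel.tv_sec = abs.tv_sec - now.tv_sec;
  rel.tv_nsec = abs.tv_nsec - now.tv_nsec;
  if (rel.tv_nsec < 0) {
    rel.tv_nsec += 1000000000;  // borrow one second
    --rel.tv_sec;
  }
  return rel.tv_sec >= 0;
}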
@@ -362,25 +376,13 @@ static inline void _normal_mutex_unlock(atomic_int* mutex_value_ptr, int shared)
     }
 }
 
-/* This common inlined function is used to increment the counter of an
- * errorcheck or recursive mutex.
+/* This common inlined function is used to increment the counter of a recursive mutex.
  *
- * For errorcheck mutexes, it will return EDEADLK
- * If the counter overflows, it will return EAGAIN
- * Otherwise, it atomically increments the counter and returns 0
- * after providing an acquire barrier.
+ * If the counter overflows, it will return EAGAIN.
+ * Otherwise, it atomically increments the counter and returns 0.
  *
- * mtype is the current mutex type
- * mvalue is the current mutex value (already loaded)
- * mutex pointers to the mutex.
  */
-static inline __always_inline
-int _recursive_increment(atomic_int* mutex_value_ptr, int mvalue, int mtype) {
-    if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
-        // Trying to re-lock a mutex we already acquired.
-        return EDEADLK;
-    }
-
+static inline int __recursive_increment(atomic_int* mutex_value_ptr, int mvalue) {
     // Detect recursive lock overflow and return EAGAIN.
     // This is safe because only the owner thread can modify the
     // counter bits in the mutex value.
@@ -393,15 +395,13 @@ int _recursive_increment(atomic_int* mutex_value_ptr, int mvalue, int mtype) {
     // loop to update the counter. The counter will not overflow in the loop,
     // as only the owner thread can change it.
     // The mutex is still locked, so we don't need a release fence.
-    while (!atomic_compare_exchange_weak_explicit(mutex_value_ptr, &mvalue,
-                                                  mvalue + MUTEX_COUNTER_BITS_ONE,
-                                                  memory_order_relaxed,
-                                                  memory_order_relaxed)) { }
+    atomic_fetch_add_explicit(mutex_value_ptr, MUTEX_COUNTER_BITS_ONE, memory_order_relaxed);
     return 0;
 }
 
-int pthread_mutex_lock(pthread_mutex_t* mutex) {
-    atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
+static int __pthread_mutex_lock_with_timeout(pthread_mutex_t* mutex,
+                                             const timespec* abs_timeout_or_null, clockid_t clock) {
+    atomic_int* mutex_value_ptr = get_mutex_value_pointer(mutex);
 
     int mvalue, mtype, tid, shared;
 
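Note: because only the owner thread can change the counter bits while it holds the mutex, the old compare-exchange loop can be collapsed into one relaxed fetch-add with no ordering or retry. Illustrative sketch (the constant below is made up; the real one is MUTEX_COUNTER_BITS_ONE in pthread_mutex.cpp):

#include <atomic>

constexpr int kCounterOne = 1 << 2;  // hypothetical position of the recursion counter field

static void recursive_increment_sketch(std::atomic<int>& state) {
  // No competing writer exists for these bits while we hold the lock,
  // so a relaxed read-modify-write is sufficient.
  state.fetch_add(kCounterOne, std::memory_order_relaxed);
}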
@@ -411,24 +411,28 @@ int pthread_mutex_lock(pthread_mutex_t* mutex) {
 
     // Handle common case first.
     if ( __predict_true(mtype == MUTEX_TYPE_BITS_NORMAL) ) {
-        _normal_mutex_lock(mutex_value_ptr, shared);
-        return 0;
+        return __pthread_normal_mutex_lock(mutex_value_ptr, shared, abs_timeout_or_null, clock);
     }
 
     // Do we already own this recursive or error-check mutex?
     tid = __get_thread()->tid;
-    if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
-        return _recursive_increment(mutex_value_ptr, mvalue, mtype);
+    if (tid == MUTEX_OWNER_FROM_BITS(mvalue)) {
+        if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
+            return EDEADLK;
+        }
+        return __recursive_increment(mutex_value_ptr, mvalue);
+    }
 
-    // Add in shared state to avoid extra 'or' operations below.
-    mtype |= shared;
+    const int unlocked = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
+    const int locked_uncontended = mtype | shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
+    const int locked_contended = mtype | shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
 
     // First, if the mutex is unlocked, try to quickly acquire it.
     // In the optimistic case where this works, set the state to locked_uncontended.
-    if (mvalue == mtype) {
-        int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
-        // If exchanged successfully, An acquire fence is required to make
-        // all memory accesses made by other threads visible in current CPU.
+    if (mvalue == unlocked) {
+        int newval = MUTEX_OWNER_TO_BITS(tid) | locked_uncontended;
+        // If exchanged successfully, an acquire fence is required to make
+        // all memory accesses made by other threads visible to the current CPU.
         if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue,
                                newval, memory_order_acquire, memory_order_relaxed))) {
             return 0;
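Note: the type and shared bits never change after init, so the three interesting whole-word values differ only in the state field and can be computed once instead of re-or'ing bits in the loop. Sketch with invented bit values (the real layout is defined by the MUTEX_*_BITS macros, not shown in this diff):

constexpr int kStateUnlocked          = 0;
constexpr int kStateLockedUncontended = 1;
constexpr int kStateLockedContended   = 2;

struct LockStates {
  int unlocked, locked_uncontended, locked_contended;
};

constexpr LockStates precompute_states(int mtype, int shared) {
  return { mtype | shared | kStateUnlocked,
           mtype | shared | kStateLockedUncontended,
           mtype | shared | kStateLockedContended };
}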
@@ -438,16 +442,14 @@ int pthread_mutex_lock(pthread_mutex_t* mutex) {
     ScopedTrace trace("Contending for pthread mutex");
 
     while (true) {
-        if (mvalue == mtype) {
-            // If the mutex is unlocked, its value should be 'mtype' and
-            // we try to acquire it by setting its owner and state atomically.
+        if (mvalue == unlocked) {
             // NOTE: We put the state to locked_contended since we _know_ there
             // is contention when we are in this loop. This ensures all waiters
             // will be unlocked.
 
-            int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
-            // If exchanged successfully, An acquire fence is required to make
-            // all memory accesses made by other threads visible in current CPU.
+            int newval = MUTEX_OWNER_TO_BITS(tid) | locked_contended;
+            // If exchanged successfully, an acquire fence is required to make
+            // all memory accesses made by other threads visible to the current CPU.
             if (__predict_true(atomic_compare_exchange_weak_explicit(mutex_value_ptr,
                                    &mvalue, newval,
                                    memory_order_acquire,
@@ -456,8 +458,7 @@ int pthread_mutex_lock(pthread_mutex_t* mutex) {
             }
             continue;
         } else if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
-            // The mutex is already locked by another thread, if the state is locked_uncontended,
-            // we should set it to locked_contended beforing going to sleep. This can make
+            // We should set it to locked_contended beforing going to sleep. This can make
             // sure waiters will be woken up eventually.
 
             int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
@@ -470,14 +471,39 @@ int pthread_mutex_lock(pthread_mutex_t* mutex) {
             mvalue = newval;
         }
 
-        // We are in locked_contended state, sleep until someone wake us up.
-        __futex_wait_ex(mutex_value_ptr, shared, mvalue, NULL);
+        // We are in locked_contended state, sleep until someone wakes us up.
+        timespec ts;
+        timespec* rel_timeout = NULL;
+        if (abs_timeout_or_null != NULL) {
+            rel_timeout = &ts;
+            if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, clock)) {
+                return ETIMEDOUT;
+            }
+        }
+        if (__futex_wait_ex(mutex_value_ptr, shared, mvalue, rel_timeout) == -ETIMEDOUT) {
+            return ETIMEDOUT;
+        }
         mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
     }
 }
 
+int pthread_mutex_lock(pthread_mutex_t* mutex) {
+    atomic_int* mutex_value_ptr = get_mutex_value_pointer(mutex);
+
+    int mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
+    int mtype = (mvalue & MUTEX_TYPE_MASK);
+    int shared = (mvalue & MUTEX_SHARED_MASK);
+    // Avoid slowing down fast path of normal mutex lock operation.
+    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
+        if (__predict_true(__pthread_normal_mutex_trylock(mutex_value_ptr, shared) == 0)) {
+            return 0;
+        }
+    }
+    return __pthread_mutex_lock_with_timeout(mutex, NULL, 0);
+}
+
 int pthread_mutex_unlock(pthread_mutex_t* mutex) {
-    atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
+    atomic_int* mutex_value_ptr = get_mutex_value_pointer(mutex);
 
     int mvalue, mtype, tid, shared;
 
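Note: pthread_mutex_lock now tries the uncontended path inline and forwards everything else, including the timed variants, to one shared slow path with a null timeout. A rough sketch of that shape (illustrative types and names, not bionic's exact signatures):

#include <atomic>
#include <ctime>

int lock_sketch(std::atomic<int>& state, int unlocked, int locked,
                int (*slow_path)(std::atomic<int>&, const timespec*)) {
  int expected = unlocked;
  if (state.compare_exchange_strong(expected, locked,
                                    std::memory_order_acquire,
                                    std::memory_order_relaxed)) {
    return 0;                        // fast path: no contention, no slow-path call
  }
  return slow_path(state, nullptr);  // nullptr timeout means block indefinitely
}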
@@ -487,7 +513,7 @@ int pthread_mutex_unlock(pthread_mutex_t* mutex) {
 
     // Handle common case first.
     if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
-        _normal_mutex_unlock(mutex_value_ptr, shared);
+        __pthread_normal_mutex_unlock(mutex_value_ptr, shared);
         return 0;
     }
 
@@ -501,10 +527,7 @@ int pthread_mutex_unlock(pthread_mutex_t* mutex) {
     // lower state bits), use a compare_exchange loop to do it.
     if (!MUTEX_COUNTER_BITS_IS_ZERO(mvalue)) {
         // We still own the mutex, so a release fence is not needed.
-        while (!atomic_compare_exchange_weak_explicit(mutex_value_ptr, &mvalue,
-                                                      mvalue - MUTEX_COUNTER_BITS_ONE,
-                                                      memory_order_relaxed,
-                                                      memory_order_relaxed)) { }
+        atomic_fetch_sub_explicit(mutex_value_ptr, MUTEX_COUNTER_BITS_ONE, memory_order_relaxed);
         return 0;
     }
 
@@ -514,9 +537,8 @@ int pthread_mutex_unlock(pthread_mutex_t* mutex) {
     // to awake.
     // A release fence is required to make previous stores visible to next
     // lock owner threads.
-    mvalue = atomic_exchange_explicit(mutex_value_ptr,
-                                      mtype | shared | MUTEX_STATE_BITS_UNLOCKED,
-                                      memory_order_release);
+    const int unlocked = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
+    mvalue = atomic_exchange_explicit(mutex_value_ptr, unlocked, memory_order_release);
     if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
         __futex_wake_ex(mutex_value_ptr, shared, 1);
     }
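Note: unlock stays a single exchange with release semantics; the old value both publishes the critical section and tells us whether anyone could be sleeping, so a wake-up is only paid for when the state was contended. Sketch, with a stand-in for __futex_wake_ex:

#include <atomic>

// Stand-in for bionic's __futex_wake_ex; a real implementation wakes one
// thread sleeping on the lock word.
static void wake_one_waiter(std::atomic<int>* /*state*/) {}

static void unlock_sketch(std::atomic<int>& state, int unlocked, int locked_contended) {
  int old_state = state.exchange(unlocked, std::memory_order_release);
  if (old_state == locked_contended) {
    wake_one_waiter(&state);  // only needed if a waiter may be blocked
  }
}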
@@ -525,25 +547,18 @@ int pthread_mutex_unlock(pthread_mutex_t* mutex) {
 }
 
 int pthread_mutex_trylock(pthread_mutex_t* mutex) {
-    atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
+    atomic_int* mutex_value_ptr = get_mutex_value_pointer(mutex);
 
     int mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
     int mtype = (mvalue & MUTEX_TYPE_MASK);
     int shared = (mvalue & MUTEX_SHARED_MASK);
 
+    const int unlocked = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
+    const int locked_uncontended = mtype | shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
+
     // Handle common case first.
     if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
-        mvalue = shared | MUTEX_STATE_BITS_UNLOCKED;
-        // If exchanged successfully, An acquire fence is required to make
-        // all memory accesses made by other threads visible in current CPU.
-        if (atomic_compare_exchange_strong_explicit(mutex_value_ptr,
-                                                    &mvalue,
-                                                    shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED,
-                                                    memory_order_acquire,
-                                                    memory_order_relaxed)) {
-            return 0;
-        }
-        return EBUSY;
+        return __pthread_normal_mutex_trylock(mutex_value_ptr, shared);
     }
 
     // Do we already own this recursive or error-check mutex?
@@ -552,19 +567,17 @@ int pthread_mutex_trylock(pthread_mutex_t* mutex) {
         if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
             return EBUSY;
         }
-        return _recursive_increment(mutex_value_ptr, mvalue, mtype);
+        return __recursive_increment(mutex_value_ptr, mvalue);
     }
 
     // Same as pthread_mutex_lock, except that we don't want to wait, and
     // the only operation that can succeed is a single compare_exchange to acquire the
     // lock if it is released / not owned by anyone. No need for a complex loop.
-    // If exchanged successfully, An acquire fence is required to make
-    // all memory accesses made by other threads visible in current CPU.
-    mtype |= shared | MUTEX_STATE_BITS_UNLOCKED;
-    mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
-    if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr,
-                                                               &mtype, mvalue,
+    // If exchanged successfully, an acquire fence is required to make
+    // all memory accesses made by other threads visible to the current CPU.
+    mvalue = unlocked;
+    int newval = MUTEX_OWNER_TO_BITS(tid) | locked_uncontended;
+    if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue, newval,
                                                                memory_order_acquire,
                                                                memory_order_relaxed))) {
         return 0;
@@ -572,112 +585,6 @@ int pthread_mutex_trylock(pthread_mutex_t* mutex) {
     return EBUSY;
 }
 
-static int __pthread_mutex_timedlock(pthread_mutex_t* mutex, const timespec* abs_ts, clockid_t clock) {
-    atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
-
-    timespec ts;
-
-    int mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
-    int mtype = (mvalue & MUTEX_TYPE_MASK);
-    int shared = (mvalue & MUTEX_SHARED_MASK);
-
-    // Handle common case first.
-    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
-        const int unlocked = shared | MUTEX_STATE_BITS_UNLOCKED;
-        const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
-        const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
-
-        // If exchanged successfully, An acquire fence is required to make
-        // all memory accesses made by other threads visible in current CPU.
-        mvalue = unlocked;
-        if (atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue, locked_uncontended,
-                                                    memory_order_acquire, memory_order_relaxed)) {
-            return 0;
-        }
-
-        ScopedTrace trace("Contending for timed pthread mutex");
-
-        // Same as pthread_mutex_lock, except that we can only wait for a specified
-        // time interval. If lock is acquired, an acquire fence is needed to make
-        // all memory accesses made by other threads visible in current CPU.
-        while (atomic_exchange_explicit(mutex_value_ptr, locked_contended,
-                                        memory_order_acquire) != unlocked) {
-            if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
-                return ETIMEDOUT;
-            }
-            __futex_wait_ex(mutex_value_ptr, shared, locked_contended, &ts);
-        }
-
-        return 0;
-    }
-
-    // Do we already own this recursive or error-check mutex?
-    pid_t tid = __get_thread()->tid;
-    if (tid == MUTEX_OWNER_FROM_BITS(mvalue)) {
-        return _recursive_increment(mutex_value_ptr, mvalue, mtype);
-    }
-
-    mtype |= shared;
-
-    // First try a quick lock.
-    if (mvalue == mtype) {
-        int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
-        // If exchanged successfully, An acquire fence is required to make
-        // all memory accesses made by other threads visible in current CPU.
-        if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr,
-                                                                   &mvalue, newval,
-                                                                   memory_order_acquire,
-                                                                   memory_order_relaxed))) {
-            return 0;
-        }
-    }
-
-    ScopedTrace trace("Contending for timed pthread mutex");
-
-    // The following implements the same loop as pthread_mutex_lock,
-    // but adds checks to ensure that the operation never exceeds the
-    // absolute expiration time.
-    while (true) {
-        if (mvalue == mtype) { // Unlocked.
-            int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
-            // An acquire fence is needed for successful exchange.
-            if (!atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue, newval,
-                                                         memory_order_acquire,
-                                                         memory_order_relaxed)) {
-                goto check_time;
-            }
-
-            return 0;
-        } else if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
-            // The value is locked. If the state is locked_uncontended, we need to switch
-            // it to locked_contended before sleep, so we can get woken up later.
-            int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
-            if (!atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue, newval,
-                                                         memory_order_relaxed,
-                                                         memory_order_relaxed)) {
-                goto check_time;
-            }
-            mvalue = newval;
-        }
-
-        if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
-            return ETIMEDOUT;
-        }
-
-        if (__futex_wait_ex(mutex_value_ptr, shared, mvalue, &ts) == -ETIMEDOUT) {
-            return ETIMEDOUT;
-        }
-
-check_time:
-        if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
-            return ETIMEDOUT;
-        }
-        // After futex_wait or time costly timespec_from_absolte_timespec,
-        // we'd better read mvalue again in case it is changed.
-        mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
-    }
-}
-
 #if !defined(__LP64__)
 extern "C" int pthread_mutex_lock_timeout_np(pthread_mutex_t* mutex, unsigned ms) {
     timespec abs_timeout;
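Note: with the duplicated __pthread_mutex_timedlock removed, pthread_mutex_timedlock() becomes a thin call into the shared slow path using CLOCK_REALTIME, as POSIX specifies. Typical caller-side usage of the public API, for reference:

#include <pthread.h>
#include <ctime>

int lock_with_100ms_deadline(pthread_mutex_t* m) {
  timespec deadline;
  clock_gettime(CLOCK_REALTIME, &deadline);       // absolute deadline, realtime clock
  deadline.tv_nsec += 100 * 1000 * 1000;          // 100ms from now
  if (deadline.tv_nsec >= 1000000000) {
    deadline.tv_nsec -= 1000000000;
    ++deadline.tv_sec;
  }
  return pthread_mutex_timedlock(m, &deadline);   // 0 on success, ETIMEDOUT on timeout
}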
@@ -689,7 +596,7 @@ extern "C" int pthread_mutex_lock_timeout_np(pthread_mutex_t* mutex, unsigned ms
         abs_timeout.tv_nsec -= NS_PER_S;
     }
 
-    int error = __pthread_mutex_timedlock(mutex, &abs_timeout, CLOCK_MONOTONIC);
+    int error = __pthread_mutex_lock_with_timeout(mutex, &abs_timeout, CLOCK_MONOTONIC);
     if (error == ETIMEDOUT) {
         error = EBUSY;
     }
@@ -698,7 +605,7 @@ extern "C" int pthread_mutex_lock_timeout_np(pthread_mutex_t* mutex, unsigned ms
 #endif
 
 int pthread_mutex_timedlock(pthread_mutex_t* mutex, const timespec* abs_timeout) {
-    return __pthread_mutex_timedlock(mutex, abs_timeout, CLOCK_REALTIME);
+    return __pthread_mutex_lock_with_timeout(mutex, abs_timeout, CLOCK_REALTIME);
 }
 
 int pthread_mutex_destroy(pthread_mutex_t* mutex) {
@@ -708,7 +615,7 @@ int pthread_mutex_destroy(pthread_mutex_t* mutex) {
         return error;
     }
 
-    atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
+    atomic_int* mutex_value_ptr = get_mutex_value_pointer(mutex);
     atomic_store_explicit(mutex_value_ptr, 0xdead10cc, memory_order_relaxed);
     return 0;
 }
@@ -732,8 +732,10 @@ TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
   pthread_t thread;
   ASSERT_EQ(0, pthread_create(&thread, NULL,
     reinterpret_cast<void* (*)(void*)>(pthread_rwlock_reader_wakeup_writer_helper), &wakeup_arg));
-  sleep(1);
-  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);
+  while (wakeup_arg.progress != RwlockWakeupHelperArg::LOCK_WAITING) {
+    usleep(5000);
+  }
+  usleep(5000);
   wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
   ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
 
|
|||||||
pthread_t thread;
|
pthread_t thread;
|
||||||
ASSERT_EQ(0, pthread_create(&thread, NULL,
|
ASSERT_EQ(0, pthread_create(&thread, NULL,
|
||||||
reinterpret_cast<void* (*)(void*)>(pthread_rwlock_writer_wakeup_reader_helper), &wakeup_arg));
|
reinterpret_cast<void* (*)(void*)>(pthread_rwlock_writer_wakeup_reader_helper), &wakeup_arg));
|
||||||
sleep(1);
|
while (wakeup_arg.progress != RwlockWakeupHelperArg::LOCK_WAITING) {
|
||||||
ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);
|
usleep(5000);
|
||||||
|
}
|
||||||
|
usleep(5000);
|
||||||
wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
|
wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
|
||||||
ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
|
ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
|
||||||
|
|
||||||
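Note: the rwlock tests (and the new mutex wakeup tests below, presumably in bionic's pthread tests) stop guessing with a fixed sleep(1) and instead poll a progress flag until the helper thread is definitely waiting, which removes a flaky timing assumption. The pattern in isolation (5ms period as in the tests; the trailing usleep gives the helper time to actually block on the lock):

#include <atomic>
#include <unistd.h>

enum Progress { STARTED, WAITING, RELEASED };

void wait_until(const std::atomic<Progress>& progress, Progress expected) {
  while (progress.load() != expected) {
    usleep(5000);   // poll every 5ms instead of sleeping a fixed second
  }
  usleep(5000);     // small grace period so the helper can block on the lock
}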
@@ -1177,15 +1181,21 @@ TEST(pthread, pthread_mutexattr_gettype) {
   ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
   ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
   ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);
+
+  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
+}
+
+static void CreateMutex(pthread_mutex_t& mutex, int mutex_type) {
+  pthread_mutexattr_t attr;
+  ASSERT_EQ(0, pthread_mutexattr_init(&attr));
+  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
+  ASSERT_EQ(0, pthread_mutex_init(&mutex, &attr));
+  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
 }
 
 TEST(pthread, pthread_mutex_lock_NORMAL) {
-  pthread_mutexattr_t attr;
-  ASSERT_EQ(0, pthread_mutexattr_init(&attr));
-  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
-
   pthread_mutex_t lock;
-  ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
+  CreateMutex(lock, PTHREAD_MUTEX_NORMAL);
 
   ASSERT_EQ(0, pthread_mutex_lock(&lock));
   ASSERT_EQ(0, pthread_mutex_unlock(&lock));
@@ -1193,12 +1203,8 @@ TEST(pthread, pthread_mutex_lock_NORMAL) {
 }
 
 TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
-  pthread_mutexattr_t attr;
-  ASSERT_EQ(0, pthread_mutexattr_init(&attr));
-  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
-
   pthread_mutex_t lock;
-  ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
+  CreateMutex(lock, PTHREAD_MUTEX_ERRORCHECK);
 
   ASSERT_EQ(0, pthread_mutex_lock(&lock));
   ASSERT_EQ(EDEADLK, pthread_mutex_lock(&lock));
@@ -1211,12 +1217,8 @@ TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
 }
 
 TEST(pthread, pthread_mutex_lock_RECURSIVE) {
-  pthread_mutexattr_t attr;
-  ASSERT_EQ(0, pthread_mutexattr_init(&attr));
-  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
-
   pthread_mutex_t lock;
-  ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
+  CreateMutex(lock, PTHREAD_MUTEX_RECURSIVE);
 
   ASSERT_EQ(0, pthread_mutex_lock(&lock));
   ASSERT_EQ(0, pthread_mutex_lock(&lock));
@@ -1228,6 +1230,66 @@ TEST(pthread, pthread_mutex_lock_RECURSIVE) {
   ASSERT_EQ(0, pthread_mutex_destroy(&lock));
 }
 
+class MutexWakeupHelper {
+ private:
+  pthread_mutex_t mutex;
+  enum Progress {
+    LOCK_INITIALIZED,
+    LOCK_WAITING,
+    LOCK_RELEASED,
+    LOCK_ACCESSED
+  };
+  std::atomic<Progress> progress;
+
+  static void thread_fn(MutexWakeupHelper* helper) {
+    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
+    helper->progress = LOCK_WAITING;
+
+    ASSERT_EQ(0, pthread_mutex_lock(&helper->mutex));
+    ASSERT_EQ(LOCK_RELEASED, helper->progress);
+    ASSERT_EQ(0, pthread_mutex_unlock(&helper->mutex));
+
+    helper->progress = LOCK_ACCESSED;
+  }
+
+ public:
+  void test(int mutex_type) {
+    CreateMutex(mutex, mutex_type);
+    ASSERT_EQ(0, pthread_mutex_lock(&mutex));
+    progress = LOCK_INITIALIZED;
+
+    pthread_t thread;
+    ASSERT_EQ(0, pthread_create(&thread, NULL,
+      reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));
+
+    while (progress != LOCK_WAITING) {
+      usleep(5000);
+    }
+    usleep(5000);
+    progress = LOCK_RELEASED;
+    ASSERT_EQ(0, pthread_mutex_unlock(&mutex));
+
+    ASSERT_EQ(0, pthread_join(thread, NULL));
+    ASSERT_EQ(LOCK_ACCESSED, progress);
+    ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
+  }
+};
+
+TEST(pthread, pthread_mutex_NORMAL_wakeup) {
+  MutexWakeupHelper helper;
+  helper.test(PTHREAD_MUTEX_NORMAL);
+}
+
+TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
+  MutexWakeupHelper helper;
+  helper.test(PTHREAD_MUTEX_ERRORCHECK);
+}
+
+TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
+  MutexWakeupHelper helper;
+  helper.test(PTHREAD_MUTEX_RECURSIVE);
+}
+
 TEST(pthread, pthread_mutex_owner_tid_limit) {
   FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
   ASSERT_TRUE(fp != NULL);