Merge "Fix bug for recursive/errorcheck mutex on 32-bit devices."
@@ -392,6 +392,30 @@ static inline __always_inline int __recursive_increment(pthread_mutex_internal_t
     return 0;
 }
 
+static inline __always_inline int __recursive_or_errorcheck_mutex_wait(
+                                                      pthread_mutex_internal_t* mutex,
+                                                      uint16_t shared,
+                                                      uint16_t old_state,
+                                                      const timespec* rel_timeout) {
+// __futex_wait always waits on a 32-bit value. But state is 16-bit. For a normal mutex, the owner_tid
+// field in mutex is not used. On 64-bit devices, the __pad field in mutex is not used.
+// But when a recursive or errorcheck mutex is used on 32-bit devices, we need to add the
+// owner_tid value in the value argument for __futex_wait, otherwise we may always get EAGAIN error.
+
+#if defined(__LP64__)
+  return __futex_wait_ex(&mutex->state, shared, old_state, rel_timeout);
+
+#else
+  // This implementation works only when the layout of pthread_mutex_internal_t matches below expectation.
+  // And it is based on the assumption that Android is always in little-endian devices.
+  static_assert(offsetof(pthread_mutex_internal_t, state) == 0, "");
+  static_assert(offsetof(pthread_mutex_internal_t, owner_tid) == 2, "");
+
+  uint32_t owner_tid = atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed);
+  return __futex_wait_ex(&mutex->state, shared, (owner_tid << 16) | old_state, rel_timeout);
+#endif
+}
+
 static int __pthread_mutex_lock_with_timeout(pthread_mutex_internal_t* mutex,
                                              const timespec* abs_timeout_or_null, clockid_t clock) {
     uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
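The two static_asserts above pin down why (owner_tid << 16) | old_state is the right 32-bit value on little-endian hardware: the 16-bit state sits at offset 0 and the 16-bit owner_tid at offset 2, so together they occupy the single 32-bit word the kernel compares against during a futex wait. A minimal stand-alone sketch of that packing; the MiniMutex type and the field values are illustrative, not the real pthread_mutex_internal_t:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustrative layout only: a 16-bit state at offset 0 and a 16-bit owner_tid at
// offset 2, mirroring the offsets asserted by the static_asserts in the patch.
struct MiniMutex {
  uint16_t state;
  uint16_t owner_tid;
};

int main() {
  MiniMutex m = { 0x0102, 0x00ab };  // state = 0x0102, owner_tid = 0x00ab

  // A futex wait compares a full 32-bit word at the futex address.
  uint32_t word_in_memory;
  memcpy(&word_in_memory, &m, sizeof(word_in_memory));

  // On a little-endian device this word equals (owner_tid << 16) | state, which
  // is the value the 32-bit branch of the patch passes to __futex_wait_ex.
  uint32_t expected = (static_cast<uint32_t>(m.owner_tid) << 16) | m.state;

  printf("word=0x%08x expected=0x%08x match=%d\n",
         (unsigned) word_in_memory, (unsigned) expected,
         word_in_memory == expected ? 1 : 0);
  return 0;
}

Passing only the zero-extended 16-bit state, as the old 32-bit path did, cannot match this word whenever owner_tid is non-zero, which is the EAGAIN situation the comment in the patch describes.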
@@ -469,7 +493,7 @@ static int __pthread_mutex_lock_with_timeout(pthread_mutex_internal_t* mutex,
                 return ETIMEDOUT;
             }
         }
-        if (__futex_wait_ex(&mutex->state, shared, old_state, rel_timeout) == -ETIMEDOUT) {
+        if (__recursive_or_errorcheck_mutex_wait(mutex, shared, old_state, rel_timeout) == -ETIMEDOUT) {
            return ETIMEDOUT;
        }
        old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
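For context, a hypothetical user-level reproduction of the path the changed call sits on, using only the standard pthread API. A recursive mutex records its owner's tid in the mutex word, so a second thread doing a timed lock exercises exactly the 32-bit wait case fixed here; the thread structure and the two-second timeout below are illustrative, and the pre-fix behaviour in the comments follows the EAGAIN note in the patch:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

// Thread A holds a recursive mutex (so its owner_tid is recorded in the mutex
// word) while thread B does a timed lock. On the affected 32-bit path the
// waiter's futex value ignored owner_tid, so __futex_wait could keep returning
// EAGAIN; with the fix the waiter sleeps and times out cleanly.
static pthread_mutex_t g_mutex;

static void* timed_waiter(void*) {
  timespec abs_timeout;
  clock_gettime(CLOCK_REALTIME, &abs_timeout);
  abs_timeout.tv_sec += 2;  // give up after roughly two seconds

  int rc = pthread_mutex_timedlock(&g_mutex, &abs_timeout);
  printf("timedlock returned: %s\n", rc == ETIMEDOUT ? "ETIMEDOUT" : strerror(rc));
  if (rc == 0) pthread_mutex_unlock(&g_mutex);
  return nullptr;
}

int main() {
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init(&g_mutex, &attr);

  pthread_mutex_lock(&g_mutex);   // hold the mutex so the waiter must block

  pthread_t t;
  pthread_create(&t, nullptr, timed_waiter, nullptr);
  pthread_join(&t, nullptr);

  pthread_mutex_unlock(&g_mutex);
  pthread_mutex_destroy(&g_mutex);
  return 0;
}

Build with -lpthread. With the fix in place the waiter should sleep for the full timeout and report ETIMEDOUT, rather than repeatedly waking with EAGAIN as the patch comment says could happen before.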