libc: Fix recursive mutex lock implementation.

This fixes a bug that was introduced by the latest pthread optimization.
It occurs when a recursive lock is contended by several threads: the
atomic counter increment in _recursive_increment() could be wiped out
by an unconditional write performed in pthread_mutex_lock() to update
the value's lower bits and flag contention.
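
In miniature, the lost update looks like this (a hypothetical,
single-threaded re-enactment of the interleaving; COUNTER_SHIFT and
CONTENDED_BIT below are illustrative stand-ins, not bionic's real
constants):

    /* lost_update.c - hypothetical sketch of the race, replayed
     * deterministically in a single thread. Not bionic's real code. */
    #include <stdio.h>

    #define COUNTER_SHIFT  2         /* recursion counter above bit 1  */
    #define CONTENDED_BIT  (1 << 1)  /* waiter sets this bit to flag
                                      * contention                     */

    int main(void) {
        int value = 1 << COUNTER_SHIFT;  /* owner holds lock, count 1  */

        int stale = value;               /* waiter reads the mutex word */

        value += 1 << COUNTER_SHIFT;     /* owner re-locks: the counter
                                          * is atomically bumped to 2  */

        value = stale | CONTENDED_BIT;   /* waiter's unconditional write
                                          * stores the stale word back,
                                          * losing the increment       */

        printf("counter = %d, expected 2\n", value >> COUNTER_SHIFT);
        return 0;
    }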

This patch re-introduces the use of the global recursive lock in
_recursive_increment(). This costs some performance, but a future patch
will remove the global lock again.

Change-Id: Ie22069d376cebf2e7d613ba00b6871567f333544
David 'Digit' Turner 2012-01-24 13:20:38 +01:00
parent 79fcc6948d
commit b57db7581c

@@ -1001,6 +1001,20 @@ _normal_unlock(pthread_mutex_t* mutex, int shared)
     }
 }
 
+static pthread_mutex_t  __recursive_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static __inline__ void
+_recursive_lock(void)
+{
+    _normal_lock(&__recursive_lock, 0);
+}
+
+static __inline__ void
+_recursive_unlock(void)
+{
+    _normal_unlock(&__recursive_lock, 0);
+}
+
 /* This common inlined function is used to increment the counter of an
  * errorcheck or recursive mutex.
  *
@@ -1023,43 +1037,30 @@ _recursive_increment(pthread_mutex_t* mutex, int mvalue, int mtype)
     /* Detect recursive lock overflow and return EAGAIN.
      * This is safe because only the owner thread can modify the
-     * counter bits in the mutes value
+     * counter bits in the mutex value.
      */
     if ((mvalue & MUTEX_COUNTER_MASK) == MUTEX_COUNTER_MASK) {
         return EAGAIN;
     }
 
     /* We own the mutex, but other threads are able to change
-     * the contents (e.g. promoting it to "contended" by changing
-     * its lower bits), so we need to atomically update it using
-     * a cmpxchg loop.
+     * the lower bits (e.g. promoting it to "contended"), so we
+     * need to use the recursive global lock to do that.
+     *
+     * The lock/unlock sequence also provides a full memory barrier
+     * so we don't need to add one here explicitly.
      */
-    for (;;) {
-        /* increment counter, overflow was already checked */
-        int newvalue = mvalue + (1 << MUTEX_COUNTER_SHIFT);
-        if (__likely(__bionic_cmpxchg(mvalue, newvalue, &mutex->value) == 0)) {
-            ANDROID_MEMBAR_FULL();
-            return 0;
-        }
-        /* argh, value was changed, by another thread which updated the 'state'
-         * field, so reload and try again.
-         */
-        mvalue = mutex->value;
-    }
-}
+    _recursive_lock();
 
-static pthread_mutex_t  __recursive_lock = PTHREAD_MUTEX_INITIALIZER;
+    /* increment counter, overflow was already checked */
+    /* NOTE: we need to reload the value since its lower bits could have
+     * been modified since the exit of _recursive_lock()
+     */
+    mutex->value = mutex->value + (1 << MUTEX_COUNTER_SHIFT);
 
-static __inline__ void
-_recursive_lock(void)
-{
-    _normal_lock(&__recursive_lock, 0);
-}
+    _recursive_unlock();
+    return 0;
+}
 
-static __inline__ void
-_recursive_unlock(void)
-{
-    _normal_unlock(&__recursive_lock, 0);
-}
-
 __LIBC_HIDDEN__
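
For reference, a minimal stress sketch of the scenario this patch
addresses: several threads contend for one recursive mutex and re-lock
it while holding it, which drives pthread_mutex_lock() through
_recursive_increment() under contention. This test program is written
for this note and is not part of the patch; with the fix applied it
should always print the expected count.

    /* recursive_stress.c - illustrative test, not part of the patch. */
    #include <pthread.h>
    #include <stdio.h>

    #define THREADS     4
    #define ITERATIONS  100000

    static pthread_mutex_t m;
    static long counter;

    static void* worker(void* unused) {
        (void)unused;
        for (int i = 0; i < ITERATIONS; i++) {
            pthread_mutex_lock(&m);
            pthread_mutex_lock(&m);   /* recursive re-lock while other
                                       * threads may flag contention  */
            counter++;
            pthread_mutex_unlock(&m);
            pthread_mutex_unlock(&m);
        }
        return NULL;
    }

    int main(void) {
        pthread_mutexattr_t attr;
        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&m, &attr);

        pthread_t t[THREADS];
        for (int i = 0; i < THREADS; i++)
            pthread_create(&t[i], NULL, worker, NULL);
        for (int i = 0; i < THREADS; i++)
            pthread_join(t[i], NULL);

        printf("counter = %ld, expected %d\n", counter,
               THREADS * ITERATIONS);
        return 0;
    }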