bionic: pthread: use private futexes by default for mutexes and condvars

Private futexes are a recent kernel addition: faster futexes that cannot be
shared between processes. This patch uses them by default, unless the PROCESS_SHARED
attribute flag is used when creating a mutex and/or condition variable.

Also introduces pthread_condattr_init/destroy/setpshared/getpshared.

Change-Id: I3a0e2116f467072b046524cb5babc00e41057a53
This commit is contained in:
David 'Digit' Turner
2010-03-10 16:44:08 -08:00
parent 1cfbda826c
commit ba9c6f0989
6 changed files with 612 additions and 299 deletions

View File

@@ -43,6 +43,19 @@
#define FUTEX_WAIT 0 #define FUTEX_WAIT 0
#define FUTEX_WAKE 1 #define FUTEX_WAKE 1
/* Private futexes belong to a single address space and cannot be
* shared among processes. They are however significantly faster to
* operate than standard futexes.
*/
.global __futex_wait_private
.type __futex_wait_private, %function
.global __futex_wake_private
.type __futex_wake_private, %function
#define FUTEX_PRIVATE_FLAG 128
#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT|FUTEX_PRIVATE_FLAG)
#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE|FUTEX_PRIVATE_FLAG)
#if 1 #if 1
.equ kernel_cmpxchg, 0xFFFF0FC0 .equ kernel_cmpxchg, 0xFFFF0FC0
.equ kernel_atomic_base, 0xFFFF0FFF .equ kernel_atomic_base, 0xFFFF0FFF
@@ -185,6 +198,28 @@ __futex_wake:
ldmia sp!, {r4, r7} ldmia sp!, {r4, r7}
bx lr bx lr
__futex_wait_private:
.fnstart
stmdb sp!, {r4, r7}
.save {r4, r7}
mov r3, r2
mov r2, r1
mov r1, #FUTEX_WAIT_PRIVATE
ldr r7, =__NR_futex
swi #0
ldmia sp!, {r4, r7}
bx lr
.fnend
__futex_wake_private:
stmdb sp!, {r4, r7}
mov r2, r1
mov r1, #FUTEX_WAKE_PRIVATE
ldr r7, =__NR_futex
swi #0
ldmia sp!, {r4, r7}
bx lr
#else #else
__futex_wait: __futex_wait:
@@ -200,4 +235,17 @@ __futex_wake:
swi #__NR_futex swi #__NR_futex
bx lr bx lr
__futex_wait_private:
mov r3, r2
mov r2, r1
mov r1, #FUTEX_WAIT_PRIVATE
swi #__NR_futex
bx lr
__futex_wake_private:
mov r2, r1
mov r1, #FUTEX_WAKE_PRIVATE
swi #__NR_futex
bx lr
#endif #endif

View File

@@ -98,3 +98,21 @@ int __futex_wake(volatile void *ftx, int count)
{ {
return futex(ftx, FUTEX_WAKE, count, NULL, NULL, 0); return futex(ftx, FUTEX_WAKE, count, NULL, NULL, 0);
} }
/* Private futexes belong to a single address space and cannot be
* shared among processes. They are however significantly faster to
* operate than standard futexes.
*/
#define FUTEX_PRIVATE_FLAG 128
#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT|FUTEX_PRIVATE_FLAG)
#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE|FUTEX_PRIVATE_FLAG)
int __futex_wait_private(volatile void *ftx, int val, const struct timespec *timeout)
{
return futex(ftx, FUTEX_WAIT_PRIVATE, val, (void *)timeout, NULL, 0);
}
int __futex_wake_private(volatile void *ftx, int count)
{
return futex(ftx, FUTEX_WAKE_PRIVATE, count, NULL, NULL, 0);
}

View File

@@ -60,6 +60,43 @@ int __futex_wake(volatile void *ftx, int count)
return ret; return ret;
} }
/* Private futexes belong to a single address space and cannot be
* shared among processes. They are however significantly faster to
* operate than standard futexes.
*/
#define FUTEX_PRIVATE_FLAG 128
#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT|FUTEX_PRIVATE_FLAG)
#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE|FUTEX_PRIVATE_FLAG)
int __futex_wait_private(volatile void *ftx, int val)
{
int ret;
asm volatile (
"int $0x80;"
: "=a" (ret)
: "0" (FUTEX_SYSCALL),
"b" (ftx),
"c" (FUTEX_WAIT_PRIVATE),
"d" (val),
"S" (0)
);
return ret;
}
int __futex_wake_private(volatile void *ftx, int count)
{
int ret;
asm volatile (
"int $0x80;"
: "=a" (ret)
: "0" (FUTEX_SYSCALL),
"b" (ftx),
"c" (FUTEX_WAKE_PRIVATE),
"d" (count)
);
return ret;
}
int __atomic_cmpxchg(int old, int new, volatile int* addr) { int __atomic_cmpxchg(int old, int new, volatile int* addr) {
int xchg; int xchg;
asm volatile ( asm volatile (

View File

@@ -44,6 +44,9 @@
#include <assert.h> #include <assert.h>
#include <malloc.h> #include <malloc.h>
#define __likely(cond) __builtin_expect(!!(cond), 1)
#define __unlikely(cond) __builtin_expect(!!(cond), 0)
extern int __pthread_clone(int (*fn)(void*), void *child_stack, int flags, void *arg); extern int __pthread_clone(int (*fn)(void*), void *child_stack, int flags, void *arg);
extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode); extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode);
extern void _exit_thread(int retCode); extern void _exit_thread(int retCode);
@@ -712,6 +715,9 @@ int pthread_setschedparam(pthread_t thid, int policy,
int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout); int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout);
int __futex_wake(volatile void *ftx, int count); int __futex_wake(volatile void *ftx, int count);
int __futex_wait_private(volatile void *ftx, int val, const struct timespec *timeout);
int __futex_wake_private(volatile void *ftx, int count);
// mutex lock states // mutex lock states
// //
// 0: unlocked // 0: unlocked
@@ -723,7 +729,8 @@ int __futex_wake(volatile void *ftx, int count);
* bits: name description * bits: name description
* 31-16 tid owner thread's kernel id (recursive and errorcheck only) * 31-16 tid owner thread's kernel id (recursive and errorcheck only)
* 15-14 type mutex type * 15-14 type mutex type
* 13-2 counter counter of recursive mutexes * 13 sharing sharing flag
* 12-2 counter counter of recursive mutexes
* 1-0 state lock state (0, 1 or 2) * 1-0 state lock state (0, 1 or 2)
*/ */
@@ -737,10 +744,21 @@ int __futex_wake(volatile void *ftx, int count);
#define MUTEX_TYPE_ERRORCHECK 0x8000 #define MUTEX_TYPE_ERRORCHECK 0x8000
#define MUTEX_COUNTER_SHIFT 2 #define MUTEX_COUNTER_SHIFT 2
#define MUTEX_COUNTER_MASK 0x3ffc #define MUTEX_COUNTER_MASK 0x1ffc
#define MUTEX_SHARING_MASK 0x2000
#define MUTEX_IS_SHARED(m) (((m)->value & MUTEX_SHARING_MASK) != 0)
/* A mutex attribute stores the following in its fields:
*
* bits: name description
* 0-3 type type of thread (NORMAL/RECURSIVE/ERRORCHECK)
* 4 sharing 1 if shared, or 0 otherwise.
*/
#define MUTEXATTR_TYPE_MASK 0x0007
#define MUTEXATTR_SHARING_MASK 0x0010
int pthread_mutexattr_init(pthread_mutexattr_t *attr) int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{ {
@@ -764,11 +782,13 @@ int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type) int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{ {
if (attr && *attr >= PTHREAD_MUTEX_NORMAL && if (attr) {
*attr <= PTHREAD_MUTEX_ERRORCHECK ) { int atype = (*attr & MUTEXATTR_TYPE_MASK);
*type = *attr; if (atype >= PTHREAD_MUTEX_NORMAL && atype <= PTHREAD_MUTEX_ERRORCHECK) {
*type = atype;
return 0; return 0;
} }
}
return EINVAL; return EINVAL;
} }
@@ -776,7 +796,7 @@ int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{ {
if (attr && type >= PTHREAD_MUTEX_NORMAL && if (attr && type >= PTHREAD_MUTEX_NORMAL &&
type <= PTHREAD_MUTEX_ERRORCHECK ) { type <= PTHREAD_MUTEX_ERRORCHECK ) {
*attr = type; *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
return 0; return 0;
} }
return EINVAL; return EINVAL;
@@ -791,54 +811,70 @@ int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
switch (pshared) { switch (pshared) {
case PTHREAD_PROCESS_PRIVATE: case PTHREAD_PROCESS_PRIVATE:
*attr &= ~MUTEXATTR_SHARING_MASK;
return 0;
case PTHREAD_PROCESS_SHARED: case PTHREAD_PROCESS_SHARED:
/* our current implementation of pthread actually supports shared /* our current implementation of pthread actually supports shared
* mutexes but won't cleanup if a process dies with the mutex held. * mutexes but won't cleanup if a process dies with the mutex held.
* Nevertheless, it's better than nothing. Shared mutexes are used * Nevertheless, it's better than nothing. Shared mutexes are used
* by surfaceflinger and audioflinger. * by surfaceflinger and audioflinger.
*/ */
*attr |= MUTEXATTR_SHARING_MASK;
return 0; return 0;
} }
return ENOTSUP; return EINVAL;
} }
int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared) int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared)
{ {
if (!attr) if (!attr || !pshared)
return EINVAL; return EINVAL;
*pshared = PTHREAD_PROCESS_PRIVATE; *pshared = (*attr & MUTEXATTR_SHARING_MASK) ? PTHREAD_PROCESS_SHARED
: PTHREAD_PROCESS_PRIVATE;
return 0; return 0;
} }
int pthread_mutex_init(pthread_mutex_t *mutex, int pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *attr) const pthread_mutexattr_t *attr)
{ {
if ( mutex ) { int value = 0;
if (attr == NULL) {
mutex->value = MUTEX_TYPE_NORMAL;
return 0;
}
switch ( *attr ) {
case PTHREAD_MUTEX_NORMAL:
mutex->value = MUTEX_TYPE_NORMAL;
return 0;
case PTHREAD_MUTEX_RECURSIVE: if (__unlikely(mutex == NULL))
mutex->value = MUTEX_TYPE_RECURSIVE;
return 0;
case PTHREAD_MUTEX_ERRORCHECK:
mutex->value = MUTEX_TYPE_ERRORCHECK;
return 0;
}
}
return EINVAL; return EINVAL;
if (__likely(attr == NULL)) {
mutex->value = MUTEX_TYPE_NORMAL;
return 0;
}
if ((*attr & MUTEXATTR_SHARING_MASK) != 0)
value |= MUTEX_SHARING_MASK;
switch (*attr & MUTEXATTR_TYPE_MASK) {
case PTHREAD_MUTEX_NORMAL:
value |= MUTEX_TYPE_NORMAL;
break;
case PTHREAD_MUTEX_RECURSIVE:
value |= MUTEX_TYPE_RECURSIVE;
break;
case PTHREAD_MUTEX_ERRORCHECK:
value |= MUTEX_TYPE_ERRORCHECK;
break;
default:
return EINVAL;
}
mutex->value = value;
return 0;
} }
int pthread_mutex_destroy(pthread_mutex_t *mutex) int pthread_mutex_destroy(pthread_mutex_t *mutex)
{ {
if (__unlikely(mutex == NULL))
return EINVAL;
mutex->value = 0xdead10cc; mutex->value = 0xdead10cc;
return 0; return 0;
} }
@@ -855,17 +891,20 @@ int pthread_mutex_destroy(pthread_mutex_t *mutex)
* Non-recursive mutexes don't use the thread-id or counter fields, and the * Non-recursive mutexes don't use the thread-id or counter fields, and the
* "type" value is zero, so the only bits that will be set are the ones in * "type" value is zero, so the only bits that will be set are the ones in
* the lock state field. * the lock state field.
*
* This routine is used for both shared and private mutexes.
*/ */
static __inline__ void static __inline__ void
_normal_lock(pthread_mutex_t* mutex) _normal_lock(pthread_mutex_t* mutex)
{ {
if (__likely(!MUTEX_IS_SHARED(mutex))) {
/* /*
* The common case is an unlocked mutex, so we begin by trying to * The common case is an unlocked mutex, so we begin by trying to
* change the lock's state from 0 to 1. __atomic_cmpxchg() returns 0 * change the lock's state from 0 to 1. __atomic_cmpxchg() returns 0
* if it made the swap successfully. If the result is nonzero, this * if it made the swap successfully. If the result is nonzero, this
* lock is already held by another thread. * lock is already held by another thread.
*/ */
if (__atomic_cmpxchg(0, 1, &mutex->value ) != 0) { if (__atomic_cmpxchg(0, 1, &mutex->value) != 0) {
/* /*
* We want to go to sleep until the mutex is available, which * We want to go to sleep until the mutex is available, which
* requires promoting it to state 2. We need to swap in the new * requires promoting it to state 2. We need to swap in the new
@@ -883,7 +922,15 @@ _normal_lock(pthread_mutex_t* mutex)
* guarantees a wake-up call. * guarantees a wake-up call.
*/ */
while (__atomic_swap(2, &mutex->value ) != 0) while (__atomic_swap(2, &mutex->value ) != 0)
__futex_wait(&mutex->value, 2, 0); __futex_wait_private(&mutex->value, 2, 0);
}
} else {
/* Same algorithm, with the sharing bit flag set */
const int sharing = MUTEX_SHARING_MASK;
if (__atomic_cmpxchg(sharing|0, sharing|1, &mutex->value) != 0) {
while (__atomic_swap(sharing|2, &mutex->value ) != (sharing|0))
__futex_wait(&mutex->value, sharing|2, 0);
}
} }
} }
@@ -894,6 +941,7 @@ _normal_lock(pthread_mutex_t* mutex)
static __inline__ void static __inline__ void
_normal_unlock(pthread_mutex_t* mutex) _normal_unlock(pthread_mutex_t* mutex)
{ {
if (__likely(!MUTEX_IS_SHARED(mutex))) {
/* /*
* The mutex value will be 1 or (rarely) 2. We use an atomic decrement * The mutex value will be 1 or (rarely) 2. We use an atomic decrement
* to release the lock. __atomic_dec() returns the previous value; * to release the lock. __atomic_dec() returns the previous value;
@@ -937,8 +985,16 @@ _normal_unlock(pthread_mutex_t* mutex)
* Either way we have correct behavior and nobody is orphaned on * Either way we have correct behavior and nobody is orphaned on
* the wait queue. * the wait queue.
*/ */
__futex_wake_private(&mutex->value, 1);
}
} else {
/* Same algorithm with sharing bit flag set */
const int sharing = MUTEX_SHARING_MASK;
if (__atomic_dec(&mutex->value) != (sharing|1)) {
mutex->value = sharing;
__futex_wake(&mutex->value, 1); __futex_wake(&mutex->value, 1);
} }
}
} }
static pthread_mutex_t __recursive_lock = PTHREAD_MUTEX_INITIALIZER; static pthread_mutex_t __recursive_lock = PTHREAD_MUTEX_INITIALIZER;
@@ -955,22 +1011,26 @@ _recursive_unlock(void)
_normal_unlock( &__recursive_lock ); _normal_unlock( &__recursive_lock );
} }
#define __likely(cond) __builtin_expect(!!(cond), 1)
#define __unlikely(cond) __builtin_expect(!!(cond), 0)
int pthread_mutex_lock(pthread_mutex_t *mutex) int pthread_mutex_lock(pthread_mutex_t *mutex)
{ {
if (__likely(mutex != NULL)) int mtype, tid, new_lock_type, sharing;
{
int mtype = (mutex->value & MUTEX_TYPE_MASK);
if (__unlikely(mutex == NULL))
return EINVAL;
/* get mutex type */
mtype = (mutex->value & MUTEX_TYPE_MASK);
/* Handle normal mutexes quickly */
if ( __likely(mtype == MUTEX_TYPE_NORMAL) ) { if ( __likely(mtype == MUTEX_TYPE_NORMAL) ) {
_normal_lock(mutex); _normal_lock(mutex);
return 0;
} }
else
{
int tid = __get_thread()->kernel_id;
/* This is a recursive or error check mutex.
* Check that we don't already own it.
*/
tid = __get_thread()->kernel_id;
if ( tid == MUTEX_OWNER(mutex) ) if ( tid == MUTEX_OWNER(mutex) )
{ {
int oldv, counter; int oldv, counter;
@@ -989,14 +1049,24 @@ int pthread_mutex_lock(pthread_mutex_t *mutex)
counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK; counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter; mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
_recursive_unlock(); _recursive_unlock();
return 0;
} }
else
{ /* We don't own it, try to lock it.
/*
* If the new lock is available immediately, we grab it in * If the new lock is available immediately, we grab it in
* the "uncontended" state. * the "uncontended" state.
*/ */
int new_lock_type = 1; new_lock_type = 1;
sharing = (mutex->value & MUTEX_SHARING_MASK);
mtype |= sharing; /* restore sharing bit flag */
/* here, mtype corresponds to the uncontended value for the mutex,
* i.e. something like:
*
* <tid=0><type=?><sharing=?><counter=0><state=0>
*/
for (;;) { for (;;) {
int oldv; int oldv;
@@ -1022,78 +1092,91 @@ int pthread_mutex_lock(pthread_mutex_t *mutex)
*/ */
new_lock_type = 2; new_lock_type = 2;
__futex_wait( &mutex->value, oldv, 0 ); if (sharing) {
} __futex_wait(&mutex->value, oldv, 0);
} else {
__futex_wait_private(&mutex->value, oldv, 0);
} }
} }
return 0; return 0;
}
return EINVAL;
} }
int pthread_mutex_unlock(pthread_mutex_t *mutex) int pthread_mutex_unlock(pthread_mutex_t *mutex)
{ {
if (__likely(mutex != NULL)) int mtype, tid, sharing, oldv;
{
int mtype = (mutex->value & MUTEX_TYPE_MASK); if (__unlikely(mutex == NULL))
return EINVAL;
mtype = (mutex->value & MUTEX_TYPE_MASK);
if (__likely(mtype == MUTEX_TYPE_NORMAL)) { if (__likely(mtype == MUTEX_TYPE_NORMAL)) {
_normal_unlock(mutex); _normal_unlock(mutex);
return 0;
} }
else
{
int tid = __get_thread()->kernel_id;
if ( tid == MUTEX_OWNER(mutex) ) tid = __get_thread()->kernel_id;
{ sharing = (mutex->value & MUTEX_SHARING_MASK);
int oldv;
mtype |= sharing; /* restore sharing bit flag */
/* ensure that we own the mutex */
if (__unlikely(tid != MUTEX_OWNER(mutex)))
return EPERM;
/* decrement or unlock it */
_recursive_lock(); _recursive_lock();
oldv = mutex->value; oldv = mutex->value;
if (oldv & MUTEX_COUNTER_MASK) { if (oldv & MUTEX_COUNTER_MASK) {
/* decrement non-0 counter */
mutex->value = oldv - (1 << MUTEX_COUNTER_SHIFT); mutex->value = oldv - (1 << MUTEX_COUNTER_SHIFT);
oldv = 0; oldv = 0;
} else { } else {
/* counter was 0, revert to uncontended value */
mutex->value = mtype; mutex->value = mtype;
} }
_recursive_unlock(); _recursive_unlock();
if ((oldv & 3) == 2) /* if the mutex was contended, wake one waiting thread */
__futex_wake( &mutex->value, 1 ); if ((oldv & 3) == 2) {
} if (sharing) {
else { __futex_wake(&mutex->value, 1);
/* trying to unlock a lock we do not own */ } else {
return EPERM; __futex_wake_private(&mutex->value, 1);
} }
} }
return 0; return 0;
}
return EINVAL;
} }
int pthread_mutex_trylock(pthread_mutex_t *mutex) int pthread_mutex_trylock(pthread_mutex_t *mutex)
{ {
if (__likely(mutex != NULL)) int mtype, sharing, tid, oldv;
{
int mtype = (mutex->value & MUTEX_TYPE_MASK);
if (__unlikely(mutex == NULL))
return EINVAL;
mtype = (mutex->value & MUTEX_TYPE_MASK);
/* handle normal mutex first */
if ( __likely(mtype == MUTEX_TYPE_NORMAL) ) if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
{ {
if (__atomic_cmpxchg(0, 1, &mutex->value) == 0) int sharing = (mutex->value & MUTEX_SHARING_MASK);
if (__atomic_cmpxchg(sharing|0, sharing|1, &mutex->value) == 0)
return 0; return 0;
return EBUSY; return EBUSY;
} }
else
{ /* recursive or errorcheck mutex, do we already own it ? */
int tid = __get_thread()->kernel_id; tid = __get_thread()->kernel_id;
int oldv; sharing = mutex->value & MUTEX_SHARING_MASK;
if ( tid == MUTEX_OWNER(mutex) ) if ( tid == MUTEX_OWNER(mutex) )
{ {
int oldv, counter; int counter;
if (mtype == MUTEX_TYPE_ERRORCHECK) { if (mtype == MUTEX_TYPE_ERRORCHECK) {
/* already locked by ourselves */ /* already locked by ourselves */
@@ -1108,6 +1191,9 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex)
return 0; return 0;
} }
/* we don't own it, so try to get it */
mtype |= sharing;
/* try to lock it */ /* try to lock it */
_recursive_lock(); _recursive_lock();
oldv = mutex->value; oldv = mutex->value;
@@ -1119,9 +1205,6 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex)
return EBUSY; return EBUSY;
return 0; return 0;
}
}
return EINVAL;
} }
@@ -1164,16 +1247,21 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
clockid_t clock = CLOCK_MONOTONIC; clockid_t clock = CLOCK_MONOTONIC;
struct timespec abstime; struct timespec abstime;
struct timespec ts; struct timespec ts;
int mtype, tid, oldv, sharing, new_lock_type;
/* compute absolute expiration time */ /* compute absolute expiration time */
__timespec_to_relative_msec(&abstime, msecs, clock); __timespec_to_relative_msec(&abstime, msecs, clock);
if (__likely(mutex != NULL)) if (__unlikely(mutex == NULL))
{ return EINVAL;
int mtype = (mutex->value & MUTEX_TYPE_MASK);
/* handle normal mutexes first */
mtype = (mutex->value & MUTEX_TYPE_MASK);
if ( __likely(mtype == MUTEX_TYPE_NORMAL) ) if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
{ {
if (__likely(!MUTEX_IS_SHARED(mutex))) {
/* fast path for uncontended lock */ /* fast path for uncontended lock */
if (__atomic_cmpxchg(0, 1, &mutex->value) == 0) if (__atomic_cmpxchg(0, 1, &mutex->value) == 0)
return 0; return 0;
@@ -1183,18 +1271,30 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
if (__timespec_to_absolute(&ts, &abstime, clock) < 0) if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
return EBUSY; return EBUSY;
__futex_wait(&mutex->value, 2, &ts); __futex_wait_private(&mutex->value, 2, &ts);
}
} else /* sharing */ {
const int sharing = MUTEX_SHARING_MASK;
if (__atomic_cmpxchg(sharing|0, sharing|1, &mutex->value) == 0)
return 0;
/* loop while needed */
while (__atomic_swap(sharing|2, &mutex->value) != (sharing|0)) {
if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
return EBUSY;
__futex_wait(&mutex->value, sharing|2, &ts);
}
} }
return 0; return 0;
} }
else
{ /* recursive or errorcheck - do we own the mutex ? */
int tid = __get_thread()->kernel_id; tid = __get_thread()->kernel_id;
int oldv;
if ( tid == MUTEX_OWNER(mutex) ) if ( tid == MUTEX_OWNER(mutex) )
{ {
int oldv, counter; int counter;
if (mtype == MUTEX_TYPE_ERRORCHECK) { if (mtype == MUTEX_TYPE_ERRORCHECK) {
/* already locked by ourselves */ /* already locked by ourselves */
@@ -1208,16 +1308,14 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
_recursive_unlock(); _recursive_unlock();
return 0; return 0;
} }
else
{ /* we don't own it, try to lock it */
/* new_lock_type = 1;
* If the new lock is available immediately, we grab it in sharing = (mutex->value & MUTEX_SHARING_MASK);
* the "uncontended" state.
*/ mtype |= sharing;
int new_lock_type = 1;
for (;;) { for (;;) {
int oldv;
struct timespec ts; struct timespec ts;
_recursive_lock(); _recursive_lock();
@@ -1244,16 +1342,54 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
if (__timespec_to_absolute(&ts, &abstime, clock) < 0) if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
return EBUSY; return EBUSY;
__futex_wait( &mutex->value, oldv, &ts ); if (sharing) {
__futex_wait(&mutex->value, oldv, &ts);
} else {
__futex_wait_private(&mutex->value, oldv, &ts);
}
} }
return 0; return 0;
}
}
}
return EINVAL;
} }
int
pthread_condattr_init(pthread_condattr_t *attr)
{
*attr = PTHREAD_PROCESS_PRIVATE;
return 0;
}
int
pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
{
if (attr == NULL)
return EINVAL;
if (pshared != PTHREAD_PROCESS_PRIVATE &&
pshared != PTHREAD_PROCESS_SHARED)
return EINVAL;
*attr = pshared;
return 0;
}
int
pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared)
{
if (attr == NULL || pshared == NULL)
return EINVAL;
*pshared = *attr;
return 0;
}
int
pthread_condattr_destroy(pthread_condattr_t *attr)
{
*attr = 0xdeada11d;
return 0;
}
/* XXX *technically* there is a race condition that could allow /* XXX *technically* there is a race condition that could allow
* XXX a signal to be missed. If thread A is preempted in _wait() * XXX a signal to be missed. If thread A is preempted in _wait()
* XXX after unlocking the mutex and before waiting, and if other * XXX after unlocking the mutex and before waiting, and if other
@@ -1262,10 +1398,29 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
* XXX then the signal will be lost. * XXX then the signal will be lost.
*/ */
/* Condition variables:
* bits name description
* 0 sharing 1 if process-shared, 0 if private
* 1-31 counter counter, decremented on each signal/broadcast
*/
#define COND_SHARING_MASK 0x0001
#define COND_COUNTER_INCREMENT 0x0002
#define COND_COUNTER_MASK (~COND_SHARING_MASK)
#define COND_IS_SHARED(cond) (((cond)->value & COND_SHARING_MASK) != 0)
int pthread_cond_init(pthread_cond_t *cond, int pthread_cond_init(pthread_cond_t *cond,
const pthread_condattr_t *attr) const pthread_condattr_t *attr)
{ {
if (cond == NULL)
return EINVAL;
cond->value = 0; cond->value = 0;
if (attr != NULL && *attr == PTHREAD_PROCESS_SHARED)
cond->value |= COND_SHARING_MASK;
return 0; return 0;
} }
@@ -1275,17 +1430,53 @@ int pthread_cond_destroy(pthread_cond_t *cond)
return 0; return 0;
} }
/* This function is used by pthread_cond_broadcast and
* pthread_cond_signal to 'pulse' the condition variable.
*
* This means atomically decrementing the counter value
* while leaving the other bits untouched.
*/
static void
__pthread_cond_pulse(pthread_cond_t *cond)
{
long flags = (cond->value & ~COND_COUNTER_MASK);
for (;;) {
long oldval = cond->value;
long newval = ((oldval - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK) | flags;
if (__atomic_cmpxchg(oldval, newval, &cond->value) == 0)
break;
}
}
int pthread_cond_broadcast(pthread_cond_t *cond) int pthread_cond_broadcast(pthread_cond_t *cond)
{ {
__atomic_dec(&cond->value); if (cond == NULL)
return EINVAL;
__pthread_cond_pulse(cond);
if (COND_IS_SHARED(cond)) {
__futex_wake(&cond->value, INT_MAX); __futex_wake(&cond->value, INT_MAX);
} else {
__futex_wake_private(&cond->value, INT_MAX);
}
return 0; return 0;
} }
int pthread_cond_signal(pthread_cond_t *cond) int pthread_cond_signal(pthread_cond_t *cond)
{ {
__atomic_dec(&cond->value); if (cond == NULL)
return EINVAL;
__pthread_cond_pulse(cond);
if (COND_IS_SHARED(cond)) {
__futex_wake(&cond->value, 1); __futex_wake(&cond->value, 1);
} else {
__futex_wake_private(&cond->value, 1);
}
return 0; return 0;
} }
@@ -1302,7 +1493,11 @@ int __pthread_cond_timedwait_relative(pthread_cond_t *cond,
int oldvalue = cond->value; int oldvalue = cond->value;
pthread_mutex_unlock(mutex); pthread_mutex_unlock(mutex);
if (COND_IS_SHARED(cond)) {
status = __futex_wait(&cond->value, oldvalue, reltime); status = __futex_wait(&cond->value, oldvalue, reltime);
} else {
status = __futex_wait_private(&cond->value, oldvalue, reltime);
}
pthread_mutex_lock(mutex); pthread_mutex_lock(mutex);
if (status == (-ETIMEDOUT)) return ETIMEDOUT; if (status == (-ETIMEDOUT)) return ETIMEDOUT;

View File

@@ -80,6 +80,16 @@ Differences between current and Android 2.1:
an unknown domain name. Due to an initialization bug, a random DNS search an unknown domain name. Due to an initialization bug, a random DNS search
list was generated for each thread if net.dns.search is not defined. list was generated for each thread if net.dns.search is not defined.
- <pthread.h>: Add pthread_condattr_init/destroy/setpshared/getpshared.
Also modify mutex and condvar implementation to use private futexes by
default, unless PROCESS_SHARED is specified in the init attributes.
Private futexes are limited to a single address space and can't be shared
among processes. However they are much faster to wake/wait for. This should
speed up mutex and condvar operations.
NOTE: PROCESS_SHARED mutexes are still NOT robust (see note below).
------------------------------------------------------------------------------- -------------------------------------------------------------------------------
Differences between Android 2.1 and 2.0.1: Differences between Android 2.1 and 2.0.1:

View File

@@ -165,6 +165,11 @@ int pthread_mutex_unlock(pthread_mutex_t *mutex);
int pthread_mutex_trylock(pthread_mutex_t *mutex); int pthread_mutex_trylock(pthread_mutex_t *mutex);
int pthread_mutex_timedlock(pthread_mutex_t *mutex, struct timespec* ts); int pthread_mutex_timedlock(pthread_mutex_t *mutex, struct timespec* ts);
int pthread_condattr_init(pthread_condattr_t *attr);
int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared);
int pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared);
int pthread_condattr_destroy(pthread_condattr_t *attr);
int pthread_cond_init(pthread_cond_t *cond, int pthread_cond_init(pthread_cond_t *cond,
const pthread_condattr_t *attr); const pthread_condattr_t *attr);
int pthread_cond_destroy(pthread_cond_t *cond); int pthread_cond_destroy(pthread_cond_t *cond);