Atomic/SMP update, part 3.
Update ARM atomic ops to use LDREX/STREX. Stripped out #if 0 chunk. Insert explicit memory barriers in pthread and semaphore code. For bug 2721865. Change-Id: I0f153b797753a655702d8be41679273d1d5d6ae7
This commit is contained in:
parent
4fdbadde92
commit
fcd00ebbdf
@ -455,6 +455,14 @@ else # !arm
|
||||
endif # x86
|
||||
endif # !arm
|
||||
|
||||
# Define ANDROID_SMP appropriately.
|
||||
ifeq ($(TARGET_CPU_SMP),true)
|
||||
libc_common_cflags += -DANDROID_SMP=1
|
||||
else
|
||||
libc_common_cflags += -DANDROID_SMP=0
|
||||
endif
|
||||
|
||||
|
||||
# Define some common includes
|
||||
# ========================================================
|
||||
libc_common_c_includes := \
|
||||
|
@ -26,6 +26,7 @@
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
#include <sys/linux-syscalls.h>
|
||||
#include <machine/cpu-features.h>
|
||||
|
||||
.global __atomic_cmpxchg
|
||||
.type __atomic_cmpxchg, %function
|
||||
@ -39,9 +40,73 @@
|
||||
#define FUTEX_WAIT 0
|
||||
#define FUTEX_WAKE 1
|
||||
|
||||
#if 1
|
||||
.equ kernel_cmpxchg, 0xFFFF0FC0
|
||||
.equ kernel_atomic_base, 0xFFFF0FFF
|
||||
#if defined(__ARM_HAVE_LDREX_STREX)
|
||||
/*
|
||||
* ===========================================================================
|
||||
* ARMv6+ implementation
|
||||
* ===========================================================================
|
||||
*/
|
||||
|
||||
/* r0(addr) -> r0(old) */
|
||||
/* int __atomic_dec(volatile int* addr) — atomically decrement, return old value.
 * r0(addr) -> r0(old).  Clobbers r1-r3.  No barrier: callers supply
 * ANDROID_MEMBAR_FULL where ordering matters. */
__atomic_dec:
    .fnstart
    mov     r1, r0                  @ keep the address in r1; r0 becomes the result
1:
    ldrex   r0, [r1]                @ r0 = current (old) value
    sub     r2, r0, #1              @ r2 = old - 1
    strex   r3, r2, [r1]            @ publish; r3 = 0 on success, 1 on failure
    teq     r3, #0
    bne     1b                      @ reservation lost — reload and retry
    bx      lr                      @ return the value seen before the decrement
    .fnend
|
||||
|
||||
/* r0(addr) -> r0(old) */
|
||||
/* int __atomic_inc(volatile int* addr) — atomically increment, return old value.
 * r0(addr) -> r0(old).  Clobbers r1-r3.  No barrier: callers supply
 * ANDROID_MEMBAR_FULL where ordering matters. */
__atomic_inc:
    .fnstart
    mov     r1, r0                  @ keep the address in r1; r0 becomes the result
1:
    ldrex   r0, [r1]                @ r0 = current (old) value
    add     r2, r0, #1              @ r2 = old + 1
    strex   r3, r2, [r1]            @ publish; r3 = 0 on success, 1 on failure
    teq     r3, #0
    bne     1b                      @ reservation lost — reload and retry
    bx      lr                      @ return the value seen before the increment
    .fnend
|
||||
|
||||
/* r0(old) r1(new) r2(addr) -> r0(zero_if_succeeded) */
|
||||
/* int __atomic_cmpxchg(int old, int new, volatile int* addr)
 * r0(old) r1(new) r2(addr) -> r0(zero_if_succeeded).
 * Returns 0 when the store happened, 2 when *addr no longer equals old
 * (matching the kernel cmpxchg helper's nonzero failure result).
 * Clobbers r3.  Barriers are the caller's responsibility. */
__atomic_cmpxchg:
    .fnstart
1:
    ldrex   r3, [r2]                @ r3 = current value at *addr
    cmp     r3, r0                  @ still equal to the expected old value?
    movne   r0, #2                  @ no: report failure ("new != old")
    bne     2f
    strex   r3, r1, [r2]            @ try the store; r3 = 0 (ok) / 1 (lost reservation)
    cmp     r3, #0
    bne     1b                      @ reservation lost — reload and re-check
    mov     r0, #0                  @ stored: report success
2:
    bx      lr
    .fnend
|
||||
|
||||
/* r0(new) r1(addr) -> r0(old) */
|
||||
/* int __atomic_swap(int new, volatile int* addr) — atomically store new,
 * return the previous value.  r0(new) r1(addr) -> r0(old).  Clobbers r2-r3.
 * No barrier here; callers insert ANDROID_MEMBAR_FULL as needed. */
__atomic_swap:
    .fnstart
    mov     r2, r0                  @ r2 = new value, freeing r0 for the result
1:
    ldrex   r0, [r1]                @ r0 = old value (the return value)
    strex   r3, r2, [r1]            @ try to install the new value
    teq     r3, #0
    bne     1b                      @ lost the reservation — try again
    bx      lr
    .fnend
|
||||
|
||||
#else /*not defined __ARM_HAVE_LDREX_STREX*/
|
||||
/*
|
||||
* ===========================================================================
|
||||
* Pre-ARMv6 implementation
|
||||
* ===========================================================================
|
||||
*/
|
||||
|
||||
/* int __kernel_cmpxchg(int oldval, int newval, int* ptr) */
|
||||
.equ kernel_cmpxchg, 0xFFFF0FC0
|
||||
.equ kernel_atomic_base, 0xFFFF0FFF
|
||||
|
||||
/* r0(addr) -> r0(old) */
|
||||
__atomic_dec:
|
||||
.fnstart
|
||||
.save {r4, lr}
|
||||
@ -59,6 +124,7 @@ __atomic_dec:
|
||||
bx lr
|
||||
.fnend
|
||||
|
||||
/* r0(addr) -> r0(old) */
|
||||
__atomic_inc:
|
||||
.fnstart
|
||||
.save {r4, lr}
|
||||
@ -95,64 +161,16 @@ __atomic_cmpxchg:
|
||||
ldmia sp!, {r4, lr}
|
||||
bx lr
|
||||
.fnend
|
||||
#else
|
||||
#define KUSER_CMPXCHG 0xffffffc0
|
||||
|
||||
/* r0(old) r1(new) r2(addr) -> r0(zero_if_succeeded) */
|
||||
__atomic_cmpxchg:
|
||||
stmdb sp!, {r4, lr}
|
||||
mov r4, r0 /* r4 = save oldvalue */
|
||||
1: add lr, pc, #4
|
||||
mov r0, r4 /* r0 = oldvalue */
|
||||
mov pc, #KUSER_CMPXCHG
|
||||
bcs 2f /* swap was made. we're good, return. */
|
||||
ldr r3, [r2] /* swap not made, see if it's because *ptr!=oldvalue */
|
||||
cmp r3, r4
|
||||
beq 1b
|
||||
2: ldmia sp!, {r4, lr}
|
||||
bx lr
|
||||
|
||||
/* r0(addr) -> r0(old) */
|
||||
__atomic_dec:
|
||||
stmdb sp!, {r4, lr}
|
||||
mov r2, r0 /* address */
|
||||
1: ldr r0, [r2] /* oldvalue */
|
||||
add lr, pc, #4
|
||||
sub r1, r0, #1 /* newvalue = oldvalue - 1 */
|
||||
mov pc, #KUSER_CMPXCHG
|
||||
bcc 1b /* no swap, try again until we get it right */
|
||||
mov r0, ip /* swapped, return the old value */
|
||||
ldmia sp!, {r4, lr}
|
||||
bx lr
|
||||
|
||||
/* r0(addr) -> r0(old) */
|
||||
__atomic_inc:
|
||||
stmdb sp!, {r4, lr}
|
||||
mov r2, r0 /* address */
|
||||
1: ldr r0, [r2] /* oldvalue */
|
||||
add lr, pc, #4
|
||||
add r1, r0, #1 /* newvalue = oldvalue + 1 */
|
||||
mov pc, #KUSER_CMPXCHG
|
||||
bcc 1b /* no swap, try again until we get it right */
|
||||
mov r0, ip /* swapped, return the old value */
|
||||
ldmia sp!, {r4, lr}
|
||||
bx lr
|
||||
#endif
|
||||
|
||||
/* r0(new) r1(addr) -> r0(old) */
|
||||
/* replaced swp instruction with ldrex/strex for ARMv6 & ARMv7 */
|
||||
/* int __atomic_swap(int new, volatile int* addr)
 * r0(new) r1(addr) -> r0(old).
 * Uses ldrex/strex on ARMv6+ (swp is deprecated there); falls back to swp
 * otherwise.
 * Fix: .fnstart was emitted only in the #else branch while .fnend was
 * unconditional, leaving the EHABI unwind directives unbalanced whenever
 * __ARM_HAVE_LDREX_STREX is defined.  Hoist .fnstart above the #if so the
 * pair always matches. */
__atomic_swap:
    .fnstart
#if defined (__ARM_HAVE_LDREX_STREX)
1:  ldrex   r2, [r1]                    @ r2 = old value
    strex   r3, r0, [r1]                @ try to store the new value
    teq     r3, #0
    bne     1b                          @ reservation lost — retry
    mov     r0, r2                      @ return the old value
    mcr     p15, 0, r0, c7, c10, 5      /* data memory barrier (or, use dmb) */
#else
    swp     r0, r0, [r1]
#endif
    bx      lr
    .fnend
|
||||
|
||||
#endif /*not defined __ARM_HAVE_LDREX_STREX*/
|
||||
|
||||
|
||||
/* __futex_wait(*ftx, val, *timespec) */
|
||||
/* __futex_wake(*ftx, counter) */
|
||||
@ -197,6 +215,8 @@ __futex_wait:
|
||||
.fnend
|
||||
|
||||
__futex_wake:
|
||||
.fnstart
|
||||
.save {r4, r7}
|
||||
stmdb sp!, {r4, r7}
|
||||
mov r2, r1
|
||||
mov r1, #FUTEX_WAKE
|
||||
@ -204,6 +224,7 @@ __futex_wake:
|
||||
swi #0
|
||||
ldmia sp!, {r4, r7}
|
||||
bx lr
|
||||
.fnend
|
||||
|
||||
#else
|
||||
|
||||
|
@ -44,6 +44,7 @@
|
||||
#include <assert.h>
|
||||
#include <malloc.h>
|
||||
#include <linux/futex.h>
|
||||
#include <cutils/atomic-inline.h>
|
||||
|
||||
extern int __pthread_clone(int (*fn)(void*), void *child_stack, int flags, void *arg);
|
||||
extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode);
|
||||
@ -936,6 +937,7 @@ _normal_lock(pthread_mutex_t* mutex)
|
||||
while (__atomic_swap(shared|2, &mutex->value ) != (shared|0))
|
||||
__futex_syscall4(&mutex->value, wait_op, shared|2, 0);
|
||||
}
|
||||
ANDROID_MEMBAR_FULL();
|
||||
}
|
||||
|
||||
/*
|
||||
@ -945,6 +947,8 @@ _normal_lock(pthread_mutex_t* mutex)
|
||||
static __inline__ void
|
||||
_normal_unlock(pthread_mutex_t* mutex)
|
||||
{
|
||||
ANDROID_MEMBAR_FULL();
|
||||
|
||||
/* We need to preserve the shared flag during operations */
|
||||
int shared = mutex->value & MUTEX_SHARED_MASK;
|
||||
|
||||
@ -1144,8 +1148,10 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex)
|
||||
/* Handle common case first */
|
||||
if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
|
||||
{
|
||||
if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0)
|
||||
if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0) {
|
||||
ANDROID_MEMBAR_FULL();
|
||||
return 0;
|
||||
}
|
||||
|
||||
return EBUSY;
|
||||
}
|
||||
@ -1241,9 +1247,11 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
|
||||
{
|
||||
int wait_op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
|
||||
|
||||
/* fast path for unconteded lock */
|
||||
if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0)
|
||||
/* fast path for uncontended lock */
|
||||
if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0) {
|
||||
ANDROID_MEMBAR_FULL();
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* loop while needed */
|
||||
while (__atomic_swap(shared|2, &mutex->value) != (shared|0)) {
|
||||
@ -1252,6 +1260,7 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
|
||||
|
||||
__futex_syscall4(&mutex->value, wait_op, shared|2, &ts);
|
||||
}
|
||||
ANDROID_MEMBAR_FULL();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -30,6 +30,7 @@
|
||||
#include <sys/time.h>
|
||||
#include <sys/atomics.h>
|
||||
#include <time.h>
|
||||
#include <cutils/atomic-inline.h>
|
||||
|
||||
int sem_init(sem_t *sem, int pshared, unsigned int value)
|
||||
{
|
||||
@ -103,6 +104,7 @@ __atomic_dec_if_positive( volatile unsigned int* pvalue )
|
||||
return old;
|
||||
}
|
||||
|
||||
/* lock a semaphore */
|
||||
int sem_wait(sem_t *sem)
|
||||
{
|
||||
if (sem == NULL) {
|
||||
@ -116,6 +118,7 @@ int sem_wait(sem_t *sem)
|
||||
|
||||
__futex_wait(&sem->count, 0, 0);
|
||||
}
|
||||
ANDROID_MEMBAR_FULL();
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -130,8 +133,10 @@ int sem_timedwait(sem_t *sem, const struct timespec *abs_timeout)
|
||||
|
||||
/* POSIX says we need to try to decrement the semaphore
|
||||
* before checking the timeout value */
|
||||
if (__atomic_dec_if_positive(&sem->count))
|
||||
if (__atomic_dec_if_positive(&sem->count)) {
|
||||
ANDROID_MEMBAR_FULL();
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* check it as per Posix */
|
||||
if (abs_timeout == NULL ||
|
||||
@ -169,17 +174,21 @@ int sem_timedwait(sem_t *sem, const struct timespec *abs_timeout)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (__atomic_dec_if_positive(&sem->count))
|
||||
if (__atomic_dec_if_positive(&sem->count)) {
|
||||
ANDROID_MEMBAR_FULL();
|
||||
break;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* unlock a semaphore */
|
||||
int sem_post(sem_t *sem)
|
||||
{
|
||||
if (sem == NULL)
|
||||
return EINVAL;
|
||||
|
||||
ANDROID_MEMBAR_FULL();
|
||||
if (__atomic_inc((volatile int*)&sem->count) >= 0)
|
||||
__futex_wake(&sem->count, 1);
|
||||
|
||||
@ -194,6 +203,7 @@ int sem_trywait(sem_t *sem)
|
||||
}
|
||||
|
||||
if (__atomic_dec_if_positive(&sem->count) > 0) {
|
||||
ANDROID_MEMBAR_FULL();
|
||||
return 0;
|
||||
} else {
|
||||
errno = EAGAIN;
|
||||
|
Loading…
Reference in New Issue
Block a user