Merge "bionic: Do not use <sys/atomics.h> for platform code."
commit 90c4c1e82b
@@ -1,95 +0,0 @@
/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/atomics.h>

#define FUTEX_SYSCALL 240
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1

int __futex_wait(volatile void *ftx, int val)
{
    int ret;
    asm volatile (
        "int $0x80;"
        : "=a" (ret)
        : "0" (FUTEX_SYSCALL),
          "b" (ftx),
          "c" (FUTEX_WAIT),
          "d" (val),
          "S" (0)
    );
    return ret;
}

int __futex_wake(volatile void *ftx, int count)
{
    int ret;
    asm volatile (
        "int $0x80;"
        : "=a" (ret)
        : "0" (FUTEX_SYSCALL),
          "b" (ftx),
          "c" (FUTEX_WAKE),
          "d" (count)
    );
    return ret;
}

int __atomic_cmpxchg(int old, int new, volatile int* addr) {
    int xchg;
    asm volatile (
        "lock;"
        "cmpxchg %%ecx, (%%edx);"
        "setne %%al;"
        : "=a" (xchg)
        : "a" (old),
          "c" (new),
          "d" (addr)
    );
    return xchg;
}

int __atomic_swap(int new, volatile int* addr) {
    int old;
    asm volatile (
        "lock;"
        "xchg %%ecx, (%%edx);"
        : "=c" (old)
        : "c" (new),
          "d" (addr)
    );
    return old;
}

int __atomic_dec(volatile int* addr) {
    int old;
    do {
        old = *addr;
    } while (__atomic_cmpxchg(old, old-1, addr));
    return old;
}
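For reference, the removed __futex_wait()/__futex_wake() wrappers above are hand-rolled int $0x80 invocations of the Linux futex syscall (number 240 on 32-bit x86, with FUTEX_WAIT = 0 and FUTEX_WAKE = 1). A minimal sketch of the same two operations through the generic syscall(2) interface — purely illustrative, not part of this commit:

#include <linux/futex.h>    /* FUTEX_WAIT, FUTEX_WAKE */
#include <sys/syscall.h>    /* SYS_futex */
#include <unistd.h>         /* syscall() */

/* Sketch only: block until *ftx != val (or until a wake-up arrives). */
static int futex_wait_sketch(volatile int *ftx, int val) {
    return syscall(SYS_futex, ftx, FUTEX_WAIT, val, NULL, NULL, 0);
}

/* Sketch only: wake up to 'count' waiters blocked on ftx. */
static int futex_wake_sketch(volatile int *ftx, int count) {
    return syscall(SYS_futex, ftx, FUTEX_WAKE, count, NULL, NULL, 0);
}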
@@ -692,7 +692,7 @@ FoundIt:
            goto Exit;
        }
    }
-   while ( __atomic_cmpxchg( flags, flags | PTHREAD_ATTR_FLAG_DETACHED,
+   while ( __bionic_cmpxchg( flags, flags | PTHREAD_ATTR_FLAG_DETACHED,
                              (volatile int*)&thread->attr.flags ) != 0 );
Exit:
    pthread_mutex_unlock(&gThreadListLock);
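The detach path above is the usual read-modify-CAS loop: snapshot the flags, OR in the detached bit, and retry until the compare-and-swap sticks (remember that __bionic_cmpxchg() returns 0 on success). A standalone sketch of the pattern; set_flag_atomically() is a hypothetical helper, not code from this commit:

/* Sketch: atomically set 'bit' in *flags using a 0-on-success cmpxchg. */
static void set_flag_atomically(volatile int *flags, int bit) {
    int old;
    do {
        old = *flags;                                       /* snapshot current flags */
    } while (__bionic_cmpxchg(old, old | bit, flags) != 0); /* retry if raced         */
}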
@@ -926,17 +926,17 @@ _normal_lock(pthread_mutex_t* mutex)
    int shared = mutex->value & MUTEX_SHARED_MASK;
    /*
     * The common case is an unlocked mutex, so we begin by trying to
-     * change the lock's state from 0 to 1. __atomic_cmpxchg() returns 0
+     * change the lock's state from 0 to 1. __bionic_cmpxchg() returns 0
     * if it made the swap successfully. If the result is nonzero, this
     * lock is already held by another thread.
     */
-    if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value ) != 0) {
+    if (__bionic_cmpxchg(shared|0, shared|1, &mutex->value ) != 0) {
        /*
         * We want to go to sleep until the mutex is available, which
         * requires promoting it to state 2. We need to swap in the new
         * state value and then wait until somebody wakes us up.
         *
-         * __atomic_swap() returns the previous value. We swap 2 in and
+         * __bionic_swap() returns the previous value. We swap 2 in and
         * see if we got zero back; if so, we have acquired the lock. If
         * not, another thread still holds the lock and we wait again.
         *
@@ -947,7 +947,7 @@ _normal_lock(pthread_mutex_t* mutex)
         * that the mutex is in state 2 when we go to sleep on it, which
         * guarantees a wake-up call.
         */
-        while (__atomic_swap(shared|2, &mutex->value ) != (shared|0))
+        while (__bionic_swap(shared|2, &mutex->value ) != (shared|0))
            __futex_wait_ex(&mutex->value, shared, shared|2, 0);
    }
    ANDROID_MEMBAR_FULL();
@@ -967,10 +967,10 @@ _normal_unlock(pthread_mutex_t* mutex)

    /*
     * The mutex state will be 1 or (rarely) 2. We use an atomic decrement
-     * to release the lock. __atomic_dec() returns the previous value;
+     * to release the lock. __bionic_atomic_dec() returns the previous value;
     * if it wasn't 1 we have to do some additional work.
     */
-    if (__atomic_dec(&mutex->value) != (shared|1)) {
+    if (__bionic_atomic_dec(&mutex->value) != (shared|1)) {
        /*
         * Start by releasing the lock. The decrement changed it from
         * "contended lock" to "uncontended lock", which means we still
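Taken together, _normal_lock()/_normal_unlock() implement the classic three-state futex mutex: 0 = unlocked, 1 = locked with no waiters, 2 = locked and contended. A condensed sketch of that state machine in terms of the renamed primitives — it ignores the shared bits, error handling, and timeouts of the real code, and assumes the three-argument __futex_wait()/__futex_wake() form used elsewhere in this diff:

/* Condensed sketch of the 0/1/2 futex mutex protocol, not the real bionic code. */
static void normal_lock_sketch(volatile int32_t *state) {
    if (__bionic_cmpxchg(0, 1, state) != 0) {   /* fast path: 0 -> 1            */
        while (__bionic_swap(2, state) != 0)    /* mark contended: ? -> 2       */
            __futex_wait(state, 2, NULL);       /* sleep while it stays 2       */
    }
    ANDROID_MEMBAR_FULL();                      /* acquire ordering             */
}

static void normal_unlock_sketch(volatile int32_t *state) {
    ANDROID_MEMBAR_FULL();                      /* release ordering             */
    if (__bionic_atomic_dec(state) != 1) {      /* old value 1 means no waiters */
        *state = 0;                             /* was 2: fully release         */
        __futex_wake(state, 1);                 /* and wake one waiter          */
    }
}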
@@ -1158,7 +1158,7 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex)
    /* Handle common case first */
    if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
    {
-        if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0) {
+        if (__bionic_cmpxchg(shared|0, shared|1, &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }
@@ -1256,13 +1256,13 @@ int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
    if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
    {
        /* fast path for uncontended lock */
-        if (__atomic_cmpxchg(shared|0, shared|1, &mutex->value) == 0) {
+        if (__bionic_cmpxchg(shared|0, shared|1, &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }

        /* loop while needed */
-        while (__atomic_swap(shared|2, &mutex->value) != (shared|0)) {
+        while (__bionic_swap(shared|2, &mutex->value) != (shared|0)) {
            if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
                return EBUSY;

@@ -1431,7 +1431,7 @@ __pthread_cond_pulse(pthread_cond_t *cond, int counter)
        long oldval = cond->value;
        long newval = ((oldval - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK)
                      | flags;
-        if (__atomic_cmpxchg(oldval, newval, &cond->value) == 0)
+        if (__bionic_cmpxchg(oldval, newval, &cond->value) == 0)
            break;
    }

@@ -174,7 +174,7 @@ __sem_dec(volatile unsigned int *pvalue)

        new = SEMCOUNT_DECREMENT(old);
    }
-    while (__atomic_cmpxchg((int)(old|shared),
+    while (__bionic_cmpxchg((int)(old|shared),
                            (int)(new|shared),
                            (volatile int *)pvalue) != 0);
    return ret;
@@ -198,7 +198,7 @@ __sem_trydec(volatile unsigned int *pvalue)

        new = SEMCOUNT_DECREMENT(old);
    }
-    while (__atomic_cmpxchg((int)(old|shared),
+    while (__bionic_cmpxchg((int)(old|shared),
                            (int)(new|shared),
                            (volatile int *)pvalue) != 0);

@@ -235,7 +235,7 @@ __sem_inc(volatile unsigned int *pvalue)
        else
            new = SEMCOUNT_INCREMENT(old);
    }
-    while ( __atomic_cmpxchg((int)(old|shared),
+    while ( __bionic_cmpxchg((int)(old|shared),
                             (int)(new|shared),
                             (volatile int*)pvalue) != 0);

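All three semaphore helpers follow the same shape: compute the new counter value from a snapshot, then publish it with __bionic_cmpxchg(), redoing the whole computation if another thread touched the counter in between. A minimal sketch of that decrement-if-positive pattern; the plain int counter and sem_trydec_sketch() are illustrative and do not use the real SEMCOUNT encoding or shared bit:

/* Sketch: decrement *count only if it is positive; returns the old value. */
static int sem_trydec_sketch(volatile int32_t *count) {
    int32_t old, new_value;
    do {
        old = *count;
        if (old <= 0)
            return old;                 /* nothing to decrement                 */
        new_value = old - 1;            /* real code uses SEMCOUNT_DECREMENT()  */
    } while (__bionic_cmpxchg(old, new_value, count) != 0);   /* 0 == success   */
    return old;
}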
libc/private/bionic_atomic_arm.h (new file, 284 lines)
@@ -0,0 +1,284 @@
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef BIONIC_ATOMIC_ARM_H
#define BIONIC_ATOMIC_ARM_H

#include <machine/cpu-features.h>

/* Some of the hardware instructions used below are not available in Thumb-1
 * mode (they are if you build in ARM or Thumb-2 mode though). To solve this
 * problem, we're going to use the same technique as libatomic_ops,
 * which is to temporarily switch to ARM, do the operation, then switch
 * back to Thumb-1.
 *
 * This results in two 'bx' jumps, just like a normal function call, but
 * everything is kept inlined, avoids loading or computing the function's
 * address, and prevents a little I-cache trashing too.
 *
 * However, it is highly recommended to avoid compiling any C library source
 * file that uses these functions in Thumb-1 mode.
 *
 * Define three helper macros to implement this:
 */
#if defined(__thumb__) && !defined(__thumb2__)
# define __ATOMIC_SWITCH_TO_ARM \
            "adr r3, 5f\n" \
            "bx  r3\n" \
            ".align\n" \
            ".arm\n" \
        "5:\n"
/* note: the leading \n below is intentional */
# define __ATOMIC_SWITCH_TO_THUMB \
            "\n" \
            "adr r3, 6f\n" \
            "bx  r3\n" \
            ".thumb" \
        "6:\n"

# define __ATOMIC_CLOBBERS "r3"  /* list of clobbered registers */

/* Warn the user that ARM mode should really be preferred! */
# warning Rebuilding this source file in ARM mode is highly recommended for performance!!

#else
# define __ATOMIC_SWITCH_TO_ARM   /* nothing */
# define __ATOMIC_SWITCH_TO_THUMB /* nothing */
# define __ATOMIC_CLOBBERS        /* nothing */
#endif


/* Define a full memory barrier, this is only needed if we build the
 * platform for a multi-core device. For the record, using a 'dmb'
 * instruction on a Nexus One device can take up to 180 ns even if
 * it is completely un-necessary on this device.
 *
 * NOTE: This is where the platform and NDK atomic headers are
 *       going to diverge. With the NDK, we don't know if the generated
 *       code is going to run on a single or multi-core device, so we
 *       need to be cautious.
 *
 *       Fortunately, we can use the kernel helper function that is
 *       mapped at address 0xffff0fa0 in all user processes, and that
 *       provides a device-specific barrier operation.
 *
 *       I.e. on single-core devices, the helper immediately returns,
 *       on multi-core devices, it uses "dmb" or any other means to
 *       perform a full-memory barrier.
 *
 * There are three cases to consider for the platform:
 *
 *    - multi-core ARMv7-A       => use the 'dmb' hardware instruction
 *    - multi-core ARMv6         => use the coprocessor
 *    - single core ARMv5TE/6/7  => do not use any hardware barrier
 */
#if defined(ANDROID_SMP) && ANDROID_SMP == 1

/* Sanity check, multi-core is only supported starting from ARMv6 */
# if __ARM_ARCH__ < 6
#   error ANDROID_SMP should not be set to 1 for an ARM architecture less than 6
# endif

# ifdef __ARM_HAVE_DMB
/* For ARMv7-A, we can use the 'dmb' instruction directly */
__ATOMIC_INLINE__ void
__bionic_memory_barrier(void)
{
    /* Note: we always build in ARM or Thumb-2 on ARMv7-A, so don't
     * bother with __ATOMIC_SWITCH_TO_ARM */
    __asm__ __volatile__ ( "dmb" : : : "memory" );
}
# else /* !__ARM_HAVE_DMB */
/* Otherwise, i.e. for multi-core ARMv6, we need to use the coprocessor,
 * which requires the use of a general-purpose register, which is slightly
 * less efficient.
 */
__ATOMIC_INLINE__ void
__bionic_memory_barrier(void)
{
    __asm__ __volatile__ (
        __SWITCH_TO_ARM
        "mcr p15, 0, %0, c7, c10, 5"
        __SWITCH_TO_THUMB
        : : "r" (0) : __ATOMIC_CLOBBERS "memory");
}
# endif /* !__ARM_HAVE_DMB */
#else /* !ANDROID_SMP */
__ATOMIC_INLINE__ void
__bionic_memory_barrier(void)
{
    /* A simple compiler barrier */
    __asm__ __volatile__ ( "" : : : "memory" );
}
#endif /* !ANDROID_SMP */

/* Compare-and-swap, without any explicit barriers. Note that this function
 * returns 0 on success, and 1 on failure. The opposite convention is typically
 * used on other platforms.
 *
 * There are two cases to consider:
 *
 *     - ARMv6+  => use LDREX/STREX instructions
 *     - < ARMv6 => use kernel helper function mapped at 0xffff0fc0
 *
 * LDREX/STREX are only available starting from ARMv6
 */
#ifdef __ARM_HAVE_LDREX_STREX
__ATOMIC_INLINE__ int
__bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr)
{
    int32_t prev, status;
    do {
        __asm__ __volatile__ (
            __ATOMIC_SWITCH_TO_ARM
            "ldrex %0, [%3]\n"
            "mov %1, #0\n"
            "teq %0, %4\n"
#ifdef __thumb2__
            "it eq\n"
#endif
            "strexeq %1, %5, [%3]"
            __ATOMIC_SWITCH_TO_THUMB
            : "=&r" (prev), "=&r" (status), "+m"(*ptr)
            : "r" (ptr), "Ir" (old_value), "r" (new_value)
            : __ATOMIC_CLOBBERS "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev != old_value;
}
# else /* !__ARM_HAVE_LDREX_STREX */

/* Use the handy kernel helper function mapped at 0xffff0fc0 */
typedef int (kernel_cmpxchg)(int32_t, int32_t, volatile int32_t *);

__ATOMIC_INLINE__ int
__kernel_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr)
{
    /* Note: the kernel function returns 0 on success too */
    return (*(kernel_cmpxchg *)0xffff0fc0)(old_value, new_value, ptr);
}

__ATOMIC_INLINE__ int
__bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr)
{
    return __kernel_cmpxchg(old_value, new_value, ptr);
}
#endif /* !__ARM_HAVE_LDREX_STREX */

/* Swap operation, without any explicit barriers.
 * There are again two similar cases to consider:
 *
 *   ARMv6+ => use LDREX/STREX
 *   < ARMv6 => use SWP instead.
 */
#ifdef __ARM_HAVE_LDREX_STREX
__ATOMIC_INLINE__ int32_t
__bionic_swap(int32_t new_value, volatile int32_t* ptr)
{
    int32_t prev, status;
    do {
        __asm__ __volatile__ (
            __ATOMIC_SWITCH_TO_ARM
            "ldrex %0, [%3]\n"
            "strex %1, %4, [%3]"
            __ATOMIC_SWITCH_TO_THUMB
            : "=&r" (prev), "=&r" (status), "+m" (*ptr)
            : "r" (ptr), "r" (new_value)
            : __ATOMIC_CLOBBERS "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev;
}
#else /* !__ARM_HAVE_LDREX_STREX */
__ATOMIC_INLINE__ int32_t
__bionic_swap(int32_t new_value, volatile int32_t* ptr)
{
    int32_t prev;
    /* NOTE: SWP is available in Thumb-1 too */
    __asm__ __volatile__ ("swp %0, %2, [%3]"
                          : "=&r" (prev), "+m" (*ptr)
                          : "r" (new_value), "r" (ptr)
                          : "cc");
    return prev;
}
#endif /* !__ARM_HAVE_LDREX_STREX */

/* Atomic increment - without any barriers
 * This returns the old value
 */
#ifdef __ARM_HAVE_LDREX_STREX
__ATOMIC_INLINE__ int32_t
__bionic_atomic_inc(volatile int32_t* ptr)
{
    int32_t prev, tmp, status;
    do {
        __asm__ __volatile__ (
            __ATOMIC_SWITCH_TO_ARM
            "ldrex %0, [%4]\n"
            "add %1, %0, #1\n"
            "strex %2, %1, [%4]"
            __ATOMIC_SWITCH_TO_THUMB
            : "=&r" (prev), "=&r" (tmp), "=&r" (status), "+m"(*ptr)
            : "r" (ptr)
            : __ATOMIC_CLOBBERS "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev;
}
#else
__ATOMIC_INLINE__ int32_t
__bionic_atomic_inc(volatile int32_t* ptr)
{
    int32_t prev, status;
    do {
        prev = *ptr;
        status = __kernel_cmpxchg(prev, prev+1, ptr);
    } while (__builtin_expect(status != 0, 0));
    return prev;
}
#endif

/* Atomic decrement - without any barriers
 * This returns the old value.
 */
#ifdef __ARM_HAVE_LDREX_STREX
__ATOMIC_INLINE__ int32_t
__bionic_atomic_dec(volatile int32_t* ptr)
{
    int32_t prev, tmp, status;
    do {
        __asm__ __volatile__ (
            __ATOMIC_SWITCH_TO_ARM
            "ldrex %0, [%4]\n"
            "sub %1, %0, #1\n"
            "strex %2, %1, [%4]"
            __ATOMIC_SWITCH_TO_THUMB
            : "=&r" (prev), "=&r" (tmp), "=&r" (status), "+m"(*ptr)
            : "r" (ptr)
            : __ATOMIC_CLOBBERS "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev;
}
#else
__ATOMIC_INLINE__ int32_t
__bionic_atomic_dec(volatile int32_t* ptr)
{
    int32_t prev, status;
    do {
        prev = *ptr;
        status = __kernel_cmpxchg(prev, prev-1, ptr);
    } while (__builtin_expect(status != 0, 0));
    return prev;
}
#endif

#endif /* SYS_ATOMICS_ARM_H */
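Note that none of the __bionic_* primitives above include memory barriers; the callers (as in the pthread mutex code earlier in this change) pair them with ANDROID_MEMBAR_FULL() explicitly. A minimal sketch of a toy spinlock built that way — illustrative only, not code from this commit:

/* Sketch: barrier-free primitives combined with an explicit full barrier. */
static void spin_lock_sketch(volatile int32_t *lock) {
    while (__bionic_cmpxchg(0, 1, lock) != 0)  /* 0 == success            */
        ;                                      /* spin until we take it   */
    ANDROID_MEMBAR_FULL();                     /* acquire ordering        */
}

static void spin_unlock_sketch(volatile int32_t *lock) {
    ANDROID_MEMBAR_FULL();                     /* release ordering        */
    __bionic_swap(0, lock);                    /* publish the unlock      */
}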
libc/private/bionic_atomic_gcc_builtin.h (new file, 62 lines)
@@ -0,0 +1,62 @@
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef BIONIC_ATOMIC_GCC_BUILTIN_H
#define BIONIC_ATOMIC_GCC_BUILTIN_H

/* This header file is used by default if we don't have optimized atomic
 * routines for a given platform. See bionic_atomic_arm.h and
 * bionic_atomic_x86.h for examples.
 */

__ATOMIC_INLINE__ void
__bionic_memory_barrier(void)
{
    __sync_synchronize();
}

__ATOMIC_INLINE__ int
__bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr)
{
    /* We must return 0 on success */
    return __sync_bool_compare_and_swap(ptr, old_value, new_value) == 0;
}

__ATOMIC_INLINE__ int32_t
__bionic_swap(int32_t new_value, volatile int32_t* ptr)
{
    int32_t prev, status;
    do {
        prev = *ptr;
        status = __sync_val_compare_and_swap(ptr, prev, new_value);
    } while (status != prev);
    return prev;
}

__ATOMIC_INLINE__ int32_t
__bionic_atomic_inc(volatile int32_t* ptr)
{
    /* We must return the old value */
    return __sync_fetch_and_add(ptr, 1);
}

__ATOMIC_INLINE__ int32_t
__bionic_atomic_dec(volatile int32_t* ptr)
{
    /* We must return the old value */
    return __sync_fetch_and_add(ptr, -1);
}

#endif /* BIONIC_ATOMIC_GCC_BUILTIN_H */
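The fallback above pins down the return conventions the rest of the change relies on: __bionic_cmpxchg() returns 0 on success, and __bionic_swap(), __bionic_atomic_inc() and __bionic_atomic_dec() all return the value the word held before the update. A small sanity-check sketch (hypothetical test code, not part of the commit) makes those conventions explicit:

#include <assert.h>
#include <stdint.h>

static void atomic_conventions_sketch(void) {
    volatile int32_t v = 0;

    assert(__bionic_cmpxchg(0, 1, &v) == 0);  /* swapped 0 -> 1, returns 0      */
    assert(__bionic_cmpxchg(0, 2, &v) != 0);  /* v is 1, not 0: CAS fails       */
    assert(__bionic_atomic_inc(&v) == 1);     /* returns old value, v is now 2  */
    assert(__bionic_atomic_dec(&v) == 2);     /* returns old value, v is now 1  */
    assert(__bionic_swap(7, &v) == 1);        /* returns the previous value     */
}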
@@ -43,62 +43,21 @@
extern "C" {
#endif

-/*
- * Define the full memory barrier for an SMP system. This is
- * platform-specific.
+/* Define __ATOMIC_INLINE__ to control the inlining of all atomics
+ * functions declared here. For a slight performance boost, we want
+ * all of them to be always_inline
 */
+#define __ATOMIC_INLINE__ static __inline__ __attribute__((always_inline))

#ifdef __arm__
-#include <machine/cpu-features.h>
-
-/*
- * For ARMv6K we need to issue a specific MCR instead of the DMB, since
- * that wasn't added until v7. For anything older, SMP isn't relevant.
- * Since we don't have an ARMv6K to test with, we're not going to deal
- * with that now.
- *
- * The DMB instruction is found in the ARM and Thumb2 instruction sets.
- * This will fail on plain 16-bit Thumb.
- */
-#if defined(__ARM_HAVE_DMB)
-# define _ANDROID_MEMBAR_FULL_SMP() \
-    do { __asm__ __volatile__ ("dmb" ::: "memory"); } while (0)
+# include <bionic_atomic_arm.h>
+#elif defined(__i386__)
+# include <bionic_atomic_x86.h>
#else
-# define _ANDROID_MEMBAR_FULL_SMP() ARM_SMP_defined_but_no_DMB()
+# include <bionic_atomic_gcc_builtin.h>
#endif

-#elif defined(__i386__) || defined(__x86_64__)
-/*
- * For recent x86, we can use the SSE2 mfence instruction.
- */
-# define _ANDROID_MEMBAR_FULL_SMP() \
-    do { __asm__ __volatile__ ("mfence" ::: "memory"); } while (0)
-
-#else
-/*
- * Implementation not defined for this platform. Hopefully we're building
- * in uniprocessor mode.
- */
-# define _ANDROID_MEMBAR_FULL_SMP() SMP_barrier_not_defined_for_platform()
-#endif
-
-
-/*
- * Full barrier. On uniprocessors this is just a compiler reorder barrier,
- * which ensures that the statements appearing above the barrier in the C/C++
- * code will be issued after the statements appearing below the barrier.
- *
- * For SMP this also includes a memory barrier instruction. On an ARM
- * CPU this means that the current core will flush pending writes, wait
- * for pending reads to complete, and discard any cached reads that could
- * be stale. Other CPUs may do less, but the end result is equivalent.
- */
-#if ANDROID_SMP != 0
-# define ANDROID_MEMBAR_FULL() _ANDROID_MEMBAR_FULL_SMP()
-#else
-# define ANDROID_MEMBAR_FULL() \
-    do { __asm__ __volatile__ ("" ::: "memory"); } while (0)
-#endif
+#define ANDROID_MEMBAR_FULL __bionic_memory_barrier

#ifdef __cplusplus
} // extern "C"
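After this hunk the per-architecture implementation is selected purely by which bionic_atomic_*.h header gets included, and ANDROID_MEMBAR_FULL becomes an object-like alias for __bionic_memory_barrier, so existing ANDROID_MEMBAR_FULL() call sites keep compiling unchanged. A tiny illustrative sketch (not code from this commit):

/* Sketch: ANDROID_MEMBAR_FULL(); now expands to __bionic_memory_barrier(); */
static void publish_flag_sketch(volatile int32_t *flag) {
    ANDROID_MEMBAR_FULL();      /* order earlier stores before the flag      */
    __bionic_swap(1, flag);     /* make the flag visible atomically          */
}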
libc/private/bionic_atomic_x86.h (new file, 88 lines)
@@ -0,0 +1,88 @@
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef BIONIC_ATOMIC_X86_H
#define BIONIC_ATOMIC_X86_H

/* Define a full memory barrier, this is only needed if we build the
 * platform for a multi-core device.
 */
#if defined(ANDROID_SMP) && ANDROID_SMP == 1
__ATOMIC_INLINE__ void
__bionic_memory_barrier()
{
    __asm__ __volatile__ ( "mfence" : : : "memory" );
}
#else
__ATOMIC_INLINE__ void
__bionic_memory_barrier()
{
    /* A simple compiler barrier */
    __asm__ __volatile__ ( "" : : : "memory" );
}
#endif

/* Compare-and-swap, without any explicit barriers. Note that this function
 * returns 0 on success, and 1 on failure. The opposite convention is typically
 * used on other platforms.
 */
__ATOMIC_INLINE__ int
__bionic_cmpxchg(int32_t old_value, int32_t new_value, volatile int32_t* ptr)
{
    int32_t prev;
    __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
                          : "=a" (prev)
                          : "q" (new_value), "m" (*ptr), "0" (old_value)
                          : "memory");
    return prev != old_value;
}


/* Swap, without any explicit barriers */
__ATOMIC_INLINE__ int32_t
__bionic_swap(int32_t new_value, volatile int32_t *ptr)
{
    __asm__ __volatile__ ("xchgl %1, %0"
                          : "=r" (new_value)
                          : "m" (*ptr), "0" (new_value)
                          : "memory");
    return new_value;
}

/* Atomic increment, without explicit barriers */
__ATOMIC_INLINE__ int32_t
__bionic_atomic_inc(volatile int32_t *ptr)
{
    int increment = 1;
    __asm__ __volatile__ ("lock; xaddl %0, %1"
                          : "+r" (increment), "+m" (*ptr)
                          : : "memory");
    /* increment now holds the old value of *ptr */
    return increment;
}

/* Atomic decrement, without explicit barriers */
__ATOMIC_INLINE__ int32_t
__bionic_atomic_dec(volatile int32_t *ptr)
{
    int increment = -1;
    __asm__ __volatile__ ("lock; xaddl %0, %1"
                          : "+r" (increment), "+m" (*ptr)
                          : : "memory");
    /* increment now holds the old value of *ptr */
    return increment;
}

#endif /* BIONIC_ATOMIC_X86_H */
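The xaddl-based helpers return the value the word held before the add, which is exactly what reference-counting callers need. A small hypothetical example of that usage pattern (unref_sketch() is illustrative, not part of the commit):

/* Sketch: drop a reference; the old value tells us whether we were last. */
static int unref_sketch(volatile int32_t *refcount) {
    if (__bionic_atomic_dec(refcount) == 1) {
        /* old value was 1, so the count just hit 0: free the object here */
        return 1;
    }
    return 0;
}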
@@ -20,11 +20,11 @@ extern "C" int __cxa_guard_acquire(int volatile * gv)
    // 6 untouched, wait and return 0
    // 1 untouched, return 0
retry:
-    if (__atomic_cmpxchg(0, 0x2, gv) == 0) {
+    if (__bionic_cmpxchg(0, 0x2, gv) == 0) {
        ANDROID_MEMBAR_FULL();
        return 1;
    }
-    __atomic_cmpxchg(0x2, 0x6, gv); // Indicate there is a waiter
+    __bionic_cmpxchg(0x2, 0x6, gv); // Indicate there is a waiter
    __futex_wait(gv, 0x6, NULL);

    if(*gv != 1) // __cxa_guard_abort was called, let every thread try since there is no return code for this condition
@@ -39,7 +39,7 @@ extern "C" void __cxa_guard_release(int volatile * gv)
    // 2 -> 1
    // 6 -> 1, and wake
    ANDROID_MEMBAR_FULL();
-    if (__atomic_cmpxchg(0x2, 0x1, gv) == 0) {
+    if (__bionic_cmpxchg(0x2, 0x1, gv) == 0) {
        return;
    }

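For context, the guard variable protocol used by __cxa_guard_acquire()/__cxa_guard_release() above encodes four states: 0 (initialization not started), 2 (initialization in progress), 6 (in progress with waiters), and 1 (initialized). A condensed sketch of the acquire side using the conventions shown in the diff — simplified, and omitting the __cxa_guard_abort() handling the real code has:

/* Condensed sketch of the guard-acquire state machine, not the real code. */
static int guard_acquire_sketch(int volatile *gv) {
    for (;;) {
        if (__bionic_cmpxchg(0, 0x2, gv) == 0) {  /* 0 -> 2: we run the ctor */
            ANDROID_MEMBAR_FULL();
            return 1;
        }
        if (*gv == 1)                             /* already initialized     */
            return 0;
        __bionic_cmpxchg(0x2, 0x6, gv);           /* 2 -> 6: flag a waiter   */
        __futex_wait(gv, 0x6, NULL);              /* sleep until released    */
    }
}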