am 39804dcd: Merge "Fix the pthread_setname_np test."
* commit '39804dcde6c1c596285432b28cdb09382ce59663': Fix the pthread_setname_np test.
commit 6af19237b7
@@ -29,10 +29,10 @@
 #include <machine/asm.h>
 #include <sys/linux-syscalls.h>
 
-// int __pthread_clone(int (*fn)(void*), void* child_stack, int flags, void* arg);
+// int __pthread_clone(void* (*fn)(void*), void* child_stack, int flags, void* arg);
 ENTRY(__pthread_clone)
     # Copy the args onto the new stack.
     stmdb r1!, {r0, r3}
 
     # The sys_clone system call only takes two arguments: 'flags' and 'child_stack'.
     # 'child_stack' is already in r1, but we need to move 'flags' into position.

@@ -36,7 +36,7 @@
 .ent __pthread_clone
 
 /*
- * int __pthread_clone(int (*fn)(void*), void *child_stack,
+ * int __pthread_clone(void* (*fn)(void*), void *child_stack,
  *                     int flags, void *arg);
  */
 

@@ -77,7 +77,7 @@ __pthread_clone:
     lw $a1,4($sp) # arg
     addu $a2,$sp,16 # tls
 
-    # void __thread_entry(int (*func)(void*), void *arg, void *tls)
+    # void __thread_entry(void* (*func)(void*), void *arg, void *tls)
     la $t9, __thread_entry
     j $t9
 

@@ -142,4 +142,3 @@ __bionic_clone:
     j $t9
 
 .end __bionic_clone
-

@@ -1,7 +1,7 @@
 #include <machine/asm.h>
 #include <sys/linux-syscalls.h>
 
-// int __pthread_clone(int (*fn)(void*), void* tls, int flags, void* arg);
+// int __pthread_clone(void* (*fn)(void*), void* tls, int flags, void* arg);
 ENTRY(__pthread_clone)
     pushl %ebx
     pushl %ecx
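The signature change repeated across these ARM, MIPS, and x86 stubs is the same one: the start routine handed to __pthread_clone is now declared as void* (*)(void*), the real pthread start-routine type, instead of int (*)(void*). For context, here is a minimal, self-contained C++ sketch (not bionic code) of why the pointer-sized return type matters: a start routine's result travels through pthread_exit() and back out of pthread_join() as a void*.

    // Minimal illustration (not bionic code): the value returned by a start
    // routine is a void* that pthread_join() hands back to the caller, so the
    // trampoline shouldn't narrow the routine's type to int (*)(void*).
    #include <pthread.h>
    #include <stdio.h>

    static void* start_routine(void* arg) {
      return static_cast<char*>(arg) + 1;  // any pointer-sized value
    }

    int main() {
      char buffer[2];
      pthread_t t;
      pthread_create(&t, NULL, start_routine, buffer);
      void* result = NULL;
      pthread_join(t, &result);
      printf("start routine returned %p (expected %p)\n",
             result, static_cast<void*>(buffer + 1));
      return 0;
    }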
@@ -58,9 +58,8 @@ clone(int (*fn)(void *), void *child_stack, int flags, void* arg, ...)
     int *parent_tidptr = NULL;
     void *new_tls = NULL;
     int *child_tidptr = NULL;
-    int ret;
 
-    /* extract optional parameters - they are cummulative */
+    /* extract optional parameters - they are cumulative. */
     va_start(args, arg);
     if (flags & (CLONE_PARENT_SETTID|CLONE_SETTLS|CLONE_CHILD_SETTID)) {
         parent_tidptr = va_arg(args, int*);

@@ -73,6 +72,5 @@ clone(int (*fn)(void *), void *child_stack, int flags, void* arg, ...)
     }
     va_end(args);
 
-    ret = __bionic_clone(flags, child_stack, parent_tidptr, new_tls, child_tidptr, fn, arg);
-    return ret;
+    return __bionic_clone(flags, child_stack, parent_tidptr, new_tls, child_tidptr, fn, arg);
 }
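The wrapper above only reads its optional trailing arguments (parent_tidptr, new_tls, child_tidptr) when the corresponding CLONE_PARENT_SETTID / CLONE_SETTLS / CLONE_CHILD_SETTID flags are set, so the common case is a plain four-argument call. A hedged usage sketch follows; the stack size and flags are arbitrary choices for illustration, and it assumes a Linux toolchain that exposes clone() with the prototype shown in the hunk.

    // Illustrative clone() call with none of the *_SETTID/SETTLS flags, so no
    // optional trailing arguments are needed. Assumes a Linux toolchain.
    #include <sched.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>

    static int child_fn(void* arg) {
      printf("child says: %s\n", static_cast<const char*>(arg));
      return 0;
    }

    int main() {
      const size_t stack_size = 64 * 1024;
      char* stack = static_cast<char*>(malloc(stack_size));
      if (stack == NULL) return 1;
      // The child stack pointer is the *top* of the block on architectures
      // whose stacks grow downwards.
      int tid = clone(child_fn, stack + stack_size, SIGCHLD,
                      const_cast<char*>("hello from the child"));
      if (tid == -1) { perror("clone"); return 1; }
      waitpid(tid, NULL, 0);
      free(stack);
      return 0;
    }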
@@ -49,8 +49,8 @@ int fork(void)
         __timer_table_start_stop(0);
         __bionic_atfork_run_parent();
     } else {
-        /* Adjusting the kernel id after a fork */
-        (void)__pthread_settid(pthread_self(), gettid());
+        // Fix the tid in the pthread_internal_t struct after a fork.
+        __pthread_settid(pthread_self(), gettid());
 
         /*
          * Newly created process must update cpu accounting.
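The rewritten comment states the reason for the call: after fork() the child is a new kernel task, so the tid cached in its copied pthread_internal_t is the parent's and has to be refreshed with gettid(). A small standalone sketch of the underlying fact; the raw syscall wrapper is only there for libcs that don't export gettid().

    // Shows that the child's tid after fork() no longer matches the tid the
    // parent's thread structure was carrying, which is what __pthread_settid()
    // corrects above.
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static pid_t my_gettid() { return static_cast<pid_t>(syscall(SYS_gettid)); }

    int main() {
      pid_t tid_before_fork = my_gettid();
      pid_t pid = fork();
      if (pid == 0) {
        printf("parent tid %d, child tid %d\n", tid_before_fork, my_gettid());
        _exit(0);
      }
      waitpid(pid, NULL, 0);
      return 0;
    }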
@@ -73,14 +73,15 @@ void __libc_init_tls(KernelArgumentBlock& args) {
   unsigned stack_size = 128 * 1024;
   unsigned stack_bottom = stack_top - stack_size;
 
-  static void* tls_area[BIONIC_TLS_SLOTS];
+  static void* tls[BIONIC_TLS_SLOTS];
   static pthread_internal_t thread;
+  thread.tid = gettid();
+  thread.tls = tls;
   pthread_attr_init(&thread.attr);
   pthread_attr_setstack(&thread.attr, (void*) stack_bottom, stack_size);
-  _init_thread(&thread, gettid(), false);
-  __init_tls(tls_area, &thread);
-  tls_area[TLS_SLOT_BIONIC_PREINIT] = &args;
+  _init_thread(&thread, false);
+  __init_tls(&thread);
+  tls[TLS_SLOT_BIONIC_PREINIT] = &args;
 }
 
 void __libc_init_common(KernelArgumentBlock& args) {
@@ -42,7 +42,7 @@
  * - trying to get the read-lock while there is a writer blocks
  * - a single thread can acquire the lock multiple times in the same mode
  *
- * - Posix states that behaviour is undefined it a thread tries to acquire
+ * - Posix states that behavior is undefined it a thread tries to acquire
  *   the lock in two distinct modes (e.g. write after read, or read after write).
  *
  * - This implementation tries to avoid writer starvation by making the readers

@@ -61,12 +61,6 @@
 
 extern pthread_internal_t* __get_thread(void);
 
-/* Return a global kernel ID for the current thread */
-static int __get_thread_id(void)
-{
-    return __get_thread()->kernel_id;
-}
-
 int pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
 {
     if (!attr)

@@ -150,8 +144,6 @@ int pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *at
 
 int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
 {
-    int ret;
-
     if (rwlock == NULL)
         return EINVAL;
 

@@ -164,7 +156,7 @@ int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
 }
 
 /* Returns TRUE iff we can acquire a read lock. */
-static __inline__ int read_precondition(pthread_rwlock_t *rwlock, int thread_id)
+static __inline__ int read_precondition(pthread_rwlock_t* rwlock, int tid)
 {
     /* We can't have the lock if any writer is waiting for it (writer bias).
      * This tries to avoid starvation when there are multiple readers racing.

@@ -174,7 +166,7 @@ static __inline__ int read_precondition(pthread_rwlock_t *rwlock, int thread_id
 
     /* We can have the lock if there is no writer, or if we write-own it */
     /* The second test avoids a self-dead lock in case of buggy code. */
-    if (rwlock->writerThreadId == 0 || rwlock->writerThreadId == thread_id)
+    if (rwlock->writerThreadId == 0 || rwlock->writerThreadId == tid)
         return 1;
 
     /* Otherwise, we can't have it */

@@ -182,14 +174,14 @@ static __inline__ int read_precondition(pthread_rwlock_t *rwlock, int thread_id
 }
 
 /* returns TRUE iff we can acquire a write lock. */
-static __inline__ int write_precondition(pthread_rwlock_t *rwlock, int thread_id)
+static __inline__ int write_precondition(pthread_rwlock_t* rwlock, int tid)
 {
     /* We can get the lock if nobody has it */
     if (rwlock->numLocks == 0)
         return 1;
 
     /* Or if we already own it */
-    if (rwlock->writerThreadId == thread_id)
+    if (rwlock->writerThreadId == tid)
         return 1;
 
     /* Otherwise, not */

@@ -220,7 +212,7 @@ int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
         return EINVAL;
 
     pthread_mutex_lock(&rwlock->lock);
-    if (__unlikely(!read_precondition(rwlock, __get_thread_id())))
+    if (__unlikely(!read_precondition(rwlock, __get_thread()->tid)))
         ret = EBUSY;
     else
         rwlock->numLocks ++;

@@ -231,18 +223,18 @@ int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
 
 int pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout)
 {
-    int thread_id, ret = 0;
+    int ret = 0;
 
     if (rwlock == NULL)
         return EINVAL;
 
     pthread_mutex_lock(&rwlock->lock);
-    thread_id = __get_thread_id();
-    if (__unlikely(!read_precondition(rwlock, thread_id))) {
+    int tid = __get_thread()->tid;
+    if (__unlikely(!read_precondition(rwlock, tid))) {
         rwlock->pendingReaders += 1;
         do {
             ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
-        } while (ret == 0 && !read_precondition(rwlock, thread_id));
+        } while (ret == 0 && !read_precondition(rwlock, tid));
         rwlock->pendingReaders -= 1;
         if (ret != 0)
             goto EXIT;

@@ -261,18 +253,18 @@ int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
 
 int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
 {
-    int thread_id, ret = 0;
+    int ret = 0;
 
     if (rwlock == NULL)
         return EINVAL;
 
     pthread_mutex_lock(&rwlock->lock);
-    thread_id = __get_thread_id();
-    if (__unlikely(!write_precondition(rwlock, thread_id))) {
+    int tid = __get_thread()->tid;
+    if (__unlikely(!write_precondition(rwlock, tid))) {
         ret = EBUSY;
     } else {
         rwlock->numLocks ++;
-        rwlock->writerThreadId = thread_id;
+        rwlock->writerThreadId = tid;
     }
     pthread_mutex_unlock(&rwlock->lock);
     return ret;

@@ -280,14 +272,14 @@ int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
 
 int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout)
 {
-    int thread_id, ret = 0;
+    int ret = 0;
 
     if (rwlock == NULL)
         return EINVAL;
 
     pthread_mutex_lock(&rwlock->lock);
-    thread_id = __get_thread_id();
-    if (__unlikely(!write_precondition(rwlock, thread_id))) {
+    int tid = __get_thread()->tid;
+    if (__unlikely(!write_precondition(rwlock, tid))) {
         /* If we can't read yet, wait until the rwlock is unlocked
          * and try again. Increment pendingReaders to get the
          * cond broadcast when that happens.

@@ -295,13 +287,13 @@ int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, const struct timespec *
         rwlock->pendingWriters += 1;
         do {
             ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
-        } while (ret == 0 && !write_precondition(rwlock, thread_id));
+        } while (ret == 0 && !write_precondition(rwlock, tid));
         rwlock->pendingWriters -= 1;
         if (ret != 0)
             goto EXIT;
     }
     rwlock->numLocks ++;
-    rwlock->writerThreadId = thread_id;
+    rwlock->writerThreadId = tid;
 EXIT:
     pthread_mutex_unlock(&rwlock->lock);
     return ret;

@@ -332,7 +324,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
      * must be ourselves.
      */
     else {
-        if (rwlock->writerThreadId != __get_thread_id()) {
+        if (rwlock->writerThreadId != __get_thread()->tid) {
             ret = EPERM;
             goto EXIT;
         }
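All of these rwlock hunks make the same substitution: the code reads the calling thread's tid straight from __get_thread()->tid instead of going through the removed __get_thread_id() helper, and the preconditions compare that tid against the stored writer. A generic POSIX sketch (not bionic test code; pthread_barrier_t is only used here for sequencing) of the behavior the read precondition encodes:

    // While another thread holds the write lock, pthread_rwlock_tryrdlock
    // fails with EBUSY instead of blocking.
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t g_lock = PTHREAD_RWLOCK_INITIALIZER;
    static pthread_barrier_t g_barrier;

    static void* writer(void*) {
      pthread_rwlock_wrlock(&g_lock);
      pthread_barrier_wait(&g_barrier);   // let the main thread try to read
      pthread_barrier_wait(&g_barrier);   // hold the lock until it has tried
      pthread_rwlock_unlock(&g_lock);
      return NULL;
    }

    int main() {
      pthread_barrier_init(&g_barrier, NULL, 2);
      pthread_t t;
      pthread_create(&t, NULL, writer, NULL);

      pthread_barrier_wait(&g_barrier);   // writer now owns the lock
      int ret = pthread_rwlock_tryrdlock(&g_lock);
      printf("tryrdlock while another thread writes: %s\n",
             ret == EBUSY ? "EBUSY (as expected)" : "unexpected result");
      pthread_barrier_wait(&g_barrier);   // release the writer

      pthread_join(t, NULL);
      return 0;
    }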
@@ -313,9 +313,9 @@ int pthread_getschedparam(pthread_t thid, int * policy,
     int old_errno = errno;
 
     pthread_internal_t * thread = (pthread_internal_t *)thid;
-    int err = sched_getparam(thread->kernel_id, param);
+    int err = sched_getparam(thread->tid, param);
     if (!err) {
-        *policy = sched_getscheduler(thread->kernel_id);
+        *policy = sched_getscheduler(thread->tid);
     } else {
         err = errno;
         errno = old_errno;

@@ -330,7 +330,7 @@ int pthread_setschedparam(pthread_t thid, int policy,
     int old_errno = errno;
     int ret;
 
-    ret = sched_setscheduler(thread->kernel_id, policy, param);
+    ret = sched_setscheduler(thread->tid, policy, param);
     if (ret < 0) {
         ret = errno;
         errno = old_errno;

@@ -342,7 +342,7 @@ int pthread_setschedparam(pthread_t thid, int policy,
 /* a mutex is implemented as a 32-bit integer holding the following fields
  *
  * bits:    name     description
- * 31-16    tid      owner thread's kernel id (recursive and errorcheck only)
+ * 31-16    tid      owner thread's tid (recursive and errorcheck only)
  * 15-14    type     mutex type
  * 13       shared   process-shared flag
  * 12-2     counter  counter of recursive mutexes

@@ -452,8 +452,8 @@ int pthread_setschedparam(pthread_t thid, int policy,
 /* Mutex owner field:
  *
  * This is only used for recursive and errorcheck mutexes. It holds the
- * kernel TID of the owning thread. Note that this works because the Linux
- * kernel _only_ uses 16-bit values for thread ids.
+ * tid of the owning thread. Note that this works because the Linux
+ * kernel _only_ uses 16-bit values for tids.
  *
  * More specifically, it will wrap to 10000 when it reaches over 32768 for
  * application processes. You can check this by running the following inside
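The comment blocks these two hunks touch describe how a mutex packs its whole state into one 32-bit word, with the owner's tid in bits 31-16; that only works because, as the surrounding comment explains, Linux keeps tids within 16 bits. Below is a toy sketch of that packing; the shift and mask are inferred from the bit ranges in the comment, not copied from bionic's MUTEX_* macros.

    // Toy packing/unpacking for the layout the comment describes (owner tid in
    // bits 31-16). The constants are inferred from the comment, not bionic's.
    #include <cstdint>
    #include <cstdio>

    static uint32_t set_owner(uint32_t mvalue, uint32_t tid) {
      return (mvalue & 0x0000FFFFu) | (tid << 16);
    }

    static uint32_t owner_from_bits(uint32_t mvalue) {
      return mvalue >> 16;
    }

    int main() {
      uint32_t mvalue = 0;
      mvalue = set_owner(mvalue, 12345);  // pretend tid 12345 takes ownership
      printf("owner tid = %u\n", owner_from_bits(mvalue));
      return 0;
    }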
@@ -783,7 +783,7 @@ int pthread_mutex_lock_impl(pthread_mutex_t *mutex)
     }
 
     /* Do we already own this recursive or error-check mutex ? */
-    tid = __get_thread()->kernel_id;
+    tid = __get_thread()->tid;
     if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
         return _recursive_increment(mutex, mvalue, mtype);
 

@@ -877,7 +877,7 @@ int pthread_mutex_unlock_impl(pthread_mutex_t *mutex)
     }
 
     /* Do we already own this recursive or error-check mutex ? */
-    tid = __get_thread()->kernel_id;
+    tid = __get_thread()->tid;
     if ( tid != MUTEX_OWNER_FROM_BITS(mvalue) )
         return EPERM;
 

@@ -951,7 +951,7 @@ int pthread_mutex_trylock_impl(pthread_mutex_t *mutex)
     }
 
     /* Do we already own this recursive or error-check mutex ? */
-    tid = __get_thread()->kernel_id;
+    tid = __get_thread()->tid;
     if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
         return _recursive_increment(mutex, mvalue, mtype);
 

@@ -1060,7 +1060,7 @@ int pthread_mutex_lock_timeout_np_impl(pthread_mutex_t *mutex, unsigned msecs)
     }
 
     /* Do we already own this recursive or error-check mutex ? */
-    tid = __get_thread()->kernel_id;
+    tid = __get_thread()->tid;
     if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
         return _recursive_increment(mutex, mvalue, mtype);
 

@@ -1379,7 +1379,7 @@ int pthread_kill(pthread_t tid, int sig)
     int old_errno = errno;
     pthread_internal_t * thread = (pthread_internal_t *)tid;
 
-    ret = tgkill(getpid(), thread->kernel_id, sig);
+    ret = tgkill(getpid(), thread->tid, sig);
     if (ret < 0) {
         ret = errno;
         errno = old_errno;

@@ -1397,7 +1397,7 @@ int pthread_getcpuclockid(pthread_t tid, clockid_t *clockid)
     if (!thread)
         return ESRCH;
 
-    *clockid = CLOCK_THREAD_CPUTIME_ID | (thread->kernel_id << CLOCK_IDTYPE_BITS);
+    *clockid = CLOCK_THREAD_CPUTIME_ID | (thread->tid << CLOCK_IDTYPE_BITS);
     return 0;
 }
 
@@ -1474,25 +1474,18 @@ int pthread_once( pthread_once_t* once_control, void (*init_routine)(void) )
     return 0;
 }
 
-/* Return the kernel thread ID for a pthread.
- * This is only defined for implementations where pthread <-> kernel is 1:1, which this is.
- * Not the same as pthread_getthreadid_np, which is commonly defined to be opaque.
- * Internal, not an NDK API.
- */
-
-pid_t __pthread_gettid(pthread_t thid)
-{
-    pthread_internal_t* thread = (pthread_internal_t*)thid;
-    return thread->kernel_id;
+pid_t __pthread_gettid(pthread_t thid) {
+  pthread_internal_t* thread = (pthread_internal_t*) thid;
+  return thread->tid;
 }
 
-int __pthread_settid(pthread_t thid, pid_t tid)
-{
-    if (thid == 0)
-        return EINVAL;
+int __pthread_settid(pthread_t thid, pid_t tid) {
+  if (thid == 0) {
+    return EINVAL;
+  }
 
-    pthread_internal_t* thread = (pthread_internal_t*)thid;
-    thread->kernel_id = tid;
+  pthread_internal_t* thread = (pthread_internal_t*) thid;
+  thread->tid = tid;
 
   return 0;
 }
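__pthread_gettid() keeps its behavior (it is only meaningful because bionic's pthreads map 1:1 onto kernel tasks) but now reads the renamed tid field, and __pthread_settid() is reshaped in the same style. A hedged sketch of what such a tid is good for, mirroring the tgkill() call pthread_kill() makes above; the raw-syscall wrappers here are illustrative, not bionic's.

    // A kernel tid is what directed-signal syscalls want. Signal 0 means
    // "just check that the (tgid, tid) pair exists", nothing is delivered.
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static pid_t my_gettid() { return static_cast<pid_t>(syscall(SYS_gettid)); }
    static int my_tgkill(pid_t tgid, pid_t tid, int sig) {
      return static_cast<int>(syscall(SYS_tgkill, tgid, tid, sig));
    }

    int main() {
      pid_t tid = my_gettid();
      int rc = my_tgkill(getpid(), tid, 0);
      printf("tgkill(%d, %d, 0) = %d\n", getpid(), tid, rc);
      return 0;
    }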
@@ -41,7 +41,7 @@
 #include "private/ErrnoRestorer.h"
 #include "private/ScopedPthreadMutexLocker.h"
 
-extern "C" int __pthread_clone(int (*fn)(void*), void* child_stack, int flags, void* arg);
+extern "C" int __pthread_clone(void* (*fn)(void*), void* child_stack, int flags, void* arg);
 
 #ifdef __i386__
 #define ATTRIBUTES __attribute__((noinline)) __attribute__((fastcall))

@@ -57,25 +57,23 @@ static pthread_mutex_t gPthreadStackCreationLock = PTHREAD_MUTEX_INITIALIZER;
 
 static pthread_mutex_t gDebuggerNotificationLock = PTHREAD_MUTEX_INITIALIZER;
 
-void __init_tls(void** tls, void* thread) {
-  ((pthread_internal_t*) thread)->tls = tls;
-
+void __init_tls(pthread_internal_t* thread) {
   // Zero-initialize all the slots.
   for (size_t i = 0; i < BIONIC_TLS_SLOTS; ++i) {
-    tls[i] = NULL;
+    thread->tls[i] = NULL;
   }
 
   // Slot 0 must point to itself. The x86 Linux kernel reads the TLS from %fs:0.
-  tls[TLS_SLOT_SELF] = tls;
-  tls[TLS_SLOT_THREAD_ID] = thread;
+  thread->tls[TLS_SLOT_SELF] = thread->tls;
+  thread->tls[TLS_SLOT_THREAD_ID] = thread;
   // GCC looks in the TLS for the stack guard on x86, so copy it there from our global.
-  tls[TLS_SLOT_STACK_GUARD] = (void*) __stack_chk_guard;
+  thread->tls[TLS_SLOT_STACK_GUARD] = (void*) __stack_chk_guard;
 
-  __set_tls((void*) tls);
+  __set_tls(thread->tls);
 }
 
 // This trampoline is called from the assembly _pthread_clone() function.
-extern "C" void __thread_entry(int (*func)(void*), void *arg, void **tls) {
+extern "C" void __thread_entry(void* (*func)(void*), void* arg, void** tls) {
   // Wait for our creating thread to release us. This lets it have time to
   // notify gdb about this thread before we start doing anything.
   // This also provides the memory barrier needed to ensure that all memory
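__init_tls() now takes the pthread_internal_t directly and reaches the slot array through thread->tls. The two interesting slots are the self-pointer in slot 0 (what a %fs:0-style lookup returns) and the thread-descriptor slot that lets __get_thread() recover the current pthread_internal_t. A rough model of that scheme, with placeholder slot indices and types standing in for bionic's private TLS_SLOT_* constants and pthread_internal_t:

    // Rough model only: slot indices and types are placeholders, not bionic's.
    #include <stdio.h>

    struct fake_thread {
      int tid;
      void** tls;
    };

    enum { SLOT_SELF = 0, SLOT_THREAD_ID = 1, SLOT_COUNT = 4 };

    static void init_tls(fake_thread* thread) {
      for (int i = 0; i < SLOT_COUNT; ++i) thread->tls[i] = NULL;
      thread->tls[SLOT_SELF] = thread->tls;    // slot 0 points at the array itself
      thread->tls[SLOT_THREAD_ID] = thread;    // "who am I?" is then one load away
    }

    int main() {
      void* slots[SLOT_COUNT];
      fake_thread t = { 42, slots };
      init_tls(&t);
      // Given only the slot array (what a __get_tls()-style helper would
      // return), the owning thread descriptor can be recovered:
      fake_thread* self = static_cast<fake_thread*>(slots[SLOT_THREAD_ID]);
      printf("tid found via TLS: %d\n", self->tid);
      return 0;
    }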
@@ -85,27 +83,26 @@ extern "C" void __thread_entry(int (*func)(void*), void *arg, void **tls) {
   pthread_mutex_destroy(start_mutex);
 
   pthread_internal_t* thread = (pthread_internal_t*) tls[TLS_SLOT_THREAD_ID];
-  __init_tls(tls, thread);
+  thread->tls = tls;
+  __init_tls(thread);
 
   if ((thread->internal_flags & kPthreadInitFailed) != 0) {
     pthread_exit(NULL);
   }
 
-  int result = func(arg);
-  pthread_exit((void*) result);
+  void* result = func(arg);
+  pthread_exit(result);
 }
 
 __LIBC_ABI_PRIVATE__
-int _init_thread(pthread_internal_t* thread, pid_t kernel_id, bool add_to_thread_list) {
+int _init_thread(pthread_internal_t* thread, bool add_to_thread_list) {
   int error = 0;
 
-  thread->kernel_id = kernel_id;
-
   // Set the scheduling policy/priority of the thread.
   if (thread->attr.sched_policy != SCHED_NORMAL) {
     struct sched_param param;
     param.sched_priority = thread->attr.sched_priority;
-    if (sched_setscheduler(kernel_id, thread->attr.sched_policy, &param) == -1) {
+    if (sched_setscheduler(thread->tid, thread->attr.sched_policy, &param) == -1) {
       // For backwards compatibility reasons, we just warn about failures here.
       // error = errno;
      const char* msg = "pthread_create sched_setscheduler call failed: %s\n";
@@ -198,9 +195,9 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
 
   tls[TLS_SLOT_THREAD_ID] = thread;
 
-  int flags = CLONE_FILES | CLONE_FS | CLONE_VM | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM | CLONE_DETACHED;
-  int tid = __pthread_clone((int(*)(void*))start_routine, tls, flags, arg);
+  int flags = CLONE_FILES | CLONE_FS | CLONE_VM | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM;
 
+  int tid = __pthread_clone(start_routine, tls, flags, arg);
   if (tid < 0) {
     int clone_errno = errno;
     if ((thread->attr.flags & PTHREAD_ATTR_FLAG_USER_STACK) == 0) {

@@ -210,7 +207,9 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
     return clone_errno;
   }
 
-  int init_errno = _init_thread(thread, tid, true);
+  thread->tid = tid;
+
+  int init_errno = _init_thread(thread, true);
   if (init_errno != 0) {
     // Mark the thread detached and let its __thread_entry run to
     // completion. (It'll just exit immediately, cleaning up its resources.)

@@ -222,7 +221,7 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
   // Notify any debuggers about the new thread.
   {
     ScopedPthreadMutexLocker debugger_locker(&gDebuggerNotificationLock);
-    _thread_created_hook(tid);
+    _thread_created_hook(thread->tid);
   }
 
   // Publish the pthread_t and let the thread run.
@@ -38,7 +38,7 @@ typedef struct pthread_internal_t
   struct pthread_internal_t* next;
   struct pthread_internal_t* prev;
   pthread_attr_t attr;
-  pid_t kernel_id;
+  pid_t tid;
   bool allocated_on_heap;
   pthread_cond_t join_cond;
   int join_count;

@@ -55,7 +55,8 @@ typedef struct pthread_internal_t
   char dlerror_buffer[__BIONIC_DLERROR_BUFFER_SIZE];
 } pthread_internal_t;
 
-int _init_thread(pthread_internal_t* thread, pid_t kernel_id, bool add_to_thread_list);
+int _init_thread(pthread_internal_t* thread, bool add_to_thread_list);
+void __init_tls(pthread_internal_t* thread);
 void _pthread_internal_add( pthread_internal_t* thread );
 pthread_internal_t* __get_thread(void);
 
@@ -40,7 +40,7 @@
 
 // This value is not exported by kernel headers.
 #define MAX_TASK_COMM_LEN 16
-#define TASK_COMM_FMT "/proc/self/task/%u/comm"
+#define TASK_COMM_FMT "/proc/self/task/%d/comm"
 
 int pthread_setname_np(pthread_t thread, const char* thread_name) {
   ErrnoRestorer errno_restorer;

@@ -56,14 +56,14 @@ int pthread_setname_np(pthread_t thread, const char* thread_name) {
 
   // Changing our own name is an easy special case.
   if (thread == pthread_self()) {
-    return prctl(PR_SET_NAME, (unsigned long)thread_name, 0, 0, 0) ? errno : 0;
+    return prctl(PR_SET_NAME, thread_name) ? errno : 0;
   }
 
   // Have to change another thread's name.
   pthread_internal_t* t = reinterpret_cast<pthread_internal_t*>(thread);
   char comm_name[sizeof(TASK_COMM_FMT) + 8];
-  snprintf(comm_name, sizeof(comm_name), TASK_COMM_FMT, (unsigned int) t->kernel_id);
-  int fd = open(comm_name, O_RDWR);
+  snprintf(comm_name, sizeof(comm_name), TASK_COMM_FMT, t->tid);
+  int fd = open(comm_name, O_WRONLY);
   if (fd == -1) {
     return errno;
   }
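pthread_setname_np() has two paths: prctl(PR_SET_NAME, ...) when renaming the calling thread, and a write to /proc/self/task/<tid>/comm (now opened O_WRONLY, with the tid formatted as %d since it is a pid_t) for any other thread. Below is a standalone sketch of the self-rename path only; names are limited to MAX_TASK_COMM_LEN (16) bytes including the terminating NUL.

    // Standalone sketch of the "rename self" path using prctl().
    #include <stdio.h>
    #include <string.h>
    #include <sys/prctl.h>

    int main() {
      if (prctl(PR_SET_NAME, "short 1") == -1) {
        perror("prctl(PR_SET_NAME)");
        return 1;
      }
      char name[17];
      memset(name, 0, sizeof(name));
      if (prctl(PR_GET_NAME, name) == -1) {
        perror("prctl(PR_GET_NAME)");
        return 1;
      }
      printf("thread name is now \"%s\"\n", name);
      return 0;
    }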
@@ -78,9 +78,6 @@ enum {
 #define GLOBAL_INIT_THREAD_LOCAL_BUFFER_COUNT 4
 #define BIONIC_TLS_SLOTS 64
 
-/* set the Thread Local Storage, must contain at least BIONIC_TLS_SLOTS pointers */
-extern void __init_tls(void** tls, void* thread_info);
-
 /* syscall only, do not call directly */
 extern int __set_tls(void* ptr);
 
@@ -161,7 +161,7 @@ td_err_e
 td_thr_get_info(td_thrhandle_t const * handle, td_thrinfo_t * info)
 {
     info->ti_tid = handle->tid;
-    info->ti_lid = handle->tid; // Our pthreads uses kernel ids for tids
+    info->ti_lid = handle->tid;
     info->ti_state = TD_THR_SLEEP; /* XXX this needs to be read from /proc/<pid>/task/<tid>.
                                       This is only used to see if the thread is a zombie or not */
     return TD_OK;
@@ -187,7 +187,7 @@ TEST(pthread, pthread_sigmask) {
 }
 
 #if __BIONIC__
-extern "C" int __pthread_clone(int (*fn)(void*), void* child_stack, int flags, void* arg);
+extern "C" int __pthread_clone(void* (*fn)(void*), void* child_stack, int flags, void* arg);
 TEST(pthread, __pthread_clone) {
   uintptr_t fake_child_stack[16];
   errno = 0;

@@ -210,9 +210,20 @@ TEST(pthread, pthread_setname_np__self) {
 
 #if __BIONIC__ // Not all build servers have a new enough glibc? TODO: remove when they're on gprecise.
 TEST(pthread, pthread_setname_np__other) {
-  pthread_t t1;
-  ASSERT_EQ(0, pthread_create(&t1, NULL, SleepFn, reinterpret_cast<void*>(5)));
-  ASSERT_EQ(0, pthread_setname_np(t1, "short 2"));
+  // Emulator kernels don't currently support setting the name of other threads.
+  char* filename = NULL;
+  asprintf(&filename, "/proc/self/task/%d/comm", gettid());
+  struct stat sb;
+  bool has_comm = (stat(filename, &sb) != -1);
+  free(filename);
+
+  if (has_comm) {
+    pthread_t t1;
+    ASSERT_EQ(0, pthread_create(&t1, NULL, SleepFn, reinterpret_cast<void*>(5)));
+    ASSERT_EQ(0, pthread_setname_np(t1, "short 2"));
+  } else {
+    fprintf(stderr, "skipping test: this kernel doesn't have /proc/self/task/tid/comm files!\n");
+  }
 }
 #endif
 