* commit '8e8daba18540fc7d5d11b6fb1d85234049086f29': Remove PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK.
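The gist of the change: rather than tracking "was this stack user-allocated?" as a pthread attribute flag, each thread now records how many bytes were mapped for it, and an mmap_size of zero means there is nothing for the library to unmap. A minimal sketch of that convention (the struct below is a hypothetical stand-in for pthread_internal_t, trimmed to the two fields involved):

    #include <stddef.h>
    #include <sys/mman.h>

    // Hypothetical stand-in for pthread_internal_t, trimmed to the fields this
    // change cares about.
    struct thread_record_t {
      void* stack_base;
      size_t mmap_size;  // 0 => caller-supplied stack (or the main thread): nothing to munmap
    };

    // Mirrors the cleanup pattern the diff adopts everywhere the old flag was tested.
    static void free_thread_space(thread_record_t* thread) {
      if (thread->mmap_size != 0) {
        // One munmap() releases the whole mapped space: guard region, stack,
        // and the pthread_internal_t stored at its top.
        munmap(thread->stack_base, thread->mmap_size);
      }
    }

The first hunk below sets mmap_size = 0 for the main thread for exactly this reason: its stack and pthread_internal_t were never mmap()ed by the library.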
@@ -85,8 +85,10 @@ void __libc_init_tls(KernelArgumentBlock& args) {
   // because things like environment variables with global scope live on it.
   // We also can't free the pthread_internal_t itself, since that lives on the main
   // thread's stack rather than on the heap.
+  // The main thread has no mmap allocated space for stack or pthread_internal_t.
+  main_thread.mmap_size = 0;
   pthread_attr_init(&main_thread.attr);
-  main_thread.attr.flags = PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK | PTHREAD_ATTR_FLAG_MAIN_THREAD;
+  main_thread.attr.flags = PTHREAD_ATTR_FLAG_MAIN_THREAD;
   main_thread.attr.guard_size = 0; // The main thread has no guard page.
   main_thread.attr.stack_size = 0; // User code should never see this; we'll compute it when asked.
   // TODO: the main thread's sched_policy and sched_priority need to be queried.
@@ -52,8 +52,9 @@ extern "C" int __isthreaded;

 // This code is used both by each new pthread and the code that initializes the main thread.
 void __init_tls(pthread_internal_t* thread) {
-  if (thread->user_allocated_stack()) {
-    // We don't know where the user got their stack, so assume the worst and zero the TLS area.
+  if (thread->mmap_size == 0) {
+    // If the TLS area was not allocated by mmap(), it may not have been cleared to zero.
+    // So assume the worst and zero the TLS area.
     memset(&thread->tls[0], 0, BIONIC_TLS_SLOTS * sizeof(void*));
   }

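Note on the __init_tls() change above: anonymous mmap() memory arrives zero-filled from the kernel, so only a caller-supplied stack (mmap_size == 0) can hold stale data in the TLS slots. A self-contained sketch of that reasoning, with a stand-in constant since BIONIC_TLS_SLOTS is defined elsewhere in bionic:

    #include <stddef.h>
    #include <string.h>

    static const size_t kTlsSlots = 64;  // stand-in for BIONIC_TLS_SLOTS

    struct thread_record_t {            // stand-in for pthread_internal_t
      size_t mmap_size;
      void* tls[kTlsSlots];
    };

    static void init_tls_sketch(thread_record_t* thread) {
      if (thread->mmap_size == 0) {
        // The TLS area lives in memory the caller handed us, which may hold
        // arbitrary old data, so clear it. Freshly mmap()ed anonymous pages
        // are already zero, so mapped threads skip the memset.
        memset(&thread->tls[0], 0, kTlsSlots * sizeof(void*));
      }
    }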
@@ -106,62 +107,62 @@ int __init_thread(pthread_internal_t* thread, bool add_to_thread_list) {
   return error;
 }

-static void* __create_thread_stack(size_t stack_size, size_t guard_size) {
+static void* __create_thread_mapped_space(size_t mmap_size, size_t stack_guard_size) {
   // Create a new private anonymous map.
   int prot = PROT_READ | PROT_WRITE;
   int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
-  void* stack = mmap(NULL, stack_size, prot, flags, -1, 0);
-  if (stack == MAP_FAILED) {
+  void* space = mmap(NULL, mmap_size, prot, flags, -1, 0);
+  if (space == MAP_FAILED) {
     __libc_format_log(ANDROID_LOG_WARN,
                       "libc",
-                      "pthread_create failed: couldn't allocate %zd-byte stack: %s",
-                      stack_size, strerror(errno));
+                      "pthread_create failed: couldn't allocate %zu-bytes mapped space: %s",
+                      mmap_size, strerror(errno));
     return NULL;
   }

-  // Set the guard region at the end of the stack to PROT_NONE.
-  if (mprotect(stack, guard_size, PROT_NONE) == -1) {
+  // Stack is at the lower end of mapped space, stack guard region is at the lower end of stack.
+  // Set the stack guard region to PROT_NONE, so we can detect thread stack overflow.
+  if (mprotect(space, stack_guard_size, PROT_NONE) == -1) {
     __libc_format_log(ANDROID_LOG_WARN, "libc",
-                      "pthread_create failed: couldn't mprotect PROT_NONE %zd-byte stack guard region: %s",
-                      guard_size, strerror(errno));
-    munmap(stack, stack_size);
+                      "pthread_create failed: couldn't mprotect PROT_NONE %zu-byte stack guard region: %s",
+                      stack_guard_size, strerror(errno));
+    munmap(space, mmap_size);
     return NULL;
   }

-  return stack;
+  return space;
 }

 static int __allocate_thread(pthread_attr_t* attr, pthread_internal_t** threadp, void** child_stack) {
-  size_t allocate_stack_size;
+  size_t mmap_size;
   uint8_t* stack_top;

   if (attr->stack_base == NULL) {
     // The caller didn't provide a stack, so allocate one.
     // Make sure the stack size and guard size are multiples of PAGE_SIZE.
-    allocate_stack_size = BIONIC_ALIGN(attr->stack_size + sizeof(pthread_internal_t), PAGE_SIZE);
+    mmap_size = BIONIC_ALIGN(attr->stack_size + sizeof(pthread_internal_t), PAGE_SIZE);
     attr->guard_size = BIONIC_ALIGN(attr->guard_size, PAGE_SIZE);
-    attr->stack_base = __create_thread_stack(allocate_stack_size, attr->guard_size);
+    attr->stack_base = __create_thread_mapped_space(mmap_size, attr->guard_size);
     if (attr->stack_base == NULL) {
       return EAGAIN;
     }
-    stack_top = reinterpret_cast<uint8_t*>(attr->stack_base) + allocate_stack_size;
+    stack_top = reinterpret_cast<uint8_t*>(attr->stack_base) + mmap_size;
   } else {
-    // The caller did provide a stack, so remember we're not supposed to free it.
-    attr->flags |= PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK;
-    allocate_stack_size = 0;
+    // Remember the mmap size is zero and we don't need to free it.
+    mmap_size = 0;
     stack_top = reinterpret_cast<uint8_t*>(attr->stack_base) + attr->stack_size;
   }

-  // Thread stack is used for two sections:
-  // pthread_internal_t.
-  // regular stack, from top to down.
+  // Mapped space(or user allocated stack) is used for:
+  //   thread_internal_t (including tls array)
+  //   thread stack (including guard page)
   stack_top -= sizeof(pthread_internal_t);
   pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(stack_top);

+  // No need to check stack_top alignment. The size of pthread_internal_t is 16-bytes aligned,
+  // and user allocated stack is guaranteed by pthread_attr_setstack.
+
-  thread->allocated_stack_size = allocate_stack_size;
+  thread->mmap_size = mmap_size;
   thread->attr = *attr;
   __init_tls(thread);

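The new comments in __allocate_thread() describe a single mapping that holds everything: the guard region at the lowest addresses, the usable stack above it, and the pthread_internal_t at the very top. A hedged sketch of that layout, mirroring __create_thread_mapped_space() and the stack_top arithmetic from the hunk (alignment with BIONIC_ALIGN and the error logging are left out):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    struct thread_record_t { size_t mmap_size; };  // stand-in for pthread_internal_t

    // Low addresses: [ guard (PROT_NONE) | stack, grows down | thread_record_t ] : high addresses
    static thread_record_t* allocate_thread_sketch(size_t mmap_size, size_t guard_size) {
      void* space = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (space == MAP_FAILED) return NULL;

      // Overflowing the stack runs into this PROT_NONE region and faults
      // instead of silently corrupting other memory.
      if (mprotect(space, guard_size, PROT_NONE) == -1) {
        munmap(space, mmap_size);
        return NULL;
      }

      // The thread record sits at the top of the mapping; the stack grows down
      // from just below it toward the guard page.
      uint8_t* stack_top = reinterpret_cast<uint8_t*>(space) + mmap_size;
      stack_top -= sizeof(thread_record_t);
      thread_record_t* thread = reinterpret_cast<thread_record_t*>(stack_top);
      thread->mmap_size = mmap_size;
      return thread;
    }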
@@ -248,8 +249,8 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
     // be unblocked, but we're about to unmap the memory the mutex is stored in, so this serves as a
     // reminder that you can't rewrite this function to use a ScopedPthreadMutexLocker.
     pthread_mutex_unlock(&thread->startup_handshake_mutex);
-    if (!thread->user_allocated_stack()) {
-      munmap(thread->attr.stack_base, thread->allocated_stack_size);
+    if (thread->mmap_size != 0) {
+      munmap(thread->attr.stack_base, thread->mmap_size);
     }
     __libc_format_log(ANDROID_LOG_WARN, "libc", "pthread_create failed: clone failed: %s", strerror(errno));
     return clone_errno;
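The context around this hunk encodes an ordering constraint worth spelling out: the startup-handshake mutex lives inside the mapping that is about to be freed, so it must be unlocked explicitly before the munmap; a scope-based ScopedPthreadMutexLocker would try to unlock it after the memory is gone. A sketch of that failure-path ordering under the new mmap_size convention (names simplified, not the actual bionic code):

    #include <pthread.h>
    #include <stddef.h>
    #include <sys/mman.h>

    struct thread_record_t {
      pthread_mutex_t startup_handshake_mutex;  // stored inside the mapped space
      void* stack_base;
      size_t mmap_size;
    };

    static int fail_thread_creation(thread_record_t* thread, int clone_errno) {
      // Unlock first: after the munmap below the mutex's storage no longer
      // exists, so unlocking from a destructor would touch freed memory.
      pthread_mutex_unlock(&thread->startup_handshake_mutex);
      if (thread->mmap_size != 0) {
        // In bionic the thread record itself also lives inside this mapping.
        munmap(thread->stack_base, thread->mmap_size);
      }
      return clone_errno;
    }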
@@ -87,30 +87,23 @@ void pthread_exit(void* return_value) {
     thread->alternate_signal_stack = NULL;
   }

-  // Keep track of what we need to know about the stack before we lose the pthread_internal_t.
-  void* stack_base = thread->attr.stack_base;
-  size_t stack_size = thread->allocated_stack_size;
-  bool free_stack = false;
+  bool free_mapped_space = false;
   pthread_mutex_lock(&g_thread_list_lock);
   if ((thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) != 0) {
-    // The thread is detached, so we can free the pthread_internal_t.
+    // The thread is detached, no one will use pthread_internal_t after pthread_exit.
+    // So we can free mapped space, which includes pthread_internal_t and thread stack.
     // First make sure that the kernel does not try to clear the tid field
     // because we'll have freed the memory before the thread actually exits.
     __set_tid_address(NULL);

-    // pthread_internal_t is freed below with stack, not here.
     _pthread_internal_remove_locked(thread, false);
-    if (!thread->user_allocated_stack()) {
-      free_stack = true;
-    }
+    free_mapped_space = true;
   }
   pthread_mutex_unlock(&g_thread_list_lock);

-  // Detached threads exit with stack teardown, and everything deallocated here.
-  // Threads that can be joined exit but leave their stacks for the pthread_join caller to clean up.
-  if (free_stack) {
-    // We need to munmap the stack we're running on before calling exit.
+  if (free_mapped_space && thread->mmap_size != 0) {
+    // We need to free mapped space for detached threads when they exit.
     // That's not something we can do in C.

     // We don't want to take a signal after we've unmapped the stack.
@@ -119,8 +112,10 @@ void pthread_exit(void* return_value) {
     sigfillset(&mask);
     sigprocmask(SIG_SETMASK, &mask, NULL);

-    _exit_with_stack_teardown(stack_base, stack_size);
-  }
-
-  __exit(0);
+    _exit_with_stack_teardown(thread->attr.stack_base, thread->mmap_size);
+  } else {
+    // No need to free mapped space. Either there was no space mapped, or it is left for
+    // the pthread_join caller to clean up.
+    __exit(0);
+  }
 }
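One more note on the pthread_exit() hunks: once a detached thread unmaps its own stack, no further C code can run on that thread, which is why the final unmap-and-exit is delegated to the assembly routine _exit_with_stack_teardown and why all signals are blocked first. A sketch of the control flow only; the prototype below is an assumption based on the two-argument calls in the diff, and the real declaration lives in bionic's private headers:

    #include <signal.h>
    #include <stddef.h>

    // Assumed prototype, modelled on the call sites in the diff. The real
    // routine is per-architecture assembly: it issues the munmap() and exit
    // system calls without ever touching the stack it just freed.
    extern "C" void _exit_with_stack_teardown(void* stack_base, size_t stack_size) __attribute__((noreturn));

    static void exit_detached_thread(void* stack_base, size_t mmap_size) {
      // Block everything: a signal delivered after the stack is unmapped would
      // have no valid stack for its handler to run on.
      sigset_t mask;
      sigfillset(&mask);
      sigprocmask(SIG_SETMASK, &mask, NULL);

      _exit_with_stack_teardown(stack_base, mmap_size);
    }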
@@ -35,11 +35,8 @@
 /* Has the thread been detached by a pthread_join or pthread_detach call? */
 #define PTHREAD_ATTR_FLAG_DETACHED 0x00000001

-/* Was the thread's stack allocated by the user rather than by us? */
-#define PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK 0x00000002
-
 /* Has the thread been joined by another thread? */
-#define PTHREAD_ATTR_FLAG_JOINED 0x00000004
+#define PTHREAD_ATTR_FLAG_JOINED 0x00000002

 /* Is this the main thread? */
 #define PTHREAD_ATTR_FLAG_MAIN_THREAD 0x80000000
@@ -70,10 +67,6 @@ struct pthread_internal_t {
     return (*cached_pid != 0);
   }

-  bool user_allocated_stack() {
-    return (attr.flags & PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK) != 0;
-  }
-
   pthread_attr_t attr;

   __pthread_cleanup_t* cleanup_stack;
@@ -86,8 +79,7 @@ struct pthread_internal_t {

   pthread_mutex_t startup_handshake_mutex;

-  /* Store real allocated stack size, including thread stack and pthread_internal_t. */
-  int allocated_stack_size;
+  size_t mmap_size;

   void* tls[BIONIC_TLS_SLOTS];

@@ -51,11 +51,9 @@ void _pthread_internal_remove_locked(pthread_internal_t* thread, bool free_threa
     g_thread_list = thread->next;
   }

-  // For threads using user allocated stack (including the main thread), the pthread_internal_t
-  // can't be freed since it is on the stack.
-  if (free_thread && !thread->user_allocated_stack()) {
-    // Use one munmap to free allocated stack size, including thread stack and pthread_internal_t.
-    munmap(thread->attr.stack_base, thread->allocated_stack_size);
+  if (free_thread && thread->mmap_size != 0) {
+    // Free mapped space, including thread stack and pthread_internal_t.
+    munmap(thread->attr.stack_base, thread->mmap_size);
   }
 }
