Merge "Reserve enough user request stack space in pthread_create."

Yabin Cui 2015-01-02 22:37:55 +00:00 committed by Gerrit Code Review
commit 541b0b187d
5 changed files with 24 additions and 16 deletions

View File

@@ -106,25 +106,25 @@ int __init_thread(pthread_internal_t* thread, bool add_to_thread_list) {
   return error;
 }
 
-static void* __create_thread_stack(const pthread_attr_t& attr) {
+static void* __create_thread_stack(size_t stack_size, size_t guard_size) {
   // Create a new private anonymous map.
   int prot = PROT_READ | PROT_WRITE;
   int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
-  void* stack = mmap(NULL, attr.stack_size, prot, flags, -1, 0);
+  void* stack = mmap(NULL, stack_size, prot, flags, -1, 0);
   if (stack == MAP_FAILED) {
     __libc_format_log(ANDROID_LOG_WARN,
                       "libc",
                       "pthread_create failed: couldn't allocate %zd-byte stack: %s",
-                      attr.stack_size, strerror(errno));
+                      stack_size, strerror(errno));
     return NULL;
   }
 
   // Set the guard region at the end of the stack to PROT_NONE.
-  if (mprotect(stack, attr.guard_size, PROT_NONE) == -1) {
+  if (mprotect(stack, guard_size, PROT_NONE) == -1) {
     __libc_format_log(ANDROID_LOG_WARN, "libc",
                       "pthread_create failed: couldn't mprotect PROT_NONE %zd-byte stack guard region: %s",
-                      attr.guard_size, strerror(errno));
-    munmap(stack, attr.stack_size);
+                      guard_size, strerror(errno));
+    munmap(stack, stack_size);
     return NULL;
   }
@@ -132,30 +132,36 @@ static void* __create_thread_stack(const pthread_attr_t& attr) {
 }
 
 static int __allocate_thread(pthread_attr_t* attr, pthread_internal_t** threadp, void** child_stack) {
+  size_t allocate_stack_size;
+  uint8_t* stack_top;
+
   if (attr->stack_base == NULL) {
     // The caller didn't provide a stack, so allocate one.
     // Make sure the stack size and guard size are multiples of PAGE_SIZE.
-    attr->stack_size = BIONIC_ALIGN(attr->stack_size, PAGE_SIZE);
+    allocate_stack_size = BIONIC_ALIGN(attr->stack_size + sizeof(pthread_internal_t), PAGE_SIZE);
     attr->guard_size = BIONIC_ALIGN(attr->guard_size, PAGE_SIZE);
-    attr->stack_base = __create_thread_stack(*attr);
+    attr->stack_base = __create_thread_stack(allocate_stack_size, attr->guard_size);
     if (attr->stack_base == NULL) {
       return EAGAIN;
     }
+    stack_top = reinterpret_cast<uint8_t*>(attr->stack_base) + allocate_stack_size;
   } else {
     // The caller did provide a stack, so remember we're not supposed to free it.
     attr->flags |= PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK;
+    allocate_stack_size = 0;
+    stack_top = reinterpret_cast<uint8_t*>(attr->stack_base) + attr->stack_size;
   }
 
   // Thread stack is used for two sections:
   //   pthread_internal_t.
   //   regular stack, from top to down.
-  uint8_t* stack_top = reinterpret_cast<uint8_t*>(attr->stack_base) + attr->stack_size;
   stack_top -= sizeof(pthread_internal_t);
   pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(stack_top);
 
   // No need to check stack_top alignment. The size of pthread_internal_t is 16-bytes aligned,
   // and user allocated stack is guaranteed by pthread_attr_setstack.
+  thread->allocated_stack_size = allocate_stack_size;
   thread->attr = *attr;
   __init_tls(thread);
@@ -243,7 +249,7 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
     // reminder that you can't rewrite this function to use a ScopedPthreadMutexLocker.
     pthread_mutex_unlock(&thread->startup_handshake_mutex);
     if (!thread->user_allocated_stack()) {
-      munmap(thread->attr.stack_base, thread->attr.stack_size);
+      munmap(thread->attr.stack_base, thread->allocated_stack_size);
     }
     __libc_format_log(ANDROID_LOG_WARN, "libc", "pthread_create failed: clone failed: %s", strerror(errno));
     return clone_errno;
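
Note: the heart of the change above is the sizing arithmetic in __allocate_thread(). The mapping is now rounded up from the requested stack size plus sizeof(pthread_internal_t), so carving pthread_internal_t off the top of the mapping no longer eats into the space the caller asked for. The following is a standalone sketch of that arithmetic, not bionic source: align_up() stands in for BIONIC_ALIGN, the 4 KiB page size and 1 KiB sizeof(pthread_internal_t) are assumed values for illustration, and the guard region is ignored.

// Standalone sketch (not bionic source). Assumed values: 4 KiB pages and a
// hypothetical 1 KiB pthread_internal_t; the guard region is ignored.
#include <stddef.h>
#include <stdio.h>

// Stand-in for bionic's BIONIC_ALIGN: round value up to a power-of-two boundary.
static size_t align_up(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t page_size = 4096;       // assumed PAGE_SIZE
  const size_t internal_size = 1024;   // hypothetical sizeof(pthread_internal_t)
  const size_t requested = 32 * 1024;  // a page-aligned request shows the old shortfall

  // Old scheme: round only the request, then carve pthread_internal_t out of the top.
  size_t old_mapping = align_up(requested, page_size);
  size_t old_usable  = old_mapping - internal_size;

  // New scheme: reserve the request plus pthread_internal_t before rounding.
  size_t new_mapping = align_up(requested + internal_size, page_size);
  size_t new_usable  = new_mapping - internal_size;

  printf("old: mmap %zu bytes, %zu usable (less than requested)\n", old_mapping, old_usable);
  printf("new: mmap %zu bytes, %zu usable (at least requested)\n", new_mapping, new_usable);
  return 0;
}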

View File

@@ -89,7 +89,7 @@ void pthread_exit(void* return_value) {
   // Keep track of what we need to know about the stack before we lose the pthread_internal_t.
   void* stack_base = thread->attr.stack_base;
-  size_t stack_size = thread->attr.stack_size;
+  size_t stack_size = thread->allocated_stack_size;
   bool free_stack = false;
 
   pthread_mutex_lock(&g_thread_list_lock);

View File

@ -86,6 +86,9 @@ struct pthread_internal_t {
pthread_mutex_t startup_handshake_mutex; pthread_mutex_t startup_handshake_mutex;
/* Store real allocated stack size, including thread stack and pthread_internal_t. */
int allocated_stack_size;
void* tls[BIONIC_TLS_SLOTS]; void* tls[BIONIC_TLS_SLOTS];
/* /*

View File

@@ -53,9 +53,9 @@ void _pthread_internal_remove_locked(pthread_internal_t* thread, bool free_thread) {
   // For threads using user allocated stack (including the main thread), the pthread_internal_t
   // can't be freed since it is on the stack.
-  if (free_thread && !(thread->attr.flags & PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK)) {
-    // Use one munmap to free the whole thread stack, including pthread_internal_t.
-    munmap(thread->attr.stack_base, thread->attr.stack_size);
+  if (free_thread && !thread->user_allocated_stack()) {
+    // Use one munmap to free the allocated stack, including the thread stack and pthread_internal_t.
+    munmap(thread->attr.stack_base, thread->allocated_stack_size);
   }
 }

View File

@@ -644,8 +644,7 @@ TEST(pthread, pthread_attr_setstacksize) {
   ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
   ASSERT_EQ(32*1024U + 1, stack_size);
 #if defined(__BIONIC__)
-  // Bionic rounds up, which is what POSIX allows.
-  ASSERT_EQ(GetActualStackSize(attributes), (32 + 4)*1024U);
+  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U + 1);
 #else // __BIONIC__
   // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
   ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
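
Note: with the fix, bionic no longer rewrites attr->stack_size when it rounds the allocation, so the stack size reported for the new thread is exactly what the caller requested (32*1024 + 1 here) instead of the padded, page-rounded 36 KiB the old assertion expected. GetActualStackSize is the test suite's own helper and its body is not part of this diff; a minimal sketch of how such a helper could work, using the non-portable but real pthread_getattr_np extension, might look like this (hypothetical names, not the code in tests/pthread_test.cpp):

// Hypothetical helper sketch: spawn a thread with the given attributes and
// report the stack size that thread sees for itself.
#include <pthread.h>
#include <stddef.h>

static void* ReportStackSize(void* arg) {
  size_t* out = static_cast<size_t*>(arg);
  pthread_attr_t attr;
  // pthread_getattr_np is a bionic/glibc extension (needs _GNU_SOURCE on glibc).
  pthread_getattr_np(pthread_self(), &attr);
  pthread_attr_getstacksize(&attr, out);
  pthread_attr_destroy(&attr);
  return NULL;
}

static size_t GetStackSizeSeenByThread(const pthread_attr_t& attributes) {
  size_t size = 0;
  pthread_t t;
  pthread_create(&t, &attributes, ReportStackSize, &size);
  pthread_join(t, NULL);
  return size;
}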