commit b2da973fe9
* commit '6eb0fe2b02bcc7d82ba23df6cfaef0369e7b068b': Switch to g_ for globals.
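Every hunk below applies the same mechanical rename: bionic's file-scope globals move from the old `gCamelCase` / `sCamelCase` style to `g_` plus lower_snake_case, and each renamed identifier is updated at its uses. A minimal sketch of the convention, using hypothetical names rather than declarations taken from the patch:

    #include <pthread.h>

    // Hypothetical example only; not a declaration from this patch.
    // Old convention: 'g' (or 's') prefix + CamelCase.
    static pthread_mutex_t gExampleLock = PTHREAD_MUTEX_INITIALIZER;
    // New convention: 'g_' prefix + lower_snake_case.
    static pthread_mutex_t g_example_lock = PTHREAD_MUTEX_INITIALIZER;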
@@ -25,13 +25,13 @@
 
 #include <inttypes.h>
 
-static int64_t gBytesProcessed;
-static int64_t gBenchmarkTotalTimeNs;
-static int64_t gBenchmarkStartTimeNs;
+static int64_t g_bytes_processed;
+static int64_t g_benchmark_total_time_ns;
+static int64_t g_benchmark_start_time_ns;
 
 typedef std::map<std::string, ::testing::Benchmark*> BenchmarkMap;
 typedef BenchmarkMap::iterator BenchmarkMapIt;
-static BenchmarkMap gBenchmarks;
+static BenchmarkMap g_benchmarks;
 
 static int Round(int n) {
 int base = 1;
@@ -96,7 +96,7 @@ void Benchmark::Register(const char* name, void (*fn)(int), void (*fn_range)(int
 exit(EXIT_FAILURE);
 }
 
-gBenchmarks.insert(std::make_pair(name, this));
+g_benchmarks.insert(std::make_pair(name, this));
 }
 
 void Benchmark::Run() {
@@ -114,16 +114,16 @@ void Benchmark::Run() {
 }
 
 void Benchmark::RunRepeatedlyWithArg(int iterations, int arg) {
-gBytesProcessed = 0;
-gBenchmarkTotalTimeNs = 0;
-gBenchmarkStartTimeNs = NanoTime();
+g_bytes_processed = 0;
+g_benchmark_total_time_ns = 0;
+g_benchmark_start_time_ns = NanoTime();
 if (fn_ != NULL) {
 fn_(iterations);
 } else {
 fn_range_(iterations, arg);
 }
-if (gBenchmarkStartTimeNs != 0) {
-gBenchmarkTotalTimeNs += NanoTime() - gBenchmarkStartTimeNs;
+if (g_benchmark_start_time_ns != 0) {
+g_benchmark_total_time_ns += NanoTime() - g_benchmark_start_time_ns;
 }
 }
 
@@ -131,12 +131,12 @@ void Benchmark::RunWithArg(int arg) {
 // run once in case it's expensive
 int iterations = 1;
 RunRepeatedlyWithArg(iterations, arg);
-while (gBenchmarkTotalTimeNs < 1e9 && iterations < 1e9) {
+while (g_benchmark_total_time_ns < 1e9 && iterations < 1e9) {
 int last = iterations;
-if (gBenchmarkTotalTimeNs/iterations == 0) {
+if (g_benchmark_total_time_ns/iterations == 0) {
 iterations = 1e9;
 } else {
-iterations = 1e9 / (gBenchmarkTotalTimeNs/iterations);
+iterations = 1e9 / (g_benchmark_total_time_ns/iterations);
 }
 iterations = std::max(last + 1, std::min(iterations + iterations/2, 100*last));
 iterations = Round(iterations);
@@ -145,9 +145,9 @@ void Benchmark::RunWithArg(int arg) {
 
 char throughput[100];
 throughput[0] = '\0';
-if (gBenchmarkTotalTimeNs > 0 && gBytesProcessed > 0) {
-double mib_processed = static_cast<double>(gBytesProcessed)/1e6;
-double seconds = static_cast<double>(gBenchmarkTotalTimeNs)/1e9;
+if (g_benchmark_total_time_ns > 0 && g_bytes_processed > 0) {
+double mib_processed = static_cast<double>(g_bytes_processed)/1e6;
+double seconds = static_cast<double>(g_benchmark_total_time_ns)/1e9;
 snprintf(throughput, sizeof(throughput), " %8.2f MiB/s", mib_processed/seconds);
 }
 
@@ -165,37 +165,37 @@ void Benchmark::RunWithArg(int arg) {
 }
 
 printf("%-20s %10d %10" PRId64 "%s\n", full_name,
-iterations, gBenchmarkTotalTimeNs/iterations, throughput);
+iterations, g_benchmark_total_time_ns/iterations, throughput);
 fflush(stdout);
 }
 
 } // namespace testing
 
 void SetBenchmarkBytesProcessed(int64_t x) {
-gBytesProcessed = x;
+g_bytes_processed = x;
 }
 
 void StopBenchmarkTiming() {
-if (gBenchmarkStartTimeNs != 0) {
-gBenchmarkTotalTimeNs += NanoTime() - gBenchmarkStartTimeNs;
+if (g_benchmark_start_time_ns != 0) {
+g_benchmark_total_time_ns += NanoTime() - g_benchmark_start_time_ns;
 }
-gBenchmarkStartTimeNs = 0;
+g_benchmark_start_time_ns = 0;
 }
 
 void StartBenchmarkTiming() {
-if (gBenchmarkStartTimeNs == 0) {
-gBenchmarkStartTimeNs = NanoTime();
+if (g_benchmark_start_time_ns == 0) {
+g_benchmark_start_time_ns = NanoTime();
 }
 }
 
 int main(int argc, char* argv[]) {
-if (gBenchmarks.empty()) {
+if (g_benchmarks.empty()) {
 fprintf(stderr, "No benchmarks registered!\n");
 exit(EXIT_FAILURE);
 }
 
 bool need_header = true;
-for (BenchmarkMapIt it = gBenchmarks.begin(); it != gBenchmarks.end(); ++it) {
+for (BenchmarkMapIt it = g_benchmarks.begin(); it != g_benchmarks.end(); ++it) {
 ::testing::Benchmark* b = it->second;
 if (b->ShouldRun(argc, argv)) {
 if (need_header) {
@@ -210,7 +210,7 @@ int main(int argc, char* argv[]) {
 if (need_header) {
 fprintf(stderr, "No matching benchmarks!\n");
 fprintf(stderr, "Available benchmarks:\n");
-for (BenchmarkMapIt it = gBenchmarks.begin(); it != gBenchmarks.end(); ++it) {
+for (BenchmarkMapIt it = g_benchmarks.begin(); it != g_benchmarks.end(); ++it) {
 fprintf(stderr, " %s\n", it->second->Name());
 }
 exit(EXIT_FAILURE);
@@ -50,30 +50,30 @@ typedef struct _Unwind_Context __unwind_context;
 typedef _Unwind_Context __unwind_context;
 #endif
 
-static mapinfo_t* gMapInfo = NULL;
-static void* gDemangler;
+static mapinfo_t* g_map_info = NULL;
+static void* g_demangler;
 typedef char* (*DemanglerFn)(const char*, char*, size_t*, int*);
-static DemanglerFn gDemanglerFn = NULL;
+static DemanglerFn g_demangler_fn = NULL;
 
 __LIBC_HIDDEN__ void backtrace_startup() {
-gMapInfo = mapinfo_create(getpid());
-gDemangler = dlopen("libgccdemangle.so", RTLD_NOW);
-if (gDemangler != NULL) {
-void* sym = dlsym(gDemangler, "__cxa_demangle");
-gDemanglerFn = reinterpret_cast<DemanglerFn>(sym);
+g_map_info = mapinfo_create(getpid());
+g_demangler = dlopen("libgccdemangle.so", RTLD_NOW);
+if (g_demangler != NULL) {
+void* sym = dlsym(g_demangler, "__cxa_demangle");
+g_demangler_fn = reinterpret_cast<DemanglerFn>(sym);
 }
 }
 
 __LIBC_HIDDEN__ void backtrace_shutdown() {
-mapinfo_destroy(gMapInfo);
-dlclose(gDemangler);
+mapinfo_destroy(g_map_info);
+dlclose(g_demangler);
 }
 
 static char* demangle(const char* symbol) {
-if (gDemanglerFn == NULL) {
+if (g_demangler_fn == NULL) {
 return NULL;
 }
-return (*gDemanglerFn)(symbol, NULL, NULL, NULL);
+return (*g_demangler_fn)(symbol, NULL, NULL, NULL);
 }
 
 struct stack_crawl_state_t {
@@ -147,7 +147,7 @@ __LIBC_HIDDEN__ void log_backtrace(uintptr_t* frames, size_t frame_count) {
 }
 
 uintptr_t rel_pc;
-const mapinfo_t* mi = (gMapInfo != NULL) ? mapinfo_find(gMapInfo, frames[i], &rel_pc) : NULL;
+const mapinfo_t* mi = (g_map_info != NULL) ? mapinfo_find(g_map_info, frames[i], &rel_pc) : NULL;
 const char* soname = (mi != NULL) ? mi->name : info.dli_fname;
 if (soname == NULL) {
 soname = "<unknown>";
@@ -77,7 +77,7 @@ static size_t get_main_thread_stack_size() {
 * apply to linker-private copies and will not be visible from libc later on.
 *
 * Note: this function creates a pthread_internal_t for the initial thread and
-* stores the pointer in TLS, but does not add it to pthread's gThreadList. This
+* stores the pointer in TLS, but does not add it to pthread's thread list. This
 * has to be done later from libc itself (see __libc_init_common).
 *
 * This function also stores a pointer to the kernel argument block in a TLS slot to be
@@ -45,7 +45,7 @@
 #include <time.h>
 #include <unistd.h>
 
-static pthread_mutex_t gAbortMsgLock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t g_abort_msg_lock = PTHREAD_MUTEX_INITIALIZER;
 
 __LIBC_HIDDEN__ abort_msg_t** __abort_message_ptr; // Accessible to __libc_init_common.
 
@@ -643,7 +643,7 @@ void __libc_fatal(const char* format, ...) {
 }
 
 void __android_set_abort_message(const char* msg) {
-ScopedPthreadMutexLocker locker(&gAbortMsgLock);
+ScopedPthreadMutexLocker locker(&g_abort_msg_lock);
 
 if (__abort_message_ptr == NULL) {
 // We must have crashed _very_ early.
@@ -36,43 +36,43 @@ struct __locale_t {
 // Because we only support one locale, these are just tokens with no data.
 };
 
-static pthread_once_t gLocaleOnce = PTHREAD_ONCE_INIT;
-static lconv gLocale;
+static pthread_once_t g_locale_once = PTHREAD_ONCE_INIT;
+static lconv g_locale;
 
 // We don't use pthread_once for this so that we know when the resource (a TLS slot) will be taken.
-static pthread_key_t gUselocaleKey;
+static pthread_key_t g_uselocale_key;
 __attribute__((constructor)) static void __bionic_tls_uselocale_key_init() {
-pthread_key_create(&gUselocaleKey, NULL);
+pthread_key_create(&g_uselocale_key, NULL);
 }
 
 static void __locale_init() {
-gLocale.decimal_point = const_cast<char*>(".");
+g_locale.decimal_point = const_cast<char*>(".");
 
 char* not_available = const_cast<char*>("");
-gLocale.thousands_sep = not_available;
-gLocale.grouping = not_available;
-gLocale.int_curr_symbol = not_available;
-gLocale.currency_symbol = not_available;
-gLocale.mon_decimal_point = not_available;
-gLocale.mon_thousands_sep = not_available;
-gLocale.mon_grouping = not_available;
-gLocale.positive_sign = not_available;
-gLocale.negative_sign = not_available;
+g_locale.thousands_sep = not_available;
+g_locale.grouping = not_available;
+g_locale.int_curr_symbol = not_available;
+g_locale.currency_symbol = not_available;
+g_locale.mon_decimal_point = not_available;
+g_locale.mon_thousands_sep = not_available;
+g_locale.mon_grouping = not_available;
+g_locale.positive_sign = not_available;
+g_locale.negative_sign = not_available;
 
-gLocale.int_frac_digits = CHAR_MAX;
-gLocale.frac_digits = CHAR_MAX;
-gLocale.p_cs_precedes = CHAR_MAX;
-gLocale.p_sep_by_space = CHAR_MAX;
-gLocale.n_cs_precedes = CHAR_MAX;
-gLocale.n_sep_by_space = CHAR_MAX;
-gLocale.p_sign_posn = CHAR_MAX;
-gLocale.n_sign_posn = CHAR_MAX;
-gLocale.int_p_cs_precedes = CHAR_MAX;
-gLocale.int_p_sep_by_space = CHAR_MAX;
-gLocale.int_n_cs_precedes = CHAR_MAX;
-gLocale.int_n_sep_by_space = CHAR_MAX;
-gLocale.int_p_sign_posn = CHAR_MAX;
-gLocale.int_n_sign_posn = CHAR_MAX;
+g_locale.int_frac_digits = CHAR_MAX;
+g_locale.frac_digits = CHAR_MAX;
+g_locale.p_cs_precedes = CHAR_MAX;
+g_locale.p_sep_by_space = CHAR_MAX;
+g_locale.n_cs_precedes = CHAR_MAX;
+g_locale.n_sep_by_space = CHAR_MAX;
+g_locale.p_sign_posn = CHAR_MAX;
+g_locale.n_sign_posn = CHAR_MAX;
+g_locale.int_p_cs_precedes = CHAR_MAX;
+g_locale.int_p_sep_by_space = CHAR_MAX;
+g_locale.int_n_cs_precedes = CHAR_MAX;
+g_locale.int_n_sep_by_space = CHAR_MAX;
+g_locale.int_p_sign_posn = CHAR_MAX;
+g_locale.int_n_sign_posn = CHAR_MAX;
 }
 
 static bool __bionic_current_locale_is_utf8 = false;
@@ -88,8 +88,8 @@ static locale_t __new_locale() {
 }
 
 lconv* localeconv() {
-pthread_once(&gLocaleOnce, __locale_init);
-return &gLocale;
+pthread_once(&g_locale_once, __locale_init);
+return &g_locale;
 }
 
 locale_t duplocale(locale_t l) {
@@ -140,7 +140,7 @@ char* setlocale(int category, const char* locale_name) {
 }
 
 locale_t uselocale(locale_t new_locale) {
-locale_t old_locale = static_cast<locale_t>(pthread_getspecific(gUselocaleKey));
+locale_t old_locale = static_cast<locale_t>(pthread_getspecific(g_uselocale_key));
 
 // If this is the first call to uselocale(3) on this thread, we return LC_GLOBAL_LOCALE.
 if (old_locale == NULL) {
@@ -148,7 +148,7 @@ locale_t uselocale(locale_t new_locale) {
 }
 
 if (new_locale != NULL) {
-pthread_setspecific(gUselocaleKey, new_locale);
+pthread_setspecific(g_uselocale_key, new_locale);
 }
 
 return old_locale;
@@ -53,8 +53,8 @@
 #include "private/ScopedPthreadMutexLocker.h"
 
 /* libc.debug.malloc.backlog */
-extern unsigned int gMallocDebugBacklog;
-extern int gMallocDebugLevel;
+extern unsigned int g_malloc_debug_backlog;
+extern int g_malloc_debug_level;
 
 #define MAX_BACKTRACE_DEPTH 16
 #define ALLOCATION_TAG 0x1ee7d00d
@@ -108,8 +108,10 @@ static inline const hdr_t* const_meta(const void* user) {
 return reinterpret_cast<const hdr_t*>(user) - 1;
 }
 
-
-static unsigned gAllocatedBlockCount;
+// TODO: introduce a struct for this global state.
+// There are basically two lists here, the regular list and the backlog list.
+// We should be able to remove the duplication.
+static unsigned g_allocated_block_count;
 static hdr_t* tail;
 static hdr_t* head;
 static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
@@ -188,7 +190,7 @@ static inline void add(hdr_t* hdr, size_t size) {
 hdr->size = size;
 init_front_guard(hdr);
 init_rear_guard(hdr);
-++gAllocatedBlockCount;
+++g_allocated_block_count;
 add_locked(hdr, &tail, &head);
 }
 
@@ -199,7 +201,7 @@ static inline int del(hdr_t* hdr) {
 
 ScopedPthreadMutexLocker locker(&lock);
 del_locked(hdr, &tail, &head);
---gAllocatedBlockCount;
+--g_allocated_block_count;
 return 0;
 }
 
@@ -306,7 +308,7 @@ static inline void del_from_backlog(hdr_t* hdr) {
 
 static inline int del_leak(hdr_t* hdr, int* safe) {
 ScopedPthreadMutexLocker locker(&lock);
-return del_and_check_locked(hdr, &tail, &head, &gAllocatedBlockCount, safe);
+return del_and_check_locked(hdr, &tail, &head, &g_allocated_block_count, safe);
 }
 
 static inline void add_to_backlog(hdr_t* hdr) {
@@ -316,7 +318,7 @@ static inline void add_to_backlog(hdr_t* hdr) {
 add_locked(hdr, &backlog_tail, &backlog_head);
 poison(hdr);
 /* If we've exceeded the maximum backlog, clear it up */
-while (backlog_num > gMallocDebugBacklog) {
+while (backlog_num > g_malloc_debug_backlog) {
 hdr_t* gone = backlog_tail;
 del_from_backlog_locked(gone);
 dlfree(gone->base);
@@ -508,7 +510,7 @@ extern "C" size_t chk_malloc_usable_size(const void* ptr) {
 
 static void ReportMemoryLeaks() {
 // We only track leaks at level 10.
-if (gMallocDebugLevel != 10) {
+if (g_malloc_debug_level != 10) {
 return;
 }
 
@@ -522,13 +524,13 @@ static void ReportMemoryLeaks() {
 exe[count] = '\0';
 }
 
-if (gAllocatedBlockCount == 0) {
+if (g_allocated_block_count == 0) {
 log_message("+++ %s did not leak", exe);
 return;
 }
 
 size_t index = 1;
-const size_t total = gAllocatedBlockCount;
+const size_t total = g_allocated_block_count;
 while (head != NULL) {
 int safe;
 hdr_t* block = head;
@@ -54,8 +54,8 @@
 */
 int gMallocLeakZygoteChild = 0;
 
-pthread_mutex_t gAllocationsMutex = PTHREAD_MUTEX_INITIALIZER;
-HashTable gHashTable;
+pthread_mutex_t g_allocations_mutex = PTHREAD_MUTEX_INITIALIZER;
+HashTable g_hash_table;
 
 // =============================================================================
 // output functions
@@ -122,9 +122,9 @@ extern "C" void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
 }
 *totalMemory = 0;
 
-ScopedPthreadMutexLocker locker(&gAllocationsMutex);
+ScopedPthreadMutexLocker locker(&g_allocations_mutex);
 
-if (gHashTable.count == 0) {
+if (g_hash_table.count == 0) {
 *info = NULL;
 *overallSize = 0;
 *infoSize = 0;
@@ -132,12 +132,12 @@ extern "C" void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
 return;
 }
 
-HashEntry** list = static_cast<HashEntry**>(dlmalloc(sizeof(void*) * gHashTable.count));
+HashEntry** list = static_cast<HashEntry**>(dlmalloc(sizeof(void*) * g_hash_table.count));
 
 // get the entries into an array to be sorted
 int index = 0;
 for (size_t i = 0 ; i < HASHTABLE_SIZE ; ++i) {
-HashEntry* entry = gHashTable.slots[i];
+HashEntry* entry = g_hash_table.slots[i];
 while (entry != NULL) {
 list[index] = entry;
 *totalMemory = *totalMemory +
@@ -149,7 +149,7 @@ extern "C" void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
 
 // XXX: the protocol doesn't allow variable size for the stack trace (yet)
 *infoSize = (sizeof(size_t) * 2) + (sizeof(uintptr_t) * BACKTRACE_SIZE);
-*overallSize = *infoSize * gHashTable.count;
+*overallSize = *infoSize * g_hash_table.count;
 *backtraceSize = BACKTRACE_SIZE;
 
 // now get a byte array big enough for this
@@ -161,10 +161,10 @@ extern "C" void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
 return;
 }
 
-qsort(list, gHashTable.count, sizeof(void*), hash_entry_compare);
+qsort(list, g_hash_table.count, sizeof(void*), hash_entry_compare);
 
 uint8_t* head = *info;
-const int count = gHashTable.count;
+const int count = g_hash_table.count;
 for (int i = 0 ; i < count ; ++i) {
 HashEntry* entry = list[i];
 size_t entrySize = (sizeof(size_t) * 2) + (sizeof(uintptr_t) * entry->numEntries);
@@ -253,7 +253,7 @@ extern "C" size_t malloc_usable_size(const void* mem) {
 #include "private/libc_logging.h"
 
 /* Table for dispatching malloc calls, depending on environment. */
-static MallocDebug gMallocUse __attribute__((aligned(32))) = {
+static MallocDebug g_malloc_dispatch_table __attribute__((aligned(32))) = {
 dlmalloc, dlfree, dlcalloc, dlrealloc, dlmemalign, dlmalloc_usable_size
 };
 
@@ -286,11 +286,11 @@ static void* libc_malloc_impl_handle = NULL;
 * backlog we use to detect multiple frees. If the property is not set, the
 * backlog length defaults to BACKLOG_DEFAULT_LEN.
 */
-unsigned int gMallocDebugBacklog;
+unsigned int g_malloc_debug_backlog;
 #define BACKLOG_DEFAULT_LEN 100
 
 /* The value of libc.debug.malloc. */
-int gMallocDebugLevel;
+int g_malloc_debug_level;
 
 template<typename FunctionType>
 static void InitMallocFunction(void* malloc_impl_handler, FunctionType* func, const char* prefix, const char* suffix) {
@@ -304,7 +304,7 @@ static void InitMallocFunction(void* malloc_impl_handler, FunctionType* func, co
 
 static void InitMalloc(void* malloc_impl_handler, MallocDebug* table, const char* prefix) {
 __libc_format_log(ANDROID_LOG_INFO, "libc", "%s: using libc.debug.malloc %d (%s)\n",
-__progname, gMallocDebugLevel, prefix);
+__progname, g_malloc_debug_level, prefix);
 
 InitMallocFunction<MallocDebugMalloc>(malloc_impl_handler, &table->malloc, prefix, "malloc");
 InitMallocFunction<MallocDebugFree>(malloc_impl_handler, &table->free, prefix, "free");
@@ -332,7 +332,7 @@ static void malloc_init_impl() {
 if (__system_property_get("ro.kernel.memcheck", memcheck_tracing)) {
 if (memcheck_tracing[0] != '0') {
 // Emulator has started with memory tracing enabled. Enforce it.
-gMallocDebugLevel = 20;
+g_malloc_debug_level = 20;
 memcheck_enabled = 1;
 }
 }
@@ -340,13 +340,13 @@ static void malloc_init_impl() {
 
 /* If debug level has not been set by memcheck option in the emulator,
 * lets grab it from libc.debug.malloc system property. */
-if (gMallocDebugLevel == 0 && __system_property_get("libc.debug.malloc", env)) {
-gMallocDebugLevel = atoi(env);
+if (g_malloc_debug_level == 0 && __system_property_get("libc.debug.malloc", env)) {
+g_malloc_debug_level = atoi(env);
 }
 
 /* Debug level 0 means that we should use dlxxx allocation
 * routines (default). */
-if (gMallocDebugLevel == 0) {
+if (g_malloc_debug_level == 0) {
 return;
 }
 
@@ -360,24 +360,24 @@ static void malloc_init_impl() {
 }
 
 // mksh is way too leaky. http://b/7291287.
-if (gMallocDebugLevel >= 10) {
+if (g_malloc_debug_level >= 10) {
 if (strcmp(__progname, "sh") == 0 || strcmp(__progname, "/system/bin/sh") == 0) {
 return;
 }
 }
 
 // Choose the appropriate .so for the requested debug level.
-switch (gMallocDebugLevel) {
+switch (g_malloc_debug_level) {
 case 1:
 case 5:
 case 10: {
 char debug_backlog[PROP_VALUE_MAX];
 if (__system_property_get("libc.debug.malloc.backlog", debug_backlog)) {
-gMallocDebugBacklog = atoi(debug_backlog);
-info_log("%s: setting backlog length to %d\n", __progname, gMallocDebugBacklog);
+g_malloc_debug_backlog = atoi(debug_backlog);
+info_log("%s: setting backlog length to %d\n", __progname, g_malloc_debug_backlog);
 }
-if (gMallocDebugBacklog == 0) {
-gMallocDebugBacklog = BACKLOG_DEFAULT_LEN;
+if (g_malloc_debug_backlog == 0) {
+g_malloc_debug_backlog = BACKLOG_DEFAULT_LEN;
 }
 so_name = "libc_malloc_debug_leak.so";
 break;
@@ -386,7 +386,7 @@ static void malloc_init_impl() {
 // Quick check: debug level 20 can only be handled in emulator.
 if (!qemu_running) {
 error_log("%s: Debug level %d can only be set in emulator\n",
-__progname, gMallocDebugLevel);
+__progname, g_malloc_debug_level);
 return;
 }
 // Make sure that memory checking has been enabled in emulator.
@@ -398,7 +398,7 @@ static void malloc_init_impl() {
 so_name = "libc_malloc_debug_qemu.so";
 break;
 default:
-error_log("%s: Debug level %d is unknown\n", __progname, gMallocDebugLevel);
+error_log("%s: Debug level %d is unknown\n", __progname, g_malloc_debug_level);
 return;
 }
 
@@ -406,7 +406,7 @@ static void malloc_init_impl() {
 void* malloc_impl_handle = dlopen(so_name, RTLD_LAZY);
 if (malloc_impl_handle == NULL) {
 error_log("%s: Missing module %s required for malloc debug level %d: %s",
-__progname, so_name, gMallocDebugLevel, dlerror());
+__progname, so_name, g_malloc_debug_level, dlerror());
 return;
 }
 
@@ -424,7 +424,7 @@ static void malloc_init_impl() {
 return;
 }
 
-if (gMallocDebugLevel == 20) {
+if (g_malloc_debug_level == 20) {
 // For memory checker we need to do extra initialization.
 typedef int (*MemCheckInit)(int, const char*);
 MemCheckInit memcheck_initialize =
@@ -445,35 +445,35 @@ static void malloc_init_impl() {
 
 
 // Initialize malloc dispatch table with appropriate routines.
-switch (gMallocDebugLevel) {
+switch (g_malloc_debug_level) {
 case 1:
-InitMalloc(malloc_impl_handle, &gMallocUse, "leak");
+InitMalloc(malloc_impl_handle, &g_malloc_dispatch_table, "leak");
 break;
 case 5:
-InitMalloc(malloc_impl_handle, &gMallocUse, "fill");
+InitMalloc(malloc_impl_handle, &g_malloc_dispatch_table, "fill");
 break;
 case 10:
-InitMalloc(malloc_impl_handle, &gMallocUse, "chk");
+InitMalloc(malloc_impl_handle, &g_malloc_dispatch_table, "chk");
 break;
 case 20:
-InitMalloc(malloc_impl_handle, &gMallocUse, "qemu_instrumented");
+InitMalloc(malloc_impl_handle, &g_malloc_dispatch_table, "qemu_instrumented");
 break;
 default:
 break;
 }
 
 // Make sure dispatch table is initialized
-if ((gMallocUse.malloc == NULL) ||
-(gMallocUse.free == NULL) ||
-(gMallocUse.calloc == NULL) ||
-(gMallocUse.realloc == NULL) ||
-(gMallocUse.memalign == NULL) ||
-(gMallocUse.malloc_usable_size == NULL)) {
+if ((g_malloc_dispatch_table.malloc == NULL) ||
+(g_malloc_dispatch_table.free == NULL) ||
+(g_malloc_dispatch_table.calloc == NULL) ||
+(g_malloc_dispatch_table.realloc == NULL) ||
+(g_malloc_dispatch_table.memalign == NULL) ||
+(g_malloc_dispatch_table.malloc_usable_size == NULL)) {
 error_log("%s: some symbols for libc.debug.malloc level %d were not found (see above)",
-__progname, gMallocDebugLevel);
+__progname, g_malloc_debug_level);
 dlclose(malloc_impl_handle);
 } else {
-__libc_malloc_dispatch = &gMallocUse;
+__libc_malloc_dispatch = &g_malloc_dispatch_table;
 libc_malloc_impl_handle = malloc_impl_handle;
 }
 }
@@ -61,8 +61,8 @@
 
 // Global variables defined in malloc_debug_common.c
 extern int gMallocLeakZygoteChild;
-extern pthread_mutex_t gAllocationsMutex;
-extern HashTable gHashTable;
+extern pthread_mutex_t g_allocations_mutex;
+extern HashTable g_hash_table;
 
 // =============================================================================
 // stack trace functions
@@ -138,7 +138,7 @@ static HashEntry* record_backtrace(uintptr_t* backtrace, size_t numEntries, size
 size |= SIZE_FLAG_ZYGOTE_CHILD;
 }
 
-HashEntry* entry = find_entry(&gHashTable, slot, backtrace, numEntries, size);
+HashEntry* entry = find_entry(&g_hash_table, slot, backtrace, numEntries, size);
 
 if (entry != NULL) {
 entry->allocations++;
@@ -151,20 +151,20 @@ static HashEntry* record_backtrace(uintptr_t* backtrace, size_t numEntries, size
 entry->allocations = 1;
 entry->slot = slot;
 entry->prev = NULL;
-entry->next = gHashTable.slots[slot];
+entry->next = g_hash_table.slots[slot];
 entry->numEntries = numEntries;
 entry->size = size;
 
 memcpy(entry->backtrace, backtrace, numEntries * sizeof(uintptr_t));
 
-gHashTable.slots[slot] = entry;
+g_hash_table.slots[slot] = entry;
 
 if (entry->next != NULL) {
 entry->next->prev = entry;
 }
 
 // we just added an entry, increase the size of the hashtable
-gHashTable.count++;
+g_hash_table.count++;
 }
 
 return entry;
@@ -174,7 +174,7 @@ static int is_valid_entry(HashEntry* entry) {
 if (entry != NULL) {
 int i;
 for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
-HashEntry* e1 = gHashTable.slots[i];
+HashEntry* e1 = g_hash_table.slots[i];
 
 while (e1 != NULL) {
 if (e1 == entry) {
@@ -198,11 +198,11 @@ static void remove_entry(HashEntry* entry) {
 
 if (prev == NULL) {
 // we are the head of the list. set the head to be next
-gHashTable.slots[entry->slot] = entry->next;
+g_hash_table.slots[entry->slot] = entry->next;
 }
 
 // we just removed and entry, decrease the size of the hashtable
-gHashTable.count--;
+g_hash_table.count--;
 }
 
 // =============================================================================
@@ -277,7 +277,7 @@ extern "C" void* leak_malloc(size_t bytes) {
 
 void* base = dlmalloc(size);
 if (base != NULL) {
-ScopedPthreadMutexLocker locker(&gAllocationsMutex);
+ScopedPthreadMutexLocker locker(&g_allocations_mutex);
 
 uintptr_t backtrace[BACKTRACE_SIZE];
 size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);
@@ -296,7 +296,7 @@ extern "C" void* leak_malloc(size_t bytes) {
 
 extern "C" void leak_free(void* mem) {
 if (mem != NULL) {
-ScopedPthreadMutexLocker locker(&gAllocationsMutex);
+ScopedPthreadMutexLocker locker(&g_allocations_mutex);
 
 // check the guard to make sure it is valid
 AllocationEntry* header = to_header(mem);
@@ -26,7 +26,7 @@ class pthread_accessor {
 public:
 explicit pthread_accessor(pthread_t desired_thread) {
 Lock();
-for (thread_ = gThreadList; thread_ != NULL; thread_ = thread_->next) {
+for (thread_ = g_thread_list; thread_ != NULL; thread_ = thread_->next) {
 if (thread_ == reinterpret_cast<pthread_internal_t*>(desired_thread)) {
 break;
 }
@@ -41,7 +41,7 @@ class pthread_accessor {
 if (is_locked_) {
 is_locked_ = false;
 thread_ = NULL;
-pthread_mutex_unlock(&gThreadListLock);
+pthread_mutex_unlock(&g_thread_list_lock);
 }
 }
 
@@ -54,7 +54,7 @@ class pthread_accessor {
 bool is_locked_;
 
 void Lock() {
-pthread_mutex_lock(&gThreadListLock);
+pthread_mutex_lock(&g_thread_list_lock);
 is_locked_ = true;
 }
 
@@ -29,8 +29,6 @@
 #include <errno.h>
 #include <pthread.h>
 
-static pthread_mutex_t gAtForkListMutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
-
 struct atfork_t {
 atfork_t* next;
 atfork_t* prev;
@@ -45,7 +43,8 @@ struct atfork_list_t {
 atfork_t* last;
 };
 
-static atfork_list_t gAtForkList = { NULL, NULL };
+static pthread_mutex_t g_atfork_list_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
+static atfork_list_t g_atfork_list = { NULL, NULL };
 
 void __bionic_atfork_run_prepare() {
 // We lock the atfork list here, unlock it in the parent, and reset it in the child.
@@ -54,12 +53,12 @@ void __bionic_atfork_run_prepare() {
 //
 // TODO: If a handler tries to mutate the list, they'll block. We should probably copy
 // the list before forking, and have prepare, parent, and child all work on the consistent copy.
-pthread_mutex_lock(&gAtForkListMutex);
+pthread_mutex_lock(&g_atfork_list_mutex);
 
 // Call pthread_atfork() prepare handlers. POSIX states that the prepare
 // handlers should be called in the reverse order of the parent/child
 // handlers, so we iterate backwards.
-for (atfork_t* it = gAtForkList.last; it != NULL; it = it->prev) {
+for (atfork_t* it = g_atfork_list.last; it != NULL; it = it->prev) {
 if (it->prepare != NULL) {
 it->prepare();
 }
@@ -67,23 +66,23 @@ void __bionic_atfork_run_prepare() {
 }
 
 void __bionic_atfork_run_child() {
-for (atfork_t* it = gAtForkList.first; it != NULL; it = it->next) {
+for (atfork_t* it = g_atfork_list.first; it != NULL; it = it->next) {
 if (it->child != NULL) {
 it->child();
 }
 }
 
-gAtForkListMutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
+g_atfork_list_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
 }
 
 void __bionic_atfork_run_parent() {
-for (atfork_t* it = gAtForkList.first; it != NULL; it = it->next) {
+for (atfork_t* it = g_atfork_list.first; it != NULL; it = it->next) {
 if (it->parent != NULL) {
 it->parent();
 }
 }
 
-pthread_mutex_unlock(&gAtForkListMutex);
+pthread_mutex_unlock(&g_atfork_list_mutex);
 }
 
 int pthread_atfork(void (*prepare)(void), void (*parent)(void), void(*child)(void)) {
@@ -96,20 +95,20 @@ int pthread_atfork(void (*prepare)(void), void (*parent)(void), void(*child)(voi
 entry->parent = parent;
 entry->child = child;
 
-pthread_mutex_lock(&gAtForkListMutex);
+pthread_mutex_lock(&g_atfork_list_mutex);
 
 // Append 'entry' to the list.
 entry->next = NULL;
-entry->prev = gAtForkList.last;
+entry->prev = g_atfork_list.last;
 if (entry->prev != NULL) {
 entry->prev->next = entry;
 }
-if (gAtForkList.first == NULL) {
-gAtForkList.first = entry;
+if (g_atfork_list.first == NULL) {
+g_atfork_list.first = entry;
 }
-gAtForkList.last = entry;
+g_atfork_list.last = entry;
 
-pthread_mutex_unlock(&gAtForkListMutex);
+pthread_mutex_unlock(&g_atfork_list_mutex);
 
 return 0;
 }
@@ -52,9 +52,9 @@ extern "C" __attribute__((noinline)) void _thread_created_hook(pid_t) {}
 extern "C" __LIBC_HIDDEN__ void __init_user_desc(struct user_desc*, int, void*);
 #endif
 
-static pthread_mutex_t gPthreadStackCreationLock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t g_pthread_stack_creation_ock = PTHREAD_MUTEX_INITIALIZER;
 
-static pthread_mutex_t gDebuggerNotificationLock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t g_debugger_notification_lock = PTHREAD_MUTEX_INITIALIZER;
 
 extern "C" int __isthreaded;
 
@@ -111,7 +111,7 @@ int __init_thread(pthread_internal_t* thread, bool add_to_thread_list) {
 }
 
 static void* __create_thread_stack(pthread_internal_t* thread) {
-ScopedPthreadMutexLocker lock(&gPthreadStackCreationLock);
+ScopedPthreadMutexLocker lock(&g_pthread_stack_creation_ock);
 
 // Create a new private anonymous map.
 int prot = PROT_READ | PROT_WRITE;
@@ -258,7 +258,7 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
 
 // Notify any debuggers about the new thread.
 {
-ScopedPthreadMutexLocker debugger_locker(&gDebuggerNotificationLock);
+ScopedPthreadMutexLocker debugger_locker(&g_debugger_notification_lock);
 _thread_created_hook(thread->tid);
 }
 
@@ -127,9 +127,9 @@ extern const char* __progname;
 * level 2 : deadlock prediction enabled w/ call stacks
 */
 #define CAPTURE_CALLSTACK 2
-static int sPthreadDebugLevel = 0;
-static pid_t sPthreadDebugDisabledThread = -1;
-static pthread_mutex_t sDbgLock = PTHREAD_MUTEX_INITIALIZER;
+static int g_pthread_debug_level = 0;
+static pid_t g_pthread_debug_disabled_thread = -1;
+static pthread_mutex_t g_dbg_lock = PTHREAD_MUTEX_INITIALIZER;
 
 /****************************************************************************/
 
@@ -138,23 +138,23 @@ static pthread_mutex_t sDbgLock = PTHREAD_MUTEX_INITIALIZER;
 */
 
 #define DBG_ALLOC_BLOCK_SIZE PAGESIZE
-static size_t sDbgAllocOffset = DBG_ALLOC_BLOCK_SIZE;
-static char* sDbgAllocPtr = NULL;
+static size_t g_dbg_alloc_offset = DBG_ALLOC_BLOCK_SIZE;
+static char* g_dbg_alloc_ptr = NULL;
 
 template <typename T>
 static T* DbgAllocLocked(size_t count = 1) {
 size_t size = sizeof(T) * count;
-if ((sDbgAllocOffset + size) > DBG_ALLOC_BLOCK_SIZE) {
-sDbgAllocOffset = 0;
-sDbgAllocPtr = reinterpret_cast<char*>(mmap(NULL, DBG_ALLOC_BLOCK_SIZE,
+if ((g_dbg_alloc_offset + size) > DBG_ALLOC_BLOCK_SIZE) {
+g_dbg_alloc_offset = 0;
+g_dbg_alloc_ptr = reinterpret_cast<char*>(mmap(NULL, DBG_ALLOC_BLOCK_SIZE,
 PROT_READ|PROT_WRITE,
 MAP_ANON | MAP_PRIVATE, 0, 0));
-if (sDbgAllocPtr == MAP_FAILED) {
+if (g_dbg_alloc_ptr == MAP_FAILED) {
 return NULL;
 }
 }
-void* addr = sDbgAllocPtr + sDbgAllocOffset;
-sDbgAllocOffset += size;
+void* addr = g_dbg_alloc_ptr + g_dbg_alloc_offset;
+g_dbg_alloc_offset += size;
 return reinterpret_cast<T*>(addr);
 }
 
@@ -365,7 +365,7 @@ static int traverseTree(MutexInfo* obj, MutexInfo const* objParent)
 uintptr_t addrs[STACK_TRACE_DEPTH];
 
 /* Turn off prediction temporarily in this thread while logging */
-sPthreadDebugDisabledThread = gettid();
+g_pthread_debug_disabled_thread = gettid();
 
 backtrace_startup();
 
@@ -384,7 +384,7 @@ static int traverseTree(MutexInfo* obj, MutexInfo const* objParent)
 MutexInfo* parent = cur->parents.list[i];
 if (parent->owner == ourtid) {
 LOGW("--- pthread_mutex_t at %p\n", parent->mutex);
-if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
+if (g_pthread_debug_level >= CAPTURE_CALLSTACK) {
 log_backtrace(parent->stackTrace, parent->stackDepth);
 }
 cur = parent;
@@ -405,7 +405,7 @@ static int traverseTree(MutexInfo* obj, MutexInfo const* objParent)
 MutexInfo* child = pList->list[i];
 if (!traverseTree(child, obj)) {
 LOGW("--- pthread_mutex_t at %p\n", obj->mutex);
-if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
+if (g_pthread_debug_level >= CAPTURE_CALLSTACK) {
 int index = historyListHas(&obj->parents, objParent);
 if ((size_t)index < (size_t)obj->stacks.count) {
 log_backtrace(obj->stacks.stack[index].addrs, obj->stacks.stack[index].depth);
@@ -435,7 +435,7 @@ static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object)
 object->owner = tid;
 object->lockCount = 0;
 
-if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
+if (g_pthread_debug_level >= CAPTURE_CALLSTACK) {
 // always record the call stack when acquiring a lock.
 // it's not efficient, but is useful during diagnostics
 object->stackDepth = get_backtrace(object->stackTrace, STACK_TRACE_DEPTH);
@@ -451,7 +451,7 @@ static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object)
 if (historyListHas(&mrl->children, object) >= 0)
 return;
 
-pthread_mutex_lock_unchecked(&sDbgLock);
+pthread_mutex_lock_unchecked(&g_dbg_lock);
 
 linkParentToChild(mrl, object);
 if (!traverseTree(object, mrl)) {
@@ -459,20 +459,20 @@ static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object)
 LOGW("%s\n", kEndBanner);
 unlinkParentFromChild(mrl, object);
 // reenable pthread debugging for this thread
-sPthreadDebugDisabledThread = -1;
+g_pthread_debug_disabled_thread = -1;
 } else {
 // record the call stack for this link
 // NOTE: the call stack is added at the same index
 // as mrl in object->parents[]
 // ie: object->parents.count == object->stacks.count, which is
 // also the index.
-if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
+if (g_pthread_debug_level >= CAPTURE_CALLSTACK) {
 callstackListAdd(&object->stacks,
 object->stackDepth, object->stackTrace);
 }
 }
 
-pthread_mutex_unlock_unchecked(&sDbgLock);
+pthread_mutex_unlock_unchecked(&g_dbg_lock);
 }
 
 static void mutex_unlock_checked(MutexInfo* object)
@@ -509,8 +509,8 @@ struct HashTable {
 HashEntry* slots[HASHTABLE_SIZE];
 };
 
-static HashTable sMutexMap;
-static HashTable sThreadMap;
+static HashTable g_mutex_map;
+static HashTable g_thread_map;
 
 /****************************************************************************/
 
@@ -593,9 +593,9 @@ static int MutexInfo_equals(void const* data, void const* key) {
 
 static MutexInfo* get_mutex_info(pthread_mutex_t *mutex)
 {
-pthread_mutex_lock_unchecked(&sDbgLock);
+pthread_mutex_lock_unchecked(&g_dbg_lock);
 
-HashEntry* entry = hashmap_lookup(&sMutexMap,
+HashEntry* entry = hashmap_lookup(&g_mutex_map,
 &mutex, sizeof(mutex),
 &MutexInfo_equals);
 if (entry->data == NULL) {
@@ -604,7 +604,7 @@ static MutexInfo* get_mutex_info(pthread_mutex_t *mutex)
 initMutexInfo(mutex_info, mutex);
 }
 
-pthread_mutex_unlock_unchecked(&sDbgLock);
+pthread_mutex_unlock_unchecked(&g_dbg_lock);
 
 return (MutexInfo *)entry->data;
 }
@@ -617,9 +617,9 @@ static int ThreadInfo_equals(void const* data, void const* key) {
 
 static ThreadInfo* get_thread_info(pid_t pid)
 {
-pthread_mutex_lock_unchecked(&sDbgLock);
+pthread_mutex_lock_unchecked(&g_dbg_lock);
 
-HashEntry* entry = hashmap_lookup(&sThreadMap,
+HashEntry* entry = hashmap_lookup(&g_thread_map,
 &pid, sizeof(pid),
 &ThreadInfo_equals);
 if (entry->data == NULL) {
@@ -628,7 +628,7 @@ static ThreadInfo* get_thread_info(pid_t pid)
 initThreadInfo(thread_info, pid);
 }
 
-pthread_mutex_unlock_unchecked(&sDbgLock);
+pthread_mutex_unlock_unchecked(&g_dbg_lock);
 
 return (ThreadInfo *)entry->data;
 }
@@ -672,9 +672,9 @@ static MutexInfo* get_most_recently_locked() {
 
 extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex)
 {
-if (sPthreadDebugLevel == 0) return;
+if (g_pthread_debug_level == 0) return;
 // prediction disabled for this thread
-if (sPthreadDebugDisabledThread == gettid())
+if (g_pthread_debug_disabled_thread == gettid())
 return;
 MutexInfo* object = get_mutex_info(mutex);
 MutexInfo* mrl = get_most_recently_locked();
@@ -689,9 +689,9 @@ extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_lock_check(pthread_mutex_t *
 
 extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex)
 {
-if (sPthreadDebugLevel == 0) return;
+if (g_pthread_debug_level == 0) return;
 // prediction disabled for this thread
-if (sPthreadDebugDisabledThread == gettid())
+if (g_pthread_debug_disabled_thread == gettid())
 return;
 MutexInfo* object = get_mutex_info(mutex);
 remove_most_recently_locked(object);
@@ -709,8 +709,8 @@ extern "C" __LIBC_HIDDEN__ void pthread_debug_init() {
 if (level) {
 LOGI("pthread deadlock detection level %d enabled for pid %d (%s)",
 level, getpid(), __progname);
-hashmap_init(&sMutexMap);
-sPthreadDebugLevel = level;
+hashmap_init(&g_mutex_map);
+g_pthread_debug_level = level;
 }
 }
 #endif
|
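The get_mutex_info()/get_thread_info() helpers above follow a lookup-or-lazily-create pattern: take the single debug lock, probe a hash map keyed by the mutex address (or tid), allocate and initialize an entry on a miss, then drop the lock. A minimal self-contained sketch of that pattern, using std::unordered_map and a plain pthread mutex in place of bionic's fixed-size HashTable and its *_unchecked lock calls, which are not shown in these hunks:

#include <pthread.h>
#include <unordered_map>

struct MutexInfo { int lock_count = 0; };  // stand-in for the real MutexInfo

static pthread_mutex_t g_dbg_lock = PTHREAD_MUTEX_INITIALIZER;
static std::unordered_map<pthread_mutex_t*, MutexInfo*> g_mutex_map;  // simplified map

static MutexInfo* get_mutex_info(pthread_mutex_t* mutex) {
  // The real code uses pthread_mutex_lock_unchecked so the debug lock itself
  // is not instrumented by the deadlock predictor.
  pthread_mutex_lock(&g_dbg_lock);
  MutexInfo*& slot = g_mutex_map[mutex];  // inserts a null slot on first lookup
  if (slot == nullptr) {
    slot = new MutexInfo();               // lazily create on a miss, still holding the lock
  }
  pthread_mutex_unlock(&g_dbg_lock);
  return slot;
}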
@@ -92,7 +92,7 @@ void pthread_exit(void* return_value) {
   size_t stack_size = thread->attr.stack_size;
   bool user_allocated_stack = ((thread->attr.flags & PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK) != 0);

-  pthread_mutex_lock(&gThreadListLock);
+  pthread_mutex_lock(&g_thread_list_lock);
   if ((thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) != 0) {
     // The thread is detached, so we can free the pthread_internal_t.
     // First make sure that the kernel does not try to clear the tid field
@@ -110,7 +110,7 @@ void pthread_exit(void* return_value) {
     // pthread_join is responsible for destroying the pthread_internal_t for non-detached threads.
     // The kernel will futex_wake on the pthread_internal_t::tid field to wake pthread_join.
   }
-  pthread_mutex_unlock(&gThreadListLock);
+  pthread_mutex_unlock(&g_thread_list_lock);

   if (user_allocated_stack) {
     // Cleaning up this thread's stack is the creator's responsibility, not ours.
@@ -86,8 +86,8 @@ __LIBC_HIDDEN__ void _pthread_internal_remove_locked(pthread_internal_t* thread)
  */
 #define PTHREAD_STACK_SIZE_DEFAULT ((1 * 1024 * 1024) - SIGSTKSZ)

-__LIBC_HIDDEN__ extern pthread_internal_t* gThreadList;
-__LIBC_HIDDEN__ extern pthread_mutex_t gThreadListLock;
+__LIBC_HIDDEN__ extern pthread_internal_t* g_thread_list;
+__LIBC_HIDDEN__ extern pthread_mutex_t g_thread_list_lock;

 __LIBC_HIDDEN__ int __timespec_from_absolute(timespec*, const timespec*, clockid_t);

@@ -33,8 +33,8 @@
 #include "private/bionic_tls.h"
 #include "private/ScopedPthreadMutexLocker.h"

-pthread_internal_t* gThreadList = NULL;
-pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER;
+pthread_internal_t* g_thread_list = NULL;
+pthread_mutex_t g_thread_list_lock = PTHREAD_MUTEX_INITIALIZER;

 void _pthread_internal_remove_locked(pthread_internal_t* thread) {
   if (thread->next != NULL) {
@@ -43,7 +43,7 @@ void _pthread_internal_remove_locked(pthread_internal_t* thread) {
   if (thread->prev != NULL) {
     thread->prev->next = thread->next;
   } else {
-    gThreadList = thread->next;
+    g_thread_list = thread->next;
   }

   // The main thread is not heap-allocated. See __libc_init_tls for the declaration,
@@ -54,15 +54,15 @@ void _pthread_internal_remove_locked(pthread_internal_t* thread) {
 }

 void _pthread_internal_add(pthread_internal_t* thread) {
-  ScopedPthreadMutexLocker locker(&gThreadListLock);
+  ScopedPthreadMutexLocker locker(&g_thread_list_lock);

   // We insert at the head.
-  thread->next = gThreadList;
+  thread->next = g_thread_list;
   thread->prev = NULL;
   if (thread->next != NULL) {
     thread->next->prev = thread;
   }
-  gThreadList = thread;
+  g_thread_list = thread;
 }

 pthread_internal_t* __get_thread(void) {
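ScopedPthreadMutexLocker (from private/ScopedPthreadMutexLocker.h, whose body is not part of this diff) is the RAII guard used in _pthread_internal_add() above and in the dlfcn wrappers further down: it locks the given pthread mutex in its constructor and unlocks it when the enclosing scope ends. A sketch of the shape such a guard takes, assuming only what the call sites show:

#include <pthread.h>

// Minimal RAII guard: locks on construction, unlocks on destruction.
// The real bionic class may differ in detail; only its usage is visible in this diff.
class ScopedPthreadMutexLocker {
 public:
  explicit ScopedPthreadMutexLocker(pthread_mutex_t* mu) : mu_(mu) {
    pthread_mutex_lock(mu_);
  }
  ~ScopedPthreadMutexLocker() {
    pthread_mutex_unlock(mu_);
  }
 private:
  pthread_mutex_t* mu_;
  // Non-copyable, so the mutex cannot be unlocked twice.
  ScopedPthreadMutexLocker(const ScopedPthreadMutexLocker&);
  void operator=(const ScopedPthreadMutexLocker&);
};

static pthread_mutex_t g_thread_list_lock = PTHREAD_MUTEX_INITIALIZER;

void example_add() {
  ScopedPthreadMutexLocker locker(&g_thread_list_lock);  // held for the rest of the scope
  // ... mutate the shared thread list here ...
}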
@@ -210,8 +210,8 @@ int pthread_key_delete(pthread_key_t key) {
   }

   // Clear value in all threads.
-  pthread_mutex_lock(&gThreadListLock);
-  for (pthread_internal_t* t = gThreadList; t != NULL; t = t->next) {
+  pthread_mutex_lock(&g_thread_list_lock);
+  for (pthread_internal_t* t = g_thread_list; t != NULL; t = t->next) {
     // Skip zombie threads. They don't have a valid TLS area any more.
     // Similarly, it is possible to have t->tls == NULL for threads that
     // were just recently created through pthread_create() but whose
@@ -226,7 +226,7 @@ int pthread_key_delete(pthread_key_t key) {
   }
   tls_map.DeleteKey(key);

-  pthread_mutex_unlock(&gThreadListLock);
+  pthread_mutex_unlock(&g_thread_list_lock);
   return 0;
 }

@@ -30,7 +30,7 @@

 #include <pthread.h>

-static pthread_mutex_t gAtExitLock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t g_atexit_lock = PTHREAD_MUTEX_INITIALIZER;

 __BEGIN_DECLS
 __LIBC_HIDDEN__ void _thread_atexit_lock();
@@ -38,9 +38,9 @@ __LIBC_HIDDEN__ void _thread_atexit_unlock();
 __END_DECLS

 void _thread_atexit_lock() {
-  pthread_mutex_lock(&gAtExitLock);
+  pthread_mutex_lock(&g_atexit_lock);
 }

 void _thread_atexit_unlock() {
-  pthread_mutex_unlock(&gAtExitLock);
+  pthread_mutex_unlock(&g_atexit_lock);
 }
@@ -2255,12 +2255,12 @@ static int __bionic_tzload_cached(const char* name, struct state* const sp, cons
     _tzLock();

     // Our single-item cache.
-    static char* gCachedTimeZoneName;
-    static struct state gCachedTimeZone;
+    static char* g_cached_time_zone_name;
+    static struct state g_cached_time_zone;

     // Do we already have this timezone cached?
-    if (gCachedTimeZoneName != NULL && strcmp(name, gCachedTimeZoneName) == 0) {
-        *sp = gCachedTimeZone;
+    if (g_cached_time_zone_name != NULL && strcmp(name, g_cached_time_zone_name) == 0) {
+        *sp = g_cached_time_zone;
         _tzUnlock();
         return 0;
     }
@@ -2269,9 +2269,9 @@ static int __bionic_tzload_cached(const char* name, struct state* const sp, cons
     int rc = tzload(name, sp, doextend);
     if (rc == 0) {
         // Update the cache.
-        free(gCachedTimeZoneName);
-        gCachedTimeZoneName = strdup(name);
-        gCachedTimeZone = *sp;
+        free(g_cached_time_zone_name);
+        g_cached_time_zone_name = strdup(name);
+        g_cached_time_zone = *sp;
     }

     _tzUnlock();
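The __bionic_tzload_cached() hunks above implement a lock-protected single-item cache: remember the last timezone name that was loaded and the state it produced, and serve a repeat request by copying the cached state instead of re-reading the zoneinfo data. A small self-contained sketch of that pattern with generic key/value types (the real code caches tzcode's struct state and guards with _tzLock/_tzUnlock):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct Value { int data; };                 // stand-in for tzcode's `struct state`

static pthread_mutex_t g_cache_lock = PTHREAD_MUTEX_INITIALIZER;
static char* g_cached_name = NULL;          // single-item cache: last key...
static Value g_cached_value;                // ...and the value it produced

// Hypothetical expensive loader standing in for tzload().
static int load_value(const char* name, Value* out) {
  out->data = static_cast<int>(strlen(name));
  return 0;
}

int load_cached(const char* name, Value* out) {
  pthread_mutex_lock(&g_cache_lock);
  if (g_cached_name != NULL && strcmp(name, g_cached_name) == 0) {
    *out = g_cached_value;                  // cache hit: just copy the saved result
    pthread_mutex_unlock(&g_cache_lock);
    return 0;
  }
  int rc = load_value(name, out);           // cache miss: do the expensive load
  if (rc == 0) {
    free(g_cached_name);                    // replace the single cached entry
    g_cached_name = strdup(name);
    g_cached_value = *out;
  }
  pthread_mutex_unlock(&g_cache_lock);
  return rc;
}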
@@ -217,7 +217,7 @@ static void send_debuggerd_packet(siginfo_t* info) {
   debugger_msg_t msg;
   msg.action = DEBUGGER_ACTION_CRASH;
   msg.tid = gettid();
-  msg.abort_msg_address = reinterpret_cast<uintptr_t>(gAbortMessage);
+  msg.abort_msg_address = reinterpret_cast<uintptr_t>(g_abort_message);
   msg.original_si_code = (info != NULL) ? info->si_code : 0;
   int ret = TEMP_FAILURE_RETRY(write(s, &msg, sizeof(msg)));
   if (ret == sizeof(msg)) {
@@ -29,7 +29,7 @@

 /* This file hijacks the symbols stubbed out in libdl.so. */

-static pthread_mutex_t gDlMutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
+static pthread_mutex_t g_dl_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;

 static const char* __bionic_set_dlerror(char* new_value) {
   char** dlerror_slot = &reinterpret_cast<char**>(__get_tls())[TLS_SLOT_DLERROR];
@@ -56,18 +56,18 @@ const char* dlerror() {
 }

 void android_get_LD_LIBRARY_PATH(char* buffer, size_t buffer_size) {
-  ScopedPthreadMutexLocker locker(&gDlMutex);
+  ScopedPthreadMutexLocker locker(&g_dl_mutex);
   do_android_get_LD_LIBRARY_PATH(buffer, buffer_size);
 }

 void android_update_LD_LIBRARY_PATH(const char* ld_library_path) {
-  ScopedPthreadMutexLocker locker(&gDlMutex);
+  ScopedPthreadMutexLocker locker(&g_dl_mutex);
   do_android_update_LD_LIBRARY_PATH(ld_library_path);
 }

 void* android_dlopen_ext(const char* filename, int flags, const android_dlextinfo* extinfo)
 {
-  ScopedPthreadMutexLocker locker(&gDlMutex);
+  ScopedPthreadMutexLocker locker(&g_dl_mutex);
   soinfo* result = do_dlopen(filename, flags, extinfo);
   if (result == NULL) {
     __bionic_format_dlerror("dlopen failed", linker_get_error_buffer());
@@ -81,7 +81,7 @@ void* dlopen(const char* filename, int flags) {
 }

 void* dlsym(void* handle, const char* symbol) {
-  ScopedPthreadMutexLocker locker(&gDlMutex);
+  ScopedPthreadMutexLocker locker(&g_dl_mutex);

   if (handle == NULL) {
     __bionic_format_dlerror("dlsym library handle is null", NULL);
@@ -125,7 +125,7 @@ void* dlsym(void* handle, const char* symbol) {
 }

 int dladdr(const void* addr, Dl_info* info) {
-  ScopedPthreadMutexLocker locker(&gDlMutex);
+  ScopedPthreadMutexLocker locker(&g_dl_mutex);

   // Determine if this address can be found in any library currently mapped.
   soinfo* si = find_containing_library(addr);
@@ -150,7 +150,7 @@ int dladdr(const void* addr, Dl_info* info) {
 }

 int dlclose(void* handle) {
-  ScopedPthreadMutexLocker locker(&gDlMutex);
+  ScopedPthreadMutexLocker locker(&g_dl_mutex);
   return do_dlclose(reinterpret_cast<soinfo*>(handle));
 }

@@ -187,7 +187,7 @@ int dlclose(void* handle) {
 # error Unsupported architecture. Only arm, arm64, mips, mips64, x86 and x86_64 are presently supported.
 #endif

-static ElfW(Sym) gLibDlSymtab[] = {
+static ElfW(Sym) g_libdl_symtab[] = {
   // Total length of libdl_info.strtab, including trailing 0.
   // This is actually the STH_UNDEF entry. Technically, it's
   // supposed to have st_name == 0, but instead, it points to an index
@@ -209,20 +209,20 @@ static ElfW(Sym) gLibDlSymtab[] = {

 // Fake out a hash table with a single bucket.
 //
-// A search of the hash table will look through gLibDlSymtab starting with index 1, then
-// use gLibDlChains to find the next index to look at. gLibDlChains should be set up to
-// walk through every element in gLibDlSymtab, and then end with 0 (sentinel value).
+// A search of the hash table will look through g_libdl_symtab starting with index 1, then
+// use g_libdl_chains to find the next index to look at. g_libdl_chains should be set up to
+// walk through every element in g_libdl_symtab, and then end with 0 (sentinel value).
 //
-// That is, gLibDlChains should look like { 0, 2, 3, ... N, 0 } where N is the number
-// of actual symbols, or nelems(gLibDlSymtab)-1 (since the first element of gLibDlSymtab is not
+// That is, g_libdl_chains should look like { 0, 2, 3, ... N, 0 } where N is the number
+// of actual symbols, or nelems(g_libdl_symtab)-1 (since the first element of g_libdl_symtab is not
 // a real symbol). (See soinfo_elf_lookup().)
 //
 // Note that adding any new symbols here requires stubbing them out in libdl.
-static unsigned gLibDlBuckets[1] = { 1 };
+static unsigned g_libdl_buckets[1] = { 1 };
 #if defined(__arm__)
-static unsigned gLibDlChains[] = { 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0 };
+static unsigned g_libdl_chains[] = { 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0 };
 #else
-static unsigned gLibDlChains[] = { 0, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
+static unsigned g_libdl_chains[] = { 0, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
 #endif

 // This is used by the dynamic linker. Every process gets these symbols for free.
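The comment block above describes the standard ELF hash-table walk that soinfo_elf_lookup() performs (that function itself is not part of this diff): start at the symbol index stored in the bucket, compare names, and follow the chain array until the sentinel index 0. A self-contained sketch of that walk over the single-bucket layout used for libdl_info, with simplified symbol entries in place of ElfW(Sym):

#include <string.h>

// Simplified stand-in for ElfW(Sym): just the name, which is all this walk needs.
struct FakeSym { const char* name; };

// One bucket whose value is the first symbol index to try, exactly as above.
static unsigned g_buckets[1] = { 1 };
// chains[i] gives the next index to try after i; 0 terminates the walk.
static unsigned g_chains[]   = { 0, 2, 3, 0 };

static FakeSym g_symtab[] = {
  { "" },          // index 0: the unused STN_UNDEF slot
  { "dlopen" },    // index 1
  { "dlsym" },     // index 2
  { "dlclose" },   // index 3
};

// Walk the bucket/chain arrays the way the comment describes.
const FakeSym* lookup(const char* wanted, unsigned hash) {
  unsigned nbucket = sizeof(g_buckets) / sizeof(g_buckets[0]);
  for (unsigned n = g_buckets[hash % nbucket]; n != 0; n = g_chains[n]) {
    if (strcmp(g_symtab[n].name, wanted) == 0) {
      return &g_symtab[n];
    }
  }
  return nullptr;  // chain ended at the 0 sentinel without a match
}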
@@ -250,12 +250,12 @@ soinfo libdl_info = {
     .flags = FLAG_LINKED,

     .strtab = ANDROID_LIBDL_STRTAB,
-    .symtab = gLibDlSymtab,
+    .symtab = g_libdl_symtab,

-    .nbucket = sizeof(gLibDlBuckets)/sizeof(unsigned),
-    .nchain = sizeof(gLibDlChains)/sizeof(unsigned),
-    .bucket = gLibDlBuckets,
-    .chain = gLibDlChains,
+    .nbucket = sizeof(g_libdl_buckets)/sizeof(unsigned),
+    .nchain = sizeof(g_libdl_chains)/sizeof(unsigned),
+    .bucket = g_libdl_buckets,
+    .chain = g_libdl_chains,

 #if defined(USE_RELA)
     .plt_rela = 0,
@@ -71,13 +71,13 @@ static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf);
 // We can't use malloc(3) in the dynamic linker. We use a linked list of anonymous
 // maps, each a single page in size. The pages are broken up into as many struct soinfo
 // objects as will fit.
-static LinkerAllocator<soinfo> gSoInfoAllocator;
+static LinkerAllocator<soinfo> g_soinfo_allocator;

 static soinfo* solist = &libdl_info;
 static soinfo* sonext = &libdl_info;
 static soinfo* somain; /* main process, always the one after libdl_info */

-static const char* const gDefaultLdPaths[] = {
+static const char* const kDefaultLdPaths[] = {
 #if defined(__LP64__)
   "/vendor/lib64",
   "/system/lib64",
@@ -94,17 +94,17 @@ static const char* const gDefaultLdPaths[] = {
 #define LDPRELOAD_BUFSIZE (LDPRELOAD_MAX*64)
 #define LDPRELOAD_MAX 8

-static char gLdPathsBuffer[LDPATH_BUFSIZE];
-static const char* gLdPaths[LDPATH_MAX + 1];
+static char g_ld_library_paths_buffer[LDPATH_BUFSIZE];
+static const char* g_ld_library_paths[LDPATH_MAX + 1];

-static char gLdPreloadsBuffer[LDPRELOAD_BUFSIZE];
-static const char* gLdPreloadNames[LDPRELOAD_MAX + 1];
+static char g_ld_preloads_buffer[LDPRELOAD_BUFSIZE];
+static const char* g_ld_preload_names[LDPRELOAD_MAX + 1];

-static soinfo* gLdPreloads[LDPRELOAD_MAX + 1];
+static soinfo* g_ld_preloads[LDPRELOAD_MAX + 1];

-__LIBC_HIDDEN__ int gLdDebugVerbosity;
+__LIBC_HIDDEN__ int g_ld_debug_verbosity;

-__LIBC_HIDDEN__ abort_msg_t* gAbortMessage = NULL; // For debuggerd.
+__LIBC_HIDDEN__ abort_msg_t* g_abort_message = NULL; // For debuggerd.

 enum RelocationKind {
   kRelocAbsolute = 0,
@@ -179,11 +179,10 @@ size_t linker_get_error_buffer_size() {
  */
 extern "C" void __attribute__((noinline)) __attribute__((visibility("default"))) rtld_db_dlactivity();

+static pthread_mutex_t g__r_debug_mutex = PTHREAD_MUTEX_INITIALIZER;
 static r_debug _r_debug = {1, NULL, reinterpret_cast<uintptr_t>(&rtld_db_dlactivity), r_debug::RT_CONSISTENT, 0};
 static link_map* r_debug_tail = 0;

-static pthread_mutex_t gDebugMutex = PTHREAD_MUTEX_INITIALIZER;
-
 static void insert_soinfo_into_debug_map(soinfo* info) {
   // Copy the necessary fields into the debug structure.
   link_map* map = &(info->link_map_head);
@@ -229,7 +228,7 @@ static void notify_gdb_of_load(soinfo* info) {
     return;
   }

-  ScopedPthreadMutexLocker locker(&gDebugMutex);
+  ScopedPthreadMutexLocker locker(&g__r_debug_mutex);

   _r_debug.r_state = r_debug::RT_ADD;
   rtld_db_dlactivity();
@@ -246,7 +245,7 @@ static void notify_gdb_of_unload(soinfo* info) {
     return;
   }

-  ScopedPthreadMutexLocker locker(&gDebugMutex);
+  ScopedPthreadMutexLocker locker(&g__r_debug_mutex);

   _r_debug.r_state = r_debug::RT_DELETE;
   rtld_db_dlactivity();
@@ -270,7 +269,7 @@ static soinfo* soinfo_alloc(const char* name) {
     return NULL;
   }

-  soinfo* si = gSoInfoAllocator.alloc();
+  soinfo* si = g_soinfo_allocator.alloc();

   // Initialize the new element.
   memset(si, 0, sizeof(soinfo));
@@ -310,7 +309,7 @@ static void soinfo_free(soinfo* si) {
     sonext = prev;
   }

-  gSoInfoAllocator.free(si);
+  g_soinfo_allocator.free(si);
 }


@@ -340,14 +339,14 @@ static void parse_path(const char* path, const char* delimiters,
 }

 static void parse_LD_LIBRARY_PATH(const char* path) {
-  parse_path(path, ":", gLdPaths,
-             gLdPathsBuffer, sizeof(gLdPathsBuffer), LDPATH_MAX);
+  parse_path(path, ":", g_ld_library_paths,
+             g_ld_library_paths_buffer, sizeof(g_ld_library_paths_buffer), LDPATH_MAX);
 }

 static void parse_LD_PRELOAD(const char* path) {
   // We have historically supported ':' as well as ' ' in LD_PRELOAD.
-  parse_path(path, " :", gLdPreloadNames,
-             gLdPreloadsBuffer, sizeof(gLdPreloadsBuffer), LDPRELOAD_MAX);
+  parse_path(path, " :", g_ld_preload_names,
+             g_ld_preloads_buffer, sizeof(g_ld_preloads_buffer), LDPRELOAD_MAX);
 }

 #if defined(__arm__)
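The parse_LD_LIBRARY_PATH()/parse_LD_PRELOAD() wrappers above both delegate to parse_path(), whose body does not appear in this diff. From the call sites, it evidently copies the environment string into a fixed backing buffer, splits it on the given delimiters, and fills a NULL-terminated array of at most `max` pointers; the resulting arrays are then walked with simple `table[i] != NULL` loops, as in the preload hunks below. A hypothetical sketch of such a splitter, assuming that behaviour:

#include <stddef.h>
#include <string.h>

// Hypothetical splitter matching the call sites above: `table` receives at most
// `max` entries plus a NULL terminator, all pointing into `buf`.
static void parse_path(const char* path, const char* delimiters,
                       const char** table, char* buf, size_t buf_size, size_t max) {
  if (path == NULL) {
    table[0] = NULL;
    return;
  }
  // Copy into the caller-provided backing buffer so the entries outlive getenv().
  strlcpy(buf, path, buf_size);

  size_t i = 0;
  char* saveptr = NULL;
  for (char* piece = strtok_r(buf, delimiters, &saveptr);
       piece != NULL && i < max;
       piece = strtok_r(NULL, delimiters, &saveptr)) {
    table[i++] = piece;      // each entry points into buf
  }
  table[i] = NULL;           // NULL-terminate, as the lookup loops expect
}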
@@ -505,10 +504,10 @@ static ElfW(Sym)* soinfo_do_lookup(soinfo* si, const char* name, soinfo** lsi, s
   }

   /* Next, look for it in the preloads list */
-  for (int i = 0; gLdPreloads[i] != NULL; i++) {
-    s = soinfo_elf_lookup(gLdPreloads[i], elf_hash, name);
+  for (int i = 0; g_ld_preloads[i] != NULL; i++) {
+    s = soinfo_elf_lookup(g_ld_preloads[i], elf_hash, name);
     if (s != NULL) {
-      *lsi = gLdPreloads[i];
+      *lsi = g_ld_preloads[i];
       goto done;
     }
   }
@@ -637,9 +636,9 @@ static int open_library(const char* name) {
   }

   // Otherwise we try LD_LIBRARY_PATH first, and fall back to the built-in well known paths.
-  int fd = open_library_on_path(name, gLdPaths);
+  int fd = open_library_on_path(name, g_ld_library_paths);
   if (fd == -1) {
-    fd = open_library_on_path(name, gDefaultLdPaths);
+    fd = open_library_on_path(name, kDefaultLdPaths);
   }
   return fd;
 }
@@ -756,7 +755,7 @@ static int soinfo_unload(soinfo* si) {
 }

 void do_android_get_LD_LIBRARY_PATH(char* buffer, size_t buffer_size) {
-  snprintf(buffer, buffer_size, "%s:%s", gDefaultLdPaths[0], gDefaultLdPaths[1]);
+  snprintf(buffer, buffer_size, "%s:%s", kDefaultLdPaths[0], kDefaultLdPaths[1]);
 }

 void do_android_update_LD_LIBRARY_PATH(const char* ld_library_path) {
@@ -774,19 +773,19 @@ soinfo* do_dlopen(const char* name, int flags, const android_dlextinfo* extinfo)
     DL_ERR("invalid extended flags to android_dlopen_ext: %x", extinfo->flags);
     return NULL;
   }
-  gSoInfoAllocator.protect_all(PROT_READ | PROT_WRITE);
+  g_soinfo_allocator.protect_all(PROT_READ | PROT_WRITE);
   soinfo* si = find_library(name, extinfo);
   if (si != NULL) {
     si->CallConstructors();
   }
-  gSoInfoAllocator.protect_all(PROT_READ);
+  g_soinfo_allocator.protect_all(PROT_READ);
   return si;
 }

 int do_dlclose(soinfo* si) {
-  gSoInfoAllocator.protect_all(PROT_READ | PROT_WRITE);
+  g_soinfo_allocator.protect_all(PROT_READ | PROT_WRITE);
   int result = soinfo_unload(si);
-  gSoInfoAllocator.protect_all(PROT_READ);
+  g_soinfo_allocator.protect_all(PROT_READ);
   return result;
 }

@@ -1334,7 +1333,7 @@ void soinfo::CallFunction(const char* function_name __unused, linker_function_t

   // The function may have called dlopen(3) or dlclose(3), so we need to ensure our data structures
   // are still writable. This happens with our debug malloc (see http://b/7941716).
-  gSoInfoAllocator.protect_all(PROT_READ | PROT_WRITE);
+  g_soinfo_allocator.protect_all(PROT_READ | PROT_WRITE);
 }

 void soinfo::CallPreInitConstructors() {
@@ -1688,16 +1687,16 @@ static bool soinfo_link_image(soinfo* si, const android_dlextinfo* extinfo) {

   // If this is the main executable, then load all of the libraries from LD_PRELOAD now.
   if (si->flags & FLAG_EXE) {
-    memset(gLdPreloads, 0, sizeof(gLdPreloads));
+    memset(g_ld_preloads, 0, sizeof(g_ld_preloads));
     size_t preload_count = 0;
-    for (size_t i = 0; gLdPreloadNames[i] != NULL; i++) {
-      soinfo* lsi = find_library(gLdPreloadNames[i], NULL);
+    for (size_t i = 0; g_ld_preload_names[i] != NULL; i++) {
+      soinfo* lsi = find_library(g_ld_preload_names[i], NULL);
       if (lsi != NULL) {
-        gLdPreloads[preload_count++] = lsi;
+        g_ld_preloads[preload_count++] = lsi;
       } else {
         // As with glibc, failure to load an LD_PRELOAD library is just a warning.
         DL_WARN("could not load library \"%s\" from LD_PRELOAD for \"%s\"; caused by %s",
-                gLdPreloadNames[i], si->name, linker_get_error_buffer());
+                g_ld_preload_names[i], si->name, linker_get_error_buffer());
       }
     }
   }
@@ -1873,7 +1872,7 @@ static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(
   // Get a few environment variables.
   const char* LD_DEBUG = linker_env_get("LD_DEBUG");
   if (LD_DEBUG != NULL) {
-    gLdDebugVerbosity = atoi(LD_DEBUG);
+    g_ld_debug_verbosity = atoi(LD_DEBUG);
   }

   // Normally, these are cleaned by linker_env_init, but the test
@@ -1888,7 +1887,7 @@ static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(
   // Linker does not call constructors for its own
   // global variables so we need to initialize
   // the allocator explicitly.
-  gSoInfoAllocator.init();
+  g_soinfo_allocator.init();

   INFO("[ android linker & debugger ]");

@@ -1982,8 +1981,8 @@ static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(

   si->CallPreInitConstructors();

-  for (size_t i = 0; gLdPreloads[i] != NULL; ++i) {
-    gLdPreloads[i]->CallConstructors();
+  for (size_t i = 0; g_ld_preloads[i] != NULL; ++i) {
+    g_ld_preloads[i]->CallConstructors();
   }

   /* After the link_image, the si->load_bias is initialized.
@@ -2104,10 +2103,10 @@ extern "C" ElfW(Addr) __linker_init(void* raw_args) {

   // We have successfully fixed our own relocations. It's safe to run
   // the main part of the linker now.
-  args.abort_message_ptr = &gAbortMessage;
+  args.abort_message_ptr = &g_abort_message;
   ElfW(Addr) start_address = __linker_init_post_relocation(args, linker_addr);

-  gSoInfoAllocator.protect_all(PROT_READ);
+  g_soinfo_allocator.protect_all(PROT_READ);

   // Return the address that the calling assembly stub should jump to.
   return start_address;
@@ -203,7 +203,7 @@ ElfW(Sym)* dladdr_find_symbol(soinfo* si, const void* addr);
 ElfW(Sym)* dlsym_handle_lookup(soinfo* si, const char* name);

 void debuggerd_init();
-extern "C" abort_msg_t* gAbortMessage;
+extern "C" abort_msg_t* g_abort_message;
 extern "C" void notify_gdb_of_libraries();

 char* linker_get_error_buffer();
@@ -55,17 +55,17 @@

 #include "private/libc_logging.h"

-__LIBC_HIDDEN__ extern int gLdDebugVerbosity;
+__LIBC_HIDDEN__ extern int g_ld_debug_verbosity;

 #if LINKER_DEBUG_TO_LOG
 #define _PRINTVF(v, x...) \
     do { \
-      if (gLdDebugVerbosity > (v)) __libc_format_log(5-(v), "linker", x); \
+      if (g_ld_debug_verbosity > (v)) __libc_format_log(5-(v), "linker", x); \
     } while (0)
 #else /* !LINKER_DEBUG_TO_LOG */
 #define _PRINTVF(v, x...) \
     do { \
-      if (gLdDebugVerbosity > (v)) { __libc_format_fd(1, x); write(1, "\n", 1); } \
+      if (g_ld_debug_verbosity > (v)) { __libc_format_fd(1, x); write(1, "\n", 1); } \
     } while (0)
 #endif /* !LINKER_DEBUG_TO_LOG */

@@ -27,9 +27,9 @@
 #define ASSERT_SUBSTR(needle, haystack) \
     ASSERT_PRED_FORMAT2(::testing::IsSubstring, needle, haystack)

-static bool gCalled = false;
+static bool g_called = false;
 extern "C" void DlSymTestFunction() {
-  gCalled = true;
+  g_called = true;
 }

 TEST(dlfcn, dlsym_in_self) {
@@ -43,9 +43,9 @@ TEST(dlfcn, dlsym_in_self) {

   void (*function)() = reinterpret_cast<void(*)()>(sym);

-  gCalled = false;
+  g_called = false;
   function();
-  ASSERT_TRUE(gCalled);
+  ASSERT_TRUE(g_called);

   ASSERT_EQ(0, dlclose(self));
 }
@@ -560,27 +560,27 @@ TEST(pthread, pthread_rwlock_smoke) {
   ASSERT_EQ(0, pthread_rwlock_destroy(&l));
 }

-static int gOnceFnCallCount = 0;
+static int g_once_fn_call_count = 0;
 static void OnceFn() {
-  ++gOnceFnCallCount;
+  ++g_once_fn_call_count;
 }

 TEST(pthread, pthread_once_smoke) {
   pthread_once_t once_control = PTHREAD_ONCE_INIT;
   ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
   ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
-  ASSERT_EQ(1, gOnceFnCallCount);
+  ASSERT_EQ(1, g_once_fn_call_count);
 }

-static int gAtForkPrepareCalls = 0;
-static void AtForkPrepare1() { gAtForkPrepareCalls = (gAtForkPrepareCalls << 4) | 1; }
-static void AtForkPrepare2() { gAtForkPrepareCalls = (gAtForkPrepareCalls << 4) | 2; }
-static int gAtForkParentCalls = 0;
-static void AtForkParent1() { gAtForkParentCalls = (gAtForkParentCalls << 4) | 1; }
-static void AtForkParent2() { gAtForkParentCalls = (gAtForkParentCalls << 4) | 2; }
-static int gAtForkChildCalls = 0;
-static void AtForkChild1() { gAtForkChildCalls = (gAtForkChildCalls << 4) | 1; }
-static void AtForkChild2() { gAtForkChildCalls = (gAtForkChildCalls << 4) | 2; }
+static int g_atfork_prepare_calls = 0;
+static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 1; }
+static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 2; }
+static int g_atfork_parent_calls = 0;
+static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 1; }
+static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 2; }
+static int g_atfork_child_calls = 0;
+static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 1; }
+static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 2; }

 TEST(pthread, pthread_atfork) {
   ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
@@ -591,13 +591,13 @@ TEST(pthread, pthread_atfork) {

   // Child and parent calls are made in the order they were registered.
   if (pid == 0) {
-    ASSERT_EQ(0x12, gAtForkChildCalls);
+    ASSERT_EQ(0x12, g_atfork_child_calls);
     _exit(0);
   }
-  ASSERT_EQ(0x12, gAtForkParentCalls);
+  ASSERT_EQ(0x12, g_atfork_parent_calls);

   // Prepare calls are made in the reverse order.
-  ASSERT_EQ(0x21, gAtForkPrepareCalls);
+  ASSERT_EQ(0x21, g_atfork_prepare_calls);
 }

 TEST(pthread, pthread_attr_getscope) {
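The pthread_atfork test above encodes call order by shifting the previous trace value left one hex digit and OR-ing in the handler's number, so the final value reads as the call sequence: 0x12 when handler 1 ran before handler 2 (parent and child handlers run in registration order), and 0x21 for the reverse order (prepare handlers run last-registered first). A worked example of that encoding, using hypothetical local names:

#include <assert.h>

// Append a handler id (1-9) as the next hex digit of a call-order trace.
static int record(int trace, int id) {
  return (trace << 4) | id;
}

int main() {
  // Parent/child handlers run in registration order: 1 then 2 -> 0x12.
  int parent_trace = 0;
  parent_trace = record(parent_trace, 1);
  parent_trace = record(parent_trace, 2);
  assert(parent_trace == 0x12);

  // Prepare handlers run in reverse registration order: 2 then 1 -> 0x21.
  int prepare_trace = 0;
  prepare_trace = record(prepare_trace, 2);
  prepare_trace = record(prepare_trace, 1);
  assert(prepare_trace == 0x21);
  return 0;
}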
@@ -146,10 +146,10 @@ TEST(signal, sigwait) {
   ASSERT_EQ(SIGALRM, received_signal);
 }

-static int gSigSuspendTestHelperCallCount = 0;
+static int g_sigsuspend_test_helper_call_count = 0;

 static void SigSuspendTestHelper(int) {
-  ++gSigSuspendTestHelperCallCount;
+  ++g_sigsuspend_test_helper_call_count;
 }

 TEST(signal, sigsuspend_sigpending) {
@@ -172,7 +172,7 @@ TEST(signal, sigsuspend_sigpending) {

   // Raise SIGALRM and check our signal handler wasn't called.
   raise(SIGALRM);
-  ASSERT_EQ(0, gSigSuspendTestHelperCallCount);
+  ASSERT_EQ(0, g_sigsuspend_test_helper_call_count);

   // We should now have a pending SIGALRM but nothing else.
   sigemptyset(&pending);
@@ -188,7 +188,7 @@ TEST(signal, sigsuspend_sigpending) {
   ASSERT_EQ(-1, sigsuspend(&not_SIGALRM));
   ASSERT_EQ(EINTR, errno);
   // ...and check that we now receive our pending SIGALRM.
-  ASSERT_EQ(1, gSigSuspendTestHelperCallCount);
+  ASSERT_EQ(1, g_sigsuspend_test_helper_call_count);

   // Restore the original set.
   ASSERT_EQ(0, sigprocmask(SIG_SETMASK, &original_set, NULL));
@@ -114,18 +114,18 @@ TEST(unistd, ftruncate64) {
   ASSERT_EQ(123, sb.st_size);
 }

-static bool gPauseTestFlag = false;
+static bool g_pause_test_flag = false;
 static void PauseTestSignalHandler(int) {
-  gPauseTestFlag = true;
+  g_pause_test_flag = true;
 }

 TEST(unistd, pause) {
   ScopedSignalHandler handler(SIGALRM, PauseTestSignalHandler);

   alarm(1);
-  ASSERT_FALSE(gPauseTestFlag);
+  ASSERT_FALSE(g_pause_test_flag);
   ASSERT_EQ(-1, pause());
-  ASSERT_TRUE(gPauseTestFlag);
+  ASSERT_TRUE(g_pause_test_flag);
 }

 TEST(unistd, read) {