commit b2da973fe9
* commit '6eb0fe2b02bcc7d82ba23df6cfaef0369e7b068b': Switch to g_ for globals.
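The change is mechanical throughout: file-scope globals move from the old gCamelCase (and sometimes sCamelCase) prefix style to g_snake_case, a common C++ convention that makes mutable globals stand out at a glance. A minimal before/after sketch of the pattern (illustrative only, not taken from the diff):

    // Before: 'g' prefix with CamelCase.
    static pthread_mutex_t gFooLock = PTHREAD_MUTEX_INITIALIZER;
    // After: 'g_' prefix with snake_case.
    static pthread_mutex_t g_foo_lock = PTHREAD_MUTEX_INITIALIZER;

One table (the linker's default search-path list) instead becomes kDefaultLdPaths, since it is an immutable constant rather than mutable global state.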
@@ -25,13 +25,13 @@

 #include <inttypes.h>

-static int64_t gBytesProcessed;
-static int64_t gBenchmarkTotalTimeNs;
-static int64_t gBenchmarkStartTimeNs;
+static int64_t g_bytes_processed;
+static int64_t g_benchmark_total_time_ns;
+static int64_t g_benchmark_start_time_ns;

 typedef std::map<std::string, ::testing::Benchmark*> BenchmarkMap;
 typedef BenchmarkMap::iterator BenchmarkMapIt;
-static BenchmarkMap gBenchmarks;
+static BenchmarkMap g_benchmarks;

 static int Round(int n) {
   int base = 1;
@@ -96,7 +96,7 @@ void Benchmark::Register(const char* name, void (*fn)(int), void (*fn_range)(int
     exit(EXIT_FAILURE);
   }

-  gBenchmarks.insert(std::make_pair(name, this));
+  g_benchmarks.insert(std::make_pair(name, this));
 }

 void Benchmark::Run() {
@@ -114,16 +114,16 @@ void Benchmark::Run() {
 }

 void Benchmark::RunRepeatedlyWithArg(int iterations, int arg) {
-  gBytesProcessed = 0;
-  gBenchmarkTotalTimeNs = 0;
-  gBenchmarkStartTimeNs = NanoTime();
+  g_bytes_processed = 0;
+  g_benchmark_total_time_ns = 0;
+  g_benchmark_start_time_ns = NanoTime();
   if (fn_ != NULL) {
     fn_(iterations);
   } else {
     fn_range_(iterations, arg);
   }
-  if (gBenchmarkStartTimeNs != 0) {
-    gBenchmarkTotalTimeNs += NanoTime() - gBenchmarkStartTimeNs;
+  if (g_benchmark_start_time_ns != 0) {
+    g_benchmark_total_time_ns += NanoTime() - g_benchmark_start_time_ns;
   }
 }

@@ -131,12 +131,12 @@ void Benchmark::RunWithArg(int arg) {
   // run once in case it's expensive
   int iterations = 1;
   RunRepeatedlyWithArg(iterations, arg);
-  while (gBenchmarkTotalTimeNs < 1e9 && iterations < 1e9) {
+  while (g_benchmark_total_time_ns < 1e9 && iterations < 1e9) {
     int last = iterations;
-    if (gBenchmarkTotalTimeNs/iterations == 0) {
+    if (g_benchmark_total_time_ns/iterations == 0) {
       iterations = 1e9;
     } else {
-      iterations = 1e9 / (gBenchmarkTotalTimeNs/iterations);
+      iterations = 1e9 / (g_benchmark_total_time_ns/iterations);
     }
     iterations = std::max(last + 1, std::min(iterations + iterations/2, 100*last));
     iterations = Round(iterations);
@@ -145,9 +145,9 @@ void Benchmark::RunWithArg(int arg) {

   char throughput[100];
   throughput[0] = '\0';
-  if (gBenchmarkTotalTimeNs > 0 && gBytesProcessed > 0) {
-    double mib_processed = static_cast<double>(gBytesProcessed)/1e6;
-    double seconds = static_cast<double>(gBenchmarkTotalTimeNs)/1e9;
+  if (g_benchmark_total_time_ns > 0 && g_bytes_processed > 0) {
+    double mib_processed = static_cast<double>(g_bytes_processed)/1e6;
+    double seconds = static_cast<double>(g_benchmark_total_time_ns)/1e9;
     snprintf(throughput, sizeof(throughput), " %8.2f MiB/s", mib_processed/seconds);
   }

@@ -165,37 +165,37 @@ void Benchmark::RunWithArg(int arg) {
   }

   printf("%-20s %10d %10" PRId64 "%s\n", full_name,
-         iterations, gBenchmarkTotalTimeNs/iterations, throughput);
+         iterations, g_benchmark_total_time_ns/iterations, throughput);
   fflush(stdout);
 }

 }  // namespace testing

 void SetBenchmarkBytesProcessed(int64_t x) {
-  gBytesProcessed = x;
+  g_bytes_processed = x;
 }

 void StopBenchmarkTiming() {
-  if (gBenchmarkStartTimeNs != 0) {
-    gBenchmarkTotalTimeNs += NanoTime() - gBenchmarkStartTimeNs;
+  if (g_benchmark_start_time_ns != 0) {
+    g_benchmark_total_time_ns += NanoTime() - g_benchmark_start_time_ns;
   }
-  gBenchmarkStartTimeNs = 0;
+  g_benchmark_start_time_ns = 0;
 }

 void StartBenchmarkTiming() {
-  if (gBenchmarkStartTimeNs == 0) {
-    gBenchmarkStartTimeNs = NanoTime();
+  if (g_benchmark_start_time_ns == 0) {
+    g_benchmark_start_time_ns = NanoTime();
   }
 }

 int main(int argc, char* argv[]) {
-  if (gBenchmarks.empty()) {
+  if (g_benchmarks.empty()) {
     fprintf(stderr, "No benchmarks registered!\n");
     exit(EXIT_FAILURE);
   }

   bool need_header = true;
-  for (BenchmarkMapIt it = gBenchmarks.begin(); it != gBenchmarks.end(); ++it) {
+  for (BenchmarkMapIt it = g_benchmarks.begin(); it != g_benchmarks.end(); ++it) {
     ::testing::Benchmark* b = it->second;
     if (b->ShouldRun(argc, argv)) {
       if (need_header) {
@@ -210,7 +210,7 @@ int main(int argc, char* argv[]) {
   if (need_header) {
     fprintf(stderr, "No matching benchmarks!\n");
     fprintf(stderr, "Available benchmarks:\n");
-    for (BenchmarkMapIt it = gBenchmarks.begin(); it != gBenchmarks.end(); ++it) {
+    for (BenchmarkMapIt it = g_benchmarks.begin(); it != g_benchmarks.end(); ++it) {
       fprintf(stderr, "  %s\n", it->second->Name());
     }
     exit(EXIT_FAILURE);
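The calibration loop in RunWithArg above aims for roughly one second of total run time: after a single trial run it estimates how many iterations fit into 1e9 ns, overshoots by 50%, and clamps growth between last+1 and 100*last. A rough standalone sketch of that growth rule, under the assumption that per-iteration cost stays stable (the helper name and the numbers are mine, not from the diff):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Sketch of the iteration-count growth rule; total_time_ns stands in
    // for g_benchmark_total_time_ns measured over the previous run.
    static int NextIterationCount(int iterations, int64_t total_time_ns) {
      int last = iterations;
      if (total_time_ns / iterations == 0) {
        iterations = 1e9;  // Too fast to measure; jump straight to the cap.
      } else {
        // Aim for ~1s total: 1e9 ns divided by the measured ns/iteration.
        iterations = 1e9 / (total_time_ns / iterations);
      }
      // Grow by at least 1, overshoot by 50%, never more than 100x at once.
      return std::max(last + 1, std::min(iterations + iterations / 2, 100 * last));
    }

    int main() {
      // 1 iteration took 2ms: the 1s target suggests 500 iterations,
      // overshoot gives 750, but the 100*last clamp yields 100.
      printf("%d\n", NextIterationCount(1, 2000000));  // prints 100
      return 0;
    }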
@@ -50,30 +50,30 @@ typedef struct _Unwind_Context __unwind_context;
 typedef _Unwind_Context __unwind_context;
 #endif

-static mapinfo_t* gMapInfo = NULL;
-static void* gDemangler;
+static mapinfo_t* g_map_info = NULL;
+static void* g_demangler;
 typedef char* (*DemanglerFn)(const char*, char*, size_t*, int*);
-static DemanglerFn gDemanglerFn = NULL;
+static DemanglerFn g_demangler_fn = NULL;

 __LIBC_HIDDEN__ void backtrace_startup() {
-  gMapInfo = mapinfo_create(getpid());
-  gDemangler = dlopen("libgccdemangle.so", RTLD_NOW);
-  if (gDemangler != NULL) {
-    void* sym = dlsym(gDemangler, "__cxa_demangle");
-    gDemanglerFn = reinterpret_cast<DemanglerFn>(sym);
+  g_map_info = mapinfo_create(getpid());
+  g_demangler = dlopen("libgccdemangle.so", RTLD_NOW);
+  if (g_demangler != NULL) {
+    void* sym = dlsym(g_demangler, "__cxa_demangle");
+    g_demangler_fn = reinterpret_cast<DemanglerFn>(sym);
   }
 }

 __LIBC_HIDDEN__ void backtrace_shutdown() {
-  mapinfo_destroy(gMapInfo);
-  dlclose(gDemangler);
+  mapinfo_destroy(g_map_info);
+  dlclose(g_demangler);
 }

 static char* demangle(const char* symbol) {
-  if (gDemanglerFn == NULL) {
+  if (g_demangler_fn == NULL) {
     return NULL;
   }
-  return (*gDemanglerFn)(symbol, NULL, NULL, NULL);
+  return (*g_demangler_fn)(symbol, NULL, NULL, NULL);
 }

 struct stack_crawl_state_t {
@@ -147,7 +147,7 @@ __LIBC_HIDDEN__ void log_backtrace(uintptr_t* frames, size_t frame_count) {
   }

   uintptr_t rel_pc;
-  const mapinfo_t* mi = (gMapInfo != NULL) ? mapinfo_find(gMapInfo, frames[i], &rel_pc) : NULL;
+  const mapinfo_t* mi = (g_map_info != NULL) ? mapinfo_find(g_map_info, frames[i], &rel_pc) : NULL;
   const char* soname = (mi != NULL) ? mi->name : info.dli_fname;
   if (soname == NULL) {
     soname = "<unknown>";
@@ -77,7 +77,7 @@ static size_t get_main_thread_stack_size() {
  * apply to linker-private copies and will not be visible from libc later on.
  *
  * Note: this function creates a pthread_internal_t for the initial thread and
- * stores the pointer in TLS, but does not add it to pthread's gThreadList. This
+ * stores the pointer in TLS, but does not add it to pthread's thread list. This
  * has to be done later from libc itself (see __libc_init_common).
  *
  * This function also stores a pointer to the kernel argument block in a TLS slot to be
@@ -45,7 +45,7 @@
 #include <time.h>
 #include <unistd.h>

-static pthread_mutex_t gAbortMsgLock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t g_abort_msg_lock = PTHREAD_MUTEX_INITIALIZER;

 __LIBC_HIDDEN__ abort_msg_t** __abort_message_ptr; // Accessible to __libc_init_common.

@@ -643,7 +643,7 @@ void __libc_fatal(const char* format, ...) {
 }

 void __android_set_abort_message(const char* msg) {
-  ScopedPthreadMutexLocker locker(&gAbortMsgLock);
+  ScopedPthreadMutexLocker locker(&g_abort_msg_lock);

   if (__abort_message_ptr == NULL) {
     // We must have crashed _very_ early.
@@ -36,43 +36,43 @@ struct __locale_t {
   // Because we only support one locale, these are just tokens with no data.
 };

-static pthread_once_t gLocaleOnce = PTHREAD_ONCE_INIT;
-static lconv gLocale;
+static pthread_once_t g_locale_once = PTHREAD_ONCE_INIT;
+static lconv g_locale;

 // We don't use pthread_once for this so that we know when the resource (a TLS slot) will be taken.
-static pthread_key_t gUselocaleKey;
+static pthread_key_t g_uselocale_key;
 __attribute__((constructor)) static void __bionic_tls_uselocale_key_init() {
-  pthread_key_create(&gUselocaleKey, NULL);
+  pthread_key_create(&g_uselocale_key, NULL);
 }

 static void __locale_init() {
-  gLocale.decimal_point = const_cast<char*>(".");
+  g_locale.decimal_point = const_cast<char*>(".");

   char* not_available = const_cast<char*>("");
-  gLocale.thousands_sep = not_available;
-  gLocale.grouping = not_available;
-  gLocale.int_curr_symbol = not_available;
-  gLocale.currency_symbol = not_available;
-  gLocale.mon_decimal_point = not_available;
-  gLocale.mon_thousands_sep = not_available;
-  gLocale.mon_grouping = not_available;
-  gLocale.positive_sign = not_available;
-  gLocale.negative_sign = not_available;
+  g_locale.thousands_sep = not_available;
+  g_locale.grouping = not_available;
+  g_locale.int_curr_symbol = not_available;
+  g_locale.currency_symbol = not_available;
+  g_locale.mon_decimal_point = not_available;
+  g_locale.mon_thousands_sep = not_available;
+  g_locale.mon_grouping = not_available;
+  g_locale.positive_sign = not_available;
+  g_locale.negative_sign = not_available;

-  gLocale.int_frac_digits = CHAR_MAX;
-  gLocale.frac_digits = CHAR_MAX;
-  gLocale.p_cs_precedes = CHAR_MAX;
-  gLocale.p_sep_by_space = CHAR_MAX;
-  gLocale.n_cs_precedes = CHAR_MAX;
-  gLocale.n_sep_by_space = CHAR_MAX;
-  gLocale.p_sign_posn = CHAR_MAX;
-  gLocale.n_sign_posn = CHAR_MAX;
-  gLocale.int_p_cs_precedes = CHAR_MAX;
-  gLocale.int_p_sep_by_space = CHAR_MAX;
-  gLocale.int_n_cs_precedes = CHAR_MAX;
-  gLocale.int_n_sep_by_space = CHAR_MAX;
-  gLocale.int_p_sign_posn = CHAR_MAX;
-  gLocale.int_n_sign_posn = CHAR_MAX;
+  g_locale.int_frac_digits = CHAR_MAX;
+  g_locale.frac_digits = CHAR_MAX;
+  g_locale.p_cs_precedes = CHAR_MAX;
+  g_locale.p_sep_by_space = CHAR_MAX;
+  g_locale.n_cs_precedes = CHAR_MAX;
+  g_locale.n_sep_by_space = CHAR_MAX;
+  g_locale.p_sign_posn = CHAR_MAX;
+  g_locale.n_sign_posn = CHAR_MAX;
+  g_locale.int_p_cs_precedes = CHAR_MAX;
+  g_locale.int_p_sep_by_space = CHAR_MAX;
+  g_locale.int_n_cs_precedes = CHAR_MAX;
+  g_locale.int_n_sep_by_space = CHAR_MAX;
+  g_locale.int_p_sign_posn = CHAR_MAX;
+  g_locale.int_n_sign_posn = CHAR_MAX;
 }

 static bool __bionic_current_locale_is_utf8 = false;
@@ -88,8 +88,8 @@ static locale_t __new_locale() {
 }

 lconv* localeconv() {
-  pthread_once(&gLocaleOnce, __locale_init);
-  return &gLocale;
+  pthread_once(&g_locale_once, __locale_init);
+  return &g_locale;
 }

 locale_t duplocale(locale_t l) {
@@ -140,7 +140,7 @@ char* setlocale(int category, const char* locale_name) {
 }

 locale_t uselocale(locale_t new_locale) {
-  locale_t old_locale = static_cast<locale_t>(pthread_getspecific(gUselocaleKey));
+  locale_t old_locale = static_cast<locale_t>(pthread_getspecific(g_uselocale_key));

   // If this is the first call to uselocale(3) on this thread, we return LC_GLOBAL_LOCALE.
   if (old_locale == NULL) {
@@ -148,7 +148,7 @@ locale_t uselocale(locale_t new_locale) {
   }

   if (new_locale != NULL) {
-    pthread_setspecific(gUselocaleKey, new_locale);
+    pthread_setspecific(g_uselocale_key, new_locale);
   }

   return old_locale;
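locale.cpp shows two lazy-initialization idioms side by side: pthread_once for the lconv data returned by localeconv(), and a library constructor for the TLS key (the comment notes pthread_once is deliberately avoided there so it is known when the TLS slot is taken). A minimal standalone sketch of the pthread_once idiom, with illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_once_t g_init_once = PTHREAD_ONCE_INIT;
    static int g_value;

    static void init_value() {
      g_value = 42;  // Runs exactly once, even if many threads race into get_value().
    }

    int get_value() {
      pthread_once(&g_init_once, init_value);
      return g_value;
    }

    int main() {
      printf("%d\n", get_value());  // 42
      return 0;
    }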
@@ -53,8 +53,8 @@
 #include "private/ScopedPthreadMutexLocker.h"

 /* libc.debug.malloc.backlog */
-extern unsigned int gMallocDebugBacklog;
-extern int gMallocDebugLevel;
+extern unsigned int g_malloc_debug_backlog;
+extern int g_malloc_debug_level;

 #define MAX_BACKTRACE_DEPTH 16
 #define ALLOCATION_TAG      0x1ee7d00d
@@ -108,8 +108,10 @@ static inline const hdr_t* const_meta(const void* user) {
   return reinterpret_cast<const hdr_t*>(user) - 1;
 }

-
-static unsigned gAllocatedBlockCount;
+// TODO: introduce a struct for this global state.
+// There are basically two lists here, the regular list and the backlog list.
+// We should be able to remove the duplication.
+static unsigned g_allocated_block_count;
 static hdr_t* tail;
 static hdr_t* head;
 static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
@@ -188,7 +190,7 @@ static inline void add(hdr_t* hdr, size_t size) {
   hdr->size = size;
   init_front_guard(hdr);
   init_rear_guard(hdr);
-  ++gAllocatedBlockCount;
+  ++g_allocated_block_count;
   add_locked(hdr, &tail, &head);
 }

@@ -199,7 +201,7 @@ static inline int del(hdr_t* hdr) {

   ScopedPthreadMutexLocker locker(&lock);
   del_locked(hdr, &tail, &head);
-  --gAllocatedBlockCount;
+  --g_allocated_block_count;
   return 0;
 }

@@ -306,7 +308,7 @@ static inline void del_from_backlog(hdr_t* hdr) {

 static inline int del_leak(hdr_t* hdr, int* safe) {
   ScopedPthreadMutexLocker locker(&lock);
-  return del_and_check_locked(hdr, &tail, &head, &gAllocatedBlockCount, safe);
+  return del_and_check_locked(hdr, &tail, &head, &g_allocated_block_count, safe);
 }

 static inline void add_to_backlog(hdr_t* hdr) {
@@ -316,7 +318,7 @@ static inline void add_to_backlog(hdr_t* hdr) {
   add_locked(hdr, &backlog_tail, &backlog_head);
   poison(hdr);
   /* If we've exceeded the maximum backlog, clear it up */
-  while (backlog_num > gMallocDebugBacklog) {
+  while (backlog_num > g_malloc_debug_backlog) {
     hdr_t* gone = backlog_tail;
     del_from_backlog_locked(gone);
     dlfree(gone->base);
@@ -508,7 +510,7 @@ extern "C" size_t chk_malloc_usable_size(const void* ptr) {

 static void ReportMemoryLeaks() {
   // We only track leaks at level 10.
-  if (gMallocDebugLevel != 10) {
+  if (g_malloc_debug_level != 10) {
     return;
   }

@@ -522,13 +524,13 @@ static void ReportMemoryLeaks() {
     exe[count] = '\0';
   }

-  if (gAllocatedBlockCount == 0) {
+  if (g_allocated_block_count == 0) {
     log_message("+++ %s did not leak", exe);
     return;
   }

   size_t index = 1;
-  const size_t total = gAllocatedBlockCount;
+  const size_t total = g_allocated_block_count;
   while (head != NULL) {
     int safe;
     hdr_t* block = head;
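Many of these files guard the renamed globals with ScopedPthreadMutexLocker, bionic's RAII wrapper around a pthread mutex: locking in the constructor and unlocking in the destructor makes every early return path in functions like del_leak() safe. A plausible minimal implementation of such a wrapper (a sketch; bionic's actual header may differ in details):

    #include <pthread.h>

    // Minimal RAII mutex guard in the style of bionic's ScopedPthreadMutexLocker.
    class ScopedPthreadMutexLocker {
     public:
      explicit ScopedPthreadMutexLocker(pthread_mutex_t* mu) : mu_(mu) {
        pthread_mutex_lock(mu_);
      }
      ~ScopedPthreadMutexLocker() {
        pthread_mutex_unlock(mu_);
      }
     private:
      pthread_mutex_t* mu_;
      // Non-copyable: a copy would unlock the mutex twice.
      ScopedPthreadMutexLocker(const ScopedPthreadMutexLocker&);
      void operator=(const ScopedPthreadMutexLocker&);
    };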
@@ -54,8 +54,8 @@
  */
 int gMallocLeakZygoteChild = 0;

-pthread_mutex_t gAllocationsMutex = PTHREAD_MUTEX_INITIALIZER;
-HashTable gHashTable;
+pthread_mutex_t g_allocations_mutex = PTHREAD_MUTEX_INITIALIZER;
+HashTable g_hash_table;

 // =============================================================================
 // output functions
@@ -122,9 +122,9 @@ extern "C" void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
   }
   *totalMemory = 0;

-  ScopedPthreadMutexLocker locker(&gAllocationsMutex);
+  ScopedPthreadMutexLocker locker(&g_allocations_mutex);

-  if (gHashTable.count == 0) {
+  if (g_hash_table.count == 0) {
     *info = NULL;
     *overallSize = 0;
     *infoSize = 0;
@@ -132,12 +132,12 @@ extern "C" void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
     return;
   }

-  HashEntry** list = static_cast<HashEntry**>(dlmalloc(sizeof(void*) * gHashTable.count));
+  HashEntry** list = static_cast<HashEntry**>(dlmalloc(sizeof(void*) * g_hash_table.count));

   // get the entries into an array to be sorted
   int index = 0;
   for (size_t i = 0 ; i < HASHTABLE_SIZE ; ++i) {
-    HashEntry* entry = gHashTable.slots[i];
+    HashEntry* entry = g_hash_table.slots[i];
     while (entry != NULL) {
       list[index] = entry;
       *totalMemory = *totalMemory +
@@ -149,7 +149,7 @@ extern "C" void get_malloc_leak_info(uint8_t** info, size_t* overallSize,

   // XXX: the protocol doesn't allow variable size for the stack trace (yet)
   *infoSize = (sizeof(size_t) * 2) + (sizeof(uintptr_t) * BACKTRACE_SIZE);
-  *overallSize = *infoSize * gHashTable.count;
+  *overallSize = *infoSize * g_hash_table.count;
   *backtraceSize = BACKTRACE_SIZE;

   // now get a byte array big enough for this
@@ -161,10 +161,10 @@ extern "C" void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
     return;
   }

-  qsort(list, gHashTable.count, sizeof(void*), hash_entry_compare);
+  qsort(list, g_hash_table.count, sizeof(void*), hash_entry_compare);

   uint8_t* head = *info;
-  const int count = gHashTable.count;
+  const int count = g_hash_table.count;
   for (int i = 0 ; i < count ; ++i) {
     HashEntry* entry = list[i];
     size_t entrySize = (sizeof(size_t) * 2) + (sizeof(uintptr_t) * entry->numEntries);
@@ -253,7 +253,7 @@ extern "C" size_t malloc_usable_size(const void* mem) {
 #include "private/libc_logging.h"

 /* Table for dispatching malloc calls, depending on environment. */
-static MallocDebug gMallocUse __attribute__((aligned(32))) = {
+static MallocDebug g_malloc_dispatch_table __attribute__((aligned(32))) = {
   dlmalloc, dlfree, dlcalloc, dlrealloc, dlmemalign, dlmalloc_usable_size
 };

@@ -286,11 +286,11 @@ static void* libc_malloc_impl_handle = NULL;
 * backlog we use to detect multiple frees. If the property is not set, the
 * backlog length defaults to BACKLOG_DEFAULT_LEN.
 */
-unsigned int gMallocDebugBacklog;
+unsigned int g_malloc_debug_backlog;
 #define BACKLOG_DEFAULT_LEN 100

 /* The value of libc.debug.malloc. */
-int gMallocDebugLevel;
+int g_malloc_debug_level;

 template<typename FunctionType>
 static void InitMallocFunction(void* malloc_impl_handler, FunctionType* func, const char* prefix, const char* suffix) {
@@ -304,7 +304,7 @@ static void InitMallocFunction(void* malloc_impl_handler, FunctionType* func, co

 static void InitMalloc(void* malloc_impl_handler, MallocDebug* table, const char* prefix) {
   __libc_format_log(ANDROID_LOG_INFO, "libc", "%s: using libc.debug.malloc %d (%s)\n",
-                    __progname, gMallocDebugLevel, prefix);
+                    __progname, g_malloc_debug_level, prefix);

   InitMallocFunction<MallocDebugMalloc>(malloc_impl_handler, &table->malloc, prefix, "malloc");
   InitMallocFunction<MallocDebugFree>(malloc_impl_handler, &table->free, prefix, "free");
@@ -332,7 +332,7 @@ static void malloc_init_impl() {
   if (__system_property_get("ro.kernel.memcheck", memcheck_tracing)) {
     if (memcheck_tracing[0] != '0') {
       // Emulator has started with memory tracing enabled. Enforce it.
-      gMallocDebugLevel = 20;
+      g_malloc_debug_level = 20;
       memcheck_enabled = 1;
     }
   }
@@ -340,13 +340,13 @@ static void malloc_init_impl() {

   /* If debug level has not been set by memcheck option in the emulator,
    * lets grab it from libc.debug.malloc system property. */
-  if (gMallocDebugLevel == 0 && __system_property_get("libc.debug.malloc", env)) {
-    gMallocDebugLevel = atoi(env);
+  if (g_malloc_debug_level == 0 && __system_property_get("libc.debug.malloc", env)) {
+    g_malloc_debug_level = atoi(env);
   }

   /* Debug level 0 means that we should use dlxxx allocation
    * routines (default). */
-  if (gMallocDebugLevel == 0) {
+  if (g_malloc_debug_level == 0) {
     return;
   }

@@ -360,24 +360,24 @@ static void malloc_init_impl() {
   }

   // mksh is way too leaky. http://b/7291287.
-  if (gMallocDebugLevel >= 10) {
+  if (g_malloc_debug_level >= 10) {
     if (strcmp(__progname, "sh") == 0 || strcmp(__progname, "/system/bin/sh") == 0) {
       return;
     }
   }

   // Choose the appropriate .so for the requested debug level.
-  switch (gMallocDebugLevel) {
+  switch (g_malloc_debug_level) {
     case 1:
     case 5:
     case 10: {
       char debug_backlog[PROP_VALUE_MAX];
       if (__system_property_get("libc.debug.malloc.backlog", debug_backlog)) {
-        gMallocDebugBacklog = atoi(debug_backlog);
-        info_log("%s: setting backlog length to %d\n", __progname, gMallocDebugBacklog);
+        g_malloc_debug_backlog = atoi(debug_backlog);
+        info_log("%s: setting backlog length to %d\n", __progname, g_malloc_debug_backlog);
       }
-      if (gMallocDebugBacklog == 0) {
-        gMallocDebugBacklog = BACKLOG_DEFAULT_LEN;
+      if (g_malloc_debug_backlog == 0) {
+        g_malloc_debug_backlog = BACKLOG_DEFAULT_LEN;
       }
       so_name = "libc_malloc_debug_leak.so";
       break;
@@ -386,7 +386,7 @@ static void malloc_init_impl() {
       // Quick check: debug level 20 can only be handled in emulator.
       if (!qemu_running) {
         error_log("%s: Debug level %d can only be set in emulator\n",
-                  __progname, gMallocDebugLevel);
+                  __progname, g_malloc_debug_level);
         return;
       }
       // Make sure that memory checking has been enabled in emulator.
@@ -398,7 +398,7 @@ static void malloc_init_impl() {
       so_name = "libc_malloc_debug_qemu.so";
       break;
     default:
-      error_log("%s: Debug level %d is unknown\n", __progname, gMallocDebugLevel);
+      error_log("%s: Debug level %d is unknown\n", __progname, g_malloc_debug_level);
       return;
   }

@@ -406,7 +406,7 @@ static void malloc_init_impl() {
   void* malloc_impl_handle = dlopen(so_name, RTLD_LAZY);
   if (malloc_impl_handle == NULL) {
     error_log("%s: Missing module %s required for malloc debug level %d: %s",
-              __progname, so_name, gMallocDebugLevel, dlerror());
+              __progname, so_name, g_malloc_debug_level, dlerror());
     return;
   }

@@ -424,7 +424,7 @@ static void malloc_init_impl() {
     return;
   }

-  if (gMallocDebugLevel == 20) {
+  if (g_malloc_debug_level == 20) {
     // For memory checker we need to do extra initialization.
     typedef int (*MemCheckInit)(int, const char*);
     MemCheckInit memcheck_initialize =
@@ -445,35 +445,35 @@ static void malloc_init_impl() {

   // Initialize malloc dispatch table with appropriate routines.
-  switch (gMallocDebugLevel) {
+  switch (g_malloc_debug_level) {
     case 1:
-      InitMalloc(malloc_impl_handle, &gMallocUse, "leak");
+      InitMalloc(malloc_impl_handle, &g_malloc_dispatch_table, "leak");
       break;
     case 5:
-      InitMalloc(malloc_impl_handle, &gMallocUse, "fill");
+      InitMalloc(malloc_impl_handle, &g_malloc_dispatch_table, "fill");
       break;
     case 10:
-      InitMalloc(malloc_impl_handle, &gMallocUse, "chk");
+      InitMalloc(malloc_impl_handle, &g_malloc_dispatch_table, "chk");
       break;
     case 20:
-      InitMalloc(malloc_impl_handle, &gMallocUse, "qemu_instrumented");
+      InitMalloc(malloc_impl_handle, &g_malloc_dispatch_table, "qemu_instrumented");
      break;
     default:
       break;
   }

   // Make sure dispatch table is initialized
-  if ((gMallocUse.malloc == NULL) ||
-      (gMallocUse.free == NULL) ||
-      (gMallocUse.calloc == NULL) ||
-      (gMallocUse.realloc == NULL) ||
-      (gMallocUse.memalign == NULL) ||
-      (gMallocUse.malloc_usable_size == NULL)) {
+  if ((g_malloc_dispatch_table.malloc == NULL) ||
+      (g_malloc_dispatch_table.free == NULL) ||
+      (g_malloc_dispatch_table.calloc == NULL) ||
+      (g_malloc_dispatch_table.realloc == NULL) ||
+      (g_malloc_dispatch_table.memalign == NULL) ||
+      (g_malloc_dispatch_table.malloc_usable_size == NULL)) {
     error_log("%s: some symbols for libc.debug.malloc level %d were not found (see above)",
-              __progname, gMallocDebugLevel);
+              __progname, g_malloc_debug_level);
     dlclose(malloc_impl_handle);
   } else {
-    __libc_malloc_dispatch = &gMallocUse;
+    __libc_malloc_dispatch = &g_malloc_dispatch_table;
     libc_malloc_impl_handle = malloc_impl_handle;
   }
 }
@@ -61,8 +61,8 @@

 // Global variables defined in malloc_debug_common.c
 extern int gMallocLeakZygoteChild;
-extern pthread_mutex_t gAllocationsMutex;
-extern HashTable gHashTable;
+extern pthread_mutex_t g_allocations_mutex;
+extern HashTable g_hash_table;

 // =============================================================================
 // stack trace functions
@@ -138,7 +138,7 @@ static HashEntry* record_backtrace(uintptr_t* backtrace, size_t numEntries, size
     size |= SIZE_FLAG_ZYGOTE_CHILD;
   }

-  HashEntry* entry = find_entry(&gHashTable, slot, backtrace, numEntries, size);
+  HashEntry* entry = find_entry(&g_hash_table, slot, backtrace, numEntries, size);

   if (entry != NULL) {
     entry->allocations++;
@@ -151,20 +151,20 @@ static HashEntry* record_backtrace(uintptr_t* backtrace, size_t numEntries, size
     entry->allocations = 1;
     entry->slot = slot;
     entry->prev = NULL;
-    entry->next = gHashTable.slots[slot];
+    entry->next = g_hash_table.slots[slot];
     entry->numEntries = numEntries;
     entry->size = size;

     memcpy(entry->backtrace, backtrace, numEntries * sizeof(uintptr_t));

-    gHashTable.slots[slot] = entry;
+    g_hash_table.slots[slot] = entry;

     if (entry->next != NULL) {
       entry->next->prev = entry;
     }

     // we just added an entry, increase the size of the hashtable
-    gHashTable.count++;
+    g_hash_table.count++;
   }

   return entry;
@@ -174,7 +174,7 @@ static int is_valid_entry(HashEntry* entry) {
   if (entry != NULL) {
     int i;
     for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
-      HashEntry* e1 = gHashTable.slots[i];
+      HashEntry* e1 = g_hash_table.slots[i];

       while (e1 != NULL) {
         if (e1 == entry) {
@@ -198,11 +198,11 @@ static void remove_entry(HashEntry* entry) {

   if (prev == NULL) {
     // we are the head of the list. set the head to be next
-    gHashTable.slots[entry->slot] = entry->next;
+    g_hash_table.slots[entry->slot] = entry->next;
   }

   // we just removed and entry, decrease the size of the hashtable
-  gHashTable.count--;
+  g_hash_table.count--;
 }

 // =============================================================================
@@ -277,7 +277,7 @@ extern "C" void* leak_malloc(size_t bytes) {

   void* base = dlmalloc(size);
   if (base != NULL) {
-    ScopedPthreadMutexLocker locker(&gAllocationsMutex);
+    ScopedPthreadMutexLocker locker(&g_allocations_mutex);

     uintptr_t backtrace[BACKTRACE_SIZE];
     size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);
@@ -296,7 +296,7 @@ extern "C" void* leak_malloc(size_t bytes) {

 extern "C" void leak_free(void* mem) {
   if (mem != NULL) {
-    ScopedPthreadMutexLocker locker(&gAllocationsMutex);
+    ScopedPthreadMutexLocker locker(&g_allocations_mutex);

     // check the guard to make sure it is valid
     AllocationEntry* header = to_header(mem);
@@ -26,7 +26,7 @@ class pthread_accessor {
  public:
   explicit pthread_accessor(pthread_t desired_thread) {
     Lock();
-    for (thread_ = gThreadList; thread_ != NULL; thread_ = thread_->next) {
+    for (thread_ = g_thread_list; thread_ != NULL; thread_ = thread_->next) {
       if (thread_ == reinterpret_cast<pthread_internal_t*>(desired_thread)) {
         break;
       }
@@ -41,7 +41,7 @@ class pthread_accessor {
     if (is_locked_) {
       is_locked_ = false;
       thread_ = NULL;
-      pthread_mutex_unlock(&gThreadListLock);
+      pthread_mutex_unlock(&g_thread_list_lock);
     }
   }

@@ -54,7 +54,7 @@ class pthread_accessor {
   bool is_locked_;

   void Lock() {
-    pthread_mutex_lock(&gThreadListLock);
+    pthread_mutex_lock(&g_thread_list_lock);
     is_locked_ = true;
   }

@@ -29,8 +29,6 @@
 #include <errno.h>
 #include <pthread.h>

-static pthread_mutex_t gAtForkListMutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
-
 struct atfork_t {
   atfork_t* next;
   atfork_t* prev;
@@ -45,7 +43,8 @@ struct atfork_list_t {
   atfork_t* last;
 };

-static atfork_list_t gAtForkList = { NULL, NULL };
+static pthread_mutex_t g_atfork_list_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
+static atfork_list_t g_atfork_list = { NULL, NULL };

 void __bionic_atfork_run_prepare() {
   // We lock the atfork list here, unlock it in the parent, and reset it in the child.
@@ -54,12 +53,12 @@ void __bionic_atfork_run_prepare() {
   //
   // TODO: If a handler tries to mutate the list, they'll block. We should probably copy
   // the list before forking, and have prepare, parent, and child all work on the consistent copy.
-  pthread_mutex_lock(&gAtForkListMutex);
+  pthread_mutex_lock(&g_atfork_list_mutex);

   // Call pthread_atfork() prepare handlers. POSIX states that the prepare
   // handlers should be called in the reverse order of the parent/child
   // handlers, so we iterate backwards.
-  for (atfork_t* it = gAtForkList.last; it != NULL; it = it->prev) {
+  for (atfork_t* it = g_atfork_list.last; it != NULL; it = it->prev) {
     if (it->prepare != NULL) {
       it->prepare();
     }
@@ -67,23 +66,23 @@ void __bionic_atfork_run_prepare() {
 }

 void __bionic_atfork_run_child() {
-  for (atfork_t* it = gAtForkList.first; it != NULL; it = it->next) {
+  for (atfork_t* it = g_atfork_list.first; it != NULL; it = it->next) {
     if (it->child != NULL) {
       it->child();
     }
   }

-  gAtForkListMutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
+  g_atfork_list_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
 }

 void __bionic_atfork_run_parent() {
-  for (atfork_t* it = gAtForkList.first; it != NULL; it = it->next) {
+  for (atfork_t* it = g_atfork_list.first; it != NULL; it = it->next) {
     if (it->parent != NULL) {
       it->parent();
     }
   }

-  pthread_mutex_unlock(&gAtForkListMutex);
+  pthread_mutex_unlock(&g_atfork_list_mutex);
 }

 int pthread_atfork(void (*prepare)(void), void (*parent)(void), void(*child)(void)) {
@@ -96,20 +95,20 @@ int pthread_atfork(void (*prepare)(void), void (*parent)(void), void(*child)(voi
   entry->parent = parent;
   entry->child = child;

-  pthread_mutex_lock(&gAtForkListMutex);
+  pthread_mutex_lock(&g_atfork_list_mutex);

   // Append 'entry' to the list.
   entry->next = NULL;
-  entry->prev = gAtForkList.last;
+  entry->prev = g_atfork_list.last;
   if (entry->prev != NULL) {
     entry->prev->next = entry;
   }
-  if (gAtForkList.first == NULL) {
-    gAtForkList.first = entry;
+  if (g_atfork_list.first == NULL) {
+    g_atfork_list.first = entry;
   }
-  gAtForkList.last = entry;
+  g_atfork_list.last = entry;

-  pthread_mutex_unlock(&gAtForkListMutex);
+  pthread_mutex_unlock(&g_atfork_list_mutex);

   return 0;
 }
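The handlers above follow the POSIX pthread_atfork(3) contract: prepare handlers run before fork() in reverse registration order, while parent and child handlers run after it in registration order, which is why the prepare loop walks the list backwards. A small, illustrative usage sketch of the public API:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t g_state_lock = PTHREAD_MUTEX_INITIALIZER;

    // Take the lock before fork() so the child never inherits it mid-update...
    static void prepare() { pthread_mutex_lock(&g_state_lock); }
    // ...and release it on both sides afterwards.
    static void parent() { pthread_mutex_unlock(&g_state_lock); }
    static void child() { pthread_mutex_unlock(&g_state_lock); }

    int main() {
      pthread_atfork(prepare, parent, child);
      pid_t pid = fork();
      printf("%s\n", pid == 0 ? "child" : "parent");
      return 0;
    }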
@ -52,9 +52,9 @@ extern "C" __attribute__((noinline)) void _thread_created_hook(pid_t) {}
|
||||
extern "C" __LIBC_HIDDEN__ void __init_user_desc(struct user_desc*, int, void*);
|
||||
#endif
|
||||
|
||||
static pthread_mutex_t gPthreadStackCreationLock = PTHREAD_MUTEX_INITIALIZER;
|
||||
static pthread_mutex_t g_pthread_stack_creation_ock = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
static pthread_mutex_t gDebuggerNotificationLock = PTHREAD_MUTEX_INITIALIZER;
|
||||
static pthread_mutex_t g_debugger_notification_lock = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
extern "C" int __isthreaded;
|
||||
|
||||
@ -111,7 +111,7 @@ int __init_thread(pthread_internal_t* thread, bool add_to_thread_list) {
|
||||
}
|
||||
|
||||
static void* __create_thread_stack(pthread_internal_t* thread) {
|
||||
ScopedPthreadMutexLocker lock(&gPthreadStackCreationLock);
|
||||
ScopedPthreadMutexLocker lock(&g_pthread_stack_creation_ock);
|
||||
|
||||
// Create a new private anonymous map.
|
||||
int prot = PROT_READ | PROT_WRITE;
|
||||
@ -258,7 +258,7 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
|
||||
|
||||
// Notify any debuggers about the new thread.
|
||||
{
|
||||
ScopedPthreadMutexLocker debugger_locker(&gDebuggerNotificationLock);
|
||||
ScopedPthreadMutexLocker debugger_locker(&g_debugger_notification_lock);
|
||||
_thread_created_hook(thread->tid);
|
||||
}
|
||||
|
||||
|
@@ -127,9 +127,9 @@ extern const char* __progname;
  * level 2 : deadlock prediction enabled w/ call stacks
  */
 #define CAPTURE_CALLSTACK 2
-static int sPthreadDebugLevel = 0;
-static pid_t sPthreadDebugDisabledThread = -1;
-static pthread_mutex_t sDbgLock = PTHREAD_MUTEX_INITIALIZER;
+static int g_pthread_debug_level = 0;
+static pid_t g_pthread_debug_disabled_thread = -1;
+static pthread_mutex_t g_dbg_lock = PTHREAD_MUTEX_INITIALIZER;

 /****************************************************************************/

@@ -138,23 +138,23 @@ static pthread_mutex_t sDbgLock = PTHREAD_MUTEX_INITIALIZER;
  */

 #define DBG_ALLOC_BLOCK_SIZE PAGESIZE
-static size_t sDbgAllocOffset = DBG_ALLOC_BLOCK_SIZE;
-static char* sDbgAllocPtr = NULL;
+static size_t g_dbg_alloc_offset = DBG_ALLOC_BLOCK_SIZE;
+static char* g_dbg_alloc_ptr = NULL;

 template <typename T>
 static T* DbgAllocLocked(size_t count = 1) {
   size_t size = sizeof(T) * count;
-  if ((sDbgAllocOffset + size) > DBG_ALLOC_BLOCK_SIZE) {
-    sDbgAllocOffset = 0;
-    sDbgAllocPtr = reinterpret_cast<char*>(mmap(NULL, DBG_ALLOC_BLOCK_SIZE,
+  if ((g_dbg_alloc_offset + size) > DBG_ALLOC_BLOCK_SIZE) {
+    g_dbg_alloc_offset = 0;
+    g_dbg_alloc_ptr = reinterpret_cast<char*>(mmap(NULL, DBG_ALLOC_BLOCK_SIZE,
                                                 PROT_READ|PROT_WRITE,
                                                 MAP_ANON | MAP_PRIVATE, 0, 0));
-    if (sDbgAllocPtr == MAP_FAILED) {
+    if (g_dbg_alloc_ptr == MAP_FAILED) {
       return NULL;
     }
   }
-  void* addr = sDbgAllocPtr + sDbgAllocOffset;
-  sDbgAllocOffset += size;
+  void* addr = g_dbg_alloc_ptr + g_dbg_alloc_offset;
+  g_dbg_alloc_offset += size;
   return reinterpret_cast<T*>(addr);
 }

@@ -365,7 +365,7 @@ static int traverseTree(MutexInfo* obj, MutexInfo const* objParent)
   uintptr_t addrs[STACK_TRACE_DEPTH];

   /* Turn off prediction temporarily in this thread while logging */
-  sPthreadDebugDisabledThread = gettid();
+  g_pthread_debug_disabled_thread = gettid();

   backtrace_startup();

@@ -384,7 +384,7 @@ static int traverseTree(MutexInfo* obj, MutexInfo const* objParent)
       MutexInfo* parent = cur->parents.list[i];
       if (parent->owner == ourtid) {
         LOGW("--- pthread_mutex_t at %p\n", parent->mutex);
-        if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
+        if (g_pthread_debug_level >= CAPTURE_CALLSTACK) {
           log_backtrace(parent->stackTrace, parent->stackDepth);
         }
         cur = parent;
@@ -405,7 +405,7 @@ static int traverseTree(MutexInfo* obj, MutexInfo const* objParent)
     MutexInfo* child = pList->list[i];
     if (!traverseTree(child, obj)) {
       LOGW("--- pthread_mutex_t at %p\n", obj->mutex);
-      if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
+      if (g_pthread_debug_level >= CAPTURE_CALLSTACK) {
         int index = historyListHas(&obj->parents, objParent);
         if ((size_t)index < (size_t)obj->stacks.count) {
           log_backtrace(obj->stacks.stack[index].addrs, obj->stacks.stack[index].depth);
@@ -435,7 +435,7 @@ static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object)
     object->owner = tid;
     object->lockCount = 0;

-    if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
+    if (g_pthread_debug_level >= CAPTURE_CALLSTACK) {
       // always record the call stack when acquiring a lock.
       // it's not efficient, but is useful during diagnostics
       object->stackDepth = get_backtrace(object->stackTrace, STACK_TRACE_DEPTH);
@@ -451,7 +451,7 @@ static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object)
   if (historyListHas(&mrl->children, object) >= 0)
     return;

-  pthread_mutex_lock_unchecked(&sDbgLock);
+  pthread_mutex_lock_unchecked(&g_dbg_lock);

   linkParentToChild(mrl, object);
   if (!traverseTree(object, mrl)) {
@@ -459,20 +459,20 @@ static void mutex_lock_checked(MutexInfo* mrl, MutexInfo* object)
     LOGW("%s\n", kEndBanner);
     unlinkParentFromChild(mrl, object);
     // reenable pthread debugging for this thread
-    sPthreadDebugDisabledThread = -1;
+    g_pthread_debug_disabled_thread = -1;
   } else {
     // record the call stack for this link
     // NOTE: the call stack is added at the same index
     // as mrl in object->parents[]
     // ie: object->parents.count == object->stacks.count, which is
     // also the index.
-    if (sPthreadDebugLevel >= CAPTURE_CALLSTACK) {
+    if (g_pthread_debug_level >= CAPTURE_CALLSTACK) {
       callstackListAdd(&object->stacks,
                        object->stackDepth, object->stackTrace);
     }
   }

-  pthread_mutex_unlock_unchecked(&sDbgLock);
+  pthread_mutex_unlock_unchecked(&g_dbg_lock);
 }

 static void mutex_unlock_checked(MutexInfo* object)
@@ -509,8 +509,8 @@ struct HashTable {
   HashEntry* slots[HASHTABLE_SIZE];
 };

-static HashTable sMutexMap;
-static HashTable sThreadMap;
+static HashTable g_mutex_map;
+static HashTable g_thread_map;

 /****************************************************************************/

@@ -593,9 +593,9 @@ static int MutexInfo_equals(void const* data, void const* key) {

 static MutexInfo* get_mutex_info(pthread_mutex_t *mutex)
 {
-    pthread_mutex_lock_unchecked(&sDbgLock);
+    pthread_mutex_lock_unchecked(&g_dbg_lock);

-    HashEntry* entry = hashmap_lookup(&sMutexMap,
+    HashEntry* entry = hashmap_lookup(&g_mutex_map,
                                       &mutex, sizeof(mutex),
                                       &MutexInfo_equals);
     if (entry->data == NULL) {
@@ -604,7 +604,7 @@ static MutexInfo* get_mutex_info(pthread_mutex_t *mutex)
         initMutexInfo(mutex_info, mutex);
     }

-    pthread_mutex_unlock_unchecked(&sDbgLock);
+    pthread_mutex_unlock_unchecked(&g_dbg_lock);

     return (MutexInfo *)entry->data;
 }
@@ -617,9 +617,9 @@ static int ThreadInfo_equals(void const* data, void const* key) {

 static ThreadInfo* get_thread_info(pid_t pid)
 {
-    pthread_mutex_lock_unchecked(&sDbgLock);
+    pthread_mutex_lock_unchecked(&g_dbg_lock);

-    HashEntry* entry = hashmap_lookup(&sThreadMap,
+    HashEntry* entry = hashmap_lookup(&g_thread_map,
                                       &pid, sizeof(pid),
                                       &ThreadInfo_equals);
     if (entry->data == NULL) {
@@ -628,7 +628,7 @@ static ThreadInfo* get_thread_info(pid_t pid)
         initThreadInfo(thread_info, pid);
     }

-    pthread_mutex_unlock_unchecked(&sDbgLock);
+    pthread_mutex_unlock_unchecked(&g_dbg_lock);

     return (ThreadInfo *)entry->data;
 }
@@ -672,9 +672,9 @@ static MutexInfo* get_most_recently_locked() {

 extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex)
 {
-    if (sPthreadDebugLevel == 0) return;
+    if (g_pthread_debug_level == 0) return;
     // prediction disabled for this thread
-    if (sPthreadDebugDisabledThread == gettid())
+    if (g_pthread_debug_disabled_thread == gettid())
         return;
     MutexInfo* object = get_mutex_info(mutex);
     MutexInfo* mrl = get_most_recently_locked();
@@ -689,9 +689,9 @@ extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_lock_check(pthread_mutex_t *

 extern "C" __LIBC_HIDDEN__ void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex)
 {
-    if (sPthreadDebugLevel == 0) return;
+    if (g_pthread_debug_level == 0) return;
     // prediction disabled for this thread
-    if (sPthreadDebugDisabledThread == gettid())
+    if (g_pthread_debug_disabled_thread == gettid())
         return;
     MutexInfo* object = get_mutex_info(mutex);
     remove_most_recently_locked(object);
@@ -709,8 +709,8 @@ extern "C" __LIBC_HIDDEN__ void pthread_debug_init() {
   if (level) {
     LOGI("pthread deadlock detection level %d enabled for pid %d (%s)",
          level, getpid(), __progname);
-    hashmap_init(&sMutexMap);
-    sPthreadDebugLevel = level;
+    hashmap_init(&g_mutex_map);
+    g_pthread_debug_level = level;
   }
 }
 #endif
@@ -92,7 +92,7 @@ void pthread_exit(void* return_value) {
   size_t stack_size = thread->attr.stack_size;
   bool user_allocated_stack = ((thread->attr.flags & PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK) != 0);

-  pthread_mutex_lock(&gThreadListLock);
+  pthread_mutex_lock(&g_thread_list_lock);
   if ((thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) != 0) {
     // The thread is detached, so we can free the pthread_internal_t.
     // First make sure that the kernel does not try to clear the tid field
@@ -110,7 +110,7 @@ void pthread_exit(void* return_value) {
     // pthread_join is responsible for destroying the pthread_internal_t for non-detached threads.
     // The kernel will futex_wake on the pthread_internal_t::tid field to wake pthread_join.
   }
-  pthread_mutex_unlock(&gThreadListLock);
+  pthread_mutex_unlock(&g_thread_list_lock);

   if (user_allocated_stack) {
     // Cleaning up this thread's stack is the creator's responsibility, not ours.
@@ -86,8 +86,8 @@ __LIBC_HIDDEN__ void _pthread_internal_remove_locked(pthread_internal_t* thread)
  */
 #define PTHREAD_STACK_SIZE_DEFAULT ((1 * 1024 * 1024) - SIGSTKSZ)

-__LIBC_HIDDEN__ extern pthread_internal_t* gThreadList;
-__LIBC_HIDDEN__ extern pthread_mutex_t gThreadListLock;
+__LIBC_HIDDEN__ extern pthread_internal_t* g_thread_list;
+__LIBC_HIDDEN__ extern pthread_mutex_t g_thread_list_lock;

 __LIBC_HIDDEN__ int __timespec_from_absolute(timespec*, const timespec*, clockid_t);

@@ -33,8 +33,8 @@
 #include "private/bionic_tls.h"
 #include "private/ScopedPthreadMutexLocker.h"

-pthread_internal_t* gThreadList = NULL;
-pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER;
+pthread_internal_t* g_thread_list = NULL;
+pthread_mutex_t g_thread_list_lock = PTHREAD_MUTEX_INITIALIZER;

 void _pthread_internal_remove_locked(pthread_internal_t* thread) {
   if (thread->next != NULL) {
@@ -43,7 +43,7 @@ void _pthread_internal_remove_locked(pthread_internal_t* thread) {
   if (thread->prev != NULL) {
     thread->prev->next = thread->next;
   } else {
-    gThreadList = thread->next;
+    g_thread_list = thread->next;
   }

   // The main thread is not heap-allocated. See __libc_init_tls for the declaration,
@@ -54,15 +54,15 @@ void _pthread_internal_remove_locked(pthread_internal_t* thread) {
 }

 void _pthread_internal_add(pthread_internal_t* thread) {
-  ScopedPthreadMutexLocker locker(&gThreadListLock);
+  ScopedPthreadMutexLocker locker(&g_thread_list_lock);

   // We insert at the head.
-  thread->next = gThreadList;
+  thread->next = g_thread_list;
   thread->prev = NULL;
   if (thread->next != NULL) {
     thread->next->prev = thread;
   }
-  gThreadList = thread;
+  g_thread_list = thread;
 }

 pthread_internal_t* __get_thread(void) {
@@ -210,8 +210,8 @@ int pthread_key_delete(pthread_key_t key) {
   }

   // Clear value in all threads.
-  pthread_mutex_lock(&gThreadListLock);
-  for (pthread_internal_t* t = gThreadList; t != NULL; t = t->next) {
+  pthread_mutex_lock(&g_thread_list_lock);
+  for (pthread_internal_t* t = g_thread_list; t != NULL; t = t->next) {
     // Skip zombie threads. They don't have a valid TLS area any more.
     // Similarly, it is possible to have t->tls == NULL for threads that
     // were just recently created through pthread_create() but whose
@@ -226,7 +226,7 @@ int pthread_key_delete(pthread_key_t key) {
   }
   tls_map.DeleteKey(key);

-  pthread_mutex_unlock(&gThreadListLock);
+  pthread_mutex_unlock(&g_thread_list_lock);
   return 0;
 }
@@ -30,7 +30,7 @@

 #include <pthread.h>

-static pthread_mutex_t gAtExitLock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t g_atexit_lock = PTHREAD_MUTEX_INITIALIZER;

 __BEGIN_DECLS
 __LIBC_HIDDEN__ void _thread_atexit_lock();
@@ -38,9 +38,9 @@ __LIBC_HIDDEN__ void _thread_atexit_unlock();
 __END_DECLS

 void _thread_atexit_lock() {
-  pthread_mutex_lock(&gAtExitLock);
+  pthread_mutex_lock(&g_atexit_lock);
 }

 void _thread_atexit_unlock() {
-  pthread_mutex_unlock(&gAtExitLock);
+  pthread_mutex_unlock(&g_atexit_lock);
 }
|
||||
_tzLock();
|
||||
|
||||
// Our single-item cache.
|
||||
static char* gCachedTimeZoneName;
|
||||
static struct state gCachedTimeZone;
|
||||
static char* g_cached_time_zone_name;
|
||||
static struct state g_cached_time_zone;
|
||||
|
||||
// Do we already have this timezone cached?
|
||||
if (gCachedTimeZoneName != NULL && strcmp(name, gCachedTimeZoneName) == 0) {
|
||||
*sp = gCachedTimeZone;
|
||||
if (g_cached_time_zone_name != NULL && strcmp(name, g_cached_time_zone_name) == 0) {
|
||||
*sp = g_cached_time_zone;
|
||||
_tzUnlock();
|
||||
return 0;
|
||||
}
|
||||
@ -2269,9 +2269,9 @@ static int __bionic_tzload_cached(const char* name, struct state* const sp, cons
|
||||
int rc = tzload(name, sp, doextend);
|
||||
if (rc == 0) {
|
||||
// Update the cache.
|
||||
free(gCachedTimeZoneName);
|
||||
gCachedTimeZoneName = strdup(name);
|
||||
gCachedTimeZone = *sp;
|
||||
free(g_cached_time_zone_name);
|
||||
g_cached_time_zone_name = strdup(name);
|
||||
g_cached_time_zone = *sp;
|
||||
}
|
||||
|
||||
_tzUnlock();
|
||||
|
@ -217,7 +217,7 @@ static void send_debuggerd_packet(siginfo_t* info) {
|
||||
debugger_msg_t msg;
|
||||
msg.action = DEBUGGER_ACTION_CRASH;
|
||||
msg.tid = gettid();
|
||||
msg.abort_msg_address = reinterpret_cast<uintptr_t>(gAbortMessage);
|
||||
msg.abort_msg_address = reinterpret_cast<uintptr_t>(g_abort_message);
|
||||
msg.original_si_code = (info != NULL) ? info->si_code : 0;
|
||||
int ret = TEMP_FAILURE_RETRY(write(s, &msg, sizeof(msg)));
|
||||
if (ret == sizeof(msg)) {
|
||||
|
@ -29,7 +29,7 @@
|
||||
|
||||
/* This file hijacks the symbols stubbed out in libdl.so. */
|
||||
|
||||
static pthread_mutex_t gDlMutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
|
||||
static pthread_mutex_t g_dl_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
|
||||
|
||||
static const char* __bionic_set_dlerror(char* new_value) {
|
||||
char** dlerror_slot = &reinterpret_cast<char**>(__get_tls())[TLS_SLOT_DLERROR];
|
||||
@ -56,18 +56,18 @@ const char* dlerror() {
|
||||
}
|
||||
|
||||
void android_get_LD_LIBRARY_PATH(char* buffer, size_t buffer_size) {
|
||||
ScopedPthreadMutexLocker locker(&gDlMutex);
|
||||
ScopedPthreadMutexLocker locker(&g_dl_mutex);
|
||||
do_android_get_LD_LIBRARY_PATH(buffer, buffer_size);
|
||||
}
|
||||
|
||||
void android_update_LD_LIBRARY_PATH(const char* ld_library_path) {
|
||||
ScopedPthreadMutexLocker locker(&gDlMutex);
|
||||
ScopedPthreadMutexLocker locker(&g_dl_mutex);
|
||||
do_android_update_LD_LIBRARY_PATH(ld_library_path);
|
||||
}
|
||||
|
||||
void* android_dlopen_ext(const char* filename, int flags, const android_dlextinfo* extinfo)
|
||||
{
|
||||
ScopedPthreadMutexLocker locker(&gDlMutex);
|
||||
ScopedPthreadMutexLocker locker(&g_dl_mutex);
|
||||
soinfo* result = do_dlopen(filename, flags, extinfo);
|
||||
if (result == NULL) {
|
||||
__bionic_format_dlerror("dlopen failed", linker_get_error_buffer());
|
||||
@ -81,7 +81,7 @@ void* dlopen(const char* filename, int flags) {
|
||||
}
|
||||
|
||||
void* dlsym(void* handle, const char* symbol) {
|
||||
ScopedPthreadMutexLocker locker(&gDlMutex);
|
||||
ScopedPthreadMutexLocker locker(&g_dl_mutex);
|
||||
|
||||
if (handle == NULL) {
|
||||
__bionic_format_dlerror("dlsym library handle is null", NULL);
|
||||
@ -125,7 +125,7 @@ void* dlsym(void* handle, const char* symbol) {
|
||||
}
|
||||
|
||||
int dladdr(const void* addr, Dl_info* info) {
|
||||
ScopedPthreadMutexLocker locker(&gDlMutex);
|
||||
ScopedPthreadMutexLocker locker(&g_dl_mutex);
|
||||
|
||||
// Determine if this address can be found in any library currently mapped.
|
||||
soinfo* si = find_containing_library(addr);
|
||||
@ -150,7 +150,7 @@ int dladdr(const void* addr, Dl_info* info) {
|
||||
}
|
||||
|
||||
int dlclose(void* handle) {
|
||||
ScopedPthreadMutexLocker locker(&gDlMutex);
|
||||
ScopedPthreadMutexLocker locker(&g_dl_mutex);
|
||||
return do_dlclose(reinterpret_cast<soinfo*>(handle));
|
||||
}
|
||||
|
||||
@ -187,7 +187,7 @@ int dlclose(void* handle) {
|
||||
# error Unsupported architecture. Only arm, arm64, mips, mips64, x86 and x86_64 are presently supported.
|
||||
#endif
|
||||
|
||||
static ElfW(Sym) gLibDlSymtab[] = {
|
||||
static ElfW(Sym) g_libdl_symtab[] = {
|
||||
// Total length of libdl_info.strtab, including trailing 0.
|
||||
// This is actually the STH_UNDEF entry. Technically, it's
|
||||
// supposed to have st_name == 0, but instead, it points to an index
|
||||
@ -209,20 +209,20 @@ static ElfW(Sym) gLibDlSymtab[] = {
|
||||
|
||||
// Fake out a hash table with a single bucket.
|
||||
//
|
||||
// A search of the hash table will look through gLibDlSymtab starting with index 1, then
|
||||
// use gLibDlChains to find the next index to look at. gLibDlChains should be set up to
|
||||
// walk through every element in gLibDlSymtab, and then end with 0 (sentinel value).
|
||||
// A search of the hash table will look through g_libdl_symtab starting with index 1, then
|
||||
// use g_libdl_chains to find the next index to look at. g_libdl_chains should be set up to
|
||||
// walk through every element in g_libdl_symtab, and then end with 0 (sentinel value).
|
||||
//
|
||||
// That is, gLibDlChains should look like { 0, 2, 3, ... N, 0 } where N is the number
|
||||
// of actual symbols, or nelems(gLibDlSymtab)-1 (since the first element of gLibDlSymtab is not
|
||||
// That is, g_libdl_chains should look like { 0, 2, 3, ... N, 0 } where N is the number
|
||||
// of actual symbols, or nelems(g_libdl_symtab)-1 (since the first element of g_libdl_symtab is not
|
||||
// a real symbol). (See soinfo_elf_lookup().)
|
||||
//
|
||||
// Note that adding any new symbols here requires stubbing them out in libdl.
|
||||
static unsigned gLibDlBuckets[1] = { 1 };
|
||||
static unsigned g_libdl_buckets[1] = { 1 };
|
||||
#if defined(__arm__)
|
||||
static unsigned gLibDlChains[] = { 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0 };
|
||||
static unsigned g_libdl_chains[] = { 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0 };
|
||||
#else
|
||||
static unsigned gLibDlChains[] = { 0, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
|
||||
static unsigned g_libdl_chains[] = { 0, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
|
||||
#endif
|
||||
|
||||
// This is used by the dynamic linker. Every process gets these symbols for free.
|
||||
@ -250,12 +250,12 @@ soinfo libdl_info = {
|
||||
.flags = FLAG_LINKED,
|
||||
|
||||
.strtab = ANDROID_LIBDL_STRTAB,
|
||||
.symtab = gLibDlSymtab,
|
||||
.symtab = g_libdl_symtab,
|
||||
|
||||
.nbucket = sizeof(gLibDlBuckets)/sizeof(unsigned),
|
||||
.nchain = sizeof(gLibDlChains)/sizeof(unsigned),
|
||||
.bucket = gLibDlBuckets,
|
||||
.chain = gLibDlChains,
|
||||
.nbucket = sizeof(g_libdl_buckets)/sizeof(unsigned),
|
||||
.nchain = sizeof(g_libdl_chains)/sizeof(unsigned),
|
||||
.bucket = g_libdl_buckets,
|
||||
.chain = g_libdl_chains,
|
||||
|
||||
#if defined(USE_RELA)
|
||||
.plt_rela = 0,
|
||||
|
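The comment block above describes the classic SysV ELF hash lookup that the fake single-bucket table piggybacks on: hash the symbol name, index into bucket[], then follow chain[] until the sentinel 0. A compact sketch of that lookup with the standard ELF hash function (the lookup helper and its field names are illustrative simplifications; see soinfo_elf_lookup() for the real thing):

    #include <stdint.h>
    #include <string.h>

    // The standard SysV ELF hash function.
    static uint32_t elf_hash(const char* name) {
      const unsigned char* p = reinterpret_cast<const unsigned char*>(name);
      uint32_t h = 0, g;
      while (*p) {
        h = (h << 4) + *p++;
        g = h & 0xf0000000;
        if (g != 0) h ^= g >> 24;
        h &= ~g;
      }
      return h;
    }

    // Sketch of a bucket/chain lookup over a symbol-name table. With a single
    // bucket of { 1 } and chains { 0, 2, 3, ..., N, 0 }, every symbol is visited.
    static int lookup(const char* name, const char* names[], size_t n,
                      const unsigned bucket[], size_t nbucket, const unsigned chain[]) {
      for (unsigned i = bucket[elf_hash(name) % nbucket]; i != 0; i = chain[i]) {
        if (i < n && strcmp(names[i], name) == 0) return static_cast<int>(i);
      }
      return -1;  // Not found (STN_UNDEF).
    }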
@ -71,13 +71,13 @@ static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf);
|
||||
// We can't use malloc(3) in the dynamic linker. We use a linked list of anonymous
|
||||
// maps, each a single page in size. The pages are broken up into as many struct soinfo
|
||||
// objects as will fit.
|
||||
static LinkerAllocator<soinfo> gSoInfoAllocator;
|
||||
static LinkerAllocator<soinfo> g_soinfo_allocator;
|
||||
|
||||
static soinfo* solist = &libdl_info;
|
||||
static soinfo* sonext = &libdl_info;
|
||||
static soinfo* somain; /* main process, always the one after libdl_info */
|
||||
|
||||
static const char* const gDefaultLdPaths[] = {
|
||||
static const char* const kDefaultLdPaths[] = {
|
||||
#if defined(__LP64__)
|
||||
"/vendor/lib64",
|
||||
"/system/lib64",
|
||||
@ -94,17 +94,17 @@ static const char* const gDefaultLdPaths[] = {
|
||||
#define LDPRELOAD_BUFSIZE (LDPRELOAD_MAX*64)
|
||||
#define LDPRELOAD_MAX 8
|
||||
|
||||
static char gLdPathsBuffer[LDPATH_BUFSIZE];
|
||||
static const char* gLdPaths[LDPATH_MAX + 1];
|
||||
static char g_ld_library_paths_buffer[LDPATH_BUFSIZE];
|
||||
static const char* g_ld_library_paths[LDPATH_MAX + 1];
|
||||
|
||||
static char gLdPreloadsBuffer[LDPRELOAD_BUFSIZE];
|
||||
static const char* gLdPreloadNames[LDPRELOAD_MAX + 1];
|
||||
static char g_ld_preloads_buffer[LDPRELOAD_BUFSIZE];
|
||||
static const char* g_ld_preload_names[LDPRELOAD_MAX + 1];
|
||||
|
||||
static soinfo* gLdPreloads[LDPRELOAD_MAX + 1];
|
||||
static soinfo* g_ld_preloads[LDPRELOAD_MAX + 1];
|
||||
|
||||
__LIBC_HIDDEN__ int gLdDebugVerbosity;
|
||||
__LIBC_HIDDEN__ int g_ld_debug_verbosity;
|
||||
|
||||
__LIBC_HIDDEN__ abort_msg_t* gAbortMessage = NULL; // For debuggerd.
|
||||
__LIBC_HIDDEN__ abort_msg_t* g_abort_message = NULL; // For debuggerd.
|
||||
|
||||
enum RelocationKind {
|
||||
kRelocAbsolute = 0,
|
||||
@ -179,11 +179,10 @@ size_t linker_get_error_buffer_size() {
*/
extern "C" void __attribute__((noinline)) __attribute__((visibility("default"))) rtld_db_dlactivity();

static pthread_mutex_t g__r_debug_mutex = PTHREAD_MUTEX_INITIALIZER;
static r_debug _r_debug = {1, NULL, reinterpret_cast<uintptr_t>(&rtld_db_dlactivity), r_debug::RT_CONSISTENT, 0};
static link_map* r_debug_tail = 0;

static pthread_mutex_t gDebugMutex = PTHREAD_MUTEX_INITIALIZER;

static void insert_soinfo_into_debug_map(soinfo* info) {
// Copy the necessary fields into the debug structure.
link_map* map = &(info->link_map_head);
@ -229,7 +228,7 @@ static void notify_gdb_of_load(soinfo* info) {
return;
}

ScopedPthreadMutexLocker locker(&gDebugMutex);
ScopedPthreadMutexLocker locker(&g__r_debug_mutex);

_r_debug.r_state = r_debug::RT_ADD;
rtld_db_dlactivity();
@ -246,7 +245,7 @@ static void notify_gdb_of_unload(soinfo* info) {
return;
}

ScopedPthreadMutexLocker locker(&gDebugMutex);
ScopedPthreadMutexLocker locker(&g__r_debug_mutex);

_r_debug.r_state = r_debug::RT_DELETE;
rtld_db_dlactivity();
@ -270,7 +269,7 @@ static soinfo* soinfo_alloc(const char* name) {
return NULL;
}

soinfo* si = gSoInfoAllocator.alloc();
soinfo* si = g_soinfo_allocator.alloc();

// Initialize the new element.
memset(si, 0, sizeof(soinfo));
@ -310,7 +309,7 @@ static void soinfo_free(soinfo* si) {
sonext = prev;
}

gSoInfoAllocator.free(si);
g_soinfo_allocator.free(si);
}

@ -340,14 +339,14 @@ static void parse_path(const char* path, const char* delimiters,
}

static void parse_LD_LIBRARY_PATH(const char* path) {
parse_path(path, ":", gLdPaths,
gLdPathsBuffer, sizeof(gLdPathsBuffer), LDPATH_MAX);
parse_path(path, ":", g_ld_library_paths,
g_ld_library_paths_buffer, sizeof(g_ld_library_paths_buffer), LDPATH_MAX);
}

static void parse_LD_PRELOAD(const char* path) {
// We have historically supported ':' as well as ' ' in LD_PRELOAD.
parse_path(path, " :", gLdPreloadNames,
gLdPreloadsBuffer, sizeof(gLdPreloadsBuffer), LDPRELOAD_MAX);
parse_path(path, " :", g_ld_preload_names,
g_ld_preloads_buffer, sizeof(g_ld_preloads_buffer), LDPRELOAD_MAX);
}
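
parse_path itself is outside this hunk. A plausible sketch of what these two callers assume: copy the string into the fixed buffer, split it in place on any of the delimiter characters, and NULL-terminate the pointer array (which is why g_ld_library_paths and g_ld_preload_names are declared with one extra slot). The real bionic helper may differ in details such as how empty entries are treated:

#include <stdio.h>
#include <string.h>

static void parse_path(const char* path, const char* delimiters,
                       const char** array, char* buf, size_t buf_size, size_t max) {
  if (path == NULL) {
    array[0] = NULL;
    return;
  }
  snprintf(buf, buf_size, "%s", path);  // tokens below point into this buffer
  size_t i = 0;
  char* state = NULL;
  for (char* token = strtok_r(buf, delimiters, &state);
       token != NULL && i < max;
       token = strtok_r(NULL, delimiters, &state)) {
    array[i++] = token;
  }
  array[i] = NULL;  // callers scan until this sentinel
}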

#if defined(__arm__)
@ -505,10 +504,10 @@ static ElfW(Sym)* soinfo_do_lookup(soinfo* si, const char* name, soinfo** lsi, s
}

/* Next, look for it in the preloads list */
for (int i = 0; gLdPreloads[i] != NULL; i++) {
s = soinfo_elf_lookup(gLdPreloads[i], elf_hash, name);
for (int i = 0; g_ld_preloads[i] != NULL; i++) {
s = soinfo_elf_lookup(g_ld_preloads[i], elf_hash, name);
if (s != NULL) {
*lsi = gLdPreloads[i];
*lsi = g_ld_preloads[i];
goto done;
}
}
@ -637,9 +636,9 @@ static int open_library(const char* name) {
}

// Otherwise we try LD_LIBRARY_PATH first, and fall back to the built-in well known paths.
int fd = open_library_on_path(name, gLdPaths);
int fd = open_library_on_path(name, g_ld_library_paths);
if (fd == -1) {
fd = open_library_on_path(name, gDefaultLdPaths);
fd = open_library_on_path(name, kDefaultLdPaths);
}
return fd;
}
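
open_library_on_path is also outside this hunk; the fallback logic only needs it to probe each directory of a NULL-terminated list in order. A hedged sketch (buffer size and error handling here are assumptions, not the real helper):

#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>

static int open_library_on_path(const char* name, const char* const paths[]) {
  char buf[512];
  for (size_t i = 0; paths[i] != NULL; ++i) {
    int n = snprintf(buf, sizeof(buf), "%s/%s", paths[i], name);
    if (n < 0 || n >= static_cast<int>(sizeof(buf))) {
      continue;  // candidate path too long for the buffer; try the next directory
    }
    int fd = open(buf, O_RDONLY | O_CLOEXEC);
    if (fd != -1) {
      return fd;  // first hit wins, which gives LD_LIBRARY_PATH priority above
    }
  }
  return -1;
}
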
@ -756,7 +755,7 @@ static int soinfo_unload(soinfo* si) {
}

void do_android_get_LD_LIBRARY_PATH(char* buffer, size_t buffer_size) {
snprintf(buffer, buffer_size, "%s:%s", gDefaultLdPaths[0], gDefaultLdPaths[1]);
snprintf(buffer, buffer_size, "%s:%s", kDefaultLdPaths[0], kDefaultLdPaths[1]);
}

void do_android_update_LD_LIBRARY_PATH(const char* ld_library_path) {
@ -774,19 +773,19 @@ soinfo* do_dlopen(const char* name, int flags, const android_dlextinfo* extinfo)
DL_ERR("invalid extended flags to android_dlopen_ext: %x", extinfo->flags);
return NULL;
}
gSoInfoAllocator.protect_all(PROT_READ | PROT_WRITE);
g_soinfo_allocator.protect_all(PROT_READ | PROT_WRITE);
soinfo* si = find_library(name, extinfo);
if (si != NULL) {
si->CallConstructors();
}
gSoInfoAllocator.protect_all(PROT_READ);
g_soinfo_allocator.protect_all(PROT_READ);
return si;
}

int do_dlclose(soinfo* si) {
gSoInfoAllocator.protect_all(PROT_READ | PROT_WRITE);
g_soinfo_allocator.protect_all(PROT_READ | PROT_WRITE);
int result = soinfo_unload(si);
gSoInfoAllocator.protect_all(PROT_READ);
g_soinfo_allocator.protect_all(PROT_READ);
return result;
}

@ -1334,7 +1333,7 @@ void soinfo::CallFunction(const char* function_name __unused, linker_function_t

// The function may have called dlopen(3) or dlclose(3), so we need to ensure our data structures
// are still writable. This happens with our debug malloc (see http://b/7941716).
gSoInfoAllocator.protect_all(PROT_READ | PROT_WRITE);
g_soinfo_allocator.protect_all(PROT_READ | PROT_WRITE);
}
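
The protect_all() bracketing here and in do_dlopen/do_dlclose keeps the soinfo pages read-only except while the linker is deliberately mutating them, so a stray write faults immediately instead of silently corrupting linker state. A standalone illustration of that discipline with plain mmap/mprotect (not the linker's code):

#include <assert.h>
#include <sys/mman.h>

int main() {
  // Stand-in for the allocator's pages of soinfo objects.
  void* page = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(page != MAP_FAILED);
  int* data = static_cast<int*>(page);

  *data = 1;                                     // mutate while writable
  mprotect(page, 4096, PROT_READ);               // the protect_all(PROT_READ) step

  // A buggy write to *data here would SIGSEGV rather than corrupt the page.

  mprotect(page, 4096, PROT_READ | PROT_WRITE);  // reopened around a dlopen-style change
  *data = 2;
  mprotect(page, 4096, PROT_READ);
  return 0;
}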

void soinfo::CallPreInitConstructors() {
@ -1688,16 +1687,16 @@ static bool soinfo_link_image(soinfo* si, const android_dlextinfo* extinfo) {

// If this is the main executable, then load all of the libraries from LD_PRELOAD now.
if (si->flags & FLAG_EXE) {
memset(gLdPreloads, 0, sizeof(gLdPreloads));
memset(g_ld_preloads, 0, sizeof(g_ld_preloads));
size_t preload_count = 0;
for (size_t i = 0; gLdPreloadNames[i] != NULL; i++) {
soinfo* lsi = find_library(gLdPreloadNames[i], NULL);
for (size_t i = 0; g_ld_preload_names[i] != NULL; i++) {
soinfo* lsi = find_library(g_ld_preload_names[i], NULL);
if (lsi != NULL) {
gLdPreloads[preload_count++] = lsi;
g_ld_preloads[preload_count++] = lsi;
} else {
// As with glibc, failure to load an LD_PRELOAD library is just a warning.
DL_WARN("could not load library \"%s\" from LD_PRELOAD for \"%s\"; caused by %s",
gLdPreloadNames[i], si->name, linker_get_error_buffer());
g_ld_preload_names[i], si->name, linker_get_error_buffer());
}
}
}
@ -1873,7 +1872,7 @@ static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(
// Get a few environment variables.
const char* LD_DEBUG = linker_env_get("LD_DEBUG");
if (LD_DEBUG != NULL) {
gLdDebugVerbosity = atoi(LD_DEBUG);
g_ld_debug_verbosity = atoi(LD_DEBUG);
}

// Normally, these are cleaned by linker_env_init, but the test
@ -1888,7 +1887,7 @@ static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(
// Linker does not call constructors for its own
// global variables so we need to initialize
// the allocator explicitly.
gSoInfoAllocator.init();
g_soinfo_allocator.init();

INFO("[ android linker & debugger ]");

@ -1982,8 +1981,8 @@ static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(

si->CallPreInitConstructors();

for (size_t i = 0; gLdPreloads[i] != NULL; ++i) {
gLdPreloads[i]->CallConstructors();
for (size_t i = 0; g_ld_preloads[i] != NULL; ++i) {
g_ld_preloads[i]->CallConstructors();
}

/* After the link_image, the si->load_bias is initialized.
@ -2104,10 +2103,10 @@ extern "C" ElfW(Addr) __linker_init(void* raw_args) {

// We have successfully fixed our own relocations. It's safe to run
// the main part of the linker now.
args.abort_message_ptr = &gAbortMessage;
args.abort_message_ptr = &g_abort_message;
ElfW(Addr) start_address = __linker_init_post_relocation(args, linker_addr);

gSoInfoAllocator.protect_all(PROT_READ);
g_soinfo_allocator.protect_all(PROT_READ);

// Return the address that the calling assembly stub should jump to.
return start_address;

@ -203,7 +203,7 @@ ElfW(Sym)* dladdr_find_symbol(soinfo* si, const void* addr);
ElfW(Sym)* dlsym_handle_lookup(soinfo* si, const char* name);

void debuggerd_init();
extern "C" abort_msg_t* gAbortMessage;
extern "C" abort_msg_t* g_abort_message;
extern "C" void notify_gdb_of_libraries();

char* linker_get_error_buffer();

@ -55,17 +55,17 @@

#include "private/libc_logging.h"

__LIBC_HIDDEN__ extern int gLdDebugVerbosity;
__LIBC_HIDDEN__ extern int g_ld_debug_verbosity;

#if LINKER_DEBUG_TO_LOG
#define _PRINTVF(v, x...) \
do { \
if (gLdDebugVerbosity > (v)) __libc_format_log(5-(v), "linker", x); \
if (g_ld_debug_verbosity > (v)) __libc_format_log(5-(v), "linker", x); \
} while (0)
#else /* !LINKER_DEBUG_TO_LOG */
#define _PRINTVF(v, x...) \
do { \
if (gLdDebugVerbosity > (v)) { __libc_format_fd(1, x); write(1, "\n", 1); } \
if (g_ld_debug_verbosity > (v)) { __libc_format_fd(1, x); write(1, "\n", 1); } \
} while (0)
#endif /* !LINKER_DEBUG_TO_LOG */
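
Both expansions gate on the same comparison, so the single integer parsed from LD_DEBUG decides how chatty the linker is: with LD_DEBUG=1 only _PRINTVF(0, ...) calls fire, LD_DEBUG=2 also enables _PRINTVF(1, ...), and so on. Wrapper macros in the style bionic layers on top would look like this (the names here are illustrative; only _PRINTVF itself appears in this diff):

#define EXAMPLE_INFO(x...)  _PRINTVF(0, x)  // visible once g_ld_debug_verbosity >= 1
#define EXAMPLE_TRACE(x...) _PRINTVF(1, x)  // visible once g_ld_debug_verbosity >= 2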

@ -27,9 +27,9 @@
#define ASSERT_SUBSTR(needle, haystack) \
ASSERT_PRED_FORMAT2(::testing::IsSubstring, needle, haystack)

static bool gCalled = false;
static bool g_called = false;
extern "C" void DlSymTestFunction() {
gCalled = true;
g_called = true;
}

TEST(dlfcn, dlsym_in_self) {
@ -43,9 +43,9 @@ TEST(dlfcn, dlsym_in_self) {

void (*function)() = reinterpret_cast<void(*)()>(sym);

gCalled = false;
g_called = false;
function();
ASSERT_TRUE(gCalled);
ASSERT_TRUE(g_called);

ASSERT_EQ(0, dlclose(self));
}

@ -560,27 +560,27 @@ TEST(pthread, pthread_rwlock_smoke) {
ASSERT_EQ(0, pthread_rwlock_destroy(&l));
}

static int gOnceFnCallCount = 0;
static int g_once_fn_call_count = 0;
static void OnceFn() {
++gOnceFnCallCount;
++g_once_fn_call_count;
}

TEST(pthread, pthread_once_smoke) {
pthread_once_t once_control = PTHREAD_ONCE_INIT;
ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
ASSERT_EQ(1, gOnceFnCallCount);
ASSERT_EQ(1, g_once_fn_call_count);
}

static int gAtForkPrepareCalls = 0;
static void AtForkPrepare1() { gAtForkPrepareCalls = (gAtForkPrepareCalls << 4) | 1; }
static void AtForkPrepare2() { gAtForkPrepareCalls = (gAtForkPrepareCalls << 4) | 2; }
static int gAtForkParentCalls = 0;
static void AtForkParent1() { gAtForkParentCalls = (gAtForkParentCalls << 4) | 1; }
static void AtForkParent2() { gAtForkParentCalls = (gAtForkParentCalls << 4) | 2; }
static int gAtForkChildCalls = 0;
static void AtForkChild1() { gAtForkChildCalls = (gAtForkChildCalls << 4) | 1; }
static void AtForkChild2() { gAtForkChildCalls = (gAtForkChildCalls << 4) | 2; }
static int g_atfork_prepare_calls = 0;
static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 1; }
static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls << 4) | 2; }
static int g_atfork_parent_calls = 0;
static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 1; }
static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls << 4) | 2; }
static int g_atfork_child_calls = 0;
static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 1; }
static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls << 4) | 2; }

TEST(pthread, pthread_atfork) {
ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
@ -591,13 +591,13 @@ TEST(pthread, pthread_atfork) {

// Child and parent calls are made in the order they were registered.
if (pid == 0) {
ASSERT_EQ(0x12, gAtForkChildCalls);
ASSERT_EQ(0x12, g_atfork_child_calls);
_exit(0);
}
ASSERT_EQ(0x12, gAtForkParentCalls);
ASSERT_EQ(0x12, g_atfork_parent_calls);

// Prepare calls are made in the reverse order.
ASSERT_EQ(0x21, gAtForkPrepareCalls);
ASSERT_EQ(0x21, g_atfork_prepare_calls);
}
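
The hex encoding in these handlers makes call order directly readable: each handler shifts the accumulated value left one nibble and ORs in its own id. Worked through for the assertions above:

// Parent and child handlers run in registration order:
//   AtForkParent1: (0x00 << 4) | 1 == 0x01
//   AtForkParent2: (0x01 << 4) | 2 == 0x12   -> ASSERT_EQ(0x12, ...)
// Prepare handlers run in reverse registration order:
//   AtForkPrepare2: (0x00 << 4) | 2 == 0x02
//   AtForkPrepare1: (0x02 << 4) | 1 == 0x21  -> ASSERT_EQ(0x21, ...)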

TEST(pthread, pthread_attr_getscope) {

@ -146,10 +146,10 @@ TEST(signal, sigwait) {
ASSERT_EQ(SIGALRM, received_signal);
}

static int gSigSuspendTestHelperCallCount = 0;
static int g_sigsuspend_test_helper_call_count = 0;

static void SigSuspendTestHelper(int) {
++gSigSuspendTestHelperCallCount;
++g_sigsuspend_test_helper_call_count;
}

TEST(signal, sigsuspend_sigpending) {
@ -172,7 +172,7 @@ TEST(signal, sigsuspend_sigpending) {

// Raise SIGALRM and check our signal handler wasn't called.
raise(SIGALRM);
ASSERT_EQ(0, gSigSuspendTestHelperCallCount);
ASSERT_EQ(0, g_sigsuspend_test_helper_call_count);

// We should now have a pending SIGALRM but nothing else.
sigemptyset(&pending);
@ -188,7 +188,7 @@
ASSERT_EQ(-1, sigsuspend(&not_SIGALRM));
ASSERT_EQ(EINTR, errno);
// ...and check that we now receive our pending SIGALRM.
ASSERT_EQ(1, gSigSuspendTestHelperCallCount);
ASSERT_EQ(1, g_sigsuspend_test_helper_call_count);

// Restore the original set.
ASSERT_EQ(0, sigprocmask(SIG_SETMASK, &original_set, NULL));
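
The not_SIGALRM set passed to sigsuspend is built earlier in the test, outside this hunk; presumably something along these lines (a sketch, not the test's literal code):

sigset_t not_SIGALRM;
sigfillset(&not_SIGALRM);          // start from "block everything"...
sigdelset(&not_SIGALRM, SIGALRM);  // ...then allow SIGALRM through, so sigsuspend can deliver it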

@ -114,18 +114,18 @@ TEST(unistd, ftruncate64) {
ASSERT_EQ(123, sb.st_size);
}

static bool gPauseTestFlag = false;
static bool g_pause_test_flag = false;
static void PauseTestSignalHandler(int) {
gPauseTestFlag = true;
g_pause_test_flag = true;
}

TEST(unistd, pause) {
ScopedSignalHandler handler(SIGALRM, PauseTestSignalHandler);

alarm(1);
ASSERT_FALSE(gPauseTestFlag);
ASSERT_FALSE(g_pause_test_flag);
ASSERT_EQ(-1, pause());
ASSERT_TRUE(gPauseTestFlag);
ASSERT_TRUE(g_pause_test_flag);
}

TEST(unistd, read) {