/*
 * Copyright (C) 2008, 2009 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/atomics.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

// Private C library headers.
#include "private/bionic_tls.h"
#include "private/KernelArgumentBlock.h"
#include "private/ScopedPthreadMutexLocker.h"

#include "linker.h"
#include "linker_debug.h"
#include "linker_environ.h"
#include "linker_phdr.h"
#include "linker_allocator.h"

/* >>> IMPORTANT NOTE - READ ME BEFORE MODIFYING <<<
 *
 * Do NOT use malloc() and friends or pthread_*() code here.
 * Don't use printf() either; it's caused mysterious memory
 * corruption in the past.
 * The linker runs before we bring up libc and it's easiest
 * to make sure it does not depend on any complex libc features
 *
 * open issues / todo:
 *
 * - cleaner error reporting
 * - after linking, set as much stuff as possible to READONLY
 *   and NOEXEC
 */

static bool soinfo_link_image(soinfo* si, const android_dlextinfo* extinfo);
static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf);

// We can't use malloc(3) in the dynamic linker. We use a linked list of anonymous
// maps, each a single page in size. The pages are broken up into as many struct soinfo
// objects as will fit.
static LinkerAllocator<soinfo> gSoInfoAllocator;

static soinfo* solist = &libdl_info;
static soinfo* sonext = &libdl_info;
static soinfo* somain; /* main process, always the one after libdl_info */

static const char* const gDefaultLdPaths[] = {
#if defined(__LP64__)
  "/vendor/lib64",
  "/system/lib64",
#else
  "/vendor/lib",
  "/system/lib",
#endif
  NULL
};

#define LDPATH_BUFSIZE (LDPATH_MAX*64)
#define LDPATH_MAX 8

#define LDPRELOAD_BUFSIZE (LDPRELOAD_MAX*64)
#define LDPRELOAD_MAX 8

static char gLdPathsBuffer[LDPATH_BUFSIZE];
static const char* gLdPaths[LDPATH_MAX + 1];

static char gLdPreloadsBuffer[LDPRELOAD_BUFSIZE];
static const char* gLdPreloadNames[LDPRELOAD_MAX + 1];

static soinfo* gLdPreloads[LDPRELOAD_MAX + 1];

__LIBC_HIDDEN__ int gLdDebugVerbosity;

__LIBC_HIDDEN__ abort_msg_t* gAbortMessage = NULL; // For debuggerd.

enum RelocationKind {
  kRelocAbsolute = 0,
  kRelocRelative,
  kRelocCopy,
  kRelocSymbol,
  kRelocMax
};

#if STATS
struct linker_stats_t {
  int count[kRelocMax];
};

static linker_stats_t linker_stats;

static void count_relocation(RelocationKind kind) {
  ++linker_stats.count[kind];
}
#else
static void count_relocation(RelocationKind) {
}
#endif

#if COUNT_PAGES
static unsigned bitmask[4096];
#if defined(__LP64__)
#define MARK(offset) \
    do { \
      if ((((offset) >> 12) >> 5) < 4096) \
          bitmask[((offset) >> 12) >> 5] |= (1 << (((offset) >> 12) & 31)); \
    } while (0)
#else
#define MARK(offset) \
    do { \
      bitmask[((offset) >> 12) >> 3] |= (1 << (((offset) >> 12) & 7)); \
    } while (0)
#endif
#else
#define MARK(x) do {} while (0)
#endif
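
// Added illustrative note (not part of the original source): with COUNT_PAGES
// enabled, MARK() records which 4KiB pages get dirtied by relocations.
// (offset) >> 12 is the page index; the remaining bits pick a word and a bit in
// the bitmask. For example, on a 32-bit build a relocation at offset 0x5000
// touches page 5, so MARK(0x5000) sets bit (5 & 7) == 5 in bitmask[5 >> 3] == bitmask[0].
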
// You shouldn't try to call memory-allocating functions in the dynamic linker.
// Guard against the most obvious ones.
#define DISALLOW_ALLOCATION(return_type, name, ...) \
  return_type name __VA_ARGS__ \
  { \
    const char* msg = "ERROR: " #name " called from the dynamic linker!\n"; \
    __libc_format_log(ANDROID_LOG_FATAL, "linker", "%s", msg); \
    write(2, msg, strlen(msg)); \
    abort(); \
  }
DISALLOW_ALLOCATION(void*, malloc, (size_t u __unused));
DISALLOW_ALLOCATION(void, free, (void* u __unused));
DISALLOW_ALLOCATION(void*, realloc, (void* u1 __unused, size_t u2 __unused));
DISALLOW_ALLOCATION(void*, calloc, (size_t u1 __unused, size_t u2 __unused));
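
// Added illustrative note (not part of the original source): each
// DISALLOW_ALLOCATION() use above expands to a real function definition that
// logs and aborts. For instance, DISALLOW_ALLOCATION(void*, malloc, (size_t u __unused))
// expands to roughly:
//
//   void* malloc (size_t u __unused) {
//     const char* msg = "ERROR: malloc called from the dynamic linker!\n";
//     __libc_format_log(ANDROID_LOG_FATAL, "linker", "%s", msg);
//     write(2, msg, strlen(msg));
//     abort();
//   }
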
static char tmp_err_buf[768];
static char __linker_dl_err_buf[768];

char* linker_get_error_buffer() {
  return &__linker_dl_err_buf[0];
}

size_t linker_get_error_buffer_size() {
  return sizeof(__linker_dl_err_buf);
}

/*
 * This function is an empty stub where GDB locates a breakpoint to get notified
 * about linker activity.
 */
extern "C" void __attribute__((noinline)) __attribute__((visibility("default"))) rtld_db_dlactivity();

static r_debug _r_debug = {1, NULL, reinterpret_cast<uintptr_t>(&rtld_db_dlactivity), r_debug::RT_CONSISTENT, 0};
static link_map* r_debug_tail = 0;

static pthread_mutex_t gDebugMutex = PTHREAD_MUTEX_INITIALIZER;

static void insert_soinfo_into_debug_map(soinfo* info) {
  // Copy the necessary fields into the debug structure.
  link_map* map = &(info->link_map_head);
  map->l_addr = info->load_bias;
  map->l_name = reinterpret_cast<char*>(info->name);
  map->l_ld = info->dynamic;

  /* Stick the new library at the end of the list.
   * gdb tends to care more about libc than it does
   * about leaf libraries, and ordering it this way
   * reduces the back-and-forth over the wire.
   */
  if (r_debug_tail) {
    r_debug_tail->l_next = map;
    map->l_prev = r_debug_tail;
    map->l_next = 0;
  } else {
    _r_debug.r_map = map;
    map->l_prev = 0;
    map->l_next = 0;
  }
  r_debug_tail = map;
}

static void remove_soinfo_from_debug_map(soinfo* info) {
  link_map* map = &(info->link_map_head);

  if (r_debug_tail == map) {
    r_debug_tail = map->l_prev;
  }

  if (map->l_prev) {
    map->l_prev->l_next = map->l_next;
  }
  if (map->l_next) {
    map->l_next->l_prev = map->l_prev;
  }
}

static void notify_gdb_of_load(soinfo* info) {
  if (info->flags & FLAG_EXE) {
    // GDB already knows about the main executable
    return;
  }

  ScopedPthreadMutexLocker locker(&gDebugMutex);

  _r_debug.r_state = r_debug::RT_ADD;
  rtld_db_dlactivity();

  insert_soinfo_into_debug_map(info);

  _r_debug.r_state = r_debug::RT_CONSISTENT;
  rtld_db_dlactivity();
}

static void notify_gdb_of_unload(soinfo* info) {
  if (info->flags & FLAG_EXE) {
    // GDB already knows about the main executable
    return;
  }

  ScopedPthreadMutexLocker locker(&gDebugMutex);

  _r_debug.r_state = r_debug::RT_DELETE;
  rtld_db_dlactivity();

  remove_soinfo_from_debug_map(info);

  _r_debug.r_state = r_debug::RT_CONSISTENT;
  rtld_db_dlactivity();
}

void notify_gdb_of_libraries() {
  _r_debug.r_state = r_debug::RT_ADD;
  rtld_db_dlactivity();
  _r_debug.r_state = r_debug::RT_CONSISTENT;
  rtld_db_dlactivity();
}
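
// Added illustrative note (not part of the original source): the functions above
// implement the standard r_debug rendezvous protocol. A debugger sets a breakpoint
// on rtld_db_dlactivity(); the linker flips _r_debug.r_state to RT_ADD or RT_DELETE,
// mutates the link_map list, then returns it to RT_CONSISTENT, so the debugger only
// ever walks the list while it is in a consistent state.
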
static soinfo* soinfo_alloc(const char* name) {
  if (strlen(name) >= SOINFO_NAME_LEN) {
    DL_ERR("library name \"%s\" too long", name);
    return NULL;
  }

  soinfo* si = gSoInfoAllocator.alloc();

  // Initialize the new element.
  memset(si, 0, sizeof(soinfo));
  strlcpy(si->name, name, sizeof(si->name));
  sonext->next = si;
  sonext = si;

  TRACE("name %s: allocated soinfo @ %p", name, si);
  return si;
}

static void soinfo_free(soinfo* si) {
  if (si == NULL) {
    return;
  }

  soinfo *prev = NULL, *trav;

  TRACE("name %s: freeing soinfo @ %p", si->name, si);

  for (trav = solist; trav != NULL; trav = trav->next) {
    if (trav == si)
      break;
    prev = trav;
  }
  if (trav == NULL) {
    /* si was not in solist */
    DL_ERR("name \"%s\" is not in solist!", si->name);
    return;
  }

  /* prev will never be NULL, because the first entry in solist is
     always the static libdl_info.
  */
  prev->next = si->next;
  if (si == sonext) {
    sonext = prev;
  }

  gSoInfoAllocator.free(si);
}

static void parse_path(const char* path, const char* delimiters,
                       const char** array, char* buf, size_t buf_size, size_t max_count) {
  if (path == NULL) {
    return;
  }

  size_t len = strlcpy(buf, path, buf_size);

  size_t i = 0;
  char* buf_p = buf;
  while (i < max_count && (array[i] = strsep(&buf_p, delimiters))) {
    if (*array[i] != '\0') {
      ++i;
    }
  }

  // Forget the last path if we had to truncate; this occurs if the 2nd to
  // last char isn't '\0' (i.e. wasn't originally a delimiter).
  if (i > 0 && len >= buf_size && buf[buf_size - 2] != '\0') {
    array[i - 1] = NULL;
  } else {
    array[i] = NULL;
  }
}

static void parse_LD_LIBRARY_PATH(const char* path) {
  parse_path(path, ":", gLdPaths,
             gLdPathsBuffer, sizeof(gLdPathsBuffer), LDPATH_MAX);
}

static void parse_LD_PRELOAD(const char* path) {
  // We have historically supported ':' as well as ' ' in LD_PRELOAD.
  parse_path(path, " :", gLdPreloadNames,
             gLdPreloadsBuffer, sizeof(gLdPreloadsBuffer), LDPRELOAD_MAX);
}
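
// Added illustrative example (not part of the original source), assuming a
// colon-separated input; empty components are skipped and the array is
// NULL-terminated:
//
//   const char* dirs[LDPATH_MAX + 1];
//   char storage[LDPATH_BUFSIZE];
//   parse_path("/data/lib::/system/lib", ":", dirs, storage, sizeof(storage), LDPATH_MAX);
//   // dirs == { "/data/lib", "/system/lib", NULL, ... }
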
#if defined(__arm__)

/* For a given PC, find the .so that it belongs to.
 * Returns the base address of the .ARM.exidx section
 * for that .so, and the number of 8-byte entries
 * in that section (via *pcount).
 *
 * Intended to be called by libc's __gnu_Unwind_Find_exidx().
 *
 * This function is exposed via dlfcn.cpp and libdl.so.
 */
_Unwind_Ptr dl_unwind_find_exidx(_Unwind_Ptr pc, int* pcount) {
  unsigned addr = (unsigned)pc;

  for (soinfo* si = solist; si != 0; si = si->next) {
    if ((addr >= si->base) && (addr < (si->base + si->size))) {
      *pcount = si->ARM_exidx_count;
      return (_Unwind_Ptr)si->ARM_exidx;
    }
  }
  *pcount = 0;
  return NULL;
}

#endif

/* Here, we only have to provide a callback to iterate across all the
 * loaded libraries. gcc_eh does the rest. */
int dl_iterate_phdr(int (*cb)(dl_phdr_info* info, size_t size, void* data), void* data) {
  int rv = 0;
  for (soinfo* si = solist; si != NULL; si = si->next) {
    dl_phdr_info dl_info;
    dl_info.dlpi_addr = si->link_map_head.l_addr;
    dl_info.dlpi_name = si->link_map_head.l_name;
    dl_info.dlpi_phdr = si->phdr;
    dl_info.dlpi_phnum = si->phnum;
    rv = cb(&dl_info, sizeof(dl_phdr_info), data);
    if (rv != 0) {
      break;
    }
  }
  return rv;
}
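
// Added illustrative example (not part of the original source): a typical
// dl_iterate_phdr callback inspects each entry and returns 0 to keep iterating;
// a non-zero value stops the walk and becomes dl_iterate_phdr's result.
//
//   static int count_loaded(dl_phdr_info* info, size_t /*size*/, void* data) {
//     ++*static_cast<size_t*>(data);
//     return 0;
//   }
//   // size_t n = 0; dl_iterate_phdr(count_loaded, &n);
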
static ElfW(Sym)* soinfo_elf_lookup(soinfo* si, unsigned hash, const char* name) {
  ElfW(Sym)* symtab = si->symtab;
  const char* strtab = si->strtab;

  TRACE_TYPE(LOOKUP, "SEARCH %s in %s@%p %x %zd",
             name, si->name, reinterpret_cast<void*>(si->base), hash, hash % si->nbucket);

  for (unsigned n = si->bucket[hash % si->nbucket]; n != 0; n = si->chain[n]) {
    ElfW(Sym)* s = symtab + n;
    if (strcmp(strtab + s->st_name, name)) continue;

    /* only concern ourselves with global and weak symbol definitions */
    switch (ELF_ST_BIND(s->st_info)) {
      case STB_GLOBAL:
      case STB_WEAK:
        if (s->st_shndx == SHN_UNDEF) {
          continue;
        }

        TRACE_TYPE(LOOKUP, "FOUND %s in %s (%p) %zd",
                   name, si->name, reinterpret_cast<void*>(s->st_value),
                   static_cast<size_t>(s->st_size));
        return s;
    }
  }

  return NULL;
}

static unsigned elfhash(const char* _name) {
  const unsigned char* name = reinterpret_cast<const unsigned char*>(_name);
  unsigned h = 0, g;

  while (*name) {
    h = (h << 4) + *name++;
    g = h & 0xf0000000;
    h ^= g;
    h ^= g >> 24;
  }
  return h;
}
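
// Added illustrative note (not part of the original source): this is the classic
// SysV ELF hash used by DT_HASH tables. For short names the 0xf0000000 overflow
// fold never triggers, so e.g. elfhash("ab") == ('a' << 4) + 'b' == 0x672.
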
static ElfW(Sym)* soinfo_do_lookup(soinfo* si, const char* name, soinfo** lsi, soinfo* needed[]) {
  unsigned elf_hash = elfhash(name);
  ElfW(Sym)* s = NULL;

  if (si != NULL && somain != NULL) {
    /*
     * Local scope is executable scope. Just start looking into it right away
     * for the shortcut.
     */

    if (si == somain) {
      s = soinfo_elf_lookup(si, elf_hash, name);
      if (s != NULL) {
        *lsi = si;
        goto done;
      }
    } else {
      /* Order of symbol lookup is controlled by DT_SYMBOLIC flag */

      /*
       * If this object was built with symbolic relocations disabled, the
       * first place to look to resolve external references is the main
       * executable.
       */

      if (!si->has_DT_SYMBOLIC) {
        DEBUG("%s: looking up %s in executable %s",
              si->name, name, somain->name);
        s = soinfo_elf_lookup(somain, elf_hash, name);
        if (s != NULL) {
          *lsi = somain;
          goto done;
        }
      }

      /* Look for symbols in the local scope (the object who is
       * searching). This happens with C++ templates on x86 for some
       * reason.
       *
       * Notes on weak symbols:
       * The ELF specs are ambiguous about treatment of weak definitions in
       * dynamic linking. Some systems return the first definition found
       * and some the first non-weak definition. This is system dependent.
       * Here we return the first definition found for simplicity. */

      s = soinfo_elf_lookup(si, elf_hash, name);
      if (s != NULL) {
        *lsi = si;
        goto done;
      }

      /*
       * If this object was built with -Bsymbolic and symbol is not found
       * in the local scope, try to find the symbol in the main executable.
       */

      if (si->has_DT_SYMBOLIC) {
        DEBUG("%s: looking up %s in executable %s after local scope",
              si->name, name, somain->name);
        s = soinfo_elf_lookup(somain, elf_hash, name);
        if (s != NULL) {
          *lsi = somain;
          goto done;
        }
      }
    }
  }

  /* Next, look for it in the preloads list */
  for (int i = 0; gLdPreloads[i] != NULL; i++) {
    s = soinfo_elf_lookup(gLdPreloads[i], elf_hash, name);
    if (s != NULL) {
      *lsi = gLdPreloads[i];
      goto done;
    }
  }

  for (int i = 0; needed[i] != NULL; i++) {
    DEBUG("%s: looking up %s in %s",
          si->name, name, needed[i]->name);
    s = soinfo_elf_lookup(needed[i], elf_hash, name);
    if (s != NULL) {
      *lsi = needed[i];
      goto done;
    }
  }

done:
  if (s != NULL) {
    TRACE_TYPE(LOOKUP, "si %s sym %s s->st_value = %p, "
               "found in %s, base = %p, load bias = %p",
               si->name, name, reinterpret_cast<void*>(s->st_value),
               (*lsi)->name, reinterpret_cast<void*>((*lsi)->base),
               reinterpret_cast<void*>((*lsi)->load_bias));
    return s;
  }

  return NULL;
}
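
// Added illustrative summary (not part of the original source): the lookup order
// implemented above is: the main executable first when the caller was not built
// with DT_SYMBOLIC, then the caller's own symbol table (the other way around for
// -Bsymbolic objects), then every LD_PRELOADed library, and finally the caller's
// DT_NEEDED dependencies; the first match wins and *lsi reports which soinfo
// provided it.
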
/* This is used by dlsym(3). It performs symbol lookup only within the
   specified soinfo object and not in any of its dependencies.

   TODO: Only looking in the specified soinfo seems wrong. dlsym(3) says
   that it should do a breadth-first search through the dependency
   tree. This agrees with the ELF spec (aka System V Application
   Binary Interface) where Chapter 5 discusses resolving "Shared
   Object Dependencies" in breadth-first search order.
 */
ElfW(Sym)* dlsym_handle_lookup(soinfo* si, const char* name) {
  return soinfo_elf_lookup(si, elfhash(name), name);
}

/* This is used by dlsym(3) to perform a global symbol lookup. If the
   start value is null (for RTLD_DEFAULT), the search starts at the
   beginning of the global solist. Otherwise the search starts at the
   specified soinfo (for RTLD_NEXT).
 */
ElfW(Sym)* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) {
  unsigned elf_hash = elfhash(name);

  if (start == NULL) {
    start = solist;
  }

  ElfW(Sym)* s = NULL;
  for (soinfo* si = start; (s == NULL) && (si != NULL); si = si->next) {
    s = soinfo_elf_lookup(si, elf_hash, name);
    if (s != NULL) {
      *found = si;
      break;
    }
  }

  if (s != NULL) {
    TRACE_TYPE(LOOKUP, "%s s->st_value = %p, found->base = %p",
               name, reinterpret_cast<void*>(s->st_value), reinterpret_cast<void*>((*found)->base));
  }

  return s;
}

soinfo* find_containing_library(const void* p) {
  ElfW(Addr) address = reinterpret_cast<ElfW(Addr)>(p);
  for (soinfo* si = solist; si != NULL; si = si->next) {
    if (address >= si->base && address - si->base < si->size) {
      return si;
    }
  }
  return NULL;
}

ElfW(Sym)* dladdr_find_symbol(soinfo* si, const void* addr) {
  ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - si->base;

  // Search the library's symbol table for any defined symbol which
  // contains this address.
  for (size_t i = 0; i < si->nchain; ++i) {
    ElfW(Sym)* sym = &si->symtab[i];
    if (sym->st_shndx != SHN_UNDEF &&
        soaddr >= sym->st_value &&
        soaddr < sym->st_value + sym->st_size) {
      return sym;
    }
  }

  return NULL;
}
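
// Added illustrative note (not part of the original source): dladdr() in dlfcn.cpp
// is the kind of caller these two helpers serve: it first maps an address to a
// library, then asks for the enclosing symbol, roughly:
//
//   soinfo* si = find_containing_library(addr);
//   ElfW(Sym)* sym = (si != NULL) ? dladdr_find_symbol(si, addr) : NULL;
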
static int open_library_on_path(const char* name, const char* const paths[]) {
  char buf[512];
  for (size_t i = 0; paths[i] != NULL; ++i) {
    int n = __libc_format_buffer(buf, sizeof(buf), "%s/%s", paths[i], name);
    if (n < 0 || n >= static_cast<int>(sizeof(buf))) {
      PRINT("Warning: ignoring very long library path: %s/%s", paths[i], name);
      continue;
    }
    int fd = TEMP_FAILURE_RETRY(open(buf, O_RDONLY | O_CLOEXEC));
    if (fd != -1) {
      return fd;
    }
  }
  return -1;
}

static int open_library(const char* name) {
  TRACE("[ opening %s ]", name);

  // If the name contains a slash, we should attempt to open it directly and not search the paths.
  if (strchr(name, '/') != NULL) {
    int fd = TEMP_FAILURE_RETRY(open(name, O_RDONLY | O_CLOEXEC));
    if (fd != -1) {
      return fd;
    }
    // ...but nvidia binary blobs (at least) rely on this behavior, so fall through for now.
#if defined(__LP64__)
    return -1;
#endif
  }

  // Otherwise we try LD_LIBRARY_PATH first, and fall back to the built-in well known paths.
  int fd = open_library_on_path(name, gLdPaths);
  if (fd == -1) {
    fd = open_library_on_path(name, gDefaultLdPaths);
  }
  return fd;
}
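
// Added illustrative example (not part of the original source): for a plain name
// such as "libfoo.so" (no '/'), open_library() probes each LD_LIBRARY_PATH entry
// in gLdPaths in order and then the gDefaultLdPaths fallbacks, so on a 32-bit
// device the last candidates tried are "/vendor/lib/libfoo.so" and
// "/system/lib/libfoo.so".
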
static soinfo* load_library(const char* name, const android_dlextinfo* extinfo) {
  // Open the file.
  int fd = open_library(name);
  if (fd == -1) {
    DL_ERR("library \"%s\" not found", name);
    return NULL;
  }

  // Read the ELF header and load the segments.
  ElfReader elf_reader(name, fd);
  if (!elf_reader.Load(extinfo)) {
    return NULL;
  }

  const char* bname = strrchr(name, '/');
  soinfo* si = soinfo_alloc(bname ? bname + 1 : name);
  if (si == NULL) {
    return NULL;
  }
  si->base = elf_reader.load_start();
  si->size = elf_reader.load_size();
  si->load_bias = elf_reader.load_bias();
  si->flags = 0;
  si->entry = 0;
  si->dynamic = NULL;
  si->phnum = elf_reader.phdr_count();
  si->phdr = elf_reader.loaded_phdr();
  return si;
}

static soinfo *find_loaded_library(const char* name) {
  // TODO: don't use basename only for determining libraries
  // http://code.google.com/p/android/issues/detail?id=6670

  const char* bname = strrchr(name, '/');
  bname = bname ? bname + 1 : name;

  for (soinfo* si = solist; si != NULL; si = si->next) {
    if (!strcmp(bname, si->name)) {
      return si;
    }
  }
  return NULL;
}

static soinfo* find_library_internal(const char* name, const android_dlextinfo* extinfo) {
  if (name == NULL) {
    return somain;
  }

  soinfo* si = find_loaded_library(name);
  if (si != NULL) {
    if (si->flags & FLAG_LINKED) {
      return si;
    }
    DL_ERR("OOPS: recursive link to \"%s\"", si->name);
    return NULL;
  }

  TRACE("[ '%s' has not been loaded yet. Locating...]", name);
  si = load_library(name, extinfo);
  if (si == NULL) {
    return NULL;
  }

  // At this point we know that whatever is loaded @ base is a valid ELF
  // shared library whose segments are properly mapped in.
  TRACE("[ find_library_internal base=%p size=%zu name='%s' ]",
        reinterpret_cast<void*>(si->base), si->size, si->name);

  if (!soinfo_link_image(si, extinfo)) {
    munmap(reinterpret_cast<void*>(si->base), si->size);
    soinfo_free(si);
    return NULL;
  }

  return si;
}

static soinfo* find_library(const char* name, const android_dlextinfo* extinfo) {
  soinfo* si = find_library_internal(name, extinfo);
  if (si != NULL) {
    si->ref_count++;
  }
  return si;
}

static int soinfo_unload(soinfo* si) {
  if (si->ref_count == 1) {
    TRACE("unloading '%s'", si->name);
    si->CallDestructors();

    for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
      if (d->d_tag == DT_NEEDED) {
        const char* library_name = si->strtab + d->d_un.d_val;
        TRACE("%s needs to unload %s", si->name, library_name);
        soinfo_unload(find_loaded_library(library_name));
      }
    }

    munmap(reinterpret_cast<void*>(si->base), si->size);
    notify_gdb_of_unload(si);
    si->ref_count = 0;
    soinfo_free(si);
  } else {
    si->ref_count--;
    TRACE("not unloading '%s', decrementing ref_count to %zd", si->name, si->ref_count);
  }
  return 0;
}

void do_android_get_LD_LIBRARY_PATH(char* buffer, size_t buffer_size) {
  snprintf(buffer, buffer_size, "%s:%s", gDefaultLdPaths[0], gDefaultLdPaths[1]);
}

void do_android_update_LD_LIBRARY_PATH(const char* ld_library_path) {
  if (!get_AT_SECURE()) {
    parse_LD_LIBRARY_PATH(ld_library_path);
  }
}

soinfo* do_dlopen(const char* name, int flags, const android_dlextinfo* extinfo) {
  if ((flags & ~(RTLD_NOW|RTLD_LAZY|RTLD_LOCAL|RTLD_GLOBAL)) != 0) {
    DL_ERR("invalid flags to dlopen: %x", flags);
    return NULL;
  }
  if (extinfo != NULL && ((extinfo->flags & ~(ANDROID_DLEXT_VALID_FLAG_BITS)) != 0)) {
    DL_ERR("invalid extended flags to android_dlopen_ext: %x", extinfo->flags);
    return NULL;
  }
  gSoInfoAllocator.protect_all(PROT_READ | PROT_WRITE);
  soinfo* si = find_library(name, extinfo);
  if (si != NULL) {
    si->CallConstructors();
  }
  gSoInfoAllocator.protect_all(PROT_READ);
  return si;
}

int do_dlclose(soinfo* si) {
  gSoInfoAllocator.protect_all(PROT_READ | PROT_WRITE);
  int result = soinfo_unload(si);
  gSoInfoAllocator.protect_all(PROT_READ);
  return result;
}
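
// Added illustrative note (not part of the original source): do_dlopen() and
// do_dlclose() bracket their work with gSoInfoAllocator.protect_all(PROT_READ | PROT_WRITE)
// and protect_all(PROT_READ), so the soinfo pool is writable only while the linker
// itself is mutating it and stays read-only otherwise. The callers in dlfcn.cpp are
// assumed to serialize these entry points behind a global dlopen/dlclose mutex.
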
#if defined(USE_RELA)
static int soinfo_relocate(soinfo* si, ElfW(Rela)* rela, unsigned count, soinfo* needed[]) {
  ElfW(Sym)* s;
  soinfo* lsi;

  for (size_t idx = 0; idx < count; ++idx, ++rela) {
    unsigned type = ELFW(R_TYPE)(rela->r_info);
    unsigned sym = ELFW(R_SYM)(rela->r_info);
    ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rela->r_offset + si->load_bias);
    ElfW(Addr) sym_addr = 0;
    const char* sym_name = NULL;

    DEBUG("Processing '%s' relocation at index %zd", si->name, idx);
    if (type == 0) { // R_*_NONE
      continue;
    }
    if (sym != 0) {
      sym_name = reinterpret_cast<const char*>(si->strtab + si->symtab[sym].st_name);
      s = soinfo_do_lookup(si, sym_name, &lsi, needed);
      if (s == NULL) {
        // We only allow an undefined symbol if this is a weak reference...
        s = &si->symtab[sym];
        if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
          DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, si->name);
          return -1;
        }

        /* IHI0044C AAELF 4.5.1.1:

           Libraries are not searched to resolve weak references.
           It is not an error for a weak reference to remain unsatisfied.

           During linking, the value of an undefined weak reference is:
           - Zero if the relocation type is absolute
           - The address of the place if the relocation is pc-relative
           - The address of nominal base address if the relocation
             type is base-relative.
        */

        switch (type) {
#if defined(__aarch64__)
          case R_AARCH64_JUMP_SLOT:
          case R_AARCH64_GLOB_DAT:
          case R_AARCH64_ABS64:
          case R_AARCH64_ABS32:
          case R_AARCH64_ABS16:
          case R_AARCH64_RELATIVE:
            /*
             * The sym_addr was initialized to be zero above, or the relocation
             * code below does not care about value of sym_addr.
             * No need to do anything.
             */
            break;
#elif defined(__x86_64__)
          case R_X86_64_JUMP_SLOT:
          case R_X86_64_GLOB_DAT:
          case R_X86_64_32:
          case R_X86_64_RELATIVE:
            // No need to do anything.
            break;
          case R_X86_64_PC32:
            sym_addr = reloc;
            break;
#endif
          default:
            DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rela, idx);
            return -1;
        }
      } else {
        // We got a definition.
        sym_addr = static_cast<ElfW(Addr)>(s->st_value + lsi->load_bias);
      }
      count_relocation(kRelocSymbol);
    } else {
      s = NULL;
    }

    switch (type) {
#if defined(__aarch64__)
      case R_AARCH64_JUMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
        break;
      case R_AARCH64_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
        break;
      case R_AARCH64_ABS64:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO ABS64 %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
        break;
      case R_AARCH64_ABS32:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO ABS32 %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
                 static_cast<ElfW(Addr)>(INT32_MIN),
                 static_cast<ElfW(Addr)>(UINT32_MAX));
          return -1;
        }
        break;
      case R_AARCH64_ABS16:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO ABS16 %16llx <- %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), sym_name);
        if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
                 static_cast<ElfW(Addr)>(INT16_MIN),
                 static_cast<ElfW(Addr)>(UINT16_MAX));
          return -1;
        }
        break;
      case R_AARCH64_PREL64:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO REL64 %16llx <- %16llx - %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend) - rela->r_offset;
        break;
      case R_AARCH64_PREL32:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO REL32 %16llx <- %16llx - %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
        if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
                 static_cast<ElfW(Addr)>(INT32_MIN),
                 static_cast<ElfW(Addr)>(UINT32_MAX));
          return -1;
        }
        break;
      case R_AARCH64_PREL16:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO REL16 %16llx <- %16llx - %16llx %s\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
        if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
            ((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
          *reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
        } else {
          DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
                 (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
                 static_cast<ElfW(Addr)>(INT16_MIN),
                 static_cast<ElfW(Addr)>(UINT16_MAX));
          return -1;
        }
        break;

      case R_AARCH64_RELATIVE:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        if (sym) {
          DL_ERR("odd RELATIVE form...");
          return -1;
        }
        TRACE_TYPE(RELO, "RELO RELATIVE %16llx <- %16llx\n",
                   reloc, (si->base + rela->r_addend));
        *reinterpret_cast<ElfW(Addr)*>(reloc) = (si->base + rela->r_addend);
        break;

      case R_AARCH64_COPY:
        /*
         * ET_EXEC is not supported so this should not happen.
         *
         * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
         *
         * Section 4.7.1.10 "Dynamic relocations"
         * R_AARCH64_COPY may only appear in executable objects where e_type is
         * set to ET_EXEC.
         */
        DL_ERR("%s R_AARCH64_COPY relocations are not supported", si->name);
        return -1;
      case R_AARCH64_TLS_TPREL64:
        TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16llx <- %16llx - %16llx\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset);
        break;
      case R_AARCH64_TLS_DTPREL32:
        TRACE_TYPE(RELO, "RELO TLS_DTPREL32 *** %16llx <- %16llx - %16llx\n",
                   reloc, (sym_addr + rela->r_addend), rela->r_offset);
        break;
#elif defined(__x86_64__)
      case R_X86_64_JUMP_SLOT:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO JMP_SLOT %08zx <- %08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_GLOB_DAT:
        count_relocation(kRelocAbsolute);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO GLOB_DAT %08zx <- %08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_RELATIVE:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        if (sym) {
          DL_ERR("odd RELATIVE form...");
          return -1;
        }
        TRACE_TYPE(RELO, "RELO RELATIVE %08zx <- +%08zx", static_cast<size_t>(reloc),
                   static_cast<size_t>(si->base));
        *reinterpret_cast<ElfW(Addr)*>(reloc) = si->base + rela->r_addend;
        break;
      case R_X86_64_32:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO R_X86_64_32 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_64:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO R_X86_64_64 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
                   static_cast<size_t>(sym_addr), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
        break;
      case R_X86_64_PC32:
        count_relocation(kRelocRelative);
        MARK(rela->r_offset);
        TRACE_TYPE(RELO, "RELO R_X86_64_PC32 %08zx <- +%08zx (%08zx - %08zx) %s",
                   static_cast<size_t>(reloc), static_cast<size_t>(sym_addr - reloc),
                   static_cast<size_t>(sym_addr), static_cast<size_t>(reloc), sym_name);
        *reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend - reloc;
        break;
#endif

      default:
        DL_ERR("unknown reloc type %d @ %p (%zu)", type, rela, idx);
        return -1;
    }
  }
  return 0;
}
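
// Added illustrative example (not part of the original source): for an
// R_AARCH64_RELATIVE entry the code above stores si->base + r_addend at
// r_offset + load_bias, and rejects entries with a non-zero symbol index
// ("odd RELATIVE form"). So for a library whose base is 0x7000000000 and an
// entry with r_addend 0x1234, the patched word becomes 0x7000001234 -- no
// symbol lookup is involved.
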
|
2014-02-07 05:36:51 +01:00
|
|
|
|
|
|
|
#else // REL, not RELA.
|
|
|
|
|
2014-02-11 02:46:57 +01:00
|
|
|
static int soinfo_relocate(soinfo* si, ElfW(Rel)* rel, unsigned count, soinfo* needed[]) {
|
|
|
|
ElfW(Sym)* s;
|
2013-03-01 00:58:45 +01:00
|
|
|
soinfo* lsi;
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2012-08-04 01:49:39 +02:00
|
|
|
for (size_t idx = 0; idx < count; ++idx, ++rel) {
|
2014-02-11 02:46:57 +01:00
|
|
|
unsigned type = ELFW(R_TYPE)(rel->r_info);
|
|
|
|
// TODO: don't use unsigned for 'sym'. Use uint32_t or ElfW(Addr) instead.
|
|
|
|
unsigned sym = ELFW(R_SYM)(rel->r_info);
|
|
|
|
ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rel->r_offset + si->load_bias);
|
|
|
|
ElfW(Addr) sym_addr = 0;
|
2014-02-12 01:59:37 +01:00
|
|
|
const char* sym_name = NULL;
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2013-10-01 03:43:46 +02:00
|
|
|
DEBUG("Processing '%s' relocation at index %zd", si->name, idx);
|
2012-07-31 21:07:22 +02:00
|
|
|
if (type == 0) { // R_*_NONE
|
|
|
|
continue;
|
|
|
|
}
|
2013-03-01 00:58:45 +01:00
|
|
|
if (sym != 0) {
|
2014-02-13 02:17:41 +01:00
|
|
|
sym_name = reinterpret_cast<const char*>(si->strtab + si->symtab[sym].st_name);
|
2012-08-30 12:48:32 +02:00
|
|
|
s = soinfo_do_lookup(si, sym_name, &lsi, needed);
|
2013-03-01 00:58:45 +01:00
|
|
|
if (s == NULL) {
|
2013-10-26 02:38:02 +02:00
|
|
|
// We only allow an undefined symbol if this is a weak reference...
|
2014-02-13 02:17:41 +01:00
|
|
|
s = &si->symtab[sym];
|
2013-10-05 02:01:33 +02:00
|
|
|
if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
|
2012-08-29 22:10:54 +02:00
|
|
|
DL_ERR("cannot locate symbol \"%s\" referenced by \"%s\"...", sym_name, si->name);
|
2009-10-26 20:05:23 +01:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* IHI0044C AAELF 4.5.1.1:
|
|
|
|
|
|
|
|
Libraries are not searched to resolve weak references.
|
|
|
|
It is not an error for a weak reference to remain
|
|
|
|
unsatisfied.
|
|
|
|
|
|
|
|
During linking, the value of an undefined weak reference is:
|
|
|
|
- Zero if the relocation type is absolute
|
|
|
|
- The address of the place if the relocation is pc-relative
|
2012-08-14 23:07:59 +02:00
|
|
|
- The address of nominal base address if the relocation
|
2009-10-26 20:05:23 +01:00
|
|
|
type is base-relative.
|
|
|
|
*/
|
|
|
|
|
|
|
|
switch (type) {
|
2013-10-26 02:38:02 +02:00
|
|
|
#if defined(__arm__)
|
2009-10-26 20:05:23 +01:00
|
|
|
case R_ARM_JUMP_SLOT:
|
|
|
|
case R_ARM_GLOB_DAT:
|
|
|
|
case R_ARM_ABS32:
|
|
|
|
case R_ARM_RELATIVE: /* Don't care. */
|
2013-10-26 02:38:02 +02:00
|
|
|
// sym_addr was initialized to zero above, or the relocation
|
|
|
|
// code below does not care about the value of sym_addr.
|
|
|
|
// No need to do anything.
|
|
|
|
break;
|
|
|
|
#elif defined(__i386__)
|
2012-07-31 21:07:22 +02:00
|
|
|
case R_386_JMP_SLOT:
|
2009-10-26 20:05:23 +01:00
|
|
|
case R_386_GLOB_DAT:
|
|
|
|
case R_386_32:
|
2013-10-05 02:01:33 +02:00
|
|
|
case R_386_RELATIVE: /* Don't care. */
|
2013-10-26 02:38:02 +02:00
|
|
|
// sym_addr was initialized to zero above, or the relocation
|
|
|
|
// code below does not care about the value of sym_addr.
|
|
|
|
// No need to do anything.
|
2009-10-26 20:05:23 +01:00
|
|
|
break;
|
|
|
|
case R_386_PC32:
|
|
|
|
sym_addr = reloc;
|
|
|
|
break;
|
2013-10-26 02:38:02 +02:00
|
|
|
#endif
|
2009-10-26 20:05:23 +01:00
|
|
|
|
2013-10-26 02:38:02 +02:00
|
|
|
#if defined(__arm__)
|
2009-10-26 20:05:23 +01:00
|
|
|
case R_ARM_COPY:
|
2013-10-26 02:38:02 +02:00
|
|
|
// Fall through. Can't really copy if the weak symbol is not found at run-time.
|
|
|
|
#endif
|
2009-10-26 20:05:23 +01:00
|
|
|
default:
|
2014-02-12 01:59:37 +01:00
|
|
|
DL_ERR("unknown weak reloc type %d @ %p (%zu)", type, rel, idx);
|
2009-10-26 20:05:23 +01:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
} else {
|
2013-10-26 02:38:02 +02:00
|
|
|
// We got a definition.
|
2014-02-11 02:46:57 +01:00
|
|
|
sym_addr = static_cast<ElfW(Addr)>(s->st_value + lsi->load_bias);
|
2012-07-31 21:07:22 +02:00
|
|
|
}
|
2012-08-14 23:07:59 +02:00
|
|
|
count_relocation(kRelocSymbol);
|
2009-03-04 04:28:35 +01:00
|
|
|
} else {
|
2009-10-26 20:05:23 +01:00
|
|
|
s = NULL;
|
2009-03-04 04:28:35 +01:00
|
|
|
}
|
|
|
|
|
2013-10-05 02:01:33 +02:00
|
|
|
switch (type) {
|
2013-10-26 02:38:02 +02:00
|
|
|
#if defined(__arm__)
|
2009-03-04 04:28:35 +01:00
|
|
|
case R_ARM_JUMP_SLOT:
|
2012-08-14 23:07:59 +02:00
|
|
|
count_relocation(kRelocAbsolute);
|
2009-03-04 04:28:35 +01:00
|
|
|
MARK(rel->r_offset);
|
2013-03-12 18:40:45 +01:00
|
|
|
TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
|
2014-02-11 02:46:57 +01:00
|
|
|
*reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
|
|
|
case R_ARM_GLOB_DAT:
|
2012-08-14 23:07:59 +02:00
|
|
|
count_relocation(kRelocAbsolute);
|
2009-03-04 04:28:35 +01:00
|
|
|
MARK(rel->r_offset);
|
2013-03-12 18:40:45 +01:00
|
|
|
TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
|
2014-02-11 02:46:57 +01:00
|
|
|
*reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
|
|
|
case R_ARM_ABS32:
|
2012-08-14 23:07:59 +02:00
|
|
|
count_relocation(kRelocAbsolute);
|
2009-03-04 04:28:35 +01:00
|
|
|
MARK(rel->r_offset);
|
2013-03-12 18:40:45 +01:00
|
|
|
TRACE_TYPE(RELO, "RELO ABS %08x <- %08x %s", reloc, sym_addr, sym_name);
|
2014-02-11 02:46:57 +01:00
|
|
|
*reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
2009-11-17 23:56:26 +01:00
|
|
|
case R_ARM_REL32:
|
2012-08-14 23:07:59 +02:00
|
|
|
count_relocation(kRelocRelative);
|
2009-11-17 23:56:26 +01:00
|
|
|
MARK(rel->r_offset);
|
2013-03-12 18:40:45 +01:00
|
|
|
TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x - %08x %s",
|
2009-11-17 23:56:26 +01:00
|
|
|
reloc, sym_addr, rel->r_offset, sym_name);
|
2014-02-11 02:46:57 +01:00
|
|
|
*reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr - rel->r_offset;
|
2009-11-17 23:56:26 +01:00
|
|
|
break;
|
2009-03-04 04:28:35 +01:00
|
|
|
case R_ARM_COPY:
|
2014-05-12 18:06:14 +02:00
|
|
|
/*
|
2014-05-12 20:36:56 +02:00
|
|
|
* ET_EXEC is not supported so this should not happen.
|
|
|
|
*
|
2014-05-12 18:06:14 +02:00
|
|
|
* http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
|
|
|
|
*
|
|
|
|
* Section 4.7.1.10 "Dynamic relocations"
|
|
|
|
* R_ARM_COPY may only appear in executable objects where e_type is
|
|
|
|
* set to ET_EXEC.
|
|
|
|
*/
|
|
|
|
DL_ERR("%s R_ARM_COPY relocations are not supported", si->name);
|
|
|
|
return -1;
|
2013-10-26 02:38:02 +02:00
|
|
|
#elif defined(__i386__)
|
|
|
|
case R_386_JMP_SLOT:
|
|
|
|
count_relocation(kRelocAbsolute);
|
|
|
|
MARK(rel->r_offset);
|
|
|
|
TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
|
2014-02-11 02:46:57 +01:00
|
|
|
*reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
|
2013-10-26 02:38:02 +02:00
|
|
|
break;
|
|
|
|
case R_386_GLOB_DAT:
|
|
|
|
count_relocation(kRelocAbsolute);
|
|
|
|
MARK(rel->r_offset);
|
|
|
|
TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
|
2014-02-11 02:46:57 +01:00
|
|
|
*reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
|
2013-10-26 02:38:02 +02:00
|
|
|
break;
|
|
|
|
case R_386_32:
|
|
|
|
count_relocation(kRelocRelative);
|
|
|
|
MARK(rel->r_offset);
|
|
|
|
TRACE_TYPE(RELO, "RELO R_386_32 %08x <- +%08x %s", reloc, sym_addr, sym_name);
|
2014-02-11 02:46:57 +01:00
|
|
|
*reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
|
2013-10-26 02:38:02 +02:00
|
|
|
break;
|
|
|
|
case R_386_PC32:
|
|
|
|
count_relocation(kRelocRelative);
|
|
|
|
MARK(rel->r_offset);
|
|
|
|
TRACE_TYPE(RELO, "RELO R_386_PC32 %08x <- +%08x (%08x - %08x) %s",
|
|
|
|
reloc, (sym_addr - reloc), sym_addr, reloc, sym_name);
|
2014-02-11 02:46:57 +01:00
|
|
|
*reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr - reloc);
|
2013-10-26 02:38:02 +02:00
|
|
|
break;
|
|
|
|
#elif defined(__mips__)
|
|
|
|
case R_MIPS_REL32:
|
2014-02-07 05:36:51 +01:00
|
|
|
#if defined(__LP64__)
|
|
|
|
// MIPS Elf64_Rel entries contain compound relocations
|
|
|
|
// We only handle the R_MIPS_NONE|R_MIPS_64|R_MIPS_REL32 case
|
|
|
|
if (ELF64_R_TYPE2(rel->r_info) != R_MIPS_64 ||
|
|
|
|
ELF64_R_TYPE3(rel->r_info) != R_MIPS_NONE) {
|
2014-02-12 01:59:37 +01:00
|
|
|
DL_ERR("Unexpected compound relocation type:%d type2:%d type3:%d @ %p (%zu)",
|
2014-02-07 05:36:51 +01:00
|
|
|
type, (unsigned)ELF64_R_TYPE2(rel->r_info),
|
2014-02-12 01:59:37 +01:00
|
|
|
(unsigned)ELF64_R_TYPE3(rel->r_info), rel, idx);
|
2014-02-07 05:36:51 +01:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
#endif
|
2013-10-26 02:38:02 +02:00
|
|
|
count_relocation(kRelocAbsolute);
|
|
|
|
MARK(rel->r_offset);
|
2014-02-07 05:36:51 +01:00
|
|
|
TRACE_TYPE(RELO, "RELO REL32 %08zx <- %08zx %s", static_cast<size_t>(reloc),
|
|
|
|
static_cast<size_t>(sym_addr), sym_name ? sym_name : "*SECTIONHDR*");
|
2013-10-26 02:38:02 +02:00
|
|
|
if (s) {
|
2014-02-11 02:46:57 +01:00
|
|
|
*reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
|
2013-10-26 02:38:02 +02:00
|
|
|
} else {
|
2014-02-11 02:46:57 +01:00
|
|
|
*reinterpret_cast<ElfW(Addr)*>(reloc) += si->base;
|
2013-10-26 02:38:02 +02:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if defined(__arm__)
|
|
|
|
case R_ARM_RELATIVE:
|
|
|
|
#elif defined(__i386__)
|
|
|
|
case R_386_RELATIVE:
|
|
|
|
#endif
|
|
|
|
count_relocation(kRelocRelative);
|
|
|
|
MARK(rel->r_offset);
|
|
|
|
if (sym) {
|
|
|
|
DL_ERR("odd RELATIVE form...");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
TRACE_TYPE(RELO, "RELO RELATIVE %p <- +%p",
|
|
|
|
reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(si->base));
|
2014-02-11 02:46:57 +01:00
|
|
|
*reinterpret_cast<ElfW(Addr)*>(reloc) += si->base;
|
2013-10-26 02:38:02 +02:00
|
|
|
break;
|
2009-03-04 04:28:35 +01:00
|
|
|
|
|
|
|
default:
|
2014-02-12 01:59:37 +01:00
|
|
|
DL_ERR("unknown reloc type %d @ %p (%zu)", type, rel, idx);
|
2009-03-04 04:28:35 +01:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2013-10-05 02:01:33 +02:00
|
|
|
#endif
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2013-10-26 02:38:02 +02:00
|
|
|
#if defined(__mips__)
|
2013-08-21 06:05:44 +02:00
|
|
|
static bool mips_relocate_got(soinfo* si, soinfo* needed[]) {
|
2014-02-07 05:36:51 +01:00
|
|
|
ElfW(Addr)** got = si->plt_got;
|
2013-08-21 06:05:44 +02:00
|
|
|
if (got == NULL) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
unsigned local_gotno = si->mips_local_gotno;
|
|
|
|
unsigned gotsym = si->mips_gotsym;
|
|
|
|
unsigned symtabno = si->mips_symtabno;
|
2014-02-11 02:46:57 +01:00
|
|
|
ElfW(Sym)* symtab = si->symtab;
|
2012-07-31 21:07:22 +02:00
|
|
|
|
2014-02-07 05:36:51 +01:00
|
|
|
// got[0] is the address of the lazy resolver function.
|
|
|
|
// got[1] may be used for a GNU extension.
|
|
|
|
// Set it to a recognizable address in case someone calls it (should be _rtld_bind_start).
|
|
|
|
// FIXME: maybe this should be in a separate routine?
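    // Note (informal): the MIPS ABI marks got[1] as reserved for the runtime linker
    // by setting its most significant bit, which is why the code below treats a
    // negative intptr_t value there as "this slot belongs to the GNU extension"
    // and stamps it with a second recognizable address.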
|
2012-07-31 21:07:22 +02:00
|
|
|
if ((si->flags & FLAG_LINKER) == 0) {
|
2013-08-21 06:05:44 +02:00
|
|
|
size_t g = 0;
|
2014-02-07 05:36:51 +01:00
|
|
|
got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadbeef);
|
|
|
|
if (reinterpret_cast<intptr_t>(got[g]) < 0) {
|
|
|
|
got[g++] = reinterpret_cast<ElfW(Addr)*>(0xdeadfeed);
|
2012-07-31 21:07:22 +02:00
|
|
|
}
|
2014-02-07 05:36:51 +01:00
|
|
|
// Relocate the local GOT entries.
|
2012-07-31 21:07:22 +02:00
|
|
|
for (; g < local_gotno; g++) {
|
2014-02-07 05:36:51 +01:00
|
|
|
got[g] = reinterpret_cast<ElfW(Addr)*>(reinterpret_cast<uintptr_t>(got[g]) + si->load_bias);
|
2012-07-31 21:07:22 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-02-07 05:36:51 +01:00
|
|
|
// Now for the global GOT entries...
|
2014-02-11 02:46:57 +01:00
|
|
|
ElfW(Sym)* sym = symtab + gotsym;
|
2012-07-31 21:07:22 +02:00
|
|
|
got = si->plt_got + local_gotno;
|
2013-08-21 06:05:44 +02:00
|
|
|
for (size_t g = gotsym; g < symtabno; g++, sym++, got++) {
|
2014-02-07 05:36:51 +01:00
|
|
|
// This is an undefined reference... try to locate it.
|
|
|
|
const char* sym_name = si->strtab + sym->st_name;
|
2013-03-01 00:58:45 +01:00
|
|
|
soinfo* lsi;
|
2014-02-07 05:36:51 +01:00
|
|
|
ElfW(Sym)* s = soinfo_do_lookup(si, sym_name, &lsi, needed);
|
2012-07-31 21:07:22 +02:00
|
|
|
if (s == NULL) {
|
2014-02-07 05:36:51 +01:00
|
|
|
// We only allow an undefined symbol if this is a weak reference.
|
2012-07-31 21:07:22 +02:00
|
|
|
s = &symtab[g];
|
2013-10-05 02:01:33 +02:00
|
|
|
if (ELF_ST_BIND(s->st_info) != STB_WEAK) {
|
2012-08-04 01:49:39 +02:00
|
|
|
DL_ERR("cannot locate \"%s\"...", sym_name);
|
2013-08-21 06:05:44 +02:00
|
|
|
return false;
|
2012-07-31 21:07:22 +02:00
|
|
|
}
|
|
|
|
*got = 0;
|
2014-02-07 05:36:51 +01:00
|
|
|
} else {
|
|
|
|
// FIXME: is this sufficient?
|
|
|
|
// For reference see NetBSD link loader
|
|
|
|
// http://cvsweb.netbsd.org/bsdweb.cgi/src/libexec/ld.elf_so/arch/mips/mips_reloc.c?rev=1.53&content-type=text/x-cvsweb-markup
|
|
|
|
*got = reinterpret_cast<ElfW(Addr)*>(lsi->load_bias + s->st_value);
|
2012-07-31 21:07:22 +02:00
|
|
|
}
|
|
|
|
}
|
2013-08-21 06:05:44 +02:00
|
|
|
return true;
|
2012-07-31 21:07:22 +02:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2014-03-25 15:53:56 +01:00
|
|
|
void soinfo::CallArray(const char* array_name __unused, linker_function_t* functions, size_t count, bool reverse) {
|
2013-03-12 18:40:45 +01:00
|
|
|
if (functions == NULL) {
|
2012-11-01 23:16:56 +01:00
|
|
|
return;
|
|
|
|
}
|
2009-05-18 14:37:41 +02:00
|
|
|
|
2013-10-01 03:43:46 +02:00
|
|
|
TRACE("[ Calling %s (size %zd) @ %p for '%s' ]", array_name, count, functions, name);
|
2009-05-18 14:37:41 +02:00
|
|
|
|
2013-03-12 18:40:45 +01:00
|
|
|
int begin = reverse ? (count - 1) : 0;
|
|
|
|
int end = reverse ? -1 : count;
|
|
|
|
int step = reverse ? -1 : 1;
|
2009-05-18 14:37:41 +02:00
|
|
|
|
2013-03-12 18:40:45 +01:00
|
|
|
for (int i = begin; i != end; i += step) {
|
|
|
|
TRACE("[ %s[%d] == %p ]", array_name, i, functions[i]);
|
|
|
|
CallFunction("function", functions[i]);
|
2012-11-01 23:16:56 +01:00
|
|
|
}
|
|
|
|
|
2013-03-12 18:40:45 +01:00
|
|
|
TRACE("[ Done calling %s for '%s' ]", array_name, name);
|
2009-03-04 04:28:35 +01:00
|
|
|
}
|
|
|
|
|
2014-03-25 15:53:56 +01:00
|
|
|
void soinfo::CallFunction(const char* function_name __unused, linker_function_t function) {
|
2013-01-04 00:44:03 +01:00
|
|
|
if (function == NULL || reinterpret_cast<uintptr_t>(function) == static_cast<uintptr_t>(-1)) {
|
2012-11-01 23:16:56 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-03-12 18:40:45 +01:00
|
|
|
TRACE("[ Calling %s @ %p for '%s' ]", function_name, function, name);
|
2012-11-01 23:16:56 +01:00
|
|
|
function();
|
2013-03-12 18:40:45 +01:00
|
|
|
TRACE("[ Done calling %s @ %p for '%s' ]", function_name, function, name);
|
2013-01-04 00:44:03 +01:00
|
|
|
|
|
|
|
// The function may have called dlopen(3) or dlclose(3), so we need to ensure our data structures
|
|
|
|
// are still writable. This happens with our debug malloc (see http://b/7941716).
|
2014-05-06 01:49:04 +02:00
|
|
|
gSoInfoAllocator.protect_all(PROT_READ | PROT_WRITE);
|
2012-08-13 15:58:37 +02:00
|
|
|
}
|
|
|
|
|
2012-11-01 23:16:56 +01:00
|
|
|
void soinfo::CallPreInitConstructors() {
|
2013-05-09 23:19:58 +02:00
|
|
|
// DT_PREINIT_ARRAY functions are called before any other constructors for executables,
|
|
|
|
// but ignored in a shared library.
|
2012-11-01 23:16:56 +01:00
|
|
|
CallArray("DT_PREINIT_ARRAY", preinit_array, preinit_array_count, false);
|
|
|
|
}
|
2011-12-21 10:03:54 +01:00
|
|
|
|
2012-11-01 23:16:56 +01:00
|
|
|
void soinfo::CallConstructors() {
|
|
|
|
if (constructors_called) {
|
|
|
|
return;
|
|
|
|
}
|
2011-12-21 10:03:54 +01:00
|
|
|
|
2012-11-01 23:16:56 +01:00
|
|
|
// We set constructors_called before actually calling the constructors, otherwise it doesn't
|
|
|
|
// protect against recursive constructor calls. One simple example of constructor recursion
|
|
|
|
// is the libc debug malloc, which is implemented in libc_malloc_debug_leak.so:
|
|
|
|
// 1. The program depends on libc, so libc's constructor is called here.
|
|
|
|
// 2. The libc constructor calls dlopen() to load libc_malloc_debug_leak.so.
|
|
|
|
// 3. dlopen() calls the constructors on the newly created
|
|
|
|
// soinfo for libc_malloc_debug_leak.so.
|
|
|
|
// 4. The debug .so depends on libc, so CallConstructors is
|
|
|
|
// called again with the libc soinfo. If it doesn't trigger the early-
|
|
|
|
// out above, the libc constructor will be called again (recursively!).
|
|
|
|
constructors_called = true;
|
|
|
|
|
2013-05-09 23:19:58 +02:00
|
|
|
if ((flags & FLAG_EXE) == 0 && preinit_array != NULL) {
|
|
|
|
// The GNU dynamic linker silently ignores these, but we warn the developer.
|
2013-10-01 03:43:46 +02:00
|
|
|
PRINT("\"%s\": ignoring %zd-entry DT_PREINIT_ARRAY in shared library!",
|
2013-05-09 23:19:58 +02:00
|
|
|
name, preinit_array_count);
|
2012-11-01 23:16:56 +01:00
|
|
|
}
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2013-03-01 00:58:45 +01:00
|
|
|
if (dynamic != NULL) {
|
2014-02-11 02:46:57 +01:00
|
|
|
for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
|
2013-03-01 00:58:45 +01:00
|
|
|
if (d->d_tag == DT_NEEDED) {
|
|
|
|
const char* library_name = strtab + d->d_un.d_val;
|
2013-05-09 23:19:58 +02:00
|
|
|
TRACE("\"%s\": calling constructors in DT_NEEDED \"%s\"", name, library_name);
|
|
|
|
find_loaded_library(library_name)->CallConstructors();
|
2012-11-01 23:16:56 +01:00
|
|
|
}
|
2009-03-04 04:28:35 +01:00
|
|
|
}
|
2012-11-01 23:16:56 +01:00
|
|
|
}
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2013-05-09 23:19:58 +02:00
|
|
|
TRACE("\"%s\": calling constructors", name);
|
|
|
|
|
|
|
|
// DT_INIT should be called before DT_INIT_ARRAY if both are present.
|
2012-11-01 23:16:56 +01:00
|
|
|
CallFunction("DT_INIT", init_func);
|
|
|
|
CallArray("DT_INIT_ARRAY", init_array, init_array_count, false);
|
2011-12-21 10:03:54 +01:00
|
|
|
}
|
2009-05-18 14:37:41 +02:00
|
|
|
|
2012-11-01 23:16:56 +01:00
|
|
|
void soinfo::CallDestructors() {
|
2013-05-09 23:19:58 +02:00
|
|
|
TRACE("\"%s\": calling destructors", name);
|
|
|
|
|
|
|
|
// DT_FINI_ARRAY must be parsed in reverse order.
|
2012-11-01 23:16:56 +01:00
|
|
|
CallArray("DT_FINI_ARRAY", fini_array, fini_array_count, true);
|
2013-05-09 23:19:58 +02:00
|
|
|
|
|
|
|
// DT_FINI should be called after DT_FINI_ARRAY if both are present.
|
2012-11-01 23:16:56 +01:00
|
|
|
CallFunction("DT_FINI", fini_func);
|
2009-03-04 04:28:35 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Force any of the closed stdin, stdout and stderr to be associated with
|
|
|
|
/dev/null. */
|
2012-10-17 00:54:46 +02:00
|
|
|
static int nullify_closed_stdio() {
|
2009-03-04 04:28:35 +01:00
|
|
|
int dev_null, i, status;
|
|
|
|
int return_value = 0;
|
|
|
|
|
2012-06-12 16:25:37 +02:00
|
|
|
dev_null = TEMP_FAILURE_RETRY(open("/dev/null", O_RDWR));
|
2009-03-04 04:28:35 +01:00
|
|
|
if (dev_null < 0) {
|
2012-08-04 01:49:39 +02:00
|
|
|
DL_ERR("cannot open /dev/null: %s", strerror(errno));
|
2009-03-04 04:28:35 +01:00
|
|
|
return -1;
|
|
|
|
}
|
2013-03-12 18:40:45 +01:00
|
|
|
TRACE("[ Opened /dev/null file-descriptor=%d]", dev_null);
|
2009-03-04 04:28:35 +01:00
|
|
|
|
|
|
|
/* If any of the stdio file descriptors is valid and not associated
|
|
|
|
with /dev/null, dup /dev/null to it. */
|
|
|
|
for (i = 0; i < 3; i++) {
|
|
|
|
/* If it is /dev/null already, we are done. */
|
2012-08-04 01:49:39 +02:00
|
|
|
if (i == dev_null) {
|
2009-03-04 04:28:35 +01:00
|
|
|
continue;
|
2012-08-04 01:49:39 +02:00
|
|
|
}
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2013-03-12 18:40:45 +01:00
|
|
|
TRACE("[ Nullifying stdio file descriptor %d]", i);
|
2012-08-04 01:49:39 +02:00
|
|
|
status = TEMP_FAILURE_RETRY(fcntl(i, F_GETFL));
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2012-08-04 01:49:39 +02:00
|
|
|
/* If file is opened, we are good. */
|
|
|
|
if (status != -1) {
|
|
|
|
continue;
|
|
|
|
}
|
2009-03-04 04:28:35 +01:00
|
|
|
|
|
|
|
/* The only error we allow is that the file descriptor does not
|
|
|
|
exist, in which case we dup /dev/null to it. */
|
|
|
|
if (errno != EBADF) {
|
2012-08-04 01:49:39 +02:00
|
|
|
DL_ERR("fcntl failed: %s", strerror(errno));
|
2009-03-04 04:28:35 +01:00
|
|
|
return_value = -1;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Try dupping /dev/null to this stdio file descriptor and
|
|
|
|
repeat if there is a signal. Note that any errors in closing
|
|
|
|
the stdio descriptor are lost. */
|
2012-08-04 01:49:39 +02:00
|
|
|
status = TEMP_FAILURE_RETRY(dup2(dev_null, i));
|
2009-03-04 04:28:35 +01:00
|
|
|
if (status < 0) {
|
2012-08-04 01:49:39 +02:00
|
|
|
DL_ERR("dup2 failed: %s", strerror(errno));
|
2009-03-04 04:28:35 +01:00
|
|
|
return_value = -1;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If /dev/null is not one of the stdio file descriptors, close it. */
|
|
|
|
if (dev_null > 2) {
|
2013-03-12 18:40:45 +01:00
|
|
|
TRACE("[ Closing /dev/null file-descriptor=%d]", dev_null);
|
2012-08-04 01:49:39 +02:00
|
|
|
status = TEMP_FAILURE_RETRY(close(dev_null));
|
|
|
|
if (status == -1) {
|
|
|
|
DL_ERR("close failed: %s", strerror(errno));
|
2009-03-04 04:28:35 +01:00
|
|
|
return_value = -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return return_value;
|
|
|
|
}
|
|
|
|
|
2014-02-27 14:18:00 +01:00
|
|
|
static bool soinfo_link_image(soinfo* si, const android_dlextinfo* extinfo) {
|
2012-05-31 13:20:36 +02:00
|
|
|
/* "base" might wrap around UINT32_MAX. */
|
2014-02-11 02:46:57 +01:00
|
|
|
ElfW(Addr) base = si->load_bias;
|
|
|
|
const ElfW(Phdr)* phdr = si->phdr;
|
2009-03-04 04:28:35 +01:00
|
|
|
int phnum = si->phnum;
|
2013-03-01 00:58:45 +01:00
|
|
|
bool relocating_linker = (si->flags & FLAG_LINKER) != 0;
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2012-06-19 01:24:17 +02:00
|
|
|
/* We can't debug anything until the linker is relocated */
|
|
|
|
if (!relocating_linker) {
|
2013-03-12 18:40:45 +01:00
|
|
|
INFO("[ linking %s ]", si->name);
|
2013-10-05 02:01:33 +02:00
|
|
|
DEBUG("si->base = %p si->flags = 0x%08x", reinterpret_cast<void*>(si->base), si->flags);
|
2012-06-19 01:24:17 +02:00
|
|
|
}
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2012-06-19 00:08:39 +02:00
|
|
|
/* Extract dynamic section */
|
2012-10-31 22:20:03 +01:00
|
|
|
size_t dynamic_count;
|
2014-02-11 02:46:57 +01:00
|
|
|
ElfW(Word) dynamic_flags;
|
linker: avoid clobbering the .dynamic section of shared libs
This patch removes the DT_NEEDED hack which stores pointers
to soinfo structs in the .dynamic section of the library
being loaded.
Instead, it caches the soinfo struct pointers on the stack
during relocation time. After relocation time, i.e. when
calling constructors and destructors of the shared library
and its dependencies, uncached access is used instead,
doing lookups using the string table entries pointed to by
the DT_NEEDED entries.
By removing this hack, it is no longer needed to undo the
PT_GNURELRO protection, i.e., all non-writable mappings
can remain non-writable during their entire lifespan.
Even though, strictly speaking, the algorithmic complexity
has increased somewhat, the real-world adverse effect
is negligible on the systems I have tested.
Change-Id: I2361502560b96b5878f7f94a8e8a215350d70d64
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@gmail.com>
2012-08-14 12:30:09 +02:00
|
|
|
phdr_table_get_dynamic_section(phdr, phnum, base, &si->dynamic,
|
2013-01-12 00:32:20 +01:00
|
|
|
&dynamic_count, &dynamic_flags);
|
2012-06-19 00:08:39 +02:00
|
|
|
if (si->dynamic == NULL) {
|
2012-06-19 01:24:17 +02:00
|
|
|
if (!relocating_linker) {
|
2012-10-31 22:20:03 +01:00
|
|
|
DL_ERR("missing PT_DYNAMIC in \"%s\"", si->name);
|
2012-06-19 01:24:17 +02:00
|
|
|
}
|
2012-10-31 22:20:03 +01:00
|
|
|
return false;
|
2012-06-19 00:08:39 +02:00
|
|
|
} else {
|
2012-06-19 01:24:17 +02:00
|
|
|
if (!relocating_linker) {
|
2013-03-12 18:40:45 +01:00
|
|
|
DEBUG("dynamic = %p", si->dynamic);
|
2012-06-19 01:24:17 +02:00
|
|
|
}
|
2012-06-19 00:08:39 +02:00
|
|
|
}
|
|
|
|
|
2013-10-26 02:38:02 +02:00
|
|
|
#if defined(__arm__)
|
2012-06-19 00:08:39 +02:00
|
|
|
(void) phdr_table_get_arm_exidx(phdr, phnum, base,
|
|
|
|
&si->ARM_exidx, &si->ARM_exidx_count);
|
|
|
|
#endif
|
|
|
|
|
2013-05-09 23:19:58 +02:00
|
|
|
// Extract useful information from dynamic section.
|
2013-03-01 00:58:45 +01:00
|
|
|
uint32_t needed_count = 0;
|
2014-02-11 02:46:57 +01:00
|
|
|
for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
|
2013-10-05 02:01:33 +02:00
|
|
|
DEBUG("d = %p, d[0](tag) = %p d[1](val) = %p",
|
|
|
|
d, reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
|
|
|
|
switch (d->d_tag) {
|
2009-03-04 04:28:35 +01:00
|
|
|
case DT_HASH:
|
2014-02-12 01:59:37 +01:00
|
|
|
si->nbucket = reinterpret_cast<uint32_t*>(base + d->d_un.d_ptr)[0];
|
|
|
|
si->nchain = reinterpret_cast<uint32_t*>(base + d->d_un.d_ptr)[1];
|
|
|
|
si->bucket = reinterpret_cast<uint32_t*>(base + d->d_un.d_ptr + 8);
|
|
|
|
si->chain = reinterpret_cast<uint32_t*>(base + d->d_un.d_ptr + 8 + si->nbucket * 4);
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
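          // For reference, a rough sketch of the DT_HASH section layout that the
          // four loads above assume (classic SysV hash; entries are 32-bit words):
          //   word[0]                            nbucket
          //   word[1]                            nchain (== number of symbols)
          //   word[2 .. 2 + nbucket - 1]         bucket[]
          //   word[2 + nbucket .. + nchain - 1]  chain[]
          // hence the byte offsets "+ 8" and "+ 8 + nbucket * 4" in the code.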
|
|
|
|
case DT_STRTAB:
|
2014-02-12 01:59:37 +01:00
|
|
|
si->strtab = reinterpret_cast<const char*>(base + d->d_un.d_ptr);
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
|
|
|
case DT_SYMTAB:
|
2014-02-12 01:59:37 +01:00
|
|
|
si->symtab = reinterpret_cast<ElfW(Sym)*>(base + d->d_un.d_ptr);
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
2013-10-26 02:38:02 +02:00
|
|
|
#if !defined(__LP64__)
|
2009-03-04 04:28:35 +01:00
|
|
|
case DT_PLTREL:
|
2013-03-01 00:58:45 +01:00
|
|
|
if (d->d_un.d_val != DT_REL) {
|
2012-10-31 22:20:03 +01:00
|
|
|
DL_ERR("unsupported DT_RELA in \"%s\"", si->name);
|
|
|
|
return false;
|
2009-03-04 04:28:35 +01:00
|
|
|
}
|
|
|
|
break;
|
2013-10-05 02:01:33 +02:00
|
|
|
#endif
|
2009-03-04 04:28:35 +01:00
|
|
|
case DT_JMPREL:
|
2013-10-26 02:38:02 +02:00
|
|
|
#if defined(USE_RELA)
|
2014-02-12 01:59:37 +01:00
|
|
|
si->plt_rela = reinterpret_cast<ElfW(Rela)*>(base + d->d_un.d_ptr);
|
2013-10-05 02:01:33 +02:00
|
|
|
#else
|
2014-02-12 01:59:37 +01:00
|
|
|
si->plt_rel = reinterpret_cast<ElfW(Rel)*>(base + d->d_un.d_ptr);
|
2013-10-05 02:01:33 +02:00
|
|
|
#endif
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
|
|
|
case DT_PLTRELSZ:
|
2013-10-26 02:38:02 +02:00
|
|
|
#if defined(USE_RELA)
|
2014-02-11 02:46:57 +01:00
|
|
|
si->plt_rela_count = d->d_un.d_val / sizeof(ElfW(Rela));
|
2013-10-05 02:01:33 +02:00
|
|
|
#else
|
2014-02-11 02:46:57 +01:00
|
|
|
si->plt_rel_count = d->d_un.d_val / sizeof(ElfW(Rel));
|
2013-10-05 02:01:33 +02:00
|
|
|
#endif
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
2014-02-07 05:36:51 +01:00
|
|
|
#if defined(__mips__)
|
2009-03-04 04:28:35 +01:00
|
|
|
case DT_PLTGOT:
|
2014-02-07 05:36:51 +01:00
|
|
|
// Used by mips and mips64.
|
|
|
|
si->plt_got = reinterpret_cast<ElfW(Addr)**>(base + d->d_un.d_ptr);
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
2013-10-05 02:01:33 +02:00
|
|
|
#endif
|
2009-03-04 04:28:35 +01:00
|
|
|
case DT_DEBUG:
|
2012-08-14 23:07:59 +02:00
|
|
|
// Set the DT_DEBUG entry to the address of _r_debug for GDB
|
2013-01-12 00:32:20 +01:00
|
|
|
// if the dynamic table is writable
|
2014-02-07 05:36:51 +01:00
|
|
|
// FIXME: not working currently for N64
|
|
|
|
// The flags for the LOAD and DYNAMIC program headers do not agree.
|
|
|
|
// The LOAD section containing the dynamic table has been mapped as
|
|
|
|
// read-only, but the DYNAMIC header claims it is writable.
|
|
|
|
#if !(defined(__mips__) && defined(__LP64__))
|
2013-01-14 18:56:21 +01:00
|
|
|
if ((dynamic_flags & PF_W) != 0) {
|
2013-10-01 03:43:46 +02:00
|
|
|
d->d_un.d_val = reinterpret_cast<uintptr_t>(&_r_debug);
|
2013-01-14 18:56:21 +01:00
|
|
|
}
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
2014-02-07 05:36:51 +01:00
|
|
|
#endif
|
2013-10-26 02:38:02 +02:00
|
|
|
#if defined(USE_RELA)
|
2013-10-05 02:01:33 +02:00
|
|
|
case DT_RELA:
|
2014-02-12 01:59:37 +01:00
|
|
|
si->rela = reinterpret_cast<ElfW(Rela)*>(base + d->d_un.d_ptr);
|
2013-10-05 02:01:33 +02:00
|
|
|
break;
|
|
|
|
case DT_RELASZ:
|
2014-02-11 02:46:57 +01:00
|
|
|
si->rela_count = d->d_un.d_val / sizeof(ElfW(Rela));
|
2013-10-05 02:01:33 +02:00
|
|
|
break;
|
|
|
|
case DT_REL:
|
|
|
|
DL_ERR("unsupported DT_REL in \"%s\"", si->name);
|
|
|
|
return false;
|
|
|
|
case DT_RELSZ:
|
|
|
|
DL_ERR("unsupported DT_RELSZ in \"%s\"", si->name);
|
|
|
|
return false;
|
|
|
|
#else
|
|
|
|
case DT_REL:
|
2014-02-12 01:59:37 +01:00
|
|
|
si->rel = reinterpret_cast<ElfW(Rel)*>(base + d->d_un.d_ptr);
|
2013-10-05 02:01:33 +02:00
|
|
|
break;
|
|
|
|
case DT_RELSZ:
|
2014-02-11 02:46:57 +01:00
|
|
|
si->rel_count = d->d_un.d_val / sizeof(ElfW(Rel));
|
2013-10-05 02:01:33 +02:00
|
|
|
break;
|
2009-11-06 02:36:37 +01:00
|
|
|
case DT_RELA:
|
2012-10-31 22:20:03 +01:00
|
|
|
DL_ERR("unsupported DT_RELA in \"%s\"", si->name);
|
|
|
|
return false;
|
2013-10-05 02:01:33 +02:00
|
|
|
#endif
|
2009-03-04 04:28:35 +01:00
|
|
|
case DT_INIT:
|
2013-03-12 18:40:45 +01:00
|
|
|
si->init_func = reinterpret_cast<linker_function_t>(base + d->d_un.d_ptr);
|
2013-05-09 23:19:58 +02:00
|
|
|
DEBUG("%s constructors (DT_INIT) found at %p", si->name, si->init_func);
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
|
|
|
case DT_FINI:
|
2013-03-12 18:40:45 +01:00
|
|
|
si->fini_func = reinterpret_cast<linker_function_t>(base + d->d_un.d_ptr);
|
2013-05-09 23:19:58 +02:00
|
|
|
DEBUG("%s destructors (DT_FINI) found at %p", si->name, si->fini_func);
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
|
|
|
case DT_INIT_ARRAY:
|
2013-03-12 18:40:45 +01:00
|
|
|
si->init_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
|
2013-05-09 23:19:58 +02:00
|
|
|
DEBUG("%s constructors (DT_INIT_ARRAY) found at %p", si->name, si->init_array);
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
|
|
|
case DT_INIT_ARRAYSZ:
|
2014-02-11 02:46:57 +01:00
|
|
|
si->init_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
|
|
|
case DT_FINI_ARRAY:
|
2013-03-12 18:40:45 +01:00
|
|
|
si->fini_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
|
2013-05-09 23:19:58 +02:00
|
|
|
DEBUG("%s destructors (DT_FINI_ARRAY) found at %p", si->name, si->fini_array);
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
|
|
|
case DT_FINI_ARRAYSZ:
|
2014-02-11 02:46:57 +01:00
|
|
|
si->fini_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
|
|
|
case DT_PREINIT_ARRAY:
|
2013-03-12 18:40:45 +01:00
|
|
|
si->preinit_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
|
2013-05-09 23:19:58 +02:00
|
|
|
DEBUG("%s constructors (DT_PREINIT_ARRAY) found at %p", si->name, si->preinit_array);
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
|
|
|
case DT_PREINIT_ARRAYSZ:
|
2014-02-11 02:46:57 +01:00
|
|
|
si->preinit_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
|
|
|
case DT_TEXTREL:
|
2013-10-28 22:19:05 +01:00
|
|
|
#if defined(__LP64__)
|
|
|
|
DL_ERR("text relocations (DT_TEXTREL) found in 64-bit ELF file \"%s\"", si->name);
|
|
|
|
return false;
|
|
|
|
#else
|
2012-08-11 06:08:42 +02:00
|
|
|
si->has_text_relocations = true;
|
2009-03-04 04:28:35 +01:00
|
|
|
break;
|
2013-10-28 22:19:05 +01:00
|
|
|
#endif
|
2012-08-30 12:48:32 +02:00
|
|
|
case DT_SYMBOLIC:
|
|
|
|
si->has_DT_SYMBOLIC = true;
|
|
|
|
break;
|
2013-03-01 00:58:45 +01:00
|
|
|
case DT_NEEDED:
|
|
|
|
++needed_count;
|
|
|
|
break;
|
2012-08-30 12:48:32 +02:00
|
|
|
case DT_FLAGS:
|
2013-03-01 00:58:45 +01:00
|
|
|
if (d->d_un.d_val & DF_TEXTREL) {
|
2013-10-28 22:19:05 +01:00
|
|
|
#if defined(__LP64__)
|
|
|
|
DL_ERR("text relocations (DF_TEXTREL) found in 64-bit ELF file \"%s\"", si->name);
|
|
|
|
return false;
|
|
|
|
#else
|
2012-08-30 12:48:32 +02:00
|
|
|
si->has_text_relocations = true;
|
2013-10-28 22:19:05 +01:00
|
|
|
#endif
|
2012-08-30 12:48:32 +02:00
|
|
|
}
|
2013-03-01 00:58:45 +01:00
|
|
|
if (d->d_un.d_val & DF_SYMBOLIC) {
|
2012-08-30 12:48:32 +02:00
|
|
|
si->has_DT_SYMBOLIC = true;
|
|
|
|
}
|
|
|
|
break;
|
2013-10-26 02:38:02 +02:00
|
|
|
#if defined(__mips__)
|
2012-07-31 21:07:22 +02:00
|
|
|
case DT_STRSZ:
|
|
|
|
case DT_SYMENT:
|
|
|
|
case DT_RELENT:
|
|
|
|
break;
|
|
|
|
case DT_MIPS_RLD_MAP:
|
2012-08-14 23:07:59 +02:00
|
|
|
// Set the DT_MIPS_RLD_MAP entry to the address of _r_debug for GDB.
|
2012-07-31 21:07:22 +02:00
|
|
|
{
|
2014-02-19 00:50:32 +01:00
|
|
|
r_debug** dp = reinterpret_cast<r_debug**>(base + d->d_un.d_ptr);
|
2012-07-31 21:07:22 +02:00
|
|
|
*dp = &_r_debug;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case DT_MIPS_RLD_VERSION:
|
|
|
|
case DT_MIPS_FLAGS:
|
|
|
|
case DT_MIPS_BASE_ADDRESS:
|
|
|
|
case DT_MIPS_UNREFEXTNO:
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_MIPS_SYMTABNO:
|
2013-03-01 00:58:45 +01:00
|
|
|
si->mips_symtabno = d->d_un.d_val;
|
2012-07-31 21:07:22 +02:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_MIPS_LOCAL_GOTNO:
|
2013-03-01 00:58:45 +01:00
|
|
|
si->mips_local_gotno = d->d_un.d_val;
|
2012-07-31 21:07:22 +02:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_MIPS_GOTSYM:
|
2013-03-01 00:58:45 +01:00
|
|
|
si->mips_gotsym = d->d_un.d_val;
|
2012-07-31 21:07:22 +02:00
|
|
|
break;
|
2013-10-26 02:38:02 +02:00
|
|
|
#endif
|
2012-07-31 21:07:22 +02:00
|
|
|
|
|
|
|
default:
|
2013-10-26 02:38:02 +02:00
|
|
|
DEBUG("Unused DT entry: type %p arg %p",
|
|
|
|
reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
|
2012-07-31 21:07:22 +02:00
|
|
|
break;
|
2009-03-04 04:28:35 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-10-05 02:01:33 +02:00
|
|
|
DEBUG("si->base = %p, si->strtab = %p, si->symtab = %p",
|
|
|
|
reinterpret_cast<void*>(si->base), si->strtab, si->symtab);
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2012-10-31 22:20:03 +01:00
|
|
|
// Sanity checks.
|
2013-03-01 00:58:45 +01:00
|
|
|
if (relocating_linker && needed_count != 0) {
|
|
|
|
DL_ERR("linker cannot have DT_NEEDED dependencies on other libraries");
|
|
|
|
return false;
|
|
|
|
}
|
2012-10-31 22:20:03 +01:00
|
|
|
if (si->nbucket == 0) {
|
|
|
|
DL_ERR("empty/missing DT_HASH in \"%s\" (built with --hash-style=gnu?)", si->name);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (si->strtab == 0) {
|
|
|
|
DL_ERR("empty/missing DT_STRTAB in \"%s\"", si->name);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (si->symtab == 0) {
|
|
|
|
DL_ERR("empty/missing DT_SYMTAB in \"%s\"", si->name);
|
|
|
|
return false;
|
2009-03-04 04:28:35 +01:00
|
|
|
}
|
|
|
|
|
2013-06-18 22:15:00 +02:00
|
|
|
// If this is the main executable, then load all of the libraries from LD_PRELOAD now.
|
2012-11-01 23:16:56 +01:00
|
|
|
if (si->flags & FLAG_EXE) {
|
2013-03-01 00:58:45 +01:00
|
|
|
memset(gLdPreloads, 0, sizeof(gLdPreloads));
|
2013-06-18 22:15:00 +02:00
|
|
|
size_t preload_count = 0;
|
2012-11-01 23:16:56 +01:00
|
|
|
for (size_t i = 0; gLdPreloadNames[i] != NULL; i++) {
|
2014-02-06 15:34:21 +01:00
|
|
|
soinfo* lsi = find_library(gLdPreloadNames[i], NULL);
|
2013-06-18 22:15:00 +02:00
|
|
|
if (lsi != NULL) {
|
|
|
|
gLdPreloads[preload_count++] = lsi;
|
|
|
|
} else {
|
|
|
|
// As with glibc, failure to load an LD_PRELOAD library is just a warning.
|
|
|
|
DL_WARN("could not load library \"%s\" from LD_PRELOAD for \"%s\"; caused by %s",
|
|
|
|
gLdPreloadNames[i], si->name, linker_get_error_buffer());
|
2009-12-31 19:09:10 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-02-12 01:59:37 +01:00
|
|
|
soinfo** needed = reinterpret_cast<soinfo**>(alloca((1 + needed_count) * sizeof(soinfo*)));
|
2013-03-01 00:58:45 +01:00
|
|
|
soinfo** pneeded = needed;
|
2012-08-14 12:30:09 +02:00
|
|
|
|
2014-02-11 02:46:57 +01:00
|
|
|
for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
|
2013-03-01 00:58:45 +01:00
|
|
|
if (d->d_tag == DT_NEEDED) {
|
|
|
|
const char* library_name = si->strtab + d->d_un.d_val;
|
2013-03-12 18:40:45 +01:00
|
|
|
DEBUG("%s needs %s", si->name, library_name);
|
2014-02-06 15:34:21 +01:00
|
|
|
soinfo* lsi = find_library(library_name, NULL);
|
2012-11-01 23:16:56 +01:00
|
|
|
if (lsi == NULL) {
|
2013-03-06 03:47:58 +01:00
|
|
|
strlcpy(tmp_err_buf, linker_get_error_buffer(), sizeof(tmp_err_buf));
|
2012-08-04 01:49:39 +02:00
|
|
|
DL_ERR("could not load library \"%s\" needed by \"%s\"; caused by %s",
|
2013-03-01 00:58:45 +01:00
|
|
|
library_name, si->name, tmp_err_buf);
|
2012-10-31 22:20:03 +01:00
|
|
|
return false;
|
2009-03-04 04:28:35 +01:00
|
|
|
}
|
2012-08-14 12:30:09 +02:00
|
|
|
*pneeded++ = lsi;
|
2009-03-04 04:28:35 +01:00
|
|
|
}
|
|
|
|
}
|
2012-08-14 12:30:09 +02:00
|
|
|
*pneeded = NULL;
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2013-10-28 22:19:05 +01:00
|
|
|
#if !defined(__LP64__)
|
2012-08-11 06:08:42 +02:00
|
|
|
if (si->has_text_relocations) {
|
2013-10-28 22:19:05 +01:00
|
|
|
// Make segments writable to allow text relocations to work properly. We will later call
|
|
|
|
// phdr_table_protect_segments() after all of them are applied and all constructors are run.
|
2014-04-18 13:17:40 +02:00
|
|
|
#if !defined(__i386__) // The platform itself has too many text relocations on x86.
|
2013-10-22 21:06:36 +02:00
|
|
|
DL_WARN("%s has text relocations. This is wasting memory and prevents "
|
|
|
|
"security hardening. Please fix.", si->name);
|
2014-04-18 13:17:40 +02:00
|
|
|
#endif
|
2012-08-11 06:08:42 +02:00
|
|
|
if (phdr_table_unprotect_segments(si->phdr, si->phnum, si->load_bias) < 0) {
|
|
|
|
DL_ERR("can't unprotect loadable segments for \"%s\": %s",
|
|
|
|
si->name, strerror(errno));
|
2012-10-31 22:20:03 +01:00
|
|
|
return false;
|
2012-08-11 06:08:42 +02:00
|
|
|
}
|
|
|
|
}
|
2013-10-28 22:19:05 +01:00
|
|
|
#endif
|
2012-08-11 06:08:42 +02:00
|
|
|
|
2013-10-26 02:38:02 +02:00
|
|
|
#if defined(USE_RELA)
|
2013-10-05 02:01:33 +02:00
|
|
|
if (si->plt_rela != NULL) {
|
2014-02-12 01:59:37 +01:00
|
|
|
DEBUG("[ relocating %s plt ]\n", si->name);
|
2014-02-07 05:36:51 +01:00
|
|
|
if (soinfo_relocate(si, si->plt_rela, si->plt_rela_count, needed)) {
|
2013-10-05 02:01:33 +02:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (si->rela != NULL) {
|
2014-02-12 01:59:37 +01:00
|
|
|
DEBUG("[ relocating %s ]\n", si->name);
|
2014-02-07 05:36:51 +01:00
|
|
|
if (soinfo_relocate(si, si->rela, si->rela_count, needed)) {
|
2013-10-05 02:01:33 +02:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#else
|
2013-03-01 00:58:45 +01:00
|
|
|
if (si->plt_rel != NULL) {
|
2014-02-12 01:59:37 +01:00
|
|
|
DEBUG("[ relocating %s plt ]", si->name);
|
2013-03-01 00:58:45 +01:00
|
|
|
if (soinfo_relocate(si, si->plt_rel, si->plt_rel_count, needed)) {
|
2012-10-31 22:20:03 +01:00
|
|
|
return false;
|
|
|
|
}
|
2009-03-04 04:28:35 +01:00
|
|
|
}
|
2013-03-01 00:58:45 +01:00
|
|
|
if (si->rel != NULL) {
|
2014-02-12 01:59:37 +01:00
|
|
|
DEBUG("[ relocating %s ]", si->name);
|
2013-03-01 00:58:45 +01:00
|
|
|
if (soinfo_relocate(si, si->rel, si->rel_count, needed)) {
|
2012-10-31 22:20:03 +01:00
|
|
|
return false;
|
|
|
|
}
|
2009-03-04 04:28:35 +01:00
|
|
|
}
|
2013-10-05 02:01:33 +02:00
|
|
|
#endif
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2013-10-26 02:38:02 +02:00
|
|
|
#if defined(__mips__)
|
2013-08-21 06:05:44 +02:00
|
|
|
if (!mips_relocate_got(si, needed)) {
|
2012-10-31 22:20:03 +01:00
|
|
|
return false;
|
2012-07-31 21:07:22 +02:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2009-03-04 04:28:35 +01:00
|
|
|
si->flags |= FLAG_LINKED;
|
2013-03-12 18:40:45 +01:00
|
|
|
DEBUG("[ finished linking %s ]", si->name);
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2013-10-28 22:19:05 +01:00
|
|
|
#if !defined(__LP64__)
|
2012-08-11 06:08:42 +02:00
|
|
|
if (si->has_text_relocations) {
|
2013-10-28 22:19:05 +01:00
|
|
|
// All relocations are done, we can protect our segments back to read-only.
|
2012-08-11 06:08:42 +02:00
|
|
|
if (phdr_table_protect_segments(si->phdr, si->phnum, si->load_bias) < 0) {
|
|
|
|
DL_ERR("can't protect segments for \"%s\": %s",
|
|
|
|
si->name, strerror(errno));
|
2012-10-31 22:20:03 +01:00
|
|
|
return false;
|
2012-08-11 06:08:42 +02:00
|
|
|
}
|
2009-03-04 04:28:35 +01:00
|
|
|
}
|
2013-10-28 22:19:05 +01:00
|
|
|
#endif
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2012-06-19 01:24:17 +02:00
|
|
|
/* We can also turn on GNU RELRO protection */
|
|
|
|
if (phdr_table_protect_gnu_relro(si->phdr, si->phnum, si->load_bias) < 0) {
|
2012-08-04 01:49:39 +02:00
|
|
|
DL_ERR("can't enable GNU RELRO protection for \"%s\": %s",
|
|
|
|
si->name, strerror(errno));
|
2012-10-31 22:20:03 +01:00
|
|
|
return false;
|
2012-02-28 19:40:00 +01:00
|
|
|
}
|
|
|
|
|
2014-02-27 14:18:00 +01:00
|
|
|
/* Handle serializing/sharing the RELRO segment */
|
|
|
|
if (extinfo && (extinfo->flags & ANDROID_DLEXT_WRITE_RELRO)) {
|
|
|
|
if (phdr_table_serialize_gnu_relro(si->phdr, si->phnum, si->load_bias,
|
|
|
|
extinfo->relro_fd) < 0) {
|
|
|
|
DL_ERR("failed serializing GNU RELRO section for \"%s\": %s",
|
|
|
|
si->name, strerror(errno));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
} else if (extinfo && (extinfo->flags & ANDROID_DLEXT_USE_RELRO)) {
|
|
|
|
if (phdr_table_map_gnu_relro(si->phdr, si->phnum, si->load_bias,
|
|
|
|
extinfo->relro_fd) < 0) {
|
|
|
|
DL_ERR("failed mapping GNU RELRO section for \"%s\": %s",
|
|
|
|
si->name, strerror(errno));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-03-04 04:28:35 +01:00
|
|
|
notify_gdb_of_load(si);
|
2012-10-31 22:20:03 +01:00
|
|
|
return true;
|
2009-03-04 04:28:35 +01:00
|
|
|
}
|
|
|
|
|
2013-01-25 13:40:13 +01:00
|
|
|
/*
|
|
|
|
* This function adds the vdso to the internal dso list.
|
|
|
|
* It helps with stack unwinding through signal handlers.
|
|
|
|
* Also, it makes bionic more like glibc.
|
|
|
|
*/
|
2014-03-25 15:53:56 +01:00
|
|
|
static void add_vdso(KernelArgumentBlock& args __unused) {
|
2013-10-26 02:38:02 +02:00
|
|
|
#if defined(AT_SYSINFO_EHDR)
|
2014-02-11 02:46:57 +01:00
|
|
|
ElfW(Ehdr)* ehdr_vdso = reinterpret_cast<ElfW(Ehdr)*>(args.getauxval(AT_SYSINFO_EHDR));
|
|
|
|
if (ehdr_vdso == NULL) {
|
|
|
|
return;
|
|
|
|
}
|
2013-01-25 13:40:13 +01:00
|
|
|
|
2014-02-11 02:46:57 +01:00
|
|
|
soinfo* si = soinfo_alloc("[vdso]");
|
2013-10-31 15:02:12 +01:00
|
|
|
|
2014-02-11 02:46:57 +01:00
|
|
|
si->phdr = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
|
|
|
|
si->phnum = ehdr_vdso->e_phnum;
|
|
|
|
si->base = reinterpret_cast<ElfW(Addr)>(ehdr_vdso);
|
|
|
|
si->size = phdr_table_get_load_size(si->phdr, si->phnum);
|
|
|
|
si->flags = 0;
|
|
|
|
si->load_bias = get_elf_exec_load_bias(ehdr_vdso);
|
2013-10-31 15:02:12 +01:00
|
|
|
|
2014-04-22 12:59:26 +02:00
|
|
|
soinfo_link_image(si, NULL);
|
2013-01-25 13:40:13 +01:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2011-11-12 00:53:17 +01:00
|
|
|
/*
|
|
|
|
* This code is called after the linker has linked itself and
|
|
|
|
* fixed its own GOT. It is safe to make references to externs
|
|
|
|
* and other non-local data at this point.
|
|
|
|
*/
|
2014-02-11 02:46:57 +01:00
|
|
|
static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(Addr) linker_base) {
|
2013-02-07 19:14:39 +01:00
|
|
|
/* NOTE: we store the args pointer at a special location
|
2009-07-17 17:55:01 +02:00
|
|
|
* of the temporary TLS area in order to pass it to
|
|
|
|
* the C Library's runtime initializer.
|
|
|
|
*
|
|
|
|
* The initializer must clear the slot and reset the TLS
|
|
|
|
* to point to a different location to ensure that no other
|
|
|
|
* shared library constructor can access it.
|
|
|
|
*/
|
2013-02-08 03:39:34 +01:00
|
|
|
__libc_init_tls(args);
|
2012-03-22 15:01:53 +01:00
|
|
|
|
|
|
|
#if TIMING
|
|
|
|
struct timeval t0, t1;
|
|
|
|
gettimeofday(&t0, 0);
|
|
|
|
#endif
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2012-10-30 01:37:13 +01:00
|
|
|
// Initialize environment functions, and get to the ELF aux vectors table.
|
2013-02-07 19:14:39 +01:00
|
|
|
linker_env_init(args);
|
2010-12-16 19:52:02 +01:00
|
|
|
|
2013-04-25 22:15:24 +02:00
|
|
|
// If this is a setuid/setgid program, close the security hole described in
|
|
|
|
// ftp://ftp.freebsd.org/pub/FreeBSD/CERT/advisories/FreeBSD-SA-02:23.stdio.asc
|
|
|
|
if (get_AT_SECURE()) {
|
|
|
|
nullify_closed_stdio();
|
|
|
|
}
|
|
|
|
|
2013-03-01 00:58:45 +01:00
|
|
|
debuggerd_init();
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2012-10-30 01:37:13 +01:00
|
|
|
// Get a few environment variables.
|
2012-11-02 20:37:13 +01:00
|
|
|
const char* LD_DEBUG = linker_env_get("LD_DEBUG");
|
|
|
|
if (LD_DEBUG != NULL) {
|
2013-03-06 03:47:58 +01:00
|
|
|
gLdDebugVerbosity = atoi(LD_DEBUG);
|
2012-10-30 01:37:13 +01:00
|
|
|
}
|
2010-12-16 19:52:02 +01:00
|
|
|
|
2012-10-30 01:37:13 +01:00
|
|
|
// Normally, these are cleaned by linker_env_init, but the test
|
|
|
|
// doesn't cost us anything.
|
|
|
|
const char* ldpath_env = NULL;
|
|
|
|
const char* ldpreload_env = NULL;
|
|
|
|
if (!get_AT_SECURE()) {
|
|
|
|
ldpath_env = linker_env_get("LD_LIBRARY_PATH");
|
|
|
|
ldpreload_env = linker_env_get("LD_PRELOAD");
|
2009-03-04 04:28:35 +01:00
|
|
|
}
|
|
|
|
|
2014-05-06 01:49:04 +02:00
|
|
|
// Linker does not call constructors for its own
|
|
|
|
// global variables so we need to initialize
|
|
|
|
// the allocator explicitly.
|
|
|
|
gSoInfoAllocator.init();
|
|
|
|
|
2013-03-12 18:40:45 +01:00
|
|
|
INFO("[ android linker & debugger ]");
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2013-02-07 19:14:39 +01:00
|
|
|
soinfo* si = soinfo_alloc(args.argv[0]);
|
2012-10-30 01:37:13 +01:00
|
|
|
if (si == NULL) {
|
|
|
|
exit(EXIT_FAILURE);
|
2009-03-04 04:28:35 +01:00
|
|
|
}
|
|
|
|
|
2012-08-24 22:25:51 +02:00
|
|
|
/* bootstrap the link map, the main exe always needs to be first */
|
2009-03-04 04:28:35 +01:00
|
|
|
si->flags |= FLAG_EXE;
|
2014-02-10 22:31:13 +01:00
|
|
|
link_map* map = &(si->link_map_head);
|
2009-03-04 04:28:35 +01:00
|
|
|
|
|
|
|
map->l_addr = 0;
|
2013-02-07 19:14:39 +01:00
|
|
|
map->l_name = args.argv[0];
|
2009-03-04 04:28:35 +01:00
|
|
|
map->l_prev = NULL;
|
|
|
|
map->l_next = NULL;
|
|
|
|
|
|
|
|
_r_debug.r_map = map;
|
|
|
|
r_debug_tail = map;
|
|
|
|
|
2013-03-01 00:58:45 +01:00
|
|
|
/* gdb expects the linker to be in the debug shared object list.
|
|
|
|
* Without this, gdb has trouble locating the linker's ".text"
|
|
|
|
* and ".plt" sections. Gdb could also potentially use this to
|
|
|
|
* relocate the offset of our exported 'rtld_db_dlactivity' symbol.
|
|
|
|
* Don't use soinfo_alloc(), because the linker shouldn't
|
|
|
|
* be on the soinfo list.
|
2012-08-11 01:07:02 +02:00
|
|
|
*/
|
2013-03-01 00:58:45 +01:00
|
|
|
{
|
|
|
|
static soinfo linker_soinfo;
|
2013-10-26 02:38:02 +02:00
|
|
|
#if defined(__LP64__)
|
2013-02-06 16:21:46 +01:00
|
|
|
strlcpy(linker_soinfo.name, "/system/bin/linker64", sizeof(linker_soinfo.name));
|
|
|
|
#else
|
2013-03-01 00:58:45 +01:00
|
|
|
strlcpy(linker_soinfo.name, "/system/bin/linker", sizeof(linker_soinfo.name));
|
2013-02-06 16:21:46 +01:00
|
|
|
#endif
|
2013-03-01 00:58:45 +01:00
|
|
|
linker_soinfo.flags = 0;
|
|
|
|
linker_soinfo.base = linker_base;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the dynamic field in the link map otherwise gdb will complain with
|
|
|
|
* the following:
|
|
|
|
* warning: .dynamic section for "/system/bin/linker" is not at the
|
|
|
|
* expected address (wrong library or version mismatch?)
|
|
|
|
*/
|
2014-02-11 02:46:57 +01:00
|
|
|
ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_base);
|
2014-02-12 01:59:37 +01:00
|
|
|
ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_base + elf_hdr->e_phoff);
|
2013-03-01 00:58:45 +01:00
|
|
|
phdr_table_get_dynamic_section(phdr, elf_hdr->e_phnum, linker_base,
|
|
|
|
&linker_soinfo.dynamic, NULL, NULL);
|
|
|
|
insert_soinfo_into_debug_map(&linker_soinfo);
|
|
|
|
}
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2013-02-07 19:14:39 +01:00
|
|
|
// Extract information passed from the kernel.
|
2014-02-11 02:46:57 +01:00
|
|
|
si->phdr = reinterpret_cast<ElfW(Phdr)*>(args.getauxval(AT_PHDR));
|
2013-02-07 19:14:39 +01:00
|
|
|
si->phnum = args.getauxval(AT_PHNUM);
|
|
|
|
si->entry = args.getauxval(AT_ENTRY);
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2011-11-15 17:17:28 +01:00
|
|
|
/* Compute the value of si->base. We can't rely on the fact that
|
|
|
|
* the first entry is the PHDR because this will not be true
|
|
|
|
* for certain executables (e.g. some in the NDK unit test suite)
|
|
|
|
*/
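  /* Illustrative arithmetic only (made-up numbers): if the kernel mapped the
   * program headers at 0x400040 and the PT_PHDR entry has p_vaddr = 0x40 and
   * p_offset = 0x40, the loop below computes
   *   load_bias = 0x400040 - 0x40 = 0x400000
   *   base      = 0x400040 - 0x40 = 0x400000
   * i.e. base is where file offset 0 landed in memory and load_bias is what must
   * be added to a p_vaddr to obtain its runtime address.
   */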
|
|
|
|
si->base = 0;
|
2012-06-19 01:24:17 +02:00
|
|
|
si->size = phdr_table_get_load_size(si->phdr, si->phnum);
|
2012-06-18 23:38:46 +02:00
|
|
|
si->load_bias = 0;
|
2013-03-12 18:40:45 +01:00
|
|
|
for (size_t i = 0; i < si->phnum; ++i) {
|
2013-02-07 19:14:39 +01:00
|
|
|
if (si->phdr[i].p_type == PT_PHDR) {
|
2014-02-11 02:46:57 +01:00
|
|
|
si->load_bias = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_vaddr;
|
|
|
|
si->base = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_offset;
|
2013-02-07 19:14:39 +01:00
|
|
|
break;
|
|
|
|
}
|
2011-11-15 17:17:28 +01:00
|
|
|
}
|
2013-03-01 00:58:45 +01:00
|
|
|
si->dynamic = NULL;
|
2013-03-12 18:40:45 +01:00
|
|
|
si->ref_count = 1;
|
2009-03-04 04:28:35 +01:00
|
|
|
|
2014-05-07 19:32:39 +02:00
|
|
|
ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(si->base);
|
|
|
|
if (elf_hdr->e_type != ET_DYN) {
|
|
|
|
__libc_format_fd(2, "error: only position independent executables (PIE) are supported.\n");
|
|
|
|
exit(EXIT_FAILURE);
|
|
|
|
}
|
|
|
|
|
2012-08-04 01:49:39 +02:00
|
|
|
// Use LD_LIBRARY_PATH and LD_PRELOAD (but only if we aren't setuid/setgid).
|
|
|
|
parse_LD_LIBRARY_PATH(ldpath_env);
|
|
|
|
parse_LD_PRELOAD(ldpreload_env);
|
2009-12-31 19:09:10 +01:00
|
|
|
|
2012-08-30 12:48:32 +02:00
|
|
|
somain = si;
|
|
|
|
|
2014-02-27 14:18:00 +01:00
|
|
|
if (!soinfo_link_image(si, NULL)) {
|
2013-03-06 03:47:58 +01:00
|
|
|
__libc_format_fd(2, "CANNOT LINK EXECUTABLE: %s\n", linker_get_error_buffer());
|
2012-10-30 01:37:13 +01:00
|
|
|
exit(EXIT_FAILURE);
|
2009-03-04 04:28:35 +01:00
|
|
|
}
|
|
|
|
|
2013-01-25 13:40:13 +01:00
|
|
|
add_vdso(args);
|
|
|
|
|
2012-11-01 23:16:56 +01:00
|
|
|
si->CallPreInitConstructors();
|
2012-08-13 15:58:37 +02:00
|
|
|
|
2013-03-01 00:58:45 +01:00
|
|
|
for (size_t i = 0; gLdPreloads[i] != NULL; ++i) {
|
|
|
|
gLdPreloads[i]->CallConstructors();
|
2012-07-14 18:49:27 +02:00
|
|
|
}
|
|
|
|
|
2013-03-01 00:58:45 +01:00
|
|
|
/* After link_image, si->load_bias is initialized.
|
|
|
|
* For a shared library, map->l_addr is updated in notify_gdb_of_load.
|
|
|
|
* We need to update this value for the executable here, so that Unwind_Backtrace
|
|
|
|
* works correctly within the executable on architectures such as x86.
|
2012-09-13 12:07:24 +02:00
|
|
|
*/
|
2012-11-15 11:00:17 +01:00
|
|
|
map->l_addr = si->load_bias;
|
2012-11-01 23:16:56 +01:00
|
|
|
si->CallConstructors();
|
2011-12-21 10:03:54 +01:00
|
|
|
|
2009-03-04 04:28:35 +01:00
|
|
|
#if TIMING
|
2014-02-12 01:59:37 +01:00
|
|
|
gettimeofday(&t1, NULL);
|
2013-03-12 18:40:45 +01:00
|
|
|
PRINT("LINKER TIME: %s: %d microseconds", args.argv[0], (int) (
|
2009-03-04 04:28:35 +01:00
|
|
|
(((long long)t1.tv_sec * 1000000LL) + (long long)t1.tv_usec) -
|
2014-02-12 01:59:37 +01:00
|
|
|
(((long long)t0.tv_sec * 1000000LL) + (long long)t0.tv_usec)));
|
2009-03-04 04:28:35 +01:00
|
|
|
#endif
|
|
|
|
#if STATS
|
2013-03-12 18:40:45 +01:00
|
|
|
PRINT("RELO STATS: %s: %d abs, %d rel, %d copy, %d symbol", args.argv[0],
|
2012-08-14 23:07:59 +02:00
|
|
|
linker_stats.count[kRelocAbsolute],
|
|
|
|
linker_stats.count[kRelocRelative],
|
|
|
|
linker_stats.count[kRelocCopy],
|
|
|
|
linker_stats.count[kRelocSymbol]);
|
2009-03-04 04:28:35 +01:00
|
|
|
#endif
|
|
|
|
#if COUNT_PAGES
|
|
|
|
{
|
|
|
|
unsigned n;
|
|
|
|
unsigned i;
|
|
|
|
unsigned count = 0;
|
2013-03-01 00:58:45 +01:00
|
|
|
for (n = 0; n < 4096; n++) {
|
|
|
|
if (bitmask[n]) {
|
2009-03-04 04:28:35 +01:00
|
|
|
unsigned x = bitmask[n];
|
2013-10-10 16:19:31 +02:00
|
|
|
#if defined(__LP64__)
|
|
|
|
for (i = 0; i < 32; i++) {
|
|
|
|
#else
|
2013-03-01 00:58:45 +01:00
|
|
|
for (i = 0; i < 8; i++) {
|
2013-10-10 16:19:31 +02:00
|
|
|
#endif
|
2013-03-01 00:58:45 +01:00
|
|
|
if (x & 1) {
|
|
|
|
count++;
|
|
|
|
}
|
2009-03-04 04:28:35 +01:00
|
|
|
x >>= 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2013-03-12 18:40:45 +01:00
|
|
|
PRINT("PAGES MODIFIED: %s: %d (%dKB)", args.argv[0], count, count * 4);
|
2009-03-04 04:28:35 +01:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if TIMING || STATS || COUNT_PAGES
|
|
|
|
fflush(stdout);
|
|
|
|
#endif
|
|
|
|
|
2013-10-05 02:01:33 +02:00
|
|
|
TRACE("[ Ready to execute '%s' @ %p ]", si->name, reinterpret_cast<void*>(si->entry));
|
2009-03-04 04:28:35 +01:00
|
|
|
return si->entry;
|
|
|
|
}
|
2011-11-12 00:53:17 +01:00
|
|
|
|
2012-06-18 23:38:46 +02:00
|
|
|
/* Compute the load-bias of an existing executable. This shall only
|
|
|
|
* be used to compute the load bias of an executable or shared library
|
|
|
|
* that was loaded by the kernel itself.
|
|
|
|
*
|
|
|
|
* Input:
|
|
|
|
* elf -> address of ELF header, assumed to be at the start of the file.
|
|
|
|
* Return:
|
|
|
|
* load bias, i.e. add the value of any p_vaddr in the file to get
|
|
|
|
* the corresponding address in memory.
|
|
|
|
*/
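/* For illustration only (hypothetical numbers): if the ELF header is mapped at
 * 0xb6f00000 and the first PT_LOAD entry has p_offset == 0 and p_vaddr == 0,
 * the loop below returns 0xb6f00000 + 0 - 0 = 0xb6f00000; adding that bias to
 * any p_vaddr in the file yields the corresponding runtime address.
 */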
|
2014-02-11 02:46:57 +01:00
|
|
|
static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf) {
|
|
|
|
ElfW(Addr) offset = elf->e_phoff;
|
2014-02-12 01:59:37 +01:00
|
|
|
const ElfW(Phdr)* phdr_table = reinterpret_cast<const ElfW(Phdr)*>(reinterpret_cast<uintptr_t>(elf) + offset);
|
2014-02-11 02:46:57 +01:00
|
|
|
const ElfW(Phdr)* phdr_end = phdr_table + elf->e_phnum;
|
2013-03-12 07:58:06 +01:00
|
|
|
|
2014-02-11 02:46:57 +01:00
|
|
|
for (const ElfW(Phdr)* phdr = phdr_table; phdr < phdr_end; phdr++) {
|
2013-03-12 07:58:06 +01:00
|
|
|
if (phdr->p_type == PT_LOAD) {
|
2014-02-11 02:46:57 +01:00
|
|
|
return reinterpret_cast<ElfW(Addr)>(elf) + phdr->p_offset - phdr->p_vaddr;
|
2012-06-18 23:38:46 +02:00
|
|
|
}
|
2013-03-12 07:58:06 +01:00
|
|
|
}
|
|
|
|
return 0;
|
2012-06-18 23:38:46 +02:00
|
|
|
}
|
|
|
|
|
2011-11-12 00:53:17 +01:00
|
|
|
/*
|
|
|
|
* This is the entry point for the linker, called from begin.S. This
|
|
|
|
* method is responsible for fixing the linker's own relocations, and
|
|
|
|
* then calling __linker_init_post_relocation().
|
|
|
|
*
|
|
|
|
* Because this method is called before the linker has fixed its own
|
|
|
|
* relocations, any attempt to reference an extern variable, extern
|
|
|
|
* function, or other GOT reference will generate a segfault.
|
|
|
|
*/
|
2014-02-11 02:46:57 +01:00
|
|
|
extern "C" ElfW(Addr) __linker_init(void* raw_args) {
|
2013-02-07 19:14:39 +01:00
|
|
|
KernelArgumentBlock args(raw_args);
|
|
|
|
|
2014-02-11 02:46:57 +01:00
|
|
|
ElfW(Addr) linker_addr = args.getauxval(AT_BASE);
|
|
|
|
ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_addr);
|
2014-02-12 01:59:37 +01:00
|
|
|
ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>(linker_addr + elf_hdr->e_phoff);
|
2013-02-07 19:14:39 +01:00
|
|
|
|
|
|
|
soinfo linker_so;
|
|
|
|
memset(&linker_so, 0, sizeof(soinfo));
|
|
|
|
|
2013-12-22 01:07:45 +01:00
|
|
|
strcpy(linker_so.name, "[dynamic linker]");
|
2013-02-07 19:14:39 +01:00
|
|
|
linker_so.base = linker_addr;
|
|
|
|
linker_so.size = phdr_table_get_load_size(phdr, elf_hdr->e_phnum);
|
|
|
|
linker_so.load_bias = get_elf_exec_load_bias(elf_hdr);
|
2013-03-01 00:58:45 +01:00
|
|
|
linker_so.dynamic = NULL;
|
2013-02-07 19:14:39 +01:00
|
|
|
linker_so.phdr = phdr;
|
|
|
|
linker_so.phnum = elf_hdr->e_phnum;
|
|
|
|
linker_so.flags |= FLAG_LINKER;
|
|
|
|
|
2014-02-27 14:18:00 +01:00
|
|
|
if (!soinfo_link_image(&linker_so, NULL)) {
|
2013-02-07 19:14:39 +01:00
|
|
|
// It would be nice to print an error message, but if the linker
|
|
|
|
// can't link itself, there's no guarantee that we'll be able to
|
2013-12-22 01:07:45 +01:00
|
|
|
// call write() (because it involves a GOT reference). We may as
|
|
|
|
// well try though...
|
|
|
|
const char* msg = "CANNOT LINK EXECUTABLE: ";
|
|
|
|
write(2, msg, strlen(msg));
|
|
|
|
write(2, __linker_dl_err_buf, strlen(__linker_dl_err_buf));
|
|
|
|
write(2, "\n", 1);
|
|
|
|
_exit(EXIT_FAILURE);
|
2013-02-07 19:14:39 +01:00
|
|
|
}
|
2011-11-12 00:53:17 +01:00
|
|
|
|
2013-02-07 19:14:39 +01:00
|
|
|
// We have successfully fixed our own relocations. It's safe to run
|
|
|
|
// the main part of the linker now.
|
2013-04-04 22:46:46 +02:00
|
|
|
args.abort_message_ptr = &gAbortMessage;
|
2014-02-11 02:46:57 +01:00
|
|
|
ElfW(Addr) start_address = __linker_init_post_relocation(args, linker_addr);
|
2012-10-17 00:54:46 +02:00
|
|
|
|
2014-05-06 01:49:04 +02:00
|
|
|
gSoInfoAllocator.protect_all(PROT_READ);
|
2012-11-01 23:16:56 +01:00
|
|
|
|
2013-02-07 19:14:39 +01:00
|
|
|
// Return the address that the calling assembly stub should jump to.
|
|
|
|
return start_address;
|
2011-11-12 00:53:17 +01:00
|
|
|
}
|