am 71e4466b: Merge "Switch <elf.h> over to linux uapi under the covers."

* commit '71e4466b100359f36a29c8c0277888df6081a265':
  Switch <elf.h> over to linux uapi under the covers.
This commit is contained in:
Elliott Hughes 2014-03-07 02:26:30 +00:00 committed by Android Git Automerger
commit 72ef407406
20 changed files with 533 additions and 1833 deletions

View File

@ -579,7 +579,7 @@ include $(LOCAL_PATH)/arch-$(TARGET_ARCH)/$(TARGET_ARCH).mk
libc_bionic_src_files += $(_LIBC_ARCH_COMMON_SRC_FILES)
libc_bionic_src_files += $(_LIBC_ARCH_CPU_VARIANT_SRC_FILES)
libc_arch_static_src_files := $(_LIBC_ARCH_STATIC_SRC_FILES)
libc_arch_static_src_files := $(_LIBC_ARCH_STATIC_SRC_FILES) bionic/dl_iterate_phdr_static.cpp
libc_arch_dynamic_src_files := $(_LIBC_ARCH_DYNAMIC_SRC_FILES)
libc_common_additional_dependencies += $(_LIBC_ARCH_ADDITIONAL_DEPENDENCIES)

View File

@ -14,11 +14,8 @@ _LIBC_ARCH_COMMON_SRC_FILES := \
arch-arm/bionic/sigsetjmp.S \
arch-arm/bionic/syscall.S \
# These are used by the static and dynamic versions of the libc
# respectively.
_LIBC_ARCH_STATIC_SRC_FILES := \
arch-arm/bionic/exidx_static.c \
bionic/dl_iterate_phdr_static.c \
_LIBC_ARCH_DYNAMIC_SRC_FILES := \
arch-arm/bionic/exidx_dynamic.c \

View File

@ -12,8 +12,3 @@ _LIBC_ARCH_COMMON_SRC_FILES := \
arch-arm64/bionic/sigsetjmp.S \
arch-arm64/bionic/syscall.S \
arch-arm64/bionic/vfork.S \
_LIBC_ARCH_STATIC_SRC_FILES := \
bionic/dl_iterate_phdr_static.c \
_LIBC_ARCH_DYNAMIC_SRC_FILES :=

View File

@ -15,8 +15,3 @@ _LIBC_ARCH_COMMON_SRC_FILES := \
arch-mips/string/memcpy.S \
arch-mips/string/memset.S \
arch-mips/string/mips_strlen.c \
_LIBC_ARCH_STATIC_SRC_FILES := \
bionic/dl_iterate_phdr_static.c \
_LIBC_ARCH_DYNAMIC_SRC_FILES :=

View File

@ -21,8 +21,3 @@ _LIBC_ARCH_COMMON_SRC_FILES := \
_LIBC_ARCH_COMMON_SRC_FILES += bionic/memcpy.c
_LIBC_ARCH_COMMON_SRC_FILES += bionic/memset.c
_LIBC_ARCH_COMMON_SRC_FILES += string/strlen.c
_LIBC_ARCH_STATIC_SRC_FILES := \
bionic/dl_iterate_phdr_static.c \
_LIBC_ARCH_DYNAMIC_SRC_FILES :=

View File

@ -62,8 +62,3 @@ _LIBC_ARCH_COMMON_SRC_FILES += \
arch-x86/string/sse2-wcsrchr-atom.S \
arch-x86/string/sse2-wcslen-atom.S \
arch-x86/string/sse2-wcscmp-atom.S \
_LIBC_ARCH_STATIC_SRC_FILES := \
bionic/dl_iterate_phdr_static.c \
_LIBC_ARCH_DYNAMIC_SRC_FILES :=

View File

@ -11,8 +11,3 @@ _LIBC_ARCH_COMMON_SRC_FILES := \
arch-x86_64/bionic/syscall.S \
arch-x86_64/bionic/vfork.S \
string/memcmp16.c \
_LIBC_ARCH_STATIC_SRC_FILES := \
bionic/dl_iterate_phdr_static.c \
_LIBC_ARCH_DYNAMIC_SRC_FILES :=

View File

@ -1,82 +0,0 @@
/*
* Copyright (C) 2006 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <elf.h>
#include <sys/auxv.h>
#include <sys/types.h>
#include <link.h>
/* ld provides this to us in the default link script */
extern void* __executable_start;
// dl_iterate_phdr(3) for statically linked executables.
//
// Dynamic binaries get their dl_iterate_phdr from the dynamic linker, but
// static binaries get this. We don't have a list of shared objects to
// iterate over, since there's really only a single monolithic blob of
// code/data, plus optionally a VDSO.
//
// Returns -1 if the executable's ELF magic is invalid; otherwise returns
// the last value returned by the callback |cb|.
int dl_iterate_phdr(int (*cb)(struct dl_phdr_info* info, size_t size, void* data), void* data) {
  Elf_Ehdr* ehdr = (Elf_Ehdr*) &__executable_start;
  // TODO: again, copied from linker.c. Find a better home for this later.
  if (ehdr->e_ident[EI_MAG0] != ELFMAG0) return -1;
  if (ehdr->e_ident[EI_MAG1] != ELFMAG1) return -1;
  if (ehdr->e_ident[EI_MAG2] != ELFMAG2) return -1;
  if (ehdr->e_ident[EI_MAG3] != ELFMAG3) return -1;
  struct dl_phdr_info exe_info;
  exe_info.dlpi_addr = 0;
  exe_info.dlpi_name = NULL;
  exe_info.dlpi_phdr = (Elf_Phdr*) ((unsigned long) ehdr + ehdr->e_phoff);
  exe_info.dlpi_phnum = ehdr->e_phnum;
#ifdef AT_SYSINFO_EHDR
  // Try the executable first.
  int rc = cb(&exe_info, sizeof(exe_info), data);
  if (rc != 0) {
    return rc;
  }
  // Then try the VDSO, if the kernel supplied one. getauxval(3) returns 0
  // when AT_SYSINFO_EHDR is absent, so guard against dereferencing NULL.
  Elf_Ehdr* ehdr_vdso = (Elf_Ehdr*) getauxval(AT_SYSINFO_EHDR);
  if (ehdr_vdso == NULL) {
    return rc;
  }
  struct dl_phdr_info vdso_info;
  vdso_info.dlpi_addr = 0;
  vdso_info.dlpi_name = NULL;
  vdso_info.dlpi_phdr = (Elf_Phdr*) ((char*) ehdr_vdso + ehdr_vdso->e_phoff);
  vdso_info.dlpi_phnum = ehdr_vdso->e_phnum;
  // Derive the VDSO's load bias from its first PT_LOAD segment.
  for (size_t i = 0; i < vdso_info.dlpi_phnum; ++i) {
    if (vdso_info.dlpi_phdr[i].p_type == PT_LOAD) {
      vdso_info.dlpi_addr = (Elf_Addr) ehdr_vdso - vdso_info.dlpi_phdr[i].p_vaddr;
      break;
    }
  }
  return cb(&vdso_info, sizeof(vdso_info), data);
#else
  // There's only the executable to try.
  return cb(&exe_info, sizeof(exe_info), data);
#endif
}

View File

@ -0,0 +1,82 @@
/*
* Copyright (C) 2006 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <elf.h>
#include <sys/auxv.h>
#include <sys/types.h>
#include <link.h>
/* ld provides this to us in the default link script */
extern "C" void* __executable_start;
// dl_iterate_phdr(3) for statically linked executables.
//
// Dynamic binaries get their dl_iterate_phdr from the dynamic linker, but
// static binaries get this. We don't have a list of shared objects to
// iterate over, since there's really only a single monolithic blob of
// code/data, plus optionally a VDSO.
//
// Returns -1 if the executable's ELF magic is invalid; otherwise returns
// the last value returned by the callback |cb|.
int dl_iterate_phdr(int (*cb)(struct dl_phdr_info* info, size_t size, void* data), void* data) {
  ElfW(Ehdr)* ehdr = reinterpret_cast<ElfW(Ehdr)*>(&__executable_start);
  // TODO: again, copied from linker.c. Find a better home for this later.
  if (ehdr->e_ident[EI_MAG0] != ELFMAG0) return -1;
  if (ehdr->e_ident[EI_MAG1] != ELFMAG1) return -1;
  if (ehdr->e_ident[EI_MAG2] != ELFMAG2) return -1;
  if (ehdr->e_ident[EI_MAG3] != ELFMAG3) return -1;
  struct dl_phdr_info exe_info;
  exe_info.dlpi_addr = 0;
  exe_info.dlpi_name = NULL;
  exe_info.dlpi_phdr = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<uintptr_t>(ehdr) + ehdr->e_phoff);
  exe_info.dlpi_phnum = ehdr->e_phnum;
#ifdef AT_SYSINFO_EHDR
  // Try the executable first.
  int rc = cb(&exe_info, sizeof(exe_info), data);
  if (rc != 0) {
    return rc;
  }
  // Then try the VDSO, if the kernel supplied one. getauxval(3) returns 0
  // when AT_SYSINFO_EHDR is absent, so guard against dereferencing NULL.
  ElfW(Ehdr)* ehdr_vdso = reinterpret_cast<ElfW(Ehdr)*>(getauxval(AT_SYSINFO_EHDR));
  if (ehdr_vdso == NULL) {
    return rc;
  }
  struct dl_phdr_info vdso_info;
  vdso_info.dlpi_addr = 0;
  vdso_info.dlpi_name = NULL;
  vdso_info.dlpi_phdr = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
  vdso_info.dlpi_phnum = ehdr_vdso->e_phnum;
  // Derive the VDSO's load bias from its first PT_LOAD segment.
  for (size_t i = 0; i < vdso_info.dlpi_phnum; ++i) {
    if (vdso_info.dlpi_phdr[i].p_type == PT_LOAD) {
      vdso_info.dlpi_addr = reinterpret_cast<ElfW(Addr)>(ehdr_vdso) - vdso_info.dlpi_phdr[i].p_vaddr;
      break;
    }
  }
  return cb(&vdso_info, sizeof(vdso_info), data);
#else
  // There's only the executable to try.
  return cb(&exe_info, sizeof(exe_info), data);
#endif
}

View File

@ -32,10 +32,10 @@
#include <private/bionic_auxv.h>
#include <elf.h>
__LIBC_HIDDEN__ Elf_auxv_t* __libc_auxv = NULL;
__LIBC_HIDDEN__ ElfW(auxv_t)* __libc_auxv = NULL;
extern "C" unsigned long int getauxval(unsigned long int type) {
for (Elf_auxv_t* v = __libc_auxv; v->a_type != AT_NULL; ++v) {
for (ElfW(auxv_t)* v = __libc_auxv; v->a_type != AT_NULL; ++v) {
if (v->a_type == type) {
return v->a_un.a_val;
}

View File

@ -68,16 +68,16 @@ static void call_array(void(**list)()) {
}
static void apply_gnu_relro() {
Elf_Phdr* phdr_start = reinterpret_cast<Elf_Phdr*>(getauxval(AT_PHDR));
ElfW(Phdr)* phdr_start = reinterpret_cast<ElfW(Phdr)*>(getauxval(AT_PHDR));
unsigned long int phdr_ct = getauxval(AT_PHNUM);
for (Elf_Phdr* phdr = phdr_start; phdr < (phdr_start + phdr_ct); phdr++) {
for (ElfW(Phdr)* phdr = phdr_start; phdr < (phdr_start + phdr_ct); phdr++) {
if (phdr->p_type != PT_GNU_RELRO) {
continue;
}
Elf_Addr seg_page_start = PAGE_START(phdr->p_vaddr);
Elf_Addr seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz);
ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr);
ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz);
// Check return value here? What do we do if we fail?
mprotect(reinterpret_cast<void*>(seg_page_start), seg_page_end - seg_page_start, PROT_READ);

View File

@ -28,33 +28,58 @@
#ifndef _ELF_H
#define _ELF_H
#include <stdint.h>
#include <linux/auxvec.h>
#include <linux/elf.h>
#include <linux/elf-em.h>
/* TODO: can we switch to <linux/elf.h> instead? http://b/12476126. */
#include <sys/exec_elf.h>
#include <machine/elf_machdep.h>
typedef struct {
uint32_t a_type;
__u32 a_type;
union {
uint32_t a_val;
__u32 a_val;
} a_un;
} Elf32_auxv_t;
typedef struct {
uint64_t a_type;
__u64 a_type;
union {
uint64_t a_val;
__u64 a_val;
} a_un;
} Elf64_auxv_t;
#ifdef __LP64__
# define Elf_auxv_t Elf64_auxv_t
#else
# define Elf_auxv_t Elf32_auxv_t
#endif
#define DF_ORIGIN 0x00000001
#define DF_SYMBOLIC 0x00000002
#define DF_TEXTREL 0x00000004
#define DF_BIND_NOW 0x00000008
#define DF_STATIC_TLS 0x00000010
/* <sys/exec_elf.h> doesn't contain any NT_ constants. aarch64 strace needs this one. */
#define NT_PRSTATUS 1
#define DT_BIND_NOW 24
#define DT_INIT_ARRAY 25
#define DT_FINI_ARRAY 26
#define DT_INIT_ARRAYSZ 27
#define DT_FINI_ARRAYSZ 28
#define DT_RUNPATH 29
#define DT_FLAGS 30
/* glibc and BSD disagree for DT_ENCODING; glibc looks wrong. */
#define DT_PREINIT_ARRAY 32
#define DT_PREINIT_ARRAYSZ 33
#define ELFOSABI_SYSV 0 /* Synonym for ELFOSABI_NONE used by valgrind. */
#define EM_ARM 40
#define EM_AARCH64 183
#define PT_GNU_RELRO 0x6474e552
#define STB_LOOS 10
#define STB_HIOS 12
#define STB_LOPROC 13
#define STB_HIPROC 15
#define STT_LOOS 10
#define STT_HIOS 12
#define STT_LOPROC 13
#define STT_HIPROC 15
#endif /* _ELF_H */

File diff suppressed because it is too large Load Diff

View File

@ -18,6 +18,7 @@
#define KERNEL_ARGUMENT_BLOCK_H
#include <elf.h>
#include <link.h>
#include <stdint.h>
#include <sys/auxv.h>
@ -43,14 +44,14 @@ class KernelArgumentBlock {
}
++p; // Skip second NULL;
auxv = reinterpret_cast<Elf_auxv_t*>(p);
auxv = reinterpret_cast<ElfW(auxv_t)*>(p);
}
// Similar to ::getauxval but doesn't require the libc global variables to be set up,
// so it's safe to call this really early on. This function also lets you distinguish
// between the inability to find the given type and its value just happening to be 0.
unsigned long getauxval(unsigned long type, bool* found_match = NULL) {
for (Elf_auxv_t* v = auxv; v->a_type != AT_NULL; ++v) {
for (ElfW(auxv_t)* v = auxv; v->a_type != AT_NULL; ++v) {
if (v->a_type == type) {
if (found_match != NULL) {
*found_match = true;
@ -67,7 +68,7 @@ class KernelArgumentBlock {
int argc;
char** argv;
char** envp;
Elf_auxv_t* auxv;
ElfW(auxv_t)* auxv;
abort_msg_t** abort_message_ptr;

View File

@ -29,11 +29,12 @@
#define _PRIVATE_BIONIC_AUXV_H_
#include <elf.h>
#include <link.h>
#include <sys/cdefs.h>
__BEGIN_DECLS
extern Elf_auxv_t* __libc_auxv;
extern ElfW(auxv_t)* __libc_auxv;
__END_DECLS

View File

@ -87,7 +87,7 @@ void* dlsym(void* handle, const char* symbol) {
}
soinfo* found = NULL;
Elf_Sym* sym = NULL;
ElfW(Sym)* sym = NULL;
if (handle == RTLD_DEFAULT) {
sym = dlsym_linear_lookup(symbol, &found, NULL);
} else if (handle == RTLD_NEXT) {
@ -134,7 +134,7 @@ int dladdr(const void* addr, Dl_info* info) {
info->dli_fbase = (void*) si->base;
// Determine if any symbol in the library contains the specified address.
Elf_Sym *sym = dladdr_find_symbol(si, addr);
ElfW(Sym)* sym = dladdr_find_symbol(si, addr);
if (sym != NULL) {
info->dli_sname = si->strtab + sym->st_name;
info->dli_saddr = (void*)(si->load_bias + sym->st_value);
@ -167,12 +167,6 @@ int dlclose(void* handle) {
/* st_size */ 0, \
}
#if defined(__LP64__)
# define ELF_SYM_INITIALIZER ELF64_SYM_INITIALIZER
#else
# define ELF_SYM_INITIALIZER ELF32_SYM_INITIALIZER
#endif
#if defined(__arm__)
// 0000000 00011111 111112 22222222 2333333 3333444444444455555555556666666 6667777777777888888888899999 9999900000000001 1
// 0123456 78901234 567890 12345678 9012345 6789012345678901234567890123456 7890123456789012345678901234 5678901234567890 1
@ -187,22 +181,22 @@ int dlclose(void* handle) {
# error Unsupported architecture. Only arm, arm64, mips, x86, and x86_64 are presently supported.
#endif
static Elf_Sym gLibDlSymtab[] = {
static ElfW(Sym) gLibDlSymtab[] = {
// Total length of libdl_info.strtab, including trailing 0.
// This is actually the STH_UNDEF entry. Technically, it's
// supposed to have st_name == 0, but instead, it points to an index
// in the strtab with a \0 to make iterating through the symtab easier.
ELF_SYM_INITIALIZER(sizeof(ANDROID_LIBDL_STRTAB) - 1, NULL, 0),
ELF_SYM_INITIALIZER( 0, &dlopen, 1),
ELF_SYM_INITIALIZER( 7, &dlclose, 1),
ELF_SYM_INITIALIZER( 15, &dlsym, 1),
ELF_SYM_INITIALIZER( 21, &dlerror, 1),
ELF_SYM_INITIALIZER( 29, &dladdr, 1),
ELF_SYM_INITIALIZER( 36, &android_update_LD_LIBRARY_PATH, 1),
ELF_SYM_INITIALIZER( 67, &android_get_LD_LIBRARY_PATH, 1),
ELF_SYM_INITIALIZER( 95, &dl_iterate_phdr, 1),
ELFW(SYM_INITIALIZER)(sizeof(ANDROID_LIBDL_STRTAB) - 1, NULL, 0),
ELFW(SYM_INITIALIZER)( 0, &dlopen, 1),
ELFW(SYM_INITIALIZER)( 7, &dlclose, 1),
ELFW(SYM_INITIALIZER)( 15, &dlsym, 1),
ELFW(SYM_INITIALIZER)( 21, &dlerror, 1),
ELFW(SYM_INITIALIZER)( 29, &dladdr, 1),
ELFW(SYM_INITIALIZER)( 36, &android_update_LD_LIBRARY_PATH, 1),
ELFW(SYM_INITIALIZER)( 67, &android_get_LD_LIBRARY_PATH, 1),
ELFW(SYM_INITIALIZER)( 95, &dl_iterate_phdr, 1),
#if defined(__arm__)
ELF_SYM_INITIALIZER(111, &dl_unwind_find_exidx, 1),
ELFW(SYM_INITIALIZER)(111, &dl_unwind_find_exidx, 1),
#endif
};

View File

@ -29,9 +29,8 @@
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/auxvec.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@ -67,7 +66,7 @@
*/
static bool soinfo_link_image(soinfo* si);
static Elf_Addr get_elf_exec_load_bias(const Elf_Ehdr* elf);
static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf);
// We can't use malloc(3) in the dynamic linker. We use a linked list of anonymous
// maps, each a single page in size. The pages are broken up into as many struct soinfo
@ -451,33 +450,33 @@ dl_iterate_phdr(int (*cb)(dl_phdr_info *info, size_t size, void *data),
return rv;
}
static Elf_Sym* soinfo_elf_lookup(soinfo* si, unsigned hash, const char* name) {
Elf_Sym* symtab = si->symtab;
const char* strtab = si->strtab;
static ElfW(Sym)* soinfo_elf_lookup(soinfo* si, unsigned hash, const char* name) {
ElfW(Sym)* symtab = si->symtab;
const char* strtab = si->strtab;
TRACE_TYPE(LOOKUP, "SEARCH %s in %s@%p %x %zd",
name, si->name, reinterpret_cast<void*>(si->base), hash, hash % si->nbucket);
TRACE_TYPE(LOOKUP, "SEARCH %s in %s@%p %x %zd",
name, si->name, reinterpret_cast<void*>(si->base), hash, hash % si->nbucket);
for (unsigned n = si->bucket[hash % si->nbucket]; n != 0; n = si->chain[n]) {
Elf_Sym* s = symtab + n;
if (strcmp(strtab + s->st_name, name)) continue;
for (unsigned n = si->bucket[hash % si->nbucket]; n != 0; n = si->chain[n]) {
ElfW(Sym)* s = symtab + n;
if (strcmp(strtab + s->st_name, name)) continue;
/* only concern ourselves with global and weak symbol definitions */
switch (ELF_ST_BIND(s->st_info)) {
case STB_GLOBAL:
case STB_WEAK:
if (s->st_shndx == SHN_UNDEF) {
continue;
}
/* only concern ourselves with global and weak symbol definitions */
switch (ELF_ST_BIND(s->st_info)) {
case STB_GLOBAL:
case STB_WEAK:
if (s->st_shndx == SHN_UNDEF) {
continue;
}
TRACE_TYPE(LOOKUP, "FOUND %s in %s (%p) %zd",
name, si->name, reinterpret_cast<void*>(s->st_value),
static_cast<size_t>(s->st_size));
return s;
}
TRACE_TYPE(LOOKUP, "FOUND %s in %s (%p) %zd",
name, si->name, reinterpret_cast<void*>(s->st_value),
static_cast<size_t>(s->st_size));
return s;
}
}
return NULL;
return NULL;
}
static unsigned elfhash(const char* _name) {
@ -493,9 +492,9 @@ static unsigned elfhash(const char* _name) {
return h;
}
static Elf_Sym* soinfo_do_lookup(soinfo* si, const char* name, soinfo** lsi, soinfo* needed[]) {
static ElfW(Sym)* soinfo_do_lookup(soinfo* si, const char* name, soinfo** lsi, soinfo* needed[]) {
unsigned elf_hash = elfhash(name);
Elf_Sym* s = NULL;
ElfW(Sym)* s = NULL;
if (si != NULL && somain != NULL) {
@ -603,7 +602,7 @@ done:
Binary Interface) where in Chapter 5 it discuss resolving "Shared
Object Dependencies" in breadth first search order.
*/
Elf_Sym* dlsym_handle_lookup(soinfo* si, const char* name) {
ElfW(Sym)* dlsym_handle_lookup(soinfo* si, const char* name) {
return soinfo_elf_lookup(si, elfhash(name), name);
}
@ -612,14 +611,14 @@ Elf_Sym* dlsym_handle_lookup(soinfo* si, const char* name) {
beginning of the global solist. Otherwise the search starts at the
specified soinfo (for RTLD_NEXT).
*/
Elf_Sym* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) {
ElfW(Sym)* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) {
unsigned elf_hash = elfhash(name);
if (start == NULL) {
start = solist;
}
Elf_Sym* s = NULL;
ElfW(Sym)* s = NULL;
for (soinfo* si = start; (s == NULL) && (si != NULL); si = si->next) {
s = soinfo_elf_lookup(si, elf_hash, name);
if (s != NULL) {
@ -637,7 +636,7 @@ Elf_Sym* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start) {
}
soinfo* find_containing_library(const void* p) {
Elf_Addr address = reinterpret_cast<Elf_Addr>(p);
ElfW(Addr) address = reinterpret_cast<ElfW(Addr)>(p);
for (soinfo* si = solist; si != NULL; si = si->next) {
if (address >= si->base && address - si->base < si->size) {
return si;
@ -646,13 +645,13 @@ soinfo* find_containing_library(const void* p) {
return NULL;
}
Elf_Sym* dladdr_find_symbol(soinfo* si, const void* addr) {
Elf_Addr soaddr = reinterpret_cast<Elf_Addr>(addr) - si->base;
ElfW(Sym)* dladdr_find_symbol(soinfo* si, const void* addr) {
ElfW(Addr) soaddr = reinterpret_cast<ElfW(Addr)>(addr) - si->base;
// Search the library's symbol table for any defined symbol which
// contains this address.
for (size_t i = 0; i < si->nchain; ++i) {
Elf_Sym* sym = &si->symtab[i];
ElfW(Sym)* sym = &si->symtab[i];
if (sym->st_shndx != SHN_UNDEF &&
soaddr >= sym->st_value &&
soaddr < sym->st_value + sym->st_size) {
@ -666,13 +665,13 @@ Elf_Sym* dladdr_find_symbol(soinfo* si, const void* addr) {
#if 0
static void dump(soinfo* si)
{
Elf_Sym* s = si->symtab;
for (unsigned n = 0; n < si->nchain; n++) {
TRACE("%04d> %08x: %02x %04x %08x %08x %s", n, s,
s->st_info, s->st_shndx, s->st_value, s->st_size,
si->strtab + s->st_name);
s++;
}
ElfW(Sym)* s = si->symtab;
for (unsigned n = 0; n < si->nchain; n++) {
TRACE("%04d> %08x: %02x %04x %08x %08x %s", n, s,
s->st_info, s->st_shndx, s->st_value, s->st_size,
si->strtab + s->st_name);
s++;
}
}
#endif
@ -808,7 +807,7 @@ static int soinfo_unload(soinfo* si) {
TRACE("unloading '%s'", si->name);
si->CallDestructors();
for (Elf_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
if (d->d_tag == DT_NEEDED) {
const char* library_name = si->strtab + d->d_un.d_val;
TRACE("%s needs to unload %s", si->name, library_name);
@ -859,18 +858,18 @@ int do_dlclose(soinfo* si) {
}
#if defined(USE_RELA)
static int soinfo_relocate_a(soinfo* si, Elf_Rela* rela, unsigned count, soinfo* needed[]) {
Elf_Sym* symtab = si->symtab;
static int soinfo_relocate_a(soinfo* si, ElfW(Rela)* rela, unsigned count, soinfo* needed[]) {
ElfW(Sym)* symtab = si->symtab;
const char* strtab = si->strtab;
Elf_Sym* s;
Elf_Rela* start = rela;
ElfW(Sym)* s;
ElfW(Rela)* start = rela;
soinfo* lsi;
for (size_t idx = 0; idx < count; ++idx, ++rela) {
unsigned type = ELF_R_TYPE(rela->r_info);
unsigned sym = ELF_R_SYM(rela->r_info);
Elf_Addr reloc = static_cast<Elf_Addr>(rela->r_offset + si->load_bias);
Elf_Addr sym_addr = 0;
unsigned type = ELFW(R_TYPE)(rela->r_info);
unsigned sym = ELFW(R_SYM)(rela->r_info);
ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rela->r_offset + si->load_bias);
ElfW(Addr) sym_addr = 0;
char* sym_name = NULL;
DEBUG("Processing '%s' relocation at index %zd", si->name, idx);
@ -931,7 +930,7 @@ static int soinfo_relocate_a(soinfo* si, Elf_Rela* rela, unsigned count, soinfo*
}
} else {
// We got a definition.
sym_addr = static_cast<Elf_Addr>(s->st_value + lsi->load_bias);
sym_addr = static_cast<ElfW(Addr)>(s->st_value + lsi->load_bias);
}
count_relocation(kRelocSymbol);
} else {
@ -943,117 +942,92 @@ static int soinfo_relocate_a(soinfo* si, Elf_Rela* rela, unsigned count, soinfo*
case R_AARCH64_JUMP_SLOT:
count_relocation(kRelocAbsolute);
MARK(rela->r_offset);
TRACE_TYPE(RELO, "RELO JMP_SLOT %16lx <- %16lx %s\n",
reloc,
(sym_addr + rela->r_addend),
sym_name);
*reinterpret_cast<Elf_Addr*>(reloc) = (sym_addr + rela->r_addend);
TRACE_TYPE(RELO, "RELO JMP_SLOT %16llx <- %16llx %s\n",
reloc, (sym_addr + rela->r_addend), sym_name);
*reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
break;
case R_AARCH64_GLOB_DAT:
count_relocation(kRelocAbsolute);
MARK(rela->r_offset);
TRACE_TYPE(RELO, "RELO GLOB_DAT %16lx <- %16lx %s\n",
reloc,
(sym_addr + rela->r_addend),
sym_name);
*reinterpret_cast<Elf_Addr*>(reloc) = (sym_addr + rela->r_addend);
TRACE_TYPE(RELO, "RELO GLOB_DAT %16llx <- %16llx %s\n",
reloc, (sym_addr + rela->r_addend), sym_name);
*reinterpret_cast<ElfW(Addr)*>(reloc) = (sym_addr + rela->r_addend);
break;
case R_AARCH64_ABS64:
count_relocation(kRelocAbsolute);
MARK(rela->r_offset);
TRACE_TYPE(RELO, "RELO ABS64 %16lx <- %16lx %s\n",
reloc,
(sym_addr + rela->r_addend),
sym_name);
*reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend);
TRACE_TYPE(RELO, "RELO ABS64 %16llx <- %16llx %s\n",
reloc, (sym_addr + rela->r_addend), sym_name);
*reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
break;
case R_AARCH64_ABS32:
count_relocation(kRelocAbsolute);
MARK(rela->r_offset);
TRACE_TYPE(RELO, "RELO ABS32 %16lx <- %16lx %s\n",
reloc,
(sym_addr + rela->r_addend),
sym_name);
if ((static_cast<Elf_Addr>(INT32_MIN) <=
(*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend))) &&
((*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)) <=
static_cast<Elf_Addr>(UINT32_MAX))) {
*reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend);
TRACE_TYPE(RELO, "RELO ABS32 %16llx <- %16llx %s\n",
reloc, (sym_addr + rela->r_addend), sym_name);
if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
*reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
} else {
DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
(*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)),
static_cast<Elf_Addr>(INT32_MIN),
static_cast<Elf_Addr>(UINT32_MAX));
DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
(*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
static_cast<ElfW(Addr)>(INT32_MIN),
static_cast<ElfW(Addr)>(UINT32_MAX));
return -1;
}
break;
case R_AARCH64_ABS16:
count_relocation(kRelocAbsolute);
MARK(rela->r_offset);
TRACE_TYPE(RELO, "RELO ABS16 %16lx <- %16lx %s\n",
reloc,
(sym_addr + rela->r_addend),
sym_name);
if ((static_cast<Elf_Addr>(INT16_MIN) <=
(*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend))) &&
((*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)) <=
static_cast<Elf_Addr>(UINT16_MAX))) {
*reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend);
TRACE_TYPE(RELO, "RELO ABS16 %16llx <- %16llx %s\n",
reloc, (sym_addr + rela->r_addend), sym_name);
if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend))) &&
((*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
*reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend);
} else {
DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
(*reinterpret_cast<Elf_Addr*>(reloc) + (sym_addr + rela->r_addend)),
static_cast<Elf_Addr>(INT16_MIN),
static_cast<Elf_Addr>(UINT16_MAX));
DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
(*reinterpret_cast<ElfW(Addr)*>(reloc) + (sym_addr + rela->r_addend)),
static_cast<ElfW(Addr)>(INT16_MIN),
static_cast<ElfW(Addr)>(UINT16_MAX));
return -1;
}
break;
case R_AARCH64_PREL64:
count_relocation(kRelocRelative);
MARK(rela->r_offset);
TRACE_TYPE(RELO, "RELO REL64 %16lx <- %16lx - %16lx %s\n",
reloc,
(sym_addr + rela->r_addend),
rela->r_offset,
sym_name);
*reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr + rela->r_addend) - rela->r_offset;
TRACE_TYPE(RELO, "RELO REL64 %16llx <- %16llx - %16llx %s\n",
reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
*reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr + rela->r_addend) - rela->r_offset;
break;
case R_AARCH64_PREL32:
count_relocation(kRelocRelative);
MARK(rela->r_offset);
TRACE_TYPE(RELO, "RELO REL32 %16lx <- %16lx - %16lx %s\n",
reloc,
(sym_addr + rela->r_addend),
rela->r_offset, sym_name);
if ((static_cast<Elf_Addr>(INT32_MIN) <=
(*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
((*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <=
static_cast<Elf_Addr>(UINT32_MAX))) {
*reinterpret_cast<Elf_Addr*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
TRACE_TYPE(RELO, "RELO REL32 %16llx <- %16llx - %16llx %s\n",
reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
if ((static_cast<ElfW(Addr)>(INT32_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT32_MAX))) {
*reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
} else {
DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
(*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
static_cast<Elf_Addr>(INT32_MIN),
static_cast<Elf_Addr>(UINT32_MAX));
DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
(*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
static_cast<ElfW(Addr)>(INT32_MIN),
static_cast<ElfW(Addr)>(UINT32_MAX));
return -1;
}
break;
case R_AARCH64_PREL16:
count_relocation(kRelocRelative);
MARK(rela->r_offset);
TRACE_TYPE(RELO, "RELO REL16 %16lx <- %16lx - %16lx %s\n",
reloc,
(sym_addr + rela->r_addend),
rela->r_offset, sym_name);
if ((static_cast<Elf_Addr>(INT16_MIN) <=
(*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
((*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <=
static_cast<Elf_Addr>(UINT16_MAX))) {
*reinterpret_cast<Elf_Addr*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
TRACE_TYPE(RELO, "RELO REL16 %16llx <- %16llx - %16llx %s\n",
reloc, (sym_addr + rela->r_addend), rela->r_offset, sym_name);
if ((static_cast<ElfW(Addr)>(INT16_MIN) <= (*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset))) &&
((*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)) <= static_cast<ElfW(Addr)>(UINT16_MAX))) {
*reinterpret_cast<ElfW(Addr)*>(reloc) += ((sym_addr + rela->r_addend) - rela->r_offset);
} else {
DL_ERR("0x%016lx out of range 0x%016lx to 0x%016lx",
(*reinterpret_cast<Elf_Addr*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
static_cast<Elf_Addr>(INT16_MIN),
static_cast<Elf_Addr>(UINT16_MAX));
DL_ERR("0x%016llx out of range 0x%016llx to 0x%016llx",
(*reinterpret_cast<ElfW(Addr)*>(reloc) + ((sym_addr + rela->r_addend) - rela->r_offset)),
static_cast<ElfW(Addr)>(INT16_MIN),
static_cast<ElfW(Addr)>(UINT16_MAX));
return -1;
}
break;
@ -1065,10 +1039,9 @@ static int soinfo_relocate_a(soinfo* si, Elf_Rela* rela, unsigned count, soinfo*
DL_ERR("odd RELATIVE form...");
return -1;
}
TRACE_TYPE(RELO, "RELO RELATIVE %16lx <- %16lx\n",
reloc,
(si->base + rela->r_addend));
*reinterpret_cast<Elf_Addr*>(reloc) = (si->base + rela->r_addend);
TRACE_TYPE(RELO, "RELO RELATIVE %16llx <- %16llx\n",
reloc, (si->base + rela->r_addend));
*reinterpret_cast<ElfW(Addr)*>(reloc) = (si->base + rela->r_addend);
break;
case R_AARCH64_COPY:
@ -1089,13 +1062,13 @@ static int soinfo_relocate_a(soinfo* si, Elf_Rela* rela, unsigned count, soinfo*
}
count_relocation(kRelocCopy);
MARK(rela->r_offset);
TRACE_TYPE(RELO, "RELO COPY %16lx <- %ld @ %16lx %s\n",
reloc,
s->st_size,
(sym_addr + rela->r_addend),
sym_name);
TRACE_TYPE(RELO, "RELO COPY %16llx <- %lld @ %16llx %s\n",
reloc,
s->st_size,
(sym_addr + rela->r_addend),
sym_name);
if (reloc == (sym_addr + rela->r_addend)) {
Elf_Sym *src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);
ElfW(Sym)* src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);
if (src == NULL) {
DL_ERR("%s R_AARCH64_COPY relocation source cannot be resolved", si->name);
@ -1103,12 +1076,12 @@ static int soinfo_relocate_a(soinfo* si, Elf_Rela* rela, unsigned count, soinfo*
}
if (lsi->has_DT_SYMBOLIC) {
DL_ERR("%s invalid R_AARCH64_COPY relocation against DT_SYMBOLIC shared "
"library %s (built with -Bsymbolic?)", si->name, lsi->name);
"library %s (built with -Bsymbolic?)", si->name, lsi->name);
return -1;
}
if (s->st_size < src->st_size) {
DL_ERR("%s R_AARCH64_COPY relocation size mismatch (%ld < %ld)",
si->name, s->st_size, src->st_size);
DL_ERR("%s R_AARCH64_COPY relocation size mismatch (%lld < %lld)",
si->name, s->st_size, src->st_size);
return -1;
}
memcpy((void*)reloc, (void*)(src->st_value + lsi->load_bias), src->st_size);
@ -1118,16 +1091,12 @@ static int soinfo_relocate_a(soinfo* si, Elf_Rela* rela, unsigned count, soinfo*
}
break;
case R_AARCH64_TLS_TPREL64:
TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16lx <- %16lx - %16lx\n",
reloc,
(sym_addr + rela->r_addend),
rela->r_offset);
TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16llx <- %16llx - %16llx\n",
reloc, (sym_addr + rela->r_addend), rela->r_offset);
break;
case R_AARCH64_TLS_DTPREL32:
TRACE_TYPE(RELO, "RELO TLS_DTPREL32 *** %16lx <- %16lx - %16lx\n",
reloc,
(sym_addr + rela->r_addend),
rela->r_offset);
TRACE_TYPE(RELO, "RELO TLS_DTPREL32 *** %16llx <- %16llx - %16llx\n",
reloc, (sym_addr + rela->r_addend), rela->r_offset);
break;
#elif defined(__x86_64__)
case R_X86_64_JUMP_SLOT:
@ -1135,14 +1104,14 @@ static int soinfo_relocate_a(soinfo* si, Elf_Rela* rela, unsigned count, soinfo*
MARK(rela->r_offset);
TRACE_TYPE(RELO, "RELO JMP_SLOT %08zx <- %08zx %s", static_cast<size_t>(reloc),
static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
*reinterpret_cast<Elf_Addr*>(reloc) = sym_addr + rela->r_addend;
*reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
break;
case R_X86_64_GLOB_DAT:
count_relocation(kRelocAbsolute);
MARK(rela->r_offset);
TRACE_TYPE(RELO, "RELO GLOB_DAT %08zx <- %08zx %s", static_cast<size_t>(reloc),
static_cast<size_t>(sym_addr + rela->r_addend), sym_name);
*reinterpret_cast<Elf_Addr*>(reloc) = sym_addr + rela->r_addend;
*reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
break;
case R_X86_64_RELATIVE:
count_relocation(kRelocRelative);
@ -1153,21 +1122,21 @@ static int soinfo_relocate_a(soinfo* si, Elf_Rela* rela, unsigned count, soinfo*
}
TRACE_TYPE(RELO, "RELO RELATIVE %08zx <- +%08zx", static_cast<size_t>(reloc),
static_cast<size_t>(si->base));
*reinterpret_cast<Elf_Addr*>(reloc) = si->base + rela->r_addend;
*reinterpret_cast<ElfW(Addr)*>(reloc) = si->base + rela->r_addend;
break;
case R_X86_64_32:
count_relocation(kRelocRelative);
MARK(rela->r_offset);
TRACE_TYPE(RELO, "RELO R_X86_64_32 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
static_cast<size_t>(sym_addr), sym_name);
*reinterpret_cast<Elf_Addr*>(reloc) = sym_addr + rela->r_addend;
*reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
break;
case R_X86_64_64:
count_relocation(kRelocRelative);
MARK(rela->r_offset);
TRACE_TYPE(RELO, "RELO R_X86_64_64 %08zx <- +%08zx %s", static_cast<size_t>(reloc),
static_cast<size_t>(sym_addr), sym_name);
*reinterpret_cast<Elf_Addr*>(reloc) = sym_addr + rela->r_addend;
*reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend;
break;
case R_X86_64_PC32:
count_relocation(kRelocRelative);
@ -1175,7 +1144,7 @@ static int soinfo_relocate_a(soinfo* si, Elf_Rela* rela, unsigned count, soinfo*
TRACE_TYPE(RELO, "RELO R_X86_64_PC32 %08zx <- +%08zx (%08zx - %08zx) %s",
static_cast<size_t>(reloc), static_cast<size_t>(sym_addr - reloc),
static_cast<size_t>(sym_addr), static_cast<size_t>(reloc), sym_name);
*reinterpret_cast<Elf_Addr*>(reloc) = sym_addr + rela->r_addend - reloc;
*reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr + rela->r_addend - reloc;
break;
#endif
@ -1187,21 +1156,19 @@ static int soinfo_relocate_a(soinfo* si, Elf_Rela* rela, unsigned count, soinfo*
return 0;
}
#else
static int soinfo_relocate(soinfo* si, Elf_Rel* rel, unsigned count,
soinfo* needed[])
{
Elf_Sym* symtab = si->symtab;
static int soinfo_relocate(soinfo* si, ElfW(Rel)* rel, unsigned count, soinfo* needed[]) {
ElfW(Sym)* symtab = si->symtab;
const char* strtab = si->strtab;
Elf_Sym* s;
Elf_Rel* start = rel;
ElfW(Sym)* s;
ElfW(Rel)* start = rel;
soinfo* lsi;
for (size_t idx = 0; idx < count; ++idx, ++rel) {
unsigned type = ELF_R_TYPE(rel->r_info);
// TODO: don't use unsigned for 'sym'. Use uint32_t or Elf_Addr instead.
unsigned sym = ELF_R_SYM(rel->r_info);
Elf_Addr reloc = static_cast<Elf_Addr>(rel->r_offset + si->load_bias);
Elf_Addr sym_addr = 0;
unsigned type = ELFW(R_TYPE)(rel->r_info);
// TODO: don't use unsigned for 'sym'. Use uint32_t or ElfW(Addr) instead.
unsigned sym = ELFW(R_SYM)(rel->r_info);
ElfW(Addr) reloc = static_cast<ElfW(Addr)>(rel->r_offset + si->load_bias);
ElfW(Addr) sym_addr = 0;
char* sym_name = NULL;
DEBUG("Processing '%s' relocation at index %zd", si->name, idx);
@ -1266,7 +1233,7 @@ static int soinfo_relocate(soinfo* si, Elf_Rel* rel, unsigned count,
}
} else {
// We got a definition.
sym_addr = static_cast<Elf_Addr>(s->st_value + lsi->load_bias);
sym_addr = static_cast<ElfW(Addr)>(s->st_value + lsi->load_bias);
}
count_relocation(kRelocSymbol);
} else {
@ -1279,26 +1246,26 @@ static int soinfo_relocate(soinfo* si, Elf_Rel* rel, unsigned count,
count_relocation(kRelocAbsolute);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
*reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
*reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
break;
case R_ARM_GLOB_DAT:
count_relocation(kRelocAbsolute);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
*reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
*reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
break;
case R_ARM_ABS32:
count_relocation(kRelocAbsolute);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO ABS %08x <- %08x %s", reloc, sym_addr, sym_name);
*reinterpret_cast<Elf_Addr*>(reloc) += sym_addr;
*reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
break;
case R_ARM_REL32:
count_relocation(kRelocRelative);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x - %08x %s",
reloc, sym_addr, rel->r_offset, sym_name);
*reinterpret_cast<Elf_Addr*>(reloc) += sym_addr - rel->r_offset;
*reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr - rel->r_offset;
break;
case R_ARM_COPY:
if ((si->flags & FLAG_EXE) == 0) {
@ -1320,7 +1287,7 @@ static int soinfo_relocate(soinfo* si, Elf_Rel* rel, unsigned count,
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO %08x <- %d @ %08x %s", reloc, s->st_size, sym_addr, sym_name);
if (reloc == sym_addr) {
Elf_Sym *src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);
ElfW(Sym)* src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);
if (src == NULL) {
DL_ERR("%s R_ARM_COPY relocation source cannot be resolved", si->name);
@ -1347,26 +1314,26 @@ static int soinfo_relocate(soinfo* si, Elf_Rel* rel, unsigned count,
count_relocation(kRelocAbsolute);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO JMP_SLOT %08x <- %08x %s", reloc, sym_addr, sym_name);
*reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
*reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
break;
case R_386_GLOB_DAT:
count_relocation(kRelocAbsolute);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO GLOB_DAT %08x <- %08x %s", reloc, sym_addr, sym_name);
*reinterpret_cast<Elf_Addr*>(reloc) = sym_addr;
*reinterpret_cast<ElfW(Addr)*>(reloc) = sym_addr;
break;
case R_386_32:
count_relocation(kRelocRelative);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO R_386_32 %08x <- +%08x %s", reloc, sym_addr, sym_name);
*reinterpret_cast<Elf_Addr*>(reloc) += sym_addr;
*reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
break;
case R_386_PC32:
count_relocation(kRelocRelative);
MARK(rel->r_offset);
TRACE_TYPE(RELO, "RELO R_386_PC32 %08x <- +%08x (%08x - %08x) %s",
reloc, (sym_addr - reloc), sym_addr, reloc, sym_name);
*reinterpret_cast<Elf_Addr*>(reloc) += (sym_addr - reloc);
*reinterpret_cast<ElfW(Addr)*>(reloc) += (sym_addr - reloc);
break;
#elif defined(__mips__)
case R_MIPS_REL32:
@ -1375,9 +1342,9 @@ static int soinfo_relocate(soinfo* si, Elf_Rel* rel, unsigned count,
TRACE_TYPE(RELO, "RELO REL32 %08x <- %08x %s",
reloc, sym_addr, (sym_name) ? sym_name : "*SECTIONHDR*");
if (s) {
*reinterpret_cast<Elf_Addr*>(reloc) += sym_addr;
*reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr;
} else {
*reinterpret_cast<Elf_Addr*>(reloc) += si->base;
*reinterpret_cast<ElfW(Addr)*>(reloc) += si->base;
}
break;
#endif
@ -1395,7 +1362,7 @@ static int soinfo_relocate(soinfo* si, Elf_Rel* rel, unsigned count,
}
TRACE_TYPE(RELO, "RELO RELATIVE %p <- +%p",
reinterpret_cast<void*>(reloc), reinterpret_cast<void*>(si->base));
*reinterpret_cast<Elf_Addr*>(reloc) += si->base;
*reinterpret_cast<ElfW(Addr)*>(reloc) += si->base;
break;
default:
@ -1416,7 +1383,7 @@ static bool mips_relocate_got(soinfo* si, soinfo* needed[]) {
unsigned local_gotno = si->mips_local_gotno;
unsigned gotsym = si->mips_gotsym;
unsigned symtabno = si->mips_symtabno;
Elf_Sym* symtab = si->symtab;
ElfW(Sym)* symtab = si->symtab;
/*
* got[0] is address of lazy resolver function
@ -1441,11 +1408,11 @@ static bool mips_relocate_got(soinfo* si, soinfo* needed[]) {
}
/* Now for the global GOT entries */
Elf_Sym* sym = symtab + gotsym;
ElfW(Sym)* sym = symtab + gotsym;
got = si->plt_got + local_gotno;
for (size_t g = gotsym; g < symtabno; g++, sym++, got++) {
const char* sym_name;
Elf_Sym* s;
ElfW(Sym)* s;
soinfo* lsi;
/* This is an undefined reference... try to locate it */
@ -1536,7 +1503,7 @@ void soinfo::CallConstructors() {
}
if (dynamic != NULL) {
for (Elf_Dyn* d = dynamic; d->d_tag != DT_NULL; ++d) {
for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
if (d->d_tag == DT_NEEDED) {
const char* library_name = strtab + d->d_un.d_val;
TRACE("\"%s\": calling constructors in DT_NEEDED \"%s\"", name, library_name);
@ -1625,8 +1592,8 @@ static int nullify_closed_stdio() {
static bool soinfo_link_image(soinfo* si) {
/* "base" might wrap around UINT32_MAX. */
Elf_Addr base = si->load_bias;
const Elf_Phdr *phdr = si->phdr;
ElfW(Addr) base = si->load_bias;
const ElfW(Phdr)* phdr = si->phdr;
int phnum = si->phnum;
bool relocating_linker = (si->flags & FLAG_LINKER) != 0;
@ -1638,7 +1605,7 @@ static bool soinfo_link_image(soinfo* si) {
/* Extract dynamic section */
size_t dynamic_count;
Elf_Word dynamic_flags;
ElfW(Word) dynamic_flags;
phdr_table_get_dynamic_section(phdr, phnum, base, &si->dynamic,
&dynamic_count, &dynamic_flags);
if (si->dynamic == NULL) {
@ -1659,7 +1626,7 @@ static bool soinfo_link_image(soinfo* si) {
// Extract useful information from dynamic section.
uint32_t needed_count = 0;
for (Elf_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
DEBUG("d = %p, d[0](tag) = %p d[1](val) = %p",
d, reinterpret_cast<void*>(d->d_tag), reinterpret_cast<void*>(d->d_un.d_val));
switch (d->d_tag) {
@ -1673,7 +1640,7 @@ static bool soinfo_link_image(soinfo* si) {
si->strtab = (const char *) (base + d->d_un.d_ptr);
break;
case DT_SYMTAB:
si->symtab = (Elf_Sym *) (base + d->d_un.d_ptr);
si->symtab = (ElfW(Sym)*) (base + d->d_un.d_ptr);
break;
#if !defined(__LP64__)
case DT_PLTREL:
@ -1685,16 +1652,16 @@ static bool soinfo_link_image(soinfo* si) {
#endif
case DT_JMPREL:
#if defined(USE_RELA)
si->plt_rela = (Elf_Rela*) (base + d->d_un.d_ptr);
si->plt_rela = (ElfW(Rela)*) (base + d->d_un.d_ptr);
#else
si->plt_rel = (Elf_Rel*) (base + d->d_un.d_ptr);
si->plt_rel = (ElfW(Rel)*) (base + d->d_un.d_ptr);
#endif
break;
case DT_PLTRELSZ:
#if defined(USE_RELA)
si->plt_rela_count = d->d_un.d_val / sizeof(Elf_Rela);
si->plt_rela_count = d->d_un.d_val / sizeof(ElfW(Rela));
#else
si->plt_rel_count = d->d_un.d_val / sizeof(Elf_Rel);
si->plt_rel_count = d->d_un.d_val / sizeof(ElfW(Rel));
#endif
break;
#if !defined(__LP64__)
@ -1712,10 +1679,10 @@ static bool soinfo_link_image(soinfo* si) {
break;
#if defined(USE_RELA)
case DT_RELA:
si->rela = (Elf_Rela*) (base + d->d_un.d_ptr);
si->rela = (ElfW(Rela)*) (base + d->d_un.d_ptr);
break;
case DT_RELASZ:
si->rela_count = d->d_un.d_val / sizeof(Elf_Rela);
si->rela_count = d->d_un.d_val / sizeof(ElfW(Rela));
break;
case DT_REL:
DL_ERR("unsupported DT_REL in \"%s\"", si->name);
@ -1725,10 +1692,10 @@ static bool soinfo_link_image(soinfo* si) {
return false;
#else
case DT_REL:
si->rel = (Elf_Rel*) (base + d->d_un.d_ptr);
si->rel = (ElfW(Rel)*) (base + d->d_un.d_ptr);
break;
case DT_RELSZ:
si->rel_count = d->d_un.d_val / sizeof(Elf_Rel);
si->rel_count = d->d_un.d_val / sizeof(ElfW(Rel));
break;
case DT_RELA:
DL_ERR("unsupported DT_RELA in \"%s\"", si->name);
@ -1747,21 +1714,21 @@ static bool soinfo_link_image(soinfo* si) {
DEBUG("%s constructors (DT_INIT_ARRAY) found at %p", si->name, si->init_array);
break;
case DT_INIT_ARRAYSZ:
si->init_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf_Addr);
si->init_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
break;
case DT_FINI_ARRAY:
si->fini_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
DEBUG("%s destructors (DT_FINI_ARRAY) found at %p", si->name, si->fini_array);
break;
case DT_FINI_ARRAYSZ:
si->fini_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf_Addr);
si->fini_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
break;
case DT_PREINIT_ARRAY:
si->preinit_array = reinterpret_cast<linker_function_t*>(base + d->d_un.d_ptr);
DEBUG("%s constructors (DT_PREINIT_ARRAY) found at %p", si->name, si->preinit_array);
break;
case DT_PREINIT_ARRAYSZ:
si->preinit_array_count = ((unsigned)d->d_un.d_val) / sizeof(Elf_Addr);
si->preinit_array_count = ((unsigned)d->d_un.d_val) / sizeof(ElfW(Addr));
break;
case DT_TEXTREL:
#if defined(__LP64__)
@ -1868,7 +1835,7 @@ static bool soinfo_link_image(soinfo* si) {
soinfo** needed = (soinfo**) alloca((1 + needed_count) * sizeof(soinfo*));
soinfo** pneeded = needed;
for (Elf_Dyn* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
for (ElfW(Dyn)* d = si->dynamic; d->d_tag != DT_NULL; ++d) {
if (d->d_tag == DT_NEEDED) {
const char* library_name = si->strtab + d->d_un.d_val;
DEBUG("%s needs %s", si->name, library_name);
@ -1964,21 +1931,21 @@ static bool soinfo_link_image(soinfo* si) {
*/
static void add_vdso(KernelArgumentBlock& args UNUSED) {
#if defined(AT_SYSINFO_EHDR)
Elf_Ehdr* ehdr_vdso = reinterpret_cast<Elf_Ehdr*>(args.getauxval(AT_SYSINFO_EHDR));
if (ehdr_vdso == NULL) {
return;
}
ElfW(Ehdr)* ehdr_vdso = reinterpret_cast<ElfW(Ehdr)*>(args.getauxval(AT_SYSINFO_EHDR));
if (ehdr_vdso == NULL) {
return;
}
soinfo* si = soinfo_alloc("[vdso]");
soinfo* si = soinfo_alloc("[vdso]");
si->phdr = reinterpret_cast<Elf_Phdr*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
si->phnum = ehdr_vdso->e_phnum;
si->base = reinterpret_cast<Elf_Addr>(ehdr_vdso);
si->size = phdr_table_get_load_size(si->phdr, si->phnum);
si->flags = 0;
si->load_bias = get_elf_exec_load_bias(ehdr_vdso);
si->phdr = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(ehdr_vdso) + ehdr_vdso->e_phoff);
si->phnum = ehdr_vdso->e_phnum;
si->base = reinterpret_cast<ElfW(Addr)>(ehdr_vdso);
si->size = phdr_table_get_load_size(si->phdr, si->phnum);
si->flags = 0;
si->load_bias = get_elf_exec_load_bias(ehdr_vdso);
soinfo_link_image(si);
soinfo_link_image(si);
#endif
}
@ -1987,7 +1954,7 @@ static void add_vdso(KernelArgumentBlock& args UNUSED) {
* fixed it's own GOT. It is safe to make references to externs
* and other non-local data at this point.
*/
static Elf_Addr __linker_init_post_relocation(KernelArgumentBlock& args, Elf_Addr linker_base) {
static ElfW(Addr) __linker_init_post_relocation(KernelArgumentBlock& args, ElfW(Addr) linker_base) {
/* NOTE: we store the args pointer on a special location
* of the temporary TLS area in order to pass it to
* the C Library's runtime initializer.
@ -2071,15 +2038,15 @@ static Elf_Addr __linker_init_post_relocation(KernelArgumentBlock& args, Elf_Add
* warning: .dynamic section for "/system/bin/linker" is not at the
* expected address (wrong library or version mismatch?)
*/
Elf_Ehdr *elf_hdr = (Elf_Ehdr *) linker_base;
Elf_Phdr *phdr = (Elf_Phdr*)((unsigned char*) linker_base + elf_hdr->e_phoff);
ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_base);
ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>((unsigned char*) linker_base + elf_hdr->e_phoff);
phdr_table_get_dynamic_section(phdr, elf_hdr->e_phnum, linker_base,
&linker_soinfo.dynamic, NULL, NULL);
insert_soinfo_into_debug_map(&linker_soinfo);
}
// Extract information passed from the kernel.
si->phdr = reinterpret_cast<Elf_Phdr*>(args.getauxval(AT_PHDR));
si->phdr = reinterpret_cast<ElfW(Phdr)*>(args.getauxval(AT_PHDR));
si->phnum = args.getauxval(AT_PHNUM);
si->entry = args.getauxval(AT_ENTRY);
@ -2092,8 +2059,8 @@ static Elf_Addr __linker_init_post_relocation(KernelArgumentBlock& args, Elf_Add
si->load_bias = 0;
for (size_t i = 0; i < si->phnum; ++i) {
if (si->phdr[i].p_type == PT_PHDR) {
si->load_bias = reinterpret_cast<Elf_Addr>(si->phdr) - si->phdr[i].p_vaddr;
si->base = reinterpret_cast<Elf_Addr>(si->phdr) - si->phdr[i].p_offset;
si->load_bias = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_vaddr;
si->base = reinterpret_cast<ElfW(Addr)>(si->phdr) - si->phdr[i].p_offset;
break;
}
}
@ -2183,14 +2150,14 @@ static Elf_Addr __linker_init_post_relocation(KernelArgumentBlock& args, Elf_Add
* load bias, i.e. add the value of any p_vaddr in the file to get
* the corresponding address in memory.
*/
static Elf_Addr get_elf_exec_load_bias(const Elf_Ehdr* elf) {
Elf_Addr offset = elf->e_phoff;
const Elf_Phdr* phdr_table = (const Elf_Phdr*)((char*)elf + offset);
const Elf_Phdr* phdr_end = phdr_table + elf->e_phnum;
static ElfW(Addr) get_elf_exec_load_bias(const ElfW(Ehdr)* elf) {
ElfW(Addr) offset = elf->e_phoff;
const ElfW(Phdr)* phdr_table = reinterpret_cast<const ElfW(Phdr)*>((char*)elf + offset);
const ElfW(Phdr)* phdr_end = phdr_table + elf->e_phnum;
for (const Elf_Phdr* phdr = phdr_table; phdr < phdr_end; phdr++) {
for (const ElfW(Phdr)* phdr = phdr_table; phdr < phdr_end; phdr++) {
if (phdr->p_type == PT_LOAD) {
return reinterpret_cast<Elf_Addr>(elf) + phdr->p_offset - phdr->p_vaddr;
return reinterpret_cast<ElfW(Addr)>(elf) + phdr->p_offset - phdr->p_vaddr;
}
}
return 0;
@ -2205,12 +2172,12 @@ static Elf_Addr get_elf_exec_load_bias(const Elf_Ehdr* elf) {
* relocations, any attempt to reference an extern variable, extern
* function, or other GOT reference will generate a segfault.
*/
extern "C" Elf_Addr __linker_init(void* raw_args) {
extern "C" ElfW(Addr) __linker_init(void* raw_args) {
KernelArgumentBlock args(raw_args);
Elf_Addr linker_addr = args.getauxval(AT_BASE);
Elf_Ehdr* elf_hdr = reinterpret_cast<Elf_Ehdr*>(linker_addr);
Elf_Phdr* phdr = (Elf_Phdr*)((unsigned char*) linker_addr + elf_hdr->e_phoff);
ElfW(Addr) linker_addr = args.getauxval(AT_BASE);
ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(linker_addr);
ElfW(Phdr)* phdr = reinterpret_cast<ElfW(Phdr)*>((unsigned char*) linker_addr + elf_hdr->e_phoff);
soinfo linker_so;
memset(&linker_so, 0, sizeof(soinfo));
@ -2239,7 +2206,7 @@ extern "C" Elf_Addr __linker_init(void* raw_args) {
// We have successfully fixed our own relocations. It's safe to run
// the main part of the linker now.
args.abort_message_ptr = &gAbortMessage;
Elf_Addr start_address = __linker_init_post_relocation(args, linker_addr);
ElfW(Addr) start_address = __linker_init_post_relocation(args, linker_addr);
set_soinfo_pool_protection(PROT_READ);

View File

@ -50,6 +50,11 @@
__libc_format_fd(2, "\n"); \
} while (false)
#if defined(__LP64__)
#define ELFW(what) ELF64_ ## what
#else
#define ELFW(what) ELF32_ ## what
#endif
// Returns the address of the page containing address 'x'.
#define PAGE_START(x) ((x) & PAGE_MASK)
@ -77,17 +82,17 @@ typedef void (*linker_function_t)();
struct soinfo {
public:
char name[SOINFO_NAME_LEN];
const Elf_Phdr* phdr;
const ElfW(Phdr)* phdr;
size_t phnum;
Elf_Addr entry;
Elf_Addr base;
ElfW(Addr) entry;
ElfW(Addr) base;
size_t size;
#ifndef __LP64__
uint32_t unused1; // DO NOT USE, maintained for compatibility.
#endif
Elf_Dyn* dynamic;
ElfW(Dyn)* dynamic;
#ifndef __LP64__
uint32_t unused2; // DO NOT USE, maintained for compatibility
@ -98,7 +103,7 @@ struct soinfo {
unsigned flags;
const char* strtab;
Elf_Sym* symtab;
ElfW(Sym)* symtab;
size_t nbucket;
size_t nchain;
@ -112,16 +117,16 @@ struct soinfo {
#endif
#if defined(USE_RELA)
Elf_Rela* plt_rela;
ElfW(Rela)* plt_rela;
size_t plt_rela_count;
Elf_Rela* rela;
ElfW(Rela)* rela;
size_t rela_count;
#else
Elf_Rel* plt_rel;
ElfW(Rel)* plt_rel;
size_t plt_rel_count;
Elf_Rel* rel;
ElfW(Rel)* rel;
size_t rel_count;
#endif
@ -153,7 +158,7 @@ struct soinfo {
// When you read a virtual address from the ELF file, add this
// value to get the corresponding address in the process' address space.
Elf_Addr load_bias;
ElfW(Addr) load_bias;
#if !defined(__LP64__)
bool has_text_relocations;
@ -176,11 +181,11 @@ void do_android_update_LD_LIBRARY_PATH(const char* ld_library_path);
soinfo* do_dlopen(const char* name, int flags);
int do_dlclose(soinfo* si);
Elf_Sym* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start);
ElfW(Sym)* dlsym_linear_lookup(const char* name, soinfo** found, soinfo* start);
soinfo* find_containing_library(const void* addr);
Elf_Sym* dladdr_find_symbol(soinfo* si, const void* addr);
Elf_Sym* dlsym_handle_lookup(soinfo* si, const char* name);
ElfW(Sym)* dladdr_find_symbol(soinfo* si, const void* addr);
ElfW(Sym)* dlsym_handle_lookup(soinfo* si, const char* name);
void debuggerd_init();
extern "C" abort_msg_t* gAbortMessage;

View File

@ -50,7 +50,7 @@
p_vaddr -> segment's virtual address
p_flags -> segment flags (e.g. readable, writable, executable)
We will ignore the p_paddr and p_align fields of Elf_Phdr for now.
We will ignore the p_paddr and p_align fields of ElfW(Phdr) for now.
The loadable segments can be seen as a list of [p_vaddr ... p_vaddr+p_memsz)
ranges of virtual addresses. A few rules apply:
@ -217,14 +217,14 @@ bool ElfReader::ReadProgramHeader() {
// Like the kernel, we only accept program header tables that
// are smaller than 64KiB.
if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(Elf_Phdr)) {
if (phdr_num_ < 1 || phdr_num_ > 65536/sizeof(ElfW(Phdr))) {
DL_ERR("\"%s\" has invalid e_phnum: %zd", name_, phdr_num_);
return false;
}
Elf_Addr page_min = PAGE_START(header_.e_phoff);
Elf_Addr page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(Elf_Phdr)));
Elf_Addr page_offset = PAGE_OFFSET(header_.e_phoff);
ElfW(Addr) page_min = PAGE_START(header_.e_phoff);
ElfW(Addr) page_max = PAGE_END(header_.e_phoff + (phdr_num_ * sizeof(ElfW(Phdr))));
ElfW(Addr) page_offset = PAGE_OFFSET(header_.e_phoff);
phdr_size_ = page_max - page_min;
@ -235,7 +235,7 @@ bool ElfReader::ReadProgramHeader() {
}
phdr_mmap_ = mmap_result;
phdr_table_ = reinterpret_cast<Elf_Phdr*>(reinterpret_cast<char*>(mmap_result) + page_offset);
phdr_table_ = reinterpret_cast<ElfW(Phdr)*>(reinterpret_cast<char*>(mmap_result) + page_offset);
return true;
}
@ -249,50 +249,50 @@ bool ElfReader::ReadProgramHeader() {
* set to the minimum and maximum addresses of pages to be reserved,
* or 0 if there is nothing to load.
*/
size_t phdr_table_get_load_size(const Elf_Phdr* phdr_table, size_t phdr_count,
Elf_Addr* out_min_vaddr,
Elf_Addr* out_max_vaddr) {
Elf_Addr min_vaddr = UINTPTR_MAX;
Elf_Addr max_vaddr = 0;
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr)* out_min_vaddr,
ElfW(Addr)* out_max_vaddr) {
ElfW(Addr) min_vaddr = UINTPTR_MAX;
ElfW(Addr) max_vaddr = 0;
bool found_pt_load = false;
for (size_t i = 0; i < phdr_count; ++i) {
const Elf_Phdr* phdr = &phdr_table[i];
bool found_pt_load = false;
for (size_t i = 0; i < phdr_count; ++i) {
const ElfW(Phdr)* phdr = &phdr_table[i];
if (phdr->p_type != PT_LOAD) {
continue;
}
found_pt_load = true;
if (phdr->p_vaddr < min_vaddr) {
min_vaddr = phdr->p_vaddr;
}
if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
max_vaddr = phdr->p_vaddr + phdr->p_memsz;
}
if (phdr->p_type != PT_LOAD) {
continue;
}
if (!found_pt_load) {
min_vaddr = 0;
found_pt_load = true;
if (phdr->p_vaddr < min_vaddr) {
min_vaddr = phdr->p_vaddr;
}
min_vaddr = PAGE_START(min_vaddr);
max_vaddr = PAGE_END(max_vaddr);
if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
max_vaddr = phdr->p_vaddr + phdr->p_memsz;
}
}
if (!found_pt_load) {
min_vaddr = 0;
}
if (out_min_vaddr != NULL) {
*out_min_vaddr = min_vaddr;
}
if (out_max_vaddr != NULL) {
*out_max_vaddr = max_vaddr;
}
return max_vaddr - min_vaddr;
min_vaddr = PAGE_START(min_vaddr);
max_vaddr = PAGE_END(max_vaddr);
if (out_min_vaddr != NULL) {
*out_min_vaddr = min_vaddr;
}
if (out_max_vaddr != NULL) {
*out_max_vaddr = max_vaddr;
}
return max_vaddr - min_vaddr;
}
// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
bool ElfReader::ReserveAddressSpace() {
Elf_Addr min_vaddr;
ElfW(Addr) min_vaddr;
load_size_ = phdr_table_get_load_size(phdr_table_, phdr_num_, &min_vaddr);
if (load_size_ == 0) {
DL_ERR("\"%s\" has no loadable segments", name_);
@ -314,27 +314,27 @@ bool ElfReader::ReserveAddressSpace() {
bool ElfReader::LoadSegments() {
for (size_t i = 0; i < phdr_num_; ++i) {
const Elf_Phdr* phdr = &phdr_table_[i];
const ElfW(Phdr)* phdr = &phdr_table_[i];
if (phdr->p_type != PT_LOAD) {
continue;
}
// Segment addresses in memory.
Elf_Addr seg_start = phdr->p_vaddr + load_bias_;
Elf_Addr seg_end = seg_start + phdr->p_memsz;
ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
ElfW(Addr) seg_end = seg_start + phdr->p_memsz;
Elf_Addr seg_page_start = PAGE_START(seg_start);
Elf_Addr seg_page_end = PAGE_END(seg_end);
ElfW(Addr) seg_page_start = PAGE_START(seg_start);
ElfW(Addr) seg_page_end = PAGE_END(seg_end);
Elf_Addr seg_file_end = seg_start + phdr->p_filesz;
ElfW(Addr) seg_file_end = seg_start + phdr->p_filesz;
// File offsets.
Elf_Addr file_start = phdr->p_offset;
Elf_Addr file_end = file_start + phdr->p_filesz;
ElfW(Addr) file_start = phdr->p_offset;
ElfW(Addr) file_end = file_start + phdr->p_filesz;
Elf_Addr file_page_start = PAGE_START(file_start);
Elf_Addr file_length = file_end - file_page_start;
ElfW(Addr) file_page_start = PAGE_START(file_start);
ElfW(Addr) file_length = file_end - file_page_start;
if (file_length != 0) {
void* seg_addr = mmap((void*)seg_page_start,
@ -381,26 +381,27 @@ bool ElfReader::LoadSegments() {
* with optional extra flags (i.e. really PROT_WRITE). Used by
* phdr_table_protect_segments and phdr_table_unprotect_segments.
*/
static int _phdr_table_set_load_prot(const Elf_Phdr* phdr_table, size_t phdr_count,
Elf_Addr load_bias, int extra_prot_flags) {
const Elf_Phdr* phdr = phdr_table;
const Elf_Phdr* phdr_limit = phdr + phdr_count;
static int _phdr_table_set_load_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, int extra_prot_flags) {
const ElfW(Phdr)* phdr = phdr_table;
const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
for (; phdr < phdr_limit; phdr++) {
if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0)
continue;
Elf_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
Elf_Addr seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
int ret = mprotect((void*)seg_page_start,
seg_page_end - seg_page_start,
PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
if (ret < 0) {
return -1;
}
for (; phdr < phdr_limit; phdr++) {
if (phdr->p_type != PT_LOAD || (phdr->p_flags & PF_W) != 0) {
continue;
}
return 0;
ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
int ret = mprotect((void*)seg_page_start,
seg_page_end - seg_page_start,
PFLAGS_TO_PROT(phdr->p_flags) | extra_prot_flags);
if (ret < 0) {
return -1;
}
}
return 0;
}
/* Restore the original protection modes for all loadable segments.
@ -414,8 +415,8 @@ static int _phdr_table_set_load_prot(const Elf_Phdr* phdr_table, size_t phdr_cou
* Return:
* 0 on error, -1 on failure (error code in errno).
*/
int phdr_table_protect_segments(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias) {
return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, 0);
}
/* Change the protection of all loaded segments in memory to writable.
@ -434,50 +435,50 @@ int phdr_table_protect_segments(const Elf_Phdr* phdr_table, size_t phdr_count, E
* Return:
* 0 on error, -1 on failure (error code in errno).
*/
int phdr_table_unprotect_segments(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias) {
return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
return _phdr_table_set_load_prot(phdr_table, phdr_count, load_bias, PROT_WRITE);
}
/* Used internally by phdr_table_protect_gnu_relro and
* phdr_table_unprotect_gnu_relro.
*/
static int _phdr_table_set_gnu_relro_prot(const Elf_Phdr* phdr_table, size_t phdr_count,
Elf_Addr load_bias, int prot_flags) {
const Elf_Phdr* phdr = phdr_table;
const Elf_Phdr* phdr_limit = phdr + phdr_count;
static int _phdr_table_set_gnu_relro_prot(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias, int prot_flags) {
const ElfW(Phdr)* phdr = phdr_table;
const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
if (phdr->p_type != PT_GNU_RELRO)
continue;
/* Tricky: what happens when the relro segment does not start
* or end at page boundaries?. We're going to be over-protective
* here and put every page touched by the segment as read-only.
*
* This seems to match Ian Lance Taylor's description of the
* feature at http://www.airs.com/blog/archives/189.
*
* Extract:
* Note that the current dynamic linker code will only work
* correctly if the PT_GNU_RELRO segment starts on a page
* boundary. This is because the dynamic linker rounds the
* p_vaddr field down to the previous page boundary. If
* there is anything on the page which should not be read-only,
* the program is likely to fail at runtime. So in effect the
* linker must only emit a PT_GNU_RELRO segment if it ensures
* that it starts on a page boundary.
*/
Elf_Addr seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
Elf_Addr seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
int ret = mprotect((void*)seg_page_start,
seg_page_end - seg_page_start,
prot_flags);
if (ret < 0) {
return -1;
}
for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
if (phdr->p_type != PT_GNU_RELRO) {
continue;
}
return 0;
// Tricky: what happens when the relro segment does not start
// or end at page boundaries? We're going to be over-protective
// here and put every page touched by the segment as read-only.
// This seems to match Ian Lance Taylor's description of the
// feature at http://www.airs.com/blog/archives/189.
// Extract:
// Note that the current dynamic linker code will only work
// correctly if the PT_GNU_RELRO segment starts on a page
// boundary. This is because the dynamic linker rounds the
// p_vaddr field down to the previous page boundary. If
// there is anything on the page which should not be read-only,
// the program is likely to fail at runtime. So in effect the
// linker must only emit a PT_GNU_RELRO segment if it ensures
// that it starts on a page boundary.
ElfW(Addr) seg_page_start = PAGE_START(phdr->p_vaddr) + load_bias;
ElfW(Addr) seg_page_end = PAGE_END(phdr->p_vaddr + phdr->p_memsz) + load_bias;
int ret = mprotect((void*)seg_page_start,
seg_page_end - seg_page_start,
prot_flags);
if (ret < 0) {
return -1;
}
}
return 0;
}
/* Apply GNU relro protection if specified by the program header. This will
@ -496,8 +497,8 @@ static int _phdr_table_set_gnu_relro_prot(const Elf_Phdr* phdr_table, size_t phd
* Return:
* 0 on error, -1 on failure (error code in errno).
*/
int phdr_table_protect_gnu_relro(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias) {
return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias) {
return _phdr_table_set_gnu_relro_prot(phdr_table, phdr_count, load_bias, PROT_READ);
}
#if defined(__arm__)
@ -519,23 +520,24 @@ int phdr_table_protect_gnu_relro(const Elf_Phdr* phdr_table, size_t phdr_count,
* Return:
* 0 on error, -1 on failure (_no_ error code in errno)
*/
int phdr_table_get_arm_exidx(const Elf_Phdr* phdr_table, size_t phdr_count,
Elf_Addr load_bias,
Elf_Addr** arm_exidx, unsigned* arm_exidx_count) {
const Elf_Phdr* phdr = phdr_table;
const Elf_Phdr* phdr_limit = phdr + phdr_count;
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias,
ElfW(Addr)** arm_exidx, unsigned* arm_exidx_count) {
const ElfW(Phdr)* phdr = phdr_table;
const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
if (phdr->p_type != PT_ARM_EXIDX)
continue;
*arm_exidx = (Elf_Addr*)(load_bias + phdr->p_vaddr);
*arm_exidx_count = (unsigned)(phdr->p_memsz / 8);
return 0;
for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
if (phdr->p_type != PT_ARM_EXIDX) {
continue;
}
*arm_exidx = NULL;
*arm_exidx_count = 0;
return -1;
*arm_exidx = reinterpret_cast<ElfW(Addr)*>(load_bias + phdr->p_vaddr);
*arm_exidx_count = (unsigned)(phdr->p_memsz / 8);
return 0;
}
*arm_exidx = NULL;
*arm_exidx_count = 0;
return -1;
}
#endif
@ -553,40 +555,40 @@ int phdr_table_get_arm_exidx(const Elf_Phdr* phdr_table, size_t phdr_count,
* Return:
* void
*/
void phdr_table_get_dynamic_section(const Elf_Phdr* phdr_table, size_t phdr_count,
Elf_Addr load_bias,
Elf_Dyn** dynamic, size_t* dynamic_count, Elf_Word* dynamic_flags) {
const Elf_Phdr* phdr = phdr_table;
const Elf_Phdr* phdr_limit = phdr + phdr_count;
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias,
ElfW(Dyn)** dynamic, size_t* dynamic_count, ElfW(Word)* dynamic_flags) {
const ElfW(Phdr)* phdr = phdr_table;
const ElfW(Phdr)* phdr_limit = phdr + phdr_count;
for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
if (phdr->p_type != PT_DYNAMIC) {
continue;
}
*dynamic = reinterpret_cast<Elf_Dyn*>(load_bias + phdr->p_vaddr);
if (dynamic_count) {
*dynamic_count = (unsigned)(phdr->p_memsz / 8);
}
if (dynamic_flags) {
*dynamic_flags = phdr->p_flags;
}
return;
for (phdr = phdr_table; phdr < phdr_limit; phdr++) {
if (phdr->p_type != PT_DYNAMIC) {
continue;
}
*dynamic = NULL;
*dynamic = reinterpret_cast<ElfW(Dyn)*>(load_bias + phdr->p_vaddr);
if (dynamic_count) {
*dynamic_count = 0;
*dynamic_count = (unsigned)(phdr->p_memsz / 8);
}
if (dynamic_flags) {
*dynamic_flags = phdr->p_flags;
}
return;
}
*dynamic = NULL;
if (dynamic_count) {
*dynamic_count = 0;
}
}
// Returns the address of the program header table as it appears in the loaded
// segments in memory. This is in contrast with 'phdr_table_' which
// is temporary and will be released before the library is relocated.
bool ElfReader::FindPhdr() {
const Elf_Phdr* phdr_limit = phdr_table_ + phdr_num_;
const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
// If there is a PT_PHDR, use it directly.
for (const Elf_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
if (phdr->p_type == PT_PHDR) {
return CheckPhdr(load_bias_ + phdr->p_vaddr);
}
@ -595,13 +597,13 @@ bool ElfReader::FindPhdr() {
// Otherwise, check the first loadable segment. If its file offset
// is 0, it starts with the ELF header, and we can trivially find the
// loaded program header from it.
for (const Elf_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
for (const ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
if (phdr->p_type == PT_LOAD) {
if (phdr->p_offset == 0) {
Elf_Addr elf_addr = load_bias_ + phdr->p_vaddr;
const Elf_Ehdr* ehdr = (const Elf_Ehdr*)(void*)elf_addr;
Elf_Addr offset = ehdr->e_phoff;
return CheckPhdr((Elf_Addr)ehdr + offset);
ElfW(Addr) elf_addr = load_bias_ + phdr->p_vaddr;
const ElfW(Ehdr)* ehdr = (const ElfW(Ehdr)*)(void*)elf_addr;
ElfW(Addr) offset = ehdr->e_phoff;
return CheckPhdr((ElfW(Addr))ehdr + offset);
}
break;
}
@ -614,17 +616,17 @@ bool ElfReader::FindPhdr() {
// Ensures that our program header is actually within a loadable
// segment. This should help catch badly-formed ELF files that
// would cause the linker to crash later when trying to access it.
bool ElfReader::CheckPhdr(Elf_Addr loaded) {
const Elf_Phdr* phdr_limit = phdr_table_ + phdr_num_;
Elf_Addr loaded_end = loaded + (phdr_num_ * sizeof(Elf_Phdr));
for (Elf_Phdr* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
bool ElfReader::CheckPhdr(ElfW(Addr) loaded) {
const ElfW(Phdr)* phdr_limit = phdr_table_ + phdr_num_;
ElfW(Addr) loaded_end = loaded + (phdr_num_ * sizeof(ElfW(Phdr)));
for (ElfW(Phdr)* phdr = phdr_table_; phdr < phdr_limit; ++phdr) {
if (phdr->p_type != PT_LOAD) {
continue;
}
Elf_Addr seg_start = phdr->p_vaddr + load_bias_;
Elf_Addr seg_end = phdr->p_filesz + seg_start;
ElfW(Addr) seg_start = phdr->p_vaddr + load_bias_;
ElfW(Addr) seg_end = phdr->p_filesz + seg_start;
if (seg_start <= loaded && loaded_end <= seg_end) {
loaded_phdr_ = reinterpret_cast<const Elf_Phdr*>(loaded);
loaded_phdr_ = reinterpret_cast<const ElfW(Phdr)*>(loaded);
return true;
}
}

View File

@ -45,10 +45,10 @@ class ElfReader {
bool Load();
size_t phdr_count() { return phdr_num_; }
Elf_Addr load_start() { return reinterpret_cast<Elf_Addr>(load_start_); }
ElfW(Addr) load_start() { return reinterpret_cast<ElfW(Addr)>(load_start_); }
size_t load_size() { return load_size_; }
Elf_Addr load_bias() { return load_bias_; }
const Elf_Phdr* loaded_phdr() { return loaded_phdr_; }
ElfW(Addr) load_bias() { return load_bias_; }
const ElfW(Phdr)* loaded_phdr() { return loaded_phdr_; }
private:
bool ReadElfHeader();
@ -57,46 +57,46 @@ class ElfReader {
bool ReserveAddressSpace();
bool LoadSegments();
bool FindPhdr();
bool CheckPhdr(Elf_Addr);
bool CheckPhdr(ElfW(Addr));
const char* name_;
int fd_;
Elf_Ehdr header_;
ElfW(Ehdr) header_;
size_t phdr_num_;
void* phdr_mmap_;
Elf_Phdr* phdr_table_;
Elf_Addr phdr_size_;
ElfW(Phdr)* phdr_table_;
ElfW(Addr) phdr_size_;
// First page of reserved address space.
void* load_start_;
// Size in bytes of reserved address space.
size_t load_size_;
// Load bias.
Elf_Addr load_bias_;
ElfW(Addr) load_bias_;
// Loaded phdr.
const Elf_Phdr* loaded_phdr_;
const ElfW(Phdr)* loaded_phdr_;
};
size_t phdr_table_get_load_size(const Elf_Phdr* phdr_table, size_t phdr_count,
Elf_Addr* min_vaddr = NULL, Elf_Addr* max_vaddr = NULL);
size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr)* min_vaddr = NULL, ElfW(Addr)* max_vaddr = NULL);
int phdr_table_protect_segments(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias);
int phdr_table_protect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias);
int phdr_table_unprotect_segments(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias);
int phdr_table_unprotect_segments(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias);
int phdr_table_protect_gnu_relro(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias);
int phdr_table_protect_gnu_relro(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias);
#if defined(__arm__)
int phdr_table_get_arm_exidx(const Elf_Phdr* phdr_table, size_t phdr_count, Elf_Addr load_bias,
Elf_Addr** arm_exidx, unsigned* arm_exidix_count);
int phdr_table_get_arm_exidx(const ElfW(Phdr)* phdr_table, size_t phdr_count, ElfW(Addr) load_bias,
                             ElfW(Addr)** arm_exidx, unsigned* arm_exidx_count);
#endif
void phdr_table_get_dynamic_section(const Elf_Phdr* phdr_table, size_t phdr_count,
Elf_Addr load_bias,
Elf_Dyn** dynamic, size_t* dynamic_count, Elf_Word* dynamic_flags);
void phdr_table_get_dynamic_section(const ElfW(Phdr)* phdr_table, size_t phdr_count,
ElfW(Addr) load_bias,
ElfW(Dyn)** dynamic, size_t* dynamic_count, ElfW(Word)* dynamic_flags);
#endif /* LINKER_PHDR_H */