Add function marks and size indications

Add a macro to annotate each function's start and end, using ENTRY and
END for every function. This allows valgrind (and presumably other
debugging tools) to use the debug symbols to trace the functions.

Change-Id: I5f09cef8e22fb356eb6f5cee952b031e567599b6
Author: Kenny Root
Date:   2011-02-16 11:55:58 -08:00
Parent: 3a3c1853ac
Commit: 420878c690

15 changed files with 83 additions and 131 deletions
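For illustration, the pattern applied in every file below replaces hand-written .globl/.type directives and labels with an ENTRY()/END() pair around each function body. A minimal sketch, using a hypothetical function name that is not part of this commit:

#include <machine/asm.h>

@ hypothetical example: ENTRY() emits the .globl/.type directives, the
@ label, and .fnstart; END() emits .fnend plus a .size directive, so
@ the symbol table records where the function ends as well as starts
ENTRY(example_return_zero)
    mov     r0, #0              @ return 0
    bx      lr
END(example_return_zero)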

libc/arch-arm/bionic/_exit_with_stack_teardown.S

@@ -26,15 +26,10 @@
  * SUCH DAMAGE.
  */
 #include <asm/unistd.h>
+#include <machine/asm.h>
-    .text
-    .type _exit_with_stack_teardown, #function
-    .globl _exit_with_stack_teardown
-    .align 4
 @ void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode)
-_exit_with_stack_teardown:
+ENTRY(_exit_with_stack_teardown)
 #if __ARM_EABI__
     mov     lr, r2
@@ -53,3 +48,4 @@ _exit_with_stack_teardown:
     @ exit() should never return, cause a crash if it does
     mov     r0, #0
     ldr     r0, [r0]
+END(_exit_with_stack_teardown)

libc/arch-arm/bionic/_setjmp.S

@@ -70,6 +70,7 @@ ENTRY(_setjmp)
     mov     r0, #0x00000000
     bx      lr
+END(_setjmp)
 .L_setjmp_magic:
     .word   _JB_MAGIC__SETJMP
@@ -109,3 +110,4 @@ botch:
     bl      PIC_SYM(_C_LABEL(longjmperror), PLT)
     bl      PIC_SYM(_C_LABEL(abort), PLT)
     b       . - 8       /* Cannot get here */
+END(_longjmp)

libc/arch-arm/bionic/atomics_arm.S

@@ -26,17 +26,9 @@
  * SUCH DAMAGE.
  */
 #include <sys/linux-syscalls.h>
+#include <machine/asm.h>
 #include <machine/cpu-features.h>
-    .global __atomic_cmpxchg
-    .type __atomic_cmpxchg, %function
-    .global __atomic_swap
-    .type __atomic_swap, %function
-    .global __atomic_dec
-    .type __atomic_dec, %function
-    .global __atomic_inc
-    .type __atomic_inc, %function
 #define FUTEX_WAIT 0
 #define FUTEX_WAKE 1
@@ -48,8 +40,7 @@
  */
 /* r0(addr) -> r0(old) */
-__atomic_dec:
-    .fnstart
+ENTRY(__atomic_dec)
     mov     r1, r0                      @ copy addr so we don't clobber it
 1:  ldrex   r0, [r1]                    @ load current value into r0
     sub     r2, r0, #1                  @ generate new value into r2
@@ -57,11 +48,10 @@ __atomic_dec:
     cmp     r3, #0                      @ success?
     bxeq    lr                          @ yes, return
     b       1b                          @ no, retry
-    .fnend
+END(__atomic_dec)
 /* r0(addr) -> r0(old) */
-__atomic_inc:
-    .fnstart
+ENTRY(__atomic_inc)
     mov     r1, r0
 1:  ldrex   r0, [r1]
     add     r2, r0, #1
@@ -69,11 +59,10 @@ __atomic_inc:
     cmp     r3, #0
     bxeq    lr
     b       1b
-    .fnend
+END(__atomic_inc)
 /* r0(old) r1(new) r2(addr) -> r0(zero_if_succeeded) */
-__atomic_cmpxchg:
-    .fnstart
+ENTRY(__atomic_cmpxchg)
 1:  mov     ip, #2                      @ ip=2 means "new != old"
     ldrex   r3, [r2]                    @ load current value into r3
     teq     r0, r3                      @ new == old?
@@ -82,18 +71,17 @@ __atomic_cmpxchg:
     beq     1b                          @ yes, retry
     mov     r0, ip                      @ return 0 on success, 2 on failure
     bx      lr
-    .fnend
+END(__atomic_cmpxchg)
 /* r0(new) r1(addr) -> r0(old) */
-__atomic_swap:
-    .fnstart
+ENTRY(__atomic_swap)
 1:  ldrex   r2, [r1]
     strex   r3, r0, [r1]
     teq     r3, #0
     bne     1b
     mov     r0, r2
     bx      lr
-    .fnend
+END(__atomic_swap)
 #else /*not defined __ARM_HAVE_LDREX_STREX*/
 /*
@@ -107,8 +95,7 @@ __atomic_swap:
     .equ    kernel_atomic_base, 0xFFFF0FFF
 /* r0(addr) -> r0(old) */
-__atomic_dec:
-    .fnstart
+ENTRY(__atomic_dec)
     .save   {r4, lr}
     stmdb   sp!, {r4, lr}
     mov     r2, r0
@@ -122,11 +109,10 @@ __atomic_dec:
     add     r0, r1, #1
     ldmia   sp!, {r4, lr}
     bx      lr
-    .fnend
+END(__atomic_dec)
 /* r0(addr) -> r0(old) */
-__atomic_inc:
-    .fnstart
+ENTRY(__atomic_inc)
     .save   {r4, lr}
     stmdb   sp!, {r4, lr}
     mov     r2, r0
@@ -140,11 +126,10 @@ __atomic_inc:
     sub     r0, r1, #1
     ldmia   sp!, {r4, lr}
     bx      lr
-    .fnend
+END(__atomic_inc)
 /* r0(old) r1(new) r2(addr) -> r0(zero_if_succeeded) */
-__atomic_cmpxchg:
-    .fnstart
+ENTRY(__atomic_cmpxchg)
     .save   {r4, lr}
     stmdb   sp!, {r4, lr}
     mov     r4, r0                      /* r4 = save oldvalue */
@@ -160,14 +145,13 @@ __atomic_cmpxchg:
 2:  @ atomic_cmpxchg
     ldmia   sp!, {r4, lr}
     bx      lr
-    .fnend
+END(__atomic_cmpxchg)
 /* r0(new) r1(addr) -> r0(old) */
-__atomic_swap:
-    .fnstart
+ENTRY(__atomic_swap)
     swp     r0, r0, [r1]
     bx      lr
-    .fnend
+END(__atomic_swap)
 #endif /*not defined __ARM_HAVE_LDREX_STREX*/
@@ -191,18 +175,16 @@ __atomic_swap:
 #if __ARM_EABI__
-__futex_syscall3:
-    .fnstart
+ENTRY(__futex_syscall3)
     stmdb   sp!, {r4, r7}
     .save   {r4, r7}
     ldr     r7, =__NR_futex
     swi     #0
     ldmia   sp!, {r4, r7}
     bx      lr
-    .fnend
+END(__futex_syscall3)
-__futex_wait:
-    .fnstart
+ENTRY(__futex_wait)
     stmdb   sp!, {r4, r7}
     .save   {r4, r7}
     mov     r3, r2
@@ -212,10 +194,9 @@ __futex_wait:
     swi     #0
     ldmia   sp!, {r4, r7}
     bx      lr
-    .fnend
+END(__futex_wait)
-__futex_wake:
-    .fnstart
+ENTRY(__futex_wake)
     .save   {r4, r7}
     stmdb   sp!, {r4, r7}
     mov     r2, r1
@@ -224,28 +205,32 @@ __futex_wake:
     swi     #0
     ldmia   sp!, {r4, r7}
     bx      lr
-    .fnend
+END(__futex_wake)
 #else
-__futex_syscall3:
+ENTRY(__futex_syscall3)
     swi     #__NR_futex
     bx      lr
+END(__futex_syscall3)
-__futex_wait:
+ENTRY(__futex_wait)
     mov     r3, r2
     mov     r2, r1
     mov     r1, #FUTEX_WAIT
     swi     #__NR_futex
     bx      lr
+END(__futex_wait)
-__futex_wake:
+ENTRY(__futex_wake)
     mov     r2, r1
     mov     r1, #FUTEX_WAKE
     swi     #__NR_futex
     bx      lr
+END(__futex_wake)
 #endif
-__futex_syscall4:
+ENTRY(__futex_syscall4)
     b       __futex_syscall3
+END(__futex_syscall4)

libc/arch-arm/bionic/clone.S

@@ -26,14 +26,9 @@
  * SUCH DAMAGE.
  */
 #include <sys/linux-syscalls.h>
+#include <machine/asm.h>
-    .text
-    .type __pthread_clone, #function
-    .global __pthread_clone
-    .align 4
-    .fnstart
-__pthread_clone:
+ENTRY(__pthread_clone)
     @ insert the args onto the new stack
     str     r0, [r1, #-4]
     str     r3, [r1, #-8]
@@ -73,7 +68,7 @@ __pthread_clone:
 __error:
     mov     r0, #-1
     bx      lr
-    .fnend
+END(__pthread_clone)
 #
@@ -88,12 +83,8 @@ __error:
 # at the end of the parameter list makes the
 # implementation much simpler.
 #
-    .type __bionic_clone, #function
-    .globl __bionic_clone
-    .align 4
-    .fnstart
-__bionic_clone:
+ENTRY(__bionic_clone)
     mov     ip, sp
     .save   {r4, r5, r6, r7}
@@ -124,5 +115,4 @@ __bionic_clone:
     ldr     r0, [sp, #-4]
     ldr     r1, [sp, #-8]
     b       __bionic_clone_entry
+END(__bionic_clone)
-    .fnend

libc/arch-arm/bionic/ffs.S

@@ -61,6 +61,7 @@ ENTRY(ffs)
     ldrneb  r0, [ r2, r0, lsr #26 ]
     bx      lr
+END(ffs)
     .text;
     .type .L_ffs_table, _ASM_TYPE_OBJECT;
@@ -78,5 +79,6 @@ ENTRY(ffs)
     clzne   r0, r0
     rsbne   r0, r0, #32
     bx      lr
+END(ffs)
 #endif /* !defined(__ARM_HAVE_CLZ) */

libc/arch-arm/bionic/kill.S

@@ -33,17 +33,13 @@
     of a corrupted malloc heap).
  */
 #include <sys/linux-syscalls.h>
+#include <machine/asm.h>
 #ifndef __NR_kill
 #define __NR_kill 37
 #endif
-    .text
-    .type kill, #function
-    .globl kill
-    .align 4
-kill:
+ENTRY(kill)
     stmfd   sp!, {r4-r7, ip, lr}
     ldr     r7, =__NR_kill
     swi     #0
@@ -51,3 +47,4 @@ kill:
     movs    r0, r0
     bxpl    lr
     b       __set_syscall_errno
+END(kill)

libc/arch-arm/bionic/memcmp.S

@@ -27,12 +27,7 @@
  */
 #include <machine/cpu-features.h>
+#include <machine/asm.h>
-    .text
-    .global memcmp
-    .type memcmp, %function
-    .align 4
 /*
  * Optimized memcmp() for ARM9.
@@ -43,8 +38,7 @@
  * (2) The loads are scheduled in a way they won't stall
  */
-memcmp:
-    .fnstart
+ENTRY(memcmp)
     PLD     (r0, #0)
     PLD     (r1, #0)
@@ -176,7 +170,7 @@ memcmp:
 9:  /* restore registers and return */
     ldmfd   sp!, {r4, lr}
     bx      lr
-    .fnend
+END(memcmp)

libc/arch-arm/bionic/memcmp16.S

@@ -27,12 +27,7 @@
  */
 #include <machine/cpu-features.h>
+#include <machine/asm.h>
-    .text
-    .global __memcmp16
-    .type __memcmp16, %function
-    .align 4
 /*
  * Optimized memcmp16() for ARM9.
@@ -43,8 +38,7 @@
  * (2) The loads are scheduled in a way they won't stall
  */
-__memcmp16:
-    .fnstart
+ENTRY(__memcmp16)
     PLD     (r0, #0)
     PLD     (r1, #0)
@@ -95,8 +89,6 @@ __memcmp16:
     /* restore registers and return */
     ldmnefd sp!, {r4, lr}
     bxne    lr
-    .fnend
 0:  /* here the first pointer is aligned, and we have at least 3 words
@@ -237,3 +229,4 @@ __memcmp16:
 7:  /* fix up the 2 pointers and fallthrough... */
     sub     r1, r1, #2
     b       2b
+END(__memcmp16)

libc/arch-arm/bionic/memcpy.S

@@ -27,6 +27,7 @@
  */
 #include <machine/cpu-features.h>
+#include <machine/asm.h>
 #if defined(__ARM_NEON__)
@@ -143,18 +144,12 @@ memcpy:
     ldmfd   sp!, {r0, lr}
     bx      lr
-    .fnend
+END(memcpy)
 #else   /* __ARM_ARCH__ < 7 */
-    .text
-    .global memcpy
-    .type memcpy, %function
-    .align 4
 /*
  * Optimized memcpy() for ARM.
  *
@@ -162,12 +157,11 @@ memcpy:
  * so we have to preserve R0.
  */
-memcpy:
+ENTRY(memcpy)
     /* The stack must always be 64-bits aligned to be compliant with the
      * ARM ABI. Since we have to save R0, we might as well save R4
      * which we can use for better pipelining of the reads below
      */
-    .fnstart
     .save   {r0, r4, lr}
     stmfd   sp!, {r0, r4, lr}
     /* Making room for r5-r11 which will be spilled later */
@@ -504,7 +498,7 @@ copy_last_3_and_return:
     add     sp, sp, #28
     ldmfd   sp!, {r0, r4, lr}
     bx      lr
-    .fnend
+END(memcpy)
 #endif    /* __ARM_ARCH__ < 7 */

libc/arch-arm/bionic/memset.S

@@ -25,15 +25,8 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
-    .text
-    .global memset
-    .type memset, %function
-    .global bzero
-    .type bzero, %function
-    .align
+#include <machine/asm.h>
 /*
  * Optimized memset() for ARM.
@@ -41,15 +34,15 @@
  * memset() returns its first argument.
  */
-bzero:
+ENTRY(bzero)
     mov     r2, r1
     mov     r1, #0
+END(bzero)
-memset:
+ENTRY(memset)
     /* compute the offset to align the destination
      * offset = (4-(src&3))&3 = -src & 3
      */
-    .fnstart
     .save   {r0, r4-r7, lr}
     stmfd   sp!, {r0, r4-r7, lr}
     rsb     r3, r0, #0
@@ -113,5 +106,4 @@ memset:
     strcsb  r1, [r0]
     ldmfd   sp!, {r0, r4-r7, lr}
     bx      lr
-    .fnend
+END(memset)

libc/arch-arm/bionic/setjmp.S

@@ -79,6 +79,7 @@ ENTRY(setjmp)
     mov     r0, #0x00000000
     bx      lr
+END(setjmp)
 .Lsetjmp_magic:
     .word   _JB_MAGIC_SETJMP
@@ -138,3 +139,4 @@ botch:
     bl      PIC_SYM(_C_LABEL(longjmperror), PLT)
     bl      PIC_SYM(_C_LABEL(abort), PLT)
     b       . - 8       /* Cannot get here */
+END(longjmp)

libc/arch-arm/bionic/sigsetjmp.S

@@ -33,6 +33,8 @@
  * SUCH DAMAGE.
  */
+#define _ALIGN_TEXT .align 0
+
 #include <machine/asm.h>
 #include <machine/setjmp.h>
@@ -50,6 +52,7 @@ ENTRY(sigsetjmp)
     teq     r1, #0
     beq     PIC_SYM(_C_LABEL(_setjmp), PLT)
     b       PIC_SYM(_C_LABEL(setjmp), PLT)
+END(sigsetjmp)
 .L_setjmp_magic:
     .word   _JB_MAGIC__SETJMP
@@ -60,3 +63,4 @@ ENTRY(siglongjmp)
     teq     r2, r3
     beq     PIC_SYM(_C_LABEL(_longjmp), PLT)
     b       PIC_SYM(_C_LABEL(longjmp), PLT)
+END(siglongjmp)

libc/arch-arm/bionic/strcpy.S

@@ -30,15 +30,9 @@
  */
 #include <machine/cpu-features.h>
+#include <machine/asm.h>
-    .text
-    .global strcpy
-    .type strcpy, %function
-    .align 4
-strcpy:
-    .fnstart
+ENTRY(strcpy)
     PLD(r1, #0)
     eor     r2, r0, r1
     mov     ip, r0
@@ -136,3 +130,4 @@ strcpy:
     cmp     r2, #0
     bne     4b
     bx      lr
+END(strcpy)

libc/arch-arm/bionic/tkill.S

@@ -32,18 +32,15 @@
     abort due to a fatal runtime error (e.g. detection
     of a corrupted malloc heap).
  */
 #include <sys/linux-syscalls.h>
+#include <machine/asm.h>
 #ifndef __NR_tkill
 #define __NR_tkill 238
 #endif
-    .text
-    .type tkill, #function
-    .globl tkill
-    .align 4
-tkill:
+ENTRY(tkill)
     stmfd   sp!, {r4-r7, ip, lr}
     ldr     r7, =__NR_tkill
     swi     #0
@@ -51,3 +48,4 @@ tkill:
     movs    r0, r0
     bxpl    lr
     b       __set_syscall_errno
+END(tkill)

libc/arch-arm/include/machine/asm.h

@@ -70,7 +70,13 @@
 #define _ASM_TYPE_FUNCTION  #function
 #define _ASM_TYPE_OBJECT    #object
 #define _ENTRY(x) \
-    .text; _ALIGN_TEXT; .globl x; .type x,_ASM_TYPE_FUNCTION; x:
+    .text; _ALIGN_TEXT; .globl x; .type x,_ASM_TYPE_FUNCTION; x: .fnstart
+#define _ASM_SIZE(x) .size x, .-x;
+#define _END(x) \
+    .fnend; \
+    _ASM_SIZE(x)
 #ifdef GPROF
 # ifdef __ELF__
@@ -86,8 +92,10 @@
 #define ENTRY(y)      _ENTRY(_C_LABEL(y)); _PROF_PROLOGUE
 #define ENTRY_NP(y)   _ENTRY(_C_LABEL(y))
+#define END(y)        _END(_C_LABEL(y))
 #define ASENTRY(y)    _ENTRY(_ASM_LABEL(y)); _PROF_PROLOGUE
 #define ASENTRY_NP(y) _ENTRY(_ASM_LABEL(y))
+#define ASEND(y)      _END(_ASM_LABEL(y))
 #define ASMSTR        .asciz
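With these definitions, an ENTRY(foo)/END(foo) pair for a hypothetical symbol foo expands roughly as follows (GPROF profiling prologue omitted):

@ ENTRY(foo):
.text; _ALIGN_TEXT; .globl foo; .type foo,#function; foo: .fnstart
    @ ... function body ...
@ END(foo):
.fnend; .size foo, .-foo;

The .size attribute records each function's extent in the ELF symbol table, which is what lets valgrind (and tools such as nm -S or readelf -s) map addresses back to functions.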