Merge "Use ENTRY/END in custom x86 assembler too."

Elliott Hughes 2013-02-13 04:01:01 +00:00 committed by Gerrit Code Review
commit 59aeff9417
6 changed files with 34 additions and 68 deletions

View File

@@ -25,11 +25,10 @@
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
.text
.type __get_sp, @function
.global __get_sp
.align 4
__get_sp:
mov %esp, %eax
ret
#include <machine/asm.h>
ENTRY(__get_sp)
mov %esp, %eax
ret
END(__get_sp)
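
The change is mechanical in every file: each hand-rolled prologue (.text, .type, .globl, .align, label) becomes a single ENTRY(), and each function gains a matching END() so the assembler also emits a symbol size. As a rough sketch of what the macros expand to (this approximates bionic's NetBSD-derived <machine/asm.h>; the real header also deals with profiling hooks and label mangling):

/* Approximate expansion of the <machine/asm.h> macros (a sketch,
 * not bionic's exact header). */
#define ENTRY(f)            \
        .text;              \
        .globl f;           \
        .align 4;           \
        .type f, @function; \
f:

#define END(f) \
        .size f, . - f

The END() half is the functional gain: the .size annotation lets tools such as gdb and perf attribute addresses to the right routine.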

View File

@@ -1,15 +1,8 @@
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
.text
.type _exit_with_stack_teardown, @function
.globl _exit_with_stack_teardown
.align 4
/*
* void _exit_with_stack_teardown(void *stackBase, int stackSize, int *retCode)
*/
_exit_with_stack_teardown:
// void _exit_with_stack_teardown(void *stackBase, int stackSize, int *retCode)
ENTRY(_exit_with_stack_teardown)
/* we can trash %ebx here since this call should never return. */
/* We can also take advantage of the fact that the linux syscall trap
* handler saves all the registers, so we don't need a stack to keep
@@ -32,3 +25,4 @@ _exit_with_stack_teardown:
* that presently, 'hlt' will cause the program to segfault... but this
* should never happen :) */
hlt
END(_exit_with_stack_teardown)
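
The comment explains why this must be assembly: the routine unmaps the very stack it is running on. A hypothetical C rendering makes the problem visible (illustrative only; the compiler is free to touch the stack at any point, which is exactly what the assembly avoids):

#include <sys/mman.h>
#include <unistd.h>

/* Hypothetical C rendering of _exit_with_stack_teardown() -- not
 * actually writable in C: after munmap() the program runs with no
 * valid stack, and *retCode itself lives on the unmapped stack. */
void _exit_with_stack_teardown(void* stackBase, int stackSize, int* retCode) {
    int status = *retCode;        /* must be read before the unmap */
    munmap(stackBase, stackSize); /* tears down our own stack */
    _exit(status);                /* no stack needed past this point */
}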

View File

@@ -1,12 +1,8 @@
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
.text
// int __pthread_clone(int (*fn)(void*), void* tls, int flags, void* arg);
.globl __pthread_clone
.type __pthread_clone, @function
.align 4
__pthread_clone:
ENTRY(__pthread_clone)
pushl %ebx
pushl %ecx
movl 16(%esp), %ecx
@@ -49,6 +45,7 @@ __pthread_clone:
popl %ecx
popl %ebx
ret
END(__pthread_clone)
/*
@@ -60,11 +57,7 @@ __pthread_clone:
* int (*fn)(void *),
* void *arg);
*/
.text
.globl __bionic_clone
.type __bionic_clone, @function
.align 4
__bionic_clone:
ENTRY(__bionic_clone)
pushl %ebx
pushl %esi
pushl %edi
@@ -107,3 +100,4 @@ __bionic_clone:
popl %esi
popl %ebx
ret
END(__bionic_clone)
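
Both wrappers follow the same shape: the parent pushes fn and arg onto the child's new stack before trapping, the kernel returns twice, the parent hands the new tid back to its caller, and the child (which sees 0 in %eax) pops fn and arg off its fresh stack and runs them. A hypothetical C sketch of the child side (the real code has to be assembly because the child wakes up on a bare stack with no C frame beneath it):

#include <unistd.h>

/* Hypothetical sketch of the child-side logic of the clone wrappers. */
static void child_trampoline(int (*fn)(void*), void* arg) {
    _exit(fn(arg)); /* run the body, then exit without unwinding
                     * frames that only exist in the parent */
}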

View File

@@ -1,17 +1,11 @@
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
/*
* int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout)
*/
.text
.globl __futex_wait
.type __futex_wait, @function
.align 4
__futex_wait:
// int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout)
ENTRY(__futex_wait)
pushl %ebx
pushl %esi
mov 12(%esp), %ebx /* ftx */
@@ -23,15 +17,11 @@ __futex_wait:
popl %esi
popl %ebx
ret
END(__futex_wait)
/* int __futex_wake(volatile void *ftx, int count) */
.text
.globl __futex_wake
.type __futex_wake, @function
.align 4
__futex_wake:
// int __futex_wake(volatile void *ftx, int count)
ENTRY(__futex_wake)
pushl %ebx
mov 8(%esp), %ebx /* ftx */
movl $FUTEX_WAKE, %ecx
@@ -40,13 +30,10 @@ __futex_wake:
int $0x80
popl %ebx
ret
END(__futex_wake)
/* int __futex_syscall3(volatile void *ftx, int op, int count) */
.text
.globl __futex_syscall3
.type __futex_syscall3, @function
.align 4
__futex_syscall3:
// int __futex_syscall3(volatile void *ftx, int op, int count)
ENTRY(__futex_syscall3)
pushl %ebx
movl 8(%esp), %ebx /* ftx */
movl 12(%esp), %ecx /* op */
@@ -55,13 +42,10 @@ __futex_syscall3:
int $0x80
popl %ebx
ret
END(__futex_syscall3)
/* int __futex_syscall4(volatile void *ftx, int op, int val, const struct timespec *timeout) */
.text
.globl __futex_syscall4
.type __futex_syscall4, @function
.align 4
__futex_syscall4:
// int __futex_syscall4(volatile void *ftx, int op, int val, const struct timespec *timeout)
ENTRY(__futex_syscall4)
pushl %ebx
pushl %esi
movl 12(%esp), %ebx /* ftx */
@@ -73,3 +57,4 @@ __futex_syscall4:
popl %esi
popl %ebx
ret
END(__futex_syscall4)
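
All four wrappers funnel into the same futex system call, differing only in how many arguments they forward. In C terms they behave roughly like the sketch below, with one caveat: libc's syscall() returns -1 and sets errno, while these assembly wrappers hand back the kernel's raw negative error code.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

/* Rough, illustrative C equivalent of __futex_syscall4(); the real
 * wrapper returns -errno directly instead of the -1/errno convention. */
static long futex_syscall4(volatile void* ftx, int op, int val,
                           const struct timespec* timeout) {
    return syscall(SYS_futex, ftx, op, val, timeout);
}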

View File

@@ -10,14 +10,10 @@
* %ebp: arg5 - callee save
*/
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
.text
.type syscall, @function
.globl syscall
.align 4
syscall:
ENTRY(syscall)
# Push the callee save registers.
push %ebx
push %esi
@@ -53,3 +49,4 @@ syscall:
pop %esi
pop %ebx
ret
END(syscall)
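
This is the generic syscall(3) trampoline: it copies up to five stack arguments into the registers named in the header comment, traps, and returns the kernel's result. Typical use from C, with gettid as an arbitrary illustrative syscall:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void) {
    /* gettid(2) historically had no libc wrapper, so the generic
     * trampoline is the usual way to reach it. */
    long tid = syscall(__NR_gettid);
    printf("tid=%ld\n", tid);
    return 0;
}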

View File

@@ -1,3 +1,4 @@
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
#ifndef __NR_vfork
@@ -5,17 +6,12 @@
#endif
.text
.type vfork, @function
.globl vfork
.align 4
/* Get rid of the stack modifications (popl/ret) after vfork() success.
* vfork is VERY sneaky. One has to be very careful about what can be done
* between a successful vfork and a subsequent execve()
*/
vfork:
ENTRY(vfork)
/* grab the return address */
popl %ecx
movl $__NR_vfork, %eax
@@ -28,3 +24,4 @@ vfork:
orl $-1, %eax
1:
jmp *%ecx
END(vfork)
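
The popl %ecx before the trap is the heart of the trick the comment describes: parent and child share one stack, so the routine pops its own return address into a register and later leaves via jmp *%ecx, keeping the shared stack exactly as the caller left it. The caller-side contract is just as strict; a typical safe pattern looks like this sketch (path, argv, and envp are placeholder parameters):

#include <unistd.h>

/* Sketch of the only broadly safe vfork() pattern: the child must
 * either execve() or _exit(), and must not return or write to the
 * parent's frame in between. */
pid_t spawn(const char* path, char* const argv[], char* const envp[]) {
    pid_t pid = vfork();
    if (pid == 0) {        /* child: borrows the parent's stack */
        execve(path, argv, envp);
        _exit(127);        /* exec failed; never return from here */
    }
    return pid;            /* parent resumes once the child execs or exits */
}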