Merge "Add a bunch more missing ENDs to assembler routines."

Elliott Hughes 2013-02-13 23:18:23 +00:00 committed by Gerrit Code Review
commit 95b1ea1bb3
18 changed files with 28 additions and 12 deletions
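For context: every routine touched here is bracketed by the ENTRY()/END() macro pair from bionic's <machine/asm.h>. ENTRY() defines the global label and marks it as a function; END() emits the matching .size directive (plus the ARM unwind markers), so a routine with a missing or misplaced END() has wrong or absent size information for debuggers, unwinders, and profilers. A minimal sketch of what the pair expands to, assuming a simplified x86-style definition (the real per-architecture headers also handle alignment and, on ARM, .fnstart/.fnend):

/* Simplified sketch only; not the exact <machine/asm.h> definitions. */
#define ENTRY(f)                \
        .globl f;               \
        .type f, @function;     \
        f:

#define END(f)                  \
        .size f, . - f          /* lets tools know where f ends */

In the first file below (the ARM memcmp), an END(memcmp) that sat in the middle of the routine is moved to its true end; in the remaining files the missing END() lines are simply added.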

View File

@@ -115,7 +115,7 @@ ENTRY(memcmp)
* pointer somewhere else
*/
mov r4, r0
/* align first pointer to word boundary
* offset = -src & 3
*/
@@ -151,7 +151,7 @@ ENTRY(memcmp)
ldr ip, [r1]
subs r2, r2, #(32 + 4)
bmi 1f
0: pld [r4, #(CACHE_LINE_SIZE * 2)]
pld [r1, #(CACHE_LINE_SIZE * 2)]
ldr r0, [r4], #4
@@ -178,14 +178,14 @@ ENTRY(memcmp)
ldreq r0, [r4], #4
ldreq ip, [r1, #4]!
eoreqs r0, r0, lr
bne 2f
subs r2, r2, #32
bhs 0b
/* do we have at least 4 bytes left? */
1: adds r2, r2, #(32 - 4 + 4)
bmi 4f
/* finish off 4 bytes at a time */
3: ldr r0, [r4], #4
ldr ip, [r1], #4
@@ -233,17 +233,14 @@ ENTRY(memcmp)
subs r2, r2, #1
bne 11b
bx lr
-END(memcmp)
5: /*************** non-congruent case ***************/
and r0, r1, #3
cmp r0, #2
bne 4f
/* here, offset is 2 (16-bits aligned, special cased) */
/* make sure we have at least 16 bytes to process */
subs r2, r2, #16
addmi r2, r2, #16
@@ -341,3 +338,4 @@ END(memcmp)
mov r2, #4
ldmfd sp!, {r5, r6, r7}
b 8b
+END(memcmp)

View File

@@ -42,6 +42,7 @@
ENTRY(bzero)
mov r2, r1
mov r1, #0
+// Fall through to memset...
END(bzero)
ENTRY(memset)
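The new comment above documents an existing trick rather than changing behaviour: bzero(s, n) rewrites its arguments into memset's calling convention (r0 = s, r1 = 0, r2 = n) and then simply falls off the end of ENTRY(bzero) into ENTRY(memset), saving a branch. A rough C equivalent, purely illustrative:

#include <stddef.h>
#include <string.h>

/* What the assembler fall-through amounts to; the real bzero never branches,
 * it just continues into memset's first instruction. */
void bzero(void* s, size_t n) {
    memset(s, 0, n);
}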

View File

@@ -54,6 +54,7 @@ ENTRY(_setjmp)
movl %edi,20(%eax)
xorl %eax,%eax
ret
+END(_setjmp)
ENTRY(_longjmp)
movl 4(%esp),%edx
@@ -69,3 +70,4 @@ ENTRY(_longjmp)
incl %eax
1: movl %ecx,0(%esp)
ret
+END(_longjmp)

View File

@@ -19,7 +19,6 @@ ENTRY(__futex_wait)
ret
END(__futex_wait)
// int __futex_wake(volatile void *ftx, int count)
ENTRY(__futex_wake)
pushl %ebx

View File

@@ -52,7 +52,7 @@ ENTRY(setjmp)
call _C_LABEL(sigblock)
#endif
addl $4,%esp
PIC_EPILOGUE
movl 4(%esp),%ecx
movl 0(%esp),%edx
@@ -65,6 +65,7 @@ ENTRY(setjmp)
movl %eax,24(%ecx)
xorl %eax,%eax
ret
+END(setjmp)
ENTRY(longjmp)
movl 4(%esp),%edx
@@ -76,7 +77,7 @@ ENTRY(longjmp)
call _C_LABEL(sigsetmask)
#endif
addl $4,%esp
PIC_EPILOGUE
movl 4(%esp),%edx
movl 8(%esp),%eax
@@ -91,3 +92,4 @@ ENTRY(longjmp)
incl %eax
1: movl %ecx,0(%esp)
ret
+END(longjmp)

View File

@@ -61,6 +61,7 @@ ENTRY(sigsetjmp)
movl %edi,20(%ecx)
xorl %eax,%eax
ret
+END(sigsetjmp)
ENTRY(siglongjmp)
movl 4(%esp),%edx
@@ -90,3 +91,4 @@ ENTRY(siglongjmp)
incl %eax
2: movl %ecx,0(%esp)
ret
+END(siglongjmp)

View File

@@ -30,3 +30,4 @@ L1: incl %eax
L2: popl %esi
popl %edi
ret
+END(bcmp)

View File

@@ -41,3 +41,4 @@ L1: movl %edx,%ecx /* zero remainder by bytes */
popl %edi
ret
+END(bzero)

View File

@@ -15,3 +15,4 @@ ENTRY(ffs)
.align 2
L1: xorl %eax,%eax /* clear result */
ret
+END(ffs)

View File

@@ -24,3 +24,4 @@ ENTRY(memchr)
L1: xorl %eax,%eax
popl %edi
ret
+END(memchr)

View File

@@ -41,3 +41,4 @@ L6: movzbl -1(%edi),%eax /* Perform unsigned comparison */
popl %esi
popl %edi
ret
+END(memcmp)

View File

@@ -53,3 +53,4 @@ L1: rep
popl %ebx
popl %edi
ret
+END(memset)

View File

@@ -71,3 +71,4 @@ L1: movb (%edx),%al /* unroll loop, but not too much */
L2: popl %eax /* pop destination address */
popl %edi /* restore edi */
ret
+END(strcat)

View File

@@ -79,3 +79,4 @@ L3: movzbl (%eax),%eax /* unsigned comparison */
movzbl (%edx),%edx
subl %edx,%eax
ret
+END(strcmp)

View File

@@ -61,3 +61,4 @@ L1: movb (%edx),%al /* unroll loop, but not too much */
jnz L1
L2: popl %eax /* pop dst address */
ret
+END(strcpy)

View File

@@ -18,3 +18,4 @@ ENTRY(strlen)
leal -1(%ecx),%eax /* and subtracting one */
popl %edi
ret
+END(strlen)

View File

@@ -111,3 +111,4 @@ L3: movzbl (%eax),%eax /* unsigned comparision */
L4: xorl %eax,%eax
popl %ebx
ret
+END(strncmp)

View File

@@ -65,3 +65,4 @@ L3: lodsw
L4: popl %edi
popl %esi
ret
+END(swab)