__set_errno returns -1 precisely so that callers don't need to set the error return value themselves after calling it. The other architectures were already taking advantage of this, but no one had ever fixed x86 and x86_64. Change-Id: Ie131494be664f6c4a1bbf8c61bbbed58eac56122
		
			
				
	
	
		
			32 lines
		
	
	
		
			673 B
		
	
	
	
		
			ArmAsm
		
	
	
	
	
	
			
		
		
	
	
			32 lines
		
	
	
		
			673 B
		
	
	
	
		
			ArmAsm
		
	
	
	
	
	
/* Generated by gensyscalls.py. Do not edit. */

#include <private/bionic_asm.h>

/*
 * x86 (IA-32) syscall stub for sched_setaffinity.
 *
 * ABI:   cdecl; three arguments on the stack, result in %eax.
 * In:    16(%esp)/20(%esp)/24(%esp) after the three pushes below
 *        (i.e. the caller's three stack arguments; presumably
 *        pid, cpusetsize, mask per sched_setaffinity(2) -- the
 *        signature itself is not visible in this generated file).
 * Out:   %eax = kernel result on success; on failure errno is set
 *        via __set_errno, whose own return value (-1) is left in
 *        %eax for the caller.
 * Saves: %ebx, %ecx, %edx (pushed/popped around the syscall).
 */
ENTRY(sched_setaffinity)
    pushl   %ebx
    .cfi_def_cfa_offset 8
    .cfi_rel_offset ebx, 0
    pushl   %ecx
    .cfi_adjust_cfa_offset 4
    .cfi_rel_offset ecx, 0
    pushl   %edx
    .cfi_adjust_cfa_offset 4
    .cfi_rel_offset edx, 0
    /* Load the three caller arguments into the syscall registers. */
    mov     16(%esp), %ebx
    mov     20(%esp), %ecx
    mov     24(%esp), %edx
    movl    $__NR_sched_setaffinity, %eax
    int     $0x80
    /* Kernel returns -errno in [-MAX_ERRNO, -1] on failure; an
     * unsigned compare-below means "not in the error range". */
    cmpl    $-MAX_ERRNO, %eax
    jb      1f
    negl    %eax                    /* %eax = errno value */
    pushl   %eax
    .cfi_adjust_cfa_offset 4        /* keep CFI valid inside __set_errno */
    call    __set_errno             /* sets errno; returns -1 in %eax */
    addl    $4, %esp
    .cfi_adjust_cfa_offset -4
1:
    popl    %edx
    .cfi_adjust_cfa_offset -4
    .cfi_restore edx
    popl    %ecx
    .cfi_adjust_cfa_offset -4
    .cfi_restore ecx
    popl    %ebx
    .cfi_adjust_cfa_offset -4
    .cfi_restore ebx
    ret
END(sched_setaffinity)
 |