Add support for x64 and win64 yasm flags.

Some projects define only win64 (rather than x64) for Windows 64-bit
builds using yasm.

Change-Id: I1d09590d66a7bfc8b4412e1cc8685978ac60b748
This commit is contained in:
Frank Galligan 2013-01-31 15:36:55 -08:00
parent 14301116e2
commit f67d740b34
14 changed files with 59 additions and 46 deletions

View File

@ -61,6 +61,8 @@
%define mangle(x) x %define mangle(x) x
%elifidn __OUTPUT_FORMAT__,x64 %elifidn __OUTPUT_FORMAT__,x64
%define mangle(x) x %define mangle(x) x
%elifidn __OUTPUT_FORMAT__,win64
%define mangle(x) x
%else %else
%define mangle(x) _ %+ x %define mangle(x) _ %+ x
%endif %endif

View File

@ -136,7 +136,7 @@
global sym(vp8_loop_filter_bh_y_sse2) PRIVATE global sym(vp8_loop_filter_bh_y_sse2) PRIVATE
sym(vp8_loop_filter_bh_y_sse2): sym(vp8_loop_filter_bh_y_sse2):
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
%define src rcx ; src_ptr %define src rcx ; src_ptr
%define stride rdx ; src_pixel_step %define stride rdx ; src_pixel_step
%define blimit r8 %define blimit r8
@ -256,7 +256,7 @@ LF_FILTER xmm0, xmm1, xmm3, xmm8, xmm4, xmm2
movdqa i12, xmm3 movdqa i12, xmm3
movdqa i13, xmm8 movdqa i13, xmm8
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
pop r13 pop r13
pop r12 pop r12
RESTORE_XMM RESTORE_XMM
@ -278,7 +278,7 @@ LF_FILTER xmm0, xmm1, xmm3, xmm8, xmm4, xmm2
global sym(vp8_loop_filter_bv_y_sse2) PRIVATE global sym(vp8_loop_filter_bv_y_sse2) PRIVATE
sym(vp8_loop_filter_bv_y_sse2): sym(vp8_loop_filter_bv_y_sse2):
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
%define src rcx ; src_ptr %define src rcx ; src_ptr
%define stride rdx ; src_pixel_step %define stride rdx ; src_pixel_step
%define blimit r8 %define blimit r8
@ -779,7 +779,7 @@ LF_FILTER xmm0, xmm1, xmm4, xmm8, xmm3, xmm2
; un-ALIGN_STACK ; un-ALIGN_STACK
pop rsp pop rsp
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
pop r13 pop r13
pop r12 pop r12
RESTORE_XMM RESTORE_XMM

View File

@ -33,7 +33,7 @@
movsxd rax, dword ptr arg(1) ; src_stride movsxd rax, dword ptr arg(1) ; src_stride
movsxd rdx, dword ptr arg(3) ; ref_stride movsxd rdx, dword ptr arg(3) ; ref_stride
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
SAVE_XMM 7, u SAVE_XMM 7, u
%define src_ptr rcx %define src_ptr rcx
%define src_stride rdx %define src_stride rdx
@ -76,7 +76,7 @@
pop rsi pop rsi
pop rbp pop rbp
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
RESTORE_XMM RESTORE_XMM
%endif %endif
%endif %endif
@ -111,7 +111,7 @@
xchg rbx, rax xchg rbx, rax
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
SAVE_XMM 7, u SAVE_XMM 7, u
%define src_ptr rcx %define src_ptr rcx
%define src_stride rdx %define src_stride rdx
@ -156,7 +156,7 @@
pop rsi pop rsi
pop rbp pop rbp
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
pop rsi pop rsi
RESTORE_XMM RESTORE_XMM
%endif %endif

View File

@ -29,7 +29,7 @@
movsxd rax, dword ptr arg(2) movsxd rax, dword ptr arg(2)
lea rcx, [rsi + rax*2] lea rcx, [rsi + rax*2]
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
%define input rcx %define input rcx
%define output rdx %define output rdx
%define pitch r8 %define pitch r8
@ -53,7 +53,7 @@
RESTORE_GOT RESTORE_GOT
pop rbp pop rbp
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
RESTORE_XMM RESTORE_XMM
%endif %endif
%endif %endif

View File

@ -27,7 +27,7 @@ sym(vp8_regular_quantize_b_sse2):
push rdi push rdi
push rsi push rsi
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
push rdi push rdi
push rsi push rsi
%endif %endif
@ -46,7 +46,7 @@ sym(vp8_regular_quantize_b_sse2):
mov rdi, arg(0) ; BLOCK *b mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d mov rsi, arg(1) ; BLOCKD *d
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d mov rsi, rdx ; BLOCKD *d
%else %else
@ -226,7 +226,7 @@ ZIGZAG_LOOP 15
pop rsi pop rsi
pop rdi pop rdi
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
pop rsi pop rsi
pop rdi pop rdi
%endif %endif
@ -250,7 +250,7 @@ sym(vp8_fast_quantize_b_sse2):
push rdi push rdi
push rsi push rsi
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
push rdi push rdi
push rsi push rsi
%else %else
@ -264,7 +264,7 @@ sym(vp8_fast_quantize_b_sse2):
mov rdi, arg(0) ; BLOCK *b mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d mov rsi, arg(1) ; BLOCKD *d
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d mov rsi, rdx ; BLOCKD *d
%else %else
@ -367,7 +367,7 @@ sym(vp8_fast_quantize_b_sse2):
pop rsi pop rsi
pop rdi pop rdi
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
pop rsi pop rsi
pop rdi pop rdi
%endif %endif

View File

@ -31,7 +31,7 @@ sym(vp8_regular_quantize_b_sse4):
%define stack_size 32 %define stack_size 32
sub rsp, stack_size sub rsp, stack_size
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
SAVE_XMM 8, u SAVE_XMM 8, u
push rdi push rdi
push rsi push rsi
@ -43,7 +43,7 @@ sym(vp8_regular_quantize_b_sse4):
mov rdi, arg(0) ; BLOCK *b mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d mov rsi, arg(1) ; BLOCKD *d
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d mov rsi, rdx ; BLOCKD *d
%else %else
@ -240,7 +240,7 @@ ZIGZAG_LOOP 15, 7, xmm3, xmm7, xmm8
pop rbp pop rbp
%else %else
%undef xmm5 %undef xmm5
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
pop rsi pop rsi
pop rdi pop rdi
RESTORE_XMM RESTORE_XMM

View File

@ -27,7 +27,7 @@ sym(vp8_fast_quantize_b_ssse3):
push rdi push rdi
push rsi push rsi
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
push rdi push rdi
push rsi push rsi
%endif %endif
@ -38,7 +38,7 @@ sym(vp8_fast_quantize_b_ssse3):
mov rdi, arg(0) ; BLOCK *b mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d mov rsi, arg(1) ; BLOCKD *d
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d mov rsi, rdx ; BLOCKD *d
%else %else
@ -122,7 +122,7 @@ sym(vp8_fast_quantize_b_ssse3):
pop rsi pop rsi
pop rdi pop rdi
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
pop rsi pop rsi
pop rdi pop rdi
%endif %endif

View File

@ -29,7 +29,7 @@
movsxd rax, dword ptr arg(2) movsxd rax, dword ptr arg(2)
lea rcx, [rsi + rax*2] lea rcx, [rsi + rax*2]
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
%define input rcx %define input rcx
%define output rdx %define output rdx
%define pitch r8 %define pitch r8
@ -53,7 +53,7 @@
RESTORE_GOT RESTORE_GOT
pop rbp pop rbp
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
RESTORE_XMM RESTORE_XMM
%endif %endif
%endif %endif

View File

@ -27,7 +27,7 @@ sym(vp9_regular_quantize_b_sse2):
push rdi push rdi
push rsi push rsi
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
push rdi push rdi
push rsi push rsi
%endif %endif
@ -46,7 +46,7 @@ sym(vp9_regular_quantize_b_sse2):
mov rdi, arg(0) ; BLOCK *b mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d mov rsi, arg(1) ; BLOCKD *d
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d mov rsi, rdx ; BLOCKD *d
%else %else
@ -223,7 +223,7 @@ ZIGZAG_LOOP 15
pop rsi pop rsi
pop rdi pop rdi
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
pop rsi pop rsi
pop rdi pop rdi
%endif %endif
@ -247,7 +247,7 @@ sym(vp9_fast_quantize_b_sse2):
push rdi push rdi
push rsi push rsi
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
push rdi push rdi
push rsi push rsi
%else %else
@ -261,7 +261,7 @@ sym(vp9_fast_quantize_b_sse2):
mov rdi, arg(0) ; BLOCK *b mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d mov rsi, arg(1) ; BLOCKD *d
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d mov rsi, rdx ; BLOCKD *d
%else %else
@ -361,7 +361,7 @@ sym(vp9_fast_quantize_b_sse2):
pop rsi pop rsi
pop rdi pop rdi
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
pop rsi pop rsi
pop rdi pop rdi
%endif %endif

View File

@ -31,7 +31,7 @@ sym(vp9_regular_quantize_b_sse4):
%define stack_size 32 %define stack_size 32
sub rsp, stack_size sub rsp, stack_size
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
SAVE_XMM 8, u SAVE_XMM 8, u
push rdi push rdi
push rsi push rsi
@ -43,7 +43,7 @@ sym(vp9_regular_quantize_b_sse4):
mov rdi, arg(0) ; BLOCK *b mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d mov rsi, arg(1) ; BLOCKD *d
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d mov rsi, rdx ; BLOCKD *d
%else %else
@ -238,7 +238,7 @@ ZIGZAG_LOOP 15, 7, xmm3, xmm7, xmm8
pop rbp pop rbp
%else %else
%undef xmm5 %undef xmm5
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
pop rsi pop rsi
pop rdi pop rdi
RESTORE_XMM RESTORE_XMM

View File

@ -27,7 +27,7 @@ sym(vp9_fast_quantize_b_ssse3):
push rdi push rdi
push rsi push rsi
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
push rdi push rdi
push rsi push rsi
%endif %endif
@ -38,7 +38,7 @@ sym(vp9_fast_quantize_b_ssse3):
mov rdi, arg(0) ; BLOCK *b mov rdi, arg(0) ; BLOCK *b
mov rsi, arg(1) ; BLOCKD *d mov rsi, arg(1) ; BLOCKD *d
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
mov rdi, rcx ; BLOCK *b mov rdi, rcx ; BLOCK *b
mov rsi, rdx ; BLOCKD *d mov rsi, rdx ; BLOCKD *d
%else %else
@ -122,7 +122,7 @@ sym(vp9_fast_quantize_b_ssse3):
pop rsi pop rsi
pop rdi pop rdi
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
pop rsi pop rsi
pop rdi pop rdi
%endif %endif

View File

@ -33,7 +33,7 @@
movsxd rax, dword ptr arg(1) ; src_stride movsxd rax, dword ptr arg(1) ; src_stride
movsxd rdx, dword ptr arg(3) ; ref_stride movsxd rdx, dword ptr arg(3) ; ref_stride
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
SAVE_XMM 7, u SAVE_XMM 7, u
%define src_ptr rcx %define src_ptr rcx
%define src_stride rdx %define src_stride rdx
@ -76,7 +76,7 @@
pop rsi pop rsi
pop rbp pop rbp
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
RESTORE_XMM RESTORE_XMM
%endif %endif
%endif %endif
@ -111,7 +111,7 @@
xchg rbx, rax xchg rbx, rax
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
SAVE_XMM 7, u SAVE_XMM 7, u
%define src_ptr rcx %define src_ptr rcx
%define src_stride rdx %define src_stride rdx
@ -156,7 +156,7 @@
pop rsi pop rsi
pop rbp pop rbp
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
pop rsi pop rsi
RESTORE_XMM RESTORE_XMM
%endif %endif

View File

@ -18,7 +18,7 @@ sym(vpx_reset_mmx_state):
ret ret
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
global sym(vpx_winx64_fldcw) PRIVATE global sym(vpx_winx64_fldcw) PRIVATE
sym(vpx_winx64_fldcw): sym(vpx_winx64_fldcw):
sub rsp, 8 sub rsp, 8

View File

@ -78,6 +78,17 @@
%endif %endif
; LIBVPX_YASM_WIN64
; Set LIBVPX_YASM_WIN64 if output is Windows 64bit so the code will work if x64
; or win64 is defined on the Yasm command line.
%ifidn __OUTPUT_FORMAT__,win64
%define LIBVPX_YASM_WIN64 1
%elifidn __OUTPUT_FORMAT__,x64
%define LIBVPX_YASM_WIN64 1
%else
%define LIBVPX_YASM_WIN64 0
%endif
; sym() ; sym()
; Return the proper symbol name for the target ABI. ; Return the proper symbol name for the target ABI.
; ;
@ -90,7 +101,7 @@
%define sym(x) x %define sym(x) x
%elifidn __OUTPUT_FORMAT__,elfx32 %elifidn __OUTPUT_FORMAT__,elfx32
%define sym(x) x %define sym(x) x
%elifidn __OUTPUT_FORMAT__,x64 %elif LIBVPX_YASM_WIN64
%define sym(x) x %define sym(x) x
%else %else
%define sym(x) _ %+ x %define sym(x) _ %+ x
@ -114,7 +125,7 @@
%define PRIVATE :hidden %define PRIVATE :hidden
%elifidn __OUTPUT_FORMAT__,elfx32 %elifidn __OUTPUT_FORMAT__,elfx32
%define PRIVATE :hidden %define PRIVATE :hidden
%elifidn __OUTPUT_FORMAT__,x64 %elif LIBVPX_YASM_WIN64
%define PRIVATE %define PRIVATE
%else %else
%define PRIVATE :private_extern %define PRIVATE :private_extern
@ -131,7 +142,7 @@
%else %else
; 64 bit ABI passes arguments in registers. This is a workaround to get up ; 64 bit ABI passes arguments in registers. This is a workaround to get up
; and running quickly. Relies on SHADOW_ARGS_TO_STACK ; and running quickly. Relies on SHADOW_ARGS_TO_STACK
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
%define arg(x) [rbp+16+8*x] %define arg(x) [rbp+16+8*x]
%else %else
%define arg(x) [rbp-8-8*x] %define arg(x) [rbp-8-8*x]
@ -257,7 +268,7 @@
%endm %endm
%define UNSHADOW_ARGS %define UNSHADOW_ARGS
%else %else
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
%macro SHADOW_ARGS_TO_STACK 1 ; argc %macro SHADOW_ARGS_TO_STACK 1 ; argc
%if %1 > 0 %if %1 > 0
mov arg(0),rcx mov arg(0),rcx
@ -313,7 +324,7 @@
; Win64 ABI requires 16 byte stack alignment, but then pushes an 8 byte return ; Win64 ABI requires 16 byte stack alignment, but then pushes an 8 byte return
; value. Typically we follow this up with 'push rbp' - re-aligning the stack - ; value. Typically we follow this up with 'push rbp' - re-aligning the stack -
; but in some cases this is not done and unaligned movs must be used. ; but in some cases this is not done and unaligned movs must be used.
%ifidn __OUTPUT_FORMAT__,x64 %if LIBVPX_YASM_WIN64
%macro SAVE_XMM 1-2 a %macro SAVE_XMM 1-2 a
%if %1 < 6 %if %1 < 6
%error Only xmm registers 6-15 must be preserved %error Only xmm registers 6-15 must be preserved