; vpx/vp8/common/x86/sad_ssse3.asm
; Johann e50f96a4a3 Move SAD and variance functions to common
; The MFQE function of the postprocessor depends on these
;
; Change-Id: I256a37c6de079fe92ce744b1f11e16526d06b50a
; 2012-03-05 16:50:33 -08:00
;
; 371 lines
; 13 KiB
; NASM

;
; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
;
; Use of this source code is governed by a BSD-style license
; that can be found in the LICENSE file in the root of the source
; tree. An additional intellectual property rights grant can be found
; in the file PATENTS. All contributing project authors may
; be found in the AUTHORS file in the root of the source tree.
;
%include "vpx_ports/x86_abi_support.asm"
;-----------------------------------------------------------------------
; PROCESS_16X2X3 first
;
; SAD two 16-pixel source rows against three horizontally adjacent
; reference positions (ref+0, ref+1, ref+2), using unaligned reference
; loads (lddqu).  This is the fallback used by the aligned_by_15
; dispatch case, where the palignr trick cannot produce the +2 shift
; from a single 32-byte window (shift would be 17 bytes).
;
; %1 != 0: first row pair -- psadbw results initialize the running
;          totals in xmm5/xmm6/xmm7.
; %1 == 0: psadbw results are added into xmm5/xmm6/xmm7.
;
; In:   rsi = src_ptr (movdqa load: assumed 16-byte aligned)
;       rdi = ref_ptr (lddqu load: may be unaligned)
;       rax = src_stride, rdx = ref_stride
; Out:  rsi/rdi advanced past the two rows processed.
;       xmm5/xmm6/xmm7 = per-quadword SAD partial sums for ref+0/+1/+2
;       (psadbw leaves one sum per 64-bit lane; over at most 16 rows
;       each lane sum is <= 16*8*255 = 32640, so paddw cannot overflow).
; Clobbers: xmm0-xmm3.
;-----------------------------------------------------------------------
%macro PROCESS_16X2X3 1
%if %1
movdqa xmm0, XMMWORD PTR [rsi]       ; source row 0
lddqu xmm5, XMMWORD PTR [rdi]        ; reference row 0, offset +0
lddqu xmm6, XMMWORD PTR [rdi+1]      ; reference row 0, offset +1
lddqu xmm7, XMMWORD PTR [rdi+2]      ; reference row 0, offset +2
psadbw xmm5, xmm0                    ; initialize the three accumulators
psadbw xmm6, xmm0
psadbw xmm7, xmm0
%else
movdqa xmm0, XMMWORD PTR [rsi]       ; source row 0
lddqu xmm1, XMMWORD PTR [rdi]
lddqu xmm2, XMMWORD PTR [rdi+1]
lddqu xmm3, XMMWORD PTR [rdi+2]
psadbw xmm1, xmm0
psadbw xmm2, xmm0
psadbw xmm3, xmm0
paddw xmm5, xmm1                     ; accumulate into running totals
paddw xmm6, xmm2
paddw xmm7, xmm3
%endif
movdqa xmm0, XMMWORD PTR [rsi+rax]   ; source row 1
lddqu xmm1, XMMWORD PTR [rdi+rdx]
lddqu xmm2, XMMWORD PTR [rdi+rdx+1]
lddqu xmm3, XMMWORD PTR [rdi+rdx+2]
lea rsi, [rsi+rax*2]                 ; advance src by two rows (flags-free)
lea rdi, [rdi+rdx*2]                 ; advance ref by two rows
psadbw xmm1, xmm0
psadbw xmm2, xmm0
psadbw xmm3, xmm0
paddw xmm5, xmm1
paddw xmm6, xmm2
paddw xmm7, xmm3
%endmacro
;-----------------------------------------------------------------------
; PROCESS_16X2X3_OFFSET first, offset
;
; Same computation as PROCESS_16X2X3 (two source rows vs ref+0/+1/+2),
; but the reference rows are synthesized from two ALIGNED 16-byte loads
; ([rdi] and [rdi+16]) combined with palignr, avoiding unaligned loads.
; The caller has already subtracted %2 from rdi (see
; PROCESS_16X16X3_OFFSET / PROCESS_16X8X3_OFFSET), so right-shifting
; the 32-byte window by %2, %2+1 and %2+2 bytes reconstructs the
; original ref+0, ref+1 and ref+2 rows.
;
; %1 != 0: first row pair -- results initialize xmm5/xmm6/xmm7.
; %1 == 0: results are accumulated into xmm5/xmm6/xmm7.
; %2: byte misalignment of the original ref_ptr (0..14; %2+2 must be
;     <= 16 for palignr, hence the separate lddqu path for 15).
;
; In:   rsi = src_ptr (16-byte aligned), rdi = aligned-down ref_ptr,
;       rax = src_stride, rdx = ref_stride
; Out:  rsi/rdi advanced past the two rows; xmm5/6/7 hold per-quadword
;       SAD partial sums.
; Clobbers: xmm0-xmm4.
;-----------------------------------------------------------------------
%macro PROCESS_16X2X3_OFFSET 2
%if %1
movdqa xmm0, XMMWORD PTR [rsi]       ; source row 0
movdqa xmm4, XMMWORD PTR [rdi]       ; low 16 bytes of ref window
movdqa xmm7, XMMWORD PTR [rdi+16]    ; high 16 bytes of ref window
movdqa xmm5, xmm7
palignr xmm5, xmm4, %2               ; ref row shifted by %2   = ref+0
movdqa xmm6, xmm7
palignr xmm6, xmm4, (%2+1)           ; ref row shifted by %2+1 = ref+1
palignr xmm7, xmm4, (%2+2)           ; ref row shifted by %2+2 = ref+2
psadbw xmm5, xmm0                    ; initialize the three accumulators
psadbw xmm6, xmm0
psadbw xmm7, xmm0
%else
movdqa xmm0, XMMWORD PTR [rsi]       ; source row 0
movdqa xmm4, XMMWORD PTR [rdi]
movdqa xmm3, XMMWORD PTR [rdi+16]
movdqa xmm1, xmm3
palignr xmm1, xmm4, %2
movdqa xmm2, xmm3
palignr xmm2, xmm4, (%2+1)
palignr xmm3, xmm4, (%2+2)
psadbw xmm1, xmm0
psadbw xmm2, xmm0
psadbw xmm3, xmm0
paddw xmm5, xmm1                     ; accumulate into running totals
paddw xmm6, xmm2
paddw xmm7, xmm3
%endif
movdqa xmm0, XMMWORD PTR [rsi+rax]   ; source row 1
movdqa xmm4, XMMWORD PTR [rdi+rdx]
movdqa xmm3, XMMWORD PTR [rdi+rdx+16]
movdqa xmm1, xmm3
palignr xmm1, xmm4, %2
movdqa xmm2, xmm3
palignr xmm2, xmm4, (%2+1)
palignr xmm3, xmm4, (%2+2)
lea rsi, [rsi+rax*2]                 ; advance src by two rows
lea rdi, [rdi+rdx*2]                 ; advance ref by two rows
psadbw xmm1, xmm0
psadbw xmm2, xmm0
psadbw xmm3, xmm0
paddw xmm5, xmm1
paddw xmm6, xmm2
paddw xmm7, xmm3
%endmacro
;-----------------------------------------------------------------------
; PROCESS_16X16X3_OFFSET offset, label_prefix
;
; Emits the "%2_aligned_by_%1" jump-table target for a 16x16 block:
; rounds rdi down by %1 bytes so subsequent loads are 16-byte aligned,
; runs eight PROCESS_16X2X3_OFFSET row pairs (16 rows total), then
; jumps to the function's shared store code (%2_store_off).  The label
; name must match the entries in the dd jump table exactly.
;-----------------------------------------------------------------------
%macro PROCESS_16X16X3_OFFSET 2
%2_aligned_by_%1:
sub rdi, %1                          ; align ref_ptr down; %1 becomes the palignr shift
PROCESS_16X2X3_OFFSET 1, %1          ; rows  0-1: initialize xmm5/6/7
PROCESS_16X2X3_OFFSET 0, %1          ; rows  2-3
PROCESS_16X2X3_OFFSET 0, %1          ; rows  4-5
PROCESS_16X2X3_OFFSET 0, %1          ; rows  6-7
PROCESS_16X2X3_OFFSET 0, %1          ; rows  8-9
PROCESS_16X2X3_OFFSET 0, %1          ; rows 10-11
PROCESS_16X2X3_OFFSET 0, %1          ; rows 12-13
PROCESS_16X2X3_OFFSET 0, %1          ; rows 14-15
jmp %2_store_off                     ; fold accumulators and write results
%endmacro
;-----------------------------------------------------------------------
; PROCESS_16X8X3_OFFSET offset, label_prefix
;
; 16x8 variant of PROCESS_16X16X3_OFFSET: emits the "%2_aligned_by_%1"
; jump-table target, aligns rdi down by %1, processes four row pairs
; (8 rows), then jumps to the shared store code (%2_store_off).
;-----------------------------------------------------------------------
%macro PROCESS_16X8X3_OFFSET 2
%2_aligned_by_%1:
sub rdi, %1                          ; align ref_ptr down; %1 becomes the palignr shift
PROCESS_16X2X3_OFFSET 1, %1          ; rows 0-1: initialize xmm5/6/7
PROCESS_16X2X3_OFFSET 0, %1          ; rows 2-3
PROCESS_16X2X3_OFFSET 0, %1          ; rows 4-5
PROCESS_16X2X3_OFFSET 0, %1          ; rows 6-7
jmp %2_store_off                     ; fold accumulators and write results
%endmacro
;-----------------------------------------------------------------------
;void vp8_sad16x16x3_ssse3(
;    unsigned char *src_ptr,      arg(0)
;    int  src_stride,             arg(1)
;    unsigned char *ref_ptr,      arg(2)
;    int  ref_stride,             arg(3)
;    int  *results)               arg(4)
;
; Computes three 16x16 SADs of src against ref+0, ref+1 and ref+2 and
; writes them to results[0..2].  The work is dispatched on the low four
; bits of ref_ptr through a position-independent jump table: the
; call/pop sequence recovers the current code address without needing a
; relocation, and each table entry is a 32-bit offset to the matching
; aligned_by_N handler.  Alignments 0-14 use aligned loads + palignr;
; alignment 15 uses plain unaligned (lddqu) loads.
; Assumes src_ptr is 16-byte aligned (movdqa loads in the macros).
;-----------------------------------------------------------------------
global sym(vp8_sad16x16x3_ssse3)
sym(vp8_sad16x16x3_ssse3):
push rbp                             ; standard frame setup
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5               ; make args reachable as arg(n) on all ABIs
SAVE_XMM 7                           ; preserve xmm6/7 where the ABI requires it (Win64)
push rsi
push rdi
push rcx
; end prolog
mov rsi, arg(0) ;src_ptr
mov rdi, arg(2) ;ref_ptr
mov rdx, 0xf
and rdx, rdi                         ; rdx = ref_ptr & 15 -> jump-table index
jmp .vp8_sad16x16x3_ssse3_skiptable  ; skip over the data table in the code stream
.vp8_sad16x16x3_ssse3_jumptable:
; 16 entries: 32-bit offsets of each alignment handler, relative to
; the do_jump label (makes the table position-independent).
dd .vp8_sad16x16x3_ssse3_aligned_by_0 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_1 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_2 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_3 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_4 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_5 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_6 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_7 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_8 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_9 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_10 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_11 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_12 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_13 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_14 - .vp8_sad16x16x3_ssse3_do_jump
dd .vp8_sad16x16x3_ssse3_aligned_by_15 - .vp8_sad16x16x3_ssse3_do_jump
.vp8_sad16x16x3_ssse3_skiptable:
call .vp8_sad16x16x3_ssse3_do_jump   ; pushes address of do_jump (PIC trick)
.vp8_sad16x16x3_ssse3_do_jump:
pop rcx ; get the address of do_jump
mov rax, .vp8_sad16x16x3_ssse3_jumptable - .vp8_sad16x16x3_ssse3_do_jump
add rax, rcx ; get the absolute address of vp8_sad16x16x3_ssse3_jumptable
movsxd rax, dword [rax + 4*rdx] ; get the 32 bit offset from the jumptable
add rcx, rax                         ; rcx = absolute address of the aligned_by_N handler
movsxd rax, dword ptr arg(1) ;src_stride
movsxd rdx, dword ptr arg(3) ;ref_stride
jmp rcx                              ; dispatch to the alignment-specific loop
; Instantiate the 15 palignr-based handlers (labels aligned_by_0..14).
PROCESS_16X16X3_OFFSET 0, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 1, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 2, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 3, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 4, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 5, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 6, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 7, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 8, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 9, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 10, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 11, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 12, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 13, .vp8_sad16x16x3_ssse3
PROCESS_16X16X3_OFFSET 14, .vp8_sad16x16x3_ssse3
; Alignment 15: palignr would need a 17-byte shift for ref+2, so use
; unaligned loads instead (eight row pairs = 16 rows).
.vp8_sad16x16x3_ssse3_aligned_by_15:
PROCESS_16X2X3 1
PROCESS_16X2X3 0
PROCESS_16X2X3 0
PROCESS_16X2X3 0
PROCESS_16X2X3 0
PROCESS_16X2X3 0
PROCESS_16X2X3 0
PROCESS_16X2X3 0
.vp8_sad16x16x3_ssse3_store_off:
; psadbw left one partial sum in each 64-bit lane of xmm5/6/7; fold the
; high lane into the low lane and store the low 32 bits of each total.
mov rdi, arg(4) ;Results
movq xmm0, xmm5                      ; low qword of ref+0 total
psrldq xmm5, 8                       ; bring high qword down
paddw xmm0, xmm5                     ; combine the two lane sums
movd [rdi], xmm0                     ; results[0]
;-
movq xmm0, xmm6
psrldq xmm6, 8
paddw xmm0, xmm6
movd [rdi+4], xmm0                   ; results[1]
;-
movq xmm0, xmm7
psrldq xmm7, 8
paddw xmm0, xmm7
movd [rdi+8], xmm0                   ; results[2]
; begin epilog
pop rcx
pop rdi
pop rsi
RESTORE_XMM
UNSHADOW_ARGS
pop rbp
ret
;-----------------------------------------------------------------------
;void vp8_sad16x8x3_ssse3(
;    unsigned char *src_ptr,      arg(0)
;    int  src_stride,             arg(1)
;    unsigned char *ref_ptr,      arg(2)
;    int  ref_stride,             arg(3)
;    int  *results)               arg(4)
;
; 16x8 variant of vp8_sad16x16x3_ssse3: computes three 16x8 SADs of
; src against ref+0, ref+1 and ref+2 and writes them to results[0..2].
; Uses the same PIC jump-table dispatch on (ref_ptr & 15); alignments
; 0-14 take the aligned-load + palignr path, alignment 15 the
; unaligned lddqu path.  Assumes src_ptr is 16-byte aligned.
;-----------------------------------------------------------------------
global sym(vp8_sad16x8x3_ssse3)
sym(vp8_sad16x8x3_ssse3):
push rbp                             ; standard frame setup
mov rbp, rsp
SHADOW_ARGS_TO_STACK 5               ; make args reachable as arg(n) on all ABIs
SAVE_XMM 7                           ; preserve xmm6/7 where the ABI requires it (Win64)
push rsi
push rdi
push rcx
; end prolog
mov rsi, arg(0) ;src_ptr
mov rdi, arg(2) ;ref_ptr
mov rdx, 0xf
and rdx, rdi                         ; rdx = ref_ptr & 15 -> jump-table index
jmp .vp8_sad16x8x3_ssse3_skiptable   ; skip over the data table in the code stream
.vp8_sad16x8x3_ssse3_jumptable:
; 16 entries: 32-bit offsets of each alignment handler, relative to
; the do_jump label (makes the table position-independent).
dd .vp8_sad16x8x3_ssse3_aligned_by_0 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_1 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_2 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_3 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_4 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_5 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_6 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_7 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_8 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_9 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_10 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_11 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_12 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_13 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_14 - .vp8_sad16x8x3_ssse3_do_jump
dd .vp8_sad16x8x3_ssse3_aligned_by_15 - .vp8_sad16x8x3_ssse3_do_jump
.vp8_sad16x8x3_ssse3_skiptable:
call .vp8_sad16x8x3_ssse3_do_jump    ; pushes address of do_jump (PIC trick)
.vp8_sad16x8x3_ssse3_do_jump:
pop rcx ; get the address of do_jump
mov rax, .vp8_sad16x8x3_ssse3_jumptable - .vp8_sad16x8x3_ssse3_do_jump
add rax, rcx ; get the absolute address of vp8_sad16x8x3_ssse3_jumptable
movsxd rax, dword [rax + 4*rdx] ; get the 32 bit offset from the jumptable
add rcx, rax                         ; rcx = absolute address of the aligned_by_N handler
movsxd rax, dword ptr arg(1) ;src_stride
movsxd rdx, dword ptr arg(3) ;ref_stride
jmp rcx                              ; dispatch to the alignment-specific loop
; Instantiate the 15 palignr-based handlers (labels aligned_by_0..14).
PROCESS_16X8X3_OFFSET 0, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 1, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 2, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 3, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 4, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 5, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 6, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 7, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 8, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 9, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 10, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 11, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 12, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 13, .vp8_sad16x8x3_ssse3
PROCESS_16X8X3_OFFSET 14, .vp8_sad16x8x3_ssse3
; Alignment 15: palignr would need a 17-byte shift for ref+2, so use
; unaligned loads instead (four row pairs = 8 rows).
.vp8_sad16x8x3_ssse3_aligned_by_15:
PROCESS_16X2X3 1
PROCESS_16X2X3 0
PROCESS_16X2X3 0
PROCESS_16X2X3 0
.vp8_sad16x8x3_ssse3_store_off:
; psadbw left one partial sum in each 64-bit lane of xmm5/6/7; fold the
; high lane into the low lane and store the low 32 bits of each total.
mov rdi, arg(4) ;Results
movq xmm0, xmm5                      ; low qword of ref+0 total
psrldq xmm5, 8                       ; bring high qword down
paddw xmm0, xmm5                     ; combine the two lane sums
movd [rdi], xmm0                     ; results[0]
;-
movq xmm0, xmm6
psrldq xmm6, 8
paddw xmm0, xmm6
movd [rdi+4], xmm0                   ; results[1]
;-
movq xmm0, xmm7
psrldq xmm7, 8
paddw xmm0, xmm7
movd [rdi+8], xmm0                   ; results[2]
; begin epilog
pop rcx
pop rdi
pop rsi
RESTORE_XMM
UNSHADOW_ARGS
pop rbp
ret