vpx/vp9/encoder/x86/vp9_error_sse2.asm
Ronald S. Bultje af660715c0 Make coefficient skip condition an explicit RD choice.
This commit replaces zrun_zbin_boost, a method of biasing non-zero
coefficients that follow runs of zero coefficients to be rounded towards
zero, with an explicit skip-block choice in the RD loop.

The logic is basically that if individual coefficients should be rounded
towards zero (from an RD point of view), the trellis/optimize loop should
take care of it. If whole blocks should be zero (from an RD point of
view), a single RD check is much more efficient than a complete
serialization of the quantization loop.
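
As a rough sketch of the check being described (the names and the
RDCOST-style weighting here are assumptions for illustration, not the
actual libvpx code), the skip-vs-code decision boils down to comparing
two rate-distortion costs, where the distortion of a skipped block is
simply the coefficient energy (the ssz value computed by
vp9_block_error below):

    #include <stdint.h>

    /* hypothetical RD cost, shaped like libvpx's RDCOST macro */
    static int64_t rd_cost(int rdmult, int rddiv, int rate, int64_t dist) {
      return ((128 + (int64_t)rate * rdmult) >> 8) + (dist << rddiv);
    }

    /* skip when sending no coefficients is at least as cheap as coding
       them: sse is the coded-block distortion, ssz the skip distortion */
    static int should_skip_block(int rdmult, int rddiv, int rate,
                                 int64_t sse, int64_t ssz) {
      return rd_cost(rdmult, rddiv, 0, ssz) <=
             rd_cost(rdmult, rddiv, rate, sse);
    }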

Quality change: derf +0.5% psnr, +1.6% ssim; yt +0.6% psnr, +1.1% ssim.
SIMD for quantize will follow in a separate patch. Results for other
test sets pending.

Change-Id: Ife5fa641163ac5150ac428011e87188f1937c1f4
2013-06-28 10:28:49 -07:00


;
; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
;
; Use of this source code is governed by a BSD-style license
; that can be found in the LICENSE file in the root of the source
; tree. An additional intellectual property rights grant can be found
; in the file PATENTS. All contributing project authors may
; be found in the AUTHORS file in the root of the source tree.
;
%include "third_party/x86inc/x86inc.asm"
SECTION .text
; int64_t vp9_block_error(int16_t *coeff, int16_t *dqcoeff, intptr_t block_size,
; int64_t *ssz)
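;
; For reference, a plain-C sketch of what this routine computes (an
; illustrative transcription of the SIMD below, not part of the build):
;
;   int64_t block_error(const int16_t *coeff, const int16_t *dqcoeff,
;                       intptr_t block_size, int64_t *ssz) {
;     int64_t sse = 0, sqcoeff = 0;
;     for (intptr_t i = 0; i < block_size; i++) {
;       const int diff = coeff[i] - dqcoeff[i];
;       sse     += (int64_t)diff * diff;         /* squared error */
;       sqcoeff += (int64_t)coeff[i] * coeff[i]; /* coefficient energy */
;     }
;     *ssz = sqcoeff;
;     return sse;
;   }
;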
INIT_XMM sse2
cglobal block_error, 3, 3, 8, uqc, dqc, size, ssz
  pxor      m4, m4                 ; sse accumulator
  pxor      m6, m6                 ; ssz accumulator
  pxor      m5, m5                 ; dedicated zero register
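  ; point both pointers at the end of their arrays, then count sizeq up
  ; from -block_size to zero so a single add/jl pair drives the loop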
  lea     uqcq, [uqcq+sizeq*2]
  lea     dqcq, [dqcq+sizeq*2]
  neg    sizeq
.loop:
  mova      m2, [uqcq+sizeq*2]
  mova      m0, [dqcq+sizeq*2]
  mova      m3, [uqcq+sizeq*2+mmsize]
  mova      m1, [dqcq+sizeq*2+mmsize]
  psubw     m0, m2
  psubw     m1, m3
  ; individual errors are max. 15bit+sign, so squares are 30bit, and
  ; thus the sum of 2 should fit in a 31bit integer (+ unused sign bit)
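  ; pmaddwd squares each 16-bit lane and adds adjacent pairs into 32-bit
  ; lanes, so it squares and half-reduces in a single instruction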
  pmaddwd   m0, m0
  pmaddwd   m1, m1
  pmaddwd   m2, m2
  pmaddwd   m3, m3
  ; accumulate in 64bit: unpack each 32-bit sum against the zero register
  ; to widen it to 64 bits, then paddq into m4 (sse, from the squared
  ; diffs in m0/m1) and m6 (ssz, from the squared coefficients in m2/m3)
  punpckldq m7, m0, m5
  punpckhdq m0, m5
  paddq     m4, m7
  punpckldq m7, m1, m5
  paddq     m4, m0
  punpckhdq m1, m5
  paddq     m4, m7
  punpckldq m7, m2, m5
  paddq     m4, m1
  punpckhdq m2, m5
  paddq     m6, m7
  punpckldq m7, m3, m5
  paddq     m6, m2
  punpckhdq m3, m5
  paddq     m6, m7
  paddq     m6, m3
  add    sizeq, mmsize
  jl .loop

  ; accumulate horizontally and store in return value
  movhlps   m5, m4
  movhlps   m7, m6
  paddq     m4, m5
  paddq     m6, m7
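  ; the 64-bit sse sum is returned in rax (edx:eax on 32-bit targets);
  ; the ssz sum is written through the pointer argument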
%if ARCH_X86_64
  movq    rax, m4
  movq [sszq], m6
%else
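  ; reload the ssz pointer from its stack slot (sszm) and split the
  ; 64-bit sse between edx and eax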
  mov     eax, sszm
  pshufd   m5, m4, 0x1
  movq  [eax], m6
  movd    eax, m4
  movd    edx, m5
%endif
  RET