Merge "quantize avx: copy 32x32 implementation"

Johann Koenig 2017-08-24 18:55:03 +00:00 committed by Gerrit Code Review
commit 6c21650c0e
5 changed files with 223 additions and 512 deletions


@@ -391,16 +391,14 @@ INSTANTIATE_TEST_CASE_P(
// TODO(johannkoenig): AVX optimizations do not yet pass the 32x32 test or
// highbitdepth configurations.
#if HAVE_AVX && !CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(AVX, VP9QuantizeTest,
                        ::testing::Values(make_tuple(&vpx_quantize_b_avx,
                                                     &vpx_quantize_b_c,
                                                     VPX_BITS_8, 16)));
#if ARCH_X86_64
INSTANTIATE_TEST_CASE_P(DISABLED_AVX, VP9QuantizeTest,
                        ::testing::Values(make_tuple(&vpx_quantize_b_32x32_avx,
                                                     &vpx_quantize_b_32x32_c,
                                                     VPX_BITS_8, 32)));
#endif  // ARCH_X86_64
INSTANTIATE_TEST_CASE_P(
    AVX, VP9QuantizeTest,
    ::testing::Values(make_tuple(&vpx_quantize_b_avx, &vpx_quantize_b_c,
                                 VPX_BITS_8, 16),
                      // Even though SSSE3 and AVX do not match the reference
                      // code, we can keep them in sync with each other.
                      make_tuple(&vpx_quantize_b_32x32_avx,
                                 &vpx_quantize_b_32x32_ssse3, VPX_BITS_8, 32)));
#endif // HAVE_AVX && !CONFIG_VP9_HIGHBITDEPTH
// TODO(webm:1448): dqcoeff is not handled correctly in HBD builds.


@@ -282,9 +282,6 @@ DSP_SRCS-$(HAVE_NEON) += arm/quantize_neon.c
ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_quantize_intrin_sse2.c
endif
ifeq ($(ARCH_X86_64),yes)
DSP_SRCS-$(HAVE_AVX) += x86/quantize_avx_x86_64.asm
endif
# avg
DSP_SRCS-yes += avg.c


@@ -673,7 +673,7 @@ if (vpx_config("CONFIG_VP9_ENCODER") eq "yes") {
  specialize qw/vpx_quantize_b neon sse2 ssse3 avx/;

  add_proto qw/void vpx_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
  specialize qw/vpx_quantize_b_32x32 neon ssse3/, "$avx_x86_64";
  specialize qw/vpx_quantize_b_32x32 neon ssse3 avx/;

  if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
    add_proto qw/void vpx_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
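
The second `specialize` line replaces the first: the 32x32 AVX entry is no longer gated on `$avx_x86_64` (the deleted assembly only built for 64-bit targets), so the intrinsics version is picked up on 32-bit x86 as well. For context, rtcd expands each `specialize` entry into a run-time function-pointer assignment. A simplified sketch of the generated dispatch follows; the names `setup_rtcd_sketch` and the `*_SKETCH` flag values are illustrative, not from the source, and the real code is emitted into vpx_dsp_rtcd.h at build time:

#include <stdint.h>

typedef int16_t tran_low_t; /* int16_t in non-high-bitdepth builds */

/* One shared signature for the C reference and the SIMD versions. */
typedef void quantize_b_fn(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                           int skip_block, const int16_t *zbin_ptr,
                           const int16_t *round_ptr, const int16_t *quant_ptr,
                           const int16_t *quant_shift_ptr,
                           tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                           const int16_t *dequant_ptr, uint16_t *eob_ptr,
                           const int16_t *scan, const int16_t *iscan);

quantize_b_fn vpx_quantize_b_32x32_c, vpx_quantize_b_32x32_ssse3,
    vpx_quantize_b_32x32_avx;

/* The function pointer the rest of the library calls. */
quantize_b_fn *vpx_quantize_b_32x32;

/* Illustrative CPU-capability bits; libvpx gets the real ones from
 * x86_simd_caps() in vpx_ports/x86.h. */
enum { HAS_SSSE3_SKETCH = 1 << 4, HAS_AVX_SKETCH = 1 << 6 };

void setup_rtcd_sketch(int flags) {
  /* Highest supported extension wins; the neon entry is wired up the same
   * way in the ARM flavor of the generated header. */
  vpx_quantize_b_32x32 = vpx_quantize_b_32x32_c;
  if (flags & HAS_SSSE3_SKETCH) vpx_quantize_b_32x32 = vpx_quantize_b_32x32_ssse3;
  if (flags & HAS_AVX_SKETCH) vpx_quantize_b_32x32 = vpx_quantize_b_32x32_avx;
}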


@@ -46,7 +46,7 @@ void vpx_quantize_b_avx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
  // Setup global values.
  zbin = _mm_load_si128((const __m128i *)zbin_ptr);
  // x86 has no "greater *or equal* comparison. Subtract 1 from zbin so
  // x86 has no "greater *or equal*" comparison. Subtract 1 from zbin so
  // it is a strict "greater" comparison.
  zbin = _mm_sub_epi16(zbin, _mm_set1_epi16(1));
  round = _mm_load_si128((const __m128i *)round_ptr);
@@ -200,3 +200,216 @@ void vpx_quantize_b_avx(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
    *eob_ptr = _mm_extract_epi16(eob, 1);
  }
}

void vpx_quantize_b_32x32_avx(
    const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
    const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
    const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
    tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
    const int16_t *scan_ptr, const int16_t *iscan_ptr) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i one = _mm_set1_epi16(1);
  const __m256i big_zero = _mm256_setzero_si256();
  int index;

  __m128i zbin, round, quant, dequant, shift;
  __m128i coeff0, coeff1;
  __m128i qcoeff0, qcoeff1;
  __m128i cmp_mask0, cmp_mask1;
  __m128i all_zero;
  __m128i qtmp0, qtmp1;
  __m128i zero_coeff0, zero_coeff1, iscan0, iscan1;
  __m128i eob = zero, eob0, eob1;

  (void)scan_ptr;
  (void)n_coeffs;
  (void)skip_block;
  assert(!skip_block);

  *eob_ptr = 0;

  // Setup global values.
  // The 32x32 halves zbin and round.
  zbin = _mm_load_si128((const __m128i *)zbin_ptr);

  // Shift with rounding.
  zbin = _mm_add_epi16(zbin, one);
  zbin = _mm_srli_epi16(zbin, 1);

  // x86 has no "greater *or equal*" comparison. Subtract 1 from zbin so
  // it is a strict "greater" comparison.
  zbin = _mm_sub_epi16(zbin, one);

  round = _mm_load_si128((const __m128i *)round_ptr);
  round = _mm_add_epi16(round, one);
  round = _mm_srli_epi16(round, 1);

  quant = _mm_load_si128((const __m128i *)quant_ptr);
  dequant = _mm_load_si128((const __m128i *)dequant_ptr);
  shift = _mm_load_si128((const __m128i *)quant_shift_ptr);
  shift = _mm_slli_epi16(shift, 1);

  // Do DC and first 15 AC.
  coeff0 = load_tran_low(coeff_ptr);
  coeff1 = load_tran_low(coeff_ptr + 8);

  qcoeff0 = _mm_abs_epi16(coeff0);
  qcoeff1 = _mm_abs_epi16(coeff1);

  cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
  zbin = _mm_unpackhi_epi64(zbin, zbin);  // Switch DC to AC
  cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);

  all_zero = _mm_or_si128(cmp_mask0, cmp_mask1);
  if (_mm_test_all_zeros(all_zero, all_zero)) {
    _mm256_store_si256((__m256i *)(qcoeff_ptr), big_zero);
    _mm256_store_si256((__m256i *)(dqcoeff_ptr), big_zero);
#if CONFIG_VP9_HIGHBITDEPTH
    _mm256_store_si256((__m256i *)(qcoeff_ptr + 8), big_zero);
    _mm256_store_si256((__m256i *)(dqcoeff_ptr + 8), big_zero);
#endif  // CONFIG_VP9_HIGHBITDEPTH

    round = _mm_unpackhi_epi64(round, round);
    quant = _mm_unpackhi_epi64(quant, quant);
    shift = _mm_unpackhi_epi64(shift, shift);
    dequant = _mm_unpackhi_epi64(dequant, dequant);
  } else {
    qcoeff0 = _mm_adds_epi16(qcoeff0, round);
    round = _mm_unpackhi_epi64(round, round);
    qcoeff1 = _mm_adds_epi16(qcoeff1, round);

    qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
    quant = _mm_unpackhi_epi64(quant, quant);
    qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);

    qtmp0 = _mm_add_epi16(qtmp0, qcoeff0);
    qtmp1 = _mm_add_epi16(qtmp1, qcoeff1);

    qcoeff0 = _mm_mulhi_epi16(qtmp0, shift);
    shift = _mm_unpackhi_epi64(shift, shift);
    qcoeff1 = _mm_mulhi_epi16(qtmp1, shift);

    // Reinsert signs
    qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
    qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);

    // Mask out zbin threshold coeffs
    qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
    qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);

    store_tran_low(qcoeff0, qcoeff_ptr);
    store_tran_low(qcoeff1, qcoeff_ptr + 8);

    // Un-sign to bias rounding like C.
    // dequant is almost always negative, so this is probably the backwards way
    // to handle the sign. However, it matches the previous assembly.
    coeff0 = _mm_abs_epi16(qcoeff0);
    coeff1 = _mm_abs_epi16(qcoeff1);

    coeff0 = _mm_mullo_epi16(coeff0, dequant);
    dequant = _mm_unpackhi_epi64(dequant, dequant);
    coeff1 = _mm_mullo_epi16(coeff1, dequant);

    // "Divide" by 2.
    coeff0 = _mm_srli_epi16(coeff0, 1);
    coeff1 = _mm_srli_epi16(coeff1, 1);

    coeff0 = _mm_sign_epi16(coeff0, qcoeff0);
    coeff1 = _mm_sign_epi16(coeff1, qcoeff1);

    store_tran_low(coeff0, dqcoeff_ptr);
    store_tran_low(coeff1, dqcoeff_ptr + 8);

    // Scan for eob.
    zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
    zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
    iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr));
    iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + 8));
    // Add one to convert from indices to counts
    iscan0 = _mm_sub_epi16(iscan0, cmp_mask0);
    iscan1 = _mm_sub_epi16(iscan1, cmp_mask1);
    eob = _mm_andnot_si128(zero_coeff0, iscan0);
    eob1 = _mm_andnot_si128(zero_coeff1, iscan1);
    eob = _mm_max_epi16(eob, eob1);
  }

  // AC only loop.
  for (index = 16; index < 32 * 32; index += 16) {
    coeff0 = load_tran_low(coeff_ptr + index);
    coeff1 = load_tran_low(coeff_ptr + index + 8);

    qcoeff0 = _mm_abs_epi16(coeff0);
    qcoeff1 = _mm_abs_epi16(coeff1);

    cmp_mask0 = _mm_cmpgt_epi16(qcoeff0, zbin);
    cmp_mask1 = _mm_cmpgt_epi16(qcoeff1, zbin);

    all_zero = _mm_or_si128(cmp_mask0, cmp_mask1);
    if (_mm_test_all_zeros(all_zero, all_zero)) {
      _mm256_store_si256((__m256i *)(qcoeff_ptr + index), big_zero);
      _mm256_store_si256((__m256i *)(dqcoeff_ptr + index), big_zero);
#if CONFIG_VP9_HIGHBITDEPTH
      _mm256_store_si256((__m256i *)(qcoeff_ptr + index + 8), big_zero);
      _mm256_store_si256((__m256i *)(dqcoeff_ptr + index + 8), big_zero);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      continue;
    }

    qcoeff0 = _mm_adds_epi16(qcoeff0, round);
    qcoeff1 = _mm_adds_epi16(qcoeff1, round);

    qtmp0 = _mm_mulhi_epi16(qcoeff0, quant);
    qtmp1 = _mm_mulhi_epi16(qcoeff1, quant);

    qtmp0 = _mm_add_epi16(qtmp0, qcoeff0);
    qtmp1 = _mm_add_epi16(qtmp1, qcoeff1);

    qcoeff0 = _mm_mulhi_epi16(qtmp0, shift);
    qcoeff1 = _mm_mulhi_epi16(qtmp1, shift);

    qcoeff0 = _mm_sign_epi16(qcoeff0, coeff0);
    qcoeff1 = _mm_sign_epi16(qcoeff1, coeff1);

    qcoeff0 = _mm_and_si128(qcoeff0, cmp_mask0);
    qcoeff1 = _mm_and_si128(qcoeff1, cmp_mask1);

    store_tran_low(qcoeff0, qcoeff_ptr + index);
    store_tran_low(qcoeff1, qcoeff_ptr + index + 8);

    coeff0 = _mm_abs_epi16(qcoeff0);
    coeff1 = _mm_abs_epi16(qcoeff1);

    coeff0 = _mm_mullo_epi16(coeff0, dequant);
    coeff1 = _mm_mullo_epi16(coeff1, dequant);

    coeff0 = _mm_srli_epi16(coeff0, 1);
    coeff1 = _mm_srli_epi16(coeff1, 1);

    coeff0 = _mm_sign_epi16(coeff0, qcoeff0);
    coeff1 = _mm_sign_epi16(coeff1, qcoeff1);

    store_tran_low(coeff0, dqcoeff_ptr + index);
    store_tran_low(coeff1, dqcoeff_ptr + index + 8);

    zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
    zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
    iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + index));
    iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + index + 8));
    iscan0 = _mm_sub_epi16(iscan0, cmp_mask0);
    iscan1 = _mm_sub_epi16(iscan1, cmp_mask1);
    eob0 = _mm_andnot_si128(zero_coeff0, iscan0);
    eob1 = _mm_andnot_si128(zero_coeff1, iscan1);
    eob0 = _mm_max_epi16(eob0, eob1);
    eob = _mm_max_epi16(eob, eob0);
  }

  // Accumulate eob.
  {
    __m128i eob_shuffled;
    eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
    eob = _mm_max_epi16(eob, eob_shuffled);
    eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
    eob = _mm_max_epi16(eob, eob_shuffled);
    eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
    eob = _mm_max_epi16(eob, eob_shuffled);
    *eob_ptr = _mm_extract_epi16(eob, 1);
  }
}
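
Stepping back from the diff: compared with the 16-coefficient vpx_quantize_b_avx path above, the 32x32 variant changes only the constant preparation (zbin and round halved with rounding, quant_shift doubled) and the final halving of the dequantized value. A rough scalar model of one lane is sketched below; the function and parameter names are illustrative rather than from the source, and the saturation/wraparound behavior of the 16-bit SIMD lanes is glossed over:

#include <stdint.h>
#include <stdlib.h>

/* Illustrative single-lane model of the vector code above (non-HBD). */
void quantize_b_32x32_lane(int16_t coeff, int16_t zbin, int16_t round,
                           int16_t quant, int16_t quant_shift, int16_t dequant,
                           int16_t iscan, int16_t *qcoeff, int16_t *dqcoeff,
                           int16_t *eob) {
  const int abs_coeff = abs(coeff);

  /* The 32x32 path halves zbin with rounding; subtracting 1 lets pcmpgtw's
   * strict ">" stand in for the ">=" comparison x86 lacks. */
  if (abs_coeff > ((zbin + 1) >> 1) - 1) {
    /* round is halved the same way.  quant_shift is doubled up front so
     * that pmulhw's ">> 16" reproduces the C reference's ">> 15". */
    int tmp = abs_coeff + ((round + 1) >> 1);
    tmp = ((tmp * quant) >> 16) + tmp;
    tmp = (tmp * (quant_shift << 1)) >> 16;

    *qcoeff = (int16_t)(coeff < 0 ? -tmp : tmp); /* psignw: reinsert sign */

    /* Dequantize on the absolute value, halve, then re-sign, mirroring the
     * abs / pmullw / psrli / psignw sequence above. */
    *dqcoeff = (int16_t)((tmp * dequant) >> 1);
    if (coeff < 0) *dqcoeff = (int16_t)-*dqcoeff;

    /* eob: iscan + 1 turns a scan index into a count, andnot drops lanes
     * whose dequantized value is zero, and pmaxsw keeps a running maximum
     * that is finally reduced into *eob_ptr. */
    if (*dqcoeff != 0 && iscan + 1 > *eob) *eob = (int16_t)(iscan + 1);
  } else {
    *qcoeff = 0; /* below zbin: cmp_mask clears the lane */
    *dqcoeff = 0;
  }
}

Note that the eob here follows the vector code in keying off the dequantized value; the test change above acknowledges that the SIMD versions do not exactly match the C reference and therefore compares AVX against SSSE3 instead.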


@@ -1,497 +0,0 @@
;
; Copyright (c) 2015 The WebM project authors. All Rights Reserved.
;
; Use of this source code is governed by a BSD-style license
; that can be found in the LICENSE file in the root of the source
; tree. An additional intellectual property rights grant can be found
; in the file PATENTS. All contributing project authors may
; be found in the AUTHORS file in the root of the source tree.
;
%include "third_party/x86inc/x86inc.asm"
SECTION .text
%macro QUANTIZE_FN 2
cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
shift, qcoeff, dqcoeff, dequant, \
eob, scan, iscan
vzeroupper
%ifnidn %1, b_32x32
; Special case for ncoeff == 16, as it is frequent and we can save by
; not setting up a loop.
cmp ncoeffmp, 16
jne .generic
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Special case of ncoeff == 16
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
.single:
movifnidn coeffq, coeffmp
movifnidn zbinq, zbinmp
mova m0, [zbinq] ; m0 = zbin
; Get DC and first 15 AC coeffs - in this special case, that is all.
%if CONFIG_VP9_HIGHBITDEPTH
; coeff stored as 32bit numbers but we process them as 16 bit numbers
mova m9, [coeffq]
packssdw m9, [coeffq+16] ; m9 = c[i]
mova m10, [coeffq+32]
packssdw m10, [coeffq+48] ; m10 = c[i]
%else
mova m9, [coeffq] ; m9 = c[i]
mova m10, [coeffq+16] ; m10 = c[i]
%endif
mov r0, eobmp ; Output pointer
mov r1, qcoeffmp ; Output pointer
mov r2, dqcoeffmp ; Output pointer
pxor m5, m5 ; m5 = dedicated zero
pcmpeqw m4, m4 ; All word lanes -1
paddw m0, m4 ; m0 = zbin - 1
pabsw m6, m9 ; m6 = abs(m9)
pabsw m11, m10 ; m11 = abs(m10)
pcmpgtw m7, m6, m0 ; m7 = c[i] >= zbin
punpckhqdq m0, m0
pcmpgtw m12, m11, m0 ; m12 = c[i] >= zbin
; Check if all coeffs are less than zbin. If yes, we just write zeros
; to the outputs and we are done.
por m14, m7, m12
ptest m14, m14
jnz .single_nonzero
%if CONFIG_VP9_HIGHBITDEPTH
mova [r1 ], ymm5
mova [r1+32], ymm5
mova [r2 ], ymm5
mova [r2+32], ymm5
%else
mova [r1], ymm5
mova [r2], ymm5
%endif
mov [r0], word 0
vzeroupper
RET
.single_nonzero:
; Actual quantization of size 16 block - setup pointers, rounders, etc.
movifnidn r4, roundmp
movifnidn r5, quantmp
mov r3, dequantmp
mov r6, shiftmp
mova m1, [r4] ; m1 = round
mova m2, [r5] ; m2 = quant
mova m3, [r3] ; m3 = dequant
mova m4, [r6] ; m4 = shift
mov r3, iscanmp
DEFINE_ARGS eob, qcoeff, dqcoeff, iscan
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
paddsw m6, m1 ; m6 += round
punpckhqdq m1, m1
paddsw m11, m1 ; m11 += round
pmulhw m8, m6, m2 ; m8 = m6*q>>16
punpckhqdq m2, m2
pmulhw m13, m11, m2 ; m13 = m11*q>>16
paddw m8, m6 ; m8 += m6
paddw m13, m11 ; m13 += m11
pmulhw m8, m4 ; m8 = m8*qsh>>16
punpckhqdq m4, m4
pmulhw m13, m4 ; m13 = m13*qsh>>16
psignw m8, m9 ; m8 = reinsert sign
psignw m13, m10 ; m13 = reinsert sign
pand m8, m7
pand m13, m12
%if CONFIG_VP9_HIGHBITDEPTH
; Store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m8
punpckhwd m6, m8, m6
pmovsxwd m11, m8
mova [qcoeffq ], m11
mova [qcoeffq+16], m6
pcmpgtw m6, m5, m13
punpckhwd m6, m13, m6
pmovsxwd m11, m13
mova [qcoeffq+32], m11
mova [qcoeffq+48], m6
%else
mova [qcoeffq ], m8
mova [qcoeffq+16], m13
%endif
pmullw m8, m3 ; dqc[i] = qc[i] * q
punpckhqdq m3, m3
pmullw m13, m3 ; dqc[i] = qc[i] * q
%if CONFIG_VP9_HIGHBITDEPTH
; Store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m8
punpckhwd m6, m8, m6
pmovsxwd m11, m8
mova [dqcoeffq ], m11
mova [dqcoeffq+16], m6
pcmpgtw m6, m5, m13
punpckhwd m6, m13, m6
pmovsxwd m11, m13
mova [dqcoeffq+32], m11
mova [dqcoeffq+48], m6
%else
mova [dqcoeffq ], m8
mova [dqcoeffq+16], m13
%endif
mova m6, [iscanq] ; m6 = scan[i]
mova m11, [iscanq+16] ; m11 = scan[i]
pcmpeqw m8, m8, m5 ; m8 = c[i] == 0
pcmpeqw m13, m13, m5 ; m13 = c[i] == 0
psubw m6, m6, m7 ; m6 = scan[i] + 1
psubw m11, m11, m12 ; m11 = scan[i] + 1
pandn m8, m8, m6 ; m8 = max(eob)
pandn m13, m13, m11 ; m13 = max(eob)
pmaxsw m8, m8, m13
; Horizontally accumulate/max eobs and write into [eob] memory pointer
pshufd m7, m8, 0xe
pmaxsw m8, m7
pshuflw m7, m8, 0xe
pmaxsw m8, m7
pshuflw m7, m8, 0x1
pmaxsw m8, m7
movq rax, m8
mov [eobq], ax
vzeroupper
RET
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Generic case of ncoeff != 16
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
.generic:
%endif ; %ifnidn %1, b_32x32
DEFINE_ARGS coeff, ncoeff, skip, zbin, round, quant, shift, \
qcoeff, dqcoeff, dequant, eob, scan, iscan
; Actual quantization loop - setup pointers, rounders, etc.
movifnidn coeffq, coeffmp
movifnidn ncoeffq, ncoeffmp
mov r2, dequantmp
movifnidn zbinq, zbinmp
movifnidn roundq, roundmp
movifnidn quantq, quantmp
mova m0, [zbinq] ; m0 = zbin
mova m1, [roundq] ; m1 = round
mova m2, [quantq] ; m2 = quant
mova m3, [r2] ; m3 = dequant
pcmpeqw m4, m4 ; All lanes -1
%ifidn %1, b_32x32
psubw m0, m4
psubw m1, m4
psrlw m0, 1 ; m0 = (m0 + 1) / 2
psrlw m1, 1 ; m1 = (m1 + 1) / 2
%endif
paddw m0, m4 ; m0 = zbin - 1
mov r2, shiftmp
mov r3, qcoeffmp
mova m4, [r2] ; m4 = shift
mov r4, dqcoeffmp
mov r5, iscanmp
%ifidn %1, b_32x32
psllw m4, 1
%endif
pxor m5, m5 ; m5 = dedicated zero
DEFINE_ARGS coeff, ncoeff, d1, qcoeff, dqcoeff, iscan, d2, d3, d4, d5, eob
%if CONFIG_VP9_HIGHBITDEPTH
lea coeffq, [ coeffq+ncoeffq*4]
lea qcoeffq, [ qcoeffq+ncoeffq*4]
lea dqcoeffq, [dqcoeffq+ncoeffq*4]
%else
lea coeffq, [ coeffq+ncoeffq*2]
lea qcoeffq, [ qcoeffq+ncoeffq*2]
lea dqcoeffq, [dqcoeffq+ncoeffq*2]
%endif
lea iscanq, [ iscanq+ncoeffq*2]
neg ncoeffq
; get DC and first 15 AC coeffs
%if CONFIG_VP9_HIGHBITDEPTH
; coeff stored as 32bit numbers but we process them as 16 bit numbers
mova m9, [coeffq+ncoeffq*4+ 0]
packssdw m9, [coeffq+ncoeffq*4+16]
mova m10, [coeffq+ncoeffq*4+32]
packssdw m10, [coeffq+ncoeffq*4+48]
%else
mova m9, [coeffq+ncoeffq*2+ 0] ; m9 = c[i]
mova m10, [coeffq+ncoeffq*2+16] ; m10 = c[i]
%endif
pabsw m6, m9 ; m6 = abs(m9)
pabsw m11, m10 ; m11 = abs(m10)
pcmpgtw m7, m6, m0 ; m7 = c[i] >= zbin
punpckhqdq m0, m0
pcmpgtw m12, m11, m0 ; m12 = c[i] >= zbin
; Check if all coeffs are less than zbin. If yes, skip forward quickly.
por m14, m7, m12
ptest m14, m14
jnz .first_nonzero
%if CONFIG_VP9_HIGHBITDEPTH
mova [qcoeffq+ncoeffq*4 ], ymm5
mova [qcoeffq+ncoeffq*4+32], ymm5
mova [dqcoeffq+ncoeffq*4 ], ymm5
mova [dqcoeffq+ncoeffq*4+32], ymm5
%else
mova [qcoeffq+ncoeffq*2], ymm5
mova [dqcoeffq+ncoeffq*2], ymm5
%endif
add ncoeffq, mmsize
punpckhqdq m1, m1
punpckhqdq m2, m2
punpckhqdq m3, m3
punpckhqdq m4, m4
pxor m8, m8
jmp .ac_only_loop
.first_nonzero:
paddsw m6, m1 ; m6 += round
punpckhqdq m1, m1
paddsw m11, m1 ; m11 += round
pmulhw m8, m6, m2 ; m8 = m6*q>>16
punpckhqdq m2, m2
pmulhw m13, m11, m2 ; m13 = m11*q>>16
paddw m8, m6 ; m8 += m6
paddw m13, m11 ; m13 += m11
pmulhw m8, m4 ; m8 = m8*qsh>>16
punpckhqdq m4, m4
pmulhw m13, m4 ; m13 = m13*qsh>>16
psignw m8, m9 ; m8 = reinsert sign
psignw m13, m10 ; m13 = reinsert sign
pand m8, m7
pand m13, m12
%if CONFIG_VP9_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m8
punpckhwd m6, m8, m6
pmovsxwd m11, m8
mova [qcoeffq+ncoeffq*4+ 0], m11
mova [qcoeffq+ncoeffq*4+16], m6
pcmpgtw m6, m5, m13
punpckhwd m6, m13, m6
pmovsxwd m11, m13
mova [qcoeffq+ncoeffq*4+32], m11
mova [qcoeffq+ncoeffq*4+48], m6
%else
mova [qcoeffq+ncoeffq*2+ 0], m8
mova [qcoeffq+ncoeffq*2+16], m13
%endif
%ifidn %1, b_32x32
pabsw m8, m8
pabsw m13, m13
%endif
pmullw m8, m3 ; dqc[i] = qc[i] * q
punpckhqdq m3, m3
pmullw m13, m3 ; dqc[i] = qc[i] * q
%ifidn %1, b_32x32
psrlw m8, 1
psrlw m13, 1
psignw m8, m9
psignw m13, m10
%endif
%if CONFIG_VP9_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m8
punpckhwd m6, m8, m6
pmovsxwd m11, m8
mova [dqcoeffq+ncoeffq*4+ 0], m11
mova [dqcoeffq+ncoeffq*4+16], m6
pcmpgtw m6, m5, m13
punpckhwd m6, m13, m6
pmovsxwd m11, m13
mova [dqcoeffq+ncoeffq*4+32], m11
mova [dqcoeffq+ncoeffq*4+48], m6
%else
mova [dqcoeffq+ncoeffq*2+ 0], m8
mova [dqcoeffq+ncoeffq*2+16], m13
%endif
pcmpeqw m8, m5 ; m8 = c[i] == 0
pcmpeqw m13, m5 ; m13 = c[i] == 0
mova m6, [iscanq+ncoeffq*2] ; m6 = scan[i]
mova m11, [iscanq+ncoeffq*2+16] ; m11 = scan[i]
psubw m6, m7 ; m6 = scan[i] + 1
psubw m11, m12 ; m11 = scan[i] + 1
pandn m8, m6 ; m8 = max(eob)
pandn m13, m11 ; m13 = max(eob)
pmaxsw m8, m13
add ncoeffq, mmsize
.ac_only_loop:
%if CONFIG_VP9_HIGHBITDEPTH
; pack coeff from 32bit to 16bit array
mova m9, [coeffq+ncoeffq*4+ 0]
packssdw m9, [coeffq+ncoeffq*4+16]
mova m10, [coeffq+ncoeffq*4+32]
packssdw m10, [coeffq+ncoeffq*4+48]
%else
mova m9, [coeffq+ncoeffq*2+ 0] ; m9 = c[i]
mova m10, [coeffq+ncoeffq*2+16] ; m10 = c[i]
%endif
pabsw m6, m9 ; m6 = abs(m9)
pabsw m11, m10 ; m11 = abs(m10)
pcmpgtw m7, m6, m0 ; m7 = c[i] >= zbin
pcmpgtw m12, m11, m0 ; m12 = c[i] >= zbin
; Check if all coeffs are less than zbin. If yes, skip this iteration
; and just write zeros, since that is what the result would be.
por m14, m7, m12
ptest m14, m14
jnz .rest_nonzero
%if CONFIG_VP9_HIGHBITDEPTH
mova [qcoeffq+ncoeffq*4+ 0], ymm5
mova [qcoeffq+ncoeffq*4+32], ymm5
mova [dqcoeffq+ncoeffq*4+ 0], ymm5
mova [dqcoeffq+ncoeffq*4+32], ymm5
%else
mova [qcoeffq+ncoeffq*2+ 0], ymm5
mova [dqcoeffq+ncoeffq*2+ 0], ymm5
%endif
add ncoeffq, mmsize
jnz .ac_only_loop
; Horizontally accumulate/max eobs and write into [eob] memory pointer
mov r2, eobmp
pshufd m7, m8, 0xe
pmaxsw m8, m7
pshuflw m7, m8, 0xe
pmaxsw m8, m7
pshuflw m7, m8, 0x1
pmaxsw m8, m7
movq rax, m8
mov [r2], ax
vzeroupper
RET
.rest_nonzero:
paddsw m6, m1 ; m6 += round
paddsw m11, m1 ; m11 += round
pmulhw m14, m6, m2 ; m14 = m6*q>>16
pmulhw m13, m11, m2 ; m13 = m11*q>>16
paddw m14, m6 ; m14 += m6
paddw m13, m11 ; m13 += m11
pmulhw m14, m4 ; m14 = m14*qsh>>16
pmulhw m13, m4 ; m13 = m13*qsh>>16
psignw m14, m9 ; m14 = reinsert sign
psignw m13, m10 ; m13 = reinsert sign
pand m14, m7
pand m13, m12
%if CONFIG_VP9_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m14
punpckhwd m6, m14, m6
pmovsxwd m11, m14
mova [qcoeffq+ncoeffq*4+ 0], m11
mova [qcoeffq+ncoeffq*4+16], m6
pcmpgtw m6, m5, m13
punpckhwd m6, m13, m6
pmovsxwd m11, m13
mova [qcoeffq+ncoeffq*4+32], m11
mova [qcoeffq+ncoeffq*4+48], m6
%else
mova [qcoeffq+ncoeffq*2+ 0], m14
mova [qcoeffq+ncoeffq*2+16], m13
%endif
%ifidn %1, b_32x32
pabsw m14, m14
pabsw m13, m13
%endif
pmullw m14, m3 ; dqc[i] = qc[i] * q
pmullw m13, m3 ; dqc[i] = qc[i] * q
%ifidn %1, b_32x32
psrlw m14, 1
psrlw m13, 1
psignw m14, m9
psignw m13, m10
%endif
%if CONFIG_VP9_HIGHBITDEPTH
; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
pcmpgtw m6, m5, m14
punpckhwd m6, m14, m6
pmovsxwd m11, m14
mova [dqcoeffq+ncoeffq*4+ 0], m11
mova [dqcoeffq+ncoeffq*4+16], m6
pcmpgtw m6, m5, m13
punpckhwd m6, m13, m6
pmovsxwd m11, m13
mova [dqcoeffq+ncoeffq*4+32], m11
mova [dqcoeffq+ncoeffq*4+48], m6
%else
mova [dqcoeffq+ncoeffq*2+ 0], m14
mova [dqcoeffq+ncoeffq*2+16], m13
%endif
pcmpeqw m14, m5 ; m14 = c[i] == 0
pcmpeqw m13, m5 ; m13 = c[i] == 0
mova m6, [iscanq+ncoeffq*2+ 0] ; m6 = scan[i]
mova m11, [iscanq+ncoeffq*2+16] ; m11 = scan[i]
psubw m6, m7 ; m6 = scan[i] + 1
psubw m11, m12 ; m11 = scan[i] + 1
pandn m14, m6 ; m14 = max(eob)
pandn m13, m11 ; m13 = max(eob)
pmaxsw m8, m14
pmaxsw m8, m13
add ncoeffq, mmsize
jnz .ac_only_loop
; Horizontally accumulate/max eobs and write into [eob] memory pointer
mov r2, eobmp
pshufd m7, m8, 0xe
pmaxsw m8, m7
pshuflw m7, m8, 0xe
pmaxsw m8, m7
pshuflw m7, m8, 0x1
pmaxsw m8, m7
movq rax, m8
mov [r2], ax
vzeroupper
RET
%endmacro
INIT_XMM avx
QUANTIZE_FN b_32x32, 7