Merge changes Id5beb35d,I2945fe54,Ib0f3cfd6,I78a2eba8

* changes:
  partial fdct neon: add 32x32_1
  partial fdct neon: add 16x16_1
  partial fdct neon: add 4x4_1
  partial fdct neon: move 8x8_1 and enable hbd tests
Johann Koenig 2017-06-30 01:00:07 +00:00 committed by Gerrit Code Review
commit 89d3dc043e
5 changed files with 128 additions and 30 deletions
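The `_1` suffix marks the partial forward transforms: they produce only the DC coefficient by summing every sample in the block and applying a per-size scale, then clear output[1]. As a mental model, here is a minimal scalar sketch (not part of this change; the helper name is made up, and the shifts are taken from the NEON code in fdct_partial_neon.c below):

#include <stdint.h>

/* Scalar sketch of an n x n partial fdct: only the DC term is computed.
 * Shifts mirror the NEON versions below: 4x4 << 1, 8x8 none, 16x16 >> 1,
 * 32x32 >> 3. */
static int32_t partial_fdct_dc_sketch(const int16_t *input, int stride, int n) {
  int32_t sum = 0;
  int r, c;
  for (r = 0; r < n; ++r) {
    for (c = 0; c < n; ++c) sum += input[r * stride + c];
  }
  if (n == 4) return sum << 1;
  if (n == 16) return sum >> 1;
  if (n == 32) return sum >> 3;
  return sum; /* n == 8 */
}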

@@ -139,9 +139,23 @@ INSTANTIATE_TEST_CASE_P(
 #endif  // HAVE_SSE2

 #if HAVE_NEON
-INSTANTIATE_TEST_CASE_P(NEON, PartialFdctTest,
-                        ::testing::Values(make_tuple(&vpx_fdct8x8_1_neon, 8,
-                                                     VPX_BITS_8)));
+#if CONFIG_VP9_HIGHBITDEPTH
+INSTANTIATE_TEST_CASE_P(
+    NEON, PartialFdctTest,
+    ::testing::Values(make_tuple(&vpx_fdct32x32_1_neon, 32, VPX_BITS_8),
+                      make_tuple(&vpx_fdct16x16_1_neon, 16, VPX_BITS_8),
+                      make_tuple(&vpx_fdct8x8_1_neon, 8, VPX_BITS_12),
+                      make_tuple(&vpx_fdct8x8_1_neon, 8, VPX_BITS_10),
+                      make_tuple(&vpx_fdct8x8_1_neon, 8, VPX_BITS_8),
+                      make_tuple(&vpx_fdct4x4_1_neon, 4, VPX_BITS_8)));
+#else
+INSTANTIATE_TEST_CASE_P(
+    NEON, PartialFdctTest,
+    ::testing::Values(make_tuple(&vpx_fdct32x32_1_neon, 32, VPX_BITS_8),
+                      make_tuple(&vpx_fdct16x16_1_neon, 16, VPX_BITS_8),
+                      make_tuple(&vpx_fdct8x8_1_neon, 8, VPX_BITS_8),
+                      make_tuple(&vpx_fdct4x4_1_neon, 4, VPX_BITS_8)));
+#endif  // CONFIG_VP9_HIGHBITDEPTH
 #endif  // HAVE_NEON

 #if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH

@@ -0,0 +1,102 @@
/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vpx_dsp_rtcd.h"
#include "./vpx_config.h"
#include "vpx_dsp/arm/mem_neon.h"

static INLINE int32_t sum_int16x8(const int16x8_t a) {
  const int32x4_t b = vpaddlq_s16(a);
  const int64x2_t c = vpaddlq_s32(b);
  const int32x2_t d = vadd_s32(vreinterpret_s32_s64(vget_low_s64(c)),
                               vreinterpret_s32_s64(vget_high_s64(c)));
  return vget_lane_s32(d, 0);
}

void vpx_fdct4x4_1_neon(const int16_t *input, tran_low_t *output, int stride) {
  int16x4_t a0, a1, a2, a3;
  int16x8_t b0, b1;
  int16x8_t c;

  a0 = vld1_s16(input);
  input += stride;
  a1 = vld1_s16(input);
  input += stride;
  a2 = vld1_s16(input);
  input += stride;
  a3 = vld1_s16(input);

  b0 = vcombine_s16(a0, a1);
  b1 = vcombine_s16(a2, a3);
  c = vaddq_s16(b0, b1);

  output[0] = (tran_low_t)(sum_int16x8(c) << 1);
  output[1] = 0;
}

void vpx_fdct8x8_1_neon(const int16_t *input, tran_low_t *output, int stride) {
  int r;
  int16x8_t sum = vld1q_s16(&input[0]);

  for (r = 1; r < 8; ++r) {
    const int16x8_t input_00 = vld1q_s16(&input[r * stride]);
    sum = vaddq_s16(sum, input_00);
  }

  output[0] = (tran_low_t)sum_int16x8(sum);
  output[1] = 0;
}

void vpx_fdct16x16_1_neon(const int16_t *input, tran_low_t *output,
                          int stride) {
  int r;
  int16x8_t left = vld1q_s16(input);
  int16x8_t right = vld1q_s16(input + 8);
  input += stride;

  for (r = 1; r < 16; ++r) {
    const int16x8_t a = vld1q_s16(input);
    const int16x8_t b = vld1q_s16(input + 8);
    input += stride;
    left = vaddq_s16(left, a);
    right = vaddq_s16(right, b);
  }

  output[0] = (tran_low_t)((sum_int16x8(left) + sum_int16x8(right)) >> 1);
  output[1] = 0;
}

void vpx_fdct32x32_1_neon(const int16_t *input, tran_low_t *output,
                          int stride) {
  int r;
  int16x8_t a0 = vld1q_s16(input);
  int16x8_t a1 = vld1q_s16(input + 8);
  int16x8_t a2 = vld1q_s16(input + 16);
  int16x8_t a3 = vld1q_s16(input + 24);
  input += stride;

  for (r = 1; r < 32; ++r) {
    const int16x8_t b0 = vld1q_s16(input);
    const int16x8_t b1 = vld1q_s16(input + 8);
    const int16x8_t b2 = vld1q_s16(input + 16);
    const int16x8_t b3 = vld1q_s16(input + 24);
    input += stride;
    a0 = vaddq_s16(a0, b0);
    a1 = vaddq_s16(a1, b1);
    a2 = vaddq_s16(a2, b2);
    a3 = vaddq_s16(a3, b3);
  }

  // TODO(johannkoenig): sum and shift the values in neon registers.
  output[0] = (tran_low_t)(
      (sum_int16x8(a0) + sum_int16x8(a1) + sum_int16x8(a2) + sum_int16x8(a3)) >>
      3);
  output[1] = 0;
}
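sum_int16x8() above reduces eight int16 lanes to one int32 with two widening pairwise adds and a final 32-bit add. As an aside (not part of the patch, and relying on an __aarch64__ assumption): on AArch64 the same reduction is a single add-across intrinsic, which also hints at how the TODO in vpx_fdct32x32_1_neon() about summing in NEON registers might be approached.

#include <arm_neon.h>
#include <stdint.h>

/* Horizontal sum of eight int16 lanes into an int32; equivalent to
 * sum_int16x8() above, shown here only for comparison. */
static inline int32_t sum_int16x8_alt(const int16x8_t a) {
#if defined(__aarch64__)
  return vaddlvq_s16(a); /* widening add across all lanes (AArch64 only) */
#else
  const int32x4_t b = vpaddlq_s16(a); /* 8 x s16 -> 4 x s32 */
  const int64x2_t c = vpaddlq_s32(b); /* 4 x s32 -> 2 x s64 */
  const int32x2_t d = vadd_s32(vreinterpret_s32_s64(vget_low_s64(c)),
                               vreinterpret_s32_s64(vget_high_s64(c)));
  return vget_lane_s32(d, 0);
#endif
}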

@@ -207,24 +207,3 @@ void vpx_fdct8x8_neon(const int16_t *input, tran_low_t *final_output,
     store_s16q_to_tran_low(final_output + 7 * 8, input_7);
   }
 }
-
-void vpx_fdct8x8_1_neon(const int16_t *input, tran_low_t *output, int stride) {
-  int r;
-  int16x8_t sum = vld1q_s16(&input[0]);
-  for (r = 1; r < 8; ++r) {
-    const int16x8_t input_00 = vld1q_s16(&input[r * stride]);
-    sum = vaddq_s16(sum, input_00);
-  }
-  {
-    const int32x4_t a = vpaddlq_s16(sum);
-    const int64x2_t b = vpaddlq_s32(a);
-    const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
-                                 vreinterpret_s32_s64(vget_high_s64(b)));
-#if CONFIG_VP9_HIGHBITDEPTH
-    output[0] = vget_lane_s32(c, 0);
-#else
-    output[0] = vget_lane_s16(vreinterpret_s16_s32(c), 0);
-#endif
-    output[1] = 0;
-  }
-}
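The removed body above needed a CONFIG_VP9_HIGHBITDEPTH branch because it extracted the result lane at a bit-depth-dependent width (a 32-bit lane for high-bitdepth builds, a 16-bit lane otherwise). The replacement in fdct_partial_neon.c always produces an int32_t via sum_int16x8() and casts it to tran_low_t, so one body serves both configurations and the high-bitdepth prototype can be mapped directly onto it (see the rtcd change below).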

@@ -196,6 +196,7 @@ DSP_SRCS-$(HAVE_AVX2) += x86/fwd_dct32x32_impl_avx2.h
 DSP_SRCS-$(HAVE_NEON) += arm/fdct_neon.c
 DSP_SRCS-$(HAVE_NEON) += arm/fdct16x16_neon.c
 DSP_SRCS-$(HAVE_NEON) += arm/fdct32x32_neon.c
+DSP_SRCS-$(HAVE_NEON) += arm/fdct_partial_neon.c
 DSP_SRCS-$(HAVE_NEON) += arm/fwd_txfm_neon.c
 DSP_SRCS-$(HAVE_MSA) += mips/fwd_txfm_msa.h
 DSP_SRCS-$(HAVE_MSA) += mips/fwd_txfm_msa.c

@@ -487,7 +487,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
 specialize qw/vpx_fdct4x4 neon sse2/;

 add_proto qw/void vpx_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
-specialize qw/vpx_fdct4x4_1 sse2/;
+specialize qw/vpx_fdct4x4_1 sse2 neon/;

 add_proto qw/void vpx_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
 specialize qw/vpx_fdct8x8 neon sse2/;
@@ -499,7 +499,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
 specialize qw/vpx_fdct16x16 neon sse2/;

 add_proto qw/void vpx_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-specialize qw/vpx_fdct16x16_1 sse2/;
+specialize qw/vpx_fdct16x16_1 sse2 neon/;

 add_proto qw/void vpx_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
 specialize qw/vpx_fdct32x32 neon sse2/;
@@ -508,7 +508,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
 specialize qw/vpx_fdct32x32_rd sse2/;

 add_proto qw/void vpx_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-specialize qw/vpx_fdct32x32_1 sse2/;
+specialize qw/vpx_fdct32x32_1 sse2 neon/;

 add_proto qw/void vpx_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
 specialize qw/vpx_highbd_fdct4x4 sse2/;
@@ -517,6 +517,8 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
 specialize qw/vpx_highbd_fdct8x8 sse2/;

 add_proto qw/void vpx_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+specialize qw/vpx_highbd_fdct8x8_1 neon/;
+$vpx_highbd_fdct8x8_1_neon=vpx_fdct8x8_1_neon;

 add_proto qw/void vpx_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
 specialize qw/vpx_highbd_fdct16x16 sse2/;
@@ -535,7 +537,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
 specialize qw/vpx_fdct4x4 neon sse2 msa/;

 add_proto qw/void vpx_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
-specialize qw/vpx_fdct4x4_1 sse2/;
+specialize qw/vpx_fdct4x4_1 sse2 neon/;

 add_proto qw/void vpx_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
 specialize qw/vpx_fdct8x8 sse2 neon msa/, "$ssse3_x86_64";
@@ -547,7 +549,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
 specialize qw/vpx_fdct16x16 neon sse2 msa/;

 add_proto qw/void vpx_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-specialize qw/vpx_fdct16x16_1 sse2 msa/;
+specialize qw/vpx_fdct16x16_1 sse2 neon msa/;

 add_proto qw/void vpx_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
 specialize qw/vpx_fdct32x32 neon sse2 avx2 msa/;
@@ -556,7 +558,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
 specialize qw/vpx_fdct32x32_rd sse2 avx2 msa/;

 add_proto qw/void vpx_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-specialize qw/vpx_fdct32x32_1 sse2 msa/;
+specialize qw/vpx_fdct32x32_1 sse2 neon msa/;

 } # CONFIG_VP9_HIGHBITDEPTH
 } # CONFIG_VP9_ENCODER
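In vpx_dsp_rtcd_defs.pl, add_proto declares a function's C prototype and specialize lists the optimized variants the generated RTCD header can dispatch to, so adding neon to these lines is what actually wires the new NEON functions into the build. The `$vpx_highbd_fdct8x8_1_neon=vpx_fdct8x8_1_neon;` assignment points the high-bitdepth variant name at the existing 8-bit NEON routine, which is why the test file can now instantiate vpx_fdct8x8_1_neon for VPX_BITS_10 and VPX_BITS_12 as well.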