mips msa vp9 fdct 8x8 optimization

average improvement ~4x-5x

Change-Id: I37582efc2622bc20b2bf99617a76110ab24e9f6a
Parag Salasakar 2015-06-18 12:03:30 +05:30
parent b6ea0c4c57
commit 7ca84888c2
6 changed files with 329 additions and 8 deletions


@@ -786,13 +786,13 @@ INSTANTIATE_TEST_CASE_P(
 INSTANTIATE_TEST_CASE_P(
     MSA, FwdTrans8x8DCT,
     ::testing::Values(
-        make_tuple(&vp9_fdct8x8_c, &vp9_idct8x8_64_add_msa, 0, VPX_BITS_8)));
+        make_tuple(&vp9_fdct8x8_msa, &vp9_idct8x8_64_add_msa, 0, VPX_BITS_8)));
 INSTANTIATE_TEST_CASE_P(
     MSA, FwdTrans8x8HT,
     ::testing::Values(
-        make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_msa, 0, VPX_BITS_8),
-        make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_msa, 1, VPX_BITS_8),
-        make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_msa, 2, VPX_BITS_8),
-        make_tuple(&vp9_fht8x8_c, &vp9_iht8x8_64_add_msa, 3, VPX_BITS_8)));
+        make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 0, VPX_BITS_8),
+        make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 1, VPX_BITS_8),
+        make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 2, VPX_BITS_8),
+        make_tuple(&vp9_fht8x8_msa, &vp9_iht8x8_64_add_msa, 3, VPX_BITS_8)));
 #endif  // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 }  // namespace


@@ -856,6 +856,15 @@
}
#define INSERT_W2_SB(...) INSERT_W2(v16i8, __VA_ARGS__)
#define INSERT_W4(RTYPE, in0, in1, in2, in3, out) { \
out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0); \
out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1); \
out = (RTYPE)__msa_insert_w((v4i32)out, 2, in2); \
out = (RTYPE)__msa_insert_w((v4i32)out, 3, in3); \
}
#define INSERT_W4_UB(...) INSERT_W4(v16u8, __VA_ARGS__)
#define INSERT_W4_SB(...) INSERT_W4(v16i8, __VA_ARGS__)
/* Description : Insert specified double word elements from input vectors to 1
destination vector
Arguments : Inputs - in0, in1 (2 input vectors)
@@ -902,6 +911,19 @@
#define ILVEV_H2_SH(...) ILVEV_H2(v8i16, __VA_ARGS__)
#define ILVEV_H2_SW(...) ILVEV_H2(v4i32, __VA_ARGS__)
/* Description : Interleave even word elements from vectors
Arguments : Inputs - in0, in1, in2, in3
Outputs - out0, out1
Return Type - as per RTYPE
Details : Even word elements of 'in0' and 'in1' are interleaved
and written to 'out0'
*/
#define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) { \
out0 = (RTYPE)__msa_ilvev_w((v4i32)in1, (v4i32)in0); \
out1 = (RTYPE)__msa_ilvev_w((v4i32)in3, (v4i32)in2); \
}
#define ILVEV_W2_SB(...) ILVEV_W2(v16i8, __VA_ARGS__)
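For reference, a scalar sketch of ILVEV_W2's first output, assuming the standard MSA ilvev.w lane order (helper name illustrative):
#include <stdint.h>
/* Scalar sketch of ILVEV_W2's first output: the even-indexed
 * words of 'in0' and 'in1' are interleaved into 'out0'. */
static void ilvev_w2_sketch(const int32_t in0[4], const int32_t in1[4],
                            int32_t out0[4]) {
  out0[0] = in0[0];
  out0[1] = in1[0];
  out0[2] = in0[2];
  out0[3] = in1[2];
}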
/* Description : Interleave even double word elements from vectors
Arguments : Inputs - in0, in1, in2, in3
Outputs - out0, out1
@@ -1296,6 +1318,24 @@
}
#define XORI_B7_128_SB(...) XORI_B7_128(v16i8, __VA_ARGS__)
/* Description : Average of signed halfword elements -> (a + b) / 2
Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
Outputs - out0, out1, out2, out3
Return Type - as per RTYPE
Details : Each signed halfword element of 'in0' is added to the
corresponding signed halfword element of 'in1' with full
precision, resulting in one extra bit in the sum. The sum is
then divided by 2 and written to 'out0'
*/
#define AVE_SH4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
out0, out1, out2, out3) { \
out0 = (RTYPE)__msa_ave_s_h((v8i16)in0, (v8i16)in1); \
out1 = (RTYPE)__msa_ave_s_h((v8i16)in2, (v8i16)in3); \
out2 = (RTYPE)__msa_ave_s_h((v8i16)in4, (v8i16)in5); \
out3 = (RTYPE)__msa_ave_s_h((v8i16)in6, (v8i16)in7); \
}
#define AVE_SH4_SH(...) AVE_SH4(v8i16, __VA_ARGS__)
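Per lane, ave_s.h is a full-precision add followed by an arithmetic halving, so the extra bit of the sum is never lost to overflow; a minimal scalar sketch (helper name illustrative):
#include <stdint.h>
/* Scalar sketch of one __msa_ave_s_h lane: add at full precision,
 * then arithmetic shift right by one. */
static int16_t ave_s_h_sketch(int16_t a, int16_t b) {
  return (int16_t)(((int32_t)a + (int32_t)b) >> 1);
}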
/* Description : Addition of signed halfword elements and signed saturation
Arguments : Inputs - in0, in1, in2, in3
Outputs - out0, out1
@@ -1350,6 +1390,27 @@
in3 = in3 >> shift; \
}
/* Description : Shift right arithmetic rounded words
Arguments : Inputs - in0, in1, shift
Outputs - in0, in1 (in place)
Return Type - as per RTYPE
Details : Each element of vector 'in0' is shifted right arithmetically by
the number of bits in the corresponding element in the vector
'shift'. The last discarded bit is added to the shifted value
for rounding and the result is written in-place.
'shift' is a vector.
*/
#define SRAR_W2(RTYPE, in0, in1, shift) { \
in0 = (RTYPE)__msa_srar_w((v4i32)in0, (v4i32)shift); \
in1 = (RTYPE)__msa_srar_w((v4i32)in1, (v4i32)shift); \
}
#define SRAR_W4(RTYPE, in0, in1, in2, in3, shift) { \
SRAR_W2(RTYPE, in0, in1, shift) \
SRAR_W2(RTYPE, in2, in3, shift) \
}
#define SRAR_W4_SW(...) SRAR_W4(v4i32, __VA_ARGS__)
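Per lane this is the usual round-half-up arithmetic shift; a scalar sketch with a scalar 'shift' (helper name illustrative):
#include <stdint.h>
/* Scalar sketch of one srar.w lane: the last discarded bit is
 * added back before the arithmetic right shift. */
static int32_t srar_w_sketch(int32_t in, int shift) {
  return (shift == 0) ? in : ((in + (1 << (shift - 1))) >> shift);
}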
/* Description : Shift right arithmetic rounded (immediate)
Arguments : Inputs - in0, in1, in2, in3, shift
Outputs - in0, in1, in2, in3 (in place)
@@ -1396,6 +1457,21 @@
}
#define SRARI_W4_SW(...) SRARI_W4(v4i32, __VA_ARGS__)
/* Description : Logical shift right all elements of vector (immediate)
Arguments : Inputs - in0, in1, in2, in3, shift
Outputs - out0, out1, out2, out3
Return Type - as per RTYPE
Details : Each element of vector 'in0' is right shifted by 'shift' and
the result is written to 'out0'. 'shift' is an immediate value.
*/
#define SRLI_H4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3, shift) { \
out0 = (RTYPE)__msa_srli_h((v8i16)in0, shift); \
out1 = (RTYPE)__msa_srli_h((v8i16)in1, shift); \
out2 = (RTYPE)__msa_srli_h((v8i16)in2, shift); \
out3 = (RTYPE)__msa_srli_h((v8i16)in3, shift); \
}
#define SRLI_H4_SH(...) SRLI_H4(v8i16, __VA_ARGS__)
/* Description : Addition of 2 pairs of vectors
Arguments : Inputs - in0, in1, in2, in3
Outputs - out0, out1


@@ -1026,7 +1026,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
   specialize qw/vp9_fht4x4 sse2/;

   add_proto qw/void vp9_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp9_fht8x8 sse2/;
+  specialize qw/vp9_fht8x8 sse2 msa/;

   add_proto qw/void vp9_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
   specialize qw/vp9_fht16x16 sse2 msa/;
@@ -1041,10 +1041,10 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
   specialize qw/vp9_fdct4x4 sse2/;

   add_proto qw/void vp9_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vp9_fdct8x8_1 sse2 neon/;
+  specialize qw/vp9_fdct8x8_1 sse2 neon msa/;

   add_proto qw/void vp9_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vp9_fdct8x8 sse2 neon/, "$ssse3_x86_64";
+  specialize qw/vp9_fdct8x8 sse2 neon msa/, "$ssse3_x86_64";

   add_proto qw/void vp9_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
   specialize qw/vp9_fdct16x16_1 sse2 msa/;


@@ -0,0 +1,90 @@
/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "./vp9_rtcd.h"
#include "vp9/encoder/mips/msa/vp9_fdct_msa.h"

void vp9_fdct8x8_msa(const int16_t *input, int16_t *output,
                     int32_t src_stride) {
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;

  LD_SH8(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7);
  SLLI_4V(in0, in1, in2, in3, 2);
  SLLI_4V(in4, in5, in6, in7, 2);
  VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
            in0, in1, in2, in3, in4, in5, in6, in7);
  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                     in0, in1, in2, in3, in4, in5, in6, in7);
  VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
            in0, in1, in2, in3, in4, in5, in6, in7);
  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                     in0, in1, in2, in3, in4, in5, in6, in7);
  VP9_SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7);
  ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8);
}

void vp9_fdct8x8_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
  out[0] = VP9_LD_HADD(input, stride);
  out[1] = 0;
}

void vp9_fht8x8_msa(const int16_t *input, int16_t *output, int32_t stride,
                    int32_t tx_type) {
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;

  LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7);
  SLLI_4V(in0, in1, in2, in3, 2);
  SLLI_4V(in4, in5, in6, in7, 2);

  switch (tx_type) {
    case DCT_DCT:
      VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
                in0, in1, in2, in3, in4, in5, in6, in7);
      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                         in0, in1, in2, in3, in4, in5, in6, in7);
      VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
                in0, in1, in2, in3, in4, in5, in6, in7);
      break;
    case ADST_DCT:
      VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
                in0, in1, in2, in3, in4, in5, in6, in7);
      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                         in0, in1, in2, in3, in4, in5, in6, in7);
      VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
                in0, in1, in2, in3, in4, in5, in6, in7);
      break;
    case DCT_ADST:
      VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
                in0, in1, in2, in3, in4, in5, in6, in7);
      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                         in0, in1, in2, in3, in4, in5, in6, in7);
      VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
                in0, in1, in2, in3, in4, in5, in6, in7);
      break;
    case ADST_ADST:
      VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
                in0, in1, in2, in3, in4, in5, in6, in7);
      TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                         in0, in1, in2, in3, in4, in5, in6, in7);
      VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
                in0, in1, in2, in3, in4, in5, in6, in7);
      break;
    default:
      assert(0);
      break;
  }

  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                     in0, in1, in2, in3, in4, in5, in6, in7);
  VP9_SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7);
  ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8);
}
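The closing VP9_SRLI_AVE_S_4V_H pass applies the C reference's sign-aware divide by 2 to every coefficient: the logical shift by 15 extracts the sign bit, and the signed average adds it back before halving. A per-coefficient scalar sketch (helper name illustrative):
#include <stdint.h>
/* Scalar sketch of one lane of VP9_SRLI_AVE_S_4V_H:
 * (uint16_t)in >> 15 is 1 for a negative value, 0 otherwise;
 * the signed average then yields (in + (in < 0)) >> 1. */
static int16_t fdct8x8_final_round(int16_t in) {
  const int16_t sign = (int16_t)((uint16_t)in >> 15);
  return (int16_t)(((int32_t)in + sign) >> 1);
}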


@@ -33,6 +33,23 @@
out1 = __msa_pckev_h((v8i16)s0_m, (v8i16)s1_m); \
}
#define VP9_DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7, \
dst0, dst1, dst2, dst3) { \
v4i32 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m; \
v4i32 tp5_m, tp6_m, tp7_m, tp8_m, tp9_m; \
\
DOTP_SH4_SW(in0, in1, in0, in1, in4, in4, in5, in5, \
tp0_m, tp2_m, tp3_m, tp4_m); \
DOTP_SH4_SW(in2, in3, in2, in3, in6, in6, in7, in7, \
tp5_m, tp6_m, tp7_m, tp8_m); \
BUTTERFLY_4(tp0_m, tp3_m, tp7_m, tp5_m, tp1_m, tp9_m, tp7_m, tp5_m); \
BUTTERFLY_4(tp2_m, tp4_m, tp8_m, tp6_m, tp3_m, tp0_m, tp4_m, tp2_m); \
SRARI_W4_SW(tp1_m, tp9_m, tp7_m, tp5_m, DCT_CONST_BITS); \
SRARI_W4_SW(tp3_m, tp0_m, tp4_m, tp2_m, DCT_CONST_BITS); \
PCKEV_H4_SH(tp1_m, tp3_m, tp9_m, tp0_m, tp7_m, tp4_m, tp5_m, tp2_m, \
dst0, dst1, dst2, dst3); \
}
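Each packed output lane above follows the scalar fdct_round_shift pattern: two int16_t samples are multiplied by two interleaved int16_t cosine constants, accumulated in 32 bits, and rounded right-shifted by DCT_CONST_BITS (14 in libvpx). A scalar sketch of one lane (helper name illustrative):
#include <stdint.h>
/* Scalar sketch of one dot-product lane plus SRARI rounding:
 * sum = a * c0 + b * c1, then round-shift by DCT_CONST_BITS (14). */
static int16_t dot_round_sketch(int16_t a, int16_t b, int16_t c0, int16_t c1) {
  const int32_t sum = (int32_t)a * c0 + (int32_t)b * c1;
  return (int16_t)((sum + (1 << 13)) >> 14);
}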
#define VP9_DOT_SHIFT_RIGHT_PCK_H(in0, in1, in2) ({ \
v8i16 dst_m; \
v4i32 tp0_m, tp1_m; \
@@ -44,6 +61,72 @@
dst_m; \
})
#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, \
out0, out1, out2, out3, out4, out5, out6, out7) { \
v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \
v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m; \
v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64, \
cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 }; \
v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \
cospi_24_64, -cospi_24_64, 0, 0 }; \
\
SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m); \
cnst2_m = -cnst0_m; \
ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \
SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m); \
cnst4_m = -cnst2_m; \
ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \
\
ILVRL_H2_SH(in0, in7, vec1_m, vec0_m); \
ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \
VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
cnst1_m, cnst2_m, cnst3_m, in7, in0, \
in4, in3); \
\
SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m); \
cnst2_m = -cnst0_m; \
ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \
SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m); \
cnst4_m = -cnst2_m; \
ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \
\
ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \
\
VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
cnst1_m, cnst2_m, cnst3_m, in5, in2, \
in6, in1); \
BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \
out7 = -s0_m; \
out0 = s1_m; \
\
SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5, cnst0_m, cnst1_m, cnst2_m, cnst3_m); \
\
ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m); \
cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
cnst1_m = cnst0_m; \
\
ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \
ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \
VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
cnst2_m, cnst3_m, cnst1_m, out1, out6, \
s0_m, s1_m); \
\
SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m); \
cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
\
ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m); \
out3 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
out4 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \
out2 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \
out5 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \
\
out1 = -out1; \
out3 = -out3; \
out5 = -out5; \
}
#define VP9_MADD_SHORT(m0, m1, c0, c1, res0, res1) { \
v4i32 madd0_m, madd1_m, madd2_m, madd3_m; \
v8i16 madd_s0_m, madd_s1_m; \
@@ -107,6 +190,77 @@
vec1 >>= 2; \
}
#define VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, \
out0, out1, out2, out3, out4, out5, out6, out7) { \
v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m; \
v8i16 s7_m, x0_m, x1_m, x2_m, x3_m; \
v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, \
cospi_24_64, cospi_4_64, cospi_28_64, \
cospi_12_64, cospi_20_64 }; \
\
/* FDCT stage1 */ \
BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, \
s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m); \
BUTTERFLY_4(s0_m, s1_m, s2_m, s3_m, x0_m, x1_m, x2_m, x3_m); \
ILVL_H2_SH(x1_m, x0_m, x3_m, x2_m, s0_m, s2_m); \
ILVR_H2_SH(x1_m, x0_m, x3_m, x2_m, s1_m, s3_m); \
SPLATI_H2_SH(coeff_m, 0, 1, x0_m, x1_m); \
x1_m = __msa_ilvev_h(x1_m, x0_m); \
out4 = VP9_DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \
\
SPLATI_H2_SH(coeff_m, 2, 3, x2_m, x3_m); \
x2_m = -x2_m; \
x2_m = __msa_ilvev_h(x3_m, x2_m); \
out6 = VP9_DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m); \
\
out0 = VP9_DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \
x2_m = __msa_splati_h(coeff_m, 2); \
x2_m = __msa_ilvev_h(x2_m, x3_m); \
out2 = VP9_DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m); \
\
/* stage2 */ \
ILVRL_H2_SH(s5_m, s6_m, s1_m, s0_m); \
\
s6_m = VP9_DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \
s5_m = VP9_DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \
\
/* stage3 */ \
BUTTERFLY_4(s4_m, s7_m, s6_m, s5_m, x0_m, x3_m, x2_m, x1_m); \
\
/* stage4 */ \
ILVL_H2_SH(x3_m, x0_m, x2_m, x1_m, s4_m, s6_m); \
ILVR_H2_SH(x3_m, x0_m, x2_m, x1_m, s5_m, s7_m); \
\
SPLATI_H2_SH(coeff_m, 4, 5, x0_m, x1_m); \
x1_m = __msa_ilvev_h(x0_m, x1_m); \
out1 = VP9_DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x1_m); \
\
SPLATI_H2_SH(coeff_m, 6, 7, x2_m, x3_m); \
x2_m = __msa_ilvev_h(x3_m, x2_m); \
out5 = VP9_DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \
\
x1_m = __msa_splati_h(coeff_m, 5); \
x0_m = -x0_m; \
x0_m = __msa_ilvev_h(x1_m, x0_m); \
out7 = VP9_DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x0_m); \
\
x2_m = __msa_splati_h(coeff_m, 6); \
x3_m = -x3_m; \
x2_m = __msa_ilvev_h(x2_m, x3_m); \
out3 = VP9_DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \
}
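Stage 1 of VP9_FDCT8 is the standard even/odd split: BUTTERFLY_8 forms sums of mirrored input pairs and the matching differences before any cosine multiplies. A scalar sketch (helper name illustrative):
#include <stdint.h>
/* Scalar sketch of BUTTERFLY_8: mirrored sums land in s[0..3],
 * the corresponding differences in s[7..4]. */
static void butterfly8_sketch(const int16_t in[8], int16_t s[8]) {
  int i;
  for (i = 0; i < 4; ++i) {
    s[i] = (int16_t)(in[i] + in[7 - i]);
    s[7 - i] = (int16_t)(in[i] - in[7 - i]);
  }
}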
#define VP9_SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7) { \
v8i16 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
\
SRLI_H4_SH(in0, in1, in2, in3, vec0_m, vec1_m, vec2_m, vec3_m, 15); \
SRLI_H4_SH(in4, in5, in6, in7, vec4_m, vec5_m, vec6_m, vec7_m, 15); \
AVE_SH4_SH(vec0_m, in0, vec1_m, in1, vec2_m, in2, vec3_m, in3, \
in0, in1, in2, in3); \
AVE_SH4_SH(vec4_m, in4, vec5_m, in5, vec6_m, in6, vec7_m, in7, \
in4, in5, in6, in7); \
}
#define VP9_FDCT8x16_EVEN(in0, in1, in2, in3, in4, in5, in6, in7, \
out0, out1, out2, out3, out4, out5, out6, out7) { \
v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m; \


@@ -152,6 +152,7 @@ VP9_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp9_quantize_neon.c
 VP9_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp9_subtract_neon.c
 VP9_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp9_variance_neon.c
+VP9_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/vp9_fdct8x8_msa.c
 VP9_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/vp9_fdct16x16_msa.c
 VP9_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/vp9_fdct32x32_msa.c
 VP9_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/vp9_fdct_msa.h