mips msa vp8 idct optimization

average improvement ~2x-5x

Change-Id: I19e82f78772993bcd67fcf975fe180232172f86d
Parag Salasakar 2015-07-02 18:08:06 +05:30
parent 8565a1c99a
commit 3d938d71b0
5 changed files with 987 additions and 7 deletions

test/idct_test.cc

@@ -113,4 +113,8 @@ INSTANTIATE_TEST_CASE_P(C, IDCTTest, ::testing::Values(vp8_short_idct4x4llm_c));
INSTANTIATE_TEST_CASE_P(MMX, IDCTTest,
::testing::Values(vp8_short_idct4x4llm_mmx));
#endif
+#if HAVE_MSA
+INSTANTIATE_TEST_CASE_P(MSA, IDCTTest,
+                        ::testing::Values(vp8_short_idct4x4llm_msa));
+#endif
}

vp8/common/mips/msa/idct_msa.c

@@ -0,0 +1,457 @@
/*
* Copyright (c) 2015 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "./vp8_rtcd.h"
#include "vp8/common/blockd.h"
#include "vp8/common/mips/msa/vp8_macros_msa.h"
static const int32_t cospi8sqrt2minus1 = 20091;
static const int32_t sinpi8sqrt2 = 35468;
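/* Transpose two side-by-side 4x4 blocks of halfwords held in four v8i16
vectors (block 0 in the right halves, block 1 in the left halves). */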
#define TRANSPOSE_TWO_4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
v8i16 s4_m, s5_m, s6_m, s7_m; \
\
TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, s4_m, s5_m, s6_m, s7_m); \
ILVR_D2_SH(s6_m, s4_m, s7_m, s5_m, out0, out2); \
out1 = (v8i16)__msa_ilvl_d((v2i64)s6_m, (v2i64)s4_m); \
out3 = (v8i16)__msa_ilvl_d((v2i64)s7_m, (v2i64)s5_m); \
}
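/* Multiply each halfword of 'in' by sinpi8sqrt2 with Q16 scaling,
i.e. the scalar (x * 35468) >> 16, and return the packed halfword
results. The interleave with zero followed by >> 16 sign-extends each
halfword to 32 bits before the multiply. */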
#define EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in) \
({ \
v8i16 out_m; \
v8i16 zero_m = { 0 }; \
v4i32 tmp1_m, tmp2_m; \
v4i32 sinpi8_sqrt2_m = __msa_fill_w(sinpi8sqrt2); \
\
ILVRL_H2_SW(in, zero_m, tmp1_m, tmp2_m); \
tmp1_m >>= 16; \
tmp2_m >>= 16; \
tmp1_m = (tmp1_m * sinpi8_sqrt2_m) >> 16; \
tmp2_m = (tmp2_m * sinpi8_sqrt2_m) >> 16; \
out_m = __msa_pckev_h((v8i16)tmp2_m, (v8i16)tmp1_m); \
\
out_m; \
})
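/* One 1-D pass of the VP8 4x4 IDCT on halfword lanes (eight columns,
i.e. up to two 4x4 blocks at once). Mirrors the scalar reference
vp8_short_idct4x4llm_c:
a1 = ip[0] + ip[8];
b1 = ip[0] - ip[8];
c1 = ((ip[4] * sinpi8sqrt2) >> 16)
- (ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16));
d1 = (ip[4] + ((ip[4] * cospi8sqrt2minus1) >> 16))
+ ((ip[12] * sinpi8sqrt2) >> 16);
with outputs a1 + d1, b1 + c1, b1 - c1, a1 - d1. The Q15 fractional
multiply (__msa_mul_q_h) followed by >> 1 stands in for the scalar
(x * cospi8sqrt2minus1) >> 16. */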
#define VP8_IDCT_1D_H(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
v8i16 a1_m, b1_m, c1_m, d1_m; \
v8i16 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m; \
v8i16 const_cospi8sqrt2minus1_m; \
\
const_cospi8sqrt2minus1_m = __msa_fill_h(cospi8sqrt2minus1); \
a1_m = in0 + in2; \
b1_m = in0 - in2; \
c_tmp1_m = EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in1); \
c_tmp2_m = __msa_mul_q_h(in3, const_cospi8sqrt2minus1_m); \
c_tmp2_m = c_tmp2_m >> 1; \
c_tmp2_m = in3 + c_tmp2_m; \
c1_m = c_tmp1_m - c_tmp2_m; \
d_tmp1_m = __msa_mul_q_h(in1, const_cospi8sqrt2minus1_m); \
d_tmp1_m = d_tmp1_m >> 1; \
d_tmp1_m = in1 + d_tmp1_m; \
d_tmp2_m = EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in3); \
d1_m = d_tmp1_m + d_tmp2_m; \
BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \
}
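/* The same 1-D IDCT pass on word lanes, used where 32-bit intermediate
precision is kept throughout. */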
#define VP8_IDCT_1D_W(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
v4i32 a1_m, b1_m, c1_m, d1_m; \
v4i32 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m; \
v4i32 const_cospi8sqrt2minus1_m, sinpi8_sqrt2_m; \
\
const_cospi8sqrt2minus1_m = __msa_fill_w(cospi8sqrt2minus1); \
sinpi8_sqrt2_m = __msa_fill_w(sinpi8sqrt2); \
a1_m = in0 + in2; \
b1_m = in0 - in2; \
c_tmp1_m = (in1 * sinpi8_sqrt2_m) >> 16; \
c_tmp2_m = in3 + ((in3 * const_cospi8sqrt2minus1_m) >> 16); \
c1_m = c_tmp1_m - c_tmp2_m; \
d_tmp1_m = in1 + ((in1 * const_cospi8sqrt2minus1_m) >> 16); \
d_tmp2_m = (in3 * sinpi8_sqrt2_m) >> 16; \
d1_m = d_tmp1_m + d_tmp2_m; \
BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \
}
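/* Full 4x4 IDCT: two 1-D passes with a transpose in between, rounding
shift by 3 ((x + 4) >> 3), add to the 4x4 predictor, clip to [0, 255]
and store. The shuffle mask keeps the 4 computed pixels of each row and
the untouched remaining 12 bytes of the destination row. */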
static void idct4x4_addblk_msa(int16_t *input, uint8_t *pred,
int32_t pred_stride,
uint8_t *dest, int32_t dest_stride)
{
v8i16 input0, input1;
v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3;
v4i32 res0, res1, res2, res3;
v16i8 zero = { 0 };
v16i8 pred0, pred1, pred2, pred3, dest0, dest1, dest2, dest3;
v16i8 mask = { 0, 4, 8, 12, 20, 21, 22, 23, 24,
25, 26, 27, 28, 29, 30, 31 };
LD_SH2(input, 8, input0, input1);
UNPCK_SH_SW(input0, in0, in1);
UNPCK_SH_SW(input1, in2, in3);
VP8_IDCT_1D_W(in0, in1, in2, in3, hz0, hz1, hz2, hz3);
TRANSPOSE4x4_SW_SW(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3);
VP8_IDCT_1D_W(hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3);
SRARI_W4_SW(vt0, vt1, vt2, vt3, 3);
TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);
LD_SB4(pred, pred_stride, pred0, pred1, pred2, pred3);
ILVR_B4_SW(zero, pred0, zero, pred1, zero, pred2, zero, pred3, res0, res1,
res2, res3);
ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3, res0, res1,
res2, res3);
ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);
res0 = CLIP_SW_0_255(res0);
res1 = CLIP_SW_0_255(res1);
res2 = CLIP_SW_0_255(res2);
res3 = CLIP_SW_0_255(res3);
LD_SB4(dest, dest_stride, dest0, dest1, dest2, dest3);
VSHF_B2_SB(res0, dest0, res1, dest1, mask, mask, dest0, dest1);
VSHF_B2_SB(res2, dest2, res3, dest3, mask, mask, dest2, dest3);
ST_SB4(dest0, dest1, dest2, dest3, dest, dest_stride);
}
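/* DC-only IDCT: replicate the rounded DC term ((dc + 4) >> 3), add it to
the 4x4 predictor, clip to [0, 255] and store. */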
static void idct4x4_addconst_msa(int16_t in_dc, uint8_t *pred,
int32_t pred_stride,
uint8_t *dest, int32_t dest_stride)
{
v8i16 vec;
v8i16 res0, res1, res2, res3;
v16i8 zero = { 0 };
v16i8 pred0, pred1, pred2, pred3, dest0, dest1, dest2, dest3;
v16i8 mask = { 0, 2, 4, 6, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 };
vec = __msa_fill_h(in_dc);
vec = __msa_srari_h(vec, 3);
LD_SB4(pred, pred_stride, pred0, pred1, pred2, pred3);
ILVR_B4_SH(zero, pred0, zero, pred1, zero, pred2, zero, pred3, res0, res1,
res2, res3);
ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);
CLIP_SH4_0_255(res0, res1, res2, res3);
LD_SB4(dest, dest_stride, dest0, dest1, dest2, dest3);
VSHF_B2_SB(res0, dest0, res1, dest1, mask, mask, dest0, dest1);
VSHF_B2_SB(res2, dest2, res3, dest3, mask, mask, dest2, dest3);
ST_SB4(dest0, dest1, dest2, dest3, dest, dest_stride);
}
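/* Inverse Walsh-Hadamard transform of the 16 second-order DC terms.
Result i is written to mb_dq_coeff[16 * i], the DC slot of the i-th
luma block's dequantized coefficient array. */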
void vp8_short_inv_walsh4x4_msa(int16_t *input, int16_t *mb_dq_coeff)
{
v8i16 input0, input1;
v4i32 in0, in1, in2, in3, a1, b1, c1, d1;
v4i32 hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3;
LD_SH2(input, 8, input0, input1);
UNPCK_SH_SW(input0, in0, in1);
UNPCK_SH_SW(input1, in2, in3);
BUTTERFLY_4(in0, in1, in2, in3, a1, b1, c1, d1);
BUTTERFLY_4(a1, d1, c1, b1, hz0, hz1, hz3, hz2);
TRANSPOSE4x4_SW_SW(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3);
BUTTERFLY_4(hz0, hz1, hz2, hz3, a1, b1, c1, d1);
BUTTERFLY_4(a1, d1, c1, b1, vt0, vt1, vt3, vt2);
ADD4(vt0, 3, vt1, 3, vt2, 3, vt3, 3, vt0, vt1, vt2, vt3);
SRA_4V(vt0, vt1, vt2, vt3, 3);
mb_dq_coeff[0] = __msa_copy_s_h((v8i16)vt0, 0);
mb_dq_coeff[16] = __msa_copy_s_h((v8i16)vt1, 0);
mb_dq_coeff[32] = __msa_copy_s_h((v8i16)vt2, 0);
mb_dq_coeff[48] = __msa_copy_s_h((v8i16)vt3, 0);
mb_dq_coeff[64] = __msa_copy_s_h((v8i16)vt0, 2);
mb_dq_coeff[80] = __msa_copy_s_h((v8i16)vt1, 2);
mb_dq_coeff[96] = __msa_copy_s_h((v8i16)vt2, 2);
mb_dq_coeff[112] = __msa_copy_s_h((v8i16)vt3, 2);
mb_dq_coeff[128] = __msa_copy_s_h((v8i16)vt0, 4);
mb_dq_coeff[144] = __msa_copy_s_h((v8i16)vt1, 4);
mb_dq_coeff[160] = __msa_copy_s_h((v8i16)vt2, 4);
mb_dq_coeff[176] = __msa_copy_s_h((v8i16)vt3, 4);
mb_dq_coeff[192] = __msa_copy_s_h((v8i16)vt0, 6);
mb_dq_coeff[208] = __msa_copy_s_h((v8i16)vt1, 6);
mb_dq_coeff[224] = __msa_copy_s_h((v8i16)vt2, 6);
mb_dq_coeff[240] = __msa_copy_s_h((v8i16)vt3, 6);
}
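/* Dequantize one 4x4 block (elementwise multiply by the dequant factors),
then IDCT and add to dest as in idct4x4_addblk_msa. */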
static void dequant_idct4x4_addblk_msa(int16_t *input, int16_t *dequant_input,
uint8_t *dest, int32_t dest_stride)
{
v8i16 input0, input1, dequant_in0, dequant_in1, mul0, mul1;
v8i16 in0, in1, in2, in3;
v8i16 hz0_h, hz1_h, hz2_h, hz3_h;
v16i8 dest0, dest1, dest2, dest3;
v4i32 hz0_w, hz1_w, hz2_w, hz3_w;
v4i32 vt0, vt1, vt2, vt3, res0, res1, res2, res3;
v2i64 zero = { 0 };
v16i8 mask = { 0, 4, 8, 12, 20, 21, 22, 23, 24,
25, 26, 27, 28, 29, 30, 31 };
LD_SH2(input, 8, input0, input1);
LD_SH2(dequant_input, 8, dequant_in0, dequant_in1);
MUL2(input0, dequant_in0, input1, dequant_in1, mul0, mul1);
PCKEV_D2_SH(zero, mul0, zero, mul1, in0, in2);
PCKOD_D2_SH(zero, mul0, zero, mul1, in1, in3);
VP8_IDCT_1D_H(in0, in1, in2, in3, hz0_h, hz1_h, hz2_h, hz3_h);
PCKEV_D2_SH(hz1_h, hz0_h, hz3_h, hz2_h, mul0, mul1);
UNPCK_SH_SW(mul0, hz0_w, hz1_w);
UNPCK_SH_SW(mul1, hz2_w, hz3_w);
TRANSPOSE4x4_SW_SW(hz0_w, hz1_w, hz2_w, hz3_w, hz0_w, hz1_w, hz2_w, hz3_w);
VP8_IDCT_1D_W(hz0_w, hz1_w, hz2_w, hz3_w, vt0, vt1, vt2, vt3);
SRARI_W4_SW(vt0, vt1, vt2, vt3, 3);
TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);
LD_SB4(dest, dest_stride, dest0, dest1, dest2, dest3);
ILVR_B4_SW(zero, dest0, zero, dest1, zero, dest2, zero, dest3, res0, res1,
res2, res3);
ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3, res0, res1,
res2, res3);
ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);
res0 = CLIP_SW_0_255(res0);
res1 = CLIP_SW_0_255(res1);
res2 = CLIP_SW_0_255(res2);
res3 = CLIP_SW_0_255(res3);
VSHF_B2_SB(res0, dest0, res1, dest1, mask, mask, dest0, dest1);
VSHF_B2_SB(res2, dest2, res3, dest3, mask, mask, dest2, dest3);
ST_SB4(dest0, dest1, dest2, dest3, dest, dest_stride);
}
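/* Dequantize and IDCT two horizontally adjacent 4x4 blocks per call; the
first 1-D pass runs on halfword lanes, the second on word lanes. */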
static void dequant_idct4x4_addblk_2x_msa(int16_t *input,
int16_t *dequant_input,
uint8_t *dest, int32_t dest_stride)
{
v16u8 dest0, dest1, dest2, dest3;
v8i16 in0, in1, in2, in3;
v8i16 mul0, mul1, mul2, mul3, dequant_in0, dequant_in1;
v8i16 hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3;
v8i16 res0, res1, res2, res3;
v4i32 hz0l, hz1l, hz2l, hz3l, hz0r, hz1r, hz2r, hz3r;
v4i32 vt0l, vt1l, vt2l, vt3l, vt0r, vt1r, vt2r, vt3r;
v16i8 zero = { 0 };
LD_SH4(input, 8, in0, in1, in2, in3);
LD_SH2(dequant_input, 8, dequant_in0, dequant_in1);
MUL4(in0, dequant_in0, in1, dequant_in1, in2, dequant_in0, in3, dequant_in1,
mul0, mul1, mul2, mul3);
PCKEV_D2_SH(mul2, mul0, mul3, mul1, in0, in2);
PCKOD_D2_SH(mul2, mul0, mul3, mul1, in1, in3);
VP8_IDCT_1D_H(in0, in1, in2, in3, hz0, hz1, hz2, hz3);
TRANSPOSE_TWO_4x4_H(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3);
UNPCK_SH_SW(hz0, hz0r, hz0l);
UNPCK_SH_SW(hz1, hz1r, hz1l);
UNPCK_SH_SW(hz2, hz2r, hz2l);
UNPCK_SH_SW(hz3, hz3r, hz3l);
VP8_IDCT_1D_W(hz0l, hz1l, hz2l, hz3l, vt0l, vt1l, vt2l, vt3l);
SRARI_W4_SW(vt0l, vt1l, vt2l, vt3l, 3);
VP8_IDCT_1D_W(hz0r, hz1r, hz2r, hz3r, vt0r, vt1r, vt2r, vt3r);
SRARI_W4_SW(vt0r, vt1r, vt2r, vt3r, 3);
PCKEV_H4_SH(vt0l, vt0r, vt1l, vt1r, vt2l, vt2r, vt3l, vt3r, vt0, vt1, vt2,
vt3);
TRANSPOSE_TWO_4x4_H(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);
LD_UB4(dest, dest_stride, dest0, dest1, dest2, dest3);
ILVR_B4_SH(zero, dest0, zero, dest1, zero, dest2, zero, dest3, res0, res1,
res2, res3);
ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);
CLIP_SH4_0_255(res0, res1, res2, res3);
PCKEV_B4_SH(res0, res0, res1, res1, res2, res2, res3, res3, res0, res1,
res2, res3);
PCKOD_D2_UB(dest0, res0, dest1, res1, dest0, dest1);
PCKOD_D2_UB(dest2, res2, dest3, res3, dest2, dest3);
ST_UB4(dest0, dest1, dest2, dest3, dest, dest_stride);
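/* Zero all 32 coefficients (64 bytes) of the two processed blocks. */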
__asm__ __volatile__(
"sw $zero, 0(%[input]) \n\t"
"sw $zero, 4(%[input]) \n\t"
"sw $zero, 8(%[input]) \n\t"
"sw $zero, 12(%[input]) \n\t"
"sw $zero, 16(%[input]) \n\t"
"sw $zero, 20(%[input]) \n\t"
"sw $zero, 24(%[input]) \n\t"
"sw $zero, 28(%[input]) \n\t"
"sw $zero, 32(%[input]) \n\t"
"sw $zero, 36(%[input]) \n\t"
"sw $zero, 40(%[input]) \n\t"
"sw $zero, 44(%[input]) \n\t"
"sw $zero, 48(%[input]) \n\t"
"sw $zero, 52(%[input]) \n\t"
"sw $zero, 56(%[input]) \n\t"
"sw $zero, 60(%[input]) \n\t"::
[input] "r"(input)
);
}
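/* DC-only path for two adjacent blocks: dequantize and round each DC
((dc * dq[0] + 4) >> 3), splat it over its 4x4 half of the 8-pixel rows,
add to dest with clipping, and clear the two DC inputs. */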
static void dequant_idct_addconst_2x_msa(int16_t *input, int16_t *dequant_input,
uint8_t *dest, int32_t dest_stride)
{
v8i16 input_dc0, input_dc1, vec;
v16u8 dest0, dest1, dest2, dest3;
v16i8 zero = { 0 };
v8i16 res0, res1, res2, res3;
input_dc0 = __msa_fill_h(input[0] * dequant_input[0]);
input_dc1 = __msa_fill_h(input[16] * dequant_input[0]);
SRARI_H2_SH(input_dc0, input_dc1, 3);
vec = (v8i16)__msa_pckev_d((v2i64)input_dc1, (v2i64)input_dc0);
input[0] = 0;
input[16] = 0;
LD_UB4(dest, dest_stride, dest0, dest1, dest2, dest3);
ILVR_B4_SH(zero, dest0, zero, dest1, zero, dest2, zero, dest3, res0,
res1, res2, res3);
ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);
CLIP_SH4_0_255(res0, res1, res2, res3);
PCKEV_B4_SH(res0, res0, res1, res1, res2, res2, res3, res3, res0, res1,
res2, res3);
PCKOD_D2_UB(dest0, res0, dest1, res1, dest0, dest1);
PCKOD_D2_UB(dest2, res2, dest3, res3, dest2, dest3);
ST_UB4(dest0, dest1, dest2, dest3, dest, dest_stride);
}
void vp8_short_idct4x4llm_msa(int16_t *input, uint8_t *pred_ptr,
int32_t pred_stride, uint8_t *dst_ptr,
int32_t dst_stride)
{
idct4x4_addblk_msa(input, pred_ptr, pred_stride, dst_ptr, dst_stride);
}
void vp8_dc_only_idct_add_msa(int16_t input_dc, uint8_t *pred_ptr,
int32_t pred_stride, uint8_t *dst_ptr,
int32_t dst_stride)
{
idct4x4_addconst_msa(input_dc, pred_ptr, pred_stride, dst_ptr, dst_stride);
}
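/* Dequantize all 16 coefficients of a block with two v8i16 multiplies. */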
void vp8_dequantize_b_msa(BLOCKD *d, int16_t *DQC)
{
v8i16 dqc0, dqc1, q0, q1, dq0, dq1;
LD_SH2(DQC, 8, dqc0, dqc1);
LD_SH2(d->qcoeff, 8, q0, q1);
MUL2(dqc0, q0, dqc1, q1, dq0, dq1);
ST_SH2(dq0, dq1, d->dqcoeff, 8);
}
void vp8_dequant_idct_add_msa(int16_t *input, int16_t *dq,
uint8_t *dest, int32_t stride)
{
dequant_idct4x4_addblk_msa(input, dq, dest, stride);
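/* Zero the 16 input coefficients (32 bytes) of the processed block. */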
__asm__ __volatile__ (
"sw $zero, 0(%[input]) \n\t"
"sw $zero, 4(%[input]) \n\t"
"sw $zero, 8(%[input]) \n\t"
"sw $zero, 12(%[input]) \n\t"
"sw $zero, 16(%[input]) \n\t"
"sw $zero, 20(%[input]) \n\t"
"sw $zero, 24(%[input]) \n\t"
"sw $zero, 28(%[input]) \n\t"
:
: [input] "r" (input)
);
}
void vp8_dequant_idct_add_y_block_msa(int16_t *q, int16_t *dq,
uint8_t *dst, int32_t stride,
char *eobs)
{
int16_t *eobs_h = (int16_t *)eobs;
uint8_t i;
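/* Each byte of 'eobs' is the end-of-block count of one 4x4 block, so an
int16_t load covers a pair of adjacent blocks. eob <= 1 means only the
DC coefficient can be nonzero; masking with 0xfefe therefore tests
whether either block of the pair has AC coefficients. */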
for (i = 4; i--;)
{
if (eobs_h[0])
{
if (eobs_h[0] & 0xfefe)
{
dequant_idct4x4_addblk_2x_msa(q, dq, dst, stride);
}
else
{
dequant_idct_addconst_2x_msa(q, dq, dst, stride);
}
}
q += 32;
if (eobs_h[1])
{
if (eobs_h[1] & 0xfefe)
{
dequant_idct4x4_addblk_2x_msa(q, dq, dst + 8, stride);
}
else
{
dequant_idct_addconst_2x_msa(q, dq, dst + 8, stride);
}
}
q += 32;
dst += (4 * stride);
eobs_h += 2;
}
}
void vp8_dequant_idct_add_uv_block_msa(int16_t *q, int16_t *dq,
uint8_t *dstu, uint8_t *dstv,
int32_t stride, char *eobs)
{
int16_t *eobs_h = (int16_t *)eobs;
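/* eobs_h[0..1] cover the four U blocks, eobs_h[2..3] the four V blocks;
same DC-only test as the luma path. */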
if (eobs_h[0])
{
if (eobs_h[0] & 0xfefe)
{
dequant_idct4x4_addblk_2x_msa(q, dq, dstu, stride);
}
else
{
dequant_idct_addconst_2x_msa(q, dq, dstu, stride);
}
}
q += 32;
dstu += (stride * 4);
if (eobs_h[1])
{
if (eobs_h[1] & 0xfefe)
{
dequant_idct4x4_addblk_2x_msa(q, dq, dstu, stride);
}
else
{
dequant_idct_addconst_2x_msa(q, dq, dstu, stride);
}
}
q += 32;
if (eobs_h[2])
{
if (eobs_h[2] & 0xfefe)
{
dequant_idct4x4_addblk_2x_msa(q, dq, dstv, stride);
}
else
{
dequant_idct_addconst_2x_msa(q, dq, dstv, stride);
}
}
q += 32;
dstv += (stride * 4);
if (eobs_h[3])
{
if (eobs_h[3] & 0xfefe)
{
dequant_idct4x4_addblk_2x_msa(q, dq, dstv, stride);
}
else
{
dequant_idct_addconst_2x_msa(q, dq, dstv, stride);
}
}
}

vp8/common/mips/msa/vp8_macros_msa.h

@@ -0,0 +1,515 @@
/*
* Copyright (c) 2015 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP8_COMMON_MIPS_MSA_VP8_MACROS_MSA_H_
#define VP8_COMMON_MIPS_MSA_VP8_MACROS_MSA_H_
#include <msa.h>
#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
#define LD_B(RTYPE, psrc) *((const RTYPE *)(psrc))
#define LD_UB(...) LD_B(v16u8, __VA_ARGS__)
#define LD_SB(...) LD_B(v16i8, __VA_ARGS__)
#define LD_H(RTYPE, psrc) *((const RTYPE *)(psrc))
#define LD_UH(...) LD_H(v8u16, __VA_ARGS__)
#define LD_SH(...) LD_H(v8i16, __VA_ARGS__)
#define ST_B(RTYPE, in, pdst) *((RTYPE *)(pdst)) = (in)
#define ST_UB(...) ST_B(v16u8, __VA_ARGS__)
#define ST_SB(...) ST_B(v16i8, __VA_ARGS__)
#define ST_H(RTYPE, in, pdst) *((RTYPE *)(pdst)) = (in)
#define ST_UH(...) ST_H(v8u16, __VA_ARGS__)
#define ST_SH(...) ST_H(v8i16, __VA_ARGS__)
/* Description : Load vectors with 16 byte elements with stride
Arguments : Inputs - psrc, stride
Outputs - out0, out1
Return Type - as per RTYPE
Details : Load 16 byte elements in 'out0' from (psrc)
Load 16 byte elements in 'out1' from (psrc + stride)
*/
#define LD_B2(RTYPE, psrc, stride, out0, out1) \
{ \
out0 = LD_B(RTYPE, (psrc)); \
out1 = LD_B(RTYPE, (psrc) + stride); \
}
#define LD_UB2(...) LD_B2(v16u8, __VA_ARGS__)
#define LD_SB2(...) LD_B2(v16i8, __VA_ARGS__)
#define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) \
{ \
LD_B2(RTYPE, (psrc), stride, out0, out1); \
LD_B2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \
}
#define LD_UB4(...) LD_B4(v16u8, __VA_ARGS__)
#define LD_SB4(...) LD_B4(v16i8, __VA_ARGS__)
/* Description : Load vectors with 8 halfword elements with stride
Arguments : Inputs - psrc, stride
Outputs - out0, out1
Details : Load 8 halfword elements in 'out0' from (psrc)
Load 8 halfword elements in 'out1' from (psrc + stride)
*/
#define LD_H2(RTYPE, psrc, stride, out0, out1) \
{ \
out0 = LD_H(RTYPE, (psrc)); \
out1 = LD_H(RTYPE, (psrc) + (stride)); \
}
#define LD_SH2(...) LD_H2(v8i16, __VA_ARGS__)
#define LD_H4(RTYPE, psrc, stride, out0, out1, out2, out3) \
{ \
LD_H2(RTYPE, (psrc), stride, out0, out1); \
LD_H2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \
}
#define LD_SH4(...) LD_H4(v8i16, __VA_ARGS__)
/* Description : Store vectors of 16 byte elements with stride
Arguments : Inputs - in0, in1, pdst, stride
Details : Store 16 byte elements from 'in0' to (pdst)
Store 16 byte elements from 'in1' to (pdst + stride)
*/
#define ST_B2(RTYPE, in0, in1, pdst, stride) \
{ \
ST_B(RTYPE, in0, (pdst)); \
ST_B(RTYPE, in1, (pdst) + stride); \
}
#define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) \
{ \
ST_B2(RTYPE, in0, in1, (pdst), stride); \
ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
}
#define ST_UB4(...) ST_B4(v16u8, __VA_ARGS__)
#define ST_SB4(...) ST_B4(v16i8, __VA_ARGS__)
/* Description : Store vectors of 8 halfword elements with stride
Arguments : Inputs - in0, in1, pdst, stride
Details : Store 8 halfword elements from 'in0' to (pdst)
Store 8 halfword elements from 'in1' to (pdst + stride)
*/
#define ST_H2(RTYPE, in0, in1, pdst, stride) \
{ \
ST_H(RTYPE, in0, (pdst)); \
ST_H(RTYPE, in1, (pdst) + stride); \
}
#define ST_SH2(...) ST_H2(v8i16, __VA_ARGS__)
/* Description : Shuffle byte vector elements as per mask vector
Arguments : Inputs - in0, in1, in2, in3, mask0, mask1
Outputs - out0, out1
Return Type - as per RTYPE
Details : Byte elements from 'in0' & 'in1' are copied selectively to
'out0' as per control vector 'mask0'
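(index values 0..15 select a byte of 'in0', 16..31 a byte of 'in1')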
*/
#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \
{ \
out0 = (RTYPE)__msa_vshf_b((v16i8)mask0, (v16i8)in1, (v16i8)in0); \
out1 = (RTYPE)__msa_vshf_b((v16i8)mask1, (v16i8)in3, (v16i8)in2); \
}
#define VSHF_B2_SB(...) VSHF_B2(v16i8, __VA_ARGS__)
/* Description : Clips all signed halfword elements of input vector
between 0 & 255
Arguments : Input - in
Output - out_m
Return Type - signed halfword
*/
#define CLIP_SH_0_255(in) \
({ \
v8i16 max_m = __msa_ldi_h(255); \
v8i16 out_m; \
\
out_m = __msa_maxi_s_h((v8i16)in, 0); \
out_m = __msa_min_s_h((v8i16)max_m, (v8i16)out_m); \
out_m; \
})
#define CLIP_SH2_0_255(in0, in1) \
{ \
in0 = CLIP_SH_0_255(in0); \
in1 = CLIP_SH_0_255(in1); \
}
#define CLIP_SH4_0_255(in0, in1, in2, in3) \
{ \
CLIP_SH2_0_255(in0, in1); \
CLIP_SH2_0_255(in2, in3); \
}
/* Description : Clips all signed word elements of input vector
between 0 & 255
Arguments : Input - in
Output - out_m
Return Type - signed word
*/
#define CLIP_SW_0_255(in) \
({ \
v4i32 max_m = __msa_ldi_w(255); \
v4i32 out_m; \
\
out_m = __msa_maxi_s_w((v4i32)in, 0); \
out_m = __msa_min_s_w((v4i32)max_m, (v4i32)out_m); \
out_m; \
})
/* Description : Interleave left half of halfword elements from vectors
Arguments : Inputs - in0, in1, in2, in3
Outputs - out0, out1
Return Type - as per RTYPE
Details : Left half of halfword elements of 'in0' and 'in1' are
interleaved and written to 'out0'.
*/
#define ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
{ \
out0 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \
out1 = (RTYPE)__msa_ilvl_h((v8i16)in2, (v8i16)in3); \
}
#define ILVL_H2_SH(...) ILVL_H2(v8i16, __VA_ARGS__)
#define ILVL_H2_SW(...) ILVL_H2(v4i32, __VA_ARGS__)
/* Description : Interleave left half of word elements from vectors
Arguments : Inputs - in0, in1, in2, in3
Outputs - out0, out1
Return Type - as per RTYPE
Details : Left half of word elements of 'in0' and 'in1' are interleaved
and written to 'out0'.
*/
#define ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
{ \
out0 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \
out1 = (RTYPE)__msa_ilvl_w((v4i32)in2, (v4i32)in3); \
}
#define ILVL_W2_SH(...) ILVL_W2(v8i16, __VA_ARGS__)
/* Description : Interleave right half of byte elements from vectors
Arguments : Inputs - in0, in1, in2, in3
Outputs - out0, out1
Return Type - as per RTYPE
Details : Right half of byte elements of 'in0' and 'in1' are interleaved
and written to out0.
*/
#define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
{ \
out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \
out1 = (RTYPE)__msa_ilvr_b((v16i8)in2, (v16i8)in3); \
}
#define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
out0, out1, out2, out3) \
{ \
ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
}
#define ILVR_B4_SH(...) ILVR_B4(v8i16, __VA_ARGS__)
#define ILVR_B4_SW(...) ILVR_B4(v4i32, __VA_ARGS__)
/* Description : Interleave right half of halfword elements from vectors
Arguments : Inputs - in0, in1, in2, in3
Outputs - out0, out1
Return Type - as per RTYPE
Details : Right half of halfword elements of 'in0' and 'in1' are
interleaved and written to 'out0'.
*/
#define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
{ \
out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \
out1 = (RTYPE)__msa_ilvr_h((v8i16)in2, (v8i16)in3); \
}
#define ILVR_H2_SH(...) ILVR_H2(v8i16, __VA_ARGS__)
#define ILVR_H2_SW(...) ILVR_H2(v4i32, __VA_ARGS__)
#define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
out0, out1, out2, out3) \
{ \
ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
}
#define ILVR_H4_SH(...) ILVR_H4(v8i16, __VA_ARGS__)
#define ILVR_H4_SW(...) ILVR_H4(v4i32, __VA_ARGS__)
#define ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1) \
{ \
out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \
out1 = (RTYPE)__msa_ilvr_w((v4i32)in2, (v4i32)in3); \
}
#define ILVR_W2_SH(...) ILVR_W2(v8i16, __VA_ARGS__)
/* Description : Interleave right half of double word elements from vectors
Arguments : Inputs - in0, in1, in2, in3
Outputs - out0, out1
Return Type - as per RTYPE
Details : Right half of double word elements of 'in0' and 'in1' are
interleaved and written to 'out0'.
*/
#define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
{ \
out0 = (RTYPE)__msa_ilvr_d((v2i64)(in0), (v2i64)(in1)); \
out1 = (RTYPE)__msa_ilvr_d((v2i64)(in2), (v2i64)(in3)); \
}
#define ILVR_D2_UB(...) ILVR_D2(v16u8, __VA_ARGS__)
#define ILVR_D2_SB(...) ILVR_D2(v16i8, __VA_ARGS__)
#define ILVR_D2_SH(...) ILVR_D2(v8i16, __VA_ARGS__)
/* Description : Interleave both left and right halves of input vectors
Arguments : Inputs - in0, in1
Outputs - out0, out1
Return Type - as per RTYPE
Details : Right halves of the elements of 'in0' and 'in1' are
interleaved and written to 'out0'; the left halves to 'out1'.
*/
#define ILVRL_H2(RTYPE, in0, in1, out0, out1) \
{ \
out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \
out1 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \
}
#define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__)
#define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__)
#define ILVRL_W2(RTYPE, in0, in1, out0, out1) \
{ \
out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \
out1 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \
}
#define ILVRL_W2_UB(...) ILVRL_W2(v16u8, __VA_ARGS__)
#define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__)
#define ILVRL_W2_SW(...) ILVRL_W2(v4i32, __VA_ARGS__)
/* Description : Pack even byte elements of vector pairs
Arguments : Inputs - in0, in1, in2, in3
Outputs - out0, out1
Return Type - as per RTYPE
Details : Even byte elements of 'in0' are copied to the left half of
'out0' & even byte elements of 'in1' are copied to the right
half of 'out0'.
*/
#define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \
{ \
out0 = (RTYPE)__msa_pckev_b((v16i8)in0, (v16i8)in1); \
out1 = (RTYPE)__msa_pckev_b((v16i8)in2, (v16i8)in3); \
}
#define PCKEV_B2_SB(...) PCKEV_B2(v16i8, __VA_ARGS__)
#define PCKEV_B2_UB(...) PCKEV_B2(v16u8, __VA_ARGS__)
#define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
out0, out1, out2, out3) \
{ \
PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \
PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \
}
#define PCKEV_B4_SB(...) PCKEV_B4(v16i8, __VA_ARGS__)
#define PCKEV_B4_UB(...) PCKEV_B4(v16u8, __VA_ARGS__)
#define PCKEV_B4_SH(...) PCKEV_B4(v8i16, __VA_ARGS__)
/* Description : Pack even halfword elements of vector pairs
Arguments : Inputs - in0, in1, in2, in3
Outputs - out0, out1
Return Type - as per RTYPE
Details : Even halfword elements of 'in0' are copied to the left half of
'out0' & even halfword elements of 'in1' are copied to the
right half of 'out0'.
*/
#define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \
{ \
out0 = (RTYPE)__msa_pckev_h((v8i16)in0, (v8i16)in1); \
out1 = (RTYPE)__msa_pckev_h((v8i16)in2, (v8i16)in3); \
}
#define PCKEV_H2_SH(...) PCKEV_H2(v8i16, __VA_ARGS__)
#define PCKEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
out0, out1, out2, out3) \
{ \
PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1); \
PCKEV_H2(RTYPE, in4, in5, in6, in7, out2, out3); \
}
#define PCKEV_H4_SH(...) PCKEV_H4(v8i16, __VA_ARGS__)
/* Description : Pack even double word elements of vector pairs
Arguments : Inputs - in0, in1, in2, in3
Outputs - out0, out1
Return Type - as per RTYPE
Details : Even double word elements of 'in0' are copied to the left half
of 'out0' & even double word elements of 'in1' are copied to
the right half of 'out0'.
*/
#define PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
{ \
out0 = (RTYPE)__msa_pckev_d((v2i64)in0, (v2i64)in1); \
out1 = (RTYPE)__msa_pckev_d((v2i64)in2, (v2i64)in3); \
}
#define PCKEV_D2_UB(...) PCKEV_D2(v16u8, __VA_ARGS__)
#define PCKEV_D2_SH(...) PCKEV_D2(v8i16, __VA_ARGS__)
/* Description : Pack odd double word elements of vector pairs
Arguments : Inputs - in0, in1, in2, in3
Outputs - out0, out1
Return Type - as per RTYPE
Details : Odd double word elements of 'in0' are copied to the left half
of 'out0' & odd double word elements of 'in1' are copied to
the right half of 'out0'.
*/
#define PCKOD_D2(RTYPE, in0, in1, in2, in3, out0, out1) \
{ \
out0 = (RTYPE)__msa_pckod_d((v2i64)in0, (v2i64)in1); \
out1 = (RTYPE)__msa_pckod_d((v2i64)in2, (v2i64)in3); \
}
#define PCKOD_D2_UB(...) PCKOD_D2(v16u8, __VA_ARGS__)
#define PCKOD_D2_SH(...) PCKOD_D2(v8i16, __VA_ARGS__)
/* Description : Arithmetic shift right all elements of vector
(generic for all data types)
Arguments : Inputs - in0, in1, in2, in3, shift
Outputs - in place operation
Return Type - as per input vector RTYPE
Details : Each element of vector 'in0' is right shifted by 'shift' and
the result is written in-place. 'shift' is a GP variable.
*/
#define SRA_4V(in0, in1, in2, in3, shift) \
{ \
in0 = in0 >> shift; \
in1 = in1 >> shift; \
in2 = in2 >> shift; \
in3 = in3 >> shift; \
}
/* Description : Shift right arithmetic rounded (immediate)
Arguments : Inputs - in0, in1, shift
Outputs - in place operation
Return Type - as per RTYPE
Details : Each element of vector 'in0' is shifted right arithmetically by
the value in 'shift'. The last discarded bit is added to the
shifted value for rounding and the result is written in-place.
'shift' is an immediate value.
*/
#define SRARI_H2(RTYPE, in0, in1, shift) \
{ \
in0 = (RTYPE)__msa_srari_h((v8i16)in0, shift); \
in1 = (RTYPE)__msa_srari_h((v8i16)in1, shift); \
}
#define SRARI_H2_UH(...) SRARI_H2(v8u16, __VA_ARGS__)
#define SRARI_H2_SH(...) SRARI_H2(v8i16, __VA_ARGS__)
#define SRARI_W2(RTYPE, in0, in1, shift) \
{ \
in0 = (RTYPE)__msa_srari_w((v4i32)in0, shift); \
in1 = (RTYPE)__msa_srari_w((v4i32)in1, shift); \
}
#define SRARI_W4(RTYPE, in0, in1, in2, in3, shift) \
{ \
SRARI_W2(RTYPE, in0, in1, shift); \
SRARI_W2(RTYPE, in2, in3, shift); \
}
#define SRARI_W4_SW(...) SRARI_W4(v4i32, __VA_ARGS__)
/* Description : Multiplication of pairs of vectors
Arguments : Inputs - in0, in1, in2, in3
Outputs - out0, out1
Details : Each element of 'in0' is multiplied by the corresponding
element of 'in1' and the result is written to 'out0'
*/
#define MUL2(in0, in1, in2, in3, out0, out1) \
{ \
out0 = in0 * in1; \
out1 = in2 * in3; \
}
#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, \
out0, out1, out2, out3) \
{ \
MUL2(in0, in1, in2, in3, out0, out1); \
MUL2(in4, in5, in6, in7, out2, out3); \
}
/* Description : Addition of 2 pairs of vectors
Arguments : Inputs - in0, in1, in2, in3
Outputs - out0, out1
Details : Each element of 'in0' is added to the corresponding element
of 'in1' and the result is written to 'out0'.
*/
#define ADD2(in0, in1, in2, in3, out0, out1) \
{ \
out0 = in0 + in1; \
out1 = in2 + in3; \
}
#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, \
out0, out1, out2, out3) \
{ \
ADD2(in0, in1, in2, in3, out0, out1); \
ADD2(in4, in5, in6, in7, out2, out3); \
}
/* Description : Sign extend halfword elements from input vector and return
the result in pair of vectors
Arguments : Input - in (halfword vector)
Outputs - out0, out1 (sign extended word vectors)
Return Type - signed word
Details : Sign bit of halfword elements from input vector 'in' is
extracted and interleaved right with the same vector 'in' to
generate 4 signed word elements in 'out0', then interleaved
left with the same vector to generate 4 signed word elements
in 'out1'
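e.g. halfword element 0xfffd (-3) pairs with mask 0xffff to give
the sign-extended word 0xfffffffd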
*/
#define UNPCK_SH_SW(in, out0, out1) \
{ \
v8i16 tmp_m; \
\
tmp_m = __msa_clti_s_h((v8i16)in, 0); \
ILVRL_H2_SW(tmp_m, in, out0, out1); \
}
/* Description : Butterfly of 4 input vectors
Arguments : Inputs - in0, in1, in2, in3
Outputs - out0, out1, out2, out3
Details : Butterfly operation
*/
#define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
out0 = in0 + in3; \
out1 = in1 + in2; \
\
out2 = in1 - in2; \
out3 = in0 - in3; \
}
/* Description : Transpose 8x4 block with half word elements in vectors
Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
Outputs - out0, out1, out2, out3, out4, out5, out6, out7
Return Type - signed halfword
*/
#define TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
\
ILVR_H2_SH(in1, in0, in3, in2, tmp0_m, tmp1_m); \
ILVL_H2_SH(in1, in0, in3, in2, tmp2_m, tmp3_m); \
ILVR_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out2); \
ILVL_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out1, out3); \
}
/* Description : Transpose 4x4 block with word elements in vectors
Arguments : Inputs - in0, in1, in2, in3
Outputs - out0, out1, out2, out3
Return Type - signed word
*/
#define TRANSPOSE4x4_SW_SW(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
v4i32 s0_m, s1_m, s2_m, s3_m; \
\
ILVRL_W2_SW(in1, in0, s0_m, s1_m); \
ILVRL_W2_SW(in3, in2, s2_m, s3_m); \
\
out0 = (v4i32)__msa_ilvr_d((v2i64)s2_m, (v2i64)s0_m); \
out1 = (v4i32)__msa_ilvl_d((v2i64)s2_m, (v2i64)s0_m); \
out2 = (v4i32)__msa_ilvr_d((v2i64)s3_m, (v2i64)s1_m); \
out3 = (v4i32)__msa_ilvl_d((v2i64)s3_m, (v2i64)s1_m); \
}
#endif /* VP8_COMMON_MIPS_MSA_VP8_MACROS_MSA_H_ */

vp8/common/rtcd_defs.pl

@@ -29,21 +29,21 @@ $vp8_clear_system_state_mmx=vpx_reset_mmx_state;
# Dequant
#
add_proto qw/void vp8_dequantize_b/, "struct blockd*, short *dqc";
-specialize qw/vp8_dequantize_b mmx media neon/;
+specialize qw/vp8_dequantize_b mmx media neon msa/;
$vp8_dequantize_b_media=vp8_dequantize_b_v6;
add_proto qw/void vp8_dequant_idct_add/, "short *input, short *dq, unsigned char *output, int stride";
-specialize qw/vp8_dequant_idct_add mmx media neon dspr2/;
+specialize qw/vp8_dequant_idct_add mmx media neon dspr2 msa/;
$vp8_dequant_idct_add_media=vp8_dequant_idct_add_v6;
$vp8_dequant_idct_add_dspr2=vp8_dequant_idct_add_dspr2;
add_proto qw/void vp8_dequant_idct_add_y_block/, "short *q, short *dq, unsigned char *dst, int stride, char *eobs";
-specialize qw/vp8_dequant_idct_add_y_block mmx sse2 media neon dspr2/;
+specialize qw/vp8_dequant_idct_add_y_block mmx sse2 media neon dspr2 msa/;
$vp8_dequant_idct_add_y_block_media=vp8_dequant_idct_add_y_block_v6;
$vp8_dequant_idct_add_y_block_dspr2=vp8_dequant_idct_add_y_block_dspr2;
add_proto qw/void vp8_dequant_idct_add_uv_block/, "short *q, short *dq, unsigned char *dst_u, unsigned char *dst_v, int stride, char *eobs";
-specialize qw/vp8_dequant_idct_add_uv_block mmx sse2 media neon dspr2/;
+specialize qw/vp8_dequant_idct_add_uv_block mmx sse2 media neon dspr2 msa/;
$vp8_dequant_idct_add_uv_block_media=vp8_dequant_idct_add_uv_block_v6;
$vp8_dequant_idct_add_y_block_dspr2=vp8_dequant_idct_add_y_block_dspr2;
@@ -108,7 +108,7 @@ $vp8_loop_filter_simple_bh_neon=vp8_loop_filter_bhs_neon;
#
#idct16
add_proto qw/void vp8_short_idct4x4llm/, "short *input, unsigned char *pred, int pitch, unsigned char *dst, int dst_stride";
-specialize qw/vp8_short_idct4x4llm mmx media neon dspr2/;
+specialize qw/vp8_short_idct4x4llm mmx media neon dspr2 msa/;
$vp8_short_idct4x4llm_media=vp8_short_idct4x4llm_v6_dual;
$vp8_short_idct4x4llm_dspr2=vp8_short_idct4x4llm_dspr2;
@@ -120,13 +120,13 @@ $vp8_short_inv_walsh4x4_1_dspr2=vp8_short_inv_walsh4x4_1_dspr2;
#iwalsh16
add_proto qw/void vp8_short_inv_walsh4x4/, "short *input, short *output";
-specialize qw/vp8_short_inv_walsh4x4 mmx sse2 media neon dspr2/;
+specialize qw/vp8_short_inv_walsh4x4 mmx sse2 media neon dspr2 msa/;
$vp8_short_inv_walsh4x4_media=vp8_short_inv_walsh4x4_v6;
$vp8_short_inv_walsh4x4_dspr2=vp8_short_inv_walsh4x4_dspr2;
#idct1_scalar_add
add_proto qw/void vp8_dc_only_idct_add/, "short input, unsigned char *pred, int pred_stride, unsigned char *dst, int dst_stride";
-specialize qw/vp8_dc_only_idct_add mmx media neon dspr2/;
+specialize qw/vp8_dc_only_idct_add mmx media neon dspr2 msa/;
$vp8_dc_only_idct_add_media=vp8_dc_only_idct_add_v6;
$vp8_dc_only_idct_add_dspr2=vp8_dc_only_idct_add_dspr2;

vp8/common/vp8_common.mk

@@ -121,6 +121,10 @@ VP8_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/reconinter_dspr2.c
VP8_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/idct_blk_dspr2.c
VP8_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/dequantize_dspr2.c
# common (c)
VP8_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct_msa.c
VP8_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp8_macros_msa.h
# common (c)
VP8_COMMON_SRCS-$(ARCH_ARM) += common/arm/filter_arm.c
VP8_COMMON_SRCS-$(ARCH_ARM) += common/arm/loopfilter_arm.c