From 55c0df5ef13f2c828b968788a319f27bf5b944cf Mon Sep 17 00:00:00 2001 From: Parag Salasakar Date: Mon, 13 Jul 2015 09:23:52 +0530 Subject: [PATCH] mips msa vp8 sixtap filter optimization average improvement ~3x-5x Change-Id: I5fd88cb088814be443d04be384b9fca99b22adef --- test/sixtap_predict_test.cc | 12 + vp8/common/mips/msa/sixtap_filter_msa.c | 1850 +++++++++++++++++++++++ vp8/common/mips/msa/vp8_macros_msa.h | 274 ++++ vp8/common/rtcd_defs.pl | 8 +- vp8/vp8_common.mk | 1 + 5 files changed, 2141 insertions(+), 4 deletions(-) create mode 100644 vp8/common/mips/msa/sixtap_filter_msa.c diff --git a/test/sixtap_predict_test.cc b/test/sixtap_predict_test.cc index 1e6d91547..07383aea1 100644 --- a/test/sixtap_predict_test.cc +++ b/test/sixtap_predict_test.cc @@ -238,4 +238,16 @@ INSTANTIATE_TEST_CASE_P( make_tuple(8, 4, sixtap_8x4_ssse3), make_tuple(4, 4, sixtap_4x4_ssse3))); #endif +#if HAVE_MSA +const SixtapPredictFunc sixtap_16x16_msa = vp8_sixtap_predict16x16_msa; +const SixtapPredictFunc sixtap_8x8_msa = vp8_sixtap_predict8x8_msa; +const SixtapPredictFunc sixtap_8x4_msa = vp8_sixtap_predict8x4_msa; +const SixtapPredictFunc sixtap_4x4_msa = vp8_sixtap_predict4x4_msa; +INSTANTIATE_TEST_CASE_P( + MSA, SixtapPredictTest, ::testing::Values( + make_tuple(16, 16, sixtap_16x16_msa), + make_tuple(8, 8, sixtap_8x8_msa), + make_tuple(8, 4, sixtap_8x4_msa), + make_tuple(4, 4, sixtap_4x4_msa))); +#endif } // namespace diff --git a/vp8/common/mips/msa/sixtap_filter_msa.c b/vp8/common/mips/msa/sixtap_filter_msa.c new file mode 100644 index 000000000..fb60fc134 --- /dev/null +++ b/vp8/common/mips/msa/sixtap_filter_msa.c @@ -0,0 +1,1850 @@ +/* + * Copyright (c) 2015 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include "./vp8_rtcd.h" +#include "vpx_ports/mem.h" +#include "vp8/common/filter.h" +#include "vp8/common/mips/msa/vp8_macros_msa.h" + +DECLARE_ALIGNED(16, static const int8_t, vp8_subpel_filters_msa[7][8]) = +{ + { 0, -6, 123, 12, -1, 0, 0, 0 }, + { 2, -11, 108, 36, -8, 1, 0, 0 }, /* New 1/4 pel 6 tap filter */ + { 0, -9, 93, 50, -6, 0, 0, 0 }, + { 3, -16, 77, 77, -16, 3, 0, 0 }, /* New 1/2 pel 6 tap filter */ + { 0, -6, 50, 93, -9, 0, 0, 0 }, + { 1, -8, 36, 108, -11, 2, 0, 0 }, /* New 1/4 pel 6 tap filter */ + { 0, -1, 12, 123, -6, 0, 0, 0 }, +}; + +static const uint8_t vp8_mc_filt_mask_arr[16 * 3] = +{ + /* 8 width cases */ + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, + /* 4 width cases */ + 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20, + /* 4 width cases */ + 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28 +}; + +#define HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, \ + filt_h0, filt_h1, filt_h2) \ +({ \ + v16i8 vec0_m, vec1_m, vec2_m; \ + v8i16 hz_out_m; \ + \ + VSHF_B3_SB(src0, src1, src0, src1, src0, src1, mask0, mask1, mask2, \ + vec0_m, vec1_m, vec2_m); \ + hz_out_m = DPADD_SH3_SH(vec0_m, vec1_m, vec2_m, \ + filt_h0, filt_h1, filt_h2); \ + \ + hz_out_m = __msa_srari_h(hz_out_m, VP8_FILTER_SHIFT); \ + hz_out_m = __msa_sat_s_h(hz_out_m, 7); \ + \ + hz_out_m; \ +}) + +#define HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, \ + mask0, mask1, mask2, \ + filt0, filt1, filt2, \ + out0, out1) \ +{ \ + v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m; \ + \ + VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \ + DOTP_SB2_SH(vec0_m, vec1_m, filt0, filt0, out0, out1); \ + VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \ + DPADD_SB2_SH(vec2_m, vec3_m, filt1, filt1, out0, out1); \ + VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m); \ + DPADD_SB2_SH(vec4_m, vec5_m, filt2, filt2, out0, out1); \ +} + +#define HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, \ + mask0, mask1, mask2, \ + filt0, filt1, filt2, \ + out0, out1, out2, out3) \ +{ \ + v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \ + \ + VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0_m, vec1_m); \ + VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \ + DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt0, filt0, filt0, filt0, \ + out0, out1, out2, out3); \ + VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec0_m, vec1_m); \ + VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2_m, vec3_m); \ + VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec4_m, vec5_m); \ + VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec6_m, vec7_m); \ + DPADD_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt1, filt1, filt1, filt1, \ + out0, out1, out2, out3); \ + DPADD_SB4_SH(vec4_m, vec5_m, vec6_m, vec7_m, filt2, filt2, filt2, filt2, \ + out0, out1, out2, out3); \ +} + +#define FILT_4TAP_DPADD_S_H(vec0, vec1, filt0, filt1) \ +({ \ + v8i16 tmp0; \ + \ + tmp0 = __msa_dotp_s_h((v16i8)vec0, (v16i8)filt0); \ + tmp0 = __msa_dpadd_s_h(tmp0, (v16i8)vec1, (v16i8)filt1); \ + \ + tmp0; \ +}) + +#define HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_h0, filt_h1) \ +({ \ + v16i8 vec0_m, vec1_m; \ + v8i16 hz_out_m; \ + \ + VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0_m, vec1_m); \ + hz_out_m = FILT_4TAP_DPADD_S_H(vec0_m, vec1_m, filt_h0, filt_h1); \ + \ + hz_out_m = __msa_srari_h(hz_out_m, VP8_FILTER_SHIFT); \ + hz_out_m = __msa_sat_s_h(hz_out_m, 7); \ + \ + hz_out_m; \ +}) + +#define HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, 
src3, \ + mask0, mask1, filt0, filt1, \ + out0, out1) \ +{ \ + v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \ + \ + VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \ + DOTP_SB2_SH(vec0_m, vec1_m, filt0, filt0, out0, out1); \ + VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \ + DPADD_SB2_SH(vec2_m, vec3_m, filt1, filt1, out0, out1); \ +} + +#define HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, \ + mask0, mask1, filt0, filt1, \ + out0, out1, out2, out3) \ +{ \ + v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \ + \ + VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0_m, vec1_m); \ + VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \ + DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt0, filt0, filt0, filt0, \ + out0, out1, out2, out3); \ + VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec0_m, vec1_m); \ + VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2_m, vec3_m); \ + DPADD_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt1, filt1, filt1, filt1, \ + out0, out1, out2, out3); \ +} + +static void common_hz_6t_4x4_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter) +{ + v16i8 src0, src1, src2, src3, filt0, filt1, filt2; + v16u8 mask0, mask1, mask2, out; + v8i16 filt, out0, out1; + + mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]); + src -= 2; + + filt = LD_SH(filter); + SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); + + mask1 = mask0 + 2; + mask2 = mask0 + 4; + + LD_SB4(src, src_stride, src0, src1, src2, src3); + XORI_B4_128_SB(src0, src1, src2, src3); + HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, + filt0, filt1, filt2, out0, out1); + SRARI_H2_SH(out0, out1, VP8_FILTER_SHIFT); + SAT_SH2_SH(out0, out1, 7); + out = PCKEV_XORI128_UB(out0, out1); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); +} + +static void common_hz_6t_4x8_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter) +{ + v16i8 src0, src1, src2, src3, filt0, filt1, filt2; + v16u8 mask0, mask1, mask2, out; + v8i16 filt, out0, out1, out2, out3; + + mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]); + src -= 2; + + filt = LD_SH(filter); + SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); + + mask1 = mask0 + 2; + mask2 = mask0 + 4; + + LD_SB4(src, src_stride, src0, src1, src2, src3); + XORI_B4_128_SB(src0, src1, src2, src3); + src += (4 * src_stride); + HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, + filt0, filt1, filt2, out0, out1); + LD_SB4(src, src_stride, src0, src1, src2, src3); + XORI_B4_128_SB(src0, src1, src2, src3); + HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, + filt0, filt1, filt2, out2, out3); + SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0, out1, out2, out3, 7); + out = PCKEV_XORI128_UB(out0, out1); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + dst += (4 * dst_stride); + out = PCKEV_XORI128_UB(out2, out3); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); +} + +static void common_hz_6t_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) +{ + if (4 == height) + { + common_hz_6t_4x4_msa(src, src_stride, dst, dst_stride, filter); + } + else if (8 == height) + { + common_hz_6t_4x8_msa(src, src_stride, dst, dst_stride, filter); + } +} + +static void common_hz_6t_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t 
*filter, int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, filt0, filt1, filt2; + v16u8 mask0, mask1, mask2, tmp0, tmp1; + v8i16 filt, out0, out1, out2, out3; + + mask0 = LD_UB(&vp8_mc_filt_mask_arr[0]); + src -= 2; + + filt = LD_SH(filter); + SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); + + mask1 = mask0 + 2; + mask2 = mask0 + 4; + + LD_SB4(src, src_stride, src0, src1, src2, src3); + XORI_B4_128_SB(src0, src1, src2, src3); + src += (4 * src_stride); + HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, + filt0, filt1, filt2, out0, out1, out2, out3); + SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0, out1, out2, out3, 7); + tmp0 = PCKEV_XORI128_UB(out0, out1); + tmp1 = PCKEV_XORI128_UB(out2, out3); + ST8x4_UB(tmp0, tmp1, dst, dst_stride); + dst += (4 * dst_stride); + + for (loop_cnt = (height >> 2) - 1; loop_cnt--;) + { + LD_SB4(src, src_stride, src0, src1, src2, src3); + XORI_B4_128_SB(src0, src1, src2, src3); + src += (4 * src_stride); + HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, + filt0, filt1, filt2, out0, out1, out2, out3); + SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0, out1, out2, out3, 7); + tmp0 = PCKEV_XORI128_UB(out0, out1); + tmp1 = PCKEV_XORI128_UB(out2, out3); + ST8x4_UB(tmp0, tmp1, dst, dst_stride); + dst += (4 * dst_stride); + } +} + +static void common_hz_6t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, filt0, filt1, filt2; + v16u8 mask0, mask1, mask2, out; + v8i16 filt, out0, out1, out2, out3, out4, out5, out6, out7; + + mask0 = LD_UB(&vp8_mc_filt_mask_arr[0]); + src -= 2; + + filt = LD_SH(filter); + SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); + + mask1 = mask0 + 2; + mask2 = mask0 + 4; + + for (loop_cnt = (height >> 2); loop_cnt--;) + { + LD_SB4(src, src_stride, src0, src2, src4, src6); + LD_SB4(src + 8, src_stride, src1, src3, src5, src7); + XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7); + src += (4 * src_stride); + + HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, + filt0, filt1, filt2, out0, out1, out2, out3); + HORIZ_6TAP_8WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1, mask2, + filt0, filt1, filt2, out4, out5, out6, out7); + SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); + SRARI_H4_SH(out4, out5, out6, out7, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0, out1, out2, out3, 7); + SAT_SH4_SH(out4, out5, out6, out7, 7); + out = PCKEV_XORI128_UB(out0, out1); + ST_UB(out, dst); + dst += dst_stride; + out = PCKEV_XORI128_UB(out2, out3); + ST_UB(out, dst); + dst += dst_stride; + out = PCKEV_XORI128_UB(out4, out5); + ST_UB(out, dst); + dst += dst_stride; + out = PCKEV_XORI128_UB(out6, out7); + ST_UB(out, dst); + dst += dst_stride; + } +} + +static void common_vt_6t_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; + v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r; + v16i8 src87_r, src2110, src4332, src6554, src8776, filt0, filt1, filt2; + v16u8 out; + v8i16 filt, out10, out32; + + src -= (2 * src_stride); + + filt = LD_SH(filter); + SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); + + LD_SB5(src, src_stride, src0, src1, src2, src3, src4); + 
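+    /* the 6-tap vertical filter needs five rows of context before the
+       first output row, so step past the prologue rows just loaded */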
src += (5 * src_stride); + + ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r, + src32_r, src43_r); + ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332); + XORI_B2_128_SB(src2110, src4332); + + for (loop_cnt = (height >> 2); loop_cnt--;) + { + LD_SB4(src, src_stride, src5, src6, src7, src8); + src += (4 * src_stride); + + ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, + src65_r, src76_r, src87_r); + ILVR_D2_SB(src65_r, src54_r, src87_r, src76_r, src6554, src8776); + XORI_B2_128_SB(src6554, src8776); + out10 = DPADD_SH3_SH(src2110, src4332, src6554, filt0, filt1, filt2); + out32 = DPADD_SH3_SH(src4332, src6554, src8776, filt0, filt1, filt2); + SRARI_H2_SH(out10, out32, VP8_FILTER_SHIFT); + SAT_SH2_SH(out10, out32, 7); + out = PCKEV_XORI128_UB(out10, out32); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + dst += (4 * dst_stride); + + src2110 = src6554; + src4332 = src8776; + src4 = src8; + } +} + +static void common_vt_6t_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src7, src8, src9, src10; + v16i8 src10_r, src32_r, src76_r, src98_r, src21_r, src43_r, src87_r; + v16i8 src109_r, filt0, filt1, filt2; + v16u8 tmp0, tmp1; + v8i16 filt, out0_r, out1_r, out2_r, out3_r; + + src -= (2 * src_stride); + + filt = LD_SH(filter); + SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); + + LD_SB5(src, src_stride, src0, src1, src2, src3, src4); + src += (5 * src_stride); + + XORI_B5_128_SB(src0, src1, src2, src3, src4); + ILVR_B4_SB(src1, src0, src3, src2, src2, src1, src4, src3, src10_r, src32_r, + src21_r, src43_r); + + for (loop_cnt = (height >> 2); loop_cnt--;) + { + LD_SB4(src, src_stride, src7, src8, src9, src10); + XORI_B4_128_SB(src7, src8, src9, src10); + src += (4 * src_stride); + + ILVR_B4_SB(src7, src4, src8, src7, src9, src8, src10, src9, src76_r, + src87_r, src98_r, src109_r); + out0_r = DPADD_SH3_SH(src10_r, src32_r, src76_r, filt0, filt1, filt2); + out1_r = DPADD_SH3_SH(src21_r, src43_r, src87_r, filt0, filt1, filt2); + out2_r = DPADD_SH3_SH(src32_r, src76_r, src98_r, filt0, filt1, filt2); + out3_r = DPADD_SH3_SH(src43_r, src87_r, src109_r, filt0, filt1, filt2); + SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); + tmp0 = PCKEV_XORI128_UB(out0_r, out1_r); + tmp1 = PCKEV_XORI128_UB(out2_r, out3_r); + ST8x4_UB(tmp0, tmp1, dst, dst_stride); + dst += (4 * dst_stride); + + src10_r = src76_r; + src32_r = src98_r; + src21_r = src87_r; + src43_r = src109_r; + src4 = src10; + } +} + +static void common_vt_6t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; + v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r; + v16i8 src87_r, src10_l, src32_l, src54_l, src76_l, src21_l, src43_l; + v16i8 src65_l, src87_l, filt0, filt1, filt2; + v16u8 tmp0, tmp1, tmp2, tmp3; + v8i16 out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l, filt; + + src -= (2 * src_stride); + + filt = LD_SH(filter); + SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); + + LD_SB5(src, src_stride, src0, src1, src2, src3, src4); + src += (5 * src_stride); + + XORI_B5_128_SB(src0, src1, src2, src3, src4); + ILVR_B4_SB(src1, src0, src3, src2, src4, src3, src2, src1, src10_r, + 
src32_r, src43_r, src21_r); + ILVL_B4_SB(src1, src0, src3, src2, src4, src3, src2, src1, src10_l, + src32_l, src43_l, src21_l); + + for (loop_cnt = (height >> 2); loop_cnt--;) + { + LD_SB4(src, src_stride, src5, src6, src7, src8); + src += (4 * src_stride); + + XORI_B4_128_SB(src5, src6, src7, src8); + ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, + src65_r, src76_r, src87_r); + ILVL_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_l, + src65_l, src76_l, src87_l); + out0_r = DPADD_SH3_SH(src10_r, src32_r, src54_r, filt0, filt1, filt2); + out1_r = DPADD_SH3_SH(src21_r, src43_r, src65_r, filt0, filt1, filt2); + out2_r = DPADD_SH3_SH(src32_r, src54_r, src76_r, filt0, filt1, filt2); + out3_r = DPADD_SH3_SH(src43_r, src65_r, src87_r, filt0, filt1, filt2); + out0_l = DPADD_SH3_SH(src10_l, src32_l, src54_l, filt0, filt1, filt2); + out1_l = DPADD_SH3_SH(src21_l, src43_l, src65_l, filt0, filt1, filt2); + out2_l = DPADD_SH3_SH(src32_l, src54_l, src76_l, filt0, filt1, filt2); + out3_l = DPADD_SH3_SH(src43_l, src65_l, src87_l, filt0, filt1, filt2); + SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, VP8_FILTER_SHIFT); + SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); + SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7); + PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l, + out3_r, tmp0, tmp1, tmp2, tmp3); + XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3); + ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride); + dst += (4 * dst_stride); + + src10_r = src54_r; + src32_r = src76_r; + src21_r = src65_r; + src43_r = src87_r; + src10_l = src54_l; + src32_l = src76_l; + src21_l = src65_l; + src43_l = src87_l; + src4 = src8; + } +} + +static void common_hv_6ht_6vt_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; + v16i8 filt_hz0, filt_hz1, filt_hz2; + v16u8 mask0, mask1, mask2, out; + v8i16 tmp0, tmp1; + v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; + v8i16 hz_out7, filt, filt_vt0, filt_vt1, filt_vt2, out0, out1, out2, out3; + + mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]); + src -= (2 + 2 * src_stride); + + filt = LD_SH(filter_horiz); + SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2); + filt = LD_SH(filter_vert); + SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2); + + mask1 = mask0 + 2; + mask2 = mask0 + 4; + + LD_SB5(src, src_stride, src0, src1, src2, src3, src4); + src += (5 * src_stride); + + XORI_B5_128_SB(src0, src1, src2, src3, src4); + hz_out0 = HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + hz_out2 = HORIZ_6TAP_FILT(src2, src3, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + hz_out1 = (v8i16)__msa_sldi_b((v16i8)hz_out2, (v16i8)hz_out0, 8); + hz_out3 = HORIZ_6TAP_FILT(src3, src4, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1); + + for (loop_cnt = (height >> 2); loop_cnt--;) + { + LD_SB2(src, src_stride, src5, src6); + src += (2 * src_stride); + + XORI_B2_128_SB(src5, src6); + hz_out5 = HORIZ_6TAP_FILT(src5, src6, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + hz_out4 = (v8i16)__msa_sldi_b((v16i8)hz_out5, (v16i8)hz_out3, 8); + + LD_SB2(src, src_stride, src7, src8); + src += (2 * src_stride); + + XORI_B2_128_SB(src7, src8); + hz_out7 = 
HORIZ_6TAP_FILT(src7, src8, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8); + + out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); + tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2); + + out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); + tmp1 = DPADD_SH3_SH(out1, out2, out3, filt_vt0, filt_vt1, filt_vt2); + + SRARI_H2_SH(tmp0, tmp1, 7); + SAT_SH2_SH(tmp0, tmp1, 7); + out = PCKEV_XORI128_UB(tmp0, tmp1); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + dst += (4 * dst_stride); + + hz_out3 = hz_out7; + out0 = out2; + out1 = out3; + } +} + +static void common_hv_6ht_6vt_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; + v16i8 filt_hz0, filt_hz1, filt_hz2; + v16u8 mask0, mask1, mask2, vec0, vec1; + v8i16 filt, filt_vt0, filt_vt1, filt_vt2; + v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; + v8i16 hz_out7, hz_out8, out0, out1, out2, out3, out4, out5, out6, out7; + v8i16 tmp0, tmp1, tmp2, tmp3; + + mask0 = LD_UB(&vp8_mc_filt_mask_arr[0]); + src -= (2 + 2 * src_stride); + + filt = LD_SH(filter_horiz); + SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2); + + mask1 = mask0 + 2; + mask2 = mask0 + 4; + + LD_SB5(src, src_stride, src0, src1, src2, src3, src4); + src += (5 * src_stride); + + XORI_B5_128_SB(src0, src1, src2, src3, src4); + hz_out0 = HORIZ_6TAP_FILT(src0, src0, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + hz_out1 = HORIZ_6TAP_FILT(src1, src1, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + hz_out2 = HORIZ_6TAP_FILT(src2, src2, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + hz_out3 = HORIZ_6TAP_FILT(src3, src3, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + hz_out4 = HORIZ_6TAP_FILT(src4, src4, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + + filt = LD_SH(filter_vert); + SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2); + + ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1); + ILVEV_B2_SH(hz_out1, hz_out2, hz_out3, hz_out4, out3, out4); + + for (loop_cnt = (height >> 2); loop_cnt--;) + { + LD_SB4(src, src_stride, src5, src6, src7, src8); + src += (4 * src_stride); + + XORI_B4_128_SB(src5, src6, src7, src8); + hz_out5 = HORIZ_6TAP_FILT(src5, src5, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); + tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2); + + hz_out6 = HORIZ_6TAP_FILT(src6, src6, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + out5 = (v8i16)__msa_ilvev_b((v16i8)hz_out6, (v16i8)hz_out5); + tmp1 = DPADD_SH3_SH(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2); + + hz_out7 = HORIZ_6TAP_FILT(src7, src7, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + out7 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); + tmp2 = DPADD_SH3_SH(out1, out2, out7, filt_vt0, filt_vt1, filt_vt2); + + hz_out8 = HORIZ_6TAP_FILT(src8, src8, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + out6 = (v8i16)__msa_ilvev_b((v16i8)hz_out8, (v16i8)hz_out7); + tmp3 = DPADD_SH3_SH(out4, out5, out6, filt_vt0, filt_vt1, filt_vt2); + + SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7); + SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7); + vec0 = PCKEV_XORI128_UB(tmp0, tmp1); + vec1 = PCKEV_XORI128_UB(tmp2, tmp3); + 
ST8x4_UB(vec0, vec1, dst, dst_stride); + dst += (4 * dst_stride); + + hz_out4 = hz_out8; + out0 = out2; + out1 = out7; + out3 = out5; + out4 = out6; + } +} + +static void common_hv_6ht_6vt_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) +{ + int32_t multiple8_cnt; + for (multiple8_cnt = 2; multiple8_cnt--;) + { + common_hv_6ht_6vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz, + filter_vert, height); + src += 8; + dst += 8; + } +} + +static void common_hz_4t_4x4_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter) +{ + v16i8 src0, src1, src2, src3, filt0, filt1, mask0, mask1; + v8i16 filt, out0, out1; + v16u8 out; + + mask0 = LD_SB(&vp8_mc_filt_mask_arr[16]); + src -= 1; + + filt = LD_SH(filter); + SPLATI_H2_SB(filt, 0, 1, filt0, filt1); + + mask1 = mask0 + 2; + + LD_SB4(src, src_stride, src0, src1, src2, src3); + XORI_B4_128_SB(src0, src1, src2, src3); + HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, + filt0, filt1, out0, out1); + SRARI_H2_SH(out0, out1, VP8_FILTER_SHIFT); + SAT_SH2_SH(out0, out1, 7); + out = PCKEV_XORI128_UB(out0, out1); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); +} + +static void common_hz_4t_4x8_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter) +{ + v16i8 src0, src1, src2, src3, filt0, filt1, mask0, mask1; + v16u8 out; + v8i16 filt, out0, out1, out2, out3; + + mask0 = LD_SB(&vp8_mc_filt_mask_arr[16]); + src -= 1; + + filt = LD_SH(filter); + SPLATI_H2_SB(filt, 0, 1, filt0, filt1); + + mask1 = mask0 + 2; + + LD_SB4(src, src_stride, src0, src1, src2, src3); + src += (4 * src_stride); + + XORI_B4_128_SB(src0, src1, src2, src3); + HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, + filt0, filt1, out0, out1); + LD_SB4(src, src_stride, src0, src1, src2, src3); + XORI_B4_128_SB(src0, src1, src2, src3); + HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, + filt0, filt1, out2, out3); + SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0, out1, out2, out3, 7); + out = PCKEV_XORI128_UB(out0, out1); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + dst += (4 * dst_stride); + out = PCKEV_XORI128_UB(out2, out3); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); +} + +static void common_hz_4t_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) +{ + if (4 == height) + { + common_hz_4t_4x4_msa(src, src_stride, dst, dst_stride, filter); + } + else if (8 == height) + { + common_hz_4t_4x8_msa(src, src_stride, dst, dst_stride, filter); + } +} + +static void common_hz_4t_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, filt0, filt1, mask0, mask1; + v16u8 tmp0, tmp1; + v8i16 filt, out0, out1, out2, out3; + + mask0 = LD_SB(&vp8_mc_filt_mask_arr[0]); + src -= 1; + + filt = LD_SH(filter); + SPLATI_H2_SB(filt, 0, 1, filt0, filt1); + + mask1 = mask0 + 2; + + for (loop_cnt = (height >> 2); loop_cnt--;) + { + LD_SB4(src, src_stride, src0, src1, src2, src3); + src += (4 * src_stride); + + XORI_B4_128_SB(src0, src1, src2, src3); + HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, + filt1, out0, out1, out2, out3); + 
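+        /* scalar view of the two steps below, per halfword element:
+           v = (v + (1 << (VP8_FILTER_SHIFT - 1))) >> VP8_FILTER_SHIFT;
+           v = v < -128 ? -128 : (v > 127 ? 127 : v);  */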
SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0, out1, out2, out3, 7); + tmp0 = PCKEV_XORI128_UB(out0, out1); + tmp1 = PCKEV_XORI128_UB(out2, out3); + ST8x4_UB(tmp0, tmp1, dst, dst_stride); + dst += (4 * dst_stride); + } +} + +static void common_hz_4t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7; + v16i8 filt0, filt1, mask0, mask1; + v8i16 filt, out0, out1, out2, out3, out4, out5, out6, out7; + v16u8 out; + + mask0 = LD_SB(&vp8_mc_filt_mask_arr[0]); + src -= 1; + + filt = LD_SH(filter); + SPLATI_H2_SB(filt, 0, 1, filt0, filt1); + + mask1 = mask0 + 2; + + for (loop_cnt = (height >> 2); loop_cnt--;) + { + LD_SB4(src, src_stride, src0, src2, src4, src6); + LD_SB4(src + 8, src_stride, src1, src3, src5, src7); + src += (4 * src_stride); + + XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7); + HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, + filt1, out0, out1, out2, out3); + HORIZ_4TAP_8WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1, filt0, + filt1, out4, out5, out6, out7); + SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); + SRARI_H4_SH(out4, out5, out6, out7, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0, out1, out2, out3, 7); + SAT_SH4_SH(out4, out5, out6, out7, 7); + out = PCKEV_XORI128_UB(out0, out1); + ST_UB(out, dst); + dst += dst_stride; + out = PCKEV_XORI128_UB(out2, out3); + ST_UB(out, dst); + dst += dst_stride; + out = PCKEV_XORI128_UB(out4, out5); + ST_UB(out, dst); + dst += dst_stride; + out = PCKEV_XORI128_UB(out6, out7); + ST_UB(out, dst); + dst += dst_stride; + } +} + +static void common_vt_4t_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5; + v16i8 src10_r, src32_r, src54_r, src21_r, src43_r, src65_r; + v16i8 src2110, src4332, filt0, filt1; + v8i16 filt, out10, out32; + v16u8 out; + + src -= src_stride; + + filt = LD_SH(filter); + SPLATI_H2_SB(filt, 0, 1, filt0, filt1); + + LD_SB3(src, src_stride, src0, src1, src2); + src += (3 * src_stride); + + ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r); + + src2110 = (v16i8)__msa_ilvr_d((v2i64)src21_r, (v2i64)src10_r); + src2110 = (v16i8)__msa_xori_b((v16u8)src2110, 128); + + for (loop_cnt = (height >> 2); loop_cnt--;) + { + LD_SB3(src, src_stride, src3, src4, src5); + src += (3 * src_stride); + ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r); + src4332 = (v16i8)__msa_ilvr_d((v2i64)src43_r, (v2i64)src32_r); + src4332 = (v16i8)__msa_xori_b((v16u8)src4332, 128); + out10 = FILT_4TAP_DPADD_S_H(src2110, src4332, filt0, filt1); + + src2 = LD_SB(src); + src += (src_stride); + ILVR_B2_SB(src5, src4, src2, src5, src54_r, src65_r); + src2110 = (v16i8)__msa_ilvr_d((v2i64)src65_r, (v2i64)src54_r); + src2110 = (v16i8)__msa_xori_b((v16u8)src2110, 128); + out32 = FILT_4TAP_DPADD_S_H(src4332, src2110, filt0, filt1); + SRARI_H2_SH(out10, out32, VP8_FILTER_SHIFT); + SAT_SH2_SH(out10, out32, 7); + out = PCKEV_XORI128_UB(out10, out32); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + dst += (4 * dst_stride); + } +} + +static void common_vt_4t_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src7, src8, src9, src10; + v16i8 
src10_r, src72_r, src98_r, src21_r, src87_r, src109_r, filt0, filt1; + v16u8 tmp0, tmp1; + v8i16 filt, out0_r, out1_r, out2_r, out3_r; + + src -= src_stride; + + filt = LD_SH(filter); + SPLATI_H2_SB(filt, 0, 1, filt0, filt1); + + LD_SB3(src, src_stride, src0, src1, src2); + src += (3 * src_stride); + + XORI_B3_128_SB(src0, src1, src2); + ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r); + + for (loop_cnt = (height >> 2); loop_cnt--;) + { + LD_SB4(src, src_stride, src7, src8, src9, src10); + src += (4 * src_stride); + + XORI_B4_128_SB(src7, src8, src9, src10); + ILVR_B4_SB(src7, src2, src8, src7, src9, src8, src10, src9, + src72_r, src87_r, src98_r, src109_r); + out0_r = FILT_4TAP_DPADD_S_H(src10_r, src72_r, filt0, filt1); + out1_r = FILT_4TAP_DPADD_S_H(src21_r, src87_r, filt0, filt1); + out2_r = FILT_4TAP_DPADD_S_H(src72_r, src98_r, filt0, filt1); + out3_r = FILT_4TAP_DPADD_S_H(src87_r, src109_r, filt0, filt1); + SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); + tmp0 = PCKEV_XORI128_UB(out0_r, out1_r); + tmp1 = PCKEV_XORI128_UB(out2_r, out3_r); + ST8x4_UB(tmp0, tmp1, dst, dst_stride); + dst += (4 * dst_stride); + + src10_r = src98_r; + src21_r = src109_r; + src2 = src10; + } +} + +static void common_vt_4t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter, int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6; + v16i8 src10_r, src32_r, src54_r, src21_r, src43_r, src65_r, src10_l; + v16i8 src32_l, src54_l, src21_l, src43_l, src65_l, filt0, filt1; + v16u8 tmp0, tmp1, tmp2, tmp3; + v8i16 filt, out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l; + + src -= src_stride; + + filt = LD_SH(filter); + SPLATI_H2_SB(filt, 0, 1, filt0, filt1); + + LD_SB3(src, src_stride, src0, src1, src2); + src += (3 * src_stride); + + XORI_B3_128_SB(src0, src1, src2); + ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r); + ILVL_B2_SB(src1, src0, src2, src1, src10_l, src21_l); + + for (loop_cnt = (height >> 2); loop_cnt--;) + { + LD_SB4(src, src_stride, src3, src4, src5, src6); + src += (4 * src_stride); + + XORI_B4_128_SB(src3, src4, src5, src6); + ILVR_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5, + src32_r, src43_r, src54_r, src65_r); + ILVL_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5, + src32_l, src43_l, src54_l, src65_l); + out0_r = FILT_4TAP_DPADD_S_H(src10_r, src32_r, filt0, filt1); + out1_r = FILT_4TAP_DPADD_S_H(src21_r, src43_r, filt0, filt1); + out2_r = FILT_4TAP_DPADD_S_H(src32_r, src54_r, filt0, filt1); + out3_r = FILT_4TAP_DPADD_S_H(src43_r, src65_r, filt0, filt1); + out0_l = FILT_4TAP_DPADD_S_H(src10_l, src32_l, filt0, filt1); + out1_l = FILT_4TAP_DPADD_S_H(src21_l, src43_l, filt0, filt1); + out2_l = FILT_4TAP_DPADD_S_H(src32_l, src54_l, filt0, filt1); + out3_l = FILT_4TAP_DPADD_S_H(src43_l, src65_l, filt0, filt1); + SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, VP8_FILTER_SHIFT); + SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); + SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7); + PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l, + out3_r, tmp0, tmp1, tmp2, tmp3); + XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3); + ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride); + dst += (4 * dst_stride); + + src10_r = src54_r; + src21_r = src65_r; + src10_l = src54_l; + src21_l = src65_l; + src2 = src6; + } +} + +static void 
common_hv_4ht_4vt_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, filt_hz0, filt_hz1; + v16u8 mask0, mask1, out; + v8i16 filt, filt_vt0, filt_vt1, tmp0, tmp1, vec0, vec1, vec2; + v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5; + + mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]); + src -= (1 + 1 * src_stride); + + filt = LD_SH(filter_horiz); + SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1); + + mask1 = mask0 + 2; + + LD_SB3(src, src_stride, src0, src1, src2); + src += (3 * src_stride); + + XORI_B3_128_SB(src0, src1, src2); + hz_out0 = HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_hz0, filt_hz1); + hz_out1 = HORIZ_4TAP_FILT(src1, src2, mask0, mask1, filt_hz0, filt_hz1); + vec0 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); + + filt = LD_SH(filter_vert); + SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1); + + for (loop_cnt = (height >> 2); loop_cnt--;) + { + LD_SB4(src, src_stride, src3, src4, src5, src6); + src += (4 * src_stride); + + XORI_B2_128_SB(src3, src4); + hz_out3 = HORIZ_4TAP_FILT(src3, src4, mask0, mask1, filt_hz0, filt_hz1); + hz_out2 = (v8i16)__msa_sldi_b((v16i8)hz_out3, (v16i8)hz_out1, 8); + vec1 = (v8i16)__msa_ilvev_b((v16i8)hz_out3, (v16i8)hz_out2); + tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1); + + XORI_B2_128_SB(src5, src6); + hz_out5 = HORIZ_4TAP_FILT(src5, src6, mask0, mask1, filt_hz0, filt_hz1); + hz_out4 = (v8i16)__msa_sldi_b((v16i8)hz_out5, (v16i8)hz_out3, 8); + vec2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); + tmp1 = FILT_4TAP_DPADD_S_H(vec1, vec2, filt_vt0, filt_vt1); + + SRARI_H2_SH(tmp0, tmp1, 7); + SAT_SH2_SH(tmp0, tmp1, 7); + out = PCKEV_XORI128_UB(tmp0, tmp1); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + dst += (4 * dst_stride); + + hz_out1 = hz_out5; + vec0 = vec2; + } +} + +static void common_hv_4ht_4vt_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, filt_hz0, filt_hz1; + v16u8 mask0, mask1, out0, out1; + v8i16 filt, filt_vt0, filt_vt1, tmp0, tmp1, tmp2, tmp3; + v8i16 hz_out0, hz_out1, hz_out2, hz_out3; + v8i16 vec0, vec1, vec2, vec3, vec4; + + mask0 = LD_UB(&vp8_mc_filt_mask_arr[0]); + src -= (1 + 1 * src_stride); + + filt = LD_SH(filter_horiz); + SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1); + + mask1 = mask0 + 2; + + LD_SB3(src, src_stride, src0, src1, src2); + src += (3 * src_stride); + + XORI_B3_128_SB(src0, src1, src2); + hz_out0 = HORIZ_4TAP_FILT(src0, src0, mask0, mask1, filt_hz0, filt_hz1); + hz_out1 = HORIZ_4TAP_FILT(src1, src1, mask0, mask1, filt_hz0, filt_hz1); + hz_out2 = HORIZ_4TAP_FILT(src2, src2, mask0, mask1, filt_hz0, filt_hz1); + ILVEV_B2_SH(hz_out0, hz_out1, hz_out1, hz_out2, vec0, vec2); + + filt = LD_SH(filter_vert); + SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1); + + for (loop_cnt = (height >> 2); loop_cnt--;) + { + LD_SB4(src, src_stride, src3, src4, src5, src6); + src += (4 * src_stride); + + XORI_B4_128_SB(src3, src4, src5, src6); + hz_out3 = HORIZ_4TAP_FILT(src3, src3, mask0, mask1, filt_hz0, filt_hz1); + vec1 = (v8i16)__msa_ilvev_b((v16i8)hz_out3, (v16i8)hz_out2); + tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1); + + hz_out0 = HORIZ_4TAP_FILT(src4, src4, mask0, mask1, filt_hz0, filt_hz1); 
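+        /* hz_out0 is recycled for the newest row; interleaving its even
+           bytes with hz_out3 pairs the two latest horizontal outputs for
+           vertical taps 2 and 3 in the dot product below */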
+ vec3 = (v8i16)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out3); + tmp1 = FILT_4TAP_DPADD_S_H(vec2, vec3, filt_vt0, filt_vt1); + + hz_out1 = HORIZ_4TAP_FILT(src5, src5, mask0, mask1, filt_hz0, filt_hz1); + vec4 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); + tmp2 = FILT_4TAP_DPADD_S_H(vec1, vec4, filt_vt0, filt_vt1); + + hz_out2 = HORIZ_4TAP_FILT(src6, src6, mask0, mask1, filt_hz0, filt_hz1); + ILVEV_B2_SH(hz_out3, hz_out0, hz_out1, hz_out2, vec0, vec1); + tmp3 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1); + + SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7); + SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7); + out0 = PCKEV_XORI128_UB(tmp0, tmp1); + out1 = PCKEV_XORI128_UB(tmp2, tmp3); + ST8x4_UB(out0, out1, dst, dst_stride); + dst += (4 * dst_stride); + + vec0 = vec4; + vec2 = vec1; + } +} + +static void common_hv_4ht_4vt_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) +{ + int32_t multiple8_cnt; + for (multiple8_cnt = 2; multiple8_cnt--;) + { + common_hv_4ht_4vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz, + filter_vert, height); + src += 8; + dst += 8; + } +} + +static void common_hv_6ht_4vt_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6; + v16i8 filt_hz0, filt_hz1, filt_hz2; + v16u8 res0, res1, mask0, mask1, mask2; + v8i16 filt, filt_vt0, filt_vt1, tmp0, tmp1, vec0, vec1, vec2; + v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5; + + mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]); + src -= (2 + 1 * src_stride); + + filt = LD_SH(filter_horiz); + SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2); + + mask1 = mask0 + 2; + mask2 = mask0 + 4; + + LD_SB3(src, src_stride, src0, src1, src2); + src += (3 * src_stride); + + XORI_B3_128_SB(src0, src1, src2); + hz_out0 = HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + hz_out1 = HORIZ_6TAP_FILT(src1, src2, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + vec0 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); + + filt = LD_SH(filter_vert); + SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1); + + for (loop_cnt = (height >> 2); loop_cnt--;) + { + LD_SB4(src, src_stride, src3, src4, src5, src6); + src += (4 * src_stride); + + XORI_B4_128_SB(src3, src4, src5, src6); + hz_out3 = HORIZ_6TAP_FILT(src3, src4, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + hz_out2 = (v8i16)__msa_sldi_b((v16i8)hz_out3, (v16i8)hz_out1, 8); + vec1 = (v8i16)__msa_ilvev_b((v16i8)hz_out3, (v16i8)hz_out2); + tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1); + + hz_out5 = HORIZ_6TAP_FILT(src5, src6, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + hz_out4 = (v8i16)__msa_sldi_b((v16i8)hz_out5, (v16i8)hz_out3, 8); + vec2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); + tmp1 = FILT_4TAP_DPADD_S_H(vec1, vec2, filt_vt0, filt_vt1); + + SRARI_H2_SH(tmp0, tmp1, 7); + SAT_SH2_SH(tmp0, tmp1, 7); + PCKEV_B2_UB(tmp0, tmp0, tmp1, tmp1, res0, res1); + XORI_B2_128_UB(res0, res1); + ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride); + dst += (4 * dst_stride); + + hz_out1 = hz_out5; + vec0 = vec2; + } +} + +static void common_hv_6ht_4vt_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, 
+ int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6; + v16i8 filt_hz0, filt_hz1, filt_hz2, mask0, mask1, mask2; + v8i16 filt, filt_vt0, filt_vt1, hz_out0, hz_out1, hz_out2, hz_out3; + v8i16 tmp0, tmp1, tmp2, tmp3, vec0, vec1, vec2, vec3; + v16u8 out0, out1; + + mask0 = LD_SB(&vp8_mc_filt_mask_arr[0]); + src -= (2 + src_stride); + + filt = LD_SH(filter_horiz); + SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2); + + mask1 = mask0 + 2; + mask2 = mask0 + 4; + + LD_SB3(src, src_stride, src0, src1, src2); + src += (3 * src_stride); + + XORI_B3_128_SB(src0, src1, src2); + hz_out0 = HORIZ_6TAP_FILT(src0, src0, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + hz_out1 = HORIZ_6TAP_FILT(src1, src1, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + hz_out2 = HORIZ_6TAP_FILT(src2, src2, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + ILVEV_B2_SH(hz_out0, hz_out1, hz_out1, hz_out2, vec0, vec2); + + filt = LD_SH(filter_vert); + SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1); + + for (loop_cnt = (height >> 2); loop_cnt--;) + { + LD_SB4(src, src_stride, src3, src4, src5, src6); + src += (4 * src_stride); + + XORI_B4_128_SB(src3, src4, src5, src6); + + hz_out3 = HORIZ_6TAP_FILT(src3, src3, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + vec1 = (v8i16)__msa_ilvev_b((v16i8)hz_out3, (v16i8)hz_out2); + tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1); + + hz_out0 = HORIZ_6TAP_FILT(src4, src4, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + vec3 = (v8i16)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out3); + tmp1 = FILT_4TAP_DPADD_S_H(vec2, vec3, filt_vt0, filt_vt1); + + hz_out1 = HORIZ_6TAP_FILT(src5, src5, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + vec0 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); + tmp2 = FILT_4TAP_DPADD_S_H(vec1, vec0, filt_vt0, filt_vt1); + + hz_out2 = HORIZ_6TAP_FILT(src6, src6, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + ILVEV_B2_SH(hz_out3, hz_out0, hz_out1, hz_out2, vec1, vec2); + tmp3 = FILT_4TAP_DPADD_S_H(vec1, vec2, filt_vt0, filt_vt1); + + SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7); + SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7); + out0 = PCKEV_XORI128_UB(tmp0, tmp1); + out1 = PCKEV_XORI128_UB(tmp2, tmp3); + ST8x4_UB(out0, out1, dst, dst_stride); + dst += (4 * dst_stride); + } +} + +static void common_hv_6ht_4vt_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) +{ + int32_t multiple8_cnt; + for (multiple8_cnt = 2; multiple8_cnt--;) + { + common_hv_6ht_4vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz, + filter_vert, height); + src += 8; + dst += 8; + } +} + +static void common_hv_4ht_6vt_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; + v16i8 filt_hz0, filt_hz1, mask0, mask1; + v16u8 out; + v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; + v8i16 hz_out7, tmp0, tmp1, out0, out1, out2, out3; + v8i16 filt, filt_vt0, filt_vt1, filt_vt2; + + mask0 = LD_SB(&vp8_mc_filt_mask_arr[16]); + + src -= (1 + 2 * src_stride); + + filt = LD_SH(filter_horiz); + SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1); + + mask1 = mask0 + 2; + + LD_SB5(src, src_stride, src0, src1, src2, src3, src4); + src += (5 * src_stride); + + 
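+    /* x ^ 128 re-biases the unsigned pixels into int8 range for the
+       signed dot products (e.g. 50 ^ 128 = 178 = (int8_t)-78 = 50 - 128);
+       PCKEV_XORI128_UB applies the same xor to convert back on output */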
XORI_B5_128_SB(src0, src1, src2, src3, src4); + hz_out0 = HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_hz0, filt_hz1); + hz_out2 = HORIZ_4TAP_FILT(src2, src3, mask0, mask1, filt_hz0, filt_hz1); + hz_out3 = HORIZ_4TAP_FILT(src3, src4, mask0, mask1, filt_hz0, filt_hz1); + hz_out1 = (v8i16)__msa_sldi_b((v16i8)hz_out2, (v16i8)hz_out0, 8); + ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1); + + filt = LD_SH(filter_vert); + SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2); + + for (loop_cnt = (height >> 2); loop_cnt--;) + { + LD_SB4(src, src_stride, src5, src6, src7, src8); + XORI_B4_128_SB(src5, src6, src7, src8); + src += (4 * src_stride); + + hz_out5 = HORIZ_4TAP_FILT(src5, src6, mask0, mask1, filt_hz0, filt_hz1); + hz_out4 = (v8i16)__msa_sldi_b((v16i8)hz_out5, (v16i8)hz_out3, 8); + out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); + tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2); + + hz_out7 = HORIZ_4TAP_FILT(src7, src8, mask0, mask1, filt_hz0, filt_hz1); + hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8); + out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); + tmp1 = DPADD_SH3_SH(out1, out2, out3, filt_vt0, filt_vt1, filt_vt2); + + SRARI_H2_SH(tmp0, tmp1, 7); + SAT_SH2_SH(tmp0, tmp1, 7); + out = PCKEV_XORI128_UB(tmp0, tmp1); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + dst += (4 * dst_stride); + + hz_out3 = hz_out7; + out0 = out2; + out1 = out3; + } +} + +static void common_hv_4ht_6vt_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) +{ + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; + v16i8 filt_hz0, filt_hz1, mask0, mask1; + v8i16 filt, filt_vt0, filt_vt1, filt_vt2, tmp0, tmp1, tmp2, tmp3; + v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; + v8i16 hz_out7, hz_out8, out0, out1, out2, out3, out4, out5, out6, out7; + v16u8 vec0, vec1; + + mask0 = LD_SB(&vp8_mc_filt_mask_arr[0]); + src -= (1 + 2 * src_stride); + + filt = LD_SH(filter_horiz); + SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1); + + mask1 = mask0 + 2; + + LD_SB5(src, src_stride, src0, src1, src2, src3, src4); + src += (5 * src_stride); + + XORI_B5_128_SB(src0, src1, src2, src3, src4); + hz_out0 = HORIZ_4TAP_FILT(src0, src0, mask0, mask1, filt_hz0, filt_hz1); + hz_out1 = HORIZ_4TAP_FILT(src1, src1, mask0, mask1, filt_hz0, filt_hz1); + hz_out2 = HORIZ_4TAP_FILT(src2, src2, mask0, mask1, filt_hz0, filt_hz1); + hz_out3 = HORIZ_4TAP_FILT(src3, src3, mask0, mask1, filt_hz0, filt_hz1); + hz_out4 = HORIZ_4TAP_FILT(src4, src4, mask0, mask1, filt_hz0, filt_hz1); + ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1); + ILVEV_B2_SH(hz_out1, hz_out2, hz_out3, hz_out4, out3, out4); + + filt = LD_SH(filter_vert); + SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2); + + for (loop_cnt = (height >> 2); loop_cnt--;) + { + LD_SB4(src, src_stride, src5, src6, src7, src8); + src += (4 * src_stride); + + XORI_B4_128_SB(src5, src6, src7, src8); + + hz_out5 = HORIZ_4TAP_FILT(src5, src5, mask0, mask1, filt_hz0, filt_hz1); + out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); + tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2); + + hz_out6 = HORIZ_4TAP_FILT(src6, src6, mask0, mask1, filt_hz0, filt_hz1); + out5 = (v8i16)__msa_ilvev_b((v16i8)hz_out6, (v16i8)hz_out5); + tmp1 = DPADD_SH3_SH(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2); + + hz_out7 = 
HORIZ_4TAP_FILT(src7, src7, mask0, mask1, filt_hz0, filt_hz1); + out6 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); + tmp2 = DPADD_SH3_SH(out1, out2, out6, filt_vt0, filt_vt1, filt_vt2); + + hz_out8 = HORIZ_4TAP_FILT(src8, src8, mask0, mask1, filt_hz0, filt_hz1); + out7 = (v8i16)__msa_ilvev_b((v16i8)hz_out8, (v16i8)hz_out7); + tmp3 = DPADD_SH3_SH(out4, out5, out7, filt_vt0, filt_vt1, filt_vt2); + + SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7); + SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7); + vec0 = PCKEV_XORI128_UB(tmp0, tmp1); + vec1 = PCKEV_XORI128_UB(tmp2, tmp3); + ST8x4_UB(vec0, vec1, dst, dst_stride); + dst += (4 * dst_stride); + + hz_out4 = hz_out8; + out0 = out2; + out1 = out6; + out3 = out5; + out4 = out7; + } +} + +static void common_hv_4ht_6vt_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, + uint8_t *RESTRICT dst, int32_t dst_stride, + const int8_t *filter_horiz, + const int8_t *filter_vert, + int32_t height) +{ + int32_t multiple8_cnt; + for (multiple8_cnt = 2; multiple8_cnt--;) + { + common_hv_4ht_6vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz, + filter_vert, height); + src += 8; + dst += 8; + } +} + +void vp8_sixtap_predict4x4_msa(uint8_t *RESTRICT src, int32_t src_stride, + int32_t xoffset, int32_t yoffset, + uint8_t *RESTRICT dst, int32_t dst_stride) +{ + const int8_t *h_filter = vp8_subpel_filters_msa[xoffset - 1]; + const int8_t *v_filter = vp8_subpel_filters_msa[yoffset - 1]; + + if (yoffset) + { + if (xoffset) + { + switch (xoffset) + { + case 2: + case 4: + case 6: + switch (yoffset) + { + case 2: + case 4: + case 6: + common_hv_6ht_6vt_4w_msa(src, src_stride, dst, + dst_stride, h_filter, + v_filter, 4); + break; + + case 1: + case 3: + case 5: + case 7: + common_hv_6ht_4vt_4w_msa(src, src_stride, dst, + dst_stride, h_filter, + v_filter + 1, 4); + break; + } + break; + + case 1: + case 3: + case 5: + case 7: + switch (yoffset) + { + case 2: + case 4: + case 6: + common_hv_4ht_6vt_4w_msa(src, src_stride, dst, + dst_stride, h_filter + 1, + v_filter, 4); + break; + + case 1: + case 3: + case 5: + case 7: + common_hv_4ht_4vt_4w_msa(src, src_stride, dst, + dst_stride, h_filter + 1, + v_filter + 1, 4); + break; + } + break; + } + } + else + { + switch (yoffset) + { + case 2: + case 4: + case 6: + common_vt_6t_4w_msa(src, src_stride, dst, dst_stride, + v_filter, 4); + break; + + case 1: + case 3: + case 5: + case 7: + common_vt_4t_4w_msa(src, src_stride, dst, dst_stride, + v_filter + 1, 4); + break; + } + } + } + else + { + switch (xoffset) + { + case 0: + { + uint32_t tp0, tp1, tp2, tp3; + + LW4(src, src_stride, tp0, tp1, tp2, tp3); + SW4(tp0, tp1, tp2, tp3, dst, dst_stride); + break; + } + case 2: + case 4: + case 6: + common_hz_6t_4w_msa(src, src_stride, dst, dst_stride, + h_filter, 4); + break; + + case 1: + case 3: + case 5: + case 7: + common_hz_4t_4w_msa(src, src_stride, dst, dst_stride, + h_filter + 1, 4); + break; + } + } +} + +void vp8_sixtap_predict8x4_msa(uint8_t *RESTRICT src, int32_t src_stride, + int32_t xoffset, int32_t yoffset, + uint8_t *RESTRICT dst, int32_t dst_stride) +{ + const int8_t *h_filter = vp8_subpel_filters_msa[xoffset - 1]; + const int8_t *v_filter = vp8_subpel_filters_msa[yoffset - 1]; + + if (yoffset) + { + if (xoffset) + { + switch (xoffset) + { + case 2: + case 4: + case 6: + switch (yoffset) + { + case 2: + case 4: + case 6: + common_hv_6ht_6vt_8w_msa(src, src_stride, dst, + dst_stride, h_filter, + v_filter, 4); + break; + + case 1: + case 3: + case 5: + case 7: + common_hv_6ht_4vt_8w_msa(src, src_stride, dst, + dst_stride, 
h_filter, + v_filter + 1, 4); + break; + } + break; + + case 1: + case 3: + case 5: + case 7: + switch (yoffset) + { + case 2: + case 4: + case 6: + common_hv_4ht_6vt_8w_msa(src, src_stride, dst, + dst_stride, h_filter + 1, + v_filter, 4); + break; + + case 1: + case 3: + case 5: + case 7: + common_hv_4ht_4vt_8w_msa(src, src_stride, dst, + dst_stride, h_filter + 1, + v_filter + 1, 4); + break; + } + break; + } + } + else + { + switch (yoffset) + { + case 2: + case 4: + case 6: + common_vt_6t_8w_msa(src, src_stride, dst, dst_stride, + v_filter, 4); + break; + + case 1: + case 3: + case 5: + case 7: + common_vt_4t_8w_msa(src, src_stride, dst, dst_stride, + v_filter + 1, 4); + break; + } + } + } + else + { + switch (xoffset) + { + case 0: + vp8_copy_mem8x4(src, src_stride, dst, dst_stride); + break; + case 2: + case 4: + case 6: + common_hz_6t_8w_msa(src, src_stride, dst, dst_stride, + h_filter, 4); + break; + + case 1: + case 3: + case 5: + case 7: + common_hz_4t_8w_msa(src, src_stride, dst, dst_stride, + h_filter + 1, 4); + break; + } + } +} + +void vp8_sixtap_predict8x8_msa(uint8_t *RESTRICT src, int32_t src_stride, + int32_t xoffset, int32_t yoffset, + uint8_t *RESTRICT dst, int32_t dst_stride) +{ + const int8_t *h_filter = vp8_subpel_filters_msa[xoffset - 1]; + const int8_t *v_filter = vp8_subpel_filters_msa[yoffset - 1]; + + if (yoffset) + { + if (xoffset) + { + switch (xoffset) + { + case 2: + case 4: + case 6: + switch (yoffset) + { + case 2: + case 4: + case 6: + common_hv_6ht_6vt_8w_msa(src, src_stride, dst, + dst_stride, h_filter, + v_filter, 8); + break; + + case 1: + case 3: + case 5: + case 7: + common_hv_6ht_4vt_8w_msa(src, src_stride, dst, + dst_stride, h_filter, + v_filter + 1, 8); + break; + } + break; + + case 1: + case 3: + case 5: + case 7: + switch (yoffset) + { + case 2: + case 4: + case 6: + common_hv_4ht_6vt_8w_msa(src, src_stride, dst, + dst_stride, h_filter + 1, + v_filter, 8); + break; + + case 1: + case 3: + case 5: + case 7: + common_hv_4ht_4vt_8w_msa(src, src_stride, dst, + dst_stride, h_filter + 1, + v_filter + 1, 8); + break; + } + break; + } + } + else + { + switch (yoffset) + { + case 2: + case 4: + case 6: + common_vt_6t_8w_msa(src, src_stride, dst, dst_stride, + v_filter, 8); + break; + + case 1: + case 3: + case 5: + case 7: + common_vt_4t_8w_msa(src, src_stride, dst, dst_stride, + v_filter + 1, 8); + break; + } + } + } + else + { + switch (xoffset) + { + case 0: + vp8_copy_mem8x8(src, src_stride, dst, dst_stride); + break; + case 2: + case 4: + case 6: + common_hz_6t_8w_msa(src, src_stride, dst, dst_stride, h_filter, + 8); + break; + + case 1: + case 3: + case 5: + case 7: + common_hz_4t_8w_msa(src, src_stride, dst, dst_stride, + h_filter + 1, 8); + break; + } + } +} + +void vp8_sixtap_predict16x16_msa(uint8_t *RESTRICT src, int32_t src_stride, + int32_t xoffset, int32_t yoffset, + uint8_t *RESTRICT dst, int32_t dst_stride) +{ + const int8_t *h_filter = vp8_subpel_filters_msa[xoffset - 1]; + const int8_t *v_filter = vp8_subpel_filters_msa[yoffset - 1]; + + if (yoffset) + { + if (xoffset) + { + switch (xoffset) + { + case 2: + case 4: + case 6: + switch (yoffset) + { + case 2: + case 4: + case 6: + common_hv_6ht_6vt_16w_msa(src, src_stride, dst, + dst_stride, h_filter, + v_filter, 16); + break; + + case 1: + case 3: + case 5: + case 7: + common_hv_6ht_4vt_16w_msa(src, src_stride, dst, + dst_stride, h_filter, + v_filter + 1, 16); + break; + } + break; + + case 1: + case 3: + case 5: + case 7: + switch (yoffset) + { + case 2: + case 4: + case 6: + 
common_hv_4ht_6vt_16w_msa(src, src_stride, dst, + dst_stride, h_filter + 1, + v_filter, 16); + break; + + case 1: + case 3: + case 5: + case 7: + common_hv_4ht_4vt_16w_msa(src, src_stride, dst, + dst_stride, h_filter + 1, + v_filter + 1, 16); + break; + } + break; + } + } + else + { + switch (yoffset) + { + case 2: + case 4: + case 6: + common_vt_6t_16w_msa(src, src_stride, dst, dst_stride, + v_filter, 16); + break; + + case 1: + case 3: + case 5: + case 7: + common_vt_4t_16w_msa(src, src_stride, dst, dst_stride, + v_filter + 1, 16); + break; + } + } + } + else + { + switch (xoffset) + { + case 0: + vp8_copy_mem16x16(src, src_stride, dst, dst_stride); + break; + case 2: + case 4: + case 6: + common_hz_6t_16w_msa(src, src_stride, dst, dst_stride, + h_filter, 16); + break; + + case 1: + case 3: + case 5: + case 7: + common_hz_4t_16w_msa(src, src_stride, dst, dst_stride, + h_filter + 1, 16); + break; + } + } +} diff --git a/vp8/common/mips/msa/vp8_macros_msa.h b/vp8/common/mips/msa/vp8_macros_msa.h index e0fec1210..8170ecf1d 100644 --- a/vp8/common/mips/msa/vp8_macros_msa.h +++ b/vp8/common/mips/msa/vp8_macros_msa.h @@ -206,6 +206,22 @@ } #endif // (__mips_isa_rev >= 6) +/* Description : Load 4 words with stride + Arguments : Inputs - psrc, stride + Outputs - out0, out1, out2, out3 + Details : Load word in 'out0' from (psrc) + Load word in 'out1' from (psrc + stride) + Load word in 'out2' from (psrc + 2 * stride) + Load word in 'out3' from (psrc + 3 * stride) +*/ +#define LW4(psrc, stride, out0, out1, out2, out3) \ +{ \ + out0 = LW((psrc)); \ + out1 = LW((psrc) + stride); \ + out2 = LW((psrc) + 2 * stride); \ + out3 = LW((psrc) + 3 * stride); \ +} + /* Description : Store 4 words with stride Arguments : Inputs - in0, in1, in2, in3, pdst, stride Details : Store word from 'in0' to (pdst) @@ -251,6 +267,14 @@ #define LD_UB2(...) LD_B2(v16u8, __VA_ARGS__) #define LD_SB2(...) LD_B2(v16i8, __VA_ARGS__) +#define LD_B3(RTYPE, psrc, stride, out0, out1, out2) \ +{ \ + LD_B2(RTYPE, (psrc), stride, out0, out1); \ + out2 = LD_B(RTYPE, (psrc) + 2 * stride); \ +} +#define LD_UB3(...) LD_B3(v16u8, __VA_ARGS__) +#define LD_SB3(...) LD_B3(v16i8, __VA_ARGS__) + #define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) \ { \ LD_B2(RTYPE, (psrc), stride, out0, out1); \ @@ -259,6 +283,14 @@ #define LD_UB4(...) LD_B4(v16u8, __VA_ARGS__) #define LD_SB4(...) LD_B4(v16i8, __VA_ARGS__) +#define LD_B5(RTYPE, psrc, stride, out0, out1, out2, out3, out4) \ +{ \ + LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3); \ + out4 = LD_B(RTYPE, (psrc) + 4 * stride); \ +} +#define LD_UB5(...) LD_B5(v16u8, __VA_ARGS__) +#define LD_SB5(...) 
+
 #define LD_B8(RTYPE, psrc, stride,                             \
               out0, out1, out2, out3, out4, out5, out6, out7)  \
 {                                                              \
@@ -377,6 +409,31 @@
     ST4x4_UB(in1, in1, 0, 1, 2, 3, pblk_4x8 + 4 * stride, stride);  \
 }
 
+/* Description : Store 8x4 byte block to destination memory from input
+                 vectors
+   Arguments   : Inputs - in0, in1, pdst, stride
+   Details     : Index 0 double word element from 'in0' vector is copied to the
+                 GP register and stored to (pdst)
+                 Index 1 double word element from 'in0' vector is copied to the
+                 GP register and stored to (pdst + stride)
+                 Index 0 double word element from 'in1' vector is copied to the
+                 GP register and stored to (pdst + 2 * stride)
+                 Index 1 double word element from 'in1' vector is copied to the
+                 GP register and stored to (pdst + 3 * stride)
+*/
+#define ST8x4_UB(in0, in1, pdst, stride)                      \
+{                                                             \
+    uint64_t out0_m, out1_m, out2_m, out3_m;                  \
+    uint8_t *pblk_8x4_m = (uint8_t *)(pdst);                  \
+                                                              \
+    out0_m = __msa_copy_u_d((v2i64)in0, 0);                   \
+    out1_m = __msa_copy_u_d((v2i64)in0, 1);                   \
+    out2_m = __msa_copy_u_d((v2i64)in1, 0);                   \
+    out3_m = __msa_copy_u_d((v2i64)in1, 1);                   \
+                                                              \
+    SD4(out0_m, out1_m, out2_m, out3_m, pblk_8x4_m, stride);  \
+}
+
 /* Description : Shuffle byte vector elements as per mask vector
    Arguments   : Inputs  - in0, in1, in2, in3, mask0, mask1
                  Outputs - out0, out1
@@ -391,6 +448,64 @@
 }
 #define VSHF_B2_SB(...) VSHF_B2(v16i8, __VA_ARGS__)
 
+#define VSHF_B3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2,  \
+                out0, out1, out2)                                          \
+{                                                                          \
+    VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1);          \
+    out2 = (RTYPE)__msa_vshf_b((v16i8)mask2, (v16i8)in5, (v16i8)in4);      \
+}
+#define VSHF_B3_SB(...) VSHF_B3(v16i8, __VA_ARGS__)
+
+/* Description : Dot product of byte vector elements
+   Arguments   : Inputs  - mult0, mult1, cnst0, cnst1
+                 Outputs - out0, out1
+                 Return Type - as per RTYPE
+   Details     : Signed byte elements from 'mult0' are multiplied with
+                 signed byte elements from 'cnst0' producing a result
+                 twice the size of the input, i.e. signed halfword.
+                 The multiplication results of adjacent odd-even elements
+                 are added together and written to the 'out0' vector
+*/
+#define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)  \
+{                                                                \
+    out0 = (RTYPE)__msa_dotp_s_h((v16i8)mult0, (v16i8)cnst0);    \
+    out1 = (RTYPE)__msa_dotp_s_h((v16i8)mult1, (v16i8)cnst1);    \
+}
+#define DOTP_SB2_SH(...) DOTP_SB2(v8i16, __VA_ARGS__)
+
+#define DOTP_SB4(RTYPE, mult0, mult1, mult2, mult3,                   \
+                 cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3)  \
+{                                                                     \
+    DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);          \
+    DOTP_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);          \
+}
+#define DOTP_SB4_SH(...) DOTP_SB4(v8i16, __VA_ARGS__)
+
+/* Description : Dot product & addition of byte vector elements
+   Arguments   : Inputs  - mult0, mult1, cnst0, cnst1
+                 Outputs - out0, out1
+                 Return Type - as per RTYPE
+   Details     : Signed byte elements from 'mult0' are multiplied with
+                 signed byte elements from 'cnst0' producing a result
+                 twice the size of the input, i.e. signed halfword.
+                 The multiplication results of adjacent odd-even elements
+                 are added to the 'out0' vector
+*/
+#define DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1)             \
+{                                                                            \
+    out0 = (RTYPE)__msa_dpadd_s_h((v8i16)out0, (v16i8)mult0, (v16i8)cnst0);  \
+    out1 = (RTYPE)__msa_dpadd_s_h((v8i16)out1, (v16i8)mult1, (v16i8)cnst1);  \
+}
+#define DPADD_SB2_SH(...) DPADD_SB2(v8i16, __VA_ARGS__)
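For reference, a scalar model of what __msa_dotp_s_h and __msa_dpadd_s_h compute per halfword lane, as used by the DOTP_SB* and DPADD_SB* wrappers above -- an editor's sketch with hypothetical helper names, not patch code:

    #include <stdint.h>

    /* out[i] = mult[2i] * cnst[2i] + mult[2i+1] * cnst[2i+1] */
    static void dotp_s_h_ref(const int8_t mult[16], const int8_t cnst[16],
                             int16_t out[8])
    {
        int i;

        for (i = 0; i < 8; i++)
            out[i] = (int16_t)(mult[2 * i] * cnst[2 * i] +
                               mult[2 * i + 1] * cnst[2 * i + 1]);
    }

    /* dpadd accumulates the same dot product into an existing vector */
    static void dpadd_s_h_ref(int16_t acc[8], const int8_t mult[16],
                              const int8_t cnst[16])
    {
        int16_t tmp[8];
        int i;

        dotp_s_h_ref(mult, cnst, tmp);
        for (i = 0; i < 8; i++)
            acc[i] = (int16_t)(acc[i] + tmp[i]);  /* modulo, not saturating */
    }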
+
+#define DPADD_SB4(RTYPE, mult0, mult1, mult2, mult3,                   \
+                  cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3)  \
+{                                                                      \
+    DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1);          \
+    DPADD_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3);          \
+}
+#define DPADD_SB4_SH(...) DPADD_SB4(v8i16, __VA_ARGS__)
+
 /* Description : Clips all signed halfword elements of input vector
                  between 0 & 255
    Arguments   : Input - in
@@ -445,6 +560,8 @@
     out0 = (RTYPE)__msa_ilvev_b((v16i8)in1, (v16i8)in0);  \
     out1 = (RTYPE)__msa_ilvev_b((v16i8)in3, (v16i8)in2);  \
 }
+#define ILVEV_B2_UB(...) ILVEV_B2(v16u8, __VA_ARGS__)
+#define ILVEV_B2_SH(...) ILVEV_B2(v8i16, __VA_ARGS__)
 #define ILVEV_B2_SD(...) ILVEV_B2(v2i64, __VA_ARGS__)
 
 /* Description : Interleave even halfword elements from vectors
@@ -501,8 +618,19 @@
     out0 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1);  \
     out1 = (RTYPE)__msa_ilvl_b((v16i8)in2, (v16i8)in3);  \
 }
+#define ILVL_B2_UB(...) ILVL_B2(v16u8, __VA_ARGS__)
+#define ILVL_B2_SB(...) ILVL_B2(v16i8, __VA_ARGS__)
 #define ILVL_B2_SH(...) ILVL_B2(v8i16, __VA_ARGS__)
 
+#define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
+                out0, out1, out2, out3)                         \
+{                                                               \
+    ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1);             \
+    ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3);             \
+}
+#define ILVL_B4_SB(...) ILVL_B4(v16i8, __VA_ARGS__)
+#define ILVL_B4_SH(...) ILVL_B4(v8i16, __VA_ARGS__)
+
 /* Description : Interleave left half of halfword elements from vectors
    Arguments   : Inputs  - in0, in1, in2, in3
                  Outputs - out0, out1
@@ -544,6 +672,8 @@
     out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1);  \
     out1 = (RTYPE)__msa_ilvr_b((v16i8)in2, (v16i8)in3);  \
 }
+#define ILVR_B2_UB(...) ILVR_B2(v16u8, __VA_ARGS__)
+#define ILVR_B2_SB(...) ILVR_B2(v16i8, __VA_ARGS__)
 #define ILVR_B2_SH(...) ILVR_B2(v8i16, __VA_ARGS__)
 #define ILVR_B2_SW(...) ILVR_B2(v4i32, __VA_ARGS__)
 
@@ -553,6 +683,9 @@
     ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1);  \
     ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3);  \
 }
+#define ILVR_B4_UB(...) ILVR_B4(v16u8, __VA_ARGS__)
+#define ILVR_B4_SB(...) ILVR_B4(v16i8, __VA_ARGS__)
+#define ILVR_B4_UH(...) ILVR_B4(v8u16, __VA_ARGS__)
 #define ILVR_B4_SH(...) ILVR_B4(v8i16, __VA_ARGS__)
 #define ILVR_B4_SW(...) ILVR_B4(v4i32, __VA_ARGS__)
 
@@ -642,6 +775,56 @@
 #define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__)
 #define ILVRL_W2_SW(...) ILVRL_W2(v4i32, __VA_ARGS__)
 
+/* Description : Saturate the halfword element values to the max
+                 signed value of (sat_val + 1) bits
+                 The element data width remains unchanged
+   Arguments   : Inputs  - in0, in1, sat_val
+                 Outputs - in place operation
+                 Return Type - as per RTYPE
+   Details     : Each signed halfword element from 'in0' is saturated to the
+                 value generated with (sat_val + 1) bit range
+                 The results are written in place
+*/
+#define SAT_SH2(RTYPE, in0, in1, sat_val)             \
+{                                                     \
+    in0 = (RTYPE)__msa_sat_s_h((v8i16)in0, sat_val);  \
+    in1 = (RTYPE)__msa_sat_s_h((v8i16)in1, sat_val);  \
+}
+#define SAT_SH2_SH(...) SAT_SH2(v8i16, __VA_ARGS__)
+
+#define SAT_SH4(RTYPE, in0, in1, in2, in3, sat_val)  \
+{                                                    \
+    SAT_SH2(RTYPE, in0, in1, sat_val);               \
+    SAT_SH2(RTYPE, in2, in3, sat_val);               \
+}
+#define SAT_SH4_SH(...) SAT_SH4(v8i16, __VA_ARGS__)
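A scalar model of the saturation the SAT_SH* macros perform -- an editor's sketch with a hypothetical helper name, not patch code. The filters saturate intermediate halfwords to the 8-bit signed range (sat_val == 7) before packing back to bytes:

    #include <stdint.h>

    static int16_t sat_s_h_ref(int16_t in, int sat_val)  /* sat_val in 0..15 */
    {
        const int16_t max = (int16_t)((1 << sat_val) - 1);
        const int16_t min = (int16_t)(-(1 << sat_val));

        return in > max ? max : (in < min ? min : in);
    }

    /* e.g. sat_s_h_ref(x, 7) clamps x to [-128, 127] */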
+
+/* Description : Indexed halfword element values are replicated to all
+                 elements in output vector
+   Arguments   : Inputs  - in, idx0, idx1
+                 Outputs - out0, out1
+                 Return Type - as per RTYPE
+   Details     : 'idx0' element value from 'in' vector is replicated to all
+                 elements in 'out0' vector
+                 Valid index range for halfword operation is 0-7
+*/
+#define SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1)  \
+{                                                     \
+    out0 = (RTYPE)__msa_splati_h((v8i16)in, idx0);    \
+    out1 = (RTYPE)__msa_splati_h((v8i16)in, idx1);    \
+}
+#define SPLATI_H2_SB(...) SPLATI_H2(v16i8, __VA_ARGS__)
+#define SPLATI_H2_SH(...) SPLATI_H2(v8i16, __VA_ARGS__)
+
+#define SPLATI_H3(RTYPE, in, idx0, idx1, idx2,     \
+                  out0, out1, out2)                \
+{                                                  \
+    SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1);  \
+    out2 = (RTYPE)__msa_splati_h((v8i16)in, idx2); \
+}
+#define SPLATI_H3_SB(...) SPLATI_H3(v16i8, __VA_ARGS__)
+#define SPLATI_H3_SH(...) SPLATI_H3(v8i16, __VA_ARGS__)
+
 /* Description : Pack even byte elements of vector pairs
    Arguments   : Inputs  - in0, in1, in2, in3
                  Outputs - out0, out1
@@ -723,6 +906,50 @@
 #define PCKOD_D2_UB(...) PCKOD_D2(v16u8, __VA_ARGS__)
 #define PCKOD_D2_SH(...) PCKOD_D2(v8i16, __VA_ARGS__)
 
+/* Description : Each byte element is logically xor'ed with immediate 128
+   Arguments   : Inputs  - in0, in1
+                 Outputs - in place operation
+                 Return Type - as per RTYPE
+   Details     : Each unsigned byte element from input vector 'in0' is
+                 logically xor'ed with 128 and the result is stored in-place.
+*/
+#define XORI_B2_128(RTYPE, in0, in1)             \
+{                                                \
+    in0 = (RTYPE)__msa_xori_b((v16u8)in0, 128);  \
+    in1 = (RTYPE)__msa_xori_b((v16u8)in1, 128);  \
+}
+#define XORI_B2_128_UB(...) XORI_B2_128(v16u8, __VA_ARGS__)
+#define XORI_B2_128_SB(...) XORI_B2_128(v16i8, __VA_ARGS__)
+
+#define XORI_B3_128(RTYPE, in0, in1, in2)        \
+{                                                \
+    XORI_B2_128(RTYPE, in0, in1);                \
+    in2 = (RTYPE)__msa_xori_b((v16u8)in2, 128);  \
+}
+#define XORI_B3_128_SB(...) XORI_B3_128(v16i8, __VA_ARGS__)
+
+#define XORI_B4_128(RTYPE, in0, in1, in2, in3)  \
+{                                               \
+    XORI_B2_128(RTYPE, in0, in1);               \
+    XORI_B2_128(RTYPE, in2, in3);               \
+}
+#define XORI_B4_128_UB(...) XORI_B4_128(v16u8, __VA_ARGS__)
+#define XORI_B4_128_SB(...) XORI_B4_128(v16i8, __VA_ARGS__)
+
+#define XORI_B5_128(RTYPE, in0, in1, in2, in3, in4)  \
+{                                                    \
+    XORI_B3_128(RTYPE, in0, in1, in2);               \
+    XORI_B2_128(RTYPE, in3, in4);                    \
+}
+#define XORI_B5_128_SB(...) XORI_B5_128(v16i8, __VA_ARGS__)
+
+#define XORI_B8_128(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7)  \
+{                                                                   \
+    XORI_B4_128(RTYPE, in0, in1, in2, in3);                         \
+    XORI_B4_128(RTYPE, in4, in5, in6, in7);                         \
+}
+#define XORI_B8_128_SB(...) XORI_B8_128(v16i8, __VA_ARGS__)
+
 /* Description : Arithmetic shift right all elements of vector
                  (generic for all data types)
    Arguments   : Inputs - in0, in1, in2, in3, shift
@@ -756,6 +983,14 @@
 #define SRARI_H2_UH(...) SRARI_H2(v8u16, __VA_ARGS__)
 #define SRARI_H2_SH(...) SRARI_H2(v8i16, __VA_ARGS__)
 
+#define SRARI_H4(RTYPE, in0, in1, in2, in3, shift)  \
+{                                                   \
+    SRARI_H2(RTYPE, in0, in1, shift);               \
+    SRARI_H2(RTYPE, in2, in3, shift);               \
+}
+#define SRARI_H4_UH(...) SRARI_H4(v8u16, __VA_ARGS__)
+#define SRARI_H4_SH(...) SRARI_H4(v8i16, __VA_ARGS__)
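Two small scalar models for the macros above -- editor's sketches with hypothetical names, not patch code. XOR with 128 re-biases unsigned pixels (0..255) into signed bytes (-128..127) so the signed dot-product instructions apply, and srari is an arithmetic shift right with rounding:

    #include <stdint.h>

    static int8_t xori128_ref(uint8_t pix)
    {
        return (int8_t)(pix ^ 0x80);  /* same bit pattern as pix - 128 */
    }

    static int16_t srari_h_ref(int16_t in, int shift)  /* shift in 1..15 */
    {
        return (int16_t)((in + (1 << (shift - 1))) >> shift);
    }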
+
 #define SRARI_W2(RTYPE, in0, in1, shift)            \
 {                                                   \
     in0 = (RTYPE)__msa_srari_w((v4i32)in0, shift);  \
@@ -952,4 +1187,43 @@
     out2 = (v4i32)__msa_ilvr_d((v2i64)s3_m, (v2i64)s1_m);  \
     out3 = (v4i32)__msa_ilvl_d((v2i64)s3_m, (v2i64)s1_m);  \
 }
+
+/* Description : Dot product and addition of 3 signed halfword input vectors
+   Arguments   : Inputs - in0, in1, in2, coeff0, coeff1, coeff2
+                 Output - out0_m
+                 Return Type - signed halfword
+   Details     : Dot product of 'in0' with 'coeff0'
+                 Dot product of 'in1' with 'coeff1'
+                 Dot product of 'in2' with 'coeff2'
+                 Addition of all the 3 vector results
+                 out0_m = (in0 * coeff0) + (in1 * coeff1) + (in2 * coeff2)
+*/
+#define DPADD_SH3_SH(in0, in1, in2, coeff0, coeff1, coeff2)       \
+({                                                                \
+    v8i16 tmp1_m;                                                 \
+    v8i16 out0_m;                                                 \
+                                                                  \
+    out0_m = __msa_dotp_s_h((v16i8)in0, (v16i8)coeff0);           \
+    out0_m = __msa_dpadd_s_h(out0_m, (v16i8)in1, (v16i8)coeff1);  \
+    tmp1_m = __msa_dotp_s_h((v16i8)in2, (v16i8)coeff2);           \
+    out0_m = __msa_adds_s_h(out0_m, tmp1_m);                      \
+                                                                  \
+    out0_m;                                                       \
+})
+
+/* Description : Pack even elements of input vectors & xor with 128
+   Arguments   : Inputs - in0, in1
+                 Output - out_m
+                 Return Type - unsigned byte
+   Details     : Signed byte even elements from 'in0' and 'in1' are packed
+                 together in one vector and the resulting vector is xor'ed
+                 with 128 to shift the range from signed to unsigned byte
+*/
+#define PCKEV_XORI128_UB(in0, in1)                         \
+({                                                         \
+    v16u8 out_m;                                           \
+    out_m = (v16u8)__msa_pckev_b((v16i8)in1, (v16i8)in0);  \
+    out_m = (v16u8)__msa_xori_b((v16u8)out_m, 128);        \
+    out_m;                                                 \
+})
 #endif  /* VP8_COMMON_MIPS_MSA_VP8_MACROS_MSA_H_ */
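DPADD_SH3_SH above is the heart of the 6-tap horizontal filter: the six taps are applied as three 2-tap dot products whose partial sums are combined, with the final add saturating. A close scalar approximation -- an editor's sketch with a hypothetical name, not patch code; the vector version accumulates the first two products modulo and saturates only the last add:

    #include <stdint.h>

    static int16_t dpadd_sh3_ref(const int8_t in[6], const int8_t coeff[6])
    {
        int32_t sum = 0;
        int k;

        for (k = 0; k < 6; k++)
            sum += in[k] * coeff[k];

        if (sum > INT16_MAX) sum = INT16_MAX;  /* saturating final add */
        if (sum < INT16_MIN) sum = INT16_MIN;

        return (int16_t)sum;
    }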
diff --git a/vp8/common/rtcd_defs.pl b/vp8/common/rtcd_defs.pl
index 1dbe80e1d..3a278f7a8 100644
--- a/vp8/common/rtcd_defs.pl
+++ b/vp8/common/rtcd_defs.pl
@@ -204,23 +204,23 @@ if (vpx_config("CONFIG_POSTPROC") eq "yes") {
 # Subpixel
 #
 add_proto qw/void vp8_sixtap_predict16x16/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch";
-specialize qw/vp8_sixtap_predict16x16 mmx sse2 ssse3 media neon dspr2/;
+specialize qw/vp8_sixtap_predict16x16 mmx sse2 ssse3 media neon dspr2 msa/;
 $vp8_sixtap_predict16x16_media=vp8_sixtap_predict16x16_armv6;
 $vp8_sixtap_predict16x16_dspr2=vp8_sixtap_predict16x16_dspr2;
 
 add_proto qw/void vp8_sixtap_predict8x8/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch";
-specialize qw/vp8_sixtap_predict8x8 mmx sse2 ssse3 media neon dspr2/;
+specialize qw/vp8_sixtap_predict8x8 mmx sse2 ssse3 media neon dspr2 msa/;
 $vp8_sixtap_predict8x8_media=vp8_sixtap_predict8x8_armv6;
 $vp8_sixtap_predict8x8_dspr2=vp8_sixtap_predict8x8_dspr2;
 
 add_proto qw/void vp8_sixtap_predict8x4/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch";
-specialize qw/vp8_sixtap_predict8x4 mmx sse2 ssse3 media neon dspr2/;
+specialize qw/vp8_sixtap_predict8x4 mmx sse2 ssse3 media neon dspr2 msa/;
 $vp8_sixtap_predict8x4_media=vp8_sixtap_predict8x4_armv6;
 $vp8_sixtap_predict8x4_dspr2=vp8_sixtap_predict8x4_dspr2;
 
 add_proto qw/void vp8_sixtap_predict4x4/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch";
 #TODO(johannkoenig): fix the neon version https://code.google.com/p/webm/issues/detail?id=817
-specialize qw/vp8_sixtap_predict4x4 mmx ssse3 media dspr2/;
+specialize qw/vp8_sixtap_predict4x4 mmx ssse3 media dspr2 msa/;
 $vp8_sixtap_predict4x4_media=vp8_sixtap_predict4x4_armv6;
 $vp8_sixtap_predict4x4_dspr2=vp8_sixtap_predict4x4_dspr2;
diff --git a/vp8/vp8_common.mk b/vp8/vp8_common.mk
index 9f6e9364a..8248cb70c 100644
--- a/vp8/vp8_common.mk
+++ b/vp8/vp8_common.mk
@@ -116,6 +116,7 @@ VP8_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/dequantize_dspr2.c
 # common (c)
 VP8_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct_msa.c
 VP8_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/loopfilter_filters_msa.c
+VP8_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/sixtap_filter_msa.c
 VP8_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp8_macros_msa.h
 
 # common (c)
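With the rtcd_defs.pl hunk above, the generated vp8_rtcd.h resolves the generic sixtap entry points to the *_msa versions when MSA is available, so callers never name the MSA functions directly. A minimal usage sketch -- an editor's illustration assuming an MSA-enabled build, not patch code:

    #include "./vp8_rtcd.h"

    static void predict_block(unsigned char *src, int src_pitch,
                              int xofst, int yofst,
                              unsigned char *dst, int dst_pitch)
    {
        /* resolves to vp8_sixtap_predict16x16_msa on MSA-capable MIPS */
        vp8_sixtap_predict16x16(src, src_pitch, xofst, yofst, dst, dst_pitch);
    }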