Merge "mips msa vpx_dsp sad sad4d avgsad optimization"
This commit is contained in:
commit 29a17db913
@@ -1114,4 +1114,98 @@ const SadMxNx4Param x4d_avx2_tests[] = {
INSTANTIATE_TEST_CASE_P(AVX2, SADx4Test, ::testing::ValuesIn(x4d_avx2_tests));
#endif  // HAVE_AVX2

//------------------------------------------------------------------------------
// MIPS functions
#if HAVE_MSA
const SadMxNFunc sad64x64_msa = vpx_sad64x64_msa;
const SadMxNFunc sad64x32_msa = vpx_sad64x32_msa;
const SadMxNFunc sad32x64_msa = vpx_sad32x64_msa;
const SadMxNFunc sad32x32_msa = vpx_sad32x32_msa;
const SadMxNFunc sad32x16_msa = vpx_sad32x16_msa;
const SadMxNFunc sad16x32_msa = vpx_sad16x32_msa;
const SadMxNFunc sad16x16_msa = vpx_sad16x16_msa;
const SadMxNFunc sad16x8_msa = vpx_sad16x8_msa;
const SadMxNFunc sad8x16_msa = vpx_sad8x16_msa;
const SadMxNFunc sad8x8_msa = vpx_sad8x8_msa;
const SadMxNFunc sad8x4_msa = vpx_sad8x4_msa;
const SadMxNFunc sad4x8_msa = vpx_sad4x8_msa;
const SadMxNFunc sad4x4_msa = vpx_sad4x4_msa;
const SadMxNParam msa_tests[] = {
  make_tuple(64, 64, sad64x64_msa, -1),
  make_tuple(64, 32, sad64x32_msa, -1),
  make_tuple(32, 64, sad32x64_msa, -1),
  make_tuple(32, 32, sad32x32_msa, -1),
  make_tuple(32, 16, sad32x16_msa, -1),
  make_tuple(16, 32, sad16x32_msa, -1),
  make_tuple(16, 16, sad16x16_msa, -1),
  make_tuple(16, 8, sad16x8_msa, -1),
  make_tuple(8, 16, sad8x16_msa, -1),
  make_tuple(8, 8, sad8x8_msa, -1),
  make_tuple(8, 4, sad8x4_msa, -1),
  make_tuple(4, 8, sad4x8_msa, -1),
  make_tuple(4, 4, sad4x4_msa, -1),
};
INSTANTIATE_TEST_CASE_P(MSA, SADTest, ::testing::ValuesIn(msa_tests));

const SadMxNAvgFunc sad64x64_avg_msa = vpx_sad64x64_avg_msa;
const SadMxNAvgFunc sad64x32_avg_msa = vpx_sad64x32_avg_msa;
const SadMxNAvgFunc sad32x64_avg_msa = vpx_sad32x64_avg_msa;
const SadMxNAvgFunc sad32x32_avg_msa = vpx_sad32x32_avg_msa;
const SadMxNAvgFunc sad32x16_avg_msa = vpx_sad32x16_avg_msa;
const SadMxNAvgFunc sad16x32_avg_msa = vpx_sad16x32_avg_msa;
const SadMxNAvgFunc sad16x16_avg_msa = vpx_sad16x16_avg_msa;
const SadMxNAvgFunc sad16x8_avg_msa = vpx_sad16x8_avg_msa;
const SadMxNAvgFunc sad8x16_avg_msa = vpx_sad8x16_avg_msa;
const SadMxNAvgFunc sad8x8_avg_msa = vpx_sad8x8_avg_msa;
const SadMxNAvgFunc sad8x4_avg_msa = vpx_sad8x4_avg_msa;
const SadMxNAvgFunc sad4x8_avg_msa = vpx_sad4x8_avg_msa;
const SadMxNAvgFunc sad4x4_avg_msa = vpx_sad4x4_avg_msa;
const SadMxNAvgParam avg_msa_tests[] = {
  make_tuple(64, 64, sad64x64_avg_msa, -1),
  make_tuple(64, 32, sad64x32_avg_msa, -1),
  make_tuple(32, 64, sad32x64_avg_msa, -1),
  make_tuple(32, 32, sad32x32_avg_msa, -1),
  make_tuple(32, 16, sad32x16_avg_msa, -1),
  make_tuple(16, 32, sad16x32_avg_msa, -1),
  make_tuple(16, 16, sad16x16_avg_msa, -1),
  make_tuple(16, 8, sad16x8_avg_msa, -1),
  make_tuple(8, 16, sad8x16_avg_msa, -1),
  make_tuple(8, 8, sad8x8_avg_msa, -1),
  make_tuple(8, 4, sad8x4_avg_msa, -1),
  make_tuple(4, 8, sad4x8_avg_msa, -1),
  make_tuple(4, 4, sad4x4_avg_msa, -1),
};
INSTANTIATE_TEST_CASE_P(MSA, SADavgTest, ::testing::ValuesIn(avg_msa_tests));

const SadMxNx4Func sad64x64x4d_msa = vpx_sad64x64x4d_msa;
const SadMxNx4Func sad64x32x4d_msa = vpx_sad64x32x4d_msa;
const SadMxNx4Func sad32x64x4d_msa = vpx_sad32x64x4d_msa;
const SadMxNx4Func sad32x32x4d_msa = vpx_sad32x32x4d_msa;
const SadMxNx4Func sad32x16x4d_msa = vpx_sad32x16x4d_msa;
const SadMxNx4Func sad16x32x4d_msa = vpx_sad16x32x4d_msa;
const SadMxNx4Func sad16x16x4d_msa = vpx_sad16x16x4d_msa;
const SadMxNx4Func sad16x8x4d_msa = vpx_sad16x8x4d_msa;
const SadMxNx4Func sad8x16x4d_msa = vpx_sad8x16x4d_msa;
const SadMxNx4Func sad8x8x4d_msa = vpx_sad8x8x4d_msa;
const SadMxNx4Func sad8x4x4d_msa = vpx_sad8x4x4d_msa;
const SadMxNx4Func sad4x8x4d_msa = vpx_sad4x8x4d_msa;
const SadMxNx4Func sad4x4x4d_msa = vpx_sad4x4x4d_msa;
const SadMxNx4Param x4d_msa_tests[] = {
  make_tuple(64, 64, sad64x64x4d_msa, -1),
  make_tuple(64, 32, sad64x32x4d_msa, -1),
  make_tuple(32, 64, sad32x64x4d_msa, -1),
  make_tuple(32, 32, sad32x32x4d_msa, -1),
  make_tuple(32, 16, sad32x16x4d_msa, -1),
  make_tuple(16, 32, sad16x32x4d_msa, -1),
  make_tuple(16, 16, sad16x16x4d_msa, -1),
  make_tuple(16, 8, sad16x8x4d_msa, -1),
  make_tuple(8, 16, sad8x16x4d_msa, -1),
  make_tuple(8, 8, sad8x8x4d_msa, -1),
  make_tuple(8, 4, sad8x4x4d_msa, -1),
  make_tuple(4, 8, sad4x8x4d_msa, -1),
  make_tuple(4, 4, sad4x4x4d_msa, -1),
};
INSTANTIATE_TEST_CASE_P(MSA, SADx4Test, ::testing::ValuesIn(x4d_msa_tests));
#endif  // HAVE_MSA

}  // namespace
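For reference, each vpx_sadMxN function wired into the tables above computes a plain sum of absolute differences over an MxN pixel block (the trailing -1 fills the tuple's bit-depth slot for the 8-bit case). A minimal scalar sketch of that contract, with an illustrative helper name that is not part of the patch:

#include <stdint.h>
#include <stdlib.h>

/* Scalar reference for the MxN SAD that the MSA kernels accelerate:
 * the sum of |src - ref| over the block, with independent row strides. */
static uint32_t sad_mxn_c(const uint8_t *src, int src_stride,
                          const uint8_t *ref, int ref_stride,
                          int width, int height) {
  uint32_t sad = 0;
  int row, col;
  for (row = 0; row < height; ++row) {
    for (col = 0; col < width; ++col) sad += abs(src[col] - ref[col]);
    src += src_stride;
    ref += ref_stride;
  }
  return sad;
}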
@@ -82,12 +82,24 @@
}
#define LD_UB2(...) LD_B2(v16u8, __VA_ARGS__)

#define LD_B3(RTYPE, psrc, stride, out0, out1, out2) {  \
  LD_B2(RTYPE, (psrc), stride, out0, out1);  \
  out2 = LD_B(RTYPE, (psrc) + 2 * stride);  \
}
#define LD_UB3(...) LD_B3(v16u8, __VA_ARGS__)

#define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) {  \
  LD_B2(RTYPE, (psrc), stride, out0, out1);  \
  LD_B2(RTYPE, (psrc) + 2 * stride, stride, out2, out3);  \
}
#define LD_UB4(...) LD_B4(v16u8, __VA_ARGS__)

#define LD_B5(RTYPE, psrc, stride, out0, out1, out2, out3, out4) {  \
  LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3);  \
  out4 = LD_B(RTYPE, (psrc) + 4 * stride);  \
}
#define LD_UB5(...) LD_B5(v16u8, __VA_ARGS__)

/* Description : Load vectors with 8 halfword elements with stride
   Arguments   : Inputs  - psrc, stride
                 Outputs - out0, out1
@@ -105,6 +117,40 @@
}
#define LD_SH4(...) LD_H4(v8i16, __VA_ARGS__)

/* Description : Average with rounding, (in0 + in1 + 1) / 2
   Arguments   : Inputs  - in0, in1, in2, in3
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Each unsigned byte element from the 'in0' vector is added to
                 the corresponding unsigned byte element from the 'in1'
                 vector, then the average with rounding is calculated and
                 written to 'out0'.
*/
#define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) {  \
  out0 = (RTYPE)__msa_aver_u_b((v16u8)in0, (v16u8)in1);  \
  out1 = (RTYPE)__msa_aver_u_b((v16u8)in2, (v16u8)in3);  \
}
#define AVER_UB2_UB(...) AVER_UB2(v16u8, __VA_ARGS__)

#define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                 out0, out1, out2, out3) {  \
  AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1)  \
  AVER_UB2(RTYPE, in4, in5, in6, in7, out2, out3)  \
}
#define AVER_UB4_UB(...) AVER_UB4(v16u8, __VA_ARGS__)
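AVER_UB2/AVER_UB4 map to one MSA instruction per output vector; per byte lane, __msa_aver_u_b behaves like this scalar sketch (illustrative helper, not patch code):

#include <stdint.h>

/* Rounded average of two unsigned bytes, (a + b + 1) >> 1, computed in
 * widened arithmetic so the intermediate sum cannot wrap. */
static uint8_t aver_u8(uint8_t a, uint8_t b) {
  return (uint8_t)(((uint16_t)a + (uint16_t)b + 1) >> 1);
}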

/* Description : Immediate number of elements to slide
   Arguments   : Inputs  - in0_0, in0_1, in1_0, in1_1, slide_val
                 Outputs - out0, out1
                 Return Type - as per RTYPE
   Details     : Byte elements from the 'in0_0' vector are slid into 'in1_0'
                 by the value specified in 'slide_val'.
*/
#define SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val) {  \
  out0 = (RTYPE)__msa_sldi_b((v16i8)in0_0, (v16i8)in1_0, slide_val);  \
  out1 = (RTYPE)__msa_sldi_b((v16i8)in0_1, (v16i8)in1_1, slide_val);  \
}
#define SLDI_B2_UB(...) SLDI_B2(v16u8, __VA_ARGS__)

/* Description : Dot product & addition of halfword vector elements
   Arguments   : Inputs  - mult0, mult1, cnst0, cnst1
                 Outputs - out0, out1
@@ -155,6 +201,26 @@
  sum_m;  \
})

/* Description : Horizontal addition of 8 unsigned halfword elements
   Arguments   : Inputs  - in (unsigned halfword vector)
                 Outputs - sum_m (u32 sum)
                 Return Type - unsigned word
   Details     : The 8 unsigned halfword elements of the input vector are
                 added together and the resulting integer sum is returned.
*/
#define HADD_UH_U32(in) ({  \
  v4u32 res_m;  \
  v2u64 res0_m, res1_m;  \
  uint32_t sum_m;  \
  \
  res_m = __msa_hadd_u_w((v8u16)in, (v8u16)in);  \
  res0_m = __msa_hadd_u_d(res_m, res_m);  \
  res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1);  \
  res0_m = res0_m + res1_m;  \
  sum_m = __msa_copy_u_w((v4i32)res0_m, 0);  \
  sum_m;  \
})
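HADD_UH_U32 reduces one v8u16 vector to a single uint32_t via pairwise widening adds (u16 lanes to u32, u32 to u64, then a final fold). Modelling the vector as a plain array, the value it must produce is simply:

#include <stdint.h>

/* Scalar model of HADD_UH_U32: the sum of the 8 unsigned halfword lanes.
 * The MSA version reaches the same value in log2-style reduction steps. */
static uint32_t hadd_u16x8(const uint16_t lanes[8]) {
  uint32_t sum = 0;
  int i;
  for (i = 0; i < 8; ++i) sum += lanes[i];
  return sum;
}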

/* Description : Horizontal subtraction of unsigned byte vector elements
   Arguments   : Inputs  - in0, in1
                 Outputs - out0, out1
@@ -169,6 +235,27 @@
}
#define HSUB_UB2_SH(...) HSUB_UB2(v8i16, __VA_ARGS__)

/* Description : SAD (Sum of Absolute Differences)
   Arguments   : Inputs  - in0, in1, ref0, ref1
                 Outputs - sad_m (halfword vector)
                 Return Type - unsigned halfword
   Details     : The absolute difference of all byte elements of 'in0' and
                 'ref0' is calculated and preserved in 'diff0'. Adjacent
                 even-odd pairs are then added together to generate 8
                 halfword results.
*/
#define SAD_UB2_UH(in0, in1, ref0, ref1) ({  \
  v16u8 diff0_m, diff1_m;  \
  v8u16 sad_m = { 0 };  \
  \
  diff0_m = __msa_asub_u_b((v16u8)in0, (v16u8)ref0);  \
  diff1_m = __msa_asub_u_b((v16u8)in1, (v16u8)ref1);  \
  \
  sad_m += __msa_hadd_u_h((v16u8)diff0_m, (v16u8)diff0_m);  \
  sad_m += __msa_hadd_u_h((v16u8)diff1_m, (v16u8)diff1_m);  \
  \
  sad_m;  \
})
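SAD_UB2_UH is the workhorse of this patch: two 16-byte absolute-difference vectors folded into 8 running u16 totals. Modelled on plain arrays (illustrative helper, not patch code):

#include <stdint.h>
#include <stdlib.h>

/* Scalar model of SAD_UB2_UH: absolute byte differences for two 16-byte
 * vectors, folded into 8 u16 lanes by adding adjacent byte pairs. */
static void sad_ub2_uh_c(const uint8_t in0[16], const uint8_t in1[16],
                         const uint8_t ref0[16], const uint8_t ref1[16],
                         uint16_t sad[8]) {
  int i;
  for (i = 0; i < 8; ++i) {
    sad[i] = (uint16_t)(abs(in0[2 * i] - ref0[2 * i]) +
                        abs(in0[2 * i + 1] - ref0[2 * i + 1]) +
                        abs(in1[2 * i] - ref1[2 * i]) +
                        abs(in1[2 * i + 1] - ref1[2 * i + 1]));
  }
}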

/* Description : Set element n of input vector to GPR value
   Arguments   : Inputs - in0, in1, in2, in3
                 Output - out

vpx_dsp/mips/sad_msa.c (new file, 787 lines)
@@ -0,0 +1,787 @@
/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/mips/macros_msa.h"

static uint32_t sad_4width_msa(const uint8_t *src_ptr, int32_t src_stride,
                               const uint8_t *ref_ptr, int32_t ref_stride,
                               int32_t height) {
  int32_t ht_cnt;
  uint32_t src0, src1, src2, src3, ref0, ref1, ref2, ref3;
  v16u8 src = { 0 };
  v16u8 ref = { 0 };
  v16u8 diff;
  v8u16 sad = { 0 };

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    LW4(src_ptr, src_stride, src0, src1, src2, src3);
    src_ptr += (4 * src_stride);
    LW4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3);
    ref_ptr += (4 * ref_stride);

    INSERT_W4_UB(src0, src1, src2, src3, src);
    INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);

    diff = __msa_asub_u_b(src, ref);
    sad += __msa_hadd_u_h(diff, diff);
  }

  return HADD_UH_U32(sad);
}
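Blocks only 4 pixels wide would waste most of a 16-byte vector, so sad_4width_msa first gathers four rows into one register via LW4 + INSERT_W4_UB. The equivalent scalar gather (illustrative sketch, not patch code):

#include <stdint.h>
#include <string.h>

/* Scalar model of the LW4 + INSERT_W4_UB packing step: copy four 4-byte
 * rows, 'stride' apart, into one contiguous 16-byte block so a single
 * vector op can cover an entire 4x4 tile. */
static void pack_4x4_rows(const uint8_t *p, int stride, uint8_t out[16]) {
  int row;
  for (row = 0; row < 4; ++row) memcpy(out + 4 * row, p + row * stride, 4);
}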

static uint32_t sad_8width_msa(const uint8_t *src, int32_t src_stride,
                               const uint8_t *ref, int32_t ref_stride,
                               int32_t height) {
  int32_t ht_cnt;
  v16u8 src0, src1, src2, src3, ref0, ref1, ref2, ref3;
  v8u16 sad = { 0 };

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    LD_UB4(src, src_stride, src0, src1, src2, src3);
    src += (4 * src_stride);
    LD_UB4(ref, ref_stride, ref0, ref1, ref2, ref3);
    ref += (4 * ref_stride);

    PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2,
                src0, src1, ref0, ref1);
    sad += SAD_UB2_UH(src0, src1, ref0, ref1);
  }

  return HADD_UH_U32(sad);
}

static uint32_t sad_16width_msa(const uint8_t *src, int32_t src_stride,
                                const uint8_t *ref, int32_t ref_stride,
                                int32_t height) {
  int32_t ht_cnt;
  v16u8 src0, src1, ref0, ref1;
  v8u16 sad = { 0 };

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    LD_UB2(src, src_stride, src0, src1);
    src += (2 * src_stride);
    LD_UB2(ref, ref_stride, ref0, ref1);
    ref += (2 * ref_stride);
    sad += SAD_UB2_UH(src0, src1, ref0, ref1);

    LD_UB2(src, src_stride, src0, src1);
    src += (2 * src_stride);
    LD_UB2(ref, ref_stride, ref0, ref1);
    ref += (2 * ref_stride);
    sad += SAD_UB2_UH(src0, src1, ref0, ref1);
  }

  return HADD_UH_U32(sad);
}

static uint32_t sad_32width_msa(const uint8_t *src, int32_t src_stride,
                                const uint8_t *ref, int32_t ref_stride,
                                int32_t height) {
  int32_t ht_cnt;
  v16u8 src0, src1, ref0, ref1;
  v8u16 sad = { 0 };

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    LD_UB2(src, 16, src0, src1);
    src += src_stride;
    LD_UB2(ref, 16, ref0, ref1);
    ref += ref_stride;
    sad += SAD_UB2_UH(src0, src1, ref0, ref1);

    LD_UB2(src, 16, src0, src1);
    src += src_stride;
    LD_UB2(ref, 16, ref0, ref1);
    ref += ref_stride;
    sad += SAD_UB2_UH(src0, src1, ref0, ref1);

    LD_UB2(src, 16, src0, src1);
    src += src_stride;
    LD_UB2(ref, 16, ref0, ref1);
    ref += ref_stride;
    sad += SAD_UB2_UH(src0, src1, ref0, ref1);

    LD_UB2(src, 16, src0, src1);
    src += src_stride;
    LD_UB2(ref, 16, ref0, ref1);
    ref += ref_stride;
    sad += SAD_UB2_UH(src0, src1, ref0, ref1);
  }

  return HADD_UH_U32(sad);
}

static uint32_t sad_64width_msa(const uint8_t *src, int32_t src_stride,
                                const uint8_t *ref, int32_t ref_stride,
                                int32_t height) {
  int32_t ht_cnt;
  uint32_t sad = 0;
  v16u8 src0, src1, src2, src3;
  v16u8 ref0, ref1, ref2, ref3;
  v8u16 sad0 = { 0 };
  v8u16 sad1 = { 0 };

  for (ht_cnt = (height >> 1); ht_cnt--;) {
    LD_UB4(src, 16, src0, src1, src2, src3);
    src += src_stride;
    LD_UB4(ref, 16, ref0, ref1, ref2, ref3);
    ref += ref_stride;
    sad0 += SAD_UB2_UH(src0, src1, ref0, ref1);
    sad1 += SAD_UB2_UH(src2, src3, ref2, ref3);

    LD_UB4(src, 16, src0, src1, src2, src3);
    src += src_stride;
    LD_UB4(ref, 16, ref0, ref1, ref2, ref3);
    ref += ref_stride;
    sad0 += SAD_UB2_UH(src0, src1, ref0, ref1);
    sad1 += SAD_UB2_UH(src2, src3, ref2, ref3);
  }

  sad = HADD_UH_U32(sad0);
  sad += HADD_UH_U32(sad1);

  return sad;
}
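sad_64width_msa splits its accumulation across sad0 and sad1 because a single v8u16 accumulator would sit right at the overflow edge for 64x64 blocks; a quick bound check of that design choice (editorial arithmetic, not patch code):

#include <assert.h>
#include <stdint.h>

int main(void) {
  /* Worst case added to one u16 lane per SAD_UB2_UH call: two adjacent
   * byte diffs (<= 255 each) from each of two 16-byte vectors. */
  const uint32_t per_call = 4 * 255; /* 1020 */
  /* 64x64 block: (height >> 1) = 32 iterations, 2 calls per accumulator,
   * so each accumulator lane absorbs 64 calls. */
  assert(64 * per_call <= UINT16_MAX);  /* 65280 <= 65535: fits */
  /* A single accumulator taking all 128 calls would overflow (130560). */
  assert(128 * per_call > UINT16_MAX);
  return 0;
}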

static void sad_4width_x4d_msa(const uint8_t *src_ptr, int32_t src_stride,
                               const uint8_t * const aref_ptr[],
                               int32_t ref_stride,
                               int32_t height, uint32_t *sad_array) {
  const uint8_t *ref0_ptr, *ref1_ptr, *ref2_ptr, *ref3_ptr;
  int32_t ht_cnt;
  uint32_t src0, src1, src2, src3;
  uint32_t ref0, ref1, ref2, ref3;
  v16u8 src = { 0 };
  v16u8 ref = { 0 };
  v16u8 diff;
  v8u16 sad0 = { 0 };
  v8u16 sad1 = { 0 };
  v8u16 sad2 = { 0 };
  v8u16 sad3 = { 0 };

  ref0_ptr = aref_ptr[0];
  ref1_ptr = aref_ptr[1];
  ref2_ptr = aref_ptr[2];
  ref3_ptr = aref_ptr[3];

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    LW4(src_ptr, src_stride, src0, src1, src2, src3);
    INSERT_W4_UB(src0, src1, src2, src3, src);
    src_ptr += (4 * src_stride);

    LW4(ref0_ptr, ref_stride, ref0, ref1, ref2, ref3);
    INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
    ref0_ptr += (4 * ref_stride);

    diff = __msa_asub_u_b(src, ref);
    sad0 += __msa_hadd_u_h(diff, diff);

    LW4(ref1_ptr, ref_stride, ref0, ref1, ref2, ref3);
    INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
    ref1_ptr += (4 * ref_stride);

    diff = __msa_asub_u_b(src, ref);
    sad1 += __msa_hadd_u_h(diff, diff);

    LW4(ref2_ptr, ref_stride, ref0, ref1, ref2, ref3);
    INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
    ref2_ptr += (4 * ref_stride);

    diff = __msa_asub_u_b(src, ref);
    sad2 += __msa_hadd_u_h(diff, diff);

    LW4(ref3_ptr, ref_stride, ref0, ref1, ref2, ref3);
    INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
    ref3_ptr += (4 * ref_stride);

    diff = __msa_asub_u_b(src, ref);
    sad3 += __msa_hadd_u_h(diff, diff);
  }

  sad_array[0] = HADD_UH_U32(sad0);
  sad_array[1] = HADD_UH_U32(sad1);
  sad_array[2] = HADD_UH_U32(sad2);
  sad_array[3] = HADD_UH_U32(sad3);
}
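The x4d kernels score one source block against four reference candidates per pass, which is how the encoder's motion search consumes them. Their contract, restated in scalar form (illustrative helper, not patch code):

#include <stdint.h>
#include <stdlib.h>

/* Scalar contract of the x4d kernels: sads[i] = SAD(src, refs[i]) for
 * four candidate references that share one row stride. */
static void sad_mxn_x4d_c(const uint8_t *src, int src_stride,
                          const uint8_t *const refs[4], int ref_stride,
                          int width, int height, uint32_t sads[4]) {
  int i, row, col;
  for (i = 0; i < 4; ++i) {
    const uint8_t *s = src;
    const uint8_t *r = refs[i];
    sads[i] = 0;
    for (row = 0; row < height; ++row) {
      for (col = 0; col < width; ++col) sads[i] += abs(s[col] - r[col]);
      s += src_stride;
      r += ref_stride;
    }
  }
}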

static void sad_8width_x4d_msa(const uint8_t *src_ptr, int32_t src_stride,
                               const uint8_t * const aref_ptr[],
                               int32_t ref_stride,
                               int32_t height, uint32_t *sad_array) {
  int32_t ht_cnt;
  const uint8_t *ref0_ptr, *ref1_ptr, *ref2_ptr, *ref3_ptr;
  v16u8 src0, src1, src2, src3;
  v16u8 ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7;
  v16u8 ref8, ref9, ref10, ref11, ref12, ref13, ref14, ref15;
  v8u16 sad0 = { 0 };
  v8u16 sad1 = { 0 };
  v8u16 sad2 = { 0 };
  v8u16 sad3 = { 0 };

  ref0_ptr = aref_ptr[0];
  ref1_ptr = aref_ptr[1];
  ref2_ptr = aref_ptr[2];
  ref3_ptr = aref_ptr[3];

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    LD_UB4(src_ptr, src_stride, src0, src1, src2, src3);
    src_ptr += (4 * src_stride);
    LD_UB4(ref0_ptr, ref_stride, ref0, ref1, ref2, ref3);
    ref0_ptr += (4 * ref_stride);
    LD_UB4(ref1_ptr, ref_stride, ref4, ref5, ref6, ref7);
    ref1_ptr += (4 * ref_stride);
    LD_UB4(ref2_ptr, ref_stride, ref8, ref9, ref10, ref11);
    ref2_ptr += (4 * ref_stride);
    LD_UB4(ref3_ptr, ref_stride, ref12, ref13, ref14, ref15);
    ref3_ptr += (4 * ref_stride);

    PCKEV_D2_UB(src1, src0, src3, src2, src0, src1);
    PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1);
    sad0 += SAD_UB2_UH(src0, src1, ref0, ref1);

    PCKEV_D2_UB(ref5, ref4, ref7, ref6, ref0, ref1);
    sad1 += SAD_UB2_UH(src0, src1, ref0, ref1);

    PCKEV_D2_UB(ref9, ref8, ref11, ref10, ref0, ref1);
    sad2 += SAD_UB2_UH(src0, src1, ref0, ref1);

    PCKEV_D2_UB(ref13, ref12, ref15, ref14, ref0, ref1);
    sad3 += SAD_UB2_UH(src0, src1, ref0, ref1);
  }

  sad_array[0] = HADD_UH_U32(sad0);
  sad_array[1] = HADD_UH_U32(sad1);
  sad_array[2] = HADD_UH_U32(sad2);
  sad_array[3] = HADD_UH_U32(sad3);
}

static void sad_16width_x4d_msa(const uint8_t *src_ptr, int32_t src_stride,
                                const uint8_t * const aref_ptr[],
                                int32_t ref_stride,
                                int32_t height, uint32_t *sad_array) {
  int32_t ht_cnt;
  const uint8_t *ref0_ptr, *ref1_ptr, *ref2_ptr, *ref3_ptr;
  v16u8 src, ref0, ref1, ref2, ref3, diff;
  v8u16 sad0 = { 0 };
  v8u16 sad1 = { 0 };
  v8u16 sad2 = { 0 };
  v8u16 sad3 = { 0 };

  ref0_ptr = aref_ptr[0];
  ref1_ptr = aref_ptr[1];
  ref2_ptr = aref_ptr[2];
  ref3_ptr = aref_ptr[3];

  for (ht_cnt = (height >> 1); ht_cnt--;) {
    src = LD_UB(src_ptr);
    src_ptr += src_stride;
    ref0 = LD_UB(ref0_ptr);
    ref0_ptr += ref_stride;
    ref1 = LD_UB(ref1_ptr);
    ref1_ptr += ref_stride;
    ref2 = LD_UB(ref2_ptr);
    ref2_ptr += ref_stride;
    ref3 = LD_UB(ref3_ptr);
    ref3_ptr += ref_stride;

    diff = __msa_asub_u_b(src, ref0);
    sad0 += __msa_hadd_u_h(diff, diff);
    diff = __msa_asub_u_b(src, ref1);
    sad1 += __msa_hadd_u_h(diff, diff);
    diff = __msa_asub_u_b(src, ref2);
    sad2 += __msa_hadd_u_h(diff, diff);
    diff = __msa_asub_u_b(src, ref3);
    sad3 += __msa_hadd_u_h(diff, diff);

    src = LD_UB(src_ptr);
    src_ptr += src_stride;
    ref0 = LD_UB(ref0_ptr);
    ref0_ptr += ref_stride;
    ref1 = LD_UB(ref1_ptr);
    ref1_ptr += ref_stride;
    ref2 = LD_UB(ref2_ptr);
    ref2_ptr += ref_stride;
    ref3 = LD_UB(ref3_ptr);
    ref3_ptr += ref_stride;

    diff = __msa_asub_u_b(src, ref0);
    sad0 += __msa_hadd_u_h(diff, diff);
    diff = __msa_asub_u_b(src, ref1);
    sad1 += __msa_hadd_u_h(diff, diff);
    diff = __msa_asub_u_b(src, ref2);
    sad2 += __msa_hadd_u_h(diff, diff);
    diff = __msa_asub_u_b(src, ref3);
    sad3 += __msa_hadd_u_h(diff, diff);
  }

  sad_array[0] = HADD_UH_U32(sad0);
  sad_array[1] = HADD_UH_U32(sad1);
  sad_array[2] = HADD_UH_U32(sad2);
  sad_array[3] = HADD_UH_U32(sad3);
}

static void sad_32width_x4d_msa(const uint8_t *src, int32_t src_stride,
                                const uint8_t * const aref_ptr[],
                                int32_t ref_stride,
                                int32_t height, uint32_t *sad_array) {
  const uint8_t *ref0_ptr, *ref1_ptr, *ref2_ptr, *ref3_ptr;
  int32_t ht_cnt;
  v16u8 src0, src1, ref0, ref1;
  v8u16 sad0 = { 0 };
  v8u16 sad1 = { 0 };
  v8u16 sad2 = { 0 };
  v8u16 sad3 = { 0 };

  ref0_ptr = aref_ptr[0];
  ref1_ptr = aref_ptr[1];
  ref2_ptr = aref_ptr[2];
  ref3_ptr = aref_ptr[3];

  for (ht_cnt = height; ht_cnt--;) {
    LD_UB2(src, 16, src0, src1);
    src += src_stride;

    LD_UB2(ref0_ptr, 16, ref0, ref1);
    ref0_ptr += ref_stride;
    sad0 += SAD_UB2_UH(src0, src1, ref0, ref1);

    LD_UB2(ref1_ptr, 16, ref0, ref1);
    ref1_ptr += ref_stride;
    sad1 += SAD_UB2_UH(src0, src1, ref0, ref1);

    LD_UB2(ref2_ptr, 16, ref0, ref1);
    ref2_ptr += ref_stride;
    sad2 += SAD_UB2_UH(src0, src1, ref0, ref1);

    LD_UB2(ref3_ptr, 16, ref0, ref1);
    ref3_ptr += ref_stride;
    sad3 += SAD_UB2_UH(src0, src1, ref0, ref1);
  }

  sad_array[0] = HADD_UH_U32(sad0);
  sad_array[1] = HADD_UH_U32(sad1);
  sad_array[2] = HADD_UH_U32(sad2);
  sad_array[3] = HADD_UH_U32(sad3);
}

static void sad_64width_x4d_msa(const uint8_t *src, int32_t src_stride,
                                const uint8_t * const aref_ptr[],
                                int32_t ref_stride,
                                int32_t height, uint32_t *sad_array) {
  const uint8_t *ref0_ptr, *ref1_ptr, *ref2_ptr, *ref3_ptr;
  int32_t ht_cnt;
  v16u8 src0, src1, src2, src3;
  v16u8 ref0, ref1, ref2, ref3;
  v8u16 sad0_0 = { 0 };
  v8u16 sad0_1 = { 0 };
  v8u16 sad1_0 = { 0 };
  v8u16 sad1_1 = { 0 };
  v8u16 sad2_0 = { 0 };
  v8u16 sad2_1 = { 0 };
  v8u16 sad3_0 = { 0 };
  v8u16 sad3_1 = { 0 };

  ref0_ptr = aref_ptr[0];
  ref1_ptr = aref_ptr[1];
  ref2_ptr = aref_ptr[2];
  ref3_ptr = aref_ptr[3];

  for (ht_cnt = height; ht_cnt--;) {
    LD_UB4(src, 16, src0, src1, src2, src3);
    src += src_stride;

    LD_UB4(ref0_ptr, 16, ref0, ref1, ref2, ref3);
    ref0_ptr += ref_stride;
    sad0_0 += SAD_UB2_UH(src0, src1, ref0, ref1);
    sad0_1 += SAD_UB2_UH(src2, src3, ref2, ref3);

    LD_UB4(ref1_ptr, 16, ref0, ref1, ref2, ref3);
    ref1_ptr += ref_stride;
    sad1_0 += SAD_UB2_UH(src0, src1, ref0, ref1);
    sad1_1 += SAD_UB2_UH(src2, src3, ref2, ref3);

    LD_UB4(ref2_ptr, 16, ref0, ref1, ref2, ref3);
    ref2_ptr += ref_stride;
    sad2_0 += SAD_UB2_UH(src0, src1, ref0, ref1);
    sad2_1 += SAD_UB2_UH(src2, src3, ref2, ref3);

    LD_UB4(ref3_ptr, 16, ref0, ref1, ref2, ref3);
    ref3_ptr += ref_stride;
    sad3_0 += SAD_UB2_UH(src0, src1, ref0, ref1);
    sad3_1 += SAD_UB2_UH(src2, src3, ref2, ref3);
  }

  sad_array[0] = HADD_UH_U32(sad0_0);
  sad_array[0] += HADD_UH_U32(sad0_1);
  sad_array[1] = HADD_UH_U32(sad1_0);
  sad_array[1] += HADD_UH_U32(sad1_1);
  sad_array[2] = HADD_UH_U32(sad2_0);
  sad_array[2] += HADD_UH_U32(sad2_1);
  sad_array[3] = HADD_UH_U32(sad3_0);
  sad_array[3] += HADD_UH_U32(sad3_1);
}

static uint32_t avgsad_4width_msa(const uint8_t *src_ptr, int32_t src_stride,
                                  const uint8_t *ref_ptr, int32_t ref_stride,
                                  int32_t height, const uint8_t *sec_pred) {
  int32_t ht_cnt;
  uint32_t src0, src1, src2, src3, ref0, ref1, ref2, ref3;
  v16u8 src = { 0 };
  v16u8 ref = { 0 };
  v16u8 diff, pred, comp;
  v8u16 sad = { 0 };

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    LW4(src_ptr, src_stride, src0, src1, src2, src3);
    src_ptr += (4 * src_stride);
    LW4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3);
    ref_ptr += (4 * ref_stride);
    pred = LD_UB(sec_pred);
    sec_pred += 16;

    INSERT_W4_UB(src0, src1, src2, src3, src);
    INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);

    comp = __msa_aver_u_b(pred, ref);
    diff = __msa_asub_u_b(src, comp);
    sad += __msa_hadd_u_h(diff, diff);
  }

  return HADD_UH_U32(sad);
}
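The avgsad kernels first blend the reference with a second predictor using the same rounded average as AVER_UB2, then take the SAD against the source, serving the encoder's compound-prediction search. In scalar terms (illustrative helper; second_pred is treated as a contiguous width-by-height block, matching the sec_pred stepping above):

#include <stdint.h>
#include <stdlib.h>

/* Scalar model of the avg-SAD kernels: average the reference against a
 * second predictor with rounding, then sum |src - comp| over the block. */
static uint32_t avgsad_mxn_c(const uint8_t *src, int src_stride,
                             const uint8_t *ref, int ref_stride,
                             int width, int height,
                             const uint8_t *second_pred) {
  uint32_t sad = 0;
  int row, col;
  for (row = 0; row < height; ++row) {
    for (col = 0; col < width; ++col) {
      const int comp = (ref[col] + second_pred[col] + 1) >> 1;
      sad += abs(src[col] - comp);
    }
    src += src_stride;
    ref += ref_stride;
    second_pred += width;  /* second_pred is a contiguous WxH block */
  }
  return sad;
}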

static uint32_t avgsad_8width_msa(const uint8_t *src, int32_t src_stride,
                                  const uint8_t *ref, int32_t ref_stride,
                                  int32_t height, const uint8_t *sec_pred) {
  int32_t ht_cnt;
  v16u8 src0, src1, src2, src3, ref0, ref1, ref2, ref3;
  v16u8 diff0, diff1, pred0, pred1;
  v8u16 sad = { 0 };

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    LD_UB4(src, src_stride, src0, src1, src2, src3);
    src += (4 * src_stride);
    LD_UB4(ref, ref_stride, ref0, ref1, ref2, ref3);
    ref += (4 * ref_stride);
    LD_UB2(sec_pred, 16, pred0, pred1);
    sec_pred += 32;
    PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2,
                src0, src1, ref0, ref1);
    AVER_UB2_UB(pred0, ref0, pred1, ref1, diff0, diff1);
    sad += SAD_UB2_UH(src0, src1, diff0, diff1);
  }

  return HADD_UH_U32(sad);
}

static uint32_t avgsad_16width_msa(const uint8_t *src, int32_t src_stride,
                                   const uint8_t *ref, int32_t ref_stride,
                                   int32_t height, const uint8_t *sec_pred) {
  int32_t ht_cnt;
  v16u8 src0, src1, src2, src3, ref0, ref1, ref2, ref3;
  v16u8 pred0, pred1, pred2, pred3, comp0, comp1;
  v8u16 sad = { 0 };

  for (ht_cnt = (height >> 3); ht_cnt--;) {
    LD_UB4(src, src_stride, src0, src1, src2, src3);
    src += (4 * src_stride);
    LD_UB4(ref, ref_stride, ref0, ref1, ref2, ref3);
    ref += (4 * ref_stride);
    LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
    sec_pred += (4 * 16);
    AVER_UB2_UB(pred0, ref0, pred1, ref1, comp0, comp1);
    sad += SAD_UB2_UH(src0, src1, comp0, comp1);
    AVER_UB2_UB(pred2, ref2, pred3, ref3, comp0, comp1);
    sad += SAD_UB2_UH(src2, src3, comp0, comp1);

    LD_UB4(src, src_stride, src0, src1, src2, src3);
    src += (4 * src_stride);
    LD_UB4(ref, ref_stride, ref0, ref1, ref2, ref3);
    ref += (4 * ref_stride);
    LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
    sec_pred += (4 * 16);
    AVER_UB2_UB(pred0, ref0, pred1, ref1, comp0, comp1);
    sad += SAD_UB2_UH(src0, src1, comp0, comp1);
    AVER_UB2_UB(pred2, ref2, pred3, ref3, comp0, comp1);
    sad += SAD_UB2_UH(src2, src3, comp0, comp1);
  }

  return HADD_UH_U32(sad);
}

static uint32_t avgsad_32width_msa(const uint8_t *src, int32_t src_stride,
                                   const uint8_t *ref, int32_t ref_stride,
                                   int32_t height, const uint8_t *sec_pred) {
  int32_t ht_cnt;
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
  v16u8 ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7;
  v16u8 pred0, pred1, pred2, pred3, pred4, pred5, pred6, pred7;
  v16u8 comp0, comp1;
  v8u16 sad = { 0 };

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    LD_UB4(src, src_stride, src0, src2, src4, src6);
    LD_UB4(src + 16, src_stride, src1, src3, src5, src7);
    src += (4 * src_stride);

    LD_UB4(ref, ref_stride, ref0, ref2, ref4, ref6);
    LD_UB4(ref + 16, ref_stride, ref1, ref3, ref5, ref7);
    ref += (4 * ref_stride);

    LD_UB4(sec_pred, 32, pred0, pred2, pred4, pred6);
    LD_UB4(sec_pred + 16, 32, pred1, pred3, pred5, pred7);
    sec_pred += (4 * 32);

    AVER_UB2_UB(pred0, ref0, pred1, ref1, comp0, comp1);
    sad += SAD_UB2_UH(src0, src1, comp0, comp1);
    AVER_UB2_UB(pred2, ref2, pred3, ref3, comp0, comp1);
    sad += SAD_UB2_UH(src2, src3, comp0, comp1);
    AVER_UB2_UB(pred4, ref4, pred5, ref5, comp0, comp1);
    sad += SAD_UB2_UH(src4, src5, comp0, comp1);
    AVER_UB2_UB(pred6, ref6, pred7, ref7, comp0, comp1);
    sad += SAD_UB2_UH(src6, src7, comp0, comp1);
  }

  return HADD_UH_U32(sad);
}

static uint32_t avgsad_64width_msa(const uint8_t *src, int32_t src_stride,
                                   const uint8_t *ref, int32_t ref_stride,
                                   int32_t height, const uint8_t *sec_pred) {
  int32_t ht_cnt;
  v16u8 src0, src1, src2, src3;
  v16u8 ref0, ref1, ref2, ref3;
  v16u8 comp0, comp1, comp2, comp3;
  v16u8 pred0, pred1, pred2, pred3;
  v8u16 sad0 = { 0 };
  v8u16 sad1 = { 0 };
  v4u32 sad;

  for (ht_cnt = (height >> 2); ht_cnt--;) {
    LD_UB4(src, 16, src0, src1, src2, src3);
    src += src_stride;
    LD_UB4(ref, 16, ref0, ref1, ref2, ref3);
    ref += ref_stride;
    LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
    sec_pred += 64;
    AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3,
                comp0, comp1, comp2, comp3);
    sad0 += SAD_UB2_UH(src0, src1, comp0, comp1);
    sad1 += SAD_UB2_UH(src2, src3, comp2, comp3);

    LD_UB4(src, 16, src0, src1, src2, src3);
    src += src_stride;
    LD_UB4(ref, 16, ref0, ref1, ref2, ref3);
    ref += ref_stride;
    LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
    sec_pred += 64;
    AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3,
                comp0, comp1, comp2, comp3);
    sad0 += SAD_UB2_UH(src0, src1, comp0, comp1);
    sad1 += SAD_UB2_UH(src2, src3, comp2, comp3);

    LD_UB4(src, 16, src0, src1, src2, src3);
    src += src_stride;
    LD_UB4(ref, 16, ref0, ref1, ref2, ref3);
    ref += ref_stride;
    LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
    sec_pred += 64;
    AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3,
                comp0, comp1, comp2, comp3);
    sad0 += SAD_UB2_UH(src0, src1, comp0, comp1);
    sad1 += SAD_UB2_UH(src2, src3, comp2, comp3);

    LD_UB4(src, 16, src0, src1, src2, src3);
    src += src_stride;
    LD_UB4(ref, 16, ref0, ref1, ref2, ref3);
    ref += ref_stride;
    LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
    sec_pred += 64;
    AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3,
                comp0, comp1, comp2, comp3);
    sad0 += SAD_UB2_UH(src0, src1, comp0, comp1);
    sad1 += SAD_UB2_UH(src2, src3, comp2, comp3);
  }

  sad = __msa_hadd_u_w(sad0, sad0);
  sad += __msa_hadd_u_w(sad1, sad1);

  return HADD_SW_S32(sad);
}

#define VPX_SAD_4xHEIGHT_MSA(height)  \
uint32_t vpx_sad4x##height##_msa(const uint8_t *src, int32_t src_stride,  \
                                 const uint8_t *ref, int32_t ref_stride) {  \
  return sad_4width_msa(src, src_stride, ref, ref_stride, height);  \
}

#define VPX_SAD_8xHEIGHT_MSA(height)  \
uint32_t vpx_sad8x##height##_msa(const uint8_t *src, int32_t src_stride,  \
                                 const uint8_t *ref, int32_t ref_stride) {  \
  return sad_8width_msa(src, src_stride, ref, ref_stride, height);  \
}

#define VPX_SAD_16xHEIGHT_MSA(height)  \
uint32_t vpx_sad16x##height##_msa(const uint8_t *src, int32_t src_stride,  \
                                  const uint8_t *ref, int32_t ref_stride) {  \
  return sad_16width_msa(src, src_stride, ref, ref_stride, height);  \
}

#define VPX_SAD_32xHEIGHT_MSA(height)  \
uint32_t vpx_sad32x##height##_msa(const uint8_t *src, int32_t src_stride,  \
                                  const uint8_t *ref, int32_t ref_stride) {  \
  return sad_32width_msa(src, src_stride, ref, ref_stride, height);  \
}

#define VPX_SAD_64xHEIGHT_MSA(height)  \
uint32_t vpx_sad64x##height##_msa(const uint8_t *src, int32_t src_stride,  \
                                  const uint8_t *ref, int32_t ref_stride) {  \
  return sad_64width_msa(src, src_stride, ref, ref_stride, height);  \
}

#define VPX_SAD_4xHEIGHTx4D_MSA(height)  \
void vpx_sad4x##height##x4d_msa(const uint8_t *src, int32_t src_stride,  \
                                const uint8_t *const refs[],  \
                                int32_t ref_stride, uint32_t *sads) {  \
  sad_4width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);  \
}

#define VPX_SAD_8xHEIGHTx4D_MSA(height)  \
void vpx_sad8x##height##x4d_msa(const uint8_t *src, int32_t src_stride,  \
                                const uint8_t *const refs[],  \
                                int32_t ref_stride, uint32_t *sads) {  \
  sad_8width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);  \
}

#define VPX_SAD_16xHEIGHTx4D_MSA(height)  \
void vpx_sad16x##height##x4d_msa(const uint8_t *src, int32_t src_stride,  \
                                 const uint8_t *const refs[],  \
                                 int32_t ref_stride, uint32_t *sads) {  \
  sad_16width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);  \
}

#define VPX_SAD_32xHEIGHTx4D_MSA(height)  \
void vpx_sad32x##height##x4d_msa(const uint8_t *src, int32_t src_stride,  \
                                 const uint8_t *const refs[],  \
                                 int32_t ref_stride, uint32_t *sads) {  \
  sad_32width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);  \
}

#define VPX_SAD_64xHEIGHTx4D_MSA(height)  \
void vpx_sad64x##height##x4d_msa(const uint8_t *src, int32_t src_stride,  \
                                 const uint8_t *const refs[],  \
                                 int32_t ref_stride, uint32_t *sads) {  \
  sad_64width_x4d_msa(src, src_stride, refs, ref_stride, height, sads);  \
}

#define VPX_AVGSAD_4xHEIGHT_MSA(height)  \
uint32_t vpx_sad4x##height##_avg_msa(const uint8_t *src, int32_t src_stride,  \
                                     const uint8_t *ref, int32_t ref_stride,  \
                                     const uint8_t *second_pred) {  \
  return avgsad_4width_msa(src, src_stride, ref, ref_stride,  \
                           height, second_pred);  \
}

#define VPX_AVGSAD_8xHEIGHT_MSA(height)  \
uint32_t vpx_sad8x##height##_avg_msa(const uint8_t *src, int32_t src_stride,  \
                                     const uint8_t *ref, int32_t ref_stride,  \
                                     const uint8_t *second_pred) {  \
  return avgsad_8width_msa(src, src_stride, ref, ref_stride,  \
                           height, second_pred);  \
}

#define VPX_AVGSAD_16xHEIGHT_MSA(height)  \
uint32_t vpx_sad16x##height##_avg_msa(const uint8_t *src, int32_t src_stride,  \
                                      const uint8_t *ref, int32_t ref_stride,  \
                                      const uint8_t *second_pred) {  \
  return avgsad_16width_msa(src, src_stride, ref, ref_stride,  \
                            height, second_pred);  \
}

#define VPX_AVGSAD_32xHEIGHT_MSA(height)  \
uint32_t vpx_sad32x##height##_avg_msa(const uint8_t *src, int32_t src_stride,  \
                                      const uint8_t *ref, int32_t ref_stride,  \
                                      const uint8_t *second_pred) {  \
  return avgsad_32width_msa(src, src_stride, ref, ref_stride,  \
                            height, second_pred);  \
}

#define VPX_AVGSAD_64xHEIGHT_MSA(height)  \
uint32_t vpx_sad64x##height##_avg_msa(const uint8_t *src, int32_t src_stride,  \
                                      const uint8_t *ref, int32_t ref_stride,  \
                                      const uint8_t *second_pred) {  \
  return avgsad_64width_msa(src, src_stride, ref, ref_stride,  \
                            height, second_pred);  \
}
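Each wrapper macro above stamps out one public entry point per block size, so the per-width static kernels stay private while the rtcd table below sees ordinary named functions. For instance, VPX_SAD_4xHEIGHT_MSA(8) from the list that follows expands (modulo whitespace) to:

uint32_t vpx_sad4x8_msa(const uint8_t *src, int32_t src_stride,
                        const uint8_t *ref, int32_t ref_stride) {
  return sad_4width_msa(src, src_stride, ref, ref_stride, 8);
}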

// 64x64
VPX_SAD_64xHEIGHT_MSA(64);
VPX_SAD_64xHEIGHTx4D_MSA(64);
VPX_AVGSAD_64xHEIGHT_MSA(64);

// 64x32
VPX_SAD_64xHEIGHT_MSA(32);
VPX_SAD_64xHEIGHTx4D_MSA(32);
VPX_AVGSAD_64xHEIGHT_MSA(32);

// 32x64
VPX_SAD_32xHEIGHT_MSA(64);
VPX_SAD_32xHEIGHTx4D_MSA(64);
VPX_AVGSAD_32xHEIGHT_MSA(64);

// 32x32
VPX_SAD_32xHEIGHT_MSA(32);
VPX_SAD_32xHEIGHTx4D_MSA(32);
VPX_AVGSAD_32xHEIGHT_MSA(32);

// 32x16
VPX_SAD_32xHEIGHT_MSA(16);
VPX_SAD_32xHEIGHTx4D_MSA(16);
VPX_AVGSAD_32xHEIGHT_MSA(16);

// 16x32
VPX_SAD_16xHEIGHT_MSA(32);
VPX_SAD_16xHEIGHTx4D_MSA(32);
VPX_AVGSAD_16xHEIGHT_MSA(32);

// 16x16
VPX_SAD_16xHEIGHT_MSA(16);
VPX_SAD_16xHEIGHTx4D_MSA(16);
VPX_AVGSAD_16xHEIGHT_MSA(16);

// 16x8
VPX_SAD_16xHEIGHT_MSA(8);
VPX_SAD_16xHEIGHTx4D_MSA(8);
VPX_AVGSAD_16xHEIGHT_MSA(8);

// 8x16
VPX_SAD_8xHEIGHT_MSA(16);
VPX_SAD_8xHEIGHTx4D_MSA(16);
VPX_AVGSAD_8xHEIGHT_MSA(16);

// 8x8
VPX_SAD_8xHEIGHT_MSA(8);
VPX_SAD_8xHEIGHTx4D_MSA(8);
VPX_AVGSAD_8xHEIGHT_MSA(8);

// 8x4
VPX_SAD_8xHEIGHT_MSA(4);
VPX_SAD_8xHEIGHTx4D_MSA(4);
VPX_AVGSAD_8xHEIGHT_MSA(4);

// 4x8
VPX_SAD_4xHEIGHT_MSA(8);
VPX_SAD_4xHEIGHTx4D_MSA(8);
VPX_AVGSAD_4xHEIGHT_MSA(8);

// 4x4
VPX_SAD_4xHEIGHT_MSA(4);
VPX_SAD_4xHEIGHTx4D_MSA(4);
VPX_AVGSAD_4xHEIGHT_MSA(4);
@@ -28,6 +28,7 @@ DSP_SRCS-$(HAVE_AVX2) += x86/sad4d_avx2.c
DSP_SRCS-$(HAVE_AVX2) += x86/sad_avx2.c

DSP_SRCS-$(HAVE_MSA) += mips/macros_msa.h
+DSP_SRCS-$(HAVE_MSA) += mips/sad_msa.c

ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_sad4d_sse2.asm
@@ -40,85 +40,85 @@ if (vpx_config("CONFIG_ENCODERS") eq "yes") {
# Single block SAD
#
add_proto qw/unsigned int vpx_sad64x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad64x64 avx2 neon/, "$sse2_x86inc";
+specialize qw/vpx_sad64x64 avx2 neon msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad64x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad64x32 avx2/, "$sse2_x86inc";
+specialize qw/vpx_sad64x32 avx2 msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad32x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad32x64 avx2/, "$sse2_x86inc";
+specialize qw/vpx_sad32x64 avx2 msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad32x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad32x32 avx2 neon/, "$sse2_x86inc";
+specialize qw/vpx_sad32x32 avx2 neon msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad32x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad32x16 avx2/, "$sse2_x86inc";
+specialize qw/vpx_sad32x16 avx2 msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad16x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad16x32/, "$sse2_x86inc";
+specialize qw/vpx_sad16x32 msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad16x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad16x16 mmx media neon/, "$sse2_x86inc";
+specialize qw/vpx_sad16x16 mmx media neon msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad16x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad16x8 mmx neon/, "$sse2_x86inc";
+specialize qw/vpx_sad16x8 mmx neon msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad8x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad8x16 mmx neon/, "$sse2_x86inc";
+specialize qw/vpx_sad8x16 mmx neon msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad8x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad8x8 mmx neon/, "$sse2_x86inc";
+specialize qw/vpx_sad8x8 mmx neon msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad8x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad8x4/, "$sse2_x86inc";
+specialize qw/vpx_sad8x4 msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad4x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad4x8/, "$sse_x86inc";
+specialize qw/vpx_sad4x8 msa/, "$sse_x86inc";

add_proto qw/unsigned int vpx_sad4x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
-specialize qw/vpx_sad4x4 mmx neon/, "$sse_x86inc";
+specialize qw/vpx_sad4x4 mmx neon msa/, "$sse_x86inc";

#
# Avg
#
add_proto qw/unsigned int vpx_sad64x64_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad64x64_avg avx2/, "$sse2_x86inc";
+specialize qw/vpx_sad64x64_avg avx2 msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad64x32_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad64x32_avg avx2/, "$sse2_x86inc";
+specialize qw/vpx_sad64x32_avg avx2 msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad32x64_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad32x64_avg avx2/, "$sse2_x86inc";
+specialize qw/vpx_sad32x64_avg avx2 msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad32x32_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad32x32_avg avx2/, "$sse2_x86inc";
+specialize qw/vpx_sad32x32_avg avx2 msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad32x16_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad32x16_avg avx2/, "$sse2_x86inc";
+specialize qw/vpx_sad32x16_avg avx2 msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad16x32_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad16x32_avg/, "$sse2_x86inc";
+specialize qw/vpx_sad16x32_avg msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad16x16_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad16x16_avg/, "$sse2_x86inc";
+specialize qw/vpx_sad16x16_avg msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad16x8_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad16x8_avg/, "$sse2_x86inc";
+specialize qw/vpx_sad16x8_avg msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad8x16_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad8x16_avg/, "$sse2_x86inc";
+specialize qw/vpx_sad8x16_avg msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad8x8_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad8x8_avg/, "$sse2_x86inc";
+specialize qw/vpx_sad8x8_avg msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad8x4_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad8x4_avg/, "$sse2_x86inc";
+specialize qw/vpx_sad8x4_avg msa/, "$sse2_x86inc";

add_proto qw/unsigned int vpx_sad4x8_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad4x8_avg/, "$sse_x86inc";
+specialize qw/vpx_sad4x8_avg msa/, "$sse_x86inc";

add_proto qw/unsigned int vpx_sad4x4_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
-specialize qw/vpx_sad4x4_avg/, "$sse_x86inc";
+specialize qw/vpx_sad4x4_avg msa/, "$sse_x86inc";

#
# Multi-block SAD, comparing a reference to N blocks 1 pixel apart horizontally
@@ -171,43 +171,43 @@ specialize qw/vpx_sad4x4x8 sse4_1/;
# Multi-block SAD, comparing a reference to N independent blocks
#
add_proto qw/void vpx_sad64x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad64x64x4d avx2 neon/, "$sse2_x86inc";
+specialize qw/vpx_sad64x64x4d avx2 neon msa/, "$sse2_x86inc";

add_proto qw/void vpx_sad64x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad64x32x4d/, "$sse2_x86inc";
+specialize qw/vpx_sad64x32x4d msa/, "$sse2_x86inc";

add_proto qw/void vpx_sad32x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad32x64x4d/, "$sse2_x86inc";
+specialize qw/vpx_sad32x64x4d msa/, "$sse2_x86inc";

add_proto qw/void vpx_sad32x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad32x32x4d avx2 neon/, "$sse2_x86inc";
+specialize qw/vpx_sad32x32x4d avx2 neon msa/, "$sse2_x86inc";

add_proto qw/void vpx_sad32x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad32x16x4d/, "$sse2_x86inc";
+specialize qw/vpx_sad32x16x4d msa/, "$sse2_x86inc";

add_proto qw/void vpx_sad16x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad16x32x4d/, "$sse2_x86inc";
+specialize qw/vpx_sad16x32x4d msa/, "$sse2_x86inc";

add_proto qw/void vpx_sad16x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad16x16x4d neon/, "$sse2_x86inc";
+specialize qw/vpx_sad16x16x4d neon msa/, "$sse2_x86inc";

add_proto qw/void vpx_sad16x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad16x8x4d/, "$sse2_x86inc";
+specialize qw/vpx_sad16x8x4d msa/, "$sse2_x86inc";

add_proto qw/void vpx_sad8x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad8x16x4d/, "$sse2_x86inc";
+specialize qw/vpx_sad8x16x4d msa/, "$sse2_x86inc";

add_proto qw/void vpx_sad8x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad8x8x4d/, "$sse2_x86inc";
+specialize qw/vpx_sad8x8x4d msa/, "$sse2_x86inc";

add_proto qw/void vpx_sad8x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad8x4x4d/, "$sse2_x86inc";
+specialize qw/vpx_sad8x4x4d msa/, "$sse2_x86inc";

add_proto qw/void vpx_sad4x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad4x8x4d/, "$sse_x86inc";
+specialize qw/vpx_sad4x8x4d msa/, "$sse_x86inc";

add_proto qw/void vpx_sad4x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad4x4x4d/, "$sse_x86inc";
+specialize qw/vpx_sad4x4x4d msa/, "$sse_x86inc";

if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
  #
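For context on the add_proto/specialize lines above: vpx_dsp_rtcd.pl generates a dispatch header that binds each public SAD symbol to the best specialization enabled for the target. A rough, hand-written sketch of the shape that header takes for one function (simplified and assumed; the real generated vpx_dsp_rtcd.h differs in detail):

#include <stdint.h>

/* Hypothetical, simplified shape of the generated dispatch; the actual
 * header is produced at build time by vpx_dsp_rtcd.pl. */
unsigned int vpx_sad64x64_c(const uint8_t *src_ptr, int src_stride,
                            const uint8_t *ref_ptr, int ref_stride);
unsigned int vpx_sad64x64_msa(const uint8_t *src_ptr, int src_stride,
                              const uint8_t *ref_ptr, int ref_stride);

/* On a MIPS build with MSA enabled the dispatch can be static: */
#if HAVE_MSA
#define vpx_sad64x64 vpx_sad64x64_msa
#else
#define vpx_sad64x64 vpx_sad64x64_c
#endif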