Merge "ppc: Add vpx_sadnxmx4d_vsx for n,m = {8, 16, 32 ,64}"
--- a/test/sad_test.cc
+++ b/test/sad_test.cc
@@ -946,5 +946,17 @@ const SadMxNAvgParam avg_vsx_tests[] = {
   SadMxNAvgParam(16, 8, &vpx_sad16x8_avg_vsx),
 };
 INSTANTIATE_TEST_CASE_P(VSX, SADavgTest, ::testing::ValuesIn(avg_vsx_tests));
+
+const SadMxNx4Param x4d_vsx_tests[] = {
+  SadMxNx4Param(64, 64, &vpx_sad64x64x4d_vsx),
+  SadMxNx4Param(64, 32, &vpx_sad64x32x4d_vsx),
+  SadMxNx4Param(32, 64, &vpx_sad32x64x4d_vsx),
+  SadMxNx4Param(32, 32, &vpx_sad32x32x4d_vsx),
+  SadMxNx4Param(32, 16, &vpx_sad32x16x4d_vsx),
+  SadMxNx4Param(16, 32, &vpx_sad16x32x4d_vsx),
+  SadMxNx4Param(16, 16, &vpx_sad16x16x4d_vsx),
+  SadMxNx4Param(16, 8, &vpx_sad16x8x4d_vsx),
+};
+
+INSTANTIATE_TEST_CASE_P(VSX, SADx4Test, ::testing::ValuesIn(x4d_vsx_tests));
 #endif  // HAVE_VSX
 }  // namespace
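Note on the new test coverage: each SadMxNx4Param entry registers one width/height pair with SADx4Test, whose fixture calls the function with four candidate reference blocks and checks all four SADs against the C reference. A minimal standalone sketch of such a call follows; it assumes a VSX-enabled libvpx build, and the main() harness and expected values are illustrative, not taken from the test suite.

/* Hypothetical harness: one x4d call computes four SADs at once. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Prototype as declared by vpx_dsp_rtcd for the function under test. */
void vpx_sad16x8x4d_vsx(const uint8_t *src, int src_stride,
                        const uint8_t *const ref_array[], int ref_stride,
                        uint32_t *sad_array);

int main(void) {
  uint8_t src[16 * 8], ref[4][16 * 8];
  const uint8_t *refs[4] = { ref[0], ref[1], ref[2], ref[3] };
  uint32_t sads[4];
  memset(src, 0, sizeof(src));
  for (int i = 0; i < 4; i++) memset(ref[i], i + 1, sizeof(ref[i]));
  vpx_sad16x8x4d_vsx(src, 16, refs, 16, sads);
  /* Every pixel differs by i + 1, so each SAD should be 16 * 8 * (i + 1). */
  for (int i = 0; i < 4; i++) printf("sad[%d] = %u\n", i, (unsigned)sads[i]);
  return 0;
}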

--- a/vpx_dsp/ppc/sad_vsx.c
+++ b/vpx_dsp/ppc/sad_vsx.c
@@ -133,7 +133,6 @@ SAD64(64);
     DECLARE_ALIGNED(64, uint8_t, comp_pred[64 * height]);             \
     vpx_comp_avg_pred_vsx(comp_pred, second_pred, 64, height, ref,    \
                           ref_stride);                                \
-                                                                      \
     return vpx_sad64x##height##_vsx(src, src_stride, comp_pred, 64);  \
   }
 
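The hunk above only drops a stray blank continuation line from SAD64AVG; the macro's behavior is unchanged. For readers unfamiliar with the averaged-SAD variant, a scalar model is sketched below. sad64xH_avg_ref is a hypothetical helper; the rounding matches vpx_comp_avg_pred's (pred + ref + 1) >> 1.

#include <stdint.h>
#include <stdlib.h>

/* Scalar model of vpx_sad64x<height>_avg: average the reference block
 * with second_pred (rounding up), then sum absolute differences vs. src. */
static unsigned sad64xH_avg_ref(int height, const uint8_t *src,
                                int src_stride, const uint8_t *ref,
                                int ref_stride, const uint8_t *second_pred) {
  unsigned sad = 0;
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < 64; x++) {
      const int avg =
          (ref[y * ref_stride + x] + second_pred[y * 64 + x] + 1) >> 1;
      sad += abs(src[y * src_stride + x] - avg);
    }
  }
  return sad;
}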
@@ -145,3 +144,111 @@ SAD32AVG(32);
 SAD32AVG(64);
 SAD64AVG(32);
 SAD64AVG(64);
+
+#define PROCESS16_4D(offset, ref, v_h, v_l) \
+  v_b = vec_vsx_ld(offset, ref);            \
+  v_bh = unpack_to_s16_h(v_b);              \
+  v_bl = unpack_to_s16_l(v_b);              \
+  v_subh = vec_sub(v_h, v_bh);              \
+  v_subl = vec_sub(v_l, v_bl);              \
+  v_absh = vec_abs(v_subh);                 \
+  v_absl = vec_abs(v_subl);                 \
+  v_sad = vec_sum4s(v_absh, v_sad);         \
+  v_sad = vec_sum4s(v_absl, v_sad);
+
+#define UNPACK_SRC(offset, srcv_h, srcv_l) \
+  v_a = vec_vsx_ld(offset, src);           \
+  srcv_h = unpack_to_s16_h(v_a);           \
+  srcv_l = unpack_to_s16_l(v_a);
+
+#define SAD16_4D(height)                                                   \
+  void vpx_sad16x##height##x4d_vsx(const uint8_t *src, int src_stride,    \
+                                   const uint8_t *const ref_array[],      \
+                                   int ref_stride, uint32_t *sad_array) { \
+    int i;                                                                 \
+    int y;                                                                 \
+    unsigned int sad[4];                                                   \
+    uint8x16_t v_a, v_b;                                                   \
+    int16x8_t v_ah, v_al, v_bh, v_bl, v_absh, v_absl, v_subh, v_subl;      \
+                                                                           \
+    for (i = 0; i < 4; i++) sad_array[i] = 0;                              \
+                                                                           \
+    for (y = 0; y < height; y++) {                                         \
+      UNPACK_SRC(y *src_stride, v_ah, v_al);                               \
+      for (i = 0; i < 4; i++) {                                            \
+        int32x4_t v_sad = vec_splat_s32(0);                                \
+        PROCESS16_4D(y *ref_stride, ref_array[i], v_ah, v_al);             \
+                                                                           \
+        vec_vsx_st((uint32x4_t)v_sad, 0, sad);                             \
+        sad_array[i] += (sad[3] + sad[2] + sad[1] + sad[0]);               \
+      }                                                                    \
+    }                                                                      \
+  }
+
+#define SAD32_4D(height)                                                   \
+  void vpx_sad32x##height##x4d_vsx(const uint8_t *src, int src_stride,    \
+                                   const uint8_t *const ref_array[],      \
+                                   int ref_stride, uint32_t *sad_array) { \
+    int i;                                                                 \
+    int y;                                                                 \
+    unsigned int sad[4];                                                   \
+    uint8x16_t v_a, v_b;                                                   \
+    int16x8_t v_ah1, v_al1, v_ah2, v_al2, v_bh, v_bl;                      \
+    int16x8_t v_absh, v_absl, v_subh, v_subl;                              \
+                                                                           \
+    for (i = 0; i < 4; i++) sad_array[i] = 0;                              \
+                                                                           \
+    for (y = 0; y < height; y++) {                                         \
+      UNPACK_SRC(y *src_stride, v_ah1, v_al1);                             \
+      UNPACK_SRC(y *src_stride + 16, v_ah2, v_al2);                        \
+      for (i = 0; i < 4; i++) {                                            \
+        int32x4_t v_sad = vec_splat_s32(0);                                \
+        PROCESS16_4D(y *ref_stride, ref_array[i], v_ah1, v_al1);           \
+        PROCESS16_4D(y *ref_stride + 16, ref_array[i], v_ah2, v_al2);      \
+                                                                           \
+        vec_vsx_st((uint32x4_t)v_sad, 0, sad);                             \
+        sad_array[i] += (sad[3] + sad[2] + sad[1] + sad[0]);               \
+      }                                                                    \
+    }                                                                      \
+  }
+
+#define SAD64_4D(height)                                                   \
+  void vpx_sad64x##height##x4d_vsx(const uint8_t *src, int src_stride,    \
+                                   const uint8_t *const ref_array[],      \
+                                   int ref_stride, uint32_t *sad_array) { \
+    int i;                                                                 \
+    int y;                                                                 \
+    unsigned int sad[4];                                                   \
+    uint8x16_t v_a, v_b;                                                   \
+    int16x8_t v_ah1, v_al1, v_ah2, v_al2, v_bh, v_bl;                      \
+    int16x8_t v_ah3, v_al3, v_ah4, v_al4;                                  \
+    int16x8_t v_absh, v_absl, v_subh, v_subl;                              \
+                                                                           \
+    for (i = 0; i < 4; i++) sad_array[i] = 0;                              \
+                                                                           \
+    for (y = 0; y < height; y++) {                                         \
+      UNPACK_SRC(y *src_stride, v_ah1, v_al1);                             \
+      UNPACK_SRC(y *src_stride + 16, v_ah2, v_al2);                        \
+      UNPACK_SRC(y *src_stride + 32, v_ah3, v_al3);                        \
+      UNPACK_SRC(y *src_stride + 48, v_ah4, v_al4);                        \
+      for (i = 0; i < 4; i++) {                                            \
+        int32x4_t v_sad = vec_splat_s32(0);                                \
+        PROCESS16_4D(y *ref_stride, ref_array[i], v_ah1, v_al1);           \
+        PROCESS16_4D(y *ref_stride + 16, ref_array[i], v_ah2, v_al2);      \
+        PROCESS16_4D(y *ref_stride + 32, ref_array[i], v_ah3, v_al3);      \
+        PROCESS16_4D(y *ref_stride + 48, ref_array[i], v_ah4, v_al4);      \
+                                                                           \
+        vec_vsx_st((uint32x4_t)v_sad, 0, sad);                             \
+        sad_array[i] += (sad[3] + sad[2] + sad[1] + sad[0]);               \
+      }                                                                    \
+    }                                                                      \
+  }
+
+SAD16_4D(8);
+SAD16_4D(16);
+SAD16_4D(32);
+SAD32_4D(16);
+SAD32_4D(32);
+SAD32_4D(64);
+SAD64_4D(32);
+SAD64_4D(64);
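A scalar model of what SAD16_4D/SAD32_4D/SAD64_4D compute may help when reading the vector code: one call yields four SADs, one per candidate reference, accumulated row by row; the VSX versions do the same work 16 pixels at a time with vec_sub/vec_abs/vec_sum4s. The helper below (sad_x4d_ref, hypothetical) mirrors that logic without intrinsics.

#include <stdint.h>
#include <stdlib.h>

/* Reference model: sad_array[i] = sum of |src - ref_array[i]| over the
 * width x height block, for each of the four candidate references. */
static void sad_x4d_ref(int width, int height, const uint8_t *src,
                        int src_stride, const uint8_t *const ref_array[4],
                        int ref_stride, uint32_t *sad_array) {
  for (int i = 0; i < 4; i++) {
    uint32_t sad = 0;
    for (int y = 0; y < height; y++) {
      for (int x = 0; x < width; x++) {
        sad += abs(src[y * src_stride + x] -
                   ref_array[i][y * ref_stride + x]);
      }
    }
    sad_array[i] = sad;
  }
}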
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -873,28 +873,28 @@ specialize qw/vpx_sad4x4x8 sse4_1 msa/;
 # Multi-block SAD, comparing a reference to N independent blocks
 #
 add_proto qw/void vpx_sad64x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad64x64x4d avx2 neon msa sse2/;
+specialize qw/vpx_sad64x64x4d avx2 neon msa sse2 vsx/;
 
 add_proto qw/void vpx_sad64x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad64x32x4d msa sse2/;
+specialize qw/vpx_sad64x32x4d msa sse2 vsx/;
 
 add_proto qw/void vpx_sad32x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad32x64x4d msa sse2/;
+specialize qw/vpx_sad32x64x4d msa sse2 vsx/;
 
 add_proto qw/void vpx_sad32x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad32x32x4d avx2 neon msa sse2/;
+specialize qw/vpx_sad32x32x4d avx2 neon msa sse2 vsx/;
 
 add_proto qw/void vpx_sad32x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad32x16x4d msa sse2/;
+specialize qw/vpx_sad32x16x4d msa sse2 vsx/;
 
 add_proto qw/void vpx_sad16x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad16x32x4d msa sse2/;
+specialize qw/vpx_sad16x32x4d msa sse2 vsx/;
 
 add_proto qw/void vpx_sad16x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad16x16x4d neon msa sse2/;
+specialize qw/vpx_sad16x16x4d neon msa sse2 vsx/;
 
 add_proto qw/void vpx_sad16x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad16x8x4d msa sse2/;
+specialize qw/vpx_sad16x8x4d msa sse2 vsx/;
 
 add_proto qw/void vpx_sad8x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
 specialize qw/vpx_sad8x16x4d msa sse2/;
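Adding vsx to a specialize line tells the rtcd generator to declare the VSX symbol and include it in runtime dispatch when the build enables VSX. A simplified sketch of the resulting linkage follows; the function-pointer setup is illustrative, and the generated vpx_dsp_rtcd.h differs in detail.

#include <stdint.h>

/* Per-ISA symbols declared by the generated header (names are real). */
void vpx_sad64x64x4d_c(const uint8_t *src_ptr, int src_stride,
                       const uint8_t *const ref_ptr[], int ref_stride,
                       uint32_t *sad_array);
void vpx_sad64x64x4d_vsx(const uint8_t *src_ptr, int src_stride,
                         const uint8_t *const ref_ptr[], int ref_stride,
                         uint32_t *sad_array);

/* Illustrative dispatch: rtcd setup rebinds the pointer at init time,
 * defaulting to the C implementation. */
static void (*vpx_sad64x64x4d_ptr)(const uint8_t *, int,
                                   const uint8_t *const[], int,
                                   uint32_t *) = vpx_sad64x64x4d_c;

static void sad_rtcd_init(int have_vsx) {
  if (have_vsx) vpx_sad64x64x4d_ptr = vpx_sad64x64x4d_vsx;
}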