diff --git a/test/sad_test.cc b/test/sad_test.cc
index ef8b01cae..e37356a68 100644
--- a/test/sad_test.cc
+++ b/test/sad_test.cc
@@ -676,8 +676,17 @@ INSTANTIATE_TEST_CASE_P(NEON, SADavgTest, ::testing::ValuesIn(avg_neon_tests));
 
 const SadMxNx4Param x4d_neon_tests[] = {
   SadMxNx4Param(64, 64, &vpx_sad64x64x4d_neon),
+  SadMxNx4Param(32, 64, &vpx_sad32x64x4d_neon),
   SadMxNx4Param(32, 32, &vpx_sad32x32x4d_neon),
+  SadMxNx4Param(32, 16, &vpx_sad32x16x4d_neon),
+  SadMxNx4Param(16, 32, &vpx_sad16x32x4d_neon),
   SadMxNx4Param(16, 16, &vpx_sad16x16x4d_neon),
+  SadMxNx4Param(16, 8, &vpx_sad16x8x4d_neon),
+  SadMxNx4Param(8, 16, &vpx_sad8x16x4d_neon),
+  SadMxNx4Param(8, 8, &vpx_sad8x8x4d_neon),
+  SadMxNx4Param(8, 4, &vpx_sad8x4x4d_neon),
+  SadMxNx4Param(4, 8, &vpx_sad4x8x4d_neon),
+  SadMxNx4Param(4, 4, &vpx_sad4x4x4d_neon),
 };
 INSTANTIATE_TEST_CASE_P(NEON, SADx4Test, ::testing::ValuesIn(x4d_neon_tests));
 #endif  // HAVE_NEON
diff --git a/vpx_dsp/arm/sad4d_neon.c b/vpx_dsp/arm/sad4d_neon.c
index dc2039800..afb320aca 100644
--- a/vpx_dsp/arm/sad4d_neon.c
+++ b/vpx_dsp/arm/sad4d_neon.c
@@ -13,6 +13,168 @@
 #include "./vpx_config.h"
 #include "./vpx_dsp_rtcd.h"
 #include "vpx/vpx_integer.h"
+#include "vpx_dsp/arm/mem_neon.h"
+#include "vpx_dsp/arm/sum_neon.h"
+
+void vpx_sad4x4x4d_neon(const uint8_t *src, int src_stride,
+                        const uint8_t *const ref[4], int ref_stride,
+                        uint32_t *res) {
+  int i;
+  const uint8x16_t src_u8 = load_unaligned_u8q(src, src_stride);
+  for (i = 0; i < 4; ++i) {
+    const uint8x16_t ref_u8 = load_unaligned_u8q(ref[i], ref_stride);
+    uint16x8_t abs = vabdl_u8(vget_low_u8(src_u8), vget_low_u8(ref_u8));
+    abs = vabal_u8(abs, vget_high_u8(src_u8), vget_high_u8(ref_u8));
+    res[i] = vget_lane_u32(horizontal_add_uint16x8(abs), 0);
+  }
+}
+
+void vpx_sad4x8x4d_neon(const uint8_t *src, int src_stride,
+                        const uint8_t *const ref[4], int ref_stride,
+                        uint32_t *res) {
+  int i;
+  const uint8x16_t src_0 = load_unaligned_u8q(src, src_stride);
+  const uint8x16_t src_1 = load_unaligned_u8q(src + 4 * src_stride, src_stride);
+  for (i = 0; i < 4; ++i) {
+    const uint8x16_t ref_0 = load_unaligned_u8q(ref[i], ref_stride);
+    const uint8x16_t ref_1 =
+        load_unaligned_u8q(ref[i] + 4 * ref_stride, ref_stride);
+    uint16x8_t abs = vabdl_u8(vget_low_u8(src_0), vget_low_u8(ref_0));
+    abs = vabal_u8(abs, vget_high_u8(src_0), vget_high_u8(ref_0));
+    abs = vabal_u8(abs, vget_low_u8(src_1), vget_low_u8(ref_1));
+    abs = vabal_u8(abs, vget_high_u8(src_1), vget_high_u8(ref_1));
+    res[i] = vget_lane_u32(horizontal_add_uint16x8(abs), 0);
+  }
+}
+
+static INLINE void sad8x_4d(const uint8_t *a, int a_stride,
+                            const uint8_t *const b[4], int b_stride,
+                            uint32_t *result, const int height) {
+  int i, j;
+  uint16x8_t sum[4] = { vdupq_n_u16(0), vdupq_n_u16(0), vdupq_n_u16(0),
+                        vdupq_n_u16(0) };
+  const uint8_t *b_loop[4] = { b[0], b[1], b[2], b[3] };
+
+  for (i = 0; i < height; ++i) {
+    const uint8x8_t a_u8 = vld1_u8(a);
+    a += a_stride;
+    for (j = 0; j < 4; ++j) {
+      const uint8x8_t b_u8 = vld1_u8(b_loop[j]);
+      b_loop[j] += b_stride;
+      sum[j] = vabal_u8(sum[j], a_u8, b_u8);
+    }
+  }
+
+  for (j = 0; j < 4; ++j) {
+    result[j] = vget_lane_u32(horizontal_add_uint16x8(sum[j]), 0);
+  }
+}
+
+void vpx_sad8x4x4d_neon(const uint8_t *src, int src_stride,
+                        const uint8_t *const ref[4], int ref_stride,
+                        uint32_t *res) {
+  sad8x_4d(src, src_stride, ref, ref_stride, res, 4);
+}
+
+void vpx_sad8x8x4d_neon(const uint8_t *src, int src_stride,
+                        const uint8_t *const ref[4], int ref_stride,
+                        uint32_t *res) {
+  sad8x_4d(src, src_stride, ref, ref_stride, res, 8);
+}
+
+void vpx_sad8x16x4d_neon(const uint8_t *src, int src_stride,
+                         const uint8_t *const ref[4], int ref_stride,
+                         uint32_t *res) {
+  sad8x_4d(src, src_stride, ref, ref_stride, res, 16);
+}
+
+static INLINE void sad16x_4d(const uint8_t *a, int a_stride,
+                             const uint8_t *const b[4], int b_stride,
+                             uint32_t *result, const int height) {
+  int i, j;
+  uint16x8_t sum[4] = { vdupq_n_u16(0), vdupq_n_u16(0), vdupq_n_u16(0),
+                        vdupq_n_u16(0) };
+  const uint8_t *b_loop[4] = { b[0], b[1], b[2], b[3] };
+
+  for (i = 0; i < height; ++i) {
+    const uint8x16_t a_u8 = vld1q_u8(a);
+    a += a_stride;
+    for (j = 0; j < 4; ++j) {
+      const uint8x16_t b_u8 = vld1q_u8(b_loop[j]);
+      b_loop[j] += b_stride;
+      sum[j] = vabal_u8(sum[j], vget_low_u8(a_u8), vget_low_u8(b_u8));
+      sum[j] = vabal_u8(sum[j], vget_high_u8(a_u8), vget_high_u8(b_u8));
+    }
+  }
+
+  for (j = 0; j < 4; ++j) {
+    result[j] = vget_lane_u32(horizontal_add_uint16x8(sum[j]), 0);
+  }
+}
+
+void vpx_sad16x8x4d_neon(const uint8_t *src, int src_stride,
+                         const uint8_t *const ref[4], int ref_stride,
+                         uint32_t *res) {
+  sad16x_4d(src, src_stride, ref, ref_stride, res, 8);
+}
+
+void vpx_sad16x16x4d_neon(const uint8_t *src, int src_stride,
+                          const uint8_t *const ref[4], int ref_stride,
+                          uint32_t *res) {
+  sad16x_4d(src, src_stride, ref, ref_stride, res, 16);
+}
+
+void vpx_sad16x32x4d_neon(const uint8_t *src, int src_stride,
+                          const uint8_t *const ref[4], int ref_stride,
+                          uint32_t *res) {
+  sad16x_4d(src, src_stride, ref, ref_stride, res, 32);
+}
+
+static INLINE void sad32x_4d(const uint8_t *a, int a_stride,
+                             const uint8_t *const b[4], int b_stride,
+                             uint32_t *result, const int height) {
+  int i, j;
+  uint16x8_t sum[4] = { vdupq_n_u16(0), vdupq_n_u16(0), vdupq_n_u16(0),
+                        vdupq_n_u16(0) };
+  const uint8_t *b_loop[4] = { b[0], b[1], b[2], b[3] };
+
+  for (i = 0; i < height; ++i) {
+    const uint8x16_t a_0 = vld1q_u8(a);
+    const uint8x16_t a_1 = vld1q_u8(a + 16);
+    a += a_stride;
+    for (j = 0; j < 4; ++j) {
+      const uint8x16_t b_0 = vld1q_u8(b_loop[j]);
+      const uint8x16_t b_1 = vld1q_u8(b_loop[j] + 16);
+      b_loop[j] += b_stride;
+      sum[j] = vabal_u8(sum[j], vget_low_u8(a_0), vget_low_u8(b_0));
+      sum[j] = vabal_u8(sum[j], vget_high_u8(a_0), vget_high_u8(b_0));
+      sum[j] = vabal_u8(sum[j], vget_low_u8(a_1), vget_low_u8(b_1));
+      sum[j] = vabal_u8(sum[j], vget_high_u8(a_1), vget_high_u8(b_1));
+    }
+  }
+
+  for (j = 0; j < 4; ++j) {
+    result[j] = vget_lane_u32(horizontal_add_uint16x8(sum[j]), 0);
+  }
+}
+
+void vpx_sad32x16x4d_neon(const uint8_t *src, int src_stride,
+                          const uint8_t *const ref[4], int ref_stride,
+                          uint32_t *res) {
+  sad32x_4d(src, src_stride, ref, ref_stride, res, 16);
+}
+
+void vpx_sad32x32x4d_neon(const uint8_t *src, int src_stride,
+                          const uint8_t *const ref[4], int ref_stride,
+                          uint32_t *res) {
+  sad32x_4d(src, src_stride, ref, ref_stride, res, 32);
+}
+
+void vpx_sad32x64x4d_neon(const uint8_t *src, int src_stride,
+                          const uint8_t *const ref[4], int ref_stride,
+                          uint32_t *res) {
+  sad32x_4d(src, src_stride, ref, ref_stride, res, 64);
+}
 
 static INLINE unsigned int horizontal_long_add_16x8(const uint16x8_t vec_lo,
                                                     const uint16x8_t vec_hi) {
@@ -59,25 +221,6 @@ static void sad_neon_64(const uint8x16_t vec_src_00,
                              vget_high_u8(vec_ref_48));
 }
 
-// Calculate the absolute difference of 32 bytes from vec_src_00, vec_src_16,
-// and ref. Accumulate partial sums in vec_sum_ref_lo and vec_sum_ref_hi.
-static void sad_neon_32(const uint8x16_t vec_src_00,
-                        const uint8x16_t vec_src_16, const uint8_t *ref,
-                        uint16x8_t *vec_sum_ref_lo,
-                        uint16x8_t *vec_sum_ref_hi) {
-  const uint8x16_t vec_ref_00 = vld1q_u8(ref);
-  const uint8x16_t vec_ref_16 = vld1q_u8(ref + 16);
-
-  *vec_sum_ref_lo = vabal_u8(*vec_sum_ref_lo, vget_low_u8(vec_src_00),
-                             vget_low_u8(vec_ref_00));
-  *vec_sum_ref_hi = vabal_u8(*vec_sum_ref_hi, vget_high_u8(vec_src_00),
-                             vget_high_u8(vec_ref_00));
-  *vec_sum_ref_lo = vabal_u8(*vec_sum_ref_lo, vget_low_u8(vec_src_16),
-                             vget_low_u8(vec_ref_16));
-  *vec_sum_ref_hi = vabal_u8(*vec_sum_ref_hi, vget_high_u8(vec_src_16),
-                             vget_high_u8(vec_ref_16));
-}
-
 void vpx_sad64x64x4d_neon(const uint8_t *src, int src_stride,
                           const uint8_t *const ref[4], int ref_stride,
                           uint32_t *res) {
@@ -123,102 +266,3 @@ void vpx_sad64x64x4d_neon(const uint8_t *src, int src_stride,
   res[2] = horizontal_long_add_16x8(vec_sum_ref2_lo, vec_sum_ref2_hi);
   res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
 }
-
-void vpx_sad32x32x4d_neon(const uint8_t *src, int src_stride,
-                          const uint8_t *const ref[4], int ref_stride,
-                          uint32_t *res) {
-  int i;
-  uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0);
-  uint16x8_t vec_sum_ref0_hi = vdupq_n_u16(0);
-  uint16x8_t vec_sum_ref1_lo = vdupq_n_u16(0);
-  uint16x8_t vec_sum_ref1_hi = vdupq_n_u16(0);
-  uint16x8_t vec_sum_ref2_lo = vdupq_n_u16(0);
-  uint16x8_t vec_sum_ref2_hi = vdupq_n_u16(0);
-  uint16x8_t vec_sum_ref3_lo = vdupq_n_u16(0);
-  uint16x8_t vec_sum_ref3_hi = vdupq_n_u16(0);
-  const uint8_t *ref0, *ref1, *ref2, *ref3;
-  ref0 = ref[0];
-  ref1 = ref[1];
-  ref2 = ref[2];
-  ref3 = ref[3];
-
-  for (i = 0; i < 32; ++i) {
-    const uint8x16_t vec_src_00 = vld1q_u8(src);
-    const uint8x16_t vec_src_16 = vld1q_u8(src + 16);
-
-    sad_neon_32(vec_src_00, vec_src_16, ref0, &vec_sum_ref0_lo,
-                &vec_sum_ref0_hi);
-    sad_neon_32(vec_src_00, vec_src_16, ref1, &vec_sum_ref1_lo,
-                &vec_sum_ref1_hi);
-    sad_neon_32(vec_src_00, vec_src_16, ref2, &vec_sum_ref2_lo,
-                &vec_sum_ref2_hi);
-    sad_neon_32(vec_src_00, vec_src_16, ref3, &vec_sum_ref3_lo,
-                &vec_sum_ref3_hi);
-
-    src += src_stride;
-    ref0 += ref_stride;
-    ref1 += ref_stride;
-    ref2 += ref_stride;
-    ref3 += ref_stride;
-  }
-
-  res[0] = horizontal_long_add_16x8(vec_sum_ref0_lo, vec_sum_ref0_hi);
-  res[1] = horizontal_long_add_16x8(vec_sum_ref1_lo, vec_sum_ref1_hi);
-  res[2] = horizontal_long_add_16x8(vec_sum_ref2_lo, vec_sum_ref2_hi);
-  res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
-}
-
-void vpx_sad16x16x4d_neon(const uint8_t *src, int src_stride,
-                          const uint8_t *const ref[4], int ref_stride,
-                          uint32_t *res) {
-  int i;
-  uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0);
-  uint16x8_t vec_sum_ref0_hi = vdupq_n_u16(0);
-  uint16x8_t vec_sum_ref1_lo = vdupq_n_u16(0);
-  uint16x8_t vec_sum_ref1_hi = vdupq_n_u16(0);
-  uint16x8_t vec_sum_ref2_lo = vdupq_n_u16(0);
-  uint16x8_t vec_sum_ref2_hi = vdupq_n_u16(0);
-  uint16x8_t vec_sum_ref3_lo = vdupq_n_u16(0);
-  uint16x8_t vec_sum_ref3_hi = vdupq_n_u16(0);
-  const uint8_t *ref0, *ref1, *ref2, *ref3;
-  ref0 = ref[0];
-  ref1 = ref[1];
-  ref2 = ref[2];
-  ref3 = ref[3];
-
-  for (i = 0; i < 16; ++i) {
-    const uint8x16_t vec_src = vld1q_u8(src);
-    const uint8x16_t vec_ref0 = vld1q_u8(ref0);
-    const uint8x16_t vec_ref1 = vld1q_u8(ref1);
-    const uint8x16_t vec_ref2 = vld1q_u8(ref2);
-    const uint8x16_t vec_ref3 = vld1q_u8(ref3);
-
-    vec_sum_ref0_lo =
-        vabal_u8(vec_sum_ref0_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref0));
-    vec_sum_ref0_hi = vabal_u8(vec_sum_ref0_hi, vget_high_u8(vec_src),
-                               vget_high_u8(vec_ref0));
-    vec_sum_ref1_lo =
-        vabal_u8(vec_sum_ref1_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref1));
-    vec_sum_ref1_hi = vabal_u8(vec_sum_ref1_hi, vget_high_u8(vec_src),
-                               vget_high_u8(vec_ref1));
-    vec_sum_ref2_lo =
-        vabal_u8(vec_sum_ref2_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref2));
-    vec_sum_ref2_hi = vabal_u8(vec_sum_ref2_hi, vget_high_u8(vec_src),
-                               vget_high_u8(vec_ref2));
-    vec_sum_ref3_lo =
-        vabal_u8(vec_sum_ref3_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref3));
-    vec_sum_ref3_hi = vabal_u8(vec_sum_ref3_hi, vget_high_u8(vec_src),
-                               vget_high_u8(vec_ref3));
-
-    src += src_stride;
-    ref0 += ref_stride;
-    ref1 += ref_stride;
-    ref2 += ref_stride;
-    ref3 += ref_stride;
-  }
-
-  res[0] = horizontal_long_add_16x8(vec_sum_ref0_lo, vec_sum_ref0_hi);
-  res[1] = horizontal_long_add_16x8(vec_sum_ref1_lo, vec_sum_ref1_hi);
-  res[2] = horizontal_long_add_16x8(vec_sum_ref2_lo, vec_sum_ref2_hi);
-  res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
-}
diff --git a/vpx_dsp/vpx_dsp_rtcd_defs.pl b/vpx_dsp/vpx_dsp_rtcd_defs.pl
index 4a7859237..44d6e4c71 100644
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -863,37 +863,37 @@ add_proto qw/void vpx_sad64x32x4d/, "const uint8_t *src_ptr, int src_stride, con
 specialize qw/vpx_sad64x32x4d msa sse2 vsx/;
 
 add_proto qw/void vpx_sad32x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad32x64x4d msa sse2 vsx/;
+specialize qw/vpx_sad32x64x4d neon msa sse2 vsx/;
 
 add_proto qw/void vpx_sad32x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
 specialize qw/vpx_sad32x32x4d avx2 neon msa sse2 vsx/;
 
 add_proto qw/void vpx_sad32x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad32x16x4d msa sse2 vsx/;
+specialize qw/vpx_sad32x16x4d neon msa sse2 vsx/;
 
 add_proto qw/void vpx_sad16x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad16x32x4d msa sse2 vsx/;
+specialize qw/vpx_sad16x32x4d neon msa sse2 vsx/;
 
 add_proto qw/void vpx_sad16x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
 specialize qw/vpx_sad16x16x4d neon msa sse2 vsx/;
 
 add_proto qw/void vpx_sad16x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad16x8x4d msa sse2 vsx/;
+specialize qw/vpx_sad16x8x4d neon msa sse2 vsx/;
 
 add_proto qw/void vpx_sad8x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad8x16x4d msa sse2/;
+specialize qw/vpx_sad8x16x4d neon msa sse2/;
 
 add_proto qw/void vpx_sad8x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad8x8x4d msa sse2/;
+specialize qw/vpx_sad8x8x4d neon msa sse2/;
 
 add_proto qw/void vpx_sad8x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad8x4x4d msa sse2/;
+specialize qw/vpx_sad8x4x4d neon msa sse2/;
 
 add_proto qw/void vpx_sad4x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad4x8x4d msa sse2/;
+specialize qw/vpx_sad4x8x4d neon msa sse2/;
 
 add_proto qw/void vpx_sad4x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array";
-specialize qw/vpx_sad4x4x4d msa sse2/;
+specialize qw/vpx_sad4x4x4d neon msa sse2/;
 
 add_proto qw/uint64_t vpx_sum_squares_2d_i16/, "const int16_t *src, int stride, int size";
 specialize qw/vpx_sum_squares_2d_i16 sse2 msa/;