Merge "Add vp9_sad32x32x4d_neon Neon intrinsic function."

commit d1e6b8231a
Frank Galligan 2015-01-28 10:35:50 -08:00, committed by Gerrit Code Review
3 changed files with 67 additions and 1 deletion
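Background for the diff below: an MxNx4D SAD function computes four sums of absolute differences in one call, comparing a single MxN source block against four candidate reference blocks, which is how the encoder's motion search evaluates several candidates at once. A minimal scalar sketch of the semantics (illustration only, not the project's C reference implementation):

#include <stdint.h>
#include <stdlib.h>

/* Scalar illustration of the vp9_sad32x32x4d semantics: for each of the
 * four reference blocks, sum |src - ref| over all 32x32 pixels. */
static void sad32x32x4d_scalar(const uint8_t *src_ptr, int src_stride,
                               const uint8_t *const ref_ptr[4],
                               int ref_stride, unsigned int *sad_array) {
  int r, row, col;
  for (r = 0; r < 4; ++r) {
    const uint8_t *src = src_ptr;
    const uint8_t *ref = ref_ptr[r];
    unsigned int sad = 0;
    for (row = 0; row < 32; ++row) {
      for (col = 0; col < 32; ++col)
        sad += abs(src[col] - ref[col]);
      src += src_stride;
      ref += ref_stride;
    }
    sad_array[r] = sad;
  }
}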

test/sad_test.cc

@@ -1245,9 +1245,11 @@ INSTANTIATE_TEST_CASE_P(AVX2, SADx4Test, ::testing::Values(
 
 #if HAVE_NEON
 const SadMxNx4Func sad_16x16x4d_neon = vp9_sad16x16x4d_neon;
+const SadMxNx4Func sad_32x32x4d_neon = vp9_sad32x32x4d_neon;
 const SadMxNx4Func sad_64x64x4d_neon = vp9_sad64x64x4d_neon;
 INSTANTIATE_TEST_CASE_P(NEON, SADx4Test, ::testing::Values(
                         make_tuple(16, 16, sad_16x16x4d_neon, -1),
+                        make_tuple(32, 32, sad_32x32x4d_neon, -1),
                         make_tuple(64, 64, sad_64x64x4d_neon, -1)));
 #endif  // HAVE_NEON
 #endif  // CONFIG_VP9_ENCODER
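The new tuple registers the 32x32 NEON variant with the SADx4Test harness, which checks it against the C reference (the trailing -1 is the harness's bit-depth parameter, selecting the ordinary 8-bit path). For reference, a hypothetical call site showing the shape of the four-pointer interface under test; the strides and helper name here are made up for illustration:

#include <stdint.h>

/* Prototype matching the add_proto signature in vp9_rtcd_defs.pl below. */
void vp9_sad32x32x4d_neon(const uint8_t *src, int src_stride,
                          const uint8_t *const ref[4], int ref_stride,
                          unsigned int *res);

/* Hypothetical call site: one call produces the SAD of the same 32x32
 * source block against four candidate reference positions. */
static void example(const uint8_t *src, const uint8_t *const refs[4]) {
  unsigned int sads[4];
  vp9_sad32x32x4d_neon(src, 32, refs, 64, sads);
  /* sads[i] now holds the SAD against refs[i]. */
}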

vp9/common/vp9_rtcd_defs.pl

@@ -1064,7 +1064,7 @@ add_proto qw/void vp9_sad16x32x4d/, "const uint8_t *src_ptr, int src_stride, co
 specialize qw/vp9_sad16x32x4d sse2/;
 
 add_proto qw/void vp9_sad32x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array";
-specialize qw/vp9_sad32x32x4d sse2 avx2/;
+specialize qw/vp9_sad32x32x4d sse2 avx2 neon/;
 
 add_proto qw/void vp9_sad16x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array";
 specialize qw/vp9_sad16x16x4d sse2 neon/;
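In vp9_rtcd_defs.pl, add_proto declares a function in the generated vp9_rtcd.h and specialize lists its optimized implementations; adding neon to the specialize line is what routes runtime dispatch to the new intrinsic on NEON-capable CPUs. A hedged sketch of the kind of dispatch code the RTCD generator emits (not copied from the generated file):

#include <stdint.h>

void vp9_sad32x32x4d_c(const uint8_t *src_ptr, int src_stride,
                       const uint8_t *const ref_ptr[], int ref_stride,
                       unsigned int *sad_array);
void vp9_sad32x32x4d_neon(const uint8_t *src_ptr, int src_stride,
                          const uint8_t *const ref_ptr[], int ref_stride,
                          unsigned int *sad_array);

/* Function pointer starts at the C reference and is upgraded during
 * runtime CPU detection to the best available implementation. */
void (*vp9_sad32x32x4d)(const uint8_t *src_ptr, int src_stride,
                        const uint8_t *const ref_ptr[], int ref_stride,
                        unsigned int *sad_array) = vp9_sad32x32x4d_c;

static void setup_rtcd_sketch(int cpu_has_neon) {
  if (cpu_has_neon) vp9_sad32x32x4d = vp9_sad32x32x4d_neon;
}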

vp9/encoder/arm/neon/vp9_sad4d_neon.c

@@ -60,6 +60,26 @@ static void sad_neon_64(const uint8x16_t vec_src_00,
                          vget_high_u8(vec_ref_48));
 }
 
+// Calculate the absolute difference of 32 bytes from vec_src_00, vec_src_16,
+// and ref. Accumulate partial sums in vec_sum_ref_lo and vec_sum_ref_hi.
+static void sad_neon_32(const uint8x16_t vec_src_00,
+                        const uint8x16_t vec_src_16,
+                        const uint8_t *ref,
+                        uint16x8_t *vec_sum_ref_lo,
+                        uint16x8_t *vec_sum_ref_hi) {
+  const uint8x16_t vec_ref_00 = vld1q_u8(ref);
+  const uint8x16_t vec_ref_16 = vld1q_u8(ref + 16);
+
+  *vec_sum_ref_lo = vabal_u8(*vec_sum_ref_lo, vget_low_u8(vec_src_00),
+                             vget_low_u8(vec_ref_00));
+  *vec_sum_ref_hi = vabal_u8(*vec_sum_ref_hi, vget_high_u8(vec_src_00),
+                             vget_high_u8(vec_ref_00));
+  *vec_sum_ref_lo = vabal_u8(*vec_sum_ref_lo, vget_low_u8(vec_src_16),
+                             vget_low_u8(vec_ref_16));
+  *vec_sum_ref_hi = vabal_u8(*vec_sum_ref_hi, vget_high_u8(vec_src_16),
+                             vget_high_u8(vec_ref_16));
+}
+
 void vp9_sad64x64x4d_neon(const uint8_t *src, int src_stride,
                           const uint8_t* const ref[4], int ref_stride,
                           unsigned int *res) {
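A note on the accumulator widths used above: per loop iteration, each uint16 lane of vec_sum_ref_lo/vec_sum_ref_hi receives two widened absolute differences via vabal_u8 (one from vec_src_00, one from vec_src_16), each at most 255. Over the 32 rows of a 32x32 block that is at most 2 * 255 * 32 = 16320, well under the uint16 maximum of 65535, so the uint16x8_t accumulators cannot overflow. A tiny self-checking restatement of that bound:

#include <assert.h>
#include <stdint.h>

int main(void) {
  /* Each uint16 lane of vec_sum_ref_lo/vec_sum_ref_hi accumulates two
   * absolute differences per row (one per vabal_u8 call touching that
   * half), each at most 255, over the 32 rows of a 32x32 block. */
  const uint32_t max_per_lane = 2u * 255u * 32u; /* = 16320 */
  assert(max_per_lane <= UINT16_MAX);            /* no overflow possible */
  return 0;
}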
@@ -106,6 +126,50 @@ void vp9_sad64x64x4d_neon(const uint8_t *src, int src_stride,
   res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
 }
 
+void vp9_sad32x32x4d_neon(const uint8_t *src, int src_stride,
+                          const uint8_t* const ref[4], int ref_stride,
+                          unsigned int *res) {
+  int i;
+  uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0);
+  uint16x8_t vec_sum_ref0_hi = vdupq_n_u16(0);
+  uint16x8_t vec_sum_ref1_lo = vdupq_n_u16(0);
+  uint16x8_t vec_sum_ref1_hi = vdupq_n_u16(0);
+  uint16x8_t vec_sum_ref2_lo = vdupq_n_u16(0);
+  uint16x8_t vec_sum_ref2_hi = vdupq_n_u16(0);
+  uint16x8_t vec_sum_ref3_lo = vdupq_n_u16(0);
+  uint16x8_t vec_sum_ref3_hi = vdupq_n_u16(0);
+  const uint8_t *ref0, *ref1, *ref2, *ref3;
+  ref0 = ref[0];
+  ref1 = ref[1];
+  ref2 = ref[2];
+  ref3 = ref[3];
+
+  for (i = 0; i < 32; ++i) {
+    const uint8x16_t vec_src_00 = vld1q_u8(src);
+    const uint8x16_t vec_src_16 = vld1q_u8(src + 16);
+
+    sad_neon_32(vec_src_00, vec_src_16, ref0,
+                &vec_sum_ref0_lo, &vec_sum_ref0_hi);
+    sad_neon_32(vec_src_00, vec_src_16, ref1,
+                &vec_sum_ref1_lo, &vec_sum_ref1_hi);
+    sad_neon_32(vec_src_00, vec_src_16, ref2,
+                &vec_sum_ref2_lo, &vec_sum_ref2_hi);
+    sad_neon_32(vec_src_00, vec_src_16, ref3,
+                &vec_sum_ref3_lo, &vec_sum_ref3_hi);
+
+    src += src_stride;
+    ref0 += ref_stride;
+    ref1 += ref_stride;
+    ref2 += ref_stride;
+    ref3 += ref_stride;
+  }
+
+  res[0] = horizontal_long_add_16x8(vec_sum_ref0_lo, vec_sum_ref0_hi);
+  res[1] = horizontal_long_add_16x8(vec_sum_ref1_lo, vec_sum_ref1_hi);
+  res[2] = horizontal_long_add_16x8(vec_sum_ref2_lo, vec_sum_ref2_hi);
+  res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
+}
+
 void vp9_sad16x16x4d_neon(const uint8_t *src, int src_stride,
                           const uint8_t* const ref[4], int ref_stride,
                           unsigned int *res) {
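horizontal_long_add_16x8, called above, is an existing helper in this file whose body falls outside the hunks shown; it reduces a lo/hi pair of uint16x8_t accumulators to a single scalar sum. A sketch of how such a reduction can be written, widening before summing so the total cannot overflow (assumed shape, built only from standard NEON intrinsics; not necessarily the file's exact code):

#include <arm_neon.h>

/* Sum all sixteen uint16 lanes of two vectors into one unsigned int,
 * widening to 32 and then 64 bits as lanes are combined. */
static unsigned int horizontal_long_add_16x8_sketch(const uint16x8_t vec_lo,
                                                    const uint16x8_t vec_hi) {
  const uint32x4_t a = vaddl_u16(vget_low_u16(vec_lo), vget_high_u16(vec_lo));
  const uint32x4_t b = vaddl_u16(vget_low_u16(vec_hi), vget_high_u16(vec_hi));
  const uint32x4_t c = vaddq_u32(a, b);  /* four uint32 partial sums */
  const uint64x2_t d = vpaddlq_u32(c);   /* pairwise widen to uint64 */
  const uint64x1_t e = vadd_u64(vget_low_u64(d), vget_high_u64(d));
  return (unsigned int)vget_lane_u64(e, 0);
}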