mips msa vp9 avg optimization

Average speed improvement: ~2x-3x for the optimized functions.

Change-Id: I76f7fc00c0ffdf2b4ba41bf3819f3b6044bcdeff
This commit is contained in:
Parag Salasakar 2015-06-23 07:32:25 +05:30
parent 7b71cdb0b4
commit 7555e2b822
4 changed files with 71 additions and 2 deletions

@ -288,4 +288,16 @@ INSTANTIATE_TEST_CASE_P(
#endif
#if HAVE_MSA
// Register the MSA-optimized avg kernels with the existing AverageTest
// value-parameterized suite, mirroring the other ISA instantiations above.
// NOTE(review): the tuple appears to be (width, height, offset, block size,
// function) — confirm against the AverageTest fixture's parameter list.
INSTANTIATE_TEST_CASE_P(
MSA, AverageTest,
::testing::Values(
make_tuple(16, 16, 0, 8, &vp9_avg_8x8_msa),
make_tuple(16, 16, 5, 8, &vp9_avg_8x8_msa),
make_tuple(32, 32, 15, 8, &vp9_avg_8x8_msa),
make_tuple(16, 16, 0, 4, &vp9_avg_4x4_msa),
make_tuple(16, 16, 5, 4, &vp9_avg_4x4_msa),
make_tuple(32, 32, 15, 4, &vp9_avg_4x4_msa)));
#endif
} // namespace

@ -878,10 +878,10 @@ add_proto qw/unsigned int vp9_sub_pixel_avg_variance4x4/, "const uint8_t *src_pt
specialize qw/vp9_sub_pixel_avg_variance4x4/, "$sse_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_avg_8x8/, "const uint8_t *, int p";
specialize qw/vp9_avg_8x8 sse2 neon/;
specialize qw/vp9_avg_8x8 sse2 neon msa/;
add_proto qw/unsigned int vp9_avg_4x4/, "const uint8_t *, int p";
specialize qw/vp9_avg_4x4 sse2/;
specialize qw/vp9_avg_4x4 sse2 msa/;
add_proto qw/void vp9_minmax_8x8/, "const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max";
specialize qw/vp9_minmax_8x8 sse2/;

@ -0,0 +1,56 @@
/*
* Copyright (c) 2015 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "./vp9_rtcd.h"
#include "vp9/common/mips/msa/vp9_macros_msa.h"
// Returns the rounded average of an 8x8 block of unsigned 8-bit pixels:
//   (sum of the 64 pixels + 32) >> 6
// src        : pointer to the top-left pixel of the block.
// src_stride : distance in bytes between the starts of consecutive rows.
uint32_t vp9_avg_8x8_msa(const uint8_t *src, int32_t src_stride) {
uint32_t sum_out;
v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
v8u16 sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7;
v4u32 sum = { 0 };
// Load 8 rows of 16 bytes each; only the first 8 bytes of each row belong
// to the block.  The extra upper columns never reach word 0 of the final
// reduction below, so they are harmless.
LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
// Widen each row: each halfword lane becomes the sum of an adjacent byte pair.
HADD_UB4_UH(src0, src1, src2, src3, sum0, sum1, sum2, sum3);
HADD_UB4_UH(src4, src5, src6, src7, sum4, sum5, sum6, sum7);
// Tree-add the 8 per-row vectors into one vector of per-column-pair totals.
ADD4(sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum0, sum2, sum4, sum6);
ADD2(sum0, sum2, sum4, sum6, sum0, sum4);
sum0 += sum4;
// Horizontal reduction: fold lanes until word 0 holds the 64-pixel total.
sum = __msa_hadd_u_w(sum0, sum0);
sum0 = (v8u16)__msa_pckev_h((v8i16)sum, (v8i16)sum);
sum = __msa_hadd_u_w(sum0, sum0);
// Rounded divide by 64 (shift-right-arithmetic-rounded immediate, 6 bits).
sum = (v4u32)__msa_srari_w((v4i32)sum, 6);
sum_out = __msa_copy_u_w((v4i32)sum, 0);
return sum_out;
}
// Returns the rounded average of a 4x4 block of unsigned 8-bit pixels:
//   (sum of the 16 pixels + 8) >> 4
// src        : pointer to the top-left pixel of the block.
// src_stride : distance in bytes between the starts of consecutive rows.
uint32_t vp9_avg_4x4_msa(const uint8_t *src, int32_t src_stride) {
uint32_t sum_out;
uint32_t src0, src1, src2, src3;
v16u8 vec = { 0 };
v8u16 sum0;
v4u32 sum1;
v2u64 sum2;
// Load the four 4-byte rows as scalar words and pack them into a single
// 16-byte vector so the whole block can be reduced in one pass.
LW4(src, src_stride, src0, src1, src2, src3);
INSERT_W4_UB(src0, src1, src2, src3, vec);
// Successive widening horizontal adds: bytes -> halfwords -> words.
sum0 = __msa_hadd_u_h(vec, vec);
sum1 = __msa_hadd_u_w(sum0, sum0);
// Repack and keep folding until the doubleword holds the 16-pixel total.
sum0 = (v8u16)__msa_pckev_h((v8i16)sum1, (v8i16)sum1);
sum1 = __msa_hadd_u_w(sum0, sum0);
sum2 = __msa_hadd_u_d(sum1, sum1);
// Rounded divide by 16; the total (max 16*255) fits in the low 32-bit word.
sum1 = (v4u32)__msa_srari_w((v4i32)sum2, 4);
sum_out = __msa_copy_u_w((v4i32)sum1, 0);
return sum_out;
}

@ -157,5 +157,6 @@ VP9_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/vp9_fdct8x8_msa.c
VP9_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/vp9_fdct16x16_msa.c
VP9_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/vp9_fdct32x32_msa.c
VP9_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/vp9_fdct_msa.h
VP9_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/vp9_avg_msa.c
VP9_CX_SRCS-yes := $(filter-out $(VP9_CX_SRCS_REMOVE-yes),$(VP9_CX_SRCS-yes))