ppc: Add comp_avg_pred_vsx
Change-Id: I59788cd98231e707239c2ad95ae54f67cfe24e10
commit a7f8bd451b
parent f7e767d8ee
@@ -156,4 +156,8 @@ INSTANTIATE_TEST_CASE_P(C, AvgPredTest,
 INSTANTIATE_TEST_CASE_P(SSE2, AvgPredTest,
                         ::testing::Values(&vpx_comp_avg_pred_sse2));
 #endif  // HAVE_SSE2
+#if HAVE_VSX
+INSTANTIATE_TEST_CASE_P(VSX, AvgPredTest,
+                        ::testing::Values(&vpx_comp_avg_pred_vsx));
+#endif  // HAVE_VSX
 }  // namespace
vpx_dsp/ppc/variance_vsx.c (new file, 61 lines)
@@ -0,0 +1,61 @@
+/*
+ *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+
+#include "./vpx_dsp_rtcd.h"
+#include "vpx_dsp/ppc/types_vsx.h"
+
+void vpx_comp_avg_pred_vsx(uint8_t *comp_pred, const uint8_t *pred, int width,
+                           int height, const uint8_t *ref, int ref_stride) {
+  int i, j;
+  /* comp_pred and pred must be 16 byte aligned. */
+  assert(((intptr_t)comp_pred & 0xf) == 0);
+  assert(((intptr_t)pred & 0xf) == 0);
+  if (width >= 16) {
+    for (i = 0; i < height; ++i) {
+      for (j = 0; j < width; j += 16) {
+        const uint8x16_t v = vec_avg(vec_vsx_ld(j, pred), vec_vsx_ld(j, ref));
+        vec_vsx_st(v, j, comp_pred);
+      }
+      comp_pred += width;
+      pred += width;
+      ref += ref_stride;
+    }
+  } else if (width == 8) {
+    // Process 2 lines at a time.
+    for (i = 0; i < height / 2; ++i) {
+      const uint8x16_t r0 = vec_vsx_ld(0, ref);
+      const uint8x16_t r1 = vec_vsx_ld(0, ref + ref_stride);
+      const uint8x16_t r = xxpermdi(r0, r1, 0);
+      const uint8x16_t v = vec_avg(vec_vsx_ld(0, pred), r);
+      vec_vsx_st(v, 0, comp_pred);
+      comp_pred += 16;  // width * 2;
+      pred += 16;       // width * 2;
+      ref += ref_stride * 2;
+    }
+  } else {
+    assert(width == 4);
+    // Process 4 lines at a time.
+    for (i = 0; i < height / 4; ++i) {
+      const uint32x4_t r0 = (uint32x4_t)vec_vsx_ld(0, ref);
+      const uint32x4_t r1 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride);
+      const uint32x4_t r2 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride * 2);
+      const uint32x4_t r3 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride * 3);
+      const uint8x16_t r =
+          (uint8x16_t)xxpermdi(vec_mergeh(r0, r1), vec_mergeh(r2, r3), 0);
+      const uint8x16_t v = vec_avg(vec_vsx_ld(0, pred), r);
+      vec_vsx_st(v, 0, comp_pred);
+      comp_pred += 16;  // width * 4;
+      pred += 16;       // width * 4;
+      ref += ref_stride * 4;
+    }
+  }
+}
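For context on the intrinsics above: vec_avg() on uint8x16_t is the per-byte
rounding average (a + b + 1) >> 1, and the width == 8 and width == 4 branches
only use xxpermdi / vec_mergeh to pack two or four short rows of ref into one
16-byte vector so the same full-width average still applies. comp_pred and
pred are stored contiguously (stride == width), which is why those branches
advance them by 16. A minimal scalar sketch of the behaviour the VSX routine
has to reproduce is shown below; the helper name is illustrative, and this is
not libvpx's actual C reference implementation.

    #include <stdint.h>

    /* Illustrative only: plain-C model of the rounded average that
     * vpx_comp_avg_pred_vsx() computes.  comp_pred and pred are packed
     * (stride == width); only ref carries an independent stride. */
    static void comp_avg_pred_scalar_sketch(uint8_t *comp_pred,
                                            const uint8_t *pred, int width,
                                            int height, const uint8_t *ref,
                                            int ref_stride) {
      int i, j;
      for (i = 0; i < height; ++i) {
        for (j = 0; j < width; ++j) {
          comp_pred[j] = (uint8_t)((pred[j] + ref[j] + 1) >> 1);
        }
        comp_pred += width;
        pred += width;
        ref += ref_stride;
      }
    }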
@@ -333,6 +333,7 @@ DSP_SRCS-$(HAVE_SSE2) += x86/avg_pred_sse2.c
 DSP_SRCS-$(HAVE_SSE2) += x86/variance_sse2.c # Contains SSE2 and SSSE3
 DSP_SRCS-$(HAVE_AVX2) += x86/variance_avx2.c
 DSP_SRCS-$(HAVE_AVX2) += x86/variance_impl_avx2.c
+DSP_SRCS-$(HAVE_VSX) += ppc/variance_vsx.c
 
 ifeq ($(ARCH_X86_64),yes)
 DSP_SRCS-$(HAVE_SSE2) += x86/ssim_opt_x86_64.asm
@@ -1177,7 +1177,7 @@ add_proto qw/unsigned int vpx_get4x4sse_cs/, "const unsigned char *src_ptr, int
 specialize qw/vpx_get4x4sse_cs neon msa/;
 
 add_proto qw/void vpx_comp_avg_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride";
-specialize qw/vpx_comp_avg_pred sse2/;
+specialize qw/vpx_comp_avg_pred sse2 vsx/;
 
 #
 # Subpixel Variance
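The widened specialize line asks the RTCD generator to emit runtime dispatch,
so callers reach the kernel through a vpx_comp_avg_pred entry point that is
redirected to the VSX version when the CPU reports VSX support. A rough,
hypothetical sketch of that dispatch follows; the real vpx_dsp_rtcd.h is
generated and differs in naming and detail, and HAS_VSX_SKETCH stands in for
the real capability bit.

    #include <stdint.h>

    #define HAS_VSX_SKETCH 0x01 /* stand-in for the real VSX capability flag */

    /* The two implementations selected between (prototypes follow the
     * add_proto line above). */
    void vpx_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width,
                             int height, const uint8_t *ref, int ref_stride);
    void vpx_comp_avg_pred_vsx(uint8_t *comp_pred, const uint8_t *pred,
                               int width, int height, const uint8_t *ref,
                               int ref_stride);

    /* Callers use a function pointer that defaults to the scalar path. */
    void (*vpx_comp_avg_pred)(uint8_t *, const uint8_t *, int, int,
                              const uint8_t *, int) = vpx_comp_avg_pred_c;

    void setup_comp_avg_pred_sketch(int cpu_caps) {
      vpx_comp_avg_pred = vpx_comp_avg_pred_c; /* always-available fallback */
      if (cpu_caps & HAS_VSX_SKETCH) vpx_comp_avg_pred = vpx_comp_avg_pred_vsx;
    }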