Merge changes Ib8dd96f7,Ie9854b77

* changes:
  neon variance: process 4x blocks
  use memcpy for unaligned neon stores
commit e7cac13016
test/variance_test.cc

@@ -1259,7 +1259,9 @@ INSTANTIATE_TEST_CASE_P(
                       VarianceParams(4, 3, &vpx_variance16x8_neon),
                       VarianceParams(3, 4, &vpx_variance8x16_neon),
                       VarianceParams(3, 3, &vpx_variance8x8_neon),
-                      VarianceParams(3, 2, &vpx_variance8x4_neon)));
+                      VarianceParams(3, 2, &vpx_variance8x4_neon),
+                      VarianceParams(2, 3, &vpx_variance4x8_neon),
+                      VarianceParams(2, 2, &vpx_variance4x4_neon)));

 INSTANTIATE_TEST_CASE_P(
     NEON, VpxSubpelVarianceTest,
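Note on the parameters: VarianceParams takes log2(width) and log2(height) ahead of the function pointer, so the two added entries exercise the new 4x8 and 4x4 NEON kernels. This reading of the convention is inferred from the existing entries, e.g. (3, 4) pairing with vpx_variance8x16_neon.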
vp8/common/arm/neon/bilinearpredict_neon.c

@@ -11,6 +11,7 @@
 #include <arm_neon.h>
 #include <string.h>
 #include "./vpx_config.h"
+#include "vpx_dsp/arm/mem_neon.h"

 static const uint8_t bifilter4_coeff[8][2] = { { 128, 0 }, { 112, 16 },
                                                { 96, 32 },  { 80, 48 },
@@ -21,35 +22,6 @@ static INLINE uint8x8_t load_and_shift(const unsigned char *a) {
   return vreinterpret_u8_u64(vshl_n_u64(vreinterpret_u64_u8(vld1_u8(a)), 32));
 }

-static INLINE void store4x4(unsigned char *dst, int dst_stride,
-                            const uint8x8_t a0, const uint8x8_t a1) {
-  if (!((uintptr_t)dst & 0x3) && !(dst_stride & 0x3)) {
-    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 0);
-    dst += dst_stride;
-    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 1);
-    dst += dst_stride;
-    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 0);
-    dst += dst_stride;
-    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 1);
-  } else {
-    // Store to the aligned local buffer and memcpy instead of vget_lane_u8
-    // which is really really slow.
-    uint32_t output_buffer[4];
-    vst1_lane_u32(output_buffer, vreinterpret_u32_u8(a0), 0);
-    vst1_lane_u32(output_buffer + 1, vreinterpret_u32_u8(a0), 1);
-    vst1_lane_u32(output_buffer + 2, vreinterpret_u32_u8(a1), 0);
-    vst1_lane_u32(output_buffer + 3, vreinterpret_u32_u8(a1), 1);
-
-    memcpy(dst, output_buffer, 4);
-    dst += dst_stride;
-    memcpy(dst, output_buffer + 1, 4);
-    dst += dst_stride;
-    memcpy(dst, output_buffer + 2, 4);
-    dst += dst_stride;
-    memcpy(dst, output_buffer + 3, 4);
-  }
-}
-
 void vp8_bilinear_predict4x4_neon(unsigned char *src_ptr,
                                   int src_pixels_per_line, int xoffset,
                                   int yoffset, unsigned char *dst_ptr,
@@ -122,7 +94,7 @@ void vp8_bilinear_predict4x4_neon(unsigned char *src_ptr,

   // secondpass_filter
   if (yoffset == 0) {  // skip_2ndpass_filter
-    store4x4(dst_ptr, dst_pitch, e0, e1);
+    store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(e0, e1));
   } else {
     uint8x8_t f0, f1;
     const uint8x8_t filter0 = vdup_n_u8(bifilter4_coeff[yoffset][0]);
@@ -140,7 +112,7 @@ void vp8_bilinear_predict4x4_neon(unsigned char *src_ptr,
     f0 = vqrshrn_n_u16(b0, 7);
     f1 = vqrshrn_n_u16(b1, 7);

-    store4x4(dst_ptr, dst_pitch, f0, f1);
+    store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(f0, f1));
   }
 }

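Reviewer note: both vp8 files drop the same private store4x4() helper in favor of the shared store_unaligned_u8q() added to mem_neon.h below. At the call sites, vcombine_u8(e0, e1) packs the two 8-byte row pairs into one 16-byte vector, so its four 32-bit lanes are exactly the four rows store4x4 used to write. The sketch below (an illustration, not code from this patch) shows the core of the trick for a single row:

#include <arm_neon.h>
#include <string.h>

// Illustration only. The removed fast path wrote through a cast pointer:
//   vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 0);
// which is undefined behavior when dst is not 4-byte aligned and invites the
// compiler to add alignment hints. A fixed 4-byte memcpy keeps the access
// byte-addressed, and compilers lower it to a single unaligned-safe store.
static void store_row_unaligned(unsigned char *dst, const uint8x8_t a0) {
  const uint32_t row = vget_lane_u32(vreinterpret_u32_u8(a0), 0);
  memcpy(dst, &row, 4);
}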
vp8/common/arm/neon/sixtappredict_neon.c

@@ -11,6 +11,7 @@
 #include <arm_neon.h>
 #include <string.h>
 #include "./vpx_config.h"
+#include "vpx_dsp/arm/mem_neon.h"
 #include "vpx_ports/mem.h"

 static const int8_t vp8_sub_pel_filters[8][8] = {
@@ -42,35 +43,6 @@ static INLINE uint8x8_t load_and_shift(const unsigned char *a) {
   return vreinterpret_u8_u64(vshl_n_u64(vreinterpret_u64_u8(vld1_u8(a)), 32));
 }

-static INLINE void store4x4(unsigned char *dst, int dst_stride,
-                            const uint8x8_t a0, const uint8x8_t a1) {
-  if (!((uintptr_t)dst & 0x3) && !(dst_stride & 0x3)) {
-    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 0);
-    dst += dst_stride;
-    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 1);
-    dst += dst_stride;
-    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 0);
-    dst += dst_stride;
-    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 1);
-  } else {
-    // Store to the aligned local buffer and memcpy instead of vget_lane_u8
-    // which is really really slow.
-    uint32_t output_buffer[4];
-    vst1_lane_u32(output_buffer, vreinterpret_u32_u8(a0), 0);
-    vst1_lane_u32(output_buffer + 1, vreinterpret_u32_u8(a0), 1);
-    vst1_lane_u32(output_buffer + 2, vreinterpret_u32_u8(a1), 0);
-    vst1_lane_u32(output_buffer + 3, vreinterpret_u32_u8(a1), 1);
-
-    memcpy(dst, output_buffer, 4);
-    dst += dst_stride;
-    memcpy(dst, output_buffer + 1, 4);
-    dst += dst_stride;
-    memcpy(dst, output_buffer + 2, 4);
-    dst += dst_stride;
-    memcpy(dst, output_buffer + 3, 4);
-  }
-}
-
 static INLINE void filter_add_accumulate(const uint8x16_t a, const uint8x16_t b,
                                          const uint8x8_t filter, uint16x8_t *c,
                                          uint16x8_t *d) {
@@ -180,7 +152,7 @@ static INLINE void yonly4x4(const unsigned char *src, int src_stride,
   e0 = vqrshrun_n_s16(d0, 7);
   e1 = vqrshrun_n_s16(d1, 7);

-  store4x4(dst, dst_stride, e0, e1);
+  store_unaligned_u8q(dst, dst_stride, vcombine_u8(e0, e1));
 }

 void vp8_sixtap_predict4x4_neon(unsigned char *src_ptr, int src_pixels_per_line,
@@ -297,7 +269,7 @@ void vp8_sixtap_predict4x4_neon(unsigned char *src_ptr, int src_pixels_per_line,
   b2 = vqrshrun_n_s16(e4567, 7);

   if (yoffset == 0) {  // firstpass_filter4x4_only
-    store4x4(dst_ptr, dst_pitch, b0, b2);
+    store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(b0, b2));
     return;
   }

@@ -411,7 +383,7 @@ void vp8_sixtap_predict4x4_neon(unsigned char *src_ptr, int src_pixels_per_line,
   e0 = vqrshrun_n_s16(d0, 7);
   e1 = vqrshrun_n_s16(d1, 7);

-  store4x4(dst_ptr, dst_pitch, e0, e1);
+  store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(e0, e1));
 }

 void vp8_sixtap_predict8x4_neon(unsigned char *src_ptr, int src_pixels_per_line,

vpx_dsp/arm/mem_neon.h

@@ -69,6 +69,48 @@ static INLINE void store_s16q_to_tran_low(tran_low_t *buf, const int16x8_t a) {
 #endif
 }

+// Propagate type information to the compiler. Without this the compiler may
+// assume the required alignment of uint32_t (4 bytes) and add alignment hints
+// to the memory access.
+//
+// This is used for functions operating on uint8_t which wish to load or store 4
+// values at a time but which may not be on 4 byte boundaries.
+static INLINE void uint32_to_mem(uint8_t *buf, uint32_t a) {
+  memcpy(buf, &a, 4);
+}
+
+// Load 4 sets of 4 bytes when alignment is not guaranteed.
+static INLINE uint8x16_t load_unaligned_u8q(const uint8_t *buf, int stride) {
+  uint32_t a;
+  uint32x4_t a_u32 = vdupq_n_u32(0);
+  memcpy(&a, buf, 4);
+  buf += stride;
+  a_u32 = vld1q_lane_u32(&a, a_u32, 0);
+  memcpy(&a, buf, 4);
+  buf += stride;
+  a_u32 = vld1q_lane_u32(&a, a_u32, 1);
+  memcpy(&a, buf, 4);
+  buf += stride;
+  a_u32 = vld1q_lane_u32(&a, a_u32, 2);
+  memcpy(&a, buf, 4);
+  buf += stride;
+  a_u32 = vld1q_lane_u32(&a, a_u32, 3);
+  return vreinterpretq_u8_u32(a_u32);
+}
+
+// Store 4 sets of 4 bytes when alignment is not guaranteed.
+static INLINE void store_unaligned_u8q(uint8_t *buf, int stride,
+                                       const uint8x16_t a) {
+  const uint32x4_t a_u32 = vreinterpretq_u32_u8(a);
+  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 0));
+  buf += stride;
+  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 1));
+  buf += stride;
+  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 2));
+  buf += stride;
+  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 3));
+}
+
 // Load 2 sets of 4 bytes when alignment is guaranteed.
 static INLINE uint8x8_t load_u8(const uint8_t *buf, int stride) {
   uint32x2_t a = vdup_n_u32(0);

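A quick way to sanity-check the new helpers is a round trip at a deliberately misaligned address. The harness below is an illustration only: it assumes a NEON-capable target and that the two helpers above are in scope (e.g. via vpx_dsp/arm/mem_neon.h inside the libvpx tree).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "vpx_dsp/arm/mem_neon.h"

int main(void) {
  uint8_t src[64], dst[64];
  int i;
  for (i = 0; i < 64; ++i) src[i] = (uint8_t)i;
  memset(dst, 0, sizeof(dst));

  // Offset by one byte so no row is 4-byte aligned; row stride is 8.
  store_unaligned_u8q(dst + 1, 8, load_unaligned_u8q(src + 1, 8));

  for (i = 0; i < 4; ++i) {
    if (memcmp(dst + 1 + i * 8, src + 1 + i * 8, 4) != 0) {
      printf("row %d mismatch\n", i);
      return 1;
    }
  }
  printf("ok\n");
  return 0;
}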
vpx_dsp/arm/variance_neon.c

@@ -14,6 +14,7 @@
 #include "./vpx_config.h"

 #include "vpx/vpx_integer.h"
+#include "vpx_dsp/arm/mem_neon.h"
 #include "vpx_ports/mem.h"

 static INLINE int horizontal_add_s16x8(const int16x8_t v_16x8) {
@@ -31,6 +32,47 @@ static INLINE int horizontal_add_s32x4(const int32x4_t v_32x4) {
   return vget_lane_s32(c, 0);
 }

+// w * h must be less than 2048 or sum_s16 may overflow.
+// Process a block of width 4 four rows at a time.
+static void variance_neon_w4x4(const uint8_t *a, int a_stride, const uint8_t *b,
+                               int b_stride, int h, uint32_t *sse, int *sum) {
+  int i;
+  int16x8_t sum_s16 = vdupq_n_s16(0);
+  int32x4_t sse_lo_s32 = vdupq_n_s32(0);
+  int32x4_t sse_hi_s32 = vdupq_n_s32(0);
+
+  for (i = 0; i < h; i += 4) {
+    const uint8x16_t a_u8 = load_unaligned_u8q(a, a_stride);
+    const uint8x16_t b_u8 = load_unaligned_u8q(b, b_stride);
+    const uint16x8_t diff_lo_u16 =
+        vsubl_u8(vget_low_u8(a_u8), vget_low_u8(b_u8));
+    const uint16x8_t diff_hi_u16 =
+        vsubl_u8(vget_high_u8(a_u8), vget_high_u8(b_u8));
+
+    const int16x8_t diff_lo_s16 = vreinterpretq_s16_u16(diff_lo_u16);
+    const int16x8_t diff_hi_s16 = vreinterpretq_s16_u16(diff_hi_u16);
+
+    sum_s16 = vaddq_s16(sum_s16, diff_lo_s16);
+    sum_s16 = vaddq_s16(sum_s16, diff_hi_s16);
+
+    sse_lo_s32 = vmlal_s16(sse_lo_s32, vget_low_s16(diff_lo_s16),
+                           vget_low_s16(diff_lo_s16));
+    sse_lo_s32 = vmlal_s16(sse_lo_s32, vget_high_s16(diff_lo_s16),
+                           vget_high_s16(diff_lo_s16));
+
+    sse_hi_s32 = vmlal_s16(sse_hi_s32, vget_low_s16(diff_hi_s16),
+                           vget_low_s16(diff_hi_s16));
+    sse_hi_s32 = vmlal_s16(sse_hi_s32, vget_high_s16(diff_hi_s16),
+                           vget_high_s16(diff_hi_s16));
+
+    a += 4 * a_stride;
+    b += 4 * b_stride;
+  }
+
+  *sum = horizontal_add_s16x8(sum_s16);
+  *sse = (uint32_t)horizontal_add_s32x4(vaddq_s32(sse_lo_s32, sse_hi_s32));
+}
+
 // w * h must be less than 2048 or sum_s16 may overflow.
 // Process a block of any size where the width is divisible by 16.
 static void variance_neon_w16(const uint8_t *a, int a_stride, const uint8_t *b,
@@ -127,7 +169,9 @@ void vpx_get16x16var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
                                             const uint8_t *b, int b_stride, \
                                             unsigned int *sse) {            \
     int sum;                                                                \
-    if (n == 8)                                                             \
+    if (n == 4)                                                             \
+      variance_neon_w4x4(a, a_stride, b, b_stride, m, sse, &sum);           \
+    else if (n == 8)                                                        \
       variance_neon_w8x2(a, a_stride, b, b_stride, m, sse, &sum);           \
     else                                                                    \
       variance_neon_w16(a, a_stride, b, b_stride, n, m, sse, &sum);         \
@@ -137,6 +181,8 @@ void vpx_get16x16var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
     return *sse - (uint32_t)(((int64_t)sum * sum) >> shift);                \
   }

+varianceNxM(4, 4, 4);
+varianceNxM(4, 8, 5);
 varianceNxM(8, 4, 5);
 varianceNxM(8, 8, 6);
 varianceNxM(8, 16, 7);

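The macro's return expression is the usual variance identity scaled by the pixel count: w*h*variance = sse - sum*sum/(w*h), with the division done as a shift since w and h are powers of two (shift = log2(w*h), hence varianceNxM(4, 4, 4)). The "w * h must be less than 2048" comment bounds the int16 lane accumulators in sum_s16; a scalar model with wide accumulators needs no such bound. Below is a plain-C sketch of the quantity each vpx_varianceWxH_neon function computes, for illustration only (it is not the library's C reference implementation):

#include <stdint.h>

// Scalar model of vpx_varianceWxH: shift = log2(w * h).
static uint32_t variance_ref(const uint8_t *a, int a_stride, const uint8_t *b,
                             int b_stride, int w, int h, int shift,
                             uint32_t *sse) {
  int64_t sum = 0;
  uint64_t sse64 = 0;
  int i, j;
  for (i = 0; i < h; ++i) {
    for (j = 0; j < w; ++j) {
      const int diff = a[j] - b[j];
      sum += diff;
      sse64 += (int64_t)diff * diff;
    }
    a += a_stride;
    b += b_stride;
  }
  *sse = (uint32_t)sse64;
  // variance = E[d^2] - E[d]^2, here kept scaled by the pixel count w * h.
  return *sse - (uint32_t)((sum * sum) >> shift);
}

For the new 4x4 kernel this would be called as variance_ref(a, a_stride, b, b_stride, 4, 4, 4, &sse).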
vpx_dsp/vpx_dsp_rtcd.pl

@@ -1142,12 +1142,10 @@ add_proto qw/unsigned int vpx_variance8x4/, "const uint8_t *src_ptr, int source_
 specialize qw/vpx_variance8x4 sse2 neon msa/;

 add_proto qw/unsigned int vpx_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-# TODO(johannkoenig): neon
-specialize qw/vpx_variance4x8 sse2 msa/;
+specialize qw/vpx_variance4x8 sse2 neon msa/;

 add_proto qw/unsigned int vpx_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-# TODO(johannkoenig): neon
-specialize qw/vpx_variance4x4 sse2 msa/;
+specialize qw/vpx_variance4x4 sse2 neon msa/;

 #
 # Specialty Variance

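The specialize lines feed libvpx's run-time CPU detection (RTCD) generator; listing neon here is what routes vpx_variance4x8/4x4 calls to the new kernels on NEON hardware. Roughly, the generated vpx_dsp_rtcd.h amounts to the following per symbol. This is a hedged sketch of the dispatch pattern, not the literal header, which vpx_dsp_rtcd.pl emits at build time:

#include <stdint.h>
#include "vpx_ports/arm.h"  // HAS_NEON for the runtime flags

// The _c and _neon implementations exist in libvpx; the wiring below is
// simplified for illustration.
unsigned int vpx_variance4x4_c(const uint8_t *src_ptr, int source_stride,
                               const uint8_t *ref_ptr, int ref_stride,
                               unsigned int *sse);
unsigned int vpx_variance4x4_neon(const uint8_t *src_ptr, int source_stride,
                                  const uint8_t *ref_ptr, int ref_stride,
                                  unsigned int *sse);

unsigned int (*vpx_variance4x4)(const uint8_t *src_ptr, int source_stride,
                                const uint8_t *ref_ptr, int ref_stride,
                                unsigned int *sse);

static void setup_rtcd_sketch(int flags) {
  vpx_variance4x4 = vpx_variance4x4_c;  // portable fallback
  if (flags & HAS_NEON) vpx_variance4x4 = vpx_variance4x4_neon;
}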