VP9 common for ARMv8 using NEON intrinsics

Add vp9_idct32x32_1_add_neon.c, implementing vp9_idct32x32_1_add_neon.

Change-Id: If9ffe9a857228f5c67f61dc2b428b40965816eda
Signed-off-by: James Yu <james.yu@linaro.org>

New file: vp9/common/arm/neon/vp9_idct32x32_1_add_neon.c (161 lines)

@@ -0,0 +1,161 @@
/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
#include "vp9/common/vp9_idct.h"
/* Load eight consecutive 16-byte rows starting at d (row pitch d_stride)
 * into the eight output vectors q8u8..q15u8. */
static inline void LD_16x8(
        uint8_t *d,
        int d_stride,
        uint8x16_t *q8u8,
        uint8x16_t *q9u8,
        uint8x16_t *q10u8,
        uint8x16_t *q11u8,
        uint8x16_t *q12u8,
        uint8x16_t *q13u8,
        uint8x16_t *q14u8,
        uint8x16_t *q15u8) {
  uint8x16_t *const rows[8] = {
    q8u8, q9u8, q10u8, q11u8, q12u8, q13u8, q14u8, q15u8
  };
  int r;

  for (r = 0; r < 8; r++) {
    *rows[r] = vld1q_u8(d);
    d += d_stride;
  }
}
/* Saturating-add the constant vector qdiffu8 to each of the eight
 * row vectors q8u8..q15u8 in place. */
static inline void ADD_DIFF_16x8(
        uint8x16_t qdiffu8,
        uint8x16_t *q8u8,
        uint8x16_t *q9u8,
        uint8x16_t *q10u8,
        uint8x16_t *q11u8,
        uint8x16_t *q12u8,
        uint8x16_t *q13u8,
        uint8x16_t *q14u8,
        uint8x16_t *q15u8) {
  uint8x16_t *const rows[8] = {
    q8u8, q9u8, q10u8, q11u8, q12u8, q13u8, q14u8, q15u8
  };
  int r;

  for (r = 0; r < 8; r++)
    *rows[r] = vqaddq_u8(*rows[r], qdiffu8);
}
/* Saturating-subtract the constant vector qdiffu8 from each of the
 * eight row vectors q8u8..q15u8 in place. */
static inline void SUB_DIFF_16x8(
        uint8x16_t qdiffu8,
        uint8x16_t *q8u8,
        uint8x16_t *q9u8,
        uint8x16_t *q10u8,
        uint8x16_t *q11u8,
        uint8x16_t *q12u8,
        uint8x16_t *q13u8,
        uint8x16_t *q14u8,
        uint8x16_t *q15u8) {
  uint8x16_t *const rows[8] = {
    q8u8, q9u8, q10u8, q11u8, q12u8, q13u8, q14u8, q15u8
  };
  int r;

  for (r = 0; r < 8; r++)
    *rows[r] = vqsubq_u8(*rows[r], qdiffu8);
}
/* Store the eight row vectors q8u8..q15u8 to eight consecutive
 * 16-byte rows starting at d (row pitch d_stride). */
static inline void ST_16x8(
        uint8_t *d,
        int d_stride,
        uint8x16_t *q8u8,
        uint8x16_t *q9u8,
        uint8x16_t *q10u8,
        uint8x16_t *q11u8,
        uint8x16_t *q12u8,
        uint8x16_t *q13u8,
        uint8x16_t *q14u8,
        uint8x16_t *q15u8) {
  uint8x16_t *const rows[8] = {
    q8u8, q9u8, q10u8, q11u8, q12u8, q13u8, q14u8, q15u8
  };
  int r;

  for (r = 0; r < 8; r++) {
    vst1q_u8(d, *rows[r]);
    d += d_stride;
  }
}
/* 32x32 inverse DCT for the DC-only case: when input[0] is the only
 * non-zero coefficient, every pixel of the 32x32 block receives the same
 * residual a1. The residual is saturating-added to dest when positive,
 * and its magnitude saturating-subtracted when negative. The block is
 * processed as two 16-wide column halves of four 16x8 tiles each. */
void vp9_idct32x32_1_add_neon(
        int16_t *input,
        uint8_t *dest,
        int dest_stride) {
  uint8x16_t q0u8, q8u8, q9u8, q10u8, q11u8, q12u8, q13u8, q14u8, q15u8;
  uint8_t *d;
  int i, j;
  const int16_t cospi_16_64 = 11585;
  const int stride8 = dest_stride * 8;  // advance one 16x8 tile vertically
  int16_t a1;
  int16_t out = dct_const_round_shift(input[0] * cospi_16_64);

  out = dct_const_round_shift(out * cospi_16_64);
  a1 = ROUND_POWER_OF_TWO(out, 6);

  if (a1 >= 0) {  // diff_positive_32_32
    // a1 is non-negative here, so only the upper clamp can fire.
    a1 = a1 > 255 ? 255 : a1;
    q0u8 = vdupq_n_u8(a1);
    for (i = 0; i < 2; i++, dest += 16) {  // two 16-pixel-wide halves
      d = dest;
      for (j = 0; j < 4; j++) {  // four 16x8 tiles per half
        LD_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8,
                                &q12u8, &q13u8, &q14u8, &q15u8);
        ADD_DIFF_16x8(q0u8, &q8u8, &q9u8, &q10u8, &q11u8,
                            &q12u8, &q13u8, &q14u8, &q15u8);
        ST_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8,
                                &q12u8, &q13u8, &q14u8, &q15u8);
        d += stride8;
      }
    }
  } else {  // diff_negative_32_32: subtract |a1|
    a1 = -a1;  // a1 was negative and bounded, so no overflow
    a1 = a1 > 255 ? 255 : a1;
    q0u8 = vdupq_n_u8(a1);
    for (i = 0; i < 2; i++, dest += 16) {  // two 16-pixel-wide halves
      d = dest;
      for (j = 0; j < 4; j++) {  // four 16x8 tiles per half
        LD_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8,
                                &q12u8, &q13u8, &q14u8, &q15u8);
        SUB_DIFF_16x8(q0u8, &q8u8, &q9u8, &q10u8, &q11u8,
                            &q12u8, &q13u8, &q14u8, &q15u8);
        ST_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8,
                                &q12u8, &q13u8, &q14u8, &q15u8);
        d += stride8;
      }
    }
  }
}
@@ -460,8 +460,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
|
||||
$vp9_idct32x32_34_add_neon_asm=vp9_idct32x32_1024_add_neon;
|
||||
|
||||
add_proto qw/void vp9_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
|
||||
specialize qw/vp9_idct32x32_1_add sse2 neon_asm dspr2/;
|
||||
$vp9_idct32x32_1_add_neon_asm=vp9_idct32x32_1_add_neon;
|
||||
specialize qw/vp9_idct32x32_1_add sse2 neon dspr2/;
|
||||
|
||||
add_proto qw/void vp9_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
|
||||
specialize qw/vp9_iht4x4_16_add sse2 neon_asm dspr2/;
|
||||
|
@@ -138,7 +138,6 @@ VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_idct4x4_add_neon$(ASM)
|
||||
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_idct8x8_add_neon$(ASM)
|
||||
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_idct16x16_1_add_neon$(ASM)
|
||||
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_idct16x16_add_neon$(ASM)
|
||||
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_idct32x32_1_add_neon$(ASM)
|
||||
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_idct32x32_add_neon$(ASM)
|
||||
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_iht4x4_add_neon$(ASM)
|
||||
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_iht8x8_add_neon$(ASM)
|
||||
@@ -154,6 +153,7 @@ VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve8_avg_neon_asm$(ASM)
|
||||
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve8_neon_asm$(ASM)
|
||||
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve_neon.c
|
||||
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_copy_neon_asm$(ASM)
|
||||
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct32x32_1_add_neon_asm$(ASM)
|
||||
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct4x4_1_add_neon_asm$(ASM)
|
||||
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct8x8_1_add_neon_asm$(ASM)
|
||||
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_neon_asm$(ASM)
|
||||
@@ -165,6 +165,7 @@ VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve8_avg_neon.c
|
||||
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve8_neon.c
|
||||
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_convolve_neon.c
|
||||
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_copy_neon.c
|
||||
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct32x32_1_add_neon.c
|
||||
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct4x4_1_add_neon.c
|
||||
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct8x8_1_add_neon.c
|
||||
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_neon.c
|
||||
|
Reference in New Issue
Block a user