Merge "Update vp9_iht8x8_64_add_neon()"

commit 5eca3c23c3
Author: Linfeng Zhang
Date: 2018-01-30 01:20:41 +00:00
Committed by: Gerrit Code Review
3 changed files with 160 additions and 518 deletions

vp9/common/arm/neon/vp9_iht8x8_add_neon.c

@@ -14,528 +14,165 @@
 #include "./vp9_rtcd.h"
 #include "./vpx_config.h"
 #include "vp9/common/vp9_common.h"
 #include "vpx_dsp/arm/idct_neon.h"
 #include "vpx_dsp/arm/mem_neon.h"
 #include "vpx_dsp/arm/transpose_neon.h"
-static int16_t cospi_2_64 = 16305;
-static int16_t cospi_4_64 = 16069;
-static int16_t cospi_6_64 = 15679;
-static int16_t cospi_8_64 = 15137;
-static int16_t cospi_10_64 = 14449;
-static int16_t cospi_12_64 = 13623;
-static int16_t cospi_14_64 = 12665;
-static int16_t cospi_16_64 = 11585;
-static int16_t cospi_18_64 = 10394;
-static int16_t cospi_20_64 = 9102;
-static int16_t cospi_22_64 = 7723;
-static int16_t cospi_24_64 = 6270;
-static int16_t cospi_26_64 = 4756;
-static int16_t cospi_28_64 = 3196;
-static int16_t cospi_30_64 = 1606;
+static INLINE void iadst_half_butterfly_neon(int16x8_t *const x) {
+  const int16x4_t c = vdup_n_s16(cospi_16_64);
+  const int16x8_t sum = vaddq_s16(x[0], x[1]);
+  const int16x8_t sub = vsubq_s16(x[0], x[1]);
+  int32x4_t t0[2], t1[2];
-static INLINE void IDCT8x8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
-                              int16x8_t *q10s16, int16x8_t *q11s16,
-                              int16x8_t *q12s16, int16x8_t *q13s16,
-                              int16x8_t *q14s16, int16x8_t *q15s16) {
-  int16x4_t d0s16, d1s16, d2s16, d3s16;
-  int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
-  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
-  int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
-  int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
-  int32x4_t q2s32, q3s32, q5s32, q6s32, q8s32, q9s32;
-  int32x4_t q10s32, q11s32, q12s32, q13s32, q15s32;
-  d0s16 = vdup_n_s16(cospi_28_64);
-  d1s16 = vdup_n_s16(cospi_4_64);
-  d2s16 = vdup_n_s16(cospi_12_64);
-  d3s16 = vdup_n_s16(cospi_20_64);
-  d16s16 = vget_low_s16(*q8s16);
-  d17s16 = vget_high_s16(*q8s16);
-  d18s16 = vget_low_s16(*q9s16);
-  d19s16 = vget_high_s16(*q9s16);
-  d20s16 = vget_low_s16(*q10s16);
-  d21s16 = vget_high_s16(*q10s16);
-  d22s16 = vget_low_s16(*q11s16);
-  d23s16 = vget_high_s16(*q11s16);
-  d24s16 = vget_low_s16(*q12s16);
-  d25s16 = vget_high_s16(*q12s16);
-  d26s16 = vget_low_s16(*q13s16);
-  d27s16 = vget_high_s16(*q13s16);
-  d28s16 = vget_low_s16(*q14s16);
-  d29s16 = vget_high_s16(*q14s16);
-  d30s16 = vget_low_s16(*q15s16);
-  d31s16 = vget_high_s16(*q15s16);
-  q2s32 = vmull_s16(d18s16, d0s16);
-  q3s32 = vmull_s16(d19s16, d0s16);
-  q5s32 = vmull_s16(d26s16, d2s16);
-  q6s32 = vmull_s16(d27s16, d2s16);
-  q2s32 = vmlsl_s16(q2s32, d30s16, d1s16);
-  q3s32 = vmlsl_s16(q3s32, d31s16, d1s16);
-  q5s32 = vmlsl_s16(q5s32, d22s16, d3s16);
-  q6s32 = vmlsl_s16(q6s32, d23s16, d3s16);
-  d8s16 = vrshrn_n_s32(q2s32, 14);
-  d9s16 = vrshrn_n_s32(q3s32, 14);
-  d10s16 = vrshrn_n_s32(q5s32, 14);
-  d11s16 = vrshrn_n_s32(q6s32, 14);
-  q4s16 = vcombine_s16(d8s16, d9s16);
-  q5s16 = vcombine_s16(d10s16, d11s16);
-  q2s32 = vmull_s16(d18s16, d1s16);
-  q3s32 = vmull_s16(d19s16, d1s16);
-  q9s32 = vmull_s16(d26s16, d3s16);
-  q13s32 = vmull_s16(d27s16, d3s16);
-  q2s32 = vmlal_s16(q2s32, d30s16, d0s16);
-  q3s32 = vmlal_s16(q3s32, d31s16, d0s16);
-  q9s32 = vmlal_s16(q9s32, d22s16, d2s16);
-  q13s32 = vmlal_s16(q13s32, d23s16, d2s16);
-  d14s16 = vrshrn_n_s32(q2s32, 14);
-  d15s16 = vrshrn_n_s32(q3s32, 14);
-  d12s16 = vrshrn_n_s32(q9s32, 14);
-  d13s16 = vrshrn_n_s32(q13s32, 14);
-  q6s16 = vcombine_s16(d12s16, d13s16);
-  q7s16 = vcombine_s16(d14s16, d15s16);
-  d0s16 = vdup_n_s16(cospi_16_64);
-  q2s32 = vmull_s16(d16s16, d0s16);
-  q3s32 = vmull_s16(d17s16, d0s16);
-  q13s32 = vmull_s16(d16s16, d0s16);
-  q15s32 = vmull_s16(d17s16, d0s16);
-  q2s32 = vmlal_s16(q2s32, d24s16, d0s16);
-  q3s32 = vmlal_s16(q3s32, d25s16, d0s16);
-  q13s32 = vmlsl_s16(q13s32, d24s16, d0s16);
-  q15s32 = vmlsl_s16(q15s32, d25s16, d0s16);
-  d0s16 = vdup_n_s16(cospi_24_64);
-  d1s16 = vdup_n_s16(cospi_8_64);
-  d18s16 = vrshrn_n_s32(q2s32, 14);
-  d19s16 = vrshrn_n_s32(q3s32, 14);
-  d22s16 = vrshrn_n_s32(q13s32, 14);
-  d23s16 = vrshrn_n_s32(q15s32, 14);
-  *q9s16 = vcombine_s16(d18s16, d19s16);
-  *q11s16 = vcombine_s16(d22s16, d23s16);
-  q2s32 = vmull_s16(d20s16, d0s16);
-  q3s32 = vmull_s16(d21s16, d0s16);
-  q8s32 = vmull_s16(d20s16, d1s16);
-  q12s32 = vmull_s16(d21s16, d1s16);
-  q2s32 = vmlsl_s16(q2s32, d28s16, d1s16);
-  q3s32 = vmlsl_s16(q3s32, d29s16, d1s16);
-  q8s32 = vmlal_s16(q8s32, d28s16, d0s16);
-  q12s32 = vmlal_s16(q12s32, d29s16, d0s16);
-  d26s16 = vrshrn_n_s32(q2s32, 14);
-  d27s16 = vrshrn_n_s32(q3s32, 14);
-  d30s16 = vrshrn_n_s32(q8s32, 14);
-  d31s16 = vrshrn_n_s32(q12s32, 14);
-  *q13s16 = vcombine_s16(d26s16, d27s16);
-  *q15s16 = vcombine_s16(d30s16, d31s16);
-  q0s16 = vaddq_s16(*q9s16, *q15s16);
-  q1s16 = vaddq_s16(*q11s16, *q13s16);
-  q2s16 = vsubq_s16(*q11s16, *q13s16);
-  q3s16 = vsubq_s16(*q9s16, *q15s16);
-  *q13s16 = vsubq_s16(q4s16, q5s16);
-  q4s16 = vaddq_s16(q4s16, q5s16);
-  *q14s16 = vsubq_s16(q7s16, q6s16);
-  q7s16 = vaddq_s16(q7s16, q6s16);
-  d26s16 = vget_low_s16(*q13s16);
-  d27s16 = vget_high_s16(*q13s16);
-  d28s16 = vget_low_s16(*q14s16);
-  d29s16 = vget_high_s16(*q14s16);
-  d16s16 = vdup_n_s16(cospi_16_64);
-  q9s32 = vmull_s16(d28s16, d16s16);
-  q10s32 = vmull_s16(d29s16, d16s16);
-  q11s32 = vmull_s16(d28s16, d16s16);
-  q12s32 = vmull_s16(d29s16, d16s16);
-  q9s32 = vmlsl_s16(q9s32, d26s16, d16s16);
-  q10s32 = vmlsl_s16(q10s32, d27s16, d16s16);
-  q11s32 = vmlal_s16(q11s32, d26s16, d16s16);
-  q12s32 = vmlal_s16(q12s32, d27s16, d16s16);
-  d10s16 = vrshrn_n_s32(q9s32, 14);
-  d11s16 = vrshrn_n_s32(q10s32, 14);
-  d12s16 = vrshrn_n_s32(q11s32, 14);
-  d13s16 = vrshrn_n_s32(q12s32, 14);
-  q5s16 = vcombine_s16(d10s16, d11s16);
-  q6s16 = vcombine_s16(d12s16, d13s16);
-  *q8s16 = vaddq_s16(q0s16, q7s16);
-  *q9s16 = vaddq_s16(q1s16, q6s16);
-  *q10s16 = vaddq_s16(q2s16, q5s16);
-  *q11s16 = vaddq_s16(q3s16, q4s16);
-  *q12s16 = vsubq_s16(q3s16, q4s16);
-  *q13s16 = vsubq_s16(q2s16, q5s16);
-  *q14s16 = vsubq_s16(q1s16, q6s16);
-  *q15s16 = vsubq_s16(q0s16, q7s16);
+  t0[0] = vmull_s16(c, vget_low_s16(sum));
+  t0[1] = vmull_s16(c, vget_high_s16(sum));
+  t1[0] = vmull_s16(c, vget_low_s16(sub));
+  t1[1] = vmull_s16(c, vget_high_s16(sub));
+  x[0] = dct_const_round_shift_low_8(t0);
+  x[1] = dct_const_round_shift_low_8(t1);
 }
-static INLINE void IADST8X8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
-                               int16x8_t *q10s16, int16x8_t *q11s16,
-                               int16x8_t *q12s16, int16x8_t *q13s16,
-                               int16x8_t *q14s16, int16x8_t *q15s16) {
-  int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
-  int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
-  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
-  int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
-  int16x8_t q2s16, q4s16, q5s16, q6s16;
-  int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q7s32, q8s32;
-  int32x4_t q9s32, q10s32, q11s32, q12s32, q13s32, q14s32, q15s32;
+static INLINE void iadst_butterfly_neon(const int16x8_t in0,
+                                        const int16x8_t in1, const int c0,
+                                        const int c1, int32x4_t *const s0,
+                                        int32x4_t *const s1) {
+  const int16x4_t cst0 = vdup_n_s16(c0);
+  const int16x4_t cst1 = vdup_n_s16(c1);
+  int32x4_t t0[2], t1[2];
-  d16s16 = vget_low_s16(*q8s16);
-  d17s16 = vget_high_s16(*q8s16);
-  d18s16 = vget_low_s16(*q9s16);
-  d19s16 = vget_high_s16(*q9s16);
-  d20s16 = vget_low_s16(*q10s16);
-  d21s16 = vget_high_s16(*q10s16);
-  d22s16 = vget_low_s16(*q11s16);
-  d23s16 = vget_high_s16(*q11s16);
-  d24s16 = vget_low_s16(*q12s16);
-  d25s16 = vget_high_s16(*q12s16);
-  d26s16 = vget_low_s16(*q13s16);
-  d27s16 = vget_high_s16(*q13s16);
-  d28s16 = vget_low_s16(*q14s16);
-  d29s16 = vget_high_s16(*q14s16);
-  d30s16 = vget_low_s16(*q15s16);
-  d31s16 = vget_high_s16(*q15s16);
+  t0[0] = vmull_s16(cst0, vget_low_s16(in0));
+  t0[1] = vmull_s16(cst0, vget_high_s16(in0));
+  t1[0] = vmull_s16(cst1, vget_low_s16(in0));
+  t1[1] = vmull_s16(cst1, vget_high_s16(in0));
-  d14s16 = vdup_n_s16(cospi_2_64);
-  d15s16 = vdup_n_s16(cospi_30_64);
+  s0[0] = vmlal_s16(t0[0], cst1, vget_low_s16(in1));
+  s0[1] = vmlal_s16(t0[1], cst1, vget_high_s16(in1));
+  s1[0] = vmlsl_s16(t1[0], cst0, vget_low_s16(in1));
+  s1[1] = vmlsl_s16(t1[1], cst0, vget_high_s16(in1));
+}
-  q1s32 = vmull_s16(d30s16, d14s16);
-  q2s32 = vmull_s16(d31s16, d14s16);
-  q3s32 = vmull_s16(d30s16, d15s16);
-  q4s32 = vmull_s16(d31s16, d15s16);
+static INLINE int16x8_t add_dct_const_round_shift_low_8(
+    const int32x4_t *const in0, const int32x4_t *const in1) {
+  int32x4_t sum[2];
-  d30s16 = vdup_n_s16(cospi_18_64);
-  d31s16 = vdup_n_s16(cospi_14_64);
+  sum[0] = vaddq_s32(in0[0], in1[0]);
+  sum[1] = vaddq_s32(in0[1], in1[1]);
+  return dct_const_round_shift_low_8(sum);
+}
-  q1s32 = vmlal_s16(q1s32, d16s16, d15s16);
-  q2s32 = vmlal_s16(q2s32, d17s16, d15s16);
-  q3s32 = vmlsl_s16(q3s32, d16s16, d14s16);
-  q4s32 = vmlsl_s16(q4s32, d17s16, d14s16);
+static INLINE int16x8_t sub_dct_const_round_shift_low_8(
+    const int32x4_t *const in0, const int32x4_t *const in1) {
+  int32x4_t sum[2];
-  q5s32 = vmull_s16(d22s16, d30s16);
-  q6s32 = vmull_s16(d23s16, d30s16);
-  q7s32 = vmull_s16(d22s16, d31s16);
-  q8s32 = vmull_s16(d23s16, d31s16);
+  sum[0] = vsubq_s32(in0[0], in1[0]);
+  sum[1] = vsubq_s32(in0[1], in1[1]);
+  return dct_const_round_shift_low_8(sum);
+}
-  q5s32 = vmlal_s16(q5s32, d24s16, d31s16);
-  q6s32 = vmlal_s16(q6s32, d25s16, d31s16);
-  q7s32 = vmlsl_s16(q7s32, d24s16, d30s16);
-  q8s32 = vmlsl_s16(q8s32, d25s16, d30s16);
+static INLINE void iadst8(int16x8_t *const io) {
+  int16x8_t x[8], t[4];
+  int32x4_t s0[2], s1[2], s2[2], s3[2], s4[2], s5[2], s6[2], s7[2];
-  q11s32 = vaddq_s32(q1s32, q5s32);
-  q12s32 = vaddq_s32(q2s32, q6s32);
-  q1s32 = vsubq_s32(q1s32, q5s32);
-  q2s32 = vsubq_s32(q2s32, q6s32);
+  x[0] = io[7];
+  x[1] = io[0];
+  x[2] = io[5];
+  x[3] = io[2];
+  x[4] = io[3];
+  x[5] = io[4];
+  x[6] = io[1];
+  x[7] = io[6];
-  d22s16 = vrshrn_n_s32(q11s32, 14);
-  d23s16 = vrshrn_n_s32(q12s32, 14);
-  *q11s16 = vcombine_s16(d22s16, d23s16);
+  // stage 1
+  iadst_butterfly_neon(x[0], x[1], cospi_2_64, cospi_30_64, s0, s1);
+  iadst_butterfly_neon(x[2], x[3], cospi_10_64, cospi_22_64, s2, s3);
+  iadst_butterfly_neon(x[4], x[5], cospi_18_64, cospi_14_64, s4, s5);
+  iadst_butterfly_neon(x[6], x[7], cospi_26_64, cospi_6_64, s6, s7);
-  q12s32 = vaddq_s32(q3s32, q7s32);
-  q15s32 = vaddq_s32(q4s32, q8s32);
-  q3s32 = vsubq_s32(q3s32, q7s32);
-  q4s32 = vsubq_s32(q4s32, q8s32);
+  x[0] = add_dct_const_round_shift_low_8(s0, s4);
+  x[1] = add_dct_const_round_shift_low_8(s1, s5);
+  x[2] = add_dct_const_round_shift_low_8(s2, s6);
+  x[3] = add_dct_const_round_shift_low_8(s3, s7);
+  x[4] = sub_dct_const_round_shift_low_8(s0, s4);
+  x[5] = sub_dct_const_round_shift_low_8(s1, s5);
+  x[6] = sub_dct_const_round_shift_low_8(s2, s6);
+  x[7] = sub_dct_const_round_shift_low_8(s3, s7);
-  d2s16 = vrshrn_n_s32(q1s32, 14);
-  d3s16 = vrshrn_n_s32(q2s32, 14);
-  d24s16 = vrshrn_n_s32(q12s32, 14);
-  d25s16 = vrshrn_n_s32(q15s32, 14);
-  d6s16 = vrshrn_n_s32(q3s32, 14);
-  d7s16 = vrshrn_n_s32(q4s32, 14);
-  *q12s16 = vcombine_s16(d24s16, d25s16);
+  // stage 2
+  t[0] = x[0];
+  t[1] = x[1];
+  t[2] = x[2];
+  t[3] = x[3];
+  iadst_butterfly_neon(x[4], x[5], cospi_8_64, cospi_24_64, s4, s5);
+  iadst_butterfly_neon(x[7], x[6], cospi_24_64, cospi_8_64, s7, s6);
-  d0s16 = vdup_n_s16(cospi_10_64);
-  d1s16 = vdup_n_s16(cospi_22_64);
-  q4s32 = vmull_s16(d26s16, d0s16);
-  q5s32 = vmull_s16(d27s16, d0s16);
-  q2s32 = vmull_s16(d26s16, d1s16);
-  q6s32 = vmull_s16(d27s16, d1s16);
+  x[0] = vaddq_s16(t[0], t[2]);
+  x[1] = vaddq_s16(t[1], t[3]);
+  x[2] = vsubq_s16(t[0], t[2]);
+  x[3] = vsubq_s16(t[1], t[3]);
+  x[4] = add_dct_const_round_shift_low_8(s4, s6);
+  x[5] = add_dct_const_round_shift_low_8(s5, s7);
+  x[6] = sub_dct_const_round_shift_low_8(s4, s6);
+  x[7] = sub_dct_const_round_shift_low_8(s5, s7);
-  d30s16 = vdup_n_s16(cospi_26_64);
-  d31s16 = vdup_n_s16(cospi_6_64);
+  // stage 3
+  iadst_half_butterfly_neon(x + 2);
+  iadst_half_butterfly_neon(x + 6);
-  q4s32 = vmlal_s16(q4s32, d20s16, d1s16);
-  q5s32 = vmlal_s16(q5s32, d21s16, d1s16);
-  q2s32 = vmlsl_s16(q2s32, d20s16, d0s16);
-  q6s32 = vmlsl_s16(q6s32, d21s16, d0s16);
-  q0s32 = vmull_s16(d18s16, d30s16);
-  q13s32 = vmull_s16(d19s16, d30s16);
-  q0s32 = vmlal_s16(q0s32, d28s16, d31s16);
-  q13s32 = vmlal_s16(q13s32, d29s16, d31s16);
-  q10s32 = vmull_s16(d18s16, d31s16);
-  q9s32 = vmull_s16(d19s16, d31s16);
-  q10s32 = vmlsl_s16(q10s32, d28s16, d30s16);
-  q9s32 = vmlsl_s16(q9s32, d29s16, d30s16);
-  q14s32 = vaddq_s32(q2s32, q10s32);
-  q15s32 = vaddq_s32(q6s32, q9s32);
-  q2s32 = vsubq_s32(q2s32, q10s32);
-  q6s32 = vsubq_s32(q6s32, q9s32);
-  d28s16 = vrshrn_n_s32(q14s32, 14);
-  d29s16 = vrshrn_n_s32(q15s32, 14);
-  d4s16 = vrshrn_n_s32(q2s32, 14);
-  d5s16 = vrshrn_n_s32(q6s32, 14);
-  *q14s16 = vcombine_s16(d28s16, d29s16);
-  q9s32 = vaddq_s32(q4s32, q0s32);
-  q10s32 = vaddq_s32(q5s32, q13s32);
-  q4s32 = vsubq_s32(q4s32, q0s32);
-  q5s32 = vsubq_s32(q5s32, q13s32);
-  d30s16 = vdup_n_s16(cospi_8_64);
-  d31s16 = vdup_n_s16(cospi_24_64);
-  d18s16 = vrshrn_n_s32(q9s32, 14);
-  d19s16 = vrshrn_n_s32(q10s32, 14);
-  d8s16 = vrshrn_n_s32(q4s32, 14);
-  d9s16 = vrshrn_n_s32(q5s32, 14);
-  *q9s16 = vcombine_s16(d18s16, d19s16);
-  q5s32 = vmull_s16(d2s16, d30s16);
-  q6s32 = vmull_s16(d3s16, d30s16);
-  q7s32 = vmull_s16(d2s16, d31s16);
-  q0s32 = vmull_s16(d3s16, d31s16);
-  q5s32 = vmlal_s16(q5s32, d6s16, d31s16);
-  q6s32 = vmlal_s16(q6s32, d7s16, d31s16);
-  q7s32 = vmlsl_s16(q7s32, d6s16, d30s16);
-  q0s32 = vmlsl_s16(q0s32, d7s16, d30s16);
-  q1s32 = vmull_s16(d4s16, d30s16);
-  q3s32 = vmull_s16(d5s16, d30s16);
-  q10s32 = vmull_s16(d4s16, d31s16);
-  q2s32 = vmull_s16(d5s16, d31s16);
-  q1s32 = vmlsl_s16(q1s32, d8s16, d31s16);
-  q3s32 = vmlsl_s16(q3s32, d9s16, d31s16);
-  q10s32 = vmlal_s16(q10s32, d8s16, d30s16);
-  q2s32 = vmlal_s16(q2s32, d9s16, d30s16);
-  *q8s16 = vaddq_s16(*q11s16, *q9s16);
-  *q11s16 = vsubq_s16(*q11s16, *q9s16);
-  q4s16 = vaddq_s16(*q12s16, *q14s16);
-  *q12s16 = vsubq_s16(*q12s16, *q14s16);
-  q14s32 = vaddq_s32(q5s32, q1s32);
-  q15s32 = vaddq_s32(q6s32, q3s32);
-  q5s32 = vsubq_s32(q5s32, q1s32);
-  q6s32 = vsubq_s32(q6s32, q3s32);
-  d18s16 = vrshrn_n_s32(q14s32, 14);
-  d19s16 = vrshrn_n_s32(q15s32, 14);
-  d10s16 = vrshrn_n_s32(q5s32, 14);
-  d11s16 = vrshrn_n_s32(q6s32, 14);
-  *q9s16 = vcombine_s16(d18s16, d19s16);
-  q1s32 = vaddq_s32(q7s32, q10s32);
-  q3s32 = vaddq_s32(q0s32, q2s32);
-  q7s32 = vsubq_s32(q7s32, q10s32);
-  q0s32 = vsubq_s32(q0s32, q2s32);
-  d28s16 = vrshrn_n_s32(q1s32, 14);
-  d29s16 = vrshrn_n_s32(q3s32, 14);
-  d14s16 = vrshrn_n_s32(q7s32, 14);
-  d15s16 = vrshrn_n_s32(q0s32, 14);
-  *q14s16 = vcombine_s16(d28s16, d29s16);
-  d30s16 = vdup_n_s16(cospi_16_64);
-  d22s16 = vget_low_s16(*q11s16);
-  d23s16 = vget_high_s16(*q11s16);
-  q2s32 = vmull_s16(d22s16, d30s16);
-  q3s32 = vmull_s16(d23s16, d30s16);
-  q13s32 = vmull_s16(d22s16, d30s16);
-  q1s32 = vmull_s16(d23s16, d30s16);
-  d24s16 = vget_low_s16(*q12s16);
-  d25s16 = vget_high_s16(*q12s16);
-  q2s32 = vmlal_s16(q2s32, d24s16, d30s16);
-  q3s32 = vmlal_s16(q3s32, d25s16, d30s16);
-  q13s32 = vmlsl_s16(q13s32, d24s16, d30s16);
-  q1s32 = vmlsl_s16(q1s32, d25s16, d30s16);
-  d4s16 = vrshrn_n_s32(q2s32, 14);
-  d5s16 = vrshrn_n_s32(q3s32, 14);
-  d24s16 = vrshrn_n_s32(q13s32, 14);
-  d25s16 = vrshrn_n_s32(q1s32, 14);
-  q2s16 = vcombine_s16(d4s16, d5s16);
-  *q12s16 = vcombine_s16(d24s16, d25s16);
-  q13s32 = vmull_s16(d10s16, d30s16);
-  q1s32 = vmull_s16(d11s16, d30s16);
-  q11s32 = vmull_s16(d10s16, d30s16);
-  q0s32 = vmull_s16(d11s16, d30s16);
-  q13s32 = vmlal_s16(q13s32, d14s16, d30s16);
-  q1s32 = vmlal_s16(q1s32, d15s16, d30s16);
-  q11s32 = vmlsl_s16(q11s32, d14s16, d30s16);
-  q0s32 = vmlsl_s16(q0s32, d15s16, d30s16);
-  d20s16 = vrshrn_n_s32(q13s32, 14);
-  d21s16 = vrshrn_n_s32(q1s32, 14);
-  d12s16 = vrshrn_n_s32(q11s32, 14);
-  d13s16 = vrshrn_n_s32(q0s32, 14);
-  *q10s16 = vcombine_s16(d20s16, d21s16);
-  q6s16 = vcombine_s16(d12s16, d13s16);
-  q5s16 = vdupq_n_s16(0);
-  *q9s16 = vsubq_s16(q5s16, *q9s16);
-  *q11s16 = vsubq_s16(q5s16, q2s16);
-  *q13s16 = vsubq_s16(q5s16, q6s16);
-  *q15s16 = vsubq_s16(q5s16, q4s16);
+  io[0] = x[0];
+  io[1] = vnegq_s16(x[4]);
+  io[2] = x[6];
+  io[3] = vnegq_s16(x[2]);
+  io[4] = x[3];
+  io[5] = vnegq_s16(x[7]);
+  io[6] = x[5];
+  io[7] = vnegq_s16(x[1]);
 }
 void vp9_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest, int stride,
                             int tx_type) {
-  int i;
-  uint8_t *d1, *d2;
-  uint8x8_t d0u8, d1u8, d2u8, d3u8;
-  uint64x1_t d0u64, d1u64, d2u64, d3u64;
-  int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
-  uint16x8_t q8u16, q9u16, q10u16, q11u16;
+  const int16x8_t cospis = vld1q_s16(kCospi);
+  const int16x4_t cospis0 = vget_low_s16(cospis);   // cospi 0, 8, 16, 24
+  const int16x4_t cospis1 = vget_high_s16(cospis);  // cospi 4, 12, 20, 28
+  int16x8_t a[8];
-  q8s16 = load_tran_low_to_s16q(input);
-  q9s16 = load_tran_low_to_s16q(input + 8);
-  q10s16 = load_tran_low_to_s16q(input + 8 * 2);
-  q11s16 = load_tran_low_to_s16q(input + 8 * 3);
-  q12s16 = load_tran_low_to_s16q(input + 8 * 4);
-  q13s16 = load_tran_low_to_s16q(input + 8 * 5);
-  q14s16 = load_tran_low_to_s16q(input + 8 * 6);
-  q15s16 = load_tran_low_to_s16q(input + 8 * 7);
+  a[0] = load_tran_low_to_s16q(input + 0 * 8);
+  a[1] = load_tran_low_to_s16q(input + 1 * 8);
+  a[2] = load_tran_low_to_s16q(input + 2 * 8);
+  a[3] = load_tran_low_to_s16q(input + 3 * 8);
+  a[4] = load_tran_low_to_s16q(input + 4 * 8);
+  a[5] = load_tran_low_to_s16q(input + 5 * 8);
+  a[6] = load_tran_low_to_s16q(input + 6 * 8);
+  a[7] = load_tran_low_to_s16q(input + 7 * 8);
-  transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
-                    &q15s16);
+  transpose_s16_8x8(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]);
   switch (tx_type) {
-    case 0:  // idct_idct is not supported. Fall back to C
-      vp9_iht8x8_64_add_c(input, dest, stride, tx_type);
-      return;
-    case 1:  // iadst_idct
-      // generate IDCT constants
-      // GENERATE_IDCT_CONSTANTS
-      // first transform rows
-      IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
-                 &q15s16);
-      // transpose the matrix
-      transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16,
-                        &q14s16, &q15s16);
-      // generate IADST constants
-      // GENERATE_IADST_CONSTANTS
-      // then transform columns
-      IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
-                  &q15s16);
+    case 0:  // DCT_DCT
+      idct8x8_64_1d_bd8_kernel(cospis0, cospis1, a);
+      transpose_s16_8x8(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]);
+      idct8x8_64_1d_bd8_kernel(cospis0, cospis1, a);
       break;
-    case 2:  // idct_iadst
-      // generate IADST constants
-      // GENERATE_IADST_CONSTANTS
-      // first transform rows
-      IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
-                  &q15s16);
-      // transpose the matrix
-      transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16,
-                        &q14s16, &q15s16);
-      // generate IDCT constants
-      // GENERATE_IDCT_CONSTANTS
-      // then transform columns
-      IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
-                 &q15s16);
+    case 1:  // ADST_DCT
+      idct8x8_64_1d_bd8_kernel(cospis0, cospis1, a);
+      transpose_s16_8x8(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]);
+      iadst8(a);
      break;
-    case 3:  // iadst_iadst
-      // generate IADST constants
-      // GENERATE_IADST_CONSTANTS
-      // first transform rows
-      IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
-                  &q15s16);
-      // transpose the matrix
-      transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16,
-                        &q14s16, &q15s16);
-      // then transform columns
-      IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16,
-                  &q15s16);
+    case 2:  // DCT_ADST
+      iadst8(a);
+      transpose_s16_8x8(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]);
+      idct8x8_64_1d_bd8_kernel(cospis0, cospis1, a);
      break;
-    default:  // iadst_idct
-      assert(0);
+    case 3:  // ADST_ADST
+      iadst8(a);
+      transpose_s16_8x8(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]);
+      iadst8(a);
      break;
+    default: assert(0); break;
   }
-  q8s16 = vrshrq_n_s16(q8s16, 5);
-  q9s16 = vrshrq_n_s16(q9s16, 5);
-  q10s16 = vrshrq_n_s16(q10s16, 5);
-  q11s16 = vrshrq_n_s16(q11s16, 5);
-  q12s16 = vrshrq_n_s16(q12s16, 5);
-  q13s16 = vrshrq_n_s16(q13s16, 5);
-  q14s16 = vrshrq_n_s16(q14s16, 5);
-  q15s16 = vrshrq_n_s16(q15s16, 5);
-  for (d1 = d2 = dest, i = 0; i < 2; i++) {
-    if (i != 0) {
-      q8s16 = q12s16;
-      q9s16 = q13s16;
-      q10s16 = q14s16;
-      q11s16 = q15s16;
-    }
-    d0u64 = vld1_u64((uint64_t *)d1);
-    d1 += stride;
-    d1u64 = vld1_u64((uint64_t *)d1);
-    d1 += stride;
-    d2u64 = vld1_u64((uint64_t *)d1);
-    d1 += stride;
-    d3u64 = vld1_u64((uint64_t *)d1);
-    d1 += stride;
-    q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u64(d0u64));
-    q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u64(d1u64));
-    q10u16 =
-        vaddw_u8(vreinterpretq_u16_s16(q10s16), vreinterpret_u8_u64(d2u64));
-    q11u16 =
-        vaddw_u8(vreinterpretq_u16_s16(q11s16), vreinterpret_u8_u64(d3u64));
-    d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16));
-    d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16));
-    d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16));
-    d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16));
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8));
-    d2 += stride;
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8));
-    d2 += stride;
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8));
-    d2 += stride;
-    vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8));
-    d2 += stride;
-  }
+  idct8x8_add8x8_neon(a, dest, stride);
 }
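The arithmetic behind the new helpers is compact: iadst_butterfly_neon() forms s0 = c0*in0 + c1*in1 and s1 = c1*in0 - c0*in1 in 32 bits, and dct_const_round_shift_low_8() narrows back to 16 bits with the same 14-bit rounding shift that the deleted code spelled out as vrshrn_n_s32(x, 14). A minimal scalar sketch of those two steps (the helper names below are illustrative, not part of the patch):

    #include <stdint.h>

    /* Scalar model of DCT_CONST_ROUND_SHIFT, i.e. what vrshrn_n_s32(x, 14)
       computes per lane: round to nearest, then arithmetic shift right by 14. */
    static int16_t round_shift_14(int32_t x) {
      return (int16_t)((x + (1 << 13)) >> 14);
    }

    /* Scalar model of iadst_butterfly_neon() for one coefficient pair; the
       NEON version applies this to eight lanes via vmull/vmlal/vmlsl. */
    static void iadst_butterfly(int16_t in0, int16_t in1, int16_t c0, int16_t c1,
                                int32_t *s0, int32_t *s1) {
      *s0 = (int32_t)c0 * in0 + (int32_t)c1 * in1;
      *s1 = (int32_t)c1 * in0 - (int32_t)c0 * in1;
    }

Stage 1 of iadst8() then combines butterfly outputs as, for example, x[0] = round_shift_14(s0 + s4) and x[4] = round_shift_14(s0 - s4), which is exactly the add/subtract-then-round pattern that add_dct_const_round_shift_low_8() and sub_dct_const_round_shift_low_8() wrap.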

vpx_dsp/arm/idct8x8_add_neon.c

@@ -17,28 +17,6 @@
 #include "vpx_dsp/arm/transpose_neon.h"
 #include "vpx_dsp/txfm_common.h"
-static INLINE void idct8x8_add8x1(const int16x8_t a, uint8_t **const dest,
-                                  const int stride) {
-  const uint8x8_t s = vld1_u8(*dest);
-  const int16x8_t res = vrshrq_n_s16(a, 5);
-  const uint16x8_t q = vaddw_u8(vreinterpretq_u16_s16(res), s);
-  const uint8x8_t d = vqmovun_s16(vreinterpretq_s16_u16(q));
-  vst1_u8(*dest, d);
-  *dest += stride;
-}
-static INLINE void add8x8(int16x8_t *const out, uint8_t *dest,
-                          const int stride) {
-  idct8x8_add8x1(out[0], &dest, stride);
-  idct8x8_add8x1(out[1], &dest, stride);
-  idct8x8_add8x1(out[2], &dest, stride);
-  idct8x8_add8x1(out[3], &dest, stride);
-  idct8x8_add8x1(out[4], &dest, stride);
-  idct8x8_add8x1(out[5], &dest, stride);
-  idct8x8_add8x1(out[6], &dest, stride);
-  idct8x8_add8x1(out[7], &dest, stride);
-}
 void vpx_idct8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
                              int stride) {
   const int16x8_t cospis = vld1q_s16(kCospi);
@@ -57,7 +35,7 @@ void vpx_idct8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
   idct8x8_64_1d_bd8(cospis0, cospis1, a);
   idct8x8_64_1d_bd8(cospis0, cospis1, a);
-  add8x8(a, dest, stride);
+  idct8x8_add8x8_neon(a, dest, stride);
 }
 void vpx_idct8x8_12_add_neon(const tran_low_t *input, uint8_t *dest,
@@ -77,5 +55,5 @@ void vpx_idct8x8_12_add_neon(const tran_low_t *input, uint8_t *dest,
   idct8x8_12_pass1_bd8(cospis0, cospisd0, cospisd1, a);
   idct8x8_12_pass2_bd8(cospis0, cospisd0, cospisd1, a, b);
-  add8x8(b, dest, stride);
+  idct8x8_add8x8_neon(b, dest, stride);
 }
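All call sites above now share one output stage. Per row, idct8x8_add8x1() shifts the residual down by five with rounding (vrshrq_n_s16(a, 5)), widens and adds the eight predictor pixels (vaddw_u8), and saturates back to 8 bits (vqmovun_s16). A scalar sketch of the per-pixel effect (the helper name is illustrative, not from the patch):

    #include <stdint.h>

    /* Scalar model of one pixel of idct8x8_add8x1():
       dest = clamp(dest + ROUND_POWER_OF_TWO(residual, 5), 0, 255). */
    static uint8_t add_residual_pixel(uint8_t pred, int16_t res) {
      int32_t v = pred + ((res + (1 << 4)) >> 5);
      if (v < 0) v = 0;
      if (v > 255) v = 255;
      return (uint8_t)v;
    }

Moving this pair of helpers into idct_neon.h (next file) lets the vp9 hybrid transform and both vpx_dsp idct8x8 paths reuse the same reconstruction code instead of carrying private copies.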

vpx_dsp/arm/idct_neon.h

@@ -417,18 +417,15 @@ static INLINE void idct8x8_12_pass2_bd8(const int16x4_t cospis0,
   output[7] = vsubq_s16(step1[0], step2[7]);
 }
-static INLINE void idct8x8_64_1d_bd8(const int16x4_t cospis0,
-                                     const int16x4_t cospis1,
-                                     int16x8_t *const io) {
+static INLINE void idct8x8_64_1d_bd8_kernel(const int16x4_t cospis0,
+                                            const int16x4_t cospis1,
+                                            int16x8_t *const io) {
   int16x4_t input1l, input1h, input3l, input3h, input5l, input5h, input7l,
       input7h;
   int16x4_t step1l[4], step1h[4];
   int16x8_t step1[8], step2[8];
   int32x4_t t32[8];
-  transpose_s16_8x8(&io[0], &io[1], &io[2], &io[3], &io[4], &io[5], &io[6],
-                    &io[7]);
   // stage 1
   input1l = vget_low_s16(io[1]);
   input1h = vget_high_s16(io[1]);
@@ -514,6 +511,14 @@ static INLINE void idct8x8_64_1d_bd8(const int16x4_t cospis0,
   io[7] = vsubq_s16(step1[0], step2[7]);
 }
+static INLINE void idct8x8_64_1d_bd8(const int16x4_t cospis0,
+                                     const int16x4_t cospis1,
+                                     int16x8_t *const io) {
+  transpose_s16_8x8(&io[0], &io[1], &io[2], &io[3], &io[4], &io[5], &io[6],
+                    &io[7]);
+  idct8x8_64_1d_bd8_kernel(cospis0, cospis1, io);
+}
 static INLINE void idct_cospi_8_24_q_kernel(const int16x8_t s0,
                                             const int16x8_t s1,
                                             const int16x4_t cospi_0_8_16_24,
@@ -736,6 +741,28 @@ static INLINE void idct16x16_store_pass1(const int16x8_t *const out,
   vst1q_s16(output, out[15]);
 }
+static INLINE void idct8x8_add8x1(const int16x8_t a, uint8_t **const dest,
+                                  const int stride) {
+  const uint8x8_t s = vld1_u8(*dest);
+  const int16x8_t res = vrshrq_n_s16(a, 5);
+  const uint16x8_t q = vaddw_u8(vreinterpretq_u16_s16(res), s);
+  const uint8x8_t d = vqmovun_s16(vreinterpretq_s16_u16(q));
+  vst1_u8(*dest, d);
+  *dest += stride;
+}
+static INLINE void idct8x8_add8x8_neon(int16x8_t *const out, uint8_t *dest,
+                                       const int stride) {
+  idct8x8_add8x1(out[0], &dest, stride);
+  idct8x8_add8x1(out[1], &dest, stride);
+  idct8x8_add8x1(out[2], &dest, stride);
+  idct8x8_add8x1(out[3], &dest, stride);
+  idct8x8_add8x1(out[4], &dest, stride);
+  idct8x8_add8x1(out[5], &dest, stride);
+  idct8x8_add8x1(out[6], &dest, stride);
+  idct8x8_add8x1(out[7], &dest, stride);
+}
 static INLINE void idct16x16_add8x1(const int16x8_t a, uint8_t **const dest,
                                     const int stride) {
   const uint8x8_t s = vld1_u8(*dest);