From 0325b95938cac650ae7fd60243f96ca8896db0cc Mon Sep 17 00:00:00 2001
From: Johann
Date: Fri, 29 Jul 2016 08:29:55 -0700
Subject: [PATCH] Use common transpose for vpx_idct32x32_1024_add_neon

Change-Id: I6ef7970206d588761ebe80005aecd35365ec50ff
---
 vpx_dsp/arm/idct32x32_add_neon.c | 66 ++++++--------------------------
 1 file changed, 11 insertions(+), 55 deletions(-)

diff --git a/vpx_dsp/arm/idct32x32_add_neon.c b/vpx_dsp/arm/idct32x32_add_neon.c
index 04f51bfdd..c4d1e8473 100644
--- a/vpx_dsp/arm/idct32x32_add_neon.c
+++ b/vpx_dsp/arm/idct32x32_add_neon.c
@@ -11,6 +11,7 @@
 #include <arm_neon.h>
 
 #include "./vpx_config.h"
+#include "vpx_dsp/arm/transpose_neon.h"
 #include "vpx_dsp/txfm_common.h"
 
 #define LOAD_FROM_TRANSPOSED(prev, first, second) \
@@ -155,11 +156,7 @@ static INLINE void idct32_transpose_pair(int16_t *input, int16_t *t_buf) {
   int16_t *in;
   int i;
   const int stride = 32;
-  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
-  int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
   int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
-  int32x4x2_t q0x2s32, q1x2s32, q2x2s32, q3x2s32;
-  int16x8x2_t q0x2s16, q1x2s16, q2x2s16, q3x2s16;
 
   for (i = 0; i < 4; i++, input += 8) {
     in = input;
@@ -179,65 +176,24 @@ static INLINE void idct32_transpose_pair(int16_t *input, int16_t *t_buf) {
     in += stride;
     q15s16 = vld1q_s16(in);
 
-    d16s16 = vget_low_s16(q8s16);
-    d17s16 = vget_high_s16(q8s16);
-    d18s16 = vget_low_s16(q9s16);
-    d19s16 = vget_high_s16(q9s16);
-    d20s16 = vget_low_s16(q10s16);
-    d21s16 = vget_high_s16(q10s16);
-    d22s16 = vget_low_s16(q11s16);
-    d23s16 = vget_high_s16(q11s16);
-    d24s16 = vget_low_s16(q12s16);
-    d25s16 = vget_high_s16(q12s16);
-    d26s16 = vget_low_s16(q13s16);
-    d27s16 = vget_high_s16(q13s16);
-    d28s16 = vget_low_s16(q14s16);
-    d29s16 = vget_high_s16(q14s16);
-    d30s16 = vget_low_s16(q15s16);
-    d31s16 = vget_high_s16(q15s16);
+    transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16,
+                      &q14s16, &q15s16);
 
-    q8s16 = vcombine_s16(d16s16, d24s16);   // vswp d17, d24
-    q9s16 = vcombine_s16(d18s16, d26s16);   // vswp d19, d26
-    q10s16 = vcombine_s16(d20s16, d28s16);  // vswp d21, d28
-    q11s16 = vcombine_s16(d22s16, d30s16);  // vswp d23, d30
-    q12s16 = vcombine_s16(d17s16, d25s16);
-    q13s16 = vcombine_s16(d19s16, d27s16);
-    q14s16 = vcombine_s16(d21s16, d29s16);
-    q15s16 = vcombine_s16(d23s16, d31s16);
-
-    q0x2s32 =
-        vtrnq_s32(vreinterpretq_s32_s16(q8s16), vreinterpretq_s32_s16(q10s16));
-    q1x2s32 =
-        vtrnq_s32(vreinterpretq_s32_s16(q9s16), vreinterpretq_s32_s16(q11s16));
-    q2x2s32 =
-        vtrnq_s32(vreinterpretq_s32_s16(q12s16), vreinterpretq_s32_s16(q14s16));
-    q3x2s32 =
-        vtrnq_s32(vreinterpretq_s32_s16(q13s16), vreinterpretq_s32_s16(q15s16));
-
-    q0x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[0]),   // q8
-                        vreinterpretq_s16_s32(q1x2s32.val[0]));  // q9
-    q1x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[1]),   // q10
-                        vreinterpretq_s16_s32(q1x2s32.val[1]));  // q11
-    q2x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[0]),   // q12
-                        vreinterpretq_s16_s32(q3x2s32.val[0]));  // q13
-    q3x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[1]),   // q14
-                        vreinterpretq_s16_s32(q3x2s32.val[1]));  // q15
-
-    vst1q_s16(t_buf, q0x2s16.val[0]);
+    vst1q_s16(t_buf, q8s16);
     t_buf += 8;
-    vst1q_s16(t_buf, q0x2s16.val[1]);
+    vst1q_s16(t_buf, q9s16);
     t_buf += 8;
-    vst1q_s16(t_buf, q1x2s16.val[0]);
+    vst1q_s16(t_buf, q10s16);
     t_buf += 8;
-    vst1q_s16(t_buf, q1x2s16.val[1]);
+    vst1q_s16(t_buf, q11s16);
     t_buf += 8;
-    vst1q_s16(t_buf, q2x2s16.val[0]);
+    vst1q_s16(t_buf, q12s16);
     t_buf += 8;
-    vst1q_s16(t_buf, q2x2s16.val[1]);
+    vst1q_s16(t_buf, q13s16);
     t_buf += 8;
-    vst1q_s16(t_buf, q3x2s16.val[0]);
+    vst1q_s16(t_buf, q14s16);
     t_buf += 8;
-    vst1q_s16(t_buf, q3x2s16.val[1]);
+    vst1q_s16(t_buf, q15s16);
     t_buf += 8;
   }
   return;
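
Note (illustration, not part of the patch): transpose_s16_8x8() comes from the shared
vpx_dsp/arm/transpose_neon.h header added above. As a rough sketch of what such an 8x8
int16 transpose does, the function below reproduces the three stages of the code this
patch deletes (pair 64-bit halves, vtrnq_s32, vtrnq_s16). The real helper in
transpose_neon.h may order or phrase these stages differently; the sketch name and
signature are chosen here only to mirror the call site above.

#include <arm_neon.h>

/* Illustrative 8x8 transpose of int16 rows *r0..*r7, following the same
 * three stages as the code removed by this patch. Compile for ARM with NEON. */
void transpose_s16_8x8_sketch(int16x8_t *r0, int16x8_t *r1, int16x8_t *r2,
                              int16x8_t *r3, int16x8_t *r4, int16x8_t *r5,
                              int16x8_t *r6, int16x8_t *r7) {
  /* Stage 1: pair the 64-bit halves of rows four apart (the vget/vcombine
   * block with the "vswp" comments in the deleted code). */
  const int16x8_t a0 = vcombine_s16(vget_low_s16(*r0), vget_low_s16(*r4));
  const int16x8_t a1 = vcombine_s16(vget_low_s16(*r1), vget_low_s16(*r5));
  const int16x8_t a2 = vcombine_s16(vget_low_s16(*r2), vget_low_s16(*r6));
  const int16x8_t a3 = vcombine_s16(vget_low_s16(*r3), vget_low_s16(*r7));
  const int16x8_t a4 = vcombine_s16(vget_high_s16(*r0), vget_high_s16(*r4));
  const int16x8_t a5 = vcombine_s16(vget_high_s16(*r1), vget_high_s16(*r5));
  const int16x8_t a6 = vcombine_s16(vget_high_s16(*r2), vget_high_s16(*r6));
  const int16x8_t a7 = vcombine_s16(vget_high_s16(*r3), vget_high_s16(*r7));

  /* Stage 2: transpose 2x2 blocks of 32-bit lanes. */
  const int32x4x2_t b0 =
      vtrnq_s32(vreinterpretq_s32_s16(a0), vreinterpretq_s32_s16(a2));
  const int32x4x2_t b1 =
      vtrnq_s32(vreinterpretq_s32_s16(a1), vreinterpretq_s32_s16(a3));
  const int32x4x2_t b2 =
      vtrnq_s32(vreinterpretq_s32_s16(a4), vreinterpretq_s32_s16(a6));
  const int32x4x2_t b3 =
      vtrnq_s32(vreinterpretq_s32_s16(a5), vreinterpretq_s32_s16(a7));

  /* Stage 3: transpose 2x2 blocks of 16-bit lanes; each vtrnq_s16 yields
   * two output rows, i.e. two columns of the original matrix. */
  const int16x8x2_t c0 = vtrnq_s16(vreinterpretq_s16_s32(b0.val[0]),
                                   vreinterpretq_s16_s32(b1.val[0]));
  const int16x8x2_t c1 = vtrnq_s16(vreinterpretq_s16_s32(b0.val[1]),
                                   vreinterpretq_s16_s32(b1.val[1]));
  const int16x8x2_t c2 = vtrnq_s16(vreinterpretq_s16_s32(b2.val[0]),
                                   vreinterpretq_s16_s32(b3.val[0]));
  const int16x8x2_t c3 = vtrnq_s16(vreinterpretq_s16_s32(b2.val[1]),
                                   vreinterpretq_s16_s32(b3.val[1]));

  *r0 = c0.val[0];  /* column 0 of the input */
  *r1 = c0.val[1];  /* column 1 */
  *r2 = c1.val[0];  /* column 2 */
  *r3 = c1.val[1];  /* column 3 */
  *r4 = c2.val[0];  /* column 4 */
  *r5 = c2.val[1];  /* column 5 */
  *r6 = c3.val[0];  /* column 6 */
  *r7 = c3.val[1];  /* column 7 */
}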