Merge "Improve idct16x16: _256_add_sse2(x1.107)&_10_add_sse2(x1.012)"

Yunqing Wang 2013-12-04 08:50:50 -08:00 committed by Gerrit Code Review
commit 920a074e89


@@ -650,6 +650,25 @@ static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7);
}
static INLINE void array_transpose_4X8(__m128i *in, __m128i *out) {
const __m128i zero = _mm_setzero_si128();
const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]);
const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]);
const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
out[0] = _mm_unpacklo_epi64(tr1_0, tr1_4);
out[1] = _mm_unpackhi_epi64(tr1_0, tr1_4);
out[2] = _mm_unpacklo_epi64(tr1_2, tr1_6);
out[3] = _mm_unpackhi_epi64(tr1_2, tr1_6);
out[4] = out[5] = out[6] = out[7] = zero;
}
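For reference, the unpack-based transpose used by array_transpose_8x8 and array_transpose_4X8 works in three rounds: interleave 16-bit lanes of adjacent rows, then 32-bit pairs, then 64-bit halves. A minimal standalone sketch of the full 8x8 case (illustrative names, not taken from this file):

#include <emmintrin.h>
#include <stdint.h>

/* Sketch: transpose an 8x8 block of int16_t with three unpack rounds. */
static void transpose_8x8_sketch(const int16_t *src, int16_t *dst) {
  __m128i r[8], t[8];
  int i;
  for (i = 0; i < 8; i++)
    r[i] = _mm_loadu_si128((const __m128i *)(src + 8 * i));
  /* Round 1: interleave 16-bit lanes of adjacent rows. */
  t[0] = _mm_unpacklo_epi16(r[0], r[1]);
  t[1] = _mm_unpackhi_epi16(r[0], r[1]);
  t[2] = _mm_unpacklo_epi16(r[2], r[3]);
  t[3] = _mm_unpackhi_epi16(r[2], r[3]);
  t[4] = _mm_unpacklo_epi16(r[4], r[5]);
  t[5] = _mm_unpackhi_epi16(r[4], r[5]);
  t[6] = _mm_unpacklo_epi16(r[6], r[7]);
  t[7] = _mm_unpackhi_epi16(r[6], r[7]);
  /* Round 2: interleave 32-bit pairs. */
  r[0] = _mm_unpacklo_epi32(t[0], t[2]);
  r[1] = _mm_unpackhi_epi32(t[0], t[2]);
  r[2] = _mm_unpacklo_epi32(t[1], t[3]);
  r[3] = _mm_unpackhi_epi32(t[1], t[3]);
  r[4] = _mm_unpacklo_epi32(t[4], t[6]);
  r[5] = _mm_unpackhi_epi32(t[4], t[6]);
  r[6] = _mm_unpacklo_epi32(t[5], t[7]);
  r[7] = _mm_unpackhi_epi32(t[5], t[7]);
  /* Round 3: interleave 64-bit halves; output row i holds column i. */
  t[0] = _mm_unpacklo_epi64(r[0], r[4]);
  t[1] = _mm_unpackhi_epi64(r[0], r[4]);
  t[2] = _mm_unpacklo_epi64(r[1], r[5]);
  t[3] = _mm_unpackhi_epi64(r[1], r[5]);
  t[4] = _mm_unpacklo_epi64(r[2], r[6]);
  t[5] = _mm_unpackhi_epi64(r[2], r[6]);
  t[6] = _mm_unpacklo_epi64(r[3], r[7]);
  t[7] = _mm_unpackhi_epi64(r[3], r[7]);
  for (i = 0; i < 8; i++)
    _mm_storeu_si128((__m128i *)(dst + 8 * i), t[i]);
}

The 4X8 variant above needs only the unpacklo forms, since its eight input rows carry just four valid 16-bit lanes each, and it zeroes out[4]..out[7].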
static void idct8_1d_sse2(__m128i *in) {
const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
@@ -1139,14 +1158,14 @@ void vp9_idct8x8_10_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
#define IDCT16_1D \
/* Stage2 */ \
{ \
const __m128i lo_1_15 = _mm_unpacklo_epi16(in1, in15); \
const __m128i hi_1_15 = _mm_unpackhi_epi16(in1, in15); \
const __m128i lo_9_7 = _mm_unpacklo_epi16(in9, in7); \
const __m128i hi_9_7 = _mm_unpackhi_epi16(in9, in7); \
const __m128i lo_5_11 = _mm_unpacklo_epi16(in5, in11); \
const __m128i hi_5_11 = _mm_unpackhi_epi16(in5, in11); \
const __m128i lo_13_3 = _mm_unpacklo_epi16(in13, in3); \
const __m128i hi_13_3 = _mm_unpackhi_epi16(in13, in3); \
const __m128i lo_1_15 = _mm_unpacklo_epi16(in[1], in[15]); \
const __m128i hi_1_15 = _mm_unpackhi_epi16(in[1], in[15]); \
const __m128i lo_9_7 = _mm_unpacklo_epi16(in[9], in[7]); \
const __m128i hi_9_7 = _mm_unpackhi_epi16(in[9], in[7]); \
const __m128i lo_5_11 = _mm_unpacklo_epi16(in[5], in[11]); \
const __m128i hi_5_11 = _mm_unpackhi_epi16(in[5], in[11]); \
const __m128i lo_13_3 = _mm_unpacklo_epi16(in[13], in[3]); \
const __m128i hi_13_3 = _mm_unpackhi_epi16(in[13], in[3]); \
\
MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_9_7, hi_9_7, \
stg2_0, stg2_1, stg2_2, stg2_3, \
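The MULTIPLICATION_AND_ADD calls above implement the fixed-point rotations of each butterfly stage. Constants are packed with pair_set_epi16(c0, c1), so _mm_madd_epi16 on the interleaved inputs yields x*c0 + y*c1 per 32-bit lane, which is then rounded, shifted by DCT_CONST_BITS, and packed back to 16 bits. A sketch of one such output (butterfly_half is a hypothetical name; the real macro, defined earlier in this file, produces four outputs from two interleaved input pairs):

#include <emmintrin.h>

#define DCT_CONST_BITS 14  /* as in vp9/common/vp9_idct.h */
#define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))

/* lo/hi hold interleaved (x, y) pairs; k holds replicated (c0, c1). */
static __m128i butterfly_half(__m128i lo, __m128i hi, __m128i k) {
  const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
  __m128i t0 = _mm_madd_epi16(lo, k);   /* x*c0 + y*c1, low 4 pairs  */
  __m128i t1 = _mm_madd_epi16(hi, k);   /* x*c0 + y*c1, high 4 pairs */
  t0 = _mm_srai_epi32(_mm_add_epi32(t0, rounding), DCT_CONST_BITS);
  t1 = _mm_srai_epi32(_mm_add_epi32(t1, rounding), DCT_CONST_BITS);
  return _mm_packs_epi32(t0, t1);       /* back to 8 x int16, saturating */
}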
@@ -1159,10 +1178,10 @@ void vp9_idct8x8_10_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
\
/* Stage3 */ \
{ \
const __m128i lo_2_14 = _mm_unpacklo_epi16(in2, in14); \
const __m128i hi_2_14 = _mm_unpackhi_epi16(in2, in14); \
const __m128i lo_10_6 = _mm_unpacklo_epi16(in10, in6); \
const __m128i hi_10_6 = _mm_unpackhi_epi16(in10, in6); \
const __m128i lo_2_14 = _mm_unpacklo_epi16(in[2], in[14]); \
const __m128i hi_2_14 = _mm_unpackhi_epi16(in[2], in[14]); \
const __m128i lo_10_6 = _mm_unpacklo_epi16(in[10], in[6]); \
const __m128i hi_10_6 = _mm_unpackhi_epi16(in[10], in[6]); \
\
MULTIPLICATION_AND_ADD(lo_2_14, hi_2_14, lo_10_6, hi_10_6, \
stg3_0, stg3_1, stg3_2, stg3_3, \
@@ -1181,10 +1200,10 @@ void vp9_idct8x8_10_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
\
/* Stage4 */ \
{ \
const __m128i lo_0_8 = _mm_unpacklo_epi16(in0, in8); \
const __m128i hi_0_8 = _mm_unpackhi_epi16(in0, in8); \
const __m128i lo_4_12 = _mm_unpacklo_epi16(in4, in12); \
const __m128i hi_4_12 = _mm_unpackhi_epi16(in4, in12); \
const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], in[8]); \
const __m128i hi_0_8 = _mm_unpackhi_epi16(in[0], in[8]); \
const __m128i lo_4_12 = _mm_unpacklo_epi16(in[4], in[12]); \
const __m128i hi_4_12 = _mm_unpackhi_epi16(in[4], in[12]); \
\
const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \
const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \
@@ -1296,16 +1315,7 @@ void vp9_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);
__m128i in0 = zero, in1 = zero, in2 = zero, in3 = zero, in4 = zero,
in5 = zero, in6 = zero, in7 = zero, in8 = zero, in9 = zero,
in10 = zero, in11 = zero, in12 = zero, in13 = zero,
in14 = zero, in15 = zero;
__m128i l0 = zero, l1 = zero, l2 = zero, l3 = zero, l4 = zero, l5 = zero,
l6 = zero, l7 = zero, l8 = zero, l9 = zero, l10 = zero, l11 = zero,
l12 = zero, l13 = zero, l14 = zero, l15 = zero;
__m128i r0 = zero, r1 = zero, r2 = zero, r3 = zero, r4 = zero, r5 = zero,
r6 = zero, r7 = zero, r8 = zero, r9 = zero, r10 = zero, r11 = zero,
r12 = zero, r13 = zero, r14 = zero, r15 = zero;
__m128i in[16], l[16], r[16], *curr1;
__m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7,
stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
stp1_8_0, stp1_12_0;
@@ -1314,162 +1324,132 @@ void vp9_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
__m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
int i;
// We work on an 8x16 block each time, and loop 4 times for the 2-D 16x16 idct.
for (i = 0; i < 4; i++) {
// 1-D idct
if (i < 2) {
if (i == 1) input += 128;
curr1 = l;
for (i = 0; i < 2; i++) {
// 1-D idct
// Load input data.
in0 = _mm_load_si128((const __m128i *)input);
in8 = _mm_load_si128((const __m128i *)(input + 8 * 1));
in1 = _mm_load_si128((const __m128i *)(input + 8 * 2));
in9 = _mm_load_si128((const __m128i *)(input + 8 * 3));
in2 = _mm_load_si128((const __m128i *)(input + 8 * 4));
in10 = _mm_load_si128((const __m128i *)(input + 8 * 5));
in3 = _mm_load_si128((const __m128i *)(input + 8 * 6));
in11 = _mm_load_si128((const __m128i *)(input + 8 * 7));
in4 = _mm_load_si128((const __m128i *)(input + 8 * 8));
in12 = _mm_load_si128((const __m128i *)(input + 8 * 9));
in5 = _mm_load_si128((const __m128i *)(input + 8 * 10));
in13 = _mm_load_si128((const __m128i *)(input + 8 * 11));
in6 = _mm_load_si128((const __m128i *)(input + 8 * 12));
in14 = _mm_load_si128((const __m128i *)(input + 8 * 13));
in7 = _mm_load_si128((const __m128i *)(input + 8 * 14));
in15 = _mm_load_si128((const __m128i *)(input + 8 * 15));
in[0] = _mm_load_si128((const __m128i *)input);
in[8] = _mm_load_si128((const __m128i *)(input + 8 * 1));
in[1] = _mm_load_si128((const __m128i *)(input + 8 * 2));
in[9] = _mm_load_si128((const __m128i *)(input + 8 * 3));
in[2] = _mm_load_si128((const __m128i *)(input + 8 * 4));
in[10] = _mm_load_si128((const __m128i *)(input + 8 * 5));
in[3] = _mm_load_si128((const __m128i *)(input + 8 * 6));
in[11] = _mm_load_si128((const __m128i *)(input + 8 * 7));
in[4] = _mm_load_si128((const __m128i *)(input + 8 * 8));
in[12] = _mm_load_si128((const __m128i *)(input + 8 * 9));
in[5] = _mm_load_si128((const __m128i *)(input + 8 * 10));
in[13] = _mm_load_si128((const __m128i *)(input + 8 * 11));
in[6] = _mm_load_si128((const __m128i *)(input + 8 * 12));
in[14] = _mm_load_si128((const __m128i *)(input + 8 * 13));
in[7] = _mm_load_si128((const __m128i *)(input + 8 * 14));
in[15] = _mm_load_si128((const __m128i *)(input + 8 * 15));
TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
TRANSPOSE_8X8(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
in10, in11, in12, in13, in14, in15);
}
array_transpose_8x8(in, in);
array_transpose_8x8(in+8, in+8);
if (i == 2) {
TRANSPOSE_8X8(l0, l1, l2, l3, l4, l5, l6, l7, in0, in1, in2, in3, in4,
in5, in6, in7);
TRANSPOSE_8X8(r0, r1, r2, r3, r4, r5, r6, r7, in8, in9, in10, in11, in12,
in13, in14, in15);
}
IDCT16_1D
if (i == 3) {
TRANSPOSE_8X8(l8, l9, l10, l11, l12, l13, l14, l15, in0, in1, in2, in3,
in4, in5, in6, in7);
TRANSPOSE_8X8(r8, r9, r10, r11, r12, r13, r14, r15, in8, in9, in10, in11,
in12, in13, in14, in15);
}
// Stage7
curr1[0] = _mm_add_epi16(stp2_0, stp1_15);
curr1[1] = _mm_add_epi16(stp2_1, stp1_14);
curr1[2] = _mm_add_epi16(stp2_2, stp2_13);
curr1[3] = _mm_add_epi16(stp2_3, stp2_12);
curr1[4] = _mm_add_epi16(stp2_4, stp2_11);
curr1[5] = _mm_add_epi16(stp2_5, stp2_10);
curr1[6] = _mm_add_epi16(stp2_6, stp1_9);
curr1[7] = _mm_add_epi16(stp2_7, stp1_8);
curr1[8] = _mm_sub_epi16(stp2_7, stp1_8);
curr1[9] = _mm_sub_epi16(stp2_6, stp1_9);
curr1[10] = _mm_sub_epi16(stp2_5, stp2_10);
curr1[11] = _mm_sub_epi16(stp2_4, stp2_11);
curr1[12] = _mm_sub_epi16(stp2_3, stp2_12);
curr1[13] = _mm_sub_epi16(stp2_2, stp2_13);
curr1[14] = _mm_sub_epi16(stp2_1, stp1_14);
curr1[15] = _mm_sub_epi16(stp2_0, stp1_15);
IDCT16_1D
curr1 = r;
input += 128;
}
for (i = 0; i < 2; i++) {
// 1-D idct
array_transpose_8x8(l+i*8, in);
array_transpose_8x8(r+i*8, in+8);
IDCT16_1D
// Stage7
if (i == 0) {
// Left 8x16
l0 = _mm_add_epi16(stp2_0, stp1_15);
l1 = _mm_add_epi16(stp2_1, stp1_14);
l2 = _mm_add_epi16(stp2_2, stp2_13);
l3 = _mm_add_epi16(stp2_3, stp2_12);
l4 = _mm_add_epi16(stp2_4, stp2_11);
l5 = _mm_add_epi16(stp2_5, stp2_10);
l6 = _mm_add_epi16(stp2_6, stp1_9);
l7 = _mm_add_epi16(stp2_7, stp1_8);
l8 = _mm_sub_epi16(stp2_7, stp1_8);
l9 = _mm_sub_epi16(stp2_6, stp1_9);
l10 = _mm_sub_epi16(stp2_5, stp2_10);
l11 = _mm_sub_epi16(stp2_4, stp2_11);
l12 = _mm_sub_epi16(stp2_3, stp2_12);
l13 = _mm_sub_epi16(stp2_2, stp2_13);
l14 = _mm_sub_epi16(stp2_1, stp1_14);
l15 = _mm_sub_epi16(stp2_0, stp1_15);
} else if (i == 1) {
// Right 8x16
r0 = _mm_add_epi16(stp2_0, stp1_15);
r1 = _mm_add_epi16(stp2_1, stp1_14);
r2 = _mm_add_epi16(stp2_2, stp2_13);
r3 = _mm_add_epi16(stp2_3, stp2_12);
r4 = _mm_add_epi16(stp2_4, stp2_11);
r5 = _mm_add_epi16(stp2_5, stp2_10);
r6 = _mm_add_epi16(stp2_6, stp1_9);
r7 = _mm_add_epi16(stp2_7, stp1_8);
r8 = _mm_sub_epi16(stp2_7, stp1_8);
r9 = _mm_sub_epi16(stp2_6, stp1_9);
r10 = _mm_sub_epi16(stp2_5, stp2_10);
r11 = _mm_sub_epi16(stp2_4, stp2_11);
r12 = _mm_sub_epi16(stp2_3, stp2_12);
r13 = _mm_sub_epi16(stp2_2, stp2_13);
r14 = _mm_sub_epi16(stp2_1, stp1_14);
r15 = _mm_sub_epi16(stp2_0, stp1_15);
} else {
// 2-D
in0 = _mm_add_epi16(stp2_0, stp1_15);
in1 = _mm_add_epi16(stp2_1, stp1_14);
in2 = _mm_add_epi16(stp2_2, stp2_13);
in3 = _mm_add_epi16(stp2_3, stp2_12);
in4 = _mm_add_epi16(stp2_4, stp2_11);
in5 = _mm_add_epi16(stp2_5, stp2_10);
in6 = _mm_add_epi16(stp2_6, stp1_9);
in7 = _mm_add_epi16(stp2_7, stp1_8);
in8 = _mm_sub_epi16(stp2_7, stp1_8);
in9 = _mm_sub_epi16(stp2_6, stp1_9);
in10 = _mm_sub_epi16(stp2_5, stp2_10);
in11 = _mm_sub_epi16(stp2_4, stp2_11);
in12 = _mm_sub_epi16(stp2_3, stp2_12);
in13 = _mm_sub_epi16(stp2_2, stp2_13);
in14 = _mm_sub_epi16(stp2_1, stp1_14);
in15 = _mm_sub_epi16(stp2_0, stp1_15);
in[0] = _mm_add_epi16(stp2_0, stp1_15);
in[1] = _mm_add_epi16(stp2_1, stp1_14);
in[2] = _mm_add_epi16(stp2_2, stp2_13);
in[3] = _mm_add_epi16(stp2_3, stp2_12);
in[4] = _mm_add_epi16(stp2_4, stp2_11);
in[5] = _mm_add_epi16(stp2_5, stp2_10);
in[6] = _mm_add_epi16(stp2_6, stp1_9);
in[7] = _mm_add_epi16(stp2_7, stp1_8);
in[8] = _mm_sub_epi16(stp2_7, stp1_8);
in[9] = _mm_sub_epi16(stp2_6, stp1_9);
in[10] = _mm_sub_epi16(stp2_5, stp2_10);
in[11] = _mm_sub_epi16(stp2_4, stp2_11);
in[12] = _mm_sub_epi16(stp2_3, stp2_12);
in[13] = _mm_sub_epi16(stp2_2, stp2_13);
in[14] = _mm_sub_epi16(stp2_1, stp1_14);
in[15] = _mm_sub_epi16(stp2_0, stp1_15);
// Final rounding and shift
in0 = _mm_adds_epi16(in0, final_rounding);
in1 = _mm_adds_epi16(in1, final_rounding);
in2 = _mm_adds_epi16(in2, final_rounding);
in3 = _mm_adds_epi16(in3, final_rounding);
in4 = _mm_adds_epi16(in4, final_rounding);
in5 = _mm_adds_epi16(in5, final_rounding);
in6 = _mm_adds_epi16(in6, final_rounding);
in7 = _mm_adds_epi16(in7, final_rounding);
in8 = _mm_adds_epi16(in8, final_rounding);
in9 = _mm_adds_epi16(in9, final_rounding);
in10 = _mm_adds_epi16(in10, final_rounding);
in11 = _mm_adds_epi16(in11, final_rounding);
in12 = _mm_adds_epi16(in12, final_rounding);
in13 = _mm_adds_epi16(in13, final_rounding);
in14 = _mm_adds_epi16(in14, final_rounding);
in15 = _mm_adds_epi16(in15, final_rounding);
in[0] = _mm_adds_epi16(in[0], final_rounding);
in[1] = _mm_adds_epi16(in[1], final_rounding);
in[2] = _mm_adds_epi16(in[2], final_rounding);
in[3] = _mm_adds_epi16(in[3], final_rounding);
in[4] = _mm_adds_epi16(in[4], final_rounding);
in[5] = _mm_adds_epi16(in[5], final_rounding);
in[6] = _mm_adds_epi16(in[6], final_rounding);
in[7] = _mm_adds_epi16(in[7], final_rounding);
in[8] = _mm_adds_epi16(in[8], final_rounding);
in[9] = _mm_adds_epi16(in[9], final_rounding);
in[10] = _mm_adds_epi16(in[10], final_rounding);
in[11] = _mm_adds_epi16(in[11], final_rounding);
in[12] = _mm_adds_epi16(in[12], final_rounding);
in[13] = _mm_adds_epi16(in[13], final_rounding);
in[14] = _mm_adds_epi16(in[14], final_rounding);
in[15] = _mm_adds_epi16(in[15], final_rounding);
in0 = _mm_srai_epi16(in0, 6);
in1 = _mm_srai_epi16(in1, 6);
in2 = _mm_srai_epi16(in2, 6);
in3 = _mm_srai_epi16(in3, 6);
in4 = _mm_srai_epi16(in4, 6);
in5 = _mm_srai_epi16(in5, 6);
in6 = _mm_srai_epi16(in6, 6);
in7 = _mm_srai_epi16(in7, 6);
in8 = _mm_srai_epi16(in8, 6);
in9 = _mm_srai_epi16(in9, 6);
in10 = _mm_srai_epi16(in10, 6);
in11 = _mm_srai_epi16(in11, 6);
in12 = _mm_srai_epi16(in12, 6);
in13 = _mm_srai_epi16(in13, 6);
in14 = _mm_srai_epi16(in14, 6);
in15 = _mm_srai_epi16(in15, 6);
in[0] = _mm_srai_epi16(in[0], 6);
in[1] = _mm_srai_epi16(in[1], 6);
in[2] = _mm_srai_epi16(in[2], 6);
in[3] = _mm_srai_epi16(in[3], 6);
in[4] = _mm_srai_epi16(in[4], 6);
in[5] = _mm_srai_epi16(in[5], 6);
in[6] = _mm_srai_epi16(in[6], 6);
in[7] = _mm_srai_epi16(in[7], 6);
in[8] = _mm_srai_epi16(in[8], 6);
in[9] = _mm_srai_epi16(in[9], 6);
in[10] = _mm_srai_epi16(in[10], 6);
in[11] = _mm_srai_epi16(in[11], 6);
in[12] = _mm_srai_epi16(in[12], 6);
in[13] = _mm_srai_epi16(in[13], 6);
in[14] = _mm_srai_epi16(in[14], 6);
in[15] = _mm_srai_epi16(in[15], 6);
RECON_AND_STORE(dest, in0);
RECON_AND_STORE(dest, in1);
RECON_AND_STORE(dest, in2);
RECON_AND_STORE(dest, in3);
RECON_AND_STORE(dest, in4);
RECON_AND_STORE(dest, in5);
RECON_AND_STORE(dest, in6);
RECON_AND_STORE(dest, in7);
RECON_AND_STORE(dest, in8);
RECON_AND_STORE(dest, in9);
RECON_AND_STORE(dest, in10);
RECON_AND_STORE(dest, in11);
RECON_AND_STORE(dest, in12);
RECON_AND_STORE(dest, in13);
RECON_AND_STORE(dest, in14);
RECON_AND_STORE(dest, in15);
RECON_AND_STORE(dest, in[0]);
RECON_AND_STORE(dest, in[1]);
RECON_AND_STORE(dest, in[2]);
RECON_AND_STORE(dest, in[3]);
RECON_AND_STORE(dest, in[4]);
RECON_AND_STORE(dest, in[5]);
RECON_AND_STORE(dest, in[6]);
RECON_AND_STORE(dest, in[7]);
RECON_AND_STORE(dest, in[8]);
RECON_AND_STORE(dest, in[9]);
RECON_AND_STORE(dest, in[10]);
RECON_AND_STORE(dest, in[11]);
RECON_AND_STORE(dest, in[12]);
RECON_AND_STORE(dest, in[13]);
RECON_AND_STORE(dest, in[14]);
RECON_AND_STORE(dest, in[15]);
dest += 8 - (stride * 16);
}
}
}
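The restructuring above replaces the old four-iteration loop, with its per-iteration branches, by two explicit passes of two iterations each: the first pass transforms the two 8x16 halves of the input into l[] and r[], and the second transposes those results back through in[], transforms again, and reconstructs. In scalar form, the separable 2-D structure it implements looks like this (a sketch assuming a 1-D reference transform, not code from this file):

#include <stdint.h>
#include <string.h>

static void transpose_16x16(const int16_t *src, int16_t *dst) {
  int r, c;
  for (r = 0; r < 16; r++)
    for (c = 0; c < 16; c++)
      dst[16 * c + r] = src[16 * r + c];
}

/* idct16_1d_ref is an assumed in-place scalar 1-D transform. */
static void idct16x16_2d_sketch(const int16_t input[256], int16_t output[256],
                                void (*idct16_1d_ref)(int16_t *row)) {
  int16_t a[256], b[256];
  int r;
  /* Pass 1: transpose, then transform each row (the original columns). */
  transpose_16x16(input, a);
  for (r = 0; r < 16; r++)
    idct16_1d_ref(a + 16 * r);
  /* Pass 2: transpose back, then transform the original rows. */
  transpose_16x16(a, b);
  for (r = 0; r < 16; r++)
    idct16_1d_ref(b + 16 * r);
  memcpy(output, b, sizeof(b));
}

Working on 8x16 halves lets the SSE2 version keep one full pass in xmm registers and arrays instead of 48 named variables, which is where the x1.107 speedup in the commit title comes from.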
@@ -2489,15 +2469,7 @@ void vp9_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
const __m128i stg4_7 = pair_set_epi16(-cospi_8_64, cospi_24_64);
const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);
__m128i in0 = zero, in1 = zero, in2 = zero, in3 = zero, in4 = zero,
in5 = zero, in6 = zero, in7 = zero, in8 = zero, in9 = zero,
in10 = zero, in11 = zero, in12 = zero, in13 = zero,
in14 = zero, in15 = zero;
__m128i l0 = zero, l1 = zero, l2 = zero, l3 = zero, l4 = zero, l5 = zero,
l6 = zero, l7 = zero, l8 = zero, l9 = zero, l10 = zero, l11 = zero,
l12 = zero, l13 = zero, l14 = zero, l15 = zero;
__m128i in[16], l[16];
__m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7,
stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15,
stp1_8_0, stp1_12_0;
@@ -2505,25 +2477,26 @@ void vp9_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15;
__m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
int i;
in[4] = in[5] = in[6] = in[7] = in[12] = in[13] = in[14] = in[15] = zero;
// 1-D idct. Load input data.
in0 = _mm_load_si128((const __m128i *)input);
in8 = _mm_load_si128((const __m128i *)(input + 8 * 1));
in1 = _mm_load_si128((const __m128i *)(input + 8 * 2));
in9 = _mm_load_si128((const __m128i *)(input + 8 * 3));
in2 = _mm_load_si128((const __m128i *)(input + 8 * 4));
in10 = _mm_load_si128((const __m128i *)(input + 8 * 5));
in3 = _mm_load_si128((const __m128i *)(input + 8 * 6));
in11 = _mm_load_si128((const __m128i *)(input + 8 * 7));
in[0] = _mm_load_si128((const __m128i *)input);
in[8] = _mm_load_si128((const __m128i *)(input + 8 * 1));
in[1] = _mm_load_si128((const __m128i *)(input + 8 * 2));
in[9] = _mm_load_si128((const __m128i *)(input + 8 * 3));
in[2] = _mm_load_si128((const __m128i *)(input + 8 * 4));
in[10] = _mm_load_si128((const __m128i *)(input + 8 * 5));
in[3] = _mm_load_si128((const __m128i *)(input + 8 * 6));
in[11] = _mm_load_si128((const __m128i *)(input + 8 * 7));
TRANSPOSE_8X4(in0, in1, in2, in3, in0, in1, in2, in3);
TRANSPOSE_8X4(in8, in9, in10, in11, in8, in9, in10, in11);
TRANSPOSE_8X4(in[0], in[1], in[2], in[3], in[0], in[1], in[2], in[3]);
TRANSPOSE_8X4(in[8], in[9], in[10], in[11], in[8], in[9], in[10], in[11]);
// Stage2
{
const __m128i lo_1_15 = _mm_unpackhi_epi16(in0, in11);
const __m128i lo_9_7 = _mm_unpackhi_epi16(in8, in3);
const __m128i lo_5_11 = _mm_unpackhi_epi16(in2, in9);
const __m128i lo_13_3 = _mm_unpackhi_epi16(in10, in1);
const __m128i lo_1_15 = _mm_unpackhi_epi16(in[0], in[11]);
const __m128i lo_9_7 = _mm_unpackhi_epi16(in[8], in[3]);
const __m128i lo_5_11 = _mm_unpackhi_epi16(in[2], in[9]);
const __m128i lo_13_3 = _mm_unpackhi_epi16(in[10], in[1]);
tmp0 = _mm_madd_epi16(lo_1_15, stg2_0);
tmp2 = _mm_madd_epi16(lo_1_15, stg2_1);
@@ -2565,8 +2538,8 @@ void vp9_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
// Stage3
{
const __m128i lo_2_14 = _mm_unpacklo_epi16(in1, in11);
const __m128i lo_10_6 = _mm_unpacklo_epi16(in9, in3);
const __m128i lo_2_14 = _mm_unpacklo_epi16(in[1], in[11]);
const __m128i lo_10_6 = _mm_unpacklo_epi16(in[9], in[3]);
tmp0 = _mm_madd_epi16(lo_2_14, stg3_0);
tmp2 = _mm_madd_epi16(lo_2_14, stg3_1);
@@ -2601,8 +2574,8 @@ void vp9_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
// Stage4
{
const __m128i lo_0_8 = _mm_unpacklo_epi16(in0, in8);
const __m128i lo_4_12 = _mm_unpacklo_epi16(in2, in10);
const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], in[8]);
const __m128i lo_4_12 = _mm_unpacklo_epi16(in[2], in[10]);
const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14);
const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13);
@@ -2711,106 +2684,99 @@ void vp9_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
}
// Stage7. Left 8x16 only.
l0 = _mm_add_epi16(stp2_0, stp1_15);
l1 = _mm_add_epi16(stp2_1, stp1_14);
l2 = _mm_add_epi16(stp2_2, stp2_13);
l3 = _mm_add_epi16(stp2_3, stp2_12);
l4 = _mm_add_epi16(stp2_4, stp2_11);
l5 = _mm_add_epi16(stp2_5, stp2_10);
l6 = _mm_add_epi16(stp2_6, stp1_9);
l7 = _mm_add_epi16(stp2_7, stp1_8);
l8 = _mm_sub_epi16(stp2_7, stp1_8);
l9 = _mm_sub_epi16(stp2_6, stp1_9);
l10 = _mm_sub_epi16(stp2_5, stp2_10);
l11 = _mm_sub_epi16(stp2_4, stp2_11);
l12 = _mm_sub_epi16(stp2_3, stp2_12);
l13 = _mm_sub_epi16(stp2_2, stp2_13);
l14 = _mm_sub_epi16(stp2_1, stp1_14);
l15 = _mm_sub_epi16(stp2_0, stp1_15);
l[0] = _mm_add_epi16(stp2_0, stp1_15);
l[1] = _mm_add_epi16(stp2_1, stp1_14);
l[2] = _mm_add_epi16(stp2_2, stp2_13);
l[3] = _mm_add_epi16(stp2_3, stp2_12);
l[4] = _mm_add_epi16(stp2_4, stp2_11);
l[5] = _mm_add_epi16(stp2_5, stp2_10);
l[6] = _mm_add_epi16(stp2_6, stp1_9);
l[7] = _mm_add_epi16(stp2_7, stp1_8);
l[8] = _mm_sub_epi16(stp2_7, stp1_8);
l[9] = _mm_sub_epi16(stp2_6, stp1_9);
l[10] = _mm_sub_epi16(stp2_5, stp2_10);
l[11] = _mm_sub_epi16(stp2_4, stp2_11);
l[12] = _mm_sub_epi16(stp2_3, stp2_12);
l[13] = _mm_sub_epi16(stp2_2, stp2_13);
l[14] = _mm_sub_epi16(stp2_1, stp1_14);
l[15] = _mm_sub_epi16(stp2_0, stp1_15);
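Stage7 above is the final butterfly: the even-half results (stp2_0..stp2_7) are combined symmetrically with the odd-half results (stp1_8..stp1_15, some already merged into stp2_10..stp2_13 by stage 6). In scalar form the pattern is (sketch; even[]/odd[] are illustrative names holding those values in index order):

#include <stdint.h>

/* odd[0] = stp1_8, odd[1] = stp1_9, ..., odd[7] = stp1_15. */
static void stage7_sketch(const int16_t even[8], const int16_t odd[8],
                          int16_t out[16]) {
  int k;
  for (k = 0; k < 8; k++) {
    out[k]      = (int16_t)(even[k] + odd[7 - k]);  /* e.g. stp2_0 + stp1_15 */
    out[15 - k] = (int16_t)(even[k] - odd[7 - k]);  /* e.g. stp2_0 - stp1_15 */
  }
}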
// 2-D idct. We do 2 8x16 blocks.
for (i = 0; i < 2; i++) {
if (i == 0)
TRANSPOSE_4X8(l0, l1, l2, l3, l4, l5, l6, l7, in0, in1, in2, in3, in4,
in5, in6, in7);
if (i == 1)
TRANSPOSE_4X8(l8, l9, l10, l11, l12, l13, l14, l15, in0, in1, in2, in3,
in4, in5, in6, in7);
in8 = in9 = in10 = in11 = in12 = in13 = in14 = in15 = zero;
array_transpose_4X8(l + 8*i, in);
in[8] = in[9] = in[10] = in[11] = in[12] = in[13] = in[14] = in[15] = zero;
IDCT16_1D
// Stage7
in0 = _mm_add_epi16(stp2_0, stp1_15);
in1 = _mm_add_epi16(stp2_1, stp1_14);
in2 = _mm_add_epi16(stp2_2, stp2_13);
in3 = _mm_add_epi16(stp2_3, stp2_12);
in4 = _mm_add_epi16(stp2_4, stp2_11);
in5 = _mm_add_epi16(stp2_5, stp2_10);
in6 = _mm_add_epi16(stp2_6, stp1_9);
in7 = _mm_add_epi16(stp2_7, stp1_8);
in8 = _mm_sub_epi16(stp2_7, stp1_8);
in9 = _mm_sub_epi16(stp2_6, stp1_9);
in10 = _mm_sub_epi16(stp2_5, stp2_10);
in11 = _mm_sub_epi16(stp2_4, stp2_11);
in12 = _mm_sub_epi16(stp2_3, stp2_12);
in13 = _mm_sub_epi16(stp2_2, stp2_13);
in14 = _mm_sub_epi16(stp2_1, stp1_14);
in15 = _mm_sub_epi16(stp2_0, stp1_15);
in[0] = _mm_add_epi16(stp2_0, stp1_15);
in[1] = _mm_add_epi16(stp2_1, stp1_14);
in[2] = _mm_add_epi16(stp2_2, stp2_13);
in[3] = _mm_add_epi16(stp2_3, stp2_12);
in[4] = _mm_add_epi16(stp2_4, stp2_11);
in[5] = _mm_add_epi16(stp2_5, stp2_10);
in[6] = _mm_add_epi16(stp2_6, stp1_9);
in[7] = _mm_add_epi16(stp2_7, stp1_8);
in[8] = _mm_sub_epi16(stp2_7, stp1_8);
in[9] = _mm_sub_epi16(stp2_6, stp1_9);
in[10] = _mm_sub_epi16(stp2_5, stp2_10);
in[11] = _mm_sub_epi16(stp2_4, stp2_11);
in[12] = _mm_sub_epi16(stp2_3, stp2_12);
in[13] = _mm_sub_epi16(stp2_2, stp2_13);
in[14] = _mm_sub_epi16(stp2_1, stp1_14);
in[15] = _mm_sub_epi16(stp2_0, stp1_15);
// Final rounding and shift
in0 = _mm_adds_epi16(in0, final_rounding);
in1 = _mm_adds_epi16(in1, final_rounding);
in2 = _mm_adds_epi16(in2, final_rounding);
in3 = _mm_adds_epi16(in3, final_rounding);
in4 = _mm_adds_epi16(in4, final_rounding);
in5 = _mm_adds_epi16(in5, final_rounding);
in6 = _mm_adds_epi16(in6, final_rounding);
in7 = _mm_adds_epi16(in7, final_rounding);
in8 = _mm_adds_epi16(in8, final_rounding);
in9 = _mm_adds_epi16(in9, final_rounding);
in10 = _mm_adds_epi16(in10, final_rounding);
in11 = _mm_adds_epi16(in11, final_rounding);
in12 = _mm_adds_epi16(in12, final_rounding);
in13 = _mm_adds_epi16(in13, final_rounding);
in14 = _mm_adds_epi16(in14, final_rounding);
in15 = _mm_adds_epi16(in15, final_rounding);
in[0] = _mm_adds_epi16(in[0], final_rounding);
in[1] = _mm_adds_epi16(in[1], final_rounding);
in[2] = _mm_adds_epi16(in[2], final_rounding);
in[3] = _mm_adds_epi16(in[3], final_rounding);
in[4] = _mm_adds_epi16(in[4], final_rounding);
in[5] = _mm_adds_epi16(in[5], final_rounding);
in[6] = _mm_adds_epi16(in[6], final_rounding);
in[7] = _mm_adds_epi16(in[7], final_rounding);
in[8] = _mm_adds_epi16(in[8], final_rounding);
in[9] = _mm_adds_epi16(in[9], final_rounding);
in[10] = _mm_adds_epi16(in[10], final_rounding);
in[11] = _mm_adds_epi16(in[11], final_rounding);
in[12] = _mm_adds_epi16(in[12], final_rounding);
in[13] = _mm_adds_epi16(in[13], final_rounding);
in[14] = _mm_adds_epi16(in[14], final_rounding);
in[15] = _mm_adds_epi16(in[15], final_rounding);
in0 = _mm_srai_epi16(in0, 6);
in1 = _mm_srai_epi16(in1, 6);
in2 = _mm_srai_epi16(in2, 6);
in3 = _mm_srai_epi16(in3, 6);
in4 = _mm_srai_epi16(in4, 6);
in5 = _mm_srai_epi16(in5, 6);
in6 = _mm_srai_epi16(in6, 6);
in7 = _mm_srai_epi16(in7, 6);
in8 = _mm_srai_epi16(in8, 6);
in9 = _mm_srai_epi16(in9, 6);
in10 = _mm_srai_epi16(in10, 6);
in11 = _mm_srai_epi16(in11, 6);
in12 = _mm_srai_epi16(in12, 6);
in13 = _mm_srai_epi16(in13, 6);
in14 = _mm_srai_epi16(in14, 6);
in15 = _mm_srai_epi16(in15, 6);
in[0] = _mm_srai_epi16(in[0], 6);
in[1] = _mm_srai_epi16(in[1], 6);
in[2] = _mm_srai_epi16(in[2], 6);
in[3] = _mm_srai_epi16(in[3], 6);
in[4] = _mm_srai_epi16(in[4], 6);
in[5] = _mm_srai_epi16(in[5], 6);
in[6] = _mm_srai_epi16(in[6], 6);
in[7] = _mm_srai_epi16(in[7], 6);
in[8] = _mm_srai_epi16(in[8], 6);
in[9] = _mm_srai_epi16(in[9], 6);
in[10] = _mm_srai_epi16(in[10], 6);
in[11] = _mm_srai_epi16(in[11], 6);
in[12] = _mm_srai_epi16(in[12], 6);
in[13] = _mm_srai_epi16(in[13], 6);
in[14] = _mm_srai_epi16(in[14], 6);
in[15] = _mm_srai_epi16(in[15], 6);
RECON_AND_STORE(dest, in0);
RECON_AND_STORE(dest, in1);
RECON_AND_STORE(dest, in2);
RECON_AND_STORE(dest, in3);
RECON_AND_STORE(dest, in4);
RECON_AND_STORE(dest, in5);
RECON_AND_STORE(dest, in6);
RECON_AND_STORE(dest, in7);
RECON_AND_STORE(dest, in8);
RECON_AND_STORE(dest, in9);
RECON_AND_STORE(dest, in10);
RECON_AND_STORE(dest, in11);
RECON_AND_STORE(dest, in12);
RECON_AND_STORE(dest, in13);
RECON_AND_STORE(dest, in14);
RECON_AND_STORE(dest, in15);
RECON_AND_STORE(dest, in[0]);
RECON_AND_STORE(dest, in[1]);
RECON_AND_STORE(dest, in[2]);
RECON_AND_STORE(dest, in[3]);
RECON_AND_STORE(dest, in[4]);
RECON_AND_STORE(dest, in[5]);
RECON_AND_STORE(dest, in[6]);
RECON_AND_STORE(dest, in[7]);
RECON_AND_STORE(dest, in[8]);
RECON_AND_STORE(dest, in[9]);
RECON_AND_STORE(dest, in[10]);
RECON_AND_STORE(dest, in[11]);
RECON_AND_STORE(dest, in[12]);
RECON_AND_STORE(dest, in[13]);
RECON_AND_STORE(dest, in[14]);
RECON_AND_STORE(dest, in[15]);
dest += 8 - (stride * 16);
}
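For context, RECON_AND_STORE (defined earlier in this file, outside this diff) adds one row of eight 16-bit residuals to the destination pixels and writes the result back with unsigned saturation, then steps dest by stride. An equivalent standalone sketch, assuming the macro's usual shape rather than quoting it:

#include <emmintrin.h>
#include <stdint.h>

static uint8_t *recon_and_store_sketch(uint8_t *dest, int stride,
                                       __m128i residual) {
  const __m128i zero = _mm_setzero_si128();
  __m128i d = _mm_loadl_epi64((const __m128i *)dest); /* load 8 pixels   */
  d = _mm_unpacklo_epi8(d, zero);                     /* widen to 16 bit */
  d = _mm_add_epi16(d, residual);                     /* add residual    */
  d = _mm_packus_epi16(d, d);                         /* clamp to 0..255 */
  _mm_storel_epi64((__m128i *)dest, d);
  return dest + stride;                               /* next pixel row  */
}

The "dest += 8 - (stride * 16);" lines undo the sixteen stride steps taken by the macro and move to the right-hand 8-pixel half of the 16x16 block.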