@@ -15,206 +15,6 @@
#include "vpx_dsp/x86/transpose_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"
void vpx_idct8x8_64_add_ssse3(const tran_low_t *input, uint8_t *dest,
                              int stride) {
  const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
  const __m128i final_rounding = _mm_set1_epi16(1 << 4);
  const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
  const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
  const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64);
  const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64);
  const __m128i stk2_0 = pair_set_epi16(cospi_16_64, cospi_16_64);
  const __m128i stk2_1 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
  const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64);
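  // Note: pair_set_epi16(a, b) interleaves the two 16-bit cosine constants so
  // that _mm_madd_epi16 can compute a * x + b * y for each coefficient pair in
  // one instruction (the basic butterfly used throughout the stages below).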

  __m128i in[8];
  __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7;
  __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  int i;

  // Load input data.
  in[0] = load_input_data(input);
  in[1] = load_input_data(input + 8 * 1);
  in[2] = load_input_data(input + 8 * 2);
  in[3] = load_input_data(input + 8 * 3);
  in[4] = load_input_data(input + 8 * 4);
  in[5] = load_input_data(input + 8 * 5);
  in[6] = load_input_data(input + 8 * 6);
  in[7] = load_input_data(input + 8 * 7);
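  // Each call above reads one row of eight coefficients; in high-bitdepth
  // builds load_input_data also narrows 32-bit tran_low_t values to 16 bits.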

  // 2-D
  for (i = 0; i < 2; i++) {
    // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
    transpose_16bit_8x8(in, in);
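
    // The transpose above swaps rows and columns in place; together with the
    // two loop iterations, the 1-D IDCT below is applied along both
    // dimensions, processing eight columns of the block in parallel.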
    // 4-stage 1D idct8x8
    {
      /* Stage1 */
      {
        const __m128i lo_17 = _mm_unpacklo_epi16(in[1], in[7]);
        const __m128i hi_17 = _mm_unpackhi_epi16(in[1], in[7]);
        const __m128i lo_35 = _mm_unpacklo_epi16(in[3], in[5]);
        const __m128i hi_35 = _mm_unpackhi_epi16(in[3], in[5]);

        {
          tmp0 = _mm_madd_epi16(lo_17, stg1_0);
          tmp1 = _mm_madd_epi16(hi_17, stg1_0);
          tmp2 = _mm_madd_epi16(lo_17, stg1_1);
          tmp3 = _mm_madd_epi16(hi_17, stg1_1);
          tmp4 = _mm_madd_epi16(lo_35, stg1_2);
          tmp5 = _mm_madd_epi16(hi_35, stg1_2);
          tmp6 = _mm_madd_epi16(lo_35, stg1_3);
          tmp7 = _mm_madd_epi16(hi_35, stg1_3);

          tmp0 = _mm_add_epi32(tmp0, rounding);
          tmp1 = _mm_add_epi32(tmp1, rounding);
          tmp2 = _mm_add_epi32(tmp2, rounding);
          tmp3 = _mm_add_epi32(tmp3, rounding);
          tmp4 = _mm_add_epi32(tmp4, rounding);
          tmp5 = _mm_add_epi32(tmp5, rounding);
          tmp6 = _mm_add_epi32(tmp6, rounding);
          tmp7 = _mm_add_epi32(tmp7, rounding);

          tmp0 = _mm_srai_epi32(tmp0, 14);
          tmp1 = _mm_srai_epi32(tmp1, 14);
          tmp2 = _mm_srai_epi32(tmp2, 14);
          tmp3 = _mm_srai_epi32(tmp3, 14);
          tmp4 = _mm_srai_epi32(tmp4, 14);
          tmp5 = _mm_srai_epi32(tmp5, 14);
          tmp6 = _mm_srai_epi32(tmp6, 14);
          tmp7 = _mm_srai_epi32(tmp7, 14);

          stp1_4 = _mm_packs_epi32(tmp0, tmp1);
          stp1_7 = _mm_packs_epi32(tmp2, tmp3);
          stp1_5 = _mm_packs_epi32(tmp4, tmp5);
          stp1_6 = _mm_packs_epi32(tmp6, tmp7);
        }
      }
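
      // Each stage computes the fixed-point butterfly
      // (x * c0 + y * c1 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS, where
      // DCT_CONST_BITS is 14 and DCT_CONST_ROUNDING is 1 << 13; the literal
      // shifts by 14 are the same value written out.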
      /* Stage2 */
      {
        const __m128i lo_26 = _mm_unpacklo_epi16(in[2], in[6]);
        const __m128i hi_26 = _mm_unpackhi_epi16(in[2], in[6]);

        {
          tmp0 = _mm_unpacklo_epi16(in[0], in[4]);
          tmp1 = _mm_unpackhi_epi16(in[0], in[4]);

          tmp2 = _mm_madd_epi16(tmp0, stk2_0);
          tmp3 = _mm_madd_epi16(tmp1, stk2_0);
          tmp4 = _mm_madd_epi16(tmp0, stk2_1);
          tmp5 = _mm_madd_epi16(tmp1, stk2_1);

          tmp2 = _mm_add_epi32(tmp2, rounding);
          tmp3 = _mm_add_epi32(tmp3, rounding);
          tmp4 = _mm_add_epi32(tmp4, rounding);
          tmp5 = _mm_add_epi32(tmp5, rounding);

          tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
          tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
          tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
          tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);

          stp2_0 = _mm_packs_epi32(tmp2, tmp3);
          stp2_1 = _mm_packs_epi32(tmp4, tmp5);

          tmp0 = _mm_madd_epi16(lo_26, stg2_2);
          tmp1 = _mm_madd_epi16(hi_26, stg2_2);
          tmp2 = _mm_madd_epi16(lo_26, stg2_3);
          tmp3 = _mm_madd_epi16(hi_26, stg2_3);

          tmp0 = _mm_add_epi32(tmp0, rounding);
          tmp1 = _mm_add_epi32(tmp1, rounding);
          tmp2 = _mm_add_epi32(tmp2, rounding);
          tmp3 = _mm_add_epi32(tmp3, rounding);

          tmp0 = _mm_srai_epi32(tmp0, 14);
          tmp1 = _mm_srai_epi32(tmp1, 14);
          tmp2 = _mm_srai_epi32(tmp2, 14);
          tmp3 = _mm_srai_epi32(tmp3, 14);

          stp2_2 = _mm_packs_epi32(tmp0, tmp1);
          stp2_3 = _mm_packs_epi32(tmp2, tmp3);
        }

        stp2_4 = _mm_add_epi16(stp1_4, stp1_5);
        stp2_5 = _mm_sub_epi16(stp1_4, stp1_5);
        stp2_6 = _mm_sub_epi16(stp1_7, stp1_6);
        stp2_7 = _mm_add_epi16(stp1_7, stp1_6);
      }

      /* Stage3 */
      {
        stp1_0 = _mm_add_epi16(stp2_0, stp2_3);
        stp1_1 = _mm_add_epi16(stp2_1, stp2_2);
        stp1_2 = _mm_sub_epi16(stp2_1, stp2_2);
        stp1_3 = _mm_sub_epi16(stp2_0, stp2_3);

        tmp0 = _mm_unpacklo_epi16(stp2_6, stp2_5);
        tmp1 = _mm_unpackhi_epi16(stp2_6, stp2_5);

        tmp2 = _mm_madd_epi16(tmp0, stk2_1);
        tmp3 = _mm_madd_epi16(tmp1, stk2_1);
        tmp4 = _mm_madd_epi16(tmp0, stk2_0);
        tmp5 = _mm_madd_epi16(tmp1, stk2_0);

        tmp2 = _mm_add_epi32(tmp2, rounding);
        tmp3 = _mm_add_epi32(tmp3, rounding);
        tmp4 = _mm_add_epi32(tmp4, rounding);
        tmp5 = _mm_add_epi32(tmp5, rounding);

        tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
        tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
        tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);
        tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);

        stp1_5 = _mm_packs_epi32(tmp2, tmp3);
        stp1_6 = _mm_packs_epi32(tmp4, tmp5);
      }

      /* Stage4 */
      in[0] = _mm_add_epi16(stp1_0, stp2_7);
      in[1] = _mm_add_epi16(stp1_1, stp1_6);
      in[2] = _mm_add_epi16(stp1_2, stp1_5);
      in[3] = _mm_add_epi16(stp1_3, stp2_4);
      in[4] = _mm_sub_epi16(stp1_3, stp2_4);
      in[5] = _mm_sub_epi16(stp1_2, stp1_5);
      in[6] = _mm_sub_epi16(stp1_1, stp1_6);
      in[7] = _mm_sub_epi16(stp1_0, stp2_7);
    }
  }
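
  // After both 1-D passes the residual values are still scaled up by 2^5; the
  // rounding add of 1 << 4 and arithmetic shift right by 5 below implement
  // ROUND_POWER_OF_TWO(x, 5) for the final output values.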
  // Final rounding and shift
  in[0] = _mm_adds_epi16(in[0], final_rounding);
  in[1] = _mm_adds_epi16(in[1], final_rounding);
  in[2] = _mm_adds_epi16(in[2], final_rounding);
  in[3] = _mm_adds_epi16(in[3], final_rounding);
  in[4] = _mm_adds_epi16(in[4], final_rounding);
  in[5] = _mm_adds_epi16(in[5], final_rounding);
  in[6] = _mm_adds_epi16(in[6], final_rounding);
  in[7] = _mm_adds_epi16(in[7], final_rounding);

  in[0] = _mm_srai_epi16(in[0], 5);
  in[1] = _mm_srai_epi16(in[1], 5);
  in[2] = _mm_srai_epi16(in[2], 5);
  in[3] = _mm_srai_epi16(in[3], 5);
  in[4] = _mm_srai_epi16(in[4], 5);
  in[5] = _mm_srai_epi16(in[5], 5);
  in[6] = _mm_srai_epi16(in[6], 5);
  in[7] = _mm_srai_epi16(in[7], 5);
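
  // recon_and_store adds one row of residuals to the eight prediction pixels
  // at dest (with saturation) and writes the reconstructed row back.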
  recon_and_store(dest + 0 * stride, in[0]);
  recon_and_store(dest + 1 * stride, in[1]);
  recon_and_store(dest + 2 * stride, in[2]);
  recon_and_store(dest + 3 * stride, in[3]);
  recon_and_store(dest + 4 * stride, in[4]);
  recon_and_store(dest + 5 * stride, in[5]);
  recon_and_store(dest + 6 * stride, in[6]);
  recon_and_store(dest + 7 * stride, in[7]);
}

void vpx_idct8x8_12_add_ssse3(const tran_low_t *input, uint8_t *dest,
                              int stride) {
  const __m128i zero = _mm_setzero_si128();