Merge changes I08b562b6,Ia275940a,I51106e90

* changes:
  Add vpx_highbd_idct32x32_{34, 135, 1024}_add_{sse2, sse4_1}
  Update highbd idct x86 optimizations.
  Update 32x32 idct sse2 and ssse3 optimizations.
Linfeng Zhang 2017-08-16 16:36:37 +00:00 committed by Gerrit Code Review
commit f95686895b
14 changed files with 2425 additions and 1285 deletions
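For context, the _1, _34, _135 and _1024 suffixes name the largest number of non-zero coefficients each kernel assumes, and callers pick the cheapest kernel that covers a block's eob. A minimal sketch of that selection (hypothetical wrapper for illustration only; the real decoder-side dispatch lives in vp9/common/vp9_idct.c and may differ in detail):

static void highbd_idct32x32_add_sketch(const tran_low_t *input, uint16_t *dest,
                                        int stride, int eob, int bd) {
  if (eob == 1)
    vpx_highbd_idct32x32_1_add(input, dest, stride, bd);     // DC only
  else if (eob <= 34)
    vpx_highbd_idct32x32_34_add(input, dest, stride, bd);    // top-left 8x8
  else if (eob <= 135)
    vpx_highbd_idct32x32_135_add(input, dest, stride, bd);   // top-left 16x16
  else
    vpx_highbd_idct32x32_1024_add(input, dest, stride, bd);  // full 32x32
}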


@@ -633,6 +633,36 @@ INSTANTIATE_TEST_CASE_P(NEON, PartialIDctTest,
// 32x32_135_ is implemented using the 1024 version.
const PartialInvTxfmParam sse2_partial_idct_tests[] = {
#if CONFIG_VP9_HIGHBITDEPTH
make_tuple(&vpx_highbd_fdct32x32_c,
&highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_1024_add_sse2>, TX_32X32,
1024, 8, 2),
make_tuple(&vpx_highbd_fdct32x32_c,
&highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_1024_add_sse2>, TX_32X32,
1024, 10, 2),
make_tuple(&vpx_highbd_fdct32x32_c,
&highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_1024_add_sse2>, TX_32X32,
1024, 12, 2),
make_tuple(
&vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_135_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_135_add_sse2>, TX_32X32, 135, 8, 2),
make_tuple(
&vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_135_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_135_add_sse2>, TX_32X32, 135, 10, 2),
make_tuple(
&vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_135_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_135_add_sse2>, TX_32X32, 135, 12, 2),
make_tuple(
&vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_34_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_34_add_sse2>, TX_32X32, 34, 8, 2),
make_tuple(
&vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_34_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_34_add_sse2>, TX_32X32, 34, 10, 2),
make_tuple(
&vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_34_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_34_add_sse2>, TX_32X32, 34, 12, 2),
make_tuple(
&vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_1_add_sse2>, TX_32X32, 1, 8, 2),
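Each new tuple pairs the C reference inverse with the SIMD kernel under test. Reading the fields of one entry (an annotated copy for orientation only, my reading of PartialInvTxfmParam, not an additional test case):

make_tuple(&vpx_highbd_fdct32x32_c,                             // forward transform used to build input
           &highbd_wrapper<vpx_highbd_idct32x32_135_add_c>,     // reference inverse (C)
           &highbd_wrapper<vpx_highbd_idct32x32_135_add_sse2>,  // optimized inverse under test
           TX_32X32,  // transform size
           135,       // number of leading (scan-order) coefficients kept non-zero
           10,        // bit depth
           2),        // bytes per pixel of the high-bit-depth buffers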
@@ -767,6 +797,39 @@ INSTANTIATE_TEST_CASE_P(SSSE3, PartialIDctTest,
#if HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
const PartialInvTxfmParam sse4_1_partial_idct_tests[] = {
make_tuple(&vpx_highbd_fdct32x32_c,
&highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_1024_add_sse4_1>, TX_32X32,
1024, 8, 2),
make_tuple(&vpx_highbd_fdct32x32_c,
&highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_1024_add_sse4_1>, TX_32X32,
1024, 10, 2),
make_tuple(&vpx_highbd_fdct32x32_c,
&highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_1024_add_sse4_1>, TX_32X32,
1024, 12, 2),
make_tuple(&vpx_highbd_fdct32x32_c,
&highbd_wrapper<vpx_highbd_idct32x32_135_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_135_add_sse4_1>, TX_32X32,
135, 8, 2),
make_tuple(&vpx_highbd_fdct32x32_c,
&highbd_wrapper<vpx_highbd_idct32x32_135_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_135_add_sse4_1>, TX_32X32,
135, 10, 2),
make_tuple(&vpx_highbd_fdct32x32_c,
&highbd_wrapper<vpx_highbd_idct32x32_135_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_135_add_sse4_1>, TX_32X32,
135, 12, 2),
make_tuple(
&vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_34_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_34_add_sse4_1>, TX_32X32, 34, 8, 2),
make_tuple(
&vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_34_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_34_add_sse4_1>, TX_32X32, 34, 10, 2),
make_tuple(
&vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_34_add_c>,
&highbd_wrapper<vpx_highbd_idct32x32_34_add_sse4_1>, TX_32X32, 34, 12, 2),
make_tuple(&vpx_highbd_fdct16x16_c,
&highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
&highbd_wrapper<vpx_highbd_idct16x16_256_add_sse4_1>, TX_16X16,


@@ -115,6 +115,83 @@ void vp9_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
recon_and_store(dest + 7 * stride, in[7]);
}
static INLINE void load_buffer_8x16(const tran_low_t *const input,
__m128i *const in) {
in[0] = load_input_data8(input + 0 * 16);
in[1] = load_input_data8(input + 1 * 16);
in[2] = load_input_data8(input + 2 * 16);
in[3] = load_input_data8(input + 3 * 16);
in[4] = load_input_data8(input + 4 * 16);
in[5] = load_input_data8(input + 5 * 16);
in[6] = load_input_data8(input + 6 * 16);
in[7] = load_input_data8(input + 7 * 16);
in[8] = load_input_data8(input + 8 * 16);
in[9] = load_input_data8(input + 9 * 16);
in[10] = load_input_data8(input + 10 * 16);
in[11] = load_input_data8(input + 11 * 16);
in[12] = load_input_data8(input + 12 * 16);
in[13] = load_input_data8(input + 13 * 16);
in[14] = load_input_data8(input + 14 * 16);
in[15] = load_input_data8(input + 15 * 16);
}
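// Note: load_input_data8() reads 8 tran_low_t coefficients (stored as 32-bit
// values in high-bit-depth builds) and packs them to 16-bit lanes, which is
// what lets this 8-bit-destination path run in 16-bit SSE2 arithmetic
// (my reading of the helper in inv_txfm_sse2.h).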
static INLINE void write_buffer_8x16(uint8_t *const dest, __m128i *const in,
const int stride) {
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
// Final rounding and shift
in[0] = _mm_adds_epi16(in[0], final_rounding);
in[1] = _mm_adds_epi16(in[1], final_rounding);
in[2] = _mm_adds_epi16(in[2], final_rounding);
in[3] = _mm_adds_epi16(in[3], final_rounding);
in[4] = _mm_adds_epi16(in[4], final_rounding);
in[5] = _mm_adds_epi16(in[5], final_rounding);
in[6] = _mm_adds_epi16(in[6], final_rounding);
in[7] = _mm_adds_epi16(in[7], final_rounding);
in[8] = _mm_adds_epi16(in[8], final_rounding);
in[9] = _mm_adds_epi16(in[9], final_rounding);
in[10] = _mm_adds_epi16(in[10], final_rounding);
in[11] = _mm_adds_epi16(in[11], final_rounding);
in[12] = _mm_adds_epi16(in[12], final_rounding);
in[13] = _mm_adds_epi16(in[13], final_rounding);
in[14] = _mm_adds_epi16(in[14], final_rounding);
in[15] = _mm_adds_epi16(in[15], final_rounding);
in[0] = _mm_srai_epi16(in[0], 6);
in[1] = _mm_srai_epi16(in[1], 6);
in[2] = _mm_srai_epi16(in[2], 6);
in[3] = _mm_srai_epi16(in[3], 6);
in[4] = _mm_srai_epi16(in[4], 6);
in[5] = _mm_srai_epi16(in[5], 6);
in[6] = _mm_srai_epi16(in[6], 6);
in[7] = _mm_srai_epi16(in[7], 6);
in[8] = _mm_srai_epi16(in[8], 6);
in[9] = _mm_srai_epi16(in[9], 6);
in[10] = _mm_srai_epi16(in[10], 6);
in[11] = _mm_srai_epi16(in[11], 6);
in[12] = _mm_srai_epi16(in[12], 6);
in[13] = _mm_srai_epi16(in[13], 6);
in[14] = _mm_srai_epi16(in[14], 6);
in[15] = _mm_srai_epi16(in[15], 6);
recon_and_store(dest + 0 * stride, in[0]);
recon_and_store(dest + 1 * stride, in[1]);
recon_and_store(dest + 2 * stride, in[2]);
recon_and_store(dest + 3 * stride, in[3]);
recon_and_store(dest + 4 * stride, in[4]);
recon_and_store(dest + 5 * stride, in[5]);
recon_and_store(dest + 6 * stride, in[6]);
recon_and_store(dest + 7 * stride, in[7]);
recon_and_store(dest + 8 * stride, in[8]);
recon_and_store(dest + 9 * stride, in[9]);
recon_and_store(dest + 10 * stride, in[10]);
recon_and_store(dest + 11 * stride, in[11]);
recon_and_store(dest + 12 * stride, in[12]);
recon_and_store(dest + 13 * stride, in[13]);
recon_and_store(dest + 14 * stride, in[14]);
recon_and_store(dest + 15 * stride, in[15]);
}
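The bias of 1 << 5 followed by the arithmetic shift by 6 rounds away the six fractional bits the coefficients still carry after the two 1-D passes. Per pixel, the block above is equivalent to this scalar sketch (assuming clip_pixel() is the usual 0..255 clamp from vpx_dsp_common.h):

static INLINE uint8_t recon_pixel_sketch(uint8_t pred, int16_t coeff) {
  const int residual = (coeff + 32) >> 6;  // ROUND_POWER_OF_TWO(coeff, 6)
  return clip_pixel(pred + residual);      // saturate, as recon_and_store() does
}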
void vp9_iht16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride, int tx_type) {
__m128i in0[16], in1[16];


@@ -247,6 +247,7 @@ DSP_SRCS-$(HAVE_SSE4_1) += x86/highbd_inv_txfm_sse4.h
DSP_SRCS-$(HAVE_SSE4_1) += x86/highbd_idct4x4_add_sse4.c
DSP_SRCS-$(HAVE_SSE4_1) += x86/highbd_idct8x8_add_sse4.c
DSP_SRCS-$(HAVE_SSE4_1) += x86/highbd_idct16x16_add_sse4.c
DSP_SRCS-$(HAVE_SSE4_1) += x86/highbd_idct32x32_add_sse4.c
endif # !CONFIG_VP9_HIGHBITDEPTH
ifeq ($(HAVE_NEON_ASM),yes)


@@ -658,9 +658,9 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
specialize qw/vpx_highbd_idct16x16_256_add neon sse2 sse4_1/;
specialize qw/vpx_highbd_idct16x16_38_add neon sse2 sse4_1/;
specialize qw/vpx_highbd_idct16x16_10_add neon sse2 sse4_1/;
specialize qw/vpx_highbd_idct32x32_1024_add neon/;
specialize qw/vpx_highbd_idct32x32_135_add neon/;
specialize qw/vpx_highbd_idct32x32_34_add neon/;
specialize qw/vpx_highbd_idct32x32_1024_add neon sse2 sse4_1/;
specialize qw/vpx_highbd_idct32x32_135_add neon sse2 sse4_1/;
specialize qw/vpx_highbd_idct32x32_34_add neon sse2 sse4_1/;
} # !CONFIG_EMULATE_HARDWARE
} # CONFIG_VP9_HIGHBITDEPTH
} # CONFIG_VP9
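These specialize lines register the SIMD variants with the run-time CPU dispatch, so vpx_highbd_idct32x32_{1024,135,34}_add resolve to the best available kernel at run time. Every kernel registered here shares the high-bit-depth signature used by the new x86 files below, e.g.:

void vpx_highbd_idct32x32_1024_add_sse2(const tran_low_t *input, uint16_t *dest,
                                        int stride, int bd);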


@@ -106,20 +106,20 @@ static INLINE void highbd_idct16x16_38_4col(__m128i *const io /*io[16]*/) {
__m128i temp1[2], sign[2];
// stage 2
highbd_multiplication_sse2(io[1], (int)cospi_30_64, (int)cospi_2_64,
&step2[8], &step2[15]);
highbd_multiplication_neg_sse2(io[7], (int)cospi_14_64, (int)cospi_18_64,
&step2[9], &step2[14]);
highbd_multiplication_sse2(io[5], (int)cospi_22_64, (int)cospi_10_64,
&step2[10], &step2[13]);
highbd_multiplication_neg_sse2(io[3], (int)cospi_6_64, (int)cospi_26_64,
&step2[11], &step2[12]);
highbd_partial_butterfly_sse2(io[1], (int)cospi_30_64, (int)cospi_2_64,
&step2[8], &step2[15]);
highbd_partial_butterfly_neg_sse2(io[7], (int)cospi_14_64, (int)cospi_18_64,
&step2[9], &step2[14]);
highbd_partial_butterfly_sse2(io[5], (int)cospi_22_64, (int)cospi_10_64,
&step2[10], &step2[13]);
highbd_partial_butterfly_neg_sse2(io[3], (int)cospi_6_64, (int)cospi_26_64,
&step2[11], &step2[12]);
// stage 3
highbd_multiplication_sse2(io[2], (int)cospi_28_64, (int)cospi_4_64,
&step1[4], &step1[7]);
highbd_multiplication_neg_sse2(io[6], (int)cospi_12_64, (int)cospi_20_64,
&step1[5], &step1[6]);
highbd_partial_butterfly_sse2(io[2], (int)cospi_28_64, (int)cospi_4_64,
&step1[4], &step1[7]);
highbd_partial_butterfly_neg_sse2(io[6], (int)cospi_12_64, (int)cospi_20_64,
&step1[5], &step1[6]);
step1[8] = _mm_add_epi32(step2[8], step2[9]);
step1[9] = _mm_sub_epi32(step2[8], step2[9]);
step1[10] = _mm_sub_epi32(step2[10], step2[11]); // step1[10] = -step1[10]
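For orientation, a scalar sketch (my reading; DCT_CONST_BITS is 14 in the C reference) of what the renamed helper computes:

// highbd_partial_butterfly(in, c0, c1, &lo, &hi) is the idct butterfly whose
// partner input is known to be zero:
//   lo = ROUND_POWER_OF_TWO(in * c0, 14);
//   hi = ROUND_POWER_OF_TWO(in * c1, 14);
// The _neg_sse2 flavor handles the pairs whose true result carries a negated
// cosine product: SSE2 keeps the constants non-negative and defers the sign
// flip to later stages (hence the "step1[x] = -step1[x]" comments), while the
// SSE4.1 file can simply pass a negative constant such as -(int)cospi_18_64.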
@@ -133,8 +133,8 @@ static INLINE void highbd_idct16x16_38_4col(__m128i *const io /*io[16]*/) {
abs_extend_64bit_sse2(io[0], temp1, sign);
step2[0] = multiplication_round_shift_sse2(temp1, sign, (int)cospi_16_64);
step2[1] = step2[0];
highbd_multiplication_sse2(io[4], (int)cospi_24_64, (int)cospi_8_64,
&step2[2], &step2[3]);
highbd_partial_butterfly_sse2(io[4], (int)cospi_24_64, (int)cospi_8_64,
&step2[2], &step2[3]);
highbd_butterfly_sse2(step1[14], step1[9], (int)cospi_24_64, (int)cospi_8_64,
&step2[9], &step2[14]);
highbd_butterfly_sse2(step1[10], step1[13], (int)cospi_8_64, (int)cospi_24_64,
@@ -158,14 +158,14 @@ static INLINE void highbd_idct16x16_10_4col(__m128i *const io /*io[16]*/) {
__m128i temp[2], sign[2];
// stage 2
highbd_multiplication_sse2(io[1], (int)cospi_30_64, (int)cospi_2_64,
&step2[8], &step2[15]);
highbd_multiplication_neg_sse2(io[3], (int)cospi_6_64, (int)cospi_26_64,
&step2[11], &step2[12]);
highbd_partial_butterfly_sse2(io[1], (int)cospi_30_64, (int)cospi_2_64,
&step2[8], &step2[15]);
highbd_partial_butterfly_neg_sse2(io[3], (int)cospi_6_64, (int)cospi_26_64,
&step2[11], &step2[12]);
// stage 3
highbd_multiplication_sse2(io[2], (int)cospi_28_64, (int)cospi_4_64,
&step1[4], &step1[7]);
highbd_partial_butterfly_sse2(io[2], (int)cospi_28_64, (int)cospi_4_64,
&step1[4], &step1[7]);
step1[8] = step2[8];
step1[9] = step2[8];
step1[10] =
@@ -209,26 +209,9 @@ void vpx_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint16_t *dest,
in = l;
for (i = 0; i < 2; i++) {
in[0] = load_pack_8_32bit(input + 0 * 16);
in[1] = load_pack_8_32bit(input + 1 * 16);
in[2] = load_pack_8_32bit(input + 2 * 16);
in[3] = load_pack_8_32bit(input + 3 * 16);
in[4] = load_pack_8_32bit(input + 4 * 16);
in[5] = load_pack_8_32bit(input + 5 * 16);
in[6] = load_pack_8_32bit(input + 6 * 16);
in[7] = load_pack_8_32bit(input + 7 * 16);
transpose_16bit_8x8(in, in);
in[8] = load_pack_8_32bit(input + 0 * 16 + 8);
in[9] = load_pack_8_32bit(input + 1 * 16 + 8);
in[10] = load_pack_8_32bit(input + 2 * 16 + 8);
in[11] = load_pack_8_32bit(input + 3 * 16 + 8);
in[12] = load_pack_8_32bit(input + 4 * 16 + 8);
in[13] = load_pack_8_32bit(input + 5 * 16 + 8);
in[14] = load_pack_8_32bit(input + 6 * 16 + 8);
in[15] = load_pack_8_32bit(input + 7 * 16 + 8);
transpose_16bit_8x8(in + 8, in + 8);
idct16_8col(in);
highbd_load_pack_transpose_32bit_8x8(&input[0], 16, &in[0]);
highbd_load_pack_transpose_32bit_8x8(&input[8], 16, &in[8]);
idct16_8col(in, in);
in = r;
input += 128;
}
@@ -237,7 +220,7 @@ void vpx_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint16_t *dest,
int j;
transpose_16bit_8x8(l + i, out);
transpose_16bit_8x8(r + i, out + 8);
idct16_8col(out);
idct16_8col(out, out);
for (j = 0; j < 16; ++j) {
highbd_write_buffer_8(dest + j * stride, out[j], bd);
@@ -249,52 +232,18 @@ void vpx_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint16_t *dest,
for (i = 0; i < 4; i++) {
in = all[i];
in[0] = _mm_load_si128((const __m128i *)(input + 0 * 16 + 0));
in[1] = _mm_load_si128((const __m128i *)(input + 0 * 16 + 4));
in[2] = _mm_load_si128((const __m128i *)(input + 1 * 16 + 0));
in[3] = _mm_load_si128((const __m128i *)(input + 1 * 16 + 4));
in[4] = _mm_load_si128((const __m128i *)(input + 2 * 16 + 0));
in[5] = _mm_load_si128((const __m128i *)(input + 2 * 16 + 4));
in[6] = _mm_load_si128((const __m128i *)(input + 3 * 16 + 0));
in[7] = _mm_load_si128((const __m128i *)(input + 3 * 16 + 4));
transpose_32bit_8x4(in, in);
in[8] = _mm_load_si128((const __m128i *)(input + 0 * 16 + 8));
in[9] = _mm_load_si128((const __m128i *)(input + 0 * 16 + 12));
in[10] = _mm_load_si128((const __m128i *)(input + 1 * 16 + 8));
in[11] = _mm_load_si128((const __m128i *)(input + 1 * 16 + 12));
in[12] = _mm_load_si128((const __m128i *)(input + 2 * 16 + 8));
in[13] = _mm_load_si128((const __m128i *)(input + 2 * 16 + 12));
in[14] = _mm_load_si128((const __m128i *)(input + 3 * 16 + 8));
in[15] = _mm_load_si128((const __m128i *)(input + 3 * 16 + 12));
transpose_32bit_8x4(in + 8, in + 8);
highbd_load_transpose_32bit_8x4(&input[0], 16, &in[0]);
highbd_load_transpose_32bit_8x4(&input[8], 16, &in[8]);
highbd_idct16_4col(in);
input += 4 * 16;
}
for (i = 0; i < 16; i += 4) {
int j;
out[0] = all[0][i + 0];
out[1] = all[1][i + 0];
out[2] = all[0][i + 1];
out[3] = all[1][i + 1];
out[4] = all[0][i + 2];
out[5] = all[1][i + 2];
out[6] = all[0][i + 3];
out[7] = all[1][i + 3];
transpose_32bit_8x4(out, out);
out[8] = all[2][i + 0];
out[9] = all[3][i + 0];
out[10] = all[2][i + 1];
out[11] = all[3][i + 1];
out[12] = all[2][i + 2];
out[13] = all[3][i + 2];
out[14] = all[2][i + 3];
out[15] = all[3][i + 3];
transpose_32bit_8x4(out + 8, out + 8);
transpose_32bit_4x4(all[0] + i, out + 0);
transpose_32bit_4x4(all[1] + i, out + 4);
transpose_32bit_4x4(all[2] + i, out + 8);
transpose_32bit_4x4(all[3] + i, out + 12);
highbd_idct16_4col(out);
for (j = 0; j < 16; ++j) {
@@ -311,40 +260,18 @@ void vpx_highbd_idct16x16_38_add_sse2(const tran_low_t *input, uint16_t *dest,
__m128i out[16];
if (bd == 8) {
__m128i in[16];
__m128i in[16], temp[16];
in[0] = load_pack_8_32bit(input + 0 * 16);
in[1] = load_pack_8_32bit(input + 1 * 16);
in[2] = load_pack_8_32bit(input + 2 * 16);
in[3] = load_pack_8_32bit(input + 3 * 16);
in[4] = load_pack_8_32bit(input + 4 * 16);
in[5] = load_pack_8_32bit(input + 5 * 16);
in[6] = load_pack_8_32bit(input + 6 * 16);
in[7] = load_pack_8_32bit(input + 7 * 16);
transpose_16bit_8x8(in, in);
in[8] = _mm_setzero_si128();
in[9] = _mm_setzero_si128();
in[10] = _mm_setzero_si128();
in[11] = _mm_setzero_si128();
in[12] = _mm_setzero_si128();
in[13] = _mm_setzero_si128();
in[14] = _mm_setzero_si128();
in[15] = _mm_setzero_si128();
idct16_8col(in);
highbd_load_pack_transpose_32bit_8x8(input, 16, in);
for (i = 8; i < 16; i++) {
in[i] = _mm_setzero_si128();
}
idct16_8col(in, temp);
for (i = 0; i < 16; i += 8) {
int j;
transpose_16bit_8x8(in + i, out);
out[8] = _mm_setzero_si128();
out[9] = _mm_setzero_si128();
out[10] = _mm_setzero_si128();
out[11] = _mm_setzero_si128();
out[12] = _mm_setzero_si128();
out[13] = _mm_setzero_si128();
out[14] = _mm_setzero_si128();
out[15] = _mm_setzero_si128();
idct16_8col(out);
transpose_16bit_8x8(temp + i, in);
idct16_8col(in, out);
for (j = 0; j < 16; ++j) {
highbd_write_buffer_8(dest + j * stride, out[j], bd);
@@ -356,30 +283,15 @@ void vpx_highbd_idct16x16_38_add_sse2(const tran_low_t *input, uint16_t *dest,
for (i = 0; i < 2; i++) {
in = all[i];
in[0] = _mm_load_si128((const __m128i *)(input + 0 * 16 + 0));
in[1] = _mm_load_si128((const __m128i *)(input + 0 * 16 + 4));
in[2] = _mm_load_si128((const __m128i *)(input + 1 * 16 + 0));
in[3] = _mm_load_si128((const __m128i *)(input + 1 * 16 + 4));
in[4] = _mm_load_si128((const __m128i *)(input + 2 * 16 + 0));
in[5] = _mm_load_si128((const __m128i *)(input + 2 * 16 + 4));
in[6] = _mm_load_si128((const __m128i *)(input + 3 * 16 + 0));
in[7] = _mm_load_si128((const __m128i *)(input + 3 * 16 + 4));
transpose_32bit_8x4(in, in);
highbd_load_transpose_32bit_8x4(input, 16, in);
highbd_idct16x16_38_4col(in);
input += 4 * 16;
}
for (i = 0; i < 16; i += 4) {
int j;
out[0] = all[0][i + 0];
out[1] = all[1][i + 0];
out[2] = all[0][i + 1];
out[3] = all[1][i + 1];
out[4] = all[0][i + 2];
out[5] = all[1][i + 2];
out[6] = all[0][i + 3];
out[7] = all[1][i + 3];
transpose_32bit_8x4(out, out);
transpose_32bit_4x4(all[0] + i, out + 0);
transpose_32bit_4x4(all[1] + i, out + 4);
highbd_idct16x16_38_4col(out);
for (j = 0; j < 16; ++j) {
@@ -419,11 +331,7 @@ void vpx_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint16_t *dest,
for (i = 0; i < 2; i++) {
in = all[i];
in[0] = _mm_load_si128((const __m128i *)(input + 0 * 16));
in[1] = _mm_load_si128((const __m128i *)(input + 1 * 16));
in[2] = _mm_load_si128((const __m128i *)(input + 2 * 16));
in[3] = _mm_load_si128((const __m128i *)(input + 3 * 16));
transpose_32bit_4x4(in, in);
highbd_load_transpose_32bit_4x4(input, 16, in);
highbd_idct16x16_10_4col(in);
input += 4 * 16;
}


@@ -107,20 +107,20 @@ static INLINE void highbd_idct16x16_38_4col(__m128i *const io /*io[16]*/) {
__m128i temp1[2];
// stage 2
highbd_multiplication_sse4_1(io[1], (int)cospi_30_64, (int)cospi_2_64,
&step2[8], &step2[15]);
highbd_multiplication_sse4_1(io[7], -(int)cospi_18_64, (int)cospi_14_64,
&step2[9], &step2[14]);
highbd_multiplication_sse4_1(io[5], (int)cospi_22_64, (int)cospi_10_64,
&step2[10], &step2[13]);
highbd_multiplication_sse4_1(io[3], -(int)cospi_26_64, (int)cospi_6_64,
&step2[11], &step2[12]);
highbd_partial_butterfly_sse4_1(io[1], (int)cospi_30_64, (int)cospi_2_64,
&step2[8], &step2[15]);
highbd_partial_butterfly_sse4_1(io[7], -(int)cospi_18_64, (int)cospi_14_64,
&step2[9], &step2[14]);
highbd_partial_butterfly_sse4_1(io[5], (int)cospi_22_64, (int)cospi_10_64,
&step2[10], &step2[13]);
highbd_partial_butterfly_sse4_1(io[3], -(int)cospi_26_64, (int)cospi_6_64,
&step2[11], &step2[12]);
// stage 3
highbd_multiplication_sse4_1(io[2], (int)cospi_28_64, (int)cospi_4_64,
&step1[4], &step1[7]);
highbd_multiplication_sse4_1(io[6], -(int)cospi_20_64, (int)cospi_12_64,
&step1[5], &step1[6]);
highbd_partial_butterfly_sse4_1(io[2], (int)cospi_28_64, (int)cospi_4_64,
&step1[4], &step1[7]);
highbd_partial_butterfly_sse4_1(io[6], -(int)cospi_20_64, (int)cospi_12_64,
&step1[5], &step1[6]);
step1[8] = _mm_add_epi32(step2[8], step2[9]);
step1[9] = _mm_sub_epi32(step2[8], step2[9]);
step1[10] = _mm_sub_epi32(step2[11], step2[10]);
@@ -134,8 +134,8 @@ static INLINE void highbd_idct16x16_38_4col(__m128i *const io /*io[16]*/) {
extend_64bit(io[0], temp1);
step2[0] = multiplication_round_shift_sse4_1(temp1, (int)cospi_16_64);
step2[1] = step2[0];
highbd_multiplication_sse4_1(io[4], (int)cospi_24_64, (int)cospi_8_64,
&step2[2], &step2[3]);
highbd_partial_butterfly_sse4_1(io[4], (int)cospi_24_64, (int)cospi_8_64,
&step2[2], &step2[3]);
highbd_butterfly_sse4_1(step1[14], step1[9], (int)cospi_24_64,
(int)cospi_8_64, &step2[9], &step2[14]);
highbd_butterfly_sse4_1(step1[10], step1[13], -(int)cospi_8_64,
@@ -159,14 +159,14 @@ static INLINE void highbd_idct16x16_10_4col(__m128i *const io /*io[16]*/) {
__m128i temp[2];
// stage 2
highbd_multiplication_sse4_1(io[1], (int)cospi_30_64, (int)cospi_2_64,
&step2[8], &step2[15]);
highbd_multiplication_sse4_1(io[3], -(int)cospi_26_64, (int)cospi_6_64,
&step2[11], &step2[12]);
highbd_partial_butterfly_sse4_1(io[1], (int)cospi_30_64, (int)cospi_2_64,
&step2[8], &step2[15]);
highbd_partial_butterfly_sse4_1(io[3], -(int)cospi_26_64, (int)cospi_6_64,
&step2[11], &step2[12]);
// stage 3
highbd_multiplication_sse4_1(io[2], (int)cospi_28_64, (int)cospi_4_64,
&step1[4], &step1[7]);
highbd_partial_butterfly_sse4_1(io[2], (int)cospi_28_64, (int)cospi_4_64,
&step1[4], &step1[7]);
step1[8] = step2[8];
step1[9] = step2[8];
step1[10] = step2[11];
@@ -208,26 +208,9 @@ void vpx_highbd_idct16x16_256_add_sse4_1(const tran_low_t *input,
in = l;
for (i = 0; i < 2; i++) {
in[0] = load_pack_8_32bit(input + 0 * 16);
in[1] = load_pack_8_32bit(input + 1 * 16);
in[2] = load_pack_8_32bit(input + 2 * 16);
in[3] = load_pack_8_32bit(input + 3 * 16);
in[4] = load_pack_8_32bit(input + 4 * 16);
in[5] = load_pack_8_32bit(input + 5 * 16);
in[6] = load_pack_8_32bit(input + 6 * 16);
in[7] = load_pack_8_32bit(input + 7 * 16);
transpose_16bit_8x8(in, in);
in[8] = load_pack_8_32bit(input + 0 * 16 + 8);
in[9] = load_pack_8_32bit(input + 1 * 16 + 8);
in[10] = load_pack_8_32bit(input + 2 * 16 + 8);
in[11] = load_pack_8_32bit(input + 3 * 16 + 8);
in[12] = load_pack_8_32bit(input + 4 * 16 + 8);
in[13] = load_pack_8_32bit(input + 5 * 16 + 8);
in[14] = load_pack_8_32bit(input + 6 * 16 + 8);
in[15] = load_pack_8_32bit(input + 7 * 16 + 8);
transpose_16bit_8x8(in + 8, in + 8);
idct16_8col(in);
highbd_load_pack_transpose_32bit_8x8(&input[0], 16, &in[0]);
highbd_load_pack_transpose_32bit_8x8(&input[8], 16, &in[8]);
idct16_8col(in, in);
in = r;
input += 128;
}
@@ -236,7 +219,7 @@ void vpx_highbd_idct16x16_256_add_sse4_1(const tran_low_t *input,
int j;
transpose_16bit_8x8(l + i, out);
transpose_16bit_8x8(r + i, out + 8);
idct16_8col(out);
idct16_8col(out, out);
for (j = 0; j < 16; ++j) {
highbd_write_buffer_8(dest + j * stride, out[j], bd);
@@ -248,52 +231,18 @@ void vpx_highbd_idct16x16_256_add_sse4_1(const tran_low_t *input,
for (i = 0; i < 4; i++) {
in = all[i];
in[0] = _mm_load_si128((const __m128i *)(input + 0 * 16 + 0));
in[1] = _mm_load_si128((const __m128i *)(input + 0 * 16 + 4));
in[2] = _mm_load_si128((const __m128i *)(input + 1 * 16 + 0));
in[3] = _mm_load_si128((const __m128i *)(input + 1 * 16 + 4));
in[4] = _mm_load_si128((const __m128i *)(input + 2 * 16 + 0));
in[5] = _mm_load_si128((const __m128i *)(input + 2 * 16 + 4));
in[6] = _mm_load_si128((const __m128i *)(input + 3 * 16 + 0));
in[7] = _mm_load_si128((const __m128i *)(input + 3 * 16 + 4));
transpose_32bit_8x4(in, in);
in[8] = _mm_load_si128((const __m128i *)(input + 0 * 16 + 8));
in[9] = _mm_load_si128((const __m128i *)(input + 0 * 16 + 12));
in[10] = _mm_load_si128((const __m128i *)(input + 1 * 16 + 8));
in[11] = _mm_load_si128((const __m128i *)(input + 1 * 16 + 12));
in[12] = _mm_load_si128((const __m128i *)(input + 2 * 16 + 8));
in[13] = _mm_load_si128((const __m128i *)(input + 2 * 16 + 12));
in[14] = _mm_load_si128((const __m128i *)(input + 3 * 16 + 8));
in[15] = _mm_load_si128((const __m128i *)(input + 3 * 16 + 12));
transpose_32bit_8x4(in + 8, in + 8);
highbd_load_transpose_32bit_8x4(&input[0], 16, &in[0]);
highbd_load_transpose_32bit_8x4(&input[8], 16, &in[8]);
highbd_idct16_4col(in);
input += 4 * 16;
}
for (i = 0; i < 16; i += 4) {
int j;
out[0] = all[0][i + 0];
out[1] = all[1][i + 0];
out[2] = all[0][i + 1];
out[3] = all[1][i + 1];
out[4] = all[0][i + 2];
out[5] = all[1][i + 2];
out[6] = all[0][i + 3];
out[7] = all[1][i + 3];
transpose_32bit_8x4(out, out);
out[8] = all[2][i + 0];
out[9] = all[3][i + 0];
out[10] = all[2][i + 1];
out[11] = all[3][i + 1];
out[12] = all[2][i + 2];
out[13] = all[3][i + 2];
out[14] = all[2][i + 3];
out[15] = all[3][i + 3];
transpose_32bit_8x4(out + 8, out + 8);
transpose_32bit_4x4(all[0] + i, out + 0);
transpose_32bit_4x4(all[1] + i, out + 4);
transpose_32bit_4x4(all[2] + i, out + 8);
transpose_32bit_4x4(all[3] + i, out + 12);
highbd_idct16_4col(out);
for (j = 0; j < 16; ++j) {
@@ -310,40 +259,18 @@ void vpx_highbd_idct16x16_38_add_sse4_1(const tran_low_t *input, uint16_t *dest,
__m128i out[16];
if (bd == 8) {
__m128i in[16];
__m128i in[16], temp[16];
in[0] = load_pack_8_32bit(input + 0 * 16);
in[1] = load_pack_8_32bit(input + 1 * 16);
in[2] = load_pack_8_32bit(input + 2 * 16);
in[3] = load_pack_8_32bit(input + 3 * 16);
in[4] = load_pack_8_32bit(input + 4 * 16);
in[5] = load_pack_8_32bit(input + 5 * 16);
in[6] = load_pack_8_32bit(input + 6 * 16);
in[7] = load_pack_8_32bit(input + 7 * 16);
transpose_16bit_8x8(in, in);
in[8] = _mm_setzero_si128();
in[9] = _mm_setzero_si128();
in[10] = _mm_setzero_si128();
in[11] = _mm_setzero_si128();
in[12] = _mm_setzero_si128();
in[13] = _mm_setzero_si128();
in[14] = _mm_setzero_si128();
in[15] = _mm_setzero_si128();
idct16_8col(in);
highbd_load_pack_transpose_32bit_8x8(&input[0], 16, &in[0]);
for (i = 8; i < 16; i++) {
in[i] = _mm_setzero_si128();
}
idct16_8col(in, temp);
for (i = 0; i < 16; i += 8) {
int j;
transpose_16bit_8x8(in + i, out);
out[8] = _mm_setzero_si128();
out[9] = _mm_setzero_si128();
out[10] = _mm_setzero_si128();
out[11] = _mm_setzero_si128();
out[12] = _mm_setzero_si128();
out[13] = _mm_setzero_si128();
out[14] = _mm_setzero_si128();
out[15] = _mm_setzero_si128();
idct16_8col(out);
transpose_16bit_8x8(temp + i, in);
idct16_8col(in, out);
for (j = 0; j < 16; ++j) {
highbd_write_buffer_8(dest + j * stride, out[j], bd);
@@ -355,30 +282,15 @@ void vpx_highbd_idct16x16_38_add_sse4_1(const tran_low_t *input, uint16_t *dest,
for (i = 0; i < 2; i++) {
in = all[i];
in[0] = _mm_load_si128((const __m128i *)(input + 0 * 16 + 0));
in[1] = _mm_load_si128((const __m128i *)(input + 0 * 16 + 4));
in[2] = _mm_load_si128((const __m128i *)(input + 1 * 16 + 0));
in[3] = _mm_load_si128((const __m128i *)(input + 1 * 16 + 4));
in[4] = _mm_load_si128((const __m128i *)(input + 2 * 16 + 0));
in[5] = _mm_load_si128((const __m128i *)(input + 2 * 16 + 4));
in[6] = _mm_load_si128((const __m128i *)(input + 3 * 16 + 0));
in[7] = _mm_load_si128((const __m128i *)(input + 3 * 16 + 4));
transpose_32bit_8x4(in, in);
highbd_load_transpose_32bit_8x4(input, 16, in);
highbd_idct16x16_38_4col(in);
input += 4 * 16;
}
for (i = 0; i < 16; i += 4) {
int j;
out[0] = all[0][i + 0];
out[1] = all[1][i + 0];
out[2] = all[0][i + 1];
out[3] = all[1][i + 1];
out[4] = all[0][i + 2];
out[5] = all[1][i + 2];
out[6] = all[0][i + 3];
out[7] = all[1][i + 3];
transpose_32bit_8x4(out, out);
transpose_32bit_4x4(all[0] + i, out + 0);
transpose_32bit_4x4(all[1] + i, out + 4);
highbd_idct16x16_38_4col(out);
for (j = 0; j < 16; ++j) {
@@ -418,11 +330,7 @@ void vpx_highbd_idct16x16_10_add_sse4_1(const tran_low_t *input, uint16_t *dest,
for (i = 0; i < 2; i++) {
in = all[i];
in[0] = _mm_load_si128((const __m128i *)(input + 0 * 16));
in[1] = _mm_load_si128((const __m128i *)(input + 1 * 16));
in[2] = _mm_load_si128((const __m128i *)(input + 2 * 16));
in[3] = _mm_load_si128((const __m128i *)(input + 3 * 16));
transpose_32bit_4x4(in, in);
highbd_load_transpose_32bit_4x4(input, 16, in);
highbd_idct16x16_10_4col(in);
input += 4 * 16;
}


@@ -14,6 +14,768 @@
#include "vpx_dsp/x86/transpose_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"
static INLINE void highbd_idct32_4x32_quarter_2_stage_4_to_6(
__m128i *const step1 /*step1[16]*/, __m128i *const out /*out[16]*/) {
__m128i step2[32];
// stage 4
step2[8] = step1[8];
step2[15] = step1[15];
highbd_butterfly_sse2(step1[14], step1[9], (int)cospi_24_64, (int)cospi_8_64,
&step2[9], &step2[14]);
highbd_butterfly_sse2(step1[10], step1[13], (int)cospi_8_64, (int)cospi_24_64,
&step2[13], &step2[10]);
step2[11] = step1[11];
step2[12] = step1[12];
// stage 5
step1[8] = _mm_add_epi32(step2[8], step2[11]);
step1[9] = _mm_add_epi32(step2[9], step2[10]);
step1[10] = _mm_sub_epi32(step2[9], step2[10]);
step1[11] = _mm_sub_epi32(step2[8], step2[11]);
step1[12] = _mm_sub_epi32(step2[15], step2[12]);
step1[13] = _mm_sub_epi32(step2[14], step2[13]);
step1[14] = _mm_add_epi32(step2[14], step2[13]);
step1[15] = _mm_add_epi32(step2[15], step2[12]);
// stage 6
out[8] = step1[8];
out[9] = step1[9];
highbd_butterfly_sse2(step1[13], step1[10], (int)cospi_16_64,
(int)cospi_16_64, &out[10], &out[13]);
highbd_butterfly_sse2(step1[12], step1[11], (int)cospi_16_64,
(int)cospi_16_64, &out[11], &out[12]);
out[14] = step1[14];
out[15] = step1[15];
}
static INLINE void highbd_idct32_4x32_quarter_3_4_stage_4_to_7(
__m128i *const step1 /*step1[32]*/, __m128i *const out /*out[32]*/) {
__m128i step2[32];
// stage 4
step2[16] = _mm_add_epi32(step1[16], step1[19]);
step2[17] = _mm_add_epi32(step1[17], step1[18]);
step2[18] = _mm_sub_epi32(step1[17], step1[18]);
step2[19] = _mm_sub_epi32(step1[16], step1[19]);
step2[20] = _mm_sub_epi32(step1[20], step1[23]); // step2[20] = -step2[20]
step2[21] = _mm_sub_epi32(step1[21], step1[22]); // step2[21] = -step2[21]
step2[22] = _mm_add_epi32(step1[21], step1[22]);
step2[23] = _mm_add_epi32(step1[20], step1[23]);
step2[24] = _mm_add_epi32(step1[27], step1[24]);
step2[25] = _mm_add_epi32(step1[26], step1[25]);
step2[26] = _mm_sub_epi32(step1[26], step1[25]); // step2[26] = -step2[26]
step2[27] = _mm_sub_epi32(step1[27], step1[24]); // step2[27] = -step2[27]
step2[28] = _mm_sub_epi32(step1[31], step1[28]);
step2[29] = _mm_sub_epi32(step1[30], step1[29]);
step2[30] = _mm_add_epi32(step1[29], step1[30]);
step2[31] = _mm_add_epi32(step1[28], step1[31]);
// stage 5
step1[16] = step2[16];
step1[17] = step2[17];
highbd_butterfly_sse2(step2[29], step2[18], (int)cospi_24_64, (int)cospi_8_64,
&step1[18], &step1[29]);
highbd_butterfly_sse2(step2[28], step2[19], (int)cospi_24_64, (int)cospi_8_64,
&step1[19], &step1[28]);
highbd_butterfly_sse2(step2[20], step2[27], (int)cospi_8_64, (int)cospi_24_64,
&step1[27], &step1[20]);
highbd_butterfly_sse2(step2[21], step2[26], (int)cospi_8_64, (int)cospi_24_64,
&step1[26], &step1[21]);
step1[22] = step2[22];
step1[23] = step2[23];
step1[24] = step2[24];
step1[25] = step2[25];
step1[30] = step2[30];
step1[31] = step2[31];
// stage 6
step2[16] = _mm_add_epi32(step1[16], step1[23]);
step2[17] = _mm_add_epi32(step1[17], step1[22]);
step2[18] = _mm_add_epi32(step1[18], step1[21]);
step2[19] = _mm_add_epi32(step1[19], step1[20]);
step2[20] = _mm_sub_epi32(step1[19], step1[20]);
step2[21] = _mm_sub_epi32(step1[18], step1[21]);
step2[22] = _mm_sub_epi32(step1[17], step1[22]);
step2[23] = _mm_sub_epi32(step1[16], step1[23]);
step2[24] = _mm_sub_epi32(step1[31], step1[24]);
step2[25] = _mm_sub_epi32(step1[30], step1[25]);
step2[26] = _mm_sub_epi32(step1[29], step1[26]);
step2[27] = _mm_sub_epi32(step1[28], step1[27]);
step2[28] = _mm_add_epi32(step1[27], step1[28]);
step2[29] = _mm_add_epi32(step1[26], step1[29]);
step2[30] = _mm_add_epi32(step1[25], step1[30]);
step2[31] = _mm_add_epi32(step1[24], step1[31]);
// stage 7
out[16] = step2[16];
out[17] = step2[17];
out[18] = step2[18];
out[19] = step2[19];
highbd_butterfly_sse2(step2[27], step2[20], (int)cospi_16_64,
(int)cospi_16_64, &out[20], &out[27]);
highbd_butterfly_sse2(step2[26], step2[21], (int)cospi_16_64,
(int)cospi_16_64, &out[21], &out[26]);
highbd_butterfly_sse2(step2[25], step2[22], (int)cospi_16_64,
(int)cospi_16_64, &out[22], &out[25]);
highbd_butterfly_sse2(step2[24], step2[23], (int)cospi_16_64,
(int)cospi_16_64, &out[23], &out[24]);
out[28] = step2[28];
out[29] = step2[29];
out[30] = step2[30];
out[31] = step2[31];
}
// Group the coefficient calculation into smaller functions to prevent stack
// spillover in 32x32 idct optimizations:
// quarter_1: 0-7
// quarter_2: 8-15
// quarter_3_4: 16-23, 24-31
// For each 4x32 block __m128i in[32],
// Input with index, 0, 4, 8, 12, 16, 20, 24, 28
// output pixels: 0-7 in __m128i out[32]
static INLINE void highbd_idct32_1024_4x32_quarter_1(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[8]*/) {
__m128i step1[8], step2[8];
// stage 3
highbd_butterfly_sse2(in[4], in[28], (int)cospi_28_64, (int)cospi_4_64,
&step1[4], &step1[7]);
highbd_butterfly_sse2(in[20], in[12], (int)cospi_12_64, (int)cospi_20_64,
&step1[5], &step1[6]);
// stage 4
highbd_butterfly_sse2(in[0], in[16], (int)cospi_16_64, (int)cospi_16_64,
&step2[1], &step2[0]);
highbd_butterfly_sse2(in[8], in[24], (int)cospi_24_64, (int)cospi_8_64,
&step2[2], &step2[3]);
step2[4] = _mm_add_epi32(step1[4], step1[5]);
step2[5] = _mm_sub_epi32(step1[4], step1[5]);
step2[6] = _mm_sub_epi32(step1[7], step1[6]);
step2[7] = _mm_add_epi32(step1[7], step1[6]);
// stage 5
step1[0] = _mm_add_epi32(step2[0], step2[3]);
step1[1] = _mm_add_epi32(step2[1], step2[2]);
step1[2] = _mm_sub_epi32(step2[1], step2[2]);
step1[3] = _mm_sub_epi32(step2[0], step2[3]);
step1[4] = step2[4];
highbd_butterfly_sse2(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64,
&step1[5], &step1[6]);
step1[7] = step2[7];
// stage 6
out[0] = _mm_add_epi32(step1[0], step1[7]);
out[1] = _mm_add_epi32(step1[1], step1[6]);
out[2] = _mm_add_epi32(step1[2], step1[5]);
out[3] = _mm_add_epi32(step1[3], step1[4]);
out[4] = _mm_sub_epi32(step1[3], step1[4]);
out[5] = _mm_sub_epi32(step1[2], step1[5]);
out[6] = _mm_sub_epi32(step1[1], step1[6]);
out[7] = _mm_sub_epi32(step1[0], step1[7]);
}
// For each 4x32 block __m128i in[32],
// Input with index, 2, 6, 10, 14, 18, 22, 26, 30
// output pixels: 8-15 in __m128i out[32]
static INLINE void highbd_idct32_1024_4x32_quarter_2(
const __m128i *in /*in[32]*/, __m128i *out /*out[16]*/) {
__m128i step1[32], step2[32];
// stage 2
highbd_butterfly_sse2(in[2], in[30], (int)cospi_30_64, (int)cospi_2_64,
&step2[8], &step2[15]);
highbd_butterfly_sse2(in[18], in[14], (int)cospi_14_64, (int)cospi_18_64,
&step2[9], &step2[14]);
highbd_butterfly_sse2(in[10], in[22], (int)cospi_22_64, (int)cospi_10_64,
&step2[10], &step2[13]);
highbd_butterfly_sse2(in[26], in[6], (int)cospi_6_64, (int)cospi_26_64,
&step2[11], &step2[12]);
// stage 3
step1[8] = _mm_add_epi32(step2[8], step2[9]);
step1[9] = _mm_sub_epi32(step2[8], step2[9]);
step1[14] = _mm_sub_epi32(step2[15], step2[14]);
step1[15] = _mm_add_epi32(step2[15], step2[14]);
step1[10] = _mm_sub_epi32(step2[10], step2[11]); // step1[10] = -step1[10]
step1[11] = _mm_add_epi32(step2[10], step2[11]);
step1[12] = _mm_add_epi32(step2[13], step2[12]);
step1[13] = _mm_sub_epi32(step2[13], step2[12]); // step1[13] = -step1[13]
highbd_idct32_4x32_quarter_2_stage_4_to_6(step1, out);
}
static INLINE void highbd_idct32_1024_4x32_quarter_1_2(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i temp[16];
highbd_idct32_1024_4x32_quarter_1(in, temp);
highbd_idct32_1024_4x32_quarter_2(in, temp);
// stage 7
highbd_add_sub_butterfly(temp, out, 16);
}
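Stage 7 here (and the final stage below) use highbd_add_sub_butterfly(); its scalar meaning, as I read the shared helper, is the usual mirrored add/sub:

// add_sub_butterfly(in, out, size):
//   for (i = 0; i < size / 2; ++i) {
//     out[i]            = in[i] + in[size - 1 - i];
//     out[size - 1 - i] = in[i] - in[size - 1 - i];
//   }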
// For each 4x32 block __m128i in[32],
// Input with odd index,
// 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
// output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void highbd_idct32_1024_4x32_quarter_3_4(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i step1[32], step2[32];
// stage 1
highbd_butterfly_sse2(in[1], in[31], (int)cospi_31_64, (int)cospi_1_64,
&step1[16], &step1[31]);
highbd_butterfly_sse2(in[17], in[15], (int)cospi_15_64, (int)cospi_17_64,
&step1[17], &step1[30]);
highbd_butterfly_sse2(in[9], in[23], (int)cospi_23_64, (int)cospi_9_64,
&step1[18], &step1[29]);
highbd_butterfly_sse2(in[25], in[7], (int)cospi_7_64, (int)cospi_25_64,
&step1[19], &step1[28]);
highbd_butterfly_sse2(in[5], in[27], (int)cospi_27_64, (int)cospi_5_64,
&step1[20], &step1[27]);
highbd_butterfly_sse2(in[21], in[11], (int)cospi_11_64, (int)cospi_21_64,
&step1[21], &step1[26]);
highbd_butterfly_sse2(in[13], in[19], (int)cospi_19_64, (int)cospi_13_64,
&step1[22], &step1[25]);
highbd_butterfly_sse2(in[29], in[3], (int)cospi_3_64, (int)cospi_29_64,
&step1[23], &step1[24]);
// stage 2
step2[16] = _mm_add_epi32(step1[16], step1[17]);
step2[17] = _mm_sub_epi32(step1[16], step1[17]);
step2[18] = _mm_sub_epi32(step1[18], step1[19]); // step2[18] = -step2[18]
step2[19] = _mm_add_epi32(step1[18], step1[19]);
step2[20] = _mm_add_epi32(step1[20], step1[21]);
step2[21] = _mm_sub_epi32(step1[20], step1[21]);
step2[22] = _mm_sub_epi32(step1[22], step1[23]); // step2[22] = -step2[22]
step2[23] = _mm_add_epi32(step1[22], step1[23]);
step2[24] = _mm_add_epi32(step1[25], step1[24]);
step2[25] = _mm_sub_epi32(step1[25], step1[24]); // step2[25] = -step2[25]
step2[26] = _mm_sub_epi32(step1[27], step1[26]);
step2[27] = _mm_add_epi32(step1[27], step1[26]);
step2[28] = _mm_add_epi32(step1[29], step1[28]);
step2[29] = _mm_sub_epi32(step1[29], step1[28]); // step2[29] = -step2[29]
step2[30] = _mm_sub_epi32(step1[31], step1[30]);
step2[31] = _mm_add_epi32(step1[31], step1[30]);
// stage 3
step1[16] = step2[16];
step1[31] = step2[31];
highbd_butterfly_sse2(step2[30], step2[17], (int)cospi_28_64, (int)cospi_4_64,
&step1[17], &step1[30]);
highbd_butterfly_sse2(step2[18], step2[29], (int)cospi_4_64, (int)cospi_28_64,
&step1[29], &step1[18]);
step1[19] = step2[19];
step1[20] = step2[20];
highbd_butterfly_sse2(step2[26], step2[21], (int)cospi_12_64,
(int)cospi_20_64, &step1[21], &step1[26]);
highbd_butterfly_sse2(step2[22], step2[25], (int)cospi_20_64,
(int)cospi_12_64, &step1[25], &step1[22]);
step1[23] = step2[23];
step1[24] = step2[24];
step1[27] = step2[27];
step1[28] = step2[28];
highbd_idct32_4x32_quarter_3_4_stage_4_to_7(step1, out);
}
static void highbd_idct32_1024_4x32(__m128i *const io /*io[32]*/) {
__m128i temp[32];
highbd_idct32_1024_4x32_quarter_1_2(io, temp);
highbd_idct32_1024_4x32_quarter_3_4(io, temp);
// final stage
highbd_add_sub_butterfly(temp, io, 32);
}
void vpx_highbd_idct32x32_1024_add_sse2(const tran_low_t *input, uint16_t *dest,
int stride, int bd) {
int i, j;
if (bd == 8) {
__m128i col[4][32], io[32];
// rows
for (i = 0; i < 4; i++) {
highbd_load_pack_transpose_32bit_8x8(&input[0], 32, &io[0]);
highbd_load_pack_transpose_32bit_8x8(&input[8], 32, &io[8]);
highbd_load_pack_transpose_32bit_8x8(&input[16], 32, &io[16]);
highbd_load_pack_transpose_32bit_8x8(&input[24], 32, &io[24]);
idct32_1024_8x32(io, col[i]);
input += 32 << 3;
}
// columns
for (i = 0; i < 32; i += 8) {
// Transpose 32x8 block to 8x32 block
transpose_16bit_8x8(col[0] + i, io);
transpose_16bit_8x8(col[1] + i, io + 8);
transpose_16bit_8x8(col[2] + i, io + 16);
transpose_16bit_8x8(col[3] + i, io + 24);
idct32_1024_8x32(io, io);
for (j = 0; j < 32; ++j) {
highbd_write_buffer_8(dest + j * stride, io[j], bd);
}
dest += 8;
}
} else {
__m128i all[8][32], out[32], *in;
for (i = 0; i < 8; i++) {
in = all[i];
highbd_load_transpose_32bit_8x4(&input[0], 32, &in[0]);
highbd_load_transpose_32bit_8x4(&input[8], 32, &in[8]);
highbd_load_transpose_32bit_8x4(&input[16], 32, &in[16]);
highbd_load_transpose_32bit_8x4(&input[24], 32, &in[24]);
highbd_idct32_1024_4x32(in);
input += 4 * 32;
}
for (i = 0; i < 32; i += 4) {
transpose_32bit_4x4(all[0] + i, out + 0);
transpose_32bit_4x4(all[1] + i, out + 4);
transpose_32bit_4x4(all[2] + i, out + 8);
transpose_32bit_4x4(all[3] + i, out + 12);
transpose_32bit_4x4(all[4] + i, out + 16);
transpose_32bit_4x4(all[5] + i, out + 20);
transpose_32bit_4x4(all[6] + i, out + 24);
transpose_32bit_4x4(all[7] + i, out + 28);
highbd_idct32_1024_4x32(out);
for (j = 0; j < 32; ++j) {
highbd_write_buffer_4(dest + j * stride, out[j], bd);
}
dest += 4;
}
}
}
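// Overall shape of the routine above (shared by the _135 and _34 variants
// below): a separable 2-D inverse transform done as two 1-D passes.
//   rows:    load a strip of coefficients, transpose, run the 1-D idct32
//            (packed 16-bit lanes when bd == 8, 32-bit lanes otherwise);
//   columns: re-transpose the intermediate, run the 1-D idct32 again, then
//            round, add to the prediction already in dest[] and clamp to
//            [0, (1 << bd) - 1] via highbd_write_buffer_{4,8}().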
// -----------------------------------------------------------------------------
// For each 4x32 block __m128i in[32],
// Input with index, 0, 4, 8, 12
// output pixels: 0-7 in __m128i out[32]
static INLINE void highbd_idct32_135_4x32_quarter_1(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[8]*/) {
__m128i step1[8], step2[8];
// stage 3
highbd_partial_butterfly_sse2(in[4], (int)cospi_28_64, (int)cospi_4_64,
&step1[4], &step1[7]);
highbd_partial_butterfly_neg_sse2(in[12], (int)cospi_12_64, (int)cospi_20_64,
&step1[5], &step1[6]);
// stage 4
highbd_partial_butterfly_sse2(in[0], (int)cospi_16_64, (int)cospi_16_64,
&step2[1], &step2[0]);
highbd_partial_butterfly_sse2(in[8], (int)cospi_24_64, (int)cospi_8_64,
&step2[2], &step2[3]);
step2[4] = _mm_add_epi32(step1[4], step1[5]);
step2[5] = _mm_sub_epi32(step1[4], step1[5]);
step2[6] = _mm_sub_epi32(step1[7], step1[6]);
step2[7] = _mm_add_epi32(step1[7], step1[6]);
// stage 5
step1[0] = _mm_add_epi32(step2[0], step2[3]);
step1[1] = _mm_add_epi32(step2[1], step2[2]);
step1[2] = _mm_sub_epi32(step2[1], step2[2]);
step1[3] = _mm_sub_epi32(step2[0], step2[3]);
step1[4] = step2[4];
highbd_butterfly_sse2(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64,
&step1[5], &step1[6]);
step1[7] = step2[7];
// stage 6
out[0] = _mm_add_epi32(step1[0], step1[7]);
out[1] = _mm_add_epi32(step1[1], step1[6]);
out[2] = _mm_add_epi32(step1[2], step1[5]);
out[3] = _mm_add_epi32(step1[3], step1[4]);
out[4] = _mm_sub_epi32(step1[3], step1[4]);
out[5] = _mm_sub_epi32(step1[2], step1[5]);
out[6] = _mm_sub_epi32(step1[1], step1[6]);
out[7] = _mm_sub_epi32(step1[0], step1[7]);
}
// For each 4x32 block __m128i in[32],
// Input with index, 2, 6, 10, 14
// output pixels: 8-15 in __m128i out[32]
static INLINE void highbd_idct32_135_4x32_quarter_2(
const __m128i *in /*in[32]*/, __m128i *out /*out[16]*/) {
__m128i step1[32], step2[32];
// stage 2
highbd_partial_butterfly_sse2(in[2], (int)cospi_30_64, (int)cospi_2_64,
&step2[8], &step2[15]);
highbd_partial_butterfly_neg_sse2(in[14], (int)cospi_14_64, (int)cospi_18_64,
&step2[9], &step2[14]);
highbd_partial_butterfly_sse2(in[10], (int)cospi_22_64, (int)cospi_10_64,
&step2[10], &step2[13]);
highbd_partial_butterfly_neg_sse2(in[6], (int)cospi_6_64, (int)cospi_26_64,
&step2[11], &step2[12]);
// stage 3
step1[8] = _mm_add_epi32(step2[8], step2[9]);
step1[9] = _mm_sub_epi32(step2[8], step2[9]);
step1[14] = _mm_sub_epi32(step2[15], step2[14]);
step1[15] = _mm_add_epi32(step2[15], step2[14]);
step1[10] = _mm_sub_epi32(step2[10], step2[11]); // step1[10] = -step1[10]
step1[11] = _mm_add_epi32(step2[10], step2[11]);
step1[12] = _mm_add_epi32(step2[13], step2[12]);
step1[13] = _mm_sub_epi32(step2[13], step2[12]); // step1[13] = -step1[13]
highbd_idct32_4x32_quarter_2_stage_4_to_6(step1, out);
}
static INLINE void highbd_idct32_135_4x32_quarter_1_2(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i temp[16];
highbd_idct32_135_4x32_quarter_1(in, temp);
highbd_idct32_135_4x32_quarter_2(in, temp);
// stage 7
highbd_add_sub_butterfly(temp, out, 16);
}
// For each 4x32 block __m128i in[32],
// Input with odd index,
// 1, 3, 5, 7, 9, 11, 13, 15
// output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void highbd_idct32_135_4x32_quarter_3_4(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i step1[32], step2[32];
// stage 1
highbd_partial_butterfly_sse2(in[1], (int)cospi_31_64, (int)cospi_1_64,
&step1[16], &step1[31]);
highbd_partial_butterfly_neg_sse2(in[15], (int)cospi_15_64, (int)cospi_17_64,
&step1[17], &step1[30]);
highbd_partial_butterfly_sse2(in[9], (int)cospi_23_64, (int)cospi_9_64,
&step1[18], &step1[29]);
highbd_partial_butterfly_neg_sse2(in[7], (int)cospi_7_64, (int)cospi_25_64,
&step1[19], &step1[28]);
highbd_partial_butterfly_sse2(in[5], (int)cospi_27_64, (int)cospi_5_64,
&step1[20], &step1[27]);
highbd_partial_butterfly_neg_sse2(in[11], (int)cospi_11_64, (int)cospi_21_64,
&step1[21], &step1[26]);
highbd_partial_butterfly_sse2(in[13], (int)cospi_19_64, (int)cospi_13_64,
&step1[22], &step1[25]);
highbd_partial_butterfly_neg_sse2(in[3], (int)cospi_3_64, (int)cospi_29_64,
&step1[23], &step1[24]);
// stage 2
step2[16] = _mm_add_epi32(step1[16], step1[17]);
step2[17] = _mm_sub_epi32(step1[16], step1[17]);
step2[18] = _mm_sub_epi32(step1[18], step1[19]); // step2[18] = -step2[18]
step2[19] = _mm_add_epi32(step1[18], step1[19]);
step2[20] = _mm_add_epi32(step1[20], step1[21]);
step2[21] = _mm_sub_epi32(step1[20], step1[21]);
step2[22] = _mm_sub_epi32(step1[22], step1[23]); // step2[22] = -step2[22]
step2[23] = _mm_add_epi32(step1[22], step1[23]);
step2[24] = _mm_add_epi32(step1[25], step1[24]);
step2[25] = _mm_sub_epi32(step1[25], step1[24]); // step2[25] = -step2[25]
step2[26] = _mm_sub_epi32(step1[27], step1[26]);
step2[27] = _mm_add_epi32(step1[27], step1[26]);
step2[28] = _mm_add_epi32(step1[29], step1[28]);
step2[29] = _mm_sub_epi32(step1[29], step1[28]); // step2[29] = -step2[29]
step2[30] = _mm_sub_epi32(step1[31], step1[30]);
step2[31] = _mm_add_epi32(step1[31], step1[30]);
// stage 3
step1[16] = step2[16];
step1[31] = step2[31];
highbd_butterfly_sse2(step2[30], step2[17], (int)cospi_28_64, (int)cospi_4_64,
&step1[17], &step1[30]);
highbd_butterfly_sse2(step2[18], step2[29], (int)cospi_4_64, (int)cospi_28_64,
&step1[29], &step1[18]);
step1[19] = step2[19];
step1[20] = step2[20];
highbd_butterfly_sse2(step2[26], step2[21], (int)cospi_12_64,
(int)cospi_20_64, &step1[21], &step1[26]);
highbd_butterfly_sse2(step2[22], step2[25], (int)cospi_20_64,
(int)cospi_12_64, &step1[25], &step1[22]);
step1[23] = step2[23];
step1[24] = step2[24];
step1[27] = step2[27];
step1[28] = step2[28];
highbd_idct32_4x32_quarter_3_4_stage_4_to_7(step1, out);
}
static void highbd_idct32_135_4x32(__m128i *const io /*io[32]*/) {
__m128i temp[32];
highbd_idct32_135_4x32_quarter_1_2(io, temp);
highbd_idct32_135_4x32_quarter_3_4(io, temp);
// final stage
highbd_add_sub_butterfly(temp, io, 32);
}
void vpx_highbd_idct32x32_135_add_sse2(const tran_low_t *input, uint16_t *dest,
int stride, int bd) {
int i, j;
if (bd == 8) {
__m128i col[2][32], in[32], out[32];
for (i = 16; i < 32; i++) {
in[i] = _mm_setzero_si128();
}
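// A block with at most 135 non-zero coefficients has them all inside the
// top-left 16x16: columns 16..31 were pre-zeroed above and rows 16..31 are
// skipped (only two 8-row passes), so the bd == 8 row pass can reuse the
// full idct32_1024_8x32() kernel unchanged.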
// rows
for (i = 0; i < 2; i++) {
highbd_load_pack_transpose_32bit_8x8(&input[0], 32, &in[0]);
highbd_load_pack_transpose_32bit_8x8(&input[8], 32, &in[8]);
idct32_1024_8x32(in, col[i]);
input += 32 << 3;
}
// columns
for (i = 0; i < 32; i += 8) {
transpose_16bit_8x8(col[0] + i, in);
transpose_16bit_8x8(col[1] + i, in + 8);
idct32_1024_8x32(in, out);
for (j = 0; j < 32; ++j) {
highbd_write_buffer_8(dest + j * stride, out[j], bd);
}
dest += 8;
}
} else {
__m128i all[8][32], out[32], *in;
for (i = 0; i < 4; i++) {
in = all[i];
highbd_load_transpose_32bit_8x4(&input[0], 32, &in[0]);
highbd_load_transpose_32bit_8x4(&input[8], 32, &in[8]);
highbd_idct32_135_4x32(in);
input += 4 * 32;
}
for (i = 0; i < 32; i += 4) {
transpose_32bit_4x4(all[0] + i, out + 0);
transpose_32bit_4x4(all[1] + i, out + 4);
transpose_32bit_4x4(all[2] + i, out + 8);
transpose_32bit_4x4(all[3] + i, out + 12);
highbd_idct32_135_4x32(out);
for (j = 0; j < 32; ++j) {
highbd_write_buffer_4(dest + j * stride, out[j], bd);
}
dest += 4;
}
}
}
// -----------------------------------------------------------------------------
// For each 4x32 block __m128i in[32],
// Input with index, 0, 4
// output pixels: 0-7 in __m128i out[32]
static INLINE void highbd_idct32_34_4x32_quarter_1(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[8]*/) {
__m128i step1[8], step2[8];
// stage 3
highbd_partial_butterfly_sse2(in[4], (int)cospi_28_64, (int)cospi_4_64,
&step1[4], &step1[7]);
// stage 4
highbd_partial_butterfly_sse2(in[0], (int)cospi_16_64, (int)cospi_16_64,
&step2[1], &step2[0]);
step2[4] = step1[4];
step2[5] = step1[4];
step2[6] = step1[7];
step2[7] = step1[7];
// stage 5
step1[0] = step2[0];
step1[1] = step2[1];
step1[2] = step2[1];
step1[3] = step2[0];
step1[4] = step2[4];
highbd_butterfly_sse2(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64,
&step1[5], &step1[6]);
step1[7] = step2[7];
// stage 6
out[0] = _mm_add_epi32(step1[0], step1[7]);
out[1] = _mm_add_epi32(step1[1], step1[6]);
out[2] = _mm_add_epi32(step1[2], step1[5]);
out[3] = _mm_add_epi32(step1[3], step1[4]);
out[4] = _mm_sub_epi32(step1[3], step1[4]);
out[5] = _mm_sub_epi32(step1[2], step1[5]);
out[6] = _mm_sub_epi32(step1[1], step1[6]);
out[7] = _mm_sub_epi32(step1[0], step1[7]);
}
// For each 4x32 block __m128i in[32],
// Input with index, 2, 6
// output pixels: 8-15 in __m128i out[32]
static INLINE void highbd_idct32_34_4x32_quarter_2(const __m128i *in /*in[32]*/,
__m128i *out /*out[16]*/) {
__m128i step1[32], step2[32];
// stage 2
highbd_partial_butterfly_sse2(in[2], (int)cospi_30_64, (int)cospi_2_64,
&step2[8], &step2[15]);
highbd_partial_butterfly_neg_sse2(in[6], (int)cospi_6_64, (int)cospi_26_64,
&step2[11], &step2[12]);
// stage 3
step1[8] = step2[8];
step1[9] = step2[8];
step1[14] = step2[15];
step1[15] = step2[15];
step1[10] = step2[11];
step1[11] = step2[11];
step1[12] = step2[12];
step1[13] = step2[12];
step1[10] =
_mm_sub_epi32(_mm_setzero_si128(), step1[10]); // step1[10] = -step1[10]
step1[13] =
_mm_sub_epi32(_mm_setzero_si128(), step1[13]); // step1[13] = -step1[13]
highbd_idct32_4x32_quarter_2_stage_4_to_6(step1, out);
}
static INLINE void highbd_idct32_34_4x32_quarter_1_2(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i temp[16];
highbd_idct32_34_4x32_quarter_1(in, temp);
highbd_idct32_34_4x32_quarter_2(in, temp);
// stage 7
highbd_add_sub_butterfly(temp, out, 16);
}
// For each 4x32 block __m128i in[32],
// Input with odd index,
// 1, 3, 5, 7
// output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void highbd_idct32_34_4x32_quarter_3_4(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i step1[32], step2[32];
// stage 1
highbd_partial_butterfly_sse2(in[1], (int)cospi_31_64, (int)cospi_1_64,
&step1[16], &step1[31]);
highbd_partial_butterfly_neg_sse2(in[7], (int)cospi_7_64, (int)cospi_25_64,
&step1[19], &step1[28]);
highbd_partial_butterfly_sse2(in[5], (int)cospi_27_64, (int)cospi_5_64,
&step1[20], &step1[27]);
highbd_partial_butterfly_neg_sse2(in[3], (int)cospi_3_64, (int)cospi_29_64,
&step1[23], &step1[24]);
// stage 2
step2[16] = step1[16];
step2[17] = step1[16];
step2[18] = step1[19];
step2[19] = step1[19];
step2[20] = step1[20];
step2[21] = step1[20];
step2[22] = step1[23];
step2[23] = step1[23];
step2[24] = step1[24];
step2[25] = step1[24];
step2[26] = step1[27];
step2[27] = step1[27];
step2[28] = step1[28];
step2[29] = step1[28];
step2[30] = step1[31];
step2[31] = step1[31];
// stage 3
step2[18] =
_mm_sub_epi32(_mm_setzero_si128(), step2[18]); // step2[18] = -step2[18]
step2[22] =
_mm_sub_epi32(_mm_setzero_si128(), step2[22]); // step2[22] = -step2[22]
step2[25] =
_mm_sub_epi32(_mm_setzero_si128(), step2[25]); // step2[25] = -step2[25]
step2[29] =
_mm_sub_epi32(_mm_setzero_si128(), step2[29]); // step2[29] = -step2[29]
step1[16] = step2[16];
step1[31] = step2[31];
highbd_butterfly_sse2(step2[30], step2[17], (int)cospi_28_64, (int)cospi_4_64,
&step1[17], &step1[30]);
highbd_butterfly_sse2(step2[18], step2[29], (int)cospi_4_64, (int)cospi_28_64,
&step1[29], &step1[18]);
step1[19] = step2[19];
step1[20] = step2[20];
highbd_butterfly_sse2(step2[26], step2[21], (int)cospi_12_64,
(int)cospi_20_64, &step1[21], &step1[26]);
highbd_butterfly_sse2(step2[22], step2[25], (int)cospi_20_64,
(int)cospi_12_64, &step1[25], &step1[22]);
step1[23] = step2[23];
step1[24] = step2[24];
step1[27] = step2[27];
step1[28] = step2[28];
highbd_idct32_4x32_quarter_3_4_stage_4_to_7(step1, out);
}
static void highbd_idct32_34_4x32(__m128i *const io /*io[32]*/) {
__m128i temp[32];
highbd_idct32_34_4x32_quarter_1_2(io, temp);
highbd_idct32_34_4x32_quarter_3_4(io, temp);
// final stage
highbd_add_sub_butterfly(temp, io, 32);
}
void vpx_highbd_idct32x32_34_add_sse2(const tran_low_t *input, uint16_t *dest,
int stride, int bd) {
int i, j;
if (bd == 8) {
__m128i col[32], in[32], out[32];
// rows
highbd_load_pack_transpose_32bit_8x8(&input[0], 32, &in[0]);
idct32_34_8x32_sse2(in, col);
// columns
for (i = 0; i < 32; i += 8) {
transpose_16bit_8x8(col + i, in);
idct32_34_8x32_sse2(in, out);
for (j = 0; j < 32; ++j) {
highbd_write_buffer_8(dest + j * stride, out[j], bd);
}
dest += 8;
}
} else {
__m128i all[8][32], out[32], *in;
for (i = 0; i < 4; i++) {
in = all[i];
highbd_load_transpose_32bit_8x4(&input[0], 32, &in[0]);
highbd_load_transpose_32bit_8x4(&input[8], 32, &in[8]);
highbd_idct32_34_4x32(in);
input += 4 * 32;
}
for (i = 0; i < 32; i += 4) {
transpose_32bit_4x4(all[0] + i, out + 0);
transpose_32bit_4x4(all[1] + i, out + 4);
transpose_32bit_4x4(all[2] + i, out + 8);
transpose_32bit_4x4(all[3] + i, out + 12);
highbd_idct32_34_4x32(out);
for (j = 0; j < 32; ++j) {
highbd_write_buffer_4(dest + j * stride, out[j], bd);
}
dest += 4;
}
}
}
void vpx_highbd_idct32x32_1_add_sse2(const tran_low_t *input, uint16_t *dest,
int stride, int bd) {
highbd_idct_1_add_kernel(input, dest, stride, bd, 32);


@@ -0,0 +1,765 @@
/*
* Copyright (c) 2017 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <smmintrin.h> // SSE4.1
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/highbd_inv_txfm_sse2.h"
#include "vpx_dsp/x86/highbd_inv_txfm_sse4.h"
#include "vpx_dsp/x86/inv_txfm_sse2.h"
#include "vpx_dsp/x86/inv_txfm_ssse3.h"
#include "vpx_dsp/x86/transpose_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"
static INLINE void highbd_idct32_4x32_quarter_2_stage_4_to_6(
__m128i *const step1 /*step1[16]*/, __m128i *const out /*out[16]*/) {
__m128i step2[32];
// stage 4
step2[8] = step1[8];
step2[15] = step1[15];
highbd_butterfly_sse4_1(step1[14], step1[9], (int)cospi_24_64,
(int)cospi_8_64, &step2[9], &step2[14]);
highbd_butterfly_sse4_1(step1[13], step1[10], -(int)cospi_8_64,
(int)cospi_24_64, &step2[10], &step2[13]);
step2[11] = step1[11];
step2[12] = step1[12];
// stage 5
step1[8] = _mm_add_epi32(step2[8], step2[11]);
step1[9] = _mm_add_epi32(step2[9], step2[10]);
step1[10] = _mm_sub_epi32(step2[9], step2[10]);
step1[11] = _mm_sub_epi32(step2[8], step2[11]);
step1[12] = _mm_sub_epi32(step2[15], step2[12]);
step1[13] = _mm_sub_epi32(step2[14], step2[13]);
step1[14] = _mm_add_epi32(step2[14], step2[13]);
step1[15] = _mm_add_epi32(step2[15], step2[12]);
// stage 6
out[8] = step1[8];
out[9] = step1[9];
highbd_butterfly_sse4_1(step1[13], step1[10], (int)cospi_16_64,
(int)cospi_16_64, &out[10], &out[13]);
highbd_butterfly_sse4_1(step1[12], step1[11], (int)cospi_16_64,
(int)cospi_16_64, &out[11], &out[12]);
out[14] = step1[14];
out[15] = step1[15];
}
static INLINE void highbd_idct32_4x32_quarter_3_4_stage_4_to_7(
__m128i *const step1 /*step1[32]*/, __m128i *const out /*out[32]*/) {
__m128i step2[32];
// stage 4
step2[16] = _mm_add_epi32(step1[16], step1[19]);
step2[17] = _mm_add_epi32(step1[17], step1[18]);
step2[18] = _mm_sub_epi32(step1[17], step1[18]);
step2[19] = _mm_sub_epi32(step1[16], step1[19]);
step2[20] = _mm_sub_epi32(step1[23], step1[20]);
step2[21] = _mm_sub_epi32(step1[22], step1[21]);
step2[22] = _mm_add_epi32(step1[22], step1[21]);
step2[23] = _mm_add_epi32(step1[23], step1[20]);
step2[24] = _mm_add_epi32(step1[24], step1[27]);
step2[25] = _mm_add_epi32(step1[25], step1[26]);
step2[26] = _mm_sub_epi32(step1[25], step1[26]);
step2[27] = _mm_sub_epi32(step1[24], step1[27]);
step2[28] = _mm_sub_epi32(step1[31], step1[28]);
step2[29] = _mm_sub_epi32(step1[30], step1[29]);
step2[30] = _mm_add_epi32(step1[29], step1[30]);
step2[31] = _mm_add_epi32(step1[28], step1[31]);
// stage 5
step1[16] = step2[16];
step1[17] = step2[17];
highbd_butterfly_sse4_1(step2[29], step2[18], (int)cospi_24_64,
(int)cospi_8_64, &step1[18], &step1[29]);
highbd_butterfly_sse4_1(step2[28], step2[19], (int)cospi_24_64,
(int)cospi_8_64, &step1[19], &step1[28]);
highbd_butterfly_sse4_1(step2[27], step2[20], -(int)cospi_8_64,
(int)cospi_24_64, &step1[20], &step1[27]);
highbd_butterfly_sse4_1(step2[26], step2[21], -(int)cospi_8_64,
(int)cospi_24_64, &step1[21], &step1[26]);
step1[22] = step2[22];
step1[23] = step2[23];
step1[24] = step2[24];
step1[25] = step2[25];
step1[30] = step2[30];
step1[31] = step2[31];
// stage 6
step2[16] = _mm_add_epi32(step1[16], step1[23]);
step2[17] = _mm_add_epi32(step1[17], step1[22]);
step2[18] = _mm_add_epi32(step1[18], step1[21]);
step2[19] = _mm_add_epi32(step1[19], step1[20]);
step2[20] = _mm_sub_epi32(step1[19], step1[20]);
step2[21] = _mm_sub_epi32(step1[18], step1[21]);
step2[22] = _mm_sub_epi32(step1[17], step1[22]);
step2[23] = _mm_sub_epi32(step1[16], step1[23]);
step2[24] = _mm_sub_epi32(step1[31], step1[24]);
step2[25] = _mm_sub_epi32(step1[30], step1[25]);
step2[26] = _mm_sub_epi32(step1[29], step1[26]);
step2[27] = _mm_sub_epi32(step1[28], step1[27]);
step2[28] = _mm_add_epi32(step1[27], step1[28]);
step2[29] = _mm_add_epi32(step1[26], step1[29]);
step2[30] = _mm_add_epi32(step1[25], step1[30]);
step2[31] = _mm_add_epi32(step1[24], step1[31]);
// stage 7
out[16] = step2[16];
out[17] = step2[17];
out[18] = step2[18];
out[19] = step2[19];
highbd_butterfly_sse4_1(step2[27], step2[20], (int)cospi_16_64,
(int)cospi_16_64, &out[20], &out[27]);
highbd_butterfly_sse4_1(step2[26], step2[21], (int)cospi_16_64,
(int)cospi_16_64, &out[21], &out[26]);
highbd_butterfly_sse4_1(step2[25], step2[22], (int)cospi_16_64,
(int)cospi_16_64, &out[22], &out[25]);
highbd_butterfly_sse4_1(step2[24], step2[23], (int)cospi_16_64,
(int)cospi_16_64, &out[23], &out[24]);
out[28] = step2[28];
out[29] = step2[29];
out[30] = step2[30];
out[31] = step2[31];
}
// Group the coefficient calculation into smaller functions to prevent stack
// spillover in 32x32 idct optimizations:
// quarter_1: 0-7
// quarter_2: 8-15
// quarter_3_4: 16-23, 24-31
// For each 4x32 block __m128i in[32],
// Input with index, 0, 4, 8, 12, 16, 20, 24, 28
// output pixels: 0-7 in __m128i out[32]
static INLINE void highbd_idct32_1024_4x32_quarter_1(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[8]*/) {
__m128i step1[8], step2[8];
// stage 3
highbd_butterfly_sse4_1(in[4], in[28], (int)cospi_28_64, (int)cospi_4_64,
&step1[4], &step1[7]);
highbd_butterfly_sse4_1(in[20], in[12], (int)cospi_12_64, (int)cospi_20_64,
&step1[5], &step1[6]);
// stage 4
highbd_butterfly_sse4_1(in[0], in[16], (int)cospi_16_64, (int)cospi_16_64,
&step2[1], &step2[0]);
highbd_butterfly_sse4_1(in[8], in[24], (int)cospi_24_64, (int)cospi_8_64,
&step2[2], &step2[3]);
step2[4] = _mm_add_epi32(step1[4], step1[5]);
step2[5] = _mm_sub_epi32(step1[4], step1[5]);
step2[6] = _mm_sub_epi32(step1[7], step1[6]);
step2[7] = _mm_add_epi32(step1[7], step1[6]);
// stage 5
step1[0] = _mm_add_epi32(step2[0], step2[3]);
step1[1] = _mm_add_epi32(step2[1], step2[2]);
step1[2] = _mm_sub_epi32(step2[1], step2[2]);
step1[3] = _mm_sub_epi32(step2[0], step2[3]);
step1[4] = step2[4];
highbd_butterfly_sse4_1(step2[6], step2[5], (int)cospi_16_64,
(int)cospi_16_64, &step1[5], &step1[6]);
step1[7] = step2[7];
// stage 6
out[0] = _mm_add_epi32(step1[0], step1[7]);
out[1] = _mm_add_epi32(step1[1], step1[6]);
out[2] = _mm_add_epi32(step1[2], step1[5]);
out[3] = _mm_add_epi32(step1[3], step1[4]);
out[4] = _mm_sub_epi32(step1[3], step1[4]);
out[5] = _mm_sub_epi32(step1[2], step1[5]);
out[6] = _mm_sub_epi32(step1[1], step1[6]);
out[7] = _mm_sub_epi32(step1[0], step1[7]);
}
// For each 4x32 block __m128i in[32],
// Input with index, 2, 6, 10, 14, 18, 22, 26, 30
// output pixels: 8-15 in __m128i out[32]
static INLINE void highbd_idct32_1024_4x32_quarter_2(
const __m128i *in /*in[32]*/, __m128i *out /*out[16]*/) {
__m128i step1[32], step2[32];
// stage 2
highbd_butterfly_sse4_1(in[2], in[30], (int)cospi_30_64, (int)cospi_2_64,
&step2[8], &step2[15]);
highbd_butterfly_sse4_1(in[18], in[14], (int)cospi_14_64, (int)cospi_18_64,
&step2[9], &step2[14]);
highbd_butterfly_sse4_1(in[10], in[22], (int)cospi_22_64, (int)cospi_10_64,
&step2[10], &step2[13]);
highbd_butterfly_sse4_1(in[26], in[6], (int)cospi_6_64, (int)cospi_26_64,
&step2[11], &step2[12]);
// stage 3
step1[8] = _mm_add_epi32(step2[8], step2[9]);
step1[9] = _mm_sub_epi32(step2[8], step2[9]);
step1[14] = _mm_sub_epi32(step2[15], step2[14]);
step1[15] = _mm_add_epi32(step2[15], step2[14]);
step1[10] = _mm_sub_epi32(step2[11], step2[10]);
step1[11] = _mm_add_epi32(step2[11], step2[10]);
step1[12] = _mm_add_epi32(step2[12], step2[13]);
step1[13] = _mm_sub_epi32(step2[12], step2[13]);
highbd_idct32_4x32_quarter_2_stage_4_to_6(step1, out);
}
static INLINE void highbd_idct32_1024_4x32_quarter_1_2(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i temp[16];
highbd_idct32_1024_4x32_quarter_1(in, temp);
highbd_idct32_1024_4x32_quarter_2(in, temp);
// stage 7
highbd_add_sub_butterfly(temp, out, 16);
}
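// quarter_1 writes temp[0..7] and quarter_2 writes temp[8..15]; the stage-7
// add_sub_butterfly() then mirrors them into out[0..15], the first half of
// the 32-point transform.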
// For each 4x32 block __m128i in[32],
// Input with odd index,
// 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
// output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void highbd_idct32_1024_4x32_quarter_3_4(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i step1[32], step2[32];
// stage 1
highbd_butterfly_sse4_1(in[1], in[31], (int)cospi_31_64, (int)cospi_1_64,
&step1[16], &step1[31]);
highbd_butterfly_sse4_1(in[17], in[15], (int)cospi_15_64, (int)cospi_17_64,
&step1[17], &step1[30]);
highbd_butterfly_sse4_1(in[9], in[23], (int)cospi_23_64, (int)cospi_9_64,
&step1[18], &step1[29]);
highbd_butterfly_sse4_1(in[25], in[7], (int)cospi_7_64, (int)cospi_25_64,
&step1[19], &step1[28]);
highbd_butterfly_sse4_1(in[5], in[27], (int)cospi_27_64, (int)cospi_5_64,
&step1[20], &step1[27]);
highbd_butterfly_sse4_1(in[21], in[11], (int)cospi_11_64, (int)cospi_21_64,
&step1[21], &step1[26]);
highbd_butterfly_sse4_1(in[13], in[19], (int)cospi_19_64, (int)cospi_13_64,
&step1[22], &step1[25]);
highbd_butterfly_sse4_1(in[29], in[3], (int)cospi_3_64, (int)cospi_29_64,
&step1[23], &step1[24]);
// stage 2
step2[16] = _mm_add_epi32(step1[16], step1[17]);
step2[17] = _mm_sub_epi32(step1[16], step1[17]);
step2[18] = _mm_sub_epi32(step1[19], step1[18]);
step2[19] = _mm_add_epi32(step1[19], step1[18]);
step2[20] = _mm_add_epi32(step1[20], step1[21]);
step2[21] = _mm_sub_epi32(step1[20], step1[21]);
step2[22] = _mm_sub_epi32(step1[23], step1[22]);
step2[23] = _mm_add_epi32(step1[23], step1[22]);
step2[24] = _mm_add_epi32(step1[24], step1[25]);
step2[25] = _mm_sub_epi32(step1[24], step1[25]);
step2[26] = _mm_sub_epi32(step1[27], step1[26]);
step2[27] = _mm_add_epi32(step1[27], step1[26]);
step2[28] = _mm_add_epi32(step1[28], step1[29]);
step2[29] = _mm_sub_epi32(step1[28], step1[29]);
step2[30] = _mm_sub_epi32(step1[31], step1[30]);
step2[31] = _mm_add_epi32(step1[31], step1[30]);
// stage 3
step1[16] = step2[16];
step1[31] = step2[31];
highbd_butterfly_sse4_1(step2[30], step2[17], (int)cospi_28_64,
(int)cospi_4_64, &step1[17], &step1[30]);
highbd_butterfly_sse4_1(step2[29], step2[18], -(int)cospi_4_64,
(int)cospi_28_64, &step1[18], &step1[29]);
step1[19] = step2[19];
step1[20] = step2[20];
highbd_butterfly_sse4_1(step2[26], step2[21], (int)cospi_12_64,
(int)cospi_20_64, &step1[21], &step1[26]);
highbd_butterfly_sse4_1(step2[25], step2[22], -(int)cospi_20_64,
(int)cospi_12_64, &step1[22], &step1[25]);
step1[23] = step2[23];
step1[24] = step2[24];
step1[27] = step2[27];
step1[28] = step2[28];
highbd_idct32_4x32_quarter_3_4_stage_4_to_7(step1, out);
}
static void highbd_idct32_1024_4x32(__m128i *const io /*io[32]*/) {
__m128i temp[32];
highbd_idct32_1024_4x32_quarter_1_2(io, temp);
highbd_idct32_1024_4x32_quarter_3_4(io, temp);
// final stage
highbd_add_sub_butterfly(temp, io, 32);
}
void vpx_highbd_idct32x32_1024_add_sse4_1(const tran_low_t *input,
uint16_t *dest, int stride, int bd) {
int i, j;
if (bd == 8) {
__m128i col[4][32], io[32];
// rows
for (i = 0; i < 4; i++) {
highbd_load_pack_transpose_32bit_8x8(&input[0], 32, &io[0]);
highbd_load_pack_transpose_32bit_8x8(&input[8], 32, &io[8]);
highbd_load_pack_transpose_32bit_8x8(&input[16], 32, &io[16]);
highbd_load_pack_transpose_32bit_8x8(&input[24], 32, &io[24]);
idct32_1024_8x32(io, col[i]);
input += 32 << 3;
}
// columns
for (i = 0; i < 32; i += 8) {
// Transpose 32x8 block to 8x32 block
transpose_16bit_8x8(col[0] + i, io);
transpose_16bit_8x8(col[1] + i, io + 8);
transpose_16bit_8x8(col[2] + i, io + 16);
transpose_16bit_8x8(col[3] + i, io + 24);
idct32_1024_8x32(io, io);
for (j = 0; j < 32; ++j) {
highbd_write_buffer_8(dest + j * stride, io[j], bd);
}
dest += 8;
}
} else {
__m128i all[8][32], out[32], *in;
for (i = 0; i < 8; i++) {
in = all[i];
highbd_load_transpose_32bit_8x4(&input[0], 32, &in[0]);
highbd_load_transpose_32bit_8x4(&input[8], 32, &in[8]);
highbd_load_transpose_32bit_8x4(&input[16], 32, &in[16]);
highbd_load_transpose_32bit_8x4(&input[24], 32, &in[24]);
highbd_idct32_1024_4x32(in);
input += 4 * 32;
}
for (i = 0; i < 32; i += 4) {
transpose_32bit_4x4(all[0] + i, out + 0);
transpose_32bit_4x4(all[1] + i, out + 4);
transpose_32bit_4x4(all[2] + i, out + 8);
transpose_32bit_4x4(all[3] + i, out + 12);
transpose_32bit_4x4(all[4] + i, out + 16);
transpose_32bit_4x4(all[5] + i, out + 20);
transpose_32bit_4x4(all[6] + i, out + 24);
transpose_32bit_4x4(all[7] + i, out + 28);
highbd_idct32_1024_4x32(out);
for (j = 0; j < 32; ++j) {
highbd_write_buffer_4(dest + j * stride, out[j], bd);
}
dest += 4;
}
}
}
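// For bd > 8 the intermediate values do not fit in 16 bits, so each __m128i
// holds only four 32-bit lanes and the transform runs on 4x32 strips: eight
// strips of four rows in the first pass and eight strips of four columns in
// the second, instead of the packed 16-bit 8x32 strips used when bd == 8.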
// -----------------------------------------------------------------------------
// For each 4x32 block __m128i in[32],
// Input with index, 0, 4, 8, 12
// output pixels: 0-7 in __m128i out[32]
static INLINE void highbd_idct32_135_4x32_quarter_1(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[8]*/) {
__m128i step1[8], step2[8];
// stage 3
highbd_partial_butterfly_sse4_1(in[4], (int)cospi_28_64, (int)cospi_4_64,
&step1[4], &step1[7]);
highbd_partial_butterfly_sse4_1(in[12], -(int)cospi_20_64, (int)cospi_12_64,
&step1[5], &step1[6]);
// stage 4
highbd_partial_butterfly_sse4_1(in[0], (int)cospi_16_64, (int)cospi_16_64,
&step2[1], &step2[0]);
highbd_partial_butterfly_sse4_1(in[8], (int)cospi_24_64, (int)cospi_8_64,
&step2[2], &step2[3]);
step2[4] = _mm_add_epi32(step1[4], step1[5]);
step2[5] = _mm_sub_epi32(step1[4], step1[5]);
step2[6] = _mm_sub_epi32(step1[7], step1[6]);
step2[7] = _mm_add_epi32(step1[7], step1[6]);
// stage 5
step1[0] = _mm_add_epi32(step2[0], step2[3]);
step1[1] = _mm_add_epi32(step2[1], step2[2]);
step1[2] = _mm_sub_epi32(step2[1], step2[2]);
step1[3] = _mm_sub_epi32(step2[0], step2[3]);
step1[4] = step2[4];
highbd_butterfly_sse4_1(step2[6], step2[5], (int)cospi_16_64,
(int)cospi_16_64, &step1[5], &step1[6]);
step1[7] = step2[7];
// stage 6
out[0] = _mm_add_epi32(step1[0], step1[7]);
out[1] = _mm_add_epi32(step1[1], step1[6]);
out[2] = _mm_add_epi32(step1[2], step1[5]);
out[3] = _mm_add_epi32(step1[3], step1[4]);
out[4] = _mm_sub_epi32(step1[3], step1[4]);
out[5] = _mm_sub_epi32(step1[2], step1[5]);
out[6] = _mm_sub_epi32(step1[1], step1[6]);
out[7] = _mm_sub_epi32(step1[0], step1[7]);
}
// For each 4x32 block __m128i in[32],
// Input with index, 2, 6, 10, 14
// output pixels: 8-15 in __m128i out[32]
static INLINE void highbd_idct32_135_4x32_quarter_2(
const __m128i *in /*in[32]*/, __m128i *out /*out[16]*/) {
__m128i step1[32], step2[32];
// stage 2
highbd_partial_butterfly_sse4_1(in[2], (int)cospi_30_64, (int)cospi_2_64,
&step2[8], &step2[15]);
highbd_partial_butterfly_sse4_1(in[14], -(int)cospi_18_64, (int)cospi_14_64,
&step2[9], &step2[14]);
highbd_partial_butterfly_sse4_1(in[10], (int)cospi_22_64, (int)cospi_10_64,
&step2[10], &step2[13]);
highbd_partial_butterfly_sse4_1(in[6], -(int)cospi_26_64, (int)cospi_6_64,
&step2[11], &step2[12]);
// stage 3
step1[8] = _mm_add_epi32(step2[8], step2[9]);
step1[9] = _mm_sub_epi32(step2[8], step2[9]);
step1[14] = _mm_sub_epi32(step2[15], step2[14]);
step1[15] = _mm_add_epi32(step2[15], step2[14]);
step1[10] = _mm_sub_epi32(step2[11], step2[10]);
step1[11] = _mm_add_epi32(step2[11], step2[10]);
step1[12] = _mm_add_epi32(step2[12], step2[13]);
step1[13] = _mm_sub_epi32(step2[12], step2[13]);
highbd_idct32_4x32_quarter_2_stage_4_to_6(step1, out);
}
static INLINE void highbd_idct32_135_4x32_quarter_1_2(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i temp[16];
highbd_idct32_135_4x32_quarter_1(in, temp);
highbd_idct32_135_4x32_quarter_2(in, temp);
// stage 7
highbd_add_sub_butterfly(temp, out, 16);
}
// For each 4x32 block __m128i in[32],
// Input with odd index,
// 1, 3, 5, 7, 9, 11, 13, 15
// output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void highbd_idct32_135_4x32_quarter_3_4(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i step1[32], step2[32];
// stage 1
highbd_partial_butterfly_sse4_1(in[1], (int)cospi_31_64, (int)cospi_1_64,
&step1[16], &step1[31]);
highbd_partial_butterfly_sse4_1(in[15], -(int)cospi_17_64, (int)cospi_15_64,
&step1[17], &step1[30]);
highbd_partial_butterfly_sse4_1(in[9], (int)cospi_23_64, (int)cospi_9_64,
&step1[18], &step1[29]);
highbd_partial_butterfly_sse4_1(in[7], -(int)cospi_25_64, (int)cospi_7_64,
&step1[19], &step1[28]);
highbd_partial_butterfly_sse4_1(in[5], (int)cospi_27_64, (int)cospi_5_64,
&step1[20], &step1[27]);
highbd_partial_butterfly_sse4_1(in[11], -(int)cospi_21_64, (int)cospi_11_64,
&step1[21], &step1[26]);
highbd_partial_butterfly_sse4_1(in[13], (int)cospi_19_64, (int)cospi_13_64,
&step1[22], &step1[25]);
highbd_partial_butterfly_sse4_1(in[3], -(int)cospi_29_64, (int)cospi_3_64,
&step1[23], &step1[24]);
// stage 2
step2[16] = _mm_add_epi32(step1[16], step1[17]);
step2[17] = _mm_sub_epi32(step1[16], step1[17]);
step2[18] = _mm_sub_epi32(step1[19], step1[18]);
step2[19] = _mm_add_epi32(step1[19], step1[18]);
step2[20] = _mm_add_epi32(step1[20], step1[21]);
step2[21] = _mm_sub_epi32(step1[20], step1[21]);
step2[22] = _mm_sub_epi32(step1[23], step1[22]);
step2[23] = _mm_add_epi32(step1[23], step1[22]);
step2[24] = _mm_add_epi32(step1[24], step1[25]);
step2[25] = _mm_sub_epi32(step1[24], step1[25]);
step2[26] = _mm_sub_epi32(step1[27], step1[26]);
step2[27] = _mm_add_epi32(step1[27], step1[26]);
step2[28] = _mm_add_epi32(step1[28], step1[29]);
step2[29] = _mm_sub_epi32(step1[28], step1[29]);
step2[30] = _mm_sub_epi32(step1[31], step1[30]);
step2[31] = _mm_add_epi32(step1[31], step1[30]);
// stage 3
step1[16] = step2[16];
step1[31] = step2[31];
highbd_butterfly_sse4_1(step2[30], step2[17], (int)cospi_28_64,
(int)cospi_4_64, &step1[17], &step1[30]);
highbd_butterfly_sse4_1(step2[29], step2[18], -(int)cospi_4_64,
(int)cospi_28_64, &step1[18], &step1[29]);
step1[19] = step2[19];
step1[20] = step2[20];
highbd_butterfly_sse4_1(step2[26], step2[21], (int)cospi_12_64,
(int)cospi_20_64, &step1[21], &step1[26]);
highbd_butterfly_sse4_1(step2[25], step2[22], -(int)cospi_20_64,
(int)cospi_12_64, &step1[22], &step1[25]);
step1[23] = step2[23];
step1[24] = step2[24];
step1[27] = step2[27];
step1[28] = step2[28];
highbd_idct32_4x32_quarter_3_4_stage_4_to_7(step1, out);
}
static void highbd_idct32_135_4x32(__m128i *const io /*io[32]*/) {
__m128i temp[32];
highbd_idct32_135_4x32_quarter_1_2(io, temp);
highbd_idct32_135_4x32_quarter_3_4(io, temp);
// final stage
highbd_add_sub_butterfly(temp, io, 32);
}
void vpx_highbd_idct32x32_135_add_sse4_1(const tran_low_t *input,
uint16_t *dest, int stride, int bd) {
int i, j;
if (bd == 8) {
__m128i col[2][32], in[32], out[32];
// rows
for (i = 0; i < 2; i++) {
highbd_load_pack_transpose_32bit_8x8(&input[0], 32, &in[0]);
highbd_load_pack_transpose_32bit_8x8(&input[8], 32, &in[8]);
idct32_135_8x32_ssse3(in, col[i]);
input += 32 << 3;
}
// columns
for (i = 0; i < 32; i += 8) {
transpose_16bit_8x8(col[0] + i, in);
transpose_16bit_8x8(col[1] + i, in + 8);
idct32_135_8x32_ssse3(in, out);
for (j = 0; j < 32; ++j) {
highbd_write_buffer_8(dest + j * stride, out[j], bd);
}
dest += 8;
}
} else {
__m128i all[8][32], out[32], *in;
for (i = 0; i < 4; i++) {
in = all[i];
highbd_load_transpose_32bit_8x4(&input[0], 32, &in[0]);
highbd_load_transpose_32bit_8x4(&input[8], 32, &in[8]);
highbd_idct32_135_4x32(in);
input += 4 * 32;
}
for (i = 0; i < 32; i += 4) {
transpose_32bit_4x4(all[0] + i, out + 0);
transpose_32bit_4x4(all[1] + i, out + 4);
transpose_32bit_4x4(all[2] + i, out + 8);
transpose_32bit_4x4(all[3] + i, out + 12);
highbd_idct32_135_4x32(out);
for (j = 0; j < 32; ++j) {
highbd_write_buffer_4(dest + j * stride, out[j], bd);
}
dest += 4;
}
}
}
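// The 135-coefficient path assumes only the upper-left 16x16 of the 32x32
// coefficient block is non-zero, so the row pass above visits just the first
// 16 rows (two 8-row strips for bd == 8, four 4-row strips otherwise).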
// -----------------------------------------------------------------------------
// For each 4x32 block __m128i in[32],
// Input with index, 0, 4
// output pixels: 0-7 in __m128i out[32]
static INLINE void highbd_idct32_34_4x32_quarter_1(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[8]*/) {
__m128i step1[8], step2[8];
// stage 3
highbd_partial_butterfly_sse4_1(in[4], (int)cospi_28_64, (int)cospi_4_64,
&step1[4], &step1[7]);
// stage 4
highbd_partial_butterfly_sse4_1(in[0], (int)cospi_16_64, (int)cospi_16_64,
&step2[1], &step2[0]);
step2[4] = step1[4];
step2[5] = step1[4];
step2[6] = step1[7];
step2[7] = step1[7];
// stage 5
step1[0] = step2[0];
step1[1] = step2[1];
step1[2] = step2[1];
step1[3] = step2[0];
step1[4] = step2[4];
highbd_butterfly_sse4_1(step2[6], step2[5], (int)cospi_16_64,
(int)cospi_16_64, &step1[5], &step1[6]);
step1[7] = step2[7];
// stage 6
out[0] = _mm_add_epi32(step1[0], step1[7]);
out[1] = _mm_add_epi32(step1[1], step1[6]);
out[2] = _mm_add_epi32(step1[2], step1[5]);
out[3] = _mm_add_epi32(step1[3], step1[4]);
out[4] = _mm_sub_epi32(step1[3], step1[4]);
out[5] = _mm_sub_epi32(step1[2], step1[5]);
out[6] = _mm_sub_epi32(step1[1], step1[6]);
out[7] = _mm_sub_epi32(step1[0], step1[7]);
}
// For each 4x32 block __m128i in[32],
// Input with index, 2, 6
// output pixels: 8-15 in __m128i out[32]
static INLINE void highbd_idct32_34_4x32_quarter_2(const __m128i *in /*in[32]*/,
__m128i *out /*out[16]*/) {
__m128i step1[32], step2[32];
// stage 2
highbd_partial_butterfly_sse4_1(in[2], (int)cospi_30_64, (int)cospi_2_64,
&step2[8], &step2[15]);
highbd_partial_butterfly_sse4_1(in[6], -(int)cospi_26_64, (int)cospi_6_64,
&step2[11], &step2[12]);
// stage 3
step1[8] = step2[8];
step1[9] = step2[8];
step1[14] = step2[15];
step1[15] = step2[15];
step1[10] = step2[11];
step1[11] = step2[11];
step1[12] = step2[12];
step1[13] = step2[12];
highbd_idct32_4x32_quarter_2_stage_4_to_6(step1, out);
}
static INLINE void highbd_idct32_34_4x32_quarter_1_2(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i temp[16];
highbd_idct32_34_4x32_quarter_1(in, temp);
highbd_idct32_34_4x32_quarter_2(in, temp);
// stage 7
highbd_add_sub_butterfly(temp, out, 16);
}
// For each 4x32 block __m128i in[32],
// Input with odd index,
// 1, 3, 5, 7
// output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void highbd_idct32_34_4x32_quarter_3_4(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i step1[32], step2[32];
// stage 1
highbd_partial_butterfly_sse4_1(in[1], (int)cospi_31_64, (int)cospi_1_64,
&step1[16], &step1[31]);
highbd_partial_butterfly_sse4_1(in[7], -(int)cospi_25_64, (int)cospi_7_64,
&step1[19], &step1[28]);
highbd_partial_butterfly_sse4_1(in[5], (int)cospi_27_64, (int)cospi_5_64,
&step1[20], &step1[27]);
highbd_partial_butterfly_sse4_1(in[3], -(int)cospi_29_64, (int)cospi_3_64,
&step1[23], &step1[24]);
// stage 2
step2[16] = step1[16];
step2[17] = step1[16];
step2[18] = step1[19];
step2[19] = step1[19];
step2[20] = step1[20];
step2[21] = step1[20];
step2[22] = step1[23];
step2[23] = step1[23];
step2[24] = step1[24];
step2[25] = step1[24];
step2[26] = step1[27];
step2[27] = step1[27];
step2[28] = step1[28];
step2[29] = step1[28];
step2[30] = step1[31];
step2[31] = step1[31];
// stage 3
step1[16] = step2[16];
step1[31] = step2[31];
highbd_butterfly_sse4_1(step2[30], step2[17], (int)cospi_28_64,
(int)cospi_4_64, &step1[17], &step1[30]);
highbd_butterfly_sse4_1(step2[29], step2[18], -(int)cospi_4_64,
(int)cospi_28_64, &step1[18], &step1[29]);
step1[19] = step2[19];
step1[20] = step2[20];
highbd_butterfly_sse4_1(step2[26], step2[21], (int)cospi_12_64,
(int)cospi_20_64, &step1[21], &step1[26]);
highbd_butterfly_sse4_1(step2[25], step2[22], -(int)cospi_20_64,
(int)cospi_12_64, &step1[22], &step1[25]);
step1[23] = step2[23];
step1[24] = step2[24];
step1[27] = step2[27];
step1[28] = step2[28];
highbd_idct32_4x32_quarter_3_4_stage_4_to_7(step1, out);
}
static void highbd_idct32_34_4x32(__m128i *const io /*io[32]*/) {
__m128i temp[32];
highbd_idct32_34_4x32_quarter_1_2(io, temp);
highbd_idct32_34_4x32_quarter_3_4(io, temp);
// final stage
highbd_add_sub_butterfly(temp, io, 32);
}
void vpx_highbd_idct32x32_34_add_sse4_1(const tran_low_t *input, uint16_t *dest,
int stride, int bd) {
int i, j;
if (bd == 8) {
__m128i col[32], in[32], out[32];
// rows
highbd_load_pack_transpose_32bit_8x8(&input[0], 32, &in[0]);
idct32_34_8x32_ssse3(in, col);
// columns
for (i = 0; i < 32; i += 8) {
transpose_16bit_8x8(col + i, in);
idct32_34_8x32_ssse3(in, out);
for (j = 0; j < 32; ++j) {
highbd_write_buffer_8(dest + j * stride, out[j], bd);
}
dest += 8;
}
} else {
__m128i all[8][32], out[32], *in;
for (i = 0; i < 4; i++) {
in = all[i];
highbd_load_transpose_32bit_8x4(&input[0], 32, &in[0]);
highbd_load_transpose_32bit_8x4(&input[8], 32, &in[8]);
highbd_idct32_34_4x32(in);
input += 4 * 32;
}
for (i = 0; i < 32; i += 4) {
transpose_32bit_4x4(all[0] + i, out + 0);
transpose_32bit_4x4(all[1] + i, out + 4);
transpose_32bit_4x4(all[2] + i, out + 8);
transpose_32bit_4x4(all[3] + i, out + 12);
highbd_idct32_34_4x32(out);
for (j = 0; j < 32; ++j) {
highbd_write_buffer_4(dest + j * stride, out[j], bd);
}
dest += 4;
}
}
}
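// The 34-coefficient path assumes only the upper-left 8x8 coefficients are
// non-zero, so a single 8x8 load feeds the whole row pass when bd == 8, and
// the bd > 8 pass only walks the first 16 rows of input.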

View File

@ -16,6 +16,7 @@
#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/inv_txfm.h"
#include "vpx_dsp/x86/transpose_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"
static INLINE void extend_64bit(const __m128i in,
@ -84,6 +85,7 @@ static INLINE __m128i multiplication_round_shift_sse2(
const __m128i pair_c = pair_set_epi32(c << 2, 0);
__m128i t0, t1;
assert(c >= 0);
t0 = multiply_apply_sign_sse2(in[0], sign[0], pair_c);
t1 = multiply_apply_sign_sse2(in[1], sign[1], pair_c);
t0 = dct_const_round_shift_64bit(t0);
@ -99,6 +101,7 @@ static INLINE __m128i multiplication_neg_round_shift_sse2(
const __m128i pair_c = pair_set_epi32(c << 2, 0);
__m128i t0, t1;
assert(c >= 0);
t0 = multiply_apply_sign_sse2(in[0], sign[0], pair_c);
t1 = multiply_apply_sign_sse2(in[1], sign[1], pair_c);
t0 = _mm_sub_epi64(_mm_setzero_si128(), t0);
@ -118,6 +121,8 @@ static INLINE void highbd_butterfly_sse2(const __m128i in0, const __m128i in1,
const __m128i pair_c1 = pair_set_epi32(c1 << 2, 0);
__m128i temp1[4], temp2[4], sign1[2], sign2[2];
assert(c0 >= 0);
assert(c1 >= 0);
abs_extend_64bit_sse2(in0, temp1, sign1);
abs_extend_64bit_sse2(in1, temp2, sign2);
temp1[2] = multiply_apply_sign_sse2(temp1[0], sign1[0], pair_c1);
@ -140,6 +145,34 @@ static INLINE void highbd_butterfly_sse2(const __m128i in0, const __m128i in1,
*out1 = pack_4(temp2[0], temp2[1]);
}
// Note: c0 and c1 must be non negative.
static INLINE void highbd_partial_butterfly_sse2(const __m128i in, const int c0,
const int c1,
__m128i *const out0,
__m128i *const out1) {
__m128i temp[2], sign[2];
assert(c0 >= 0);
assert(c1 >= 0);
abs_extend_64bit_sse2(in, temp, sign);
*out0 = multiplication_round_shift_sse2(temp, sign, c0);
*out1 = multiplication_round_shift_sse2(temp, sign, c1);
}
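// highbd_partial_butterfly_sse2() is the special case of
// highbd_butterfly_sse2() in which the second input is zero; roughly, per
// 32-bit lane:
//   *out0 = dct_const_round_shift((int64_t)in * c0);
//   *out1 = dct_const_round_shift((int64_t)in * c1);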
// Note: c0 and c1 must be non negative.
static INLINE void highbd_partial_butterfly_neg_sse2(const __m128i in,
const int c0, const int c1,
__m128i *const out0,
__m128i *const out1) {
__m128i temp[2], sign[2];
assert(c0 >= 0);
assert(c1 >= 0);
abs_extend_64bit_sse2(in, temp, sign);
*out0 = multiplication_neg_round_shift_sse2(temp, sign, c1);
*out1 = multiplication_round_shift_sse2(temp, sign, c0);
}
static INLINE void highbd_butterfly_cospi16_sse2(const __m128i in0,
const __m128i in1,
__m128i *const out0,
@ -154,27 +187,17 @@ static INLINE void highbd_butterfly_cospi16_sse2(const __m128i in0,
*out1 = multiplication_round_shift_sse2(temp1, sign, (int)cospi_16_64);
}
// Note: c0 and c1 must be non negative.
static INLINE void highbd_multiplication_sse2(const __m128i in, const int c0,
const int c1, __m128i *const out0,
__m128i *const out1) {
__m128i temp[2], sign[2];
abs_extend_64bit_sse2(in, temp, sign);
*out0 = multiplication_round_shift_sse2(temp, sign, c0);
*out1 = multiplication_round_shift_sse2(temp, sign, c1);
}
// Note: c0 and c1 must be non negative.
static INLINE void highbd_multiplication_neg_sse2(const __m128i in,
const int c0, const int c1,
__m128i *const out0,
__m128i *const out1) {
__m128i temp[2], sign[2];
abs_extend_64bit_sse2(in, temp, sign);
*out0 = multiplication_neg_round_shift_sse2(temp, sign, c1);
*out1 = multiplication_round_shift_sse2(temp, sign, c0);
// Only do addition and subtraction butterfly, size = 16, 32
static INLINE void highbd_add_sub_butterfly(const __m128i *in, __m128i *out,
int size) {
int i = 0;
const int num = size >> 1;
const int bound = size - 1;
while (i < num) {
out[i] = _mm_add_epi32(in[i], in[bound - i]);
out[bound - i] = _mm_sub_epi32(in[i], in[bound - i]);
i++;
}
}
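// With size == 16, for example, the loop above unrolls to the mirror-image
// add/sub stage of the idct:
//   out[0] = in[0] + in[15];  out[15] = in[0] - in[15];
//   out[1] = in[1] + in[14];  out[14] = in[1] - in[14];
//   ...
//   out[7] = in[7] + in[8];   out[8]  = in[7] - in[8];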
static INLINE void highbd_idct8_stage4(const __m128i *const in,
@ -313,6 +336,44 @@ static INLINE __m128i load_pack_8_32bit(const tran_low_t *const input) {
return _mm_packs_epi32(t0, t1);
}
static INLINE void highbd_load_pack_transpose_32bit_8x8(const tran_low_t *input,
const int stride,
__m128i *const in) {
in[0] = load_pack_8_32bit(input + 0 * stride);
in[1] = load_pack_8_32bit(input + 1 * stride);
in[2] = load_pack_8_32bit(input + 2 * stride);
in[3] = load_pack_8_32bit(input + 3 * stride);
in[4] = load_pack_8_32bit(input + 4 * stride);
in[5] = load_pack_8_32bit(input + 5 * stride);
in[6] = load_pack_8_32bit(input + 6 * stride);
in[7] = load_pack_8_32bit(input + 7 * stride);
transpose_16bit_8x8(in, in);
}
static INLINE void highbd_load_transpose_32bit_8x4(const tran_low_t *input,
const int stride,
__m128i *in) {
in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride + 0));
in[1] = _mm_load_si128((const __m128i *)(input + 0 * stride + 4));
in[2] = _mm_load_si128((const __m128i *)(input + 1 * stride + 0));
in[3] = _mm_load_si128((const __m128i *)(input + 1 * stride + 4));
in[4] = _mm_load_si128((const __m128i *)(input + 2 * stride + 0));
in[5] = _mm_load_si128((const __m128i *)(input + 2 * stride + 4));
in[6] = _mm_load_si128((const __m128i *)(input + 3 * stride + 0));
in[7] = _mm_load_si128((const __m128i *)(input + 3 * stride + 4));
transpose_32bit_8x4(in, in);
}
static INLINE void highbd_load_transpose_32bit_4x4(const tran_low_t *input,
const int stride,
__m128i *in) {
in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride));
in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride));
in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride));
in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride));
transpose_32bit_4x4(in, in);
}
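// The pack variant above narrows coefficients to 16 bits for the bd == 8
// path; the plain 32-bit loaders keep full precision for bd > 8.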
static INLINE void highbd_write_buffer_8(uint16_t *dest, const __m128i in,
const int bd) {
const __m128i final_rounding = _mm_set1_epi16(1 << 5);

View File

@ -73,10 +73,10 @@ static INLINE void highbd_butterfly_cospi16_sse4_1(const __m128i in0,
*out1 = multiplication_round_shift_sse4_1(temp1, (int)cospi_16_64);
}
static INLINE void highbd_multiplication_sse4_1(const __m128i in, const int c0,
const int c1,
__m128i *const out0,
__m128i *const out1) {
static INLINE void highbd_partial_butterfly_sse4_1(const __m128i in,
const int c0, const int c1,
__m128i *const out0,
__m128i *const out1) {
__m128i temp[2];
extend_64bit(in, temp);

View File

@ -79,7 +79,7 @@ void vpx_idct4x4_1_add_sse2(const tran_low_t *input, uint8_t *dest,
*(int *)(dest + stride * 3) = _mm_cvtsi128_si32(d[0]);
}
void idct4_sse2(__m128i *in) {
void idct4_sse2(__m128i *const in) {
const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64);
const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
@ -99,7 +99,7 @@ void idct4_sse2(__m128i *in) {
in[1] = _mm_shuffle_epi32(in[1], 0x4E);
}
void iadst4_sse2(__m128i *in) {
void iadst4_sse2(__m128i *const in) {
const __m128i k__sinpi_p01_p04 = pair_set_epi16(sinpi_1_9, sinpi_4_9);
const __m128i k__sinpi_p03_p02 = pair_set_epi16(sinpi_3_9, sinpi_2_9);
const __m128i k__sinpi_p02_m01 = pair_set_epi16(sinpi_2_9, -sinpi_1_9);
@ -148,6 +148,18 @@ void iadst4_sse2(__m128i *in) {
in[1] = _mm_packs_epi32(u[2], u[3]);
}
static INLINE void load_buffer_8x8(const tran_low_t *const input,
__m128i *const in) {
in[0] = load_input_data8(input + 0 * 8);
in[1] = load_input_data8(input + 1 * 8);
in[2] = load_input_data8(input + 2 * 8);
in[3] = load_input_data8(input + 3 * 8);
in[4] = load_input_data8(input + 4 * 8);
in[5] = load_input_data8(input + 5 * 8);
in[6] = load_input_data8(input + 6 * 8);
in[7] = load_input_data8(input + 7 * 8);
}
void vpx_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
__m128i in[8];
@ -213,7 +225,7 @@ void vpx_idct8x8_1_add_sse2(const tran_low_t *input, uint8_t *dest,
recon_and_store_8_dual(dest, dc_value, stride);
}
void idct8_sse2(__m128i *in) {
void idct8_sse2(__m128i *const in) {
// 8x8 Transpose is copied from vpx_fdct8x8_sse2()
transpose_16bit_8x8(in, in);
@ -221,7 +233,7 @@ void idct8_sse2(__m128i *in) {
idct8(in, in);
}
void iadst8_sse2(__m128i *in) {
void iadst8_sse2(__m128i *const in) {
const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64);
@ -434,14 +446,6 @@ static INLINE void idct16_load8x8(const tran_low_t *const input,
in[7] = load_input_data8(input + 7 * 16);
}
static INLINE void write_buffer_8x1(uint8_t *const dest, const __m128i in) {
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
__m128i out;
out = _mm_adds_epi16(in, final_rounding);
out = _mm_srai_epi16(out, 6);
recon_and_store(dest, out);
}
void vpx_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
__m128i l[16], r[16], out[16], *in;
@ -453,7 +457,7 @@ void vpx_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
transpose_16bit_8x8(in, in);
idct16_load8x8(input + 8, in + 8);
transpose_16bit_8x8(in + 8, in + 8);
idct16_8col(in);
idct16_8col(in, in);
in = r;
input += 128;
}
@ -462,7 +466,7 @@ void vpx_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
int j;
transpose_16bit_8x8(l + i, out);
transpose_16bit_8x8(r + i, out + 8);
idct16_8col(out);
idct16_8col(out, out);
for (j = 0; j < 16; ++j) {
write_buffer_8x1(dest + j * stride, out[j]);
@ -474,33 +478,21 @@ void vpx_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
void vpx_idct16x16_38_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
__m128i in[16], out[16];
__m128i in[16], temp[16], out[16];
int i;
idct16_load8x8(input, in);
transpose_16bit_8x8(in, in);
in[8] = _mm_setzero_si128();
in[9] = _mm_setzero_si128();
in[10] = _mm_setzero_si128();
in[11] = _mm_setzero_si128();
in[12] = _mm_setzero_si128();
in[13] = _mm_setzero_si128();
in[14] = _mm_setzero_si128();
in[15] = _mm_setzero_si128();
idct16_8col(in);
for (i = 8; i < 16; i++) {
in[i] = _mm_setzero_si128();
}
idct16_8col(in, temp);
for (i = 0; i < 16; i += 8) {
int j;
transpose_16bit_8x8(in + i, out);
out[8] = _mm_setzero_si128();
out[9] = _mm_setzero_si128();
out[10] = _mm_setzero_si128();
out[11] = _mm_setzero_si128();
out[12] = _mm_setzero_si128();
out[13] = _mm_setzero_si128();
out[14] = _mm_setzero_si128();
out[15] = _mm_setzero_si128();
idct16_8col(out);
transpose_16bit_8x8(temp + i, in);
idct16_8col(in, out);
for (j = 0; j < 16; ++j) {
write_buffer_8x1(dest + j * stride, out[j]);
@ -567,7 +559,7 @@ void vpx_idct16x16_1_add_sse2(const tran_low_t *input, uint8_t *dest,
}
}
static void iadst16_8col(__m128i *in) {
static void iadst16_8col(__m128i *const in) {
// perform 16x16 1-D ADST for 8 columns
__m128i s[16], x[16], u[32], v[32];
const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64);
@ -987,33 +979,38 @@ static void iadst16_8col(__m128i *in) {
in[15] = _mm_sub_epi16(kZero, s[1]);
}
void idct16_sse2(__m128i *in0, __m128i *in1) {
void idct16_sse2(__m128i *const in0, __m128i *const in1) {
transpose_16bit_16x16(in0, in1);
idct16_8col(in0);
idct16_8col(in1);
idct16_8col(in0, in0);
idct16_8col(in1, in1);
}
void iadst16_sse2(__m128i *in0, __m128i *in1) {
void iadst16_sse2(__m128i *const in0, __m128i *const in1) {
transpose_16bit_16x16(in0, in1);
iadst16_8col(in0);
iadst16_8col(in1);
}
// Group the coefficient calculation into smaller functions to prevent stack
// spillover in 32x32 idct optimizations:
// quarter_1: 0-7
// quarter_2: 8-15
// quarter_3_4: 16-23, 24-31
// For each 8x32 block __m128i in[32],
// Input with index, 0, 4
// output pixels: 0-7 in __m128i out[32]
static INLINE void idct32x32_34_8x32_quarter_1(const __m128i *in /*in[32]*/,
__m128i *out /*out[8]*/) {
static INLINE void idct32_34_8x32_quarter_1(const __m128i *const in /*in[32]*/,
__m128i *const out /*out[8]*/) {
const __m128i zero = _mm_setzero_si128();
__m128i step1[32], step2[32];
__m128i step1[8], step2[8];
// stage 3
butterfly(in[4], zero, (int)cospi_28_64, (int)cospi_4_64, &step1[4],
&step1[7]);
// stage 4
butterfly(in[0], zero, (int)cospi_16_64, (int)cospi_16_64, &step2[1],
&step2[0]);
step2[0] = butterfly_cospi16(in[0]);
step2[4] = step1[4];
step2[5] = step1[4];
step2[6] = step1[7];
@ -1021,8 +1018,8 @@ static INLINE void idct32x32_34_8x32_quarter_1(const __m128i *in /*in[32]*/,
// stage 5
step1[0] = step2[0];
step1[1] = step2[1];
step1[2] = step2[1];
step1[1] = step2[0];
step1[2] = step2[0];
step1[3] = step2[0];
step1[4] = step2[4];
butterfly(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64, &step1[5],
@ -1043,69 +1040,46 @@ static INLINE void idct32x32_34_8x32_quarter_1(const __m128i *in /*in[32]*/,
// For each 8x32 block __m128i in[32],
// Input with index, 2, 6
// output pixels: 8-15 in __m128i out[32]
static INLINE void idct32x32_34_8x32_quarter_2(const __m128i *in /*in[32]*/,
__m128i *out /*out[16]*/) {
static INLINE void idct32_34_8x32_quarter_2(const __m128i *const in /*in[32]*/,
__m128i *const out /*out[16]*/) {
const __m128i zero = _mm_setzero_si128();
__m128i step1[32], step2[32];
__m128i step1[16], step2[16];
// stage 2
butterfly(in[2], zero, (int)cospi_30_64, (int)cospi_2_64, &step2[8],
&step2[15]);
butterfly(zero, zero, (int)cospi_14_64, (int)cospi_18_64, &step2[9],
&step2[14]);
butterfly(zero, zero, (int)cospi_22_64, (int)cospi_10_64, &step2[10],
&step2[13]);
butterfly(zero, in[6], (int)cospi_6_64, (int)cospi_26_64, &step2[11],
&step2[12]);
// stage 3
step1[8] = _mm_add_epi16(step2[8], step2[9]);
step1[9] = _mm_sub_epi16(step2[8], step2[9]);
step1[14] = _mm_sub_epi16(step2[15], step2[14]);
step1[15] = _mm_add_epi16(step2[15], step2[14]);
step1[10] = _mm_sub_epi16(step2[11], step2[10]);
step1[11] = _mm_add_epi16(step2[11], step2[10]);
step1[12] = _mm_add_epi16(step2[12], step2[13]);
step1[13] = _mm_sub_epi16(step2[12], step2[13]);
step1[8] = step2[8];
step1[9] = step2[8];
step1[14] = step2[15];
step1[15] = step2[15];
step1[10] = step2[11];
step1[11] = step2[11];
step1[12] = step2[12];
step1[13] = step2[12];
// stage 4
step2[8] = step1[8];
step2[15] = step1[15];
butterfly(step1[14], step1[9], (int)cospi_24_64, (int)cospi_8_64, &step2[9],
&step2[14]);
butterfly(step1[13], step1[10], -(int)cospi_8_64, (int)cospi_24_64,
&step2[10], &step2[13]);
step2[11] = step1[11];
step2[12] = step1[12];
idct32_8x32_quarter_2_stage_4_to_6(step1, out);
}
// stage 5
step1[8] = _mm_add_epi16(step2[8], step2[11]);
step1[9] = _mm_add_epi16(step2[9], step2[10]);
step1[10] = _mm_sub_epi16(step2[9], step2[10]);
step1[11] = _mm_sub_epi16(step2[8], step2[11]);
step1[12] = _mm_sub_epi16(step2[15], step2[12]);
step1[13] = _mm_sub_epi16(step2[14], step2[13]);
step1[14] = _mm_add_epi16(step2[14], step2[13]);
step1[15] = _mm_add_epi16(step2[15], step2[12]);
// stage 6
out[8] = step1[8];
out[9] = step1[9];
butterfly(step1[13], step1[10], (int)cospi_16_64, (int)cospi_16_64, &out[10],
&out[13]);
butterfly(step1[12], step1[11], (int)cospi_16_64, (int)cospi_16_64, &out[11],
&out[12]);
out[14] = step1[14];
out[15] = step1[15];
static INLINE void idct32_34_8x32_quarter_1_2(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i temp[16];
idct32_34_8x32_quarter_1(in, temp);
idct32_34_8x32_quarter_2(in, temp);
// stage 7
add_sub_butterfly(temp, out, 16);
}
// For each 8x32 block __m128i in[32],
// Input with odd index, 1, 3, 5, 7, 9, 11, 13, 15
// Input with odd index, 1, 3, 5, 7
// output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void idct32x32_34_8x32_quarter_3_4(const __m128i *in /*in[32]*/,
__m128i *out /*out[32]*/) {
static INLINE void idct32_34_8x32_quarter_3_4(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
const __m128i zero = _mm_setzero_si128();
__m128i step1[32], step2[32];
__m128i step1[32];
// stage 1
butterfly(in[1], zero, (int)cospi_31_64, (int)cospi_1_64, &step1[16],
@ -1117,158 +1091,46 @@ static INLINE void idct32x32_34_8x32_quarter_3_4(const __m128i *in /*in[32]*/,
butterfly(zero, in[3], (int)cospi_3_64, (int)cospi_29_64, &step1[23],
&step1[24]);
// stage 2
step2[16] = step1[16];
step2[17] = step1[16];
step2[18] = step1[19];
step2[19] = step1[19];
step2[20] = step1[20];
step2[21] = step1[20];
step2[22] = step1[23];
step2[23] = step1[23];
step2[24] = step1[24];
step2[25] = step1[24];
step2[26] = step1[27];
step2[27] = step1[27];
step2[28] = step1[28];
step2[29] = step1[28];
step2[30] = step1[31];
step2[31] = step1[31];
// stage 3
step1[16] = step2[16];
step1[31] = step2[31];
butterfly(step2[30], step2[17], (int)cospi_28_64, (int)cospi_4_64, &step1[17],
butterfly(step1[31], step1[16], (int)cospi_28_64, (int)cospi_4_64, &step1[17],
&step1[30]);
butterfly(step2[29], step2[18], -(int)cospi_4_64, (int)cospi_28_64,
butterfly(step1[28], step1[19], -(int)cospi_4_64, (int)cospi_28_64,
&step1[18], &step1[29]);
step1[19] = step2[19];
step1[20] = step2[20];
butterfly(step2[26], step2[21], (int)cospi_12_64, (int)cospi_20_64,
butterfly(step1[27], step1[20], (int)cospi_12_64, (int)cospi_20_64,
&step1[21], &step1[26]);
butterfly(step2[25], step2[22], -(int)cospi_20_64, (int)cospi_12_64,
butterfly(step1[24], step1[23], -(int)cospi_20_64, (int)cospi_12_64,
&step1[22], &step1[25]);
step1[23] = step2[23];
step1[24] = step2[24];
step1[27] = step2[27];
step1[28] = step2[28];
// stage 4
step2[16] = _mm_add_epi16(step1[16], step1[19]);
step2[17] = _mm_add_epi16(step1[17], step1[18]);
step2[18] = _mm_sub_epi16(step1[17], step1[18]);
step2[19] = _mm_sub_epi16(step1[16], step1[19]);
step2[20] = _mm_sub_epi16(step1[23], step1[20]);
step2[21] = _mm_sub_epi16(step1[22], step1[21]);
step2[22] = _mm_add_epi16(step1[22], step1[21]);
step2[23] = _mm_add_epi16(step1[23], step1[20]);
step2[24] = _mm_add_epi16(step1[24], step1[27]);
step2[25] = _mm_add_epi16(step1[25], step1[26]);
step2[26] = _mm_sub_epi16(step1[25], step1[26]);
step2[27] = _mm_sub_epi16(step1[24], step1[27]);
step2[28] = _mm_sub_epi16(step1[31], step1[28]);
step2[29] = _mm_sub_epi16(step1[30], step1[29]);
step2[30] = _mm_add_epi16(step1[29], step1[30]);
step2[31] = _mm_add_epi16(step1[28], step1[31]);
// stage 5
step1[16] = step2[16];
step1[17] = step2[17];
butterfly(step2[29], step2[18], (int)cospi_24_64, (int)cospi_8_64, &step1[18],
&step1[29]);
butterfly(step2[28], step2[19], (int)cospi_24_64, (int)cospi_8_64, &step1[19],
&step1[28]);
butterfly(step2[27], step2[20], -(int)cospi_8_64, (int)cospi_24_64,
&step1[20], &step1[27]);
butterfly(step2[26], step2[21], -(int)cospi_8_64, (int)cospi_24_64,
&step1[21], &step1[26]);
step1[22] = step2[22];
step1[23] = step2[23];
step1[24] = step2[24];
step1[25] = step2[25];
step1[30] = step2[30];
step1[31] = step2[31];
// stage 6
step2[16] = _mm_add_epi16(step1[16], step1[23]);
step2[17] = _mm_add_epi16(step1[17], step1[22]);
step2[18] = _mm_add_epi16(step1[18], step1[21]);
step2[19] = _mm_add_epi16(step1[19], step1[20]);
step2[20] = _mm_sub_epi16(step1[19], step1[20]);
step2[21] = _mm_sub_epi16(step1[18], step1[21]);
step2[22] = _mm_sub_epi16(step1[17], step1[22]);
step2[23] = _mm_sub_epi16(step1[16], step1[23]);
step2[24] = _mm_sub_epi16(step1[31], step1[24]);
step2[25] = _mm_sub_epi16(step1[30], step1[25]);
step2[26] = _mm_sub_epi16(step1[29], step1[26]);
step2[27] = _mm_sub_epi16(step1[28], step1[27]);
step2[28] = _mm_add_epi16(step1[27], step1[28]);
step2[29] = _mm_add_epi16(step1[26], step1[29]);
step2[30] = _mm_add_epi16(step1[25], step1[30]);
step2[31] = _mm_add_epi16(step1[24], step1[31]);
// stage 7
out[16] = step2[16];
out[17] = step2[17];
out[18] = step2[18];
out[19] = step2[19];
butterfly(step2[27], step2[20], (int)cospi_16_64, (int)cospi_16_64, &out[20],
&out[27]);
butterfly(step2[26], step2[21], (int)cospi_16_64, (int)cospi_16_64, &out[21],
&out[26]);
butterfly(step2[25], step2[22], (int)cospi_16_64, (int)cospi_16_64, &out[22],
&out[25]);
butterfly(step2[24], step2[23], (int)cospi_16_64, (int)cospi_16_64, &out[23],
&out[24]);
out[28] = step2[28];
out[29] = step2[29];
out[30] = step2[30];
out[31] = step2[31];
idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);
}
static INLINE void idct32x32_34_8x32(const __m128i *in /*in[32]*/,
__m128i *out /*out[32]*/) {
__m128i step1[32], step2[32];
transpose_16bit_8x8(in, out);
idct32x32_34_8x32_quarter_1(out, step2);
idct32x32_34_8x32_quarter_2(out, step2);
// stage 7
add_sub_butterfly(step2, step1, 16);
idct32x32_34_8x32_quarter_3_4(out, step1);
void idct32_34_8x32_sse2(const __m128i *const in /*in[32]*/,
__m128i *const out /*out[32]*/) {
__m128i temp[32];
idct32_34_8x32_quarter_1_2(in, temp);
idct32_34_8x32_quarter_3_4(in, temp);
// final stage
add_sub_butterfly(step1, out, 32);
add_sub_butterfly(temp, out, 32);
}
// Only upper-left 8x8 has non-zero coeff
void vpx_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
__m128i in[8], col[32], out[32];
__m128i io[32], col[32];
int i;
// Load input data. Only need to load the top left 8x8 block.
in[0] = load_input_data8(input + 0 * 32);
in[1] = load_input_data8(input + 1 * 32);
in[2] = load_input_data8(input + 2 * 32);
in[3] = load_input_data8(input + 3 * 32);
in[4] = load_input_data8(input + 4 * 32);
in[5] = load_input_data8(input + 5 * 32);
in[6] = load_input_data8(input + 6 * 32);
in[7] = load_input_data8(input + 7 * 32);
idct32x32_34_8x32(in, col);
load_transpose_16bit_8x8(input, 32, io);
idct32_34_8x32_sse2(io, col);
for (i = 0; i < 32; i += 8) {
int j;
idct32x32_34_8x32(col + i, out);
transpose_16bit_8x8(col + i, io);
idct32_34_8x32_sse2(io, io);
for (j = 0; j < 32; ++j) {
write_buffer_8x1(dest + j * stride, out[j]);
write_buffer_8x1(dest + j * stride, io[j]);
}
dest += 8;
@ -1278,9 +1140,9 @@ void vpx_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest,
// For each 8x32 block __m128i in[32],
// Input with index, 0, 4, 8, 12, 16, 20, 24, 28
// output pixels: 0-7 in __m128i out[32]
static INLINE void idct32_full_8x32_quarter_1(const __m128i *in /*in[32]*/,
__m128i *out /*out[8]*/) {
__m128i step1[32], step2[32];
static INLINE void idct32_1024_8x32_quarter_1(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[8]*/) {
__m128i step1[8], step2[8];
// stage 3
butterfly(in[4], in[28], (int)cospi_28_64, (int)cospi_4_64, &step1[4],
@ -1322,9 +1184,9 @@ static INLINE void idct32_full_8x32_quarter_1(const __m128i *in /*in[32]*/,
// For each 8x32 block __m128i in[32],
// Input with index, 2, 6, 10, 14, 18, 22, 26, 30
// output pixels: 8-15 in __m128i out[32]
static INLINE void idct32_full_8x32_quarter_2(const __m128i *in /*in[32]*/,
__m128i *out /*out[16]*/) {
__m128i step1[32], step2[32];
static INLINE void idct32_1024_8x32_quarter_2(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[16]*/) {
__m128i step1[16], step2[16];
// stage 2
butterfly(in[2], in[30], (int)cospi_30_64, (int)cospi_2_64, &step2[8],
@ -1339,50 +1201,31 @@ static INLINE void idct32_full_8x32_quarter_2(const __m128i *in /*in[32]*/,
// stage 3
step1[8] = _mm_add_epi16(step2[8], step2[9]);
step1[9] = _mm_sub_epi16(step2[8], step2[9]);
step1[14] = _mm_sub_epi16(step2[15], step2[14]);
step1[15] = _mm_add_epi16(step2[15], step2[14]);
step1[10] = _mm_sub_epi16(step2[11], step2[10]);
step1[11] = _mm_add_epi16(step2[11], step2[10]);
step1[12] = _mm_add_epi16(step2[12], step2[13]);
step1[13] = _mm_sub_epi16(step2[12], step2[13]);
step1[14] = _mm_sub_epi16(step2[15], step2[14]);
step1[15] = _mm_add_epi16(step2[15], step2[14]);
// stage 4
step2[8] = step1[8];
step2[15] = step1[15];
butterfly(step1[14], step1[9], (int)cospi_24_64, (int)cospi_8_64, &step2[9],
&step2[14]);
butterfly(step1[13], step1[10], -(int)cospi_8_64, (int)cospi_24_64,
&step2[10], &step2[13]);
step2[11] = step1[11];
step2[12] = step1[12];
idct32_8x32_quarter_2_stage_4_to_6(step1, out);
}
// stage 5
step1[8] = _mm_add_epi16(step2[8], step2[11]);
step1[9] = _mm_add_epi16(step2[9], step2[10]);
step1[10] = _mm_sub_epi16(step2[9], step2[10]);
step1[11] = _mm_sub_epi16(step2[8], step2[11]);
step1[12] = _mm_sub_epi16(step2[15], step2[12]);
step1[13] = _mm_sub_epi16(step2[14], step2[13]);
step1[14] = _mm_add_epi16(step2[14], step2[13]);
step1[15] = _mm_add_epi16(step2[15], step2[12]);
// stage 6
out[8] = step1[8];
out[9] = step1[9];
butterfly(step1[13], step1[10], (int)cospi_16_64, (int)cospi_16_64, &out[10],
&out[13]);
butterfly(step1[12], step1[11], (int)cospi_16_64, (int)cospi_16_64, &out[11],
&out[12]);
out[14] = step1[14];
out[15] = step1[15];
static INLINE void idct32_1024_8x32_quarter_1_2(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i temp[16];
idct32_1024_8x32_quarter_1(in, temp);
idct32_1024_8x32_quarter_2(in, temp);
// stage 7
add_sub_butterfly(temp, out, 16);
}
// For each 8x32 block __m128i in[32],
// Input with odd index,
// 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
// output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void idct32_full_8x32_quarter_3_4(const __m128i *in /*in[32]*/,
__m128i *out /*out[32]*/) {
static INLINE void idct32_1024_8x32_quarter_3_4(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i step1[32], step2[32];
// stage 1
@ -1442,180 +1285,71 @@ static INLINE void idct32_full_8x32_quarter_3_4(const __m128i *in /*in[32]*/,
step1[27] = step2[27];
step1[28] = step2[28];
// stage 4
step2[16] = _mm_add_epi16(step1[16], step1[19]);
step2[17] = _mm_add_epi16(step1[17], step1[18]);
step2[18] = _mm_sub_epi16(step1[17], step1[18]);
step2[19] = _mm_sub_epi16(step1[16], step1[19]);
step2[20] = _mm_sub_epi16(step1[23], step1[20]);
step2[21] = _mm_sub_epi16(step1[22], step1[21]);
step2[22] = _mm_add_epi16(step1[22], step1[21]);
step2[23] = _mm_add_epi16(step1[23], step1[20]);
step2[24] = _mm_add_epi16(step1[24], step1[27]);
step2[25] = _mm_add_epi16(step1[25], step1[26]);
step2[26] = _mm_sub_epi16(step1[25], step1[26]);
step2[27] = _mm_sub_epi16(step1[24], step1[27]);
step2[28] = _mm_sub_epi16(step1[31], step1[28]);
step2[29] = _mm_sub_epi16(step1[30], step1[29]);
step2[30] = _mm_add_epi16(step1[29], step1[30]);
step2[31] = _mm_add_epi16(step1[28], step1[31]);
// stage 5
step1[16] = step2[16];
step1[17] = step2[17];
butterfly(step2[29], step2[18], (int)cospi_24_64, (int)cospi_8_64, &step1[18],
&step1[29]);
butterfly(step2[28], step2[19], (int)cospi_24_64, (int)cospi_8_64, &step1[19],
&step1[28]);
butterfly(step2[27], step2[20], -(int)cospi_8_64, (int)cospi_24_64,
&step1[20], &step1[27]);
butterfly(step2[26], step2[21], -(int)cospi_8_64, (int)cospi_24_64,
&step1[21], &step1[26]);
step1[22] = step2[22];
step1[23] = step2[23];
step1[24] = step2[24];
step1[25] = step2[25];
step1[30] = step2[30];
step1[31] = step2[31];
// stage 6
step2[16] = _mm_add_epi16(step1[16], step1[23]);
step2[17] = _mm_add_epi16(step1[17], step1[22]);
step2[18] = _mm_add_epi16(step1[18], step1[21]);
step2[19] = _mm_add_epi16(step1[19], step1[20]);
step2[20] = _mm_sub_epi16(step1[19], step1[20]);
step2[21] = _mm_sub_epi16(step1[18], step1[21]);
step2[22] = _mm_sub_epi16(step1[17], step1[22]);
step2[23] = _mm_sub_epi16(step1[16], step1[23]);
step2[24] = _mm_sub_epi16(step1[31], step1[24]);
step2[25] = _mm_sub_epi16(step1[30], step1[25]);
step2[26] = _mm_sub_epi16(step1[29], step1[26]);
step2[27] = _mm_sub_epi16(step1[28], step1[27]);
step2[28] = _mm_add_epi16(step1[27], step1[28]);
step2[29] = _mm_add_epi16(step1[26], step1[29]);
step2[30] = _mm_add_epi16(step1[25], step1[30]);
step2[31] = _mm_add_epi16(step1[24], step1[31]);
// stage 7
out[16] = step2[16];
out[17] = step2[17];
out[18] = step2[18];
out[19] = step2[19];
butterfly(step2[27], step2[20], (int)cospi_16_64, (int)cospi_16_64, &out[20],
&out[27]);
butterfly(step2[26], step2[21], (int)cospi_16_64, (int)cospi_16_64, &out[21],
&out[26]);
butterfly(step2[25], step2[22], (int)cospi_16_64, (int)cospi_16_64, &out[22],
&out[25]);
butterfly(step2[24], step2[23], (int)cospi_16_64, (int)cospi_16_64, &out[23],
&out[24]);
out[28] = step2[28];
out[29] = step2[29];
out[30] = step2[30];
out[31] = step2[31];
idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);
}
static void idct32_full_8x32(const __m128i *in /*in[32]*/,
__m128i *out /*out[32]*/) {
__m128i step1[32], step2[32];
idct32_full_8x32_quarter_1(in, step2);
idct32_full_8x32_quarter_2(in, step2);
// stage 7
add_sub_butterfly(step2, step1, 16);
idct32_full_8x32_quarter_3_4(in, step1);
void idct32_1024_8x32(const __m128i *const in /*in[32]*/,
__m128i *const out /*out[32]*/) {
__m128i temp[32];
idct32_1024_8x32_quarter_1_2(in, temp);
idct32_1024_8x32_quarter_3_4(in, temp);
// final stage
add_sub_butterfly(step1, out, 32);
}
static void idct32_load_buffer_8x16(const tran_low_t *input, __m128i *in) {
int i;
for (i = 0; i < 8; ++i) {
in[i] = load_input_data8(input);
in[i + 8] = load_input_data8(input + 8);
input += 32;
}
}
static void load_buffer_8x32(const tran_low_t *input, __m128i *in) {
int i;
for (i = 0; i < 8; ++i) {
in[i] = load_input_data8(input);
in[i + 8] = load_input_data8(input + 8);
in[i + 16] = load_input_data8(input + 16);
in[i + 24] = load_input_data8(input + 24);
input += 32;
}
add_sub_butterfly(temp, out, 32);
}
void vpx_idct32x32_1024_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
__m128i col[128], in[32];
__m128i col[4][32], io[32];
int i;
// rows
for (i = 0; i < 4 * 32; i += 32) {
load_buffer_8x32(input, in);
for (i = 0; i < 4; i++) {
load_transpose_16bit_8x8(&input[0], 32, &io[0]);
load_transpose_16bit_8x8(&input[8], 32, &io[8]);
load_transpose_16bit_8x8(&input[16], 32, &io[16]);
load_transpose_16bit_8x8(&input[24], 32, &io[24]);
idct32_1024_8x32(io, col[i]);
input += 32 << 3;
// Transpose 32x8 block to 8x32 block
transpose_16bit_8x8(in, in);
transpose_16bit_8x8(in + 8, in + 8);
transpose_16bit_8x8(in + 16, in + 16);
transpose_16bit_8x8(in + 24, in + 24);
idct32_full_8x32(in, col + i);
}
// columns
for (i = 0; i < 32; i += 8) {
// Transpose 32x8 block to 8x32 block
transpose_16bit_8x8(col + i, in);
transpose_16bit_8x8(col + i + 32, in + 8);
transpose_16bit_8x8(col + i + 64, in + 16);
transpose_16bit_8x8(col + i + 96, in + 24);
transpose_16bit_8x8(col[0] + i, io);
transpose_16bit_8x8(col[1] + i, io + 8);
transpose_16bit_8x8(col[2] + i, io + 16);
transpose_16bit_8x8(col[3] + i, io + 24);
idct32_full_8x32(in, in);
store_buffer_8x32(in, dest, stride);
idct32_1024_8x32(io, io);
store_buffer_8x32(io, dest, stride);
dest += 8;
}
}
void vpx_idct32x32_135_add_sse2(const tran_low_t *input, uint8_t *dest,
int stride) {
__m128i col[64], in[32];
int i, j;
__m128i col[2][32], in[32], out[32];
int i;
for (i = 16; i < 32; i++) {
in[i] = _mm_setzero_si128();
}
// rows
for (i = 0; i < 2 * 32; i += 32) {
idct32_load_buffer_8x16(input, in);
for (i = 0; i < 2; i++) {
load_transpose_16bit_8x8(&input[0], 32, &in[0]);
load_transpose_16bit_8x8(&input[8], 32, &in[8]);
idct32_1024_8x32(in, col[i]);
input += 32 << 3;
// Transpose 32x8 block to 8x32 block
transpose_16bit_8x8(in, in);
transpose_16bit_8x8(in + 8, in + 8);
for (j = 16; j < 32; j++) {
in[j] = _mm_setzero_si128();
}
idct32_full_8x32(in, col + i);
}
// columns
for (i = 0; i < 32; i += 8) {
// Transpose 32x8 block to 8x32 block
transpose_16bit_8x8(col + i, in);
transpose_16bit_8x8(col + i + 32, in + 8);
for (j = 16; j < 32; j++) {
in[j] = _mm_setzero_si128();
}
idct32_full_8x32(in, in);
store_buffer_8x32(in, dest, stride);
transpose_16bit_8x8(col[0] + i, in);
transpose_16bit_8x8(col[1] + i, in + 8);
idct32_1024_8x32(in, out);
store_buffer_8x32(out, dest, stride);
dest += 8;
}
}

View File

@ -79,14 +79,21 @@ static INLINE __m128i idct_calc_wraplow_sse2(const __m128i in0,
// Multiply elements by constants and add them together.
static INLINE void butterfly(const __m128i in0, const __m128i in1, const int c0,
const int c1, __m128i *const res0,
__m128i *const res1) {
const int c1, __m128i *const out0,
__m128i *const out1) {
const __m128i cst0 = pair_set_epi16(c0, -c1);
const __m128i cst1 = pair_set_epi16(c1, c0);
const __m128i lo = _mm_unpacklo_epi16(in0, in1);
const __m128i hi = _mm_unpackhi_epi16(in0, in1);
*res0 = idct_calc_wraplow_sse2(lo, hi, cst0);
*res1 = idct_calc_wraplow_sse2(lo, hi, cst1);
*out0 = idct_calc_wraplow_sse2(lo, hi, cst0);
*out1 = idct_calc_wraplow_sse2(lo, hi, cst1);
}
static INLINE __m128i butterfly_cospi16(const __m128i in) {
const __m128i cst = pair_set_epi16((int)cospi_16_64, (int)cospi_16_64);
const __m128i lo = _mm_unpacklo_epi16(in, _mm_setzero_si128());
const __m128i hi = _mm_unpackhi_epi16(in, _mm_setzero_si128());
return idct_calc_wraplow_sse2(lo, hi, cst);
}
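// butterfly_cospi16() is the degenerate butterfly with in1 == 0 and
// c0 == c1 == cospi_16_64: both outputs would be identical, so only a single
// vector, roughly dct_const_round_shift(in * cospi_16_64), is returned.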
// Functions to allow 8 bit optimisations to be used when profile 0 is used with
@ -111,37 +118,18 @@ static INLINE __m128i load_input_data8(const tran_low_t *data) {
#endif
}
static INLINE void load_buffer_8x8(const tran_low_t *const input,
__m128i *const in) {
in[0] = load_input_data8(input + 0 * 8);
in[1] = load_input_data8(input + 1 * 8);
in[2] = load_input_data8(input + 2 * 8);
in[3] = load_input_data8(input + 3 * 8);
in[4] = load_input_data8(input + 4 * 8);
in[5] = load_input_data8(input + 5 * 8);
in[6] = load_input_data8(input + 6 * 8);
in[7] = load_input_data8(input + 7 * 8);
}
static INLINE void load_buffer_8x16(const tran_low_t *const input,
__m128i *const in) {
in[0] = load_input_data8(input + 0 * 16);
in[1] = load_input_data8(input + 1 * 16);
in[2] = load_input_data8(input + 2 * 16);
in[3] = load_input_data8(input + 3 * 16);
in[4] = load_input_data8(input + 4 * 16);
in[5] = load_input_data8(input + 5 * 16);
in[6] = load_input_data8(input + 6 * 16);
in[7] = load_input_data8(input + 7 * 16);
in[8] = load_input_data8(input + 8 * 16);
in[9] = load_input_data8(input + 9 * 16);
in[10] = load_input_data8(input + 10 * 16);
in[11] = load_input_data8(input + 11 * 16);
in[12] = load_input_data8(input + 12 * 16);
in[13] = load_input_data8(input + 13 * 16);
in[14] = load_input_data8(input + 14 * 16);
in[15] = load_input_data8(input + 15 * 16);
static INLINE void load_transpose_16bit_8x8(const tran_low_t *input,
const int stride,
__m128i *const in) {
in[0] = load_input_data8(input + 0 * stride);
in[1] = load_input_data8(input + 1 * stride);
in[2] = load_input_data8(input + 2 * stride);
in[3] = load_input_data8(input + 3 * stride);
in[4] = load_input_data8(input + 4 * stride);
in[5] = load_input_data8(input + 5 * stride);
in[6] = load_input_data8(input + 6 * stride);
in[7] = load_input_data8(input + 7 * stride);
transpose_16bit_8x8(in, in);
}
static INLINE void recon_and_store(uint8_t *const dest, const __m128i in_x) {
@ -192,62 +180,6 @@ static INLINE void write_buffer_8x8(const __m128i *const in,
recon_and_store(dest + 7 * stride, t[7]);
}
static INLINE void write_buffer_8x16(uint8_t *const dest, __m128i *const in,
const int stride) {
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
// Final rounding and shift
in[0] = _mm_adds_epi16(in[0], final_rounding);
in[1] = _mm_adds_epi16(in[1], final_rounding);
in[2] = _mm_adds_epi16(in[2], final_rounding);
in[3] = _mm_adds_epi16(in[3], final_rounding);
in[4] = _mm_adds_epi16(in[4], final_rounding);
in[5] = _mm_adds_epi16(in[5], final_rounding);
in[6] = _mm_adds_epi16(in[6], final_rounding);
in[7] = _mm_adds_epi16(in[7], final_rounding);
in[8] = _mm_adds_epi16(in[8], final_rounding);
in[9] = _mm_adds_epi16(in[9], final_rounding);
in[10] = _mm_adds_epi16(in[10], final_rounding);
in[11] = _mm_adds_epi16(in[11], final_rounding);
in[12] = _mm_adds_epi16(in[12], final_rounding);
in[13] = _mm_adds_epi16(in[13], final_rounding);
in[14] = _mm_adds_epi16(in[14], final_rounding);
in[15] = _mm_adds_epi16(in[15], final_rounding);
in[0] = _mm_srai_epi16(in[0], 6);
in[1] = _mm_srai_epi16(in[1], 6);
in[2] = _mm_srai_epi16(in[2], 6);
in[3] = _mm_srai_epi16(in[3], 6);
in[4] = _mm_srai_epi16(in[4], 6);
in[5] = _mm_srai_epi16(in[5], 6);
in[6] = _mm_srai_epi16(in[6], 6);
in[7] = _mm_srai_epi16(in[7], 6);
in[8] = _mm_srai_epi16(in[8], 6);
in[9] = _mm_srai_epi16(in[9], 6);
in[10] = _mm_srai_epi16(in[10], 6);
in[11] = _mm_srai_epi16(in[11], 6);
in[12] = _mm_srai_epi16(in[12], 6);
in[13] = _mm_srai_epi16(in[13], 6);
in[14] = _mm_srai_epi16(in[14], 6);
in[15] = _mm_srai_epi16(in[15], 6);
recon_and_store(dest + 0 * stride, in[0]);
recon_and_store(dest + 1 * stride, in[1]);
recon_and_store(dest + 2 * stride, in[2]);
recon_and_store(dest + 3 * stride, in[3]);
recon_and_store(dest + 4 * stride, in[4]);
recon_and_store(dest + 5 * stride, in[5]);
recon_and_store(dest + 6 * stride, in[6]);
recon_and_store(dest + 7 * stride, in[7]);
recon_and_store(dest + 8 * stride, in[8]);
recon_and_store(dest + 9 * stride, in[9]);
recon_and_store(dest + 10 * stride, in[10]);
recon_and_store(dest + 11 * stride, in[11]);
recon_and_store(dest + 12 * stride, in[12]);
recon_and_store(dest + 13 * stride, in[13]);
recon_and_store(dest + 14 * stride, in[14]);
recon_and_store(dest + 15 * stride, in[15]);
}
static INLINE void recon_and_store4x4_sse2(const __m128i *const in,
uint8_t *const dest,
const int stride) {
@ -294,6 +226,14 @@ static INLINE void store_buffer_8x32(__m128i *in, uint8_t *dst, int stride) {
}
}
static INLINE void write_buffer_8x1(uint8_t *const dest, const __m128i in) {
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
__m128i out;
out = _mm_adds_epi16(in, final_rounding);
out = _mm_srai_epi16(out, 6);
recon_and_store(dest, out);
}
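Per lane, the rounding above is (x + 32) >> 6 before the usual add-to-prediction with clamping inside recon_and_store(). A scalar sketch of one 8-pixel row with the clamp written out explicitly (names here are illustrative, not the library's reconstruction path):

static void write_row_8x1_c(uint8_t *const dest, const int16_t in[8]) {
  int i;
  for (i = 0; i < 8; ++i) {
    // Round by 1 << 5, shift by 6, add to the prediction and clamp to 8 bits.
    const int v = dest[i] + ((in[i] + 32) >> 6);
    dest[i] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
  }
}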
// Only do addition and subtraction butterfly, size = 16, 32
static INLINE void add_sub_butterfly(const __m128i *in, __m128i *out,
int size) {
@@ -405,23 +345,24 @@ static INLINE void idct8x8_12_add_kernel_sse2(__m128i *const io /*io[8]*/) {
idct8(io, io);
}
static INLINE void idct16_8col(__m128i *const io /*io[16]*/) {
static INLINE void idct16_8col(const __m128i *const in /*in[16]*/,
__m128i *const out /*out[16]*/) {
__m128i step1[16], step2[16];
// stage 2
butterfly(io[1], io[15], (int)cospi_30_64, (int)cospi_2_64, &step2[8],
butterfly(in[1], in[15], (int)cospi_30_64, (int)cospi_2_64, &step2[8],
&step2[15]);
butterfly(io[9], io[7], (int)cospi_14_64, (int)cospi_18_64, &step2[9],
butterfly(in[9], in[7], (int)cospi_14_64, (int)cospi_18_64, &step2[9],
&step2[14]);
butterfly(io[5], io[11], (int)cospi_22_64, (int)cospi_10_64, &step2[10],
butterfly(in[5], in[11], (int)cospi_22_64, (int)cospi_10_64, &step2[10],
&step2[13]);
butterfly(io[13], io[3], (int)cospi_6_64, (int)cospi_26_64, &step2[11],
butterfly(in[13], in[3], (int)cospi_6_64, (int)cospi_26_64, &step2[11],
&step2[12]);
// stage 3
butterfly(io[2], io[14], (int)cospi_28_64, (int)cospi_4_64, &step1[4],
butterfly(in[2], in[14], (int)cospi_28_64, (int)cospi_4_64, &step1[4],
&step1[7]);
butterfly(io[10], io[6], (int)cospi_12_64, (int)cospi_20_64, &step1[5],
butterfly(in[10], in[6], (int)cospi_12_64, (int)cospi_20_64, &step1[5],
&step1[6]);
step1[8] = _mm_add_epi16(step2[8], step2[9]);
step1[9] = _mm_sub_epi16(step2[8], step2[9]);
@@ -433,9 +374,9 @@ static INLINE void idct16_8col(__m128i *const io /*io[16]*/) {
step1[15] = _mm_add_epi16(step2[14], step2[15]);
// stage 4
butterfly(io[0], io[8], (int)cospi_16_64, (int)cospi_16_64, &step2[1],
butterfly(in[0], in[8], (int)cospi_16_64, (int)cospi_16_64, &step2[1],
&step2[0]);
butterfly(io[4], io[12], (int)cospi_24_64, (int)cospi_8_64, &step2[2],
butterfly(in[4], in[12], (int)cospi_24_64, (int)cospi_8_64, &step2[2],
&step2[3]);
butterfly(step1[14], step1[9], (int)cospi_24_64, (int)cospi_8_64, &step2[9],
&step2[14]);
@@ -481,22 +422,22 @@ static INLINE void idct16_8col(__m128i *const io /*io[16]*/) {
&step2[11], &step2[12]);
// stage 7
io[0] = _mm_add_epi16(step2[0], step1[15]);
io[1] = _mm_add_epi16(step2[1], step1[14]);
io[2] = _mm_add_epi16(step2[2], step2[13]);
io[3] = _mm_add_epi16(step2[3], step2[12]);
io[4] = _mm_add_epi16(step2[4], step2[11]);
io[5] = _mm_add_epi16(step2[5], step2[10]);
io[6] = _mm_add_epi16(step2[6], step1[9]);
io[7] = _mm_add_epi16(step2[7], step1[8]);
io[8] = _mm_sub_epi16(step2[7], step1[8]);
io[9] = _mm_sub_epi16(step2[6], step1[9]);
io[10] = _mm_sub_epi16(step2[5], step2[10]);
io[11] = _mm_sub_epi16(step2[4], step2[11]);
io[12] = _mm_sub_epi16(step2[3], step2[12]);
io[13] = _mm_sub_epi16(step2[2], step2[13]);
io[14] = _mm_sub_epi16(step2[1], step1[14]);
io[15] = _mm_sub_epi16(step2[0], step1[15]);
out[0] = _mm_add_epi16(step2[0], step1[15]);
out[1] = _mm_add_epi16(step2[1], step1[14]);
out[2] = _mm_add_epi16(step2[2], step2[13]);
out[3] = _mm_add_epi16(step2[3], step2[12]);
out[4] = _mm_add_epi16(step2[4], step2[11]);
out[5] = _mm_add_epi16(step2[5], step2[10]);
out[6] = _mm_add_epi16(step2[6], step1[9]);
out[7] = _mm_add_epi16(step2[7], step1[8]);
out[8] = _mm_sub_epi16(step2[7], step1[8]);
out[9] = _mm_sub_epi16(step2[6], step1[9]);
out[10] = _mm_sub_epi16(step2[5], step2[10]);
out[11] = _mm_sub_epi16(step2[4], step2[11]);
out[12] = _mm_sub_epi16(step2[3], step2[12]);
out[13] = _mm_sub_epi16(step2[2], step2[13]);
out[14] = _mm_sub_epi16(step2[1], step1[14]);
out[15] = _mm_sub_epi16(step2[0], step1[15]);
}
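Every stage above leans on butterfly(), whose SSE2 body is defined earlier in this header. Conceptually it is the rotation used by the scalar reference transforms; a per-lane sketch of the assumed semantics, with the usual 14-bit fixed-point constants:

static void butterfly_c(const int16_t in0, const int16_t in1, const int c0,
                        const int c1, int16_t *const out0,
                        int16_t *const out1) {
  // Assumed semantics of butterfly():
  // out0 = round((in0 * c0 - in1 * c1) / 2^14)
  // out1 = round((in0 * c1 + in1 * c0) / 2^14)
  const int t0 = in0 * c0 - in1 * c1;
  const int t1 = in0 * c1 + in1 * c0;
  *out0 = (int16_t)((t0 + (1 << 13)) >> 14);
  *out1 = (int16_t)((t1 + (1 << 13)) >> 14);
}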
static INLINE void idct16x16_10_pass1(const __m128i *const input /*input[4]*/,
@@ -622,8 +563,7 @@ static INLINE void idct16x16_10_pass2(__m128i *const l /*l[8]*/,
&step1[7]);
// stage 4
butterfly(io[0], zero, (int)cospi_16_64, (int)cospi_16_64, &step1[1],
&step1[0]);
step1[0] = butterfly_cospi16(io[0]);
butterfly(step2[15], step2[8], (int)cospi_24_64, (int)cospi_8_64, &step2[9],
&step2[14]);
butterfly(step2[11], step2[12], -(int)cospi_8_64, -(int)cospi_24_64,
@@ -643,12 +583,12 @@ static INLINE void idct16x16_10_pass2(__m128i *const l /*l[8]*/,
// stage 6
step2[0] = _mm_add_epi16(step1[0], step1[7]);
step2[1] = _mm_add_epi16(step1[1], step1[6]);
step2[2] = _mm_add_epi16(step1[1], step1[5]);
step2[1] = _mm_add_epi16(step1[0], step1[6]);
step2[2] = _mm_add_epi16(step1[0], step1[5]);
step2[3] = _mm_add_epi16(step1[0], step1[4]);
step2[4] = _mm_sub_epi16(step1[0], step1[4]);
step2[5] = _mm_sub_epi16(step1[1], step1[5]);
step2[6] = _mm_sub_epi16(step1[1], step1[6]);
step2[5] = _mm_sub_epi16(step1[0], step1[5]);
step2[6] = _mm_sub_epi16(step1[0], step1[6]);
step2[7] = _mm_sub_epi16(step1[0], step1[7]);
butterfly(step1[13], step1[10], (int)cospi_16_64, (int)cospi_16_64,
&step2[10], &step2[13]);
@@ -674,11 +614,120 @@ static INLINE void idct16x16_10_pass2(__m128i *const l /*l[8]*/,
io[15] = _mm_sub_epi16(step2[0], step1[15]);
}
static INLINE void idct32_8x32_quarter_2_stage_4_to_6(
__m128i *const step1 /*step1[16]*/, __m128i *const out /*out[16]*/) {
__m128i step2[32];
// stage 4
step2[8] = step1[8];
step2[15] = step1[15];
butterfly(step1[14], step1[9], (int)cospi_24_64, (int)cospi_8_64, &step2[9],
&step2[14]);
butterfly(step1[13], step1[10], -(int)cospi_8_64, (int)cospi_24_64,
&step2[10], &step2[13]);
step2[11] = step1[11];
step2[12] = step1[12];
// stage 5
step1[8] = _mm_add_epi16(step2[8], step2[11]);
step1[9] = _mm_add_epi16(step2[9], step2[10]);
step1[10] = _mm_sub_epi16(step2[9], step2[10]);
step1[11] = _mm_sub_epi16(step2[8], step2[11]);
step1[12] = _mm_sub_epi16(step2[15], step2[12]);
step1[13] = _mm_sub_epi16(step2[14], step2[13]);
step1[14] = _mm_add_epi16(step2[14], step2[13]);
step1[15] = _mm_add_epi16(step2[15], step2[12]);
// stage 6
out[8] = step1[8];
out[9] = step1[9];
butterfly(step1[13], step1[10], (int)cospi_16_64, (int)cospi_16_64, &out[10],
&out[13]);
butterfly(step1[12], step1[11], (int)cospi_16_64, (int)cospi_16_64, &out[11],
&out[12]);
out[14] = step1[14];
out[15] = step1[15];
}
static INLINE void idct32_8x32_quarter_3_4_stage_4_to_7(
__m128i *const step1 /*step1[32]*/, __m128i *const out /*out[32]*/) {
__m128i step2[32];
// stage 4
step2[16] = _mm_add_epi16(step1[16], step1[19]);
step2[17] = _mm_add_epi16(step1[17], step1[18]);
step2[18] = _mm_sub_epi16(step1[17], step1[18]);
step2[19] = _mm_sub_epi16(step1[16], step1[19]);
step2[20] = _mm_sub_epi16(step1[23], step1[20]);
step2[21] = _mm_sub_epi16(step1[22], step1[21]);
step2[22] = _mm_add_epi16(step1[22], step1[21]);
step2[23] = _mm_add_epi16(step1[23], step1[20]);
step2[24] = _mm_add_epi16(step1[24], step1[27]);
step2[25] = _mm_add_epi16(step1[25], step1[26]);
step2[26] = _mm_sub_epi16(step1[25], step1[26]);
step2[27] = _mm_sub_epi16(step1[24], step1[27]);
step2[28] = _mm_sub_epi16(step1[31], step1[28]);
step2[29] = _mm_sub_epi16(step1[30], step1[29]);
step2[30] = _mm_add_epi16(step1[29], step1[30]);
step2[31] = _mm_add_epi16(step1[28], step1[31]);
// stage 5
step1[16] = step2[16];
step1[17] = step2[17];
butterfly(step2[29], step2[18], (int)cospi_24_64, (int)cospi_8_64, &step1[18],
&step1[29]);
butterfly(step2[28], step2[19], (int)cospi_24_64, (int)cospi_8_64, &step1[19],
&step1[28]);
butterfly(step2[27], step2[20], -(int)cospi_8_64, (int)cospi_24_64,
&step1[20], &step1[27]);
butterfly(step2[26], step2[21], -(int)cospi_8_64, (int)cospi_24_64,
&step1[21], &step1[26]);
step1[22] = step2[22];
step1[23] = step2[23];
step1[24] = step2[24];
step1[25] = step2[25];
step1[30] = step2[30];
step1[31] = step2[31];
// stage 6
out[16] = _mm_add_epi16(step1[16], step1[23]);
out[17] = _mm_add_epi16(step1[17], step1[22]);
out[18] = _mm_add_epi16(step1[18], step1[21]);
out[19] = _mm_add_epi16(step1[19], step1[20]);
step2[20] = _mm_sub_epi16(step1[19], step1[20]);
step2[21] = _mm_sub_epi16(step1[18], step1[21]);
step2[22] = _mm_sub_epi16(step1[17], step1[22]);
step2[23] = _mm_sub_epi16(step1[16], step1[23]);
step2[24] = _mm_sub_epi16(step1[31], step1[24]);
step2[25] = _mm_sub_epi16(step1[30], step1[25]);
step2[26] = _mm_sub_epi16(step1[29], step1[26]);
step2[27] = _mm_sub_epi16(step1[28], step1[27]);
out[28] = _mm_add_epi16(step1[27], step1[28]);
out[29] = _mm_add_epi16(step1[26], step1[29]);
out[30] = _mm_add_epi16(step1[25], step1[30]);
out[31] = _mm_add_epi16(step1[24], step1[31]);
// stage 7
butterfly(step2[27], step2[20], (int)cospi_16_64, (int)cospi_16_64, &out[20],
&out[27]);
butterfly(step2[26], step2[21], (int)cospi_16_64, (int)cospi_16_64, &out[21],
&out[26]);
butterfly(step2[25], step2[22], (int)cospi_16_64, (int)cospi_16_64, &out[22],
&out[25]);
butterfly(step2[24], step2[23], (int)cospi_16_64, (int)cospi_16_64, &out[23],
&out[24]);
}
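When both constants are cospi_16_64, as in stage 6 and stage 7 above, the rotation degenerates into a scaled sum and difference. A scalar sketch of that special case (cospi_16_64 is round(2^14 * cos(pi/4)) = 11585; the function name is illustrative):

static void butterfly_cospi16_c(const int16_t a, const int16_t b,
                                int16_t *const out0, int16_t *const out1) {
  // Special case of the assumed butterfly() semantics with c0 == c1.
  const int c16 = 11585;  // cospi_16_64
  *out0 = (int16_t)(((a - b) * c16 + (1 << 13)) >> 14);  // ~ (a - b) * cos(pi/4)
  *out1 = (int16_t)(((a + b) * c16 + (1 << 13)) >> 14);  // ~ (a + b) * cos(pi/4)
}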
void idct4_sse2(__m128i *in);
void idct8_sse2(__m128i *in);
void idct16_sse2(__m128i *in0, __m128i *in1);
void iadst4_sse2(__m128i *in);
void iadst8_sse2(__m128i *in);
void iadst16_sse2(__m128i *in0, __m128i *in1);
void idct32_1024_8x32(const __m128i *const in, __m128i *const out);
void idct32_34_8x32_sse2(const __m128i *const in, __m128i *const out);
void idct32_34_8x32_ssse3(const __m128i *const in, __m128i *const out);
#endif // VPX_DSP_X86_INV_TXFM_SSE2_H_

View File

@@ -16,6 +20 @@
#include "vpx_dsp/x86/transpose_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"
static INLINE void partial_butterfly_ssse3(const __m128i in, const int c0,
const int c1, __m128i *const out0,
__m128i *const out1) {
const __m128i cst0 = _mm_set1_epi16(2 * c0);
const __m128i cst1 = _mm_set1_epi16(2 * c1);
*out0 = _mm_mulhrs_epi16(in, cst0);
*out1 = _mm_mulhrs_epi16(in, cst1);
}
static INLINE __m128i partial_butterfly_cospi16_ssse3(const __m128i in) {
const __m128i coef_pair = _mm_set1_epi16(2 * (int)cospi_16_64);
return _mm_mulhrs_epi16(in, coef_pair);
}
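These SSSE3 helpers rely on _mm_mulhrs_epi16(), which returns round(a * b / 2^15) per 16-bit lane; doubling the cosine constant therefore yields the same value as the full butterfly rounded at 14 bits when the second input is zero, which is exactly the situation in the partial (34/135-coefficient) transforms. A scalar sketch of the per-lane arithmetic, for reference only:

static void partial_butterfly_c(const int16_t in, const int c0, const int c1,
                                int16_t *const out0, int16_t *const out1) {
  // Same result as the assumed butterfly(in, 0, c0, c1, out0, out1).
  *out0 = (int16_t)((in * c0 + (1 << 13)) >> 14);
  *out1 = (int16_t)((in * c1 + (1 << 13)) >> 14);
}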
void vpx_idct8x8_12_add_ssse3(const tran_low_t *input, uint8_t *dest,
int stride) {
__m128i io[8];
@@ -29,531 +43,327 @@ void vpx_idct8x8_12_add_ssse3(const tran_low_t *input, uint8_t *dest,
write_buffer_8x8(io, dest, stride);
}
static void idct32_34_first_half(const __m128i *in, __m128i *stp1) {
const __m128i stk2_0 = pair_set_epi16(2 * cospi_30_64, 2 * cospi_30_64);
const __m128i stk2_1 = pair_set_epi16(2 * cospi_2_64, 2 * cospi_2_64);
const __m128i stk2_6 = pair_set_epi16(-2 * cospi_26_64, -2 * cospi_26_64);
const __m128i stk2_7 = pair_set_epi16(2 * cospi_6_64, 2 * cospi_6_64);
// Group the coefficient calculation into smaller functions to prevent stack
// spillover in 32x32 idct optimizations:
// quarter_1: 0-7
// quarter_2: 8-15
// quarter_3_4: 16-23, 24-31
const __m128i stk3_0 = pair_set_epi16(2 * cospi_28_64, 2 * cospi_28_64);
const __m128i stk3_1 = pair_set_epi16(2 * cospi_4_64, 2 * cospi_4_64);
// For each 8x32 block __m128i in[32],
// Input with index 0, 4
// Output pixels: 0-7 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_1(const __m128i *const in /*in[32]*/,
__m128i *const out /*out[8]*/) {
__m128i step1[8], step2[8];
const __m128i stk4_0 = pair_set_epi16(2 * cospi_16_64, 2 * cospi_16_64);
__m128i u0, u1, u2, u3, u4, u5, u6, u7;
__m128i x0, x1, x4, x5, x6, x7;
__m128i v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15;
// stage 3
partial_butterfly_ssse3(in[4], (int)cospi_28_64, (int)cospi_4_64, &step1[4],
&step1[7]);
// phase 1
// stage 4
step2[0] = partial_butterfly_cospi16_ssse3(in[0]);
step2[4] = step1[4];
step2[5] = step1[4];
step2[6] = step1[7];
step2[7] = step1[7];
// 0, 15
u2 = _mm_mulhrs_epi16(in[2], stk2_1); // stp2_15
u3 = _mm_mulhrs_epi16(in[6], stk2_7); // stp2_12
v15 = _mm_add_epi16(u2, u3);
// in[0], in[4]
x0 = _mm_mulhrs_epi16(in[0], stk4_0); // stp1[0]
x7 = _mm_mulhrs_epi16(in[4], stk3_1); // stp1[7]
v0 = _mm_add_epi16(x0, x7); // stp2_0
stp1[0] = _mm_add_epi16(v0, v15);
stp1[15] = _mm_sub_epi16(v0, v15);
// stage 5
step1[0] = step2[0];
step1[1] = step2[0];
step1[2] = step2[0];
step1[3] = step2[0];
step1[4] = step2[4];
butterfly(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64, &step1[5],
&step1[6]);
step1[7] = step2[7];
// in[2], in[6]
u0 = _mm_mulhrs_epi16(in[2], stk2_0); // stp2_8
u1 = _mm_mulhrs_epi16(in[6], stk2_6); // stp2_11
butterfly(u2, u0, (int)cospi_24_64, (int)cospi_8_64, &u4,
&u5); // stp2_9, stp2_14
butterfly(u3, u1, -(int)cospi_8_64, (int)cospi_24_64, &u6,
&u7); // stp2_10, stp2_13
v8 = _mm_add_epi16(u0, u1);
v9 = _mm_add_epi16(u4, u6);
v10 = _mm_sub_epi16(u4, u6);
v11 = _mm_sub_epi16(u0, u1);
v12 = _mm_sub_epi16(u2, u3);
v13 = _mm_sub_epi16(u5, u7);
v14 = _mm_add_epi16(u5, u7);
butterfly(v13, v10, (int)cospi_16_64, (int)cospi_16_64, &v10, &v13);
butterfly(v12, v11, (int)cospi_16_64, (int)cospi_16_64, &v11, &v12);
// 1, 14
x1 = _mm_mulhrs_epi16(in[0], stk4_0); // stp1[1], stk4_1 = stk4_0
// stp1[2] = stp1[0], stp1[3] = stp1[1]
x4 = _mm_mulhrs_epi16(in[4], stk3_0); // stp1[4]
butterfly(x7, x4, (int)cospi_16_64, (int)cospi_16_64, &x5, &x6);
v1 = _mm_add_epi16(x1, x6); // stp2_1
v2 = _mm_add_epi16(x0, x5); // stp2_2
stp1[1] = _mm_add_epi16(v1, v14);
stp1[14] = _mm_sub_epi16(v1, v14);
stp1[2] = _mm_add_epi16(v2, v13);
stp1[13] = _mm_sub_epi16(v2, v13);
v3 = _mm_add_epi16(x1, x4); // stp2_3
v4 = _mm_sub_epi16(x1, x4); // stp2_4
v5 = _mm_sub_epi16(x0, x5); // stp2_5
v6 = _mm_sub_epi16(x1, x6); // stp2_6
v7 = _mm_sub_epi16(x0, x7); // stp2_7
stp1[3] = _mm_add_epi16(v3, v12);
stp1[12] = _mm_sub_epi16(v3, v12);
stp1[6] = _mm_add_epi16(v6, v9);
stp1[9] = _mm_sub_epi16(v6, v9);
stp1[7] = _mm_add_epi16(v7, v8);
stp1[8] = _mm_sub_epi16(v7, v8);
stp1[4] = _mm_add_epi16(v4, v11);
stp1[11] = _mm_sub_epi16(v4, v11);
stp1[5] = _mm_add_epi16(v5, v10);
stp1[10] = _mm_sub_epi16(v5, v10);
// stage 6
out[0] = _mm_add_epi16(step1[0], step1[7]);
out[1] = _mm_add_epi16(step1[1], step1[6]);
out[2] = _mm_add_epi16(step1[2], step1[5]);
out[3] = _mm_add_epi16(step1[3], step1[4]);
out[4] = _mm_sub_epi16(step1[3], step1[4]);
out[5] = _mm_sub_epi16(step1[2], step1[5]);
out[6] = _mm_sub_epi16(step1[1], step1[6]);
out[7] = _mm_sub_epi16(step1[0], step1[7]);
}
static void idct32_34_second_half(const __m128i *in, __m128i *stp1) {
const __m128i stk1_0 = pair_set_epi16(2 * cospi_31_64, 2 * cospi_31_64);
const __m128i stk1_1 = pair_set_epi16(2 * cospi_1_64, 2 * cospi_1_64);
const __m128i stk1_6 = pair_set_epi16(-2 * cospi_25_64, -2 * cospi_25_64);
const __m128i stk1_7 = pair_set_epi16(2 * cospi_7_64, 2 * cospi_7_64);
const __m128i stk1_8 = pair_set_epi16(2 * cospi_27_64, 2 * cospi_27_64);
const __m128i stk1_9 = pair_set_epi16(2 * cospi_5_64, 2 * cospi_5_64);
const __m128i stk1_14 = pair_set_epi16(-2 * cospi_29_64, -2 * cospi_29_64);
const __m128i stk1_15 = pair_set_epi16(2 * cospi_3_64, 2 * cospi_3_64);
__m128i v16, v17, v18, v19, v20, v21, v22, v23;
__m128i v24, v25, v26, v27, v28, v29, v30, v31;
__m128i u16, u17, u18, u19, u20, u21, u22, u23;
__m128i u24, u25, u26, u27, u28, u29, u30, u31;
// For each 8x32 block __m128i in[32],
// Input with index 2, 6
// Output pixels: 8-15 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_2(const __m128i *const in /*in[32]*/,
__m128i *const out /*out[16]*/) {
__m128i step1[16], step2[16];
v16 = _mm_mulhrs_epi16(in[1], stk1_0);
v31 = _mm_mulhrs_epi16(in[1], stk1_1);
// stage 2
partial_butterfly_ssse3(in[2], (int)cospi_30_64, (int)cospi_2_64, &step2[8],
&step2[15]);
partial_butterfly_ssse3(in[6], -(int)cospi_26_64, (int)cospi_6_64, &step2[11],
&step2[12]);
v19 = _mm_mulhrs_epi16(in[7], stk1_6);
v28 = _mm_mulhrs_epi16(in[7], stk1_7);
// stage 3
step1[8] = step2[8];
step1[9] = step2[8];
step1[14] = step2[15];
step1[15] = step2[15];
step1[10] = step2[11];
step1[11] = step2[11];
step1[12] = step2[12];
step1[13] = step2[12];
v20 = _mm_mulhrs_epi16(in[5], stk1_8);
v27 = _mm_mulhrs_epi16(in[5], stk1_9);
idct32_8x32_quarter_2_stage_4_to_6(step1, out);
}
v23 = _mm_mulhrs_epi16(in[3], stk1_14);
v24 = _mm_mulhrs_epi16(in[3], stk1_15);
static INLINE void idct32_34_8x32_quarter_1_2(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i temp[16];
idct32_34_8x32_quarter_1(in, temp);
idct32_34_8x32_quarter_2(in, temp);
// stage 7
add_sub_butterfly(temp, out, 16);
}
butterfly(v31, v16, (int)cospi_28_64, (int)cospi_4_64, &v17, &v30);
butterfly(v28, v19, -(int)cospi_4_64, (int)cospi_28_64, &v18, &v29);
butterfly(v27, v20, (int)cospi_12_64, (int)cospi_20_64, &v21, &v26);
butterfly(v24, v23, -(int)cospi_20_64, (int)cospi_12_64, &v22, &v25);
// For each 8x32 block __m128i in[32],
// Input with odd index 1, 3, 5, 7
// Output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_3_4(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i step1[32];
u16 = _mm_add_epi16(v16, v19);
u17 = _mm_add_epi16(v17, v18);
u18 = _mm_sub_epi16(v17, v18);
u19 = _mm_sub_epi16(v16, v19);
u20 = _mm_sub_epi16(v23, v20);
u21 = _mm_sub_epi16(v22, v21);
u22 = _mm_add_epi16(v22, v21);
u23 = _mm_add_epi16(v23, v20);
u24 = _mm_add_epi16(v24, v27);
u27 = _mm_sub_epi16(v24, v27);
u25 = _mm_add_epi16(v25, v26);
u26 = _mm_sub_epi16(v25, v26);
u28 = _mm_sub_epi16(v31, v28);
u31 = _mm_add_epi16(v28, v31);
u29 = _mm_sub_epi16(v30, v29);
u30 = _mm_add_epi16(v29, v30);
// stage 1
partial_butterfly_ssse3(in[1], (int)cospi_31_64, (int)cospi_1_64, &step1[16],
&step1[31]);
partial_butterfly_ssse3(in[7], -(int)cospi_25_64, (int)cospi_7_64, &step1[19],
&step1[28]);
partial_butterfly_ssse3(in[5], (int)cospi_27_64, (int)cospi_5_64, &step1[20],
&step1[27]);
partial_butterfly_ssse3(in[3], -(int)cospi_29_64, (int)cospi_3_64, &step1[23],
&step1[24]);
butterfly(u29, u18, (int)cospi_24_64, (int)cospi_8_64, &u18, &u29);
butterfly(u28, u19, (int)cospi_24_64, (int)cospi_8_64, &u19, &u28);
butterfly(u27, u20, -(int)cospi_8_64, (int)cospi_24_64, &u20, &u27);
butterfly(u26, u21, -(int)cospi_8_64, (int)cospi_24_64, &u21, &u26);
// stage 3
butterfly(step1[31], step1[16], (int)cospi_28_64, (int)cospi_4_64, &step1[17],
&step1[30]);
butterfly(step1[28], step1[19], -(int)cospi_4_64, (int)cospi_28_64,
&step1[18], &step1[29]);
butterfly(step1[27], step1[20], (int)cospi_12_64, (int)cospi_20_64,
&step1[21], &step1[26]);
butterfly(step1[24], step1[23], -(int)cospi_20_64, (int)cospi_12_64,
&step1[22], &step1[25]);
stp1[16] = _mm_add_epi16(u16, u23);
stp1[23] = _mm_sub_epi16(u16, u23);
idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);
}
stp1[17] = _mm_add_epi16(u17, u22);
stp1[22] = _mm_sub_epi16(u17, u22);
void idct32_34_8x32_ssse3(const __m128i *const in /*in[32]*/,
__m128i *const out /*out[32]*/) {
__m128i temp[32];
stp1[18] = _mm_add_epi16(u18, u21);
stp1[21] = _mm_sub_epi16(u18, u21);
stp1[19] = _mm_add_epi16(u19, u20);
stp1[20] = _mm_sub_epi16(u19, u20);
stp1[24] = _mm_sub_epi16(u31, u24);
stp1[31] = _mm_add_epi16(u24, u31);
stp1[25] = _mm_sub_epi16(u30, u25);
stp1[30] = _mm_add_epi16(u25, u30);
stp1[26] = _mm_sub_epi16(u29, u26);
stp1[29] = _mm_add_epi16(u26, u29);
stp1[27] = _mm_sub_epi16(u28, u27);
stp1[28] = _mm_add_epi16(u27, u28);
butterfly(stp1[27], stp1[20], (int)cospi_16_64, (int)cospi_16_64, &stp1[20],
&stp1[27]);
butterfly(stp1[26], stp1[21], (int)cospi_16_64, (int)cospi_16_64, &stp1[21],
&stp1[26]);
butterfly(stp1[25], stp1[22], (int)cospi_16_64, (int)cospi_16_64, &stp1[22],
&stp1[25]);
butterfly(stp1[24], stp1[23], (int)cospi_16_64, (int)cospi_16_64, &stp1[23],
&stp1[24]);
idct32_34_8x32_quarter_1_2(in, temp);
idct32_34_8x32_quarter_3_4(in, temp);
// final stage
add_sub_butterfly(temp, out, 32);
}
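add_sub_butterfly() (declared near the top of inv_txfm_sse2.h; its body is outside this hunk) performs the mirrored add/sub folding used for stage 7 of the quarter_1_2 helpers and for the final stage above. A scalar sketch of the assumed behavior (name is illustrative):

static void add_sub_butterfly_c(const int16_t *const in, int16_t *const out,
                                const int size) {
  // Assumed behavior: fold element i against its mirror element size-1-i.
  int i;
  for (i = 0; i < size / 2; ++i) {
    out[i] = in[i] + in[size - 1 - i];
    out[size - 1 - i] = in[i] - in[size - 1 - i];
  }
}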
// Only upper-left 8x8 has non-zero coeff
void vpx_idct32x32_34_add_ssse3(const tran_low_t *input, uint8_t *dest,
int stride) {
const __m128i final_rounding = _mm_set1_epi16(1 << 5);
__m128i in[32], col[32];
__m128i stp1[32];
__m128i io[32], col[32];
int i;
// Load input data. Only need to load the top left 8x8 block.
in[0] = load_input_data8(input + 0 * 32);
in[1] = load_input_data8(input + 1 * 32);
in[2] = load_input_data8(input + 2 * 32);
in[3] = load_input_data8(input + 3 * 32);
in[4] = load_input_data8(input + 4 * 32);
in[5] = load_input_data8(input + 5 * 32);
in[6] = load_input_data8(input + 6 * 32);
in[7] = load_input_data8(input + 7 * 32);
load_transpose_16bit_8x8(input, 32, io);
idct32_34_8x32_ssse3(io, col);
transpose_16bit_8x8(in, in);
idct32_34_first_half(in, stp1);
idct32_34_second_half(in, stp1);
// 1_D: Store 32 intermediate results for each 8x32 block.
add_sub_butterfly(stp1, col, 32);
for (i = 0; i < 32; i += 8) {
int j;
// Transpose 32x8 block to 8x32 block
transpose_16bit_8x8(col + i, in);
idct32_34_first_half(in, stp1);
idct32_34_second_half(in, stp1);
transpose_16bit_8x8(col + i, io);
idct32_34_8x32_ssse3(io, io);
// 2_D: Calculate the results and store them to destination.
add_sub_butterfly(stp1, in, 32);
for (j = 0; j < 32; ++j) {
// Final rounding and shift
in[j] = _mm_adds_epi16(in[j], final_rounding);
in[j] = _mm_srai_epi16(in[j], 6);
recon_and_store(dest + j * stride, in[j]);
write_buffer_8x1(dest + j * stride, io[j]);
}
dest += 8;
}
}
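The function above follows the usual row-pass/column-pass decomposition of the separable 2-D transform, with the transposes folded into the loads and into the per-8-column loop. A plain scalar outline of that structure; the intermediate rounding, range handling, and final add-to-prediction that the real code performs are omitted, and the names are illustrative:

typedef void (*idct32_1d_fn)(const int16_t *in, int16_t *out);

static void idct32x32_2d_outline(const int16_t coeff[32 * 32],
                                 int16_t out[32 * 32], idct32_1d_fn idct1d) {
  // Illustrative outline only: 1-D transform on rows, transpose, 1-D
  // transform again (i.e. on the original columns), transpose back.
  int16_t tmp[32 * 32];
  int i, j;
  for (i = 0; i < 32; ++i) idct1d(coeff + i * 32, tmp + i * 32);  // pass 1
  for (i = 0; i < 32; ++i)
    for (j = 0; j < 32; ++j) out[j * 32 + i] = tmp[i * 32 + j];   // transpose
  for (i = 0; i < 32; ++i) idct1d(out + i * 32, tmp + i * 32);    // pass 2
  for (i = 0; i < 32; ++i)
    for (j = 0; j < 32; ++j) out[j * 32 + i] = tmp[i * 32 + j];   // transpose
}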
// in0[16] represents the left 8x16 block
// in1[16] represents the right 8x16 block
static void load_buffer_16x16(const tran_low_t *input, __m128i *in0,
__m128i *in1) {
int i;
for (i = 0; i < 16; i++) {
in0[i] = load_input_data8(input);
in1[i] = load_input_data8(input + 8);
input += 32;
}
// For each 8x32 block __m128i in[32],
// Input with index 0, 4, 8, 12
// Output pixels: 0-7 in __m128i out[32]
static INLINE void idct32_135_8x32_quarter_1(const __m128i *const in /*in[32]*/,
__m128i *const out /*out[8]*/) {
__m128i step1[8], step2[8];
// stage 3
partial_butterfly_ssse3(in[4], (int)cospi_28_64, (int)cospi_4_64, &step1[4],
&step1[7]);
partial_butterfly_ssse3(in[12], -(int)cospi_20_64, (int)cospi_12_64,
&step1[5], &step1[6]);
// stage 4
step2[0] = partial_butterfly_cospi16_ssse3(in[0]);
partial_butterfly_ssse3(in[8], (int)cospi_24_64, (int)cospi_8_64, &step2[2],
&step2[3]);
step2[4] = _mm_add_epi16(step1[4], step1[5]);
step2[5] = _mm_sub_epi16(step1[4], step1[5]);
step2[6] = _mm_sub_epi16(step1[7], step1[6]);
step2[7] = _mm_add_epi16(step1[7], step1[6]);
// stage 5
step1[0] = _mm_add_epi16(step2[0], step2[3]);
step1[1] = _mm_add_epi16(step2[0], step2[2]);
step1[2] = _mm_sub_epi16(step2[0], step2[2]);
step1[3] = _mm_sub_epi16(step2[0], step2[3]);
step1[4] = step2[4];
butterfly(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64, &step1[5],
&step1[6]);
step1[7] = step2[7];
// stage 6
out[0] = _mm_add_epi16(step1[0], step1[7]);
out[1] = _mm_add_epi16(step1[1], step1[6]);
out[2] = _mm_add_epi16(step1[2], step1[5]);
out[3] = _mm_add_epi16(step1[3], step1[4]);
out[4] = _mm_sub_epi16(step1[3], step1[4]);
out[5] = _mm_sub_epi16(step1[2], step1[5]);
out[6] = _mm_sub_epi16(step1[1], step1[6]);
out[7] = _mm_sub_epi16(step1[0], step1[7]);
}
// Group the coefficient calculation into smaller functions
// to prevent stack spillover:
// quarter_1: 0-7
// quarter_2: 8-15
// quarter_3_4: 16-23, 24-31
static void idct32_8x32_135_quarter_1(const __m128i *in /*in[16]*/,
__m128i *out /*out[8]*/) {
__m128i u0, u1, u2, u3, u4, u5, u6, u7;
__m128i v0, v1, v2, v3, v4, v5, v6, v7;
// For each 8x32 block __m128i in[32],
// Input with index 2, 6, 10, 14
// Output pixels: 8-15 in __m128i out[32]
static INLINE void idct32_135_8x32_quarter_2(const __m128i *const in /*in[32]*/,
__m128i *const out /*out[16]*/) {
__m128i step1[16], step2[16];
{
const __m128i stk4_0 = pair_set_epi16(2 * cospi_16_64, 2 * cospi_16_64);
const __m128i stk4_2 = pair_set_epi16(2 * cospi_24_64, 2 * cospi_24_64);
const __m128i stk4_3 = pair_set_epi16(2 * cospi_8_64, 2 * cospi_8_64);
u0 = _mm_mulhrs_epi16(in[0], stk4_0);
u2 = _mm_mulhrs_epi16(in[8], stk4_2);
u3 = _mm_mulhrs_epi16(in[8], stk4_3);
u1 = u0;
}
// stage 2
partial_butterfly_ssse3(in[2], (int)cospi_30_64, (int)cospi_2_64, &step2[8],
&step2[15]);
partial_butterfly_ssse3(in[14], -(int)cospi_18_64, (int)cospi_14_64,
&step2[9], &step2[14]);
partial_butterfly_ssse3(in[10], (int)cospi_22_64, (int)cospi_10_64,
&step2[10], &step2[13]);
partial_butterfly_ssse3(in[6], -(int)cospi_26_64, (int)cospi_6_64, &step2[11],
&step2[12]);
v0 = _mm_add_epi16(u0, u3);
v1 = _mm_add_epi16(u1, u2);
v2 = _mm_sub_epi16(u1, u2);
v3 = _mm_sub_epi16(u0, u3);
// stage 3
step1[8] = _mm_add_epi16(step2[8], step2[9]);
step1[9] = _mm_sub_epi16(step2[8], step2[9]);
step1[10] = _mm_sub_epi16(step2[11], step2[10]);
step1[11] = _mm_add_epi16(step2[11], step2[10]);
step1[12] = _mm_add_epi16(step2[12], step2[13]);
step1[13] = _mm_sub_epi16(step2[12], step2[13]);
step1[14] = _mm_sub_epi16(step2[15], step2[14]);
step1[15] = _mm_add_epi16(step2[15], step2[14]);
{
const __m128i stk3_0 = pair_set_epi16(2 * cospi_28_64, 2 * cospi_28_64);
const __m128i stk3_1 = pair_set_epi16(2 * cospi_4_64, 2 * cospi_4_64);
const __m128i stk3_2 = pair_set_epi16(-2 * cospi_20_64, -2 * cospi_20_64);
const __m128i stk3_3 = pair_set_epi16(2 * cospi_12_64, 2 * cospi_12_64);
u4 = _mm_mulhrs_epi16(in[4], stk3_0);
u7 = _mm_mulhrs_epi16(in[4], stk3_1);
u5 = _mm_mulhrs_epi16(in[12], stk3_2);
u6 = _mm_mulhrs_epi16(in[12], stk3_3);
}
v4 = _mm_add_epi16(u4, u5);
v5 = _mm_sub_epi16(u4, u5);
v6 = _mm_sub_epi16(u7, u6);
v7 = _mm_add_epi16(u7, u6);
butterfly(v6, v5, (int)cospi_16_64, (int)cospi_16_64, &v5, &v6);
out[0] = _mm_add_epi16(v0, v7);
out[1] = _mm_add_epi16(v1, v6);
out[2] = _mm_add_epi16(v2, v5);
out[3] = _mm_add_epi16(v3, v4);
out[4] = _mm_sub_epi16(v3, v4);
out[5] = _mm_sub_epi16(v2, v5);
out[6] = _mm_sub_epi16(v1, v6);
out[7] = _mm_sub_epi16(v0, v7);
idct32_8x32_quarter_2_stage_4_to_6(step1, out);
}
static void idct32_8x32_135_quarter_2(const __m128i *in /*in[16]*/,
__m128i *out /*out[8]*/) {
__m128i u8, u9, u10, u11, u12, u13, u14, u15;
__m128i v8, v9, v10, v11, v12, v13, v14, v15;
{
const __m128i stk2_0 = pair_set_epi16(2 * cospi_30_64, 2 * cospi_30_64);
const __m128i stk2_1 = pair_set_epi16(2 * cospi_2_64, 2 * cospi_2_64);
const __m128i stk2_2 = pair_set_epi16(-2 * cospi_18_64, -2 * cospi_18_64);
const __m128i stk2_3 = pair_set_epi16(2 * cospi_14_64, 2 * cospi_14_64);
const __m128i stk2_4 = pair_set_epi16(2 * cospi_22_64, 2 * cospi_22_64);
const __m128i stk2_5 = pair_set_epi16(2 * cospi_10_64, 2 * cospi_10_64);
const __m128i stk2_6 = pair_set_epi16(-2 * cospi_26_64, -2 * cospi_26_64);
const __m128i stk2_7 = pair_set_epi16(2 * cospi_6_64, 2 * cospi_6_64);
u8 = _mm_mulhrs_epi16(in[2], stk2_0);
u15 = _mm_mulhrs_epi16(in[2], stk2_1);
u9 = _mm_mulhrs_epi16(in[14], stk2_2);
u14 = _mm_mulhrs_epi16(in[14], stk2_3);
u10 = _mm_mulhrs_epi16(in[10], stk2_4);
u13 = _mm_mulhrs_epi16(in[10], stk2_5);
u11 = _mm_mulhrs_epi16(in[6], stk2_6);
u12 = _mm_mulhrs_epi16(in[6], stk2_7);
}
v8 = _mm_add_epi16(u8, u9);
v9 = _mm_sub_epi16(u8, u9);
v10 = _mm_sub_epi16(u11, u10);
v11 = _mm_add_epi16(u11, u10);
v12 = _mm_add_epi16(u12, u13);
v13 = _mm_sub_epi16(u12, u13);
v14 = _mm_sub_epi16(u15, u14);
v15 = _mm_add_epi16(u15, u14);
butterfly(v14, v9, (int)cospi_24_64, (int)cospi_8_64, &v9, &v14);
butterfly(v13, v10, -(int)cospi_8_64, (int)cospi_24_64, &v10, &v13);
out[0] = _mm_add_epi16(v8, v11);
out[1] = _mm_add_epi16(v9, v10);
out[2] = _mm_sub_epi16(v9, v10);
out[3] = _mm_sub_epi16(v8, v11);
out[4] = _mm_sub_epi16(v15, v12);
out[5] = _mm_sub_epi16(v14, v13);
out[6] = _mm_add_epi16(v14, v13);
out[7] = _mm_add_epi16(v15, v12);
butterfly(out[5], out[2], (int)cospi_16_64, (int)cospi_16_64, &out[2],
&out[5]);
butterfly(out[4], out[3], (int)cospi_16_64, (int)cospi_16_64, &out[3],
&out[4]);
}
// For each 8x32 block, the 8 even-indexed inputs of in[16]
// produce the first 16 outputs in out[32]
static void idct32_8x32_quarter_1_2(const __m128i *in /*in[16]*/,
__m128i *out /*out[32]*/) {
static INLINE void idct32_135_8x32_quarter_1_2(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i temp[16];
idct32_8x32_135_quarter_1(in, temp);
idct32_8x32_135_quarter_2(in, &temp[8]);
idct32_135_8x32_quarter_1(in, temp);
idct32_135_8x32_quarter_2(in, temp);
// stage 7
add_sub_butterfly(temp, out, 16);
}
// For each 8x32 block, the 8 odd-indexed inputs of in[16]
// produce the second 16 outputs in out[32]
static void idct32_8x32_quarter_3_4(const __m128i *in /*in[16]*/,
__m128i *out /*out[32]*/) {
__m128i v16, v17, v18, v19, v20, v21, v22, v23;
__m128i v24, v25, v26, v27, v28, v29, v30, v31;
__m128i u16, u17, u18, u19, u20, u21, u22, u23;
__m128i u24, u25, u26, u27, u28, u29, u30, u31;
// For each 8x32 block __m128i in[32],
// Input with odd index 1, 3, 5, 7, 9, 11, 13, 15
// Output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void idct32_135_8x32_quarter_3_4(
const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
__m128i step1[32], step2[32];
{
const __m128i stk1_0 = pair_set_epi16(2 * cospi_31_64, 2 * cospi_31_64);
const __m128i stk1_1 = pair_set_epi16(2 * cospi_1_64, 2 * cospi_1_64);
const __m128i stk1_2 = pair_set_epi16(-2 * cospi_17_64, -2 * cospi_17_64);
const __m128i stk1_3 = pair_set_epi16(2 * cospi_15_64, 2 * cospi_15_64);
// stage 1
partial_butterfly_ssse3(in[1], (int)cospi_31_64, (int)cospi_1_64, &step1[16],
&step1[31]);
partial_butterfly_ssse3(in[15], -(int)cospi_17_64, (int)cospi_15_64,
&step1[17], &step1[30]);
partial_butterfly_ssse3(in[9], (int)cospi_23_64, (int)cospi_9_64, &step1[18],
&step1[29]);
partial_butterfly_ssse3(in[7], -(int)cospi_25_64, (int)cospi_7_64, &step1[19],
&step1[28]);
const __m128i stk1_4 = pair_set_epi16(2 * cospi_23_64, 2 * cospi_23_64);
const __m128i stk1_5 = pair_set_epi16(2 * cospi_9_64, 2 * cospi_9_64);
const __m128i stk1_6 = pair_set_epi16(-2 * cospi_25_64, -2 * cospi_25_64);
const __m128i stk1_7 = pair_set_epi16(2 * cospi_7_64, 2 * cospi_7_64);
const __m128i stk1_8 = pair_set_epi16(2 * cospi_27_64, 2 * cospi_27_64);
const __m128i stk1_9 = pair_set_epi16(2 * cospi_5_64, 2 * cospi_5_64);
const __m128i stk1_10 = pair_set_epi16(-2 * cospi_21_64, -2 * cospi_21_64);
const __m128i stk1_11 = pair_set_epi16(2 * cospi_11_64, 2 * cospi_11_64);
partial_butterfly_ssse3(in[5], (int)cospi_27_64, (int)cospi_5_64, &step1[20],
&step1[27]);
partial_butterfly_ssse3(in[11], -(int)cospi_21_64, (int)cospi_11_64,
&step1[21], &step1[26]);
const __m128i stk1_12 = pair_set_epi16(2 * cospi_19_64, 2 * cospi_19_64);
const __m128i stk1_13 = pair_set_epi16(2 * cospi_13_64, 2 * cospi_13_64);
const __m128i stk1_14 = pair_set_epi16(-2 * cospi_29_64, -2 * cospi_29_64);
const __m128i stk1_15 = pair_set_epi16(2 * cospi_3_64, 2 * cospi_3_64);
u16 = _mm_mulhrs_epi16(in[1], stk1_0);
u31 = _mm_mulhrs_epi16(in[1], stk1_1);
u17 = _mm_mulhrs_epi16(in[15], stk1_2);
u30 = _mm_mulhrs_epi16(in[15], stk1_3);
partial_butterfly_ssse3(in[13], (int)cospi_19_64, (int)cospi_13_64,
&step1[22], &step1[25]);
partial_butterfly_ssse3(in[3], -(int)cospi_29_64, (int)cospi_3_64, &step1[23],
&step1[24]);
u18 = _mm_mulhrs_epi16(in[9], stk1_4);
u29 = _mm_mulhrs_epi16(in[9], stk1_5);
u19 = _mm_mulhrs_epi16(in[7], stk1_6);
u28 = _mm_mulhrs_epi16(in[7], stk1_7);
// stage 2
step2[16] = _mm_add_epi16(step1[16], step1[17]);
step2[17] = _mm_sub_epi16(step1[16], step1[17]);
step2[18] = _mm_sub_epi16(step1[19], step1[18]);
step2[19] = _mm_add_epi16(step1[19], step1[18]);
step2[20] = _mm_add_epi16(step1[20], step1[21]);
step2[21] = _mm_sub_epi16(step1[20], step1[21]);
step2[22] = _mm_sub_epi16(step1[23], step1[22]);
step2[23] = _mm_add_epi16(step1[23], step1[22]);
u20 = _mm_mulhrs_epi16(in[5], stk1_8);
u27 = _mm_mulhrs_epi16(in[5], stk1_9);
u21 = _mm_mulhrs_epi16(in[11], stk1_10);
u26 = _mm_mulhrs_epi16(in[11], stk1_11);
step2[24] = _mm_add_epi16(step1[24], step1[25]);
step2[25] = _mm_sub_epi16(step1[24], step1[25]);
step2[26] = _mm_sub_epi16(step1[27], step1[26]);
step2[27] = _mm_add_epi16(step1[27], step1[26]);
step2[28] = _mm_add_epi16(step1[28], step1[29]);
step2[29] = _mm_sub_epi16(step1[28], step1[29]);
step2[30] = _mm_sub_epi16(step1[31], step1[30]);
step2[31] = _mm_add_epi16(step1[31], step1[30]);
u22 = _mm_mulhrs_epi16(in[13], stk1_12);
u25 = _mm_mulhrs_epi16(in[13], stk1_13);
u23 = _mm_mulhrs_epi16(in[3], stk1_14);
u24 = _mm_mulhrs_epi16(in[3], stk1_15);
}
// stage 3
step1[16] = step2[16];
step1[31] = step2[31];
butterfly(step2[30], step2[17], (int)cospi_28_64, (int)cospi_4_64, &step1[17],
&step1[30]);
butterfly(step2[29], step2[18], -(int)cospi_4_64, (int)cospi_28_64,
&step1[18], &step1[29]);
step1[19] = step2[19];
step1[20] = step2[20];
butterfly(step2[26], step2[21], (int)cospi_12_64, (int)cospi_20_64,
&step1[21], &step1[26]);
butterfly(step2[25], step2[22], -(int)cospi_20_64, (int)cospi_12_64,
&step1[22], &step1[25]);
step1[23] = step2[23];
step1[24] = step2[24];
step1[27] = step2[27];
step1[28] = step2[28];
v16 = _mm_add_epi16(u16, u17);
v17 = _mm_sub_epi16(u16, u17);
v18 = _mm_sub_epi16(u19, u18);
v19 = _mm_add_epi16(u19, u18);
v20 = _mm_add_epi16(u20, u21);
v21 = _mm_sub_epi16(u20, u21);
v22 = _mm_sub_epi16(u23, u22);
v23 = _mm_add_epi16(u23, u22);
v24 = _mm_add_epi16(u24, u25);
v25 = _mm_sub_epi16(u24, u25);
v26 = _mm_sub_epi16(u27, u26);
v27 = _mm_add_epi16(u27, u26);
v28 = _mm_add_epi16(u28, u29);
v29 = _mm_sub_epi16(u28, u29);
v30 = _mm_sub_epi16(u31, u30);
v31 = _mm_add_epi16(u31, u30);
butterfly(v30, v17, (int)cospi_28_64, (int)cospi_4_64, &v17, &v30);
butterfly(v29, v18, -(int)cospi_4_64, (int)cospi_28_64, &v18, &v29);
butterfly(v26, v21, (int)cospi_12_64, (int)cospi_20_64, &v21, &v26);
butterfly(v25, v22, -(int)cospi_20_64, (int)cospi_12_64, &v22, &v25);
u16 = _mm_add_epi16(v16, v19);
u17 = _mm_add_epi16(v17, v18);
u18 = _mm_sub_epi16(v17, v18);
u19 = _mm_sub_epi16(v16, v19);
u20 = _mm_sub_epi16(v23, v20);
u21 = _mm_sub_epi16(v22, v21);
u22 = _mm_add_epi16(v22, v21);
u23 = _mm_add_epi16(v23, v20);
u24 = _mm_add_epi16(v24, v27);
u25 = _mm_add_epi16(v25, v26);
u26 = _mm_sub_epi16(v25, v26);
u27 = _mm_sub_epi16(v24, v27);
u28 = _mm_sub_epi16(v31, v28);
u29 = _mm_sub_epi16(v30, v29);
u30 = _mm_add_epi16(v29, v30);
u31 = _mm_add_epi16(v28, v31);
butterfly(u29, u18, (int)cospi_24_64, (int)cospi_8_64, &u18, &u29);
butterfly(u28, u19, (int)cospi_24_64, (int)cospi_8_64, &u19, &u28);
butterfly(u27, u20, -(int)cospi_8_64, (int)cospi_24_64, &u20, &u27);
butterfly(u26, u21, -(int)cospi_8_64, (int)cospi_24_64, &u21, &u26);
out[0] = _mm_add_epi16(u16, u23);
out[1] = _mm_add_epi16(u17, u22);
out[2] = _mm_add_epi16(u18, u21);
out[3] = _mm_add_epi16(u19, u20);
v20 = _mm_sub_epi16(u19, u20);
v21 = _mm_sub_epi16(u18, u21);
v22 = _mm_sub_epi16(u17, u22);
v23 = _mm_sub_epi16(u16, u23);
v24 = _mm_sub_epi16(u31, u24);
v25 = _mm_sub_epi16(u30, u25);
v26 = _mm_sub_epi16(u29, u26);
v27 = _mm_sub_epi16(u28, u27);
out[12] = _mm_add_epi16(u27, u28);
out[13] = _mm_add_epi16(u26, u29);
out[14] = _mm_add_epi16(u25, u30);
out[15] = _mm_add_epi16(u24, u31);
butterfly(v27, v20, (int)cospi_16_64, (int)cospi_16_64, &out[4], &out[11]);
butterfly(v26, v21, (int)cospi_16_64, (int)cospi_16_64, &out[5], &out[10]);
butterfly(v25, v22, (int)cospi_16_64, (int)cospi_16_64, &out[6], &out[9]);
butterfly(v24, v23, (int)cospi_16_64, (int)cospi_16_64, &out[7], &out[8]);
idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);
}
// 8x16 block, input __m128i in[16], output __m128i in[32]
static void idct32_8x32_135(__m128i *in /*in[32]*/) {
__m128i out[32];
idct32_8x32_quarter_1_2(in, out);
idct32_8x32_quarter_3_4(in, &out[16]);
add_sub_butterfly(out, in, 32);
void idct32_135_8x32_ssse3(const __m128i *const in /*in[32]*/,
__m128i *const out /*out[32]*/) {
__m128i temp[32];
idct32_135_8x32_quarter_1_2(in, temp);
idct32_135_8x32_quarter_3_4(in, temp);
// final stage
add_sub_butterfly(temp, out, 32);
}
static INLINE void recon_and_store_ssse3(__m128i *in0, __m128i *in1,
uint8_t *dest, int stride) {
store_buffer_8x32(in0, dest, stride);
store_buffer_8x32(in1, dest + 8, stride);
}
static INLINE void idct32_135(__m128i *col0, __m128i *col1) {
idct32_8x32_135(col0);
idct32_8x32_135(col1);
}
typedef enum { left_16, right_16 } ColsIndicator;
static void transpose_and_copy_16x16(__m128i *in0, __m128i *in1, __m128i *store,
ColsIndicator cols) {
switch (cols) {
case left_16: {
int i;
transpose_16bit_16x16(in0, in1);
for (i = 0; i < 16; ++i) {
store[i] = in0[16 + i];
store[16 + i] = in1[16 + i];
}
break;
}
case right_16: {
transpose_16bit_8x8(store, in0);
transpose_16bit_8x8(&store[8], in1);
transpose_16bit_8x8(&store[16], &in0[8]);
transpose_16bit_8x8(&store[24], &in1[8]);
break;
}
default: { assert(0); }
}
}
// Only upper-left 16x16 has non-zero coeff
void vpx_idct32x32_135_add_ssse3(const tran_low_t *input, uint8_t *dest,
int stride) {
// Each array represents an 8x32 block
__m128i col0[32], col1[32];
// This array represents a 16x16 block
__m128i temp[32];
// Load input data. Only need to load the top left 16x16 block.
load_buffer_16x16(input, col0, col1);
// columns
transpose_16bit_16x16(col0, col1);
idct32_135(col0, col1);
__m128i col[2][32], io[32];
int i;
// rows
transpose_and_copy_16x16(col0, col1, temp, left_16);
idct32_135(col0, col1);
recon_and_store_ssse3(col0, col1, dest, stride);
for (i = 0; i < 2; i++) {
load_transpose_16bit_8x8(&input[0], 32, &io[0]);
load_transpose_16bit_8x8(&input[8], 32, &io[8]);
idct32_135_8x32_ssse3(io, col[i]);
input += 32 << 3;
}
transpose_and_copy_16x16(col0, col1, temp, right_16);
idct32_135(col0, col1);
recon_and_store_ssse3(col0, col1, dest + 16, stride);
// columns
for (i = 0; i < 32; i += 8) {
transpose_16bit_8x8(col[0] + i, io);
transpose_16bit_8x8(col[1] + i, io + 8);
idct32_135_8x32_ssse3(io, io);
store_buffer_8x32(io, dest, stride);
dest += 8;
}
}
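The _34/_135/_1024 variants exist because the decoder picks a partial inverse transform from the eob (end-of-block) position of the coefficient block: with the default scan, eob <= 34 implies only the upper-left 8x8 is populated and eob <= 135 only the upper-left 16x16. A hedged sketch of that kind of dispatch; the actual caller lives in the VP9 decoder, not in this file, and the wrapper name here is illustrative:

static void idct32x32_add_dispatch(const tran_low_t *input, uint8_t *dest,
                                   int stride, int eob) {
  // Illustrative eob-based selection among the exported variants.
  if (eob == 1)
    vpx_idct32x32_1_add(input, dest, stride);
  else if (eob <= 34)
    vpx_idct32x32_34_add(input, dest, stride);   // upper-left 8x8 only
  else if (eob <= 135)
    vpx_idct32x32_135_add(input, dest, stride);  // upper-left 16x16 only
  else
    vpx_idct32x32_1024_add(input, dest, stride);
}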

View File

@@ -106,4 +106,6 @@ static INLINE void idct8x8_12_add_kernel_ssse3(__m128i *const io /* io[8] */) {
io[7] = _mm_sub_epi16(step1[0], step2[7]);
}
void idct32_135_8x32_ssse3(const __m128i *const in, __m128i *const out);
#endif // VPX_DSP_X86_INV_TXFM_SSSE3_H_