Merge changes Ibf9d120b,I341399ec,Iaa5dd63b,Id59865fd
* changes:
Update high bitdepth load_input_data() in x86
Clean array_transpose_{4X8,16x16,16x16_2} in x86
Remove array_transpose_8x8() in x86
Convert 8x8 idct x86 macros to inline functions
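The last item converts function-like macros in the x86 idct code to static inline functions. RECON_AND_STORE, shown in the hunks below, is representative of the pattern; here is a self-contained before/after sketch (libvpx's INLINE portability macro is spelled as plain static inline so the snippet compiles on its own):

#include <emmintrin.h>
#include <stdint.h>

// Before: a function-like macro. It expands at every call site and
// silently captures a `zero` vector from the enclosing scope.
#define RECON_AND_STORE(dest, in_x)                  \
  {                                                  \
    __m128i d0 = _mm_loadl_epi64((__m128i *)(dest)); \
    d0 = _mm_unpacklo_epi8(d0, zero);                \
    d0 = _mm_add_epi16(in_x, d0);                    \
    d0 = _mm_packus_epi16(d0, d0);                   \
    _mm_storel_epi64((__m128i *)(dest), d0);         \
  }

// After: an inline function with type-checked arguments and its own
// local `zero`, so callers can drop their zero constants.
static inline void recon_and_store(uint8_t *const dest, const __m128i in_x) {
  const __m128i zero = _mm_setzero_si128();
  __m128i d0 = _mm_loadl_epi64((__m128i *)(dest));  // load 8 prediction pixels
  d0 = _mm_unpacklo_epi8(d0, zero);                 // widen them to 16 bits
  d0 = _mm_add_epi16(in_x, d0);                     // add the idct residual
  d0 = _mm_packus_epi16(d0, d0);                    // clamp back to 8 bits
  _mm_storel_epi64((__m128i *)(dest), d0);          // store the 8 pixels
}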
@@ -54,7 +54,6 @@ void vp9_iht4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
 void vp9_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
                             int tx_type) {
   __m128i in[8];
-  const __m128i zero = _mm_setzero_si128();
   const __m128i final_rounding = _mm_set1_epi16(1 << 4);

   // load input data
@@ -106,14 +105,14 @@ void vp9_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
   in[6] = _mm_srai_epi16(in[6], 5);
   in[7] = _mm_srai_epi16(in[7], 5);

-  RECON_AND_STORE(dest + 0 * stride, in[0]);
-  RECON_AND_STORE(dest + 1 * stride, in[1]);
-  RECON_AND_STORE(dest + 2 * stride, in[2]);
-  RECON_AND_STORE(dest + 3 * stride, in[3]);
-  RECON_AND_STORE(dest + 4 * stride, in[4]);
-  RECON_AND_STORE(dest + 5 * stride, in[5]);
-  RECON_AND_STORE(dest + 6 * stride, in[6]);
-  RECON_AND_STORE(dest + 7 * stride, in[7]);
+  recon_and_store(dest + 0 * stride, in[0]);
+  recon_and_store(dest + 1 * stride, in[1]);
+  recon_and_store(dest + 2 * stride, in[2]);
+  recon_and_store(dest + 3 * stride, in[3]);
+  recon_and_store(dest + 4 * stride, in[4]);
+  recon_and_store(dest + 5 * stride, in[5]);
+  recon_and_store(dest + 6 * stride, in[6]);
+  recon_and_store(dest + 7 * stride, in[7]);
 }

 void vp9_iht16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
@@ -15,6 +15,7 @@
 #include "./vpx_dsp_rtcd.h"
 #include "vpx_dsp/txfm_common.h"
 #include "vpx_dsp/x86/fwd_txfm_sse2.h"
+#include "vpx_dsp/x86/transpose_sse2.h"
 #include "vpx_dsp/x86/txfm_common_sse2.h"
 #include "vpx_ports/mem.h"

@@ -706,58 +707,6 @@ static INLINE void write_buffer_8x8(tran_low_t *output, __m128i *res,
   store_output(&res[7], (output + 7 * stride));
 }

-// perform in-place transpose
-static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
-  const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
-  const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
-  const __m128i tr0_2 = _mm_unpackhi_epi16(in[0], in[1]);
-  const __m128i tr0_3 = _mm_unpackhi_epi16(in[2], in[3]);
-  const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]);
-  const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]);
-  const __m128i tr0_6 = _mm_unpackhi_epi16(in[4], in[5]);
-  const __m128i tr0_7 = _mm_unpackhi_epi16(in[6], in[7]);
-  // 00 10 01 11 02 12 03 13
-  // 20 30 21 31 22 32 23 33
-  // 04 14 05 15 06 16 07 17
-  // 24 34 25 35 26 36 27 37
-  // 40 50 41 51 42 52 43 53
-  // 60 70 61 71 62 72 63 73
-  // 44 54 45 55 46 56 47 57
-  // 64 74 65 75 66 76 67 77
-  const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
-  const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_4, tr0_5);
-  const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
-  const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_4, tr0_5);
-  const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_2, tr0_3);
-  const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
-  const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_2, tr0_3);
-  const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
-  // 00 10 20 30 01 11 21 31
-  // 40 50 60 70 41 51 61 71
-  // 02 12 22 32 03 13 23 33
-  // 42 52 62 72 43 53 63 73
-  // 04 14 24 34 05 15 25 35
-  // 44 54 64 74 45 55 65 75
-  // 06 16 26 36 07 17 27 37
-  // 46 56 66 76 47 57 67 77
-  res[0] = _mm_unpacklo_epi64(tr1_0, tr1_1);
-  res[1] = _mm_unpackhi_epi64(tr1_0, tr1_1);
-  res[2] = _mm_unpacklo_epi64(tr1_2, tr1_3);
-  res[3] = _mm_unpackhi_epi64(tr1_2, tr1_3);
-  res[4] = _mm_unpacklo_epi64(tr1_4, tr1_5);
-  res[5] = _mm_unpackhi_epi64(tr1_4, tr1_5);
-  res[6] = _mm_unpacklo_epi64(tr1_6, tr1_7);
-  res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7);
-  // 00 10 20 30 40 50 60 70
-  // 01 11 21 31 41 51 61 71
-  // 02 12 22 32 42 52 62 72
-  // 03 13 23 33 43 53 63 73
-  // 04 14 24 34 44 54 64 74
-  // 05 15 25 35 45 55 65 75
-  // 06 16 26 36 46 56 66 76
-  // 07 17 27 37 47 57 67 77
-}
-
 static void fdct8_sse2(__m128i *in) {
   // constants
   const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
@@ -895,7 +844,7 @@ static void fdct8_sse2(__m128i *in) {
   in[7] = _mm_packs_epi32(v6, v7);

   // transpose
-  array_transpose_8x8(in, in);
+  transpose_16bit_8x8(in, in);
 }

 static void fadst8_sse2(__m128i *in) {
@@ -1125,7 +1074,7 @@ static void fadst8_sse2(__m128i *in) {
   in[7] = _mm_sub_epi16(k__const_0, s1);

   // transpose
-  array_transpose_8x8(in, in);
+  transpose_16bit_8x8(in, in);
 }

 void vp9_fht8x8_sse2(const int16_t *input, tran_low_t *output, int stride,
@@ -1182,23 +1131,6 @@ static INLINE void write_buffer_16x16(tran_low_t *output, __m128i *in0,
   write_buffer_8x8(output + 8 * stride, in1 + 8, stride);
 }

-static INLINE void array_transpose_16x16(__m128i *res0, __m128i *res1) {
-  __m128i tbuf[8];
-  array_transpose_8x8(res0, res0);
-  array_transpose_8x8(res1, tbuf);
-  array_transpose_8x8(res0 + 8, res1);
-  array_transpose_8x8(res1 + 8, res1 + 8);
-
-  res0[8] = tbuf[0];
-  res0[9] = tbuf[1];
-  res0[10] = tbuf[2];
-  res0[11] = tbuf[3];
-  res0[12] = tbuf[4];
-  res0[13] = tbuf[5];
-  res0[14] = tbuf[6];
-  res0[15] = tbuf[7];
-}
-
 static INLINE void right_shift_16x16(__m128i *res0, __m128i *res1) {
   // perform rounding operations
   right_shift_8x8(res0, 2);
@@ -2002,13 +1934,13 @@ static void fadst16_8col(__m128i *in) {
 static void fdct16_sse2(__m128i *in0, __m128i *in1) {
   fdct16_8col(in0);
   fdct16_8col(in1);
-  array_transpose_16x16(in0, in1);
+  transpose_16bit_16x16(in0, in1);
 }

 static void fadst16_sse2(__m128i *in0, __m128i *in1) {
   fadst16_8col(in0);
   fadst16_8col(in1);
-  array_transpose_16x16(in0, in1);
+  transpose_16bit_16x16(in0, in1);
 }

 void vp9_fht16x16_sse2(const int16_t *input, tran_low_t *output, int stride,
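Both 1-D transform helpers above end with a transpose because each __m128i register holds one row of the 8x8 block, so the butterfly stages combine registers element-wise, i.e. down the columns; transposing after each pass lets the same column code serve both dimensions, and two passes yield the 2-D transform in its natural orientation. A floating-point sketch of this two-pass pattern (a reference model only, not libvpx's fixed-point butterflies):

#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

// Apply a 1-D DCT-II down each column (textbook formula, standing in
// for the real fdct8/fadst8 butterfly code).
static void dct1d_cols(double m[8][8]) {
  double tmp[8][8];
  for (int j = 0; j < 8; j++)
    for (int k = 0; k < 8; k++) {
      double s = 0;
      for (int n = 0; n < 8; n++)
        s += m[n][j] * cos(M_PI * (2 * n + 1) * k / 16.0);
      tmp[k][j] = s;
    }
  for (int i = 0; i < 8; i++)
    for (int j = 0; j < 8; j++) m[i][j] = tmp[i][j];
}

static void transpose8x8(double m[8][8]) {
  for (int i = 0; i < 8; i++)
    for (int j = i + 1; j < 8; j++) {
      const double t = m[i][j];
      m[i][j] = m[j][i];
      m[j][i] = t;
    }
}

// Two identical (transform columns, then transpose) passes produce the
// full 2-D transform with the result upright, mirroring fdct8_sse2().
static void fdct2d_model(double m[8][8]) {
  dct1d_cols(m);
  transpose8x8(m);
  dct1d_cols(m);
  transpose8x8(m);
}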
@@ -66,7 +66,7 @@ void vpx_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint16_t *dest,
   test = _mm_movemask_epi8(temp1);

   if (test) {
-    array_transpose_16x16(inptr, inptr + 16);
+    transpose_16bit_16x16(inptr, inptr + 16);
     for (i = 0; i < 16; i++) {
       sign_bits = _mm_cmplt_epi16(inptr[i], zero);
       temp1 = _mm_unpacklo_epi16(inptr[i], sign_bits);
@@ -182,8 +182,8 @@ void vpx_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint16_t *dest,

   if (test) {
     // Use fact only first 4 rows contain non-zero coeffs
-    array_transpose_8x8(inptr, inptr);
-    array_transpose_8x8(inptr + 8, inptr + 16);
+    transpose_16bit_8x8(inptr, inptr);
+    transpose_16bit_8x8(inptr + 8, inptr + 16);
     for (i = 0; i < 4; i++) {
       sign_bits = _mm_cmplt_epi16(inptr[i], zero);
       temp1 = _mm_unpacklo_epi16(inptr[i], sign_bits);
@@ -63,7 +63,7 @@ void vpx_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint16_t *dest,
   test = _mm_movemask_epi8(temp1);

   if (test) {
-    array_transpose_8x8(inptr, inptr);
+    transpose_16bit_8x8(inptr, inptr);
     for (i = 0; i < 8; i++) {
       sign_bits = _mm_cmplt_epi16(inptr[i], zero);
       temp1 = _mm_unpackhi_epi16(inptr[i], sign_bits);
@@ -165,7 +165,7 @@ void vpx_highbd_idct8x8_12_add_sse2(const tran_low_t *input, uint16_t *dest,

   if (test) {
     // Use fact only first 4 rows contain non-zero coeffs
-    array_transpose_4X8(inptr, inptr);
+    transpose_16bit_4x8(inptr, inptr);
     for (i = 0; i < 4; i++) {
       sign_bits = _mm_cmplt_epi16(inptr[i], zero);
       temp1 = _mm_unpackhi_epi16(inptr[i], sign_bits);

[File diff suppressed because it is too large]
@@ -15,109 +15,45 @@
 #include "./vpx_config.h"
 #include "vpx/vpx_integer.h"
 #include "vpx_dsp/inv_txfm.h"
+#include "vpx_dsp/x86/transpose_sse2.h"
 #include "vpx_dsp/x86/txfm_common_sse2.h"

-// perform 8x8 transpose
-static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
-  const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
-  const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
-  const __m128i tr0_2 = _mm_unpackhi_epi16(in[0], in[1]);
-  const __m128i tr0_3 = _mm_unpackhi_epi16(in[2], in[3]);
-  const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]);
-  const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]);
-  const __m128i tr0_6 = _mm_unpackhi_epi16(in[4], in[5]);
-  const __m128i tr0_7 = _mm_unpackhi_epi16(in[6], in[7]);
+static INLINE void idct8x8_12_transpose_16bit_4x8(const __m128i *const in,
+                                                  __m128i *const out) {
+  // Unpack 16 bit elements. Goes from:
+  // in[0]: 30 31 32 33 00 01 02 03
+  // in[1]: 20 21 22 23 10 11 12 13
+  // in[2]: 40 41 42 43 70 71 72 73
+  // in[3]: 50 51 52 53 60 61 62 63
+  // to:
+  // tr0_0: 00 10 01 11 02 12 03 13
+  // tr0_1: 20 30 21 31 22 32 23 33
+  // tr0_2: 40 50 41 51 42 52 43 53
+  // tr0_3: 60 70 61 71 62 72 63 73
+  const __m128i tr0_0 = _mm_unpackhi_epi16(in[0], in[1]);
+  const __m128i tr0_1 = _mm_unpacklo_epi16(in[1], in[0]);
+  const __m128i tr0_2 = _mm_unpacklo_epi16(in[2], in[3]);
+  const __m128i tr0_3 = _mm_unpackhi_epi16(in[3], in[2]);

+  // Unpack 32 bit elements resulting in:
+  // tr1_0: 00 10 20 30 01 11 21 31
+  // tr1_1: 02 12 22 32 03 13 23 33
+  // tr1_2: 40 50 60 70 41 51 61 71
+  // tr1_3: 42 52 62 72 43 53 63 73
   const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
-  const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_4, tr0_5);
+  const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
   const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
-  const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_4, tr0_5);
-  const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_2, tr0_3);
-  const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
-  const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_2, tr0_3);
-  const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
+  const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);

-  res[0] = _mm_unpacklo_epi64(tr1_0, tr1_1);
-  res[1] = _mm_unpackhi_epi64(tr1_0, tr1_1);
-  res[2] = _mm_unpacklo_epi64(tr1_2, tr1_3);
-  res[3] = _mm_unpackhi_epi64(tr1_2, tr1_3);
-  res[4] = _mm_unpacklo_epi64(tr1_4, tr1_5);
-  res[5] = _mm_unpackhi_epi64(tr1_4, tr1_5);
-  res[6] = _mm_unpacklo_epi64(tr1_6, tr1_7);
-  res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7);
-}
-#define TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
-                      out2, out3, out4, out5, out6, out7)                 \
-  {                                                                       \
-    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1);                   \
-    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3);                   \
-    const __m128i tr0_2 = _mm_unpackhi_epi16(in0, in1);                   \
-    const __m128i tr0_3 = _mm_unpackhi_epi16(in2, in3);                   \
-    const __m128i tr0_4 = _mm_unpacklo_epi16(in4, in5);                   \
-    const __m128i tr0_5 = _mm_unpacklo_epi16(in6, in7);                   \
-    const __m128i tr0_6 = _mm_unpackhi_epi16(in4, in5);                   \
-    const __m128i tr0_7 = _mm_unpackhi_epi16(in6, in7);                   \
-                                                                          \
-    const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);               \
-    const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);               \
-    const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);               \
-    const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);               \
-    const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);               \
-    const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);               \
-    const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);               \
-    const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);               \
-                                                                          \
-    out0 = _mm_unpacklo_epi64(tr1_0, tr1_4);                              \
-    out1 = _mm_unpackhi_epi64(tr1_0, tr1_4);                              \
-    out2 = _mm_unpacklo_epi64(tr1_2, tr1_6);                              \
-    out3 = _mm_unpackhi_epi64(tr1_2, tr1_6);                              \
-    out4 = _mm_unpacklo_epi64(tr1_1, tr1_5);                              \
-    out5 = _mm_unpackhi_epi64(tr1_1, tr1_5);                              \
-    out6 = _mm_unpacklo_epi64(tr1_3, tr1_7);                              \
-    out7 = _mm_unpackhi_epi64(tr1_3, tr1_7);                              \
-  }
-
-#define TRANSPOSE_8X4(in0, in1, in2, in3, out0, out1)   \
-  {                                                     \
-    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
-    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
-                                                        \
-    in0 = _mm_unpacklo_epi32(tr0_0, tr0_1); /* i1 i0 */ \
-    in1 = _mm_unpackhi_epi32(tr0_0, tr0_1); /* i3 i2 */ \
-  }
-
-static INLINE void array_transpose_4X8(__m128i *in, __m128i *out) {
-  const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
-  const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
-  const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]);
-  const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]);
-
-  const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
-  const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
-  const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
-  const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
-
-  out[0] = _mm_unpacklo_epi64(tr1_0, tr1_4);
-  out[1] = _mm_unpackhi_epi64(tr1_0, tr1_4);
-  out[2] = _mm_unpacklo_epi64(tr1_2, tr1_6);
-  out[3] = _mm_unpackhi_epi64(tr1_2, tr1_6);
-}
-
-static INLINE void array_transpose_16x16(__m128i *res0, __m128i *res1) {
-  __m128i tbuf[8];
-  array_transpose_8x8(res0, res0);
-  array_transpose_8x8(res1, tbuf);
-  array_transpose_8x8(res0 + 8, res1);
-  array_transpose_8x8(res1 + 8, res1 + 8);
-
-  res0[8] = tbuf[0];
-  res0[9] = tbuf[1];
-  res0[10] = tbuf[2];
-  res0[11] = tbuf[3];
-  res0[12] = tbuf[4];
-  res0[13] = tbuf[5];
-  res0[14] = tbuf[6];
-  res0[15] = tbuf[7];
+  // Unpack 64 bit elements resulting in:
+  // out[0]: 00 10 20 30 40 50 60 70
+  // out[1]: 01 11 21 31 41 51 61 71
+  // out[2]: 02 12 22 32 42 52 62 72
+  // out[3]: 03 13 23 33 43 53 63 73
+  out[0] = _mm_unpacklo_epi64(tr1_0, tr1_1);
+  out[1] = _mm_unpackhi_epi64(tr1_0, tr1_1);
+  out[2] = _mm_unpacklo_epi64(tr1_2, tr1_3);
+  out[3] = _mm_unpackhi_epi64(tr1_2, tr1_3);
 }

 static INLINE __m128i dct_const_round_shift_sse2(const __m128i in) {
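Note that idct8x8_12_transpose_16bit_4x8() above is not a general transpose: the idct8x8_12 path keeps its four intermediate rows packed two per register, in the permuted order the in[] comments document. A scalar model of that mapping (hypothetical, for illustration only):

#include <stdint.h>

// Scalar model of idct8x8_12_transpose_16bit_4x8(). Each in[i] models one
// __m128i holding two packed 4-wide rows; out[c] receives column c of the
// logical 8x4 block, i.e. out[0] = 00 10 20 30 40 50 60 70.
static void idct8x8_12_transpose_model(const int16_t in[4][8],
                                       int16_t out[4][8]) {
  // Where logical row r lives, per the comments in the diff:
  // in[0] = rows 3|0, in[1] = rows 2|1, in[2] = rows 4|7, in[3] = rows 5|6
  // (low half = lanes 0-3, high half = lanes 4-7).
  static const int reg[8] = { 0, 1, 1, 0, 2, 3, 3, 2 };
  static const int hi[8] = { 1, 1, 0, 0, 0, 0, 1, 1 };
  int r, c;
  for (r = 0; r < 8; r++)
    for (c = 0; c < 4; c++) out[c][r] = in[reg[r]][4 * hi[r] + c];
}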
@@ -144,14 +80,27 @@ static INLINE __m128i idct_calc_wraplow_sse2(const __m128i in0,
 // highbitdepth enabled
 static INLINE __m128i load_input_data(const tran_low_t *data) {
 #if CONFIG_VP9_HIGHBITDEPTH
-  return octa_set_epi16(data[0], data[1], data[2], data[3], data[4], data[5],
-                        data[6], data[7]);
+  // in0: 0 X 1 X 2 X 3 X
+  // in1: 4 X 5 X 6 X 7 X
+  // t0: 0 4 X X 1 5 X X
+  // t1: 2 6 X X 3 7 X X
+  // t2: 0 2 4 6 X X X X
+  // t3: 1 3 5 7 X X X X
+  // rtn: 0 1 2 3 4 5 6 7
+  const __m128i in0 = _mm_load_si128((const __m128i *)data);
+  const __m128i in1 = _mm_load_si128((const __m128i *)(data + 4));
+  const __m128i t0 = _mm_unpacklo_epi16(in0, in1);
+  const __m128i t1 = _mm_unpackhi_epi16(in0, in1);
+  const __m128i t2 = _mm_unpacklo_epi16(t0, t1);
+  const __m128i t3 = _mm_unpackhi_epi16(t0, t1);
+  return _mm_unpacklo_epi16(t2, t3);
 #else
   return _mm_load_si128((const __m128i *)data);
 #endif
 }

-static INLINE void load_buffer_8x16(const tran_low_t *input, __m128i *in) {
+static INLINE void load_buffer_8x16(const tran_low_t *const input,
+                                    __m128i *const in) {
   in[0] = load_input_data(input + 0 * 16);
   in[1] = load_input_data(input + 1 * 16);
   in[2] = load_input_data(input + 2 * 16);
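With CONFIG_VP9_HIGHBITDEPTH, tran_low_t is 32 bits wide, so the new load_input_data() replaces per-element octa_set_epi16() inserts with two vector loads and an unpack ladder that keeps the low 16 bits of each coefficient. A small hypothetical test harness (not part of the patch) that checks the shuffle against a plain scalar narrowing:

#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>

typedef int32_t tran_low_t;  // highbitdepth builds store coefficients as 32 bits

// Same shuffle as the patched load_input_data(); unaligned loads are used
// here only so the test buffer needs no alignment attribute.
static __m128i load8_narrow(const tran_low_t *data) {
  const __m128i in0 = _mm_loadu_si128((const __m128i *)data);        // 0 X 1 X 2 X 3 X
  const __m128i in1 = _mm_loadu_si128((const __m128i *)(data + 4));  // 4 X 5 X 6 X 7 X
  const __m128i t0 = _mm_unpacklo_epi16(in0, in1);                   // 0 4 X X 1 5 X X
  const __m128i t1 = _mm_unpackhi_epi16(in0, in1);                   // 2 6 X X 3 7 X X
  const __m128i t2 = _mm_unpacklo_epi16(t0, t1);                     // 0 2 4 6 X X X X
  const __m128i t3 = _mm_unpackhi_epi16(t0, t1);                     // 1 3 5 7 X X X X
  return _mm_unpacklo_epi16(t2, t3);                                 // 0 1 2 3 4 5 6 7
}

int main(void) {
  const tran_low_t data[8] = { 1, -2, 300, -400, 5000, -6000, 7, -8 };
  int16_t out[8];
  int i;
  _mm_storeu_si128((__m128i *)out, load8_narrow(data));
  for (i = 0; i < 8; i++)
    printf("%d %s\n", out[i], out[i] == (int16_t)data[i] ? "ok" : "MISMATCH");
  return 0;
}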
@@ -171,18 +120,17 @@ static INLINE void load_buffer_8x16(const tran_low_t *input, __m128i *in) {
   in[15] = load_input_data(input + 15 * 16);
 }

-#define RECON_AND_STORE(dest, in_x)                  \
-  {                                                  \
-    __m128i d0 = _mm_loadl_epi64((__m128i *)(dest)); \
-    d0 = _mm_unpacklo_epi8(d0, zero);                \
-    d0 = _mm_add_epi16(in_x, d0);                    \
-    d0 = _mm_packus_epi16(d0, d0);                   \
-    _mm_storel_epi64((__m128i *)(dest), d0);         \
-  }
+static INLINE void recon_and_store(uint8_t *const dest, const __m128i in_x) {
+  const __m128i zero = _mm_setzero_si128();
+  __m128i d0 = _mm_loadl_epi64((__m128i *)(dest));
+  d0 = _mm_unpacklo_epi8(d0, zero);
+  d0 = _mm_add_epi16(in_x, d0);
+  d0 = _mm_packus_epi16(d0, d0);
+  _mm_storel_epi64((__m128i *)(dest), d0);
+}

 static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int stride) {
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
-  const __m128i zero = _mm_setzero_si128();
   // Final rounding and shift
   in[0] = _mm_adds_epi16(in[0], final_rounding);
   in[1] = _mm_adds_epi16(in[1], final_rounding);
@@ -218,60 +166,24 @@ static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int stride) {
   in[14] = _mm_srai_epi16(in[14], 6);
   in[15] = _mm_srai_epi16(in[15], 6);

-  RECON_AND_STORE(dest + 0 * stride, in[0]);
-  RECON_AND_STORE(dest + 1 * stride, in[1]);
-  RECON_AND_STORE(dest + 2 * stride, in[2]);
-  RECON_AND_STORE(dest + 3 * stride, in[3]);
-  RECON_AND_STORE(dest + 4 * stride, in[4]);
-  RECON_AND_STORE(dest + 5 * stride, in[5]);
-  RECON_AND_STORE(dest + 6 * stride, in[6]);
-  RECON_AND_STORE(dest + 7 * stride, in[7]);
-  RECON_AND_STORE(dest + 8 * stride, in[8]);
-  RECON_AND_STORE(dest + 9 * stride, in[9]);
-  RECON_AND_STORE(dest + 10 * stride, in[10]);
-  RECON_AND_STORE(dest + 11 * stride, in[11]);
-  RECON_AND_STORE(dest + 12 * stride, in[12]);
-  RECON_AND_STORE(dest + 13 * stride, in[13]);
-  RECON_AND_STORE(dest + 14 * stride, in[14]);
-  RECON_AND_STORE(dest + 15 * stride, in[15]);
+  recon_and_store(dest + 0 * stride, in[0]);
+  recon_and_store(dest + 1 * stride, in[1]);
+  recon_and_store(dest + 2 * stride, in[2]);
+  recon_and_store(dest + 3 * stride, in[3]);
+  recon_and_store(dest + 4 * stride, in[4]);
+  recon_and_store(dest + 5 * stride, in[5]);
+  recon_and_store(dest + 6 * stride, in[6]);
+  recon_and_store(dest + 7 * stride, in[7]);
+  recon_and_store(dest + 8 * stride, in[8]);
+  recon_and_store(dest + 9 * stride, in[9]);
+  recon_and_store(dest + 10 * stride, in[10]);
+  recon_and_store(dest + 11 * stride, in[11]);
+  recon_and_store(dest + 12 * stride, in[12]);
+  recon_and_store(dest + 13 * stride, in[13]);
+  recon_and_store(dest + 14 * stride, in[14]);
+  recon_and_store(dest + 15 * stride, in[15]);
 }

-#define TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, out0, out1, out2, out3) \
-  {                                                                      \
-    const __m128i tr0_0 = _mm_unpackhi_epi16(tmp0, tmp1);                \
-    const __m128i tr0_1 = _mm_unpacklo_epi16(tmp1, tmp0);                \
-    const __m128i tr0_4 = _mm_unpacklo_epi16(tmp2, tmp3);                \
-    const __m128i tr0_5 = _mm_unpackhi_epi16(tmp3, tmp2);                \
-                                                                         \
-    const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);              \
-    const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);              \
-    const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);              \
-    const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);              \
-                                                                         \
-    out0 = _mm_unpacklo_epi64(tr1_0, tr1_4);                             \
-    out1 = _mm_unpackhi_epi64(tr1_0, tr1_4);                             \
-    out2 = _mm_unpacklo_epi64(tr1_2, tr1_6);                             \
-    out3 = _mm_unpackhi_epi64(tr1_2, tr1_6);                             \
-  }
-
-#define TRANSPOSE_8X8_10(in0, in1, in2, in3, out0, out1)   \
-  {                                                        \
-    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1);    \
-    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3);    \
-    out0 = _mm_unpacklo_epi32(tr0_0, tr0_1);               \
-    out1 = _mm_unpackhi_epi32(tr0_0, tr0_1);               \
-  }
-
-// Define Macro for multiplying elements by constants and adding them together.
-#define MULTIPLICATION_AND_ADD(lo_0, hi_0, lo_1, hi_1, cst0, cst1, cst2, cst3, \
-                               res0, res1, res2, res3)                         \
-  {                                                                            \
-    res0 = idct_calc_wraplow_sse2(lo_0, hi_0, cst0);                           \
-    res1 = idct_calc_wraplow_sse2(lo_0, hi_0, cst1);                           \
-    res2 = idct_calc_wraplow_sse2(lo_1, hi_1, cst2);                           \
-    res3 = idct_calc_wraplow_sse2(lo_1, hi_1, cst3);                           \
-  }
-
 static INLINE void recon_and_store4x4_sse2(const __m128i *const in,
                                            uint8_t *const dest,
                                            const int stride) {
@@ -12,11 +12,11 @@

 #include "./vpx_dsp_rtcd.h"
 #include "vpx_dsp/x86/inv_txfm_sse2.h"
+#include "vpx_dsp/x86/transpose_sse2.h"
 #include "vpx_dsp/x86/txfm_common_sse2.h"

 void vpx_idct8x8_64_add_ssse3(const tran_low_t *input, uint8_t *dest,
                               int stride) {
-  const __m128i zero = _mm_setzero_si128();
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 4);
   const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
@@ -28,36 +28,35 @@ void vpx_idct8x8_64_add_ssse3(const tran_low_t *input, uint8_t *dest,
   const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
   const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64);

-  __m128i in0, in1, in2, in3, in4, in5, in6, in7;
+  __m128i in[8];
   __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7;
   __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
   int i;

   // Load input data.
-  in0 = load_input_data(input);
-  in1 = load_input_data(input + 8 * 1);
-  in2 = load_input_data(input + 8 * 2);
-  in3 = load_input_data(input + 8 * 3);
-  in4 = load_input_data(input + 8 * 4);
-  in5 = load_input_data(input + 8 * 5);
-  in6 = load_input_data(input + 8 * 6);
-  in7 = load_input_data(input + 8 * 7);
+  in[0] = load_input_data(input);
+  in[1] = load_input_data(input + 8 * 1);
+  in[2] = load_input_data(input + 8 * 2);
+  in[3] = load_input_data(input + 8 * 3);
+  in[4] = load_input_data(input + 8 * 4);
+  in[5] = load_input_data(input + 8 * 5);
+  in[6] = load_input_data(input + 8 * 6);
+  in[7] = load_input_data(input + 8 * 7);

   // 2-D
   for (i = 0; i < 2; i++) {
     // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
-    TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
-                  in4, in5, in6, in7);
+    transpose_16bit_8x8(in, in);

     // 4-stage 1D idct8x8
     {
       /* Stage1 */
       {
-        const __m128i lo_17 = _mm_unpacklo_epi16(in1, in7);
-        const __m128i hi_17 = _mm_unpackhi_epi16(in1, in7);
-        const __m128i lo_35 = _mm_unpacklo_epi16(in3, in5);
-        const __m128i hi_35 = _mm_unpackhi_epi16(in3, in5);
+        const __m128i lo_17 = _mm_unpacklo_epi16(in[1], in[7]);
+        const __m128i hi_17 = _mm_unpackhi_epi16(in[1], in[7]);
+        const __m128i lo_35 = _mm_unpacklo_epi16(in[3], in[5]);
+        const __m128i hi_35 = _mm_unpackhi_epi16(in[3], in[5]);

         {
           tmp0 = _mm_madd_epi16(lo_17, stg1_0);
@@ -96,12 +95,12 @@ void vpx_idct8x8_64_add_ssse3(const tran_low_t *input, uint8_t *dest,

       /* Stage2 */
       {
-        const __m128i lo_26 = _mm_unpacklo_epi16(in2, in6);
-        const __m128i hi_26 = _mm_unpackhi_epi16(in2, in6);
+        const __m128i lo_26 = _mm_unpacklo_epi16(in[2], in[6]);
+        const __m128i hi_26 = _mm_unpackhi_epi16(in[2], in[6]);

         {
-          tmp0 = _mm_unpacklo_epi16(in0, in4);
-          tmp1 = _mm_unpackhi_epi16(in0, in4);
+          tmp0 = _mm_unpacklo_epi16(in[0], in[4]);
+          tmp1 = _mm_unpackhi_epi16(in[0], in[4]);

           tmp2 = _mm_madd_epi16(tmp0, stk2_0);
           tmp3 = _mm_madd_epi16(tmp1, stk2_0);
@@ -176,44 +175,44 @@ void vpx_idct8x8_64_add_ssse3(const tran_low_t *input, uint8_t *dest,
       }

       /* Stage4 */
-      in0 = _mm_add_epi16(stp1_0, stp2_7);
-      in1 = _mm_add_epi16(stp1_1, stp1_6);
-      in2 = _mm_add_epi16(stp1_2, stp1_5);
-      in3 = _mm_add_epi16(stp1_3, stp2_4);
-      in4 = _mm_sub_epi16(stp1_3, stp2_4);
-      in5 = _mm_sub_epi16(stp1_2, stp1_5);
-      in6 = _mm_sub_epi16(stp1_1, stp1_6);
-      in7 = _mm_sub_epi16(stp1_0, stp2_7);
+      in[0] = _mm_add_epi16(stp1_0, stp2_7);
+      in[1] = _mm_add_epi16(stp1_1, stp1_6);
+      in[2] = _mm_add_epi16(stp1_2, stp1_5);
+      in[3] = _mm_add_epi16(stp1_3, stp2_4);
+      in[4] = _mm_sub_epi16(stp1_3, stp2_4);
+      in[5] = _mm_sub_epi16(stp1_2, stp1_5);
+      in[6] = _mm_sub_epi16(stp1_1, stp1_6);
+      in[7] = _mm_sub_epi16(stp1_0, stp2_7);
     }
   }

   // Final rounding and shift
-  in0 = _mm_adds_epi16(in0, final_rounding);
-  in1 = _mm_adds_epi16(in1, final_rounding);
-  in2 = _mm_adds_epi16(in2, final_rounding);
-  in3 = _mm_adds_epi16(in3, final_rounding);
-  in4 = _mm_adds_epi16(in4, final_rounding);
-  in5 = _mm_adds_epi16(in5, final_rounding);
-  in6 = _mm_adds_epi16(in6, final_rounding);
-  in7 = _mm_adds_epi16(in7, final_rounding);
+  in[0] = _mm_adds_epi16(in[0], final_rounding);
+  in[1] = _mm_adds_epi16(in[1], final_rounding);
+  in[2] = _mm_adds_epi16(in[2], final_rounding);
+  in[3] = _mm_adds_epi16(in[3], final_rounding);
+  in[4] = _mm_adds_epi16(in[4], final_rounding);
+  in[5] = _mm_adds_epi16(in[5], final_rounding);
+  in[6] = _mm_adds_epi16(in[6], final_rounding);
+  in[7] = _mm_adds_epi16(in[7], final_rounding);

-  in0 = _mm_srai_epi16(in0, 5);
-  in1 = _mm_srai_epi16(in1, 5);
-  in2 = _mm_srai_epi16(in2, 5);
-  in3 = _mm_srai_epi16(in3, 5);
-  in4 = _mm_srai_epi16(in4, 5);
-  in5 = _mm_srai_epi16(in5, 5);
-  in6 = _mm_srai_epi16(in6, 5);
-  in7 = _mm_srai_epi16(in7, 5);
+  in[0] = _mm_srai_epi16(in[0], 5);
+  in[1] = _mm_srai_epi16(in[1], 5);
+  in[2] = _mm_srai_epi16(in[2], 5);
+  in[3] = _mm_srai_epi16(in[3], 5);
+  in[4] = _mm_srai_epi16(in[4], 5);
+  in[5] = _mm_srai_epi16(in[5], 5);
+  in[6] = _mm_srai_epi16(in[6], 5);
+  in[7] = _mm_srai_epi16(in[7], 5);

-  RECON_AND_STORE(dest + 0 * stride, in0);
-  RECON_AND_STORE(dest + 1 * stride, in1);
-  RECON_AND_STORE(dest + 2 * stride, in2);
-  RECON_AND_STORE(dest + 3 * stride, in3);
-  RECON_AND_STORE(dest + 4 * stride, in4);
-  RECON_AND_STORE(dest + 5 * stride, in5);
-  RECON_AND_STORE(dest + 6 * stride, in6);
-  RECON_AND_STORE(dest + 7 * stride, in7);
+  recon_and_store(dest + 0 * stride, in[0]);
+  recon_and_store(dest + 1 * stride, in[1]);
+  recon_and_store(dest + 2 * stride, in[2]);
+  recon_and_store(dest + 3 * stride, in[3]);
+  recon_and_store(dest + 4 * stride, in[4]);
+  recon_and_store(dest + 5 * stride, in[5]);
+  recon_and_store(dest + 6 * stride, in[6]);
+  recon_and_store(dest + 7 * stride, in[7]);
 }

 void vpx_idct8x8_12_add_ssse3(const tran_low_t *input, uint8_t *dest,
@@ -232,82 +231,82 @@ void vpx_idct8x8_12_add_ssse3(const tran_low_t *input, uint8_t *dest,
   const __m128i stg2_3 = pair_set_epi16(2 * cospi_8_64, 2 * cospi_8_64);
   const __m128i stg3_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);

-  __m128i in0, in1, in2, in3, in4, in5, in6, in7;
+  __m128i in[8];
   __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7;
   __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
-  __m128i tmp0, tmp1, tmp2, tmp3;
+  __m128i tmp[4];

   // Rows. Load 4-row input data.
-  in0 = load_input_data(input);
-  in1 = load_input_data(input + 8 * 1);
-  in2 = load_input_data(input + 8 * 2);
-  in3 = load_input_data(input + 8 * 3);
+  in[0] = load_input_data(input);
+  in[1] = load_input_data(input + 8 * 1);
+  in[2] = load_input_data(input + 8 * 2);
+  in[3] = load_input_data(input + 8 * 3);

-  // 8x4 Transpose
-  TRANSPOSE_8X8_10(in0, in1, in2, in3, in0, in1);
+  // 4x4 Transpose
+  transpose_16bit_4x4(in, in);

   // Stage1
-  tmp0 = _mm_mulhrs_epi16(in0, stg1_0);
-  tmp1 = _mm_mulhrs_epi16(in0, stg1_1);
-  tmp2 = _mm_mulhrs_epi16(in1, stg1_2);
-  tmp3 = _mm_mulhrs_epi16(in1, stg1_3);
+  tmp[0] = _mm_mulhrs_epi16(in[0], stg1_0);
+  tmp[1] = _mm_mulhrs_epi16(in[0], stg1_1);
+  tmp[2] = _mm_mulhrs_epi16(in[1], stg1_2);
+  tmp[3] = _mm_mulhrs_epi16(in[1], stg1_3);

-  stp1_4 = _mm_unpackhi_epi64(tmp0, tmp1);
-  stp1_5 = _mm_unpackhi_epi64(tmp2, tmp3);
+  stp1_4 = _mm_unpackhi_epi64(tmp[0], tmp[1]);
+  stp1_5 = _mm_unpackhi_epi64(tmp[2], tmp[3]);

   // Stage2
-  tmp0 = _mm_mulhrs_epi16(in0, stg2_0);
-  stp2_0 = _mm_unpacklo_epi64(tmp0, tmp0);
+  tmp[0] = _mm_mulhrs_epi16(in[0], stg2_0);
+  stp2_0 = _mm_unpacklo_epi64(tmp[0], tmp[0]);

-  tmp1 = _mm_mulhrs_epi16(in1, stg2_2);
-  tmp2 = _mm_mulhrs_epi16(in1, stg2_3);
-  stp2_2 = _mm_unpacklo_epi64(tmp2, tmp1);
+  tmp[1] = _mm_mulhrs_epi16(in[1], stg2_2);
+  tmp[2] = _mm_mulhrs_epi16(in[1], stg2_3);
+  stp2_2 = _mm_unpacklo_epi64(tmp[2], tmp[1]);

-  tmp0 = _mm_add_epi16(stp1_4, stp1_5);
-  tmp1 = _mm_sub_epi16(stp1_4, stp1_5);
+  tmp[0] = _mm_add_epi16(stp1_4, stp1_5);
+  tmp[1] = _mm_sub_epi16(stp1_4, stp1_5);

-  stp2_4 = tmp0;
-  stp2_5 = _mm_unpacklo_epi64(tmp1, zero);
-  stp2_6 = _mm_unpackhi_epi64(tmp1, zero);
+  stp2_4 = tmp[0];
+  stp2_5 = _mm_unpacklo_epi64(tmp[1], zero);
+  stp2_6 = _mm_unpackhi_epi64(tmp[1], zero);

-  tmp0 = _mm_unpacklo_epi16(stp2_5, stp2_6);
-  tmp1 = _mm_madd_epi16(tmp0, stg3_0);
-  tmp2 = _mm_madd_epi16(tmp0, stk2_0);  // stg3_1 = stk2_0
+  tmp[0] = _mm_unpacklo_epi16(stp2_5, stp2_6);
+  tmp[1] = _mm_madd_epi16(tmp[0], stg3_0);
+  tmp[2] = _mm_madd_epi16(tmp[0], stk2_0);  // stg3_1 = stk2_0

-  tmp1 = _mm_add_epi32(tmp1, rounding);
-  tmp2 = _mm_add_epi32(tmp2, rounding);
-  tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
-  tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+  tmp[1] = _mm_add_epi32(tmp[1], rounding);
+  tmp[2] = _mm_add_epi32(tmp[2], rounding);
+  tmp[1] = _mm_srai_epi32(tmp[1], DCT_CONST_BITS);
+  tmp[2] = _mm_srai_epi32(tmp[2], DCT_CONST_BITS);

-  stp1_5 = _mm_packs_epi32(tmp1, tmp2);
+  stp1_5 = _mm_packs_epi32(tmp[1], tmp[2]);

   // Stage3
-  tmp2 = _mm_add_epi16(stp2_0, stp2_2);
-  tmp3 = _mm_sub_epi16(stp2_0, stp2_2);
+  tmp[2] = _mm_add_epi16(stp2_0, stp2_2);
+  tmp[3] = _mm_sub_epi16(stp2_0, stp2_2);

-  stp1_2 = _mm_unpackhi_epi64(tmp3, tmp2);
-  stp1_3 = _mm_unpacklo_epi64(tmp3, tmp2);
+  stp1_2 = _mm_unpackhi_epi64(tmp[3], tmp[2]);
+  stp1_3 = _mm_unpacklo_epi64(tmp[3], tmp[2]);

   // Stage4
-  tmp0 = _mm_add_epi16(stp1_3, stp2_4);
-  tmp1 = _mm_add_epi16(stp1_2, stp1_5);
-  tmp2 = _mm_sub_epi16(stp1_3, stp2_4);
-  tmp3 = _mm_sub_epi16(stp1_2, stp1_5);
+  tmp[0] = _mm_add_epi16(stp1_3, stp2_4);
+  tmp[1] = _mm_add_epi16(stp1_2, stp1_5);
+  tmp[2] = _mm_sub_epi16(stp1_3, stp2_4);
+  tmp[3] = _mm_sub_epi16(stp1_2, stp1_5);

-  TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, in0, in1, in2, in3)
+  idct8x8_12_transpose_16bit_4x8(tmp, in);

   /* Stage1 */
-  stp1_4 = _mm_mulhrs_epi16(in1, stg1_0);
-  stp1_7 = _mm_mulhrs_epi16(in1, stg1_1);
-  stp1_5 = _mm_mulhrs_epi16(in3, stg1_2);
-  stp1_6 = _mm_mulhrs_epi16(in3, stg1_3);
+  stp1_4 = _mm_mulhrs_epi16(in[1], stg1_0);
+  stp1_7 = _mm_mulhrs_epi16(in[1], stg1_1);
+  stp1_5 = _mm_mulhrs_epi16(in[3], stg1_2);
+  stp1_6 = _mm_mulhrs_epi16(in[3], stg1_3);

   /* Stage2 */
-  stp2_0 = _mm_mulhrs_epi16(in0, stg2_0);
-  stp2_1 = _mm_mulhrs_epi16(in0, stg2_0);
+  stp2_0 = _mm_mulhrs_epi16(in[0], stg2_0);
+  stp2_1 = _mm_mulhrs_epi16(in[0], stg2_0);

-  stp2_2 = _mm_mulhrs_epi16(in2, stg2_2);
-  stp2_3 = _mm_mulhrs_epi16(in2, stg2_3);
+  stp2_2 = _mm_mulhrs_epi16(in[2], stg2_2);
+  stp2_3 = _mm_mulhrs_epi16(in[2], stg2_3);

   stp2_4 = _mm_add_epi16(stp1_4, stp1_5);
   stp2_5 = _mm_sub_epi16(stp1_4, stp1_5);
@@ -320,62 +319,62 @@ void vpx_idct8x8_12_add_ssse3(const tran_low_t *input, uint8_t *dest,
   stp1_2 = _mm_sub_epi16(stp2_1, stp2_2);
   stp1_3 = _mm_sub_epi16(stp2_0, stp2_3);

-  tmp0 = _mm_unpacklo_epi16(stp2_6, stp2_5);
-  tmp1 = _mm_unpackhi_epi16(stp2_6, stp2_5);
+  tmp[0] = _mm_unpacklo_epi16(stp2_6, stp2_5);
+  tmp[1] = _mm_unpackhi_epi16(stp2_6, stp2_5);

-  tmp2 = _mm_madd_epi16(tmp0, stk2_0);
-  tmp3 = _mm_madd_epi16(tmp1, stk2_0);
-  tmp2 = _mm_add_epi32(tmp2, rounding);
-  tmp3 = _mm_add_epi32(tmp3, rounding);
-  tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
-  tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
-  stp1_6 = _mm_packs_epi32(tmp2, tmp3);
+  tmp[2] = _mm_madd_epi16(tmp[0], stk2_0);
+  tmp[3] = _mm_madd_epi16(tmp[1], stk2_0);
+  tmp[2] = _mm_add_epi32(tmp[2], rounding);
+  tmp[3] = _mm_add_epi32(tmp[3], rounding);
+  tmp[2] = _mm_srai_epi32(tmp[2], DCT_CONST_BITS);
+  tmp[3] = _mm_srai_epi32(tmp[3], DCT_CONST_BITS);
+  stp1_6 = _mm_packs_epi32(tmp[2], tmp[3]);

-  tmp2 = _mm_madd_epi16(tmp0, stk2_1);
-  tmp3 = _mm_madd_epi16(tmp1, stk2_1);
-  tmp2 = _mm_add_epi32(tmp2, rounding);
-  tmp3 = _mm_add_epi32(tmp3, rounding);
-  tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
-  tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
-  stp1_5 = _mm_packs_epi32(tmp2, tmp3);
+  tmp[2] = _mm_madd_epi16(tmp[0], stk2_1);
+  tmp[3] = _mm_madd_epi16(tmp[1], stk2_1);
+  tmp[2] = _mm_add_epi32(tmp[2], rounding);
+  tmp[3] = _mm_add_epi32(tmp[3], rounding);
+  tmp[2] = _mm_srai_epi32(tmp[2], DCT_CONST_BITS);
+  tmp[3] = _mm_srai_epi32(tmp[3], DCT_CONST_BITS);
+  stp1_5 = _mm_packs_epi32(tmp[2], tmp[3]);

   /* Stage4 */
-  in0 = _mm_add_epi16(stp1_0, stp2_7);
-  in1 = _mm_add_epi16(stp1_1, stp1_6);
-  in2 = _mm_add_epi16(stp1_2, stp1_5);
-  in3 = _mm_add_epi16(stp1_3, stp2_4);
-  in4 = _mm_sub_epi16(stp1_3, stp2_4);
-  in5 = _mm_sub_epi16(stp1_2, stp1_5);
-  in6 = _mm_sub_epi16(stp1_1, stp1_6);
-  in7 = _mm_sub_epi16(stp1_0, stp2_7);
+  in[0] = _mm_add_epi16(stp1_0, stp2_7);
+  in[1] = _mm_add_epi16(stp1_1, stp1_6);
+  in[2] = _mm_add_epi16(stp1_2, stp1_5);
+  in[3] = _mm_add_epi16(stp1_3, stp2_4);
+  in[4] = _mm_sub_epi16(stp1_3, stp2_4);
+  in[5] = _mm_sub_epi16(stp1_2, stp1_5);
+  in[6] = _mm_sub_epi16(stp1_1, stp1_6);
+  in[7] = _mm_sub_epi16(stp1_0, stp2_7);

   // Final rounding and shift
-  in0 = _mm_adds_epi16(in0, final_rounding);
-  in1 = _mm_adds_epi16(in1, final_rounding);
-  in2 = _mm_adds_epi16(in2, final_rounding);
-  in3 = _mm_adds_epi16(in3, final_rounding);
-  in4 = _mm_adds_epi16(in4, final_rounding);
-  in5 = _mm_adds_epi16(in5, final_rounding);
-  in6 = _mm_adds_epi16(in6, final_rounding);
-  in7 = _mm_adds_epi16(in7, final_rounding);
+  in[0] = _mm_adds_epi16(in[0], final_rounding);
+  in[1] = _mm_adds_epi16(in[1], final_rounding);
+  in[2] = _mm_adds_epi16(in[2], final_rounding);
+  in[3] = _mm_adds_epi16(in[3], final_rounding);
+  in[4] = _mm_adds_epi16(in[4], final_rounding);
+  in[5] = _mm_adds_epi16(in[5], final_rounding);
+  in[6] = _mm_adds_epi16(in[6], final_rounding);
+  in[7] = _mm_adds_epi16(in[7], final_rounding);

-  in0 = _mm_srai_epi16(in0, 5);
-  in1 = _mm_srai_epi16(in1, 5);
-  in2 = _mm_srai_epi16(in2, 5);
-  in3 = _mm_srai_epi16(in3, 5);
-  in4 = _mm_srai_epi16(in4, 5);
-  in5 = _mm_srai_epi16(in5, 5);
-  in6 = _mm_srai_epi16(in6, 5);
-  in7 = _mm_srai_epi16(in7, 5);
+  in[0] = _mm_srai_epi16(in[0], 5);
+  in[1] = _mm_srai_epi16(in[1], 5);
+  in[2] = _mm_srai_epi16(in[2], 5);
+  in[3] = _mm_srai_epi16(in[3], 5);
+  in[4] = _mm_srai_epi16(in[4], 5);
+  in[5] = _mm_srai_epi16(in[5], 5);
+  in[6] = _mm_srai_epi16(in[6], 5);
+  in[7] = _mm_srai_epi16(in[7], 5);

-  RECON_AND_STORE(dest + 0 * stride, in0);
-  RECON_AND_STORE(dest + 1 * stride, in1);
-  RECON_AND_STORE(dest + 2 * stride, in2);
-  RECON_AND_STORE(dest + 3 * stride, in3);
-  RECON_AND_STORE(dest + 4 * stride, in4);
-  RECON_AND_STORE(dest + 5 * stride, in5);
-  RECON_AND_STORE(dest + 6 * stride, in6);
-  RECON_AND_STORE(dest + 7 * stride, in7);
+  recon_and_store(dest + 0 * stride, in[0]);
+  recon_and_store(dest + 1 * stride, in[1]);
+  recon_and_store(dest + 2 * stride, in[2]);
+  recon_and_store(dest + 3 * stride, in[3]);
+  recon_and_store(dest + 4 * stride, in[4]);
+  recon_and_store(dest + 5 * stride, in[5]);
+  recon_and_store(dest + 6 * stride, in[6]);
+  recon_and_store(dest + 7 * stride, in[7]);
 }

 // Only do addition and subtraction butterfly, size = 16, 32
@@ -618,7 +617,6 @@ static void idct32_34_second_half(const __m128i *in, __m128i *stp1) {
 // Only upper-left 8x8 has non-zero coeff
 void vpx_idct32x32_34_add_ssse3(const tran_low_t *input, uint8_t *dest,
                                 int stride) {
-  const __m128i zero = _mm_setzero_si128();
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
   __m128i in[32], col[32];
   __m128i stp1[32];
@@ -634,7 +632,7 @@ void vpx_idct32x32_34_add_ssse3(const tran_low_t *input, uint8_t *dest,
   in[6] = load_input_data(input + 192);
   in[7] = load_input_data(input + 224);

-  array_transpose_8x8(in, in);
+  transpose_16bit_8x8(in, in);
   idct32_34_first_half(in, stp1);
   idct32_34_second_half(in, stp1);

@@ -643,7 +641,7 @@ void vpx_idct32x32_34_add_ssse3(const tran_low_t *input, uint8_t *dest,
   for (i = 0; i < 4; i++) {
     int j;
     // Transpose 32x8 block to 8x32 block
-    array_transpose_8x8(col + i * 8, in);
+    transpose_16bit_8x8(col + i * 8, in);
     idct32_34_first_half(in, stp1);
     idct32_34_second_half(in, stp1);

@@ -653,7 +651,7 @@ void vpx_idct32x32_34_add_ssse3(const tran_low_t *input, uint8_t *dest,
       // Final rounding and shift
       in[j] = _mm_adds_epi16(in[j], final_rounding);
       in[j] = _mm_srai_epi16(in[j], 6);
-      RECON_AND_STORE(dest + j * stride, in[j]);
+      recon_and_store(dest + j * stride, in[j]);
     }

     dest += 8;
@@ -672,14 +670,6 @@ static void load_buffer_16x16(const tran_low_t *input, __m128i *in0,
   }
 }

-static void array_transpose_16x16_2(__m128i *in0, __m128i *in1, __m128i *out0,
-                                    __m128i *out1) {
-  array_transpose_8x8(in0, out0);
-  array_transpose_8x8(&in0[8], out1);
-  array_transpose_8x8(in1, &out0[8]);
-  array_transpose_8x8(&in1[8], &out1[8]);
-}
-
 // Group the coefficient calculation into smaller functions
 // to prevent stack spillover:
 // quarter_1: 0-7
@@ -954,7 +944,6 @@ static void idct32_8x32_135(__m128i *in /*in[32]*/) {

 static INLINE void store_buffer_8x32(__m128i *in, uint8_t *dst, int stride) {
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
-  const __m128i zero = _mm_setzero_si128();
   int j = 0;
   while (j < 32) {
     in[j] = _mm_adds_epi16(in[j], final_rounding);
@@ -963,16 +952,16 @@ static INLINE void store_buffer_8x32(__m128i *in, uint8_t *dst, int stride) {
     in[j] = _mm_srai_epi16(in[j], 6);
     in[j + 1] = _mm_srai_epi16(in[j + 1], 6);

-    RECON_AND_STORE(dst, in[j]);
+    recon_and_store(dst, in[j]);
     dst += stride;
-    RECON_AND_STORE(dst, in[j + 1]);
+    recon_and_store(dst, in[j + 1]);
     dst += stride;
     j += 2;
   }
 }

-static INLINE void recon_and_store(__m128i *in0, __m128i *in1, uint8_t *dest,
-                                   int stride) {
+static INLINE void recon_and_store_ssse3(__m128i *in0, __m128i *in1,
+                                         uint8_t *dest, int stride) {
   store_buffer_8x32(in0, dest, stride);
   store_buffer_8x32(in1, dest + 8, stride);
 }
@@ -989,7 +978,7 @@ static void transpose_and_copy_16x16(__m128i *in0, __m128i *in1, __m128i *store,
   switch (cols) {
     case left_16: {
       int i;
-      array_transpose_16x16(in0, in1);
+      transpose_16bit_16x16(in0, in1);
       for (i = 0; i < 16; ++i) {
         store[i] = in0[16 + i];
         store[16 + i] = in1[16 + i];
@@ -997,7 +986,10 @@ static void transpose_and_copy_16x16(__m128i *in0, __m128i *in1, __m128i *store,
       break;
     }
     case right_16: {
-      array_transpose_16x16_2(store, &store[16], in0, in1);
+      transpose_16bit_8x8(store, in0);
+      transpose_16bit_8x8(&store[8], in1);
+      transpose_16bit_8x8(&store[16], &in0[8]);
+      transpose_16bit_8x8(&store[24], &in1[8]);
       break;
     }
     default: { assert(0); }
@@ -1016,17 +1008,17 @@ void vpx_idct32x32_135_add_ssse3(const tran_low_t *input, uint8_t *dest,
   load_buffer_16x16(input, col0, col1);

   // columns
-  array_transpose_16x16(col0, col1);
+  transpose_16bit_16x16(col0, col1);
   idct32_135(col0, col1);

   // rows
   transpose_and_copy_16x16(col0, col1, temp, left_16);
   idct32_135(col0, col1);
-  recon_and_store(col0, col1, dest, stride);
+  recon_and_store_ssse3(col0, col1, dest, stride);

   transpose_and_copy_16x16(col0, col1, temp, right_16);
   idct32_135(col0, col1);
-  recon_and_store(col0, col1, dest + 16, stride);
+  recon_and_store_ssse3(col0, col1, dest + 16, stride);
 }

 // For each 8x32 block __m128i in[32],
@@ -1309,10 +1301,10 @@ void vpx_idct32x32_1024_add_ssse3(const tran_low_t *input, uint8_t *dest,
     input += 32 << 3;

     // Transpose 32x8 block to 8x32 block
-    array_transpose_8x8(in, in);
-    array_transpose_8x8(in + 8, in + 8);
-    array_transpose_8x8(in + 16, in + 16);
-    array_transpose_8x8(in + 24, in + 24);
+    transpose_16bit_8x8(in, in);
+    transpose_16bit_8x8(in + 8, in + 8);
+    transpose_16bit_8x8(in + 16, in + 16);
+    transpose_16bit_8x8(in + 24, in + 24);

     idct32_full_8x32(in, col + (i << 5));
   }
@@ -1321,10 +1313,10 @@ void vpx_idct32x32_1024_add_ssse3(const tran_low_t *input, uint8_t *dest,
   for (i = 0; i < 4; ++i) {
     j = i << 3;
     // Transpose 32x8 block to 8x32 block
-    array_transpose_8x8(col + j, in);
-    array_transpose_8x8(col + j + 32, in + 8);
-    array_transpose_8x8(col + j + 64, in + 16);
-    array_transpose_8x8(col + j + 96, in + 24);
+    transpose_16bit_8x8(col + j, in);
+    transpose_16bit_8x8(col + j + 32, in + 8);
+    transpose_16bit_8x8(col + j + 64, in + 16);
+    transpose_16bit_8x8(col + j + 96, in + 24);

     idct32_full_8x32(in, in);
     store_buffer_8x32(in, dest, stride);
@@ -12,15 +12,152 @@
 #define VPX_DSP_X86_TRANSPOSE_SSE2_H_

 #include "./vpx_dsp_rtcd.h"
-#include "vpx_dsp/x86/inv_txfm_sse2.h"
-#include "vpx_dsp/x86/txfm_common_sse2.h"

-static INLINE void transpose_16bit_4x4(__m128i *res) {
-  const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]);
-  const __m128i tr0_1 = _mm_unpackhi_epi16(res[0], res[1]);
+static INLINE void transpose_16bit_4x4(const __m128i *const in,
+                                       __m128i *const out) {
+  // Unpack 16 bit elements. Goes from:
+  // in[0]: 00 01 02 03 XX XX XX XX
+  // in[1]: 10 11 12 13 XX XX XX XX
+  // in[2]: 20 21 22 23 XX XX XX XX
+  // in[3]: 30 31 32 33 XX XX XX XX
+  // to:
+  // tr0_0: 00 10 01 11 02 12 03 13
+  // tr0_1: 20 30 21 31 22 32 23 33
+  const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
+  const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);

-  res[0] = _mm_unpacklo_epi16(tr0_0, tr0_1);
-  res[1] = _mm_unpackhi_epi16(tr0_0, tr0_1);
+  // Unpack 32 bit elements resulting in:
+  // out[0]: 00 10 20 30 01 11 21 31
+  // out[1]: 02 12 22 32 03 13 23 33
+  out[0] = _mm_unpacklo_epi32(tr0_0, tr0_1);
+  out[1] = _mm_unpackhi_epi32(tr0_0, tr0_1);
+}
+
+static INLINE void transpose_16bit_4x8(const __m128i *const in,
+                                       __m128i *const out) {
+  // Unpack 16 bit elements. Goes from:
+  // in[0]: 00 01 02 03 XX XX XX XX
+  // in[1]: 10 11 12 13 XX XX XX XX
+  // in[2]: 20 21 22 23 XX XX XX XX
+  // in[3]: 30 31 32 33 XX XX XX XX
+  // in[4]: 40 41 42 43 XX XX XX XX
+  // in[5]: 50 51 52 53 XX XX XX XX
+  // in[6]: 60 61 62 63 XX XX XX XX
+  // in[7]: 70 71 72 73 XX XX XX XX
+  // to:
+  // tr0_0: 00 10 01 11 02 12 03 13
+  // tr0_1: 20 30 21 31 22 32 23 33
+  // tr0_2: 40 50 41 51 42 52 43 53
+  // tr0_3: 60 70 61 71 62 72 63 73
+  const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
+  const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
+  const __m128i tr0_2 = _mm_unpacklo_epi16(in[4], in[5]);
+  const __m128i tr0_3 = _mm_unpacklo_epi16(in[6], in[7]);
+
+  // Unpack 32 bit elements resulting in:
+  // tr1_0: 00 10 20 30 01 11 21 31
+  // tr1_1: 40 50 60 70 41 51 61 71
+  // tr1_2: 02 12 22 32 03 13 23 33
+  // tr1_3: 42 52 62 72 43 53 63 73
+  const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+  const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
+  const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+  const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
+
+  // Unpack 64 bit elements resulting in:
+  // out[0]: 00 10 20 30 40 50 60 70
+  // out[1]: 01 11 21 31 41 51 61 71
+  // out[2]: 02 12 22 32 42 52 62 72
+  // out[3]: 03 13 23 33 43 53 63 73
+  out[0] = _mm_unpacklo_epi64(tr1_0, tr1_1);
+  out[1] = _mm_unpackhi_epi64(tr1_0, tr1_1);
+  out[2] = _mm_unpacklo_epi64(tr1_2, tr1_3);
+  out[3] = _mm_unpackhi_epi64(tr1_2, tr1_3);
+}
+
+static INLINE void transpose_16bit_8x8(const __m128i *const in,
+                                       __m128i *const out) {
+  // Unpack 16 bit elements. Goes from:
+  // in[0]: 00 01 02 03 04 05 06 07
+  // in[1]: 10 11 12 13 14 15 16 17
+  // in[2]: 20 21 22 23 24 25 26 27
+  // in[3]: 30 31 32 33 34 35 36 37
+  // in[4]: 40 41 42 43 44 45 46 47
+  // in[5]: 50 51 52 53 54 55 56 57
+  // in[6]: 60 61 62 63 64 65 66 67
+  // in[7]: 70 71 72 73 74 75 76 77
+  // to:
+  // tr0_0: 00 10 01 11 02 12 03 13
+  // tr0_1: 20 30 21 31 22 32 23 33
+  // tr0_2: 40 50 41 51 42 52 43 53
+  // tr0_3: 60 70 61 71 62 72 63 73
+  // tr0_4: 04 14 05 15 06 16 07 17
+  // tr0_5: 24 34 25 35 26 36 27 37
+  // tr0_6: 44 54 45 55 46 56 47 57
+  // tr0_7: 64 74 65 75 66 76 67 77
+  const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
+  const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
+  const __m128i tr0_2 = _mm_unpacklo_epi16(in[4], in[5]);
+  const __m128i tr0_3 = _mm_unpacklo_epi16(in[6], in[7]);
+  const __m128i tr0_4 = _mm_unpackhi_epi16(in[0], in[1]);
+  const __m128i tr0_5 = _mm_unpackhi_epi16(in[2], in[3]);
+  const __m128i tr0_6 = _mm_unpackhi_epi16(in[4], in[5]);
+  const __m128i tr0_7 = _mm_unpackhi_epi16(in[6], in[7]);
+
+  // Unpack 32 bit elements resulting in:
+  // tr1_0: 00 10 20 30 01 11 21 31
+  // tr1_1: 40 50 60 70 41 51 61 71
+  // tr1_2: 04 14 24 34 05 15 25 35
+  // tr1_3: 44 54 64 74 45 55 65 75
+  // tr1_4: 02 12 22 32 03 13 23 33
+  // tr1_5: 42 52 62 72 43 53 63 73
+  // tr1_6: 06 16 26 36 07 17 27 37
+  // tr1_7: 46 56 66 76 47 57 67 77
+  const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+  const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
+  const __m128i tr1_2 = _mm_unpacklo_epi32(tr0_4, tr0_5);
+  const __m128i tr1_3 = _mm_unpacklo_epi32(tr0_6, tr0_7);
+  const __m128i tr1_4 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+  const __m128i tr1_5 = _mm_unpackhi_epi32(tr0_2, tr0_3);
+  const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
+  const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
+
+  // Unpack 64 bit elements resulting in:
+  // out[0]: 00 10 20 30 40 50 60 70
+  // out[1]: 01 11 21 31 41 51 61 71
+  // out[2]: 02 12 22 32 42 52 62 72
+  // out[3]: 03 13 23 33 43 53 63 73
+  // out[4]: 04 14 24 34 44 54 64 74
+  // out[5]: 05 15 25 35 45 55 65 75
+  // out[6]: 06 16 26 36 46 56 66 76
+  // out[7]: 07 17 27 37 47 57 67 77
+  out[0] = _mm_unpacklo_epi64(tr1_0, tr1_1);
+  out[1] = _mm_unpackhi_epi64(tr1_0, tr1_1);
+  out[2] = _mm_unpacklo_epi64(tr1_4, tr1_5);
+  out[3] = _mm_unpackhi_epi64(tr1_4, tr1_5);
+  out[4] = _mm_unpacklo_epi64(tr1_2, tr1_3);
+  out[5] = _mm_unpackhi_epi64(tr1_2, tr1_3);
+  out[6] = _mm_unpacklo_epi64(tr1_6, tr1_7);
+  out[7] = _mm_unpackhi_epi64(tr1_6, tr1_7);
+}
+
+// Transpose in-place
+static INLINE void transpose_16bit_16x16(__m128i *const left,
+                                         __m128i *const right) {
+  __m128i tbuf[8];
+  transpose_16bit_8x8(left, left);
+  transpose_16bit_8x8(right, tbuf);
+  transpose_16bit_8x8(left + 8, right);
+  transpose_16bit_8x8(right + 8, right + 8);
+
+  left[8] = tbuf[0];
+  left[9] = tbuf[1];
+  left[10] = tbuf[2];
+  left[11] = tbuf[3];
+  left[12] = tbuf[4];
+  left[13] = tbuf[5];
+  left[14] = tbuf[6];
+  left[15] = tbuf[7];
 }

 static INLINE void transpose_32bit_4x4(__m128i *const a0, __m128i *const a1,
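The new transpose_16bit_16x16() composes the 8x8 kernel blockwise: viewing the 16x16 block as quadrants [[A, B], [C, D]], with left holding A over C and right holding B over D, the transpose is [[A^T, C^T], [B^T, D^T]], so each quadrant is transposed and the off-diagonal quadrants swap places through tbuf. A scalar sketch of the same idea (hypothetical, for illustration):

#include <stdint.h>
#include <string.h>

// Each int16_t[8] row models one __m128i of eight 16-bit values.
static void transpose8(int16_t out[8][8], int16_t in[8][8]) {
  int16_t tmp[8][8];
  int i, j;
  for (i = 0; i < 8; i++)
    for (j = 0; j < 8; j++) tmp[j][i] = in[i][j];
  memcpy(out, tmp, sizeof(tmp));  // copy via tmp so in == out is safe
}

// Mirrors transpose_16bit_16x16(): left = quadrants A (rows 0-7) and
// C (rows 8-15); right = quadrants B and D.
static void transpose16(int16_t left[16][8], int16_t right[16][8]) {
  int16_t tbuf[8][8];
  transpose8(&left[0], &left[0]);        // A -> A^T, stays top-left
  transpose8(tbuf, &right[0]);           // B^T parked in tbuf
  transpose8(&right[0], &left[8]);       // C^T moves to top-right
  transpose8(&right[8], &right[8]);      // D -> D^T, stays bottom-right
  memcpy(&left[8], tbuf, sizeof(tbuf));  // B^T lands bottom-left
}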