Convert 8x8 idct x86 macros to inline functions
Change-Id: Id59865fd6c453a24121ce7160048d67875fc67ce
parent 4f9d852759
commit cbb991b6b8
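For context, the heart of the change is mechanical: statement macros such as RECON_AND_STORE become static inline functions, which adds type checking, evaluates arguments once, and turns the `zero` register into a local instead of a name silently captured from the caller's scope. A minimal before/after sketch, lifted from the inv_txfm_sse2.h hunk below (spelled `inline` here; libvpx uses its `INLINE` portability macro):

    #include <emmintrin.h>  // SSE2
    #include <stdint.h>

    // Before: a statement macro that captures `zero` from the caller's scope.
    #define RECON_AND_STORE(dest, in_x)                  \
      {                                                  \
        __m128i d0 = _mm_loadl_epi64((__m128i *)(dest)); \
        d0 = _mm_unpacklo_epi8(d0, zero);                \
        d0 = _mm_add_epi16(in_x, d0);                    \
        d0 = _mm_packus_epi16(d0, d0);                   \
        _mm_storel_epi64((__m128i *)(dest), d0);         \
      }

    // After: a type-checked inline function with `zero` as a local.
    static inline void recon_and_store(uint8_t *const dest, const __m128i in_x) {
      const __m128i zero = _mm_setzero_si128();
      __m128i d0 = _mm_loadl_epi64((__m128i *)(dest));  // load 8 pixels
      d0 = _mm_unpacklo_epi8(d0, zero);                 // widen to 16 bits
      d0 = _mm_add_epi16(in_x, d0);                     // add idct residual
      d0 = _mm_packus_epi16(d0, d0);                    // saturate to 8 bits
      _mm_storel_epi64((__m128i *)(dest), d0);          // store 8 pixels
    }

Because `zero` now lives inside the helper, the callers below can drop their own `const __m128i zero = _mm_setzero_si128();` declarations.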
@@ -54,7 +54,6 @@ void vp9_iht4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
 void vp9_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
                             int tx_type) {
   __m128i in[8];
-  const __m128i zero = _mm_setzero_si128();
   const __m128i final_rounding = _mm_set1_epi16(1 << 4);

   // load input data
@@ -106,14 +105,14 @@ void vp9_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
   in[6] = _mm_srai_epi16(in[6], 5);
   in[7] = _mm_srai_epi16(in[7], 5);

-  RECON_AND_STORE(dest + 0 * stride, in[0]);
-  RECON_AND_STORE(dest + 1 * stride, in[1]);
-  RECON_AND_STORE(dest + 2 * stride, in[2]);
-  RECON_AND_STORE(dest + 3 * stride, in[3]);
-  RECON_AND_STORE(dest + 4 * stride, in[4]);
-  RECON_AND_STORE(dest + 5 * stride, in[5]);
-  RECON_AND_STORE(dest + 6 * stride, in[6]);
-  RECON_AND_STORE(dest + 7 * stride, in[7]);
+  recon_and_store(dest + 0 * stride, in[0]);
+  recon_and_store(dest + 1 * stride, in[1]);
+  recon_and_store(dest + 2 * stride, in[2]);
+  recon_and_store(dest + 3 * stride, in[3]);
+  recon_and_store(dest + 4 * stride, in[4]);
+  recon_and_store(dest + 5 * stride, in[5]);
+  recon_and_store(dest + 6 * stride, in[6]);
+  recon_and_store(dest + 7 * stride, in[7]);
 }

 void vp9_iht16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
(File diff suppressed because it is too large.)
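Aside: the recurring final-rounding idiom in these files, an add of final_rounding = 1 << (bits - 1) followed by _mm_srai_epi16 by bits (bits = 5 for 8x8, 6 for 16x16 and 32x32), is round-to-nearest division by a power of two. A scalar per-lane model (ignoring that _mm_adds_epi16 saturates):

    #include <stdint.h>

    // Per-lane model of the adds/srai pairs in these hunks:
    // add half the divisor, then arithmetic-shift right.
    static int16_t final_round_shift(int16_t x, int bits) {
      return (int16_t)((x + (1 << (bits - 1))) >> bits);  // bits = 5 for 8x8
    }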
@@ -46,45 +46,44 @@ static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
   res[6] = _mm_unpacklo_epi64(tr1_6, tr1_7);
   res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7);
 }
-#define TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
-                      out2, out3, out4, out5, out6, out7)                 \
-  {                                                                       \
-    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1);                   \
-    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3);                   \
-    const __m128i tr0_2 = _mm_unpackhi_epi16(in0, in1);                   \
-    const __m128i tr0_3 = _mm_unpackhi_epi16(in2, in3);                   \
-    const __m128i tr0_4 = _mm_unpacklo_epi16(in4, in5);                   \
-    const __m128i tr0_5 = _mm_unpacklo_epi16(in6, in7);                   \
-    const __m128i tr0_6 = _mm_unpackhi_epi16(in4, in5);                   \
-    const __m128i tr0_7 = _mm_unpackhi_epi16(in6, in7);                   \
-                                                                          \
-    const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);               \
-    const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);               \
-    const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);               \
-    const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);               \
-    const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);               \
-    const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);               \
-    const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);               \
-    const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);               \
-                                                                          \
-    out0 = _mm_unpacklo_epi64(tr1_0, tr1_4);                              \
-    out1 = _mm_unpackhi_epi64(tr1_0, tr1_4);                              \
-    out2 = _mm_unpacklo_epi64(tr1_2, tr1_6);                              \
-    out3 = _mm_unpackhi_epi64(tr1_2, tr1_6);                              \
-    out4 = _mm_unpacklo_epi64(tr1_1, tr1_5);                              \
-    out5 = _mm_unpackhi_epi64(tr1_1, tr1_5);                              \
-    out6 = _mm_unpacklo_epi64(tr1_3, tr1_7);                              \
-    out7 = _mm_unpackhi_epi64(tr1_3, tr1_7);                              \
-  }
-
-#define TRANSPOSE_8X4(in0, in1, in2, in3, out0, out1)       \
-  {                                                         \
-    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1);     \
-    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3);     \
-                                                            \
-    in0 = _mm_unpacklo_epi32(tr0_0, tr0_1); /* i1 i0 */     \
-    in1 = _mm_unpackhi_epi32(tr0_0, tr0_1); /* i3 i2 */     \
-  }
+static INLINE void idct8x8_12_transpose_16bit_4x8(const __m128i *const in,
+                                                  __m128i *const out) {
+  // Unpack 16 bit elements. Goes from:
+  // in[0]: 30 31 32 33 00 01 02 03
+  // in[1]: 20 21 22 23 10 11 12 13
+  // in[2]: 40 41 42 43 70 71 72 73
+  // in[3]: 50 51 52 53 60 61 62 63
+  // to:
+  // tr0_0: 00 10 01 11 02 12 03 13
+  // tr0_1: 20 30 21 31 22 32 23 33
+  // tr0_2: 40 50 41 51 42 52 43 53
+  // tr0_3: 60 70 61 71 62 72 63 73
+  const __m128i tr0_0 = _mm_unpackhi_epi16(in[0], in[1]);
+  const __m128i tr0_1 = _mm_unpacklo_epi16(in[1], in[0]);
+  const __m128i tr0_2 = _mm_unpacklo_epi16(in[2], in[3]);
+  const __m128i tr0_3 = _mm_unpackhi_epi16(in[3], in[2]);
+
+  // Unpack 32 bit elements resulting in:
+  // tr1_0: 00 10 20 30 01 11 21 31
+  // tr1_1: 02 12 22 32 03 13 23 33
+  // tr1_2: 40 50 60 70 41 51 61 71
+  // tr1_3: 42 52 62 72 43 53 63 73
+  const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+  const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
+  const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+  const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);
+
+  // Unpack 64 bit elements resulting in:
+  // out[0]: 00 10 20 30 40 50 60 70
+  // out[1]: 01 11 21 31 41 51 61 71
+  // out[2]: 02 12 22 32 42 52 62 72
+  // out[3]: 03 13 23 33 43 53 63 73
+  out[0] = _mm_unpacklo_epi64(tr1_0, tr1_1);
+  out[1] = _mm_unpackhi_epi64(tr1_0, tr1_1);
+  out[2] = _mm_unpacklo_epi64(tr1_2, tr1_3);
+  out[3] = _mm_unpackhi_epi64(tr1_2, tr1_3);
+}

 static INLINE void array_transpose_4X8(__m128i *in, __m128i *out) {
   const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
@@ -151,7 +150,8 @@ static INLINE __m128i load_input_data(const tran_low_t *data) {
 #endif
 }

-static INLINE void load_buffer_8x16(const tran_low_t *input, __m128i *in) {
+static INLINE void load_buffer_8x16(const tran_low_t *const input,
+                                    __m128i *const in) {
   in[0] = load_input_data(input + 0 * 16);
   in[1] = load_input_data(input + 1 * 16);
   in[2] = load_input_data(input + 2 * 16);
@@ -171,18 +171,17 @@ static INLINE void load_buffer_8x16(const tran_low_t *input, __m128i *in) {
   in[15] = load_input_data(input + 15 * 16);
 }

-#define RECON_AND_STORE(dest, in_x)                  \
-  {                                                  \
-    __m128i d0 = _mm_loadl_epi64((__m128i *)(dest)); \
-    d0 = _mm_unpacklo_epi8(d0, zero);                \
-    d0 = _mm_add_epi16(in_x, d0);                    \
-    d0 = _mm_packus_epi16(d0, d0);                   \
-    _mm_storel_epi64((__m128i *)(dest), d0);         \
-  }
+static INLINE void recon_and_store(uint8_t *const dest, const __m128i in_x) {
+  const __m128i zero = _mm_setzero_si128();
+  __m128i d0 = _mm_loadl_epi64((__m128i *)(dest));
+  d0 = _mm_unpacklo_epi8(d0, zero);
+  d0 = _mm_add_epi16(in_x, d0);
+  d0 = _mm_packus_epi16(d0, d0);
+  _mm_storel_epi64((__m128i *)(dest), d0);
+}

 static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int stride) {
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
-  const __m128i zero = _mm_setzero_si128();
   // Final rounding and shift
   in[0] = _mm_adds_epi16(in[0], final_rounding);
   in[1] = _mm_adds_epi16(in[1], final_rounding);
@@ -218,60 +217,24 @@ static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int stride) {
   in[14] = _mm_srai_epi16(in[14], 6);
   in[15] = _mm_srai_epi16(in[15], 6);

-  RECON_AND_STORE(dest + 0 * stride, in[0]);
-  RECON_AND_STORE(dest + 1 * stride, in[1]);
-  RECON_AND_STORE(dest + 2 * stride, in[2]);
-  RECON_AND_STORE(dest + 3 * stride, in[3]);
-  RECON_AND_STORE(dest + 4 * stride, in[4]);
-  RECON_AND_STORE(dest + 5 * stride, in[5]);
-  RECON_AND_STORE(dest + 6 * stride, in[6]);
-  RECON_AND_STORE(dest + 7 * stride, in[7]);
-  RECON_AND_STORE(dest + 8 * stride, in[8]);
-  RECON_AND_STORE(dest + 9 * stride, in[9]);
-  RECON_AND_STORE(dest + 10 * stride, in[10]);
-  RECON_AND_STORE(dest + 11 * stride, in[11]);
-  RECON_AND_STORE(dest + 12 * stride, in[12]);
-  RECON_AND_STORE(dest + 13 * stride, in[13]);
-  RECON_AND_STORE(dest + 14 * stride, in[14]);
-  RECON_AND_STORE(dest + 15 * stride, in[15]);
+  recon_and_store(dest + 0 * stride, in[0]);
+  recon_and_store(dest + 1 * stride, in[1]);
+  recon_and_store(dest + 2 * stride, in[2]);
+  recon_and_store(dest + 3 * stride, in[3]);
+  recon_and_store(dest + 4 * stride, in[4]);
+  recon_and_store(dest + 5 * stride, in[5]);
+  recon_and_store(dest + 6 * stride, in[6]);
+  recon_and_store(dest + 7 * stride, in[7]);
+  recon_and_store(dest + 8 * stride, in[8]);
+  recon_and_store(dest + 9 * stride, in[9]);
+  recon_and_store(dest + 10 * stride, in[10]);
+  recon_and_store(dest + 11 * stride, in[11]);
+  recon_and_store(dest + 12 * stride, in[12]);
+  recon_and_store(dest + 13 * stride, in[13]);
+  recon_and_store(dest + 14 * stride, in[14]);
+  recon_and_store(dest + 15 * stride, in[15]);
 }

-#define TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, out0, out1, out2, out3) \
-  {                                                                      \
-    const __m128i tr0_0 = _mm_unpackhi_epi16(tmp0, tmp1);                \
-    const __m128i tr0_1 = _mm_unpacklo_epi16(tmp1, tmp0);                \
-    const __m128i tr0_4 = _mm_unpacklo_epi16(tmp2, tmp3);                \
-    const __m128i tr0_5 = _mm_unpackhi_epi16(tmp3, tmp2);                \
-                                                                         \
-    const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);              \
-    const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);              \
-    const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);              \
-    const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);              \
-                                                                         \
-    out0 = _mm_unpacklo_epi64(tr1_0, tr1_4);                             \
-    out1 = _mm_unpackhi_epi64(tr1_0, tr1_4);                             \
-    out2 = _mm_unpacklo_epi64(tr1_2, tr1_6);                             \
-    out3 = _mm_unpackhi_epi64(tr1_2, tr1_6);                             \
-  }
-
-#define TRANSPOSE_8X8_10(in0, in1, in2, in3, out0, out1) \
-  {                                                      \
-    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1);  \
-    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3);  \
-    out0 = _mm_unpacklo_epi32(tr0_0, tr0_1);             \
-    out1 = _mm_unpackhi_epi32(tr0_0, tr0_1);             \
-  }
-
 // Define Macro for multiplying elements by constants and adding them together.
 #define MULTIPLICATION_AND_ADD(lo_0, hi_0, lo_1, hi_1, cst0, cst1, cst2, cst3, \
                                res0, res1, res2, res3)                         \
   {                                                                            \
     res0 = idct_calc_wraplow_sse2(lo_0, hi_0, cst0);                           \
     res1 = idct_calc_wraplow_sse2(lo_0, hi_0, cst1);                           \
     res2 = idct_calc_wraplow_sse2(lo_1, hi_1, cst2);                           \
     res3 = idct_calc_wraplow_sse2(lo_1, hi_1, cst3);                           \
   }

 static INLINE void recon_and_store4x4_sse2(const __m128i *const in,
                                            uint8_t *const dest,
                                            const int stride) {
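The MULTIPLICATION_AND_ADD macro above delegates to idct_calc_wraplow_sse2, whose body is outside this diff. Judging from the expanded multiply/round/shift/pack sequences still visible in the ssse3 hunks below, it plausibly amounts to the following sketch (the helper's exact definition in the header may differ):

    #include <emmintrin.h>

    // Sketch under the assumption above, not the library's actual definition.
    // In libvpx, DCT_CONST_BITS is 14, so DCT_CONST_ROUNDING is 1 << 13.
    #define DCT_CONST_BITS 14
    #define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))

    static __m128i idct_calc_wraplow_sketch(const __m128i lo, const __m128i hi,
                                            const __m128i cst) {
      const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
      __m128i t0 = _mm_madd_epi16(lo, cst);  // pairwise 16x16 -> 32-bit dot products
      __m128i t1 = _mm_madd_epi16(hi, cst);
      t0 = _mm_srai_epi32(_mm_add_epi32(t0, rounding), DCT_CONST_BITS);
      t1 = _mm_srai_epi32(_mm_add_epi32(t1, rounding), DCT_CONST_BITS);
      return _mm_packs_epi32(t0, t1);  // saturating pack back to 16-bit lanes
    }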
@@ -12,11 +12,11 @@

 #include "./vpx_dsp_rtcd.h"
 #include "vpx_dsp/x86/inv_txfm_sse2.h"
+#include "vpx_dsp/x86/transpose_sse2.h"
 #include "vpx_dsp/x86/txfm_common_sse2.h"

 void vpx_idct8x8_64_add_ssse3(const tran_low_t *input, uint8_t *dest,
                               int stride) {
-  const __m128i zero = _mm_setzero_si128();
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 4);
   const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
@@ -28,36 +28,35 @@ void vpx_idct8x8_64_add_ssse3(const tran_low_t *input, uint8_t *dest,
   const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64);
   const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64);

-  __m128i in0, in1, in2, in3, in4, in5, in6, in7;
+  __m128i in[8];
   __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7;
   __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
   int i;

   // Load input data.
-  in0 = load_input_data(input);
-  in1 = load_input_data(input + 8 * 1);
-  in2 = load_input_data(input + 8 * 2);
-  in3 = load_input_data(input + 8 * 3);
-  in4 = load_input_data(input + 8 * 4);
-  in5 = load_input_data(input + 8 * 5);
-  in6 = load_input_data(input + 8 * 6);
-  in7 = load_input_data(input + 8 * 7);
+  in[0] = load_input_data(input);
+  in[1] = load_input_data(input + 8 * 1);
+  in[2] = load_input_data(input + 8 * 2);
+  in[3] = load_input_data(input + 8 * 3);
+  in[4] = load_input_data(input + 8 * 4);
+  in[5] = load_input_data(input + 8 * 5);
+  in[6] = load_input_data(input + 8 * 6);
+  in[7] = load_input_data(input + 8 * 7);

   // 2-D
   for (i = 0; i < 2; i++) {
     // 8x8 Transpose is copied from vpx_fdct8x8_sse2()
-    TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
-                  in4, in5, in6, in7);
+    transpose_16bit_8x8(in, in);

     // 4-stage 1D idct8x8
     {
       /* Stage1 */
       {
-        const __m128i lo_17 = _mm_unpacklo_epi16(in1, in7);
-        const __m128i hi_17 = _mm_unpackhi_epi16(in1, in7);
-        const __m128i lo_35 = _mm_unpacklo_epi16(in3, in5);
-        const __m128i hi_35 = _mm_unpackhi_epi16(in3, in5);
+        const __m128i lo_17 = _mm_unpacklo_epi16(in[1], in[7]);
+        const __m128i hi_17 = _mm_unpackhi_epi16(in[1], in[7]);
+        const __m128i lo_35 = _mm_unpacklo_epi16(in[3], in[5]);
+        const __m128i hi_35 = _mm_unpackhi_epi16(in[3], in[5]);

         {
           tmp0 = _mm_madd_epi16(lo_17, stg1_0);
@@ -96,12 +95,12 @@ void vpx_idct8x8_64_add_ssse3(const tran_low_t *input, uint8_t *dest,

       /* Stage2 */
       {
-        const __m128i lo_26 = _mm_unpacklo_epi16(in2, in6);
-        const __m128i hi_26 = _mm_unpackhi_epi16(in2, in6);
+        const __m128i lo_26 = _mm_unpacklo_epi16(in[2], in[6]);
+        const __m128i hi_26 = _mm_unpackhi_epi16(in[2], in[6]);

         {
-          tmp0 = _mm_unpacklo_epi16(in0, in4);
-          tmp1 = _mm_unpackhi_epi16(in0, in4);
+          tmp0 = _mm_unpacklo_epi16(in[0], in[4]);
+          tmp1 = _mm_unpackhi_epi16(in[0], in[4]);

           tmp2 = _mm_madd_epi16(tmp0, stk2_0);
           tmp3 = _mm_madd_epi16(tmp1, stk2_0);
@@ -176,44 +175,44 @@ void vpx_idct8x8_64_add_ssse3(const tran_low_t *input, uint8_t *dest,
      }

      /* Stage4 */
-      in0 = _mm_add_epi16(stp1_0, stp2_7);
-      in1 = _mm_add_epi16(stp1_1, stp1_6);
-      in2 = _mm_add_epi16(stp1_2, stp1_5);
-      in3 = _mm_add_epi16(stp1_3, stp2_4);
-      in4 = _mm_sub_epi16(stp1_3, stp2_4);
-      in5 = _mm_sub_epi16(stp1_2, stp1_5);
-      in6 = _mm_sub_epi16(stp1_1, stp1_6);
-      in7 = _mm_sub_epi16(stp1_0, stp2_7);
+      in[0] = _mm_add_epi16(stp1_0, stp2_7);
+      in[1] = _mm_add_epi16(stp1_1, stp1_6);
+      in[2] = _mm_add_epi16(stp1_2, stp1_5);
+      in[3] = _mm_add_epi16(stp1_3, stp2_4);
+      in[4] = _mm_sub_epi16(stp1_3, stp2_4);
+      in[5] = _mm_sub_epi16(stp1_2, stp1_5);
+      in[6] = _mm_sub_epi16(stp1_1, stp1_6);
+      in[7] = _mm_sub_epi16(stp1_0, stp2_7);
    }
  }

   // Final rounding and shift
-  in0 = _mm_adds_epi16(in0, final_rounding);
-  in1 = _mm_adds_epi16(in1, final_rounding);
-  in2 = _mm_adds_epi16(in2, final_rounding);
-  in3 = _mm_adds_epi16(in3, final_rounding);
-  in4 = _mm_adds_epi16(in4, final_rounding);
-  in5 = _mm_adds_epi16(in5, final_rounding);
-  in6 = _mm_adds_epi16(in6, final_rounding);
-  in7 = _mm_adds_epi16(in7, final_rounding);
+  in[0] = _mm_adds_epi16(in[0], final_rounding);
+  in[1] = _mm_adds_epi16(in[1], final_rounding);
+  in[2] = _mm_adds_epi16(in[2], final_rounding);
+  in[3] = _mm_adds_epi16(in[3], final_rounding);
+  in[4] = _mm_adds_epi16(in[4], final_rounding);
+  in[5] = _mm_adds_epi16(in[5], final_rounding);
+  in[6] = _mm_adds_epi16(in[6], final_rounding);
+  in[7] = _mm_adds_epi16(in[7], final_rounding);

-  in0 = _mm_srai_epi16(in0, 5);
-  in1 = _mm_srai_epi16(in1, 5);
-  in2 = _mm_srai_epi16(in2, 5);
-  in3 = _mm_srai_epi16(in3, 5);
-  in4 = _mm_srai_epi16(in4, 5);
-  in5 = _mm_srai_epi16(in5, 5);
-  in6 = _mm_srai_epi16(in6, 5);
-  in7 = _mm_srai_epi16(in7, 5);
+  in[0] = _mm_srai_epi16(in[0], 5);
+  in[1] = _mm_srai_epi16(in[1], 5);
+  in[2] = _mm_srai_epi16(in[2], 5);
+  in[3] = _mm_srai_epi16(in[3], 5);
+  in[4] = _mm_srai_epi16(in[4], 5);
+  in[5] = _mm_srai_epi16(in[5], 5);
+  in[6] = _mm_srai_epi16(in[6], 5);
+  in[7] = _mm_srai_epi16(in[7], 5);

-  RECON_AND_STORE(dest + 0 * stride, in0);
-  RECON_AND_STORE(dest + 1 * stride, in1);
-  RECON_AND_STORE(dest + 2 * stride, in2);
-  RECON_AND_STORE(dest + 3 * stride, in3);
-  RECON_AND_STORE(dest + 4 * stride, in4);
-  RECON_AND_STORE(dest + 5 * stride, in5);
-  RECON_AND_STORE(dest + 6 * stride, in6);
-  RECON_AND_STORE(dest + 7 * stride, in7);
+  recon_and_store(dest + 0 * stride, in[0]);
+  recon_and_store(dest + 1 * stride, in[1]);
+  recon_and_store(dest + 2 * stride, in[2]);
+  recon_and_store(dest + 3 * stride, in[3]);
+  recon_and_store(dest + 4 * stride, in[4]);
+  recon_and_store(dest + 5 * stride, in[5]);
+  recon_and_store(dest + 6 * stride, in[6]);
+  recon_and_store(dest + 7 * stride, in[7]);
 }

 void vpx_idct8x8_12_add_ssse3(const tran_low_t *input, uint8_t *dest,
@@ -232,82 +231,82 @@ void vpx_idct8x8_12_add_ssse3(const tran_low_t *input, uint8_t *dest,
   const __m128i stg2_3 = pair_set_epi16(2 * cospi_8_64, 2 * cospi_8_64);
   const __m128i stg3_0 = pair_set_epi16(-cospi_16_64, cospi_16_64);

-  __m128i in0, in1, in2, in3, in4, in5, in6, in7;
+  __m128i in[8];
   __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7;
   __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
-  __m128i tmp0, tmp1, tmp2, tmp3;
+  __m128i tmp[4];

   // Rows. Load 4-row input data.
-  in0 = load_input_data(input);
-  in1 = load_input_data(input + 8 * 1);
-  in2 = load_input_data(input + 8 * 2);
-  in3 = load_input_data(input + 8 * 3);
+  in[0] = load_input_data(input);
+  in[1] = load_input_data(input + 8 * 1);
+  in[2] = load_input_data(input + 8 * 2);
+  in[3] = load_input_data(input + 8 * 3);

-  // 8x4 Transpose
-  TRANSPOSE_8X8_10(in0, in1, in2, in3, in0, in1);
+  // 4x4 Transpose
+  transpose_16bit_4x4(in, in);

   // Stage1
-  tmp0 = _mm_mulhrs_epi16(in0, stg1_0);
-  tmp1 = _mm_mulhrs_epi16(in0, stg1_1);
-  tmp2 = _mm_mulhrs_epi16(in1, stg1_2);
-  tmp3 = _mm_mulhrs_epi16(in1, stg1_3);
+  tmp[0] = _mm_mulhrs_epi16(in[0], stg1_0);
+  tmp[1] = _mm_mulhrs_epi16(in[0], stg1_1);
+  tmp[2] = _mm_mulhrs_epi16(in[1], stg1_2);
+  tmp[3] = _mm_mulhrs_epi16(in[1], stg1_3);

-  stp1_4 = _mm_unpackhi_epi64(tmp0, tmp1);
-  stp1_5 = _mm_unpackhi_epi64(tmp2, tmp3);
+  stp1_4 = _mm_unpackhi_epi64(tmp[0], tmp[1]);
+  stp1_5 = _mm_unpackhi_epi64(tmp[2], tmp[3]);

   // Stage2
-  tmp0 = _mm_mulhrs_epi16(in0, stg2_0);
-  stp2_0 = _mm_unpacklo_epi64(tmp0, tmp0);
+  tmp[0] = _mm_mulhrs_epi16(in[0], stg2_0);
+  stp2_0 = _mm_unpacklo_epi64(tmp[0], tmp[0]);

-  tmp1 = _mm_mulhrs_epi16(in1, stg2_2);
-  tmp2 = _mm_mulhrs_epi16(in1, stg2_3);
-  stp2_2 = _mm_unpacklo_epi64(tmp2, tmp1);
+  tmp[1] = _mm_mulhrs_epi16(in[1], stg2_2);
+  tmp[2] = _mm_mulhrs_epi16(in[1], stg2_3);
+  stp2_2 = _mm_unpacklo_epi64(tmp[2], tmp[1]);

-  tmp0 = _mm_add_epi16(stp1_4, stp1_5);
-  tmp1 = _mm_sub_epi16(stp1_4, stp1_5);
+  tmp[0] = _mm_add_epi16(stp1_4, stp1_5);
+  tmp[1] = _mm_sub_epi16(stp1_4, stp1_5);

-  stp2_4 = tmp0;
-  stp2_5 = _mm_unpacklo_epi64(tmp1, zero);
-  stp2_6 = _mm_unpackhi_epi64(tmp1, zero);
+  stp2_4 = tmp[0];
+  stp2_5 = _mm_unpacklo_epi64(tmp[1], zero);
+  stp2_6 = _mm_unpackhi_epi64(tmp[1], zero);

-  tmp0 = _mm_unpacklo_epi16(stp2_5, stp2_6);
-  tmp1 = _mm_madd_epi16(tmp0, stg3_0);
-  tmp2 = _mm_madd_epi16(tmp0, stk2_0);  // stg3_1 = stk2_0
+  tmp[0] = _mm_unpacklo_epi16(stp2_5, stp2_6);
+  tmp[1] = _mm_madd_epi16(tmp[0], stg3_0);
+  tmp[2] = _mm_madd_epi16(tmp[0], stk2_0);  // stg3_1 = stk2_0

-  tmp1 = _mm_add_epi32(tmp1, rounding);
-  tmp2 = _mm_add_epi32(tmp2, rounding);
-  tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);
-  tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
+  tmp[1] = _mm_add_epi32(tmp[1], rounding);
+  tmp[2] = _mm_add_epi32(tmp[2], rounding);
+  tmp[1] = _mm_srai_epi32(tmp[1], DCT_CONST_BITS);
+  tmp[2] = _mm_srai_epi32(tmp[2], DCT_CONST_BITS);

-  stp1_5 = _mm_packs_epi32(tmp1, tmp2);
+  stp1_5 = _mm_packs_epi32(tmp[1], tmp[2]);

   // Stage3
-  tmp2 = _mm_add_epi16(stp2_0, stp2_2);
-  tmp3 = _mm_sub_epi16(stp2_0, stp2_2);
+  tmp[2] = _mm_add_epi16(stp2_0, stp2_2);
+  tmp[3] = _mm_sub_epi16(stp2_0, stp2_2);

-  stp1_2 = _mm_unpackhi_epi64(tmp3, tmp2);
-  stp1_3 = _mm_unpacklo_epi64(tmp3, tmp2);
+  stp1_2 = _mm_unpackhi_epi64(tmp[3], tmp[2]);
+  stp1_3 = _mm_unpacklo_epi64(tmp[3], tmp[2]);

   // Stage4
-  tmp0 = _mm_add_epi16(stp1_3, stp2_4);
-  tmp1 = _mm_add_epi16(stp1_2, stp1_5);
-  tmp2 = _mm_sub_epi16(stp1_3, stp2_4);
-  tmp3 = _mm_sub_epi16(stp1_2, stp1_5);
+  tmp[0] = _mm_add_epi16(stp1_3, stp2_4);
+  tmp[1] = _mm_add_epi16(stp1_2, stp1_5);
+  tmp[2] = _mm_sub_epi16(stp1_3, stp2_4);
+  tmp[3] = _mm_sub_epi16(stp1_2, stp1_5);

-  TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, in0, in1, in2, in3)
+  idct8x8_12_transpose_16bit_4x8(tmp, in);

   /* Stage1 */
-  stp1_4 = _mm_mulhrs_epi16(in1, stg1_0);
-  stp1_7 = _mm_mulhrs_epi16(in1, stg1_1);
-  stp1_5 = _mm_mulhrs_epi16(in3, stg1_2);
-  stp1_6 = _mm_mulhrs_epi16(in3, stg1_3);
+  stp1_4 = _mm_mulhrs_epi16(in[1], stg1_0);
+  stp1_7 = _mm_mulhrs_epi16(in[1], stg1_1);
+  stp1_5 = _mm_mulhrs_epi16(in[3], stg1_2);
+  stp1_6 = _mm_mulhrs_epi16(in[3], stg1_3);

   /* Stage2 */
-  stp2_0 = _mm_mulhrs_epi16(in0, stg2_0);
-  stp2_1 = _mm_mulhrs_epi16(in0, stg2_0);
+  stp2_0 = _mm_mulhrs_epi16(in[0], stg2_0);
+  stp2_1 = _mm_mulhrs_epi16(in[0], stg2_0);

-  stp2_2 = _mm_mulhrs_epi16(in2, stg2_2);
-  stp2_3 = _mm_mulhrs_epi16(in2, stg2_3);
+  stp2_2 = _mm_mulhrs_epi16(in[2], stg2_2);
+  stp2_3 = _mm_mulhrs_epi16(in[2], stg2_3);

   stp2_4 = _mm_add_epi16(stp1_4, stp1_5);
   stp2_5 = _mm_sub_epi16(stp1_4, stp1_5);
@@ -320,62 +319,62 @@ void vpx_idct8x8_12_add_ssse3(const tran_low_t *input, uint8_t *dest,
   stp1_2 = _mm_sub_epi16(stp2_1, stp2_2);
   stp1_3 = _mm_sub_epi16(stp2_0, stp2_3);

-  tmp0 = _mm_unpacklo_epi16(stp2_6, stp2_5);
-  tmp1 = _mm_unpackhi_epi16(stp2_6, stp2_5);
+  tmp[0] = _mm_unpacklo_epi16(stp2_6, stp2_5);
+  tmp[1] = _mm_unpackhi_epi16(stp2_6, stp2_5);

-  tmp2 = _mm_madd_epi16(tmp0, stk2_0);
-  tmp3 = _mm_madd_epi16(tmp1, stk2_0);
-  tmp2 = _mm_add_epi32(tmp2, rounding);
-  tmp3 = _mm_add_epi32(tmp3, rounding);
-  tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
-  tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
-  stp1_6 = _mm_packs_epi32(tmp2, tmp3);
+  tmp[2] = _mm_madd_epi16(tmp[0], stk2_0);
+  tmp[3] = _mm_madd_epi16(tmp[1], stk2_0);
+  tmp[2] = _mm_add_epi32(tmp[2], rounding);
+  tmp[3] = _mm_add_epi32(tmp[3], rounding);
+  tmp[2] = _mm_srai_epi32(tmp[2], DCT_CONST_BITS);
+  tmp[3] = _mm_srai_epi32(tmp[3], DCT_CONST_BITS);
+  stp1_6 = _mm_packs_epi32(tmp[2], tmp[3]);

-  tmp2 = _mm_madd_epi16(tmp0, stk2_1);
-  tmp3 = _mm_madd_epi16(tmp1, stk2_1);
-  tmp2 = _mm_add_epi32(tmp2, rounding);
-  tmp3 = _mm_add_epi32(tmp3, rounding);
-  tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);
-  tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);
-  stp1_5 = _mm_packs_epi32(tmp2, tmp3);
+  tmp[2] = _mm_madd_epi16(tmp[0], stk2_1);
+  tmp[3] = _mm_madd_epi16(tmp[1], stk2_1);
+  tmp[2] = _mm_add_epi32(tmp[2], rounding);
+  tmp[3] = _mm_add_epi32(tmp[3], rounding);
+  tmp[2] = _mm_srai_epi32(tmp[2], DCT_CONST_BITS);
+  tmp[3] = _mm_srai_epi32(tmp[3], DCT_CONST_BITS);
+  stp1_5 = _mm_packs_epi32(tmp[2], tmp[3]);

   /* Stage4 */
-  in0 = _mm_add_epi16(stp1_0, stp2_7);
-  in1 = _mm_add_epi16(stp1_1, stp1_6);
-  in2 = _mm_add_epi16(stp1_2, stp1_5);
-  in3 = _mm_add_epi16(stp1_3, stp2_4);
-  in4 = _mm_sub_epi16(stp1_3, stp2_4);
-  in5 = _mm_sub_epi16(stp1_2, stp1_5);
-  in6 = _mm_sub_epi16(stp1_1, stp1_6);
-  in7 = _mm_sub_epi16(stp1_0, stp2_7);
+  in[0] = _mm_add_epi16(stp1_0, stp2_7);
+  in[1] = _mm_add_epi16(stp1_1, stp1_6);
+  in[2] = _mm_add_epi16(stp1_2, stp1_5);
+  in[3] = _mm_add_epi16(stp1_3, stp2_4);
+  in[4] = _mm_sub_epi16(stp1_3, stp2_4);
+  in[5] = _mm_sub_epi16(stp1_2, stp1_5);
+  in[6] = _mm_sub_epi16(stp1_1, stp1_6);
+  in[7] = _mm_sub_epi16(stp1_0, stp2_7);

   // Final rounding and shift
-  in0 = _mm_adds_epi16(in0, final_rounding);
-  in1 = _mm_adds_epi16(in1, final_rounding);
-  in2 = _mm_adds_epi16(in2, final_rounding);
-  in3 = _mm_adds_epi16(in3, final_rounding);
-  in4 = _mm_adds_epi16(in4, final_rounding);
-  in5 = _mm_adds_epi16(in5, final_rounding);
-  in6 = _mm_adds_epi16(in6, final_rounding);
-  in7 = _mm_adds_epi16(in7, final_rounding);
+  in[0] = _mm_adds_epi16(in[0], final_rounding);
+  in[1] = _mm_adds_epi16(in[1], final_rounding);
+  in[2] = _mm_adds_epi16(in[2], final_rounding);
+  in[3] = _mm_adds_epi16(in[3], final_rounding);
+  in[4] = _mm_adds_epi16(in[4], final_rounding);
+  in[5] = _mm_adds_epi16(in[5], final_rounding);
+  in[6] = _mm_adds_epi16(in[6], final_rounding);
+  in[7] = _mm_adds_epi16(in[7], final_rounding);

-  in0 = _mm_srai_epi16(in0, 5);
-  in1 = _mm_srai_epi16(in1, 5);
-  in2 = _mm_srai_epi16(in2, 5);
-  in3 = _mm_srai_epi16(in3, 5);
-  in4 = _mm_srai_epi16(in4, 5);
-  in5 = _mm_srai_epi16(in5, 5);
-  in6 = _mm_srai_epi16(in6, 5);
-  in7 = _mm_srai_epi16(in7, 5);
+  in[0] = _mm_srai_epi16(in[0], 5);
+  in[1] = _mm_srai_epi16(in[1], 5);
+  in[2] = _mm_srai_epi16(in[2], 5);
+  in[3] = _mm_srai_epi16(in[3], 5);
+  in[4] = _mm_srai_epi16(in[4], 5);
+  in[5] = _mm_srai_epi16(in[5], 5);
+  in[6] = _mm_srai_epi16(in[6], 5);
+  in[7] = _mm_srai_epi16(in[7], 5);

-  RECON_AND_STORE(dest + 0 * stride, in0);
-  RECON_AND_STORE(dest + 1 * stride, in1);
-  RECON_AND_STORE(dest + 2 * stride, in2);
-  RECON_AND_STORE(dest + 3 * stride, in3);
-  RECON_AND_STORE(dest + 4 * stride, in4);
-  RECON_AND_STORE(dest + 5 * stride, in5);
-  RECON_AND_STORE(dest + 6 * stride, in6);
-  RECON_AND_STORE(dest + 7 * stride, in7);
+  recon_and_store(dest + 0 * stride, in[0]);
+  recon_and_store(dest + 1 * stride, in[1]);
+  recon_and_store(dest + 2 * stride, in[2]);
+  recon_and_store(dest + 3 * stride, in[3]);
+  recon_and_store(dest + 4 * stride, in[4]);
+  recon_and_store(dest + 5 * stride, in[5]);
+  recon_and_store(dest + 6 * stride, in[6]);
+  recon_and_store(dest + 7 * stride, in[7]);
 }

 // Only do addition and subtraction butterfly, size = 16, 32
@@ -618,7 +617,6 @@ static void idct32_34_second_half(const __m128i *in, __m128i *stp1) {
 // Only upper-left 8x8 has non-zero coeff
 void vpx_idct32x32_34_add_ssse3(const tran_low_t *input, uint8_t *dest,
                                 int stride) {
-  const __m128i zero = _mm_setzero_si128();
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
   __m128i in[32], col[32];
   __m128i stp1[32];
@@ -653,7 +651,7 @@ void vpx_idct32x32_34_add_ssse3(const tran_low_t *input, uint8_t *dest,
       // Final rounding and shift
       in[j] = _mm_adds_epi16(in[j], final_rounding);
       in[j] = _mm_srai_epi16(in[j], 6);
-      RECON_AND_STORE(dest + j * stride, in[j]);
+      recon_and_store(dest + j * stride, in[j]);
     }

     dest += 8;
@@ -954,7 +952,6 @@ static void idct32_8x32_135(__m128i *in /*in[32]*/) {

 static INLINE void store_buffer_8x32(__m128i *in, uint8_t *dst, int stride) {
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
-  const __m128i zero = _mm_setzero_si128();
   int j = 0;
   while (j < 32) {
     in[j] = _mm_adds_epi16(in[j], final_rounding);
@@ -963,16 +960,16 @@ static INLINE void store_buffer_8x32(__m128i *in, uint8_t *dst, int stride) {
     in[j] = _mm_srai_epi16(in[j], 6);
     in[j + 1] = _mm_srai_epi16(in[j + 1], 6);

-    RECON_AND_STORE(dst, in[j]);
+    recon_and_store(dst, in[j]);
     dst += stride;
-    RECON_AND_STORE(dst, in[j + 1]);
+    recon_and_store(dst, in[j + 1]);
     dst += stride;
     j += 2;
   }
 }

-static INLINE void recon_and_store(__m128i *in0, __m128i *in1, uint8_t *dest,
-                                   int stride) {
+static INLINE void recon_and_store_ssse3(__m128i *in0, __m128i *in1,
+                                         uint8_t *dest, int stride) {
   store_buffer_8x32(in0, dest, stride);
   store_buffer_8x32(in1, dest + 8, stride);
 }
@@ -1022,11 +1019,11 @@ void vpx_idct32x32_135_add_ssse3(const tran_low_t *input, uint8_t *dest,
   // rows
   transpose_and_copy_16x16(col0, col1, temp, left_16);
   idct32_135(col0, col1);
-  recon_and_store(col0, col1, dest, stride);
+  recon_and_store_ssse3(col0, col1, dest, stride);

   transpose_and_copy_16x16(col0, col1, temp, right_16);
   idct32_135(col0, col1);
-  recon_and_store(col0, col1, dest + 16, stride);
+  recon_and_store_ssse3(col0, col1, dest + 16, stride);
 }

 // For each 8x32 block __m128i in[32],
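The final hunk rewrites transpose_sse2.h: the TRANSPOSE_* macros give way to documented inline helpers with explicit lane maps in the comments. As a scalar reference for what the new transpose_16bit_8x8() in the hunk below computes (useful for unit-testing the SIMD path; not part of the diff):

    #include <stdint.h>

    // Scalar model: an 8x8 block of 16-bit values, rows become columns.
    static void transpose_16bit_8x8_ref(const int16_t in[8][8], int16_t out[8][8]) {
      for (int r = 0; r < 8; ++r) {
        for (int c = 0; c < 8; ++c) {
          out[c][r] = in[r][c];  // out row c collects element c of every input row
        }
      }
    }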
@@ -12,15 +12,91 @@
 #define VPX_DSP_X86_TRANSPOSE_SSE2_H_

 #include "./vpx_dsp_rtcd.h"
 #include "vpx_dsp/x86/inv_txfm_sse2.h"
 #include "vpx_dsp/x86/txfm_common_sse2.h"

-static INLINE void transpose_16bit_4x4(__m128i *res) {
-  const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]);
-  const __m128i tr0_1 = _mm_unpackhi_epi16(res[0], res[1]);
+static INLINE void transpose_16bit_4x4(const __m128i *const in,
+                                       __m128i *const out) {
+  // Unpack 16 bit elements. Goes from:
+  // in[0]: 00 01 02 03 XX XX XX XX
+  // in[1]: 10 11 12 13 XX XX XX XX
+  // in[2]: 20 21 22 23 XX XX XX XX
+  // in[3]: 30 31 32 33 XX XX XX XX
+  // to:
+  // tr0_0: 00 10 01 11 02 12 03 13
+  // tr0_1: 20 30 21 31 22 32 23 33
+  const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
+  const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);

-  res[0] = _mm_unpacklo_epi16(tr0_0, tr0_1);
-  res[1] = _mm_unpackhi_epi16(tr0_0, tr0_1);
+  // Unpack 32 bit elements resulting in:
+  // out[0]: 00 10 20 30 01 11 21 31
+  // out[1]: 02 12 22 32 03 13 23 33
+  out[0] = _mm_unpacklo_epi32(tr0_0, tr0_1);
+  out[1] = _mm_unpackhi_epi32(tr0_0, tr0_1);
 }

+static INLINE void transpose_16bit_8x8(const __m128i *const in,
+                                       __m128i *const out) {
+  // Unpack 16 bit elements. Goes from:
+  // in[0]: 00 01 02 03 04 05 06 07
+  // in[1]: 10 11 12 13 14 15 16 17
+  // in[2]: 20 21 22 23 24 25 26 27
+  // in[3]: 30 31 32 33 34 35 36 37
+  // in[4]: 40 41 42 43 44 45 46 47
+  // in[5]: 50 51 52 53 54 55 56 57
+  // in[6]: 60 61 62 63 64 65 66 67
+  // in[7]: 70 71 72 73 74 75 76 77
+  // to:
+  // tr0_0: 00 10 01 11 02 12 03 13
+  // tr0_1: 20 30 21 31 22 32 23 33
+  // tr0_2: 40 50 41 51 42 52 43 53
+  // tr0_3: 60 70 61 71 62 72 63 73
+  // tr0_4: 04 14 05 15 06 16 07 17
+  // tr0_5: 24 34 25 35 26 36 27 37
+  // tr0_6: 44 54 45 55 46 56 47 57
+  // tr0_7: 64 74 65 75 66 76 67 77
+  const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
+  const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
+  const __m128i tr0_2 = _mm_unpacklo_epi16(in[4], in[5]);
+  const __m128i tr0_3 = _mm_unpacklo_epi16(in[6], in[7]);
+  const __m128i tr0_4 = _mm_unpackhi_epi16(in[0], in[1]);
+  const __m128i tr0_5 = _mm_unpackhi_epi16(in[2], in[3]);
+  const __m128i tr0_6 = _mm_unpackhi_epi16(in[4], in[5]);
+  const __m128i tr0_7 = _mm_unpackhi_epi16(in[6], in[7]);
+
+  // Unpack 32 bit elements resulting in:
+  // tr1_0: 00 10 20 30 01 11 21 31
+  // tr1_1: 40 50 60 70 41 51 61 71
+  // tr1_2: 04 14 24 34 05 15 25 35
+  // tr1_3: 44 54 64 74 45 55 65 75
+  // tr1_4: 02 12 22 32 03 13 23 33
+  // tr1_5: 42 52 62 72 43 53 63 73
+  // tr1_6: 06 16 26 36 07 17 27 37
+  // tr1_7: 46 56 66 76 47 57 67 77
+  const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
+  const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
+  const __m128i tr1_2 = _mm_unpacklo_epi32(tr0_4, tr0_5);
+  const __m128i tr1_3 = _mm_unpacklo_epi32(tr0_6, tr0_7);
+  const __m128i tr1_4 = _mm_unpackhi_epi32(tr0_0, tr0_1);
+  const __m128i tr1_5 = _mm_unpackhi_epi32(tr0_2, tr0_3);
+  const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);
+  const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);
+
+  // Unpack 64 bit elements resulting in:
+  // out[0]: 00 10 20 30 40 50 60 70
+  // out[1]: 01 11 21 31 41 51 61 71
+  // out[2]: 02 12 22 32 42 52 62 72
+  // out[3]: 03 13 23 33 43 53 63 73
+  // out[4]: 04 14 24 34 44 54 64 74
+  // out[5]: 05 15 25 35 45 55 65 75
+  // out[6]: 06 16 26 36 46 56 66 76
+  // out[7]: 07 17 27 37 47 57 67 77
+  out[0] = _mm_unpacklo_epi64(tr1_0, tr1_1);
+  out[1] = _mm_unpackhi_epi64(tr1_0, tr1_1);
+  out[2] = _mm_unpacklo_epi64(tr1_4, tr1_5);
+  out[3] = _mm_unpackhi_epi64(tr1_4, tr1_5);
+  out[4] = _mm_unpacklo_epi64(tr1_2, tr1_3);
+  out[5] = _mm_unpackhi_epi64(tr1_2, tr1_3);
+  out[6] = _mm_unpacklo_epi64(tr1_6, tr1_7);
+  out[7] = _mm_unpackhi_epi64(tr1_6, tr1_7);
+}
+
 static INLINE void transpose_32bit_4x4(__m128i *const a0, __m128i *const a1,