Remove unused copies of transform-related source code

- Library size reduction: 165 kB (292 kB with high bitdepth, HBD).

Change-Id: I50cb630dde326bd2a28c0db4b7e2d53c2fd94a2a
@@ -30,8 +30,6 @@ AV1_COMMON_SRCS-yes += common/filter.h
AV1_COMMON_SRCS-yes += common/filter.c
AV1_COMMON_SRCS-yes += common/idct.h
AV1_COMMON_SRCS-yes += common/idct.c
AV1_COMMON_SRCS-yes += common/av1_inv_txfm.h
AV1_COMMON_SRCS-yes += common/av1_inv_txfm.c
AV1_COMMON_SRCS-yes += common/loopfilter.h
AV1_COMMON_SRCS-yes += common/thread_common.h
AV1_COMMON_SRCS-yes += common/mv.h
@@ -61,8 +59,6 @@ AV1_COMMON_SRCS-yes += common/common_data.h
AV1_COMMON_SRCS-yes += common/scan.c
AV1_COMMON_SRCS-yes += common/scan.h
# TODO(angiebird) the forward transform belongs under encoder/
AV1_COMMON_SRCS-$(CONFIG_AV1_ENCODER) += common/av1_fwd_txfm.h
AV1_COMMON_SRCS-$(CONFIG_AV1_ENCODER) += common/av1_fwd_txfm.c
AV1_COMMON_SRCS-yes += common/av1_txfm.h
AV1_COMMON_SRCS-yes += common/av1_fwd_txfm1d.h
AV1_COMMON_SRCS-yes += common/av1_fwd_txfm1d.c
@@ -123,9 +119,6 @@ AV1_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct16x16_msa.c

AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/idct_intrin_sse2.c
ifeq ($(CONFIG_AV1_ENCODER),yes)
AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_txfm_sse2.c
AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_dct32x32_impl_sse2.h
AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_txfm_impl_sse2.h
AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/av1_txfm1d_sse4.h
AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/av1_fwd_txfm1d_sse4.c
AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/av1_fwd_txfm2d_sse4.c
@@ -143,7 +136,4 @@ ifeq ($(CONFIG_FILTER_INTRA),yes)
AV1_COMMON_SRCS-$(HAVE_SSE4_1) += common/x86/filterintra_sse4.c
endif

AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_inv_txfm_sse2.c
AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_inv_txfm_sse2.h

$(eval $(call rtcd_h_template,av1_rtcd,av1/common/av1_rtcd_defs.pl))
@@ -1,813 +0,0 @@
/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include "av1/common/av1_fwd_txfm.h"
#include <assert.h>
#include "./av1_rtcd.h"

void av1_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
  // The 2D transform is done with two passes which are actually pretty
  // similar. In the first one, we transform the columns and transpose
  // the results. In the second one, we transform the rows. To achieve that,
  // as the first pass results are transposed, we transpose the columns (that
  // is the transposed rows) and transpose the results (so that it goes back
  // in normal/row positions).
  int pass;
  // We need an intermediate buffer between passes.
  tran_low_t intermediate[4 * 4];
  const tran_low_t *in_low = NULL;
  tran_low_t *out = intermediate;
  // Do the two transform/transpose passes
  for (pass = 0; pass < 2; ++pass) {
    tran_high_t in_high[4];    // canbe16
    tran_high_t step[4];       // canbe16
    tran_high_t temp1, temp2;  // needs32
    int i;
    for (i = 0; i < 4; ++i) {
      // Load inputs.
      if (0 == pass) {
        in_high[0] = input[0 * stride] * 16;
        in_high[1] = input[1 * stride] * 16;
        in_high[2] = input[2 * stride] * 16;
        in_high[3] = input[3 * stride] * 16;
        if (i == 0 && in_high[0]) {
          in_high[0] += 1;
        }
      } else {
        assert(in_low != NULL);
        in_high[0] = in_low[0 * 4];
        in_high[1] = in_low[1 * 4];
        in_high[2] = in_low[2 * 4];
        in_high[3] = in_low[3 * 4];
        in_low++;
      }
      // Transform.
      step[0] = in_high[0] + in_high[3];
      step[1] = in_high[1] + in_high[2];
      step[2] = in_high[1] - in_high[2];
      step[3] = in_high[0] - in_high[3];
      temp1 = (step[0] + step[1]) * cospi_16_64;
      temp2 = (step[0] - step[1]) * cospi_16_64;
      out[0] = (tran_low_t)fdct_round_shift(temp1);
      out[2] = (tran_low_t)fdct_round_shift(temp2);
      temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64;
      temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64;
      out[1] = (tran_low_t)fdct_round_shift(temp1);
      out[3] = (tran_low_t)fdct_round_shift(temp2);
      // Do next column (which is a transposed row in second/horizontal pass)
      input++;
      out += 4;
    }
    // Setup in_low/out for next pass.
    in_low = intermediate;
    out = output;
  }

  {
    int i, j;
    for (i = 0; i < 4; ++i) {
      for (j = 0; j < 4; ++j) output[j + i * 4] = (output[j + i * 4] + 1) >> 2;
    }
  }
}
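
The two-pass comment above is the standard separable-transform trick: each pass applies a 1-D transform to columns and writes its results transposed, so two passes yield the full 2-D transform back in row order. A minimal self-contained sketch of that control flow (plain C, not libaom code; transform1d() is a stand-in for the 1-D DCT stage):

#include <string.h>

#define N 4

/* Placeholder for the 1-D transform stage (identity here, so the 2-D
 * routine below must return its input unchanged). */
static void transform1d(const int *in, int *out) {
  memcpy(out, in, N * sizeof(*in));
}

/* Two passes: read columns of src, write each result as a row of dst.
 * That is an implicit transpose; the second pass undoes it. */
static void transform2d(const int *input, int *output) {
  int intermediate[N * N], col[N], res[N];
  const int *src = input;
  int *dst = intermediate;
  for (int pass = 0; pass < 2; ++pass) {
    for (int i = 0; i < N; ++i) {
      for (int j = 0; j < N; ++j) col[j] = src[i + j * N];
      transform1d(col, res);
      for (int j = 0; j < N; ++j) dst[i * N + j] = res[j];
    }
    src = intermediate;  // second pass reads the transposed pass-1 output
    dst = output;
  }
}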

void av1_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
  int r, c;
  tran_low_t sum = 0;
  for (r = 0; r < 4; ++r)
    for (c = 0; c < 4; ++c) sum += input[r * stride + c];

  output[0] = sum << 1;
  output[1] = 0;
}

void av1_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) {
  int i, j;
  tran_low_t intermediate[64];
  int pass;
  tran_low_t *output = intermediate;
  const tran_low_t *in = NULL;

  // Transform columns
  for (pass = 0; pass < 2; ++pass) {
    tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;  // canbe16
    tran_high_t t0, t1, t2, t3;                  // needs32
    tran_high_t x0, x1, x2, x3;                  // canbe16

    for (i = 0; i < 8; i++) {
      // stage 1
      if (pass == 0) {
        s0 = (input[0 * stride] + input[7 * stride]) * 4;
        s1 = (input[1 * stride] + input[6 * stride]) * 4;
        s2 = (input[2 * stride] + input[5 * stride]) * 4;
        s3 = (input[3 * stride] + input[4 * stride]) * 4;
        s4 = (input[3 * stride] - input[4 * stride]) * 4;
        s5 = (input[2 * stride] - input[5 * stride]) * 4;
        s6 = (input[1 * stride] - input[6 * stride]) * 4;
        s7 = (input[0 * stride] - input[7 * stride]) * 4;
        ++input;
      } else {
        s0 = in[0 * 8] + in[7 * 8];
        s1 = in[1 * 8] + in[6 * 8];
        s2 = in[2 * 8] + in[5 * 8];
        s3 = in[3 * 8] + in[4 * 8];
        s4 = in[3 * 8] - in[4 * 8];
        s5 = in[2 * 8] - in[5 * 8];
        s6 = in[1 * 8] - in[6 * 8];
        s7 = in[0 * 8] - in[7 * 8];
        ++in;
      }

      // fdct4(step, step);
      x0 = s0 + s3;
      x1 = s1 + s2;
      x2 = s1 - s2;
      x3 = s0 - s3;
      t0 = (x0 + x1) * cospi_16_64;
      t1 = (x0 - x1) * cospi_16_64;
      t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
      t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
      output[0] = (tran_low_t)fdct_round_shift(t0);
      output[2] = (tran_low_t)fdct_round_shift(t2);
      output[4] = (tran_low_t)fdct_round_shift(t1);
      output[6] = (tran_low_t)fdct_round_shift(t3);

      // Stage 2
      t0 = (s6 - s5) * cospi_16_64;
      t1 = (s6 + s5) * cospi_16_64;
      t2 = fdct_round_shift(t0);
      t3 = fdct_round_shift(t1);

      // Stage 3
      x0 = s4 + t2;
      x1 = s4 - t2;
      x2 = s7 - t3;
      x3 = s7 + t3;

      // Stage 4
      t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
      t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
      t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
      t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
      output[1] = (tran_low_t)fdct_round_shift(t0);
      output[3] = (tran_low_t)fdct_round_shift(t2);
      output[5] = (tran_low_t)fdct_round_shift(t1);
      output[7] = (tran_low_t)fdct_round_shift(t3);
      output += 8;
    }
    in = intermediate;
    output = final_output;
  }

  // Rows
  for (i = 0; i < 8; ++i) {
    for (j = 0; j < 8; ++j) final_output[j + i * 8] /= 2;
  }
}

void av1_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
  int r, c;
  tran_low_t sum = 0;
  for (r = 0; r < 8; ++r)
    for (c = 0; c < 8; ++c) sum += input[r * stride + c];

  output[0] = sum;
  output[1] = 0;
}

void av1_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
  // The 2D transform is done with two passes which are actually pretty
  // similar. In the first one, we transform the columns and transpose
  // the results. In the second one, we transform the rows. To achieve that,
  // as the first pass results are transposed, we transpose the columns (that
  // is the transposed rows) and transpose the results (so that it goes back
  // in normal/row positions).
  int pass;
  // We need an intermediate buffer between passes.
  tran_low_t intermediate[256];
  const tran_low_t *in_low = NULL;
  tran_low_t *out = intermediate;
  // Do the two transform/transpose passes
  for (pass = 0; pass < 2; ++pass) {
    tran_high_t step1[8];      // canbe16
    tran_high_t step2[8];      // canbe16
    tran_high_t step3[8];      // canbe16
    tran_high_t in_high[8];    // canbe16
    tran_high_t temp1, temp2;  // needs32
    int i;
    for (i = 0; i < 16; i++) {
      if (0 == pass) {
        // Calculate input for the first 8 results.
        in_high[0] = (input[0 * stride] + input[15 * stride]) * 4;
        in_high[1] = (input[1 * stride] + input[14 * stride]) * 4;
        in_high[2] = (input[2 * stride] + input[13 * stride]) * 4;
        in_high[3] = (input[3 * stride] + input[12 * stride]) * 4;
        in_high[4] = (input[4 * stride] + input[11 * stride]) * 4;
        in_high[5] = (input[5 * stride] + input[10 * stride]) * 4;
        in_high[6] = (input[6 * stride] + input[9 * stride]) * 4;
        in_high[7] = (input[7 * stride] + input[8 * stride]) * 4;
        // Calculate input for the next 8 results.
        step1[0] = (input[7 * stride] - input[8 * stride]) * 4;
        step1[1] = (input[6 * stride] - input[9 * stride]) * 4;
        step1[2] = (input[5 * stride] - input[10 * stride]) * 4;
        step1[3] = (input[4 * stride] - input[11 * stride]) * 4;
        step1[4] = (input[3 * stride] - input[12 * stride]) * 4;
        step1[5] = (input[2 * stride] - input[13 * stride]) * 4;
        step1[6] = (input[1 * stride] - input[14 * stride]) * 4;
        step1[7] = (input[0 * stride] - input[15 * stride]) * 4;
      } else {
        // Calculate input for the first 8 results.
        assert(in_low != NULL);
        in_high[0] = ((in_low[0 * 16] + 1) >> 2) + ((in_low[15 * 16] + 1) >> 2);
        in_high[1] = ((in_low[1 * 16] + 1) >> 2) + ((in_low[14 * 16] + 1) >> 2);
        in_high[2] = ((in_low[2 * 16] + 1) >> 2) + ((in_low[13 * 16] + 1) >> 2);
        in_high[3] = ((in_low[3 * 16] + 1) >> 2) + ((in_low[12 * 16] + 1) >> 2);
        in_high[4] = ((in_low[4 * 16] + 1) >> 2) + ((in_low[11 * 16] + 1) >> 2);
        in_high[5] = ((in_low[5 * 16] + 1) >> 2) + ((in_low[10 * 16] + 1) >> 2);
        in_high[6] = ((in_low[6 * 16] + 1) >> 2) + ((in_low[9 * 16] + 1) >> 2);
        in_high[7] = ((in_low[7 * 16] + 1) >> 2) + ((in_low[8 * 16] + 1) >> 2);
        // Calculate input for the next 8 results.
        step1[0] = ((in_low[7 * 16] + 1) >> 2) - ((in_low[8 * 16] + 1) >> 2);
        step1[1] = ((in_low[6 * 16] + 1) >> 2) - ((in_low[9 * 16] + 1) >> 2);
        step1[2] = ((in_low[5 * 16] + 1) >> 2) - ((in_low[10 * 16] + 1) >> 2);
        step1[3] = ((in_low[4 * 16] + 1) >> 2) - ((in_low[11 * 16] + 1) >> 2);
        step1[4] = ((in_low[3 * 16] + 1) >> 2) - ((in_low[12 * 16] + 1) >> 2);
        step1[5] = ((in_low[2 * 16] + 1) >> 2) - ((in_low[13 * 16] + 1) >> 2);
        step1[6] = ((in_low[1 * 16] + 1) >> 2) - ((in_low[14 * 16] + 1) >> 2);
        step1[7] = ((in_low[0 * 16] + 1) >> 2) - ((in_low[15 * 16] + 1) >> 2);
        in_low++;
      }
      // Work on the first eight values; fdct8(input, even_results);
      {
        tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;  // canbe16
        tran_high_t t0, t1, t2, t3;                  // needs32
        tran_high_t x0, x1, x2, x3;                  // canbe16

        // stage 1
        s0 = in_high[0] + in_high[7];
        s1 = in_high[1] + in_high[6];
        s2 = in_high[2] + in_high[5];
        s3 = in_high[3] + in_high[4];
        s4 = in_high[3] - in_high[4];
        s5 = in_high[2] - in_high[5];
        s6 = in_high[1] - in_high[6];
        s7 = in_high[0] - in_high[7];

        // fdct4(step, step);
        x0 = s0 + s3;
        x1 = s1 + s2;
        x2 = s1 - s2;
        x3 = s0 - s3;
        t0 = (x0 + x1) * cospi_16_64;
        t1 = (x0 - x1) * cospi_16_64;
        t2 = x3 * cospi_8_64 + x2 * cospi_24_64;
        t3 = x3 * cospi_24_64 - x2 * cospi_8_64;
        out[0] = (tran_low_t)fdct_round_shift(t0);
        out[4] = (tran_low_t)fdct_round_shift(t2);
        out[8] = (tran_low_t)fdct_round_shift(t1);
        out[12] = (tran_low_t)fdct_round_shift(t3);

        // Stage 2
        t0 = (s6 - s5) * cospi_16_64;
        t1 = (s6 + s5) * cospi_16_64;
        t2 = fdct_round_shift(t0);
        t3 = fdct_round_shift(t1);

        // Stage 3
        x0 = s4 + t2;
        x1 = s4 - t2;
        x2 = s7 - t3;
        x3 = s7 + t3;

        // Stage 4
        t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
        t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
        t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
        t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
        out[2] = (tran_low_t)fdct_round_shift(t0);
        out[6] = (tran_low_t)fdct_round_shift(t2);
        out[10] = (tran_low_t)fdct_round_shift(t1);
        out[14] = (tran_low_t)fdct_round_shift(t3);
      }
      // Work on the next eight values; step1 -> odd_results
      {
        // step 2
        temp1 = (step1[5] - step1[2]) * cospi_16_64;
        temp2 = (step1[4] - step1[3]) * cospi_16_64;
        step2[2] = fdct_round_shift(temp1);
        step2[3] = fdct_round_shift(temp2);
        temp1 = (step1[4] + step1[3]) * cospi_16_64;
        temp2 = (step1[5] + step1[2]) * cospi_16_64;
        step2[4] = fdct_round_shift(temp1);
        step2[5] = fdct_round_shift(temp2);
        // step 3
        step3[0] = step1[0] + step2[3];
        step3[1] = step1[1] + step2[2];
        step3[2] = step1[1] - step2[2];
        step3[3] = step1[0] - step2[3];
        step3[4] = step1[7] - step2[4];
        step3[5] = step1[6] - step2[5];
        step3[6] = step1[6] + step2[5];
        step3[7] = step1[7] + step2[4];
        // step 4
        temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64;
        temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64;
        step2[1] = fdct_round_shift(temp1);
        step2[2] = fdct_round_shift(temp2);
        temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64;
        temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64;
        step2[5] = fdct_round_shift(temp1);
        step2[6] = fdct_round_shift(temp2);
        // step 5
        step1[0] = step3[0] + step2[1];
        step1[1] = step3[0] - step2[1];
        step1[2] = step3[3] + step2[2];
        step1[3] = step3[3] - step2[2];
        step1[4] = step3[4] - step2[5];
        step1[5] = step3[4] + step2[5];
        step1[6] = step3[7] - step2[6];
        step1[7] = step3[7] + step2[6];
        // step 6
        temp1 = step1[0] * cospi_30_64 + step1[7] * cospi_2_64;
        temp2 = step1[1] * cospi_14_64 + step1[6] * cospi_18_64;
        out[1] = (tran_low_t)fdct_round_shift(temp1);
        out[9] = (tran_low_t)fdct_round_shift(temp2);
        temp1 = step1[2] * cospi_22_64 + step1[5] * cospi_10_64;
        temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64;
        out[5] = (tran_low_t)fdct_round_shift(temp1);
        out[13] = (tran_low_t)fdct_round_shift(temp2);
        temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64;
        temp2 = step1[2] * -cospi_10_64 + step1[5] * cospi_22_64;
        out[3] = (tran_low_t)fdct_round_shift(temp1);
        out[11] = (tran_low_t)fdct_round_shift(temp2);
        temp1 = step1[1] * -cospi_18_64 + step1[6] * cospi_14_64;
        temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64;
        out[7] = (tran_low_t)fdct_round_shift(temp1);
        out[15] = (tran_low_t)fdct_round_shift(temp2);
      }
      // Do next column (which is a transposed row in second/horizontal pass)
      input++;
      out += 16;
    }
    // Setup in/out for next pass.
    in_low = intermediate;
    out = output;
  }
}

void av1_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
  int r, c;
  tran_low_t sum = 0;
  for (r = 0; r < 16; ++r)
    for (c = 0; c < 16; ++c) sum += input[r * stride + c];

  output[0] = sum >> 1;
  output[1] = 0;
}

static INLINE tran_high_t dct_32_round(tran_high_t input) {
  tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
  // TODO(debargha, peter.derivaz): Find new bounds for this assert,
  // and make the bounds consts.
  // assert(-131072 <= rv && rv <= 131071);
  return rv;
}

static INLINE tran_high_t half_round_shift(tran_high_t input) {
  tran_high_t rv = (input + 1 + (input < 0)) >> 2;
  return rv;
}
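
half_round_shift() divides by four with symmetric rounding: the extra (input < 0) term makes negative values round the same way positive ones do, with ties going toward zero. A quick standalone check (plain C, not libaom code; assumes the usual arithmetic right shift for negative values):

#include <stdio.h>

/* Standalone copy of the rounding in half_round_shift(): divide by 4,
 * round to nearest, ties toward zero, symmetric in sign. */
static long half_round_shift(long input) {
  return (input + 1 + (input < 0)) >> 2;
}

int main(void) {
  long v[] = { 6, -6, 5, -5, 2, -2 };
  for (int i = 0; i < 6; ++i)
    printf("%ld -> %ld\n", v[i], half_round_shift(v[i]));  /* 1 -1 1 -1 0 0 */
  return 0;
}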

void av1_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
  tran_high_t step[32];
  // Stage 1
  step[0] = input[0] + input[(32 - 1)];
  step[1] = input[1] + input[(32 - 2)];
  step[2] = input[2] + input[(32 - 3)];
  step[3] = input[3] + input[(32 - 4)];
  step[4] = input[4] + input[(32 - 5)];
  step[5] = input[5] + input[(32 - 6)];
  step[6] = input[6] + input[(32 - 7)];
  step[7] = input[7] + input[(32 - 8)];
  step[8] = input[8] + input[(32 - 9)];
  step[9] = input[9] + input[(32 - 10)];
  step[10] = input[10] + input[(32 - 11)];
  step[11] = input[11] + input[(32 - 12)];
  step[12] = input[12] + input[(32 - 13)];
  step[13] = input[13] + input[(32 - 14)];
  step[14] = input[14] + input[(32 - 15)];
  step[15] = input[15] + input[(32 - 16)];
  step[16] = -input[16] + input[(32 - 17)];
  step[17] = -input[17] + input[(32 - 18)];
  step[18] = -input[18] + input[(32 - 19)];
  step[19] = -input[19] + input[(32 - 20)];
  step[20] = -input[20] + input[(32 - 21)];
  step[21] = -input[21] + input[(32 - 22)];
  step[22] = -input[22] + input[(32 - 23)];
  step[23] = -input[23] + input[(32 - 24)];
  step[24] = -input[24] + input[(32 - 25)];
  step[25] = -input[25] + input[(32 - 26)];
  step[26] = -input[26] + input[(32 - 27)];
  step[27] = -input[27] + input[(32 - 28)];
  step[28] = -input[28] + input[(32 - 29)];
  step[29] = -input[29] + input[(32 - 30)];
  step[30] = -input[30] + input[(32 - 31)];
  step[31] = -input[31] + input[(32 - 32)];

  // Stage 2
  output[0] = step[0] + step[16 - 1];
  output[1] = step[1] + step[16 - 2];
  output[2] = step[2] + step[16 - 3];
  output[3] = step[3] + step[16 - 4];
  output[4] = step[4] + step[16 - 5];
  output[5] = step[5] + step[16 - 6];
  output[6] = step[6] + step[16 - 7];
  output[7] = step[7] + step[16 - 8];
  output[8] = -step[8] + step[16 - 9];
  output[9] = -step[9] + step[16 - 10];
  output[10] = -step[10] + step[16 - 11];
  output[11] = -step[11] + step[16 - 12];
  output[12] = -step[12] + step[16 - 13];
  output[13] = -step[13] + step[16 - 14];
  output[14] = -step[14] + step[16 - 15];
  output[15] = -step[15] + step[16 - 16];

  output[16] = step[16];
  output[17] = step[17];
  output[18] = step[18];
  output[19] = step[19];

  output[20] = dct_32_round((-step[20] + step[27]) * cospi_16_64);
  output[21] = dct_32_round((-step[21] + step[26]) * cospi_16_64);
  output[22] = dct_32_round((-step[22] + step[25]) * cospi_16_64);
  output[23] = dct_32_round((-step[23] + step[24]) * cospi_16_64);

  output[24] = dct_32_round((step[24] + step[23]) * cospi_16_64);
  output[25] = dct_32_round((step[25] + step[22]) * cospi_16_64);
  output[26] = dct_32_round((step[26] + step[21]) * cospi_16_64);
  output[27] = dct_32_round((step[27] + step[20]) * cospi_16_64);

  output[28] = step[28];
  output[29] = step[29];
  output[30] = step[30];
  output[31] = step[31];

  // dump the magnitude by 4, hence the intermediate values are within
  // the range of 16 bits.
  if (round) {
    output[0] = half_round_shift(output[0]);
    output[1] = half_round_shift(output[1]);
    output[2] = half_round_shift(output[2]);
    output[3] = half_round_shift(output[3]);
    output[4] = half_round_shift(output[4]);
    output[5] = half_round_shift(output[5]);
    output[6] = half_round_shift(output[6]);
    output[7] = half_round_shift(output[7]);
    output[8] = half_round_shift(output[8]);
    output[9] = half_round_shift(output[9]);
    output[10] = half_round_shift(output[10]);
    output[11] = half_round_shift(output[11]);
    output[12] = half_round_shift(output[12]);
    output[13] = half_round_shift(output[13]);
    output[14] = half_round_shift(output[14]);
    output[15] = half_round_shift(output[15]);

    output[16] = half_round_shift(output[16]);
    output[17] = half_round_shift(output[17]);
    output[18] = half_round_shift(output[18]);
    output[19] = half_round_shift(output[19]);
    output[20] = half_round_shift(output[20]);
    output[21] = half_round_shift(output[21]);
    output[22] = half_round_shift(output[22]);
    output[23] = half_round_shift(output[23]);
    output[24] = half_round_shift(output[24]);
    output[25] = half_round_shift(output[25]);
    output[26] = half_round_shift(output[26]);
    output[27] = half_round_shift(output[27]);
    output[28] = half_round_shift(output[28]);
    output[29] = half_round_shift(output[29]);
    output[30] = half_round_shift(output[30]);
    output[31] = half_round_shift(output[31]);
  }

  // Stage 3
  step[0] = output[0] + output[(8 - 1)];
  step[1] = output[1] + output[(8 - 2)];
  step[2] = output[2] + output[(8 - 3)];
  step[3] = output[3] + output[(8 - 4)];
  step[4] = -output[4] + output[(8 - 5)];
  step[5] = -output[5] + output[(8 - 6)];
  step[6] = -output[6] + output[(8 - 7)];
  step[7] = -output[7] + output[(8 - 8)];
  step[8] = output[8];
  step[9] = output[9];
  step[10] = dct_32_round((-output[10] + output[13]) * cospi_16_64);
  step[11] = dct_32_round((-output[11] + output[12]) * cospi_16_64);
  step[12] = dct_32_round((output[12] + output[11]) * cospi_16_64);
  step[13] = dct_32_round((output[13] + output[10]) * cospi_16_64);
  step[14] = output[14];
  step[15] = output[15];

  step[16] = output[16] + output[23];
  step[17] = output[17] + output[22];
  step[18] = output[18] + output[21];
  step[19] = output[19] + output[20];
  step[20] = -output[20] + output[19];
  step[21] = -output[21] + output[18];
  step[22] = -output[22] + output[17];
  step[23] = -output[23] + output[16];
  step[24] = -output[24] + output[31];
  step[25] = -output[25] + output[30];
  step[26] = -output[26] + output[29];
  step[27] = -output[27] + output[28];
  step[28] = output[28] + output[27];
  step[29] = output[29] + output[26];
  step[30] = output[30] + output[25];
  step[31] = output[31] + output[24];

  // Stage 4
  output[0] = step[0] + step[3];
  output[1] = step[1] + step[2];
  output[2] = -step[2] + step[1];
  output[3] = -step[3] + step[0];
  output[4] = step[4];
  output[5] = dct_32_round((-step[5] + step[6]) * cospi_16_64);
  output[6] = dct_32_round((step[6] + step[5]) * cospi_16_64);
  output[7] = step[7];
  output[8] = step[8] + step[11];
  output[9] = step[9] + step[10];
  output[10] = -step[10] + step[9];
  output[11] = -step[11] + step[8];
  output[12] = -step[12] + step[15];
  output[13] = -step[13] + step[14];
  output[14] = step[14] + step[13];
  output[15] = step[15] + step[12];

  output[16] = step[16];
  output[17] = step[17];
  output[18] = dct_32_round(step[18] * -cospi_8_64 + step[29] * cospi_24_64);
  output[19] = dct_32_round(step[19] * -cospi_8_64 + step[28] * cospi_24_64);
  output[20] = dct_32_round(step[20] * -cospi_24_64 + step[27] * -cospi_8_64);
  output[21] = dct_32_round(step[21] * -cospi_24_64 + step[26] * -cospi_8_64);
  output[22] = step[22];
  output[23] = step[23];
  output[24] = step[24];
  output[25] = step[25];
  output[26] = dct_32_round(step[26] * cospi_24_64 + step[21] * -cospi_8_64);
  output[27] = dct_32_round(step[27] * cospi_24_64 + step[20] * -cospi_8_64);
  output[28] = dct_32_round(step[28] * cospi_8_64 + step[19] * cospi_24_64);
  output[29] = dct_32_round(step[29] * cospi_8_64 + step[18] * cospi_24_64);
  output[30] = step[30];
  output[31] = step[31];

  // Stage 5
  step[0] = dct_32_round((output[0] + output[1]) * cospi_16_64);
  step[1] = dct_32_round((-output[1] + output[0]) * cospi_16_64);
  step[2] = dct_32_round(output[2] * cospi_24_64 + output[3] * cospi_8_64);
  step[3] = dct_32_round(output[3] * cospi_24_64 - output[2] * cospi_8_64);
  step[4] = output[4] + output[5];
  step[5] = -output[5] + output[4];
  step[6] = -output[6] + output[7];
  step[7] = output[7] + output[6];
  step[8] = output[8];
  step[9] = dct_32_round(output[9] * -cospi_8_64 + output[14] * cospi_24_64);
  step[10] = dct_32_round(output[10] * -cospi_24_64 + output[13] * -cospi_8_64);
  step[11] = output[11];
  step[12] = output[12];
  step[13] = dct_32_round(output[13] * cospi_24_64 + output[10] * -cospi_8_64);
  step[14] = dct_32_round(output[14] * cospi_8_64 + output[9] * cospi_24_64);
  step[15] = output[15];

  step[16] = output[16] + output[19];
  step[17] = output[17] + output[18];
  step[18] = -output[18] + output[17];
  step[19] = -output[19] + output[16];
  step[20] = -output[20] + output[23];
  step[21] = -output[21] + output[22];
  step[22] = output[22] + output[21];
  step[23] = output[23] + output[20];
  step[24] = output[24] + output[27];
  step[25] = output[25] + output[26];
  step[26] = -output[26] + output[25];
  step[27] = -output[27] + output[24];
  step[28] = -output[28] + output[31];
  step[29] = -output[29] + output[30];
  step[30] = output[30] + output[29];
  step[31] = output[31] + output[28];

  // Stage 6
  output[0] = step[0];
  output[1] = step[1];
  output[2] = step[2];
  output[3] = step[3];
  output[4] = dct_32_round(step[4] * cospi_28_64 + step[7] * cospi_4_64);
  output[5] = dct_32_round(step[5] * cospi_12_64 + step[6] * cospi_20_64);
  output[6] = dct_32_round(step[6] * cospi_12_64 + step[5] * -cospi_20_64);
  output[7] = dct_32_round(step[7] * cospi_28_64 + step[4] * -cospi_4_64);
  output[8] = step[8] + step[9];
  output[9] = -step[9] + step[8];
  output[10] = -step[10] + step[11];
  output[11] = step[11] + step[10];
  output[12] = step[12] + step[13];
  output[13] = -step[13] + step[12];
  output[14] = -step[14] + step[15];
  output[15] = step[15] + step[14];

  output[16] = step[16];
  output[17] = dct_32_round(step[17] * -cospi_4_64 + step[30] * cospi_28_64);
  output[18] = dct_32_round(step[18] * -cospi_28_64 + step[29] * -cospi_4_64);
  output[19] = step[19];
  output[20] = step[20];
  output[21] = dct_32_round(step[21] * -cospi_20_64 + step[26] * cospi_12_64);
  output[22] = dct_32_round(step[22] * -cospi_12_64 + step[25] * -cospi_20_64);
  output[23] = step[23];
  output[24] = step[24];
  output[25] = dct_32_round(step[25] * cospi_12_64 + step[22] * -cospi_20_64);
  output[26] = dct_32_round(step[26] * cospi_20_64 + step[21] * cospi_12_64);
  output[27] = step[27];
  output[28] = step[28];
  output[29] = dct_32_round(step[29] * cospi_28_64 + step[18] * -cospi_4_64);
  output[30] = dct_32_round(step[30] * cospi_4_64 + step[17] * cospi_28_64);
  output[31] = step[31];

  // Stage 7
  step[0] = output[0];
  step[1] = output[1];
  step[2] = output[2];
  step[3] = output[3];
  step[4] = output[4];
  step[5] = output[5];
  step[6] = output[6];
  step[7] = output[7];
  step[8] = dct_32_round(output[8] * cospi_30_64 + output[15] * cospi_2_64);
  step[9] = dct_32_round(output[9] * cospi_14_64 + output[14] * cospi_18_64);
  step[10] = dct_32_round(output[10] * cospi_22_64 + output[13] * cospi_10_64);
  step[11] = dct_32_round(output[11] * cospi_6_64 + output[12] * cospi_26_64);
  step[12] = dct_32_round(output[12] * cospi_6_64 + output[11] * -cospi_26_64);
  step[13] = dct_32_round(output[13] * cospi_22_64 + output[10] * -cospi_10_64);
  step[14] = dct_32_round(output[14] * cospi_14_64 + output[9] * -cospi_18_64);
  step[15] = dct_32_round(output[15] * cospi_30_64 + output[8] * -cospi_2_64);

  step[16] = output[16] + output[17];
  step[17] = -output[17] + output[16];
  step[18] = -output[18] + output[19];
  step[19] = output[19] + output[18];
  step[20] = output[20] + output[21];
  step[21] = -output[21] + output[20];
  step[22] = -output[22] + output[23];
  step[23] = output[23] + output[22];
  step[24] = output[24] + output[25];
  step[25] = -output[25] + output[24];
  step[26] = -output[26] + output[27];
  step[27] = output[27] + output[26];
  step[28] = output[28] + output[29];
  step[29] = -output[29] + output[28];
  step[30] = -output[30] + output[31];
  step[31] = output[31] + output[30];

  // Final stage --- outputs indices are bit-reversed.
  output[0] = step[0];
  output[16] = step[1];
  output[8] = step[2];
  output[24] = step[3];
  output[4] = step[4];
  output[20] = step[5];
  output[12] = step[6];
  output[28] = step[7];
  output[2] = step[8];
  output[18] = step[9];
  output[10] = step[10];
  output[26] = step[11];
  output[6] = step[12];
  output[22] = step[13];
  output[14] = step[14];
  output[30] = step[15];

  output[1] = dct_32_round(step[16] * cospi_31_64 + step[31] * cospi_1_64);
  output[17] = dct_32_round(step[17] * cospi_15_64 + step[30] * cospi_17_64);
  output[9] = dct_32_round(step[18] * cospi_23_64 + step[29] * cospi_9_64);
  output[25] = dct_32_round(step[19] * cospi_7_64 + step[28] * cospi_25_64);
  output[5] = dct_32_round(step[20] * cospi_27_64 + step[27] * cospi_5_64);
  output[21] = dct_32_round(step[21] * cospi_11_64 + step[26] * cospi_21_64);
  output[13] = dct_32_round(step[22] * cospi_19_64 + step[25] * cospi_13_64);
  output[29] = dct_32_round(step[23] * cospi_3_64 + step[24] * cospi_29_64);
  output[3] = dct_32_round(step[24] * cospi_3_64 + step[23] * -cospi_29_64);
  output[19] = dct_32_round(step[25] * cospi_19_64 + step[22] * -cospi_13_64);
  output[11] = dct_32_round(step[26] * cospi_11_64 + step[21] * -cospi_21_64);
  output[27] = dct_32_round(step[27] * cospi_27_64 + step[20] * -cospi_5_64);
  output[7] = dct_32_round(step[28] * cospi_7_64 + step[19] * -cospi_25_64);
  output[23] = dct_32_round(step[29] * cospi_23_64 + step[18] * -cospi_9_64);
  output[15] = dct_32_round(step[30] * cospi_15_64 + step[17] * -cospi_17_64);
  output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64);
}
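
The final-stage comment means the outputs appear at 5-bit bit-reversed indices: the even half copies step[k] straight to output[bitrev5(k)] (1 -> 16, 2 -> 8, 3 -> 24, ...), and the closing rotations write each primary result to the same bit-reversed slot (step[16] -> output[1], step[18] -> output[9], ...). A small sketch of the index map (plain C, not libaom code):

#include <stdio.h>

/* 5-bit bit reversal, matching the index scatter in av1_fdct32's final
 * stage (e.g. step[1] -> output[16], step[2] -> output[8]). */
static unsigned bitrev5(unsigned k) {
  unsigned r = 0;
  for (unsigned b = 0; b < 5; ++b) r |= ((k >> b) & 1u) << (4 - b);
  return r;
}

int main(void) {
  for (unsigned k = 0; k < 32; ++k)
    printf("step[%2u] -> output[%2u]\n", k, bitrev5(k));
  return 0;
}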

void av1_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
  int i, j;
  tran_high_t output[32 * 32];

  // Columns
  for (i = 0; i < 32; ++i) {
    tran_high_t temp_in[32], temp_out[32];
    for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
    av1_fdct32(temp_in, temp_out, 0);
    for (j = 0; j < 32; ++j)
      output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
  }

  // Rows
  for (i = 0; i < 32; ++i) {
    tran_high_t temp_in[32], temp_out[32];
    for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
    av1_fdct32(temp_in, temp_out, 0);
    for (j = 0; j < 32; ++j)
      out[j + i * 32] =
          (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
  }
}

// Note that although we use dct_32_round in dct32 computation flow,
// this 2d fdct32x32 for rate-distortion optimization loop is operating
// within 16 bits precision.
void av1_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
  int i, j;
  tran_high_t output[32 * 32];

  // Columns
  for (i = 0; i < 32; ++i) {
    tran_high_t temp_in[32], temp_out[32];
    for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
    av1_fdct32(temp_in, temp_out, 0);
    for (j = 0; j < 32; ++j)
      // TODO(cd): see quality impact of only doing
      //           output[j * 32 + i] = (temp_out[j] + 1) >> 2;
      //           PS: also change code in av1_dsp/x86/av1_dct_sse2.c
      output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
  }

  // Rows
  for (i = 0; i < 32; ++i) {
    tran_high_t temp_in[32], temp_out[32];
    for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
    av1_fdct32(temp_in, temp_out, 1);
    for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
  }
}

void av1_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
  int r, c;
  tran_low_t sum = 0;
  for (r = 0; r < 32; ++r)
    for (c = 0; c < 32; ++c) sum += input[r * stride + c];

  output[0] = sum >> 3;
  output[1] = 0;
}
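
The *_1 shortcuts above make each block size's net DC gain explicit: relative to the raw pixel sum, the DC term is scaled by 2, 1, 1/2 and 1/8 for 4x4, 8x8, 16x16 and 32x32 respectively (sum << 1, sum, sum >> 1, sum >> 3). A trivial cross-check of those factors (plain C, not libaom code):

#include <stdio.h>

/* DC scaling used by av1_fdct{4x4,8x8,16x16,32x32}_1_c: for a constant
 * block of value v, sum = N*N*v and DC = sum scaled per block size. */
int main(void) {
  const long v = 3;
  long sum4 = 4 * 4 * v, sum8 = 8 * 8 * v;
  long sum16 = 16 * 16 * v, sum32 = 32 * 32 * v;
  printf("4x4:   DC = %ld\n", sum4 << 1);  /* sum << 1 */
  printf("8x8:   DC = %ld\n", sum8);       /* sum      */
  printf("16x16: DC = %ld\n", sum16 >> 1); /* sum >> 1 */
  printf("32x32: DC = %ld\n", sum32 >> 3); /* sum >> 3 */
  return 0;
}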

#if CONFIG_AOM_HIGHBITDEPTH
void av1_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
                          int stride) {
  av1_fdct4x4_c(input, output, stride);
}

void av1_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
                          int stride) {
  av1_fdct8x8_c(input, final_output, stride);
}

void av1_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
                            int stride) {
  av1_fdct8x8_1_c(input, final_output, stride);
}

void av1_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
                            int stride) {
  av1_fdct16x16_c(input, output, stride);
}

void av1_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
                              int stride) {
  av1_fdct16x16_1_c(input, output, stride);
}

void av1_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
  av1_fdct32x32_c(input, out, stride);
}

void av1_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
                               int stride) {
  av1_fdct32x32_rd_c(input, out, stride);
}

void av1_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
                              int stride) {
  av1_fdct32x32_1_c(input, out, stride);
}
#endif  // CONFIG_AOM_HIGHBITDEPTH

@@ -1,19 +0,0 @@
/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AV1_COMMON_AV1_FWD_TXFM_H_
#define AV1_COMMON_AV1_FWD_TXFM_H_

#include "aom_dsp/txfm_common.h"
#include "aom_dsp/fwd_txfm.h"

void av1_fdct32(const tran_high_t *input, tran_high_t *output, int round);
#endif  // AV1_COMMON_AV1_FWD_TXFM_H_

[File diff suppressed because it is too large]

@@ -1,133 +0,0 @@
/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_DSP_INV_TXFM_H_
#define AOM_DSP_INV_TXFM_H_

#include <assert.h>

#include "./aom_config.h"
#include "aom_dsp/txfm_common.h"
#include "aom_dsp/inv_txfm.h"
#include "aom_ports/mem.h"

#ifdef __cplusplus
extern "C" {
#endif

static INLINE tran_high_t check_range(tran_high_t input) {
#if CONFIG_COEFFICIENT_RANGE_CHECKING
  // For valid input streams, intermediate stage coefficients should always
  // stay within the range of a signed 16 bit integer. Coefficients can go out
  // of this range for invalid/corrupt streams. However, strictly checking
  // this range for every intermediate coefficient can be burdensome for a
  // decoder, therefore the following assertion is only enabled when
  // configured with --enable-coefficient-range-checking.
  assert(INT16_MIN <= input);
  assert(input <= INT16_MAX);
#endif  // CONFIG_COEFFICIENT_RANGE_CHECKING
  return input;
}

static INLINE tran_high_t dct_const_round_shift(tran_high_t input) {
  tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
  return rv;
}

#if CONFIG_AOM_HIGHBITDEPTH
static INLINE tran_high_t highbd_check_range(tran_high_t input, int bd) {
#if CONFIG_COEFFICIENT_RANGE_CHECKING
  // For valid highbitdepth streams, intermediate stage coefficients will
  // stay within the ranges:
  // - 8 bit: signed 16 bit integer
  // - 10 bit: signed 18 bit integer
  // - 12 bit: signed 20 bit integer
  const int32_t int_max = (1 << (7 + bd)) - 1;
  const int32_t int_min = -int_max - 1;
  assert(int_min <= input);
  assert(input <= int_max);
  (void)int_min;
#endif  // CONFIG_COEFFICIENT_RANGE_CHECKING
  (void)bd;
  return input;
}
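
The bound (1 << (7 + bd)) - 1 reproduces the ranges listed in the comment: bd = 8 gives 2^15 - 1 (signed 16-bit), bd = 10 gives 2^17 - 1 (18-bit), bd = 12 gives 2^19 - 1 (20-bit). A one-loop check (plain C, not libaom code):

#include <stdio.h>

int main(void) {
  /* Coefficient range per bit depth, as computed in highbd_check_range(). */
  for (int bd = 8; bd <= 12; bd += 2)
    printf("bd=%d: int_max = %d\n", bd,
           (1 << (7 + bd)) - 1);  /* 32767, 131071, 524287 */
  return 0;
}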

static INLINE tran_high_t highbd_dct_const_round_shift(tran_high_t input) {
  tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
  return rv;
}
#endif  // CONFIG_AOM_HIGHBITDEPTH

#if CONFIG_EMULATE_HARDWARE
// When CONFIG_EMULATE_HARDWARE is 1 the transform performs a
// non-normative method to handle overflows. A stream that causes
// overflows in the inverse transform is considered invalid,
// and a hardware implementer is free to choose any reasonable
// method to handle overflows. However to aid in hardware
// verification they can use a specific implementation of the
// WRAPLOW() macro below that is identical to their intended
// hardware implementation (and also use configure options to trigger
// the C-implementation of the transform).
//
// The particular WRAPLOW implementation below performs strict
// overflow wrapping to match common hardware implementations.
// bd of 8 uses trans_low with 16bits, need to remove 16bits
// bd of 10 uses trans_low with 18bits, need to remove 14bits
// bd of 12 uses trans_low with 20bits, need to remove 12bits
// bd of x uses trans_low with 8+x bits, need to remove 24-x bits

#define WRAPLOW(x) ((((int32_t)check_range(x)) << 16) >> 16)
#if CONFIG_AOM_HIGHBITDEPTH
#define HIGHBD_WRAPLOW(x, bd) \
  ((((int32_t)highbd_check_range((x), bd)) << (24 - bd)) >> (24 - bd))
#endif  // CONFIG_AOM_HIGHBITDEPTH

#else  // CONFIG_EMULATE_HARDWARE

#define WRAPLOW(x) ((int32_t)check_range(x))
#if CONFIG_AOM_HIGHBITDEPTH
#define HIGHBD_WRAPLOW(x, bd) ((int32_t)highbd_check_range((x), bd))
#endif  // CONFIG_AOM_HIGHBITDEPTH

#endif  // CONFIG_EMULATE_HARDWARE

void av1_idct4_c(const tran_low_t *input, tran_low_t *output);
void av1_idct8_c(const tran_low_t *input, tran_low_t *output);
void av1_idct16_c(const tran_low_t *input, tran_low_t *output);
void av1_idct32_c(const tran_low_t *input, tran_low_t *output);
void av1_iadst4_c(const tran_low_t *input, tran_low_t *output);
void av1_iadst8_c(const tran_low_t *input, tran_low_t *output);
void av1_iadst16_c(const tran_low_t *input, tran_low_t *output);

#if CONFIG_AOM_HIGHBITDEPTH
void av1_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
void av1_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
void av1_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);

void av1_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd);
void av1_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd);
void av1_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd);

static INLINE uint16_t highbd_clip_pixel_add(uint16_t dest, tran_high_t trans,
                                             int bd) {
  trans = HIGHBD_WRAPLOW(trans, bd);
  return clip_pixel_highbd(dest + (int)trans, bd);
}
#endif

static INLINE uint8_t clip_pixel_add(uint8_t dest, tran_high_t trans) {
  trans = WRAPLOW(trans);
  return clip_pixel(dest + (int)trans);
}
#ifdef __cplusplus
}  // extern "C"
#endif
#endif  // AOM_DSP_INV_TXFM_H_
@@ -414,62 +414,6 @@ if (aom_config("CONFIG_EXT_TX") eq "yes") {
|
||||
specialize qw/av1_fht32x16 sse2/;
|
||||
}
|
||||
|
||||
if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
|
||||
add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct4x4/;
|
||||
|
||||
add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct4x4_1/;
|
||||
|
||||
add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct8x8/;
|
||||
|
||||
add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct8x8_1/;
|
||||
|
||||
add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct16x16/;
|
||||
|
||||
add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct16x16_1/;
|
||||
|
||||
add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct32x32/;
|
||||
|
||||
add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct32x32_rd/;
|
||||
|
||||
add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct32x32_1/;
|
||||
} else {
|
||||
add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct4x4 sse2/;
|
||||
|
||||
add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct4x4_1 sse2/;
|
||||
|
||||
add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct8x8 sse2/;
|
||||
|
||||
add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct8x8_1 sse2/;
|
||||
|
||||
add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct16x16 sse2/;
|
||||
|
||||
add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct16x16_1 sse2/;
|
||||
|
||||
add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct32x32 sse2/;
|
||||
|
||||
add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct32x32_rd sse2/;
|
||||
|
||||
add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_fdct32x32_1 sse2/;
|
||||
}
|
||||
|
||||
if (aom_config("CONFIG_AOM_HIGHBITDEPTH") ne "yes") {
|
||||
if (aom_config("CONFIG_EXT_TX") ne "yes") {
|
||||
specialize qw/av1_fht4x4 msa/;
|
||||
@@ -478,243 +422,9 @@ if (aom_config("CONFIG_AOM_HIGHBITDEPTH") ne "yes") {
|
||||
}
|
||||
}
|
||||
|
||||
if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
|
||||
if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
|
||||
add_proto qw/void av1_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_highbd_fdct4x4/;
|
||||
|
||||
add_proto qw/void av1_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_highbd_fdct8x8/;
|
||||
|
||||
add_proto qw/void av1_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_highbd_fdct8x8_1/;
|
||||
|
||||
add_proto qw/void av1_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_highbd_fdct16x16/;
|
||||
|
||||
add_proto qw/void av1_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_highbd_fdct16x16_1/;
|
||||
|
||||
add_proto qw/void av1_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_highbd_fdct32x32/;
|
||||
|
||||
add_proto qw/void av1_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_highbd_fdct32x32_rd/;
|
||||
|
||||
add_proto qw/void av1_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_highbd_fdct32x32_1/;
|
||||
} else {
|
||||
add_proto qw/void av1_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_highbd_fdct4x4 sse2/;
|
||||
|
||||
add_proto qw/void av1_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_highbd_fdct8x8 sse2/;
|
||||
|
||||
add_proto qw/void av1_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_highbd_fdct8x8_1/;
|
||||
|
||||
add_proto qw/void av1_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_highbd_fdct16x16 sse2/;
|
||||
|
||||
add_proto qw/void av1_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_highbd_fdct16x16_1/;
|
||||
|
||||
add_proto qw/void av1_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_highbd_fdct32x32 sse2/;
|
||||
|
||||
add_proto qw/void av1_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_highbd_fdct32x32_rd sse2/;
|
||||
|
||||
add_proto qw/void av1_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
|
||||
specialize qw/av1_highbd_fdct32x32_1/;
|
||||
}
|
||||
}
|
||||
|
||||
add_proto qw/void av1_fwd_idtx/, "const int16_t *src_diff, tran_low_t *coeff, int stride, int bs, int tx_type";
|
||||
specialize qw/av1_fwd_idtx/;
|
||||
|
||||
# Inverse transform
|
||||
if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
|
||||
# Note as optimized versions of these functions are added we need to add a check to ensure
|
||||
# that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
|
||||
add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
|
||||
specialize qw/av1_idct4x4_1_add/;
|
||||
|
||||
add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
|
||||
specialize qw/av1_idct4x4_16_add/;
|
||||
|
||||
add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
|
||||
specialize qw/av1_idct8x8_1_add/;
|
||||
|
||||
add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
|
||||
specialize qw/av1_idct8x8_64_add/;
|
||||
|
||||
add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
|
||||
specialize qw/av1_idct8x8_12_add/;
|
||||
|
||||
add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
|
||||
specialize qw/av1_idct16x16_1_add/;
|
||||
|
||||
add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
|
||||
specialize qw/av1_idct16x16_256_add/;
|
||||
|
||||
add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
|
||||
specialize qw/av1_idct16x16_10_add/;
|
||||
|
||||
add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
|
||||
specialize qw/av1_idct32x32_1024_add/;
|
||||
|
||||
add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
|
||||
specialize qw/av1_idct32x32_34_add/;
|
||||
|
||||
add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
|
||||
specialize qw/av1_idct32x32_1_add/;
|
||||
|
||||
add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
|
||||
specialize qw/av1_iwht4x4_1_add/;
|
||||
|
||||
add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
|
||||
specialize qw/av1_iwht4x4_16_add/;
|
||||
|
||||
add_proto qw/void av1_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
|
||||
specialize qw/av1_highbd_idct4x4_1_add/;
|
||||
|
||||
add_proto qw/void av1_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
|
||||
specialize qw/av1_highbd_idct8x8_1_add/;
|
||||
|
||||
add_proto qw/void av1_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
|
||||
specialize qw/av1_highbd_idct16x16_1_add/;
|
||||
|
||||
add_proto qw/void av1_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
|
||||
specialize qw/av1_highbd_idct32x32_1024_add/;
|
||||
|
||||
add_proto qw/void av1_highbd_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
|
||||
specialize qw/av1_highbd_idct32x32_34_add/;
|
||||
|
||||
add_proto qw/void av1_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
|
||||
specialize qw/av1_highbd_idct32x32_1_add/;
|
||||
|
||||
add_proto qw/void av1_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
|
||||
specialize qw/av1_highbd_iwht4x4_1_add/;
|
||||
|
||||
add_proto qw/void av1_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
|
||||
specialize qw/av1_highbd_iwht4x4_16_add/;
|
||||
|
||||
# Force C versions if CONFIG_EMULATE_HARDWARE is 1
|
||||
if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
|
||||
add_proto qw/void av1_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
|
||||
specialize qw/av1_highbd_idct4x4_16_add/;
|
||||
|
||||
add_proto qw/void av1_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
|
||||
specialize qw/av1_highbd_idct8x8_64_add/;
|
||||
|
||||
    add_proto qw/void av1_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
    specialize qw/av1_highbd_idct8x8_10_add/;

    add_proto qw/void av1_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
    specialize qw/av1_highbd_idct16x16_256_add/;

    add_proto qw/void av1_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
    specialize qw/av1_highbd_idct16x16_10_add/;
  } else {
    add_proto qw/void av1_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
    specialize qw/av1_highbd_idct4x4_16_add sse2/;

    add_proto qw/void av1_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
    specialize qw/av1_highbd_idct8x8_64_add sse2/;

    add_proto qw/void av1_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
    specialize qw/av1_highbd_idct8x8_10_add sse2/;

    add_proto qw/void av1_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
    specialize qw/av1_highbd_idct16x16_256_add sse2/;

    add_proto qw/void av1_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
    specialize qw/av1_highbd_idct16x16_10_add sse2/;
  } # CONFIG_EMULATE_HARDWARE
} else {
  # Force C versions if CONFIG_EMULATE_HARDWARE is 1
  if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
    add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct4x4_1_add/;

    add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct4x4_16_add/;

    add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct8x8_1_add/;

    add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct8x8_64_add/;

    add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct8x8_12_add/;

    add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct16x16_1_add/;

    add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct16x16_256_add/;

    add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct16x16_10_add/;

    add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct32x32_1024_add/;

    add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct32x32_34_add/;

    add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct32x32_1_add/;

    add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_iwht4x4_1_add/;

    add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_iwht4x4_16_add/;
  } else {
    add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct4x4_1_add sse2/;

    add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct4x4_16_add sse2/;

    add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct8x8_1_add sse2/;

    add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct8x8_64_add sse2/;

    add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct8x8_12_add sse2/;

    add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct16x16_1_add sse2/;

    add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct16x16_256_add sse2/;

    add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct16x16_10_add sse2/;

    add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct32x32_1024_add sse2/;

    add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct32x32_34_add sse2/;

    add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_idct32x32_1_add sse2/;

    add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_iwht4x4_1_add/;

    add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
    specialize qw/av1_iwht4x4_16_add/;
  } # CONFIG_EMULATE_HARDWARE
} # CONFIG_AOM_HIGHBITDEPTH

if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
  #fwd txfm
  add_proto qw/void av1_fwd_txfm2d_4x4/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
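A note on the RTCD definitions above: each add_proto/specialize pair is consumed by the rtcd_h_template rule at the end of the makefile, which runs av1_rtcd_defs.pl to generate av1_rtcd.h. Roughly, every specialized symbol becomes a function pointer that runtime setup binds to the best available implementation; when a specialize line lists no SIMD suffixes, the C version is the only candidate. The following is a minimal sketch of the generated shape, illustrative only and not the literal generated header; HAS_SSE2 stands in for the real CPU-feature flag from aom_ports.

#include <stdint.h>

typedef int32_t tran_low_t; /* assumption: matches the high-bitdepth build */
#define HAS_SSE2 0x01       /* stand-in for the real CPU-feature flag */

void av1_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest,
                          int dest_stride);
void av1_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest,
                             int dest_stride);

/* The generated header declares a pointer with roughly this shape. */
void (*av1_idct4x4_16_add)(const tran_low_t *input, uint8_t *dest,
                           int dest_stride);

static void setup_rtcd_sketch(int flags) {
  /* Bind the C fallback first, then upgrade if the CPU allows it. */
  av1_idct4x4_16_add = av1_idct4x4_16_add_c;
  if (flags & HAS_SSE2) av1_idct4x4_16_add = av1_idct4x4_16_add_sse2;
}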
File diff suppressed because it is too large
@@ -1,354 +1,5 @@
#include "av1/common/x86/av1_txfm1d_sse4.h"

void av1_fdct4_new_sse4_1(const __m128i *input, __m128i *output,
                          const int8_t *cos_bit, const int8_t *stage_range) {
  const int txfm_size = 4;
  const int num_per_128 = 4;
  const int32_t *cospi;
  __m128i buf0[4];
  __m128i buf1[4];
  int col_num = txfm_size / num_per_128;
  int bit;
  int col;
  (void)stage_range;
  for (col = 0; col < col_num; col++) {
    // stage 0;
    int32_t stage_idx = 0;
    buf0[0] = input[0 * col_num + col];
    buf0[1] = input[1 * col_num + col];
    buf0[2] = input[2 * col_num + col];
    buf0[3] = input[3 * col_num + col];

    // stage 1
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf1[0] = _mm_add_epi32(buf0[0], buf0[3]);
    buf1[3] = _mm_sub_epi32(buf0[0], buf0[3]);
    buf1[1] = _mm_add_epi32(buf0[1], buf0[2]);
    buf1[2] = _mm_sub_epi32(buf0[1], buf0[2]);

    // stage 2
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    btf_32_sse4_1_type0(cospi[32], cospi[32], buf1[0], buf1[1], buf0[0],
                        buf0[1], bit);
    btf_32_sse4_1_type1(cospi[48], cospi[16], buf1[2], buf1[3], buf0[2],
                        buf0[3], bit);

    // stage 3
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf1[0] = buf0[0];
    buf1[1] = buf0[2];
    buf1[2] = buf0[1];
    buf1[3] = buf0[3];

    output[0 * col_num + col] = buf1[0];
    output[1 * col_num + col] = buf1[1];
    output[2 * col_num + col] = buf1[2];
    output[3 * col_num + col] = buf1[3];
  }
}
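The av1_fdct4_new_sse4_1 kernel above is a stage-by-stage butterfly: each stage either adds/subtracts coefficient pairs or rotates them with the btf_32_sse4_1_type0/type1 helpers from av1_txfm1d_sse4.h. As a rough scalar model of one rotation (a sketch only; one plausible sign convention is shown, and the authoritative sign handling for type0 vs type1 is in that header), each output is a weighted sum of the two inputs by cospi table entries, rounded back down to `bit` bits:

#include <stdint.h>

// Rounded right shift, the rounding used throughout these 1-D transforms.
static int32_t round_shift_sketch(int64_t value, int bit) {
  return (int32_t)((value + (1LL << (bit - 1))) >> bit);
}

// Scalar sketch of one butterfly rotation; w0/w1 play the roles of the
// cospi[] weights passed to btf_32_sse4_1_type0/type1 above.
static void btf_rotation_sketch(int32_t w0, int32_t w1, int32_t in0,
                                int32_t in1, int32_t *out0, int32_t *out1,
                                int bit) {
  *out0 = round_shift_sketch((int64_t)w0 * in0 + (int64_t)w1 * in1, bit);
  *out1 = round_shift_sketch((int64_t)w0 * in1 - (int64_t)w1 * in0, bit);
}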
void av1_fdct8_new_sse4_1(const __m128i *input, __m128i *output,
                          const int8_t *cos_bit, const int8_t *stage_range) {
  const int txfm_size = 8;
  const int num_per_128 = 4;
  const int32_t *cospi;
  __m128i buf0[8];
  __m128i buf1[8];
  int col_num = txfm_size / num_per_128;
  int bit;
  int col;
  (void)stage_range;
  for (col = 0; col < col_num; col++) {
    // stage 0;
    int32_t stage_idx = 0;
    buf0[0] = input[0 * col_num + col];
    buf0[1] = input[1 * col_num + col];
    buf0[2] = input[2 * col_num + col];
    buf0[3] = input[3 * col_num + col];
    buf0[4] = input[4 * col_num + col];
    buf0[5] = input[5 * col_num + col];
    buf0[6] = input[6 * col_num + col];
    buf0[7] = input[7 * col_num + col];

    // stage 1
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf1[0] = _mm_add_epi32(buf0[0], buf0[7]);
    buf1[7] = _mm_sub_epi32(buf0[0], buf0[7]);
    buf1[1] = _mm_add_epi32(buf0[1], buf0[6]);
    buf1[6] = _mm_sub_epi32(buf0[1], buf0[6]);
    buf1[2] = _mm_add_epi32(buf0[2], buf0[5]);
    buf1[5] = _mm_sub_epi32(buf0[2], buf0[5]);
    buf1[3] = _mm_add_epi32(buf0[3], buf0[4]);
    buf1[4] = _mm_sub_epi32(buf0[3], buf0[4]);

    // stage 2
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf0[0] = _mm_add_epi32(buf1[0], buf1[3]);
    buf0[3] = _mm_sub_epi32(buf1[0], buf1[3]);
    buf0[1] = _mm_add_epi32(buf1[1], buf1[2]);
    buf0[2] = _mm_sub_epi32(buf1[1], buf1[2]);
    buf0[4] = buf1[4];
    btf_32_sse4_1_type0(-cospi[32], cospi[32], buf1[5], buf1[6], buf0[5],
                        buf0[6], bit);
    buf0[7] = buf1[7];

    // stage 3
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    btf_32_sse4_1_type0(cospi[32], cospi[32], buf0[0], buf0[1], buf1[0],
                        buf1[1], bit);
    btf_32_sse4_1_type1(cospi[48], cospi[16], buf0[2], buf0[3], buf1[2],
                        buf1[3], bit);
    buf1[4] = _mm_add_epi32(buf0[4], buf0[5]);
    buf1[5] = _mm_sub_epi32(buf0[4], buf0[5]);
    buf1[6] = _mm_sub_epi32(buf0[7], buf0[6]);
    buf1[7] = _mm_add_epi32(buf0[7], buf0[6]);

    // stage 4
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf0[0] = buf1[0];
    buf0[1] = buf1[1];
    buf0[2] = buf1[2];
    buf0[3] = buf1[3];
    btf_32_sse4_1_type1(cospi[56], cospi[8], buf1[4], buf1[7], buf0[4], buf0[7],
                        bit);
    btf_32_sse4_1_type1(cospi[24], cospi[40], buf1[5], buf1[6], buf0[5],
                        buf0[6], bit);

    // stage 5
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf1[0] = buf0[0];
    buf1[1] = buf0[4];
    buf1[2] = buf0[2];
    buf1[3] = buf0[6];
    buf1[4] = buf0[1];
    buf1[5] = buf0[5];
    buf1[6] = buf0[3];
    buf1[7] = buf0[7];

    output[0 * col_num + col] = buf1[0];
    output[1 * col_num + col] = buf1[1];
    output[2 * col_num + col] = buf1[2];
    output[3 * col_num + col] = buf1[3];
    output[4 * col_num + col] = buf1[4];
    output[5 * col_num + col] = buf1[5];
    output[6 * col_num + col] = buf1[6];
    output[7 * col_num + col] = buf1[7];
  }
}

void av1_fdct16_new_sse4_1(const __m128i *input, __m128i *output,
                           const int8_t *cos_bit, const int8_t *stage_range) {
  const int txfm_size = 16;
  const int num_per_128 = 4;
  const int32_t *cospi;
  __m128i buf0[16];
  __m128i buf1[16];
  int col_num = txfm_size / num_per_128;
  int bit;
  int col;
  (void)stage_range;
  for (col = 0; col < col_num; col++) {
    // stage 0;
    int32_t stage_idx = 0;
    buf0[0] = input[0 * col_num + col];
    buf0[1] = input[1 * col_num + col];
    buf0[2] = input[2 * col_num + col];
    buf0[3] = input[3 * col_num + col];
    buf0[4] = input[4 * col_num + col];
    buf0[5] = input[5 * col_num + col];
    buf0[6] = input[6 * col_num + col];
    buf0[7] = input[7 * col_num + col];
    buf0[8] = input[8 * col_num + col];
    buf0[9] = input[9 * col_num + col];
    buf0[10] = input[10 * col_num + col];
    buf0[11] = input[11 * col_num + col];
    buf0[12] = input[12 * col_num + col];
    buf0[13] = input[13 * col_num + col];
    buf0[14] = input[14 * col_num + col];
    buf0[15] = input[15 * col_num + col];

    // stage 1
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf1[0] = _mm_add_epi32(buf0[0], buf0[15]);
    buf1[15] = _mm_sub_epi32(buf0[0], buf0[15]);
    buf1[1] = _mm_add_epi32(buf0[1], buf0[14]);
    buf1[14] = _mm_sub_epi32(buf0[1], buf0[14]);
    buf1[2] = _mm_add_epi32(buf0[2], buf0[13]);
    buf1[13] = _mm_sub_epi32(buf0[2], buf0[13]);
    buf1[3] = _mm_add_epi32(buf0[3], buf0[12]);
    buf1[12] = _mm_sub_epi32(buf0[3], buf0[12]);
    buf1[4] = _mm_add_epi32(buf0[4], buf0[11]);
    buf1[11] = _mm_sub_epi32(buf0[4], buf0[11]);
    buf1[5] = _mm_add_epi32(buf0[5], buf0[10]);
    buf1[10] = _mm_sub_epi32(buf0[5], buf0[10]);
    buf1[6] = _mm_add_epi32(buf0[6], buf0[9]);
    buf1[9] = _mm_sub_epi32(buf0[6], buf0[9]);
    buf1[7] = _mm_add_epi32(buf0[7], buf0[8]);
    buf1[8] = _mm_sub_epi32(buf0[7], buf0[8]);

    // stage 2
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf0[0] = _mm_add_epi32(buf1[0], buf1[7]);
    buf0[7] = _mm_sub_epi32(buf1[0], buf1[7]);
    buf0[1] = _mm_add_epi32(buf1[1], buf1[6]);
    buf0[6] = _mm_sub_epi32(buf1[1], buf1[6]);
    buf0[2] = _mm_add_epi32(buf1[2], buf1[5]);
    buf0[5] = _mm_sub_epi32(buf1[2], buf1[5]);
    buf0[3] = _mm_add_epi32(buf1[3], buf1[4]);
    buf0[4] = _mm_sub_epi32(buf1[3], buf1[4]);
    buf0[8] = buf1[8];
    buf0[9] = buf1[9];
    btf_32_sse4_1_type0(-cospi[32], cospi[32], buf1[10], buf1[13], buf0[10],
                        buf0[13], bit);
    btf_32_sse4_1_type0(-cospi[32], cospi[32], buf1[11], buf1[12], buf0[11],
                        buf0[12], bit);
    buf0[14] = buf1[14];
    buf0[15] = buf1[15];

    // stage 3
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf1[0] = _mm_add_epi32(buf0[0], buf0[3]);
    buf1[3] = _mm_sub_epi32(buf0[0], buf0[3]);
    buf1[1] = _mm_add_epi32(buf0[1], buf0[2]);
    buf1[2] = _mm_sub_epi32(buf0[1], buf0[2]);
    buf1[4] = buf0[4];
    btf_32_sse4_1_type0(-cospi[32], cospi[32], buf0[5], buf0[6], buf1[5],
                        buf1[6], bit);
    buf1[7] = buf0[7];
    buf1[8] = _mm_add_epi32(buf0[8], buf0[11]);
    buf1[11] = _mm_sub_epi32(buf0[8], buf0[11]);
    buf1[9] = _mm_add_epi32(buf0[9], buf0[10]);
    buf1[10] = _mm_sub_epi32(buf0[9], buf0[10]);
    buf1[12] = _mm_sub_epi32(buf0[15], buf0[12]);
    buf1[15] = _mm_add_epi32(buf0[15], buf0[12]);
    buf1[13] = _mm_sub_epi32(buf0[14], buf0[13]);
    buf1[14] = _mm_add_epi32(buf0[14], buf0[13]);

    // stage 4
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    btf_32_sse4_1_type0(cospi[32], cospi[32], buf1[0], buf1[1], buf0[0],
                        buf0[1], bit);
    btf_32_sse4_1_type1(cospi[48], cospi[16], buf1[2], buf1[3], buf0[2],
                        buf0[3], bit);
    buf0[4] = _mm_add_epi32(buf1[4], buf1[5]);
    buf0[5] = _mm_sub_epi32(buf1[4], buf1[5]);
    buf0[6] = _mm_sub_epi32(buf1[7], buf1[6]);
    buf0[7] = _mm_add_epi32(buf1[7], buf1[6]);
    buf0[8] = buf1[8];
    btf_32_sse4_1_type0(-cospi[16], cospi[48], buf1[9], buf1[14], buf0[9],
                        buf0[14], bit);
    btf_32_sse4_1_type0(-cospi[48], -cospi[16], buf1[10], buf1[13], buf0[10],
                        buf0[13], bit);
    buf0[11] = buf1[11];
    buf0[12] = buf1[12];
    buf0[15] = buf1[15];

    // stage 5
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf1[0] = buf0[0];
    buf1[1] = buf0[1];
    buf1[2] = buf0[2];
    buf1[3] = buf0[3];
    btf_32_sse4_1_type1(cospi[56], cospi[8], buf0[4], buf0[7], buf1[4], buf1[7],
                        bit);
    btf_32_sse4_1_type1(cospi[24], cospi[40], buf0[5], buf0[6], buf1[5],
                        buf1[6], bit);
    buf1[8] = _mm_add_epi32(buf0[8], buf0[9]);
    buf1[9] = _mm_sub_epi32(buf0[8], buf0[9]);
    buf1[10] = _mm_sub_epi32(buf0[11], buf0[10]);
    buf1[11] = _mm_add_epi32(buf0[11], buf0[10]);
    buf1[12] = _mm_add_epi32(buf0[12], buf0[13]);
    buf1[13] = _mm_sub_epi32(buf0[12], buf0[13]);
    buf1[14] = _mm_sub_epi32(buf0[15], buf0[14]);
    buf1[15] = _mm_add_epi32(buf0[15], buf0[14]);

    // stage 6
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf0[0] = buf1[0];
    buf0[1] = buf1[1];
    buf0[2] = buf1[2];
    buf0[3] = buf1[3];
    buf0[4] = buf1[4];
    buf0[5] = buf1[5];
    buf0[6] = buf1[6];
    buf0[7] = buf1[7];
    btf_32_sse4_1_type1(cospi[60], cospi[4], buf1[8], buf1[15], buf0[8],
                        buf0[15], bit);
    btf_32_sse4_1_type1(cospi[28], cospi[36], buf1[9], buf1[14], buf0[9],
                        buf0[14], bit);
    btf_32_sse4_1_type1(cospi[44], cospi[20], buf1[10], buf1[13], buf0[10],
                        buf0[13], bit);
    btf_32_sse4_1_type1(cospi[12], cospi[52], buf1[11], buf1[12], buf0[11],
                        buf0[12], bit);

    // stage 7
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf1[0] = buf0[0];
    buf1[1] = buf0[8];
    buf1[2] = buf0[4];
    buf1[3] = buf0[12];
    buf1[4] = buf0[2];
    buf1[5] = buf0[10];
    buf1[6] = buf0[6];
    buf1[7] = buf0[14];
    buf1[8] = buf0[1];
    buf1[9] = buf0[9];
    buf1[10] = buf0[5];
    buf1[11] = buf0[13];
    buf1[12] = buf0[3];
    buf1[13] = buf0[11];
    buf1[14] = buf0[7];
    buf1[15] = buf0[15];

    output[0 * col_num + col] = buf1[0];
    output[1 * col_num + col] = buf1[1];
    output[2 * col_num + col] = buf1[2];
    output[3 * col_num + col] = buf1[3];
    output[4 * col_num + col] = buf1[4];
    output[5 * col_num + col] = buf1[5];
    output[6 * col_num + col] = buf1[6];
    output[7 * col_num + col] = buf1[7];
    output[8 * col_num + col] = buf1[8];
    output[9 * col_num + col] = buf1[9];
    output[10 * col_num + col] = buf1[10];
    output[11 * col_num + col] = buf1[11];
    output[12 * col_num + col] = buf1[12];
    output[13 * col_num + col] = buf1[13];
    output[14 * col_num + col] = buf1[14];
    output[15 * col_num + col] = buf1[15];
  }
}

void av1_fdct32_new_sse4_1(const __m128i *input, __m128i *output,
                           const int8_t *cos_bit, const int8_t *stage_range) {
  const int txfm_size = 32;
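A note on the data layout these kernels assume: coefficients are 32-bit, so each __m128i holds four of them, and a length-N 1-D transform walks col_num = N / 4 vector columns (num_per_128 == 4 above). The flat indexing is recoverable from the loads: element (row, col) of the vectorized block sits at input[row * col_num + col]. A scalar sketch of the addressing:

#include <stdint.h>

// Sketch of the indexing used by the SSE4.1 kernels above: row-major over
// vector columns, four 32-bit coefficients per __m128i slot.
static int flat_index_sketch(int row, int vec_col, int txfm_size) {
  const int col_num = txfm_size / 4;  // num_per_128 == 4
  return row * col_num + vec_col;
}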
@@ -835,370 +486,6 @@ void av1_fadst4_new_sse4_1(const __m128i *input, __m128i *output,
  }
}

void av1_fadst8_new_sse4_1(const __m128i *input, __m128i *output,
                           const int8_t *cos_bit, const int8_t *stage_range) {
  const int txfm_size = 8;
  const int num_per_128 = 4;
  const int32_t *cospi;
  __m128i buf0[8];
  __m128i buf1[8];
  int col_num = txfm_size / num_per_128;
  int bit;
  int col;
  (void)stage_range;
  for (col = 0; col < col_num; col++) {
    // stage 0;
    int32_t stage_idx = 0;
    buf0[0] = input[0 * col_num + col];
    buf0[1] = input[1 * col_num + col];
    buf0[2] = input[2 * col_num + col];
    buf0[3] = input[3 * col_num + col];
    buf0[4] = input[4 * col_num + col];
    buf0[5] = input[5 * col_num + col];
    buf0[6] = input[6 * col_num + col];
    buf0[7] = input[7 * col_num + col];

    // stage 1
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf1[0] = buf0[7];
    buf1[1] = buf0[0];
    buf1[2] = buf0[5];
    buf1[3] = buf0[2];
    buf1[4] = buf0[3];
    buf1[5] = buf0[4];
    buf1[6] = buf0[1];
    buf1[7] = buf0[6];

    // stage 2
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    btf_32_sse4_1_type0(cospi[4], cospi[60], buf1[0], buf1[1], buf0[0], buf0[1],
                        bit);
    btf_32_sse4_1_type0(cospi[20], cospi[44], buf1[2], buf1[3], buf0[2],
                        buf0[3], bit);
    btf_32_sse4_1_type0(cospi[36], cospi[28], buf1[4], buf1[5], buf0[4],
                        buf0[5], bit);
    btf_32_sse4_1_type0(cospi[52], cospi[12], buf1[6], buf1[7], buf0[6],
                        buf0[7], bit);

    // stage 3
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf1[0] = _mm_add_epi32(buf0[0], buf0[4]);
    buf1[4] = _mm_sub_epi32(buf0[0], buf0[4]);
    buf1[1] = _mm_add_epi32(buf0[1], buf0[5]);
    buf1[5] = _mm_sub_epi32(buf0[1], buf0[5]);
    buf1[2] = _mm_add_epi32(buf0[2], buf0[6]);
    buf1[6] = _mm_sub_epi32(buf0[2], buf0[6]);
    buf1[3] = _mm_add_epi32(buf0[3], buf0[7]);
    buf1[7] = _mm_sub_epi32(buf0[3], buf0[7]);

    // stage 4
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf0[0] = buf1[0];
    buf0[1] = buf1[1];
    buf0[2] = buf1[2];
    buf0[3] = buf1[3];
    btf_32_sse4_1_type0(cospi[16], cospi[48], buf1[4], buf1[5], buf0[4],
                        buf0[5], bit);
    btf_32_sse4_1_type0(-cospi[48], cospi[16], buf1[6], buf1[7], buf0[6],
                        buf0[7], bit);

    // stage 5
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf1[0] = _mm_add_epi32(buf0[0], buf0[2]);
    buf1[2] = _mm_sub_epi32(buf0[0], buf0[2]);
    buf1[1] = _mm_add_epi32(buf0[1], buf0[3]);
    buf1[3] = _mm_sub_epi32(buf0[1], buf0[3]);
    buf1[4] = _mm_add_epi32(buf0[4], buf0[6]);
    buf1[6] = _mm_sub_epi32(buf0[4], buf0[6]);
    buf1[5] = _mm_add_epi32(buf0[5], buf0[7]);
    buf1[7] = _mm_sub_epi32(buf0[5], buf0[7]);

    // stage 6
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf0[0] = buf1[0];
    buf0[1] = buf1[1];
    btf_32_sse4_1_type0(cospi[32], cospi[32], buf1[2], buf1[3], buf0[2],
                        buf0[3], bit);
    buf0[4] = buf1[4];
    buf0[5] = buf1[5];
    btf_32_sse4_1_type0(cospi[32], cospi[32], buf1[6], buf1[7], buf0[6],
                        buf0[7], bit);

    // stage 7
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf1[0] = buf0[0];
    buf1[1] = _mm_sub_epi32(_mm_set1_epi32(0), buf0[4]);
    buf1[2] = buf0[6];
    buf1[3] = _mm_sub_epi32(_mm_set1_epi32(0), buf0[2]);
    buf1[4] = buf0[3];
    buf1[5] = _mm_sub_epi32(_mm_set1_epi32(0), buf0[7]);
    buf1[6] = buf0[5];
    buf1[7] = _mm_sub_epi32(_mm_set1_epi32(0), buf0[1]);

    output[0 * col_num + col] = buf1[0];
    output[1 * col_num + col] = buf1[1];
    output[2 * col_num + col] = buf1[2];
    output[3 * col_num + col] = buf1[3];
    output[4 * col_num + col] = buf1[4];
    output[5 * col_num + col] = buf1[5];
    output[6 * col_num + col] = buf1[6];
    output[7 * col_num + col] = buf1[7];
  }
}

void av1_fadst16_new_sse4_1(const __m128i *input, __m128i *output,
                            const int8_t *cos_bit, const int8_t *stage_range) {
  const int txfm_size = 16;
  const int num_per_128 = 4;
  const int32_t *cospi;
  __m128i buf0[16];
  __m128i buf1[16];
  int col_num = txfm_size / num_per_128;
  int bit;
  int col;
  (void)stage_range;
  for (col = 0; col < col_num; col++) {
    // stage 0;
    int32_t stage_idx = 0;
    buf0[0] = input[0 * col_num + col];
    buf0[1] = input[1 * col_num + col];
    buf0[2] = input[2 * col_num + col];
    buf0[3] = input[3 * col_num + col];
    buf0[4] = input[4 * col_num + col];
    buf0[5] = input[5 * col_num + col];
    buf0[6] = input[6 * col_num + col];
    buf0[7] = input[7 * col_num + col];
    buf0[8] = input[8 * col_num + col];
    buf0[9] = input[9 * col_num + col];
    buf0[10] = input[10 * col_num + col];
    buf0[11] = input[11 * col_num + col];
    buf0[12] = input[12 * col_num + col];
    buf0[13] = input[13 * col_num + col];
    buf0[14] = input[14 * col_num + col];
    buf0[15] = input[15 * col_num + col];

    // stage 1
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf1[0] = buf0[15];
    buf1[1] = buf0[0];
    buf1[2] = buf0[13];
    buf1[3] = buf0[2];
    buf1[4] = buf0[11];
    buf1[5] = buf0[4];
    buf1[6] = buf0[9];
    buf1[7] = buf0[6];
    buf1[8] = buf0[7];
    buf1[9] = buf0[8];
    buf1[10] = buf0[5];
    buf1[11] = buf0[10];
    buf1[12] = buf0[3];
    buf1[13] = buf0[12];
    buf1[14] = buf0[1];
    buf1[15] = buf0[14];

    // stage 2
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    btf_32_sse4_1_type0(cospi[2], cospi[62], buf1[0], buf1[1], buf0[0], buf0[1],
                        bit);
    btf_32_sse4_1_type0(cospi[10], cospi[54], buf1[2], buf1[3], buf0[2],
                        buf0[3], bit);
    btf_32_sse4_1_type0(cospi[18], cospi[46], buf1[4], buf1[5], buf0[4],
                        buf0[5], bit);
    btf_32_sse4_1_type0(cospi[26], cospi[38], buf1[6], buf1[7], buf0[6],
                        buf0[7], bit);
    btf_32_sse4_1_type0(cospi[34], cospi[30], buf1[8], buf1[9], buf0[8],
                        buf0[9], bit);
    btf_32_sse4_1_type0(cospi[42], cospi[22], buf1[10], buf1[11], buf0[10],
                        buf0[11], bit);
    btf_32_sse4_1_type0(cospi[50], cospi[14], buf1[12], buf1[13], buf0[12],
                        buf0[13], bit);
    btf_32_sse4_1_type0(cospi[58], cospi[6], buf1[14], buf1[15], buf0[14],
                        buf0[15], bit);

    // stage 3
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf1[0] = _mm_add_epi32(buf0[0], buf0[8]);
    buf1[8] = _mm_sub_epi32(buf0[0], buf0[8]);
    buf1[1] = _mm_add_epi32(buf0[1], buf0[9]);
    buf1[9] = _mm_sub_epi32(buf0[1], buf0[9]);
    buf1[2] = _mm_add_epi32(buf0[2], buf0[10]);
    buf1[10] = _mm_sub_epi32(buf0[2], buf0[10]);
    buf1[3] = _mm_add_epi32(buf0[3], buf0[11]);
    buf1[11] = _mm_sub_epi32(buf0[3], buf0[11]);
    buf1[4] = _mm_add_epi32(buf0[4], buf0[12]);
    buf1[12] = _mm_sub_epi32(buf0[4], buf0[12]);
    buf1[5] = _mm_add_epi32(buf0[5], buf0[13]);
    buf1[13] = _mm_sub_epi32(buf0[5], buf0[13]);
    buf1[6] = _mm_add_epi32(buf0[6], buf0[14]);
    buf1[14] = _mm_sub_epi32(buf0[6], buf0[14]);
    buf1[7] = _mm_add_epi32(buf0[7], buf0[15]);
    buf1[15] = _mm_sub_epi32(buf0[7], buf0[15]);

    // stage 4
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf0[0] = buf1[0];
    buf0[1] = buf1[1];
    buf0[2] = buf1[2];
    buf0[3] = buf1[3];
    buf0[4] = buf1[4];
    buf0[5] = buf1[5];
    buf0[6] = buf1[6];
    buf0[7] = buf1[7];
    btf_32_sse4_1_type0(cospi[8], cospi[56], buf1[8], buf1[9], buf0[8], buf0[9],
                        bit);
    btf_32_sse4_1_type0(cospi[40], cospi[24], buf1[10], buf1[11], buf0[10],
                        buf0[11], bit);
    btf_32_sse4_1_type0(-cospi[56], cospi[8], buf1[12], buf1[13], buf0[12],
                        buf0[13], bit);
    btf_32_sse4_1_type0(-cospi[24], cospi[40], buf1[14], buf1[15], buf0[14],
                        buf0[15], bit);

    // stage 5
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf1[0] = _mm_add_epi32(buf0[0], buf0[4]);
    buf1[4] = _mm_sub_epi32(buf0[0], buf0[4]);
    buf1[1] = _mm_add_epi32(buf0[1], buf0[5]);
    buf1[5] = _mm_sub_epi32(buf0[1], buf0[5]);
    buf1[2] = _mm_add_epi32(buf0[2], buf0[6]);
    buf1[6] = _mm_sub_epi32(buf0[2], buf0[6]);
    buf1[3] = _mm_add_epi32(buf0[3], buf0[7]);
    buf1[7] = _mm_sub_epi32(buf0[3], buf0[7]);
    buf1[8] = _mm_add_epi32(buf0[8], buf0[12]);
    buf1[12] = _mm_sub_epi32(buf0[8], buf0[12]);
    buf1[9] = _mm_add_epi32(buf0[9], buf0[13]);
    buf1[13] = _mm_sub_epi32(buf0[9], buf0[13]);
    buf1[10] = _mm_add_epi32(buf0[10], buf0[14]);
    buf1[14] = _mm_sub_epi32(buf0[10], buf0[14]);
    buf1[11] = _mm_add_epi32(buf0[11], buf0[15]);
    buf1[15] = _mm_sub_epi32(buf0[11], buf0[15]);

    // stage 6
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf0[0] = buf1[0];
    buf0[1] = buf1[1];
    buf0[2] = buf1[2];
    buf0[3] = buf1[3];
    btf_32_sse4_1_type0(cospi[16], cospi[48], buf1[4], buf1[5], buf0[4],
                        buf0[5], bit);
    btf_32_sse4_1_type0(-cospi[48], cospi[16], buf1[6], buf1[7], buf0[6],
                        buf0[7], bit);
    buf0[8] = buf1[8];
    buf0[9] = buf1[9];
    buf0[10] = buf1[10];
    buf0[11] = buf1[11];
    btf_32_sse4_1_type0(cospi[16], cospi[48], buf1[12], buf1[13], buf0[12],
                        buf0[13], bit);
    btf_32_sse4_1_type0(-cospi[48], cospi[16], buf1[14], buf1[15], buf0[14],
                        buf0[15], bit);

    // stage 7
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf1[0] = _mm_add_epi32(buf0[0], buf0[2]);
    buf1[2] = _mm_sub_epi32(buf0[0], buf0[2]);
    buf1[1] = _mm_add_epi32(buf0[1], buf0[3]);
    buf1[3] = _mm_sub_epi32(buf0[1], buf0[3]);
    buf1[4] = _mm_add_epi32(buf0[4], buf0[6]);
    buf1[6] = _mm_sub_epi32(buf0[4], buf0[6]);
    buf1[5] = _mm_add_epi32(buf0[5], buf0[7]);
    buf1[7] = _mm_sub_epi32(buf0[5], buf0[7]);
    buf1[8] = _mm_add_epi32(buf0[8], buf0[10]);
    buf1[10] = _mm_sub_epi32(buf0[8], buf0[10]);
    buf1[9] = _mm_add_epi32(buf0[9], buf0[11]);
    buf1[11] = _mm_sub_epi32(buf0[9], buf0[11]);
    buf1[12] = _mm_add_epi32(buf0[12], buf0[14]);
    buf1[14] = _mm_sub_epi32(buf0[12], buf0[14]);
    buf1[13] = _mm_add_epi32(buf0[13], buf0[15]);
    buf1[15] = _mm_sub_epi32(buf0[13], buf0[15]);

    // stage 8
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf0[0] = buf1[0];
    buf0[1] = buf1[1];
    btf_32_sse4_1_type0(cospi[32], cospi[32], buf1[2], buf1[3], buf0[2],
                        buf0[3], bit);
    buf0[4] = buf1[4];
    buf0[5] = buf1[5];
    btf_32_sse4_1_type0(cospi[32], cospi[32], buf1[6], buf1[7], buf0[6],
                        buf0[7], bit);
    buf0[8] = buf1[8];
    buf0[9] = buf1[9];
    btf_32_sse4_1_type0(cospi[32], cospi[32], buf1[10], buf1[11], buf0[10],
                        buf0[11], bit);
    buf0[12] = buf1[12];
    buf0[13] = buf1[13];
    btf_32_sse4_1_type0(cospi[32], cospi[32], buf1[14], buf1[15], buf0[14],
                        buf0[15], bit);

    // stage 9
    stage_idx++;
    bit = cos_bit[stage_idx];
    cospi = cospi_arr[bit - cos_bit_min];
    buf1[0] = buf0[0];
    buf1[1] = _mm_sub_epi32(_mm_set1_epi32(0), buf0[8]);
    buf1[2] = buf0[12];
    buf1[3] = _mm_sub_epi32(_mm_set1_epi32(0), buf0[4]);
    buf1[4] = buf0[6];
    buf1[5] = _mm_sub_epi32(_mm_set1_epi32(0), buf0[14]);
    buf1[6] = buf0[10];
    buf1[7] = _mm_sub_epi32(_mm_set1_epi32(0), buf0[2]);
    buf1[8] = buf0[3];
    buf1[9] = _mm_sub_epi32(_mm_set1_epi32(0), buf0[11]);
    buf1[10] = buf0[15];
    buf1[11] = _mm_sub_epi32(_mm_set1_epi32(0), buf0[7]);
    buf1[12] = buf0[5];
    buf1[13] = _mm_sub_epi32(_mm_set1_epi32(0), buf0[13]);
    buf1[14] = buf0[9];
    buf1[15] = _mm_sub_epi32(_mm_set1_epi32(0), buf0[1]);

    output[0 * col_num + col] = buf1[0];
    output[1 * col_num + col] = buf1[1];
    output[2 * col_num + col] = buf1[2];
    output[3 * col_num + col] = buf1[3];
    output[4 * col_num + col] = buf1[4];
    output[5 * col_num + col] = buf1[5];
    output[6 * col_num + col] = buf1[6];
    output[7 * col_num + col] = buf1[7];
    output[8 * col_num + col] = buf1[8];
    output[9 * col_num + col] = buf1[9];
    output[10 * col_num + col] = buf1[10];
    output[11 * col_num + col] = buf1[11];
    output[12 * col_num + col] = buf1[12];
    output[13 * col_num + col] = buf1[13];
    output[14 * col_num + col] = buf1[14];
    output[15 * col_num + col] = buf1[15];
  }
}

void av1_fadst32_new_sse4_1(const __m128i *input, __m128i *output,
                            const int8_t *cos_bit, const int8_t *stage_range) {
  const int txfm_size = 32;
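One detail worth calling out in the fadst kernels above: their final stage negates alternate outputs, and SSE2/SSE4.1 has no packed 32-bit negate instruction, so the code synthesizes -x as 0 - x. A minimal equivalent of the _mm_sub_epi32(_mm_set1_epi32(0), x) form used above:

#include <emmintrin.h>  // SSE2

// -x for four packed 32-bit lanes; equivalent to subtracting from
// _mm_set1_epi32(0) as done in the fadst output stages.
static __m128i neg_epi32_sketch(__m128i x) {
  return _mm_sub_epi32(_mm_setzero_si128(), x);
}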
@@ -28,13 +28,7 @@ typedef void (*TxfmFuncSSE2)(const __m128i *input, __m128i *output,

static INLINE TxfmFuncSSE2 fwd_txfm_type_to_func(TXFM_TYPE txfm_type) {
  switch (txfm_type) {
    case TXFM_TYPE_DCT4: return av1_fdct4_new_sse4_1; break;
    case TXFM_TYPE_DCT8: return av1_fdct8_new_sse4_1; break;
    case TXFM_TYPE_DCT16: return av1_fdct16_new_sse4_1; break;
    case TXFM_TYPE_DCT32: return av1_fdct32_new_sse4_1; break;
    case TXFM_TYPE_ADST4: return av1_fadst4_new_sse4_1; break;
    case TXFM_TYPE_ADST8: return av1_fadst8_new_sse4_1; break;
    case TXFM_TYPE_ADST16: return av1_fadst16_new_sse4_1; break;
    case TXFM_TYPE_ADST32: return av1_fadst32_new_sse4_1; break;
    default: assert(0);
  }
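fwd_txfm_type_to_func is the dispatch point for the 2-D driver: it maps a TXFM_TYPE to the matching 1-D kernel, which the driver runs over columns and, after a transpose, over rows. A self-contained sketch of how the returned pointer is consumed (the typedef is copied from the hunk context above; the real driver lives in av1_fwd_txfm2d_sse4.c and differs in detail):

#include <emmintrin.h>
#include <stdint.h>

typedef void (*TxfmFuncSSE2)(const __m128i *input, __m128i *output,
                             const int8_t *cos_bit, const int8_t *stage_range);

// Hypothetical driver step: apply the selected 1-D kernel to one dimension.
static void fwd_txfm_1d_sketch(TxfmFuncSSE2 txfm_func, const __m128i *in,
                               __m128i *out, const int8_t *cos_bit,
                               const int8_t *stage_range) {
  txfm_func(in, out, cos_bit, stage_range);
  // ...the real driver follows this with a transpose and the row pass.
}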
File diff suppressed because it is too large
@@ -1,272 +0,0 @@
/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <emmintrin.h>  // SSE2

#include "./aom_config.h"
#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/x86/fwd_txfm_sse2.h"

void av1_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
  __m128i in0, in1;
  __m128i tmp;
  const __m128i zero = _mm_setzero_si128();
  in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
  in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
  in1 = _mm_unpacklo_epi64(
      in1, _mm_loadl_epi64((const __m128i *)(input + 2 * stride)));
  in0 = _mm_unpacklo_epi64(
      in0, _mm_loadl_epi64((const __m128i *)(input + 3 * stride)));

  tmp = _mm_add_epi16(in0, in1);
  in0 = _mm_unpacklo_epi16(zero, tmp);
  in1 = _mm_unpackhi_epi16(zero, tmp);
  in0 = _mm_srai_epi32(in0, 16);
  in1 = _mm_srai_epi32(in1, 16);

  tmp = _mm_add_epi32(in0, in1);
  in0 = _mm_unpacklo_epi32(tmp, zero);
  in1 = _mm_unpackhi_epi32(tmp, zero);

  tmp = _mm_add_epi32(in0, in1);
  in0 = _mm_srli_si128(tmp, 8);

  in1 = _mm_add_epi32(tmp, in0);
  in0 = _mm_slli_epi32(in1, 1);
  store_output(&in0, output);
}

void av1_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
  __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
  __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
  __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
  __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));
  __m128i u0, u1, sum;

  u0 = _mm_add_epi16(in0, in1);
  u1 = _mm_add_epi16(in2, in3);

  in0 = _mm_load_si128((const __m128i *)(input + 4 * stride));
  in1 = _mm_load_si128((const __m128i *)(input + 5 * stride));
  in2 = _mm_load_si128((const __m128i *)(input + 6 * stride));
  in3 = _mm_load_si128((const __m128i *)(input + 7 * stride));

  sum = _mm_add_epi16(u0, u1);

  in0 = _mm_add_epi16(in0, in1);
  in2 = _mm_add_epi16(in2, in3);
  sum = _mm_add_epi16(sum, in0);

  u0 = _mm_setzero_si128();
  sum = _mm_add_epi16(sum, in2);

  in0 = _mm_unpacklo_epi16(u0, sum);
  in1 = _mm_unpackhi_epi16(u0, sum);
  in0 = _mm_srai_epi32(in0, 16);
  in1 = _mm_srai_epi32(in1, 16);

  sum = _mm_add_epi32(in0, in1);
  in0 = _mm_unpacklo_epi32(sum, u0);
  in1 = _mm_unpackhi_epi32(sum, u0);

  sum = _mm_add_epi32(in0, in1);
  in0 = _mm_srli_si128(sum, 8);

  in1 = _mm_add_epi32(sum, in0);
  store_output(&in1, output);
}

void av1_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
                          int stride) {
  __m128i in0, in1, in2, in3;
  __m128i u0, u1;
  __m128i sum = _mm_setzero_si128();
  int i;

  for (i = 0; i < 2; ++i) {
    input += 8 * i;
    in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
    in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
    in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
    in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));

    u0 = _mm_add_epi16(in0, in1);
    u1 = _mm_add_epi16(in2, in3);
    sum = _mm_add_epi16(sum, u0);

    in0 = _mm_load_si128((const __m128i *)(input + 4 * stride));
    in1 = _mm_load_si128((const __m128i *)(input + 5 * stride));
    in2 = _mm_load_si128((const __m128i *)(input + 6 * stride));
    in3 = _mm_load_si128((const __m128i *)(input + 7 * stride));

    sum = _mm_add_epi16(sum, u1);
    u0 = _mm_add_epi16(in0, in1);
    u1 = _mm_add_epi16(in2, in3);
    sum = _mm_add_epi16(sum, u0);

    in0 = _mm_load_si128((const __m128i *)(input + 8 * stride));
    in1 = _mm_load_si128((const __m128i *)(input + 9 * stride));
    in2 = _mm_load_si128((const __m128i *)(input + 10 * stride));
    in3 = _mm_load_si128((const __m128i *)(input + 11 * stride));

    sum = _mm_add_epi16(sum, u1);
    u0 = _mm_add_epi16(in0, in1);
    u1 = _mm_add_epi16(in2, in3);
    sum = _mm_add_epi16(sum, u0);

    in0 = _mm_load_si128((const __m128i *)(input + 12 * stride));
    in1 = _mm_load_si128((const __m128i *)(input + 13 * stride));
    in2 = _mm_load_si128((const __m128i *)(input + 14 * stride));
    in3 = _mm_load_si128((const __m128i *)(input + 15 * stride));

    sum = _mm_add_epi16(sum, u1);
    u0 = _mm_add_epi16(in0, in1);
    u1 = _mm_add_epi16(in2, in3);
    sum = _mm_add_epi16(sum, u0);

    sum = _mm_add_epi16(sum, u1);
  }

  u0 = _mm_setzero_si128();
  in0 = _mm_unpacklo_epi16(u0, sum);
  in1 = _mm_unpackhi_epi16(u0, sum);
  in0 = _mm_srai_epi32(in0, 16);
  in1 = _mm_srai_epi32(in1, 16);

  sum = _mm_add_epi32(in0, in1);
  in0 = _mm_unpacklo_epi32(sum, u0);
  in1 = _mm_unpackhi_epi32(sum, u0);

  sum = _mm_add_epi32(in0, in1);
  in0 = _mm_srli_si128(sum, 8);

  in1 = _mm_add_epi32(sum, in0);
  in1 = _mm_srai_epi32(in1, 1);
  store_output(&in1, output);
}

void av1_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
                          int stride) {
  __m128i in0, in1, in2, in3;
  __m128i u0, u1;
  __m128i sum = _mm_setzero_si128();
  int i;

  for (i = 0; i < 8; ++i) {
    in0 = _mm_load_si128((const __m128i *)(input + 0));
    in1 = _mm_load_si128((const __m128i *)(input + 8));
    in2 = _mm_load_si128((const __m128i *)(input + 16));
    in3 = _mm_load_si128((const __m128i *)(input + 24));

    input += stride;
    u0 = _mm_add_epi16(in0, in1);
    u1 = _mm_add_epi16(in2, in3);
    sum = _mm_add_epi16(sum, u0);

    in0 = _mm_load_si128((const __m128i *)(input + 0));
    in1 = _mm_load_si128((const __m128i *)(input + 8));
    in2 = _mm_load_si128((const __m128i *)(input + 16));
    in3 = _mm_load_si128((const __m128i *)(input + 24));

    input += stride;
    sum = _mm_add_epi16(sum, u1);
    u0 = _mm_add_epi16(in0, in1);
    u1 = _mm_add_epi16(in2, in3);
    sum = _mm_add_epi16(sum, u0);

    in0 = _mm_load_si128((const __m128i *)(input + 0));
    in1 = _mm_load_si128((const __m128i *)(input + 8));
    in2 = _mm_load_si128((const __m128i *)(input + 16));
    in3 = _mm_load_si128((const __m128i *)(input + 24));

    input += stride;
    sum = _mm_add_epi16(sum, u1);
    u0 = _mm_add_epi16(in0, in1);
    u1 = _mm_add_epi16(in2, in3);
    sum = _mm_add_epi16(sum, u0);

    in0 = _mm_load_si128((const __m128i *)(input + 0));
    in1 = _mm_load_si128((const __m128i *)(input + 8));
    in2 = _mm_load_si128((const __m128i *)(input + 16));
    in3 = _mm_load_si128((const __m128i *)(input + 24));

    input += stride;
    sum = _mm_add_epi16(sum, u1);
    u0 = _mm_add_epi16(in0, in1);
    u1 = _mm_add_epi16(in2, in3);
    sum = _mm_add_epi16(sum, u0);

    sum = _mm_add_epi16(sum, u1);
  }

  u0 = _mm_setzero_si128();
  in0 = _mm_unpacklo_epi16(u0, sum);
  in1 = _mm_unpackhi_epi16(u0, sum);
  in0 = _mm_srai_epi32(in0, 16);
  in1 = _mm_srai_epi32(in1, 16);

  sum = _mm_add_epi32(in0, in1);
  in0 = _mm_unpacklo_epi32(sum, u0);
  in1 = _mm_unpackhi_epi32(sum, u0);

  sum = _mm_add_epi32(in0, in1);
  in0 = _mm_srli_si128(sum, 8);

  in1 = _mm_add_epi32(sum, in0);
  in1 = _mm_srai_epi32(in1, 3);
  store_output(&in1, output);
}

#define DCT_HIGH_BIT_DEPTH 0
#define FDCT4x4_2D av1_fdct4x4_sse2
#define FDCT8x8_2D av1_fdct8x8_sse2
#define FDCT16x16_2D av1_fdct16x16_sse2
#include "av1/common/x86/av1_fwd_txfm_impl_sse2.h"
#undef FDCT4x4_2D
#undef FDCT8x8_2D
#undef FDCT16x16_2D

#define FDCT32x32_2D av1_fdct32x32_rd_sse2
#define FDCT32x32_HIGH_PRECISION 0
#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION

#define FDCT32x32_2D av1_fdct32x32_sse2
#define FDCT32x32_HIGH_PRECISION 1
#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"  // NOLINT
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION
#undef DCT_HIGH_BIT_DEPTH

#if CONFIG_AOM_HIGHBITDEPTH
#define DCT_HIGH_BIT_DEPTH 1
#define FDCT4x4_2D av1_highbd_fdct4x4_sse2
#define FDCT8x8_2D av1_highbd_fdct8x8_sse2
#define FDCT16x16_2D av1_highbd_fdct16x16_sse2
#include "av1/common/x86/av1_fwd_txfm_impl_sse2.h"  // NOLINT
#undef FDCT4x4_2D
#undef FDCT8x8_2D
#undef FDCT16x16_2D

#define FDCT32x32_2D av1_highbd_fdct32x32_rd_sse2
#define FDCT32x32_HIGH_PRECISION 0
#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"  // NOLINT
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION

#define FDCT32x32_2D av1_highbd_fdct32x32_sse2
#define FDCT32x32_HIGH_PRECISION 1
#include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"  // NOLINT
#undef FDCT32x32_2D
#undef FDCT32x32_HIGH_PRECISION
#undef DCT_HIGH_BIT_DEPTH
#endif  // CONFIG_AOM_HIGHBITDEPTH
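The #define / #include / #undef block that closes this file is a C "template" idiom: av1_fwd_txfm_impl_sse2.h and av1_fwd_dct32x32_impl_sse2.h carry no include guards and are compiled repeatedly, each pass stamping out a function under the name and precision selected by the macros (av1_fdct32x32_sse2 vs av1_fdct32x32_rd_sse2, plus the highbd variants). A tiny self-contained model of the same parameterized stamping (names hypothetical; a macro is used here where the real code re-includes an unguarded header, but the effect is the same):

#include <stdio.h>

// Each expansion defines one concrete function, the way each #include of
// the unguarded impl header defines one FDCT variant.
#define DEFINE_FDCT_SKETCH(name, high_precision)             \
  static int name(int coeff) {                               \
    return (high_precision) ? (coeff + 1) >> 1 : coeff >> 2; \
  }

DEFINE_FDCT_SKETCH(fdct_sketch_hp, 1)  // like FDCT32x32_HIGH_PRECISION 1
DEFINE_FDCT_SKETCH(fdct_sketch_rd, 0)  // like FDCT32x32_HIGH_PRECISION 0

int main(void) {
  printf("%d %d\n", fdct_sketch_hp(7), fdct_sketch_rd(7));
  return 0;
}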
File diff suppressed because it is too large
@@ -1,178 +0,0 @@
/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_DSP_X86_INV_TXFM_SSE2_H_
#define AOM_DSP_X86_INV_TXFM_SSE2_H_

#include <emmintrin.h>  // SSE2
#include "./aom_config.h"
#include "aom/aom_integer.h"
#include "av1/common/av1_inv_txfm.h"

// perform 8x8 transpose
static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
  const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
  const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
  const __m128i tr0_2 = _mm_unpackhi_epi16(in[0], in[1]);
  const __m128i tr0_3 = _mm_unpackhi_epi16(in[2], in[3]);
  const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]);
  const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]);
  const __m128i tr0_6 = _mm_unpackhi_epi16(in[4], in[5]);
  const __m128i tr0_7 = _mm_unpackhi_epi16(in[6], in[7]);

  const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
  const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_4, tr0_5);
  const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
  const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_4, tr0_5);
  const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_2, tr0_3);
  const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
  const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_2, tr0_3);
  const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);

  res[0] = _mm_unpacklo_epi64(tr1_0, tr1_1);
  res[1] = _mm_unpackhi_epi64(tr1_0, tr1_1);
  res[2] = _mm_unpacklo_epi64(tr1_2, tr1_3);
  res[3] = _mm_unpackhi_epi64(tr1_2, tr1_3);
  res[4] = _mm_unpacklo_epi64(tr1_4, tr1_5);
  res[5] = _mm_unpackhi_epi64(tr1_4, tr1_5);
  res[6] = _mm_unpacklo_epi64(tr1_6, tr1_7);
  res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7);
}

#define TRANSPOSE_8X4(in0, in1, in2, in3, out0, out1)   \
  {                                                     \
    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
                                                        \
    in0 = _mm_unpacklo_epi32(tr0_0, tr0_1); /* i1 i0 */ \
    in1 = _mm_unpackhi_epi32(tr0_0, tr0_1); /* i3 i2 */ \
  }

static INLINE void array_transpose_4X8(__m128i *in, __m128i *out) {
  const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
  const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
  const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]);
  const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]);

  const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
  const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
  const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
  const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);

  out[0] = _mm_unpacklo_epi64(tr1_0, tr1_4);
  out[1] = _mm_unpackhi_epi64(tr1_0, tr1_4);
  out[2] = _mm_unpacklo_epi64(tr1_2, tr1_6);
  out[3] = _mm_unpackhi_epi64(tr1_2, tr1_6);
}

static INLINE void array_transpose_16x16(__m128i *res0, __m128i *res1) {
  __m128i tbuf[8];
  array_transpose_8x8(res0, res0);
  array_transpose_8x8(res1, tbuf);
  array_transpose_8x8(res0 + 8, res1);
  array_transpose_8x8(res1 + 8, res1 + 8);

  res0[8] = tbuf[0];
  res0[9] = tbuf[1];
  res0[10] = tbuf[2];
  res0[11] = tbuf[3];
  res0[12] = tbuf[4];
  res0[13] = tbuf[5];
  res0[14] = tbuf[6];
  res0[15] = tbuf[7];
}

static INLINE void load_buffer_8x16(const int16_t *input, __m128i *in) {
  in[0] = _mm_load_si128((const __m128i *)(input + 0 * 16));
  in[1] = _mm_load_si128((const __m128i *)(input + 1 * 16));
  in[2] = _mm_load_si128((const __m128i *)(input + 2 * 16));
  in[3] = _mm_load_si128((const __m128i *)(input + 3 * 16));
  in[4] = _mm_load_si128((const __m128i *)(input + 4 * 16));
  in[5] = _mm_load_si128((const __m128i *)(input + 5 * 16));
  in[6] = _mm_load_si128((const __m128i *)(input + 6 * 16));
  in[7] = _mm_load_si128((const __m128i *)(input + 7 * 16));

  in[8] = _mm_load_si128((const __m128i *)(input + 8 * 16));
  in[9] = _mm_load_si128((const __m128i *)(input + 9 * 16));
  in[10] = _mm_load_si128((const __m128i *)(input + 10 * 16));
  in[11] = _mm_load_si128((const __m128i *)(input + 11 * 16));
  in[12] = _mm_load_si128((const __m128i *)(input + 12 * 16));
  in[13] = _mm_load_si128((const __m128i *)(input + 13 * 16));
  in[14] = _mm_load_si128((const __m128i *)(input + 14 * 16));
  in[15] = _mm_load_si128((const __m128i *)(input + 15 * 16));
}

#define RECON_AND_STORE(dest, in_x)                  \
  {                                                  \
    __m128i d0 = _mm_loadl_epi64((__m128i *)(dest)); \
    d0 = _mm_unpacklo_epi8(d0, zero);                \
    d0 = _mm_add_epi16(in_x, d0);                    \
    d0 = _mm_packus_epi16(d0, d0);                   \
    _mm_storel_epi64((__m128i *)(dest), d0);         \
  }

static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int stride) {
  const __m128i final_rounding = _mm_set1_epi16(1 << 5);
  const __m128i zero = _mm_setzero_si128();
  // Final rounding and shift
  in[0] = _mm_adds_epi16(in[0], final_rounding);
  in[1] = _mm_adds_epi16(in[1], final_rounding);
  in[2] = _mm_adds_epi16(in[2], final_rounding);
  in[3] = _mm_adds_epi16(in[3], final_rounding);
  in[4] = _mm_adds_epi16(in[4], final_rounding);
  in[5] = _mm_adds_epi16(in[5], final_rounding);
  in[6] = _mm_adds_epi16(in[6], final_rounding);
  in[7] = _mm_adds_epi16(in[7], final_rounding);
  in[8] = _mm_adds_epi16(in[8], final_rounding);
  in[9] = _mm_adds_epi16(in[9], final_rounding);
  in[10] = _mm_adds_epi16(in[10], final_rounding);
  in[11] = _mm_adds_epi16(in[11], final_rounding);
  in[12] = _mm_adds_epi16(in[12], final_rounding);
  in[13] = _mm_adds_epi16(in[13], final_rounding);
  in[14] = _mm_adds_epi16(in[14], final_rounding);
  in[15] = _mm_adds_epi16(in[15], final_rounding);

  in[0] = _mm_srai_epi16(in[0], 6);
  in[1] = _mm_srai_epi16(in[1], 6);
  in[2] = _mm_srai_epi16(in[2], 6);
  in[3] = _mm_srai_epi16(in[3], 6);
  in[4] = _mm_srai_epi16(in[4], 6);
  in[5] = _mm_srai_epi16(in[5], 6);
  in[6] = _mm_srai_epi16(in[6], 6);
  in[7] = _mm_srai_epi16(in[7], 6);
  in[8] = _mm_srai_epi16(in[8], 6);
  in[9] = _mm_srai_epi16(in[9], 6);
  in[10] = _mm_srai_epi16(in[10], 6);
  in[11] = _mm_srai_epi16(in[11], 6);
  in[12] = _mm_srai_epi16(in[12], 6);
  in[13] = _mm_srai_epi16(in[13], 6);
  in[14] = _mm_srai_epi16(in[14], 6);
  in[15] = _mm_srai_epi16(in[15], 6);

  RECON_AND_STORE(dest + 0 * stride, in[0]);
  RECON_AND_STORE(dest + 1 * stride, in[1]);
  RECON_AND_STORE(dest + 2 * stride, in[2]);
  RECON_AND_STORE(dest + 3 * stride, in[3]);
  RECON_AND_STORE(dest + 4 * stride, in[4]);
  RECON_AND_STORE(dest + 5 * stride, in[5]);
  RECON_AND_STORE(dest + 6 * stride, in[6]);
  RECON_AND_STORE(dest + 7 * stride, in[7]);
  RECON_AND_STORE(dest + 8 * stride, in[8]);
  RECON_AND_STORE(dest + 9 * stride, in[9]);
  RECON_AND_STORE(dest + 10 * stride, in[10]);
  RECON_AND_STORE(dest + 11 * stride, in[11]);
  RECON_AND_STORE(dest + 12 * stride, in[12]);
  RECON_AND_STORE(dest + 13 * stride, in[13]);
  RECON_AND_STORE(dest + 14 * stride, in[14]);
  RECON_AND_STORE(dest + 15 * stride, in[15]);
}

#endif  // AOM_DSP_X86_INV_TXFM_SSE2_H_
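RECON_AND_STORE above is the reconstruction step: widen eight 8-bit prediction pixels, add the 16-bit residual row, and pack back with unsigned saturation (_mm_packus_epi16 clamps to [0, 255]). A scalar equivalent, with the clamp written out for self-containment:

#include <stdint.h>

static uint8_t clip_pixel_sketch(int val) {
  return (uint8_t)(val < 0 ? 0 : (val > 255 ? 255 : val));
}

// Scalar model of RECON_AND_STORE for one row of 8 pixels.
static void recon_and_store_sketch(uint8_t *dest, const int16_t *residual) {
  for (int i = 0; i < 8; ++i)
    dest[i] = clip_pixel_sketch(dest[i] + residual[i]);
}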
@@ -24,7 +24,7 @@
#include "av1/common/blockd.h"
#include "av1/common/scan.h"
#include "aom/aom_integer.h"
#include "av1/common/av1_inv_txfm.h"
#include "aom_dsp/inv_txfm.h"

using libaom_test::ACMRandom;

@@ -104,10 +104,10 @@ TEST_P(AV1InvTxfm, RunInvAccuracyCheck) { RunInvAccuracyCheck(); }

INSTANTIATE_TEST_CASE_P(
    C, AV1InvTxfm,
    ::testing::Values(IdctParam(&av1_idct4_c, &reference_idct_1d, 4, 1),
                      IdctParam(&av1_idct8_c, &reference_idct_1d, 8, 2),
                      IdctParam(&av1_idct16_c, &reference_idct_1d, 16, 4),
                      IdctParam(&av1_idct32_c, &reference_idct_1d, 32, 6)));
    ::testing::Values(IdctParam(&aom_idct4_c, &reference_idct_1d, 4, 1),
                      IdctParam(&aom_idct8_c, &reference_idct_1d, 8, 2),
                      IdctParam(&aom_idct16_c, &reference_idct_1d, 16, 4),
                      IdctParam(&aom_idct32_c, &reference_idct_1d, 32, 6)));

#if CONFIG_AV1_ENCODER
typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride);

@@ -262,19 +262,19 @@ using std::tr1::make_tuple;

INSTANTIATE_TEST_CASE_P(
    C, AV1PartialIDctTest,
    ::testing::Values(make_tuple(&av1_fdct32x32_c, &av1_idct32x32_1024_add_c,
                                 &av1_idct32x32_34_add_c, TX_32X32, 34),
                      make_tuple(&av1_fdct32x32_c, &av1_idct32x32_1024_add_c,
                                 &av1_idct32x32_1_add_c, TX_32X32, 1),
                      make_tuple(&av1_fdct16x16_c, &av1_idct16x16_256_add_c,
                                 &av1_idct16x16_10_add_c, TX_16X16, 10),
                      make_tuple(&av1_fdct16x16_c, &av1_idct16x16_256_add_c,
                                 &av1_idct16x16_1_add_c, TX_16X16, 1),
                      make_tuple(&av1_fdct8x8_c, &av1_idct8x8_64_add_c,
                                 &av1_idct8x8_12_add_c, TX_8X8, 12),
                      make_tuple(&av1_fdct8x8_c, &av1_idct8x8_64_add_c,
                                 &av1_idct8x8_1_add_c, TX_8X8, 1),
                      make_tuple(&av1_fdct4x4_c, &av1_idct4x4_16_add_c,
                                 &av1_idct4x4_1_add_c, TX_4X4, 1)));
    ::testing::Values(make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_c,
                                 &aom_idct32x32_34_add_c, TX_32X32, 34),
                      make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_c,
                                 &aom_idct32x32_1_add_c, TX_32X32, 1),
                      make_tuple(&aom_fdct16x16_c, &aom_idct16x16_256_add_c,
                                 &aom_idct16x16_10_add_c, TX_16X16, 10),
                      make_tuple(&aom_fdct16x16_c, &aom_idct16x16_256_add_c,
                                 &aom_idct16x16_1_add_c, TX_16X16, 1),
                      make_tuple(&aom_fdct8x8_c, &aom_idct8x8_64_add_c,
                                 &aom_idct8x8_12_add_c, TX_8X8, 12),
                      make_tuple(&aom_fdct8x8_c, &aom_idct8x8_64_add_c,
                                 &aom_idct8x8_1_add_c, TX_8X8, 1),
                      make_tuple(&aom_fdct4x4_c, &aom_idct4x4_16_add_c,
                                 &aom_idct4x4_1_add_c, TX_4X4, 1)));
#endif  // CONFIG_AV1_ENCODER
}  // namespace
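For context on the AV1PartialIDctTest parameters above: each tuple pairs a forward transform, a full inverse, and a reduced inverse, plus the coefficient count the reduced version assumes (e.g. 34 for aom_idct32x32_34_add). The property exercised is roughly the following sketch (not the harness's actual code: the real test zeroes coefficients in scan order, raster order is used here for brevity, and tran_low_t's real definition is build-dependent):

#include <stdint.h>
#include <string.h>

typedef int32_t tran_low_t;  // assumption; build-dependent in libaom
typedef void (*InvTxfmAdd)(const tran_low_t *input, uint8_t *dest, int stride);

// With only the first `eob` coefficients kept, the reduced inverse must
// reconstruct the same pixels as the full inverse.
static int partial_matches_full(InvTxfmAdd full, InvTxfmAdd partial,
                                tran_low_t *coeffs, int num_coeffs, int eob,
                                uint8_t *dst_a, uint8_t *dst_b, int stride,
                                int rows) {
  memset(coeffs + eob, 0, (num_coeffs - eob) * sizeof(*coeffs));
  full(coeffs, dst_a, stride);
  partial(coeffs, dst_b, stride);
  return memcmp(dst_a, dst_b, (size_t)stride * rows) == 0;
}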