Update highbd idct functions arguments to use uint16_t dst

BUG=webm:1388
Change-Id: I3581d80d0389b99166e70987d38aba2db6c469d5

Parent: 081b39f2b7
Commit: d5de63d2be
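The change switches every high-bitdepth inverse-transform kernel from a disguised uint8_t *dest8 parameter (re-cast to uint16_t * inside each function) to a real uint16_t *dest parameter, so the conversion happens once at the call site. As background, a sketch of the pointer macros involved, assuming the usual libvpx definitions (the exact definitions in vpx_dsp/vpx_dsp_common.h and vpx_ports/mem.h are authoritative; this is only an illustration):

#include <stdint.h>

/* Plain reinterpretation casts: the pointer value is a normal address and the
 * buffer already holds 16-bit samples; only the declared type changes. */
#define CAST_TO_SHORTPTR(x) ((uint16_t *)(x))
#define CAST_TO_BYTEPTR(x) ((uint8_t *)(x))

/* Encoded frame-buffer pointers: high-bitdepth planes are stored behind a
 * uint8_t * whose value is the real uint16_t address shifted, so converting
 * shifts the address back. */
#define CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)(x)) << 1))
#define CONVERT_TO_BYTEPTR(x) ((uint8_t *)(((uintptr_t)(x)) >> 1))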
@@ -255,11 +255,11 @@ void iht16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
 
 #if CONFIG_VP9_HIGHBITDEPTH
 void idct16x16_10(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct16x16_256_add_c(in, out, stride, 10);
+  vpx_highbd_idct16x16_256_add_c(in, CAST_TO_SHORTPTR(out), stride, 10);
 }
 
 void idct16x16_12(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct16x16_256_add_c(in, out, stride, 12);
+  vpx_highbd_idct16x16_256_add_c(in, CAST_TO_SHORTPTR(out), stride, 12);
 }
 
 void idct16x16_10_ref(const tran_low_t *in, uint8_t *out, int stride,
@@ -273,36 +273,36 @@ void idct16x16_12_ref(const tran_low_t *in, uint8_t *out, int stride,
 }
 
 void iht16x16_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
+  vp9_highbd_iht16x16_256_add_c(in, CAST_TO_SHORTPTR(out), stride, tx_type, 10);
 }
 
 void iht16x16_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
+  vp9_highbd_iht16x16_256_add_c(in, CAST_TO_SHORTPTR(out), stride, tx_type, 12);
 }
 
 #if HAVE_SSE2
 void idct16x16_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct16x16_10_add_c(in, out, stride, 10);
+  vpx_highbd_idct16x16_10_add_c(in, CAST_TO_SHORTPTR(out), stride, 10);
 }
 
 void idct16x16_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct16x16_10_add_c(in, out, stride, 12);
+  vpx_highbd_idct16x16_10_add_c(in, CAST_TO_SHORTPTR(out), stride, 12);
 }
 
 void idct16x16_256_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 10);
+  vpx_highbd_idct16x16_256_add_sse2(in, CAST_TO_SHORTPTR(out), stride, 10);
 }
 
 void idct16x16_256_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 12);
+  vpx_highbd_idct16x16_256_add_sse2(in, CAST_TO_SHORTPTR(out), stride, 12);
 }
 
 void idct16x16_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 10);
+  vpx_highbd_idct16x16_10_add_sse2(in, CAST_TO_SHORTPTR(out), stride, 10);
 }
 
 void idct16x16_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
+  vpx_highbd_idct16x16_10_add_sse2(in, CAST_TO_SHORTPTR(out), stride, 12);
 }
 #endif  // HAVE_SSE2
 #endif  // CONFIG_VP9_HIGHBITDEPTH
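The test wrappers keep their uint8_t * signature because they must still match the harness's inverse-transform function-pointer type; only the inner call now converts the pointer. A minimal sketch of that shape, assuming a function-pointer typedef like the one used by these tests (the typedef name and the tran_low_t stand-in are assumptions, not part of this diff):

typedef int32_t tran_low_t;  /* stand-in for the libvpx coefficient type */

typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride);

void vpx_highbd_idct16x16_256_add_c(const tran_low_t *in, uint16_t *out,
                                    int stride, int bd);

/* The wrapper presents the 8-bit-pointer signature the test table expects and
 * reinterprets the buffer exactly once when calling the 16-bit kernel. */
static void idct16x16_10_wrapper(const tran_low_t *in, uint8_t *out,
                                 int stride) {
  vpx_highbd_idct16x16_256_add_c(in, CAST_TO_SHORTPTR(out), stride, 10);
}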
@@ -71,11 +71,11 @@ typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmFunc, int, vpx_bit_depth_t>
 
 #if CONFIG_VP9_HIGHBITDEPTH
 void idct32x32_10(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct32x32_1024_add_c(in, out, stride, 10);
+  vpx_highbd_idct32x32_1024_add_c(in, CAST_TO_SHORTPTR(out), stride, 10);
 }
 
 void idct32x32_12(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct32x32_1024_add_c(in, out, stride, 12);
+  vpx_highbd_idct32x32_1024_add_c(in, CAST_TO_SHORTPTR(out), stride, 12);
 }
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 
@@ -55,36 +55,36 @@ void fwht4x4_ref(const int16_t *in, tran_low_t *out, int stride,
 
 #if CONFIG_VP9_HIGHBITDEPTH
 void idct4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct4x4_16_add_c(in, out, stride, 10);
+  vpx_highbd_idct4x4_16_add_c(in, CAST_TO_SHORTPTR(out), stride, 10);
 }
 
 void idct4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct4x4_16_add_c(in, out, stride, 12);
+  vpx_highbd_idct4x4_16_add_c(in, CAST_TO_SHORTPTR(out), stride, 12);
 }
 
 void iht4x4_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp9_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 10);
+  vp9_highbd_iht4x4_16_add_c(in, CAST_TO_SHORTPTR(out), stride, tx_type, 10);
 }
 
 void iht4x4_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp9_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 12);
+  vp9_highbd_iht4x4_16_add_c(in, CAST_TO_SHORTPTR(out), stride, tx_type, 12);
 }
 
 void iwht4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_iwht4x4_16_add_c(in, out, stride, 10);
+  vpx_highbd_iwht4x4_16_add_c(in, CAST_TO_SHORTPTR(out), stride, 10);
 }
 
 void iwht4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_iwht4x4_16_add_c(in, out, stride, 12);
+  vpx_highbd_iwht4x4_16_add_c(in, CAST_TO_SHORTPTR(out), stride, 12);
 }
 
 #if HAVE_SSE2
 void idct4x4_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 10);
+  vpx_highbd_idct4x4_16_add_sse2(in, CAST_TO_SHORTPTR(out), stride, 10);
 }
 
 void idct4x4_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct4x4_16_add_sse2(in, out, stride, 12);
+  vpx_highbd_idct4x4_16_add_sse2(in, CAST_TO_SHORTPTR(out), stride, 12);
 }
 #endif  // HAVE_SSE2
 #endif  // CONFIG_VP9_HIGHBITDEPTH
@@ -88,45 +88,45 @@ void fht8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
 
 #if CONFIG_VP9_HIGHBITDEPTH
 void idct8x8_10(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct8x8_64_add_c(in, out, stride, 10);
+  vpx_highbd_idct8x8_64_add_c(in, CAST_TO_SHORTPTR(out), stride, 10);
 }
 
 void idct8x8_12(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct8x8_64_add_c(in, out, stride, 12);
+  vpx_highbd_idct8x8_64_add_c(in, CAST_TO_SHORTPTR(out), stride, 12);
 }
 
 void iht8x8_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp9_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 10);
+  vp9_highbd_iht8x8_64_add_c(in, CAST_TO_SHORTPTR(out), stride, tx_type, 10);
 }
 
 void iht8x8_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp9_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 12);
+  vp9_highbd_iht8x8_64_add_c(in, CAST_TO_SHORTPTR(out), stride, tx_type, 12);
 }
 
 #if HAVE_SSE2
 
 void idct8x8_12_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct8x8_12_add_c(in, out, stride, 10);
+  vpx_highbd_idct8x8_12_add_c(in, CAST_TO_SHORTPTR(out), stride, 10);
 }
 
 void idct8x8_12_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct8x8_12_add_c(in, out, stride, 12);
+  vpx_highbd_idct8x8_12_add_c(in, CAST_TO_SHORTPTR(out), stride, 12);
 }
 
 void idct8x8_12_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct8x8_12_add_sse2(in, out, stride, 10);
+  vpx_highbd_idct8x8_12_add_sse2(in, CAST_TO_SHORTPTR(out), stride, 10);
 }
 
 void idct8x8_12_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct8x8_12_add_sse2(in, out, stride, 12);
+  vpx_highbd_idct8x8_12_add_sse2(in, CAST_TO_SHORTPTR(out), stride, 12);
 }
 
 void idct8x8_64_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct8x8_64_add_sse2(in, out, stride, 10);
+  vpx_highbd_idct8x8_64_add_sse2(in, CAST_TO_SHORTPTR(out), stride, 10);
 }
 
 void idct8x8_64_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
-  vpx_highbd_idct8x8_64_add_sse2(in, out, stride, 12);
+  vpx_highbd_idct8x8_64_add_sse2(in, CAST_TO_SHORTPTR(out), stride, 12);
 }
 #endif  // HAVE_SSE2
 #endif  // CONFIG_VP9_HIGHBITDEPTH
@@ -43,9 +43,11 @@ void wrapper(const tran_low_t *in, uint8_t *out, int stride, int bd) {
 }
 
 #if CONFIG_VP9_HIGHBITDEPTH
-template <InvTxfmWithBdFunc fn>
+typedef void (*InvTxfmHighbdFunc)(const tran_low_t *in, uint16_t *out,
+                                  int stride, int bd);
+template <InvTxfmHighbdFunc fn>
 void highbd_wrapper(const tran_low_t *in, uint8_t *out, int stride, int bd) {
-  fn(in, out, stride, bd);
+  fn(in, CAST_TO_SHORTPTR(out), stride, bd);
 }
 #endif
 
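The new InvTxfmHighbdFunc typedef lets the parameterized idct tests name high-bitdepth kernels by their real uint16_t signature while still exposing the legacy byte-pointer interface through highbd_wrapper. A hedged sketch of how such a wrapper is typically bound (the InvTxfmWithBdFunc table-entry form is inferred from the surrounding test code, not shown in this diff):

// Instantiating the template selects the kernel at compile time; the result
// still matches the uint8_t* signature used by the test tables.
const InvTxfmWithBdFunc highbd_idct4x4 =
    &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>;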
@@ -205,7 +205,7 @@ void vp9_iht16x16_add(TX_TYPE tx_type, const tran_low_t *input, uint8_t *dest,
 
 #if CONFIG_VP9_HIGHBITDEPTH
 
-void vp9_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+void vp9_highbd_iht4x4_16_add_c(const tran_low_t *input, uint16_t *dest,
                                 int stride, int tx_type, int bd) {
   const highbd_transform_2d IHT_4[] = {
     { vpx_highbd_idct4_c, vpx_highbd_idct4_c },   // DCT_DCT  = 0
@@ -213,7 +213,6 @@ void vp9_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
     { vpx_highbd_idct4_c, vpx_highbd_iadst4_c },  // DCT_ADST = 2
     { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c }  // ADST_ADST = 3
   };
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   int i, j;
   tran_low_t out[4 * 4];
@@ -245,14 +244,13 @@ static const highbd_transform_2d HIGH_IHT_8[] = {
   { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c }  // ADST_ADST = 3
 };
 
-void vp9_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
+void vp9_highbd_iht8x8_64_add_c(const tran_low_t *input, uint16_t *dest,
                                 int stride, int tx_type, int bd) {
   int i, j;
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
   tran_low_t temp_in[8], temp_out[8];
   const highbd_transform_2d ht = HIGH_IHT_8[tx_type];
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   // Inverse transform row vectors.
   for (i = 0; i < 8; ++i) {
@@ -279,14 +277,13 @@ static const highbd_transform_2d HIGH_IHT_16[] = {
   { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c }  // ADST_ADST = 3
 };
 
-void vp9_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
+void vp9_highbd_iht16x16_256_add_c(const tran_low_t *input, uint16_t *dest,
                                    int stride, int tx_type, int bd) {
   int i, j;
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
   tran_low_t temp_in[16], temp_out[16];
   const highbd_transform_2d ht = HIGH_IHT_16[tx_type];
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   // Rows
   for (i = 0; i < 16; ++i) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// idct
|
// idct
|
||||||
void vp9_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
|
void vp9_highbd_idct4x4_add(const tran_low_t *input, uint16_t *dest, int stride,
|
||||||
int eob, int bd) {
|
int eob, int bd) {
|
||||||
if (eob > 1)
|
if (eob > 1)
|
||||||
vpx_highbd_idct4x4_16_add(input, dest, stride, bd);
|
vpx_highbd_idct4x4_16_add(input, dest, stride, bd);
|
||||||
@ -315,7 +312,7 @@ void vp9_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
|
|||||||
vpx_highbd_idct4x4_1_add(input, dest, stride, bd);
|
vpx_highbd_idct4x4_1_add(input, dest, stride, bd);
|
||||||
}
|
}
|
||||||
|
|
||||||
void vp9_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
|
void vp9_highbd_iwht4x4_add(const tran_low_t *input, uint16_t *dest, int stride,
|
||||||
int eob, int bd) {
|
int eob, int bd) {
|
||||||
if (eob > 1)
|
if (eob > 1)
|
||||||
vpx_highbd_iwht4x4_16_add(input, dest, stride, bd);
|
vpx_highbd_iwht4x4_16_add(input, dest, stride, bd);
|
||||||
@ -323,7 +320,7 @@ void vp9_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
|
|||||||
vpx_highbd_iwht4x4_1_add(input, dest, stride, bd);
|
vpx_highbd_iwht4x4_1_add(input, dest, stride, bd);
|
||||||
}
|
}
|
||||||
|
|
||||||
void vp9_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
|
void vp9_highbd_idct8x8_add(const tran_low_t *input, uint16_t *dest, int stride,
|
||||||
int eob, int bd) {
|
int eob, int bd) {
|
||||||
// If dc is 1, then input[0] is the reconstructed value, do not need
|
// If dc is 1, then input[0] is the reconstructed value, do not need
|
||||||
// dequantization. Also, when dc is 1, dc is counted in eobs, namely eobs >=1.
|
// dequantization. Also, when dc is 1, dc is counted in eobs, namely eobs >=1.
|
||||||
@ -340,7 +337,7 @@ void vp9_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void vp9_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
|
void vp9_highbd_idct16x16_add(const tran_low_t *input, uint16_t *dest,
|
||||||
int stride, int eob, int bd) {
|
int stride, int eob, int bd) {
|
||||||
// The calculation can be simplified if there are not many non-zero dct
|
// The calculation can be simplified if there are not many non-zero dct
|
||||||
// coefficients. Use eobs to separate different cases.
|
// coefficients. Use eobs to separate different cases.
|
||||||
@ -356,7 +353,7 @@ void vp9_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void vp9_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
|
void vp9_highbd_idct32x32_add(const tran_low_t *input, uint16_t *dest,
|
||||||
int stride, int eob, int bd) {
|
int stride, int eob, int bd) {
|
||||||
// Non-zero coeff only in upper-left 8x8
|
// Non-zero coeff only in upper-left 8x8
|
||||||
if (eob == 1) {
|
if (eob == 1) {
|
||||||
@ -372,7 +369,7 @@ void vp9_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
|
|||||||
|
|
||||||
// iht
|
// iht
|
||||||
void vp9_highbd_iht4x4_add(TX_TYPE tx_type, const tran_low_t *input,
|
void vp9_highbd_iht4x4_add(TX_TYPE tx_type, const tran_low_t *input,
|
||||||
uint8_t *dest, int stride, int eob, int bd) {
|
uint16_t *dest, int stride, int eob, int bd) {
|
||||||
if (tx_type == DCT_DCT)
|
if (tx_type == DCT_DCT)
|
||||||
vp9_highbd_idct4x4_add(input, dest, stride, eob, bd);
|
vp9_highbd_idct4x4_add(input, dest, stride, eob, bd);
|
||||||
else
|
else
|
||||||
@ -380,7 +377,7 @@ void vp9_highbd_iht4x4_add(TX_TYPE tx_type, const tran_low_t *input,
|
|||||||
}
|
}
|
||||||
|
|
||||||
void vp9_highbd_iht8x8_add(TX_TYPE tx_type, const tran_low_t *input,
|
void vp9_highbd_iht8x8_add(TX_TYPE tx_type, const tran_low_t *input,
|
||||||
uint8_t *dest, int stride, int eob, int bd) {
|
uint16_t *dest, int stride, int eob, int bd) {
|
||||||
if (tx_type == DCT_DCT) {
|
if (tx_type == DCT_DCT) {
|
||||||
vp9_highbd_idct8x8_add(input, dest, stride, eob, bd);
|
vp9_highbd_idct8x8_add(input, dest, stride, eob, bd);
|
||||||
} else {
|
} else {
|
||||||
@ -389,7 +386,7 @@ void vp9_highbd_iht8x8_add(TX_TYPE tx_type, const tran_low_t *input,
|
|||||||
}
|
}
|
||||||
|
|
||||||
void vp9_highbd_iht16x16_add(TX_TYPE tx_type, const tran_low_t *input,
|
void vp9_highbd_iht16x16_add(TX_TYPE tx_type, const tran_low_t *input,
|
||||||
uint8_t *dest, int stride, int eob, int bd) {
|
uint16_t *dest, int stride, int eob, int bd) {
|
||||||
if (tx_type == DCT_DCT) {
|
if (tx_type == DCT_DCT) {
|
||||||
vp9_highbd_idct16x16_add(input, dest, stride, eob, bd);
|
vp9_highbd_idct16x16_add(input, dest, stride, eob, bd);
|
||||||
} else {
|
} else {
|
||||||
|
@@ -57,22 +57,22 @@ void vp9_iht16x16_add(TX_TYPE tx_type, const tran_low_t *input, uint8_t *dest,
                       int stride, int eob);
 
 #if CONFIG_VP9_HIGHBITDEPTH
-void vp9_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+void vp9_highbd_iwht4x4_add(const tran_low_t *input, uint16_t *dest, int stride,
                             int eob, int bd);
-void vp9_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+void vp9_highbd_idct4x4_add(const tran_low_t *input, uint16_t *dest, int stride,
                             int eob, int bd);
-void vp9_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+void vp9_highbd_idct8x8_add(const tran_low_t *input, uint16_t *dest, int stride,
                             int eob, int bd);
-void vp9_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
+void vp9_highbd_idct16x16_add(const tran_low_t *input, uint16_t *dest,
                               int stride, int eob, int bd);
-void vp9_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
+void vp9_highbd_idct32x32_add(const tran_low_t *input, uint16_t *dest,
                               int stride, int eob, int bd);
 void vp9_highbd_iht4x4_add(TX_TYPE tx_type, const tran_low_t *input,
-                           uint8_t *dest, int stride, int eob, int bd);
+                           uint16_t *dest, int stride, int eob, int bd);
 void vp9_highbd_iht8x8_add(TX_TYPE tx_type, const tran_low_t *input,
-                           uint8_t *dest, int stride, int eob, int bd);
+                           uint16_t *dest, int stride, int eob, int bd);
 void vp9_highbd_iht16x16_add(TX_TYPE tx_type, const tran_low_t *input,
-                             uint8_t *dest, int stride, int eob, int bd);
+                             uint16_t *dest, int stride, int eob, int bd);
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 #ifdef __cplusplus
 }  // extern "C"
@@ -101,11 +101,11 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
   #
   # Note as optimized versions of these functions are added we need to add a check to ensure
   # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
-  add_proto qw/void vp9_highbd_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int stride, int tx_type, int bd";
+  add_proto qw/void vp9_highbd_iht4x4_16_add/, "const tran_low_t *input, uint16_t *dest, int stride, int tx_type, int bd";
 
-  add_proto qw/void vp9_highbd_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int stride, int tx_type, int bd";
+  add_proto qw/void vp9_highbd_iht8x8_64_add/, "const tran_low_t *input, uint16_t *dest, int stride, int tx_type, int bd";
 
-  add_proto qw/void vp9_highbd_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type, int bd";
+  add_proto qw/void vp9_highbd_iht16x16_256_add/, "const tran_low_t *input, uint16_t *output, int pitch, int tx_type, int bd";
 }
 
 #
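These add_proto entries feed the rtcd generator, so the dispatch prototypes in the generated vp9_rtcd.h now carry the uint16_t destination as well. A hedged sketch of what the generated declaration looks like when only the C version exists (the exact generated header layout is an assumption):

void vp9_highbd_iht4x4_16_add_c(const tran_low_t *input, uint16_t *dest,
                                int stride, int tx_type, int bd);
#define vp9_highbd_iht4x4_16_add vp9_highbd_iht4x4_16_add_c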
@@ -189,7 +189,7 @@ static void inverse_transform_block_inter(MACROBLOCKD *xd, int plane,
   assert(eob > 0);
 #if CONFIG_VP9_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    uint8_t *const dst16 = CAST_TO_BYTEPTR(CONVERT_TO_SHORTPTR(dst));
+    uint16_t *const dst16 = CONVERT_TO_SHORTPTR(dst);
     if (xd->lossless) {
       vp9_highbd_iwht4x4_add(dqcoeff, dst16, stride, eob, xd->bd);
     } else {
@@ -257,7 +257,7 @@ static void inverse_transform_block_intra(MACROBLOCKD *xd, int plane,
   assert(eob > 0);
 #if CONFIG_VP9_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    uint8_t *const dst16 = CAST_TO_BYTEPTR(CONVERT_TO_SHORTPTR(dst));
+    uint16_t *const dst16 = CONVERT_TO_SHORTPTR(dst);
     if (xd->lossless) {
       vp9_highbd_iwht4x4_add(dqcoeff, dst16, stride, eob, xd->bd);
     } else {
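On the decoder side the double conversion is gone: the frame buffer's encoded byte pointer is turned into a uint16_t pointer once and passed straight through. A minimal sketch of that call-site shape (function name hypothetical; dst is the encoded high-bitdepth plane pointer):

void inverse_transform_block_sketch(const tran_low_t *dqcoeff, uint8_t *dst,
                                    int stride, int eob, int lossless, int bd) {
  uint16_t *const dst16 = CONVERT_TO_SHORTPTR(dst);  /* one conversion */
  if (lossless)
    vp9_highbd_iwht4x4_add(dqcoeff, dst16, stride, eob, bd);
  else
    vp9_highbd_idct4x4_add(dqcoeff, dst16, stride, eob, bd);
}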
@@ -184,7 +184,7 @@ struct macroblock {
   void (*fwd_txm4x4)(const int16_t *input, tran_low_t *output, int stride);
   void (*itxm_add)(const tran_low_t *input, uint8_t *dest, int stride, int eob);
 #if CONFIG_VP9_HIGHBITDEPTH
-  void (*highbd_itxm_add)(const tran_low_t *input, uint8_t *dest, int stride,
+  void (*highbd_itxm_add)(const tran_low_t *input, uint16_t *dest, int stride,
                           int eob, int bd);
 #endif
 };
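With the member retyped, the encoder can point highbd_itxm_add directly at the vp9 high-bitdepth add functions, since their signatures now match. A hedged sketch of the selection, mirroring how the 8-bit itxm_add is chosen (the exact setup location in the encoder is an assumption):

#if CONFIG_VP9_HIGHBITDEPTH
  /* Lossless blocks use the inverse WHT, everything else the inverse DCT. */
  x->highbd_itxm_add =
      xd->lossless ? vp9_highbd_iwht4x4_add : vp9_highbd_idct4x4_add;
#endif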
@@ -637,7 +637,7 @@ static void encode_block(int plane, int block, int row, int col,
   if (x->skip_encode || p->eobs[block] == 0) return;
 #if CONFIG_VP9_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    uint8_t *const dst16 = CAST_TO_BYTEPTR(CONVERT_TO_SHORTPTR(dst));
+    uint16_t *const dst16 = CONVERT_TO_SHORTPTR(dst);
     switch (tx_size) {
       case TX_32X32:
         vp9_highbd_idct32x32_add(dqcoeff, dst16, pd->dst.stride, p->eobs[block],
@@ -700,8 +700,8 @@ static void encode_block_pass1(int plane, int block, int row, int col,
   if (p->eobs[block] > 0) {
 #if CONFIG_VP9_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-      x->highbd_itxm_add(dqcoeff, CAST_TO_BYTEPTR(CONVERT_TO_SHORTPTR(dst)),
-                         pd->dst.stride, p->eobs[block], xd->bd);
+      x->highbd_itxm_add(dqcoeff, CONVERT_TO_SHORTPTR(dst), pd->dst.stride,
+                         p->eobs[block], xd->bd);
       return;
     }
 #endif  // CONFIG_VP9_HIGHBITDEPTH
@@ -801,7 +801,7 @@ void vp9_encode_block_intra(int plane, int block, int row, int col,
 
 #if CONFIG_VP9_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    uint8_t *const dst16 = CAST_TO_BYTEPTR(CONVERT_TO_SHORTPTR(dst));
+    uint16_t *const dst16 = CONVERT_TO_SHORTPTR(dst);
     switch (tx_size) {
       case TX_32X32:
         if (!x->skip_recode) {
@@ -601,22 +601,21 @@ static void dist_block(const VP9_COMP *cpi, MACROBLOCK *x, int plane,
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       vpx_highbd_convolve_copy(CONVERT_TO_SHORTPTR(dst), dst_stride, recon16,
                                32, NULL, 0, NULL, 0, bs, bs, xd->bd);
-      recon = CAST_TO_BYTEPTR(recon16);
       if (xd->lossless) {
-        vp9_highbd_iwht4x4_add(dqcoeff, recon, 32, *eob, xd->bd);
+        vp9_highbd_iwht4x4_add(dqcoeff, recon16, 32, *eob, xd->bd);
       } else {
         switch (tx_size) {
           case TX_4X4:
-            vp9_highbd_idct4x4_add(dqcoeff, recon, 32, *eob, xd->bd);
+            vp9_highbd_idct4x4_add(dqcoeff, recon16, 32, *eob, xd->bd);
             break;
           case TX_8X8:
-            vp9_highbd_idct8x8_add(dqcoeff, recon, 32, *eob, xd->bd);
+            vp9_highbd_idct8x8_add(dqcoeff, recon16, 32, *eob, xd->bd);
             break;
           case TX_16X16:
-            vp9_highbd_idct16x16_add(dqcoeff, recon, 32, *eob, xd->bd);
+            vp9_highbd_idct16x16_add(dqcoeff, recon16, 32, *eob, xd->bd);
             break;
           case TX_32X32:
-            vp9_highbd_idct32x32_add(dqcoeff, recon, 32, *eob, xd->bd);
+            vp9_highbd_idct32x32_add(dqcoeff, recon16, 32, *eob, xd->bd);
             break;
           default: assert(0 && "Invalid transform size");
         }
@@ -1005,7 +1004,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int row,
       const int block = (row + idy) * 2 + (col + idx);
       const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
       uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
-      uint8_t *const dst16 = CAST_TO_BYTEPTR(CONVERT_TO_SHORTPTR(dst));
+      uint16_t *const dst16 = CONVERT_TO_SHORTPTR(dst);
      int16_t *const src_diff =
          vp9_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
      tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
@@ -1268,10 +1268,8 @@ void vpx_highbd_idct16x16_10_add_half1d_pass2(const int32_t *input,
   }
 }
 
-void vpx_highbd_idct16x16_256_add_neon(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct16x16_256_add_neon(const tran_low_t *input, uint16_t *dest,
                                        int stride, int bd) {
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
-
   if (bd == 8) {
     int16_t row_idct_output[16 * 16];
 
@@ -1313,10 +1311,8 @@ void vpx_highbd_idct16x16_256_add_neon(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vpx_highbd_idct16x16_38_add_neon(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct16x16_38_add_neon(const tran_low_t *input, uint16_t *dest,
                                       int stride, int bd) {
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
-
   if (bd == 8) {
     int16_t row_idct_output[16 * 16];
 
@@ -1349,10 +1345,8 @@ void vpx_highbd_idct16x16_38_add_neon(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vpx_highbd_idct16x16_10_add_neon(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct16x16_10_add_neon(const tran_low_t *input, uint16_t *dest,
                                       int stride, int bd) {
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
-
   if (bd == 8) {
     int16_t row_idct_output[4 * 16];
 
@@ -1414,7 +1408,7 @@ static INLINE void highbd_idct16x16_1_add_neg_kernel(uint16_t **dest,
   *dest += stride;
 }
 
-void vpx_highbd_idct16x16_1_add_neon(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct16x16_1_add_neon(const tran_low_t *input, uint16_t *dest,
                                      int stride, int bd) {
   const tran_low_t out0 =
       HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
@@ -1422,7 +1416,6 @@ void vpx_highbd_idct16x16_1_add_neon(const tran_low_t *input, uint8_t *dest8,
       HIGHBD_WRAPLOW(dct_const_round_shift(out0 * cospi_16_64), bd);
   const int16_t a1 = ROUND_POWER_OF_TWO(out1, 6);
   const int16x8_t dc = vdupq_n_s16(a1);
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
   int i;
 
   if (a1 >= 0) {
@@ -386,15 +386,14 @@ static INLINE void idct32_bands_end_2nd_pass(const int32_t *const out,
 }
 
 static INLINE void vpx_highbd_idct32_32_neon(const tran_low_t *input,
-                                             uint8_t *const dest,
-                                             const int stride, const int bd) {
+                                             uint16_t *dst, const int stride,
+                                             const int bd) {
   int i, idct32_pass_loop;
   int32_t trans_buf[32 * 8];
   int32_t pass1[32 * 32];
   int32_t pass2[32 * 32];
   int32_t *out;
   int32x4x2_t q[16];
-  uint16_t *dst = CAST_TO_SHORTPTR(dest);
 
   for (idct32_pass_loop = 0, out = pass1; idct32_pass_loop < 2;
        idct32_pass_loop++, input = pass1, out = pass2) {
@@ -637,10 +636,10 @@ static INLINE void vpx_highbd_idct32_32_neon(const tran_low_t *input,
   }
 }
 
-void vpx_highbd_idct32x32_1024_add_neon(const tran_low_t *input, uint8_t *dest,
+void vpx_highbd_idct32x32_1024_add_neon(const tran_low_t *input, uint16_t *dest,
                                         int stride, int bd) {
   if (bd == 8) {
-    vpx_idct32_32_neon(input, dest, stride, 1);
+    vpx_idct32_32_neon(input, CAST_TO_BYTEPTR(dest), stride, 1);
   } else {
     vpx_highbd_idct32_32_neon(input, dest, stride, bd);
   }
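In the NEON 32x32 wrapper the bd == 8 path re-casts in the opposite direction: the shared vpx_idct32_32_neon routine still takes a byte pointer (its trailing 1 is presumably a high-bitdepth flag), so the uint16_t destination is handed over through CAST_TO_BYTEPTR. A sketch of that dispatch, copied in shape from the hunk above (the wrapper name here is hypothetical):

void highbd_idct32x32_1024_dispatch(const tran_low_t *input, uint16_t *dest,
                                    int stride, int bd) {
  if (bd == 8) {
    /* The shared NEON routine keeps the uint8_t * interface; reinterpret the
     * 16-bit buffer for it and let the trailing flag select 16-bit stores. */
    vpx_idct32_32_neon(input, CAST_TO_BYTEPTR(dest), stride, 1);
  } else {
    vpx_highbd_idct32_32_neon(input, dest, stride, bd);
  }
}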
@@ -726,10 +726,9 @@ static void vpx_highbd_idct32_16_neon(const int32_t *const input,
   highbd_idct16x16_add_store(out + 16, output + 16 * stride, stride, bd);
 }
 
-void vpx_highbd_idct32x32_135_add_neon(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct32x32_135_add_neon(const tran_low_t *input, uint16_t *dest,
                                        int stride, int bd) {
   int i;
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   if (bd == 8) {
     int16_t temp[32 * 16];
@@ -594,10 +594,9 @@ static void vpx_highbd_idct32_8_neon(const int32_t *input, uint16_t *output,
   highbd_idct16x16_add_store(out + 16, output + 16 * stride, stride, bd);
 }
 
-void vpx_highbd_idct32x32_34_add_neon(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct32x32_34_add_neon(const tran_low_t *input, uint16_t *dest,
                                       int stride, int bd) {
   int i;
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   if (bd == 8) {
     int16_t temp[32 * 8];
@@ -59,7 +59,7 @@ static INLINE void highbd_idct32x32_1_add_neg_kernel(uint16_t **dest,
   *dest += stride;
 }
 
-void vpx_highbd_idct32x32_1_add_neon(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct32x32_1_add_neon(const tran_low_t *input, uint16_t *dest,
                                      int stride, int bd) {
   const tran_low_t out0 =
       HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
@@ -67,7 +67,6 @@ void vpx_highbd_idct32x32_1_add_neon(const tran_low_t *input, uint8_t *dest8,
       HIGHBD_WRAPLOW(dct_const_round_shift(out0 * cospi_16_64), bd);
   const int16_t a1 = ROUND_POWER_OF_TWO(out1, 6);
   const int16x8_t dc = vdupq_n_s16(a1);
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
   int i;
 
   if (a1 >= 0) {
@@ -51,7 +51,7 @@ static INLINE void highbd_idct4x4_1_add_kernel2(uint16_t **dest,
   *dest += stride;
 }
 
-void vpx_highbd_idct4x4_1_add_neon(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct4x4_1_add_neon(const tran_low_t *input, uint16_t *dest,
                                    int stride, int bd) {
   const int16x8_t max = vdupq_n_s16((1 << bd) - 1);
   const tran_low_t out0 =
@@ -60,7 +60,6 @@ void vpx_highbd_idct4x4_1_add_neon(const tran_low_t *input, uint8_t *dest8,
       HIGHBD_WRAPLOW(dct_const_round_shift(out0 * cospi_16_64), bd);
   const int16_t a1 = ROUND_POWER_OF_TWO(out1, 4);
   const int16x8_t dc = vdupq_n_s16(a1);
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   highbd_idct4x4_1_add_kernel1(&dest, stride, dc, max);
   highbd_idct4x4_1_add_kernel1(&dest, stride, dc, max);
@@ -133,14 +132,13 @@ static INLINE void idct4x4_16_kernel_bd12(const int32x4_t cospis,
   *a3 = vsubq_s32(b0, b3);
 }
 
-void vpx_highbd_idct4x4_16_add_neon(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct4x4_16_add_neon(const tran_low_t *input, uint16_t *dest,
                                     int stride, int bd) {
   const int16x8_t max = vdupq_n_s16((1 << bd) - 1);
   int32x4_t c0 = vld1q_s32(input);
   int32x4_t c1 = vld1q_s32(input + 4);
   int32x4_t c2 = vld1q_s32(input + 8);
   int32x4_t c3 = vld1q_s32(input + 12);
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
   int16x8_t a0, a1;
 
   if (bd == 8) {
@@ -36,7 +36,7 @@ static INLINE void highbd_idct8x8_1_add_neg_kernel(uint16_t **dest,
   *dest += stride;
 }
 
-void vpx_highbd_idct8x8_1_add_neon(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct8x8_1_add_neon(const tran_low_t *input, uint16_t *dest,
                                    int stride, int bd) {
   const tran_low_t out0 =
       HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
@@ -44,7 +44,6 @@ void vpx_highbd_idct8x8_1_add_neon(const tran_low_t *input, uint8_t *dest8,
       HIGHBD_WRAPLOW(dct_const_round_shift(out0 * cospi_16_64), bd);
   const int16_t a1 = ROUND_POWER_OF_TWO(out1, 5);
   const int16x8_t dc = vdupq_n_s16(a1);
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   if (a1 >= 0) {
     const int16x8_t max = vdupq_n_s16((1 << bd) - 1);
@@ -292,9 +291,8 @@ static INLINE void highbd_add8x8(int16x8_t a0, int16x8_t a1, int16x8_t a2,
   vst1q_u16(dest, d7_u16);
 }
 
-void vpx_highbd_idct8x8_12_add_neon(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct8x8_12_add_neon(const tran_low_t *input, uint16_t *dest,
                                     int stride, int bd) {
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
   int32x4_t a0 = vld1q_s32(input);
   int32x4_t a1 = vld1q_s32(input + 8);
   int32x4_t a2 = vld1q_s32(input + 16);
@@ -553,9 +551,8 @@ static INLINE void idct8x8_64_half1d_bd12(
   *io7 = vsubq_s32(step1[0], step2[7]);
 }
 
-void vpx_highbd_idct8x8_64_add_neon(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct8x8_64_add_neon(const tran_low_t *input, uint16_t *dest,
                                     int stride, int bd) {
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
   int32x4_t a0 = vld1q_s32(input);
   int32x4_t a1 = vld1q_s32(input + 4);
   int32x4_t a2 = vld1q_s32(input + 8);
@@ -1290,7 +1290,7 @@ static INLINE int detect_invalid_highbd_input(const tran_low_t *input,
   return 0;
 }
 
-void vpx_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint16_t *dest,
                                  int stride, int bd) {
   /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
      0.5 shifts per pixel. */
@@ -1299,7 +1299,6 @@ void vpx_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
   tran_high_t a1, b1, c1, d1, e1;
   const tran_low_t *ip = input;
   tran_low_t *op = output;
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   for (i = 0; i < 4; i++) {
     a1 = ip[0] >> UNIT_QUANT_SHIFT;
@@ -1348,14 +1347,13 @@ void vpx_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vpx_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
+void vpx_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint16_t *dest,
                                 int stride, int bd) {
   int i;
   tran_high_t a1, e1;
   tran_low_t tmp[4];
   const tran_low_t *ip = in;
   tran_low_t *op = tmp;
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
   (void)bd;
 
   a1 = ip[0] >> UNIT_QUANT_SHIFT;
@@ -1452,13 +1450,12 @@ void vpx_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
   output[3] = HIGHBD_WRAPLOW(step[0] - step[3], bd);
 }
 
-void vpx_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct4x4_16_add_c(const tran_low_t *input, uint16_t *dest,
                                  int stride, int bd) {
   int i, j;
   tran_low_t out[4 * 4];
   tran_low_t *outptr = out;
   tran_low_t temp_in[4], temp_out[4];
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   // Rows
   for (i = 0; i < 4; ++i) {
@@ -1478,13 +1475,12 @@ void vpx_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vpx_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct4x4_1_add_c(const tran_low_t *input, uint16_t *dest,
                                 int stride, int bd) {
   int i;
   tran_high_t a1;
   tran_low_t out =
       HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   out = HIGHBD_WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
   a1 = ROUND_POWER_OF_TWO(out, 4);
@@ -1636,13 +1632,12 @@ void vpx_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   output[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd);
 }
 
-void vpx_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct8x8_64_add_c(const tran_low_t *input, uint16_t *dest,
                                  int stride, int bd) {
   int i, j;
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
   tran_low_t temp_in[8], temp_out[8];
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   // First transform rows
   for (i = 0; i < 8; ++i) {
@@ -1662,13 +1657,12 @@ void vpx_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vpx_highbd_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct8x8_12_add_c(const tran_low_t *input, uint16_t *dest,
                                  int stride, int bd) {
   int i, j;
   tran_low_t out[8 * 8] = { 0 };
   tran_low_t *outptr = out;
   tran_low_t temp_in[8], temp_out[8];
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   // First transform rows
   // Only first 4 row has non-zero coefs
@@ -1689,13 +1683,12 @@ void vpx_highbd_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vpx_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct8x8_1_add_c(const tran_low_t *input, uint16_t *dest,
                                 int stride, int bd) {
   int i, j;
   tran_high_t a1;
   tran_low_t out =
       HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   out = HIGHBD_WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
   a1 = ROUND_POWER_OF_TWO(out, 5);
@@ -2056,13 +2049,12 @@ void vpx_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
   output[15] = HIGHBD_WRAPLOW(step2[0] - step2[15], bd);
 }
 
-void vpx_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct16x16_256_add_c(const tran_low_t *input, uint16_t *dest,
                                     int stride, int bd) {
   int i, j;
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
   tran_low_t temp_in[16], temp_out[16];
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   // First transform rows
   for (i = 0; i < 16; ++i) {
@@ -2082,13 +2074,12 @@ void vpx_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vpx_highbd_idct16x16_38_add_c(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct16x16_38_add_c(const tran_low_t *input, uint16_t *dest,
                                    int stride, int bd) {
   int i, j;
   tran_low_t out[16 * 16] = { 0 };
   tran_low_t *outptr = out;
   tran_low_t temp_in[16], temp_out[16];
-  uint16_t *const dest = CAST_TO_SHORTPTR(dest8);
 
   // First transform rows. Since all non-zero dct coefficients are in
   // upper-left 8x8 area, we only need to calculate first 8 rows here.
@@ -2111,13 +2102,12 @@ void vpx_highbd_idct16x16_38_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vpx_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct16x16_10_add_c(const tran_low_t *input, uint16_t *dest,
                                    int stride, int bd) {
   int i, j;
   tran_low_t out[16 * 16] = { 0 };
   tran_low_t *outptr = out;
   tran_low_t temp_in[16], temp_out[16];
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   // First transform rows. Since all non-zero dct coefficients are in
   // upper-left 4x4 area, we only need to calculate first 4 rows here.
@@ -2138,13 +2128,12 @@ void vpx_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vpx_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct16x16_1_add_c(const tran_low_t *input, uint16_t *dest,
                                   int stride, int bd) {
   int i, j;
   tran_high_t a1;
   tran_low_t out =
       HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   out = HIGHBD_WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
   a1 = ROUND_POWER_OF_TWO(out, 6);
@@ -2531,13 +2520,12 @@ static void highbd_idct32_c(const tran_low_t *input, tran_low_t *output,
   output[31] = HIGHBD_WRAPLOW(step1[0] - step1[31], bd);
 }
 
-void vpx_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint16_t *dest,
                                      int stride, int bd) {
   int i, j;
   tran_low_t out[32 * 32];
   tran_low_t *outptr = out;
   tran_low_t temp_in[32], temp_out[32];
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   // Rows
   for (i = 0; i < 32; ++i) {
@@ -2569,13 +2557,12 @@ void vpx_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vpx_highbd_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct32x32_135_add_c(const tran_low_t *input, uint16_t *dest,
                                     int stride, int bd) {
   int i, j;
   tran_low_t out[32 * 32] = { 0 };
   tran_low_t *outptr = out;
   tran_low_t temp_in[32], temp_out[32];
-  uint16_t *const dest = CAST_TO_SHORTPTR(dest8);
 
   // Rows
   // Only upper-left 16x16 has non-zero coeff
@@ -2598,13 +2585,12 @@ void vpx_highbd_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vpx_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct32x32_34_add_c(const tran_low_t *input, uint16_t *dest,
                                    int stride, int bd) {
   int i, j;
   tran_low_t out[32 * 32] = { 0 };
   tran_low_t *outptr = out;
   tran_low_t temp_in[32], temp_out[32];
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
 
   // Rows
   // Only upper-left 8x8 has non-zero coeff
@@ -2625,11 +2611,10 @@ void vpx_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vpx_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct32x32_1_add_c(const tran_low_t *input, uint16_t *dest,
                                   int stride, int bd) {
   int i, j;
   int a1;
-  uint16_t *dest = CAST_TO_SHORTPTR(dest8);
   tran_low_t out =
       HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
 
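All of the vpx_dsp C kernels above follow the same mechanical edit: drop the dest8 parameter plus the CAST_TO_SHORTPTR line and write through the uint16_t pointer directly. A hedged, self-contained miniature of the resulting shape (4x4 add stage only; names and the clip helper are illustrative, not libvpx's):

#include <stdint.h>

typedef int32_t tran_low_t;
typedef int64_t tran_high_t;

static uint16_t clip_pixel_add_bd(uint16_t dst, tran_high_t trans, int bd) {
  const int32_t max = (1 << bd) - 1;
  const int64_t v = (int64_t)dst + trans;
  return (uint16_t)(v < 0 ? 0 : (v > max ? max : v));
}

/* Column add stage: accumulate the rounded transform output into the 16-bit
 * destination plane, exactly as the rewritten kernels now do. */
static void add_stage_4x4(const tran_low_t *residual, uint16_t *dest,
                          int stride, int bd) {
  int i, j;
  for (j = 0; j < 4; ++j) {
    for (i = 0; i < 4; ++i) {
      dest[j * stride + i] =
          clip_pixel_add_bd(dest[j * stride + i], residual[j * 4 + i], bd);
    }
  }
}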
@ -629,39 +629,39 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
|
|||||||
# that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
|
# that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
|
||||||
specialize qw/vpx_iwht4x4_16_add sse2/;
|
specialize qw/vpx_iwht4x4_16_add sse2/;
|
||||||
|
|
||||||
add_proto qw/void vpx_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
|
add_proto qw/void vpx_highbd_idct4x4_16_add/, "const tran_low_t *input, uint16_t *dest, int stride, int bd";
|
||||||
|
|
||||||
add_proto qw/void vpx_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
|
add_proto qw/void vpx_highbd_idct4x4_1_add/, "const tran_low_t *input, uint16_t *dest, int stride, int bd";
|
||||||
specialize qw/vpx_highbd_idct4x4_1_add neon/;
|
specialize qw/vpx_highbd_idct4x4_1_add neon/;
|
||||||
|
|
||||||
add_proto qw/void vpx_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
|
add_proto qw/void vpx_highbd_idct8x8_64_add/, "const tran_low_t *input, uint16_t *dest, int stride, int bd";
|
||||||
|
|
||||||
add_proto qw/void vpx_highbd_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
|
add_proto qw/void vpx_highbd_idct8x8_12_add/, "const tran_low_t *input, uint16_t *dest, int stride, int bd";
|
||||||
|
|
||||||
add_proto qw/void vpx_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
|
add_proto qw/void vpx_highbd_idct8x8_1_add/, "const tran_low_t *input, uint16_t *dest, int stride, int bd";
|
||||||
specialize qw/vpx_highbd_idct8x8_1_add neon/;
|
specialize qw/vpx_highbd_idct8x8_1_add neon/;
|
||||||
|
|
||||||
add_proto qw/void vpx_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
|
add_proto qw/void vpx_highbd_idct16x16_256_add/, "const tran_low_t *input, uint16_t *dest, int stride, int bd";
|
||||||
|
|
||||||
add_proto qw/void vpx_highbd_idct16x16_38_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
|
add_proto qw/void vpx_highbd_idct16x16_38_add/, "const tran_low_t *input, uint16_t *dest, int stride, int bd";
|
||||||
|
|
||||||
add_proto qw/void vpx_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
|
add_proto qw/void vpx_highbd_idct16x16_10_add/, "const tran_low_t *input, uint16_t *dest, int stride, int bd";
|
||||||
|
|
||||||
add_proto qw/void vpx_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
|
add_proto qw/void vpx_highbd_idct16x16_1_add/, "const tran_low_t *input, uint16_t *dest, int stride, int bd";
|
||||||
specialize qw/vpx_highbd_idct16x16_1_add neon/;
|
specialize qw/vpx_highbd_idct16x16_1_add neon/;
|
||||||
|
|
||||||
add_proto qw/void vpx_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
|
add_proto qw/void vpx_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint16_t *dest, int stride, int bd";
|
||||||
|
|
||||||
add_proto qw/void vpx_highbd_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
|
add_proto qw/void vpx_highbd_idct32x32_135_add/, "const tran_low_t *input, uint16_t *dest, int stride, int bd";
|
||||||
|
|
||||||
add_proto qw/void vpx_highbd_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
|
add_proto qw/void vpx_highbd_idct32x32_34_add/, "const tran_low_t *input, uint16_t *dest, int stride, int bd";
|
||||||
|
|
||||||
add_proto qw/void vpx_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
|
add_proto qw/void vpx_highbd_idct32x32_1_add/, "const tran_low_t *input, uint16_t *dest, int stride, int bd";
|
||||||
specialize qw/vpx_highbd_idct32x32_1_add neon sse2/;
|
specialize qw/vpx_highbd_idct32x32_1_add neon sse2/;
|
||||||
|
|
||||||
add_proto qw/void vpx_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
|
add_proto qw/void vpx_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint16_t *dest, int stride, int bd";
|
||||||
|
|
||||||
add_proto qw/void vpx_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int stride, int bd";
|
add_proto qw/void vpx_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint16_t *dest, int stride, int bd";
|
||||||
|
|
||||||
if (vpx_config("CONFIG_EMULATE_HARDWARE") ne "yes") {
|
if (vpx_config("CONFIG_EMULATE_HARDWARE") ne "yes") {
|
||||||
specialize qw/vpx_highbd_idct4x4_16_add neon sse2/;
|
specialize qw/vpx_highbd_idct4x4_16_add neon sse2/;
|
||||||
|
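The add_proto lines above are the source of truth for the run-time CPU dispatch, so this hunk is what forces every implementation (C, SSE2, NEON) onto the uint16_t signature at once. As a rough illustration of the effect, the regenerated vpx_dsp_rtcd.h declares something along these lines for one of the entries; the exact output depends on the target and configure flags, so treat this as an approximation rather than the literal generated header:

void vpx_highbd_idct32x32_1_add_c(const tran_low_t *input, uint16_t *dest,
                                  int stride, int bd);
void vpx_highbd_idct32x32_1_add_sse2(const tran_low_t *input, uint16_t *dest,
                                     int stride, int bd);
void vpx_highbd_idct32x32_1_add_neon(const tran_low_t *input, uint16_t *dest,
                                     int stride, int bd);
RTCD_EXTERN void (*vpx_highbd_idct32x32_1_add)(const tran_low_t *input,
                                               uint16_t *dest, int stride,
                                               int bd);

A mismatch that was previously hidden by the uint8_t/uint16_t cast now shows up as a compile-time error when the wrong pointer type is passed.
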
@@ -3364,7 +3364,7 @@ static INLINE __m128i clamp_high_sse2(__m128i value, int bd) {
return retval;
}

-void vpx_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint16_t *dest,
int stride, int bd) {
tran_low_t out[4 * 4];
tran_low_t *outptr = out;
@@ -3373,7 +3373,6 @@ void vpx_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
__m128i sign_bits[2];
__m128i temp_mm, min_input, max_input;
int test;
-uint16_t *dest = CAST_TO_SHORTPTR(dest8);
int optimised_cols = 0;
const __m128i zero = _mm_set1_epi16(0);
const __m128i eight = _mm_set1_epi16(8);
@@ -3479,14 +3478,13 @@ void vpx_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
}

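With dest already a uint16_t pointer, the SSE2 wrappers write reconstructed pixels straight into 16-bit rows and clamp them to [0, (1 << bd) - 1], which is what the clamp_high_sse2 helper named in the hunk header is for. A hedged sketch of that add-and-clamp store follows; highbd_add_clamp_store and the unaligned 8-pixel load are illustrative choices, with only the (1 << bd) - 1 construction taken verbatim from the diff:

#include <emmintrin.h>
#include <stdint.h>

/* Add an 8-lane residual vector to 8 pixels of a uint16_t row and clamp the
 * result to the valid range for bit depth bd. Sketch only. */
static void highbd_add_clamp_store(uint16_t *dest, __m128i residual, int bd) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i one = _mm_set1_epi16(1);
  const __m128i max = _mm_sub_epi16(_mm_slli_epi16(one, bd), one);
  __m128i d = _mm_loadu_si128((const __m128i *)dest);
  d = _mm_adds_epi16(d, residual);  /* saturating add of the residual */
  d = _mm_max_epi16(d, zero);       /* clamp below at 0 */
  d = _mm_min_epi16(d, max);        /* clamp above at (1 << bd) - 1 */
  _mm_storeu_si128((__m128i *)dest, d);
}
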
-void vpx_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint16_t *dest,
int stride, int bd) {
tran_low_t out[8 * 8];
tran_low_t *outptr = out;
int i, j, test;
__m128i inptr[8];
__m128i min_input, max_input, temp1, temp2, sign_bits;
-uint16_t *dest = CAST_TO_SHORTPTR(dest8);
const __m128i zero = _mm_set1_epi16(0);
const __m128i sixteen = _mm_set1_epi16(16);
const __m128i max = _mm_set1_epi16(6201);
@@ -3579,14 +3577,13 @@ void vpx_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
}

-void vpx_highbd_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct8x8_12_add_sse2(const tran_low_t *input, uint16_t *dest,
int stride, int bd) {
tran_low_t out[8 * 8] = { 0 };
tran_low_t *outptr = out;
int i, j, test;
__m128i inptr[8];
__m128i min_input, max_input, temp1, temp2, sign_bits;
-uint16_t *dest = CAST_TO_SHORTPTR(dest8);
const __m128i zero = _mm_set1_epi16(0);
const __m128i sixteen = _mm_set1_epi16(16);
const __m128i max = _mm_set1_epi16(6201);
@@ -3682,14 +3679,13 @@ void vpx_highbd_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
}

-void vpx_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint16_t *dest,
int stride, int bd) {
tran_low_t out[16 * 16];
tran_low_t *outptr = out;
int i, j, test;
__m128i inptr[32];
__m128i min_input, max_input, temp1, temp2, sign_bits;
-uint16_t *dest = CAST_TO_SHORTPTR(dest8);
const __m128i zero = _mm_set1_epi16(0);
const __m128i rounding = _mm_set1_epi16(32);
const __m128i max = _mm_set1_epi16(3155);
@@ -3795,14 +3791,13 @@ void vpx_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
}

-void vpx_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint16_t *dest,
int stride, int bd) {
tran_low_t out[16 * 16] = { 0 };
tran_low_t *outptr = out;
int i, j, test;
__m128i inptr[32];
__m128i min_input, max_input, temp1, temp2, sign_bits;
-uint16_t *dest = CAST_TO_SHORTPTR(dest8);
const __m128i zero = _mm_set1_epi16(0);
const __m128i rounding = _mm_set1_epi16(32);
const __m128i max = _mm_set1_epi16(3155);
@@ -3913,14 +3908,13 @@ void vpx_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
}
}

-void vpx_highbd_idct32x32_1_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void vpx_highbd_idct32x32_1_add_sse2(const tran_low_t *input, uint16_t *dest,
int stride, int bd) {
__m128i dc_value, d;
const __m128i zero = _mm_setzero_si128();
const __m128i one = _mm_set1_epi16(1);
const __m128i max = _mm_sub_epi16(_mm_slli_epi16(one, bd), one);
int a, i, j;
-uint16_t *dest = CAST_TO_SHORTPTR(dest8);
tran_low_t out;

out = HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
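The DC-only 32x32 wrapper reduces to broadcasting a single rounded DC value across the block. A scalar sketch of that path against the new uint16_t destination is below; it is written from the lines visible above plus libvpx helpers assumed to be in scope inside vpx_dsp (highbd_clip_pixel_add, ROUND_POWER_OF_TWO), and the second dct_const_round_shift pass is reconstructed from memory, so read it as an outline rather than the patch itself:

/* Outline of the DC-only path; assumes the usual vpx_dsp helpers are in scope. */
static void highbd_idct32x32_dc_outline(const tran_low_t *input, uint16_t *dest,
                                        int stride, int bd) {
  int i, j, a1;
  tran_low_t out;

  out = HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
  out = HIGHBD_WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
  a1 = ROUND_POWER_OF_TWO(out, 6);  /* final-stage rounding for 32x32 */

  for (i = 0; i < 32; ++i) {
    for (j = 0; j < 32; ++j) dest[j] = highbd_clip_pixel_add(dest[j], a1, bd);
    dest += stride;
  }
}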