Merge "Removing extra params in dequant functions" into experimental

Scott LaVarnway, 2013-04-16 06:37:00 -07:00, committed by Gerrit Code Review
commit 5393379c84
6 changed files with 116 additions and 220 deletions
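
The change itself is mechanical: the dequant/IDCT "add" helpers used to take a separate prediction buffer and pitch (or pre/pre_stride for the block variants) alongside the destination, but every caller in this patch was already passing the same pointer and stride for both, so the prediction arguments are dropped and reconstruction happens in place on dest. Illustrated on the 4x4 helper (both prototypes are taken from the hunks below):

/* Before: prediction and destination passed separately. */
void vp9_dequant_idct_add_c(int16_t *input, const int16_t *dq,
                            uint8_t *pred, uint8_t *dest,
                            int pitch, int stride, int eob);

/* After: the block is reconstructed in place on dest. */
void vp9_dequant_idct_add_c(int16_t *input, const int16_t *dq,
                            uint8_t *dest, int stride, int eob);

/* A typical call site therefore shrinks from
 *   vp9_dequant_idct_add(q, dq, dst, dst, stride, stride, eob);
 * to
 *   vp9_dequant_idct_add(q, dq, dst, stride, eob);
 */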


@@ -432,14 +432,12 @@ typedef struct macroblockd {
   /* Inverse transform function pointers. */
   void (*inv_txm4x4_1)(int16_t *input, int16_t *output, int pitch);
   void (*inv_txm4x4)(int16_t *input, int16_t *output, int pitch);
-  void (*itxm_add)(int16_t *input, const int16_t *dq,
-      uint8_t *pred, uint8_t *output, int pitch, int stride, int eob);
+  void (*itxm_add)(int16_t *input, const int16_t *dq, uint8_t *dest,
+      int stride, int eob);
   void (*itxm_add_y_block)(int16_t *q, const int16_t *dq,
-      uint8_t *pre, int pre_stride, uint8_t *dst, int stride,
-      struct macroblockd *xd);
+      uint8_t *dst, int stride, struct macroblockd *xd);
   void (*itxm_add_uv_block)(int16_t *q, const int16_t *dq,
-      uint8_t *pre, int pre_stride, uint8_t *dst, int stride,
-      uint16_t *eobs);
+      uint8_t *dst, int stride, uint16_t *eobs);

   struct subpix_fn_table subpix;
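
These members are how the per-block transform path is reached at run time; the decoder hunks further down call them as xd->itxm_add(...), xd->itxm_add_y_block(...) and xd->itxm_add_uv_block(...). A minimal sketch of how the slimmed-down 4x4 pointer is bound and used (illustrative only: the binding site is not part of this diff, the helper name setup_itxm is made up, and lossless coding mode would bind the *_lossless_c variant instead):

/* Illustrative sketch -- the real assignment lives elsewhere in the decoder
 * and is unchanged by this commit. */
static void setup_itxm(MACROBLOCKD *xd, int lossless) {
  xd->itxm_add = lossless ? vp9_dequant_idct_add_lossless_c
                          : vp9_dequant_idct_add;
}

/* Invocation under the new signature: reconstruct one 4x4 block directly
 * into the destination frame buffer.
 *   xd->itxm_add(qcoeff, dequant, dst, dst_stride, eob);
 */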


@@ -27,25 +27,25 @@ forward_decls vp9_common_forward_decls
 #
 # Dequant
 #
-prototype void vp9_dequant_idct_add_y_block_8x8 "int16_t *q, const int16_t *dq, uint8_t *pre, int pre_stride, uint8_t *dst, int stride, struct macroblockd *xd"
+prototype void vp9_dequant_idct_add_y_block_8x8 "int16_t *q, const int16_t *dq, uint8_t *dst, int stride, struct macroblockd *xd"
 specialize vp9_dequant_idct_add_y_block_8x8

-prototype void vp9_dequant_idct_add_16x16 "int16_t *input, const int16_t *dq, uint8_t *pred, uint8_t *dest, int pitch, int stride, int eob"
+prototype void vp9_dequant_idct_add_16x16 "int16_t *input, const int16_t *dq, uint8_t *dest, int stride, int eob"
 specialize vp9_dequant_idct_add_16x16

-prototype void vp9_dequant_idct_add_8x8 "int16_t *input, const int16_t *dq, uint8_t *pred, uint8_t *dest, int pitch, int stride, int eob"
+prototype void vp9_dequant_idct_add_8x8 "int16_t *input, const int16_t *dq, uint8_t *dest, int stride, int eob"
 specialize vp9_dequant_idct_add_8x8

-prototype void vp9_dequant_idct_add "int16_t *input, const int16_t *dq, uint8_t *pred, uint8_t *dest, int pitch, int stride, int eob"
+prototype void vp9_dequant_idct_add "int16_t *input, const int16_t *dq, uint8_t *dest, int stride, int eob"
 specialize vp9_dequant_idct_add

-prototype void vp9_dequant_idct_add_y_block "int16_t *q, const int16_t *dq, uint8_t *pre, int pre_stride, uint8_t *dst, int stride, struct macroblockd *xd"
+prototype void vp9_dequant_idct_add_y_block "int16_t *q, const int16_t *dq, uint8_t *dst, int stride, struct macroblockd *xd"
 specialize vp9_dequant_idct_add_y_block

-prototype void vp9_dequant_idct_add_uv_block "int16_t *q, const int16_t *dq, uint8_t *pre, int pre_stride, uint8_t *dst, int stride, uint16_t *eobs"
+prototype void vp9_dequant_idct_add_uv_block "int16_t *q, const int16_t *dq, uint8_t *dst, int stride, uint16_t *eobs"
 specialize vp9_dequant_idct_add_uv_block

-prototype void vp9_dequant_idct_add_32x32 "int16_t *q, const int16_t *dq, uint8_t *pre, uint8_t *dst, int pitch, int stride, int eob"
+prototype void vp9_dequant_idct_add_32x32 "int16_t *q, const int16_t *dq, uint8_t *dst, int stride, int eob"
 specialize vp9_dequant_idct_add_32x32

 #
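
These prototype/specialize pairs feed the RTCD (run-time CPU detection) generator; since no SIMD variants are listed for any of the dequant functions, each dispatch name simply resolves to its C implementation. Roughly, the generated entry for one of the updated prototypes has the shape sketched below (approximation only; the exact macros depend on the rtcd script and build configuration):

/* Approximate shape of a generated RTCD entry -- with no specializations
 * declared, the dispatch macro points straight at the C version. */
void vp9_dequant_idct_add_c(int16_t *input, const int16_t *dq, uint8_t *dest,
                            int stride, int eob);
#define vp9_dequant_idct_add vp9_dequant_idct_add_c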


@@ -207,23 +207,19 @@ static void decode_16x16(VP9D_COMP *pbi, MACROBLOCKD *xd,
   if (tx_type != DCT_DCT) {
     vp9_dequant_iht_add_16x16_c(tx_type, xd->plane[0].qcoeff,
                                 xd->block[0].dequant, xd->dst.y_buffer,
-                                xd->dst.y_buffer, xd->dst.y_stride,
                                 xd->dst.y_stride, xd->plane[0].eobs[0]);
   } else {
     vp9_dequant_idct_add_16x16(xd->plane[0].qcoeff, xd->block[0].dequant,
-                               xd->dst.y_buffer, xd->dst.y_buffer,
-                               xd->dst.y_stride, xd->dst.y_stride,
+                               xd->dst.y_buffer, xd->dst.y_stride,
                                xd->plane[0].eobs[0]);
   }
   vp9_dequant_idct_add_8x8(xd->plane[1].qcoeff, xd->block[16].dequant,
-                           xd->dst.u_buffer, xd->dst.u_buffer,
-                           xd->dst.uv_stride, xd->dst.uv_stride,
+                           xd->dst.u_buffer, xd->dst.uv_stride,
                            xd->plane[1].eobs[0]);
   vp9_dequant_idct_add_8x8(xd->plane[2].qcoeff, xd->block[20].dequant,
-                           xd->dst.v_buffer, xd->dst.v_buffer,
-                           xd->dst.uv_stride, xd->dst.uv_stride,
+                           xd->dst.v_buffer, xd->dst.uv_stride,
                            xd->plane[2].eobs[0]);
 }
@@ -249,21 +245,16 @@ static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
       }
       tx_type = get_tx_type_8x8(xd, ib);
       if (tx_type != DCT_DCT) {
-        vp9_dequant_iht_add_8x8_c(tx_type, q, dq, dst, dst, stride, stride,
+        vp9_dequant_iht_add_8x8_c(tx_type, q, dq, dst, stride,
                                   xd->plane[0].eobs[idx]);
       } else {
-        vp9_dequant_idct_add_8x8_c(q, dq, dst, dst, stride, stride,
-                                   xd->plane[0].eobs[idx]);
+        vp9_dequant_idct_add_8x8(q, dq, dst, stride, xd->plane[0].eobs[idx]);
       }
     }
   } else {
     vp9_dequant_idct_add_y_block_8x8(xd->plane[0].qcoeff,
-                                     xd->block[0].dequant,
-                                     xd->dst.y_buffer,
-                                     xd->dst.y_stride,
-                                     xd->dst.y_buffer,
-                                     xd->dst.y_stride,
-                                     xd);
+                                     xd->block[0].dequant, xd->dst.y_buffer,
+                                     xd->dst.y_stride, xd);
   }

   // chroma
@@ -278,34 +269,28 @@ static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
       vp9_intra_uv4x4_predict(xd, b, i8x8mode, *(b->base_dst) + b->dst,
                               b->dst_stride);
       xd->itxm_add(BLOCK_OFFSET(xd->plane[1].qcoeff, i, 16),
-                   b->dequant, *(b->base_dst) + b->dst,
-                   *(b->base_dst) + b->dst, b->dst_stride, b->dst_stride,
+                   b->dequant, *(b->base_dst) + b->dst, b->dst_stride,
                    xd->plane[1].eobs[i]);
       b = &xd->block[20 + i];
       vp9_intra_uv4x4_predict(xd, b, i8x8mode, *(b->base_dst) + b->dst,
                               b->dst_stride);
       xd->itxm_add(BLOCK_OFFSET(xd->plane[2].qcoeff, i, 16),
-                   b->dequant, *(b->base_dst) + b->dst,
-                   *(b->base_dst) + b->dst, b->dst_stride, b->dst_stride,
+                   b->dequant, *(b->base_dst) + b->dst, b->dst_stride,
                    xd->plane[2].eobs[i]);
     }
   } else if (mode == SPLITMV) {
     xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
-                          xd->dst.u_buffer, xd->dst.uv_stride, xd->dst.u_buffer,
-                          xd->dst.uv_stride, xd->plane[1].eobs);
+                          xd->dst.u_buffer, xd->dst.uv_stride, xd->plane[1].eobs);
     xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
-                          xd->dst.v_buffer, xd->dst.uv_stride, xd->dst.v_buffer,
-                          xd->dst.uv_stride, xd->plane[2].eobs);
+                          xd->dst.v_buffer, xd->dst.uv_stride, xd->plane[2].eobs);
   } else {
     vp9_dequant_idct_add_8x8(xd->plane[1].qcoeff, xd->block[16].dequant,
-                             xd->dst.u_buffer, xd->dst.u_buffer,
-                             xd->dst.uv_stride, xd->dst.uv_stride,
+                             xd->dst.u_buffer, xd->dst.uv_stride,
                              xd->plane[1].eobs[0]);
     vp9_dequant_idct_add_8x8(xd->plane[2].qcoeff, xd->block[16].dequant,
-                             xd->dst.v_buffer, xd->dst.v_buffer,
-                             xd->dst.uv_stride, xd->dst.uv_stride,
+                             xd->dst.v_buffer, xd->dst.uv_stride,
                              xd->plane[2].eobs[0]);
   }
 }
@@ -331,13 +316,11 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
         vp9_dequant_iht_add_c(tx_type,
             BLOCK_OFFSET(xd->plane[0].qcoeff, ib + iblock[j], 16),
             b->dequant, *(b->base_dst) + b->dst,
-            *(b->base_dst) + b->dst, b->dst_stride,
             b->dst_stride,
             xd->plane[0].eobs[ib + iblock[j]]);
       } else {
         xd->itxm_add(BLOCK_OFFSET(xd->plane[0].qcoeff, ib + iblock[j], 16),
-                     b->dequant, *(b->base_dst) + b->dst,
-                     *(b->base_dst) + b->dst, b->dst_stride, b->dst_stride,
+                     b->dequant, *(b->base_dst) + b->dst, b->dst_stride,
                      xd->plane[0].eobs[ib + iblock[j]]);
       }
     }
@@ -345,15 +328,13 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
       vp9_intra_uv4x4_predict(xd, b, i8x8mode, *(b->base_dst) + b->dst,
                               b->dst_stride);
       xd->itxm_add(BLOCK_OFFSET(xd->plane[1].qcoeff, i, 16),
-                   b->dequant, *(b->base_dst) + b->dst,
-                   *(b->base_dst) + b->dst, b->dst_stride, b->dst_stride,
+                   b->dequant, *(b->base_dst) + b->dst, b->dst_stride,
                    xd->plane[1].eobs[i]);
       b = &xd->block[20 + i];
       vp9_intra_uv4x4_predict(xd, b, i8x8mode, *(b->base_dst) + b->dst,
                               b->dst_stride);
       xd->itxm_add(BLOCK_OFFSET(xd->plane[2].qcoeff, i, 16),
-                   b->dequant, *(b->base_dst) + b->dst,
-                   *(b->base_dst) + b->dst, b->dst_stride, b->dst_stride,
+                   b->dequant, *(b->base_dst) + b->dst, b->dst_stride,
                    xd->plane[2].eobs[i]);
     }
   } else if (mode == I4X4_PRED) {
@@ -373,12 +354,10 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
         vp9_dequant_iht_add_c(tx_type,
                               BLOCK_OFFSET(xd->plane[0].qcoeff, i, 16),
                               b->dequant, *(b->base_dst) + b->dst,
-                              *(b->base_dst) + b->dst, b->dst_stride,
                               b->dst_stride, xd->plane[0].eobs[i]);
       } else {
         xd->itxm_add(BLOCK_OFFSET(xd->plane[0].qcoeff, i, 16),
-                     b->dequant, *(b->base_dst) + b->dst,
-                     *(b->base_dst) + b->dst, b->dst_stride, b->dst_stride,
+                     b->dequant, *(b->base_dst) + b->dst, b->dst_stride,
                      xd->plane[0].eobs[i]);
       }
     }
@@ -388,24 +367,17 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
 #endif
     vp9_build_intra_predictors_sbuv_s(xd, BLOCK_SIZE_MB16X16);
     xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
-                          xd->dst.u_buffer, xd->dst.uv_stride, xd->dst.u_buffer,
-                          xd->dst.uv_stride, xd->plane[1].eobs);
+                          xd->dst.u_buffer, xd->dst.uv_stride, xd->plane[1].eobs);
     xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
-                          xd->dst.v_buffer, xd->dst.uv_stride, xd->dst.v_buffer,
-                          xd->dst.uv_stride, xd->plane[2].eobs);
+                          xd->dst.v_buffer, xd->dst.uv_stride, xd->plane[2].eobs);
   } else if (mode == SPLITMV || get_tx_type_4x4(xd, 0) == DCT_DCT) {
     xd->itxm_add_y_block(xd->plane[0].qcoeff,
                          xd->block[0].dequant,
-                         xd->dst.y_buffer, xd->dst.y_stride,
-                         xd->dst.y_buffer,
-                         xd->dst.y_stride,
-                         xd);
+                         xd->dst.y_buffer, xd->dst.y_stride, xd);
     xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
-                          xd->dst.u_buffer, xd->dst.uv_stride, xd->dst.u_buffer,
-                          xd->dst.uv_stride, xd->plane[1].eobs);
+                          xd->dst.u_buffer, xd->dst.uv_stride, xd->plane[1].eobs);
     xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
-                          xd->dst.v_buffer, xd->dst.uv_stride, xd->dst.v_buffer,
-                          xd->dst.uv_stride, xd->plane[2].eobs);
+                          xd->dst.v_buffer, xd->dst.uv_stride, xd->plane[2].eobs);
   } else {
     for (i = 0; i < 16; i++) {
       BLOCKD *b = &xd->block[i];
@@ -414,21 +386,19 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
         vp9_dequant_iht_add_c(tx_type,
                               BLOCK_OFFSET(xd->plane[0].qcoeff, i, 16),
                               b->dequant, *(b->base_dst) + b->dst,
-                              *(b->base_dst) + b->dst, b->dst_stride,
                               b->dst_stride, xd->plane[0].eobs[i]);
       } else {
         xd->itxm_add(BLOCK_OFFSET(xd->plane[0].qcoeff, i, 16),
-                     b->dequant, *(b->base_dst) + b->dst,
-                     *(b->base_dst) + b->dst, b->dst_stride, b->dst_stride,
+                     b->dequant, *(b->base_dst) + b->dst, b->dst_stride,
                      xd->plane[0].eobs[i]);
       }
     }
     xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
-                          xd->dst.u_buffer, xd->dst.uv_stride, xd->dst.u_buffer,
-                          xd->dst.uv_stride, xd->plane[1].eobs);
+                          xd->dst.u_buffer, xd->dst.uv_stride,
+                          xd->plane[1].eobs);
     xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
-                          xd->dst.v_buffer, xd->dst.uv_stride, xd->dst.v_buffer,
-                          xd->dst.uv_stride, xd->plane[2].eobs);
+                          xd->dst.v_buffer, xd->dst.uv_stride,
+                          xd->plane[2].eobs);
   }
 }
@@ -444,9 +414,7 @@ static INLINE void decode_sby_32x32(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
     const int y_offset = (y_idx * 32) * mb->dst.y_stride + (x_idx * 32);
     vp9_dequant_idct_add_32x32(BLOCK_OFFSET(mb->plane[0].qcoeff, n, 1024),
                                mb->block[0].dequant ,
-                               mb->dst.y_buffer + y_offset,
-                               mb->dst.y_buffer + y_offset,
-                               mb->dst.y_stride, mb->dst.y_stride,
+                               mb->dst.y_buffer + y_offset, mb->dst.y_stride,
                                mb->plane[0].eobs[n * 64]);
   }
 }
@@ -463,15 +431,11 @@ static INLINE void decode_sbuv_32x32(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
     vp9_dequant_idct_add_32x32(BLOCK_OFFSET(mb->plane[1].qcoeff, n, 1024),
                                mb->block[16].dequant,
                                mb->dst.u_buffer + uv_offset,
-                               mb->dst.u_buffer + uv_offset,
-                               mb->dst.uv_stride, mb->dst.uv_stride,
-                               mb->plane[1].eobs[n * 64]);
+                               mb->dst.uv_stride, mb->plane[1].eobs[n * 64]);
     vp9_dequant_idct_add_32x32(BLOCK_OFFSET(mb->plane[2].qcoeff, n, 1024),
                                mb->block[20].dequant,
                                mb->dst.v_buffer + uv_offset,
-                               mb->dst.v_buffer + uv_offset,
-                               mb->dst.uv_stride, mb->dst.uv_stride,
-                               mb->plane[2].eobs[n * 64]);
+                               mb->dst.uv_stride, mb->plane[2].eobs[n * 64]);
   }
 }
@@ -490,17 +454,14 @@ static INLINE void decode_sby_16x16(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
     if (tx_type == DCT_DCT) {
       vp9_dequant_idct_add_16x16(BLOCK_OFFSET(mb->plane[0].qcoeff, n, 256),
                                  mb->block[0].dequant ,
-                                 mb->dst.y_buffer + y_offset,
-                                 mb->dst.y_buffer + y_offset,
-                                 mb->dst.y_stride, mb->dst.y_stride,
+                                 mb->dst.y_buffer + y_offset, mb->dst.y_stride,
                                  mb->plane[0].eobs[n * 16]);
     } else {
       vp9_dequant_iht_add_16x16_c(tx_type,
                                   BLOCK_OFFSET(mb->plane[0].qcoeff, n, 256),
                                   mb->block[0].dequant,
                                   mb->dst.y_buffer + y_offset,
-                                  mb->dst.y_buffer + y_offset,
-                                  mb->dst.y_stride, mb->dst.y_stride,
+                                  mb->dst.y_stride,
                                   mb->plane[0].eobs[n * 16]);
     }
   }
@@ -520,15 +481,11 @@ static INLINE void decode_sbuv_16x16(MACROBLOCKD *mb, BLOCK_SIZE_TYPE bsize) {
     const int uv_offset = (y_idx * 16) * mb->dst.uv_stride + (x_idx * 16);
     vp9_dequant_idct_add_16x16(BLOCK_OFFSET(mb->plane[1].qcoeff, n, 256),
                                mb->block[16].dequant,
-                               mb->dst.u_buffer + uv_offset,
-                               mb->dst.u_buffer + uv_offset,
-                               mb->dst.uv_stride, mb->dst.uv_stride,
+                               mb->dst.u_buffer + uv_offset, mb->dst.uv_stride,
                                mb->plane[1].eobs[n * 16]);
     vp9_dequant_idct_add_16x16(BLOCK_OFFSET(mb->plane[2].qcoeff, n, 256),
                                mb->block[20].dequant,
-                               mb->dst.v_buffer + uv_offset,
-                               mb->dst.v_buffer + uv_offset,
-                               mb->dst.uv_stride, mb->dst.uv_stride,
+                               mb->dst.v_buffer + uv_offset, mb->dst.uv_stride,
                                mb->plane[2].eobs[n * 16]);
   }
 }
@@ -547,19 +504,15 @@ static INLINE void decode_sby_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
     const TX_TYPE tx_type = get_tx_type_8x8(xd,
                                             (y_idx * (2 * bw) + x_idx) * 2);
     if (tx_type == DCT_DCT) {
-      vp9_dequant_idct_add_8x8_c(BLOCK_OFFSET(xd->plane[0].qcoeff, n, 64),
+      vp9_dequant_idct_add_8x8(BLOCK_OFFSET(xd->plane[0].qcoeff, n, 64),
                                  xd->block[0].dequant,
                                  xd->dst.y_buffer + y_offset,
-                                 xd->dst.y_buffer + y_offset,
-                                 xd->dst.y_stride, xd->dst.y_stride,
-                                 xd->plane[0].eobs[n * 4]);
+                                 xd->dst.y_stride, xd->plane[0].eobs[n * 4]);
     } else {
       vp9_dequant_iht_add_8x8_c(tx_type,
                                 BLOCK_OFFSET(xd->plane[0].qcoeff, n, 64),
                                 xd->block[0].dequant,
-                                xd->dst.y_buffer + y_offset,
-                                xd->dst.y_buffer + y_offset,
-                                xd->dst.y_stride, xd->dst.y_stride,
+                                xd->dst.y_buffer + y_offset, xd->dst.y_stride,
                                 xd->plane[0].eobs[n * 4]);
     }
   }
@@ -576,17 +529,13 @@ static INLINE void decode_sbuv_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
     const int x_idx = n & (bw - 1);
     const int y_idx = n >> (bwl - 1);
    const int uv_offset = (y_idx * 8) * xd->dst.uv_stride + (x_idx * 8);
-    vp9_dequant_idct_add_8x8_c(BLOCK_OFFSET(xd->plane[1].qcoeff, n, 64),
+    vp9_dequant_idct_add_8x8(BLOCK_OFFSET(xd->plane[1].qcoeff, n, 64),
                                xd->block[16].dequant,
-                               xd->dst.u_buffer + uv_offset,
-                               xd->dst.u_buffer + uv_offset,
-                               xd->dst.uv_stride, xd->dst.uv_stride,
+                               xd->dst.u_buffer + uv_offset, xd->dst.uv_stride,
                                xd->plane[1].eobs[n * 4]);
-    vp9_dequant_idct_add_8x8_c(BLOCK_OFFSET(xd->plane[2].qcoeff, n, 64),
+    vp9_dequant_idct_add_8x8(BLOCK_OFFSET(xd->plane[2].qcoeff, n, 64),
                                xd->block[20].dequant,
-                               xd->dst.v_buffer + uv_offset,
-                               xd->dst.v_buffer + uv_offset,
-                               xd->dst.uv_stride, xd->dst.uv_stride,
+                               xd->dst.v_buffer + uv_offset, xd->dst.uv_stride,
                                xd->plane[2].eobs[n * 4]);
   }
 }
@@ -605,19 +554,13 @@ static INLINE void decode_sby_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
     if (tx_type == DCT_DCT) {
       xd->itxm_add(BLOCK_OFFSET(xd->plane[0].qcoeff, n, 16),
                    xd->block[0].dequant,
-                   xd->dst.y_buffer + y_offset,
-                   xd->dst.y_buffer + y_offset,
-                   xd->dst.y_stride, xd->dst.y_stride,
+                   xd->dst.y_buffer + y_offset, xd->dst.y_stride,
                    xd->plane[0].eobs[n]);
     } else {
       vp9_dequant_iht_add_c(tx_type,
                             BLOCK_OFFSET(xd->plane[0].qcoeff, n, 16),
-                            xd->block[0].dequant,
-                            xd->dst.y_buffer + y_offset,
-                            xd->dst.y_buffer + y_offset,
-                            xd->dst.y_stride,
-                            xd->dst.y_stride,
-                            xd->plane[0].eobs[n]);
+                            xd->block[0].dequant, xd->dst.y_buffer + y_offset,
+                            xd->dst.y_stride, xd->plane[0].eobs[n]);
     }
   }
 }
@@ -634,14 +577,10 @@ static INLINE void decode_sbuv_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
     const int uv_offset = (y_idx * 4) * xd->dst.uv_stride + (x_idx * 4);
     xd->itxm_add(BLOCK_OFFSET(xd->plane[1].qcoeff, n, 16),
                  xd->block[16].dequant,
-                 xd->dst.u_buffer + uv_offset,
-                 xd->dst.u_buffer + uv_offset,
-                 xd->dst.uv_stride, xd->dst.uv_stride, xd->plane[1].eobs[n]);
+                 xd->dst.u_buffer + uv_offset, xd->dst.uv_stride, xd->plane[1].eobs[n]);
     xd->itxm_add(BLOCK_OFFSET(xd->plane[2].qcoeff, n, 16),
                  xd->block[20].dequant,
-                 xd->dst.v_buffer + uv_offset,
-                 xd->dst.v_buffer + uv_offset,
-                 xd->dst.uv_stride, xd->dst.uv_stride, xd->plane[2].eobs[n]);
+                 xd->dst.v_buffer + uv_offset, xd->dst.uv_stride, xd->plane[2].eobs[n]);
   }
 }


@@ -81,8 +81,7 @@ void vp9_add_constant_residual_32x32_c(const int16_t diff, const uint8_t *pred,
 void vp9_dequant_iht_add_c(TX_TYPE tx_type, int16_t *input,
                            const int16_t *dq,
-                           uint8_t *pred, uint8_t *dest,
-                           int pitch, int stride, int eob) {
+                           uint8_t *dest, int stride, int eob) {
   int i;
   DECLARE_ALIGNED_ARRAY(16, int16_t, output, 16);
@@ -91,13 +90,12 @@ void vp9_dequant_iht_add_c(TX_TYPE tx_type, int16_t *input,
   vp9_short_iht4x4(input, output, 4, tx_type);
   vpx_memset(input, 0, 32);
-  vp9_add_residual_4x4(output, pred, pitch, dest, stride);
+  vp9_add_residual_4x4(output, dest, stride, dest, stride);
 }

 void vp9_dequant_iht_add_8x8_c(TX_TYPE tx_type, int16_t *input,
-                               const int16_t *dq,
-                               uint8_t *pred, uint8_t *dest,
-                               int pitch, int stride, int eob) {
+                               const int16_t *dq, uint8_t *dest,
+                               int stride, int eob) {
   DECLARE_ALIGNED_ARRAY(16, int16_t, output, 64);

   if (eob > 0) {
@@ -109,12 +107,12 @@ void vp9_dequant_iht_add_8x8_c(TX_TYPE tx_type, int16_t *input,
     vp9_short_iht8x8(input, output, 8, tx_type);
     vpx_memset(input, 0, 128);
-    vp9_add_residual_8x8(output, pred, pitch, dest, stride);
+    vp9_add_residual_8x8(output, dest, stride, dest, stride);
   }
 }

-void vp9_dequant_idct_add_c(int16_t *input, const int16_t *dq, uint8_t *pred,
-                            uint8_t *dest, int pitch, int stride, int eob) {
+void vp9_dequant_idct_add_c(int16_t *input, const int16_t *dq, uint8_t *dest,
+                            int stride, int eob) {
   int i;
   DECLARE_ALIGNED_ARRAY(16, int16_t, output, 16);
@@ -125,9 +123,9 @@ void vp9_dequant_idct_add_c(int16_t *input, const int16_t *dq, uint8_t *pred,
     // the idct halves ( >> 1) the pitch
     vp9_short_idct4x4(input, output, 4 << 1);
     vpx_memset(input, 0, 32);
-    vp9_add_residual_4x4(output, pred, pitch, dest, stride);
+    vp9_add_residual_4x4(output, dest, stride, dest, stride);
   } else {
-    vp9_dc_only_idct_add(input[0]*dq[0], pred, dest, pitch, stride);
+    vp9_dc_only_idct_add(input[0]*dq[0], dest, dest, stride, stride);
     ((int *)input)[0] = 0;
   }
 }
@@ -149,8 +147,7 @@ void vp9_dequant_dc_idct_add_c(int16_t *input, const int16_t *dq, uint8_t *pred,
 }

 void vp9_dequant_idct_add_lossless_c(int16_t *input, const int16_t *dq,
-                                     uint8_t *pred, uint8_t *dest,
-                                     int pitch, int stride, int eob) {
+                                     uint8_t *dest, int stride, int eob) {
   int i;
   DECLARE_ALIGNED_ARRAY(16, int16_t, output, 16);
@@ -160,17 +157,15 @@ void vp9_dequant_idct_add_lossless_c(int16_t *input, const int16_t *dq,
     vp9_short_iwalsh4x4_c(input, output, 4 << 1);
     vpx_memset(input, 0, 32);
-    vp9_add_residual_4x4(output, pred, pitch, dest, stride);
+    vp9_add_residual_4x4(output, dest, stride, dest, stride);
   } else {
-    vp9_dc_only_inv_walsh_add(input[0]*dq[0], pred, dest, pitch, stride);
+    vp9_dc_only_inv_walsh_add(input[0]*dq[0], dest, dest, stride, stride);
     ((int *)input)[0] = 0;
   }
 }

 void vp9_dequant_dc_idct_add_lossless_c(int16_t *input, const int16_t *dq,
-                                        uint8_t *pred,
-                                        uint8_t *dest,
-                                        int pitch, int stride, int dc) {
+                                        uint8_t *dest, int stride, int dc) {
   int i;
   DECLARE_ALIGNED_ARRAY(16, int16_t, output, 16);
@@ -181,12 +176,11 @@ void vp9_dequant_dc_idct_add_lossless_c(int16_t *input, const int16_t *dq,
   vp9_short_iwalsh4x4_c(input, output, 4 << 1);
   vpx_memset(input, 0, 32);
-  vp9_add_residual_4x4(output, pred, pitch, dest, stride);
+  vp9_add_residual_4x4(output, dest, stride, dest, stride);
 }

 void vp9_dequant_idct_add_8x8_c(int16_t *input, const int16_t *dq,
-                                uint8_t *pred, uint8_t *dest, int pitch,
-                                int stride, int eob) {
+                                uint8_t *dest, int stride, int eob) {
   DECLARE_ALIGNED_ARRAY(16, int16_t, output, 64);

   // If dc is 1, then input[0] is the reconstructed value, do not need
@@ -208,7 +202,7 @@ void vp9_dequant_idct_add_8x8_c(int16_t *input, const int16_t *dq,
       vp9_short_idct1_8x8_c(&in, &out);
       input[0] = 0;

-      vp9_add_constant_residual_8x8(out, pred, pitch, dest, stride);
+      vp9_add_constant_residual_8x8(out, dest, stride, dest, stride);
 #if !CONFIG_SCATTERSCAN
     } else if (eob <= 10) {
       input[1] *= dq[1];
@@ -228,7 +222,7 @@ void vp9_dequant_idct_add_8x8_c(int16_t *input, const int16_t *dq,
       input[16] = input[17] = 0;
       input[24] = 0;

-      vp9_add_residual_8x8(output, pred, pitch, dest, stride);
+      vp9_add_residual_8x8(output, dest, stride, dest, stride);
 #endif
     } else {
       int i;
@@ -240,14 +234,14 @@ void vp9_dequant_idct_add_8x8_c(int16_t *input, const int16_t *dq,
       // the idct halves ( >> 1) the pitch
       vp9_short_idct8x8(input, output, 8 << 1);
       vpx_memset(input, 0, 128);
-      vp9_add_residual_8x8(output, pred, pitch, dest, stride);
+      vp9_add_residual_8x8(output, dest, stride, dest, stride);
     }
   }
 }

 void vp9_dequant_iht_add_16x16_c(TX_TYPE tx_type, int16_t *input,
-                                 const int16_t *dq, uint8_t *pred,
-                                 uint8_t *dest, int pitch, int stride,
+                                 const int16_t *dq,
+                                 uint8_t *dest, int stride,
                                  int eob) {
   DECLARE_ALIGNED_ARRAY(16, int16_t, output, 256);
@@ -268,13 +262,12 @@ void vp9_dequant_iht_add_16x16_c(TX_TYPE tx_type, int16_t *input,
     vpx_memset(input, 0, 512);

-    vp9_add_residual_16x16(output, pred, pitch, dest, stride);
+    vp9_add_residual_16x16(output, dest, stride, dest, stride);
   }
 }

 void vp9_dequant_idct_add_16x16_c(int16_t *input, const int16_t *dq,
-                                  uint8_t *pred, uint8_t *dest, int pitch,
-                                  int stride, int eob) {
+                                  uint8_t *dest, int stride, int eob) {
   DECLARE_ALIGNED_ARRAY(16, int16_t, output, 256);

   /* The calculation can be simplified if there are not many non-zero dct
@@ -289,7 +282,7 @@ void vp9_dequant_idct_add_16x16_c(int16_t *input, const int16_t *dq,
       vp9_short_idct1_16x16_c(&in, &out);
       input[0] = 0;

-      vp9_add_constant_residual_16x16(out, pred, pitch, dest, stride);
+      vp9_add_constant_residual_16x16(out, dest, stride, dest, stride);
 #if !CONFIG_SCATTERSCAN
     } else if (eob <= 10) {
       input[0] *= dq[0];
@@ -312,7 +305,7 @@ void vp9_dequant_idct_add_16x16_c(int16_t *input, const int16_t *dq,
       input[32] = input[33] = 0;
       input[48] = 0;

-      vp9_add_residual_16x16(output, pred, pitch, dest, stride);
+      vp9_add_residual_16x16(output, dest, stride, dest, stride);
 #endif
     } else {
       int i;
@@ -326,21 +319,20 @@ void vp9_dequant_idct_add_16x16_c(int16_t *input, const int16_t *dq,
       // the idct halves ( >> 1) the pitch
       vp9_short_idct16x16(input, output, 16 << 1);
       vpx_memset(input, 0, 512);
-      vp9_add_residual_16x16(output, pred, pitch, dest, stride);
+      vp9_add_residual_16x16(output, dest, stride, dest, stride);
     }
   }
 }

 void vp9_dequant_idct_add_32x32_c(int16_t *input, const int16_t *dq,
-                                  uint8_t *pred, uint8_t *dest, int pitch,
-                                  int stride, int eob) {
+                                  uint8_t *dest, int stride, int eob) {
   DECLARE_ALIGNED_ARRAY(16, int16_t, output, 1024);

   if (eob) {
     input[0] = input[0] * dq[0] / 2;
     if (eob == 1) {
       vp9_short_idct1_32x32(input, output);
-      vp9_add_constant_residual_32x32(output[0], pred, pitch, dest, stride);
+      vp9_add_constant_residual_32x32(output[0], dest, stride, dest, stride);
       input[0] = 0;
 #if !CONFIG_SCATTERSCAN
     } else if (eob <= 10) {
@@ -362,7 +354,7 @@ void vp9_dequant_idct_add_32x32_c(int16_t *input, const int16_t *dq,
       input[64] = input[65] = 0;
       input[96] = 0;

-      vp9_add_residual_32x32(output, pred, pitch, dest, stride);
+      vp9_add_residual_32x32(output, dest, stride, dest, stride);
 #endif
     } else {
       int i;
@@ -370,7 +362,7 @@ void vp9_dequant_idct_add_32x32_c(int16_t *input, const int16_t *dq,
         input[i] = input[i] * dq[1] / 2;
       vp9_short_idct32x32(input, output, 64);
       vpx_memset(input, 0, 2048);
-      vp9_add_residual_32x32(output, pred, pitch, dest, stride);
+      vp9_add_residual_32x32(output, dest, stride, dest, stride);
     }
   }
 }
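
With the prediction arguments gone, every implementation above now hands dest to the add-residual helpers twice, e.g. vp9_add_residual_4x4(output, dest, stride, dest, stride): the predictor is read from dest and the clamped sum is written straight back over it. A minimal sketch of that in-place step, assuming the helper's (diff, pred, pitch, dest, stride) argument order visible in the old calls (the helper body itself is outside this diff, and the name below is illustrative):

#include <stdint.h>

/* Illustrative add-residual step: dest[r][c] = clamp(pred[r][c] + diff[r][c]).
 * Passing dest/stride for both the pred/pitch and dest/stride slots makes the
 * reconstruction happen in place. */
static void add_residual_4x4(const int16_t *diff, const uint8_t *pred,
                             int pitch, uint8_t *dest, int stride) {
  int r, c;
  for (r = 0; r < 4; r++) {
    for (c = 0; c < 4; c++) {
      const int v = pred[c] + diff[c];
      dest[c] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));  /* clip to 8 bit */
    }
    diff += 4;
    pred += pitch;
    dest += stride;
  }
}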


@@ -16,14 +16,11 @@
 void vp9_dequant_idct_add_lossless_c(int16_t *input, const int16_t *dq,
-                                     unsigned char *pred,
-                                     unsigned char *output,
-                                     int pitch, int stride, int eob);
+                                     unsigned char *dest, int stride, int eob);

 void vp9_dequant_dc_idct_add_lossless_c(int16_t *input, const int16_t *dq,
-                                        unsigned char *pred,
-                                        unsigned char *output,
-                                        int pitch, int stride, int dc);
+                                        unsigned char *output, int stride,
+                                        int dc);

 void vp9_dequant_dc_idct_add_y_block_lossless_c(int16_t *q,
                                                 const int16_t *dq,
@@ -33,30 +30,23 @@ void vp9_dequant_dc_idct_add_y_block_lossless_c(int16_t *q,
                                                 const int16_t *dc);

 void vp9_dequant_idct_add_y_block_lossless_c(int16_t *q, const int16_t *dq,
-                                             unsigned char *pre, int pre_stride,
-                                             unsigned char *dst,
-                                             int stride,
+                                             unsigned char *dst, int stride,
                                              struct macroblockd *xd);

 void vp9_dequant_idct_add_uv_block_lossless_c(int16_t *q, const int16_t *dq,
-                                              unsigned char *pre,
-                                              int pre_stride,
                                               unsigned char *dst,
                                               int stride,
                                               uint16_t *eobs);

 void vp9_dequant_iht_add_c(TX_TYPE tx_type, int16_t *input, const int16_t *dq,
-                           unsigned char *pred, unsigned char *dest,
-                           int pitch, int stride, int eob);
+                           unsigned char *dest, int stride, int eob);

 void vp9_dequant_iht_add_8x8_c(TX_TYPE tx_type, int16_t *input,
-                               const int16_t *dq, unsigned char *pred,
-                               unsigned char *dest, int pitch, int stride,
-                               int eob);
+                               const int16_t *dq, unsigned char *dest,
+                               int stride, int eob);

 void vp9_dequant_iht_add_16x16_c(TX_TYPE tx_type, int16_t *input,
-                                 const int16_t *dq, unsigned char *pred,
-                                 unsigned char *dest,
-                                 int pitch, int stride, int eob);
+                                 const int16_t *dq, unsigned char *dest,
+                                 int stride, int eob);

 #endif // VP9_DECODER_VP9_DEQUANTIZE_H_


@@ -13,101 +13,78 @@
 #include "vp9/decoder/vp9_dequantize.h"

 void vp9_dequant_idct_add_y_block_c(int16_t *q, const int16_t *dq,
-                                    uint8_t *pre, int pre_stride,
-                                    uint8_t *dst,
-                                    int stride, MACROBLOCKD *xd) {
+                                    uint8_t *dst, int stride, MACROBLOCKD *xd) {
   int i, j;

   for (i = 0; i < 4; i++) {
     for (j = 0; j < 4; j++) {
-      vp9_dequant_idct_add(q, dq, pre, dst, pre_stride, stride,
-                           xd->plane[0].eobs[i * 4 + j]);
+      vp9_dequant_idct_add(q, dq, dst, stride, xd->plane[0].eobs[i * 4 + j]);
       q += 16;
-      pre += 4;
       dst += 4;
     }

-    pre += 4 * pre_stride - 16;
     dst += 4 * stride - 16;
   }
 }

 void vp9_dequant_idct_add_uv_block_c(int16_t *q, const int16_t *dq,
-                                     uint8_t *pre, int pre_stride, uint8_t *dst,
-                                     int stride, uint16_t *eobs) {
+                                     uint8_t *dst, int stride, uint16_t *eobs) {
   int i, j;

   for (i = 0; i < 2; i++) {
     for (j = 0; j < 2; j++) {
-      vp9_dequant_idct_add(q, dq, pre, dst, pre_stride, stride,
-                           eobs[i * 2 + j]);
+      vp9_dequant_idct_add(q, dq, dst, stride, eobs[i * 2 + j]);
       q += 16;
-      pre += 4;
       dst += 4;
     }

-    pre += 4 * pre_stride - 8;
     dst += 4 * stride - 8;
   }
 }

 void vp9_dequant_idct_add_y_block_8x8_c(int16_t *q, const int16_t *dq,
-                                        uint8_t *pre, int pre_stride,
-                                        uint8_t *dst,
-                                        int stride, MACROBLOCKD *xd) {
+                                        uint8_t *dst, int stride,
+                                        MACROBLOCKD *xd) {
   uint8_t *origdest = dst;
-  uint8_t *origpred = pre;

-  vp9_dequant_idct_add_8x8_c(q, dq, pre, dst, pre_stride, stride,
-                             xd->plane[0].eobs[0]);
-  vp9_dequant_idct_add_8x8_c(&q[64], dq, origpred + 8,
-                             origdest + 8, pre_stride, stride,
+  vp9_dequant_idct_add_8x8_c(q, dq, dst, stride, xd->plane[0].eobs[0]);
+  vp9_dequant_idct_add_8x8_c(&q[64], dq, origdest + 8, stride,
                              xd->plane[0].eobs[4]);
-  vp9_dequant_idct_add_8x8_c(&q[128], dq, origpred + 8 * pre_stride,
-                             origdest + 8 * stride, pre_stride, stride,
+  vp9_dequant_idct_add_8x8_c(&q[128], dq, origdest + 8 * stride, stride,
                              xd->plane[0].eobs[8]);
-  vp9_dequant_idct_add_8x8_c(&q[192], dq, origpred + 8 * pre_stride + 8,
-                             origdest + 8 * stride + 8, pre_stride, stride,
+  vp9_dequant_idct_add_8x8_c(&q[192], dq, origdest + 8 * stride + 8, stride,
                              xd->plane[0].eobs[12]);
 }

 void vp9_dequant_idct_add_y_block_lossless_c(int16_t *q, const int16_t *dq,
-                                             uint8_t *pre, int pre_stride,
-                                             uint8_t *dst,
-                                             int stride, MACROBLOCKD *xd) {
+                                             uint8_t *dst, int stride,
+                                             MACROBLOCKD *xd) {
   int i, j;

   for (i = 0; i < 4; i++) {
     for (j = 0; j < 4; j++) {
-      vp9_dequant_idct_add_lossless_c(q, dq, pre, dst, pre_stride, stride,
+      vp9_dequant_idct_add_lossless_c(q, dq, dst, stride,
                                       xd->plane[0].eobs[i * 4 + j]);
       q += 16;
-      pre += 4;
       dst += 4;
     }

-    pre += 4 * pre_stride - 16;
     dst += 4 * stride - 16;
   }
 }

 void vp9_dequant_idct_add_uv_block_lossless_c(int16_t *q, const int16_t *dq,
-                                              uint8_t *pre, int pre_stride,
-                                              uint8_t *dst,
-                                              int stride,
+                                              uint8_t *dst, int stride,
                                               uint16_t *eobs) {
   int i, j;

   for (i = 0; i < 2; i++) {
     for (j = 0; j < 2; j++) {
-      vp9_dequant_idct_add_lossless_c(q, dq, pre, dst, pre_stride, stride,
-                                      eobs[i * 2 + j]);
+      vp9_dequant_idct_add_lossless_c(q, dq, dst, stride, eobs[i * 2 + j]);
       q += 16;
-      pre += 4;
       dst += 4;
     }

-    pre += 4 * pre_stride - 8;
     dst += 4 * stride - 8;
   }
 }