diff --git a/vp8/common/invtrans.c b/vp8/common/invtrans.c
index 090c19cca..478cb329f 100644
--- a/vp8/common/invtrans.c
+++ b/vp8/common/invtrans.c
@@ -44,10 +44,13 @@ void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *
 {
     int i;
 
-    /* do 2nd order transform on the dc block */
-    IDCT_INVOKE(rtcd, iwalsh16)(x->block[24].dqcoeff, x->block[24].diff);
+    if(x->mode_info_context->mbmi.mode != SPLITMV)
+    {
+        /* do 2nd order transform on the dc block */
+        IDCT_INVOKE(rtcd, iwalsh16)(x->block[24].dqcoeff, x->block[24].diff);
 
-    recon_dcblock(x);
+        recon_dcblock(x);
+    }
 
     for (i = 0; i < 16; i++)
     {
diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c
index 7f2b46daa..6f5f3300f 100644
--- a/vp8/encoder/encodeframe.c
+++ b/vp8/encoder/encodeframe.c
@@ -28,6 +28,7 @@
 #include
 #include
 #include "vp8/common/subpixel.h"
+#include "vp8/common/invtrans.h"
 #include "vpx_ports/vpx_timer.h"
 
 #if CONFIG_RUNTIME_CPU_DETECT
@@ -1165,6 +1166,11 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
     sum_intra_stats(cpi, x);
     vp8_tokenize_mb(cpi, &x->e_mbd, t);
 
+    if (x->e_mbd.mode_info_context->mbmi.mode != B_PRED)
+        vp8_inverse_transform_mby(IF_RTCD(&cpi->rtcd.common->idct), &x->e_mbd);
+
+    vp8_inverse_transform_mbuv(IF_RTCD(&cpi->rtcd.common->idct), &x->e_mbd);
+
     return rate;
 }
 #ifdef SPEEDSTATS
@@ -1337,7 +1343,15 @@ int vp8cx_encode_inter_macroblock
     }
 
     if (!x->skip)
+    {
         vp8_tokenize_mb(cpi, xd, t);
+        if (x->e_mbd.mode_info_context->mbmi.mode != B_PRED)
+        {
+            vp8_inverse_transform_mby(IF_RTCD(&cpi->rtcd.common->idct),
+                                      &x->e_mbd);
+        }
+        vp8_inverse_transform_mbuv(IF_RTCD(&cpi->rtcd.common->idct), &x->e_mbd);
+    }
     else
     {
         if (cpi->common.mb_no_coeff_skip)
diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c
index 1c07cbdd5..63aff4e55 100644
--- a/vp8/encoder/encodeintra.c
+++ b/vp8/encoder/encodeintra.c
@@ -37,11 +37,15 @@ int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred)
 
     if (use_dc_pred)
     {
+        const VP8_ENCODER_RTCD *rtcd = IF_RTCD(&cpi->rtcd);
+
         x->e_mbd.mode_info_context->mbmi.mode = DC_PRED;
         x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
         x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
 
-        vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
+        vp8_encode_intra16x16mby(rtcd, x);
+
+        vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
     }
     else
     {
@@ -104,8 +108,6 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
     if (x->optimize)
         vp8_optimize_mby(x, rtcd);
 
-    vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-
 }
 
 void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
@@ -121,6 +123,4 @@ void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
     if (x->optimize)
         vp8_optimize_mbuv(x, rtcd);
 
-    vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-
 }
diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c
index db83b2f75..80c32df1b 100644
--- a/vp8/encoder/encodemb.c
+++ b/vp8/encoder/encodemb.c
@@ -619,67 +619,6 @@ void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
     }
 }
 
-static void recon_dcblock(MACROBLOCKD *x)
-{
-    BLOCKD *b = &x->block[24];
-    int i;
-
-    for (i = 0; i < 16; i++)
-    {
-        x->block[i].dqcoeff[0] = b->diff[i];
-    }
-
-}
-
-
-static void inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd,
-                                 MACROBLOCKD *x)
-{
-    int i;
-
-    if (x->mode_info_context->mbmi.mode != B_PRED &&
-        x->mode_info_context->mbmi.mode != SPLITMV)
-    {
-        /* do 2nd order transform on the dc block */
-
-        IDCT_INVOKE(rtcd, iwalsh16)(&x->block[24].dqcoeff[0], x->block[24].diff);
-        recon_dcblock(x);
-    }
-
-    for (i = 0; i < 16; i++)
-    {
-        BLOCKD *b = &x->block[i];
-
-        if (*b->eob > 1)
-        {
-            IDCT_INVOKE(rtcd, idct16)(b->dqcoeff, b->predictor, 16,
-                                      *(b->base_dst) + b->dst, b->dst_stride);
-        }
-        else
-        {
-            IDCT_INVOKE(rtcd, idct1_scalar_add)(b->dqcoeff[0], b->predictor, 16,
-                                                *(b->base_dst) + b->dst, b->dst_stride);
-        }
-    }
-
-
-    for (i = 16; i < 24; i++)
-    {
-        BLOCKD *b = &x->block[i];
-
-        if (*b->eob > 1)
-        {
-            IDCT_INVOKE(rtcd, idct16)(b->dqcoeff, b->predictor, 8,
-                                      *(b->base_dst) + b->dst, b->dst_stride);
-        }
-        else
-        {
-            IDCT_INVOKE(rtcd, idct1_scalar_add)(b->dqcoeff[0], b->predictor, 8,
-                                                *(b->base_dst) + b->dst, b->dst_stride);
-        }
-    }
-
-}
 void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
 {
     vp8_build_inter_predictors_mb_e(&x->e_mbd);
@@ -693,11 +632,8 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
     if (x->optimize)
         optimize_mb(x, rtcd);
 
-    inverse_transform_mb(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
-
 }
 
-
 /* this funciton is used by first pass only */
 void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
 {
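Note on the change (not part of the patch): the encoder-local recon_dcblock() and inverse_transform_mb() in encodemb.c are deleted, and reconstruction now happens after tokenization through the shared helpers declared in vp8/common/invtrans.h, called from encodeframe.c and, for the first-pass intra path, encodeintra.c. In addition, vp8_inverse_transform_mby() now skips the second-order inverse Walsh-Hadamard when the macroblock mode is SPLITMV, since split-MV macroblocks carry no Y2 (DC) block. Below is a minimal sketch of the per-block dispatch pattern used by the removed code and by the shared helpers, assuming the BLOCKD type and the IDCT_INVOKE vtable macro from this tree; the helper name is hypothetical and the snippet is illustrative only, not part of the patch.

    /* Sketch: run a full 4x4 inverse DCT only when more than one coefficient
     * survived quantization (*b->eob > 1); otherwise a DC-only scalar add is
     * sufficient. pitch is 16 for luma blocks and 8 for chroma blocks. */
    static void inverse_transform_block_sketch(const vp8_idct_rtcd_vtable_t *rtcd,
                                               BLOCKD *b, int pitch)
    {
        if (*b->eob > 1)
            IDCT_INVOKE(rtcd, idct16)(b->dqcoeff, b->predictor, pitch,
                                      *(b->base_dst) + b->dst, b->dst_stride);
        else
            IDCT_INVOKE(rtcd, idct1_scalar_add)(b->dqcoeff[0], b->predictor, pitch,
                                                *(b->base_dst) + b->dst,
                                                b->dst_stride);
    }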