Add a _4x4 suffix to all 4x4-transform-related functions.

This includes trellis optimization, forward/inverse transform, quantization,
tokenization and stuffing functions.

Change-Id: Ibd34132e1bf0cd667671a57b3f25b3d361b9bf8a
parent e03715fe6c
commit f1e629320b
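The rename is mechanical: every trellis, transform, quantization, tokenization and stuffing routine that operates on 4x4 blocks gains a _4x4 suffix, mirroring the existing _8x8 and _16x16 names, so the transform size is explicit at every call site. As a rough sketch (the paired before/after lines below are illustrative, assembled from call sites in this diff, not a file of their own), a caller migrates like this:

    /* before: the transform size is implicit */
    x->quantize_b(be, b);
    vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);

    /* after: the 4x4 size is spelled out, matching quantize_b_8x8 etc. */
    x->quantize_b_4x4(be, b);
    vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32);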
@@ -31,7 +31,7 @@ static void recon_dcblock_8x8(MACROBLOCKD *xd) {
 }
 
-void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch) {
+void vp8_inverse_transform_b_4x4(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch) {
   if (b->eob <= 1)
     IDCT_INVOKE(rtcd, idct1)(b->dqcoeff, b->diff, pitch);
   else
@@ -39,8 +39,8 @@ void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int
 }
 
-void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd,
-                               MACROBLOCKD *xd) {
+void vp8_inverse_transform_mby_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
+                                   MACROBLOCKD *xd) {
   int i;
   BLOCKD *blockd = xd->block;
 
@@ -50,24 +50,24 @@ void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd,
     recon_dcblock(xd);
 
   for (i = 0; i < 16; i++) {
-    vp8_inverse_transform_b(rtcd, &blockd[i], 32);
+    vp8_inverse_transform_b_4x4(rtcd, &blockd[i], 32);
   }
 }
 
-void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd,
-                                MACROBLOCKD *xd) {
+void vp8_inverse_transform_mbuv_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
+                                    MACROBLOCKD *xd) {
   int i;
   BLOCKD *blockd = xd->block;
 
   for (i = 16; i < 24; i++) {
-    vp8_inverse_transform_b(rtcd, &blockd[i], 16);
+    vp8_inverse_transform_b_4x4(rtcd, &blockd[i], 16);
   }
 }
 
-void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd,
-                              MACROBLOCKD *xd) {
+void vp8_inverse_transform_mb_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
+                                  MACROBLOCKD *xd) {
   int i;
   BLOCKD *blockd = xd->block;
 
@@ -81,12 +81,12 @@ void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd,
   }
 
   for (i = 0; i < 16; i++) {
-    vp8_inverse_transform_b(rtcd, &blockd[i], 32);
+    vp8_inverse_transform_b_4x4(rtcd, &blockd[i], 32);
  }
 
  for (i = 16; i < 24; i++) {
-    vp8_inverse_transform_b(rtcd, &blockd[i], 16);
+    vp8_inverse_transform_b_4x4(rtcd, &blockd[i], 16);
  }
 }
@@ -16,10 +16,10 @@
 #include "idct.h"
 #include "blockd.h"
 
-extern void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
-extern void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
-extern void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
-extern void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_b_4x4(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
+extern void vp8_inverse_transform_mb_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_mby_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_mbuv_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
 
 extern void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd, short *input_dqcoeff, short *output_coeff, int pitch);
 extern void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
@@ -183,8 +183,8 @@ typedef struct {
   void (*vp8_short_fdct4x4)(short *input, short *output, int pitch);
   void (*vp8_short_fdct8x4)(short *input, short *output, int pitch);
   void (*short_walsh4x4)(short *input, short *output, int pitch);
-  void (*quantize_b)(BLOCK *b, BLOCKD *d);
-  void (*quantize_b_pair)(BLOCK *b1, BLOCK *b2, BLOCKD *d0, BLOCKD *d1);
+  void (*quantize_b_4x4)(BLOCK *b, BLOCKD *d);
+  void (*quantize_b_4x4_pair)(BLOCK *b1, BLOCK *b2, BLOCKD *d0, BLOCKD *d1);
   void (*vp8_short_fdct8x8)(short *input, short *output, int pitch);
   void (*vp8_short_fdct16x16)(short *input, short *output, int pitch);
   void (*short_fhaar2x2)(short *input, short *output, int pitch);
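The quantize_b and quantize_b_pair members above are function pointers in the macroblock's runtime dispatch table; renaming them to quantize_b_4x4 and quantize_b_4x4_pair keeps the pointer names aligned with the concrete _4x4 implementations they are bound to. A minimal, self-contained sketch of that dispatch pattern (the simplified Block/BlockD types and the main function here are hypothetical stand-ins, not libvpx's types):

    #include <stdio.h>

    typedef struct { int coeff; }  Block;   /* stand-in for BLOCK */
    typedef struct { int qcoeff; } BlockD;  /* stand-in for BLOCKD */

    typedef struct {
      /* named after the transform size it handles, as in block.h */
      void (*quantize_b_4x4)(Block *b, BlockD *d);
    } Encoder;

    static void regular_quantize_b_4x4(Block *b, BlockD *d) {
      d->qcoeff = b->coeff / 2;  /* placeholder for real quantization */
    }

    int main(void) {
      Encoder enc = { regular_quantize_b_4x4 };  /* bind, as onyx_if.c does */
      Block b = { 10 };
      BlockD d;
      enc.quantize_b_4x4(&b, &d);  /* call through the table */
      printf("%d\n", d.qcoeff);    /* prints 5 */
      return 0;
    }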
@@ -52,8 +52,8 @@ int enc_debug = 0;
 int mb_row_debug, mb_col_debug;
 #endif
 
-extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd,
-                         TOKENEXTRA **t, int dry_run);
+extern void vp8_stuff_mb_4x4(VP8_COMP *cpi, MACROBLOCKD *xd,
+                             TOKENEXTRA **t, int dry_run);
 
 extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
 extern void vp8_auto_select_speed(VP8_COMP *cpi);
@@ -2153,7 +2153,7 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
       cpi->skip_true_count[mb_skip_context]++;
       vp8_fix_contexts(xd);
     } else {
-      vp8_stuff_mb(cpi, xd, t, !output_enabled);
+      vp8_stuff_mb_4x4(cpi, xd, t, !output_enabled);
       mbmi->mb_skip_coeff = 0;
       if (output_enabled)
         cpi->skip_false_count[mb_skip_context]++;
@@ -2352,7 +2352,7 @@ void vp8cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
       cpi->skip_true_count[mb_skip_context]++;
       vp8_fix_contexts(xd);
     } else {
-      vp8_stuff_mb(cpi, xd, t, 0);
+      vp8_stuff_mb_4x4(cpi, xd, t, 0);
       xd->mode_info_context->mbmi.mb_skip_coeff = 0;
       cpi->skip_false_count[mb_skip_context]++;
     }
@@ -28,10 +28,6 @@
 #define IF_RTCD(x) NULL
 #endif
 
-#if CONFIG_HYBRIDTRANSFORM
-extern void vp8_ht_quantize_b(BLOCK *b, BLOCKD *d);
-#endif
-
 int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) {
   int i;
   int intra_pred_var = 0;
@@ -89,17 +85,17 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
     b->bmi.as_mode.test = b->bmi.as_mode.first;
     txfm_map(b, b->bmi.as_mode.first);
     vp8_fht_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type, 4);
-    vp8_ht_quantize_b(be, b);
+    vp8_ht_quantize_b_4x4(be, b);
     vp8_ihtllm_c(b->dqcoeff, b->diff, 32, b->bmi.as_mode.tx_type, 4);
   } else {
     x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32) ;
-    x->quantize_b(be, b) ;
-    vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32) ;
+    x->quantize_b_4x4(be, b) ;
+    vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32) ;
   }
 #else
   x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
-  x->quantize_b(be, b);
-  vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
+  x->quantize_b_4x4(be, b);
+  vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32);
 #endif
 
   RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
@@ -156,14 +152,14 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   else if (tx_type == TX_8X8)
     vp8_transform_intra_mby_8x8(x);
   else
-    vp8_transform_intra_mby(x);
+    vp8_transform_intra_mby_4x4(x);
 
   if (tx_type == TX_16X16)
     vp8_quantize_mby_16x16(x);
   else if (tx_type == TX_8X8)
     vp8_quantize_mby_8x8(x);
   else
-    vp8_quantize_mby(x);
+    vp8_quantize_mby_4x4(x);
 
   if (x->optimize) {
     if (tx_type == TX_16X16)
@@ -171,7 +167,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
     else if (tx_type == TX_8X8)
       vp8_optimize_mby_8x8(x, rtcd);
     else
-      vp8_optimize_mby(x, rtcd);
+      vp8_optimize_mby_4x4(x, rtcd);
   }
 
   if (tx_type == TX_16X16)
@@ -190,7 +186,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   else if (tx_type == TX_8X8)
     vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
   else
-    vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+    vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
 
   RECON_INVOKE(&rtcd->common->recon, recon_mby)
   (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
@@ -214,24 +210,24 @@ void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   if (tx_type == TX_8X8)
     vp8_transform_mbuv_8x8(x);
   else
-    vp8_transform_mbuv(x);
+    vp8_transform_mbuv_4x4(x);
 
   if (tx_type == TX_8X8)
     vp8_quantize_mbuv_8x8(x);
   else
-    vp8_quantize_mbuv(x);
+    vp8_quantize_mbuv_4x4(x);
 
   if (x->optimize) {
     if (tx_type == TX_8X8)
       vp8_optimize_mbuv_8x8(x, rtcd);
     else
-      vp8_optimize_mbuv(x, rtcd);
+      vp8_optimize_mbuv_4x4(x, rtcd);
   }
 
   if (tx_type == TX_8X8)
     vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
   else
-    vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+    vp8_inverse_transform_mbuv_4x4(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
 
   vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
 }
@@ -280,8 +276,8 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
     be = &x->block[ib + iblock[i]];
     ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
     x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
-    x->quantize_b(be, b);
-    vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
+    x->quantize_b_4x4(be, b);
+    vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32);
   }
 }
 
@@ -324,9 +320,9 @@ void vp8_encode_intra_uv4x4(const VP8_ENCODER_RTCD *rtcd,
 
   x->vp8_short_fdct4x4(be->src_diff, be->coeff, 16);
 
-  x->quantize_b(be, b);
+  x->quantize_b_4x4(be, b);
 
-  vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 16);
+  vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 16);
 
   RECON_INVOKE(&rtcd->common->recon, recon_uv)(b->predictor,
                b->diff, *(b->base_dst) + b->dst, b->dst_stride);
@@ -132,7 +132,7 @@ static void vp8_subtract_mb(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
 }
 
-static void build_dcblock(MACROBLOCK *x) {
+static void build_dcblock_4x4(MACROBLOCK *x) {
   short *src_diff_ptr = &x->src_diff[384];
   int i;
 
@@ -140,6 +140,7 @@ static void build_dcblock(MACROBLOCK *x) {
     src_diff_ptr[i] = x->coeff[i * 16];
   }
 }
+
 void vp8_build_dcblock_8x8(MACROBLOCK *x) {
   short *src_diff_ptr = &x->src_diff[384];
   int i;
@@ -152,7 +153,7 @@ void vp8_build_dcblock_8x8(MACROBLOCK *x) {
   src_diff_ptr[8] = x->coeff[12 * 16];
 }
 
-void vp8_transform_mbuv(MACROBLOCK *x) {
+void vp8_transform_mbuv_4x4(MACROBLOCK *x) {
   int i;
 
   for (i = 16; i < 24; i += 2) {
@@ -162,7 +163,7 @@ void vp8_transform_mbuv(MACROBLOCK *x) {
 }
 
 
-void vp8_transform_intra_mby(MACROBLOCK *x) {
+void vp8_transform_intra_mby_4x4(MACROBLOCK *x) {
   int i;
 
   for (i = 0; i < 16; i += 2) {
@@ -171,7 +172,7 @@ void vp8_transform_intra_mby(MACROBLOCK *x) {
   }
 
   // build dc block from 16 y dc values
-  build_dcblock(x);
+  build_dcblock_4x4(x);
 
   // do 2nd order transform on the dc block
   x->short_walsh4x4(&x->block[24].src_diff[0],
@@ -180,7 +181,7 @@ void vp8_transform_intra_mby(MACROBLOCK *x) {
 }
 
 
-static void transform_mb(MACROBLOCK *x) {
+static void transform_mb_4x4(MACROBLOCK *x) {
   int i;
   MB_PREDICTION_MODE mode = x->e_mbd.mode_info_context->mbmi.mode;
 
@@ -191,7 +192,7 @@ static void transform_mb(MACROBLOCK *x) {
 
   // build dc block from 16 y dc values
   if (mode != SPLITMV)
-    build_dcblock(x);
+    build_dcblock_4x4(x);
 
   for (i = 16; i < 24; i += 2) {
     x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
@@ -206,7 +207,7 @@ static void transform_mb(MACROBLOCK *x) {
 }
 
 
-static void transform_mby(MACROBLOCK *x) {
+static void transform_mby_4x4(MACROBLOCK *x) {
   int i;
 
   for (i = 0; i < 16; i += 2) {
@@ -216,7 +217,7 @@ static void transform_mby(MACROBLOCK *x) {
 
   // build dc block from 16 y dc values
   if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
-    build_dcblock(x);
+    build_dcblock_4x4(x);
     x->short_walsh4x4(&x->block[24].src_diff[0],
                       &x->block[24].coeff[0], 8);
   }
@@ -676,7 +677,7 @@ static void check_reset_8x8_2nd_coeffs(MACROBLOCKD *xd, int type,
   }
 }
 
-static void optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+static void optimize_mb_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
   int b;
   int type;
   int has_2nd_order;
@@ -714,7 +715,7 @@ static void optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
 }
 
 
-void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+void vp8_optimize_mby_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
   int b;
   int type;
   int has_2nd_order;
@@ -754,7 +755,7 @@ void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
   }
 }
 
-void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+void vp8_optimize_mbuv_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
   int b;
   ENTROPY_CONTEXT_PLANES t_above, t_left;
   ENTROPY_CONTEXT *ta;
@@ -1106,14 +1107,14 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   else if (tx_type == TX_8X8)
     vp8_transform_mb_8x8(x);
   else
-    transform_mb(x);
+    transform_mb_4x4(x);
 
   if (tx_type == TX_16X16)
     vp8_quantize_mb_16x16(x);
   else if (tx_type == TX_8X8)
     vp8_quantize_mb_8x8(x);
   else
-    vp8_quantize_mb(x);
+    vp8_quantize_mb_4x4(x);
 
   if (x->optimize) {
     if (tx_type == TX_16X16)
@@ -1121,7 +1122,7 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
     else if (tx_type == TX_8X8)
       optimize_mb_8x8(x, rtcd);
     else
-      optimize_mb(x, rtcd);
+      optimize_mb_4x4(x, rtcd);
   }
 
   if (tx_type == TX_16X16)
@@ -1130,7 +1131,7 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   if (tx_type == TX_8X8)
     vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
   else
-    vp8_inverse_transform_mb(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+    vp8_inverse_transform_mb_4x4(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
 
   if (tx_type == TX_8X8) {
 #ifdef ENC_DEBUG
@@ -1204,9 +1205,9 @@ void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   else if (tx_type == TX_8X8)
     vp8_transform_mby_8x8(x);
   else
-    transform_mby(x);
+    transform_mby_4x4(x);
 
-  vp8_quantize_mby(x);
+  vp8_quantize_mby_4x4(x);
 
   if (tx_type == TX_16X16)
     vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
@@ -1214,7 +1215,7 @@ void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   if (tx_type == TX_8X8)
     vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
   else
-    vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+    vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
 
   RECON_INVOKE(&rtcd->common->recon, recon_mby)
   (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
@@ -105,12 +105,12 @@ struct VP8_ENCODER_RTCD;
 void vp8_encode_inter16x16(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
 
 void vp8_build_dcblock(MACROBLOCK *b);
-void vp8_transform_mb(MACROBLOCK *mb);
-void vp8_transform_mbuv(MACROBLOCK *x);
-void vp8_transform_intra_mby(MACROBLOCK *x);
+void vp8_transform_mb_4x4(MACROBLOCK *mb);
+void vp8_transform_mbuv_4x4(MACROBLOCK *x);
+void vp8_transform_intra_mby_4x4(MACROBLOCK *x);
 
-void vp8_optimize_mby(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
-void vp8_optimize_mbuv(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
+void vp8_optimize_mby_4x4(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
+void vp8_optimize_mbuv_4x4(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
 void vp8_encode_inter16x16y(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
 
 void vp8_transform_mb_8x8(MACROBLOCK *mb);
@@ -1218,11 +1218,11 @@ void vp8_set_speed_features(VP8_COMP *cpi) {
   cpi->mb.short_fhaar2x2 = FDCT_INVOKE(&cpi->rtcd.fdct, haar_short2x2);
 
 
-  cpi->mb.quantize_b = vp8_regular_quantize_b;
-  cpi->mb.quantize_b_pair = vp8_regular_quantize_b_pair;
-  cpi->mb.quantize_b_8x8 = vp8_regular_quantize_b_8x8;
-  cpi->mb.quantize_b_16x16= vp8_regular_quantize_b_16x16;
-  cpi->mb.quantize_b_2x2 = vp8_regular_quantize_b_2x2;
+  cpi->mb.quantize_b_4x4 = vp8_regular_quantize_b_4x4;
+  cpi->mb.quantize_b_4x4_pair = vp8_regular_quantize_b_4x4_pair;
+  cpi->mb.quantize_b_8x8 = vp8_regular_quantize_b_8x8;
+  cpi->mb.quantize_b_16x16 = vp8_regular_quantize_b_16x16;
+  cpi->mb.quantize_b_2x2 = vp8_regular_quantize_b_2x2;
 
   vp8cx_init_quantizer(cpi);
@@ -23,7 +23,7 @@ extern int enc_debug;
 #endif
 
 #if CONFIG_HYBRIDTRANSFORM
-void vp8_ht_quantize_b(BLOCK *b, BLOCKD *d) {
+void vp8_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
   int i, rc, eob;
   int zbin;
   int x, y, z, sz;
@@ -88,7 +88,7 @@ void vp8_ht_quantize_b(BLOCK *b, BLOCKD *d) {
 }
 #endif
 
-void vp8_regular_quantize_b(BLOCK *b, BLOCKD *d) {
+void vp8_regular_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
   int i, rc, eob;
   int zbin;
   int x, y, z, sz;
@@ -137,35 +137,35 @@ void vp8_regular_quantize_b(BLOCK *b, BLOCKD *d) {
   d->eob = eob + 1;
 }
 
-void vp8_quantize_mby_c(MACROBLOCK *x) {
+void vp8_quantize_mby_4x4_c(MACROBLOCK *x) {
   int i;
   int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
                        && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
                        && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
 
   for (i = 0; i < 16; i++)
-    x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+    x->quantize_b_4x4(&x->block[i], &x->e_mbd.block[i]);
 
   if (has_2nd_order)
-    x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
+    x->quantize_b_4x4(&x->block[24], &x->e_mbd.block[24]);
 }
 
-void vp8_quantize_mb_c(MACROBLOCK *x) {
+void vp8_quantize_mb_4x4_c(MACROBLOCK *x) {
   int i;
   int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED
                        && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
                        && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
 
   for (i = 0; i < 24 + has_2nd_order; i++)
-    x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+    x->quantize_b_4x4(&x->block[i], &x->e_mbd.block[i]);
 }
 
 
-void vp8_quantize_mbuv_c(MACROBLOCK *x) {
+void vp8_quantize_mbuv_4x4_c(MACROBLOCK *x) {
   int i;
 
   for (i = 16; i < 24; i++)
-    x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+    x->quantize_b_4x4(&x->block[i], &x->e_mbd.block[i]);
 }
 
 
@@ -391,9 +391,9 @@ void vp8_regular_quantize_b_16x16(BLOCK *b, BLOCKD *d) {
  * these two C functions if corresponding optimized routine is not available.
  * NEON optimized version implements currently the fast quantization for pair
  * of blocks. */
-void vp8_regular_quantize_b_pair(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2) {
-  vp8_regular_quantize_b(b1, d1);
-  vp8_regular_quantize_b(b2, d2);
+void vp8_regular_quantize_b_4x4_pair(BLOCK *b1, BLOCK *b2, BLOCKD *d1, BLOCKD *d2) {
+  vp8_regular_quantize_b_4x4(b1, d1);
+  vp8_regular_quantize_b_4x4(b2, d2);
 }
 
 static void invert_quant(short *quant,
@@ -31,15 +31,19 @@
 #include "arm/quantize_arm.h"
 #endif
 
-#ifndef vp8_quantize_quantb
-#define vp8_quantize_quantb vp8_regular_quantize_b
+#if CONFIG_HYBRIDTRANSFORM
+extern prototype_quantize_block(vp8_ht_quantize_b_4x4);
 #endif
-extern prototype_quantize_block(vp8_quantize_quantb);
 
-#ifndef vp8_quantize_quantb_pair
-#define vp8_quantize_quantb_pair vp8_regular_quantize_b_pair
+#ifndef vp8_quantize_quantb_4x4
+#define vp8_quantize_quantb_4x4 vp8_regular_quantize_b_4x4
 #endif
-extern prototype_quantize_block_pair(vp8_quantize_quantb_pair);
+extern prototype_quantize_block(vp8_quantize_quantb_4x4);
+
+#ifndef vp8_quantize_quantb_4x4_pair
+#define vp8_quantize_quantb_4x4_pair vp8_regular_quantize_b_4x4_pair
+#endif
+extern prototype_quantize_block_pair(vp8_quantize_quantb_4x4_pair);
 
 #ifndef vp8_quantize_quantb_8x8
 #define vp8_quantize_quantb_8x8 vp8_regular_quantize_b_8x8
@@ -56,21 +60,21 @@ extern prototype_quantize_block(vp8_quantize_quantb_16x16);
 #endif
 extern prototype_quantize_block(vp8_quantize_quantb_2x2);
 
-#ifndef vp8_quantize_mb
-#define vp8_quantize_mb vp8_quantize_mb_c
+#ifndef vp8_quantize_mb_4x4
+#define vp8_quantize_mb_4x4 vp8_quantize_mb_4x4_c
 #endif
-extern prototype_quantize_mb(vp8_quantize_mb);
+extern prototype_quantize_mb(vp8_quantize_mb_4x4);
 void vp8_quantize_mb_8x8(MACROBLOCK *x);
 
-#ifndef vp8_quantize_mbuv
-#define vp8_quantize_mbuv vp8_quantize_mbuv_c
+#ifndef vp8_quantize_mbuv_4x4
+#define vp8_quantize_mbuv_4x4 vp8_quantize_mbuv_4x4_c
 #endif
-extern prototype_quantize_mb(vp8_quantize_mbuv);
+extern prototype_quantize_mb(vp8_quantize_mbuv_4x4);
 
-#ifndef vp8_quantize_mby
-#define vp8_quantize_mby vp8_quantize_mby_c
+#ifndef vp8_quantize_mby_4x4
+#define vp8_quantize_mby_4x4 vp8_quantize_mby_4x4_c
 #endif
-extern prototype_quantize_mb(vp8_quantize_mby);
+extern prototype_quantize_mb(vp8_quantize_mby_4x4);
 
 extern prototype_quantize_mb(vp8_quantize_mby_8x8);
 extern prototype_quantize_mb(vp8_quantize_mbuv_8x8);
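The quantize.h hunks above follow the RTCD override convention: each hook gets an #ifndef guard so a platform header included earlier can #define the hook to an optimized routine, with the portable C version as the default. A compressed sketch of the pattern, assuming a hypothetical NEON symbol name purely for illustration:

    /* arm/quantize_arm.h, included first, could say (hypothetical name):
     *   #define vp8_quantize_quantb_4x4 vp8_fast_quantize_b_4x4_neon
     */
    #ifndef vp8_quantize_quantb_4x4
    #define vp8_quantize_quantb_4x4 vp8_regular_quantize_b_4x4  /* C fallback */
    #endif
    extern prototype_quantize_block(vp8_quantize_quantb_4x4);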
@@ -56,10 +56,6 @@
 extern void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x);
 extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);
 
-#if CONFIG_HYBRIDTRANSFORM
-extern void vp8_ht_quantize_b(BLOCK *b, BLOCKD *d);
-#endif
-
 #define MAXF(a,b) (((a) > (b)) ? (a) : (b))
 
 #define INVALID_MV 0x80008000
@@ -771,11 +767,11 @@ static void macro_block_yrd_4x4(MACROBLOCK *mb,
 
   // Quantization
   for (b = 0; b < 16; b++) {
-    mb->quantize_b(&mb->block[b], &xd->block[b]);
+    mb->quantize_b_4x4(&mb->block[b], &xd->block[b]);
   }
 
   // DC predication and Quantization of 2nd Order block
-  mb->quantize_b(mb_y2, x_y2);
+  mb->quantize_b_4x4(mb_y2, x_y2);
 
   // Distortion
   d = ENCODEMB_INVOKE(&rtcd->encodemb, mberr)(mb, 1);
@@ -785,7 +781,7 @@ static void macro_block_yrd_4x4(MACROBLOCK *mb,
   *Distortion = (d >> 2);
   // rate
   *Rate = vp8_rdcost_mby(mb);
-  *skippable = mby_is_skippable(&mb->e_mbd, 1);
+  *skippable = mby_is_skippable_4x4(&mb->e_mbd, 1);
 }
 
 static int vp8_rdcost_mby_8x8(MACROBLOCK *mb, int backup) {
@@ -1206,14 +1202,14 @@ static int64_t rd_pick_intra4x4block(VP8_COMP *cpi, MACROBLOCK *x, BLOCK *be,
       b->bmi.as_mode.test = mode;
       txfm_map(b, mode);
       vp8_fht_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type, 4);
-      vp8_ht_quantize_b(be, b);
+      vp8_ht_quantize_b_4x4(be, b);
     } else {
       x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
-      x->quantize_b(be, b);
+      x->quantize_b_4x4(be, b);
     }
 #else
     x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
-    x->quantize_b(be, b);
+    x->quantize_b_4x4(be, b);
 #endif
 
     tempa = ta;
@@ -1592,10 +1588,10 @@ static int64_t rd_pick_intra8x8block(VP8_COMP *cpi, MACROBLOCK *x, int ib,
     x->vp8_short_fdct8x4(be->src_diff, be->coeff, 32);
     x->vp8_short_fdct8x4((be + 4)->src_diff, (be + 4)->coeff, 32);
 
-    x->quantize_b_pair(x->block + ib, x->block + ib + 1,
-                       xd->block + ib, xd->block + ib + 1);
-    x->quantize_b_pair(x->block + ib + 4, x->block + ib + 5,
-                       xd->block + ib + 4, xd->block + ib + 5);
+    x->quantize_b_4x4_pair(x->block + ib, x->block + ib + 1,
+                           xd->block + ib, xd->block + ib + 1);
+    x->quantize_b_4x4_pair(x->block + ib + 4, x->block + ib + 5,
+                           xd->block + ib + 4, xd->block + ib + 5);
 
     distortion = vp8_block_error_c((x->block + ib)->coeff,
                                    (xd->block + ib)->dqcoeff, 16);
@@ -1745,12 +1741,12 @@ static int64_t rd_inter16x16_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
                 x->e_mbd.predictor,
                 x->src.uv_stride);
 
-  vp8_transform_mbuv(x);
-  vp8_quantize_mbuv(x);
+  vp8_transform_mbuv_4x4(x);
+  vp8_quantize_mbuv_4x4(x);
 
   *rate = rd_cost_mbuv(x);
   *distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
-  *skip = mbuv_is_skippable(&x->e_mbd);
+  *skip = mbuv_is_skippable_4x4(&x->e_mbd);
 
   return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
 }
@@ -1855,8 +1851,8 @@ static int64_t rd_inter4x4_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
   ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
     x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
 
-  vp8_transform_mbuv(x);
-  vp8_quantize_mbuv(x);
+  vp8_transform_mbuv_4x4(x);
+  vp8_quantize_mbuv_4x4(x);
 
   *rate = rd_cost_mbuv(x);
   *distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
@@ -1908,8 +1904,8 @@ static void rd_pick_intra_mbuv_mode(VP8_COMP *cpi,
     ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
       x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor,
       x->src.uv_stride);
-    vp8_transform_mbuv(x);
-    vp8_quantize_mbuv(x);
+    vp8_transform_mbuv_4x4(x);
+    vp8_quantize_mbuv_4x4(x);
 
     rate_to = rd_cost_mbuv(x);
     rate = rate_to
@@ -1920,7 +1916,7 @@ static void rd_pick_intra_mbuv_mode(VP8_COMP *cpi,
     this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
 
     if (this_rd < best_rd) {
-      skip = mbuv_is_skippable(xd);
+      skip = mbuv_is_skippable_4x4(xd);
       best_rd = this_rd;
       d = distortion;
       r = rate;
@@ -2252,7 +2248,7 @@ static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x,
 
         // set to 0 no way to account for 2nd order DC so discount
         // be->coeff[0] = 0;
-        x->quantize_b(be, bd);
+        x->quantize_b_4x4(be, bd);
         thisdistortion = ENCODEMB_INVOKE(&rtcd->encodemb, berr)(
           be->coeff, bd->dqcoeff, 16) / 4;
         distortion += thisdistortion;
@@ -60,8 +60,8 @@ extern unsigned int hybrid_tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
 #endif
 #endif  /* ENTROPY_STATS */
 
-void vp8_stuff_mb(VP8_COMP *cpi,
-                  MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
+void vp8_stuff_mb_4x4(VP8_COMP *cpi,
+                      MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
 void vp8_stuff_mb_8x8(VP8_COMP *cpi,
                       MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
 void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
@@ -258,10 +258,10 @@ static void tokenize2nd_order_b_8x8
 
 }
 
-static void tokenize2nd_order_b(MACROBLOCKD *xd,
-                                TOKENEXTRA **tp,
-                                VP8_COMP *cpi,
-                                int dry_run) {
+static void tokenize2nd_order_b_4x4(MACROBLOCKD *xd,
+                                    TOKENEXTRA **tp,
+                                    VP8_COMP *cpi,
+                                    int dry_run) {
   int pt;             /* near block/prev token context index */
   int c;              /* start at DC */
   TOKENEXTRA *t = *tp;/* store tokens starting here */
@@ -424,11 +424,11 @@ static void tokenize1st_order_b_8x8
 }
 
 #if CONFIG_HYBRIDTRANSFORM
-static void tokenize1st_order_ht( MACROBLOCKD *xd,
-                                  TOKENEXTRA **tp,
-                                  int type,
-                                  VP8_COMP *cpi,
-                                  int dry_run) {
+static void tokenize1st_order_ht_4x4(MACROBLOCKD *xd,
+                                     TOKENEXTRA **tp,
+                                     int type,
+                                     VP8_COMP *cpi,
+                                     int dry_run) {
   unsigned int block;
   const BLOCKD *b;
   int pt;             /* near block/prev token context index */
@@ -603,7 +603,7 @@ static void tokenize1st_order_ht( MACROBLOCKD *xd,
 #endif
 
 
-static void tokenize1st_order_chroma
+static void tokenize1st_order_chroma_4x4
 (
   MACROBLOCKD *xd,
   TOKENEXTRA **tp,
@@ -682,7 +682,7 @@ static void tokenize1st_order_chroma
   }
 }
 
-static void tokenize1st_order_b
+static void tokenize1st_order_b_4x4
 (
   MACROBLOCKD *xd,
   TOKENEXTRA **tp,
@@ -805,7 +805,7 @@ static void tokenize1st_order_b
 }
 
 
-int mby_is_skippable(MACROBLOCKD *xd, int has_y2_block) {
+int mby_is_skippable_4x4(MACROBLOCKD *xd, int has_y2_block) {
   int skip = 1;
   int i = 0;
 
@@ -820,7 +820,7 @@ int mby_is_skippable(MACROBLOCKD *xd, int has_y2_block) {
   return skip;
 }
 
-int mbuv_is_skippable(MACROBLOCKD *xd) {
+int mbuv_is_skippable_4x4(MACROBLOCKD *xd) {
   int skip = 1;
   int i;
 
@@ -829,9 +829,9 @@ int mbuv_is_skippable(MACROBLOCKD *xd) {
   return skip;
 }
 
-int mb_is_skippable(MACROBLOCKD *xd, int has_y2_block) {
-  return (mby_is_skippable(xd, has_y2_block) &
-          mbuv_is_skippable(xd));
+int mb_is_skippable_4x4(MACROBLOCKD *xd, int has_y2_block) {
+  return (mby_is_skippable_4x4(xd, has_y2_block) &
+          mbuv_is_skippable_4x4(xd));
 }
 
 int mby_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block) {
@@ -860,7 +860,7 @@ int mb_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block) {
 
 int mb_is_skippable_8x8_4x4uv(MACROBLOCKD *xd, int has_y2_block) {
   return (mby_is_skippable_8x8(xd, has_y2_block) &
-          mbuv_is_skippable(xd));
+          mbuv_is_skippable_4x4(xd));
 }
 
 int mby_is_skippable_16x16(MACROBLOCKD *xd) {
@@ -922,7 +922,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
       break;
 
     default:
-      xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable(xd, has_y2_block);
+      xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_4x4(xd, has_y2_block);
       break;
  }
 
@@ -939,7 +939,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
       else
        vp8_stuff_mb_8x8(cpi, xd, t, dry_run);
     } else
-      vp8_stuff_mb(cpi, xd, t, dry_run);
+      vp8_stuff_mb_4x4(cpi, xd, t, dry_run);
   } else {
     vp8_fix_contexts(xd);
  }
@@ -962,7 +962,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
                               L + vp8_block2left_8x8[24],
                               cpi, dry_run);
     } else
-      tokenize2nd_order_b(xd, t, cpi, dry_run);
+      tokenize2nd_order_b_4x4(xd, t, cpi, dry_run);
 
     plane_type = 0;
  }
@@ -1004,7 +1004,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
       *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
     }
     if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
-      tokenize1st_order_chroma(xd, t, PLANE_TYPE_UV, cpi, dry_run);
+      tokenize1st_order_chroma_4x4(xd, t, PLANE_TYPE_UV, cpi, dry_run);
     } else {
       for (b = 16; b < 24; b += 4) {
         tokenize1st_order_b_8x8(xd,
@@ -1019,10 +1019,10 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
   } else {
 #if CONFIG_HYBRIDTRANSFORM
     if (active_ht)
-      tokenize1st_order_ht(xd, t, plane_type, cpi, dry_run);
+      tokenize1st_order_ht_4x4(xd, t, plane_type, cpi, dry_run);
     else
 #endif
-      tokenize1st_order_b(xd, t, plane_type, cpi, dry_run);
+      tokenize1st_order_b_4x4(xd, t, plane_type, cpi, dry_run);
   }
   if (dry_run)
     *t = t_backup;
@@ -1492,7 +1492,7 @@ void vp8_stuff_mb_16x16(VP8_COMP *cpi,
   *t = t_backup;
 }
 
-static __inline void stuff2nd_order_b
+static __inline void stuff2nd_order_b_4x4
 (
   MACROBLOCKD *xd,
   const BLOCKD *const b,
@@ -1518,13 +1518,13 @@ static __inline void stuff2nd_order_b
 
 }
 
-static __inline void stuff1st_order_b(MACROBLOCKD *xd,
-                                      const BLOCKD *const b,
-                                      TOKENEXTRA **tp,
-                                      ENTROPY_CONTEXT *a,
-                                      ENTROPY_CONTEXT *l,
-                                      VP8_COMP *cpi,
-                                      int dry_run) {
+static __inline void stuff1st_order_b_4x4(MACROBLOCKD *xd,
+                                          const BLOCKD *const b,
+                                          TOKENEXTRA **tp,
+                                          ENTROPY_CONTEXT *a,
+                                          ENTROPY_CONTEXT *l,
+                                          VP8_COMP *cpi,
+                                          int dry_run) {
   int pt;     /* near block/prev token context index */
   TOKENEXTRA *t = *tp;        /* store tokens starting here */
 #if CONFIG_HYBRIDTRANSFORM
@@ -1555,7 +1555,7 @@ static __inline void stuff1st_order_b(MACROBLOCKD *xd,
 
 }
 static __inline
-void stuff1st_order_buv
+void stuff1st_order_buv_4x4
 (
   MACROBLOCKD *xd,
   const BLOCKD *const b,
@@ -1579,29 +1579,29 @@ void stuff1st_order_buv
   *a = *l = pt;
 }
 
-void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd,
-                  TOKENEXTRA **t, int dry_run) {
+void vp8_stuff_mb_4x4(VP8_COMP *cpi, MACROBLOCKD *xd,
+                      TOKENEXTRA **t, int dry_run) {
   ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
   ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
   int b;
   TOKENEXTRA *t_backup = *t;
 
-  stuff2nd_order_b(xd, xd->block + 24, t,
-                   A + vp8_block2above[24],
-                   L + vp8_block2left[24],
-                   cpi, dry_run);
+  stuff2nd_order_b_4x4(xd, xd->block + 24, t,
+                       A + vp8_block2above[24],
+                       L + vp8_block2left[24],
+                       cpi, dry_run);
 
   for (b = 0; b < 16; b++)
-    stuff1st_order_b(xd, xd->block + b, t,
-                     A + vp8_block2above[b],
-                     L + vp8_block2left[b],
-                     cpi, dry_run);
+    stuff1st_order_b_4x4(xd, xd->block + b, t,
+                         A + vp8_block2above[b],
+                         L + vp8_block2left[b],
+                         cpi, dry_run);
 
   for (b = 16; b < 24; b++)
-    stuff1st_order_buv(xd, xd->block + b, t,
-                       A + vp8_block2above[b],
-                       L + vp8_block2left[b],
-                       cpi, dry_run);
+    stuff1st_order_buv_4x4(xd, xd->block + b, t,
+                           A + vp8_block2above[b],
+                           L + vp8_block2left[b],
+                           cpi, dry_run);
 
   if (dry_run)
     *t = t_backup;
@@ -1632,10 +1632,10 @@ void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
   }
 
   for (b = 16; b < 24; b++)
-    stuff1st_order_buv(xd, xd->block + b, t,
-                       A + vp8_block2above[b],
-                       L + vp8_block2left[b],
-                       cpi, dry_run);
+    stuff1st_order_buv_4x4(xd, xd->block + b, t,
+                           A + vp8_block2above[b],
+                           L + vp8_block2left[b],
+                           cpi, dry_run);
 
   if (dry_run)
     *t = t_backup;
@@ -31,9 +31,9 @@ typedef struct {
 
 int rd_cost_mby(MACROBLOCKD *);
 
-extern int mby_is_skippable(MACROBLOCKD *xd, int has_y2_block);
-extern int mbuv_is_skippable(MACROBLOCKD *xd);
-extern int mb_is_skippable(MACROBLOCKD *xd, int has_y2_block);
+extern int mby_is_skippable_4x4(MACROBLOCKD *xd, int has_y2_block);
+extern int mbuv_is_skippable_4x4(MACROBLOCKD *xd);
+extern int mb_is_skippable_4x4(MACROBLOCKD *xd, int has_y2_block);
 extern int mby_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block);
 extern int mbuv_is_skippable_8x8(MACROBLOCKD *xd);
 extern int mb_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block);