Cleanup: removing unused function arguments.
Change-Id: I27471768980fc631916069f24bc7c482a5c9ca17
parent b621e2d72e
commit ce8dedc353
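The pattern applied in every hunk below is the same: a parameter that a function no longer reads is dropped from its declaration and definition, and every call site is updated in the same commit. A minimal standalone sketch of that pattern (hypothetical code, not taken from libvpx) looks like this:

/* Hypothetical example of the cleanup pattern; blocks_per_row is an
 * illustrative helper, not a libvpx function. */
#include <stdio.h>

/* Before: static int blocks_per_row(const struct ctx *c, int width);
 * The context pointer was never used inside the function, so it is removed. */
static int blocks_per_row(int width) {
  return (width + 7) >> 3;  /* width in 8-pixel blocks, rounded up */
}

int main(void) {
  /* Call sites drop the argument too: blocks_per_row(c, 64) -> blocks_per_row(64). */
  printf("%d\n", blocks_per_row(64));
  return 0;
}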
@@ -240,8 +240,7 @@ static INLINE void set_partition_seg_context(VP9_COMMON *cm, MACROBLOCKD *xd,
   xd->left_seg_context = cm->left_seg_context + (mi_row & MI_MASK);
 }
 
-static int check_bsize_coverage(VP9_COMMON *cm, MACROBLOCKD *xd,
-                                int mi_row, int mi_col,
+static int check_bsize_coverage(VP9_COMMON *cm, int mi_row, int mi_col,
                                 BLOCK_SIZE_TYPE bsize) {
   int bsl = mi_width_log2(bsize), bs = 1 << bsl;
   int ms = bs / 2;
@@ -287,7 +287,7 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
       return;
   } else {
     int pl;
-    const int idx = check_bsize_coverage(pc, xd, mi_row, mi_col, bsize);
+    const int idx = check_bsize_coverage(pc, mi_row, mi_col, bsize);
     set_partition_seg_context(pc, xd, mi_row, mi_col);
     pl = partition_plane_context(xd, bsize);
 
@@ -501,7 +501,7 @@ static INTERPOLATIONFILTERTYPE read_interp_filter_type(
              : vp9_rb_read_literal(rb, 2);
 }
 
-static void read_frame_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb,
+static void read_frame_size(struct vp9_read_bit_buffer *rb,
                             int *width, int *height) {
   const int w = vp9_rb_read_literal(rb, 16) + 1;
   const int h = vp9_rb_read_literal(rb, 16) + 1;
@@ -509,12 +509,11 @@ static void read_frame_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb,
   *height = h;
 }
 
-static void setup_display_size(VP9D_COMP *pbi, struct vp9_read_bit_buffer *rb) {
-  VP9_COMMON *const cm = &pbi->common;
+static void setup_display_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
   cm->display_width = cm->width;
   cm->display_height = cm->height;
   if (vp9_rb_read_bit(rb))
-    read_frame_size(cm, rb, &cm->display_width, &cm->display_height);
+    read_frame_size(rb, &cm->display_width, &cm->display_height);
 }
 
 static void apply_frame_size(VP9D_COMP *pbi, int width, int height) {
@@ -550,10 +549,9 @@ static void apply_frame_size(VP9D_COMP *pbi, int width, int height) {
 
 static void setup_frame_size(VP9D_COMP *pbi,
                              struct vp9_read_bit_buffer *rb) {
-  VP9_COMMON *const cm = &pbi->common;
   int width, height;
-  read_frame_size(cm, rb, &width, &height);
-  setup_display_size(pbi, rb);
+  read_frame_size(rb, &width, &height);
+  setup_display_size(&pbi->common, rb);
   apply_frame_size(pbi, width, height);
 }
 
@@ -574,13 +572,13 @@ static void setup_frame_size_with_refs(VP9D_COMP *pbi,
   }
 
   if (!found)
-    read_frame_size(cm, rb, &width, &height);
+    read_frame_size(rb, &width, &height);
 
   if (!width || !height)
     vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                        "Referenced frame with invalid size");
 
-  setup_display_size(pbi, rb);
+  setup_display_size(cm, rb);
   apply_frame_size(pbi, width, height);
 }
 
@@ -169,7 +169,6 @@ void vp9_encode_unsigned_max(struct vp9_write_bit_buffer *wb,
 static void update_mode(
   vp9_writer *w,
   int n,
-  const struct vp9_token tok[/* n */],
   vp9_tree tree,
   vp9_prob Pnew[/* n-1 */],
   vp9_prob Pcur[/* n-1 */],
@@ -194,8 +193,7 @@ static void update_mbintra_mode_probs(VP9_COMP* const cpi,
   unsigned int bct[VP9_INTRA_MODES - 1][2];
 
   for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
-    update_mode(bc, VP9_INTRA_MODES, vp9_intra_mode_encodings,
-                vp9_intra_mode_tree, pnew,
+    update_mode(bc, VP9_INTRA_MODES, vp9_intra_mode_tree, pnew,
                 cm->fc.y_mode_prob[j], bct,
                 (unsigned int *)cpi->y_mode_count[j]);
 }
@@ -398,8 +396,7 @@ static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) {
   // the reference frame is fully coded by the segment
 }
 
-static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
-                                vp9_writer *bc, int mi_row, int mi_col) {
+static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
   VP9_COMMON *const pc = &cpi->common;
   const nmv_context *nmvc = &pc->fc.nmvc;
   MACROBLOCK *const x = &cpi->mb;
@@ -533,9 +530,8 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
   }
 }
 
-static void write_mb_modes_kf(const VP9_COMP *cpi,
-                              MODE_INFO *m,
-                              vp9_writer *bc, int mi_row, int mi_col) {
+static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO *m,
+                              vp9_writer *bc) {
   const VP9_COMMON *const c = &cpi->common;
   const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
   const int ym = m->mbmi.mode;
@@ -591,12 +587,12 @@ static void write_modes_b(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
                  1 << mi_height_log2(m->mbmi.sb_type),
                  mi_col, 1 << mi_width_log2(m->mbmi.sb_type));
   if ((cm->frame_type == KEY_FRAME) || cm->intra_only) {
-    write_mb_modes_kf(cpi, m, bc, mi_row, mi_col);
+    write_mb_modes_kf(cpi, m, bc);
 #ifdef ENTROPY_STATS
     active_section = 8;
 #endif
   } else {
-    pack_inter_mode_mvs(cpi, m, bc, mi_row, mi_col);
+    pack_inter_mode_mvs(cpi, m, bc);
 #ifdef ENTROPY_STATS
     active_section = 1;
 #endif
@@ -630,7 +626,7 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
 
   if (bsize >= BLOCK_SIZE_SB8X8) {
     int pl;
-    const int idx = check_bsize_coverage(cm, xd, mi_row, mi_col, bsize);
+    const int idx = check_bsize_coverage(cm, mi_row, mi_col, bsize);
     set_partition_seg_context(cm, xd, mi_row, mi_col);
     pl = partition_plane_context(xd, bsize);
     // encode the partition information
@@ -1427,7 +1423,7 @@ static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
     for (i = 0; i < NUM_PARTITION_CONTEXTS; ++i) {
       vp9_prob pnew[PARTITION_TYPES - 1];
       unsigned int bct[PARTITION_TYPES - 1][2];
-      update_mode(&header_bc, PARTITION_TYPES, vp9_partition_encodings,
+      update_mode(&header_bc, PARTITION_TYPES,
                   vp9_partition_tree, pnew,
                   fc->partition_prob[cm->frame_type][i], bct,
                   (unsigned int *)cpi->partition_count[i]);
@@ -64,7 +64,7 @@ static const uint8_t VP9_VAR_OFFS[16] = {128, 128, 128, 128, 128, 128, 128, 128,
                                          128, 128, 128, 128, 128, 128, 128, 128};
 
 // Original activity measure from Tim T's code.
-static unsigned int tt_activity_measure(VP9_COMP *cpi, MACROBLOCK *x) {
+static unsigned int tt_activity_measure(MACROBLOCK *x) {
   unsigned int act;
   unsigned int sse;
   /* TODO: This could also be done over smaller areas (8x8), but that would
@@ -106,7 +106,7 @@ static unsigned int mb_activity_measure(VP9_COMP *cpi, MACROBLOCK *x,
     mb_activity = alt_activity_measure(cpi, x, use_dc_pred);
   } else {
     // Original activity measure from Tim T's code.
-    mb_activity = tt_activity_measure(cpi, x);
+    mb_activity = tt_activity_measure(x);
   }
 
   if (mb_activity < VP9_ACTIVITY_AVG_MIN)
@@ -573,7 +573,7 @@ static void pick_sb_modes(VP9_COMP *cpi, int mi_row, int mi_col,
                            bsize, ctx, best_rd);
 }
 
-static void update_stats(VP9_COMP *cpi, int mi_row, int mi_col) {
+static void update_stats(VP9_COMP *cpi) {
   VP9_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &cpi->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -756,7 +756,7 @@ static void encode_b(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
   encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize);
 
   if (output_enabled) {
-    update_stats(cpi, mi_row, mi_col);
+    update_stats(cpi);
 
     (*tp)->token = EOSB_TOKEN;
     (*tp)++;
@@ -1881,9 +1881,8 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
     else
       cpi->unused_mode_skip_mask = 0xFFFFFFFFFFFFFE00;
 
-    if (cpi->sf.reference_masking) {
+    if (cpi->sf.reference_masking)
       rd_pick_reference_frame(cpi, mi_row, mi_col);
-    }
 
     if (cpi->sf.partition_by_variance || cpi->sf.use_lastframe_partitioning ||
         cpi->sf.use_one_partition_size_always ) {
@@ -47,7 +47,7 @@ static void inverse_transform_b_4x4_add(MACROBLOCKD *xd, int eob,
     xd->inv_txm4x4_add(dqcoeff, dest, stride);
 }
 
-static void inverse_transform_b_8x8_add(MACROBLOCKD *xd, int eob,
+static void inverse_transform_b_8x8_add(int eob,
                                         int16_t *dqcoeff, uint8_t *dest,
                                         int stride) {
   if (eob <= 1)
@@ -58,7 +58,7 @@ static void inverse_transform_b_8x8_add(MACROBLOCKD *xd, int eob,
     vp9_short_idct8x8_add(dqcoeff, dest, stride);
 }
 
-static void inverse_transform_b_16x16_add(MACROBLOCKD *xd, int eob,
+static void inverse_transform_b_16x16_add(int eob,
                                           int16_t *dqcoeff, uint8_t *dest,
                                           int stride) {
   if (eob <= 1)
@@ -141,7 +141,7 @@ static int trellis_get_coeff_context(const int16_t *scan,
   return pt;
 }
 
-static void optimize_b(VP9_COMMON *const cm, MACROBLOCK *mb,
+static void optimize_b(MACROBLOCK *mb,
                        int plane, int block, BLOCK_SIZE_TYPE bsize,
                        ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
                        TX_SIZE tx_size) {
@@ -372,7 +372,7 @@ static void optimize_b(VP9_COMMON *const cm, MACROBLOCK *mb,
 }
 
 void vp9_optimize_b(int plane, int block, BLOCK_SIZE_TYPE bsize,
-                    int ss_txfrm_size, VP9_COMMON *cm, MACROBLOCK *mb,
+                    int ss_txfrm_size, MACROBLOCK *mb,
                     struct optimize_ctx *ctx) {
   MACROBLOCKD *const xd = &mb->e_mbd;
   int x, y;
@@ -380,15 +380,14 @@ void vp9_optimize_b(int plane, int block, BLOCK_SIZE_TYPE bsize,
   // find current entropy context
   txfrm_block_to_raster_xy(xd, bsize, plane, block, ss_txfrm_size, &x, &y);
 
-  optimize_b(cm, mb, plane, block, bsize,
+  optimize_b(mb, plane, block, bsize,
              &ctx->ta[plane][x], &ctx->tl[plane][y], ss_txfrm_size / 2);
 }
 
 static void optimize_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
                            int ss_txfrm_size, void *arg) {
   const struct encode_b_args* const args = arg;
-  vp9_optimize_b(plane, block, bsize, ss_txfrm_size, args->cm, args->x,
-                 args->ctx);
+  vp9_optimize_b(plane, block, bsize, ss_txfrm_size, args->x, args->ctx);
 }
 
 void optimize_init_b(int plane, BLOCK_SIZE_TYPE bsize, void *arg) {
@@ -539,7 +538,7 @@ static void encode_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
   xform_quant(plane, block, bsize, ss_txfrm_size, arg);
 
   if (x->optimize)
-    vp9_optimize_b(plane, block, bsize, ss_txfrm_size, args->cm, x, args->ctx);
+    vp9_optimize_b(plane, block, bsize, ss_txfrm_size, x, args->ctx);
 
   if (x->skip_encode)
     return;
@@ -551,12 +550,12 @@ static void encode_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
       vp9_short_idct32x32_add(dqcoeff, dst, pd->dst.stride);
       break;
     case TX_16X16:
-      inverse_transform_b_16x16_add(xd, pd->eobs[block], dqcoeff,
-                                    dst, pd->dst.stride);
+      inverse_transform_b_16x16_add(pd->eobs[block], dqcoeff, dst,
+                                    pd->dst.stride);
       break;
     case TX_8X8:
-      inverse_transform_b_8x8_add(xd, pd->eobs[block], dqcoeff,
-                                  dst, pd->dst.stride);
+      inverse_transform_b_8x8_add(pd->eobs[block], dqcoeff, dst,
+                                  pd->dst.stride);
       break;
     case TX_4X4:
       // this is like vp9_short_idct4x4 but has a special case around eob<=1
@@ -654,7 +653,7 @@ void encode_block_intra(int plane, int block, BLOCK_SIZE_TYPE bsize,
 
   // if (x->optimize)
   //   vp9_optimize_b(plane, block, bsize, ss_txfrm_size,
-  //                  args->cm, x, args->ctx);
+  //                  x, args->ctx);
 
   switch (tx_size) {
     case TX_32X32:
@@ -705,7 +704,7 @@ void encode_block_intra(int plane, int block, BLOCK_SIZE_TYPE bsize,
                      pd->dequant, p->zbin_extra, eob, scan, iscan);
       if (!x->skip_encode && *eob) {
         if (tx_type == DCT_DCT)
-          inverse_transform_b_16x16_add(xd, *eob, dqcoeff, dst, pd->dst.stride);
+          inverse_transform_b_16x16_add(*eob, dqcoeff, dst, pd->dst.stride);
         else
           vp9_short_iht16x16_add(dqcoeff, dst, pd->dst.stride, tx_type);
       }
@@ -734,7 +733,7 @@ void encode_block_intra(int plane, int block, BLOCK_SIZE_TYPE bsize,
                      pd->dequant, p->zbin_extra, eob, scan, iscan);
       if (!x->skip_encode && *eob) {
         if (tx_type == DCT_DCT)
-          inverse_transform_b_8x8_add(xd, *eob, dqcoeff, dst, pd->dst.stride);
+          inverse_transform_b_8x8_add(*eob, dqcoeff, dst, pd->dst.stride);
         else
           vp9_short_iht8x8_add(dqcoeff, dst, pd->dst.stride, tx_type);
       }
@@ -34,7 +34,7 @@ struct encode_b_args {
 };
 
 void vp9_optimize_b(int plane, int block, BLOCK_SIZE_TYPE bsize,
-                    int ss_txfrm_size, VP9_COMMON *cm, MACROBLOCK *x,
+                    int ss_txfrm_size, MACROBLOCK *x,
                     struct optimize_ctx *ctx);
 void vp9_optimize_sby(VP9_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
 void vp9_optimize_sbuv(VP9_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
@@ -1282,7 +1282,6 @@ static int detect_flash(VP9_COMP *cpi, int offset) {
 
 // Update the motion related elements to the GF arf boost calculation
 static void accumulate_frame_motion_stats(
-  VP9_COMP *cpi,
   FIRSTPASS_STATS *this_frame,
   double *this_frame_mv_in_out,
   double *mv_in_out_accumulator,
@@ -1377,7 +1376,7 @@ static int calc_arf_boost(VP9_COMP *cpi, int offset,
       break;
 
     // Update the motion related elements to the boost calculation
-    accumulate_frame_motion_stats(cpi, &this_frame,
+    accumulate_frame_motion_stats(&this_frame,
                                   &this_frame_mv_in_out, &mv_in_out_accumulator,
                                   &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
 
@@ -1413,7 +1412,7 @@ static int calc_arf_boost(VP9_COMP *cpi, int offset,
       break;
 
    // Update the motion related elements to the boost calculation
-    accumulate_frame_motion_stats(cpi, &this_frame,
+    accumulate_frame_motion_stats(&this_frame,
                                   &this_frame_mv_in_out, &mv_in_out_accumulator,
                                   &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
 
@@ -1665,7 +1664,7 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
     flash_detected = detect_flash(cpi, 0);
 
     // Update the motion related elements to the boost calculation
-    accumulate_frame_motion_stats(cpi, &next_frame,
+    accumulate_frame_motion_stats(&next_frame,
                                   &this_frame_mv_in_out, &mv_in_out_accumulator,
                                   &abs_mv_in_out_accumulator, &mv_ratio_accumulator);
 
@@ -77,9 +77,7 @@ static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi,
   return best_err;
 }
 
-static int do_16x16_motion_search(VP9_COMP *cpi,
-                                  int_mv *ref_mv, int_mv *dst_mv,
-                                  int buf_mb_y_offset, int mb_y_offset,
+static int do_16x16_motion_search(VP9_COMP *cpi, int_mv *ref_mv, int_mv *dst_mv,
                                   int mb_row, int mb_col) {
   MACROBLOCK *const x = &cpi->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -118,9 +116,7 @@ static int do_16x16_motion_search(VP9_COMP *cpi,
   return err;
 }
 
-static int do_16x16_zerozero_search(VP9_COMP *cpi,
-                                    int_mv *dst_mv,
-                                    int buf_mb_y_offset, int mb_y_offset) {
+static int do_16x16_zerozero_search(VP9_COMP *cpi, int_mv *dst_mv) {
   MACROBLOCK *const x = &cpi->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   unsigned int err;
@@ -210,7 +206,6 @@ static void update_mbgraph_mb_stats
     g_motion_error = do_16x16_motion_search(cpi,
                                             prev_golden_ref_mv,
                                             &stats->ref[GOLDEN_FRAME].m.mv,
-                                            mb_y_offset, gld_y_offset,
                                             mb_row, mb_col);
     stats->ref[GOLDEN_FRAME].err = g_motion_error;
   } else {
@@ -224,8 +219,7 @@ static void update_mbgraph_mb_stats
     xd->plane[0].pre[0].buf = alt_ref->y_buffer + mb_y_offset;
     xd->plane[0].pre[0].stride = alt_ref->y_stride;
     a_motion_error = do_16x16_zerozero_search(cpi,
-                                              &stats->ref[ALTREF_FRAME].m.mv,
-                                              mb_y_offset, arf_y_offset);
+                                              &stats->ref[ALTREF_FRAME].m.mv);
 
     stats->ref[ALTREF_FRAME].err = a_motion_error;
   } else {
@@ -525,7 +525,7 @@ static const int16_t band_counts[TX_SIZES][8] = {
   { 1, 2, 3, 4, 11, 1024 - 21, 0 },
 };
 
-static INLINE int cost_coeffs(VP9_COMMON *const cm, MACROBLOCK *mb,
+static INLINE int cost_coeffs(MACROBLOCK *mb,
                               int plane, int block, PLANE_TYPE type,
                               ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L,
                               TX_SIZE tx_size,
@@ -646,7 +646,7 @@ static void rate_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
   txfrm_block_to_raster_xy(xd, bsize, plane, block, args->tx_size * 2, &x_idx,
                            &y_idx);
 
-  args->rate += cost_coeffs(args->cm, args->x, plane, block,
+  args->rate += cost_coeffs(args->x, plane, block,
                             xd->plane[plane].plane_type, args->t_above + x_idx,
                             args->t_left + y_idx, args->tx_size,
                             args->scan, args->nb);
@@ -1188,7 +1188,6 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
   int64_t best_rd = rd_thresh;
   int rate = 0;
   int64_t distortion;
-  VP9_COMMON *const cm = &cpi->common;
   struct macroblock_plane *p = &x->plane[0];
   struct macroblockd_plane *pd = &xd->plane[0];
   const int src_stride = p->src.stride;
@@ -1260,7 +1259,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
       }
 
       scan = get_scan_4x4(get_tx_type_4x4(PLANE_TYPE_Y_WITH_DC, xd, block));
-      ratey += cost_coeffs(cm, x, 0, block, PLANE_TYPE_Y_WITH_DC,
+      ratey += cost_coeffs(x, 0, block, PLANE_TYPE_Y_WITH_DC,
                            tempa + idx, templ + idy, TX_4X4, scan,
                            vp9_get_coef_neighbors_handle(scan));
       distortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff,
@@ -1667,7 +1666,6 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi,
                                        ENTROPY_CONTEXT *ta,
                                        ENTROPY_CONTEXT *tl) {
   int k;
-  VP9_COMMON *const cm = &cpi->common;
   MACROBLOCKD *xd = &x->e_mbd;
   BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
   const int width = plane_block_width(bsize, &xd->plane[0]);
@@ -1730,7 +1728,7 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi,
                                     BLOCK_OFFSET(xd->plane[0].dqcoeff,
                                                  k, 16), 16, &ssz);
       thissse += ssz;
-      thisrate += cost_coeffs(cm, x, 0, k, PLANE_TYPE_Y_WITH_DC,
+      thisrate += cost_coeffs(x, 0, k, PLANE_TYPE_Y_WITH_DC,
                               ta + (k & 1),
                               tl + (k >> 1), TX_4X4,
                               vp9_default_scan_4x4,
|
||||
return scaled_ref_frame;
|
||||
}
|
||||
|
||||
static INLINE int get_switchable_rate(VP9_COMMON *cm, MACROBLOCK *x) {
|
||||
static INLINE int get_switchable_rate(MACROBLOCK *x) {
|
||||
MACROBLOCKD *xd = &x->e_mbd;
|
||||
MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
|
||||
|
||||
@@ -2893,7 +2891,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
       const int is_intpel_interp = intpel_mv;
       mbmi->interp_filter = filter;
       vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);
-      rs = get_switchable_rate(cm, x);
+      rs = get_switchable_rate(x);
       rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
 
       if (interpolating_intpel_seen && is_intpel_interp) {
@@ -2974,7 +2972,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
     mbmi->interp_filter = cm->mcomp_filter_type != SWITCHABLE ?
         cm->mcomp_filter_type : *best_filter;
     vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);
-    rs = (cm->mcomp_filter_type == SWITCHABLE ? get_switchable_rate(cm, x) : 0);
+    rs = cm->mcomp_filter_type == SWITCHABLE ? get_switchable_rate(x) : 0;
 
     if (pred_exists) {
       if (best_needs_copy) {
@@ -3008,7 +3006,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
   }
 
   if (cpi->common.mcomp_filter_type == SWITCHABLE)
-    *rate2 += get_switchable_rate(cm, x);
+    *rate2 += get_switchable_rate(x);
 
   if (!is_comp_pred) {
     if (cpi->active_map_enabled && x->active_ptr[0] == 0)
|
||||
if (tmp_rd == INT64_MAX)
|
||||
continue;
|
||||
cpi->rd_filter_cache[switchable_filter_index] = tmp_rd;
|
||||
rs = get_switchable_rate(cm, x);
|
||||
rs = get_switchable_rate(x);
|
||||
rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
|
||||
cpi->rd_filter_cache[VP9_SWITCHABLE_FILTERS] =
|
||||
MIN(cpi->rd_filter_cache[VP9_SWITCHABLE_FILTERS], tmp_rd + rs_rd);
|
||||
@ -3681,7 +3679,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
|
||||
continue;
|
||||
} else {
|
||||
if (cpi->common.mcomp_filter_type == SWITCHABLE) {
|
||||
int rs = get_switchable_rate(cm, x);
|
||||
int rs = get_switchable_rate(x);
|
||||
tmp_best_rdu -= RDCOST(x->rdmult, x->rddiv, rs, 0);
|
||||
}
|
||||
tmp_rd = tmp_best_rdu;
|
||||
@@ -3700,7 +3698,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
      distortion2 += distortion;
 
      if (cpi->common.mcomp_filter_type == SWITCHABLE)
-        rate2 += get_switchable_rate(cm, x);
+        rate2 += get_switchable_rate(x);
 
      if (!mode_excluded) {
        if (is_comp_pred)
@@ -57,8 +57,7 @@ void vp9_set_segment_data(VP9_PTR ptr,
 }
 
 // Based on set of segment counts calculate a probability tree
-static void calc_segtree_probs(MACROBLOCKD *xd, int *segcounts,
-                               vp9_prob *segment_tree_probs) {
+static void calc_segtree_probs(int *segcounts, vp9_prob *segment_tree_probs) {
   // Work out probabilities of each segment
   const int c01 = segcounts[0] + segcounts[1];
   const int c23 = segcounts[2] + segcounts[3];
@@ -75,7 +74,7 @@ static void calc_segtree_probs(MACROBLOCKD *xd, int *segcounts,
 }
 
 // Based on set of segment counts and probabilities calculate a cost estimate
-static int cost_segmap(MACROBLOCKD *xd, int *segcounts, vp9_prob *probs) {
+static int cost_segmap(int *segcounts, vp9_prob *probs) {
   const int c01 = segcounts[0] + segcounts[1];
   const int c23 = segcounts[2] + segcounts[3];
   const int c45 = segcounts[4] + segcounts[5];
@@ -211,7 +210,7 @@ static void count_segs_sb(VP9_COMP *cpi, MODE_INFO *mi,
 
 void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
   VP9_COMMON *const cm = &cpi->common;
-  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
+  struct segmentation *seg = &cpi->mb.e_mbd.seg;
 
   int no_pred_cost;
   int t_pred_cost = INT_MAX;
@@ -231,8 +230,8 @@ void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
 
   // Set default state for the segment tree probabilities and the
   // temporal coding probabilities
-  vpx_memset(xd->seg.tree_probs, 255, sizeof(xd->seg.tree_probs));
-  vpx_memset(xd->seg.pred_probs, 255, sizeof(xd->seg.pred_probs));
+  vpx_memset(seg->tree_probs, 255, sizeof(seg->tree_probs));
+  vpx_memset(seg->pred_probs, 255, sizeof(seg->pred_probs));
 
   vpx_memset(no_pred_segcounts, 0, sizeof(no_pred_segcounts));
   vpx_memset(t_unpred_seg_counts, 0, sizeof(t_unpred_seg_counts));
|
||||
|
||||
// Work out probability tree for coding segments without prediction
|
||||
// and the cost.
|
||||
calc_segtree_probs(xd, no_pred_segcounts, no_pred_tree);
|
||||
no_pred_cost = cost_segmap(xd, no_pred_segcounts, no_pred_tree);
|
||||
calc_segtree_probs(no_pred_segcounts, no_pred_tree);
|
||||
no_pred_cost = cost_segmap(no_pred_segcounts, no_pred_tree);
|
||||
|
||||
// Key frames cannot use temporal prediction
|
||||
if (cm->frame_type != KEY_FRAME) {
|
||||
// Work out probability tree for coding those segments not
|
||||
// predicted using the temporal method and the cost.
|
||||
calc_segtree_probs(xd, t_unpred_seg_counts, t_pred_tree);
|
||||
t_pred_cost = cost_segmap(xd, t_unpred_seg_counts, t_pred_tree);
|
||||
calc_segtree_probs(t_unpred_seg_counts, t_pred_tree);
|
||||
t_pred_cost = cost_segmap(t_unpred_seg_counts, t_pred_tree);
|
||||
|
||||
// Add in the cost of the signalling for each prediction context
|
||||
for (i = 0; i < PREDICTION_PROBS; i++) {
|
||||
@ -280,11 +279,11 @@ void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
|
||||
|
||||
// Now choose which coding method to use.
|
||||
if (t_pred_cost < no_pred_cost) {
|
||||
xd->seg.temporal_update = 1;
|
||||
vpx_memcpy(xd->seg.tree_probs, t_pred_tree, sizeof(t_pred_tree));
|
||||
vpx_memcpy(xd->seg.pred_probs, t_nopred_prob, sizeof(t_nopred_prob));
|
||||
seg->temporal_update = 1;
|
||||
vpx_memcpy(seg->tree_probs, t_pred_tree, sizeof(t_pred_tree));
|
||||
vpx_memcpy(seg->pred_probs, t_nopred_prob, sizeof(t_nopred_prob));
|
||||
} else {
|
||||
xd->seg.temporal_update = 0;
|
||||
vpx_memcpy(xd->seg.tree_probs, no_pred_tree, sizeof(no_pred_tree));
|
||||
seg->temporal_update = 0;
|
||||
vpx_memcpy(seg->tree_probs, no_pred_tree, sizeof(no_pred_tree));
|
||||
}
|
||||
}
|
||||
|