Take out skip_encode speed feature in vp10

Change-Id: Ic39d4523e78863c816b0fc85f56ea5ae5e0b3310
Jingning Han 2015-09-10 12:42:21 -07:00
parent 4fa8e73249
commit f137697c32
7 changed files with 8 additions and 63 deletions
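
For context, the feature being removed gated reconstruction work during rate-distortion search: get_skip_encode_frame() used the previous frame's intra/inter block counts to decide whether a frame was eligible, and encode_superblock() then skipped the encode step whenever the frame was eligible and the block's q_index was below QIDX_SKIP_THRESH. The standalone C sketch below mirrors that two-level gate; it uses simplified stand-in types rather than the libvpx structs, and the QIDX_SKIP_THRESH value shown is an assumption.

/* Minimal standalone sketch of the heuristic being removed: reconstruction is
 * skipped in the RD loop when the previous frame was mostly inter coded and
 * the quantizer index is low. Types are simplified stand-ins. */
#include <stdio.h>

#define INTRA_INTER_CONTEXTS 4
#define QIDX_SKIP_THRESH 115 /* name from the removed code; value is an assumption */

/* Frame-level eligibility, mirroring the removed get_skip_encode_frame(). */
static int frame_allows_skip_encode(
    const unsigned int intra_inter[INTRA_INTER_CONTEXTS][2],
    int is_key_frame, int show_frame) {
  unsigned int intra_count = 0, inter_count = 0;
  int j;
  for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
    intra_count += intra_inter[j][0];  /* index 0 counts intra blocks */
    inter_count += intra_inter[j][1];  /* index 1 counts inter blocks */
  }
  /* Eligible only if intra blocks were rare (< 1/4 of inter) on a shown inter frame. */
  return (intra_count << 2) < inter_count && !is_key_frame && show_frame;
}

/* Block-level gate, mirroring the removed check in encode_superblock(). */
static int block_skips_encode(int output_enabled, int skip_encode_frame,
                              int q_index) {
  return !output_enabled && skip_encode_frame && q_index < QIDX_SKIP_THRESH;
}

int main(void) {
  /* Hypothetical per-context counts gathered from the previous frame. */
  const unsigned int counts[INTRA_INTER_CONTEXTS][2] = {
    { 10, 200 }, { 5, 150 }, { 2, 90 }, { 1, 60 }
  };
  const int frame_skip = frame_allows_skip_encode(counts, 0 /*key*/, 1 /*show*/);
  printf("frame eligible: %d, block skips encode: %d\n", frame_skip,
         block_skips_encode(0 /*output_enabled*/, frame_skip, 60 /*q_index*/));
  return 0;
}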

View File

@@ -115,7 +115,6 @@ struct macroblock {
   // indicate if it is in the rd search loop or encoding process
   int use_lp32x32fdct;
-  int skip_encode;
 
   // use fast quantization process
   int quant_fp;

View File

@@ -2585,20 +2585,6 @@ static TX_MODE select_tx_mode(const VP10_COMP *cpi, MACROBLOCKD *const xd) {
     return cpi->common.tx_mode;
 }
 
-static int get_skip_encode_frame(const VP10_COMMON *cm, ThreadData *const td) {
-  unsigned int intra_count = 0, inter_count = 0;
-  int j;
-
-  for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
-    intra_count += td->counts->intra_inter[j][0];
-    inter_count += td->counts->intra_inter[j][1];
-  }
-
-  return (intra_count << 2) < inter_count &&
-         cm->frame_type != KEY_FRAME &&
-         cm->show_frame;
-}
-
 void vp10_init_tile_data(VP10_COMP *cpi) {
   VP10_COMMON *const cm = &cpi->common;
   const int tile_cols = 1 << cm->log2_tile_cols;
@@ -2690,7 +2676,6 @@ static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
 #endif
 
 static void encode_frame_internal(VP10_COMP *cpi) {
-  SPEED_FEATURES *const sf = &cpi->sf;
   ThreadData *const td = &cpi->td;
   MACROBLOCK *const x = &td->mb;
   VP10_COMMON *const cm = &cpi->common;
@@ -2766,9 +2751,6 @@ static void encode_frame_internal(VP10_COMP *cpi) {
     cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
   }
 
-  sf->skip_encode_frame = sf->skip_encode_sb ?
-      get_skip_encode_frame(cm, td) : 0;
-
 #if 0
   // Keep record of the total distortion this time around for future use
   cpi->last_frame_distortion = cpi->frame_distortion;
@@ -2962,11 +2944,6 @@ static void encode_superblock(VP10_COMP *cpi, ThreadData *td,
   x->skip_optimize = ctx->is_coded;
   ctx->is_coded = 1;
   x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
-  x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame &&
-                    x->q_index < QIDX_SKIP_THRESH);
-  if (x->skip_encode)
-    return;
-
   if (!is_inter_block(mbmi)) {
     int plane;

View File

@@ -797,7 +797,7 @@ static void encode_block(int plane, int block, BLOCK_SIZE plane_bsize,
   if (p->eobs[block])
     *(args->skip) = 0;
 
-  if (x->skip_encode || p->eobs[block] == 0)
+  if (p->eobs[block] == 0)
     return;
 #if CONFIG_VP9_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
@@ -945,8 +945,7 @@ void vp10_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
   src_diff = &p->src_diff[4 * (j * diff_stride + i)];
 
   mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
-  vp10_predict_intra_block(xd, bwl, tx_size, mode, x->skip_encode ? src : dst,
-                           x->skip_encode ? src_stride : dst_stride,
+  vp10_predict_intra_block(xd, bwl, tx_size, mode, dst, dst_stride,
                            dst, dst_stride, i, j, plane);
 
 #if CONFIG_VP9_HIGHBITDEPTH
@@ -1036,7 +1035,7 @@ void vp10_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
                              pd->dequant, eob, scan_order->scan,
                              scan_order->iscan);
       }
-      if (!x->skip_encode && *eob)
+      if (*eob)
         vp10_inv_txfm_add_32x32(dqcoeff, dst, dst_stride, *eob, tx_type);
       break;
     case TX_16X16:
@@ -1049,7 +1048,7 @@ void vp10_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
                              pd->dequant, eob, scan_order->scan,
                              scan_order->iscan);
       }
-      if (!x->skip_encode && *eob)
+      if (*eob)
         vp10_inv_txfm_add_16x16(dqcoeff, dst, dst_stride, *eob, tx_type);
       break;
     case TX_8X8:
@@ -1062,7 +1061,7 @@ void vp10_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
                              pd->dequant, eob, scan_order->scan,
                              scan_order->iscan);
       }
-      if (!x->skip_encode && *eob)
+      if (*eob)
         vp10_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, tx_type);
       break;
     case TX_4X4:
@@ -1076,7 +1075,7 @@ void vp10_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize,
                              scan_order->iscan);
       }
-      if (!x->skip_encode && *eob) {
+      if (*eob) {
         // this is like vp10_short_idct4x4 but has a special case around eob<=1
         // which is significant (not just an optimization) for the lossless
         // case.

View File

@@ -614,7 +614,6 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
                      cm->mi_rows, cm->mi_cols);
 
       // Do intra 16x16 prediction.
-      x->skip_encode = 0;
       xd->mi[0]->mbmi.mode = DC_PRED;
       xd->mi[0]->mbmi.tx_size = use_dc_pred ?
           (bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4;

View File

@@ -438,19 +438,6 @@ static void dist_block(MACROBLOCK *x, int plane, int block, TX_SIZE tx_size,
                                &this_sse) >> shift;
 #endif  // CONFIG_VP9_HIGHBITDEPTH
   *out_sse = this_sse >> shift;
-
-  if (x->skip_encode && !is_inter_block(&xd->mi[0]->mbmi)) {
-    // TODO(jingning): tune the model to better capture the distortion.
-    int64_t p = (pd->dequant[1] * pd->dequant[1] *
-                 (1 << ss_txfrm_size)) >>
-#if CONFIG_VP9_HIGHBITDEPTH
-        (shift + 2 + (bd - 8) * 2);
-#else
-        (shift + 2);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
-    *out_dist += (p >> 4);
-    *out_sse += p;
-  }
 }
 
 static int rate_block(int plane, int block, BLOCK_SIZE plane_bsize,
@@ -903,9 +890,7 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x,
           vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
       tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
       xd->mi[0]->bmi[block].as_mode = mode;
-      vp10_predict_intra_block(xd, 1, TX_4X4, mode,
-                               x->skip_encode ? src : dst,
-                               x->skip_encode ? src_stride : dst_stride,
+      vp10_predict_intra_block(xd, 1, TX_4X4, mode, dst, dst_stride,
                                dst, dst_stride, col + idx, row + idy, 0);
       vpx_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);
@@ -961,7 +946,7 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x,
       {}
   }
 
-  if (best_rd >= rd_thresh || x->skip_encode)
+  if (best_rd >= rd_thresh)
     return best_rd;
 
   for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
@@ -2739,7 +2724,6 @@ void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x,
   int y_skip = 0, uv_skip = 0;
   int64_t dist_y = 0, dist_uv = 0;
   TX_SIZE max_uv_tx_size;
-  x->skip_encode = 0;
   ctx->skip = 0;
   xd->mi[0]->mbmi.ref_frame[0] = INTRA_FRAME;
   xd->mi[0]->mbmi.ref_frame[1] = NONE;
@@ -2969,8 +2953,6 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
   vp10_zero(best_mbmode);
 
-  x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
-
   for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
     filter_cache[i] = INT64_MAX;
@@ -3563,8 +3545,6 @@ void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi,
   int rate2 = 0;
   const int64_t distortion2 = 0;
 
-  x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
-
   estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
                            &comp_mode_p);
@@ -3688,7 +3668,6 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi,
   int internal_active_edge =
       vp10_active_edge_sb(cpi, mi_row, mi_col) && vp10_internal_image_edge(cpi);
 
-  x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
   memset(x->zcoeff_blk[TX_4X4], 0, 4);
   vp10_zero(best_mbmode);

View File

@@ -301,7 +301,6 @@ static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf,
     sf->use_square_partition_only = 1;
     sf->disable_filter_search_var_thresh = 100;
     sf->use_uv_intra_rd_estimate = 1;
-    sf->skip_encode_sb = 1;
     sf->mv.subpel_iters_per_step = 1;
     sf->adaptive_rd_thresh = 4;
     sf->mode_skip_start = 6;
@@ -384,7 +383,6 @@ static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf,
     // Turn on this to use non-RD key frame coding mode.
     sf->mv.search_method = NSTEP;
     sf->mv.reduce_first_step_size = 1;
-    sf->skip_encode_sb = 0;
   }
 
   if (speed >= 7) {
@@ -481,7 +479,6 @@ void vp10_set_speed_features_framesize_independent(VP10_COMP *cpi) {
     sf->intra_uv_mode_mask[i] = INTRA_ALL;
   }
   sf->use_rd_breakout = 0;
-  sf->skip_encode_sb = 0;
   sf->use_uv_intra_rd_estimate = 0;
   sf->allow_skip_recode = 0;
   sf->lpf_pick = LPF_PICK_FROM_FULL_IMAGE;

View File

@@ -223,11 +223,6 @@ typedef struct SPEED_FEATURES {
   // mode to be evaluated. A high value means we will be faster.
   int adaptive_rd_thresh;
 
-  // Enables skipping the reconstruction step (idct, recon) in the
-  // intermediate steps assuming the last frame didn't have too many intra
-  // blocks and the q is less than a threshold.
-  int skip_encode_sb;
-  int skip_encode_frame;
   // Speed feature to allow or disallow skipping of recode at block
   // level within a frame.
   int allow_skip_recode;