Merge "vp9 dec/com: only update frame counts when necessary"

Authored by James Zern on 2013-10-18 13:56:11 -07:00; committed by Gerrit Code Review
commit 4e6c799e9f
4 changed files with 65 additions and 45 deletions
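
The pattern applied throughout this change: the per-symbol counts in cm->counts exist only to feed backward probability adaptation at the end of the frame, and that adaptation is disabled when frame_parallel_decoding_mode is set, so the bookkeeping can be skipped. Below is a minimal compilable sketch of the guard, with the libvpx types reduced to the two fields that matter here; the struct shapes and the record_y_mode helper are illustrative, not the real definitions.

#include <stdio.h>

/* Reduced stand-ins for the libvpx structs; only the field names used in
 * the diff are real. */
typedef struct {
  unsigned int y_mode[4][10];        /* intra y-mode counts per size group */
} FRAME_COUNTS;

typedef struct {
  int frame_parallel_decoding_mode;  /* parsed from the frame header */
  FRAME_COUNTS counts;
} VP9_COMMON;

/* The guard used at every count-update site in this commit. */
static void record_y_mode(VP9_COMMON *cm, int size_group, int y_mode) {
  if (!cm->frame_parallel_decoding_mode)
    ++cm->counts.y_mode[size_group][y_mode];
}

int main(void) {
  VP9_COMMON cm = {0};
  record_y_mode(&cm, 0, 3);            /* counted: adaptation enabled */
  cm.frame_parallel_decoding_mode = 1;
  record_y_mode(&cm, 0, 3);            /* skipped: adaptation disabled */
  printf("count = %u\n", cm.counts.y_mode[0][3]);  /* prints: count = 1 */
  return 0;
}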

vp9/common/vp9_entropymv.c View File

@@ -175,17 +175,18 @@ static void inc_mv_component(int v, nmv_component_counts *comp_counts,
   }
 }

 void vp9_inc_mv(const MV *mv, nmv_context_counts *counts) {
-  const MV_JOINT_TYPE j = vp9_get_mv_joint(mv);
-  ++counts->joints[j];
+  if (counts != NULL) {
+    const MV_JOINT_TYPE j = vp9_get_mv_joint(mv);
+    ++counts->joints[j];

-  if (mv_joint_vertical(j)) {
-    inc_mv_component(mv->row, &counts->comps[0], 1, 1);
-  }
+    if (mv_joint_vertical(j)) {
+      inc_mv_component(mv->row, &counts->comps[0], 1, 1);
+    }

-  if (mv_joint_horizontal(j)) {
-    inc_mv_component(mv->col, &counts->comps[1], 1, 1);
-  }
+    if (mv_joint_horizontal(j)) {
+      inc_mv_component(mv->col, &counts->comps[1], 1, 1);
+    }
+  }
 }
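
This file takes a different route from the guard used elsewhere: vp9_inc_mv sits in common code, so instead of checking the decoding mode here, the counts pointer itself becomes optional. NULL now means "collect no statistics", and assign_mv in the next file passes NULL exactly when frame_parallel_decoding_mode is set. A small sketch of that contract, assuming a cut-down one-field stand-in for nmv_context_counts:

#include <stddef.h>
#include <stdio.h>

typedef struct {
  unsigned int joints[4];  /* stand-in; the real struct also tracks class,
                              bit, fraction and hp counts per component */
} nmv_context_counts;

/* Mirrors the new vp9_inc_mv contract: a NULL sink disables counting. */
static void inc_mv_joint(nmv_context_counts *counts, int joint) {
  if (counts != NULL)
    ++counts->joints[joint];
}

int main(void) {
  nmv_context_counts counts = {{0}};
  const int frame_parallel = 1;
  /* Caller side, as assign_mv does below: choose the sink once. */
  nmv_context_counts *const mv_counts = frame_parallel ? NULL : &counts;
  inc_mv_joint(mv_counts, 2);          /* no-op in frame-parallel mode */
  printf("joints[2] = %u\n", counts.joints[2]);  /* prints: joints[2] = 0 */
  return 0;
}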

vp9/decoder/vp9_decodemv.c View File

@@ -34,7 +34,8 @@ static MB_PREDICTION_MODE read_intra_mode_y(VP9_COMMON *cm, vp9_reader *r,
                                             int size_group) {
   const MB_PREDICTION_MODE y_mode = read_intra_mode(r,
                                         cm->fc.y_mode_prob[size_group]);
-  ++cm->counts.y_mode[size_group][y_mode];
+  if (!cm->frame_parallel_decoding_mode)
+    ++cm->counts.y_mode[size_group][y_mode];
   return y_mode;
 }
@@ -42,7 +43,8 @@ static MB_PREDICTION_MODE read_intra_mode_uv(VP9_COMMON *cm, vp9_reader *r,
                                              MB_PREDICTION_MODE y_mode) {
   const MB_PREDICTION_MODE uv_mode = read_intra_mode(r,
                                          cm->fc.uv_mode_prob[y_mode]);
-  ++cm->counts.uv_mode[y_mode][uv_mode];
+  if (!cm->frame_parallel_decoding_mode)
+    ++cm->counts.uv_mode[y_mode][uv_mode];
   return uv_mode;
 }
@@ -50,7 +52,8 @@ static MB_PREDICTION_MODE read_inter_mode(VP9_COMMON *cm, vp9_reader *r,
                                           uint8_t context) {
   const MB_PREDICTION_MODE mode = treed_read(r, vp9_inter_mode_tree,
                                              cm->fc.inter_mode_probs[context]);
-  ++cm->counts.inter_mode[context][inter_mode_offset(mode)];
+  if (!cm->frame_parallel_decoding_mode)
+    ++cm->counts.inter_mode[context][inter_mode_offset(mode)];
   return mode;
 }
@@ -69,7 +72,8 @@ static TX_SIZE read_selected_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd,
       tx_size += vp9_read(r, tx_probs[2]);
   }

-  update_tx_counts(bsize, context, tx_size, &cm->counts.tx);
+  if (!cm->frame_parallel_decoding_mode)
+    update_tx_counts(bsize, context, tx_size, &cm->counts.tx);
   return tx_size;
 }
@@ -157,7 +161,8 @@ static uint8_t read_skip_coeff(VP9D_COMP *pbi, int segment_id, vp9_reader *r) {
   if (!skip_coeff) {
     const int ctx = vp9_get_pred_context_mbskip(xd);
     skip_coeff = vp9_read(r, vp9_get_pred_prob_mbskip(cm, xd));
-    cm->counts.mbskip[ctx][skip_coeff]++;
+    if (!cm->frame_parallel_decoding_mode)
+      ++cm->counts.mbskip[ctx][skip_coeff];
   }
   return skip_coeff;
 }
@@ -325,7 +330,8 @@ static void read_ref_frames(VP9D_COMP *pbi, vp9_reader *r,
   if (cm->comp_pred_mode == HYBRID_PREDICTION) {
     is_comp = vp9_read(r, fc->comp_inter_prob[comp_ctx]);
-    counts->comp_inter[comp_ctx][is_comp]++;
+    if (!cm->frame_parallel_decoding_mode)
+      ++counts->comp_inter[comp_ctx][is_comp];
   } else {
     is_comp = cm->comp_pred_mode == COMP_PREDICTION_ONLY;
   }
@@ -335,18 +341,21 @@ static void read_ref_frames(VP9D_COMP *pbi, vp9_reader *r,
     const int fix_ref_idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
     const int ref_ctx = vp9_get_pred_context_comp_ref_p(cm, xd);
     const int b = vp9_read(r, fc->comp_ref_prob[ref_ctx]);
-    counts->comp_ref[ref_ctx][b]++;
+    if (!cm->frame_parallel_decoding_mode)
+      ++counts->comp_ref[ref_ctx][b];
     ref_frame[fix_ref_idx] = cm->comp_fixed_ref;
     ref_frame[!fix_ref_idx] = cm->comp_var_ref[b];
   } else {
     const int ctx0 = vp9_get_pred_context_single_ref_p1(xd);
     const int bit0 = vp9_read(r, fc->single_ref_prob[ctx0][0]);
-    ++counts->single_ref[ctx0][0][bit0];
+    if (!cm->frame_parallel_decoding_mode)
+      ++counts->single_ref[ctx0][0][bit0];
     if (bit0) {
       const int ctx1 = vp9_get_pred_context_single_ref_p2(xd);
       const int bit1 = vp9_read(r, fc->single_ref_prob[ctx1][1]);
       ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
-      ++counts->single_ref[ctx1][1][bit1];
+      if (!cm->frame_parallel_decoding_mode)
+        ++counts->single_ref[ctx1][1][bit1];
     } else {
       ref_frame[0] = LAST_FRAME;
     }
@@ -384,7 +393,8 @@ static INLINE INTERPOLATIONFILTERTYPE read_switchable_filter_type(
   const int ctx = vp9_get_pred_context_switchable_interp(xd);
   const int type = treed_read(r, vp9_switchable_interp_tree,
                               cm->fc.switchable_interp_prob[ctx]);
-  ++cm->counts.switchable_interp[ctx][type];
+  if (!cm->frame_parallel_decoding_mode)
+    ++cm->counts.switchable_interp[ctx][type];
   return type;
 }
@@ -430,34 +440,38 @@ static INLINE int assign_mv(VP9_COMMON *cm, MB_PREDICTION_MODE mode,
   int ret = 1;

   switch (mode) {
-    case NEWMV:
+    case NEWMV: {
+      nmv_context_counts *const mv_counts = cm->frame_parallel_decoding_mode ?
+                                            NULL : &cm->counts.mv;
       read_mv(r, &mv[0].as_mv, &best_mv[0].as_mv,
-              &cm->fc.nmvc, &cm->counts.mv, allow_hp);
+              &cm->fc.nmvc, mv_counts, allow_hp);
       if (is_compound)
         read_mv(r, &mv[1].as_mv, &best_mv[1].as_mv,
-                &cm->fc.nmvc, &cm->counts.mv, allow_hp);
+                &cm->fc.nmvc, mv_counts, allow_hp);
       for (i = 0; i < 1 + is_compound; ++i) {
         ret = ret && mv[i].as_mv.row < MV_UPP && mv[i].as_mv.row > MV_LOW;
         ret = ret && mv[i].as_mv.col < MV_UPP && mv[i].as_mv.col > MV_LOW;
       }
       break;
-    case NEARESTMV:
+    }
+    case NEARESTMV: {
       mv[0].as_int = nearest_mv[0].as_int;
-      if (is_compound)
-        mv[1].as_int = nearest_mv[1].as_int;
+      if (is_compound) mv[1].as_int = nearest_mv[1].as_int;
       break;
-    case NEARMV:
+    }
+    case NEARMV: {
       mv[0].as_int = near_mv[0].as_int;
-      if (is_compound)
-        mv[1].as_int = near_mv[1].as_int;
+      if (is_compound) mv[1].as_int = near_mv[1].as_int;
       break;
-    case ZEROMV:
+    }
+    case ZEROMV: {
       mv[0].as_int = 0;
-      if (is_compound)
-        mv[1].as_int = 0;
+      if (is_compound) mv[1].as_int = 0;
       break;
-    default:
+    }
+    default: {
       return 0;
+    }
   }
   return ret;
 }
@@ -472,7 +486,8 @@ static int read_is_inter_block(VP9D_COMP *pbi, int segment_id, vp9_reader *r) {
   } else {
     const int ctx = vp9_get_pred_context_intra_inter(xd);
     const int is_inter = vp9_read(r, vp9_get_pred_prob_intra_inter(cm, xd));
-    ++cm->counts.intra_inter[ctx][is_inter];
+    if (!cm->frame_parallel_decoding_mode)
+      ++cm->counts.intra_inter[ctx][is_inter];
     return is_inter;
   }
 }

vp9/decoder/vp9_decodframe.c View File

@@ -303,7 +303,8 @@ static void decode_modes_sb(VP9D_COMP *pbi, int tile_col,
     else
       partition = PARTITION_SPLIT;

-    cm->counts.partition[pl][partition]++;
+    if (!cm->frame_parallel_decoding_mode)
+      ++cm->counts.partition[pl][partition];
   }

   subsize = get_subsize(bsize, partition);

vp9/decoder/vp9_detokenize.c View File

@@ -113,7 +113,8 @@ static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd,
     pt = get_coef_context(nb, token_cache, c);
     band = get_coef_band(band_translate, c);
     prob = coef_probs[band][pt];
-    counts->eob_branch[tx_size][type][ref][band][pt]++;
+    if (!cm->frame_parallel_decoding_mode)
+      ++counts->eob_branch[tx_size][type][ref][band][pt];
     if (!vp9_read(r, prob[EOB_CONTEXT_NODE]))
       break;
@@ -198,8 +199,10 @@ static int decode_coefs(VP9_COMMON *cm, const MACROBLOCKD *xd,
         WRITE_COEF_CONTINUE(val, DCT_VAL_CATEGORY6);
     }

-  if (c < seg_eob)
-    coef_counts[type][ref][band][pt][DCT_EOB_MODEL_TOKEN]++;
+  if (c < seg_eob) {
+    if (!cm->frame_parallel_decoding_mode)
+      ++coef_counts[type][ref][band][pt][DCT_EOB_MODEL_TOKEN];
+  }

   return c;
 }
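
Why dropping the updates is safe: the counts are consumed only by the end-of-frame backward adaptation pass, which the decoder already skips when frame_parallel_decoding_mode is set, so in that mode every increment was write-only work on the hot decode path. A toy, self-contained illustration of that consumer; this is not libvpx's real update rule (which merges counts down the coding trees with an adaptation rate), just a 50/50 blend of the prior with the observed frequency:

#include <stdio.h>

typedef unsigned char vp9_prob;  /* probability of the 0-branch, out of 256 */

/* Toy backward adaptation: fold observed branch counts into the prior. */
static vp9_prob adapt_prob(vp9_prob pre, unsigned int ct0, unsigned int ct1) {
  const unsigned int den = ct0 + ct1;
  /* Rounded maximum-likelihood estimate, or the prior if nothing was seen. */
  const unsigned int ml = den ? (255 * ct0 + den / 2) / den : pre;
  return (vp9_prob)((pre + ml + 1) >> 1);
}

int main(void) {
  /* A branch taken the 0-way 30 of 40 times drifts from 128 toward ~191. */
  printf("%u\n", adapt_prob(128, 30, 10));  /* prints: 160 */
  return 0;
}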