Various bug fixes related to high precision mv

Change-Id: Ie5a7c87d71bd4a541463b68704620d89cec142cf
Deb Mukherjee 2012-03-01 03:10:21 -08:00
parent 2ad7a4a271
commit e41e5ce5ad
7 changed files with 76 additions and 79 deletions

View File

@@ -1021,6 +1021,9 @@ void init_encode_frame_mb_context(VP8_COMP *cpi)
vp8_zero(cpi->uv_mode_count)
x->mvc = cm->fc.mvc;
#if CONFIG_HIGH_PRECISION_MV
x->mvc_hp = cm->fc.mvc_hp;
#endif
vpx_memset(cm->above_context, 0,
sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
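
This first hunk makes init_encode_frame_mb_context refresh the macroblock's pointer to the high-precision MV probabilities alongside the existing quarter-pel pointer, so the encoding loop sees the frame context selected for the current frame. A minimal hedged sketch of the pattern; the struct and function names below are simplified stand-ins, not the libvpx definitions:

    /* Hedged sketch only: simplified stand-ins for the VP8_COMMON / MACROBLOCK fields. */
    #define CONFIG_HIGH_PRECISION_MV 1   /* assumed enabled for this sketch */

    typedef struct { const void *mvc, *mvc_hp; } frame_contexts;
    typedef struct { const void *mvc, *mvc_hp; } mb_state;

    static void refresh_mv_contexts(mb_state *x, const frame_contexts *fc)
    {
        x->mvc = fc->mvc;          /* quarter-pel MV entropy context                 */
    #if CONFIG_HIGH_PRECISION_MV
        x->mvc_hp = fc->mvc_hp;    /* eighth-pel context, now refreshed here as well */
    #endif
    }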

View File

@@ -643,7 +643,7 @@ static void write_component_probs_hp(
//j=0
{
const int c = events [mv_max];
const int c = events [mv_max_hp];
is_short_ct [0] += c; // Short vector
short_ct [0] += c; // Magnitude distribution
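
The one-line change above is an off-centre indexing fix: the count table passed into write_component_probs_hp holds one bin per signed eighth-pel difference with the zero bin in the middle, so it must be addressed with mv_max_hp rather than the quarter-pel bound mv_max. A hedged sketch of the layout; the constants below are illustrative, not quoted from the tree:

    /* Hedged sketch: the constants are illustrative, not quoted from the tree. */
    enum { MV_MAX = 1023, MV_MAX_HP = 2047 };

    /* One count per signed eighth-pel difference, stored at index value + MV_MAX_HP,
       so the zero-difference bin sits in the middle of the table.                 */
    static unsigned int events[2 * MV_MAX_HP + 1];

    static unsigned int zero_diff_count_hp(void)
    {
        /* Indexing with the quarter-pel bound MV_MAX lands well short of the
           middle; MV_MAX_HP is the correct offset, as in the fix above.        */
        return events[MV_MAX_HP];
    }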

View File

@@ -38,6 +38,12 @@
#define IF_RTCD(x) NULL
#endif
#if CONFIG_HIGH_PRECISION_MV
#define XMVCOST (x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost)
#else
#define XMVCOST (x->mvcost)
#endif
extern void vp8_build_block_offsets(MACROBLOCK *x);
extern void vp8_setup_block_ptrs(MACROBLOCK *x);
extern void vp8cx_frame_init_quantizer(VP8_COMP *cpi);
@@ -420,12 +426,7 @@ static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
ref_mv_full.as_mv.row = ref_mv->as_mv.row>>3;
tmp_err = cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv, step_param,
x->sadperbit16, &num00, &v_fn_ptr,
#if CONFIG_HIGH_PRECISION_MV
x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
#else
x->mvcost,
#endif
ref_mv);
XMVCOST, ref_mv);
if ( tmp_err < INT_MAX-new_mv_mode_penalty )
tmp_err += new_mv_mode_penalty;
@@ -451,12 +452,7 @@ static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
tmp_err = cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv,
step_param + n, x->sadperbit16,
&num00, &v_fn_ptr,
#if CONFIG_HIGH_PRECISION_MV
x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
#else
x->mvcost,
#endif
ref_mv);
XMVCOST, ref_mv);
if ( tmp_err < INT_MAX-new_mv_mode_penalty )
tmp_err += new_mv_mode_penalty;
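
Both hunks in this file replace a five-line #if CONFIG_HIGH_PRECISION_MV block at each diamond_search_sad call with the new XMVCOST macro, which selects the eighth-pel or quarter-pel cost table from the per-frame allow_high_precision_mv flag. A hedged sketch of the pattern; the struct names are simplified stand-ins for the MACROBLOCK fields the macro reads:

    /* Hedged sketch: simplified stand-ins for the MACROBLOCK fields the macro reads. */
    #define CONFIG_HIGH_PRECISION_MV 1            /* assumed enabled for this sketch */

    struct mbd_s { int allow_high_precision_mv; };
    struct mb_s  { struct mbd_s e_mbd; int *mvcost[2]; int *mvcost_hp[2]; };

    #if CONFIG_HIGH_PRECISION_MV
    #define XMVCOST (x->e_mbd.allow_high_precision_mv ? x->mvcost_hp : x->mvcost)
    #else
    #define XMVCOST (x->mvcost)
    #endif

    /* The macro expands against a local named x, so a search call shrinks to:
         cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv, step_param,
                                 x->sadperbit16, &num00, &v_fn_ptr,
                                 XMVCOST, ref_mv);                              */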

View File

@@ -37,8 +37,8 @@ static unsigned int do_16x16_motion_iteration
int *mvsadcost[2] = { &dummy_cost[mv_max+1], &dummy_cost[mv_max+1] };
#if CONFIG_HIGH_PRECISION_MV
static int dummy_cost_hp[2*mv_max_hp+1];
int *mvcost_hp[2] = { &dummy_cost_hp[mv_max_hp+1], &dummy_cost[mv_max_hp+1] };
int *mvsadcost_hp[2] = { &dummy_cost_hp[mv_max_hp+1], &dummy_cost[mv_max_hp+1] };
int *mvcost_hp[2] = { &dummy_cost_hp[mv_max_hp+1], &dummy_cost_hp[mv_max_hp+1] };
int *mvsadcost_hp[2] = { &dummy_cost_hp[mv_max_hp+1], &dummy_cost_hp[mv_max_hp+1] };
#endif
int col_min = (ref_mv->as_mv.col>>3) - MAX_FULL_PEL_VAL + ((ref_mv->as_mv.col & 7)?1:0);
int row_min = (ref_mv->as_mv.row>>3) - MAX_FULL_PEL_VAL + ((ref_mv->as_mv.row & 7)?1:0);
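
The two corrected lines fix copy-paste slips: in each high-precision table the second (column) entry pointed into the quarter-pel dummy_cost array instead of dummy_cost_hp, and since mv_max_hp is larger than mv_max that index also reaches past the end of the smaller array. A hedged sketch of the intended setup; MV_MAX_HP and the helper name below are illustrative:

    /* Hedged sketch: MV_MAX_HP is illustrative; the point is that every entry must
       point into the zero-filled high-precision table, not the smaller dummy_cost. */
    enum { MV_MAX_HP = 2047 };

    static int dummy_cost_hp[2 * MV_MAX_HP + 1];   /* all zeros: MV cost is ignored here */

    static void setup_hp_tables(int *mvcost_hp[2], int *mvsadcost_hp[2])
    {
        mvcost_hp[0]    = &dummy_cost_hp[MV_MAX_HP + 1];
        mvcost_hp[1]    = &dummy_cost_hp[MV_MAX_HP + 1];   /* fixed: was &dummy_cost[...] */
        mvsadcost_hp[0] = &dummy_cost_hp[MV_MAX_HP + 1];
        mvsadcost_hp[1] = &dummy_cost_hp[MV_MAX_HP + 1];   /* fixed: was &dummy_cost[...] */
    }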

View File

@@ -22,6 +22,16 @@ static int mv_ref_ct [31] [4] [2];
static int mv_mode_cts [4] [2];
#endif
#if CONFIG_HIGH_PRECISION_MV
int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight, int ishp)
{
// MV costing is based on the distribution of vectors in the previous frame and as such will tend to
// over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
// cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
// The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> (ishp==0)] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> (ishp==0)]) * Weight) >> 7;
}
#else
int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight)
{
// MV costing is based on the distribution of vectors in the previous frame and as such will tend to
@@ -30,15 +40,6 @@ int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight)
// The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * Weight) >> 7;
}
#if CONFIG_HIGH_PRECISION_MV
int vp8_mv_bit_cost_hp(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight)
{
// MV costing is based on the distribution of vectors in the previous frame and as such will tend to
// over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
// cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
// The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row)] + mvcost[1][(mv->as_mv.col - ref->as_mv.col)]) * Weight) >> 7;
}
#endif
#if CONFIG_HIGH_PRECISION_MV
@@ -278,10 +279,10 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
rr = ref_mv->as_mv.row; rc = ref_mv->as_mv.col;
br = bestmv->as_mv.row << 3; bc = bestmv->as_mv.col << 3;
hstep = 4;
minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) - ((1 << mvlong_width) - 1));
maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << mvlong_width) - 1));
minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << mvlong_width) - 1));
maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << mvlong_width) - 1));
minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) - ((1 << mvlong_width_hp) - 1));
maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << mvlong_width_hp) - 1));
minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << mvlong_width_hp) - 1));
maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << mvlong_width_hp) - 1));
}
else
#endif
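
Two things happen in this file: the separate vp8_mv_bit_cost_hp helper is folded into vp8_mv_bit_cost behind an ishp flag, and the iterative sub-pixel search now clamps its range with mvlong_width_hp when high precision is active. The >> (ishp==0) expression does the unit conversion: MV differences are kept in eighth-pel units, so with ishp == 0 they are halved to index the quarter-pel cost table, and with ishp == 1 they index the eighth-pel table directly. A hedged standalone restatement of that lookup, not the tree's code:

    /* Hedged sketch: a standalone restatement of the lookup, not the tree's code. */
    static int mv_component_cost(int diff_eighth_pel, const int *cost_table, int ishp)
    {
        /* cost_table is assumed pre-offset to its zero bin, so signed indices work.
           ishp == 0: quarter-pel table, drop the eighth-pel bit (shift by 1).
           ishp == 1: eighth-pel table, use the difference as is (shift by 0).     */
        return cost_table[diff_eighth_pel >> (ishp == 0)];
    }

    /* e.g. a difference of +6 eighth-pels reads entry 3 of a quarter-pel table
       but entry 6 of an eighth-pel table.                                          */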

View File

@@ -25,7 +25,11 @@ extern void accum_mv_refs(MB_PREDICTION_MODE, const int near_mv_ref_cts[4]);
#define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS)) - 1) // Max full pel mv specified in 1 pel units
#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1)) // Maximum size of the first step in full pel units
#if CONFIG_HIGH_PRECISION_MV
extern int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight, int ishp);
#else
extern int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight);
#endif
extern void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride);
extern void vp8_init3smotion_compensation(MACROBLOCK *x, int stride);

View File

@@ -50,6 +50,12 @@
extern void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x);
extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);
#if CONFIG_HIGH_PRECISION_MV
#define XMVCOST (x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost)
#else
#define XMVCOST (x->mvcost)
#endif
#define MAXF(a,b) (((a) > (b)) ? (a) : (b))
static const int auto_speed_thresh[17] =
@@ -1498,7 +1504,12 @@ static int labels2mode(
switch (m = this_mode)
{
case NEW4X4 :
#if CONFIG_HIGH_PRECISION_MV
thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost,
102, xd->allow_high_precision_mv);
#else
thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost, 102);
#endif
break;
case LEFT4X4:
this_mv->as_int = col ? d[-1].bmi.mv.as_int : left_block_mv(mic, i);
@@ -1745,11 +1756,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
bestsme = cpi->diamond_search_sad(x, c, e, &mvp_full,
&mode_mv[NEW4X4], step_param,
sadpb, &num00, v_fn_ptr,
#if CONFIG_HIGH_PRECISION_MV
x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
#else
x->mvcost,
#endif
XMVCOST,
bsi->ref_mv);
n = num00;
@@ -1767,11 +1774,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
&mvp_full, &temp_mv,
step_param + n, sadpb,
&num00, v_fn_ptr,
#if CONFIG_HIGH_PRECISION_MV
x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
#else
x->mvcost,
#endif
XMVCOST,
bsi->ref_mv);
if (thissme < bestsme)
@@ -1793,12 +1796,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
thissme = cpi->full_search_sad(x, c, e, &mvp_full,
sadpb, 16, v_fn_ptr,
#if CONFIG_HIGH_PRECISION_MV
x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
#else
x->mvcost,
#endif
bsi->ref_mv);
XMVCOST, bsi->ref_mv);
if (thissme < bestsme)
{
@@ -1818,18 +1816,13 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
int distortion;
unsigned int sse;
cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4],
bsi->ref_mv, x->errorperbit, v_fn_ptr,
#if CONFIG_HIGH_PRECISION_MV
x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
#else
x->mvcost,
#endif
bsi->ref_mv, x->errorperbit, v_fn_ptr, XMVCOST,
&distortion, &sse);
}
} /* NEW4X4 */
rate = labels2mode(x, labels, i, this_mode, &mode_mv[this_mode],
bsi->ref_mv, x->mvcost);
bsi->ref_mv, XMVCOST);
// Trap vectors that reach beyond the UMV borders
if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
@@ -1863,7 +1856,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
vpx_memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES));
labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected],
bsi->ref_mv, x->mvcost);
bsi->ref_mv, XMVCOST);
br += sbr;
bd += sbd;
@@ -2848,12 +2841,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
bestsme = cpi->diamond_search_sad(x, b, d, &mvp_full, &d->bmi.mv,
step_param, sadpb, &num00,
&cpi->fn_ptr[BLOCK_16X16],
#if CONFIG_HIGH_PRECISION_MV
x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
#else
x->mvcost,
#endif
&best_ref_mv);
XMVCOST, &best_ref_mv);
mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
// Further step/diamond searches as necessary
@@ -2878,12 +2866,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
thissme = cpi->diamond_search_sad(x, b, d, &mvp_full,
&d->bmi.mv, step_param + n, sadpb, &num00,
&cpi->fn_ptr[BLOCK_16X16],
#if CONFIG_HIGH_PRECISION_MV
x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
#else
x->mvcost,
#endif
&best_ref_mv);
XMVCOST, &best_ref_mv);
/* check to see if refining search is needed. */
if (num00 > (further_steps-n))
@@ -2914,12 +2897,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
//thissme = cpi->full_search_sad(x, b, d, &d->bmi.mv.as_mv, sadpb, search_range, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
thissme = cpi->refining_search_sad(x, b, d, &d->bmi.mv, sadpb,
search_range, &cpi->fn_ptr[BLOCK_16X16],
#if CONFIG_HIGH_PRECISION_MV
x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
#else
x->mvcost,
#endif
&best_ref_mv);
XMVCOST, &best_ref_mv);
if (thissme < bestsme)
{
@@ -2944,19 +2922,21 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv, &best_ref_mv,
x->errorperbit,
&cpi->fn_ptr[BLOCK_16X16],
#if CONFIG_HIGH_PRECISION_MV
x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost,
#else
x->mvcost,
#endif
&dis, &sse);
XMVCOST, &dis, &sse);
}
mc_search_result[x->e_mbd.mode_info_context->mbmi.ref_frame].as_int = d->bmi.mv.as_int;
mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
// Add the new motion vector cost to our rolling cost variable
rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, x->mvcost, 96);
#if CONFIG_HIGH_PRECISION_MV
rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv,
XMVCOST, 96,
x->e_mbd.allow_high_precision_mv);
#else
rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv,
XMVCOST, 96);
#endif
}
case NEARESTMV:
@@ -3081,10 +3061,23 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
continue;
x->e_mbd.mode_info_context->mbmi.mv.as_int = mc_search_result[ref1].as_int;
x->e_mbd.mode_info_context->mbmi.second_mv.as_int = mc_search_result[ref2].as_int;
#if CONFIG_HIGH_PRECISION_MV
rate2 += vp8_mv_bit_cost(&mc_search_result[ref1],
&frame_best_ref_mv[ref1], x->mvcost, 96);
&frame_best_ref_mv[ref1],
XMVCOST, 96,
x->e_mbd.allow_high_precision_mv);
rate2 += vp8_mv_bit_cost(&mc_search_result[ref2],
&frame_best_ref_mv[ref2], x->mvcost, 96);
&frame_best_ref_mv[ref2],
XMVCOST, 96,
x->e_mbd.allow_high_precision_mv);
#else
rate2 += vp8_mv_bit_cost(&mc_search_result[ref1],
&frame_best_ref_mv[ref1],
XMVCOST, 96);
rate2 += vp8_mv_bit_cost(&mc_search_result[ref2],
&frame_best_ref_mv[ref2],
XMVCOST, 96);
#endif
break;
case ZEROMV:
x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
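
The rdopt.c changes apply the same two patterns to mode selection: call sites that previously wrapped the cost-table argument in #if CONFIG_HIGH_PRECISION_MV blocks now pass XMVCOST, and every vp8_mv_bit_cost call gains the allow_high_precision_mv flag when the experiment is built. The Weight argument (96 here, 102 for the 4x4 split modes) scales the table cost by Weight/128 before it is added to the rolling rate2. A hedged arithmetic sketch of that accumulation, with invented table costs:

    /* Hedged sketch: the cost values are invented for illustration. */
    #include <stdio.h>

    int main(void)
    {
        int row_cost = 300, col_cost = 180;   /* hypothetical mvcost[] lookups        */
        int weight   = 96;                    /* Weight used for the whole-MB NEWMV   */

        /* Mirrors ((mvcost[0][dr] + mvcost[1][dc]) * Weight) >> 7                    */
        int mv_rate = ((row_cost + col_cost) * weight) >> 7;   /* (480 * 96) / 128 = 360 */

        int rate2 = 1000;                     /* rolling rate total for this mode     */
        rate2 += mv_rate;                     /* rate2 == 1360                        */

        printf("mv_rate = %d, rate2 = %d\n", mv_rate, rate2);
        return 0;
    }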