Rework motion vector precision limit

This commit enables 1/8-pel luma-component motion vector precision
for all motion vector cases. It improves compression performance
by 0.13% on the lowres set and 0.49% on the hdres set.

Change-Id: Iccfc85e8ee1c0154dfbd18f060344f1e3db5dc18
Author: Jingning Han
Date:   2016-04-25 14:26:53 -07:00
Parent: b4cbe54ed6
Commit: 8678ab4c55

3 changed files with 12 additions and 27 deletions
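
Background for the hunks below: a motion vector component is stored in 1/8-pel units and entropy-coded as a sign, a magnitude class, an integer offset, a 1/4-pel fraction, and a final 1/8-pel "high precision" bit. The following sketch mirrors the decomposition in inc_mv_component() (first file); the class computation is a simplified, self-contained stand-in for vp10_get_mv_class(), so treat it as illustrative rather than the codebase's implementation.

    #include <stdio.h>
    #include <stdlib.h>

    /* Smallest magnitude-minus-1 value z belonging to class c
     * (CLASS0_SIZE == 2 in this codebase). */
    static int mv_class_base(int c) { return c ? (2 << (c + 2)) : 0; }

    /* Simplified stand-in for vp10_get_mv_class(): linear scan instead of
     * the log2 lookup table the real code uses. */
    static int get_mv_class(int z, int *offset) {
      int c = 0;
      while (c < 10 && mv_class_base(c + 1) <= z) ++c;
      *offset = z - mv_class_base(c);
      return c;
    }

    int main(void) {
      const int v = 35;             /* 4 3/8 pel, expressed in 1/8-pel units */
      const int s = v < 0;          /* sign */
      const int z = abs(v) - 1;     /* magnitude - 1 */
      int o;
      const int c = get_mv_class(z, &o);
      const int d = o >> 3;         /* integer-pel offset within the class */
      const int f = (o >> 1) & 3;   /* 1/4-pel fraction */
      const int e = o & 1;          /* the 1/8-pel bit this commit enables */
      printf("s=%d c=%d d=%d f=%d e=%d\n", s, c, d, f, e);
      return 0;
    }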

vp10/common/entropymv.c

@@ -132,14 +132,11 @@ MV_CLASS_TYPE vp10_get_mv_class(int z, int *offset) {
   return c;
 }
 
-// TODO(jingning): This idle function is intentionally left as is for
-// experimental purpose.
 int vp10_use_mv_hp(const MV *ref) {
-#if CONFIG_MISC_FIXES
   (void) ref;
   return 1;
-#else
-  return (abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH &&
-         (abs(ref->col) >> 3) < COMPANDED_MVREF_THRESH;
-#endif
 }
 
 static void inc_mv_component(int v, nmv_component_counts *comp_counts,
@@ -160,14 +157,16 @@ static void inc_mv_component(int v, nmv_component_counts *comp_counts,
   if (c == MV_CLASS_0) {
     comp_counts->class0[d] += incr;
     comp_counts->class0_fp[d][f] += incr;
-    comp_counts->class0_hp[e] += usehp * incr;
+    if (usehp)
+      comp_counts->class0_hp[e] += incr;
   } else {
     int i;
     int b = c + CLASS0_BITS - 1;  // number of bits
     for (i = 0; i < b; ++i)
       comp_counts->bits[i][((d >> i) & 1)] += incr;
     comp_counts->fp[f] += incr;
-    comp_counts->hp[e] += usehp * incr;
+    if (usehp)
+      comp_counts->hp[e] += incr;
   }
 }
@@ -182,15 +181,11 @@ void vp10_inc_mv(const MV *mv, nmv_context_counts *counts, const int usehp) {
 #endif
     ++counts->joints[j];
 
-    if (mv_joint_vertical(j)) {
-      inc_mv_component(mv->row, &counts->comps[0], 1,
-                       !CONFIG_MISC_FIXES || usehp);
-    }
+    if (mv_joint_vertical(j))
+      inc_mv_component(mv->row, &counts->comps[0], 1, usehp);
 
-    if (mv_joint_horizontal(j)) {
-      inc_mv_component(mv->col, &counts->comps[1], 1,
-                       !CONFIG_MISC_FIXES || usehp);
-    }
+    if (mv_joint_horizontal(j))
+      inc_mv_component(mv->col, &counts->comps[1], 1, usehp);
   }
 }
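
Note on the hunks above: `usehp * incr` and the new `if (usehp)` guard update the counters identically; the rewrite only makes the gating explicit. With the #if CONFIG_MISC_FIXES block removed, vp10_use_mv_hp() is a constant predicate returning 1 for every reference vector, so the only remaining gate on the 1/8-pel bit is the frame-level flag. A one-line sketch of the assumed caller pattern (mirroring vp9's encodemv.c/decodemv.c; cm and ref_mv are hypothetical locals):

    /* Assumed caller pattern: the hp bit is coded only when both the
     * frame-level flag and the per-vector predicate agree. With the
     * predicate now always 1, allow_high_precision_mv alone decides. */
    const int use_hp = cm->allow_high_precision_mv && vp10_use_mv_hp(&ref_mv);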

vp10/encoder/encoder.c

@@ -243,10 +243,10 @@ void vp10_set_high_precision_mv(VP10_COMP *cpi, int allow_high_precision_mv) {
 #else
   if (cpi->common.allow_high_precision_mv) {
     mb->mvcost = mb->nmvcost_hp;
-    mb->mvsadcost = mb->nmvsadcost_hp;
+    mb->mvsadcost = mb->nmvcost_hp;
   } else {
     mb->mvcost = mb->nmvcost;
-    mb->mvsadcost = mb->nmvsadcost;
+    mb->mvsadcost = mb->nmvcost;
   }
 #endif
 }
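
This hunk drops the dedicated SAD-stage cost tables (nmvsadcost / nmvsadcost_hp) and points mb->mvsadcost at the same nmvcost tables used for final MV rate estimation. It pairs with the mcomp.c hunk below, which rescales full-pel SAD-stage vector differences into the 1/8-pel units those tables are indexed in.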

vp10/encoder/mcomp.c

@@ -98,22 +98,12 @@ static int mv_err_cost(const MV *mv, const MV *ref, const int *mvjcost,
 static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref,
                           int sad_per_bit) {
-#if CONFIG_REF_MV
   const MV diff = { (mv->row - ref->row) * 8,
                     (mv->col - ref->col) * 8 };
   return ROUND_POWER_OF_TWO(
       (unsigned)mv_cost(&diff, x->nmvjointsadcost, x->mvsadcost) *
           sad_per_bit,
       VP9_PROB_COST_SHIFT);
-#else
-  const MV diff = { mv->row - ref->row,
-                    mv->col - ref->col };
-  return ROUND_POWER_OF_TWO(
-      (unsigned)mv_cost(&diff, x->nmvjointsadcost, x->nmvsadcost) *
-          sad_per_bit,
-      VP9_PROB_COST_SHIFT);
-#endif
 }
 
 void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride) {
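
The `* 8` in the surviving branch is the unit conversion that makes the table sharing above work: motion vectors are stored in 1/8-pel units, while SAD-stage search positions are full-pel. A minimal sketch of the assumed convention (MV is the codebase's row/col struct; the values are hypothetical):

    /* A 2-pel horizontal step becomes 16 eighth-pels, so the 1/8-pel-indexed
     * cost tables (x->mvsadcost, now aliasing nmvcost) apply directly during
     * full-pel SAD search. */
    const MV fullpel_mv  = { 0, 2 };   /* hypothetical full-pel search point */
    const MV fullpel_ref = { 0, 0 };   /* hypothetical full-pel reference MV */
    const MV diff = { (fullpel_mv.row - fullpel_ref.row) * 8,
                      (fullpel_mv.col - fullpel_ref.col) * 8 };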