Adds feature for companded MV encoding

The high-precision (1/8-pel) bit is turned off if the reference
MV is larger than a threshold. The motivation for this patch is
the intuition that if motion is likely large (as indicated by
the reference), there is likely to be more motion blur, and as
a result 1/8-pel precision would be wasteful both in the RD
sense and computationally.
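
In code, the gating is just a magnitude test on the reference MV; the
following is a rough sketch mirroring the vp8_use_nmv_hp() check added
below (MV components are in 1/8-pel units, the threshold in integer pels):

  /* Allow the 1/8-pel bit only when the reference MV is small. */
  int use_hp = (abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH &&
               (abs(ref->col) >> 3) < COMPANDED_MVREF_THRESH;

For example, with the threshold of 8 defined in the patch, a reference
component of 64 or more (i.e. 8 full pels) disables high precision.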

The feature is incorporated as part of the newmventropy experiment.
There is a modest RD improvement with the patch. Overall, the
results for the newmventropy experiment with a threshold of
16 integer pels are:

derf: +0.279%
std-hd: +0.617%
hd: +1.299%
yt: +0.822%

With a threshold of 8 integer pels:

derf: +0.295%
std-hd: +0.623%
hd: +1.365%
yt: +0.847%

Patch: rebased
Patch: rebase fixes

Change-Id: I4ed14600df3c457944e6541ed407cb6e91fe428b
Deb Mukherjee 2012-09-06 09:07:42 -07:00
parent d406334f27
commit 2b26cf1786
6 changed files with 52 additions and 26 deletions


@@ -19,6 +19,9 @@
#define MV_COUNT_SAT 16
#define MV_MAX_UPDATE_FACTOR 160
/* Integer pel reference mv threshold for use of high-precision 1/8 mv */
#define COMPANDED_MVREF_THRESH 8
/* Smooth or bias the mv-counts before prob computation */
/* #define SMOOTH_MV_COUNTS */
@@ -103,6 +106,14 @@ MV_CLASS_TYPE vp8_get_mv_class(int z, int *offset) {
return c;
}
int vp8_use_nmv_hp(const MV *ref) {
if ((abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH &&
(abs(ref->col) >> 3) < COMPANDED_MVREF_THRESH)
return 1;
else
return 0;
}
int vp8_get_mv_mag(MV_CLASS_TYPE c, int offset) {
return mv_class_base(c) + offset;
}
@@ -154,12 +165,6 @@ static void increment_nmv_component(int v,
} else {
mvcomp->hp[e] += incr;
}
} else { /* assume the extra bit is 1 */
if (c == MV_CLASS_0) {
mvcomp->class0_hp[1] += incr;
} else {
mvcomp->hp[1] += incr;
}
}
}
@@ -194,6 +199,7 @@ void vp8_increment_nmv(const MV *mv, const MV *ref, nmv_context_counts *mvctx,
int usehp) {
MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
mvctx->joints[j]++;
usehp = usehp && vp8_use_nmv_hp(ref);
if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
increment_nmv_component_count(mv->row, &mvctx->comps[0], 1, usehp);
}


@@ -21,11 +21,11 @@ struct VP8Common;
void vp8_entropy_mv_init();
void vp8_init_mv_probs(struct VP8Common *cm);
void vp8_adapt_mv_probs(struct VP8Common *cm);
#if CONFIG_NEWMVENTROPY
void vp8_adapt_nmv_probs(struct VP8Common *cm, int usehp);
#endif
#if CONFIG_NEWMVENTROPY
void vp8_adapt_nmv_probs(struct VP8Common *cm, int usehp);
void vp8_lower_mv_precision(MV *mv);
int vp8_use_nmv_hp(const MV *ref);
#define VP8_NMV_UPDATE_PROB 255
//#define MV_GROUP_UPDATE


@@ -20,15 +20,20 @@ const unsigned char vp8_mbsplit_offset[4][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
};
static void lower_mv_precision(int_mv *mv)
static void lower_mv_precision(int_mv *mv, int usehp)
{
if (mv->as_mv.row & 1)
mv->as_mv.row += (mv->as_mv.row > 0 ? -1 : 1);
if (mv->as_mv.col & 1)
mv->as_mv.col += (mv->as_mv.col > 0 ? -1 : 1);
#if CONFIG_NEWMVENTROPY
if (!usehp || !vp8_use_nmv_hp(&mv->as_mv)) {
#else
if (!usehp) {
#endif
if (mv->as_mv.row & 1)
mv->as_mv.row += (mv->as_mv.row > 0 ? -1 : 1);
if (mv->as_mv.col & 1)
mv->as_mv.col += (mv->as_mv.col > 0 ? -1 : 1);
}
}
/* Predict motion vectors using those from already-decoded nearby blocks.
Note that we only consider one 4x4 subblock from each candidate 16x16
macroblock. */
@@ -173,11 +178,9 @@ void vp8_find_near_mvs
/* Make sure that the 1/8th bits of the Mvs are zero if high_precision
* is not being used, by truncating the last bit towards 0
*/
if (!xd->allow_high_precision_mv) {
lower_mv_precision(best_mv);
lower_mv_precision(nearest);
lower_mv_precision(nearby);
}
lower_mv_precision(best_mv, xd->allow_high_precision_mv);
lower_mv_precision(nearest, xd->allow_high_precision_mv);
lower_mv_precision(nearby, xd->allow_high_precision_mv);
// TODO: move clamp outside findnearmv
vp8_clamp_mv2(nearest, xd);
@@ -301,9 +304,7 @@ void vp8_find_best_ref_mvs(MACROBLOCKD *xd,
// Copy back the re-ordered mv list
vpx_memcpy(mvlist, sorted_mvs, sizeof(sorted_mvs));
if (!xd->allow_high_precision_mv)
lower_mv_precision(best_mv);
lower_mv_precision(best_mv, xd->allow_high_precision_mv);
vp8_clamp_mv2(best_mv, xd);
}


@@ -244,6 +244,7 @@ static void read_nmv(vp8_reader *r, MV *mv, const MV *ref,
static void read_nmv_fp(vp8_reader *r, MV *mv, const MV *ref,
const nmv_context *mvctx, int usehp) {
MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
usehp = usehp && vp8_use_nmv_hp(ref);
if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
mv->row = read_nmv_component_fp(r, mv->row, ref->row, &mvctx->comps[0],
usehp);
@@ -252,6 +253,7 @@ static void read_nmv_fp(vp8_reader *r, MV *mv, const MV *ref,
mv->col = read_nmv_component_fp(r, mv->col, ref->col, &mvctx->comps[1],
usehp);
}
//printf(" %d: %d %d ref: %d %d\n", usehp, mv->row, mv-> col, ref->row, ref->col);
}
static void update_nmv(vp8_reader *bc, vp8_prob *const p,


@@ -526,6 +526,7 @@ void vp8_encode_nmv(vp8_writer *w, const MV *mv, const MV *ref,
void vp8_encode_nmv_fp(vp8_writer *w, const MV *mv, const MV *ref,
const nmv_context *mvctx, int usehp) {
MV_JOINT_TYPE j = vp8_get_mv_joint(*mv);
usehp = usehp && vp8_use_nmv_hp(ref);
if (j == MV_JOINT_HZVNZ || j == MV_JOINT_HNZVNZ) {
encode_nmv_component_fp(w, mv->row, ref->row, &mvctx->comps[0], usehp);
}


@@ -276,6 +276,7 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int maxc, minc, maxr, minr;
int y_stride;
int offset;
int usehp = xd->allow_high_precision_mv;
#if !CONFIG_SUPERBLOCKS && (ARCH_X86 || ARCH_X86_64)
unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
@@ -301,7 +302,6 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
y_stride = d->pre_stride;
#endif
rr = ref_mv->as_mv.row;
rc = ref_mv->as_mv.col;
br = bestmv->as_mv.row << 3;
@@ -403,7 +403,15 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
tc = bc;
}
if (x->e_mbd.allow_high_precision_mv) {
#if CONFIG_NEWMVENTROPY
if (xd->allow_high_precision_mv) {
usehp = vp8_use_nmv_hp(&ref_mv->as_mv);
} else {
usehp = 0;
}
#endif
if (usehp) {
hstep >>= 1;
while (--eighthiters) {
CHECK_BETTER(left, tr, tc - hstep);
@@ -471,6 +479,7 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int thismse;
int y_stride;
MACROBLOCKD *xd = &x->e_mbd;
int usehp = xd->allow_high_precision_mv;
#if !CONFIG_SUPERBLOCKS && (ARCH_X86 || ARCH_X86_64)
unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
@@ -762,7 +771,14 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
if (!x->e_mbd.allow_high_precision_mv)
#if CONFIG_NEWMVENTROPY
if (x->e_mbd.allow_high_precision_mv) {
usehp = vp8_use_nmv_hp(&ref_mv->as_mv);
} else {
usehp = 0;
}
#endif
if (!usehp)
return bestmse;
/* Now do 1/8th pixel */