Merge "Cleanups around allow_high_precision_mv flag."

Dmitry Kovalev 2013-08-02 16:27:05 -07:00 committed by Gerrit Code Review
commit a6adc82e78
3 changed files with 9 additions and 28 deletions
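The cleanup is the same in every hunk below: the temporary usehp, assigned from xd->allow_high_precision_mv and then conditionally overwritten with a per-MV test, is dropped in favor of evaluating the flag and the test together at the point of use, and the parameter is renamed from usehp to allow_hp to separate "high precision is allowed for this frame" from "this MV actually uses it". A minimal stand-alone sketch of the pattern, assuming a placeholder use_mv_hp() test (the real vp9_use_mv_hp() lives in libvpx and is not part of this diff):

#include <stdlib.h>  /* abs */

typedef struct { int row, col; } MV;  /* 1/8-pel units, placeholder type */

/* Placeholder for vp9_use_mv_hp(): 1/8-pel precision is only worth coding
 * for short reference MVs; the real threshold is defined in libvpx. */
static int use_mv_hp(const MV *ref) {
  return abs(ref->row) < 64 && abs(ref->col) < 64;
}

/* Before (the idiom this commit removes):
 *   int usehp = xd->allow_high_precision_mv;
 *   ...
 *   if (xd->allow_high_precision_mv)
 *     usehp = vp9_use_mv_hp(&ref_mv->as_mv);
 *   else
 *     usehp = 0;
 *   if (usehp) { ... refine to 1/8 pel ... }
 *
 * After: decide at the point of use, no temporary needed. */
static int should_use_hp(int allow_hp, const MV *ref) {
  return allow_hp && use_mv_hp(ref);
}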

View File

@@ -198,7 +198,7 @@ static unsigned int adapt_probs(unsigned int i,
 }
 
-void vp9_adapt_mv_probs(VP9_COMMON *cm, int usehp) {
+void vp9_adapt_mv_probs(VP9_COMMON *cm, int allow_hp) {
   int i, j;
 
   FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
@@ -207,7 +207,7 @@ void vp9_adapt_mv_probs(VP9_COMMON *cm, int usehp) {
   nmv_context *pre_ctx = &pre_fc->nmvc;
   nmv_context_counts *cts = &cm->counts.mv;
 
-  vp9_counts_process(cts, usehp);
+  vp9_counts_process(cts, allow_hp);
 
   adapt_probs(0, vp9_mv_joint_tree, ctx->joints, pre_ctx->joints, cts->joints);
@@ -229,7 +229,7 @@ void vp9_adapt_mv_probs(VP9_COMMON *cm, int usehp) {
     adapt_probs(0, vp9_mv_fp_tree, ctx->comps[i].fp, pre_ctx->comps[i].fp,
                 cts->comps[i].fp);
 
-    if (usehp) {
+    if (allow_hp) {
       ctx->comps[i].class0_hp = adapt_prob(pre_ctx->comps[i].class0_hp,
                                            cts->comps[i].class0_hp);
       ctx->comps[i].hp = adapt_prob(pre_ctx->comps[i].hp, cts->comps[i].hp);
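For context on the adapt_prob()/adapt_probs() calls in the hunks above: after each frame, the MV coding probabilities are pulled toward the frequencies actually observed, with the pull weighted by how many events were counted. A hedged, self-contained sketch of that idea; COUNT_SAT and MAX_UPDATE_FACTOR are placeholders rather than libvpx's constants, and sketch_adapt_prob() is not the library function:

#include <stdint.h>

typedef uint8_t vp9_prob;  /* probability of taking the "0" branch, 1..255 */

#define COUNT_SAT 20           /* placeholder saturation of event counts */
#define MAX_UPDATE_FACTOR 128  /* placeholder maximum blend weight (of 256) */

/* Blend the previous probability with the probability observed this frame;
 * the more events were counted, the stronger the pull toward the observation. */
static vp9_prob sketch_adapt_prob(vp9_prob pre, unsigned int ct0,
                                  unsigned int ct1) {
  const unsigned int den = ct0 + ct1;
  unsigned int obs = den ? (255 * ct0 + den / 2) / den : 128;
  unsigned int count, factor;
  if (obs < 1) obs = 1;
  if (obs > 255) obs = 255;
  count = den < COUNT_SAT ? den : COUNT_SAT;
  factor = MAX_UPDATE_FACTOR * count / COUNT_SAT;
  return (vp9_prob)((pre * (256 - factor) + obs * factor + 128) >> 8);
}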

View File

@@ -14,8 +14,9 @@
 #include "vp9/common/vp9_mvref_common.h"
 #include "vp9/common/vp9_sadmxn.h"
 
-static void lower_mv_precision(int_mv *mv, int usehp) {
-  if (!usehp || !vp9_use_mv_hp(&mv->as_mv)) {
+static void lower_mv_precision(int_mv *mv, int allow_hp) {
+  const int use_hp = allow_hp && vp9_use_mv_hp(&mv->as_mv);
+  if (!use_hp) {
     if (mv->as_mv.row & 1)
       mv->as_mv.row += (mv->as_mv.row > 0 ? -1 : 1);
     if (mv->as_mv.col & 1)
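The col branch of the hunk above is cut off; assuming it mirrors the row branch, the helper simply rounds any odd (1/8-pel) component toward zero so the MV falls back to the 1/4-pel grid whenever high precision is off or not worthwhile. A small stand-alone sketch with a worked example (MV and lower_precision() are placeholder names, not the libvpx definitions):

#include <stdio.h>

typedef struct { int row, col; } MV;  /* stand-in for mv->as_mv, 1/8-pel units */

/* Round odd components toward zero so the MV sits on the 1/4-pel grid.
 * The use_hp decision (allow_hp && vp9_use_mv_hp(...)) is passed in
 * already resolved, matching the refactored helper above. */
static void lower_precision(MV *mv, int use_hp) {
  if (!use_hp) {
    if (mv->row & 1) mv->row += (mv->row > 0 ? -1 : 1);
    if (mv->col & 1) mv->col += (mv->col > 0 ? -1 : 1);
  }
}

int main(void) {
  MV mv = { 5, -3 };             /* 5/8 pel right, 3/8 pel up */
  lower_precision(&mv, 0);       /* high precision not in use */
  printf("%d %d\n", mv.row, mv.col);  /* prints "4 -2" */
  return 0;
}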

View File

@@ -269,7 +269,6 @@ int vp9_find_best_sub_pixel_step_iteratively(MACROBLOCK *x,
   int maxc, minc, maxr, minr;
   int y_stride;
   int offset;
-  int usehp = xd->allow_high_precision_mv;
 
   uint8_t *y = xd->plane[0].pre[0].buf +
                (bestmv->as_mv.row) * xd->plane[0].pre[0].stride +
@@ -370,13 +369,7 @@ int vp9_find_best_sub_pixel_step_iteratively(MACROBLOCK *x,
     tc = bc;
   }
 
-  if (xd->allow_high_precision_mv) {
-    usehp = vp9_use_mv_hp(&ref_mv->as_mv);
-  } else {
-    usehp = 0;
-  }
-
-  if (usehp) {
+  if (xd->allow_high_precision_mv && vp9_use_mv_hp(&ref_mv->as_mv)) {
     hstep >>= 1;
     while (--eighthiters) {
       CHECK_BETTER(left, tr, tc - hstep);
@@ -450,7 +443,6 @@ int vp9_find_best_sub_pixel_comp(MACROBLOCK *x,
   int maxc, minc, maxr, minr;
   int y_stride;
   int offset;
-  int usehp = xd->allow_high_precision_mv;
 
   DECLARE_ALIGNED_ARRAY(16, uint8_t, comp_pred, 64 * 64);
   uint8_t *y = xd->plane[0].pre[0].buf +
@@ -559,13 +551,7 @@ int vp9_find_best_sub_pixel_comp(MACROBLOCK *x,
     tc = bc;
   }
 
-  if (xd->allow_high_precision_mv) {
-    usehp = vp9_use_mv_hp(&ref_mv->as_mv);
-  } else {
-    usehp = 0;
-  }
-
-  if (usehp) {
+  if (xd->allow_high_precision_mv && vp9_use_mv_hp(&ref_mv->as_mv)) {
     hstep >>= 1;
     while (--eighthiters) {
       CHECK_BETTER(left, tr, tc - hstep);
@@ -636,7 +622,6 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
   int thismse;
   int y_stride;
   MACROBLOCKD *xd = &x->e_mbd;
-  int usehp = xd->allow_high_precision_mv;
 
   uint8_t *y = xd->plane[0].pre[0].buf +
                (bestmv->as_mv.row) * xd->plane[0].pre[0].stride +
@@ -929,12 +914,7 @@ int vp9_find_best_sub_pixel_step(MACROBLOCK *x,
     *sse1 = sse;
   }
 
-  if (x->e_mbd.allow_high_precision_mv) {
-    usehp = vp9_use_mv_hp(&ref_mv->as_mv);
-  } else {
-    usehp = 0;
-  }
-  if (!usehp)
+  if (!(xd->allow_high_precision_mv && vp9_use_mv_hp(&ref_mv->as_mv)))
     return bestmse;
 
   /* Now do 1/8th pixel */
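All three encoder hunks guard the same final refinement stage: hstep >>= 1 drops the search step from 1/4 pel to 1/8 pel, and that extra round now runs only when xd->allow_high_precision_mv is set and vp9_use_mv_hp() accepts the reference MV. A hedged sketch of the step-halving pattern with a dummy cost function; the real search evaluates sub-pel variance/SAD plus MV rate via CHECK_BETTER, which this sketch does not reproduce:

#include <stdlib.h>

typedef struct { int row, col; } MV;  /* 1/8-pel units, placeholder type */

/* Dummy cost: distance to a hypothetical optimum. The encoder would instead
 * compute a sub-pel prediction error plus the cost of coding the MV. */
static int cost(const MV *cand, const MV *target) {
  return abs(cand->row - target->row) + abs(cand->col - target->col);
}

/* Step-halving refinement: search +/-hstep around the current best, then halve
 * the step. The final 1/8-pel round runs only when allow_hp is set and the
 * reference MV qualifies for high precision. */
static void refine_subpel(MV *best, const MV *target, int allow_hp,
                          int ref_qualifies_for_hp) {
  const int rounds = (allow_hp && ref_qualifies_for_hp) ? 3 : 2;  /* 1/2, 1/4, (1/8) pel */
  int hstep = 4, r;  /* 4 = 1/2 pel in 1/8-pel units */
  for (r = 0; r < rounds; ++r, hstep >>= 1) {
    int improved = 1;
    while (improved) {
      const MV cands[4] = {
        { best->row, best->col - hstep }, { best->row, best->col + hstep },
        { best->row - hstep, best->col }, { best->row + hstep, best->col }
      };
      int i, best_cost = cost(best, target);
      improved = 0;
      for (i = 0; i < 4; ++i) {
        if (cost(&cands[i], target) < best_cost) {
          best_cost = cost(&cands[i], target);
          *best = cands[i];
          improved = 1;
        }
      }
    }
  }
}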