Moving from int_mv to MV.

Converting vp9_mv_bit_cost, mv_err_cost, and mvsad_err_cost
functions for now.

Change-Id: I60e3cc20daef773c2adf9a18e30bc85b1c2eb211
commit e51e7a0e8d (parent 24df77e951)
Author: Dmitry Kovalev
Date:   2013-09-20 13:52:43 +04:00

3 changed files with 133 additions and 124 deletions
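
For readers skimming the diff below: int_mv is a union that overlays the MV struct on a single 32-bit word, which is why call sites only have to switch from passing an int_mv pointer to passing &x->as_mv; no data is converted. A minimal sketch of the two types as I understand them from vp9/common/vp9_mv.h (the exact field widths are an assumption here, not taken from this commit):

#include <assert.h>
#include <stdint.h>

/* Sketch of the two vp9 motion-vector types (assumed layout). */
typedef struct mv {
  int16_t row;
  int16_t col;
} MV;

typedef union int_mv {
  uint32_t as_int;  /* whole vector as one word, handy for compares/copies */
  MV as_mv;         /* the same storage viewed as row/col */
} int_mv;

int main(void) {
  int_mv v;
  v.as_mv.row = -3;
  v.as_mv.col = 5;

  /* &v.as_mv aliases the same storage as &v, so a function converted from
   * taking int_mv * to taking const MV * only changes its parameter type;
   * callers pass &v.as_mv instead of &v. */
  const MV *as_struct = &v.as_mv;
  assert(as_struct->row == -3 && as_struct->col == 5);
  assert((const void *)as_struct == (const void *)&v);
  return 0;
}

The codegen is essentially unchanged either way; the point of the conversion is the narrower const MV * interface for the cost functions.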

@@ -59,38 +59,39 @@ int vp9_init_search_range(VP9_COMP *cpi, int size) {
   return sr;
 }
 
-int vp9_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvjcost, int *mvcost[2],
-                    int weight) {
-  MV v;
-  v.row = mv->as_mv.row - ref->as_mv.row;
-  v.col = mv->as_mv.col - ref->as_mv.col;
-  return ROUND_POWER_OF_TWO((mvjcost[vp9_get_mv_joint(&v)] +
-                             mvcost[0][v.row] +
-                             mvcost[1][v.col]) * weight, 7);
+static INLINE int mv_cost(const MV *mv,
+                          const int *joint_cost, int *comp_cost[2]) {
+  return joint_cost[vp9_get_mv_joint(mv)] +
+             comp_cost[0][mv->row] + comp_cost[1][mv->col];
 }
 
-static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvjcost, int *mvcost[2],
+int vp9_mv_bit_cost(const MV *mv, const MV *ref,
+                    const int *mvjcost, int *mvcost[2], int weight) {
+  const MV diff = { mv->row - ref->row,
+                    mv->col - ref->col };
+  return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * weight, 7);
+}
+
+static int mv_err_cost(const MV *mv, const MV *ref,
+                       const int *mvjcost, int *mvcost[2],
                        int error_per_bit) {
   if (mvcost) {
-    MV v;
-    v.row = mv->as_mv.row - ref->as_mv.row;
-    v.col = mv->as_mv.col - ref->as_mv.col;
-    return ROUND_POWER_OF_TWO((mvjcost[vp9_get_mv_joint(&v)] +
-                               mvcost[0][v.row] +
-                               mvcost[1][v.col]) * error_per_bit, 13);
+    const MV diff = { mv->row - ref->row,
+                      mv->col - ref->col };
+    return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) *
+                                  error_per_bit, 13);
   }
   return 0;
 }
 
-static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvjsadcost,
-                          int *mvsadcost[2], int error_per_bit) {
+static int mvsad_err_cost(const MV *mv, const MV *ref,
+                          const int *mvjsadcost, int *mvsadcost[2],
+                          int error_per_bit) {
   if (mvsadcost) {
-    MV v;
-    v.row = mv->as_mv.row - ref->as_mv.row;
-    v.col = mv->as_mv.col - ref->as_mv.col;
-    return ROUND_POWER_OF_TWO((mvjsadcost[vp9_get_mv_joint(&v)] +
-                               mvsadcost[0][v.row] +
-                               mvsadcost[1][v.col]) * error_per_bit, 8);
+    const MV diff = { mv->row - ref->row,
+                      mv->col - ref->col };
+    return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjsadcost, mvsadcost) *
+                                  error_per_bit, 8);
   }
   return 0;
 }
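
A quick illustration of the refactored cost path above: the three entry points now differ only in their rounding shift (7 for vp9_mv_bit_cost, 13 for mv_err_cost, 8 for mvsad_err_cost) while the shared work lives in the new mv_cost() helper. The standalone sketch below mimics that composition with toy tables and a stubbed joint classifier; the ROUND_POWER_OF_TWO macro is reproduced from memory of vp9_common.h and the tables are stand-ins, not the real NMV cost model.

#include <stdio.h>

/* Rounding-shift macro, reproduced from memory; verify against vp9_common.h. */
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

typedef struct mv {
  short row;
  short col;
} MV;

/* Stub joint classifier: 0 = both zero, 3 = both nonzero. The real
 * vp9_get_mv_joint() returns the MV_JOINT_* enum from the entropy-mv code. */
static int get_mv_joint(const MV *mv) {
  if (mv->row == 0)
    return mv->col == 0 ? 0 : 1;
  return mv->col == 0 ? 2 : 3;
}

/* Mirrors the new mv_cost(): joint-class cost plus per-component costs. */
static int mv_cost(const MV *mv, const int *joint_cost, int *comp_cost[2]) {
  return joint_cost[get_mv_joint(mv)] +
         comp_cost[0][mv->row] + comp_cost[1][mv->col];
}

int main(void) {
  /* Toy tables; the real ones are indexed by signed components via offset
   * pointers and are derived from the NMV probability model. */
  int joint_cost[4] = { 10, 60, 60, 120 };
  int row_cost[16] = { 0 };
  int col_cost[16] = { 0 };
  int *comp_cost[2] = { row_cost, col_cost };
  row_cost[2] = 90;
  col_cost[5] = 140;

  MV mv = { 2, 5 }, ref = { 0, 0 };
  MV diff = { (short)(mv.row - ref.row), (short)(mv.col - ref.col) };

  /* weight = 102 and shift = 7 match the vp9_mv_bit_cost() call pattern;
   * with these toy tables this prints (120 + 90 + 140) * 102 / 128 = 279. */
  int cost = ROUND_POWER_OF_TWO(mv_cost(&diff, joint_cost, comp_cost) * 102, 7);
  printf("weighted mv bit cost: %d\n", cost);
  return 0;
}
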
@@ -357,7 +358,8 @@ int vp9_find_best_sub_pixel_iterative(MACROBLOCK *x,
   // calculate central point error
   besterr = vfp->vf(y, y_stride, z, src_stride, sse1);
   *distortion = besterr;
-  besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+  besterr += mv_err_cost(&bestmv->as_mv, &ref_mv->as_mv,
+                         mvjcost, mvcost, error_per_bit);
 
   // TODO: Each subsequent iteration checks at least one point in
   // common with the last iteration could be 2 ( if diag selected)
@@ -454,7 +456,8 @@ int vp9_find_best_sub_pixel_tree(MACROBLOCK *x,
   // calculate central point error
   besterr = vfp->vf(y, y_stride, z, src_stride, sse1);
   *distortion = besterr;
-  besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+  besterr += mv_err_cost(&bestmv->as_mv, &ref_mv->as_mv,
+                         mvjcost, mvcost, error_per_bit);
 
   // 1/2 pel
   FIRST_LEVEL_CHECKS;
@@ -553,7 +556,8 @@ int vp9_find_best_sub_pixel_comp_iterative(MACROBLOCK *x,
   comp_avg_pred(comp_pred, second_pred, w, h, y, y_stride);
   besterr = vfp->vf(comp_pred, w, z, src_stride, sse1);
   *distortion = besterr;
-  besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+  besterr += mv_err_cost(&bestmv->as_mv, &ref_mv->as_mv,
+                         mvjcost, mvcost, error_per_bit);
 
   // Each subsequent iteration checks at least one point in
   // common with the last iteration could be 2 ( if diag selected)
@@ -655,7 +659,8 @@ int vp9_find_best_sub_pixel_comp_tree(MACROBLOCK *x,
   comp_avg_pred(comp_pred, second_pred, w, h, y, y_stride);
   besterr = vfp->vf(comp_pred, w, z, src_stride, sse1);
   *distortion = besterr;
-  besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+  besterr += mv_err_cost(&bestmv->as_mv, &ref_mv->as_mv,
+                         mvjcost, mvcost, error_per_bit);
 
   // Each subsequent iteration checks at least one point in
   // common with the last iteration could be 2 ( if diag selected)
@@ -730,7 +735,7 @@ int vp9_find_best_sub_pixel_comp_tree(MACROBLOCK *x,
     if (thissad < bestsad)\
     {\
       if (use_mvcost) \
-        thissad += mvsad_err_cost(&this_mv, &fcenter_mv, \
+        thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv, \
                                   mvjsadcost, mvsadcost, \
                                   sad_per_bit);\
       if (thissad < bestsad)\
@@ -802,10 +807,9 @@ static int vp9_pattern_search(MACROBLOCK *x,
   this_offset = base_offset + (br * in_what_stride) + bc;
   this_mv.as_mv.row = br;
   this_mv.as_mv.col = bc;
-  bestsad = vfp->sdf(what, what_stride, this_offset,
-                     in_what_stride, 0x7fffffff)
-            + mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost, mvsadcost,
-                             sad_per_bit);
+  bestsad = vfp->sdf(what, what_stride, this_offset, in_what_stride, 0x7fffffff)
+            + mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv,
+                             mvjsadcost, mvsadcost, sad_per_bit);
 
   // Search all possible scales upto the search param around the center point
   // pick the scale of the point that is best as the starting scale of
@@ -984,11 +988,12 @@ static int vp9_pattern_search(MACROBLOCK *x,
   this_mv.as_mv.col = best_mv->as_mv.col * 8;
 
   if (bestsad == INT_MAX)
     return INT_MAX;
 
-  return
-      vfp->vf(what, what_stride, this_offset, in_what_stride,
-              (unsigned int *)(&bestsad)) +
-      use_mvcost ? mv_err_cost(&this_mv, center_mv, x->nmvjointcost, x->mvcost,
-                               x->errorperbit) : 0;
+  return vfp->vf(what, what_stride, this_offset, in_what_stride,
+                 (unsigned int *)&bestsad) +
+             use_mvcost ? mv_err_cost(&this_mv.as_mv, &center_mv->as_mv,
+                                      x->nmvjointcost, x->mvcost, x->errorperbit)
+                        : 0;
 }
@@ -1164,10 +1169,9 @@ int vp9_diamond_search_sad_c(MACROBLOCK *x,
   best_address = in_what;
 
   // Check the starting position
-  bestsad = fn_ptr->sdf(what, what_stride, in_what,
-                        in_what_stride, 0x7fffffff)
-            + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
-                             sad_per_bit);
+  bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff)
+            + mvsad_err_cost(&best_mv->as_mv, &fcenter_mv.as_mv,
+                             mvjsadcost, mvsadcost, sad_per_bit);
 
   // search_param determines the length of the initial step and hence the number of iterations
   // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
@@ -1193,7 +1197,7 @@ int vp9_diamond_search_sad_c(MACROBLOCK *x,
         if (thissad < bestsad) {
           this_mv.as_mv.row = this_row_offset;
           this_mv.as_mv.col = this_col_offset;
-          thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+          thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv,
                                     mvjsadcost, mvsadcost, sad_per_bit);
 
           if (thissad < bestsad) {
@@ -1225,7 +1229,7 @@ int vp9_diamond_search_sad_c(MACROBLOCK *x,
       if (thissad < bestsad) {
         this_mv.as_mv.row = this_row_offset;
         this_mv.as_mv.col = this_col_offset;
-        thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+        thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv,
                                   mvjsadcost, mvsadcost, sad_per_bit);
         if (thissad < bestsad) {
           bestsad = thissad;
@@ -1250,8 +1254,9 @@ int vp9_diamond_search_sad_c(MACROBLOCK *x,
     return INT_MAX;
 
   return fn_ptr->vf(what, what_stride, best_address, in_what_stride,
-                    (unsigned int *)(&thissad)) + mv_err_cost(&this_mv, center_mv, mvjcost,
-                                                              mvcost, x->errorperbit);
+                    (unsigned int *)(&thissad)) +
+                        mv_err_cost(&this_mv.as_mv, &center_mv->as_mv,
+                                    mvjcost, mvcost, x->errorperbit);
 }
 
 int vp9_diamond_search_sadx4(MACROBLOCK *x,
@@ -1305,10 +1310,9 @@ int vp9_diamond_search_sadx4(MACROBLOCK *x,
   best_address = in_what;
 
   // Check the starting position
-  bestsad = fn_ptr->sdf(what, what_stride,
-                        in_what, in_what_stride, 0x7fffffff)
-            + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
-                             sad_per_bit);
+  bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff)
+            + mvsad_err_cost(&best_mv->as_mv, &fcenter_mv.as_mv,
+                             mvjsadcost, mvsadcost, sad_per_bit);
 
   // search_param determines the length of the initial step and hence the number of iterations
   // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
@@ -1343,7 +1347,7 @@ int vp9_diamond_search_sadx4(MACROBLOCK *x,
         if (sad_array[t] < bestsad) {
           this_mv.as_mv.row = best_mv->as_mv.row + ss[i].mv.row;
           this_mv.as_mv.col = best_mv->as_mv.col + ss[i].mv.col;
-          sad_array[t] += mvsad_err_cost(&this_mv, &fcenter_mv,
+          sad_array[t] += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv,
                                          mvjsadcost, mvsadcost, sad_per_bit);
 
           if (sad_array[t] < bestsad) {
@@ -1367,7 +1371,7 @@ int vp9_diamond_search_sadx4(MACROBLOCK *x,
         if (thissad < bestsad) {
           this_mv.as_mv.row = this_row_offset;
           this_mv.as_mv.col = this_col_offset;
-          thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+          thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv,
                                     mvjsadcost, mvsadcost, sad_per_bit);
 
           if (thissad < bestsad) {
@@ -1398,7 +1402,7 @@ int vp9_diamond_search_sadx4(MACROBLOCK *x,
       if (thissad < bestsad) {
        this_mv.as_mv.row = this_row_offset;
        this_mv.as_mv.col = this_col_offset;
-       thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+       thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv,
                                  mvjsadcost, mvsadcost, sad_per_bit);
        if (thissad < bestsad) {
          bestsad = thissad;
@@ -1423,8 +1427,9 @@ int vp9_diamond_search_sadx4(MACROBLOCK *x,
     return INT_MAX;
 
   return fn_ptr->vf(what, what_stride, best_address, in_what_stride,
-                    (unsigned int *)(&thissad)) + mv_err_cost(&this_mv,
-                                                              center_mv, mvjcost, mvcost, x->errorperbit);
+                    (unsigned int *)(&thissad)) +
+                        mv_err_cost(&this_mv.as_mv, &center_mv->as_mv,
+                                    mvjcost, mvcost, x->errorperbit);
 }
 
 /* do_refine: If last step (1-away) of n-step search doesn't pick the center
@@ -1535,8 +1540,8 @@ int vp9_full_search_sad_c(MACROBLOCK *x, int_mv *ref_mv,
   // Baseline value at the centre
   bestsad = fn_ptr->sdf(what, what_stride, bestaddress,
                         in_what_stride, 0x7fffffff)
-            + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
-                             sad_per_bit);
+            + mvsad_err_cost(&best_mv->as_mv, &fcenter_mv.as_mv,
+                             mvjsadcost, mvsadcost, sad_per_bit);
 
   // Apply further limits to prevent us looking using vectors that stretch
   // beyond the UMV border
@@ -1553,8 +1558,8 @@ int vp9_full_search_sad_c(MACROBLOCK *x, int_mv *ref_mv,
       thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);
 
       this_mv.as_mv.col = c;
-      thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+      thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv,
                                 mvjsadcost, mvsadcost, sad_per_bit);
 
       if (thissad < bestsad) {
         bestsad = thissad;
@@ -1571,10 +1576,10 @@ int vp9_full_search_sad_c(MACROBLOCK *x, int_mv *ref_mv,
   this_mv.as_mv.col = best_mv->as_mv.col * 8;
 
   if (bestsad < INT_MAX)
-    return
-        fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
-                   (unsigned int *)(&thissad)) +
-        mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
+    return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
+                      (unsigned int *)(&thissad)) +
+               mv_err_cost(&this_mv.as_mv, &center_mv->as_mv,
+                           mvjcost, mvcost, x->errorperbit);
   else
     return INT_MAX;
 }
@@ -1625,8 +1630,8 @@ int vp9_full_search_sadx3(MACROBLOCK *x, int_mv *ref_mv,
   // Baseline value at the centre
   bestsad = fn_ptr->sdf(what, what_stride,
                         bestaddress, in_what_stride, 0x7fffffff)
-            + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
-                             sad_per_bit);
+            + mvsad_err_cost(&best_mv->as_mv, &fcenter_mv.as_mv,
+                             mvjsadcost, mvsadcost, sad_per_bit);
 
   // Apply further limits to prevent us looking using vectors that stretch
   // beyond the UMV border
@@ -1650,8 +1655,8 @@ int vp9_full_search_sadx3(MACROBLOCK *x, int_mv *ref_mv,
       if (thissad < bestsad) {
         this_mv.as_mv.col = c;
-        thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+        thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv,
                                   mvjsadcost, mvsadcost, sad_per_bit);
 
         if (thissad < bestsad) {
           bestsad = thissad;
@@ -1671,7 +1676,7 @@ int vp9_full_search_sadx3(MACROBLOCK *x, int_mv *ref_mv,
       if (thissad < bestsad) {
         this_mv.as_mv.col = c;
-        thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+        thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv,
                                   mvjsadcost, mvsadcost, sad_per_bit);
 
         if (thissad < bestsad) {
@@ -1692,10 +1697,10 @@ int vp9_full_search_sadx3(MACROBLOCK *x, int_mv *ref_mv,
   this_mv.as_mv.col = best_mv->as_mv.col * 8;
 
   if (bestsad < INT_MAX)
-    return
-        fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
-                   (unsigned int *)(&thissad)) +
-        mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
+    return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
+                      (unsigned int *)(&thissad)) +
+               mv_err_cost(&this_mv.as_mv, &center_mv->as_mv,
+                           mvjcost, mvcost, x->errorperbit);
   else
     return INT_MAX;
 }
@@ -1748,8 +1753,8 @@ int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv,
   // Baseline value at the centre
   bestsad = fn_ptr->sdf(what, what_stride,
                         bestaddress, in_what_stride, 0x7fffffff)
-            + mvsad_err_cost(best_mv, &fcenter_mv, mvjsadcost, mvsadcost,
-                             sad_per_bit);
+            + mvsad_err_cost(&best_mv->as_mv, &fcenter_mv.as_mv,
+                             mvjsadcost, mvsadcost, sad_per_bit);
 
   // Apply further limits to prevent us looking using vectors that stretch
   // beyond the UMV border
@@ -1773,8 +1778,8 @@ int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv,
       if (thissad < bestsad) {
         this_mv.as_mv.col = c;
-        thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+        thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv,
                                   mvjsadcost, mvsadcost, sad_per_bit);
 
         if (thissad < bestsad) {
           bestsad = thissad;
@@ -1799,7 +1804,7 @@ int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv,
       if (thissad < bestsad) {
         this_mv.as_mv.col = c;
-        thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+        thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv,
                                   mvjsadcost, mvsadcost, sad_per_bit);
 
         if (thissad < bestsad) {
@@ -1820,8 +1825,8 @@ int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv,
      if (thissad < bestsad) {
        this_mv.as_mv.col = c;
-       thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
+       thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv,
                                  mvjsadcost, mvsadcost, sad_per_bit);
 
        if (thissad < bestsad) {
          bestsad = thissad;
@@ -1840,10 +1845,10 @@ int vp9_full_search_sadx8(MACROBLOCK *x, int_mv *ref_mv,
   this_mv.as_mv.col = best_mv->as_mv.col * 8;
 
   if (bestsad < INT_MAX)
-    return
-        fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
-                   (unsigned int *)(&thissad)) +
-        mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
+    return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
+                      (unsigned int *)(&thissad)) +
+               mv_err_cost(&this_mv.as_mv, &center_mv->as_mv,
+                           mvjcost, mvcost, x->errorperbit);
   else
     return INT_MAX;
 }
@@ -1874,8 +1879,10 @@ int vp9_refining_search_sad_c(MACROBLOCK *x,
   fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
   fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
 
-  bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) +
-      mvsad_err_cost(ref_mv, &fcenter_mv, mvjsadcost, mvsadcost, error_per_bit);
+  bestsad = fn_ptr->sdf(what, what_stride, best_address,
+                        in_what_stride, 0x7fffffff) +
+            mvsad_err_cost(&ref_mv->as_mv, &fcenter_mv.as_mv,
+                           mvjsadcost, mvsadcost, error_per_bit);
 
   for (i = 0; i < search_range; i++) {
     int best_site = -1;
@@ -1892,8 +1899,8 @@ int vp9_refining_search_sad_c(MACROBLOCK *x,
       if (thissad < bestsad) {
         this_mv.as_mv.row = this_row_offset;
         this_mv.as_mv.col = this_col_offset;
-        thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost,
-                                  mvsadcost, error_per_bit);
+        thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv,
+                                  mvjsadcost, mvsadcost, error_per_bit);
 
         if (thissad < bestsad) {
           bestsad = thissad;
@@ -1916,10 +1923,10 @@ int vp9_refining_search_sad_c(MACROBLOCK *x,
   this_mv.as_mv.col = ref_mv->as_mv.col * 8;
 
   if (bestsad < INT_MAX)
-    return
-        fn_ptr->vf(what, what_stride, best_address, in_what_stride,
-                   (unsigned int *)(&thissad)) +
-        mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
+    return fn_ptr->vf(what, what_stride, best_address, in_what_stride,
+                      (unsigned int *)(&thissad)) +
+               mv_err_cost(&this_mv.as_mv, &center_mv->as_mv,
+                           mvjcost, mvcost, x->errorperbit);
   else
     return INT_MAX;
 }
@@ -1951,8 +1958,10 @@ int vp9_refining_search_sadx4(MACROBLOCK *x,
   fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
   fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
 
-  bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) +
-      mvsad_err_cost(ref_mv, &fcenter_mv, mvjsadcost, mvsadcost, error_per_bit);
+  bestsad = fn_ptr->sdf(what, what_stride, best_address,
+                        in_what_stride, 0x7fffffff) +
+            mvsad_err_cost(&ref_mv->as_mv, &fcenter_mv.as_mv,
+                           mvjsadcost, mvsadcost, error_per_bit);
 
   for (i = 0; i < search_range; i++) {
     int best_site = -1;
@@ -1975,8 +1984,8 @@ int vp9_refining_search_sadx4(MACROBLOCK *x,
        if (sad_array[j] < bestsad) {
          this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row;
          this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col;
-         sad_array[j] += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost,
-                                        mvsadcost, error_per_bit);
+         sad_array[j] += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv,
+                                        mvjsadcost, mvsadcost, error_per_bit);
 
          if (sad_array[j] < bestsad) {
            bestsad = sad_array[j];
@@ -1997,8 +2006,8 @@ int vp9_refining_search_sadx4(MACROBLOCK *x,
        if (thissad < bestsad) {
          this_mv.as_mv.row = this_row_offset;
          this_mv.as_mv.col = this_col_offset;
-         thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost,
-                                   mvsadcost, error_per_bit);
+         thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv,
+                                   mvjsadcost, mvsadcost, error_per_bit);
 
          if (thissad < bestsad) {
            bestsad = thissad;
@@ -2022,10 +2031,10 @@ int vp9_refining_search_sadx4(MACROBLOCK *x,
   this_mv.as_mv.col = ref_mv->as_mv.col * 8;
 
   if (bestsad < INT_MAX)
-    return
-        fn_ptr->vf(what, what_stride, best_address, in_what_stride,
-                   (unsigned int *)(&thissad)) +
-        mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
+    return fn_ptr->vf(what, what_stride, best_address, in_what_stride,
+                      (unsigned int *)(&thissad)) +
+               mv_err_cost(&this_mv.as_mv, &center_mv->as_mv,
+                           mvjcost, mvcost, x->errorperbit);
   else
     return INT_MAX;
 }
@@ -2065,7 +2074,8 @@ int vp9_refining_search_8p_c(MACROBLOCK *x,
   /* Get compound pred by averaging two pred blocks. */
   bestsad = fn_ptr->sdaf(what, what_stride, best_address, in_what_stride,
                          second_pred, 0x7fffffff) +
-      mvsad_err_cost(ref_mv, &fcenter_mv, mvjsadcost, mvsadcost, error_per_bit);
+            mvsad_err_cost(&ref_mv->as_mv, &fcenter_mv.as_mv,
+                           mvjsadcost, mvsadcost, error_per_bit);
 
   for (i = 0; i < search_range; i++) {
     int best_site = -1;
@@ -2088,9 +2098,8 @@ int vp9_refining_search_8p_c(MACROBLOCK *x,
       if (thissad < bestsad) {
         this_mv.as_mv.row = this_row_offset;
         this_mv.as_mv.col = this_col_offset;
-        thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvjsadcost,
-                                  mvsadcost, error_per_bit);
-
+        thissad += mvsad_err_cost(&this_mv.as_mv, &fcenter_mv.as_mv,
+                                  mvjsadcost, mvsadcost, error_per_bit);
         if (thissad < bestsad) {
           bestsad = thissad;
           best_site = j;
@@ -2115,10 +2124,10 @@ int vp9_refining_search_8p_c(MACROBLOCK *x,
   if (bestsad < INT_MAX) {
     // FIXME(rbultje, yunqing): add full-pixel averaging variance functions
     // so we don't have to use the subpixel with xoff=0,yoff=0 here.
-    return fn_ptr->svaf(best_address, in_what_stride, 0, 0,
-                        what, what_stride, (unsigned int *)(&thissad),
-                        second_pred) +
-        mv_err_cost(&this_mv, center_mv, mvjcost, mvcost, x->errorperbit);
+    return fn_ptr->svaf(best_address, in_what_stride, 0, 0, what, what_stride,
+                        (unsigned int *)(&thissad), second_pred) +
+               mv_err_cost(&this_mv.as_mv, &center_mv->as_mv,
+                           mvjcost, mvcost, x->errorperbit);
   } else {
     return INT_MAX;
   }

@@ -24,8 +24,8 @@
 #define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1))
 
 void vp9_clamp_mv_min_max(MACROBLOCK *x, MV *mv);
-int vp9_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvjcost,
-                    int *mvcost[2], int weight);
+int vp9_mv_bit_cost(const MV *mv, const MV *ref,
+                    const int *mvjcost, int *mvcost[2], int weight);
 void vp9_init_dsmotion_compensation(MACROBLOCK *x, int stride);
 void vp9_init3smotion_compensation(MACROBLOCK *x, int stride);

@@ -1458,11 +1458,12 @@ static int labels2mode(MACROBLOCK *x, int i,
   switch (m = this_mode) {
     case NEWMV:
       this_mv->as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
-      thismvcost = vp9_mv_bit_cost(this_mv, best_ref_mv, mvjcost, mvcost,
-                                   102);
+      thismvcost = vp9_mv_bit_cost(&this_mv->as_mv, &best_ref_mv->as_mv,
+                                   mvjcost, mvcost, 102);
       if (has_second_rf) {
         this_second_mv->as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
-        thismvcost += vp9_mv_bit_cost(this_second_mv, second_best_ref_mv,
+        thismvcost += vp9_mv_bit_cost(&this_second_mv->as_mv,
+                                      &second_best_ref_mv->as_mv,
                                       mvjcost, mvcost, 102);
       }
       break;
@@ -2442,9 +2443,8 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
                                  x->nmvjointcost, x->mvcost,
                                  &dis, &sse);
   }
-  *rate_mv = vp9_mv_bit_cost(tmp_mv, &ref_mv,
-                             x->nmvjointcost, x->mvcost,
-                             96);
+  *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv.as_mv,
+                             x->nmvjointcost, x->mvcost, 96);
 
   if (cpi->sf.adaptive_motion_search && cpi->common.show_frame)
     x->pred_mv[ref].as_int = tmp_mv->as_int;
@@ -2603,11 +2603,11 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
       for (i = 0; i < MAX_MB_PLANE; i++)
         xd->plane[i].pre[1] = backup_second_yv12[i];
     }
-    *rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]],
-                               &mbmi->ref_mvs[refs[0]][0],
+    *rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]].as_mv,
+                               &mbmi->ref_mvs[refs[0]][0].as_mv,
                                x->nmvjointcost, x->mvcost, 96);
-    *rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]],
-                                &mbmi->ref_mvs[refs[1]][0],
+    *rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]].as_mv,
+                                &mbmi->ref_mvs[refs[1]][0].as_mv,
                                 x->nmvjointcost, x->mvcost, 96);
 
   vpx_free(second_pred);
@@ -2659,11 +2659,11 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
         joint_motion_search(cpi, x, bsize, frame_mv,
                             mi_row, mi_col, single_newmv, &rate_mv);
       } else {
-        rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]],
-                                  &mbmi->ref_mvs[refs[0]][0],
+        rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]].as_mv,
+                                  &mbmi->ref_mvs[refs[0]][0].as_mv,
                                   x->nmvjointcost, x->mvcost, 96);
-        rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]],
-                                   &mbmi->ref_mvs[refs[1]][0],
+        rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]].as_mv,
+                                   &mbmi->ref_mvs[refs[1]][0].as_mv,
                                    x->nmvjointcost, x->mvcost, 96);
       }
       if (frame_mv[refs[0]].as_int == INVALID_MV ||