A fix in MV_REF experiment

This fix ensures that the forward prob update is not turned off for
motion vectors.

Change-Id: I0b63c9401155926763c6294df6cca68b32bac340
This commit is contained in:
Deb Mukherjee 2012-11-09 10:52:08 -08:00
parent 5d65614fdd
commit 7de64f35d3
8 changed files with 277 additions and 170 deletions

View File

@ -230,6 +230,10 @@ typedef struct {
TX_SIZE txfm_size;
int_mv mv[2]; // for each reference frame used
int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REFS];
int_mv best_mv, best_second_mv;
#if CONFIG_NEW_MVREF
int best_index, best_second_index;
#endif
int mb_mode_context[MAX_REF_FRAMES];

View File

@ -226,6 +226,11 @@ static void adapt_prob(vp9_prob *dest, vp9_prob prep, vp9_prob newp,
}
}
// Convert the raw per-component mv counts into context form for both
// the row (comps[0]) and column (comps[1]) components.
void vp9_counts_process(nmv_context_counts *NMVcount, int usehp) {
  int comp;
  for (comp = 0; comp < 2; ++comp)
    counts_to_context(&NMVcount->comps[comp], usehp);
}
void vp9_counts_to_nmv_context(
nmv_context_counts *NMVcount,
nmv_context *prob,
@ -240,8 +245,7 @@ void vp9_counts_to_nmv_context(
unsigned int (*branch_ct_class0_hp)[2],
unsigned int (*branch_ct_hp)[2]) {
int i, j, k;
counts_to_context(&NMVcount->comps[0], usehp);
counts_to_context(&NMVcount->comps[1], usehp);
vp9_counts_process(NMVcount, usehp);
vp9_tree_probs_from_distribution(MV_JOINTS,
vp9_mv_joint_encodings,
vp9_mv_joint_tree,

View File

@ -125,5 +125,5 @@ void vp9_counts_to_nmv_context(
unsigned int (*branch_ct_fp)[4 - 1][2],
unsigned int (*branch_ct_class0_hp)[2],
unsigned int (*branch_ct_hp)[2]);
void vp9_counts_process(nmv_context_counts *NMVcount, int usehp);
#endif

View File

@ -255,70 +255,6 @@ static void update_refpred_stats(VP9_COMP *cpi) {
}
}
// Accumulate motion-vector statistics (cpi->NMVcount) for the current
// macroblock so the mv probabilities can be adapted later.
// best_ref_mv / second_best_ref_mv are the prediction references for the
// first and (optional) second motion vector.
//
// The original hp and non-hp branches were byte-identical except for the
// final argument to vp9_increment_nmv, so the precision flag is hoisted
// into a local and the duplicated code removed.
static void update_mvcount(VP9_COMP *cpi, MACROBLOCK *x,
                           int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
  MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
  const int usehp = x->e_mbd.allow_high_precision_mv;
  MV mv;

  if (mbmi->mode == SPLITMV) {
    int i;
    for (i = 0; i < x->partition_info->count; i++) {
      // Only newly coded sub-block vectors contribute to the counts.
      if (x->partition_info->bmi[i].mode != NEW4X4)
        continue;
      mv.row = (x->partition_info->bmi[i].mv.as_mv.row
                - best_ref_mv->as_mv.row);
      mv.col = (x->partition_info->bmi[i].mv.as_mv.col
                - best_ref_mv->as_mv.col);
      vp9_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, usehp);
      if (mbmi->second_ref_frame) {
        mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
                  - second_best_ref_mv->as_mv.row);
        mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
                  - second_best_ref_mv->as_mv.col);
        vp9_increment_nmv(&mv, &second_best_ref_mv->as_mv,
                          &cpi->NMVcount, usehp);
      }
    }
  } else if (mbmi->mode == NEWMV) {
    mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
    mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
    vp9_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, usehp);
    if (mbmi->second_ref_frame) {
      mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
      mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
      vp9_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount,
                        usehp);
    }
  }
}
// Emit the chosen macroblock luma (Y) mode m to the bitstream using the
// ymode entropy tree and the supplied probability table p.
static void write_ymode(vp9_writer *bc, int m, const vp9_prob *p) {
  write_token(bc, vp9_ymode_tree, p, &vp9_ymode_encodings[m]);
}
@ -625,38 +561,6 @@ static void write_nmv(vp9_writer *bc, const MV *mv, const int_mv *ref,
}
#if CONFIG_NEW_MVREF
// Return the bit cost of signalling mv_ref_id (0..3) with the unary-style
// tree used for the MV reference index: one "one" bit per index skipped,
// then a terminating "zero" bit for every index except the last (3).
static int vp9_cost_mv_ref_id(vp9_prob * ref_id_probs, int mv_ref_id) {
  int cost = 0;
  int bit;

  // TRAP.. This should not happen
  assert(mv_ref_id >= 0 && mv_ref_id <= 3);

  for (bit = 0; bit < mv_ref_id; ++bit)
    cost += vp9_cost_one(ref_id_probs[bit]);
  if (mv_ref_id < 3)
    cost += vp9_cost_zero(ref_id_probs[mv_ref_id]);
  return cost;
}
static void vp9_write_mv_ref_id(vp9_writer *w,
vp9_prob * ref_id_probs,
int mv_ref_id) {
@ -686,56 +590,6 @@ static void vp9_write_mv_ref_id(vp9_writer *w,
break;
}
}
// Estimate the cost of coding the target vector using each reference candidate
// Pick, among the first few candidate reference vectors, the one that
// codes target_mv most cheaply (index signalling cost + mv residual cost).
// The winning vector is copied into *best_ref and its index returned.
static unsigned int pick_best_mv_ref(MACROBLOCK *x,
                                     MV_REFERENCE_FRAME ref_frame,
                                     int_mv target_mv,
                                     int_mv * mv_ref_list,
                                     int_mv * best_ref) {
  MACROBLOCKD *xd = &x->e_mbd;
  int max_mv = MV_MAX;
  int zero_seen = mv_ref_list[0].as_int ? FALSE : TRUE;
  int best_index = 0;
  int best_cost;
  int i;

  best_cost = vp9_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], 0) +
              vp9_mv_bit_cost(&target_mv, &mv_ref_list[0], x->nmvjointcost,
                              x->mvcost, 96, xd->allow_high_precision_mv);

  // Use 4 for now : for (i = 1; i < MAX_MV_REFS; ++i ) {
  for (i = 1; i < 4; ++i) {
    int this_cost;
    // A second (0,0) entry marks the end of the valid candidate list.
    if (!mv_ref_list[i].as_int) {
      if (zero_seen)
        break;
      zero_seen = TRUE;
    }
    // Skip candidates whose residual would be uncodable / out of range
    // for the row or column component.
    if (abs(target_mv.as_mv.row - mv_ref_list[i].as_mv.row) > max_mv ||
        abs(target_mv.as_mv.col - mv_ref_list[i].as_mv.col) > max_mv)
      continue;
    this_cost = vp9_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], i) +
                vp9_mv_bit_cost(&target_mv, &mv_ref_list[i], x->nmvjointcost,
                                x->mvcost, 96, xd->allow_high_precision_mv);
    if (this_cost < best_cost) {
      best_cost = this_cost;
      best_index = i;
    }
  }

  best_ref->as_int = mv_ref_list[best_index].as_int;
  return best_index;
}
#endif
// This function writes the current macro block's segment id to the bitstream
@ -925,6 +779,7 @@ static void pack_inter_mode_mvs(VP9_COMP *const cpi, vp9_writer *const bc) {
for (i = 0; i < 4; i++) {
MB_MODE_INFO *mi;
MV_REFERENCE_FRAME rf;
MV_REFERENCE_FRAME sec_ref_frame;
MB_PREDICTION_MODE mode;
int segment_id, skip_coeff;
@ -944,6 +799,7 @@ static void pack_inter_mode_mvs(VP9_COMP *const cpi, vp9_writer *const bc) {
mi = &m->mbmi;
rf = mi->ref_frame;
sec_ref_frame = mi->second_ref_frame;
mode = mi->mode;
segment_id = mi->segment_id;
@ -1131,8 +987,14 @@ static void pack_inter_mode_mvs(VP9_COMP *const cpi, vp9_writer *const bc) {
unsigned int best_index;
// Choose the best mv reference
/*
best_index = pick_best_mv_ref(x, rf, mi->mv[0],
mi->ref_mvs[rf], &best_mv);
assert(best_index == mi->best_index);
assert(best_mv.as_int == mi->best_mv.as_int);
*/
best_index = mi->best_index;
best_mv.as_int = mi->best_mv.as_int;
// Encode the index of the choice.
vp9_write_mv_ref_id(bc,
@ -1150,12 +1012,18 @@ static void pack_inter_mode_mvs(VP9_COMP *const cpi, vp9_writer *const bc) {
if (mi->second_ref_frame) {
#if CONFIG_NEW_MVREF
unsigned int best_index;
MV_REFERENCE_FRAME sec_ref_frame = mi->second_ref_frame;
sec_ref_frame = mi->second_ref_frame;
/*
best_index =
pick_best_mv_ref(x, sec_ref_frame, mi->mv[1],
mi->ref_mvs[sec_ref_frame],
&best_second_mv);
assert(best_index == mi->best_second_index);
assert(best_second_mv.as_int == mi->best_second_mv.as_int);
*/
best_index = mi->best_second_index;
best_second_mv.as_int = mi->best_second_mv.as_int;
// Encode the index of the choice.
vp9_write_mv_ref_id(bc,
@ -1227,12 +1095,10 @@ static void pack_inter_mode_mvs(VP9_COMP *const cpi, vp9_writer *const bc) {
break;
}
}
// Update the mvcounts used to tune mv probs but only if this is
// the real pack run.
if ( !cpi->dummy_packing ) {
update_mvcount(cpi, x, &best_mv, &best_second_mv);
}
/* This is not required if the counts in cpi are consistent with the
* final packing pass */
// if (!cpi->dummy_packing)
// vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv);
}
if (((rf == INTRA_FRAME && mode <= I8X8_PRED) ||
@ -1352,10 +1218,6 @@ static void write_mb_modes_kf(const VP9_COMMON *c,
#endif
write_kf_bmode(bc, bm, c->kf_bmode_prob[A][L]);
#if 0 // CONFIG_NEWBINTRAMODES
if (!cpi->dummy_packing)
printf("%d: %d %d\n", i, bm, m->bmi[i].as_mode.context);
#endif
#if CONFIG_COMP_INTRA_PRED
if (uses_second) {
write_kf_bmode(bc, bm2, c->kf_bmode_prob[A][L]);
@ -2242,12 +2104,7 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
update_mbintra_mode_probs(cpi, &header_bc);
#if CONFIG_NEW_MVREF
// Temporary default probabilities for encoding the MV ref id signal
vpx_memset(xd->mb_mv_ref_id_probs, 192, sizeof(xd->mb_mv_ref_id_probs));
#endif
vp9_write_nmvprobs(cpi, xd->allow_high_precision_mv, &header_bc);
vp9_write_nmv_probs(cpi, xd->allow_high_precision_mv, &header_bc);
}
vp9_stop_encode(&header_bc);
@ -2273,7 +2130,11 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
decide_kf_ymode_entropy(cpi);
write_kfmodes(cpi, &residual_bc);
} else {
/* This is not required if the counts in cpi are consistent with the
* final packing pass */
// if (!cpi->dummy_packing) vp9_zero(cpi->NMVcount);
pack_inter_mode_mvs(cpi, &residual_bc);
vp9_update_mode_context(&cpi->common);
}

View File

@ -361,7 +361,90 @@ void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x) {
adjust_act_zbin(cpi, x);
}
static void update_state(VP9_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
#if CONFIG_NEW_MVREF
// Return the bit cost of signalling mv_ref_id (0..3) with the unary-style
// tree used for the MV reference index: one "one" bit per index skipped,
// then a terminating "zero" bit for every index except the last (3).
static int vp9_cost_mv_ref_id(vp9_prob * ref_id_probs, int mv_ref_id) {
  int cost = 0;
  int bit;

  // TRAP.. This should not happen
  assert(mv_ref_id >= 0 && mv_ref_id <= 3);

  for (bit = 0; bit < mv_ref_id; ++bit)
    cost += vp9_cost_one(ref_id_probs[bit]);
  if (mv_ref_id < 3)
    cost += vp9_cost_zero(ref_id_probs[mv_ref_id]);
  return cost;
}
// Estimate the cost of coding the target vector using each reference candidate
// Pick, among the first few candidate reference vectors, the one that
// codes target_mv most cheaply (index signalling cost + mv residual cost).
// The winning vector is copied into *best_ref and its index returned.
static unsigned int pick_best_mv_ref(MACROBLOCK *x,
                                     MV_REFERENCE_FRAME ref_frame,
                                     int_mv target_mv,
                                     int_mv * mv_ref_list,
                                     int_mv * best_ref) {
  MACROBLOCKD *xd = &x->e_mbd;
  int max_mv = MV_MAX;
  int zero_seen = mv_ref_list[0].as_int ? FALSE : TRUE;
  int best_index = 0;
  int best_cost;
  int i;

  best_cost = vp9_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], 0) +
              vp9_mv_bit_cost(&target_mv, &mv_ref_list[0], x->nmvjointcost,
                              x->mvcost, 96, xd->allow_high_precision_mv);

  // Use 4 for now : for (i = 1; i < MAX_MV_REFS; ++i ) {
  for (i = 1; i < 4; ++i) {
    int this_cost;
    // A second (0,0) entry marks the end of the valid candidate list.
    if (!mv_ref_list[i].as_int) {
      if (zero_seen)
        break;
      zero_seen = TRUE;
    }
    // Skip candidates whose residual would be uncodable / out of range
    // for the row or column component.
    if (abs(target_mv.as_mv.row - mv_ref_list[i].as_mv.row) > max_mv ||
        abs(target_mv.as_mv.col - mv_ref_list[i].as_mv.col) > max_mv)
      continue;
    this_cost = vp9_cost_mv_ref_id(xd->mb_mv_ref_id_probs[ref_frame], i) +
                vp9_mv_bit_cost(&target_mv, &mv_ref_list[i], x->nmvjointcost,
                                x->mvcost, 96, xd->allow_high_precision_mv);
    if (this_cost < best_cost) {
      best_cost = this_cost;
      best_index = i;
    }
  }

  best_ref->as_int = mv_ref_list[best_index].as_int;
  return best_index;
}
#endif
static void update_state(VP9_COMP *cpi, MACROBLOCK *x,
PICK_MODE_CONTEXT *ctx) {
int i;
MACROBLOCKD *xd = &x->e_mbd;
MODE_INFO *mi = &ctx->mic;
@ -465,6 +548,36 @@ static void update_state(VP9_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
*/
// Note how often each mode chosen as best
cpi->mode_chosen_counts[mb_mode_index]++;
if (mbmi->mode == SPLITMV || mbmi->mode == NEWMV) {
static int testcount = 0;
int_mv best_mv, best_second_mv;
unsigned int best_index;
MV_REFERENCE_FRAME rf = mbmi->ref_frame;
MV_REFERENCE_FRAME sec_ref_frame = mbmi->second_ref_frame;
best_mv.as_int = ctx->best_ref_mv.as_int;
best_second_mv.as_int = ctx->second_best_ref_mv.as_int;
if (mbmi->mode == NEWMV) {
best_mv.as_int = mbmi->ref_mvs[rf][0].as_int;
best_second_mv.as_int = mbmi->ref_mvs[mbmi->second_ref_frame][0].as_int;
#if CONFIG_NEW_MVREF
best_index = pick_best_mv_ref(x, rf, mbmi->mv[0],
mbmi->ref_mvs[rf], &best_mv);
mbmi->best_index = best_index;
if (mbmi->second_ref_frame) {
unsigned int best_index;
best_index =
pick_best_mv_ref(x, sec_ref_frame, mbmi->mv[1],
mbmi->ref_mvs[sec_ref_frame],
&best_second_mv);
mbmi->best_second_index = best_index;
}
#endif
}
mbmi->best_mv.as_int = best_mv.as_int;
mbmi->best_second_mv.as_int = best_second_mv.as_int;
vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv);
}
cpi->prediction_error += ctx->distortion;
cpi->intra_error += ctx->intra_error;

View File

@ -208,6 +208,54 @@ static int update_nmv(
}
}
// Debug helper: dump a full set of nmv (new motion vector) statistics to
// stdout, formatted as a C initializer so the table can be pasted back
// into source. The exact byte layout of the output is the contract here,
// so the printf sequence must not be reshuffled.
// NOTE(review): takes the counts struct by value to match the public
// prototype — large copy, but presumably only used for offline debugging.
void print_nmvcounts(nmv_context_counts tnmvcounts) {
  int i, j, k;
  // Joint (row/col zero-ness) counts.
  printf("\nCounts =\n { ");
  for (j = 0; j < MV_JOINTS; ++j)
    printf("%d, ", tnmvcounts.joints[j]);
  printf("},\n");
  // Per-component stats: comps[0] is row, comps[1] is col.
  for (i = 0; i < 2; ++i) {
    printf(" {\n");
    // Sign counts (negative/positive).
    printf(" %d/%d,\n", tnmvcounts.comps[i].sign[0],
           tnmvcounts.comps[i].sign[1]);
    // Magnitude class counts.
    printf(" { ");
    for (j = 0; j < MV_CLASSES; ++j)
      printf("%d, ", tnmvcounts.comps[i].classes[j]);
    printf("},\n");
    // Class-0 (small magnitude) integer counts.
    printf(" { ");
    for (j = 0; j < CLASS0_SIZE; ++j)
      printf("%d, ", tnmvcounts.comps[i].class0[j]);
    printf("},\n");
    // Per-bit 0/1 counts for the offset bits of larger classes.
    printf(" { ");
    for (j = 0; j < MV_OFFSET_BITS; ++j)
      printf("%d/%d, ", tnmvcounts.comps[i].bits[j][0],
             tnmvcounts.comps[i].bits[j][1]);
    printf("},\n");
    // Class-0 fractional-pel counts (4 fp positions per class0 value).
    printf(" {");
    for (j = 0; j < CLASS0_SIZE; ++j) {
      printf("{");
      for (k = 0; k < 4; ++k)
        printf("%d, ", tnmvcounts.comps[i].class0_fp[j][k]);
      printf("}, ");
    }
    printf("},\n");
    // General fractional-pel counts.
    printf(" { ");
    for (j = 0; j < 4; ++j)
      printf("%d, ", tnmvcounts.comps[i].fp[j]);
    printf("},\n");
    // High-precision bit counts (class-0 and general).
    printf(" %d/%d,\n",
           tnmvcounts.comps[i].class0_hp[0],
           tnmvcounts.comps[i].class0_hp[1]);
    printf(" %d/%d,\n",
           tnmvcounts.comps[i].hp[0],
           tnmvcounts.comps[i].hp[1]);
    printf(" },\n");
  }
}
#ifdef NMV_STATS
void init_nmvstats() {
vp9_zero(tnmvcounts);
@ -235,7 +283,7 @@ void print_nmvstats() {
for (j = 0; j < MV_JOINTS; ++j)
printf("%d, ", tnmvcounts.joints[j]);
printf("},\n");
for (i=0; i< 2; ++i) {
for (i = 0; i < 2; ++i) {
printf(" {\n");
printf(" %d/%d,\n", tnmvcounts.comps[i].sign[0],
tnmvcounts.comps[i].sign[1]);
@ -354,7 +402,7 @@ static void add_nmvcount(nmv_context_counts* const dst,
}
#endif
void vp9_write_nmvprobs(VP9_COMP* const cpi, int usehp, vp9_writer* const bc) {
void vp9_write_nmv_probs(VP9_COMP* const cpi, int usehp, vp9_writer* const bc) {
int i, j;
nmv_context prob;
unsigned int branch_ct_joint[MV_JOINTS - 1][2];
@ -545,3 +593,67 @@ void vp9_build_nmv_cost_table(int *mvjoint,
if (mvc_flag_h)
build_nmv_component_cost_table(mvcost[1], &mvctx->comps[1], usehp);
}
// Accumulate motion-vector statistics (cpi->NMVcount) for the current
// macroblock so the mv probabilities can be adapted later.
// best_ref_mv / second_best_ref_mv are the prediction references for the
// first and (optional) second motion vector.
//
// The original hp and non-hp branches were byte-identical except for the
// final argument to vp9_increment_nmv, so the precision flag is hoisted
// into a local and the duplicated code removed.
void vp9_update_nmv_count(VP9_COMP *cpi, MACROBLOCK *x,
                          int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
  MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
  const int usehp = x->e_mbd.allow_high_precision_mv;
  MV mv;

  if (mbmi->mode == SPLITMV) {
    int i;
    for (i = 0; i < x->partition_info->count; i++) {
      // Only newly coded sub-block vectors contribute to the counts.
      if (x->partition_info->bmi[i].mode != NEW4X4)
        continue;
      mv.row = (x->partition_info->bmi[i].mv.as_mv.row
                - best_ref_mv->as_mv.row);
      mv.col = (x->partition_info->bmi[i].mv.as_mv.col
                - best_ref_mv->as_mv.col);
      vp9_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, usehp);
      if (mbmi->second_ref_frame) {
        mv.row = (x->partition_info->bmi[i].second_mv.as_mv.row
                  - second_best_ref_mv->as_mv.row);
        mv.col = (x->partition_info->bmi[i].second_mv.as_mv.col
                  - second_best_ref_mv->as_mv.col);
        vp9_increment_nmv(&mv, &second_best_ref_mv->as_mv,
                          &cpi->NMVcount, usehp);
      }
    }
  } else if (mbmi->mode == NEWMV) {
    mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
    mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
    vp9_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, usehp);
    if (mbmi->second_ref_frame) {
      mv.row = (mbmi->mv[1].as_mv.row - second_best_ref_mv->as_mv.row);
      mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
      vp9_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount,
                        usehp);
    }
  }
}

View File

@ -14,7 +14,7 @@
#include "onyx_int.h"
void vp9_write_nmvprobs(VP9_COMP* const, int usehp, vp9_writer* const);
void vp9_write_nmv_probs(VP9_COMP* const, int usehp, vp9_writer* const);
void vp9_encode_nmv(vp9_writer* const w, const MV* const mv,
const MV* const ref, const nmv_context* const mvctx);
void vp9_encode_nmv_fp(vp9_writer* const w, const MV* const mv,
@ -26,5 +26,8 @@ void vp9_build_nmv_cost_table(int *mvjoint,
int usehp,
int mvc_flag_v,
int mvc_flag_h);
void vp9_update_nmv_count(VP9_COMP *cpi, MACROBLOCK *x,
int_mv *best_ref_mv, int_mv *second_best_ref_mv);
void print_nmvcounts(nmv_context_counts tnmvcounts);
#endif

View File

@ -2959,6 +2959,12 @@ static void encode_frame_to_data_rate
// Set default state for segment based loop filter update flags
xd->mode_ref_lf_delta_update = 0;
#if CONFIG_NEW_MVREF
// Temporary default probabilities for encoding the MV ref id signal
vpx_memset(xd->mb_mv_ref_id_probs, 192,
sizeof(xd->mb_mv_ref_id_probs));
#endif
// Set various flags etc to special state if it is a key frame
if (cm->frame_type == KEY_FRAME) {
int i;
@ -3631,6 +3637,10 @@ static void encode_frame_to_data_rate
vp9_adapt_mode_probs(&cpi->common);
cpi->common.fc.NMVcount = cpi->NMVcount;
/*
printf("2: %d %d %d %d\n", cpi->NMVcount.joints[0], cpi->NMVcount.joints[1],
cpi->NMVcount.joints[2], cpi->NMVcount.joints[3]);
*/
vp9_adapt_nmv_probs(&cpi->common, cpi->mb.e_mbd.allow_high_precision_mv);
vp9_update_mode_context(&cpi->common);
}