Merging in high_precision_mv experiment
Merged in the high_precision_mv experiment to make it easier to work on new mv encoding strategies. Also removed coef_update_probs3().

Change-Id: I82d3b0bb642419fe05dba82528bc9ba010e90924
This commit is contained in:
parent fcbff9ee04
commit 2af5473a90
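For context on the diff that follows: the commit folds the high_precision_mv experiment into the main code (it is dropped from EXPERIMENT_LIST in configure), and a per-frame flag, xd->allow_high_precision_mv, selects between quarter-pel and eighth-pel motion vectors at runtime. Below is a minimal, illustrative C sketch of the truncation idiom the patch uses in vp8_find_near_mvs when high precision is disabled; the helper name is an assumption for illustration, not part of the patch.

/* Illustrative sketch only -- mirrors the pattern visible in the
 * vp8_find_near_mvs hunk further down: when high-precision MVs are
 * disabled, the 1/8-pel bit of each component is dropped toward zero
 * so the vector stays representable at quarter-pel precision. */
static int truncate_to_quarter_pel(int v) {
  if (v & 1)                /* odd value => carries a 1/8-pel fraction */
    v += (v > 0) ? -1 : 1;  /* step one unit toward zero */
  return v;
}

/* Hypothetical usage: applied to both row and col of best_mv, nearest and
 * nearby when !xd->allow_high_precision_mv. */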
@@ -216,7 +216,6 @@ HAVE_LIST="
EXPERIMENT_LIST="
csm
featureupdates
high_precision_mv
sixteenth_subpel_uv
comp_intra_pred
superblocks

@@ -404,9 +404,7 @@ typedef struct MacroBlockD {
vp8_subpix_fn_t subpixel_predict_avg8x4;
vp8_subpix_fn_t subpixel_predict_avg8x8;
vp8_subpix_fn_t subpixel_predict_avg16x16;
#if CONFIG_HIGH_PRECISION_MV
int allow_high_precision_mv;
#endif /* CONFIG_HIGH_PRECISION_MV */

void *current_bc;

@@ -100,7 +100,7 @@ extern DECLARE_ALIGNED(16, const int, vp8_coef_bands_16x16[256]);

#define SUBEXP_PARAM 4 /* Subexponential code parameter */
#define MODULUS_PARAM 13 /* Modulus parameter */
#define COEFUPDATETYPE 1 /* coef update type to use (1/2/3) */
#define COEFUPDATETYPE 1 /* coef update type to use (1/2) */

extern DECLARE_ALIGNED(16, const unsigned char, vp8_prev_token_class[MAX_ENTROPY_TOKENS]);

@@ -12,7 +12,6 @@
#include "onyxc_int.h"
#include "entropymv.h"

#if CONFIG_HIGH_PRECISION_MV
const MV_CONTEXT_HP vp8_mv_update_probs_hp[2] = {
{{
237,
@@ -47,7 +46,6 @@ const MV_CONTEXT_HP vp8_default_mv_context_hp[2] = {
}
}
};
#endif /* CONFIG_HIGH_PRECISION_MV */

const MV_CONTEXT vp8_mv_update_probs[2] = {
{{
@@ -84,7 +82,6 @@ const MV_CONTEXT vp8_default_mv_context[2] = {
}
};

#if CONFIG_HIGH_PRECISION_MV
const vp8_tree_index vp8_small_mvtree_hp [30] = {
2, 16,
4, 10,
@@ -103,7 +100,6 @@ const vp8_tree_index vp8_small_mvtree_hp [30] = {
-14, -15
};
struct vp8_token_struct vp8_small_mvencodings_hp [16];
#endif /* CONFIG_HIGH_PRECISION_MV */

const vp8_tree_index vp8_small_mvtree [14] = {
2, 8,
@@ -196,7 +192,6 @@ static void compute_component_probs(
}
}

#if CONFIG_HIGH_PRECISION_MV
static void compute_component_probs_hp(
const unsigned int events [MVvals_hp],
vp8_prob Pnew [MVPcount_hp],
@@ -268,13 +263,10 @@ static void compute_component_probs_hp(
while (++j < mvlong_width_hp);
}
}
#endif /* CONFIG_HIGH_PRECISION_MV */

void vp8_entropy_mv_init() {
vp8_tokens_from_tree(vp8_small_mvencodings, vp8_small_mvtree);
#if CONFIG_HIGH_PRECISION_MV
vp8_tokens_from_tree(vp8_small_mvencodings_hp, vp8_small_mvtree_hp);
#endif
}

// #define MV_COUNT_TESTING
@@ -293,7 +285,6 @@ void vp8_adapt_mv_probs(VP8_COMMON *cm) {
printf("},\n");
}
printf("};\n");
#if CONFIG_HIGH_PRECISION_MV
printf("static const unsigned int\nMVcount_hp[2][MVvals_hp]={\n");
for (i = 0; i < 2; ++i) {
printf(" { ");
@@ -304,7 +295,6 @@ void vp8_adapt_mv_probs(VP8_COMMON *cm) {
printf("},\n");
}
printf("};\n");
#endif
#endif /* MV_COUNT_TESTING */

for (i = 0; i < 2; ++i) {
@@ -357,7 +347,6 @@ void vp8_adapt_mv_probs(VP8_COMMON *cm) {
else cm->fc.mvc[i].prob[MVPbits + t] = prob;
}
}
#if CONFIG_HIGH_PRECISION_MV
for (i = 0; i < 2; ++i) {
int prob;
unsigned int is_short_ct[2];
@@ -408,5 +397,4 @@ void vp8_adapt_mv_probs(VP8_COMMON *cm) {
else cm->fc.mvc_hp[i].prob[MVPbits_hp + t] = prob;
}
}
#endif
}

@@ -42,7 +42,6 @@ typedef struct mv_context {

extern const MV_CONTEXT vp8_mv_update_probs[2], vp8_default_mv_context[2];

#if CONFIG_HIGH_PRECISION_MV
enum {
mv_max_hp = 2047, /* max absolute value of a MV component */
MVvals_hp = (2 * mv_max_hp) + 1, /* # possible values "" */
@@ -69,14 +68,10 @@ typedef struct mv_context_hp {

extern const MV_CONTEXT_HP vp8_mv_update_probs_hp[2], vp8_default_mv_context_hp[2];

#endif /* CONFIG_HIGH_PRECISION_MV */

extern const vp8_tree_index vp8_small_mvtree[];
extern struct vp8_token_struct vp8_small_mvencodings [8];
#if CONFIG_HIGH_PRECISION_MV
extern const vp8_tree_index vp8_small_mvtree_hp[];
extern struct vp8_token_struct vp8_small_mvencodings_hp [16];
#endif

void vp8_entropy_mv_init();
struct VP8Common;

@@ -133,7 +133,6 @@ void vp8_find_near_mvs
/* Make sure that the 1/8th bits of the Mvs are zero if high_precision
* is not being used, by truncating the last bit towards 0
*/
#if CONFIG_HIGH_PRECISION_MV
if (!xd->allow_high_precision_mv) {
if (best_mv->as_mv.row & 1)
best_mv->as_mv.row += (best_mv->as_mv.row > 0 ? -1 : 1);
@@ -148,7 +147,6 @@ void vp8_find_near_mvs
if (nearby->as_mv.col & 1)
nearby->as_mv.col += (nearby->as_mv.col > 0 ? -1 : 1);
}
#endif

// TODO: move clamp outside findnearmv
vp8_clamp_mv2(nearest, xd);

@@ -56,13 +56,9 @@ typedef struct frame_contexts {
vp8_prob coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
MV_CONTEXT mvc[2];
#if CONFIG_HIGH_PRECISION_MV
MV_CONTEXT_HP mvc_hp[2];
#endif
MV_CONTEXT pre_mvc[2];
#if CONFIG_HIGH_PRECISION_MV
MV_CONTEXT_HP pre_mvc_hp[2];
#endif
vp8_prob pre_bmode_prob [VP8_BINTRAMODES - 1];
vp8_prob pre_ymode_prob [VP8_YMODES - 1]; /* interframe intra mode probs */
vp8_prob pre_uv_mode_prob [VP8_YMODES][VP8_UV_MODES - 1];
@@ -93,9 +89,7 @@ typedef struct frame_contexts {
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
unsigned int MVcount [2] [MVvals];
#if CONFIG_HIGH_PRECISION_MV
unsigned int MVcount_hp [2] [MVvals_hp];
#endif
#if CONFIG_SWITCHABLE_INTERP
vp8_prob switchable_interp_prob[VP8_SWITCHABLE_FILTERS+1]
[VP8_SWITCHABLE_FILTERS-1];

@ -224,7 +224,6 @@ static void read_mvcontexts(vp8_reader *bc, MV_CONTEXT *mvc) {
|
||||
} while (++i < 2);
|
||||
}
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
static int read_mvcomponent_hp(vp8_reader *r, const MV_CONTEXT_HP *mvc) {
|
||||
const vp8_prob *const p = (const vp8_prob *) mvc;
|
||||
int x = 0;
|
||||
@ -281,7 +280,6 @@ static void read_mvcontexts_hp(vp8_reader *bc, MV_CONTEXT_HP *mvc) {
|
||||
} while (++p < pstop);
|
||||
} while (++i < 2);
|
||||
}
|
||||
#endif /* CONFIG_HIGH_PRECISION_MV */
|
||||
|
||||
// Read the referncence frame
|
||||
static MV_REFERENCE_FRAME read_ref_frame(VP8D_COMP *pbi,
|
||||
@ -447,10 +445,8 @@ static void mb_mode_mv_init(VP8D_COMP *pbi) {
|
||||
VP8_COMMON *const cm = & pbi->common;
|
||||
vp8_reader *const bc = & pbi->bc;
|
||||
MV_CONTEXT *const mvc = pbi->common.fc.mvc;
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
MV_CONTEXT_HP *const mvc_hp = pbi->common.fc.mvc_hp;
|
||||
MACROBLOCKD *const xd = & pbi->mb;
|
||||
#endif
|
||||
|
||||
vpx_memset(cm->mbskip_pred_probs, 0, sizeof(cm->mbskip_pred_probs));
|
||||
if (pbi->common.mb_no_coeff_skip) {
|
||||
@ -495,11 +491,9 @@ static void mb_mode_mv_init(VP8D_COMP *pbi) {
|
||||
cm->fc.ymode_prob[i] = (vp8_prob) vp8_read_literal(bc, 8);
|
||||
} while (++i < VP8_YMODES - 1);
|
||||
}
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
if (xd->allow_high_precision_mv)
|
||||
read_mvcontexts_hp(bc, mvc_hp);
|
||||
else
|
||||
#endif
|
||||
read_mvcontexts(bc, mvc);
|
||||
}
|
||||
}
|
||||
@ -563,9 +557,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
VP8_COMMON *const cm = & pbi->common;
|
||||
vp8_reader *const bc = & pbi->bc;
|
||||
MV_CONTEXT *const mvc = pbi->common.fc.mvc;
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
MV_CONTEXT_HP *const mvc_hp = pbi->common.fc.mvc_hp;
|
||||
#endif
|
||||
const int mis = pbi->common.mode_info_stride;
|
||||
MACROBLOCKD *const xd = & pbi->mb;
|
||||
|
||||
@ -719,14 +711,11 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
|
||||
switch (blockmode) {
|
||||
case NEW4X4:
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
if (xd->allow_high_precision_mv) {
|
||||
read_mv_hp(bc, &blockmv.as_mv, (const MV_CONTEXT_HP *) mvc_hp);
|
||||
cm->fc.MVcount_hp[0][mv_max_hp + (blockmv.as_mv.row)]++;
|
||||
cm->fc.MVcount_hp[1][mv_max_hp + (blockmv.as_mv.col)]++;
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
} else {
|
||||
read_mv(bc, &blockmv.as_mv, (const MV_CONTEXT *) mvc);
|
||||
cm->fc.MVcount[0][mv_max + (blockmv.as_mv.row >> 1)]++;
|
||||
cm->fc.MVcount[1][mv_max + (blockmv.as_mv.col >> 1)]++;
|
||||
@ -735,14 +724,11 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
blockmv.as_mv.col += best_mv.as_mv.col;
|
||||
|
||||
if (mbmi->second_ref_frame) {
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
if (xd->allow_high_precision_mv) {
|
||||
read_mv_hp(bc, &secondmv.as_mv, (const MV_CONTEXT_HP *) mvc_hp);
|
||||
cm->fc.MVcount_hp[0][mv_max_hp + (secondmv.as_mv.row)]++;
|
||||
cm->fc.MVcount_hp[1][mv_max_hp + (secondmv.as_mv.col)]++;
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
} else {
|
||||
read_mv(bc, &secondmv.as_mv, (const MV_CONTEXT *) mvc);
|
||||
cm->fc.MVcount[0][mv_max + (secondmv.as_mv.row >> 1)]++;
|
||||
cm->fc.MVcount[1][mv_max + (secondmv.as_mv.col >> 1)]++;
|
||||
@ -851,14 +837,11 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
break;
|
||||
|
||||
case NEWMV:
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
if (xd->allow_high_precision_mv) {
|
||||
read_mv_hp(bc, &mv->as_mv, (const MV_CONTEXT_HP *) mvc_hp);
|
||||
cm->fc.MVcount_hp[0][mv_max_hp + (mv->as_mv.row)]++;
|
||||
cm->fc.MVcount_hp[1][mv_max_hp + (mv->as_mv.col)]++;
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
} else {
|
||||
read_mv(bc, &mv->as_mv, (const MV_CONTEXT *) mvc);
|
||||
cm->fc.MVcount[0][mv_max + (mv->as_mv.row >> 1)]++;
|
||||
cm->fc.MVcount[1][mv_max + (mv->as_mv.col >> 1)]++;
|
||||
@ -877,15 +860,12 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
|
||||
mb_to_top_edge,
|
||||
mb_to_bottom_edge);
|
||||
if (mbmi->second_ref_frame) {
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
if (xd->allow_high_precision_mv) {
|
||||
read_mv_hp(bc, &mbmi->second_mv.as_mv,
|
||||
(const MV_CONTEXT_HP *) mvc_hp);
|
||||
cm->fc.MVcount_hp[0][mv_max_hp + (mbmi->second_mv.as_mv.row)]++;
|
||||
cm->fc.MVcount_hp[1][mv_max_hp + (mbmi->second_mv.as_mv.col)]++;
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
} else {
|
||||
read_mv(bc, &mbmi->second_mv.as_mv, (const MV_CONTEXT *) mvc);
|
||||
cm->fc.MVcount[0][mv_max + (mbmi->second_mv.as_mv.row >> 1)]++;
|
||||
cm->fc.MVcount[1][mv_max + (mbmi->second_mv.as_mv.col >> 1)]++;
|
||||
|
@ -752,10 +752,8 @@ static void init_frame(VP8D_COMP *pbi) {
|
||||
if (pc->frame_type == KEY_FRAME) {
|
||||
/* Various keyframe initializations */
|
||||
vpx_memcpy(pc->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
vpx_memcpy(pc->fc.mvc_hp, vp8_default_mv_context_hp,
|
||||
sizeof(vp8_default_mv_context_hp));
|
||||
#endif
|
||||
|
||||
vp8_init_mbmode_probs(pc);
|
||||
|
||||
@ -815,49 +813,6 @@ static void init_frame(VP8D_COMP *pbi) {
|
||||
|
||||
}
|
||||
|
||||
static void read_coef_probs3(VP8D_COMP *pbi) {
|
||||
const vp8_prob grpupd = 216;
|
||||
int i, j, k, l;
|
||||
vp8_reader *const bc = & pbi->bc;
|
||||
VP8_COMMON *const pc = & pbi->common;
|
||||
for (i = 0; i < BLOCK_TYPES; i++)
|
||||
for (l = 0; l < ENTROPY_NODES; l++) {
|
||||
if (vp8_read(bc, grpupd)) {
|
||||
// printf("Decoding %d\n", l);
|
||||
for (j = !i; j < COEF_BANDS; j++)
|
||||
for (k = 0; k < PREV_COEF_CONTEXTS; k++) {
|
||||
if (k >= 3 && ((i == 0 && j == 1) ||
|
||||
(i > 0 && j == 0)))
|
||||
continue;
|
||||
{
|
||||
vp8_prob *const p = pc->fc.coef_probs [i][j][k] + l;
|
||||
int u = vp8_read(bc, COEF_UPDATE_PROB);
|
||||
if (u) *p = read_prob_diff_update(bc, *p);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (pbi->common.txfm_mode == ALLOW_8X8) {
|
||||
for (i = 0; i < BLOCK_TYPES_8X8; i++)
|
||||
for (l = 0; l < ENTROPY_NODES; l++) {
|
||||
if (vp8_read(bc, grpupd)) {
|
||||
for (j = !i; j < COEF_BANDS; j++)
|
||||
for (k = 0; k < PREV_COEF_CONTEXTS; k++) {
|
||||
if (k >= 3 && ((i == 0 && j == 1) ||
|
||||
(i > 0 && j == 0)))
|
||||
continue;
|
||||
{
|
||||
vp8_prob *const p = pc->fc.coef_probs_8x8 [i][j][k] + l;
|
||||
int u = vp8_read(bc, COEF_UPDATE_PROB_8X8);
|
||||
if (u) *p = read_prob_diff_update(bc, *p);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void read_coef_probs2(VP8D_COMP *pbi) {
|
||||
const vp8_prob grpupd = 192;
|
||||
int i, j, k, l;
|
||||
@ -1288,10 +1243,8 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
|
||||
pc->ref_frame_sign_bias[GOLDEN_FRAME] = vp8_read_bit(bc);
|
||||
pc->ref_frame_sign_bias[ALTREF_FRAME] = vp8_read_bit(bc);
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
/* Is high precision mv allowed */
|
||||
xd->allow_high_precision_mv = (unsigned char)vp8_read_bit(bc);
|
||||
#endif
|
||||
// Read the type of subpel filter to use
|
||||
#if CONFIG_SWITCHABLE_INTERP
|
||||
if (vp8_read_bit(bc)) {
|
||||
@ -1336,9 +1289,7 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
|
||||
vp8_copy(pbi->common.fc.pre_sub_mv_ref_prob, pbi->common.fc.sub_mv_ref_prob);
|
||||
vp8_copy(pbi->common.fc.pre_mbsplit_prob, pbi->common.fc.mbsplit_prob);
|
||||
vp8_copy(pbi->common.fc.pre_mvc, pbi->common.fc.mvc);
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
vp8_copy(pbi->common.fc.pre_mvc_hp, pbi->common.fc.mvc_hp);
|
||||
#endif
|
||||
vp8_zero(pbi->common.fc.coef_counts);
|
||||
vp8_zero(pbi->common.fc.coef_counts_8x8);
|
||||
#if CONFIG_TX16X16
|
||||
@ -1351,15 +1302,11 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
|
||||
vp8_zero(pbi->common.fc.sub_mv_ref_counts);
|
||||
vp8_zero(pbi->common.fc.mbsplit_counts);
|
||||
vp8_zero(pbi->common.fc.MVcount);
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
vp8_zero(pbi->common.fc.MVcount_hp);
|
||||
#endif
|
||||
vp8_zero(pbi->common.fc.mv_ref_ct);
|
||||
vp8_zero(pbi->common.fc.mv_ref_ct_a);
|
||||
#if COEFUPDATETYPE == 2
|
||||
read_coef_probs2(pbi);
|
||||
#elif COEFUPDATETYPE == 3
|
||||
read_coef_probs3(pbi);
|
||||
#else
|
||||
read_coef_probs(pbi);
|
||||
#endif
|
||||
|
@ -556,7 +556,6 @@ static void write_mv
|
||||
vp8_encode_motion_vector(w, &e, mvc);
|
||||
}
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
static void write_mv_hp
|
||||
(
|
||||
vp8_writer *w, const MV *mv, const int_mv *ref, const MV_CONTEXT_HP *mvc
|
||||
@ -567,7 +566,6 @@ static void write_mv_hp
|
||||
|
||||
vp8_encode_motion_vector_hp(w, &e, mvc);
|
||||
}
|
||||
#endif
|
||||
|
||||
// This function writes the current macro block's segnment id to the bitstream
|
||||
// It should only be called if a segment map update is indicated.
|
||||
@ -721,9 +719,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
|
||||
VP8_COMMON *const pc = & cpi->common;
|
||||
vp8_writer *const w = & cpi->bc;
|
||||
const MV_CONTEXT *mvc = pc->fc.mvc;
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
const MV_CONTEXT_HP *mvc_hp = pc->fc.mvc_hp;
|
||||
#endif
|
||||
MACROBLOCKD *xd = &cpi->mb.e_mbd;
|
||||
MODE_INFO *m;
|
||||
MODE_INFO *prev_m;
|
||||
@ -799,11 +795,9 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
|
||||
|
||||
update_mbintra_mode_probs(cpi);
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
if (xd->allow_high_precision_mv)
|
||||
vp8_write_mvprobs_hp(cpi);
|
||||
else
|
||||
#endif
|
||||
vp8_write_mvprobs(cpi);
|
||||
|
||||
mb_row = 0;
|
||||
@ -1007,23 +1001,17 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
|
||||
active_section = 5;
|
||||
#endif
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
if (xd->allow_high_precision_mv) {
|
||||
write_mv_hp(w, &mi->mv.as_mv, &best_mv, mvc_hp);
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
} else {
|
||||
write_mv(w, &mi->mv.as_mv, &best_mv, mvc);
|
||||
}
|
||||
|
||||
if (mi->second_ref_frame) {
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
if (xd->allow_high_precision_mv) {
|
||||
write_mv_hp(w, &mi->second_mv.as_mv,
|
||||
&best_second_mv, mvc_hp);
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
} else {
|
||||
write_mv(w, &mi->second_mv.as_mv,
|
||||
&best_second_mv, mvc);
|
||||
}
|
||||
@ -1068,25 +1056,19 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
|
||||
#ifdef ENTROPY_STATS
|
||||
active_section = 11;
|
||||
#endif
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
if (xd->allow_high_precision_mv) {
|
||||
write_mv_hp(w, &blockmv.as_mv, &best_mv,
|
||||
(const MV_CONTEXT_HP *) mvc_hp);
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
} else {
|
||||
write_mv(w, &blockmv.as_mv, &best_mv,
|
||||
(const MV_CONTEXT *) mvc);
|
||||
}
|
||||
|
||||
if (mi->second_ref_frame) {
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
if (xd->allow_high_precision_mv) {
|
||||
write_mv_hp(w, &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
|
||||
&best_second_mv, (const MV_CONTEXT_HP *) mvc_hp);
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
} else {
|
||||
write_mv(w, &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
|
||||
&best_second_mv, (const MV_CONTEXT *) mvc);
|
||||
}
|
||||
@ -1365,177 +1347,6 @@ void build_coeff_contexts(VP8_COMP *cpi) {
|
||||
#endif
|
||||
}
|
||||
|
||||
static void update_coef_probs3(VP8_COMP *cpi) {
|
||||
const vp8_prob grpupd = 216;
|
||||
int i, j, k, t;
|
||||
vp8_writer *const w = & cpi->bc;
|
||||
int update[2];
|
||||
int savings;
|
||||
int bestupdndx[2 * ENTROPY_NODES];
|
||||
|
||||
vp8_clear_system_state(); // __asm emms;
|
||||
// Build the cofficient contexts based on counts collected in encode loop
|
||||
build_coeff_contexts(cpi);
|
||||
|
||||
i = 0;
|
||||
for (i = 0; i < BLOCK_TYPES; ++i) {
|
||||
for (t = 0; t < ENTROPY_NODES; ++t) {
|
||||
/* dry run to see if there is any udpate at all needed */
|
||||
savings = 0;
|
||||
update[0] = update[1] = 0;
|
||||
for (j = !i; j < COEF_BANDS; ++j) {
|
||||
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
|
||||
vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
|
||||
vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t;
|
||||
const vp8_prob upd = COEF_UPDATE_PROB;
|
||||
int s;
|
||||
int u = 0;
|
||||
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
|
||||
continue;
|
||||
|
||||
#if defined(SEARCH_NEWP)
|
||||
s = prob_diff_update_savings_search(
|
||||
cpi->frame_branch_ct [i][j][k][t], *Pold, &newp, upd);
|
||||
if (s > 0 && newp != *Pold) u = 1;
|
||||
if (u)
|
||||
savings += s - (int)(vp8_cost_zero(upd));
|
||||
else
|
||||
savings -= (int)(vp8_cost_zero(upd));
|
||||
#else
|
||||
s = prob_update_savings(
|
||||
cpi->frame_branch_ct [i][j][k][t], *Pold, newp, upd);
|
||||
if (s > 0) u = 1;
|
||||
if (u)
|
||||
savings += s;
|
||||
#endif
|
||||
// printf(" %d %d %d: %d\n", i, j, k, u);
|
||||
update[u]++;
|
||||
}
|
||||
}
|
||||
if (update[1] == 0 || savings < 0) {
|
||||
vp8_write(w, 0, grpupd);
|
||||
continue;
|
||||
}
|
||||
vp8_write(w, 1, grpupd);
|
||||
for (j = !i; j < COEF_BANDS; ++j) {
|
||||
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
|
||||
vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
|
||||
vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t;
|
||||
const vp8_prob upd = COEF_UPDATE_PROB;
|
||||
int s;
|
||||
int u = 0;
|
||||
|
||||
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
|
||||
continue;
|
||||
#if defined(SEARCH_NEWP)
|
||||
s = prob_diff_update_savings_search(
|
||||
cpi->frame_branch_ct [i][j][k][t], *Pold, &newp, upd);
|
||||
if (s > 0 && newp != *Pold) u = 1;
|
||||
#else
|
||||
s = prob_update_savings(
|
||||
cpi->frame_branch_ct [i][j][k][t], *Pold, newp, upd);
|
||||
if (s > 0) u = 1;
|
||||
#endif
|
||||
// printf(" %d %d %d: %d (%d)\n", i, j, k, u, upd);
|
||||
vp8_write(w, u, upd);
|
||||
#ifdef ENTROPY_STATS
|
||||
if (!cpi->dummy_packing)
|
||||
++ tree_update_hist [i][j][k][t] [u];
|
||||
#endif
|
||||
if (u) {
|
||||
/* send/use new probability */
|
||||
write_prob_diff_update(w, newp, *Pold);
|
||||
*Pold = newp;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (cpi->common.txfm_mode != ALLOW_8X8) return;
|
||||
|
||||
for (i = 0; i < BLOCK_TYPES_8X8; ++i) {
|
||||
for (t = 0; t < ENTROPY_NODES; ++t) {
|
||||
/* dry run to see if there is any udpate at all needed */
|
||||
savings = 0;
|
||||
update[0] = update[1] = 0;
|
||||
for (j = !i; j < COEF_BANDS; ++j) {
|
||||
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
|
||||
vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
|
||||
vp8_prob *Pold = cpi->common.fc.coef_probs_8x8 [i][j][k] + t;
|
||||
const vp8_prob upd = COEF_UPDATE_PROB_8X8;
|
||||
int s;
|
||||
int u = 0;
|
||||
|
||||
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
|
||||
continue;
|
||||
#if defined(SEARCH_NEWP)
|
||||
s = prob_diff_update_savings_search(
|
||||
cpi->frame_branch_ct_8x8 [i][j][k][t],
|
||||
*Pold, &newp, upd);
|
||||
if (s > 0 && newp != *Pold)
|
||||
u = 1;
|
||||
if (u)
|
||||
savings += s - (int)(vp8_cost_zero(upd));
|
||||
else
|
||||
savings -= (int)(vp8_cost_zero(upd));
|
||||
#else
|
||||
s = prob_update_savings(
|
||||
cpi->frame_branch_ct_8x8 [i][j][k][t],
|
||||
*Pold, newp, upd);
|
||||
if (s > 0)
|
||||
u = 1;
|
||||
if (u)
|
||||
savings += s;
|
||||
#endif
|
||||
update[u]++;
|
||||
}
|
||||
}
|
||||
if (update[1] == 0 || savings < 0) {
|
||||
vp8_write(w, 0, grpupd);
|
||||
continue;
|
||||
}
|
||||
vp8_write(w, 1, grpupd);
|
||||
for (j = !i; j < COEF_BANDS; ++j) {
|
||||
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
|
||||
vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
|
||||
vp8_prob *Pold = cpi->common.fc.coef_probs_8x8 [i][j][k] + t;
|
||||
const vp8_prob upd = COEF_UPDATE_PROB_8X8;
|
||||
int s;
|
||||
int u = 0;
|
||||
|
||||
if (k >= 3 && ((i == 0 && j == 1) || (i > 0 && j == 0)))
|
||||
continue;
|
||||
#if defined(SEARCH_NEWP)
|
||||
s = prob_diff_update_savings_search(
|
||||
cpi->frame_branch_ct_8x8 [i][j][k][t],
|
||||
*Pold, &newp, upd);
|
||||
if (s > 0 && newp != *Pold)
|
||||
u = 1;
|
||||
#else
|
||||
s = prob_update_savings(
|
||||
cpi->frame_branch_ct_8x8 [i][j][k][t],
|
||||
*Pold, newp, upd);
|
||||
if (s > 0)
|
||||
u = 1;
|
||||
#endif
|
||||
vp8_write(w, u, upd);
|
||||
#ifdef ENTROPY_STATS
|
||||
if (!cpi->dummy_packing)
|
||||
++ tree_update_hist_8x8 [i][j][k][t] [u];
|
||||
#endif
|
||||
if (u) {
|
||||
/* send/use new probability */
|
||||
write_prob_diff_update(w, newp, *Pold);
|
||||
*Pold = newp;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void update_coef_probs2(VP8_COMP *cpi) {
|
||||
const vp8_prob grpupd = 192;
|
||||
int i, j, k, t;
|
||||
@ -2334,10 +2145,8 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
|
||||
vp8_write_bit(bc, pc->ref_frame_sign_bias[GOLDEN_FRAME]);
|
||||
vp8_write_bit(bc, pc->ref_frame_sign_bias[ALTREF_FRAME]);
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
// Signal whether to allow high MV precision
|
||||
vp8_write_bit(bc, (xd->allow_high_precision_mv) ? 1 : 0);
|
||||
#endif
|
||||
#if CONFIG_SWITCHABLE_INTERP
|
||||
if (pc->mcomp_filter_type == SWITCHABLE) {
|
||||
/* Check to see if only one of the filters is actually used */
|
||||
@ -2393,17 +2202,13 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
|
||||
vp8_copy(cpi->common.fc.pre_mbsplit_prob, cpi->common.fc.mbsplit_prob);
|
||||
vp8_copy(cpi->common.fc.pre_i8x8_mode_prob, cpi->common.fc.i8x8_mode_prob);
|
||||
vp8_copy(cpi->common.fc.pre_mvc, cpi->common.fc.mvc);
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
vp8_copy(cpi->common.fc.pre_mvc_hp, cpi->common.fc.mvc_hp);
|
||||
#endif
|
||||
vp8_zero(cpi->sub_mv_ref_count);
|
||||
vp8_zero(cpi->mbsplit_count);
|
||||
vp8_zero(cpi->common.fc.mv_ref_ct)
|
||||
vp8_zero(cpi->common.fc.mv_ref_ct_a)
|
||||
#if COEFUPDATETYPE == 2
|
||||
update_coef_probs2(cpi);
|
||||
#elif COEFUPDATETYPE == 3
|
||||
update_coef_probs3(cpi);
|
||||
#else
|
||||
update_coef_probs(cpi);
|
||||
#endif
|
||||
|
@ -119,12 +119,11 @@ typedef struct {
|
||||
int *mvcost[2];
|
||||
int mvsadcosts[2][MVfpvals + 1];
|
||||
int *mvsadcost[2];
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
int mvcosts_hp[2][MVvals_hp + 1];
|
||||
int *mvcost_hp[2];
|
||||
int mvsadcosts_hp[2][MVfpvals_hp + 1];
|
||||
int *mvsadcost_hp[2];
|
||||
#endif
|
||||
|
||||
int mbmode_cost[2][MB_MODE_COUNT];
|
||||
int intra_uv_mode_cost[2][MB_MODE_COUNT];
|
||||
int bmode_costs[VP8_BINTRAMODES][VP8_BINTRAMODES][VP8_BINTRAMODES];
|
||||
@ -156,9 +155,7 @@ typedef struct {
|
||||
|
||||
unsigned char *active_ptr;
|
||||
MV_CONTEXT *mvc;
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
MV_CONTEXT_HP *mvc_hp;
|
||||
#endif
|
||||
|
||||
unsigned int token_costs[BLOCK_TYPES] [COEF_BANDS]
|
||||
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
|
||||
|
@ -1054,9 +1054,7 @@ void init_encode_frame_mb_context(VP8_COMP *cpi) {
|
||||
// vp8_zero(cpi->uv_mode_count)
|
||||
|
||||
x->mvc = cm->fc.mvc;
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
x->mvc_hp = cm->fc.mvc_hp;
|
||||
#endif
|
||||
|
||||
vpx_memset(cm->above_context, 0,
|
||||
sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
|
||||
@ -1127,9 +1125,7 @@ static void encode_frame_internal(VP8_COMP *cpi) {
|
||||
xd->prev_mode_info_context = cm->prev_mi;
|
||||
|
||||
vp8_zero(cpi->MVcount);
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
vp8_zero(cpi->MVcount_hp);
|
||||
#endif
|
||||
vp8_zero(cpi->coef_counts);
|
||||
vp8_zero(cpi->coef_counts_8x8);
|
||||
#if CONFIG_TX16X16
|
||||
|
@ -20,11 +20,6 @@
|
||||
extern unsigned int active_section;
|
||||
#endif
|
||||
|
||||
// #define DEBUG_ENC_MV
|
||||
#ifdef DEBUG_ENC_MV
|
||||
int enc_mvcount = 0;
|
||||
#endif
|
||||
|
||||
static void encode_mvcomponent(
|
||||
vp8_writer *const w,
|
||||
const int v,
|
||||
@ -61,47 +56,10 @@ static void encode_mvcomponent(
|
||||
|
||||
vp8_write(w, v < 0, p [MVPsign]);
|
||||
}
|
||||
#if 0
|
||||
static int max_mv_r = 0;
|
||||
static int max_mv_c = 0;
|
||||
#endif
|
||||
|
||||
void vp8_encode_motion_vector(vp8_writer *w, const MV *mv, const MV_CONTEXT *mvc) {
|
||||
|
||||
#if 0
|
||||
{
|
||||
if (abs(mv->row >> 1) > max_mv_r) {
|
||||
FILE *f = fopen("maxmv.stt", "a");
|
||||
max_mv_r = abs(mv->row >> 1);
|
||||
fprintf(f, "New Mv Row Max %6d\n", (mv->row >> 1));
|
||||
|
||||
if ((abs(mv->row) / 2) != max_mv_r)
|
||||
fprintf(f, "MV Row conversion error %6d\n", abs(mv->row) / 2);
|
||||
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
if (abs(mv->col >> 1) > max_mv_c) {
|
||||
FILE *f = fopen("maxmv.stt", "a");
|
||||
fprintf(f, "New Mv Col Max %6d\n", (mv->col >> 1));
|
||||
max_mv_c = abs(mv->col >> 1);
|
||||
fclose(f);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
encode_mvcomponent(w, mv->row >> 1, &mvc[0]);
|
||||
encode_mvcomponent(w, mv->col >> 1, &mvc[1]);
|
||||
#ifdef DEBUG_ENC_MV
|
||||
{
|
||||
int i;
|
||||
printf("%d (np): %d %d\n", enc_mvcount++,
|
||||
(mv->row >> 1) << 1, (mv->col >> 1) << 1);
|
||||
// for (i=0; i<MVPcount;++i) printf(" %d", (&mvc[0])->prob[i]);
|
||||
// printf("\n");
|
||||
// for (i=0; i<MVPcount;++i) printf(" %d", (&mvc[1])->prob[i]);
|
||||
// printf("\n");
|
||||
fflush(stdout);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
@ -282,43 +240,6 @@ static void write_component_probs(
|
||||
} while (++j <= mv_max);
|
||||
}
|
||||
|
||||
/*
|
||||
{
|
||||
int j = -mv_max;
|
||||
do
|
||||
{
|
||||
|
||||
const int c = events [mv_max + j];
|
||||
int a = j;
|
||||
|
||||
if( j < 0)
|
||||
{
|
||||
sign_ct [1] += c;
|
||||
a = -j;
|
||||
}
|
||||
else if( j)
|
||||
sign_ct [0] += c;
|
||||
|
||||
if( a < mvnum_short)
|
||||
{
|
||||
is_short_ct [0] += c; // Short vector
|
||||
short_ct [a] += c; // Magnitude distribution
|
||||
}
|
||||
else
|
||||
{
|
||||
int k = mvlong_width - 1;
|
||||
is_short_ct [1] += c; // Long vector
|
||||
|
||||
// bit 3 not always encoded.
|
||||
|
||||
do
|
||||
bit_ct [k] [(a >> k) & 1] += c;
|
||||
while( --k >= 0);
|
||||
}
|
||||
} while( ++j <= mv_max);
|
||||
}
|
||||
*/
|
||||
|
||||
calc_prob(Pnew + mvpis_short, is_short_ct);
|
||||
|
||||
calc_prob(Pnew + MVPsign, sign_ct);
|
||||
@ -401,7 +322,6 @@ void vp8_write_mvprobs(VP8_COMP *cpi) {
|
||||
#endif
|
||||
}
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
|
||||
static void encode_mvcomponent_hp(
|
||||
vp8_writer *const w,
|
||||
@ -441,47 +361,12 @@ static void encode_mvcomponent_hp(
|
||||
|
||||
vp8_write(w, v < 0, p [MVPsign_hp]);
|
||||
}
|
||||
#if 0
|
||||
static int max_mv_r = 0;
|
||||
static int max_mv_c = 0;
|
||||
#endif
|
||||
|
||||
void vp8_encode_motion_vector_hp(vp8_writer *w, const MV *mv,
|
||||
const MV_CONTEXT_HP *mvc) {
|
||||
|
||||
#if 0
|
||||
{
|
||||
if (abs(mv->row >> 1) > max_mv_r) {
|
||||
FILE *f = fopen("maxmv.stt", "a");
|
||||
max_mv_r = abs(mv->row >> 1);
|
||||
fprintf(f, "New Mv Row Max %6d\n", (mv->row >> 1));
|
||||
|
||||
if ((abs(mv->row) / 2) != max_mv_r)
|
||||
fprintf(f, "MV Row conversion error %6d\n", abs(mv->row) / 2);
|
||||
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
if (abs(mv->col >> 1) > max_mv_c) {
|
||||
FILE *f = fopen("maxmv.stt", "a");
|
||||
fprintf(f, "New Mv Col Max %6d\n", (mv->col >> 1));
|
||||
max_mv_c = abs(mv->col >> 1);
|
||||
fclose(f);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
encode_mvcomponent_hp(w, mv->row, &mvc[0]);
|
||||
encode_mvcomponent_hp(w, mv->col, &mvc[1]);
|
||||
#ifdef DEBUG_ENC_MV
|
||||
{
|
||||
int i;
|
||||
printf("%d (hp): %d %d\n", enc_mvcount++, mv->row, mv->col);
|
||||
// for (i=0; i<MVPcount_hp;++i) printf(" %d", (&mvc[0])->prob[i]);
|
||||
// printf("\n");
|
||||
// for (i=0; i<MVPcount_hp;++i) printf(" %d", (&mvc[1])->prob[i]);
|
||||
// printf("\n");
|
||||
fflush(stdout);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
@ -716,4 +601,3 @@ void vp8_write_mvprobs_hp(VP8_COMP *cpi) {
|
||||
active_section = 5;
|
||||
#endif
|
||||
}
|
||||
#endif /* CONFIG_HIGH_PRECISION_MV */
|
||||
|
@ -17,10 +17,8 @@
|
||||
void vp8_write_mvprobs(VP8_COMP *);
|
||||
void vp8_encode_motion_vector(vp8_writer *, const MV *, const MV_CONTEXT *);
|
||||
void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, int mvc_flag[2]);
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
void vp8_write_mvprobs_hp(VP8_COMP *);
|
||||
void vp8_encode_motion_vector_hp(vp8_writer *, const MV *, const MV_CONTEXT_HP *);
|
||||
void vp8_build_component_cost_table_hp(int *mvcost[2], const MV_CONTEXT_HP *mvc, int mvc_flag[2]);
|
||||
#endif /* CONFIG_HIGH_PRECISION_MV */
|
||||
|
||||
#endif
|
||||
|
@ -38,11 +38,7 @@
|
||||
#define IF_RTCD(x) NULL
|
||||
#endif
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
#define XMVCOST (x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost)
|
||||
#else
|
||||
#define XMVCOST (x->mvcost)
|
||||
#endif
|
||||
|
||||
extern void vp8_build_block_offsets(MACROBLOCK *x);
|
||||
extern void vp8_setup_block_ptrs(MACROBLOCK *x);
|
||||
@ -499,10 +495,8 @@ void vp8_first_pass(VP8_COMP *cpi) {
|
||||
vp8_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
|
||||
vpx_memcpy(cm->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
|
||||
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
vpx_memcpy(cm->fc.mvc_hp, vp8_default_mv_context_hp, sizeof(vp8_default_mv_context_hp));
|
||||
vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp, (const MV_CONTEXT_HP *) cm->fc.mvc_hp, flag);
|
||||
#endif
|
||||
}
|
||||
|
||||
// for each macroblock row in image
|
||||
|
@ -34,11 +34,10 @@ static unsigned int do_16x16_motion_iteration
|
||||
static int dummy_cost[2 * mv_max + 1];
|
||||
int *mvcost[2] = { &dummy_cost[mv_max + 1], &dummy_cost[mv_max + 1] };
|
||||
int *mvsadcost[2] = { &dummy_cost[mv_max + 1], &dummy_cost[mv_max + 1] };
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
static int dummy_cost_hp[2 * mv_max_hp + 1];
|
||||
int *mvcost_hp[2] = { &dummy_cost_hp[mv_max_hp + 1], &dummy_cost_hp[mv_max_hp + 1] };
|
||||
int *mvsadcost_hp[2] = { &dummy_cost_hp[mv_max_hp + 1], &dummy_cost_hp[mv_max_hp + 1] };
|
||||
#endif
|
||||
|
||||
int col_min = (ref_mv->as_mv.col >> 3) - MAX_FULL_PEL_VAL + ((ref_mv->as_mv.col & 7) ? 1 : 0);
|
||||
int row_min = (ref_mv->as_mv.row >> 3) - MAX_FULL_PEL_VAL + ((ref_mv->as_mv.row & 7) ? 1 : 0);
|
||||
int col_max = (ref_mv->as_mv.col >> 3) + MAX_FULL_PEL_VAL;
|
||||
@ -72,32 +71,27 @@ static unsigned int do_16x16_motion_iteration
|
||||
ref_full.as_mv.row = ref_mv->as_mv.row >> 3;
|
||||
|
||||
/*cpi->sf.search_method == HEX*/
|
||||
best_err = vp8_hex_search(x, b, d,
|
||||
&ref_full, dst_mv,
|
||||
step_param,
|
||||
x->errorperbit,
|
||||
&v_fn_ptr,
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
xd->allow_high_precision_mv ? mvsadcost_hp : mvsadcost, xd->allow_high_precision_mv ? mvcost_hp : mvcost,
|
||||
#else
|
||||
mvsadcost, mvcost,
|
||||
#endif
|
||||
ref_mv);
|
||||
best_err = vp8_hex_search(
|
||||
x, b, d,
|
||||
&ref_full, dst_mv,
|
||||
step_param,
|
||||
x->errorperbit,
|
||||
&v_fn_ptr,
|
||||
xd->allow_high_precision_mv ? mvsadcost_hp : mvsadcost,
|
||||
xd->allow_high_precision_mv ? mvcost_hp : mvcost,
|
||||
ref_mv);
|
||||
|
||||
// Try sub-pixel MC
|
||||
// if (bestsme > error_thresh && bestsme < INT_MAX)
|
||||
{
|
||||
int distortion;
|
||||
unsigned int sse;
|
||||
best_err = cpi->find_fractional_mv_step(x, b, d,
|
||||
dst_mv, ref_mv,
|
||||
x->errorperbit, &v_fn_ptr,
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
xd->allow_high_precision_mv ? mvcost_hp : mvcost,
|
||||
#else
|
||||
mvcost,
|
||||
#endif
|
||||
& distortion, &sse);
|
||||
best_err = cpi->find_fractional_mv_step(
|
||||
x, b, d,
|
||||
dst_mv, ref_mv,
|
||||
x->errorperbit, &v_fn_ptr,
|
||||
xd->allow_high_precision_mv ? mvcost_hp : mvcost,
|
||||
& distortion, &sse);
|
||||
}
|
||||
|
||||
#if CONFIG_PRED_FILTER
|
||||
|
@ -22,26 +22,21 @@ static int mv_ref_ct [31] [4] [2];
|
||||
static int mv_mode_cts [4] [2];
|
||||
#endif
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight, int ishp) {
|
||||
// MV costing is based on the distribution of vectors in the previous frame and as such will tend to
|
||||
// over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
|
||||
// cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
|
||||
// The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
|
||||
return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> (ishp == 0)] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> (ishp == 0)]) * Weight) >> 7;
|
||||
int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2],
|
||||
int Weight, int ishp) {
|
||||
// MV costing is based on the distribution of vectors in the previous frame
|
||||
// and as such will tend to over state the cost of vectors. In addition
|
||||
// coding a new vector can have a knock on effect on the cost of subsequent
|
||||
// vectors and the quality of prediction from NEAR and NEAREST for subsequent
|
||||
// blocks. The "Weight" parameter allows, to a limited extent, for some
|
||||
// account to be taken of these factors.
|
||||
return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> (ishp == 0)] +
|
||||
mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> (ishp == 0)])
|
||||
* Weight) >> 7;
|
||||
}
|
||||
#else
|
||||
int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight) {
|
||||
// MV costing is based on the distribution of vectors in the previous frame and as such will tend to
|
||||
// over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
|
||||
// cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
|
||||
// The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
|
||||
return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * Weight) >> 7;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit, int ishp) {
|
||||
static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2],
|
||||
int error_per_bit, int ishp) {
|
||||
// Ignore costing if mvcost is NULL
|
||||
if (mvcost)
|
||||
return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> (ishp == 0)] +
|
||||
@ -49,19 +44,10 @@ static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bi
|
||||
* error_per_bit + 128) >> 8;
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit) {
|
||||
// Ignore costing if mvcost is NULL
|
||||
if (mvcost)
|
||||
return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] +
|
||||
mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1])
|
||||
* error_per_bit + 128) >> 8;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2], int error_per_bit) {
|
||||
static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2],
|
||||
int error_per_bit) {
|
||||
// Calculate sad error cost on full pixel basis.
|
||||
// Ignore costing if mvcost is NULL
|
||||
if (mvsadcost)
|
||||
@ -209,7 +195,6 @@ void vp8_init3smotion_compensation(MACROBLOCK *x, int stride) {
|
||||
#define ERR(r,c) (MVC(r,c)+DIST(r,c)) // returns distortion + motion vector cost
|
||||
#define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
#define PREHP(r,c) (y + (((r)>>3) * y_stride + ((c)>>3) -(offset))) // pointer to predictor base of a motionvector
|
||||
#if CONFIG_SIXTEENTH_SUBPEL_UV
|
||||
#define SPHP(x) (((x)&7)<<1) // convert motion vector component to offset for svf calc
|
||||
@ -219,9 +204,6 @@ void vp8_init3smotion_compensation(MACROBLOCK *x, int stride) {
|
||||
#define DISTHP(r,c) vfp->svf( PREHP(r,c), y_stride, SPHP(c),SPHP(r), z,b->src_stride,&sse) // returns subpixel variance error function.
|
||||
#define ERRHP(r,c) (MVC(r,c)+DISTHP(r,c)) // returns distortion + motion vector cost
|
||||
#define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = ((xd->allow_high_precision_mv)?DISTHP(r,c):DIST(r,c)); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
|
||||
#else
|
||||
#define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = DIST(r,c); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
|
||||
#endif /* CONFIG_HIGH_PRECISION_MV */
|
||||
|
||||
#define MIN(x,y) (((x)<(y))?(x):(y))
|
||||
#define MAX(x,y) (((x)>(y))?(x):(y))
|
||||
@ -243,9 +225,7 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
unsigned int whichdir;
|
||||
unsigned int halfiters = 4;
|
||||
unsigned int quarteriters = 4;
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
unsigned int eighthiters = 4;
|
||||
#endif
|
||||
int thismse;
|
||||
int maxc, minc, maxr, minr;
|
||||
int y_stride;
|
||||
@ -276,7 +256,6 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
#endif
|
||||
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
if (xd->allow_high_precision_mv) {
|
||||
rr = ref_mv->as_mv.row;
|
||||
rc = ref_mv->as_mv.col;
|
||||
@ -287,9 +266,7 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << mvlong_width_hp) - 1));
|
||||
minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << mvlong_width_hp) - 1));
|
||||
maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << mvlong_width_hp) - 1));
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
} else {
|
||||
rr = ref_mv->as_mv.row >> 1;
|
||||
rc = ref_mv->as_mv.col >> 1;
|
||||
br = bestmv->as_mv.row << 2;
|
||||
@ -314,13 +291,11 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
// calculate central point error
|
||||
besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
|
||||
*distortion = besterr;
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
|
||||
#else
|
||||
besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
|
||||
#endif
|
||||
besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit,
|
||||
xd->allow_high_precision_mv);
|
||||
|
||||
// TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
|
||||
// TODO: Each subsequent iteration checks at least one point in
|
||||
// common with the last iteration could be 2 ( if diag selected)
|
||||
while (--halfiters) {
|
||||
// 1/2 pel
|
||||
CHECK_BETTER(left, tr, tc - hstep);
|
||||
@ -353,8 +328,8 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
tc = bc;
|
||||
}
|
||||
|
||||
// TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
|
||||
// 1/4 pel
|
||||
// TODO: Each subsequent iteration checks at least one point in common with
|
||||
// the last iteration could be 2 ( if diag selected) 1/4 pel
|
||||
hstep >>= 1;
|
||||
while (--quarteriters) {
|
||||
CHECK_BETTER(left, tr, tc - hstep);
|
||||
@ -387,7 +362,6 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
tc = bc;
|
||||
}
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
if (x->e_mbd.allow_high_precision_mv) {
|
||||
hstep >>= 1;
|
||||
while (--eighthiters) {
|
||||
@ -421,14 +395,10 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
tc = bc;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
if (x->e_mbd.allow_high_precision_mv) {
|
||||
bestmv->as_mv.row = br;
|
||||
bestmv->as_mv.col = bc;
|
||||
} else
|
||||
#endif /* CONFIG_HIGH_PRECISION_MV */
|
||||
{
|
||||
} else {
|
||||
bestmv->as_mv.row = br << 1;
|
||||
bestmv->as_mv.col = bc << 1;
|
||||
}
|
||||
@ -449,12 +419,10 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
#undef MIN
|
||||
#undef MAX
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
#undef PREHP
|
||||
#undef DPHP
|
||||
#undef DISTHP
|
||||
#undef ERRHP
|
||||
#endif
|
||||
|
||||
#if CONFIG_SIXTEENTH_SUBPEL_UV
|
||||
#define SP(x) (((x)&7)<<1) // convert motion vector component to offset for svf calc
|
||||
@ -470,10 +438,8 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
int bestmse = INT_MAX;
|
||||
int_mv startmv;
|
||||
int_mv this_mv;
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
int_mv orig_mv;
|
||||
int yrow_movedback = 0, ycol_movedback = 0;
|
||||
#endif
|
||||
unsigned char *z = (*(b->base_src) + b->src);
|
||||
int left, right, up, down, diag;
|
||||
unsigned int sse;
|
||||
@ -499,28 +465,20 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
bestmv->as_mv.row <<= 3;
|
||||
bestmv->as_mv.col <<= 3;
|
||||
startmv = *bestmv;
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
orig_mv = *bestmv;
|
||||
#endif
|
||||
|
||||
// calculate central point error
|
||||
bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
|
||||
*distortion = bestmse;
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
|
||||
#else
|
||||
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
|
||||
#endif
|
||||
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit,
|
||||
xd->allow_high_precision_mv);
|
||||
|
||||
// go left then right and check error
|
||||
this_mv.as_mv.row = startmv.as_mv.row;
|
||||
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
|
||||
thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
|
||||
#else
|
||||
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
|
||||
#endif
|
||||
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
|
||||
xd->allow_high_precision_mv);
|
||||
|
||||
if (left < bestmse) {
|
||||
*bestmv = this_mv;
|
||||
@ -531,11 +489,8 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
|
||||
this_mv.as_mv.col += 8;
|
||||
thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
|
||||
#else
|
||||
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
|
||||
#endif
|
||||
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
|
||||
xd->allow_high_precision_mv);
|
||||
|
||||
if (right < bestmse) {
|
||||
*bestmv = this_mv;
|
||||
@ -548,11 +503,8 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
this_mv.as_mv.col = startmv.as_mv.col;
|
||||
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
|
||||
thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
|
||||
#else
|
||||
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
|
||||
#endif
|
||||
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
|
||||
xd->allow_high_precision_mv);
|
||||
|
||||
if (up < bestmse) {
|
||||
*bestmv = this_mv;
|
||||
@ -563,11 +515,8 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
|
||||
this_mv.as_mv.row += 8;
|
||||
thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
|
||||
#else
|
||||
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
|
||||
#endif
|
||||
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
|
||||
xd->allow_high_precision_mv);
|
||||
|
||||
if (down < bestmse) {
|
||||
*bestmv = this_mv;
|
||||
@ -607,11 +556,8 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
break;
|
||||
}
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
|
||||
#else
|
||||
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
|
||||
#endif
|
||||
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
|
||||
xd->allow_high_precision_mv);
|
||||
|
||||
if (diag < bestmse) {
|
||||
*bestmv = this_mv;
|
||||
@ -626,16 +572,12 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
// time to check quarter pels.
|
||||
if (bestmv->as_mv.row < startmv.as_mv.row) {
|
||||
y -= y_stride;
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
yrow_movedback = 1;
|
||||
#endif
|
||||
}
|
||||
|
||||
if (bestmv->as_mv.col < startmv.as_mv.col) {
|
||||
y--;
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
ycol_movedback = 1;
|
||||
#endif
|
||||
}
|
||||
|
||||
startmv = *bestmv;
|
||||
@ -647,17 +589,17 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
|
||||
if (startmv.as_mv.col & 7) {
|
||||
this_mv.as_mv.col = startmv.as_mv.col - 2;
|
||||
thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
|
||||
thismse = vfp->svf(y, y_stride,
|
||||
SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
|
||||
z, b->src_stride, &sse);
|
||||
} else {
|
||||
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
|
||||
thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
|
||||
thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z,
|
||||
b->src_stride, &sse);
|
||||
}
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
|
||||
#else
|
||||
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
|
||||
#endif
|
||||
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
|
||||
xd->allow_high_precision_mv);
|
||||
|
||||
if (left < bestmse) {
|
||||
*bestmv = this_mv;
|
||||
@ -667,12 +609,11 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
}
|
||||
|
||||
this_mv.as_mv.col += 4;
|
||||
thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
|
||||
#else
|
||||
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
|
||||
#endif
|
||||
thismse = vfp->svf(y, y_stride,
|
||||
SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
|
||||
z, b->src_stride, &sse);
|
||||
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
|
||||
xd->allow_high_precision_mv);
|
||||
|
||||
if (right < bestmse) {
|
||||
*bestmv = this_mv;
|
||||
@ -686,17 +627,17 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
|
||||
if (startmv.as_mv.row & 7) {
|
||||
this_mv.as_mv.row = startmv.as_mv.row - 2;
|
||||
thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
|
||||
thismse = vfp->svf(y, y_stride,
|
||||
SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
|
||||
z, b->src_stride, &sse);
|
||||
} else {
|
||||
this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
|
||||
thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(6), z, b->src_stride, &sse);
|
||||
thismse = vfp->svf(y - y_stride, y_stride, SP(this_mv.as_mv.col), SP(6),
|
||||
z, b->src_stride, &sse);
|
||||
}
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
|
||||
#else
|
||||
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
|
||||
#endif
|
||||
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
|
||||
xd->allow_high_precision_mv);
|
||||
|
||||
if (up < bestmse) {
|
||||
*bestmv = this_mv;
|
||||
@ -706,12 +647,10 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
}
|
||||
|
||||
this_mv.as_mv.row += 4;
|
||||
thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
|
||||
#else
|
||||
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
|
||||
#endif
|
||||
thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
|
||||
z, b->src_stride, &sse);
|
||||
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
|
||||
xd->allow_high_precision_mv);
|
||||
|
||||
if (down < bestmse) {
|
||||
*bestmv = this_mv;
|
||||
@ -771,25 +710,26 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
|
||||
|
||||
if (startmv.as_mv.col & 7) {
|
||||
this_mv.as_mv.col -= 2;
|
||||
thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
|
||||
thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
|
||||
z, b->src_stride, &sse);
|
||||
} else {
|
||||
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
|
||||
thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
|
||||
thismse = vfp->svf(y - 1, y_stride, SP(6), SP(this_mv.as_mv.row), z,
|
||||
b->src_stride, &sse);
|
||||
}
|
||||
|
||||
break;
|
||||
case 3:
|
||||
this_mv.as_mv.col += 2;
|
||||
this_mv.as_mv.row += 2;
|
||||
thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
|
||||
thismse = vfp->svf(y, y_stride,
|
||||
SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
|
||||
z, b->src_stride, &sse);
|
||||
break;
|
||||
}
|
||||
|
||||
#if CONFIG_HIGH_PRECISION_MV
|
||||
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
|
||||
#else
|
||||
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
|
||||
#endif
|
||||
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
|
||||
xd->allow_high_precision_mv);

if (diag < bestmse) {
*bestmv = this_mv;
@ -798,7 +738,6 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}

#if CONFIG_HIGH_PRECISION_MV
if (!x->e_mbd.allow_high_precision_mv)
return bestmse;

@ -820,13 +759,17 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,

if (startmv.as_mv.col & 7) {
this_mv.as_mv.col = startmv.as_mv.col - 1;
thismse = vfp->svf(y, y_stride, SP(this_mv.as_mv.col), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
thismse = vfp->svf(y, y_stride,
SP(this_mv.as_mv.col), SP(this_mv.as_mv.row),
z, b->src_stride, &sse);
} else {
this_mv.as_mv.col = (startmv.as_mv.col - 8) | 7;
thismse = vfp->svf(y - 1, y_stride, SP(7), SP(this_mv.as_mv.row), z, b->src_stride, &sse);
thismse = vfp->svf(y - 1, y_stride, SP(7), SP(this_mv.as_mv.row),
z, b->src_stride, &sse);
}

left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
xd->allow_high_precision_mv);

if (left < bestmse) {
*bestmv = this_mv;
@ -877,7 +820,6 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}


// now check 1 more diagonal
whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);

@ -951,8 +893,6 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}

#endif /* CONFIG_HIGH_PRECISION_MV */

return bestmse;
}

@ -976,7 +916,8 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
MACROBLOCKD *xd = &x->e_mbd;

#if ARCH_X86 || ARCH_X86_64
unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
unsigned char *y0 = *(d->base_pre) + d->pre +
(bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
unsigned char *y;

y_stride = 32;
@ -984,7 +925,8 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
y = xd->y_buf + y_stride + 1;
#else
unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
unsigned char *y = *(d->base_pre) + d->pre +
(bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
y_stride = d->pre_stride;
#endif

@ -996,21 +938,15 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
// calculate central point error
bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
*distortion = bestmse;
#if CONFIG_HIGH_PRECISION_MV
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
#endif
bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit,
xd->allow_high_precision_mv);

// go left then right and check error
this_mv.as_mv.row = startmv.as_mv.row;
this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
#if CONFIG_HIGH_PRECISION_MV
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
xd->allow_high_precision_mv);

if (left < bestmse) {
*bestmv = this_mv;
@ -1021,11 +957,8 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,

this_mv.as_mv.col += 8;
thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
#if CONFIG_HIGH_PRECISION_MV
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
xd->allow_high_precision_mv);

if (right < bestmse) {
*bestmv = this_mv;
@ -1038,11 +971,8 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
this_mv.as_mv.col = startmv.as_mv.col;
this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
#if CONFIG_HIGH_PRECISION_MV
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
xd->allow_high_precision_mv);

if (up < bestmse) {
*bestmv = this_mv;
@ -1053,11 +983,8 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,

this_mv.as_mv.row += 8;
thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
#if CONFIG_HIGH_PRECISION_MV
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
xd->allow_high_precision_mv);

if (down < bestmse) {
*bestmv = this_mv;
@ -1094,11 +1021,8 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
break;
}

#if CONFIG_HIGH_PRECISION_MV
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit, xd->allow_high_precision_mv);
#else
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
#endif
diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit,
xd->allow_high_precision_mv);

if (diag < bestmse) {
*bestmv = this_mv;
@ -1347,12 +1271,10 @@ int vp8_diamond_search_sad
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;

#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

@ -1423,12 +1345,11 @@ int vp8_diamond_search_sad
if (bestsad == INT_MAX)
return INT_MAX;

return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
#if CONFIG_HIGH_PRECISION_MV
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
#else
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
#endif
return
fn_ptr->vf(what, what_stride, best_address, in_what_stride,
(unsigned int *)(&thissad)) +
mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
xd->allow_high_precision_mv);
}

int vp8_diamond_search_sadx4
@ -1473,12 +1394,10 @@ int vp8_diamond_search_sadx4
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;

#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

@ -1582,12 +1501,11 @@ int vp8_diamond_search_sadx4
if (bestsad == INT_MAX)
return INT_MAX;

return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
#if CONFIG_HIGH_PRECISION_MV
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
#else
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
#endif
return
fn_ptr->vf(what, what_stride, best_address, in_what_stride,
(unsigned int *)(&thissad)) +
mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
xd->allow_high_precision_mv);
}

int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
@ -1620,12 +1538,10 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;

#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

@ -1680,12 +1596,11 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
this_mv.as_mv.col = best_mv->as_mv.col << 3;

if (bestsad < INT_MAX)
return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
#if CONFIG_HIGH_PRECISION_MV
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
#else
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
#endif
return
fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
(unsigned int *)(&thissad)) +
mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
xd->allow_high_precision_mv);
else
return INT_MAX;
}
@ -1722,12 +1637,10 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;

#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

@ -1813,12 +1726,11 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
this_mv.as_mv.col = best_mv->as_mv.col << 3;

if (bestsad < INT_MAX)
return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
#if CONFIG_HIGH_PRECISION_MV
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
#else
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
#endif
return
fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
(unsigned int *)(&thissad)) +
mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
xd->allow_high_precision_mv);
else
return INT_MAX;
}
@ -1856,12 +1768,10 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;

#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

@ -1972,12 +1882,11 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
this_mv.as_mv.col = best_mv->as_mv.col << 3;

if (bestsad < INT_MAX)
return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
#if CONFIG_HIGH_PRECISION_MV
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
#else
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
#endif
return
fn_ptr->vf(what, what_stride, bestaddress, in_what_stride,
(unsigned int *)(&thissad)) +
mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
xd->allow_high_precision_mv);
else
return INT_MAX;
}
@ -2004,12 +1913,10 @@ int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;

#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
#endif

fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@ -2054,12 +1961,11 @@ int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
this_mv.as_mv.col = ref_mv->as_mv.col << 3;

if (bestsad < INT_MAX)
return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
#if CONFIG_HIGH_PRECISION_MV
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
#else
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
#endif
return
fn_ptr->vf(what, what_stride, best_address, in_what_stride,
(unsigned int *)(&thissad)) +
mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
xd->allow_high_precision_mv);
else
return INT_MAX;
}
@ -2086,12 +1992,10 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
int_mv fcenter_mv;

#if CONFIG_HIGH_PRECISION_MV
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
#endif

fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@ -2166,12 +2070,11 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
this_mv.as_mv.col = ref_mv->as_mv.col << 3;

if (bestsad < INT_MAX)
return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
#if CONFIG_HIGH_PRECISION_MV
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit, xd->allow_high_precision_mv);
#else
+ mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
#endif
return
fn_ptr->vf(what, what_stride, best_address, in_what_stride,
(unsigned int *)(&thissad)) +
mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit,
xd->allow_high_precision_mv);
else
return INT_MAX;
}
@ -25,11 +25,8 @@ extern void accum_mv_refs(MB_PREDICTION_MODE, const int near_mv_ref_cts[4]);
#define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS)) - 1) // Max full pel mv specified in 1 pel units
#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1)) // Maximum size of the first step in full pel units

#if CONFIG_HIGH_PRECISION_MV
extern int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight, int ishp);
#else
extern int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight);
#endif
extern int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2],
int Weight, int ishp);
extern void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride);
extern void vp8_init3smotion_compensation(MACROBLOCK *x, int stride);

@ -87,14 +87,12 @@ extern const int vp8_gf_interval_table[101];
before trying each new filter */
#define SHARP_FILTER_QTHRESH 0 /* Q threshold for 8-tap sharp filter */

#if CONFIG_HIGH_PRECISION_MV
#define ALTREF_HIGH_PRECISION_MV 1 /* whether to use high precision mv
for altref computation */
#define HIGH_PRECISION_MV_QTHRESH 200 /* Q threshold for use of high precision
mv. Choose a very high value for
now so that HIGH_PRECISION is always
chosen */
#endif

#if CONFIG_INTERNAL_STATS
#include "math.h"
@ -1523,9 +1521,7 @@ void vp8_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
cm->refresh_entropy_probs = 1;

setup_features(cpi);
#if CONFIG_HIGH_PRECISION_MV
cpi->mb.e_mbd.allow_high_precision_mv = 0; // Default mv precision adaptation
#endif

{
int i;
@ -1679,7 +1675,6 @@ static void cal_mvsadcosts(int *mvsadcost[2]) {
} while (++i <= mvfp_max);
}

#if CONFIG_HIGH_PRECISION_MV
static void cal_mvsadcosts_hp(int *mvsadcost[2]) {
int i = 1;

@ -1694,7 +1689,6 @@ static void cal_mvsadcosts_hp(int *mvsadcost[2]) {
mvsadcost [1][-i] = (int) z;
} while (++i <= mvfp_max_hp);
}
#endif

VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
int i;
@ -1876,14 +1870,12 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {

cal_mvsadcosts(cpi->mb.mvsadcost);

#if CONFIG_HIGH_PRECISION_MV
cpi->mb.mvcost_hp[0] = &cpi->mb.mvcosts_hp[0][mv_max_hp + 1];
cpi->mb.mvcost_hp[1] = &cpi->mb.mvcosts_hp[1][mv_max_hp + 1];
cpi->mb.mvsadcost_hp[0] = &cpi->mb.mvsadcosts_hp[0][mvfp_max_hp + 1];
cpi->mb.mvsadcost_hp[1] = &cpi->mb.mvsadcosts_hp[1][mvfp_max_hp + 1];

cal_mvsadcosts_hp(cpi->mb.mvsadcost_hp);
#endif

for (i = 0; i < KEY_FRAME_CONTEXT; i++) {
cpi->prior_key_frame_distance[i] = (int)cpi->output_frame_rate;
@ -3153,10 +3145,8 @@ static void encode_frame_to_data_rate
(Q < SHARP_FILTER_QTHRESH ? EIGHTTAP_SHARP : EIGHTTAP);
#endif
}
#if CONFIG_HIGH_PRECISION_MV
/* TODO: Decide this more intelligently */
xd->allow_high_precision_mv = (Q < HIGH_PRECISION_MV_QTHRESH);
#endif
}

#if CONFIG_POSTPROC
@ -3652,9 +3642,7 @@ static void encode_frame_to_data_rate
vp8_adapt_mode_probs(&cpi->common);

vp8_copy(cpi->common.fc.MVcount, cpi->MVcount);
#if CONFIG_HIGH_PRECISION_MV
vp8_copy(cpi->common.fc.MVcount_hp, cpi->MVcount_hp);
#endif
vp8_adapt_mv_probs(&cpi->common);
vp8_update_mode_context(&cpi->common);
}
@ -4048,9 +4036,7 @@ int vp8_get_compressed_data(VP8_PTR ptr, unsigned int *frame_flags, unsigned lon

cpi->source = NULL;

#if CONFIG_HIGH_PRECISION_MV
cpi->mb.e_mbd.allow_high_precision_mv = ALTREF_HIGH_PRECISION_MV;
#endif
// Should we code an alternate reference frame
if (cpi->oxcf.play_alternate &&
cpi->source_alt_ref_pending) {
@ -62,10 +62,8 @@
typedef struct {
MV_CONTEXT mvc[2];
int mvcosts[2][MVvals + 1];
#if CONFIG_HIGH_PRECISION_MV
MV_CONTEXT_HP mvc_hp[2];
int mvcosts_hp[2][MVvals_hp + 1];
#endif

#ifdef MODE_STATS
// Stats
@ -539,9 +537,7 @@ typedef struct VP8_COMP {
int y_uv_mode_count[VP8_YMODES][VP8_UV_MODES];

unsigned int MVcount [2] [MVvals]; /* (row,col) MV cts this frame */
#if CONFIG_HIGH_PRECISION_MV
unsigned int MVcount_hp [2] [MVvals_hp]; /* (row,col) MV cts this frame */
#endif

unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
// DECLARE_ALIGNED(16, int, coef_counts_backup [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]); //not used any more
@ -134,10 +134,8 @@ void vp8_save_coding_context(VP8_COMP *cpi) {

vp8_copy(cc->mvc, cm->fc.mvc);
vp8_copy(cc->mvcosts, cpi->mb.mvcosts);
#if CONFIG_HIGH_PRECISION_MV
vp8_copy(cc->mvc_hp, cm->fc.mvc_hp);
vp8_copy(cc->mvcosts_hp, cpi->mb.mvcosts_hp);
#endif

vp8_copy(cc->mv_ref_ct, cm->fc.mv_ref_ct);
vp8_copy(cc->mode_context, cm->fc.mode_context);
@ -192,10 +190,8 @@ void vp8_restore_coding_context(VP8_COMP *cpi) {

vp8_copy(cm->fc.mvc, cc->mvc);
vp8_copy(cpi->mb.mvcosts, cc->mvcosts);
#if CONFIG_HIGH_PRECISION_MV
vp8_copy(cm->fc.mvc_hp, cc->mvc_hp);
vp8_copy(cpi->mb.mvcosts_hp, cc->mvcosts_hp);
#endif

vp8_copy(cm->fc.mv_ref_ct, cc->mv_ref_ct);
vp8_copy(cm->fc.mode_context, cc->mode_context);
@ -253,14 +249,11 @@ void vp8_setup_key_frame(VP8_COMP *cpi) {
int flag[2] = {1, 1};
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flag);
}
#if CONFIG_HIGH_PRECISION_MV
vpx_memcpy(cpi->common.fc.mvc_hp, vp8_default_mv_context_hp, sizeof(vp8_default_mv_context_hp));
{
int flag[2] = {1, 1};
vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp, (const MV_CONTEXT_HP *) cpi->common.fc.mvc_hp, flag);
}
#endif


cpi->common.txfm_mode = ALLOW_8X8;
@ -54,11 +54,7 @@ extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);
extern void vp8_ht_quantize_b(BLOCK *b, BLOCKD *d);
#endif

#if CONFIG_HIGH_PRECISION_MV
#define XMVCOST (x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost)
#else
#define XMVCOST (x->mvcost)
#endif

#define MAXF(a,b) (((a) > (b)) ? (a) : (b))

@ -1711,19 +1707,13 @@ static int labels2mode(
this_second_mv->as_int = seg_mvs[xd->mode_info_context->mbmi.second_ref_frame - 1].as_int;
}

#if CONFIG_HIGH_PRECISION_MV
thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost,
102, xd->allow_high_precision_mv);
if (xd->mode_info_context->mbmi.second_ref_frame) {
thismvcost += vp8_mv_bit_cost(this_second_mv, second_best_ref_mv, mvcost,
102, xd->allow_high_precision_mv);
thismvcost += vp8_mv_bit_cost(this_second_mv, second_best_ref_mv,
mvcost, 102,
xd->allow_high_precision_mv);
}
#else
thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost, 102);
if (xd->mode_info_context->mbmi.second_ref_frame) {
thismvcost += vp8_mv_bit_cost(this_second_mv, second_best_ref_mv, mvcost, 102);
}
#endif
break;
case LEFT4X4:
this_mv->as_int = col ? d[-1].bmi.as_mv.first.as_int : left_block_mv(mic, i);
@ -2508,7 +2498,6 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,

for (i = 0; i < x->partition_info->count; i++) {
if (x->partition_info->bmi[i].mode == NEW4X4) {
#if CONFIG_HIGH_PRECISION_MV
if (x->e_mbd.allow_high_precision_mv) {
cpi->MVcount_hp[0][mv_max_hp + (x->partition_info->bmi[i].mv.as_mv.row
- best_ref_mv->as_mv.row)]++;
@ -2521,7 +2510,6 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
- second_best_ref_mv->as_mv.col)]++;
}
} else
#endif
{
cpi->MVcount[0][mv_max + ((x->partition_info->bmi[i].mv.as_mv.row
- best_ref_mv->as_mv.row) >> 1)]++;
@ -2537,7 +2525,6 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
}
}
} else if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV) {
#if CONFIG_HIGH_PRECISION_MV
if (x->e_mbd.allow_high_precision_mv) {
cpi->MVcount_hp[0][mv_max_hp + (x->e_mbd.mode_info_context->mbmi.mv.as_mv.row
- best_ref_mv->as_mv.row)]++;
@ -2550,7 +2537,6 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
- second_best_ref_mv->as_mv.col)]++;
}
} else
#endif
{
cpi->MVcount[0][mv_max + ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row
- best_ref_mv->as_mv.row) >> 1)]++;
@ -3324,14 +3310,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
mode_mv[NEWMV].as_int = d->bmi.as_mv.first.as_int;

// Add the new motion vector cost to our rolling cost variable
#if CONFIG_HIGH_PRECISION_MV
rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv,
XMVCOST, 96,
x->e_mbd.allow_high_precision_mv);
#else
rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv,
XMVCOST, 96);
#endif
}

case NEARESTMV:
@ -3483,7 +3464,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
continue;
x->e_mbd.mode_info_context->mbmi.mv.as_int = mc_search_result[ref1].as_int;
x->e_mbd.mode_info_context->mbmi.second_mv.as_int = mc_search_result[ref2].as_int;
#if CONFIG_HIGH_PRECISION_MV
rate2 += vp8_mv_bit_cost(&mc_search_result[ref1],
&frame_best_ref_mv[ref1],
XMVCOST, 96,
@ -3492,14 +3472,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
&frame_best_ref_mv[ref2],
XMVCOST, 96,
x->e_mbd.allow_high_precision_mv);
#else
rate2 += vp8_mv_bit_cost(&mc_search_result[ref1],
&frame_best_ref_mv[ref1],
XMVCOST, 96);
rate2 += vp8_mv_bit_cost(&mc_search_result[ref2],
&frame_best_ref_mv[ref2],
XMVCOST, 96);
#endif
break;
case ZEROMV:
x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;