Merge of the newmventropy experiment

Removes the old mv encoding code.

Change-Id: I1026d48cc5ac15ccb776f98463e929254c3dc7da
Deb Mukherjee 2012-10-22 12:19:00 -07:00
parent 12c1b180f8
commit 78808ad9fb
20 changed files with 32 additions and 1674 deletions

configure
View File

@ -224,7 +224,6 @@ EXPERIMENT_LIST="
switchable_interp
newbestrefmv
new_mvref
newmventropy
tx_select
"
CONFIG_LIST="

View File

@ -137,8 +137,8 @@ typedef enum {
typedef enum {
DCT_DCT = 0, // DCT in both horizontal and vertical
- ADST_DCT = 1, // ADST in horizontal, DCT in vertical
- DCT_ADST = 2, // DCT in horizontal, ADST in vertical
+ ADST_DCT = 1, // ADST in vertical, DCT in horizontal
+ DCT_ADST = 2, // DCT in vertical, ADST in horizontal
ADST_ADST = 3 // ADST in both directions
} TX_TYPE;

View File

@ -14,8 +14,6 @@
//#define MV_COUNT_TESTING
#if CONFIG_NEWMVENTROPY
#define MV_COUNT_SAT 16
#define MV_MAX_UPDATE_FACTOR 160
@ -450,413 +448,13 @@ void vp8_adapt_nmv_probs(VP8_COMMON *cm, int usehp) {
}
}
#else /* CONFIG_NEWMVENTROPY */
#define MV_COUNT_SAT 16
#define MV_MAX_UPDATE_FACTOR 128
const MV_CONTEXT_HP vp8_mv_update_probs_hp[2] = {
{{
237,
246,
253, 253, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254,
254, 254, 254, 254, 254, 250, 250, 252, 254, 254, 254
}
},
{{
231,
243,
245, 253, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254,
254, 254, 254, 254, 254, 251, 251, 254, 254, 254, 254
}
}
};
const MV_CONTEXT_HP vp8_default_mv_context_hp[2] = {
{{
/* row */
162, /* is short */
128, /* sign */
220, 204, 180, 192, 192, 119, 192, 192, 180, 140, 192, 192, 224, 224, 224, /* short tree */
128, 129, 132, 75, 145, 178, 206, 239, 254, 254, 254 /* long bits */
}
},
{{
/* same for column */
164, /* is short */
128,
220, 204, 180, 192, 192, 119, 192, 192, 180, 140, 192, 192, 224, 224, 224, /* short tree */
128, 130, 130, 74, 148, 180, 203, 236, 254, 254, 254 /* long bits */
}
}
};
const MV_CONTEXT vp8_mv_update_probs[2] = {
{{
237,
246,
253, 253, 254, 254, 254, 254, 254,
254, 254, 254, 254, 254, 250, 250, 252, 254, 254
}
},
{{
231,
243,
245, 253, 254, 254, 254, 254, 254,
254, 254, 254, 254, 254, 251, 251, 254, 254, 254
}
}
};
const MV_CONTEXT vp8_default_mv_context[2] = {
{{
/* row */
162, /* is short */
128, /* sign */
225, 146, 172, 147, 214, 39, 156, /* short tree */
128, 129, 132, 75, 145, 178, 206, 239, 254, 254 /* long bits */
}
},
{{
/* same for column */
164, /* is short */
128,
204, 170, 119, 235, 140, 230, 228,
128, 130, 130, 74, 148, 180, 203, 236, 254, 254 /* long bits */
}
}
};
const vp8_tree_index vp8_small_mvtree_hp [30] = {
2, 16,
4, 10,
6, 8,
-0, -1,
-2, -3,
12, 14,
-4, -5,
-6, -7,
18, 24,
20, 22,
-8, -9,
-10, -11,
26, 28,
-12, -13,
-14, -15
};
struct vp8_token_struct vp8_small_mvencodings_hp [16];
const vp8_tree_index vp8_small_mvtree [14] = {
2, 8,
4, 6,
-0, -1,
-2, -3,
10, 12,
-4, -5,
-6, -7
};
struct vp8_token_struct vp8_small_mvencodings [8];
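/* Illustrative note (not part of the original source): the trees above are
 * stored as pairs of vp8_tree_index entries. At node i a decoded bit b
 * selects tree[i + b]; a positive entry is the index of the next pair, a
 * non-positive entry is a leaf holding the negated token value. Ignoring
 * the arithmetic coder (the real walk reads each bit with probability
 * p[i >> 1]), decoding bits 1,0,1 against vp8_small_mvtree runs:
 *
 *   int i = 0;
 *   i = vp8_small_mvtree[0 + 1];   // -> 8
 *   i = vp8_small_mvtree[8 + 0];   // -> 10
 *   i = vp8_small_mvtree[10 + 1];  // -> -5, a leaf
 *   return -i;                     // short magnitude 5
 */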
__inline static void calc_prob(vp8_prob *p, const unsigned int ct[2], int pbits) {
const unsigned int tot = ct[0] + ct[1];
if (tot) {
const vp8_prob x = ((ct[0] * 255) / tot) & -(1 << (8 - pbits));
*p = x ? x : 1;
} else {
*p = 128;
}
}
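/* Illustrative note (not part of the original source): calc_prob() turns a
 * branch count pair into the 8-bit probability that branch 0 is taken,
 * truncated and quantized to `pbits` significant bits. With pbits == 8,
 * as used throughout this file:
 *
 *   ct = {60, 20}  ->  (60 * 255) / 80 = 191  ->  *p = 191
 *   ct = {0,  5}   ->  0, clamped to minimum  ->  *p = 1
 *   ct = {0,  0}   ->  no data seen           ->  *p = 128 (i.e. 50/50)
 */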
static void compute_component_probs(
const unsigned int events [MVvals],
vp8_prob Pnew [MVPcount],
unsigned int is_short_ct[2],
unsigned int sign_ct[2],
unsigned int bit_ct [mvlong_width] [2],
unsigned int short_ct [mvnum_short],
unsigned int short_bct [mvnum_short - 1] [2]
) {
is_short_ct[0] = is_short_ct[1] = 0;
sign_ct[0] = sign_ct[1] = 0;
vpx_memset(bit_ct, 0, sizeof(unsigned int)*mvlong_width * 2);
vpx_memset(short_ct, 0, sizeof(unsigned int)*mvnum_short);
vpx_memset(short_bct, 0, sizeof(unsigned int) * (mvnum_short - 1) * 2);
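  /* Illustrative note (not part of the original source): events[] is a
   * histogram of signed component deltas centered on mv_max, i.e.
   * events[mv_max + v] counts occurrences of delta v. The two blocks below
   * fold the +v / -v pairs into sign counts plus a single magnitude
   * distribution, split between the short tree and the long bit planes. */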
{
const int c = events [mv_max];
is_short_ct [0] += c; // Short vector
short_ct [0] += c; // Magnitude distribution
}
{
int j = 1;
do {
const int c1 = events [mv_max + j]; // positive
const int c2 = events [mv_max - j]; // negative
const int c = c1 + c2;
int a = j;
sign_ct [0] += c1;
sign_ct [1] += c2;
if (a < mvnum_short) {
is_short_ct [0] += c; // Short vector
short_ct [a] += c; // Magnitude distribution
} else {
int k = mvlong_width - 1;
is_short_ct [1] += c; // Long vector
do
bit_ct [k] [(a >> k) & 1] += c;
while (--k >= 0);
}
} while (++j <= mv_max);
}
calc_prob(Pnew + mvpis_short, is_short_ct, 8);
calc_prob(Pnew + MVPsign, sign_ct, 8);
{
vp8_prob p [mvnum_short - 1]; /* actually only need branch ct */
int j = 0;
vp8_tree_probs_from_distribution(
mvnum_short, vp8_small_mvencodings, vp8_small_mvtree,
p, short_bct, short_ct,
256, 1
);
do
calc_prob(Pnew + MVPshort + j, short_bct[j], 8);
while (++j < mvnum_short - 1);
}
{
int j = 0;
do
calc_prob(Pnew + MVPbits + j, bit_ct[j], 8);
while (++j < mvlong_width);
}
}
static void compute_component_probs_hp(
const unsigned int events [MVvals_hp],
vp8_prob Pnew [MVPcount_hp],
unsigned int is_short_ct[2],
unsigned int sign_ct[2],
unsigned int bit_ct [mvlong_width_hp] [2],
unsigned int short_ct [mvnum_short_hp],
unsigned int short_bct [mvnum_short_hp - 1] [2]
) {
is_short_ct[0] = is_short_ct[1] = 0;
sign_ct[0] = sign_ct[1] = 0;
vpx_memset(bit_ct, 0, sizeof(unsigned int)*mvlong_width_hp * 2);
vpx_memset(short_ct, 0, sizeof(unsigned int)*mvnum_short_hp);
vpx_memset(short_bct, 0, sizeof(unsigned int) * (mvnum_short_hp - 1) * 2);
{
const int c = events [mv_max_hp];
is_short_ct [0] += c; // Short vector
short_ct [0] += c; // Magnitude distribution
}
{
int j = 1;
do {
const int c1 = events [mv_max_hp + j]; // positive
const int c2 = events [mv_max_hp - j]; // negative
const int c = c1 + c2;
int a = j;
sign_ct [0] += c1;
sign_ct [1] += c2;
if (a < mvnum_short_hp) {
is_short_ct [0] += c; // Short vector
short_ct [a] += c; // Magnitude distribution
} else {
int k = mvlong_width_hp - 1;
is_short_ct [1] += c; // Long vector
do
bit_ct [k] [(a >> k) & 1] += c;
while (--k >= 0);
}
} while (++j <= mv_max_hp);
}
calc_prob(Pnew + mvpis_short_hp, is_short_ct, 8);
calc_prob(Pnew + MVPsign_hp, sign_ct, 8);
{
vp8_prob p [mvnum_short_hp - 1]; /* actually only need branch ct */
int j = 0;
vp8_tree_probs_from_distribution(
mvnum_short_hp, vp8_small_mvencodings_hp, vp8_small_mvtree_hp,
p, short_bct, short_ct,
256, 1
);
do
calc_prob(Pnew + MVPshort_hp + j, short_bct[j], 8);
while (++j < mvnum_short_hp - 1);
}
{
int j = 0;
do
calc_prob(Pnew + MVPbits_hp + j, bit_ct[j], 8);
while (++j < mvlong_width_hp);
}
}
void vp8_adapt_mv_probs(VP8_COMMON *cm) {
int i, t, count, factor;
#ifdef MV_COUNT_TESTING
printf("static const unsigned int\nMVcount[2][MVvals]={\n");
for (i = 0; i < 2; ++i) {
printf(" { ");
for (t = 0; t < MVvals; t++) {
printf("%d, ", cm->fc.MVcount[i][t]);
if (t % 16 == 15 && t != MVvals - 1) printf("\n ");
}
printf("},\n");
}
printf("};\n");
printf("static const unsigned int\nMVcount_hp[2][MVvals_hp]={\n");
for (i = 0; i < 2; ++i) {
printf(" { ");
for (t = 0; t < MVvals_hp; t++) {
printf("%d, ", cm->fc.MVcount_hp[i][t]);
if (t % 16 == 15 && t != MVvals_hp - 1) printf("\n ");
}
printf("},\n");
}
printf("};\n");
#endif /* MV_COUNT_TESTING */
for (i = 0; i < 2; ++i) {
int prob;
unsigned int is_short_ct[2];
unsigned int sign_ct[2];
unsigned int bit_ct [mvlong_width] [2];
unsigned int short_ct [mvnum_short];
unsigned int short_bct [mvnum_short - 1] [2];
vp8_prob Pnew [MVPcount];
compute_component_probs(cm->fc.MVcount[i], Pnew,
is_short_ct, sign_ct,
bit_ct, short_ct, short_bct);
count = is_short_ct[0] + is_short_ct[1];
count = count > MV_COUNT_SAT ? MV_COUNT_SAT : count;
factor = (MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT);
prob = ((int)cm->fc.pre_mvc[i].prob[mvpis_short] * (256 - factor) +
(int)Pnew[mvpis_short] * factor + 128) >> 8;
if (prob <= 0) cm->fc.mvc[i].prob[mvpis_short] = 1;
else if (prob > 255) cm->fc.mvc[i].prob[mvpis_short] = 255;
else cm->fc.mvc[i].prob[mvpis_short] = prob;
count = sign_ct[0] + sign_ct[1];
count = count > MV_COUNT_SAT ? MV_COUNT_SAT : count;
factor = (MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT);
prob = ((int)cm->fc.pre_mvc[i].prob[MVPsign] * (256 - factor) +
(int)Pnew[MVPsign] * factor + 128) >> 8;
if (prob <= 0) cm->fc.mvc[i].prob[MVPsign] = 1;
else if (prob > 255) cm->fc.mvc[i].prob[MVPsign] = 255;
else cm->fc.mvc[i].prob[MVPsign] = prob;
for (t = 0; t < mvnum_short - 1; ++t) {
count = short_bct[t][0] + short_bct[t][1];
count = count > MV_COUNT_SAT ? MV_COUNT_SAT : count;
factor = (MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT);
prob = ((int)cm->fc.pre_mvc[i].prob[MVPshort + t] * (256 - factor) +
(int)Pnew[MVPshort + t] * factor + 128) >> 8;
if (prob <= 0) cm->fc.mvc[i].prob[MVPshort + t] = 1;
else if (prob > 255) cm->fc.mvc[i].prob[MVPshort + t] = 255;
else cm->fc.mvc[i].prob[MVPshort + t] = prob;
}
for (t = 0; t < mvlong_width; ++t) {
count = bit_ct[t][0] + bit_ct[t][1];
count = count > MV_COUNT_SAT ? MV_COUNT_SAT : count;
factor = (MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT);
prob = ((int)cm->fc.pre_mvc[i].prob[MVPbits + t] * (256 - factor) +
(int)Pnew[MVPbits + t] * factor + 128) >> 8;
if (prob <= 0) cm->fc.mvc[i].prob[MVPbits + t] = 1;
else if (prob > 255) cm->fc.mvc[i].prob[MVPbits + t] = 255;
else cm->fc.mvc[i].prob[MVPbits + t] = prob;
}
}
for (i = 0; i < 2; ++i) {
int prob;
unsigned int is_short_ct[2];
unsigned int sign_ct[2];
unsigned int bit_ct [mvlong_width_hp] [2];
unsigned int short_ct [mvnum_short_hp];
unsigned int short_bct [mvnum_short_hp - 1] [2];
vp8_prob Pnew [MVPcount_hp];
compute_component_probs_hp(cm->fc.MVcount_hp[i], Pnew,
is_short_ct, sign_ct,
bit_ct, short_ct, short_bct);
count = is_short_ct[0] + is_short_ct[1];
count = count > MV_COUNT_SAT ? MV_COUNT_SAT : count;
factor = (MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT);
prob = ((int)cm->fc.pre_mvc_hp[i].prob[mvpis_short_hp] * (256 - factor) +
(int)Pnew[mvpis_short_hp] * factor + 128) >> 8;
if (prob <= 0) cm->fc.mvc_hp[i].prob[mvpis_short_hp] = 1;
else if (prob > 255) cm->fc.mvc_hp[i].prob[mvpis_short_hp] = 255;
else cm->fc.mvc_hp[i].prob[mvpis_short_hp] = prob;
count = sign_ct[0] + sign_ct[1];
count = count > MV_COUNT_SAT ? MV_COUNT_SAT : count;
factor = (MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT);
prob = ((int)cm->fc.pre_mvc_hp[i].prob[MVPsign_hp] * (256 - factor) +
(int)Pnew[MVPsign_hp] * factor + 128) >> 8;
if (prob <= 0) cm->fc.mvc_hp[i].prob[MVPsign_hp] = 1;
else if (prob > 255) cm->fc.mvc_hp[i].prob[MVPsign_hp] = 255;
else cm->fc.mvc_hp[i].prob[MVPsign_hp] = prob;
for (t = 0; t < mvnum_short_hp - 1; ++t) {
count = short_bct[t][0] + short_bct[t][1];
count = count > MV_COUNT_SAT ? MV_COUNT_SAT : count;
factor = (MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT);
prob = ((int)cm->fc.pre_mvc_hp[i].prob[MVPshort_hp + t] * (256 - factor) +
(int)Pnew[MVPshort_hp + t] * factor + 128) >> 8;
if (prob <= 0) cm->fc.mvc_hp[i].prob[MVPshort_hp + t] = 1;
else if (prob > 255) cm->fc.mvc_hp[i].prob[MVPshort_hp + t] = 255;
else cm->fc.mvc_hp[i].prob[MVPshort_hp + t] = prob;
}
for (t = 0; t < mvlong_width_hp; ++t) {
count = bit_ct[t][0] + bit_ct[t][1];
count = count > MV_COUNT_SAT ? MV_COUNT_SAT : count;
factor = (MV_MAX_UPDATE_FACTOR * count / MV_COUNT_SAT);
prob = ((int)cm->fc.pre_mvc_hp[i].prob[MVPbits_hp + t] * (256 - factor) +
(int)Pnew[MVPbits_hp + t] * factor + 128) >> 8;
if (prob <= 0) cm->fc.mvc_hp[i].prob[MVPbits_hp + t] = 1;
else if (prob > 255) cm->fc.mvc_hp[i].prob[MVPbits_hp + t] = 255;
else cm->fc.mvc_hp[i].prob[MVPbits_hp + t] = prob;
}
}
}
#endif /* CONFIG_NEWMVENTROPY */
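/* Illustrative note (not part of the original source): the adaptation above
 * is a saturating linear blend between the previous probability and the one
 * measured this frame. With pre = 192 and Pnew = 100:
 *
 *   count >= MV_COUNT_SAT (16): factor = 128
 *       prob = (192 * 128 + 100 * 128 + 128) >> 8 = 146   (half-way)
 *   count == 4:                 factor = 32
 *       prob = (192 * 224 + 100 *  32 + 128) >> 8 = 181   (small step)
 *
 * so sparse statistics move the estimate only slightly, and the result is
 * always clamped to [1, 255]. */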
void vp8_entropy_mv_init() {
#if CONFIG_NEWMVENTROPY
vp8_tokens_from_tree(vp8_mv_joint_encodings, vp8_mv_joint_tree);
vp8_tokens_from_tree(vp8_mv_class_encodings, vp8_mv_class_tree);
vp8_tokens_from_tree(vp8_mv_class0_encodings, vp8_mv_class0_tree);
vp8_tokens_from_tree(vp8_mv_fp_encodings, vp8_mv_fp_tree);
#else
vp8_tokens_from_tree(vp8_small_mvencodings, vp8_small_mvtree);
vp8_tokens_from_tree(vp8_small_mvencodings_hp, vp8_small_mvtree_hp);
#endif
}
void vp8_init_mv_probs(VP8_COMMON *cm) {
#if CONFIG_NEWMVENTROPY
vpx_memcpy(&cm->fc.nmvc, &vp8_default_nmv_context, sizeof(nmv_context));
#else
vpx_memcpy(cm->fc.mvc,
vp8_default_mv_context, sizeof(vp8_default_mv_context));
vpx_memcpy(cm->fc.mvc_hp,
vp8_default_mv_context_hp, sizeof(vp8_default_mv_context_hp));
#endif
}

View File

@ -22,7 +22,6 @@ void vp8_entropy_mv_init();
void vp8_init_mv_probs(struct VP8Common *cm);
void vp8_adapt_mv_probs(struct VP8Common *cm);
#if CONFIG_NEWMVENTROPY
void vp8_adapt_nmv_probs(struct VP8Common *cm, int usehp);
void vp8_lower_mv_precision(MV *mv);
int vp8_use_nmv_hp(const MV *ref);
@ -129,65 +128,4 @@ void vp8_counts_to_nmv_context(
unsigned int (*branch_ct_class0_hp)[2],
unsigned int (*branch_ct_hp)[2]);
#else /* CONFIG_NEWMVENTROPY */
enum {
mv_max = 1023, /* max absolute value of a MV component */
MVvals = (2 * mv_max) + 1, /* # possible values "" */
mvlong_width = 10, /* Large MVs have 9 bit magnitudes */
mvnum_short = 8, /* magnitudes 0 through 7 */
mvnum_short_bits = 3, /* number of bits for short mvs */
mvfp_max = 255, /* max absolute value of a full pixel MV component */
MVfpvals = (2 * mvfp_max) + 1, /* # possible full pixel MV values */
/* probability offsets for coding each MV component */
mvpis_short = 0, /* short (<= 7) vs long (>= 8) */
MVPsign, /* sign for non-zero */
MVPshort, /* 8 short values = 7-position tree */
MVPbits = MVPshort + mvnum_short - 1, /* mvlong_width long value bits */
MVPcount = MVPbits + mvlong_width /* (with independent probabilities) */
};
typedef struct mv_context {
vp8_prob prob[MVPcount]; /* often come in row, col pairs */
} MV_CONTEXT;
extern const MV_CONTEXT vp8_mv_update_probs[2], vp8_default_mv_context[2];
enum {
mv_max_hp = 2047, /* max absolute value of a MV component */
MVvals_hp = (2 * mv_max_hp) + 1, /* # possible values "" */
mvlong_width_hp = 11, /* Large MVs have 9 bit magnitudes */
mvnum_short_hp = 16, /* magnitudes 0 through 15 */
mvnum_short_bits_hp = 4, /* number of bits for short mvs */
mvfp_max_hp = 255, /* max absolute value of a full pixel MV component */
MVfpvals_hp = (2 * mvfp_max_hp) + 1, /* # possible full pixel MV values */
/* probability offsets for coding each MV component */
mvpis_short_hp = 0, /* short (<= 15) vs long (>= 16) */
MVPsign_hp, /* sign for non-zero */
MVPshort_hp, /* 16 short values = 15-position tree */
MVPbits_hp = MVPshort_hp + mvnum_short_hp - 1, /* mvlong_width_hp long value bits */
MVPcount_hp = MVPbits_hp + mvlong_width_hp /* (with independent probabilities) */
};
typedef struct mv_context_hp {
vp8_prob prob[MVPcount_hp]; /* often come in row, col pairs */
} MV_CONTEXT_HP;
extern const MV_CONTEXT_HP vp8_mv_update_probs_hp[2], vp8_default_mv_context_hp[2];
extern const vp8_tree_index vp8_small_mvtree[];
extern struct vp8_token_struct vp8_small_mvencodings [8];
extern const vp8_tree_index vp8_small_mvtree_hp[];
extern struct vp8_token_struct vp8_small_mvencodings_hp [16];
#endif /* CONFIG_NEWMVENTROPY */
#endif

View File

@ -22,11 +22,7 @@ const unsigned char vp8_mbsplit_offset[4][16] = {
static void lower_mv_precision(int_mv *mv, int usehp)
{
#if CONFIG_NEWMVENTROPY
if (!usehp || !vp8_use_nmv_hp(&mv->as_mv)) {
#else
if (!usehp) {
#endif
if (mv->as_mv.row & 1)
mv->as_mv.row += (mv->as_mv.row > 0 ? -1 : 1);
if (mv->as_mv.col & 1)

View File

@ -57,15 +57,8 @@ typedef struct frame_contexts {
vp8_prob coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
vp8_prob hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#if CONFIG_NEWMVENTROPY
nmv_context nmvc;
nmv_context pre_nmvc;
#else
MV_CONTEXT mvc[2];
MV_CONTEXT_HP mvc_hp[2];
MV_CONTEXT pre_mvc[2];
MV_CONTEXT_HP pre_mvc_hp[2];
#endif
vp8_prob pre_bmode_prob [VP8_BINTRAMODES - 1];
vp8_prob pre_ymode_prob [VP8_YMODES - 1]; /* interframe intra mode probs */
vp8_prob pre_uv_mode_prob [VP8_YMODES][VP8_UV_MODES - 1];
@ -109,12 +102,7 @@ typedef struct frame_contexts {
unsigned int hybrid_coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#if CONFIG_NEWMVENTROPY
nmv_context_counts NMVcount;
#else
unsigned int MVcount [2] [MVvals];
unsigned int MVcount_hp [2] [MVvals_hp];
#endif
#if CONFIG_SWITCHABLE_INTERP
vp8_prob switchable_interp_prob[VP8_SWITCHABLE_FILTERS+1]
[VP8_SWITCHABLE_FILTERS-1];

View File

@ -188,7 +188,6 @@ static void kfread_modes(VP8D_COMP *pbi,
}
}
#if CONFIG_NEWMVENTROPY
static int read_nmv_component(vp8_reader *r,
int rv,
const nmv_component *mvcomp) {
@ -207,7 +206,7 @@ static int read_nmv_component(vp8_reader *r,
o = d << 3;
z = vp8_get_mv_mag(c, o);
- v = (s ? -(z + 1) : (z + 1));
+ v = (s ? -(z + 8) : (z + 8));
return v;
}
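/* Illustrative note (not part of the original source): in the retained nmv
 * scheme a nonzero component is coded as sign + magnitude class + offset.
 * Above, d is the integer-pel offset within the class, o = d << 3 rescales
 * it to 1/8-pel units, and vp8_get_mv_mag() adds the class base; the
 * fractional 1/8-pel bits are read separately by read_nmv_component_fp()
 * below. Zero deltas never reach this function -- they are signalled by
 * the joint code read from vp8_mv_joint_tree. */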
@ -219,6 +218,7 @@ static int read_nmv_component_fp(vp8_reader *r,
int s, z, c, o, d, e, f;
s = v < 0;
z = (s ? -v : v) - 1; /* magnitude - 1 */
+ z &= ~7;
c = vp8_get_mv_class(z, &o);
d = o >> 3;
@ -332,124 +332,6 @@ static void read_nmvprobs(vp8_reader *bc, nmv_context *mvctx,
}
}
#else
static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc) {
const vp8_prob *const p = (const vp8_prob *) mvc;
int x = 0;
if (vp8_read(r, p [mvpis_short])) { /* Large */
int i = 0;
do {
x += vp8_read(r, p [MVPbits + i]) << i;
} while (++i < mvnum_short_bits);
i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */
do {
x += vp8_read(r, p [MVPbits + i]) << i;
} while (--i > mvnum_short_bits);
if (!(x & ~((2 << mvnum_short_bits) - 1)) || vp8_read(r, p [MVPbits + mvnum_short_bits]))
x += (mvnum_short);
} else /* small */
x = vp8_treed_read(r, vp8_small_mvtree, p + MVPshort);
if (x && vp8_read(r, p [MVPsign]))
x = -x;
return x;
}
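/* Illustrative note (not part of the original source): the "sometimes
 * implicit" bit above exploits the fact that on the long path x >= 8, so
 * if every decoded bit above bit 3 is zero, bit 3 must have been 1 and the
 * encoder never sent it. That is what the !(x & ~((2 << mvnum_short_bits)
 * - 1)) test decides before the final x += mvnum_short; the matching
 * encoder-side check appears in encodemv.c. */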
static void read_mv(vp8_reader *r, MV *mv, const MV_CONTEXT *mvc) {
mv->row = (short)(read_mvcomponent(r, mvc) << 1);
mv->col = (short)(read_mvcomponent(r, ++mvc) << 1);
#ifdef DEBUG_DEC_MV
int i;
printf("%d (np): %d %d\n", dec_mvcount++, mv->row, mv->col);
// for (i=0; i<MVPcount;++i) printf(" %d", (&mvc[-1])->prob[i]); printf("\n");
// for (i=0; i<MVPcount;++i) printf(" %d", (&mvc[0])->prob[i]); printf("\n");
#endif
}
static void read_mvcontexts(vp8_reader *bc, MV_CONTEXT *mvc) {
int i = 0;
do {
const vp8_prob *up = vp8_mv_update_probs[i].prob;
vp8_prob *p = (vp8_prob *)(mvc + i);
vp8_prob *const pstop = p + MVPcount;
do {
if (vp8_read(bc, *up++)) {
const vp8_prob x = (vp8_prob)vp8_read_literal(bc, 7);
*p = x ? x << 1 : 1;
}
} while (++p < pstop);
} while (++i < 2);
}
static int read_mvcomponent_hp(vp8_reader *r, const MV_CONTEXT_HP *mvc) {
const vp8_prob *const p = (const vp8_prob *) mvc;
int x = 0;
if (vp8_read(r, p [mvpis_short_hp])) { /* Large */
int i = 0;
do {
x += vp8_read(r, p [MVPbits_hp + i]) << i;
} while (++i < mvnum_short_bits_hp);
i = mvlong_width_hp - 1; /* Skip bit 3, which is sometimes implicit */
do {
x += vp8_read(r, p [MVPbits_hp + i]) << i;
} while (--i > mvnum_short_bits_hp);
if (!(x & ~((2 << mvnum_short_bits_hp) - 1)) || vp8_read(r, p [MVPbits_hp + mvnum_short_bits_hp]))
x += (mvnum_short_hp);
} else /* small */
x = vp8_treed_read(r, vp8_small_mvtree_hp, p + MVPshort_hp);
if (x && vp8_read(r, p [MVPsign_hp]))
x = -x;
return x;
}
static void read_mv_hp(vp8_reader *r, MV *mv, const MV_CONTEXT_HP *mvc) {
mv->row = (short)(read_mvcomponent_hp(r, mvc));
mv->col = (short)(read_mvcomponent_hp(r, ++mvc));
#ifdef DEBUG_DEC_MV
int i;
printf("%d (hp): %d %d\n", dec_mvcount++, mv->row, mv->col);
// for (i=0; i<MVPcount_hp;++i) printf(" %d", (&mvc[-1])->prob[i]); printf("\n");
// for (i=0; i<MVPcount_hp;++i) printf(" %d", (&mvc[0])->prob[i]); printf("\n");
#endif
}
static void read_mvcontexts_hp(vp8_reader *bc, MV_CONTEXT_HP *mvc) {
int i = 0;
do {
const vp8_prob *up = vp8_mv_update_probs_hp[i].prob;
vp8_prob *p = (vp8_prob *)(mvc + i);
vp8_prob *const pstop = p + MVPcount_hp;
do {
if (vp8_read(bc, *up++)) {
const vp8_prob x = (vp8_prob)vp8_read_literal(bc, 7);
*p = x ? x << 1 : 1;
}
} while (++p < pstop);
} while (++i < 2);
}
#endif /* CONFIG_NEWMVENTROPY */
// Read the reference frame
static MV_REFERENCE_FRAME read_ref_frame(VP8D_COMP *pbi,
vp8_reader *const bc,
@ -614,12 +496,7 @@ static void read_switchable_interp_probs(VP8D_COMP* const pbi,
static void mb_mode_mv_init(VP8D_COMP *pbi, vp8_reader *bc) {
VP8_COMMON *const cm = &pbi->common;
#if CONFIG_NEWMVENTROPY
nmv_context *const nmvc = &pbi->common.fc.nmvc;
#else
MV_CONTEXT *const mvc = pbi->common.fc.mvc;
MV_CONTEXT_HP *const mvc_hp = pbi->common.fc.mvc_hp;
#endif
MACROBLOCKD *const xd = &pbi->mb;
if (cm->frame_type == KEY_FRAME) {
@ -661,14 +538,7 @@ static void mb_mode_mv_init(VP8D_COMP *pbi, vp8_reader *bc) {
cm->fc.ymode_prob[i] = (vp8_prob) vp8_read_literal(bc, 8);
} while (++i < VP8_YMODES - 1);
}
#if CONFIG_NEWMVENTROPY
read_nmvprobs(bc, nmvc, xd->allow_high_precision_mv);
#else
if (xd->allow_high_precision_mv)
read_mvcontexts_hp(bc, mvc_hp);
else
read_mvcontexts(bc, mvc);
#endif
}
}
@ -751,12 +621,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
int mb_row, int mb_col,
BOOL_DECODER* const bc) {
VP8_COMMON *const cm = &pbi->common;
#if CONFIG_NEWMVENTROPY
nmv_context *const nmvc = &pbi->common.fc.nmvc;
#else
MV_CONTEXT *const mvc = pbi->common.fc.mvc;
MV_CONTEXT_HP *const mvc_hp = pbi->common.fc.mvc_hp;
#endif
const int mis = pbi->common.mode_info_stride;
MACROBLOCKD *const xd = &pbi->mb;
@ -1005,44 +870,20 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
switch (blockmode) {
case NEW4X4:
#if CONFIG_NEWMVENTROPY
read_nmv(bc, &blockmv.as_mv, &best_mv.as_mv, nmvc);
read_nmv_fp(bc, &blockmv.as_mv, &best_mv.as_mv, nmvc,
xd->allow_high_precision_mv);
vp8_increment_nmv(&blockmv.as_mv, &best_mv.as_mv,
&cm->fc.NMVcount, xd->allow_high_precision_mv);
#else
if (xd->allow_high_precision_mv) {
read_mv_hp(bc, &blockmv.as_mv, (const MV_CONTEXT_HP *) mvc_hp);
cm->fc.MVcount_hp[0][mv_max_hp + (blockmv.as_mv.row)]++;
cm->fc.MVcount_hp[1][mv_max_hp + (blockmv.as_mv.col)]++;
} else {
read_mv(bc, &blockmv.as_mv, (const MV_CONTEXT *) mvc);
cm->fc.MVcount[0][mv_max + (blockmv.as_mv.row >> 1)]++;
cm->fc.MVcount[1][mv_max + (blockmv.as_mv.col >> 1)]++;
}
#endif /* CONFIG_NEWMVENTROPY */
blockmv.as_mv.row += best_mv.as_mv.row;
blockmv.as_mv.col += best_mv.as_mv.col;
if (mbmi->second_ref_frame) {
#if CONFIG_NEWMVENTROPY
read_nmv(bc, &secondmv.as_mv, &best_mv_second.as_mv, nmvc);
read_nmv_fp(bc, &secondmv.as_mv, &best_mv_second.as_mv, nmvc,
xd->allow_high_precision_mv);
vp8_increment_nmv(&secondmv.as_mv, &best_mv_second.as_mv,
&cm->fc.NMVcount, xd->allow_high_precision_mv);
#else
if (xd->allow_high_precision_mv) {
read_mv_hp(bc, &secondmv.as_mv, (const MV_CONTEXT_HP *) mvc_hp);
cm->fc.MVcount_hp[0][mv_max_hp + (secondmv.as_mv.row)]++;
cm->fc.MVcount_hp[1][mv_max_hp + (secondmv.as_mv.col)]++;
} else {
read_mv(bc, &secondmv.as_mv, (const MV_CONTEXT *) mvc);
cm->fc.MVcount[0][mv_max + (secondmv.as_mv.row >> 1)]++;
cm->fc.MVcount[1][mv_max + (secondmv.as_mv.col >> 1)]++;
}
#endif /* CONFIG_NEWMVENTROPY */
secondmv.as_mv.row += best_mv_second.as_mv.row;
secondmv.as_mv.col += best_mv_second.as_mv.col;
}
@ -1147,23 +988,11 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
break;
case NEWMV:
#if CONFIG_NEWMVENTROPY
read_nmv(bc, &mv->as_mv, &best_mv.as_mv, nmvc);
read_nmv_fp(bc, &mv->as_mv, &best_mv.as_mv, nmvc,
xd->allow_high_precision_mv);
vp8_increment_nmv(&mv->as_mv, &best_mv.as_mv, &cm->fc.NMVcount,
xd->allow_high_precision_mv);
#else
if (xd->allow_high_precision_mv) {
read_mv_hp(bc, &mv->as_mv, (const MV_CONTEXT_HP *) mvc_hp);
cm->fc.MVcount_hp[0][mv_max_hp + (mv->as_mv.row)]++;
cm->fc.MVcount_hp[1][mv_max_hp + (mv->as_mv.col)]++;
} else {
read_mv(bc, &mv->as_mv, (const MV_CONTEXT *) mvc);
cm->fc.MVcount[0][mv_max + (mv->as_mv.row >> 1)]++;
cm->fc.MVcount[1][mv_max + (mv->as_mv.col >> 1)]++;
}
#endif /* CONFIG_NEWMVENTROPY */
mv->as_mv.row += best_mv.as_mv.row;
mv->as_mv.col += best_mv.as_mv.col;
@ -1178,23 +1007,11 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
mb_to_top_edge,
mb_to_bottom_edge);
if (mbmi->second_ref_frame) {
#if CONFIG_NEWMVENTROPY
read_nmv(bc, &mbmi->mv[1].as_mv, &best_mv_second.as_mv, nmvc);
read_nmv_fp(bc, &mbmi->mv[1].as_mv, &best_mv_second.as_mv, nmvc,
xd->allow_high_precision_mv);
vp8_increment_nmv(&mbmi->mv[1].as_mv, &best_mv_second.as_mv,
&cm->fc.NMVcount, xd->allow_high_precision_mv);
#else
if (xd->allow_high_precision_mv) {
read_mv_hp(bc, &mbmi->mv[1].as_mv, (const MV_CONTEXT_HP *) mvc_hp);
cm->fc.MVcount_hp[0][mv_max_hp + (mbmi->mv[1].as_mv.row)]++;
cm->fc.MVcount_hp[1][mv_max_hp + (mbmi->mv[1].as_mv.col)]++;
} else {
read_mv(bc, &mbmi->mv[1].as_mv, (const MV_CONTEXT *) mvc);
cm->fc.MVcount[0][mv_max + (mbmi->mv[1].as_mv.row >> 1)]++;
cm->fc.MVcount[1][mv_max + (mbmi->mv[1].as_mv.col >> 1)]++;
}
#endif /* CONFIG_NEWMVENTROPY */
mbmi->mv[1].as_mv.row += best_mv_second.as_mv.row;
mbmi->mv[1].as_mv.col += best_mv_second.as_mv.col;
mbmi->need_to_clamp_secondmv |=

View File

@ -1354,12 +1354,7 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
vp8_copy(pbi->common.fc.pre_i8x8_mode_prob, pbi->common.fc.i8x8_mode_prob);
vp8_copy(pbi->common.fc.pre_sub_mv_ref_prob, pbi->common.fc.sub_mv_ref_prob);
vp8_copy(pbi->common.fc.pre_mbsplit_prob, pbi->common.fc.mbsplit_prob);
#if CONFIG_NEWMVENTROPY
pbi->common.fc.pre_nmvc = pbi->common.fc.nmvc;
#else
vp8_copy(pbi->common.fc.pre_mvc, pbi->common.fc.mvc);
vp8_copy(pbi->common.fc.pre_mvc_hp, pbi->common.fc.mvc_hp);
#endif
vp8_zero(pbi->common.fc.coef_counts);
vp8_zero(pbi->common.fc.hybrid_coef_counts);
vp8_zero(pbi->common.fc.coef_counts_8x8);
@ -1372,12 +1367,7 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
vp8_zero(pbi->common.fc.i8x8_mode_counts);
vp8_zero(pbi->common.fc.sub_mv_ref_counts);
vp8_zero(pbi->common.fc.mbsplit_counts);
#if CONFIG_NEWMVENTROPY
vp8_zero(pbi->common.fc.NMVcount);
#else
vp8_zero(pbi->common.fc.MVcount);
vp8_zero(pbi->common.fc.MVcount_hp);
#endif
vp8_zero(pbi->common.fc.mv_ref_ct);
vp8_zero(pbi->common.fc.mv_ref_ct_a);
@ -1436,11 +1426,7 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
vp8_adapt_coef_probs(pc);
if (pc->frame_type != KEY_FRAME) {
vp8_adapt_mode_probs(pc);
#if CONFIG_NEWMVENTROPY
vp8_adapt_nmv_probs(pc, xd->allow_high_precision_mv);
#else
vp8_adapt_mv_probs(pc);
#endif
vp8_update_mode_context(&pbi->common);
}

View File

@ -643,7 +643,6 @@ static void write_sub_mv_ref
vp8_sub_mv_ref_encoding_array - LEFT4X4 + m);
}
#if CONFIG_NEWMVENTROPY
static void write_nmv(vp8_writer *bc, const MV *mv, const int_mv *ref,
const nmv_context *nmvc, int usehp) {
MV e;
@ -654,31 +653,6 @@ static void write_nmv(vp8_writer *bc, const MV *mv, const int_mv *ref,
vp8_encode_nmv_fp(bc, &e, &ref->as_mv, nmvc, usehp);
}
#else
static void write_mv
(
vp8_writer *bc, const MV *mv, const int_mv *ref, const MV_CONTEXT *mvc
) {
MV e;
e.row = mv->row - ref->as_mv.row;
e.col = mv->col - ref->as_mv.col;
vp8_encode_motion_vector(bc, &e, mvc);
}
static void write_mv_hp
(
vp8_writer *bc, const MV *mv, const int_mv *ref, const MV_CONTEXT_HP *mvc
) {
MV e;
e.row = mv->row - ref->as_mv.row;
e.col = mv->col - ref->as_mv.col;
vp8_encode_motion_vector_hp(bc, &e, mvc);
}
#endif /* CONFIG_NEWMVENTROPY */
// This function writes the current macro block's segment id to the bitstream
// It should only be called if a segment map update is indicated.
static void write_mb_segid(vp8_writer *bc,
@ -815,12 +789,7 @@ static void update_ref_probs(VP8_COMP *const cpi) {
static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
int i;
VP8_COMMON *const pc = &cpi->common;
#if CONFIG_NEWMVENTROPY
const nmv_context *nmvc = &pc->fc.nmvc;
#else
const MV_CONTEXT *mvc = pc->fc.mvc;
const MV_CONTEXT_HP *mvc_hp = pc->fc.mvc_hp;
#endif
MACROBLOCK *x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
MODE_INFO *m;
@ -1093,17 +1062,9 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
}
#endif
#if CONFIG_NEWMVENTROPY
write_nmv(bc, &mi->mv[0].as_mv, &best_mv,
(const nmv_context*) nmvc,
xd->allow_high_precision_mv);
#else
if (xd->allow_high_precision_mv) {
write_mv_hp(bc, &mi->mv[0].as_mv, &best_mv, mvc_hp);
} else {
write_mv(bc, &mi->mv[0].as_mv, &best_mv, mvc);
}
#endif
if (mi->second_ref_frame) {
#if 0 //CONFIG_NEW_MVREF
@ -1120,17 +1081,9 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
&best_second_mv);
cpi->best_ref_index_counts[best_index]++;
#endif
#if CONFIG_NEWMVENTROPY
write_nmv(bc, &mi->mv[1].as_mv, &best_second_mv,
(const nmv_context*) nmvc,
xd->allow_high_precision_mv);
#else
if (xd->allow_high_precision_mv) {
write_mv_hp(bc, &mi->mv[1].as_mv, &best_second_mv, mvc_hp);
} else {
write_mv(bc, &mi->mv[1].as_mv, &best_second_mv, mvc);
}
#endif
}
break;
case SPLITMV: {
@ -1172,40 +1125,16 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi, vp8_writer *const bc) {
#ifdef ENTROPY_STATS
active_section = 11;
#endif
#if CONFIG_NEWMVENTROPY
write_nmv(bc, &blockmv.as_mv, &best_mv,
(const nmv_context*) nmvc,
xd->allow_high_precision_mv);
#else
if (xd->allow_high_precision_mv) {
write_mv_hp(bc, &blockmv.as_mv, &best_mv,
(const MV_CONTEXT_HP *) mvc_hp);
} else {
write_mv(bc, &blockmv.as_mv, &best_mv,
(const MV_CONTEXT *) mvc);
}
#endif
if (mi->second_ref_frame) {
#if CONFIG_NEWMVENTROPY
write_nmv(bc,
&cpi->mb.partition_info->bmi[j].second_mv.as_mv,
&best_second_mv,
(const nmv_context*) nmvc,
xd->allow_high_precision_mv);
#else
if (xd->allow_high_precision_mv) {
write_mv_hp(
bc,
&cpi->mb.partition_info->bmi[j].second_mv.as_mv,
&best_second_mv, (const MV_CONTEXT_HP *)mvc_hp);
} else {
write_mv(
bc,
&cpi->mb.partition_info->bmi[j].second_mv.as_mv,
&best_second_mv, (const MV_CONTEXT *) mvc);
}
#endif
}
}
} while (++j < cpi->mb.partition_info->count);
@ -2724,12 +2653,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
vp8_copy(cpi->common.fc.pre_sub_mv_ref_prob, cpi->common.fc.sub_mv_ref_prob);
vp8_copy(cpi->common.fc.pre_mbsplit_prob, cpi->common.fc.mbsplit_prob);
vp8_copy(cpi->common.fc.pre_i8x8_mode_prob, cpi->common.fc.i8x8_mode_prob);
#if CONFIG_NEWMVENTROPY
cpi->common.fc.pre_nmvc = cpi->common.fc.nmvc;
#else
vp8_copy(cpi->common.fc.pre_mvc, cpi->common.fc.mvc);
vp8_copy(cpi->common.fc.pre_mvc_hp, cpi->common.fc.mvc_hp);
#endif
vp8_zero(cpi->sub_mv_ref_count);
vp8_zero(cpi->mbsplit_count);
vp8_zero(cpi->common.fc.mv_ref_ct)
@ -2801,15 +2725,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
update_mbintra_mode_probs(cpi, &header_bc);
#if CONFIG_NEWMVENTROPY
vp8_write_nmvprobs(cpi, xd->allow_high_precision_mv, &header_bc);
#else
if (xd->allow_high_precision_mv) {
vp8_write_mvprobs_hp(cpi, &header_bc);
} else {
vp8_write_mvprobs(cpi, &header_bc);
}
#endif
}
vp8_stop_encode(&header_bc);

View File

@ -114,7 +114,6 @@ typedef struct {
int *mb_norm_activity_ptr;
signed int act_zbin_adj;
#if CONFIG_NEWMVENTROPY
int nmvjointcost[MV_JOINTS];
int nmvcosts[2][MV_VALS];
int *nmvcost[2];
@ -126,16 +125,6 @@ typedef struct {
int *nmvsadcost[2];
int nmvsadcosts_hp[2][MV_VALS];
int *nmvsadcost_hp[2];
#else
int mvcosts[2][MVvals + 1];
int *mvcost[2];
int mvsadcosts[2][MVfpvals + 1];
int *mvsadcost[2];
int mvcosts_hp[2][MVvals_hp + 1];
int *mvcost_hp[2];
int mvsadcosts_hp[2][MVfpvals_hp + 1];
int *mvsadcost_hp[2];
#endif /* CONFIG_NEWMVENTROPY */
int mbmode_cost[2][MB_MODE_COUNT];
int intra_uv_mode_cost[2][MB_MODE_COUNT];

View File

@ -1355,12 +1355,7 @@ static void encode_frame_internal(VP8_COMP *cpi) {
xd->mode_info_context = cm->mi;
xd->prev_mode_info_context = cm->prev_mi;
#if CONFIG_NEWMVENTROPY
vp8_zero(cpi->NMVcount);
#else
vp8_zero(cpi->MVcount);
vp8_zero(cpi->MVcount_hp);
#endif
vp8_zero(cpi->coef_counts);
vp8_zero(cpi->hybrid_coef_counts);
vp8_zero(cpi->coef_counts_8x8);

View File

@ -22,8 +22,6 @@ extern unsigned int active_section;
//extern int final_packing;
#if CONFIG_NEWMVENTROPY
#ifdef NMV_STATS
nmv_context_counts tnmvcounts;
#endif
@ -549,593 +547,3 @@ void vp8_build_nmv_cost_table(int *mvjoint,
if (mvc_flag_h)
build_nmv_component_cost_table(mvcost[1], &mvctx->comps[1], usehp);
}
#else /* CONFIG_NEWMVENTROPY */
static void encode_mvcomponent(
vp8_writer *const bc,
const int v,
const struct mv_context *mvc
) {
const vp8_prob *p = mvc->prob;
const int x = v < 0 ? -v : v;
if (x < mvnum_short) { // Small
vp8_write(bc, 0, p[mvpis_short]);
vp8_treed_write(bc, vp8_small_mvtree, p + MVPshort, x, mvnum_short_bits);
if (!x)
return; // no sign bit
} else { // Large
int i = 0;
vp8_write(bc, 1, p[mvpis_short]);
do
vp8_write(bc, (x >> i) & 1, p[MVPbits + i]);
while (++i < mvnum_short_bits);
i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */
do
vp8_write(bc, (x >> i) & 1, p[MVPbits + i]);
while (--i > mvnum_short_bits);
if (x & ~((2 << mvnum_short_bits) - 1))
vp8_write(bc, (x >> mvnum_short_bits) & 1, p[MVPbits + mvnum_short_bits]);
}
vp8_write(bc, v < 0, p[MVPsign]);
}
void vp8_encode_motion_vector(vp8_writer* const bc,
const MV* const mv,
const MV_CONTEXT* const mvc) {
encode_mvcomponent(bc, mv->row >> 1, &mvc[0]);
encode_mvcomponent(bc, mv->col >> 1, &mvc[1]);
}
static unsigned int cost_mvcomponent(const int v,
const struct mv_context* const mvc) {
const vp8_prob *p = mvc->prob;
const int x = v; // v<0? -v:v;
unsigned int cost;
if (x < mvnum_short) {
cost = vp8_cost_zero(p [mvpis_short])
+ vp8_treed_cost(vp8_small_mvtree, p + MVPshort, x, mvnum_short_bits);
if (!x)
return cost;
} else {
int i = 0;
cost = vp8_cost_one(p [mvpis_short]);
do
cost += vp8_cost_bit(p [MVPbits + i], (x >> i) & 1);
while (++i < mvnum_short_bits);
i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */
do
cost += vp8_cost_bit(p [MVPbits + i], (x >> i) & 1);
while (--i > mvnum_short_bits);
if (x & ~((2 << mvnum_short_bits) - 1))
cost += vp8_cost_bit(p [MVPbits + mvnum_short_bits], (x >> mvnum_short_bits) & 1);
}
return cost; // + vp8_cost_bit( p [MVPsign], v < 0);
}
void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc,
const int mvc_flag[2]) {
int i = 1; // -mv_max;
unsigned int cost0 = 0;
unsigned int cost1 = 0;
vp8_clear_system_state();
i = 1;
if (mvc_flag[0]) {
mvcost [0] [0] = cost_mvcomponent(0, &mvc[0]);
do {
// mvcost [0] [i] = cost_mvcomponent( i, &mvc[0]);
cost0 = cost_mvcomponent(i, &mvc[0]);
mvcost [0] [i] = cost0 + vp8_cost_zero(mvc[0].prob[MVPsign]);
mvcost [0] [-i] = cost0 + vp8_cost_one(mvc[0].prob[MVPsign]);
} while (++i <= mv_max);
}
i = 1;
if (mvc_flag[1]) {
mvcost [1] [0] = cost_mvcomponent(0, &mvc[1]);
do {
// mvcost [1] [i] = cost_mvcomponent( i, mvc[1]);
cost1 = cost_mvcomponent(i, &mvc[1]);
mvcost [1] [i] = cost1 + vp8_cost_zero(mvc[1].prob[MVPsign]);
mvcost [1] [-i] = cost1 + vp8_cost_one(mvc[1].prob[MVPsign]);
} while (++i <= mv_max);
}
}
// Motion vector probability table update depends on benefit.
// Small correction allows for the fact that an update to an MV probability
// may have benefit in subsequent frames as well as the current one.
#define MV_PROB_UPDATE_CORRECTION -1
__inline static void calc_prob(vp8_prob *p, const unsigned int ct[2]) {
const unsigned int tot = ct[0] + ct[1];
if (tot) {
const vp8_prob x = ((ct[0] * 255) / tot) & -2;
*p = x ? x : 1;
}
}
static void update(
vp8_writer *const bc,
const unsigned int ct[2],
vp8_prob *const cur_p,
const vp8_prob new_p,
const vp8_prob update_p,
int *updated
) {
const int cur_b = vp8_cost_branch(ct, *cur_p);
const int new_b = vp8_cost_branch(ct, new_p);
const int cost = 7 + MV_PROB_UPDATE_CORRECTION + ((vp8_cost_one(update_p) - vp8_cost_zero(update_p) + 128) >> 8);
if (cur_b - new_b > cost) {
*cur_p = new_p;
vp8_write(bc, 1, update_p);
vp8_write_literal(bc, new_p >> 1, 7);
*updated = 1;
} else
vp8_write(bc, 0, update_p);
}
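/* Illustrative note (not part of the original source): update() is a small
 * rate gate. cur_b and new_b approximate the bits spent coding this
 * branch's counts with the old and the candidate probability, and `cost`
 * is the price of signalling a change: the 7-bit literal, the update flag,
 * and the MV_PROB_UPDATE_CORRECTION credit for benefit carried into later
 * frames. The candidate is written only if it pays for itself. */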
static void write_component_probs(
vp8_writer *const bc,
struct mv_context *cur_mvc,
const struct mv_context *default_mvc_,
const struct mv_context *update_mvc,
const unsigned int events [MVvals],
unsigned int rc,
int *updated
) {
vp8_prob *Pcur = cur_mvc->prob;
const vp8_prob *default_mvc = default_mvc_->prob;
const vp8_prob *Pupdate = update_mvc->prob;
unsigned int is_short_ct[2], sign_ct[2];
unsigned int bit_ct [mvlong_width] [2];
unsigned int short_ct [mvnum_short];
unsigned int short_bct [mvnum_short - 1] [2];
vp8_prob Pnew [MVPcount];
(void) rc;
vp8_copy_array(Pnew, default_mvc, MVPcount);
vp8_zero(is_short_ct)
vp8_zero(sign_ct)
vp8_zero(bit_ct)
vp8_zero(short_ct)
vp8_zero(short_bct)
// j=0
{
const int c = events [mv_max];
is_short_ct [0] += c; // Short vector
short_ct [0] += c; // Magnitude distribution
}
// j: 1 ~ mv_max (1023)
{
int j = 1;
do {
const int c1 = events [mv_max + j]; // positive
const int c2 = events [mv_max - j]; // negative
const int c = c1 + c2;
int a = j;
sign_ct [0] += c1;
sign_ct [1] += c2;
if (a < mvnum_short) {
is_short_ct [0] += c; // Short vector
short_ct [a] += c; // Magnitude distribution
} else {
int k = mvlong_width - 1;
is_short_ct [1] += c; // Long vector
/* bit 3 not always encoded. */
do
bit_ct [k] [(a >> k) & 1] += c;
while (--k >= 0);
}
} while (++j <= mv_max);
}
calc_prob(Pnew + mvpis_short, is_short_ct);
calc_prob(Pnew + MVPsign, sign_ct);
{
vp8_prob p [mvnum_short - 1]; /* actually only need branch ct */
int j = 0;
vp8_tree_probs_from_distribution(
mvnum_short, vp8_small_mvencodings, vp8_small_mvtree,
p, short_bct, short_ct,
256, 1
);
do
calc_prob(Pnew + MVPshort + j, short_bct[j]);
while (++j < mvnum_short - 1);
}
{
int j = 0;
do
calc_prob(Pnew + MVPbits + j, bit_ct[j]);
while (++j < mvlong_width);
}
update(bc, is_short_ct, Pcur + mvpis_short, Pnew[mvpis_short],
*Pupdate++, updated);
update(bc, sign_ct, Pcur + MVPsign, Pnew[MVPsign],
*Pupdate++, updated);
{
const vp8_prob *const new_p = Pnew + MVPshort;
vp8_prob *const cur_p = Pcur + MVPshort;
int j = 0;
do
update(bc, short_bct[j], cur_p + j, new_p[j], *Pupdate++, updated);
while (++j < mvnum_short - 1);
}
{
const vp8_prob *const new_p = Pnew + MVPbits;
vp8_prob *const cur_p = Pcur + MVPbits;
int j = 0;
do
update(bc, bit_ct[j], cur_p + j, new_p[j], *Pupdate++, updated);
while (++j < mvlong_width);
}
}
void vp8_write_mvprobs(VP8_COMP* const cpi, vp8_writer* const bc) {
MV_CONTEXT *mvc = cpi->common.fc.mvc;
int flags[2] = {0, 0};
#ifdef ENTROPY_STATS
active_section = 4;
#endif
write_component_probs(
bc, &mvc[0], &vp8_default_mv_context[0], &vp8_mv_update_probs[0],
cpi->MVcount[0], 0, &flags[0]);
write_component_probs(
bc, &mvc[1], &vp8_default_mv_context[1], &vp8_mv_update_probs[1],
cpi->MVcount[1], 1, &flags[1]);
if (flags[0] || flags[1])
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flags);
#ifdef ENTROPY_STATS
active_section = 5;
#endif
}
static void encode_mvcomponent_hp(
vp8_writer *const bc,
const int v,
const struct mv_context_hp *mvc
) {
const vp8_prob *p = mvc->prob;
const int x = v < 0 ? -v : v;
if (x < mvnum_short_hp) { // Small
vp8_write(bc, 0, p[mvpis_short_hp]);
vp8_treed_write(bc, vp8_small_mvtree_hp, p + MVPshort_hp, x,
mvnum_short_bits_hp);
if (!x)
return; // no sign bit
} else { // Large
int i = 0;
vp8_write(bc, 1, p[mvpis_short_hp]);
do
vp8_write(bc, (x >> i) & 1, p[MVPbits_hp + i]);
while (++i < mvnum_short_bits_hp);
i = mvlong_width_hp - 1; /* Skip bit 3, which is sometimes implicit */
do
vp8_write(bc, (x >> i) & 1, p[MVPbits_hp + i]);
while (--i > mvnum_short_bits_hp);
if (x & ~((2 << mvnum_short_bits_hp) - 1))
vp8_write(bc, (x >> mvnum_short_bits_hp) & 1,
p[MVPbits_hp + mvnum_short_bits_hp]);
}
vp8_write(bc, v < 0, p[MVPsign_hp]);
}
void vp8_encode_motion_vector_hp(vp8_writer *bc, const MV *mv,
const MV_CONTEXT_HP *mvc) {
encode_mvcomponent_hp(bc, mv->row, &mvc[0]);
encode_mvcomponent_hp(bc, mv->col, &mvc[1]);
}
static unsigned int cost_mvcomponent_hp(const int v,
const struct mv_context_hp *mvc) {
const vp8_prob *p = mvc->prob;
const int x = v; // v<0? -v:v;
unsigned int cost;
if (x < mvnum_short_hp) {
cost = vp8_cost_zero(p [mvpis_short_hp])
+ vp8_treed_cost(vp8_small_mvtree_hp, p + MVPshort_hp, x,
mvnum_short_bits_hp);
if (!x)
return cost;
} else {
int i = 0;
cost = vp8_cost_one(p [mvpis_short_hp]);
do
cost += vp8_cost_bit(p [MVPbits_hp + i], (x >> i) & 1);
while (++i < mvnum_short_bits_hp);
i = mvlong_width_hp - 1; /* Skip bit 3, which is sometimes implicit */
do
cost += vp8_cost_bit(p [MVPbits_hp + i], (x >> i) & 1);
while (--i > mvnum_short_bits_hp);
if (x & ~((2 << mvnum_short_bits_hp) - 1))
cost += vp8_cost_bit(p [MVPbits_hp + mvnum_short_bits_hp],
(x >> mvnum_short_bits_hp) & 1);
}
return cost; // + vp8_cost_bit( p [MVPsign], v < 0);
}
void vp8_build_component_cost_table_hp(int *mvcost[2],
const MV_CONTEXT_HP *mvc,
const int mvc_flag[2]) {
int i = 1; // -mv_max;
unsigned int cost0 = 0;
unsigned int cost1 = 0;
vp8_clear_system_state();
i = 1;
if (mvc_flag[0]) {
mvcost [0] [0] = cost_mvcomponent_hp(0, &mvc[0]);
do {
// mvcost [0] [i] = cost_mvcomponent( i, &mvc[0]);
cost0 = cost_mvcomponent_hp(i, &mvc[0]);
mvcost [0] [i] = cost0 + vp8_cost_zero(mvc[0].prob[MVPsign_hp]);
mvcost [0] [-i] = cost0 + vp8_cost_one(mvc[0].prob[MVPsign_hp]);
} while (++i <= mv_max_hp);
}
i = 1;
if (mvc_flag[1]) {
mvcost [1] [0] = cost_mvcomponent_hp(0, &mvc[1]);
do {
// mvcost [1] [i] = cost_mvcomponent( i, mvc[1]);
cost1 = cost_mvcomponent_hp(i, &mvc[1]);
mvcost [1] [i] = cost1 + vp8_cost_zero(mvc[1].prob[MVPsign_hp]);
mvcost [1] [-i] = cost1 + vp8_cost_one(mvc[1].prob[MVPsign_hp]);
} while (++i <= mv_max_hp);
}
}
static void write_component_probs_hp(
vp8_writer *const bc,
struct mv_context_hp *cur_mvc,
const struct mv_context_hp *default_mvc_,
const struct mv_context_hp *update_mvc,
const unsigned int events [MVvals_hp],
unsigned int rc,
int *updated
) {
vp8_prob *Pcur = cur_mvc->prob;
const vp8_prob *default_mvc = default_mvc_->prob;
const vp8_prob *Pupdate = update_mvc->prob;
unsigned int is_short_ct[2], sign_ct[2];
unsigned int bit_ct [mvlong_width_hp] [2];
unsigned int short_ct [mvnum_short_hp];
unsigned int short_bct [mvnum_short_hp - 1] [2];
vp8_prob Pnew [MVPcount_hp];
(void) rc;
vp8_copy_array(Pnew, default_mvc, MVPcount_hp);
vp8_zero(is_short_ct)
vp8_zero(sign_ct)
vp8_zero(bit_ct)
vp8_zero(short_ct)
vp8_zero(short_bct)
// j=0
{
const int c = events [mv_max_hp];
is_short_ct [0] += c; // Short vector
short_ct [0] += c; // Magnitude distribution
}
// j: 1 ~ mv_max_hp (2047)
{
int j = 1;
do {
const int c1 = events [mv_max_hp + j]; // positive
const int c2 = events [mv_max_hp - j]; // negative
const int c = c1 + c2;
int a = j;
sign_ct [0] += c1;
sign_ct [1] += c2;
if (a < mvnum_short_hp) {
is_short_ct [0] += c; // Short vector
short_ct [a] += c; // Magnitude distribution
} else {
int k = mvlong_width_hp - 1;
is_short_ct [1] += c; // Long vector
/* bit 3 not always encoded. */
do
bit_ct [k] [(a >> k) & 1] += c;
while (--k >= 0);
}
} while (++j <= mv_max_hp);
}
calc_prob(Pnew + mvpis_short_hp, is_short_ct);
calc_prob(Pnew + MVPsign_hp, sign_ct);
{
vp8_prob p [mvnum_short_hp - 1]; /* actually only need branch ct */
int j = 0;
vp8_tree_probs_from_distribution(
mvnum_short_hp, vp8_small_mvencodings_hp, vp8_small_mvtree_hp,
p, short_bct, short_ct,
256, 1
);
do
calc_prob(Pnew + MVPshort_hp + j, short_bct[j]);
while (++j < mvnum_short_hp - 1);
}
{
int j = 0;
do
calc_prob(Pnew + MVPbits_hp + j, bit_ct[j]);
while (++j < mvlong_width_hp);
}
update(bc, is_short_ct, Pcur + mvpis_short_hp, Pnew[mvpis_short_hp],
*Pupdate++, updated);
update(bc, sign_ct, Pcur + MVPsign_hp, Pnew[MVPsign_hp], *Pupdate++,
updated);
{
const vp8_prob *const new_p = Pnew + MVPshort_hp;
vp8_prob *const cur_p = Pcur + MVPshort_hp;
int j = 0;
do
update(bc, short_bct[j], cur_p + j, new_p[j], *Pupdate++, updated);
while (++j < mvnum_short_hp - 1);
}
{
const vp8_prob *const new_p = Pnew + MVPbits_hp;
vp8_prob *const cur_p = Pcur + MVPbits_hp;
int j = 0;
do
update(bc, bit_ct[j], cur_p + j, new_p[j], *Pupdate++, updated);
while (++j < mvlong_width_hp);
}
}
void vp8_write_mvprobs_hp(VP8_COMP* const cpi, vp8_writer* const bc) {
MV_CONTEXT_HP *mvc = cpi->common.fc.mvc_hp;
int flags[2] = {0, 0};
#ifdef ENTROPY_STATS
active_section = 4;
#endif
write_component_probs_hp(
bc, &mvc[0], &vp8_default_mv_context_hp[0], &vp8_mv_update_probs_hp[0],
cpi->MVcount_hp[0], 0, &flags[0]
);
write_component_probs_hp(
bc, &mvc[1], &vp8_default_mv_context_hp[1], &vp8_mv_update_probs_hp[1],
cpi->MVcount_hp[1], 1, &flags[1]
);
if (flags[0] || flags[1])
vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp,
(const MV_CONTEXT_HP *)
cpi->common.fc.mvc_hp, flags);
#ifdef ENTROPY_STATS
active_section = 5;
#endif
}
#endif /* CONFIG_NEWMVENTROPY */

View File

@ -14,7 +14,6 @@
#include "onyx_int.h"
#if CONFIG_NEWMVENTROPY
void vp8_write_nmvprobs(VP8_COMP* const, int usehp, vp8_writer* const);
void vp8_encode_nmv(vp8_writer* const w, const MV* const mv,
const MV* const ref, const nmv_context* const mvctx);
@ -27,19 +26,5 @@ void vp8_build_nmv_cost_table(int *mvjoint,
int usehp,
int mvc_flag_v,
int mvc_flag_h);
#else /* CONFIG_NEWMVENTROPY */
void vp8_write_mvprobs(VP8_COMP* const, vp8_writer* const);
void vp8_encode_motion_vector(vp8_writer* const, const MV* const,
const MV_CONTEXT* const);
void vp8_build_component_cost_table(int *mvcost[2],
const MV_CONTEXT*,
const int mvc_flag[2]);
void vp8_write_mvprobs_hp(VP8_COMP* const, vp8_writer* const);
void vp8_encode_motion_vector_hp(vp8_writer* const, const MV* const,
const MV_CONTEXT_HP* const);
void vp8_build_component_cost_table_hp(int *mvcost[2],
const MV_CONTEXT_HP*,
const int mvc_flag[2]);
#endif /* CONFIG_NEWMVENTROPY */
#endif

View File

@ -493,12 +493,7 @@ void vp8_first_pass(VP8_COMP *cpi) {
{
int flag[2] = {1, 1};
vp8_init_mv_probs(cm);
#if CONFIG_NEWMVENTROPY
vp8_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
#else
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
vp8_build_component_cost_table_hp(cpi->mb.mvcost_hp, (const MV_CONTEXT_HP *) cm->fc.mvc_hp, flag);
#endif
}
// for each macroblock row in image

View File

@ -47,15 +47,9 @@ int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
MV v;
v.row = (mv->as_mv.row - ref->as_mv.row);
v.col = (mv->as_mv.col - ref->as_mv.col);
#if CONFIG_NEWMVENTROPY
return ((mvjcost[vp8_get_mv_joint(v)] +
mvcost[0][v.row] + mvcost[1][v.col]) *
Weight) >> 7;
#else
return ((mvcost[0][v.row >> (ishp == 0)] +
mvcost[1][v.col >> (ishp == 0)])
* Weight) >> 7;
#endif
}
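/* Illustrative note (not part of the original source): the cost tables hold
 * fixed-point bit counts and Weight is a 7-bit fractional scale, so with
 * Weight == 128 the sum passes through unchanged, e.g.
 * (300 + 600 + 500) * 128 >> 7 = 1400. The removed branch differed only
 * in dropping the joint cost and halving the row/col indices when
 * high-precision MVs were disabled. */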
static int mv_err_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
@ -64,14 +58,9 @@ static int mv_err_cost(int_mv *mv, int_mv *ref, DEC_MVCOSTS,
MV v;
v.row = (mv->as_mv.row - ref->as_mv.row);
v.col = (mv->as_mv.col - ref->as_mv.col);
#if CONFIG_NEWMVENTROPY
return ((mvjcost[vp8_get_mv_joint(v)] +
mvcost[0][v.row] + mvcost[1][v.col]) *
error_per_bit + 128) >> 8;
#else
return ((mvcost[0][v.row >> (ishp == 0)] +
mvcost[1][v.col >> (ishp == 0)]) * error_per_bit + 128) >> 8;
#endif
}
return 0;
}
@ -83,14 +72,9 @@ static int mvsad_err_cost(int_mv *mv, int_mv *ref, DEC_MVSADCOSTS,
MV v;
v.row = (mv->as_mv.row - ref->as_mv.row);
v.col = (mv->as_mv.col - ref->as_mv.col);
#if CONFIG_NEWMVENTROPY
return ((mvjsadcost[vp8_get_mv_joint(v)] +
mvsadcost[0][v.row] + mvsadcost[1][v.col]) *
error_per_bit + 128) >> 8;
#else
return ((mvsadcost[0][v.row] + mvsadcost[1][v.col])
* error_per_bit + 128) >> 8;
#endif
}
return 0;
}
@ -220,35 +204,42 @@ void vp8_init3smotion_compensation(MACROBLOCK *x, int stride) {
* could reduce the area.
*/
- #if CONFIG_NEWMVENTROPY
/* estimated cost of a motion vector (r,c) */
- #define MVC(r,c) \
- (mvcost ? \
- ((mvjcost[((r)!=rr)*2 + ((c)!=rc)] + \
- mvcost[0][((r)-rr)] + mvcost[1][((c)-rc)]) * error_per_bit + 128 )>>8 : 0)
- #else
- #define MVC(r,c) \
- (mvcost ? \
- ((mvcost[0][((r)-rr)>>(xd->allow_high_precision_mv==0)] + \
- mvcost[1][((c)-rc)>>(xd->allow_high_precision_mv==0)]) * \
- error_per_bit + 128 )>>8 : 0)
- #endif /* CONFIG_NEWMVENTROPY */
+ #define MVC(r, c) \
+ (mvcost ? \
+ ((mvjcost[((r) != rr) * 2 + ((c) != rc)] + \
+ mvcost[0][((r) - rr)] + mvcost[1][((c) - rc)]) * \
+ error_per_bit + 128) >> 8 : 0)
- #define SP(x) (((x)&7)<<1) // convert motion vector component to offset for svf calc
+ #define SP(x) (((x) & 7) << 1) // convert motion vector component to offset
+ // for svf calc
- #define IFMVCV(r,c,s,e) \
- if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
+ #define IFMVCV(r, c, s, e) \
+ if (c >= minc && c <= maxc && r >= minr && r <= maxr) \
+ s \
+ else \
+ e;
/* pointer to predictor base of a motion vector */
- #define PRE(r,c) (y + (((r)>>3) * y_stride + ((c)>>3) -(offset)))
+ #define PRE(r, c) (y + (((r) >> 3) * y_stride + ((c) >> 3) -(offset)))
/* returns subpixel variance error function */
- #define DIST(r,c) vfp->svf( PRE(r,c), y_stride, SP(c),SP(r), z,b->src_stride,&sse)
+ #define DIST(r, c) \
+ vfp->svf(PRE(r, c), y_stride, SP(c), SP(r), z, b->src_stride, &sse)
- /* checks if (r,c) has better score than previous best */
- #define CHECK_BETTER(v,r,c) \
- IFMVCV(r,c,{thismse = (DIST(r,c)); if((v = MVC(r,c)+thismse) < besterr) \
- { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)
+ /* checks if (r, c) has better score than previous best */
+ #define CHECK_BETTER(v, r, c) \
+ IFMVCV(r, c, { \
+ thismse = (DIST(r, c)); \
+ if ((v = MVC(r, c) + thismse) < besterr) { \
+ besterr = v; \
+ br = r; \
+ bc = c; \
+ *distortion = thismse; \
+ *sse1 = sse; \
+ } \
+ }, \
+ v = INT_MAX;)
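/* Illustrative note (not part of the original source): the subpel search
 * below scores candidates as estimated rate plus filtered distortion,
 * e.g. CHECK_BETTER(left, tr, tc - hstep) evaluates one position to the
 * left of the current best; IFMVCV discards out-of-range candidates by
 * forcing the score to INT_MAX. */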
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))
@ -307,17 +298,10 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
br = bestmv->as_mv.row << 3;
bc = bestmv->as_mv.col << 3;
hstep = 4;
#if CONFIG_NEWMVENTROPY
minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) - ((1 << MV_MAX_BITS) - 1));
maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << MV_MAX_BITS) - 1));
minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << MV_MAX_BITS) - 1));
maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << MV_MAX_BITS) - 1));
#else
minc = MAX(x->mv_col_min << 3, (ref_mv->as_mv.col) - ((1 << mvlong_width_hp) - 1));
maxc = MIN(x->mv_col_max << 3, (ref_mv->as_mv.col) + ((1 << mvlong_width_hp) - 1));
minr = MAX(x->mv_row_min << 3, (ref_mv->as_mv.row) - ((1 << mvlong_width_hp) - 1));
maxr = MIN(x->mv_row_max << 3, (ref_mv->as_mv.row) + ((1 << mvlong_width_hp) - 1));
#endif
tr = br;
tc = bc;
@ -403,13 +387,11 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
tc = bc;
}
#if CONFIG_NEWMVENTROPY
if (xd->allow_high_precision_mv) {
usehp = vp8_use_nmv_hp(&ref_mv->as_mv);
} else {
usehp = 0;
}
#endif
if (usehp) {
hstep >>= 1;
@ -771,13 +753,11 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
*sse1 = sse;
}
#if CONFIG_NEWMVENTROPY
if (x->e_mbd.allow_high_precision_mv) {
usehp = vp8_use_nmv_hp(&ref_mv->as_mv);
} else {
usehp = 0;
}
#endif
if (!usehp)
return bestmse;
@ -1304,16 +1284,8 @@ int vp8_diamond_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
MACROBLOCKD *xd = &x->e_mbd;
int_mv fcenter_mv;
#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
#else
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@ -1423,16 +1395,8 @@ int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
MACROBLOCKD *xd = &x->e_mbd;
int_mv fcenter_mv;
#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
#else
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@ -1632,16 +1596,8 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
int col_max = ref_col + distance;
int_mv fcenter_mv;
#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
#else
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@ -1736,16 +1692,8 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
unsigned int sad_array[3];
int_mv fcenter_mv;
#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
#else
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@ -1873,16 +1821,8 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
unsigned int sad_array[3];
int_mv fcenter_mv;
#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
#else
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@ -2023,16 +1963,8 @@ int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
MACROBLOCKD *xd = &x->e_mbd;
int_mv fcenter_mv;
#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
#else
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
@ -2107,16 +2039,8 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
MACROBLOCKD *xd = &x->e_mbd;
int_mv fcenter_mv;
#if CONFIG_NEWMVENTROPY
int *mvjsadcost = x->nmvjointsadcost;
int *mvsadcost[2] = {x->nmvsadcost[0], x->nmvsadcost[1]};
#else
int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
if (xd->allow_high_precision_mv) {
mvsadcost[0] = x->mvsadcost_hp[0];
mvsadcost[1] = x->mvsadcost_hp[1];
}
#endif
fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

View File

@ -15,21 +15,12 @@
#include "block.h"
#include "variance.h"
#if CONFIG_NEWMVENTROPY
#define MVCOSTS mvjcost, mvcost
#define MVSADCOSTS mvjsadcost, mvsadcost
#define DEC_MVCOSTS int *mvjcost, int *mvcost[2]
#define DEC_MVSADCOSTS int *mvjsadcost, int *mvsadcost[2]
#define NULLMVCOST NULL, NULL
#define XMVCOST x->nmvjointcost, (x->e_mbd.allow_high_precision_mv?x->nmvcost_hp:x->nmvcost)
#else
#define MVCOSTS mvcost
#define MVSADCOSTS mvsadcost
#define DEC_MVCOSTS int *mvcost[2]
#define DEC_MVSADCOSTS int *mvsadcost[2]
#define NULLMVCOST NULL
#define XMVCOST (x->e_mbd.allow_high_precision_mv?x->mvcost_hp:x->mvcost)
#endif /* CONFIG_NEWMVENTROPY */
#ifdef ENTROPY_STATS
extern void init_mv_ref_counts();

View File

@ -138,12 +138,10 @@ extern int skip_false_count;
extern int intra_mode_stats[VP8_BINTRAMODES][VP8_BINTRAMODES][VP8_BINTRAMODES];
#endif
#if CONFIG_NEWMVENTROPY
#ifdef NMV_STATS
extern void init_nmvstats();
extern void print_nmvstats();
#endif
#endif
#ifdef SPEEDSTATS
unsigned int frames_at_speed[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
@ -1689,8 +1687,6 @@ void vp8_change_config(VP8_PTR ptr, VP8_CONFIG *oxcf) {
#define M_LOG2_E 0.693147180559945309417
#define log2f(x) (log (x) / (float) M_LOG2_E)
#if CONFIG_NEWMVENTROPY
static void cal_nmvjointsadcost(int *mvjointsadcost) {
mvjointsadcost[0] = 600;
mvjointsadcost[1] = 300;
@ -1728,40 +1724,6 @@ static void cal_nmvsadcosts_hp(int *mvsadcost[2]) {
} while (++i <= MV_MAX);
}
#else
static void cal_mvsadcosts(int *mvsadcost[2]) {
int i = 1;
mvsadcost [0] [0] = 300;
mvsadcost [1] [0] = 300;
do {
double z = 256 * (2 * (log2f(8 * i) + .6));
mvsadcost [0][i] = (int) z;
mvsadcost [1][i] = (int) z;
mvsadcost [0][-i] = (int) z;
mvsadcost [1][-i] = (int) z;
} while (++i <= mvfp_max);
}
static void cal_mvsadcosts_hp(int *mvsadcost[2]) {
int i = 1;
mvsadcost [0] [0] = 300;
mvsadcost [1] [0] = 300;
do {
double z = 256 * (2 * (log2f(8 * i) + .6));
mvsadcost [0][i] = (int) z;
mvsadcost [1][i] = (int) z;
mvsadcost [0][-i] = (int) z;
mvsadcost [1][-i] = (int) z;
} while (++i <= mvfp_max_hp);
}
#endif /* CONFIG_NEWMVENTROPY */
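/* Illustrative note (not part of the original source): the removed tables
 * above seed the SAD-stage MV costs from a log model instead of measured
 * entropy: cost(i) = 256 * 2 * (log2(8 * i) + 0.6) in 1/256-bit units,
 * e.g. i == 1 gives 256 * 2 * (3 + 0.6) = 1843. The nmv replacements
 * (cal_nmvjointsadcost / cal_nmvsadcosts*) fill the same role for the
 * joint/component split. */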
VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
int i;
volatile union {
@ -1877,10 +1839,8 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
vp8_zero(inter_uv_modes);
vp8_zero(inter_b_modes);
#endif
#if CONFIG_NEWMVENTROPY
#ifdef NMV_STATS
init_nmvstats();
#endif
#endif
/*Initialize the feed-forward activity masking.*/
@ -1947,7 +1907,6 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
cpi->gf_rate_correction_factor = 1.0;
cpi->twopass.est_max_qcorrection_factor = 1.0;
#if CONFIG_NEWMVENTROPY
cal_nmvjointsadcost(cpi->mb.nmvjointsadcost);
cpi->mb.nmvcost[0] = &cpi->mb.nmvcosts[0][MV_MAX];
cpi->mb.nmvcost[1] = &cpi->mb.nmvcosts[1][MV_MAX];
@@ -1960,19 +1919,6 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf) {
cpi->mb.nmvsadcost_hp[0] = &cpi->mb.nmvsadcosts_hp[0][MV_MAX];
cpi->mb.nmvsadcost_hp[1] = &cpi->mb.nmvsadcosts_hp[1][MV_MAX];
cal_nmvsadcosts_hp(cpi->mb.nmvsadcost_hp);
#else
cpi->mb.mvcost[0] = &cpi->mb.mvcosts[0][mv_max + 1];
cpi->mb.mvcost[1] = &cpi->mb.mvcosts[1][mv_max + 1];
cpi->mb.mvsadcost[0] = &cpi->mb.mvsadcosts[0][mvfp_max + 1];
cpi->mb.mvsadcost[1] = &cpi->mb.mvsadcosts[1][mvfp_max + 1];
cal_mvsadcosts(cpi->mb.mvsadcost);
cpi->mb.mvcost_hp[0] = &cpi->mb.mvcosts_hp[0][mv_max_hp + 1];
cpi->mb.mvcost_hp[1] = &cpi->mb.mvcosts_hp[1][mv_max_hp + 1];
cpi->mb.mvsadcost_hp[0] = &cpi->mb.mvsadcosts_hp[0][mvfp_max_hp + 1];
cpi->mb.mvsadcost_hp[1] = &cpi->mb.mvsadcosts_hp[1][mvfp_max_hp + 1];
cal_mvsadcosts_hp(cpi->mb.mvsadcost_hp);
#endif /* CONFIG_NEWMVENTROPY */
for (i = 0; i < KEY_FRAME_CONTEXT; i++) {
cpi->prior_key_frame_distance[i] = (int)cpi->output_frame_rate;
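The nmvcost/nmvsadcost assignments above point each cost pointer at the middle of its backing array, so the encoder can index it directly with a signed MV difference. A minimal standalone sketch of that centered-table idiom (the MV_MAX value here is assumed for illustration; the real one lives in the entropy headers):

#include <assert.h>
#define MV_MAX 1023                  /* assumed for illustration only */

static int backing[2 * MV_MAX + 1];  /* valid indices 0 .. 2*MV_MAX   */

int main(void) {
  int *cost = &backing[MV_MAX];      /* centered view of the table    */
  cost[-7] = 42;                     /* indexed by signed difference  */
  assert(backing[MV_MAX - 7] == 42);
  return 0;
}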
@@ -2099,12 +2045,10 @@ void vp8_remove_compressor(VP8_PTR *ptr) {
print_mode_context();
}
#endif
#if CONFIG_NEWMVENTROPY
#ifdef NMV_STATS
if (cpi->pass != 1)
print_nmvstats();
#endif
#endif
#if CONFIG_INTERNAL_STATS
@@ -3728,14 +3672,8 @@ static void encode_frame_to_data_rate
vp8_copy(cpi->common.fc.mbsplit_counts, cpi->mbsplit_count);
vp8_adapt_mode_probs(&cpi->common);
#if CONFIG_NEWMVENTROPY
cpi->common.fc.NMVcount = cpi->NMVcount;
vp8_adapt_nmv_probs(&cpi->common, cpi->mb.e_mbd.allow_high_precision_mv);
#else
vp8_copy(cpi->common.fc.MVcount, cpi->MVcount);
vp8_copy(cpi->common.fc.MVcount_hp, cpi->MVcount_hp);
vp8_adapt_mv_probs(&cpi->common);
#endif /* CONFIG_NEWMVENTROPY */
vp8_update_mode_context(&cpi->common);
}
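After the counts are copied, vp8_adapt_nmv_probs() blends the frame's statistics into the prior probabilities. A sketch of that style of count-driven update for a single binary probability (a minimal sketch; the saturation and update-factor constants are illustrative, not the library's):

static unsigned char adapt_prob_sketch(unsigned char prior,
                                       unsigned int ct0, unsigned int ct1) {
  const unsigned int count_sat = 16, max_factor = 160;   /* illustrative    */
  unsigned int count = ct0 + ct1, factor, mle;
  if (!count) return prior;
  if (count > count_sat) count = count_sat;
  factor = max_factor * count / count_sat;               /* trust in counts */
  mle = (ct0 * 255 + count / 2) / count;                 /* observed P(zero)*/
  if (!mle) mle = 1;
  return (unsigned char)((prior * (256 - factor) + mle * factor + 128) >> 8);
}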

View File

@@ -60,17 +60,10 @@
#define VP8_TEMPORAL_ALT_REF 1
typedef struct {
#if CONFIG_NEWMVENTROPY
nmv_context nmvc;
int nmvjointcost[MV_JOINTS];
int nmvcosts[2][MV_VALS];
int nmvcosts_hp[2][MV_VALS];
#else
MV_CONTEXT mvc[2];
int mvcosts[2][MVvals + 1];
MV_CONTEXT_HP mvc_hp[2];
int mvcosts_hp[2][MVvals_hp + 1];
#endif
#ifdef MODE_STATS
// Stats
@@ -556,12 +549,7 @@ typedef struct VP8_COMP {
// int uv_mode_count[VP8_UV_MODES]; /* intra MB type cts this frame */
int y_uv_mode_count[VP8_YMODES][VP8_UV_MODES];
#if CONFIG_NEWMVENTROPY
nmv_context_counts NMVcount;
#else
unsigned int MVcount [2] [MVvals]; /* (row,col) MV cts this frame */
unsigned int MVcount_hp [2] [MVvals_hp]; /* (row,col) MV cts this frame */
#endif
unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];

View File

@@ -132,17 +132,10 @@ void vp8_save_coding_context(VP8_COMP *cpi) {
// intended for use in a re-code loop in vp8_compress_frame where the
// quantizer value is adjusted between loop iterations.
#if CONFIG_NEWMVENTROPY
cc->nmvc = cm->fc.nmvc;
vp8_copy(cc->nmvjointcost, cpi->mb.nmvjointcost);
vp8_copy(cc->nmvcosts, cpi->mb.nmvcosts);
vp8_copy(cc->nmvcosts_hp, cpi->mb.nmvcosts_hp);
#else
vp8_copy(cc->mvc, cm->fc.mvc);
vp8_copy(cc->mvcosts, cpi->mb.mvcosts);
vp8_copy(cc->mvc_hp, cm->fc.mvc_hp);
vp8_copy(cc->mvcosts_hp, cpi->mb.mvcosts_hp);
#endif
vp8_copy(cc->mv_ref_ct, cm->fc.mv_ref_ct);
vp8_copy(cc->mode_context, cm->fc.mode_context);
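A sketch of the re-code pattern the comment above describes, for orientation only: every function except the save/restore pair is hypothetical.

typedef struct VP8_COMP VP8_COMP;               /* opaque here             */
void vp8_save_coding_context(VP8_COMP *cpi);
void vp8_restore_coding_context(VP8_COMP *cpi);
int  pick_initial_q(VP8_COMP *cpi);             /* hypothetical            */
int  rate_within_target(VP8_COMP *cpi, int q);  /* hypothetical trial pass */
int  adjust_q(VP8_COMP *cpi, int q);            /* hypothetical            */

void recode_sketch(VP8_COMP *cpi) {
  int q = pick_initial_q(cpi);
  vp8_save_coding_context(cpi);                 /* snapshot entropy state  */
  while (!rate_within_target(cpi, q)) {
    vp8_restore_coding_context(cpi);            /* roll back before retry  */
    q = adjust_q(cpi, q);
  }
}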
@@ -196,17 +189,10 @@ void vp8_restore_coding_context(VP8_COMP *cpi) {
// Restore key state variables to the snapshot state stored in the
// previous call to vp8_save_coding_context.
#if CONFIG_NEWMVENTROPY
cm->fc.nmvc = cc->nmvc;
vp8_copy(cpi->mb.nmvjointcost, cc->nmvjointcost);
vp8_copy(cpi->mb.nmvcosts, cc->nmvcosts);
vp8_copy(cpi->mb.nmvcosts_hp, cc->nmvcosts_hp);
#else
vp8_copy(cm->fc.mvc, cc->mvc);
vp8_copy(cpi->mb.mvcosts, cc->mvcosts);
vp8_copy(cm->fc.mvc_hp, cc->mvc_hp);
vp8_copy(cpi->mb.mvcosts_hp, cc->mvcosts_hp);
#endif
vp8_copy(cm->fc.mv_ref_ct, cc->mv_ref_ct);
vp8_copy(cm->fc.mode_context, cc->mode_context);
@@ -263,16 +249,6 @@ void vp8_setup_key_frame(VP8_COMP *cpi) {
vp8_default_bmode_probs(cm->fc.bmode_prob);
vp8_init_mv_probs(& cpi->common);
#if CONFIG_NEWMVENTROPY == 0
/* this is not really required */
{
int flag[2] = {1, 1};
vp8_build_component_cost_table(
cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flag);
vp8_build_component_cost_table_hp(
cpi->mb.mvcost_hp, (const MV_CONTEXT_HP *) cpi->common.fc.mvc_hp, flag);
}
#endif
// cpi->common.filter_level = 0; // Reset every key frame.
cpi->common.filter_level = cpi->common.base_qindex * 3 / 8;

View File

@@ -387,14 +387,12 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {
if (cpi->common.frame_type != KEY_FRAME)
{
#if CONFIG_NEWMVENTROPY
vp8_build_nmv_cost_table(
cpi->mb.nmvjointcost,
cpi->mb.e_mbd.allow_high_precision_mv ?
cpi->mb.nmvcost_hp : cpi->mb.nmvcost,
&cpi->common.fc.nmvc,
cpi->mb.e_mbd.allow_high_precision_mv, 1, 1);
#endif
}
}
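vp8_build_nmv_cost_table() has to be rerun whenever the frame's probabilities change, because an entry is just the negative log probability in 1/256-bit units. A standalone sketch of that mapping (the real coder uses the precomputed vp8_prob_cost[] table; this floating-point form is illustrative only):

#include <math.h>

/* Cost, in 1/256-bit units, of coding the zero branch of a binary node
 * whose probability of zero is prob/256; prob == 128 costs exactly 256. */
static int cost_zero_sketch(int prob /* 1..255 */) {
  return (int)(-256.0 * log2(prob / 256.0) + 0.5);
}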
@@ -403,19 +401,6 @@ void vp8_auto_select_speed(VP8_COMP *cpi) {
milliseconds_for_compress = milliseconds_for_compress * (16 - cpi->oxcf.cpu_used) / 16;
#if 0
if (0) {
FILE *f;
f = fopen("speed.stt", "a");
fprintf(f, " %8ld %10ld %10ld %10ld\n",
cpi->common.current_video_frame, cpi->Speed, milliseconds_for_compress, cpi->avg_pick_mode_time);
fclose(f);
}
#endif
/*
// this is done during parameter valid check
if( cpi->oxcf.cpu_used > 16)
@@ -3065,9 +3050,7 @@ void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffse
void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
int_mv *best_ref_mv, int_mv *second_best_ref_mv) {
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
#if CONFIG_NEWMVENTROPY
MV mv;
#endif
if (mbmi->mode == SPLITMV) {
int i;
@@ -3075,7 +3058,6 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
for (i = 0; i < x->partition_info->count; i++) {
if (x->partition_info->bmi[i].mode == NEW4X4) {
if (x->e_mbd.allow_high_precision_mv) {
#if CONFIG_NEWMVENTROPY
mv.row = (x->partition_info->bmi[i].mv.as_mv.row
- best_ref_mv->as_mv.row);
mv.col = (x->partition_info->bmi[i].mv.as_mv.col
@@ -3089,20 +3071,7 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
&cpi->NMVcount, 1);
}
#else
cpi->MVcount_hp[0][mv_max_hp + (x->partition_info->bmi[i].mv.as_mv.row
- best_ref_mv->as_mv.row)]++;
cpi->MVcount_hp[1][mv_max_hp + (x->partition_info->bmi[i].mv.as_mv.col
- best_ref_mv->as_mv.col)]++;
if (mbmi->second_ref_frame) {
cpi->MVcount_hp[0][mv_max_hp + (x->partition_info->bmi[i].second_mv.as_mv.row
- second_best_ref_mv->as_mv.row)]++;
cpi->MVcount_hp[1][mv_max_hp + (x->partition_info->bmi[i].second_mv.as_mv.col
- second_best_ref_mv->as_mv.col)]++;
}
#endif
} else {
#if CONFIG_NEWMVENTROPY
mv.row = (x->partition_info->bmi[i].mv.as_mv.row
- best_ref_mv->as_mv.row);
mv.col = (x->partition_info->bmi[i].mv.as_mv.col
@@ -3116,24 +3085,11 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv,
&cpi->NMVcount, 0);
}
#else
cpi->MVcount[0][mv_max + ((x->partition_info->bmi[i].mv.as_mv.row
- best_ref_mv->as_mv.row) >> 1)]++;
cpi->MVcount[1][mv_max + ((x->partition_info->bmi[i].mv.as_mv.col
- best_ref_mv->as_mv.col) >> 1)]++;
if (mbmi->second_ref_frame) {
cpi->MVcount[0][mv_max + ((x->partition_info->bmi[i].second_mv.as_mv.row
- second_best_ref_mv->as_mv.row) >> 1)]++;
cpi->MVcount[1][mv_max + ((x->partition_info->bmi[i].second_mv.as_mv.col
- second_best_ref_mv->as_mv.col) >> 1)]++;
}
#endif
}
}
}
} else if (mbmi->mode == NEWMV) {
if (x->e_mbd.allow_high_precision_mv) {
#if CONFIG_NEWMVENTROPY
mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 1);
@@ -3142,20 +3098,7 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 1);
}
#else
cpi->MVcount_hp[0][mv_max_hp + (mbmi->mv[0].as_mv.row
- best_ref_mv->as_mv.row)]++;
cpi->MVcount_hp[1][mv_max_hp + (mbmi->mv[0].as_mv.col
- best_ref_mv->as_mv.col)]++;
if (mbmi->second_ref_frame) {
cpi->MVcount_hp[0][mv_max_hp + (mbmi->mv[1].as_mv.row
- second_best_ref_mv->as_mv.row)]++;
cpi->MVcount_hp[1][mv_max_hp + (mbmi->mv[1].as_mv.col
- second_best_ref_mv->as_mv.col)]++;
}
#endif
} else {
#if CONFIG_NEWMVENTROPY
mv.row = (mbmi->mv[0].as_mv.row - best_ref_mv->as_mv.row);
mv.col = (mbmi->mv[0].as_mv.col - best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &best_ref_mv->as_mv, &cpi->NMVcount, 0);
@@ -3164,18 +3107,6 @@ void rd_update_mvcount(VP8_COMP *cpi, MACROBLOCK *x,
mv.col = (mbmi->mv[1].as_mv.col - second_best_ref_mv->as_mv.col);
vp8_increment_nmv(&mv, &second_best_ref_mv->as_mv, &cpi->NMVcount, 0);
}
#else
cpi->MVcount[0][mv_max + ((mbmi->mv[0].as_mv.row
- best_ref_mv->as_mv.row) >> 1)]++;
cpi->MVcount[1][mv_max + ((mbmi->mv[0].as_mv.col
- best_ref_mv->as_mv.col) >> 1)]++;
if (mbmi->second_ref_frame) {
cpi->MVcount[0][mv_max + ((mbmi->mv[1].as_mv.row
- second_best_ref_mv->as_mv.row) >> 1)]++;
cpi->MVcount[1][mv_max + ((mbmi->mv[1].as_mv.col
- second_best_ref_mv->as_mv.col) >> 1)]++;
}
#endif
}
}
}
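All four branches of rd_update_mvcount() now reduce to the same step: count the difference between the coded MV and its reference, with usehp selecting eighth-pel (1) or quarter-pel (0) statistics. A condensed sketch of that shared step, assuming the encoder's own types and vp8_increment_nmv() as used above (not a drop-in replacement):

static void count_mv_sketch(VP8_COMP *cpi, const int_mv *mv,
                            const int_mv *ref, int usehp) {
  MV diff;
  diff.row = mv->as_mv.row - ref->as_mv.row;
  diff.col = mv->as_mv.col - ref->as_mv.col;
  vp8_increment_nmv(&diff, &ref->as_mv, &cpi->NMVcount, usehp);
}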