Adds inter-intra combination with wedge partitions

Also fixes some build issues with certain experimental combinations.

Results on derflr with all experiments on: +5.516%

Change-Id: I9b492f3d3556bd1f057005571dc9bee63167dd95
Deb Mukherjee 2015-01-20 16:32:13 -08:00
parent 760219d0fd
commit 7a993f850e
11 changed files with 549 additions and 37 deletions
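For orientation before the per-file hunks: the interintra wedge path added here blends the intra and inter predictors pixel-by-pixel with a 6-bit weight mask. Below is a minimal standalone sketch of that blend, assuming weights in [0, 64]; the helper name is illustrative, not from the patch, but the arithmetic mirrors combine_interintra below.

#include <stdint.h>

#define WEDGE_WEIGHT_BITS 6

/* Per pixel: comp = (intra * m + inter * (64 - m) + 32) >> 6 */
static void wedge_blend_sketch(uint8_t *comp, int comp_stride,
                               const uint8_t *inter, int inter_stride,
                               const uint8_t *intra, int intra_stride,
                               const uint8_t *mask, int mask_stride,
                               int w, int h) {
  int i, j;
  for (i = 0; i < h; ++i) {
    for (j = 0; j < w; ++j) {
      const int m = mask[i * mask_stride + j];  /* weight on the intra pixel */
      comp[i * comp_stride + j] =
          (intra[i * intra_stride + j] * m +
           inter[i * inter_stride + j] * ((1 << WEDGE_WEIGHT_BITS) - m) +
           (1 << (WEDGE_WEIGHT_BITS - 1))) >> WEDGE_WEIGHT_BITS;
    }
  }
}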

View File

@@ -189,10 +189,15 @@ typedef struct {
#if CONFIG_INTERINTRA
PREDICTION_MODE interintra_mode;
PREDICTION_MODE interintra_uv_mode;
#if CONFIG_WEDGE_PARTITION
int use_wedge_interintra;
int interintra_wedge_index;
int interintra_uv_wedge_index;
#endif // CONFIG_WEDGE_PARTITION
#endif // CONFIG_INTERINTRA
#if CONFIG_WEDGE_PARTITION
int use_wedge_interinter;
int wedge_index;
int interinter_wedge_index;
#endif // CONFIG_WEDGE_PARTITION
} MB_MODE_INFO;
@@ -475,6 +480,8 @@ static INLINE int is_interintra_allowed(BLOCK_SIZE sb_type) {
#define WEDGE_BITS_BIG 5
#define WEDGE_NONE -1
#define WEDGE_WEIGHT_BITS 6
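/* Annotation (not in the patch): get_wedge_bits() returns the number of bits
 * used to code a wedge index for this block size, or 0 when wedge
 * partitioning is unavailable, so callers use it both as a predicate and as
 * the literal size for vp9_read_literal()/vp9_write_literal(). */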
static INLINE int get_wedge_bits(BLOCK_SIZE sb_type) {
if (sb_type < BLOCK_8X8)
return 0;

View File

@@ -23,6 +23,11 @@ static const vp9_prob default_wedge_interinter_prob[BLOCK_SIZES] = {
static const vp9_prob default_interintra_prob[BLOCK_SIZES] = {
192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192
};
#if CONFIG_WEDGE_PARTITION
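/* Annotation (not in the patch): a vp9_prob is the probability of the zero
 * branch on a 0-256 scale, so 192 starts every block size at roughly 75%
 * probability that the wedge-interintra flag is off. */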
static const vp9_prob default_wedge_interintra_prob[BLOCK_SIZES] = {
192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192
};
#endif // CONFIG_WEDGE_PARTITION
#endif // CONFIG_INTERINTRA
#if CONFIG_TX_SKIP
@@ -502,6 +507,9 @@ void vp9_init_mode_probs(FRAME_CONTEXT *fc) {
#endif // CONFIG_COPY_MODE
#if CONFIG_INTERINTRA
vp9_copy(fc->interintra_prob, default_interintra_prob);
#if CONFIG_WEDGE_PARTITION
vp9_copy(fc->wedge_interintra_prob, default_wedge_interintra_prob);
#endif // CONFIG_WEDGE_PARTITION
#endif // CONFIG_INTERINTRA
#if CONFIG_WEDGE_PARTITION
vp9_copy(fc->wedge_interinter_prob, default_wedge_interinter_prob);
@@ -662,7 +670,15 @@ void vp9_adapt_mode_probs(VP9_COMMON *cm) {
fc->interintra_prob[i] = adapt_prob(pre_fc->interintra_prob[i],
counts->interintra[i]);
}
#if CONFIG_WEDGE_PARTITION
for (i = 0; i < BLOCK_SIZES; ++i) {
if (is_interintra_allowed(i) && get_wedge_bits(i))
fc->wedge_interintra_prob[i] = adapt_prob(
pre_fc->wedge_interintra_prob[i], counts->wedge_interintra[i]);
}
#endif // CONFIG_WEDGE_PARTITION
#endif // CONFIG_INTERINTRA
#if CONFIG_WEDGE_PARTITION
for (i = 0; i < BLOCK_SIZES; ++i) {
if (get_wedge_bits(i))

View File

@@ -80,6 +80,9 @@ typedef struct frame_contexts {
#endif // CONFIG_COPY_MODE
#if CONFIG_INTERINTRA
vp9_prob interintra_prob[BLOCK_SIZES];
#if CONFIG_WEDGE_PARTITION
vp9_prob wedge_interintra_prob[BLOCK_SIZES];
#endif // CONFIG_WEDGE_PARTITION
#endif // CONFIG_INTERINTRA
#if CONFIG_WEDGE_PARTITION
vp9_prob wedge_interinter_prob[BLOCK_SIZES];
@@ -127,6 +130,9 @@ typedef struct {
#endif // CONFIG_COPY_MODE
#if CONFIG_INTERINTRA
unsigned int interintra[BLOCK_SIZES][2];
#if CONFIG_WEDGE_PARTITION
unsigned int wedge_interintra[BLOCK_SIZES][2];
#endif // CONFIG_WEDGE_PARTITION
#endif // CONFIG_INTERINTRA
#if CONFIG_WEDGE_PARTITION
unsigned int wedge_interinter[BLOCK_SIZES][2];

View File

@@ -258,7 +258,6 @@ static MV average_split_mvs(const struct macroblockd_plane *pd,
}
#if CONFIG_WEDGE_PARTITION
#define WEDGE_WEIGHT_BITS 6
static int get_masked_weight(int m) {
#define SMOOTHER_LEN 32
@@ -666,11 +665,13 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
#if CONFIG_SUPERTX
// TODO(debargha): Need high bitdepth versions
build_masked_compound_extend(dst, dst_buf->stride, tmp_dst, 64, plane,
mi->mbmi.wedge_index, mi->mbmi.sb_type,
mi->mbmi.interinter_wedge_index,
mi->mbmi.sb_type,
wedge_offset_x, wedge_offset_y, h, w);
#else
build_masked_compound(dst, dst_buf->stride, tmp_dst, 64,
mi->mbmi.wedge_index, mi->mbmi.sb_type, h, w);
mi->mbmi.interinter_wedge_index, mi->mbmi.sb_type,
h, w);
#endif // CONFIG_SUPERTX
} else {
inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
@@ -1217,11 +1218,13 @@ static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
#if CONFIG_SUPERTX
// TODO(debargha): highbitdepth versions
build_masked_compound_extend(dst, dst_buf->stride, tmp_dst, 64, plane,
mi->mbmi.wedge_index, mi->mbmi.sb_type,
mi->mbmi.interinter_wedge_index,
mi->mbmi.sb_type,
wedge_offset_x, wedge_offset_y, h, w);
#else
build_masked_compound(dst, dst_buf->stride, tmp_dst, 64,
mi->mbmi.wedge_index, mi->mbmi.sb_type, h, w);
mi->mbmi.interinter_wedge_index, mi->mbmi.sb_type,
h, w);
#endif // CONFIG_SUPERTX
} else {
#if CONFIG_VP9_HIGHBITDEPTH

View File

@@ -1218,14 +1218,295 @@ static INLINE TX_SIZE blocklen_to_txsize(int bs) {
}
}
#if CONFIG_WEDGE_PARTITION
static int get_masked_weight_interintra(int m) {
#define SMOOTHER_LEN_INTERINTRA 32
static const uint8_t smoothfn[2 * SMOOTHER_LEN_INTERINTRA + 1] = {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 1, 1,
1, 1, 2, 2, 3, 4, 5, 6,
8, 9, 12, 14, 17, 21, 24, 28,
32,
36, 40, 43, 47, 50, 52, 55, 56,
58, 59, 60, 61, 62, 62, 63, 63,
63, 63, 63, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 64, 64, 64,
};
if (m < -SMOOTHER_LEN_INTERINTRA)
return 0;
else if (m > SMOOTHER_LEN_INTERINTRA)
return (1 << WEDGE_WEIGHT_BITS);
else
return smoothfn[m + SMOOTHER_LEN_INTERINTRA];
}
static int get_hard_mask_interintra(int m) {
return m > 0;
}
// Equation of line: f(x, y) = a[0]*(x - a[2]*w/4) + a[1]*(y - a[3]*h/4) = 0
// The soft mask is obtained by computing f(x, y) and then calling
// get_masked_weight_interintra(f(x, y)).
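// Worked instance (annotation, not in the patch): a = {-1, 2, 2, 2} on an
// 8x8 block gives f(x, y) = -(x - 4) + 2 * (y - 4), a line through the
// block center; f's sign picks the wedge side and its value is fed to
// get_masked_weight_interintra() for the soft weight.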
static const int wedge_params_sml_interintra[1 << WEDGE_BITS_SML][4] = {
{-1, 2, 2, 2},
{ 1, -2, 2, 2},
{-2, 1, 2, 2},
{ 2, -1, 2, 2},
{ 2, 1, 2, 2},
{-2, -1, 2, 2},
{ 1, 2, 2, 2},
{-1, -2, 2, 2},
};
static const int wedge_params_med_hgtw_interintra[1 << WEDGE_BITS_MED][4] = {
{-1, 2, 2, 2},
{ 1, -2, 2, 2},
{-2, 1, 2, 2},
{ 2, -1, 2, 2},
{ 2, 1, 2, 2},
{-2, -1, 2, 2},
{ 1, 2, 2, 2},
{-1, -2, 2, 2},
{-1, 2, 2, 1},
{ 1, -2, 2, 1},
{-1, 2, 2, 3},
{ 1, -2, 2, 3},
{ 1, 2, 2, 1},
{-1, -2, 2, 1},
{ 1, 2, 2, 3},
{-1, -2, 2, 3},
};
static const int wedge_params_med_hltw_interintra[1 << WEDGE_BITS_MED][4] = {
{-1, 2, 2, 2},
{ 1, -2, 2, 2},
{-2, 1, 2, 2},
{ 2, -1, 2, 2},
{ 2, 1, 2, 2},
{-2, -1, 2, 2},
{ 1, 2, 2, 2},
{-1, -2, 2, 2},
{-2, 1, 1, 2},
{ 2, -1, 1, 2},
{-2, 1, 3, 2},
{ 2, -1, 3, 2},
{ 2, 1, 1, 2},
{-2, -1, 1, 2},
{ 2, 1, 3, 2},
{-2, -1, 3, 2},
};
static const int wedge_params_med_heqw_interintra[1 << WEDGE_BITS_MED][4] = {
{-1, 2, 2, 2},
{ 1, -2, 2, 2},
{-2, 1, 2, 2},
{ 2, -1, 2, 2},
{ 2, 1, 2, 2},
{-2, -1, 2, 2},
{ 1, 2, 2, 2},
{-1, -2, 2, 2},
{ 0, 2, 0, 1},
{ 0, -2, 0, 1},
{ 0, 2, 0, 3},
{ 0, -2, 0, 3},
{ 2, 0, 1, 0},
{-2, 0, 1, 0},
{ 2, 0, 3, 0},
{-2, 0, 3, 0},
};
static const int wedge_params_big_hgtw_interintra[1 << WEDGE_BITS_BIG][4] = {
{-1, 2, 2, 2},
{ 1, -2, 2, 2},
{-2, 1, 2, 2},
{ 2, -1, 2, 2},
{ 2, 1, 2, 2},
{-2, -1, 2, 2},
{ 1, 2, 2, 2},
{-1, -2, 2, 2},
{-1, 2, 2, 1},
{ 1, -2, 2, 1},
{-1, 2, 2, 3},
{ 1, -2, 2, 3},
{ 1, 2, 2, 1},
{-1, -2, 2, 1},
{ 1, 2, 2, 3},
{-1, -2, 2, 3},
{-2, 1, 1, 2},
{ 2, -1, 1, 2},
{-2, 1, 3, 2},
{ 2, -1, 3, 2},
{ 2, 1, 1, 2},
{-2, -1, 1, 2},
{ 2, 1, 3, 2},
{-2, -1, 3, 2},
{ 0, 2, 0, 1},
{ 0, -2, 0, 1},
{ 0, 2, 0, 2},
{ 0, -2, 0, 2},
{ 0, 2, 0, 3},
{ 0, -2, 0, 3},
{ 2, 0, 2, 0},
{-2, 0, 2, 0},
};
static const int wedge_params_big_hltw_interintra[1 << WEDGE_BITS_BIG][4] = {
{-1, 2, 2, 2},
{ 1, -2, 2, 2},
{-2, 1, 2, 2},
{ 2, -1, 2, 2},
{ 2, 1, 2, 2},
{-2, -1, 2, 2},
{ 1, 2, 2, 2},
{-1, -2, 2, 2},
{-1, 2, 2, 1},
{ 1, -2, 2, 1},
{-1, 2, 2, 3},
{ 1, -2, 2, 3},
{ 1, 2, 2, 1},
{-1, -2, 2, 1},
{ 1, 2, 2, 3},
{-1, -2, 2, 3},
{-2, 1, 1, 2},
{ 2, -1, 1, 2},
{-2, 1, 3, 2},
{ 2, -1, 3, 2},
{ 2, 1, 1, 2},
{-2, -1, 1, 2},
{ 2, 1, 3, 2},
{-2, -1, 3, 2},
{ 0, 2, 0, 2},
{ 0, -2, 0, 2},
{ 2, 0, 1, 0},
{-2, 0, 1, 0},
{ 2, 0, 2, 0},
{-2, 0, 2, 0},
{ 2, 0, 3, 0},
{-2, 0, 3, 0},
};
static const int wedge_params_big_heqw_interintra[1 << WEDGE_BITS_BIG][4] = {
{-1, 2, 2, 2},
{ 1, -2, 2, 2},
{-2, 1, 2, 2},
{ 2, -1, 2, 2},
{ 2, 1, 2, 2},
{-2, -1, 2, 2},
{ 1, 2, 2, 2},
{-1, -2, 2, 2},
{-1, 2, 2, 1},
{ 1, -2, 2, 1},
{-1, 2, 2, 3},
{ 1, -2, 2, 3},
{ 1, 2, 2, 1},
{-1, -2, 2, 1},
{ 1, 2, 2, 3},
{-1, -2, 2, 3},
{-2, 1, 1, 2},
{ 2, -1, 1, 2},
{-2, 1, 3, 2},
{ 2, -1, 3, 2},
{ 2, 1, 1, 2},
{-2, -1, 1, 2},
{ 2, 1, 3, 2},
{-2, -1, 3, 2},
{ 0, 2, 0, 1},
{ 0, -2, 0, 1},
{ 0, 2, 0, 3},
{ 0, -2, 0, 3},
{ 2, 0, 1, 0},
{-2, 0, 1, 0},
{ 2, 0, 3, 0},
{-2, 0, 3, 0},
};
static const int *get_wedge_params_interintra(int wedge_index,
BLOCK_SIZE sb_type,
int h, int w) {
const int *a = NULL;
const int wedge_bits = get_wedge_bits(sb_type);
if (wedge_index == WEDGE_NONE)
return NULL;
if (wedge_bits == WEDGE_BITS_SML) {
a = wedge_params_sml_interintra[wedge_index];
} else if (wedge_bits == WEDGE_BITS_MED) {
if (h > w)
a = wedge_params_med_hgtw_interintra[wedge_index];
else if (h < w)
a = wedge_params_med_hltw_interintra[wedge_index];
else
a = wedge_params_med_heqw_interintra[wedge_index];
} else if (wedge_bits == WEDGE_BITS_BIG) {
if (h > w)
a = wedge_params_big_hgtw_interintra[wedge_index];
else if (h < w)
a = wedge_params_big_hltw_interintra[wedge_index];
else
a = wedge_params_big_heqw_interintra[wedge_index];
} else {
assert(0);
}
return a;
}
void vp9_generate_masked_weight_interintra(int wedge_index,
BLOCK_SIZE sb_type,
int h, int w,
uint8_t *mask, int stride) {
int i, j;
const int *a = get_wedge_params_interintra(wedge_index, sb_type, h, w);
if (!a) return;
for (i = 0; i < h; ++i)
for (j = 0; j < w; ++j) {
int x = (j - (a[2] * w) / 4);
int y = (i - (a[3] * h) / 4);
int m = a[0] * x + a[1] * y;
mask[i * stride + j] = get_masked_weight_interintra(m);
}
}
void vp9_generate_hard_mask_interintra(int wedge_index, BLOCK_SIZE sb_type,
int h, int w, uint8_t *mask, int stride) {
int i, j;
const int *a = get_wedge_params_interintra(wedge_index, sb_type, h, w);
if (!a) return;
for (i = 0; i < h; ++i)
for (j = 0; j < w; ++j) {
int x = (j - (a[2] * w) / 4);
int y = (i - (a[3] * h) / 4);
int m = a[0] * x + a[1] * y;
mask[i * stride + j] = get_hard_mask_interintra(m);
}
}
#endif // CONFIG_WEDGE_PARTITION
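/* Usage sketch (annotation, not in the patch):
 *   uint8_t mask[32 * 32];
 *   vp9_generate_masked_weight_interintra(3, BLOCK_32X32, 32, 32, mask, 32);
 * mask[] then holds 6-bit weights in [0, 64]; when use_wedge_interintra is
 * set, combine_interintra() below applies m to the intra pixel and
 * (1 << WEDGE_WEIGHT_BITS) - m to the inter pixel. */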
static void combine_interintra(PREDICTION_MODE mode,
#if CONFIG_WEDGE_PARTITION
int use_wedge_interintra,
int wedge_index,
#endif // CONFIG_WEDGE_PARTITION
BLOCK_SIZE bsize,
uint8_t *comppred,
int compstride,
uint8_t *interpred,
int interstride,
uint8_t *intrapred,
int intrastride,
int bw, int bh) {
int intrastride) {
static const int scale_bits = 8;
static const int scale_max = 256;
static const int scale_round = 127;
@@ -1239,6 +1520,8 @@ static void combine_interintra(PREDICTION_MODE mode,
70, 70, 69, 69, 69, 69, 68, 68,
68, 68, 68, 67, 67, 67, 67, 67,
};
const int bw = 4 << b_width_log2_lookup[bsize];
const int bh = 4 << b_height_log2_lookup[bsize];
int size = MAX(bw, bh);
int size_scale = (size >= 64 ? 1 :
@@ -1247,6 +1530,23 @@ static void combine_interintra(PREDICTION_MODE mode,
size == 8 ? 8 : 16);
int i, j;
#if CONFIG_WEDGE_PARTITION
if (use_wedge_interintra && get_wedge_bits(bsize)) {
uint8_t mask[4096];
vp9_generate_masked_weight_interintra(wedge_index, bsize, bh, bw, mask, bw);
for (i = 0; i < bh; ++i) {
for (j = 0; j < bw; ++j) {
int m = mask[i * bw + j];
comppred[i * compstride + j] =
(intrapred[i * intrastride + j] * m +
interpred[i * interstride + j] * ((1 << WEDGE_WEIGHT_BITS) - m) +
(1 << (WEDGE_WEIGHT_BITS - 1))) >> WEDGE_WEIGHT_BITS;
}
}
return;
}
#endif // CONFIG_WEDGE_PARTITION
switch (mode) {
case V_PRED:
for (i = 0; i < bh; ++i) {
@@ -1541,8 +1841,13 @@ void vp9_build_interintra_predictors_sby(MACROBLOCKD *xd,
xd->mi[0].src_mi->mbmi.interintra_mode, bw, bh,
xd->up_available, xd->left_available, 0, 0);
combine_interintra(xd->mi[0].src_mi->mbmi.interintra_mode,
#if CONFIG_WEDGE_PARTITION
xd->mi[0].src_mi->mbmi.use_wedge_interintra,
xd->mi[0].src_mi->mbmi.interintra_wedge_index,
#endif // CONFIG_WEDGE_PARTITION
bsize,
xd->plane[0].dst.buf, xd->plane[0].dst.stride,
ypred, ystride, intrapredictor, bw, bw, bh);
ypred, ystride, intrapredictor, bw);
}
void vp9_build_interintra_predictors_sbuv(MACROBLOCKD *xd,
@@ -1550,8 +1855,9 @@ void vp9_build_interintra_predictors_sbuv(MACROBLOCKD *xd,
uint8_t *vpred,
int ustride, int vstride,
BLOCK_SIZE bsize) {
int bwl = b_width_log2_lookup[bsize], bw = 2 << bwl;
int bhl = b_height_log2_lookup[bsize], bh = 2 << bhl;
BLOCK_SIZE uvbsize = get_plane_block_size(bsize, &xd->plane[1]);
int bw = 4 << b_width_log2_lookup[uvbsize];
int bh = 4 << b_height_log2_lookup[uvbsize];
uint8_t uintrapredictor[4096];
uint8_t vintrapredictor[4096];
build_intra_predictors_for_interintra(
@@ -1565,11 +1871,21 @@ void vp9_build_interintra_predictors_sbuv(MACROBLOCKD *xd,
xd->mi[0].src_mi->mbmi.interintra_uv_mode, bw, bh,
xd->up_available, xd->left_available, 0, 2);
combine_interintra(xd->mi[0].src_mi->mbmi.interintra_uv_mode,
#if CONFIG_WEDGE_PARTITION
xd->mi[0].src_mi->mbmi.use_wedge_interintra,
xd->mi[0].src_mi->mbmi.interintra_uv_wedge_index,
#endif // CONFIG_WEDGE_PARTITION
uvbsize,
xd->plane[1].dst.buf, xd->plane[1].dst.stride,
upred, ustride, uintrapredictor, bw, bw, bh);
upred, ustride, uintrapredictor, bw);
combine_interintra(xd->mi[0].src_mi->mbmi.interintra_uv_mode,
#if CONFIG_WEDGE_PARTITION
xd->mi[0].src_mi->mbmi.use_wedge_interintra,
xd->mi[0].src_mi->mbmi.interintra_uv_wedge_index,
#endif // CONFIG_WEDGE_PARTITION
uvbsize,
xd->plane[2].dst.buf, xd->plane[2].dst.stride,
vpred, vstride, vintrapredictor, bw, bw, bh);
vpred, vstride, vintrapredictor, bw);
}
void vp9_build_interintra_predictors(MACROBLOCKD *xd,

View File

@@ -47,6 +47,12 @@ void vp9_build_interintra_predictors_sbuv(MACROBLOCKD *xd,
uint8_t *vpred,
int ustride, int vstride,
BLOCK_SIZE bsize);
#if CONFIG_WEDGE_PARTITION
void vp9_generate_masked_weight_interintra(int wedge_index,
BLOCK_SIZE sb_type,
int h, int w,
uint8_t *mask, int stride);
#endif // CONFIG_WEDGE_PARTITION
#endif // CONFIG_INTERINTRA
#ifdef __cplusplus
} // extern "C"

View File

@@ -2279,6 +2279,12 @@ static int read_compressed_header(VP9Decoder *pbi, const uint8_t *data,
vp9_diff_update_prob(&r, &fc->interintra_prob[i]);
}
}
#if CONFIG_WEDGE_PARTITION
for (i = 0; i < BLOCK_SIZES; i++) {
if (is_interintra_allowed(i) && get_wedge_bits(i))
vp9_diff_update_prob(&r, &fc->wedge_interintra_prob[i]);
}
#endif // CONFIG_WEDGE_PARTITION
}
#endif // CONFIG_INTERINTRA
#if CONFIG_WEDGE_PARTITION

View File

@@ -804,10 +804,25 @@ static void read_inter_block_mode_info(VP9_COMMON *const cm,
mbmi->ref_frame[1] = vp9_read(r, cm->fc.interintra_prob[bsize]) ?
INTRA_FRAME : NONE;
cm->counts.interintra[bsize][mbmi->ref_frame[1] == INTRA_FRAME]++;
#if CONFIG_WEDGE_PARTITION
mbmi->use_wedge_interintra = 0;
#endif // CONFIG_WEDGE_PARTITION
if (mbmi->ref_frame[1] == INTRA_FRAME) {
mbmi->interintra_mode =
read_intra_mode_y(cm, r, size_group_lookup[bsize]);
mbmi->interintra_uv_mode = mbmi->interintra_mode;
#if CONFIG_WEDGE_PARTITION
if (get_wedge_bits(bsize)) {
mbmi->use_wedge_interintra = vp9_read(
r, cm->fc.wedge_interintra_prob[bsize]);
cm->counts.wedge_interintra[bsize][mbmi->use_wedge_interintra]++;
if (mbmi->use_wedge_interintra) {
mbmi->interintra_wedge_index = vp9_read_literal(
r, get_wedge_bits(bsize));
mbmi->interintra_uv_wedge_index = mbmi->interintra_wedge_index;
}
}
#endif // CONFIG_WEDGE_PARTITION
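/* Annotation: the read order here (enable flag, then wedge-index literal)
   must stay in sync with the writer in pack_inter_mode_mvs(). */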
}
}
#endif // CONFIG_INTERINTRA
@@ -876,14 +891,16 @@ static void read_inter_block_mode_info(VP9_COMMON *const cm,
#if CONFIG_WEDGE_PARTITION
mbmi->use_wedge_interinter = 0;
if (cm->reference_mode != SINGLE_REFERENCE &&
#if CONFIG_COMPOUND_MODES
is_inter_compound_mode(mbmi->mode) &&
#endif // CONFIG_COMPOUND_MODES
get_wedge_bits(bsize) &&
mbmi->ref_frame[1] > INTRA_FRAME) {
mbmi->use_wedge_interinter =
vp9_read(r, cm->fc.wedge_interinter_prob[bsize]);
cm->counts.wedge_interinter[bsize][mbmi->use_wedge_interinter]++;
if (mbmi->use_wedge_interinter) {
mbmi->wedge_index = vp9_read_literal(r, get_wedge_bits(bsize));
mbmi->interinter_wedge_index = vp9_read_literal(r, get_wedge_bits(bsize));
}
}
#endif // CONFIG_WEDGE_PARTITION

View File

@@ -585,11 +585,21 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
!supertx_enabled &&
#endif
mbmi->ref_frame[1] <= INTRA_FRAME) {
vp9_write(w, mbmi->ref_frame[1] == INTRA_FRAME,
cm->fc.interintra_prob[bsize]);
if (mbmi->ref_frame[1] == INTRA_FRAME) {
write_intra_mode(w, mbmi->interintra_mode,
cm->fc.y_mode_prob[size_group_lookup[bsize]]);
vp9_write(w, mbmi->ref_frame[1] == INTRA_FRAME,
cm->fc.interintra_prob[bsize]);
if (mbmi->ref_frame[1] == INTRA_FRAME) {
write_intra_mode(w, mbmi->interintra_mode,
cm->fc.y_mode_prob[size_group_lookup[bsize]]);
#if CONFIG_WEDGE_PARTITION
if (get_wedge_bits(bsize)) {
vp9_write(w, mbmi->use_wedge_interintra,
cm->fc.wedge_interintra_prob[bsize]);
if (mbmi->use_wedge_interintra) {
vp9_write_literal(w, mbmi->interintra_wedge_index,
get_wedge_bits(bsize));
}
}
#endif // CONFIG_WEDGE_PARTITION
}
}
#endif // CONFIG_INTERINTRA
@@ -660,13 +670,16 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
}
#if CONFIG_WEDGE_PARTITION
if (cm->reference_mode != SINGLE_REFERENCE &&
#if CONFIG_COMPOUND_MODES
is_inter_compound_mode(mode) &&
#endif // CONFIG_COMPOUND_MODES
get_wedge_bits(bsize) &&
mbmi->ref_frame[1] > INTRA_FRAME) {
vp9_write(w, mbmi->use_wedge_interinter,
cm->fc.wedge_interinter_prob[bsize]);
if (mbmi->use_wedge_interinter)
vp9_write_literal(w, mbmi->wedge_index, get_wedge_bits(bsize));
vp9_write_literal(w, mbmi->interinter_wedge_index,
get_wedge_bits(bsize));
}
#endif // CONFIG_WEDGE_PARTITION
}
@@ -1751,6 +1764,14 @@ static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
cm->counts.interintra[i]);
}
}
#if CONFIG_WEDGE_PARTITION
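/* Annotation: this conditional probability update mirrors the read loop in
   read_compressed_header() above. */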
for (i = 0; i < BLOCK_SIZES; i++) {
if (is_interintra_allowed(i) && get_wedge_bits(i))
vp9_cond_prob_diff_update(&header_bc,
&fc->wedge_interintra_prob[i],
cm->counts.wedge_interintra[i]);
}
#endif // CONFIG_WEDGE_PARTITION
}
#endif // CONFIG_INTERINTRA
#if CONFIG_WEDGE_PARTITION

View File

@@ -871,6 +871,10 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
if (mbmi->ref_frame[1] == INTRA_FRAME) {
++cm->counts.y_mode[size_group_lookup[bsize]][mbmi->interintra_mode];
++cm->counts.interintra[bsize][1];
#if CONFIG_WEDGE_PARTITION
if (get_wedge_bits(bsize))
++cm->counts.wedge_interintra[bsize][mbmi->use_wedge_interintra];
#endif // CONFIG_WEDGE_PARTITION
} else {
++cm->counts.interintra[bsize][0];
}

View File

@@ -3220,9 +3220,6 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
int rs = 0;
#if CONFIG_INTERINTRA || CONFIG_WEDGE_PARTITION
int rate_mv_tmp = 0;
#endif
#if CONFIG_INTERINTRA
const int is_comp_interintra_pred = (mbmi->ref_frame[1] == INTRA_FRAME);
#endif
INTERP_FILTER best_filter = SWITCHABLE;
uint8_t skip_txfm[MAX_MB_PLANE << 2] = {0};
@@ -3237,6 +3234,14 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
int64_t skip_sse_sb = INT64_MAX;
int64_t distortion_y = 0, distortion_uv = 0;
#if CONFIG_INTERINTRA
const int is_comp_interintra_pred = (mbmi->ref_frame[1] == INTRA_FRAME);
*compmode_interintra_cost = 0;
#if CONFIG_WEDGE_PARTITION
mbmi->use_wedge_interintra = 0;
#endif // CONFIG_WEDGE_PARTITION
#endif // CONFIG_INTERINTRA
#if CONFIG_WEDGE_PARTITION
mbmi->use_wedge_interinter = 0;
*compmode_wedge_cost = 0;
@@ -3613,7 +3618,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
int rate_mvs[2], tmp_rate_mv = 0;
// TODO(spencere, debargha): Reimplement to make this run faster
for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
mbmi->wedge_index = wedge_index;
mbmi->interinter_wedge_index = wedge_index;
vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, NULL, NULL);
rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv_tmp + rate_sum, dist_sum);
@@ -3622,31 +3627,31 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
best_rd_wedge = rd;
}
}
mbmi->wedge_index = best_wedge_index;
mbmi->interinter_wedge_index = best_wedge_index;
#if CONFIG_COMPOUND_MODES
if (this_mode == NEW_NEWMV) {
do_masked_motion_search_indexed(cpi, x, mbmi->wedge_index, bsize,
mi_row, mi_col,
do_masked_motion_search_indexed(cpi, x, mbmi->interinter_wedge_index,
bsize, mi_row, mi_col,
tmp_mv, rate_mvs, 2);
tmp_rate_mv = rate_mvs[0] + rate_mvs[1];
mbmi->mv[0].as_int = tmp_mv[0].as_int;
mbmi->mv[1].as_int = tmp_mv[1].as_int;
} else if (this_mode == NEW_NEARESTMV || this_mode == NEW_NEARMV) {
do_masked_motion_search_indexed(cpi, x, mbmi->wedge_index, bsize,
mi_row, mi_col,
do_masked_motion_search_indexed(cpi, x, mbmi->interinter_wedge_index,
bsize, mi_row, mi_col,
tmp_mv, rate_mvs, 0);
tmp_rate_mv = rate_mvs[0];
mbmi->mv[0].as_int = tmp_mv[0].as_int;
} else if (this_mode == NEAREST_NEWMV || this_mode == NEAR_NEWMV) {
do_masked_motion_search_indexed(cpi, x, mbmi->wedge_index, bsize,
mi_row, mi_col,
do_masked_motion_search_indexed(cpi, x, mbmi->interinter_wedge_index,
bsize, mi_row, mi_col,
tmp_mv, rate_mvs, 1);
tmp_rate_mv = rate_mvs[1];
mbmi->mv[1].as_int = tmp_mv[1].as_int;
}
#else
do_masked_motion_search_indexed(cpi, x, mbmi->wedge_index, bsize,
mi_row, mi_col,
do_masked_motion_search_indexed(cpi, x, mbmi->interinter_wedge_index,
bsize, mi_row, mi_col,
tmp_mv, rate_mvs, 2);
tmp_rate_mv = rate_mvs[0] + rate_mvs[1];
mbmi->mv[0].as_int = tmp_mv[0].as_int;
@@ -3664,7 +3669,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
}
if (best_rd_wedge < best_rd_nowedge) {
mbmi->use_wedge_interinter = 1;
mbmi->wedge_index = best_wedge_index;
mbmi->interinter_wedge_index = best_wedge_index;
xd->mi[0].src_mi->bmi[0].as_mv[0].as_int = mbmi->mv[0].as_int;
xd->mi[0].src_mi->bmi[0].as_mv[1].as_int = mbmi->mv[1].as_int;
rate_mv_tmp = tmp_rate_mv;
@@ -3675,7 +3680,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
}
} else {
for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
mbmi->wedge_index = wedge_index;
mbmi->interinter_wedge_index = wedge_index;
vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, NULL, NULL);
rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv_tmp + rate_sum, dist_sum);
@@ -3686,7 +3691,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
}
if (best_rd_wedge < best_rd_nowedge) {
mbmi->use_wedge_interinter = 1;
mbmi->wedge_index = best_wedge_index;
mbmi->interinter_wedge_index = best_wedge_index;
} else {
mbmi->use_wedge_interinter = 0;
}
@@ -3715,6 +3720,19 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
int rmode, rate_sum;
int64_t dist_sum;
int j;
#if CONFIG_WEDGE_PARTITION
int wedge_bits, wedge_types, wedge_index, best_wedge_index = -1;
int64_t best_interintra_rd_nowedge, best_interintra_rd_wedge = INT64_MAX;
int rwedge;
#define WEDGE_INTERINTRA_REFINE_SEARCH
#ifdef WEDGE_INTERINTRA_REFINE_SEARCH
int bw = 4 << b_width_log2_lookup[mbmi->sb_type],
bh = 4 << b_height_log2_lookup[mbmi->sb_type];
uint8_t mask[4096];
int_mv tmp_mv;
int tmp_rate_mv = 0;
#endif
#endif // CONFIG_WEDGE_PARTITION
mbmi->ref_frame[1] = NONE;
for (j = 0; j < MAX_MB_PLANE; j++) {
xd->plane[j].dst.buf = tmp_buf + j * 64 * 64;
@@ -3741,6 +3759,84 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
}
mbmi->interintra_mode = best_interintra_mode;
mbmi->interintra_uv_mode = best_interintra_mode;
#if CONFIG_WEDGE_PARTITION
wedge_bits = get_wedge_bits(bsize);
rmode = cpi->mbmode_cost[mbmi->interintra_mode];
if (wedge_bits) {
mbmi->use_wedge_interintra = 0;
vp9_build_interintra_predictors(xd, tmp_buf, tmp_buf + 64 * 64,
tmp_buf + 2 * 64 * 64, 64, 64, 64, bsize);
model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, NULL, NULL);
rwedge = vp9_cost_bit(cm->fc.wedge_interintra_prob[bsize], 0);
rd = RDCOST(x->rdmult, x->rddiv,
rmode + rate_mv_tmp + rwedge + rate_sum, dist_sum);
best_interintra_rd_nowedge = rd;
mbmi->use_wedge_interintra = 1;
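// Annotation: vp9 rate costs are in 1/256-bit units, so wedge_bits * 256
// below charges the wedge-index literal on top of the enable flag's cost.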
rwedge = wedge_bits * 256 +
vp9_cost_bit(cm->fc.wedge_interintra_prob[bsize], 1);
wedge_types = (1 << wedge_bits);
for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
mbmi->interintra_wedge_index = wedge_index;
mbmi->interintra_uv_wedge_index = wedge_index;
vp9_build_interintra_predictors(xd, tmp_buf, tmp_buf + 64 * 64,
tmp_buf + 2 * 64 * 64, 64, 64, 64, bsize);
model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, NULL, NULL);
rd = RDCOST(x->rdmult, x->rddiv,
rmode + rate_mv_tmp + rwedge + rate_sum, dist_sum);
if (rd < best_interintra_rd_wedge) {
best_interintra_rd_wedge = rd;
best_wedge_index = wedge_index;
}
}
#ifdef WEDGE_INTERINTRA_REFINE_SEARCH
// Refine motion vector.
if (this_mode == NEWMV) {
int j;
mbmi->interintra_wedge_index = best_wedge_index;
mbmi->interintra_uv_wedge_index = best_wedge_index;
vp9_generate_masked_weight_interintra(best_wedge_index, bsize,
bh, bw, mask, bw);
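// Annotation: the generated mask weights the intra side, so invert it
// below to hand the masked motion search the complementary inter-side
// weights.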
for (i = 0; i < bh; ++i)
for (j = 0; j < bw; ++j)
mask[i * bw + j] = 64 - mask[i * bw + j];
do_masked_motion_search(cpi, x, mask, bw, bsize,
mi_row, mi_col, &tmp_mv, &tmp_rate_mv, 0);
mbmi->mv[0].as_int = tmp_mv.as_int;
vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, NULL, NULL);
rd = RDCOST(x->rdmult, x->rddiv,
rmode + tmp_rate_mv + rwedge + rate_sum, dist_sum);
if (rd < best_interintra_rd_wedge) {
best_interintra_rd_wedge = rd;
} else {
tmp_mv.as_int = cur_mv[0].as_int;
tmp_rate_mv = rate_mv_tmp;
}
} else {
tmp_mv.as_int = cur_mv[0].as_int;
tmp_rate_mv = rate_mv_tmp;
}
mbmi->mv[0].as_int = tmp_mv.as_int;
#endif
if (best_interintra_rd_wedge < best_interintra_rd_nowedge) {
mbmi->use_wedge_interintra = 1;
mbmi->interintra_wedge_index = best_wedge_index;
mbmi->interintra_uv_wedge_index = best_wedge_index;
best_interintra_rd = best_interintra_rd_wedge;
#ifdef WEDGE_INTERINTRA_REFINE_SEARCH
mbmi->mv[0].as_int = tmp_mv.as_int;
rate_mv_tmp = tmp_rate_mv;
#endif
} else {
mbmi->use_wedge_interintra = 0;
best_interintra_rd = best_interintra_rd_nowedge;
#ifdef WEDGE_INTERINTRA_REFINE_SEARCH
mbmi->mv[0].as_int = cur_mv[0].as_int;
#endif
}
}
#endif // CONFIG_WEDGE_PARTITION
if (ref_best_rd < INT64_MAX &&
best_interintra_rd / 2 > ref_best_rd) {
return INT64_MAX;
@@ -3754,6 +3850,15 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
is_comp_interintra_pred);
if (is_comp_interintra_pred) {
*compmode_interintra_cost += cpi->mbmode_cost[mbmi->interintra_mode];
#if CONFIG_WEDGE_PARTITION
if (get_wedge_bits(bsize)) {
*compmode_interintra_cost += vp9_cost_bit(
cm->fc.wedge_interintra_prob[bsize], mbmi->use_wedge_interintra);
if (mbmi->use_wedge_interintra) {
*compmode_interintra_cost += get_wedge_bits(bsize) * 256;
}
}
#endif // CONFIG_WEDGE_PARTITION
}
}
#endif // CONFIG_INTERINTRA
@@ -5418,9 +5523,14 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
#if CONFIG_COPY_MODE
mbmi->copy_mode = NOREF;
#endif
#if CONFIG_INTERINTRA
#if CONFIG_WEDGE_PARTITION
mbmi->use_wedge_interintra = 0;
#endif // CONFIG_WEDGE_PARTITION
#endif // CONFIG_INTERINTRA
#if CONFIG_WEDGE_PARTITION
mbmi->use_wedge_interinter = 0;
#endif
#endif // CONFIG_WEDGE_PARTITION
x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
vpx_memset(x->zcoeff_blk[TX_4X4], 0, 4);