diff --git a/configure b/configure index 6d0c97639..fd21538f8 100755 --- a/configure +++ b/configure @@ -288,6 +288,7 @@ EXPERIMENT_LIST=" tx_skip supertx copy_mode + interintra " CONFIG_LIST=" external_build diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h index e174f6aeb..05b017fe3 100644 --- a/vp9/common/vp9_blockd.h +++ b/vp9/common/vp9_blockd.h @@ -161,6 +161,10 @@ typedef struct { COPY_MODE copy_mode; int inter_ref_count; #endif // CONFIG_COPY_MODE +#if CONFIG_INTERINTRA + PREDICTION_MODE interintra_mode; + PREDICTION_MODE interintra_uv_mode; +#endif // CONFIG_INTERINTRA } MB_MODE_INFO; typedef struct MODE_INFO { @@ -429,6 +433,12 @@ void vp9_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd, BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob, int aoff, int loff); +#if CONFIG_INTERINTRA +static INLINE int is_interintra_allowed(BLOCK_SIZE sb_type) { + return ((sb_type >= BLOCK_8X8) && (sb_type < BLOCK_64X64)); +} +#endif // CONFIG_INTERINTRA + #ifdef __cplusplus } // extern "C" #endif diff --git a/vp9/common/vp9_entropymode.c b/vp9/common/vp9_entropymode.c index e44bd59fe..e5a0b3e2c 100644 --- a/vp9/common/vp9_entropymode.c +++ b/vp9/common/vp9_entropymode.c @@ -13,10 +13,17 @@ #include "vp9/common/vp9_onyxc_int.h" #include "vp9/common/vp9_seg_common.h" +#if CONFIG_INTERINTRA +static const vp9_prob default_interintra_prob[BLOCK_SIZES] = { + 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192 +}; +#endif // CONFIG_INTERINTRA + #if CONFIG_TX_SKIP static const vp9_prob default_y_tx_skip_prob[2] = {190, 210}; static const vp9_prob default_uv_tx_skip_prob[2] = {250, 160}; -#endif +#endif // CONFIG_TX_SKIP + const vp9_prob vp9_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] = { { // above = dc { 137, 30, 42, 148, 151, 207, 70, 52, 91 }, // left = dc @@ -456,6 +463,9 @@ void vp9_init_mode_probs(FRAME_CONTEXT *fc) { vp9_copy(fc->copy_mode_probs_l2, default_copy_mode_probs_l2); vp9_copy(fc->copy_mode_probs, default_copy_mode_probs); #endif // CONFIG_COPY_MODE +#if CONFIG_INTERINTRA + vp9_copy(fc->interintra_prob, default_interintra_prob); +#endif // CONFIG_INTERINTRA } const vp9_tree_index vp9_switchable_interp_tree @@ -598,6 +608,13 @@ void vp9_adapt_mode_probs(VP9_COMMON *cm) { counts->copy_mode[i], fc->copy_mode_probs[i]); } #endif // CONFIG_COPY_MODE +#if CONFIG_INTERINTRA + for (i = 0; i < BLOCK_SIZES; ++i) { + if (is_interintra_allowed(i)) + fc->interintra_prob[i] = adapt_prob(pre_fc->interintra_prob[i], + counts->interintra[i]); + } +#endif // CONFIG_INTERINTRA } static void set_default_lf_deltas(struct loopfilter *lf) { diff --git a/vp9/common/vp9_entropymode.h b/vp9/common/vp9_entropymode.h index 068f95e55..2b7a0c06e 100644 --- a/vp9/common/vp9_entropymode.h +++ b/vp9/common/vp9_entropymode.h @@ -74,6 +74,9 @@ typedef struct frame_contexts { vp9_prob copy_mode_probs_l2[COPY_MODE_CONTEXTS][1]; vp9_prob copy_mode_probs[COPY_MODE_CONTEXTS][COPY_MODE_COUNT - 2]; #endif // CONFIG_COPY_MODE +#if CONFIG_INTERINTRA + vp9_prob interintra_prob[BLOCK_SIZES]; +#endif // CONFIG_INTERINTRA } FRAME_CONTEXT; typedef struct { @@ -112,6 +115,9 @@ typedef struct { unsigned int copy_mode_l2[COPY_MODE_CONTEXTS][2]; unsigned int copy_mode[COPY_MODE_CONTEXTS][COPY_MODE_COUNT - 1]; #endif // CONFIG_COPY_MODE +#if CONFIG_INTERINTRA + unsigned int interintra[BLOCK_SIZES][2]; +#endif // CONFIG_INTERINTRA } FRAME_COUNTS; extern const vp9_prob vp9_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1]; diff --git a/vp9/common/vp9_mvref_common.c 
b/vp9/common/vp9_mvref_common.c index 9f044be33..f40024b67 100644 --- a/vp9/common/vp9_mvref_common.c +++ b/vp9/common/vp9_mvref_common.c @@ -191,6 +191,15 @@ static int compare_interinfo(MB_MODE_INFO *mbmi, MB_MODE_INFO *ref_mbmi) { return 1; } else { int is_same; +#if CONFIG_INTERINTRA + MV_REFERENCE_FRAME mbmi_ref1_backup = mbmi->ref_frame[1]; + MV_REFERENCE_FRAME refmbmi_ref1_backup = ref_mbmi->ref_frame[1]; + + if (mbmi->ref_frame[1] == INTRA_FRAME) + mbmi->ref_frame[1] = NONE; + if (ref_mbmi->ref_frame[1] == INTRA_FRAME) + ref_mbmi->ref_frame[1] = NONE; +#endif // CONFIG_INTERINTRA if (mbmi->ref_frame[0] == ref_mbmi->ref_frame[0] && mbmi->ref_frame[1] == ref_mbmi->ref_frame[1]) { if (mbmi->ref_frame[1] > INTRA_FRAME) @@ -203,6 +212,10 @@ static int compare_interinfo(MB_MODE_INFO *mbmi, MB_MODE_INFO *ref_mbmi) { } else { is_same = 0; } +#if CONFIG_INTERINTRA + mbmi->ref_frame[1] = mbmi_ref1_backup; + ref_mbmi->ref_frame[1] = refmbmi_ref1_backup; +#endif // CONFIG_INTERINTRA return is_same; } diff --git a/vp9/common/vp9_reconinter.c b/vp9/common/vp9_reconinter.c index 6d97b8384..2f92c113c 100644 --- a/vp9/common/vp9_reconinter.c +++ b/vp9/common/vp9_reconinter.c @@ -353,18 +353,41 @@ static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize, void vp9_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col, BLOCK_SIZE bsize) { build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0); +#if CONFIG_INTERINTRA + if (xd->mi[0].src_mi->mbmi.ref_frame[1] == INTRA_FRAME && + is_interintra_allowed(xd->mi[0].src_mi->mbmi.sb_type)) + vp9_build_interintra_predictors_sby(xd, xd->plane[0].dst.buf, + xd->plane[0].dst.stride, bsize); +#endif // CONFIG_INTERINTRA } void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col, BLOCK_SIZE bsize) { build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1, MAX_MB_PLANE - 1); +#if CONFIG_INTERINTRA + if (xd->mi[0].src_mi->mbmi.ref_frame[1] == INTRA_FRAME && + is_interintra_allowed(xd->mi[0].src_mi->mbmi.sb_type)) + vp9_build_interintra_predictors_sbuv(xd, xd->plane[1].dst.buf, + xd->plane[2].dst.buf, + xd->plane[1].dst.stride, + xd->plane[2].dst.stride, bsize); +#endif // CONFIG_INTERINTRA } void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, BLOCK_SIZE bsize) { build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, MAX_MB_PLANE - 1); +#if CONFIG_INTERINTRA + if (xd->mi[0].src_mi->mbmi.ref_frame[1] == INTRA_FRAME && + is_interintra_allowed(xd->mi[0].src_mi->mbmi.sb_type)) + vp9_build_interintra_predictors(xd, xd->plane[0].dst.buf, + xd->plane[1].dst.buf, xd->plane[2].dst.buf, + xd->plane[0].dst.stride, + xd->plane[1].dst.stride, + xd->plane[2].dst.stride, bsize); +#endif // CONFIG_INTERINTRA } #if CONFIG_SUPERTX @@ -765,15 +788,25 @@ void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, 0, 0, bw, bh, mi_x, mi_y); } } +#if CONFIG_INTERINTRA + if (xd->mi[0].src_mi->mbmi.ref_frame[1] == INTRA_FRAME && + is_interintra_allowed(xd->mi[0].src_mi->mbmi.sb_type)) + vp9_build_interintra_predictors(xd, xd->plane[0].dst.buf, + xd->plane[1].dst.buf, xd->plane[2].dst.buf, + xd->plane[0].dst.stride, + xd->plane[1].dst.stride, + xd->plane[2].dst.stride, bsize); +#endif // CONFIG_INTERINTRA } #if CONFIG_SUPERTX -void vp9_dec_build_inter_predictors_sby_sub8x8_extend(MACROBLOCKD *xd, - int mi_row, int mi_col, - int mi_row_ori, - int mi_col_ori, - BLOCK_SIZE top_bsize, - PARTITION_TYPE partition) { +void vp9_dec_build_inter_predictors_sby_sub8x8_extend( + 
MACROBLOCKD *xd, + int mi_row, int mi_col, + int mi_row_ori, + int mi_col_ori, + BLOCK_SIZE top_bsize, + PARTITION_TYPE partition) { const int mi_x = mi_col_ori * MI_SIZE; const int mi_y = mi_row_ori * MI_SIZE; uint8_t *orig_dst; diff --git a/vp9/common/vp9_reconintra.c b/vp9/common/vp9_reconintra.c index 308516ce9..6761e4d00 100644 --- a/vp9/common/vp9_reconintra.c +++ b/vp9/common/vp9_reconintra.c @@ -924,6 +924,7 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref, pred[mode][tx_size](dst, dst_stride, const_above_row, left_col); } } + #if CONFIG_FILTERINTRA static void filter_intra_predictors_4tap(uint8_t *ypred_ptr, int y_stride, int bs, @@ -1040,7 +1041,11 @@ static void build_filter_intra_predictors(const MACROBLOCKD *xd, int plane) { int i; DECLARE_ALIGNED_ARRAY(16, uint8_t, left_col, 64); +#if CONFIG_TX64X64 + DECLARE_ALIGNED_ARRAY(16, uint8_t, above_data, 256 + 16); +#else DECLARE_ALIGNED_ARRAY(16, uint8_t, above_data, 128 + 16); +#endif uint8_t *above_row = above_data + 16; const uint8_t *const_above_row = above_row; const int bs = 4 << tx_size; @@ -1186,3 +1191,395 @@ void vp9_predict_intra_block(const MACROBLOCKD *xd, int block_idx, int bwl_in, } #endif } + +#if CONFIG_INTERINTRA +static INLINE TX_SIZE intra_size_log2_for_interintra(int bs) { + switch (bs) { + case 4: + return TX_4X4; + break; + case 8: + return TX_8X8; + break; + case 16: + return TX_16X16; + break; + case 32: + return TX_32X32; + break; + case 64: + default: +#if CONFIG_TX64X64 + return TX_64X64; +#else + return TX_32X32; +#endif // CONFIG_TX64X64 + break; + } +} + +static void combine_interintra(PREDICTION_MODE mode, + uint8_t *comppred, + int compstride, + uint8_t *interpred, + int interstride, + uint8_t *intrapred, + int intrastride, + int bw, int bh) { + static const int scale_bits = 8; + static const int scale_max = 256; + static const int scale_round = 127; + static const int weights1d[64] = { + 128, 125, 122, 119, 116, 114, 111, 109, + 107, 105, 103, 101, 99, 97, 96, 94, + 93, 91, 90, 89, 88, 86, 85, 84, + 83, 82, 81, 81, 80, 79, 78, 78, + 77, 76, 76, 75, 75, 74, 74, 73, + 73, 72, 72, 71, 71, 71, 70, 70, + 70, 70, 69, 69, 69, 69, 68, 68, + 68, 68, 68, 67, 67, 67, 67, 67, + }; + + int size = MAX(bw, bh); + int size_scale = (size >= 64 ? 1 : + size == 32 ? 2 : + size == 16 ? 4 : + size == 8 ? 
8 : 16); + int i, j; + + switch (mode) { + case V_PRED: + for (i = 0; i < bh; ++i) { + for (j = 0; j < bw; ++j) { + int scale = weights1d[i * size_scale]; + comppred[i * compstride + j] = + ((scale_max - scale) * interpred[i * interstride + j] + + scale * intrapred[i * intrastride + j] + scale_round) + >> scale_bits; + } + } + break; + + case H_PRED: + for (i = 0; i < bh; ++i) { + for (j = 0; j < bw; ++j) { + int scale = weights1d[j * size_scale]; + comppred[i * compstride + j] = + ((scale_max - scale) * interpred[i * interstride + j] + + scale * intrapred[i * intrastride + j] + scale_round) + >> scale_bits; + } + } + break; + + case D63_PRED: + case D117_PRED: + for (i = 0; i < bh; ++i) { + for (j = 0; j < bw; ++j) { + int scale = (weights1d[i * size_scale] * 3 + + weights1d[j * size_scale]) >> 2; + comppred[i * compstride + j] = + ((scale_max - scale) * interpred[i * interstride + j] + + scale * intrapred[i * intrastride + j] + scale_round) + >> scale_bits; + } + } + break; + + case D207_PRED: + case D153_PRED: + for (i = 0; i < bh; ++i) { + for (j = 0; j < bw; ++j) { + int scale = (weights1d[j * size_scale] * 3 + + weights1d[i * size_scale]) >> 2; + comppred[i * compstride + j] = + ((scale_max - scale) * interpred[i * interstride + j] + + scale * intrapred[i * intrastride + j] + scale_round) + >> scale_bits; + } + } + break; + + case D135_PRED: + for (i = 0; i < bh; ++i) { + for (j = 0; j < bw; ++j) { + int scale = weights1d[(i < j ? i : j) * size_scale]; + comppred[i * compstride + j] = + ((scale_max - scale) * interpred[i * interstride + j] + + scale * intrapred[i * intrastride + j] + scale_round) + >> scale_bits; + } + } + break; + + case D45_PRED: + for (i = 0; i < bh; ++i) { + for (j = 0; j < bw; ++j) { + int scale = (weights1d[i * size_scale] + + weights1d[j * size_scale]) >> 1; + comppred[i * compstride + j] = + ((scale_max - scale) * interpred[i * interstride + j] + + scale * intrapred[i * intrastride + j] + scale_round) + >> scale_bits; + } + } + break; + + case TM_PRED: + case DC_PRED: + default: + for (i = 0; i < bh; ++i) { + for (j = 0; j < bw; ++j) { + comppred[i * compstride + j] = (interpred[i * interstride + j] + + intrapred[i * intrastride + j]) >> 1; + } + } + break; + } +} + + +static void build_intra_predictors_for_2nd_block_interintra( + const MACROBLOCKD *xd, const uint8_t *ref, + int ref_stride, uint8_t *dst, int dst_stride, + PREDICTION_MODE mode, TX_SIZE tx_size, + int up_available, int left_available, + int right_available, int bwltbh, + int x, int y, int plane) { + int i; + DECLARE_ALIGNED_ARRAY(16, uint8_t, left_col, 64); +#if CONFIG_TX64X64 + DECLARE_ALIGNED_ARRAY(16, uint8_t, above_data, 256 + 16); +#else + DECLARE_ALIGNED_ARRAY(16, uint8_t, above_data, 128 + 16); +#endif + uint8_t *above_row = above_data + 16; + const uint8_t *const_above_row = above_row; + const int bs = 4 << tx_size; + int frame_width, frame_height; + int x0, y0; + const struct macroblockd_plane *const pd = &xd->plane[plane]; + + const uint8_t *ref_fi; + int ref_stride_fi; + + // 127 127 127 .. 127 127 127 127 127 127 + // 129 A B .. Y Z + // 129 C D .. W X + // 129 E F .. U V + // 129 G H .. S T T T T T + // .. + + // Get current frame pointer, width and height. + if (plane == 0) { + frame_width = xd->cur_buf->y_width; + frame_height = xd->cur_buf->y_height; + } else { + frame_width = xd->cur_buf->uv_width; + frame_height = xd->cur_buf->uv_height; + } + + // Get block position in current frame. 
+ x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x; + y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y; + + vpx_memset(left_col, 129, 64); + + // left + if (left_available) { + if (bwltbh) { + ref_fi = ref; + ref_stride_fi = ref_stride; + } else { + ref_fi = dst; + ref_stride_fi = dst_stride; + } + if (xd->mb_to_bottom_edge < 0) { + /* slower path if the block needs border extension */ + if (y0 + bs <= frame_height) { + for (i = 0; i < bs; ++i) + left_col[i] = ref_fi[i * ref_stride_fi - 1]; + } else { + const int extend_bottom = frame_height - y0; + assert(extend_bottom >= 0); + for (i = 0; i < extend_bottom; ++i) + left_col[i] = ref_fi[i * ref_stride_fi - 1]; + for (; i < bs; ++i) + left_col[i] = ref_fi[(extend_bottom - 1) * ref_stride_fi - 1]; + } + } else { + /* faster path if the block does not need extension */ + for (i = 0; i < bs; ++i) + left_col[i] = ref_fi[i * ref_stride_fi - 1]; + } + } + + // TODO(hkuang) do not extend 2*bs pixels for all modes. + // above + if (up_available) { + const uint8_t *above_ref; + if (bwltbh) { + ref_fi = dst; + ref_stride_fi = dst_stride; + above_row[-1] = left_available ? ref[-ref_stride-1] : 129; + } else { + ref_fi = ref; + ref_stride_fi = ref_stride; + above_row[-1] = ref[-ref_stride-1]; + } + above_ref = ref_fi - ref_stride_fi; + if (xd->mb_to_right_edge < 0) { + /* slower path if the block needs border extension */ + if (x0 + 2 * bs <= frame_width) { + if (right_available && bs == 4) { + vpx_memcpy(above_row, above_ref, 2 * bs); + } else { + vpx_memcpy(above_row, above_ref, bs); + vpx_memset(above_row + bs, above_row[bs - 1], bs); + } + } else if (x0 + bs <= frame_width) { + const int r = frame_width - x0; + if (right_available && bs == 4) { + vpx_memcpy(above_row, above_ref, r); + vpx_memset(above_row + r, above_row[r - 1], + x0 + 2 * bs - frame_width); + } else { + vpx_memcpy(above_row, above_ref, bs); + vpx_memset(above_row + bs, above_row[bs - 1], bs); + } + } else if (x0 <= frame_width) { + const int r = frame_width - x0; + assert(r >= 0); + if (right_available && bs == 4) { + vpx_memcpy(above_row, above_ref, r); + vpx_memset(above_row + r, above_row[r - 1], + x0 + 2 * bs - frame_width); + } else { + vpx_memcpy(above_row, above_ref, r); + vpx_memset(above_row + r, above_row[r - 1], + x0 + 2 * bs - frame_width); + } + } + } else { + /* faster path if the block does not need extension */ + if (bs == 4 && right_available && left_available) { + const_above_row = above_ref; + } else { + vpx_memcpy(above_row, above_ref, bs); + if (bs == 4 && right_available) + vpx_memcpy(above_row + bs, above_ref + bs, bs); + else + vpx_memset(above_row + bs, above_row[bs - 1], bs); + } + } + } else { + vpx_memset(above_row, 127, bs * 2); + above_row[-1] = 127; + } + + // predict + if (mode == DC_PRED) { + dc_pred[left_available][up_available][tx_size](dst, dst_stride, + const_above_row, left_col); + } else { + pred[mode][tx_size](dst, dst_stride, const_above_row, left_col); + } +} + +// Break down rectangular intra prediction for joint spatio-temporal prediction +// into two square intra predictions. 
+static void build_intra_predictors_for_interintra( + MACROBLOCKD *xd, + uint8_t *src, int src_stride, + uint8_t *pred_ptr, int stride, + PREDICTION_MODE mode, + int bw, int bh, + int up_available, int left_available, + int right_available, int plane) { + if (bw == bh) { + build_intra_predictors(xd, src, src_stride, pred_ptr, stride, + mode, intra_size_log2_for_interintra(bw), + up_available, left_available, right_available, + 0, 0, plane); + } else if (bw < bh) { + const TX_SIZE tx_size = intra_size_log2_for_interintra(bw); + uint8_t *src_bottom = src + bw * src_stride; + uint8_t *pred_ptr_bottom = pred_ptr + bw * stride; + build_intra_predictors( + xd, src, src_stride, pred_ptr, stride, mode, tx_size, + up_available, left_available, right_available, + 0, 0, plane); + build_intra_predictors_for_2nd_block_interintra( + xd, src_bottom, src_stride, pred_ptr_bottom, stride, mode, tx_size, + up_available, left_available, 0, + 1, 0, bw, plane); + } else { + const TX_SIZE tx_size = intra_size_log2_for_interintra(bh); + uint8_t *src_right = src + bh; + uint8_t *pred_ptr_right = pred_ptr + bh; + build_intra_predictors( + xd, src, src_stride, pred_ptr, stride, mode, tx_size, + up_available, left_available, 1, + 0, 0, plane); + build_intra_predictors_for_2nd_block_interintra( + xd, src_right, src_stride, pred_ptr_right, stride, mode, tx_size, + up_available, left_available, right_available, + 0, bh, 0, plane); + } +} + +void vp9_build_interintra_predictors_sby(MACROBLOCKD *xd, + uint8_t *ypred, + int ystride, + BLOCK_SIZE bsize) { + int bw = 4 << b_width_log2_lookup[bsize]; + int bh = 4 << b_height_log2_lookup[bsize]; + uint8_t intrapredictor[4096]; + build_intra_predictors_for_interintra( + xd, xd->plane[0].dst.buf, xd->plane[0].dst.stride, + intrapredictor, bw, + xd->mi[0].src_mi->mbmi.interintra_mode, bw, bh, + xd->up_available, xd->left_available, 0, 0); + combine_interintra(xd->mi[0].src_mi->mbmi.interintra_mode, + xd->plane[0].dst.buf, xd->plane[0].dst.stride, + ypred, ystride, intrapredictor, bw, bw, bh); +} + +void vp9_build_interintra_predictors_sbuv(MACROBLOCKD *xd, + uint8_t *upred, + uint8_t *vpred, + int ustride, int vstride, + BLOCK_SIZE bsize) { + int bwl = b_width_log2_lookup[bsize], bw = 2 << bwl; + int bhl = b_height_log2_lookup[bsize], bh = 2 << bhl; + uint8_t uintrapredictor[4096]; + uint8_t vintrapredictor[4096]; + build_intra_predictors_for_interintra( + xd, xd->plane[1].dst.buf, xd->plane[1].dst.stride, + uintrapredictor, bw, + xd->mi[0].src_mi->mbmi.interintra_uv_mode, bw, bh, + xd->up_available, xd->left_available, 0, 1); + build_intra_predictors_for_interintra( + xd, xd->plane[2].dst.buf, xd->plane[2].dst.stride, + vintrapredictor, bw, + xd->mi[0].src_mi->mbmi.interintra_uv_mode, bw, bh, + xd->up_available, xd->left_available, 0, 2); + combine_interintra(xd->mi[0].src_mi->mbmi.interintra_uv_mode, + xd->plane[1].dst.buf, xd->plane[1].dst.stride, + upred, ustride, uintrapredictor, bw, bw, bh); + combine_interintra(xd->mi[0].src_mi->mbmi.interintra_uv_mode, + xd->plane[2].dst.buf, xd->plane[2].dst.stride, + vpred, vstride, vintrapredictor, bw, bw, bh); +} + +void vp9_build_interintra_predictors(MACROBLOCKD *xd, + uint8_t *ypred, + uint8_t *upred, + uint8_t *vpred, + int ystride, int ustride, int vstride, + BLOCK_SIZE bsize) { + vp9_build_interintra_predictors_sby(xd, ypred, ystride, bsize); + vp9_build_interintra_predictors_sbuv(xd, upred, vpred, + ustride, vstride, bsize); +} +#endif // CONFIG_INTERINTRA diff --git a/vp9/common/vp9_reconintra.h
index 9f7df2263..f81dba16c 100644 --- a/vp9/common/vp9_reconintra.h +++ b/vp9/common/vp9_reconintra.h @@ -28,6 +28,26 @@ void vp9_predict_intra_block(const MACROBLOCKD *xd, int block_idx, int bwl_in, const uint8_t *ref, int ref_stride, uint8_t *dst, int dst_stride, int aoff, int loff, int plane); + +#if CONFIG_INTERINTRA +void vp9_build_interintra_predictors(MACROBLOCKD *xd, + uint8_t *ypred, + uint8_t *upred, + uint8_t *vpred, + int ystride, + int ustride, + int vstride, + BLOCK_SIZE bsize); +void vp9_build_interintra_predictors_sby(MACROBLOCKD *xd, + uint8_t *ypred, + int ystride, + BLOCK_SIZE bsize); +void vp9_build_interintra_predictors_sbuv(MACROBLOCKD *xd, + uint8_t *upred, + uint8_t *vpred, + int ustride, int vstride, + BLOCK_SIZE bsize); +#endif // CONFIG_INTERINTRA #ifdef __cplusplus } // extern "C" #endif diff --git a/vp9/decoder/vp9_decodeframe.c b/vp9/decoder/vp9_decodeframe.c index dca27e80f..57e4999d9 100644 --- a/vp9/decoder/vp9_decodeframe.c +++ b/vp9/decoder/vp9_decodeframe.c @@ -2128,6 +2128,15 @@ static int read_compressed_header(VP9Decoder *pbi, const uint8_t *data, vp9_diff_update_prob(&r, &fc->copy_mode_probs[j][i]); } #endif +#if CONFIG_INTERINTRA + if (cm->reference_mode != COMPOUND_REFERENCE) { + for (i = 0; i < BLOCK_SIZES; i++) { + if (is_interintra_allowed(i)) { + vp9_diff_update_prob(&r, &fc->interintra_prob[i]); + } + } + } +#endif // CONFIG_INTERINTRA } return vp9_reader_has_error(&r); diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c index d5502a982..7ad33e067 100644 --- a/vp9/decoder/vp9_decodemv.c +++ b/vp9/decoder/vp9_decodemv.c @@ -681,6 +681,21 @@ static void read_inter_block_mode_info(VP9_COMMON *const cm, ? read_switchable_interp_filter(cm, xd, r) : cm->interp_filter; +#if CONFIG_INTERINTRA + if (is_interintra_allowed(bsize) && + is_inter_mode(mbmi->mode) && + (mbmi->ref_frame[1] <= INTRA_FRAME)) { + mbmi->ref_frame[1] = vp9_read(r, cm->fc.interintra_prob[bsize]) ? 
+ INTRA_FRAME : NONE; + cm->counts.interintra[bsize][mbmi->ref_frame[1] == INTRA_FRAME]++; + if (mbmi->ref_frame[1] == INTRA_FRAME) { + mbmi->interintra_mode = + read_intra_mode_y(cm, r, size_group_lookup[bsize]); + mbmi->interintra_uv_mode = mbmi->interintra_mode; + } + } +#endif // CONFIG_INTERINTRA + if (bsize < BLOCK_8X8) { const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; // 1 or 2 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; // 1 or 2 @@ -775,6 +790,10 @@ static void read_inter_frame_mode_info(VP9_COMMON *const cm, inter_block = 1; *mbmi = *inter_ref_list[mbmi->copy_mode - REF0]; +#if CONFIG_INTERINTRA + if (mbmi->ref_frame[1] == INTRA_FRAME) + mbmi->ref_frame[1] = NONE; +#endif // CONFIG_INTERINTRA #if CONFIG_SUPERTX mbmi->tx_size = tx_size_backup; #endif diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c index 44cd9416e..3a463e429 100644 --- a/vp9/encoder/vp9_bitstream.c +++ b/vp9/encoder/vp9_bitstream.c @@ -524,6 +524,19 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi, } else { assert(mbmi->interp_filter == cm->interp_filter); } +#if CONFIG_INTERINTRA + if (cpi->common.reference_mode != COMPOUND_REFERENCE && + is_interintra_allowed(bsize) && + is_inter_mode(mode) && + (mbmi->ref_frame[1] <= INTRA_FRAME)) { + vp9_write(w, mbmi->ref_frame[1] == INTRA_FRAME, + cm->fc.interintra_prob[bsize]); + if (mbmi->ref_frame[1] == INTRA_FRAME) { + write_intra_mode(w, mbmi->interintra_mode, + cm->fc.y_mode_prob[size_group_lookup[bsize]]); + } + } +#endif // CONFIG_INTERINTRA if (bsize < BLOCK_8X8) { const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; @@ -1621,6 +1634,17 @@ static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) { &header_bc); } #endif +#if CONFIG_INTERINTRA + if (cm->reference_mode != COMPOUND_REFERENCE) { + for (i = 0; i < BLOCK_SIZES; i++) { + if (is_interintra_allowed(i)) { + vp9_cond_prob_diff_update(&header_bc, + &fc->interintra_prob[i], + cm->counts.interintra[i]); + } + } + } +#endif // CONFIG_INTERINTRA } vp9_stop_encode(&header_bc); diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c index 2afee3c0f..04d5090bf 100644 --- a/vp9/encoder/vp9_encodeframe.c +++ b/vp9/encoder/vp9_encodeframe.c @@ -588,6 +588,9 @@ static void choose_partitioning(VP9_COMP *cpi, vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col, sf); xd->mi[0].src_mi->mbmi.ref_frame[0] = LAST_FRAME; +#if CONFIG_INTERINTRA + xd->mi[0].src_mi->mbmi.ref_frame[1] = NONE; +#endif // CONFIG_INTERINTRA xd->mi[0].src_mi->mbmi.sb_type = BLOCK_64X64; vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, xd->mi[0].src_mi->mbmi.ref_mvs[LAST_FRAME], @@ -857,6 +860,18 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, const int ctx = vp9_get_pred_context_switchable_interp(xd); ++cm->counts.switchable_interp[ctx][mbmi->interp_filter]; } +#if CONFIG_INTERINTRA + if (is_interintra_allowed(bsize) && + is_inter_mode(mbmi->mode) && + (mbmi->ref_frame[1] <= INTRA_FRAME)) { + if (mbmi->ref_frame[1] == INTRA_FRAME) { + ++cm->counts.y_mode[size_group_lookup[bsize]][mbmi->interintra_mode]; + ++cm->counts.interintra[bsize][1]; + } else { + ++cm->counts.interintra[bsize][0]; + } + } +#endif } rd_opt->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff; @@ -956,6 +971,19 @@ static void update_state_supertx(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx, const int ctx = vp9_get_pred_context_switchable_interp(xd); ++cm->counts.switchable_interp[ctx][mbmi->interp_filter]; } +#if CONFIG_INTERINTRA + if (is_interintra_allowed(bsize) 
&& + is_inter_mode(mbmi->mode) && + (mbmi->ref_frame[1] <= INTRA_FRAME)) { + if (mbmi->ref_frame[1] == INTRA_FRAME) { + assert(0); + ++cm->counts.y_mode[size_group_lookup[bsize]][mbmi->interintra_mode]; + ++cm->counts.interintra[bsize][1]; + } else { + ++cm->counts.interintra[bsize][0]; + } + } +#endif // CONFIG_INTERINTRA } rd_opt->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff; @@ -4797,7 +4825,12 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled, #if CONFIG_SUPERTX static int check_intra_b(PICK_MODE_CONTEXT *ctx) { +#if CONFIG_INTERINTRA + return !is_inter_mode((&ctx->mic)->mbmi.mode) || + (ctx->mic.mbmi.ref_frame[1] == INTRA_FRAME); +#else return !is_inter_mode((&ctx->mic)->mbmi.mode); +#endif // CONFIG_INTERINTRA } static int check_intra_sb(VP9_COMP *cpi, const TileInfo *const tile, diff --git a/vp9/encoder/vp9_mbgraph.c b/vp9/encoder/vp9_mbgraph.c index 240ba2768..1ba19244b 100644 --- a/vp9/encoder/vp9_mbgraph.c +++ b/vp9/encoder/vp9_mbgraph.c @@ -65,6 +65,9 @@ static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, xd->mi[0].src_mi->mbmi.mode = NEWMV; xd->mi[0].src_mi->mbmi.mv[0].as_mv = *dst_mv; +#if CONFIG_INTERINTRA + xd->mi[0].src_mi->mbmi.ref_frame[1] = NONE; +#endif vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16); diff --git a/vp9/encoder/vp9_rd.c b/vp9/encoder/vp9_rd.c index 1a43d134e..6054ccf41 100644 --- a/vp9/encoder/vp9_rd.c +++ b/vp9/encoder/vp9_rd.c @@ -609,6 +609,24 @@ void vp9_set_rd_speed_thresholds(VP9_COMP *cpi) { rd->thresh_mult[THR_D153_PRED] += 2500; rd->thresh_mult[THR_D207_PRED] += 2500; rd->thresh_mult[THR_D63_PRED] += 2500; + +#if CONFIG_INTERINTRA + rd->thresh_mult[THR_COMP_INTERINTRA_ZEROL ] += 1500; + rd->thresh_mult[THR_COMP_INTERINTRA_ZEROG ] += 1500; + rd->thresh_mult[THR_COMP_INTERINTRA_ZEROA ] += 1500; + + rd->thresh_mult[THR_COMP_INTERINTRA_NEARESTL] += 1500; + rd->thresh_mult[THR_COMP_INTERINTRA_NEARESTG] += 1500; + rd->thresh_mult[THR_COMP_INTERINTRA_NEARESTA] += 1500; + + rd->thresh_mult[THR_COMP_INTERINTRA_NEARL ] += 1500; + rd->thresh_mult[THR_COMP_INTERINTRA_NEARG ] += 1500; + rd->thresh_mult[THR_COMP_INTERINTRA_NEARA ] += 1500; + + rd->thresh_mult[THR_COMP_INTERINTRA_NEWL ] += 2000; + rd->thresh_mult[THR_COMP_INTERINTRA_NEWG ] += 2000; + rd->thresh_mult[THR_COMP_INTERINTRA_NEWA ] += 2000; +#endif } void vp9_set_rd_speed_thresholds_sub8x8(VP9_COMP *cpi) { diff --git a/vp9/encoder/vp9_rd.h b/vp9/encoder/vp9_rd.h index 1aa52663a..0f3fab3c5 100644 --- a/vp9/encoder/vp9_rd.h +++ b/vp9/encoder/vp9_rd.h @@ -33,7 +33,12 @@ extern "C" { #define INVALID_MV 0x80008000 +#if CONFIG_INTERINTRA +#define MAX_MODES 42 +#define INTERINTRA_START_MODE 30 +#else #define MAX_MODES 30 +#endif // CONFIG_INTERINTRA #define MAX_REFS 6 // This enumerator type needs to be kept aligned with the mode order in @@ -78,6 +83,23 @@ typedef enum { THR_D63_PRED, THR_D117_PRED, THR_D45_PRED, + +#if CONFIG_INTERINTRA + THR_COMP_INTERINTRA_ZEROL, + THR_COMP_INTERINTRA_NEARESTL, + THR_COMP_INTERINTRA_NEARL, + THR_COMP_INTERINTRA_NEWL, + + THR_COMP_INTERINTRA_ZEROG, + THR_COMP_INTERINTRA_NEARESTG, + THR_COMP_INTERINTRA_NEARG, + THR_COMP_INTERINTRA_NEWG, + + THR_COMP_INTERINTRA_ZEROA, + THR_COMP_INTERINTRA_NEARESTA, + THR_COMP_INTERINTRA_NEARA, + THR_COMP_INTERINTRA_NEWA, +#endif // CONFIG_INTERINTRA } THR_MODES; typedef enum { diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c index bff3dc165..0672a587b 100644 --- a/vp9/encoder/vp9_rdopt.c +++ b/vp9/encoder/vp9_rdopt.c @@ -122,6 +122,23 @@ static const 
MODE_DEFINITION vp9_mode_order[MAX_MODES] = { {D63_PRED, {INTRA_FRAME, NONE}}, {D117_PRED, {INTRA_FRAME, NONE}}, {D45_PRED, {INTRA_FRAME, NONE}}, + +#if CONFIG_INTERINTRA + {ZEROMV, {LAST_FRAME, INTRA_FRAME}}, + {NEARESTMV, {LAST_FRAME, INTRA_FRAME}}, + {NEARMV, {LAST_FRAME, INTRA_FRAME}}, + {NEWMV, {LAST_FRAME, INTRA_FRAME}}, + + {ZEROMV, {GOLDEN_FRAME, INTRA_FRAME}}, + {NEARESTMV, {GOLDEN_FRAME, INTRA_FRAME}}, + {NEARMV, {GOLDEN_FRAME, INTRA_FRAME}}, + {NEWMV, {GOLDEN_FRAME, INTRA_FRAME}}, + + {ZEROMV, {ALTREF_FRAME, INTRA_FRAME}}, + {NEARESTMV, {ALTREF_FRAME, INTRA_FRAME}}, + {NEARMV, {ALTREF_FRAME, INTRA_FRAME}}, + {NEWMV, {ALTREF_FRAME, INTRA_FRAME}}, +#endif // CONFIG_INTERINTRA }; static const REF_DEFINITION vp9_ref_order[MAX_REFS] = { @@ -1168,7 +1185,7 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP *cpi, MACROBLOCK *mb, bmode_costs = cpi->y_mode_costs[A][L]; } - this_rd = rd_pick_intra4x4block(cpi, mb, i, &best_mode, + this_rd = rd_pick_intra4x4block(cpi, mb, i, &best_mode, #if CONFIG_FILTERINTRA &best_fbit, #endif @@ -1881,6 +1898,9 @@ static int check_best_zero_mv( if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) && frame_mv[this_mode][ref_frames[0]].as_int == 0 && (ref_frames[1] == NONE || +#if CONFIG_INTERINTRA + ref_frames[1] == INTRA_FRAME || +#endif frame_mv[this_mode][ref_frames[1]].as_int == 0)) { int rfc = mode_context[ref_frames[0]]; int c1 = cost_mv_ref(cpi, NEARMV, rfc); @@ -1893,7 +1913,11 @@ static int check_best_zero_mv( if (c2 > c3) return 0; } else { assert(this_mode == ZEROMV); - if (ref_frames[1] == NONE) { + if (ref_frames[1] == NONE +#if CONFIG_INTERINTRA + || ref_frames[1] == INTRA_FRAME +#endif + ) { if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0) || (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0)) return 0; @@ -2741,6 +2765,10 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, int_mv single_newmv[MAX_REF_FRAMES], INTERP_FILTER (*single_filter)[MAX_REF_FRAMES], int (*single_skippable)[MAX_REF_FRAMES], +#if CONFIG_INTERINTRA + int *compmode_interintra_cost, + int single_newmv_rate[MAX_REF_FRAMES], +#endif int64_t *psse, const int64_t ref_best_rd) { VP9_COMMON *cm = &cpi->common; @@ -2768,6 +2796,10 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, uint8_t *orig_dst[MAX_MB_PLANE]; int orig_dst_stride[MAX_MB_PLANE]; int rs = 0; +#if CONFIG_INTERINTRA + int rate_mv_tmp = 0; + const int is_comp_interintra_pred = (mbmi->ref_frame[1] == INTRA_FRAME); +#endif INTERP_FILTER best_filter = SWITCHABLE; uint8_t skip_txfm[MAX_MB_PLANE << 2] = {0}; int64_t bsse[MAX_MB_PLANE << 2] = {0}; @@ -2830,9 +2862,26 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, &mbmi->ref_mvs[refs[1]][0].as_mv, x->nmvjointcost, x->mvcost, MV_COST_WEIGHT); } +#if !CONFIG_INTERINTRA *rate2 += rate_mv; +#endif } else { int_mv tmp_mv; +#if CONFIG_INTERINTRA + if (!is_comp_interintra_pred) { + single_motion_search(cpi, x, bsize, mi_row, mi_col, + &tmp_mv, &rate_mv); + if (tmp_mv.as_int == INVALID_MV) + return INT64_MAX; + frame_mv[refs[0]].as_int = + xd->mi[0].src_mi->bmi[0].as_mv[0].as_int = tmp_mv.as_int; + single_newmv[refs[0]].as_int = tmp_mv.as_int; + single_newmv_rate[refs[0]] = rate_mv; + } else { + frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int; + rate_mv = single_newmv_rate[refs[0]]; + } +#else single_motion_search(cpi, x, bsize, mi_row, mi_col, &tmp_mv, &rate_mv); if (tmp_mv.as_int == INVALID_MV) @@ -2841,7 +2890,11 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, 
MACROBLOCK *x, frame_mv[refs[0]].as_int = xd->mi[0].src_mi->bmi[0].as_mv[0].as_int = tmp_mv.as_int; single_newmv[refs[0]].as_int = tmp_mv.as_int; +#endif // CONFIG_INTERINTRA } +#if CONFIG_INTERINTRA + rate_mv_tmp = rate_mv; +#endif } for (i = 0; i < is_comp_pred + 1; ++i) { @@ -2925,6 +2978,10 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, if ((cm->interp_filter == SWITCHABLE && (!i || best_needs_copy)) || +#if CONFIG_INTERINTRA + (is_inter_mode(this_mode) && is_comp_interintra_pred && + is_interintra_allowed(mbmi->sb_type)) || +#endif (cm->interp_filter != SWITCHABLE && (cm->interp_filter == mbmi->interp_filter || (i == 0 && intpel_mv)))) { @@ -2988,6 +3045,61 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, cm->interp_filter : best_filter; rs = cm->interp_filter == SWITCHABLE ? vp9_get_switchable_rate(cpi) : 0; +#if CONFIG_INTERINTRA + if ((!is_comp_pred) && is_comp_interintra_pred && + is_interintra_allowed(mbmi->sb_type)) { + PREDICTION_MODE interintra_mode, best_interintra_mode = DC_PRED; + int64_t best_interintra_rd = INT64_MAX; + int rmode, rate_sum; + int64_t dist_sum; + int j; + mbmi->ref_frame[1] = NONE; + for (j = 0; j < MAX_MB_PLANE; j++) { + xd->plane[j].dst.buf = tmp_buf + j * 64 * 64; + xd->plane[j].dst.stride = 64; + } + vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize); + restore_dst_buf(xd, orig_dst, orig_dst_stride); + mbmi->ref_frame[1] = INTRA_FRAME; + + for (interintra_mode = DC_PRED; interintra_mode <= TM_PRED; + ++interintra_mode) { + mbmi->interintra_mode = interintra_mode; + mbmi->interintra_uv_mode = interintra_mode; + rmode = cpi->mbmode_cost[mbmi->interintra_mode]; + vp9_build_interintra_predictors(xd, tmp_buf, tmp_buf + 64 * 64, + tmp_buf + 2* 64 * 64, 64, 64, 64, bsize); + model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, + &skip_txfm_sb, &skip_sse_sb); + rd = RDCOST(x->rdmult, x->rddiv, rmode + rate_sum, dist_sum); + if (rd < best_interintra_rd) { + best_interintra_rd = rd; + best_interintra_mode = interintra_mode; + } + } + mbmi->interintra_mode = best_interintra_mode; + mbmi->interintra_uv_mode = best_interintra_mode; + if (ref_best_rd < INT64_MAX && + best_interintra_rd / 2 > ref_best_rd) { + return INT64_MAX; + } + + pred_exists = 0; + tmp_rd = best_interintra_rd; + } + if (!is_comp_pred && is_interintra_allowed(mbmi->sb_type)) { + *compmode_interintra_cost = vp9_cost_bit(cm->fc.interintra_prob[bsize], + is_comp_interintra_pred); + if (is_comp_interintra_pred) { + *compmode_interintra_cost += cpi->mbmode_cost[mbmi->interintra_mode]; + } + } +#endif // CONFIG_INTERINTRA + +#if CONFIG_INTERINTRA + *rate2 += rate_mv_tmp; +#endif + if (pred_exists) { if (best_needs_copy) { // again temporarily set the buffers to local memory to prevent a memcpy @@ -3310,6 +3422,9 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, #if CONFIG_FILTERINTRA int fbit_uv[TX_SIZES]; #endif +#if CONFIG_INTERINTRA + int single_newmv_rate[MAX_REF_FRAMES] = { 0 }; +#endif // CONFIG_INTERINTRA const int intra_cost_penalty = vp9_get_intra_cost_penalty( cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth); int best_skip2 = 0; @@ -3471,6 +3586,9 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, int64_t this_rd = INT64_MAX; int disable_skip = 0; int compmode_cost = 0; +#if CONFIG_INTERINTRA + int compmode_interintra_cost = 0; +#endif int rate2 = 0, rate_y = 0, rate_uv = 0; int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0; int skippable = 0; @@ -3626,6 +3744,11 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP 
*cpi, MACROBLOCK *x, this_mode, ref_frames)) continue; } +#if CONFIG_INTERINTRA + if (ref_frame > INTRA_FRAME && second_ref_frame == INTRA_FRAME && + !is_interintra_allowed(bsize)) + continue; +#endif mbmi->mode = this_mode; mbmi->uv_mode = DC_PRED; @@ -3656,6 +3779,10 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, for (i = 0; i < TX_MODES; ++i) tx_cache[i] = INT64_MAX; +#if CONFIG_INTERINTRA + mbmi->interintra_mode = (PREDICTION_MODE)(DC_PRED - 1); + mbmi->interintra_uv_mode = (PREDICTION_MODE)(DC_PRED - 1); +#endif if (ref_frame == INTRA_FRAME) { TX_SIZE uv_tx; @@ -3773,6 +3900,12 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, rate2 += intra_cost_penalty; distortion2 = distortion_y + distortion_uv; } else { +#if CONFIG_INTERINTRA + if (second_ref_frame == INTRA_FRAME) { + mbmi->interintra_mode = best_intra_mode; + mbmi->interintra_uv_mode = best_intra_mode; + } +#endif #if CONFIG_EXT_TX mbmi->ext_txfrm = NORM; #endif @@ -3782,8 +3915,14 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, &rate_y, &rate_uv, &disable_skip, frame_mv, mi_row, mi_col, - single_newmv, single_inter_filter, - single_skippable, &total_sse, best_rd); + single_newmv, + single_inter_filter, + single_skippable, +#if CONFIG_INTERINTRA + &compmode_interintra_cost, + single_newmv_rate, +#endif + &total_sse, best_rd); if (this_rd == INT64_MAX) continue; @@ -3793,6 +3932,10 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, rate2 += compmode_cost; } +#if CONFIG_INTERINTRA + rate2 += compmode_interintra_cost; +#endif // CONFIG_INTERINTRA + // Estimate the reference frame signaling cost and add it // to the rolling cost variable. if (comp_pred) { @@ -4113,6 +4256,10 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, #endif // CONFIG_EXT_TX *mbmi = *inter_ref_list[copy_mode - REF0]; +#if CONFIG_INTERINTRA + if (mbmi->ref_frame[1] == INTRA_FRAME) + mbmi->ref_frame[1] = NONE; +#endif mbmi->sb_type = bsize; mbmi->inter_ref_count = inter_ref_count; mbmi->copy_mode = copy_mode;
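
A note on the prediction blend: combine_interintra() above mixes the inter and intra predictors with a weight that depends on a pixel's distance from the intra-predicted edge. The 64-entry weights1d table (values out of 256, decaying from 128 to 67) is stretched across the block by size_scale; directional modes index it by row, column, or a combination of the two, and DC_PRED/TM_PRED fall back to a plain average. Below is a minimal standalone sketch of the V_PRED branch; only the table and the arithmetic are taken from the patch, while the helper name blend_v_pred and the main() driver are illustrative.

/*
 * Sketch of the V_PRED branch of combine_interintra(): the intra weight
 * decays with the row index, so rows near the (intra-predicted) top edge
 * lean on the intra predictor and rows further down lean on the inter
 * predictor.  Constants mirror the patch (scale_bits = 8, round = 127).
 */
#include <stdint.h>
#include <stdio.h>

static const int weights1d[64] = {
  128, 125, 122, 119, 116, 114, 111, 109,
  107, 105, 103, 101,  99,  97,  96,  94,
   93,  91,  90,  89,  88,  86,  85,  84,
   83,  82,  81,  81,  80,  79,  78,  78,
   77,  76,  76,  75,  75,  74,  74,  73,
   73,  72,  72,  71,  71,  71,  70,  70,
   70,  70,  69,  69,  69,  69,  68,  68,
   68,  68,  68,  67,  67,  67,  67,  67,
};

// Vertical-mode blend for a bw x bh block (illustrative helper name).
static void blend_v_pred(uint8_t *comp, int comp_stride,
                         const uint8_t *inter, int inter_stride,
                         const uint8_t *intra, int intra_stride,
                         int bw, int bh) {
  const int size = bw > bh ? bw : bh;
  // Stretch the 64-entry table across the block size, as in the patch.
  const int size_scale = size >= 64 ? 1 : size == 32 ? 2 :
                         size == 16 ? 4 : size == 8 ? 8 : 16;
  for (int i = 0; i < bh; ++i) {
    const int scale = weights1d[i * size_scale];  // intra weight, out of 256
    for (int j = 0; j < bw; ++j) {
      comp[i * comp_stride + j] =
          ((256 - scale) * inter[i * inter_stride + j] +
           scale * intra[i * intra_stride + j] + 127) >> 8;
    }
  }
}

int main(void) {
  uint8_t inter[64], intra[64], comp[64];
  for (int k = 0; k < 64; ++k) { inter[k] = 0; intra[k] = 255; }
  blend_v_pred(comp, 8, inter, 8, intra, 8, 8, 8);
  // Row 0 is an even split (weight 128/256 -> 127); row 7 favours inter (68).
  printf("row 0: %d, row 7: %d\n", comp[0], comp[56]);
  return 0;
}

Because the table bottoms out at 67 rather than 0, the intra predictor keeps roughly a quarter of the weight even at the far edge of the block.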
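build_intra_predictors_for_interintra() handles rectangular blocks by issuing two square intra predictions: for a tall block (bw < bh) it predicts the top bw x bw square normally and then the square bw rows below it (src + bw * src_stride); for a wide block it predicts the left bh x bh square and then the square bh columns to its right. The bwltbh flag passed to build_intra_predictors_for_2nd_block_interintra() decides whether the second square borrows its above/left border from the original reference or from the square just predicted. A small sketch of the geometry, assuming VP9's 2:1 rectangular partitions so the two squares tile the block exactly (the SquareBlock type and split_into_squares() are illustrative, not part of the patch):

#include <stdio.h>

typedef struct { int x, y, size; } SquareBlock;

// Given a bw x bh block, return the square sub-blocks that
// build_intra_predictors_for_interintra() predicts, in prediction order.
static int split_into_squares(int bw, int bh, SquareBlock out[2]) {
  if (bw == bh) {
    out[0] = (SquareBlock){0, 0, bw};
    return 1;                      // already square: one ordinary prediction
  } else if (bw < bh) {            // tall block: top square, then bottom
    out[0] = (SquareBlock){0, 0, bw};
    out[1] = (SquareBlock){0, bw, bw};
    return 2;
  } else {                         // wide block: left square, then right
    out[0] = (SquareBlock){0, 0, bh};
    out[1] = (SquareBlock){bh, 0, bh};
    return 2;
  }
}

int main(void) {
  SquareBlock b[2];
  const int n = split_into_squares(16, 32, b);   // e.g. a 16x32 luma block
  for (int i = 0; i < n; ++i)
    printf("square %d at (%d,%d), %dx%d\n", i, b[i].x, b[i].y,
           b[i].size, b[i].size);
  return 0;
}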
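On the bitstream side, the change is a single flag per eligible single-reference inter block, coded with interintra_prob[bsize] and followed, when set, by an intra mode coded from the y_mode probabilities of the block's size group; ref_frame[1] == INTRA_FRAME then marks the block as interintra everywhere else in the codebase. A condensed sketch of the eligibility test that vp9_decodemv.c and vp9_bitstream.c must agree on (enum values mirror vp9_enums.h as I read it; interintra_flag_is_coded() is an illustrative name, not a function in the patch):

#include <stdbool.h>
#include <stdio.h>

enum { BLOCK_4X4, BLOCK_4X8, BLOCK_8X4, BLOCK_8X8, /* ... */ BLOCK_64X64 = 12,
       BLOCK_SIZES };
enum { NONE = -1, INTRA_FRAME = 0, LAST_FRAME, GOLDEN_FRAME, ALTREF_FRAME };

// Mirrors is_interintra_allowed(): 8x8 up to, but not including, 64x64.
static bool is_interintra_allowed(int sb_type) {
  return sb_type >= BLOCK_8X8 && sb_type < BLOCK_64X64;
}

// The interintra flag is only coded for single-reference inter blocks of an
// allowed size; ref_frame[1] <= INTRA_FRAME means "no real second reference".
static bool interintra_flag_is_coded(int bsize, bool is_inter, int ref_frame1) {
  return is_interintra_allowed(bsize) && is_inter && ref_frame1 <= INTRA_FRAME;
}

int main(void) {
  printf("%d\n", interintra_flag_is_coded(BLOCK_8X8, true, NONE));        // 1
  printf("%d\n", interintra_flag_is_coded(BLOCK_64X64, true, NONE));      // 0
  printf("%d\n", interintra_flag_is_coded(BLOCK_8X8, true, LAST_FRAME));  // 0
  return 0;
}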