diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index d26048cdf..d293cf2cf 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -81,12 +81,6 @@ typedef struct {

   // TODO(slavarnway): Delete and use bmi[3].as_mv[] instead.
   int_mv mv[2];
-
-#if CONFIG_VP9_ENCODER
-  // TODO(slavarnway): Move to encoder
-  int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
-  uint8_t mode_context[MAX_REF_FRAMES];
-#endif
 } MB_MODE_INFO;

 typedef struct MODE_INFO {
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index 1ebdd066b..4ca4083a6 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -242,6 +242,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
   const MACROBLOCKD *const xd = &x->e_mbd;
   const struct segmentation *const seg = &cm->seg;
   const MB_MODE_INFO *const mbmi = &mi->mbmi;
+  const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
   const PREDICTION_MODE mode = mbmi->mode;
   const int segment_id = mbmi->segment_id;
   const BLOCK_SIZE bsize = mbmi->sb_type;
@@ -288,7 +289,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
     }
     write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mode]);
   } else {
-    const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
+    const int mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
     const vp9_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
     write_ref_frames(cm, xd, w);

@@ -321,7 +322,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
           if (b_mode == NEWMV) {
             for (ref = 0; ref < 1 + is_compound; ++ref)
               vp9_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
-                            &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
+                            &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
                             nmvc, allow_hp);
           }
         }
@@ -330,7 +331,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
       if (mode == NEWMV) {
         for (ref = 0; ref < 1 + is_compound; ++ref)
           vp9_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
-                        &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
+                        &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
                         allow_hp);
       }
     }
@@ -384,6 +385,9 @@ static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,

   xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
   m = xd->mi[0];
+  cpi->td.mb.mbmi_ext = cpi->td.mb.mbmi_ext_base +
+      (mi_row * cm->mi_cols + mi_col);
+
   set_mi_row_col(xd, tile,
                  mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
                  mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
diff --git a/vp9/encoder/vp9_block.h b/vp9/encoder/vp9_block.h
index 04a1b8f3c..ff447b764 100644
--- a/vp9/encoder/vp9_block.h
+++ b/vp9/encoder/vp9_block.h
@@ -47,11 +47,18 @@ struct macroblock_plane {
 typedef unsigned int vp9_coeff_cost[PLANE_TYPES][REF_TYPES][COEF_BANDS][2]
                                    [COEFF_CONTEXTS][ENTROPY_TOKENS];

+typedef struct {
+  int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
+  uint8_t mode_context[MAX_REF_FRAMES];
+} MB_MODE_INFO_EXT;
+
 typedef struct macroblock MACROBLOCK;
 struct macroblock {
   struct macroblock_plane plane[MAX_MB_PLANE];

   MACROBLOCKD e_mbd;
+  MB_MODE_INFO_EXT *mbmi_ext;
+  MB_MODE_INFO_EXT *mbmi_ext_base;
   int skip_block;
   int select_tx_size;
   int skip_recode;
diff --git a/vp9/encoder/vp9_context_tree.h b/vp9/encoder/vp9_context_tree.h
index 70bf032c3..4b464ce61 100644
--- a/vp9/encoder/vp9_context_tree.h
+++ b/vp9/encoder/vp9_context_tree.h
@@ -12,6 +12,7 @@
 #define VP9_ENCODER_VP9_CONTEXT_TREE_H_

 #include "vp9/common/vp9_blockd.h"
+#include "vp9/encoder/vp9_block.h"

 struct VP9_COMP;
 struct VP9Common;
@@ -20,6 +21,7 @@ struct ThreadData;
 // Structure to hold snapshot of coding context during the mode picking process
 typedef struct {
   MODE_INFO mic;
+  MB_MODE_INFO_EXT mbmi_ext;
   uint8_t *zcoeff_blk;
   tran_low_t *coeff[MAX_MB_PLANE][3];
   tran_low_t *qcoeff[MAX_MB_PLANE][3];
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index cd8c4e17d..cb99af781 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -170,12 +170,14 @@ static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, MACROBLOCK *x,
 // Lighter version of set_offsets that only sets the mode info
 // pointers.
 static INLINE void set_mode_info_offsets(VP9_COMMON *const cm,
+                                         MACROBLOCK *const x,
                                          MACROBLOCKD *const xd,
                                          int mi_row,
                                          int mi_col) {
   const int idx_str = xd->mi_stride * mi_row + mi_col;
   xd->mi = cm->mi_grid_visible + idx_str;
   xd->mi[0] = cm->mi + idx_str;
+  x->mbmi_ext = x->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
 }

 static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
@@ -190,7 +192,8 @@ static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,

   set_skip_context(xd, mi_row, mi_col);

-  set_mode_info_offsets(cm, xd, mi_row, mi_col);
+  set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
+
   mbmi = &xd->mi[0]->mbmi;
@@ -249,11 +252,12 @@ static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
 }

 static void set_block_size(VP9_COMP * const cpi,
+                           MACROBLOCK *const x,
                            MACROBLOCKD *const xd,
                            int mi_row, int mi_col,
                            BLOCK_SIZE bsize) {
   if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
-    set_mode_info_offsets(&cpi->common, xd, mi_row, mi_col);
+    set_mode_info_offsets(&cpi->common, x, xd, mi_row, mi_col);
     xd->mi[0]->mbmi.sb_type = bsize;
   }
 }
@@ -384,6 +388,7 @@ static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
 }

 static int set_vt_partitioning(VP9_COMP *cpi,
+                               MACROBLOCK *const x,
                                MACROBLOCKD *const xd,
                                void *data,
                                BLOCK_SIZE bsize,
@@ -414,7 +419,7 @@ static int set_vt_partitioning(VP9_COMP *cpi,
     if (mi_col + block_width / 2 < cm->mi_cols &&
         mi_row + block_height / 2 < cm->mi_rows &&
         vt.part_variances->none.variance < threshold) {
-      set_block_size(cpi, xd, mi_row, mi_col, bsize);
+      set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
       return 1;
     }
     return 0;
@@ -432,7 +437,7 @@
     if (mi_col + block_width / 2 < cm->mi_cols &&
         mi_row + block_height / 2 < cm->mi_rows &&
         vt.part_variances->none.variance < threshold) {
-      set_block_size(cpi, xd, mi_row, mi_col, bsize);
+      set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
       return 1;
     }

@@ -444,8 +449,8 @@
       if (vt.part_variances->vert[0].variance < threshold &&
           vt.part_variances->vert[1].variance < threshold &&
           get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
-        set_block_size(cpi, xd, mi_row, mi_col, subsize);
-        set_block_size(cpi, xd, mi_row, mi_col + block_width / 2, subsize);
+        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
+        set_block_size(cpi, x, xd, mi_row, mi_col + block_width / 2, subsize);
         return 1;
       }
     }
@@ -457,8 +462,8 @@
       if (vt.part_variances->horz[0].variance < threshold &&
           vt.part_variances->horz[1].variance < threshold &&
           get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
-        set_block_size(cpi, xd, mi_row, mi_col, subsize);
-        set_block_size(cpi, xd, mi_row + block_height / 2, mi_col, subsize);
+        set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
+        set_block_size(cpi, x, xd, mi_row + block_height / 2, mi_col, subsize);
         return 1;
       }
     }
@@ -770,7 +775,7 @@ static int choose_partitioning(VP9_COMP *cpi,
     const int block_height = num_8x8_blocks_high_lookup[BLOCK_64X64];
     if (mi_col + block_width / 2 < cm->mi_cols &&
         mi_row + block_height / 2 < cm->mi_rows) {
-      set_block_size(cpi, xd, mi_row, mi_col, BLOCK_64X64);
+      set_block_size(cpi, x, xd, mi_row, mi_col, BLOCK_64X64);
       return 0;
     }
   }
@@ -903,13 +908,13 @@
   // Now go through the entire structure, splitting every block size until
   // we get to one that's got a variance lower than our threshold.
   if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
-      !set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col,
+      !set_vt_partitioning(cpi, x, xd, &vt, BLOCK_64X64, mi_row, mi_col,
                            thresholds[0], BLOCK_16X16, force_split[0])) {
     for (i = 0; i < 4; ++i) {
       const int x32_idx = ((i & 1) << 2);
       const int y32_idx = ((i >> 1) << 2);
       const int i2 = i << 2;
-      if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32,
+      if (!set_vt_partitioning(cpi, x, xd, &vt.split[i], BLOCK_32X32,
                                (mi_row + y32_idx), (mi_col + x32_idx),
                                thresholds[1], BLOCK_16X16,
                                force_split[i + 1])) {
@@ -922,7 +927,7 @@
           v16x16 *vtemp = (!is_key_frame &&
                            variance4x4downsample[i2 + j] == 1) ?
                            &vt2[i2 + j] : &vt.split[i].split[j];
-          if (!set_vt_partitioning(cpi, xd, vtemp, BLOCK_16X16,
+          if (!set_vt_partitioning(cpi, x, xd, vtemp, BLOCK_16X16,
                                    mi_row + y32_idx + y16_idx,
                                    mi_col + x32_idx + x16_idx,
                                    thresholds[2],
@@ -932,18 +937,18 @@
               const int x8_idx = (k & 1);
               const int y8_idx = (k >> 1);
               if (use_4x4_partition) {
-                if (!set_vt_partitioning(cpi, xd, &vtemp->split[k],
+                if (!set_vt_partitioning(cpi, x, xd, &vtemp->split[k],
                                          BLOCK_8X8,
                                          mi_row + y32_idx + y16_idx + y8_idx,
                                          mi_col + x32_idx + x16_idx + x8_idx,
                                          thresholds[3], BLOCK_8X8, 0)) {
-                  set_block_size(cpi, xd,
+                  set_block_size(cpi, x, xd,
                                  (mi_row + y32_idx + y16_idx + y8_idx),
                                  (mi_col + x32_idx + x16_idx + x8_idx),
                                  BLOCK_4X4);
                 }
               } else {
-                set_block_size(cpi, xd,
+                set_block_size(cpi, x, xd,
                                (mi_row + y32_idx + y16_idx + y8_idx),
                                (mi_col + x32_idx + x16_idx + x8_idx),
                                BLOCK_8X8);
@@ -988,6 +993,7 @@ static void update_state(VP9_COMP *cpi, ThreadData *td,
   assert(mi->mbmi.sb_type == bsize);

   *mi_addr = *mi;
+  *x->mbmi_ext = ctx->mbmi_ext;

   // If segmentation in use
   if (seg->enabled) {
@@ -1289,6 +1295,7 @@ static void update_stats(VP9_COMMON *cm, ThreadData *td) {
   const MACROBLOCKD *const xd = &x->e_mbd;
   const MODE_INFO *const mi = xd->mi[0];
   const MB_MODE_INFO *const mbmi = &mi->mbmi;
+  const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
   const BLOCK_SIZE bsize = mbmi->sb_type;

   if (!frame_is_intra_only(cm)) {
@@ -1321,7 +1328,7 @@
     }
     if (inter_block &&
         !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
-      const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
+      const int mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
       if (bsize >= BLOCK_8X8) {
         const PREDICTION_MODE mode = mbmi->mode;
         ++counts->inter_mode[mode_ctx][INTER_OFFSET(mode)];
@@ -1682,6 +1689,7 @@ static void update_state_rt(VP9_COMP *cpi, ThreadData *td,
   const int y_mis = MIN(bh, cm->mi_rows - mi_row);

   *(xd->mi[0]) = ctx->mic;
+  *(x->mbmi_ext) = ctx->mbmi_ext;

   if (seg->enabled && cpi->oxcf.aq_mode) {
     // For in frame complexity AQ or variance AQ, copy segment_id from
@@ -2960,28 +2968,33 @@ static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x,

   switch (partition) {
     case PARTITION_NONE:
-      set_mode_info_offsets(cm, xd, mi_row, mi_col);
+      set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
       *(xd->mi[0]) = pc_tree->none.mic;
+      *(x->mbmi_ext) = pc_tree->none.mbmi_ext;
       duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
       break;
     case PARTITION_VERT:
-      set_mode_info_offsets(cm, xd, mi_row, mi_col);
+      set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
       *(xd->mi[0]) = pc_tree->vertical[0].mic;
+      *(x->mbmi_ext) = pc_tree->vertical[0].mbmi_ext;
       duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
       if (mi_col + hbs < cm->mi_cols) {
-        set_mode_info_offsets(cm, xd, mi_row, mi_col + hbs);
+        set_mode_info_offsets(cm, x, xd, mi_row, mi_col + hbs);
         *(xd->mi[0]) = pc_tree->vertical[1].mic;
+        *(x->mbmi_ext) = pc_tree->vertical[1].mbmi_ext;
         duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, subsize);
       }
       break;
     case PARTITION_HORZ:
-      set_mode_info_offsets(cm, xd, mi_row, mi_col);
+      set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
       *(xd->mi[0]) = pc_tree->horizontal[0].mic;
+      *(x->mbmi_ext) = pc_tree->horizontal[0].mbmi_ext;
       duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
       if (mi_row + hbs < cm->mi_rows) {
-        set_mode_info_offsets(cm, xd, mi_row + hbs, mi_col);
+        set_mode_info_offsets(cm, x, xd, mi_row + hbs, mi_col);
         *(xd->mi[0]) = pc_tree->horizontal[1].mic;
+        *(x->mbmi_ext) = pc_tree->horizontal[1].mbmi_ext;
         duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, subsize);
       }
       break;
@@ -3082,6 +3095,7 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
     nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &this_rdc, bsize,
                         ctx);
     ctx->mic.mbmi = xd->mi[0]->mbmi;
+    ctx->mbmi_ext = *x->mbmi_ext;
     ctx->skip_txfm[0] = x->skip_txfm[0];
     ctx->skip = x->skip;
@@ -3164,6 +3178,7 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
                         &pc_tree->horizontal[0]);

     pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
+    pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
     pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
     pc_tree->horizontal[0].skip = x->skip;
@@ -3175,6 +3190,7 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
                           &pc_tree->horizontal[1]);

       pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
+      pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
       pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
       pc_tree->horizontal[1].skip = x->skip;
@@ -3207,6 +3223,7 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
     nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
                         &pc_tree->vertical[0]);
     pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
+    pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
     pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
     pc_tree->vertical[0].skip = x->skip;
@@ -3217,6 +3234,7 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
                           &this_rdc, subsize,
                           &pc_tree->vertical[1]);
       pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
+      pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
       pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
       pc_tree->vertical[1].skip = x->skip;
@@ -3308,6 +3326,7 @@ static void nonrd_select_partition(VP9_COMP *cpi,
       nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
                           subsize, &pc_tree->none);
       pc_tree->none.mic.mbmi = xd->mi[0]->mbmi;
+      pc_tree->none.mbmi_ext = *x->mbmi_ext;
       pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
       pc_tree->none.skip = x->skip;
       break;
@@ -3316,6 +3335,7 @@
       nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
                           subsize, &pc_tree->vertical[0]);
       pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
+      pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
       pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
       pc_tree->vertical[0].skip = x->skip;
       if (mi_col + hbs < cm->mi_cols) {
@@ -3323,6 +3343,7 @@ static void nonrd_select_partition(VP9_COMP *cpi,
         nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
                             &this_rdc, subsize, &pc_tree->vertical[1]);
         pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
+        pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
         pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
         pc_tree->vertical[1].skip = x->skip;
         if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
@@ -3337,6 +3358,7 @@ static void nonrd_select_partition(VP9_COMP *cpi,
       nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
                           subsize, &pc_tree->horizontal[0]);
       pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
+      pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
       pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
       pc_tree->horizontal[0].skip = x->skip;
       if (mi_row + hbs < cm->mi_rows) {
@@ -3344,6 +3366,7 @@ static void nonrd_select_partition(VP9_COMP *cpi,
         nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
                             &this_rdc, subsize, &pc_tree->horizontal[1]);
         pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
+        pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
         pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
         pc_tree->horizontal[1].skip = x->skip;
         if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
@@ -3428,6 +3451,7 @@ static void nonrd_use_partition(VP9_COMP *cpi,
       nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
                           subsize, &pc_tree->none);
       pc_tree->none.mic.mbmi = xd->mi[0]->mbmi;
+      pc_tree->none.mbmi_ext = *x->mbmi_ext;
       pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
       pc_tree->none.skip = x->skip;
       encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
@@ -3438,6 +3462,7 @@
       nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
                           subsize, &pc_tree->vertical[0]);
       pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
+      pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
       pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
       pc_tree->vertical[0].skip = x->skip;
       encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
@@ -3447,6 +3472,7 @@
        nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
                            dummy_cost, subsize, &pc_tree->vertical[1]);
        pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
+       pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
        pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
        pc_tree->vertical[1].skip = x->skip;
        encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col + hbs,
@@ -3458,6 +3484,7 @@
       nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
                           subsize, &pc_tree->horizontal[0]);
       pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
+      pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
       pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
       pc_tree->horizontal[0].skip = x->skip;
       encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
@@ -3468,6 +3495,7 @@
        nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
                            dummy_cost, subsize, &pc_tree->horizontal[1]);
        pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
+       pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
        pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
        pc_tree->horizontal[1].skip = x->skip;
        encode_b_rt(cpi, td, tile_info, tp, mi_row + hbs, mi_col,
diff --git a/vp9/encoder/vp9_encodemv.c b/vp9/encoder/vp9_encodemv.c
index 22759983f..10180f251 100644
--- a/vp9/encoder/vp9_encodemv.c
+++ b/vp9/encoder/vp9_encodemv.c
@@ -229,12 +229,13 @@ void vp9_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
   build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], usehp);
 }

-static void inc_mvs(const MB_MODE_INFO *mbmi, const int_mv mvs[2],
+static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
+                    const int_mv mvs[2],
                     nmv_context_counts *counts) {
   int i;

   for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
-    const MV *ref = &mbmi->ref_mvs[mbmi->ref_frame[i]][0].as_mv;
+    const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0].as_mv;
     const MV diff = {mvs[i].as_mv.row - ref->row,
                      mvs[i].as_mv.col - ref->col};
     vp9_inc_mv(&diff, counts);
@@ -245,6 +246,7 @@ void vp9_update_mv_count(ThreadData *td) {
   const MACROBLOCKD *xd = &td->mb.e_mbd;
   const MODE_INFO *mi = xd->mi[0];
   const MB_MODE_INFO *const mbmi = &mi->mbmi;
+  const MB_MODE_INFO_EXT *mbmi_ext = td->mb.mbmi_ext;

   if (mbmi->sb_type < BLOCK_8X8) {
     const int num_4x4_w = num_4x4_blocks_wide_lookup[mbmi->sb_type];
@@ -255,12 +257,12 @@ void vp9_update_mv_count(ThreadData *td) {
       for (idx = 0; idx < 2; idx += num_4x4_w) {
         const int i = idy * 2 + idx;
         if (mi->bmi[i].as_mode == NEWMV)
-          inc_mvs(mbmi, mi->bmi[i].as_mv, &td->counts->mv);
+          inc_mvs(mbmi, mbmi_ext, mi->bmi[i].as_mv, &td->counts->mv);
       }
     }
   } else {
     if (mbmi->mode == NEWMV)
-      inc_mvs(mbmi, mbmi->mv, &td->counts->mv);
+      inc_mvs(mbmi, mbmi_ext, mbmi->mv, &td->counts->mv);
   }
 }
diff --git a/vp9/encoder/vp9_encoder.c b/vp9/encoder/vp9_encoder.c
index d708b8319..45b5df4d1 100644
--- a/vp9/encoder/vp9_encoder.c
+++ b/vp9/encoder/vp9_encoder.c
@@ -335,6 +335,9 @@ static void dealloc_compressor_data(VP9_COMP *cpi) {
   VP9_COMMON *const cm = &cpi->common;
   int i;

+  vpx_free(cpi->mbmi_ext_base);
+  cpi->mbmi_ext_base = NULL;
+
   vpx_free(cpi->tile_data);
   cpi->tile_data = NULL;

@@ -670,11 +673,25 @@ static void alloc_util_frame_buffers(VP9_COMP *cpi) {
                        "Failed to allocate scaled last source buffer");
 }

+
+static int alloc_context_buffers_ext(VP9_COMP *cpi) {
+  VP9_COMMON *cm = &cpi->common;
+  int mi_size = cm->mi_cols * cm->mi_rows;
+
+  cpi->mbmi_ext_base = vpx_calloc(mi_size, sizeof(*cpi->mbmi_ext_base));
+  if (!cpi->mbmi_ext_base)
+    return 1;
+
+  return 0;
+}
+
 void vp9_alloc_compressor_data(VP9_COMP *cpi) {
   VP9_COMMON *cm = &cpi->common;

   vp9_alloc_context_buffers(cm, cm->width, cm->height);

+  alloc_context_buffers_ext(cpi);
+
   vpx_free(cpi->tile_tok[0][0]);
   {
@@ -716,6 +733,9 @@ static void update_frame_size(VP9_COMP *cpi) {
   vp9_set_mb_mi(cm, cm->width, cm->height);
   vp9_init_context_buffers(cm);
   init_macroblockd(cm, xd);
+  cpi->td.mb.mbmi_ext_base = cpi->mbmi_ext_base;
+  memset(cpi->mbmi_ext_base, 0,
+         cm->mi_rows * cm->mi_cols * sizeof(*cpi->mbmi_ext_base));

   set_tile_limits(cpi);
diff --git a/vp9/encoder/vp9_encoder.h b/vp9/encoder/vp9_encoder.h
index 2b0da103f..afe3ae9da 100644
--- a/vp9/encoder/vp9_encoder.h
+++ b/vp9/encoder/vp9_encoder.h
@@ -291,6 +291,7 @@ typedef struct IMAGE_STAT {
 typedef struct VP9_COMP {
   QUANTS quants;
   ThreadData td;
+  MB_MODE_INFO_EXT *mbmi_ext_base;
   DECLARE_ALIGNED(16, int16_t, y_dequant[QINDEX_RANGE][8]);
   DECLARE_ALIGNED(16, int16_t, uv_dequant[QINDEX_RANGE][8]);
   VP9_COMMON common;
diff --git a/vp9/encoder/vp9_pickmode.c b/vp9/encoder/vp9_pickmode.c
index a6271362d..0cba63884 100644
--- a/vp9/encoder/vp9_pickmode.c
+++ b/vp9/encoder/vp9_pickmode.c
@@ -39,7 +39,8 @@ typedef struct {
   int in_use;
 } PRED_BUFFER;

-static int mv_refs_rt(const VP9_COMMON *cm, const MACROBLOCKD *xd,
+static int mv_refs_rt(const VP9_COMMON *cm, const MACROBLOCK *x,
+                      const MACROBLOCKD *xd,
                       const TileInfo *const tile,
                       MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
                       int_mv *mv_ref_list,
@@ -111,7 +112,7 @@ static int mv_refs_rt(const VP9_COMMON *cm, const MACROBLOCKD *xd,

  Done:

-  mi->mbmi.mode_context[ref_frame] = counter_to_context[context_counter];
+  x->mbmi_ext->mode_context[ref_frame] = counter_to_context[context_counter];

   // Clamp vectors
   for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i)
@@ -131,7 +132,7 @@ static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
   const int sadpb = x->sadperbit16;
   MV mvp_full;
   const int ref = mbmi->ref_frame[0];
-  const MV ref_mv = mbmi->ref_mvs[ref][0].as_mv;
+  const MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
   int dis;
   int rate_mode;
   const int tmp_col_min = x->mv_col_min;
@@ -155,7 +156,7 @@ static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
   assert(x->mv_best_ref_index[ref] <= 2);
   if (x->mv_best_ref_index[ref] < 2)
-    mvp_full = mbmi->ref_mvs[ref][x->mv_best_ref_index[ref]].as_mv;
+    mvp_full = x->mbmi_ext->ref_mvs[ref][x->mv_best_ref_index[ref]].as_mv;
   else
     mvp_full = x->pred_mv[ref];

@@ -178,7 +179,7 @@ static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
     *rate_mv = vp9_mv_bit_cost(&mvp_full, &ref_mv,
                                x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);

-    rate_mode = cpi->inter_mode_cost[mbmi->mode_context[ref]]
+    rate_mode = cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref]]
                                     [INTER_OFFSET(NEWMV)];
     rv = !(RDCOST(x->rdmult, x->rddiv, (*rate_mv + rate_mode), 0) >
            best_rd_sofar);
@@ -776,7 +777,6 @@ static void encode_breakout_test(VP9_COMP *cpi, MACROBLOCK *x,
                                  struct buf_2d yv12_mb[][MAX_MB_PLANE],
                                  int *rate, int64_t *dist) {
   MACROBLOCKD *xd = &x->e_mbd;
-  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
   const BLOCK_SIZE uv_size = get_plane_block_size(bsize, &xd->plane[1]);

   unsigned int var = var_y, sse = sse_y;
@@ -850,7 +850,7 @@ static void encode_breakout_test(VP9_COMP *cpi, MACROBLOCK *x,
       x->skip = 1;

       // The cost of skip bit needs to be added.
-      *rate = cpi->inter_mode_cost[mbmi->mode_context[ref_frame]]
+      *rate = cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
                                   [INTER_OFFSET(this_mode)];

       // More on this part of rate
@@ -1172,7 +1172,7 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
     frame_mv[ZEROMV][ref_frame].as_int = 0;

     if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) {
-      int_mv *const candidates = mbmi->ref_mvs[ref_frame];
+      int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
       const struct scale_factors *const sf =
           &cm->frame_refs[ref_frame - 1].sf;
       vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col,
@@ -1181,9 +1181,9 @@
       if (cm->use_prev_frame_mvs)
         vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame,
                          candidates, mi_row, mi_col, NULL, NULL,
-                         xd->mi[0]->mbmi.mode_context);
+                         x->mbmi_ext->mode_context);
       else
-        const_motion[ref_frame] = mv_refs_rt(cm, xd, tile_info,
+        const_motion[ref_frame] = mv_refs_rt(cm, x, xd, tile_info,
                                              xd->mi[0],
                                              ref_frame, candidates,
                                              mi_row, mi_col);
@@ -1257,13 +1257,13 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
           frame_mv[NEWMV][ref_frame].as_int = mbmi->mv[0].as_int;
           rate_mv = vp9_mv_bit_cost(&frame_mv[NEWMV][ref_frame].as_mv,
-                                    &mbmi->ref_mvs[ref_frame][0].as_mv,
+                                    &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv,
                                     x->nmvjointcost, x->mvcost,
                                     MV_COST_WEIGHT);
           frame_mv[NEWMV][ref_frame].as_mv.row >>= 3;
           frame_mv[NEWMV][ref_frame].as_mv.col >>= 3;

           cpi->find_fractional_mv_step(x, &frame_mv[NEWMV][ref_frame].as_mv,
-                                       &mbmi->ref_mvs[ref_frame][0].as_mv,
+                                       &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv,
                                        cpi->common.allow_high_precision_mv,
                                        x->errorperbit,
                                        &cpi->fn_ptr[bsize],
@@ -1426,7 +1426,7 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
       this_rdc.rate += rate_mv;
       this_rdc.rate +=
-          cpi->inter_mode_cost[mbmi->mode_context[ref_frame]][INTER_OFFSET(
+          cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]][INTER_OFFSET(
               this_mode)];
       this_rdc.rate += ref_frame_cost[ref_frame];
       this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
                                this_rdc.rate, this_rdc.dist);
@@ -1629,6 +1629,7 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
   SPEED_FEATURES *const sf = &cpi->sf;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
   const struct segmentation *const seg = &cm->seg;
   MV_REFERENCE_FRAME ref_frame, second_ref_frame = NONE;
   MV_REFERENCE_FRAME best_ref_frame = NONE;
@@ -1652,14 +1653,14 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
     x->pred_mv_sad[ref_frame] = INT_MAX;

     if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) {
-      int_mv *const candidates = mbmi->ref_mvs[ref_frame];
+      int_mv *const candidates = mbmi_ext->ref_mvs[ref_frame];
       const struct scale_factors *const sf =
           &cm->frame_refs[ref_frame - 1].sf;
       vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col,
                            sf, sf);
       vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame,
                        candidates, mi_row, mi_col, NULL, NULL,
-                       xd->mi[0]->mbmi.mode_context);
+                       mbmi_ext->mode_context);

       vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
                             &dummy_mv[0], &dummy_mv[1]);
@@ -1734,7 +1735,7 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
       vp9_append_sub8x8_mvs_for_idx(cm, xd, i, 0, mi_row, mi_col,
                                     &b_mv[NEARESTMV],
                                     &b_mv[NEARMV],
-                                    xd->mi[0]->mbmi.mode_context);
+                                    mbmi_ext->mode_context);

       for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
         int b_rate = 0;
@@ -1759,12 +1760,12 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
             mvp_full.col = xd->mi[0]->bmi[0].as_mv[0].as_mv.col >> 3;
           }

-          vp9_set_mv_search_range(x, &mbmi->ref_mvs[0]->as_mv);
+          vp9_set_mv_search_range(x, &mbmi_ext->ref_mvs[0]->as_mv);

           vp9_full_pixel_search(
               cpi, x, bsize, &mvp_full, step_param, x->sadperbit4,
               cond_cost_list(cpi, cost_list),
-              &mbmi->ref_mvs[ref_frame][0].as_mv, &tmp_mv,
+              &mbmi_ext->ref_mvs[ref_frame][0].as_mv, &tmp_mv,
               INT_MAX, 0);

           x->mv_col_min = tmp_col_min;
@@ -1777,17 +1778,17 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
           mvp_full.col = tmp_mv.col * 8;

           b_rate += vp9_mv_bit_cost(&mvp_full,
-                                    &mbmi->ref_mvs[ref_frame][0].as_mv,
+                                    &mbmi_ext->ref_mvs[ref_frame][0].as_mv,
                                     x->nmvjointcost, x->mvcost,
                                     MV_COST_WEIGHT);

-          b_rate += cpi->inter_mode_cost[mbmi->mode_context[ref_frame]]
+          b_rate += cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
                                         [INTER_OFFSET(NEWMV)];
           if (RDCOST(x->rdmult, x->rddiv, b_rate, 0) > b_best_rd)
             continue;

           cpi->find_fractional_mv_step(x, &tmp_mv,
-                                       &mbmi->ref_mvs[ref_frame][0].as_mv,
+                                       &mbmi_ext->ref_mvs[ref_frame][0].as_mv,
                                        cpi->common.allow_high_precision_mv,
                                        x->errorperbit,
                                        &cpi->fn_ptr[bsize],
@@ -1800,7 +1801,7 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,

           xd->mi[0]->bmi[i].as_mv[0].as_mv = tmp_mv;
         } else {
-          b_rate += cpi->inter_mode_cost[mbmi->mode_context[ref_frame]]
+          b_rate += cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
                                         [INTER_OFFSET(this_mode)];
         }

@@ -1880,6 +1881,7 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
   }
   mbmi->mode = xd->mi[0]->bmi[3].as_mode;
   ctx->mic = *(xd->mi[0]);
+  ctx->mbmi_ext = *x->mbmi_ext;
   ctx->skip_txfm[0] = 0;
   ctx->skip = 0;
   // Dummy assignment for speed -5. No effect in speed -6.
diff --git a/vp9/encoder/vp9_rd.c b/vp9/encoder/vp9_rd.c
index 90ee1e44a..930561aad 100644
--- a/vp9/encoder/vp9_rd.c
+++ b/vp9/encoder/vp9_rd.c
@@ -452,8 +452,6 @@ void vp9_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
 void vp9_mv_pred(VP9_COMP *cpi, MACROBLOCK *x,
                  uint8_t *ref_y_buffer, int ref_y_stride,
                  int ref_frame, BLOCK_SIZE block_size) {
-  MACROBLOCKD *xd = &x->e_mbd;
-  MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
   int i;
   int zero_seen = 0;
   int best_index = 0;
@@ -468,13 +466,14 @@ void vp9_mv_pred(VP9_COMP *cpi, MACROBLOCK *x,
                              block_size < x->max_partition_size);
   MV pred_mv[3];

-  pred_mv[0] = mbmi->ref_mvs[ref_frame][0].as_mv;
-  pred_mv[1] = mbmi->ref_mvs[ref_frame][1].as_mv;
+  pred_mv[0] = x->mbmi_ext->ref_mvs[ref_frame][0].as_mv;
+  pred_mv[1] = x->mbmi_ext->ref_mvs[ref_frame][1].as_mv;
   pred_mv[2] = x->pred_mv[ref_frame];
   assert(num_mv_refs <= (int)(sizeof(pred_mv) / sizeof(pred_mv[0])));

   near_same_nearest =
-      mbmi->ref_mvs[ref_frame][0].as_int == mbmi->ref_mvs[ref_frame][1].as_int;
+      x->mbmi_ext->ref_mvs[ref_frame][0].as_int ==
+      x->mbmi_ext->ref_mvs[ref_frame][1].as_int;
   // Get the sad for each candidate reference mv.
   for (i = 0; i < num_mv_refs; ++i) {
     const MV *this_mv = &pred_mv[i];
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 3f9b2eb0c..e6b7f193a 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -1275,7 +1275,8 @@ static int cost_mv_ref(const VP9_COMP *cpi, PREDICTION_MODE mode,
   return cpi->inter_mode_cost[mode_context][INTER_OFFSET(mode)];
 }

-static int set_and_cost_bmi_mvs(VP9_COMP *cpi, MACROBLOCKD *xd, int i,
+static int set_and_cost_bmi_mvs(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
+                                int i,
                                 PREDICTION_MODE mode, int_mv this_mv[2],
                                 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
                                 int_mv seg_mvs[MAX_REF_FRAMES],
@@ -1283,6 +1284,7 @@ static int set_and_cost_bmi_mvs(VP9_COMP *cpi, MACROBLOCKD *xd, int i,
                                 int *mvcost[2]) {
   MODE_INFO *const mic = xd->mi[0];
   const MB_MODE_INFO *const mbmi = &mic->mbmi;
+  const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
   int thismvcost = 0;
   int idx, idy;
   const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
@@ -1325,7 +1327,7 @@ static int set_and_cost_bmi_mvs(VP9_COMP *cpi, MACROBLOCKD *xd, int i,
     for (idx = 0; idx < num_4x4_blocks_wide; ++idx)
       memmove(&mic->bmi[i + idy * 2 + idx], &mic->bmi[i], sizeof(mic->bmi[i]));

-  return cost_mv_ref(cpi, mode, mbmi->mode_context[mbmi->ref_frame[0]]) +
+  return cost_mv_ref(cpi, mode, mbmi_ext->mode_context[mbmi->ref_frame[0]]) +
             thismvcost;
 }

@@ -1582,7 +1584,7 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
#endif  // CONFIG_VP9_HIGHBITDEPTH

   for (ref = 0; ref < 2; ++ref) {
-    ref_mv[ref] = mbmi->ref_mvs[refs[ref]][0];
+    ref_mv[ref] = x->mbmi_ext->ref_mvs[refs[ref]][0];

     if (scaled_ref_frame[ref]) {
       int i;
@@ -1726,7 +1728,7 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
     }

     *rate_mv += vp9_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
-                                &mbmi->ref_mvs[refs[ref]][0].as_mv,
+                                &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
                                 x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
   }
 }
@@ -1765,6 +1767,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x,
   int subpelmv = 1, have_ref = 0;
   const int has_second_rf = has_second_ref(mbmi);
   const int inter_mode_mask = cpi->sf.inter_mode_mask[bsize];
+  MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;

   vp9_zero(*bsi);

@@ -1804,7 +1807,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x,
         vp9_append_sub8x8_mvs_for_idx(cm, xd, i, ref, mi_row, mi_col,
                                       &frame_mv[NEARESTMV][frame],
                                       &frame_mv[NEARMV][frame],
-                                      xd->mi[0]->mbmi.mode_context);
+                                      mbmi_ext->mode_context);
       }

       // search for the best motion vector on this segment
@@ -1817,7 +1820,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x,
         if (!(inter_mode_mask & (1 << this_mode)))
           continue;

-        if (!check_best_zero_mv(cpi, mbmi->mode_context, frame_mv,
+        if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
                                 this_mode, mbmi->ref_frame))
          continue;

@@ -1959,7 +1962,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x,
         }

         bsi->rdstat[i][mode_idx].brate =
-            set_and_cost_bmi_mvs(cpi, xd, i, this_mode, mode_mv[this_mode],
+            set_and_cost_bmi_mvs(cpi, x, xd, i, this_mode, mode_mv[this_mode],
                                  frame_mv, seg_mvs[i], bsi->ref_mv,
                                  x->nmvjointcost, x->mvcost);

@@ -2057,7 +2060,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x,
       memcpy(t_above, bsi->rdstat[i][mode_idx].ta, sizeof(t_above));
       memcpy(t_left, bsi->rdstat[i][mode_idx].tl, sizeof(t_left));

-      set_and_cost_bmi_mvs(cpi, xd, i, mode_selected, mode_mv[mode_selected],
+      set_and_cost_bmi_mvs(cpi, x, xd, i, mode_selected, mode_mv[mode_selected],
                            frame_mv, seg_mvs[i], bsi->ref_mv,
                            x->nmvjointcost, x->mvcost);

@@ -2188,6 +2191,7 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
   ctx->skippable = skippable;
   ctx->best_mode_index = mode_index;
   ctx->mic = *xd->mi[0];
+  ctx->mbmi_ext = *x->mbmi_ext;
   ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE];
   ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
   ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
@@ -2208,8 +2212,9 @@ static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
   const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
   MACROBLOCKD *const xd = &x->e_mbd;
   MODE_INFO *const mi = xd->mi[0];
-  int_mv *const candidates = mi->mbmi.ref_mvs[ref_frame];
+  int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
   const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
+  MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;

   assert(yv12 != NULL);

@@ -2219,7 +2224,7 @@ static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,

   // Gets an initial list of candidate vectors from neighbours and orders them
   vp9_find_mv_refs(cm, xd, mi, ref_frame, candidates, mi_row, mi_col,
-                   NULL, NULL, xd->mi[0]->mbmi.mode_context);
+                   NULL, NULL, mbmi_ext->mode_context);

   // Candidate refinement carried out at encoder and decoder
   vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
@@ -2247,7 +2252,7 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
   int sadpb = x->sadperbit16;
   MV mvp_full;
   int ref = mbmi->ref_frame[0];
-  MV ref_mv = mbmi->ref_mvs[ref][0].as_mv;
+  MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;

   int tmp_col_min = x->mv_col_min;
   int tmp_col_max = x->mv_col_max;
@@ -2259,8 +2264,8 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x,

                                                                         ref);
   MV pred_mv[3];
-  pred_mv[0] = mbmi->ref_mvs[ref][0].as_mv;
-  pred_mv[1] = mbmi->ref_mvs[ref][1].as_mv;
+  pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
+  pred_mv[1] = x->mbmi_ext->ref_mvs[ref][1].as_mv;
   pred_mv[2] = x->pred_mv[ref];

   if (scaled_ref_frame) {
@@ -2413,6 +2418,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
   VP9_COMMON *cm = &cpi->common;
   MACROBLOCKD *xd = &x->e_mbd;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+  MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
   const int is_comp_pred = has_second_ref(mbmi);
   const int this_mode = mbmi->mode;
   int_mv *frame_mv = mode_mv[this_mode];
@@ -2489,10 +2495,10 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
                            mi_row, mi_col, single_newmv, &rate_mv);
       } else {
         rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]].as_mv,
-                                   &mbmi->ref_mvs[refs[0]][0].as_mv,
+                                   &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
                                    x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
         rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]].as_mv,
-                                   &mbmi->ref_mvs[refs[1]][0].as_mv,
+                                   &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
                                    x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
       }
       *rate2 += rate_mv;
@@ -2549,10 +2555,12 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
   // initiation of a motion field.
   if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]],
                           mode_mv, refs[0])) {
-    *rate2 += MIN(cost_mv_ref(cpi, this_mode, mbmi->mode_context[refs[0]]),
-                  cost_mv_ref(cpi, NEARESTMV, mbmi->mode_context[refs[0]]));
+    *rate2 += MIN(cost_mv_ref(cpi, this_mode,
+                              mbmi_ext->mode_context[refs[0]]),
+                  cost_mv_ref(cpi, NEARESTMV,
+                              mbmi_ext->mode_context[refs[0]]));
   } else {
-    *rate2 += cost_mv_ref(cpi, this_mode, mbmi->mode_context[refs[0]]);
+    *rate2 += cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]);
   }

   if (RDCOST(x->rdmult, x->rddiv, *rate2, 0) > ref_best_rd &&
@@ -2827,6 +2835,7 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
   }

   ctx->mic = *xd->mi[0];
+  ctx->mbmi_ext = *x->mbmi_ext;
   rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist);
 }

@@ -2902,6 +2911,7 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi,
   SPEED_FEATURES *const sf = &cpi->sf;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+  MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
   const struct segmentation *const seg = &cm->seg;
   PREDICTION_MODE this_mode;
   MV_REFERENCE_FRAME ref_frame, second_ref_frame;
@@ -3235,7 +3245,7 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi,
      }
    } else {
      const MV_REFERENCE_FRAME ref_frames[2] = {ref_frame, second_ref_frame};
-     if (!check_best_zero_mv(cpi, mbmi->mode_context, frame_mv,
+     if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
                              this_mode, ref_frames))
        continue;
    }
@@ -3935,7 +3945,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi,
       int tmp_best_skippable = 0;
       int switchable_filter_index;
       int_mv *second_ref = comp_pred ?
-                             &mbmi->ref_mvs[second_ref_frame][0] : NULL;
+                             &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL;
       b_mode_info tmp_best_bmodes[16];
       MB_MODE_INFO tmp_best_mbmode;
       BEST_SEG_INFO bsi[SWITCHABLE_FILTERS];
@@ -3966,9 +3976,10 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi,
            ++switchable_filter_index) {
         int newbest, rs;
         int64_t rs_rd;
+        MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext;
         mbmi->interp_filter = switchable_filter_index;
         tmp_rd = rd_pick_best_sub8x8_mode(cpi, x,
-                                          &mbmi->ref_mvs[ref_frame][0],
+                                          &mbmi_ext->ref_mvs[ref_frame][0],
                                           second_ref, best_yrd, &rate,
                                           &rate_y, &distortion,
                                           &skippable, &total_sse,
@@ -4034,7 +4045,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi,
       // Handles the special case when a filter that is not in the
       // switchable list (bilinear, 6-tap) is indicated at the frame level
       tmp_rd = rd_pick_best_sub8x8_mode(cpi, x,
-                                        &mbmi->ref_mvs[ref_frame][0],
+                                        &x->mbmi_ext->ref_mvs[ref_frame][0],
                                         second_ref, best_yrd, &rate, &rate_y,
                                         &distortion, &skippable, &total_sse,
                                         (int) this_rd_thresh, seg_mvs, bsi, 0,