Merge "Removing redundant "8x8" suffix from MODE_INFO vars."

Dmitry Kovalev, 2014-05-15 17:53:31 -07:00, committed by Gerrit Code Review
commit 619e6b539a
4 changed files with 69 additions and 78 deletions
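Background for this rename: in VP9 the mode-info grid already has 8x8 granularity (each MODE_INFO pointer in cm->mi_grid_visible covers one 8x8 block, and consecutive rows of the grid are cm->mi_stride pointers apart), so the "_8x8" suffix on these variables carried no extra information. The sketch below is only an illustration of that indexing pattern, not code from this change; walk_mi_grid() and the visit callback are hypothetical, and the header path is an assumption.

#include "vp9/common/vp9_onyxc_int.h"  // VP9_COMMON, MODE_INFO (assumed path)

// Hypothetical helper: visit every 8x8 mode-info unit of the visible grid.
// One MODE_INFO pointer per 8x8 block; rows are cm->mi_stride pointers apart.
static void walk_mi_grid(VP9_COMMON *cm,
                         void (*visit)(MODE_INFO *mi, int mi_row, int mi_col)) {
  int mi_row, mi_col;
  for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row) {
    MODE_INFO **mi = cm->mi_grid_visible + mi_row * cm->mi_stride;
    for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col)
      visit(mi[mi_col], mi_row, mi_col);
  }
}

The hunks below apply exactly that rename (mi_8x8 to mi, prev_mi_8x8 to prev_mi, and so on) without changing behavior.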

View File

@@ -24,10 +24,9 @@ static void log_frame_info(VP9_COMMON *cm, const char *str, FILE *f) {
 */
 static void print_mi_data(VP9_COMMON *cm, FILE *file, const char *descriptor,
 size_t member_offset) {
-int mi_row;
-int mi_col;
+int mi_row, mi_col;
 int mi_index = 0;
-MODE_INFO **mi_8x8 = cm->mi_grid_visible;
+MODE_INFO **mi = cm->mi_grid_visible;
 int rows = cm->mi_rows;
 int cols = cm->mi_cols;
 char prefix = descriptor[0];
@@ -38,7 +37,7 @@ static void print_mi_data(VP9_COMMON *cm, FILE *file, const char *descriptor,
 fprintf(file, "%c ", prefix);
 for (mi_col = 0; mi_col < cols; mi_col++) {
 fprintf(file, "%2d ",
-*((int*) ((char *) (&mi_8x8[mi_index]->mbmi) +
+*((int*) ((char *) (&mi[mi_index]->mbmi) +
 member_offset)));
 mi_index++;
 }
@@ -52,7 +51,7 @@ void vp9_print_modes_and_motion_vectors(VP9_COMMON *cm, const char *file) {
 int mi_col;
 int mi_index = 0;
 FILE *mvs = fopen(file, "a");
-MODE_INFO **mi_8x8 = cm->mi_grid_visible;
+MODE_INFO **mi = cm->mi_grid_visible;
 int rows = cm->mi_rows;
 int cols = cm->mi_cols;
@@ -67,8 +66,8 @@ void vp9_print_modes_and_motion_vectors(VP9_COMMON *cm, const char *file) {
 for (mi_row = 0; mi_row < rows; mi_row++) {
 fprintf(mvs, "V ");
 for (mi_col = 0; mi_col < cols; mi_col++) {
-fprintf(mvs, "%4d:%4d ", mi_8x8[mi_index]->mbmi.mv[0].as_mv.row,
-mi_8x8[mi_index]->mbmi.mv[0].as_mv.col);
+fprintf(mvs, "%4d:%4d ", mi[mi_index]->mbmi.mv[0].as_mv.row,
+mi[mi_index]->mbmi.mv[0].as_mv.col);
 mi_index++;
 }
 fprintf(mvs, "\n");

View File

@@ -619,12 +619,12 @@ static void build_y_mask(const loop_filter_info_n *const lfi_n,
 // by mi_row, mi_col.
 // TODO(JBB): This function only works for yv12.
 void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
-MODE_INFO **mi_8x8, const int mode_info_stride,
+MODE_INFO **mi, const int mode_info_stride,
 LOOP_FILTER_MASK *lfm) {
 int idx_32, idx_16, idx_8;
 const loop_filter_info_n *const lfi_n = &cm->lf_info;
-MODE_INFO **mip = mi_8x8;
-MODE_INFO **mip2 = mi_8x8;
+MODE_INFO **mip = mi;
+MODE_INFO **mip2 = mi;
 // These are offsets to the next mi in the 64x64 block. It is what gets
 // added to the mi ptr as we go through each loop. It helps us to avoids
@@ -1195,13 +1195,13 @@ void vp9_loop_filter_rows(const YV12_BUFFER_CONFIG *frame_buffer,
 VP9_COMMON *cm, MACROBLOCKD *xd,
 int start, int stop, int y_only) {
 const int num_planes = y_only ? 1 : MAX_MB_PLANE;
-int mi_row, mi_col;
+const int use_420 = y_only || (xd->plane[1].subsampling_y == 1 &&
+xd->plane[1].subsampling_x == 1);
 LOOP_FILTER_MASK lfm;
-int use_420 = y_only || (xd->plane[1].subsampling_y == 1 &&
-xd->plane[1].subsampling_x == 1);
+int mi_row, mi_col;
 for (mi_row = start; mi_row < stop; mi_row += MI_BLOCK_SIZE) {
-MODE_INFO **mi_8x8 = cm->mi_grid_visible + mi_row * cm->mi_stride;
+MODE_INFO **mi = cm->mi_grid_visible + mi_row * cm->mi_stride;
 for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
 int plane;
@@ -1210,14 +1210,14 @@ void vp9_loop_filter_rows(const YV12_BUFFER_CONFIG *frame_buffer,
 // TODO(JBB): Make setup_mask work for non 420.
 if (use_420)
-vp9_setup_mask(cm, mi_row, mi_col, mi_8x8 + mi_col, cm->mi_stride,
+vp9_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride,
 &lfm);
 for (plane = 0; plane < num_planes; ++plane) {
 if (use_420)
 vp9_filter_block_plane(cm, &xd->plane[plane], mi_row, &lfm);
 else
-filter_block_plane_non420(cm, &xd->plane[plane], mi_8x8 + mi_col,
+filter_block_plane_non420(cm, &xd->plane[plane], mi + mi_col,
 mi_row, mi_col);
 }
 }

View File

@@ -1805,15 +1805,11 @@ static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
 BLOCK_SIZE *max_block_size) {
 VP9_COMMON *const cm = &cpi->common;
 MACROBLOCKD *const xd = &cpi->mb.e_mbd;
-MODE_INFO **mi_8x8 = xd->mi;
-const int left_in_image = xd->left_available && mi_8x8[-1];
-const int above_in_image = xd->up_available &&
-mi_8x8[-xd->mi_stride];
-MODE_INFO **above_sb64_mi_8x8;
-MODE_INFO **left_sb64_mi_8x8;
-int row8x8_remaining = tile->mi_row_end - mi_row;
-int col8x8_remaining = tile->mi_col_end - mi_col;
+MODE_INFO **mi = xd->mi;
+const int left_in_image = xd->left_available && mi[-1];
+const int above_in_image = xd->up_available && mi[-xd->mi_stride];
+const int row8x8_remaining = tile->mi_row_end - mi_row;
+const int col8x8_remaining = tile->mi_col_end - mi_col;
 int bh, bw;
 BLOCK_SIZE min_size = BLOCK_4X4;
 BLOCK_SIZE max_size = BLOCK_64X64;
@@ -1833,15 +1829,13 @@ static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
 }
 // Find the min and max partition sizes used in the left SB64
 if (left_in_image) {
-left_sb64_mi_8x8 = &mi_8x8[-MI_BLOCK_SIZE];
-get_sb_partition_size_range(cpi, left_sb64_mi_8x8,
-&min_size, &max_size);
+MODE_INFO **left_sb64_mi = &mi[-MI_BLOCK_SIZE];
+get_sb_partition_size_range(cpi, left_sb64_mi, &min_size, &max_size);
 }
 // Find the min and max partition sizes used in the above SB64.
 if (above_in_image) {
-above_sb64_mi_8x8 = &mi_8x8[-xd->mi_stride * MI_BLOCK_SIZE];
-get_sb_partition_size_range(cpi, above_sb64_mi_8x8,
-&min_size, &max_size);
+MODE_INFO **above_sb64_mi = &mi[-xd->mi_stride * MI_BLOCK_SIZE];
+get_sb_partition_size_range(cpi, above_sb64_mi, &min_size, &max_size);
 }
 // adjust observed min and max
 if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) {
@@ -2296,25 +2290,25 @@ static void encode_rd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
 sf->partition_search_type == VAR_BASED_PARTITION ||
 sf->partition_search_type == VAR_BASED_FIXED_PARTITION) {
 const int idx_str = cm->mi_stride * mi_row + mi_col;
-MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
-MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;
+MODE_INFO **mi = cm->mi_grid_visible + idx_str;
+MODE_INFO **prev_mi = cm->prev_mi_grid_visible + idx_str;
 cpi->mb.source_variance = UINT_MAX;
 if (sf->partition_search_type == FIXED_PARTITION) {
 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
-set_fixed_partitioning(cpi, tile, mi_8x8, mi_row, mi_col,
+set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col,
 sf->always_this_block_size);
-rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
+rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
 &dummy_rate, &dummy_dist, 1, x->pc_root);
 } else if (sf->partition_search_type == VAR_BASED_FIXED_PARTITION) {
 BLOCK_SIZE bsize;
 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
 bsize = get_rd_var_based_fixed_partition(cpi, mi_row, mi_col);
-set_fixed_partitioning(cpi, tile, mi_8x8, mi_row, mi_col, bsize);
-rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
+set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col, bsize);
+rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
 &dummy_rate, &dummy_dist, 1, x->pc_root);
 } else if (sf->partition_search_type == VAR_BASED_PARTITION) {
 choose_partitioning(cpi, tile, mi_row, mi_col);
-rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
+rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
 &dummy_rate, &dummy_dist, 1, x->pc_root);
 } else {
 if ((cm->current_video_frame
@@ -2325,7 +2319,7 @@ static void encode_rd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
 || cpi->rc.is_src_frame_alt_ref
 || ((sf->use_lastframe_partitioning ==
 LAST_FRAME_PARTITION_LOW_MOTION) &&
-sb_has_motion(cm, prev_mi_8x8))) {
+sb_has_motion(cm, prev_mi))) {
 // If required set upper and lower partition size limits
 if (sf->auto_min_max_partition_size) {
 set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
@@ -2337,12 +2331,12 @@ static void encode_rd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
 &dummy_rate, &dummy_dist, 1, INT64_MAX, x->pc_root);
 } else {
 if (sf->constrain_copy_partition &&
-sb_has_motion(cm, prev_mi_8x8))
-constrain_copy_partitioning(cpi, tile, mi_8x8, prev_mi_8x8,
+sb_has_motion(cm, prev_mi))
+constrain_copy_partitioning(cpi, tile, mi, prev_mi,
 mi_row, mi_col, BLOCK_16X16);
 else
-copy_partitioning(cm, mi_8x8, prev_mi_8x8);
-rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
+copy_partitioning(cm, mi, prev_mi);
+rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
 &dummy_rate, &dummy_dist, 1, x->pc_root);
 }
 }
@@ -2822,7 +2816,7 @@ static void nonrd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
 static void nonrd_use_partition(VP9_COMP *cpi,
 const TileInfo *const tile,
-MODE_INFO **mi_8x8,
+MODE_INFO **mi,
 TOKENEXTRA **tp,
 int mi_row, int mi_col,
 BLOCK_SIZE bsize, int output_enabled,
@@ -2841,7 +2835,7 @@ static void nonrd_use_partition(VP9_COMP *cpi,
 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
 return;
-subsize = (bsize >= BLOCK_8X8) ? mi_8x8[0]->mbmi.sb_type : BLOCK_4X4;
+subsize = (bsize >= BLOCK_8X8) ? mi[0]->mbmi.sb_type : BLOCK_4X4;
 partition = partition_lookup[bsl][subsize];
 switch (partition) {
@@ -2869,7 +2863,7 @@ static void nonrd_use_partition(VP9_COMP *cpi,
 if (mi_row + hbs < cm->mi_rows) {
 nonrd_pick_sb_modes(cpi, tile, mi_row + hbs, mi_col,
 &rate, &dist, subsize);
-pc_tree->horizontal[1].mic.mbmi = mi_8x8[0]->mbmi;
+pc_tree->horizontal[1].mic.mbmi = mi[0]->mbmi;
 if (rate != INT_MAX && dist != INT64_MAX &&
 *totrate != INT_MAX && *totdist != INT64_MAX) {
 *totrate += rate;
@@ -2879,10 +2873,10 @@ static void nonrd_use_partition(VP9_COMP *cpi,
 break;
 case PARTITION_SPLIT:
 subsize = get_subsize(bsize, PARTITION_SPLIT);
-nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col,
+nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col,
 subsize, output_enabled, totrate, totdist,
 pc_tree->split[0]);
-nonrd_use_partition(cpi, tile, mi_8x8 + hbs, tp,
+nonrd_use_partition(cpi, tile, mi + hbs, tp,
 mi_row, mi_col + hbs, subsize, output_enabled,
 &rate, &dist, pc_tree->split[1]);
 if (rate != INT_MAX && dist != INT64_MAX &&
@@ -2890,7 +2884,7 @@ static void nonrd_use_partition(VP9_COMP *cpi,
 *totrate += rate;
 *totdist += dist;
 }
-nonrd_use_partition(cpi, tile, mi_8x8 + hbs * mis, tp,
+nonrd_use_partition(cpi, tile, mi + hbs * mis, tp,
 mi_row + hbs, mi_col, subsize, output_enabled,
 &rate, &dist, pc_tree->split[2]);
 if (rate != INT_MAX && dist != INT64_MAX &&
@@ -2898,7 +2892,7 @@ static void nonrd_use_partition(VP9_COMP *cpi,
 *totrate += rate;
 *totdist += dist;
 }
-nonrd_use_partition(cpi, tile, mi_8x8 + hbs * mis + hbs, tp,
+nonrd_use_partition(cpi, tile, mi + hbs * mis + hbs, tp,
 mi_row + hbs, mi_col + hbs, subsize, output_enabled,
 &rate, &dist, pc_tree->split[3]);
 if (rate != INT_MAX && dist != INT64_MAX &&
@@ -2937,8 +2931,8 @@ static void encode_nonrd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
 int dummy_rate = 0;
 int64_t dummy_dist = 0;
 const int idx_str = cm->mi_stride * mi_row + mi_col;
-MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
-MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;
+MODE_INFO **mi = cm->mi_grid_visible + idx_str;
+MODE_INFO **prev_mi = cm->prev_mi_grid_visible + idx_str;
 BLOCK_SIZE bsize;
 x->in_static_area = 0;
@@ -2949,12 +2943,12 @@ static void encode_nonrd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
 switch (cpi->sf.partition_search_type) {
 case VAR_BASED_PARTITION:
 choose_partitioning(cpi, tile, mi_row, mi_col);
-nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
+nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
 1, &dummy_rate, &dummy_dist, x->pc_root);
 break;
 case SOURCE_VAR_BASED_PARTITION:
-set_source_var_based_partition(cpi, tile, mi_8x8, mi_row, mi_col);
-nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
+set_source_var_based_partition(cpi, tile, mi, mi_row, mi_col);
+nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
 1, &dummy_rate, &dummy_dist, x->pc_root);
 break;
 case VAR_BASED_FIXED_PARTITION:
@@ -2962,8 +2956,8 @@ static void encode_nonrd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
 bsize = cpi->sf.partition_search_type == FIXED_PARTITION ?
 cpi->sf.always_this_block_size :
 get_nonrd_var_based_fixed_partition(cpi, mi_row, mi_col);
-set_fixed_partitioning(cpi, tile, mi_8x8, mi_row, mi_col, bsize);
-nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
+set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col, bsize);
+nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
 1, &dummy_rate, &dummy_dist, x->pc_root);
 break;
 case REFERENCE_PARTITION:
@@ -2977,8 +2971,8 @@ static void encode_nonrd_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
 &dummy_rate, &dummy_dist, 1, INT64_MAX,
 x->pc_root);
 } else {
-copy_partitioning(cm, mi_8x8, prev_mi_8x8);
-nonrd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col,
+copy_partitioning(cm, mi, prev_mi);
+nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col,
 BLOCK_64X64, 1, &dummy_rate, &dummy_dist,
 x->pc_root);
 }

View File

@@ -109,7 +109,7 @@ static int cost_segmap(int *segcounts, vp9_prob *probs) {
 }
 static void count_segs(VP9_COMP *cpi, const TileInfo *const tile,
-MODE_INFO **mi_8x8,
+MODE_INFO **mi,
 int *no_pred_segcounts,
 int (*temporal_predictor_count)[2],
 int *t_unpred_seg_counts,
@@ -121,7 +121,7 @@ static void count_segs(VP9_COMP *cpi, const TileInfo *const tile,
 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
 return;
-xd->mi = mi_8x8;
+xd->mi = mi;
 segment_id = xd->mi[0]->mbmi.segment_id;
 set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
@@ -131,7 +131,7 @@ static void count_segs(VP9_COMP *cpi, const TileInfo *const tile,
 // Temporal prediction not allowed on key frames
 if (cm->frame_type != KEY_FRAME) {
-const BLOCK_SIZE bsize = mi_8x8[0]->mbmi.sb_type;
+const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
 // Test to see if the segment id matches the predicted value.
 const int pred_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map,
 bsize, mi_row, mi_col);
@@ -143,14 +143,14 @@ static void count_segs(VP9_COMP *cpi, const TileInfo *const tile,
 xd->mi[0]->mbmi.seg_id_predicted = pred_flag;
 temporal_predictor_count[pred_context][pred_flag]++;
-// Update the "unpredicted" segment count
 if (!pred_flag)
+// Update the "unpredicted" segment count
 t_unpred_seg_counts[segment_id]++;
 }
 }
 static void count_segs_sb(VP9_COMP *cpi, const TileInfo *const tile,
-MODE_INFO **mi_8x8,
+MODE_INFO **mi,
 int *no_pred_segcounts,
 int (*temporal_predictor_count)[2],
 int *t_unpred_seg_counts,
@@ -164,22 +164,22 @@ static void count_segs_sb(VP9_COMP *cpi, const TileInfo *const tile,
 if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
 return;
-bw = num_8x8_blocks_wide_lookup[mi_8x8[0]->mbmi.sb_type];
-bh = num_8x8_blocks_high_lookup[mi_8x8[0]->mbmi.sb_type];
+bw = num_8x8_blocks_wide_lookup[mi[0]->mbmi.sb_type];
+bh = num_8x8_blocks_high_lookup[mi[0]->mbmi.sb_type];
 if (bw == bs && bh == bs) {
-count_segs(cpi, tile, mi_8x8, no_pred_segcounts, temporal_predictor_count,
+count_segs(cpi, tile, mi, no_pred_segcounts, temporal_predictor_count,
 t_unpred_seg_counts, bs, bs, mi_row, mi_col);
 } else if (bw == bs && bh < bs) {
-count_segs(cpi, tile, mi_8x8, no_pred_segcounts, temporal_predictor_count,
+count_segs(cpi, tile, mi, no_pred_segcounts, temporal_predictor_count,
 t_unpred_seg_counts, bs, hbs, mi_row, mi_col);
-count_segs(cpi, tile, mi_8x8 + hbs * mis, no_pred_segcounts,
+count_segs(cpi, tile, mi + hbs * mis, no_pred_segcounts,
 temporal_predictor_count, t_unpred_seg_counts, bs, hbs,
 mi_row + hbs, mi_col);
 } else if (bw < bs && bh == bs) {
-count_segs(cpi, tile, mi_8x8, no_pred_segcounts, temporal_predictor_count,
+count_segs(cpi, tile, mi, no_pred_segcounts, temporal_predictor_count,
 t_unpred_seg_counts, hbs, bs, mi_row, mi_col);
-count_segs(cpi, tile, mi_8x8 + hbs,
+count_segs(cpi, tile, mi + hbs,
 no_pred_segcounts, temporal_predictor_count, t_unpred_seg_counts,
 hbs, bs, mi_row, mi_col + hbs);
 } else {
@@ -192,7 +192,7 @@ static void count_segs_sb(VP9_COMP *cpi, const TileInfo *const tile,
 const int mi_dc = hbs * (n & 1);
 const int mi_dr = hbs * (n >> 1);
-count_segs_sb(cpi, tile, &mi_8x8[mi_dr * mis + mi_dc],
+count_segs_sb(cpi, tile, &mi[mi_dr * mis + mi_dc],
 no_pred_segcounts, temporal_predictor_count,
 t_unpred_seg_counts,
 mi_row + mi_dr, mi_col + mi_dc, subsize);
@@ -217,9 +217,6 @@ void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
 vp9_prob t_pred_tree[SEG_TREE_PROBS];
 vp9_prob t_nopred_prob[PREDICTION_PROBS];
-const int mis = cm->mi_stride;
-MODE_INFO **mi_ptr, **mi;
 // Set default state for the segment tree probabilities and the
 // temporal coding probabilities
 vpx_memset(seg->tree_probs, 255, sizeof(seg->tree_probs));
@@ -229,12 +226,13 @@ void vp9_choose_segmap_coding_method(VP9_COMP *cpi) {
 // predicts this one
 for (tile_col = 0; tile_col < 1 << cm->log2_tile_cols; tile_col++) {
 TileInfo tile;
+MODE_INFO **mi_ptr;
 vp9_tile_init(&tile, cm, 0, tile_col);
 mi_ptr = cm->mi_grid_visible + tile.mi_col_start;
 for (mi_row = 0; mi_row < cm->mi_rows;
-mi_row += 8, mi_ptr += 8 * mis) {
-mi = mi_ptr;
+mi_row += 8, mi_ptr += 8 * cm->mi_stride) {
+MODE_INFO **mi = mi_ptr;
 for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
 mi_col += 8, mi += 8)
 count_segs_sb(cpi, &tile, mi, no_pred_segcounts,