consistently name VP9_COMMON variables #1

pc -> cm

Change-Id: If3e83404f574316fdd3b9aace2487b64efdb66f3
Author: James Zern
Date: 2013-08-22 20:03:08 -07:00
parent e80bf802a9
commit 924d74516a
5 changed files with 98 additions and 98 deletions

--- a/vp9/common/vp9_entropy.c
+++ b/vp9/common/vp9_entropy.c
@@ -436,11 +436,11 @@ const vp9_extra_bit vp9_extra_bits[12] = {
 
 #include "vp9/common/vp9_default_coef_probs.h"
 
-void vp9_default_coef_probs(VP9_COMMON *pc) {
-  vp9_copy(pc->fc.coef_probs[TX_4X4], default_coef_probs_4x4);
-  vp9_copy(pc->fc.coef_probs[TX_8X8], default_coef_probs_8x8);
-  vp9_copy(pc->fc.coef_probs[TX_16X16], default_coef_probs_16x16);
-  vp9_copy(pc->fc.coef_probs[TX_32X32], default_coef_probs_32x32);
+void vp9_default_coef_probs(VP9_COMMON *cm) {
+  vp9_copy(cm->fc.coef_probs[TX_4X4], default_coef_probs_4x4);
+  vp9_copy(cm->fc.coef_probs[TX_8X8], default_coef_probs_8x8);
+  vp9_copy(cm->fc.coef_probs[TX_16X16], default_coef_probs_16x16);
+  vp9_copy(cm->fc.coef_probs[TX_32X32], default_coef_probs_32x32);
 }
 
 // Neighborhood 5-tuples for various scans and blocksizes,

--- a/vp9/common/vp9_mvref_common.c
+++ b/vp9/common/vp9_mvref_common.c
@@ -36,7 +36,7 @@ static void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {
            xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN);
 }
 
-void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *pc,
+void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm,
                                    MACROBLOCKD *xd,
                                    int_mv *dst_nearest,
                                    int_mv *dst_near,

--- a/vp9/decoder/vp9_decodframe.c
+++ b/vp9/decoder/vp9_decodframe.c
@@ -255,13 +255,13 @@ static void decode_modes_b(VP9D_COMP *pbi, int mi_row, int mi_col,
 
 static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
                             vp9_reader* r, BLOCK_SIZE bsize) {
-  VP9_COMMON *const pc = &pbi->common;
+  VP9_COMMON *const cm = &pbi->common;
   MACROBLOCKD *const xd = &pbi->mb;
   const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
   PARTITION_TYPE partition = PARTITION_NONE;
   BLOCK_SIZE subsize;
 
-  if (mi_row >= pc->mi_rows || mi_col >= pc->mi_cols)
+  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
     return;
 
   if (bsize < BLOCK_8X8) {
@@ -269,21 +269,21 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
       return;
   } else {
     int pl;
-    const int idx = check_bsize_coverage(hbs, pc->mi_rows, pc->mi_cols,
+    const int idx = check_bsize_coverage(hbs, cm->mi_rows, cm->mi_cols,
                                          mi_row, mi_col);
-    set_partition_seg_context(pc, xd, mi_row, mi_col);
+    set_partition_seg_context(cm, xd, mi_row, mi_col);
     pl = partition_plane_context(xd, bsize);
     if (idx == 0)
       partition = treed_read(r, vp9_partition_tree,
-                             pc->fc.partition_prob[pc->frame_type][pl]);
+                             cm->fc.partition_prob[cm->frame_type][pl]);
     else if (idx > 0 &&
-             !vp9_read(r, pc->fc.partition_prob[pc->frame_type][pl][idx]))
+             !vp9_read(r, cm->fc.partition_prob[cm->frame_type][pl][idx]))
       partition = (idx == 1) ? PARTITION_HORZ : PARTITION_VERT;
     else
       partition = PARTITION_SPLIT;
 
-    pc->counts.partition[pl][partition]++;
+    cm->counts.partition[pl][partition]++;
   }
 
   subsize = get_subsize(bsize, partition);
@@ -296,13 +296,13 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
     case PARTITION_HORZ:
       decode_modes_b(pbi, mi_row, mi_col, r, subsize);
       *get_sb_index(xd, subsize) = 1;
-      if (mi_row + hbs < pc->mi_rows)
+      if (mi_row + hbs < cm->mi_rows)
         decode_modes_b(pbi, mi_row + hbs, mi_col, r, subsize);
       break;
     case PARTITION_VERT:
       decode_modes_b(pbi, mi_row, mi_col, r, subsize);
       *get_sb_index(xd, subsize) = 1;
-      if (mi_col + hbs < pc->mi_cols)
+      if (mi_col + hbs < cm->mi_cols)
        decode_modes_b(pbi, mi_row, mi_col + hbs, r, subsize);
       break;
     case PARTITION_SPLIT: {
@@ -320,7 +320,7 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
   // update partition context
   if (bsize >= BLOCK_8X8 &&
       (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) {
-    set_partition_seg_context(pc, xd, mi_row, mi_col);
+    set_partition_seg_context(cm, xd, mi_row, mi_col);
     update_partition_context(xd, subsize, bsize);
   }
 }
@@ -328,18 +328,18 @@ static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
 static void setup_token_decoder(VP9D_COMP *pbi,
                                 const uint8_t *data, size_t read_size,
                                 vp9_reader *r) {
-  VP9_COMMON *pc = &pbi->common;
+  VP9_COMMON *cm = &pbi->common;
   const uint8_t *data_end = pbi->source + pbi->source_sz;
 
   // Validate the calculated partition length. If the buffer
   // described by the partition can't be fully read, then restrict
   // it to the portion that can be (for EC mode) or throw an error.
   if (!read_is_valid(data, read_size, data_end))
-    vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                        "Truncated packet or corrupt tile length");
 
   if (vp9_reader_init(r, data, read_size))
-    vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
+    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                        "Failed to allocate bool decoder %d", 1);
 }
@@ -571,28 +571,28 @@ static void setup_frame_size_with_refs(VP9D_COMP *pbi,
 static void decode_tile(VP9D_COMP *pbi, vp9_reader *r) {
   const int num_threads = pbi->oxcf.max_threads;
-  VP9_COMMON *const pc = &pbi->common;
+  VP9_COMMON *const cm = &pbi->common;
   int mi_row, mi_col;
-  YV12_BUFFER_CONFIG *const fb = &pc->yv12_fb[pc->new_fb_idx];
+  YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[cm->new_fb_idx];
 
   if (pbi->do_loopfilter_inline) {
     if (num_threads > 1) {
       LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
       lf_data->frame_buffer = fb;
-      lf_data->cm = pc;
+      lf_data->cm = cm;
       lf_data->xd = pbi->mb;
       lf_data->stop = 0;
       lf_data->y_only = 0;
     }
-    vp9_loop_filter_frame_init(pc, pc->lf.filter_level);
+    vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
   }
 
-  for (mi_row = pc->cur_tile_mi_row_start; mi_row < pc->cur_tile_mi_row_end;
+  for (mi_row = cm->cur_tile_mi_row_start; mi_row < cm->cur_tile_mi_row_end;
        mi_row += MI_BLOCK_SIZE) {
     // For a SB there are 2 left contexts, each pertaining to a MB row within
-    vp9_zero(pc->left_context);
-    vp9_zero(pc->left_seg_context);
-    for (mi_col = pc->cur_tile_mi_col_start; mi_col < pc->cur_tile_mi_col_end;
+    vp9_zero(cm->left_context);
+    vp9_zero(cm->left_seg_context);
+    for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end;
          mi_col += MI_BLOCK_SIZE)
       decode_modes_sb(pbi, mi_row, mi_col, r, BLOCK_64X64);
@@ -605,7 +605,7 @@ static void decode_tile(VP9D_COMP *pbi, vp9_reader *r) {
         LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
 
         // decoding has completed: finish up the loop filter in this thread.
-        if (mi_row + MI_BLOCK_SIZE >= pc->cur_tile_mi_row_end) continue;
+        if (mi_row + MI_BLOCK_SIZE >= cm->cur_tile_mi_row_end) continue;
 
         vp9_worker_sync(&pbi->lf_worker);
         lf_data->start = lf_start;
@@ -613,7 +613,7 @@ static void decode_tile(VP9D_COMP *pbi, vp9_reader *r) {
         pbi->lf_worker.hook = vp9_loop_filter_worker;
         vp9_worker_launch(&pbi->lf_worker);
       } else {
-        vp9_loop_filter_rows(fb, pc, &pbi->mb, lf_start, mi_row, 0);
+        vp9_loop_filter_rows(fb, cm, &pbi->mb, lf_start, mi_row, 0);
       }
     }
   }
@@ -628,8 +628,8 @@ static void decode_tile(VP9D_COMP *pbi, vp9_reader *r) {
     } else {
       lf_start = mi_row - MI_BLOCK_SIZE;
     }
-    vp9_loop_filter_rows(fb, pc, &pbi->mb,
-                         lf_start, pc->mi_rows, 0);
+    vp9_loop_filter_rows(fb, cm, &pbi->mb,
+                         lf_start, cm->mi_rows, 0);
   }
 }
@@ -652,20 +652,20 @@ static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
 static const uint8_t *decode_tiles(VP9D_COMP *pbi, const uint8_t *data) {
   vp9_reader residual_bc;
-  VP9_COMMON *const pc = &pbi->common;
+  VP9_COMMON *const cm = &pbi->common;
   const uint8_t *const data_end = pbi->source + pbi->source_sz;
-  const int aligned_mi_cols = mi_cols_aligned_to_sb(pc->mi_cols);
-  const int tile_cols = 1 << pc->log2_tile_cols;
-  const int tile_rows = 1 << pc->log2_tile_rows;
+  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
+  const int tile_cols = 1 << cm->log2_tile_cols;
+  const int tile_rows = 1 << cm->log2_tile_rows;
   int tile_row, tile_col;
 
   // Note: this memset assumes above_context[0], [1] and [2]
   // are allocated as part of the same buffer.
-  vpx_memset(pc->above_context[0], 0,
+  vpx_memset(cm->above_context[0], 0,
              sizeof(ENTROPY_CONTEXT) * MAX_MB_PLANE * (2 * aligned_mi_cols));
-  vpx_memset(pc->above_seg_context, 0,
+  vpx_memset(cm->above_seg_context, 0,
              sizeof(PARTITION_CONTEXT) * aligned_mi_cols);
 
   if (pbi->oxcf.inv_tile_order) {
@@ -690,9 +690,9 @@ static const uint8_t *decode_tiles(VP9D_COMP *pbi, const uint8_t *data) {
     }
 
     for (tile_row = 0; tile_row < tile_rows; tile_row++) {
-      vp9_get_tile_row_offsets(pc, tile_row);
+      vp9_get_tile_row_offsets(cm, tile_row);
       for (tile_col = tile_cols - 1; tile_col >= 0; tile_col--) {
-        vp9_get_tile_col_offsets(pc, tile_col);
+        vp9_get_tile_col_offsets(cm, tile_col);
         setup_token_decoder(pbi, data_ptr2[tile_row][tile_col],
                             data_end - data_ptr2[tile_row][tile_col],
                             &residual_bc);
@@ -706,16 +706,16 @@ static const uint8_t *decode_tiles(VP9D_COMP *pbi, const uint8_t *data) {
     int has_more;
 
     for (tile_row = 0; tile_row < tile_rows; tile_row++) {
-      vp9_get_tile_row_offsets(pc, tile_row);
+      vp9_get_tile_row_offsets(cm, tile_row);
       for (tile_col = 0; tile_col < tile_cols; tile_col++) {
         size_t size;
 
-        vp9_get_tile_col_offsets(pc, tile_col);
+        vp9_get_tile_col_offsets(cm, tile_col);
 
         has_more = tile_col < tile_cols - 1 || tile_row < tile_rows - 1;
         if (has_more) {
           if (!read_is_valid(data, 4, data_end))
-            vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+            vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                                "Truncated packet or corrupt tile length");
 
           size = read_be32(data);
@@ -928,17 +928,17 @@ void vp9_init_dequantizer(VP9_COMMON *cm) {
 
 int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
   int i;
-  VP9_COMMON *const pc = &pbi->common;
+  VP9_COMMON *const cm = &pbi->common;
   MACROBLOCKD *const xd = &pbi->mb;
   const uint8_t *data = pbi->source;
   const uint8_t *data_end = pbi->source + pbi->source_sz;
   struct vp9_read_bit_buffer rb = { data, data_end, 0,
-                                    pc, error_handler };
+                                    cm, error_handler };
   const size_t first_partition_size = read_uncompressed_header(pbi, &rb);
-  const int keyframe = pc->frame_type == KEY_FRAME;
-  YV12_BUFFER_CONFIG *new_fb = &pc->yv12_fb[pc->new_fb_idx];
+  const int keyframe = cm->frame_type == KEY_FRAME;
+  YV12_BUFFER_CONFIG *new_fb = &cm->yv12_fb[cm->new_fb_idx];
 
   if (!first_partition_size) {
     // showing a frame directly
@@ -949,39 +949,39 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
   xd->corrupted = 0;
   new_fb->corrupted = 0;
   pbi->do_loopfilter_inline =
-      (pc->log2_tile_rows | pc->log2_tile_cols) == 0 && pc->lf.filter_level;
+      (cm->log2_tile_rows | cm->log2_tile_cols) == 0 && cm->lf.filter_level;
 
   if (!pbi->decoded_key_frame && !keyframe)
     return -1;
 
   if (!read_is_valid(data, first_partition_size, data_end))
-    vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                        "Truncated packet or corrupt header length");
 
-  xd->mode_info_context = pc->mi;
-  xd->prev_mode_info_context = pc->prev_mi;
-  xd->mode_info_stride = pc->mode_info_stride;
+  xd->mode_info_context = cm->mi;
+  xd->prev_mode_info_context = cm->prev_mi;
+  xd->mode_info_stride = cm->mode_info_stride;
 
-  init_dequantizer(pc, &pbi->mb);
+  init_dequantizer(cm, &pbi->mb);
 
-  pc->fc = pc->frame_contexts[pc->frame_context_idx];
+  cm->fc = cm->frame_contexts[cm->frame_context_idx];
 
-  vp9_zero(pc->counts);
+  vp9_zero(cm->counts);
 
   new_fb->corrupted |= read_compressed_header(pbi, data, first_partition_size);
 
-  setup_block_dptrs(xd, pc->subsampling_x, pc->subsampling_y);
+  setup_block_dptrs(xd, cm->subsampling_x, cm->subsampling_y);
 
   // clear out the coeff buffer
   for (i = 0; i < MAX_MB_PLANE; ++i)
     vp9_zero(xd->plane[i].qcoeff);
 
-  set_prev_mi(pc);
+  set_prev_mi(cm);
 
   *p_data_end = decode_tiles(pbi, data + first_partition_size);
 
-  pc->last_width = pc->width;
-  pc->last_height = pc->height;
+  cm->last_width = cm->width;
+  cm->last_height = cm->height;
 
   new_fb->corrupted |= xd->corrupted;
@@ -989,21 +989,21 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
     if (keyframe && !new_fb->corrupted)
       pbi->decoded_key_frame = 1;
     else
-      vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+      vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                          "A stream must start with a complete key frame");
   }
 
-  if (!pc->error_resilient_mode && !pc->frame_parallel_decoding_mode) {
-    vp9_adapt_coef_probs(pc);
+  if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
+    vp9_adapt_coef_probs(cm);
 
-    if (!keyframe && !pc->intra_only) {
-      vp9_adapt_mode_probs(pc);
-      vp9_adapt_mv_probs(pc, xd->allow_high_precision_mv);
+    if (!keyframe && !cm->intra_only) {
+      vp9_adapt_mode_probs(cm);
+      vp9_adapt_mv_probs(cm, xd->allow_high_precision_mv);
     }
   }
 
-  if (pc->refresh_frame_context)
-    pc->frame_contexts[pc->frame_context_idx] = pc->fc;
+  if (cm->refresh_frame_context)
+    cm->frame_contexts[cm->frame_context_idx] = cm->fc;
 
   return 0;
 }

--- a/vp9/decoder/vp9_decodframe.h
+++ b/vp9/decoder/vp9_decodframe.h
@@ -15,7 +15,7 @@
 struct VP9Common;
 struct VP9Decompressor;
 
-void vp9_init_dequantizer(struct VP9Common *pc);
+void vp9_init_dequantizer(struct VP9Common *cm);
 int vp9_decode_frame(struct VP9Decompressor *cpi, const uint8_t **p_data_end);
 
 #endif  // VP9_DECODER_VP9_DECODFRAME_H_

--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -237,7 +237,7 @@ static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) {
 
 static void update_switchable_interp_probs(VP9_COMP *const cpi,
                                            vp9_writer* const bc) {
-  VP9_COMMON *const pc = &cpi->common;
+  VP9_COMMON *const cm = &cpi->common;
   unsigned int branch_ct[SWITCHABLE_FILTERS + 1]
                         [SWITCHABLE_FILTERS - 1][2];
   vp9_prob new_prob[SWITCHABLE_FILTERS + 1][SWITCHABLE_FILTERS - 1];
@@ -246,21 +246,21 @@ static void update_switchable_interp_probs(VP9_COMP *const cpi,
     vp9_tree_probs_from_distribution(
         vp9_switchable_interp_tree,
         new_prob[j], branch_ct[j],
-        pc->counts.switchable_interp[j], 0);
+        cm->counts.switchable_interp[j], 0);
   }
 
   for (j = 0; j <= SWITCHABLE_FILTERS; ++j) {
     for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i) {
-      vp9_cond_prob_diff_update(bc, &pc->fc.switchable_interp_prob[j][i],
+      vp9_cond_prob_diff_update(bc, &cm->fc.switchable_interp_prob[j][i],
                                 MODE_UPDATE_PROB, branch_ct[j][i]);
     }
   }
 
 #ifdef MODE_STATS
   if (!cpi->dummy_packing)
-    update_switchable_interp_stats(pc);
+    update_switchable_interp_stats(cm);
 #endif
 }
 
-static void update_inter_mode_probs(VP9_COMMON *pc, vp9_writer* const bc) {
+static void update_inter_mode_probs(VP9_COMMON *cm, vp9_writer* const bc) {
   int i, j;
 
   for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
@@ -269,10 +269,10 @@ static void update_inter_mode_probs(VP9_COMMON *pc, vp9_writer* const bc) {
     vp9_tree_probs_from_distribution(vp9_inter_mode_tree,
                                      new_prob, branch_ct,
-                                     pc->counts.inter_mode[i], NEARESTMV);
+                                     cm->counts.inter_mode[i], NEARESTMV);
 
     for (j = 0; j < INTER_MODES - 1; ++j)
-      vp9_cond_prob_diff_update(bc, &pc->fc.inter_mode_probs[i][j],
+      vp9_cond_prob_diff_update(bc, &cm->fc.inter_mode_probs[i][j],
                                 MODE_UPDATE_PROB, branch_ct[j]);
   }
 }
@@ -356,39 +356,39 @@ static void write_segment_id(vp9_writer *w, const struct segmentation *seg,
 
 // This function encodes the reference frame
 static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) {
-  VP9_COMMON *const pc = &cpi->common;
+  VP9_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &cpi->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *mi = &xd->mode_info_context->mbmi;
   const int segment_id = mi->segment_id;
-  int seg_ref_active = vp9_segfeature_active(&pc->seg, segment_id,
+  int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id,
                                              SEG_LVL_REF_FRAME);
   // If segment level coding of this signal is disabled...
   // or the segment allows multiple reference frame options
   if (!seg_ref_active) {
     // does the feature use compound prediction or not
     // (if not specified at the frame/segment level)
-    if (pc->comp_pred_mode == HYBRID_PREDICTION) {
+    if (cm->comp_pred_mode == HYBRID_PREDICTION) {
       vp9_write(bc, mi->ref_frame[1] > INTRA_FRAME,
-                vp9_get_pred_prob_comp_inter_inter(pc, xd));
+                vp9_get_pred_prob_comp_inter_inter(cm, xd));
     } else {
       assert((mi->ref_frame[1] <= INTRA_FRAME) ==
-             (pc->comp_pred_mode == SINGLE_PREDICTION_ONLY));
+             (cm->comp_pred_mode == SINGLE_PREDICTION_ONLY));
     }
 
     if (mi->ref_frame[1] > INTRA_FRAME) {
       vp9_write(bc, mi->ref_frame[0] == GOLDEN_FRAME,
-                vp9_get_pred_prob_comp_ref_p(pc, xd));
+                vp9_get_pred_prob_comp_ref_p(cm, xd));
     } else {
       vp9_write(bc, mi->ref_frame[0] != LAST_FRAME,
-                vp9_get_pred_prob_single_ref_p1(pc, xd));
+                vp9_get_pred_prob_single_ref_p1(cm, xd));
       if (mi->ref_frame[0] != LAST_FRAME)
         vp9_write(bc, mi->ref_frame[0] != GOLDEN_FRAME,
-                  vp9_get_pred_prob_single_ref_p2(pc, xd));
+                  vp9_get_pred_prob_single_ref_p2(cm, xd));
     }
   } else {
     assert(mi->ref_frame[1] <= INTRA_FRAME);
-    assert(vp9_get_segdata(&pc->seg, segment_id, SEG_LVL_REF_FRAME) ==
+    assert(vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) ==
           mi->ref_frame[0]);
   }
@@ -397,11 +397,11 @@ static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) {
 }
 
 static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
-  VP9_COMMON *const pc = &cpi->common;
-  const nmv_context *nmvc = &pc->fc.nmvc;
+  VP9_COMMON *const cm = &cpi->common;
+  const nmv_context *nmvc = &cm->fc.nmvc;
   MACROBLOCK *const x = &cpi->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
-  struct segmentation *seg = &pc->seg;
+  struct segmentation *seg = &cm->seg;
   MB_MODE_INFO *const mi = &m->mbmi;
   const MV_REFERENCE_FRAME rf = mi->ref_frame[0];
   const MB_PREDICTION_MODE mode = mi->mode;
@@ -410,7 +410,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
   const BLOCK_SIZE bsize = mi->sb_type;
   const int allow_hp = xd->allow_high_precision_mv;
 
-  x->partition_info = x->pi + (m - pc->mi);
+  x->partition_info = x->pi + (m - cm->mi);
 
 #ifdef ENTROPY_STATS
   active_section = 9;
@@ -432,9 +432,9 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
   if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
     vp9_write(bc, rf != INTRA_FRAME,
-              vp9_get_pred_prob_intra_inter(pc, xd));
+              vp9_get_pred_prob_intra_inter(cm, xd));
 
-  if (bsize >= BLOCK_8X8 && pc->tx_mode == TX_MODE_SELECT &&
+  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
       !(rf != INTRA_FRAME &&
         (skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
     write_selected_tx_size(cpi, mi->tx_size, bsize, bc);
@@ -446,7 +446,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
 #endif
 
     if (bsize >= BLOCK_8X8) {
-      write_intra_mode(bc, mode, pc->fc.y_mode_prob[size_group_lookup[bsize]]);
+      write_intra_mode(bc, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]);
     } else {
       int idx, idy;
       const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
@@ -454,11 +454,11 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
       for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
         for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
           const MB_PREDICTION_MODE bm = m->bmi[idy * 2 + idx].as_mode;
-          write_intra_mode(bc, bm, pc->fc.y_mode_prob[0]);
+          write_intra_mode(bc, bm, cm->fc.y_mode_prob[0]);
         }
       }
     }
-    write_intra_mode(bc, mi->uv_mode, pc->fc.uv_mode_prob[mode]);
+    write_intra_mode(bc, mi->uv_mode, cm->fc.uv_mode_prob[mode]);
   } else {
     vp9_prob *mv_ref_p;
     encode_ref_frame(cpi, bc);
@@ -472,18 +472,18 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
     if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
       if (bsize >= BLOCK_8X8) {
         write_sb_mv_ref(bc, mode, mv_ref_p);
-        ++pc->counts.inter_mode[mi->mode_context[rf]]
+        ++cm->counts.inter_mode[mi->mode_context[rf]]
                                [inter_mode_offset(mode)];
       }
     }
 
-    if (pc->mcomp_filter_type == SWITCHABLE) {
+    if (cm->mcomp_filter_type == SWITCHABLE) {
       const int ctx = vp9_get_pred_context_switchable_interp(xd);
       write_token(bc, vp9_switchable_interp_tree,
-                  pc->fc.switchable_interp_prob[ctx],
+                  cm->fc.switchable_interp_prob[ctx],
                   &vp9_switchable_interp_encodings[mi->interp_filter]);
     } else {
-      assert(mi->interp_filter == pc->mcomp_filter_type);
+      assert(mi->interp_filter == cm->mcomp_filter_type);
     }
 
     if (bsize < BLOCK_8X8) {
@@ -499,7 +499,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
           blockmode = x->partition_info->bmi[j].mode;
           blockmv = m->bmi[j].as_mv[0];
           write_sb_mv_ref(bc, blockmode, mv_ref_p);
-          ++pc->counts.inter_mode[mi->mode_context[rf]]
+          ++cm->counts.inter_mode[mi->mode_context[rf]]
                                  [inter_mode_offset(blockmode)];
 
           if (blockmode == NEWMV) {
@@ -1458,7 +1458,7 @@ void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
   vp9_compute_update_table();
 
 #ifdef ENTROPY_STATS
-  if (pc->frame_type == INTER_FRAME)
+  if (cm->frame_type == INTER_FRAME)
     active_section = 0;
   else
     active_section = 7;