Finally removing all old block size constants.
Change-Id: I3aae21e88b876d53ecc955260479980ffe04ad8d
commit b9c7d04e95
parent 9f988a2edf
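This change drops the legacy long-form block size names (BLOCK_SIZE_AB4X4, BLOCK_SIZE_SB8X8, BLOCK_SIZE_MB16X16, BLOCK_SIZE_SB64X64, and so on) and keeps only the short aliases (BLOCK_4X4, BLOCK_8X8, BLOCK_16X16, BLOCK_64X64, ...); call sites that clamped a block size with a ternary now read MAX(bsize, BLOCK_8X8). Below is a minimal standalone C sketch of the resulting enum and clamp; the MAX macro and main() here are illustrative only and are not part of the commit.

#include <stdio.h>

/* New enum as it appears after this commit (short names only). */
typedef enum BLOCK_SIZE_TYPE {
  BLOCK_4X4,
  BLOCK_4X8,
  BLOCK_8X4,
  BLOCK_8X8,
  BLOCK_8X16,
  BLOCK_16X8,
  BLOCK_16X16,
  BLOCK_16X32,
  BLOCK_32X16,
  BLOCK_32X32,
  BLOCK_32X64,
  BLOCK_64X32,
  BLOCK_64X64,
  BLOCK_SIZE_TYPES
} BLOCK_SIZE_TYPE;

/* Illustrative helper: libvpx defines its own MAX elsewhere; defined here
 * only so this sketch compiles on its own. */
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void) {
  BLOCK_SIZE_TYPE bsize = BLOCK_4X4;
  /* Old call sites: (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize
   * New call sites: MAX(bsize, BLOCK_8X8) -- the same clamp to at least 8x8. */
  BLOCK_SIZE_TYPE clamped = MAX(bsize, BLOCK_8X8);
  printf("bsize=%d clamped=%d (BLOCK_8X8=%d)\n", bsize, clamped, BLOCK_8X8);
  return 0;
}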
@@ -22,20 +22,20 @@
 #define MI_MASK (MI_BLOCK_SIZE - 1)
 
 typedef enum BLOCK_SIZE_TYPE {
-BLOCK_SIZE_AB4X4, BLOCK_4X4 = BLOCK_SIZE_AB4X4,
+BLOCK_4X4,
-BLOCK_SIZE_SB4X8, BLOCK_4X8 = BLOCK_SIZE_SB4X8,
+BLOCK_4X8,
-BLOCK_SIZE_SB8X4, BLOCK_8X4 = BLOCK_SIZE_SB8X4,
+BLOCK_8X4,
-BLOCK_SIZE_SB8X8, BLOCK_8X8 = BLOCK_SIZE_SB8X8,
+BLOCK_8X8,
-BLOCK_SIZE_SB8X16, BLOCK_8X16 = BLOCK_SIZE_SB8X16,
+BLOCK_8X16,
-BLOCK_SIZE_SB16X8, BLOCK_16X8 = BLOCK_SIZE_SB16X8,
+BLOCK_16X8,
-BLOCK_SIZE_MB16X16, BLOCK_16X16 = BLOCK_SIZE_MB16X16,
+BLOCK_16X16,
-BLOCK_SIZE_SB16X32, BLOCK_16X32 = BLOCK_SIZE_SB16X32,
+BLOCK_16X32,
-BLOCK_SIZE_SB32X16, BLOCK_32X16 = BLOCK_SIZE_SB32X16,
+BLOCK_32X16,
-BLOCK_SIZE_SB32X32, BLOCK_32X32 = BLOCK_SIZE_SB32X32,
+BLOCK_32X32,
-BLOCK_SIZE_SB32X64, BLOCK_32X64 = BLOCK_SIZE_SB32X64,
+BLOCK_32X64,
-BLOCK_SIZE_SB64X32, BLOCK_64X32 = BLOCK_SIZE_SB64X32,
+BLOCK_64X32,
-BLOCK_SIZE_SB64X64, BLOCK_64X64 = BLOCK_SIZE_SB64X64,
+BLOCK_64X64,
-BLOCK_SIZE_TYPES, BLOCK_MAX_SB_SEGMENTS = BLOCK_SIZE_TYPES
+BLOCK_SIZE_TYPES
 } BLOCK_SIZE_TYPE;
 
 typedef enum PARTITION_TYPE {
@@ -52,8 +52,8 @@ static MB_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb, int b) {
 
 if (is_inter_block(&cur_mb->mbmi)) {
 return DC_PRED;
-} else if (cur_mb->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
+} else if (cur_mb->mbmi.sb_type < BLOCK_8X8) {
-return ((cur_mb->bmi + 1 + b)->as_mode);
+return (cur_mb->bmi + 1 + b)->as_mode;
 } else {
 return cur_mb->mbmi.mode;
 }
@@ -70,8 +70,8 @@ static MB_PREDICTION_MODE above_block_mode(const MODE_INFO *cur_mb,
 
 if (is_inter_block(&cur_mb->mbmi)) {
 return DC_PRED;
-} else if (cur_mb->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
+} else if (cur_mb->mbmi.sb_type < BLOCK_8X8) {
-return ((cur_mb->bmi + 2 + b)->as_mode);
+return (cur_mb->bmi + 2 + b)->as_mode;
 } else {
 return cur_mb->mbmi.mode;
 }
@@ -121,7 +121,7 @@ static void clamp_mv_ref(const MACROBLOCKD *xd, int_mv *mv) {
 static INLINE int_mv get_sub_block_mv(const MODE_INFO *candidate,
 int check_sub_blocks, int which_mv,
 int search_col, int block_idx) {
-return (check_sub_blocks && candidate->mbmi.sb_type < BLOCK_SIZE_SB8X8
+return (check_sub_blocks && candidate->mbmi.sb_type < BLOCK_8X8
 ? candidate->bmi[idx_n_column_to_subblock[block_idx][search_col == 0]]
 .as_mv[which_mv]
 : candidate->mbmi.mv[which_mv]);
@@ -250,7 +250,7 @@ static int check_bsize_coverage(VP9_COMMON *cm, int mi_row, int mi_col,
 
 // frame width/height are multiples of 8, hence 8x8 block should always
 // pass the above check
-assert(bsize > BLOCK_SIZE_SB8X8);
+assert(bsize > BLOCK_8X8);
 
 // return the node index in the prob tree for binary coding
 // only allow horizontal/split partition types
@@ -285,8 +285,8 @@ static void build_inter_predictors(int plane, int block,
 
 assert(x < (4 << bwl));
 assert(y < (4 << bhl));
-assert(mi->mbmi.sb_type < BLOCK_SIZE_SB8X8 || 4 << pred_w == (4 << bwl));
+assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_w == (4 << bwl));
-assert(mi->mbmi.sb_type < BLOCK_SIZE_SB8X8 || 4 << pred_h == (4 << bhl));
+assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_h == (4 << bhl));
 
 for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
 // source
@@ -303,7 +303,7 @@ static void build_inter_predictors(int plane, int block,
 // same MV (the average of the 4 luma MVs) but we could do something
 // smarter for non-4:2:0. Just punt for now, pending the changes to get
 // rid of SPLITMV mode entirely.
-const MV mv = mi->mbmi.sb_type < BLOCK_SIZE_SB8X8
+const MV mv = mi->mbmi.sb_type < BLOCK_8X8
 ? (plane == 0 ? mi->bmi[block].as_mv[which_mv].as_mv
 : mi_mv_pred_q4(mi, which_mv))
 : mi->mbmi.mv[which_mv].as_mv;
@@ -164,7 +164,7 @@ static void read_intra_frame_mode_info(VP9D_COMP *pbi, MODE_INFO *m,
 mbmi->ref_frame[0] = INTRA_FRAME;
 mbmi->ref_frame[1] = NONE;
 
-if (bsize >= BLOCK_SIZE_SB8X8) {
+if (bsize >= BLOCK_8X8) {
 const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis);
 const MB_PREDICTION_MODE L = xd->left_available ?
 left_block_mode(m, 0) : DC_PRED;
@@ -386,7 +386,7 @@ static void read_intra_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
 mbmi->ref_frame[0] = INTRA_FRAME;
 mbmi->ref_frame[1] = NONE;
 
-if (bsize >= BLOCK_SIZE_SB8X8) {
+if (bsize >= BLOCK_8X8) {
 const int size_group = size_group_lookup[bsize];
 mbmi->mode = read_intra_mode(r, cm->fc.y_mode_prob[size_group]);
 cm->counts.y_mode[size_group][mbmi->mode]++;
@@ -459,13 +459,13 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
 
 if (vp9_segfeature_active(&xd->seg, mbmi->segment_id, SEG_LVL_SKIP))
 mbmi->mode = ZEROMV;
-else if (bsize >= BLOCK_SIZE_SB8X8)
+else if (bsize >= BLOCK_8X8)
 mbmi->mode = read_inter_mode(cm, r, inter_mode_ctx);
 
 mbmi->uv_mode = DC_PRED;
 
 // nearest, nearby
-if (bsize < BLOCK_SIZE_SB8X8 || mbmi->mode != ZEROMV) {
+if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
 vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[ref0], &nearest, &nearby);
 best_mv.as_int = mbmi->ref_mvs[ref0][0].as_int;
 }
@@ -479,14 +479,14 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
 ref1, mbmi->ref_mvs[ref1], cm->ref_frame_sign_bias,
 mi_row, mi_col);
 
-if (bsize < BLOCK_SIZE_SB8X8 || mbmi->mode != ZEROMV) {
+if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
 vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[ref1],
 &nearest_second, &nearby_second);
 best_mv_second.as_int = mbmi->ref_mvs[ref1][0].as_int;
 }
 }
 
-if (bsize < BLOCK_SIZE_SB8X8) {
+if (bsize < BLOCK_8X8) {
 const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; // 1 or 2
 const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; // 1 or 2
 int idx, idy;
@@ -434,7 +434,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
 vp9_write(bc, rf != INTRA_FRAME,
 vp9_get_pred_prob_intra_inter(pc, xd));
 
-if (bsize >= BLOCK_SIZE_SB8X8 && pc->tx_mode == TX_MODE_SELECT &&
+if (bsize >= BLOCK_8X8 && pc->tx_mode == TX_MODE_SELECT &&
 !(rf != INTRA_FRAME &&
 (skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
 write_selected_tx_size(cpi, mi->txfm_size, bsize, bc);
@@ -445,7 +445,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
 active_section = 6;
 #endif
 
-if (bsize >= BLOCK_SIZE_SB8X8) {
+if (bsize >= BLOCK_8X8) {
 write_intra_mode(bc, mode, pc->fc.y_mode_prob[size_group_lookup[bsize]]);
 } else {
 int idx, idy;
@@ -470,7 +470,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
 
 // If segment skip is not enabled code the mode.
 if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
-if (bsize >= BLOCK_SIZE_SB8X8) {
+if (bsize >= BLOCK_8X8) {
 write_sb_mv_ref(bc, mode, mv_ref_p);
 ++pc->counts.inter_mode[mi->mb_mode_context[rf]]
 [inter_mode_offset(mode)];
@@ -486,7 +486,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
 assert(mi->interp_filter == cpi->common.mcomp_filter_type);
 }
 
-if (bsize < BLOCK_SIZE_SB8X8) {
+if (bsize < BLOCK_8X8) {
 int j;
 MB_PREDICTION_MODE blockmode;
 int_mv blockmv;
@@ -544,10 +544,10 @@ static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO *m,
 
 write_skip_coeff(cpi, segment_id, m, bc);
 
-if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8 && c->tx_mode == TX_MODE_SELECT)
+if (m->mbmi.sb_type >= BLOCK_8X8 && c->tx_mode == TX_MODE_SELECT)
 write_selected_tx_size(cpi, m->mbmi.txfm_size, m->mbmi.sb_type, bc);
 
-if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
+if (m->mbmi.sb_type >= BLOCK_8X8) {
 const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis);
 const MB_PREDICTION_MODE L = xd->left_available ?
 left_block_mode(m, 0) : DC_PRED;
@@ -580,7 +580,7 @@ static void write_modes_b(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
 VP9_COMMON *const cm = &cpi->common;
 MACROBLOCKD *const xd = &cpi->mb.e_mbd;
 
-if (m->mbmi.sb_type < BLOCK_SIZE_SB8X8)
+if (m->mbmi.sb_type < BLOCK_8X8)
 if (xd->ab_index > 0)
 return;
 xd->mode_info_context = m;
@@ -621,11 +621,11 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
 
 partition = partition_lookup[bsl][m->mbmi.sb_type];
 
-if (bsize < BLOCK_SIZE_SB8X8)
+if (bsize < BLOCK_8X8)
 if (xd->ab_index > 0)
 return;
 
-if (bsize >= BLOCK_SIZE_SB8X8) {
+if (bsize >= BLOCK_8X8) {
 int pl;
 const int idx = check_bsize_coverage(cm, mi_row, mi_col, bsize);
 set_partition_seg_context(cm, xd, mi_row, mi_col);
@@ -672,8 +672,8 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
 }
 
 // update partition context
-if (bsize >= BLOCK_SIZE_SB8X8 &&
+if (bsize >= BLOCK_8X8 &&
-(bsize == BLOCK_SIZE_SB8X8 || partition != PARTITION_SPLIT)) {
+(bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) {
 set_partition_seg_context(cm, xd, mi_row, mi_col);
 update_partition_context(xd, subsize, bsize);
 }
@@ -412,7 +412,7 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
 // Note how often each mode chosen as best
 cpi->mode_chosen_counts[mb_mode_index]++;
 if (is_inter_block(mbmi)
-&& (mbmi->sb_type < BLOCK_SIZE_SB8X8 || mbmi->mode == NEWMV)) {
+&& (mbmi->sb_type < BLOCK_8X8 || mbmi->mode == NEWMV)) {
 int_mv best_mv, best_second_mv;
 const MV_REFERENCE_FRAME rf1 = mbmi->ref_frame[0];
 const MV_REFERENCE_FRAME rf2 = mbmi->ref_frame[1];
@@ -427,7 +427,7 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
 vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv);
 }
 
-if (bsize > BLOCK_SIZE_SB8X8 && mbmi->mode == NEWMV) {
+if (bsize > BLOCK_8X8 && mbmi->mode == NEWMV) {
 int i, j;
 for (j = 0; j < mi_height; ++j)
 for (i = 0; i < mi_width; ++i)
@@ -572,7 +572,7 @@ static void pick_sb_modes(VP9_COMP *cpi, int mi_row, int mi_col,
 
 x->rd_search = 1;
 
-if (bsize < BLOCK_SIZE_SB8X8) {
+if (bsize < BLOCK_8X8) {
 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
 // there is nothing to be done.
 if (xd->ab_index != 0)
@@ -769,7 +769,7 @@ static void encode_b(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
 if (sub_index != -1)
 *(get_sb_index(xd, bsize)) = sub_index;
 
-if (bsize < BLOCK_SIZE_SB8X8) {
+if (bsize < BLOCK_8X8) {
 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
 // there is nothing to be done.
 if (xd->ab_index > 0)
@@ -792,7 +792,7 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
 VP9_COMMON * const cm = &cpi->common;
 MACROBLOCK * const x = &cpi->mb;
 MACROBLOCKD * const xd = &x->e_mbd;
-BLOCK_SIZE_TYPE c1 = BLOCK_SIZE_SB8X8;
+BLOCK_SIZE_TYPE c1 = BLOCK_8X8;
 const int bsl = b_width_log2(bsize), bs = (1 << bsl) / 4;
 int UNINITIALIZED_IS_SAFE(pl);
 PARTITION_TYPE partition;
@@ -803,7 +803,7 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
 return;
 
 c1 = BLOCK_4X4;
-if (bsize >= BLOCK_SIZE_SB8X8) {
+if (bsize >= BLOCK_8X8) {
 set_partition_seg_context(cm, xd, mi_row, mi_col);
 pl = partition_plane_context(xd, bsize);
 c1 = *(get_sb_partitioning(x, bsize));
@@ -812,7 +812,7 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
 
 switch (partition) {
 case PARTITION_NONE:
-if (output_enabled && bsize >= BLOCK_SIZE_SB8X8)
+if (output_enabled && bsize >= BLOCK_8X8)
 cpi->partition_count[pl][PARTITION_NONE]++;
 encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, -1);
 break;
@@ -847,7 +847,7 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
 break;
 }
 
-if (partition != PARTITION_SPLIT || bsize == BLOCK_SIZE_SB8X8) {
+if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) {
 set_partition_seg_context(cm, xd, mi_row, mi_col);
 update_partition_context(xd, c1, bsize);
 }
@@ -1093,7 +1093,7 @@ static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
 int pixels_wide = 64, pixels_high = 64;
 
 vp9_zero(vt);
-set_offsets(cpi, mi_row, mi_col, BLOCK_SIZE_SB64X64);
+set_offsets(cpi, mi_row, mi_col, BLOCK_64X64);
 
 if (xd->mb_to_right_edge < 0)
 pixels_wide += (xd->mb_to_right_edge >> 3);
@@ -1121,12 +1121,12 @@ static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
 setup_pre_planes(xd, 1, second_ref_fb, mi_row, mi_col,
 &xd->scale_factor[1]);
 xd->mode_info_context->mbmi.ref_frame[0] = LAST_FRAME;
-xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB64X64;
+xd->mode_info_context->mbmi.sb_type = BLOCK_64X64;
 vp9_find_best_ref_mvs(xd, m->mbmi.ref_mvs[m->mbmi.ref_frame[0]],
 &nearest_mv, &near_mv);
 
 xd->mode_info_context->mbmi.mv[0] = nearest_mv;
-vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_SIZE_SB64X64);
+vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64);
 d = xd->plane[0].dst.buf;
 dp = xd->plane[0].dst.stride;
 }
@@ -1228,7 +1228,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
 
 subsize = get_subsize(bsize, partition);
 
-if (bsize < BLOCK_SIZE_SB8X8) {
+if (bsize < BLOCK_8X8) {
 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
 // there is nothing to be done.
 if (xd->ab_index != 0) {
@@ -1247,7 +1247,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
 
 if (cpi->sf.adjust_partitioning_from_last_frame) {
 // Check if any of the sub blocks are further split.
-if (partition == PARTITION_SPLIT && subsize > BLOCK_SIZE_SB8X8) {
+if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
 sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
 splits_below = 1;
 for (i = 0; i < 4; i++) {
@@ -1287,7 +1287,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
 pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
 subsize, get_block_context(x, subsize), INT64_MAX);
 if (last_part_rate != INT_MAX &&
-bsize >= BLOCK_SIZE_SB8X8 && mi_row + (mh >> 1) < cm->mi_rows) {
+bsize >= BLOCK_8X8 && mi_row + (mh >> 1) < cm->mi_rows) {
 int rt = 0;
 int64_t dt = 0;
 update_state(cpi, get_block_context(x, subsize), subsize, 0);
@@ -1310,7 +1310,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
 pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
 subsize, get_block_context(x, subsize), INT64_MAX);
 if (last_part_rate != INT_MAX &&
-bsize >= BLOCK_SIZE_SB8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
+bsize >= BLOCK_8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
 int rt = 0;
 int64_t dt = 0;
 update_state(cpi, get_block_context(x, subsize), subsize, 0);
@@ -1363,7 +1363,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
 last_part_rate += x->partition_cost[pl][partition];
 
 if (cpi->sf.adjust_partitioning_from_last_frame
-&& partition != PARTITION_SPLIT && bsize > BLOCK_SIZE_SB8X8
+&& partition != PARTITION_SPLIT && bsize > BLOCK_8X8
 && (mi_row + ms < cm->mi_rows || mi_row + (ms >> 1) == cm->mi_rows)
 && (mi_col + ms < cm->mi_cols || mi_col + (ms >> 1) == cm->mi_cols)) {
 BLOCK_SIZE_TYPE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
@@ -1426,7 +1426,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
 if (RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist)
 < RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)) {
 m->mbmi.sb_type = bsize;
-if (bsize >= BLOCK_SIZE_SB8X8)
+if (bsize >= BLOCK_8X8)
 *(get_sb_partitioning(x, bsize)) = subsize;
 chosen_rate = last_part_rate;
 chosen_dist = last_part_dist;
@@ -1434,7 +1434,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
 // If none was better set the partitioning to that...
 if (RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)
 > RDCOST(x->rdmult, x->rddiv, none_rate, none_dist)) {
-if (bsize >= BLOCK_SIZE_SB8X8)
+if (bsize >= BLOCK_8X8)
 *(get_sb_partitioning(x, bsize)) = bsize;
 chosen_rate = none_rate;
 chosen_dist = none_dist;
@@ -1444,11 +1444,11 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
 
 // We must have chosen a partitioning and encoding or we'll fail later on.
 // No other opportunities for success.
-if ( bsize == BLOCK_SIZE_SB64X64)
+if ( bsize == BLOCK_64X64)
 assert(chosen_rate < INT_MAX && chosen_dist < INT_MAX);
 
 if (do_recon)
-encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
+encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_64X64, bsize);
 
 *rate = chosen_rate;
 *dist = chosen_dist;
@@ -1526,7 +1526,7 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
 
 (void) *tp_orig;
 
-if (bsize < BLOCK_SIZE_SB8X8) {
+if (bsize < BLOCK_8X8) {
 // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
 // there is nothing to be done.
 if (xd->ab_index != 0) {
@@ -1542,7 +1542,7 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
 // PARTITION_SPLIT
 if (!cpi->sf.auto_min_max_partition_size ||
 bsize >= cpi->sf.min_partition_size) {
-if (bsize > BLOCK_SIZE_SB8X8) {
+if (bsize > BLOCK_8X8) {
 int r4 = 0;
 int64_t d4 = 0, sum_rd = 0;
 subsize = get_subsize(bsize, PARTITION_SPLIT);
@@ -1608,7 +1608,7 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
 block_context = x->sb8x8_context[xd->sb_index][xd->mb_index];
 } else if (bsize == BLOCK_32X32) {
 block_context = x->mb_context[xd->sb_index];
-} else if (bsize == BLOCK_SIZE_SB64X64) {
+} else if (bsize == BLOCK_64X64) {
 block_context = x->sb32_context;
 }
 
@@ -1695,26 +1695,26 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
 int64_t d;
 pick_sb_modes(cpi, mi_row, mi_col, &r, &d, bsize,
 get_block_context(x, bsize), best_rd);
-if (r != INT_MAX && bsize >= BLOCK_SIZE_SB8X8) {
+if (r != INT_MAX && bsize >= BLOCK_8X8) {
 set_partition_seg_context(cm, xd, mi_row, mi_col);
 pl = partition_plane_context(xd, bsize);
 r += x->partition_cost[pl][PARTITION_NONE];
 }
 
 if (r != INT_MAX &&
-(bsize == BLOCK_SIZE_SB8X8 ||
+(bsize == BLOCK_8X8 ||
 RDCOST(x->rdmult, x->rddiv, r, d) <
 RDCOST(x->rdmult, x->rddiv, srate, sdist))) {
 best_rd = MIN(best_rd, RDCOST(x->rdmult, x->rddiv, r, d));
 srate = r;
 sdist = d;
 larger_is_better = 1;
-if (bsize >= BLOCK_SIZE_SB8X8)
+if (bsize >= BLOCK_8X8)
 *(get_sb_partitioning(x, bsize)) = bsize;
 }
 }
 
-if (bsize == BLOCK_SIZE_SB8X8) {
+if (bsize == BLOCK_8X8) {
 int r4 = 0;
 int64_t d4 = 0, sum_rd = 0;
 subsize = get_subsize(bsize, PARTITION_SPLIT);
@@ -1760,7 +1760,7 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
 if (!cpi->sf.use_square_partition_only &&
 (!cpi->sf.less_rectangular_check ||!larger_is_better)) {
 // PARTITION_HORZ
-if (bsize >= BLOCK_SIZE_SB8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
+if (bsize >= BLOCK_8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
 int r2, r = 0;
 int64_t d2, d = 0, h_rd;
 subsize = get_subsize(bsize, PARTITION_HORZ);
@@ -1799,7 +1799,7 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
 }
 
 // PARTITION_VERT
-if (bsize >= BLOCK_SIZE_SB8X8 && mi_row + (ms >> 1) < cm->mi_rows) {
+if (bsize >= BLOCK_8X8 && mi_row + (ms >> 1) < cm->mi_rows) {
 int r2;
 int64_t d2, v_rd;
 subsize = get_subsize(bsize, PARTITION_VERT);
@@ -1845,9 +1845,9 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
 restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
 
 if (srate < INT_MAX && sdist < INT_MAX && do_recon)
-encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
+encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_64X64, bsize);
 
-if (bsize == BLOCK_SIZE_SB64X64) {
+if (bsize == BLOCK_64X64) {
 assert(tp_orig < *tp);
 assert(srate < INT_MAX);
 assert(sdist < INT_MAX);
@@ -1861,7 +1861,7 @@ static void rd_pick_reference_frame(VP9_COMP *cpi, int mi_row, int mi_col) {
 VP9_COMMON * const cm = &cpi->common;
 MACROBLOCK * const x = &cpi->mb;
 MACROBLOCKD * const xd = &x->e_mbd;
-int bsl = b_width_log2(BLOCK_SIZE_SB64X64), bs = 1 << bsl;
+int bsl = b_width_log2(BLOCK_64X64), bs = 1 << bsl;
 int ms = bs / 2;
 ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
 PARTITION_CONTEXT sl[8], sa[8];
@@ -1869,7 +1869,7 @@ static void rd_pick_reference_frame(VP9_COMP *cpi, int mi_row, int mi_col) {
 int r;
 int64_t d;
 
-save_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_SIZE_SB64X64);
+save_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_64X64);
 
 // Default is non mask (all reference frames allowed.
 cpi->ref_frame_mask = 0;
@@ -1878,17 +1878,17 @@ static void rd_pick_reference_frame(VP9_COMP *cpi, int mi_row, int mi_col) {
 if ((mi_row + (ms >> 1) < cm->mi_rows) &&
 (mi_col + (ms >> 1) < cm->mi_cols)) {
 cpi->set_ref_frame_mask = 1;
-pick_sb_modes(cpi, mi_row, mi_col, &r, &d, BLOCK_SIZE_SB64X64,
+pick_sb_modes(cpi, mi_row, mi_col, &r, &d, BLOCK_64X64,
-get_block_context(x, BLOCK_SIZE_SB64X64), INT64_MAX);
+get_block_context(x, BLOCK_64X64), INT64_MAX);
 set_partition_seg_context(cm, xd, mi_row, mi_col);
-pl = partition_plane_context(xd, BLOCK_SIZE_SB64X64);
+pl = partition_plane_context(xd, BLOCK_64X64);
 r += x->partition_cost[pl][PARTITION_NONE];
 
-*(get_sb_partitioning(x, BLOCK_SIZE_SB64X64)) = BLOCK_SIZE_SB64X64;
+*(get_sb_partitioning(x, BLOCK_64X64)) = BLOCK_64X64;
 cpi->set_ref_frame_mask = 0;
 }
 
-restore_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_SIZE_SB64X64);
+restore_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_64X64);
 }
 
 static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
@@ -1923,13 +1923,13 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
 MODE_INFO *p = cm->prev_mi + idx_str;
 
 if (cpi->sf.use_one_partition_size_always) {
-set_offsets(cpi, mi_row, mi_col, BLOCK_SIZE_SB64X64);
+set_offsets(cpi, mi_row, mi_col, BLOCK_64X64);
 set_partitioning(cpi, m, cpi->sf.always_this_block_size);
-rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
 &dummy_rate, &dummy_dist, 1);
 } else if (cpi->sf.partition_by_variance) {
 choose_partitioning(cpi, cm->mi, mi_row, mi_col);
-rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
 &dummy_rate, &dummy_dist, 1);
 } else {
 if ((cpi->common.current_video_frame
@@ -1944,11 +1944,11 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
 &cpi->sf.min_partition_size,
 &cpi->sf.max_partition_size);
 }
-rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64,
 &dummy_rate, &dummy_dist, 1, INT64_MAX);
 } else {
 copy_partitioning(cpi, m, p);
-rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
 &dummy_rate, &dummy_dist, 1);
 }
 }
@@ -1959,7 +1959,7 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
 &cpi->sf.max_partition_size);
 }
 
-rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64,
 &dummy_rate, &dummy_dist, 1, INT64_MAX);
 }
 }
@@ -2276,10 +2276,8 @@ static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
 
 for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) {
 mi = mi_ptr;
-for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi += 8) {
-reset_skip_txfm_size_sb(cpi, mi, txfm_max, mi_row, mi_col,
-BLOCK_SIZE_SB64X64);
-}
+for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi += 8)
+reset_skip_txfm_size_sb(cpi, mi, txfm_max, mi_row, mi_col, BLOCK_64X64);
 }
 }
 
@@ -2498,7 +2496,7 @@ static void sum_intra_stats(VP9_COMP *cpi, MACROBLOCK *x) {
 const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
 
 ++cpi->y_uv_mode_count[m][uvm];
-if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
+if (xd->mode_info_context->mbmi.sb_type >= BLOCK_8X8) {
 const BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
 const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize);
 const int bsl = MIN(bwl, bhl);
@@ -2579,7 +2577,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
 cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
 else
 cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
-} else if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
+} else if (mbmi->sb_type < BLOCK_8X8) {
 cpi->zbin_mode_boost = SPLIT_MV_ZBIN_BOOST;
 } else {
 cpi->zbin_mode_boost = MV_ZBIN_BOOST;
@@ -2593,10 +2591,8 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
 }
 
 if (mbmi->ref_frame[0] == INTRA_FRAME) {
-vp9_encode_intra_block_y(
-cm, x, (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
-vp9_encode_intra_block_uv(
-cm, x, (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
+vp9_encode_intra_block_y(cm, x, MAX(bsize, BLOCK_8X8));
+vp9_encode_intra_block_uv(cm, x, MAX(bsize, BLOCK_8X8));
 if (output_enabled)
 sum_intra_stats(cpi, x);
 } else {
@@ -2616,18 +2612,14 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
 &xd->scale_factor[1]);
 
 
-vp9_build_inter_predictors_sb(
-xd, mi_row, mi_col,
-bsize < BLOCK_SIZE_SB8X8 ? BLOCK_SIZE_SB8X8 : bsize);
+vp9_build_inter_predictors_sb(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));
 }
 
-if (xd->mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME) {
-vp9_tokenize_sb(cpi, t, !output_enabled,
-(bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
+if (mbmi->ref_frame[0] == INTRA_FRAME) {
+vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
 } else if (!x->skip) {
-vp9_encode_sb(cm, x, (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
-vp9_tokenize_sb(cpi, t, !output_enabled,
-(bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
+vp9_encode_sb(cm, x, MAX(bsize, BLOCK_8X8));
+vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
 } else {
 int mb_skip_context = xd->left_available ? (mi - 1)->mbmi.mb_skip_coeff : 0;
 mb_skip_context += (mi - mis)->mbmi.mb_skip_coeff;
@@ -2635,8 +2627,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
 mbmi->mb_skip_coeff = 1;
 if (output_enabled)
 cm->counts.mbskip[mb_skip_context][1]++;
-vp9_reset_sb_tokens_context(
-xd, (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
+vp9_reset_sb_tokens_context(xd, MAX(bsize, BLOCK_8X8));
 }
 
 // copy skip flag on all mb_mode_info contexts in this SB
@@ -2645,7 +2636,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
 
 if (output_enabled) {
 if (cm->tx_mode == TX_MODE_SELECT &&
-mbmi->sb_type >= BLOCK_SIZE_SB8X8 &&
+mbmi->sb_type >= BLOCK_8X8 &&
 !(is_inter_block(mbmi) &&
 (mbmi->mb_skip_coeff ||
 vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_SKIP)))) {
@@ -1190,9 +1190,9 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
 struct macroblockd_plane *pd = &xd->plane[0];
 const int src_stride = p->src.stride;
 const int dst_stride = pd->dst.stride;
-uint8_t *src_init = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, ib,
+uint8_t *src_init = raster_block_offset_uint8(xd, BLOCK_8X8, 0, ib,
 p->src.buf, src_stride);
-uint8_t *dst_init = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, ib,
+uint8_t *dst_init = raster_block_offset_uint8(xd, BLOCK_8X8, 0, ib,
 pd->dst.buf, dst_stride);
 int16_t *src_diff, *coeff;
 
@@ -1235,7 +1235,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
 
 block = ib + idy * 2 + idx;
 xd->mode_info_context->bmi[block].as_mode = mode;
-src_diff = raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, block,
+src_diff = raster_block_offset_int16(xd, BLOCK_8X8, 0, block,
 p->src_diff);
 coeff = BLOCK_OFFSET(x->plane[0].coeff, block, 16);
 vp9_predict_intra_block(xd, block, 1,
@@ -1489,7 +1489,7 @@ static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x,
 int this_rate_tokenonly, this_rate, s;
 int64_t this_distortion;
 
-MB_PREDICTION_MODE last_mode = bsize <= BLOCK_SIZE_SB8X8 ?
+MB_PREDICTION_MODE last_mode = bsize <= BLOCK_8X8 ?
 TM_PRED : cpi->sf.last_chroma_intra_mode;
 
 for (mode = DC_PRED; mode <= last_mode; mode++) {
@@ -1541,15 +1541,13 @@ static void choose_intra_uv_mode(VP9_COMP *cpi, BLOCK_SIZE_TYPE bsize,
 // appropriate speed flag is set.
 if (cpi->sf.use_uv_intra_rd_estimate) {
 rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
-(bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 :
-bsize);
+bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
 // Else do a proper rd search for each possible transform size that may
 // be considered in the main rd loop.
 } else {
 rd_pick_intra_sbuv_mode(cpi, x,
 rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
-(bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8
-: bsize);
+bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
 }
 *mode_uv = x->e_mbd.mode_info_context->mbmi.uv_mode;
 }
@@ -1674,16 +1672,16 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi,
 const int height = plane_block_height(bsize, pd);
 int idx, idy;
 const int src_stride = x->plane[0].src.stride;
-uint8_t* const src = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+uint8_t* const src = raster_block_offset_uint8(xd, BLOCK_8X8, 0, i,
 x->plane[0].src.buf,
 src_stride);
-int16_t* src_diff = raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, i,
+int16_t* src_diff = raster_block_offset_int16(xd, BLOCK_8X8, 0, i,
 x->plane[0].src_diff);
 int16_t* coeff = BLOCK_OFFSET(x->plane[0].coeff, 16, i);
-uint8_t* const pre = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+uint8_t* const pre = raster_block_offset_uint8(xd, BLOCK_8X8, 0, i,
 pd->pre[0].buf,
 pd->pre[0].stride);
-uint8_t* const dst = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+uint8_t* const dst = raster_block_offset_uint8(xd, BLOCK_8X8, 0, i,
 pd->dst.buf,
 pd->dst.stride);
 int64_t thisdistortion = 0, thissse = 0;
@@ -1697,7 +1695,7 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi,
 
 if (mi->mbmi.ref_frame[1] > 0) {
 uint8_t* const second_pre =
-raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+raster_block_offset_uint8(xd, BLOCK_8X8, 0, i,
 pd->pre[1].buf, pd->pre[1].stride);
 vp9_build_inter_predictor(second_pre, pd->pre[1].stride,
 dst, pd->dst.stride,
@@ -1715,7 +1713,7 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi,
 int64_t ssz, rd, rd1, rd2;
 
 k += (idy * 2 + idx);
-src_diff = raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, k,
+src_diff = raster_block_offset_int16(xd, BLOCK_8X8, 0, k,
 x->plane[0].src_diff);
 coeff = BLOCK_OFFSET(x->plane[0].coeff, 16, k);
 x->fwd_txm4x4(src_diff, coeff, 16);
@@ -1780,17 +1778,17 @@ static INLINE int mv_check_bounds(MACROBLOCK *x, int_mv *mv) {
 static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
 MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
 x->plane[0].src.buf =
-raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_SB8X8, 0, i,
+raster_block_offset_uint8(&x->e_mbd, BLOCK_8X8, 0, i,
 x->plane[0].src.buf,
 x->plane[0].src.stride);
 assert(((intptr_t)x->e_mbd.plane[0].pre[0].buf & 0x7) == 0);
 x->e_mbd.plane[0].pre[0].buf =
-raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_SB8X8, 0, i,
+raster_block_offset_uint8(&x->e_mbd, BLOCK_8X8, 0, i,
 x->e_mbd.plane[0].pre[0].buf,
 x->e_mbd.plane[0].pre[0].stride);
 if (mbmi->ref_frame[1])
 x->e_mbd.plane[0].pre[1].buf =
-raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_SB8X8, 0, i,
+raster_block_offset_uint8(&x->e_mbd, BLOCK_8X8, 0, i,
 x->e_mbd.plane[0].pre[1].buf,
 x->e_mbd.plane[0].pre[1].stride);
 }
@@ -2217,7 +2215,7 @@ static int64_t rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x,
 *returntotrate = bsi->r;
 *returndistortion = bsi->d;
 *returnyrate = bsi->segment_yrate;
-*skippable = vp9_sby_is_skippable(&x->e_mbd, BLOCK_SIZE_SB8X8);
+*skippable = vp9_sby_is_skippable(&x->e_mbd, BLOCK_8X8);
 *psse = bsi->sse;
 mbmi->mode = bsi->modes[3];
 
@@ -3132,7 +3130,7 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
 x->skip_encode = 0;
 ctx->skip = 0;
 xd->mode_info_context->mbmi.ref_frame[0] = INTRA_FRAME;
-if (bsize >= BLOCK_SIZE_SB8X8) {
+if (bsize >= BLOCK_8X8) {
 if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly,
 &dist_y, &y_skip, bsize, tx_cache,
 best_rd) >= best_rd) {
@@ -3149,7 +3147,7 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
 return;
 }
 rd_pick_intra_sbuv_mode(cpi, x, &rate_uv, &rate_uv_tokenonly,
-&dist_uv, &uv_skip, BLOCK_SIZE_SB8X8);
+&dist_uv, &uv_skip, BLOCK_8X8);
 }
 
 if (y_skip && uv_skip) {
@@ -3355,7 +3353,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
 if (x->fast_ms > 2 && ref_frame != x->subblock_ref)
 continue;
 
-if (cpi->sf.use_avoid_tested_higherror && bsize >= BLOCK_SIZE_SB8X8) {
+if (cpi->sf.use_avoid_tested_higherror && bsize >= BLOCK_8X8) {
 if (!(ref_frame_mask & (1 << ref_frame))) {
 continue;
 }
@@ -3413,10 +3411,10 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
 mbmi->interp_filter = cm->mcomp_filter_type;
 vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
 
-if (bsize >= BLOCK_SIZE_SB8X8 &&
+if (bsize >= BLOCK_8X8 &&
 (this_mode == I4X4_PRED || this_mode == SPLITMV))
 continue;
-if (bsize < BLOCK_SIZE_SB8X8 &&
+if (bsize < BLOCK_8X8 &&
 !(this_mode == I4X4_PRED || this_mode == SPLITMV))
 continue;
 
@@ -3706,11 +3704,10 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
 // If even the 'Y' rd value of split is higher than best so far
 // then dont bother looking at UV
 vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col,
-BLOCK_SIZE_SB8X8);
+BLOCK_8X8);
-vp9_subtract_sbuv(x, BLOCK_SIZE_SB8X8);
+vp9_subtract_sbuv(x, BLOCK_8X8);
 super_block_uvrd_for_txfm(cm, x, &rate_uv, &distortion_uv,
-&uv_skippable, &uv_sse,
-BLOCK_SIZE_SB8X8, TX_4X4);
+&uv_skippable, &uv_sse, BLOCK_8X8, TX_4X4);
 rate2 += rate_uv;
 distortion2 += distortion_uv;
 skippable = skippable && uv_skippable;
@@ -3756,7 +3753,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
 const int mb_skip_allowed = !vp9_segfeature_active(seg, segment_id,
 SEG_LVL_SKIP);
 
-if (skippable && bsize >= BLOCK_SIZE_SB8X8) {
+if (skippable && bsize >= BLOCK_8X8) {
 // Back out the coefficient coding costs
 rate2 -= (rate_y + rate_uv);
 // for best yrd calculation
@@ -3985,8 +3982,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
 &rate_uv_tokenonly[uv_tx_size],
 &dist_uv[uv_tx_size],
 &skip_uv[uv_tx_size],
-(bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8
-: bsize);
+bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
 }
 }
 
@@ -4021,7 +4017,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
 }
 }
 
-if (best_rd == INT64_MAX && bsize < BLOCK_SIZE_SB8X8) {
+if (best_rd == INT64_MAX && bsize < BLOCK_8X8) {
 *returnrate = INT_MAX;
 *returndistortion = INT_MAX;
 return best_rd;
@@ -4070,13 +4066,13 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
 *mbmi = best_mbmode;
 x->skip |= best_skip2;
 if (best_mbmode.ref_frame[0] == INTRA_FRAME &&
-best_mbmode.sb_type < BLOCK_SIZE_SB8X8) {
+best_mbmode.sb_type < BLOCK_8X8) {
 for (i = 0; i < 4; i++)
 xd->mode_info_context->bmi[i].as_mode = best_bmodes[i].as_mode;
 }
 
 if (best_mbmode.ref_frame[0] != INTRA_FRAME &&
-best_mbmode.sb_type < BLOCK_SIZE_SB8X8) {
+best_mbmode.sb_type < BLOCK_8X8) {
 for (i = 0; i < 4; i++)
 xd->mode_info_context->bmi[i].as_mv[0].as_int =
 best_bmodes[i].as_mv[0].as_int;