Move pre, second_pre to per-plane MACROBLOCKD data

Continue moving framebuffers to per-plane data.

Change-Id: I237e5a998b364c4ec20316e7249206c0bff8631a
John Koleszar 2013-04-19 19:16:14 -07:00
parent f12509f640
commit a443447b8b
11 changed files with 146 additions and 118 deletions
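
The diff below removes the whole-frame YV12_BUFFER_CONFIG pre/second_pre members from MACROBLOCKD and addresses reference buffers through per-plane entries instead. A minimal sketch of the layout implied by the fields used in this patch (the real definitions in vp9_blockd.h carry many more members, so treat this as illustrative only):

    #include <stdint.h>

    #define MAX_MB_PLANE 3          /* Y, U, V */

    struct buf_2d {
      uint8_t *buf;                 /* top-left sample of this plane's buffer */
      int stride;                   /* distance in bytes between rows */
    };

    struct macroblockd_plane {      /* one entry per plane in xd->plane[] */
      struct buf_2d dst;            /* reconstruction destination */
      struct buf_2d pre[2];         /* pre[0]: first reference, pre[1]: second */
      int subsampling_x;            /* 0 for Y, typically 1 for U/V in 4:2:0 */
      int subsampling_y;
    };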

View File

@ -365,8 +365,6 @@ typedef struct macroblockd {
/* 16 Y blocks, 4 U, 4 V, each with 16 entries. */
BLOCKD block[24];
YV12_BUFFER_CONFIG pre; /* Filtered copy of previous frame reconstruction */
YV12_BUFFER_CONFIG second_pre;
struct scale_factors scale_factor[2];
struct scale_factors scale_factor_uv[2];

View File

@ -44,13 +44,13 @@ static void setup_macroblock(MACROBLOCKD *mb, BLOCKSET bs) {
u2 = NULL;
v2 = NULL;
} else {
y = &mb->pre.y_buffer;
u = &mb->pre.u_buffer;
v = &mb->pre.v_buffer;
y = &mb->plane[0].pre[0].buf;
u = &mb->plane[1].pre[0].buf;
v = &mb->plane[2].pre[0].buf;
y2 = &mb->second_pre.y_buffer;
u2 = &mb->second_pre.u_buffer;
v2 = &mb->second_pre.v_buffer;
y2 = &mb->plane[0].pre[1].buf;
u2 = &mb->plane[1].pre[1].buf;
v2 = &mb->plane[2].pre[1].buf;
}
// luma
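
The hunk above is effectively the old-to-new field mapping used throughout the rest of the patch: pre.y_buffer/u_buffer/v_buffer become plane[0..2].pre[0].buf and the second_pre counterparts become plane[0..2].pre[1].buf. A hypothetical accessor (not part of the patch) that spells out the lookup, using the trimmed stand-in types from the sketch near the top of this page:

    #include <stdint.h>

    struct buf_2d { uint8_t *buf; int stride; };          /* trimmed stand-in */
    struct macroblockd_plane { struct buf_2d pre[2]; };   /* trimmed stand-in */

    /* plane_idx: 0 = Y, 1 = U, 2 = V; which_ref: 0 = first, 1 = second
     * reference. Returns the address of the buffer pointer, matching the
     * &mb->plane[...].pre[...].buf expressions in the hunk above. */
    static uint8_t **pre_buffer(struct macroblockd_plane *plane,
                                int plane_idx, int which_ref) {
      return &plane[plane_idx].pre[which_ref].buf;
    }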

View File

@ -581,8 +581,9 @@ void vp9_build_inter_predictors_sby(MACROBLOCKD *xd,
struct build_inter_predictors_args args = {
xd, mb_col * 16, mb_row * 16,
{xd->plane[0].dst.buf, NULL, NULL}, {xd->plane[0].dst.stride, 0, 0},
{{xd->pre.y_buffer, NULL, NULL}, {xd->second_pre.y_buffer, NULL, NULL}},
{{xd->pre.y_stride, 0, 0}, {xd->second_pre.y_stride, 0, 0}},
{{xd->plane[0].pre[0].buf, NULL, NULL},
{xd->plane[0].pre[1].buf, NULL, NULL}},
{{xd->plane[0].pre[0].stride, 0, 0}, {xd->plane[0].pre[1].stride, 0, 0}},
};
// TODO(jkoleszar): This is a hack no matter where you put it, but does it
@ -600,10 +601,10 @@ void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd,
xd, mb_col * 16, mb_row * 16,
{NULL, xd->plane[1].dst.buf, xd->plane[2].dst.buf},
{0, xd->plane[1].dst.stride, xd->plane[1].dst.stride},
{{NULL, xd->pre.u_buffer, xd->pre.v_buffer},
{NULL, xd->second_pre.u_buffer, xd->second_pre.v_buffer}},
{{0, xd->pre.uv_stride, xd->pre.uv_stride},
{0, xd->second_pre.uv_stride, xd->second_pre.uv_stride}},
{{NULL, xd->plane[1].pre[0].buf, xd->plane[2].pre[0].buf},
{NULL, xd->plane[1].pre[1].buf, xd->plane[2].pre[1].buf}},
{{0, xd->plane[1].pre[0].stride, xd->plane[1].pre[0].stride},
{0, xd->plane[1].pre[1].stride, xd->plane[1].pre[1].stride}},
};
foreach_predicted_block_uv(xd, bsize, build_inter_predictors, &args);
}
@ -889,8 +890,8 @@ static int get_implicit_compoundinter_weight(MACROBLOCKD *xd,
edge[3] = xd->mb_to_right_edge;
clamp_mvs = xd->mode_info_context->mbmi.need_to_clamp_secondmv;
base_pre = xd->second_pre.y_buffer;
pre_stride = xd->second_pre.y_stride;
base_pre = xd->plane[0].pre[1].buf;
pre_stride = xd->plane[0].pre[1].stride;
ymv.as_int = xd->mode_info_context->mbmi.mv[1].as_int;
// First generate the second predictor
scale = &xd->scale_factor[1];
@ -925,8 +926,8 @@ static int get_implicit_compoundinter_weight(MACROBLOCKD *xd,
metric_2 = get_consistency_metric(xd, tmp_y, tmp_ystride);
clamp_mvs = xd->mode_info_context->mbmi.need_to_clamp_mvs;
base_pre = xd->pre.y_buffer;
pre_stride = xd->pre.y_stride;
base_pre = xd->plane[0].pre[0].buf;
pre_stride = xd->plane[0].pre[0].stride;
ymv.as_int = xd->mode_info_context->mbmi.mv[0].as_int;
// Now generate the first predictor
scale = &xd->scale_factor[0];
@ -978,8 +979,8 @@ static void build_inter16x16_predictors_mby_w(MACROBLOCKD *xd,
xd->mode_info_context->mbmi.need_to_clamp_secondmv :
xd->mode_info_context->mbmi.need_to_clamp_mvs;
uint8_t *base_pre = which_mv ? xd->second_pre.y_buffer : xd->pre.y_buffer;
int pre_stride = which_mv ? xd->second_pre.y_stride : xd->pre.y_stride;
uint8_t *base_pre = xd->plane[0].pre[which_mv].buf;
int pre_stride = xd->plane[0].pre[which_mv].stride;
int_mv ymv;
struct scale_factors *scale = &xd->scale_factor[which_mv];
@ -1011,8 +1012,8 @@ static void build_inter16x16_predictors_mbuv_w(MACROBLOCKD *xd,
which_mv ? xd->mode_info_context->mbmi.need_to_clamp_secondmv
: xd->mode_info_context->mbmi.need_to_clamp_mvs;
uint8_t *uptr, *vptr;
int pre_stride = which_mv ? xd->second_pre.uv_stride
: xd->pre.uv_stride;
int pre_stride = which_mv ? xd->plane[1].pre[1].stride
: xd->plane[1].pre[0].stride;
int_mv mv;
struct scale_factors *scale = &xd->scale_factor_uv[which_mv];
@ -1022,8 +1023,8 @@ static void build_inter16x16_predictors_mbuv_w(MACROBLOCKD *xd,
if (clamp_mvs)
clamp_mv_to_umv_border(&mv.as_mv, xd);
uptr = (which_mv ? xd->second_pre.u_buffer : xd->pre.u_buffer);
vptr = (which_mv ? xd->second_pre.v_buffer : xd->pre.v_buffer);
uptr = (which_mv ? xd->plane[1].pre[1].buf : xd->plane[1].pre[0].buf);
vptr = (which_mv ? xd->plane[2].pre[1].buf : xd->plane[2].pre[0].buf);
scale->set_scaled_offsets(scale, mb_row * 16, mb_col * 16);
@ -1045,8 +1046,8 @@ static void build_inter_predictors_sby_w(MACROBLOCKD *x,
BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize), bw = 1 << bwl;
const int bhl = mb_height_log2(bsize), bh = 1 << bhl;
uint8_t *y1 = x->pre.y_buffer;
uint8_t *y2 = x->second_pre.y_buffer;
uint8_t *y1 = x->plane[0].pre[0].buf;
uint8_t *y2 = x->plane[0].pre[1].buf;
int edge[4], n;
edge[0] = x->mb_to_top_edge;
@ -1062,15 +1063,15 @@ static void build_inter_predictors_sby_w(MACROBLOCKD *x,
x->mb_to_left_edge = edge[2] - ((x_idx * 16) << 3);
x->mb_to_right_edge = edge[3] + (((bw - 1 - x_idx) * 16) << 3);
x->pre.y_buffer = y1 + scaled_buffer_offset(x_idx * 16,
x->plane[0].pre[0].buf = y1 + scaled_buffer_offset(x_idx * 16,
y_idx * 16,
x->pre.y_stride,
x->plane[0].pre[0].stride,
&x->scale_factor[0]);
if (x->mode_info_context->mbmi.second_ref_frame > 0) {
x->second_pre.y_buffer = y2 +
x->plane[0].pre[1].buf = y2 +
scaled_buffer_offset(x_idx * 16,
y_idx * 16,
x->second_pre.y_stride,
x->plane[0].pre[1].stride,
&x->scale_factor[1]);
}
build_inter16x16_predictors_mby_w(x,
@ -1082,9 +1083,9 @@ static void build_inter_predictors_sby_w(MACROBLOCKD *x,
x->mb_to_left_edge = edge[2];
x->mb_to_right_edge = edge[3];
x->pre.y_buffer = y1;
x->plane[0].pre[0].buf = y1;
if (x->mode_info_context->mbmi.second_ref_frame > 0) {
x->second_pre.y_buffer = y2;
x->plane[0].pre[1].buf = y2;
}
}
@ -1110,8 +1111,8 @@ static void build_inter_predictors_sbuv_w(MACROBLOCKD *x,
BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize), bw = 1 << bwl;
const int bhl = mb_height_log2(bsize), bh = 1 << bhl;
uint8_t *u1 = x->pre.u_buffer, *v1 = x->pre.v_buffer;
uint8_t *u2 = x->second_pre.u_buffer, *v2 = x->second_pre.v_buffer;
uint8_t *u1 = x->plane[1].pre[0].buf, *v1 = x->plane[2].pre[0].buf;
uint8_t *u2 = x->plane[1].pre[1].buf, *v2 = x->plane[2].pre[1].buf;
int edge[4], n;
edge[0] = x->mb_to_top_edge;
@ -1130,18 +1131,18 @@ static void build_inter_predictors_sbuv_w(MACROBLOCKD *x,
scaled_uv_offset = scaled_buffer_offset(x_idx * 8,
y_idx * 8,
x->pre.uv_stride,
x->plane[1].pre[0].stride,
&x->scale_factor_uv[0]);
x->pre.u_buffer = u1 + scaled_uv_offset;
x->pre.v_buffer = v1 + scaled_uv_offset;
x->plane[1].pre[0].buf = u1 + scaled_uv_offset;
x->plane[2].pre[0].buf = v1 + scaled_uv_offset;
if (x->mode_info_context->mbmi.second_ref_frame > 0) {
scaled_uv_offset = scaled_buffer_offset(x_idx * 8,
y_idx * 8,
x->second_pre.uv_stride,
x->plane[1].pre[1].stride,
&x->scale_factor_uv[1]);
x->second_pre.u_buffer = u2 + scaled_uv_offset;
x->second_pre.v_buffer = v2 + scaled_uv_offset;
x->plane[1].pre[1].buf = u2 + scaled_uv_offset;
x->plane[2].pre[1].buf = v2 + scaled_uv_offset;
}
build_inter16x16_predictors_mbuv_w(x,
@ -1154,12 +1155,12 @@ static void build_inter_predictors_sbuv_w(MACROBLOCKD *x,
x->mb_to_left_edge = edge[2];
x->mb_to_right_edge = edge[3];
x->pre.u_buffer = u1;
x->pre.v_buffer = v1;
x->plane[1].pre[0].buf = u1;
x->plane[2].pre[0].buf = v1;
if (x->mode_info_context->mbmi.second_ref_frame > 0) {
x->second_pre.u_buffer = u2;
x->second_pre.v_buffer = v2;
x->plane[1].pre[1].buf = u2;
x->plane[2].pre[1].buf = v2;
}
}
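
The build_inter_predictors_sby_w/_sbuv_w hunks above follow one pattern: save the base pre pointers (y1/y2, u1/v1, u2/v2), advance plane[i].pre[ref].buf by scaled_buffer_offset() for each 16x16 sub-block, and restore the bases once the loop finishes. A self-contained sketch of that advance/restore shape, with an unscaled stand-in for scaled_buffer_offset() and a placeholder per-block callback:

    #include <stdint.h>

    struct buf_2d { uint8_t *buf; int stride; };   /* trimmed stand-in */

    /* Unscaled stand-in for scaled_buffer_offset(); the real helper also
     * applies the struct scale_factors for scaled references. */
    static int buffer_offset(int x_offset, int y_offset, int stride) {
      return y_offset * stride + x_offset;
    }

    static void predict_by_16x16(struct buf_2d *pre, int bw, int bh,
                                 void (*predict_mb)(const struct buf_2d *)) {
      uint8_t *const base = pre->buf;       /* like y1/u1/v1 above */
      int n;
      for (n = 0; n < bw * bh; n++) {
        const int x_idx = n % bw, y_idx = n / bw;
        pre->buf = base + buffer_offset(x_idx * 16, y_idx * 16, pre->stride);
        predict_mb(pre);                    /* build_inter16x16_predictors_* */
      }
      pre->buf = base;                      /* restore, as the code above does */
    }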

View File

@ -106,6 +106,35 @@ static void setup_dst_planes(MACROBLOCKD *xd,
xd->plane[2].subsampling_x, xd->plane[2].subsampling_y);
}
static void setup_pre_planes(MACROBLOCKD *xd,
const YV12_BUFFER_CONFIG *src0,
const YV12_BUFFER_CONFIG *src1,
int mb_row, int mb_col,
const struct scale_factors *scale,
const struct scale_factors *scale_uv) {
int i;
for (i = 0; i < 2; i++) {
const YV12_BUFFER_CONFIG *src = i ? src1 : src0;
if (!src)
continue;
setup_pred_plane(&xd->plane[0].pre[i],
src->y_buffer, src->y_stride,
mb_row, mb_col, scale ? scale + i : NULL,
xd->plane[0].subsampling_x, xd->plane[0].subsampling_y);
setup_pred_plane(&xd->plane[1].pre[i],
src->u_buffer, src->uv_stride,
mb_row, mb_col, scale_uv ? scale_uv + i : NULL,
xd->plane[1].subsampling_x, xd->plane[1].subsampling_y);
setup_pred_plane(&xd->plane[2].pre[i],
src->v_buffer, src->uv_stride,
mb_row, mb_col, scale_uv ? scale_uv + i : NULL,
xd->plane[2].subsampling_x, xd->plane[2].subsampling_y);
}
}
static void setup_pred_block(YV12_BUFFER_CONFIG *dst,
const YV12_BUFFER_CONFIG *src,
int mb_row, int mb_col,

View File

@ -644,15 +644,14 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
const MV_REFERENCE_FRAME ref_frame = mbmi->ref_frame;
struct scale_factors *sf0 = &xd->scale_factor[0];
struct scale_factors *sf_uv0 = &xd->scale_factor_uv[0];
*sf0 = cm->active_ref_scale[mbmi->ref_frame - 1];
{
// Select the appropriate reference frame for this MB
const int ref_fb_idx = cm->active_ref_idx[ref_frame - 1];
setup_pred_block(&xd->pre, &cm->yv12_fb[ref_fb_idx],
mb_row, mb_col, sf0, sf_uv0);
setup_pre_planes(xd, &cm->yv12_fb[ref_fb_idx], NULL,
mb_row, mb_col, xd->scale_factor, xd->scale_factor_uv);
#ifdef DEC_DEBUG
if (dec_debug)
@ -712,12 +711,11 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
if (mbmi->second_ref_frame > 0) {
const MV_REFERENCE_FRAME second_ref_frame = mbmi->second_ref_frame;
struct scale_factors *sf1 = &xd->scale_factor[1];
struct scale_factors *sf_uv1 = &xd->scale_factor_uv[1];
const int second_ref_fb_idx = cm->active_ref_idx[second_ref_frame - 1];
*sf1 = cm->active_ref_scale[second_ref_frame - 1];
setup_pred_block(&xd->second_pre, &cm->yv12_fb[second_ref_fb_idx],
mb_row, mb_col, sf1, sf_uv1);
setup_pre_planes(xd, NULL, &cm->yv12_fb[second_ref_fb_idx],
mb_row, mb_col, xd->scale_factor, xd->scale_factor_uv);
vp9_find_mv_refs(cm, xd, mi,
use_prev_in_find_mv_refs ? prev_mi : NULL,

View File

@ -815,8 +815,8 @@ static void set_refs(VP9D_COMP *pbi, int mb_row, int mb_col) {
const YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[fb_idx];
xd->scale_factor[0] = cm->active_ref_scale[mbmi->ref_frame - 1];
xd->scale_factor_uv[0] = cm->active_ref_scale[mbmi->ref_frame - 1];
setup_pred_block(&xd->pre, cfg, mb_row, mb_col,
&xd->scale_factor[0], &xd->scale_factor_uv[0]);
setup_pre_planes(xd, cfg, NULL, mb_row, mb_col,
xd->scale_factor, xd->scale_factor_uv);
xd->corrupted |= cfg->corrupted;
if (mbmi->second_ref_frame > INTRA_FRAME) {
@ -825,8 +825,8 @@ static void set_refs(VP9D_COMP *pbi, int mb_row, int mb_col) {
const YV12_BUFFER_CONFIG *second_cfg = &cm->yv12_fb[second_fb_idx];
xd->scale_factor[1] = cm->active_ref_scale[mbmi->second_ref_frame - 1];
xd->scale_factor_uv[1] = cm->active_ref_scale[mbmi->second_ref_frame - 1];
setup_pred_block(&xd->second_pre, second_cfg, mb_row, mb_col,
&xd->scale_factor[1], &xd->scale_factor_uv[1]);
setup_pre_planes(xd, NULL, second_cfg, mb_row, mb_col,
xd->scale_factor, xd->scale_factor_uv);
xd->corrupted |= second_cfg->corrupted;
}
}
@ -1589,8 +1589,8 @@ int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
#endif
// Initialize xd pointers. Any reference should do for xd->pre, so use 0.
vpx_memcpy(&xd->pre, &pc->yv12_fb[pc->active_ref_idx[0]],
sizeof(YV12_BUFFER_CONFIG));
setup_pre_planes(xd, &pc->yv12_fb[pc->active_ref_idx[0]], NULL,
0, 0, NULL, NULL);
setup_dst_planes(xd, &pc->yv12_fb[pc->new_fb_idx], 0, 0);
// Create the segmentation map structure and set to 0

View File

@ -1235,7 +1235,10 @@ static void init_encode_frame_mb_context(VP9_COMP *cpi) {
// Copy data over into macro block data structures.
x->src = *cpi->Source;
xd->pre = cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]];
// TODO(jkoleszar): are these initializations required?
setup_pre_planes(xd, &cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]], NULL,
0, 0, NULL, NULL);
setup_dst_planes(xd, &cm->yv12_fb[cm->new_fb_idx], 0, 0);
// set up frame for intra coded blocks
@ -1946,7 +1949,7 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
if (output_enabled)
sum_intra_stats(cpi, x);
} else {
int ref_fb_idx;
int ref_fb_idx, second_ref_fb_idx;
#ifdef ENC_DEBUG
if (enc_debug)
printf("Mode %d skip %d tx_size %d ref %d ref2 %d mv %d %d interp %d\n",
@ -1965,27 +1968,21 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
else
ref_fb_idx = cpi->common.ref_frame_map[cpi->alt_fb_idx];
setup_pred_block(&xd->pre,
&cpi->common.yv12_fb[ref_fb_idx],
mb_row, mb_col,
&xd->scale_factor[0], &xd->scale_factor_uv[0]);
if (mbmi->second_ref_frame > 0) {
int second_ref_fb_idx;
if (mbmi->second_ref_frame == LAST_FRAME)
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->lst_fb_idx];
else if (mbmi->second_ref_frame == GOLDEN_FRAME)
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->gld_fb_idx];
else
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->alt_fb_idx];
setup_pred_block(&xd->second_pre,
&cpi->common.yv12_fb[second_ref_fb_idx],
mb_row, mb_col,
&xd->scale_factor[1], &xd->scale_factor_uv[1]);
}
setup_pre_planes(xd,
&cpi->common.yv12_fb[ref_fb_idx],
mbmi->second_ref_frame > 0 ? &cpi->common.yv12_fb[second_ref_fb_idx]
: NULL,
mb_row, mb_col, xd->scale_factor, xd->scale_factor_uv);
if (!x->skip) {
vp9_encode_inter16x16(cm, x, mb_row, mb_col);
} else {
@ -2160,7 +2157,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
if (output_enabled)
sum_intra_stats(cpi, x);
} else {
int ref_fb_idx;
int ref_fb_idx, second_ref_fb_idx;
assert(cm->frame_type != KEY_FRAME);
@ -2171,27 +2168,21 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
else
ref_fb_idx = cpi->common.ref_frame_map[cpi->alt_fb_idx];
setup_pred_block(&xd->pre,
&cpi->common.yv12_fb[ref_fb_idx],
mb_row, mb_col,
&xd->scale_factor[0], &xd->scale_factor_uv[0]);
if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
int second_ref_fb_idx;
if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->lst_fb_idx];
else if (xd->mode_info_context->mbmi.second_ref_frame == GOLDEN_FRAME)
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->gld_fb_idx];
else
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->alt_fb_idx];
setup_pred_block(&xd->second_pre,
&cpi->common.yv12_fb[second_ref_fb_idx],
mb_row, mb_col,
&xd->scale_factor[1], &xd->scale_factor_uv[1]);
}
setup_pre_planes(xd,
&cpi->common.yv12_fb[ref_fb_idx],
xd->mode_info_context->mbmi.second_ref_frame > 0
? &cpi->common.yv12_fb[second_ref_fb_idx] : NULL,
mb_row, mb_col, xd->scale_factor, xd->scale_factor_uv);
vp9_build_inter_predictors_sb(xd, mb_row, mb_col, bsize);
}
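
In encode_macroblock and encode_superblock the second_ref_fb_idx declaration is hoisted next to ref_fb_idx so a single setup_pre_planes() call can cover both references; the ternary only reads second_ref_fb_idx when second_ref_frame > 0, i.e. only after it has been assigned. A condensed sketch of the resulting call-site shape, where select_ref_fb_idx() is a hypothetical helper standing in for the if/else chains over the lst/gld/alt indices shown above:

    int ref_fb_idx, second_ref_fb_idx;

    ref_fb_idx = select_ref_fb_idx(cpi, mbmi->ref_frame);        /* hypothetical */
    if (mbmi->second_ref_frame > 0)
      second_ref_fb_idx = select_ref_fb_idx(cpi, mbmi->second_ref_frame);

    setup_pre_planes(xd,
                     &cpi->common.yv12_fb[ref_fb_idx],
                     mbmi->second_ref_frame > 0
                         ? &cpi->common.yv12_fb[second_ref_fb_idx] : NULL,
                     mb_row, mb_col, xd->scale_factor, xd->scale_factor_uv);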

View File

@ -358,7 +358,7 @@ static void zz_motion_search(VP9_COMP *cpi, MACROBLOCK *x, YV12_BUFFER_CONFIG *r
int ref_stride = d->pre_stride;
// Set up pointers for this macro block recon buffer
xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
xd->plane[0].pre[0].buf = recon_buffer->y_buffer + recon_yoffset;
ref_ptr = (uint8_t *)(*(d->base_pre) + d->pre);
@ -402,7 +402,7 @@ static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
v_fn_ptr.vf = vp9_mse16x16;
// Set up pointers for this macro block recon buffer
xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;
xd->plane[0].pre[0].buf = recon_buffer->y_buffer + recon_yoffset;
// Initial step/diamond search centred on best mv
tmp_mv.as_int = 0;
@ -485,7 +485,7 @@ void vp9_first_pass(VP9_COMP *cpi) {
vp9_clear_system_state(); // __asm emms;
x->src = * cpi->Source;
xd->pre = *lst_yv12;
setup_pre_planes(xd, lst_yv12, NULL, 0, 0, NULL, NULL);
setup_dst_planes(xd, new_yv12, 0, 0);
x->partition_info = x->pi;
@ -601,9 +601,9 @@ void vp9_first_pass(VP9_COMP *cpi) {
}
// Reset to last frame as reference buffer
xd->pre.y_buffer = lst_yv12->y_buffer + recon_yoffset;
xd->pre.u_buffer = lst_yv12->u_buffer + recon_uvoffset;
xd->pre.v_buffer = lst_yv12->v_buffer + recon_uvoffset;
xd->plane[0].pre[0].buf = lst_yv12->y_buffer + recon_yoffset;
xd->plane[1].pre[0].buf = lst_yv12->u_buffer + recon_uvoffset;
xd->plane[2].pre[0].buf = lst_yv12->v_buffer + recon_uvoffset;
// In accumulating a score for the older reference frame
// take the best of the motion predicted score and

View File

@ -110,15 +110,16 @@ static int do_16x16_motion_search
b->src_stride = x->src.y_stride;
b->src = x->src.y_stride * (n & 12) + (n & 3) * 4;
d->base_pre = &xd->pre.y_buffer;
d->pre_stride = xd->pre.y_stride;
d->pre = xd->pre.y_stride * (n & 12) + (n & 3) * 4;
d->base_pre = &xd->plane[0].pre[0].buf;
d->pre_stride = xd->plane[0].pre[0].stride;
d->pre = xd->plane[0].pre[0].stride * (n & 12) + (n & 3) * 4;
}
// Try zero MV first
// FIXME should really use something like near/nearest MV and/or MV prediction
err = vp9_sad16x16(x->src.y_buffer, x->src.y_stride,
xd->pre.y_buffer, xd->pre.y_stride, INT_MAX);
xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
INT_MAX);
dst_mv->as_int = 0;
// Test last reference frame using the previous best mv as the
@ -162,7 +163,8 @@ static int do_16x16_zerozero_search
// Try zero MV first
// FIXME should really use something like near/nearest MV and/or MV prediction
err = vp9_sad16x16(x->src.y_buffer, x->src.y_stride,
xd->pre.y_buffer, xd->pre.y_stride, INT_MAX);
xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride,
INT_MAX);
dst_mv->as_int = 0;
@ -247,8 +249,8 @@ static void update_mbgraph_mb_stats
// Golden frame MV search, if it exists and is different than last frame
if (golden_ref) {
int g_motion_error;
xd->pre.y_buffer = golden_ref->y_buffer + mb_y_offset;
xd->pre.y_stride = golden_ref->y_stride;
xd->plane[0].pre[0].buf = golden_ref->y_buffer + mb_y_offset;
xd->plane[0].pre[0].stride = golden_ref->y_stride;
g_motion_error = do_16x16_motion_search(cpi, prev_golden_ref_mv,
&stats->ref[GOLDEN_FRAME].m.mv,
buf, mb_y_offset,
@ -263,8 +265,8 @@ static void update_mbgraph_mb_stats
// Alt-ref frame MV search, if it exists and is different than last/golden frame
if (alt_ref) {
int a_motion_error;
xd->pre.y_buffer = alt_ref->y_buffer + mb_y_offset;
xd->pre.y_stride = alt_ref->y_stride;
xd->plane[0].pre[0].buf = alt_ref->y_buffer + mb_y_offset;
xd->plane[0].pre[0].stride = alt_ref->y_stride;
a_motion_error = do_16x16_zerozero_search(cpi,
&stats->ref[ALTREF_FRAME].m.mv,
buf, mb_y_offset,
@ -304,7 +306,7 @@ static void update_mbgraph_frame_stats
- 16 - VP9_INTERP_EXTEND;
xd->up_available = 0;
xd->plane[0].dst.stride = buf->y_stride;
xd->pre.y_stride = buf->y_stride;
xd->plane[0].pre[0].stride = buf->y_stride;
xd->plane[1].dst.stride = buf->uv_stride;
xd->mode_info_context = &mi_local;
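
The motion-search block descriptor keeps base_pre as a pointer to the buffer pointer plus an integer offset, so pointing it at &xd->plane[0].pre[0].buf means later re-targeting of plane[0].pre[0].buf (to the golden or alt-ref buffer, as above) is picked up without touching the descriptor. A minimal sketch of that double indirection, with trimmed stand-in types:

    #include <stdint.h>

    struct blockd_sketch {          /* trimmed stand-in for BLOCKD */
      uint8_t **base_pre;           /* e.g. &xd->plane[0].pre[0].buf */
      int pre;                      /* offset of this block within the buffer */
      int pre_stride;
    };

    /* Mirrors the unchanged line above: (uint8_t *)(*(d->base_pre) + d->pre) */
    static uint8_t *block_pre_ptr(const struct blockd_sketch *d) {
      return *(d->base_pre) + d->pre;
    }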

View File

@ -2885,7 +2885,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
x->nmvjointcost, x->mvcost, 96,
x->e_mbd.allow_high_precision_mv);
} else {
YV12_BUFFER_CONFIG backup_yv12 = xd->pre;
struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0}};
int bestsme = INT_MAX;
int further_steps, step_param = cpi->sf.first_step;
int sadpb = x->sadperbit16;
@ -2898,13 +2898,16 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
int tmp_row_max = x->mv_row_max;
if (scaled_ref_frame) {
int i;
// Swap out the reference frame for a version that's been scaled to
// match the resolution of the current frame, allowing the existing
// motion search code to be used without additional modifications.
xd->pre = *scaled_ref_frame;
xd->pre.y_buffer += mb_row * 16 * xd->pre.y_stride + mb_col * 16;
xd->pre.u_buffer += mb_row * 8 * xd->pre.uv_stride + mb_col * 8;
xd->pre.v_buffer += mb_row * 8 * xd->pre.uv_stride + mb_col * 8;
for (i = 0; i < MAX_MB_PLANE; i++)
backup_yv12[i] = xd->plane[i].pre[0];
setup_pre_planes(xd, scaled_ref_frame, NULL, mb_row, mb_col,
NULL, NULL);
}
vp9_clamp_mv_min_max(x, &ref_mv[0]);
@ -2954,7 +2957,10 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
// restore the predictor, if required
if (scaled_ref_frame) {
xd->pre = backup_yv12;
int i;
for (i = 0; i < MAX_MB_PLANE; i++)
xd->plane[i].pre[0] = backup_yv12[i];
}
}
break;
@ -3533,7 +3539,6 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
int ref = mbmi->ref_frame;
int fb;
xd->pre = yv12_mb[ref];
best_ref_mv = mbmi->ref_mvs[ref][0];
vpx_memcpy(mdcounts, frame_mdcounts[ref], sizeof(mdcounts));
@ -3552,10 +3557,15 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
if (mbmi->second_ref_frame > 0) {
int ref = mbmi->second_ref_frame;
xd->second_pre = yv12_mb[ref];
second_best_ref_mv = mbmi->ref_mvs[ref][0];
}
// TODO(jkoleszar) scaling/translation handled during creation of yv12_mb
// currently.
setup_pre_planes(xd, &yv12_mb[mbmi->ref_frame],
mbmi->second_ref_frame > 0 ? &yv12_mb[mbmi->second_ref_frame] : NULL,
0, 0, NULL, NULL);
// Experimental code. Special case for gf and arf zeromv modes.
// Increase zbin size to suppress noise
if (cpi->zbin_mode_boost_enabled) {
@ -4312,7 +4322,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
MB_PREDICTION_MODE this_mode;
MB_PREDICTION_MODE best_mode = DC_PRED;
MV_REFERENCE_FRAME ref_frame;
MV_REFERENCE_FRAME ref_frame, second_ref;
unsigned char segment_id = xd->mode_info_context->mbmi.segment_id;
int comp_pred, i;
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
@ -4478,8 +4488,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
// continue;
if (comp_pred) {
int second_ref;
if (ref_frame == ALTREF_FRAME) {
second_ref = LAST_FRAME;
} else {
@ -4491,7 +4499,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
set_scale_factors(xd, mbmi->ref_frame, mbmi->second_ref_frame,
scale_factor);
xd->second_pre = yv12_mb[second_ref];
mode_excluded =
mode_excluded ?
mode_excluded : cm->comp_pred_mode == SINGLE_PREDICTION_ONLY;
@ -4509,7 +4516,9 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
}
}
xd->pre = yv12_mb[ref_frame];
setup_pre_planes(xd, &yv12_mb[ref_frame],
comp_pred ? &yv12_mb[second_ref] : NULL, 0, 0, NULL, NULL);
vpx_memcpy(mdcounts, frame_mdcounts[ref_frame], sizeof(mdcounts));
// If the segment reference frame feature is enabled....
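
handle_inter_mode above now snapshots each plane's first-reference descriptor into a struct buf_2d array before installing the scaled reference with setup_pre_planes(), then restores the array once the motion search finishes. A self-contained sketch of that save/swap/restore shape, with trimmed stand-in types and placeholder callbacks:

    #include <stdint.h>

    #define MAX_MB_PLANE 3
    struct buf_2d { uint8_t *buf; int stride; };       /* trimmed stand-in */
    struct plane_sketch { struct buf_2d pre[2]; };     /* trimmed stand-in */

    static void with_scaled_ref(struct plane_sketch plane[MAX_MB_PLANE],
                                void (*install_scaled_ref)(struct plane_sketch *),
                                void (*motion_search)(struct plane_sketch *)) {
      struct buf_2d backup[MAX_MB_PLANE];
      int i;
      for (i = 0; i < MAX_MB_PLANE; i++)     /* save pre[0] for every plane */
        backup[i] = plane[i].pre[0];
      install_scaled_ref(plane);             /* setup_pre_planes(xd, scaled, NULL, ...) */
      motion_search(plane);
      for (i = 0; i < MAX_MB_PLANE; i++)     /* restore the original references */
        plane[i].pre[0] = backup[i];
    }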

View File

@ -221,9 +221,9 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi,
DECLARE_ALIGNED_ARRAY(16, uint8_t, predictor, 16 * 16 + 8 * 8 + 8 * 8);
// Save input state
uint8_t *y_buffer = mbd->pre.y_buffer;
uint8_t *u_buffer = mbd->pre.u_buffer;
uint8_t *v_buffer = mbd->pre.v_buffer;
uint8_t *y_buffer = mbd->plane[0].pre[0].buf;
uint8_t *u_buffer = mbd->plane[1].pre[0].buf;
uint8_t *v_buffer = mbd->plane[2].pre[0].buf;
for (mb_row = 0; mb_row < mb_rows; mb_row++) {
#if ALT_REF_MC_ENABLED
@ -368,9 +368,9 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi,
}
// Restore input state
mbd->pre.y_buffer = y_buffer;
mbd->pre.u_buffer = u_buffer;
mbd->pre.v_buffer = v_buffer;
mbd->plane[0].pre[0].buf = y_buffer;
mbd->plane[1].pre[0].buf = u_buffer;
mbd->plane[2].pre[0].buf = v_buffer;
}
void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance) {