diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h
index 6801d72fd..38acf1e6d 100644
--- a/vp9/common/vp9_blockd.h
+++ b/vp9/common/vp9_blockd.h
@@ -215,7 +215,7 @@ struct macroblockd_plane {
 typedef struct macroblockd {
   struct macroblockd_plane plane[MAX_MB_PLANE];
 
-  struct scale_factors scale_factor[2];
+  const struct scale_factors *scale_factors[2];
 
   MODE_INFO *last_mi;
   int mode_info_stride;
diff --git a/vp9/common/vp9_onyxc_int.h b/vp9/common/vp9_onyxc_int.h
index bfb94e44c..117b833dd 100644
--- a/vp9/common/vp9_onyxc_int.h
+++ b/vp9/common/vp9_onyxc_int.h
@@ -123,7 +123,6 @@ typedef struct VP9Common {
   // Each frame can reference REFS_PER_FRAME buffers
   int active_ref_idx[REFS_PER_FRAME];
   struct scale_factors active_ref_scale[REFS_PER_FRAME];
-  struct scale_factors_common active_ref_scale_comm[REFS_PER_FRAME];
   int new_fb_idx;
 
   YV12_BUFFER_CONFIG post_proc_buffer;
diff --git a/vp9/common/vp9_reconinter.c b/vp9/common/vp9_reconinter.c
index 75f9532de..c84007e94 100644
--- a/vp9/common/vp9_reconinter.c
+++ b/vp9/common/vp9_reconinter.c
@@ -66,11 +66,11 @@ static void inter_predictor(const uint8_t *src, int src_stride,
                             uint8_t *dst, int dst_stride,
                             const int subpel_x,
                             const int subpel_y,
-                            const struct scale_factors *scale,
+                            const struct scale_factors *sf,
                             int w, int h, int ref,
                             const struct subpix_fn_table *subpix,
                             int xs, int ys) {
-  scale->sfc->predict[subpel_x != 0][subpel_y != 0][ref](
+  sf->predict[subpel_x != 0][subpel_y != 0][ref](
       src, src_stride, dst, dst_stride,
       subpix->filter_x[subpel_x], xs,
       subpix->filter_y[subpel_y], ys,
@@ -80,7 +80,7 @@ static void inter_predictor(const uint8_t *src, int src_stride,
 void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
                                uint8_t *dst, int dst_stride,
                                const MV *src_mv,
-                               struct scale_factors *scale,
+                               const struct scale_factors *sf,
                                int w, int h, int ref,
                                const struct subpix_fn_table *subpix,
                                enum mv_precision precision,
@@ -88,19 +88,14 @@ void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
   const int is_q4 = precision == MV_PRECISION_Q4;
   const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                      is_q4 ? src_mv->col : src_mv->col * 2 };
-  const struct scale_factors_common *sfc = scale->sfc;
-  int subpel_x, subpel_y;
-  MV32 mv;
-
-  sfc->set_scaled_offsets(scale, y, x);
-  mv = sfc->scale_mv(&mv_q4, scale);
-  subpel_x = mv.col & SUBPEL_MASK;
-  subpel_y = mv.row & SUBPEL_MASK;
+  MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
+  const int subpel_x = mv.col & SUBPEL_MASK;
+  const int subpel_y = mv.row & SUBPEL_MASK;
 
   src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
 
   inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
-                  scale, w, h, ref, subpix, sfc->x_step_q4, sfc->y_step_q4);
+                  sf, w, h, ref, subpix, sf->x_step_q4, sf->y_step_q4);
 }
 
 static INLINE int round_mv_comp_q4(int value) {
@@ -158,7 +153,7 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
   int ref;
 
   for (ref = 0; ref < 1 + is_compound; ++ref) {
-    struct scale_factors *const scale = &xd->scale_factor[ref];
+    const struct scale_factors *const sf = xd->scale_factors[ref];
     struct buf_2d *const pre_buf = &pd->pre[ref];
     struct buf_2d *const dst_buf = &pd->dst;
     uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
@@ -185,12 +180,11 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
     MV32 scaled_mv;
     int xs, ys, subpel_x, subpel_y;
 
-    if (vp9_is_scaled(scale->sfc)) {
-      pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, scale);
-      scale->sfc->set_scaled_offsets(scale, mi_y + y, mi_x + x);
-      scaled_mv = scale->sfc->scale_mv(&mv_q4, scale);
-      xs = scale->sfc->x_step_q4;
-      ys = scale->sfc->y_step_q4;
+    if (vp9_is_scaled(sf)) {
+      pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf);
+      scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+      xs = sf->x_step_q4;
+      ys = sf->y_step_q4;
     } else {
       pre = pre_buf->buf + (y * pre_buf->stride + x);
       scaled_mv.row = mv_q4.row;
@@ -203,7 +197,7 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
            + (scaled_mv.col >> SUBPEL_BITS);
 
     inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
-                    subpel_x, subpel_y, scale, w, h, ref, &xd->subpix, xs, ys);
+                    subpel_x, subpel_y, sf, w, h, ref, &xd->subpix, xs, ys);
   }
 }
 
@@ -262,7 +256,7 @@ static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
   int ref;
 
   for (ref = 0; ref < 1 + is_compound; ++ref) {
-    struct scale_factors *const scale = &xd->scale_factor[ref];
+    const struct scale_factors *const sf = xd->scale_factors[ref];
     struct buf_2d *const pre_buf = &pd->pre[ref];
     struct buf_2d *const dst_buf = &pd->dst;
     uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
@@ -310,16 +304,15 @@ static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
     x0_16 = x0 << SUBPEL_BITS;
     y0_16 = y0 << SUBPEL_BITS;
 
-    if (vp9_is_scaled(scale->sfc)) {
-      scale->sfc->set_scaled_offsets(scale, mi_y + y, mi_x + x);
-      scaled_mv = scale->sfc->scale_mv(&mv_q4, scale);
-      xs = scale->sfc->x_step_q4;
-      ys = scale->sfc->y_step_q4;
+    if (vp9_is_scaled(sf)) {
+      scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+      xs = sf->x_step_q4;
+      ys = sf->y_step_q4;
 
       // Get block position in the scaled reference frame.
-      x0 = scale->sfc->scale_value_x(x0, scale->sfc);
-      y0 = scale->sfc->scale_value_y(y0, scale->sfc);
-      x0_16 = scale->sfc->scale_value_x(x0_16, scale->sfc);
-      y0_16 = scale->sfc->scale_value_y(y0_16, scale->sfc);
+      x0 = sf->scale_value_x(x0, sf);
+      y0 = sf->scale_value_y(y0, sf);
+      x0_16 = sf->scale_value_x(x0_16, sf);
+      y0_16 = sf->scale_value_y(y0_16, sf);
     } else {
       scaled_mv.row = mv_q4.row;
       scaled_mv.col = mv_q4.col;
@@ -367,7 +360,7 @@ static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
     }
 
     inter_predictor(buf_ptr, pre_buf->stride, dst, dst_buf->stride, subpel_x,
-                    subpel_y, scale, w, h, ref, &xd->subpix, xs, ys);
+                    subpel_y, sf, w, h, ref, &xd->subpix, xs, ys);
   }
 }
 
@@ -402,15 +395,9 @@ void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
 void vp9_setup_scale_factors(VP9_COMMON *cm, int i) {
   const int ref = cm->active_ref_idx[i];
   struct scale_factors *const sf = &cm->active_ref_scale[i];
-  struct scale_factors_common *const sfc = &cm->active_ref_scale_comm[i];
-  if (ref >= cm->fb_count) {
-    vp9_zero(*sf);
-    vp9_zero(*sfc);
-  } else {
-    YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[ref];
-    vp9_setup_scale_factors_for_frame(sf, sfc,
-                                      fb->y_crop_width, fb->y_crop_height,
-                                      cm->width, cm->height);
-  }
+  YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[ref];
+  vp9_setup_scale_factors_for_frame(sf,
+                                    fb->y_crop_width, fb->y_crop_height,
+                                    cm->width, cm->height);
 }
 
diff --git a/vp9/common/vp9_reconinter.h b/vp9/common/vp9_reconinter.h
index 0f95f76bb..a4e968ce6 100644
--- a/vp9/common/vp9_reconinter.h
+++ b/vp9/common/vp9_reconinter.h
@@ -30,18 +30,16 @@ void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
 void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
                                uint8_t *dst, int dst_stride,
                                const MV *mv_q3,
-                               struct scale_factors *scale,
+                               const struct scale_factors *sf,
                                int w, int h, int do_avg,
                                const struct subpix_fn_table *subpix,
                                enum mv_precision precision,
                                int x, int y);
 
 static int scaled_buffer_offset(int x_offset, int y_offset, int stride,
-                                const struct scale_factors *scale) {
-  const int x = scale ? scale->sfc->scale_value_x(x_offset, scale->sfc) :
-      x_offset;
-  const int y = scale ? scale->sfc->scale_value_y(y_offset, scale->sfc) :
-      y_offset;
+                                const struct scale_factors *sf) {
+  const int x = sf ? sf->scale_value_x(x_offset, sf) : x_offset;
+  const int y = sf ? sf->scale_value_y(y_offset, sf) : y_offset;
   return y * stride + x;
 }
 
@@ -92,10 +90,10 @@ static void setup_pre_planes(MACROBLOCKD *xd, int i,
   }
 }
 
-static void set_scale_factors(MACROBLOCKD *xd, int ref0, int ref1,
-                              struct scale_factors sf[MAX_REF_FRAMES]) {
-  xd->scale_factor[0] = sf[ref0 >= 0 ? ref0 : 0];
-  xd->scale_factor[1] = sf[ref1 >= 0 ? ref1 : 0];
+static void set_scale_factors(VP9_COMMON *cm, MACROBLOCKD *xd,
+                              int ref0, int ref1) {
+  xd->scale_factors[0] = &cm->active_ref_scale[ref0 >= 0 ? ref0 : 0];
+  xd->scale_factors[1] = &cm->active_ref_scale[ref1 >= 0 ? ref1 : 0];
 }
 
 void vp9_setup_scale_factors(VP9_COMMON *cm, int i);
diff --git a/vp9/common/vp9_scale.c b/vp9/common/vp9_scale.c
index 3f0994f80..e0f1e3410 100644
--- a/vp9/common/vp9_scale.c
+++ b/vp9/common/vp9_scale.c
@@ -12,47 +12,19 @@
 #include "vp9/common/vp9_filter.h"
 #include "vp9/common/vp9_scale.h"
 
-static INLINE int scaled_x(int val, const struct scale_factors_common *sfc) {
-  return val * sfc->x_scale_fp >> REF_SCALE_SHIFT;
+static INLINE int scaled_x(int val, const struct scale_factors *sf) {
+  return val * sf->x_scale_fp >> REF_SCALE_SHIFT;
 }
 
-static INLINE int scaled_y(int val, const struct scale_factors_common *sfc) {
-  return val * sfc->y_scale_fp >> REF_SCALE_SHIFT;
+static INLINE int scaled_y(int val, const struct scale_factors *sf) {
+  return val * sf->y_scale_fp >> REF_SCALE_SHIFT;
 }
 
-static int unscaled_value(int val, const struct scale_factors_common *sfc) {
-  (void) sfc;
+static int unscaled_value(int val, const struct scale_factors *sf) {
+  (void) sf;
   return val;
 }
 
-static MV32 scaled_mv(const MV *mv, const struct scale_factors *scale) {
-  const MV32 res = {
-    scaled_y(mv->row, scale->sfc) + scale->y_offset_q4,
-    scaled_x(mv->col, scale->sfc) + scale->x_offset_q4
-  };
-  return res;
-}
-
-static MV32 unscaled_mv(const MV *mv, const struct scale_factors *scale) {
-  const MV32 res = {
-    mv->row,
-    mv->col
-  };
-  return res;
-}
-
-static void set_offsets_with_scaling(struct scale_factors *scale,
-                                     int row, int col) {
-  scale->x_offset_q4 = scaled_x(col << SUBPEL_BITS, scale->sfc) & SUBPEL_MASK;
-  scale->y_offset_q4 = scaled_y(row << SUBPEL_BITS, scale->sfc) & SUBPEL_MASK;
-}
-
-static void set_offsets_without_scaling(struct scale_factors *scale,
-                                        int row, int col) {
-  scale->x_offset_q4 = 0;
-  scale->y_offset_q4 = 0;
-}
-
 static int get_fixed_point_scale_factor(int other_size, int this_size) {
   // Calculate scaling factor once for each reference frame
   // and use fixed point scaling factors in decoding and encoding routines.
@@ -69,31 +41,36 @@ static int check_scale_factors(int other_w, int other_h,
          this_h <= 16 * other_h;
 }
 
-void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
-                                       struct scale_factors_common *scale_comm,
+MV32 vp9_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {
+  const int x_off_q4 = scaled_x(x << SUBPEL_BITS, sf) & SUBPEL_MASK;
+  const int y_off_q4 = scaled_y(y << SUBPEL_BITS, sf) & SUBPEL_MASK;
+  const MV32 res = {
+    scaled_y(mv->row, sf) + y_off_q4,
+    scaled_x(mv->col, sf) + x_off_q4
+  };
+  return res;
+}
+
+void vp9_setup_scale_factors_for_frame(struct scale_factors *sf,
                                        int other_w, int other_h,
                                        int this_w, int this_h) {
   if (!check_scale_factors(other_w, other_h, this_w, this_h)) {
-    scale_comm->x_scale_fp = REF_INVALID_SCALE;
-    scale_comm->y_scale_fp = REF_INVALID_SCALE;
+    sf->x_scale_fp = REF_INVALID_SCALE;
+    sf->y_scale_fp = REF_INVALID_SCALE;
     return;
   }
 
-  scale_comm->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w);
-  scale_comm->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h);
-  scale_comm->x_step_q4 = scaled_x(16, scale_comm);
-  scale_comm->y_step_q4 = scaled_y(16, scale_comm);
+  sf->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w);
+  sf->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h);
+  sf->x_step_q4 = scaled_x(16, sf);
+  sf->y_step_q4 = scaled_y(16, sf);
 
-  if (vp9_is_scaled(scale_comm)) {
-    scale_comm->scale_value_x = scaled_x;
-    scale_comm->scale_value_y = scaled_y;
-    scale_comm->set_scaled_offsets = set_offsets_with_scaling;
-    scale_comm->scale_mv = scaled_mv;
+  if (vp9_is_scaled(sf)) {
+    sf->scale_value_x = scaled_x;
+    sf->scale_value_y = scaled_y;
   } else {
-    scale_comm->scale_value_x = unscaled_value;
-    scale_comm->scale_value_y = unscaled_value;
-    scale_comm->set_scaled_offsets = set_offsets_without_scaling;
-    scale_comm->scale_mv = unscaled_mv;
+    sf->scale_value_x = unscaled_value;
+    sf->scale_value_y = unscaled_value;
   }
 
   // TODO(agrange): Investigate the best choice of functions to use here
@@ -102,48 +79,44 @@ void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
   // applied in one direction only, and not at all for 0,0, seems to give the
   // best quality, but it may be worth trying an additional mode that does
   // do the filtering on full-pel.
-  if (scale_comm->x_step_q4 == 16) {
-    if (scale_comm->y_step_q4 == 16) {
+  if (sf->x_step_q4 == 16) {
+    if (sf->y_step_q4 == 16) {
       // No scaling in either direction.
-      scale_comm->predict[0][0][0] = vp9_convolve_copy;
-      scale_comm->predict[0][0][1] = vp9_convolve_avg;
-      scale_comm->predict[0][1][0] = vp9_convolve8_vert;
-      scale_comm->predict[0][1][1] = vp9_convolve8_avg_vert;
-      scale_comm->predict[1][0][0] = vp9_convolve8_horiz;
-      scale_comm->predict[1][0][1] = vp9_convolve8_avg_horiz;
+      sf->predict[0][0][0] = vp9_convolve_copy;
+      sf->predict[0][0][1] = vp9_convolve_avg;
+      sf->predict[0][1][0] = vp9_convolve8_vert;
+      sf->predict[0][1][1] = vp9_convolve8_avg_vert;
+      sf->predict[1][0][0] = vp9_convolve8_horiz;
+      sf->predict[1][0][1] = vp9_convolve8_avg_horiz;
     } else {
       // No scaling in x direction. Must always scale in the y direction.
-      scale_comm->predict[0][0][0] = vp9_convolve8_vert;
-      scale_comm->predict[0][0][1] = vp9_convolve8_avg_vert;
-      scale_comm->predict[0][1][0] = vp9_convolve8_vert;
-      scale_comm->predict[0][1][1] = vp9_convolve8_avg_vert;
-      scale_comm->predict[1][0][0] = vp9_convolve8;
-      scale_comm->predict[1][0][1] = vp9_convolve8_avg;
+      sf->predict[0][0][0] = vp9_convolve8_vert;
+      sf->predict[0][0][1] = vp9_convolve8_avg_vert;
+      sf->predict[0][1][0] = vp9_convolve8_vert;
+      sf->predict[0][1][1] = vp9_convolve8_avg_vert;
+      sf->predict[1][0][0] = vp9_convolve8;
+      sf->predict[1][0][1] = vp9_convolve8_avg;
     }
   } else {
-    if (scale_comm->y_step_q4 == 16) {
+    if (sf->y_step_q4 == 16) {
       // No scaling in the y direction. Must always scale in the x direction.
-      scale_comm->predict[0][0][0] = vp9_convolve8_horiz;
-      scale_comm->predict[0][0][1] = vp9_convolve8_avg_horiz;
-      scale_comm->predict[0][1][0] = vp9_convolve8;
-      scale_comm->predict[0][1][1] = vp9_convolve8_avg;
-      scale_comm->predict[1][0][0] = vp9_convolve8_horiz;
-      scale_comm->predict[1][0][1] = vp9_convolve8_avg_horiz;
+      sf->predict[0][0][0] = vp9_convolve8_horiz;
+      sf->predict[0][0][1] = vp9_convolve8_avg_horiz;
+      sf->predict[0][1][0] = vp9_convolve8;
+      sf->predict[0][1][1] = vp9_convolve8_avg;
+      sf->predict[1][0][0] = vp9_convolve8_horiz;
+      sf->predict[1][0][1] = vp9_convolve8_avg_horiz;
    } else {
       // Must always scale in both directions.
-      scale_comm->predict[0][0][0] = vp9_convolve8;
-      scale_comm->predict[0][0][1] = vp9_convolve8_avg;
-      scale_comm->predict[0][1][0] = vp9_convolve8;
-      scale_comm->predict[0][1][1] = vp9_convolve8_avg;
-      scale_comm->predict[1][0][0] = vp9_convolve8;
-      scale_comm->predict[1][0][1] = vp9_convolve8_avg;
+      sf->predict[0][0][0] = vp9_convolve8;
+      sf->predict[0][0][1] = vp9_convolve8_avg;
+      sf->predict[0][1][0] = vp9_convolve8;
+      sf->predict[0][1][1] = vp9_convolve8_avg;
+      sf->predict[1][0][0] = vp9_convolve8;
+      sf->predict[1][0][1] = vp9_convolve8_avg;
     }
   }
   // 2D subpel motion always gets filtered in both directions
-  scale_comm->predict[1][1][0] = vp9_convolve8;
-  scale_comm->predict[1][1][1] = vp9_convolve8_avg;
-
-  scale->sfc = scale_comm;
-  scale->x_offset_q4 = 0;  // calculated per block
-  scale->y_offset_q4 = 0;  // calculated per block
+  sf->predict[1][1][0] = vp9_convolve8;
+  sf->predict[1][1][1] = vp9_convolve8_avg;
 }
diff --git a/vp9/common/vp9_scale.h b/vp9/common/vp9_scale.h
index 1437fcd9c..55b4d8888 100644
--- a/vp9/common/vp9_scale.h
+++ b/vp9/common/vp9_scale.h
@@ -18,40 +18,32 @@
 #define REF_NO_SCALE (1 << REF_SCALE_SHIFT)
 #define REF_INVALID_SCALE -1
 
-struct scale_factors;
-struct scale_factors_common {
+struct scale_factors {
   int x_scale_fp;   // horizontal fixed point scale factor
   int y_scale_fp;   // vertical fixed point scale factor
   int x_step_q4;
   int y_step_q4;
 
-  int (*scale_value_x)(int val, const struct scale_factors_common *sfc);
-  int (*scale_value_y)(int val, const struct scale_factors_common *sfc);
-  void (*set_scaled_offsets)(struct scale_factors *scale, int row, int col);
-  MV32 (*scale_mv)(const MV *mv, const struct scale_factors *scale);
+  int (*scale_value_x)(int val, const struct scale_factors *sf);
+  int (*scale_value_y)(int val, const struct scale_factors *sf);
 
   convolve_fn_t predict[2][2][2];  // horiz, vert, avg
 };
 
-struct scale_factors {
-  int x_offset_q4;
-  int y_offset_q4;
-  const struct scale_factors_common *sfc;
-};
+MV32 vp9_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf);
 
-void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
-                                       struct scale_factors_common *scale_comm,
+void vp9_setup_scale_factors_for_frame(struct scale_factors *sf,
                                        int other_w, int other_h,
                                        int this_w, int this_h);
 
-static int vp9_is_valid_scale(const struct scale_factors_common *sfc) {
-  return sfc->x_scale_fp != REF_INVALID_SCALE &&
-         sfc->y_scale_fp != REF_INVALID_SCALE;
+static int vp9_is_valid_scale(const struct scale_factors *sf) {
+  return sf->x_scale_fp != REF_INVALID_SCALE &&
+         sf->y_scale_fp != REF_INVALID_SCALE;
 }
 
-static int vp9_is_scaled(const struct scale_factors_common *sfc) {
-  return sfc->x_scale_fp != REF_NO_SCALE ||
-         sfc->y_scale_fp != REF_NO_SCALE;
+static int vp9_is_scaled(const struct scale_factors *sf) {
+  return sf->x_scale_fp != REF_NO_SCALE ||
+         sf->y_scale_fp != REF_NO_SCALE;
 }
 
 #endif  // VP9_COMMON_VP9_SCALE_H_
diff --git a/vp9/decoder/vp9_decodeframe.c b/vp9/decoder/vp9_decodeframe.c
index 42976a17a..5122243da 100644
--- a/vp9/decoder/vp9_decodeframe.c
+++ b/vp9/decoder/vp9_decodeframe.c
@@ -380,15 +380,15 @@ static void set_ref(VP9_COMMON *const cm, MACROBLOCKD *const xd,
   MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;
   const int ref = mbmi->ref_frame[idx] - LAST_FRAME;
   const YV12_BUFFER_CONFIG *cfg = get_frame_ref_buffer(cm, ref);
-  const struct scale_factors_common *sfc = &cm->active_ref_scale_comm[ref];
+  const struct scale_factors *sf = &cm->active_ref_scale[ref];
   xd->ref_buf[idx] = cfg;
-  if (!vp9_is_valid_scale(sfc))
+  if (!vp9_is_valid_scale(sf))
     vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                        "Invalid scale factors");
 
-  xd->scale_factor[idx].sfc = sfc;
-  setup_pre_planes(xd, idx, cfg, mi_row, mi_col, &xd->scale_factor[idx]);
+  xd->scale_factors[idx] = sf;
+  setup_pre_planes(xd, idx, cfg, mi_row, mi_col, xd->scale_factors[idx]);
   xd->corrupted |= cfg->corrupted;
 }
 
@@ -1201,7 +1201,7 @@ static size_t read_uncompressed_header(VP9D_COMP *pbi,
 
     for (i = 0; i < REFS_PER_FRAME; ++i) {
       vp9_setup_scale_factors(cm, i);
-      if (vp9_is_scaled(&cm->active_ref_scale_comm[i]))
+      if (vp9_is_scaled(&cm->active_ref_scale[i]))
         vp9_extend_frame_borders(&cm->yv12_fb[cm->active_ref_idx[i]],
                                  cm->subsampling_x, cm->subsampling_y);
     }
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index 2eb0a0dd7..cfd6077a8 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -2594,10 +2594,9 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
 
     assert(cm->frame_type != KEY_FRAME);
 
-    setup_pre_planes(xd, 0, ref_fb, mi_row, mi_col,
-                     &xd->scale_factor[0]);
+    setup_pre_planes(xd, 0, ref_fb, mi_row, mi_col, xd->scale_factors[0]);
     setup_pre_planes(xd, 1, second_ref_fb, mi_row, mi_col,
-                     &xd->scale_factor[1]);
+                     xd->scale_factors[1]);
 
     vp9_build_inter_predictors_sb(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));
   }
diff --git a/vp9/encoder/vp9_encodemb.c b/vp9/encoder/vp9_encodemb.c
index 6bc1a4b02..21bc5886d 100644
--- a/vp9/encoder/vp9_encodemb.c
+++ b/vp9/encoder/vp9_encodemb.c
@@ -31,11 +31,11 @@ void vp9_setup_interp_filters(MACROBLOCKD *xd,
   if (xd->mi_8x8 && xd->mi_8x8[0]) {
     MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;
 
-    set_scale_factors(xd, mbmi->ref_frame[0] - LAST_FRAME,
-                      mbmi->ref_frame[1] - LAST_FRAME,
-                      cm->active_ref_scale);
+    set_scale_factors(cm, xd, mbmi->ref_frame[0] - LAST_FRAME,
+                      mbmi->ref_frame[1] - LAST_FRAME);
+
   } else {
-    set_scale_factors(xd, -1, -1, cm->active_ref_scale);
+    set_scale_factors(cm, xd, -1, -1);
   }
 
   xd->subpix.filter_x = xd->subpix.filter_y =
diff --git a/vp9/encoder/vp9_onyx_if.c b/vp9/encoder/vp9_onyx_if.c
index 3f813a1f6..147c34868 100644
--- a/vp9/encoder/vp9_onyx_if.c
+++ b/vp9/encoder/vp9_onyx_if.c
@@ -3548,7 +3548,7 @@ int vp9_get_compressed_data(VP9_PTR ptr, unsigned int *frame_flags,
   // Calculate scaling factors for each of the 3 available references
   for (i = 0; i < REFS_PER_FRAME; ++i) {
     vp9_setup_scale_factors(cm, i);
-    if (vp9_is_scaled(&cm->active_ref_scale_comm[i]))
+    if (vp9_is_scaled(&cm->active_ref_scale[i]))
      vp9_extend_frame_borders(&cm->yv12_fb[cm->active_ref_idx[i]],
                               cm->subsampling_x, cm->subsampling_y);
   }
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index 9c860ab2d..9c93fd408 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -1522,7 +1522,7 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi,
       vp9_build_inter_predictor(pre, pd->pre[ref].stride,
                                 dst, pd->dst.stride,
                                 &mi->bmi[i].as_mv[ref].as_mv,
-                                &xd->scale_factor[ref],
+                                xd->scale_factors[ref],
                                 width, height, ref, &xd->subpix, MV_PRECISION_Q3,
                                 mi_col * MI_SIZE + 4 * (i % 2),
                                 mi_row * MI_SIZE + 4 * (i / 2));
@@ -2259,23 +2259,18 @@ static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
                                int mi_row, int mi_col,
                                int_mv frame_nearest_mv[MAX_REF_FRAMES],
                                int_mv frame_near_mv[MAX_REF_FRAMES],
-                               struct buf_2d yv12_mb[4][MAX_MB_PLANE],
-                               struct scale_factors scale[MAX_REF_FRAMES]) {
+                               struct buf_2d yv12_mb[4][MAX_MB_PLANE]) {
   VP9_COMMON *cm = &cpi->common;
   YV12_BUFFER_CONFIG *yv12 = &cm->yv12_fb[cpi->common.ref_frame_map[idx]];
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;
+  const struct scale_factors *const sf =
+      &cpi->common.active_ref_scale[frame_type - 1];
 
-  // set up scaling factors
-  scale[frame_type] = cpi->common.active_ref_scale[frame_type - 1];
-
-  scale[frame_type].sfc->set_scaled_offsets(&scale[frame_type],
-                                            mi_row * MI_SIZE, mi_col * MI_SIZE);
   // TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
   // use the UV scaling factors.
-  setup_pred_block(xd, yv12_mb[frame_type], yv12, mi_row, mi_col,
-                   &scale[frame_type], &scale[frame_type]);
+  setup_pred_block(xd, yv12_mb[frame_type], yv12, mi_row, mi_col, sf, sf);
 
   // Gets an initial list of candidate vectors from neighbours and orders them
   vp9_find_mv_refs(cm, xd, tile, xd->mi_8x8[0],
@@ -2292,7 +2287,7 @@ static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
   // Further refinement that is encode side only to test the top few candidates
   // in full and choose the best as the centre point for subsequent searches.
   // The current implementation doesn't support scaling.
-  if (!vp9_is_scaled(scale[frame_type].sfc) && block_size >= BLOCK_8X8)
+  if (!vp9_is_scaled(sf) && block_size >= BLOCK_8X8)
     mv_pred(cpi, x, yv12_mb[frame_type][0].buf, yv12->y_stride,
             frame_type, block_size);
 }
@@ -2518,7 +2513,7 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
                                 ref_yv12[!id].stride,
                                 second_pred, pw,
                                 &frame_mv[refs[!id]].as_mv,
-                                &xd->scale_factor[!id],
+                                xd->scale_factors[!id],
                                 pw, ph, 0,
                                 &xd->subpix, MV_PRECISION_Q3,
                                 mi_col * MI_SIZE, mi_row * MI_SIZE);
@@ -3129,7 +3124,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
   int64_t dist_uv[TX_SIZES];
   int skip_uv[TX_SIZES];
   MB_PREDICTION_MODE mode_uv[TX_SIZES];
-  struct scale_factors scale_factor[4];
   unsigned int ref_frame_mask = 0;
   unsigned int mode_mask = 0;
   int64_t mode_distortions[MB_MODE_COUNT] = {-1};
@@ -3196,8 +3190,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
     if (cpi->ref_frame_flags & flag_list[ref_frame]) {
       setup_buffer_inter(cpi, x, tile, idx_list[ref_frame], ref_frame,
                          block_size, mi_row, mi_col,
-                         frame_mv[NEARESTMV], frame_mv[NEARMV],
-                         yv12_mb, scale_factor);
+                         frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb);
     }
     frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
     frame_mv[ZEROMV][ref_frame].as_int = 0;
@@ -3310,7 +3303,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
       continue;
     }
 
-    set_scale_factors(xd, ref_frame, second_ref_frame, scale_factor);
+    set_scale_factors(cm, xd, ref_frame - 1, second_ref_frame - 1);
     mbmi->uv_mode = DC_PRED;
 
     // Evaluate all sub-pel filters irrespective of whether we can use
@@ -3322,7 +3315,6 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
       if (!(cpi->ref_frame_flags & flag_list[second_ref_frame]))
         continue;
-      set_scale_factors(xd, ref_frame, second_ref_frame, scale_factor);
 
       mode_excluded = mode_excluded ? mode_excluded
                                     : cm->reference_mode == SINGLE_REFERENCE;
     } else {
@@ -3760,8 +3752,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
     vp9_zero(best_tx_diff);
   }
 
-  set_scale_factors(xd, mbmi->ref_frame[0], mbmi->ref_frame[1],
-                    scale_factor);
+  set_scale_factors(cm, xd, mbmi->ref_frame[0] - 1, mbmi->ref_frame[1] - 1);
   store_coding_context(x, ctx, best_mode_index,
                        &mbmi->ref_mvs[mbmi->ref_frame[0]][0],
                        &mbmi->ref_mvs[mbmi->ref_frame[1] < 0 ? 0 :
@@ -3815,7 +3806,6 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
   int64_t dist_uv[TX_SIZES];
   int skip_uv[TX_SIZES];
   MB_PREDICTION_MODE mode_uv[TX_SIZES] = { 0 };
-  struct scale_factors scale_factor[4];
   int intra_cost_penalty = 20 * vp9_dc_quant(cpi->common.base_qindex,
                                              cpi->common.y_dc_delta_q);
   int_mv seg_mvs[4][MAX_REF_FRAMES];
@@ -3850,7 +3840,7 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
       setup_buffer_inter(cpi, x, tile, idx_list[ref_frame], ref_frame,
                          block_size, mi_row, mi_col,
                          frame_mv[NEARESTMV], frame_mv[NEARMV],
-                         yv12_mb, scale_factor);
+                         yv12_mb);
     }
     frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
     frame_mv[ZEROMV][ref_frame].as_int = 0;
@@ -3947,14 +3937,14 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
     // TODO(jingning, jkoleszar): scaling reference frame not supported for
     // sub8x8 blocks.
     if (ref_frame > 0 &&
-        vp9_is_scaled(scale_factor[ref_frame].sfc))
+        vp9_is_scaled(&cpi->common.active_ref_scale[ref_frame - 1]))
      continue;
 
    if (second_ref_frame > 0 &&
-        vp9_is_scaled(scale_factor[second_ref_frame].sfc))
+        vp9_is_scaled(&cpi->common.active_ref_scale[second_ref_frame - 1]))
      continue;
 
-    set_scale_factors(xd, ref_frame, second_ref_frame, scale_factor);
+    set_scale_factors(cm, xd, ref_frame - 1, second_ref_frame - 1);
     mbmi->uv_mode = DC_PRED;
 
     // Evaluate all sub-pel filters irrespective of whether we can use
@@ -3965,7 +3955,6 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
     if (comp_pred) {
       if (!(cpi->ref_frame_flags & flag_list[second_ref_frame]))
         continue;
-      set_scale_factors(xd, ref_frame, second_ref_frame, scale_factor);
 
       mode_excluded = mode_excluded ? mode_excluded
                                     : cm->reference_mode == SINGLE_REFERENCE;
@@ -4501,8 +4490,7 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
     vp9_zero(best_tx_diff);
   }
 
-  set_scale_factors(xd, mbmi->ref_frame[0], mbmi->ref_frame[1],
-                    scale_factor);
+  set_scale_factors(cm, xd, mbmi->ref_frame[0] - 1, mbmi->ref_frame[1] - 1);
   store_coding_context(x, ctx, best_mode_index,
                        &mbmi->ref_mvs[mbmi->ref_frame[0]][0],
                        &mbmi->ref_mvs[mbmi->ref_frame[1] < 0 ? 0 :
diff --git a/vp9/encoder/vp9_temporal_filter.c b/vp9/encoder/vp9_temporal_filter.c
index b77d0d009..876219268 100644
--- a/vp9/encoder/vp9_temporal_filter.c
+++ b/vp9/encoder/vp9_temporal_filter.c
@@ -393,8 +393,7 @@ void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance) {
   const int num_frames_forward = vp9_lookahead_depth(cpi->lookahead)
                                - (num_frames_backward + 1);
 
-  struct scale_factors scale;
-  struct scale_factors_common scale_comm;
+  struct scale_factors sf;
 
   switch (blur_type) {
     case 1:
@@ -454,7 +453,7 @@ void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance) {
 #endif
 
   // Setup scaling factors. Scaling on each of the arnr frames is not supported
-  vp9_setup_scale_factors_for_frame(&scale, &scale_comm,
+  vp9_setup_scale_factors_for_frame(&sf,
                                     get_frame_new_buffer(cm)->y_crop_width,
                                     get_frame_new_buffer(cm)->y_crop_height,
                                     cm->width, cm->height);
@@ -469,7 +468,7 @@ void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance) {
   }
 
   temporal_filter_iterate_c(cpi, frames_to_blur, frames_to_blur_backward,
-                            strength, &scale);
+                            strength, &sf);
 }
 
 void configure_arnr_filter(VP9_COMP *cpi, const unsigned int this_frame,
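
The core of this change is the new vp9_scale_mv(): the per-block sub-pel offset that set_scaled_offsets() used to cache in x_offset_q4/y_offset_q4 is now derived inline from the block position, so scale_factors carries no per-block mutable state. The standalone sketch below exercises exactly that arithmetic; it is illustrative only. struct scale_factors is trimmed to the two fixed-point fields this path reads, MV/MV32 use plain ints rather than the library's typedefs, and the scale and block/MV values in main() are made up.

#include <stdio.h>

#define REF_SCALE_SHIFT 14
#define SUBPEL_BITS 4
#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)

typedef struct { int row, col; } MV;   /* Q4 motion vector (trimmed) */
typedef struct { int row, col; } MV32;

struct scale_factors {                 /* trimmed to the fields used here */
  int x_scale_fp;  /* horizontal fixed-point scale factor */
  int y_scale_fp;  /* vertical fixed-point scale factor */
};

static int scaled_x(int val, const struct scale_factors *sf) {
  return val * sf->x_scale_fp >> REF_SCALE_SHIFT;
}

static int scaled_y(int val, const struct scale_factors *sf) {
  return val * sf->y_scale_fp >> REF_SCALE_SHIFT;
}

/* Same shape as the new vp9_scale_mv(): the sub-pel offset is computed
 * from the block position (x, y) at the call site instead of being
 * cached in the scale_factors struct by set_scaled_offsets(). */
static MV32 scale_mv(const MV *mv, int x, int y,
                     const struct scale_factors *sf) {
  const int x_off_q4 = scaled_x(x << SUBPEL_BITS, sf) & SUBPEL_MASK;
  const int y_off_q4 = scaled_y(y << SUBPEL_BITS, sf) & SUBPEL_MASK;
  const MV32 res = { scaled_y(mv->row, sf) + y_off_q4,
                     scaled_x(mv->col, sf) + x_off_q4 };
  return res;
}

int main(void) {
  /* Illustrative 2:1 downscale in both directions. */
  const struct scale_factors sf = { 1 << (REF_SCALE_SHIFT - 1),
                                    1 << (REF_SCALE_SHIFT - 1) };
  const MV mv = { 33, -17 };  /* Q4: (2.0625, -1.0625) pels */
  /* On targets with arithmetic right shift of negative values (which
   * libvpx assumes), this prints row=24 col=-1. */
  const MV32 r = scale_mv(&mv, 11, 7, &sf);
  printf("scaled mv: row=%d col=%d\n", r.row, r.col);
  return 0;
}

Because the offset depends only on the block position and the per-frame fixed-point scale, nothing in scale_factors is written per block any more; that is what lets MACROBLOCKD hold const pointers into cm->active_ref_scale instead of each owning writable copies, as the vp9_blockd.h and set_scale_factors() hunks above show.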