Merge "Fix ubsan warnings: vp9/encoder/vp9_mcomp.c"
commit b3933e2d3c
@@ -59,8 +59,8 @@ static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi,
   // Try sub-pixel MC
   // if (bestsme > error_thresh && bestsme < INT_MAX)
   {
-    int distortion;
-    unsigned int sse;
+    uint32_t distortion;
+    uint32_t sse;
     cpi->find_fractional_mv_step(
         x, dst_mv, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit,
         &v_fn_ptr, 0, mv_sf->subpel_iters_per_step,
@@ -162,6 +162,33 @@ static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) {
   return &buf[(r >> 3) * stride + (c >> 3)];
 }

+#if CONFIG_VP9_HIGHBITDEPTH
+/* checks if (r, c) has better score than previous best */
+#define CHECK_BETTER(v, r, c) \
+  if (c >= minc && c <= maxc && r >= minr && r <= maxr) {            \
+    int64_t tmpmse;                                                  \
+    if (second_pred == NULL) {                                       \
+      thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c),    \
+                         sp(r), z, src_stride, &sse);                \
+    } else {                                                         \
+      thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c),   \
+                          sp(r), z, src_stride, &sse, second_pred);  \
+    }                                                                \
+    tmpmse = thismse;                                                \
+    tmpmse += MVC(r, c);                                             \
+    if (tmpmse >= INT_MAX) {                                         \
+      v = INT_MAX;                                                   \
+    } else if ((v = (uint32_t)tmpmse) < besterr) {                   \
+      besterr = v;                                                   \
+      br = r;                                                        \
+      bc = c;                                                        \
+      *distortion = thismse;                                         \
+      *sse1 = sse;                                                   \
+    }                                                                \
+  } else {                                                           \
+    v = INT_MAX;                                                     \
+  }
+#else
 /* checks if (r, c) has better score than previous best */
 #define CHECK_BETTER(v, r, c) \
   if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
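In the new high-bit-depth CHECK_BETTER above, the subpel error and the motion-vector cost are summed in an int64_t and clamped to INT_MAX before being narrowed back to uint32_t, so the 32-bit signed addition that UBSan flagged can no longer overflow. A minimal standalone sketch of that pattern (identifiers are illustrative, not libvpx API):

#include <limits.h>
#include <stdint.h>

/* Accumulate an error term and an MV cost (both nonnegative in practice) in
 * 64 bits, then saturate at INT_MAX the way the high-bit-depth CHECK_BETTER
 * does, instead of letting a 32-bit signed addition wrap. */
static uint32_t saturating_error_plus_cost(uint32_t thismse, int mv_cost) {
  int64_t tmpmse = (int64_t)thismse + mv_cost;
  if (tmpmse >= INT_MAX) return INT_MAX;
  return (uint32_t)tmpmse;
}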
@@ -182,6 +209,7 @@ static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) {
     v = INT_MAX; \
   }

+#endif
 #define FIRST_LEVEL_CHECKS \
   { \
     unsigned int left, right, up, down, diag; \
@@ -310,10 +338,10 @@ static unsigned int setup_center_error(const MACROBLOCKD *xd,
                                        const uint8_t *second_pred,
                                        int w, int h, int offset,
                                        int *mvjcost, int *mvcost[2],
-                                       unsigned int *sse1,
-                                       int *distortion) {
-  unsigned int besterr;
+                                       uint32_t *sse1,
+                                       uint32_t *distortion) {
 #if CONFIG_VP9_HIGHBITDEPTH
+  uint64_t besterr;
   if (second_pred != NULL) {
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       DECLARE_ALIGNED(16, uint16_t, comp_pred16[64 * 64]);
@@ -329,9 +357,13 @@ static unsigned int setup_center_error(const MACROBLOCKD *xd,
   } else {
     besterr = vfp->vf(y + offset, y_stride, src, src_stride, sse1);
   }
-  *distortion = besterr;
+  *distortion = (uint32_t)besterr;
   besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
+  if (besterr >= UINT32_MAX)
+    return UINT32_MAX;
+  return (uint32_t)besterr;
 #else
+  uint32_t besterr;
   (void) xd;
   if (second_pred != NULL) {
     DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]);
@@ -342,8 +374,8 @@ static unsigned int setup_center_error(const MACROBLOCKD *xd,
   }
   *distortion = besterr;
   besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
-#endif  // CONFIG_VP9_HIGHBITDEPTH
   return besterr;
+#endif  // CONFIG_VP9_HIGHBITDEPTH
 }

 static INLINE int divide_and_round(const int n, const int d) {
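The 64-bit accumulator and the UINT32_MAX clamp in setup_center_error matter because, on high-bit-depth input, the centre-point error plus the MV cost can exceed what a 32-bit accumulator holds. A rough back-of-the-envelope bound, given purely for illustration rather than taken from the codebase: a 64x64 block of 12-bit samples can produce a sum of squared errors of about 4096 * 4095^2, roughly 6.9e10, well above UINT32_MAX (about 4.3e9).

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Worst-case sum of squared errors for a 64x64 block of 12-bit samples,
 * compared against the 32-bit ceiling. */
int main(void) {
  uint64_t worst_sse = 64ULL * 64 * 4095 * 4095;
  printf("worst-case SSE %" PRIu64 " vs UINT32_MAX %" PRIu32 "\n",
         worst_sse, UINT32_MAX);
  return 0;
}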
@@ -373,7 +405,7 @@ static void get_cost_surf_min(int *cost_list, int *ir, int *ic,
                            (cost_list[4] - 2 * cost_list[0] + cost_list[2]));
 }

-int vp9_skip_sub_pixel_tree(
+uint32_t vp9_skip_sub_pixel_tree(
     const MACROBLOCK *x,
     MV *bestmv, const MV *ref_mv,
     int allow_hp,
@@ -383,8 +415,8 @@ int vp9_skip_sub_pixel_tree(
     int iters_per_step,
     int *cost_list,
     int *mvjcost, int *mvcost[2],
-    int *distortion,
-    unsigned int *sse1,
+    uint32_t *distortion,
+    uint32_t *sse1,
     const uint8_t *second_pred,
     int w, int h) {
   SETUP_SUBPEL_SEARCH;
@@ -418,7 +450,7 @@ int vp9_skip_sub_pixel_tree(
   return besterr;
 }

-int vp9_find_best_sub_pixel_tree_pruned_evenmore(
+uint32_t vp9_find_best_sub_pixel_tree_pruned_evenmore(
     const MACROBLOCK *x,
     MV *bestmv, const MV *ref_mv,
     int allow_hp,
@@ -428,8 +460,8 @@ int vp9_find_best_sub_pixel_tree_pruned_evenmore(
     int iters_per_step,
     int *cost_list,
     int *mvjcost, int *mvcost[2],
-    int *distortion,
-    unsigned int *sse1,
+    uint32_t *distortion,
+    uint32_t *sse1,
     const uint8_t *second_pred,
     int w, int h) {
   SETUP_SUBPEL_SEARCH;
@@ -498,7 +530,7 @@ int vp9_find_best_sub_pixel_tree_pruned_evenmore(
   return besterr;
 }

-int vp9_find_best_sub_pixel_tree_pruned_more(const MACROBLOCK *x,
+uint32_t vp9_find_best_sub_pixel_tree_pruned_more(const MACROBLOCK *x,
                                              MV *bestmv, const MV *ref_mv,
                                              int allow_hp,
                                              int error_per_bit,
@@ -507,8 +539,8 @@ int vp9_find_best_sub_pixel_tree_pruned_more(const MACROBLOCK *x,
                                              int iters_per_step,
                                              int *cost_list,
                                              int *mvjcost, int *mvcost[2],
-                                             int *distortion,
-                                             unsigned int *sse1,
+                                             uint32_t *distortion,
+                                             uint32_t *sse1,
                                              const uint8_t *second_pred,
                                              int w, int h) {
   SETUP_SUBPEL_SEARCH;
@@ -572,7 +604,7 @@ int vp9_find_best_sub_pixel_tree_pruned_more(const MACROBLOCK *x,
   return besterr;
 }

-int vp9_find_best_sub_pixel_tree_pruned(const MACROBLOCK *x,
+uint32_t vp9_find_best_sub_pixel_tree_pruned(const MACROBLOCK *x,
                                         MV *bestmv, const MV *ref_mv,
                                         int allow_hp,
                                         int error_per_bit,
@@ -581,8 +613,8 @@ int vp9_find_best_sub_pixel_tree_pruned(const MACROBLOCK *x,
                                         int iters_per_step,
                                         int *cost_list,
                                         int *mvjcost, int *mvcost[2],
-                                        int *distortion,
-                                        unsigned int *sse1,
+                                        uint32_t *distortion,
+                                        uint32_t *sse1,
                                         const uint8_t *second_pred,
                                         int w, int h) {
   SETUP_SUBPEL_SEARCH;
@@ -674,19 +706,19 @@ static const MV search_step_table[12] = {
     {0, -1}, {0, 1}, {-1, 0}, {1, 0}
 };

-int vp9_find_best_sub_pixel_tree(const MACROBLOCK *x,
+uint32_t vp9_find_best_sub_pixel_tree(const MACROBLOCK *x,
                                  MV *bestmv, const MV *ref_mv,
                                  int allow_hp,
                                  int error_per_bit,
                                  const vp9_variance_fn_ptr_t *vfp,
                                  int forced_stop,
                                  int iters_per_step,
                                  int *cost_list,
                                  int *mvjcost, int *mvcost[2],
-                                 int *distortion,
-                                 unsigned int *sse1,
+                                 uint32_t *distortion,
+                                 uint32_t *sse1,
                                  const uint8_t *second_pred,
                                  int w, int h) {
   const uint8_t *const z = x->plane[0].src.buf;
   const uint8_t *const src_address = z;
   const int src_stride = x->plane[0].src.stride;
@@ -1381,12 +1413,22 @@ int vp9_get_mvpred_var(const MACROBLOCK *x,
   const struct buf_2d *const what = &x->plane[0].src;
   const struct buf_2d *const in_what = &xd->plane[0].pre[0];
   const MV mv = {best_mv->row * 8, best_mv->col * 8};
-  unsigned int unused;
+  uint32_t unused;
+#if CONFIG_VP9_HIGHBITDEPTH
+  uint64_t err = vfp->vf(what->buf, what->stride,
+                         get_buf_from_mv(in_what, best_mv),
+                         in_what->stride, &unused);
+  err += (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost,
+                                   x->mvcost, x->errorperbit) : 0);
+  if (err >= INT_MAX)
+    return INT_MAX;
+  return (int)err;
+#else
   return vfp->vf(what->buf, what->stride,
                  get_buf_from_mv(in_what, best_mv), in_what->stride, &unused) +
       (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost,
                                 x->mvcost, x->errorperbit) : 0);
+#endif
 }

 int vp9_get_mvpred_av_var(const MACROBLOCK *x,
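vp9_get_mvpred_var still returns int, so the high-bit-depth path above folds the MV cost into a uint64_t and saturates at INT_MAX. That keeps the `bestsme < INT_MAX` guards used elsewhere in the encoder meaningful: a clamped error stays ranked last, whereas a wrapped one would masquerade as an excellent score. A small illustrative comparison (the values are made up):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint64_t huge_err = (uint64_t)UINT32_MAX + 12345;  /* overflowed total */
  uint32_t wrapped = (uint32_t)huge_err;             /* 12344: looks tiny */
  uint32_t clamped = huge_err >= UINT32_MAX ? UINT32_MAX : (uint32_t)huge_err;
  printf("wrapped=%" PRIu32 " clamped=%" PRIu32 "\n", wrapped, clamped);
  return 0;
}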
@@ -74,7 +74,7 @@ unsigned int vp9_int_pro_motion_estimation(const struct VP9_COMP *cpi,
                                            BLOCK_SIZE bsize,
                                            int mi_row, int mi_col);

-typedef int (fractional_mv_step_fp) (
+typedef uint32_t (fractional_mv_step_fp) (
     const MACROBLOCK *x,
     MV *bestmv, const MV *ref_mv,
     int allow_hp,
@@ -84,7 +84,7 @@ typedef int (fractional_mv_step_fp) (
     int iters_per_step,
     int *cost_list,
     int *mvjcost, int *mvcost[2],
-    int *distortion, unsigned int *sse1,
+    uint32_t *distortion, uint32_t *sse1,
     const uint8_t *second_pred,
     int w, int h);

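Because the fractional_mv_step_fp typedef now returns uint32_t and takes uint32_t out-parameters, every caller has to declare matching locals, which is what the later hunks do. A deliberately simplified stand-in for that calling pattern (the real typedef has many more parameters; the names here are illustrative only):

#include <stdint.h>

/* Reduced analogue of the new typedef: return value and the distortion/sse
 * out-params are all uint32_t. */
typedef uint32_t (subpel_step_fn)(uint32_t *distortion, uint32_t *sse);

static uint32_t dummy_step(uint32_t *distortion, uint32_t *sse) {
  *distortion = 0;
  *sse = 0;
  return 0;
}

int main(void) {
  subpel_step_fn *step = dummy_step;
  uint32_t distortion, sse;  /* previously `int distortion; unsigned int sse;` */
  uint32_t bestsme = step(&distortion, &sse);
  return (int)bestsme;
}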
@@ -157,7 +157,7 @@ static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
   const int ref = mi->ref_frame[0];
   const MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
   MV center_mv;
-  int dis;
+  uint32_t dis;
   int rate_mode;
   const int tmp_col_min = x->mv_col_min;
   const int tmp_col_max = x->mv_col_max;
@@ -1564,7 +1564,8 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
         !cpi->use_svc &&
         cpi->oxcf.rc_mode == VPX_CBR) {
       int tmp_sad;
-      int dis, cost_list[5];
+      uint32_t dis;
+      int cost_list[5];

       if (bsize < BLOCK_16X16)
         continue;
@@ -2175,7 +2176,7 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
       const int tmp_col_max = x->mv_col_max;
       const int tmp_row_min = x->mv_row_min;
       const int tmp_row_max = x->mv_row_max;
-      int dummy_dist;
+      uint32_t dummy_dist;

       if (i == 0) {
         mvp_full.row = b_mv[NEARESTMV].as_mv.row >> 3;
@@ -1715,8 +1715,8 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
     x->mv_row_max = tmp_row_max;

     if (bestsme < INT_MAX) {
-      int dis;  /* TODO: use dis in distortion calculation later. */
-      unsigned int sse;
+      uint32_t dis;  /* TODO: use dis in distortion calculation later. */
+      uint32_t sse;
       bestsme = cpi->find_fractional_mv_step(
           x, &tmp_mv,
           &ref_mv[id].as_mv,
@@ -1916,7 +1916,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x,
                                              INT_MAX, 1);

         if (bestsme < INT_MAX) {
-          int distortion;
+          uint32_t distortion;
           cpi->find_fractional_mv_step(
               x,
               new_mv,
@@ -2346,7 +2346,7 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
   x->mv_row_max = tmp_row_max;

   if (bestsme < INT_MAX) {
-    int dis;  /* TODO: use dis in distortion calculation later. */
+    uint32_t dis;  /* TODO: use dis in distortion calculation later. */
     cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv,
                                  cm->allow_high_precision_mv,
                                  x->errorperbit,
@@ -264,8 +264,8 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi,
   int step_param;
   int sadpb = x->sadperbit16;
   int bestsme = INT_MAX;
-  int distortion;
-  unsigned int sse;
+  uint32_t distortion;
+  uint32_t sse;
   int cost_list[5];

   MV best_ref_mv1 = {0, 0};