diff --git a/vp8/common/findnearmv.h b/vp8/common/findnearmv.h
index 06ef060c2..c60e46361 100644
--- a/vp8/common/findnearmv.h
+++ b/vp8/common/findnearmv.h
@@ -124,7 +124,7 @@ static int above_block_mv(const MODE_INFO *cur_mb, int b, int mi_stride)
         b += 16;
     }
 
-    return (cur_mb->bmi + b - 4)->mv.as_int;
+    return (cur_mb->bmi + (b - 4))->mv.as_int;
 }
 static B_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb, int b)
 {
diff --git a/vp8/common/reconinter.c b/vp8/common/reconinter.c
index 43f84d01b..bac3c9474 100644
--- a/vp8/common/reconinter.c
+++ b/vp8/common/reconinter.c
@@ -138,14 +138,10 @@ void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre,
     {
         for (r = 0; r < 4; r++)
         {
-#if !(CONFIG_FAST_UNALIGNED)
             pred_ptr[0] = ptr[0];
             pred_ptr[1] = ptr[1];
             pred_ptr[2] = ptr[2];
             pred_ptr[3] = ptr[3];
-#else
-            *(uint32_t *)pred_ptr = *(uint32_t *)ptr ;
-#endif
             pred_ptr += pitch;
             ptr += pre_stride;
         }
@@ -196,16 +192,12 @@ static void build_inter_predictors_b(BLOCKD *d, unsigned char *dst, int dst_stri
     {
         for (r = 0; r < 4; r++)
         {
-#if !(CONFIG_FAST_UNALIGNED)
             dst[0] = ptr[0];
             dst[1] = ptr[1];
             dst[2] = ptr[2];
             dst[3] = ptr[3];
-#else
-            *(uint32_t *)dst = *(uint32_t *)ptr ;
-#endif
-            dst     += dst_stride;
-            ptr     += pre_stride;
+            dst += dst_stride;
+            ptr += pre_stride;
         }
     }
 }
@@ -270,7 +262,7 @@ void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x)
                    + x->block[yoffset+4].bmi.mv.as_mv.row
                    + x->block[yoffset+5].bmi.mv.as_mv.row;
 
-            temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) << 3);
+            temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);
 
             x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;
 
@@ -279,7 +271,7 @@ void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x)
                    + x->block[yoffset+4].bmi.mv.as_mv.col
                    + x->block[yoffset+5].bmi.mv.as_mv.col;
 
-            temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) << 3);
+            temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);
 
             x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;
 
@@ -558,7 +550,7 @@ void build_4x4uvmvs(MACROBLOCKD *x)
                    + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.row
                    + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.row;
 
-            temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) << 3);
+            temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);
 
             x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;
 
@@ -567,7 +559,7 @@ void build_4x4uvmvs(MACROBLOCKD *x)
                    + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.col
                    + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.col;
 
-            temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) << 3);
+            temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);
 
             x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;
 
diff --git a/vp8/decoder/decodemv.c b/vp8/decoder/decodemv.c
index 8027a07ed..759d842c3 100644
--- a/vp8/decoder/decodemv.c
+++ b/vp8/decoder/decodemv.c
@@ -110,8 +110,8 @@ static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc)
 
 static void read_mv(vp8_reader *r, MV *mv, const MV_CONTEXT *mvc)
 {
-    mv->row = (short)(read_mvcomponent(r, mvc) << 1);
-    mv->col = (short)(read_mvcomponent(r, ++mvc) << 1);
+    mv->row = (short)(read_mvcomponent(r, mvc) * 2);
+    mv->col = (short)(read_mvcomponent(r, ++mvc) * 2);
 }
 
 
@@ -292,9 +292,9 @@ static void decode_split_mv(vp8_reader *const bc, MODE_INFO *mi,
                 blockmv.as_int = 0;
                 if( vp8_read(bc, prob[2]) )
                 {
-                    blockmv.as_mv.row = read_mvcomponent(bc, &mvc[0]) << 1;
+                    blockmv.as_mv.row = read_mvcomponent(bc, &mvc[0]) * 2;
                     blockmv.as_mv.row += best_mv.as_mv.row;
-                    blockmv.as_mv.col = read_mvcomponent(bc, &mvc[1]) << 1;
+                    blockmv.as_mv.col = read_mvcomponent(bc, &mvc[1]) * 2;
                     blockmv.as_mv.col += best_mv.as_mv.col;
                 }
             }
diff --git a/vp8/decoder/decodframe.c b/vp8/decoder/decodframe.c
index 0050c11c1..50ee9c52f 100644
--- a/vp8/decoder/decodframe.c
+++ b/vp8/decoder/decodframe.c
@@ -576,7 +576,7 @@ static void decode_mb_rows(VP8D_COMP *pbi)
 
         xd->left_available = 0;
 
-        xd->mb_to_top_edge = -((mb_row * 16)) << 3;
+        xd->mb_to_top_edge = -((mb_row * 16) << 3);
         xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
 
         xd->recon_above[0] = dst_buffer[0] + recon_yoffset;
diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c
index 5f0c1f7a6..8ca4f5f72 100644
--- a/vp8/encoder/bitstream.c
+++ b/vp8/encoder/bitstream.c
@@ -432,7 +432,7 @@ static void write_mv_ref
     assert(NEARESTMV <= m && m <= SPLITMV);
 #endif
     vp8_write_token(w, vp8_mv_ref_tree, p,
-                    vp8_mv_ref_encoding_array - NEARESTMV + m);
+                    vp8_mv_ref_encoding_array + (m - NEARESTMV));
 }
 
 static void write_sub_mv_ref
@@ -444,7 +444,7 @@ static void write_sub_mv_ref
     assert(LEFT4X4 <= m && m <= NEW4X4);
 #endif
     vp8_write_token(w, vp8_sub_mv_ref_tree, p,
-                    vp8_sub_mv_ref_encoding_array - LEFT4X4 + m);
+                    vp8_sub_mv_ref_encoding_array + (m - LEFT4X4));
 }
 
 static void write_mv
@@ -577,7 +577,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
                  */
                 xd->mb_to_left_edge = -((mb_col * 16) << 3);
                 xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
-                xd->mb_to_top_edge = -((mb_row * 16)) << 3;
+                xd->mb_to_top_edge = -((mb_row * 16) << 3);
                 xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
 
 #ifdef VP8_ENTROPY_STATS
diff --git a/vp8/encoder/firstpass.c b/vp8/encoder/firstpass.c
index ded0c435d..8548e7476 100644
--- a/vp8/encoder/firstpass.c
+++ b/vp8/encoder/firstpass.c
@@ -711,8 +711,8 @@ skip_motion_search:
                     neutral_count++;
                 }
 
-                d->bmi.mv.as_mv.row <<= 3;
-                d->bmi.mv.as_mv.col <<= 3;
+                d->bmi.mv.as_mv.row *= 8;
+                d->bmi.mv.as_mv.col *= 8;
                 this_error = motion_error;
                 vp8_set_mbmode_and_mvs(x, NEWMV, &d->bmi.mv);
                 vp8_encode_inter16x16y(x);
@@ -915,7 +915,7 @@ static int64_t estimate_modemvcost(VP8_COMP *cpi,
                                    FIRSTPASS_STATS * fpstats)
 {
     int mv_cost;
-    int mode_cost;
+    int64_t mode_cost;
 
     double av_pct_inter = fpstats->pcnt_inter / fpstats->count;
     double av_pct_motion = fpstats->pcnt_motion / fpstats->count;
@@ -937,10 +937,9 @@ static int64_t estimate_modemvcost(VP8_COMP *cpi,
     /* Crude estimate of overhead cost from modes
      * << 9 is the normalization to (bits * 512) used in vp8_bits_per_mb
      */
-    mode_cost =
-        (int)( ( ((av_pct_inter - av_pct_motion) * zz_cost) +
-                 (av_pct_motion * motion_cost) +
-                 (av_intra * intra_cost) ) * cpi->common.MBs ) << 9;
+    mode_cost =((((av_pct_inter - av_pct_motion) * zz_cost) +
+                (av_pct_motion * motion_cost) +
+                (av_intra * intra_cost)) * cpi->common.MBs) * 512;
 
     return mv_cost + mode_cost;
 }
diff --git a/vp8/encoder/mcomp.c b/vp8/encoder/mcomp.c
index 83c39891a..0b11ea64a 100644
--- a/vp8/encoder/mcomp.c
+++ b/vp8/encoder/mcomp.c
@@ -210,7 +210,7 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
     unsigned char *z = (*(b->base_src) + b->src);
 
     int rr = ref_mv->as_mv.row >> 1, rc = ref_mv->as_mv.col >> 1;
-    int br = bestmv->as_mv.row << 2, bc = bestmv->as_mv.col << 2;
+    int br = bestmv->as_mv.row * 4, bc = bestmv->as_mv.col * 4;
     int tr = br, tc = bc;
     unsigned int besterr;
     unsigned int left, right, up, down, diag;
@@ -220,10 +220,14 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
     unsigned int quarteriters = 4;
     int thismse;
 
-    int minc = MAX(x->mv_col_min << 2, (ref_mv->as_mv.col >> 1) - ((1 << mvlong_width) - 1));
-    int maxc = MIN(x->mv_col_max << 2, (ref_mv->as_mv.col >> 1) + ((1 << mvlong_width) - 1));
-    int minr = MAX(x->mv_row_min << 2, (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1));
-    int maxr = MIN(x->mv_row_max << 2, (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1));
+    int minc = MAX(x->mv_col_min * 4,
+                   (ref_mv->as_mv.col >> 1) - ((1 << mvlong_width) - 1));
+    int maxc = MIN(x->mv_col_max * 4,
+                   (ref_mv->as_mv.col >> 1) + ((1 << mvlong_width) - 1));
+    int minr = MAX(x->mv_row_min * 4,
+                   (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1));
+    int maxr = MIN(x->mv_row_max * 4,
+                   (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1));
 
     int y_stride;
     int offset;
@@ -254,8 +258,8 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
     offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
 
     /* central mv */
-    bestmv->as_mv.row <<= 3;
-    bestmv->as_mv.col <<= 3;
+    bestmv->as_mv.row *= 8;
+    bestmv->as_mv.col *= 8;
 
     /* calculate central point error */
     besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
@@ -337,8 +341,8 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
         tc = bc;
     }
 
-    bestmv->as_mv.row = br << 1;
-    bestmv->as_mv.col = bc << 1;
+    bestmv->as_mv.row = br * 2;
+    bestmv->as_mv.col = bc * 2;
 
     if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL<<3)) ||
         (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL<<3)))
@@ -699,8 +703,8 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
 #endif
 
     /* central mv */
-    bestmv->as_mv.row <<= 3;
-    bestmv->as_mv.col <<= 3;
+    bestmv->as_mv.row *= 8;
+    bestmv->as_mv.col *= 8;
     startmv = *bestmv;
 
     /* calculate central point error */
@@ -1315,8 +1319,8 @@ int vp8_diamond_search_sadx4
             (*num00)++;
         }
 
-    this_mv.as_mv.row = best_mv->as_mv.row << 3;
-    this_mv.as_mv.col = best_mv->as_mv.col << 3;
+    this_mv.as_mv.row = best_mv->as_mv.row * 8;
+    this_mv.as_mv.col = best_mv->as_mv.col * 8;
 
     return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad)
            + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
@@ -1709,8 +1713,8 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
         }
     }
 
-    this_mv.as_mv.row = best_mv->as_mv.row << 3;
-    this_mv.as_mv.col = best_mv->as_mv.col << 3;
+    this_mv.as_mv.row = best_mv->as_mv.row * 8;
+    this_mv.as_mv.col = best_mv->as_mv.col * 8;
 
     return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, &thissad)
            + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
@@ -1905,8 +1909,8 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
         }
     }
 
-    this_mv.as_mv.row = ref_mv->as_mv.row << 3;
-    this_mv.as_mv.col = ref_mv->as_mv.col << 3;
+    this_mv.as_mv.row = ref_mv->as_mv.row * 8;
+    this_mv.as_mv.col = ref_mv->as_mv.col * 8;
 
     return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad)
            + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index 521e84fda..5016cc422 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -935,7 +935,7 @@ int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4])
     assert(NEARESTMV <= m && m <= SPLITMV);
     vp8_mv_ref_probs(p, near_mv_ref_ct);
     return vp8_cost_token(vp8_mv_ref_tree, p,
-                          vp8_mv_ref_encoding_array - NEARESTMV + m);
+                          vp8_mv_ref_encoding_array + (m - NEARESTMV));
 }
 
 void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv)
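
Note on the recurring rewrite above: expressions such as d->bmi.mv.as_mv.row <<= 3 and (temp >> (sizeof(int) * CHAR_BIT - 1)) << 3 left-shift values that can be negative, which is undefined behavior in C; the multiply forms (*= 8, * 8) are well defined and compile to the same instructions. The added bias in the reconinter.c hunks is a branchless rounding term, so the later temp / 8 rounds to nearest (ties away from zero) instead of truncating toward zero. The standalone sketch below shows the same idiom in isolation; it is not part of the patch, and the helper name average_of_4_mvs is invented for illustration.

    #include <limits.h>  /* CHAR_BIT */
    #include <stdio.h>

    /* Illustrative only (not from the patch): average four possibly
     * negative motion-vector components the way the reconinter.c hunks
     * do, rounding to nearest with ties away from zero.  The bias term
     * relies on an arithmetic right shift of a negative int, which is
     * implementation-defined but universal in practice, whereas the
     * left-shift form it replaces was undefined for negative values.
     */
    static int average_of_4_mvs(int a, int b, int c, int d)
    {
        int temp = a + b + c + d;

        /* (temp >> (width - 1)) is 0 when temp >= 0 and -1 when temp < 0,
         * so the bias is +4 or -4 and temp / 8 rounds to nearest instead
         * of truncating toward zero.
         */
        temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8);

        return temp / 8;
    }

    int main(void)
    {
        printf("%d\n", average_of_4_mvs(3, 3, 3, 3));     /* 12 / 8  -> 2  */
        printf("%d\n", average_of_4_mvs(-3, -3, -3, -3)); /* -12 / 8 -> -2 */
        return 0;
    }

The decodframe.c and bitstream.c hunks apply the same reasoning to mb_to_top_edge: shifting the non-negative (mb_row * 16) first and negating the result keeps the left shift operating on a non-negative operand.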