vp8: apply clang-tidy google-readability-braces-around-statements

applied against an x86_64 configure

clang-tidy-3.7.1 \
  -checks='-*,google-readability-braces-around-statements' \
  -header-filter='.*' -fix
+ clang-format afterward

Change-Id: I6694edeaee89b58b8b3082187e6756561136b459
This commit is contained in:
clang-tidy 2016-07-18 23:15:57 -07:00 committed by James Zern
parent c69cc4ce1f
commit 7f3e07f1c8
43 changed files with 1358 additions and 849 deletions

View File

@ -19,14 +19,16 @@
void vp8_de_alloc_frame_buffers(VP8_COMMON *oci) {
int i;
for (i = 0; i < NUM_YV12_BUFFERS; ++i)
for (i = 0; i < NUM_YV12_BUFFERS; ++i) {
vp8_yv12_de_alloc_frame_buffer(&oci->yv12_fb[i]);
}
vp8_yv12_de_alloc_frame_buffer(&oci->temp_scale_frame);
#if CONFIG_POSTPROC
vp8_yv12_de_alloc_frame_buffer(&oci->post_proc_buffer);
if (oci->post_proc_buffer_int_used)
if (oci->post_proc_buffer_int_used) {
vp8_yv12_de_alloc_frame_buffer(&oci->post_proc_buffer_int);
}
vpx_free(oci->pp_limits_buffer);
oci->pp_limits_buffer = NULL;
@ -60,8 +62,9 @@ int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height) {
oci->fb_idx_ref_cnt[i] = 0;
oci->yv12_fb[i].flags = 0;
if (vp8_yv12_alloc_frame_buffer(&oci->yv12_fb[i], width, height,
VP8BORDERINPIXELS) < 0)
VP8BORDERINPIXELS) < 0) {
goto allocation_fail;
}
}
oci->new_fb_idx = 0;
@ -75,8 +78,9 @@ int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height) {
oci->fb_idx_ref_cnt[3] = 1;
if (vp8_yv12_alloc_frame_buffer(&oci->temp_scale_frame, width, 16,
VP8BORDERINPIXELS) < 0)
VP8BORDERINPIXELS) < 0) {
goto allocation_fail;
}
oci->mb_rows = height >> 4;
oci->mb_cols = width >> 4;
@ -99,8 +103,9 @@ int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height) {
#if CONFIG_POSTPROC
if (vp8_yv12_alloc_frame_buffer(&oci->post_proc_buffer, width, height,
VP8BORDERINPIXELS) < 0)
VP8BORDERINPIXELS) < 0) {
goto allocation_fail;
}
oci->post_proc_buffer_int_used = 0;
memset(&oci->postproc_state, 0, sizeof(oci->postproc_state));

View File

@ -51,10 +51,11 @@ static void filter_block2d_first_pass(unsigned char *src_ptr, int *output_ptr,
/* Normalize back to 0-255 */
Temp = Temp >> VP8_FILTER_SHIFT;
if (Temp < 0)
if (Temp < 0) {
Temp = 0;
else if (Temp > 255)
} else if (Temp > 255) {
Temp = 255;
}
output_ptr[j] = Temp;
src_ptr++;
@ -90,10 +91,11 @@ static void filter_block2d_second_pass(int *src_ptr, unsigned char *output_ptr,
/* Normalize back to 0-255 */
Temp = Temp >> VP8_FILTER_SHIFT;
if (Temp < 0)
if (Temp < 0) {
Temp = 0;
else if (Temp > 255)
} else if (Temp > 255) {
Temp = 255;
}
output_ptr[j] = (unsigned char)Temp;
src_ptr++;

View File

@ -62,8 +62,9 @@ void vp8_find_near_mvs(MACROBLOCKD *xd, const MODE_INFO *here, int_mv *nearest,
}
*cntx += 2;
} else
} else {
cnt[CNT_INTRA] += 2;
}
}
/* Process above left */
@ -81,8 +82,9 @@ void vp8_find_near_mvs(MACROBLOCKD *xd, const MODE_INFO *here, int_mv *nearest,
}
*cntx += 1;
} else
} else {
cnt[CNT_INTRA] += 1;
}
}
/* If we have three distinct MV's ... */
@ -107,8 +109,9 @@ void vp8_find_near_mvs(MACROBLOCKD *xd, const MODE_INFO *here, int_mv *nearest,
}
/* Use near_mvs[0] to store the "best" MV */
if (cnt[CNT_NEAREST] >= cnt[CNT_INTRA])
if (cnt[CNT_NEAREST] >= cnt[CNT_INTRA]) {
near_mvs[CNT_INTRA] = near_mvs[CNT_NEAREST];
}
/* Set up return values */
best_mv->as_int = near_mvs[0].as_int;

View File

@ -32,15 +32,17 @@ static INLINE void mv_bias(int refmb_ref_frame_sign_bias, int refframe,
#define LEFT_TOP_MARGIN (16 << 3)
#define RIGHT_BOTTOM_MARGIN (16 << 3)
static INLINE void vp8_clamp_mv2(int_mv *mv, const MACROBLOCKD *xd) {
  /* Clamp the MV so the predicted block stays within the extended
   * (UMV-bordered) frame, leaving LEFT_TOP_MARGIN / RIGHT_BOTTOM_MARGIN
   * of slack in eighth-pel units on each side.
   *
   * NOTE(review): the diff rendering had each condition duplicated
   * (pre-fix unbraced line + post-fix braced line); this is the
   * deduplicated post-change body, matching the clang-tidy
   * braces-around-statements output on the added lines. */
  if (mv->as_mv.col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN)) {
    mv->as_mv.col = xd->mb_to_left_edge - LEFT_TOP_MARGIN;
  } else if (mv->as_mv.col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN) {
    mv->as_mv.col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;
  }

  if (mv->as_mv.row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN)) {
    mv->as_mv.row = xd->mb_to_top_edge - LEFT_TOP_MARGIN;
  } else if (mv->as_mv.row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN) {
    mv->as_mv.row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;
  }
}
static INLINE void vp8_clamp_mv(int_mv *mv, int mb_to_left_edge,

View File

@ -24,9 +24,9 @@ void vp8_dequant_idct_add_y_block_c(short *q, short *dq, unsigned char *dst,
for (i = 0; i < 4; ++i) {
for (j = 0; j < 4; ++j) {
if (*eobs++ > 1)
if (*eobs++ > 1) {
vp8_dequant_idct_add_c(q, dq, dst, stride);
else {
} else {
vp8_dc_only_idct_add_c(q[0] * dq[0], dst, stride, dst, stride);
memset(q, 0, 2 * sizeof(q[0]));
}
@ -46,9 +46,9 @@ void vp8_dequant_idct_add_uv_block_c(short *q, short *dq, unsigned char *dstu,
for (i = 0; i < 2; ++i) {
for (j = 0; j < 2; ++j) {
if (*eobs++ > 1)
if (*eobs++ > 1) {
vp8_dequant_idct_add_c(q, dq, dstu, stride);
else {
} else {
vp8_dc_only_idct_add_c(q[0] * dq[0], dstu, stride, dstu, stride);
memset(q, 0, 2 * sizeof(q[0]));
}
@ -62,9 +62,9 @@ void vp8_dequant_idct_add_uv_block_c(short *q, short *dq, unsigned char *dstu,
for (i = 0; i < 2; ++i) {
for (j = 0; j < 2; ++j) {
if (*eobs++ > 1)
if (*eobs++ > 1) {
vp8_dequant_idct_add_c(q, dq, dstv, stride);
else {
} else {
vp8_dc_only_idct_add_c(q[0] * dq[0], dstv, stride, dstv, stride);
memset(q, 0, 2 * sizeof(q[0]));
}

View File

@ -298,13 +298,15 @@ void vp8_loop_filter_mbh_c(unsigned char *y_ptr, unsigned char *u_ptr,
vp8_mbloop_filter_horizontal_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim,
lfi->hev_thr, 2);
if (u_ptr)
if (u_ptr) {
vp8_mbloop_filter_horizontal_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim,
lfi->hev_thr, 1);
}
if (v_ptr)
if (v_ptr) {
vp8_mbloop_filter_horizontal_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim,
lfi->hev_thr, 1);
}
}
/* Vertical MB Filtering */
@ -314,13 +316,15 @@ void vp8_loop_filter_mbv_c(unsigned char *y_ptr, unsigned char *u_ptr,
vp8_mbloop_filter_vertical_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim,
lfi->hev_thr, 2);
if (u_ptr)
if (u_ptr) {
vp8_mbloop_filter_vertical_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim,
lfi->hev_thr, 1);
}
if (v_ptr)
if (v_ptr) {
vp8_mbloop_filter_vertical_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim,
lfi->hev_thr, 1);
}
}
/* Horizontal B Filtering */
@ -334,13 +338,15 @@ void vp8_loop_filter_bh_c(unsigned char *y_ptr, unsigned char *u_ptr,
vp8_loop_filter_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride, lfi->blim,
lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
if (u_ptr) {
vp8_loop_filter_horizontal_edge_c(u_ptr + 4 * uv_stride, uv_stride,
lfi->blim, lfi->lim, lfi->hev_thr, 1);
}
if (v_ptr)
if (v_ptr) {
vp8_loop_filter_horizontal_edge_c(v_ptr + 4 * uv_stride, uv_stride,
lfi->blim, lfi->lim, lfi->hev_thr, 1);
}
}
void vp8_loop_filter_bhs_c(unsigned char *y_ptr, int y_stride,
@ -364,13 +370,15 @@ void vp8_loop_filter_bv_c(unsigned char *y_ptr, unsigned char *u_ptr,
vp8_loop_filter_vertical_edge_c(y_ptr + 12, y_stride, lfi->blim, lfi->lim,
lfi->hev_thr, 2);
if (u_ptr)
if (u_ptr) {
vp8_loop_filter_vertical_edge_c(u_ptr + 4, uv_stride, lfi->blim, lfi->lim,
lfi->hev_thr, 1);
}
if (v_ptr)
if (v_ptr) {
vp8_loop_filter_vertical_edge_c(v_ptr + 4, uv_stride, lfi->blim, lfi->lim,
lfi->hev_thr, 1);
}
}
void vp8_loop_filter_bvs_c(unsigned char *y_ptr, int y_stride,

View File

@ -196,28 +196,31 @@ static void multiframe_quality_enhance_block(
{
vp8_copy_mem8x8(y, y_stride, yd, yd_stride);
for (up = u, udp = ud, i = 0; i < uvblksize;
++i, up += uv_stride, udp += uvd_stride)
++i, up += uv_stride, udp += uvd_stride) {
memcpy(udp, up, uvblksize);
}
for (vp = v, vdp = vd, i = 0; i < uvblksize;
++i, vp += uv_stride, vdp += uvd_stride)
++i, vp += uv_stride, vdp += uvd_stride) {
memcpy(vdp, vp, uvblksize);
}
}
}
}
static int qualify_inter_mb(const MODE_INFO *mode_info_context, int *map) {
if (mode_info_context->mbmi.mb_skip_coeff)
if (mode_info_context->mbmi.mb_skip_coeff) {
map[0] = map[1] = map[2] = map[3] = 1;
else if (mode_info_context->mbmi.mode == SPLITMV) {
} else if (mode_info_context->mbmi.mode == SPLITMV) {
static int ndx[4][4] = {
{ 0, 1, 4, 5 }, { 2, 3, 6, 7 }, { 8, 9, 12, 13 }, { 10, 11, 14, 15 }
};
int i, j;
for (i = 0; i < 4; ++i) {
map[i] = 1;
for (j = 0; j < 4 && map[j]; ++j)
for (j = 0; j < 4 && map[j]; ++j) {
map[i] &= (mode_info_context->bmi[ndx[i][j]].mv.as_mv.row <= 2 &&
mode_info_context->bmi[ndx[i][j]].mv.as_mv.col <= 2);
}
}
} else {
map[0] = map[1] = map[2] = map[3] =
@ -256,14 +259,15 @@ void vp8_multiframe_quality_enhance(VP8_COMMON *cm) {
for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
/* if motion is high there will likely be no benefit */
if (frame_type == INTER_FRAME)
if (frame_type == INTER_FRAME) {
totmap = qualify_inter_mb(mode_info_context, map);
else
} else {
totmap = (frame_type == KEY_FRAME ? 4 : 0);
}
if (totmap) {
if (totmap < 4) {
int i, j;
for (i = 0; i < 2; ++i)
for (i = 0; i < 2; ++i) {
for (j = 0; j < 2; ++j) {
if (map[i * 2 + j]) {
multiframe_quality_enhance_block(
@ -292,6 +296,7 @@ void vp8_multiframe_quality_enhance(VP8_COMMON *cm) {
}
}
}
}
} else /* totmap = 4 */
{
multiframe_quality_enhance_block(

View File

@ -116,10 +116,11 @@ void vp8_deblock(VP8_COMMON *cm, YV12_BUFFER_CONFIG *source,
for (mbc = 0; mbc < cm->mb_cols; ++mbc) {
unsigned char mb_ppl;
if (mode_info_context->mbmi.mb_skip_coeff)
if (mode_info_context->mbmi.mb_skip_coeff) {
mb_ppl = (unsigned char)ppl >> 1;
else
} else {
mb_ppl = (unsigned char)ppl;
}
memset(ylptr, mb_ppl, 16);
memset(uvlptr, mb_ppl, 8);
@ -378,9 +379,10 @@ int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest,
int height = (oci->Height + 15) & ~15;
if (vp8_yv12_alloc_frame_buffer(&oci->post_proc_buffer_int, width, height,
VP8BORDERINPIXELS))
VP8BORDERINPIXELS)) {
vpx_internal_error(&oci->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate MFQE framebuffer");
}
oci->post_proc_buffer_int_used = 1;

View File

@ -39,10 +39,11 @@ int vp8_dc_quant(int QIndex, int Delta) {
QIndex = QIndex + Delta;
if (QIndex > 127)
if (QIndex > 127) {
QIndex = 127;
else if (QIndex < 0)
} else if (QIndex < 0) {
QIndex = 0;
}
retval = dc_qlookup[QIndex];
return retval;
@ -53,10 +54,11 @@ int vp8_dc2quant(int QIndex, int Delta) {
QIndex = QIndex + Delta;
if (QIndex > 127)
if (QIndex > 127) {
QIndex = 127;
else if (QIndex < 0)
} else if (QIndex < 0) {
QIndex = 0;
}
retval = dc_qlookup[QIndex] * 2;
return retval;
@ -66,10 +68,11 @@ int vp8_dc_uv_quant(int QIndex, int Delta) {
QIndex = QIndex + Delta;
if (QIndex > 127)
if (QIndex > 127) {
QIndex = 127;
else if (QIndex < 0)
} else if (QIndex < 0) {
QIndex = 0;
}
retval = dc_qlookup[QIndex];
@ -81,10 +84,11 @@ int vp8_dc_uv_quant(int QIndex, int Delta) {
int vp8_ac_yquant(int QIndex) {
int retval;
if (QIndex > 127)
if (QIndex > 127) {
QIndex = 127;
else if (QIndex < 0)
} else if (QIndex < 0) {
QIndex = 0;
}
retval = ac_qlookup[QIndex];
return retval;
@ -95,10 +99,11 @@ int vp8_ac2quant(int QIndex, int Delta) {
QIndex = QIndex + Delta;
if (QIndex > 127)
if (QIndex > 127) {
QIndex = 127;
else if (QIndex < 0)
} else if (QIndex < 0) {
QIndex = 0;
}
/* For all x in [0..284], x*155/100 is bitwise equal to (x*101581) >> 16.
* The smallest precision for that is '(x*6349) >> 12' but 16 is a good
@ -114,10 +119,11 @@ int vp8_ac_uv_quant(int QIndex, int Delta) {
QIndex = QIndex + Delta;
if (QIndex > 127)
if (QIndex > 127) {
QIndex = 127;
else if (QIndex < 0)
} else if (QIndex < 0) {
QIndex = 0;
}
retval = ac_qlookup[QIndex];
return retval;

View File

@ -208,9 +208,9 @@ void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x) {
BLOCKD *d0 = &x->block[i];
BLOCKD *d1 = &x->block[i + 1];
if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
else {
} else {
vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride,
x->subpixel_predict);
vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride,
@ -223,9 +223,9 @@ void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x) {
BLOCKD *d0 = &x->block[i];
BLOCKD *d1 = &x->block[i + 1];
if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride);
else {
} else {
vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride,
x->subpixel_predict);
vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride,
@ -264,15 +264,17 @@ static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd) {
* filtering. The bottom and right edges use 16 pixels plus 2 pixels
* left of the central pixel when filtering.
*/
if (mv->col < (xd->mb_to_left_edge - (19 << 3)))
if (mv->col < (xd->mb_to_left_edge - (19 << 3))) {
mv->col = xd->mb_to_left_edge - (16 << 3);
else if (mv->col > xd->mb_to_right_edge + (18 << 3))
} else if (mv->col > xd->mb_to_right_edge + (18 << 3)) {
mv->col = xd->mb_to_right_edge + (16 << 3);
}
if (mv->row < (xd->mb_to_top_edge - (19 << 3)))
if (mv->row < (xd->mb_to_top_edge - (19 << 3))) {
mv->row = xd->mb_to_top_edge - (16 << 3);
else if (mv->row > xd->mb_to_bottom_edge + (18 << 3))
} else if (mv->row > xd->mb_to_bottom_edge + (18 << 3)) {
mv->row = xd->mb_to_bottom_edge + (16 << 3);
}
}
/* A version of the above function for chroma block MVs.*/
@ -392,10 +394,10 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *x) {
clamp_mv_to_umv_border(&x->block[i + 1].bmi.mv.as_mv, x);
}
if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride,
base_pre, dst_stride);
else {
} else {
build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride,
base_pre, dst_stride, x->subpixel_predict);
build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride,
@ -412,10 +414,10 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *x) {
/* Note: uv mvs already clamped in build_4x4uvmvs() */
if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride,
base_pre, dst_stride);
else {
} else {
build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre,
dst_stride, x->subpixel_predict);
build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre,
@ -432,10 +434,10 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *x) {
/* Note: uv mvs already clamped in build_4x4uvmvs() */
if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) {
build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride,
base_pre, dst_stride);
else {
} else {
build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre,
dst_stride, x->subpixel_predict);
build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre,
@ -473,8 +475,9 @@ static void build_4x4uvmvs(MACROBLOCKD *x) {
x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;
if (x->mode_info_context->mbmi.need_to_clamp_mvs)
if (x->mode_info_context->mbmi.need_to_clamp_mvs) {
clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.mv.as_mv, x);
}
x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int;
}

View File

@ -16,16 +16,19 @@ void vp8_setup_intra_recon(YV12_BUFFER_CONFIG *ybf) {
/* set up frame new frame for intra coded blocks */
memset(ybf->y_buffer - 1 - ybf->y_stride, 127, ybf->y_width + 5);
for (i = 0; i < ybf->y_height; ++i)
for (i = 0; i < ybf->y_height; ++i) {
ybf->y_buffer[ybf->y_stride * i - 1] = (unsigned char)129;
}
memset(ybf->u_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
for (i = 0; i < ybf->uv_height; ++i)
for (i = 0; i < ybf->uv_height; ++i) {
ybf->u_buffer[ybf->uv_stride * i - 1] = (unsigned char)129;
}
memset(ybf->v_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
for (i = 0; i < ybf->uv_height; ++i)
for (i = 0; i < ybf->uv_height; ++i) {
ybf->v_buffer[ybf->uv_stride * i - 1] = (unsigned char)129;
}
}
void vp8_setup_intra_recon_top_line(YV12_BUFFER_CONFIG *ybf) {

View File

@ -26,8 +26,9 @@ static void tree2tok(struct vp8_token_struct *const p, vp8_tree t, int i, int v,
if (j <= 0) {
p[-j].value = v;
p[-j].Len = L;
} else
} else {
tree2tok(p, t, j, v, L);
}
} while (++v & 1);
}
@ -103,7 +104,8 @@ void vp8_tree_probs_from_distribution(int n, /* n = size of alphabet */
if (tot) {
const unsigned int p = ((c[0] * Pfac) + (rd ? tot >> 1 : 0)) / tot;
probs[t] = p < 256 ? (p ? p : 1) : 255; /* agree w/old version for now */
} else
} else {
probs[t] = vp8_prob_half;
}
} while (++t < tree_len);
}

View File

@ -60,8 +60,9 @@ void vp8_loop_filter_update_sharpness(loop_filter_info_n *lfi,
block_inside_limit = block_inside_limit >> (sharpness_lvl > 4);
if (sharpness_lvl > 0) {
if (block_inside_limit > (9 - sharpness_lvl))
if (block_inside_limit > (9 - sharpness_lvl)) {
block_inside_limit = (9 - sharpness_lvl);
}
}
if (block_inside_limit < 1) block_inside_limit = 1;

View File

@ -27,30 +27,30 @@ void vp8_dequant_idct_add_y_block_mmx(short *q, short *dq, unsigned char *dst,
int i;
for (i = 0; i < 4; ++i) {
if (eobs[0] > 1)
if (eobs[0] > 1) {
vp8_dequant_idct_add_mmx(q, dq, dst, stride);
else if (eobs[0] == 1) {
} else if (eobs[0] == 1) {
vp8_dc_only_idct_add_mmx(q[0] * dq[0], dst, stride, dst, stride);
memset(q, 0, 2 * sizeof(q[0]));
}
if (eobs[1] > 1)
if (eobs[1] > 1) {
vp8_dequant_idct_add_mmx(q + 16, dq, dst + 4, stride);
else if (eobs[1] == 1) {
} else if (eobs[1] == 1) {
vp8_dc_only_idct_add_mmx(q[16] * dq[0], dst + 4, stride, dst + 4, stride);
memset(q + 16, 0, 2 * sizeof(q[0]));
}
if (eobs[2] > 1)
if (eobs[2] > 1) {
vp8_dequant_idct_add_mmx(q + 32, dq, dst + 8, stride);
else if (eobs[2] == 1) {
} else if (eobs[2] == 1) {
vp8_dc_only_idct_add_mmx(q[32] * dq[0], dst + 8, stride, dst + 8, stride);
memset(q + 32, 0, 2 * sizeof(q[0]));
}
if (eobs[3] > 1)
if (eobs[3] > 1) {
vp8_dequant_idct_add_mmx(q + 48, dq, dst + 12, stride);
else if (eobs[3] == 1) {
} else if (eobs[3] == 1) {
vp8_dc_only_idct_add_mmx(q[48] * dq[0], dst + 12, stride, dst + 12,
stride);
memset(q + 48, 0, 2 * sizeof(q[0]));
@ -68,16 +68,16 @@ void vp8_dequant_idct_add_uv_block_mmx(short *q, short *dq, unsigned char *dstu,
int i;
for (i = 0; i < 2; ++i) {
if (eobs[0] > 1)
if (eobs[0] > 1) {
vp8_dequant_idct_add_mmx(q, dq, dstu, stride);
else if (eobs[0] == 1) {
} else if (eobs[0] == 1) {
vp8_dc_only_idct_add_mmx(q[0] * dq[0], dstu, stride, dstu, stride);
memset(q, 0, 2 * sizeof(q[0]));
}
if (eobs[1] > 1)
if (eobs[1] > 1) {
vp8_dequant_idct_add_mmx(q + 16, dq, dstu + 4, stride);
else if (eobs[1] == 1) {
} else if (eobs[1] == 1) {
vp8_dc_only_idct_add_mmx(q[16] * dq[0], dstu + 4, stride, dstu + 4,
stride);
memset(q + 16, 0, 2 * sizeof(q[0]));
@ -89,16 +89,16 @@ void vp8_dequant_idct_add_uv_block_mmx(short *q, short *dq, unsigned char *dstu,
}
for (i = 0; i < 2; ++i) {
if (eobs[0] > 1)
if (eobs[0] > 1) {
vp8_dequant_idct_add_mmx(q, dq, dstv, stride);
else if (eobs[0] == 1) {
} else if (eobs[0] == 1) {
vp8_dc_only_idct_add_mmx(q[0] * dq[0], dstv, stride, dstv, stride);
memset(q, 0, 2 * sizeof(q[0]));
}
if (eobs[1] > 1)
if (eobs[1] > 1) {
vp8_dequant_idct_add_mmx(q + 16, dq, dstv + 4, stride);
else if (eobs[1] == 1) {
} else if (eobs[1] == 1) {
vp8_dc_only_idct_add_mmx(q[16] * dq[0], dstv + 4, stride, dstv + 4,
stride);
memset(q + 16, 0, 2 * sizeof(q[0]));

View File

@ -22,16 +22,18 @@ void vp8_dequant_idct_add_y_block_sse2(short *q, short *dq, unsigned char *dst,
for (i = 0; i < 4; ++i) {
if (((short *)(eobs))[0]) {
if (((short *)(eobs))[0] & 0xfefe)
if (((short *)(eobs))[0] & 0xfefe) {
vp8_idct_dequant_full_2x_sse2(q, dq, dst, stride);
else
} else {
vp8_idct_dequant_0_2x_sse2(q, dq, dst, stride);
}
}
if (((short *)(eobs))[1]) {
if (((short *)(eobs))[1] & 0xfefe)
if (((short *)(eobs))[1] & 0xfefe) {
vp8_idct_dequant_full_2x_sse2(q + 32, dq, dst + 8, stride);
else
} else {
vp8_idct_dequant_0_2x_sse2(q + 32, dq, dst + 8, stride);
}
}
q += 64;
dst += stride * 4;
@ -44,35 +46,39 @@ void vp8_dequant_idct_add_uv_block_sse2(short *q, short *dq,
unsigned char *dstv, int stride,
char *eobs) {
if (((short *)(eobs))[0]) {
if (((short *)(eobs))[0] & 0xfefe)
if (((short *)(eobs))[0] & 0xfefe) {
vp8_idct_dequant_full_2x_sse2(q, dq, dstu, stride);
else
} else {
vp8_idct_dequant_0_2x_sse2(q, dq, dstu, stride);
}
}
q += 32;
dstu += stride * 4;
if (((short *)(eobs))[1]) {
if (((short *)(eobs))[1] & 0xfefe)
if (((short *)(eobs))[1] & 0xfefe) {
vp8_idct_dequant_full_2x_sse2(q, dq, dstu, stride);
else
} else {
vp8_idct_dequant_0_2x_sse2(q, dq, dstu, stride);
}
}
q += 32;
if (((short *)(eobs))[2]) {
if (((short *)(eobs))[2] & 0xfefe)
if (((short *)(eobs))[2] & 0xfefe) {
vp8_idct_dequant_full_2x_sse2(q, dq, dstv, stride);
else
} else {
vp8_idct_dequant_0_2x_sse2(q, dq, dstv, stride);
}
}
q += 32;
dstv += stride * 4;
if (((short *)(eobs))[3]) {
if (((short *)(eobs))[3] & 0xfefe)
if (((short *)(eobs))[3] & 0xfefe) {
vp8_idct_dequant_full_2x_sse2(q, dq, dstv, stride);
else
} else {
vp8_idct_dequant_0_2x_sse2(q, dq, dstv, stride);
}
}
}

View File

@ -52,13 +52,15 @@ void vp8_loop_filter_mbh_mmx(unsigned char *y_ptr, unsigned char *u_ptr,
vp8_mbloop_filter_horizontal_edge_mmx(y_ptr, y_stride, lfi->mblim, lfi->lim,
lfi->hev_thr, 2);
if (u_ptr)
if (u_ptr) {
vp8_mbloop_filter_horizontal_edge_mmx(u_ptr, uv_stride, lfi->mblim,
lfi->lim, lfi->hev_thr, 1);
}
if (v_ptr)
if (v_ptr) {
vp8_mbloop_filter_horizontal_edge_mmx(v_ptr, uv_stride, lfi->mblim,
lfi->lim, lfi->hev_thr, 1);
}
}
/* Vertical MB Filtering */
@ -68,13 +70,15 @@ void vp8_loop_filter_mbv_mmx(unsigned char *y_ptr, unsigned char *u_ptr,
vp8_mbloop_filter_vertical_edge_mmx(y_ptr, y_stride, lfi->mblim, lfi->lim,
lfi->hev_thr, 2);
if (u_ptr)
if (u_ptr) {
vp8_mbloop_filter_vertical_edge_mmx(u_ptr, uv_stride, lfi->mblim, lfi->lim,
lfi->hev_thr, 1);
}
if (v_ptr)
if (v_ptr) {
vp8_mbloop_filter_vertical_edge_mmx(v_ptr, uv_stride, lfi->mblim, lfi->lim,
lfi->hev_thr, 1);
}
}
/* Horizontal B Filtering */
@ -88,13 +92,15 @@ void vp8_loop_filter_bh_mmx(unsigned char *y_ptr, unsigned char *u_ptr,
vp8_loop_filter_horizontal_edge_mmx(y_ptr + 12 * y_stride, y_stride,
lfi->blim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
if (u_ptr) {
vp8_loop_filter_horizontal_edge_mmx(u_ptr + 4 * uv_stride, uv_stride,
lfi->blim, lfi->lim, lfi->hev_thr, 1);
}
if (v_ptr)
if (v_ptr) {
vp8_loop_filter_horizontal_edge_mmx(v_ptr + 4 * uv_stride, uv_stride,
lfi->blim, lfi->lim, lfi->hev_thr, 1);
}
}
void vp8_loop_filter_bhs_mmx(unsigned char *y_ptr, int y_stride,
@ -118,13 +124,15 @@ void vp8_loop_filter_bv_mmx(unsigned char *y_ptr, unsigned char *u_ptr,
vp8_loop_filter_vertical_edge_mmx(y_ptr + 12, y_stride, lfi->blim, lfi->lim,
lfi->hev_thr, 2);
if (u_ptr)
if (u_ptr) {
vp8_loop_filter_vertical_edge_mmx(u_ptr + 4, uv_stride, lfi->blim, lfi->lim,
lfi->hev_thr, 1);
}
if (v_ptr)
if (v_ptr) {
vp8_loop_filter_vertical_edge_mmx(v_ptr + 4, uv_stride, lfi->blim, lfi->lim,
lfi->hev_thr, 1);
}
}
void vp8_loop_filter_bvs_mmx(unsigned char *y_ptr, int y_stride,
@ -143,9 +151,10 @@ void vp8_loop_filter_mbh_sse2(unsigned char *y_ptr, unsigned char *u_ptr,
vp8_mbloop_filter_horizontal_edge_sse2(y_ptr, y_stride, lfi->mblim, lfi->lim,
lfi->hev_thr);
if (u_ptr)
if (u_ptr) {
vp8_mbloop_filter_horizontal_edge_uv_sse2(u_ptr, uv_stride, lfi->mblim,
lfi->lim, lfi->hev_thr, v_ptr);
}
}
/* Vertical MB Filtering */
@ -155,9 +164,10 @@ void vp8_loop_filter_mbv_sse2(unsigned char *y_ptr, unsigned char *u_ptr,
vp8_mbloop_filter_vertical_edge_sse2(y_ptr, y_stride, lfi->mblim, lfi->lim,
lfi->hev_thr);
if (u_ptr)
if (u_ptr) {
vp8_mbloop_filter_vertical_edge_uv_sse2(u_ptr, uv_stride, lfi->mblim,
lfi->lim, lfi->hev_thr, v_ptr);
}
}
/* Horizontal B Filtering */
@ -176,10 +186,11 @@ void vp8_loop_filter_bh_sse2(unsigned char *y_ptr, unsigned char *u_ptr,
lfi->blim, lfi->lim, lfi->hev_thr);
#endif
if (u_ptr)
if (u_ptr) {
vp8_loop_filter_horizontal_edge_uv_sse2(u_ptr + 4 * uv_stride, uv_stride,
lfi->blim, lfi->lim, lfi->hev_thr,
v_ptr + 4 * uv_stride);
}
}
void vp8_loop_filter_bhs_sse2(unsigned char *y_ptr, int y_stride,
@ -208,9 +219,10 @@ void vp8_loop_filter_bv_sse2(unsigned char *y_ptr, unsigned char *u_ptr,
lfi->hev_thr);
#endif
if (u_ptr)
if (u_ptr) {
vp8_loop_filter_vertical_edge_uv_sse2(u_ptr + 4, uv_stride, lfi->blim,
lfi->lim, lfi->hev_thr, v_ptr + 4);
}
}
void vp8_loop_filter_bvs_sse2(unsigned char *y_ptr, int y_stride,

View File

@ -63,18 +63,20 @@ void vp8_mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd) {
/* Decide whether to use the default or alternate baseline Q value. */
if (xd->segmentation_enabled) {
/* Abs Value */
if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA) {
QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id];
/* Delta Value */
else
/* Delta Value */
} else {
QIndex = pc->base_qindex +
xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id];
}
QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ)
: 0; /* Clamp to valid range */
} else
} else {
QIndex = pc->base_qindex;
}
/* Set up the macroblock dequant constants */
xd->dequant_y1_dc[0] = 1;
@ -602,14 +604,15 @@ static void decode_mb_rows(VP8D_COMP *pbi) {
if (pc->filter_level) {
if (mb_row > 0) {
if (pc->filter_type == NORMAL_LOOPFILTER)
if (pc->filter_type == NORMAL_LOOPFILTER) {
vp8_loop_filter_row_normal(pc, lf_mic, mb_row - 1, recon_y_stride,
recon_uv_stride, lf_dst[0], lf_dst[1],
lf_dst[2]);
else
} else {
vp8_loop_filter_row_simple(pc, lf_mic, mb_row - 1, recon_y_stride,
recon_uv_stride, lf_dst[0], lf_dst[1],
lf_dst[2]);
}
if (mb_row > 1) {
yv12_extend_frame_left_right_c(yv12_fb_new, eb_dst[0], eb_dst[1],
eb_dst[2]);
@ -638,14 +641,15 @@ static void decode_mb_rows(VP8D_COMP *pbi) {
}
if (pc->filter_level) {
if (pc->filter_type == NORMAL_LOOPFILTER)
if (pc->filter_type == NORMAL_LOOPFILTER) {
vp8_loop_filter_row_normal(pc, lf_mic, mb_row - 1, recon_y_stride,
recon_uv_stride, lf_dst[0], lf_dst[1],
lf_dst[2]);
else
} else {
vp8_loop_filter_row_simple(pc, lf_mic, mb_row - 1, recon_y_stride,
recon_uv_stride, lf_dst[0], lf_dst[1],
lf_dst[2]);
}
yv12_extend_frame_left_right_c(yv12_fb_new, eb_dst[0], eb_dst[1],
eb_dst[2]);
@ -688,28 +692,31 @@ static unsigned int read_available_partition_size(
* or throw an error.
*/
if (i < num_part - 1) {
if (read_is_valid(partition_size_ptr, 3, first_fragment_end))
if (read_is_valid(partition_size_ptr, 3, first_fragment_end)) {
partition_size = read_partition_size(pbi, partition_size_ptr);
else if (pbi->ec_active)
} else if (pbi->ec_active) {
partition_size = (unsigned int)bytes_left;
else
} else {
vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
"Truncated partition size data");
} else
}
} else {
partition_size = (unsigned int)bytes_left;
}
/* Validate the calculated partition length. If the buffer
* described by the partition can't be fully read, then restrict
* it to the portion that can be (for EC mode) or throw an error.
*/
if (!read_is_valid(fragment_start, partition_size, fragment_end)) {
if (pbi->ec_active)
if (pbi->ec_active) {
partition_size = (unsigned int)bytes_left;
else
} else {
vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt partition "
"%d length",
i + 1);
}
}
return partition_size;
}
@ -725,8 +732,9 @@ static void setup_token_decoder(VP8D_COMP *pbi,
TOKEN_PARTITION multi_token_partition =
(TOKEN_PARTITION)vp8_read_literal(&pbi->mbc[8], 2);
if (!vp8dx_bool_error(&pbi->mbc[8]))
if (!vp8dx_bool_error(&pbi->mbc[8])) {
pbi->common.multi_token_partition = multi_token_partition;
}
num_token_partitions = 1 << pbi->common.multi_token_partition;
/* Check for partitions within the fragments and unpack the fragments
@ -777,17 +785,19 @@ static void setup_token_decoder(VP8D_COMP *pbi,
++partition_idx) {
if (vp8dx_start_decode(bool_decoder, pbi->fragments.ptrs[partition_idx],
pbi->fragments.sizes[partition_idx], pbi->decrypt_cb,
pbi->decrypt_state))
pbi->decrypt_state)) {
vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,
"Failed to allocate bool decoder %d", partition_idx);
}
bool_decoder++;
}
#if CONFIG_MULTITHREAD
/* Clamp number of decoder threads */
if (pbi->decoding_thread_count > num_token_partitions - 1)
if (pbi->decoding_thread_count > num_token_partitions - 1) {
pbi->decoding_thread_count = num_token_partitions - 1;
}
#endif
}
@ -836,8 +846,9 @@ static void init_frame(VP8D_COMP *pbi) {
xd->subpixel_predict16x16 = vp8_bilinear_predict16x16;
}
if (pbi->decoded_key_frame && pbi->ec_enabled && !pbi->ec_active)
if (pbi->decoded_key_frame && pbi->ec_enabled && !pbi->ec_active) {
pbi->ec_active = 1;
}
}
xd->left_context = &pc->left_context;
@ -900,9 +911,10 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
(clear[0] | (clear[1] << 8) | (clear[2] << 16)) >> 5;
if (!pbi->ec_active && (data + first_partition_length_in_bytes > data_end ||
data + first_partition_length_in_bytes < data))
data + first_partition_length_in_bytes < data)) {
vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt partition 0 length");
}
data += 3;
clear += 3;
@ -915,9 +927,10 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
* code if we have enough bits available
*/
if (!pbi->ec_active || data + 3 < data_end) {
if (clear[0] != 0x9d || clear[1] != 0x01 || clear[2] != 0x2a)
if (clear[0] != 0x9d || clear[1] != 0x01 || clear[2] != 0x2a) {
vpx_internal_error(&pc->error, VPX_CODEC_UNSUP_BITSTREAM,
"Invalid frame sync code");
}
}
/* If error concealment is enabled we should only parse the new size
@ -943,9 +956,10 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
init_frame(pbi);
if (vp8dx_start_decode(bc, data, (unsigned int)(data_end - data),
pbi->decrypt_cb, pbi->decrypt_state))
pbi->decrypt_cb, pbi->decrypt_state)) {
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate bool decoder 0");
}
if (pc->frame_type == KEY_FRAME) {
(void)vp8_read_bit(bc); // colorspace
pc->clamp_type = (CLAMP_TYPE)vp8_read_bit(bc);
@ -973,10 +987,12 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
xd->segment_feature_data[i][j] =
(signed char)vp8_read_literal(bc, mb_feature_data_bits[i]);
if (vp8_read_bit(bc))
if (vp8_read_bit(bc)) {
xd->segment_feature_data[i][j] = -xd->segment_feature_data[i][j];
} else
}
} else {
xd->segment_feature_data[i][j] = 0;
}
}
}
}
@ -988,8 +1004,9 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
/* Read the probs used to decode the segment id for each macro block. */
for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) {
/* If not explicitly set value is defaulted to 255 by memset above */
if (vp8_read_bit(bc))
if (vp8_read_bit(bc)) {
xd->mb_segment_tree_probs[i] = (vp8_prob)vp8_read_literal(bc, 8);
}
}
}
} else {
@ -1019,8 +1036,9 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
/*sign = vp8_read_bit( bc );*/
xd->ref_lf_deltas[i] = (signed char)vp8_read_literal(bc, 6);
if (vp8_read_bit(bc)) /* Apply sign */
if (vp8_read_bit(bc)) { /* Apply sign */
xd->ref_lf_deltas[i] = xd->ref_lf_deltas[i] * -1;
}
}
}
@ -1030,8 +1048,9 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
/*sign = vp8_read_bit( bc );*/
xd->mode_lf_deltas[i] = (signed char)vp8_read_literal(bc, 6);
if (vp8_read_bit(bc)) /* Apply sign */
if (vp8_read_bit(bc)) { /* Apply sign */
xd->mode_lf_deltas[i] = xd->mode_lf_deltas[i] * -1;
}
}
}
}
@ -1083,8 +1102,9 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
/* Buffer to buffer copy flags. */
pc->copy_buffer_to_gf = 0;
if (!pc->refresh_golden_frame)
if (!pc->refresh_golden_frame) {
pc->copy_buffer_to_gf = vp8_read_literal(bc, 2);
}
#if CONFIG_ERROR_CONCEALMENT
/* Assume we shouldn't copy to the golden if the bit is missing */
@ -1094,8 +1114,9 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
pc->copy_buffer_to_arf = 0;
if (!pc->refresh_alt_ref_frame)
if (!pc->refresh_alt_ref_frame) {
pc->copy_buffer_to_arf = vp8_read_literal(bc, 2);
}
#if CONFIG_ERROR_CONCEALMENT
/* Assume we shouldn't copy to the alt-ref if the bit is missing */
@ -1138,18 +1159,22 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
pbi->independent_partitions = 1;
/* read coef probability tree */
for (i = 0; i < BLOCK_TYPES; ++i)
for (j = 0; j < COEF_BANDS; ++j)
for (k = 0; k < PREV_COEF_CONTEXTS; ++k)
for (i = 0; i < BLOCK_TYPES; ++i) {
for (j = 0; j < COEF_BANDS; ++j) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
for (l = 0; l < ENTROPY_NODES; ++l) {
vp8_prob *const p = pc->fc.coef_probs[i][j][k] + l;
if (vp8_read(bc, vp8_coef_update_probs[i][j][k][l])) {
*p = (vp8_prob)vp8_read_literal(bc, 8);
}
if (k > 0 && *p != pc->fc.coef_probs[i][j][k - 1][l])
if (k > 0 && *p != pc->fc.coef_probs[i][j][k - 1][l]) {
pbi->independent_partitions = 0;
}
}
}
}
}
}
/* clear out the coeff buffer */
@ -1174,8 +1199,9 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
unsigned int thread;
vp8mt_decode_mb_rows(pbi, xd);
vp8_yv12_extend_frame_borders(yv12_fb_new);
for (thread = 0; thread < pbi->decoding_thread_count; ++thread)
for (thread = 0; thread < pbi->decoding_thread_count; ++thread) {
corrupt_tokens |= pbi->mb_row_di[thread].mbd.corrupted;
}
} else
#endif
{
@ -1190,11 +1216,12 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
yv12_fb_new->corrupted |= corrupt_tokens;
if (!pbi->decoded_key_frame) {
if (pc->frame_type == KEY_FRAME && !yv12_fb_new->corrupted)
if (pc->frame_type == KEY_FRAME && !yv12_fb_new->corrupted) {
pbi->decoded_key_frame = 1;
else
} else {
vpx_internal_error(&pbi->common.error, VPX_CODEC_CORRUPT_FRAME,
"A stream must start with a complete key frame");
}
}
/* vpx_log("Decoder: Frame Decoded, Size Roughly:%d bytes

View File

@ -82,8 +82,9 @@ static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc) {
} while (--i > 3);
if (!(x & 0xFFF0) || vp8_read(r, p[MVPbits + 3])) x += 8;
} else /* small */
} else { /* small */
x = vp8_treed_read(r, vp8_small_mvtree, p + MVPshort);
}
if (x && vp8_read(r, p[MVPsign])) x = -x;
@ -135,8 +136,9 @@ static void mb_mode_mv_init(VP8D_COMP *pbi) {
pbi->common.mb_no_coeff_skip = (int)vp8_read_bit(bc);
pbi->prob_skip_false = 0;
if (pbi->common.mb_no_coeff_skip)
if (pbi->common.mb_no_coeff_skip) {
pbi->prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8);
}
if (pbi->common.frame_type != KEY_FRAME) {
pbi->prob_intra = (vp8_prob)vp8_read_literal(bc, 8);
@ -218,21 +220,25 @@ static void decode_split_mv(vp8_reader *const bc, MODE_INFO *mi,
if (!(k & 3)) {
/* On L edge, get from MB to left of us */
if (left_mb->mbmi.mode != SPLITMV)
if (left_mb->mbmi.mode != SPLITMV) {
leftmv.as_int = left_mb->mbmi.mv.as_int;
else
} else {
leftmv.as_int = (left_mb->bmi + k + 4 - 1)->mv.as_int;
} else
}
} else {
leftmv.as_int = (mi->bmi + k - 1)->mv.as_int;
}
if (!(k >> 2)) {
/* On top edge, get from MB above us */
if (above_mb->mbmi.mode != SPLITMV)
if (above_mb->mbmi.mode != SPLITMV) {
abovemv.as_int = above_mb->mbmi.mv.as_int;
else
} else {
abovemv.as_int = (above_mb->bmi + k + 16 - 4)->mv.as_int;
} else
}
} else {
abovemv.as_int = (mi->bmi + k - 4)->mv.as_int;
}
prob = get_sub_mv_ref_prob(leftmv.as_int, abovemv.as_int);
@ -332,8 +338,9 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi,
}
*cntx += 2;
} else
} else {
cnt[CNT_INTRA] += 2;
}
}
/* Process above left */
@ -351,8 +358,9 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi,
}
*cntx += 1;
} else
} else {
cnt[CNT_INTRA] += 1;
}
}
if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_INTRA]][0])) {
@ -471,12 +479,13 @@ static void read_mb_features(vp8_reader *r, MB_MODE_INFO *mi, MACROBLOCKD *x) {
/* Is segmentation enabled */
if (x->segmentation_enabled && x->update_mb_segmentation_map) {
/* If so then read the segment id. */
if (vp8_read(r, x->mb_segment_tree_probs[0]))
if (vp8_read(r, x->mb_segment_tree_probs[0])) {
mi->segment_id =
(unsigned char)(2 + vp8_read(r, x->mb_segment_tree_probs[2]));
else
} else {
mi->segment_id =
(unsigned char)(vp8_read(r, x->mb_segment_tree_probs[1]));
}
}
}
@ -488,23 +497,26 @@ static void decode_mb_mode_mvs(VP8D_COMP *pbi, MODE_INFO *mi,
* this frame (reset to 0 above by default)
* By default on a key frame reset all MBs to segment 0
*/
if (pbi->mb.update_mb_segmentation_map)
if (pbi->mb.update_mb_segmentation_map) {
read_mb_features(&pbi->mbc[8], &mi->mbmi, &pbi->mb);
else if (pbi->common.frame_type == KEY_FRAME)
} else if (pbi->common.frame_type == KEY_FRAME) {
mi->mbmi.segment_id = 0;
}
/* Read the macroblock coeff skip flag if this feature is in use,
* else default to 0 */
if (pbi->common.mb_no_coeff_skip)
if (pbi->common.mb_no_coeff_skip) {
mi->mbmi.mb_skip_coeff = vp8_read(&pbi->mbc[8], pbi->prob_skip_false);
else
} else {
mi->mbmi.mb_skip_coeff = 0;
}
mi->mbmi.is_4x4 = 0;
if (pbi->common.frame_type == KEY_FRAME)
if (pbi->common.frame_type == KEY_FRAME) {
read_kf_modes(pbi, mi);
else
} else {
read_mb_modes_mv(pbi, mi, &mi->mbmi);
}
}
void vp8_decode_mode_mvs(VP8D_COMP *pbi) {

View File

@ -125,13 +125,13 @@ vpx_codec_err_t vp8dx_get_reference(VP8D_COMP *pbi,
VP8_COMMON *cm = &pbi->common;
int ref_fb_idx;
if (ref_frame_flag == VP8_LAST_FRAME)
if (ref_frame_flag == VP8_LAST_FRAME) {
ref_fb_idx = cm->lst_fb_idx;
else if (ref_frame_flag == VP8_GOLD_FRAME)
} else if (ref_frame_flag == VP8_GOLD_FRAME) {
ref_fb_idx = cm->gld_fb_idx;
else if (ref_frame_flag == VP8_ALTR_FRAME)
} else if (ref_frame_flag == VP8_ALTR_FRAME) {
ref_fb_idx = cm->alt_fb_idx;
else {
} else {
vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR,
"Invalid reference frame");
return pbi->common.error.error_code;
@ -156,13 +156,13 @@ vpx_codec_err_t vp8dx_set_reference(VP8D_COMP *pbi,
int *ref_fb_ptr = NULL;
int free_fb;
if (ref_frame_flag == VP8_LAST_FRAME)
if (ref_frame_flag == VP8_LAST_FRAME) {
ref_fb_ptr = &cm->lst_fb_idx;
else if (ref_frame_flag == VP8_GOLD_FRAME)
} else if (ref_frame_flag == VP8_GOLD_FRAME) {
ref_fb_ptr = &cm->gld_fb_idx;
else if (ref_frame_flag == VP8_ALTR_FRAME)
} else if (ref_frame_flag == VP8_ALTR_FRAME) {
ref_fb_ptr = &cm->alt_fb_idx;
else {
} else {
vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR,
"Invalid reference frame");
return pbi->common.error.error_code;
@ -191,8 +191,9 @@ vpx_codec_err_t vp8dx_set_reference(VP8D_COMP *pbi,
static int get_free_fb(VP8_COMMON *cm) {
int i;
for (i = 0; i < NUM_YV12_BUFFERS; ++i)
for (i = 0; i < NUM_YV12_BUFFERS; ++i) {
if (cm->fb_idx_ref_cnt[i] == 0) break;
}
assert(i < NUM_YV12_BUFFERS);
cm->fb_idx_ref_cnt[i] = 1;
@ -219,12 +220,13 @@ static int swap_frame_buffers(VP8_COMMON *cm) {
if (cm->copy_buffer_to_arf) {
int new_fb = 0;
if (cm->copy_buffer_to_arf == 1)
if (cm->copy_buffer_to_arf == 1) {
new_fb = cm->lst_fb_idx;
else if (cm->copy_buffer_to_arf == 2)
} else if (cm->copy_buffer_to_arf == 2) {
new_fb = cm->gld_fb_idx;
else
} else {
err = -1;
}
ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->alt_fb_idx, new_fb);
}
@ -232,28 +234,32 @@ static int swap_frame_buffers(VP8_COMMON *cm) {
if (cm->copy_buffer_to_gf) {
int new_fb = 0;
if (cm->copy_buffer_to_gf == 1)
if (cm->copy_buffer_to_gf == 1) {
new_fb = cm->lst_fb_idx;
else if (cm->copy_buffer_to_gf == 2)
} else if (cm->copy_buffer_to_gf == 2) {
new_fb = cm->alt_fb_idx;
else
} else {
err = -1;
}
ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->gld_fb_idx, new_fb);
}
if (cm->refresh_golden_frame)
if (cm->refresh_golden_frame) {
ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->gld_fb_idx, cm->new_fb_idx);
}
if (cm->refresh_alt_ref_frame)
if (cm->refresh_alt_ref_frame) {
ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->alt_fb_idx, cm->new_fb_idx);
}
if (cm->refresh_last_frame) {
ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->lst_fb_idx, cm->new_fb_idx);
cm->frame_to_show = &cm->yv12_fb[cm->lst_fb_idx];
} else
} else {
cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
}
cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
@ -322,8 +328,9 @@ int vp8dx_receive_compressed_data(VP8D_COMP *pbi, size_t size,
*/
cm->yv12_fb[cm->lst_fb_idx].corrupted = 1;
if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0)
if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0) {
cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
}
goto decode_exit;
}
@ -333,8 +340,9 @@ int vp8dx_receive_compressed_data(VP8D_COMP *pbi, size_t size,
retcode = vp8_decode_frame(pbi);
if (retcode < 0) {
if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0)
if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0) {
cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
}
pbi->common.error.error_code = VPX_CODEC_ERROR;
goto decode_exit;
@ -456,8 +464,9 @@ int vp8_remove_decoder_instances(struct frame_buffers *fb) {
if (!pbi) return VPX_CODEC_ERROR;
#if CONFIG_MULTITHREAD
if (pbi->b_multithreaded_rd)
if (pbi->b_multithreaded_rd) {
vp8mt_de_alloc_temp_buffers(pbi, pbi->common.mb_rows);
}
vp8_decoder_remove_threads(pbi);
#endif

View File

@ -166,10 +166,11 @@ static void mt_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
/*Caution: For some b_mode, it needs 8 pixels (4 above + 4
* above-right).*/
if (i < 4 && pbi->common.filter_level)
if (i < 4 && pbi->common.filter_level) {
Above = xd->recon_above[0] + b->offset;
else
} else {
Above = dst - dst_stride;
}
if (i % 4 == 0 && pbi->common.filter_level) {
yleft = xd->recon_left[0] + i;
@ -179,10 +180,11 @@ static void mt_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
left_stride = dst_stride;
}
if ((i == 4 || i == 8 || i == 12) && pbi->common.filter_level)
if ((i == 4 || i == 8 || i == 12) && pbi->common.filter_level) {
top_left = *(xd->recon_left[0] + i - 1);
else
} else {
top_left = Above[-1];
}
vp8_intra4x4_predict(Above, yleft, left_stride, b_mode, dst, dst_stride,
top_left);
@ -299,10 +301,11 @@ static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd,
/* select bool coder for current partition */
xd->current_bc = &pbi->mbc[mb_row % num_part];
if (mb_row > 0)
if (mb_row > 0) {
last_row_current_mb_col = &pbi->mt_current_mb_col[mb_row - 1];
else
} else {
last_row_current_mb_col = &first_row_no_sync_above;
}
current_mb_col = &pbi->mt_current_mb_col[mb_row];
@ -452,9 +455,10 @@ static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd,
MODE_INFO *next = xd->mode_info_context + 1;
if (next->mbmi.ref_frame == INTRA_FRAME) {
for (i = 0; i < 16; ++i)
for (i = 0; i < 16; ++i) {
pbi->mt_yleft_col[mb_row][i] =
xd->dst.y_buffer[i * recon_y_stride + 15];
}
for (i = 0; i < 8; ++i) {
pbi->mt_uleft_col[mb_row][i] =
xd->dst.u_buffer[i * recon_uv_stride + 7];
@ -539,9 +543,10 @@ static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd,
pbi->mt_vabove_row[mb_row + 1][lastuv - 1];
}
}
} else
} else {
vp8_extend_mb_row(yv12_fb_new, xd->dst.y_buffer + 16,
xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
}
/* last MB of row is ready just after extension is done */
protected_write(&pbi->pmutex[mb_row], current_mb_col, mb_col + nsync);
@ -567,9 +572,9 @@ static THREAD_FUNCTION thread_decoding_proc(void *p_data) {
if (protected_read(&pbi->mt_mutex, &pbi->b_multithreaded_rd) == 0) break;
if (sem_wait(&pbi->h_event_start_decoding[ithread]) == 0) {
if (protected_read(&pbi->mt_mutex, &pbi->b_multithreaded_rd) == 0)
if (protected_read(&pbi->mt_mutex, &pbi->b_multithreaded_rd) == 0) {
break;
else {
} else {
MACROBLOCKD *xd = &mbrd->mbd;
xd->left_context = &mb_row_left_context;
@ -593,8 +598,9 @@ void vp8_decoder_create_threads(VP8D_COMP *pbi) {
core_count = (pbi->max_threads > 8) ? 8 : pbi->max_threads;
/* limit decoding threads to the available cores */
if (core_count > pbi->common.processor_core_count)
if (core_count > pbi->common.processor_core_count) {
core_count = pbi->common.processor_core_count;
}
if (core_count > 1) {
pbi->b_multithreaded_rd = 1;
@ -709,14 +715,15 @@ void vp8mt_alloc_temp_buffers(VP8D_COMP *pbi, int width, int prev_mb_rows) {
/* our internal buffers are always multiples of 16 */
if ((width & 0xf) != 0) width += 16 - (width & 0xf);
if (width < 640)
if (width < 640) {
pbi->sync_range = 1;
else if (width <= 1280)
} else if (width <= 1280) {
pbi->sync_range = 8;
else if (width <= 2560)
} else if (width <= 2560) {
pbi->sync_range = 16;
else
} else {
pbi->sync_range = 32;
}
uv_width = width >> 1;
@ -838,14 +845,16 @@ void vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd) {
/* Initialize the loop filter for this frame. */
vp8_loop_filter_frame_init(pc, &pbi->mb, filter_level);
} else
} else {
vp8_setup_intra_recon_top_line(yv12_fb_new);
}
setup_decoding_thread_data(pbi, xd, pbi->mb_row_di,
pbi->decoding_thread_count);
for (i = 0; i < pbi->decoding_thread_count; ++i)
for (i = 0; i < pbi->decoding_thread_count; ++i) {
sem_post(&pbi->h_event_start_decoding[i]);
}
mt_decode_mb_rows(pbi, xd, 0);

View File

@ -400,8 +400,9 @@ void vp8_convert_rfct_to_prob(VP8_COMP *const cpi) {
rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
/* Calculate the probabilities used to code the ref frame based on usage */
if (!(cpi->prob_intra_coded = rf_intra * 255 / (rf_intra + rf_inter)))
if (!(cpi->prob_intra_coded = rf_intra * 255 / (rf_intra + rf_inter))) {
cpi->prob_intra_coded = 1;
}
cpi->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;
@ -478,11 +479,13 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
active_section = 9;
#endif
if (cpi->mb.e_mbd.update_mb_segmentation_map)
if (cpi->mb.e_mbd.update_mb_segmentation_map) {
write_mb_features(w, mi, &cpi->mb.e_mbd);
}
if (pc->mb_no_coeff_skip)
if (pc->mb_no_coeff_skip) {
vp8_encode_bool(w, m->mbmi.mb_skip_coeff, prob_skip_false);
}
if (rf == INTRA_FRAME) {
vp8_write(w, 0, cpi->prob_intra_coded);
@ -494,9 +497,9 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
if (mode == B_PRED) {
int j = 0;
do
do {
write_bmode(w, m->bmi[j].as_mode, pc->fc.bmode_prob);
while (++j < 16);
} while (++j < 16);
}
write_uv_mode(w, mi->uv_mode, pc->fc.uv_mode_prob);
@ -566,8 +569,9 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
blockmode = cpi->mb.partition_info->bmi[j].mode;
blockmv = cpi->mb.partition_info->bmi[j].mv;
#if CONFIG_DEBUG
while (j != L[++k])
while (j != L[++k]) {
if (k >= 16) assert(0);
}
#else
while (j != L[++k])
;
@ -627,11 +631,13 @@ static void write_kfmodes(VP8_COMP *cpi) {
while (++mb_col < c->mb_cols) {
const int ym = m->mbmi.mode;
if (cpi->mb.e_mbd.update_mb_segmentation_map)
if (cpi->mb.e_mbd.update_mb_segmentation_map) {
write_mb_features(bc, &m->mbmi, &cpi->mb.e_mbd);
}
if (c->mb_no_coeff_skip)
if (c->mb_no_coeff_skip) {
vp8_encode_bool(bc, m->mbmi.mb_skip_coeff, prob_skip_false);
}
kfwrite_ymode(bc, ym, vp8_kf_ymode_prob);
@ -734,8 +740,9 @@ static int independent_coef_context_savings(VP8_COMP *cpi) {
probs = (const unsigned int(*)[MAX_ENTROPY_TOKENS])x->coef_counts[i][j];
/* Reset to default probabilities at key frames */
if (cpi->common.frame_type == KEY_FRAME)
if (cpi->common.frame_type == KEY_FRAME) {
probs = default_coef_counts[i][j];
}
sum_probs_over_prev_coef_context(probs, prev_coef_count_sum);
@ -758,8 +765,9 @@ static int independent_coef_context_savings(VP8_COMP *cpi) {
const int s = prob_update_savings(ct, oldp, newp, upd);
if (cpi->common.frame_type != KEY_FRAME ||
(cpi->common.frame_type == KEY_FRAME && newp != oldp))
(cpi->common.frame_type == KEY_FRAME && newp != oldp)) {
prev_coef_savings[t] += s;
}
} while (++t < ENTROPY_NODES);
} while (++k < PREV_COEF_CONTEXTS);
k = 0;
@ -769,8 +777,9 @@ static int independent_coef_context_savings(VP8_COMP *cpi) {
* to get the equal probabilities across the prev coef
* contexts.
*/
if (prev_coef_savings[k] > 0 || cpi->common.frame_type == KEY_FRAME)
if (prev_coef_savings[k] > 0 || cpi->common.frame_type == KEY_FRAME) {
savings += prev_coef_savings[k];
}
} while (++k < ENTROPY_NODES);
} while (++j < COEF_BANDS);
} while (++i < BLOCK_TYPES);
@ -873,10 +882,11 @@ int vp8_estimate_entropy_savings(VP8_COMP *cpi) {
savings += (oldtotal - newtotal) / 256;
}
if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS)
if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
savings += independent_coef_context_savings(cpi);
else
} else {
savings += default_coef_context_savings(cpi);
}
return savings;
}
@ -961,8 +971,9 @@ void vp8_update_coef_probs(VP8_COMP *cpi) {
*/
if ((cpi->oxcf.error_resilient_mode &
VPX_ERROR_RESILIENT_PARTITIONS) &&
cpi->common.frame_type == KEY_FRAME && newp != *Pold)
cpi->common.frame_type == KEY_FRAME && newp != *Pold) {
u = 1;
}
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
cpi->update_probs[i][j][k][t] = u;
@ -1111,8 +1122,9 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
vp8_write_bit(bc, 0);
vp8_write_bit(bc, pc->clamp_type);
} else
} else {
vp8_start_encode(bc, cx_data, cx_data_end);
}
/* Signal whether or not Segmentation is enabled */
vp8_write_bit(bc, xd->segmentation_enabled);
@ -1270,10 +1282,11 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
#if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) {
if (pc->frame_type == KEY_FRAME)
if (pc->frame_type == KEY_FRAME) {
pc->refresh_entropy_probs = 1;
else
} else {
pc->refresh_entropy_probs = 0;
}
}
#endif
@ -1410,9 +1423,9 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
vp8_start_encode(&cpi->bc[1], cx_data, cx_data_end);
#if CONFIG_MULTITHREAD
if (cpi->b_multi_threaded)
if (cpi->b_multi_threaded) {
pack_mb_row_tokens(cpi, &cpi->bc[1]);
else
} else
#endif // CONFIG_MULTITHREAD
vp8_pack_tokens(&cpi->bc[1], cpi->tok, cpi->tok_count);

View File

@ -61,6 +61,7 @@ void vp8_stop_encode(BOOL_CODER *br) {
void vp8_encode_value(BOOL_CODER *br, int data, int bits) {
int bit;
for (bit = bits - 1; bit >= 0; bit--)
for (bit = bits - 1; bit >= 0; bit--) {
vp8_encode_bool(br, (1 & (data >> bit)), 0x80);
}
}

View File

@ -47,11 +47,12 @@ DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]);
static int validate_buffer(const unsigned char *start, size_t len,
const unsigned char *end,
struct vpx_internal_error_info *error) {
if (start + len > start && start + len < end)
if (start + len > start && start + len < end) {
return 1;
else
} else {
vpx_internal_error(error, VPX_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt partition ");
}
return 0;
}

View File

@ -96,25 +96,28 @@ int vp8_denoiser_filter_c(unsigned char *mc_running_avg_y, int mc_avg_y_stride,
running_avg_y[c] = mc_running_avg_y[c];
col_sum[c] += diff;
} else {
if (absdiff >= 4 + shift_inc1 && absdiff <= 7)
if (absdiff >= 4 + shift_inc1 && absdiff <= 7) {
adjustment = adj_val[0];
else if (absdiff >= 8 && absdiff <= 15)
} else if (absdiff >= 8 && absdiff <= 15) {
adjustment = adj_val[1];
else
} else {
adjustment = adj_val[2];
}
if (diff > 0) {
if ((sig[c] + adjustment) > 255)
if ((sig[c] + adjustment) > 255) {
running_avg_y[c] = 255;
else
} else {
running_avg_y[c] = sig[c] + adjustment;
}
col_sum[c] += adjustment;
} else {
if ((sig[c] - adjustment) < 0)
if ((sig[c] - adjustment) < 0) {
running_avg_y[c] = 0;
else
} else {
running_avg_y[c] = sig[c] - adjustment;
}
col_sum[c] -= adjustment;
}
@ -169,17 +172,19 @@ int vp8_denoiser_filter_c(unsigned char *mc_running_avg_y, int mc_avg_y_stride,
if (adjustment > delta) adjustment = delta;
if (diff > 0) {
// Bring denoised signal down.
if (running_avg_y[c] - adjustment < 0)
if (running_avg_y[c] - adjustment < 0) {
running_avg_y[c] = 0;
else
} else {
running_avg_y[c] = running_avg_y[c] - adjustment;
}
col_sum[c] -= adjustment;
} else if (diff < 0) {
// Bring denoised signal up.
if (running_avg_y[c] + adjustment > 255)
if (running_avg_y[c] + adjustment > 255) {
running_avg_y[c] = 255;
else
} else {
running_avg_y[c] = running_avg_y[c] + adjustment;
}
col_sum[c] += adjustment;
}
}
@ -263,23 +268,26 @@ int vp8_denoiser_filter_uv_c(unsigned char *mc_running_avg_uv,
running_avg_uv[c] = mc_running_avg_uv[c];
sum_diff += diff;
} else {
if (absdiff >= 4 && absdiff <= 7)
if (absdiff >= 4 && absdiff <= 7) {
adjustment = adj_val[0];
else if (absdiff >= 8 && absdiff <= 15)
} else if (absdiff >= 8 && absdiff <= 15) {
adjustment = adj_val[1];
else
} else {
adjustment = adj_val[2];
}
if (diff > 0) {
if ((sig[c] + adjustment) > 255)
if ((sig[c] + adjustment) > 255) {
running_avg_uv[c] = 255;
else
} else {
running_avg_uv[c] = sig[c] + adjustment;
}
sum_diff += adjustment;
} else {
if ((sig[c] - adjustment) < 0)
if ((sig[c] - adjustment) < 0) {
running_avg_uv[c] = 0;
else
} else {
running_avg_uv[c] = sig[c] - adjustment;
}
sum_diff -= adjustment;
}
}
@ -315,17 +323,19 @@ int vp8_denoiser_filter_uv_c(unsigned char *mc_running_avg_uv,
if (adjustment > delta) adjustment = delta;
if (diff > 0) {
// Bring denoised signal down.
if (running_avg_uv[c] - adjustment < 0)
if (running_avg_uv[c] - adjustment < 0) {
running_avg_uv[c] = 0;
else
} else {
running_avg_uv[c] = running_avg_uv[c] - adjustment;
}
sum_diff -= adjustment;
} else if (diff < 0) {
// Bring denoised signal up.
if (running_avg_uv[c] + adjustment > 255)
if (running_avg_uv[c] + adjustment > 255) {
running_avg_uv[c] = 255;
else
} else {
running_avg_uv[c] = running_avg_uv[c] + adjustment;
}
sum_diff += adjustment;
}
}
@ -508,8 +518,9 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser, MACROBLOCK *x,
// we will always choose zero_mv for denoising if
// zero_mv_see <= best_sse (i.e., sse_diff <= 0).
if ((unsigned int)(mv_row * mv_row + mv_col * mv_col) <=
NOISE_MOTION_THRESHOLD)
NOISE_MOTION_THRESHOLD) {
sse_diff_thresh = (int)SSE_DIFF_THRESHOLD;
}
if (frame == INTRA_FRAME || sse_diff <= sse_diff_thresh) {
/*
@ -539,15 +550,18 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser, MACROBLOCK *x,
denoiser->denoise_pars.scale_motion_thresh * NOISE_MOTION_THRESHOLD;
if (motion_magnitude2 <
denoiser->denoise_pars.scale_increase_filter * NOISE_MOTION_THRESHOLD)
denoiser->denoise_pars.scale_increase_filter * NOISE_MOTION_THRESHOLD) {
x->increase_denoising = 1;
}
sse_thresh = denoiser->denoise_pars.scale_sse_thresh * SSE_THRESHOLD;
if (x->increase_denoising)
if (x->increase_denoising) {
sse_thresh = denoiser->denoise_pars.scale_sse_thresh * SSE_THRESHOLD_HIGH;
}
if (best_sse > sse_thresh || motion_magnitude2 > motion_threshold)
if (best_sse > sse_thresh || motion_magnitude2 > motion_threshold) {
decision = COPY_BLOCK;
}
// If block is considered skin, don't denoise if the block
// (1) is selected as non-zero motion for current frame, or
@ -555,8 +569,9 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser, MACROBLOCK *x,
// in a row.
// TODO(marpan): Parameter "x" should be varied with framerate.
// In particualar, should be reduced for layers (base layer/LAST).
if (x->is_skin && (consec_zero_last < 2 || motion_magnitude2 > 0))
if (x->is_skin && (consec_zero_last < 2 || motion_magnitude2 > 0)) {
decision = COPY_BLOCK;
}
if (decision == FILTER_BLOCK) {
saved_pre = filter_xd->pre;

View File

@ -168,8 +168,9 @@ static void calc_av_activity(VP8_COMP *cpi, int64_t activity_sum) {
cpi->activity_avg = (unsigned int)(activity_sum / cpi->common.MBs);
#endif
if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN)
if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN) {
cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
}
/* Experimental code: return fixed value normalized for several clips */
if (ALT_ACT_MEASURE) cpi->activity_avg = 100000;
@ -347,10 +348,11 @@ static void encode_mb_row(VP8_COMP *cpi, VP8_COMMON *cm, int mb_row,
const int *last_row_current_mb_col;
int *current_mb_col = &cpi->mt_current_mb_col[mb_row];
if ((cpi->b_multi_threaded != 0) && (mb_row != 0))
if ((cpi->b_multi_threaded != 0) && (mb_row != 0)) {
last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];
else
} else {
last_row_current_mb_col = &rightmost_col;
}
#endif
#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
@ -437,16 +439,18 @@ static void encode_mb_row(VP8_COMP *cpi, VP8_COMMON *cm, int mb_row,
/* Code to set segment id in xd->mbmi.segment_id for current MB
* (with range checking)
*/
if (cpi->segmentation_map[map_index + mb_col] <= 3)
if (cpi->segmentation_map[map_index + mb_col] <= 3) {
xd->mode_info_context->mbmi.segment_id =
cpi->segmentation_map[map_index + mb_col];
else
} else {
xd->mode_info_context->mbmi.segment_id = 0;
}
vp8cx_mb_init_quantizer(cpi, x, 1);
} else
} else {
/* Set to Segment 0 by default */
xd->mode_info_context->mbmi.segment_id = 0;
}
x->active_ptr = cpi->active_map + map_index + mb_col;
@ -479,16 +483,19 @@ static void encode_mb_row(VP8_COMP *cpi, VP8_COMMON *cm, int mb_row,
if (xd->mode_info_context->mbmi.mode == ZEROMV &&
xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) {
// Increment, check for wrap-around.
if (cpi->consec_zero_last[map_index + mb_col] < 255)
if (cpi->consec_zero_last[map_index + mb_col] < 255) {
cpi->consec_zero_last[map_index + mb_col] += 1;
if (cpi->consec_zero_last_mvbias[map_index + mb_col] < 255)
}
if (cpi->consec_zero_last_mvbias[map_index + mb_col] < 255) {
cpi->consec_zero_last_mvbias[map_index + mb_col] += 1;
}
} else {
cpi->consec_zero_last[map_index + mb_col] = 0;
cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
}
if (x->zero_last_dot_suppress)
if (x->zero_last_dot_suppress) {
cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
}
}
/* Special case code for cyclic refresh
@ -509,14 +516,16 @@ static void encode_mb_row(VP8_COMP *cpi, VP8_COMMON *cm, int mb_row,
* been refreshed then mark it as a candidate for cleanup
* next time (marked 0) else mark it as dirty (1).
*/
if (xd->mode_info_context->mbmi.segment_id)
if (xd->mode_info_context->mbmi.segment_id) {
cpi->cyclic_refresh_map[map_index + mb_col] = -1;
else if ((xd->mode_info_context->mbmi.mode == ZEROMV) &&
(xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)) {
if (cpi->cyclic_refresh_map[map_index + mb_col] == 1)
} else if ((xd->mode_info_context->mbmi.mode == ZEROMV) &&
(xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)) {
if (cpi->cyclic_refresh_map[map_index + mb_col] == 1) {
cpi->cyclic_refresh_map[map_index + mb_col] = 0;
} else
}
} else {
cpi->cyclic_refresh_map[map_index + mb_col] = 1;
}
}
}
@ -557,8 +566,9 @@ static void encode_mb_row(VP8_COMP *cpi, VP8_COMMON *cm, int mb_row,
xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
#if CONFIG_MULTITHREAD
if (cpi->b_multi_threaded != 0)
if (cpi->b_multi_threaded != 0) {
protected_write(&cpi->pmutex[mb_row], current_mb_col, rightmost_col);
}
#endif
/* this is to account for the border */
@ -611,18 +621,19 @@ static void init_encode_frame_mb_context(VP8_COMP *cpi) {
/* Special case treatment when GF and ARF are not sensible options
* for reference
*/
if (cpi->ref_frame_flags == VP8_LAST_FRAME)
if (cpi->ref_frame_flags == VP8_LAST_FRAME) {
vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 255,
128);
else if ((cpi->oxcf.number_of_layers > 1) &&
(cpi->ref_frame_flags == VP8_GOLD_FRAME))
} else if ((cpi->oxcf.number_of_layers > 1) &&
(cpi->ref_frame_flags == VP8_GOLD_FRAME)) {
vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 1, 255);
else if ((cpi->oxcf.number_of_layers > 1) &&
(cpi->ref_frame_flags == VP8_ALTR_FRAME))
} else if ((cpi->oxcf.number_of_layers > 1) &&
(cpi->ref_frame_flags == VP8_ALTR_FRAME)) {
vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 1, 1);
else
} else {
vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded,
cpi->prob_last_coded, cpi->prob_gf_coded);
}
xd->fullpixel_mask = 0xffffffff;
if (cm->full_pixel) xd->fullpixel_mask = 0xfffffff8;
@ -673,10 +684,11 @@ void vp8_encode_frame(VP8_COMP *cpi) {
totalrate = 0;
if (cpi->compressor_speed == 2) {
if (cpi->oxcf.cpu_used < 0)
if (cpi->oxcf.cpu_used < 0) {
cpi->Speed = -(cpi->oxcf.cpu_used);
else
} else {
vp8_auto_select_speed(cpi);
}
}
/* Functions setup for all frame types so we can use MC in AltRef */
@ -798,8 +810,9 @@ void vp8_encode_frame(VP8_COMP *cpi) {
if (xd->segmentation_enabled) {
for (i = 0; i < cpi->encoding_thread_count; ++i) {
for (j = 0; j < 4; ++j)
for (j = 0; j < 4; ++j) {
segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
}
}
}
}
@ -811,13 +824,15 @@ void vp8_encode_frame(VP8_COMP *cpi) {
cpi->mb.skip_true_count += cpi->mb_row_ei[i].mb.skip_true_count;
for (mode_count = 0; mode_count < VP8_YMODES; ++mode_count)
for (mode_count = 0; mode_count < VP8_YMODES; ++mode_count) {
cpi->mb.ymode_count[mode_count] +=
cpi->mb_row_ei[i].mb.ymode_count[mode_count];
}
for (mode_count = 0; mode_count < VP8_UV_MODES; ++mode_count)
for (mode_count = 0; mode_count < VP8_UV_MODES; ++mode_count) {
cpi->mb.uv_mode_count[mode_count] +=
cpi->mb_row_ei[i].mb.uv_mode_count[mode_count];
}
for (c_idx = 0; c_idx < MVvals; ++c_idx) {
cpi->mb.MVcount[0][c_idx] += cpi->mb_row_ei[i].mb.MVcount[0][c_idx];
@ -827,12 +842,14 @@ void vp8_encode_frame(VP8_COMP *cpi) {
cpi->mb.prediction_error += cpi->mb_row_ei[i].mb.prediction_error;
cpi->mb.intra_error += cpi->mb_row_ei[i].mb.intra_error;
for (c_idx = 0; c_idx < MAX_REF_FRAMES; ++c_idx)
for (c_idx = 0; c_idx < MAX_REF_FRAMES; ++c_idx) {
cpi->mb.count_mb_ref_frame_usage[c_idx] +=
cpi->mb_row_ei[i].mb.count_mb_ref_frame_usage[c_idx];
}
for (c_idx = 0; c_idx < MAX_ERROR_BINS; ++c_idx)
for (c_idx = 0; c_idx < MAX_ERROR_BINS; ++c_idx) {
cpi->mb.error_bins[c_idx] += cpi->mb_row_ei[i].mb.error_bins[c_idx];
}
/* add up counts for each thread */
sum_coef_counts(x, &cpi->mb_row_ei[i].mb);
@ -899,8 +916,9 @@ void vp8_encode_frame(VP8_COMP *cpi) {
tot_count = segment_counts[2] + segment_counts[3];
if (tot_count > 0)
if (tot_count > 0) {
xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;
}
/* Zero probabilities not allowed */
for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) {
@ -923,9 +941,10 @@ void vp8_encode_frame(VP8_COMP *cpi) {
cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME] +
cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME];
if (tot_modes)
if (tot_modes) {
cpi->this_frame_percent_intra =
cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
}
}
#if !CONFIG_REALTIME_ONLY
@ -1054,10 +1073,11 @@ static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x) {
a = act + 4 * cpi->activity_avg;
b = 4 * act + cpi->activity_avg;
if (act > cpi->activity_avg)
if (act > cpi->activity_avg) {
x->act_zbin_adj = (int)(((int64_t)b + (a >> 1)) / a) - 1;
else
} else {
x->act_zbin_adj = 1 - (int)(((int64_t)a + (b >> 1)) / b);
}
#endif
}
@ -1066,20 +1086,22 @@ int vp8cx_encode_intra_macroblock(VP8_COMP *cpi, MACROBLOCK *x,
MACROBLOCKD *xd = &x->e_mbd;
int rate;
if (cpi->sf.RD && cpi->compressor_speed != 2)
if (cpi->sf.RD && cpi->compressor_speed != 2) {
vp8_rd_pick_intra_mode(x, &rate);
else
} else {
vp8_pick_intra_mode(x, &rate);
}
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
adjust_act_zbin(cpi, x);
vp8_update_zbin_extra(cpi, x);
}
if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED) {
vp8_encode_intra4x4mby(x);
else
} else {
vp8_encode_intra16x16mby(x);
}
vp8_encode_intra16x16mbuv(x);
@ -1110,11 +1132,12 @@ int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
x->skip = 0;
if (xd->segmentation_enabled)
if (xd->segmentation_enabled) {
x->encode_breakout =
cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
else
} else {
x->encode_breakout = cpi->oxcf.encode_breakout;
}
#if CONFIG_TEMPORAL_DENOISING
/* Reset the best sse mode/mv for each macroblock. */
@ -1192,14 +1215,16 @@ int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
if (xd->mode_info_context->mbmi.mode == ZEROMV) {
if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME &&
cpi->oxcf.number_of_layers == 1)
cpi->oxcf.number_of_layers == 1) {
x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
} else {
x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
} else if (xd->mode_info_context->mbmi.mode == SPLITMV)
}
} else if (xd->mode_info_context->mbmi.mode == SPLITMV) {
x->zbin_mode_boost = 0;
else
} else {
x->zbin_mode_boost = MV_ZBIN_BOOST;
}
}
}
@ -1223,12 +1248,13 @@ int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
} else {
int ref_fb_idx;
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) {
ref_fb_idx = cpi->common.lst_fb_idx;
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
} else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) {
ref_fb_idx = cpi->common.gld_fb_idx;
else
} else {
ref_fb_idx = cpi->common.alt_fb_idx;
}
xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
xd->pre.u_buffer =
@ -1238,17 +1264,19 @@ int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
if (!x->skip) {
vp8_encode_inter16x16(x);
} else
} else {
vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, xd->dst.u_buffer,
xd->dst.v_buffer, xd->dst.y_stride,
xd->dst.uv_stride);
}
}
if (!x->skip) {
vp8_tokenize_mb(cpi, x, t);
if (xd->mode_info_context->mbmi.mode != B_PRED)
if (xd->mode_info_context->mbmi.mode != B_PRED) {
vp8_inverse_transform_mby(xd);
}
vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv,
xd->dst.u_buffer, xd->dst.v_buffer,

View File

@ -101,8 +101,9 @@ static void transform_mb(MACROBLOCK *x) {
}
/* do 2nd order transform on the dc block */
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
x->short_walsh4x4(&x->block[24].src_diff[0], &x->block[24].coeff[0], 8);
}
}
static void transform_mby(MACROBLOCK *x) {
@ -184,8 +185,9 @@ static void optimize_b(MACROBLOCK *mb, int ib, int type, ENTROPY_CONTEXT *a,
/* Now set up a Viterbi trellis to evaluate alternative roundings. */
rdmult = mb->rdmult * err_mult;
if (mb->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME)
if (mb->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
rdmult = (rdmult * 9) >> 4;
}
rddiv = mb->rddiv;
best_mask[0] = best_mask[1] = 0;
@ -242,10 +244,11 @@ static void optimize_b(MACROBLOCK *mb, int ib, int type, ENTROPY_CONTEXT *a,
rate1 = tokens[next][1].rate;
if ((abs(x) * dequant_ptr[rc] > abs(coeff_ptr[rc])) &&
(abs(x) * dequant_ptr[rc] < abs(coeff_ptr[rc]) + dequant_ptr[rc]))
(abs(x) * dequant_ptr[rc] < abs(coeff_ptr[rc]) + dequant_ptr[rc])) {
shortcut = 1;
else
} else {
shortcut = 0;
}
if (shortcut) {
sz = -(x < 0);

View File

@ -102,17 +102,17 @@ static unsigned int cost_mvcomponent(const int v,
int i = 0;
cost = vp8_cost_one(p[mvpis_short]);
do
do {
cost += vp8_cost_bit(p[MVPbits + i], (x >> i) & 1);
while (++i < 3);
} while (++i < 3);
i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */
do
do {
cost += vp8_cost_bit(p[MVPbits + i], (x >> i) & 1);
while (--i > 3);
} while (--i > 3);
if (x & 0xFFF0) cost += vp8_cost_bit(p[MVPbits + 3], (x >> 3) & 1);
}
@ -242,10 +242,10 @@ static void write_component_probs(vp8_writer *const w,
is_short_ct[1] += c; /* Long vector */
/* bit 3 not always encoded. */
do
do {
bit_ct[k][(a >> k) & 1] += c;
while (--k >= 0);
} while (--k >= 0);
}
} while (++j <= mv_max);
}
@ -261,19 +261,19 @@ static void write_component_probs(vp8_writer *const w,
vp8_tree_probs_from_distribution(8, vp8_small_mvencodings, vp8_small_mvtree,
p, short_bct, short_ct, 256, 1);
do
do {
calc_prob(Pnew + MVPshort + j, short_bct[j]);
while (++j < mvnum_short - 1);
} while (++j < mvnum_short - 1);
}
{
int j = 0;
do
do {
calc_prob(Pnew + MVPbits + j, bit_ct[j]);
while (++j < mvlong_width);
} while (++j < mvlong_width);
}
update(w, is_short_ct, Pcur + mvpis_short, Pnew[mvpis_short], *Pupdate++,
@ -287,11 +287,10 @@ static void write_component_probs(vp8_writer *const w,
int j = 0;
do
do {
update(w, short_bct[j], cur_p + j, new_p[j], *Pupdate++, updated);
while (++j < mvnum_short - 1);
} while (++j < mvnum_short - 1);
}
{
@ -300,11 +299,10 @@ static void write_component_probs(vp8_writer *const w,
int j = 0;
do
do {
update(w, bit_ct[j], cur_p + j, new_p[j], *Pupdate++, updated);
while (++j < mvlong_width);
} while (++j < mvlong_width);
}
}
@ -322,9 +320,10 @@ void vp8_write_mvprobs(VP8_COMP *cpi) {
&vp8_mv_update_probs[1], cpi->mb.MVcount[1], 1,
&flags[1]);
if (flags[0] || flags[1])
if (flags[0] || flags[1]) {
vp8_build_component_cost_table(
cpi->mb.mvcost, (const MV_CONTEXT *)cpi->common.fc.mvc, flags);
}
#ifdef VP8_ENTROPY_STATS
active_section = 5;

View File

@ -158,16 +158,18 @@ static THREAD_FUNCTION thread_encoding_proc(void *p_data) {
/* Code to set segment id in xd->mbmi.segment_id for
* current MB (with range checking)
*/
if (cpi->segmentation_map[map_index + mb_col] <= 3)
if (cpi->segmentation_map[map_index + mb_col] <= 3) {
xd->mode_info_context->mbmi.segment_id =
cpi->segmentation_map[map_index + mb_col];
else
} else {
xd->mode_info_context->mbmi.segment_id = 0;
}
vp8cx_mb_init_quantizer(cpi, x, 1);
} else
} else {
/* Set to Segment 0 by default */
xd->mode_info_context->mbmi.segment_id = 0;
}
x->active_ptr = cpi->active_map + map_index + mb_col;
@ -199,16 +201,19 @@ static THREAD_FUNCTION thread_encoding_proc(void *p_data) {
if (xd->mode_info_context->mbmi.mode == ZEROMV &&
xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) {
// Increment, check for wrap-around.
if (cpi->consec_zero_last[map_index + mb_col] < 255)
if (cpi->consec_zero_last[map_index + mb_col] < 255) {
cpi->consec_zero_last[map_index + mb_col] += 1;
if (cpi->consec_zero_last_mvbias[map_index + mb_col] < 255)
}
if (cpi->consec_zero_last_mvbias[map_index + mb_col] < 255) {
cpi->consec_zero_last_mvbias[map_index + mb_col] += 1;
}
} else {
cpi->consec_zero_last[map_index + mb_col] = 0;
cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
}
if (x->zero_last_dot_suppress)
if (x->zero_last_dot_suppress) {
cpi->consec_zero_last_mvbias[map_index + mb_col] = 0;
}
}
/* Special case code for cyclic refresh
@ -232,14 +237,16 @@ static THREAD_FUNCTION thread_encoding_proc(void *p_data) {
* candidate for cleanup next time (marked 0) else
* mark it as dirty (1).
*/
if (mbmi->segment_id)
if (mbmi->segment_id) {
cpi->cyclic_refresh_map[map_index + mb_col] = -1;
else if ((mbmi->mode == ZEROMV) &&
(mbmi->ref_frame == LAST_FRAME)) {
if (cpi->cyclic_refresh_map[map_index + mb_col] == 1)
} else if ((mbmi->mode == ZEROMV) &&
(mbmi->ref_frame == LAST_FRAME)) {
if (cpi->cyclic_refresh_map[map_index + mb_col] == 1) {
cpi->cyclic_refresh_map[map_index + mb_col] = 0;
} else
}
} else {
cpi->cyclic_refresh_map[map_index + mb_col] = 1;
}
}
}
@ -495,8 +502,9 @@ int vp8cx_create_encoder_threads(VP8_COMP *cpi) {
int rc = 0;
/* don't allocate more threads than cores available */
if (cpi->oxcf.multi_threaded > cm->processor_core_count)
if (cpi->oxcf.multi_threaded > cm->processor_core_count) {
th_count = cm->processor_core_count - 1;
}
/* we have th_count + 1 (main) threads processing one row each */
/* no point to have more threads than the sync range allows */

View File

@ -237,10 +237,11 @@ static double calculate_modified_err(VP8_COMP *cpi,
double this_err = this_frame->ssim_weighted_pred_err;
double modified_err;
if (this_err > av_err)
if (this_err > av_err) {
modified_err = av_err * pow((this_err / DOUBLE_DIVIDE_CHECK(av_err)), POW1);
else
} else {
modified_err = av_err * pow((this_err / DOUBLE_DIVIDE_CHECK(av_err)), POW2);
}
return modified_err;
}
@ -448,14 +449,15 @@ static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x,
while (n < further_steps) {
n++;
if (num00)
if (num00) {
num00--;
else {
} else {
tmp_err = cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv,
step_param + n, x->sadperbit16, &num00,
&v_fn_ptr, x->mvcost, ref_mv);
if (tmp_err < INT_MAX - new_mv_mode_penalty)
if (tmp_err < INT_MAX - new_mv_mode_penalty) {
tmp_err += new_mv_mode_penalty;
}
if (tmp_err < *best_motion_err) {
*best_motion_err = tmp_err;
@ -606,8 +608,9 @@ void vp8_first_pass(VP8_COMP *cpi) {
d->bmi.mv.as_mv.row = 0;
d->bmi.mv.as_mv.col = 0;
if (raw_motion_error < cpi->oxcf.encode_breakout)
if (raw_motion_error < cpi->oxcf.encode_breakout) {
goto skip_motion_search;
}
/* Test last reference frame using the previous best mv as the
* starting point (best reference) for the search
@ -688,28 +691,32 @@ void vp8_first_pass(VP8_COMP *cpi) {
/* Does the Row vector point inwards or outwards */
if (mb_row < cm->mb_rows / 2) {
if (d->bmi.mv.as_mv.row > 0)
if (d->bmi.mv.as_mv.row > 0) {
sum_in_vectors--;
else if (d->bmi.mv.as_mv.row < 0)
} else if (d->bmi.mv.as_mv.row < 0) {
sum_in_vectors++;
}
} else if (mb_row > cm->mb_rows / 2) {
if (d->bmi.mv.as_mv.row > 0)
if (d->bmi.mv.as_mv.row > 0) {
sum_in_vectors++;
else if (d->bmi.mv.as_mv.row < 0)
} else if (d->bmi.mv.as_mv.row < 0) {
sum_in_vectors--;
}
}
/* Does the Row vector point inwards or outwards */
if (mb_col < cm->mb_cols / 2) {
if (d->bmi.mv.as_mv.col > 0)
if (d->bmi.mv.as_mv.col > 0) {
sum_in_vectors--;
else if (d->bmi.mv.as_mv.col < 0)
} else if (d->bmi.mv.as_mv.col < 0) {
sum_in_vectors++;
}
} else if (mb_col > cm->mb_cols / 2) {
if (d->bmi.mv.as_mv.col > 0)
if (d->bmi.mv.as_mv.col > 0) {
sum_in_vectors++;
else if (d->bmi.mv.as_mv.col < 0)
} else if (d->bmi.mv.as_mv.col < 0) {
sum_in_vectors--;
}
}
}
}
@ -824,10 +831,11 @@ void vp8_first_pass(VP8_COMP *cpi) {
FILE *recon_file;
sprintf(filename, "enc%04d.yuv", (int)cm->current_video_frame);
if (cm->current_video_frame == 0)
if (cm->current_video_frame == 0) {
recon_file = fopen(filename, "wb");
else
} else {
recon_file = fopen(filename, "ab");
}
(void)fwrite(lst_yv12->buffer_alloc, lst_yv12->frame_size, 1, recon_file);
fclose(recon_file);
@ -843,10 +851,11 @@ extern const int vp8_bits_per_mb[2][QINDEX_RANGE];
*/
static double bitcost(double prob) {
if (prob > 0.000122)
if (prob > 0.000122) {
return -log(prob) / log(2.0);
else
} else {
return 13.0;
}
}
static int64_t estimate_modemvcost(VP8_COMP *cpi, FIRSTPASS_STATS *fpstats) {
int mv_cost;
@ -917,8 +926,9 @@ static int estimate_max_q(VP8_COMP *cpi, FIRSTPASS_STATS *fpstats,
double speed_correction = 1.0;
int overhead_bits_per_mb;
if (section_target_bandwitdh <= 0)
if (section_target_bandwitdh <= 0) {
return cpi->twopass.maxq_max_limit; /* Highest value allowed */
}
target_norm_bits_per_mb = (section_target_bandwitdh < (1 << 20))
? (512 * section_target_bandwitdh) / num_mbs
@ -934,10 +944,11 @@ static int estimate_max_q(VP8_COMP *cpi, FIRSTPASS_STATS *fpstats,
rolling_ratio =
(double)cpi->rolling_actual_bits / (double)cpi->rolling_target_bits;
if (rolling_ratio < 0.95)
if (rolling_ratio < 0.95) {
cpi->twopass.est_max_qcorrection_factor -= 0.005;
else if (rolling_ratio > 1.05)
} else if (rolling_ratio > 1.05) {
cpi->twopass.est_max_qcorrection_factor += 0.005;
}
cpi->twopass.est_max_qcorrection_factor =
(cpi->twopass.est_max_qcorrection_factor < 0.1)
@ -951,10 +962,11 @@ static int estimate_max_q(VP8_COMP *cpi, FIRSTPASS_STATS *fpstats,
* (reduced compression expected)
*/
if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1)) {
if (cpi->oxcf.cpu_used <= 5)
if (cpi->oxcf.cpu_used <= 5) {
speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04);
else
} else {
speed_correction = 1.25;
}
}
/* Estimate of overhead bits per mb */
@ -1048,10 +1060,11 @@ static int estimate_cq(VP8_COMP *cpi, FIRSTPASS_STATS *fpstats,
* (reduced compression expected)
*/
if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1)) {
if (cpi->oxcf.cpu_used <= 5)
if (cpi->oxcf.cpu_used <= 5) {
speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04);
else
} else {
speed_correction = 1.25;
}
}
/* II ratio correction factor for clip as a whole */
@ -1111,10 +1124,11 @@ static int estimate_q(VP8_COMP *cpi, double section_err,
* (reduced compression expected)
*/
if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1)) {
if (cpi->oxcf.cpu_used <= 5)
if (cpi->oxcf.cpu_used <= 5) {
speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04);
else
} else {
speed_correction = 1.25;
}
}
/* Try and pick a Q that can encode the content at the given rate. */
@ -1164,9 +1178,9 @@ static int estimate_kf_group_q(VP8_COMP *cpi, double section_err,
* vs target bits
* This is clamped to the range 0.1 to 10.0
*/
if (cpi->long_rolling_target_bits <= 0)
if (cpi->long_rolling_target_bits <= 0) {
current_spend_ratio = 10.0;
else {
} else {
current_spend_ratio = (double)cpi->long_rolling_actual_bits /
(double)cpi->long_rolling_target_bits;
current_spend_ratio =
@ -1188,10 +1202,11 @@ static int estimate_kf_group_q(VP8_COMP *cpi, double section_err,
* (reduced compression expected)
*/
if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1)) {
if (cpi->oxcf.cpu_used <= 5)
if (cpi->oxcf.cpu_used <= 5) {
speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04);
else
} else {
speed_correction = 1.25;
}
}
/* Combine the various factors calculated above */
@ -1334,8 +1349,9 @@ static double get_prediction_decay_rate(VP8_COMP *cpi,
/* High % motion -> somewhat higher decay rate */
motion_decay = (1.0 - (motion_pct / 20.0));
if (motion_decay < prediction_decay_rate)
if (motion_decay < prediction_decay_rate) {
prediction_decay_rate = motion_decay;
}
/* Adjustment to decay rate based on speed of motion */
{
@ -1350,8 +1366,9 @@ static double get_prediction_decay_rate(VP8_COMP *cpi,
sqrt((this_mv_rabs * this_mv_rabs) + (this_mv_cabs * this_mv_cabs)) /
250.0;
distance_factor = ((distance_factor > 1.0) ? 0.0 : (1.0 - distance_factor));
if (distance_factor < prediction_decay_rate)
if (distance_factor < prediction_decay_rate) {
prediction_decay_rate = distance_factor;
}
}
return prediction_decay_rate;
@ -1478,23 +1495,25 @@ static double calc_frame_boost(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame,
double frame_boost;
/* Underlying boost factor is based on inter intra error ratio */
if (this_frame->intra_error > cpi->twopass.gf_intra_err_min)
if (this_frame->intra_error > cpi->twopass.gf_intra_err_min) {
frame_boost = (IIFACTOR * this_frame->intra_error /
DOUBLE_DIVIDE_CHECK(this_frame->coded_error));
else
} else {
frame_boost = (IIFACTOR * cpi->twopass.gf_intra_err_min /
DOUBLE_DIVIDE_CHECK(this_frame->coded_error));
}
/* Increase boost for frames where new data coming into frame
* (eg zoom out). Slightly reduce boost if there is a net balance
* of motion out of the frame (zoom in).
* The range for this_frame_mv_in_out is -1.0 to +1.0
*/
if (this_frame_mv_in_out > 0.0)
if (this_frame_mv_in_out > 0.0) {
frame_boost += frame_boost * (this_frame_mv_in_out * 2.0);
/* In extreme case boost is halved */
else
/* In extreme case boost is halved */
} else {
frame_boost += frame_boost * (this_frame_mv_in_out / 2.0);
}
/* Clip to maximum */
if (frame_boost > GF_RMAX) frame_boost = GF_RMAX;
@ -1743,12 +1762,13 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
int64_t df_buffer_level = cpi->oxcf.drop_frames_water_mark *
(cpi->oxcf.optimal_buffer_level / 100);
if (cpi->buffer_level > df_buffer_level)
if (cpi->buffer_level > df_buffer_level) {
max_boost =
((double)((cpi->buffer_level - df_buffer_level) * 2 / 3) * 16.0) /
DOUBLE_DIVIDE_CHECK((double)cpi->av_per_frame_bandwidth);
else
} else {
max_boost = 0.0;
}
} else if (cpi->buffer_level > 0) {
max_boost = ((double)(cpi->buffer_level * 2 / 3) * 16.0) /
DOUBLE_DIVIDE_CHECK((double)cpi->av_per_frame_bandwidth);
@ -1815,8 +1835,9 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
group_bits =
(int)((double)cpi->twopass.kf_group_bits *
(gf_group_err / (double)cpi->twopass.kf_group_error_left));
} else
} else {
group_bits = 0;
}
/* Boost for arf frame */
#if NEW_BOOST
@ -1827,10 +1848,11 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
Boost += (i * 50);
/* Set max and minimum boost and hence minimum allocation */
if (Boost > ((cpi->baseline_gf_interval + 1) * 200))
if (Boost > ((cpi->baseline_gf_interval + 1) * 200)) {
Boost = ((cpi->baseline_gf_interval + 1) * 200);
else if (Boost < 125)
} else if (Boost < 125) {
Boost = 125;
}
allocation_chunks = (i * 100) + Boost;
@ -1916,8 +1938,9 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
/* For even length filter there is one more frame backward
* than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff.
*/
if (frames_bwd < half_gf_int)
if (frames_bwd < half_gf_int) {
frames_bwd += (cpi->oxcf.arnr_max_frames + 1) & 0x1;
}
break;
}
@ -1951,8 +1974,9 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
cpi->twopass.gf_group_bits =
(int64_t)(cpi->twopass.kf_group_bits *
(gf_group_err / cpi->twopass.kf_group_error_left));
} else
} else {
cpi->twopass.gf_group_bits = 0;
}
cpi->twopass.gf_group_bits =
(cpi->twopass.gf_group_bits < 0)
@ -1965,8 +1989,9 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
* variability limit (cpi->oxcf.two_pass_vbrmax_section)
*/
if (cpi->twopass.gf_group_bits >
(int64_t)max_bits * cpi->baseline_gf_interval)
(int64_t)max_bits * cpi->baseline_gf_interval) {
cpi->twopass.gf_group_bits = (int64_t)max_bits * cpi->baseline_gf_interval;
}
/* Reset the file position */
reset_fpf_position(cpi, start_pos);
@ -1994,10 +2019,11 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
Boost += (cpi->baseline_gf_interval * 50);
/* Set max and minimum boost and hence minimum allocation */
if (Boost > ((cpi->baseline_gf_interval + 1) * 200))
if (Boost > ((cpi->baseline_gf_interval + 1) * 200)) {
Boost = ((cpi->baseline_gf_interval + 1) * 200);
else if (Boost < 125)
} else if (Boost < 125) {
Boost = 125;
}
allocation_chunks = ((cpi->baseline_gf_interval + 1) * 100) + Boost;
}
@ -2007,10 +2033,11 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
Boost = (cpi->gfu_boost * GFQ_ADJUSTMENT) / 100;
/* Set max and minimum boost and hence minimum allocation */
if (Boost > (cpi->baseline_gf_interval * 150))
if (Boost > (cpi->baseline_gf_interval * 150)) {
Boost = (cpi->baseline_gf_interval * 150);
else if (Boost < 125)
} else if (Boost < 125) {
Boost = 125;
}
allocation_chunks = (cpi->baseline_gf_interval * 100) + (Boost - 100);
}
@ -2063,8 +2090,9 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
/* Apply an additional limit for CBR */
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
if (cpi->twopass.gf_bits > (int)(cpi->buffer_level >> 1))
if (cpi->twopass.gf_bits > (int)(cpi->buffer_level >> 1)) {
cpi->twopass.gf_bits = (int)(cpi->buffer_level >> 1);
}
}
/* Dont allow a negative value for gf_bits */
@ -2095,11 +2123,12 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
* frame of the group (except in Key frame case where this has
* already happened)
*/
if (!cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME)
if (!cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME) {
cpi->twopass.gf_group_error_left =
(int)(gf_group_err - gf_first_frame_err);
else
} else {
cpi->twopass.gf_group_error_left = (int)gf_group_err;
}
cpi->twopass.gf_group_bits -=
cpi->twopass.gf_bits - cpi->min_frame_bandwidth;
@ -2126,10 +2155,12 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
(int)(cpi->twopass.gf_group_bits * pct_extra) / 100;
cpi->twopass.gf_group_bits -= cpi->twopass.alt_extra_bits;
cpi->twopass.alt_extra_bits /= ((cpi->baseline_gf_interval - 1) >> 1);
} else
} else {
cpi->twopass.alt_extra_bits = 0;
} else
}
} else {
cpi->twopass.alt_extra_bits = 0;
}
}
/* Adjustments based on a measure of complexity of the section */
@ -2155,8 +2186,9 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
cpi->twopass.section_max_qfactor = 1.0 - ((Ratio - 10.0) * 0.025);
if (cpi->twopass.section_max_qfactor < 0.80)
if (cpi->twopass.section_max_qfactor < 0.80) {
cpi->twopass.section_max_qfactor = 0.80;
}
reset_fpf_position(cpi, start_pos);
}
@ -2176,10 +2208,11 @@ static void assign_std_frame_bits(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
modified_err = calculate_modified_err(cpi, this_frame);
/* What portion of the remaining GF group error is used by this frame */
if (cpi->twopass.gf_group_error_left > 0)
if (cpi->twopass.gf_group_error_left > 0) {
err_fraction = modified_err / cpi->twopass.gf_group_error_left;
else
} else {
err_fraction = 0.0;
}
/* How many of those bits available for allocation should we give it? */
target_frame_size = (int)((double)cpi->twopass.gf_group_bits * err_fraction);
@ -2187,13 +2220,14 @@ static void assign_std_frame_bits(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
/* Clip to target size to 0 - max_bits (or cpi->twopass.gf_group_bits)
* at the top end.
*/
if (target_frame_size < 0)
if (target_frame_size < 0) {
target_frame_size = 0;
else {
} else {
if (target_frame_size > max_bits) target_frame_size = max_bits;
if (target_frame_size > cpi->twopass.gf_group_bits)
if (target_frame_size > cpi->twopass.gf_group_bits) {
target_frame_size = (int)cpi->twopass.gf_group_bits;
}
}
/* Adjust error and bits remaining */
@ -2387,10 +2421,11 @@ void vp8_second_pass(VP8_COMP *cpi) {
overhead_bits);
/* Move active_worst_quality but in a damped way */
if (tmp_q > cpi->active_worst_quality)
if (tmp_q > cpi->active_worst_quality) {
cpi->active_worst_quality++;
else if (tmp_q < cpi->active_worst_quality)
} else if (tmp_q < cpi->active_worst_quality) {
cpi->active_worst_quality--;
}
cpi->active_worst_quality =
((cpi->active_worst_quality * 3) + tmp_q + 2) / 4;
@ -2447,11 +2482,12 @@ static int test_candidate_kf(VP8_COMP *cpi, FIRSTPASS_STATS *last_frame,
if (next_iiratio > RMAX) next_iiratio = RMAX;
/* Cumulative effect of decay in prediction quality */
if (local_next_frame.pcnt_inter > 0.85)
if (local_next_frame.pcnt_inter > 0.85) {
decay_accumulator = decay_accumulator * local_next_frame.pcnt_inter;
else
} else {
decay_accumulator =
decay_accumulator * ((0.85 + local_next_frame.pcnt_inter) / 2.0);
}
/* Keep a running total */
boost_score += (decay_accumulator * next_iiratio);
@ -2475,9 +2511,9 @@ static int test_candidate_kf(VP8_COMP *cpi, FIRSTPASS_STATS *last_frame,
/* If there is tolerable prediction for at least the next 3 frames
* then break out else discard this pottential key frame and move on
*/
if (boost_score > 5.0 && (i > 3))
if (boost_score > 5.0 && (i > 3)) {
is_viable_kf = 1;
else {
} else {
/* Reset the file position */
reset_fpf_position(cpi, start_pos);
@ -2588,10 +2624,12 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
/* If we don't have a real key frame within the next two
* forcekeyframeevery intervals then break out of the loop.
*/
if (cpi->twopass.frames_to_key >= 2 * (int)cpi->key_frame_frequency)
if (cpi->twopass.frames_to_key >= 2 * (int)cpi->key_frame_frequency) {
break;
} else
}
} else {
cpi->twopass.frames_to_key++;
}
i++;
}
@ -2633,8 +2671,9 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
reset_fpf_position(cpi, current_pos);
cpi->next_key_frame_forced = 1;
} else
} else {
cpi->next_key_frame_forced = 0;
}
/* Special case for the last frame of the file */
if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end) {
@ -2668,8 +2707,9 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
/* Clip based on maximum per frame rate defined by the user. */
max_grp_bits = (int64_t)max_bits * (int64_t)cpi->twopass.frames_to_key;
if (cpi->twopass.kf_group_bits > max_grp_bits)
if (cpi->twopass.kf_group_bits > max_grp_bits) {
cpi->twopass.kf_group_bits = max_grp_bits;
}
/* Additional special case for CBR if buffer is getting full. */
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
@ -2696,8 +2736,9 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
min_group_bits =
av_group_bits + (int64_t)(buffer_lvl - high_water_mark);
if (cpi->twopass.kf_group_bits < min_group_bits)
if (cpi->twopass.kf_group_bits < min_group_bits) {
cpi->twopass.kf_group_bits = min_group_bits;
}
}
/* We are above optimal but below the maximum */
else if (cpi->twopass.kf_group_bits < av_group_bits) {
@ -2709,8 +2750,9 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
}
}
}
} else
} else {
cpi->twopass.kf_group_bits = 0;
}
/* Reset the first pass file position */
reset_fpf_position(cpi, start_position);
@ -2726,12 +2768,13 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
if (EOF == input_stats(cpi, &next_frame)) break;
if (next_frame.intra_error > cpi->twopass.kf_intra_err_min)
if (next_frame.intra_error > cpi->twopass.kf_intra_err_min) {
r = (IIKFACTOR2 * next_frame.intra_error /
DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
else
} else {
r = (IIKFACTOR2 * cpi->twopass.kf_intra_err_min /
DOUBLE_DIVIDE_CHECK(next_frame.coded_error));
}
if (r > RMAX) r = RMAX;
@ -2772,8 +2815,9 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
DOUBLE_DIVIDE_CHECK(sectionstats.coded_error);
cpi->twopass.section_max_qfactor = 1.0 - ((Ratio - 10.0) * 0.025);
if (cpi->twopass.section_max_qfactor < 0.80)
if (cpi->twopass.section_max_qfactor < 0.80) {
cpi->twopass.section_max_qfactor = 0.80;
}
}
/* When using CBR apply additional buffer fullness related upper limits */
@ -2784,12 +2828,13 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
int df_buffer_level = (int)(cpi->oxcf.drop_frames_water_mark *
(cpi->oxcf.optimal_buffer_level / 100));
if (cpi->buffer_level > df_buffer_level)
if (cpi->buffer_level > df_buffer_level) {
max_boost =
((double)((cpi->buffer_level - df_buffer_level) * 2 / 3) * 16.0) /
DOUBLE_DIVIDE_CHECK((double)cpi->av_per_frame_bandwidth);
else
} else {
max_boost = 0.0;
}
} else if (cpi->buffer_level > 0) {
max_boost = ((double)(cpi->buffer_level * 2 / 3) * 16.0) /
DOUBLE_DIVIDE_CHECK((double)cpi->av_per_frame_bandwidth);
@ -2830,10 +2875,11 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
/* bigger frame sizes need larger kf boosts, smaller frames smaller
* boosts...
*/
if ((lst_yv12->y_width * lst_yv12->y_height) > (320 * 240))
if ((lst_yv12->y_width * lst_yv12->y_height) > (320 * 240)) {
kf_boost += 2 * (lst_yv12->y_width * lst_yv12->y_height) / (320 * 240);
else if ((lst_yv12->y_width * lst_yv12->y_height) < (320 * 240))
} else if ((lst_yv12->y_width * lst_yv12->y_height) < (320 * 240)) {
kf_boost -= 4 * (320 * 240) / (lst_yv12->y_width * lst_yv12->y_height);
}
/* Min KF boost */
kf_boost = (int)((double)kf_boost * 100.0) >> 4; /* Scale 16 to 100 */
@ -2875,8 +2921,9 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
/* Apply an additional limit for CBR */
if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) {
if (cpi->twopass.kf_bits > (int)((3 * cpi->buffer_level) >> 2))
if (cpi->twopass.kf_bits > (int)((3 * cpi->buffer_level) >> 2)) {
cpi->twopass.kf_bits = (int)((3 * cpi->buffer_level) >> 2);
}
}
/* If the key frame is actually easier than the average for the
@ -2951,8 +2998,9 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
double effective_size_ratio;
if ((cpi->common.Width != cpi->oxcf.Width) ||
(cpi->common.Height != cpi->oxcf.Height))
(cpi->common.Height != cpi->oxcf.Height)) {
last_kf_resampled = 1;
}
/* Set back to unscaled by defaults */
cpi->common.horiz_scale = NORMAL;
@ -2979,15 +3027,17 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
/* Dont turn to resampling in easy sections just because they
* have been assigned a small number of bits
*/
if (bits_per_frame < av_bits_per_frame)
if (bits_per_frame < av_bits_per_frame) {
bits_per_frame = av_bits_per_frame;
}
}
/* bits_per_frame should comply with our minimum */
if (bits_per_frame <
(cpi->oxcf.target_bandwidth * cpi->oxcf.two_pass_vbrmin_section / 100))
if (bits_per_frame < (cpi->oxcf.target_bandwidth *
cpi->oxcf.two_pass_vbrmin_section / 100)) {
bits_per_frame = (cpi->oxcf.target_bandwidth *
cpi->oxcf.two_pass_vbrmin_section / 100);
}
/* Work out if spatial resampling is necessary */
kf_q = estimate_kf_group_q(cpi, err_per_frame, (int)bits_per_frame,
@ -3032,10 +3082,11 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
cpi->oxcf.optimal_buffer_level / 100)) ||
(last_kf_resampled &&
(projected_buffer_level < (cpi->oxcf.resample_up_water_mark *
cpi->oxcf.optimal_buffer_level / 100))))
cpi->oxcf.optimal_buffer_level / 100)))) {
resample_trigger = 1;
else
} else {
resample_trigger = 0;
}
} else {
int64_t clip_bits = (int64_t)(
cpi->twopass.total_stats.count * cpi->oxcf.target_bandwidth /
@ -3049,10 +3100,11 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
* bits
*/
if ((last_kf_resampled && (kf_q > cpi->worst_quality)) ||
((kf_q > cpi->worst_quality) && (over_spend > clip_bits / 20)))
((kf_q > cpi->worst_quality) && (over_spend > clip_bits / 20))) {
resample_trigger = 1;
else
} else {
resample_trigger = 0;
}
}
if (resample_trigger) {

View File

@ -40,8 +40,9 @@ void vp8_lookahead_destroy(struct lookahead_ctx *ctx) {
if (ctx->buf) {
unsigned int i;
for (i = 0; i < ctx->max_sz; ++i)
for (i = 0; i < ctx->max_sz; ++i) {
vp8_yv12_de_alloc_frame_buffer(&ctx->buf[i].img);
}
free(ctx->buf);
}
free(ctx);
@ -55,10 +56,11 @@ struct lookahead_ctx *vp8_lookahead_init(unsigned int width,
unsigned int i;
/* Clamp the lookahead queue depth */
if (depth < 1)
if (depth < 1) {
depth = 1;
else if (depth > MAX_LAG_BUFFERS)
} else if (depth > MAX_LAG_BUFFERS) {
depth = MAX_LAG_BUFFERS;
}
/* Keep last frame in lookahead buffer by increasing depth by 1.*/
depth += 1;
@ -73,10 +75,12 @@ struct lookahead_ctx *vp8_lookahead_init(unsigned int width,
ctx->max_sz = depth;
ctx->buf = calloc(depth, sizeof(*ctx->buf));
if (!ctx->buf) goto bail;
for (i = 0; i < depth; ++i)
for (i = 0; i < depth; ++i) {
if (vp8_yv12_alloc_frame_buffer(&ctx->buf[i].img, width, height,
VP8BORDERINPIXELS))
VP8BORDERINPIXELS)) {
goto bail;
}
}
}
return ctx;
bail:
@ -166,10 +170,11 @@ struct lookahead_entry *vp8_lookahead_peek(struct lookahead_ctx *ctx,
} else if (direction == PEEK_BACKWARD) {
assert(index == 1);
if (ctx->read_idx == 0)
if (ctx->read_idx == 0) {
index = ctx->max_sz - 1;
else
} else {
index = ctx->read_idx - index;
}
buf = ctx->buf + index;
}

View File

@ -43,12 +43,13 @@ int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight) {
static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2],
int error_per_bit) {
/* Ignore mv costing if mvcost is NULL */
if (mvcost)
if (mvcost) {
return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] +
mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) *
error_per_bit +
128) >>
8;
}
return 0;
}
@ -56,12 +57,13 @@ static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2],
int error_per_bit) {
/* Calculate sad error cost on full pixel basis. */
/* Ignore mv costing if mvsadcost is NULL */
if (mvsadcost)
if (mvsadcost) {
return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] +
mvsadcost[1][(mv->as_mv.col - ref->as_mv.col)]) *
error_per_bit +
128) >>
8;
}
return 0;
}
@ -346,8 +348,9 @@ int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
bestmv->as_mv.col = bc * 2;
if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL << 3)) ||
(abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL << 3)))
(abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL << 3))) {
return INT_MAX;
}
return besterr;
}
@ -910,9 +913,9 @@ int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
}
}
if (best_site == -1)
if (best_site == -1) {
goto cal_neighbors;
else {
} else {
br += hex[best_site].row;
bc += hex[best_site].col;
k = best_site;
@ -943,16 +946,17 @@ int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
}
}
if (best_site == -1)
if (best_site == -1) {
break;
else {
} else {
br += next_chkpts[k][best_site].row;
bc += next_chkpts[k][best_site].col;
k += 5 + best_site;
if (k >= 12)
if (k >= 12) {
k -= 12;
else if (k >= 6)
} else if (k >= 6) {
k -= 6;
}
}
}
@ -983,9 +987,9 @@ cal_neighbors:
}
}
if (best_site == -1)
if (best_site == -1) {
break;
else {
} else {
br += neighbors[best_site].row;
bc += neighbors[best_site].col;
}
@ -1100,8 +1104,9 @@ int vp8_diamond_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
best_mv->as_mv.col += ss[best_site].mv.col;
best_address += ss[best_site].offset;
last_site = best_site;
} else if (best_address == in_what)
} else if (best_address == in_what) {
(*num00)++;
}
}
this_mv.as_mv.row = best_mv->as_mv.row << 3;
@ -1193,8 +1198,9 @@ int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
for (j = 0; j < x->searches_per_step; j += 4) {
const unsigned char *block_offset[4];
for (t = 0; t < 4; ++t)
for (t = 0; t < 4; ++t) {
block_offset[t] = ss[i + t].offset + best_address;
}
fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride,
sad_array);
@ -1247,8 +1253,9 @@ int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
best_mv->as_mv.col += ss[best_site].mv.col;
best_address += ss[best_site].offset;
last_site = best_site;
} else if (best_address == in_what)
} else if (best_address == in_what) {
(*num00)++;
}
}
this_mv.as_mv.row = best_mv->as_mv.row * 8;
@ -1670,9 +1677,9 @@ int vp8_refining_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
}
if (best_site == -1)
if (best_site == -1) {
break;
else {
} else {
ref_mv->as_mv.row += neighbors[best_site].row;
ref_mv->as_mv.col += neighbors[best_site].col;
best_address += (neighbors[best_site].row) * in_what_stride +
@ -1780,9 +1787,9 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
}
if (best_site == -1)
if (best_site == -1) {
break;
else {
} else {
ref_mv->as_mv.row += neighbors[best_site].row;
ref_mv->as_mv.col += neighbors[best_site].col;
best_address += (neighbors[best_site].row) * in_what_stride +

File diff suppressed because it is too large Load Diff

View File

@ -92,13 +92,14 @@ static int is_skin_color(int y, int cb, int cr, int consec_zeromv) {
for (; i < 5; ++i) {
int skin_color_diff = evaluate_skin_color_difference(cb, cr, i);
if (skin_color_diff < skin_threshold[i + 1]) {
if (y < 60 && skin_color_diff > 3 * (skin_threshold[i + 1] >> 2))
if (y < 60 && skin_color_diff > 3 * (skin_threshold[i + 1] >> 2)) {
return 0;
else if (consec_zeromv > 25 &&
skin_color_diff > (skin_threshold[i + 1] >> 1))
} else if (consec_zeromv > 25 &&
skin_color_diff > (skin_threshold[i + 1] >> 1)) {
return 0;
else
} else {
return 1;
}
}
// Exit if difference is much large than the threshold.
if (skin_color_diff > (skin_threshold[i + 1] << 3)) {
@ -523,10 +524,11 @@ static void check_for_encode_breakout(unsigned int sse, MACROBLOCK *x) {
sse2 = VP8_UVSSE(x);
if (sse2 * 2 < x->encode_breakout)
if (sse2 * 2 < x->encode_breakout) {
x->skip = 1;
else
} else {
x->skip = 0;
}
}
}
@ -546,9 +548,10 @@ static int evaluate_inter_mode(unsigned int *sse, int rate2, int *distortion2,
}
if ((this_mode != NEWMV) || !(cpi->sf.half_pixel_search) ||
cpi->common.full_pixel == 1)
cpi->common.full_pixel == 1) {
*distortion2 =
vp8_get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], sse, mv);
}
this_rd = RDCOST(x->rdmult, x->rddiv, rate2, *distortion2);
@ -585,32 +588,39 @@ static void calculate_zeromv_rd_adjustment(VP8_COMP *cpi, MACROBLOCK *x,
mic -= 1;
mv_l = mic->mbmi.mv;
if (mic->mbmi.ref_frame != INTRA_FRAME)
if (abs(mv_l.as_mv.row) < 8 && abs(mv_l.as_mv.col) < 8)
if (mic->mbmi.ref_frame != INTRA_FRAME) {
if (abs(mv_l.as_mv.row) < 8 && abs(mv_l.as_mv.col) < 8) {
local_motion_check++;
}
}
/* above-left mb */
mic -= x->e_mbd.mode_info_stride;
mv_al = mic->mbmi.mv;
if (mic->mbmi.ref_frame != INTRA_FRAME)
if (abs(mv_al.as_mv.row) < 8 && abs(mv_al.as_mv.col) < 8)
if (mic->mbmi.ref_frame != INTRA_FRAME) {
if (abs(mv_al.as_mv.row) < 8 && abs(mv_al.as_mv.col) < 8) {
local_motion_check++;
}
}
/* above mb */
mic += 1;
mv_a = mic->mbmi.mv;
if (mic->mbmi.ref_frame != INTRA_FRAME)
if (abs(mv_a.as_mv.row) < 8 && abs(mv_a.as_mv.col) < 8)
if (mic->mbmi.ref_frame != INTRA_FRAME) {
if (abs(mv_a.as_mv.row) < 8 && abs(mv_a.as_mv.col) < 8) {
local_motion_check++;
}
}
if (((!x->e_mbd.mb_to_top_edge || !x->e_mbd.mb_to_left_edge) &&
local_motion_check > 0) ||
local_motion_check > 2)
local_motion_check > 2) {
*rd_adjustment = 80;
else if (local_motion_check > 0)
} else if (local_motion_check > 0) {
*rd_adjustment = 90;
}
}
}
@ -892,8 +902,9 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
* likely to be chosen */
x->rd_thresh_mult[mode_index] += 4;
if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
}
x->rd_threshes[mode_index] =
(cpi->rd_baseline_thresh[mode_index] >> 7) *
@ -924,8 +935,9 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
* an unfiltered alternative */
if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
if (this_mode != ZEROMV ||
x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME)
x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME) {
continue;
}
}
switch (this_mode) {
@ -1105,9 +1117,9 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
while (n < further_steps) {
n++;
if (num00)
if (num00) {
num00--;
else {
} else {
thissme = cpi->diamond_search_sad(
x, b, d, &mvp_full, &d->bmi.mv, step_param + n, sadpb,
&num00, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
@ -1126,10 +1138,11 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
x->mv_row_min = tmp_row_min;
x->mv_row_max = tmp_row_max;
if (bestsme < INT_MAX)
if (bestsme < INT_MAX) {
cpi->find_fractional_mv_step(
x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit,
&cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse);
}
}
mode_mv[NEWMV].as_int = d->bmi.mv.as_int;
@ -1156,8 +1169,9 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
continue;
}
rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
x->e_mbd.mode_info_context->mbmi.mv.as_int = mode_mv[this_mode].as_int;
@ -1226,8 +1240,9 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
else {
x->rd_thresh_mult[mode_index] += 4;
if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
}
x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) *
x->rd_thresh_mult[mode_index];
@ -1281,8 +1296,9 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
// labeling on there being significant denoising in the scene
if (cpi->oxcf.noise_sensitivity == 4) {
if (cpi->denoiser.nmse_source_diff >
70 * cpi->denoiser.threshold_aggressive_mode / 100)
70 * cpi->denoiser.threshold_aggressive_mode / 100) {
is_noisy = 1;
}
} else {
if (cpi->mse_source_denoised > 1000) is_noisy = 1;
}
@ -1353,9 +1369,10 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
/* set to the best mb mode, this copy can be skip if x->skip since it
* already has the right content */
if (!x->skip)
if (!x->skip) {
memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode,
sizeof(MB_MODE_INFO));
}
if (best_mbmode.mode <= B_PRED) {
/* set mode_info_context->mbmi.uv_mode */
@ -1363,8 +1380,9 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}
if (sign_bias !=
cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame])
cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame]) {
best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int;
}
update_mvcount(x, &best_ref_mv);
}

View File

@ -95,15 +95,16 @@ static int get_min_filter_level(VP8_COMP *cpi, int base_qindex) {
int min_filter_level;
if (cpi->source_alt_ref_active && cpi->common.refresh_golden_frame &&
!cpi->common.refresh_alt_ref_frame)
!cpi->common.refresh_alt_ref_frame) {
min_filter_level = 0;
else {
if (base_qindex <= 6)
} else {
if (base_qindex <= 6) {
min_filter_level = 0;
else if (base_qindex <= 16)
} else if (base_qindex <= 16) {
min_filter_level = 1;
else
} else {
min_filter_level = (base_qindex / 8);
}
}
return min_filter_level;
@ -119,8 +120,9 @@ static int get_max_filter_level(VP8_COMP *cpi, int base_qindex) {
int max_filter_level = MAX_LOOP_FILTER;
(void)base_qindex;
if (cpi->twopass.section_intra_rating > 8)
if (cpi->twopass.section_intra_rating > 8) {
max_filter_level = MAX_LOOP_FILTER * 3 / 4;
}
return max_filter_level;
}
@ -139,10 +141,11 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
/* Replace unfiltered frame buffer with a new one */
cm->frame_to_show = &cpi->pick_lf_lvl_frame;
if (cm->frame_type == KEY_FRAME)
if (cm->frame_type == KEY_FRAME) {
cm->sharpness_level = 0;
else
} else {
cm->sharpness_level = cpi->oxcf.Sharpness;
}
if (cm->sharpness_level != cm->last_sharpness_level) {
vp8_loop_filter_update_sharpness(&cm->lf_info, cm->sharpness_level);
@ -152,10 +155,11 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
/* Start the search at the previous frame filter level unless it is
* now out of range.
*/
if (cm->filter_level < min_filter_level)
if (cm->filter_level < min_filter_level) {
cm->filter_level = min_filter_level;
else if (cm->filter_level > max_filter_level)
} else if (cm->filter_level > max_filter_level) {
cm->filter_level = max_filter_level;
}
filt_val = cm->filter_level;
best_filt_val = filt_val;
@ -183,8 +187,9 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
if (filt_err < best_err) {
best_err = filt_err;
best_filt_val = filt_val;
} else
} else {
break;
}
/* Adjust filter level */
filt_val -= 1 + (filt_val > 10);
@ -214,8 +219,9 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
best_err = filt_err - (filt_err >> 10);
best_filt_val = filt_val;
} else
} else {
break;
}
/* Adjust filter level */
filt_val += 1 + (filt_val > 10);
@ -274,20 +280,22 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
/* Replace unfiltered frame buffer with a new one */
cm->frame_to_show = &cpi->pick_lf_lvl_frame;
if (cm->frame_type == KEY_FRAME)
if (cm->frame_type == KEY_FRAME) {
cm->sharpness_level = 0;
else
} else {
cm->sharpness_level = cpi->oxcf.Sharpness;
}
/* Start the search at the previous frame filter level unless it is
* now out of range.
*/
filt_mid = cm->filter_level;
if (filt_mid < min_filter_level)
if (filt_mid < min_filter_level) {
filt_mid = min_filter_level;
else if (filt_mid > max_filter_level)
} else if (filt_mid > max_filter_level) {
filt_mid = max_filter_level;
}
/* Define the initial step size */
filter_step = (filt_mid < 16) ? 4 : filt_mid / 4;
@ -309,8 +317,9 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
while (filter_step > 0) {
Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step;
if (cpi->twopass.section_intra_rating < 20)
if (cpi->twopass.section_intra_rating < 20) {
Bias = Bias * cpi->twopass.section_intra_rating / 20;
}
filt_high = ((filt_mid + filter_step) > max_filter_level)
? max_filter_level
@ -328,8 +337,9 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
filt_err = vp8_calc_ss_err(sd, cm->frame_to_show);
ss_err[filt_low] = filt_err;
} else
} else {
filt_err = ss_err[filt_low];
}
/* If value is close to the best so far then bias towards a
* lower loop filter value.
@ -351,8 +361,9 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) {
filt_err = vp8_calc_ss_err(sd, cm->frame_to_show);
ss_err[filt_high] = filt_err;
} else
} else {
filt_err = ss_err[filt_high];
}
/* Was it better than the previous best? */
if (filt_err < (best_err - Bias)) {

View File

@ -263,10 +263,11 @@ void vp8_setup_key_frame(VP8_COMP *cpi) {
cpi->common.filter_level = cpi->common.base_qindex * 3 / 8;
/* Provisional interval before next GF */
if (cpi->auto_gold)
if (cpi->auto_gold) {
cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
else
} else {
cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL;
}
cpi->common.refresh_golden_frame = 1;
cpi->common.refresh_alt_ref_frame = 1;
@ -280,10 +281,11 @@ static int estimate_bits_at_q(int frame_kind, int Q, int MBs,
* chosen such that the maximum product of Bpm and MBs fits 31 bits. The
* largest Bpm takes 20 bits.
*/
if (MBs > (1 << 11))
if (MBs > (1 << 11)) {
return (Bpm >> BPER_MB_NORMBITS) * MBs;
else
} else {
return (Bpm * MBs) >> BPER_MB_NORMBITS;
}
}
static void calc_iframe_target_size(VP8_COMP *cpi) {
@ -311,8 +313,9 @@ static void calc_iframe_target_size(VP8_COMP *cpi) {
*/
target = cpi->oxcf.starting_buffer_level / 2;
if (target > cpi->oxcf.target_bandwidth * 3 / 2)
if (target > cpi->oxcf.target_bandwidth * 3 / 2) {
target = cpi->oxcf.target_bandwidth * 3 / 2;
}
} else {
/* if this keyframe was forced, use a more recent Q estimate */
int Q = (cpi->common.frame_flags & FRAMEFLAGS_KEY) ? cpi->avg_frame_qindex
@ -331,9 +334,10 @@ static void calc_iframe_target_size(VP8_COMP *cpi) {
kf_boost = kf_boost * kf_boost_qadjustment[Q] / 100;
/* frame separation adjustment ( down) */
if (cpi->frames_since_key < cpi->output_framerate / 2)
if (cpi->frames_since_key < cpi->output_framerate / 2) {
kf_boost =
(int)(kf_boost * cpi->frames_since_key / (cpi->output_framerate / 2));
}
/* Minimal target size is |2* per_frame_bandwidth|. */
if (kf_boost < 16) kf_boost = 16;
@ -385,10 +389,11 @@ static void calc_gf_params(VP8_COMP *cpi) {
int pct_gf_active = (100 * cpi->gf_active_count) /
(cpi->common.mb_rows * cpi->common.mb_cols);
if (tot_mbs)
if (tot_mbs) {
gf_frame_useage = (cpi->recent_ref_frame_usage[GOLDEN_FRAME] +
cpi->recent_ref_frame_usage[ALTREF_FRAME]) *
100 / tot_mbs;
}
if (pct_gf_active > gf_frame_useage) gf_frame_useage = pct_gf_active;
@ -474,12 +479,13 @@ static void calc_gf_params(VP8_COMP *cpi) {
}
/* Apply an upper limit based on Q for 1 pass encodes */
if (Boost > kf_gf_boost_qlimits[Q] && (cpi->pass == 0))
if (Boost > kf_gf_boost_qlimits[Q] && (cpi->pass == 0)) {
Boost = kf_gf_boost_qlimits[Q];
/* Apply lower limits to boost. */
else if (Boost < 110)
/* Apply lower limits to boost. */
} else if (Boost < 110) {
Boost = 110;
}
/* Note the boost used */
cpi->last_boost = Boost;
@ -504,14 +510,17 @@ static void calc_gf_params(VP8_COMP *cpi) {
if (cpi->last_boost >= 1500) cpi->frames_till_gf_update_due++;
if (gf_interval_table[gf_frame_useage] > cpi->frames_till_gf_update_due)
if (gf_interval_table[gf_frame_useage] > cpi->frames_till_gf_update_due) {
cpi->frames_till_gf_update_due = gf_interval_table[gf_frame_useage];
}
if (cpi->frames_till_gf_update_due > cpi->max_gf_interval)
if (cpi->frames_till_gf_update_due > cpi->max_gf_interval) {
cpi->frames_till_gf_update_due = cpi->max_gf_interval;
}
}
} else
} else {
cpi->frames_till_gf_update_due = cpi->baseline_gf_interval;
}
/* ARF on or off */
if (cpi->pass != 2) {
@ -533,19 +542,22 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
int min_frame_target;
int old_per_frame_bandwidth = cpi->per_frame_bandwidth;
if (cpi->current_layer > 0)
if (cpi->current_layer > 0) {
cpi->per_frame_bandwidth =
cpi->layer_context[cpi->current_layer].avg_frame_size_for_layer;
}
min_frame_target = 0;
if (cpi->pass == 2) {
min_frame_target = cpi->min_frame_bandwidth;
if (min_frame_target < (cpi->av_per_frame_bandwidth >> 5))
if (min_frame_target < (cpi->av_per_frame_bandwidth >> 5)) {
min_frame_target = cpi->av_per_frame_bandwidth >> 5;
} else if (min_frame_target < cpi->per_frame_bandwidth / 4)
}
} else if (min_frame_target < cpi->per_frame_bandwidth / 4) {
min_frame_target = cpi->per_frame_bandwidth / 4;
}
/* Special alt reference frame case */
if ((cpi->common.refresh_alt_ref_frame) &&
@ -577,8 +589,9 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
? cpi->kf_bitrate_adjustment
: cpi->kf_overspend_bits;
if (Adjustment > (cpi->per_frame_bandwidth - min_frame_target))
if (Adjustment > (cpi->per_frame_bandwidth - min_frame_target)) {
Adjustment = (cpi->per_frame_bandwidth - min_frame_target);
}
cpi->kf_overspend_bits -= Adjustment;
@ -588,10 +601,12 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
*/
cpi->this_frame_target = cpi->per_frame_bandwidth - Adjustment;
if (cpi->this_frame_target < min_frame_target)
if (cpi->this_frame_target < min_frame_target) {
cpi->this_frame_target = min_frame_target;
} else
}
} else {
cpi->this_frame_target = cpi->per_frame_bandwidth;
}
/* If appropriate make an adjustment to recover bits spent on a
* recent GF
@ -602,8 +617,9 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
? cpi->non_gf_bitrate_adjustment
: cpi->gf_overspend_bits;
if (Adjustment > (cpi->this_frame_target - min_frame_target))
if (Adjustment > (cpi->this_frame_target - min_frame_target)) {
Adjustment = (cpi->this_frame_target - min_frame_target);
}
cpi->gf_overspend_bits -= Adjustment;
cpi->this_frame_target -= Adjustment;
@ -615,25 +631,29 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
/* % Adjustment limited to the range 1% to 10% */
Adjustment = (cpi->last_boost - 100) >> 5;
if (Adjustment < 1)
if (Adjustment < 1) {
Adjustment = 1;
else if (Adjustment > 10)
} else if (Adjustment > 10) {
Adjustment = 10;
}
/* Convert to bits */
Adjustment = (cpi->this_frame_target * Adjustment) / 100;
if (Adjustment > (cpi->this_frame_target - min_frame_target))
if (Adjustment > (cpi->this_frame_target - min_frame_target)) {
Adjustment = (cpi->this_frame_target - min_frame_target);
}
if (cpi->frames_since_golden == (cpi->current_gf_interval >> 1)) {
Adjustment = (cpi->current_gf_interval - 1) * Adjustment;
// Limit adjustment to 10% of current target.
if (Adjustment > (10 * cpi->this_frame_target) / 100)
if (Adjustment > (10 * cpi->this_frame_target) / 100) {
Adjustment = (10 * cpi->this_frame_target) / 100;
}
cpi->this_frame_target += Adjustment;
} else
} else {
cpi->this_frame_target -= Adjustment;
}
}
}
}
@ -646,12 +666,14 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
* be recovered over a longer time span via other buffer / rate control
* mechanisms.
*/
if (cpi->this_frame_target < min_frame_target)
if (cpi->this_frame_target < min_frame_target) {
cpi->this_frame_target = min_frame_target;
}
if (!cpi->common.refresh_alt_ref_frame)
if (!cpi->common.refresh_alt_ref_frame) {
/* Note the baseline target data rate for this inter frame. */
cpi->inter_frame_target = cpi->this_frame_target;
}
/* One Pass specific code */
if (cpi->pass == 0) {
@ -683,10 +705,11 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
(int)(100 * -cpi->bits_off_target / (cpi->total_byte_count * 8));
}
if (percent_low > cpi->oxcf.under_shoot_pct)
if (percent_low > cpi->oxcf.under_shoot_pct) {
percent_low = cpi->oxcf.under_shoot_pct;
else if (percent_low < 0)
} else if (percent_low < 0) {
percent_low = 0;
}
/* lower the target bandwidth for this frame. */
cpi->this_frame_target -= (cpi->this_frame_target * percent_low) / 200;
@ -763,10 +786,11 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
(int)((100 * cpi->bits_off_target) / (cpi->total_byte_count * 8));
}
if (percent_high > cpi->oxcf.over_shoot_pct)
if (percent_high > cpi->oxcf.over_shoot_pct) {
percent_high = cpi->oxcf.over_shoot_pct;
else if (percent_high < 0)
} else if (percent_high < 0) {
percent_high = 0;
}
cpi->this_frame_target += (cpi->this_frame_target * percent_high) / 200;
@ -787,8 +811,9 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
cpi->active_best_quality = cpi->best_quality;
/* Worst quality obviously must not be better than best quality */
if (cpi->active_worst_quality <= cpi->active_best_quality)
if (cpi->active_worst_quality <= cpi->active_best_quality) {
cpi->active_worst_quality = cpi->active_best_quality + 1;
}
if (cpi->active_worst_quality > 127) cpi->active_worst_quality = 127;
}
@ -833,8 +858,9 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
/* Update the buffer level variable. */
cpi->bits_off_target += cpi->av_per_frame_bandwidth;
if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size)
if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) {
cpi->bits_off_target = (int)cpi->oxcf.maximum_buffer_size;
}
cpi->buffer_level = cpi->bits_off_target;
if (cpi->oxcf.number_of_layers > 1) {
@ -844,8 +870,9 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
LAYER_CONTEXT *lc = &cpi->layer_context[i];
lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate);
if (lc->bits_off_target > lc->maximum_buffer_size)
if (lc->bits_off_target > lc->maximum_buffer_size) {
lc->bits_off_target = lc->maximum_buffer_size;
}
lc->buffer_level = lc->bits_off_target;
}
}
@ -867,10 +894,11 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
int pct_gf_active = (100 * cpi->gf_active_count) /
(cpi->common.mb_rows * cpi->common.mb_cols);
if (tot_mbs)
if (tot_mbs) {
gf_frame_useage = (cpi->recent_ref_frame_usage[GOLDEN_FRAME] +
cpi->recent_ref_frame_usage[ALTREF_FRAME]) *
100 / tot_mbs;
}
if (pct_gf_active > gf_frame_useage) gf_frame_useage = pct_gf_active;
@ -880,12 +908,13 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
* low or the GF useage is high
*/
if ((cpi->pass == 0) &&
(cpi->this_frame_percent_intra < 15 || gf_frame_useage >= 5))
(cpi->this_frame_percent_intra < 15 || gf_frame_useage >= 5)) {
cpi->common.refresh_golden_frame = 1;
/* Two pass GF descision */
else if (cpi->pass == 2)
/* Two pass GF descision */
} else if (cpi->pass == 2) {
cpi->common.refresh_golden_frame = 1;
}
}
#if 0
@ -947,18 +976,20 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
}
/* Avoid loss of precision but avoid overflow */
if ((bits_in_section >> 7) > allocation_chunks)
if ((bits_in_section >> 7) > allocation_chunks) {
cpi->this_frame_target =
Boost * (bits_in_section / allocation_chunks);
else
} else {
cpi->this_frame_target =
(Boost * bits_in_section) / allocation_chunks;
}
}
} else
} else {
cpi->this_frame_target =
(estimate_bits_at_q(1, Q, cpi->common.MBs, 1.0) *
cpi->last_boost) /
100;
}
}
/* If there is an active ARF at this location use the minimum
@ -991,11 +1022,12 @@ void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var) {
if (cpi->common.frame_type == KEY_FRAME) {
rate_correction_factor = cpi->key_frame_rate_correction_factor;
} else {
if (cpi->oxcf.number_of_layers == 1 &&
(cpi->common.refresh_alt_ref_frame || cpi->common.refresh_golden_frame))
if (cpi->oxcf.number_of_layers == 1 && (cpi->common.refresh_alt_ref_frame ||
cpi->common.refresh_golden_frame)) {
rate_correction_factor = cpi->gf_rate_correction_factor;
else
} else {
rate_correction_factor = cpi->rate_correction_factor;
}
}
/* Work out how big we would have expected the frame to be at this Q
@ -1025,9 +1057,10 @@ void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var) {
}
/* Work out a size correction factor. */
if (projected_size_based_on_q > 0)
if (projected_size_based_on_q > 0) {
correction_factor =
(100 * cpi->projected_frame_size) / projected_size_based_on_q;
}
/* More heavily damped adjustment used if we have been oscillating
* either side of target
@ -1047,8 +1080,9 @@ void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var) {
((rate_correction_factor * correction_factor) / 100);
/* Keep rate_correction_factor within limits */
if (rate_correction_factor > MAX_BPB_FACTOR)
if (rate_correction_factor > MAX_BPB_FACTOR) {
rate_correction_factor = MAX_BPB_FACTOR;
}
} else if (correction_factor < 99) {
/* We are not already at the best allowable quality */
correction_factor =
@ -1057,18 +1091,20 @@ void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var) {
((rate_correction_factor * correction_factor) / 100);
/* Keep rate_correction_factor within limits */
if (rate_correction_factor < MIN_BPB_FACTOR)
if (rate_correction_factor < MIN_BPB_FACTOR) {
rate_correction_factor = MIN_BPB_FACTOR;
}
}
if (cpi->common.frame_type == KEY_FRAME)
if (cpi->common.frame_type == KEY_FRAME) {
cpi->key_frame_rate_correction_factor = rate_correction_factor;
else {
if (cpi->oxcf.number_of_layers == 1 &&
(cpi->common.refresh_alt_ref_frame || cpi->common.refresh_golden_frame))
} else {
if (cpi->oxcf.number_of_layers == 1 && (cpi->common.refresh_alt_ref_frame ||
cpi->common.refresh_golden_frame)) {
cpi->gf_rate_correction_factor = rate_correction_factor;
else
} else {
cpi->rate_correction_factor = rate_correction_factor;
}
}
}
@ -1103,27 +1139,29 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame) {
double correction_factor;
/* Select the appropriate correction factor based upon type of frame. */
if (cpi->common.frame_type == KEY_FRAME)
if (cpi->common.frame_type == KEY_FRAME) {
correction_factor = cpi->key_frame_rate_correction_factor;
else {
} else {
if (cpi->oxcf.number_of_layers == 1 &&
(cpi->common.refresh_alt_ref_frame ||
cpi->common.refresh_golden_frame))
cpi->common.refresh_golden_frame)) {
correction_factor = cpi->gf_rate_correction_factor;
else
} else {
correction_factor = cpi->rate_correction_factor;
}
}
/* Calculate required scaling factor based on target frame size and
* size of frame produced using previous Q
*/
if (target_bits_per_frame >= (INT_MAX >> BPER_MB_NORMBITS))
if (target_bits_per_frame >= (INT_MAX >> BPER_MB_NORMBITS)) {
/* Case where we would overflow int */
target_bits_per_mb = (target_bits_per_frame / cpi->common.MBs)
<< BPER_MB_NORMBITS;
else
} else {
target_bits_per_mb =
(target_bits_per_frame << BPER_MB_NORMBITS) / cpi->common.MBs;
}
i = cpi->active_best_quality;
@ -1133,14 +1171,16 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame) {
correction_factor * vp8_bits_per_mb[cpi->common.frame_type][i]);
if (bits_per_mb_at_this_q <= target_bits_per_mb) {
if ((target_bits_per_mb - bits_per_mb_at_this_q) <= last_error)
if ((target_bits_per_mb - bits_per_mb_at_this_q) <= last_error) {
Q = i;
else
} else {
Q = i - 1;
}
break;
} else
} else {
last_error = bits_per_mb_at_this_q - target_bits_per_mb;
}
} while (++i <= cpi->active_worst_quality);
/* If we are at MAXQ then enable Q over-run which seeks to claw
@ -1153,15 +1193,16 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame) {
double Factor = 0.99;
double factor_adjustment = 0.01 / 256.0;
if (cpi->common.frame_type == KEY_FRAME)
if (cpi->common.frame_type == KEY_FRAME) {
zbin_oqmax = 0;
else if (cpi->oxcf.number_of_layers == 1 &&
(cpi->common.refresh_alt_ref_frame ||
(cpi->common.refresh_golden_frame &&
!cpi->source_alt_ref_active)))
} else if (cpi->oxcf.number_of_layers == 1 &&
(cpi->common.refresh_alt_ref_frame ||
(cpi->common.refresh_golden_frame &&
!cpi->source_alt_ref_active))) {
zbin_oqmax = 16;
else
} else {
zbin_oqmax = ZBIN_OQ_MAX;
}
/*{
double Factor =
@ -1188,8 +1229,9 @@ int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame) {
while (cpi->mb.zbin_over_quant < zbin_oqmax) {
cpi->mb.zbin_over_quant++;
if (cpi->mb.zbin_over_quant > zbin_oqmax)
if (cpi->mb.zbin_over_quant > zbin_oqmax) {
cpi->mb.zbin_over_quant = zbin_oqmax;
}
/* Adjust bits_per_mb_at_this_q estimate */
bits_per_mb_at_this_q = (int)(Factor * bits_per_mb_at_this_q);
@ -1222,8 +1264,9 @@ static int estimate_keyframe_frequency(VP8_COMP *cpi) {
int key_freq = cpi->oxcf.key_freq > 0 ? cpi->oxcf.key_freq : 1;
av_key_frame_frequency = 1 + (int)cpi->output_framerate * 2;
if (cpi->oxcf.auto_key && av_key_frame_frequency > key_freq)
if (cpi->oxcf.auto_key && av_key_frame_frequency > key_freq) {
av_key_frame_frequency = key_freq;
}
cpi->prior_key_frame_distance[KEY_FRAME_CONTEXT - 1] =
av_key_frame_frequency;
@ -1236,10 +1279,11 @@ static int estimate_keyframe_frequency(VP8_COMP *cpi) {
* KEY_FRAME_CONTEXT keyframes
*/
for (i = 0; i < KEY_FRAME_CONTEXT; ++i) {
if (i < KEY_FRAME_CONTEXT - 1)
if (i < KEY_FRAME_CONTEXT - 1) {
cpi->prior_key_frame_distance[i] = cpi->prior_key_frame_distance[i + 1];
else
} else {
cpi->prior_key_frame_distance[i] = last_kf_interval;
}
av_key_frame_frequency +=
prior_key_frame_weight[i] * cpi->prior_key_frame_distance[i];
@ -1272,9 +1316,9 @@ void vp8_adjust_key_frame_context(VP8_COMP *cpi) {
*/
overspend = (cpi->projected_frame_size - cpi->per_frame_bandwidth);
if (cpi->oxcf.number_of_layers > 1)
if (cpi->oxcf.number_of_layers > 1) {
cpi->kf_overspend_bits += overspend;
else {
} else {
cpi->kf_overspend_bits += overspend * 7 / 8;
cpi->gf_overspend_bits += overspend * 1 / 8;
}
@ -1360,9 +1404,9 @@ void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit,
int vp8_pick_frame_size(VP8_COMP *cpi) {
VP8_COMMON *cm = &cpi->common;
if (cm->frame_type == KEY_FRAME)
if (cm->frame_type == KEY_FRAME) {
calc_iframe_target_size(cpi);
else {
} else {
calc_pframe_target_size(cpi);
/* Check if we're dropping the frame: */
@ -1415,21 +1459,24 @@ int vp8_drop_encodedframe_overshoot(VP8_COMP *cpi, int Q) {
// undershoots significantly, and then we end up dropping every other
// frame because the QP/rate_correction_factor may have been too low
// before the drop and then takes too long to come up.
if (target_size >= (INT_MAX >> BPER_MB_NORMBITS))
if (target_size >= (INT_MAX >> BPER_MB_NORMBITS)) {
target_bits_per_mb = (target_size / cpi->common.MBs)
<< BPER_MB_NORMBITS;
else
} else {
target_bits_per_mb =
(target_size << BPER_MB_NORMBITS) / cpi->common.MBs;
}
// Rate correction factor based on target_size_per_mb and max_QP.
new_correction_factor =
(double)target_bits_per_mb /
(double)vp8_bits_per_mb[INTER_FRAME][cpi->worst_quality];
if (new_correction_factor > cpi->rate_correction_factor)
if (new_correction_factor > cpi->rate_correction_factor) {
cpi->rate_correction_factor =
VPXMIN(2.0 * cpi->rate_correction_factor, new_correction_factor);
if (cpi->rate_correction_factor > MAX_BPB_FACTOR)
}
if (cpi->rate_correction_factor > MAX_BPB_FACTOR) {
cpi->rate_correction_factor = MAX_BPB_FACTOR;
}
return 1;
} else {
cpi->force_maxqp = 0;

View File

@ -111,17 +111,20 @@ static void fill_token_costs(
p[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES]) {
int i, j, k;
for (i = 0; i < BLOCK_TYPES; ++i)
for (j = 0; j < COEF_BANDS; ++j)
for (k = 0; k < PREV_COEF_CONTEXTS; ++k)
for (i = 0; i < BLOCK_TYPES; ++i) {
for (j = 0; j < COEF_BANDS; ++j) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
/* check for pt=0 and band > 1 if block type 0
* and 0 if blocktype 1
*/
if (k == 0 && j > (i == 0))
if (k == 0 && j > (i == 0)) {
vp8_cost_tokens2(c[i][j][k], p[i][j][k], vp8_coef_tree, 2);
else
} else {
vp8_cost_tokens(c[i][j][k], p[i][j][k], vp8_coef_tree);
}
}
}
}
}
static const int rd_iifactor[32] = { 4, 4, 3, 2, 1, 0, 0, 0, 0, 0, 0,
@ -180,11 +183,12 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, MACROBLOCK *x, int Qvalue) {
}
if (cpi->pass == 2 && (cpi->common.frame_type != KEY_FRAME)) {
if (cpi->twopass.next_iiratio > 31)
if (cpi->twopass.next_iiratio > 31) {
cpi->RDMULT += (cpi->RDMULT * rd_iifactor[31]) >> 4;
else
} else {
cpi->RDMULT +=
(cpi->RDMULT * rd_iifactor[cpi->twopass.next_iiratio]) >> 4;
}
}
cpi->mb.errorperbit = (cpi->RDMULT / 110);
@ -231,10 +235,11 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, MACROBLOCK *x, int Qvalue) {
/* build token cost array for the type of frame we have now */
FRAME_CONTEXT *l = &cpi->lfc_n;
if (cpi->common.refresh_alt_ref_frame)
if (cpi->common.refresh_alt_ref_frame) {
l = &cpi->lfc_a;
else if (cpi->common.refresh_golden_frame)
} else if (cpi->common.refresh_golden_frame) {
l = &cpi->lfc_g;
}
fill_token_costs(cpi->mb.token_costs,
(const vp8_prob(*)[8][3][11])l->coef_probs);
@ -372,15 +377,17 @@ int VP8_UVSSE(MACROBLOCK *x) {
int offset;
int pre_stride = x->e_mbd.pre.uv_stride;
if (mv_row < 0)
if (mv_row < 0) {
mv_row -= 1;
else
} else {
mv_row += 1;
}
if (mv_col < 0)
if (mv_col < 0) {
mv_col -= 1;
else
} else {
mv_col += 1;
}
mv_row /= 2;
mv_col /= 2;
@ -422,8 +429,9 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type, ENTROPY_CONTEXT *a,
pt = vp8_prev_token_class[t];
}
if (c < 16)
if (c < 16) {
cost += mb->token_costs[type][vp8_coef_bands[c]][pt][DCT_EOB_TOKEN];
}
pt = (c != !type); /* is eob first coefficient; */
*a = *l = pt;
@ -445,9 +453,10 @@ static int vp8_rdcost_mby(MACROBLOCK *mb) {
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
for (b = 0; b < 16; ++b)
for (b = 0; b < 16; ++b) {
cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_NO_DC,
ta + vp8_block2above[b], tl + vp8_block2left[b]);
}
cost += cost_coeffs(mb, x->block + 24, PLANE_TYPE_Y2,
ta + vp8_block2above[24], tl + vp8_block2left[24]);
@ -681,9 +690,10 @@ static int rd_cost_mbuv(MACROBLOCK *mb) {
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
for (b = 16; b < 24; ++b)
for (b = 16; b < 24; ++b) {
cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_UV,
ta + vp8_block2above[b], tl + vp8_block2left[b]);
}
return cost;
}
@ -815,11 +825,11 @@ static int labels2mode(MACROBLOCK *x, int const *labelings, int which_label,
if (labelings[i] != which_label) continue;
if (col && labelings[i] == labelings[i - 1])
if (col && labelings[i] == labelings[i - 1]) {
m = LEFT4X4;
else if (row && labelings[i] == labelings[i - 4])
} else if (row && labelings[i] == labelings[i - 4]) {
m = ABOVE4X4;
else {
} else {
/* the only time we should do costing for new motion vector
* or mode is when we are on a new label (jbb May 08, 2007)
*/
@ -868,10 +878,12 @@ static int rdcost_mbsegment_y(MACROBLOCK *mb, const int *labels,
int b;
MACROBLOCKD *x = &mb->e_mbd;
for (b = 0; b < 16; ++b)
if (labels[b] == which_label)
for (b = 0; b < 16; ++b) {
if (labels[b] == which_label) {
cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_WITH_DC,
ta + vp8_block2above[b], tl + vp8_block2left[b]);
}
}
return cost;
}
@ -1018,8 +1030,9 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x, BEST_SEG_INFO *bsi,
if (cpi->compressor_speed) {
if (segmentation == BLOCK_8X16 || segmentation == BLOCK_16X8) {
bsi->mvp.as_int = bsi->sv_mvp[i].as_int;
if (i == 1 && segmentation == BLOCK_16X8)
if (i == 1 && segmentation == BLOCK_16X8) {
bsi->mvp.as_int = bsi->sv_mvp[2].as_int;
}
step_param = bsi->sv_istep[i];
}
@ -1029,8 +1042,9 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x, BEST_SEG_INFO *bsi,
*/
if (segmentation == BLOCK_4X4 && i > 0) {
bsi->mvp.as_int = x->e_mbd.block[i - 1].bmi.mv.as_int;
if (i == 4 || i == 8 || i == 12)
if (i == 4 || i == 8 || i == 12) {
bsi->mvp.as_int = x->e_mbd.block[i - 4].bmi.mv.as_int;
}
step_param = 2;
}
}
@ -1061,9 +1075,9 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x, BEST_SEG_INFO *bsi,
while (n < further_steps) {
n++;
if (num00)
if (num00) {
num00--;
else {
} else {
thissme = cpi->diamond_search_sad(
x, c, e, &mvp_full, &temp_mv, step_param + n, sadpb, &num00,
v_fn_ptr, x->mvcost, bsi->ref_mv);
@ -1172,10 +1186,11 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x, BEST_SEG_INFO *bsi,
static void vp8_cal_step_param(int sr, int *sp) {
int step = 0;
if (sr > MAX_FIRST_STEP)
if (sr > MAX_FIRST_STEP) {
sr = MAX_FIRST_STEP;
else if (sr < 1)
} else if (sr < 1) {
sr = 1;
}
while (sr >>= 1) step++;
@ -1436,10 +1451,11 @@ void vp8_mv_pred(VP8_COMP *cpi, MACROBLOCKD *xd, const MODE_INFO *here,
if (here->mbmi.ref_frame == near_ref[near_sadidx[i]]) {
mv.as_int = near_mvs[near_sadidx[i]].as_int;
find = 1;
if (i < 3)
if (i < 3) {
*sr = 3;
else
} else {
*sr = 2;
}
break;
}
}
@ -1514,22 +1530,26 @@ void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x,
if (xd->mb_to_right_edge == 0) near_sad[6] = INT_MAX;
if (xd->mb_to_bottom_edge == 0) near_sad[7] = INT_MAX;
if (near_sad[4] != INT_MAX)
if (near_sad[4] != INT_MAX) {
near_sad[4] = cpi->fn_ptr[BLOCK_16X16].sdf(
src_y_ptr, b->src_stride, pre_y_buffer - pre_y_stride * 16,
pre_y_stride);
if (near_sad[5] != INT_MAX)
}
if (near_sad[5] != INT_MAX) {
near_sad[5] = cpi->fn_ptr[BLOCK_16X16].sdf(
src_y_ptr, b->src_stride, pre_y_buffer - 16, pre_y_stride);
}
near_sad[3] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride,
pre_y_buffer, pre_y_stride);
if (near_sad[6] != INT_MAX)
if (near_sad[6] != INT_MAX) {
near_sad[6] = cpi->fn_ptr[BLOCK_16X16].sdf(
src_y_ptr, b->src_stride, pre_y_buffer + 16, pre_y_stride);
if (near_sad[7] != INT_MAX)
}
if (near_sad[7] != INT_MAX) {
near_sad[7] = cpi->fn_ptr[BLOCK_16X16].sdf(
src_y_ptr, b->src_stride, pre_y_buffer + pre_y_stride * 16,
pre_y_stride);
}
}
if (cpi->common.last_frame_type != KEY_FRAME) {
@ -1661,8 +1681,9 @@ static int calculate_final_rd_costs(int this_rd, RATE_DISTORTION *rd,
if (x->e_mbd.mode_info_context->mbmi.ref_frame) {
for (i = 16; i < 24; ++i) tteob += x->e_mbd.eobs[i];
} else
} else {
tteob += uv_intra_tteob;
}
if (tteob == 0) {
rd->rate2 -= (rd->rate_y + rd->rate_uv);
@ -1683,8 +1704,9 @@ static int calculate_final_rd_costs(int this_rd, RATE_DISTORTION *rd,
/* Calculate the final RD estimate for this mode */
this_rd = RDCOST(x->rdmult, x->rddiv, rd->rate2, rd->distortion2);
if (this_rd < INT_MAX &&
x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME)
x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
this_rd += intra_rd_penalty;
}
}
return this_rd;
}
@ -1818,8 +1840,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
*/
if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
if (this_mode != ZEROMV ||
x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME)
x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME) {
continue;
}
}
/* everything but intra */
@ -1848,8 +1871,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
*/
x->rd_thresh_mult[mode_index] += 4;
if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
}
x->rd_threshes[mode_index] =
(cpi->rd_baseline_thresh[mode_index] >> 7) *
@ -1869,18 +1893,20 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
* Increase zbin size to supress noise
*/
if (x->zbin_mode_boost_enabled) {
if (this_ref_frame == INTRA_FRAME)
if (this_ref_frame == INTRA_FRAME) {
x->zbin_mode_boost = 0;
else {
} else {
if (vp8_mode_order[mode_index] == ZEROMV) {
if (this_ref_frame != LAST_FRAME)
if (this_ref_frame != LAST_FRAME) {
x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
} else {
x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
} else if (vp8_mode_order[mode_index] == SPLITMV)
}
} else if (vp8_mode_order[mode_index] == SPLITMV) {
x->zbin_mode_boost = 0;
else
} else {
x->zbin_mode_boost = MV_ZBIN_BOOST;
}
}
vp8_update_zbin_extra(cpi, x);
@ -2047,9 +2073,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
while (n < further_steps) {
n++;
if (num00)
if (num00) {
num00--;
else {
} else {
thissme = cpi->diamond_search_sad(
x, b, d, &mvp_full, &d->bmi.mv, step_param + n, sadpb, &num00,
&cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv);
@ -2117,8 +2143,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
* mode.
*/
if (((this_mode == NEARMV) || (this_mode == NEARESTMV)) &&
(mode_mv[this_mode].as_int == 0))
(mode_mv[this_mode].as_int == 0)) {
continue;
}
case ZEROMV:
@ -2130,8 +2157,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
continue;
}
vp8_set_mbmode_and_mvs(x, this_mode, &mode_mv[this_mode]);
this_rd = evaluate_inter_mode_rd(mdcounts, &rd, &disable_skip, cpi, x);
@ -2207,8 +2235,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
else {
x->rd_thresh_mult[mode_index] += 4;
if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) {
x->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
}
}
x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) *
x->rd_thresh_mult[mode_index];
@ -2298,13 +2327,15 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
sizeof(MB_MODE_INFO));
if (best_mode.mbmode.mode == B_PRED) {
for (i = 0; i < 16; ++i)
for (i = 0; i < 16; ++i) {
xd->mode_info_context->bmi[i].as_mode = best_mode.bmodes[i].as_mode;
}
}
if (best_mode.mbmode.mode == SPLITMV) {
for (i = 0; i < 16; ++i)
for (i = 0; i < 16; ++i) {
xd->mode_info_context->bmi[i].mv.as_int = best_mode.bmodes[i].mv.as_int;
}
memcpy(x->partition_info, &best_mode.partition, sizeof(PARTITION_INFO));
@ -2313,8 +2344,9 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
}
if (sign_bias !=
cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame])
cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame]) {
best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int;
}
rd_update_mvcount(x, &best_ref_mv);
}

View File

@ -80,17 +80,20 @@ static INLINE void get_predictor_pointers(const VP8_COMP *cpi,
unsigned char *plane[4][3],
unsigned int recon_yoffset,
unsigned int recon_uvoffset) {
if (cpi->ref_frame_flags & VP8_LAST_FRAME)
if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
get_plane_pointers(&cpi->common.yv12_fb[cpi->common.lst_fb_idx],
plane[LAST_FRAME], recon_yoffset, recon_uvoffset);
}
if (cpi->ref_frame_flags & VP8_GOLD_FRAME)
if (cpi->ref_frame_flags & VP8_GOLD_FRAME) {
get_plane_pointers(&cpi->common.yv12_fb[cpi->common.gld_fb_idx],
plane[GOLDEN_FRAME], recon_yoffset, recon_uvoffset);
}
if (cpi->ref_frame_flags & VP8_ALTR_FRAME)
if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
get_plane_pointers(&cpi->common.yv12_fb[cpi->common.alt_fb_idx],
plane[ALTREF_FRAME], recon_yoffset, recon_uvoffset);
}
}
static INLINE void get_reference_search_order(const VP8_COMP *cpi,

View File

@ -369,8 +369,9 @@ void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance) {
frames_to_blur_backward = num_frames_backward;
if (frames_to_blur_backward >= max_frames)
if (frames_to_blur_backward >= max_frames) {
frames_to_blur_backward = max_frames - 1;
}
frames_to_blur = frames_to_blur_backward + 1;
break;
@ -380,8 +381,9 @@ void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance) {
frames_to_blur_forward = num_frames_forward;
if (frames_to_blur_forward >= max_frames)
if (frames_to_blur_forward >= max_frames) {
frames_to_blur_forward = max_frames - 1;
}
frames_to_blur = frames_to_blur_forward + 1;
break;
@ -392,18 +394,22 @@ void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance) {
frames_to_blur_forward = num_frames_forward;
frames_to_blur_backward = num_frames_backward;
if (frames_to_blur_forward > frames_to_blur_backward)
if (frames_to_blur_forward > frames_to_blur_backward) {
frames_to_blur_forward = frames_to_blur_backward;
}
if (frames_to_blur_backward > frames_to_blur_forward)
if (frames_to_blur_backward > frames_to_blur_forward) {
frames_to_blur_backward = frames_to_blur_forward;
}
/* When max_frames is even we have 1 more frame backward than forward */
if (frames_to_blur_forward > (max_frames - 1) / 2)
if (frames_to_blur_forward > (max_frames - 1) / 2) {
frames_to_blur_forward = ((max_frames - 1) / 2);
}
if (frames_to_blur_backward > (max_frames / 2))
if (frames_to_blur_backward > (max_frames / 2)) {
frames_to_blur_backward = (max_frames / 2);
}
frames_to_blur = frames_to_blur_backward + frames_to_blur_forward + 1;
break;

View File

@ -515,13 +515,15 @@ void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) {
plane_type = 0;
}
for (b = 0; b < 16; ++b)
for (b = 0; b < 16; ++b) {
stuff1st_order_b(t, A + vp8_block2above[b], L + vp8_block2left[b],
plane_type, cpi, x);
}
for (b = 16; b < 24; ++b)
for (b = 16; b < 24; ++b) {
stuff1st_order_buv(t, A + vp8_block2above[b], L + vp8_block2left[b], cpi,
x);
}
}
void vp8_fix_contexts(MACROBLOCKD *x) {
/* Clear entropy contexts for Y2 blocks */

View File

@ -18,10 +18,11 @@ static void cost(int *const C, vp8_tree T, const vp8_prob *const P, int i,
const vp8_tree_index j = T[i];
const int d = c + vp8_cost_bit(p, i & 1);
if (j <= 0)
if (j <= 0) {
C[-j] = d;
else
} else {
cost(C, T, P, j, d);
}
} while (++i & 1);
}
void vp8_cost_tokens(int *c, const vp8_prob *p, vp8_tree t) {

View File

@ -108,8 +108,9 @@ void vp8_quantize_mb(MACROBLOCK *x) {
int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED &&
x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
for (i = 0; i < 24 + has_2nd_order; ++i)
for (i = 0; i < 24 + has_2nd_order; ++i) {
x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
}
}
void vp8_quantize_mbuv(MACROBLOCK *x) {
@ -296,19 +297,20 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip) {
/* Select the baseline MB Q index. */
if (xd->segmentation_enabled) {
/* Abs Value */
if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA) {
QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context
->mbmi.segment_id];
/* Delta Value */
else {
/* Delta Value */
} else {
QIndex = cpi->common.base_qindex +
xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context
->mbmi.segment_id];
/* Clamp to valid range */
QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0;
}
} else
} else {
QIndex = cpi->common.base_qindex;
}
/* This initialization should be called at least once. Use ok_to_skip to
* decide if it is ok to skip.
@ -452,8 +454,9 @@ void vp8_set_quantizer(struct VP8_COMP *cpi, int Q) {
if (Q < 4) {
new_delta_q = 4 - Q;
} else
} else {
new_delta_q = 0;
}
update |= cm->y2dc_delta_q != new_delta_q;
cm->y2dc_delta_q = new_delta_q;

View File

@ -89,8 +89,9 @@ static vpx_codec_err_t update_error_state(
vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) {
vpx_codec_err_t res;
if ((res = error->error_code))
if ((res = error->error_code)) {
ctx->base.err_detail = error->has_detail ? error->detail : NULL;
}
return res;
}
@ -228,15 +229,17 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
unsigned int i;
RANGE_CHECK_HI(cfg, ts_periodicity, 16);
for (i = 1; i < cfg->ts_number_layers; ++i)
for (i = 1; i < cfg->ts_number_layers; ++i) {
if (cfg->ts_target_bitrate[i] <= cfg->ts_target_bitrate[i - 1] &&
cfg->rc_target_bitrate > 0)
ERROR("ts_target_bitrate entries are not strictly increasing");
}
RANGE_CHECK(cfg, ts_rate_decimator[cfg->ts_number_layers - 1], 1, 1);
for (i = cfg->ts_number_layers - 2; i > 0; i--)
for (i = cfg->ts_number_layers - 2; i > 0; i--) {
if (cfg->ts_rate_decimator[i - 1] != 2 * cfg->ts_rate_decimator[i])
ERROR("ts_rate_decimator factors are not powers of 2");
}
RANGE_CHECK_HI(cfg, ts_layer_id[i], cfg->ts_number_layers - 1);
}
@ -631,10 +634,11 @@ static vpx_codec_err_t vp8e_init(vpx_codec_ctx_t *ctx,
return VPX_CODEC_MEM_ERROR;
}
if (mr_cfg)
if (mr_cfg) {
ctx->priv->enc.total_encoders = mr_cfg->mr_total_resolutions;
else
} else {
ctx->priv->enc.total_encoders = 1;
}
once(vp8_initialize_enc);
@ -723,13 +727,14 @@ static void pick_quickcompress_mode(vpx_codec_alg_priv_t *ctx,
new_qc = MODE_REALTIME;
#endif
if (deadline == VPX_DL_REALTIME)
if (deadline == VPX_DL_REALTIME) {
new_qc = MODE_REALTIME;
else if (ctx->cfg.g_pass == VPX_RC_FIRST_PASS)
} else if (ctx->cfg.g_pass == VPX_RC_FIRST_PASS) {
new_qc = MODE_FIRSTPASS;
else if (ctx->cfg.g_pass == VPX_RC_LAST_PASS)
} else if (ctx->cfg.g_pass == VPX_RC_LAST_PASS) {
new_qc =
(new_qc == MODE_BESTQUALITY) ? MODE_SECONDPASS_BEST : MODE_SECONDPASS;
}
if (ctx->oxcf.Mode != new_qc) {
ctx->oxcf.Mode = new_qc;
@ -825,11 +830,13 @@ static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
int comp_data_state = 0;
/* Set up internal flags */
if (ctx->base.init_flags & VPX_CODEC_USE_PSNR)
if (ctx->base.init_flags & VPX_CODEC_USE_PSNR) {
((VP8_COMP *)ctx->cpi)->b_calculate_psnr = 1;
}
if (ctx->base.init_flags & VPX_CODEC_USE_OUTPUT_PARTITION)
if (ctx->base.init_flags & VPX_CODEC_USE_OUTPUT_PARTITION) {
((VP8_COMP *)ctx->cpi)->output_partition = 1;
}
/* Convert API flags to internal codec lib flags */
lib_flags = (flags & VPX_EFLAG_FORCE_KF) ? FRAMEFLAGS_KEY : 0;
@ -863,10 +870,11 @@ static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
ctx->cpi, &lib_flags, &size, cx_data, cx_data_end, &dst_time_stamp,
&dst_end_time_stamp, !img);
if (comp_data_state == VPX_CODEC_CORRUPT_FRAME)
if (comp_data_state == VPX_CODEC_CORRUPT_FRAME) {
return VPX_CODEC_CORRUPT_FRAME;
else if (comp_data_state == -1)
} else if (comp_data_state == -1) {
break;
}
if (size) {
vpx_codec_pts_t round, delta;
@ -885,8 +893,9 @@ static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
ctx->cfg.g_timebase.num / 10000000);
pkt.data.frame.flags = lib_flags << 16;
if (lib_flags & FRAMEFLAGS_KEY)
if (lib_flags & FRAMEFLAGS_KEY) {
pkt.data.frame.flags |= VPX_FRAME_IS_KEY;
}
if (!cpi->common.show_frame) {
pkt.data.frame.flags |= VPX_FRAME_IS_INVISIBLE;
@ -923,8 +932,9 @@ static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx,
pkt.data.frame.sz = cpi->partition_sz[i];
pkt.data.frame.partition_id = i;
/* don't set the fragment bit for the last partition */
if (i == (num_partitions - 1))
if (i == (num_partitions - 1)) {
pkt.data.frame.flags &= ~VPX_FRAME_IS_FRAGMENT;
}
vpx_codec_pkt_list_add(&ctx->pkt_list.head, &pkt);
}
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
@ -966,8 +976,9 @@ static vpx_codec_err_t vp8e_set_reference(vpx_codec_alg_priv_t *ctx,
image2yuvconfig(&frame->img, &sd);
vp8_set_reference(ctx->cpi, frame->frame_type, &sd);
return VPX_CODEC_OK;
} else
} else {
return VPX_CODEC_INVALID_PARAM;
}
}
static vpx_codec_err_t vp8e_get_reference(vpx_codec_alg_priv_t *ctx,
@ -981,8 +992,9 @@ static vpx_codec_err_t vp8e_get_reference(vpx_codec_alg_priv_t *ctx,
image2yuvconfig(&frame->img, &sd);
vp8_get_reference(ctx->cpi, frame->frame_type, &sd);
return VPX_CODEC_OK;
} else
} else {
return VPX_CODEC_INVALID_PARAM;
}
}
static vpx_codec_err_t vp8e_set_previewpp(vpx_codec_alg_priv_t *ctx,
@ -993,8 +1005,9 @@ static vpx_codec_err_t vp8e_set_previewpp(vpx_codec_alg_priv_t *ctx,
if (data) {
ctx->preview_ppcfg = *((vp8_postproc_cfg_t *)data);
return VPX_CODEC_OK;
} else
} else {
return VPX_CODEC_INVALID_PARAM;
}
#else
(void)ctx;
(void)args;
@ -1043,8 +1056,9 @@ static vpx_image_t *vp8e_get_preview(vpx_codec_alg_priv_t *ctx) {
ctx->preview_img.h = sd.y_height;
return &ctx->preview_img;
} else
} else {
return NULL;
}
}
static vpx_codec_err_t vp8e_set_frame_flags(vpx_codec_alg_priv_t *ctx,
@ -1072,12 +1086,14 @@ static vpx_codec_err_t vp8e_set_roi_map(vpx_codec_alg_priv_t *ctx,
vpx_roi_map_t *roi = (vpx_roi_map_t *)data;
if (!vp8_set_roimap(ctx->cpi, roi->roi_map, roi->rows, roi->cols,
roi->delta_q, roi->delta_lf, roi->static_threshold))
roi->delta_q, roi->delta_lf, roi->static_threshold)) {
return VPX_CODEC_OK;
else
} else {
return VPX_CODEC_INVALID_PARAM;
} else
}
} else {
return VPX_CODEC_INVALID_PARAM;
}
}
static vpx_codec_err_t vp8e_set_activemap(vpx_codec_alg_priv_t *ctx,
@ -1087,12 +1103,14 @@ static vpx_codec_err_t vp8e_set_activemap(vpx_codec_alg_priv_t *ctx,
if (data) {
vpx_active_map_t *map = (vpx_active_map_t *)data;
if (!vp8_set_active_map(ctx->cpi, map->active_map, map->rows, map->cols))
if (!vp8_set_active_map(ctx->cpi, map->active_map, map->rows, map->cols)) {
return VPX_CODEC_OK;
else
} else {
return VPX_CODEC_INVALID_PARAM;
} else
}
} else {
return VPX_CODEC_INVALID_PARAM;
}
}
static vpx_codec_err_t vp8e_set_scalemode(vpx_codec_alg_priv_t *ctx,
@ -1109,10 +1127,12 @@ static vpx_codec_err_t vp8e_set_scalemode(vpx_codec_alg_priv_t *ctx,
/*force next frame a key frame to effect scaling mode */
ctx->next_frame_flag |= FRAMEFLAGS_KEY;
return VPX_CODEC_OK;
} else
} else {
return VPX_CODEC_INVALID_PARAM;
} else
}
} else {
return VPX_CODEC_INVALID_PARAM;
}
}
static vpx_codec_ctrl_fn_map_t vp8e_ctf_maps[] = {

View File

@ -169,8 +169,9 @@ static vpx_codec_err_t vp8_peek_si_internal(const uint8_t *data,
si->is_kf = 1;
/* vet via sync code */
if (clear[3] != 0x9d || clear[4] != 0x01 || clear[5] != 0x2a)
if (clear[3] != 0x9d || clear[4] != 0x01 || clear[5] != 0x2a) {
return VPX_CODEC_UNSUP_BITSTREAM;
}
si->w = (clear[6] | (clear[7] << 8)) & 0x3fff;
si->h = (clear[8] | (clear[9] << 8)) & 0x3fff;
@ -194,10 +195,11 @@ static vpx_codec_err_t vp8_get_si(vpx_codec_alg_priv_t *ctx,
vpx_codec_stream_info_t *si) {
unsigned int sz;
if (si->sz >= sizeof(vp8_stream_info_t))
if (si->sz >= sizeof(vp8_stream_info_t)) {
sz = sizeof(vp8_stream_info_t);
else
} else {
sz = sizeof(vpx_codec_stream_info_t);
}
memcpy(si, &ctx->si, sz);
si->sz = sz;
@ -209,8 +211,9 @@ static vpx_codec_err_t update_error_state(
vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) {
vpx_codec_err_t res;
if ((res = error->error_code))
if ((res = error->error_code)) {
ctx->base.err_detail = error->has_detail ? error->detail : NULL;
}
return res;
}
@ -384,9 +387,10 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
"Invalid frame height");
}
if (vp8_alloc_frame_buffers(pc, pc->Width, pc->Height))
if (vp8_alloc_frame_buffers(pc, pc->Width, pc->Height)) {
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate frame buffers");
}
xd->pre = pc->yv12_fb[pc->lst_fb_idx];
xd->dst = pc->yv12_fb[pc->new_fb_idx];
@ -426,8 +430,9 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
#endif
#if CONFIG_MULTITHREAD
if (pbi->b_multithreaded_rd)
if (pbi->b_multithreaded_rd) {
vp8mt_alloc_temp_buffers(pbi, pc->Width, prev_mb_rows);
}
#else
(void)prev_mb_rows;
#endif
@ -540,8 +545,9 @@ static vpx_codec_err_t vp8_set_reference(vpx_codec_alg_priv_t *ctx,
return vp8dx_set_reference(ctx->yv12_frame_buffers.pbi[0],
frame->frame_type, &sd);
} else
} else {
return VPX_CODEC_INVALID_PARAM;
}
}
static vpx_codec_err_t vp8_get_reference(vpx_codec_alg_priv_t *ctx,
@ -556,8 +562,9 @@ static vpx_codec_err_t vp8_get_reference(vpx_codec_alg_priv_t *ctx,
return vp8dx_get_reference(ctx->yv12_frame_buffers.pbi[0],
frame->frame_type, &sd);
} else
} else {
return VPX_CODEC_INVALID_PARAM;
}
}
static vpx_codec_err_t vp8_set_postproc(vpx_codec_alg_priv_t *ctx,
@ -569,8 +576,9 @@ static vpx_codec_err_t vp8_set_postproc(vpx_codec_alg_priv_t *ctx,
ctx->postproc_cfg_set = 1;
ctx->postproc_cfg = *((vp8_postproc_cfg_t *)data);
return VPX_CODEC_OK;
} else
} else {
return VPX_CODEC_INVALID_PARAM;
}
#else
(void)ctx;
@ -639,8 +647,9 @@ static vpx_codec_err_t vp8_get_last_ref_updates(vpx_codec_alg_priv_t *ctx,
pbi->common.refresh_last_frame * (int)VP8_LAST_FRAME;
return VPX_CODEC_OK;
} else
} else {
return VPX_CODEC_INVALID_PARAM;
}
}
extern int vp8dx_references_buffer(VP8_COMMON *oci, int ref_frame);
@ -657,8 +666,9 @@ static vpx_codec_err_t vp8_get_last_ref_frame(vpx_codec_alg_priv_t *ctx,
(vp8dx_references_buffer(oci, LAST_FRAME) ? VP8_LAST_FRAME : 0);
return VPX_CODEC_OK;
} else
} else {
return VPX_CODEC_INVALID_PARAM;
}
}
static vpx_codec_err_t vp8_get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
@ -671,8 +681,9 @@ static vpx_codec_err_t vp8_get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
if (frame == NULL) return VPX_CODEC_ERROR;
*corrupted = frame->corrupted;
return VPX_CODEC_OK;
} else
} else {
return VPX_CODEC_INVALID_PARAM;
}
}
static vpx_codec_err_t vp8_set_decryptor(vpx_codec_alg_priv_t *ctx,