Use pre-increment (++i) instead of post-increment (i++) in for loops.

Applied the following regex:
search for: (for.*\(.*;.*;) ([a-zA-Z_]*)\+\+\)
replace with: \1 ++\2)

This misses some for loops,
e.g.: for (mb_col = 0; mb_col < oci->mb_cols; mb_col++, mi++)

Change-Id: Icf5f6fb93cced0992e0bb71d2241780f7fb1f0a8
This commit is contained in:
Jim Bankoski 2016-07-18 06:54:50 -07:00
parent 106a8a1536
commit 3e04114f3d
60 changed files with 472 additions and 472 deletions

View File

@ -19,7 +19,7 @@
void vp8_de_alloc_frame_buffers(VP8_COMMON *oci) {
int i;
for (i = 0; i < NUM_YV12_BUFFERS; i++)
for (i = 0; i < NUM_YV12_BUFFERS; ++i)
vp8_yv12_de_alloc_frame_buffer(&oci->yv12_fb[i]);
vp8_yv12_de_alloc_frame_buffer(&oci->temp_scale_frame);
@ -56,7 +56,7 @@ int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height) {
if ((height & 0xf) != 0) height += 16 - (height & 0xf);
for (i = 0; i < NUM_YV12_BUFFERS; i++) {
for (i = 0; i < NUM_YV12_BUFFERS; ++i) {
oci->fb_idx_ref_cnt[i] = 0;
oci->yv12_fb[i].flags = 0;
if (vp8_yv12_alloc_frame_buffer(&oci->yv12_fb[i], width, height,

View File

@ -15,7 +15,7 @@ void vp8_dequant_idct_add_y_block_v6(short *q, short *dq, unsigned char *dst,
int stride, char *eobs) {
int i;
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
if (eobs[0] > 1)
vp8_dequant_idct_add_v6(q, dq, dst, stride);
else if (eobs[0] == 1) {
@ -56,7 +56,7 @@ void vp8_dequant_idct_add_uv_block_v6(short *q, short *dq, unsigned char *dstu,
char *eobs) {
int i;
for (i = 0; i < 2; i++) {
for (i = 0; i < 2; ++i) {
if (eobs[0] > 1)
vp8_dequant_idct_add_v6(q, dq, dstu, stride);
else if (eobs[0] == 1) {
@ -77,7 +77,7 @@ void vp8_dequant_idct_add_uv_block_v6(short *q, short *dq, unsigned char *dstu,
eobs += 2;
}
for (i = 0; i < 2; i++) {
for (i = 0; i < 2; ++i) {
if (eobs[0] > 1)
vp8_dequant_idct_add_v6(q, dq, dstv, stride);
else if (eobs[0] == 1) {

View File

@ -15,7 +15,7 @@ void vp8_copy_mem8x4_neon(unsigned char *src, int src_stride,
uint8x8_t vtmp;
int r;
for (r = 0; r < 4; r++) {
for (r = 0; r < 4; ++r) {
vtmp = vld1_u8(src);
vst1_u8(dst, vtmp);
src += src_stride;
@ -28,7 +28,7 @@ void vp8_copy_mem8x8_neon(unsigned char *src, int src_stride,
uint8x8_t vtmp;
int r;
for (r = 0; r < 8; r++) {
for (r = 0; r < 8; ++r) {
vtmp = vld1_u8(src);
vst1_u8(dst, vtmp);
src += src_stride;
@ -41,7 +41,7 @@ void vp8_copy_mem16x16_neon(unsigned char *src, int src_stride,
int r;
uint8x16_t qtmp;
for (r = 0; r < 16; r++) {
for (r = 0; r < 16; ++r) {
qtmp = vld1q_u8(src);
vst1q_u8(dst, qtmp);
src += src_stride;

View File

@ -22,7 +22,7 @@ void vp8_dc_only_idct_add_neon(int16_t input_dc, unsigned char *pred_ptr,
qAdd = vdupq_n_u16(a1);
for (i = 0; i < 2; i++) {
for (i = 0; i < 2; ++i) {
d2u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d2u32, 0);
pred_ptr += pred_stride;
d2u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d2u32, 1);

View File

@ -22,7 +22,7 @@ void vp8_dequant_idct_add_y_block_neon(short *q, short *dq, unsigned char *dst,
int stride, char *eobs) {
int i;
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
if (((short *)(eobs))[0]) {
if (((short *)eobs)[0] & 0xfefe)
idct_dequant_full_2x_neon(q, dq, dst, stride);

View File

@ -924,7 +924,7 @@ void vp8_sixtap_predict16x16_neon(unsigned char *src_ptr,
// load src data
src_tmp = src_ptr - src_pixels_per_line * 2;
for (i = 0; i < 2; i++) {
for (i = 0; i < 2; ++i) {
src = src_tmp + i * 8;
dst = dst_ptr + i * 8;
d18u8 = vld1_u8(src);
@ -937,7 +937,7 @@ void vp8_sixtap_predict16x16_neon(unsigned char *src_ptr,
src += src_pixels_per_line;
d22u8 = vld1_u8(src);
src += src_pixels_per_line;
for (j = 0; j < 4; j++) {
for (j = 0; j < 4; ++j) {
d23u8 = vld1_u8(src);
src += src_pixels_per_line;
d24u8 = vld1_u8(src);
@ -1034,7 +1034,7 @@ void vp8_sixtap_predict16x16_neon(unsigned char *src_ptr,
if (yoffset == 0) { // firstpass_filter4x4_only
src = src_ptr - 2;
dst = dst_ptr;
for (i = 0; i < 8; i++) {
for (i = 0; i < 8; ++i) {
d6u8 = vld1_u8(src);
d7u8 = vld1_u8(src + 8);
d8u8 = vld1_u8(src + 16);
@ -1128,7 +1128,7 @@ void vp8_sixtap_predict16x16_neon(unsigned char *src_ptr,
src = src_ptr - 2 - src_pixels_per_line * 2;
tmpp = tmp;
for (i = 0; i < 7; i++) {
for (i = 0; i < 7; ++i) {
d6u8 = vld1_u8(src);
d7u8 = vld1_u8(src + 8);
d8u8 = vld1_u8(src + 16);
@ -1273,7 +1273,7 @@ void vp8_sixtap_predict16x16_neon(unsigned char *src_ptr,
d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
for (i = 0; i < 2; i++) {
for (i = 0; i < 2; ++i) {
dst = dst_ptr + 8 * i;
tmpp = tmp + 8 * i;
d18u8 = vld1_u8(tmpp);
@ -1286,7 +1286,7 @@ void vp8_sixtap_predict16x16_neon(unsigned char *src_ptr,
tmpp += 16;
d22u8 = vld1_u8(tmpp);
tmpp += 16;
for (j = 0; j < 4; j++) {
for (j = 0; j < 4; ++j) {
d23u8 = vld1_u8(tmpp);
tmpp += 16;
d24u8 = vld1_u8(tmpp);

View File

@ -18,7 +18,7 @@ void vp8_copy32xn_c(const unsigned char *src_ptr, int src_stride,
unsigned char *dst_ptr, int dst_stride, int height) {
int r;
for (r = 0; r < height; r++) {
for (r = 0; r < height; ++r) {
memcpy(dst_ptr, src_ptr, 32);
src_ptr += src_stride;

View File

@ -22,8 +22,8 @@ void vp8_print_modes_and_motion_vectors(MODE_INFO *mi, int rows, int cols,
mb_index = 0;
fprintf(mvs, "Mb Modes for Frame %d\n", frame);
for (mb_row = 0; mb_row < rows; mb_row++) {
for (mb_col = 0; mb_col < cols; mb_col++) {
for (mb_row = 0; mb_row < rows; ++mb_row) {
for (mb_col = 0; mb_col < cols; ++mb_col) {
fprintf(mvs, "%2d ", mi[mb_index].mbmi.mode);
mb_index++;
@ -38,8 +38,8 @@ void vp8_print_modes_and_motion_vectors(MODE_INFO *mi, int rows, int cols,
mb_index = 0;
fprintf(mvs, "Mb mv ref for Frame %d\n", frame);
for (mb_row = 0; mb_row < rows; mb_row++) {
for (mb_col = 0; mb_col < cols; mb_col++) {
for (mb_row = 0; mb_row < rows; ++mb_row) {
for (mb_col = 0; mb_col < cols; ++mb_col) {
fprintf(mvs, "%2d ", mi[mb_index].mbmi.ref_frame);
mb_index++;
@ -55,8 +55,8 @@ void vp8_print_modes_and_motion_vectors(MODE_INFO *mi, int rows, int cols,
mb_index = 0;
fprintf(mvs, "UV Modes for Frame %d\n", frame);
for (mb_row = 0; mb_row < rows; mb_row++) {
for (mb_col = 0; mb_col < cols; mb_col++) {
for (mb_row = 0; mb_row < rows; ++mb_row) {
for (mb_col = 0; mb_col < cols; ++mb_col) {
fprintf(mvs, "%2d ", mi[mb_index].mbmi.uv_mode);
mb_index++;
@ -73,11 +73,11 @@ void vp8_print_modes_and_motion_vectors(MODE_INFO *mi, int rows, int cols,
{
int b_row;
for (b_row = 0; b_row < 4 * rows; b_row++) {
for (b_row = 0; b_row < 4 * rows; ++b_row) {
int b_col;
int bindex;
for (b_col = 0; b_col < 4 * cols; b_col++) {
for (b_col = 0; b_col < 4 * cols; ++b_col) {
mb_index = (b_row >> 2) * (cols + 1) + (b_col >> 2);
bindex = (b_row & 3) * 4 + (b_col & 3);
@ -96,8 +96,8 @@ void vp8_print_modes_and_motion_vectors(MODE_INFO *mi, int rows, int cols,
mb_index = 0;
fprintf(mvs, "MVs for Frame %d\n", frame);
for (mb_row = 0; mb_row < rows; mb_row++) {
for (mb_col = 0; mb_col < cols; mb_col++) {
for (mb_row = 0; mb_row < rows; ++mb_row) {
for (mb_col = 0; mb_col < cols; ++mb_col) {
fprintf(mvs, "%5d:%-5d", mi[mb_index].mbmi.mv.as_mv.row / 2,
mi[mb_index].mbmi.mv.as_mv.col / 2);
@ -115,11 +115,11 @@ void vp8_print_modes_and_motion_vectors(MODE_INFO *mi, int rows, int cols,
{
int b_row;
for (b_row = 0; b_row < 4 * rows; b_row++) {
for (b_row = 0; b_row < 4 * rows; ++b_row) {
int b_col;
int bindex;
for (b_col = 0; b_col < 4 * cols; b_col++) {
for (b_col = 0; b_col < 4 * cols; ++b_col) {
mb_index = (b_row >> 2) * (cols + 1) + (b_col >> 2);
bindex = (b_row & 3) * 4 + (b_col & 3);
fprintf(mvs, "%3d:%-3d ", mi[mb_index].bmi[bindex].mv.as_mv.row,

View File

@ -18,7 +18,7 @@ void vp8_dequantize_b_c(BLOCKD *d, short *DQC) {
short *DQ = d->dqcoeff;
short *Q = d->qcoeff;
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
DQ[i] = Q[i] * DQC[i];
}
}
@ -27,7 +27,7 @@ void vp8_dequant_idct_add_c(short *input, short *dq, unsigned char *dest,
int stride) {
int i;
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
input[i] = dq[i] * input[i];
}

View File

@ -51,7 +51,7 @@ DECLARE_ALIGNED(16, const short, vp8_default_inv_zig_zag[16]) = {
{
int i;
for (i = 0; i < 16; i++)
for (i = 0; i < 16; ++i)
{
vp8_default_zig_zag_mask[vp8_default_zig_zag1d[i]] = 1 << i;
}

View File

@ -33,7 +33,7 @@ static void copy_and_extend_plane(unsigned char *s, /* source */
dest_ptr1 = d - el;
dest_ptr2 = d + w;
for (i = 0; i < h; i++) {
for (i = 0; i < h; ++i) {
memset(dest_ptr1, src_ptr1[0], el);
memcpy(dest_ptr1 + el, src_ptr1, w);
memset(dest_ptr2, src_ptr2[0], er);
@ -52,12 +52,12 @@ static void copy_and_extend_plane(unsigned char *s, /* source */
dest_ptr2 = d + dp * (h)-el;
linesize = el + er + w;
for (i = 0; i < et; i++) {
for (i = 0; i < et; ++i) {
memcpy(dest_ptr1, src_ptr1, linesize);
dest_ptr1 += dp;
}
for (i = 0; i < eb; i++) {
for (i = 0; i < eb; ++i) {
memcpy(dest_ptr2, src_ptr2, linesize);
dest_ptr2 += dp;
}
@ -135,7 +135,7 @@ void vp8_extend_mb_row(YV12_BUFFER_CONFIG *ybf, unsigned char *YPtr,
UPtr += ybf->uv_stride * 6;
VPtr += ybf->uv_stride * 6;
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
YPtr[i] = YPtr[-1];
UPtr[i] = UPtr[-1];
VPtr[i] = VPtr[-1];
@ -145,7 +145,7 @@ void vp8_extend_mb_row(YV12_BUFFER_CONFIG *ybf, unsigned char *YPtr,
UPtr += ybf->uv_stride;
VPtr += ybf->uv_stride;
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
YPtr[i] = YPtr[-1];
UPtr[i] = UPtr[-1];
VPtr[i] = VPtr[-1];

View File

@ -38,8 +38,8 @@ static void filter_block2d_first_pass(unsigned char *src_ptr, int *output_ptr,
unsigned int i, j;
int Temp;
for (i = 0; i < output_height; i++) {
for (j = 0; j < output_width; j++) {
for (i = 0; i < output_height; ++i) {
for (j = 0; j < output_width; ++j) {
Temp = ((int)src_ptr[-2 * (int)pixel_step] * vp8_filter[0]) +
((int)src_ptr[-1 * (int)pixel_step] * vp8_filter[1]) +
((int)src_ptr[0] * vp8_filter[2]) +
@ -76,8 +76,8 @@ static void filter_block2d_second_pass(int *src_ptr, unsigned char *output_ptr,
unsigned int i, j;
int Temp;
for (i = 0; i < output_height; i++) {
for (j = 0; j < output_width; j++) {
for (i = 0; i < output_height; ++i) {
for (j = 0; j < output_width; ++j) {
/* Apply filter */
Temp = ((int)src_ptr[-2 * (int)pixel_step] * vp8_filter[0]) +
((int)src_ptr[-1 * (int)pixel_step] * vp8_filter[1]) +
@ -215,8 +215,8 @@ static void filter_block2d_bil_first_pass(
unsigned int height, unsigned int width, const short *vp8_filter) {
unsigned int i, j;
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
for (i = 0; i < height; ++i) {
for (j = 0; j < width; ++j) {
/* Apply bilinear filter */
dst_ptr[j] =
(((int)src_ptr[0] * vp8_filter[0]) +
@ -263,8 +263,8 @@ static void filter_block2d_bil_second_pass(unsigned short *src_ptr,
unsigned int i, j;
int Temp;
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
for (i = 0; i < height; ++i) {
for (j = 0; j < width; ++j) {
/* Apply filter */
Temp = ((int)src_ptr[0] * vp8_filter[0]) +
((int)src_ptr[width] * vp8_filter[1]) + (VP8_FILTER_WEIGHT / 2);
@ -333,7 +333,7 @@ void vp8_bilinear_predict4x4_c(unsigned char *src_ptr, int src_pixels_per_line,
bilinear_predict4x4_mmx(src_ptr, src_pixels_per_line, xoffset, yoffset, temp1, 4);
filter_block2d_bil(src_ptr, temp2, src_pixels_per_line, 4, HFilter, VFilter, 4, 4);
for (i = 0; i < 16; i++)
for (i = 0; i < 16; ++i)
{
if (temp1[i] != temp2[i])
{

View File

@ -69,7 +69,7 @@ static int get_cpu_count() {
ULONG status;
core_count = 0;
for (proc_id = 1;; proc_id++) {
for (proc_id = 1;; ++proc_id) {
if (DosGetProcessorStatus(proc_id, &status)) break;
if (status == PROC_ONLINE) core_count++;

View File

@ -22,8 +22,8 @@ void vp8_dequant_idct_add_y_block_c(short *q, short *dq, unsigned char *dst,
int stride, char *eobs) {
int i, j;
for (i = 0; i < 4; i++) {
for (j = 0; j < 4; j++) {
for (i = 0; i < 4; ++i) {
for (j = 0; j < 4; ++j) {
if (*eobs++ > 1)
vp8_dequant_idct_add_c(q, dq, dst, stride);
else {
@ -44,8 +44,8 @@ void vp8_dequant_idct_add_uv_block_c(short *q, short *dq, unsigned char *dstu,
char *eobs) {
int i, j;
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++) {
for (i = 0; i < 2; ++i) {
for (j = 0; j < 2; ++j) {
if (*eobs++ > 1)
vp8_dequant_idct_add_c(q, dq, dstu, stride);
else {
@ -60,8 +60,8 @@ void vp8_dequant_idct_add_uv_block_c(short *q, short *dq, unsigned char *dstu,
dstu += 4 * stride - 8;
}
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++) {
for (i = 0; i < 2; ++i) {
for (j = 0; j < 2; ++j) {
if (*eobs++ > 1)
vp8_dequant_idct_add_c(q, dq, dstv, stride);
else {

View File

@ -38,7 +38,7 @@ void vp8_short_idct4x4llm_c(short *input, unsigned char *pred_ptr,
int temp1, temp2;
int shortpitch = 4;
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
a1 = ip[0] + ip[8];
b1 = ip[0] - ip[8];
@ -63,7 +63,7 @@ void vp8_short_idct4x4llm_c(short *input, unsigned char *pred_ptr,
ip = output;
op = output;
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
a1 = ip[0] + ip[2];
b1 = ip[0] - ip[2];
@ -86,8 +86,8 @@ void vp8_short_idct4x4llm_c(short *input, unsigned char *pred_ptr,
}
ip = output;
for (r = 0; r < 4; r++) {
for (c = 0; c < 4; c++) {
for (r = 0; r < 4; ++r) {
for (c = 0; c < 4; ++c) {
int a = ip[c] + pred_ptr[c];
if (a < 0) a = 0;
@ -108,8 +108,8 @@ void vp8_dc_only_idct_add_c(short input_dc, unsigned char *pred_ptr,
int a1 = ((input_dc + 4) >> 3);
int r, c;
for (r = 0; r < 4; r++) {
for (c = 0; c < 4; c++) {
for (r = 0; r < 4; ++r) {
for (c = 0; c < 4; ++c) {
int a = a1 + pred_ptr[c];
if (a < 0) a = 0;
@ -132,7 +132,7 @@ void vp8_short_inv_walsh4x4_c(short *input, short *mb_dqcoeff) {
short *ip = input;
short *op = output;
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
a1 = ip[0] + ip[12];
b1 = ip[4] + ip[8];
c1 = ip[4] - ip[8];
@ -149,7 +149,7 @@ void vp8_short_inv_walsh4x4_c(short *input, short *mb_dqcoeff) {
ip = output;
op = output;
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
a1 = ip[0] + ip[3];
b1 = ip[1] + ip[2];
c1 = ip[1] - ip[2];
@ -169,7 +169,7 @@ void vp8_short_inv_walsh4x4_c(short *input, short *mb_dqcoeff) {
op += 4;
}
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
mb_dqcoeff[i * 16] = output[i];
}
}
@ -179,7 +179,7 @@ void vp8_short_inv_walsh4x4_1_c(short *input, short *mb_dqcoeff) {
int a1;
a1 = ((input[0] + 3) >> 3);
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
mb_dqcoeff[i * 16] = a1;
}
}

View File

@ -27,7 +27,7 @@ extern "C" {
static void eob_adjust(char *eobs, short *diff) {
/* eob adjust.... the idct can only skip if both the dc and eob are zero */
int js;
for (js = 0; js < 16; js++) {
for (js = 0; js < 16; ++js) {
if ((eobs[js] == 0) && (diff[0] != 0)) eobs[js]++;
diff += 16;
}

View File

@ -13,27 +13,27 @@
void vp8_setup_block_dptrs(MACROBLOCKD *x) {
int r, c;
for (r = 0; r < 4; r++) {
for (c = 0; c < 4; c++) {
for (r = 0; r < 4; ++r) {
for (c = 0; c < 4; ++c) {
x->block[r * 4 + c].predictor = x->predictor + r * 4 * 16 + c * 4;
}
}
for (r = 0; r < 2; r++) {
for (c = 0; c < 2; c++) {
for (r = 0; r < 2; ++r) {
for (c = 0; c < 2; ++c) {
x->block[16 + r * 2 + c].predictor =
x->predictor + 256 + r * 4 * 8 + c * 4;
}
}
for (r = 0; r < 2; r++) {
for (c = 0; c < 2; c++) {
for (r = 0; r < 2; ++r) {
for (c = 0; c < 2; ++c) {
x->block[20 + r * 2 + c].predictor =
x->predictor + 320 + r * 4 * 8 + c * 4;
}
}
for (r = 0; r < 25; r++) {
for (r = 0; r < 25; ++r) {
x->block[r].qcoeff = x->qcoeff + r * 16;
x->block[r].dqcoeff = x->dqcoeff + r * 16;
x->block[r].eob = x->eobs + r;
@ -43,13 +43,13 @@ void vp8_setup_block_dptrs(MACROBLOCKD *x) {
void vp8_build_block_doffsets(MACROBLOCKD *x) {
int block;
for (block = 0; block < 16; block++) /* y blocks */
for (block = 0; block < 16; ++block) /* y blocks */
{
x->block[block].offset =
(block >> 2) * 4 * x->dst.y_stride + (block & 3) * 4;
}
for (block = 16; block < 20; block++) /* U and V blocks */
for (block = 16; block < 20; ++block) /* U and V blocks */
{
x->block[block + 4].offset = x->block[block].offset =
((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4;

View File

@ -33,8 +33,8 @@ static void filter_by_weight(unsigned char *src, int src_stride,
int rounding_bit = 1 << (MFQE_PRECISION - 1);
int r, c;
for (r = 0; r < block_size; r++) {
for (c = 0; c < block_size; c++) {
for (r = 0; r < block_size; ++r) {
for (c = 0; c < block_size; ++c) {
dst[c] = (src[c] * src_weight + dst[c] * dst_weight + rounding_bit) >>
MFQE_PRECISION;
}
@ -253,8 +253,8 @@ void vp8_multiframe_quality_enhance(VP8_COMMON *cm) {
vd_ptr = dest->v_buffer;
/* postprocess each macro block */
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
/* if motion is high there will likely be no benefit */
if (frame_type == INTER_FRAME)
totmap = qualify_inter_mb(mode_info_context, map);

View File

@ -17,7 +17,7 @@ void vp8_dequant_idct_add_dspr2(short *input, short *dq, unsigned char *dest,
int stride) {
int i;
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
input[i] = dq[i] * input[i];
}

View File

@ -85,9 +85,9 @@ inline void prefetch_store(unsigned char *dst) {
void dsputil_static_init(void) {
int i;
for (i = 0; i < 256; i++) ff_cropTbl[i + CROP_WIDTH] = i;
for (i = 0; i < 256; ++i) ff_cropTbl[i + CROP_WIDTH] = i;
for (i = 0; i < CROP_WIDTH; i++) {
for (i = 0; i < CROP_WIDTH; ++i) {
ff_cropTbl[i] = 0;
ff_cropTbl[i + CROP_WIDTH + 256] = 255;
}
@ -112,7 +112,7 @@ void vp8_filter_block2d_first_pass_4(unsigned char *RESTRICT src_ptr,
/* if (xoffset == 0) we don't need any filtering */
if (vector3b == 0) {
for (i = 0; i < output_height; i++) {
for (i = 0; i < output_height; ++i) {
/* prefetch src_ptr data to cache memory */
prefetch_load(src_ptr + src_pixels_per_line);
dst_ptr[0] = src_ptr[0];
@ -290,7 +290,7 @@ void vp8_filter_block2d_first_pass_8_all(unsigned char *RESTRICT src_ptr,
/* if (xoffset == 0) we don't need any filtering */
if (xoffset == 0) {
for (i = 0; i < output_height; i++) {
for (i = 0; i < output_height; ++i) {
/* prefetch src_ptr data to cache memory */
prefetch_load(src_ptr + src_pixels_per_line);
@ -802,7 +802,7 @@ void vp8_filter_block2d_first_pass16_0(unsigned char *RESTRICT src_ptr,
prefetch_store(output_ptr + 32);
/* copy memory from src buffer to dst buffer */
for (i = 0; i < 7; i++) {
for (i = 0; i < 7; ++i) {
__asm__ __volatile__(
"ulw %[Temp1], 0(%[src_ptr]) \n\t"
"ulw %[Temp2], 4(%[src_ptr]) \n\t"

View File

@ -17,8 +17,8 @@ void vp8_dequant_idct_add_y_block_dspr2(short *q, short *dq, unsigned char *dst,
int stride, char *eobs) {
int i, j;
for (i = 0; i < 4; i++) {
for (j = 0; j < 4; j++) {
for (i = 0; i < 4; ++i) {
for (j = 0; j < 4; ++j) {
if (*eobs++ > 1)
vp8_dequant_idct_add_dspr2(q, dq, dst, stride);
else {
@ -40,8 +40,8 @@ void vp8_dequant_idct_add_uv_block_dspr2(short *q, short *dq,
char *eobs) {
int i, j;
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++) {
for (i = 0; i < 2; ++i) {
for (j = 0; j < 2; ++j) {
if (*eobs++ > 1)
vp8_dequant_idct_add_dspr2(q, dq, dstu, stride);
else {
@ -56,8 +56,8 @@ void vp8_dequant_idct_add_uv_block_dspr2(short *q, short *dq,
dstu += 4 * stride - 8;
}
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++) {
for (i = 0; i < 2; ++i) {
for (j = 0; j < 2; ++j) {
if (*eobs++ > 1)
vp8_dequant_idct_add_dspr2(q, dq, dstv, stride);
else {

View File

@ -189,8 +189,8 @@ void vp8_short_idct4x4llm_dspr2(short *input, unsigned char *pred_ptr,
ip = output;
for (r = 0; r < 4; r++) {
for (c = 0; c < 4; c++) {
for (r = 0; r < 4; ++r) {
for (c = 0; c < 4; ++c) {
short a = ip[c] + pred_ptr[c];
dst_ptr[c] = cm[a];
}
@ -311,7 +311,7 @@ void vp8_short_inv_walsh4x4_dspr2(short *input, short *mb_dqcoeff) {
op += 4;
}
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
mb_dqcoeff[i * 16] = output[i];
}
}

View File

@ -110,10 +110,10 @@ void vp8_deblock(VP8_COMMON *cm, YV12_BUFFER_CONFIG *source,
(void)flag;
if (ppl > 0) {
for (mbr = 0; mbr < cm->mb_rows; mbr++) {
for (mbr = 0; mbr < cm->mb_rows; ++mbr) {
unsigned char *ylptr = ylimits;
unsigned char *uvlptr = uvlimits;
for (mbc = 0; mbc < cm->mb_cols; mbc++) {
for (mbc = 0; mbc < cm->mb_cols; ++mbc) {
unsigned char mb_ppl;
if (mode_info_context->mbmi.mb_skip_coeff)
@ -167,7 +167,7 @@ void vp8_de_noise(VP8_COMMON *cm, YV12_BUFFER_CONFIG *source,
memset(limits, (unsigned char)ppl, 16 * mb_cols);
/* TODO: The original code don't filter the 2 outer rows and columns. */
for (mbr = 0; mbr < mb_rows; mbr++) {
for (mbr = 0; mbr < mb_rows; ++mbr) {
vpx_post_proc_down_and_across_mb_row(
source->y_buffer + 16 * mbr * source->y_stride,
source->y_buffer + 16 * mbr * source->y_stride, source->y_stride,
@ -197,8 +197,8 @@ void vp8_blend_mb_inner_c(unsigned char *y, unsigned char *u, unsigned char *v,
int v1_const = v_1 * ((1 << 16) - alpha);
y += 2 * stride + 2;
for (i = 0; i < 12; i++) {
for (j = 0; j < 12; j++) {
for (i = 0; i < 12; ++i) {
for (j = 0; j < 12; ++j) {
y[j] = (y[j] * alpha + y1_const) >> 16;
}
y += stride;
@ -209,8 +209,8 @@ void vp8_blend_mb_inner_c(unsigned char *y, unsigned char *u, unsigned char *v,
u += stride + 1;
v += stride + 1;
for (i = 0; i < 6; i++) {
for (j = 0; j < 6; j++) {
for (i = 0; i < 6; ++i) {
for (j = 0; j < 6; ++j) {
u[j] = (u[j] * alpha + u1_const) >> 16;
v[j] = (v[j] * alpha + v1_const) >> 16;
}
@ -229,14 +229,14 @@ void vp8_blend_mb_outer_c(unsigned char *y, unsigned char *u, unsigned char *v,
int u1_const = u_1 * ((1 << 16) - alpha);
int v1_const = v_1 * ((1 << 16) - alpha);
for (i = 0; i < 2; i++) {
for (j = 0; j < 16; j++) {
for (i = 0; i < 2; ++i) {
for (j = 0; j < 16; ++j) {
y[j] = (y[j] * alpha + y1_const) >> 16;
}
y += stride;
}
for (i = 0; i < 12; i++) {
for (i = 0; i < 12; ++i) {
y[0] = (y[0] * alpha + y1_const) >> 16;
y[1] = (y[1] * alpha + y1_const) >> 16;
y[14] = (y[14] * alpha + y1_const) >> 16;
@ -244,8 +244,8 @@ void vp8_blend_mb_outer_c(unsigned char *y, unsigned char *u, unsigned char *v,
y += stride;
}
for (i = 0; i < 2; i++) {
for (j = 0; j < 16; j++) {
for (i = 0; i < 2; ++i) {
for (j = 0; j < 16; ++j) {
y[j] = (y[j] * alpha + y1_const) >> 16;
}
y += stride;
@ -253,14 +253,14 @@ void vp8_blend_mb_outer_c(unsigned char *y, unsigned char *u, unsigned char *v,
stride >>= 1;
for (j = 0; j < 8; j++) {
for (j = 0; j < 8; ++j) {
u[j] = (u[j] * alpha + u1_const) >> 16;
v[j] = (v[j] * alpha + v1_const) >> 16;
}
u += stride;
v += stride;
for (i = 0; i < 6; i++) {
for (i = 0; i < 6; ++i) {
u[0] = (u[0] * alpha + u1_const) >> 16;
v[0] = (v[0] * alpha + v1_const) >> 16;
@ -271,7 +271,7 @@ void vp8_blend_mb_outer_c(unsigned char *y, unsigned char *u, unsigned char *v,
v += stride;
}
for (j = 0; j < 8; j++) {
for (j = 0; j < 8; ++j) {
u[j] = (u[j] * alpha + u1_const) >> 16;
v[j] = (v[j] * alpha + v1_const) >> 16;
}
@ -284,8 +284,8 @@ void vp8_blend_b_c(unsigned char *y, unsigned char *u, unsigned char *v,
int u1_const = u_1 * ((1 << 16) - alpha);
int v1_const = v_1 * ((1 << 16) - alpha);
for (i = 0; i < 4; i++) {
for (j = 0; j < 4; j++) {
for (i = 0; i < 4; ++i) {
for (j = 0; j < 4; ++j) {
y[j] = (y[j] * alpha + y1_const) >> 16;
}
y += stride;
@ -293,8 +293,8 @@ void vp8_blend_b_c(unsigned char *y, unsigned char *u, unsigned char *v,
stride >>= 1;
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++) {
for (i = 0; i < 2; ++i) {
for (j = 0; j < 2; ++j) {
u[j] = (u[j] * alpha + u1_const) >> 16;
v[j] = (v[j] * alpha + v1_const) >> 16;
}
@ -472,8 +472,8 @@ int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest,
y_ptr = post->y_buffer + 4 * post->y_stride + 4;
/* vp8_filter each macro block */
for (i = 0; i < mb_rows; i++) {
for (j = 0; j < mb_cols; j++) {
for (i = 0; i < mb_rows; ++i) {
for (j = 0; j < mb_cols; ++j) {
char zz[4];
sprintf(zz, "%c", mi[mb_index].mbmi.mode + 'a');
@ -500,8 +500,8 @@ int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest,
y_ptr = post->y_buffer + 4 * post->y_stride + 4;
/* vp8_filter each macro block */
for (i = 0; i < mb_rows; i++) {
for (j = 0; j < mb_cols; j++) {
for (i = 0; i < mb_rows; ++i) {
for (j = 0; j < mb_cols; ++j) {
char zz[4];
int dc_diff = !(mi[mb_index].mbmi.mode != B_PRED &&
mi[mb_index].mbmi.mode != SPLITMV &&

View File

@ -24,7 +24,7 @@ void vp8_copy_mem16x16_c(unsigned char *src, int src_stride, unsigned char *dst,
int dst_stride) {
int r;
for (r = 0; r < 16; r++) {
for (r = 0; r < 16; ++r) {
memcpy(dst, src, 16);
src += src_stride;
@ -36,7 +36,7 @@ void vp8_copy_mem8x8_c(unsigned char *src, int src_stride, unsigned char *dst,
int dst_stride) {
int r;
for (r = 0; r < 8; r++) {
for (r = 0; r < 8; ++r) {
memcpy(dst, src, 8);
src += src_stride;
@ -48,7 +48,7 @@ void vp8_copy_mem8x4_c(unsigned char *src, int src_stride, unsigned char *dst,
int dst_stride) {
int r;
for (r = 0; r < 4; r++) {
for (r = 0; r < 4; ++r) {
memcpy(dst, src, 8);
src += src_stride;
@ -68,7 +68,7 @@ void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre,
sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7,
pred_ptr, pitch);
} else {
for (r = 0; r < 4; r++) {
for (r = 0; r < 4; ++r) {
pred_ptr[0] = ptr[0];
pred_ptr[1] = ptr[1];
pred_ptr[2] = ptr[2];
@ -121,7 +121,7 @@ static void build_inter_predictors_b(BLOCKD *d, unsigned char *dst,
sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst,
dst_stride);
} else {
for (r = 0; r < 4; r++) {
for (r = 0; r < 4; ++r) {
dst[0] = ptr[0];
dst[1] = ptr[1];
dst[2] = ptr[2];
@ -173,8 +173,8 @@ void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x) {
unsigned char *base_pre;
/* build uv mvs */
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++) {
for (i = 0; i < 2; ++i) {
for (j = 0; j < 2; ++j) {
int yoffset = i * 8 + j * 2;
int uoffset = 16 + i * 2 + j;
int voffset = 20 + i * 2 + j;
@ -447,8 +447,8 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *x) {
static void build_4x4uvmvs(MACROBLOCKD *x) {
int i, j;
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++) {
for (i = 0; i < 2; ++i) {
for (j = 0; j < 2; ++j) {
int yoffset = i * 8 + j * 2;
int uoffset = 16 + i * 2 + j;
int voffset = 20 + i * 2 + j;

View File

@ -53,7 +53,7 @@ void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x, unsigned char *yabove_row,
int i;
intra_pred_fn fn;
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
yleft_col[i] = yleft[i * left_stride];
}
@ -76,7 +76,7 @@ void vp8_build_intra_predictors_mbuv_s(
int i;
intra_pred_fn fn;
for (i = 0; i < 8; i++) {
for (i = 0; i < 8; ++i) {
uleft_col[i] = uleft[i * left_stride];
vleft_col[i] = vleft[i * left_stride];
}

View File

@ -16,15 +16,15 @@ void vp8_setup_intra_recon(YV12_BUFFER_CONFIG *ybf) {
/* set up frame new frame for intra coded blocks */
memset(ybf->y_buffer - 1 - ybf->y_stride, 127, ybf->y_width + 5);
for (i = 0; i < ybf->y_height; i++)
for (i = 0; i < ybf->y_height; ++i)
ybf->y_buffer[ybf->y_stride * i - 1] = (unsigned char)129;
memset(ybf->u_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
for (i = 0; i < ybf->uv_height; i++)
for (i = 0; i < ybf->uv_height; ++i)
ybf->u_buffer[ybf->uv_stride * i - 1] = (unsigned char)129;
memset(ybf->v_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
for (i = 0; i < ybf->uv_height; i++)
for (i = 0; i < ybf->uv_height; ++i)
ybf->v_buffer[ybf->uv_stride * i - 1] = (unsigned char)129;
}

View File

@ -26,11 +26,11 @@ static INLINE void setup_intra_recon_left(unsigned char *y_buffer,
int uv_stride) {
int i;
for (i = 0; i < 16; i++) y_buffer[y_stride * i] = (unsigned char)129;
for (i = 0; i < 16; ++i) y_buffer[y_stride * i] = (unsigned char)129;
for (i = 0; i < 8; i++) u_buffer[uv_stride * i] = (unsigned char)129;
for (i = 0; i < 8; ++i) u_buffer[uv_stride * i] = (unsigned char)129;
for (i = 0; i < 8; i++) v_buffer[uv_stride * i] = (unsigned char)129;
for (i = 0; i < 8; ++i) v_buffer[uv_stride * i] = (unsigned char)129;
}
#ifdef __cplusplus

View File

@ -40,7 +40,7 @@ void vp8_blit_text(const char *msg, unsigned char *address, const int pitch) {
letter_bitmap = font[0];
for (fontcol = 6; fontcol >= 0; fontcol--)
for (fontrow = 0; fontrow < 5; fontrow++)
for (fontrow = 0; fontrow < 5; ++fontrow)
output_pos[fontrow * pitch + fontcol] =
((letter_bitmap >> (fontcol * 5)) & (1 << fontrow) ? 255 : 0);
@ -95,7 +95,7 @@ void vp8_blit_line(int x0, int x1, int y0, int y1, unsigned char *image,
ystep = -1;
if (steep) {
for (x = x0; x <= x1; x++) {
for (x = x0; x <= x1; ++x) {
plot(y, x, image, pitch);
error = error - deltay;
@ -105,7 +105,7 @@ void vp8_blit_line(int x0, int x1, int y0, int y1, unsigned char *image,
}
}
} else {
for (x = x0; x <= x1; x++) {
for (x = x0; x <= x1; ++x) {
plot(x, y, image, pitch);
error = error - deltay;

View File

@ -17,7 +17,7 @@
static void lf_init_lut(loop_filter_info_n *lfi) {
int filt_lvl;
for (filt_lvl = 0; filt_lvl <= MAX_LOOP_FILTER; filt_lvl++) {
for (filt_lvl = 0; filt_lvl <= MAX_LOOP_FILTER; ++filt_lvl) {
if (filt_lvl >= 40) {
lfi->hev_thr_lut[KEY_FRAME][filt_lvl] = 2;
lfi->hev_thr_lut[INTER_FRAME][filt_lvl] = 3;
@ -51,7 +51,7 @@ void vp8_loop_filter_update_sharpness(loop_filter_info_n *lfi,
int i;
/* For each possible value for the loop filter fill out limits */
for (i = 0; i <= MAX_LOOP_FILTER; i++) {
for (i = 0; i <= MAX_LOOP_FILTER; ++i) {
int filt_lvl = i;
int block_inside_limit = 0;
@ -85,7 +85,7 @@ void vp8_loop_filter_init(VP8_COMMON *cm) {
lf_init_lut(lfi);
/* init hev threshold const vectors */
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
memset(lfi->hev_thr[i], i, SIMD_WIDTH);
}
}
@ -104,7 +104,7 @@ void vp8_loop_filter_frame_init(VP8_COMMON *cm, MACROBLOCKD *mbd,
cm->last_sharpness_level = cm->sharpness_level;
}
for (seg = 0; seg < MAX_MB_SEGMENTS; seg++) {
for (seg = 0; seg < MAX_MB_SEGMENTS; ++seg) {
int lvl_seg = default_filt_lvl;
int lvl_ref, lvl_mode;
@ -149,12 +149,12 @@ void vp8_loop_filter_frame_init(VP8_COMMON *cm, MACROBLOCKD *mbd,
lfi->lvl[seg][ref][mode] = lvl_mode;
/* LAST, GOLDEN, ALT */
for (ref = 1; ref < MAX_REF_FRAMES; ref++) {
for (ref = 1; ref < MAX_REF_FRAMES; ++ref) {
/* Apply delta for reference frame */
lvl_ref = lvl_seg + mbd->ref_lf_deltas[ref];
/* Apply delta for Inter modes */
for (mode = 1; mode < 4; mode++) {
for (mode = 1; mode < 4; ++mode) {
lvl_mode = lvl_ref + mbd->mode_lf_deltas[mode];
/* clamp */
lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0;
@ -175,7 +175,7 @@ void vp8_loop_filter_row_normal(VP8_COMMON *cm, MODE_INFO *mode_info_context,
loop_filter_info lfi;
FRAME_TYPE frame_type = cm->frame_type;
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
int skip_lf = (mode_info_context->mbmi.mode != B_PRED &&
mode_info_context->mbmi.mode != SPLITMV &&
mode_info_context->mbmi.mb_skip_coeff);
@ -228,7 +228,7 @@ void vp8_loop_filter_row_simple(VP8_COMMON *cm, MODE_INFO *mode_info_context,
loop_filter_info_n *lfi_n = &cm->lf_info;
(void)post_uvstride;
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
int skip_lf = (mode_info_context->mbmi.mode != B_PRED &&
mode_info_context->mbmi.mode != SPLITMV &&
mode_info_context->mbmi.mb_skip_coeff);
@ -294,8 +294,8 @@ void vp8_loop_filter_frame(VP8_COMMON *cm, MACROBLOCKD *mbd, int frame_type) {
/* vp8_filter each macro block */
if (cm->filter_type == NORMAL_LOOPFILTER) {
for (mb_row = 0; mb_row < mb_rows; mb_row++) {
for (mb_col = 0; mb_col < mb_cols; mb_col++) {
for (mb_row = 0; mb_row < mb_rows; ++mb_row) {
for (mb_col = 0; mb_col < mb_cols; ++mb_col) {
int skip_lf = (mode_info_context->mbmi.mode != B_PRED &&
mode_info_context->mbmi.mode != SPLITMV &&
mode_info_context->mbmi.mb_skip_coeff);
@ -345,8 +345,8 @@ void vp8_loop_filter_frame(VP8_COMMON *cm, MACROBLOCKD *mbd, int frame_type) {
}
} else /* SIMPLE_LOOPFILTER */
{
for (mb_row = 0; mb_row < mb_rows; mb_row++) {
for (mb_col = 0; mb_col < mb_cols; mb_col++) {
for (mb_row = 0; mb_row < mb_rows; ++mb_row) {
for (mb_col = 0; mb_col < mb_cols; ++mb_col) {
int skip_lf = (mode_info_context->mbmi.mode != B_PRED &&
mode_info_context->mbmi.mode != SPLITMV &&
mode_info_context->mbmi.mb_skip_coeff);
@ -416,8 +416,8 @@ void vp8_loop_filter_frame_yonly(VP8_COMMON *cm, MACROBLOCKD *mbd,
y_ptr = post->y_buffer;
/* vp8_filter each macro block */
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
int skip_lf = (mode_info_context->mbmi.mode != B_PRED &&
mode_info_context->mbmi.mode != SPLITMV &&
mode_info_context->mbmi.mb_skip_coeff);
@ -514,8 +514,8 @@ void vp8_loop_filter_partial_frame(VP8_COMMON *cm, MACROBLOCKD *mbd,
mode_info_context = cm->mi + (post->y_height >> 5) * (mb_cols + 1);
/* vp8_filter each macro block */
for (mb_row = 0; mb_row < (linestocopy >> 4); mb_row++) {
for (mb_col = 0; mb_col < mb_cols; mb_col++) {
for (mb_row = 0; mb_row < (linestocopy >> 4); ++mb_row) {
for (mb_col = 0; mb_col < mb_cols; ++mb_col) {
int skip_lf = (mode_info_context->mbmi.mode != B_PRED &&
mode_info_context->mbmi.mode != SPLITMV &&
mode_info_context->mbmi.mb_skip_coeff);

View File

@ -26,7 +26,7 @@ void vp8_dequant_idct_add_y_block_mmx(short *q, short *dq, unsigned char *dst,
int stride, char *eobs) {
int i;
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
if (eobs[0] > 1)
vp8_dequant_idct_add_mmx(q, dq, dst, stride);
else if (eobs[0] == 1) {
@ -67,7 +67,7 @@ void vp8_dequant_idct_add_uv_block_mmx(short *q, short *dq, unsigned char *dstu,
char *eobs) {
int i;
for (i = 0; i < 2; i++) {
for (i = 0; i < 2; ++i) {
if (eobs[0] > 1)
vp8_dequant_idct_add_mmx(q, dq, dstu, stride);
else if (eobs[0] == 1) {
@ -88,7 +88,7 @@ void vp8_dequant_idct_add_uv_block_mmx(short *q, short *dq, unsigned char *dstu,
eobs += 2;
}
for (i = 0; i < 2; i++) {
for (i = 0; i < 2; ++i) {
if (eobs[0] > 1)
vp8_dequant_idct_add_mmx(q, dq, dstv, stride);
else if (eobs[0] == 1) {

View File

@ -20,7 +20,7 @@ void vp8_dequant_idct_add_y_block_sse2(short *q, short *dq, unsigned char *dst,
int stride, char *eobs) {
int i;
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
if (((short *)(eobs))[0]) {
if (((short *)(eobs))[0] & 0xfefe)
vp8_idct_dequant_full_2x_sse2(q, dq, dst, stride);

View File

@ -445,7 +445,7 @@ void vp8_sixtap_predict4x4_ssse3(unsigned char *src_ptr,
* six-tap function handles all possible offsets. */
int r;
for (r = 0; r < 4; r++) {
for (r = 0; r < 4; ++r) {
dst_ptr[0] = src_ptr[0];
dst_ptr[1] = src_ptr[1];
dst_ptr[2] = src_ptr[2];

View File

@ -43,7 +43,7 @@ void vp8cx_init_de_quantizer(VP8D_COMP *pbi) {
int Q;
VP8_COMMON *const pc = &pbi->common;
for (Q = 0; Q < QINDEX_RANGE; Q++) {
for (Q = 0; Q < QINDEX_RANGE; ++Q) {
pc->Y1dequant[Q][0] = (short)vp8_dc_quant(Q, pc->y1dc_delta_q);
pc->Y2dequant[Q][0] = (short)vp8_dc2quant(Q, pc->y2dc_delta_q);
pc->UVdequant[Q][0] = (short)vp8_dc_uv_quant(Q, pc->uvdc_delta_q);
@ -82,7 +82,7 @@ void vp8_mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd) {
xd->dequant_y2[0] = pc->Y2dequant[QIndex][0];
xd->dequant_uv[0] = pc->UVdequant[QIndex][0];
for (i = 1; i < 16; i++) {
for (i = 1; i < 16; ++i) {
xd->dequant_y1_dc[i] = xd->dequant_y1[i] = pc->Y1dequant[QIndex][1];
xd->dequant_y2[i] = pc->Y2dequant[QIndex][1];
xd->dequant_uv[i] = pc->UVdequant[QIndex][1];
@ -161,7 +161,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
intra_prediction_down_copy(xd, xd->recon_above[0] + 16);
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
BLOCKD *b = &xd->block[i];
unsigned char *dst = xd->dst.y_buffer + b->offset;
B_PREDICTION_MODE b_mode = xd->mode_info_context->bmi[i].as_mode;
@ -266,7 +266,7 @@ static void yv12_extend_frame_top_c(YV12_BUFFER_CONFIG *ybf) {
src_ptr1 = ybf->y_buffer - Border;
dest_ptr1 = src_ptr1 - (Border * plane_stride);
for (i = 0; i < (int)Border; i++) {
for (i = 0; i < (int)Border; ++i) {
memcpy(dest_ptr1, src_ptr1, plane_stride);
dest_ptr1 += plane_stride;
}
@ -279,7 +279,7 @@ static void yv12_extend_frame_top_c(YV12_BUFFER_CONFIG *ybf) {
src_ptr1 = ybf->u_buffer - Border;
dest_ptr1 = src_ptr1 - (Border * plane_stride);
for (i = 0; i < (int)(Border); i++) {
for (i = 0; i < (int)(Border); ++i) {
memcpy(dest_ptr1, src_ptr1, plane_stride);
dest_ptr1 += plane_stride;
}
@ -291,7 +291,7 @@ static void yv12_extend_frame_top_c(YV12_BUFFER_CONFIG *ybf) {
src_ptr1 = ybf->v_buffer - Border;
dest_ptr1 = src_ptr1 - (Border * plane_stride);
for (i = 0; i < (int)(Border); i++) {
for (i = 0; i < (int)(Border); ++i) {
memcpy(dest_ptr1, src_ptr1, plane_stride);
dest_ptr1 += plane_stride;
}
@ -317,7 +317,7 @@ static void yv12_extend_frame_bottom_c(YV12_BUFFER_CONFIG *ybf) {
src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
dest_ptr2 = src_ptr2 + plane_stride;
for (i = 0; i < (int)Border; i++) {
for (i = 0; i < (int)Border; ++i) {
memcpy(dest_ptr2, src_ptr2, plane_stride);
dest_ptr2 += plane_stride;
}
@ -333,7 +333,7 @@ static void yv12_extend_frame_bottom_c(YV12_BUFFER_CONFIG *ybf) {
src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
dest_ptr2 = src_ptr2 + plane_stride;
for (i = 0; i < (int)(Border); i++) {
for (i = 0; i < (int)(Border); ++i) {
memcpy(dest_ptr2, src_ptr2, plane_stride);
dest_ptr2 += plane_stride;
}
@ -346,7 +346,7 @@ static void yv12_extend_frame_bottom_c(YV12_BUFFER_CONFIG *ybf) {
src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
dest_ptr2 = src_ptr2 + plane_stride;
for (i = 0; i < (int)(Border); i++) {
for (i = 0; i < (int)(Border); ++i) {
memcpy(dest_ptr2, src_ptr2, plane_stride);
dest_ptr2 += plane_stride;
}
@ -379,7 +379,7 @@ static void yv12_extend_frame_left_right_c(YV12_BUFFER_CONFIG *ybf,
dest_ptr1 = src_ptr1 - Border;
dest_ptr2 = src_ptr2 + 1;
for (i = 0; i < plane_height; i++) {
for (i = 0; i < plane_height; ++i) {
memset(dest_ptr1, src_ptr1[0], Border);
memset(dest_ptr2, src_ptr2[0], Border);
src_ptr1 += plane_stride;
@ -402,7 +402,7 @@ static void yv12_extend_frame_left_right_c(YV12_BUFFER_CONFIG *ybf,
dest_ptr1 = src_ptr1 - Border;
dest_ptr2 = src_ptr2 + 1;
for (i = 0; i < plane_height; i++) {
for (i = 0; i < plane_height; ++i) {
memset(dest_ptr1, src_ptr1[0], Border);
memset(dest_ptr2, src_ptr2[0], Border);
src_ptr1 += plane_stride;
@ -421,7 +421,7 @@ static void yv12_extend_frame_left_right_c(YV12_BUFFER_CONFIG *ybf,
dest_ptr1 = src_ptr1 - Border;
dest_ptr2 = src_ptr2 + 1;
for (i = 0; i < plane_height; i++) {
for (i = 0; i < plane_height; ++i) {
memset(dest_ptr1, src_ptr1[0], Border);
memset(dest_ptr2, src_ptr2[0], Border);
src_ptr1 += plane_stride;
@ -458,7 +458,7 @@ static void decode_mb_rows(VP8D_COMP *pbi) {
ref_fb_corrupted[INTRA_FRAME] = 0;
for (i = 1; i < MAX_REF_FRAMES; i++) {
for (i = 1; i < MAX_REF_FRAMES; ++i) {
YV12_BUFFER_CONFIG *this_fb = pbi->dec_fb_ref[i];
ref_buffer[i][0] = this_fb->y_buffer;
@ -481,7 +481,7 @@ static void decode_mb_rows(VP8D_COMP *pbi) {
vp8_setup_intra_recon_top_line(yv12_fb_new);
/* Decode the individual macro block */
for (mb_row = 0; mb_row < pc->mb_rows; mb_row++) {
for (mb_row = 0; mb_row < pc->mb_rows; ++mb_row) {
if (num_part > 1) {
xd->current_bc = &pbi->mbc[ibc];
ibc++;
@ -521,7 +521,7 @@ static void decode_mb_rows(VP8D_COMP *pbi) {
xd->recon_left[2], xd->dst.y_stride,
xd->dst.uv_stride);
for (mb_col = 0; mb_col < pc->mb_cols; mb_col++) {
for (mb_col = 0; mb_col < pc->mb_cols; ++mb_col) {
/* Distance of Mb to the various image edges.
* These are specified to 8th pel as they are always compared to values
* that are in 1/8th pel units
@ -966,8 +966,8 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data));
/* For each segmentation feature (Quant and loop filter level) */
for (i = 0; i < MB_LVL_MAX; i++) {
for (j = 0; j < MAX_MB_SEGMENTS; j++) {
for (i = 0; i < MB_LVL_MAX; ++i) {
for (j = 0; j < MAX_MB_SEGMENTS; ++j) {
/* Frame level data */
if (vp8_read_bit(bc)) {
xd->segment_feature_data[i][j] =
@ -986,7 +986,7 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
/* Read the probs used to decode the segment id for each macro block. */
for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) {
for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) {
/* If not explicitly set value is defaulted to 255 by memset above */
if (vp8_read_bit(bc))
xd->mb_segment_tree_probs[i] = (vp8_prob)vp8_read_literal(bc, 8);
@ -1014,7 +1014,7 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
if (xd->mode_ref_lf_delta_update) {
/* Send update */
for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
for (i = 0; i < MAX_REF_LF_DELTAS; ++i) {
if (vp8_read_bit(bc)) {
/*sign = vp8_read_bit( bc );*/
xd->ref_lf_deltas[i] = (signed char)vp8_read_literal(bc, 6);
@ -1025,7 +1025,7 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
}
/* Send update */
for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
for (i = 0; i < MAX_MODE_LF_DELTAS; ++i) {
if (vp8_read_bit(bc)) {
/*sign = vp8_read_bit( bc );*/
xd->mode_lf_deltas[i] = (signed char)vp8_read_literal(bc, 6);
@ -1138,10 +1138,10 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
pbi->independent_partitions = 1;
/* read coef probability tree */
for (i = 0; i < BLOCK_TYPES; i++)
for (j = 0; j < COEF_BANDS; j++)
for (k = 0; k < PREV_COEF_CONTEXTS; k++)
for (l = 0; l < ENTROPY_NODES; l++) {
for (i = 0; i < BLOCK_TYPES; ++i)
for (j = 0; j < COEF_BANDS; ++j)
for (k = 0; k < PREV_COEF_CONTEXTS; ++k)
for (l = 0; l < ENTROPY_NODES; ++l) {
vp8_prob *const p = pc->fc.coef_probs[i][j][k] + l;
if (vp8_read(bc, vp8_coef_update_probs[i][j][k][l])) {

View File

@ -66,7 +66,7 @@ static void assign_overlap(OVERLAP_NODE *overlaps, union b_mode_info *bmi,
if (overlap <= 0) return;
/* Find and assign to the next empty overlap node in the list of overlaps.
* Empty is defined as bmi == NULL */
for (i = 0; i < MAX_OVERLAPS; i++) {
for (i = 0; i < MAX_OVERLAPS; ++i) {
if (overlaps[i].bmi == NULL) {
overlaps[i].bmi = bmi;
overlaps[i].overlap = overlap;

View File

@ -191,7 +191,7 @@ vpx_codec_err_t vp8dx_set_reference(VP8D_COMP *pbi,
static int get_free_fb(VP8_COMMON *cm) {
int i;
for (i = 0; i < NUM_YV12_BUFFERS; i++)
for (i = 0; i < NUM_YV12_BUFFERS; ++i)
if (cm->fb_idx_ref_cnt[i] == 0) break;
assert(i < NUM_YV12_BUFFERS);
@ -421,7 +421,7 @@ int vp8dx_references_buffer(VP8_COMMON *oci, int ref_frame) {
const MODE_INFO *mi = oci->mi;
int mb_row, mb_col;
for (mb_row = 0; mb_row < oci->mb_rows; mb_row++) {
for (mb_row = 0; mb_row < oci->mb_rows; ++mb_row) {
for (mb_col = 0; mb_col < oci->mb_cols; mb_col++, mi++) {
if (mi->mbmi.ref_frame == ref_frame) return 1;
}

View File

@ -43,7 +43,7 @@ static void setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd,
VP8_COMMON *const pc = &pbi->common;
int i;
for (i = 0; i < count; i++) {
for (i = 0; i < count; ++i) {
MACROBLOCKD *mbd = &mbrd[i].mbd;
mbd->subpixel_predict = xd->subpixel_predict;
mbd->subpixel_predict8x4 = xd->subpixel_predict8x4;
@ -80,7 +80,7 @@ static void setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd,
if (pc->full_pixel) mbd->fullpixel_mask = 0xfffffff8;
}
for (i = 0; i < pc->mb_rows; i++) pbi->mt_current_mb_col[i] = -1;
for (i = 0; i < pc->mb_rows; ++i) pbi->mt_current_mb_col[i] = -1;
}
static void mt_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
@ -155,7 +155,7 @@ static void mt_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
intra_prediction_down_copy(xd, xd->recon_above[0] + 16);
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
BLOCKD *b = &xd->block[i];
unsigned char *dst = xd->dst.y_buffer + b->offset;
B_PREDICTION_MODE b_mode = xd->mode_info_context->bmi[i].as_mode;
@ -268,7 +268,7 @@ static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd,
ref_fb_corrupted[INTRA_FRAME] = 0;
for (i = 1; i < MAX_REF_FRAMES; i++) {
for (i = 1; i < MAX_REF_FRAMES; ++i) {
YV12_BUFFER_CONFIG *this_fb = pbi->dec_fb_ref[i];
ref_buffer[i][0] = this_fb->y_buffer;
@ -352,7 +352,7 @@ static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd,
xd->dst.uv_stride);
}
for (mb_col = 0; mb_col < pc->mb_cols; mb_col++) {
for (mb_col = 0; mb_col < pc->mb_cols; ++mb_col) {
if (((mb_col - 1) % nsync) == 0) {
pthread_mutex_t *mutex = &pbi->pmutex[mb_row];
protected_write(mutex, current_mb_col, mb_col - 1);
@ -452,10 +452,10 @@ static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd,
MODE_INFO *next = xd->mode_info_context + 1;
if (next->mbmi.ref_frame == INTRA_FRAME) {
for (i = 0; i < 16; i++)
for (i = 0; i < 16; ++i)
pbi->mt_yleft_col[mb_row][i] =
xd->dst.y_buffer[i * recon_y_stride + 15];
for (i = 0; i < 8; i++) {
for (i = 0; i < 8; ++i) {
pbi->mt_uleft_col[mb_row][i] =
xd->dst.u_buffer[i * recon_uv_stride + 7];
pbi->mt_vleft_col[mb_row][i] =
@ -530,7 +530,7 @@ static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd,
int lasty = yv12_fb_lst->y_width + VP8BORDERINPIXELS;
int lastuv = (yv12_fb_lst->y_width >> 1) + (VP8BORDERINPIXELS >> 1);
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
pbi->mt_yabove_row[mb_row + 1][lasty + i] =
pbi->mt_yabove_row[mb_row + 1][lasty - 1];
pbi->mt_uabove_row[mb_row + 1][lastuv + i] =
@ -605,7 +605,7 @@ void vp8_decoder_create_threads(VP8D_COMP *pbi) {
CALLOC_ARRAY_ALIGNED(pbi->mb_row_di, pbi->decoding_thread_count, 32);
CALLOC_ARRAY(pbi->de_thread_data, pbi->decoding_thread_count);
for (ithread = 0; ithread < pbi->decoding_thread_count; ithread++) {
for (ithread = 0; ithread < pbi->decoding_thread_count; ++ithread) {
sem_init(&pbi->h_event_start_decoding[ithread], 0, 0);
vp8_setup_block_dptrs(&pbi->mb_row_di[ithread].mbd);
@ -630,7 +630,7 @@ void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows) {
if (protected_read(&pbi->mt_mutex, &pbi->b_multithreaded_rd)) {
/* De-allocate mutex */
if (pbi->pmutex != NULL) {
for (i = 0; i < mb_rows; i++) {
for (i = 0; i < mb_rows; ++i) {
pthread_mutex_destroy(&pbi->pmutex[i]);
}
vpx_free(pbi->pmutex);
@ -642,7 +642,7 @@ void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows) {
/* Free above_row buffers. */
if (pbi->mt_yabove_row) {
for (i = 0; i < mb_rows; i++) {
for (i = 0; i < mb_rows; ++i) {
vpx_free(pbi->mt_yabove_row[i]);
pbi->mt_yabove_row[i] = NULL;
}
@ -651,7 +651,7 @@ void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows) {
}
if (pbi->mt_uabove_row) {
for (i = 0; i < mb_rows; i++) {
for (i = 0; i < mb_rows; ++i) {
vpx_free(pbi->mt_uabove_row[i]);
pbi->mt_uabove_row[i] = NULL;
}
@ -660,7 +660,7 @@ void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows) {
}
if (pbi->mt_vabove_row) {
for (i = 0; i < mb_rows; i++) {
for (i = 0; i < mb_rows; ++i) {
vpx_free(pbi->mt_vabove_row[i]);
pbi->mt_vabove_row[i] = NULL;
}
@ -670,7 +670,7 @@ void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows) {
/* Free left_col buffers. */
if (pbi->mt_yleft_col) {
for (i = 0; i < mb_rows; i++) {
for (i = 0; i < mb_rows; ++i) {
vpx_free(pbi->mt_yleft_col[i]);
pbi->mt_yleft_col[i] = NULL;
}
@ -679,7 +679,7 @@ void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows) {
}
if (pbi->mt_uleft_col) {
for (i = 0; i < mb_rows; i++) {
for (i = 0; i < mb_rows; ++i) {
vpx_free(pbi->mt_uleft_col[i]);
pbi->mt_uleft_col[i] = NULL;
}
@ -688,7 +688,7 @@ void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows) {
}
if (pbi->mt_vleft_col) {
for (i = 0; i < mb_rows; i++) {
for (i = 0; i < mb_rows; ++i) {
vpx_free(pbi->mt_vleft_col[i]);
pbi->mt_vleft_col[i] = NULL;
}
@ -724,7 +724,7 @@ void vp8mt_alloc_temp_buffers(VP8D_COMP *pbi, int width, int prev_mb_rows) {
CHECK_MEM_ERROR(pbi->pmutex,
vpx_malloc(sizeof(*pbi->pmutex) * pc->mb_rows));
if (pbi->pmutex) {
for (i = 0; i < pc->mb_rows; i++) {
for (i = 0; i < pc->mb_rows; ++i) {
pthread_mutex_init(&pbi->pmutex[i], NULL);
}
}
@ -734,36 +734,36 @@ void vp8mt_alloc_temp_buffers(VP8D_COMP *pbi, int width, int prev_mb_rows) {
/* Allocate memory for above_row buffers. */
CALLOC_ARRAY(pbi->mt_yabove_row, pc->mb_rows);
for (i = 0; i < pc->mb_rows; i++)
for (i = 0; i < pc->mb_rows; ++i)
CHECK_MEM_ERROR(pbi->mt_yabove_row[i],
vpx_memalign(16, sizeof(unsigned char) *
(width + (VP8BORDERINPIXELS << 1))));
CALLOC_ARRAY(pbi->mt_uabove_row, pc->mb_rows);
for (i = 0; i < pc->mb_rows; i++)
for (i = 0; i < pc->mb_rows; ++i)
CHECK_MEM_ERROR(pbi->mt_uabove_row[i],
vpx_memalign(16, sizeof(unsigned char) *
(uv_width + VP8BORDERINPIXELS)));
CALLOC_ARRAY(pbi->mt_vabove_row, pc->mb_rows);
for (i = 0; i < pc->mb_rows; i++)
for (i = 0; i < pc->mb_rows; ++i)
CHECK_MEM_ERROR(pbi->mt_vabove_row[i],
vpx_memalign(16, sizeof(unsigned char) *
(uv_width + VP8BORDERINPIXELS)));
/* Allocate memory for left_col buffers. */
CALLOC_ARRAY(pbi->mt_yleft_col, pc->mb_rows);
for (i = 0; i < pc->mb_rows; i++)
for (i = 0; i < pc->mb_rows; ++i)
CHECK_MEM_ERROR(pbi->mt_yleft_col[i],
vpx_calloc(sizeof(unsigned char) * 16, 1));
CALLOC_ARRAY(pbi->mt_uleft_col, pc->mb_rows);
for (i = 0; i < pc->mb_rows; i++)
for (i = 0; i < pc->mb_rows; ++i)
CHECK_MEM_ERROR(pbi->mt_uleft_col[i],
vpx_calloc(sizeof(unsigned char) * 8, 1));
CALLOC_ARRAY(pbi->mt_vleft_col, pc->mb_rows);
for (i = 0; i < pc->mb_rows; i++)
for (i = 0; i < pc->mb_rows; ++i)
CHECK_MEM_ERROR(pbi->mt_vleft_col[i],
vpx_calloc(sizeof(unsigned char) * 8, 1));
}
@ -777,12 +777,12 @@ void vp8_decoder_remove_threads(VP8D_COMP *pbi) {
protected_write(&pbi->mt_mutex, &pbi->b_multithreaded_rd, 0);
/* allow all threads to exit */
for (i = 0; i < pbi->allocated_decoding_thread_count; i++) {
for (i = 0; i < pbi->allocated_decoding_thread_count; ++i) {
sem_post(&pbi->h_event_start_decoding[i]);
pthread_join(pbi->h_decoding_thread[i], NULL);
}
for (i = 0; i < pbi->allocated_decoding_thread_count; i++) {
for (i = 0; i < pbi->allocated_decoding_thread_count; ++i) {
sem_destroy(&pbi->h_event_start_decoding[i]);
}
@ -820,7 +820,7 @@ void vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd) {
memset(pbi->mt_vabove_row[0] + (VP8BORDERINPIXELS >> 1) - 1, 127,
(yv12_fb_new->y_width >> 1) + 5);
for (j = 1; j < pc->mb_rows; j++) {
for (j = 1; j < pc->mb_rows; ++j) {
memset(pbi->mt_yabove_row[j] + VP8BORDERINPIXELS - 1, (unsigned char)129,
1);
memset(pbi->mt_uabove_row[j] + (VP8BORDERINPIXELS >> 1) - 1,
@ -830,7 +830,7 @@ void vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd) {
}
/* Set left_col to 129 initially */
for (j = 0; j < pc->mb_rows; j++) {
for (j = 0; j < pc->mb_rows; ++j) {
memset(pbi->mt_yleft_col[j], (unsigned char)129, 16);
memset(pbi->mt_uleft_col[j], (unsigned char)129, 8);
memset(pbi->mt_vleft_col[j], (unsigned char)129, 8);
@ -844,7 +844,7 @@ void vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd) {
setup_decoding_thread_data(pbi, xd, pbi->mb_row_di,
pbi->decoding_thread_count);
for (i = 0; i < pbi->decoding_thread_count; i++)
for (i = 0; i < pbi->decoding_thread_count; ++i)
sem_post(&pbi->h_event_start_decoding[i]);
mt_decode_mb_rows(pbi, xd, 0);

View File

@ -302,7 +302,7 @@ static void pack_tokens_into_partitions(VP8_COMP *cpi, unsigned char *cx_data,
unsigned char *ptr_end = cx_data_end;
vp8_writer *w;
for (i = 0; i < num_part; i++) {
for (i = 0; i < num_part; ++i) {
int mb_row;
w = cpi->bc + i + 1;
@ -326,7 +326,7 @@ static void pack_tokens_into_partitions(VP8_COMP *cpi, unsigned char *cx_data,
static void pack_mb_row_tokens(VP8_COMP *cpi, vp8_writer *w) {
int mb_row;
for (mb_row = 0; mb_row < cpi->common.mb_rows; mb_row++) {
for (mb_row = 0; mb_row < cpi->common.mb_rows; ++mb_row) {
const TOKENEXTRA *p = cpi->tplist[mb_row].start;
const TOKENEXTRA *stop = cpi->tplist[mb_row].stop;
int tokens = (int)(stop - p);
@ -668,16 +668,16 @@ static void print_prob_tree(vp8_prob
int i,j,k,l;
FILE* f = fopen("enc_tree_probs.txt", "a");
fprintf(f, "{\n");
for (i = 0; i < BLOCK_TYPES; i++)
for (i = 0; i < BLOCK_TYPES; ++i)
{
fprintf(f, " {\n");
for (j = 0; j < COEF_BANDS; j++)
for (j = 0; j < COEF_BANDS; ++j)
{
fprintf(f, " {\n");
for (k = 0; k < PREV_COEF_CONTEXTS; k++)
for (k = 0; k < PREV_COEF_CONTEXTS; ++k)
{
fprintf(f, " {");
for (l = 0; l < ENTROPY_NODES; l++)
for (l = 0; l < ENTROPY_NODES; ++l)
{
fprintf(f, "%3u, ",
(unsigned int)(coef_probs [i][j][k][l]));
@ -1129,9 +1129,9 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
vp8_write_bit(bc, xd->mb_segement_abs_delta);
/* For each segmentation feature (Quant and loop filter level) */
for (i = 0; i < MB_LVL_MAX; i++) {
for (i = 0; i < MB_LVL_MAX; ++i) {
/* For each of the segments */
for (j = 0; j < MAX_MB_SEGMENTS; j++) {
for (j = 0; j < MAX_MB_SEGMENTS; ++j) {
Data = xd->segment_feature_data[i][j];
/* Frame level data */
@ -1154,7 +1154,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
if (xd->update_mb_segmentation_map) {
/* Write the probs used to decode the segment id for each mb */
for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) {
for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) {
int Data = xd->mb_segment_tree_probs[i];
if (Data != 255) {
@ -1185,7 +1185,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
int Data;
/* Send update */
for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
for (i = 0; i < MAX_REF_LF_DELTAS; ++i) {
Data = xd->ref_lf_deltas[i];
/* Frame level data */
@ -1207,7 +1207,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
}
/* Send update */
for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
for (i = 0; i < MAX_MODE_LF_DELTAS; ++i) {
Data = xd->mode_lf_deltas[i];
if (xd->mode_lf_deltas[i] != xd->last_mode_lf_deltas[i] ||
@ -1356,7 +1356,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
cpi->partition_sz[0] += 3 * (num_part - 1);
for (i = 1; i < num_part; i++) {
for (i = 1; i < num_part; ++i) {
write_partition_size(dp, cpi->partition_sz[i]);
dp += 3;
}
@ -1364,7 +1364,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
if (!cpi->output_partition) {
/* concatenate partition buffers */
for (i = 0; i < num_part; i++) {
for (i = 0; i < num_part; ++i) {
memmove(dp, cpi->partition_d[i + 1], cpi->partition_sz[i + 1]);
cpi->partition_d[i + 1] = dp;
dp += cpi->partition_sz[i + 1];
@ -1373,7 +1373,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
/* update total size */
*size = 0;
for (i = 0; i < num_part + 1; i++) {
for (i = 0; i < num_part + 1; ++i) {
*size += cpi->partition_sz[i];
}
}
@ -1387,14 +1387,14 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest,
validate_buffer(cx_data, 3 * (num_part - 1), cx_data_end, &pc->error);
for (i = 1; i < num_part + 1; i++) {
for (i = 1; i < num_part + 1; ++i) {
cpi->bc[i].error = &pc->error;
}
pack_tokens_into_partitions(cpi, cx_data + 3 * (num_part - 1), cx_data_end,
num_part);
for (i = 1; i < num_part; i++) {
for (i = 1; i < num_part; ++i) {
cpi->partition_sz[i] = cpi->bc[i].pos;
write_partition_size(cx_data, cpi->partition_sz[i]);
cx_data += 3;
@ -1434,16 +1434,16 @@ void print_tree_update_probs() {
"const vp8_prob tree_update_probs[BLOCK_TYPES] [COEF_BANDS] "
"[PREV_COEF_CONTEXTS] [ENTROPY_NODES] = {\n");
for (i = 0; i < BLOCK_TYPES; i++) {
for (i = 0; i < BLOCK_TYPES; ++i) {
fprintf(f, " { \n");
for (j = 0; j < COEF_BANDS; j++) {
for (j = 0; j < COEF_BANDS; ++j) {
fprintf(f, " {\n");
for (k = 0; k < PREV_COEF_CONTEXTS; k++) {
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
fprintf(f, " {");
for (l = 0; l < ENTROPY_NODES; l++) {
for (l = 0; l < ENTROPY_NODES; ++l) {
Sum =
tree_update_hist[i][j][k][l][0] + tree_update_hist[i][j][k][l][1];

View File

@ -55,7 +55,7 @@ void vp8_start_encode(BOOL_CODER *br, unsigned char *source,
void vp8_stop_encode(BOOL_CODER *br) {
int i;
for (i = 0; i < 32; i++) vp8_encode_bool(br, 0, 128);
for (i = 0; i < 32; ++i) vp8_encode_bool(br, 0, 128);
}
void vp8_encode_value(BOOL_CODER *br, int data, int bits) {

View File

@ -18,7 +18,7 @@ void vp8_short_fdct4x4_c(short *input, short *output, int pitch) {
short *ip = input;
short *op = output;
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
a1 = ((ip[0] + ip[3]) * 8);
b1 = ((ip[1] + ip[2]) * 8);
c1 = ((ip[1] - ip[2]) * 8);
@ -35,7 +35,7 @@ void vp8_short_fdct4x4_c(short *input, short *output, int pitch) {
}
ip = output;
op = output;
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
a1 = ip[0] + ip[12];
b1 = ip[4] + ip[8];
c1 = ip[4] - ip[8];
@ -64,7 +64,7 @@ void vp8_short_walsh4x4_c(short *input, short *output, int pitch) {
short *ip = input;
short *op = output;
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
a1 = ((ip[0] + ip[2]) * 4);
d1 = ((ip[1] + ip[3]) * 4);
c1 = ((ip[1] - ip[3]) * 4);
@ -81,7 +81,7 @@ void vp8_short_walsh4x4_c(short *input, short *output, int pitch) {
ip = output;
op = output;
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
a1 = ip[0] + ip[8];
d1 = ip[4] + ip[12];
c1 = ip[4] - ip[12];

View File

@ -383,7 +383,7 @@ int vp8_denoiser_allocate(VP8_DENOISER *denoiser, int width, int height,
assert(denoiser);
denoiser->num_mb_cols = num_mb_cols;
for (i = 0; i < MAX_REF_FRAMES; i++) {
for (i = 0; i < MAX_REF_FRAMES; ++i) {
denoiser->yv12_running_avg[i].flags = 0;
if (vp8_yv12_alloc_frame_buffer(&(denoiser->yv12_running_avg[i]), width,
@ -450,7 +450,7 @@ void vp8_denoiser_free(VP8_DENOISER *denoiser) {
int i;
assert(denoiser);
for (i = 0; i < MAX_REF_FRAMES; i++) {
for (i = 0; i < MAX_REF_FRAMES; ++i) {
vp8_yv12_de_alloc_frame_buffer(&denoiser->yv12_running_avg[i]);
}
vp8_yv12_de_alloc_frame_buffer(&denoiser->yv12_mc_running_avg);

View File

@ -142,7 +142,7 @@ static void calc_av_activity(VP8_COMP *cpi, int64_t activity_sum) {
sizeof(unsigned int) * cpi->common.MBs);
/* Ripple each value down to its correct position */
for (i = 1; i < cpi->common.MBs; i++) {
for (i = 1; i < cpi->common.MBs; ++i) {
for (j = i; j > 0; j--) {
if (sortlist[j] < sortlist[j - 1]) {
/* Swap values */
@ -197,9 +197,9 @@ static void calc_activity_index(VP8_COMP *cpi, MACROBLOCK *x) {
x->mb_activity_ptr = cpi->mb_activity_map;
/* Calculate normalized mb activity number. */
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
/* for each macroblock col in image */
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
/* Read activity from the map */
act = *(x->mb_activity_ptr);
@ -249,14 +249,14 @@ static void build_activity_map(VP8_COMP *cpi) {
int64_t activity_sum = 0;
/* for each macroblock row in image */
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
#if ALT_ACT_MEASURE
/* reset above block coeffs */
xd->up_available = (mb_row != 0);
recon_yoffset = (mb_row * recon_y_stride * 16);
#endif
/* for each macroblock col in image */
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
#if ALT_ACT_MEASURE
xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
xd->left_available = (mb_col != 0);
@ -386,7 +386,7 @@ static void encode_mb_row(VP8_COMP *cpi, VP8_COMMON *cm, int mb_row,
x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
/* for each macroblock col in image */
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
*tp = cpi->tok;
#endif
@ -465,7 +465,7 @@ static void encode_mb_row(VP8_COMP *cpi, VP8_COMMON *cm, int mb_row,
if (xd->mbmi.mode == SPLITMV) {
int b;
for (b = 0; b < xd->mbmi.partition_count; b++) {
for (b = 0; b < xd->mbmi.partition_count; ++b) {
inter_b_modes[x->partition->bmi[b].mode]++;
}
}
@ -726,7 +726,7 @@ void vp8_encode_frame(VP8_COMP *cpi) {
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
{
int i;
for (i = 0; i < num_part; i++) {
for (i = 0; i < num_part; ++i) {
vp8_start_encode(&bc[i], cpi->partition_d[i + 1],
cpi->partition_d_end[i + 1]);
bc[i].error = &cm->error;
@ -746,9 +746,9 @@ void vp8_encode_frame(VP8_COMP *cpi) {
vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei,
cpi->encoding_thread_count);
for (i = 0; i < cm->mb_rows; i++) cpi->mt_current_mb_col[i] = -1;
for (i = 0; i < cm->mb_rows; ++i) cpi->mt_current_mb_col[i] = -1;
for (i = 0; i < cpi->encoding_thread_count; i++) {
for (i = 0; i < cpi->encoding_thread_count; ++i) {
sem_post(&cpi->h_event_start_encoding[i]);
}
@ -788,7 +788,7 @@ void vp8_encode_frame(VP8_COMP *cpi) {
sem_wait(
&cpi->h_event_end_encoding); /* wait for other threads to finish */
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
cpi->tok_count += (unsigned int)(cpi->tplist[mb_row].stop -
cpi->tplist[mb_row].start);
}
@ -797,29 +797,29 @@ void vp8_encode_frame(VP8_COMP *cpi) {
int j;
if (xd->segmentation_enabled) {
for (i = 0; i < cpi->encoding_thread_count; i++) {
for (j = 0; j < 4; j++)
for (i = 0; i < cpi->encoding_thread_count; ++i) {
for (j = 0; j < 4; ++j)
segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
}
}
}
for (i = 0; i < cpi->encoding_thread_count; i++) {
for (i = 0; i < cpi->encoding_thread_count; ++i) {
int mode_count;
int c_idx;
totalrate += cpi->mb_row_ei[i].totalrate;
cpi->mb.skip_true_count += cpi->mb_row_ei[i].mb.skip_true_count;
for (mode_count = 0; mode_count < VP8_YMODES; mode_count++)
for (mode_count = 0; mode_count < VP8_YMODES; ++mode_count)
cpi->mb.ymode_count[mode_count] +=
cpi->mb_row_ei[i].mb.ymode_count[mode_count];
for (mode_count = 0; mode_count < VP8_UV_MODES; mode_count++)
for (mode_count = 0; mode_count < VP8_UV_MODES; ++mode_count)
cpi->mb.uv_mode_count[mode_count] +=
cpi->mb_row_ei[i].mb.uv_mode_count[mode_count];
for (c_idx = 0; c_idx < MVvals; c_idx++) {
for (c_idx = 0; c_idx < MVvals; ++c_idx) {
cpi->mb.MVcount[0][c_idx] += cpi->mb_row_ei[i].mb.MVcount[0][c_idx];
cpi->mb.MVcount[1][c_idx] += cpi->mb_row_ei[i].mb.MVcount[1][c_idx];
}
@ -827,11 +827,11 @@ void vp8_encode_frame(VP8_COMP *cpi) {
cpi->mb.prediction_error += cpi->mb_row_ei[i].mb.prediction_error;
cpi->mb.intra_error += cpi->mb_row_ei[i].mb.intra_error;
for (c_idx = 0; c_idx < MAX_REF_FRAMES; c_idx++)
for (c_idx = 0; c_idx < MAX_REF_FRAMES; ++c_idx)
cpi->mb.count_mb_ref_frame_usage[c_idx] +=
cpi->mb_row_ei[i].mb.count_mb_ref_frame_usage[c_idx];
for (c_idx = 0; c_idx < MAX_ERROR_BINS; c_idx++)
for (c_idx = 0; c_idx < MAX_ERROR_BINS; ++c_idx)
cpi->mb.error_bins[c_idx] += cpi->mb_row_ei[i].mb.error_bins[c_idx];
/* add up counts for each thread */
@ -843,7 +843,7 @@ void vp8_encode_frame(VP8_COMP *cpi) {
{
/* for each macroblock row in image */
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
vp8_zero(cm->left_context)
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
@ -864,7 +864,7 @@ void vp8_encode_frame(VP8_COMP *cpi) {
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
{
int i;
for (i = 0; i < num_part; i++) {
for (i = 0; i < num_part; ++i) {
vp8_stop_encode(&bc[i]);
cpi->partition_sz[i + 1] = bc[i].pos;
}
@ -903,7 +903,7 @@ void vp8_encode_frame(VP8_COMP *cpi) {
xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;
/* Zero probabilities not allowed */
for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) {
for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) {
if (xd->mb_segment_tree_probs[i] == 0) xd->mb_segment_tree_probs[i] = 1;
}
}
@ -945,27 +945,27 @@ void vp8_setup_block_ptrs(MACROBLOCK *x) {
int r, c;
int i;
for (r = 0; r < 4; r++) {
for (c = 0; c < 4; c++) {
for (r = 0; r < 4; ++r) {
for (c = 0; c < 4; ++c) {
x->block[r * 4 + c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
}
}
for (r = 0; r < 2; r++) {
for (c = 0; c < 2; c++) {
for (r = 0; r < 2; ++r) {
for (c = 0; c < 2; ++c) {
x->block[16 + r * 2 + c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
}
}
for (r = 0; r < 2; r++) {
for (c = 0; c < 2; c++) {
for (r = 0; r < 2; ++r) {
for (c = 0; c < 2; ++c) {
x->block[20 + r * 2 + c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
}
}
x->block[24].src_diff = x->src_diff + 384;
for (i = 0; i < 25; i++) {
for (i = 0; i < 25; ++i) {
x->block[i].coeff = x->coeff + i * 16;
}
}
@ -978,8 +978,8 @@ void vp8_build_block_offsets(MACROBLOCK *x) {
/* y blocks */
x->thismb_ptr = &x->thismb[0];
for (br = 0; br < 4; br++) {
for (bc = 0; bc < 4; bc++) {
for (br = 0; br < 4; ++br) {
for (bc = 0; bc < 4; ++bc) {
BLOCK *this_block = &x->block[block];
this_block->base_src = &x->thismb_ptr;
this_block->src_stride = 16;
@ -989,8 +989,8 @@ void vp8_build_block_offsets(MACROBLOCK *x) {
}
/* u blocks */
for (br = 0; br < 2; br++) {
for (bc = 0; bc < 2; bc++) {
for (br = 0; br < 2; ++br) {
for (bc = 0; bc < 2; ++bc) {
BLOCK *this_block = &x->block[block];
this_block->base_src = &x->src.u_buffer;
this_block->src_stride = x->src.uv_stride;
@ -1000,8 +1000,8 @@ void vp8_build_block_offsets(MACROBLOCK *x) {
}
/* v blocks */
for (br = 0; br < 2; br++) {
for (bc = 0; bc < 2; bc++) {
for (br = 0; br < 2; ++br) {
for (bc = 0; bc < 2; ++bc) {
BLOCK *this_block = &x->block[block];
this_block->base_src = &x->src.v_buffer;
this_block->src_stride = x->src.uv_stride;

View File

@ -32,7 +32,7 @@ int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred) {
vp8_inverse_transform_mby(&x->e_mbd);
} else {
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
x->e_mbd.block[i].bmi.as_mode = B_DC_PRED;
vp8_encode_intra4x4block(x, i);
}
@ -74,7 +74,7 @@ void vp8_encode_intra4x4mby(MACROBLOCK *mb) {
MACROBLOCKD *xd = &mb->e_mbd;
intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16);
for (i = 0; i < 16; i++) vp8_encode_intra4x4block(mb, i);
for (i = 0; i < 16; ++i) vp8_encode_intra4x4block(mb, i);
return;
}

View File

@ -59,7 +59,7 @@ static void build_dcblock(MACROBLOCK *x) {
short *src_diff_ptr = &x->src_diff[384];
int i;
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
src_diff_ptr[i] = x->coeff[i * 16];
}
}
@ -360,7 +360,7 @@ static void check_reset_2nd_coeffs(MACROBLOCKD *x, int type, ENTROPY_CONTEXT *a,
if (bd->dequant[0] >= 35 && bd->dequant[1] >= 35) return;
for (i = 0; i < (*bd->eob); i++) {
for (i = 0; i < (*bd->eob); ++i) {
int coef = bd->dqcoeff[vp8_default_zig_zag1d[i]];
sum += (coef >= 0) ? coef : -coef;
if (sum >= 35) return;
@ -374,7 +374,7 @@ static void check_reset_2nd_coeffs(MACROBLOCKD *x, int type, ENTROPY_CONTEXT *a,
fall between -35 and +35.
**************************************************************************/
if (sum < 35) {
for (i = 0; i < (*bd->eob); i++) {
for (i = 0; i < (*bd->eob); ++i) {
int rc = vp8_default_zig_zag1d[i];
bd->qcoeff[rc] = 0;
bd->dqcoeff[rc] = 0;
@ -403,11 +403,11 @@ static void optimize_mb(MACROBLOCK *x) {
x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC;
for (b = 0; b < 16; b++) {
for (b = 0; b < 16; ++b) {
optimize_b(x, b, type, ta + vp8_block2above[b], tl + vp8_block2left[b]);
}
for (b = 16; b < 24; b++) {
for (b = 16; b < 24; ++b) {
optimize_b(x, b, PLANE_TYPE_UV, ta + vp8_block2above[b],
tl + vp8_block2left[b]);
}
@ -444,7 +444,7 @@ void vp8_optimize_mby(MACROBLOCK *x) {
x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC;
for (b = 0; b < 16; b++) {
for (b = 0; b < 16; ++b) {
optimize_b(x, b, type, ta + vp8_block2above[b], tl + vp8_block2left[b]);
}
@ -473,7 +473,7 @@ void vp8_optimize_mbuv(MACROBLOCK *x) {
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
for (b = 16; b < 24; b++) {
for (b = 16; b < 24; ++b) {
optimize_b(x, b, PLANE_TYPE_UV, ta + vp8_block2above[b],
tl + vp8_block2left[b]);
}

View File

@ -105,7 +105,7 @@ static THREAD_FUNCTION thread_encoding_proc(void *p_data) {
x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
/* for each macroblock col in image */
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
if (((mb_col - 1) % nsync) == 0) {
pthread_mutex_t *mutex = &cpi->pmutex[mb_row];
protected_write(mutex, current_mb_col, mb_col - 1);
@ -186,7 +186,7 @@ static THREAD_FUNCTION thread_encoding_proc(void *p_data) {
if (xd->mbmi.mode == SPLITMV) {
int b;
for (b = 0; b < xd->mbmi.partition_count; b++) {
for (b = 0; b < xd->mbmi.partition_count; ++b) {
inter_b_modes[x->partition->bmi[b].mode]++;
}
}
@ -354,7 +354,7 @@ static void setup_mbby_copy(MACROBLOCK *mbdst, MACROBLOCK *mbsrc) {
z->intra_uv_mode_cost = x->intra_uv_mode_cost;
z->bmode_costs = x->bmode_costs;
for (i = 0; i < 25; i++) {
for (i = 0; i < 25; ++i) {
z->block[i].quant = x->block[i].quant;
z->block[i].quant_fast = x->block[i].quant_fast;
z->block[i].quant_shift = x->block[i].quant_shift;
@ -412,8 +412,8 @@ static void setup_mbby_copy(MACROBLOCK *mbdst, MACROBLOCK *mbsrc) {
* the quantizer code uses a passed in pointer to the dequant constants.
* This will also require modifications to the x86 and neon assembly.
* */
for (i = 0; i < 16; i++) zd->block[i].dequant = zd->dequant_y1;
for (i = 16; i < 24; i++) zd->block[i].dequant = zd->dequant_uv;
for (i = 0; i < 16; ++i) zd->block[i].dequant = zd->dequant_y1;
for (i = 16; i < 24; ++i) zd->block[i].dequant = zd->dequant_uv;
zd->block[24].dequant = zd->dequant_y2;
#endif
@ -434,7 +434,7 @@ void vp8cx_init_mbrthread_data(VP8_COMP *cpi, MACROBLOCK *x,
MACROBLOCKD *const xd = &x->e_mbd;
int i;
for (i = 0; i < count; i++) {
for (i = 0; i < count; ++i) {
MACROBLOCK *mb = &mbr_ei[i].mb;
MACROBLOCKD *mbd = &mb->e_mbd;
@ -526,7 +526,7 @@ int vp8cx_create_encoder_threads(VP8_COMP *cpi) {
(cpi->encoding_thread_count +1));
*/
for (ithread = 0; ithread < th_count; ithread++) {
for (ithread = 0; ithread < th_count; ++ithread) {
ENCODETHREAD_DATA *ethd = &cpi->en_thread_data[ithread];
/* Setup block ptrs and offsets */
@ -607,7 +607,7 @@ void vp8cx_remove_encoder_threads(VP8_COMP *cpi) {
{
int i;
for (i = 0; i < cpi->encoding_thread_count; i++) {
for (i = 0; i < cpi->encoding_thread_count; ++i) {
sem_post(&cpi->h_event_start_encoding[i]);
pthread_join(cpi->h_encoding_thread[i], 0);

View File

@ -536,7 +536,7 @@ void vp8_first_pass(VP8_COMP *cpi) {
}
/* for each macroblock row in image */
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
int_mv best_ref_mv;
best_ref_mv.as_int = 0;
@ -554,7 +554,7 @@ void vp8_first_pass(VP8_COMP *cpi) {
((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16);
/* for each macroblock col in image */
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
int this_error;
int gf_motion_error = INT_MAX;
int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
@ -966,7 +966,7 @@ static int estimate_max_q(VP8_COMP *cpi, FIRSTPASS_STATS *fpstats,
/* Try and pick a max Q that will be high enough to encode the
* content at the given rate.
*/
for (Q = cpi->twopass.maxq_min_limit; Q < cpi->twopass.maxq_max_limit; Q++) {
for (Q = cpi->twopass.maxq_min_limit; Q < cpi->twopass.maxq_max_limit; ++Q) {
int bits_per_mb_at_this_q;
/* Error per MB based correction factor */
@ -1061,7 +1061,7 @@ static int estimate_cq(VP8_COMP *cpi, FIRSTPASS_STATS *fpstats,
if (clip_iifactor < 0.80) clip_iifactor = 0.80;
/* Try and pick a Q that can encode the content at the given rate. */
for (Q = 0; Q < MAXQ; Q++) {
for (Q = 0; Q < MAXQ; ++Q) {
int bits_per_mb_at_this_q;
/* Error per MB based correction factor */
@ -1118,7 +1118,7 @@ static int estimate_q(VP8_COMP *cpi, double section_err,
}
/* Try and pick a Q that can encode the content at the given rate. */
for (Q = 0; Q < MAXQ; Q++) {
for (Q = 0; Q < MAXQ; ++Q) {
int bits_per_mb_at_this_q;
/* Error per MB based correction factor */
@ -1201,7 +1201,7 @@ static int estimate_kf_group_q(VP8_COMP *cpi, double section_err,
/* Try and pick a Q that should be high enough to encode the content at
* the given rate.
*/
for (Q = 0; Q < MAXQ; Q++) {
for (Q = 0; Q < MAXQ; ++Q) {
/* Error per MB based correction factor */
err_correction_factor =
calc_correction_factor(err_per_mb, 150.0, pow_lowq, pow_highq, Q);
@ -1379,7 +1379,7 @@ static int detect_transition_to_still(VP8_COMP *cpi, int frame_interval,
double decay_rate;
/* Look ahead a few frames to see if static condition persists... */
for (j = 0; j < still_interval; j++) {
for (j = 0; j < still_interval; ++j) {
if (EOF == input_stats(cpi, &tmp_next_frame)) break;
decay_rate = get_prediction_decay_rate(cpi, &tmp_next_frame);
@ -1518,7 +1518,7 @@ static int calc_arf_boost(VP8_COMP *cpi, int offset, int f_frames, int b_frames,
int flash_detected = 0;
/* Search forward from the proposed arf/next gf position */
for (i = 0; i < f_frames; i++) {
for (i = 0; i < f_frames; ++i) {
if (read_frame_stats(cpi, &this_frame, (i + offset)) == EOF) break;
/* Update the motion related elements to the boost calculation */
@ -2140,7 +2140,7 @@ static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
zero_stats(&sectionstats);
reset_fpf_position(cpi, start_pos);
for (i = 0; i < cpi->baseline_gf_interval; i++) {
for (i = 0; i < cpi->baseline_gf_interval; ++i) {
input_stats(cpi, &next_frame);
accumulate_stats(&sectionstats, &next_frame);
}
@ -2440,7 +2440,7 @@ static int test_candidate_kf(VP8_COMP *cpi, FIRSTPASS_STATS *last_frame,
start_pos = cpi->twopass.stats_in;
/* Examine how well the key frame predicts subsequent frames */
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
next_iiratio = (IIKFACTOR1 * local_next_frame.intra_error /
DOUBLE_DIVIDE_CHECK(local_next_frame.coded_error));
@ -2569,7 +2569,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
*/
recent_loop_decay[i % 8] = loop_decay_rate;
decay_accumulator = 1.0;
for (j = 0; j < 8; j++) {
for (j = 0; j < 8; ++j) {
decay_accumulator = decay_accumulator * recent_loop_decay[j];
}
@ -2619,7 +2619,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
kf_group_coded_err = 0;
/* Rescan to get the correct error data for the forced kf group */
for (i = 0; i < cpi->twopass.frames_to_key; i++) {
for (i = 0; i < cpi->twopass.frames_to_key; ++i) {
/* Accumulate kf group errors */
kf_group_err += calculate_modified_err(cpi, &tmp_frame);
kf_group_intra_err += tmp_frame.intra_error;
@ -2721,7 +2721,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
decay_accumulator = 1.0;
boost_score = 0.0;
for (i = 0; i < cpi->twopass.frames_to_key; i++) {
for (i = 0; i < cpi->twopass.frames_to_key; ++i) {
double r;
if (EOF == input_stats(cpi, &next_frame)) break;
@ -2757,7 +2757,7 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) {
zero_stats(&sectionstats);
reset_fpf_position(cpi, start_position);
for (i = 0; i < cpi->twopass.frames_to_key; i++) {
for (i = 0; i < cpi->twopass.frames_to_key; ++i) {
input_stats(cpi, &next_frame);
accumulate_stats(&sectionstats, &next_frame);
}

View File

@ -40,7 +40,7 @@ void vp8_lookahead_destroy(struct lookahead_ctx *ctx) {
if (ctx->buf) {
unsigned int i;
for (i = 0; i < ctx->max_sz; i++)
for (i = 0; i < ctx->max_sz; ++i)
vp8_yv12_de_alloc_frame_buffer(&ctx->buf[i].img);
free(ctx->buf);
}
@ -73,7 +73,7 @@ struct lookahead_ctx *vp8_lookahead_init(unsigned int width,
ctx->max_sz = depth;
ctx->buf = calloc(depth, sizeof(*ctx->buf));
if (!ctx->buf) goto bail;
for (i = 0; i < depth; i++)
for (i = 0; i < depth; ++i)
if (vp8_yv12_alloc_frame_buffer(&ctx->buf[i].img, width, height,
VP8BORDERINPIXELS))
goto bail;

View File

@ -890,7 +890,7 @@ int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
CHECK_BOUNDS(2)
if (all_in) {
for (i = 0; i < 6; i++) {
for (i = 0; i < 6; ++i) {
this_mv.as_mv.row = br + hex[i].row;
this_mv.as_mv.col = bc + hex[i].col;
this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) +
@ -899,7 +899,7 @@ int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
CHECK_BETTER
}
} else {
for (i = 0; i < 6; i++) {
for (i = 0; i < 6; ++i) {
this_mv.as_mv.row = br + hex[i].row;
this_mv.as_mv.col = bc + hex[i].col;
CHECK_POINT
@ -918,12 +918,12 @@ int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
k = best_site;
}
for (j = 1; j < hex_range; j++) {
for (j = 1; j < hex_range; ++j) {
best_site = -1;
CHECK_BOUNDS(2)
if (all_in) {
for (i = 0; i < 3; i++) {
for (i = 0; i < 3; ++i) {
this_mv.as_mv.row = br + next_chkpts[k][i].row;
this_mv.as_mv.col = bc + next_chkpts[k][i].col;
this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
@ -932,7 +932,7 @@ int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
CHECK_BETTER
}
} else {
for (i = 0; i < 3; i++) {
for (i = 0; i < 3; ++i) {
this_mv.as_mv.row = br + next_chkpts[k][i].row;
this_mv.as_mv.col = bc + next_chkpts[k][i].col;
CHECK_POINT
@ -958,12 +958,12 @@ int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
/* check 4 1-away neighbors */
cal_neighbors:
for (j = 0; j < dia_range; j++) {
for (j = 0; j < dia_range; ++j) {
best_site = -1;
CHECK_BOUNDS(1)
if (all_in) {
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
this_mv.as_mv.row = br + neighbors[i].row;
this_mv.as_mv.col = bc + neighbors[i].col;
this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) +
@ -972,7 +972,7 @@ cal_neighbors:
CHECK_BETTER
}
} else {
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
this_mv.as_mv.row = br + neighbors[i].row;
this_mv.as_mv.col = bc + neighbors[i].col;
CHECK_POINT
@ -1064,8 +1064,8 @@ int vp8_diamond_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
i = 1;
for (step = 0; step < tot_steps; step++) {
for (j = 0; j < x->searches_per_step; j++) {
for (step = 0; step < tot_steps; ++step) {
for (j = 0; j < x->searches_per_step; ++j) {
/* Trap illegal vectors */
this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
@ -1175,7 +1175,7 @@ int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
i = 1;
for (step = 0; step < tot_steps; step++) {
for (step = 0; step < tot_steps; ++step) {
int all_in = 1, t;
/* To know if all neighbor points are within the bounds, 4 bounds
@ -1193,7 +1193,7 @@ int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
for (j = 0; j < x->searches_per_step; j += 4) {
const unsigned char *block_offset[4];
for (t = 0; t < 4; t++)
for (t = 0; t < 4; ++t)
block_offset[t] = ss[i + t].offset + best_address;
fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride,
@ -1214,7 +1214,7 @@ int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
}
}
} else {
for (j = 0; j < x->searches_per_step; j++) {
for (j = 0; j < x->searches_per_step; ++j) {
/* Trap illegal vectors */
this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
@ -1316,11 +1316,11 @@ int vp8_full_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
if (row_max > x->mv_row_max) row_max = x->mv_row_max;
for (r = row_min; r < row_max; r++) {
for (r = row_min; r < row_max; ++r) {
this_mv.as_mv.row = r;
check_here = r * mv_stride + in_what + col_min;
for (c = col_min; c < col_max; c++) {
for (c = col_min; c < col_max; ++c) {
thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride);
this_mv.as_mv.col = c;
@ -1404,7 +1404,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
if (row_max > x->mv_row_max) row_max = x->mv_row_max;
for (r = row_min; r < row_max; r++) {
for (r = row_min; r < row_max; ++r) {
this_mv.as_mv.row = r;
check_here = r * mv_stride + in_what + col_min;
c = col_min;
@ -1414,7 +1414,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array);
for (i = 0; i < 3; i++) {
for (i = 0; i < 3; ++i) {
thissad = sad_array[i];
if (thissad < bestsad) {
@ -1524,7 +1524,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
if (row_max > x->mv_row_max) row_max = x->mv_row_max;
for (r = row_min; r < row_max; r++) {
for (r = row_min; r < row_max; ++r) {
this_mv.as_mv.row = r;
check_here = r * mv_stride + in_what + col_min;
c = col_min;
@ -1534,7 +1534,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
fn_ptr->sdx8f(what, what_stride, check_here, in_what_stride, sad_array8);
for (i = 0; i < 8; i++) {
for (i = 0; i < 8; ++i) {
thissad = sad_array8[i];
if (thissad < bestsad) {
@ -1560,7 +1560,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array);
for (i = 0; i < 3; i++) {
for (i = 0; i < 3; ++i) {
thissad = sad_array[i];
if (thissad < bestsad) {
@ -1641,10 +1641,10 @@ int vp8_refining_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride) +
mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
for (i = 0; i < search_range; i++) {
for (i = 0; i < search_range; ++i) {
int best_site = -1;
for (j = 0; j < 4; j++) {
for (j = 0; j < 4; ++j) {
this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
@ -1719,7 +1719,7 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride) +
mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
for (i = 0; i < search_range; i++) {
for (i = 0; i < search_range; ++i) {
int best_site = -1;
int all_in = 1;
@ -1739,7 +1739,7 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride,
sad_array);
for (j = 0; j < 4; j++) {
for (j = 0; j < 4; ++j) {
if (sad_array[j] < bestsad) {
this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row;
this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col;
@ -1753,7 +1753,7 @@ int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
}
}
} else {
for (j = 0; j < 4; j++) {
for (j = 0; j < 4; ++j) {
this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
@ -1806,11 +1806,11 @@ void print_mode_context(void) {
fprintf(f, "const int vp8_mode_contexts[6][4] =\n");
fprintf(f, "{\n");
for (j = 0; j < 6; j++) {
for (j = 0; j < 6; ++j) {
fprintf(f, " { /* %d */\n", j);
fprintf(f, " ");
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; ++i) {
int overal_prob;
int this_prob;
int count;

View File

@ -55,7 +55,7 @@ int32_t vp8_mbblock_error_msa(MACROBLOCK *mb, int32_t dc) {
mask0 = (v16u8)__msa_insve_w((v4i32)mask0, 0, (v4i32)zero);
}
for (loop_cnt = 0; loop_cnt < 8; loop_cnt++) {
for (loop_cnt = 0; loop_cnt < 8; ++loop_cnt) {
be = &mb->block[2 * loop_cnt];
bd = &mb->e_mbd.block[2 * loop_cnt];
coeff_ptr = be->coeff;

View File

@ -68,7 +68,7 @@ void vp8_cal_dissimilarity(VP8_COMP *cpi) {
if (cm->frame_type != KEY_FRAME) {
store_info->is_frame_dropped = 0;
for (i = 1; i < MAX_REF_FRAMES; i++)
for (i = 1; i < MAX_REF_FRAMES; ++i)
store_info->low_res_ref_frames[i] = cpi->current_ref_frames[i];
}
@ -79,9 +79,9 @@ void vp8_cal_dissimilarity(VP8_COMP *cpi) {
MODE_INFO *tmp = cm->mip + cm->mode_info_stride;
LOWER_RES_MB_INFO *store_mode_info = store_info->mb_info;
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
tmp++;
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
int dissim = INT_MAX;
if (tmp->mbmi.ref_frame != INTRA_FRAME) {
@ -160,7 +160,7 @@ void vp8_cal_dissimilarity(VP8_COMP *cpi) {
int i;
if (cnt > 1) {
for (i = 1; i < cnt; i++) {
for (i = 1; i < cnt; ++i) {
if (mvx[i] > max_mvx)
max_mvx = mvx[i];
else if (mvx[i] < min_mvx)

View File

@ -334,7 +334,7 @@ static void reset_temporal_layer_change(VP8_COMP *cpi, VP8_CONFIG *oxcf,
cpi->current_layer = 0;
save_layer_context(cpi);
}
for (i = 0; i < curr_num_layers; i++) {
for (i = 0; i < curr_num_layers; ++i) {
LAYER_CONTEXT *lc = &cpi->layer_context[i];
if (i >= prev_num_layers) {
init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
@ -447,7 +447,7 @@ static void dealloc_compressor_data(VP8_COMP *cpi) {
VP8_COMMON *const pc = &cpi->common;
int i;
for (i = 0; i < pc->mb_rows; i++) {
for (i = 0; i < pc->mb_rows; ++i) {
pthread_mutex_destroy(&cpi->pmutex[i]);
}
vpx_free(cpi->pmutex);
@ -727,7 +727,7 @@ void vp8_set_speed_features(VP8_COMP *cpi) {
int ref_frames;
/* Initialise default mode frequency sampling variables */
for (i = 0; i < MAX_MODES; i++) {
for (i = 0; i < MAX_MODES; ++i) {
cpi->mode_check_freq[i] = 0;
}
@ -753,7 +753,7 @@ void vp8_set_speed_features(VP8_COMP *cpi) {
sf->improved_mv_pred = 1;
/* default thresholds to 0 */
for (i = 0; i < MAX_MODES; i++) sf->thresh_mult[i] = 0;
for (i = 0; i < MAX_MODES; ++i) sf->thresh_mult[i] = 0;
/* Count enabled references */
ref_frames = 1;
@ -910,7 +910,7 @@ void vp8_set_speed_features(VP8_COMP *cpi) {
min >>= 7;
for (i = 0; i < min; i++) {
for (i = 0; i < min; ++i) {
sum += cpi->mb.error_bins[i];
}
@ -918,7 +918,7 @@ void vp8_set_speed_features(VP8_COMP *cpi) {
sum = 0;
/* i starts from 2 to make sure thresh started from 2048 */
for (; i < 1024; i++) {
for (; i < 1024; ++i) {
sum += cpi->mb.error_bins[i];
if (10 * sum >=
@ -1167,7 +1167,7 @@ void vp8_alloc_compressor_data(VP8_COMP *cpi) {
/* De-allocate and re-allocate mutex */
if (cpi->pmutex != NULL) {
for (i = 0; i < prev_mb_rows; i++) {
for (i = 0; i < prev_mb_rows; ++i) {
pthread_mutex_destroy(&cpi->pmutex[i]);
}
vpx_free(cpi->pmutex);
@ -1177,7 +1177,7 @@ void vp8_alloc_compressor_data(VP8_COMP *cpi) {
CHECK_MEM_ERROR(cpi->pmutex,
vpx_malloc(sizeof(*cpi->pmutex) * cm->mb_rows));
if (cpi->pmutex) {
for (i = 0; i < cm->mb_rows; i++) {
for (i = 0; i < cm->mb_rows; ++i) {
pthread_mutex_init(&cpi->pmutex[i], NULL);
}
}
@ -1214,7 +1214,7 @@ static const int q_trans[] = {
int vp8_reverse_trans(int x) {
int i;
for (i = 0; i < 64; i++)
for (i = 0; i < 64; ++i)
if (q_trans[i] >= x) return i;
return 63;
@ -1311,7 +1311,7 @@ static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
unsigned int i;
double prev_layer_framerate = 0;
for (i = 0; i < cpi->oxcf.number_of_layers; i++) {
for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate);
prev_layer_framerate =
cpi->output_framerate / cpi->oxcf.rate_decimator[i];
@ -1324,7 +1324,7 @@ static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
cpi->fixed_divide[0] = 0;
for (i = 1; i < 512; i++) cpi->fixed_divide[i] = 0x80000 / i;
for (i = 1; i < 512; ++i) cpi->fixed_divide[i] = 0x80000 / i;
}
#endif
}
@ -1484,7 +1484,7 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) {
{
int i;
for (i = 0; i < MAX_MB_SEGMENTS; i++)
for (i = 0; i < MAX_MB_SEGMENTS; ++i)
cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout;
}
@ -1746,7 +1746,7 @@ struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) {
{
cpi->one_pass_frame_index = 0;
for (i = 0; i < MAX_LAG_BUFFERS; i++)
for (i = 0; i < MAX_LAG_BUFFERS; ++i)
{
cpi->one_pass_frame_stats[i].frames_so_far = 0;
cpi->one_pass_frame_stats[i].frame_intra_error = 0.0;
@ -1851,7 +1851,7 @@ struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) {
cpi->gf_rate_correction_factor = 1.0;
cpi->twopass.est_max_qcorrection_factor = 1.0;
for (i = 0; i < KEY_FRAME_CONTEXT; i++) {
for (i = 0; i < KEY_FRAME_CONTEXT; ++i) {
cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate;
}
@ -1894,7 +1894,7 @@ struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) {
vp8_set_speed_features(cpi);
/* Set starting values of RD threshold multipliers (128 = *1) */
for (i = 0; i < MAX_MODES; i++) {
for (i = 0; i < MAX_MODES; ++i) {
cpi->mb.rd_thresh_mult[i] = 128;
}
@ -2054,7 +2054,7 @@ void vp8_remove_compressor(VP8_COMP **ptr) {
fprintf(f,
"Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t"
"GLPsnrP\tVPXSSIM\t\n");
for (i = 0; i < (int)cpi->oxcf.number_of_layers; i++) {
for (i = 0; i < (int)cpi->oxcf.number_of_layers; ++i) {
double dr =
(double)cpi->bytes_in_layer[i] * 8.0 / 1000.0 / time_encoded;
double samples = 3.0 / 2 * cpi->frames_in_layer[i] *
@ -2112,7 +2112,7 @@ void vp8_remove_compressor(VP8_COMP **ptr) {
FILE *f = fopen("cxspeed.stt", "a");
cnt_pm /= cpi->common.MBs;
for (i = 0; i < 16; i++) fprintf(f, "%5d", frames_at_speed[i]);
for (i = 0; i < 16; ++i) fprintf(f, "%5d", frames_at_speed[i]);
fprintf(f, "\n");
fclose(f);
@ -2135,7 +2135,7 @@ void vp8_remove_compressor(VP8_COMP **ptr) {
{
int i;
for (i = 0; i < 10; i++) fprintf(f, "%8d, ", b_modes[i]);
for (i = 0; i < 10; ++i) fprintf(f, "%8d, ", b_modes[i]);
fprintf(f, "\n");
}
@ -2152,7 +2152,7 @@ void vp8_remove_compressor(VP8_COMP **ptr) {
{
int i;
for (i = 0; i < 15; i++) fprintf(f, "%8d, ", inter_b_modes[i]);
for (i = 0; i < 15; ++i) fprintf(f, "%8d, ", inter_b_modes[i]);
fprintf(f, "\n");
}
@ -2176,13 +2176,13 @@ void vp8_remove_compressor(VP8_COMP **ptr) {
fprintf(fmode,
"[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n");
for (i = 0; i < 10; i++) {
for (i = 0; i < 10; ++i) {
fprintf(fmode, " { /* Above Mode : %d */\n", i);
for (j = 0; j < 10; j++) {
for (j = 0; j < 10; ++j) {
fprintf(fmode, " {");
for (k = 0; k < 10; k++) {
for (k = 0; k < 10; ++k) {
if (!intra_mode_stats[i][j][k])
fprintf(fmode, " %5d, ", 1);
else
@ -2206,7 +2206,7 @@ void vp8_remove_compressor(VP8_COMP **ptr) {
int i;
FILE *f = fopen("tokenbits.stt", "a");
for (i = 0; i < 28; i++) fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
for (i = 0; i < 28; ++i) fprintf(f, "%8d", (int)(Sectionbits[i] / 256));
fprintf(f, "\n");
fclose(f);
@ -2283,8 +2283,8 @@ static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
unsigned char *border_orig = orig;
unsigned char *border_recon = recon;
for (border_row = 0; border_row < 16; border_row++) {
for (border_col = col; border_col < cols; border_col++) {
for (border_row = 0; border_row < 16; ++border_row) {
for (border_col = col; border_col < cols; ++border_col) {
diff = border_orig[border_col] - border_recon[border_col];
total_sse += diff * diff;
}
@ -2299,8 +2299,8 @@ static uint64_t calc_plane_error(unsigned char *orig, int orig_stride,
}
/* Handle odd-sized height */
for (; row < rows; row++) {
for (col = 0; col < cols; col++) {
for (; row < rows; ++row) {
for (col = 0; col < cols; ++col) {
diff = orig[col] - recon[col];
total_sse += diff * diff;
}
@ -2347,7 +2347,7 @@ static void generate_psnr_packet(VP8_COMP *cpi) {
pkt.data.psnr.samples[0] += width * height;
pkt.data.psnr.samples[3] = width * height;
for (i = 0; i < 4; i++)
for (i = 0; i < 4; ++i)
pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0,
(double)(pkt.data.psnr.sse[i]));
@ -2777,21 +2777,21 @@ void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame)
sprintf(filename, "cx\\y%04d.raw", this_frame);
yframe = fopen(filename, "wb");
for (i = 0; i < frame->y_height; i++)
for (i = 0; i < frame->y_height; ++i)
fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe);
fclose(yframe);
sprintf(filename, "cx\\u%04d.raw", this_frame);
yframe = fopen(filename, "wb");
for (i = 0; i < frame->uv_height; i++)
for (i = 0; i < frame->uv_height; ++i)
fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
fclose(yframe);
sprintf(filename, "cx\\v%04d.raw", this_frame);
yframe = fopen(filename, "wb");
for (i = 0; i < frame->uv_height; i++)
for (i = 0; i < frame->uv_height; ++i)
fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe);
fclose(yframe);
@ -3344,7 +3344,7 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, unsigned long *size,
unsigned int i;
cpi->bits_off_target = cpi->oxcf.starting_buffer_level;
cpi->buffer_level = cpi->oxcf.starting_buffer_level;
for (i = 0; i < cpi->oxcf.number_of_layers; i++) {
for (i = 0; i < cpi->oxcf.number_of_layers; ++i) {
LAYER_CONTEXT *lc = &cpi->layer_context[i];
lc->bits_off_target = lc->starting_buffer_level;
lc->buffer_level = lc->starting_buffer_level;
@ -3372,7 +3372,7 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, unsigned long *size,
} else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) {
closest_ref = ALTREF_FRAME;
}
for (i = 1; i <= 3; i++) {
for (i = 1; i <= 3; ++i) {
vpx_ref_frame_type_t ref_frame_type =
(vpx_ref_frame_type_t)((i == 3) ? 4 : i);
if (cpi->ref_frame_flags & ref_frame_type) {
@ -3396,7 +3396,7 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, unsigned long *size,
cpi->source_alt_ref_active = 0;
/* Reset the RD threshold multipliers to default of * 1 (128) */
for (i = 0; i < MAX_MODES; i++) {
for (i = 0; i < MAX_MODES; ++i) {
cpi->mb.rd_thresh_mult[i] = 128;
}
@ -3505,7 +3505,7 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, unsigned long *size,
/* Propagate bits saved by dropping the frame to higher
* layers
*/
for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; i++) {
for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
LAYER_CONTEXT *lc = &cpi->layer_context[i];
lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate);
if (lc->bits_off_target > lc->maximum_buffer_size)
@ -4207,8 +4207,8 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, unsigned long *size,
MODE_INFO *tmp = cm->mip;
if (cm->frame_type != KEY_FRAME) {
for (mb_row = 0; mb_row < cm->mb_rows + 1; mb_row++) {
for (mb_col = 0; mb_col < cm->mb_cols + 1; mb_col++) {
for (mb_row = 0; mb_row < cm->mb_rows + 1; ++mb_row) {
for (mb_col = 0; mb_col < cm->mb_cols + 1; ++mb_col) {
if (tmp->mbmi.ref_frame != INTRA_FRAME)
cpi->lfmv[mb_col + mb_row * (cm->mode_info_stride + 1)].as_int =
tmp->mbmi.mv.as_int;
@ -4234,8 +4234,8 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, unsigned long *size,
cpi->zeromv_count = 0;
if (cm->frame_type != KEY_FRAME) {
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
if (tmp->mbmi.mode == ZEROMV && tmp->mbmi.ref_frame == LAST_FRAME)
cpi->zeromv_count++;
tmp++;
@ -4357,7 +4357,7 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, unsigned long *size,
if (cpi->oxcf.number_of_layers > 1) {
unsigned int i;
for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; i++)
for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i)
cpi->layer_context[i].total_byte_count += (*size);
}
@ -4460,7 +4460,7 @@ static void encode_frame_to_data_rate(VP8_COMP *cpi, unsigned long *size,
if (cpi->oxcf.number_of_layers > 1) {
unsigned int i;
for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; i++) {
for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
LAYER_CONTEXT *lc = &cpi->layer_context[i];
int bits_off_for_this_layer = (int)(lc->target_bandwidth / lc->framerate -
cpi->projected_frame_size);
@ -4926,7 +4926,7 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags,
dp += dest_size / 10; /* reserve 1/10 for control partition */
cpi->partition_d_end[0] = dp;
for (i = 0; i < num_part; i++) {
for (i = 0; i < num_part; ++i) {
cpi->partition_d[i + 1] = dp;
dp += tok_part_buff_size;
cpi->partition_d_end[i + 1] = dp;
@ -4960,7 +4960,7 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags,
/* find a free buffer for the new frame */
{
int i = 0;
for (; i < NUM_YV12_BUFFERS; i++) {
for (; i < NUM_YV12_BUFFERS; ++i) {
if (!cm->yv12_fb[i].flags) {
cm->new_fb_idx = i;
break;
@ -5119,7 +5119,7 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags,
if (cpi->oxcf.number_of_layers > 1) {
unsigned int i;
for (i = cpi->current_layer; i < cpi->oxcf.number_of_layers; i++) {
for (i = cpi->current_layer; i < cpi->oxcf.number_of_layers; ++i) {
cpi->frames_in_layer[i]++;
cpi->bytes_in_layer[i] += *size;
@ -5228,7 +5228,7 @@ int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows,
}
// Translate the external delta q values to internal values.
for (i = 0; i < MAX_MB_SEGMENTS; i++)
for (i = 0; i < MAX_MB_SEGMENTS; ++i)
internal_delta_q[i] =
(delta_q[i] >= 0) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]];

View File

@ -89,7 +89,7 @@ static int is_skin_color(int y, int cb, int cr, int consec_zeromv) {
if (cb == 128 && cr == 128) return 0;
// Exit on very strong cb.
if (cb > 150 && cr < 110) return 0;
for (; i < 5; i++) {
for (; i < 5; ++i) {
int skin_color_diff = evaluate_skin_color_difference(cb, cr, i);
if (skin_color_diff < skin_threshold[i + 1]) {
if (y < 60 && skin_color_diff > 3 * (skin_threshold[i + 1] >> 2))
@ -259,7 +259,7 @@ static int pick_intra4x4block(MACROBLOCK *x, int ib,
unsigned char *yleft = dst - 1;
unsigned char top_left = Above[-1];
for (mode = B_DC_PRED; mode <= B_HE_PRED; mode++) {
for (mode = B_DC_PRED; mode <= B_HE_PRED; ++mode) {
int this_rd;
rate = mode_costs[mode];
@ -294,7 +294,7 @@ static int pick_intra4x4mby_modes(MACROBLOCK *mb, int *Rate, int *best_dist) {
bmode_costs = mb->inter_bmode_costs;
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
MODE_INFO *const mic = xd->mode_info_context;
const int mis = xd->mode_info_stride;
@ -354,7 +354,7 @@ static void pick_intra_mbuv_mode(MACROBLOCK *mb) {
int pred_error[4] = { 0, 0, 0, 0 }, best_error = INT_MAX;
MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
for (i = 0; i < 8; i++) {
for (i = 0; i < 8; ++i) {
uleft_col[i] = x->dst.u_buffer[i * x->dst.uv_stride - 1];
vleft_col[i] = x->dst.v_buffer[i * x->dst.uv_stride - 1];
}
@ -366,7 +366,7 @@ static void pick_intra_mbuv_mode(MACROBLOCK *mb) {
shift = 2;
if (x->up_available) {
for (i = 0; i < 8; i++) {
for (i = 0; i < 8; ++i) {
Uaverage += uabove_row[i];
Vaverage += vabove_row[i];
}
@ -375,7 +375,7 @@ static void pick_intra_mbuv_mode(MACROBLOCK *mb) {
}
if (x->left_available) {
for (i = 0; i < 8; i++) {
for (i = 0; i < 8; ++i) {
Uaverage += uleft_col[i];
Vaverage += vleft_col[i];
}
@ -387,8 +387,8 @@ static void pick_intra_mbuv_mode(MACROBLOCK *mb) {
expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
}
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++) {
for (i = 0; i < 8; ++i) {
for (j = 0; j < 8; ++j) {
int predu = uleft_col[i] + uabove_row[j] - utop_left;
int predv = vleft_col[i] + vabove_row[j] - vtop_left;
int u_p, v_p;
@ -434,7 +434,7 @@ static void pick_intra_mbuv_mode(MACROBLOCK *mb) {
}
}
for (i = DC_PRED; i <= TM_PRED; i++) {
for (i = DC_PRED; i <= TM_PRED; ++i) {
if (best_error > pred_error[i]) {
best_error = pred_error[i];
best_mode = (MB_PREDICTION_MODE)i;
@ -840,7 +840,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
/* if we encode a new mv this is important
* find the best new motion vector
*/
for (mode_index = 0; mode_index < MAX_MODES; mode_index++) {
for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
int frame_cost;
int this_rd = INT_MAX;
int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]];
@ -1382,7 +1382,7 @@ void vp8_pick_intra_mode(MACROBLOCK *x, int *rate_) {
pick_intra_mbuv_mode(x);
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
xd->mode_info_context->mbmi.mode = mode;
vp8_build_intra_predictors_mby_s(xd, xd->dst.y_buffer - xd->dst.y_stride,
xd->dst.y_buffer - 1, xd->dst.y_stride,

View File

@ -841,7 +841,7 @@ static void calc_pframe_target_size(VP8_COMP *cpi) {
unsigned int i;
// Propagate bits saved by dropping the frame to higher layers.
for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; i++) {
for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) {
LAYER_CONTEXT *lc = &cpi->layer_context[i];
lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate);
if (lc->bits_off_target > lc->maximum_buffer_size)
@ -1235,7 +1235,7 @@ static int estimate_keyframe_frequency(VP8_COMP *cpi) {
/* reset keyframe context and calculate weighted average of last
* KEY_FRAME_CONTEXT keyframes
*/
for (i = 0; i < KEY_FRAME_CONTEXT; i++) {
for (i = 0; i < KEY_FRAME_CONTEXT; ++i) {
if (i < KEY_FRAME_CONTEXT - 1)
cpi->prior_key_frame_distance[i] = cpi->prior_key_frame_distance[i + 1];
else

View File

@ -111,9 +111,9 @@ static void fill_token_costs(
p[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES]) {
int i, j, k;
for (i = 0; i < BLOCK_TYPES; i++)
for (j = 0; j < COEF_BANDS; j++)
for (k = 0; k < PREV_COEF_CONTEXTS; k++)
for (i = 0; i < BLOCK_TYPES; ++i)
for (j = 0; j < COEF_BANDS; ++j)
for (k = 0; k < PREV_COEF_CONTEXTS; ++k)
/* check for pt=0 and band > 1 if block type 0
* and 0 if blocktype 1
@ -192,7 +192,7 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, MACROBLOCK *x, int Qvalue) {
vp8_set_speed_features(cpi);
for (i = 0; i < MAX_MODES; i++) {
for (i = 0; i < MAX_MODES; ++i) {
x->mode_test_hit_counts[i] = 0;
}
@ -204,7 +204,7 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, MACROBLOCK *x, int Qvalue) {
cpi->RDDIV = 1;
cpi->RDMULT /= 100;
for (i = 0; i < MAX_MODES; i++) {
for (i = 0; i < MAX_MODES; ++i) {
if (cpi->sf.thresh_mult[i] < INT_MAX) {
x->rd_threshes[i] = cpi->sf.thresh_mult[i] * q / 100;
} else {
@ -216,7 +216,7 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, MACROBLOCK *x, int Qvalue) {
} else {
cpi->RDDIV = 100;
for (i = 0; i < MAX_MODES; i++) {
for (i = 0; i < MAX_MODES; ++i) {
if (cpi->sf.thresh_mult[i] < (INT_MAX / q)) {
x->rd_threshes[i] = cpi->sf.thresh_mult[i] * q;
} else {
@ -311,7 +311,7 @@ int vp8_block_error_c(short *coeff, short *dqcoeff) {
int i;
int error = 0;
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
int this_diff = coeff[i] - dqcoeff[i];
error += this_diff * this_diff;
}
@ -325,13 +325,13 @@ int vp8_mbblock_error_c(MACROBLOCK *mb, int dc) {
int i, j;
int berror, error = 0;
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
be = &mb->block[i];
bd = &mb->e_mbd.block[i];
berror = 0;
for (j = dc; j < 16; j++) {
for (j = dc; j < 16; ++j) {
int this_diff = be->coeff[j] - bd->dqcoeff[j];
berror += this_diff * this_diff;
}
@ -349,7 +349,7 @@ int vp8_mbuverror_c(MACROBLOCK *mb) {
int i;
int error = 0;
for (i = 16; i < 24; i++) {
for (i = 16; i < 24; ++i) {
be = &mb->block[i];
bd = &mb->e_mbd.block[i];
@ -414,7 +414,7 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type, ENTROPY_CONTEXT *a,
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
assert(eob <= 16);
for (; c < eob; c++) {
for (; c < eob; ++c) {
const int v = qcoeff_ptr[vp8_default_zig_zag1d[c]];
const int t = vp8_dct_value_tokens_ptr[v].Token;
cost += mb->token_costs[type][vp8_coef_bands[c]][pt][t];
@ -445,7 +445,7 @@ static int vp8_rdcost_mby(MACROBLOCK *mb) {
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
for (b = 0; b < 16; b++)
for (b = 0; b < 16; ++b)
cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_NO_DC,
ta + vp8_block2above[b], tl + vp8_block2left[b]);
@ -478,7 +478,7 @@ static void macro_block_yrd(MACROBLOCK *mb, int *Rate, int *Distortion) {
mb->short_walsh4x4(mb_y2->src_diff, mb_y2->coeff, 8);
/* Quantization */
for (b = 0; b < 16; b++) {
for (b = 0; b < 16; ++b) {
mb->quantize_b(&mb->block[b], &mb->e_mbd.block[b]);
}
@ -531,7 +531,7 @@ static int rd_pick_intra4x4block(MACROBLOCK *x, BLOCK *be, BLOCKD *b,
unsigned char *yleft = dst - 1;
unsigned char top_left = Above[-1];
for (mode = B_DC_PRED; mode <= B_HU_PRED; mode++) {
for (mode = B_DC_PRED; mode <= B_HU_PRED; ++mode) {
int this_rd;
int ratey;
@ -594,7 +594,7 @@ static int rd_pick_intra4x4mby_modes(MACROBLOCK *mb, int *Rate, int *rate_y,
bmode_costs = mb->inter_bmode_costs;
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
MODE_INFO *const mic = xd->mode_info_context;
const int mis = xd->mode_info_stride;
B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
@ -641,7 +641,7 @@ static int rd_pick_intra16x16mby_mode(MACROBLOCK *x, int *Rate, int *rate_y,
MACROBLOCKD *xd = &x->e_mbd;
/* Y Search for 16x16 intra prediction mode */
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
xd->mode_info_context->mbmi.mode = mode;
vp8_build_intra_predictors_mby_s(xd, xd->dst.y_buffer - xd->dst.y_stride,
@ -681,7 +681,7 @@ static int rd_cost_mbuv(MACROBLOCK *mb) {
ta = (ENTROPY_CONTEXT *)&t_above;
tl = (ENTROPY_CONTEXT *)&t_left;
for (b = 16; b < 24; b++)
for (b = 16; b < 24; ++b)
cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_UV,
ta + vp8_block2above[b], tl + vp8_block2left[b]);
@ -735,7 +735,7 @@ static void rd_pick_intra_mbuv_mode(MACROBLOCK *x, int *rate,
int rate_to;
MACROBLOCKD *xd = &x->e_mbd;
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
int this_rate;
int this_distortion;
int this_rd;
@ -868,7 +868,7 @@ static int rdcost_mbsegment_y(MACROBLOCK *mb, const int *labels,
int b;
MACROBLOCKD *x = &mb->e_mbd;
for (b = 0; b < 16; b++)
for (b = 0; b < 16; ++b)
if (labels[b] == which_label)
cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_WITH_DC,
ta + vp8_block2above[b], tl + vp8_block2left[b]);
@ -883,7 +883,7 @@ static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x,
int pre_stride = x->e_mbd.pre.y_stride;
unsigned char *base_pre = x->e_mbd.pre.y_buffer;
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
if (labels[i] == which_label) {
BLOCKD *bd = &x->e_mbd.block[i];
BLOCK *be = &x->block[i];
@ -977,14 +977,14 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x, BEST_SEG_INFO *bsi,
this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
br += rate;
for (i = 0; i < label_count; i++) {
for (i = 0; i < label_count; ++i) {
int_mv mode_mv[B_MODE_COUNT];
int best_label_rd = INT_MAX;
B_PREDICTION_MODE mode_selected = ZERO4X4;
int bestlabelyrate = 0;
/* search for the best motion vector on this segment */
for (this_mode = LEFT4X4; this_mode <= NEW4X4; this_mode++) {
for (this_mode = LEFT4X4; this_mode <= NEW4X4; ++this_mode) {
int this_rd;
int distortion;
int labelyrate;
@ -1161,7 +1161,7 @@ static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x, BEST_SEG_INFO *bsi,
bsi->segment_num = segmentation;
/* store everything needed to come back to this!! */
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
bsi->modes[i] = x->partition_info->bmi[i].mode;
bsi->eobs[i] = x->e_mbd.eobs[i];
@ -1199,7 +1199,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
bsi.mvthresh = mvthresh;
bsi.mdcounts = mdcounts;
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
bsi.modes[i] = ZERO4X4;
}
@ -1289,7 +1289,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
}
/* set it to the best */
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
BLOCKD *bd = &x->e_mbd.block[i];
bd->bmi.mv.as_int = bsi.mvs[i].as_int;
@ -1304,7 +1304,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
x->e_mbd.mode_info_context->mbmi.partitioning = bsi.segment_num;
x->partition_info->count = vp8_mbsplit_count[bsi.segment_num];
for (i = 0; i < x->partition_info->count; i++) {
for (i = 0; i < x->partition_info->count; ++i) {
int j;
j = vp8_mbsplit_offset[bsi.segment_num][i];
@ -1431,7 +1431,7 @@ void vp8_mv_pred(VP8_COMP *cpi, MACROBLOCKD *xd, const MODE_INFO *here,
vcnt++;
}
for (i = 0; i < vcnt; i++) {
for (i = 0; i < vcnt; ++i) {
if (near_ref[near_sadidx[i]] != INTRA_FRAME) {
if (here->mbmi.ref_frame == near_ref[near_sadidx[i]]) {
mv.as_int = near_mvs[near_sadidx[i]].as_int;
@ -1446,7 +1446,7 @@ void vp8_mv_pred(VP8_COMP *cpi, MACROBLOCKD *xd, const MODE_INFO *here,
}
if (!find) {
for (i = 0; i < vcnt; i++) {
for (i = 0; i < vcnt; ++i) {
mvx[i] = near_mvs[i].as_mv.row;
mvy[i] = near_mvs[i].as_mv.col;
}
@ -1543,7 +1543,7 @@ static void rd_update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv) {
if (x->e_mbd.mode_info_context->mbmi.mode == SPLITMV) {
int i;
for (i = 0; i < x->partition_info->count; i++) {
for (i = 0; i < x->partition_info->count; ++i) {
if (x->partition_info->bmi[i].mode == NEW4X4) {
x->MVcount[0][mv_max + ((x->partition_info->bmi[i].mv.as_mv.row -
best_ref_mv->as_mv.row) >>
@ -1657,10 +1657,10 @@ static int calculate_final_rd_costs(int this_rd, RATE_DISTORTION *rd,
tteob = 0;
if (has_y2_block) tteob += x->e_mbd.eobs[24];
for (i = 0; i < 16; i++) tteob += (x->e_mbd.eobs[i] > has_y2_block);
for (i = 0; i < 16; ++i) tteob += (x->e_mbd.eobs[i] > has_y2_block);
if (x->e_mbd.mode_info_context->mbmi.ref_frame) {
for (i = 16; i < 24; i++) tteob += x->e_mbd.eobs[i];
for (i = 16; i < 24; ++i) tteob += x->e_mbd.eobs[i];
} else
tteob += uv_intra_tteob;
@ -1708,7 +1708,7 @@ static void update_best_mode(BEST_MODE *best_mode, int this_rd,
if ((this_mode == B_PRED) || (this_mode == SPLITMV)) {
int i;
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
best_mode->bmodes[i] = x->e_mbd.block[i].bmi;
}
}
@ -1790,7 +1790,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
x->skip = 0;
for (mode_index = 0; mode_index < MAX_MODES; mode_index++) {
for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
int this_rd = INT_MAX;
int disable_skip = 0;
int other_cost = 0;
@ -1896,7 +1896,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
* block's intra eobs will be overwritten when we check inter modes,
* we need to save uv_intra_tteob here.
*/
for (i = 16; i < 24; i++) uv_intra_tteob += x->e_mbd.eobs[i];
for (i = 16; i < 24; ++i) uv_intra_tteob += x->e_mbd.eobs[i];
uv_intra_done = 1;
}
@ -2298,12 +2298,12 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
sizeof(MB_MODE_INFO));
if (best_mode.mbmode.mode == B_PRED) {
for (i = 0; i < 16; i++)
for (i = 0; i < 16; ++i)
xd->mode_info_context->bmi[i].as_mode = best_mode.bmodes[i].as_mode;
}
if (best_mode.mbmode.mode == SPLITMV) {
for (i = 0; i < 16; i++)
for (i = 0; i < 16; ++i)
xd->mode_info_context->bmi[i].mv.as_int = best_mode.bmodes[i].mv.as_int;
memcpy(x->partition_info, &best_mode.partition, sizeof(PARTITION_INFO));

View File

@ -22,8 +22,8 @@ extern "C" {
static INLINE void insertsortmv(int arr[], int len) {
int i, j, k;
for (i = 1; i <= len - 1; i++) {
for (j = 0; j < i; j++) {
for (i = 1; i <= len - 1; ++i) {
for (j = 0; j < i; ++j) {
if (arr[j] > arr[i]) {
int temp;
@ -40,8 +40,8 @@ static INLINE void insertsortmv(int arr[], int len) {
static INLINE void insertsortsad(int arr[], int idx[], int len) {
int i, j, k;
for (i = 1; i <= len - 1; i++) {
for (j = 0; j < i; j++) {
for (i = 1; i <= len - 1; ++i) {
for (j = 0; j < i; ++j) {
if (arr[j] > arr[i]) {
int temp, tempi;
@ -101,7 +101,7 @@ static INLINE void get_reference_search_order(const VP8_COMP *cpi,
if (cpi->ref_frame_flags & VP8_LAST_FRAME) ref_frame_map[i++] = LAST_FRAME;
if (cpi->ref_frame_flags & VP8_GOLD_FRAME) ref_frame_map[i++] = GOLDEN_FRAME;
if (cpi->ref_frame_flags & VP8_ALTR_FRAME) ref_frame_map[i++] = ALTREF_FRAME;
for (; i < 4; i++) ref_frame_map[i] = -1;
for (; i < 4; ++i) ref_frame_map[i] = -1;
}
extern void vp8_mv_pred(VP8_COMP *cpi, MACROBLOCKD *xd, const MODE_INFO *here,

View File

@ -24,9 +24,9 @@ void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x) {
cpi->gf_active_count = cm->mb_rows * cm->mb_cols;
} else {
/* for each macroblock row in image */
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) {
/* for each macroblock col in image */
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) {
/* If using golden then set GF active flag if not already set.
* If using last frame 0,0 mode then leave flag as it is
* else if using non 0,0 motion or intra modes then clear

View File

@ -76,7 +76,7 @@ void vp8_temporal_filter_apply_c(unsigned char *frame1, unsigned int stride,
int byte = 0;
const int rounding = strength > 0 ? 1 << (strength - 1) : 0;
for (i = 0, k = 0; i < block_size; i++) {
for (i = 0, k = 0; i < block_size; ++i) {
for (j = 0; j < block_size; j++, k++) {
int src_byte = frame1[byte];
int pixel_value = *frame2++;
@ -205,7 +205,7 @@ static void vp8_temporal_filter_iterate_c(VP8_COMP *cpi, int frame_count,
unsigned char *u_buffer = mbd->pre.u_buffer;
unsigned char *v_buffer = mbd->pre.v_buffer;
for (mb_row = 0; mb_row < mb_rows; mb_row++) {
for (mb_row = 0; mb_row < mb_rows; ++mb_row) {
#if ALT_REF_MC_ENABLED
/* Source frames are extended to 16 pixels. This is different than
* L/A/G reference frames that have a border of 32 (VP8BORDERINPIXELS)
@ -222,7 +222,7 @@ static void vp8_temporal_filter_iterate_c(VP8_COMP *cpi, int frame_count,
cpi->mb.mv_row_max = ((cpi->common.mb_rows - 1 - mb_row) * 16) + (16 - 5);
#endif
for (mb_col = 0; mb_col < mb_cols; mb_col++) {
for (mb_col = 0; mb_col < mb_cols; ++mb_col) {
int i, j, k;
int stride;
@ -234,7 +234,7 @@ static void vp8_temporal_filter_iterate_c(VP8_COMP *cpi, int frame_count,
cpi->mb.mv_col_max = ((cpi->common.mb_cols - 1 - mb_col) * 16) + (16 - 5);
#endif
for (frame = 0; frame < frame_count; frame++) {
for (frame = 0; frame < frame_count; ++frame) {
if (cpi->frames[frame] == NULL) continue;
mbd->block[0].bmi.mv.as_mv.row = 0;
@ -287,7 +287,7 @@ static void vp8_temporal_filter_iterate_c(VP8_COMP *cpi, int frame_count,
dst1 = cpi->alt_ref_buffer.y_buffer;
stride = cpi->alt_ref_buffer.y_stride;
byte = mb_y_offset;
for (i = 0, k = 0; i < 16; i++) {
for (i = 0, k = 0; i < 16; ++i) {
for (j = 0; j < 16; j++, k++) {
unsigned int pval = accumulator[k] + (count[k] >> 1);
pval *= cpi->fixed_divide[count[k]];
@ -306,7 +306,7 @@ static void vp8_temporal_filter_iterate_c(VP8_COMP *cpi, int frame_count,
dst2 = cpi->alt_ref_buffer.v_buffer;
stride = cpi->alt_ref_buffer.uv_stride;
byte = mb_uv_offset;
for (i = 0, k = 256; i < 8; i++) {
for (i = 0, k = 256; i < 8; ++i) {
for (j = 0; j < 8; j++, k++) {
int m = k + 64;
@ -413,7 +413,7 @@ void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance) {
/* Setup frame pointers, NULL indicates frame not included in filter */
memset(cpi->frames, 0, max_frames * sizeof(YV12_BUFFER_CONFIG *));
for (frame = 0; frame < frames_to_blur; frame++) {
for (frame = 0; frame < frames_to_blur; ++frame) {
int which_buffer = start_frame - frame;
struct lookahead_entry *buf =
vp8_lookahead_peek(cpi->lookahead, which_buffer, PEEK_FORWARD);

View File

@ -146,7 +146,7 @@ static void tokenize2nd_order_b(MACROBLOCK *x, TOKENEXTRA **tp, VP8_COMP *cpi) {
t++;
c = 1;
for (; c < eob; c++) {
for (; c < eob; ++c) {
rc = vp8_default_zig_zag1d[c];
band = vp8_coef_bands[c];
v = qcoeff_ptr[rc];
@ -238,7 +238,7 @@ static void tokenize1st_order_b(
c++;
assert(eob <= 16);
for (; c < eob; c++) {
for (; c < eob; ++c) {
rc = vp8_default_zig_zag1d[c];
band = vp8_coef_bands[c];
v = qcoeff_ptr[rc];
@ -307,7 +307,7 @@ static void tokenize1st_order_b(
c = 1;
assert(eob <= 16);
for (; c < eob; c++) {
for (; c < eob; ++c) {
rc = vp8_default_zig_zag1d[c];
band = vp8_coef_bands[c];
v = qcoeff_ptr[rc];
@ -346,10 +346,10 @@ static int mb_is_skippable(MACROBLOCKD *x, int has_y2_block) {
int i = 0;
if (has_y2_block) {
for (i = 0; i < 16; i++) skip &= (x->eobs[i] < 2);
for (i = 0; i < 16; ++i) skip &= (x->eobs[i] < 2);
}
for (; i < 24 + has_y2_block; i++) skip &= (!x->eobs[i]);
for (; i < 24 + has_y2_block; ++i) skip &= (!x->eobs[i]);
return skip;
}
@ -515,11 +515,11 @@ void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) {
plane_type = 0;
}
for (b = 0; b < 16; b++)
for (b = 0; b < 16; ++b)
stuff1st_order_b(t, A + vp8_block2above[b], L + vp8_block2left[b],
plane_type, cpi, x);
for (b = 16; b < 24; b++)
for (b = 16; b < 24; ++b)
stuff1st_order_buv(t, A + vp8_block2above[b], L + vp8_block2left[b], cpi,
x);
}

View File

@ -26,7 +26,7 @@ void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d) {
short *dequant_ptr = d->dequant;
eob = -1;
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
rc = vp8_default_zig_zag1d[i];
z = coeff_ptr[rc];
@ -65,7 +65,7 @@ void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d) {
eob = -1;
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
rc = vp8_default_zig_zag1d[i];
z = coeff_ptr[rc];
@ -98,7 +98,7 @@ void vp8_quantize_mby(MACROBLOCK *x) {
int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED &&
x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
for (i = 0; i < 16; i++) x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
for (i = 0; i < 16; ++i) x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
if (has_2nd_order) x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
}
@ -108,14 +108,14 @@ void vp8_quantize_mb(MACROBLOCK *x) {
int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED &&
x->e_mbd.mode_info_context->mbmi.mode != SPLITMV);
for (i = 0; i < 24 + has_2nd_order; i++)
for (i = 0; i < 24 + has_2nd_order; ++i)
x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
}
void vp8_quantize_mbuv(MACROBLOCK *x) {
int i;
for (i = 16; i < 24; i++) x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
for (i = 16; i < 24; ++i) x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
}
static const int qrounding_factors[129] = {
@ -164,7 +164,7 @@ static void invert_quant(int improved_quant, short *quant, short *shift,
unsigned t;
int l, m;
t = d;
for (l = 0; t > 1; l++) t >>= 1;
for (l = 0; t > 1; ++l) t >>= 1;
m = 1 + (1 << (16 + l)) / d;
*quant = (short)(m - (1 << 16));
*shift = l;
@ -186,7 +186,7 @@ void vp8cx_init_quantizer(VP8_COMP *cpi) {
int zbin_boost[16] = { 0, 0, 8, 10, 12, 14, 16, 20,
24, 28, 32, 36, 40, 44, 44, 44 };
for (Q = 0; Q < QINDEX_RANGE; Q++) {
for (Q = 0; Q < QINDEX_RANGE; ++Q) {
/* dc values */
quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val;
@ -244,7 +244,7 @@ void vp8cx_init_quantizer(VP8_COMP *cpi) {
cpi->common.UVdequant[Q][1] = quant_val;
cpi->zrun_zbin_boost_uv[Q][1] = (quant_val * zbin_boost[1]) >> 7;
for (i = 2; i < 16; i++) {
for (i = 2; i < 16; ++i) {
cpi->Y1quant_fast[Q][i] = cpi->Y1quant_fast[Q][1];
cpi->Y1quant[Q][i] = cpi->Y1quant[Q][1];
cpi->Y1quant_shift[Q][i] = cpi->Y1quant_shift[Q][1];
@ -322,7 +322,7 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip) {
xd->dequant_y2[0] = cpi->common.Y2dequant[QIndex][0];
xd->dequant_uv[0] = cpi->common.UVdequant[QIndex][0];
for (i = 1; i < 16; i++) {
for (i = 1; i < 16; ++i) {
xd->dequant_y1_dc[i] = xd->dequant_y1[i] =
cpi->common.Y1dequant[QIndex][1];
xd->dequant_y2[i] = cpi->common.Y2dequant[QIndex][1];
@ -333,15 +333,15 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip) {
* the quantizer code uses a passed in pointer to the dequant constants.
* This will also require modifications to the x86 and neon assembly.
* */
for (i = 0; i < 16; i++) x->e_mbd.block[i].dequant = xd->dequant_y1;
for (i = 16; i < 24; i++) x->e_mbd.block[i].dequant = xd->dequant_uv;
for (i = 0; i < 16; ++i) x->e_mbd.block[i].dequant = xd->dequant_y1;
for (i = 16; i < 24; ++i) x->e_mbd.block[i].dequant = xd->dequant_uv;
x->e_mbd.block[24].dequant = xd->dequant_y2;
#endif
/* Y */
zbin_extra = ZBIN_EXTRA_Y;
for (i = 0; i < 16; i++) {
for (i = 0; i < 16; ++i) {
x->block[i].quant = cpi->Y1quant[QIndex];
x->block[i].quant_fast = cpi->Y1quant_fast[QIndex];
x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
@ -354,7 +354,7 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip) {
/* UV */
zbin_extra = ZBIN_EXTRA_UV;
for (i = 16; i < 24; i++) {
for (i = 16; i < 24; ++i) {
x->block[i].quant = cpi->UVquant[QIndex];
x->block[i].quant_fast = cpi->UVquant_fast[QIndex];
x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
@ -388,12 +388,12 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip) {
/* Y */
zbin_extra = ZBIN_EXTRA_Y;
for (i = 0; i < 16; i++) x->block[i].zbin_extra = (short)zbin_extra;
for (i = 0; i < 16; ++i) x->block[i].zbin_extra = (short)zbin_extra;
/* UV */
zbin_extra = ZBIN_EXTRA_UV;
for (i = 16; i < 24; i++) x->block[i].zbin_extra = (short)zbin_extra;
for (i = 16; i < 24; ++i) x->block[i].zbin_extra = (short)zbin_extra;
/* Y2 */
zbin_extra = ZBIN_EXTRA_Y2;
@ -413,12 +413,12 @@ void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x) {
/* Y */
zbin_extra = ZBIN_EXTRA_Y;
for (i = 0; i < 16; i++) x->block[i].zbin_extra = (short)zbin_extra;
for (i = 0; i < 16; ++i) x->block[i].zbin_extra = (short)zbin_extra;
/* UV */
zbin_extra = ZBIN_EXTRA_UV;
for (i = 16; i < 24; i++) x->block[i].zbin_extra = (short)zbin_extra;
for (i = 16; i < 24; ++i) x->block[i].zbin_extra = (short)zbin_extra;
/* Y2 */
zbin_extra = ZBIN_EXTRA_Y2;

View File

@ -228,7 +228,7 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx,
unsigned int i;
RANGE_CHECK_HI(cfg, ts_periodicity, 16);
for (i = 1; i < cfg->ts_number_layers; i++)
for (i = 1; i < cfg->ts_number_layers; ++i)
if (cfg->ts_target_bitrate[i] <= cfg->ts_target_bitrate[i - 1] &&
cfg->rc_target_bitrate > 0)
ERROR("ts_target_bitrate entries are not strictly increasing");

View File

@ -392,7 +392,7 @@ static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx,
xd->dst = pc->yv12_fb[pc->new_fb_idx];
#if CONFIG_MULTITHREAD
for (i = 0; i < pbi->allocated_decoding_thread_count; i++) {
for (i = 0; i < pbi->allocated_decoding_thread_count; ++i) {
pbi->mb_row_di[i].mbd.dst = pc->yv12_fb[pc->new_fb_idx];
vp8_build_block_doffsets(&pbi->mb_row_di[i].mbd);
}