Merge "Overhauling the thresholds and mixing proportions for mfqe postprocessor."

Deb Mukherjee 2012-01-20 08:45:42 -08:00 committed by Gerrit Code Review
commit f357e5e2f7


@@ -736,7 +736,7 @@ static void multiframe_quality_enhance_block
         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
     };
     int blksizeby2 = blksize >> 1;
-    int blksizesq = blksize * blksize;
+    int qdiff = qcurr - qprev;
     int i, j;
     unsigned char *yp;
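
Note on this hunk: blksizesq drops out because the next hunk normalizes both act and sad to rounded per-pixel averages before thresholding, so the threshold no longer needs to scale with block area. A minimal sketch of that normalization (the helper name is illustrative, not from the patch); for blksize 16, 8 and 4 it yields exactly the (x+128)>>8, (x+32)>>6 and (x+8)>>4 expressions seen below:

    /* Rounded per-pixel average of a block-wide sum:
     * (sum + half) >> bits, where bits = log2(blksize * blksize). */
    static unsigned int per_pixel_avg(unsigned int sum, int blksize)
    {
        int bits = 0;
        int pixels = blksize * blksize;
        while (pixels >>= 1) bits++;   /* bits = log2(blksize^2) */
        return (sum + (1u << (bits - 1))) >> bits;
    }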
@@ -749,27 +749,28 @@ static void multiframe_quality_enhance_block
     unsigned int act, sse, sad, thr;
     if (blksize == 16)
     {
-        act = vp8_variance_var16x16(y, y_stride, VP8_ZEROS, 0, &sse);
-        sad = vp8_variance_sad16x16(y, y_stride, yd, yd_stride, 0);
+        act = (vp8_variance_var16x16(yd, yd_stride, VP8_ZEROS, 0, &sse)+128)>>8;
+        sad = (vp8_variance_sad16x16(y, y_stride, yd, yd_stride, 0)+128)>>8;
     }
     else if (blksize == 8)
     {
-        act = vp8_variance_var8x8(y, y_stride, VP8_ZEROS, 0, &sse);
-        sad = vp8_variance_sad8x8(y, y_stride, yd, yd_stride, 0);
+        act = (vp8_variance_var8x8(yd, yd_stride, VP8_ZEROS, 0, &sse)+32)>>6;
+        sad = (vp8_variance_sad8x8(y, y_stride, yd, yd_stride, 0)+32)>>6;
     }
     else
     {
-        act = vp8_variance_var4x4(y, y_stride, VP8_ZEROS, 0, &sse);
-        sad = vp8_variance_sad4x4(y, y_stride, yd, yd_stride, 0);
+        act = (vp8_variance_var4x4(yd, yd_stride, VP8_ZEROS, 0, &sse)+8)>>4;
+        sad = (vp8_variance_sad4x4(y, y_stride, yd, yd_stride, 0)+8)>>4;
     }
-    thr = 6 * blksizesq + (act >> 3);
-    if (thr > 12 * blksizesq) thr = 12 * blksizesq;
-    // These thresholds should be adapted later based on qcurr and qprev
+    /* thr = qdiff/8 + log2(act) + log4(qprev) */
+    thr = (qdiff>>3);
+    while (act>>=1) thr++;
+    while (qprev>>=2) thr++;
     if (sad < thr)
     {
         static const int roundoff = (1 << (MFQE_PRECISION - 1));
         int ifactor = (sad << MFQE_PRECISION) / thr;
+        ifactor >>= (qdiff >> 5);
         // TODO: SIMD optimize this section
         if (ifactor)
         {
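
Taken together, the new threshold is thr = qdiff/8 + floor(log2(act)) + floor(log4(qprev)), as the added comment states; the two while loops are shift-count integer logarithms that consume act and qprev in the process. A self-contained sketch of the resulting mixing-factor computation (MFQE_PRECISION assumed to be 4, as defined elsewhere in this file; qcurr >= qprev assumed, which is the regime MFQE targets, so qdiff is non-negative):

    #define MFQE_PRECISION 4

    /* Illustrative restatement of the hunk's logic: returns the blend
     * factor (in MFQE_PRECISION fractional bits), or 0 when the block
     * differs too much from the reference to blend safely. */
    static int mfqe_mix_factor(int sad, int act, int qcurr, int qprev)
    {
        int qdiff = qcurr - qprev;   /* assumed >= 0 */
        int thr = qdiff >> 3;
        int ifactor;

        while (act >>= 1) thr++;     /* += floor(log2(act)) */
        while (qprev >>= 2) thr++;   /* += floor(log4(qprev)) */

        if (sad >= thr)              /* too different: no mixing */
            return 0;

        ifactor = (sad << MFQE_PRECISION) / thr;  /* thr >= 1 here */
        ifactor >>= (qdiff >> 5);    /* shrink as the quality gap widens */
        return ifactor;
    }

Because sad < thr and sad >= 0 guarantee thr >= 1, the division cannot trap, and the final right shift halves the blend factor for every 32 steps of q difference.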
@@ -861,41 +862,44 @@ void vp8_multiframe_quality_enhance
             if (((frame_type == INTER_FRAME &&
                   abs(mode_info_context->mbmi.mv.as_mv.row) <= 10 &&
                   abs(mode_info_context->mbmi.mv.as_mv.col) <= 10) ||
-                 (frame_type == KEY_FRAME)) &&
-                 mode_info_context->mbmi.mode != B_PRED)
+                 (frame_type == KEY_FRAME)))
             {
-                multiframe_quality_enhance_block(16,
-                                                 qcurr,
-                                                 qprev,
-                                                 y_ptr,
-                                                 u_ptr,
-                                                 v_ptr,
-                                                 show->y_stride,
-                                                 show->uv_stride,
-                                                 yd_ptr,
-                                                 ud_ptr,
-                                                 vd_ptr,
-                                                 dest->y_stride,
-                                                 dest->uv_stride);
-            }
-            else if (mode_info_context->mbmi.mode == B_PRED)
-            {
-                int i, j;
-                for (i=0; i<2; ++i)
-                    for (j=0; j<2; ++j)
-                        multiframe_quality_enhance_block(8,
-                                                         qcurr,
-                                                         qprev,
-                                                         y_ptr + 8*(i*show->y_stride+j),
-                                                         u_ptr + 4*(i*show->uv_stride+j),
-                                                         v_ptr + 4*(i*show->uv_stride+j),
-                                                         show->y_stride,
-                                                         show->uv_stride,
-                                                         yd_ptr + 8*(i*dest->y_stride+j),
-                                                         ud_ptr + 4*(i*dest->uv_stride+j),
-                                                         vd_ptr + 4*(i*dest->uv_stride+j),
-                                                         dest->y_stride,
-                                                         dest->uv_stride);
+                if (mode_info_context->mbmi.mode == B_PRED || mode_info_context->mbmi.mode == SPLITMV)
+                {
+                    int i, j;
+                    for (i=0; i<2; ++i)
+                        for (j=0; j<2; ++j)
+                            multiframe_quality_enhance_block(8,
+                                                             qcurr,
+                                                             qprev,
+                                                             y_ptr + 8*(i*show->y_stride+j),
+                                                             u_ptr + 4*(i*show->uv_stride+j),
+                                                             v_ptr + 4*(i*show->uv_stride+j),
+                                                             show->y_stride,
+                                                             show->uv_stride,
+                                                             yd_ptr + 8*(i*dest->y_stride+j),
+                                                             ud_ptr + 4*(i*dest->uv_stride+j),
+                                                             vd_ptr + 4*(i*dest->uv_stride+j),
+                                                             dest->y_stride,
+                                                             dest->uv_stride);
+                }
+                else
+                {
+                    multiframe_quality_enhance_block(16,
+                                                     qcurr,
+                                                     qprev,
+                                                     y_ptr,
+                                                     u_ptr,
+                                                     v_ptr,
+                                                     show->y_stride,
+                                                     show->uv_stride,
+                                                     yd_ptr,
+                                                     ud_ptr,
+                                                     vd_ptr,
+                                                     dest->y_stride,
+                                                     dest->uv_stride);
+                }
             }
             else
             {
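
With this restructure, the low-motion/keyframe gate is checked first, and the block size is chosen inside it: macroblocks coded with sub-16x16 modes, now SPLITMV as well as B_PRED, are enhanced as four 8x8 quadrants. The quadrant addressing follows from 4:2:0 subsampling: an 8x8 luma quadrant covers a 4x4 chroma area, hence the 8*(i*stride+j) luma and 4*(i*stride+j) chroma offsets. A sketch of just that addressing (the plane struct and callback are stand-ins, not libvpx types):

    struct mb_planes {
        unsigned char *y, *u, *v;
        int y_stride, uv_stride;
    };

    typedef void (*enhance_8x8_fn)(unsigned char *y, unsigned char *u,
                                   unsigned char *v, int y_stride,
                                   int uv_stride);

    /* Visit the four 8x8 luma quadrants of one 16x16 macroblock,
     * using the same pointer arithmetic as the loop added above. */
    static void for_each_quadrant(const struct mb_planes *p, enhance_8x8_fn fn)
    {
        int i, j;
        for (i = 0; i < 2; ++i)
            for (j = 0; j < 2; ++j)
                fn(p->y + 8 * (i * p->y_stride + j),
                   p->u + 4 * (i * p->uv_stride + j),
                   p->v + 4 * (i * p->uv_stride + j),
                   p->y_stride, p->uv_stride);
    }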