/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "vpx_ports/config.h"
#include "vpx/vpx_integer.h"
#include "recon.h"
#include "subpixel.h"
#include "blockd.h"
#include "reconinter.h"
#if CONFIG_RUNTIME_CPU_DETECT
#include "onyxc_int.h"
#endif

void vp8_copy_mem16x16_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 16; r++)
    {
#if !(CONFIG_FAST_UNALIGNED)
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
        dst[8] = src[8];
        dst[9] = src[9];
        dst[10] = src[10];
        dst[11] = src[11];
        dst[12] = src[12];
        dst[13] = src[13];
        dst[14] = src[14];
        dst[15] = src[15];
#else
        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
        ((uint32_t *)dst)[2] = ((uint32_t *)src)[2];
        ((uint32_t *)dst)[3] = ((uint32_t *)src)[3];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

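/* Editorial note: the vp8_avg_mem* variants below blend a new prediction into
 * the destination with rounding, dst[n] = (dst[n] + src[n] + 1) >> 1, instead
 * of overwriting it. They are used when a second (dual) predictor is averaged
 * with the first one; see vp8_build_2nd_inter16x16_predictors_mb() below.
 */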
void vp8_avg_mem16x16_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 16; r++)
    {
        int n;

        for (n = 0; n < 16; n++)
        {
            dst[n] = (dst[n] + src[n] + 1) >> 1;
        }

        src += src_stride;
        dst += dst_stride;
    }
}

void vp8_copy_mem8x8_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 8; r++)
    {
#if !(CONFIG_FAST_UNALIGNED)
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
#else
        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

void vp8_avg_mem8x8_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 8; r++)
    {
        int n;

        for (n = 0; n < 8; n++)
        {
            dst[n] = (dst[n] + src[n] + 1) >> 1;
        }

        src += src_stride;
        dst += dst_stride;
    }
}

void vp8_copy_mem8x4_c(
    unsigned char *src,
    int src_stride,
    unsigned char *dst,
    int dst_stride)
{
    int r;

    for (r = 0; r < 4; r++)
    {
#if !(CONFIG_FAST_UNALIGNED)
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[3] = src[3];
        dst[4] = src[4];
        dst[5] = src[5];
        dst[6] = src[6];
        dst[7] = src[7];
#else
        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0];
        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1];
#endif
        src += src_stride;
        dst += dst_stride;
    }
}

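/* Editorial summary: build the inter predictor for a single 4x4 block. If the
 * block MV has a fractional (sub-pel) component, apply the supplied sub-pixel
 * filter; otherwise copy the 4x4 region straight from the reference buffer.
 */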
void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf)
{
    int r;
    unsigned char *ptr_base;
    unsigned char *ptr;
    unsigned char *pred_ptr = d->predictor;

    ptr_base = *(d->base_pre);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
        ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
#if CONFIG_SIXTEENTH_SUBPEL_UV
        sppf(ptr, d->pre_stride, (d->bmi.mv.as_mv.col & 7) << 1, (d->bmi.mv.as_mv.row & 7) << 1, pred_ptr, pitch);
#else
        sppf(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
#endif
    }
    else
    {
        ptr_base += d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
        ptr = ptr_base;

        for (r = 0; r < 4; r++)
        {
#if !(CONFIG_FAST_UNALIGNED)
            pred_ptr[0] = ptr[0];
            pred_ptr[1] = ptr[1];
            pred_ptr[2] = ptr[2];
            pred_ptr[3] = ptr[3];
#else
            *(uint32_t *)pred_ptr = *(uint32_t *)ptr;
#endif
            pred_ptr += pitch;
            ptr += d->pre_stride;
        }
    }
}

static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, int pitch)
{
    unsigned char *ptr_base;
    unsigned char *ptr;
    unsigned char *pred_ptr = d->predictor;

    ptr_base = *(d->base_pre);
    ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
#if CONFIG_SIXTEENTH_SUBPEL_UV
        x->subpixel_predict8x8(ptr, d->pre_stride, (d->bmi.mv.as_mv.col & 7) << 1, (d->bmi.mv.as_mv.row & 7) << 1, pred_ptr, pitch);
#else
        x->subpixel_predict8x8(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
#endif
    }
    else
    {
        RECON_INVOKE(&x->rtcd->recon, copy8x8)(ptr, d->pre_stride, pred_ptr, pitch);
    }
}

static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, int pitch)
{
    unsigned char *ptr_base;
    unsigned char *ptr;
    unsigned char *pred_ptr = d->predictor;

    ptr_base = *(d->base_pre);
    ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);

    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
    {
#if CONFIG_SIXTEENTH_SUBPEL_UV
        x->subpixel_predict8x4(ptr, d->pre_stride, (d->bmi.mv.as_mv.col & 7) << 1, (d->bmi.mv.as_mv.row & 7) << 1, pred_ptr, pitch);
#else
        x->subpixel_predict8x4(ptr, d->pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch);
#endif
    }
    else
    {
        RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch);
    }
}

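/* Editorial note: the 16x16 chroma predictor below derives the UV motion
 * vector from the luma MV by halving it with rounding away from zero, then
 * masking with x->fullpixel_mask for full-pel-only configurations.
 * Illustrative example: a luma MV of (row, col) = (-5, 7) in 1/8-pel units
 * becomes a chroma MV of (-3, 4).
 */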
/*encoder only*/
void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x)
{
    unsigned char *uptr, *vptr;
    unsigned char *upred_ptr = &x->predictor[256];
    unsigned char *vpred_ptr = &x->predictor[320];

    int omv_row = x->mode_info_context->mbmi.mv.as_mv.row;
    int omv_col = x->mode_info_context->mbmi.mv.as_mv.col;
    int mv_row = omv_row;
    int mv_col = omv_col;
    int offset;
    int pre_stride = x->block[16].pre_stride;

    /* calc uv motion vectors */
    if (mv_row < 0)
        mv_row -= 1;
    else
        mv_row += 1;

    if (mv_col < 0)
        mv_col -= 1;
    else
        mv_col += 1;

    mv_row /= 2;
    mv_col /= 2;

    mv_row &= x->fullpixel_mask;
    mv_col &= x->fullpixel_mask;

    offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
    uptr = x->pre.u_buffer + offset;
    vptr = x->pre.v_buffer + offset;

#if CONFIG_SIXTEENTH_SUBPEL_UV
    if ((omv_row | omv_col) & 15)
    {
        x->subpixel_predict8x8(uptr, pre_stride, omv_col & 15, omv_row & 15, upred_ptr, 8);
        x->subpixel_predict8x8(vptr, pre_stride, omv_col & 15, omv_row & 15, vpred_ptr, 8);
    }
#else /* CONFIG_SIXTEENTH_SUBPEL_UV */
    if ((mv_row | mv_col) & 7)
    {
        x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, 8);
        x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, 8);
    }
#endif /* CONFIG_SIXTEENTH_SUBPEL_UV */
    else
    {
        RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8);
        RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8);
    }
}

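/* Editorial note: for split (4x4) motion, each chroma block's MV below is
 * derived from the four co-located luma MVs: sum them, round by +/-4, then
 * divide by 8 (half their average, matching the 4:2:0 chroma subsampling).
 * Illustrative example: luma rows of 3, 3, 4 and 4 sum to 14, round to 18,
 * and yield a chroma row of 2.
 */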
/*encoder only*/
void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x)
{
    int i, j;

    /* build uv mvs */
    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            int yoffset = i * 8 + j * 2;
            int uoffset = 16 + i * 2 + j;
            int voffset = 20 + i * 2 + j;

            int temp;

            temp = x->block[yoffset  ].bmi.mv.as_mv.row
                   + x->block[yoffset+1].bmi.mv.as_mv.row
                   + x->block[yoffset+4].bmi.mv.as_mv.row
                   + x->block[yoffset+5].bmi.mv.as_mv.row;

            if (temp < 0) temp -= 4;
            else temp += 4;

            x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;

            temp = x->block[yoffset  ].bmi.mv.as_mv.col
                   + x->block[yoffset+1].bmi.mv.as_mv.col
                   + x->block[yoffset+4].bmi.mv.as_mv.col
                   + x->block[yoffset+5].bmi.mv.as_mv.col;

            if (temp < 0) temp -= 4;
            else temp += 4;

            x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;

            x->block[voffset].bmi.mv.as_mv.row = x->block[uoffset].bmi.mv.as_mv.row;
            x->block[voffset].bmi.mv.as_mv.col = x->block[uoffset].bmi.mv.as_mv.col;
        }
    }

    for (i = 16; i < 24; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, 8);
        else
        {
            vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict);
            vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict);
        }
    }
}

/*encoder only*/
void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x)
{
    unsigned char *ptr_base;
    unsigned char *ptr;
    unsigned char *pred_ptr = x->predictor;
    int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
    int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
    int pre_stride = x->block[0].pre_stride;

    ptr_base = x->pre.y_buffer;
    ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);

    if ((mv_row | mv_col) & 7)
    {
#if CONFIG_SIXTEENTH_SUBPEL_UV
        x->subpixel_predict16x16(ptr, pre_stride, (mv_col & 7) << 1, (mv_row & 7) << 1, pred_ptr, 16);
#else
        x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, pred_ptr, 16);
#endif
    }
    else
    {
        RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16);
    }
}

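/* Editorial note (assuming the default 6-tap filter, i.e. INTERP_EXTEND == 3):
 * the clamps below fire once an MV points more than (16+3)<<3 = 152 eighth-pel
 * units (19 pixels) past the left/top edge, or (15+3)<<3 = 144 units past the
 * right/bottom edge, and the MV is then pinned 16<<3 = 128 units (16 pixels)
 * beyond the corresponding edge.
 */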
static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
{
    /* If the MV points so far into the UMV border that no visible pixels
     * are used for reconstruction, the subpel part of the MV can be
     * discarded and the MV limited to 16 pixels with equivalent results.
     *
     * This limit kicks in at 19 pixels for the top and left edges, for
     * the 16 pixels plus 3 taps right of the central pixel when subpel
     * filtering. The bottom and right edges use 16 pixels plus 2 pixels
     * left of the central pixel when filtering.
     */
    if (mv->col < (xd->mb_to_left_edge - ((16 + INTERP_EXTEND) << 3)))
        mv->col = xd->mb_to_left_edge - (16 << 3);
    else if (mv->col > xd->mb_to_right_edge + ((15 + INTERP_EXTEND) << 3))
        mv->col = xd->mb_to_right_edge + (16 << 3);

    if (mv->row < (xd->mb_to_top_edge - ((16 + INTERP_EXTEND) << 3)))
        mv->row = xd->mb_to_top_edge - (16 << 3);
    else if (mv->row > xd->mb_to_bottom_edge + ((15 + INTERP_EXTEND) << 3))
        mv->row = xd->mb_to_bottom_edge + (16 << 3);
}

/* A version of the above function for chroma block MVs. */
static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd)
{
    mv->col = (2 * mv->col < (xd->mb_to_left_edge - ((16 + INTERP_EXTEND) << 3))) ?
              (xd->mb_to_left_edge - (16 << 3)) >> 1 : mv->col;
    mv->col = (2 * mv->col > xd->mb_to_right_edge + ((15 + INTERP_EXTEND) << 3)) ?
              (xd->mb_to_right_edge + (16 << 3)) >> 1 : mv->col;

    mv->row = (2 * mv->row < (xd->mb_to_top_edge - ((16 + INTERP_EXTEND) << 3))) ?
              (xd->mb_to_top_edge - (16 << 3)) >> 1 : mv->row;
    mv->row = (2 * mv->row > xd->mb_to_bottom_edge + ((15 + INTERP_EXTEND) << 3)) ?
              (xd->mb_to_bottom_edge + (16 << 3)) >> 1 : mv->row;
}

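/* Whole-macroblock (16x16 luma plus two 8x8 chroma) inter prediction into
 * caller-supplied destination buffers. Editorial note: the test
 * (_16x16mv.as_int & 0x00070007) checks the low three bits of both packed MV
 * components at once, i.e. whether either component has a sub-pel part.
 */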
void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
                                        unsigned char *dst_y,
                                        unsigned char *dst_u,
                                        unsigned char *dst_v,
                                        int dst_ystride,
                                        int dst_uvstride)
{
    int offset;
    unsigned char *ptr;
    unsigned char *uptr, *vptr;

    int_mv _o16x16mv;
    int_mv _16x16mv;

    unsigned char *ptr_base = x->pre.y_buffer;
    int pre_stride = x->block[0].pre_stride;

    _16x16mv.as_int = x->mode_info_context->mbmi.mv.as_int;

    if (x->mode_info_context->mbmi.need_to_clamp_mvs)
    {
        clamp_mv_to_umv_border(&_16x16mv.as_mv, x);
    }

    ptr = ptr_base + (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);

    if (_16x16mv.as_int & 0x00070007)
    {
#if CONFIG_SIXTEENTH_SUBPEL_UV
        x->subpixel_predict16x16(ptr, pre_stride, (_16x16mv.as_mv.col & 7) << 1, (_16x16mv.as_mv.row & 7) << 1, dst_y, dst_ystride);
#else
        x->subpixel_predict16x16(ptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_y, dst_ystride);
#endif
    }
    else
    {
        RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, dst_y, dst_ystride);
    }

    _o16x16mv = _16x16mv;
    /* calc uv motion vectors */
    if (_16x16mv.as_mv.row < 0)
        _16x16mv.as_mv.row -= 1;
    else
        _16x16mv.as_mv.row += 1;

    if (_16x16mv.as_mv.col < 0)
        _16x16mv.as_mv.col -= 1;
    else
        _16x16mv.as_mv.col += 1;

    _16x16mv.as_mv.row /= 2;
    _16x16mv.as_mv.col /= 2;

    _16x16mv.as_mv.row &= x->fullpixel_mask;
    _16x16mv.as_mv.col &= x->fullpixel_mask;

    pre_stride >>= 1;
    offset = (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3);
    uptr = x->pre.u_buffer + offset;
    vptr = x->pre.v_buffer + offset;

#if CONFIG_SIXTEENTH_SUBPEL_UV
    if (_o16x16mv.as_int & 0x000f000f)
    {
        x->subpixel_predict8x8(uptr, pre_stride, _o16x16mv.as_mv.col & 15, _o16x16mv.as_mv.row & 15, dst_u, dst_uvstride);
        x->subpixel_predict8x8(vptr, pre_stride, _o16x16mv.as_mv.col & 15, _o16x16mv.as_mv.row & 15, dst_v, dst_uvstride);
    }
#else /* CONFIG_SIXTEENTH_SUBPEL_UV */
    if (_16x16mv.as_int & 0x00070007)
    {
        x->subpixel_predict8x8(uptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_u, dst_uvstride);
        x->subpixel_predict8x8(vptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_v, dst_uvstride);
    }
#endif /* CONFIG_SIXTEENTH_SUBPEL_UV */
    else
    {
        RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, dst_u, dst_uvstride);
        RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, dst_v, dst_uvstride);
    }
}

/*
 * This function should be called after an initial call to
 * vp8_build_inter16x16_predictors_mb() or _mby()/_mbuv().
 * It will run a second sixtap filter on a (different) ref
 * frame and average the result with the output of the
 * first sixtap filter. The second reference frame is stored
 * in x->second_pre (the reference frame index is in
 * x->mode_info_context->mbmi.second_ref_frame). The second
 * motion vector is x->mode_info_context->mbmi.second_mv.
 *
 * This allows blending prediction from two reference frames,
 * which sometimes leads to better prediction than from a
 * single reference frame.
 */
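/* Illustrative calling pattern (mirrors vp8_build_inter_predictors_mb()
 * at the end of this file):
 *
 *     vp8_build_inter16x16_predictors_mb(x, dst_y, dst_u, dst_v, 16, 8);
 *     if (x->mode_info_context->mbmi.second_ref_frame)
 *         vp8_build_2nd_inter16x16_predictors_mb(x, dst_y, dst_u, dst_v, 16, 8);
 */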
void vp8_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *x,
                                            unsigned char *dst_y,
                                            unsigned char *dst_u,
                                            unsigned char *dst_v,
                                            int dst_ystride,
                                            int dst_uvstride)
{
    int offset;
    unsigned char *ptr;
    unsigned char *uptr, *vptr;

    int_mv _16x16mv;
    int mv_row;
    int mv_col;

    int omv_row, omv_col;

    unsigned char *ptr_base = x->second_pre.y_buffer;
    int pre_stride = x->block[0].pre_stride;

    _16x16mv.as_int = x->mode_info_context->mbmi.second_mv.as_int;

    if (x->mode_info_context->mbmi.need_to_clamp_secondmv)
    {
        clamp_mv_to_umv_border(&_16x16mv.as_mv, x);
    }

    mv_row = _16x16mv.as_mv.row;
    mv_col = _16x16mv.as_mv.col;

    ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);

    if ((mv_row | mv_col) & 7)
    {
#if CONFIG_SIXTEENTH_SUBPEL_UV
        x->subpixel_predict_avg16x16(ptr, pre_stride, (mv_col & 7) << 1, (mv_row & 7) << 1, dst_y, dst_ystride);
#else
        x->subpixel_predict_avg16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, dst_y, dst_ystride);
#endif
    }
    else
    {
        RECON_INVOKE(&x->rtcd->recon, avg16x16)(ptr, pre_stride, dst_y, dst_ystride);
    }

    /* calc uv motion vectors */
    omv_row = mv_row;
    omv_col = mv_col;
    mv_row = (mv_row + (mv_row > 0)) >> 1;
    mv_col = (mv_col + (mv_col > 0)) >> 1;

    mv_row &= x->fullpixel_mask;
    mv_col &= x->fullpixel_mask;

    pre_stride >>= 1;
    offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
    uptr = x->second_pre.u_buffer + offset;
    vptr = x->second_pre.v_buffer + offset;

#if CONFIG_SIXTEENTH_SUBPEL_UV
    if ((omv_row | omv_col) & 15)
    {
        x->subpixel_predict_avg8x8(uptr, pre_stride, omv_col & 15, omv_row & 15, dst_u, dst_uvstride);
        x->subpixel_predict_avg8x8(vptr, pre_stride, omv_col & 15, omv_row & 15, dst_v, dst_uvstride);
    }
#else /* CONFIG_SIXTEENTH_SUBPEL_UV */
    if ((mv_row | mv_col) & 7)
    {
        x->subpixel_predict_avg8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, dst_u, dst_uvstride);
        x->subpixel_predict_avg8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, dst_v, dst_uvstride);
    }
#endif /* CONFIG_SIXTEENTH_SUBPEL_UV */
    else
    {
        RECON_INVOKE(&x->rtcd->recon, avg8x8)(uptr, pre_stride, dst_u, dst_uvstride);
        RECON_INVOKE(&x->rtcd->recon, avg8x8)(vptr, pre_stride, dst_v, dst_uvstride);
    }
}

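/* Editorial summary: for SPLITMV macroblocks, partitionings with fewer than
 * sixteen partitions (partitioning < 3) predict the luma as four 8x8 quadrants
 * via build_inter_predictors4b(); the 4x4 partitioning predicts the sixteen
 * luma blocks in horizontal pairs, sharing one 8x4 call when a pair's MVs
 * match. Chroma blocks 16-23 are always handled as pairs at the end.
 */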
static void build_inter4x4_predictors_mb(MACROBLOCKD *x)
{
    int i;

    if (x->mode_info_context->mbmi.partitioning < 3)
    {
        x->block[ 0].bmi = x->mode_info_context->bmi[ 0];
        x->block[ 2].bmi = x->mode_info_context->bmi[ 2];
        x->block[ 8].bmi = x->mode_info_context->bmi[ 8];
        x->block[10].bmi = x->mode_info_context->bmi[10];

        if (x->mode_info_context->mbmi.need_to_clamp_mvs)
        {
            clamp_mv_to_umv_border(&x->block[ 0].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(&x->block[ 2].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(&x->block[ 8].bmi.mv.as_mv, x);
            clamp_mv_to_umv_border(&x->block[10].bmi.mv.as_mv, x);
        }

        build_inter_predictors4b(x, &x->block[ 0], 16);
        build_inter_predictors4b(x, &x->block[ 2], 16);
        build_inter_predictors4b(x, &x->block[ 8], 16);
        build_inter_predictors4b(x, &x->block[10], 16);
    }
    else
    {
        for (i = 0; i < 16; i += 2)
        {
            BLOCKD *d0 = &x->block[i];
            BLOCKD *d1 = &x->block[i+1];

            x->block[i+0].bmi = x->mode_info_context->bmi[i+0];
            x->block[i+1].bmi = x->mode_info_context->bmi[i+1];

            if (x->mode_info_context->mbmi.need_to_clamp_mvs)
            {
                clamp_mv_to_umv_border(&x->block[i+0].bmi.mv.as_mv, x);
                clamp_mv_to_umv_border(&x->block[i+1].bmi.mv.as_mv, x);
            }

            if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
                build_inter_predictors2b(x, d0, 16);
            else
            {
                vp8_build_inter_predictors_b(d0, 16, x->subpixel_predict);
                vp8_build_inter_predictors_b(d1, 16, x->subpixel_predict);
            }
        }
    }

    for (i = 16; i < 24; i += 2)
    {
        BLOCKD *d0 = &x->block[i];
        BLOCKD *d1 = &x->block[i+1];

        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
            build_inter_predictors2b(x, d0, 8);
        else
        {
            vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict);
            vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict);
        }
    }
}

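/* Derives the chroma (blocks 16-23) MVs for SPLITMV macroblocks from the
 * decoded per-4x4 luma MVs, using the same sum/round/divide-by-8 averaging as
 * vp8_build_inter4x4_predictors_mbuv() above; used by
 * vp8_build_inter_predictors_mb() below.
 */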
static void build_4x4uvmvs(MACROBLOCKD *x)
{
    int i, j;

    for (i = 0; i < 2; i++)
    {
        for (j = 0; j < 2; j++)
        {
            int yoffset = i * 8 + j * 2;
            int uoffset = 16 + i * 2 + j;
            int voffset = 20 + i * 2 + j;

            int temp;

            temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.row
                   + x->mode_info_context->bmi[yoffset + 1].mv.as_mv.row
                   + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.row
                   + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.row;

            if (temp < 0) temp -= 4;
            else temp += 4;

            x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;

            temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.col
                   + x->mode_info_context->bmi[yoffset + 1].mv.as_mv.col
                   + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.col
                   + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.col;

            if (temp < 0) temp -= 4;
            else temp += 4;

            x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;

            if (x->mode_info_context->mbmi.need_to_clamp_mvs)
                clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.mv.as_mv, x);

            x->block[voffset].bmi.mv.as_mv.row = x->block[uoffset].bmi.mv.as_mv.row;
            x->block[voffset].bmi.mv.as_mv.col = x->block[uoffset].bmi.mv.as_mv.col;
        }
    }
}

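/* Editorial summary: top-level entry point. Non-SPLITMV modes get a single
 * whole-MB prediction, optionally averaged with a second reference when
 * second_ref_frame is set; SPLITMV first derives the chroma MVs and then
 * predicts block by block.
 */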
void vp8_build_inter_predictors_mb(MACROBLOCKD *x)
{
    if (x->mode_info_context->mbmi.mode != SPLITMV)
    {
        vp8_build_inter16x16_predictors_mb(x, x->predictor, &x->predictor[256],
                                           &x->predictor[320], 16, 8);

        if (x->mode_info_context->mbmi.second_ref_frame)
        {
            /* 256 = offset of U plane in Y+U+V buffer;
             * 320 = offset of V plane in Y+U+V buffer.
             * (256=16x16, 320=16x16+8x8). */
            vp8_build_2nd_inter16x16_predictors_mb(x, x->predictor,
                                                   &x->predictor[256],
                                                   &x->predictor[320], 16, 8);
        }
    }
    else
    {
        build_4x4uvmvs(x);
        build_inter4x4_predictors_mb(x);
    }
}