vpx/vp8/encoder/quantize.c

/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <math.h>
#include "vpx_mem/vpx_mem.h"
#include "onyx_int.h"
#include "quantize.h"
#include "vp8/common/quant_common.h"
#include "vp8/common/seg_common.h"
#ifdef ENC_DEBUG
extern int enc_debug;
#endif
void vp9_ht_quantize_b_4x4(BLOCK *b, BLOCKD *d, TX_TYPE tx_type) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
short *zbin_boost_ptr = b->zrun_zbin_boost;
short *coeff_ptr = b->coeff;
short *zbin_ptr = b->zbin;
short *round_ptr = b->round;
short *quant_ptr = b->quant;
unsigned char *quant_shift_ptr = b->quant_shift;
short *qcoeff_ptr = d->qcoeff;
short *dqcoeff_ptr = d->dqcoeff;
short *dequant_ptr = d->dequant;
short zbin_oq_value = b->zbin_extra;
  const int *pt_scan;
  switch (tx_type) {
    case ADST_DCT:
      pt_scan = vp9_row_scan;
      break;
    case DCT_ADST:
      pt_scan = vp9_col_scan;
      break;
    default:
      pt_scan = vp9_default_zig_zag1d;
      break;
  }
  vpx_memset(qcoeff_ptr, 0, 16 * sizeof(short));
  vpx_memset(dqcoeff_ptr, 0, 16 * sizeof(short));
eob = -1;
for (i = 0; i < b->eob_max_offset; i++) {
rc = pt_scan[i];
z = coeff_ptr[rc];
zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;
    zbin_boost_ptr++;
sz = (z >> 31); // sign of z
x = (z ^ sz) - sz; // x = abs(z)
if (x >= zbin) {
x += round_ptr[rc];
y = (((x * quant_ptr[rc]) >> 16) + x)
>> quant_shift_ptr[rc]; // quantize (x)
x = (y ^ sz) - sz; // get the sign back
qcoeff_ptr[rc] = x; // write to destination
dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
if (y) {
eob = i; // last nonzero coeffs
zbin_boost_ptr = b->zrun_zbin_boost; // reset zero runlength
}
}
}
d->eob = eob + 1;
}
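
/* A note on the zero-bin logic in the loops above and below: a coefficient
 * survives only if its magnitude reaches
 *     zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value.
 * The boost term grows with the current run of zeroed coefficients (the
 * zrun_zbin_boost tables) and is reset whenever a nonzero level is emitted,
 * so an isolated small coefficient at the end of a long zero run must clear
 * a higher threshold than one following other nonzero coefficients. */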
void vp9_regular_quantize_b_4x4(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
short *zbin_boost_ptr = b->zrun_zbin_boost;
short *coeff_ptr = b->coeff;
short *zbin_ptr = b->zbin;
short *round_ptr = b->round;
short *quant_ptr = b->quant;
unsigned char *quant_shift_ptr = b->quant_shift;
short *qcoeff_ptr = d->qcoeff;
short *dqcoeff_ptr = d->dqcoeff;
short *dequant_ptr = d->dequant;
short zbin_oq_value = b->zbin_extra;
  vpx_memset(qcoeff_ptr, 0, 16 * sizeof(short));
  vpx_memset(dqcoeff_ptr, 0, 16 * sizeof(short));
eob = -1;
for (i = 0; i < b->eob_max_offset; i++) {
rc = vp9_default_zig_zag1d[i];
z = coeff_ptr[rc];
zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value;
    zbin_boost_ptr++;
sz = (z >> 31); // sign of z
x = (z ^ sz) - sz; // x = abs(z)
if (x >= zbin) {
x += round_ptr[rc];
y = (((x * quant_ptr[rc]) >> 16) + x)
>> quant_shift_ptr[rc]; // quantize (x)
x = (y ^ sz) - sz; // get the sign back
qcoeff_ptr[rc] = x; // write to destination
dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
if (y) {
eob = i; // last nonzero coeffs
zbin_boost_ptr = b->zrun_zbin_boost; // reset zero runlength
}
}
}
d->eob = eob + 1;
}
void vp9_quantize_mby_4x4_c(MACROBLOCK *x) {
int i;
int has_2nd_order = x->e_mbd.mode_info_context->mbmi.mode != SPLITMV;
for (i = 0; i < 16; i++)
x->quantize_b_4x4(&x->block[i], &x->e_mbd.block[i]);
if (has_2nd_order)
x->quantize_b_4x4(&x->block[24], &x->e_mbd.block[24]);
}
void vp9_quantize_mbuv_4x4_c(MACROBLOCK *x) {
int i;
for (i = 16; i < 24; i++)
x->quantize_b_4x4(&x->block[i], &x->e_mbd.block[i]);
}
void vp9_quantize_mb_4x4_c(MACROBLOCK *x) {
vp9_quantize_mby_4x4_c(x);
vp9_quantize_mbuv_4x4_c(x);
}
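
/* Block layout used throughout this file: blocks 0-15 are the luma (Y)
 * 4x4 blocks, 16-23 are the chroma (U, V) blocks, and block 24 is the
 * second-order (Y2) block that carries the DC coefficients of the 16 luma
 * blocks. Macroblocks coded with SPLITMV have no second-order block, hence
 * the has_2nd_order checks above. */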
void vp9_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
short *zbin_boost_ptr = b->zrun_zbin_boost;
int zbin_zrun_index = 0;
short *coeff_ptr = b->coeff;
short *zbin_ptr = b->zbin;
short *round_ptr = b->round;
short *quant_ptr = b->quant;
unsigned char *quant_shift_ptr = b->quant_shift;
short *qcoeff_ptr = d->qcoeff;
short *dqcoeff_ptr = d->dqcoeff;
short *dequant_ptr = d->dequant;
short zbin_oq_value = b->zbin_extra;
  vpx_memset(qcoeff_ptr, 0, 16 * sizeof(short));
  vpx_memset(dqcoeff_ptr, 0, 16 * sizeof(short));
eob = -1;
for (i = 0; i < b->eob_max_offset_8x8; i++) {
rc = vp9_default_zig_zag1d[i];
z = coeff_ptr[rc];
zbin_boost_ptr = &b->zrun_zbin_boost[zbin_zrun_index];
zbin_zrun_index += 4;
zbin = (zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value);
sz = (z >> 31); // sign of z
x = (z ^ sz) - sz; // x = abs(z)
if (x >= zbin) {
x += (round_ptr[rc]);
      y = (((x * quant_ptr[rc]) >> 16) + x)
          >> quant_shift_ptr[rc]; // quantize (x)
x = (y ^ sz) - sz; // get the sign back
qcoeff_ptr[rc] = x; // write to destination
dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value
if (y) {
eob = i; // last nonzero coeffs
zbin_zrun_index = 0;
}
}
}
d->eob = eob + 1;
}
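
/* The 2x2 path above quantizes the second-order block when the macroblock
 * uses the 8x8 transform, in which case the Y2 block holds only 4
 * coefficients (this is also why eob_max_offset_8x8 is set to 4 for block
 * 24 in vp9_mb_init_quantizer()). zbin_zrun_index advances in steps of 4,
 * striding through the 16-entry zrun_zbin_boost table instead of walking
 * its first four entries; that reading is inferred from the code rather
 * than documented. */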
void vp9_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
short *zbin_boost_ptr = b->zrun_zbin_boost_8x8;
short *coeff_ptr = b->coeff;
short *zbin_ptr = b->zbin_8x8;
short *round_ptr = b->round;
short *quant_ptr = b->quant;
unsigned char *quant_shift_ptr = b->quant_shift;
short *qcoeff_ptr = d->qcoeff;
short *dqcoeff_ptr = d->dqcoeff;
short *dequant_ptr = d->dequant;
short zbin_oq_value = b->zbin_extra;
vpx_memset(qcoeff_ptr, 0, 64 * sizeof(short));
vpx_memset(dqcoeff_ptr, 0, 64 * sizeof(short));
eob = -1;
for (i = 0; i < b->eob_max_offset_8x8; i++) {
rc = vp9_default_zig_zag1d_8x8[i];
z = coeff_ptr[rc];
zbin = (zbin_ptr[rc != 0] + *zbin_boost_ptr + zbin_oq_value);
zbin_boost_ptr++;
sz = (z >> 31); // sign of z
x = (z ^ sz) - sz; // x = abs(z)
if (x >= zbin) {
x += (round_ptr[rc != 0]);
      y = (((x * quant_ptr[rc != 0]) >> 16) + x)
          >> quant_shift_ptr[rc != 0]; // quantize (x)
x = (y ^ sz) - sz; // get the sign back
qcoeff_ptr[rc] = x; // write to destination
dqcoeff_ptr[rc] = x * dequant_ptr[rc != 0]; // dequantized value
if (y) {
eob = i; // last nonzero coeffs
zbin_boost_ptr = b->zrun_zbin_boost_8x8;
}
}
}
d->eob = eob + 1;
}
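
/* In the 8x8 and 16x16 loops the quantizer tables are indexed with
 * [rc != 0]: entry 0 holds the DC value and entry 1 is shared by every AC
 * position. This shortcut is valid only for flat quantization matrices,
 * where all AC entries are equal; see the matching comments in
 * vp9_init_quantizer() below. */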
void vp9_quantize_mby_8x8(MACROBLOCK *x) {
int i;
int has_2nd_order = x->e_mbd.mode_info_context->mbmi.mode != SPLITMV;
  for (i = 0; i < 16; i++) {
x->e_mbd.block[i].eob = 0;
}
x->e_mbd.block[24].eob = 0;
for (i = 0; i < 16; i += 4)
x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
if (has_2nd_order)
x->quantize_b_2x2(&x->block[24], &x->e_mbd.block[24]);
}
void vp9_quantize_mbuv_8x8(MACROBLOCK *x) {
int i;
  for (i = 16; i < 24; i++)
x->e_mbd.block[i].eob = 0;
for (i = 16; i < 24; i += 4)
x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
}
void vp9_quantize_mb_8x8(MACROBLOCK *x) {
vp9_quantize_mby_8x8(x);
vp9_quantize_mbuv_8x8(x);
}
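
/* Each 8x8 transform block spans four 4x4 coefficient blocks, so the 8x8
 * loops above step by 4: luma uses block indices 0, 4, 8 and 12, chroma
 * uses 16 and 20. Every per-block eob is cleared first so the blocks
 * skipped by the stride report zero coefficients. */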
void vp9_quantize_mby_16x16(MACROBLOCK *x) {
int i;
for (i = 0; i < 16; i++)
x->e_mbd.block[i].eob = 0;
x->e_mbd.block[24].eob = 0;
x->quantize_b_16x16(&x->block[0], &x->e_mbd.block[0]);
}
void vp9_quantize_mb_16x16(MACROBLOCK *x) {
vp9_quantize_mby_16x16(x);
vp9_quantize_mbuv_8x8(x);
}
void vp9_regular_quantize_b_16x16(BLOCK *b, BLOCKD *d) {
int i, rc, eob;
int zbin;
int x, y, z, sz;
short *zbin_boost_ptr = b->zrun_zbin_boost_16x16;
short *coeff_ptr = b->coeff;
short *zbin_ptr = b->zbin_16x16;
short *round_ptr = b->round;
short *quant_ptr = b->quant;
unsigned char *quant_shift_ptr = b->quant_shift;
short *qcoeff_ptr = d->qcoeff;
short *dqcoeff_ptr = d->dqcoeff;
short *dequant_ptr = d->dequant;
short zbin_oq_value = b->zbin_extra;
  vpx_memset(qcoeff_ptr, 0, 256 * sizeof(short));
  vpx_memset(dqcoeff_ptr, 0, 256 * sizeof(short));
eob = -1;
for (i = 0; i < b->eob_max_offset_16x16; i++) {
rc = vp9_default_zig_zag1d_16x16[i];
z = coeff_ptr[rc];
    zbin = (zbin_ptr[rc != 0] + *zbin_boost_ptr + zbin_oq_value);
    zbin_boost_ptr++;
    sz = (z >> 31); // sign of z
    x = (z ^ sz) - sz; // x = abs(z)
    if (x >= zbin) {
      x += (round_ptr[rc != 0]);
      y = (((x * quant_ptr[rc != 0]) >> 16) + x)
          >> quant_shift_ptr[rc != 0]; // quantize (x)
      x = (y ^ sz) - sz; // get the sign back
      qcoeff_ptr[rc] = x; // write to destination
      dqcoeff_ptr[rc] = x * dequant_ptr[rc != 0]; // dequantized value
if (y) {
eob = i; // last nonzero coeffs
zbin_boost_ptr = b->zrun_zbin_boost_16x16;
}
}
}
d->eob = eob + 1;
}
/* The quantize_b_pair function pointer in the MACROBLOCK structure is set
 * to one of these two C functions if the corresponding optimized routine
 * is not available. The NEON optimized version currently implements the
 * fast quantization for a pair of blocks. */
void vp9_regular_quantize_b_4x4_pair(BLOCK *b1, BLOCK *b2,
BLOCKD *d1, BLOCKD *d2) {
vp9_regular_quantize_b_4x4(b1, d1);
vp9_regular_quantize_b_4x4(b2, d2);
}
static void invert_quant(short *quant, unsigned char *shift, short d) {
unsigned t;
int l;
t = d;
for (l = 0; t > 1; l++)
t >>= 1;
t = 1 + (1 << (16 + l)) / d;
*quant = (short)(t - (1 << 16));
*shift = l;
}
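
/* invert_quant() builds a fixed-point reciprocal so the quantize loops can
 * divide by the quantizer step with a multiply and two shifts. It computes
 * l = floor(log2(d)) and t = 1 + (1 << (16 + l)) / d, storing the low 16
 * bits of t in *quant and l in *shift. The loops then evaluate
 *     y = (((x * quant) >> 16) + x) >> shift
 * which is exactly (x * t) >> (16 + l), i.e. approximately x / d.
 * Worked example for d = 4: l = 2, t = 1 + (1 << 18) / 4 = 65537, so
 * *quant = 1 and *shift = 2; x = 100 gives (0 + 100) >> 2 = 25 = 100 / 4. */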
void vp9_init_quantizer(VP9_COMP *cpi) {
int i;
int quant_val;
int Q;
static const int zbin_boost[16] = { 0, 0, 8, 10, 12, 14, 16, 20,
24, 28, 32, 36, 40, 44, 44, 44
};
static const int zbin_boost_8x8[64] = { 0, 0, 0, 8, 8, 8, 10, 12,
14, 16, 18, 20, 22, 24, 26, 28,
30, 32, 34, 36, 38, 40, 42, 44,
46, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48
};
static const int zbin_boost_16x16[256] = {
0, 0, 0, 8, 8, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28,
30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
};
int qrounding_factor = 48;
for (Q = 0; Q < QINDEX_RANGE; Q++) {
int qzbin_factor = (vp9_dc_quant(Q, 0) < 148) ? 84 : 80;
#if CONFIG_LOSSLESS
if (cpi->oxcf.lossless) {
if (Q == 0) {
qzbin_factor = 64;
qrounding_factor = 64;
}
}
#endif
    // DC values
quant_val = vp9_dc_quant(Q, cpi->common.y1dc_delta_q);
invert_quant(cpi->Y1quant[Q] + 0,
cpi->Y1quant_shift[Q] + 0, quant_val);
cpi->Y1zbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->Y1zbin_8x8[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->Y1zbin_16x16[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->Y1round[Q][0] = (qrounding_factor * quant_val) >> 7;
cpi->common.Y1dequant[Q][0] = quant_val;
cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
cpi->zrun_zbin_boost_y1_8x8[Q][0] =
((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
    cpi->zrun_zbin_boost_y1_16x16[Q][0] =
        ((quant_val * zbin_boost_16x16[0]) + 64) >> 7;
quant_val = vp9_dc2quant(Q, cpi->common.y2dc_delta_q);
invert_quant(cpi->Y2quant[Q] + 0,
cpi->Y2quant_shift[Q] + 0, quant_val);
cpi->Y2zbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->Y2zbin_8x8[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->Y2zbin_16x16[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->Y2round[Q][0] = (qrounding_factor * quant_val) >> 7;
cpi->common.Y2dequant[Q][0] = quant_val;
cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
cpi->zrun_zbin_boost_y2_8x8[Q][0] =
((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
    cpi->zrun_zbin_boost_y2_16x16[Q][0] =
        ((quant_val * zbin_boost_16x16[0]) + 64) >> 7;
quant_val = vp9_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
invert_quant(cpi->UVquant[Q] + 0,
cpi->UVquant_shift[Q] + 0, quant_val);
cpi->UVzbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->UVzbin_8x8[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->UVzbin_16x16[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->UVround[Q][0] = (qrounding_factor * quant_val) >> 7;
cpi->common.UVdequant[Q][0] = quant_val;
cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
cpi->zrun_zbin_boost_uv_8x8[Q][0] =
((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
    cpi->zrun_zbin_boost_uv_16x16[Q][0] =
        ((quant_val * zbin_boost_16x16[0]) + 64) >> 7;
    // All the 4x4 AC values; the same flat quantizer is used for
    // every AC position.
for (i = 1; i < 16; i++) {
int rc = vp9_default_zig_zag1d[i];
quant_val = vp9_ac_yquant(Q);
invert_quant(cpi->Y1quant[Q] + rc,
cpi->Y1quant_shift[Q] + rc, quant_val);
cpi->Y1zbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->Y1round[Q][rc] = (qrounding_factor * quant_val) >> 7;
cpi->common.Y1dequant[Q][rc] = quant_val;
cpi->zrun_zbin_boost_y1[Q][i] =
((quant_val * zbin_boost[i]) + 64) >> 7;
quant_val = vp9_ac2quant(Q, cpi->common.y2ac_delta_q);
invert_quant(cpi->Y2quant[Q] + rc,
cpi->Y2quant_shift[Q] + rc, quant_val);
cpi->Y2zbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->Y2round[Q][rc] = (qrounding_factor * quant_val) >> 7;
cpi->common.Y2dequant[Q][rc] = quant_val;
cpi->zrun_zbin_boost_y2[Q][i] =
((quant_val * zbin_boost[i]) + 64) >> 7;
quant_val = vp9_ac_uv_quant(Q, cpi->common.uvac_delta_q);
invert_quant(cpi->UVquant[Q] + rc,
cpi->UVquant_shift[Q] + rc, quant_val);
cpi->UVzbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->UVround[Q][rc] = (qrounding_factor * quant_val) >> 7;
cpi->common.UVdequant[Q][rc] = quant_val;
cpi->zrun_zbin_boost_uv[Q][i] =
((quant_val * zbin_boost[i]) + 64) >> 7;
}
    // 8x8 structures... only zbin separated out for now.
    // This needs cleaning up for 8x8, especially if we are to add
    // support for non-flat Q matrices.
for (i = 1; i < 64; i++) {
int rc = vp9_default_zig_zag1d_8x8[i];
quant_val = vp9_ac_yquant(Q);
cpi->Y1zbin_8x8[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->zrun_zbin_boost_y1_8x8[Q][i] =
((quant_val * zbin_boost_8x8[i]) + 64) >> 7;
quant_val = vp9_ac2quant(Q, cpi->common.y2ac_delta_q);
cpi->Y2zbin_8x8[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->zrun_zbin_boost_y2_8x8[Q][i] =
((quant_val * zbin_boost_8x8[i]) + 64) >> 7;
quant_val = vp9_ac_uv_quant(Q, cpi->common.uvac_delta_q);
cpi->UVzbin_8x8[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->zrun_zbin_boost_uv_8x8[Q][i] =
((quant_val * zbin_boost_8x8[i]) + 64) >> 7;
}
    // 16x16 structures. The same comment as above applies.
for (i = 1; i < 256; i++) {
int rc = vp9_default_zig_zag1d_16x16[i];
quant_val = vp9_ac_yquant(Q);
cpi->Y1zbin_16x16[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
      cpi->zrun_zbin_boost_y1_16x16[Q][i] =
          ((quant_val * zbin_boost_16x16[i]) + 64) >> 7;
quant_val = vp9_ac2quant(Q, cpi->common.y2ac_delta_q);
cpi->Y2zbin_16x16[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
      cpi->zrun_zbin_boost_y2_16x16[Q][i] =
          ((quant_val * zbin_boost_16x16[i]) + 64) >> 7;
quant_val = vp9_ac_uv_quant(Q, cpi->common.uvac_delta_q);
cpi->UVzbin_16x16[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
      cpi->zrun_zbin_boost_uv_16x16[Q][i] =
          ((quant_val * zbin_boost_16x16[i]) + 64) >> 7;
}
}
}
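
/* The factors above are Q7 fixed point: with qzbin_factor = 84 the zero
 * bin is (84 * quant_val + 64) >> 7, roughly 0.66 of the quantizer step,
 * and qrounding_factor = 48 gives a rounding offset of about 0.375 of the
 * step. At larger quantizers (vp9_dc_quant() >= 148) the zbin factor drops
 * to 80 (~0.625); in lossless mode both factors become 64 (0.5). */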
void vp9_mb_init_quantizer(VP9_COMP *cpi, MACROBLOCK *x) {
int i;
int QIndex;
MACROBLOCKD *xd = &x->e_mbd;
int zbin_extra;
int segment_id = xd->mode_info_context->mbmi.segment_id;
// Select the baseline MB Q index allowing for any segment level change.
if (vp9_segfeature_active(xd, segment_id, SEG_LVL_ALT_Q)) {
// Abs Value
if (xd->mb_segment_abs_delta == SEGMENT_ABSDATA)
QIndex = vp9_get_segdata(xd, segment_id, SEG_LVL_ALT_Q);
// Delta Value
else {
QIndex = cpi->common.base_qindex +
vp9_get_segdata(xd, segment_id, SEG_LVL_ALT_Q);
// Clamp to valid range
QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0;
}
} else
QIndex = cpi->common.base_qindex;
// Y
zbin_extra = (cpi->common.Y1dequant[QIndex][1] *
(cpi->zbin_over_quant +
cpi->zbin_mode_boost +
x->act_zbin_adj)) >> 7;
for (i = 0; i < 16; i++) {
x->block[i].quant = cpi->Y1quant[QIndex];
x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
x->block[i].zbin = cpi->Y1zbin[QIndex];
x->block[i].zbin_8x8 = cpi->Y1zbin_8x8[QIndex];
x->block[i].zbin_16x16 = cpi->Y1zbin_16x16[QIndex];
x->block[i].round = cpi->Y1round[QIndex];
x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
x->block[i].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_y1_8x8[QIndex];
x->block[i].zrun_zbin_boost_16x16 = cpi->zrun_zbin_boost_y1_16x16[QIndex];
x->block[i].zbin_extra = (short)zbin_extra;
// Segment max eob offset feature.
if (vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
x->block[i].eob_max_offset =
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB);
x->block[i].eob_max_offset_8x8 =
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB);
x->block[i].eob_max_offset_16x16 =
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB);
} else {
x->block[i].eob_max_offset = 16;
x->block[i].eob_max_offset_8x8 = 64;
x->block[i].eob_max_offset_16x16 = 256;
}
}
// UV
zbin_extra = (cpi->common.UVdequant[QIndex][1] *
(cpi->zbin_over_quant +
cpi->zbin_mode_boost +
x->act_zbin_adj)) >> 7;
for (i = 16; i < 24; i++) {
x->block[i].quant = cpi->UVquant[QIndex];
x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
x->block[i].zbin = cpi->UVzbin[QIndex];
x->block[i].zbin_8x8 = cpi->UVzbin_8x8[QIndex];
x->block[i].zbin_16x16 = cpi->UVzbin_16x16[QIndex];
x->block[i].round = cpi->UVround[QIndex];
x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
x->block[i].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_uv_8x8[QIndex];
x->block[i].zrun_zbin_boost_16x16 = cpi->zrun_zbin_boost_uv_16x16[QIndex];
x->block[i].zbin_extra = (short)zbin_extra;
// Segment max eob offset feature.
if (vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
x->block[i].eob_max_offset =
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB);
x->block[i].eob_max_offset_8x8 =
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB);
} else {
x->block[i].eob_max_offset = 16;
x->block[i].eob_max_offset_8x8 = 64;
Improved coding using 8x8 transform In summary, this commit encompasses a series of changes in attempt to improve the 8x8 transform based coding to help overall compression quality, please refer to the detailed commit history below for what are the rationale underly the series of changes: a. A frame level flag to indicate if 8x8 transform is used at all. b. 8x8 transform is not used for key frames and small image size. c. On inter coded frame, macroblocks using modes B_PRED, SPLIT_MV and I8X8_PRED are forced to using 4x4 transform based coding, the rest uses 8x8 transform based coding. d. Encoder and decoder has the same assumption on the relationship between prediction modes and transform size, therefore no signaling is encoded in bitstream. e. Mode decision process now calculate the rate and distortion scores using their respective transforms. Overall test results: 1. HD set http://www.corp.google.com/~yaowu/no_crawl/t8x8/HD_t8x8_20120206.html (avg psnr: 3.09% glb psnr: 3.22%, ssim: 3.90%) 2. Cif set: http://www.corp.google.com/~yaowu/no_crawl/t8x8/cif_t8x8_20120206.html (avg psnr: -0.03%, glb psnr: -0.02%, ssim: -0.04%) It should be noted here, as 8x8 transform coding itself is disabled for cif size clips, the 0.03% loss is purely from the 1 bit/frame flag overhead on if 8x8 transform is used or not for the frame. ---patch history for future reference--- Patch 1: this commit tries to select transform size based on macroblock prediction mode. If the size of a prediction mode is 16x16, then the macroblock is forced to use 8x8 transform. If the prediction mode is B_PRED, SPLITMV or I8X8_PRED, then the macroblock is forced to use 4x4 transform. Tests on the following HD clips showed mixed results: (all hd clips only used first 100 frames in the test) http://www.corp.google.com/~yaowu/no_crawl/t8x8/hdmodebased8x8.html http://www.corp.google.com/~yaowu/no_crawl/t8x8/hdmodebased8x8_log.html while the results are mixed and overall negative, it is interesting to see 8x8 helped a few of the clips. Patch 2: this patch tries to hard-wire selection of transform size based on prediction modes without using segmentation to signal the transform size. encoder and decoder both takes the same assumption that all macroblocks use 8x8 transform except when prediciton mode is B_PRED, I8X8_PRED or SPLITMV. Test results are as follows: http://www.corp.google.com/~yaowu/no_crawl/t8x8/cifmodebase8x8_0125.html http://www.corp.google.com/~yaowu/no_crawl/t8x8/hdmodebased8x8_0125log.html Interestingly, by removing the overhead or coding the segmentation, the results on this limited HD set have turn positive on average. Patch 3: this patch disabled the usage of 8x8 transform on key frames, and kept the logic from patch 2 for inter frames only. test results on HD set turned decidedly positive with 8x8 transform enabled on inter frame with 16x16 prediction modes: (avg psnr: .81% glb psnr: .82 ssim: .55%) http://www.corp.google.com/~yaowu/no_crawl/t8x8/hdintermode8x8_0125.html results on cif set still negative overall Patch 4: continued from last patch, but now in mode decision process, the rate and distortion estimates are computed based on 8x8 transform results for MBs with modes associated with 8x8 transform. This patch also fixed a problem related to segment based eob coding when 8x8 transform is used. 
The patch significantly improved the results on HD clips: http://www.corp.google.com/~yaowu/no_crawl/t8x8/hd8x8RDintermode.html (avg psnr: 2.70% glb psnr: 2.76% ssim: 3.34%) results on cif also improved, though they are still negative compared to baseline that uses 4x4 transform only: http://www.corp.google.com/~yaowu/no_crawl/t8x8/cif8x8RDintermode.html (avg psnr: -.78% glb psnr: -.86% ssim: -.19%) Patch 5: This patch does 3 things: a. a bunch of decoder bug fixes, encodings and decodings were verified to have matched recon buffer on a number of encodes on cif size mobile and hd version of _pedestrian. b. the patch further improved the rate distortion calculation of MBS that use 8x8 transform. This provided some further gain on compression. c. the patch also got the experimental work SEG_LVL_EOB to work with 8x8 transformed macroblock, test results indicates it improves the cif set but hurt the HD set slightly. Tests results on HD clips: http://www.corp.google.com/~yaowu/no_crawl/t8x8/HD_t8x8_20120201.html (avg psnr: 3.19% glb psnr: 3.30% ssim: 3.93%) Test results on cif clips: http://www.corp.google.com/~yaowu/no_crawl/t8x8/cif_t8x8_20120201.html (avg psnr: -.47% glb psnr: -.51% ssim: +.28%) Patch 6: Added a frame level flag to indicate if 8x8 transform is allowed at all. temporarily the decision is based on frame size, can be optimized later one. This get the cif results to basically unchanged, with one bit per frame overhead on both cif and hd clips. Patch 8: Rebase and Merge to head by PGW. Fixed some suspect 4s that look like hey should be 64s in regard to segmented EOB. Perhaps #defines would be bette. Bulit and tested without T8x8 enabled and produces unchanged output. Patch 9: Corrected misalligned code/decode of "txfm_mode" bit. Limited testing for correct encode and decode with T8x8 configured on derf clips. Change-Id: I156e1405d25f81579d579dff8ab9af53944ec49c
2012-02-10 01:12:23 +01:00
}
}
// Y2
zbin_extra = (cpi->common.Y2dequant[QIndex][1] *
((cpi->zbin_over_quant / 2) +
cpi->zbin_mode_boost +
x->act_zbin_adj)) >> 7;
x->block[24].quant = cpi->Y2quant[QIndex];
x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
x->block[24].zbin = cpi->Y2zbin[QIndex];
x->block[24].zbin_8x8 = cpi->Y2zbin_8x8[QIndex];
x->block[24].zbin_16x16 = cpi->Y2zbin_16x16[QIndex];
x->block[24].round = cpi->Y2round[QIndex];
x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
x->block[24].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_y2_8x8[QIndex];
x->block[24].zrun_zbin_boost_16x16 = cpi->zrun_zbin_boost_y2_16x16[QIndex];
x->block[24].zbin_extra = (short)zbin_extra;
  // TBD: perhaps not used for Y2.
// Segment max eob offset feature.
if (vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB)) {
x->block[24].eob_max_offset =
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB);
x->block[24].eob_max_offset_8x8 =
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB);
} else {
x->block[24].eob_max_offset = 16;
    x->block[24].eob_max_offset_8x8 = 4; // 2x2 2nd order block: 4 coeffs
}
/* save this macroblock QIndex for vp9_update_zbin_extra() */
x->e_mbd.q_index = QIndex;
}
void vp9_update_zbin_extra(VP9_COMP *cpi, MACROBLOCK *x) {
int i;
int QIndex = x->e_mbd.q_index;
int zbin_extra;
// Y
zbin_extra = (cpi->common.Y1dequant[QIndex][1] *
(cpi->zbin_over_quant +
cpi->zbin_mode_boost +
x->act_zbin_adj)) >> 7;
for (i = 0; i < 16; i++) {
x->block[i].zbin_extra = (short)zbin_extra;
}
// UV
zbin_extra = (cpi->common.UVdequant[QIndex][1] *
(cpi->zbin_over_quant +
cpi->zbin_mode_boost +
x->act_zbin_adj)) >> 7;
for (i = 16; i < 24; i++) {
x->block[i].zbin_extra = (short)zbin_extra;
}
// Y2
zbin_extra = (cpi->common.Y2dequant[QIndex][1] *
((cpi->zbin_over_quant / 2) +
cpi->zbin_mode_boost +
x->act_zbin_adj)) >> 7;
x->block[24].zbin_extra = (short)zbin_extra;
}
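
/* vp9_update_zbin_extra() recomputes only the zbin_extra terms, using the
 * q_index saved by vp9_mb_init_quantizer(), so per-mode changes to
 * zbin_mode_boost or the activity-based adjustment do not require
 * rebuilding the full quantizer tables. */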
void vp9_frame_init_quantizer(VP9_COMP *cpi) {
// Clear Zbin mode boost for default case
cpi->zbin_mode_boost = 0;
// MB level quantizer setup
vp9_mb_init_quantizer(cpi, &cpi->mb);
}
void vp9_set_quantizer(struct VP9_COMP *cpi, int Q) {
VP9_COMMON *cm = &cpi->common;
cm->base_qindex = Q;
  // If any of the delta_q values change, the update flag will
  // have to be set.
cm->y1dc_delta_q = 0;
cm->y2ac_delta_q = 0;
cm->uvdc_delta_q = 0;
cm->uvac_delta_q = 0;
cm->y2dc_delta_q = 0;
  // The quantizer has to be reinitialized if any delta_q changes.
  // As there are none here for now, this code is inactive.
  // if (update)
  //   vp9_init_quantizer(cpi);
}