vpx/vp9/encoder/vp9_encodemb.c

/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include "vpx_ports/config.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/encoder/vp9_quantize.h"
#include "vp9/encoder/vp9_tokenize.h"
#include "vp9/common/vp9_invtrans.h"
#include "vp9/common/vp9_reconintra.h"
#include "vpx_mem/vpx_mem.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9_rtcd.h"

void vp9_subtract_b_c(BLOCK *be, BLOCKD *bd, int pitch) {
  unsigned char *src_ptr = (*(be->base_src) + be->src);
  short *diff_ptr = be->src_diff;
  unsigned char *pred_ptr = bd->predictor;
  int src_stride = be->src_stride;
  int r, c;

  for (r = 0; r < 4; r++) {
    for (c = 0; c < 4; c++) {
      diff_ptr[c] = src_ptr[c] - pred_ptr[c];
    }

    diff_ptr += pitch;
    pred_ptr += pitch;
    src_ptr += src_stride;
  }
}

void vp9_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch) {
  unsigned char *src_ptr = (*(be->base_src) + be->src);
  short *diff_ptr = be->src_diff;
  unsigned char *pred_ptr = bd->predictor;
  int src_stride = be->src_stride;
  int r, c;

  for (r = 0; r < 8; r++) {
    for (c = 0; c < 8; c++) {
      diff_ptr[c] = src_ptr[c] - pred_ptr[c];
    }

    diff_ptr += pitch;
    pred_ptr += pitch;
    src_ptr += src_stride;
  }
}
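
/* A note derived from the offsets used below: the macroblock diff buffer
 * stores the 16x16 Y residual in its first 256 shorts, followed by the
 * 8x8 U residual at +256 and the 8x8 V residual at +320 (384 in total).
 */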
void vp9_subtract_mbuv_s_c(short *diff, const unsigned char *usrc,
                           const unsigned char *vsrc, int src_stride,
                           const unsigned char *upred,
                           const unsigned char *vpred, int dst_stride) {
  short *udiff = diff + 256;
  short *vdiff = diff + 320;
  int r, c;

  for (r = 0; r < 8; r++) {
    for (c = 0; c < 8; c++) {
      udiff[c] = usrc[c] - upred[c];
    }

    udiff += 8;
    upred += dst_stride;
    usrc += src_stride;
  }

  for (r = 0; r < 8; r++) {
    for (c = 0; c < 8; c++) {
      vdiff[c] = vsrc[c] - vpred[c];
    }

    vdiff += 8;
    vpred += dst_stride;
    vsrc += src_stride;
  }
}

void vp9_subtract_mbuv_c(short *diff, unsigned char *usrc,
                         unsigned char *vsrc, unsigned char *pred,
                         int stride) {
  unsigned char *upred = pred + 256;
  unsigned char *vpred = pred + 320;

  vp9_subtract_mbuv_s_c(diff, usrc, vsrc, stride, upred, vpred, 8);
}

void vp9_subtract_mby_s_c(short *diff, const unsigned char *src,
                          int src_stride, const unsigned char *pred,
                          int dst_stride) {
  int r, c;

  for (r = 0; r < 16; r++) {
    for (c = 0; c < 16; c++) {
      diff[c] = src[c] - pred[c];
    }

    diff += 16;
    pred += dst_stride;
    src += src_stride;
  }
}

#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
void vp9_subtract_sby_s_c(short *diff, const unsigned char *src,
                          int src_stride, const unsigned char *pred,
                          int dst_stride) {
  int r, c;

  for (r = 0; r < 32; r++) {
    for (c = 0; c < 32; c++) {
      diff[c] = src[c] - pred[c];
    }

    diff += 32;
    pred += dst_stride;
    src += src_stride;
  }
}
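
/* For superblocks the diff buffer is laid out analogously: the 32x32 Y
 * residual occupies the first 1024 shorts, with the 16x16 U residual at
 * +1024 and the 16x16 V residual at +1280.
 */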
void vp9_subtract_sbuv_s_c(short *diff, const unsigned char *usrc,
                           const unsigned char *vsrc, int src_stride,
                           const unsigned char *upred,
                           const unsigned char *vpred, int dst_stride) {
  short *udiff = diff + 1024;
  short *vdiff = diff + 1024 + 256;
  int r, c;

  for (r = 0; r < 16; r++) {
    for (c = 0; c < 16; c++) {
      udiff[c] = usrc[c] - upred[c];
    }

    udiff += 16;
    upred += dst_stride;
    usrc += src_stride;
  }

  for (r = 0; r < 16; r++) {
    for (c = 0; c < 16; c++) {
      vdiff[c] = vsrc[c] - vpred[c];
    }

    vdiff += 16;
    vpred += dst_stride;
    vsrc += src_stride;
  }
}
#endif

void vp9_subtract_mby_c(short *diff, unsigned char *src,
                        unsigned char *pred, int stride) {
  vp9_subtract_mby_s_c(diff, src, stride, pred, 16);
}

static void subtract_mb(MACROBLOCK *x) {
  BLOCK *b = &x->block[0];

  vp9_subtract_mby(x->src_diff, *(b->base_src), x->e_mbd.predictor,
                   b->src_stride);
  vp9_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
                    x->e_mbd.predictor, x->src.uv_stride);
}
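
/* Gather the DC coefficient of each of the 16 4x4 luma blocks into the
 * 2nd order (Y2) block at index 24, zeroing the originals so the 1st
 * order blocks are coded without their DC terms.
 */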
static void build_dcblock_4x4(MACROBLOCK *x) {
  short *src_diff_ptr = &x->src_diff[384];
  int i;

  for (i = 0; i < 16; i++) {
    src_diff_ptr[i] = x->coeff[i * 16];
    x->coeff[i * 16] = 0;
  }
}
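
/* Note: the pitch argument passed to the fdct/fht helpers below appears
 * to be in bytes (two per short): 32 matches the 16-short row stride of
 * the luma diff buffer, 16 the 8-short stride of each chroma diff block.
 */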
void vp9_transform_mby_4x4(MACROBLOCK *x) {
  int i;
  MACROBLOCKD *xd = &x->e_mbd;
  int has_2nd_order = get_2nd_order_usage(xd);

  for (i = 0; i < 16; i++) {
    BLOCK *b = &x->block[i];
    TX_TYPE tx_type = get_tx_type_4x4(xd, &xd->block[i]);
    if (tx_type != DCT_DCT) {
      assert(has_2nd_order == 0);
      vp9_fht_c(b->src_diff, 32, b->coeff, tx_type, 4);
    } else {
      x->vp9_short_fdct4x4(&x->block[i].src_diff[0],
                           &x->block[i].coeff[0], 32);
    }
  }

  if (has_2nd_order) {
    // build dc block from 16 y dc values
    build_dcblock_4x4(x);

    // do 2nd order transform on the dc block
    x->short_walsh4x4(&x->block[24].src_diff[0],
                      &x->block[24].coeff[0], 8);
  } else {
    vpx_memset(x->block[24].coeff, 0, 16 * sizeof(x->block[24].coeff[0]));
  }
}

void vp9_transform_mbuv_4x4(MACROBLOCK *x) {
  int i;

  for (i = 16; i < 24; i += 2) {
    x->vp9_short_fdct8x4(&x->block[i].src_diff[0],
                         &x->block[i].coeff[0], 16);
  }
}

static void transform_mb_4x4(MACROBLOCK *x) {
  vp9_transform_mby_4x4(x);
  vp9_transform_mbuv_4x4(x);
}
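
/* With the 8x8 transform there are only four luma DC terms (one per 8x8
 * block); they are stored at positions 0, 1, 4 and 8 of block 24, the
 * layout read by the 2x2 Haar transform applied below, and zeroed in
 * the 1st order coefficient blocks.
 */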
static void build_dcblock_8x8(MACROBLOCK *x) {
  int16_t *src_diff_ptr = x->block[24].src_diff;
  int i;

  for (i = 0; i < 16; i++) {
    src_diff_ptr[i] = 0;
  }

  src_diff_ptr[0] = x->coeff[0 * 16];
  src_diff_ptr[1] = x->coeff[4 * 16];
  src_diff_ptr[4] = x->coeff[8 * 16];
  src_diff_ptr[8] = x->coeff[12 * 16];

  x->coeff[0 * 16]  = 0;
  x->coeff[4 * 16]  = 0;
  x->coeff[8 * 16]  = 0;
  x->coeff[12 * 16] = 0;
}
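
/* The four 8x8 luma quadrants start at 4x4-block indices 0, 2, 8 and 10,
 * covered by the two loops below as {0, 8} and {2, 10}. In the second
 * loop the source is block i but the destination is block i + 2: each
 * 8x8 block's 64 coefficients occupy four consecutive 4x4 coefficient
 * slots, so the outputs land at blocks 0, 4, 8 and 12.
 */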
void vp9_transform_mby_8x8(MACROBLOCK *x) {
  int i;
  MACROBLOCKD *xd = &x->e_mbd;
  TX_TYPE tx_type;
  int has_2nd_order = get_2nd_order_usage(xd);

  for (i = 0; i < 9; i += 8) {
    BLOCK *b = &x->block[i];
    tx_type = get_tx_type_8x8(xd, &xd->block[i]);
    if (tx_type != DCT_DCT) {
      assert(has_2nd_order == 0);
      vp9_fht_c(b->src_diff, 32, b->coeff, tx_type, 8);
    } else {
      x->vp9_short_fdct8x8(&x->block[i].src_diff[0],
                           &x->block[i].coeff[0], 32);
    }
  }

  for (i = 2; i < 11; i += 8) {
    BLOCK *b = &x->block[i];
    tx_type = get_tx_type_8x8(xd, &xd->block[i]);
    if (tx_type != DCT_DCT) {
      assert(has_2nd_order == 0);
      vp9_fht_c(b->src_diff, 32, (b + 2)->coeff, tx_type, 8);
    } else {
      x->vp9_short_fdct8x8(&x->block[i].src_diff[0],
                           &x->block[i + 2].coeff[0], 32);
    }
  }

  if (has_2nd_order) {
    // build dc block from 2x2 y dc values
    build_dcblock_8x8(x);

    // do 2nd order transform on the dc block
    x->short_fhaar2x2(&x->block[24].src_diff[0],
                      &x->block[24].coeff[0], 8);
  } else {
    vpx_memset(x->block[24].coeff, 0, 16 * sizeof(x->block[24].coeff[0]));
  }
}

void vp9_transform_mbuv_8x8(MACROBLOCK *x) {
  int i;

  for (i = 16; i < 24; i += 4) {
    x->vp9_short_fdct8x8(&x->block[i].src_diff[0],
                         &x->block[i].coeff[0], 16);
  }
}

void vp9_transform_mb_8x8(MACROBLOCK *x) {
  vp9_transform_mby_8x8(x);
  vp9_transform_mbuv_8x8(x);
}

void vp9_transform_mby_16x16(MACROBLOCK *x) {
  MACROBLOCKD *xd = &x->e_mbd;
  BLOCK *b = &x->block[0];
  TX_TYPE tx_type = get_tx_type_16x16(xd, &xd->block[0]);
  vp9_clear_system_state();
  if (tx_type != DCT_DCT) {
    vp9_fht_c(b->src_diff, 32, b->coeff, tx_type, 16);
  } else {
    x->vp9_short_fdct16x16(&x->block[0].src_diff[0],
                           &x->block[0].coeff[0], 32);
  }
}

void vp9_transform_mb_16x16(MACROBLOCK *x) {
  vp9_transform_mby_16x16(x);
  vp9_transform_mbuv_8x8(x);
}

#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
void vp9_transform_sby_32x32(MACROBLOCK *x) {
  SUPERBLOCK * const x_sb = &x->sb_coeff_data;
  vp9_short_fdct32x32(x_sb->src_diff, x_sb->coeff, 64);
}

void vp9_transform_sbuv_16x16(MACROBLOCK *x) {
  SUPERBLOCK * const x_sb = &x->sb_coeff_data;
  vp9_clear_system_state();
  x->vp9_short_fdct16x16(x_sb->src_diff + 1024,
                         x_sb->coeff + 1024, 32);
  x->vp9_short_fdct16x16(x_sb->src_diff + 1280,
                         x_sb->coeff + 1280, 32);
}
#endif

#define RDTRUNC(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
#define RDTRUNC_8x8(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
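
/* RDTRUNC and RDTRUNC_8x8 ignore their DM and D arguments: they keep
 * only the low 8 bits of the bias-plus-scaled-rate term, and are used
 * by UPDATE_RD_COST() below solely to break exact RD-cost ties
 * deterministically.
 */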
typedef struct vp9_token_state vp9_token_state;
struct vp9_token_state {
  int           rate;
  int           error;
  int           next;
  signed char   token;
  short         qc;
};
// TODO: experiments to find optimal multiplier values
#define Y1_RD_MULT 4
#define UV_RD_MULT 2
#define Y2_RD_MULT 4

static const int plane_rd_mult[4] = {
  Y1_RD_MULT,
  Y2_RD_MULT,
  UV_RD_MULT,
  Y1_RD_MULT
};

#define UPDATE_RD_COST()\
{\
  rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);\
  rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);\
  if (rd_cost0 == rd_cost1) {\
    rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);\
    rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);\
  }\
}
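
/* optimize_b() below runs a Viterbi trellis over one block's
 * coefficients in scan order. For each position i, tokens[i][0] is the
 * best path reaching i with the originally quantized level, and
 * tokens[i][1] the path with that level reduced by one (or dropped to
 * zero/EOB) when that is worth evaluating; row eob holds the sentinel.
 * 257 rows cover the largest (256-coefficient, 16x16) case plus the
 * sentinel.
 */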
static void optimize_b(MACROBLOCK *mb, int i, PLANE_TYPE type,
                       ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
                       int tx_size) {
  BLOCK *b = &mb->block[i];
  BLOCKD *d = &mb->e_mbd.block[i];
  vp9_token_state tokens[257][2];
  unsigned best_index[257][2];
  const short *dequant_ptr = d->dequant, *coeff_ptr = b->coeff;
  short *qcoeff_ptr = d->qcoeff;
  short *dqcoeff_ptr = d->dqcoeff;
  int eob = d->eob, final_eob, sz = 0;
  int i0 = (type == PLANE_TYPE_Y_NO_DC);
  int rc, x, next;
  int64_t rdmult, rddiv, rd_cost0, rd_cost1;
  int rate0, rate1, error0, error1, t0, t1;
  int best, band, pt;
  int err_mult = plane_rd_mult[type];
  int default_eob;
  int const *scan, *bands;

  switch (tx_size) {
    default:
    case TX_4X4:
      scan = vp9_default_zig_zag1d_4x4;
      bands = vp9_coef_bands_4x4;
      default_eob = 16;
      // TODO: this isn't called (for intra4x4 modes), but will be left in
      // since it could be used later
      {
        TX_TYPE tx_type = get_tx_type_4x4(&mb->e_mbd, d);
        if (tx_type != DCT_DCT) {
          switch (tx_type) {
            case ADST_DCT:
              scan = vp9_row_scan_4x4;
              break;
            case DCT_ADST:
              scan = vp9_col_scan_4x4;
              break;
            default:
              scan = vp9_default_zig_zag1d_4x4;
              break;
          }
        } else {
          scan = vp9_default_zig_zag1d_4x4;
        }
      }
      break;
    case TX_8X8:
      scan = vp9_default_zig_zag1d_8x8;
      bands = vp9_coef_bands_8x8;
      default_eob = 64;
      break;
    case TX_16X16:
      scan = vp9_default_zig_zag1d_16x16;
      bands = vp9_coef_bands_16x16;
      default_eob = 256;
      break;
  }

  /* Now set up a Viterbi trellis to evaluate alternative roundings. */
  rdmult = mb->rdmult * err_mult;
  if (mb->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME)
    rdmult = (rdmult * 9) >> 4;
  rddiv = mb->rddiv;
  memset(best_index, 0, sizeof(best_index));

  /* Initialize the sentinel node of the trellis. */
  tokens[eob][0].rate = 0;
  tokens[eob][0].error = 0;
  tokens[eob][0].next = default_eob;
  tokens[eob][0].token = DCT_EOB_TOKEN;
  tokens[eob][0].qc = 0;
  *(tokens[eob] + 1) = *(tokens[eob] + 0);
  next = eob;
  for (i = eob; i-- > i0;) {
    int base_bits, d2, dx;

    rc = scan[i];
    x = qcoeff_ptr[rc];
    /* Only add a trellis state for non-zero coefficients. */
    if (x) {
      int shortcut = 0;
      error0 = tokens[next][0].error;
      error1 = tokens[next][1].error;
      /* Evaluate the first possibility for this state. */
      rate0 = tokens[next][0].rate;
      rate1 = tokens[next][1].rate;
      t0 = (vp9_dct_value_tokens_ptr + x)->Token;
      /* Consider both possible successor states. */
      if (next < default_eob) {
        band = bands[i + 1];
        pt = vp9_prev_token_class[t0];
        rate0 +=
            mb->token_costs[tx_size][type][band][pt][tokens[next][0].token];
        rate1 +=
            mb->token_costs[tx_size][type][band][pt][tokens[next][1].token];
      }
      UPDATE_RD_COST();
      /* And pick the best. */
      best = rd_cost1 < rd_cost0;
      base_bits = *(vp9_dct_value_cost_ptr + x);
      dx = dqcoeff_ptr[rc] - coeff_ptr[rc];
      d2 = dx * dx;
      tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
      tokens[i][0].error = d2 + (best ? error1 : error0);
      tokens[i][0].next = next;
      tokens[i][0].token = t0;
      tokens[i][0].qc = x;
      best_index[i][0] = best;
      /* Evaluate the second possibility for this state. */
      rate0 = tokens[next][0].rate;
      rate1 = tokens[next][1].rate;
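      /* The level can only be lowered profitably if the coefficient was
       * rounded up during quantization, i.e. |x| * dequant overshoots
       * |coeff| by less than one full quantizer step; that is what the
       * "shortcut" test below checks.
       */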
      if ((abs(x) * dequant_ptr[rc != 0] > abs(coeff_ptr[rc])) &&
          (abs(x) * dequant_ptr[rc != 0] < abs(coeff_ptr[rc]) +
                                               dequant_ptr[rc != 0]))
        shortcut = 1;
      else
        shortcut = 0;

      if (shortcut) {
        sz = -(x < 0);
        x -= 2 * sz + 1;
      }

      /* Consider both possible successor states. */
      if (!x) {
        /* If we reduced this coefficient to zero, check to see if
         * we need to move the EOB back here.
         */
        t0 = tokens[next][0].token == DCT_EOB_TOKEN ?
             DCT_EOB_TOKEN : ZERO_TOKEN;
        t1 = tokens[next][1].token == DCT_EOB_TOKEN ?
             DCT_EOB_TOKEN : ZERO_TOKEN;
      } else {
        t0 = t1 = (vp9_dct_value_tokens_ptr + x)->Token;
      }
      if (next < default_eob) {
        band = bands[i + 1];
        if (t0 != DCT_EOB_TOKEN) {
          pt = vp9_prev_token_class[t0];
          rate0 += mb->token_costs[tx_size][type][band][pt][
              tokens[next][0].token];
        }
        if (t1 != DCT_EOB_TOKEN) {
          pt = vp9_prev_token_class[t1];
          rate1 += mb->token_costs[tx_size][type][band][pt][
              tokens[next][1].token];
        }
      }

      UPDATE_RD_COST();
      /* And pick the best. */
      best = rd_cost1 < rd_cost0;
      base_bits = *(vp9_dct_value_cost_ptr + x);

      if (shortcut) {
        dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
        d2 = dx * dx;
      }
      tokens[i][1].rate = base_bits + (best ? rate1 : rate0);
      tokens[i][1].error = d2 + (best ? error1 : error0);
      tokens[i][1].next = next;
      tokens[i][1].token = best ? t1 : t0;
      tokens[i][1].qc = x;
      best_index[i][1] = best;
      /* Finally, make this the new head of the trellis. */
      next = i;
    }
    /* There's no choice to make for a zero coefficient, so we don't
     * add a new trellis node, but we do need to update the costs.
     */
    else {
      band = bands[i + 1];
      t0 = tokens[next][0].token;
      t1 = tokens[next][1].token;
      /* Update the cost of each path if we're past the EOB token. */
      if (t0 != DCT_EOB_TOKEN) {
        tokens[next][0].rate += mb->token_costs[tx_size][type][band][0][t0];
        tokens[next][0].token = ZERO_TOKEN;
      }
      if (t1 != DCT_EOB_TOKEN) {
        tokens[next][1].rate += mb->token_costs[tx_size][type][band][0][t1];
        tokens[next][1].token = ZERO_TOKEN;
      }
      /* Don't update next, because we didn't add a new node. */
    }
  }

  /* Now pick the best path through the whole trellis. */
  band = bands[i + 1];
  VP9_COMBINEENTROPYCONTEXTS(pt, *a, *l);
  rate0 = tokens[next][0].rate;
  rate1 = tokens[next][1].rate;
  error0 = tokens[next][0].error;
  error1 = tokens[next][1].error;
  t0 = tokens[next][0].token;
  t1 = tokens[next][1].token;
  rate0 += mb->token_costs[tx_size][type][band][pt][t0];
  rate1 += mb->token_costs[tx_size][type][band][pt][t1];
  UPDATE_RD_COST();
  best = rd_cost1 < rd_cost0;
  final_eob = i0 - 1;
  for (i = next; i < eob; i = next) {
    x = tokens[i][best].qc;
    if (x)
      final_eob = i;
    rc = scan[i];
    qcoeff_ptr[rc] = x;
    dqcoeff_ptr[rc] = (x * dequant_ptr[rc != 0]);
    next = tokens[i][best].next;
    best = best_index[i][best];
  }
  final_eob++;

  d->eob = final_eob;
  *a = *l = (d->eob > !type);
}

/**************************************************************************
 Our inverse Hadamard transform is effectively a weighted sum of all 16
 inputs, with each weight either +1 or -1. It has a last-stage scaling of
 (sum + 1) >> 2, and the dc-only idct is (dc + 16) >> 5. So if all the
 sums are between -65 and 63, the output after inverse WHT and idct will
 be all zero. A sum of absolute values smaller than 65 guarantees that
 all 16 different (+1/-1)-weighted sums in the WHT fall between -65 and
 +65.
**************************************************************************/
#define SUM_2ND_COEFF_THRESH 65
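
/* Example of the reasoning above: with bd->eob == 3 and dequantized 2nd
 * order coefficients 20, -25 and 15, the absolute sum is 60 < 65, so by
 * the argument above the inverse WHT + idct would reconstruct all zeros
 * anyway; the check below can then clear the block outright.
 */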
static void check_reset_2nd_coeffs(MACROBLOCKD *xd,
                                   ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) {
  int sum = 0;
  int i;
  BLOCKD *bd = &xd->block[24];

  if (bd->dequant[0] >= SUM_2ND_COEFF_THRESH &&
      bd->dequant[1] >= SUM_2ND_COEFF_THRESH)
    return;

  for (i = 0; i < bd->eob; i++) {
    int coef = bd->dqcoeff[vp9_default_zig_zag1d_4x4[i]];
    sum += (coef >= 0) ? coef : -coef;
    if (sum >= SUM_2ND_COEFF_THRESH)
      return;
  }

  if (sum < SUM_2ND_COEFF_THRESH) {
    for (i = 0; i < bd->eob; i++) {
      int rc = vp9_default_zig_zag1d_4x4[i];
      bd->qcoeff[rc] = 0;
      bd->dqcoeff[rc] = 0;
    }
    bd->eob = 0;
    *a = *l = (bd->eob != 0);
  }
}

#define SUM_2ND_COEFF_THRESH_8X8 32
static void check_reset_8x8_2nd_coeffs(MACROBLOCKD *xd,
                                       ENTROPY_CONTEXT *a,
                                       ENTROPY_CONTEXT *l) {
  int sum = 0;
  BLOCKD *bd = &xd->block[24];
  int coef;

  coef = bd->dqcoeff[0];
  sum += (coef >= 0) ? coef : -coef;
  coef = bd->dqcoeff[1];
  sum += (coef >= 0) ? coef : -coef;
  coef = bd->dqcoeff[4];
  sum += (coef >= 0) ? coef : -coef;
  coef = bd->dqcoeff[8];
  sum += (coef >= 0) ? coef : -coef;

  if (sum < SUM_2ND_COEFF_THRESH_8X8) {
    bd->qcoeff[0] = 0;
    bd->dqcoeff[0] = 0;
    bd->qcoeff[1] = 0;
    bd->dqcoeff[1] = 0;
    bd->qcoeff[4] = 0;
    bd->dqcoeff[4] = 0;
    bd->qcoeff[8] = 0;
    bd->dqcoeff[8] = 0;
    bd->eob = 0;
    *a = *l = (bd->eob != 0);
  }
}

void vp9_optimize_mby_4x4(MACROBLOCK *x) {
  int b;
  PLANE_TYPE type;
  int has_2nd_order;
  ENTROPY_CONTEXT_PLANES t_above, t_left;
  ENTROPY_CONTEXT *ta;
  ENTROPY_CONTEXT *tl;

  if (!x->e_mbd.above_context || !x->e_mbd.left_context)
    return;

  vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
  vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));

  ta = (ENTROPY_CONTEXT *)&t_above;
  tl = (ENTROPY_CONTEXT *)&t_left;
  has_2nd_order = get_2nd_order_usage(&x->e_mbd);
  type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC;

  for (b = 0; b < 16; b++) {
    optimize_b(x, b, type,
               ta + vp9_block2above[TX_4X4][b],
               tl + vp9_block2left[TX_4X4][b], TX_4X4);
  }

  if (has_2nd_order) {
    b = 24;
    optimize_b(x, b, PLANE_TYPE_Y2,
               ta + vp9_block2above[TX_4X4][b],
               tl + vp9_block2left[TX_4X4][b], TX_4X4);
    check_reset_2nd_coeffs(&x->e_mbd,
                           ta + vp9_block2above[TX_4X4][b],
                           tl + vp9_block2left[TX_4X4][b]);
  }
}

void vp9_optimize_mbuv_4x4(MACROBLOCK *x) {
  int b;
  ENTROPY_CONTEXT_PLANES t_above, t_left;
  ENTROPY_CONTEXT *ta;
  ENTROPY_CONTEXT *tl;

  if (!x->e_mbd.above_context || !x->e_mbd.left_context)
    return;

  vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
  vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));

  ta = (ENTROPY_CONTEXT *)&t_above;
  tl = (ENTROPY_CONTEXT *)&t_left;

  for (b = 16; b < 24; b++) {
    optimize_b(x, b, PLANE_TYPE_UV,
               ta + vp9_block2above[TX_4X4][b],
               tl + vp9_block2left[TX_4X4][b], TX_4X4);
  }
}

static void optimize_mb_4x4(MACROBLOCK *x) {
  vp9_optimize_mby_4x4(x);
  vp9_optimize_mbuv_4x4(x);
}

void vp9_optimize_mby_8x8(MACROBLOCK *x) {
  int b;
  PLANE_TYPE type;
  ENTROPY_CONTEXT_PLANES t_above, t_left;
  ENTROPY_CONTEXT *ta;
  ENTROPY_CONTEXT *tl;
  int has_2nd_order = get_2nd_order_usage(&x->e_mbd);

  if (!x->e_mbd.above_context || !x->e_mbd.left_context)
    return;

  vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
  vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));

  ta = (ENTROPY_CONTEXT *)&t_above;
  tl = (ENTROPY_CONTEXT *)&t_left;
  type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC;
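
  /* Each 8x8 block spans two 4x4 entropy-context slots in each
   * direction; with CONFIG_CNVCONTEXT the pair is collapsed into a
   * single "any nonzero" flag before optimization and the result is
   * written back to both slots afterwards.
   */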
  for (b = 0; b < 16; b += 4) {
    ENTROPY_CONTEXT *const a = ta + vp9_block2above[TX_8X8][b];
    ENTROPY_CONTEXT *const l = tl + vp9_block2left[TX_8X8][b];
#if CONFIG_CNVCONTEXT
    ENTROPY_CONTEXT above_ec = (a[0] + a[1]) != 0;
    ENTROPY_CONTEXT left_ec = (l[0] + l[1]) != 0;
#else
    ENTROPY_CONTEXT above_ec = a[0];
    ENTROPY_CONTEXT left_ec = l[0];
#endif
    optimize_b(x, b, type, &above_ec, &left_ec, TX_8X8);
    a[1] = a[0] = above_ec;
    l[1] = l[0] = left_ec;
  }

  // 8x8 always has a 2nd order block
  if (has_2nd_order) {
    check_reset_8x8_2nd_coeffs(&x->e_mbd,
                               ta + vp9_block2above[TX_8X8][24],
                               tl + vp9_block2left[TX_8X8][24]);
  }
}

void vp9_optimize_mbuv_8x8(MACROBLOCK *x) {
  int b;
  ENTROPY_CONTEXT *const ta = (ENTROPY_CONTEXT *)x->e_mbd.above_context;
  ENTROPY_CONTEXT *const tl = (ENTROPY_CONTEXT *)x->e_mbd.left_context;

  if (!ta || !tl)
    return;

  for (b = 16; b < 24; b += 4) {
    ENTROPY_CONTEXT *const a = ta + vp9_block2above[TX_8X8][b];
    ENTROPY_CONTEXT *const l = tl + vp9_block2left[TX_8X8][b];
#if CONFIG_CNVCONTEXT
    ENTROPY_CONTEXT above_ec = (a[0] + a[1]) != 0;
    ENTROPY_CONTEXT left_ec = (l[0] + l[1]) != 0;
#else
    ENTROPY_CONTEXT above_ec = a[0];
    ENTROPY_CONTEXT left_ec = l[0];
#endif
    optimize_b(x, b, PLANE_TYPE_UV, &above_ec, &left_ec, TX_8X8);
  }
}

static void optimize_mb_8x8(MACROBLOCK *x) {
  vp9_optimize_mby_8x8(x);
  vp9_optimize_mbuv_8x8(x);
}

void vp9_optimize_mby_16x16(MACROBLOCK *x) {
  ENTROPY_CONTEXT_PLANES *const t_above = x->e_mbd.above_context;
  ENTROPY_CONTEXT_PLANES *const t_left = x->e_mbd.left_context;
  ENTROPY_CONTEXT ta, tl;

  if (!t_above || !t_left)
    return;

#if CONFIG_CNVCONTEXT
  ta = (t_above->y1[0] + t_above->y1[1] + t_above->y1[2] + t_above->y1[3]) != 0;
  tl = (t_left->y1[0] + t_left->y1[1] + t_left->y1[2] + t_left->y1[3]) != 0;
#else
  ta = t_above->y1[0];
  tl = t_left->y1[0];
#endif
  optimize_b(x, 0, PLANE_TYPE_Y_WITH_DC, &ta, &tl, TX_16X16);
}

static void optimize_mb_16x16(MACROBLOCK *x) {
  vp9_optimize_mby_16x16(x);
  vp9_optimize_mbuv_8x8(x);
}
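
/* Forward transform, quantize, optionally run the trellis optimization,
 * then inverse transform, all at the macroblock's selected transform
 * size. SPLITMV at 8x8 is special-cased because its chroma blocks are
 * coded with the 4x4 transform.
 */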
void vp9_fidct_mb(MACROBLOCK *x) {
  MACROBLOCKD *const xd = &x->e_mbd;
  TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;

  if (tx_size == TX_16X16) {
    vp9_transform_mb_16x16(x);
    vp9_quantize_mb_16x16(x);
    if (x->optimize)
      optimize_mb_16x16(x);
    vp9_inverse_transform_mb_16x16(xd);
  } else if (tx_size == TX_8X8) {
    if (xd->mode_info_context->mbmi.mode == SPLITMV) {
      assert(xd->mode_info_context->mbmi.partitioning != PARTITIONING_4X4);
      vp9_transform_mby_8x8(x);
      vp9_transform_mbuv_4x4(x);
      vp9_quantize_mby_8x8(x);
      vp9_quantize_mbuv_4x4(x);
      if (x->optimize) {
        vp9_optimize_mby_8x8(x);
        vp9_optimize_mbuv_4x4(x);
      }
      vp9_inverse_transform_mby_8x8(xd);
      vp9_inverse_transform_mbuv_4x4(xd);
    } else {
      vp9_transform_mb_8x8(x);
      vp9_quantize_mb_8x8(x);
      if (x->optimize)
        optimize_mb_8x8(x);
      vp9_inverse_transform_mb_8x8(xd);
    }
  } else {
    transform_mb_4x4(x);
    vp9_quantize_mb_4x4(x);
    if (x->optimize)
      optimize_mb_4x4(x);
    vp9_inverse_transform_mb_4x4(xd);
  }
}

void vp9_encode_inter16x16(MACROBLOCK *x) {
  MACROBLOCKD *const xd = &x->e_mbd;

  vp9_build_inter_predictors_mb(xd);
  subtract_mb(x);
  vp9_fidct_mb(x);
  vp9_recon_mb(xd);
}

/* this function is used by first pass only */
void vp9_encode_inter16x16y(MACROBLOCK *x) {
  MACROBLOCKD *xd = &x->e_mbd;
  BLOCK *b = &x->block[0];

#if CONFIG_PRED_FILTER
  // Disable the prediction filter for firstpass
  xd->mode_info_context->mbmi.pred_filter_enabled = 0;
#endif

  vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);

  vp9_subtract_mby(x->src_diff, *(b->base_src), xd->predictor, b->src_stride);

  vp9_transform_mby_4x4(x);
  vp9_quantize_mby_4x4(x);
  vp9_inverse_transform_mby_4x4(xd);

  vp9_recon_mby(xd);
}