2010-05-18 17:58:33 +02:00
|
|
|
/*
|
2010-09-09 14:16:39 +02:00
|
|
|
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
|
2010-05-18 17:58:33 +02:00
|
|
|
*
|
2010-06-18 18:39:21 +02:00
|
|
|
* Use of this source code is governed by a BSD-style license
|
2010-06-04 22:19:40 +02:00
|
|
|
* that can be found in the LICENSE file in the root of the source
|
|
|
|
* tree. An additional intellectual property rights grant can be found
|
2010-06-18 18:39:21 +02:00
|
|
|
* in the file PATENTS. All contributing project authors may
|
2010-06-04 22:19:40 +02:00
|
|
|
* be found in the AUTHORS file in the root of the source tree.
|
2010-05-18 17:58:33 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <math.h>
|
|
|
|
#include <limits.h>
|
|
|
|
#include <assert.h>
|
2012-11-27 22:59:17 +01:00
|
|
|
#include "vp9/common/vp9_pragmas.h"
|
|
|
|
|
2012-11-28 19:41:40 +01:00
|
|
|
#include "vp9/encoder/vp9_tokenize.h"
|
|
|
|
#include "vp9/encoder/vp9_treewriter.h"
|
|
|
|
#include "vp9/encoder/vp9_onyx_int.h"
|
|
|
|
#include "vp9/encoder/vp9_modecosts.h"
|
|
|
|
#include "vp9/encoder/vp9_encodeintra.h"
|
2012-11-27 22:59:17 +01:00
|
|
|
#include "vp9/common/vp9_entropymode.h"
|
|
|
|
#include "vp9/common/vp9_reconinter.h"
|
|
|
|
#include "vp9/common/vp9_reconintra.h"
|
|
|
|
#include "vp9/common/vp9_findnearmv.h"
|
|
|
|
#include "vp9/common/vp9_quant_common.h"
|
2012-11-28 19:41:40 +01:00
|
|
|
#include "vp9/encoder/vp9_encodemb.h"
|
|
|
|
#include "vp9/encoder/vp9_quantize.h"
|
|
|
|
#include "vp9/encoder/vp9_variance.h"
|
|
|
|
#include "vp9/encoder/vp9_mcomp.h"
|
|
|
|
#include "vp9/encoder/vp9_rdopt.h"
|
|
|
|
#include "vp9/encoder/vp9_ratectrl.h"
|
2010-05-18 17:58:33 +02:00
|
|
|
#include "vpx_mem/vpx_mem.h"
|
2012-11-27 22:59:17 +01:00
|
|
|
#include "vp9/common/vp9_systemdependent.h"
|
|
|
|
#include "vp9/encoder/vp9_encodemv.h"
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-11-27 22:59:17 +01:00
|
|
|
#include "vp9/common/vp9_seg_common.h"
|
|
|
|
#include "vp9/common/vp9_pred_common.h"
|
|
|
|
#include "vp9/common/vp9_entropy.h"
|
2012-11-09 02:09:30 +01:00
|
|
|
#include "vp9_rtcd.h"
|
2012-11-27 22:59:17 +01:00
|
|
|
#include "vp9/common/vp9_mvref_common.h"
|
2013-01-06 03:20:25 +01:00
|
|
|
#include "vp9/common/vp9_common.h"
|
2012-08-24 16:44:01 +02:00
|
|
|
|
2010-05-18 17:58:33 +02:00
|
|
|
/* Maximum of two values. NOTE: function-like macro — both arguments may be
 * evaluated more than once, so avoid side effects in a/b. */
#define MAXF(a,b) (((a) > (b)) ? (a) : (b))

/* Sentinel motion vector value: both 16-bit components set to 0x8000,
 * a value that cannot occur as a real MV. */
#define INVALID_MV 0x80008000

/* Factor to weigh the rate for switchable interp filters */
#define SWITCHABLE_INTERP_RATE_FACTOR 1
|
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
/* Thresholds used by the automatic speed-selection logic, indexed by
 * encoder speed setting (0..16). Lower entries at higher speeds make
 * the encoder drop to a cheaper mode search sooner. */
static const int auto_speed_thresh[17] = {
  1000,
  200,
  150,
  130,
  150,
  125,
  120,
  115,
  115,
  115,
  115,
  115,
  115,
  115,
  115,
  115,
  105
};
|
|
|
|
|
2012-10-31 01:12:12 +01:00
|
|
|
/* Order in which the RD loop evaluates candidate prediction modes.
 * Each entry is {prediction mode, first reference frame, second reference
 * frame}; NONE as the second reference means single prediction, a real
 * frame means compound prediction, and INTRA_FRAME as the second
 * reference (when CONFIG_COMP_INTERINTRA_PRED is enabled) means combined
 * inter-intra prediction. Cheap/likely modes are listed first. */
const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
  {ZEROMV,    LAST_FRAME,   NONE},
  {DC_PRED,   INTRA_FRAME,  NONE},

  {NEARESTMV, LAST_FRAME,   NONE},
  {NEARMV,    LAST_FRAME,   NONE},

  {ZEROMV,    GOLDEN_FRAME, NONE},
  {NEARESTMV, GOLDEN_FRAME, NONE},

  {ZEROMV,    ALTREF_FRAME, NONE},
  {NEARESTMV, ALTREF_FRAME, NONE},

  {NEARMV,    GOLDEN_FRAME, NONE},
  {NEARMV,    ALTREF_FRAME, NONE},

  {V_PRED,    INTRA_FRAME,  NONE},
  {H_PRED,    INTRA_FRAME,  NONE},
  {D45_PRED,  INTRA_FRAME,  NONE},
  {D135_PRED, INTRA_FRAME,  NONE},
  {D117_PRED, INTRA_FRAME,  NONE},
  {D153_PRED, INTRA_FRAME,  NONE},
  {D27_PRED,  INTRA_FRAME,  NONE},
  {D63_PRED,  INTRA_FRAME,  NONE},

  {TM_PRED,   INTRA_FRAME,  NONE},

  {NEWMV,     LAST_FRAME,   NONE},
  {NEWMV,     GOLDEN_FRAME, NONE},
  {NEWMV,     ALTREF_FRAME, NONE},

  {SPLITMV,   LAST_FRAME,   NONE},
  {SPLITMV,   GOLDEN_FRAME, NONE},
  {SPLITMV,   ALTREF_FRAME, NONE},

  {B_PRED,    INTRA_FRAME,  NONE},
  {I8X8_PRED, INTRA_FRAME,  NONE},

  /* compound prediction modes */
  {ZEROMV,    LAST_FRAME,   GOLDEN_FRAME},
  {NEARESTMV, LAST_FRAME,   GOLDEN_FRAME},
  {NEARMV,    LAST_FRAME,   GOLDEN_FRAME},

  {ZEROMV,    ALTREF_FRAME, LAST_FRAME},
  {NEARESTMV, ALTREF_FRAME, LAST_FRAME},
  {NEARMV,    ALTREF_FRAME, LAST_FRAME},

  {ZEROMV,    GOLDEN_FRAME, ALTREF_FRAME},
  {NEARESTMV, GOLDEN_FRAME, ALTREF_FRAME},
  {NEARMV,    GOLDEN_FRAME, ALTREF_FRAME},

  {NEWMV,     LAST_FRAME,   GOLDEN_FRAME},
  {NEWMV,     ALTREF_FRAME, LAST_FRAME },
  {NEWMV,     GOLDEN_FRAME, ALTREF_FRAME},

  {SPLITMV,   LAST_FRAME,   GOLDEN_FRAME},
  {SPLITMV,   ALTREF_FRAME, LAST_FRAME },
  {SPLITMV,   GOLDEN_FRAME, ALTREF_FRAME},

#if CONFIG_COMP_INTERINTRA_PRED
  /* compound inter-intra prediction */
  {ZEROMV,    LAST_FRAME,   INTRA_FRAME},
  {NEARESTMV, LAST_FRAME,   INTRA_FRAME},
  {NEARMV,    LAST_FRAME,   INTRA_FRAME},
  {NEWMV,     LAST_FRAME,   INTRA_FRAME},

  {ZEROMV,    GOLDEN_FRAME, INTRA_FRAME},
  {NEARESTMV, GOLDEN_FRAME, INTRA_FRAME},
  {NEARMV,    GOLDEN_FRAME, INTRA_FRAME},
  {NEWMV,     GOLDEN_FRAME, INTRA_FRAME},

  {ZEROMV,    ALTREF_FRAME, INTRA_FRAME},
  {NEARESTMV, ALTREF_FRAME, INTRA_FRAME},
  {NEARMV,    ALTREF_FRAME, INTRA_FRAME},
  {NEWMV,     ALTREF_FRAME, INTRA_FRAME},
#endif
};
|
|
|
|
|
2012-12-08 01:09:59 +01:00
|
|
|
static void fill_token_costs(vp9_coeff_count *c,
|
|
|
|
vp9_coeff_probs *p,
|
|
|
|
int block_type_counts) {
|
2013-02-19 22:36:38 +01:00
|
|
|
int i, j, k, l;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
|
|
|
for (i = 0; i < block_type_counts; i++)
|
2013-02-19 22:36:38 +01:00
|
|
|
for (j = 0; j < REF_TYPES; j++)
|
|
|
|
for (k = 0; k < COEF_BANDS; k++)
|
|
|
|
for (l = 0; l < PREV_COEF_CONTEXTS; l++) {
|
|
|
|
if (l == 0 && k > 0)
|
|
|
|
vp9_cost_tokens_skip((int *)(c[i][j][k][l]),
|
|
|
|
p[i][j][k][l],
|
|
|
|
vp9_coef_tree);
|
|
|
|
else
|
|
|
|
vp9_cost_tokens((int *)(c[i][j][k][l]),
|
|
|
|
p[i][j][k][l],
|
|
|
|
vp9_coef_tree);
|
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
|
|
|
|
2012-05-02 22:53:15 +02:00
|
|
|
|
2012-10-30 22:25:33 +01:00
|
|
|
/* RDMULT scaling factors indexed by the two-pass intra/inter ratio
 * (twopass.next_iiratio, clamped to 31); only very low ratios (< 5)
 * produce a boost, all other entries are 0. */
static int rd_iifactor[32] = { 4, 4, 3, 2, 1, 0, 0, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0, 0, 0, };
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2010-12-02 00:50:14 +01:00
|
|
|
// 3* dc_qlookup[Q]*dc_qlookup[Q];
|
2011-11-23 12:32:20 +01:00
|
|
|
|
2010-12-24 04:59:12 +01:00
|
|
|
/* values are now correlated to quantizer */
|
2011-11-21 16:45:10 +01:00
|
|
|
/* SAD-per-bit lookup tables, one entry per quantizer index; filled by
 * vp9_init_me_luts() and copied into the macroblock context by
 * vp9_initialize_me_consts(). */
static int sad_per_bit16lut[QINDEX_RANGE];
static int sad_per_bit4lut[QINDEX_RANGE];
|
|
|
|
|
2012-10-30 20:58:42 +01:00
|
|
|
void vp9_init_me_luts() {
|
2012-07-14 00:21:29 +02:00
|
|
|
int i;
|
|
|
|
|
|
|
|
// Initialize the sad lut tables using a formulaic calculation for now
|
|
|
|
// This is to make it easier to resolve the impact of experimental changes
|
|
|
|
// to the quantizer tables.
|
|
|
|
for (i = 0; i < QINDEX_RANGE; i++) {
|
|
|
|
sad_per_bit16lut[i] =
|
2012-10-30 20:58:42 +01:00
|
|
|
(int)((0.0418 * vp9_convert_qindex_to_q(i)) + 2.4107);
|
|
|
|
sad_per_bit4lut[i] = (int)((0.063 * vp9_convert_qindex_to_q(i)) + 2.742);
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
2011-11-21 16:45:10 +01:00
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-10-30 02:04:33 +01:00
|
|
|
/* Derive the rate-distortion multiplier from the quantizer index:
 * proportional to the square of the DC quantizer step size. */
static int compute_rd_mult(int qindex) {
  const int q = vp9_dc_quant(qindex, 0);

  return (11 * q * q) >> 6;
}
|
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
/* Load the motion-estimation SAD-per-bit constants for the given
 * quantizer index into the encoder's macroblock context. */
void vp9_initialize_me_consts(VP9_COMP *cpi, int QIndex) {
  MACROBLOCK *const x = &cpi->mb;

  x->sadperbit16 = sad_per_bit16lut[QIndex];
  x->sadperbit4 = sad_per_bit4lut[QIndex];
}
|
|
|
|
|
2010-12-02 00:50:14 +01:00
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
/* Initialize all per-frame rate-distortion constants for the given
 * quantizer index: RDMULT/RDDIV, error-per-bit, per-mode RD thresholds,
 * token cost tables, mode costs and (for inter frames) MV cost tables.
 * Must be called before the RD mode search for a frame. */
void vp9_initialize_rd_consts(VP9_COMP *cpi, int QIndex) {
  int q, i;

  vp9_clear_system_state();  // __asm emms;

  // Further tests required to see if optimum is different
  // for key frames, golden frames and arf frames.
  // if (cpi->common.refresh_golden_frame ||
  //     cpi->common.refresh_alt_ref_frame)
  // Clamp the quantizer index to the legal range [0, MAXQ].
  QIndex = (QIndex < 0) ? 0 : ((QIndex > MAXQ) ? MAXQ : QIndex);

  cpi->RDMULT = compute_rd_mult(QIndex);

  // In two-pass encoding of non-key frames, boost RDMULT when the
  // first-pass intra/inter ratio is very low (see rd_iifactor).
  if (cpi->pass == 2 && (cpi->common.frame_type != KEY_FRAME)) {
    if (cpi->twopass.next_iiratio > 31)
      cpi->RDMULT += (cpi->RDMULT * rd_iifactor[31]) >> 4;
    else
      cpi->RDMULT +=
        (cpi->RDMULT * rd_iifactor[cpi->twopass.next_iiratio]) >> 4;
  }

  // Keep a usable minimum multiplier.
  if (cpi->RDMULT < 7)
    cpi->RDMULT = 7;

  cpi->mb.errorperbit = (cpi->RDMULT / 110);
  cpi->mb.errorperbit += (cpi->mb.errorperbit == 0);  // ensure non-zero

  vp9_set_speed_features(cpi);

  // Mode-threshold scale factor: superlinear (^1.25) in the DC quantizer.
  q = (int)pow(vp9_dc_quant(QIndex, 0) >> 2, 1.25);
  q = q << 2;
  cpi->RDMULT = cpi->RDMULT << 4;

  if (q < 8)
    q = 8;

  // Pick RDDIV so that thresh_mult[i] * q stays in integer range; the
  // two branches compute equivalent thresholds at different scales.
  if (cpi->RDMULT > 1000) {
    cpi->RDDIV = 1;
    cpi->RDMULT /= 100;

    for (i = 0; i < MAX_MODES; i++) {
      if (cpi->sf.thresh_mult[i] < INT_MAX) {
        cpi->rd_threshes[i] = cpi->sf.thresh_mult[i] * q / 100;
      } else {
        cpi->rd_threshes[i] = INT_MAX;
      }

      cpi->rd_baseline_thresh[i] = cpi->rd_threshes[i];
    }
  } else {
    cpi->RDDIV = 100;

    for (i = 0; i < MAX_MODES; i++) {
      // Guard the multiply against overflow before scaling by q.
      if (cpi->sf.thresh_mult[i] < (INT_MAX / q)) {
        cpi->rd_threshes[i] = cpi->sf.thresh_mult[i] * q;
      } else {
        cpi->rd_threshes[i] = INT_MAX;
      }

      cpi->rd_baseline_thresh[i] = cpi->rd_threshes[i];
    }
  }

  // Rebuild token cost tables for each transform size from the current
  // frame-context coefficient probabilities.
  fill_token_costs(cpi->mb.token_costs[TX_4X4],
                   cpi->common.fc.coef_probs_4x4, BLOCK_TYPES);
  fill_token_costs(cpi->mb.token_costs[TX_8X8],
                   cpi->common.fc.coef_probs_8x8, BLOCK_TYPES);
  fill_token_costs(cpi->mb.token_costs[TX_16X16],
                   cpi->common.fc.coef_probs_16x16, BLOCK_TYPES);
  fill_token_costs(cpi->mb.token_costs[TX_32X32],
                   cpi->common.fc.coef_probs_32x32, BLOCK_TYPES_32X32);

  /*rough estimate for costing*/
  cpi->common.kf_ymode_probs_index = cpi->common.base_qindex >> 4;
  vp9_init_mode_costs(cpi);

  // MV costs are only needed on inter frames; choose the high-precision
  // cost tables when high-precision MVs are allowed.
  if (cpi->common.frame_type != KEY_FRAME) {
    vp9_build_nmv_cost_table(
        cpi->mb.nmvjointcost,
        cpi->mb.e_mbd.allow_high_precision_mv ?
        cpi->mb.nmvcost_hp : cpi->mb.nmvcost,
        &cpi->common.fc.nmvc,
        cpi->mb.e_mbd.allow_high_precision_mv, 1, 1);
  }
}
|
|
|
|
|
2012-12-19 00:31:19 +01:00
|
|
|
/* Sum of squared differences between the original transform coefficients
 * and their dequantized reconstruction, over block_size coefficients. */
int vp9_block_error_c(int16_t *coeff, int16_t *dqcoeff, int block_size) {
  int idx;
  int sse = 0;

  for (idx = 0; idx < block_size; ++idx) {
    const int diff = coeff[idx] - dqcoeff[idx];

    sse += diff * diff;
  }

  return sse;
}
|
|
|
|
|
2013-02-15 19:15:42 +01:00
|
|
|
/* Total squared coefficient error over the 16 luma 4x4 blocks of a
 * macroblock: sum over blocks of sum over 16 coefficients of
 * (coeff - dqcoeff)^2. */
int vp9_mbblock_error_c(MACROBLOCK *mb) {
  int total = 0;
  int ib, ic;

  for (ib = 0; ib < 16; ib++) {
    BLOCK *const be = &mb->block[ib];
    BLOCKD *const bd = &mb->e_mbd.block[ib];
    int block_sse = 0;

    for (ic = 0; ic < 16; ic++) {
      const int diff = be->coeff[ic] - bd->dqcoeff[ic];

      block_sse += diff * diff;
    }

    total += block_sse;
  }

  return total;
}
|
|
|
|
|
2012-10-30 20:58:42 +01:00
|
|
|
/* Total squared coefficient error over the chroma blocks of a macroblock
 * (block indices 16..23 are the U and V 4x4 blocks). */
int vp9_mbuverror_c(MACROBLOCK *mb) {
  int total = 0;
  int ib = 16;

  while (ib < 24) {
    BLOCK *const be = &mb->block[ib];
    BLOCKD *const bd = &mb->e_mbd.block[ib];

    total += vp9_block_error_c(be->coeff, bd->dqcoeff, 16);
    ib++;
  }

  return total;
}
|
|
|
|
|
2012-10-30 20:58:42 +01:00
|
|
|
/* Compute the chroma (U+V) sum of squared error for the macroblock's
 * current MV, using the 8x8 variance / sub-pixel variance kernels.
 * Returns the combined U+V SSE. */
int vp9_uvsse(MACROBLOCK *x) {
  uint8_t *uptr, *vptr;
  // Source (predicted) chroma pointers: block 16 is the first U block,
  // block 20 the first V block.
  uint8_t *upred_ptr = (*(x->block[16].base_src) + x->block[16].src);
  uint8_t *vpred_ptr = (*(x->block[20].base_src) + x->block[20].src);
  int uv_stride = x->block[16].src_stride;

  unsigned int sse1 = 0;
  unsigned int sse2 = 0;
  int mv_row = x->e_mbd.mode_info_context->mbmi.mv[0].as_mv.row;
  int mv_col = x->e_mbd.mode_info_context->mbmi.mv[0].as_mv.col;
  int offset;
  int pre_stride = x->e_mbd.block[16].pre_stride;

  // Halve the luma MV for chroma, biasing away from zero before the
  // divide so the truncation rounds symmetrically for both signs.
  if (mv_row < 0)
    mv_row -= 1;
  else
    mv_row += 1;

  if (mv_col < 0)
    mv_col -= 1;
  else
    mv_col += 1;

  mv_row /= 2;
  mv_col /= 2;

  // Integer-pel offset into the chroma reference planes (MV is in
  // 1/8-pel units at this point).
  offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
  uptr = x->e_mbd.pre.u_buffer + offset;
  vptr = x->e_mbd.pre.v_buffer + offset;

  if ((mv_row | mv_col) & 7) {
    // Sub-pixel MV: use the filtered variance kernel with the
    // fractional position (scaled from 1/8-pel to the kernel's units).
    vp9_sub_pixel_variance8x8(uptr, pre_stride, (mv_col & 7) << 1,
                              (mv_row & 7) << 1, upred_ptr, uv_stride, &sse2);
    vp9_sub_pixel_variance8x8(vptr, pre_stride, (mv_col & 7) << 1,
                              (mv_row & 7) << 1, vpred_ptr, uv_stride, &sse1);
    sse2 += sse1;
  } else {
    // Full-pel MV: plain variance is sufficient.
    vp9_variance8x8(uptr, pre_stride, upred_ptr, uv_stride, &sse2);
    vp9_variance8x8(vptr, pre_stride, vpred_ptr, uv_stride, &sse1);
    sse2 += sse1;
  }

  return sse2;
}
|
|
|
|
|
2013-02-07 02:16:36 +01:00
|
|
|
static INLINE int cost_coeffs(MACROBLOCK *mb,
|
|
|
|
BLOCKD *b, PLANE_TYPE type,
|
|
|
|
ENTROPY_CONTEXT *a,
|
|
|
|
ENTROPY_CONTEXT *l,
|
|
|
|
TX_SIZE tx_size) {
|
2013-01-03 18:00:30 +01:00
|
|
|
int pt;
|
2012-08-06 21:15:24 +02:00
|
|
|
const int eob = b->eob;
|
2012-09-10 07:42:35 +02:00
|
|
|
MACROBLOCKD *xd = &mb->e_mbd;
|
2013-01-03 18:00:30 +01:00
|
|
|
const int ib = (int)(b - xd->block);
|
2013-02-15 19:15:42 +01:00
|
|
|
int c = 0;
|
2013-01-03 18:00:30 +01:00
|
|
|
int cost = 0, seg_eob;
|
|
|
|
const int segment_id = xd->mode_info_context->mbmi.segment_id;
|
2013-02-11 21:44:53 +01:00
|
|
|
const int *scan;
|
2013-01-03 18:00:30 +01:00
|
|
|
int16_t *qcoeff_ptr = b->qcoeff;
|
2013-02-19 22:36:38 +01:00
|
|
|
const int ref = xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME;
|
2013-01-03 18:00:30 +01:00
|
|
|
const TX_TYPE tx_type = (type == PLANE_TYPE_Y_WITH_DC) ?
|
|
|
|
get_tx_type(xd, b) : DCT_DCT;
|
2013-02-07 02:16:36 +01:00
|
|
|
unsigned int (*token_costs)[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS] =
|
2013-02-19 22:36:38 +01:00
|
|
|
mb->token_costs[tx_size][type][ref];
|
2013-01-03 18:00:30 +01:00
|
|
|
ENTROPY_CONTEXT a_ec = *a, l_ec = *l;
|
2012-08-07 03:29:59 +02:00
|
|
|
|
2012-09-10 07:42:35 +02:00
|
|
|
switch (tx_size) {
|
2012-08-06 21:15:24 +02:00
|
|
|
case TX_4X4:
|
2013-01-03 18:00:30 +01:00
|
|
|
scan = vp9_default_zig_zag1d_4x4;
|
|
|
|
seg_eob = 16;
|
2012-10-16 01:41:41 +02:00
|
|
|
if (type == PLANE_TYPE_Y_WITH_DC) {
|
2013-01-03 18:00:30 +01:00
|
|
|
if (tx_type == ADST_DCT) {
|
|
|
|
scan = vp9_row_scan_4x4;
|
|
|
|
} else if (tx_type == DCT_ADST) {
|
|
|
|
scan = vp9_col_scan_4x4;
|
2012-09-10 07:42:35 +02:00
|
|
|
}
|
2012-08-06 21:15:24 +02:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case TX_8X8:
|
2013-02-15 19:15:42 +01:00
|
|
|
scan = vp9_default_zig_zag1d_8x8;
|
|
|
|
seg_eob = 64;
|
2012-08-06 21:15:24 +02:00
|
|
|
break;
|
|
|
|
case TX_16X16:
|
2012-10-31 01:12:12 +01:00
|
|
|
scan = vp9_default_zig_zag1d_16x16;
|
2013-01-03 18:00:30 +01:00
|
|
|
seg_eob = 256;
|
|
|
|
if (type == PLANE_TYPE_UV) {
|
|
|
|
const int uv_idx = ib - 16;
|
|
|
|
qcoeff_ptr = xd->sb_coeff_data.qcoeff + 1024 + 64 * uv_idx;
|
2012-10-16 01:41:41 +02:00
|
|
|
}
|
2012-08-06 21:15:24 +02:00
|
|
|
break;
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
case TX_32X32:
|
|
|
|
scan = vp9_default_zig_zag1d_32x32;
|
2013-01-03 18:00:30 +01:00
|
|
|
seg_eob = 1024;
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
qcoeff_ptr = xd->sb_coeff_data.qcoeff;
|
|
|
|
break;
|
2012-08-06 21:15:24 +02:00
|
|
|
default:
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
abort();
|
2012-08-06 21:15:24 +02:00
|
|
|
break;
|
|
|
|
}
|
2012-08-06 20:22:28 +02:00
|
|
|
|
2013-01-03 18:00:30 +01:00
|
|
|
VP9_COMBINEENTROPYCONTEXTS(pt, a_ec, l_ec);
|
|
|
|
|
2013-01-28 16:22:53 +01:00
|
|
|
if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))
|
|
|
|
seg_eob = 0;
|
2013-01-03 18:00:30 +01:00
|
|
|
|
2013-02-19 22:36:38 +01:00
|
|
|
{
|
2013-02-11 20:19:21 +01:00
|
|
|
int recent_energy = 0;
|
2012-09-10 07:42:35 +02:00
|
|
|
for (; c < eob; c++) {
|
|
|
|
int v = qcoeff_ptr[scan[c]];
|
2012-10-31 01:12:12 +01:00
|
|
|
int t = vp9_dct_value_tokens_ptr[v].Token;
|
2013-02-19 19:12:00 +01:00
|
|
|
cost += token_costs[get_coef_band(tx_size, c)][pt][t];
|
2012-10-31 01:12:12 +01:00
|
|
|
cost += vp9_dct_value_cost_ptr[v];
|
2013-02-11 20:19:21 +01:00
|
|
|
pt = vp9_get_coef_context(&recent_energy, t);
|
2012-09-10 07:42:35 +02:00
|
|
|
}
|
|
|
|
if (c < seg_eob)
|
2013-02-19 22:36:38 +01:00
|
|
|
cost += mb->token_costs[tx_size][type][ref][get_coef_band(tx_size, c)]
|
2013-02-11 13:56:02 +01:00
|
|
|
[pt][DCT_EOB_TOKEN];
|
2012-08-06 20:22:28 +02:00
|
|
|
}
|
|
|
|
|
2012-11-29 02:34:02 +01:00
|
|
|
// is eob first coefficient;
|
2013-02-15 19:15:42 +01:00
|
|
|
pt = (c > 0);
|
2012-08-06 20:22:28 +02:00
|
|
|
*a = *l = pt;
|
|
|
|
return cost;
|
|
|
|
}
|
|
|
|
|
2013-02-15 19:15:42 +01:00
|
|
|
/* Token-rate cost (in bits) of the quantized luma coefficients of a
 * macroblock coded with sixteen 4x4 transforms.
 *
 * If `backup` is nonzero, the above/left entropy contexts are copied to
 * scratch storage so the caller's contexts are left untouched; otherwise
 * the contexts in `xd` are updated in place as a side effect.
 */
static int rdcost_mby_4x4(MACROBLOCK *mb, int backup) {
  MACROBLOCKD *xd = &mb->e_mbd;
  ENTROPY_CONTEXT_PLANES t_above, t_left;
  ENTROPY_CONTEXT *ta, *tl;
  int i, total = 0;

  if (backup) {
    /* Work on private copies so the caller's contexts survive. */
    vpx_memcpy(&t_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES));
    vpx_memcpy(&t_left, xd->left_context, sizeof(ENTROPY_CONTEXT_PLANES));
    ta = (ENTROPY_CONTEXT *)&t_above;
    tl = (ENTROPY_CONTEXT *)&t_left;
  } else {
    ta = (ENTROPY_CONTEXT *)xd->above_context;
    tl = (ENTROPY_CONTEXT *)xd->left_context;
  }

  /* Sixteen 4x4 luma blocks per macroblock. */
  for (i = 0; i < 16; i++)
    total += cost_coeffs(mb, xd->block + i, PLANE_TYPE_Y_WITH_DC,
                         ta + vp9_block2above[TX_4X4][i],
                         tl + vp9_block2left[TX_4X4][i],
                         TX_4X4);

  return total;
}
|
|
|
|
|
2012-10-09 18:18:21 +02:00
|
|
|
/* Luma rate/distortion evaluation of a macroblock with the 4x4 transform.
 *
 * Forward-transforms and quantizes the luma plane, then writes:
 *   *Rate       - coefficient token cost in bits,
 *   *Distortion - coefficient-domain block error, scaled down by 4,
 *   *skippable  - nonzero if the luma plane codes no coefficients.
 * `backup` is forwarded to rdcost_mby_4x4 and controls whether the
 * entropy contexts are preserved or updated in place.
 */
static void macro_block_yrd_4x4(MACROBLOCK *mb,
                                int *Rate, int *Distortion, int *skippable,
                                int backup) {
  MACROBLOCKD *const xd = &mb->e_mbd;

  /* Code the whole luma plane with sixteen 4x4 transforms. */
  xd->mode_info_context->mbmi.txfm_size = TX_4X4;
  vp9_transform_mby_4x4(mb);
  vp9_quantize_mby_4x4(mb);

  *Distortion = vp9_mbblock_error(mb) >> 2;
  *Rate = rdcost_mby_4x4(mb, backup);
  *skippable = vp9_mby_is_skippable_4x4(xd);
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2013-02-15 19:15:42 +01:00
|
|
|
/* Token-rate cost (in bits) of the quantized luma coefficients of a
 * macroblock coded with four 8x8 transforms.
 *
 * If `backup` is nonzero, the above/left entropy contexts are copied to
 * scratch storage so the caller's contexts are left untouched; otherwise
 * the contexts in `xd` are updated in place as a side effect.
 */
static int rdcost_mby_8x8(MACROBLOCK *mb, int backup) {
  int cost = 0;
  int b;
  MACROBLOCKD *xd = &mb->e_mbd;
  ENTROPY_CONTEXT_PLANES t_above, t_left;
  ENTROPY_CONTEXT *ta;
  ENTROPY_CONTEXT *tl;

  if (backup) {
    /* Work on private copies so the caller's contexts survive. */
    vpx_memcpy(&t_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES));
    vpx_memcpy(&t_left, xd->left_context, sizeof(ENTROPY_CONTEXT_PLANES));

    ta = (ENTROPY_CONTEXT *)&t_above;
    tl = (ENTROPY_CONTEXT *)&t_left;
  } else {
    /* Use the xd alias, consistent with rdcost_mby_4x4/16x16
     * (previously spelled mb->e_mbd.* here; identical object). */
    ta = (ENTROPY_CONTEXT *)xd->above_context;
    tl = (ENTROPY_CONTEXT *)xd->left_context;
  }

  /* Four 8x8 luma blocks; block index steps by 4 in 4x4-block units. */
  for (b = 0; b < 16; b += 4)
    cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_Y_WITH_DC,
                        ta + vp9_block2above[TX_8X8][b],
                        tl + vp9_block2left[TX_8X8][b],
                        TX_8X8);

  return cost;
}
|
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
/* Luma rate/distortion evaluation of a macroblock with the 8x8 transform.
 *
 * Forward-transforms and quantizes the luma plane, then writes:
 *   *Rate       - coefficient token cost in bits,
 *   *Distortion - coefficient-domain block error, scaled down by 4,
 *   *skippable  - nonzero if the luma plane codes no coefficients.
 * `backup` is forwarded to rdcost_mby_8x8 and controls whether the
 * entropy contexts are preserved or updated in place.
 */
static void macro_block_yrd_8x8(MACROBLOCK *mb,
                                int *Rate, int *Distortion, int *skippable,
                                int backup) {
  MACROBLOCKD *const xd = &mb->e_mbd;

  /* Code the whole luma plane with four 8x8 transforms. */
  xd->mode_info_context->mbmi.txfm_size = TX_8X8;
  vp9_transform_mby_8x8(mb);
  vp9_quantize_mby_8x8(mb);

  *Distortion = vp9_mbblock_error(mb) >> 2;
  *Rate = rdcost_mby_8x8(mb, backup);
  *skippable = vp9_mby_is_skippable_8x8(xd);
}
|
2012-01-26 23:36:20 +01:00
|
|
|
|
2012-11-08 20:03:00 +01:00
|
|
|
/* Token-rate cost (in bits) of the quantized luma coefficients of a
 * macroblock coded with a single 16x16 transform.
 *
 * If `backup` is nonzero, the above/left entropy contexts are copied to
 * scratch storage so the caller's contexts are left untouched; otherwise
 * the contexts in `xd` are updated in place as a side effect.
 */
static int rdcost_mby_16x16(MACROBLOCK *mb, int backup) {
  MACROBLOCKD *xd = &mb->e_mbd;
  ENTROPY_CONTEXT_PLANES t_above, t_left;
  ENTROPY_CONTEXT *ta, *tl;

  if (backup) {
    /* Work on private copies so the caller's contexts survive. */
    vpx_memcpy(&t_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES));
    vpx_memcpy(&t_left, xd->left_context, sizeof(ENTROPY_CONTEXT_PLANES));
    ta = (ENTROPY_CONTEXT *)&t_above;
    tl = (ENTROPY_CONTEXT *)&t_left;
  } else {
    ta = (ENTROPY_CONTEXT *)xd->above_context;
    tl = (ENTROPY_CONTEXT *)xd->left_context;
  }

  /* One 16x16 transform covers the whole luma plane (block 0). */
  return cost_coeffs(mb, xd->block, PLANE_TYPE_Y_WITH_DC, ta, tl, TX_16X16);
}
|
2012-09-10 07:42:35 +02:00
|
|
|
|
2012-08-03 02:03:14 +02:00
|
|
|
/* Luma rate/distortion evaluation of a macroblock with the 16x16 transform.
 *
 * Forward-transforms and quantizes the luma plane, optionally runs the
 * trellis coefficient optimization, then writes:
 *   *Rate       - coefficient token cost in bits,
 *   *Distortion - coefficient-domain block error, scaled down by 4,
 *   *skippable  - nonzero if the luma plane codes no coefficients.
 * `backup` is forwarded to rdcost_mby_16x16 and controls whether the
 * entropy contexts are preserved or updated in place.
 */
static void macro_block_yrd_16x16(MACROBLOCK *mb, int *Rate, int *Distortion,
                                  int *skippable, int backup) {
  MACROBLOCKD *xd = &mb->e_mbd;

  /* Code the whole luma plane with one 16x16 transform. */
  xd->mode_info_context->mbmi.txfm_size = TX_16X16;
  vp9_transform_mby_16x16(mb);
  vp9_quantize_mby_16x16(mb);

  /* TODO(jingning) is it possible to quickly determine whether to force
   * trailing coefficients to be zero, instead of running trellis
   * optimization in the rate-distortion optimization loop? */
  if (mb->optimize && xd->mode_info_context->mbmi.mode < I8X8_PRED)
    vp9_optimize_mby_16x16(mb);

  *Distortion = vp9_mbblock_error(mb) >> 2;
  *Rate = rdcost_mby_16x16(mb, backup);
  *skippable = vp9_mby_is_skippable_16x16(xd);
}
|
|
|
|
|
2012-11-08 20:03:00 +01:00
|
|
|
static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
int (*r)[2], int *rate,
|
|
|
|
int *d, int *distortion,
|
|
|
|
int *s, int *skip,
|
|
|
|
int64_t txfm_cache[NB_TXFM_MODES],
|
|
|
|
TX_SIZE max_txfm_size) {
|
2012-11-08 20:03:00 +01:00
|
|
|
VP9_COMMON *const cm = &cpi->common;
|
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
|
|
|
MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
|
|
|
|
vp9_prob skip_prob = cm->mb_no_coeff_skip ?
|
|
|
|
vp9_get_pred_prob(cm, xd, PRED_MBSKIP) : 128;
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
int64_t rd[TX_SIZE_MAX_SB][2];
|
|
|
|
int n, m;
|
|
|
|
|
|
|
|
for (n = TX_4X4; n <= max_txfm_size; n++) {
|
|
|
|
r[n][1] = r[n][0];
|
|
|
|
for (m = 0; m <= n - (n == max_txfm_size); m++) {
|
|
|
|
if (m == n)
|
|
|
|
r[n][1] += vp9_cost_zero(cm->prob_tx[m]);
|
|
|
|
else
|
|
|
|
r[n][1] += vp9_cost_one(cm->prob_tx[m]);
|
|
|
|
}
|
|
|
|
}
|
2012-11-08 20:03:00 +01:00
|
|
|
|
|
|
|
if (cm->mb_no_coeff_skip) {
|
|
|
|
int s0, s1;
|
|
|
|
|
|
|
|
assert(skip_prob > 0);
|
|
|
|
s0 = vp9_cost_bit(skip_prob, 0);
|
|
|
|
s1 = vp9_cost_bit(skip_prob, 1);
|
|
|
|
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
for (n = TX_4X4; n <= max_txfm_size; n++) {
|
2012-11-08 20:03:00 +01:00
|
|
|
if (s[n]) {
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
rd[n][0] = rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1, d[n]);
|
2012-11-08 20:03:00 +01:00
|
|
|
} else {
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
rd[n][0] = RDCOST(x->rdmult, x->rddiv, r[n][0] + s0, d[n]);
|
|
|
|
rd[n][1] = RDCOST(x->rdmult, x->rddiv, r[n][1] + s0, d[n]);
|
2012-11-08 20:03:00 +01:00
|
|
|
}
|
2012-10-09 18:18:21 +02:00
|
|
|
}
|
|
|
|
} else {
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
for (n = TX_4X4; n <= max_txfm_size; n++) {
|
|
|
|
rd[n][0] = RDCOST(x->rdmult, x->rddiv, r[n][0], d[n]);
|
|
|
|
rd[n][1] = RDCOST(x->rdmult, x->rddiv, r[n][1], d[n]);
|
2012-10-09 18:18:21 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
if (max_txfm_size == TX_32X32 &&
|
|
|
|
(cm->txfm_mode == ALLOW_32X32 ||
|
|
|
|
(cm->txfm_mode == TX_MODE_SELECT &&
|
|
|
|
rd[TX_32X32][1] < rd[TX_16X16][1] && rd[TX_32X32][1] < rd[TX_8X8][1] &&
|
|
|
|
rd[TX_32X32][1] < rd[TX_4X4][1]))) {
|
|
|
|
mbmi->txfm_size = TX_32X32;
|
2013-01-10 17:23:59 +01:00
|
|
|
} else if ( cm->txfm_mode == ALLOW_16X16 ||
|
|
|
|
(max_txfm_size == TX_16X16 && cm->txfm_mode == ALLOW_32X32) ||
|
|
|
|
(cm->txfm_mode == TX_MODE_SELECT &&
|
|
|
|
rd[TX_16X16][1] < rd[TX_8X8][1] &&
|
|
|
|
rd[TX_16X16][1] < rd[TX_4X4][1])) {
|
2012-10-09 18:18:21 +02:00
|
|
|
mbmi->txfm_size = TX_16X16;
|
2012-11-08 20:03:00 +01:00
|
|
|
} else if (cm->txfm_mode == ALLOW_8X8 ||
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
(cm->txfm_mode == TX_MODE_SELECT && rd[TX_8X8][1] < rd[TX_4X4][1])) {
|
2012-10-09 18:18:21 +02:00
|
|
|
mbmi->txfm_size = TX_8X8;
|
|
|
|
} else {
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
assert(cm->txfm_mode == ONLY_4X4 || cm->txfm_mode == TX_MODE_SELECT);
|
2012-10-09 18:18:21 +02:00
|
|
|
mbmi->txfm_size = TX_4X4;
|
|
|
|
}
|
|
|
|
|
2012-11-08 20:03:00 +01:00
|
|
|
*distortion = d[mbmi->txfm_size];
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
*rate = r[mbmi->txfm_size][cm->txfm_mode == TX_MODE_SELECT];
|
2012-11-08 20:03:00 +01:00
|
|
|
*skip = s[mbmi->txfm_size];
|
|
|
|
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
txfm_cache[ONLY_4X4] = rd[TX_4X4][0];
|
|
|
|
txfm_cache[ALLOW_8X8] = rd[TX_8X8][0];
|
|
|
|
txfm_cache[ALLOW_16X16] = rd[TX_16X16][0];
|
|
|
|
txfm_cache[ALLOW_32X32] = rd[max_txfm_size][0];
|
|
|
|
if (max_txfm_size == TX_32X32 &&
|
|
|
|
rd[TX_32X32][1] < rd[TX_16X16][1] && rd[TX_32X32][1] < rd[TX_8X8][1] &&
|
|
|
|
rd[TX_32X32][1] < rd[TX_4X4][1])
|
|
|
|
txfm_cache[TX_MODE_SELECT] = rd[TX_32X32][1];
|
2013-01-10 17:23:59 +01:00
|
|
|
else if (rd[TX_16X16][1] < rd[TX_8X8][1] && rd[TX_16X16][1] < rd[TX_4X4][1])
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
txfm_cache[TX_MODE_SELECT] = rd[TX_16X16][1];
|
2012-10-09 18:18:21 +02:00
|
|
|
else
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
txfm_cache[TX_MODE_SELECT] = rd[TX_4X4][1] < rd[TX_8X8][1] ?
|
|
|
|
rd[TX_4X4][1] : rd[TX_8X8][1];
|
2012-11-08 20:03:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Measure the luma rate (r), distortion (d) and skippability (s) of this
 * macroblock under each candidate transform size (4x4, 8x8, 16x16), then
 * select the best size via choose_txfm_size_from_rd().
 *
 * Outputs: *rate, *distortion, *skippable for the chosen size, plus
 * per-transform-mode RD scores in txfm_cache[] (NB_TXFM_MODES entries),
 * all filled in by choose_txfm_size_from_rd().
 */
static void macro_block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                            int *distortion, int *skippable,
                            int64_t txfm_cache[NB_TXFM_MODES]) {
  MACROBLOCKD *const xd = &x->e_mbd;
  /* r[n][0]: rate without transform-size signalling cost; r[n][1] is the
     signalling-inclusive rate, computed later by choose_txfm_size_from_rd. */
  int r[TX_SIZE_MAX_MB][2], d[TX_SIZE_MAX_MB], s[TX_SIZE_MAX_MB];

  /* Build the luma prediction residual for the whole macroblock. */
  vp9_subtract_mby(x->src_diff, *(x->block[0].base_src), xd->predictor,
                   x->block[0].src_stride);

  /* Score the residual at each transform size. NOTE(review): these helpers
     presumably transform/quantize in-place in x, so the call order (and
     running them before choose_txfm_size_from_rd) looks load-bearing —
     confirm before reordering. */
  macro_block_yrd_16x16(x, &r[TX_16X16][0], &d[TX_16X16], &s[TX_16X16], 1);
  macro_block_yrd_8x8(x, &r[TX_8X8][0], &d[TX_8X8], &s[TX_8X8], 1);
  macro_block_yrd_4x4(x, &r[TX_4X4][0], &d[TX_4X4], &s[TX_4X4], 1);

  /* TX_16X16 is the largest transform applicable to a 16x16 macroblock. */
  choose_txfm_size_from_rd(cpi, x, r, rate, d, distortion, s, skippable,
                           txfm_cache, TX_16X16);
}
|
|
|
|
|
2012-12-19 00:31:19 +01:00
|
|
|
/* Copy a 4x4 predictor block (4 bytes per row, rows laid out at a stride
 * of 16 bytes) from 'predictor' into 'dst'.
 *
 * The previous implementation cast the uint8_t buffers to unsigned int*
 * and copied whole words (d[0]/d[4]/d[8]/d[12]); that dereference through
 * an incompatible pointer type violates strict aliasing and can fault on
 * alignment-sensitive targets. Copying the same 16 bytes one at a time is
 * behaviorally identical and well-defined.
 */
static void copy_predictor(uint8_t *dst, const uint8_t *predictor) {
  int row;

  for (row = 0; row < 4; row++) {
    const int offset = row * 16;  /* 16-byte row stride, 4 bytes used */
    dst[offset + 0] = predictor[offset + 0];
    dst[offset + 1] = predictor[offset + 1];
    dst[offset + 2] = predictor[offset + 2];
    dst[offset + 3] = predictor[offset + 3];
  }
}
|
2011-08-05 01:30:27 +02:00
|
|
|
|
2013-01-06 03:20:25 +01:00
|
|
|
/* Return the token cost of the superblock's luma coefficients under the
 * 32x32 transform.
 *
 * backup != 0: cost against throwaway copies of the entropy contexts so
 *              the contexts in the MACROBLOCKD are left untouched.
 * backup == 0: cost against (and thereby advance) the live contexts.
 */
static int rdcost_sby_32x32(MACROBLOCK *x, int backup) {
  MACROBLOCKD *const xd = &x->e_mbd;
  ENTROPY_CONTEXT_PLANES t_above, t_left;
  ENTROPY_CONTEXT *ta, *tl;

  if (!backup) {
    /* Operate directly on the live above/left contexts. */
    ta = (ENTROPY_CONTEXT *) xd->above_context;
    tl = (ENTROPY_CONTEXT *) xd->left_context;
  } else {
    /* Snapshot the contexts locally so the caller's state is preserved. */
    vpx_memcpy(&t_above, xd->above_context, sizeof(t_above));
    vpx_memcpy(&t_left, xd->left_context, sizeof(t_left));
    ta = (ENTROPY_CONTEXT *) &t_above;
    tl = (ENTROPY_CONTEXT *) &t_left;
  }

  return cost_coeffs(x, xd->block, PLANE_TYPE_Y_WITH_DC, ta, tl, TX_32X32);
}
|
|
|
|
|
2012-12-19 00:31:19 +01:00
|
|
|
/* Sum of squared differences between the original transform coefficients
 * and the quantized-then-dequantized coefficients, used as the RD
 * distortion score for a 32x32 superblock.
 *
 * coeff:      original (pre-quantization) transform coefficients
 * dqcoeff:    dequantized coefficients after quantization
 * block_size: number of coefficients to compare
 *
 * Returns the accumulated squared error, saturated to INT_MAX so it fits
 * the int return type (a full 1024-coefficient block can exceed it).
 */
static int vp9_sb_block_error_c(int16_t *coeff, int16_t *dqcoeff,
                                int block_size) {
  int i;
  int64_t error = 0;

  for (i = 0; i < block_size; i++) {
    /* The difference of two int16_t values fits easily in int
     * (at most +/-65535); widen to int64_t before squaring so the
     * per-term product can never overflow 32-bit arithmetic. */
    const int this_diff = coeff[i] - dqcoeff[i];
    error += (int64_t)this_diff * this_diff;
  }

  return error > INT_MAX ? INT_MAX : (int)error;
}
|
|
|
|
|
|
|
|
#define DEBUG_ERROR 0
|
|
|
|
/* Rate/distortion evaluation of the luma plane of a superblock using a
 * single 32x32 transform.  Forward-transforms and quantizes the residual
 * already stored in x->sb_coeff_data, then reports:
 *   *rate       - token cost of the quantized coefficients
 *   *distortion - squared coefficient-domain error (see vp9_sb_block_error_c)
 *   *skippable  - nonzero if the whole block quantized to zero
 * 'backup' is forwarded to rdcost_sby_32x32 (controls whether the entropy
 * contexts are saved/restored around the cost estimate).
 */
static void super_block_yrd_32x32(MACROBLOCK *x,
                                  int *rate, int *distortion, int *skippable,
                                  int backup) {
  MACROBLOCKD *const mbd = &x->e_mbd;
  SUPERBLOCK *const sb = &x->sb_coeff_data;
  SUPERBLOCKD *const sbd = &mbd->sb_coeff_data;
#if DEBUG_ERROR
  int16_t out[1024];
#endif

  /* Forward 32x32 transform followed by quantization of the luma plane. */
  vp9_transform_sby_32x32(x);
  vp9_quantize_sby_32x32(x);
#if DEBUG_ERROR
  vp9_short_idct32x32(sbd->dqcoeff, out, 64);
#endif

  *distortion = vp9_sb_block_error_c(sb->coeff, sbd->dqcoeff, 1024);
#if DEBUG_ERROR
  printf("IDCT/FDCT error 32x32: %d (d: %d)\n",
         vp9_block_error_c(sb->src_diff, out, 1024), *distortion);
#endif
  *rate = rdcost_sby_32x32(x, backup);
  *skippable = vp9_sby_is_skippable_32x32(&x->e_mbd);
}
|
|
|
|
|
2012-11-08 20:03:00 +01:00
|
|
|
static void super_block_yrd(VP9_COMP *cpi,
|
|
|
|
MACROBLOCK *x, int *rate, int *distortion,
|
2012-11-25 04:33:58 +01:00
|
|
|
int *skip,
|
2012-11-08 20:03:00 +01:00
|
|
|
int64_t txfm_cache[NB_TXFM_MODES]) {
|
2012-08-20 23:43:34 +02:00
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
int r[TX_SIZE_MAX_SB][2], d[TX_SIZE_MAX_SB], s[TX_SIZE_MAX_SB], n;
|
2012-08-20 23:43:34 +02:00
|
|
|
const uint8_t *src = x->src.y_buffer, *dst = xd->dst.y_buffer;
|
|
|
|
int src_y_stride = x->src.y_stride, dst_y_stride = xd->dst.y_stride;
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
ENTROPY_CONTEXT_PLANES t_above[TX_SIZE_MAX_MB][2],
|
|
|
|
*orig_above = xd->above_context;
|
|
|
|
ENTROPY_CONTEXT_PLANES t_left[TX_SIZE_MAX_MB][2],
|
|
|
|
*orig_left = xd->left_context;
|
2012-11-08 20:03:00 +01:00
|
|
|
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
for (n = TX_4X4; n < TX_SIZE_MAX_MB; n++) {
|
2012-11-08 20:03:00 +01:00
|
|
|
vpx_memcpy(t_above[n], xd->above_context, sizeof(t_above[n]));
|
|
|
|
vpx_memcpy(t_left[n], xd->left_context, sizeof(t_left[n]));
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
r[n][0] = 0;
|
2012-11-08 20:03:00 +01:00
|
|
|
d[n] = 0;
|
|
|
|
s[n] = 1;
|
|
|
|
}
|
2012-08-20 23:43:34 +02:00
|
|
|
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
vp9_subtract_sby_s_c(x->sb_coeff_data.src_diff, src, src_y_stride,
|
|
|
|
dst, dst_y_stride);
|
2013-01-06 03:20:25 +01:00
|
|
|
super_block_yrd_32x32(x, &r[TX_32X32][0], &d[TX_32X32], &s[TX_32X32], 1);
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
|
|
|
|
#if DEBUG_ERROR
|
|
|
|
int err[3] = { 0, 0, 0 };
|
|
|
|
#endif
|
2012-08-20 23:43:34 +02:00
|
|
|
for (n = 0; n < 4; n++) {
|
|
|
|
int x_idx = n & 1, y_idx = n >> 1;
|
2012-11-08 20:03:00 +01:00
|
|
|
int r_tmp, d_tmp, s_tmp;
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2012-10-30 20:58:42 +01:00
|
|
|
vp9_subtract_mby_s_c(x->src_diff,
|
2012-08-20 23:43:34 +02:00
|
|
|
src + x_idx * 16 + y_idx * 16 * src_y_stride,
|
|
|
|
src_y_stride,
|
|
|
|
dst + x_idx * 16 + y_idx * 16 * dst_y_stride,
|
|
|
|
dst_y_stride);
|
2012-11-08 20:03:00 +01:00
|
|
|
|
|
|
|
xd->above_context = &t_above[TX_16X16][x_idx];
|
|
|
|
xd->left_context = &t_left[TX_16X16][y_idx];
|
2012-11-25 04:33:58 +01:00
|
|
|
macro_block_yrd_16x16(x, &r_tmp, &d_tmp, &s_tmp, 0);
|
2012-11-08 20:03:00 +01:00
|
|
|
d[TX_16X16] += d_tmp;
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
r[TX_16X16][0] += r_tmp;
|
2012-11-08 20:03:00 +01:00
|
|
|
s[TX_16X16] = s[TX_16X16] && s_tmp;
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
#if DEBUG_ERROR
|
|
|
|
vp9_inverse_transform_mby_16x16(xd);
|
|
|
|
err[2] += vp9_block_error_c(xd->diff, x->src_diff, 256);
|
|
|
|
#endif
|
2012-11-08 20:03:00 +01:00
|
|
|
|
|
|
|
xd->above_context = &t_above[TX_4X4][x_idx];
|
|
|
|
xd->left_context = &t_left[TX_4X4][y_idx];
|
2012-11-25 04:33:58 +01:00
|
|
|
macro_block_yrd_4x4(x, &r_tmp, &d_tmp, &s_tmp, 0);
|
2012-11-08 20:03:00 +01:00
|
|
|
d[TX_4X4] += d_tmp;
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
r[TX_4X4][0] += r_tmp;
|
2012-11-08 20:03:00 +01:00
|
|
|
s[TX_4X4] = s[TX_4X4] && s_tmp;
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
#if DEBUG_ERROR
|
|
|
|
vp9_inverse_transform_mby_4x4(xd);
|
|
|
|
err[0] += vp9_block_error_c(xd->diff, x->src_diff, 256);
|
|
|
|
#endif
|
2012-11-08 20:03:00 +01:00
|
|
|
|
|
|
|
xd->above_context = &t_above[TX_8X8][x_idx];
|
|
|
|
xd->left_context = &t_left[TX_8X8][y_idx];
|
2012-11-25 04:33:58 +01:00
|
|
|
macro_block_yrd_8x8(x, &r_tmp, &d_tmp, &s_tmp, 0);
|
2012-11-08 20:03:00 +01:00
|
|
|
d[TX_8X8] += d_tmp;
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
r[TX_8X8][0] += r_tmp;
|
2012-11-08 20:03:00 +01:00
|
|
|
s[TX_8X8] = s[TX_8X8] && s_tmp;
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
#if DEBUG_ERROR
|
|
|
|
vp9_inverse_transform_mby_8x8(xd);
|
|
|
|
err[1] += vp9_block_error_c(xd->diff, x->src_diff, 256);
|
|
|
|
#endif
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
#if DEBUG_ERROR
|
|
|
|
printf("IDCT/FDCT error 16x16: %d (d: %d)\n", err[2], d[2]);
|
|
|
|
printf("IDCT/FDCT error 8x8: %d (d: %d)\n", err[1], d[1]);
|
|
|
|
printf("IDCT/FDCT error 4x4: %d (d: %d)\n", err[0], d[0]);
|
|
|
|
#endif
|
|
|
|
choose_txfm_size_from_rd(cpi, x, r, rate, d, distortion, s, skip, txfm_cache,
|
|
|
|
TX_SIZE_MAX_SB - 1);
|
2012-11-08 20:03:00 +01:00
|
|
|
|
|
|
|
xd->above_context = orig_above;
|
|
|
|
xd->left_context = orig_left;
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
2013-01-06 03:20:25 +01:00
|
|
|
|
|
|
|
/* Rate-distortion analysis of the luma (Y) plane of a 64x64 superblock.
 *
 * Evaluates every candidate transform size (4x4, 8x8, 16x16 as 16 MB-sized
 * sub-blocks; 32x32 as 4 quadrant sub-blocks), accumulating per-size rate
 * (r), distortion (d) and skippability (s), then lets
 * choose_txfm_size_from_rd() pick the winner and fill *rate, *distortion,
 * *skip and txfm_cache[].
 *
 * xd->above_context / xd->left_context are temporarily repointed into
 * per-transform-size scratch copies (t_above/t_left) so that entropy-context
 * updates made while costing one size do not leak into another; the original
 * pointers are restored before returning.
 */
static void super_block_64_yrd(VP9_COMP *cpi,
                               MACROBLOCK *x, int *rate, int *distortion,
                               int *skip,
                               int64_t txfm_cache[NB_TXFM_MODES]) {
  MACROBLOCKD *const xd = &x->e_mbd;
  /* r[size][0]: token rate, d[size]: distortion, s[size]: all-skippable. */
  int r[TX_SIZE_MAX_SB][2], d[TX_SIZE_MAX_SB], s[TX_SIZE_MAX_SB], n;
  const uint8_t *src = x->src.y_buffer, *dst = xd->dst.y_buffer;
  int src_y_stride = x->src.y_stride, dst_y_stride = xd->dst.y_stride;
  /* Scratch entropy contexts, one independent copy per transform size. */
  ENTROPY_CONTEXT_PLANES t_above[TX_SIZE_MAX_SB][4],
                         *orig_above = xd->above_context;
  ENTROPY_CONTEXT_PLANES t_left[TX_SIZE_MAX_SB][4],
                         *orig_left = xd->left_context;

  for (n = TX_4X4; n < TX_SIZE_MAX_SB; n++) {
    vpx_memcpy(t_above[n], xd->above_context, sizeof(t_above[n]));
    vpx_memcpy(t_left[n], xd->left_context, sizeof(t_left[n]));
    r[n][0] = 0;
    d[n] = 0;
    s[n] = 1;  /* skippable until any sub-block says otherwise */
  }

  /* 32x32: the 64x64 block as a 2x2 grid of 32x32 quadrants. */
  for (n = 0; n < 4; n++) {
    int x_idx = n & 1, y_idx = n >> 1;
    int r_tmp, d_tmp, s_tmp;

    xd->above_context = &t_above[TX_32X32][x_idx << 1];
    xd->left_context = &t_left[TX_32X32][y_idx << 1];
    vp9_subtract_sby_s_c(x->sb_coeff_data.src_diff,
                         src + 32 * x_idx + 32 * y_idx * src_y_stride,
                         src_y_stride,
                         dst + 32 * x_idx + 32 * y_idx * dst_y_stride,
                         dst_y_stride);
    super_block_yrd_32x32(x, &r_tmp, &d_tmp, &s_tmp, 0);
    r[TX_32X32][0] += r_tmp;
    d[TX_32X32] += d_tmp;
    s[TX_32X32] = s[TX_32X32] && s_tmp;
  }

#if DEBUG_ERROR
  int err[3] = { 0, 0, 0 };
#endif
  /* 4x4 / 8x8 / 16x16: the 64x64 block as a 4x4 grid of 16x16 macroblocks.
   * The residual is computed once per macroblock and re-costed at each of
   * the three transform sizes. */
  for (n = 0; n < 16; n++) {
    int x_idx = n & 3, y_idx = n >> 2;
    int r_tmp, d_tmp, s_tmp;

    vp9_subtract_mby_s_c(x->src_diff,
                         src + x_idx * 16 + y_idx * 16 * src_y_stride,
                         src_y_stride,
                         dst + x_idx * 16 + y_idx * 16 * dst_y_stride,
                         dst_y_stride);

    xd->above_context = &t_above[TX_16X16][x_idx];
    xd->left_context = &t_left[TX_16X16][y_idx];
    macro_block_yrd_16x16(x, &r_tmp, &d_tmp, &s_tmp, 0);
    d[TX_16X16] += d_tmp;
    r[TX_16X16][0] += r_tmp;
    s[TX_16X16] = s[TX_16X16] && s_tmp;
#if DEBUG_ERROR
    /* Round-trip error check: reconstruct and compare against the source
     * residual (debug builds only). */
    vp9_inverse_transform_mby_16x16(xd);
    err[2] += vp9_block_error_c(xd->diff, x->src_diff, 256);
#endif

    xd->above_context = &t_above[TX_4X4][x_idx];
    xd->left_context = &t_left[TX_4X4][y_idx];
    macro_block_yrd_4x4(x, &r_tmp, &d_tmp, &s_tmp, 0);
    d[TX_4X4] += d_tmp;
    r[TX_4X4][0] += r_tmp;
    s[TX_4X4] = s[TX_4X4] && s_tmp;
#if DEBUG_ERROR
    vp9_inverse_transform_mby_4x4(xd);
    err[0] += vp9_block_error_c(xd->diff, x->src_diff, 256);
#endif

    xd->above_context = &t_above[TX_8X8][x_idx];
    xd->left_context = &t_left[TX_8X8][y_idx];
    macro_block_yrd_8x8(x, &r_tmp, &d_tmp, &s_tmp, 0);
    d[TX_8X8] += d_tmp;
    r[TX_8X8][0] += r_tmp;
    s[TX_8X8] = s[TX_8X8] && s_tmp;
#if DEBUG_ERROR
    vp9_inverse_transform_mby_8x8(xd);
    err[1] += vp9_block_error_c(xd->diff, x->src_diff, 256);
#endif
  }
#if DEBUG_ERROR
  printf("IDCT/FDCT error 16x16: %d (d: %d)\n", err[2], d[2]);
  printf("IDCT/FDCT error 8x8: %d (d: %d)\n", err[1], d[1]);
  printf("IDCT/FDCT error 4x4: %d (d: %d)\n", err[0], d[0]);
#endif
  /* Pick the transform size with the best RD trade-off; this fills the
   * output rate/distortion/skip values and the per-mode txfm_cache. */
  choose_txfm_size_from_rd(cpi, x, r, rate, d, distortion, s, skip, txfm_cache,
                           TX_SIZE_MAX_SB - 1);

  /* Restore the real entropy-context pointers. */
  xd->above_context = orig_above;
  xd->left_context = orig_left;
}
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2012-12-19 00:31:19 +01:00
|
|
|
/* Copy the left 8x8 pixel block out of a predictor buffer.
 *
 * The predictor is a 2-D buffer with a 16-byte stride, i.e. 4 machine words
 * per row; only the left half (2 words = 8 bytes) of each of the 8 rows is
 * of interest, so the right half of every row is left untouched in dst.
 */
static void copy_predictor_8x8(uint8_t *dst, const uint8_t *predictor) {
  const unsigned int *p = (const unsigned int *)predictor;
  unsigned int *d = (unsigned int *)dst;
  int row;

  for (row = 0; row < 8; row++) {
    const int w = row * 4;  /* first word of this 16-byte row */
    d[w] = p[w];
    d[w + 1] = p[w + 1];
  }
}
|
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
/* RD search over all 4x4 intra prediction modes for one 4x4 luma block.
 *
 * For each candidate mode: predict, subtract, forward-transform (hybrid
 * transform when get_tx_type_4x4() selects one, otherwise DCT), quantize,
 * cost the tokens, and keep the mode with the lowest RD cost. On return the
 * winning mode's entropy contexts are written back through a/l, the block is
 * reconstructed into the destination frame, and best_rd is returned.
 *
 * Outputs: *best_mode, *bestrate (mode + token rate), *bestratey (token
 * rate only), *bestdistortion, and the updated *a / *l contexts.
 */
static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, BLOCK *be,
                                     BLOCKD *b, B_PREDICTION_MODE *best_mode,
                                     int *bmode_costs,
                                     ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
                                     int *bestrate, int *bestratey,
                                     int *bestdistortion) {
  B_PREDICTION_MODE mode;
  MACROBLOCKD *xd = &x->e_mbd;
  int64_t best_rd = INT64_MAX;
  int rate = 0;
  int distortion;

  /* ta/tl: pristine input contexts; tempa/templ: per-mode working copies
   * re-seeded from ta/tl before each cost_coeffs() call. */
  ENTROPY_CONTEXT ta = *a, tempa = *a;
  ENTROPY_CONTEXT tl = *l, templ = *l;
  TX_TYPE tx_type = DCT_DCT;
  TX_TYPE best_tx_type = DCT_DCT;
  /*
   * The predictor buffer is a 2d buffer with a stride of 16. Create
   * a temp buffer that meets the stride requirements, but we are only
   * interested in the left 4x4 block
   * */
  DECLARE_ALIGNED_ARRAY(16, uint8_t, best_predictor, 16 * 4);
  DECLARE_ALIGNED_ARRAY(16, int16_t, best_dqcoeff, 16);

#if CONFIG_NEWBINTRAMODES
  b->bmi.as_mode.context = vp9_find_bpred_context(b);
#endif
  for (mode = B_DC_PRED; mode < LEFT4X4; mode++) {
    int64_t this_rd;
    int ratey;

#if CONFIG_NEWBINTRAMODES
    /* Key frames never use B_CONTEXT_PRED; inter frames skip the modes
     * that B_CONTEXT_PRED replaces. */
    if (xd->frame_type == KEY_FRAME) {
      if (mode == B_CONTEXT_PRED) continue;
    } else {
      if (mode >= B_CONTEXT_PRED - CONTEXT_PRED_REPLACEMENTS &&
          mode < B_CONTEXT_PRED)
        continue;
    }
#endif

    b->bmi.as_mode.first = mode;
#if CONFIG_NEWBINTRAMODES
    /* B_CONTEXT_PRED is costed at the slot of the mode it replaces. */
    rate = bmode_costs[
        mode == B_CONTEXT_PRED ? mode - CONTEXT_PRED_REPLACEMENTS : mode];
#else
    rate = bmode_costs[mode];
#endif

    vp9_intra4x4_predict(xd, b, mode, b->predictor);
    vp9_subtract_b(be, b, 16);

    /* NOTE(review): mode was already stored above; this second assignment
     * looks redundant — confirm before removing. */
    b->bmi.as_mode.first = mode;
    tx_type = get_tx_type_4x4(xd, b);
    if (tx_type != DCT_DCT) {
      vp9_short_fht4x4(be->src_diff, be->coeff, 32, tx_type);
      vp9_ht_quantize_b_4x4(be, b, tx_type);
    } else {
      x->fwd_txm4x4(be->src_diff, be->coeff, 32);
      x->quantize_b_4x4(be, b);
    }

    /* Fresh copies of the incoming contexts for this mode's token cost. */
    tempa = ta;
    templ = tl;

    ratey = cost_coeffs(x, b, PLANE_TYPE_Y_WITH_DC, &tempa, &templ, TX_4X4);
    rate += ratey;
    /* >> 2 scales the coefficient-domain SSE; presumably compensating for
     * the transform's gain — confirm against the transform scaling. */
    distortion = vp9_block_error(be->coeff, b->dqcoeff, 16) >> 2;

    this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

    if (this_rd < best_rd) {
      *bestrate = rate;
      *bestratey = ratey;
      *bestdistortion = distortion;
      best_rd = this_rd;
      *best_mode = mode;
      best_tx_type = tx_type;
      /* Commit this mode's updated contexts to the caller's arrays. */
      *a = tempa;
      *l = templ;
      copy_predictor(best_predictor, b->predictor);
      vpx_memcpy(best_dqcoeff, b->dqcoeff, 32);
    }
  }
  b->bmi.as_mode.first = (B_PREDICTION_MODE)(*best_mode);

  // inverse transform
  if (best_tx_type != DCT_DCT)
    vp9_short_iht4x4(best_dqcoeff, b->diff, 32, best_tx_type);
  else
    xd->inv_txm4x4(best_dqcoeff, b->diff, 32);

  /* Reconstruct the winning mode into the destination buffer so neighbors
   * predict from the final pixels. */
  vp9_recon_b(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);

  return best_rd;
}
|
|
|
|
|
2013-01-14 23:37:53 +01:00
|
|
|
/* RD search for B_PRED: pick a 4x4 intra mode for each of the 16 luma
 * sub-blocks of a macroblock.
 *
 * Accumulates the running RD cost across sub-blocks and aborts early
 * (returning INT64_MAX) as soon as it can no longer beat best_rd.
 * On success, writes the total rate (*Rate, including the B_PRED mode
 * cost), token-only rate (*rate_y) and distortion (*Distortion), and
 * returns the combined RD cost.
 */
static int64_t rd_pick_intra4x4mby_modes(VP9_COMP *cpi, MACROBLOCK *mb,
                                         int *Rate, int *rate_y,
                                         int *Distortion, int64_t best_rd) {
  int i;
  MACROBLOCKD *const xd = &mb->e_mbd;
  int cost = mb->mbmode_cost [xd->frame_type] [B_PRED];
  int distortion = 0;
  int tot_rate_y = 0;
  int64_t total_rd = 0;
  /* Local copies of the entropy contexts so the per-block search can
   * update them without touching the real encoder state. */
  ENTROPY_CONTEXT_PLANES t_above, t_left;
  ENTROPY_CONTEXT *ta, *tl;
  int *bmode_costs;

  vpx_memcpy(&t_above, xd->above_context,
             sizeof(ENTROPY_CONTEXT_PLANES));
  vpx_memcpy(&t_left, xd->left_context,
             sizeof(ENTROPY_CONTEXT_PLANES));

  ta = (ENTROPY_CONTEXT *)&t_above;
  tl = (ENTROPY_CONTEXT *)&t_left;

  xd->mode_info_context->mbmi.mode = B_PRED;
  bmode_costs = mb->inter_bmode_costs;

  for (i = 0; i < 16; i++) {
    MODE_INFO *const mic = xd->mode_info_context;
    const int mis = xd->mode_info_stride;
    B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
    int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry), UNINITIALIZED_IS_SAFE(d);

    /* Key frames condition the mode cost on the above/left block modes. */
    if (xd->frame_type == KEY_FRAME) {
      const B_PREDICTION_MODE A = above_block_mode(mic, i, mis);
      const B_PREDICTION_MODE L = left_block_mode(mic, i);

      bmode_costs = mb->bmode_costs[A][L];
    }
#if CONFIG_NEWBINTRAMODES
    mic->bmi[i].as_mode.context = vp9_find_bpred_context(xd->block + i);
#endif

    total_rd += rd_pick_intra4x4block(
        cpi, mb, mb->block + i, xd->block + i, &best_mode,
        bmode_costs, ta + vp9_block2above[TX_4X4][i],
        tl + vp9_block2left[TX_4X4][i], &r, &ry, &d);

    cost += r;
    distortion += d;
    tot_rate_y += ry;

    mic->bmi[i].as_mode.first = best_mode;

#if 0 // CONFIG_NEWBINTRAMODES
    printf("%d %d\n", mic->bmi[i].as_mode.first, mic->bmi[i].as_mode.context);
#endif

    /* Early-out: already worse than the best full-MB alternative. */
    if (total_rd >= best_rd)
      break;
  }

  if (total_rd >= best_rd)
    return INT64_MAX;

  *Rate = cost;
  *rate_y = tot_rate_y;
  *Distortion = distortion;

  return RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
}
|
2011-06-08 18:05:05 +02:00
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
/* Pick the best whole-superblock (32x32) luma intra prediction mode.
 *
 * Loops DC_PRED..TM_PRED, building the predictor and running the
 * superblock Y RD analysis for each; keeps the lowest-RD mode and leaves
 * it in mbmi->mode. Outputs total rate, token-only rate, distortion and
 * skippability of the winner; returns its RD cost.
 * txfm_cache is filled by super_block_yrd() — note it ends up holding the
 * values from the last mode evaluated, not necessarily the winner.
 */
static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi,
                                      MACROBLOCK *x,
                                      int *rate,
                                      int *rate_tokenonly,
                                      int *distortion,
                                      int *skippable,
                                      int64_t txfm_cache[NB_TXFM_MODES]) {
  MB_PREDICTION_MODE mode;
  MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
  int this_rate, this_rate_tokenonly;
  int this_distortion, s;
  int64_t best_rd = INT64_MAX, this_rd;

  /* Y Search for 32x32 intra prediction mode */
  for (mode = DC_PRED; mode <= TM_PRED; mode++) {
    x->e_mbd.mode_info_context->mbmi.mode = mode;
    vp9_build_intra_predictors_sby_s(&x->e_mbd);

    super_block_yrd(cpi, x, &this_rate_tokenonly,
                    &this_distortion, &s, txfm_cache);
    /* Total rate = token rate + cost of signalling the mode itself. */
    this_rate = this_rate_tokenonly +
                x->mbmode_cost[x->e_mbd.frame_type]
                              [x->e_mbd.mode_info_context->mbmi.mode];
    this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);

    if (this_rd < best_rd) {
      mode_selected = mode;
      best_rd = this_rd;
      *rate = this_rate;
      *rate_tokenonly = this_rate_tokenonly;
      *distortion = this_distortion;
      *skippable = s;
    }
  }

  x->e_mbd.mode_info_context->mbmi.mode = mode_selected;

  return best_rd;
}
|
2013-01-06 03:20:25 +01:00
|
|
|
|
|
|
|
/* Pick the best whole-superblock (64x64) luma intra prediction mode.
 *
 * Mirrors rd_pick_intra_sby_mode() but operates on a 64x64 superblock,
 * using the sb64 predictor builder and super_block_64_yrd(). Keeps the
 * lowest-RD mode in mbmi->mode; outputs total rate, token-only rate,
 * distortion and skippability of the winner; returns its RD cost.
 */
static int64_t rd_pick_intra_sb64y_mode(VP9_COMP *cpi,
                                        MACROBLOCK *x,
                                        int *rate,
                                        int *rate_tokenonly,
                                        int *distortion,
                                        int *skippable,
                                        int64_t txfm_cache[NB_TXFM_MODES]) {
  MB_PREDICTION_MODE mode;
  MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
  int this_rate, this_rate_tokenonly;
  int this_distortion, s;
  int64_t best_rd = INT64_MAX, this_rd;

  /* Y search for the best 64x64 intra prediction mode
   * (original comment said 32x32; this is the 64x64 path). */
  for (mode = DC_PRED; mode <= TM_PRED; mode++) {
    x->e_mbd.mode_info_context->mbmi.mode = mode;
    vp9_build_intra_predictors_sb64y_s(&x->e_mbd);

    super_block_64_yrd(cpi, x, &this_rate_tokenonly,
                       &this_distortion, &s, txfm_cache);
    /* Total rate = token rate + cost of signalling the mode itself. */
    this_rate = this_rate_tokenonly +
                x->mbmode_cost[x->e_mbd.frame_type]
                              [x->e_mbd.mode_info_context->mbmi.mode];
    this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);

    if (this_rd < best_rd) {
      mode_selected = mode;
      best_rd = this_rd;
      *rate = this_rate;
      *rate_tokenonly = this_rate_tokenonly;
      *distortion = this_distortion;
      *skippable = s;
    }
  }

  x->e_mbd.mode_info_context->mbmi.mode = mode_selected;

  return best_rd;
}
|
2011-06-08 18:05:05 +02:00
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
/* Pick the best 16x16 luma intra prediction mode for a macroblock.
 *
 * For each mode DC_PRED..TM_PRED, builds the predictor and runs the
 * macroblock Y RD analysis (which also selects a transform size into
 * mbmi->txfm_size and fills a per-mode local txfm cache). Keeps the
 * lowest-RD mode and its transform size, restoring both into mbmi before
 * returning. txfm_cache[i] is maintained as the best RD seen for
 * transform-mode i, adjusted relative to the currently active
 * cpi->common.txfm_mode.
 */
static int64_t rd_pick_intra16x16mby_mode(VP9_COMP *cpi,
                                          MACROBLOCK *x,
                                          int *Rate,
                                          int *rate_y,
                                          int *Distortion,
                                          int *skippable,
                                          int64_t txfm_cache[NB_TXFM_MODES]) {
  MB_PREDICTION_MODE mode;
  TX_SIZE txfm_size = 0;
  MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
  int rate, ratey;
  int distortion, skip;
  int64_t best_rd = INT64_MAX;
  int64_t this_rd;

  int i;
  for (i = 0; i < NB_TXFM_MODES; i++)
    txfm_cache[i] = INT64_MAX;

  // Y Search for 16x16 intra prediction mode
  for (mode = DC_PRED; mode <= TM_PRED; mode++) {
    int64_t local_txfm_cache[NB_TXFM_MODES];

    mbmi->mode = mode;

    vp9_build_intra_predictors_mby(xd);

    macro_block_yrd(cpi, x, &ratey, &distortion, &skip, local_txfm_cache);

    // FIXME add compoundmode cost
    // FIXME add rate for mode2
    rate = ratey + x->mbmode_cost[xd->frame_type][mbmi->mode];

    this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

    if (this_rd < best_rd) {
      mode_selected = mode;
      /* Remember the transform size macro_block_yrd() chose for this mode. */
      txfm_size = mbmi->txfm_size;
      best_rd = this_rd;
      *Rate = rate;
      *rate_y = ratey;
      *Distortion = distortion;
      *skippable = skip;
    }

    /* Fold this mode's per-transform-mode costs into the global cache,
     * expressed relative to the currently selected txfm_mode. */
    for (i = 0; i < NB_TXFM_MODES; i++) {
      int64_t adj_rd = this_rd + local_txfm_cache[i] -
                       local_txfm_cache[cpi->common.txfm_mode];
      if (adj_rd < txfm_cache[i]) {
        txfm_cache[i] = adj_rd;
      }
    }
  }

  /* Restore the winner's decisions into the macroblock mode info. */
  mbmi->txfm_size = txfm_size;
  mbmi->mode = mode_selected;

  return best_rd;
}
|
2012-06-25 21:26:09 +02:00
|
|
|
|
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
/* RD search over all 8x8 intra prediction modes for one 8x8 luma block
 * (block index ib within the macroblock).
 *
 * Depending on mbmi->txfm_size, the residual is coded either as one 8x8
 * transform (possibly a hybrid transform) or as four 4x4 transforms —
 * with a pairwise 8x4 fast path when two horizontally adjacent 4x4s both
 * use DCT. The winning mode's entropy contexts are written back into a/l,
 * and the block is re-encoded with vp9_encode_intra8x8() so the final
 * reconstruction matches the chosen mode.
 *
 * Outputs: *best_mode, *bestrate, *bestratey (token rate only),
 * *bestdistortion; returns the best RD cost.
 */
static int64_t rd_pick_intra8x8block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
                                     B_PREDICTION_MODE *best_mode,
                                     int *mode_costs,
                                     ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
                                     int *bestrate, int *bestratey,
                                     int *bestdistortion) {
  MB_PREDICTION_MODE mode;
  MACROBLOCKD *xd = &x->e_mbd;
  int64_t best_rd = INT64_MAX;
  int distortion = 0, rate = 0;
  BLOCK *be = x->block + ib;
  BLOCKD *b = xd->block + ib;
  ENTROPY_CONTEXT ta0, ta1, besta0 = 0, besta1 = 0;
  ENTROPY_CONTEXT tl0, tl1, bestl0 = 0, bestl1 = 0;

  /*
   * The predictor buffer is a 2d buffer with a stride of 16. Create
   * a temp buffer that meets the stride requirements, but we are only
   * interested in the left 8x8 block
   * */
  DECLARE_ALIGNED_ARRAY(16, uint8_t, best_predictor, 16 * 8);
  DECLARE_ALIGNED_ARRAY(16, int16_t, best_dqcoeff, 16 * 4);

  // perform transformation of dimension 8x8
  // note the input and output index mapping
  int idx = (ib & 0x02) ? (ib + 2) : ib;

  for (mode = DC_PRED; mode <= TM_PRED; mode++) {
    int64_t this_rd;
    int rate_t = 0;

    // FIXME rate for compound mode and second intrapred mode
    rate = mode_costs[mode];
    b->bmi.as_mode.first = mode;

    vp9_intra8x8_predict(xd, b, mode, b->predictor);

    vp9_subtract_4b_c(be, b, 16);

    if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
      /* Single 8x8 transform (hybrid if the block qualifies). */
      TX_TYPE tx_type = get_tx_type_8x8(xd, b);
      if (tx_type != DCT_DCT)
        vp9_short_fht8x8(be->src_diff, (x->block + idx)->coeff, 32, tx_type);
      else
        x->fwd_txm8x8(be->src_diff, (x->block + idx)->coeff, 32);
      x->quantize_b_8x8(x->block + idx, xd->block + idx);

      // compute quantization mse of 8x8 block
      distortion = vp9_block_error_c((x->block + idx)->coeff,
                                     (xd->block + idx)->dqcoeff, 64);
      ta0 = a[vp9_block2above[TX_8X8][idx]];
      tl0 = l[vp9_block2left[TX_8X8][idx]];

      rate_t = cost_coeffs(x, xd->block + idx, PLANE_TYPE_Y_WITH_DC,
                           &ta0, &tl0, TX_8X8);

      rate += rate_t;
      ta1 = ta0;
      tl1 = tl0;
    } else {
      /* Four 4x4 transforms; iblock maps i to the 4x4 sub-block offsets
       * within the macroblock's 4x4 grid. */
      static const int iblock[4] = {0, 1, 4, 5};
      TX_TYPE tx_type;
      int i;
      ta0 = a[vp9_block2above[TX_4X4][ib]];
      ta1 = a[vp9_block2above[TX_4X4][ib + 1]];
      tl0 = l[vp9_block2left[TX_4X4][ib]];
      tl1 = l[vp9_block2left[TX_4X4][ib + 4]];
      distortion = 0;
      rate_t = 0;
      for (i = 0; i < 4; ++i) {
        int do_two = 0;
        b = &xd->block[ib + iblock[i]];
        be = &x->block[ib + iblock[i]];
        tx_type = get_tx_type_4x4(xd, b);
        if (tx_type != DCT_DCT) {
          vp9_short_fht4x4(be->src_diff, be->coeff, 32, tx_type);
          vp9_ht_quantize_b_4x4(be, b, tx_type);
        } else if (!(i & 1) && get_tx_type_4x4(xd, b + 1) == DCT_DCT) {
          /* Two horizontally adjacent DCT 4x4s: transform and quantize
           * them as a pair and consume both in this iteration. */
          x->fwd_txm8x4(be->src_diff, be->coeff, 32);
          x->quantize_b_4x4_pair(be, be + 1, b, b + 1);
          do_two = 1;
        } else {
          x->fwd_txm4x4(be->src_diff, be->coeff, 32);
          x->quantize_b_4x4(be, b);
        }
        distortion += vp9_block_error_c(be->coeff, b->dqcoeff, 16 << do_two);
        rate_t += cost_coeffs(x, b, PLANE_TYPE_Y_WITH_DC,
                              // i&1 ? &ta1 : &ta0, i&2 ? &tl1 : &tl0,
                              &ta0, &tl0,
                              TX_4X4);
        if (do_two) {
          rate_t += cost_coeffs(x, b + 1, PLANE_TYPE_Y_WITH_DC,
                                // i&1 ? &ta1 : &ta0, i&2 ? &tl1 : &tl0,
                                &ta0, &tl0,
                                TX_4X4);
          i++;
        }
      }
      /* Re-point b/be at the parent 8x8 block after the 4x4 loop moved
       * them; copy_predictor_8x8 below reads b->predictor. */
      b = &xd->block[ib];
      be = &x->block[ib];
      rate += rate_t;
    }

    /* >> 2 scales the coefficient-domain SSE; presumably compensating for
     * the transform's gain — confirm against the transform scaling. */
    distortion >>= 2;
    this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
    if (this_rd < best_rd) {
      *bestrate = rate;
      *bestratey = rate_t;
      *bestdistortion = distortion;
      besta0 = ta0;
      besta1 = ta1;
      bestl0 = tl0;
      bestl1 = tl1;
      best_rd = this_rd;
      *best_mode = mode;
      copy_predictor_8x8(best_predictor, b->predictor);
      vpx_memcpy(best_dqcoeff, b->dqcoeff, 64);
      vpx_memcpy(best_dqcoeff + 32, b->dqcoeff + 64, 64);
    }
  }
  b->bmi.as_mode.first = (*best_mode);
  /* Re-encode the block with the winning mode for the final
   * reconstruction. */
  vp9_encode_intra8x8(x, ib);

  /* Publish the winner's entropy contexts at the positions matching the
   * transform size used. */
  if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
    a[vp9_block2above[TX_8X8][idx]] = besta0;
    a[vp9_block2above[TX_8X8][idx] + 1] = besta1;
    l[vp9_block2left[TX_8X8][idx]] = bestl0;
    l[vp9_block2left[TX_8X8][idx] + 1] = bestl1;
  } else {
    a[vp9_block2above[TX_4X4][ib]] = besta0;
    a[vp9_block2above[TX_4X4][ib + 1]] = besta1;
    l[vp9_block2left[TX_4X4][ib]] = bestl0;
    l[vp9_block2left[TX_4X4][ib + 4]] = bestl1;
  }

  return best_rd;
}
|
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
/* Chooses the best 8x8 intra prediction mode for each of the four 8x8
 * luma subblocks of a macroblock and accumulates their rate/distortion.
 * On return, *Rate / *rate_y / *Distortion hold the totals and the
 * selected per-subblock modes are stored in mic->bmi[].as_mode.first.
 * Returns the overall RD cost of the chosen modes.
 * NOTE(review): the best_rd parameter is accepted but never read here —
 * presumably kept for signature symmetry with sibling pickers; confirm. */
static int64_t rd_pick_intra8x8mby_modes(VP9_COMP *cpi, MACROBLOCK *mb,
                                         int *Rate, int *rate_y,
                                         int *Distortion, int64_t best_rd) {
  MACROBLOCKD *const xd = &mb->e_mbd;
  int idx, blk;
  /* Seed the rate with the cost of signalling I8X8_PRED itself. */
  int rate_accum = mb->mbmode_cost[xd->frame_type][I8X8_PRED];
  int dist_accum = 0;
  int rate_y_accum = 0;
  int64_t rd_sum = 0;
  ENTROPY_CONTEXT_PLANES ctx_above, ctx_left;
  ENTROPY_CONTEXT *a_ctx, *l_ctx;
  int *mode_costs;

  /* Search against throwaway copies of the entropy contexts so the
   * real coding state is not disturbed by the trial encodes. */
  vpx_memcpy(&ctx_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES));
  vpx_memcpy(&ctx_left, xd->left_context, sizeof(ENTROPY_CONTEXT_PLANES));
  a_ctx = (ENTROPY_CONTEXT *)&ctx_above;
  l_ctx = (ENTROPY_CONTEXT *)&ctx_left;

  xd->mode_info_context->mbmi.mode = I8X8_PRED;
  mode_costs = mb->i8x8_mode_costs;

  for (idx = 0; idx < 4; idx++) {
    MODE_INFO *const mic = xd->mode_info_context;
    B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
    int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry), UNINITIALIZED_IS_SAFE(d);

    blk = vp9_i8x8_block[idx];
    rd_sum += rd_pick_intra8x8block(cpi, mb, blk, &best_mode,
                                    mode_costs, a_ctx, l_ctx, &r, &ry, &d);
    rate_accum += r;
    dist_accum += d;
    rate_y_accum += ry;
    mic->bmi[blk].as_mode.first = best_mode;
  }

  *Rate = rate_accum;
  *rate_y = rate_y_accum;
  *Distortion = dist_accum;
  return RDCOST(mb->rdmult, mb->rddiv, rate_accum, dist_accum);
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-11-08 20:03:00 +01:00
|
|
|
/* Token-costs the eight 4x4 chroma blocks (indices 16..23) of a
 * macroblock. With backup != 0, the entropy contexts are copied first so
 * the caller's contexts stay untouched; with backup == 0 the costing
 * advances the real contexts in place.
 * Returns the summed coefficient cost in rate units. */
static int rd_cost_mbuv_4x4(MACROBLOCK *mb, int backup) {
  int blk;
  int total = 0;
  MACROBLOCKD *xd = &mb->e_mbd;
  ENTROPY_CONTEXT_PLANES ctx_above, ctx_left;
  ENTROPY_CONTEXT *a_ctx, *l_ctx;

  if (backup) {
    /* Cost against scratch copies of the contexts. */
    vpx_memcpy(&ctx_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES));
    vpx_memcpy(&ctx_left, xd->left_context, sizeof(ENTROPY_CONTEXT_PLANES));
    a_ctx = (ENTROPY_CONTEXT *)&ctx_above;
    l_ctx = (ENTROPY_CONTEXT *)&ctx_left;
  } else {
    a_ctx = (ENTROPY_CONTEXT *)xd->above_context;
    l_ctx = (ENTROPY_CONTEXT *)xd->left_context;
  }

  for (blk = 16; blk < 24; blk++) {
    total += cost_coeffs(mb, xd->block + blk, PLANE_TYPE_UV,
                         a_ctx + vp9_block2above[TX_4X4][blk],
                         l_ctx + vp9_block2left[TX_4X4][blk],
                         TX_4X4);
  }

  return total;
}
|
|
|
|
|
|
|
|
|
2012-11-08 20:03:00 +01:00
|
|
|
/* Transforms, quantizes, and rate/distortion-scores the chroma planes of
 * a 16x16 inter macroblock using 4x4 transforms. Writes rate, distortion
 * and a skippable flag through the out-parameters and returns the RD cost.
 * NOTE(review): cpi and fullpixel are unused here — presumably retained
 * for signature parity with other rd_inter*_uv helpers; confirm. */
static int64_t rd_inter16x16_uv_4x4(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                                    int *distortion, int fullpixel, int *skip,
                                    int do_ctx_backup) {
  vp9_transform_mbuv_4x4(x);
  vp9_quantize_mbuv_4x4(x);

  *rate = rd_cost_mbuv_4x4(x, do_ctx_backup);
  /* vp9_mbuverror is scaled down by 4 to match the luma distortion
   * units used elsewhere in the RD loop — TODO confirm the scaling. */
  *distortion = vp9_mbuverror(x) / 4;
  *skip = vp9_mbuv_is_skippable_4x4(&x->e_mbd);

  return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
|
2012-02-29 02:11:12 +01:00
|
|
|
|
2012-08-20 23:43:34 +02:00
|
|
|
/* Token-costs the chroma planes of a macroblock with 8x8 transforms:
 * blocks 16 and 20 (one 8x8 per chroma plane, stepping by 4 over the
 * 4x4 block grid). With backup != 0, contexts are copied so the caller's
 * state is preserved; otherwise the real contexts are advanced in place.
 * Returns the summed coefficient cost in rate units.
 * Fix: the no-backup branch now uses the local xd alias like its sibling
 * rd_cost_mbuv_4x4, instead of re-deriving mb->e_mbd (same object). */
static int rd_cost_mbuv_8x8(MACROBLOCK *mb, int backup) {
  int b;
  int cost = 0;
  MACROBLOCKD *xd = &mb->e_mbd;
  ENTROPY_CONTEXT_PLANES t_above, t_left;
  ENTROPY_CONTEXT *ta, *tl;

  if (backup) {
    /* Cost against scratch copies of the contexts. */
    vpx_memcpy(&t_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES));
    vpx_memcpy(&t_left, xd->left_context, sizeof(ENTROPY_CONTEXT_PLANES));

    ta = (ENTROPY_CONTEXT *)&t_above;
    tl = (ENTROPY_CONTEXT *)&t_left;
  } else {
    ta = (ENTROPY_CONTEXT *)xd->above_context;
    tl = (ENTROPY_CONTEXT *)xd->left_context;
  }

  for (b = 16; b < 24; b += 4)
    cost += cost_coeffs(mb, xd->block + b, PLANE_TYPE_UV,
                        ta + vp9_block2above[TX_8X8][b],
                        tl + vp9_block2left[TX_8X8][b], TX_8X8);

  return cost;
}
|
|
|
|
|
2012-11-08 20:03:00 +01:00
|
|
|
/* 8x8-transform counterpart of rd_inter16x16_uv_4x4: transform, quantize
 * and RD-score the chroma planes of a 16x16 inter macroblock. Outputs
 * rate/distortion/skippable through the pointers; returns the RD cost.
 * NOTE(review): cpi and fullpixel are unused — kept for signature parity
 * with the other rd_inter*_uv helpers; confirm. */
static int64_t rd_inter16x16_uv_8x8(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                                    int *distortion, int fullpixel, int *skip,
                                    int do_ctx_backup) {
  vp9_transform_mbuv_8x8(x);
  vp9_quantize_mbuv_8x8(x);

  *rate = rd_cost_mbuv_8x8(x, do_ctx_backup);
  /* Same /4 distortion scaling as the 4x4 path — TODO confirm units. */
  *distortion = vp9_mbuverror(x) / 4;
  *skip = vp9_mbuv_is_skippable_8x8(&x->e_mbd);

  return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
|
|
|
|
|
2013-01-06 03:20:25 +01:00
|
|
|
/* Token-costs the chroma planes of a superblock coded with 16x16
 * transforms (blocks 16 and 20, stepping by 4). With backup != 0 the
 * entropy contexts are copied so the caller's state survives the trial.
 * Returns the summed coefficient cost in rate units.
 * NOTE(review): the TX_8X8 block2above/block2left tables are used here
 * while costing at TX_16X16 — looks intentional at this block layout,
 * but worth confirming against the token packer. */
static int rd_cost_sbuv_16x16(MACROBLOCK *x, int backup) {
  int blk;
  int total = 0;
  MACROBLOCKD *const xd = &x->e_mbd;
  ENTROPY_CONTEXT_PLANES ctx_above, ctx_left;
  ENTROPY_CONTEXT *a_ctx, *l_ctx;

  if (backup) {
    /* Cost against scratch copies of the contexts. */
    vpx_memcpy(&ctx_above, xd->above_context, sizeof(ENTROPY_CONTEXT_PLANES));
    vpx_memcpy(&ctx_left, xd->left_context, sizeof(ENTROPY_CONTEXT_PLANES));
    a_ctx = (ENTROPY_CONTEXT *)&ctx_above;
    l_ctx = (ENTROPY_CONTEXT *)&ctx_left;
  } else {
    a_ctx = (ENTROPY_CONTEXT *)xd->above_context;
    l_ctx = (ENTROPY_CONTEXT *)xd->left_context;
  }

  for (blk = 16; blk < 24; blk += 4) {
    total += cost_coeffs(x, xd->block + blk, PLANE_TYPE_UV,
                         a_ctx + vp9_block2above[TX_8X8][blk],
                         l_ctx + vp9_block2left[TX_8X8][blk], TX_16X16);
  }

  return total;
}
|
|
|
|
|
|
|
|
/* Rate/distortion scoring for a 32x32 superblock's chroma planes coded
 * with one 16x16 transform per plane. Outputs rate, distortion and a
 * skippable flag through the pointers; no return value. */
static void rd_inter32x32_uv_16x16(MACROBLOCK *x, int *rate,
                                   int *distortion, int *skip,
                                   int backup) {
  MACROBLOCKD *const xd = &x->e_mbd;

  vp9_transform_sbuv_16x16(x);
  vp9_quantize_sbuv_16x16(x);

  *rate = rd_cost_sbuv_16x16(x, backup);
  *skip = vp9_sbuv_is_skippable_16x16(xd);
  /* Chroma coefficients live at offset 1024 in the superblock coeff
   * buffers; 512 covers both 16x16 chroma planes. The >> 2 matches the
   * /4 distortion scaling used by the macroblock chroma paths —
   * TODO confirm units. */
  *distortion = vp9_block_error_c(x->sb_coeff_data.coeff + 1024,
                                  xd->sb_coeff_data.dqcoeff + 1024, 512) >> 2;
}
|
|
|
|
|
2012-11-08 20:03:00 +01:00
|
|
|
/* Rate/distortion for the chroma planes of an inter-coded 32x32
 * superblock. For TX_32X32 the whole chroma area is handled by the
 * single 16x16-per-plane path; otherwise the superblock is processed as
 * four 16x16 macroblock quadrants with 4x4 or 8x8 transforms, summing
 * their rates/distortions and AND-ing their skippable flags.
 * Returns the combined RD cost; *rate, *distortion, *skip are outputs. */
static int64_t rd_inter32x32_uv(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                                int *distortion, int fullpixel, int *skip) {
  MACROBLOCKD *xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
  const uint8_t *usrc = x->src.u_buffer, *udst = xd->dst.u_buffer;
  const uint8_t *vsrc = x->src.v_buffer, *vdst = xd->dst.v_buffer;
  int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->dst.uv_stride;

  if (mbmi->txfm_size == TX_32X32) {
    /* Whole-superblock residual, then the 16x16-transform chroma path. */
    vp9_subtract_sbuv_s_c(x->sb_coeff_data.src_diff,
                          usrc, vsrc, src_uv_stride,
                          udst, vdst, dst_uv_stride);
    rd_inter32x32_uv_16x16(x, rate, distortion, skip, 1);
  } else {
    int n, r = 0, d = 0;
    int skippable = 1;
    ENTROPY_CONTEXT_PLANES t_above[2], t_left[2];
    ENTROPY_CONTEXT_PLANES *ta = xd->above_context;
    ENTROPY_CONTEXT_PLANES *tl = xd->left_context;

    /* Save both context planes; the quadrant loop below mutates them
     * in place (backup flag 0 in the per-quadrant calls). */
    memcpy(t_above, xd->above_context, sizeof(t_above));
    memcpy(t_left, xd->left_context, sizeof(t_left));

    /* Visit the four 16x16 quadrants: n&1 selects the column, n>>1 the
     * row. Chroma quadrants are 8x8 pixels, hence the *8 offsets. */
    for (n = 0; n < 4; n++) {
      int x_idx = n & 1, y_idx = n >> 1;
      int d_tmp, s_tmp, r_tmp;

      /* Point the entropy contexts at this quadrant's slot. */
      xd->above_context = ta + x_idx;
      xd->left_context = tl + y_idx;
      vp9_subtract_mbuv_s_c(x->src_diff,
                            usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
                            vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
                            src_uv_stride,
                            udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
                            vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
                            dst_uv_stride);

      if (mbmi->txfm_size == TX_4X4) {
        rd_inter16x16_uv_4x4(cpi, x, &r_tmp, &d_tmp, fullpixel, &s_tmp, 0);
      } else {
        rd_inter16x16_uv_8x8(cpi, x, &r_tmp, &d_tmp, fullpixel, &s_tmp, 0);
      }

      r += r_tmp;
      d += d_tmp;
      /* The superblock is skippable only if every quadrant is. */
      skippable = skippable && s_tmp;
    }

    *rate = r;
    *distortion = d;
    *skip = skippable;
    /* Restore the context pointers, then the saved context contents
     * (the loop above clobbered both). */
    xd->left_context = tl;
    xd->above_context = ta;
    memcpy(xd->above_context, t_above, sizeof(t_above));
    memcpy(xd->left_context, t_left, sizeof(t_left));
  }

  return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
|
2013-01-06 03:20:25 +01:00
|
|
|
|
|
|
|
/* Forward declaration: super_block_64_uvrd is defined later in this file. */
static void super_block_64_uvrd(MACROBLOCK *x, int *rate,
                                int *distortion, int *skip);
/* Rate/distortion for the chroma planes of a 64x64 superblock: delegates
 * to super_block_64_uvrd and converts the result to an RD cost.
 * NOTE(review): cpi and fullpixel are unused — presumably kept for
 * signature parity with the other rd_inter*_uv helpers; confirm. */
static int64_t rd_inter64x64_uv(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                                int *distortion, int fullpixel, int *skip) {
  super_block_64_uvrd(x, rate, distortion, skip);
  return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
|
2012-01-21 00:30:31 +01:00
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
/* Chroma RD cost for a macroblock using 4x4 inter prediction: builds the
 * 4x4 chroma predictors, computes the residual, then delegates to the
 * 4x4-transform chroma scoring path (with context backup enabled). */
static int64_t rd_inter4x4_uv(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
                              int *distortion, int *skip, int fullpixel) {
  MACROBLOCKD *const xd = &x->e_mbd;

  vp9_build_inter4x4_predictors_mbuv(xd);
  vp9_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
                    xd->predictor, x->src.uv_stride);
  return rd_inter16x16_uv_4x4(cpi, x, rate, distortion, fullpixel, skip, 1);
}
|
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
static void rd_pick_intra_mbuv_mode(VP9_COMP *cpi,
|
2012-02-22 03:10:18 +01:00
|
|
|
MACROBLOCK *x,
|
|
|
|
int *rate,
|
|
|
|
int *rate_tokenonly,
|
2012-08-29 19:43:20 +02:00
|
|
|
int *distortion,
|
|
|
|
int *skippable) {
|
2012-07-14 00:21:29 +02:00
|
|
|
MB_PREDICTION_MODE mode;
|
|
|
|
MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
|
2012-08-29 19:43:20 +02:00
|
|
|
MACROBLOCKD *xd = &x->e_mbd;
|
2012-08-10 15:12:43 +02:00
|
|
|
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
|
2013-01-14 20:49:30 +01:00
|
|
|
int64_t best_rd = INT64_MAX;
|
2012-07-14 00:21:29 +02:00
|
|
|
int UNINITIALIZED_IS_SAFE(d), UNINITIALIZED_IS_SAFE(r);
|
2012-08-29 19:43:20 +02:00
|
|
|
int rate_to, UNINITIALIZED_IS_SAFE(skip);
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
|
2013-01-14 23:37:53 +01:00
|
|
|
int rate;
|
|
|
|
int distortion;
|
|
|
|
int64_t this_rd;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2013-01-14 23:37:53 +01:00
|
|
|
mbmi->uv_mode = mode;
|
|
|
|
vp9_build_intra_predictors_mbuv(&x->e_mbd);
|
2012-02-29 02:12:08 +01:00
|
|
|
|
2013-01-14 23:37:53 +01:00
|
|
|
vp9_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
|
|
|
|
x->e_mbd.predictor, x->src.uv_stride);
|
|
|
|
vp9_transform_mbuv_4x4(x);
|
|
|
|
vp9_quantize_mbuv_4x4(x);
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2013-01-14 23:37:53 +01:00
|
|
|
rate_to = rd_cost_mbuv_4x4(x, 1);
|
|
|
|
rate = rate_to
|
|
|
|
+ x->intra_uv_mode_cost[x->e_mbd.frame_type][mbmi->uv_mode];
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2013-01-14 23:37:53 +01:00
|
|
|
distortion = vp9_mbuverror(x) / 4;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2013-01-14 23:37:53 +01:00
|
|
|
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2013-01-14 23:37:53 +01:00
|
|
|
if (this_rd < best_rd) {
|
|
|
|
skip = vp9_mbuv_is_skippable_4x4(xd);
|
|
|
|
best_rd = this_rd;
|
|
|
|
d = distortion;
|
|
|
|
r = rate;
|
|
|
|
*rate_tokenonly = rate_to;
|
|
|
|
mode_selected = mode;
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
*rate = r;
|
|
|
|
*distortion = d;
|
2012-08-29 19:43:20 +02:00
|
|
|
*skippable = skip;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-08-10 15:12:43 +02:00
|
|
|
mbmi->uv_mode = mode_selected;
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
static void rd_pick_intra_mbuv_mode_8x8(VP9_COMP *cpi,
|
2012-02-22 03:10:18 +01:00
|
|
|
MACROBLOCK *x,
|
|
|
|
int *rate,
|
|
|
|
int *rate_tokenonly,
|
2012-08-29 19:43:20 +02:00
|
|
|
int *distortion,
|
|
|
|
int *skippable) {
|
|
|
|
MACROBLOCKD *xd = &x->e_mbd;
|
2012-07-14 00:21:29 +02:00
|
|
|
MB_PREDICTION_MODE mode;
|
|
|
|
MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
|
2012-08-10 15:12:43 +02:00
|
|
|
MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
|
2013-01-14 20:49:30 +01:00
|
|
|
int64_t best_rd = INT64_MAX;
|
2012-07-14 00:21:29 +02:00
|
|
|
int UNINITIALIZED_IS_SAFE(d), UNINITIALIZED_IS_SAFE(r);
|
2012-08-29 19:43:20 +02:00
|
|
|
int rate_to, UNINITIALIZED_IS_SAFE(skip);
|
2012-07-14 00:21:29 +02:00
|
|
|
|
|
|
|
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
|
|
|
|
int rate;
|
|
|
|
int distortion;
|
2012-08-02 19:07:33 +02:00
|
|
|
int64_t this_rd;
|
2012-02-22 03:10:18 +01:00
|
|
|
|
2012-08-10 15:12:43 +02:00
|
|
|
mbmi->uv_mode = mode;
|
2012-10-31 00:25:53 +01:00
|
|
|
vp9_build_intra_predictors_mbuv(&x->e_mbd);
|
2012-10-30 20:58:42 +01:00
|
|
|
vp9_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
|
2012-10-28 18:38:23 +01:00
|
|
|
x->e_mbd.predictor, x->src.uv_stride);
|
2012-10-30 20:58:42 +01:00
|
|
|
vp9_transform_mbuv_8x8(x);
|
2012-02-22 03:10:18 +01:00
|
|
|
|
2012-10-30 20:58:42 +01:00
|
|
|
vp9_quantize_mbuv_8x8(x);
|
2012-02-22 03:10:18 +01:00
|
|
|
|
2012-08-20 23:43:34 +02:00
|
|
|
rate_to = rd_cost_mbuv_8x8(x, 1);
|
2012-08-10 15:12:43 +02:00
|
|
|
rate = rate_to + x->intra_uv_mode_cost[x->e_mbd.frame_type][mbmi->uv_mode];
|
2012-02-22 03:10:18 +01:00
|
|
|
|
2012-10-30 20:58:42 +01:00
|
|
|
distortion = vp9_mbuverror(x) / 4;
|
2012-07-14 00:21:29 +02:00
|
|
|
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
|
2012-02-22 03:10:18 +01:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
if (this_rd < best_rd) {
|
2012-10-30 05:07:40 +01:00
|
|
|
skip = vp9_mbuv_is_skippable_8x8(xd);
|
2012-07-14 00:21:29 +02:00
|
|
|
best_rd = this_rd;
|
|
|
|
d = distortion;
|
|
|
|
r = rate;
|
|
|
|
*rate_tokenonly = rate_to;
|
|
|
|
mode_selected = mode;
|
2012-02-22 03:10:18 +01:00
|
|
|
}
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
|
|
|
*rate = r;
|
|
|
|
*distortion = d;
|
2012-08-29 19:43:20 +02:00
|
|
|
*skippable = skip;
|
2012-08-10 15:12:43 +02:00
|
|
|
mbmi->uv_mode = mode_selected;
|
2012-02-22 03:10:18 +01:00
|
|
|
}
|
|
|
|
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
// TODO(rbultje) very similar to rd_inter32x32_uv(), merge?
|
|
|
|
static void super_block_uvrd(MACROBLOCK *x,
|
|
|
|
int *rate,
|
|
|
|
int *distortion,
|
|
|
|
int *skippable) {
|
2012-08-20 23:43:34 +02:00
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
|
2012-08-20 23:43:34 +02:00
|
|
|
const uint8_t *usrc = x->src.u_buffer, *udst = xd->dst.u_buffer;
|
|
|
|
const uint8_t *vsrc = x->src.v_buffer, *vdst = xd->dst.v_buffer;
|
|
|
|
int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->dst.uv_stride;
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
|
|
|
|
if (mbmi->txfm_size == TX_32X32) {
|
|
|
|
vp9_subtract_sbuv_s_c(x->sb_coeff_data.src_diff,
|
|
|
|
usrc, vsrc, src_uv_stride,
|
|
|
|
udst, vdst, dst_uv_stride);
|
2013-01-06 03:20:25 +01:00
|
|
|
rd_inter32x32_uv_16x16(x, rate, distortion, skippable, 1);
|
2013-01-10 17:23:59 +01:00
|
|
|
} else {
|
2013-01-08 19:29:22 +01:00
|
|
|
int d = 0, r = 0, n, s = 1;
|
|
|
|
ENTROPY_CONTEXT_PLANES t_above[2], t_left[2];
|
|
|
|
ENTROPY_CONTEXT_PLANES *ta_orig = xd->above_context;
|
|
|
|
ENTROPY_CONTEXT_PLANES *tl_orig = xd->left_context;
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2013-01-08 19:29:22 +01:00
|
|
|
memcpy(t_above, xd->above_context, sizeof(t_above));
|
|
|
|
memcpy(t_left, xd->left_context, sizeof(t_left));
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2013-01-08 19:29:22 +01:00
|
|
|
for (n = 0; n < 4; n++) {
|
|
|
|
int x_idx = n & 1, y_idx = n >> 1;
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2013-01-08 19:29:22 +01:00
|
|
|
vp9_subtract_mbuv_s_c(x->src_diff,
|
|
|
|
usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
|
|
|
|
vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
|
|
|
|
src_uv_stride,
|
|
|
|
udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
|
|
|
|
vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
|
|
|
|
dst_uv_stride);
|
|
|
|
if (mbmi->txfm_size == TX_4X4) {
|
|
|
|
vp9_transform_mbuv_4x4(x);
|
|
|
|
vp9_quantize_mbuv_4x4(x);
|
|
|
|
s &= vp9_mbuv_is_skippable_4x4(xd);
|
|
|
|
} else {
|
|
|
|
vp9_transform_mbuv_8x8(x);
|
|
|
|
vp9_quantize_mbuv_8x8(x);
|
|
|
|
s &= vp9_mbuv_is_skippable_8x8(xd);
|
|
|
|
}
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2013-01-08 19:29:22 +01:00
|
|
|
d += vp9_mbuverror(x) >> 2;
|
|
|
|
xd->above_context = t_above + x_idx;
|
|
|
|
xd->left_context = t_left + y_idx;
|
|
|
|
if (mbmi->txfm_size == TX_4X4) {
|
|
|
|
r += rd_cost_mbuv_4x4(x, 0);
|
|
|
|
} else {
|
|
|
|
r += rd_cost_mbuv_8x8(x, 0);
|
|
|
|
}
|
2013-01-06 03:20:25 +01:00
|
|
|
}
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2013-01-08 19:29:22 +01:00
|
|
|
xd->above_context = ta_orig;
|
|
|
|
xd->left_context = tl_orig;
|
2013-01-06 03:20:25 +01:00
|
|
|
|
2013-01-08 19:29:22 +01:00
|
|
|
*distortion = d;
|
|
|
|
*rate = r;
|
|
|
|
*skippable = s;
|
2013-01-06 03:20:25 +01:00
|
|
|
}
|
|
|
|
}
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2013-01-06 03:20:25 +01:00
|
|
|
// Compute the chroma (U/V) rate-distortion cost for a 64x64 superblock.
// Mirrors super_block_uvrd() but covers a 64x64 area: four 32x32 quadrants
// under TX_32X32, otherwise sixteen 16x16 macroblocks.
static void super_block_64_uvrd(MACROBLOCK *x,
                                int *rate,
                                int *distortion,
                                int *skippable) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
  const uint8_t *usrc = x->src.u_buffer, *udst = xd->dst.u_buffer;
  const uint8_t *vsrc = x->src.v_buffer, *vdst = xd->dst.v_buffer;
  int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->dst.uv_stride;
  ENTROPY_CONTEXT_PLANES t_above[4], t_left[4];
  ENTROPY_CONTEXT_PLANES *ta_orig = xd->above_context;
  ENTROPY_CONTEXT_PLANES *tl_orig = xd->left_context;
  int d = 0, r = 0, n, s = 1;

  // Work on copies of the entropy contexts; originals are restored below.
  memcpy(t_above, xd->above_context, sizeof(t_above));
  memcpy(t_left, xd->left_context, sizeof(t_left));

  if (mbmi->txfm_size == TX_32X32) {
    int n;  // shadows the outer n; quadrant index for this branch

    *rate = 0;
    for (n = 0; n < 4; n++) {
      int x_idx = n & 1, y_idx = n >> 1;
      int r_tmp, d_tmp, s_tmp;

      vp9_subtract_sbuv_s_c(x->sb_coeff_data.src_diff,
                            usrc + x_idx * 16 + y_idx * 16 * src_uv_stride,
                            vsrc + x_idx * 16 + y_idx * 16 * src_uv_stride,
                            src_uv_stride,
                            udst + x_idx * 16 + y_idx * 16 * dst_uv_stride,
                            vdst + x_idx * 16 + y_idx * 16 * dst_uv_stride,
                            dst_uv_stride);
      // Each 32x32 quadrant spans two context slots in each direction.
      xd->above_context = t_above + x_idx * 2;
      xd->left_context = t_left + y_idx * 2;
      rd_inter32x32_uv_16x16(x, &r_tmp, &d_tmp, &s_tmp, 0);
      r += r_tmp;
      d += d_tmp;
      s = s && s_tmp;
    }
  } else {
    for (n = 0; n < 16; n++) {
      int x_idx = n & 3, y_idx = n >> 2;

      vp9_subtract_mbuv_s_c(x->src_diff,
                            usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
                            vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
                            src_uv_stride,
                            udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
                            vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
                            dst_uv_stride);
      if (mbmi->txfm_size == TX_4X4) {
        vp9_transform_mbuv_4x4(x);
        vp9_quantize_mbuv_4x4(x);
        s &= vp9_mbuv_is_skippable_4x4(xd);
      } else {
        vp9_transform_mbuv_8x8(x);
        vp9_quantize_mbuv_8x8(x);
        s &= vp9_mbuv_is_skippable_8x8(xd);
      }

      xd->above_context = t_above + x_idx;
      xd->left_context = t_left + y_idx;
      d += vp9_mbuverror(x) >> 2;
      if (mbmi->txfm_size == TX_4X4) {
        r += rd_cost_mbuv_4x4(x, 0);
      } else {
        r += rd_cost_mbuv_8x8(x, 0);
      }
    }
  }

  *distortion = d;
  *rate = r;
  *skippable = s;

  // Restore the caller's entropy contexts.
  xd->left_context = tl_orig;
  xd->above_context = ta_orig;
}
|
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi,
|
2012-08-20 23:43:34 +02:00
|
|
|
MACROBLOCK *x,
|
|
|
|
int *rate,
|
|
|
|
int *rate_tokenonly,
|
2012-08-29 19:43:20 +02:00
|
|
|
int *distortion,
|
|
|
|
int *skippable) {
|
2012-08-20 23:43:34 +02:00
|
|
|
MB_PREDICTION_MODE mode;
|
|
|
|
MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
|
2013-01-14 20:49:30 +01:00
|
|
|
int64_t best_rd = INT64_MAX, this_rd;
|
2012-08-20 23:43:34 +02:00
|
|
|
int this_rate_tokenonly, this_rate;
|
2012-08-29 19:43:20 +02:00
|
|
|
int this_distortion, s;
|
2012-08-20 23:43:34 +02:00
|
|
|
|
|
|
|
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
|
|
|
|
x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
|
2012-10-31 00:25:53 +01:00
|
|
|
vp9_build_intra_predictors_sbuv_s(&x->e_mbd);
|
2012-08-20 23:43:34 +02:00
|
|
|
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
super_block_uvrd(x, &this_rate_tokenonly,
|
|
|
|
&this_distortion, &s);
|
2012-08-20 23:43:34 +02:00
|
|
|
this_rate = this_rate_tokenonly +
|
2012-11-15 00:03:39 +01:00
|
|
|
x->intra_uv_mode_cost[x->e_mbd.frame_type][mode];
|
2012-08-20 23:43:34 +02:00
|
|
|
this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
|
|
|
|
|
|
|
|
if (this_rd < best_rd) {
|
|
|
|
mode_selected = mode;
|
|
|
|
best_rd = this_rd;
|
|
|
|
*rate = this_rate;
|
|
|
|
*rate_tokenonly = this_rate_tokenonly;
|
|
|
|
*distortion = this_distortion;
|
2012-08-29 19:43:20 +02:00
|
|
|
*skippable = s;
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
x->e_mbd.mode_info_context->mbmi.uv_mode = mode_selected;
|
|
|
|
|
|
|
|
return best_rd;
|
|
|
|
}
|
2013-01-06 03:20:25 +01:00
|
|
|
|
|
|
|
static int64_t rd_pick_intra_sb64uv_mode(VP9_COMP *cpi,
|
|
|
|
MACROBLOCK *x,
|
|
|
|
int *rate,
|
|
|
|
int *rate_tokenonly,
|
|
|
|
int *distortion,
|
|
|
|
int *skippable) {
|
|
|
|
MB_PREDICTION_MODE mode;
|
|
|
|
MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
|
2013-01-14 20:49:30 +01:00
|
|
|
int64_t best_rd = INT64_MAX, this_rd;
|
2013-01-06 03:20:25 +01:00
|
|
|
int this_rate_tokenonly, this_rate;
|
|
|
|
int this_distortion, s;
|
|
|
|
|
|
|
|
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
|
|
|
|
x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
|
|
|
|
vp9_build_intra_predictors_sb64uv_s(&x->e_mbd);
|
|
|
|
|
|
|
|
super_block_64_uvrd(x, &this_rate_tokenonly,
|
|
|
|
&this_distortion, &s);
|
|
|
|
this_rate = this_rate_tokenonly +
|
|
|
|
x->intra_uv_mode_cost[x->e_mbd.frame_type][mode];
|
|
|
|
this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
|
|
|
|
|
|
|
|
if (this_rd < best_rd) {
|
|
|
|
mode_selected = mode;
|
|
|
|
best_rd = this_rd;
|
|
|
|
*rate = this_rate;
|
|
|
|
*rate_tokenonly = this_rate_tokenonly;
|
|
|
|
*distortion = this_distortion;
|
|
|
|
*skippable = s;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
x->e_mbd.mode_info_context->mbmi.uv_mode = mode_selected;
|
|
|
|
|
|
|
|
return best_rd;
|
|
|
|
}
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
int vp9_cost_mv_ref(VP9_COMP *cpi,
|
2011-12-06 21:03:42 +01:00
|
|
|
MB_PREDICTION_MODE m,
|
2012-11-12 16:09:25 +01:00
|
|
|
const int mode_context) {
|
2012-07-14 00:21:29 +02:00
|
|
|
MACROBLOCKD *xd = &cpi->mb.e_mbd;
|
|
|
|
int segment_id = xd->mode_info_context->mbmi.segment_id;
|
|
|
|
|
2013-01-28 16:22:53 +01:00
|
|
|
// Dont account for mode here if segment skip is enabled.
|
|
|
|
if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
|
2012-10-31 01:53:32 +01:00
|
|
|
VP9_COMMON *pc = &cpi->common;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2012-10-31 22:40:53 +01:00
|
|
|
vp9_prob p [VP9_MVREFS - 1];
|
2012-07-14 00:21:29 +02:00
|
|
|
assert(NEARESTMV <= m && m <= SPLITMV);
|
2012-11-12 16:09:25 +01:00
|
|
|
vp9_mv_ref_probs(pc, p, mode_context);
|
2012-10-31 22:40:53 +01:00
|
|
|
return cost_token(vp9_mv_ref_tree, p,
|
|
|
|
vp9_mv_ref_encoding_array - NEARESTMV + m);
|
2012-07-14 00:21:29 +02:00
|
|
|
} else
|
|
|
|
return 0;
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
|
|
|
|
2012-10-30 20:58:42 +01:00
|
|
|
// Store the given prediction mode and first motion vector into the
// macroblock's mode info.
void vp9_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv) {
  x->e_mbd.mode_info_context->mbmi.mode = mb;
  x->e_mbd.mode_info_context->mbmi.mv[0].as_int = mv->as_int;
}
|
|
|
|
|
2012-07-26 22:42:07 +02:00
|
|
|
static int labels2mode(
|
|
|
|
MACROBLOCK *x,
|
|
|
|
int const *labelings, int which_label,
|
|
|
|
B_PREDICTION_MODE this_mode,
|
|
|
|
int_mv *this_mv, int_mv *this_second_mv,
|
|
|
|
int_mv seg_mvs[MAX_REF_FRAMES - 1],
|
|
|
|
int_mv *best_ref_mv,
|
|
|
|
int_mv *second_best_ref_mv,
|
2012-11-09 00:44:39 +01:00
|
|
|
int *mvjcost, int *mvcost[2]) {
|
2012-10-17 23:51:27 +02:00
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
2012-07-14 00:21:29 +02:00
|
|
|
MODE_INFO *const mic = xd->mode_info_context;
|
2012-08-14 12:32:29 +02:00
|
|
|
MB_MODE_INFO * mbmi = &mic->mbmi;
|
2012-07-14 00:21:29 +02:00
|
|
|
const int mis = xd->mode_info_stride;
|
|
|
|
|
2012-08-07 01:21:23 +02:00
|
|
|
int i, cost = 0, thismvcost = 0;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
|
|
|
/* We have to be careful retrieving previously-encoded motion vectors.
|
|
|
|
Ones from this macroblock have to be pulled from the BLOCKD array
|
|
|
|
as they have not yet made it to the bmi array in our MB_MODE_INFO. */
|
2012-08-07 01:21:23 +02:00
|
|
|
for (i = 0; i < 16; ++i) {
|
2012-07-14 00:21:29 +02:00
|
|
|
BLOCKD *const d = xd->block + i;
|
|
|
|
const int row = i >> 2, col = i & 3;
|
|
|
|
|
|
|
|
B_PREDICTION_MODE m;
|
|
|
|
|
|
|
|
if (labelings[i] != which_label)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (col && labelings[i] == labelings[i - 1])
|
|
|
|
m = LEFT4X4;
|
|
|
|
else if (row && labelings[i] == labelings[i - 4])
|
|
|
|
m = ABOVE4X4;
|
|
|
|
else {
|
|
|
|
// the only time we should do costing for new motion vector or mode
|
|
|
|
// is when we are on a new label (jbb May 08, 2007)
|
|
|
|
switch (m = this_mode) {
|
|
|
|
case NEW4X4 :
|
2012-11-07 15:50:25 +01:00
|
|
|
if (mbmi->second_ref_frame > 0) {
|
2012-08-10 15:12:43 +02:00
|
|
|
this_mv->as_int = seg_mvs[mbmi->ref_frame - 1].as_int;
|
|
|
|
this_second_mv->as_int =
|
|
|
|
seg_mvs[mbmi->second_ref_frame - 1].as_int;
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
2012-04-18 22:51:58 +02:00
|
|
|
|
2012-11-09 00:44:39 +01:00
|
|
|
thismvcost = vp9_mv_bit_cost(this_mv, best_ref_mv, mvjcost, mvcost,
|
2012-07-14 00:21:29 +02:00
|
|
|
102, xd->allow_high_precision_mv);
|
2012-11-07 15:50:25 +01:00
|
|
|
if (mbmi->second_ref_frame > 0) {
|
2012-10-30 20:58:42 +01:00
|
|
|
thismvcost += vp9_mv_bit_cost(this_second_mv, second_best_ref_mv,
|
2012-11-09 00:44:39 +01:00
|
|
|
mvjcost, mvcost, 102,
|
2012-08-03 21:17:18 +02:00
|
|
|
xd->allow_high_precision_mv);
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case LEFT4X4:
|
2013-02-09 04:46:36 +01:00
|
|
|
this_mv->as_int = col ? d[-1].bmi.as_mv[0].as_int :
|
[WIP] Add column-based tiling.
This patch adds column-based tiling. The idea is to make each tile
independently decodable (after reading the common frame header) and
also independendly encodable (minus within-frame cost adjustments in
the RD loop) to speed-up hardware & software en/decoders if they used
multi-threading. Column-based tiling has the added advantage (over
other tiling methods) that it minimizes realtime use-case latency,
since all threads can start encoding data as soon as the first SB-row
worth of data is available to the encoder.
There is some test code that does random tile ordering in the decoder,
to confirm that each tile is indeed independently decodable from other
tiles in the same frame. At tile edges, all contexts assume default
values (i.e. 0, 0 motion vector, no coefficients, DC intra4x4 mode),
and motion vector search and ordering do not cross tiles in the same
frame.
t log
Tile independence is not maintained between frames ATM, i.e. tile 0 of
frame 1 is free to use motion vectors that point into any tile of frame
0. We support 1 (i.e. no tiling), 2 or 4 column-tiles.
The loopfilter crosses tile boundaries. I discussed this briefly with Aki
and he says that's OK. An in-loop loopfilter would need to do some sync
between tile threads, but that shouldn't be a big issue.
Resuls: with tiling disabled, we go up slightly because of improved edge
use in the intra4x4 prediction. With 2 tiles, we lose about ~1% on derf,
~0.35% on HD and ~0.55% on STD/HD. With 4 tiles, we lose another ~1.5%
on derf ~0.77% on HD and ~0.85% on STD/HD. Most of this loss is
concentrated in the low-bitrate end of clips, and most of it is because
of the loss of edges at tile boundaries and the resulting loss of intra
predictors.
TODO:
- more tiles (perhaps allow row-based tiling also, and max. 8 tiles)?
- maybe optionally (for EC purposes), motion vectors themselves
should not cross tile edges, or we should emulate such borders as
if they were off-frame, to limit error propagation to within one
tile only. This doesn't have to be the default behaviour but could
be an optional bitstream flag.
Change-Id: I5951c3a0742a767b20bc9fb5af685d9892c2c96f
2013-02-01 18:35:28 +01:00
|
|
|
left_block_mv(xd, mic, i);
|
2012-11-07 15:50:25 +01:00
|
|
|
if (mbmi->second_ref_frame > 0)
|
2013-02-09 04:46:36 +01:00
|
|
|
this_second_mv->as_int = col ? d[-1].bmi.as_mv[1].as_int :
|
[WIP] Add column-based tiling.
This patch adds column-based tiling. The idea is to make each tile
independently decodable (after reading the common frame header) and
also independendly encodable (minus within-frame cost adjustments in
the RD loop) to speed-up hardware & software en/decoders if they used
multi-threading. Column-based tiling has the added advantage (over
other tiling methods) that it minimizes realtime use-case latency,
since all threads can start encoding data as soon as the first SB-row
worth of data is available to the encoder.
There is some test code that does random tile ordering in the decoder,
to confirm that each tile is indeed independently decodable from other
tiles in the same frame. At tile edges, all contexts assume default
values (i.e. 0, 0 motion vector, no coefficients, DC intra4x4 mode),
and motion vector search and ordering do not cross tiles in the same
frame.
t log
Tile independence is not maintained between frames ATM, i.e. tile 0 of
frame 1 is free to use motion vectors that point into any tile of frame
0. We support 1 (i.e. no tiling), 2 or 4 column-tiles.
The loopfilter crosses tile boundaries. I discussed this briefly with Aki
and he says that's OK. An in-loop loopfilter would need to do some sync
between tile threads, but that shouldn't be a big issue.
Resuls: with tiling disabled, we go up slightly because of improved edge
use in the intra4x4 prediction. With 2 tiles, we lose about ~1% on derf,
~0.35% on HD and ~0.55% on STD/HD. With 4 tiles, we lose another ~1.5%
on derf ~0.77% on HD and ~0.85% on STD/HD. Most of this loss is
concentrated in the low-bitrate end of clips, and most of it is because
of the loss of edges at tile boundaries and the resulting loss of intra
predictors.
TODO:
- more tiles (perhaps allow row-based tiling also, and max. 8 tiles)?
- maybe optionally (for EC purposes), motion vectors themselves
should not cross tile edges, or we should emulate such borders as
if they were off-frame, to limit error propagation to within one
tile only. This doesn't have to be the default behaviour but could
be an optional bitstream flag.
Change-Id: I5951c3a0742a767b20bc9fb5af685d9892c2c96f
2013-02-01 18:35:28 +01:00
|
|
|
left_block_second_mv(xd, mic, i);
|
2012-07-14 00:21:29 +02:00
|
|
|
break;
|
|
|
|
case ABOVE4X4:
|
2013-02-09 04:46:36 +01:00
|
|
|
this_mv->as_int = row ? d[-4].bmi.as_mv[0].as_int :
|
[WIP] Add column-based tiling.
This patch adds column-based tiling. The idea is to make each tile
independently decodable (after reading the common frame header) and
also independendly encodable (minus within-frame cost adjustments in
the RD loop) to speed-up hardware & software en/decoders if they used
multi-threading. Column-based tiling has the added advantage (over
other tiling methods) that it minimizes realtime use-case latency,
since all threads can start encoding data as soon as the first SB-row
worth of data is available to the encoder.
There is some test code that does random tile ordering in the decoder,
to confirm that each tile is indeed independently decodable from other
tiles in the same frame. At tile edges, all contexts assume default
values (i.e. 0, 0 motion vector, no coefficients, DC intra4x4 mode),
and motion vector search and ordering do not cross tiles in the same
frame.
t log
Tile independence is not maintained between frames ATM, i.e. tile 0 of
frame 1 is free to use motion vectors that point into any tile of frame
0. We support 1 (i.e. no tiling), 2 or 4 column-tiles.
The loopfilter crosses tile boundaries. I discussed this briefly with Aki
and he says that's OK. An in-loop loopfilter would need to do some sync
between tile threads, but that shouldn't be a big issue.
Resuls: with tiling disabled, we go up slightly because of improved edge
use in the intra4x4 prediction. With 2 tiles, we lose about ~1% on derf,
~0.35% on HD and ~0.55% on STD/HD. With 4 tiles, we lose another ~1.5%
on derf ~0.77% on HD and ~0.85% on STD/HD. Most of this loss is
concentrated in the low-bitrate end of clips, and most of it is because
of the loss of edges at tile boundaries and the resulting loss of intra
predictors.
TODO:
- more tiles (perhaps allow row-based tiling also, and max. 8 tiles)?
- maybe optionally (for EC purposes), motion vectors themselves
should not cross tile edges, or we should emulate such borders as
if they were off-frame, to limit error propagation to within one
tile only. This doesn't have to be the default behaviour but could
be an optional bitstream flag.
Change-Id: I5951c3a0742a767b20bc9fb5af685d9892c2c96f
2013-02-01 18:35:28 +01:00
|
|
|
above_block_mv(mic, i, mis);
|
2012-11-07 15:50:25 +01:00
|
|
|
if (mbmi->second_ref_frame > 0)
|
2013-02-09 04:46:36 +01:00
|
|
|
this_second_mv->as_int = row ? d[-4].bmi.as_mv[1].as_int :
|
[WIP] Add column-based tiling.
This patch adds column-based tiling. The idea is to make each tile
independently decodable (after reading the common frame header) and
also independendly encodable (minus within-frame cost adjustments in
the RD loop) to speed-up hardware & software en/decoders if they used
multi-threading. Column-based tiling has the added advantage (over
other tiling methods) that it minimizes realtime use-case latency,
since all threads can start encoding data as soon as the first SB-row
worth of data is available to the encoder.
There is some test code that does random tile ordering in the decoder,
to confirm that each tile is indeed independently decodable from other
tiles in the same frame. At tile edges, all contexts assume default
values (i.e. 0, 0 motion vector, no coefficients, DC intra4x4 mode),
and motion vector search and ordering do not cross tiles in the same
frame.
t log
Tile independence is not maintained between frames ATM, i.e. tile 0 of
frame 1 is free to use motion vectors that point into any tile of frame
0. We support 1 (i.e. no tiling), 2 or 4 column-tiles.
The loopfilter crosses tile boundaries. I discussed this briefly with Aki
and he says that's OK. An in-loop loopfilter would need to do some sync
between tile threads, but that shouldn't be a big issue.
Resuls: with tiling disabled, we go up slightly because of improved edge
use in the intra4x4 prediction. With 2 tiles, we lose about ~1% on derf,
~0.35% on HD and ~0.55% on STD/HD. With 4 tiles, we lose another ~1.5%
on derf ~0.77% on HD and ~0.85% on STD/HD. Most of this loss is
concentrated in the low-bitrate end of clips, and most of it is because
of the loss of edges at tile boundaries and the resulting loss of intra
predictors.
TODO:
- more tiles (perhaps allow row-based tiling also, and max. 8 tiles)?
- maybe optionally (for EC purposes), motion vectors themselves
should not cross tile edges, or we should emulate such borders as
if they were off-frame, to limit error propagation to within one
tile only. This doesn't have to be the default behaviour but could
be an optional bitstream flag.
Change-Id: I5951c3a0742a767b20bc9fb5af685d9892c2c96f
2013-02-01 18:35:28 +01:00
|
|
|
above_block_second_mv(mic, i, mis);
|
2012-07-14 00:21:29 +02:00
|
|
|
break;
|
|
|
|
case ZERO4X4:
|
|
|
|
this_mv->as_int = 0;
|
2012-11-07 15:50:25 +01:00
|
|
|
if (mbmi->second_ref_frame > 0)
|
2012-07-14 00:21:29 +02:00
|
|
|
this_second_mv->as_int = 0;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
if (m == ABOVE4X4) { // replace above with left if same
|
|
|
|
int_mv left_mv, left_second_mv;
|
2011-05-24 19:24:52 +02:00
|
|
|
|
2012-08-14 01:20:20 +02:00
|
|
|
left_second_mv.as_int = 0;
|
2013-02-09 04:46:36 +01:00
|
|
|
left_mv.as_int = col ? d[-1].bmi.as_mv[0].as_int :
|
[WIP] Add column-based tiling.
This patch adds column-based tiling. The idea is to make each tile
independently decodable (after reading the common frame header) and
also independendly encodable (minus within-frame cost adjustments in
the RD loop) to speed-up hardware & software en/decoders if they used
multi-threading. Column-based tiling has the added advantage (over
other tiling methods) that it minimizes realtime use-case latency,
since all threads can start encoding data as soon as the first SB-row
worth of data is available to the encoder.
There is some test code that does random tile ordering in the decoder,
to confirm that each tile is indeed independently decodable from other
tiles in the same frame. At tile edges, all contexts assume default
values (i.e. 0, 0 motion vector, no coefficients, DC intra4x4 mode),
and motion vector search and ordering do not cross tiles in the same
frame.
t log
Tile independence is not maintained between frames ATM, i.e. tile 0 of
frame 1 is free to use motion vectors that point into any tile of frame
0. We support 1 (i.e. no tiling), 2 or 4 column-tiles.
The loopfilter crosses tile boundaries. I discussed this briefly with Aki
and he says that's OK. An in-loop loopfilter would need to do some sync
between tile threads, but that shouldn't be a big issue.
Resuls: with tiling disabled, we go up slightly because of improved edge
use in the intra4x4 prediction. With 2 tiles, we lose about ~1% on derf,
~0.35% on HD and ~0.55% on STD/HD. With 4 tiles, we lose another ~1.5%
on derf ~0.77% on HD and ~0.85% on STD/HD. Most of this loss is
concentrated in the low-bitrate end of clips, and most of it is because
of the loss of edges at tile boundaries and the resulting loss of intra
predictors.
TODO:
- more tiles (perhaps allow row-based tiling also, and max. 8 tiles)?
- maybe optionally (for EC purposes), motion vectors themselves
should not cross tile edges, or we should emulate such borders as
if they were off-frame, to limit error propagation to within one
tile only. This doesn't have to be the default behaviour but could
be an optional bitstream flag.
Change-Id: I5951c3a0742a767b20bc9fb5af685d9892c2c96f
2013-02-01 18:35:28 +01:00
|
|
|
left_block_mv(xd, mic, i);
|
2012-11-07 15:50:25 +01:00
|
|
|
if (mbmi->second_ref_frame > 0)
|
2013-02-09 04:46:36 +01:00
|
|
|
left_second_mv.as_int = col ? d[-1].bmi.as_mv[1].as_int :
|
[WIP] Add column-based tiling.
This patch adds column-based tiling. The idea is to make each tile
independently decodable (after reading the common frame header) and
also independendly encodable (minus within-frame cost adjustments in
the RD loop) to speed-up hardware & software en/decoders if they used
multi-threading. Column-based tiling has the added advantage (over
other tiling methods) that it minimizes realtime use-case latency,
since all threads can start encoding data as soon as the first SB-row
worth of data is available to the encoder.
There is some test code that does random tile ordering in the decoder,
to confirm that each tile is indeed independently decodable from other
tiles in the same frame. At tile edges, all contexts assume default
values (i.e. 0, 0 motion vector, no coefficients, DC intra4x4 mode),
and motion vector search and ordering do not cross tiles in the same
frame.
t log
Tile independence is not maintained between frames ATM, i.e. tile 0 of
frame 1 is free to use motion vectors that point into any tile of frame
0. We support 1 (i.e. no tiling), 2 or 4 column-tiles.
The loopfilter crosses tile boundaries. I discussed this briefly with Aki
and he says that's OK. An in-loop loopfilter would need to do some sync
between tile threads, but that shouldn't be a big issue.
Resuls: with tiling disabled, we go up slightly because of improved edge
use in the intra4x4 prediction. With 2 tiles, we lose about ~1% on derf,
~0.35% on HD and ~0.55% on STD/HD. With 4 tiles, we lose another ~1.5%
on derf ~0.77% on HD and ~0.85% on STD/HD. Most of this loss is
concentrated in the low-bitrate end of clips, and most of it is because
of the loss of edges at tile boundaries and the resulting loss of intra
predictors.
TODO:
- more tiles (perhaps allow row-based tiling also, and max. 8 tiles)?
- maybe optionally (for EC purposes), motion vectors themselves
should not cross tile edges, or we should emulate such borders as
if they were off-frame, to limit error propagation to within one
tile only. This doesn't have to be the default behaviour but could
be an optional bitstream flag.
Change-Id: I5951c3a0742a767b20bc9fb5af685d9892c2c96f
2013-02-01 18:35:28 +01:00
|
|
|
left_block_second_mv(xd, mic, i);
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
if (left_mv.as_int == this_mv->as_int &&
|
2012-11-07 15:50:25 +01:00
|
|
|
(mbmi->second_ref_frame <= 0 ||
|
2012-07-14 00:21:29 +02:00
|
|
|
left_second_mv.as_int == this_second_mv->as_int))
|
|
|
|
m = LEFT4X4;
|
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-10-09 22:19:15 +02:00
|
|
|
#if CONFIG_NEWBINTRAMODES
|
|
|
|
cost = x->inter_bmode_costs[
|
|
|
|
m == B_CONTEXT_PRED ? m - CONTEXT_PRED_REPLACEMENTS : m];
|
|
|
|
#else
|
|
|
|
cost = x->inter_bmode_costs[m];
|
|
|
|
#endif
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2013-02-09 04:46:36 +01:00
|
|
|
d->bmi.as_mv[0].as_int = this_mv->as_int;
|
2012-11-07 15:50:25 +01:00
|
|
|
if (mbmi->second_ref_frame > 0)
|
2013-02-09 04:46:36 +01:00
|
|
|
d->bmi.as_mv[1].as_int = this_second_mv->as_int;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
x->partition_info->bmi[i].mode = m;
|
|
|
|
x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
|
2012-11-07 15:50:25 +01:00
|
|
|
if (mbmi->second_ref_frame > 0)
|
2012-07-14 00:21:29 +02:00
|
|
|
x->partition_info->bmi[i].second_mv.as_int = this_second_mv->as_int;
|
2012-08-07 01:21:23 +02:00
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
cost += thismvcost;
|
|
|
|
return cost;
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
|
|
|
|
2012-10-17 20:40:00 +02:00
|
|
|
/* Encodes (predict/transform/quantize) all 4x4 blocks of a SPLITMV
 * macroblock that belong to the given partition label, using the 4x4
 * transform, and accumulates their rate and distortion.
 *
 * x           - encoder macroblock context
 * labels      - per-4x4-block partition label map (16 entries)
 * which_label - only blocks whose label matches are processed
 * labelyrate  - out: summed coefficient rate for the label's blocks
 * distortion  - out: summed block error, scaled down by 2 bits at the end
 * ta, tl      - above/left entropy contexts, updated by cost_coeffs()
 *
 * Returns the RD cost RDCOST(rate, distortion) for this label.
 */
static int64_t encode_inter_mb_segment(MACROBLOCK *x,
                                       int const *labels,
                                       int which_label,
                                       int *labelyrate,
                                       int *distortion,
                                       ENTROPY_CONTEXT *ta,
                                       ENTROPY_CONTEXT *tl) {
  int i;
  MACROBLOCKD *xd = &x->e_mbd;

  *labelyrate = 0;
  *distortion = 0;
  for (i = 0; i < 16; i++) {
    if (labels[i] == which_label) {
      BLOCKD *bd = &x->e_mbd.block[i];
      BLOCK *be = &x->block[i];
      int thisdistortion;

      // Inter prediction for this 4x4 block; add second predictor when
      // compound prediction is in use (second_ref_frame > 0).
      vp9_build_inter_predictors_b(bd, 16, &xd->subpix);
      if (xd->mode_info_context->mbmi.second_ref_frame > 0)
        vp9_build_2nd_inter_predictors_b(bd, 16, &xd->subpix);
      vp9_subtract_b(be, bd, 16);
      x->fwd_txm4x4(be->src_diff, be->coeff, 32);
      x->quantize_b_4x4(be, bd);
      thisdistortion = vp9_block_error(be->coeff, bd->dqcoeff, 16);
      *distortion += thisdistortion;
      *labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
                                 ta + vp9_block2above[TX_4X4][i],
                                 tl + vp9_block2left[TX_4X4][i], TX_4X4);
    }
  }
  // Scale transform-domain error down to match the distortion metric
  // used elsewhere in the RD loop.
  *distortion >>= 2;
  return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
}
|
|
|
|
|
2012-10-17 20:40:00 +02:00
|
|
|
/* Encodes all 8x8 sub-blocks of a SPLITMV macroblock belonging to the
 * given partition label, honoring the currently selected transform size
 * (TX_4X4 or TX_8X8 per mbmi->txfm_size), and accumulates rate/distortion.
 *
 * When `otherrd` is non-NULL, the *other* transform size is additionally
 * evaluated on scratch entropy contexts (tac/tlc) so the caller can compare
 * 4x4 vs 8x8 RD without re-running the search; its RD cost is returned via
 * *otherrd.
 *
 * Returns the RD cost for the selected transform size.
 */
static int64_t encode_inter_mb_segment_8x8(MACROBLOCK *x,
                                           int const *labels,
                                           int which_label,
                                           int *labelyrate,
                                           int *distortion,
                                           int64_t *otherrd,
                                           ENTROPY_CONTEXT *ta,
                                           ENTROPY_CONTEXT *tl) {
  int i, j;
  MACROBLOCKD *xd = &x->e_mbd;
  const int iblock[4] = { 0, 1, 4, 5 };  // 4x4 offsets inside an 8x8 block
  int othercost = 0, otherdist = 0;
  // Scratch entropy contexts for the alternative transform size, so the
  // real ta/tl are not disturbed by the side evaluation.
  ENTROPY_CONTEXT_PLANES tac, tlc;
  ENTROPY_CONTEXT *tacp = (ENTROPY_CONTEXT *) &tac,
                  *tlcp = (ENTROPY_CONTEXT *) &tlc;

  if (otherrd) {
    memcpy(&tac, ta, sizeof(ENTROPY_CONTEXT_PLANES));
    memcpy(&tlc, tl, sizeof(ENTROPY_CONTEXT_PLANES));
  }

  *distortion = 0;
  *labelyrate = 0;
  for (i = 0; i < 4; i++) {
    int ib = vp9_i8x8_block[i];

    if (labels[ib] == which_label) {
      // idx addresses the 8x8-aligned block used by the 8x8 transform path.
      int idx = (ib & 8) + ((ib & 2) << 1);
      BLOCKD *bd = &xd->block[ib], *bd2 = &xd->block[idx];
      BLOCK *be = &x->block[ib], *be2 = &x->block[idx];
      int thisdistortion;

      vp9_build_inter_predictors4b(xd, bd, 16);
      if (xd->mode_info_context->mbmi.second_ref_frame > 0)
        vp9_build_2nd_inter_predictors4b(xd, bd, 16);
      vp9_subtract_4b_c(be, bd, 16);

      if (xd->mode_info_context->mbmi.txfm_size == TX_4X4) {
        // Selected size is 4x4; optionally also evaluate 8x8 on scratch
        // contexts for the caller's comparison.
        if (otherrd) {
          x->fwd_txm8x8(be->src_diff, be2->coeff, 32);
          x->quantize_b_8x8(be2, bd2);
          thisdistortion = vp9_block_error_c(be2->coeff, bd2->dqcoeff, 64);
          otherdist += thisdistortion;
          othercost += cost_coeffs(x, bd2, PLANE_TYPE_Y_WITH_DC,
                                   tacp + vp9_block2above[TX_8X8][idx],
                                   tlcp + vp9_block2left[TX_8X8][idx],
                                   TX_8X8);
        }
        // 4x4 transform applied pairwise (8x4 forward transform covers
        // two horizontally adjacent 4x4 blocks).
        for (j = 0; j < 4; j += 2) {
          bd = &xd->block[ib + iblock[j]];
          be = &x->block[ib + iblock[j]];
          x->fwd_txm8x4(be->src_diff, be->coeff, 32);
          x->quantize_b_4x4_pair(be, be + 1, bd, bd + 1);
          thisdistortion = vp9_block_error_c(be->coeff, bd->dqcoeff, 32);
          *distortion += thisdistortion;
          *labelyrate += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
                                     ta + vp9_block2above[TX_4X4][ib + iblock[j]],
                                     tl + vp9_block2left[TX_4X4][ib + iblock[j]],
                                     TX_4X4);
          *labelyrate += cost_coeffs(x, bd + 1, PLANE_TYPE_Y_WITH_DC,
                                     ta + vp9_block2above[TX_4X4][ib + iblock[j] + 1],
                                     tl + vp9_block2left[TX_4X4][ib + iblock[j]],
                                     TX_4X4);
        }
      } else /* 8x8 */ {
        // Selected size is 8x8; optionally evaluate 4x4 on scratch contexts.
        if (otherrd) {
          for (j = 0; j < 4; j += 2) {
            BLOCKD *bd = &xd->block[ib + iblock[j]];
            BLOCK *be = &x->block[ib + iblock[j]];
            x->fwd_txm8x4(be->src_diff, be->coeff, 32);
            x->quantize_b_4x4_pair(be, be + 1, bd, bd + 1);
            thisdistortion = vp9_block_error_c(be->coeff, bd->dqcoeff, 32);
            otherdist += thisdistortion;
            othercost += cost_coeffs(x, bd, PLANE_TYPE_Y_WITH_DC,
                                     tacp + vp9_block2above[TX_4X4][ib + iblock[j]],
                                     tlcp + vp9_block2left[TX_4X4][ib + iblock[j]],
                                     TX_4X4);
            othercost += cost_coeffs(x, bd + 1, PLANE_TYPE_Y_WITH_DC,
                                     tacp + vp9_block2above[TX_4X4][ib + iblock[j] + 1],
                                     tlcp + vp9_block2left[TX_4X4][ib + iblock[j]],
                                     TX_4X4);
          }
        }
        x->fwd_txm8x8(be->src_diff, be2->coeff, 32);
        x->quantize_b_8x8(be2, bd2);
        thisdistortion = vp9_block_error_c(be2->coeff, bd2->dqcoeff, 64);
        *distortion += thisdistortion;
        *labelyrate += cost_coeffs(x, bd2, PLANE_TYPE_Y_WITH_DC,
                                   ta + vp9_block2above[TX_8X8][idx],
                                   tl + vp9_block2left[TX_8X8][idx], TX_8X8);
      }
    }
  }
  // Scale transform-domain error down to the RD loop's distortion metric.
  *distortion >>= 2;
  if (otherrd) {
    otherdist >>= 2;
    *otherrd = RDCOST(x->rdmult, x->rddiv, othercost, otherdist);
  }
  return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
}
|
2011-06-23 00:07:04 +02:00
|
|
|
|
2010-12-06 22:42:52 +01:00
|
|
|
// Per-partitioning right-shift applied to SSE/SAD values so scores of
// differently sized segments are compared on a common scale (indexed by
// SPLITMV partitioning type; see use with `bestsme >> sseshift` below).
static const unsigned int segmentation_to_sseshift[4] = {3, 3, 2, 0};
|
|
|
|
|
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
/* Running state and best-so-far result for the SPLITMV segmentation
 * search (rd_pick_best_mbsegmentation / rd_check_segment*). */
typedef struct {
  int_mv *ref_mv, *second_ref_mv;  // best reference MVs (first/second ref)
  int_mv mvp;                      // motion vector predictor for the search

  int64_t segment_rd;              // best RD cost found so far
  SPLITMV_PARTITIONING_TYPE segment_num;  // partitioning of the best result
  TX_SIZE txfm_size;               // transform size of the best result
  int r;                           // total rate of the best result
  int d;                           // total distortion of the best result
  int segment_yrate;               // luma coefficient rate of the best result
  B_PREDICTION_MODE modes[16];     // per-4x4 sub-block modes
  int_mv mvs[16], second_mvs[16];  // per-4x4 sub-block MVs (both refs)
  int eobs[16];                    // per-4x4 sub-block end-of-block indices

  int mvthresh;                    // MV-search skip threshold (see caller)
  int *mdcounts;

  int_mv sv_mvp[4];  // save 4 mvp from 8x8
  int sv_istep[2];  // save 2 initial step_param for 16x8/8x16

} BEST_SEG_INFO;
|
|
|
|
|
2013-02-06 21:45:28 +01:00
|
|
|
/* Reports whether a motion vector (in 1/8-pel units) falls outside the
 * macroblock's allowed full-pel search window. Returns nonzero when any
 * of the four bounds is violated, zero otherwise. */
static INLINE int mv_check_bounds(MACROBLOCK *x, int_mv *mv) {
  const int row = mv->as_mv.row >> 3;  // convert 1/8-pel to full pel
  const int col = mv->as_mv.col >> 3;

  return row < x->mv_row_min || row > x->mv_row_max ||
         col < x->mv_col_min || col > x->mv_col_max;
}
|
2010-12-06 22:42:52 +01:00
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
/* Evaluates one SPLITMV partitioning at one transform size: for every
 * partition label it searches the per-label prediction modes (including a
 * NEW4X4 motion search), picks the label's best mode by RD cost, and
 * accumulates total rate/distortion. If the combined RD cost beats
 * bsi->segment_rd, the result (modes, MVs, eobs, rates) is stored in *bsi.
 *
 * otherrds/rds - optional per-label cumulative RD outputs for the other /
 *                selected transform size (used by rd_check_segment to
 *                compare transform modes)
 * completed    - optional out: number of labels actually processed before
 *                early termination (loop stops once this_segment_rd
 *                exceeds bsi->segment_rd)
 * seg_mvs      - per-block, per-reference new-MV cache shared across calls
 */
static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
                                    BEST_SEG_INFO *bsi,
                                    SPLITMV_PARTITIONING_TYPE segmentation,
                                    TX_SIZE tx_size, int64_t *otherrds,
                                    int64_t *rds, int *completed,
                                    /* 16 = n_blocks */
                                    int_mv seg_mvs[16 /* n_blocks */]
                                                  [MAX_REF_FRAMES - 1]) {
  int i, j;
  int const *labels;
  int br = 0, bd = 0;
  B_PREDICTION_MODE this_mode;
  MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;

  int label_count;
  int64_t this_segment_rd = 0, other_segment_rd;
  int label_mv_thresh;
  int rate = 0;
  int sbr = 0, sbd = 0;
  int segmentyrate = 0;
  int best_eobs[16] = { 0 };

  vp9_variance_fn_ptr_t *v_fn_ptr;

  // Working entropy contexts (t_above/t_left) plus a "best" snapshot
  // (t_above_b/t_left_b) restored after each label's mode loop.
  ENTROPY_CONTEXT_PLANES t_above, t_left;
  ENTROPY_CONTEXT *ta, *tl;
  ENTROPY_CONTEXT_PLANES t_above_b, t_left_b;
  ENTROPY_CONTEXT *ta_b, *tl_b;

  vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
  vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));

  ta = (ENTROPY_CONTEXT *)&t_above;
  tl = (ENTROPY_CONTEXT *)&t_left;
  ta_b = (ENTROPY_CONTEXT *)&t_above_b;
  tl_b = (ENTROPY_CONTEXT *)&t_left_b;

  v_fn_ptr = &cpi->fn_ptr[segmentation];
  labels = vp9_mbsplits[segmentation];
  label_count = vp9_mbsplit_count[segmentation];

  // 64 makes this threshold really big effectively
  // making it so that we very rarely check mvs on
  // segments. setting this to 1 would make mv thresh
  // roughly equal to what it is for macroblocks
  label_mv_thresh = 1 * bsi->mvthresh / label_count;

  // Segmentation method overheads
  rate = cost_token(vp9_mbsplit_tree, vp9_mbsplit_probs,
                    vp9_mbsplit_encodings + segmentation);
  rate += vp9_cost_mv_ref(cpi, SPLITMV,
                          mbmi->mb_mode_context[mbmi->ref_frame]);
  this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
  br += rate;
  other_segment_rd = this_segment_rd;

  mbmi->txfm_size = tx_size;
  // Per-label loop; terminates early once the running RD cost already
  // exceeds the best segmentation found so far.
  for (i = 0; i < label_count && this_segment_rd < bsi->segment_rd; i++) {
    int_mv mode_mv[B_MODE_COUNT], second_mode_mv[B_MODE_COUNT];
    int64_t best_label_rd = INT64_MAX, best_other_rd = INT64_MAX;
    B_PREDICTION_MODE mode_selected = ZERO4X4;
    int bestlabelyrate = 0;

    // search for the best motion vector on this segment
    for (this_mode = LEFT4X4; this_mode <= NEW4X4; this_mode ++) {
      int64_t this_rd, other_rd;
      int distortion;
      int labelyrate;
      // Per-mode scratch entropy contexts so each candidate mode costs
      // coefficients against the same starting contexts.
      ENTROPY_CONTEXT_PLANES t_above_s, t_left_s;
      ENTROPY_CONTEXT *ta_s;
      ENTROPY_CONTEXT *tl_s;

      vpx_memcpy(&t_above_s, &t_above, sizeof(ENTROPY_CONTEXT_PLANES));
      vpx_memcpy(&t_left_s, &t_left, sizeof(ENTROPY_CONTEXT_PLANES));

      ta_s = (ENTROPY_CONTEXT *)&t_above_s;
      tl_s = (ENTROPY_CONTEXT *)&t_left_s;

      // motion search for newmv (single predictor case only)
      if (mbmi->second_ref_frame <= 0 && this_mode == NEW4X4) {
        int sseshift, n;
        int step_param = 0;
        int further_steps;
        int thissme, bestsme = INT_MAX;
        BLOCK *c;
        BLOCKD *e;

        /* Is the best so far sufficiently good that we cant justify doing
         * a new motion search. */
        if (best_label_rd < label_mv_thresh)
          break;

        if (cpi->compressor_speed) {
          if (segmentation == PARTITIONING_8X16 ||
              segmentation == PARTITIONING_16X8) {
            // Reuse MV predictors saved from the earlier 8x8 search.
            bsi->mvp.as_int = bsi->sv_mvp[i].as_int;
            if (i == 1 && segmentation == PARTITIONING_16X8)
              bsi->mvp.as_int = bsi->sv_mvp[2].as_int;

            step_param = bsi->sv_istep[i];
          }

          // use previous block's result as next block's MV predictor.
          if (segmentation == PARTITIONING_4X4 && i > 0) {
            bsi->mvp.as_int = x->e_mbd.block[i - 1].bmi.as_mv[0].as_int;
            // Start of a new 4x4 row: take predictor from the block above.
            if (i == 4 || i == 8 || i == 12)
              bsi->mvp.as_int = x->e_mbd.block[i - 4].bmi.as_mv[0].as_int;
            step_param = 2;
          }
        }

        further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;

        {
          int sadpb = x->sadperbit4;
          int_mv mvp_full;

          // Convert predictor from 1/8-pel to full-pel for the search.
          mvp_full.as_mv.row = bsi->mvp.as_mv.row >> 3;
          mvp_full.as_mv.col = bsi->mvp.as_mv.col >> 3;

          // find first label
          n = vp9_mbsplit_offset[segmentation][i];

          c = &x->block[n];
          e = &x->e_mbd.block[n];

          bestsme = vp9_full_pixel_diamond(cpi, x, c, e, &mvp_full, step_param,
                                           sadpb, further_steps, 0, v_fn_ptr,
                                           bsi->ref_mv, &mode_mv[NEW4X4]);

          sseshift = segmentation_to_sseshift[segmentation];

          // Should we do a full search (best quality only)
          if ((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000) {
            /* Check if mvp_full is within the range. */
            clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max,
                     x->mv_row_min, x->mv_row_max);

            thissme = cpi->full_search_sad(x, c, e, &mvp_full,
                                           sadpb, 16, v_fn_ptr,
                                           x->nmvjointcost, x->mvcost,
                                           bsi->ref_mv);

            if (thissme < bestsme) {
              bestsme = thissme;
              mode_mv[NEW4X4].as_int = e->bmi.as_mv[0].as_int;
            } else {
              /* The full search result is actually worse so re-instate the
               * previous best vector */
              e->bmi.as_mv[0].as_int = mode_mv[NEW4X4].as_int;
            }
          }
        }

        if (bestsme < INT_MAX) {
          int distortion;
          unsigned int sse;
          cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4],
                                       bsi->ref_mv, x->errorperbit, v_fn_ptr,
                                       x->nmvjointcost, x->mvcost,
                                       &distortion, &sse);

          // save motion search result for use in compound prediction
          seg_mvs[i][mbmi->ref_frame - 1].as_int = mode_mv[NEW4X4].as_int;
        }
      } else if (mbmi->second_ref_frame > 0 && this_mode == NEW4X4) {
        /* NEW4X4 */
        /* motion search not completed? Then skip newmv for this block with
         * comppred */
        if (seg_mvs[i][mbmi->second_ref_frame - 1].as_int == INVALID_MV ||
            seg_mvs[i][mbmi->ref_frame - 1].as_int == INVALID_MV) {
          continue;
        }
      }

      rate = labels2mode(x, labels, i, this_mode, &mode_mv[this_mode],
                         &second_mode_mv[this_mode], seg_mvs[i],
                         bsi->ref_mv, bsi->second_ref_mv, x->nmvjointcost,
                         x->mvcost);

      // Trap vectors that reach beyond the UMV borders
      if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) ||
          ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
          ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) ||
          ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) {
        continue;
      }
      if (mbmi->second_ref_frame > 0 &&
          mv_check_bounds(x, &second_mode_mv[this_mode]))
        continue;

      if (segmentation == PARTITIONING_4X4) {
        this_rd = encode_inter_mb_segment(x, labels, i, &labelyrate,
                                          &distortion, ta_s, tl_s);
        other_rd = this_rd;
      } else {
        this_rd = encode_inter_mb_segment_8x8(x, labels, i, &labelyrate,
                                              &distortion, &other_rd,
                                              ta_s, tl_s);
      }
      this_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
      rate += labelyrate;

      if (this_rd < best_label_rd) {
        sbr = rate;
        sbd = distortion;
        bestlabelyrate = labelyrate;
        mode_selected = this_mode;
        best_label_rd = this_rd;
        // Record per-block end-of-block indices for the winning mode.
        if (x->e_mbd.mode_info_context->mbmi.txfm_size == TX_4X4) {
          for (j = 0; j < 16; j++)
            if (labels[j] == i)
              best_eobs[j] = x->e_mbd.block[j].eob;
        } else {
          for (j = 0; j < 4; j++) {
            int ib = vp9_i8x8_block[j], idx = j * 4;

            if (labels[ib] == i)
              best_eobs[idx] = x->e_mbd.block[idx].eob;
          }
        }
        if (other_rd < best_other_rd)
          best_other_rd = other_rd;

        // Snapshot the entropy contexts produced by the winning mode.
        vpx_memcpy(ta_b, ta_s, sizeof(ENTROPY_CONTEXT_PLANES));
        vpx_memcpy(tl_b, tl_s, sizeof(ENTROPY_CONTEXT_PLANES));

      }
    } /*for each 4x4 mode*/

    // Carry the winning mode's entropy contexts into the next label.
    vpx_memcpy(ta, ta_b, sizeof(ENTROPY_CONTEXT_PLANES));
    vpx_memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES));

    // Re-apply the selected mode so block state reflects the winner.
    labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected],
                &second_mode_mv[mode_selected], seg_mvs[i],
                bsi->ref_mv, bsi->second_ref_mv, x->nmvjointcost, x->mvcost);

    br += sbr;
    bd += sbd;
    segmentyrate += bestlabelyrate;
    this_segment_rd += best_label_rd;
    other_segment_rd += best_other_rd;
    if (rds)
      rds[i] = this_segment_rd;
    if (otherrds)
      otherrds[i] = other_segment_rd;
  } /* for each label */

  if (this_segment_rd < bsi->segment_rd) {
    bsi->r = br;
    bsi->d = bd;
    bsi->segment_yrate = segmentyrate;
    bsi->segment_rd = this_segment_rd;
    bsi->segment_num = segmentation;
    bsi->txfm_size = mbmi->txfm_size;

    // store everything needed to come back to this!!
    for (i = 0; i < 16; i++) {
      bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
      if (mbmi->second_ref_frame > 0)
        bsi->second_mvs[i].as_mv = x->partition_info->bmi[i].second_mv.as_mv;
      bsi->modes[i] = x->partition_info->bmi[i].mode;
      bsi->eobs[i] = best_eobs[i];
    }
  }

  if (completed) {
    *completed = i;
  }
}
|
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
/* Evaluates one SPLITMV partitioning across the transform sizes permitted
 * by the frame's txfm_mode, updating *bsi with the best result and filling
 * txfm_cache[] with the best segmentation RD cost per transform mode.
 *
 * Note: `diff`/`base_rd` are only assigned inside `n == c` branches, and
 * the trailing block that reads them is guarded by the same `n == c`
 * condition (n = number of labels completed, c = label count), so the
 * values are only consumed when the search ran to completion.
 */
static void rd_check_segment(VP9_COMP *cpi, MACROBLOCK *x,
                             BEST_SEG_INFO *bsi,
                             unsigned int segmentation,
                             /* 16 = n_blocks */
                             int_mv seg_mvs[16][MAX_REF_FRAMES - 1],
                             int64_t txfm_cache[NB_TXFM_MODES]) {
  int i, n, c = vp9_mbsplit_count[segmentation];

  if (segmentation == PARTITIONING_4X4) {
    // 4x4 partitioning only ever uses the 4x4 transform; its RD cost is a
    // candidate for every transform mode in the cache.
    int64_t rd[16];

    rd_check_segment_txsize(cpi, x, bsi, segmentation, TX_4X4, NULL,
                            rd, &n, seg_mvs);
    if (n == c) {
      for (i = 0; i < NB_TXFM_MODES; i++) {
        if (rd[c - 1] < txfm_cache[i])
          txfm_cache[i] = rd[c - 1];
      }
    }
  } else {
    int64_t diff, base_rd;
    int cost4x4 = vp9_cost_bit(cpi->common.prob_tx[0], 0);
    int cost8x8 = vp9_cost_bit(cpi->common.prob_tx[0], 1);

    if (cpi->common.txfm_mode == TX_MODE_SELECT) {
      // Run the search at both transform sizes and keep the cheaper one,
      // including the per-MB transform-size signaling cost.
      int64_t rd4x4[4], rd8x8[4];
      int n4x4, n8x8, nmin;
      BEST_SEG_INFO bsi4x4, bsi8x8;

      /* factor in cost of cost4x4/8x8 in decision */
      vpx_memcpy(&bsi4x4, bsi, sizeof(*bsi));
      vpx_memcpy(&bsi8x8, bsi, sizeof(*bsi));
      rd_check_segment_txsize(cpi, x, &bsi4x4, segmentation,
                              TX_4X4, NULL, rd4x4, &n4x4, seg_mvs);
      rd_check_segment_txsize(cpi, x, &bsi8x8, segmentation,
                              TX_8X8, NULL, rd8x8, &n8x8, seg_mvs);
      if (bsi4x4.segment_num == segmentation) {
        bsi4x4.segment_rd += RDCOST(x->rdmult, x->rddiv, cost4x4, 0);
        if (bsi4x4.segment_rd < bsi->segment_rd)
          vpx_memcpy(bsi, &bsi4x4, sizeof(*bsi));
      }
      if (bsi8x8.segment_num == segmentation) {
        bsi8x8.segment_rd += RDCOST(x->rdmult, x->rddiv, cost8x8, 0);
        if (bsi8x8.segment_rd < bsi->segment_rd)
          vpx_memcpy(bsi, &bsi8x8, sizeof(*bsi));
      }
      n = n4x4 > n8x8 ? n4x4 : n8x8;
      if (n == c) {
        nmin = n4x4 < n8x8 ? n4x4 : n8x8;
        // diff = (8x8 cost) - (4x4 cost) at the last label both completed.
        diff = rd8x8[nmin - 1] - rd4x4[nmin - 1];
        if (n == n4x4) {
          base_rd = rd4x4[c - 1];
        } else {
          base_rd = rd8x8[c - 1] - diff;
        }
      }
    } else {
      // Single transform size dictated by txfm_mode; the "other" size is
      // evaluated alongside via the otherrd path for cache purposes.
      int64_t rd[4], otherrd[4];

      if (cpi->common.txfm_mode == ONLY_4X4) {
        rd_check_segment_txsize(cpi, x, bsi, segmentation, TX_4X4, otherrd,
                                rd, &n, seg_mvs);
        if (n == c) {
          base_rd = rd[c - 1];
          diff = otherrd[c - 1] - rd[c - 1];
        }
      } else /* use 8x8 transform */ {
        rd_check_segment_txsize(cpi, x, bsi, segmentation, TX_8X8, otherrd,
                                rd, &n, seg_mvs);
        if (n == c) {
          diff = rd[c - 1] - otherrd[c - 1];
          base_rd = otherrd[c - 1];
        }
      }
    }

    if (n == c) {
      // base_rd is the 4x4-transform cost; base_rd + diff the 8x8 cost.
      if (base_rd < txfm_cache[ONLY_4X4]) {
        txfm_cache[ONLY_4X4] = base_rd;
      }
      if (base_rd + diff < txfm_cache[ALLOW_8X8]) {
        txfm_cache[ALLOW_8X8] = txfm_cache[ALLOW_16X16] =
            txfm_cache[ALLOW_32X32] = base_rd + diff;
      }
      // For TX_MODE_SELECT, charge the signaling bit for whichever
      // transform size is cheaper.
      if (diff < 0) {
        base_rd += diff + RDCOST(x->rdmult, x->rddiv, cost8x8, 0);
      } else {
        base_rd += RDCOST(x->rdmult, x->rddiv, cost4x4, 0);
      }
      if (base_rd < txfm_cache[TX_MODE_SELECT]) {
        txfm_cache[TX_MODE_SELECT] = base_rd;
      }
    }
  }
}
|
2010-12-23 17:23:03 +01:00
|
|
|
|
2013-02-06 21:45:28 +01:00
|
|
|
static INLINE void cal_step_param(int sr, int *sp) {
|
2012-07-14 00:21:29 +02:00
|
|
|
int step = 0;
|
2010-12-23 17:23:03 +01:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
if (sr > MAX_FIRST_STEP) sr = MAX_FIRST_STEP;
|
|
|
|
else if (sr < 1) sr = 1;
|
2010-12-23 17:23:03 +01:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
while (sr >>= 1)
|
|
|
|
step++;
|
2010-12-23 17:23:03 +01:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
*sp = MAX_MVSEARCH_STEPS - 1 - step;
|
2010-12-23 17:23:03 +01:00
|
|
|
}
|
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
/* RD search over the macroblock split configurations (16x8, 8x16, 8x8,
 * 4x4). Each candidate partitioning is evaluated by rd_check_segment(),
 * which updates 'bsi' only when it beats the current best segment RD.
 * On return, the winning per-block MVs/modes/eobs have been copied into
 * the macroblock context and partition_info, and the best segment RD
 * score is returned (truncated to int).
 *
 * best_rd      - RD score to beat (early-out threshold).
 * seg_mvs      - per-partitioning / per-block / per-reference new MVs.
 * txfm_cache   - filled with best RD per transform-size mode.
 */
static int rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x,
                                       int_mv *best_ref_mv,
                                       int_mv *second_best_ref_mv,
                                       int64_t best_rd,
                                       int *mdcounts,
                                       int *returntotrate,
                                       int *returnyrate,
                                       int *returndistortion,
                                       int *skippable, int mvthresh,
                                       int_mv seg_mvs[NB_PARTITIONINGS]
                                                     [16 /* n_blocks */]
                                                     [MAX_REF_FRAMES - 1],
                                       int64_t txfm_cache[NB_TXFM_MODES]) {
  int i;
  BEST_SEG_INFO bsi;
  MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;

  vpx_memset(&bsi, 0, sizeof(bsi));
  // Start the transform-mode cache at "worst possible" so any real
  // result from rd_check_segment() replaces it.
  for (i = 0; i < NB_TXFM_MODES; i++)
    txfm_cache[i] = INT64_MAX;

  bsi.segment_rd = best_rd;
  bsi.ref_mv = best_ref_mv;
  bsi.second_ref_mv = second_best_ref_mv;
  bsi.mvp.as_int = best_ref_mv->as_int;
  bsi.mvthresh = mvthresh;
  bsi.mdcounts = mdcounts;
  bsi.txfm_size = TX_4X4;

  for (i = 0; i < 16; i++)
    bsi.modes[i] = ZERO4X4;

  if (cpi->compressor_speed == 0) {
    /* for now, we will keep the original segmentation order
       when in best quality mode */
    rd_check_segment(cpi, x, &bsi, PARTITIONING_16X8,
                     seg_mvs[PARTITIONING_16X8], txfm_cache);
    rd_check_segment(cpi, x, &bsi, PARTITIONING_8X16,
                     seg_mvs[PARTITIONING_8X16], txfm_cache);
    rd_check_segment(cpi, x, &bsi, PARTITIONING_8X8,
                     seg_mvs[PARTITIONING_8X8], txfm_cache);
    rd_check_segment(cpi, x, &bsi, PARTITIONING_4X4,
                     seg_mvs[PARTITIONING_4X4], txfm_cache);
  } else {
    int sr;

    // Fast path: try 8x8 first and use its MVs to steer the other splits.
    rd_check_segment(cpi, x, &bsi, PARTITIONING_8X8,
                     seg_mvs[PARTITIONING_8X8], txfm_cache);

    if (bsi.segment_rd < best_rd) {
      // Save the motion-search window so it can be restored below.
      int tmp_col_min = x->mv_col_min;
      int tmp_col_max = x->mv_col_max;
      int tmp_row_min = x->mv_row_min;
      int tmp_row_max = x->mv_row_max;

      vp9_clamp_mv_min_max(x, best_ref_mv);

      /* Get 8x8 result */
      // Indices 0, 2, 8, 10 are the top-left 4x4 block of each 8x8
      // quadrant in the 4x4 raster grid.
      bsi.sv_mvp[0].as_int = bsi.mvs[0].as_int;
      bsi.sv_mvp[1].as_int = bsi.mvs[2].as_int;
      bsi.sv_mvp[2].as_int = bsi.mvs[8].as_int;
      bsi.sv_mvp[3].as_int = bsi.mvs[10].as_int;

      /* Use 8x8 result as 16x8/8x16's predictor MV. Adjust search range
       * according to the closeness of 2 MV. */
      /* block 8X16 */
      sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[2].as_mv.row)) >> 3,
                (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[2].as_mv.col)) >> 3);
      cal_step_param(sr, &bsi.sv_istep[0]);

      sr = MAXF((abs(bsi.sv_mvp[1].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3,
                (abs(bsi.sv_mvp[1].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3);
      cal_step_param(sr, &bsi.sv_istep[1]);

      rd_check_segment(cpi, x, &bsi, PARTITIONING_8X16,
                       seg_mvs[PARTITIONING_8X16], txfm_cache);

      /* block 16X8 */
      sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[1].as_mv.row)) >> 3,
                (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[1].as_mv.col)) >> 3);
      cal_step_param(sr, &bsi.sv_istep[0]);

      sr = MAXF((abs(bsi.sv_mvp[2].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3,
                (abs(bsi.sv_mvp[2].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3);
      cal_step_param(sr, &bsi.sv_istep[1]);

      rd_check_segment(cpi, x, &bsi, PARTITIONING_16X8,
                       seg_mvs[PARTITIONING_16X8], txfm_cache);

      /* If 8x8 is better than 16x8/8x16, then do 4x4 search */
      /* Not skip 4x4 if speed=0 (good quality) */
      if (cpi->sf.no_skip_block4x4_search ||
          bsi.segment_num == PARTITIONING_8X8) {
        /* || (sv_segment_rd8x8-bsi.segment_rd) < sv_segment_rd8x8>>5) */
        bsi.mvp.as_int = bsi.sv_mvp[0].as_int;
        rd_check_segment(cpi, x, &bsi, PARTITIONING_4X4,
                         seg_mvs[PARTITIONING_4X4], txfm_cache);
      }

      /* restore UMV window */
      x->mv_col_min = tmp_col_min;
      x->mv_col_max = tmp_col_max;
      x->mv_row_min = tmp_row_min;
      x->mv_row_max = tmp_row_max;
    }
  }

  /* set it to the best */
  for (i = 0; i < 16; i++) {
    BLOCKD *bd = &x->e_mbd.block[i];

    bd->bmi.as_mv[0].as_int = bsi.mvs[i].as_int;
    // second_ref_frame > 0 indicates compound (two-reference) prediction.
    if (mbmi->second_ref_frame > 0)
      bd->bmi.as_mv[1].as_int = bsi.second_mvs[i].as_int;
    bd->eob = bsi.eobs[i];
  }

  *returntotrate = bsi.r;
  *returndistortion = bsi.d;
  *returnyrate = bsi.segment_yrate;
  // Skippability must be checked with the transform size actually chosen.
  *skippable = bsi.txfm_size == TX_4X4 ?
                    vp9_mby_is_skippable_4x4(&x->e_mbd) :
                    vp9_mby_is_skippable_8x8(&x->e_mbd);

  /* save partitions */
  mbmi->txfm_size = bsi.txfm_size;
  mbmi->partitioning = bsi.segment_num;
  x->partition_info->count = vp9_mbsplit_count[bsi.segment_num];

  for (i = 0; i < x->partition_info->count; i++) {
    int j;

    // j = first 4x4 block of partition i in the chosen split layout.
    j = vp9_mbsplit_offset[bsi.segment_num][i];

    x->partition_info->bmi[i].mode = bsi.modes[j];
    x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv;
    if (mbmi->second_ref_frame > 0)
      x->partition_info->bmi[i].second_mv.as_mv = bsi.second_mvs[j].as_mv;
  }
  /*
   * used to set mbmi->mv.as_int
   */
  x->partition_info->bmi[15].mv.as_int = bsi.mvs[15].as_int;
  if (mbmi->second_ref_frame > 0)
    x->partition_info->bmi[15].second_mv.as_int = bsi.second_mvs[15].as_int;

  return (int)(bsi.segment_rd);
}
|
|
|
|
|
2012-11-26 12:29:08 +01:00
|
|
|
static void mv_pred(VP9_COMP *cpi, MACROBLOCK *x,
|
2012-12-19 00:31:19 +01:00
|
|
|
uint8_t *ref_y_buffer, int ref_y_stride,
|
2012-12-05 17:23:38 +01:00
|
|
|
int ref_frame, enum BlockSize block_size ) {
|
2012-11-26 12:29:08 +01:00
|
|
|
MACROBLOCKD *xd = &x->e_mbd;
|
|
|
|
MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
|
|
|
|
int_mv this_mv;
|
|
|
|
int i;
|
|
|
|
int zero_seen = FALSE;
|
2012-11-29 18:02:47 +01:00
|
|
|
int best_index = 0;
|
2012-11-26 12:29:08 +01:00
|
|
|
int best_sad = INT_MAX;
|
|
|
|
int this_sad = INT_MAX;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2012-11-26 12:29:08 +01:00
|
|
|
BLOCK *b = &x->block[0];
|
2012-12-19 00:31:19 +01:00
|
|
|
uint8_t *src_y_ptr = *(b->base_src);
|
|
|
|
uint8_t *ref_y_ptr;
|
2012-11-29 18:02:47 +01:00
|
|
|
int row_offset, col_offset;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2012-11-26 12:29:08 +01:00
|
|
|
// Get the sad for each candidate reference mv
|
|
|
|
for (i = 0; i < 4; i++) {
|
|
|
|
this_mv.as_int = mbmi->ref_mvs[ref_frame][i].as_int;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2012-11-26 12:29:08 +01:00
|
|
|
// The list is at an end if we see 0 for a second time.
|
|
|
|
if (!this_mv.as_int && zero_seen)
|
|
|
|
break;
|
|
|
|
zero_seen = zero_seen || !this_mv.as_int;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2012-11-26 12:29:08 +01:00
|
|
|
row_offset = this_mv.as_mv.row >> 3;
|
|
|
|
col_offset = this_mv.as_mv.col >> 3;
|
|
|
|
ref_y_ptr = ref_y_buffer + (ref_y_stride * row_offset) + col_offset;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2012-11-26 12:29:08 +01:00
|
|
|
// Find sad for current vector.
|
|
|
|
this_sad = cpi->fn_ptr[block_size].sdf(src_y_ptr, b->src_stride,
|
|
|
|
ref_y_ptr, ref_y_stride,
|
|
|
|
0x7fffffff);
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2012-11-26 12:29:08 +01:00
|
|
|
// Note if it is the best so far.
|
|
|
|
if (this_sad < best_sad) {
|
|
|
|
best_sad = this_sad;
|
|
|
|
best_index = i;
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-12-05 17:23:38 +01:00
|
|
|
// Note the index of the mv that worked best in the reference list.
|
|
|
|
x->mv_best_ref_index[ref_frame] = best_index;
|
2010-12-03 17:26:21 +01:00
|
|
|
}
|
|
|
|
|
2013-01-14 23:37:53 +01:00
|
|
|
// Propagate the four chosen 8x8 intra prediction modes into the per-4x4
// bmi entries of the macroblock, then mirror the bmi array into the
// per-block contexts so encode-side code sees the same modes.
// (Fix: removed dead commented-out debug printf.)
static void set_i8x8_block_modes(MACROBLOCK *x, int modes[4]) {
  int i;
  MACROBLOCKD *xd = &x->e_mbd;

  for (i = 0; i < 4; i++) {
    // Each 8x8 sub-block covers a 2x2 group of 4x4 blocks in the 4x4
    // raster grid: the anchor index, its right neighbour (+1) and the
    // pair directly below (+4, +5).
    const int ib = vp9_i8x8_block[i];

    xd->mode_info_context->bmi[ib + 0].as_mode.first = modes[i];
    xd->mode_info_context->bmi[ib + 1].as_mode.first = modes[i];
    xd->mode_info_context->bmi[ib + 4].as_mode.first = modes[i];
    xd->mode_info_context->bmi[ib + 5].as_mode.first = modes[i];
  }

  // Keep the per-block copies in sync with the shared mode info.
  for (i = 0; i < 16; i++) {
    xd->block[i].bmi = xd->mode_info_context->bmi[i];
  }
}
|
|
|
|
|
2012-10-31 22:40:53 +01:00
|
|
|
extern void vp9_calc_ref_probs(int *count, vp9_prob *probs);
|
|
|
|
// Estimate, from this frame's reference-frame usage counts so far, the
// modified reference probabilities to use when prediction of the
// reference frame fails. The predicted reference's count is zeroed
// before renormalizing (it cannot be the signalled reference on the
// failure path), and the tree branch made implicit by the prediction is
// forced to probability 0.
// (Fix: deduplicated the four near-identical branches; behavior is
// unchanged, including GOLDEN and ALTREF sharing mod_refprobs[2].)
static void estimate_curframe_refprobs(VP9_COMP *cpi, vp9_prob mod_refprobs[3],
                                       int pred_ref) {
  int norm_cnt[MAX_REF_FRAMES];
  const int *const rfct = cpi->count_mb_ref_frame_usage;
  int implicit_branch;

  // Usage counts in tree order: intra, last, golden, altref.
  norm_cnt[0] = rfct[INTRA_FRAME];
  norm_cnt[1] = rfct[LAST_FRAME];
  norm_cnt[2] = rfct[GOLDEN_FRAME];
  norm_cnt[3] = rfct[ALTREF_FRAME];

  // Zero the predicted reference's count and pick the tree branch that
  // the prediction makes implicit.
  if (pred_ref == INTRA_FRAME) {
    norm_cnt[0] = 0;
    implicit_branch = 0;
  } else if (pred_ref == LAST_FRAME) {
    norm_cnt[1] = 0;
    implicit_branch = 1;
  } else if (pred_ref == GOLDEN_FRAME) {
    norm_cnt[2] = 0;
    implicit_branch = 2;
  } else {
    norm_cnt[3] = 0;
    implicit_branch = 2;  // ALTREF shares the final branch with GOLDEN.
  }

  vp9_calc_ref_probs(norm_cnt, mod_refprobs);
  mod_refprobs[implicit_branch] = 0;  // This branch implicit
}
|
|
|
|
|
2013-02-06 21:45:28 +01:00
|
|
|
// Blend the bit cost of coding 'val' under two probability tables.
// 'weight' is Q16 fixed point, so the result is the rounded value of
// weight * cost1 + (1 - weight) * cost0. A zero probability entry
// contributes zero cost.
static INLINE unsigned weighted_cost(vp9_prob *tab0, vp9_prob *tab1,
                                     int idx, int val, int weight) {
  const vp9_prob p0 = tab0[idx];
  const vp9_prob p1 = tab1[idx];
  const unsigned cost0 = p0 ? vp9_cost_bit(p0, val) : 0;
  const unsigned cost1 = p1 ? vp9_cost_bit(p1, val) : 0;
  const unsigned blended =
      weight * cost1 + (0x10000 - weight) * cost0 + 0x8000;

  return blended >> 16;
}
|
|
|
|
|
2012-10-31 22:40:53 +01:00
|
|
|
/* Estimate, for every possible reference frame, the bit cost of
 * signalling it for a macroblock in the given segment. Costs blend the
 * previous frame's probabilities with statistics accumulated so far in
 * the current frame, weighted by cpi->seg0_progress (Q16 fixed point).
 * Results are written to ref_costs[0..MAX_REF_FRAMES-1].
 */
static void estimate_ref_frame_costs(VP9_COMP *cpi, int segment_id, unsigned int *ref_costs) {
  VP9_COMMON *cm = &cpi->common;
  MACROBLOCKD *xd = &cpi->mb.e_mbd;
  vp9_prob *mod_refprobs;

  unsigned int cost;
  int pred_ref;
  int pred_flag;
  int pred_ctx;
  int i;

  vp9_prob pred_prob, new_pred_prob;
  int seg_ref_active;
  int seg_ref_count = 0;
  // Does this segment restrict the allowed reference frames?
  seg_ref_active = vp9_segfeature_active(xd,
                                         segment_id,
                                         SEG_LVL_REF_FRAME);

  if (seg_ref_active) {
    // Number of references the segment permits.
    seg_ref_count = vp9_check_segref(xd, segment_id, INTRA_FRAME) +
                    vp9_check_segref(xd, segment_id, LAST_FRAME) +
                    vp9_check_segref(xd, segment_id, GOLDEN_FRAME) +
                    vp9_check_segref(xd, segment_id, ALTREF_FRAME);
  }

  // Get the predicted reference for this mb
  pred_ref = vp9_get_pred_ref(cm, xd);

  // Get the context probability for the prediction flag (based on last frame)
  pred_prob = vp9_get_pred_prob(cm, xd, PRED_REF);

  // Predict probability for current frame based on stats so far
  pred_ctx = vp9_get_pred_context(cm, xd, PRED_REF);
  new_pred_prob = get_binary_prob(cpi->ref_pred_count[pred_ctx][0],
                                  cpi->ref_pred_count[pred_ctx][1]);

  // Get the set of probabilities to use if prediction fails
  mod_refprobs = cm->mod_refprobs[pred_ref];

  // For each possible selected reference frame work out a cost.
  for (i = 0; i < MAX_REF_FRAMES; i++) {
    if (seg_ref_active && seg_ref_count == 1) {
      // Only one reference is allowed: nothing to signal.
      cost = 0;
    } else {
      pred_flag = (i == pred_ref);

      // Get the prediction for the current mb
      // Cost of the prediction-correct/incorrect flag, blended between
      // last-frame and current-frame statistics.
      cost = weighted_cost(&pred_prob, &new_pred_prob, 0,
                           pred_flag, cpi->seg0_progress);
      if (cost > 1024) cost = 768; // i.e. account for 4 bits max.

      // for incorrectly predicted cases
      if (! pred_flag) {
        vp9_prob curframe_mod_refprobs[3];

        // Estimate current-frame failure-path probabilities only once
        // some blocks have been coded; otherwise fall back to the
        // last-frame table alone (zero probs contribute no cost).
        if (cpi->seg0_progress) {
          estimate_curframe_refprobs(cpi, curframe_mod_refprobs, pred_ref);
        } else {
          vpx_memset(curframe_mod_refprobs, 0, sizeof(curframe_mod_refprobs));
        }

        // Walk the reference-frame tree: intra/inter, then last/other,
        // then golden/altref, stopping once frame i is identified.
        cost += weighted_cost(mod_refprobs, curframe_mod_refprobs, 0,
                              (i != INTRA_FRAME), cpi->seg0_progress);
        if (i != INTRA_FRAME) {
          cost += weighted_cost(mod_refprobs, curframe_mod_refprobs, 1,
                                (i != LAST_FRAME), cpi->seg0_progress);
          if (i != LAST_FRAME) {
            cost += weighted_cost(mod_refprobs, curframe_mod_refprobs, 2,
                                  (i != GOLDEN_FRAME), cpi->seg0_progress);
          }
        }
      }
    }

    ref_costs[i] = cost;
  }
}
|
|
|
|
|
2012-08-20 23:43:34 +02:00
|
|
|
static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
|
2012-07-14 00:21:29 +02:00
|
|
|
int mode_index,
|
|
|
|
PARTITION_INFO *partition,
|
|
|
|
int_mv *ref_mv,
|
2012-08-20 23:43:34 +02:00
|
|
|
int_mv *second_ref_mv,
|
2012-11-08 20:03:00 +01:00
|
|
|
int64_t comp_pred_diff[NB_PREDICTION_TYPES],
|
2012-10-09 18:18:21 +02:00
|
|
|
int64_t txfm_size_diff[NB_TXFM_MODES]) {
|
2012-11-08 20:03:00 +01:00
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
|
|
|
// Take a snapshot of the coding context so it can be
|
|
|
|
// restored if we decide to encode this way
|
2013-01-07 20:02:14 +01:00
|
|
|
ctx->skip = x->skip;
|
2012-08-20 23:43:34 +02:00
|
|
|
ctx->best_mode_index = mode_index;
|
|
|
|
vpx_memcpy(&ctx->mic, xd->mode_info_context,
|
2012-07-14 00:21:29 +02:00
|
|
|
sizeof(MODE_INFO));
|
2012-08-20 23:43:34 +02:00
|
|
|
if (partition)
|
|
|
|
vpx_memcpy(&ctx->partition_info, partition,
|
|
|
|
sizeof(PARTITION_INFO));
|
|
|
|
ctx->best_ref_mv.as_int = ref_mv->as_int;
|
|
|
|
ctx->second_best_ref_mv.as_int = second_ref_mv->as_int;
|
|
|
|
|
2012-11-08 20:03:00 +01:00
|
|
|
ctx->single_pred_diff = comp_pred_diff[SINGLE_PREDICTION_ONLY];
|
|
|
|
ctx->comp_pred_diff = comp_pred_diff[COMP_PREDICTION_ONLY];
|
|
|
|
ctx->hybrid_pred_diff = comp_pred_diff[HYBRID_PREDICTION];
|
2012-10-09 18:18:21 +02:00
|
|
|
|
2012-11-08 20:03:00 +01:00
|
|
|
memcpy(ctx->txfm_rd_diff, txfm_size_diff, sizeof(ctx->txfm_rd_diff));
|
2012-04-13 19:26:49 +02:00
|
|
|
}
|
|
|
|
|
2012-11-07 15:50:25 +01:00
|
|
|
/* Accumulate the luma and chroma rate/distortion for an inter mode.
 * Y rate/distortion (and the per-transform-mode cache) come from
 * macro_block_yrd(); UV cost is computed on the 16x16 prediction
 * residual with the transform size already chosen for the block.
 * rate2/distortion2 are running totals that are added to, not reset.
 * The block is skippable only if both planes are skippable.
 */
static void inter_mode_cost(VP9_COMP *cpi, MACROBLOCK *x,
                            int *rate2, int *distortion2, int *rate_y,
                            int *distortion, int* rate_uv, int *distortion_uv,
                            int *skippable, int64_t txfm_cache[NB_TXFM_MODES]) {
  int y_skippable, uv_skippable;

  // Y cost and distortion
  macro_block_yrd(cpi, x, rate_y, distortion, &y_skippable, txfm_cache);

  *rate2 += *rate_y;
  *distortion2 += *distortion;

  // UV cost and distortion
  vp9_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
                    x->e_mbd.predictor, x->src.uv_stride);
  // Chroma RD must use the same transform size as the luma decision.
  if (x->e_mbd.mode_info_context->mbmi.txfm_size != TX_4X4)
    rd_inter16x16_uv_8x8(cpi, x, rate_uv, distortion_uv,
                         cpi->common.full_pixel, &uv_skippable, 1);
  else
    rd_inter16x16_uv_4x4(cpi, x, rate_uv, distortion_uv,
                         cpi->common.full_pixel, &uv_skippable, 1);

  *rate2 += *rate_uv;
  *distortion2 += *distortion_uv;
  *skippable = y_skippable && uv_skippable;
}
|
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
/* Prepare the per-reference-frame state needed before motion search:
 * point yv12_mb[frame_type] at the right position in the reference
 * frame, build and refine the candidate MV list for that reference,
 * and pick the best search seed via mv_pred().
 *
 * NOTE(review): frame_mdcounts is not referenced in this function —
 * presumably kept for signature compatibility with callers; confirm.
 */
static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
                               int idx, MV_REFERENCE_FRAME frame_type,
                               int block_size,
                               int mb_row, int mb_col,
                               int_mv frame_nearest_mv[MAX_REF_FRAMES],
                               int_mv frame_near_mv[MAX_REF_FRAMES],
                               int frame_mdcounts[4][4],
                               YV12_BUFFER_CONFIG yv12_mb[4]) {
  YV12_BUFFER_CONFIG *yv12 = &cpi->common.yv12_fb[idx];
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;

  // Position the reference-frame plane pointers at this macroblock.
  setup_pred_block(&yv12_mb[frame_type], yv12, mb_row, mb_col);

  // Gets an initial list of candidate vectors from neighbours and orders them
  // (previous-frame mode info is unavailable in error-resilient mode).
  vp9_find_mv_refs(&cpi->common, xd, xd->mode_info_context,
                   cpi->common.error_resilient_mode ?
                   0 : xd->prev_mode_info_context,
                   frame_type,
                   mbmi->ref_mvs[frame_type],
                   cpi->common.ref_frame_sign_bias);

  // Candidate refinement carried out at encoder and decoder
  // (skipped — NULL buffer — when the decoder could not do the same).
  vp9_find_best_ref_mvs(xd,
                        cpi->common.error_resilient_mode ||
                        cpi->common.frame_parallel_decoding_mode ?
                        0 : yv12_mb[frame_type].y_buffer,
                        yv12->y_stride,
                        mbmi->ref_mvs[frame_type],
                        &frame_nearest_mv[frame_type],
                        &frame_near_mv[frame_type]);

  // Further refinement that is encode side only to test the top few candidates
  // in full and choose the best as the centre point for subsequent searches.
  mv_pred(cpi, x, yv12_mb[frame_type].y_buffer, yv12->y_stride,
          frame_type, block_size);
}
|
|
|
|
|
2013-02-12 02:08:52 +01:00
|
|
|
static void model_rd_from_var_lapndz(int var, int n, int qstep,
                                     int *rate, int *dist) {
  // This function models the rate and distortion for a Laplacian
  // source with given variance when quantized with a uniform quantizer
  // with given stepsize. The closed form expressions are in:
  // Hang and Chen, "Source Model for transform video coder and its
  // application - Part I: Fundamental Theory", IEEE Trans. Circ.
  // Sys. for Video Tech., April 1997.
  // The function is implemented as piecewise approximation to the
  // exact computation.
  // TODO(debargha): Implement the functions by interpolating from a
  // look-up table
  // NOTE(review): vp9_clear_system_state() brackets the double-precision
  // math — presumably it resets FPU/SIMD state (emms); confirm.
  vp9_clear_system_state();
  {
    double D, R;
    double s2 = (double) var / n;   // per-sample variance
    double s = sqrt(s2);
    double x = qstep / s;           // quantizer step normalized by stddev
    if (x > 1.0) {
      // Coarse-quantization regime: D and R approximated as quadratics
      // in y = exp(-x/2).
      double y = exp(-x / 2);
      double y2 = y * y;
      D = 2.069981728764738 * y2 - 2.764286806516079 * y + 1.003956960819275;
      R = 0.924056758535089 * y2 + 2.738636469814024 * y - 0.005169662030017;
    } else {
      // Fine-quantization regime, with a separate fit for very small x.
      double x2 = x * x;
      D = 0.075303187668830 * x2 + 0.004296954321112 * x - 0.000413209252807;
      if (x > 0.125)
        R = 1 / (-0.03459733614226 * x2 + 0.36561675733603 * x +
                 0.1626989668625);
      else
        R = -1.442252874826093 * log(x) + 1.944647760719664;
    }
    // A negative modelled rate means the coefficients quantize to zero:
    // charge no bits and take the full variance as distortion.
    if (R < 0) {
      *rate = 0;
      *dist = var;
    } else {
      // Scale back to block totals; the 256 factor puts rate in a
      // fixed-point unit (presumably 1/256 bit — confirm vs. callers).
      *rate = (n * R * 256 + 0.5);
      *dist = (n * D * s2 + 0.5);
    }
  }
  vp9_clear_system_state();
}
|
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
|
2012-10-30 01:58:18 +01:00
|
|
|
enum BlockSize block_size,
|
|
|
|
int *saddone, int near_sadidx[],
|
|
|
|
int mdcounts[4], int64_t txfm_cache[],
|
|
|
|
int *rate2, int *distortion, int *skippable,
|
|
|
|
int *compmode_cost,
|
2012-11-07 15:50:25 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
int *compmode_interintra_cost,
|
|
|
|
#endif
|
2012-10-30 01:58:18 +01:00
|
|
|
int *rate_y, int *distortion_y,
|
|
|
|
int *rate_uv, int *distortion_uv,
|
|
|
|
int *mode_excluded, int *disable_skip,
|
2013-02-07 19:09:05 +01:00
|
|
|
int mode_index,
|
2013-02-12 02:08:52 +01:00
|
|
|
INTERPOLATIONFILTERTYPE *best_filter,
|
2012-12-05 17:23:38 +01:00
|
|
|
int_mv frame_mv[MB_MODE_COUNT]
|
|
|
|
[MAX_REF_FRAMES]) {
|
2012-10-31 01:53:32 +01:00
|
|
|
VP9_COMMON *cm = &cpi->common;
|
2012-10-30 01:58:18 +01:00
|
|
|
MACROBLOCKD *xd = &x->e_mbd;
|
|
|
|
MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
|
|
|
|
BLOCK *b = &x->block[0];
|
|
|
|
BLOCKD *d = &xd->block[0];
|
2012-11-07 15:50:25 +01:00
|
|
|
const int is_comp_pred = (mbmi->second_ref_frame > 0);
|
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
const int is_comp_interintra_pred = (mbmi->second_ref_frame == INTRA_FRAME);
|
|
|
|
#endif
|
2012-10-30 01:58:18 +01:00
|
|
|
const int num_refs = is_comp_pred ? 2 : 1;
|
|
|
|
const int this_mode = mbmi->mode;
|
|
|
|
int i;
|
2012-11-07 15:50:25 +01:00
|
|
|
int refs[2] = { mbmi->ref_frame,
|
|
|
|
(mbmi->second_ref_frame < 0 ? 0 : mbmi->second_ref_frame) };
|
2012-10-30 01:58:18 +01:00
|
|
|
int_mv cur_mv[2];
|
2012-12-05 17:23:38 +01:00
|
|
|
int_mv ref_mv[2];
|
2012-10-30 01:58:18 +01:00
|
|
|
int64_t this_rd = 0;
|
2013-02-12 02:08:52 +01:00
|
|
|
unsigned char tmp_ybuf[64 * 64];
|
|
|
|
unsigned char tmp_ubuf[32 * 32];
|
|
|
|
unsigned char tmp_vbuf[32 * 32];
|
|
|
|
int pred_exists = 0;
|
|
|
|
int interpolating_intpel_seen = 0;
|
|
|
|
int intpel_mv;
|
|
|
|
int64_t rd, best_rd = INT64_MAX;
|
2012-10-30 01:58:18 +01:00
|
|
|
|
|
|
|
switch (this_mode) {
|
|
|
|
case NEWMV:
|
2012-12-05 17:23:38 +01:00
|
|
|
ref_mv[0] = mbmi->ref_mvs[refs[0]][0];
|
|
|
|
ref_mv[1] = mbmi->ref_mvs[refs[1]][0];
|
2012-12-10 13:38:48 +01:00
|
|
|
|
2012-10-30 01:58:18 +01:00
|
|
|
if (is_comp_pred) {
|
|
|
|
if (frame_mv[NEWMV][refs[0]].as_int == INVALID_MV ||
|
|
|
|
frame_mv[NEWMV][refs[1]].as_int == INVALID_MV)
|
2013-01-14 20:49:30 +01:00
|
|
|
return INT64_MAX;
|
2012-10-30 20:58:42 +01:00
|
|
|
*rate2 += vp9_mv_bit_cost(&frame_mv[NEWMV][refs[0]],
|
2012-12-05 17:23:38 +01:00
|
|
|
&ref_mv[0],
|
2012-11-09 00:44:39 +01:00
|
|
|
x->nmvjointcost, x->mvcost, 96,
|
2012-10-30 01:58:18 +01:00
|
|
|
x->e_mbd.allow_high_precision_mv);
|
2012-10-30 20:58:42 +01:00
|
|
|
*rate2 += vp9_mv_bit_cost(&frame_mv[NEWMV][refs[1]],
|
2012-12-05 17:23:38 +01:00
|
|
|
&ref_mv[1],
|
2012-11-09 00:44:39 +01:00
|
|
|
x->nmvjointcost, x->mvcost, 96,
|
2012-10-30 01:58:18 +01:00
|
|
|
x->e_mbd.allow_high_precision_mv);
|
|
|
|
} else {
|
|
|
|
int bestsme = INT_MAX;
|
|
|
|
int further_steps, step_param = cpi->sf.first_step;
|
|
|
|
int sadpb = x->sadperbit16;
|
|
|
|
int_mv mvp_full, tmp_mv;
|
|
|
|
int sr = 0;
|
|
|
|
|
|
|
|
int tmp_col_min = x->mv_col_min;
|
|
|
|
int tmp_col_max = x->mv_col_max;
|
|
|
|
int tmp_row_min = x->mv_row_min;
|
|
|
|
int tmp_row_max = x->mv_row_max;
|
|
|
|
|
2012-12-05 17:23:38 +01:00
|
|
|
vp9_clamp_mv_min_max(x, &ref_mv[0]);
|
2012-10-30 01:58:18 +01:00
|
|
|
|
2012-12-10 13:38:48 +01:00
|
|
|
// mvp_full.as_int = ref_mv[0].as_int;
|
2012-12-05 17:23:38 +01:00
|
|
|
mvp_full.as_int =
|
2012-12-10 13:38:48 +01:00
|
|
|
mbmi->ref_mvs[refs[0]][x->mv_best_ref_index[refs[0]]].as_int;
|
|
|
|
|
2012-12-05 17:23:38 +01:00
|
|
|
mvp_full.as_mv.col >>= 3;
|
|
|
|
mvp_full.as_mv.row >>= 3;
|
2012-10-30 01:58:18 +01:00
|
|
|
|
|
|
|
// adjust search range according to sr from mv prediction
|
|
|
|
step_param = MAX(step_param, sr);
|
|
|
|
|
|
|
|
// Further step/diamond searches as necessary
|
|
|
|
further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
|
|
|
|
|
2012-10-30 20:58:42 +01:00
|
|
|
bestsme = vp9_full_pixel_diamond(cpi, x, b, d, &mvp_full, step_param,
|
2012-10-30 01:58:18 +01:00
|
|
|
sadpb, further_steps, 1,
|
|
|
|
&cpi->fn_ptr[block_size],
|
2012-12-05 17:23:38 +01:00
|
|
|
&ref_mv[0], &tmp_mv);
|
2012-10-30 01:58:18 +01:00
|
|
|
|
|
|
|
x->mv_col_min = tmp_col_min;
|
|
|
|
x->mv_col_max = tmp_col_max;
|
|
|
|
x->mv_row_min = tmp_row_min;
|
|
|
|
x->mv_row_max = tmp_row_max;
|
|
|
|
|
|
|
|
if (bestsme < INT_MAX) {
|
|
|
|
int dis; /* TODO: use dis in distortion calculation later. */
|
|
|
|
unsigned int sse;
|
|
|
|
cpi->find_fractional_mv_step(x, b, d, &tmp_mv,
|
2012-12-05 17:23:38 +01:00
|
|
|
&ref_mv[0],
|
2012-10-30 01:58:18 +01:00
|
|
|
x->errorperbit,
|
|
|
|
&cpi->fn_ptr[block_size],
|
2012-11-09 00:44:39 +01:00
|
|
|
x->nmvjointcost, x->mvcost,
|
|
|
|
&dis, &sse);
|
2012-10-30 01:58:18 +01:00
|
|
|
}
|
2013-02-09 04:46:36 +01:00
|
|
|
d->bmi.as_mv[0].as_int = tmp_mv.as_int;
|
|
|
|
frame_mv[NEWMV][refs[0]].as_int = d->bmi.as_mv[0].as_int;
|
2012-10-30 01:58:18 +01:00
|
|
|
|
|
|
|
// Add the new motion vector cost to our rolling cost variable
|
2012-12-05 17:23:38 +01:00
|
|
|
*rate2 += vp9_mv_bit_cost(&tmp_mv, &ref_mv[0],
|
2012-11-09 00:44:39 +01:00
|
|
|
x->nmvjointcost, x->mvcost,
|
|
|
|
96, xd->allow_high_precision_mv);
|
2012-10-30 01:58:18 +01:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NEARMV:
|
2013-02-05 11:28:26 +01:00
|
|
|
case NEARESTMV:
|
2012-10-30 01:58:18 +01:00
|
|
|
case ZEROMV:
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
for (i = 0; i < num_refs; ++i) {
|
|
|
|
cur_mv[i] = frame_mv[this_mode][refs[i]];
|
|
|
|
// Clip "next_nearest" so that it does not extend to far out of image
|
2012-10-31 22:40:53 +01:00
|
|
|
clamp_mv2(&cur_mv[i], xd);
|
2012-10-30 01:58:18 +01:00
|
|
|
if (mv_check_bounds(x, &cur_mv[i]))
|
2013-01-14 20:49:30 +01:00
|
|
|
return INT64_MAX;
|
2012-10-30 01:58:18 +01:00
|
|
|
mbmi->mv[i].as_int = cur_mv[i].as_int;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* We don't include the cost of the second reference here, because there
|
|
|
|
* are only three options: Last/Golden, ARF/Last or Golden/ARF, or in other
|
|
|
|
* words if you present them in that order, the second one is always known
|
|
|
|
* if the first is known */
|
2012-10-31 22:40:53 +01:00
|
|
|
*compmode_cost = vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_COMP),
|
2012-10-30 01:58:18 +01:00
|
|
|
is_comp_pred);
|
2012-11-12 16:09:25 +01:00
|
|
|
*rate2 += vp9_cost_mv_ref(cpi, this_mode,
|
|
|
|
mbmi->mb_mode_context[mbmi->ref_frame]);
|
2012-11-07 15:50:25 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
if (!is_comp_pred) {
|
|
|
|
*compmode_interintra_cost = vp9_cost_bit(cm->fc.interintra_prob,
|
|
|
|
is_comp_interintra_pred);
|
|
|
|
if (is_comp_interintra_pred) {
|
|
|
|
*compmode_interintra_cost +=
|
|
|
|
x->mbmode_cost[xd->frame_type][mbmi->interintra_mode];
|
|
|
|
#if SEPARATE_INTERINTRA_UV
|
|
|
|
*compmode_interintra_cost +=
|
|
|
|
x->intra_uv_mode_cost[xd->frame_type][mbmi->interintra_uv_mode];
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
2012-10-30 01:58:18 +01:00
|
|
|
|
2013-02-12 02:08:52 +01:00
|
|
|
pred_exists = 0;
|
|
|
|
interpolating_intpel_seen = 0;
|
|
|
|
// Are all MVs integer pel for Y and UV
|
|
|
|
intpel_mv = (mbmi->mv[0].as_mv.row & 15) == 0 &&
|
|
|
|
(mbmi->mv[0].as_mv.col & 15) == 0;
|
|
|
|
if (is_comp_pred)
|
|
|
|
intpel_mv &= (mbmi->mv[1].as_mv.row & 15) == 0 &&
|
|
|
|
(mbmi->mv[1].as_mv.col & 15) == 0;
|
|
|
|
// Search for best switchable filter by checking the variance of
|
|
|
|
// pred error irrespective of whether the filter will be used
|
2013-01-06 03:20:25 +01:00
|
|
|
if (block_size == BLOCK_64X64) {
|
2013-02-12 02:08:52 +01:00
|
|
|
int switchable_filter_index, newbest;
|
|
|
|
int tmp_rate_y_i = 0, tmp_rate_u_i = 0, tmp_rate_v_i = 0;
|
|
|
|
int tmp_dist_y_i = 0, tmp_dist_u_i = 0, tmp_dist_v_i = 0;
|
|
|
|
for (switchable_filter_index = 0;
|
|
|
|
switchable_filter_index < VP9_SWITCHABLE_FILTERS;
|
|
|
|
++switchable_filter_index) {
|
|
|
|
int rs = 0;
|
|
|
|
mbmi->interp_filter = vp9_switchable_interp[switchable_filter_index];
|
|
|
|
vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
|
|
|
|
|
|
|
|
if (cpi->common.mcomp_filter_type == SWITCHABLE) {
|
|
|
|
const int c = vp9_get_pred_context(cm, xd, PRED_SWITCHABLE_INTERP);
|
|
|
|
const int m = vp9_switchable_interp_map[mbmi->interp_filter];
|
|
|
|
rs = SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs[c][m];
|
|
|
|
}
|
|
|
|
if (interpolating_intpel_seen && intpel_mv &&
|
|
|
|
vp9_is_interpolating_filter[mbmi->interp_filter]) {
|
|
|
|
rd = RDCOST(x->rdmult, x->rddiv,
|
|
|
|
rs + tmp_rate_y_i + tmp_rate_u_i + tmp_rate_v_i,
|
|
|
|
tmp_dist_y_i + tmp_dist_u_i + tmp_dist_v_i);
|
|
|
|
} else {
|
|
|
|
unsigned int sse, var;
|
|
|
|
int tmp_rate_y, tmp_rate_u, tmp_rate_v;
|
|
|
|
int tmp_dist_y, tmp_dist_u, tmp_dist_v;
|
|
|
|
vp9_build_inter64x64_predictors_sb(xd,
|
|
|
|
xd->dst.y_buffer,
|
|
|
|
xd->dst.u_buffer,
|
|
|
|
xd->dst.v_buffer,
|
|
|
|
xd->dst.y_stride,
|
|
|
|
xd->dst.uv_stride);
|
|
|
|
var = vp9_variance64x64(*(b->base_src), b->src_stride,
|
|
|
|
xd->dst.y_buffer, xd->dst.y_stride, &sse);
|
|
|
|
// Note our transform coeffs are 8 times an orthogonal transform.
|
|
|
|
// Hence quantizer step is also 8 times. To get effective quantizer
|
|
|
|
// we need to divide by 8 before sending to modeling function.
|
|
|
|
model_rd_from_var_lapndz(var, 64 * 64, xd->block[0].dequant[1] >> 3,
|
|
|
|
&tmp_rate_y, &tmp_dist_y);
|
|
|
|
var = vp9_variance32x32(x->src.u_buffer, x->src.uv_stride,
|
|
|
|
xd->dst.u_buffer, xd->dst.uv_stride, &sse);
|
|
|
|
model_rd_from_var_lapndz(var, 32 * 32, xd->block[16].dequant[1] >> 3,
|
|
|
|
&tmp_rate_u, &tmp_dist_u);
|
|
|
|
var = vp9_variance32x32(x->src.v_buffer, x->src.uv_stride,
|
|
|
|
xd->dst.v_buffer, xd->dst.uv_stride, &sse);
|
|
|
|
model_rd_from_var_lapndz(var, 32 * 32, xd->block[20].dequant[1] >> 3,
|
|
|
|
&tmp_rate_v, &tmp_dist_v);
|
|
|
|
rd = RDCOST(x->rdmult, x->rddiv,
|
|
|
|
rs + tmp_rate_y + tmp_rate_u + tmp_rate_v,
|
|
|
|
tmp_dist_y + tmp_dist_u + tmp_dist_v);
|
|
|
|
if (!interpolating_intpel_seen && intpel_mv &&
|
|
|
|
vp9_is_interpolating_filter[mbmi->interp_filter]) {
|
|
|
|
tmp_rate_y_i = tmp_rate_y;
|
|
|
|
tmp_rate_u_i = tmp_rate_u;
|
|
|
|
tmp_rate_v_i = tmp_rate_v;
|
|
|
|
tmp_dist_y_i = tmp_dist_y;
|
|
|
|
tmp_dist_u_i = tmp_dist_u;
|
|
|
|
tmp_dist_v_i = tmp_dist_v;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
newbest = (switchable_filter_index == 0 || rd < best_rd);
|
|
|
|
if (newbest) {
|
|
|
|
best_rd = rd;
|
|
|
|
*best_filter = mbmi->interp_filter;
|
|
|
|
}
|
|
|
|
if ((cm->mcomp_filter_type == SWITCHABLE && newbest) ||
|
|
|
|
(cm->mcomp_filter_type != SWITCHABLE &&
|
|
|
|
cm->mcomp_filter_type == mbmi->interp_filter)) {
|
|
|
|
int i;
|
|
|
|
for (i = 0; i < 64; ++i)
|
|
|
|
vpx_memcpy(tmp_ybuf + i * 64,
|
|
|
|
xd->dst.y_buffer + i * xd->dst.y_stride,
|
|
|
|
sizeof(unsigned char) * 64);
|
|
|
|
for (i = 0; i < 32; ++i)
|
|
|
|
vpx_memcpy(tmp_ubuf + i * 32,
|
|
|
|
xd->dst.u_buffer + i * xd->dst.uv_stride,
|
|
|
|
sizeof(unsigned char) * 32);
|
|
|
|
for (i = 0; i < 32; ++i)
|
|
|
|
vpx_memcpy(tmp_vbuf + i * 32,
|
|
|
|
xd->dst.v_buffer + i * xd->dst.uv_stride,
|
|
|
|
sizeof(unsigned char) * 32);
|
|
|
|
pred_exists = 1;
|
|
|
|
}
|
|
|
|
interpolating_intpel_seen |=
|
|
|
|
intpel_mv && vp9_is_interpolating_filter[mbmi->interp_filter];
|
|
|
|
}
|
2013-01-10 02:21:28 +01:00
|
|
|
} else if (block_size == BLOCK_32X32) {
|
2013-02-12 02:08:52 +01:00
|
|
|
int switchable_filter_index, newbest;
|
|
|
|
int tmp_rate_y_i = 0, tmp_rate_u_i = 0, tmp_rate_v_i = 0;
|
|
|
|
int tmp_dist_y_i = 0, tmp_dist_u_i = 0, tmp_dist_v_i = 0;
|
|
|
|
for (switchable_filter_index = 0;
|
|
|
|
switchable_filter_index < VP9_SWITCHABLE_FILTERS;
|
|
|
|
++switchable_filter_index) {
|
|
|
|
int rs = 0;
|
|
|
|
mbmi->interp_filter = vp9_switchable_interp[switchable_filter_index];
|
|
|
|
vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
|
|
|
|
if (cpi->common.mcomp_filter_type == SWITCHABLE) {
|
|
|
|
const int c = vp9_get_pred_context(cm, xd, PRED_SWITCHABLE_INTERP);
|
|
|
|
const int m = vp9_switchable_interp_map[mbmi->interp_filter];
|
|
|
|
rs = SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs[c][m];
|
|
|
|
}
|
|
|
|
if (interpolating_intpel_seen && intpel_mv &&
|
|
|
|
vp9_is_interpolating_filter[mbmi->interp_filter]) {
|
|
|
|
rd = RDCOST(x->rdmult, x->rddiv,
|
|
|
|
rs + tmp_rate_y_i + tmp_rate_u_i + tmp_rate_v_i,
|
|
|
|
tmp_dist_y_i + tmp_dist_u_i + tmp_dist_v_i);
|
|
|
|
} else {
|
|
|
|
unsigned int sse, var;
|
|
|
|
int tmp_rate_y, tmp_rate_u, tmp_rate_v;
|
|
|
|
int tmp_dist_y, tmp_dist_u, tmp_dist_v;
|
|
|
|
vp9_build_inter32x32_predictors_sb(xd,
|
|
|
|
xd->dst.y_buffer,
|
|
|
|
xd->dst.u_buffer,
|
|
|
|
xd->dst.v_buffer,
|
|
|
|
xd->dst.y_stride,
|
|
|
|
xd->dst.uv_stride);
|
|
|
|
var = vp9_variance32x32(*(b->base_src), b->src_stride,
|
|
|
|
xd->dst.y_buffer, xd->dst.y_stride, &sse);
|
|
|
|
// Note our transform coeffs are 8 times an orthogonal transform.
|
|
|
|
// Hence quantizer step is also 8 times. To get effective quantizer
|
|
|
|
// we need to divide by 8 before sending to modeling function.
|
|
|
|
model_rd_from_var_lapndz(var, 32 * 32, xd->block[0].dequant[1] >> 3,
|
|
|
|
&tmp_rate_y, &tmp_dist_y);
|
|
|
|
var = vp9_variance16x16(x->src.u_buffer, x->src.uv_stride,
|
|
|
|
xd->dst.u_buffer, xd->dst.uv_stride, &sse);
|
|
|
|
model_rd_from_var_lapndz(var, 16 * 16, xd->block[16].dequant[1] >> 3,
|
|
|
|
&tmp_rate_u, &tmp_dist_u);
|
|
|
|
var = vp9_variance16x16(x->src.v_buffer, x->src.uv_stride,
|
|
|
|
xd->dst.v_buffer, xd->dst.uv_stride, &sse);
|
|
|
|
model_rd_from_var_lapndz(var, 16 * 16, xd->block[20].dequant[1] >> 3,
|
|
|
|
&tmp_rate_v, &tmp_dist_v);
|
|
|
|
rd = RDCOST(x->rdmult, x->rddiv,
|
|
|
|
rs + tmp_rate_y + tmp_rate_u + tmp_rate_v,
|
|
|
|
tmp_dist_y + tmp_dist_u + tmp_dist_v);
|
|
|
|
if (!interpolating_intpel_seen && intpel_mv &&
|
|
|
|
vp9_is_interpolating_filter[mbmi->interp_filter]) {
|
|
|
|
tmp_rate_y_i = tmp_rate_y;
|
|
|
|
tmp_rate_u_i = tmp_rate_u;
|
|
|
|
tmp_rate_v_i = tmp_rate_v;
|
|
|
|
tmp_dist_y_i = tmp_dist_y;
|
|
|
|
tmp_dist_u_i = tmp_dist_u;
|
|
|
|
tmp_dist_v_i = tmp_dist_v;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
newbest = (switchable_filter_index == 0 || rd < best_rd);
|
|
|
|
if (newbest) {
|
|
|
|
best_rd = rd;
|
|
|
|
*best_filter = mbmi->interp_filter;
|
|
|
|
}
|
|
|
|
if ((cm->mcomp_filter_type == SWITCHABLE && newbest) ||
|
|
|
|
(cm->mcomp_filter_type != SWITCHABLE &&
|
|
|
|
cm->mcomp_filter_type == mbmi->interp_filter)) {
|
|
|
|
int i;
|
|
|
|
for (i = 0; i < 32; ++i)
|
|
|
|
vpx_memcpy(tmp_ybuf + i * 64,
|
|
|
|
xd->dst.y_buffer + i * xd->dst.y_stride,
|
|
|
|
sizeof(unsigned char) * 32);
|
|
|
|
for (i = 0; i < 16; ++i)
|
|
|
|
vpx_memcpy(tmp_ubuf + i * 32,
|
|
|
|
xd->dst.u_buffer + i * xd->dst.uv_stride,
|
|
|
|
sizeof(unsigned char) * 16);
|
|
|
|
for (i = 0; i < 16; ++i)
|
|
|
|
vpx_memcpy(tmp_vbuf + i * 32,
|
|
|
|
xd->dst.v_buffer + i * xd->dst.uv_stride,
|
|
|
|
sizeof(unsigned char) * 16);
|
|
|
|
pred_exists = 1;
|
|
|
|
}
|
|
|
|
interpolating_intpel_seen |=
|
|
|
|
intpel_mv && vp9_is_interpolating_filter[mbmi->interp_filter];
|
|
|
|
}
|
2013-01-08 19:29:22 +01:00
|
|
|
} else {
|
2013-02-12 02:08:52 +01:00
|
|
|
int switchable_filter_index, newbest;
|
|
|
|
int tmp_rate_y_i = 0, tmp_rate_u_i = 0, tmp_rate_v_i = 0;
|
|
|
|
int tmp_dist_y_i = 0, tmp_dist_u_i = 0, tmp_dist_v_i = 0;
|
2013-01-06 03:20:25 +01:00
|
|
|
assert(block_size == BLOCK_16X16);
|
2013-02-12 02:08:52 +01:00
|
|
|
for (switchable_filter_index = 0;
|
|
|
|
switchable_filter_index < VP9_SWITCHABLE_FILTERS;
|
|
|
|
++switchable_filter_index) {
|
|
|
|
int rs = 0;
|
|
|
|
mbmi->interp_filter = vp9_switchable_interp[switchable_filter_index];
|
|
|
|
vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
|
|
|
|
if (cpi->common.mcomp_filter_type == SWITCHABLE) {
|
|
|
|
const int c = vp9_get_pred_context(cm, xd, PRED_SWITCHABLE_INTERP);
|
|
|
|
const int m = vp9_switchable_interp_map[mbmi->interp_filter];
|
|
|
|
rs = SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs[c][m];
|
|
|
|
}
|
|
|
|
if (interpolating_intpel_seen && intpel_mv &&
|
|
|
|
vp9_is_interpolating_filter[mbmi->interp_filter]) {
|
|
|
|
rd = RDCOST(x->rdmult, x->rddiv,
|
|
|
|
rs + tmp_rate_y_i + tmp_rate_u_i + tmp_rate_v_i,
|
|
|
|
tmp_dist_y_i + tmp_dist_u_i + tmp_dist_v_i);
|
|
|
|
} else {
|
|
|
|
unsigned int sse, var;
|
|
|
|
int tmp_rate_y, tmp_rate_u, tmp_rate_v;
|
|
|
|
int tmp_dist_y, tmp_dist_u, tmp_dist_v;
|
|
|
|
vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
|
|
|
|
if (is_comp_pred)
|
|
|
|
vp9_build_2nd_inter16x16_predictors_mby(xd, xd->predictor, 16);
|
2013-01-06 03:20:25 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
2013-02-12 02:08:52 +01:00
|
|
|
if (is_comp_interintra_pred) {
|
|
|
|
vp9_build_interintra_16x16_predictors_mby(xd, xd->predictor, 16);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
vp9_build_1st_inter16x16_predictors_mbuv(xd, xd->predictor + 256,
|
|
|
|
xd->predictor + 320, 8);
|
|
|
|
if (is_comp_pred)
|
|
|
|
vp9_build_2nd_inter16x16_predictors_mbuv(xd, xd->predictor + 256,
|
|
|
|
xd->predictor + 320, 8);
|
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
if (is_comp_interintra_pred) {
|
|
|
|
vp9_build_interintra_16x16_predictors_mbuv(xd, xd->predictor + 256,
|
|
|
|
xd->predictor + 320, 8);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
var = vp9_variance16x16(*(b->base_src), b->src_stride,
|
|
|
|
xd->predictor, 16, &sse);
|
|
|
|
// Note our transform coeffs are 8 times an orthogonal transform.
|
|
|
|
// Hence quantizer step is also 8 times. To get effective quantizer
|
|
|
|
// we need to divide by 8 before sending to modeling function.
|
|
|
|
model_rd_from_var_lapndz(var, 16 * 16, xd->block[0].dequant[1] >> 3,
|
|
|
|
&tmp_rate_y, &tmp_dist_y);
|
|
|
|
var = vp9_variance8x8(x->src.u_buffer, x->src.uv_stride,
|
|
|
|
&xd->predictor[256], 8, &sse);
|
|
|
|
model_rd_from_var_lapndz(var, 8 * 8, xd->block[16].dequant[1] >> 3,
|
|
|
|
&tmp_rate_u, &tmp_dist_u);
|
|
|
|
var = vp9_variance8x8(x->src.v_buffer, x->src.uv_stride,
|
|
|
|
&xd->predictor[320], 8, &sse);
|
|
|
|
model_rd_from_var_lapndz(var, 8 * 8, xd->block[20].dequant[1] >> 3,
|
|
|
|
&tmp_rate_v, &tmp_dist_v);
|
|
|
|
rd = RDCOST(x->rdmult, x->rddiv,
|
|
|
|
rs + tmp_rate_y + tmp_rate_u + tmp_rate_v,
|
|
|
|
tmp_dist_y + tmp_dist_u + tmp_dist_v);
|
|
|
|
if (!interpolating_intpel_seen && intpel_mv &&
|
|
|
|
vp9_is_interpolating_filter[mbmi->interp_filter]) {
|
|
|
|
tmp_rate_y_i = tmp_rate_y;
|
|
|
|
tmp_rate_u_i = tmp_rate_u;
|
|
|
|
tmp_rate_v_i = tmp_rate_v;
|
|
|
|
tmp_dist_y_i = tmp_dist_y;
|
|
|
|
tmp_dist_u_i = tmp_dist_u;
|
|
|
|
tmp_dist_v_i = tmp_dist_v;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
newbest = (switchable_filter_index == 0 || rd < best_rd);
|
|
|
|
if (newbest) {
|
|
|
|
best_rd = rd;
|
|
|
|
*best_filter = mbmi->interp_filter;
|
|
|
|
}
|
|
|
|
if ((cm->mcomp_filter_type == SWITCHABLE && newbest) ||
|
|
|
|
(cm->mcomp_filter_type != SWITCHABLE &&
|
|
|
|
cm->mcomp_filter_type == mbmi->interp_filter)) {
|
|
|
|
vpx_memcpy(tmp_ybuf, xd->predictor, sizeof(unsigned char) * 256);
|
|
|
|
vpx_memcpy(tmp_ubuf, xd->predictor + 256, sizeof(unsigned char) * 64);
|
|
|
|
vpx_memcpy(tmp_vbuf, xd->predictor + 320, sizeof(unsigned char) * 64);
|
|
|
|
pred_exists = 1;
|
|
|
|
}
|
|
|
|
interpolating_intpel_seen |=
|
|
|
|
intpel_mv && vp9_is_interpolating_filter[mbmi->interp_filter];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set the appripriate filter
|
|
|
|
if (cm->mcomp_filter_type != SWITCHABLE)
|
|
|
|
mbmi->interp_filter = cm->mcomp_filter_type;
|
|
|
|
else
|
|
|
|
mbmi->interp_filter = *best_filter;
|
|
|
|
vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
|
|
|
|
|
|
|
|
if (pred_exists) {
|
|
|
|
if (block_size == BLOCK_64X64) {
|
|
|
|
for (i = 0; i < 64; ++i)
|
|
|
|
vpx_memcpy(xd->dst.y_buffer + i * xd->dst.y_stride, tmp_ybuf + i * 64,
|
|
|
|
sizeof(unsigned char) * 64);
|
|
|
|
for (i = 0; i < 32; ++i)
|
|
|
|
vpx_memcpy(xd->dst.u_buffer + i * xd->dst.uv_stride, tmp_ubuf + i * 32,
|
|
|
|
sizeof(unsigned char) * 32);
|
|
|
|
for (i = 0; i < 32; ++i)
|
|
|
|
vpx_memcpy(xd->dst.v_buffer + i * xd->dst.uv_stride, tmp_vbuf + i * 32,
|
|
|
|
sizeof(unsigned char) * 32);
|
|
|
|
} else if (block_size == BLOCK_32X32) {
|
|
|
|
for (i = 0; i < 32; ++i)
|
|
|
|
vpx_memcpy(xd->dst.y_buffer + i * xd->dst.y_stride, tmp_ybuf + i * 64,
|
|
|
|
sizeof(unsigned char) * 32);
|
|
|
|
for (i = 0; i < 16; ++i)
|
|
|
|
vpx_memcpy(xd->dst.u_buffer + i * xd->dst.uv_stride, tmp_ubuf + i * 32,
|
|
|
|
sizeof(unsigned char) * 16);
|
|
|
|
for (i = 0; i < 16; ++i)
|
|
|
|
vpx_memcpy(xd->dst.v_buffer + i * xd->dst.uv_stride, tmp_vbuf + i * 32,
|
|
|
|
sizeof(unsigned char) * 16);
|
|
|
|
} else {
|
|
|
|
vpx_memcpy(xd->predictor, tmp_ybuf, sizeof(unsigned char) * 256);
|
|
|
|
vpx_memcpy(xd->predictor + 256, tmp_ubuf, sizeof(unsigned char) * 64);
|
|
|
|
vpx_memcpy(xd->predictor + 320, tmp_vbuf, sizeof(unsigned char) * 64);
|
2013-01-06 03:20:25 +01:00
|
|
|
}
|
2013-02-12 02:08:52 +01:00
|
|
|
} else {
|
|
|
|
// Handles the special case when a filter that is not in the
|
|
|
|
// switchable list (ex. bilinear, 6-tap) is indicated at the frame level
|
|
|
|
if (block_size == BLOCK_64X64) {
|
|
|
|
vp9_build_inter64x64_predictors_sb(xd,
|
|
|
|
xd->dst.y_buffer,
|
|
|
|
xd->dst.u_buffer,
|
|
|
|
xd->dst.v_buffer,
|
|
|
|
xd->dst.y_stride,
|
|
|
|
xd->dst.uv_stride);
|
|
|
|
} else if (block_size == BLOCK_32X32) {
|
|
|
|
vp9_build_inter32x32_predictors_sb(xd,
|
|
|
|
xd->dst.y_buffer,
|
|
|
|
xd->dst.u_buffer,
|
|
|
|
xd->dst.v_buffer,
|
|
|
|
xd->dst.y_stride,
|
|
|
|
xd->dst.uv_stride);
|
|
|
|
} else {
|
|
|
|
vp9_build_1st_inter16x16_predictors_mby(xd, xd->predictor, 16, 0);
|
|
|
|
if (is_comp_pred)
|
|
|
|
vp9_build_2nd_inter16x16_predictors_mby(xd, xd->predictor, 16);
|
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
if (is_comp_interintra_pred) {
|
|
|
|
vp9_build_interintra_16x16_predictors_mby(xd, xd->predictor, 16);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
vp9_build_1st_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
|
|
|
|
&xd->predictor[320], 8);
|
|
|
|
if (is_comp_pred)
|
|
|
|
vp9_build_2nd_inter16x16_predictors_mbuv(xd, &xd->predictor[256],
|
|
|
|
&xd->predictor[320], 8);
|
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
if (is_comp_interintra_pred) {
|
|
|
|
vp9_build_interintra_16x16_predictors_mbuv(xd, &xd->predictor[256],
|
|
|
|
&xd->predictor[320], 8);
|
|
|
|
}
|
2012-10-30 01:58:18 +01:00
|
|
|
#endif
|
2013-02-12 02:08:52 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cpi->common.mcomp_filter_type == SWITCHABLE) {
|
|
|
|
const int c = vp9_get_pred_context(cm, xd, PRED_SWITCHABLE_INTERP);
|
|
|
|
const int m = vp9_switchable_interp_map[mbmi->interp_filter];
|
|
|
|
*rate2 += SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs[c][m];
|
2012-10-30 01:58:18 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (cpi->active_map_enabled && x->active_ptr[0] == 0)
|
|
|
|
x->skip = 1;
|
|
|
|
else if (x->encode_breakout) {
|
2013-02-12 02:08:52 +01:00
|
|
|
unsigned int var, sse;
|
2012-10-30 01:58:18 +01:00
|
|
|
int threshold = (xd->block[0].dequant[1]
|
|
|
|
* xd->block[0].dequant[1] >> 4);
|
|
|
|
|
|
|
|
if (threshold < x->encode_breakout)
|
|
|
|
threshold = x->encode_breakout;
|
|
|
|
|
2013-01-06 03:20:25 +01:00
|
|
|
if (block_size == BLOCK_64X64) {
|
|
|
|
var = vp9_variance64x64(*(b->base_src), b->src_stride,
|
|
|
|
xd->dst.y_buffer, xd->dst.y_stride, &sse);
|
2013-01-10 02:21:28 +01:00
|
|
|
} else if (block_size == BLOCK_32X32) {
|
2012-10-30 20:58:42 +01:00
|
|
|
var = vp9_variance32x32(*(b->base_src), b->src_stride,
|
2012-10-30 01:58:18 +01:00
|
|
|
xd->dst.y_buffer, xd->dst.y_stride, &sse);
|
2013-01-08 19:29:22 +01:00
|
|
|
} else {
|
2013-01-06 03:20:25 +01:00
|
|
|
assert(block_size == BLOCK_16X16);
|
|
|
|
var = vp9_variance16x16(*(b->base_src), b->src_stride,
|
|
|
|
xd->predictor, 16, &sse);
|
2012-10-30 01:58:18 +01:00
|
|
|
}
|
|
|
|
|
2012-11-05 23:22:59 +01:00
|
|
|
if ((int)sse < threshold) {
|
2013-02-15 19:15:42 +01:00
|
|
|
unsigned int q2dc = xd->block[0].dequant[0];
|
2012-10-30 01:58:18 +01:00
|
|
|
/* If there is no codeable 2nd order dc
|
2013-02-12 02:08:52 +01:00
|
|
|
or a very small uniform pixel change change */
|
2012-10-30 01:58:18 +01:00
|
|
|
if ((sse - var < q2dc * q2dc >> 4) ||
|
|
|
|
(sse / 2 > var && sse - var < 64)) {
|
|
|
|
// Check u and v to make sure skip is ok
|
|
|
|
int sse2;
|
|
|
|
|
2013-01-06 03:20:25 +01:00
|
|
|
if (block_size == BLOCK_64X64) {
|
|
|
|
unsigned int sse2u, sse2v;
|
|
|
|
var = vp9_variance32x32(x->src.u_buffer, x->src.uv_stride,
|
|
|
|
xd->dst.u_buffer, xd->dst.uv_stride, &sse2u);
|
|
|
|
var = vp9_variance32x32(x->src.v_buffer, x->src.uv_stride,
|
|
|
|
xd->dst.v_buffer, xd->dst.uv_stride, &sse2v);
|
|
|
|
sse2 = sse2u + sse2v;
|
2013-01-10 02:21:28 +01:00
|
|
|
} else if (block_size == BLOCK_32X32) {
|
2012-10-30 01:58:18 +01:00
|
|
|
unsigned int sse2u, sse2v;
|
2012-10-30 20:58:42 +01:00
|
|
|
var = vp9_variance16x16(x->src.u_buffer, x->src.uv_stride,
|
2012-10-30 01:58:18 +01:00
|
|
|
xd->dst.u_buffer, xd->dst.uv_stride, &sse2u);
|
2012-10-30 20:58:42 +01:00
|
|
|
var = vp9_variance16x16(x->src.v_buffer, x->src.uv_stride,
|
2012-10-30 01:58:18 +01:00
|
|
|
xd->dst.v_buffer, xd->dst.uv_stride, &sse2v);
|
|
|
|
sse2 = sse2u + sse2v;
|
2013-01-08 19:29:22 +01:00
|
|
|
} else {
|
2013-01-06 03:20:25 +01:00
|
|
|
assert(block_size == BLOCK_16X16);
|
|
|
|
sse2 = vp9_uvsse(x);
|
2012-10-30 01:58:18 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (sse2 * 2 < threshold) {
|
|
|
|
x->skip = 1;
|
|
|
|
*distortion = sse + sse2;
|
|
|
|
*rate2 = 500;
|
|
|
|
|
|
|
|
/* for best_yrd calculation */
|
|
|
|
*rate_uv = 0;
|
|
|
|
*distortion_uv = sse2;
|
|
|
|
|
|
|
|
*disable_skip = 1;
|
|
|
|
this_rd = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!x->skip) {
|
2013-01-06 03:20:25 +01:00
|
|
|
if (block_size == BLOCK_64X64) {
|
|
|
|
int skippable_y, skippable_uv;
|
|
|
|
|
|
|
|
// Y cost and distortion
|
|
|
|
super_block_64_yrd(cpi, x, rate_y, distortion_y,
|
|
|
|
&skippable_y, txfm_cache);
|
|
|
|
*rate2 += *rate_y;
|
|
|
|
*distortion += *distortion_y;
|
|
|
|
|
|
|
|
rd_inter64x64_uv(cpi, x, rate_uv, distortion_uv,
|
|
|
|
cm->full_pixel, &skippable_uv);
|
|
|
|
|
|
|
|
*rate2 += *rate_uv;
|
|
|
|
*distortion += *distortion_uv;
|
|
|
|
*skippable = skippable_y && skippable_uv;
|
2013-01-10 02:21:28 +01:00
|
|
|
} else if (block_size == BLOCK_32X32) {
|
2012-10-30 01:58:18 +01:00
|
|
|
int skippable_y, skippable_uv;
|
|
|
|
|
2012-11-08 20:03:00 +01:00
|
|
|
// Y cost and distortion
|
|
|
|
super_block_yrd(cpi, x, rate_y, distortion_y,
|
2012-11-25 04:33:58 +01:00
|
|
|
&skippable_y, txfm_cache);
|
2012-10-30 01:58:18 +01:00
|
|
|
*rate2 += *rate_y;
|
|
|
|
*distortion += *distortion_y;
|
|
|
|
|
2012-11-08 20:03:00 +01:00
|
|
|
rd_inter32x32_uv(cpi, x, rate_uv, distortion_uv,
|
|
|
|
cm->full_pixel, &skippable_uv);
|
2012-10-30 01:58:18 +01:00
|
|
|
|
|
|
|
*rate2 += *rate_uv;
|
|
|
|
*distortion += *distortion_uv;
|
|
|
|
*skippable = skippable_y && skippable_uv;
|
2013-01-08 19:29:22 +01:00
|
|
|
} else {
|
2013-01-06 03:20:25 +01:00
|
|
|
assert(block_size == BLOCK_16X16);
|
|
|
|
inter_mode_cost(cpi, x, rate2, distortion,
|
|
|
|
rate_y, distortion_y, rate_uv, distortion_uv,
|
|
|
|
skippable, txfm_cache);
|
2012-10-30 01:58:18 +01:00
|
|
|
}
|
|
|
|
}
|
2013-02-12 02:08:52 +01:00
|
|
|
|
|
|
|
if (!(*mode_excluded)) {
|
|
|
|
if (is_comp_pred) {
|
|
|
|
*mode_excluded = (cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY);
|
|
|
|
} else {
|
|
|
|
*mode_excluded = (cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY);
|
|
|
|
}
|
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
if (is_comp_interintra_pred && !cm->use_interintra) *mode_excluded = 1;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2012-10-30 01:58:18 +01:00
|
|
|
return this_rd; // if 0, this will be re-calculated by caller
|
|
|
|
}
|
|
|
|
|
2012-11-02 19:22:57 +01:00
|
|
|
static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
|
2013-02-07 19:09:05 +01:00
|
|
|
int mb_row, int mb_col,
|
2012-11-02 19:22:57 +01:00
|
|
|
int *returnrate, int *returndistortion,
|
|
|
|
int64_t *returnintra) {
|
2013-02-07 00:54:52 +01:00
|
|
|
static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
|
2013-02-12 02:08:52 +01:00
|
|
|
VP9_ALT_FLAG };
|
2012-10-31 01:53:32 +01:00
|
|
|
VP9_COMMON *cm = &cpi->common;
|
2012-07-14 00:21:29 +02:00
|
|
|
MACROBLOCKD *xd = &x->e_mbd;
|
|
|
|
union b_mode_info best_bmodes[16];
|
|
|
|
MB_MODE_INFO best_mbmode;
|
|
|
|
PARTITION_INFO best_partition;
|
|
|
|
int_mv best_ref_mv, second_best_ref_mv;
|
|
|
|
MB_PREDICTION_MODE this_mode;
|
2012-12-20 23:56:19 +01:00
|
|
|
MB_PREDICTION_MODE best_mode = DC_PRED;
|
2012-10-29 11:36:11 +01:00
|
|
|
MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
|
2012-08-08 01:44:26 +02:00
|
|
|
int i, best_mode_index = 0;
|
2013-01-14 23:37:53 +01:00
|
|
|
int mode8x8[4];
|
2012-08-10 15:12:43 +02:00
|
|
|
unsigned char segment_id = mbmi->segment_id;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
|
|
|
int mode_index;
|
|
|
|
int mdcounts[4];
|
2012-08-07 01:21:23 +02:00
|
|
|
int rate, distortion;
|
|
|
|
int rate2, distortion2;
|
2012-10-09 18:18:21 +02:00
|
|
|
int64_t best_txfm_rd[NB_TXFM_MODES];
|
|
|
|
int64_t best_txfm_diff[NB_TXFM_MODES];
|
2012-08-10 03:25:29 +02:00
|
|
|
int64_t best_pred_diff[NB_PREDICTION_TYPES];
|
|
|
|
int64_t best_pred_rd[NB_PREDICTION_TYPES];
|
2013-01-14 20:49:30 +01:00
|
|
|
int64_t best_rd = INT64_MAX, best_intra_rd = INT64_MAX;
|
2012-11-07 15:50:25 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
int is_best_interintra = 0;
|
2013-01-14 20:49:30 +01:00
|
|
|
int64_t best_intra16_rd = INT64_MAX;
|
2012-11-07 15:50:25 +01:00
|
|
|
int best_intra16_mode = DC_PRED, best_intra16_uv_mode = DC_PRED;
|
2012-07-14 00:21:29 +02:00
|
|
|
#endif
|
2013-01-14 20:49:30 +01:00
|
|
|
int64_t best_overall_rd = INT64_MAX;
|
2012-12-20 23:56:19 +01:00
|
|
|
INTERPOLATIONFILTERTYPE best_filter = SWITCHABLE;
|
2013-02-12 02:08:52 +01:00
|
|
|
INTERPOLATIONFILTERTYPE tmp_best_filter = SWITCHABLE;
|
2012-07-14 00:21:29 +02:00
|
|
|
int uv_intra_rate, uv_intra_distortion, uv_intra_rate_tokenonly;
|
|
|
|
int uv_intra_skippable = 0;
|
|
|
|
int uv_intra_rate_8x8 = 0, uv_intra_distortion_8x8 = 0, uv_intra_rate_tokenonly_8x8 = 0;
|
|
|
|
int uv_intra_skippable_8x8 = 0;
|
|
|
|
int rate_y, UNINITIALIZED_IS_SAFE(rate_uv);
|
2012-08-22 00:08:54 +02:00
|
|
|
int distortion_uv = INT_MAX;
|
2013-01-14 20:49:30 +01:00
|
|
|
int64_t best_yrd = INT64_MAX;
|
2011-01-11 21:00:00 +01:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
MB_PREDICTION_MODE uv_intra_mode;
|
|
|
|
MB_PREDICTION_MODE uv_intra_mode_8x8 = 0;
|
2012-02-18 00:52:30 +01:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7};
|
|
|
|
int saddone = 0;
|
|
|
|
|
2012-08-10 01:07:41 +02:00
|
|
|
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
|
2012-07-14 00:21:29 +02:00
|
|
|
int frame_mdcounts[4][4];
|
2013-02-07 19:09:05 +01:00
|
|
|
YV12_BUFFER_CONFIG yv12_mb[4];
|
2012-07-14 00:21:29 +02:00
|
|
|
|
|
|
|
unsigned int ref_costs[MAX_REF_FRAMES];
|
2012-10-22 20:25:48 +02:00
|
|
|
int_mv seg_mvs[NB_PARTITIONINGS][16 /* n_blocks */][MAX_REF_FRAMES - 1];
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2012-12-14 18:49:46 +01:00
|
|
|
int intra_cost_penalty = 20 * vp9_dc_quant(cpi->common.base_qindex,
|
|
|
|
cpi->common.y1dc_delta_q);
|
|
|
|
|
2012-08-14 01:20:20 +02:00
|
|
|
vpx_memset(mode8x8, 0, sizeof(mode8x8));
|
2012-08-10 01:07:41 +02:00
|
|
|
vpx_memset(&frame_mv, 0, sizeof(frame_mv));
|
2012-07-14 00:21:29 +02:00
|
|
|
vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
|
|
|
|
vpx_memset(&best_bmodes, 0, sizeof(best_bmodes));
|
2013-01-06 03:20:25 +01:00
|
|
|
vpx_memset(&x->mb_context[xd->sb_index][xd->mb_index], 0,
|
|
|
|
sizeof(PICK_MODE_CONTEXT));
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2012-08-10 01:07:41 +02:00
|
|
|
for (i = 0; i < MAX_REF_FRAMES; i++)
|
|
|
|
frame_mv[NEWMV][i].as_int = INVALID_MV;
|
2012-08-10 03:25:29 +02:00
|
|
|
for (i = 0; i < NB_PREDICTION_TYPES; ++i)
|
2013-01-14 20:49:30 +01:00
|
|
|
best_pred_rd[i] = INT64_MAX;
|
2012-10-09 18:18:21 +02:00
|
|
|
for (i = 0; i < NB_TXFM_MODES; i++)
|
2013-01-14 20:49:30 +01:00
|
|
|
best_txfm_rd[i] = INT64_MAX;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2012-10-22 20:25:48 +02:00
|
|
|
for (i = 0; i < NB_PARTITIONINGS; i++) {
|
2012-08-07 01:21:23 +02:00
|
|
|
int j, k;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2012-08-07 01:21:23 +02:00
|
|
|
for (j = 0; j < 16; j++)
|
|
|
|
for (k = 0; k < MAX_REF_FRAMES - 1; k++)
|
2012-07-14 00:21:29 +02:00
|
|
|
seg_mvs[i][j][k].as_int = INVALID_MV;
|
|
|
|
}
|
|
|
|
|
2012-10-31 22:40:53 +01:00
|
|
|
if (cpi->ref_frame_flags & VP9_LAST_FLAG) {
|
2013-01-15 22:49:44 +01:00
|
|
|
setup_buffer_inter(cpi, x, cpi->common.active_ref_idx[cpi->lst_fb_idx],
|
2013-02-07 19:09:05 +01:00
|
|
|
LAST_FRAME, BLOCK_16X16, mb_row, mb_col,
|
2012-12-05 17:23:38 +01:00
|
|
|
frame_mv[NEARESTMV], frame_mv[NEARMV],
|
2013-02-07 19:09:05 +01:00
|
|
|
frame_mdcounts, yv12_mb);
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
|
|
|
|
2012-10-31 22:40:53 +01:00
|
|
|
if (cpi->ref_frame_flags & VP9_GOLD_FLAG) {
|
2013-01-15 22:49:44 +01:00
|
|
|
setup_buffer_inter(cpi, x, cpi->common.active_ref_idx[cpi->gld_fb_idx],
|
2013-02-07 19:09:05 +01:00
|
|
|
GOLDEN_FRAME, BLOCK_16X16, mb_row, mb_col,
|
2012-12-05 17:23:38 +01:00
|
|
|
frame_mv[NEARESTMV], frame_mv[NEARMV],
|
2013-02-07 19:09:05 +01:00
|
|
|
frame_mdcounts, yv12_mb);
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
|
|
|
|
2012-10-31 22:40:53 +01:00
|
|
|
if (cpi->ref_frame_flags & VP9_ALT_FLAG) {
|
2013-01-15 22:49:44 +01:00
|
|
|
setup_buffer_inter(cpi, x, cpi->common.active_ref_idx[cpi->alt_fb_idx],
|
2013-02-07 19:09:05 +01:00
|
|
|
ALTREF_FRAME, BLOCK_16X16, mb_row, mb_col,
|
2012-12-05 17:23:38 +01:00
|
|
|
frame_mv[NEARESTMV], frame_mv[NEARMV],
|
2013-02-07 19:09:05 +01:00
|
|
|
frame_mdcounts, yv12_mb);
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
|
|
|
|
2013-01-14 20:49:30 +01:00
|
|
|
*returnintra = INT64_MAX;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2012-08-10 15:12:43 +02:00
|
|
|
mbmi->ref_frame = INTRA_FRAME;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
|
|
|
/* Initialize zbin mode boost for uv costing */
|
|
|
|
cpi->zbin_mode_boost = 0;
|
2012-10-30 20:58:42 +01:00
|
|
|
vp9_update_zbin_extra(cpi, x);
|
2012-07-14 00:21:29 +02:00
|
|
|
|
|
|
|
rd_pick_intra_mbuv_mode(cpi, x, &uv_intra_rate,
|
2012-08-29 19:43:20 +02:00
|
|
|
&uv_intra_rate_tokenonly, &uv_intra_distortion,
|
|
|
|
&uv_intra_skippable);
|
2012-08-10 15:12:43 +02:00
|
|
|
uv_intra_mode = mbmi->uv_mode;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
|
|
|
/* rough estimate for now */
|
2012-10-09 18:18:21 +02:00
|
|
|
if (cpi->common.txfm_mode != ONLY_4X4) {
|
2012-07-14 00:21:29 +02:00
|
|
|
rd_pick_intra_mbuv_mode_8x8(cpi, x, &uv_intra_rate_8x8,
|
|
|
|
&uv_intra_rate_tokenonly_8x8,
|
2012-08-29 19:43:20 +02:00
|
|
|
&uv_intra_distortion_8x8,
|
|
|
|
&uv_intra_skippable_8x8);
|
2012-08-10 15:12:43 +02:00
|
|
|
uv_intra_mode_8x8 = mbmi->uv_mode;
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Get estimates of reference frame costs for each reference frame
|
|
|
|
// that depend on the current prediction etc.
|
2012-10-31 22:40:53 +01:00
|
|
|
estimate_ref_frame_costs(cpi, segment_id, ref_costs);
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2013-02-12 02:08:52 +01:00
|
|
|
for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
|
2013-01-14 20:49:30 +01:00
|
|
|
int64_t this_rd = INT64_MAX;
|
2012-10-09 18:18:21 +02:00
|
|
|
int disable_skip = 0, skippable = 0;
|
2012-07-14 00:21:29 +02:00
|
|
|
int other_cost = 0;
|
|
|
|
int compmode_cost = 0;
|
2012-11-07 15:50:25 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
int compmode_interintra_cost = 0;
|
|
|
|
#endif
|
2012-07-14 00:21:29 +02:00
|
|
|
int mode_excluded = 0;
|
2012-10-11 02:18:22 +02:00
|
|
|
int64_t txfm_cache[NB_TXFM_MODES] = { 0 };
|
2012-02-22 03:10:18 +01:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
// These variables hold are rolling total cost and distortion for this mode
|
|
|
|
rate2 = 0;
|
|
|
|
distortion2 = 0;
|
|
|
|
rate_y = 0;
|
|
|
|
rate_uv = 0;
|
|
|
|
|
2013-01-14 21:43:12 +01:00
|
|
|
x->skip = 0;
|
|
|
|
|
2012-10-31 01:12:12 +01:00
|
|
|
this_mode = vp9_mode_order[mode_index].mode;
|
2012-08-10 15:12:43 +02:00
|
|
|
mbmi->mode = this_mode;
|
|
|
|
mbmi->uv_mode = DC_PRED;
|
2012-10-31 01:12:12 +01:00
|
|
|
mbmi->ref_frame = vp9_mode_order[mode_index].ref_frame;
|
|
|
|
mbmi->second_ref_frame = vp9_mode_order[mode_index].second_ref_frame;
|
2012-12-20 23:56:19 +01:00
|
|
|
|
2013-02-12 02:08:52 +01:00
|
|
|
mbmi->interp_filter = cm->mcomp_filter_type;
|
|
|
|
vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
|
2012-07-18 22:43:01 +02:00
|
|
|
|
|
|
|
// Test best rd so far against threshold for trying this mode.
|
2012-08-07 01:21:23 +02:00
|
|
|
if (best_rd <= cpi->rd_threshes[mode_index])
|
2012-07-18 22:43:01 +02:00
|
|
|
continue;
|
2012-04-07 01:38:34 +02:00
|
|
|
|
2013-02-07 00:54:52 +01:00
|
|
|
// Ensure that the references used by this mode are available.
|
|
|
|
if (mbmi->ref_frame &&
|
|
|
|
!(cpi->ref_frame_flags & flag_list[mbmi->ref_frame]))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (mbmi->second_ref_frame > 0 &&
|
|
|
|
!(cpi->ref_frame_flags & flag_list[mbmi->second_ref_frame]))
|
|
|
|
continue;
|
|
|
|
|
2012-06-25 21:26:09 +02:00
|
|
|
// current coding mode under rate-distortion optimization test loop
|
2012-11-07 15:50:25 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
mbmi->interintra_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
|
|
|
|
mbmi->interintra_uv_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
|
|
|
|
#endif
|
2012-07-14 00:21:29 +02:00
|
|
|
|
|
|
|
// If the segment reference frame feature is enabled....
|
|
|
|
// then do nothing if the current ref frame is not allowed..
|
2012-10-30 06:15:27 +01:00
|
|
|
if (vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME) &&
|
|
|
|
!vp9_check_segref(xd, segment_id, mbmi->ref_frame)) {
|
2012-07-14 00:21:29 +02:00
|
|
|
continue;
|
2013-01-28 16:22:53 +01:00
|
|
|
// If the segment skip feature is enabled....
|
2012-07-14 00:21:29 +02:00
|
|
|
// then do nothing if the current mode is not allowed..
|
2013-01-28 16:22:53 +01:00
|
|
|
} else if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP) &&
|
|
|
|
(this_mode != ZEROMV)) {
|
2012-07-14 00:21:29 +02:00
|
|
|
continue;
|
2013-01-28 16:22:53 +01:00
|
|
|
// Disable this drop out case if the ref frame segment
|
|
|
|
// level feature is enabled for this segment. This is to
|
2012-07-14 00:21:29 +02:00
|
|
|
// prevent the possibility that we end up unable to pick any mode.
|
2013-01-28 16:22:53 +01:00
|
|
|
} else if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME)) {
|
|
|
|
// Only consider ZEROMV/ALTREF_FRAME for alt ref frame overlay,
|
2012-07-14 00:21:29 +02:00
|
|
|
// unless ARNR filtering is enabled in which case we want
|
|
|
|
// an unfiltered alternative
|
|
|
|
if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
|
|
|
|
if (this_mode != ZEROMV ||
|
2012-08-10 15:12:43 +02:00
|
|
|
mbmi->ref_frame != ALTREF_FRAME) {
|
2012-07-14 00:21:29 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* everything but intra */
|
2012-08-10 15:12:43 +02:00
|
|
|
if (mbmi->ref_frame) {
|
|
|
|
int ref = mbmi->ref_frame;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2013-02-07 19:09:05 +01:00
|
|
|
xd->pre = yv12_mb[ref];
|
2012-12-05 17:23:38 +01:00
|
|
|
best_ref_mv = mbmi->ref_mvs[ref][0];
|
2012-07-14 00:21:29 +02:00
|
|
|
vpx_memcpy(mdcounts, frame_mdcounts[ref], sizeof(mdcounts));
|
|
|
|
}
|
|
|
|
|
2012-11-07 15:50:25 +01:00
|
|
|
if (mbmi->second_ref_frame > 0) {
|
2012-08-10 15:12:43 +02:00
|
|
|
int ref = mbmi->second_ref_frame;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2013-02-07 19:09:05 +01:00
|
|
|
xd->second_pre = yv12_mb[ref];
|
2012-12-05 17:23:38 +01:00
|
|
|
second_best_ref_mv = mbmi->ref_mvs[ref][0];
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Experimental code. Special case for gf and arf zeromv modes.
|
|
|
|
// Increase zbin size to suppress noise
|
|
|
|
if (cpi->zbin_mode_boost_enabled) {
|
2012-10-31 01:12:12 +01:00
|
|
|
if (vp9_mode_order[mode_index].ref_frame == INTRA_FRAME)
|
2012-07-14 00:21:29 +02:00
|
|
|
cpi->zbin_mode_boost = 0;
|
|
|
|
else {
|
2012-10-31 01:12:12 +01:00
|
|
|
if (vp9_mode_order[mode_index].mode == ZEROMV) {
|
|
|
|
if (vp9_mode_order[mode_index].ref_frame != LAST_FRAME)
|
2012-07-14 00:21:29 +02:00
|
|
|
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
|
|
|
|
else
|
|
|
|
cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
|
2012-10-31 01:12:12 +01:00
|
|
|
} else if (vp9_mode_order[mode_index].mode == SPLITMV)
|
2012-07-14 00:21:29 +02:00
|
|
|
cpi->zbin_mode_boost = 0;
|
|
|
|
else
|
|
|
|
cpi->zbin_mode_boost = MV_ZBIN_BOOST;
|
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-10-30 20:58:42 +01:00
|
|
|
vp9_update_zbin_extra(cpi, x);
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-08-08 20:13:53 +02:00
|
|
|
// Intra
|
2012-08-10 15:12:43 +02:00
|
|
|
if (!mbmi->ref_frame) {
|
2012-07-14 00:21:29 +02:00
|
|
|
switch (this_mode) {
|
2012-08-08 20:13:53 +02:00
|
|
|
default:
|
2012-08-07 23:33:15 +02:00
|
|
|
case V_PRED:
|
|
|
|
case H_PRED:
|
|
|
|
case D45_PRED:
|
|
|
|
case D135_PRED:
|
|
|
|
case D117_PRED:
|
|
|
|
case D153_PRED:
|
|
|
|
case D27_PRED:
|
|
|
|
case D63_PRED:
|
2012-12-14 18:49:46 +01:00
|
|
|
rate2 += intra_cost_penalty;
|
|
|
|
case DC_PRED:
|
|
|
|
case TM_PRED:
|
2012-08-10 15:12:43 +02:00
|
|
|
mbmi->ref_frame = INTRA_FRAME;
|
2012-08-07 23:33:15 +02:00
|
|
|
// FIXME compound intra prediction
|
2012-10-31 00:25:53 +01:00
|
|
|
vp9_build_intra_predictors_mby(&x->e_mbd);
|
2012-10-09 18:18:21 +02:00
|
|
|
macro_block_yrd(cpi, x, &rate_y, &distortion, &skippable, txfm_cache);
|
2012-08-07 23:33:15 +02:00
|
|
|
rate2 += rate_y;
|
|
|
|
distortion2 += distortion;
|
2012-10-29 11:36:11 +01:00
|
|
|
rate2 += x->mbmode_cost[xd->frame_type][mbmi->mode];
|
2012-10-09 18:18:21 +02:00
|
|
|
if (mbmi->txfm_size != TX_4X4) {
|
2012-08-07 23:33:15 +02:00
|
|
|
rate2 += uv_intra_rate_8x8;
|
|
|
|
rate_uv = uv_intra_rate_tokenonly_8x8;
|
|
|
|
distortion2 += uv_intra_distortion_8x8;
|
|
|
|
distortion_uv = uv_intra_distortion_8x8;
|
2012-10-09 18:18:21 +02:00
|
|
|
skippable = skippable && uv_intra_skippable_8x8;
|
2012-08-07 23:33:15 +02:00
|
|
|
} else {
|
|
|
|
rate2 += uv_intra_rate;
|
|
|
|
rate_uv = uv_intra_rate_tokenonly;
|
|
|
|
distortion2 += uv_intra_distortion;
|
|
|
|
distortion_uv = uv_intra_distortion;
|
2012-10-09 18:18:21 +02:00
|
|
|
skippable = skippable && uv_intra_skippable;
|
2012-08-07 23:33:15 +02:00
|
|
|
}
|
|
|
|
break;
|
2012-07-14 00:21:29 +02:00
|
|
|
case B_PRED: {
|
2012-08-02 19:07:33 +02:00
|
|
|
int64_t tmp_rd;
|
2011-02-08 22:50:43 +01:00
|
|
|
|
2012-08-08 20:13:53 +02:00
|
|
|
// Note the rate value returned here includes the cost of coding
|
2012-10-29 11:36:11 +01:00
|
|
|
// the BPRED mode : x->mbmode_cost[xd->frame_type][BPRED];
|
2012-10-09 18:18:21 +02:00
|
|
|
mbmi->txfm_size = TX_4X4;
|
2013-01-14 23:37:53 +01:00
|
|
|
tmp_rd = rd_pick_intra4x4mby_modes(cpi, x, &rate, &rate_y,
|
2013-01-25 01:28:53 +01:00
|
|
|
&distortion, best_yrd);
|
2012-07-14 00:21:29 +02:00
|
|
|
rate2 += rate;
|
2012-12-14 18:49:46 +01:00
|
|
|
rate2 += intra_cost_penalty;
|
2012-07-14 00:21:29 +02:00
|
|
|
distortion2 += distortion;
|
|
|
|
|
|
|
|
if (tmp_rd < best_yrd) {
|
|
|
|
rate2 += uv_intra_rate;
|
|
|
|
rate_uv = uv_intra_rate_tokenonly;
|
|
|
|
distortion2 += uv_intra_distortion;
|
|
|
|
distortion_uv = uv_intra_distortion;
|
|
|
|
} else {
|
2013-01-14 20:49:30 +01:00
|
|
|
this_rd = INT64_MAX;
|
2012-07-14 00:21:29 +02:00
|
|
|
disable_skip = 1;
|
|
|
|
}
|
2011-02-08 22:50:43 +01:00
|
|
|
}
|
|
|
|
break;
|
2012-07-14 00:21:29 +02:00
|
|
|
case I8X8_PRED: {
|
2012-10-31 22:40:53 +01:00
|
|
|
int cost0 = vp9_cost_bit(cm->prob_tx[0], 0);
|
|
|
|
int cost1 = vp9_cost_bit(cm->prob_tx[0], 1);
|
2012-10-11 02:18:22 +02:00
|
|
|
int64_t tmp_rd_4x4s, tmp_rd_8x8s;
|
|
|
|
int64_t tmp_rd_4x4, tmp_rd_8x8, tmp_rd;
|
|
|
|
int r4x4, tok4x4, d4x4, r8x8, tok8x8, d8x8;
|
|
|
|
mbmi->txfm_size = TX_4X4;
|
|
|
|
tmp_rd_4x4 = rd_pick_intra8x8mby_modes(cpi, x, &r4x4, &tok4x4,
|
|
|
|
&d4x4, best_yrd);
|
2013-01-14 23:37:53 +01:00
|
|
|
mode8x8[0] = xd->mode_info_context->bmi[0].as_mode.first;
|
|
|
|
mode8x8[1] = xd->mode_info_context->bmi[2].as_mode.first;
|
|
|
|
mode8x8[2] = xd->mode_info_context->bmi[8].as_mode.first;
|
|
|
|
mode8x8[3] = xd->mode_info_context->bmi[10].as_mode.first;
|
2012-10-11 02:18:22 +02:00
|
|
|
mbmi->txfm_size = TX_8X8;
|
|
|
|
tmp_rd_8x8 = rd_pick_intra8x8mby_modes(cpi, x, &r8x8, &tok8x8,
|
|
|
|
&d8x8, best_yrd);
|
|
|
|
txfm_cache[ONLY_4X4] = tmp_rd_4x4;
|
|
|
|
txfm_cache[ALLOW_8X8] = tmp_rd_8x8;
|
|
|
|
txfm_cache[ALLOW_16X16] = tmp_rd_8x8;
|
|
|
|
tmp_rd_4x4s = tmp_rd_4x4 + RDCOST(x->rdmult, x->rddiv, cost0, 0);
|
|
|
|
tmp_rd_8x8s = tmp_rd_8x8 + RDCOST(x->rdmult, x->rddiv, cost1, 0);
|
|
|
|
txfm_cache[TX_MODE_SELECT] = tmp_rd_4x4s < tmp_rd_8x8s ? tmp_rd_4x4s : tmp_rd_8x8s;
|
|
|
|
if (cm->txfm_mode == TX_MODE_SELECT) {
|
|
|
|
if (tmp_rd_4x4s < tmp_rd_8x8s) {
|
|
|
|
rate = r4x4 + cost0;
|
|
|
|
rate_y = tok4x4 + cost0;
|
|
|
|
distortion = d4x4;
|
|
|
|
mbmi->txfm_size = TX_4X4;
|
|
|
|
tmp_rd = tmp_rd_4x4s;
|
|
|
|
} else {
|
|
|
|
rate = r8x8 + cost1;
|
|
|
|
rate_y = tok8x8 + cost1;
|
|
|
|
distortion = d8x8;
|
|
|
|
mbmi->txfm_size = TX_8X8;
|
|
|
|
tmp_rd = tmp_rd_8x8s;
|
|
|
|
|
2013-01-14 23:37:53 +01:00
|
|
|
mode8x8[0] = xd->mode_info_context->bmi[0].as_mode.first;
|
|
|
|
mode8x8[1] = xd->mode_info_context->bmi[2].as_mode.first;
|
|
|
|
mode8x8[2] = xd->mode_info_context->bmi[8].as_mode.first;
|
|
|
|
mode8x8[3] = xd->mode_info_context->bmi[10].as_mode.first;
|
2012-10-11 02:18:22 +02:00
|
|
|
}
|
2012-10-23 01:52:28 +02:00
|
|
|
} else if (cm->txfm_mode == ONLY_4X4) {
|
2012-10-11 02:18:22 +02:00
|
|
|
rate = r4x4;
|
|
|
|
rate_y = tok4x4;
|
|
|
|
distortion = d4x4;
|
|
|
|
mbmi->txfm_size = TX_4X4;
|
|
|
|
tmp_rd = tmp_rd_4x4;
|
|
|
|
} else {
|
|
|
|
rate = r8x8;
|
|
|
|
rate_y = tok8x8;
|
|
|
|
distortion = d8x8;
|
|
|
|
mbmi->txfm_size = TX_8X8;
|
|
|
|
tmp_rd = tmp_rd_8x8;
|
|
|
|
|
2013-01-14 23:37:53 +01:00
|
|
|
mode8x8[0] = xd->mode_info_context->bmi[0].as_mode.first;
|
|
|
|
mode8x8[1] = xd->mode_info_context->bmi[2].as_mode.first;
|
|
|
|
mode8x8[2] = xd->mode_info_context->bmi[8].as_mode.first;
|
|
|
|
mode8x8[3] = xd->mode_info_context->bmi[10].as_mode.first;
|
2012-10-11 02:18:22 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
rate2 += rate;
|
2012-12-14 18:49:46 +01:00
|
|
|
rate2 += intra_cost_penalty;
|
2012-10-11 02:18:22 +02:00
|
|
|
distortion2 += distortion;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
|
|
|
/* TODO: uv rate maybe over-estimated here since there is UV intra
|
|
|
|
mode coded in I8X8_PRED prediction */
|
|
|
|
if (tmp_rd < best_yrd) {
|
|
|
|
rate2 += uv_intra_rate;
|
|
|
|
rate_uv = uv_intra_rate_tokenonly;
|
|
|
|
distortion2 += uv_intra_distortion;
|
|
|
|
distortion_uv = uv_intra_distortion;
|
|
|
|
} else {
|
2013-01-14 20:49:30 +01:00
|
|
|
this_rd = INT64_MAX;
|
2012-07-14 00:21:29 +02:00
|
|
|
disable_skip = 1;
|
|
|
|
}
|
2011-12-01 01:25:00 +01:00
|
|
|
}
|
|
|
|
break;
|
2012-08-08 20:13:53 +02:00
|
|
|
}
|
|
|
|
}
|
2012-08-08 20:49:56 +02:00
|
|
|
// Split MV. The code is very different from the other inter modes so
|
|
|
|
// special case it.
|
|
|
|
else if (this_mode == SPLITMV) {
|
2012-11-07 15:50:25 +01:00
|
|
|
const int is_comp_pred = mbmi->second_ref_frame > 0;
|
2013-02-12 02:08:52 +01:00
|
|
|
int64_t this_rd_thresh;
|
|
|
|
int64_t tmp_rd, tmp_best_rd = INT64_MAX, tmp_best_rdu = INT64_MAX;
|
|
|
|
int tmp_best_rate = INT_MAX, tmp_best_ratey = INT_MAX;
|
|
|
|
int tmp_best_distortion = INT_MAX, tmp_best_skippable = 0;
|
|
|
|
int switchable_filter_index;
|
2012-08-08 20:49:56 +02:00
|
|
|
int_mv *second_ref = is_comp_pred ? &second_best_ref_mv : NULL;
|
2013-02-12 02:08:52 +01:00
|
|
|
union b_mode_info tmp_best_bmodes[16];
|
|
|
|
MB_MODE_INFO tmp_best_mbmode;
|
|
|
|
PARTITION_INFO tmp_best_partition;
|
|
|
|
int pred_exists = 0;
|
2012-08-08 20:49:56 +02:00
|
|
|
|
|
|
|
this_rd_thresh =
|
2013-02-12 02:08:52 +01:00
|
|
|
(mbmi->ref_frame == LAST_FRAME) ?
|
2012-08-08 20:49:56 +02:00
|
|
|
cpi->rd_threshes[THR_NEWMV] : cpi->rd_threshes[THR_NEWA];
|
|
|
|
this_rd_thresh =
|
2013-02-12 02:08:52 +01:00
|
|
|
(mbmi->ref_frame == GOLDEN_FRAME) ?
|
2012-08-08 20:49:56 +02:00
|
|
|
cpi->rd_threshes[THR_NEWG] : this_rd_thresh;
|
|
|
|
|
2013-02-12 02:08:52 +01:00
|
|
|
for (switchable_filter_index = 0;
|
|
|
|
switchable_filter_index < VP9_SWITCHABLE_FILTERS;
|
|
|
|
++switchable_filter_index) {
|
|
|
|
int newbest;
|
|
|
|
mbmi->interp_filter =
|
|
|
|
vp9_switchable_interp[switchable_filter_index];
|
|
|
|
vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
|
|
|
|
|
|
|
|
tmp_rd = rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv,
|
|
|
|
second_ref, best_yrd, mdcounts,
|
|
|
|
&rate, &rate_y, &distortion,
|
|
|
|
&skippable,
|
|
|
|
(int)this_rd_thresh, seg_mvs,
|
|
|
|
txfm_cache);
|
|
|
|
if (cpi->common.mcomp_filter_type == SWITCHABLE) {
|
|
|
|
int rs = SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs
|
|
|
|
[vp9_get_pred_context(&cpi->common, xd,
|
|
|
|
PRED_SWITCHABLE_INTERP)]
|
|
|
|
[vp9_switchable_interp_map[mbmi->interp_filter]];
|
|
|
|
tmp_rd += RDCOST(x->rdmult, x->rddiv, rs, 0);
|
|
|
|
}
|
|
|
|
newbest = (tmp_rd < tmp_best_rd);
|
|
|
|
if (newbest) {
|
|
|
|
tmp_best_filter = mbmi->interp_filter;
|
|
|
|
tmp_best_rd = tmp_rd;
|
|
|
|
}
|
|
|
|
if ((newbest && cm->mcomp_filter_type == SWITCHABLE) ||
|
|
|
|
(mbmi->interp_filter == cm->mcomp_filter_type &&
|
|
|
|
cm->mcomp_filter_type != SWITCHABLE)) {
|
|
|
|
tmp_best_rdu = tmp_rd;
|
|
|
|
tmp_best_rate = rate;
|
|
|
|
tmp_best_ratey = rate_y;
|
|
|
|
tmp_best_distortion = distortion;
|
|
|
|
tmp_best_skippable = skippable;
|
|
|
|
vpx_memcpy(&tmp_best_mbmode, mbmi, sizeof(MB_MODE_INFO));
|
|
|
|
vpx_memcpy(&tmp_best_partition, x->partition_info,
|
|
|
|
sizeof(PARTITION_INFO));
|
|
|
|
for (i = 0; i < 16; i++) {
|
|
|
|
tmp_best_bmodes[i] = xd->block[i].bmi;
|
|
|
|
}
|
|
|
|
pred_exists = 1;
|
|
|
|
}
|
|
|
|
} // switchable_filter_index loop
|
|
|
|
|
|
|
|
mbmi->interp_filter = (cm->mcomp_filter_type == SWITCHABLE ?
|
|
|
|
tmp_best_filter : cm->mcomp_filter_type);
|
|
|
|
vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
|
|
|
|
if (!pred_exists) {
|
|
|
|
// Handles the special case when a filter that is not in the
|
|
|
|
// switchable list (bilinear, 6-tap) is indicated at the frame level
|
|
|
|
tmp_rd = rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv,
|
|
|
|
second_ref, best_yrd, mdcounts,
|
|
|
|
&rate, &rate_y, &distortion,
|
|
|
|
&skippable,
|
|
|
|
(int)this_rd_thresh, seg_mvs,
|
|
|
|
txfm_cache);
|
|
|
|
} else {
|
|
|
|
if (cpi->common.mcomp_filter_type == SWITCHABLE) {
|
|
|
|
int rs = SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs
|
|
|
|
[vp9_get_pred_context(&cpi->common, xd,
|
|
|
|
PRED_SWITCHABLE_INTERP)]
|
|
|
|
[vp9_switchable_interp_map[mbmi->interp_filter]];
|
|
|
|
tmp_best_rdu -= RDCOST(x->rdmult, x->rddiv, rs, 0);
|
|
|
|
}
|
|
|
|
tmp_rd = tmp_best_rdu;
|
|
|
|
rate = tmp_best_rate;
|
|
|
|
rate_y = tmp_best_ratey;
|
|
|
|
distortion = tmp_best_distortion;
|
|
|
|
skippable = tmp_best_skippable;
|
|
|
|
vpx_memcpy(mbmi, &tmp_best_mbmode, sizeof(MB_MODE_INFO));
|
|
|
|
vpx_memcpy(x->partition_info, &tmp_best_partition,
|
|
|
|
sizeof(PARTITION_INFO));
|
|
|
|
for (i = 0; i < 16; i++) {
|
|
|
|
xd->block[i].bmi = tmp_best_bmodes[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-08-08 20:49:56 +02:00
|
|
|
rate2 += rate;
|
|
|
|
distortion2 += distortion;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2012-08-08 20:49:56 +02:00
|
|
|
if (cpi->common.mcomp_filter_type == SWITCHABLE)
|
|
|
|
rate2 += SWITCHABLE_INTERP_RATE_FACTOR * x->switchable_interp_costs
|
2012-10-29 14:44:18 +01:00
|
|
|
[vp9_get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
|
2013-02-12 02:08:52 +01:00
|
|
|
[vp9_switchable_interp_map[mbmi->interp_filter]];
|
2012-12-20 23:56:19 +01:00
|
|
|
|
2012-08-08 20:49:56 +02:00
|
|
|
// If even the 'Y' rd value of split is higher than best so far
|
|
|
|
// then don't bother looking at UV
|
|
|
|
if (tmp_rd < best_yrd) {
|
2012-10-17 23:32:17 +02:00
|
|
|
int uv_skippable;
|
|
|
|
|
|
|
|
rd_inter4x4_uv(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
|
|
|
|
cpi->common.full_pixel);
|
2012-08-08 20:49:56 +02:00
|
|
|
rate2 += rate_uv;
|
|
|
|
distortion2 += distortion_uv;
|
2012-10-17 23:32:17 +02:00
|
|
|
skippable = skippable && uv_skippable;
|
2012-08-08 20:49:56 +02:00
|
|
|
} else {
|
2013-01-14 20:49:30 +01:00
|
|
|
this_rd = INT64_MAX;
|
2012-08-08 20:49:56 +02:00
|
|
|
disable_skip = 1;
|
|
|
|
}
|
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
if (!mode_excluded) {
|
|
|
|
if (is_comp_pred)
|
|
|
|
mode_excluded = cpi->common.comp_pred_mode == SINGLE_PREDICTION_ONLY;
|
|
|
|
else
|
|
|
|
mode_excluded = cpi->common.comp_pred_mode == COMP_PREDICTION_ONLY;
|
|
|
|
}
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2012-08-08 20:49:56 +02:00
|
|
|
compmode_cost =
|
2012-10-31 22:40:53 +01:00
|
|
|
vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_COMP), is_comp_pred);
|
2012-08-10 15:12:43 +02:00
|
|
|
mbmi->mode = this_mode;
|
2012-08-08 20:49:56 +02:00
|
|
|
}
|
2012-08-10 01:07:41 +02:00
|
|
|
else {
|
2012-11-07 15:50:25 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
if (mbmi->second_ref_frame == INTRA_FRAME) {
|
|
|
|
if (best_intra16_mode == DC_PRED - 1) continue;
|
|
|
|
mbmi->interintra_mode = best_intra16_mode;
|
|
|
|
#if SEPARATE_INTERINTRA_UV
|
|
|
|
mbmi->interintra_uv_mode = best_intra16_uv_mode;
|
|
|
|
#else
|
|
|
|
mbmi->interintra_uv_mode = best_intra16_mode;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
#endif
|
2012-10-30 01:58:18 +01:00
|
|
|
this_rd = handle_inter_mode(cpi, x, BLOCK_16X16,
|
|
|
|
&saddone, near_sadidx, mdcounts, txfm_cache,
|
|
|
|
&rate2, &distortion2, &skippable,
|
2012-11-07 15:50:25 +01:00
|
|
|
&compmode_cost,
|
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
&compmode_interintra_cost,
|
|
|
|
#endif
|
|
|
|
&rate_y, &distortion,
|
2012-10-30 01:58:18 +01:00
|
|
|
&rate_uv, &distortion_uv,
|
2013-02-07 19:09:05 +01:00
|
|
|
&mode_excluded, &disable_skip,
|
2013-02-12 02:08:52 +01:00
|
|
|
mode_index, &tmp_best_filter, frame_mv);
|
2013-01-14 20:49:30 +01:00
|
|
|
if (this_rd == INT64_MAX)
|
2012-08-10 01:07:41 +02:00
|
|
|
continue;
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
|
|
|
|
2012-11-07 15:50:25 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
if (cpi->common.use_interintra)
|
|
|
|
rate2 += compmode_interintra_cost;
|
|
|
|
#endif
|
|
|
|
|
2012-08-07 01:21:23 +02:00
|
|
|
if (cpi->common.comp_pred_mode == HYBRID_PREDICTION)
|
2012-07-14 00:21:29 +02:00
|
|
|
rate2 += compmode_cost;
|
|
|
|
|
|
|
|
// Estimate the reference frame signaling cost and add it
|
|
|
|
// to the rolling cost variable.
|
2012-08-10 15:12:43 +02:00
|
|
|
rate2 += ref_costs[mbmi->ref_frame];
|
2012-07-14 00:21:29 +02:00
|
|
|
|
|
|
|
if (!disable_skip) {
|
|
|
|
// Test for the condition where skip block will be activated
|
|
|
|
// because there are no non zero coefficients and make any
|
|
|
|
// necessary adjustment for rate. Ignore if skip is coded at
|
|
|
|
// segment level as the cost won't have been added in.
|
|
|
|
if (cpi->common.mb_no_coeff_skip) {
|
|
|
|
int mb_skip_allowed;
|
|
|
|
|
2013-01-28 16:22:53 +01:00
|
|
|
// Is Mb level skip allowed (i.e. not coded at segment level).
|
|
|
|
mb_skip_allowed = !vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP);
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2012-10-09 18:18:21 +02:00
|
|
|
if (skippable) {
|
|
|
|
mbmi->mb_skip_coeff = 1;
|
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
// Back out the coefficient coding costs
|
|
|
|
rate2 -= (rate_y + rate_uv);
|
|
|
|
// for best_yrd calculation
|
|
|
|
rate_uv = 0;
|
|
|
|
|
|
|
|
if (mb_skip_allowed) {
|
|
|
|
int prob_skip_cost;
|
|
|
|
|
|
|
|
// Cost the skip mb case
|
2012-10-31 22:40:53 +01:00
|
|
|
vp9_prob skip_prob =
|
2012-10-29 14:44:18 +01:00
|
|
|
vp9_get_pred_prob(cm, &x->e_mbd, PRED_MBSKIP);
|
2012-07-14 00:21:29 +02:00
|
|
|
|
|
|
|
if (skip_prob) {
|
2012-10-31 22:40:53 +01:00
|
|
|
prob_skip_cost = vp9_cost_bit(skip_prob, 1);
|
2012-07-14 00:21:29 +02:00
|
|
|
rate2 += prob_skip_cost;
|
|
|
|
other_cost += prob_skip_cost;
|
|
|
|
}
|
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
2012-07-14 00:21:29 +02:00
|
|
|
// Add in the cost of the no skip flag.
|
2012-10-09 18:18:21 +02:00
|
|
|
else {
|
|
|
|
mbmi->mb_skip_coeff = 0;
|
|
|
|
if (mb_skip_allowed) {
|
2012-10-31 22:40:53 +01:00
|
|
|
int prob_skip_cost = vp9_cost_bit(
|
2012-10-29 14:44:18 +01:00
|
|
|
vp9_get_pred_prob(cm, &x->e_mbd, PRED_MBSKIP), 0);
|
2012-10-09 18:18:21 +02:00
|
|
|
rate2 += prob_skip_cost;
|
|
|
|
other_cost += prob_skip_cost;
|
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
// Calculate the final RD estimate for this mode.
|
|
|
|
this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
|
|
|
|
}
|
Dual 16x16 inter prediction.
This patch introduces the concept of dual inter16x16 prediction. A
16x16 inter-predicted macroblock can use 2 references instead of 1,
where both references use the same mvmode (new, near/est, zero). In the
case of newmv, this means that two MVs are coded instead of one. The
frame can be encoded in 3 ways: all MBs single-prediction, all MBs dual
prediction, or per-MB single/dual prediction selection ("hybrid"), in
which case a single bit is coded per-MB to indicate whether the MB uses
single or dual inter prediction.
In the future, we can (maybe?) get further gains by mixing this with
Adrian's 32x32 work, per-segment dual prediction settings, or adding
support for dual splitmv/8x8mv inter prediction.
Gain (on derf-set, CQ mode) is ~2.8% (SSIM) or ~3.6% (glb PSNR). Most
gain is at medium/high bitrates, but there's minor gains at low bitrates
also. Output was confirmed to match between encoder and decoder.
Note for optimization people: this patch introduces a 2nd version of
16x16/8x8 sixtap/bilin functions, which does an avg instead of a
store. They may want to look and make sure this is implemented to
their satisfaction so we can optimize it best in the future.
Change-ID: I59dc84b07cbb3ccf073ac0f756d03d294cb19281
2011-12-06 20:53:02 +01:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
// Keep record of best intra distortion
|
2012-08-10 15:12:43 +02:00
|
|
|
if ((mbmi->ref_frame == INTRA_FRAME) &&
|
2012-07-14 00:21:29 +02:00
|
|
|
(this_rd < best_intra_rd)) {
|
|
|
|
best_intra_rd = this_rd;
|
|
|
|
*returnintra = distortion2;
|
|
|
|
}
|
2012-11-07 15:50:25 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
if ((mbmi->ref_frame == INTRA_FRAME) &&
|
2012-11-30 16:29:43 +01:00
|
|
|
(this_mode <= TM_PRED) &&
|
2012-11-07 15:50:25 +01:00
|
|
|
(this_rd < best_intra16_rd)) {
|
|
|
|
best_intra16_rd = this_rd;
|
|
|
|
best_intra16_mode = this_mode;
|
|
|
|
best_intra16_uv_mode = (mbmi->txfm_size != TX_4X4 ?
|
|
|
|
uv_intra_mode_8x8 : uv_intra_mode);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2012-08-10 03:25:29 +02:00
|
|
|
if (!disable_skip && mbmi->ref_frame == INTRA_FRAME)
|
|
|
|
for (i = 0; i < NB_PREDICTION_TYPES; ++i)
|
|
|
|
best_pred_rd[i] = MIN(best_pred_rd[i], this_rd);
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
if (this_rd < best_overall_rd) {
|
|
|
|
best_overall_rd = this_rd;
|
2013-02-12 02:08:52 +01:00
|
|
|
best_filter = tmp_best_filter;
|
2012-12-20 23:56:19 +01:00
|
|
|
best_mode = this_mode;
|
2012-11-07 15:50:25 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
is_best_interintra = (mbmi->second_ref_frame == INTRA_FRAME);
|
|
|
|
#endif
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
// Did this mode help.. i.e. is it the new best mode
|
|
|
|
if (this_rd < best_rd || x->skip) {
|
|
|
|
if (!mode_excluded) {
|
|
|
|
/*
|
|
|
|
if (mbmi->second_ref_frame == INTRA_FRAME) {
|
|
|
|
printf("rd %d best %d bestintra16 %d\n", this_rd, best_rd, best_intra16_rd);
|
|
|
|
}
|
|
|
|
*/
|
|
|
|
// Note index of best mode so far
|
|
|
|
best_mode_index = mode_index;
|
2012-06-26 01:23:58 +02:00
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
if (this_mode <= B_PRED) {
|
|
|
|
if (mbmi->txfm_size != TX_4X4
|
|
|
|
&& this_mode != B_PRED
|
|
|
|
&& this_mode != I8X8_PRED)
|
|
|
|
mbmi->uv_mode = uv_intra_mode_8x8;
|
|
|
|
else
|
|
|
|
mbmi->uv_mode = uv_intra_mode;
|
|
|
|
/* required for left and above block mv */
|
|
|
|
mbmi->mv[0].as_int = 0;
|
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
other_cost += ref_costs[mbmi->ref_frame];
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
/* Calculate the final y RD estimate for this mode */
|
|
|
|
best_yrd = RDCOST(x->rdmult, x->rddiv, (rate2 - rate_uv - other_cost),
|
|
|
|
(distortion2 - distortion_uv));
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
*returnrate = rate2;
|
|
|
|
*returndistortion = distortion2;
|
|
|
|
best_rd = this_rd;
|
|
|
|
vpx_memcpy(&best_mbmode, mbmi, sizeof(MB_MODE_INFO));
|
|
|
|
vpx_memcpy(&best_partition, x->partition_info, sizeof(PARTITION_INFO));
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
if ((this_mode == B_PRED)
|
|
|
|
|| (this_mode == I8X8_PRED)
|
|
|
|
|| (this_mode == SPLITMV))
|
|
|
|
for (i = 0; i < 16; i++) {
|
|
|
|
best_bmodes[i] = xd->block[i].bmi;
|
|
|
|
}
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
2012-12-20 23:56:19 +01:00
|
|
|
|
|
|
|
// Testing this mode gave rise to an improvement in best error score.
|
|
|
|
// Lower threshold a bit for next time
|
|
|
|
cpi->rd_thresh_mult[mode_index] =
|
|
|
|
(cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ?
|
|
|
|
cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
|
|
|
|
cpi->rd_threshes[mode_index] =
|
|
|
|
(cpi->rd_baseline_thresh[mode_index] >> 7) *
|
|
|
|
cpi->rd_thresh_mult[mode_index];
|
|
|
|
} else {
|
2012-07-18 22:43:01 +02:00
|
|
|
// If the mode did not help improve the best error case then raise the
|
|
|
|
// threshold for testing that mode next time around.
|
2012-12-20 23:56:19 +01:00
|
|
|
cpi->rd_thresh_mult[mode_index] += 4;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
|
|
|
|
cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
cpi->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7)
|
|
|
|
* cpi->rd_thresh_mult[mode_index];
|
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
/* keep record of best compound/single-only prediction */
|
|
|
|
if (!disable_skip && mbmi->ref_frame != INTRA_FRAME) {
|
|
|
|
int64_t single_rd, hybrid_rd;
|
|
|
|
int single_rate, hybrid_rate;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
|
|
|
|
single_rate = rate2 - compmode_cost;
|
|
|
|
hybrid_rate = rate2;
|
|
|
|
} else {
|
|
|
|
single_rate = rate2;
|
|
|
|
hybrid_rate = rate2 + compmode_cost;
|
|
|
|
}
|
2011-12-01 01:25:00 +01:00
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
|
|
|
|
hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
if (mbmi->second_ref_frame <= INTRA_FRAME &&
|
|
|
|
single_rd < best_pred_rd[SINGLE_PREDICTION_ONLY]) {
|
|
|
|
best_pred_rd[SINGLE_PREDICTION_ONLY] = single_rd;
|
|
|
|
} else if (mbmi->second_ref_frame > INTRA_FRAME &&
|
|
|
|
single_rd < best_pred_rd[COMP_PREDICTION_ONLY]) {
|
|
|
|
best_pred_rd[COMP_PREDICTION_ONLY] = single_rd;
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
2012-12-20 23:56:19 +01:00
|
|
|
if (hybrid_rd < best_pred_rd[HYBRID_PREDICTION])
|
|
|
|
best_pred_rd[HYBRID_PREDICTION] = hybrid_rd;
|
|
|
|
}
|
2012-10-09 18:18:21 +02:00
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
/* keep record of best txfm size */
|
2013-01-14 20:49:30 +01:00
|
|
|
if (!mode_excluded && this_rd != INT64_MAX) {
|
2012-12-20 23:56:19 +01:00
|
|
|
for (i = 0; i < NB_TXFM_MODES; i++) {
|
|
|
|
int64_t adj_rd;
|
|
|
|
if (this_mode != B_PRED) {
|
|
|
|
const int64_t txfm_mode_diff =
|
|
|
|
txfm_cache[i] - txfm_cache[cm->txfm_mode];
|
|
|
|
adj_rd = this_rd + txfm_mode_diff;
|
|
|
|
} else {
|
|
|
|
adj_rd = this_rd;
|
2012-10-09 18:18:21 +02:00
|
|
|
}
|
2012-12-20 23:56:19 +01:00
|
|
|
if (adj_rd < best_txfm_rd[i])
|
|
|
|
best_txfm_rd[i] = adj_rd;
|
2012-10-09 18:18:21 +02:00
|
|
|
}
|
2011-05-19 21:03:36 +02:00
|
|
|
}
|
|
|
|
|
2012-08-20 23:43:34 +02:00
|
|
|
if (x->skip && !mode_excluded)
|
2012-07-14 00:21:29 +02:00
|
|
|
break;
|
2013-02-12 02:08:52 +01:00
|
|
|
}
|
2012-12-20 23:56:19 +01:00
|
|
|
|
|
|
|
assert((cm->mcomp_filter_type == SWITCHABLE) ||
|
|
|
|
(cm->mcomp_filter_type == best_mbmode.interp_filter) ||
|
|
|
|
(best_mbmode.mode <= B_PRED));
|
2012-04-07 01:38:34 +02:00
|
|
|
|
2012-11-07 15:50:25 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
++cpi->interintra_select_count[is_best_interintra];
|
2012-07-14 00:21:29 +02:00
|
|
|
#endif
|
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
// Accumulate filter usage stats
|
|
|
|
// TODO(agrange): Use RD criteria to select interpolation filter mode.
|
|
|
|
if ((best_mode >= NEARESTMV) && (best_mode <= SPLITMV))
|
|
|
|
++cpi->best_switchable_interp_count[vp9_switchable_interp_map[best_filter]];
|
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
// Reduce the activation RD thresholds for the best choice mode
|
2012-07-18 22:43:01 +02:00
|
|
|
if ((cpi->rd_baseline_thresh[best_mode_index] > 0) &&
|
|
|
|
(cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2))) {
|
2012-07-14 00:21:29 +02:00
|
|
|
int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 2);
|
|
|
|
|
2012-07-18 22:43:01 +02:00
|
|
|
cpi->rd_thresh_mult[best_mode_index] =
|
|
|
|
(cpi->rd_thresh_mult[best_mode_index] >=
|
|
|
|
(MIN_THRESHMULT + best_adjustment)) ?
|
|
|
|
cpi->rd_thresh_mult[best_mode_index] - best_adjustment : MIN_THRESHMULT;
|
|
|
|
cpi->rd_threshes[best_mode_index] =
|
|
|
|
(cpi->rd_baseline_thresh[best_mode_index] >> 7) *
|
|
|
|
cpi->rd_thresh_mult[best_mode_index];
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
|
|
|
|
2013-01-28 16:22:53 +01:00
|
|
|
// This code forces Altref,0,0 and skip for the frame that overlays
|
2012-07-14 00:21:29 +02:00
|
|
|
// an altref unless Altref is filtered. However, this is unsafe if
|
2013-01-28 16:22:53 +01:00
|
|
|
// segment level coding of ref frame is enabled for this
|
2012-07-14 00:21:29 +02:00
|
|
|
// segment.
|
2012-10-30 06:15:27 +01:00
|
|
|
if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME) &&
|
2012-07-14 00:21:29 +02:00
|
|
|
cpi->is_src_frame_alt_ref &&
|
|
|
|
(cpi->oxcf.arnr_max_frames == 0) &&
|
|
|
|
(best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME)) {
|
2012-08-10 15:12:43 +02:00
|
|
|
mbmi->mode = ZEROMV;
|
2013-01-04 21:05:40 +01:00
|
|
|
if (cm->txfm_mode <= ALLOW_8X8)
|
2012-10-09 18:18:21 +02:00
|
|
|
mbmi->txfm_size = cm->txfm_mode;
|
|
|
|
else
|
|
|
|
mbmi->txfm_size = TX_16X16;
|
2012-08-10 15:12:43 +02:00
|
|
|
mbmi->ref_frame = ALTREF_FRAME;
|
2012-08-10 01:07:41 +02:00
|
|
|
mbmi->mv[0].as_int = 0;
|
2012-08-10 15:12:43 +02:00
|
|
|
mbmi->uv_mode = DC_PRED;
|
|
|
|
mbmi->mb_skip_coeff =
|
2012-07-14 00:21:29 +02:00
|
|
|
(cpi->common.mb_no_coeff_skip) ? 1 : 0;
|
2012-08-10 15:12:43 +02:00
|
|
|
mbmi->partitioning = 0;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2012-08-10 03:25:29 +02:00
|
|
|
vpx_memset(best_pred_diff, 0, sizeof(best_pred_diff));
|
2012-10-09 18:18:21 +02:00
|
|
|
vpx_memset(best_txfm_diff, 0, sizeof(best_txfm_diff));
|
2012-08-10 03:25:29 +02:00
|
|
|
goto end;
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// macroblock modes
|
2012-08-10 15:12:43 +02:00
|
|
|
vpx_memcpy(mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
|
2012-07-14 00:21:29 +02:00
|
|
|
if (best_mbmode.mode == B_PRED) {
|
|
|
|
for (i = 0; i < 16; i++) {
|
|
|
|
xd->mode_info_context->bmi[i].as_mode = best_bmodes[i].as_mode;
|
|
|
|
xd->block[i].bmi.as_mode = xd->mode_info_context->bmi[i].as_mode;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-08-07 01:21:23 +02:00
|
|
|
if (best_mbmode.mode == I8X8_PRED)
|
2012-07-14 00:21:29 +02:00
|
|
|
set_i8x8_block_modes(x, mode8x8);
|
|
|
|
|
|
|
|
if (best_mbmode.mode == SPLITMV) {
|
|
|
|
for (i = 0; i < 16; i++)
|
2013-02-09 04:46:36 +01:00
|
|
|
xd->mode_info_context->bmi[i].as_mv[0].as_int =
|
|
|
|
best_bmodes[i].as_mv[0].as_int;
|
2012-11-07 15:50:25 +01:00
|
|
|
if (mbmi->second_ref_frame > 0)
|
2012-07-14 00:21:29 +02:00
|
|
|
for (i = 0; i < 16; i++)
|
2013-02-09 04:46:36 +01:00
|
|
|
xd->mode_info_context->bmi[i].as_mv[1].as_int =
|
|
|
|
best_bmodes[i].as_mv[1].as_int;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
|
|
|
vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
|
|
|
|
|
2012-08-10 01:07:41 +02:00
|
|
|
mbmi->mv[0].as_int = x->partition_info->bmi[15].mv.as_int;
|
|
|
|
mbmi->mv[1].as_int = x->partition_info->bmi[15].second_mv.as_int;
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
|
|
|
|
2012-08-10 03:25:29 +02:00
|
|
|
for (i = 0; i < NB_PREDICTION_TYPES; ++i) {
|
2013-01-14 20:49:30 +01:00
|
|
|
if (best_pred_rd[i] == INT64_MAX)
|
2012-08-10 03:25:29 +02:00
|
|
|
best_pred_diff[i] = INT_MIN;
|
|
|
|
else
|
|
|
|
best_pred_diff[i] = best_rd - best_pred_rd[i];
|
|
|
|
}
|
|
|
|
|
2012-10-09 18:18:21 +02:00
|
|
|
if (!x->skip) {
|
|
|
|
for (i = 0; i < NB_TXFM_MODES; i++) {
|
2013-01-14 20:49:30 +01:00
|
|
|
if (best_txfm_rd[i] == INT64_MAX)
|
2013-02-16 00:55:31 +01:00
|
|
|
best_txfm_diff[i] = 0;
|
2012-10-09 18:18:21 +02:00
|
|
|
else
|
|
|
|
best_txfm_diff[i] = best_rd - best_txfm_rd[i];
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vpx_memset(best_txfm_diff, 0, sizeof(best_txfm_diff));
|
|
|
|
}
|
|
|
|
|
2012-08-10 03:25:29 +02:00
|
|
|
end:
|
2013-01-06 03:20:25 +01:00
|
|
|
store_coding_context(x, &x->mb_context[xd->sb_index][xd->mb_index],
|
|
|
|
best_mode_index, &best_partition,
|
|
|
|
&mbmi->ref_mvs[mbmi->ref_frame][0],
|
|
|
|
&mbmi->ref_mvs[mbmi->second_ref_frame < 0 ? 0 :
|
|
|
|
mbmi->second_ref_frame][0],
|
|
|
|
best_pred_diff, best_txfm_diff);
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2013-01-06 03:20:25 +01:00
|
|
|
/* Pick the best intra prediction modes for a 32x32 superblock.
 *
 * Runs the luma (Y) and chroma (UV) intra mode searches independently via
 * rate-distortion optimization, then combines the results.  When skip
 * signaling is enabled and both planes code no coefficients, the
 * token-only rate is refunded and the per-transform-size RD differences
 * are cleared; otherwise the full rate is charged plus the cost of the
 * "not skipped" flag, and the RD delta of each transform size relative to
 * the winner is recorded for later transform-mode selection.
 *
 * Outputs:
 *   *returnrate - total estimated rate for the chosen Y/UV modes.
 *   *returndist - combined distortion; the UV term is scaled by >> 2 to
 *                 account for the smaller chroma plane.
 */
void vp9_rd_pick_intra_mode_sb32(VP9_COMP *cpi, MACROBLOCK *x,
                                 int *returnrate,
                                 int *returndist) {
  VP9_COMMON *cm = &cpi->common;
  MACROBLOCKD *xd = &x->e_mbd;
  int rate_y = 0, rate_uv;
  int rate_y_tokenonly = 0, rate_uv_tokenonly;
  int dist_y = 0, dist_uv;
  int y_skip = 0, uv_skip;
  int64_t txfm_cache[NB_TXFM_MODES], err;
  int i;

  err = rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly,
                               &dist_y, &y_skip, txfm_cache);
  rd_pick_intra_sbuv_mode(cpi, x, &rate_uv, &rate_uv_tokenonly,
                          &dist_uv, &uv_skip);

  if (cm->mb_no_coeff_skip && y_skip && uv_skip) {
    // Whole superblock is skippable: charge only the mode bits (refund the
    // token costs) plus the cost of signaling "skip".
    *returnrate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
                  vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 1);
    *returndist = dist_y + (dist_uv >> 2);
    vpx_memset(x->sb32_context[xd->sb_index].txfm_rd_diff, 0,
               sizeof(x->sb32_context[xd->sb_index].txfm_rd_diff));
  } else {
    *returnrate = rate_y + rate_uv;
    if (cm->mb_no_coeff_skip)
      *returnrate += vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 0);
    *returndist = dist_y + (dist_uv >> 2);
    // Record how much worse each transform mode scored versus the winner.
    for (i = 0; i < NB_TXFM_MODES; i++) {
      x->sb32_context[xd->sb_index].txfm_rd_diff[i] = err - txfm_cache[i];
    }
  }
}
|
2013-01-06 03:20:25 +01:00
|
|
|
|
|
|
|
/* Pick the best intra prediction modes for a 64x64 superblock.
 *
 * Mirrors vp9_rd_pick_intra_mode_sb32 but uses the 64x64 Y/UV mode
 * searches and the single sb64 coding context.  If skip signaling is
 * enabled and both planes are skippable, token costs are refunded and the
 * transform RD differences are zeroed; otherwise the full rate (plus the
 * "not skipped" flag cost) is charged and per-transform-size RD deltas
 * are stored for transform-mode selection.
 *
 * Outputs:
 *   *returnrate - total estimated rate for the chosen Y/UV modes.
 *   *returndist - combined distortion; UV scaled by >> 2 for the smaller
 *                 chroma plane.
 */
void vp9_rd_pick_intra_mode_sb64(VP9_COMP *cpi, MACROBLOCK *x,
                                 int *returnrate,
                                 int *returndist) {
  VP9_COMMON *cm = &cpi->common;
  MACROBLOCKD *xd = &x->e_mbd;
  int rate_y = 0, rate_uv;
  int rate_y_tokenonly = 0, rate_uv_tokenonly;
  int dist_y = 0, dist_uv;
  int y_skip = 0, uv_skip;
  int64_t txfm_cache[NB_TXFM_MODES], err;
  int i;

  err = rd_pick_intra_sb64y_mode(cpi, x, &rate_y, &rate_y_tokenonly,
                                 &dist_y, &y_skip, txfm_cache);
  rd_pick_intra_sb64uv_mode(cpi, x, &rate_uv, &rate_uv_tokenonly,
                            &dist_uv, &uv_skip);

  if (cm->mb_no_coeff_skip && y_skip && uv_skip) {
    // Fully skippable: refund the token-only rates and charge the cost of
    // signaling "skip" instead.
    *returnrate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
                  vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 1);
    *returndist = dist_y + (dist_uv >> 2);
    vpx_memset(x->sb64_context.txfm_rd_diff, 0,
               sizeof(x->sb64_context.txfm_rd_diff));
  } else {
    *returnrate = rate_y + rate_uv;
    if (cm->mb_no_coeff_skip)
      *returnrate += vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 0);
    *returndist = dist_y + (dist_uv >> 2);
    // Record how much worse each transform mode scored versus the winner.
    for (i = 0; i < NB_TXFM_MODES; i++) {
      x->sb64_context.txfm_rd_diff[i] = err - txfm_cache[i];
    }
  }
}
|
2011-06-08 18:05:05 +02:00
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
/* Pick the best intra prediction configuration for a 16x16 macroblock.
 *
 * Evaluates four competing luma codings via RD optimization:
 *   - 16x16 prediction (with transform-size search via txfm_cache),
 *   - 8x8 prediction (I8X8_PRED, four sub-block modes),
 *   - 4x4 prediction (B_PRED),
 * plus two chroma codings (4x4 and 8x8 transform UV), and commits the
 * winner into the macroblock's mode info.  If skip signaling is enabled
 * and both the 16x16 luma and 8x8-transform chroma are skippable, the
 * skip path is taken unconditionally and token costs are refunded.
 *
 * Outputs:
 *   *returnrate - estimated rate of the selected configuration.
 *   *returndist - its distortion (UV term scaled by >> 2).
 */
void vp9_rd_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x,
                            int *returnrate, int *returndist) {
  VP9_COMMON *cm = &cpi->common;
  MACROBLOCKD *xd = &x->e_mbd;
  MB_MODE_INFO * mbmi = &x->e_mbd.mode_info_context->mbmi;
  int64_t error4x4, error16x16;
  int rate4x4, rate16x16 = 0, rateuv, rateuv8x8;
  int dist4x4 = 0, dist16x16 = 0, distuv = 0, distuv8x8 = 0;
  int rate;
  int rate4x4_tokenonly = 0;
  int rate16x16_tokenonly = 0;
  int rateuv_tokenonly = 0, rateuv8x8_tokenonly = 0;
  int64_t error8x8;
  int rate8x8_tokenonly = 0;
  int rate8x8, dist8x8;
  int mode16x16;
  int mode8x8[4];
  int dist;
  int modeuv, uv_intra_skippable, uv_intra_skippable_8x8;
  int y_intra16x16_skippable = 0;
  int64_t txfm_cache[NB_TXFM_MODES];
  TX_SIZE txfm_size_16x16;
  int i;

  mbmi->ref_frame = INTRA_FRAME;

  // Chroma search: always do the 4x4-transform UV search; additionally do
  // the 8x8-transform UV search when larger transforms are allowed,
  // otherwise reuse the 4x4 results for the 8x8 slots.
  rd_pick_intra_mbuv_mode(cpi, x, &rateuv, &rateuv_tokenonly, &distuv,
                          &uv_intra_skippable);
  modeuv = mbmi->uv_mode;
  if (cm->txfm_mode != ONLY_4X4) {
    rd_pick_intra_mbuv_mode_8x8(cpi, x, &rateuv8x8, &rateuv8x8_tokenonly,
                                &distuv8x8, &uv_intra_skippable_8x8);
  } else {
    uv_intra_skippable_8x8 = uv_intra_skippable;
    rateuv8x8 = rateuv;
    distuv8x8 = distuv;
    rateuv8x8_tokenonly = rateuv_tokenonly;
  }

  // current macroblock under rate-distortion optimization test loop
  error16x16 = rd_pick_intra16x16mby_mode(cpi, x, &rate16x16,
                                          &rate16x16_tokenonly, &dist16x16,
                                          &y_intra16x16_skippable, txfm_cache);
  mode16x16 = mbmi->mode;
  txfm_size_16x16 = mbmi->txfm_size;  // remember the winning txfm size

  // FIXME(rbultje) support transform-size selection
  mbmi->txfm_size = (cm->txfm_mode == ONLY_4X4) ? TX_4X4 : TX_8X8;
  error8x8 = rd_pick_intra8x8mby_modes(cpi, x, &rate8x8, &rate8x8_tokenonly,
                                       &dist8x8, error16x16);
  // Capture the four 8x8 sub-block modes (corners of the 4x4 bmi grid).
  mode8x8[0] = xd->mode_info_context->bmi[0].as_mode.first;
  mode8x8[1] = xd->mode_info_context->bmi[2].as_mode.first;
  mode8x8[2] = xd->mode_info_context->bmi[8].as_mode.first;
  mode8x8[3] = xd->mode_info_context->bmi[10].as_mode.first;

  mbmi->txfm_size = TX_4X4;
  error4x4 = rd_pick_intra4x4mby_modes(cpi, x,
                                       &rate4x4, &rate4x4_tokenonly,
                                       &dist4x4, error16x16);

  mbmi->mb_skip_coeff = 0;
  if (cm->mb_no_coeff_skip &&
      y_intra16x16_skippable && uv_intra_skippable_8x8) {
    // Both planes skippable: commit 16x16 luma + chosen chroma mode,
    // refund token costs and pay only for signaling "skip".
    mbmi->mb_skip_coeff = 1;
    mbmi->mode = mode16x16;
    mbmi->uv_mode = modeuv;
    rate = rateuv8x8 + rate16x16 - rateuv8x8_tokenonly - rate16x16_tokenonly +
           vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 1);
    dist = dist16x16 + (distuv8x8 >> 2);

    mbmi->txfm_size = txfm_size_16x16;
    vpx_memset(x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff, 0,
               sizeof(x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff));
  } else if (error8x8 > error16x16) {
    // 16x16 beats 8x8; compare against 4x4.
    if (error4x4 < error16x16) {
      rate = rateuv + rate4x4;
      mbmi->mode = B_PRED;
      mbmi->txfm_size = TX_4X4;
      dist = dist4x4 + (distuv >> 2);
      vpx_memset(x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff, 0,
                 sizeof(x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff));
    } else {
      mbmi->txfm_size = txfm_size_16x16;
      mbmi->mode = mode16x16;
      rate = rate16x16 + rateuv8x8;
      dist = dist16x16 + (distuv8x8 >> 2);
      // 16x16 is the only path with a transform-size search: record how
      // much worse each transform mode scored versus the winner.
      for (i = 0; i < NB_TXFM_MODES; i++) {
        x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff[i] =
            error16x16 - txfm_cache[i];
      }
    }
    if (cm->mb_no_coeff_skip)
      rate += vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 0);
  } else {
    // 8x8 beats 16x16; compare against 4x4.
    if (error4x4 < error8x8) {
      rate = rateuv + rate4x4;
      mbmi->mode = B_PRED;
      mbmi->txfm_size = TX_4X4;
      dist = dist4x4 + (distuv >> 2);
      vpx_memset(x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff, 0,
                 sizeof(x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff));
    } else {
      // FIXME(rbultje) support transform-size selection
      mbmi->mode = I8X8_PRED;
      mbmi->txfm_size = (cm->txfm_mode == ONLY_4X4) ? TX_4X4 : TX_8X8;
      set_i8x8_block_modes(x, mode8x8);
      rate = rate8x8 + rateuv;
      dist = dist8x8 + (distuv >> 2);
      vpx_memset(x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff, 0,
                 sizeof(x->mb_context[xd->sb_index][xd->mb_index].txfm_rd_diff));
    }
    if (cm->mb_no_coeff_skip)
      rate += vp9_cost_bit(vp9_get_pred_prob(cm, xd, PRED_MBSKIP), 0);
  }

  *returnrate = rate;
  *returndist = dist;
}
|
|
|
|
|
2013-01-06 03:20:25 +01:00
|
|
|
static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
|
2013-02-07 19:09:05 +01:00
|
|
|
int mb_row, int mb_col,
|
2013-01-06 03:20:25 +01:00
|
|
|
int *returnrate,
|
|
|
|
int *returndistortion,
|
|
|
|
int block_size) {
|
2012-10-31 01:53:32 +01:00
|
|
|
VP9_COMMON *cm = &cpi->common;
|
2012-08-20 23:43:34 +02:00
|
|
|
MACROBLOCKD *xd = &x->e_mbd;
|
2012-10-30 01:58:18 +01:00
|
|
|
MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
|
2012-08-20 23:43:34 +02:00
|
|
|
MB_PREDICTION_MODE this_mode;
|
2012-12-20 23:56:19 +01:00
|
|
|
MB_PREDICTION_MODE best_mode = DC_PRED;
|
2012-08-20 23:43:34 +02:00
|
|
|
MV_REFERENCE_FRAME ref_frame;
|
|
|
|
unsigned char segment_id = xd->mode_info_context->mbmi.segment_id;
|
2012-11-08 20:03:00 +01:00
|
|
|
int comp_pred, i;
|
2012-10-30 01:58:18 +01:00
|
|
|
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
|
2012-08-20 23:43:34 +02:00
|
|
|
int frame_mdcounts[4][4];
|
2013-02-07 19:09:05 +01:00
|
|
|
YV12_BUFFER_CONFIG yv12_mb[4];
|
2012-10-31 22:40:53 +01:00
|
|
|
static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
|
|
|
|
VP9_ALT_FLAG };
|
2013-01-15 22:49:44 +01:00
|
|
|
int idx_list[4] = {0,
|
|
|
|
cpi->common.active_ref_idx[cpi->lst_fb_idx],
|
|
|
|
cpi->common.active_ref_idx[cpi->gld_fb_idx],
|
|
|
|
cpi->common.active_ref_idx[cpi->alt_fb_idx]};
|
2012-08-20 23:43:34 +02:00
|
|
|
int mdcounts[4];
|
2012-10-30 01:58:18 +01:00
|
|
|
int near_sadidx[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
|
2012-08-20 23:43:34 +02:00
|
|
|
int saddone = 0;
|
2013-01-14 20:49:30 +01:00
|
|
|
int64_t best_rd = INT64_MAX;
|
2012-11-08 20:03:00 +01:00
|
|
|
int64_t best_txfm_rd[NB_TXFM_MODES];
|
|
|
|
int64_t best_txfm_diff[NB_TXFM_MODES];
|
|
|
|
int64_t best_pred_diff[NB_PREDICTION_TYPES];
|
|
|
|
int64_t best_pred_rd[NB_PREDICTION_TYPES];
|
2012-08-20 23:43:34 +02:00
|
|
|
MB_MODE_INFO best_mbmode;
|
2012-11-30 16:29:43 +01:00
|
|
|
int mode_index, best_mode_index = 0;
|
2012-08-20 23:43:34 +02:00
|
|
|
unsigned int ref_costs[MAX_REF_FRAMES];
|
2012-11-07 15:50:25 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
int is_best_interintra = 0;
|
2013-01-14 20:49:30 +01:00
|
|
|
int64_t best_intra16_rd = INT64_MAX;
|
2012-11-07 15:50:25 +01:00
|
|
|
int best_intra16_mode = DC_PRED, best_intra16_uv_mode = DC_PRED;
|
|
|
|
#endif
|
2013-01-14 20:49:30 +01:00
|
|
|
int64_t best_overall_rd = INT64_MAX;
|
2012-12-20 23:56:19 +01:00
|
|
|
INTERPOLATIONFILTERTYPE best_filter = SWITCHABLE;
|
2013-02-12 02:08:52 +01:00
|
|
|
INTERPOLATIONFILTERTYPE tmp_best_filter = SWITCHABLE;
|
2012-11-30 16:29:43 +01:00
|
|
|
int rate_uv_4x4 = 0, rate_uv_8x8 = 0, rate_uv_tokenonly_4x4 = 0,
|
|
|
|
rate_uv_tokenonly_8x8 = 0;
|
|
|
|
int dist_uv_4x4 = 0, dist_uv_8x8 = 0, uv_skip_4x4 = 0, uv_skip_8x8 = 0;
|
|
|
|
MB_PREDICTION_MODE mode_uv_4x4 = NEARESTMV, mode_uv_8x8 = NEARESTMV;
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
int rate_uv_16x16 = 0, rate_uv_tokenonly_16x16 = 0;
|
|
|
|
int dist_uv_16x16 = 0, uv_skip_16x16 = 0;
|
2013-01-08 19:29:22 +01:00
|
|
|
MB_PREDICTION_MODE mode_uv_16x16 = NEARESTMV;
|
2012-08-20 23:43:34 +02:00
|
|
|
|
|
|
|
xd->mode_info_context->mbmi.segment_id = segment_id;
|
2012-10-31 22:40:53 +01:00
|
|
|
estimate_ref_frame_costs(cpi, segment_id, ref_costs);
|
2012-10-30 01:58:18 +01:00
|
|
|
vpx_memset(&best_mbmode, 0, sizeof(best_mbmode));
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2012-11-08 20:03:00 +01:00
|
|
|
for (i = 0; i < NB_PREDICTION_TYPES; ++i)
|
2013-01-14 20:49:30 +01:00
|
|
|
best_pred_rd[i] = INT64_MAX;
|
2012-11-08 20:03:00 +01:00
|
|
|
for (i = 0; i < NB_TXFM_MODES; i++)
|
2013-01-14 20:49:30 +01:00
|
|
|
best_txfm_rd[i] = INT64_MAX;
|
2012-11-08 20:03:00 +01:00
|
|
|
|
2012-08-20 23:43:34 +02:00
|
|
|
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
|
|
|
|
if (cpi->ref_frame_flags & flag_list[ref_frame]) {
|
2013-01-06 03:20:25 +01:00
|
|
|
setup_buffer_inter(cpi, x, idx_list[ref_frame], ref_frame, block_size,
|
2013-02-07 19:09:05 +01:00
|
|
|
mb_row, mb_col, frame_mv[NEARESTMV],
|
2012-12-05 17:23:38 +01:00
|
|
|
frame_mv[NEARMV], frame_mdcounts,
|
2013-02-07 19:09:05 +01:00
|
|
|
yv12_mb);
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
2012-10-30 01:58:18 +01:00
|
|
|
frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
|
|
|
|
frame_mv[ZEROMV][ref_frame].as_int = 0;
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
|
|
|
|
2013-01-06 03:20:25 +01:00
|
|
|
if (block_size == BLOCK_64X64) {
|
|
|
|
mbmi->mode = DC_PRED;
|
|
|
|
if (cm->txfm_mode == ONLY_4X4 || cm->txfm_mode == TX_MODE_SELECT) {
|
|
|
|
mbmi->txfm_size = TX_4X4;
|
|
|
|
rd_pick_intra_sb64uv_mode(cpi, x, &rate_uv_4x4, &rate_uv_tokenonly_4x4,
|
|
|
|
&dist_uv_4x4, &uv_skip_4x4);
|
|
|
|
mode_uv_4x4 = mbmi->uv_mode;
|
|
|
|
}
|
|
|
|
if (cm->txfm_mode != ONLY_4X4) {
|
|
|
|
mbmi->txfm_size = TX_8X8;
|
|
|
|
rd_pick_intra_sb64uv_mode(cpi, x, &rate_uv_8x8, &rate_uv_tokenonly_8x8,
|
|
|
|
&dist_uv_8x8, &uv_skip_8x8);
|
|
|
|
mode_uv_8x8 = mbmi->uv_mode;
|
|
|
|
}
|
|
|
|
if (cm->txfm_mode >= ALLOW_32X32) {
|
|
|
|
mbmi->txfm_size = TX_32X32;
|
|
|
|
rd_pick_intra_sb64uv_mode(cpi, x, &rate_uv_16x16,
|
|
|
|
&rate_uv_tokenonly_16x16,
|
|
|
|
&dist_uv_16x16, &uv_skip_16x16);
|
|
|
|
mode_uv_16x16 = mbmi->uv_mode;
|
|
|
|
}
|
2013-01-10 02:21:28 +01:00
|
|
|
} else {
|
2013-01-06 03:20:25 +01:00
|
|
|
assert(block_size == BLOCK_32X32);
|
|
|
|
mbmi->mode = DC_PRED;
|
|
|
|
if (cm->txfm_mode == ONLY_4X4 || cm->txfm_mode == TX_MODE_SELECT) {
|
|
|
|
mbmi->txfm_size = TX_4X4;
|
|
|
|
rd_pick_intra_sbuv_mode(cpi, x, &rate_uv_4x4, &rate_uv_tokenonly_4x4,
|
|
|
|
&dist_uv_4x4, &uv_skip_4x4);
|
|
|
|
mode_uv_4x4 = mbmi->uv_mode;
|
|
|
|
}
|
|
|
|
if (cm->txfm_mode != ONLY_4X4) {
|
|
|
|
mbmi->txfm_size = TX_8X8;
|
|
|
|
rd_pick_intra_sbuv_mode(cpi, x, &rate_uv_8x8, &rate_uv_tokenonly_8x8,
|
|
|
|
&dist_uv_8x8, &uv_skip_8x8);
|
|
|
|
mode_uv_8x8 = mbmi->uv_mode;
|
|
|
|
}
|
|
|
|
if (cm->txfm_mode >= ALLOW_32X32) {
|
|
|
|
mbmi->txfm_size = TX_32X32;
|
|
|
|
rd_pick_intra_sbuv_mode(cpi, x, &rate_uv_16x16, &rate_uv_tokenonly_16x16,
|
|
|
|
&dist_uv_16x16, &uv_skip_16x16);
|
|
|
|
mode_uv_16x16 = mbmi->uv_mode;
|
|
|
|
}
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
}
|
2012-11-16 00:50:07 +01:00
|
|
|
|
2013-02-12 02:08:52 +01:00
|
|
|
for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) {
|
2012-11-16 00:50:07 +01:00
|
|
|
int mode_excluded = 0;
|
2013-01-14 20:49:30 +01:00
|
|
|
int64_t this_rd = INT64_MAX;
|
2012-08-20 23:43:34 +02:00
|
|
|
int disable_skip = 0;
|
|
|
|
int other_cost = 0;
|
|
|
|
int compmode_cost = 0;
|
2012-10-30 01:58:18 +01:00
|
|
|
int rate2 = 0, rate_y = 0, rate_uv = 0;
|
|
|
|
int distortion2 = 0, distortion_y = 0, distortion_uv = 0;
|
|
|
|
int skippable;
|
|
|
|
int64_t txfm_cache[NB_TXFM_MODES];
|
2012-11-07 15:50:25 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
int compmode_interintra_cost = 0;
|
|
|
|
#endif
|
2012-08-20 23:43:34 +02:00
|
|
|
|
|
|
|
// Test best rd so far against threshold for trying this mode.
|
2012-11-24 17:19:04 +01:00
|
|
|
if (best_rd <= cpi->rd_threshes[mode_index] ||
|
|
|
|
cpi->rd_threshes[mode_index] == INT_MAX) {
|
2012-08-20 23:43:34 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2013-01-30 06:42:46 +01:00
|
|
|
x->skip = 0;
|
2012-10-31 01:12:12 +01:00
|
|
|
this_mode = vp9_mode_order[mode_index].mode;
|
|
|
|
ref_frame = vp9_mode_order[mode_index].ref_frame;
|
2012-11-16 00:14:38 +01:00
|
|
|
if (!(ref_frame == INTRA_FRAME ||
|
|
|
|
(cpi->ref_frame_flags & flag_list[ref_frame]))) {
|
|
|
|
continue;
|
|
|
|
}
|
2012-10-30 01:58:18 +01:00
|
|
|
mbmi->ref_frame = ref_frame;
|
2012-11-30 20:46:20 +01:00
|
|
|
mbmi->second_ref_frame = vp9_mode_order[mode_index].second_ref_frame;
|
|
|
|
comp_pred = mbmi->second_ref_frame > INTRA_FRAME;
|
2012-10-30 01:58:18 +01:00
|
|
|
mbmi->mode = this_mode;
|
|
|
|
mbmi->uv_mode = DC_PRED;
|
2012-11-30 20:46:20 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
mbmi->interintra_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
|
|
|
|
mbmi->interintra_uv_mode = (MB_PREDICTION_MODE)(DC_PRED - 1);
|
2012-08-20 23:43:34 +02:00
|
|
|
#endif
|
2012-12-20 23:56:19 +01:00
|
|
|
// Evaluate all sub-pel filters irrespective of whether we can use
|
|
|
|
// them for this frame.
|
2013-02-12 02:08:52 +01:00
|
|
|
mbmi->interp_filter = cm->mcomp_filter_type;
|
|
|
|
vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2012-11-30 20:46:20 +01:00
|
|
|
// if (!(cpi->ref_frame_flags & flag_list[ref_frame]))
|
|
|
|
// continue;
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2012-11-30 20:46:20 +01:00
|
|
|
if (this_mode == I8X8_PRED || this_mode == B_PRED || this_mode == SPLITMV)
|
2012-08-20 23:43:34 +02:00
|
|
|
continue;
|
2012-11-30 20:46:20 +01:00
|
|
|
// if (vp9_mode_order[mode_index].second_ref_frame == INTRA_FRAME)
|
|
|
|
// continue;
|
2012-08-20 23:43:34 +02:00
|
|
|
|
|
|
|
if (comp_pred) {
|
|
|
|
int second_ref;
|
|
|
|
|
|
|
|
if (ref_frame == ALTREF_FRAME) {
|
|
|
|
second_ref = LAST_FRAME;
|
|
|
|
} else {
|
|
|
|
second_ref = ref_frame + 1;
|
|
|
|
}
|
|
|
|
if (!(cpi->ref_frame_flags & flag_list[second_ref]))
|
|
|
|
continue;
|
2012-10-30 01:58:18 +01:00
|
|
|
mbmi->second_ref_frame = second_ref;
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2013-02-07 19:09:05 +01:00
|
|
|
xd->second_pre = yv12_mb[second_ref];
|
2012-12-20 23:56:19 +01:00
|
|
|
mode_excluded =
|
|
|
|
mode_excluded ?
|
|
|
|
mode_excluded : cm->comp_pred_mode == SINGLE_PREDICTION_ONLY;
|
2012-08-20 23:43:34 +02:00
|
|
|
} else {
|
2012-11-30 20:46:20 +01:00
|
|
|
// mbmi->second_ref_frame = vp9_mode_order[mode_index].second_ref_frame;
|
|
|
|
if (ref_frame != INTRA_FRAME) {
|
|
|
|
if (mbmi->second_ref_frame != INTRA_FRAME)
|
2012-12-20 23:56:19 +01:00
|
|
|
mode_excluded =
|
|
|
|
mode_excluded ?
|
|
|
|
mode_excluded : cm->comp_pred_mode == COMP_PREDICTION_ONLY;
|
2012-11-30 20:46:20 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
else
|
2012-12-20 23:56:19 +01:00
|
|
|
mode_excluded = mode_excluded ? mode_excluded : !cm->use_interintra;
|
2012-11-30 20:46:20 +01:00
|
|
|
#endif
|
|
|
|
}
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
|
|
|
|
2013-02-07 19:09:05 +01:00
|
|
|
xd->pre = yv12_mb[ref_frame];
|
2012-08-20 23:43:34 +02:00
|
|
|
vpx_memcpy(mdcounts, frame_mdcounts[ref_frame], sizeof(mdcounts));
|
|
|
|
|
|
|
|
// If the segment reference frame feature is enabled....
|
|
|
|
// then do nothing if the current ref frame is not allowed..
|
2012-10-30 06:15:27 +01:00
|
|
|
if (vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME) &&
|
|
|
|
!vp9_check_segref(xd, segment_id, ref_frame)) {
|
2012-08-20 23:43:34 +02:00
|
|
|
continue;
|
2013-01-28 16:22:53 +01:00
|
|
|
// If the segment skip feature is enabled....
|
2012-08-20 23:43:34 +02:00
|
|
|
// then do nothing if the current mode is not allowed..
|
2013-01-28 16:22:53 +01:00
|
|
|
} else if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP) &&
|
|
|
|
(this_mode != ZEROMV)) {
|
2012-08-20 23:43:34 +02:00
|
|
|
continue;
|
2013-01-28 16:22:53 +01:00
|
|
|
// Disable this drop out case if the ref frame
|
2012-08-20 23:43:34 +02:00
|
|
|
// segment level feature is enabled for this segment. This is to
|
|
|
|
// prevent the possibility that we end up unable to pick any mode.
|
2013-01-28 16:22:53 +01:00
|
|
|
} else if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME)) {
|
2012-08-20 23:43:34 +02:00
|
|
|
// Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
|
|
|
|
// unless ARNR filtering is enabled in which case we want
|
|
|
|
// an unfiltered alternative
|
|
|
|
if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
|
|
|
|
if (this_mode != ZEROMV || ref_frame != ALTREF_FRAME) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-11-16 00:50:07 +01:00
|
|
|
if (ref_frame == INTRA_FRAME) {
|
2013-01-06 03:20:25 +01:00
|
|
|
if (block_size == BLOCK_64X64) {
|
|
|
|
vp9_build_intra_predictors_sb64y_s(xd);
|
|
|
|
super_block_64_yrd(cpi, x, &rate_y, &distortion_y,
|
|
|
|
&skippable, txfm_cache);
|
2013-01-10 02:21:28 +01:00
|
|
|
} else {
|
2013-01-06 03:20:25 +01:00
|
|
|
assert(block_size == BLOCK_32X32);
|
|
|
|
vp9_build_intra_predictors_sby_s(xd);
|
|
|
|
super_block_yrd(cpi, x, &rate_y, &distortion_y,
|
|
|
|
&skippable, txfm_cache);
|
|
|
|
}
|
2012-11-16 00:50:07 +01:00
|
|
|
if (mbmi->txfm_size == TX_4X4) {
|
|
|
|
rate_uv = rate_uv_4x4;
|
|
|
|
distortion_uv = dist_uv_4x4;
|
|
|
|
skippable = skippable && uv_skip_4x4;
|
|
|
|
mbmi->uv_mode = mode_uv_4x4;
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
} else if (mbmi->txfm_size == TX_32X32) {
|
|
|
|
rate_uv = rate_uv_16x16;
|
|
|
|
distortion_uv = dist_uv_16x16;
|
|
|
|
skippable = skippable && uv_skip_16x16;
|
|
|
|
mbmi->uv_mode = mode_uv_16x16;
|
2012-11-16 00:50:07 +01:00
|
|
|
} else {
|
|
|
|
rate_uv = rate_uv_8x8;
|
|
|
|
distortion_uv = dist_uv_8x8;
|
|
|
|
skippable = skippable && uv_skip_8x8;
|
|
|
|
mbmi->uv_mode = mode_uv_8x8;
|
|
|
|
}
|
|
|
|
|
|
|
|
rate2 = rate_y + x->mbmode_cost[cm->frame_type][mbmi->mode] + rate_uv;
|
|
|
|
distortion2 = distortion_y + distortion_uv;
|
|
|
|
} else {
|
2012-11-30 20:46:20 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
if (mbmi->second_ref_frame == INTRA_FRAME) {
|
|
|
|
if (best_intra16_mode == DC_PRED - 1) continue;
|
|
|
|
mbmi->interintra_mode = best_intra16_mode;
|
|
|
|
#if SEPARATE_INTERINTRA_UV
|
|
|
|
mbmi->interintra_uv_mode = best_intra16_uv_mode;
|
|
|
|
#else
|
|
|
|
mbmi->interintra_uv_mode = best_intra16_mode;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
#endif
|
2013-01-06 03:20:25 +01:00
|
|
|
this_rd = handle_inter_mode(cpi, x, block_size,
|
2012-11-16 00:50:07 +01:00
|
|
|
&saddone, near_sadidx, mdcounts, txfm_cache,
|
|
|
|
&rate2, &distortion2, &skippable,
|
|
|
|
&compmode_cost,
|
2012-11-07 15:50:25 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
2012-11-16 00:50:07 +01:00
|
|
|
&compmode_interintra_cost,
|
2012-11-07 15:50:25 +01:00
|
|
|
#endif
|
2012-11-16 00:50:07 +01:00
|
|
|
&rate_y, &distortion_y,
|
|
|
|
&rate_uv, &distortion_uv,
|
2013-02-07 19:09:05 +01:00
|
|
|
&mode_excluded, &disable_skip,
|
2013-02-12 02:08:52 +01:00
|
|
|
mode_index, &tmp_best_filter, frame_mv);
|
2013-01-14 20:49:30 +01:00
|
|
|
if (this_rd == INT64_MAX)
|
2012-11-16 00:50:07 +01:00
|
|
|
continue;
|
|
|
|
}
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2012-11-30 20:46:20 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
if (cpi->common.use_interintra) {
|
|
|
|
rate2 += compmode_interintra_cost;
|
|
|
|
}
|
|
|
|
#endif
|
2012-08-20 23:43:34 +02:00
|
|
|
if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
|
|
|
|
rate2 += compmode_cost;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Estimate the reference frame signaling cost and add it
|
|
|
|
// to the rolling cost variable.
|
|
|
|
rate2 += ref_costs[xd->mode_info_context->mbmi.ref_frame];
|
|
|
|
|
|
|
|
if (!disable_skip) {
|
|
|
|
// Test for the condition where skip block will be activated
|
|
|
|
// because there are no non zero coefficients and make any
|
|
|
|
// necessary adjustment for rate. Ignore if skip is coded at
|
|
|
|
// segment level as the cost wont have been added in.
|
|
|
|
if (cpi->common.mb_no_coeff_skip) {
|
|
|
|
int mb_skip_allowed;
|
|
|
|
|
2013-01-28 16:22:53 +01:00
|
|
|
// Is Mb level skip allowed (i.e. not coded at segment level).
|
|
|
|
mb_skip_allowed = !vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP);
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2012-10-30 01:58:18 +01:00
|
|
|
if (skippable) {
|
2012-08-20 23:43:34 +02:00
|
|
|
// Back out the coefficient coding costs
|
|
|
|
rate2 -= (rate_y + rate_uv);
|
|
|
|
// for best_yrd calculation
|
|
|
|
rate_uv = 0;
|
|
|
|
|
|
|
|
if (mb_skip_allowed) {
|
|
|
|
int prob_skip_cost;
|
|
|
|
|
|
|
|
// Cost the skip mb case
|
2012-10-31 22:40:53 +01:00
|
|
|
vp9_prob skip_prob =
|
2012-10-29 14:44:18 +01:00
|
|
|
vp9_get_pred_prob(cm, xd, PRED_MBSKIP);
|
2012-08-20 23:43:34 +02:00
|
|
|
|
|
|
|
if (skip_prob) {
|
2012-10-31 22:40:53 +01:00
|
|
|
prob_skip_cost = vp9_cost_bit(skip_prob, 1);
|
2012-08-20 23:43:34 +02:00
|
|
|
rate2 += prob_skip_cost;
|
|
|
|
other_cost += prob_skip_cost;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Add in the cost of the no skip flag.
|
|
|
|
else if (mb_skip_allowed) {
|
2012-10-31 22:40:53 +01:00
|
|
|
int prob_skip_cost = vp9_cost_bit(vp9_get_pred_prob(cm, xd,
|
2012-08-20 23:43:34 +02:00
|
|
|
PRED_MBSKIP), 0);
|
|
|
|
rate2 += prob_skip_cost;
|
|
|
|
other_cost += prob_skip_cost;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Calculate the final RD estimate for this mode.
|
|
|
|
this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
|
|
|
|
}
|
|
|
|
|
|
|
|
#if 0
|
|
|
|
// Keep record of best intra distortion
|
|
|
|
if ((xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) &&
|
|
|
|
(this_rd < best_intra_rd)) {
|
|
|
|
best_intra_rd = this_rd;
|
|
|
|
*returnintra = distortion2;
|
|
|
|
}
|
|
|
|
#endif
|
2012-11-30 20:46:20 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
if ((mbmi->ref_frame == INTRA_FRAME) &&
|
|
|
|
(this_mode <= TM_PRED) &&
|
|
|
|
(this_rd < best_intra16_rd)) {
|
|
|
|
best_intra16_rd = this_rd;
|
|
|
|
best_intra16_mode = this_mode;
|
|
|
|
best_intra16_uv_mode = (mbmi->txfm_size != TX_4X4 ?
|
|
|
|
mode_uv_8x8 : mode_uv_4x4);
|
|
|
|
}
|
|
|
|
#endif
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2012-11-08 20:03:00 +01:00
|
|
|
if (!disable_skip && mbmi->ref_frame == INTRA_FRAME)
|
|
|
|
for (i = 0; i < NB_PREDICTION_TYPES; ++i)
|
|
|
|
best_pred_rd[i] = MIN(best_pred_rd[i], this_rd);
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2012-11-30 20:46:20 +01:00
|
|
|
if (this_rd < best_overall_rd) {
|
|
|
|
best_overall_rd = this_rd;
|
2013-02-12 02:08:52 +01:00
|
|
|
best_filter = tmp_best_filter;
|
2012-12-20 23:56:19 +01:00
|
|
|
best_mode = this_mode;
|
2012-11-30 20:46:20 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
is_best_interintra = (mbmi->second_ref_frame == INTRA_FRAME);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2012-08-20 23:43:34 +02:00
|
|
|
// Did this mode help.. i.e. is it the new best mode
|
|
|
|
if (this_rd < best_rd || x->skip) {
|
|
|
|
if (!mode_excluded) {
|
|
|
|
// Note index of best mode so far
|
|
|
|
best_mode_index = mode_index;
|
|
|
|
|
|
|
|
if (this_mode <= B_PRED) {
|
|
|
|
/* required for left and above block mv */
|
2012-11-16 00:50:07 +01:00
|
|
|
mbmi->mv[0].as_int = 0;
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
other_cost += ref_costs[xd->mode_info_context->mbmi.ref_frame];
|
|
|
|
*returnrate = rate2;
|
|
|
|
*returndistortion = distortion2;
|
|
|
|
best_rd = this_rd;
|
2012-10-30 01:58:18 +01:00
|
|
|
vpx_memcpy(&best_mbmode, mbmi, sizeof(MB_MODE_INFO));
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
|
|
|
#if 0
|
2012-12-20 23:56:19 +01:00
|
|
|
// Testing this mode gave rise to an improvement in best error score.
|
|
|
|
// Lower threshold a bit for next time
|
|
|
|
cpi->rd_thresh_mult[mode_index] =
|
|
|
|
(cpi->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ?
|
|
|
|
cpi->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT;
|
|
|
|
cpi->rd_threshes[mode_index] =
|
|
|
|
(cpi->rd_baseline_thresh[mode_index] >> 7)
|
|
|
|
* cpi->rd_thresh_mult[mode_index];
|
2012-08-20 23:43:34 +02:00
|
|
|
#endif
|
2012-12-20 23:56:19 +01:00
|
|
|
} else {
|
|
|
|
// If the mode did not help improve the best error case then
|
|
|
|
// raise the threshold for testing that mode next time around.
|
2012-08-20 23:43:34 +02:00
|
|
|
#if 0
|
|
|
|
cpi->rd_thresh_mult[mode_index] += 4;
|
|
|
|
|
|
|
|
if (cpi->rd_thresh_mult[mode_index] > MAX_THRESHMULT)
|
|
|
|
cpi->rd_thresh_mult[mode_index] = MAX_THRESHMULT;
|
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
cpi->rd_threshes[mode_index] =
|
|
|
|
(cpi->rd_baseline_thresh[mode_index] >> 7)
|
|
|
|
* cpi->rd_thresh_mult[mode_index];
|
2012-08-20 23:43:34 +02:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
/* keep record of best compound/single-only prediction */
|
2012-10-30 01:58:18 +01:00
|
|
|
if (!disable_skip && mbmi->ref_frame != INTRA_FRAME) {
|
2012-08-20 23:43:34 +02:00
|
|
|
int single_rd, hybrid_rd, single_rate, hybrid_rate;
|
|
|
|
|
|
|
|
if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
|
|
|
|
single_rate = rate2 - compmode_cost;
|
|
|
|
hybrid_rate = rate2;
|
|
|
|
} else {
|
|
|
|
single_rate = rate2;
|
|
|
|
hybrid_rate = rate2 + compmode_cost;
|
|
|
|
}
|
|
|
|
|
|
|
|
single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
|
|
|
|
hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
|
|
|
|
|
2012-11-07 15:50:25 +01:00
|
|
|
if (mbmi->second_ref_frame <= INTRA_FRAME &&
|
2012-11-08 20:03:00 +01:00
|
|
|
single_rd < best_pred_rd[SINGLE_PREDICTION_ONLY]) {
|
|
|
|
best_pred_rd[SINGLE_PREDICTION_ONLY] = single_rd;
|
2012-11-07 15:50:25 +01:00
|
|
|
} else if (mbmi->second_ref_frame > INTRA_FRAME &&
|
2012-11-08 20:03:00 +01:00
|
|
|
single_rd < best_pred_rd[COMP_PREDICTION_ONLY]) {
|
|
|
|
best_pred_rd[COMP_PREDICTION_ONLY] = single_rd;
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
2012-11-08 20:03:00 +01:00
|
|
|
if (hybrid_rd < best_pred_rd[HYBRID_PREDICTION])
|
|
|
|
best_pred_rd[HYBRID_PREDICTION] = hybrid_rd;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* keep record of best txfm size */
|
2013-01-14 20:49:30 +01:00
|
|
|
if (!mode_excluded && this_rd != INT64_MAX) {
|
2012-11-08 20:03:00 +01:00
|
|
|
for (i = 0; i < NB_TXFM_MODES; i++) {
|
|
|
|
int64_t adj_rd;
|
|
|
|
if (this_mode != B_PRED) {
|
|
|
|
adj_rd = this_rd + txfm_cache[i] - txfm_cache[cm->txfm_mode];
|
|
|
|
} else {
|
|
|
|
adj_rd = this_rd;
|
|
|
|
}
|
|
|
|
if (adj_rd < best_txfm_rd[i])
|
|
|
|
best_txfm_rd[i] = adj_rd;
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (x->skip && !mode_excluded)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
assert((cm->mcomp_filter_type == SWITCHABLE) ||
|
|
|
|
(cm->mcomp_filter_type == best_mbmode.interp_filter) ||
|
|
|
|
(best_mbmode.mode <= B_PRED));
|
|
|
|
|
2012-11-30 20:46:20 +01:00
|
|
|
#if CONFIG_COMP_INTERINTRA_PRED
|
|
|
|
++cpi->interintra_select_count[is_best_interintra];
|
|
|
|
// if (is_best_interintra) printf("best_interintra\n");
|
|
|
|
#endif
|
|
|
|
|
2012-12-20 23:56:19 +01:00
|
|
|
// Accumulate filter usage stats
|
|
|
|
// TODO(agrange): Use RD criteria to select interpolation filter mode.
|
|
|
|
if ((best_mode >= NEARESTMV) && (best_mode <= SPLITMV))
|
|
|
|
++cpi->best_switchable_interp_count[vp9_switchable_interp_map[best_filter]];
|
|
|
|
|
2012-08-20 23:43:34 +02:00
|
|
|
// TODO(rbultje) integrate with RD thresholding
|
|
|
|
#if 0
|
|
|
|
// Reduce the activation RD thresholds for the best choice mode
|
|
|
|
if ((cpi->rd_baseline_thresh[best_mode_index] > 0) &&
|
|
|
|
(cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2))) {
|
|
|
|
int best_adjustment = (cpi->rd_thresh_mult[best_mode_index] >> 2);
|
|
|
|
|
|
|
|
cpi->rd_thresh_mult[best_mode_index] =
|
|
|
|
(cpi->rd_thresh_mult[best_mode_index] >= (MIN_THRESHMULT + best_adjustment)) ?
|
|
|
|
cpi->rd_thresh_mult[best_mode_index] - best_adjustment : MIN_THRESHMULT;
|
|
|
|
cpi->rd_threshes[best_mode_index] =
|
|
|
|
(cpi->rd_baseline_thresh[best_mode_index] >> 7) * cpi->rd_thresh_mult[best_mode_index];
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
// This code forces Altref,0,0 and skip for the frame that overlays a
|
|
|
|
// an alrtef unless Altref is filtered. However, this is unsafe if
|
2013-01-28 16:22:53 +01:00
|
|
|
// segment level coding of ref frame is enabled for this segment.
|
2012-10-30 06:15:27 +01:00
|
|
|
if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME) &&
|
2012-08-20 23:43:34 +02:00
|
|
|
cpi->is_src_frame_alt_ref &&
|
|
|
|
(cpi->oxcf.arnr_max_frames == 0) &&
|
|
|
|
(best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME)) {
|
2012-10-30 01:58:18 +01:00
|
|
|
mbmi->mode = ZEROMV;
|
|
|
|
mbmi->ref_frame = ALTREF_FRAME;
|
2012-11-07 15:50:25 +01:00
|
|
|
mbmi->second_ref_frame = INTRA_FRAME;
|
2012-10-30 01:58:18 +01:00
|
|
|
mbmi->mv[0].as_int = 0;
|
|
|
|
mbmi->uv_mode = DC_PRED;
|
|
|
|
mbmi->mb_skip_coeff = (cpi->common.mb_no_coeff_skip) ? 1 : 0;
|
|
|
|
mbmi->partitioning = 0;
|
2012-11-08 20:03:00 +01:00
|
|
|
mbmi->txfm_size = cm->txfm_mode == TX_MODE_SELECT ?
|
2013-01-30 06:46:31 +01:00
|
|
|
TX_32X32 : cm->txfm_mode;
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2012-11-08 20:03:00 +01:00
|
|
|
vpx_memset(best_txfm_diff, 0, sizeof(best_txfm_diff));
|
|
|
|
vpx_memset(best_pred_diff, 0, sizeof(best_pred_diff));
|
|
|
|
goto end;
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// macroblock modes
|
2012-10-30 01:58:18 +01:00
|
|
|
vpx_memcpy(mbmi, &best_mbmode, sizeof(MB_MODE_INFO));
|
2012-11-08 20:03:00 +01:00
|
|
|
|
|
|
|
for (i = 0; i < NB_PREDICTION_TYPES; ++i) {
|
2013-01-14 20:49:30 +01:00
|
|
|
if (best_pred_rd[i] == INT64_MAX)
|
2012-11-08 20:03:00 +01:00
|
|
|
best_pred_diff[i] = INT_MIN;
|
|
|
|
else
|
|
|
|
best_pred_diff[i] = best_rd - best_pred_rd[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!x->skip) {
|
|
|
|
for (i = 0; i < NB_TXFM_MODES; i++) {
|
2013-01-14 20:49:30 +01:00
|
|
|
if (best_txfm_rd[i] == INT64_MAX)
|
2013-02-16 00:55:31 +01:00
|
|
|
best_txfm_diff[i] = 0;
|
2012-11-08 20:03:00 +01:00
|
|
|
else
|
|
|
|
best_txfm_diff[i] = best_rd - best_txfm_rd[i];
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
vpx_memset(best_txfm_diff, 0, sizeof(best_txfm_diff));
|
|
|
|
}
|
|
|
|
|
|
|
|
end:
|
2013-01-06 03:20:25 +01:00
|
|
|
{
|
|
|
|
PICK_MODE_CONTEXT *p = (block_size == BLOCK_32X32) ?
|
|
|
|
&x->sb32_context[xd->sb_index] :
|
|
|
|
&x->sb64_context;
|
|
|
|
store_coding_context(x, p, best_mode_index, NULL,
|
|
|
|
&mbmi->ref_mvs[mbmi->ref_frame][0],
|
|
|
|
&mbmi->ref_mvs[mbmi->second_ref_frame < 0 ? 0 :
|
|
|
|
mbmi->second_ref_frame][0],
|
|
|
|
best_pred_diff, best_txfm_diff);
|
|
|
|
}
|
2012-08-20 23:43:34 +02:00
|
|
|
|
|
|
|
return best_rd;
|
|
|
|
}
|
2013-01-06 03:20:25 +01:00
|
|
|
|
|
|
|
// Convenience wrapper: run inter-mode RD selection for a 32x32 superblock.
// Forwards all arguments unchanged and tags the call with BLOCK_32X32.
int64_t vp9_rd_pick_inter_mode_sb32(VP9_COMP *cpi, MACROBLOCK *x,
                                    int mb_row, int mb_col,
                                    int *returnrate,
                                    int *returndistortion) {
  return vp9_rd_pick_inter_mode_sb(cpi, x, mb_row, mb_col,
                                   returnrate, returndistortion,
                                   BLOCK_32X32);
}
|
|
|
|
|
|
|
|
// Convenience wrapper: run inter-mode RD selection for a 64x64 superblock.
// Forwards all arguments unchanged and tags the call with BLOCK_64X64.
int64_t vp9_rd_pick_inter_mode_sb64(VP9_COMP *cpi, MACROBLOCK *x,
                                    int mb_row, int mb_col,
                                    int *returnrate,
                                    int *returndistortion) {
  return vp9_rd_pick_inter_mode_sb(cpi, x, mb_row, mb_col,
                                   returnrate, returndistortion,
                                   BLOCK_64X64);
}
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2012-10-31 01:53:32 +01:00
|
|
|
// Pick the best coding mode for a 16x16 macroblock on an inter frame.
// Runs the RD-based mode search and records the resulting rate/distortion
// both into the per-MB coding context and into *totalrate / *totaldist.
void vp9_pick_mode_inter_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
                                    int mb_row, int mb_col,
                                    int *totalrate, int *totaldist) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
  const unsigned char seg_id = mbmi->segment_id;
  int this_rate, this_dist;
  int64_t intra_err = 0;

  // Select the encode-breakout threshold: per-segment when segmentation
  // is enabled, otherwise the global value from the encoder config.
  x->encode_breakout = xd->segmentation_enabled
                           ? cpi->segment_encode_breakout[seg_id]
                           : cpi->oxcf.encode_breakout;

  // Only the RD encode path exists in this codebase; the non-RD path
  // (vp9_pick_inter_mode) was deleted to simplify development.
  {
    // rd_pick_inter_mode() may modify this flag; preserve it across the call.
    const int saved_zbin_boost = cpi->zbin_mode_boost_enabled;

    rd_pick_inter_mode(cpi, x, mb_row, mb_col, &this_rate,
                       &this_dist, &intra_err);

    /* restore cpi->zbin_mode_boost_enabled */
    cpi->zbin_mode_boost_enabled = saved_zbin_boost;
  }

  // Store metrics so they can be added in to totals if this mode is picked.
  x->mb_context[xd->sb_index][xd->mb_index].distortion = this_dist;
  x->mb_context[xd->sb_index][xd->mb_index].intra_error = intra_err;

  *totalrate = this_rate;
  *totaldist = this_dist;
}
|