2010-05-18 17:58:33 +02:00
|
|
|
/*
|
2010-09-09 14:16:39 +02:00
|
|
|
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
|
2010-05-18 17:58:33 +02:00
|
|
|
*
|
2010-06-18 18:39:21 +02:00
|
|
|
* Use of this source code is governed by a BSD-style license
|
2010-06-04 22:19:40 +02:00
|
|
|
* that can be found in the LICENSE file in the root of the source
|
|
|
|
* tree. An additional intellectual property rights grant can be found
|
2010-06-18 18:39:21 +02:00
|
|
|
* in the file PATENTS. All contributing project authors may
|
2010-06-04 22:19:40 +02:00
|
|
|
* be found in the AUTHORS file in the root of the source tree.
|
2010-05-18 17:58:33 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <assert.h>
|
2014-02-28 19:57:30 +01:00
|
|
|
#include <math.h>
|
2012-11-27 22:59:17 +01:00
|
|
|
|
2014-02-28 19:57:30 +01:00
|
|
|
#include "./vp9_rtcd.h"
|
2015-07-06 18:33:27 +02:00
|
|
|
#include "./vpx_dsp_rtcd.h"
|
2014-02-28 19:57:30 +01:00
|
|
|
|
2015-08-31 23:36:35 +02:00
|
|
|
#include "vpx_dsp/vpx_dsp_common.h"
|
2014-02-28 19:57:30 +01:00
|
|
|
#include "vpx_mem/vpx_mem.h"
|
2015-05-12 04:09:22 +02:00
|
|
|
#include "vpx_ports/mem.h"
|
2015-08-10 20:28:04 +02:00
|
|
|
#include "vpx_ports/system_state.h"
|
2014-02-28 19:57:30 +01:00
|
|
|
|
|
|
|
#include "vp9/common/vp9_common.h"
|
|
|
|
#include "vp9/common/vp9_entropy.h"
|
2012-11-27 22:59:17 +01:00
|
|
|
#include "vp9/common/vp9_entropymode.h"
|
2014-02-28 19:57:30 +01:00
|
|
|
#include "vp9/common/vp9_idct.h"
|
|
|
|
#include "vp9/common/vp9_mvref_common.h"
|
|
|
|
#include "vp9/common/vp9_pred_common.h"
|
|
|
|
#include "vp9/common/vp9_quant_common.h"
|
2012-11-27 22:59:17 +01:00
|
|
|
#include "vp9/common/vp9_reconinter.h"
|
|
|
|
#include "vp9/common/vp9_reconintra.h"
|
2015-05-22 20:19:51 +02:00
|
|
|
#include "vp9/common/vp9_scan.h"
|
2014-02-28 19:57:30 +01:00
|
|
|
#include "vp9/common/vp9_seg_common.h"
|
|
|
|
|
2014-03-05 20:57:57 +01:00
|
|
|
#include "vp9/encoder/vp9_cost.h"
|
2012-11-28 19:41:40 +01:00
|
|
|
#include "vp9/encoder/vp9_encodemb.h"
|
2014-02-28 19:57:30 +01:00
|
|
|
#include "vp9/encoder/vp9_encodemv.h"
|
2014-04-19 03:27:47 +02:00
|
|
|
#include "vp9/encoder/vp9_encoder.h"
|
2012-11-28 19:41:40 +01:00
|
|
|
#include "vp9/encoder/vp9_mcomp.h"
|
2014-02-28 19:57:30 +01:00
|
|
|
#include "vp9/encoder/vp9_quantize.h"
|
2012-11-28 19:41:40 +01:00
|
|
|
#include "vp9/encoder/vp9_ratectrl.h"
|
2014-07-02 21:36:48 +02:00
|
|
|
#include "vp9/encoder/vp9_rd.h"
|
2014-02-28 19:57:30 +01:00
|
|
|
#include "vp9/encoder/vp9_rdopt.h"
|
2014-12-11 17:22:03 +01:00
|
|
|
#include "vp9/encoder/vp9_aq_variance.h"
|
2012-08-24 16:44:01 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
#define LAST_FRAME_MODE_MASK \
|
|
|
|
((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME))
|
|
|
|
#define GOLDEN_FRAME_MODE_MASK \
|
|
|
|
((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME))
|
|
|
|
#define ALT_REF_MODE_MASK \
|
|
|
|
((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | (1 << INTRA_FRAME))
|
2014-09-10 03:43:27 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
#define SECOND_REF_FRAME_MASK ((1 << ALTREF_FRAME) | 0x01)
|
2013-09-05 02:15:05 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
#define MIN_EARLY_TERM_INDEX 3
|
|
|
|
#define NEW_MV_DISCOUNT_FACTOR 8
|
2013-10-01 17:57:18 +02:00
|
|
|
|
2014-01-06 21:23:36 +01:00
|
|
|
typedef struct {
|
2014-04-12 00:26:24 +02:00
|
|
|
PREDICTION_MODE mode;
|
2014-01-06 21:23:36 +01:00
|
|
|
MV_REFERENCE_FRAME ref_frame[2];
|
|
|
|
} MODE_DEFINITION;
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
typedef struct { MV_REFERENCE_FRAME ref_frame[2]; } REF_DEFINITION;
|
2014-01-06 21:23:36 +01:00
|
|
|
|
2014-01-31 20:06:14 +01:00
|
|
|
struct rdcost_block_args {
|
2016-07-02 01:02:41 +02:00
|
|
|
const VP9_COMP *cpi;
|
2014-01-31 20:06:14 +01:00
|
|
|
MACROBLOCK *x;
|
|
|
|
ENTROPY_CONTEXT t_above[16];
|
|
|
|
ENTROPY_CONTEXT t_left[16];
|
|
|
|
int this_rate;
|
|
|
|
int64_t this_dist;
|
|
|
|
int64_t this_sse;
|
|
|
|
int64_t this_rd;
|
|
|
|
int64_t best_rd;
|
2015-07-30 21:36:57 +02:00
|
|
|
int exit_early;
|
2014-03-03 20:49:13 +01:00
|
|
|
int use_fast_coef_costing;
|
2014-02-12 03:08:06 +01:00
|
|
|
const scan_order *so;
|
2015-07-31 02:39:23 +02:00
|
|
|
uint8_t skippable;
|
2014-01-31 20:06:14 +01:00
|
|
|
};
|
|
|
|
|
2014-12-11 17:22:03 +01:00
|
|
|
#define LAST_NEW_MV_INDEX 6
|
2014-04-09 20:39:59 +02:00
|
|
|
static const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
|
2016-07-27 05:43:23 +02:00
|
|
|
{ NEARESTMV, { LAST_FRAME, NONE } },
|
|
|
|
{ NEARESTMV, { ALTREF_FRAME, NONE } },
|
|
|
|
{ NEARESTMV, { GOLDEN_FRAME, NONE } },
|
2014-01-06 21:23:36 +01:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
{ DC_PRED, { INTRA_FRAME, NONE } },
|
2014-01-06 21:23:36 +01:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
{ NEWMV, { LAST_FRAME, NONE } },
|
|
|
|
{ NEWMV, { ALTREF_FRAME, NONE } },
|
|
|
|
{ NEWMV, { GOLDEN_FRAME, NONE } },
|
2014-01-06 21:23:36 +01:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
{ NEARMV, { LAST_FRAME, NONE } },
|
|
|
|
{ NEARMV, { ALTREF_FRAME, NONE } },
|
|
|
|
{ NEARMV, { GOLDEN_FRAME, NONE } },
|
Adaptive mode search scheduling
This commit enables an adaptive mode search order scheduling scheme
in the rate-distortion optimization. It changes the compression
performance by -0.433% and -0.420% for derf and stdhd respectively.
It provides speed improvement for speed 3:
bus CIF 1000 kbps
24590 b/f, 35.513 dB, 7864 ms ->
24696 b/f, 35.491 dB, 7408 ms (6% speed-up)
stockholm 720p 1000 kbps
8983 b/f, 35.078 dB, 65698 ms ->
8962 b/f, 35.054 dB, 60298 ms (8%)
old_town_cross 720p 1000 kbps
11804 b/f, 35.666 dB, 62492 ms ->
11778 b/f, 35.609 dB, 56040 ms (10%)
blue_sky 1080p 1500 kbps
57173 b/f, 36.179 dB, 77879 ms ->
57199 b/f, 36.131 dB, 69821 ms (10%)
pedestrian_area 1080p 2000 kbps
74241 b/f, 41.105 dB, 144031 ms ->
74271 b/f, 41.091 dB, 133614 ms (8%)
Change-Id: Iaad28cbc99399030fc5f9951eb5aa7fa633f320e
2014-09-18 22:37:20 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
{ ZEROMV, { LAST_FRAME, NONE } },
|
|
|
|
{ ZEROMV, { GOLDEN_FRAME, NONE } },
|
|
|
|
{ ZEROMV, { ALTREF_FRAME, NONE } },
|
Adaptive mode search scheduling
This commit enables an adaptive mode search order scheduling scheme
in the rate-distortion optimization. It changes the compression
performance by -0.433% and -0.420% for derf and stdhd respectively.
It provides speed improvement for speed 3:
bus CIF 1000 kbps
24590 b/f, 35.513 dB, 7864 ms ->
24696 b/f, 35.491 dB, 7408 ms (6% speed-up)
stockholm 720p 1000 kbps
8983 b/f, 35.078 dB, 65698 ms ->
8962 b/f, 35.054 dB, 60298 ms (8%)
old_town_cross 720p 1000 kbps
11804 b/f, 35.666 dB, 62492 ms ->
11778 b/f, 35.609 dB, 56040 ms (10%)
blue_sky 1080p 1500 kbps
57173 b/f, 36.179 dB, 77879 ms ->
57199 b/f, 36.131 dB, 69821 ms (10%)
pedestrian_area 1080p 2000 kbps
74241 b/f, 41.105 dB, 144031 ms ->
74271 b/f, 41.091 dB, 133614 ms (8%)
Change-Id: Iaad28cbc99399030fc5f9951eb5aa7fa633f320e
2014-09-18 22:37:20 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
{ NEARESTMV, { LAST_FRAME, ALTREF_FRAME } },
|
|
|
|
{ NEARESTMV, { GOLDEN_FRAME, ALTREF_FRAME } },
|
2014-01-06 21:23:36 +01:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
{ TM_PRED, { INTRA_FRAME, NONE } },
|
2014-01-06 21:23:36 +01:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
{ NEARMV, { LAST_FRAME, ALTREF_FRAME } },
|
|
|
|
{ NEWMV, { LAST_FRAME, ALTREF_FRAME } },
|
|
|
|
{ NEARMV, { GOLDEN_FRAME, ALTREF_FRAME } },
|
|
|
|
{ NEWMV, { GOLDEN_FRAME, ALTREF_FRAME } },
|
2014-01-06 21:23:36 +01:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
{ ZEROMV, { LAST_FRAME, ALTREF_FRAME } },
|
|
|
|
{ ZEROMV, { GOLDEN_FRAME, ALTREF_FRAME } },
|
2014-01-06 21:23:36 +01:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
{ H_PRED, { INTRA_FRAME, NONE } },
|
|
|
|
{ V_PRED, { INTRA_FRAME, NONE } },
|
|
|
|
{ D135_PRED, { INTRA_FRAME, NONE } },
|
|
|
|
{ D207_PRED, { INTRA_FRAME, NONE } },
|
|
|
|
{ D153_PRED, { INTRA_FRAME, NONE } },
|
|
|
|
{ D63_PRED, { INTRA_FRAME, NONE } },
|
|
|
|
{ D117_PRED, { INTRA_FRAME, NONE } },
|
|
|
|
{ D45_PRED, { INTRA_FRAME, NONE } },
|
2013-09-28 01:02:49 +02:00
|
|
|
};
|
|
|
|
|
2014-04-09 20:39:59 +02:00
|
|
|
static const REF_DEFINITION vp9_ref_order[MAX_REFS] = {
|
2016-07-27 05:43:23 +02:00
|
|
|
{ { LAST_FRAME, NONE } }, { { GOLDEN_FRAME, NONE } },
|
|
|
|
{ { ALTREF_FRAME, NONE } }, { { LAST_FRAME, ALTREF_FRAME } },
|
|
|
|
{ { GOLDEN_FRAME, ALTREF_FRAME } }, { { INTRA_FRAME, NONE } },
|
2010-05-18 17:58:33 +02:00
|
|
|
};
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int m, int n,
|
|
|
|
int min_plane, int max_plane) {
|
2014-05-15 20:19:43 +02:00
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = min_plane; i < max_plane; ++i) {
|
|
|
|
struct macroblock_plane *const p = &x->plane[i];
|
|
|
|
struct macroblockd_plane *const pd = &x->e_mbd.plane[i];
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
p->coeff = ctx->coeff_pbuf[i][m];
|
|
|
|
p->qcoeff = ctx->qcoeff_pbuf[i][m];
|
2014-05-15 20:19:43 +02:00
|
|
|
pd->dqcoeff = ctx->dqcoeff_pbuf[i][m];
|
2016-07-27 05:43:23 +02:00
|
|
|
p->eobs = ctx->eobs_pbuf[i][m];
|
2014-05-15 20:19:43 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
ctx->coeff_pbuf[i][m] = ctx->coeff_pbuf[i][n];
|
|
|
|
ctx->qcoeff_pbuf[i][m] = ctx->qcoeff_pbuf[i][n];
|
2014-05-15 20:19:43 +02:00
|
|
|
ctx->dqcoeff_pbuf[i][m] = ctx->dqcoeff_pbuf[i][n];
|
2016-07-27 05:43:23 +02:00
|
|
|
ctx->eobs_pbuf[i][m] = ctx->eobs_pbuf[i][n];
|
2014-05-15 20:19:43 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
ctx->coeff_pbuf[i][n] = p->coeff;
|
|
|
|
ctx->qcoeff_pbuf[i][n] = p->qcoeff;
|
2014-05-15 20:19:43 +02:00
|
|
|
ctx->dqcoeff_pbuf[i][n] = pd->dqcoeff;
|
2016-07-27 05:43:23 +02:00
|
|
|
ctx->eobs_pbuf[i][n] = p->eobs;
|
2014-05-15 20:19:43 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
|
|
|
|
MACROBLOCKD *xd, int *out_rate_sum,
|
|
|
|
int64_t *out_dist_sum, int *skip_txfm_sb,
|
|
|
|
int64_t *skip_sse_sb) {
|
Tx size selection enhancements
(1) Refines the modeling function and uses that to add some speed
features. Specifically, intead of using a flag use_largest_txfm as
a speed feature, an enum tx_size_search_method is used, of which
two of the types are USE_FULL_RD and USE_LARGESTALL. Two other
new types are added:
USE_LARGESTINTRA (use largest only for intra)
USE_LARGESTINTRA_MODELINTER (use largest for intra, and model for
inter)
(2) Another change is that the framework for deciding transform type
is simplified to use a heuristic count based method rather than
an rd based method using txfm_cache. In practice the new method
is found to work just as well - with derf only -0.01 down.
The new method is more compatible with the new framework where
certain rd costs are based on full rd and certain others are
based on modeled rd or are not computed. In this patch the existing
rd based method is still kept for use in the USE_FULL_RD mode.
In the other modes, the count based method is used.
However the recommendation is to remove it eventually since the
benefit is limited, and will remove a lot of complications in
the code
(3) Finally a bug is fixed with the existing use_largest_txfm speed feature
that causes mismatches when the lossless mode and 4x4 WH transform is
forced.
Results on derf:
USE_FULL_RD: +0.03% (due to change in the tables), 0% encode time reduction
USE_LARGESTINTRA: -0.21%, 15% encode time reduction (this one is a
pretty good compromise)
USE_LARGESTINTRA_MODELINTER: -0.98%, 22% encode time reduction
(currently the benefit of modeling is limited for txfm size selection,
but keeping this enum as a placeholder) .
USE_LARGESTALL: -1.05%, 27% encode-time reduction (same as existing
use_largest_txfm speed feature).
Change-Id: I4d60a5f9ce78fbc90cddf2f97ed91d8bc0d4f936
2013-06-22 01:31:12 +02:00
|
|
|
// Note our transform coeffs are 8 times an orthogonal transform.
|
|
|
|
// Hence quantizer step is also 8 times. To get effective quantizer
|
|
|
|
// we need to divide by 8 before sending to modeling function.
|
2014-02-19 03:13:57 +01:00
|
|
|
int i;
|
|
|
|
int64_t rate_sum = 0;
|
|
|
|
int64_t dist_sum = 0;
|
2016-01-20 01:40:20 +01:00
|
|
|
const int ref = xd->mi[0]->ref_frame[0];
|
2013-12-21 00:24:22 +01:00
|
|
|
unsigned int sse;
|
2014-08-26 21:34:54 +02:00
|
|
|
unsigned int var = 0;
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
int64_t total_sse = 0;
|
|
|
|
int skip_flag = 1;
|
2014-10-01 20:31:34 +02:00
|
|
|
const int shift = 6;
|
2014-08-26 21:34:54 +02:00
|
|
|
int64_t dist;
|
2015-07-31 19:56:11 +02:00
|
|
|
const int dequant_shift =
|
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
2016-07-27 05:43:23 +02:00
|
|
|
(xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd - 5 :
|
2015-07-31 19:56:11 +02:00
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
2016-07-27 05:43:23 +02:00
|
|
|
3;
|
2016-12-14 21:07:34 +01:00
|
|
|
unsigned int qstep_vec[MAX_MB_PLANE];
|
|
|
|
unsigned int nlog2_vec[MAX_MB_PLANE];
|
|
|
|
unsigned int sum_sse_vec[MAX_MB_PLANE];
|
|
|
|
int any_zero_sum_sse = 0;
|
2014-08-26 21:34:54 +02:00
|
|
|
|
|
|
|
x->pred_sse[ref] = 0;
|
Tx size selection enhancements
(1) Refines the modeling function and uses that to add some speed
features. Specifically, intead of using a flag use_largest_txfm as
a speed feature, an enum tx_size_search_method is used, of which
two of the types are USE_FULL_RD and USE_LARGESTALL. Two other
new types are added:
USE_LARGESTINTRA (use largest only for intra)
USE_LARGESTINTRA_MODELINTER (use largest for intra, and model for
inter)
(2) Another change is that the framework for deciding transform type
is simplified to use a heuristic count based method rather than
an rd based method using txfm_cache. In practice the new method
is found to work just as well - with derf only -0.01 down.
The new method is more compatible with the new framework where
certain rd costs are based on full rd and certain others are
based on modeled rd or are not computed. In this patch the existing
rd based method is still kept for use in the USE_FULL_RD mode.
In the other modes, the count based method is used.
However the recommendation is to remove it eventually since the
benefit is limited, and will remove a lot of complications in
the code
(3) Finally a bug is fixed with the existing use_largest_txfm speed feature
that causes mismatches when the lossless mode and 4x4 WH transform is
forced.
Results on derf:
USE_FULL_RD: +0.03% (due to change in the tables), 0% encode time reduction
USE_LARGESTINTRA: -0.21%, 15% encode time reduction (this one is a
pretty good compromise)
USE_LARGESTINTRA_MODELINTER: -0.98%, 22% encode time reduction
(currently the benefit of modeling is limited for txfm size selection,
but keeping this enum as a placeholder) .
USE_LARGESTALL: -1.05%, 27% encode-time reduction (same as existing
use_largest_txfm speed feature).
Change-Id: I4d60a5f9ce78fbc90cddf2f97ed91d8bc0d4f936
2013-06-22 01:31:12 +02:00
|
|
|
|
|
|
|
for (i = 0; i < MAX_MB_PLANE; ++i) {
|
|
|
|
struct macroblock_plane *const p = &x->plane[i];
|
|
|
|
struct macroblockd_plane *const pd = &xd->plane[i];
|
2013-08-27 20:05:08 +02:00
|
|
|
const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);
|
2014-08-26 21:34:54 +02:00
|
|
|
const TX_SIZE max_tx_size = max_txsize_lookup[bs];
|
|
|
|
const BLOCK_SIZE unit_size = txsize_to_bsize[max_tx_size];
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
const int64_t dc_thr = p->quant_thred[0] >> shift;
|
|
|
|
const int64_t ac_thr = p->quant_thred[1] >> shift;
|
2016-12-14 21:07:34 +01:00
|
|
|
unsigned int sum_sse = 0;
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
// The low thresholds are used to measure if the prediction errors are
|
|
|
|
// low enough so that we can skip the mode search.
|
2015-08-18 03:19:22 +02:00
|
|
|
const int64_t low_dc_thr = VPXMIN(50, dc_thr >> 2);
|
|
|
|
const int64_t low_ac_thr = VPXMIN(80, ac_thr >> 2);
|
2014-08-26 21:34:54 +02:00
|
|
|
int bw = 1 << (b_width_log2_lookup[bs] - b_width_log2_lookup[unit_size]);
|
|
|
|
int bh = 1 << (b_height_log2_lookup[bs] - b_width_log2_lookup[unit_size]);
|
|
|
|
int idx, idy;
|
|
|
|
int lw = b_width_log2_lookup[unit_size] + 2;
|
|
|
|
int lh = b_height_log2_lookup[unit_size] + 2;
|
|
|
|
|
|
|
|
for (idy = 0; idy < bh; ++idy) {
|
|
|
|
for (idx = 0; idx < bw; ++idx) {
|
|
|
|
uint8_t *src = p->src.buf + (idy * p->src.stride << lh) + (idx << lw);
|
|
|
|
uint8_t *dst = pd->dst.buf + (idy * pd->dst.stride << lh) + (idx << lh);
|
2014-08-28 18:09:37 +02:00
|
|
|
int block_idx = (idy << 1) + idx;
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
int low_err_skip = 0;
|
2014-08-28 18:09:37 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
var = cpi->fn_ptr[unit_size].vf(src, p->src.stride, dst, pd->dst.stride,
|
|
|
|
&sse);
|
2014-08-28 18:09:37 +02:00
|
|
|
x->bsse[(i << 2) + block_idx] = sse;
|
|
|
|
sum_sse += sse;
|
|
|
|
|
2015-07-30 20:52:28 +02:00
|
|
|
x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_NONE;
|
2014-08-28 18:09:37 +02:00
|
|
|
if (!x->select_tx_size) {
|
2014-10-01 20:31:34 +02:00
|
|
|
// Check if all ac coefficients can be quantized to zero.
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
if (var < ac_thr || var == 0) {
|
2015-07-30 20:52:28 +02:00
|
|
|
x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_AC_ONLY;
|
2014-10-01 20:31:34 +02:00
|
|
|
|
|
|
|
// Check if dc coefficient can be quantized to zero.
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
if (sse - var < dc_thr || sse == var) {
|
2015-07-30 20:52:28 +02:00
|
|
|
x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_AC_DC;
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
|
|
|
|
if (!sse || (var < low_ac_thr && sse - var < low_dc_thr))
|
|
|
|
low_err_skip = 1;
|
|
|
|
}
|
2014-10-01 20:31:34 +02:00
|
|
|
}
|
2014-08-28 18:09:37 +02:00
|
|
|
}
|
2014-08-26 21:34:54 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (skip_flag && !low_err_skip) skip_flag = 0;
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (i == 0) x->pred_sse[ref] += sse;
|
2014-08-26 21:34:54 +02:00
|
|
|
}
|
|
|
|
}
|
2014-01-28 22:51:06 +01:00
|
|
|
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
total_sse += sum_sse;
|
2016-12-14 21:07:34 +01:00
|
|
|
sum_sse_vec[i] = sum_sse;
|
|
|
|
any_zero_sum_sse = any_zero_sum_sse || (sum_sse == 0);
|
|
|
|
qstep_vec[i] = pd->dequant[1] >> dequant_shift;
|
|
|
|
nlog2_vec[i] = num_pels_log2_lookup[bs];
|
|
|
|
}
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
|
2016-12-14 21:07:34 +01:00
|
|
|
// Fast approximate the modelling function.
|
|
|
|
if (cpi->sf.simple_model_rd_from_var) {
|
|
|
|
for (i = 0; i < MAX_MB_PLANE; ++i) {
|
2014-02-19 03:13:57 +01:00
|
|
|
int64_t rate;
|
2016-12-14 21:07:34 +01:00
|
|
|
const int64_t square_error = sum_sse_vec[i];
|
|
|
|
int quantizer = qstep_vec[i];
|
2014-02-12 23:16:55 +01:00
|
|
|
|
2014-02-19 03:13:57 +01:00
|
|
|
if (quantizer < 120)
|
2016-01-15 23:32:52 +01:00
|
|
|
rate = (square_error * (280 - quantizer)) >> (16 - VP9_PROB_COST_SHIFT);
|
2014-02-12 23:16:55 +01:00
|
|
|
else
|
|
|
|
rate = 0;
|
|
|
|
dist = (square_error * quantizer) >> 8;
|
|
|
|
rate_sum += rate;
|
|
|
|
dist_sum += dist;
|
2016-12-14 21:07:34 +01:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (any_zero_sum_sse) {
|
|
|
|
for (i = 0; i < MAX_MB_PLANE; ++i) {
|
|
|
|
int rate;
|
|
|
|
vp9_model_rd_from_var_lapndz(sum_sse_vec[i], nlog2_vec[i], qstep_vec[i],
|
|
|
|
&rate, &dist);
|
|
|
|
rate_sum += rate;
|
|
|
|
dist_sum += dist;
|
|
|
|
}
|
2014-01-28 22:51:06 +01:00
|
|
|
} else {
|
2016-12-14 21:07:34 +01:00
|
|
|
vp9_model_rd_from_var_lapndz_vec(sum_sse_vec, nlog2_vec, qstep_vec,
|
|
|
|
&rate_sum, &dist_sum);
|
2014-01-28 22:51:06 +01:00
|
|
|
}
|
Tx size selection enhancements
(1) Refines the modeling function and uses that to add some speed
features. Specifically, intead of using a flag use_largest_txfm as
a speed feature, an enum tx_size_search_method is used, of which
two of the types are USE_FULL_RD and USE_LARGESTALL. Two other
new types are added:
USE_LARGESTINTRA (use largest only for intra)
USE_LARGESTINTRA_MODELINTER (use largest for intra, and model for
inter)
(2) Another change is that the framework for deciding transform type
is simplified to use a heuristic count based method rather than
an rd based method using txfm_cache. In practice the new method
is found to work just as well - with derf only -0.01 down.
The new method is more compatible with the new framework where
certain rd costs are based on full rd and certain others are
based on modeled rd or are not computed. In this patch the existing
rd based method is still kept for use in the USE_FULL_RD mode.
In the other modes, the count based method is used.
However the recommendation is to remove it eventually since the
benefit is limited, and will remove a lot of complications in
the code
(3) Finally a bug is fixed with the existing use_largest_txfm speed feature
that causes mismatches when the lossless mode and 4x4 WH transform is
forced.
Results on derf:
USE_FULL_RD: +0.03% (due to change in the tables), 0% encode time reduction
USE_LARGESTINTRA: -0.21%, 15% encode time reduction (this one is a
pretty good compromise)
USE_LARGESTINTRA_MODELINTER: -0.98%, 22% encode time reduction
(currently the benefit of modeling is limited for txfm size selection,
but keeping this enum as a placeholder) .
USE_LARGESTALL: -1.05%, 27% encode-time reduction (same as existing
use_largest_txfm speed feature).
Change-Id: I4d60a5f9ce78fbc90cddf2f97ed91d8bc0d4f936
2013-06-22 01:31:12 +02:00
|
|
|
}
|
|
|
|
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
*skip_txfm_sb = skip_flag;
|
|
|
|
*skip_sse_sb = total_sse << 4;
|
2014-02-19 03:13:57 +01:00
|
|
|
*out_rate_sum = (int)rate_sum;
|
|
|
|
*out_dist_sum = dist_sum << 4;
|
Tx size selection enhancements
(1) Refines the modeling function and uses that to add some speed
features. Specifically, intead of using a flag use_largest_txfm as
a speed feature, an enum tx_size_search_method is used, of which
two of the types are USE_FULL_RD and USE_LARGESTALL. Two other
new types are added:
USE_LARGESTINTRA (use largest only for intra)
USE_LARGESTINTRA_MODELINTER (use largest for intra, and model for
inter)
(2) Another change is that the framework for deciding transform type
is simplified to use a heuristic count based method rather than
an rd based method using txfm_cache. In practice the new method
is found to work just as well - with derf only -0.01 down.
The new method is more compatible with the new framework where
certain rd costs are based on full rd and certain others are
based on modeled rd or are not computed. In this patch the existing
rd based method is still kept for use in the USE_FULL_RD mode.
In the other modes, the count based method is used.
However the recommendation is to remove it eventually since the
benefit is limited, and will remove a lot of complications in
the code
(3) Finally a bug is fixed with the existing use_largest_txfm speed feature
that causes mismatches when the lossless mode and 4x4 WH transform is
forced.
Results on derf:
USE_FULL_RD: +0.03% (due to change in the tables), 0% encode time reduction
USE_LARGESTINTRA: -0.21%, 15% encode time reduction (this one is a
pretty good compromise)
USE_LARGESTINTRA_MODELINTER: -0.98%, 22% encode time reduction
(currently the benefit of modeling is limited for txfm size selection,
but keeping this enum as a placeholder) .
USE_LARGESTALL: -1.05%, 27% encode-time reduction (same as existing
use_largest_txfm speed feature).
Change-Id: I4d60a5f9ce78fbc90cddf2f97ed91d8bc0d4f936
2013-06-22 01:31:12 +02:00
|
|
|
}
|
|
|
|
|
2015-10-08 16:44:49 +02:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
int64_t vp9_highbd_block_error_c(const tran_low_t *coeff,
|
2016-07-27 05:43:23 +02:00
|
|
|
const tran_low_t *dqcoeff, intptr_t block_size,
|
2015-10-08 16:44:49 +02:00
|
|
|
int64_t *ssz, int bd) {
|
2013-06-21 21:54:52 +02:00
|
|
|
int i;
|
2013-06-28 02:41:54 +02:00
|
|
|
int64_t error = 0, sqcoeff = 0;
|
2015-10-08 16:44:49 +02:00
|
|
|
int shift = 2 * (bd - 8);
|
|
|
|
int rounding = shift > 0 ? 1 << (shift - 1) : 0;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-08-07 00:03:04 +02:00
|
|
|
for (i = 0; i < block_size; i++) {
|
2015-10-08 16:44:49 +02:00
|
|
|
const int64_t diff = coeff[i] - dqcoeff[i];
|
2016-07-27 05:43:23 +02:00
|
|
|
error += diff * diff;
|
2015-10-08 16:44:49 +02:00
|
|
|
sqcoeff += (int64_t)coeff[i] * (int64_t)coeff[i];
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
2015-10-08 16:44:49 +02:00
|
|
|
assert(error >= 0 && sqcoeff >= 0);
|
|
|
|
error = (error + rounding) >> shift;
|
|
|
|
sqcoeff = (sqcoeff + rounding) >> shift;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2013-06-28 02:41:54 +02:00
|
|
|
*ssz = sqcoeff;
|
2012-07-14 00:21:29 +02:00
|
|
|
return error;
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
|
|
|
|
2015-10-08 16:44:49 +02:00
|
|
|
static int64_t vp9_highbd_block_error_dispatch(const tran_low_t *coeff,
|
|
|
|
const tran_low_t *dqcoeff,
|
|
|
|
intptr_t block_size,
|
|
|
|
int64_t *ssz, int bd) {
|
|
|
|
if (bd == 8) {
|
2017-02-17 02:57:44 +01:00
|
|
|
return vp9_block_error(coeff, dqcoeff, block_size, ssz);
|
2015-10-08 16:44:49 +02:00
|
|
|
} else {
|
|
|
|
return vp9_highbd_block_error(coeff, dqcoeff, block_size, ssz, bd);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
|
|
|
|
int64_t vp9_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
|
|
|
|
intptr_t block_size, int64_t *ssz) {
|
2014-09-24 15:36:34 +02:00
|
|
|
int i;
|
|
|
|
int64_t error = 0, sqcoeff = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < block_size; i++) {
|
2015-10-08 16:44:49 +02:00
|
|
|
const int diff = coeff[i] - dqcoeff[i];
|
2016-07-27 05:43:23 +02:00
|
|
|
error += diff * diff;
|
2015-10-08 16:44:49 +02:00
|
|
|
sqcoeff += coeff[i] * coeff[i];
|
2014-09-24 15:36:34 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
*ssz = sqcoeff;
|
|
|
|
return error;
|
|
|
|
}
|
2015-10-08 16:44:49 +02:00
|
|
|
|
2017-01-27 00:00:04 +01:00
|
|
|
int64_t vp9_block_error_fp_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
|
2015-10-08 16:44:49 +02:00
|
|
|
int block_size) {
|
|
|
|
int i;
|
|
|
|
int64_t error = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < block_size; i++) {
|
|
|
|
const int diff = coeff[i] - dqcoeff[i];
|
2016-07-27 05:43:23 +02:00
|
|
|
error += diff * diff;
|
2015-10-08 16:44:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return error;
|
|
|
|
}
|
2014-09-24 15:36:34 +02:00
|
|
|
|
2013-07-25 00:13:58 +02:00
|
|
|
/* The trailing '0' is a terminator which is used inside cost_coeffs() to
|
|
|
|
* decide whether to include cost of a trailing EOB node or not (i.e. we
|
|
|
|
* can skip this if the last coefficient in this transform block, e.g. the
|
|
|
|
* 16th coefficient in a 4x4 block or the 64th coefficient in a 8x8 block,
|
|
|
|
* were non-zero). */
|
2013-07-27 02:15:37 +02:00
|
|
|
static const int16_t band_counts[TX_SIZES][8] = {
|
2016-07-27 05:43:23 +02:00
|
|
|
{ 1, 2, 3, 4, 3, 16 - 13, 0 },
|
|
|
|
{ 1, 2, 3, 4, 11, 64 - 21, 0 },
|
|
|
|
{ 1, 2, 3, 4, 11, 256 - 21, 0 },
|
2013-07-25 00:13:58 +02:00
|
|
|
{ 1, 2, 3, 4, 11, 1024 - 21, 0 },
|
2013-07-23 01:09:09 +02:00
|
|
|
};
|
2016-07-27 05:43:23 +02:00
|
|
|
static int cost_coeffs(MACROBLOCK *x, int plane, int block, TX_SIZE tx_size,
|
|
|
|
int pt, const int16_t *scan, const int16_t *nb,
|
2015-03-05 21:39:02 +01:00
|
|
|
int use_fast_coef_costing) {
|
2013-10-11 20:26:32 +02:00
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO *mi = xd->mi[0];
|
2014-03-03 21:19:51 +01:00
|
|
|
const struct macroblock_plane *p = &x->plane[plane];
|
2015-09-29 19:40:27 +02:00
|
|
|
const PLANE_TYPE type = get_plane_type(plane);
|
2013-07-25 00:13:58 +02:00
|
|
|
const int16_t *band_count = &band_counts[tx_size][1];
|
2013-12-04 02:59:32 +01:00
|
|
|
const int eob = p->eobs[block];
|
2014-09-03 01:34:09 +02:00
|
|
|
const tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
|
2016-07-27 05:43:23 +02:00
|
|
|
unsigned int(*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] =
|
|
|
|
x->token_costs[tx_size][type][is_inter_block(mi)];
|
2014-02-28 03:58:32 +01:00
|
|
|
uint8_t token_cache[32 * 32];
|
2017-02-01 00:35:20 +01:00
|
|
|
int cost;
|
2014-12-24 00:06:17 +01:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
2017-03-04 00:02:56 +01:00
|
|
|
const uint16_t *cat6_high_cost = vp9_get_high_cost_table(xd->bd);
|
2014-12-24 00:06:17 +01:00
|
|
|
#else
|
2017-03-04 00:02:56 +01:00
|
|
|
const uint16_t *cat6_high_cost = vp9_get_high_cost_table(8);
|
2014-12-24 00:06:17 +01:00
|
|
|
#endif
|
|
|
|
|
2013-02-20 19:16:24 +01:00
|
|
|
// Check for consistency of tx_size with mode info
|
2016-07-27 05:43:23 +02:00
|
|
|
assert(type == PLANE_TYPE_Y
|
|
|
|
? mi->tx_size == tx_size
|
|
|
|
: get_uv_tx_size(mi, &xd->plane[plane]) == tx_size);
|
2013-04-04 20:07:19 +02:00
|
|
|
|
2013-06-28 05:57:37 +02:00
|
|
|
if (eob == 0) {
|
|
|
|
// single eob token
|
2013-12-04 02:23:03 +01:00
|
|
|
cost = token_costs[0][0][pt][EOB_TOKEN];
|
2013-06-28 05:57:37 +02:00
|
|
|
} else {
|
2016-02-26 23:42:25 +01:00
|
|
|
if (use_fast_coef_costing) {
|
|
|
|
int band_left = *band_count++;
|
2017-02-01 00:35:20 +01:00
|
|
|
int c;
|
2016-02-26 23:42:25 +01:00
|
|
|
|
|
|
|
// dc token
|
|
|
|
int v = qcoeff[0];
|
|
|
|
int16_t prev_t;
|
|
|
|
cost = vp9_get_token_cost(v, &prev_t, cat6_high_cost);
|
|
|
|
cost += (*token_costs)[0][pt][prev_t];
|
|
|
|
|
|
|
|
token_cache[0] = vp9_pt_energy_class[prev_t];
|
|
|
|
++token_costs;
|
|
|
|
|
|
|
|
// ac tokens
|
|
|
|
for (c = 1; c < eob; c++) {
|
|
|
|
const int rc = scan[c];
|
|
|
|
int16_t t;
|
|
|
|
|
|
|
|
v = qcoeff[rc];
|
|
|
|
cost += vp9_get_token_cost(v, &t, cat6_high_cost);
|
|
|
|
cost += (*token_costs)[!prev_t][!prev_t][t];
|
|
|
|
prev_t = t;
|
|
|
|
if (!--band_left) {
|
|
|
|
band_left = *band_count++;
|
|
|
|
++token_costs;
|
|
|
|
}
|
2013-07-23 01:09:09 +02:00
|
|
|
}
|
2013-06-28 05:57:37 +02:00
|
|
|
|
2016-02-26 23:42:25 +01:00
|
|
|
// eob token
|
2016-07-27 05:43:23 +02:00
|
|
|
if (band_left) cost += (*token_costs)[0][!prev_t][EOB_TOKEN];
|
2016-02-26 23:42:25 +01:00
|
|
|
|
|
|
|
} else { // !use_fast_coef_costing
|
|
|
|
int band_left = *band_count++;
|
2017-02-01 00:35:20 +01:00
|
|
|
int c;
|
2016-02-26 23:42:25 +01:00
|
|
|
|
|
|
|
// dc token
|
|
|
|
int v = qcoeff[0];
|
|
|
|
int16_t tok;
|
2016-07-27 05:43:23 +02:00
|
|
|
unsigned int(*tok_cost_ptr)[COEFF_CONTEXTS][ENTROPY_TOKENS];
|
2016-02-26 23:42:25 +01:00
|
|
|
cost = vp9_get_token_cost(v, &tok, cat6_high_cost);
|
|
|
|
cost += (*token_costs)[0][pt][tok];
|
|
|
|
|
|
|
|
token_cache[0] = vp9_pt_energy_class[tok];
|
|
|
|
++token_costs;
|
|
|
|
|
|
|
|
tok_cost_ptr = &((*token_costs)[!tok]);
|
|
|
|
|
|
|
|
// ac tokens
|
|
|
|
for (c = 1; c < eob; c++) {
|
|
|
|
const int rc = scan[c];
|
|
|
|
|
|
|
|
v = qcoeff[rc];
|
|
|
|
cost += vp9_get_token_cost(v, &tok, cat6_high_cost);
|
|
|
|
pt = get_coef_context(nb, token_cache, c);
|
|
|
|
cost += (*tok_cost_ptr)[pt][tok];
|
|
|
|
token_cache[rc] = vp9_pt_energy_class[tok];
|
|
|
|
if (!--band_left) {
|
|
|
|
band_left = *band_count++;
|
|
|
|
++token_costs;
|
|
|
|
}
|
|
|
|
tok_cost_ptr = &((*token_costs)[!tok]);
|
|
|
|
}
|
|
|
|
|
|
|
|
// eob token
|
|
|
|
if (band_left) {
|
2014-02-28 03:58:32 +01:00
|
|
|
pt = get_coef_context(nb, token_cache, c);
|
2014-03-03 20:49:13 +01:00
|
|
|
cost += (*token_costs)[0][pt][EOB_TOKEN];
|
|
|
|
}
|
2013-04-22 19:58:49 +02:00
|
|
|
}
|
2012-08-06 20:22:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return cost;
|
|
|
|
}
|
2014-09-24 15:36:34 +02:00
|
|
|
|
2016-07-25 20:05:40 +02:00
|
|
|
static INLINE int num_4x4_to_edge(int plane_4x4_dim, int mb_to_edge_dim,
|
|
|
|
int subsampling_dim, int blk_dim) {
|
|
|
|
return plane_4x4_dim + (mb_to_edge_dim >> (5 + subsampling_dim)) - blk_dim;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compute the pixel domain sum square error on all visible 4x4s in the
|
|
|
|
// transform block.
|
|
|
|
static unsigned pixel_sse(const VP9_COMP *const cpi, const MACROBLOCKD *xd,
|
|
|
|
const struct macroblockd_plane *const pd,
|
|
|
|
const uint8_t *src, const int src_stride,
|
2016-07-27 05:43:23 +02:00
|
|
|
const uint8_t *dst, const int dst_stride, int blk_row,
|
|
|
|
int blk_col, const BLOCK_SIZE plane_bsize,
|
2016-07-25 20:05:40 +02:00
|
|
|
const BLOCK_SIZE tx_bsize) {
|
|
|
|
unsigned int sse = 0;
|
|
|
|
const int plane_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
|
|
|
|
const int plane_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
|
|
|
|
const int tx_4x4_w = num_4x4_blocks_wide_lookup[tx_bsize];
|
|
|
|
const int tx_4x4_h = num_4x4_blocks_high_lookup[tx_bsize];
|
|
|
|
int b4x4s_to_right_edge = num_4x4_to_edge(plane_4x4_w, xd->mb_to_right_edge,
|
|
|
|
pd->subsampling_x, blk_col);
|
|
|
|
int b4x4s_to_bottom_edge = num_4x4_to_edge(plane_4x4_h, xd->mb_to_bottom_edge,
|
|
|
|
pd->subsampling_y, blk_row);
|
|
|
|
if (tx_bsize == BLOCK_4X4 ||
|
|
|
|
(b4x4s_to_right_edge >= tx_4x4_w && b4x4s_to_bottom_edge >= tx_4x4_h)) {
|
|
|
|
cpi->fn_ptr[tx_bsize].vf(src, src_stride, dst, dst_stride, &sse);
|
|
|
|
} else {
|
|
|
|
const vpx_variance_fn_t vf_4x4 = cpi->fn_ptr[BLOCK_4X4].vf;
|
|
|
|
int r, c;
|
|
|
|
unsigned this_sse = 0;
|
|
|
|
int max_r = VPXMIN(b4x4s_to_bottom_edge, tx_4x4_h);
|
|
|
|
int max_c = VPXMIN(b4x4s_to_right_edge, tx_4x4_w);
|
|
|
|
sse = 0;
|
|
|
|
// if we are in the unrestricted motion border.
|
|
|
|
for (r = 0; r < max_r; ++r) {
|
|
|
|
// Skip visiting the sub blocks that are wholly within the UMV.
|
|
|
|
for (c = 0; c < max_c; ++c) {
|
|
|
|
vf_4x4(src + r * src_stride * 4 + c * 4, src_stride,
|
2016-07-27 05:43:23 +02:00
|
|
|
dst + r * dst_stride * 4 + c * 4, dst_stride, &this_sse);
|
2016-07-25 20:05:40 +02:00
|
|
|
sse += this_sse;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return sse;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compute the squares sum squares on all visible 4x4s in the transform block.
|
2016-07-27 18:38:07 +02:00
|
|
|
static int64_t sum_squares_visible(const MACROBLOCKD *xd,
|
|
|
|
const struct macroblockd_plane *const pd,
|
|
|
|
const int16_t *diff, const int diff_stride,
|
|
|
|
int blk_row, int blk_col,
|
|
|
|
const BLOCK_SIZE plane_bsize,
|
|
|
|
const BLOCK_SIZE tx_bsize) {
|
|
|
|
int64_t sse;
|
2016-07-25 20:05:40 +02:00
|
|
|
const int plane_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
|
|
|
|
const int plane_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
|
|
|
|
const int tx_4x4_w = num_4x4_blocks_wide_lookup[tx_bsize];
|
|
|
|
const int tx_4x4_h = num_4x4_blocks_high_lookup[tx_bsize];
|
|
|
|
int b4x4s_to_right_edge = num_4x4_to_edge(plane_4x4_w, xd->mb_to_right_edge,
|
|
|
|
pd->subsampling_x, blk_col);
|
|
|
|
int b4x4s_to_bottom_edge = num_4x4_to_edge(plane_4x4_h, xd->mb_to_bottom_edge,
|
|
|
|
pd->subsampling_y, blk_row);
|
|
|
|
if (tx_bsize == BLOCK_4X4 ||
|
|
|
|
(b4x4s_to_right_edge >= tx_4x4_w && b4x4s_to_bottom_edge >= tx_4x4_h)) {
|
2017-03-22 07:29:12 +01:00
|
|
|
assert(tx_4x4_w == tx_4x4_h);
|
|
|
|
sse = (int64_t)vpx_sum_squares_2d_i16(diff, diff_stride, tx_4x4_w << 2);
|
2016-07-25 20:05:40 +02:00
|
|
|
} else {
|
|
|
|
int r, c;
|
|
|
|
int max_r = VPXMIN(b4x4s_to_bottom_edge, tx_4x4_h);
|
|
|
|
int max_c = VPXMIN(b4x4s_to_right_edge, tx_4x4_w);
|
|
|
|
sse = 0;
|
|
|
|
// if we are in the unrestricted motion border.
|
|
|
|
for (r = 0; r < max_r; ++r) {
|
|
|
|
// Skip visiting the sub blocks that are wholly within the UMV.
|
|
|
|
for (c = 0; c < max_c; ++c) {
|
2017-03-22 07:29:12 +01:00
|
|
|
sse += (int64_t)vpx_sum_squares_2d_i16(
|
|
|
|
diff + r * diff_stride * 4 + c * 4, diff_stride, 4);
|
2016-07-25 20:05:40 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return sse;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void dist_block(const VP9_COMP *cpi, MACROBLOCK *x, int plane,
|
2016-07-27 05:43:23 +02:00
|
|
|
BLOCK_SIZE plane_bsize, int block, int blk_row,
|
|
|
|
int blk_col, TX_SIZE tx_size, int64_t *out_dist,
|
|
|
|
int64_t *out_sse) {
|
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
2014-03-03 21:19:51 +01:00
|
|
|
const struct macroblock_plane *const p = &x->plane[plane];
|
|
|
|
const struct macroblockd_plane *const pd = &xd->plane[plane];
|
2016-07-02 01:02:41 +02:00
|
|
|
|
2016-08-18 15:15:25 +02:00
|
|
|
if (x->block_tx_domain) {
|
2016-07-02 01:02:41 +02:00
|
|
|
const int ss_txfrm_size = tx_size << 1;
|
|
|
|
int64_t this_sse;
|
|
|
|
const int shift = tx_size == TX_32X32 ? 0 : 2;
|
|
|
|
const tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
|
|
|
|
const tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
|
2014-09-24 15:36:34 +02:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
2016-07-02 01:02:41 +02:00
|
|
|
const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
|
|
|
|
*out_dist = vp9_highbd_block_error_dispatch(
|
|
|
|
coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse, bd) >>
|
|
|
|
shift;
|
2014-09-24 15:36:34 +02:00
|
|
|
#else
|
2016-07-02 01:02:41 +02:00
|
|
|
*out_dist =
|
|
|
|
vp9_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse) >>
|
|
|
|
shift;
|
2014-09-24 15:36:34 +02:00
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
2016-07-02 01:02:41 +02:00
|
|
|
*out_sse = this_sse >> shift;
|
2013-07-09 01:48:47 +02:00
|
|
|
|
2016-07-02 01:02:41 +02:00
|
|
|
if (x->skip_encode && !is_inter_block(xd->mi[0])) {
|
|
|
|
// TODO(jingning): tune the model to better capture the distortion.
|
|
|
|
const int64_t p =
|
|
|
|
(pd->dequant[1] * pd->dequant[1] * (1 << ss_txfrm_size)) >>
|
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
2016-07-27 05:43:23 +02:00
|
|
|
(shift + 2 + (bd - 8) * 2);
|
2016-07-02 01:02:41 +02:00
|
|
|
#else
|
2016-07-27 05:43:23 +02:00
|
|
|
(shift + 2);
|
2016-07-02 01:02:41 +02:00
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
*out_dist += (p >> 4);
|
|
|
|
*out_sse += p;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
|
|
|
|
const int bs = 4 * num_4x4_blocks_wide_lookup[tx_bsize];
|
|
|
|
const int src_stride = p->src.stride;
|
|
|
|
const int dst_stride = pd->dst.stride;
|
|
|
|
const int src_idx = 4 * (blk_row * src_stride + blk_col);
|
|
|
|
const int dst_idx = 4 * (blk_row * dst_stride + blk_col);
|
|
|
|
const uint8_t *src = &p->src.buf[src_idx];
|
|
|
|
const uint8_t *dst = &pd->dst.buf[dst_idx];
|
|
|
|
const tran_low_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
|
|
|
|
const uint16_t *eob = &p->eobs[block];
|
|
|
|
unsigned int tmp;
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
tmp = pixel_sse(cpi, xd, pd, src, src_stride, dst, dst_stride, blk_row,
|
|
|
|
blk_col, plane_bsize, tx_bsize);
|
2016-07-02 01:02:41 +02:00
|
|
|
*out_sse = (int64_t)tmp * 16;
|
|
|
|
|
|
|
|
if (*eob) {
|
2014-09-24 15:36:34 +02:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
2016-07-02 01:02:41 +02:00
|
|
|
DECLARE_ALIGNED(16, uint16_t, recon16[1024]);
|
|
|
|
uint8_t *recon = (uint8_t *)recon16;
|
2015-07-31 02:52:55 +02:00
|
|
|
#else
|
2016-07-02 01:02:41 +02:00
|
|
|
DECLARE_ALIGNED(16, uint8_t, recon[1024]);
|
2014-09-24 15:36:34 +02:00
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
2016-07-02 01:02:41 +02:00
|
|
|
|
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
|
2017-04-19 22:08:25 +02:00
|
|
|
vpx_highbd_convolve_copy(CONVERT_TO_SHORTPTR(dst), dst_stride, recon16,
|
|
|
|
32, NULL, 0, NULL, 0, bs, bs, xd->bd);
|
2016-07-02 01:02:41 +02:00
|
|
|
if (xd->lossless) {
|
2017-05-03 22:32:08 +02:00
|
|
|
vp9_highbd_iwht4x4_add(dqcoeff, recon16, 32, *eob, xd->bd);
|
2016-07-02 01:02:41 +02:00
|
|
|
} else {
|
|
|
|
switch (tx_size) {
|
|
|
|
case TX_4X4:
|
2017-05-03 22:32:08 +02:00
|
|
|
vp9_highbd_idct4x4_add(dqcoeff, recon16, 32, *eob, xd->bd);
|
2016-07-02 01:02:41 +02:00
|
|
|
break;
|
|
|
|
case TX_8X8:
|
2017-05-03 22:32:08 +02:00
|
|
|
vp9_highbd_idct8x8_add(dqcoeff, recon16, 32, *eob, xd->bd);
|
2016-07-02 01:02:41 +02:00
|
|
|
break;
|
|
|
|
case TX_16X16:
|
2017-05-03 22:32:08 +02:00
|
|
|
vp9_highbd_idct16x16_add(dqcoeff, recon16, 32, *eob, xd->bd);
|
2016-07-02 01:02:41 +02:00
|
|
|
break;
|
|
|
|
case TX_32X32:
|
2017-05-03 22:32:08 +02:00
|
|
|
vp9_highbd_idct32x32_add(dqcoeff, recon16, 32, *eob, xd->bd);
|
2016-07-02 01:02:41 +02:00
|
|
|
break;
|
2016-07-27 05:43:23 +02:00
|
|
|
default: assert(0 && "Invalid transform size");
|
2016-07-02 01:02:41 +02:00
|
|
|
}
|
|
|
|
}
|
2017-05-02 19:44:12 +02:00
|
|
|
recon = CONVERT_TO_BYTEPTR(recon16);
|
2016-07-02 01:02:41 +02:00
|
|
|
} else {
|
2016-07-27 05:43:23 +02:00
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
2016-07-02 01:02:41 +02:00
|
|
|
vpx_convolve_copy(dst, dst_stride, recon, 32, NULL, 0, NULL, 0, bs, bs);
|
|
|
|
switch (tx_size) {
|
2016-07-27 05:43:23 +02:00
|
|
|
case TX_32X32: vp9_idct32x32_add(dqcoeff, recon, 32, *eob); break;
|
|
|
|
case TX_16X16: vp9_idct16x16_add(dqcoeff, recon, 32, *eob); break;
|
|
|
|
case TX_8X8: vp9_idct8x8_add(dqcoeff, recon, 32, *eob); break;
|
2016-07-02 01:02:41 +02:00
|
|
|
case TX_4X4:
|
|
|
|
// this is like vp9_short_idct4x4 but has a special case around
|
|
|
|
// eob<=1, which is significant (not just an optimization) for
|
|
|
|
// the lossless case.
|
2017-07-01 00:29:46 +02:00
|
|
|
x->inv_txfm_add(dqcoeff, recon, 32, *eob);
|
2016-07-02 01:02:41 +02:00
|
|
|
break;
|
2016-07-27 05:43:23 +02:00
|
|
|
default: assert(0 && "Invalid transform size"); break;
|
2016-07-02 01:02:41 +02:00
|
|
|
}
|
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
}
|
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
tmp = pixel_sse(cpi, xd, pd, src, src_stride, recon, 32, blk_row, blk_col,
|
|
|
|
plane_bsize, tx_bsize);
|
2016-07-02 01:02:41 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
*out_dist = (int64_t)tmp * 16;
|
2013-07-09 01:48:47 +02:00
|
|
|
}
|
Tx size selection enhancements
(1) Refines the modeling function and uses that to add some speed
features. Specifically, intead of using a flag use_largest_txfm as
a speed feature, an enum tx_size_search_method is used, of which
two of the types are USE_FULL_RD and USE_LARGESTALL. Two other
new types are added:
USE_LARGESTINTRA (use largest only for intra)
USE_LARGESTINTRA_MODELINTER (use largest for intra, and model for
inter)
(2) Another change is that the framework for deciding transform type
is simplified to use a heuristic count based method rather than
an rd based method using txfm_cache. In practice the new method
is found to work just as well - with derf only -0.01 down.
The new method is more compatible with the new framework where
certain rd costs are based on full rd and certain others are
based on modeled rd or are not computed. In this patch the existing
rd based method is still kept for use in the USE_FULL_RD mode.
In the other modes, the count based method is used.
However the recommendation is to remove it eventually since the
benefit is limited, and will remove a lot of complications in
the code
(3) Finally a bug is fixed with the existing use_largest_txfm speed feature
that causes mismatches when the lossless mode and 4x4 WH transform is
forced.
Results on derf:
USE_FULL_RD: +0.03% (due to change in the tables), 0% encode time reduction
USE_LARGESTINTRA: -0.21%, 15% encode time reduction (this one is a
pretty good compromise)
USE_LARGESTINTRA_MODELINTER: -0.98%, 22% encode time reduction
(currently the benefit of modeling is limited for txfm size selection,
but keeping this enum as a placeholder) .
USE_LARGESTALL: -1.05%, 27% encode-time reduction (same as existing
use_largest_txfm speed feature).
Change-Id: I4d60a5f9ce78fbc90cddf2f97ed91d8bc0d4f936
2013-06-22 01:31:12 +02:00
|
|
|
}
|
|
|
|
|
2016-07-06 18:58:22 +02:00
|
|
|
static int rate_block(int plane, int block, TX_SIZE tx_size, int coeff_ctx,
|
2016-07-27 05:43:23 +02:00
|
|
|
struct rdcost_block_args *args) {
|
|
|
|
return cost_coeffs(args->x, plane, block, tx_size, coeff_ctx, args->so->scan,
|
|
|
|
args->so->neighbors, args->use_fast_coef_costing);
|
Tx size selection enhancements
(1) Refines the modeling function and uses that to add some speed
features. Specifically, intead of using a flag use_largest_txfm as
a speed feature, an enum tx_size_search_method is used, of which
two of the types are USE_FULL_RD and USE_LARGESTALL. Two other
new types are added:
USE_LARGESTINTRA (use largest only for intra)
USE_LARGESTINTRA_MODELINTER (use largest for intra, and model for
inter)
(2) Another change is that the framework for deciding transform type
is simplified to use a heuristic count based method rather than
an rd based method using txfm_cache. In practice the new method
is found to work just as well - with derf only -0.01 down.
The new method is more compatible with the new framework where
certain rd costs are based on full rd and certain others are
based on modeled rd or are not computed. In this patch the existing
rd based method is still kept for use in the USE_FULL_RD mode.
In the other modes, the count based method is used.
However the recommendation is to remove it eventually since the
benefit is limited, and will remove a lot of complications in
the code
(3) Finally a bug is fixed with the existing use_largest_txfm speed feature
that causes mismatches when the lossless mode and 4x4 WH transform is
forced.
Results on derf:
USE_FULL_RD: +0.03% (due to change in the tables), 0% encode time reduction
USE_LARGESTINTRA: -0.21%, 15% encode time reduction (this one is a
pretty good compromise)
USE_LARGESTINTRA_MODELINTER: -0.98%, 22% encode time reduction
(currently the benefit of modeling is limited for txfm size selection,
but keeping this enum as a placeholder) .
USE_LARGESTALL: -1.05%, 27% encode-time reduction (same as existing
use_largest_txfm speed feature).
Change-Id: I4d60a5f9ce78fbc90cddf2f97ed91d8bc0d4f936
2013-06-22 01:31:12 +02:00
|
|
|
}
|
|
|
|
|
2016-07-02 01:02:41 +02:00
|
|
|
static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
|
|
|
|
BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
|
Tx size selection enhancements
(1) Refines the modeling function and uses that to add some speed
features. Specifically, intead of using a flag use_largest_txfm as
a speed feature, an enum tx_size_search_method is used, of which
two of the types are USE_FULL_RD and USE_LARGESTALL. Two other
new types are added:
USE_LARGESTINTRA (use largest only for intra)
USE_LARGESTINTRA_MODELINTER (use largest for intra, and model for
inter)
(2) Another change is that the framework for deciding transform type
is simplified to use a heuristic count based method rather than
an rd based method using txfm_cache. In practice the new method
is found to work just as well - with derf only -0.01 down.
The new method is more compatible with the new framework where
certain rd costs are based on full rd and certain others are
based on modeled rd or are not computed. In this patch the existing
rd based method is still kept for use in the USE_FULL_RD mode.
In the other modes, the count based method is used.
However the recommendation is to remove it eventually since the
benefit is limited, and will remove a lot of complications in
the code
(3) Finally a bug is fixed with the existing use_largest_txfm speed feature
that causes mismatches when the lossless mode and 4x4 WH transform is
forced.
Results on derf:
USE_FULL_RD: +0.03% (due to change in the tables), 0% encode time reduction
USE_LARGESTINTRA: -0.21%, 15% encode time reduction (this one is a
pretty good compromise)
USE_LARGESTINTRA_MODELINTER: -0.98%, 22% encode time reduction
(currently the benefit of modeling is limited for txfm size selection,
but keeping this enum as a placeholder) .
USE_LARGESTALL: -1.05%, 27% encode-time reduction (same as existing
use_largest_txfm speed feature).
Change-Id: I4d60a5f9ce78fbc90cddf2f97ed91d8bc0d4f936
2013-06-22 01:31:12 +02:00
|
|
|
struct rdcost_block_args *args = arg;
|
|
|
|
MACROBLOCK *const x = args->x;
|
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO *const mi = xd->mi[0];
|
2013-07-17 18:56:46 +02:00
|
|
|
int64_t rd1, rd2, rd;
|
2015-07-30 21:36:57 +02:00
|
|
|
int rate;
|
|
|
|
int64_t dist;
|
|
|
|
int64_t sse;
|
2016-07-27 05:43:23 +02:00
|
|
|
const int coeff_ctx =
|
|
|
|
combine_entropy_contexts(args->t_left[blk_row], args->t_above[blk_col]);
|
Tx size selection enhancements
(1) Refines the modeling function and uses that to add some speed
features. Specifically, intead of using a flag use_largest_txfm as
a speed feature, an enum tx_size_search_method is used, of which
two of the types are USE_FULL_RD and USE_LARGESTALL. Two other
new types are added:
USE_LARGESTINTRA (use largest only for intra)
USE_LARGESTINTRA_MODELINTER (use largest for intra, and model for
inter)
(2) Another change is that the framework for deciding transform type
is simplified to use a heuristic count based method rather than
an rd based method using txfm_cache. In practice the new method
is found to work just as well - with derf only -0.01 down.
The new method is more compatible with the new framework where
certain rd costs are based on full rd and certain others are
based on modeled rd or are not computed. In this patch the existing
rd based method is still kept for use in the USE_FULL_RD mode.
In the other modes, the count based method is used.
However the recommendation is to remove it eventually since the
benefit is limited, and will remove a lot of complications in
the code
(3) Finally a bug is fixed with the existing use_largest_txfm speed feature
that causes mismatches when the lossless mode and 4x4 WH transform is
forced.
Results on derf:
USE_FULL_RD: +0.03% (due to change in the tables), 0% encode time reduction
USE_LARGESTINTRA: -0.21%, 15% encode time reduction (this one is a
pretty good compromise)
USE_LARGESTINTRA_MODELINTER: -0.98%, 22% encode time reduction
(currently the benefit of modeling is limited for txfm size selection,
but keeping this enum as a placeholder) .
USE_LARGESTALL: -1.05%, 27% encode-time reduction (same as existing
use_largest_txfm speed feature).
Change-Id: I4d60a5f9ce78fbc90cddf2f97ed91d8bc0d4f936
2013-06-22 01:31:12 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (args->exit_early) return;
|
2013-07-03 01:48:15 +02:00
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
if (!is_inter_block(mi)) {
|
2016-08-18 15:15:25 +02:00
|
|
|
struct encode_b_args intra_arg = { x, x->block_qcoeff_opt, args->t_above,
|
|
|
|
args->t_left, &mi->skip };
|
2016-07-02 01:02:41 +02:00
|
|
|
vp9_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize, tx_size,
|
2016-07-06 19:05:51 +02:00
|
|
|
&intra_arg);
|
2016-08-18 15:15:25 +02:00
|
|
|
if (x->block_tx_domain) {
|
2016-07-25 20:05:40 +02:00
|
|
|
dist_block(args->cpi, x, plane, plane_bsize, block, blk_row, blk_col,
|
|
|
|
tx_size, &dist, &sse);
|
2016-07-02 01:02:41 +02:00
|
|
|
} else {
|
|
|
|
const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size];
|
|
|
|
const struct macroblock_plane *const p = &x->plane[plane];
|
|
|
|
const struct macroblockd_plane *const pd = &xd->plane[plane];
|
|
|
|
const int src_stride = p->src.stride;
|
|
|
|
const int dst_stride = pd->dst.stride;
|
|
|
|
const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
|
|
|
|
const uint8_t *src = &p->src.buf[4 * (blk_row * src_stride + blk_col)];
|
|
|
|
const uint8_t *dst = &pd->dst.buf[4 * (blk_row * dst_stride + blk_col)];
|
|
|
|
const int16_t *diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
|
|
|
|
unsigned int tmp;
|
2016-07-27 05:43:23 +02:00
|
|
|
sse = sum_squares_visible(xd, pd, diff, diff_stride, blk_row, blk_col,
|
|
|
|
plane_bsize, tx_bsize);
|
2016-07-02 01:02:41 +02:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && (xd->bd > 8))
|
2016-08-03 23:58:46 +02:00
|
|
|
sse = ROUND64_POWER_OF_TWO(sse, (xd->bd - 8) * 2);
|
2016-07-02 01:02:41 +02:00
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
sse = sse * 16;
|
2016-07-25 20:05:40 +02:00
|
|
|
tmp = pixel_sse(args->cpi, xd, pd, src, src_stride, dst, dst_stride,
|
|
|
|
blk_row, blk_col, plane_bsize, tx_bsize);
|
2016-07-02 01:02:41 +02:00
|
|
|
dist = (int64_t)tmp * 16;
|
|
|
|
}
|
2014-08-28 18:09:37 +02:00
|
|
|
} else if (max_txsize_lookup[plane_bsize] == tx_size) {
|
2015-07-30 20:52:28 +02:00
|
|
|
if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
|
|
|
|
SKIP_TXFM_NONE) {
|
2014-08-07 07:48:37 +02:00
|
|
|
// full forward transform and quantization
|
2016-07-02 01:02:41 +02:00
|
|
|
vp9_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
|
2016-08-18 15:15:25 +02:00
|
|
|
if (x->block_qcoeff_opt)
|
2016-07-06 19:05:51 +02:00
|
|
|
vp9_optimize_b(x, plane, block, tx_size, coeff_ctx);
|
2016-07-25 20:05:40 +02:00
|
|
|
dist_block(args->cpi, x, plane, plane_bsize, block, blk_row, blk_col,
|
|
|
|
tx_size, &dist, &sse);
|
2015-07-30 20:52:28 +02:00
|
|
|
} else if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
|
|
|
|
SKIP_TXFM_AC_ONLY) {
|
2014-08-07 07:48:37 +02:00
|
|
|
// compute DC coefficient
|
2016-07-27 05:43:23 +02:00
|
|
|
tran_low_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block);
|
2014-09-03 01:34:09 +02:00
|
|
|
tran_low_t *const dqcoeff = BLOCK_OFFSET(xd->plane[plane].dqcoeff, block);
|
2016-07-02 01:02:41 +02:00
|
|
|
vp9_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize,
|
|
|
|
tx_size);
|
2016-07-27 05:43:23 +02:00
|
|
|
sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
|
2015-07-30 21:36:57 +02:00
|
|
|
dist = sse;
|
2014-10-01 20:31:34 +02:00
|
|
|
if (x->plane[plane].eobs[block]) {
|
2015-02-12 21:08:30 +01:00
|
|
|
const int64_t orig_sse = (int64_t)coeff[0] * coeff[0];
|
|
|
|
const int64_t resd_sse = coeff[0] - dqcoeff[0];
|
|
|
|
int64_t dc_correct = orig_sse - resd_sse * resd_sse;
|
2014-10-03 19:22:21 +02:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
dc_correct >>= ((xd->bd - 8) * 2);
|
|
|
|
#endif
|
2016-07-27 05:43:23 +02:00
|
|
|
if (tx_size != TX_32X32) dc_correct >>= 2;
|
2014-10-01 20:31:34 +02:00
|
|
|
|
2015-08-18 03:19:22 +02:00
|
|
|
dist = VPXMAX(0, sse - dc_correct);
|
2014-10-01 20:31:34 +02:00
|
|
|
}
|
2014-08-07 07:48:37 +02:00
|
|
|
} else {
|
2015-07-30 20:52:28 +02:00
|
|
|
// SKIP_TXFM_AC_DC
|
2014-08-07 07:48:37 +02:00
|
|
|
// skip forward transform
|
|
|
|
x->plane[plane].eobs[block] = 0;
|
2016-07-27 05:43:23 +02:00
|
|
|
sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
|
2015-07-30 21:36:57 +02:00
|
|
|
dist = sse;
|
2014-08-07 07:48:37 +02:00
|
|
|
}
|
2014-08-28 18:09:37 +02:00
|
|
|
} else {
|
|
|
|
// full forward transform and quantization
|
2016-07-02 01:02:41 +02:00
|
|
|
vp9_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
|
2016-08-18 15:15:25 +02:00
|
|
|
if (x->block_qcoeff_opt)
|
2016-07-06 19:05:51 +02:00
|
|
|
vp9_optimize_b(x, plane, block, tx_size, coeff_ctx);
|
2016-07-25 20:05:40 +02:00
|
|
|
dist_block(args->cpi, x, plane, plane_bsize, block, blk_row, blk_col,
|
|
|
|
tx_size, &dist, &sse);
|
2014-08-07 07:48:37 +02:00
|
|
|
}
|
Tx size selection enhancements
(1) Refines the modeling function and uses that to add some speed
features. Specifically, intead of using a flag use_largest_txfm as
a speed feature, an enum tx_size_search_method is used, of which
two of the types are USE_FULL_RD and USE_LARGESTALL. Two other
new types are added:
USE_LARGESTINTRA (use largest only for intra)
USE_LARGESTINTRA_MODELINTER (use largest for intra, and model for
inter)
(2) Another change is that the framework for deciding transform type
is simplified to use a heuristic count based method rather than
an rd based method using txfm_cache. In practice the new method
is found to work just as well - with derf only -0.01 down.
The new method is more compatible with the new framework where
certain rd costs are based on full rd and certain others are
based on modeled rd or are not computed. In this patch the existing
rd based method is still kept for use in the USE_FULL_RD mode.
In the other modes, the count based method is used.
However the recommendation is to remove it eventually since the
benefit is limited, and will remove a lot of complications in
the code
(3) Finally a bug is fixed with the existing use_largest_txfm speed feature
that causes mismatches when the lossless mode and 4x4 WH transform is
forced.
Results on derf:
USE_FULL_RD: +0.03% (due to change in the tables), 0% encode time reduction
USE_LARGESTINTRA: -0.21%, 15% encode time reduction (this one is a
pretty good compromise)
USE_LARGESTINTRA_MODELINTER: -0.98%, 22% encode time reduction
(currently the benefit of modeling is limited for txfm size selection,
but keeping this enum as a placeholder) .
USE_LARGESTALL: -1.05%, 27% encode-time reduction (same as existing
use_largest_txfm speed feature).
Change-Id: I4d60a5f9ce78fbc90cddf2f97ed91d8bc0d4f936
2013-06-22 01:31:12 +02:00
|
|
|
|
2015-07-31 00:33:47 +02:00
|
|
|
rd = RDCOST(x->rdmult, x->rddiv, 0, dist);
|
|
|
|
if (args->this_rd + rd > args->best_rd) {
|
|
|
|
args->exit_early = 1;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2016-07-06 18:58:22 +02:00
|
|
|
rate = rate_block(plane, block, tx_size, coeff_ctx, args);
|
|
|
|
args->t_above[blk_col] = (x->plane[plane].eobs[block] > 0) ? 1 : 0;
|
|
|
|
args->t_left[blk_row] = (x->plane[plane].eobs[block] > 0) ? 1 : 0;
|
2015-07-30 21:36:57 +02:00
|
|
|
rd1 = RDCOST(x->rdmult, x->rddiv, rate, dist);
|
|
|
|
rd2 = RDCOST(x->rdmult, x->rddiv, 0, sse);
|
2013-09-21 01:29:24 +02:00
|
|
|
|
|
|
|
// TODO(jingning): temporarily enabled only for luma component
|
2015-08-18 03:19:22 +02:00
|
|
|
rd = VPXMIN(rd1, rd2);
|
2017-03-16 23:45:07 +01:00
|
|
|
if (plane == 0) {
|
2016-07-27 05:43:23 +02:00
|
|
|
x->zcoeff_blk[tx_size][block] =
|
|
|
|
!x->plane[plane].eobs[block] || (rd1 > rd2 && !xd->lossless);
|
2017-03-16 23:45:07 +01:00
|
|
|
x->sum_y_eobs[tx_size] += x->plane[plane].eobs[block];
|
|
|
|
}
|
2013-09-21 01:29:24 +02:00
|
|
|
|
2015-07-30 21:36:57 +02:00
|
|
|
args->this_rate += rate;
|
|
|
|
args->this_dist += dist;
|
|
|
|
args->this_sse += sse;
|
2013-09-20 20:30:46 +02:00
|
|
|
args->this_rd += rd;
|
|
|
|
|
|
|
|
if (args->this_rd > args->best_rd) {
|
2015-07-30 21:36:57 +02:00
|
|
|
args->exit_early = 1;
|
2013-09-20 20:30:46 +02:00
|
|
|
return;
|
|
|
|
}
|
2015-07-31 02:39:23 +02:00
|
|
|
|
|
|
|
args->skippable &= !x->plane[plane].eobs[block];
|
Tx size selection enhancements
(1) Refines the modeling function and uses that to add some speed
features. Specifically, intead of using a flag use_largest_txfm as
a speed feature, an enum tx_size_search_method is used, of which
two of the types are USE_FULL_RD and USE_LARGESTALL. Two other
new types are added:
USE_LARGESTINTRA (use largest only for intra)
USE_LARGESTINTRA_MODELINTER (use largest for intra, and model for
inter)
(2) Another change is that the framework for deciding transform type
is simplified to use a heuristic count based method rather than
an rd based method using txfm_cache. In practice the new method
is found to work just as well - with derf only -0.01 down.
The new method is more compatible with the new framework where
certain rd costs are based on full rd and certain others are
based on modeled rd or are not computed. In this patch the existing
rd based method is still kept for use in the USE_FULL_RD mode.
In the other modes, the count based method is used.
However the recommendation is to remove it eventually since the
benefit is limited, and will remove a lot of complications in
the code
(3) Finally a bug is fixed with the existing use_largest_txfm speed feature
that causes mismatches when the lossless mode and 4x4 WH transform is
forced.
Results on derf:
USE_FULL_RD: +0.03% (due to change in the tables), 0% encode time reduction
USE_LARGESTINTRA: -0.21%, 15% encode time reduction (this one is a
pretty good compromise)
USE_LARGESTINTRA_MODELINTER: -0.98%, 22% encode time reduction
(currently the benefit of modeling is limited for txfm size selection,
but keeping this enum as a placeholder) .
USE_LARGESTALL: -1.05%, 27% encode-time reduction (same as existing
use_largest_txfm speed feature).
Change-Id: I4d60a5f9ce78fbc90cddf2f97ed91d8bc0d4f936
2013-06-22 01:31:12 +02:00
|
|
|
}
|
|
|
|
|
2016-07-02 01:02:41 +02:00
|
|
|
static void txfm_rd_in_plane(const VP9_COMP *cpi, MACROBLOCK *x, int *rate,
|
|
|
|
int64_t *distortion, int *skippable, int64_t *sse,
|
|
|
|
int64_t ref_best_rd, int plane, BLOCK_SIZE bsize,
|
|
|
|
TX_SIZE tx_size, int use_fast_coef_casting) {
|
Tx size selection enhancements
(1) Refines the modeling function and uses that to add some speed
features. Specifically, intead of using a flag use_largest_txfm as
a speed feature, an enum tx_size_search_method is used, of which
two of the types are USE_FULL_RD and USE_LARGESTALL. Two other
new types are added:
USE_LARGESTINTRA (use largest only for intra)
USE_LARGESTINTRA_MODELINTER (use largest for intra, and model for
inter)
(2) Another change is that the framework for deciding transform type
is simplified to use a heuristic count based method rather than
an rd based method using txfm_cache. In practice the new method
is found to work just as well - with derf only -0.01 down.
The new method is more compatible with the new framework where
certain rd costs are based on full rd and certain others are
based on modeled rd or are not computed. In this patch the existing
rd based method is still kept for use in the USE_FULL_RD mode.
In the other modes, the count based method is used.
However the recommendation is to remove it eventually since the
benefit is limited, and will remove a lot of complications in
the code
(3) Finally a bug is fixed with the existing use_largest_txfm speed feature
that causes mismatches when the lossless mode and 4x4 WH transform is
forced.
Results on derf:
USE_FULL_RD: +0.03% (due to change in the tables), 0% encode time reduction
USE_LARGESTINTRA: -0.21%, 15% encode time reduction (this one is a
pretty good compromise)
USE_LARGESTINTRA_MODELINTER: -0.98%, 22% encode time reduction
(currently the benefit of modeling is limited for txfm size selection,
but keeping this enum as a placeholder) .
USE_LARGESTALL: -1.05%, 27% encode-time reduction (same as existing
use_largest_txfm speed feature).
Change-Id: I4d60a5f9ce78fbc90cddf2f97ed91d8bc0d4f936
2013-06-22 01:31:12 +02:00
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
2014-03-03 21:19:51 +01:00
|
|
|
const struct macroblockd_plane *const pd = &xd->plane[plane];
|
2014-05-13 20:18:25 +02:00
|
|
|
struct rdcost_block_args args;
|
|
|
|
vp9_zero(args);
|
2016-07-02 01:02:41 +02:00
|
|
|
args.cpi = cpi;
|
2014-02-12 03:08:06 +01:00
|
|
|
args.x = x;
|
|
|
|
args.best_rd = ref_best_rd;
|
2014-03-03 20:49:13 +01:00
|
|
|
args.use_fast_coef_costing = use_fast_coef_casting;
|
2015-07-31 02:39:23 +02:00
|
|
|
args.skippable = 1;
|
2013-09-25 02:26:05 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (plane == 0) xd->mi[0]->tx_size = tx_size;
|
2013-08-14 03:58:21 +02:00
|
|
|
|
2014-02-27 01:20:17 +01:00
|
|
|
vp9_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
|
2013-10-04 01:07:26 +02:00
|
|
|
|
2015-09-29 19:40:27 +02:00
|
|
|
args.so = get_scan(xd, tx_size, get_plane_type(plane), 0);
|
Tx size selection enhancements
(1) Refines the modeling function and uses that to add some speed
features. Specifically, intead of using a flag use_largest_txfm as
a speed feature, an enum tx_size_search_method is used, of which
two of the types are USE_FULL_RD and USE_LARGESTALL. Two other
new types are added:
USE_LARGESTINTRA (use largest only for intra)
USE_LARGESTINTRA_MODELINTER (use largest for intra, and model for
inter)
(2) Another change is that the framework for deciding transform type
is simplified to use a heuristic count based method rather than
an rd based method using txfm_cache. In practice the new method
is found to work just as well - with derf only -0.01 down.
The new method is more compatible with the new framework where
certain rd costs are based on full rd and certain others are
based on modeled rd or are not computed. In this patch the existing
rd based method is still kept for use in the USE_FULL_RD mode.
In the other modes, the count based method is used.
However the recommendation is to remove it eventually since the
benefit is limited, and will remove a lot of complications in
the code
(3) Finally a bug is fixed with the existing use_largest_txfm speed feature
that causes mismatches when the lossless mode and 4x4 WH transform is
forced.
Results on derf:
USE_FULL_RD: +0.03% (due to change in the tables), 0% encode time reduction
USE_LARGESTINTRA: -0.21%, 15% encode time reduction (this one is a
pretty good compromise)
USE_LARGESTINTRA_MODELINTER: -0.98%, 22% encode time reduction
(currently the benefit of modeling is limited for txfm size selection,
but keeping this enum as a placeholder) .
USE_LARGESTALL: -1.05%, 27% encode-time reduction (same as existing
use_largest_txfm speed feature).
Change-Id: I4d60a5f9ce78fbc90cddf2f97ed91d8bc0d4f936
2013-06-22 01:31:12 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
vp9_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm,
|
|
|
|
&args);
|
2015-07-30 21:36:57 +02:00
|
|
|
if (args.exit_early) {
|
2016-07-27 05:43:23 +02:00
|
|
|
*rate = INT_MAX;
|
2013-09-20 20:30:46 +02:00
|
|
|
*distortion = INT64_MAX;
|
2016-07-27 05:43:23 +02:00
|
|
|
*sse = INT64_MAX;
|
|
|
|
*skippable = 0;
|
2013-09-20 20:30:46 +02:00
|
|
|
} else {
|
2014-02-12 03:08:06 +01:00
|
|
|
*distortion = args.this_dist;
|
2016-07-27 05:43:23 +02:00
|
|
|
*rate = args.this_rate;
|
|
|
|
*sse = args.this_sse;
|
|
|
|
*skippable = args.skippable;
|
2013-09-20 20:30:46 +02:00
|
|
|
}
|
Tx size selection enhancements
(1) Refines the modeling function and uses that to add some speed
features. Specifically, intead of using a flag use_largest_txfm as
a speed feature, an enum tx_size_search_method is used, of which
two of the types are USE_FULL_RD and USE_LARGESTALL. Two other
new types are added:
USE_LARGESTINTRA (use largest only for intra)
USE_LARGESTINTRA_MODELINTER (use largest for intra, and model for
inter)
(2) Another change is that the framework for deciding transform type
is simplified to use a heuristic count based method rather than
an rd based method using txfm_cache. In practice the new method
is found to work just as well - with derf only -0.01 down.
The new method is more compatible with the new framework where
certain rd costs are based on full rd and certain others are
based on modeled rd or are not computed. In this patch the existing
rd based method is still kept for use in the USE_FULL_RD mode.
In the other modes, the count based method is used.
However the recommendation is to remove it eventually since the
benefit is limited, and will remove a lot of complications in
the code
(3) Finally a bug is fixed with the existing use_largest_txfm speed feature
that causes mismatches when the lossless mode and 4x4 WH transform is
forced.
Results on derf:
USE_FULL_RD: +0.03% (due to change in the tables), 0% encode time reduction
USE_LARGESTINTRA: -0.21%, 15% encode time reduction (this one is a
pretty good compromise)
USE_LARGESTINTRA_MODELINTER: -0.98%, 22% encode time reduction
(currently the benefit of modeling is limited for txfm size selection,
but keeping this enum as a placeholder) .
USE_LARGESTALL: -1.05%, 27% encode-time reduction (same as existing
use_largest_txfm speed feature).
Change-Id: I4d60a5f9ce78fbc90cddf2f97ed91d8bc0d4f936
2013-06-22 01:31:12 +02:00
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
static void choose_largest_tx_size(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
|
|
|
|
int64_t *distortion, int *skip, int64_t *sse,
|
|
|
|
int64_t ref_best_rd, BLOCK_SIZE bs) {
|
2013-09-25 02:24:35 +02:00
|
|
|
const TX_SIZE max_tx_size = max_txsize_lookup[bs];
|
Tx size selection enhancements
(1) Refines the modeling function and uses that to add some speed
features. Specifically, intead of using a flag use_largest_txfm as
a speed feature, an enum tx_size_search_method is used, of which
two of the types are USE_FULL_RD and USE_LARGESTALL. Two other
new types are added:
USE_LARGESTINTRA (use largest only for intra)
USE_LARGESTINTRA_MODELINTER (use largest for intra, and model for
inter)
(2) Another change is that the framework for deciding transform type
is simplified to use a heuristic count based method rather than
an rd based method using txfm_cache. In practice the new method
is found to work just as well - with derf only -0.01 down.
The new method is more compatible with the new framework where
certain rd costs are based on full rd and certain others are
based on modeled rd or are not computed. In this patch the existing
rd based method is still kept for use in the USE_FULL_RD mode.
In the other modes, the count based method is used.
However the recommendation is to remove it eventually since the
benefit is limited, and will remove a lot of complications in
the code
(3) Finally a bug is fixed with the existing use_largest_txfm speed feature
that causes mismatches when the lossless mode and 4x4 WH transform is
forced.
Results on derf:
USE_FULL_RD: +0.03% (due to change in the tables), 0% encode time reduction
USE_LARGESTINTRA: -0.21%, 15% encode time reduction (this one is a
pretty good compromise)
USE_LARGESTINTRA_MODELINTER: -0.98%, 22% encode time reduction
(currently the benefit of modeling is limited for txfm size selection,
but keeping this enum as a placeholder) .
USE_LARGESTALL: -1.05%, 27% encode-time reduction (same as existing
use_largest_txfm speed feature).
Change-Id: I4d60a5f9ce78fbc90cddf2f97ed91d8bc0d4f936
2013-06-22 01:31:12 +02:00
|
|
|
VP9_COMMON *const cm = &cpi->common;
|
2013-10-17 20:09:14 +02:00
|
|
|
const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode];
|
Tx size selection enhancements
(1) Refines the modeling function and uses that to add some speed
features. Specifically, intead of using a flag use_largest_txfm as
a speed feature, an enum tx_size_search_method is used, of which
two of the types are USE_FULL_RD and USE_LARGESTALL. Two other
new types are added:
USE_LARGESTINTRA (use largest only for intra)
USE_LARGESTINTRA_MODELINTER (use largest for intra, and model for
inter)
(2) Another change is that the framework for deciding transform type
is simplified to use a heuristic count based method rather than
an rd based method using txfm_cache. In practice the new method
is found to work just as well - with derf only -0.01 down.
The new method is more compatible with the new framework where
certain rd costs are based on full rd and certain others are
based on modeled rd or are not computed. In this patch the existing
rd based method is still kept for use in the USE_FULL_RD mode.
In the other modes, the count based method is used.
However the recommendation is to remove it eventually since the
benefit is limited, and will remove a lot of complications in
the code
(3) Finally a bug is fixed with the existing use_largest_txfm speed feature
that causes mismatches when the lossless mode and 4x4 WH transform is
forced.
Results on derf:
USE_FULL_RD: +0.03% (due to change in the tables), 0% encode time reduction
USE_LARGESTINTRA: -0.21%, 15% encode time reduction (this one is a
pretty good compromise)
USE_LARGESTINTRA_MODELINTER: -0.98%, 22% encode time reduction
(currently the benefit of modeling is limited for txfm size selection,
but keeping this enum as a placeholder) .
USE_LARGESTALL: -1.05%, 27% encode-time reduction (same as existing
use_largest_txfm speed feature).
Change-Id: I4d60a5f9ce78fbc90cddf2f97ed91d8bc0d4f936
2013-06-22 01:31:12 +02:00
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO *const mi = xd->mi[0];
|
2013-10-17 20:09:14 +02:00
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->tx_size = VPXMIN(max_tx_size, largest_tx_size);
|
2013-10-17 20:09:14 +02:00
|
|
|
|
2016-07-02 01:02:41 +02:00
|
|
|
txfm_rd_in_plane(cpi, x, rate, distortion, skip, sse, ref_best_rd, 0, bs,
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->tx_size, cpi->sf.use_fast_coef_costing);
|
Tx size selection enhancements
(1) Refines the modeling function and uses that to add some speed
features. Specifically, intead of using a flag use_largest_txfm as
a speed feature, an enum tx_size_search_method is used, of which
two of the types are USE_FULL_RD and USE_LARGESTALL. Two other
new types are added:
USE_LARGESTINTRA (use largest only for intra)
USE_LARGESTINTRA_MODELINTER (use largest for intra, and model for
inter)
(2) Another change is that the framework for deciding transform type
is simplified to use a heuristic count based method rather than
an rd based method using txfm_cache. In practice the new method
is found to work just as well - with derf only -0.01 down.
The new method is more compatible with the new framework where
certain rd costs are based on full rd and certain others are
based on modeled rd or are not computed. In this patch the existing
rd based method is still kept for use in the USE_FULL_RD mode.
In the other modes, the count based method is used.
However the recommendation is to remove it eventually since the
benefit is limited, and will remove a lot of complications in
the code
(3) Finally a bug is fixed with the existing use_largest_txfm speed feature
that causes mismatches when the lossless mode and 4x4 WH transform is
forced.
Results on derf:
USE_FULL_RD: +0.03% (due to change in the tables), 0% encode time reduction
USE_LARGESTINTRA: -0.21%, 15% encode time reduction (this one is a
pretty good compromise)
USE_LARGESTINTRA_MODELINTER: -0.98%, 22% encode time reduction
(currently the benefit of modeling is limited for txfm size selection,
but keeping this enum as a placeholder) .
USE_LARGESTALL: -1.05%, 27% encode-time reduction (same as existing
use_largest_txfm speed feature).
Change-Id: I4d60a5f9ce78fbc90cddf2f97ed91d8bc0d4f936
2013-06-22 01:31:12 +02:00
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
static void choose_tx_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
|
|
|
|
int64_t *distortion, int *skip,
|
|
|
|
int64_t *psse, int64_t ref_best_rd,
|
2014-06-12 20:57:26 +02:00
|
|
|
BLOCK_SIZE bs) {
|
2013-08-13 01:56:32 +02:00
|
|
|
const TX_SIZE max_tx_size = max_txsize_lookup[bs];
|
2012-11-08 20:03:00 +01:00
|
|
|
VP9_COMMON *const cm = &cpi->common;
|
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO *const mi = xd->mi[0];
|
2015-07-20 22:49:15 +02:00
|
|
|
vpx_prob skip_prob = vp9_get_skip_prob(cm, xd);
|
2014-07-23 02:57:40 +02:00
|
|
|
int r[TX_SIZES][2], s[TX_SIZES];
|
|
|
|
int64_t d[TX_SIZES], sse[TX_SIZES];
|
2016-07-27 05:43:23 +02:00
|
|
|
int64_t rd[TX_SIZES][2] = { { INT64_MAX, INT64_MAX },
|
|
|
|
{ INT64_MAX, INT64_MAX },
|
|
|
|
{ INT64_MAX, INT64_MAX },
|
|
|
|
{ INT64_MAX, INT64_MAX } };
|
Add early termination in transform size search
In the full-rd transform size search, we go through all transform
sizes to choose the one with best rd score. In this patch, an
early termination is added to stop the search once we see that the
smaller size won't give better rd score than the larger size. Also,
the search starts from largest transform size, then goes down to
smallest size.
A speed feature tx_size_search_breakout is added, which is turned off
at speed 0, and on for other speeds. The transform size search is
turned on at speed 1.
Borg test results:
1. At speed 1,
derf set: psnr gain: 0.618%, ssim gain: 0.377%;
stdhd set: psnr gain: 0.594%, ssim gain: 0.162%;
No noticeable speed change.
3. At speed 2,
derf set: psnr loss: 0.157%, ssim loss: 0.175%;
stdhd set: psnr loss: 0.090%, ssim loss: 0.101%;
speed gain: ~4%.
Change-Id: I22535cd2017b5e54f2a62bb6a38231aea4268b3f
2014-08-12 23:36:42 +02:00
|
|
|
int n, m;
|
2013-04-09 19:54:19 +02:00
|
|
|
int s0, s1;
|
2013-11-25 21:45:51 +01:00
|
|
|
int64_t best_rd = INT64_MAX;
|
Add early termination in transform size search
In the full-rd transform size search, we go through all transform
sizes to choose the one with best rd score. In this patch, an
early termination is added to stop the search once we see that the
smaller size won't give better rd score than the larger size. Also,
the search starts from largest transform size, then goes down to
smallest size.
A speed feature tx_size_search_breakout is added, which is turned off
at speed 0, and on for other speeds. The transform size search is
turned on at speed 1.
Borg test results:
1. At speed 1,
derf set: psnr gain: 0.618%, ssim gain: 0.377%;
stdhd set: psnr gain: 0.594%, ssim gain: 0.162%;
No noticeable speed change.
3. At speed 2,
derf set: psnr loss: 0.157%, ssim loss: 0.175%;
stdhd set: psnr loss: 0.090%, ssim loss: 0.101%;
speed gain: ~4%.
Change-Id: I22535cd2017b5e54f2a62bb6a38231aea4268b3f
2014-08-12 23:36:42 +02:00
|
|
|
TX_SIZE best_tx = max_tx_size;
|
2015-07-29 22:37:41 +02:00
|
|
|
int start_tx, end_tx;
|
32x32 transform for superblocks.
This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.
Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
1 bit, or else they won't fit in int16_t (they are 17 bits). Because
of this, the RD error scoring does not right-shift the MSE score by
two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
simply using the 16x16 luma ones. A future commit will add newly
generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
ADST is desired, transform-size selection can scale back to 16x16
or lower, and use an ADST at that level.
Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
block than for the rest (DWT pixel differences) of the block. Therefore,
RD error scoring isn't easily scalable between coefficient and pixel
domain. Thus, unfortunately, we need to compute the RD distortion in
the pixel domain until we figure out how to scale these appropriately.
Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
2012-12-07 23:45:05 +01:00
|
|
|
|
2015-07-20 22:49:15 +02:00
|
|
|
const vpx_prob *tx_probs = get_tx_probs2(max_tx_size, xd, &cm->fc->tx_probs);
|
2013-04-09 19:54:19 +02:00
|
|
|
assert(skip_prob > 0);
|
|
|
|
s0 = vp9_cost_bit(skip_prob, 0);
|
|
|
|
s1 = vp9_cost_bit(skip_prob, 1);
|
2012-11-08 20:03:00 +01:00
|
|
|
|
2015-07-29 22:37:41 +02:00
|
|
|
if (cm->tx_mode == TX_MODE_SELECT) {
|
|
|
|
start_tx = max_tx_size;
|
|
|
|
end_tx = 0;
|
|
|
|
} else {
|
2016-07-27 05:43:23 +02:00
|
|
|
TX_SIZE chosen_tx_size =
|
|
|
|
VPXMIN(max_tx_size, tx_mode_to_biggest_tx_size[cm->tx_mode]);
|
2015-07-29 22:37:41 +02:00
|
|
|
start_tx = chosen_tx_size;
|
|
|
|
end_tx = chosen_tx_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (n = start_tx; n >= end_tx; n--) {
|
2015-08-19 21:04:56 +02:00
|
|
|
int r_tx_size = 0;
|
2016-07-27 05:43:23 +02:00
|
|
|
for (m = 0; m <= n - (n == (int)max_tx_size); m++) {
|
2015-08-19 21:04:56 +02:00
|
|
|
if (m == n)
|
|
|
|
r_tx_size += vp9_cost_zero(tx_probs[m]);
|
|
|
|
else
|
|
|
|
r_tx_size += vp9_cost_one(tx_probs[m]);
|
|
|
|
}
|
2016-07-02 01:02:41 +02:00
|
|
|
txfm_rd_in_plane(cpi, x, &r[n][0], &d[n], &s[n], &sse[n], ref_best_rd, 0,
|
|
|
|
bs, n, cpi->sf.use_fast_coef_costing);
|
2013-11-25 21:45:51 +01:00
|
|
|
r[n][1] = r[n][0];
|
|
|
|
if (r[n][0] < INT_MAX) {
|
2015-08-19 21:04:56 +02:00
|
|
|
r[n][1] += r_tx_size;
|
2013-11-25 21:45:51 +01:00
|
|
|
}
|
2015-08-19 21:04:56 +02:00
|
|
|
if (d[n] == INT64_MAX || r[n][0] == INT_MAX) {
|
2013-07-03 01:48:15 +02:00
|
|
|
rd[n][0] = rd[n][1] = INT64_MAX;
|
2013-11-25 21:45:51 +01:00
|
|
|
} else if (s[n]) {
|
2016-01-20 01:40:20 +01:00
|
|
|
if (is_inter_block(mi)) {
|
2015-08-19 21:04:56 +02:00
|
|
|
rd[n][0] = rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1, sse[n]);
|
2015-09-22 23:08:28 +02:00
|
|
|
r[n][1] -= r_tx_size;
|
2015-08-19 21:04:56 +02:00
|
|
|
} else {
|
|
|
|
rd[n][0] = RDCOST(x->rdmult, x->rddiv, s1, sse[n]);
|
|
|
|
rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1 + r_tx_size, sse[n]);
|
|
|
|
}
|
2013-04-09 19:54:19 +02:00
|
|
|
} else {
|
|
|
|
rd[n][0] = RDCOST(x->rdmult, x->rddiv, r[n][0] + s0, d[n]);
|
|
|
|
rd[n][1] = RDCOST(x->rdmult, x->rddiv, r[n][1] + s0, d[n]);
|
2012-10-09 18:18:21 +02:00
|
|
|
}
|
2013-11-25 21:45:51 +01:00
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
if (is_inter_block(mi) && !xd->lossless && !s[n] && sse[n] != INT64_MAX) {
|
2015-09-22 23:08:28 +02:00
|
|
|
rd[n][0] = VPXMIN(rd[n][0], RDCOST(x->rdmult, x->rddiv, s1, sse[n]));
|
|
|
|
rd[n][1] = VPXMIN(rd[n][1], RDCOST(x->rdmult, x->rddiv, s1, sse[n]));
|
|
|
|
}
|
|
|
|
|
Add early termination in transform size search
In the full-rd transform size search, we go through all transform
sizes to choose the one with best rd score. In this patch, an
early termination is added to stop the search once we see that the
smaller size won't give better rd score than the larger size. Also,
the search starts from largest transform size, then goes down to
smallest size.
A speed feature tx_size_search_breakout is added, which is turned off
at speed 0, and on for other speeds. The transform size search is
turned on at speed 1.
Borg test results:
1. At speed 1,
derf set: psnr gain: 0.618%, ssim gain: 0.377%;
stdhd set: psnr gain: 0.594%, ssim gain: 0.162%;
No noticeable speed change.
3. At speed 2,
derf set: psnr loss: 0.157%, ssim loss: 0.175%;
stdhd set: psnr loss: 0.090%, ssim loss: 0.101%;
speed gain: ~4%.
Change-Id: I22535cd2017b5e54f2a62bb6a38231aea4268b3f
2014-08-12 23:36:42 +02:00
|
|
|
// Early termination in transform size search.
|
|
|
|
if (cpi->sf.tx_size_search_breakout &&
|
|
|
|
(rd[n][1] == INT64_MAX ||
|
2016-07-27 05:43:23 +02:00
|
|
|
(n < (int)max_tx_size && rd[n][1] > rd[n + 1][1]) || s[n] == 1))
|
Add early termination in transform size search
In the full-rd transform size search, we go through all transform
sizes to choose the one with best rd score. In this patch, an
early termination is added to stop the search once we see that the
smaller size won't give better rd score than the larger size. Also,
the search starts from largest transform size, then goes down to
smallest size.
A speed feature tx_size_search_breakout is added, which is turned off
at speed 0, and on for other speeds. The transform size search is
turned on at speed 1.
Borg test results:
1. At speed 1,
derf set: psnr gain: 0.618%, ssim gain: 0.377%;
stdhd set: psnr gain: 0.594%, ssim gain: 0.162%;
No noticeable speed change.
3. At speed 2,
derf set: psnr loss: 0.157%, ssim loss: 0.175%;
stdhd set: psnr loss: 0.090%, ssim loss: 0.101%;
speed gain: ~4%.
Change-Id: I22535cd2017b5e54f2a62bb6a38231aea4268b3f
2014-08-12 23:36:42 +02:00
|
|
|
break;
|
|
|
|
|
2013-11-25 21:45:51 +01:00
|
|
|
if (rd[n][1] < best_rd) {
|
|
|
|
best_tx = n;
|
|
|
|
best_rd = rd[n][1];
|
|
|
|
}
|
2012-10-09 18:18:21 +02:00
|
|
|
}
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->tx_size = best_tx;
|
2012-10-09 18:18:21 +02:00
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
*distortion = d[mi->tx_size];
|
2016-07-27 05:43:23 +02:00
|
|
|
*rate = r[mi->tx_size][cm->tx_mode == TX_MODE_SELECT];
|
|
|
|
*skip = s[mi->tx_size];
|
|
|
|
*psse = sse[mi->tx_size];
|
Tx size selection enhancements
(1) Refines the modeling function and uses that to add some speed
features. Specifically, intead of using a flag use_largest_txfm as
a speed feature, an enum tx_size_search_method is used, of which
two of the types are USE_FULL_RD and USE_LARGESTALL. Two other
new types are added:
USE_LARGESTINTRA (use largest only for intra)
USE_LARGESTINTRA_MODELINTER (use largest for intra, and model for
inter)
(2) Another change is that the framework for deciding transform type
is simplified to use a heuristic count based method rather than
an rd based method using txfm_cache. In practice the new method
is found to work just as well - with derf only -0.01 down.
The new method is more compatible with the new framework where
certain rd costs are based on full rd and certain others are
based on modeled rd or are not computed. In this patch the existing
rd based method is still kept for use in the USE_FULL_RD mode.
In the other modes, the count based method is used.
However the recommendation is to remove it eventually since the
benefit is limited, and will remove a lot of complications in
the code
(3) Finally a bug is fixed with the existing use_largest_txfm speed feature
that causes mismatches when the lossless mode and 4x4 WH transform is
forced.
Results on derf:
USE_FULL_RD: +0.03% (due to change in the tables), 0% encode time reduction
USE_LARGESTINTRA: -0.21%, 15% encode time reduction (this one is a
pretty good compromise)
USE_LARGESTINTRA_MODELINTER: -0.98%, 22% encode time reduction
(currently the benefit of modeling is limited for txfm size selection,
but keeping this enum as a placeholder) .
USE_LARGESTALL: -1.05%, 27% encode-time reduction (same as existing
use_largest_txfm speed feature).
Change-Id: I4d60a5f9ce78fbc90cddf2f97ed91d8bc0d4f936
2013-06-22 01:31:12 +02:00
|
|
|
}
|
2013-03-06 00:18:06 +01:00
|
|
|
|
2014-09-02 21:42:34 +02:00
|
|
|
static void super_block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
|
2016-07-27 05:43:23 +02:00
|
|
|
int64_t *distortion, int *skip, int64_t *psse,
|
|
|
|
BLOCK_SIZE bs, int64_t ref_best_rd) {
|
2013-05-31 00:13:08 +02:00
|
|
|
MACROBLOCKD *xd = &x->e_mbd;
|
2014-09-02 21:42:34 +02:00
|
|
|
int64_t sse;
|
|
|
|
int64_t *ret_sse = psse ? psse : &sse;
|
2013-11-26 23:55:53 +01:00
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
assert(bs == xd->mi[0]->sb_type);
|
2013-01-06 03:20:25 +01:00
|
|
|
|
2014-05-09 03:04:56 +02:00
|
|
|
if (cpi->sf.tx_size_search_method == USE_LARGESTALL || xd->lossless) {
|
2014-09-02 21:42:34 +02:00
|
|
|
choose_largest_tx_size(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
|
2014-06-12 20:57:26 +02:00
|
|
|
bs);
|
2014-02-25 20:59:56 +01:00
|
|
|
} else {
|
2016-07-27 05:43:23 +02:00
|
|
|
choose_tx_size_from_rd(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd,
|
|
|
|
bs);
|
2014-02-25 20:59:56 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-12 00:26:24 +02:00
|
|
|
static int conditional_skipintra(PREDICTION_MODE mode,
|
|
|
|
PREDICTION_MODE best_intra_mode) {
|
2016-07-27 05:43:23 +02:00
|
|
|
if (mode == D117_PRED && best_intra_mode != V_PRED &&
|
2013-07-02 20:18:00 +02:00
|
|
|
best_intra_mode != D135_PRED)
|
|
|
|
return 1;
|
2016-07-27 05:43:23 +02:00
|
|
|
if (mode == D63_PRED && best_intra_mode != V_PRED &&
|
2013-07-02 20:18:00 +02:00
|
|
|
best_intra_mode != D45_PRED)
|
|
|
|
return 1;
|
2016-07-27 05:43:23 +02:00
|
|
|
if (mode == D207_PRED && best_intra_mode != H_PRED &&
|
2013-07-02 20:18:00 +02:00
|
|
|
best_intra_mode != D45_PRED)
|
|
|
|
return 1;
|
2016-07-27 05:43:23 +02:00
|
|
|
if (mode == D153_PRED && best_intra_mode != H_PRED &&
|
2013-07-02 20:18:00 +02:00
|
|
|
best_intra_mode != D135_PRED)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int row,
|
|
|
|
int col, PREDICTION_MODE *best_mode,
|
|
|
|
const int *bmode_costs, ENTROPY_CONTEXT *a,
|
|
|
|
ENTROPY_CONTEXT *l, int *bestrate,
|
|
|
|
int *bestratey, int64_t *bestdistortion,
|
2013-08-27 20:05:08 +02:00
|
|
|
BLOCK_SIZE bsize, int64_t rd_thresh) {
|
2014-04-12 00:26:24 +02:00
|
|
|
PREDICTION_MODE mode;
|
2014-02-26 22:43:46 +01:00
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
2013-07-19 18:17:00 +02:00
|
|
|
int64_t best_rd = rd_thresh;
|
2013-06-26 04:41:56 +02:00
|
|
|
struct macroblock_plane *p = &x->plane[0];
|
|
|
|
struct macroblockd_plane *pd = &xd->plane[0];
|
|
|
|
const int src_stride = p->src.stride;
|
2013-07-09 01:48:47 +02:00
|
|
|
const int dst_stride = pd->dst.stride;
|
2015-07-13 19:40:01 +02:00
|
|
|
const uint8_t *src_init = &p->src.buf[row * 4 * src_stride + col * 4];
|
|
|
|
uint8_t *dst_init = &pd->dst.buf[row * 4 * src_stride + col * 4];
|
2013-05-21 01:04:28 +02:00
|
|
|
ENTROPY_CONTEXT ta[2], tempa[2];
|
|
|
|
ENTROPY_CONTEXT tl[2], templ[2];
|
2013-08-05 21:15:52 +02:00
|
|
|
const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
|
|
|
|
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
|
2013-10-28 22:28:28 +01:00
|
|
|
int idx, idy;
|
2013-07-23 00:39:43 +02:00
|
|
|
uint8_t best_dst[8 * 8];
|
2014-09-24 15:36:34 +02:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
uint16_t best_dst16[8 * 8];
|
|
|
|
#endif
|
2016-02-11 21:36:49 +01:00
|
|
|
memcpy(ta, a, num_4x4_blocks_wide * sizeof(a[0]));
|
|
|
|
memcpy(tl, l, num_4x4_blocks_high * sizeof(l[0]));
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
xd->mi[0]->tx_size = TX_4X4;
|
2013-05-21 01:04:28 +02:00
|
|
|
|
2014-09-24 15:36:34 +02:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
|
|
|
|
for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
|
|
|
|
int64_t this_rd;
|
|
|
|
int ratey = 0;
|
|
|
|
int64_t distortion = 0;
|
|
|
|
int rate = bmode_costs[mode];
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue;
|
2014-09-24 15:36:34 +02:00
|
|
|
|
|
|
|
// Only do the oblique modes if the best so far is
|
|
|
|
// one of the neighboring directional modes
|
|
|
|
if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
|
2016-07-27 05:43:23 +02:00
|
|
|
if (conditional_skipintra(mode, *best_mode)) continue;
|
2014-09-24 15:36:34 +02:00
|
|
|
}
|
|
|
|
|
2016-02-11 21:36:49 +01:00
|
|
|
memcpy(tempa, ta, num_4x4_blocks_wide * sizeof(ta[0]));
|
|
|
|
memcpy(templ, tl, num_4x4_blocks_high * sizeof(tl[0]));
|
2014-09-24 15:36:34 +02:00
|
|
|
|
|
|
|
for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
|
|
|
|
for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
|
2015-07-13 19:40:01 +02:00
|
|
|
const int block = (row + idy) * 2 + (col + idx);
|
2014-09-24 15:36:34 +02:00
|
|
|
const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
|
|
|
|
uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
|
2017-05-03 22:32:08 +02:00
|
|
|
uint16_t *const dst16 = CONVERT_TO_SHORTPTR(dst);
|
2016-07-27 05:43:23 +02:00
|
|
|
int16_t *const src_diff =
|
|
|
|
vp9_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
|
2014-09-24 15:36:34 +02:00
|
|
|
tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
|
2015-04-21 14:36:58 +02:00
|
|
|
xd->mi[0]->bmi[block].as_mode = mode;
|
2015-07-13 19:40:01 +02:00
|
|
|
vp9_predict_intra_block(xd, 1, TX_4X4, mode,
|
2014-09-24 15:36:34 +02:00
|
|
|
x->skip_encode ? src : dst,
|
2016-07-27 05:43:23 +02:00
|
|
|
x->skip_encode ? src_stride : dst_stride, dst,
|
|
|
|
dst_stride, col + idx, row + idy, 0);
|
|
|
|
vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride, dst,
|
|
|
|
dst_stride, xd->bd);
|
2014-09-24 15:36:34 +02:00
|
|
|
if (xd->lossless) {
|
|
|
|
const scan_order *so = &vp9_default_scan_orders[TX_4X4];
|
2016-07-27 05:43:23 +02:00
|
|
|
const int coeff_ctx =
|
|
|
|
combine_entropy_contexts(tempa[idx], templ[idy]);
|
2014-10-08 21:43:22 +02:00
|
|
|
vp9_highbd_fwht4x4(src_diff, coeff, 8);
|
2014-09-24 15:36:34 +02:00
|
|
|
vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
|
2016-07-27 05:43:23 +02:00
|
|
|
ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
|
|
|
|
so->neighbors, cpi->sf.use_fast_coef_costing);
|
2016-07-06 18:58:22 +02:00
|
|
|
tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0 ? 1 : 0);
|
2014-09-24 15:36:34 +02:00
|
|
|
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
|
|
|
|
goto next_highbd;
|
2017-05-02 19:44:12 +02:00
|
|
|
vp9_highbd_iwht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block), dst16,
|
2016-07-27 05:43:23 +02:00
|
|
|
dst_stride, p->eobs[block], xd->bd);
|
2014-09-24 15:36:34 +02:00
|
|
|
} else {
|
|
|
|
int64_t unused;
|
|
|
|
const TX_TYPE tx_type = get_tx_type_4x4(PLANE_TYPE_Y, xd, block);
|
|
|
|
const scan_order *so = &vp9_scan_orders[TX_4X4][tx_type];
|
2016-07-27 05:43:23 +02:00
|
|
|
const int coeff_ctx =
|
|
|
|
combine_entropy_contexts(tempa[idx], templ[idy]);
|
2015-07-20 19:26:04 +02:00
|
|
|
if (tx_type == DCT_DCT)
|
2015-07-29 00:57:40 +02:00
|
|
|
vpx_highbd_fdct4x4(src_diff, coeff, 8);
|
2015-07-20 19:26:04 +02:00
|
|
|
else
|
|
|
|
vp9_highbd_fht4x4(src_diff, coeff, 8, tx_type);
|
2014-09-24 15:36:34 +02:00
|
|
|
vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
|
2016-07-27 05:43:23 +02:00
|
|
|
ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
|
|
|
|
so->neighbors, cpi->sf.use_fast_coef_costing);
|
2015-10-08 16:44:49 +02:00
|
|
|
distortion += vp9_highbd_block_error_dispatch(
|
2016-07-27 05:43:23 +02:00
|
|
|
coeff, BLOCK_OFFSET(pd->dqcoeff, block), 16,
|
|
|
|
&unused, xd->bd) >>
|
|
|
|
2;
|
2016-07-06 18:58:22 +02:00
|
|
|
tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0 ? 1 : 0);
|
2014-09-24 15:36:34 +02:00
|
|
|
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
|
|
|
|
goto next_highbd;
|
2014-10-08 21:43:22 +02:00
|
|
|
vp9_highbd_iht4x4_add(tx_type, BLOCK_OFFSET(pd->dqcoeff, block),
|
2017-05-02 19:44:12 +02:00
|
|
|
dst16, dst_stride, p->eobs[block], xd->bd);
|
2014-09-24 15:36:34 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
rate += ratey;
|
|
|
|
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
|
|
|
|
|
|
|
|
if (this_rd < best_rd) {
|
|
|
|
*bestrate = rate;
|
|
|
|
*bestratey = ratey;
|
|
|
|
*bestdistortion = distortion;
|
|
|
|
best_rd = this_rd;
|
|
|
|
*best_mode = mode;
|
2016-02-11 21:36:49 +01:00
|
|
|
memcpy(a, tempa, num_4x4_blocks_wide * sizeof(tempa[0]));
|
|
|
|
memcpy(l, templ, num_4x4_blocks_high * sizeof(templ[0]));
|
2014-09-24 15:36:34 +02:00
|
|
|
for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
|
2015-04-24 05:42:19 +02:00
|
|
|
memcpy(best_dst16 + idy * 8,
|
|
|
|
CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
|
|
|
|
num_4x4_blocks_wide * 4 * sizeof(uint16_t));
|
2014-09-24 15:36:34 +02:00
|
|
|
}
|
|
|
|
}
|
2016-07-27 05:43:23 +02:00
|
|
|
next_highbd : {}
|
2014-09-24 15:36:34 +02:00
|
|
|
}
|
2016-07-27 05:43:23 +02:00
|
|
|
if (best_rd >= rd_thresh || x->skip_encode) return best_rd;
|
2014-09-24 15:36:34 +02:00
|
|
|
|
|
|
|
for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) {
|
2015-04-24 05:42:19 +02:00
|
|
|
memcpy(CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride),
|
2016-07-27 05:43:23 +02:00
|
|
|
best_dst16 + idy * 8, num_4x4_blocks_wide * 4 * sizeof(uint16_t));
|
2014-09-24 15:36:34 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return best_rd;
|
|
|
|
}
|
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
|
2013-05-21 01:04:28 +02:00
|
|
|
for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
|
2013-01-14 23:37:53 +01:00
|
|
|
int64_t this_rd;
|
2013-05-21 01:04:28 +02:00
|
|
|
int ratey = 0;
|
2014-02-26 22:43:46 +01:00
|
|
|
int64_t distortion = 0;
|
|
|
|
int rate = bmode_costs[mode];
|
2013-08-22 18:23:02 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue;
|
2013-08-22 18:23:02 +02:00
|
|
|
|
2013-07-02 20:18:00 +02:00
|
|
|
// Only do the oblique modes if the best so far is
|
|
|
|
// one of the neighboring directional modes
|
2013-07-03 23:47:54 +02:00
|
|
|
if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
|
2016-07-27 05:43:23 +02:00
|
|
|
if (conditional_skipintra(mode, *best_mode)) continue;
|
2013-07-02 20:18:00 +02:00
|
|
|
}
|
2012-10-09 22:19:15 +02:00
|
|
|
|
2016-02-11 21:36:49 +01:00
|
|
|
memcpy(tempa, ta, num_4x4_blocks_wide * sizeof(ta[0]));
|
|
|
|
memcpy(templ, tl, num_4x4_blocks_high * sizeof(tl[0]));
|
2013-05-21 01:04:28 +02:00
|
|
|
|
2013-07-23 15:51:44 +02:00
|
|
|
for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
|
|
|
|
for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
|
2015-07-13 19:40:01 +02:00
|
|
|
const int block = (row + idy) * 2 + (col + idx);
|
2014-02-26 22:43:46 +01:00
|
|
|
const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
|
|
|
|
uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
|
2014-12-22 22:38:34 +01:00
|
|
|
int16_t *const src_diff =
|
|
|
|
vp9_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
|
2014-09-03 01:34:09 +02:00
|
|
|
tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
|
2015-04-21 14:36:58 +02:00
|
|
|
xd->mi[0]->bmi[block].as_mode = mode;
|
2016-07-27 05:43:23 +02:00
|
|
|
vp9_predict_intra_block(xd, 1, TX_4X4, mode, x->skip_encode ? src : dst,
|
|
|
|
x->skip_encode ? src_stride : dst_stride, dst,
|
|
|
|
dst_stride, col + idx, row + idy, 0);
|
2015-07-06 18:33:27 +02:00
|
|
|
vpx_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);
|
2014-02-26 22:43:46 +01:00
|
|
|
|
|
|
|
if (xd->lossless) {
|
|
|
|
const scan_order *so = &vp9_default_scan_orders[TX_4X4];
|
2016-07-27 05:43:23 +02:00
|
|
|
const int coeff_ctx =
|
|
|
|
combine_entropy_contexts(tempa[idx], templ[idy]);
|
2014-02-26 22:43:46 +01:00
|
|
|
vp9_fwht4x4(src_diff, coeff, 8);
|
|
|
|
vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
|
2016-07-27 05:43:23 +02:00
|
|
|
ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
|
|
|
|
so->neighbors, cpi->sf.use_fast_coef_costing);
|
2016-07-06 18:58:22 +02:00
|
|
|
tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0) ? 1 : 0;
|
2014-02-26 22:43:46 +01:00
|
|
|
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
|
|
|
|
goto next;
|
|
|
|
vp9_iwht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block), dst, dst_stride,
|
|
|
|
p->eobs[block]);
|
|
|
|
} else {
|
|
|
|
int64_t unused;
|
|
|
|
const TX_TYPE tx_type = get_tx_type_4x4(PLANE_TYPE_Y, xd, block);
|
|
|
|
const scan_order *so = &vp9_scan_orders[TX_4X4][tx_type];
|
2016-07-27 05:43:23 +02:00
|
|
|
const int coeff_ctx =
|
|
|
|
combine_entropy_contexts(tempa[idx], templ[idy]);
|
2014-02-06 20:54:15 +01:00
|
|
|
vp9_fht4x4(src_diff, coeff, 8, tx_type);
|
2014-02-26 22:43:46 +01:00
|
|
|
vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
|
2016-07-27 05:43:23 +02:00
|
|
|
ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan,
|
|
|
|
so->neighbors, cpi->sf.use_fast_coef_costing);
|
2016-07-06 18:58:22 +02:00
|
|
|
tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0) ? 1 : 0;
|
2014-02-26 22:43:46 +01:00
|
|
|
distortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block),
|
2016-07-27 05:43:23 +02:00
|
|
|
16, &unused) >>
|
|
|
|
2;
|
2014-02-26 22:43:46 +01:00
|
|
|
if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
|
|
|
|
goto next;
|
2016-07-27 05:43:23 +02:00
|
|
|
vp9_iht4x4_add(tx_type, BLOCK_OFFSET(pd->dqcoeff, block), dst,
|
|
|
|
dst_stride, p->eobs[block]);
|
2014-02-26 22:43:46 +01:00
|
|
|
}
|
2013-05-21 01:04:28 +02:00
|
|
|
}
|
|
|
|
}
|
2012-06-25 21:26:09 +02:00
|
|
|
|
2013-01-14 23:37:53 +01:00
|
|
|
rate += ratey;
|
|
|
|
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
|
2012-06-25 21:26:09 +02:00
|
|
|
|
2013-01-14 23:37:53 +01:00
|
|
|
if (this_rd < best_rd) {
|
|
|
|
*bestrate = rate;
|
|
|
|
*bestratey = ratey;
|
|
|
|
*bestdistortion = distortion;
|
|
|
|
best_rd = this_rd;
|
|
|
|
*best_mode = mode;
|
2016-02-11 21:36:49 +01:00
|
|
|
memcpy(a, tempa, num_4x4_blocks_wide * sizeof(tempa[0]));
|
|
|
|
memcpy(l, templ, num_4x4_blocks_high * sizeof(templ[0]));
|
2013-07-23 00:39:43 +02:00
|
|
|
for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
|
2015-04-24 05:42:19 +02:00
|
|
|
memcpy(best_dst + idy * 8, dst_init + idy * dst_stride,
|
|
|
|
num_4x4_blocks_wide * 4);
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
2016-07-27 05:43:23 +02:00
|
|
|
next : {}
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
2013-05-21 01:04:28 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (best_rd >= rd_thresh || x->skip_encode) return best_rd;
|
2013-07-09 01:48:47 +02:00
|
|
|
|
2013-07-23 00:39:43 +02:00
|
|
|
for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
|
2015-04-24 05:42:19 +02:00
|
|
|
memcpy(dst_init + idy * dst_stride, best_dst + idy * 8,
|
|
|
|
num_4x4_blocks_wide * 4);
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
return best_rd;
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
|
|
|
|
2014-03-05 21:28:30 +01:00
|
|
|
static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP *cpi, MACROBLOCK *mb,
|
|
|
|
int *rate, int *rate_y,
|
|
|
|
int64_t *distortion,
|
2013-07-31 21:58:19 +02:00
|
|
|
int64_t best_rd) {
|
2013-05-21 01:04:28 +02:00
|
|
|
int i, j;
|
2014-03-05 21:28:30 +01:00
|
|
|
const MACROBLOCKD *const xd = &mb->e_mbd;
|
2015-04-21 14:36:58 +02:00
|
|
|
MODE_INFO *const mic = xd->mi[0];
|
2014-12-05 23:39:21 +01:00
|
|
|
const MODE_INFO *above_mi = xd->above_mi;
|
|
|
|
const MODE_INFO *left_mi = xd->left_mi;
|
2016-01-20 01:40:20 +01:00
|
|
|
const BLOCK_SIZE bsize = xd->mi[0]->sb_type;
|
2013-08-05 21:15:52 +02:00
|
|
|
const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
|
|
|
|
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
|
2013-05-21 01:04:28 +02:00
|
|
|
int idx, idy;
|
2013-05-11 02:06:37 +02:00
|
|
|
int cost = 0;
|
2013-07-31 21:58:19 +02:00
|
|
|
int64_t total_distortion = 0;
|
2012-07-14 00:21:29 +02:00
|
|
|
int tot_rate_y = 0;
|
|
|
|
int64_t total_rd = 0;
|
2014-05-02 01:12:23 +02:00
|
|
|
const int *bmode_costs = cpi->mbmode_cost;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2013-07-30 19:16:03 +02:00
|
|
|
// Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block.
|
2013-07-23 15:51:44 +02:00
|
|
|
for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
|
|
|
|
for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
|
2014-04-12 00:26:24 +02:00
|
|
|
PREDICTION_MODE best_mode = DC_PRED;
|
2013-08-19 18:28:34 +02:00
|
|
|
int r = INT_MAX, ry = INT_MAX;
|
|
|
|
int64_t d = INT64_MAX, this_rd = INT64_MAX;
|
2013-05-21 01:04:28 +02:00
|
|
|
i = idy * 2 + idx;
|
2013-07-19 20:55:36 +02:00
|
|
|
if (cpi->common.frame_type == KEY_FRAME) {
|
2014-04-12 00:26:24 +02:00
|
|
|
const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, i);
|
|
|
|
const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, i);
|
2013-05-21 01:04:28 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
bmode_costs = cpi->y_mode_costs[A][L];
|
2013-05-21 01:04:28 +02:00
|
|
|
}
|
2011-02-14 19:32:58 +01:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
this_rd = rd_pick_intra4x4block(
|
|
|
|
cpi, mb, idy, idx, &best_mode, bmode_costs,
|
|
|
|
xd->plane[0].above_context + idx, xd->plane[0].left_context + idy, &r,
|
|
|
|
&ry, &d, bsize, best_rd - total_rd);
|
2016-02-11 21:36:49 +01:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (this_rd >= best_rd - total_rd) return INT64_MAX;
|
2013-07-19 18:17:00 +02:00
|
|
|
|
|
|
|
total_rd += this_rd;
|
2013-05-21 01:04:28 +02:00
|
|
|
cost += r;
|
2013-07-31 21:58:19 +02:00
|
|
|
total_distortion += d;
|
2013-05-21 01:04:28 +02:00
|
|
|
tot_rate_y += ry;
|
|
|
|
|
2013-07-03 01:51:57 +02:00
|
|
|
mic->bmi[i].as_mode = best_mode;
|
2013-07-23 15:51:44 +02:00
|
|
|
for (j = 1; j < num_4x4_blocks_high; ++j)
|
2013-07-03 01:51:57 +02:00
|
|
|
mic->bmi[i + j * 2].as_mode = best_mode;
|
2013-07-23 15:51:44 +02:00
|
|
|
for (j = 1; j < num_4x4_blocks_wide; ++j)
|
2013-07-03 01:51:57 +02:00
|
|
|
mic->bmi[i + j].as_mode = best_mode;
|
2013-05-21 01:04:28 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (total_rd >= best_rd) return INT64_MAX;
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2013-07-31 21:58:19 +02:00
|
|
|
*rate = cost;
|
2012-11-05 21:50:16 +01:00
|
|
|
*rate_y = tot_rate_y;
|
2013-07-31 21:58:19 +02:00
|
|
|
*distortion = total_distortion;
|
2016-01-20 01:40:20 +01:00
|
|
|
mic->mode = mic->bmi[3].as_mode;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2013-07-31 21:58:19 +02:00
|
|
|
return RDCOST(mb->rdmult, mb->rddiv, cost, total_distortion);
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
2011-06-08 18:05:05 +02:00
|
|
|
|
2014-09-22 23:58:51 +02:00
|
|
|
// This function is used only for intra_only frames
|
2016-07-27 05:43:23 +02:00
|
|
|
static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
|
|
|
|
int *rate_tokenonly, int64_t *distortion,
|
|
|
|
int *skippable, BLOCK_SIZE bsize,
|
2013-07-18 03:21:41 +02:00
|
|
|
int64_t best_rd) {
|
2014-04-12 00:26:24 +02:00
|
|
|
PREDICTION_MODE mode;
|
|
|
|
PREDICTION_MODE mode_selected = DC_PRED;
|
2013-05-31 00:13:08 +02:00
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
2015-04-21 14:36:58 +02:00
|
|
|
MODE_INFO *const mic = xd->mi[0];
|
2013-06-21 21:54:52 +02:00
|
|
|
int this_rate, this_rate_tokenonly, s;
|
2013-07-18 03:21:41 +02:00
|
|
|
int64_t this_distortion, this_rd;
|
2013-08-19 18:28:34 +02:00
|
|
|
TX_SIZE best_tx = TX_4X4;
|
2014-09-22 23:58:51 +02:00
|
|
|
int *bmode_costs;
|
2014-12-05 23:39:21 +01:00
|
|
|
const MODE_INFO *above_mi = xd->above_mi;
|
|
|
|
const MODE_INFO *left_mi = xd->left_mi;
|
2014-09-22 23:58:51 +02:00
|
|
|
const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0);
|
|
|
|
const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0);
|
|
|
|
bmode_costs = cpi->y_mode_costs[A][L];
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2015-07-30 20:52:28 +02:00
|
|
|
memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
|
2013-07-30 19:16:03 +02:00
|
|
|
/* Y Search for intra prediction mode */
|
2013-01-06 03:20:25 +01:00
|
|
|
for (mode = DC_PRED; mode <= TM_PRED; mode++) {
|
2014-12-04 23:53:36 +01:00
|
|
|
if (cpi->sf.use_nonrd_pick_mode) {
|
|
|
|
// These speed features are turned on in hybrid non-RD and RD mode
|
|
|
|
// for key frame coding in the context of real-time setting.
|
2016-07-27 05:43:23 +02:00
|
|
|
if (conditional_skipintra(mode, mode_selected)) continue;
|
|
|
|
if (*skippable) break;
|
2014-12-04 23:53:36 +01:00
|
|
|
}
|
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
mic->mode = mode;
|
2013-01-06 03:20:25 +01:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL,
|
|
|
|
bsize, best_rd);
|
2013-07-03 01:48:15 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (this_rate_tokenonly == INT_MAX) continue;
|
2013-05-31 00:13:08 +02:00
|
|
|
|
2013-05-21 01:04:28 +02:00
|
|
|
this_rate = this_rate_tokenonly + bmode_costs[mode];
|
2013-01-06 03:20:25 +01:00
|
|
|
this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
|
|
|
|
|
|
|
|
if (this_rd < best_rd) {
|
2016-07-27 05:43:23 +02:00
|
|
|
mode_selected = mode;
|
|
|
|
best_rd = this_rd;
|
|
|
|
best_tx = mic->tx_size;
|
|
|
|
*rate = this_rate;
|
2013-01-06 03:20:25 +01:00
|
|
|
*rate_tokenonly = this_rate_tokenonly;
|
2016-07-27 05:43:23 +02:00
|
|
|
*distortion = this_distortion;
|
|
|
|
*skippable = s;
|
2013-01-06 03:20:25 +01:00
|
|
|
}
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
mic->mode = mode_selected;
|
|
|
|
mic->tx_size = best_tx;
|
2012-08-29 20:25:38 +02:00
|
|
|
|
2012-07-14 00:21:29 +02:00
|
|
|
return best_rd;
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
2012-06-25 21:26:09 +02:00
|
|
|
|
2014-10-14 02:06:22 +02:00
|
|
|
// Return value 0: early termination triggered, no valid rd cost available;
|
|
|
|
// 1: rd cost values are valid.
|
2016-07-27 05:43:23 +02:00
|
|
|
static int super_block_uvrd(const VP9_COMP *cpi, MACROBLOCK *x, int *rate,
|
|
|
|
int64_t *distortion, int *skippable, int64_t *sse,
|
|
|
|
BLOCK_SIZE bsize, int64_t ref_best_rd) {
|
2013-05-01 01:13:20 +02:00
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO *const mi = xd->mi[0];
|
|
|
|
const TX_SIZE uv_tx_size = get_uv_tx_size(mi, &xd->plane[1]);
|
2013-08-14 03:58:21 +02:00
|
|
|
int plane;
|
|
|
|
int pnrate = 0, pnskip = 1;
|
|
|
|
int64_t pndist = 0, pnsse = 0;
|
2014-10-14 02:06:22 +02:00
|
|
|
int is_cost_valid = 1;
|
2013-05-01 01:13:20 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (ref_best_rd < 0) is_cost_valid = 0;
|
2013-08-15 01:50:45 +02:00
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
if (is_inter_block(mi) && is_cost_valid) {
|
2014-02-17 13:57:40 +01:00
|
|
|
int plane;
|
|
|
|
for (plane = 1; plane < MAX_MB_PLANE; ++plane)
|
|
|
|
vp9_subtract_plane(x, bsize, plane);
|
|
|
|
}
|
2013-05-01 01:13:20 +02:00
|
|
|
|
2013-08-14 03:58:21 +02:00
|
|
|
*rate = 0;
|
|
|
|
*distortion = 0;
|
|
|
|
*sse = 0;
|
|
|
|
*skippable = 1;
|
|
|
|
|
2014-10-16 18:19:42 +02:00
|
|
|
for (plane = 1; plane < MAX_MB_PLANE; ++plane) {
|
2016-07-02 01:02:41 +02:00
|
|
|
txfm_rd_in_plane(cpi, x, &pnrate, &pndist, &pnskip, &pnsse, ref_best_rd,
|
|
|
|
plane, bsize, uv_tx_size, cpi->sf.use_fast_coef_costing);
|
2014-10-16 18:19:42 +02:00
|
|
|
if (pnrate == INT_MAX) {
|
2014-10-14 02:06:22 +02:00
|
|
|
is_cost_valid = 0;
|
2014-10-16 18:19:42 +02:00
|
|
|
break;
|
|
|
|
}
|
2013-08-14 03:58:21 +02:00
|
|
|
*rate += pnrate;
|
|
|
|
*distortion += pndist;
|
|
|
|
*sse += pnsse;
|
|
|
|
*skippable &= pnskip;
|
|
|
|
}
|
2013-08-15 01:50:45 +02:00
|
|
|
|
2014-10-14 02:06:22 +02:00
|
|
|
if (!is_cost_valid) {
|
|
|
|
// reset cost value
|
|
|
|
*rate = INT_MAX;
|
|
|
|
*distortion = INT64_MAX;
|
|
|
|
*sse = INT64_MAX;
|
|
|
|
*skippable = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return is_cost_valid;
|
2013-05-01 01:13:20 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x,
|
2016-07-27 05:43:23 +02:00
|
|
|
PICK_MODE_CONTEXT *ctx, int *rate,
|
|
|
|
int *rate_tokenonly, int64_t *distortion,
|
|
|
|
int *skippable, BLOCK_SIZE bsize,
|
|
|
|
TX_SIZE max_tx_size) {
|
2014-02-26 02:46:27 +01:00
|
|
|
MACROBLOCKD *xd = &x->e_mbd;
|
2014-04-12 00:26:24 +02:00
|
|
|
PREDICTION_MODE mode;
|
|
|
|
PREDICTION_MODE mode_selected = DC_PRED;
|
2013-05-01 01:13:20 +02:00
|
|
|
int64_t best_rd = INT64_MAX, this_rd;
|
2013-06-21 21:54:52 +02:00
|
|
|
int this_rate_tokenonly, this_rate, s;
|
2013-08-14 03:58:21 +02:00
|
|
|
int64_t this_distortion, this_sse;
|
2013-05-01 01:13:20 +02:00
|
|
|
|
2015-07-30 20:52:28 +02:00
|
|
|
memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
|
2013-12-03 00:24:41 +01:00
|
|
|
for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
|
2016-07-27 05:43:23 +02:00
|
|
|
if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode))) continue;
|
2016-06-15 20:39:41 +02:00
|
|
|
#if CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) &&
|
|
|
|
(xd->above_mi == NULL || xd->left_mi == NULL) && need_top_left[mode])
|
|
|
|
continue;
|
|
|
|
#endif // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
|
2013-07-10 17:59:18 +02:00
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
xd->mi[0]->uv_mode = mode;
|
2013-09-11 19:45:44 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (!super_block_uvrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s,
|
|
|
|
&this_sse, bsize, best_rd))
|
2013-08-15 01:50:45 +02:00
|
|
|
continue;
|
2016-07-27 05:43:23 +02:00
|
|
|
this_rate =
|
|
|
|
this_rate_tokenonly +
|
|
|
|
cpi->intra_uv_mode_cost[cpi->common.frame_type][xd->mi[0]->mode][mode];
|
2013-05-01 01:13:20 +02:00
|
|
|
this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);
|
|
|
|
|
|
|
|
if (this_rd < best_rd) {
|
2016-07-27 05:43:23 +02:00
|
|
|
mode_selected = mode;
|
|
|
|
best_rd = this_rd;
|
|
|
|
*rate = this_rate;
|
2013-05-01 01:13:20 +02:00
|
|
|
*rate_tokenonly = this_rate_tokenonly;
|
2016-07-27 05:43:23 +02:00
|
|
|
*distortion = this_distortion;
|
|
|
|
*skippable = s;
|
|
|
|
if (!x->select_tx_size) swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE);
|
2013-05-01 01:13:20 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
xd->mi[0]->uv_mode = mode_selected;
|
2013-05-01 01:13:20 +02:00
|
|
|
return best_rd;
|
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
static int64_t rd_sbuv_dcpred(const VP9_COMP *cpi, MACROBLOCK *x, int *rate,
|
|
|
|
int *rate_tokenonly, int64_t *distortion,
|
|
|
|
int *skippable, BLOCK_SIZE bsize) {
|
2014-03-03 20:49:13 +01:00
|
|
|
const VP9_COMMON *cm = &cpi->common;
|
2014-02-26 02:46:27 +01:00
|
|
|
int64_t unused;
|
2013-07-16 19:12:34 +02:00
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
x->e_mbd.mi[0]->uv_mode = DC_PRED;
|
2015-07-30 20:52:28 +02:00
|
|
|
memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
|
2016-07-27 05:43:23 +02:00
|
|
|
super_block_uvrd(cpi, x, rate_tokenonly, distortion, skippable, &unused,
|
|
|
|
bsize, INT64_MAX);
|
|
|
|
*rate =
|
|
|
|
*rate_tokenonly +
|
|
|
|
cpi->intra_uv_mode_cost[cm->frame_type][x->e_mbd.mi[0]->mode][DC_PRED];
|
2014-02-26 02:46:27 +01:00
|
|
|
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
|
2013-07-16 19:12:34 +02:00
|
|
|
}
|
|
|
|
|
2014-11-21 20:11:06 +01:00
|
|
|
static void choose_intra_uv_mode(VP9_COMP *cpi, MACROBLOCK *const x,
|
2016-07-27 05:43:23 +02:00
|
|
|
PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
|
|
|
|
TX_SIZE max_tx_size, int *rate_uv,
|
|
|
|
int *rate_uv_tokenonly, int64_t *dist_uv,
|
|
|
|
int *skip_uv, PREDICTION_MODE *mode_uv) {
|
2013-07-18 22:09:38 +02:00
|
|
|
// Use an estimated rd for uv_intra based on DC_PRED if the
|
|
|
|
// appropriate speed flag is set.
|
|
|
|
if (cpi->sf.use_uv_intra_rd_estimate) {
|
2016-07-27 05:43:23 +02:00
|
|
|
rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
|
|
|
|
bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
|
|
|
|
// Else do a proper rd search for each possible transform size that may
|
|
|
|
// be considered in the main rd loop.
|
2013-07-18 22:09:38 +02:00
|
|
|
} else {
|
2016-07-27 05:43:23 +02:00
|
|
|
rd_pick_intra_sbuv_mode(cpi, x, ctx, rate_uv, rate_uv_tokenonly, dist_uv,
|
|
|
|
skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
|
|
|
|
max_tx_size);
|
2013-07-18 22:09:38 +02:00
|
|
|
}
|
2016-01-20 01:40:20 +01:00
|
|
|
*mode_uv = x->e_mbd.mi[0]->uv_mode;
|
2013-07-18 22:09:38 +02:00
|
|
|
}
|
|
|
|
|
2014-04-12 00:26:24 +02:00
|
|
|
static int cost_mv_ref(const VP9_COMP *cpi, PREDICTION_MODE mode,
|
2013-07-11 23:14:47 +02:00
|
|
|
int mode_context) {
|
2014-05-31 01:02:46 +02:00
|
|
|
assert(is_inter_mode(mode));
|
|
|
|
return cpi->inter_mode_cost[mode_context][INTER_OFFSET(mode)];
|
2013-05-01 01:13:20 +02:00
|
|
|
}
|
|
|
|
|
2015-06-29 18:27:11 +02:00
|
|
|
static int set_and_cost_bmi_mvs(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
|
2016-07-27 05:43:23 +02:00
|
|
|
int i, PREDICTION_MODE mode, int_mv this_mv[2],
|
2014-04-17 22:48:54 +02:00
|
|
|
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
|
|
|
|
int_mv seg_mvs[MAX_REF_FRAMES],
|
|
|
|
int_mv *best_ref_mv[2], const int *mvjcost,
|
|
|
|
int *mvcost[2]) {
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO *const mi = xd->mi[0];
|
2015-06-29 18:27:11 +02:00
|
|
|
const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
|
2014-02-26 02:46:27 +01:00
|
|
|
int thismvcost = 0;
|
2013-05-16 07:28:36 +02:00
|
|
|
int idx, idy;
|
2016-01-20 01:40:20 +01:00
|
|
|
const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mi->sb_type];
|
|
|
|
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mi->sb_type];
|
|
|
|
const int is_compound = has_second_ref(mi);
|
2013-05-01 01:13:20 +02:00
|
|
|
|
2014-02-26 02:46:27 +01:00
|
|
|
switch (mode) {
|
2013-05-28 23:02:29 +02:00
|
|
|
case NEWMV:
|
2016-01-20 01:40:20 +01:00
|
|
|
this_mv[0].as_int = seg_mvs[mi->ref_frame[0]].as_int;
|
2014-03-06 20:24:30 +01:00
|
|
|
thismvcost += vp9_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
|
2013-10-09 20:32:03 +02:00
|
|
|
mvjcost, mvcost, MV_COST_WEIGHT_SUB);
|
2014-03-06 20:24:30 +01:00
|
|
|
if (is_compound) {
|
2016-01-20 01:40:20 +01:00
|
|
|
this_mv[1].as_int = seg_mvs[mi->ref_frame[1]].as_int;
|
2014-03-06 20:24:30 +01:00
|
|
|
thismvcost += vp9_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv,
|
2013-10-09 20:32:03 +02:00
|
|
|
mvjcost, mvcost, MV_COST_WEIGHT_SUB);
|
2013-05-28 23:02:29 +02:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NEARMV:
|
2014-04-17 22:48:54 +02:00
|
|
|
case NEARESTMV:
|
2016-01-20 01:40:20 +01:00
|
|
|
this_mv[0].as_int = frame_mv[mode][mi->ref_frame[0]].as_int;
|
2014-03-06 20:24:30 +01:00
|
|
|
if (is_compound)
|
2016-01-20 01:40:20 +01:00
|
|
|
this_mv[1].as_int = frame_mv[mode][mi->ref_frame[1]].as_int;
|
2013-05-28 23:02:29 +02:00
|
|
|
break;
|
|
|
|
case ZEROMV:
|
2014-03-06 20:24:30 +01:00
|
|
|
this_mv[0].as_int = 0;
|
2016-07-27 05:43:23 +02:00
|
|
|
if (is_compound) this_mv[1].as_int = 0;
|
2013-05-28 23:02:29 +02:00
|
|
|
break;
|
2016-07-27 05:43:23 +02:00
|
|
|
default: break;
|
2013-05-28 23:02:29 +02:00
|
|
|
}
|
2013-05-01 01:13:20 +02:00
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->bmi[i].as_mv[0].as_int = this_mv[0].as_int;
|
2016-07-27 05:43:23 +02:00
|
|
|
if (is_compound) mi->bmi[i].as_mv[1].as_int = this_mv[1].as_int;
|
2013-05-28 23:02:29 +02:00
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->bmi[i].as_mode = mode;
|
2013-10-07 20:20:50 +02:00
|
|
|
|
2013-07-23 15:51:44 +02:00
|
|
|
for (idy = 0; idy < num_4x4_blocks_high; ++idy)
|
|
|
|
for (idx = 0; idx < num_4x4_blocks_wide; ++idx)
|
2016-01-20 01:40:20 +01:00
|
|
|
memmove(&mi->bmi[i + idy * 2 + idx], &mi->bmi[i], sizeof(mi->bmi[i]));
|
2013-05-01 01:13:20 +02:00
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
return cost_mv_ref(cpi, mode, mbmi_ext->mode_context[mi->ref_frame[0]]) +
|
2016-07-27 05:43:23 +02:00
|
|
|
thismvcost;
|
2013-05-01 01:13:20 +02:00
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
static int64_t encode_inter_mb_segment(VP9_COMP *cpi, MACROBLOCK *x,
|
|
|
|
int64_t best_yrd, int i, int *labelyrate,
|
2013-07-11 00:18:52 +02:00
|
|
|
int64_t *distortion, int64_t *sse,
|
2016-07-27 05:43:23 +02:00
|
|
|
ENTROPY_CONTEXT *ta, ENTROPY_CONTEXT *tl,
|
2013-12-19 20:16:05 +01:00
|
|
|
int mi_row, int mi_col) {
|
2013-05-29 21:52:57 +02:00
|
|
|
int k;
|
2013-05-01 01:13:20 +02:00
|
|
|
MACROBLOCKD *xd = &x->e_mbd;
|
2013-08-03 00:26:32 +02:00
|
|
|
struct macroblockd_plane *const pd = &xd->plane[0];
|
2013-09-18 14:20:10 +02:00
|
|
|
struct macroblock_plane *const p = &x->plane[0];
|
2015-04-21 14:36:58 +02:00
|
|
|
MODE_INFO *const mi = xd->mi[0];
|
2016-01-20 01:40:20 +01:00
|
|
|
const BLOCK_SIZE plane_bsize = get_plane_block_size(mi->sb_type, pd);
|
2013-11-20 21:39:29 +01:00
|
|
|
const int width = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
|
|
|
|
const int height = 4 * num_4x4_blocks_high_lookup[plane_bsize];
|
2013-05-16 07:28:36 +02:00
|
|
|
int idx, idy;
|
2013-09-18 14:20:10 +02:00
|
|
|
|
2014-12-22 22:38:34 +01:00
|
|
|
const uint8_t *const src =
|
|
|
|
&p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
|
2016-07-27 05:43:23 +02:00
|
|
|
uint8_t *const dst =
|
|
|
|
&pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
|
2013-07-11 00:18:52 +02:00
|
|
|
int64_t thisdistortion = 0, thissse = 0;
|
2013-09-18 14:20:10 +02:00
|
|
|
int thisrate = 0, ref;
|
2013-11-23 01:20:45 +01:00
|
|
|
const scan_order *so = &vp9_default_scan_orders[TX_4X4];
|
2016-01-20 01:40:20 +01:00
|
|
|
const int is_compound = has_second_ref(mi);
|
|
|
|
const InterpKernel *kernel = vp9_filter_kernels[mi->interp_filter];
|
2014-04-04 00:28:42 +02:00
|
|
|
|
2013-09-18 14:20:10 +02:00
|
|
|
for (ref = 0; ref < 1 + is_compound; ++ref) {
|
2015-12-11 19:06:43 +01:00
|
|
|
const int bw = b_width_log2_lookup[BLOCK_8X8];
|
|
|
|
const int h = 4 * (i >> bw);
|
|
|
|
const int w = 4 * (i & ((1 << bw) - 1));
|
|
|
|
const struct scale_factors *sf = &xd->block_refs[ref]->sf;
|
|
|
|
int y_stride = pd->pre[ref].stride;
|
|
|
|
uint8_t *pre = pd->pre[ref].buf + (h * pd->pre[ref].stride + w);
|
|
|
|
|
|
|
|
if (vp9_is_scaled(sf)) {
|
|
|
|
const int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
|
|
|
|
const int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
|
|
|
|
|
|
|
|
y_stride = xd->block_refs[ref]->buf->y_stride;
|
|
|
|
pre = xd->block_refs[ref]->buf->y_buffer;
|
2016-07-27 05:43:23 +02:00
|
|
|
pre += scaled_buffer_offset(x_start + w, y_start + h, y_stride, sf);
|
2015-12-11 19:06:43 +01:00
|
|
|
}
|
2014-09-24 15:36:34 +02:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
2016-07-27 05:43:23 +02:00
|
|
|
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
|
|
|
|
vp9_highbd_build_inter_predictor(
|
2017-04-19 23:48:07 +02:00
|
|
|
CONVERT_TO_SHORTPTR(pre), y_stride, CONVERT_TO_SHORTPTR(dst),
|
|
|
|
pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv,
|
2016-07-27 05:43:23 +02:00
|
|
|
&xd->block_refs[ref]->sf, width, height, ref, kernel, MV_PRECISION_Q3,
|
|
|
|
mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2),
|
|
|
|
xd->bd);
|
|
|
|
} else {
|
|
|
|
vp9_build_inter_predictor(
|
|
|
|
pre, y_stride, dst, pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv,
|
|
|
|
&xd->block_refs[ref]->sf, width, height, ref, kernel, MV_PRECISION_Q3,
|
|
|
|
mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2));
|
|
|
|
}
|
2014-09-24 15:36:34 +02:00
|
|
|
#else
|
2016-07-27 05:43:23 +02:00
|
|
|
vp9_build_inter_predictor(
|
|
|
|
pre, y_stride, dst, pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv,
|
|
|
|
&xd->block_refs[ref]->sf, width, height, ref, kernel, MV_PRECISION_Q3,
|
|
|
|
mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2));
|
2014-09-24 15:36:34 +02:00
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
2013-05-29 21:52:57 +02:00
|
|
|
}
|
2013-05-16 07:28:36 +02:00
|
|
|
|
2014-09-24 15:36:34 +02:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
|
2015-07-06 18:33:27 +02:00
|
|
|
vpx_highbd_subtract_block(
|
2014-12-22 22:38:34 +01:00
|
|
|
height, width, vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
|
|
|
|
8, src, p->src.stride, dst, pd->dst.stride, xd->bd);
|
2014-09-24 15:36:34 +02:00
|
|
|
} else {
|
2016-07-27 05:43:23 +02:00
|
|
|
vpx_subtract_block(height, width,
|
|
|
|
vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
|
|
|
|
8, src, p->src.stride, dst, pd->dst.stride);
|
2014-09-24 15:36:34 +02:00
|
|
|
}
|
|
|
|
#else
|
2015-07-06 18:33:27 +02:00
|
|
|
vpx_subtract_block(height, width,
|
2014-12-22 22:38:34 +01:00
|
|
|
vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
|
|
|
|
8, src, p->src.stride, dst, pd->dst.stride);
|
2014-09-24 15:36:34 +02:00
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
2013-05-29 21:52:57 +02:00
|
|
|
|
|
|
|
k = i;
|
2013-07-23 15:51:44 +02:00
|
|
|
for (idy = 0; idy < height / 4; ++idy) {
|
|
|
|
for (idx = 0; idx < width / 4; ++idx) {
|
2015-10-08 16:44:49 +02:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
|
|
|
|
#endif
|
2013-07-17 23:21:44 +02:00
|
|
|
int64_t ssz, rd, rd1, rd2;
|
2016-07-27 05:43:23 +02:00
|
|
|
tran_low_t *coeff;
|
2016-07-06 18:58:22 +02:00
|
|
|
int coeff_ctx;
|
2013-05-29 21:52:57 +02:00
|
|
|
k += (idy * 2 + idx);
|
2016-07-06 18:58:22 +02:00
|
|
|
coeff_ctx = combine_entropy_contexts(ta[k & 1], tl[k >> 1]);
|
2013-09-18 14:20:10 +02:00
|
|
|
coeff = BLOCK_OFFSET(p->coeff, k);
|
2017-06-30 00:07:55 +02:00
|
|
|
x->fwd_txfm4x4(vp9_raster_block_offset_int16(BLOCK_8X8, k, p->src_diff),
|
|
|
|
coeff, 8);
|
2013-12-03 03:33:50 +01:00
|
|
|
vp9_regular_quantize_b_4x4(x, 0, k, so->scan, so->iscan);
|
2014-09-24 15:36:34 +02:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
2015-10-08 16:44:49 +02:00
|
|
|
thisdistortion += vp9_highbd_block_error_dispatch(
|
|
|
|
coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz, bd);
|
2014-09-24 15:36:34 +02:00
|
|
|
#else
|
2016-07-27 05:43:23 +02:00
|
|
|
thisdistortion +=
|
|
|
|
vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz);
|
2014-09-24 15:36:34 +02:00
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
2013-07-11 00:18:52 +02:00
|
|
|
thissse += ssz;
|
2016-07-27 05:43:23 +02:00
|
|
|
thisrate += cost_coeffs(x, 0, k, TX_4X4, coeff_ctx, so->scan,
|
|
|
|
so->neighbors, cpi->sf.use_fast_coef_costing);
|
2016-07-06 18:58:22 +02:00
|
|
|
ta[k & 1] = tl[k >> 1] = (x->plane[0].eobs[k] > 0) ? 1 : 0;
|
2013-07-17 23:21:44 +02:00
|
|
|
rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion >> 2);
|
|
|
|
rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse >> 2);
|
2015-08-18 03:19:22 +02:00
|
|
|
rd = VPXMIN(rd1, rd2);
|
2016-07-27 05:43:23 +02:00
|
|
|
if (rd >= best_yrd) return INT64_MAX;
|
2013-05-01 01:13:20 +02:00
|
|
|
}
|
|
|
|
}
|
2013-09-18 14:20:10 +02:00
|
|
|
|
2013-07-17 23:21:44 +02:00
|
|
|
*distortion = thisdistortion >> 2;
|
|
|
|
*labelyrate = thisrate;
|
2013-07-11 00:18:52 +02:00
|
|
|
*sse = thissse >> 2;
|
2013-05-29 21:52:57 +02:00
|
|
|
|
2013-05-01 01:13:20 +02:00
|
|
|
return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion);
|
|
|
|
}
|
|
|
|
|
2013-07-18 02:07:32 +02:00
|
|
|
typedef struct {
|
|
|
|
int eobs;
|
|
|
|
int brate;
|
|
|
|
int byrate;
|
|
|
|
int64_t bdist;
|
|
|
|
int64_t bsse;
|
|
|
|
int64_t brdcost;
|
|
|
|
int_mv mvs[2];
|
|
|
|
ENTROPY_CONTEXT ta[2];
|
|
|
|
ENTROPY_CONTEXT tl[2];
|
|
|
|
} SEG_RDSTAT;
|
|
|
|
|
2013-05-01 01:13:20 +02:00
|
|
|
typedef struct {
|
2014-02-28 23:10:51 +01:00
|
|
|
int_mv *ref_mv[2];
|
2013-05-01 01:13:20 +02:00
|
|
|
int_mv mvp;
|
|
|
|
|
|
|
|
int64_t segment_rd;
|
|
|
|
int r;
|
2013-06-21 21:54:52 +02:00
|
|
|
int64_t d;
|
2013-07-11 00:18:52 +02:00
|
|
|
int64_t sse;
|
2013-05-01 01:13:20 +02:00
|
|
|
int segment_yrate;
|
2014-04-12 00:26:24 +02:00
|
|
|
PREDICTION_MODE modes[4];
|
2013-08-23 03:40:34 +02:00
|
|
|
SEG_RDSTAT rdstat[4][INTER_MODES];
|
2013-05-01 01:13:20 +02:00
|
|
|
int mvthresh;
|
|
|
|
} BEST_SEG_INFO;
|
|
|
|
|
2016-08-08 20:42:27 +02:00
|
|
|
static INLINE int mv_check_bounds(const MvLimits *mv_limits, const MV *mv) {
|
|
|
|
return (mv->row >> 3) < mv_limits->row_min ||
|
|
|
|
(mv->row >> 3) > mv_limits->row_max ||
|
|
|
|
(mv->col >> 3) < mv_limits->col_min ||
|
|
|
|
(mv->col >> 3) > mv_limits->col_max;
|
2013-05-01 01:13:20 +02:00
|
|
|
}
|
|
|
|
|
2013-05-30 06:59:41 +02:00
|
|
|
static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO *const mi = x->e_mbd.mi[0];
|
2013-08-19 22:20:21 +02:00
|
|
|
struct macroblock_plane *const p = &x->plane[0];
|
|
|
|
struct macroblockd_plane *const pd = &x->e_mbd.plane[0];
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
p->src.buf =
|
|
|
|
&p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
|
2013-08-19 22:20:21 +02:00
|
|
|
assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
|
2016-07-27 05:43:23 +02:00
|
|
|
pd->pre[0].buf =
|
|
|
|
&pd->pre[0].buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
|
2016-01-20 01:40:20 +01:00
|
|
|
if (has_second_ref(mi))
|
2016-07-27 05:43:23 +02:00
|
|
|
pd->pre[1].buf =
|
|
|
|
&pd->pre[1]
|
|
|
|
.buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[1].stride)];
|
2013-05-30 06:59:41 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
|
|
|
|
struct buf_2d orig_pre[2]) {
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO *mi = x->e_mbd.mi[0];
|
2013-05-30 06:59:41 +02:00
|
|
|
x->plane[0].src = orig_src;
|
|
|
|
x->e_mbd.plane[0].pre[0] = orig_pre[0];
|
2016-07-27 05:43:23 +02:00
|
|
|
if (has_second_ref(mi)) x->e_mbd.plane[0].pre[1] = orig_pre[1];
|
2013-05-30 06:59:41 +02:00
|
|
|
}
|
|
|
|
|
2014-01-13 19:57:56 +01:00
|
|
|
static INLINE int mv_has_subpel(const MV *mv) {
|
|
|
|
return (mv->row & 0x0F) || (mv->col & 0x0F);
|
|
|
|
}
|
|
|
|
|
2014-03-25 22:16:11 +01:00
|
|
|
// Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way encode zero motion.
|
|
|
|
// TODO(aconverse): Find out if this is still productive then clean up or remove
|
2016-07-27 05:43:23 +02:00
|
|
|
static int check_best_zero_mv(const VP9_COMP *cpi,
|
|
|
|
const uint8_t mode_context[MAX_REF_FRAMES],
|
|
|
|
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
|
|
|
|
int this_mode,
|
|
|
|
const MV_REFERENCE_FRAME ref_frames[2]) {
|
2014-09-11 19:17:12 +02:00
|
|
|
if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) &&
|
2014-04-11 23:32:07 +02:00
|
|
|
frame_mv[this_mode][ref_frames[0]].as_int == 0 &&
|
|
|
|
(ref_frames[1] == NONE ||
|
|
|
|
frame_mv[this_mode][ref_frames[1]].as_int == 0)) {
|
|
|
|
int rfc = mode_context[ref_frames[0]];
|
2014-03-25 22:16:11 +01:00
|
|
|
int c1 = cost_mv_ref(cpi, NEARMV, rfc);
|
|
|
|
int c2 = cost_mv_ref(cpi, NEARESTMV, rfc);
|
|
|
|
int c3 = cost_mv_ref(cpi, ZEROMV, rfc);
|
|
|
|
|
|
|
|
if (this_mode == NEARMV) {
|
|
|
|
if (c1 > c3) return 0;
|
|
|
|
} else if (this_mode == NEARESTMV) {
|
|
|
|
if (c2 > c3) return 0;
|
|
|
|
} else {
|
|
|
|
assert(this_mode == ZEROMV);
|
2014-04-11 23:32:07 +02:00
|
|
|
if (ref_frames[1] == NONE) {
|
|
|
|
if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0) ||
|
|
|
|
(c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0))
|
2014-03-25 22:16:11 +01:00
|
|
|
return 0;
|
|
|
|
} else {
|
2014-04-11 23:32:07 +02:00
|
|
|
if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0 &&
|
|
|
|
frame_mv[NEARESTMV][ref_frames[1]].as_int == 0) ||
|
|
|
|
(c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0 &&
|
|
|
|
frame_mv[NEARMV][ref_frames[1]].as_int == 0))
|
2014-03-25 22:16:11 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
|
|
|
|
int_mv *frame_mv, int mi_row, int mi_col,
|
2015-03-19 22:28:52 +01:00
|
|
|
int_mv single_newmv[MAX_REF_FRAMES],
|
|
|
|
int *rate_mv) {
|
2015-03-24 16:55:35 +01:00
|
|
|
const VP9_COMMON *const cm = &cpi->common;
|
2015-03-19 22:28:52 +01:00
|
|
|
const int pw = 4 * num_4x4_blocks_wide_lookup[bsize];
|
|
|
|
const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
|
|
|
|
MACROBLOCKD *xd = &x->e_mbd;
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO *mi = xd->mi[0];
|
2016-07-27 05:43:23 +02:00
|
|
|
const int refs[2] = { mi->ref_frame[0],
|
|
|
|
mi->ref_frame[1] < 0 ? 0 : mi->ref_frame[1] };
|
2015-03-19 22:28:52 +01:00
|
|
|
int_mv ref_mv[2];
|
|
|
|
int ite, ref;
|
2016-01-20 01:40:20 +01:00
|
|
|
const InterpKernel *kernel = vp9_filter_kernels[mi->interp_filter];
|
2015-03-24 16:55:35 +01:00
|
|
|
struct scale_factors sf;
|
2015-03-19 22:28:52 +01:00
|
|
|
|
|
|
|
// Do joint motion search in compound mode to get more accurate mv.
|
|
|
|
struct buf_2d backup_yv12[2][MAX_MB_PLANE];
|
2016-07-27 05:43:23 +02:00
|
|
|
uint32_t last_besterr[2] = { UINT_MAX, UINT_MAX };
|
2015-03-19 22:28:52 +01:00
|
|
|
const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = {
|
2016-01-20 01:40:20 +01:00
|
|
|
vp9_get_scaled_ref_frame(cpi, mi->ref_frame[0]),
|
|
|
|
vp9_get_scaled_ref_frame(cpi, mi->ref_frame[1])
|
2015-03-19 22:28:52 +01:00
|
|
|
};
|
2015-03-24 17:44:07 +01:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
// Prediction buffer from second frame.
|
2015-03-19 22:28:52 +01:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
2015-05-02 22:24:16 +02:00
|
|
|
DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[64 * 64]);
|
2015-03-24 17:44:07 +01:00
|
|
|
uint8_t *second_pred;
|
|
|
|
#else
|
2015-05-02 22:24:16 +02:00
|
|
|
DECLARE_ALIGNED(16, uint8_t, second_pred[64 * 64]);
|
2015-03-19 22:28:52 +01:00
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
|
|
|
|
for (ref = 0; ref < 2; ++ref) {
|
2015-06-29 18:27:11 +02:00
|
|
|
ref_mv[ref] = x->mbmi_ext->ref_mvs[refs[ref]][0];
|
2015-03-19 22:28:52 +01:00
|
|
|
|
|
|
|
if (scaled_ref_frame[ref]) {
|
|
|
|
int i;
|
|
|
|
// Swap out the reference frame for a version that's been scaled to
|
|
|
|
// match the resolution of the current frame, allowing the existing
|
|
|
|
// motion search code to be used without additional modifications.
|
|
|
|
for (i = 0; i < MAX_MB_PLANE; i++)
|
|
|
|
backup_yv12[ref][i] = xd->plane[i].pre[ref];
|
|
|
|
vp9_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
|
|
|
|
NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
frame_mv[refs[ref]].as_int = single_newmv[refs[ref]].as_int;
|
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
// Since we have scaled the reference frames to match the size of the current
|
|
|
|
// frame we must use a unit scaling factor during mode selection.
|
2015-03-24 16:55:35 +01:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
2016-07-27 05:43:23 +02:00
|
|
|
vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
|
|
|
|
cm->height, cm->use_highbitdepth);
|
2015-03-24 16:55:35 +01:00
|
|
|
#else
|
2016-07-27 05:43:23 +02:00
|
|
|
vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
|
|
|
|
cm->height);
|
2015-03-24 16:55:35 +01:00
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
|
2015-03-19 22:41:43 +01:00
|
|
|
// Allow joint search multiple times iteratively for each reference frame
|
|
|
|
// and break out of the search loop if it couldn't find a better mv.
|
2015-03-19 22:28:52 +01:00
|
|
|
for (ite = 0; ite < 4; ite++) {
|
|
|
|
struct buf_2d ref_yv12[2];
|
2016-06-25 19:58:38 +02:00
|
|
|
uint32_t bestsme = UINT_MAX;
|
2015-03-19 22:28:52 +01:00
|
|
|
int sadpb = x->sadperbit16;
|
|
|
|
MV tmp_mv;
|
|
|
|
int search_range = 3;
|
|
|
|
|
2016-08-08 20:42:27 +02:00
|
|
|
const MvLimits tmp_mv_limits = x->mv_limits;
|
2015-03-19 22:41:43 +01:00
|
|
|
int id = ite % 2; // Even iterations search in the first reference frame,
|
|
|
|
// odd iterations search in the second. The predictor
|
|
|
|
// found for the 'other' reference frame is factored in.
|
2015-03-19 22:28:52 +01:00
|
|
|
|
|
|
|
// Initialized here because of compiler problem in Visual Studio.
|
|
|
|
ref_yv12[0] = xd->plane[0].pre[0];
|
|
|
|
ref_yv12[1] = xd->plane[0].pre[1];
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
// Get the prediction block from the 'other' reference frame.
|
2015-03-19 22:28:52 +01:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
|
2015-03-24 17:44:07 +01:00
|
|
|
second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
|
2016-07-27 05:43:23 +02:00
|
|
|
vp9_highbd_build_inter_predictor(
|
2017-04-19 23:48:07 +02:00
|
|
|
CONVERT_TO_SHORTPTR(ref_yv12[!id].buf), ref_yv12[!id].stride,
|
|
|
|
second_pred_alloc_16, pw, &frame_mv[refs[!id]].as_mv, &sf, pw, ph, 0,
|
|
|
|
kernel, MV_PRECISION_Q3, mi_col * MI_SIZE, mi_row * MI_SIZE, xd->bd);
|
2015-03-19 22:28:52 +01:00
|
|
|
} else {
|
2015-03-24 17:44:07 +01:00
|
|
|
second_pred = (uint8_t *)second_pred_alloc_16;
|
2016-07-27 05:43:23 +02:00
|
|
|
vp9_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
|
|
|
|
second_pred, pw, &frame_mv[refs[!id]].as_mv,
|
|
|
|
&sf, pw, ph, 0, kernel, MV_PRECISION_Q3,
|
2015-03-19 22:28:52 +01:00
|
|
|
mi_col * MI_SIZE, mi_row * MI_SIZE);
|
|
|
|
}
|
|
|
|
#else
|
2016-07-27 05:43:23 +02:00
|
|
|
vp9_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
|
|
|
|
second_pred, pw, &frame_mv[refs[!id]].as_mv, &sf,
|
|
|
|
pw, ph, 0, kernel, MV_PRECISION_Q3,
|
2015-03-19 22:28:52 +01:00
|
|
|
mi_col * MI_SIZE, mi_row * MI_SIZE);
|
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
|
2015-03-19 22:41:43 +01:00
|
|
|
// Do compound motion search on the current reference frame.
|
2016-07-27 05:43:23 +02:00
|
|
|
if (id) xd->plane[0].pre[0] = ref_yv12[id];
|
2016-08-08 20:42:27 +02:00
|
|
|
vp9_set_mv_search_range(&x->mv_limits, &ref_mv[id].as_mv);
|
2015-03-19 22:28:52 +01:00
|
|
|
|
2015-03-19 22:41:43 +01:00
|
|
|
// Use the mv result from the single mode as mv predictor.
|
2015-03-19 22:28:52 +01:00
|
|
|
tmp_mv = frame_mv[refs[id]].as_mv;
|
|
|
|
|
|
|
|
tmp_mv.col >>= 3;
|
|
|
|
tmp_mv.row >>= 3;
|
|
|
|
|
2015-03-19 22:41:43 +01:00
|
|
|
// Small-range full-pixel motion search.
|
2016-07-27 05:43:23 +02:00
|
|
|
bestsme = vp9_refining_search_8p_c(x, &tmp_mv, sadpb, search_range,
|
|
|
|
&cpi->fn_ptr[bsize], &ref_mv[id].as_mv,
|
|
|
|
second_pred);
|
2016-06-25 19:58:38 +02:00
|
|
|
if (bestsme < UINT_MAX)
|
2015-03-19 22:28:52 +01:00
|
|
|
bestsme = vp9_get_mvpred_av_var(x, &tmp_mv, &ref_mv[id].as_mv,
|
|
|
|
second_pred, &cpi->fn_ptr[bsize], 1);
|
|
|
|
|
2016-08-08 20:42:27 +02:00
|
|
|
x->mv_limits = tmp_mv_limits;
|
2015-03-19 22:28:52 +01:00
|
|
|
|
2016-06-25 19:58:38 +02:00
|
|
|
if (bestsme < UINT_MAX) {
|
2016-06-20 19:13:30 +02:00
|
|
|
uint32_t dis; /* TODO: use dis in distortion calculation later. */
|
|
|
|
uint32_t sse;
|
2015-03-19 22:28:52 +01:00
|
|
|
bestsme = cpi->find_fractional_mv_step(
|
2016-07-27 05:43:23 +02:00
|
|
|
x, &tmp_mv, &ref_mv[id].as_mv, cpi->common.allow_high_precision_mv,
|
|
|
|
x->errorperbit, &cpi->fn_ptr[bsize], 0,
|
|
|
|
cpi->sf.mv.subpel_iters_per_step, NULL, x->nmvjointcost, x->mvcost,
|
|
|
|
&dis, &sse, second_pred, pw, ph);
|
2015-03-19 22:28:52 +01:00
|
|
|
}
|
|
|
|
|
2015-03-19 22:41:43 +01:00
|
|
|
// Restore the pointer to the first (possibly scaled) prediction buffer.
|
2016-07-27 05:43:23 +02:00
|
|
|
if (id) xd->plane[0].pre[0] = ref_yv12[0];
|
2015-03-19 22:28:52 +01:00
|
|
|
|
|
|
|
if (bestsme < last_besterr[id]) {
|
|
|
|
frame_mv[refs[id]].as_mv = tmp_mv;
|
|
|
|
last_besterr[id] = bestsme;
|
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
*rate_mv = 0;
|
|
|
|
|
|
|
|
for (ref = 0; ref < 2; ++ref) {
|
|
|
|
if (scaled_ref_frame[ref]) {
|
2015-03-19 22:41:43 +01:00
|
|
|
// Restore the prediction frame pointers to their unscaled versions.
|
2015-03-19 22:28:52 +01:00
|
|
|
int i;
|
|
|
|
for (i = 0; i < MAX_MB_PLANE; i++)
|
|
|
|
xd->plane[i].pre[ref] = backup_yv12[ref][i];
|
|
|
|
}
|
|
|
|
|
|
|
|
*rate_mv += vp9_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
|
2015-06-29 18:27:11 +02:00
|
|
|
&x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
|
2015-03-19 22:28:52 +01:00
|
|
|
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
static int64_t rd_pick_best_sub8x8_mode(
|
|
|
|
VP9_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv,
|
|
|
|
int_mv *second_best_ref_mv, int64_t best_rd, int *returntotrate,
|
|
|
|
int *returnyrate, int64_t *returndistortion, int *skippable, int64_t *psse,
|
|
|
|
int mvthresh, int_mv seg_mvs[4][MAX_REF_FRAMES], BEST_SEG_INFO *bsi_buf,
|
|
|
|
int filter_idx, int mi_row, int mi_col) {
|
2014-04-17 18:11:23 +02:00
|
|
|
int i;
|
|
|
|
BEST_SEG_INFO *bsi = bsi_buf + filter_idx;
|
|
|
|
MACROBLOCKD *xd = &x->e_mbd;
|
2015-04-21 14:36:58 +02:00
|
|
|
MODE_INFO *mi = xd->mi[0];
|
2014-04-17 18:11:23 +02:00
|
|
|
int mode_idx;
|
2014-02-09 05:18:37 +01:00
|
|
|
int k, br = 0, idx, idy;
|
2013-07-18 02:07:32 +02:00
|
|
|
int64_t bd = 0, block_sse = 0;
|
2014-04-12 00:26:24 +02:00
|
|
|
PREDICTION_MODE this_mode;
|
2014-01-07 02:29:16 +01:00
|
|
|
VP9_COMMON *cm = &cpi->common;
|
2013-12-04 02:59:32 +01:00
|
|
|
struct macroblock_plane *const p = &x->plane[0];
|
2014-02-09 05:18:37 +01:00
|
|
|
struct macroblockd_plane *const pd = &xd->plane[0];
|
2013-05-01 01:13:20 +02:00
|
|
|
const int label_count = 4;
|
2013-07-09 01:01:01 +02:00
|
|
|
int64_t this_segment_rd = 0;
|
2013-05-01 01:13:20 +02:00
|
|
|
int label_mv_thresh;
|
|
|
|
int segmentyrate = 0;
|
2016-01-20 01:40:20 +01:00
|
|
|
const BLOCK_SIZE bsize = mi->sb_type;
|
2013-08-05 21:15:52 +02:00
|
|
|
const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
|
|
|
|
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
|
2013-07-18 02:07:32 +02:00
|
|
|
ENTROPY_CONTEXT t_above[2], t_left[2];
|
|
|
|
int subpelmv = 1, have_ref = 0;
|
2015-10-14 11:38:49 +02:00
|
|
|
SPEED_FEATURES *const sf = &cpi->sf;
|
2016-01-20 01:40:20 +01:00
|
|
|
const int has_second_rf = has_second_ref(mi);
|
2015-10-14 11:38:49 +02:00
|
|
|
const int inter_mode_mask = sf->inter_mode_mask[bsize];
|
2015-06-29 18:27:11 +02:00
|
|
|
MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
|
2013-05-16 07:28:36 +02:00
|
|
|
|
2014-04-17 18:11:23 +02:00
|
|
|
vp9_zero(*bsi);
|
|
|
|
|
|
|
|
bsi->segment_rd = best_rd;
|
|
|
|
bsi->ref_mv[0] = best_ref_mv;
|
|
|
|
bsi->ref_mv[1] = second_best_ref_mv;
|
|
|
|
bsi->mvp.as_int = best_ref_mv->as_int;
|
|
|
|
bsi->mvthresh = mvthresh;
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
for (i = 0; i < 4; i++) bsi->modes[i] = ZEROMV;
|
2014-04-17 18:11:23 +02:00
|
|
|
|
2015-04-24 05:42:19 +02:00
|
|
|
memcpy(t_above, pd->above_context, sizeof(t_above));
|
|
|
|
memcpy(t_left, pd->left_context, sizeof(t_left));
|
2013-05-01 01:13:20 +02:00
|
|
|
|
|
|
|
// 64 makes this threshold really big effectively
|
|
|
|
// making it so that we very rarely check mvs on
|
|
|
|
// segments. setting this to 1 would make mv thresh
|
|
|
|
// roughly equal to what it is for macroblocks
|
|
|
|
label_mv_thresh = 1 * bsi->mvthresh / label_count;
|
|
|
|
|
|
|
|
// Segmentation method overheads
|
2013-07-23 15:51:44 +02:00
|
|
|
for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
|
|
|
|
for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
|
2013-05-16 07:28:36 +02:00
|
|
|
// TODO(jingning,rbultje): rewrite the rate-distortion optimization
|
2013-05-22 06:28:42 +02:00
|
|
|
// loop for 4x4/4x8/8x4 block coding. to be replaced with new rd loop
|
2014-03-05 20:12:00 +01:00
|
|
|
int_mv mode_mv[MB_MODE_COUNT][2];
|
2013-05-26 23:40:49 +02:00
|
|
|
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
|
2014-04-12 00:26:24 +02:00
|
|
|
PREDICTION_MODE mode_selected = ZEROMV;
|
2013-07-18 02:07:32 +02:00
|
|
|
int64_t best_rd = INT64_MAX;
|
2014-02-09 05:18:37 +01:00
|
|
|
const int i = idy * 2 + idx;
|
|
|
|
int ref;
|
|
|
|
|
|
|
|
for (ref = 0; ref < 1 + has_second_rf; ++ref) {
|
2016-01-20 01:40:20 +01:00
|
|
|
const MV_REFERENCE_FRAME frame = mi->ref_frame[ref];
|
2014-02-09 05:18:37 +01:00
|
|
|
frame_mv[ZEROMV][frame].as_int = 0;
|
2016-07-27 05:43:23 +02:00
|
|
|
vp9_append_sub8x8_mvs_for_idx(
|
|
|
|
cm, xd, i, ref, mi_row, mi_col, &frame_mv[NEARESTMV][frame],
|
|
|
|
&frame_mv[NEARMV][frame], mbmi_ext->mode_context);
|
2013-09-18 01:31:46 +02:00
|
|
|
}
|
2014-02-09 05:18:37 +01:00
|
|
|
|
2013-05-16 07:28:36 +02:00
|
|
|
// search for the best motion vector on this segment
|
2013-05-26 23:40:49 +02:00
|
|
|
for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
|
2013-05-30 06:59:41 +02:00
|
|
|
const struct buf_2d orig_src = x->plane[0].src;
|
|
|
|
struct buf_2d orig_pre[2];
|
|
|
|
|
2013-11-05 20:58:57 +01:00
|
|
|
mode_idx = INTER_OFFSET(this_mode);
|
2013-07-18 02:07:32 +02:00
|
|
|
bsi->rdstat[i][mode_idx].brdcost = INT64_MAX;
|
2016-07-27 05:43:23 +02:00
|
|
|
if (!(inter_mode_mask & (1 << this_mode))) continue;
|
2013-07-18 02:07:32 +02:00
|
|
|
|
2015-06-29 18:27:11 +02:00
|
|
|
if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv,
|
2016-01-20 01:40:20 +01:00
|
|
|
this_mode, mi->ref_frame))
|
2014-03-25 22:16:11 +01:00
|
|
|
continue;
|
2013-07-17 22:53:35 +02:00
|
|
|
|
2015-04-24 05:42:19 +02:00
|
|
|
memcpy(orig_pre, pd->pre, sizeof(orig_pre));
|
|
|
|
memcpy(bsi->rdstat[i][mode_idx].ta, t_above,
|
|
|
|
sizeof(bsi->rdstat[i][mode_idx].ta));
|
|
|
|
memcpy(bsi->rdstat[i][mode_idx].tl, t_left,
|
|
|
|
sizeof(bsi->rdstat[i][mode_idx].tl));
|
2013-05-16 07:28:36 +02:00
|
|
|
|
|
|
|
// motion search for newmv (single predictor case only)
|
2013-09-17 23:06:00 +02:00
|
|
|
if (!has_second_rf && this_mode == NEWMV &&
|
2016-01-20 01:40:20 +01:00
|
|
|
seg_mvs[i][mi->ref_frame[0]].as_int == INVALID_MV) {
|
2014-05-01 00:50:13 +02:00
|
|
|
MV *const new_mv = &mode_mv[NEWMV][0].as_mv;
|
2013-05-16 07:28:36 +02:00
|
|
|
int step_param = 0;
|
2016-06-25 19:58:38 +02:00
|
|
|
uint32_t bestsme = UINT_MAX;
|
2013-05-29 21:52:57 +02:00
|
|
|
int sadpb = x->sadperbit4;
|
2014-01-03 20:48:07 +01:00
|
|
|
MV mvp_full;
|
2013-07-22 23:47:57 +02:00
|
|
|
int max_mv;
|
2014-10-07 11:48:08 +02:00
|
|
|
int cost_list[5];
|
2016-08-08 20:42:27 +02:00
|
|
|
const MvLimits tmp_mv_limits = x->mv_limits;
|
2013-05-16 07:28:36 +02:00
|
|
|
|
|
|
|
/* Is the best so far sufficiently good that we cant justify doing
|
|
|
|
* and new motion search. */
|
2016-07-27 05:43:23 +02:00
|
|
|
if (best_rd < label_mv_thresh) break;
|
2013-05-01 01:13:20 +02:00
|
|
|
|
2014-08-21 20:30:52 +02:00
|
|
|
if (cpi->oxcf.mode != BEST) {
|
2013-05-16 07:28:36 +02:00
|
|
|
// use previous block's result as next block's MV predictor.
|
|
|
|
if (i > 0) {
|
2013-09-11 19:45:44 +02:00
|
|
|
bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int;
|
2016-07-27 05:43:23 +02:00
|
|
|
if (i == 2) bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int;
|
2013-05-16 07:28:36 +02:00
|
|
|
}
|
|
|
|
}
|
2013-07-22 23:47:57 +02:00
|
|
|
if (i == 0)
|
2016-01-20 01:40:20 +01:00
|
|
|
max_mv = x->max_mv_context[mi->ref_frame[0]];
|
2013-07-22 23:47:57 +02:00
|
|
|
else
|
2015-08-18 03:19:22 +02:00
|
|
|
max_mv =
|
|
|
|
VPXMAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3;
|
2013-09-12 19:06:47 +02:00
|
|
|
|
2015-10-14 11:38:49 +02:00
|
|
|
if (sf->mv.auto_mv_step_size && cm->show_frame) {
|
2013-07-12 18:52:24 +02:00
|
|
|
// Take wtd average of the step_params based on the last frame's
|
|
|
|
// max mv magnitude and the best ref mvs of the current block for
|
|
|
|
// the given reference.
|
2016-07-27 05:43:23 +02:00
|
|
|
step_param =
|
|
|
|
(vp9_init_search_range(max_mv) + cpi->mv_step_param) / 2;
|
2013-07-12 18:52:24 +02:00
|
|
|
} else {
|
|
|
|
step_param = cpi->mv_step_param;
|
|
|
|
}
|
2013-05-16 07:28:36 +02:00
|
|
|
|
2014-01-03 20:48:07 +01:00
|
|
|
mvp_full.row = bsi->mvp.as_mv.row >> 3;
|
|
|
|
mvp_full.col = bsi->mvp.as_mv.col >> 3;
|
2013-05-29 21:52:57 +02:00
|
|
|
|
2015-10-14 11:38:49 +02:00
|
|
|
if (sf->adaptive_motion_search) {
|
2016-01-20 01:40:20 +01:00
|
|
|
mvp_full.row = x->pred_mv[mi->ref_frame[0]].row >> 3;
|
|
|
|
mvp_full.col = x->pred_mv[mi->ref_frame[0]].col >> 3;
|
2015-08-18 03:19:22 +02:00
|
|
|
step_param = VPXMAX(step_param, 8);
|
2013-09-12 19:06:47 +02:00
|
|
|
}
|
|
|
|
|
2013-05-30 06:59:41 +02:00
|
|
|
// adjust src pointer for this block
|
|
|
|
mi_buf_shift(x, i);
|
2013-12-11 19:57:15 +01:00
|
|
|
|
2016-08-08 20:42:27 +02:00
|
|
|
vp9_set_mv_search_range(&x->mv_limits, &bsi->ref_mv[0]->as_mv);
|
2013-12-11 19:57:15 +01:00
|
|
|
|
2014-08-27 23:06:30 +02:00
|
|
|
bestsme = vp9_full_pixel_search(
|
2017-02-01 12:04:49 +01:00
|
|
|
cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method,
|
|
|
|
sadpb,
|
2015-10-14 11:38:49 +02:00
|
|
|
sf->mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL,
|
2016-07-27 05:43:23 +02:00
|
|
|
&bsi->ref_mv[0]->as_mv, new_mv, INT_MAX, 1);
|
2013-05-29 21:52:57 +02:00
|
|
|
|
2016-08-08 20:42:27 +02:00
|
|
|
x->mv_limits = tmp_mv_limits;
|
2016-08-06 00:09:13 +02:00
|
|
|
|
2016-06-25 19:58:38 +02:00
|
|
|
if (bestsme < UINT_MAX) {
|
2016-06-20 19:13:30 +02:00
|
|
|
uint32_t distortion;
|
2014-08-27 23:06:30 +02:00
|
|
|
cpi->find_fractional_mv_step(
|
2016-07-27 05:43:23 +02:00
|
|
|
x, new_mv, &bsi->ref_mv[0]->as_mv, cm->allow_high_precision_mv,
|
|
|
|
x->errorperbit, &cpi->fn_ptr[bsize], sf->mv.subpel_force_stop,
|
|
|
|
sf->mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
|
|
|
|
x->nmvjointcost, x->mvcost, &distortion,
|
|
|
|
&x->pred_sse[mi->ref_frame[0]], NULL, 0, 0);
|
2013-05-16 07:28:36 +02:00
|
|
|
|
2013-09-12 19:06:47 +02:00
|
|
|
// save motion search result for use in compound prediction
|
2016-01-20 01:40:20 +01:00
|
|
|
seg_mvs[i][mi->ref_frame[0]].as_mv = *new_mv;
|
2013-05-16 07:28:36 +02:00
|
|
|
}
|
|
|
|
|
2015-10-14 11:38:49 +02:00
|
|
|
if (sf->adaptive_motion_search)
|
2016-01-20 01:40:20 +01:00
|
|
|
x->pred_mv[mi->ref_frame[0]] = *new_mv;
|
2013-09-12 19:06:47 +02:00
|
|
|
|
2013-05-16 07:28:36 +02:00
|
|
|
// restore src pointers
|
2013-05-30 06:59:41 +02:00
|
|
|
mi_buf_restore(x, orig_src, orig_pre);
|
2013-07-16 21:04:07 +02:00
|
|
|
}
|
|
|
|
|
2013-11-05 03:45:45 +01:00
|
|
|
if (has_second_rf) {
|
2016-01-20 01:40:20 +01:00
|
|
|
if (seg_mvs[i][mi->ref_frame[1]].as_int == INVALID_MV ||
|
|
|
|
seg_mvs[i][mi->ref_frame[0]].as_int == INVALID_MV)
|
2013-05-16 07:28:36 +02:00
|
|
|
continue;
|
2013-11-05 03:45:45 +01:00
|
|
|
}
|
2013-05-30 06:59:41 +02:00
|
|
|
|
2013-11-05 03:45:45 +01:00
|
|
|
if (has_second_rf && this_mode == NEWMV &&
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->interp_filter == EIGHTTAP) {
|
2013-05-30 06:59:41 +02:00
|
|
|
// adjust src pointers
|
|
|
|
mi_buf_shift(x, i);
|
2015-10-14 11:38:49 +02:00
|
|
|
if (sf->comp_inter_joint_search_thresh <= bsize) {
|
2013-06-14 00:10:42 +02:00
|
|
|
int rate_mv;
|
2016-07-27 05:43:23 +02:00
|
|
|
joint_motion_search(cpi, x, bsize, frame_mv[this_mode], mi_row,
|
|
|
|
mi_col, seg_mvs[i], &rate_mv);
|
2016-01-20 01:40:20 +01:00
|
|
|
seg_mvs[i][mi->ref_frame[0]].as_int =
|
|
|
|
frame_mv[this_mode][mi->ref_frame[0]].as_int;
|
|
|
|
seg_mvs[i][mi->ref_frame[1]].as_int =
|
|
|
|
frame_mv[this_mode][mi->ref_frame[1]].as_int;
|
2013-05-30 06:59:41 +02:00
|
|
|
}
|
|
|
|
// restore src pointers
|
|
|
|
mi_buf_restore(x, orig_src, orig_pre);
|
2013-05-16 07:28:36 +02:00
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
bsi->rdstat[i][mode_idx].brate = set_and_cost_bmi_mvs(
|
|
|
|
cpi, x, xd, i, this_mode, mode_mv[this_mode], frame_mv, seg_mvs[i],
|
|
|
|
bsi->ref_mv, x->nmvjointcost, x->mvcost);
|
2013-07-18 02:07:32 +02:00
|
|
|
|
2014-03-05 20:12:00 +01:00
|
|
|
for (ref = 0; ref < 1 + has_second_rf; ++ref) {
|
|
|
|
bsi->rdstat[i][mode_idx].mvs[ref].as_int =
|
|
|
|
mode_mv[this_mode][ref].as_int;
|
2013-07-23 15:51:44 +02:00
|
|
|
if (num_4x4_blocks_wide > 1)
|
2014-03-05 20:12:00 +01:00
|
|
|
bsi->rdstat[i + 1][mode_idx].mvs[ref].as_int =
|
|
|
|
mode_mv[this_mode][ref].as_int;
|
2013-07-23 15:51:44 +02:00
|
|
|
if (num_4x4_blocks_high > 1)
|
2014-03-05 20:12:00 +01:00
|
|
|
bsi->rdstat[i + 2][mode_idx].mvs[ref].as_int =
|
|
|
|
mode_mv[this_mode][ref].as_int;
|
2013-07-18 02:07:32 +02:00
|
|
|
}
|
2013-05-16 07:28:36 +02:00
|
|
|
|
|
|
|
// Trap vectors that reach beyond the UMV borders
|
2016-08-08 20:42:27 +02:00
|
|
|
if (mv_check_bounds(&x->mv_limits, &mode_mv[this_mode][0].as_mv) ||
|
|
|
|
(has_second_rf &&
|
|
|
|
mv_check_bounds(&x->mv_limits, &mode_mv[this_mode][1].as_mv)))
|
2013-05-16 07:28:36 +02:00
|
|
|
continue;
|
2013-05-01 01:13:20 +02:00
|
|
|
|
2013-07-18 02:07:32 +02:00
|
|
|
if (filter_idx > 0) {
|
|
|
|
BEST_SEG_INFO *ref_bsi = bsi_buf;
|
2014-03-05 20:12:00 +01:00
|
|
|
subpelmv = 0;
|
|
|
|
have_ref = 1;
|
|
|
|
|
|
|
|
for (ref = 0; ref < 1 + has_second_rf; ++ref) {
|
|
|
|
subpelmv |= mv_has_subpel(&mode_mv[this_mode][ref].as_mv);
|
|
|
|
have_ref &= mode_mv[this_mode][ref].as_int ==
|
2016-07-27 05:43:23 +02:00
|
|
|
ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
|
2013-07-18 02:07:32 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (filter_idx > 1 && !subpelmv && !have_ref) {
|
|
|
|
ref_bsi = bsi_buf + 1;
|
2014-03-05 20:12:00 +01:00
|
|
|
have_ref = 1;
|
|
|
|
for (ref = 0; ref < 1 + has_second_rf; ++ref)
|
|
|
|
have_ref &= mode_mv[this_mode][ref].as_int ==
|
2016-07-27 05:43:23 +02:00
|
|
|
ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int;
|
2013-07-18 02:07:32 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!subpelmv && have_ref &&
|
|
|
|
ref_bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
|
2015-04-24 05:42:19 +02:00
|
|
|
memcpy(&bsi->rdstat[i][mode_idx], &ref_bsi->rdstat[i][mode_idx],
|
|
|
|
sizeof(SEG_RDSTAT));
|
2013-10-30 21:52:55 +01:00
|
|
|
if (num_4x4_blocks_wide > 1)
|
|
|
|
bsi->rdstat[i + 1][mode_idx].eobs =
|
|
|
|
ref_bsi->rdstat[i + 1][mode_idx].eobs;
|
|
|
|
if (num_4x4_blocks_high > 1)
|
|
|
|
bsi->rdstat[i + 2][mode_idx].eobs =
|
|
|
|
ref_bsi->rdstat[i + 2][mode_idx].eobs;
|
|
|
|
|
2013-07-18 02:07:32 +02:00
|
|
|
if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
|
|
|
|
mode_selected = this_mode;
|
|
|
|
best_rd = bsi->rdstat[i][mode_idx].brdcost;
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
2013-07-17 23:21:44 +02:00
|
|
|
}
|
2013-05-16 07:28:36 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
bsi->rdstat[i][mode_idx].brdcost = encode_inter_mb_segment(
|
|
|
|
cpi, x, bsi->segment_rd - this_segment_rd, i,
|
|
|
|
&bsi->rdstat[i][mode_idx].byrate, &bsi->rdstat[i][mode_idx].bdist,
|
|
|
|
&bsi->rdstat[i][mode_idx].bsse, bsi->rdstat[i][mode_idx].ta,
|
|
|
|
bsi->rdstat[i][mode_idx].tl, mi_row, mi_col);
|
2013-07-18 02:07:32 +02:00
|
|
|
if (bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) {
|
2016-07-27 05:43:23 +02:00
|
|
|
bsi->rdstat[i][mode_idx].brdcost +=
|
|
|
|
RDCOST(x->rdmult, x->rddiv, bsi->rdstat[i][mode_idx].brate, 0);
|
2013-07-18 02:07:32 +02:00
|
|
|
bsi->rdstat[i][mode_idx].brate += bsi->rdstat[i][mode_idx].byrate;
|
2013-12-04 02:59:32 +01:00
|
|
|
bsi->rdstat[i][mode_idx].eobs = p->eobs[i];
|
2013-10-30 21:52:55 +01:00
|
|
|
if (num_4x4_blocks_wide > 1)
|
2013-12-04 02:59:32 +01:00
|
|
|
bsi->rdstat[i + 1][mode_idx].eobs = p->eobs[i + 1];
|
2013-10-30 21:52:55 +01:00
|
|
|
if (num_4x4_blocks_high > 1)
|
2013-12-04 02:59:32 +01:00
|
|
|
bsi->rdstat[i + 2][mode_idx].eobs = p->eobs[i + 2];
|
2013-07-18 02:07:32 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (bsi->rdstat[i][mode_idx].brdcost < best_rd) {
|
2013-05-16 07:28:36 +02:00
|
|
|
mode_selected = this_mode;
|
2013-07-18 02:07:32 +02:00
|
|
|
best_rd = bsi->rdstat[i][mode_idx].brdcost;
|
2013-05-16 07:28:36 +02:00
|
|
|
}
|
|
|
|
} /*for each 4x4 mode*/
|
|
|
|
|
2013-07-18 02:07:32 +02:00
|
|
|
if (best_rd == INT64_MAX) {
|
|
|
|
int iy, midx;
|
|
|
|
for (iy = i + 1; iy < 4; ++iy)
|
2013-08-23 03:40:34 +02:00
|
|
|
for (midx = 0; midx < INTER_MODES; ++midx)
|
2013-07-18 02:07:32 +02:00
|
|
|
bsi->rdstat[iy][midx].brdcost = INT64_MAX;
|
2013-07-17 23:21:44 +02:00
|
|
|
bsi->segment_rd = INT64_MAX;
|
2014-11-15 00:29:18 +01:00
|
|
|
return INT64_MAX;
|
2013-07-17 23:21:44 +02:00
|
|
|
}
|
|
|
|
|
2013-11-05 20:58:57 +01:00
|
|
|
mode_idx = INTER_OFFSET(mode_selected);
|
2015-04-24 05:42:19 +02:00
|
|
|
memcpy(t_above, bsi->rdstat[i][mode_idx].ta, sizeof(t_above));
|
|
|
|
memcpy(t_left, bsi->rdstat[i][mode_idx].tl, sizeof(t_left));
|
2013-05-16 07:28:36 +02:00
|
|
|
|
2015-06-29 18:27:11 +02:00
|
|
|
set_and_cost_bmi_mvs(cpi, x, xd, i, mode_selected, mode_mv[mode_selected],
|
2014-04-17 22:48:54 +02:00
|
|
|
frame_mv, seg_mvs[i], bsi->ref_mv, x->nmvjointcost,
|
|
|
|
x->mvcost);
|
2013-05-16 07:28:36 +02:00
|
|
|
|
2013-07-18 02:07:32 +02:00
|
|
|
br += bsi->rdstat[i][mode_idx].brate;
|
|
|
|
bd += bsi->rdstat[i][mode_idx].bdist;
|
|
|
|
block_sse += bsi->rdstat[i][mode_idx].bsse;
|
|
|
|
segmentyrate += bsi->rdstat[i][mode_idx].byrate;
|
|
|
|
this_segment_rd += bsi->rdstat[i][mode_idx].brdcost;
|
2013-07-09 01:01:01 +02:00
|
|
|
|
|
|
|
if (this_segment_rd > bsi->segment_rd) {
|
2013-07-18 02:07:32 +02:00
|
|
|
int iy, midx;
|
|
|
|
for (iy = i + 1; iy < 4; ++iy)
|
2013-08-23 03:40:34 +02:00
|
|
|
for (midx = 0; midx < INTER_MODES; ++midx)
|
2013-07-18 02:07:32 +02:00
|
|
|
bsi->rdstat[iy][midx].brdcost = INT64_MAX;
|
2013-07-09 01:01:01 +02:00
|
|
|
bsi->segment_rd = INT64_MAX;
|
2014-11-15 00:29:18 +01:00
|
|
|
return INT64_MAX;
|
2013-07-09 01:01:01 +02:00
|
|
|
}
|
2013-05-16 07:28:36 +02:00
|
|
|
}
|
2013-05-01 01:13:20 +02:00
|
|
|
} /* for each label */
|
|
|
|
|
2013-07-09 01:01:01 +02:00
|
|
|
bsi->r = br;
|
|
|
|
bsi->d = bd;
|
|
|
|
bsi->segment_yrate = segmentyrate;
|
|
|
|
bsi->segment_rd = this_segment_rd;
|
2013-07-11 00:18:52 +02:00
|
|
|
bsi->sse = block_sse;
|
2013-05-01 01:13:20 +02:00
|
|
|
|
2013-07-18 02:07:32 +02:00
|
|
|
// update the coding decisions
|
2016-07-27 05:43:23 +02:00
|
|
|
for (k = 0; k < 4; ++k) bsi->modes[k] = mi->bmi[k].as_mode;
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (bsi->segment_rd > best_rd) return INT64_MAX;
|
2013-05-01 01:13:20 +02:00
|
|
|
/* set it to the best */
|
|
|
|
for (i = 0; i < 4; i++) {
|
2013-11-05 20:58:57 +01:00
|
|
|
mode_idx = INTER_OFFSET(bsi->modes[i]);
|
2013-07-18 02:07:32 +02:00
|
|
|
mi->bmi[i].as_mv[0].as_int = bsi->rdstat[i][mode_idx].mvs[0].as_int;
|
2016-01-20 01:40:20 +01:00
|
|
|
if (has_second_ref(mi))
|
2013-07-18 02:07:32 +02:00
|
|
|
mi->bmi[i].as_mv[1].as_int = bsi->rdstat[i][mode_idx].mvs[1].as_int;
|
2013-12-04 02:59:32 +01:00
|
|
|
x->plane[0].eobs[i] = bsi->rdstat[i][mode_idx].eobs;
|
2013-10-07 20:20:50 +02:00
|
|
|
mi->bmi[i].as_mode = bsi->modes[i];
|
2013-05-01 01:13:20 +02:00
|
|
|
}
|
2013-07-18 01:46:53 +02:00
|
|
|
|
2013-05-01 01:13:20 +02:00
|
|
|
/*
|
|
|
|
* used to set mbmi->mv.as_int
|
|
|
|
*/
|
2013-07-18 02:07:32 +02:00
|
|
|
*returntotrate = bsi->r;
|
|
|
|
*returndistortion = bsi->d;
|
|
|
|
*returnyrate = bsi->segment_yrate;
|
2013-12-04 02:59:32 +01:00
|
|
|
*skippable = vp9_is_skippable_in_plane(x, BLOCK_8X8, 0);
|
2013-07-18 02:07:32 +02:00
|
|
|
*psse = bsi->sse;
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->mode = bsi->modes[3];
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2013-07-18 02:07:32 +02:00
|
|
|
return bsi->segment_rd;
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
|
|
|
|
2014-04-19 01:56:43 +02:00
|
|
|
static void estimate_ref_frame_costs(const VP9_COMMON *cm,
|
2016-07-27 05:43:23 +02:00
|
|
|
const MACROBLOCKD *xd, int segment_id,
|
2013-06-06 22:44:34 +02:00
|
|
|
unsigned int *ref_costs_single,
|
|
|
|
unsigned int *ref_costs_comp,
|
2015-07-20 22:49:15 +02:00
|
|
|
vpx_prob *comp_mode_p) {
|
2016-07-27 05:43:23 +02:00
|
|
|
int seg_ref_active =
|
|
|
|
segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME);
|
2013-05-05 07:09:43 +02:00
|
|
|
if (seg_ref_active) {
|
2015-04-24 05:47:40 +02:00
|
|
|
memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single));
|
2016-07-27 05:43:23 +02:00
|
|
|
memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
|
2013-06-06 22:44:34 +02:00
|
|
|
*comp_mode_p = 128;
|
|
|
|
} else {
|
2015-07-20 22:49:15 +02:00
|
|
|
vpx_prob intra_inter_p = vp9_get_intra_inter_prob(cm, xd);
|
|
|
|
vpx_prob comp_inter_p = 128;
|
2013-05-05 07:09:43 +02:00
|
|
|
|
2013-12-10 00:13:34 +01:00
|
|
|
if (cm->reference_mode == REFERENCE_MODE_SELECT) {
|
2013-12-06 20:23:01 +01:00
|
|
|
comp_inter_p = vp9_get_reference_mode_prob(cm, xd);
|
2013-06-06 22:44:34 +02:00
|
|
|
*comp_mode_p = comp_inter_p;
|
2013-05-05 07:09:43 +02:00
|
|
|
} else {
|
2013-06-06 22:44:34 +02:00
|
|
|
*comp_mode_p = 128;
|
|
|
|
}
|
2013-05-05 07:09:43 +02:00
|
|
|
|
2013-06-06 22:44:34 +02:00
|
|
|
ref_costs_single[INTRA_FRAME] = vp9_cost_bit(intra_inter_p, 0);
|
2013-05-05 07:09:43 +02:00
|
|
|
|
2013-12-10 00:13:34 +01:00
|
|
|
if (cm->reference_mode != COMPOUND_REFERENCE) {
|
2015-07-20 22:49:15 +02:00
|
|
|
vpx_prob ref_single_p1 = vp9_get_pred_prob_single_ref_p1(cm, xd);
|
|
|
|
vpx_prob ref_single_p2 = vp9_get_pred_prob_single_ref_p2(cm, xd);
|
2013-06-06 22:44:34 +02:00
|
|
|
unsigned int base_cost = vp9_cost_bit(intra_inter_p, 1);
|
2013-05-05 07:09:43 +02:00
|
|
|
|
2013-12-10 00:13:34 +01:00
|
|
|
if (cm->reference_mode == REFERENCE_MODE_SELECT)
|
2013-06-06 22:44:34 +02:00
|
|
|
base_cost += vp9_cost_bit(comp_inter_p, 0);
|
2013-05-05 07:09:43 +02:00
|
|
|
|
2013-06-06 22:44:34 +02:00
|
|
|
ref_costs_single[LAST_FRAME] = ref_costs_single[GOLDEN_FRAME] =
|
|
|
|
ref_costs_single[ALTREF_FRAME] = base_cost;
|
2016-07-27 05:43:23 +02:00
|
|
|
ref_costs_single[LAST_FRAME] += vp9_cost_bit(ref_single_p1, 0);
|
2013-06-06 22:44:34 +02:00
|
|
|
ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p1, 1);
|
|
|
|
ref_costs_single[ALTREF_FRAME] += vp9_cost_bit(ref_single_p1, 1);
|
|
|
|
ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p2, 0);
|
|
|
|
ref_costs_single[ALTREF_FRAME] += vp9_cost_bit(ref_single_p2, 1);
|
|
|
|
} else {
|
2016-07-27 05:43:23 +02:00
|
|
|
ref_costs_single[LAST_FRAME] = 512;
|
2013-06-06 22:44:34 +02:00
|
|
|
ref_costs_single[GOLDEN_FRAME] = 512;
|
|
|
|
ref_costs_single[ALTREF_FRAME] = 512;
|
2012-10-17 20:40:00 +02:00
|
|
|
}
|
2013-12-10 00:13:34 +01:00
|
|
|
if (cm->reference_mode != SINGLE_REFERENCE) {
|
2015-07-20 22:49:15 +02:00
|
|
|
vpx_prob ref_comp_p = vp9_get_pred_prob_comp_ref_p(cm, xd);
|
2013-06-06 22:44:34 +02:00
|
|
|
unsigned int base_cost = vp9_cost_bit(intra_inter_p, 1);
|
2013-05-05 07:09:43 +02:00
|
|
|
|
2013-12-10 00:13:34 +01:00
|
|
|
if (cm->reference_mode == REFERENCE_MODE_SELECT)
|
2013-06-06 22:44:34 +02:00
|
|
|
base_cost += vp9_cost_bit(comp_inter_p, 1);
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
ref_costs_comp[LAST_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 0);
|
2013-06-06 22:44:34 +02:00
|
|
|
ref_costs_comp[GOLDEN_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 1);
|
|
|
|
} else {
|
2016-07-27 05:43:23 +02:00
|
|
|
ref_costs_comp[LAST_FRAME] = 512;
|
2013-06-06 22:44:34 +02:00
|
|
|
ref_costs_comp[GOLDEN_FRAME] = 512;
|
|
|
|
}
|
2012-10-17 20:40:00 +02:00
|
|
|
}
|
|
|
|
}
|
2011-06-23 00:07:04 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
static void store_coding_context(
|
|
|
|
MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int mode_index,
|
|
|
|
int64_t comp_pred_diff[REFERENCE_MODES],
|
|
|
|
int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS], int skippable) {
|
2013-05-05 07:09:43 +02:00
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
2010-12-06 22:42:52 +01:00
|
|
|
|
2013-05-05 07:09:43 +02:00
|
|
|
// Take a snapshot of the coding context so it can be
|
|
|
|
// restored if we decide to encode this way
|
|
|
|
ctx->skip = x->skip;
|
2014-10-09 00:02:37 +02:00
|
|
|
ctx->skippable = skippable;
|
2013-05-05 07:09:43 +02:00
|
|
|
ctx->best_mode_index = mode_index;
|
2015-04-21 14:36:58 +02:00
|
|
|
ctx->mic = *xd->mi[0];
|
2015-06-29 18:27:11 +02:00
|
|
|
ctx->mbmi_ext = *x->mbmi_ext;
|
2013-11-23 01:35:37 +01:00
|
|
|
ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE];
|
2016-07-27 05:43:23 +02:00
|
|
|
ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
|
2013-11-23 01:35:37 +01:00
|
|
|
ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
|
2010-12-06 22:42:52 +01:00
|
|
|
|
2015-04-24 05:42:19 +02:00
|
|
|
memcpy(ctx->best_filter_diff, best_filter_diff,
|
|
|
|
sizeof(*best_filter_diff) * SWITCHABLE_FILTER_CONTEXTS);
|
2013-05-05 07:09:43 +02:00
|
|
|
}
|
2010-12-06 22:42:52 +01:00
|
|
|
|
2014-07-02 21:36:48 +02:00
|
|
|
static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
|
|
|
|
MV_REFERENCE_FRAME ref_frame,
|
2016-07-27 05:43:23 +02:00
|
|
|
BLOCK_SIZE block_size, int mi_row, int mi_col,
|
2014-07-02 21:36:48 +02:00
|
|
|
int_mv frame_nearest_mv[MAX_REF_FRAMES],
|
|
|
|
int_mv frame_near_mv[MAX_REF_FRAMES],
|
|
|
|
struct buf_2d yv12_mb[4][MAX_MB_PLANE]) {
|
2014-01-22 04:46:07 +01:00
|
|
|
const VP9_COMMON *cm = &cpi->common;
|
|
|
|
const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
|
2013-05-05 07:09:43 +02:00
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
2015-04-21 14:36:58 +02:00
|
|
|
MODE_INFO *const mi = xd->mi[0];
|
2015-06-29 18:27:11 +02:00
|
|
|
int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
|
2014-01-22 04:46:07 +01:00
|
|
|
const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
|
2015-06-29 18:27:11 +02:00
|
|
|
MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
|
2010-12-06 22:42:52 +01:00
|
|
|
|
2015-02-18 18:40:34 +01:00
|
|
|
assert(yv12 != NULL);
|
|
|
|
|
2013-05-05 07:09:43 +02:00
|
|
|
// TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
|
|
|
|
// use the UV scaling factors.
|
2014-06-24 00:49:12 +02:00
|
|
|
vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2013-05-05 07:09:43 +02:00
|
|
|
// Gets an initial list of candidate vectors from neighbours and orders them
|
2015-06-16 15:38:34 +02:00
|
|
|
vp9_find_mv_refs(cm, xd, mi, ref_frame, candidates, mi_row, mi_col,
|
2016-01-13 22:30:40 +01:00
|
|
|
mbmi_ext->mode_context);
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2013-05-05 07:09:43 +02:00
|
|
|
// Candidate refinement carried out at encoder and decoder
|
2014-01-22 04:46:07 +01:00
|
|
|
vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
|
|
|
|
&frame_nearest_mv[ref_frame],
|
|
|
|
&frame_near_mv[ref_frame]);
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2013-05-05 07:09:43 +02:00
|
|
|
// Further refinement that is encode side only to test the top few candidates
|
|
|
|
// in full and choose the best as the centre point for subsequent searches.
|
|
|
|
// The current implementation doesn't support scaling.
|
2013-12-20 01:06:33 +01:00
|
|
|
if (!vp9_is_scaled(sf) && block_size >= BLOCK_8X8)
|
2016-07-27 05:43:23 +02:00
|
|
|
vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
|
|
|
|
block_size);
|
2013-05-05 07:09:43 +02:00
|
|
|
}
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
|
|
|
|
int mi_row, int mi_col, int_mv *tmp_mv,
|
|
|
|
int *rate_mv) {
|
2013-06-14 00:10:42 +02:00
|
|
|
MACROBLOCKD *xd = &x->e_mbd;
|
2014-04-19 01:56:43 +02:00
|
|
|
const VP9_COMMON *cm = &cpi->common;
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO *mi = xd->mi[0];
|
2016-07-27 05:43:23 +02:00
|
|
|
struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
|
2013-06-14 00:10:42 +02:00
|
|
|
int bestsme = INT_MAX;
|
2014-05-06 02:35:54 +02:00
|
|
|
int step_param;
|
2013-06-14 00:10:42 +02:00
|
|
|
int sadpb = x->sadperbit16;
|
2014-01-07 03:07:12 +01:00
|
|
|
MV mvp_full;
|
2016-01-20 01:40:20 +01:00
|
|
|
int ref = mi->ref_frame[0];
|
2015-06-29 18:27:11 +02:00
|
|
|
MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
|
2016-08-08 20:42:27 +02:00
|
|
|
const MvLimits tmp_mv_limits = x->mv_limits;
|
2014-10-07 11:48:08 +02:00
|
|
|
int cost_list[5];
|
2013-06-14 00:10:42 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
const YV12_BUFFER_CONFIG *scaled_ref_frame =
|
|
|
|
vp9_get_scaled_ref_frame(cpi, ref);
|
2013-06-14 00:10:42 +02:00
|
|
|
|
2014-02-27 02:07:06 +01:00
|
|
|
MV pred_mv[3];
|
2015-06-29 18:27:11 +02:00
|
|
|
pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
|
|
|
|
pred_mv[1] = x->mbmi_ext->ref_mvs[ref][1].as_mv;
|
2014-05-19 20:43:07 +02:00
|
|
|
pred_mv[2] = x->pred_mv[ref];
|
2014-01-07 18:53:38 +01:00
|
|
|
|
2013-06-14 00:10:42 +02:00
|
|
|
if (scaled_ref_frame) {
|
|
|
|
int i;
|
|
|
|
// Swap out the reference frame for a version that's been scaled to
|
|
|
|
// match the resolution of the current frame, allowing the existing
|
|
|
|
// motion search code to be used without additional modifications.
|
2016-07-27 05:43:23 +02:00
|
|
|
for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
|
2013-06-14 00:10:42 +02:00
|
|
|
|
2014-03-03 23:58:43 +01:00
|
|
|
vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
|
2013-06-14 00:10:42 +02:00
|
|
|
}
|
|
|
|
|
2014-02-12 21:48:15 +01:00
|
|
|
// Work out the size of the first step in the mv step search.
|
2015-08-18 03:19:22 +02:00
|
|
|
// 0 here is maximum length first step. 1 is VPXMAX >> 1 etc.
|
2014-06-12 21:35:57 +02:00
|
|
|
if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) {
|
2014-02-12 21:48:15 +01:00
|
|
|
// Take wtd average of the step_params based on the last frame's
|
|
|
|
// max mv magnitude and that based on the best ref mvs of the current
|
|
|
|
// block for the given reference.
|
2016-07-27 05:43:23 +02:00
|
|
|
step_param =
|
|
|
|
(vp9_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
|
|
|
|
2;
|
2013-06-26 18:06:25 +02:00
|
|
|
} else {
|
2014-02-12 21:48:15 +01:00
|
|
|
step_param = cpi->mv_step_param;
|
2013-07-03 23:43:23 +02:00
|
|
|
}
|
2013-06-14 00:10:42 +02:00
|
|
|
|
2014-09-05 01:09:14 +02:00
|
|
|
if (cpi->sf.adaptive_motion_search && bsize < BLOCK_64X64) {
|
2015-08-18 03:19:22 +02:00
|
|
|
int boffset =
|
|
|
|
2 * (b_width_log2_lookup[BLOCK_64X64] -
|
|
|
|
VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize]));
|
|
|
|
step_param = VPXMAX(step_param, boffset);
|
2013-09-12 19:06:47 +02:00
|
|
|
}
|
|
|
|
|
2013-12-21 00:24:22 +01:00
|
|
|
if (cpi->sf.adaptive_motion_search) {
|
2014-10-07 21:30:33 +02:00
|
|
|
int bwl = b_width_log2_lookup[bsize];
|
|
|
|
int bhl = b_height_log2_lookup[bsize];
|
2013-12-21 00:24:22 +01:00
|
|
|
int tlevel = x->pred_mv_sad[ref] >> (bwl + bhl + 4);
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (tlevel < 5) step_param += 2;
|
2013-12-21 00:24:22 +01:00
|
|
|
|
2014-11-15 00:29:18 +01:00
|
|
|
// prev_mv_sad is not setup for dynamically scaled frames.
|
|
|
|
if (cpi->oxcf.resize_mode != RESIZE_DYNAMIC) {
|
|
|
|
int i;
|
|
|
|
for (i = LAST_FRAME; i <= ALTREF_FRAME && cm->show_frame; ++i) {
|
|
|
|
if ((x->pred_mv_sad[ref] >> 3) > x->pred_mv_sad[i]) {
|
|
|
|
x->pred_mv[ref].row = 0;
|
|
|
|
x->pred_mv[ref].col = 0;
|
|
|
|
tmp_mv->as_int = INVALID_MV;
|
|
|
|
|
|
|
|
if (scaled_ref_frame) {
|
|
|
|
int i;
|
|
|
|
for (i = 0; i < MAX_MB_PLANE; ++i)
|
|
|
|
xd->plane[i].pre[0] = backup_yv12[i];
|
|
|
|
}
|
|
|
|
return;
|
2013-12-21 00:24:22 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-12 02:34:20 +02:00
|
|
|
// Note: MV limits are modified here. Always restore the original values
|
|
|
|
// after full-pixel motion search.
|
|
|
|
vp9_set_mv_search_range(&x->mv_limits, &ref_mv);
|
|
|
|
|
2014-02-27 02:07:06 +01:00
|
|
|
mvp_full = pred_mv[x->mv_best_ref_index[ref]];
|
2013-09-12 19:06:47 +02:00
|
|
|
|
2014-01-07 03:07:12 +01:00
|
|
|
mvp_full.col >>= 3;
|
|
|
|
mvp_full.row >>= 3;
|
2013-06-14 00:10:42 +02:00
|
|
|
|
2017-02-01 12:04:49 +01:00
|
|
|
bestsme = vp9_full_pixel_search(
|
|
|
|
cpi, x, bsize, &mvp_full, step_param, cpi->sf.mv.search_method, sadpb,
|
|
|
|
cond_cost_list(cpi, cost_list), &ref_mv, &tmp_mv->as_mv, INT_MAX, 1);
|
2013-06-14 00:10:42 +02:00
|
|
|
|
2016-08-08 20:42:27 +02:00
|
|
|
x->mv_limits = tmp_mv_limits;
|
2013-06-14 00:10:42 +02:00
|
|
|
|
|
|
|
if (bestsme < INT_MAX) {
|
2016-07-27 05:43:23 +02:00
|
|
|
uint32_t dis; /* TODO: use dis in distortion calculation later. */
|
|
|
|
cpi->find_fractional_mv_step(
|
|
|
|
x, &tmp_mv->as_mv, &ref_mv, cm->allow_high_precision_mv, x->errorperbit,
|
|
|
|
&cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
|
|
|
|
cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
|
|
|
|
x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0);
|
2013-06-14 00:10:42 +02:00
|
|
|
}
|
2016-07-27 05:43:23 +02:00
|
|
|
*rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
|
|
|
|
x->mvcost, MV_COST_WEIGHT);
|
2013-09-12 19:06:47 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (cpi->sf.adaptive_motion_search) x->pred_mv[ref] = tmp_mv->as_mv;
|
2013-09-12 19:06:47 +02:00
|
|
|
|
2013-06-14 00:10:42 +02:00
|
|
|
if (scaled_ref_frame) {
|
|
|
|
int i;
|
2016-07-27 05:43:23 +02:00
|
|
|
for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i];
|
2013-06-14 00:10:42 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-11-20 00:29:22 +01:00
|
|
|
static INLINE void restore_dst_buf(MACROBLOCKD *xd,
|
|
|
|
uint8_t *orig_dst[MAX_MB_PLANE],
|
|
|
|
int orig_dst_stride[MAX_MB_PLANE]) {
|
|
|
|
int i;
|
|
|
|
for (i = 0; i < MAX_MB_PLANE; i++) {
|
|
|
|
xd->plane[i].dst.buf = orig_dst[i];
|
|
|
|
xd->plane[i].dst.stride = orig_dst_stride[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-12-11 17:22:03 +01:00
|
|
|
// In some situations we want to discount tha pparent cost of a new motion
|
|
|
|
// vector. Where there is a subtle motion field and especially where there is
|
|
|
|
// low spatial complexity then it can be hard to cover the cost of a new motion
|
|
|
|
// vector in a single block, even if that motion vector reduces distortion.
|
|
|
|
// However, once established that vector may be usable through the nearest and
|
|
|
|
// near mv modes to reduce distortion in subsequent blocks and also improve
|
|
|
|
// visual quality.
|
2016-07-27 05:43:23 +02:00
|
|
|
static int discount_newmv_test(const VP9_COMP *cpi, int this_mode,
|
2014-12-11 17:22:03 +01:00
|
|
|
int_mv this_mv,
|
|
|
|
int_mv (*mode_mv)[MAX_REF_FRAMES],
|
|
|
|
int ref_frame) {
|
2016-07-27 05:43:23 +02:00
|
|
|
return (!cpi->rc.is_src_frame_alt_ref && (this_mode == NEWMV) &&
|
2014-12-11 17:22:03 +01:00
|
|
|
(this_mv.as_int != 0) &&
|
|
|
|
((mode_mv[NEARESTMV][ref_frame].as_int == 0) ||
|
|
|
|
(mode_mv[NEARESTMV][ref_frame].as_int == INVALID_MV)) &&
|
|
|
|
((mode_mv[NEARMV][ref_frame].as_int == 0) ||
|
|
|
|
(mode_mv[NEARMV][ref_frame].as_int == INVALID_MV)));
|
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
static int64_t handle_inter_mode(
|
|
|
|
VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
|
|
|
|
int64_t *distortion, int *skippable, int *rate_y, int *rate_uv,
|
|
|
|
int *disable_skip, int_mv (*mode_mv)[MAX_REF_FRAMES], int mi_row,
|
|
|
|
int mi_col, int_mv single_newmv[MAX_REF_FRAMES],
|
|
|
|
INTERP_FILTER (*single_filter)[MAX_REF_FRAMES],
|
|
|
|
int (*single_skippable)[MAX_REF_FRAMES], int64_t *psse,
|
|
|
|
const int64_t ref_best_rd, int64_t *mask_filter, int64_t filter_cache[]) {
|
2013-05-05 07:09:43 +02:00
|
|
|
VP9_COMMON *cm = &cpi->common;
|
|
|
|
MACROBLOCKD *xd = &x->e_mbd;
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO *mi = xd->mi[0];
|
2015-06-29 18:27:11 +02:00
|
|
|
MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
|
2016-01-20 01:40:20 +01:00
|
|
|
const int is_comp_pred = has_second_ref(mi);
|
|
|
|
const int this_mode = mi->mode;
|
2013-07-17 20:33:15 +02:00
|
|
|
int_mv *frame_mv = mode_mv[this_mode];
|
2013-05-05 07:09:43 +02:00
|
|
|
int i;
|
2016-01-20 01:40:20 +01:00
|
|
|
int refs[2] = { mi->ref_frame[0],
|
2016-07-27 05:43:23 +02:00
|
|
|
(mi->ref_frame[1] < 0 ? 0 : mi->ref_frame[1]) };
|
2013-05-05 07:09:43 +02:00
|
|
|
int_mv cur_mv[2];
|
2014-09-24 15:36:34 +02:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
2015-05-02 22:24:16 +02:00
|
|
|
DECLARE_ALIGNED(16, uint16_t, tmp_buf16[MAX_MB_PLANE * 64 * 64]);
|
2014-10-06 23:30:01 +02:00
|
|
|
uint8_t *tmp_buf;
|
2014-09-24 15:36:34 +02:00
|
|
|
#else
|
2015-05-02 22:24:16 +02:00
|
|
|
DECLARE_ALIGNED(16, uint8_t, tmp_buf[MAX_MB_PLANE * 64 * 64]);
|
2014-09-24 15:36:34 +02:00
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
2013-05-05 07:09:43 +02:00
|
|
|
int pred_exists = 0;
|
|
|
|
int intpel_mv;
|
2014-08-27 01:52:56 +02:00
|
|
|
int64_t rd, tmp_rd, best_rd = INT64_MAX;
|
2013-07-10 18:26:32 +02:00
|
|
|
int best_needs_copy = 0;
|
|
|
|
uint8_t *orig_dst[MAX_MB_PLANE];
|
|
|
|
int orig_dst_stride[MAX_MB_PLANE];
|
2013-07-09 01:01:01 +02:00
|
|
|
int rs = 0;
|
2014-07-23 01:32:20 +02:00
|
|
|
INTERP_FILTER best_filter = SWITCHABLE;
|
2016-07-27 05:43:23 +02:00
|
|
|
uint8_t skip_txfm[MAX_MB_PLANE << 2] = { 0 };
|
|
|
|
int64_t bsse[MAX_MB_PLANE << 2] = { 0 };
|
2014-07-23 01:32:20 +02:00
|
|
|
|
|
|
|
int bsl = mi_width_log2_lookup[bsize];
|
2016-07-27 05:43:23 +02:00
|
|
|
int pred_filter_search =
|
|
|
|
cpi->sf.cb_pred_filter_search
|
|
|
|
? (((mi_row + mi_col) >> bsl) +
|
|
|
|
get_chessboard_index(cm->current_video_frame)) &
|
|
|
|
0x1
|
|
|
|
: 0;
|
2014-07-23 01:32:20 +02:00
|
|
|
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
int skip_txfm_sb = 0;
|
|
|
|
int64_t skip_sse_sb = INT64_MAX;
|
|
|
|
int64_t distortion_y = 0, distortion_uv = 0;
|
|
|
|
|
2014-09-24 15:36:34 +02:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
|
|
|
|
tmp_buf = CONVERT_TO_BYTEPTR(tmp_buf16);
|
|
|
|
} else {
|
2015-03-24 22:28:15 +01:00
|
|
|
tmp_buf = (uint8_t *)tmp_buf16;
|
2014-09-24 15:36:34 +02:00
|
|
|
}
|
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
|
2014-07-23 20:47:56 +02:00
|
|
|
if (pred_filter_search) {
|
|
|
|
INTERP_FILTER af = SWITCHABLE, lf = SWITCHABLE;
|
2016-06-28 23:09:59 +02:00
|
|
|
if (xd->above_mi && is_inter_block(xd->above_mi))
|
2016-03-30 13:47:39 +02:00
|
|
|
af = xd->above_mi->interp_filter;
|
2016-06-28 23:09:59 +02:00
|
|
|
if (xd->left_mi && is_inter_block(xd->left_mi))
|
2016-03-30 13:47:39 +02:00
|
|
|
lf = xd->left_mi->interp_filter;
|
2014-07-23 20:47:56 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if ((this_mode != NEWMV) || (af == lf)) best_filter = af;
|
2014-07-23 01:32:20 +02:00
|
|
|
}
|
2010-12-06 22:42:52 +01:00
|
|
|
|
2013-11-05 03:45:45 +01:00
|
|
|
if (is_comp_pred) {
|
|
|
|
if (frame_mv[refs[0]].as_int == INVALID_MV ||
|
|
|
|
frame_mv[refs[1]].as_int == INVALID_MV)
|
|
|
|
return INT64_MAX;
|
2014-09-03 02:32:12 +02:00
|
|
|
|
|
|
|
if (cpi->sf.adaptive_mode_search) {
|
|
|
|
if (single_filter[this_mode][refs[0]] ==
|
|
|
|
single_filter[this_mode][refs[1]])
|
|
|
|
best_filter = single_filter[this_mode][refs[0]];
|
|
|
|
}
|
2013-11-05 03:45:45 +01:00
|
|
|
}
|
|
|
|
|
2013-07-30 19:16:03 +02:00
|
|
|
if (this_mode == NEWMV) {
|
2013-06-14 00:10:42 +02:00
|
|
|
int rate_mv;
|
2013-07-30 19:16:03 +02:00
|
|
|
if (is_comp_pred) {
|
|
|
|
// Initialize mv using single prediction mode result.
|
|
|
|
frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int;
|
|
|
|
frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
|
|
|
|
|
|
|
|
if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
|
2016-07-27 05:43:23 +02:00
|
|
|
joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col,
|
|
|
|
single_newmv, &rate_mv);
|
2013-06-14 00:10:42 +02:00
|
|
|
} else {
|
2016-07-27 05:43:23 +02:00
|
|
|
rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]].as_mv,
|
|
|
|
&x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
|
|
|
|
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
|
2013-09-20 11:52:43 +02:00
|
|
|
rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]].as_mv,
|
2015-06-29 18:27:11 +02:00
|
|
|
&x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
|
2013-10-09 20:32:03 +02:00
|
|
|
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
2013-07-30 19:16:03 +02:00
|
|
|
*rate2 += rate_mv;
|
|
|
|
} else {
|
|
|
|
int_mv tmp_mv;
|
2016-07-27 05:43:23 +02:00
|
|
|
single_motion_search(cpi, x, bsize, mi_row, mi_col, &tmp_mv, &rate_mv);
|
|
|
|
if (tmp_mv.as_int == INVALID_MV) return INT64_MAX;
|
2014-12-11 17:22:03 +01:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
frame_mv[refs[0]].as_int = xd->mi[0]->bmi[0].as_mv[0].as_int =
|
|
|
|
tmp_mv.as_int;
|
2013-07-30 19:16:03 +02:00
|
|
|
single_newmv[refs[0]].as_int = tmp_mv.as_int;
|
2014-12-11 17:22:03 +01:00
|
|
|
|
|
|
|
// Estimate the rate implications of a new mv but discount this
|
|
|
|
// under certain circumstances where we want to help initiate a weak
|
|
|
|
// motion field, where the distortion gain for a single block may not
|
|
|
|
// be enough to overcome the cost of a new mv.
|
|
|
|
if (discount_newmv_test(cpi, this_mode, tmp_mv, mode_mv, refs[0])) {
|
2015-08-18 03:19:22 +02:00
|
|
|
*rate2 += VPXMAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1);
|
2014-12-11 17:22:03 +01:00
|
|
|
} else {
|
|
|
|
*rate2 += rate_mv;
|
|
|
|
}
|
2013-07-30 19:16:03 +02:00
|
|
|
}
|
2013-05-05 07:09:43 +02:00
|
|
|
}
|
2013-07-17 20:33:15 +02:00
|
|
|
|
2014-07-23 18:29:04 +02:00
|
|
|
for (i = 0; i < is_comp_pred + 1; ++i) {
|
2013-05-27 23:16:45 +02:00
|
|
|
cur_mv[i] = frame_mv[refs[i]];
|
2013-05-05 07:09:43 +02:00
|
|
|
// Clip "next_nearest" so that it does not extend to far out of image
|
2016-07-27 05:43:23 +02:00
|
|
|
if (this_mode != NEWMV) clamp_mv2(&cur_mv[i].as_mv, xd);
|
2012-12-20 23:56:19 +01:00
|
|
|
|
2016-08-08 20:42:27 +02:00
|
|
|
if (mv_check_bounds(&x->mv_limits, &cur_mv[i].as_mv)) return INT64_MAX;
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->mv[i].as_int = cur_mv[i].as_int;
|
2013-05-05 07:09:43 +02:00
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2013-07-10 18:26:32 +02:00
|
|
|
// do first prediction into the destination buffer. Do the next
|
|
|
|
// prediction into a temporary buffer. Then keep track of which one
|
|
|
|
// of these currently holds the best predictor, and use the other
|
|
|
|
// one for future predictions. In the end, copy from tmp_buf to
|
|
|
|
// dst if necessary.
|
|
|
|
for (i = 0; i < MAX_MB_PLANE; i++) {
|
|
|
|
orig_dst[i] = xd->plane[i].dst.buf;
|
|
|
|
orig_dst_stride[i] = xd->plane[i].dst.stride;
|
|
|
|
}
|
|
|
|
|
2014-12-11 17:22:03 +01:00
|
|
|
// We don't include the cost of the second reference here, because there
|
2015-08-07 02:36:57 +02:00
|
|
|
// are only two options: Last/ARF or Golden/ARF; The second one is always
|
|
|
|
// known, which is ARF.
|
2014-12-11 17:22:03 +01:00
|
|
|
//
|
|
|
|
// Under some circumstances we discount the cost of new mv mode to encourage
|
|
|
|
// initiation of a motion field.
|
2016-07-27 05:43:23 +02:00
|
|
|
if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]], mode_mv,
|
|
|
|
refs[0])) {
|
|
|
|
*rate2 +=
|
|
|
|
VPXMIN(cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]),
|
|
|
|
cost_mv_ref(cpi, NEARESTMV, mbmi_ext->mode_context[refs[0]]));
|
2014-12-11 17:22:03 +01:00
|
|
|
} else {
|
2015-06-29 18:27:11 +02:00
|
|
|
*rate2 += cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]);
|
2014-12-11 17:22:03 +01:00
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2014-08-27 00:53:56 +02:00
|
|
|
if (RDCOST(x->rdmult, x->rddiv, *rate2, 0) > ref_best_rd &&
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->mode != NEARESTMV)
|
2014-08-27 00:53:56 +02:00
|
|
|
return INT64_MAX;
|
|
|
|
|
2013-05-05 07:09:43 +02:00
|
|
|
pred_exists = 0;
|
|
|
|
// Are all MVs integer pel for Y and UV
|
2016-01-20 01:40:20 +01:00
|
|
|
intpel_mv = !mv_has_subpel(&mi->mv[0].as_mv);
|
2016-07-27 05:43:23 +02:00
|
|
|
if (is_comp_pred) intpel_mv &= !mv_has_subpel(&mi->mv[1].as_mv);
|
2013-12-13 21:58:49 +01:00
|
|
|
|
2013-05-05 07:09:43 +02:00
|
|
|
// Search for best switchable filter by checking the variance of
|
|
|
|
// pred error irrespective of whether the filter will be used
|
2016-07-27 05:43:23 +02:00
|
|
|
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
|
2013-12-17 02:52:59 +01:00
|
|
|
|
2014-01-24 21:26:57 +01:00
|
|
|
if (cm->interp_filter != BILINEAR) {
|
2014-04-10 00:00:14 +02:00
|
|
|
if (x->source_variance < cpi->sf.disable_filter_search_var_thresh) {
|
2014-07-22 20:32:17 +02:00
|
|
|
best_filter = EIGHTTAP;
|
2014-07-23 01:32:20 +02:00
|
|
|
} else if (best_filter == SWITCHABLE) {
|
2013-12-13 21:58:49 +01:00
|
|
|
int newbest;
|
Cleanup/enhancements of switchable filter search
Cleans up the switchable filter search logic. Also adds a
speed feature - a variance threshold - to disable filter search
if source variance is lower than this value.
Results: derfraw300
threshold = 16, psnr -0.238%, 4-5% speedup (tested on football)
threshold = 32, psnr -0.381%, 8-9% speedup (tested on football)
threshold = 64, psnr -0.611%, 12-13% speedup (tested on football)
threshold = 96, psnr -0.804%, 16-17% speedup (tested on football)
Based on these results, the threshold is chosen as 16 for speed 1,
32 for speed 2, 64 for speed 3 and 96 for speed 4.
Change-Id: Ib630d39192773b1983d3d349b97973768e170c04
2013-08-16 22:51:00 +02:00
|
|
|
int tmp_rate_sum = 0;
|
|
|
|
int64_t tmp_dist_sum = 0;
|
|
|
|
|
2013-08-23 03:40:34 +02:00
|
|
|
for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
|
Cleanup/enhancements of switchable filter search
Cleans up the switchable filter search logic. Also adds a
speed feature - a variance threshold - to disable filter search
if source variance is lower than this value.
Results: derfraw300
threshold = 16, psnr -0.238%, 4-5% speedup (tested on football)
threshold = 32, psnr -0.381%, 8-9% speedup (tested on football)
threshold = 64, psnr -0.611%, 12-13% speedup (tested on football)
threshold = 96, psnr -0.804%, 16-17% speedup (tested on football)
Based on these results, the threshold is chosen as 16 for speed 1,
32 for speed 2, 64 for speed 3 and 96 for speed 4.
Change-Id: Ib630d39192773b1983d3d349b97973768e170c04
2013-08-16 22:51:00 +02:00
|
|
|
int j;
|
|
|
|
int64_t rs_rd;
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
int tmp_skip_sb = 0;
|
|
|
|
int64_t tmp_skip_sse = INT64_MAX;
|
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->interp_filter = i;
|
2014-11-21 20:11:06 +01:00
|
|
|
rs = vp9_get_switchable_rate(cpi, xd);
|
Cleanup/enhancements of switchable filter search
Cleans up the switchable filter search logic. Also adds a
speed feature - a variance threshold - to disable filter search
if source variance is lower than this value.
Results: derfraw300
threshold = 16, psnr -0.238%, 4-5% speedup (tested on football)
threshold = 32, psnr -0.381%, 8-9% speedup (tested on football)
threshold = 64, psnr -0.611%, 12-13% speedup (tested on football)
threshold = 96, psnr -0.804%, 16-17% speedup (tested on football)
Based on these results, the threshold is chosen as 16 for speed 1,
32 for speed 2, 64 for speed 3 and 96 for speed 4.
Change-Id: Ib630d39192773b1983d3d349b97973768e170c04
2013-08-16 22:51:00 +02:00
|
|
|
rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
|
|
|
|
|
|
|
|
if (i > 0 && intpel_mv) {
|
2013-12-17 02:52:59 +01:00
|
|
|
rd = RDCOST(x->rdmult, x->rddiv, tmp_rate_sum, tmp_dist_sum);
|
2014-11-20 21:42:36 +01:00
|
|
|
filter_cache[i] = rd;
|
|
|
|
filter_cache[SWITCHABLE_FILTERS] =
|
2015-08-18 03:19:22 +02:00
|
|
|
VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
|
2016-07-27 05:43:23 +02:00
|
|
|
if (cm->interp_filter == SWITCHABLE) rd += rs_rd;
|
2015-08-18 03:19:22 +02:00
|
|
|
*mask_filter = VPXMAX(*mask_filter, rd);
|
2013-07-10 18:26:32 +02:00
|
|
|
} else {
|
Cleanup/enhancements of switchable filter search
Cleans up the switchable filter search logic. Also adds a
speed feature - a variance threshold - to disable filter search
if source variance is lower than this value.
Results: derfraw300
threshold = 16, psnr -0.238%, 4-5% speedup (tested on football)
threshold = 32, psnr -0.381%, 8-9% speedup (tested on football)
threshold = 64, psnr -0.611%, 12-13% speedup (tested on football)
threshold = 96, psnr -0.804%, 16-17% speedup (tested on football)
Based on these results, the threshold is chosen as 16 for speed 1,
32 for speed 2, 64 for speed 3 and 96 for speed 4.
Change-Id: Ib630d39192773b1983d3d349b97973768e170c04
2013-08-16 22:51:00 +02:00
|
|
|
int rate_sum = 0;
|
|
|
|
int64_t dist_sum = 0;
|
2014-08-20 20:17:05 +02:00
|
|
|
if (i > 0 && cpi->sf.adaptive_interp_filter_search &&
|
|
|
|
(cpi->sf.interp_filter_search_mask & (1 << i))) {
|
|
|
|
rate_sum = INT_MAX;
|
|
|
|
dist_sum = INT64_MAX;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if ((cm->interp_filter == SWITCHABLE && (!i || best_needs_copy)) ||
|
2014-01-24 21:26:57 +01:00
|
|
|
(cm->interp_filter != SWITCHABLE &&
|
2016-01-20 01:40:20 +01:00
|
|
|
(cm->interp_filter == mi->interp_filter ||
|
Cleanup/enhancements of switchable filter search
Cleans up the switchable filter search logic. Also adds a
speed feature - a variance threshold - to disable filter search
if source variance is lower than this value.
Results: derfraw300
threshold = 16, psnr -0.238%, 4-5% speedup (tested on football)
threshold = 32, psnr -0.381%, 8-9% speedup (tested on football)
threshold = 64, psnr -0.611%, 12-13% speedup (tested on football)
threshold = 96, psnr -0.804%, 16-17% speedup (tested on football)
Based on these results, the threshold is chosen as 16 for speed 1,
32 for speed 2, 64 for speed 3 and 96 for speed 4.
Change-Id: Ib630d39192773b1983d3d349b97973768e170c04
2013-08-16 22:51:00 +02:00
|
|
|
(i == 0 && intpel_mv)))) {
|
2013-11-20 00:29:22 +01:00
|
|
|
restore_dst_buf(xd, orig_dst, orig_dst_stride);
|
Cleanup/enhancements of switchable filter search
Cleans up the switchable filter search logic. Also adds a
speed feature - a variance threshold - to disable filter search
if source variance is lower than this value.
Results: derfraw300
threshold = 16, psnr -0.238%, 4-5% speedup (tested on football)
threshold = 32, psnr -0.381%, 8-9% speedup (tested on football)
threshold = 64, psnr -0.611%, 12-13% speedup (tested on football)
threshold = 96, psnr -0.804%, 16-17% speedup (tested on football)
Based on these results, the threshold is chosen as 16 for speed 1,
32 for speed 2, 64 for speed 3 and 96 for speed 4.
Change-Id: Ib630d39192773b1983d3d349b97973768e170c04
2013-08-16 22:51:00 +02:00
|
|
|
} else {
|
|
|
|
for (j = 0; j < MAX_MB_PLANE; j++) {
|
|
|
|
xd->plane[j].dst.buf = tmp_buf + j * 64 * 64;
|
|
|
|
xd->plane[j].dst.stride = 64;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
|
2016-07-27 05:43:23 +02:00
|
|
|
model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, &tmp_skip_sb,
|
|
|
|
&tmp_skip_sse);
|
2013-12-17 02:52:59 +01:00
|
|
|
|
|
|
|
rd = RDCOST(x->rdmult, x->rddiv, rate_sum, dist_sum);
|
2014-11-20 21:42:36 +01:00
|
|
|
filter_cache[i] = rd;
|
|
|
|
filter_cache[SWITCHABLE_FILTERS] =
|
2015-08-18 03:19:22 +02:00
|
|
|
VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd);
|
2016-07-27 05:43:23 +02:00
|
|
|
if (cm->interp_filter == SWITCHABLE) rd += rs_rd;
|
2015-08-18 03:19:22 +02:00
|
|
|
*mask_filter = VPXMAX(*mask_filter, rd);
|
2013-12-17 02:52:59 +01:00
|
|
|
|
Cleanup/enhancements of switchable filter search
Cleans up the switchable filter search logic. Also adds a
speed feature - a variance threshold - to disable filter search
if source variance is lower than this value.
Results: derfraw300
threshold = 16, psnr -0.238%, 4-5% speedup (tested on football)
threshold = 32, psnr -0.381%, 8-9% speedup (tested on football)
threshold = 64, psnr -0.611%, 12-13% speedup (tested on football)
threshold = 96, psnr -0.804%, 16-17% speedup (tested on football)
Based on these results, the threshold is chosen as 16 for speed 1,
32 for speed 2, 64 for speed 3 and 96 for speed 4.
Change-Id: Ib630d39192773b1983d3d349b97973768e170c04
2013-08-16 22:51:00 +02:00
|
|
|
if (i == 0 && intpel_mv) {
|
|
|
|
tmp_rate_sum = rate_sum;
|
|
|
|
tmp_dist_sum = dist_sum;
|
2013-07-10 18:26:32 +02:00
|
|
|
}
|
|
|
|
}
|
2013-12-17 02:52:59 +01:00
|
|
|
|
Cleanup/enhancements of switchable filter search
Cleans up the switchable filter search logic. Also adds a
speed feature - a variance threshold - to disable filter search
if source variance is lower than this value.
Results: derfraw300
threshold = 16, psnr -0.238%, 4-5% speedup (tested on football)
threshold = 32, psnr -0.381%, 8-9% speedup (tested on football)
threshold = 64, psnr -0.611%, 12-13% speedup (tested on football)
threshold = 96, psnr -0.804%, 16-17% speedup (tested on football)
Based on these results, the threshold is chosen as 16 for speed 1,
32 for speed 2, 64 for speed 3 and 96 for speed 4.
Change-Id: Ib630d39192773b1983d3d349b97973768e170c04
2013-08-16 22:51:00 +02:00
|
|
|
if (i == 0 && cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
|
|
|
|
if (rd / 2 > ref_best_rd) {
|
2013-11-20 00:29:22 +01:00
|
|
|
restore_dst_buf(xd, orig_dst, orig_dst_stride);
|
Cleanup/enhancements of switchable filter search
Cleans up the switchable filter search logic. Also adds a
speed feature - a variance threshold - to disable filter search
if source variance is lower than this value.
Results: derfraw300
threshold = 16, psnr -0.238%, 4-5% speedup (tested on football)
threshold = 32, psnr -0.381%, 8-9% speedup (tested on football)
threshold = 64, psnr -0.611%, 12-13% speedup (tested on football)
threshold = 96, psnr -0.804%, 16-17% speedup (tested on football)
Based on these results, the threshold is chosen as 16 for speed 1,
32 for speed 2, 64 for speed 3 and 96 for speed 4.
Change-Id: Ib630d39192773b1983d3d349b97973768e170c04
2013-08-16 22:51:00 +02:00
|
|
|
return INT64_MAX;
|
2013-07-09 01:01:01 +02:00
|
|
|
}
|
|
|
|
}
|
Cleanup/enhancements of switchable filter search
Cleans up the switchable filter search logic. Also adds a
speed feature - a variance threshold - to disable filter search
if source variance is lower than this value.
Results: derfraw300
threshold = 16, psnr -0.238%, 4-5% speedup (tested on football)
threshold = 32, psnr -0.381%, 8-9% speedup (tested on football)
threshold = 64, psnr -0.611%, 12-13% speedup (tested on football)
threshold = 96, psnr -0.804%, 16-17% speedup (tested on football)
Based on these results, the threshold is chosen as 16 for speed 1,
32 for speed 2, 64 for speed 3 and 96 for speed 4.
Change-Id: Ib630d39192773b1983d3d349b97973768e170c04
2013-08-16 22:51:00 +02:00
|
|
|
newbest = i == 0 || rd < best_rd;
|
|
|
|
|
|
|
|
if (newbest) {
|
|
|
|
best_rd = rd;
|
2016-01-20 01:40:20 +01:00
|
|
|
best_filter = mi->interp_filter;
|
2014-01-24 21:26:57 +01:00
|
|
|
if (cm->interp_filter == SWITCHABLE && i && !intpel_mv)
|
Cleanup/enhancements of switchable filter search
Cleans up the switchable filter search logic. Also adds a
speed feature - a variance threshold - to disable filter search
if source variance is lower than this value.
Results: derfraw300
threshold = 16, psnr -0.238%, 4-5% speedup (tested on football)
threshold = 32, psnr -0.381%, 8-9% speedup (tested on football)
threshold = 64, psnr -0.611%, 12-13% speedup (tested on football)
threshold = 96, psnr -0.804%, 16-17% speedup (tested on football)
Based on these results, the threshold is chosen as 16 for speed 1,
32 for speed 2, 64 for speed 3 and 96 for speed 4.
Change-Id: Ib630d39192773b1983d3d349b97973768e170c04
2013-08-16 22:51:00 +02:00
|
|
|
best_needs_copy = !best_needs_copy;
|
|
|
|
}
|
2012-10-09 18:18:21 +02:00
|
|
|
|
2014-01-24 21:26:57 +01:00
|
|
|
if ((cm->interp_filter == SWITCHABLE && newbest) ||
|
|
|
|
(cm->interp_filter != SWITCHABLE &&
|
2016-01-20 01:40:20 +01:00
|
|
|
cm->interp_filter == mi->interp_filter)) {
|
Cleanup/enhancements of switchable filter search
Cleans up the switchable filter search logic. Also adds a
speed feature - a variance threshold - to disable filter search
if source variance is lower than this value.
Results: derfraw300
threshold = 16, psnr -0.238%, 4-5% speedup (tested on football)
threshold = 32, psnr -0.381%, 8-9% speedup (tested on football)
threshold = 64, psnr -0.611%, 12-13% speedup (tested on football)
threshold = 96, psnr -0.804%, 16-17% speedup (tested on football)
Based on these results, the threshold is chosen as 16 for speed 1,
32 for speed 2, 64 for speed 3 and 96 for speed 4.
Change-Id: Ib630d39192773b1983d3d349b97973768e170c04
2013-08-16 22:51:00 +02:00
|
|
|
pred_exists = 1;
|
2014-08-27 01:52:56 +02:00
|
|
|
tmp_rd = best_rd;
|
2014-10-07 21:30:33 +02:00
|
|
|
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
skip_txfm_sb = tmp_skip_sb;
|
|
|
|
skip_sse_sb = tmp_skip_sse;
|
2015-04-24 05:42:19 +02:00
|
|
|
memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
|
|
|
|
memcpy(bsse, x->bsse, sizeof(bsse));
|
Cleanup/enhancements of switchable filter search
Cleans up the switchable filter search logic. Also adds a
speed feature - a variance threshold - to disable filter search
if source variance is lower than this value.
Results: derfraw300
threshold = 16, psnr -0.238%, 4-5% speedup (tested on football)
threshold = 32, psnr -0.381%, 8-9% speedup (tested on football)
threshold = 64, psnr -0.611%, 12-13% speedup (tested on football)
threshold = 96, psnr -0.804%, 16-17% speedup (tested on football)
Based on these results, the threshold is chosen as 16 for speed 1,
32 for speed 2, 64 for speed 3 and 96 for speed 4.
Change-Id: Ib630d39192773b1983d3d349b97973768e170c04
2013-08-16 22:51:00 +02:00
|
|
|
}
|
2012-10-09 18:18:21 +02:00
|
|
|
}
|
2013-11-20 00:29:22 +01:00
|
|
|
restore_dst_buf(xd, orig_dst, orig_dst_stride);
|
2013-07-10 18:26:32 +02:00
|
|
|
}
|
2013-02-12 02:08:52 +01:00
|
|
|
}
|
2013-07-30 19:16:03 +02:00
|
|
|
// Set the appropriate filter
|
2016-07-27 05:43:23 +02:00
|
|
|
mi->interp_filter =
|
|
|
|
cm->interp_filter != SWITCHABLE ? cm->interp_filter : best_filter;
|
2014-11-21 20:11:06 +01:00
|
|
|
rs = cm->interp_filter == SWITCHABLE ? vp9_get_switchable_rate(cpi, xd) : 0;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2013-05-05 07:09:43 +02:00
|
|
|
if (pred_exists) {
|
2013-07-10 18:26:32 +02:00
|
|
|
if (best_needs_copy) {
|
|
|
|
// again temporarily set the buffers to local memory to prevent a memcpy
|
|
|
|
for (i = 0; i < MAX_MB_PLANE; i++) {
|
|
|
|
xd->plane[i].dst.buf = tmp_buf + i * 64 * 64;
|
|
|
|
xd->plane[i].dst.stride = 64;
|
|
|
|
}
|
2013-05-07 20:49:21 +02:00
|
|
|
}
|
2014-08-27 01:52:56 +02:00
|
|
|
rd = tmp_rd + RDCOST(x->rdmult, x->rddiv, rs, 0);
|
2013-05-05 07:09:43 +02:00
|
|
|
} else {
|
2014-08-27 01:52:56 +02:00
|
|
|
int tmp_rate;
|
|
|
|
int64_t tmp_dist;
|
2013-05-05 07:09:43 +02:00
|
|
|
// Handles the special case when a filter that is not in the
|
2014-08-27 01:52:56 +02:00
|
|
|
// switchable list (ex. bilinear) is indicated at the frame level, or
|
|
|
|
// skip condition holds.
|
2013-05-05 07:09:43 +02:00
|
|
|
vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
|
2016-07-27 05:43:23 +02:00
|
|
|
model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist, &skip_txfm_sb,
|
|
|
|
&skip_sse_sb);
|
2014-08-27 01:52:56 +02:00
|
|
|
rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
|
2015-04-24 05:42:19 +02:00
|
|
|
memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm));
|
|
|
|
memcpy(bsse, x->bsse, sizeof(bsse));
|
2013-05-05 07:09:43 +02:00
|
|
|
}
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (!is_comp_pred) single_filter[this_mode][refs[0]] = mi->interp_filter;
|
2014-09-03 02:32:12 +02:00
|
|
|
|
|
|
|
if (cpi->sf.adaptive_mode_search)
|
|
|
|
if (is_comp_pred)
|
|
|
|
if (single_skippable[this_mode][refs[0]] &&
|
|
|
|
single_skippable[this_mode][refs[1]])
|
2015-07-30 20:52:28 +02:00
|
|
|
memset(skip_txfm, SKIP_TXFM_AC_DC, sizeof(skip_txfm));
|
2014-09-03 02:32:12 +02:00
|
|
|
|
2013-07-09 01:01:01 +02:00
|
|
|
if (cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) {
|
|
|
|
// if current pred_error modeled rd is substantially more than the best
|
|
|
|
// so far, do not bother doing full rd
|
|
|
|
if (rd / 2 > ref_best_rd) {
|
2013-11-20 00:29:22 +01:00
|
|
|
restore_dst_buf(xd, orig_dst, orig_dst_stride);
|
2013-07-09 01:01:01 +02:00
|
|
|
return INT64_MAX;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (cm->interp_filter == SWITCHABLE) *rate2 += rs;
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2015-04-24 05:42:19 +02:00
|
|
|
memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm));
|
|
|
|
memcpy(x->bsse, bsse, sizeof(bsse));
|
2014-08-07 07:48:37 +02:00
|
|
|
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
if (!skip_txfm_sb) {
|
2013-05-05 07:09:43 +02:00
|
|
|
int skippable_y, skippable_uv;
|
2013-08-15 01:50:45 +02:00
|
|
|
int64_t sseuv = INT64_MAX;
|
|
|
|
int64_t rdcosty = INT64_MAX;
|
2012-10-09 18:18:21 +02:00
|
|
|
|
2013-05-05 07:09:43 +02:00
|
|
|
// Y cost and distortion
|
2014-09-02 21:42:34 +02:00
|
|
|
vp9_subtract_plane(x, bsize, 0);
|
2016-07-27 05:43:23 +02:00
|
|
|
super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse, bsize,
|
|
|
|
ref_best_rd);
|
2013-07-03 01:48:15 +02:00
|
|
|
|
|
|
|
if (*rate_y == INT_MAX) {
|
|
|
|
*rate2 = INT_MAX;
|
|
|
|
*distortion = INT64_MAX;
|
2013-11-20 00:29:22 +01:00
|
|
|
restore_dst_buf(xd, orig_dst, orig_dst_stride);
|
2013-07-03 01:48:15 +02:00
|
|
|
return INT64_MAX;
|
|
|
|
}
|
2013-05-31 00:13:08 +02:00
|
|
|
|
2013-05-05 07:09:43 +02:00
|
|
|
*rate2 += *rate_y;
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
*distortion += distortion_y;
|
2013-04-23 00:42:41 +02:00
|
|
|
|
2013-08-15 01:50:45 +02:00
|
|
|
rdcosty = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion);
|
2015-08-18 03:19:22 +02:00
|
|
|
rdcosty = VPXMIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse));
|
2013-08-15 01:50:45 +02:00
|
|
|
|
2014-10-14 02:06:22 +02:00
|
|
|
if (!super_block_uvrd(cpi, x, rate_uv, &distortion_uv, &skippable_uv,
|
|
|
|
&sseuv, bsize, ref_best_rd - rdcosty)) {
|
2013-08-15 01:50:45 +02:00
|
|
|
*rate2 = INT_MAX;
|
|
|
|
*distortion = INT64_MAX;
|
2013-11-20 00:29:22 +01:00
|
|
|
restore_dst_buf(xd, orig_dst, orig_dst_stride);
|
2013-08-15 01:50:45 +02:00
|
|
|
return INT64_MAX;
|
|
|
|
}
|
2013-04-23 00:42:41 +02:00
|
|
|
|
2013-06-28 02:41:54 +02:00
|
|
|
*psse += sseuv;
|
2013-05-05 07:09:43 +02:00
|
|
|
*rate2 += *rate_uv;
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
*distortion += distortion_uv;
|
2013-05-05 07:09:43 +02:00
|
|
|
*skippable = skippable_y && skippable_uv;
|
Allow mode search breakout at very low prediction errors
In model_rd_for_sb function, the spatial domain SSE and variance
are checked to see if transform coefficients are quantized to 0.
Besides that, this patch adds another set of thresholds that are
much more strict. These thresholds are used to conduct a partition
block level check to measure if all its TX blocks are skippable
for YUV planes. If it is true, x->skip is set for this partition
block, and thus its mode search is terminated.
This speeds up the encoding at very low prediction error case,
such as screen sharing application. This patch covers what
rd_encode_breakout_test() does, so that function is removed.
Borg test at speed 3 shows:
For stdhd set, psnr: +0.008%, ssim: +0.014%;
For derf set, psnr: +0.018%, ssim: +0.025%.
No noticeable speed change.
Change-Id: I4e5f15cf10016a282a68e35175ff854b28195944
2014-10-03 01:25:52 +02:00
|
|
|
} else {
|
|
|
|
x->skip = 1;
|
|
|
|
*disable_skip = 1;
|
|
|
|
|
|
|
|
// The cost of skip bit needs to be added.
|
|
|
|
*rate2 += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
|
|
|
|
|
|
|
|
*distortion = skip_sse_sb;
|
2013-04-23 00:42:41 +02:00
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (!is_comp_pred) single_skippable[this_mode][refs[0]] = *skippable;
|
2014-09-03 02:32:12 +02:00
|
|
|
|
2013-11-20 00:29:22 +01:00
|
|
|
restore_dst_buf(xd, orig_dst, orig_dst_stride);
|
2014-09-25 03:27:44 +02:00
|
|
|
return 0; // The rate-distortion cost will be re-calculated by caller.
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
2012-07-14 00:21:29 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
|
|
|
|
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
|
|
|
|
int64_t best_rd) {
|
2013-07-18 03:21:41 +02:00
|
|
|
VP9_COMMON *const cm = &cpi->common;
|
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
2014-03-27 22:27:12 +01:00
|
|
|
struct macroblockd_plane *const pd = xd->plane;
|
2013-07-18 03:21:41 +02:00
|
|
|
int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
|
2013-09-11 19:45:44 +02:00
|
|
|
int y_skip = 0, uv_skip = 0;
|
2015-07-29 22:37:41 +02:00
|
|
|
int64_t dist_y = 0, dist_uv = 0;
|
2013-12-09 20:03:02 +01:00
|
|
|
TX_SIZE max_uv_tx_size;
|
2013-07-09 01:48:47 +02:00
|
|
|
x->skip_encode = 0;
|
2013-04-15 21:50:32 +02:00
|
|
|
ctx->skip = 0;
|
2016-01-20 01:40:20 +01:00
|
|
|
xd->mi[0]->ref_frame[0] = INTRA_FRAME;
|
|
|
|
xd->mi[0]->ref_frame[1] = NONE;
|
2016-07-09 18:47:34 +02:00
|
|
|
// Initialize interp_filter here so we do not have to check for inter block
|
|
|
|
// modes in get_pred_context_switchable_interp()
|
|
|
|
xd->mi[0]->interp_filter = SWITCHABLE_FILTERS;
|
2013-12-09 20:03:02 +01:00
|
|
|
|
2013-08-06 00:23:49 +02:00
|
|
|
if (bsize >= BLOCK_8X8) {
|
2016-07-27 05:43:23 +02:00
|
|
|
if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly, &dist_y,
|
|
|
|
&y_skip, bsize, best_rd) >= best_rd) {
|
2014-10-09 21:32:56 +02:00
|
|
|
rd_cost->rate = INT_MAX;
|
2013-07-18 03:21:41 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
y_skip = 0;
|
2013-07-31 21:58:19 +02:00
|
|
|
if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate_y, &rate_y_tokenonly,
|
|
|
|
&dist_y, best_rd) >= best_rd) {
|
2014-10-09 21:32:56 +02:00
|
|
|
rd_cost->rate = INT_MAX;
|
2013-07-18 03:21:41 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2016-08-30 21:52:29 +02:00
|
|
|
max_uv_tx_size = uv_txsize_lookup[bsize][xd->mi[0]->tx_size]
|
|
|
|
[pd[1].subsampling_x][pd[1].subsampling_y];
|
2016-07-27 05:43:23 +02:00
|
|
|
rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly, &dist_uv,
|
|
|
|
&uv_skip, VPXMAX(BLOCK_8X8, bsize), max_uv_tx_size);
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2013-04-09 19:54:19 +02:00
|
|
|
if (y_skip && uv_skip) {
|
2014-10-09 21:32:56 +02:00
|
|
|
rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
|
|
|
|
vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
|
|
|
|
rd_cost->dist = dist_y + dist_uv;
|
2013-01-06 03:20:25 +01:00
|
|
|
} else {
|
2016-07-27 05:43:23 +02:00
|
|
|
rd_cost->rate =
|
|
|
|
rate_y + rate_uv + vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0);
|
2014-10-09 21:32:56 +02:00
|
|
|
rd_cost->dist = dist_y + dist_uv;
|
2013-01-06 03:20:25 +01:00
|
|
|
}
|
2013-04-11 18:33:49 +02:00
|
|
|
|
2015-04-21 14:36:58 +02:00
|
|
|
ctx->mic = *xd->mi[0];
|
2015-06-29 18:27:11 +02:00
|
|
|
ctx->mbmi_ext = *x->mbmi_ext;
|
2014-10-09 21:32:56 +02:00
|
|
|
rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist);
|
2013-01-06 03:20:25 +01:00
|
|
|
}
|
2011-06-08 18:05:05 +02:00
|
|
|
|
2015-03-06 18:21:36 +01:00
|
|
|
// This function is designed to apply a bias or adjustment to an rd value based
|
|
|
|
// on the relative variance of the source and reconstruction.
|
|
|
|
#define LOW_VAR_THRESH 16
|
|
|
|
#define VLOW_ADJ_MAX 25
|
2015-03-18 15:41:18 +01:00
|
|
|
#define VHIGH_ADJ_MAX 8
|
2016-07-27 05:43:23 +02:00
|
|
|
static void rd_variance_adjustment(VP9_COMP *cpi, MACROBLOCK *x,
|
|
|
|
BLOCK_SIZE bsize, int64_t *this_rd,
|
2015-03-18 15:41:18 +01:00
|
|
|
MV_REFERENCE_FRAME ref_frame,
|
2015-03-06 18:21:36 +01:00
|
|
|
unsigned int source_variance) {
|
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
|
|
|
unsigned int recon_variance;
|
2015-03-18 15:41:18 +01:00
|
|
|
unsigned int absvar_diff = 0;
|
|
|
|
int64_t var_error = 0;
|
|
|
|
int64_t var_factor = 0;
|
2015-03-06 18:21:36 +01:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (*this_rd == INT64_MAX) return;
|
2015-03-06 18:21:36 +01:00
|
|
|
|
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
|
2016-07-27 05:43:23 +02:00
|
|
|
recon_variance = vp9_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst,
|
|
|
|
bsize, xd->bd);
|
2015-03-06 18:21:36 +01:00
|
|
|
} else {
|
|
|
|
recon_variance =
|
2016-07-27 05:43:23 +02:00
|
|
|
vp9_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
|
2015-03-06 18:21:36 +01:00
|
|
|
}
|
|
|
|
#else
|
2016-07-27 05:43:23 +02:00
|
|
|
recon_variance = vp9_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
|
2015-03-06 18:21:36 +01:00
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
|
2015-03-18 15:41:18 +01:00
|
|
|
if ((source_variance + recon_variance) > LOW_VAR_THRESH) {
|
|
|
|
absvar_diff = (source_variance > recon_variance)
|
2016-07-27 05:43:23 +02:00
|
|
|
? (source_variance - recon_variance)
|
|
|
|
: (recon_variance - source_variance);
|
2015-03-06 18:21:36 +01:00
|
|
|
|
2015-11-19 23:53:51 +01:00
|
|
|
var_error = ((int64_t)200 * source_variance * recon_variance) /
|
2016-07-27 05:43:23 +02:00
|
|
|
(((int64_t)source_variance * source_variance) +
|
|
|
|
((int64_t)recon_variance * recon_variance));
|
2015-03-18 15:41:18 +01:00
|
|
|
var_error = 100 - var_error;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Source variance above a threshold and ref frame is intra.
|
|
|
|
// This case is targeted mainly at discouraging intra modes that give rise
|
|
|
|
// to a predictor with a low spatial complexity compared to the source.
|
|
|
|
if ((source_variance > LOW_VAR_THRESH) && (ref_frame == INTRA_FRAME) &&
|
|
|
|
(source_variance > recon_variance)) {
|
2015-08-18 03:19:22 +02:00
|
|
|
var_factor = VPXMIN(absvar_diff, VPXMIN(VLOW_ADJ_MAX, var_error));
|
2016-07-27 05:43:23 +02:00
|
|
|
// A second possible case of interest is where the source variance
|
|
|
|
// is very low and we wish to discourage false texture or motion trails.
|
2015-03-18 15:41:18 +01:00
|
|
|
} else if ((source_variance < (LOW_VAR_THRESH >> 1)) &&
|
|
|
|
(recon_variance > source_variance)) {
|
2015-08-18 03:19:22 +02:00
|
|
|
var_factor = VPXMIN(absvar_diff, VPXMIN(VHIGH_ADJ_MAX, var_error));
|
2015-03-06 18:21:36 +01:00
|
|
|
}
|
2015-03-18 15:41:18 +01:00
|
|
|
*this_rd += (*this_rd * var_factor) / 100;
|
2015-03-06 18:21:36 +01:00
|
|
|
}
|
|
|
|
|
2015-06-24 12:36:51 +02:00
|
|
|
// Do we have an internal image edge (e.g. formatting bars).
|
|
|
|
int vp9_internal_image_edge(VP9_COMP *cpi) {
|
|
|
|
return (cpi->oxcf.pass == 2) &&
|
2016-07-27 05:43:23 +02:00
|
|
|
((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
|
|
|
|
(cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
|
2015-06-24 12:36:51 +02:00
|
|
|
}
|
|
|
|
|
2015-06-24 12:36:51 +02:00
|
|
|
// Checks to see if a super block is on a horizontal image edge.
|
2015-06-24 12:36:51 +02:00
|
|
|
// In most cases this is the "real" edge unless there are formatting
|
|
|
|
// bars embedded in the stream.
|
2015-06-24 12:36:51 +02:00
|
|
|
int vp9_active_h_edge(VP9_COMP *cpi, int mi_row, int mi_step) {
|
2015-06-24 12:36:51 +02:00
|
|
|
int top_edge = 0;
|
|
|
|
int bottom_edge = cpi->common.mi_rows;
|
2015-06-24 12:36:51 +02:00
|
|
|
int is_active_h_edge = 0;
|
2015-06-24 12:36:51 +02:00
|
|
|
|
|
|
|
// For two pass account for any formatting bars detected.
|
|
|
|
if (cpi->oxcf.pass == 2) {
|
|
|
|
TWO_PASS *twopass = &cpi->twopass;
|
|
|
|
|
|
|
|
// The inactive region is specified in MBs not mi units.
|
|
|
|
// The image edge is in the following MB row.
|
|
|
|
top_edge += (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
|
|
|
|
|
|
|
|
bottom_edge -= (int)(twopass->this_frame_stats.inactive_zone_rows * 2);
|
2015-08-18 03:19:22 +02:00
|
|
|
bottom_edge = VPXMAX(top_edge, bottom_edge);
|
2015-06-24 12:36:51 +02:00
|
|
|
}
|
|
|
|
|
2015-06-24 12:36:51 +02:00
|
|
|
if (((top_edge >= mi_row) && (top_edge < (mi_row + mi_step))) ||
|
|
|
|
((bottom_edge >= mi_row) && (bottom_edge < (mi_row + mi_step)))) {
|
|
|
|
is_active_h_edge = 1;
|
|
|
|
}
|
|
|
|
return is_active_h_edge;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Checks to see if a super block is on a vertical image edge.
|
|
|
|
// In most cases this is the "real" edge unless there are formatting
|
|
|
|
// bars embedded in the stream.
|
|
|
|
int vp9_active_v_edge(VP9_COMP *cpi, int mi_col, int mi_step) {
|
|
|
|
int left_edge = 0;
|
|
|
|
int right_edge = cpi->common.mi_cols;
|
|
|
|
int is_active_v_edge = 0;
|
|
|
|
|
|
|
|
// For two pass account for any formatting bars detected.
|
|
|
|
if (cpi->oxcf.pass == 2) {
|
|
|
|
TWO_PASS *twopass = &cpi->twopass;
|
|
|
|
|
|
|
|
// The inactive region is specified in MBs not mi units.
|
|
|
|
// The image edge is in the following MB row.
|
|
|
|
left_edge += (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
|
|
|
|
|
|
|
|
right_edge -= (int)(twopass->this_frame_stats.inactive_zone_cols * 2);
|
2015-08-18 03:19:22 +02:00
|
|
|
right_edge = VPXMAX(left_edge, right_edge);
|
2015-06-24 12:36:51 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (((left_edge >= mi_col) && (left_edge < (mi_col + mi_step))) ||
|
|
|
|
((right_edge >= mi_col) && (right_edge < (mi_col + mi_step)))) {
|
|
|
|
is_active_v_edge = 1;
|
2015-06-24 12:36:51 +02:00
|
|
|
}
|
2015-06-24 12:36:51 +02:00
|
|
|
return is_active_v_edge;
|
|
|
|
}
|
2015-06-24 12:36:51 +02:00
|
|
|
|
2015-06-24 12:36:51 +02:00
|
|
|
// Checks to see if a super block is at the edge of the active image.
|
|
|
|
// In most cases this is the "real" edge unless there are formatting
|
|
|
|
// bars embedded in the stream.
|
2016-07-27 05:43:23 +02:00
|
|
|
int vp9_active_edge_sb(VP9_COMP *cpi, int mi_row, int mi_col) {
|
2015-06-24 12:36:51 +02:00
|
|
|
return vp9_active_h_edge(cpi, mi_row, MI_BLOCK_SIZE) ||
|
|
|
|
vp9_active_v_edge(cpi, mi_col, MI_BLOCK_SIZE);
|
2015-06-24 12:36:51 +02:00
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, TileDataEnc *tile_data,
|
|
|
|
MACROBLOCK *x, int mi_row, int mi_col,
|
2014-10-14 00:56:37 +02:00
|
|
|
RD_COST *rd_cost, BLOCK_SIZE bsize,
|
2016-07-27 05:43:23 +02:00
|
|
|
PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far) {
|
2014-02-20 18:43:32 +01:00
|
|
|
VP9_COMMON *const cm = &cpi->common;
|
2014-10-27 19:11:50 +01:00
|
|
|
TileInfo *const tile_info = &tile_data->tile_info;
|
2014-04-10 00:00:14 +02:00
|
|
|
RD_OPT *const rd_opt = &cpi->rd;
|
2014-10-14 01:13:59 +02:00
|
|
|
SPEED_FEATURES *const sf = &cpi->sf;
|
2014-02-20 18:43:32 +01:00
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO *const mi = xd->mi[0];
|
2015-06-29 18:27:11 +02:00
|
|
|
MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
|
2014-02-20 18:43:32 +01:00
|
|
|
const struct segmentation *const seg = &cm->seg;
|
2014-04-12 00:26:24 +02:00
|
|
|
PREDICTION_MODE this_mode;
|
2013-07-24 18:59:36 +02:00
|
|
|
MV_REFERENCE_FRAME ref_frame, second_ref_frame;
|
2016-01-20 01:40:20 +01:00
|
|
|
unsigned char segment_id = mi->segment_id;
|
2014-09-03 02:32:12 +02:00
|
|
|
int comp_pred, i, k;
|
2012-10-30 01:58:18 +01:00
|
|
|
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
|
2013-05-07 20:31:12 +02:00
|
|
|
struct buf_2d yv12_mb[4][MAX_MB_PLANE];
|
2013-08-15 20:37:56 +02:00
|
|
|
int_mv single_newmv[MAX_REF_FRAMES] = { { 0 } };
|
2014-09-03 02:32:12 +02:00
|
|
|
INTERP_FILTER single_inter_filter[MB_MODE_COUNT][MAX_REF_FRAMES];
|
|
|
|
int single_skippable[MB_MODE_COUNT][MAX_REF_FRAMES];
|
2012-10-31 22:40:53 +01:00
|
|
|
static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
|
|
|
|
VP9_ALT_FLAG };
|
2013-07-17 18:56:46 +02:00
|
|
|
int64_t best_rd = best_rd_so_far;
|
2013-11-23 01:35:37 +01:00
|
|
|
int64_t best_pred_diff[REFERENCE_MODES];
|
|
|
|
int64_t best_pred_rd[REFERENCE_MODES];
|
2013-10-30 22:40:34 +01:00
|
|
|
int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
|
|
|
|
int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO best_mbmode;
|
Early termination in encoding partition search
In the partition search, the encoder checks all possible
partitionings in the superblock's partition search tree.
This patch proposed a set of criteria for partition search
early termination, which effectively decided whether or
not to terminate the search in current branch based on the
"skippable" result of the quantized transform coefficients.
The "skippable" information was gathered during the
partition mode search, and no overhead calculations were
introduced.
This patch gives significant encoding speed gains without
sacrificing the quality.
Borg test results:
1. At speed 1,
stdhd set: psnr: +0.074%, ssim: +0.093%;
derf set: psnr: -0.024%, ssim: +0.011%;
2. At speed 2,
stdhd set: psnr: +0.033%, ssim: +0.100%;
derf set: psnr: -0.062%, ssim: +0.003%;
3. At speed 3,
stdhd set: psnr: +0.060%, ssim: +0.190%;
derf set: psnr: -0.064%, ssim: -0.002%;
4. At speed 4,
stdhd set: psnr: +0.070%, ssim: +0.143%;
derf set: psnr: -0.104%, ssim: +0.039%;
The speedup ranges from several percent to 60+%.
speed1 speed2 speed3 speed4
(1080p, 100f):
old_town_cross: 48.2% 23.9% 20.8% 16.5%
park_joy: 11.4% 17.8% 29.4% 18.2%
pedestrian_area: 10.7% 4.0% 4.2% 2.4%
(720p, 200f):
mobcal: 68.1% 36.3% 34.4% 17.7%
parkrun: 15.8% 24.2% 37.1% 16.8%
shields: 45.1% 32.8% 30.1% 9.6%
(cif, 300f)
bus: 3.7% 10.4% 14.0% 7.9%
deadline: 13.6% 14.8% 12.6% 10.9%
mobile: 5.3% 11.5% 14.7% 10.7%
Change-Id: I246c38fb952ad762ce5e365711235b605f470a66
2014-08-15 02:25:21 +02:00
|
|
|
int best_mode_skippable = 0;
|
Adaptive mode search scheduling
This commit enables an adaptive mode search order scheduling scheme
in the rate-distortion optimization. It changes the compression
performance by -0.433% and -0.420% for derf and stdhd respectively.
It provides speed improvement for speed 3:
bus CIF 1000 kbps
24590 b/f, 35.513 dB, 7864 ms ->
24696 b/f, 35.491 dB, 7408 ms (6% speed-up)
stockholm 720p 1000 kbps
8983 b/f, 35.078 dB, 65698 ms ->
8962 b/f, 35.054 dB, 60298 ms (8%)
old_town_cross 720p 1000 kbps
11804 b/f, 35.666 dB, 62492 ms ->
11778 b/f, 35.609 dB, 56040 ms (10%)
blue_sky 1080p 1500 kbps
57173 b/f, 36.179 dB, 77879 ms ->
57199 b/f, 36.131 dB, 69821 ms (10%)
pedestrian_area 1080p 2000 kbps
74241 b/f, 41.105 dB, 144031 ms ->
74271 b/f, 41.091 dB, 133614 ms (8%)
Change-Id: Iaad28cbc99399030fc5f9951eb5aa7fa633f320e
2014-09-18 22:37:20 +02:00
|
|
|
int midx, best_mode_index = -1;
|
2013-06-06 22:44:34 +02:00
|
|
|
unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
|
2015-07-20 22:49:15 +02:00
|
|
|
vpx_prob comp_mode_p;
|
2013-07-02 20:18:00 +02:00
|
|
|
int64_t best_intra_rd = INT64_MAX;
|
2014-09-10 00:32:40 +02:00
|
|
|
unsigned int best_pred_sse = UINT_MAX;
|
2014-04-12 00:26:24 +02:00
|
|
|
PREDICTION_MODE best_intra_mode = DC_PRED;
|
2013-07-27 02:15:37 +02:00
|
|
|
int rate_uv_intra[TX_SIZES], rate_uv_tokenonly[TX_SIZES];
|
|
|
|
int64_t dist_uv[TX_SIZES];
|
|
|
|
int skip_uv[TX_SIZES];
|
2014-04-12 00:26:24 +02:00
|
|
|
PREDICTION_MODE mode_uv[TX_SIZES];
|
2014-09-24 15:36:34 +02:00
|
|
|
const int intra_cost_penalty = vp9_get_intra_cost_penalty(
|
|
|
|
cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
|
2013-06-28 02:41:54 +02:00
|
|
|
int best_skip2 = 0;
|
2014-09-10 03:43:27 +02:00
|
|
|
uint8_t ref_frame_skip_mask[2] = { 0 };
|
|
|
|
uint16_t mode_skip_mask[MAX_REF_FRAMES] = { 0 };
|
2014-10-14 01:13:59 +02:00
|
|
|
int mode_skip_start = sf->mode_skip_start + 1;
|
2014-04-10 00:00:14 +02:00
|
|
|
const int *const rd_threshes = rd_opt->threshes[segment_id][bsize];
|
Tile based adaptive mode search in RD loop
Make the spatially adaptive mode search in rate-distortion
optimization loop inter tile independent. Experiments suggest that
this does not significantly change the coding staticstics.
Single tile, speed 3:
pedestrian_area 1080p 1500 kbps
59192 b/f, 40.611 dB, 101689 ms
blue_sky 1080p 1500 kbps
58505 b/f, 36.347 dB, 62458 ms
mobile_cal 720p 1000 kbps
13335 b/f, 35.646 dB, 45655 ms
as compared to 4 column tiles, speed 3:
pedestrian_area 1080p 1500 kbps
59329 b/f, 40.597 dB, 101917 ms
blue_sky 1080p 1500 kbps
58712 b/f, 36.320 dB, 62693 ms
mobile_cal 720p 1000 kbps
13191 b/f, 35.485 dB, 45319 ms
Change-Id: I35c6e1e0a859fece8f4145dec28623cbc6a12325
2014-10-24 01:54:45 +02:00
|
|
|
const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
|
2014-09-23 18:51:47 +02:00
|
|
|
int64_t mode_threshold[MAX_MODES];
|
2017-02-10 11:55:50 +01:00
|
|
|
int *tile_mode_map = tile_data->mode_map[bsize];
|
|
|
|
int mode_map[MAX_MODES]; // Maintain mode_map information locally to avoid
|
|
|
|
// lock mechanism involved with reads from
|
|
|
|
// tile_mode_map
|
2014-10-14 01:13:59 +02:00
|
|
|
const int mode_search_skip_flags = sf->mode_search_skip_flags;
|
2014-11-20 18:41:49 +01:00
|
|
|
int64_t mask_filter = 0;
|
2014-11-20 21:42:36 +01:00
|
|
|
int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
|
2014-11-20 18:41:49 +01:00
|
|
|
|
2014-05-13 20:18:25 +02:00
|
|
|
vp9_zero(best_mbmode);
|
Adaptive mode search scheduling
This commit enables an adaptive mode search order scheduling scheme
in the rate-distortion optimization. It changes the compression
performance by -0.433% and -0.420% for derf and stdhd respectively.
It provides speed improvement for speed 3:
bus CIF 1000 kbps
24590 b/f, 35.513 dB, 7864 ms ->
24696 b/f, 35.491 dB, 7408 ms (6% speed-up)
stockholm 720p 1000 kbps
8983 b/f, 35.078 dB, 65698 ms ->
8962 b/f, 35.054 dB, 60298 ms (8%)
old_town_cross 720p 1000 kbps
11804 b/f, 35.666 dB, 62492 ms ->
11778 b/f, 35.609 dB, 56040 ms (10%)
blue_sky 1080p 1500 kbps
57173 b/f, 36.179 dB, 77879 ms ->
57199 b/f, 36.131 dB, 69821 ms (10%)
pedestrian_area 1080p 2000 kbps
74241 b/f, 41.105 dB, 144031 ms ->
74271 b/f, 41.091 dB, 133614 ms (8%)
Change-Id: Iaad28cbc99399030fc5f9951eb5aa7fa633f320e
2014-09-18 22:37:20 +02:00
|
|
|
|
2014-10-14 01:13:59 +02:00
|
|
|
x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
|
2013-07-09 01:48:47 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
|
2014-11-20 21:42:36 +01:00
|
|
|
|
2014-04-19 01:56:43 +02:00
|
|
|
estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
|
2013-06-06 22:44:34 +02:00
|
|
|
&comp_mode_p);
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX;
|
2013-10-30 22:40:34 +01:00
|
|
|
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
|
2013-07-08 23:49:33 +02:00
|
|
|
best_filter_rd[i] = INT64_MAX;
|
2016-07-27 05:43:23 +02:00
|
|
|
for (i = 0; i < TX_SIZES; i++) rate_uv_intra[i] = INT_MAX;
|
|
|
|
for (i = 0; i < MAX_REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX;
|
2014-09-03 02:32:12 +02:00
|
|
|
for (i = 0; i < MB_MODE_COUNT; ++i) {
|
|
|
|
for (k = 0; k < MAX_REF_FRAMES; ++k) {
|
|
|
|
single_inter_filter[i][k] = SWITCHABLE;
|
|
|
|
single_skippable[i][k] = 0;
|
|
|
|
}
|
|
|
|
}
|
2012-11-08 20:03:00 +01:00
|
|
|
|
2014-10-09 21:32:56 +02:00
|
|
|
rd_cost->rate = INT_MAX;
|
2013-07-17 18:56:46 +02:00
|
|
|
|
2014-01-10 03:01:30 +01:00
|
|
|
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
|
2013-12-21 00:24:22 +01:00
|
|
|
x->pred_mv_sad[ref_frame] = INT_MAX;
|
2012-08-20 23:43:34 +02:00
|
|
|
if (cpi->ref_frame_flags & flag_list[ref_frame]) {
|
2015-02-18 18:40:34 +01:00
|
|
|
assert(get_ref_frame_buffer(cpi, ref_frame) != NULL);
|
2015-06-16 15:38:34 +02:00
|
|
|
setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
|
2014-08-05 00:00:04 +02:00
|
|
|
frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb);
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
2012-10-30 01:58:18 +01:00
|
|
|
frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
|
|
|
|
frame_mv[ZEROMV][ref_frame].as_int = 0;
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
2013-07-16 16:56:42 +02:00
|
|
|
|
2014-02-20 18:43:32 +01:00
|
|
|
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
|
|
|
|
if (!(cpi->ref_frame_flags & flag_list[ref_frame])) {
|
2014-09-10 03:43:27 +02:00
|
|
|
// Skip checking missing references in both single and compound reference
|
2016-01-07 18:29:34 +01:00
|
|
|
// modes. Note that a mode will be skipped if both reference frames
|
2014-09-10 03:43:27 +02:00
|
|
|
// are masked out.
|
|
|
|
ref_frame_skip_mask[0] |= (1 << ref_frame);
|
|
|
|
ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
|
2014-10-14 01:13:59 +02:00
|
|
|
} else if (sf->reference_masking) {
|
2014-02-20 18:43:32 +01:00
|
|
|
for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
|
|
|
|
// Skip fixed mv modes for poor references
|
|
|
|
if ((x->pred_mv_sad[ref_frame] >> 2) > x->pred_mv_sad[i]) {
|
2014-09-10 03:43:27 +02:00
|
|
|
mode_skip_mask[ref_frame] |= INTER_NEAREST_NEAR_ZERO;
|
2014-02-20 18:43:32 +01:00
|
|
|
break;
|
|
|
|
}
|
2014-01-10 03:01:30 +01:00
|
|
|
}
|
|
|
|
}
|
2014-02-28 01:41:44 +01:00
|
|
|
// If the segment reference frame feature is enabled....
|
|
|
|
// then do nothing if the current ref frame is not allowed..
|
2015-06-11 13:20:55 +02:00
|
|
|
if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
|
2015-06-11 18:52:00 +02:00
|
|
|
get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
|
2014-09-10 03:43:27 +02:00
|
|
|
ref_frame_skip_mask[0] |= (1 << ref_frame);
|
|
|
|
ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
|
2014-02-28 01:41:44 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Disable this drop out case if the ref frame
|
|
|
|
// segment level feature is enabled for this segment. This is to
|
|
|
|
// prevent the possibility that we end up unable to pick any mode.
|
2015-06-11 13:20:55 +02:00
|
|
|
if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
|
2014-02-28 01:41:44 +01:00
|
|
|
// Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
|
|
|
|
// unless ARNR filtering is enabled in which case we want
|
|
|
|
// an unfiltered alternative. We allow near/nearest as well
|
|
|
|
// because they may result in zero-zero MVs but be cheaper.
|
|
|
|
if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) {
|
2014-09-10 03:43:27 +02:00
|
|
|
ref_frame_skip_mask[0] = (1 << LAST_FRAME) | (1 << GOLDEN_FRAME);
|
|
|
|
ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK;
|
|
|
|
mode_skip_mask[ALTREF_FRAME] = ~INTER_NEAREST_NEAR_ZERO;
|
2014-02-28 01:41:44 +01:00
|
|
|
if (frame_mv[NEARMV][ALTREF_FRAME].as_int != 0)
|
2014-09-10 03:43:27 +02:00
|
|
|
mode_skip_mask[ALTREF_FRAME] |= (1 << NEARMV);
|
2014-02-28 01:41:44 +01:00
|
|
|
if (frame_mv[NEARESTMV][ALTREF_FRAME].as_int != 0)
|
2014-09-10 03:43:27 +02:00
|
|
|
mode_skip_mask[ALTREF_FRAME] |= (1 << NEARESTMV);
|
2014-02-28 01:41:44 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-09-11 04:42:51 +02:00
|
|
|
if (cpi->rc.is_src_frame_alt_ref) {
|
2014-10-14 01:13:59 +02:00
|
|
|
if (sf->alt_ref_search_fp) {
|
2014-09-11 04:42:51 +02:00
|
|
|
mode_skip_mask[ALTREF_FRAME] = 0;
|
|
|
|
ref_frame_skip_mask[0] = ~(1 << ALTREF_FRAME);
|
|
|
|
ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-14 01:13:59 +02:00
|
|
|
if (sf->alt_ref_search_fp)
|
2014-10-05 21:05:14 +02:00
|
|
|
if (!cm->show_frame && x->pred_mv_sad[GOLDEN_FRAME] < INT_MAX)
|
Skip certain ALTREF inter modes in ARF coding
This commit enables the encoder to skip checking ALTREF inter modes
in ARF coding, if the predicted motion vectors suggest that the
GOLDEN_FRAME provides higher prediction accuracy than ALTREF_FRAME.
It improves the speed 3 encoding speed by about 5%, at the expense
of compression performance loss -0.041% and -0.225% for derf and
stdhd, respectively.
pedestrian_area 1080p 2000 kbps
66705 b/f, 40.909 dB, 118738 ms ->
66732 b/f, 40.908 dB, 113688 ms
old_town_cross 720p 1500 kbps
14427 b/f, 36.256 dB, 62746 ms ->
14412 b/f, 36.252 dB, 60690 ms
blue_sky 1080p 1500 kbps
51026 b/f, 35.897 dB, 73310 ms ->
50921 b/f, 35.893 dB, 70406 ms
bus CIF 1000 kbps
21301 b/f, 34.841 dB, 7326 ms ->
21248 b/f, 34.837 dB, 7196 ms
Change-Id: I76cf88b4d655e1ee3c0cb03c8a5745493040e8d2
2014-09-26 21:43:14 +02:00
|
|
|
if (x->pred_mv_sad[ALTREF_FRAME] > (x->pred_mv_sad[GOLDEN_FRAME] << 1))
|
|
|
|
mode_skip_mask[ALTREF_FRAME] |= INTER_ALL;
|
|
|
|
|
2014-10-14 01:13:59 +02:00
|
|
|
if (sf->adaptive_mode_search) {
|
Conditionally skip reference frame check
For regular inter frames, if the distance from GOLDEN_FRAME is larger
than 2 and if the predicted motion vector of LAST_FRAME gives lower
sse than that of GOLDEN_FRAME, skip the GOLDE_FRAME mode checking in
the rate-distortion optimization. It provides about 5% speed-up at
expense of -0.137% and -0.230% performance down for speed 3. Local
experiment results:
pedestrian 1080p 2000 kbps
66712 b/f, 40.908 dB, 113688 ms ->
66768 b/f, 40.911 dB, 108752 ms
blue_sky 1080p 2000 kbps
51054 b/f, 35.894 dB, 70406 ms ->
51051 b/f, 35.891 dB, 67236 ms
old_town_cross 720p 1500 kbps
14412 b/f, 36.252 dB, 60690 ms ->
14431 b/f, 36.249 dB, 57346 ms
Change-Id: Idfcafe7f63da7a4896602fc60bd7093f0f0d82ca
2014-09-29 21:42:07 +02:00
|
|
|
if (cm->show_frame && !cpi->rc.is_src_frame_alt_ref &&
|
|
|
|
cpi->rc.frames_since_golden >= 3)
|
|
|
|
if (x->pred_mv_sad[GOLDEN_FRAME] > (x->pred_mv_sad[LAST_FRAME] << 1))
|
|
|
|
mode_skip_mask[GOLDEN_FRAME] |= INTER_ALL;
|
|
|
|
}
|
|
|
|
|
2014-10-14 01:13:59 +02:00
|
|
|
if (bsize > sf->max_intra_bsize) {
|
2014-09-11 18:54:19 +02:00
|
|
|
ref_frame_skip_mask[0] |= (1 << INTRA_FRAME);
|
|
|
|
ref_frame_skip_mask[1] |= (1 << INTRA_FRAME);
|
|
|
|
}
|
|
|
|
|
2014-09-11 19:34:37 +02:00
|
|
|
mode_skip_mask[INTRA_FRAME] |=
|
2014-10-14 01:13:59 +02:00
|
|
|
~(sf->intra_y_mode_mask[max_txsize_lookup[bsize]]);
|
2014-09-11 19:34:37 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
for (i = 0; i <= LAST_NEW_MV_INDEX; ++i) mode_threshold[i] = 0;
|
2017-02-10 11:55:50 +01:00
|
|
|
|
2014-12-11 17:22:03 +01:00
|
|
|
for (i = LAST_NEW_MV_INDEX + 1; i < MAX_MODES; ++i)
|
Adaptive mode search scheduling
This commit enables an adaptive mode search order scheduling scheme
in the rate-distortion optimization. It changes the compression
performance by -0.433% and -0.420% for derf and stdhd respectively.
It provides speed improvement for speed 3:
bus CIF 1000 kbps
24590 b/f, 35.513 dB, 7864 ms ->
24696 b/f, 35.491 dB, 7408 ms (6% speed-up)
stockholm 720p 1000 kbps
8983 b/f, 35.078 dB, 65698 ms ->
8962 b/f, 35.054 dB, 60298 ms (8%)
old_town_cross 720p 1000 kbps
11804 b/f, 35.666 dB, 62492 ms ->
11778 b/f, 35.609 dB, 56040 ms (10%)
blue_sky 1080p 1500 kbps
57173 b/f, 36.179 dB, 77879 ms ->
57199 b/f, 36.131 dB, 69821 ms (10%)
pedestrian_area 1080p 2000 kbps
74241 b/f, 41.105 dB, 144031 ms ->
74271 b/f, 41.091 dB, 133614 ms (8%)
Change-Id: Iaad28cbc99399030fc5f9951eb5aa7fa633f320e
2014-09-18 22:37:20 +02:00
|
|
|
mode_threshold[i] = ((int64_t)rd_threshes[i] * rd_thresh_freq_fact[i]) >> 5;
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
midx = sf->schedule_mode_search ? mode_skip_start : 0;
|
2017-02-10 11:55:50 +01:00
|
|
|
|
Adaptive mode search scheduling
This commit enables an adaptive mode search order scheduling scheme
in the rate-distortion optimization. It changes the compression
performance by -0.433% and -0.420% for derf and stdhd respectively.
It provides speed improvement for speed 3:
bus CIF 1000 kbps
24590 b/f, 35.513 dB, 7864 ms ->
24696 b/f, 35.491 dB, 7408 ms (6% speed-up)
stockholm 720p 1000 kbps
8983 b/f, 35.078 dB, 65698 ms ->
8962 b/f, 35.054 dB, 60298 ms (8%)
old_town_cross 720p 1000 kbps
11804 b/f, 35.666 dB, 62492 ms ->
11778 b/f, 35.609 dB, 56040 ms (10%)
blue_sky 1080p 1500 kbps
57173 b/f, 36.179 dB, 77879 ms ->
57199 b/f, 36.131 dB, 69821 ms (10%)
pedestrian_area 1080p 2000 kbps
74241 b/f, 41.105 dB, 144031 ms ->
74271 b/f, 41.091 dB, 133614 ms (8%)
Change-Id: Iaad28cbc99399030fc5f9951eb5aa7fa633f320e
2014-09-18 22:37:20 +02:00
|
|
|
while (midx > 4) {
|
|
|
|
uint8_t end_pos = 0;
|
|
|
|
for (i = 5; i < midx; ++i) {
|
2017-02-16 15:07:39 +01:00
|
|
|
if (mode_threshold[tile_mode_map[i - 1]] >
|
|
|
|
mode_threshold[tile_mode_map[i]]) {
|
|
|
|
uint8_t tmp = tile_mode_map[i];
|
|
|
|
tile_mode_map[i] = tile_mode_map[i - 1];
|
|
|
|
tile_mode_map[i - 1] = tmp;
|
Adaptive mode search scheduling
This commit enables an adaptive mode search order scheduling scheme
in the rate-distortion optimization. It changes the compression
performance by -0.433% and -0.420% for derf and stdhd respectively.
It provides speed improvement for speed 3:
bus CIF 1000 kbps
24590 b/f, 35.513 dB, 7864 ms ->
24696 b/f, 35.491 dB, 7408 ms (6% speed-up)
stockholm 720p 1000 kbps
8983 b/f, 35.078 dB, 65698 ms ->
8962 b/f, 35.054 dB, 60298 ms (8%)
old_town_cross 720p 1000 kbps
11804 b/f, 35.666 dB, 62492 ms ->
11778 b/f, 35.609 dB, 56040 ms (10%)
blue_sky 1080p 1500 kbps
57173 b/f, 36.179 dB, 77879 ms ->
57199 b/f, 36.131 dB, 69821 ms (10%)
pedestrian_area 1080p 2000 kbps
74241 b/f, 41.105 dB, 144031 ms ->
74271 b/f, 41.091 dB, 133614 ms (8%)
Change-Id: Iaad28cbc99399030fc5f9951eb5aa7fa633f320e
2014-09-18 22:37:20 +02:00
|
|
|
end_pos = i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
midx = end_pos;
|
|
|
|
}
|
|
|
|
|
2017-02-16 15:07:39 +01:00
|
|
|
memcpy(mode_map, tile_mode_map, sizeof(mode_map));
|
2017-02-10 11:55:50 +01:00
|
|
|
|
Adaptive mode search scheduling
This commit enables an adaptive mode search order scheduling scheme
in the rate-distortion optimization. It changes the compression
performance by -0.433% and -0.420% for derf and stdhd respectively.
It provides speed improvement for speed 3:
bus CIF 1000 kbps
24590 b/f, 35.513 dB, 7864 ms ->
24696 b/f, 35.491 dB, 7408 ms (6% speed-up)
stockholm 720p 1000 kbps
8983 b/f, 35.078 dB, 65698 ms ->
8962 b/f, 35.054 dB, 60298 ms (8%)
old_town_cross 720p 1000 kbps
11804 b/f, 35.666 dB, 62492 ms ->
11778 b/f, 35.609 dB, 56040 ms (10%)
blue_sky 1080p 1500 kbps
57173 b/f, 36.179 dB, 77879 ms ->
57199 b/f, 36.131 dB, 69821 ms (10%)
pedestrian_area 1080p 2000 kbps
74241 b/f, 41.105 dB, 144031 ms ->
74271 b/f, 41.091 dB, 133614 ms (8%)
Change-Id: Iaad28cbc99399030fc5f9951eb5aa7fa633f320e
2014-09-18 22:37:20 +02:00
|
|
|
for (midx = 0; midx < MAX_MODES; ++midx) {
|
|
|
|
int mode_index = mode_map[midx];
|
2012-11-16 00:50:07 +01:00
|
|
|
int mode_excluded = 0;
|
2013-01-14 20:49:30 +01:00
|
|
|
int64_t this_rd = INT64_MAX;
|
2012-08-20 23:43:34 +02:00
|
|
|
int disable_skip = 0;
|
|
|
|
int compmode_cost = 0;
|
2012-10-30 01:58:18 +01:00
|
|
|
int rate2 = 0, rate_y = 0, rate_uv = 0;
|
2013-06-21 21:54:52 +02:00
|
|
|
int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
|
2013-08-10 01:15:08 +02:00
|
|
|
int skippable = 0;
|
2013-06-28 02:41:54 +02:00
|
|
|
int this_skip2 = 0;
|
2014-02-25 16:29:48 +01:00
|
|
|
int64_t total_sse = INT64_MAX;
|
2013-07-03 23:47:54 +02:00
|
|
|
int early_term = 0;
|
2013-05-16 07:28:36 +02:00
|
|
|
|
2014-08-18 19:42:23 +02:00
|
|
|
this_mode = vp9_mode_order[mode_index].mode;
|
|
|
|
ref_frame = vp9_mode_order[mode_index].ref_frame[0];
|
|
|
|
second_ref_frame = vp9_mode_order[mode_index].ref_frame[1];
|
|
|
|
|
2017-03-16 23:45:07 +01:00
|
|
|
vp9_zero(x->sum_y_eobs);
|
|
|
|
|
2013-09-05 02:15:05 +02:00
|
|
|
// Look at the reference frame of the best mode so far and set the
|
|
|
|
// skip mask to look at a subset of the remaining modes.
|
Adaptive mode search scheduling
This commit enables an adaptive mode search order scheduling scheme
in the rate-distortion optimization. It changes the compression
performance by -0.433% and -0.420% for derf and stdhd respectively.
It provides speed improvement for speed 3:
bus CIF 1000 kbps
24590 b/f, 35.513 dB, 7864 ms ->
24696 b/f, 35.491 dB, 7408 ms (6% speed-up)
stockholm 720p 1000 kbps
8983 b/f, 35.078 dB, 65698 ms ->
8962 b/f, 35.054 dB, 60298 ms (8%)
old_town_cross 720p 1000 kbps
11804 b/f, 35.666 dB, 62492 ms ->
11778 b/f, 35.609 dB, 56040 ms (10%)
blue_sky 1080p 1500 kbps
57173 b/f, 36.179 dB, 77879 ms ->
57199 b/f, 36.131 dB, 69821 ms (10%)
pedestrian_area 1080p 2000 kbps
74241 b/f, 41.105 dB, 144031 ms ->
74271 b/f, 41.091 dB, 133614 ms (8%)
Change-Id: Iaad28cbc99399030fc5f9951eb5aa7fa633f320e
2014-09-18 22:37:20 +02:00
|
|
|
if (midx == mode_skip_start && best_mode_index >= 0) {
|
2014-09-09 20:58:10 +02:00
|
|
|
switch (best_mbmode.ref_frame[0]) {
|
2016-07-27 05:43:23 +02:00
|
|
|
case INTRA_FRAME: break;
|
2014-02-20 18:43:32 +01:00
|
|
|
case LAST_FRAME:
|
2014-09-10 03:43:27 +02:00
|
|
|
ref_frame_skip_mask[0] |= LAST_FRAME_MODE_MASK;
|
|
|
|
ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
|
2014-02-20 18:43:32 +01:00
|
|
|
break;
|
|
|
|
case GOLDEN_FRAME:
|
2014-09-10 03:43:27 +02:00
|
|
|
ref_frame_skip_mask[0] |= GOLDEN_FRAME_MODE_MASK;
|
|
|
|
ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
|
2014-02-20 18:43:32 +01:00
|
|
|
break;
|
2016-07-27 05:43:23 +02:00
|
|
|
case ALTREF_FRAME: ref_frame_skip_mask[0] |= ALT_REF_MODE_MASK; break;
|
2014-02-20 18:43:32 +01:00
|
|
|
case NONE:
|
2016-07-27 05:43:23 +02:00
|
|
|
case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break;
|
2013-09-05 02:15:05 +02:00
|
|
|
}
|
|
|
|
}
|
2014-08-18 19:42:23 +02:00
|
|
|
|
2015-01-23 20:47:15 +01:00
|
|
|
if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
|
2015-08-18 03:19:22 +02:00
|
|
|
(ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
|
2014-09-10 03:43:27 +02:00
|
|
|
continue;
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (mode_skip_mask[ref_frame] & (1 << this_mode)) continue;
|
2013-07-01 17:27:12 +02:00
|
|
|
|
2012-08-20 23:43:34 +02:00
|
|
|
// Test best rd so far against threshold for trying this mode.
|
2014-10-14 01:13:59 +02:00
|
|
|
if (best_mode_skippable && sf->schedule_mode_search)
|
Adaptive mode search scheduling
This commit enables an adaptive mode search order scheduling scheme
in the rate-distortion optimization. It changes the compression
performance by -0.433% and -0.420% for derf and stdhd respectively.
It provides speed improvement for speed 3:
bus CIF 1000 kbps
24590 b/f, 35.513 dB, 7864 ms ->
24696 b/f, 35.491 dB, 7408 ms (6% speed-up)
stockholm 720p 1000 kbps
8983 b/f, 35.078 dB, 65698 ms ->
8962 b/f, 35.054 dB, 60298 ms (8%)
old_town_cross 720p 1000 kbps
11804 b/f, 35.666 dB, 62492 ms ->
11778 b/f, 35.609 dB, 56040 ms (10%)
blue_sky 1080p 1500 kbps
57173 b/f, 36.179 dB, 77879 ms ->
57199 b/f, 36.131 dB, 69821 ms (10%)
pedestrian_area 1080p 2000 kbps
74241 b/f, 41.105 dB, 144031 ms ->
74271 b/f, 41.091 dB, 133614 ms (8%)
Change-Id: Iaad28cbc99399030fc5f9951eb5aa7fa633f320e
2014-09-18 22:37:20 +02:00
|
|
|
mode_threshold[mode_index] <<= 1;
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (best_rd < mode_threshold[mode_index]) continue;
|
2013-06-10 19:19:25 +02:00
|
|
|
|
2017-03-31 19:39:57 +02:00
|
|
|
// This is only used in motion vector unit test.
|
|
|
|
if (cpi->oxcf.motion_vector_unit_test && ref_frame == INTRA_FRAME) continue;
|
|
|
|
|
2014-10-14 01:13:59 +02:00
|
|
|
if (sf->motion_field_mode_search) {
|
2016-07-27 05:43:23 +02:00
|
|
|
const int mi_width = VPXMIN(num_8x8_blocks_wide_lookup[bsize],
|
|
|
|
tile_info->mi_col_end - mi_col);
|
2015-08-18 03:19:22 +02:00
|
|
|
const int mi_height = VPXMIN(num_8x8_blocks_high_lookup[bsize],
|
|
|
|
tile_info->mi_row_end - mi_row);
|
2014-10-07 21:45:25 +02:00
|
|
|
const int bsl = mi_width_log2_lookup[bsize];
|
2016-07-27 05:43:23 +02:00
|
|
|
int cb_partition_search_ctrl =
|
|
|
|
(((mi_row + mi_col) >> bsl) +
|
|
|
|
get_chessboard_index(cm->current_video_frame)) &
|
|
|
|
0x1;
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO *ref_mi;
|
Enable motion field based mode seach skip
This commit allows the encoder to check the above and left neighbor
blocks' reference frames and motion vectors. If they are all
consistent, skip checking the NEARMV and ZEROMV modes. This is
enabled in speed 3. The coding performance is improved:
pedestrian area 1080p at 2000 kbps,
from 74773 b/f, 41.101 dB, 198064 ms
to 74795 b/f, 41.099 dB, 193078 ms
park joy 1080p at 15000 kbps,
from 290727 b/f, 30.640 dB, 609113 ms
to 290558 b/f, 30.630 dB, 592815 ms
Overall compression performance of speed 3 is changed
derf -0.171%
stdhd -0.168%
Change-Id: I8d47dd543a5f90d7a1c583f74035b926b6704b95
2014-08-12 02:48:14 +02:00
|
|
|
int const_motion = 1;
|
2014-08-12 03:02:18 +02:00
|
|
|
int skip_ref_frame = !cb_partition_search_ctrl;
|
|
|
|
MV_REFERENCE_FRAME rf = NONE;
|
Enable motion field based mode seach skip
This commit allows the encoder to check the above and left neighbor
blocks' reference frames and motion vectors. If they are all
consistent, skip checking the NEARMV and ZEROMV modes. This is
enabled in speed 3. The coding performance is improved:
pedestrian area 1080p at 2000 kbps,
from 74773 b/f, 41.101 dB, 198064 ms
to 74795 b/f, 41.099 dB, 193078 ms
park joy 1080p at 15000 kbps,
from 290727 b/f, 30.640 dB, 609113 ms
to 290558 b/f, 30.630 dB, 592815 ms
Overall compression performance of speed 3 is changed
derf -0.171%
stdhd -0.168%
Change-Id: I8d47dd543a5f90d7a1c583f74035b926b6704b95
2014-08-12 02:48:14 +02:00
|
|
|
int_mv ref_mv;
|
|
|
|
ref_mv.as_int = INVALID_MV;
|
|
|
|
|
2014-10-27 19:11:50 +01:00
|
|
|
if ((mi_row - 1) >= tile_info->mi_row_start) {
|
2016-01-20 01:40:20 +01:00
|
|
|
ref_mv = xd->mi[-xd->mi_stride]->mv[0];
|
|
|
|
rf = xd->mi[-xd->mi_stride]->ref_frame[0];
|
Enable motion field based mode seach skip
This commit allows the encoder to check the above and left neighbor
blocks' reference frames and motion vectors. If they are all
consistent, skip checking the NEARMV and ZEROMV modes. This is
enabled in speed 3. The coding performance is improved:
pedestrian area 1080p at 2000 kbps,
from 74773 b/f, 41.101 dB, 198064 ms
to 74795 b/f, 41.099 dB, 193078 ms
park joy 1080p at 15000 kbps,
from 290727 b/f, 30.640 dB, 609113 ms
to 290558 b/f, 30.630 dB, 592815 ms
Overall compression performance of speed 3 is changed
derf -0.171%
stdhd -0.168%
Change-Id: I8d47dd543a5f90d7a1c583f74035b926b6704b95
2014-08-12 02:48:14 +02:00
|
|
|
for (i = 0; i < mi_width; ++i) {
|
2016-01-20 01:40:20 +01:00
|
|
|
ref_mi = xd->mi[-xd->mi_stride + i];
|
|
|
|
const_motion &= (ref_mv.as_int == ref_mi->mv[0].as_int) &&
|
|
|
|
(ref_frame == ref_mi->ref_frame[0]);
|
|
|
|
skip_ref_frame &= (rf == ref_mi->ref_frame[0]);
|
Enable motion field based mode seach skip
This commit allows the encoder to check the above and left neighbor
blocks' reference frames and motion vectors. If they are all
consistent, skip checking the NEARMV and ZEROMV modes. This is
enabled in speed 3. The coding performance is improved:
pedestrian area 1080p at 2000 kbps,
from 74773 b/f, 41.101 dB, 198064 ms
to 74795 b/f, 41.099 dB, 193078 ms
park joy 1080p at 15000 kbps,
from 290727 b/f, 30.640 dB, 609113 ms
to 290558 b/f, 30.630 dB, 592815 ms
Overall compression performance of speed 3 is changed
derf -0.171%
stdhd -0.168%
Change-Id: I8d47dd543a5f90d7a1c583f74035b926b6704b95
2014-08-12 02:48:14 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-27 19:11:50 +01:00
|
|
|
if ((mi_col - 1) >= tile_info->mi_col_start) {
|
2016-07-27 05:43:23 +02:00
|
|
|
if (ref_mv.as_int == INVALID_MV) ref_mv = xd->mi[-1]->mv[0];
|
|
|
|
if (rf == NONE) rf = xd->mi[-1]->ref_frame[0];
|
Enable motion field based mode seach skip
This commit allows the encoder to check the above and left neighbor
blocks' reference frames and motion vectors. If they are all
consistent, skip checking the NEARMV and ZEROMV modes. This is
enabled in speed 3. The coding performance is improved:
pedestrian area 1080p at 2000 kbps,
from 74773 b/f, 41.101 dB, 198064 ms
to 74795 b/f, 41.099 dB, 193078 ms
park joy 1080p at 15000 kbps,
from 290727 b/f, 30.640 dB, 609113 ms
to 290558 b/f, 30.630 dB, 592815 ms
Overall compression performance of speed 3 is changed
derf -0.171%
stdhd -0.168%
Change-Id: I8d47dd543a5f90d7a1c583f74035b926b6704b95
2014-08-12 02:48:14 +02:00
|
|
|
for (i = 0; i < mi_height; ++i) {
|
2016-01-20 01:40:20 +01:00
|
|
|
ref_mi = xd->mi[i * xd->mi_stride - 1];
|
|
|
|
const_motion &= (ref_mv.as_int == ref_mi->mv[0].as_int) &&
|
|
|
|
(ref_frame == ref_mi->ref_frame[0]);
|
|
|
|
skip_ref_frame &= (rf == ref_mi->ref_frame[0]);
|
Enable motion field based mode seach skip
This commit allows the encoder to check the above and left neighbor
blocks' reference frames and motion vectors. If they are all
consistent, skip checking the NEARMV and ZEROMV modes. This is
enabled in speed 3. The coding performance is improved:
pedestrian area 1080p at 2000 kbps,
from 74773 b/f, 41.101 dB, 198064 ms
to 74795 b/f, 41.099 dB, 193078 ms
park joy 1080p at 15000 kbps,
from 290727 b/f, 30.640 dB, 609113 ms
to 290558 b/f, 30.630 dB, 592815 ms
Overall compression performance of speed 3 is changed
derf -0.171%
stdhd -0.168%
Change-Id: I8d47dd543a5f90d7a1c583f74035b926b6704b95
2014-08-12 02:48:14 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-08-12 03:02:18 +02:00
|
|
|
if (skip_ref_frame && this_mode != NEARESTMV && this_mode != NEWMV)
|
|
|
|
if (rf > INTRA_FRAME)
|
2016-07-27 05:43:23 +02:00
|
|
|
if (ref_frame != rf) continue;
|
2014-08-12 03:02:18 +02:00
|
|
|
|
Enable motion field based mode seach skip
This commit allows the encoder to check the above and left neighbor
blocks' reference frames and motion vectors. If they are all
consistent, skip checking the NEARMV and ZEROMV modes. This is
enabled in speed 3. The coding performance is improved:
pedestrian area 1080p at 2000 kbps,
from 74773 b/f, 41.101 dB, 198064 ms
to 74795 b/f, 41.099 dB, 193078 ms
park joy 1080p at 15000 kbps,
from 290727 b/f, 30.640 dB, 609113 ms
to 290558 b/f, 30.630 dB, 592815 ms
Overall compression performance of speed 3 is changed
derf -0.171%
stdhd -0.168%
Change-Id: I8d47dd543a5f90d7a1c583f74035b926b6704b95
2014-08-12 02:48:14 +02:00
|
|
|
if (const_motion)
|
2016-07-27 05:43:23 +02:00
|
|
|
if (this_mode == NEARMV || this_mode == ZEROMV) continue;
|
Enable motion field based mode seach skip
This commit allows the encoder to check the above and left neighbor
blocks' reference frames and motion vectors. If they are all
consistent, skip checking the NEARMV and ZEROMV modes. This is
enabled in speed 3. The coding performance is improved:
pedestrian area 1080p at 2000 kbps,
from 74773 b/f, 41.101 dB, 198064 ms
to 74795 b/f, 41.099 dB, 193078 ms
park joy 1080p at 15000 kbps,
from 290727 b/f, 30.640 dB, 609113 ms
to 290558 b/f, 30.630 dB, 592815 ms
Overall compression performance of speed 3 is changed
derf -0.171%
stdhd -0.168%
Change-Id: I8d47dd543a5f90d7a1c583f74035b926b6704b95
2014-08-12 02:48:14 +02:00
|
|
|
}
|
|
|
|
|
2013-07-24 18:59:36 +02:00
|
|
|
comp_pred = second_ref_frame > INTRA_FRAME;
|
2013-07-03 23:47:54 +02:00
|
|
|
if (comp_pred) {
|
2016-07-27 05:43:23 +02:00
|
|
|
if (!cpi->allow_comp_inter_inter) continue;
|
2014-09-02 19:57:10 +02:00
|
|
|
|
2014-09-15 20:20:30 +02:00
|
|
|
// Skip compound inter modes if ARF is not available.
|
2016-07-27 05:43:23 +02:00
|
|
|
if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
|
2014-09-15 20:20:30 +02:00
|
|
|
|
|
|
|
// Do not allow compound prediction if the segment level reference frame
|
|
|
|
// feature is in use as in this case there can only be one reference.
|
2016-07-27 05:43:23 +02:00
|
|
|
if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue;
|
2014-09-15 20:20:30 +02:00
|
|
|
|
2014-02-20 18:43:32 +01:00
|
|
|
if ((mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
|
2014-09-12 02:15:15 +02:00
|
|
|
best_mode_index >= 0 && best_mbmode.ref_frame[0] == INTRA_FRAME)
|
2014-02-20 18:43:32 +01:00
|
|
|
continue;
|
2014-09-12 02:15:15 +02:00
|
|
|
|
2014-02-28 02:01:48 +01:00
|
|
|
mode_excluded = cm->reference_mode == SINGLE_REFERENCE;
|
2012-08-20 23:43:34 +02:00
|
|
|
} else {
|
2014-02-28 02:01:48 +01:00
|
|
|
if (ref_frame != INTRA_FRAME)
|
|
|
|
mode_excluded = cm->reference_mode == COMPOUND_REFERENCE;
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
|
|
|
|
2014-02-28 04:13:48 +01:00
|
|
|
if (ref_frame == INTRA_FRAME) {
|
2014-10-14 01:13:59 +02:00
|
|
|
if (sf->adaptive_mode_search)
|
2014-09-10 00:32:40 +02:00
|
|
|
if ((x->source_variance << num_pels_log2_lookup[bsize]) > best_pred_sse)
|
2014-08-29 07:58:30 +02:00
|
|
|
continue;
|
|
|
|
|
2014-02-28 04:13:48 +01:00
|
|
|
if (this_mode != DC_PRED) {
|
|
|
|
// Disable intra modes other than DC_PRED for blocks with low variance
|
|
|
|
// Threshold for intra skipping based on source variance
|
|
|
|
// TODO(debargha): Specialize the threshold for super block sizes
|
|
|
|
const unsigned int skip_intra_var_thresh = 64;
|
|
|
|
if ((mode_search_skip_flags & FLAG_SKIP_INTRA_LOWVAR) &&
|
|
|
|
x->source_variance < skip_intra_var_thresh)
|
2013-07-03 23:47:54 +02:00
|
|
|
continue;
|
2014-02-28 04:13:48 +01:00
|
|
|
// Only search the oblique modes if the best so far is
|
|
|
|
// one of the neighboring directional modes
|
|
|
|
if ((mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) &&
|
|
|
|
(this_mode >= D45_PRED && this_mode <= TM_PRED)) {
|
2016-07-27 05:43:23 +02:00
|
|
|
if (best_mode_index >= 0 && best_mbmode.ref_frame[0] > INTRA_FRAME)
|
2013-07-02 20:18:00 +02:00
|
|
|
continue;
|
2014-02-28 04:13:48 +01:00
|
|
|
}
|
|
|
|
if (mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
|
2016-07-27 05:43:23 +02:00
|
|
|
if (conditional_skipintra(this_mode, best_intra_mode)) continue;
|
2014-02-28 04:13:48 +01:00
|
|
|
}
|
2013-07-02 20:18:00 +02:00
|
|
|
}
|
2014-02-28 19:28:57 +01:00
|
|
|
} else {
|
2016-07-27 05:43:23 +02:00
|
|
|
const MV_REFERENCE_FRAME ref_frames[2] = { ref_frame, second_ref_frame };
|
|
|
|
if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv, this_mode,
|
|
|
|
ref_frames))
|
2014-06-20 19:36:18 +02:00
|
|
|
continue;
|
2014-02-20 18:43:32 +01:00
|
|
|
}
|
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->mode = this_mode;
|
|
|
|
mi->uv_mode = DC_PRED;
|
|
|
|
mi->ref_frame[0] = ref_frame;
|
|
|
|
mi->ref_frame[1] = second_ref_frame;
|
2014-02-20 18:43:32 +01:00
|
|
|
// Evaluate all sub-pel filters irrespective of whether we can use
|
|
|
|
// them for this frame.
|
2016-07-27 05:43:23 +02:00
|
|
|
mi->interp_filter =
|
|
|
|
cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->mv[0].as_int = mi->mv[1].as_int = 0;
|
2014-09-05 00:16:12 +02:00
|
|
|
|
2014-02-20 18:43:32 +01:00
|
|
|
x->skip = 0;
|
|
|
|
set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
|
2013-09-12 01:34:55 +02:00
|
|
|
|
2014-02-20 18:43:32 +01:00
|
|
|
// Select prediction reference frames.
|
|
|
|
for (i = 0; i < MAX_MB_PLANE; i++) {
|
|
|
|
xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
|
2016-07-27 05:43:23 +02:00
|
|
|
if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
|
2014-02-20 18:43:32 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (ref_frame == INTRA_FRAME) {
|
|
|
|
TX_SIZE uv_tx;
|
2014-10-08 02:10:38 +02:00
|
|
|
struct macroblockd_plane *const pd = &xd->plane[1];
|
2015-04-24 05:47:40 +02:00
|
|
|
memset(x->skip_txfm, 0, sizeof(x->skip_txfm));
|
2016-07-27 05:43:23 +02:00
|
|
|
super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, NULL, bsize,
|
|
|
|
best_rd);
|
|
|
|
if (rate_y == INT_MAX) continue;
|
2013-04-11 00:55:59 +02:00
|
|
|
|
2016-08-30 21:52:29 +02:00
|
|
|
uv_tx = uv_txsize_lookup[bsize][mi->tx_size][pd->subsampling_x]
|
|
|
|
[pd->subsampling_y];
|
2013-07-18 22:09:38 +02:00
|
|
|
if (rate_uv_intra[uv_tx] == INT_MAX) {
|
2016-07-27 05:43:23 +02:00
|
|
|
choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx, &rate_uv_intra[uv_tx],
|
|
|
|
&rate_uv_tokenonly[uv_tx], &dist_uv[uv_tx],
|
|
|
|
&skip_uv[uv_tx], &mode_uv[uv_tx]);
|
2013-07-18 22:09:38 +02:00
|
|
|
}
|
2013-04-11 00:55:59 +02:00
|
|
|
|
2013-07-18 22:09:38 +02:00
|
|
|
rate_uv = rate_uv_tokenonly[uv_tx];
|
2013-04-11 00:55:59 +02:00
|
|
|
distortion_uv = dist_uv[uv_tx];
|
|
|
|
skippable = skippable && skip_uv[uv_tx];
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->uv_mode = mode_uv[uv_tx];
|
2012-11-16 00:50:07 +01:00
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
rate2 = rate_y + cpi->mbmode_cost[mi->mode] + rate_uv_intra[uv_tx];
|
2013-09-28 01:02:49 +02:00
|
|
|
if (this_mode != DC_PRED && this_mode != TM_PRED)
|
2013-05-01 01:13:20 +02:00
|
|
|
rate2 += intra_cost_penalty;
|
2012-11-16 00:50:07 +01:00
|
|
|
distortion2 = distortion_y + distortion_uv;
|
|
|
|
} else {
|
2016-07-27 05:43:23 +02:00
|
|
|
this_rd = handle_inter_mode(
|
|
|
|
cpi, x, bsize, &rate2, &distortion2, &skippable, &rate_y, &rate_uv,
|
|
|
|
&disable_skip, frame_mv, mi_row, mi_col, single_newmv,
|
|
|
|
single_inter_filter, single_skippable, &total_sse, best_rd,
|
|
|
|
&mask_filter, filter_cache);
|
|
|
|
if (this_rd == INT64_MAX) continue;
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2014-02-20 18:43:32 +01:00
|
|
|
compmode_cost = vp9_cost_bit(comp_mode_p, comp_pred);
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
|
2014-02-20 18:43:32 +01:00
|
|
|
}
|
2012-08-20 23:43:34 +02:00
|
|
|
|
|
|
|
// Estimate the reference frame signaling cost and add it
|
|
|
|
// to the rolling cost variable.
|
2014-02-20 18:43:32 +01:00
|
|
|
if (comp_pred) {
|
2013-07-24 18:59:36 +02:00
|
|
|
rate2 += ref_costs_comp[ref_frame];
|
2013-06-06 22:44:34 +02:00
|
|
|
} else {
|
2013-07-24 18:59:36 +02:00
|
|
|
rate2 += ref_costs_single[ref_frame];
|
2013-06-06 22:44:34 +02:00
|
|
|
}
|
2012-08-20 23:43:34 +02:00
|
|
|
|
|
|
|
if (!disable_skip) {
|
2016-02-08 18:30:50 +01:00
|
|
|
const vpx_prob skip_prob = vp9_get_skip_prob(cm, xd);
|
|
|
|
const int skip_cost0 = vp9_cost_bit(skip_prob, 0);
|
|
|
|
const int skip_cost1 = vp9_cost_bit(skip_prob, 1);
|
|
|
|
|
2013-09-28 01:02:49 +02:00
|
|
|
if (skippable) {
|
2013-04-09 19:54:19 +02:00
|
|
|
// Back out the coefficient coding costs
|
|
|
|
rate2 -= (rate_y + rate_uv);
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2014-05-31 01:02:46 +02:00
|
|
|
// Cost the skip mb case
|
2016-02-08 18:30:50 +01:00
|
|
|
rate2 += skip_cost1;
|
2014-05-31 01:02:46 +02:00
|
|
|
} else if (ref_frame != INTRA_FRAME && !xd->lossless) {
|
2016-07-27 05:43:23 +02:00
|
|
|
if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv + skip_cost0,
|
|
|
|
distortion2) <
|
2016-02-08 18:30:50 +01:00
|
|
|
RDCOST(x->rdmult, x->rddiv, skip_cost1, total_sse)) {
|
2013-06-28 02:41:54 +02:00
|
|
|
// Add in the cost of the no skip flag.
|
2016-02-08 18:30:50 +01:00
|
|
|
rate2 += skip_cost0;
|
2013-06-28 02:41:54 +02:00
|
|
|
} else {
|
2013-07-08 23:49:33 +02:00
|
|
|
// FIXME(rbultje) make this work for splitmv also
|
2016-06-22 21:20:06 +02:00
|
|
|
assert(total_sse >= 0);
|
|
|
|
|
2016-02-08 18:30:50 +01:00
|
|
|
rate2 += skip_cost1;
|
2013-06-28 02:41:54 +02:00
|
|
|
distortion2 = total_sse;
|
|
|
|
rate2 -= (rate_y + rate_uv);
|
|
|
|
this_skip2 = 1;
|
|
|
|
}
|
2014-05-31 01:02:46 +02:00
|
|
|
} else {
|
2012-08-20 23:43:34 +02:00
|
|
|
// Add in the cost of the no skip flag.
|
2016-02-08 18:30:50 +01:00
|
|
|
rate2 += skip_cost0;
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Calculate the final RD estimate for this mode.
|
|
|
|
this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
|
|
|
|
}
|
|
|
|
|
2015-03-06 18:21:36 +01:00
|
|
|
// Apply an adjustment to the rd value based on the similarity of the
|
|
|
|
// source variance and reconstructed variance.
|
2016-07-27 05:43:23 +02:00
|
|
|
rd_variance_adjustment(cpi, x, bsize, &this_rd, ref_frame,
|
|
|
|
x->source_variance);
|
2015-03-06 18:21:36 +01:00
|
|
|
|
2014-02-28 02:01:48 +01:00
|
|
|
if (ref_frame == INTRA_FRAME) {
|
2016-07-27 05:43:23 +02:00
|
|
|
// Keep record of best intra rd
|
2014-02-28 02:01:48 +01:00
|
|
|
if (this_rd < best_intra_rd) {
|
|
|
|
best_intra_rd = this_rd;
|
2016-01-20 01:40:20 +01:00
|
|
|
best_intra_mode = mi->mode;
|
2014-02-28 02:01:48 +01:00
|
|
|
}
|
2013-07-03 23:47:54 +02:00
|
|
|
}
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2013-07-24 18:59:36 +02:00
|
|
|
if (!disable_skip && ref_frame == INTRA_FRAME) {
|
2013-11-23 01:35:37 +01:00
|
|
|
for (i = 0; i < REFERENCE_MODES; ++i)
|
2015-08-18 03:19:22 +02:00
|
|
|
best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
|
2013-10-30 22:40:34 +01:00
|
|
|
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
|
2015-08-18 03:19:22 +02:00
|
|
|
best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd);
|
2012-11-30 20:46:20 +01:00
|
|
|
}
|
|
|
|
|
2012-08-20 23:43:34 +02:00
|
|
|
// Did this mode help.. i.e. is it the new best mode
|
|
|
|
if (this_rd < best_rd || x->skip) {
|
2013-11-07 23:56:58 +01:00
|
|
|
int max_plane = MAX_MB_PLANE;
|
2012-08-20 23:43:34 +02:00
|
|
|
if (!mode_excluded) {
|
|
|
|
// Note index of best mode so far
|
|
|
|
best_mode_index = mode_index;
|
|
|
|
|
2013-05-30 20:27:40 +02:00
|
|
|
if (ref_frame == INTRA_FRAME) {
|
2012-08-20 23:43:34 +02:00
|
|
|
/* required for left and above block mv */
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->mv[0].as_int = 0;
|
2013-11-07 23:56:58 +01:00
|
|
|
max_plane = 1;
|
2016-07-21 20:47:51 +02:00
|
|
|
// Initialize interp_filter here so we do not have to check for
|
|
|
|
// inter block modes in get_pred_context_switchable_interp()
|
|
|
|
mi->interp_filter = SWITCHABLE_FILTERS;
|
2014-08-29 07:58:30 +02:00
|
|
|
} else {
|
2014-09-10 00:32:40 +02:00
|
|
|
best_pred_sse = x->pred_sse[ref_frame];
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
|
|
|
|
2014-10-09 21:32:56 +02:00
|
|
|
rd_cost->rate = rate2;
|
|
|
|
rd_cost->dist = distortion2;
|
|
|
|
rd_cost->rdcost = this_rd;
|
2012-08-20 23:43:34 +02:00
|
|
|
best_rd = this_rd;
|
2016-01-20 01:40:20 +01:00
|
|
|
best_mbmode = *mi;
|
2013-06-28 02:41:54 +02:00
|
|
|
best_skip2 = this_skip2;
|
Early termination in encoding partition search
In the partition search, the encoder checks all possible
partitionings in the superblock's partition search tree.
This patch proposed a set of criteria for partition search
early termination, which effectively decided whether or
not to terminate the search in current branch based on the
"skippable" result of the quantized transform coefficients.
The "skippable" information was gathered during the
partition mode search, and no overhead calculations were
introduced.
This patch gives significant encoding speed gains without
sacrificing the quality.
Borg test results:
1. At speed 1,
stdhd set: psnr: +0.074%, ssim: +0.093%;
derf set: psnr: -0.024%, ssim: +0.011%;
2. At speed 2,
stdhd set: psnr: +0.033%, ssim: +0.100%;
derf set: psnr: -0.062%, ssim: +0.003%;
3. At speed 3,
stdhd set: psnr: +0.060%, ssim: +0.190%;
derf set: psnr: -0.064%, ssim: -0.002%;
4. At speed 4,
stdhd set: psnr: +0.070%, ssim: +0.143%;
derf set: psnr: -0.104%, ssim: +0.039%;
The speedup ranges from several percent to 60+%.
speed1 speed2 speed3 speed4
(1080p, 100f):
old_town_cross: 48.2% 23.9% 20.8% 16.5%
park_joy: 11.4% 17.8% 29.4% 18.2%
pedestrian_area: 10.7% 4.0% 4.2% 2.4%
(720p, 200f):
mobcal: 68.1% 36.3% 34.4% 17.7%
parkrun: 15.8% 24.2% 37.1% 16.8%
shields: 45.1% 32.8% 30.1% 9.6%
(cif, 300f)
bus: 3.7% 10.4% 14.0% 7.9%
deadline: 13.6% 14.8% 12.6% 10.9%
mobile: 5.3% 11.5% 14.7% 10.7%
Change-Id: I246c38fb952ad762ce5e365711235b605f470a66
2014-08-15 02:25:21 +02:00
|
|
|
best_mode_skippable = skippable;
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
|
2016-01-20 01:40:20 +01:00
|
|
|
memcpy(ctx->zcoeff_blk, x->zcoeff_blk[mi->tx_size],
|
2015-08-12 19:41:51 +02:00
|
|
|
sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
|
2017-03-16 23:45:07 +01:00
|
|
|
ctx->sum_y_eobs = x->sum_y_eobs[mi->tx_size];
|
2013-05-01 23:43:33 +02:00
|
|
|
|
2013-07-03 23:47:54 +02:00
|
|
|
// TODO(debargha): enhance this test with a better distortion prediction
|
|
|
|
// based on qp, activity mask and history
|
2014-02-20 18:43:32 +01:00
|
|
|
if ((mode_search_skip_flags & FLAG_EARLY_TERMINATE) &&
|
2013-10-01 17:57:18 +02:00
|
|
|
(mode_index > MIN_EARLY_TERM_INDEX)) {
|
2014-09-24 15:36:34 +02:00
|
|
|
int qstep = xd->plane[0].dequant[1];
|
2013-07-31 18:33:58 +02:00
|
|
|
// TODO(debargha): Enhance this by specializing for each mode_index
|
|
|
|
int scale = 4;
|
2014-09-24 15:36:34 +02:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
|
|
|
|
qstep >>= (xd->bd - 8);
|
|
|
|
}
|
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
2013-07-31 18:33:58 +02:00
|
|
|
if (x->source_variance < UINT_MAX) {
|
|
|
|
const int var_adjust = (x->source_variance < 16);
|
|
|
|
scale -= var_adjust;
|
|
|
|
}
|
2016-07-27 05:43:23 +02:00
|
|
|
if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) {
|
2013-07-03 23:47:54 +02:00
|
|
|
early_term = 1;
|
2013-07-31 18:33:58 +02:00
|
|
|
}
|
|
|
|
}
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* keep record of best compound/single-only prediction */
|
2013-07-24 18:59:36 +02:00
|
|
|
if (!disable_skip && ref_frame != INTRA_FRAME) {
|
2013-11-25 21:45:51 +01:00
|
|
|
int64_t single_rd, hybrid_rd, single_rate, hybrid_rate;
|
2012-08-20 23:43:34 +02:00
|
|
|
|
2013-12-10 00:13:34 +01:00
|
|
|
if (cm->reference_mode == REFERENCE_MODE_SELECT) {
|
2012-08-20 23:43:34 +02:00
|
|
|
single_rate = rate2 - compmode_cost;
|
|
|
|
hybrid_rate = rate2;
|
|
|
|
} else {
|
|
|
|
single_rate = rate2;
|
|
|
|
hybrid_rate = rate2 + compmode_cost;
|
|
|
|
}
|
|
|
|
|
|
|
|
single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
|
|
|
|
hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
|
|
|
|
|
2014-02-28 02:01:48 +01:00
|
|
|
if (!comp_pred) {
|
2014-09-12 02:15:15 +02:00
|
|
|
if (single_rd < best_pred_rd[SINGLE_REFERENCE])
|
2014-02-28 02:01:48 +01:00
|
|
|
best_pred_rd[SINGLE_REFERENCE] = single_rd;
|
|
|
|
} else {
|
2014-09-12 02:15:15 +02:00
|
|
|
if (single_rd < best_pred_rd[COMPOUND_REFERENCE])
|
2014-02-28 02:01:48 +01:00
|
|
|
best_pred_rd[COMPOUND_REFERENCE] = single_rd;
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
2013-11-23 01:35:37 +01:00
|
|
|
if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
|
|
|
|
best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
|
2012-11-08 20:03:00 +01:00
|
|
|
|
2014-02-28 02:01:48 +01:00
|
|
|
/* keep record of best filter type */
|
|
|
|
if (!mode_excluded && cm->interp_filter != BILINEAR) {
|
2016-07-27 05:43:23 +02:00
|
|
|
int64_t ref =
|
|
|
|
filter_cache[cm->interp_filter == SWITCHABLE ? SWITCHABLE_FILTERS
|
|
|
|
: cm->interp_filter];
|
2013-12-17 02:52:59 +01:00
|
|
|
|
2014-02-28 02:01:48 +01:00
|
|
|
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
|
|
|
|
int64_t adj_rd;
|
|
|
|
if (ref == INT64_MAX)
|
|
|
|
adj_rd = 0;
|
2014-11-20 21:42:36 +01:00
|
|
|
else if (filter_cache[i] == INT64_MAX)
|
2014-02-28 02:01:48 +01:00
|
|
|
// when early termination is triggered, the encoder does not have
|
|
|
|
// access to the rate-distortion cost. it only knows that the cost
|
|
|
|
// should be above the maximum valid value. hence it takes the known
|
|
|
|
// maximum plus an arbitrary constant as the rate-distortion cost.
|
2014-11-20 18:41:49 +01:00
|
|
|
adj_rd = mask_filter - ref + 10;
|
2014-02-28 02:01:48 +01:00
|
|
|
else
|
2014-11-20 21:42:36 +01:00
|
|
|
adj_rd = filter_cache[i] - ref;
|
2013-12-17 02:52:59 +01:00
|
|
|
|
2014-02-28 02:01:48 +01:00
|
|
|
adj_rd += this_rd;
|
2015-08-18 03:19:22 +02:00
|
|
|
best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd);
|
2014-02-28 02:01:48 +01:00
|
|
|
}
|
2013-07-08 23:49:33 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (early_term) break;
|
2013-07-03 23:47:54 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (x->skip && !comp_pred) break;
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|
Add encoding option --static-thresh
This option exists in VP8, and it was rewritten in VP9 to support
skipping on different partition levels. After prediction is done,
we can check if the residuals in the partition block will be all
quantized to 0. If this is true, the skip flag is set, and only
prediction data are needed in reconstruction. Based on DCT's energy
conservation property, the skipping check can be estimated in
spatial domain.
The prediction error is calculated and compared to a threshold.
The threshold is determined by the dequant values, and also
adjusted by partition sizes. To be precise, the DC and AC parts
for Y, U, and V planes are checked to decide skipping or not.
Test showed that
1. derf set:
when static-thresh = 1, psnr loss is 0.666%;
when static-thresh = 500, psnr loss is 1.162%;
2. stdhd set:
when static-thresh = 1, psnr loss is 1.249%;
when static-thresh = 500, psnr loss is 1.668%;
For different clips, encoding speedup range is between several
percentage and 20+% when static-thresh <= 500. For example,
clip bitrate static-thresh psnr time
akiyo(cif) 500 0 48.923 5.635s(50f)
akiyo 500 500 48.863 4.402s(50f)
parkjoy(1080p) 4000 0 30.380 77.54s(30f)
parkjoy 4000 500 30.384 69.59s(30f)
sunflower(1080p) 4000 0 44.461 85.2s(30f)
sunflower 4000 500 44.418 78.1s(30f)
Higher static-thresh values give larger speedup with larger
quality loss.
Change-Id: I857031ceb466ff314ab580ac5ec5d18542203c53
2013-07-11 20:15:00 +02:00
|
|
|
|
2014-09-05 00:16:12 +02:00
|
|
|
// The inter modes' rate costs are not calculated precisely in some cases.
|
|
|
|
// Therefore, sometimes, NEWMV is chosen instead of NEARESTMV, NEARMV, and
|
|
|
|
// ZEROMV. Here, checks are added for those cases, and the mode decisions
|
|
|
|
// are corrected.
|
|
|
|
if (best_mbmode.mode == NEWMV) {
|
2016-07-27 05:43:23 +02:00
|
|
|
const MV_REFERENCE_FRAME refs[2] = { best_mbmode.ref_frame[0],
|
|
|
|
best_mbmode.ref_frame[1] };
|
2014-09-05 00:16:12 +02:00
|
|
|
int comp_pred_mode = refs[1] > INTRA_FRAME;
|
|
|
|
|
|
|
|
if (frame_mv[NEARESTMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
|
2016-07-27 05:43:23 +02:00
|
|
|
((comp_pred_mode &&
|
|
|
|
frame_mv[NEARESTMV][refs[1]].as_int == best_mbmode.mv[1].as_int) ||
|
|
|
|
!comp_pred_mode))
|
2014-09-05 00:16:12 +02:00
|
|
|
best_mbmode.mode = NEARESTMV;
|
|
|
|
else if (frame_mv[NEARMV][refs[0]].as_int == best_mbmode.mv[0].as_int &&
|
2016-07-27 05:43:23 +02:00
|
|
|
((comp_pred_mode &&
|
|
|
|
frame_mv[NEARMV][refs[1]].as_int == best_mbmode.mv[1].as_int) ||
|
|
|
|
!comp_pred_mode))
|
2014-09-05 00:16:12 +02:00
|
|
|
best_mbmode.mode = NEARMV;
|
|
|
|
else if (best_mbmode.mv[0].as_int == 0 &&
|
2016-07-27 05:43:23 +02:00
|
|
|
((comp_pred_mode && best_mbmode.mv[1].as_int == 0) ||
|
|
|
|
!comp_pred_mode))
|
2014-09-05 00:16:12 +02:00
|
|
|
best_mbmode.mode = ZEROMV;
|
|
|
|
}
|
|
|
|
|
2014-10-14 00:56:37 +02:00
|
|
|
if (best_mode_index < 0 || best_rd >= best_rd_so_far) {
|
2017-02-10 11:55:50 +01:00
|
|
|
// If adaptive interp filter is enabled, then the current leaf node of 8x8
|
|
|
|
// data is needed for sub8x8. Hence preserve the context.
|
2017-02-24 20:40:22 +01:00
|
|
|
if (cpi->row_mt && bsize == BLOCK_8X8) ctx->mic = *xd->mi[0];
|
2014-10-17 18:31:42 +02:00
|
|
|
rd_cost->rate = INT_MAX;
|
2014-10-14 00:56:37 +02:00
|
|
|
rd_cost->rdcost = INT64_MAX;
|
|
|
|
return;
|
|
|
|
}
|
2013-07-01 17:27:12 +02:00
|
|
|
|
2013-07-16 19:12:34 +02:00
|
|
|
// If we used an estimate for the uv intra rd in the loop above...
|
2014-10-14 01:13:59 +02:00
|
|
|
if (sf->use_uv_intra_rd_estimate) {
|
2013-07-16 19:12:34 +02:00
|
|
|
// Do Intra UV best rd mode selection if best mode choice above was intra.
|
2014-09-09 20:58:10 +02:00
|
|
|
if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
|
2013-12-03 00:24:41 +01:00
|
|
|
TX_SIZE uv_tx_size;
|
2016-01-20 01:40:20 +01:00
|
|
|
*mi = best_mbmode;
|
|
|
|
uv_tx_size = get_uv_tx_size(mi, &xd->plane[1]);
|
2013-11-07 23:56:58 +01:00
|
|
|
rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra[uv_tx_size],
|
2013-07-16 19:12:34 +02:00
|
|
|
&rate_uv_tokenonly[uv_tx_size],
|
2016-07-27 05:43:23 +02:00
|
|
|
&dist_uv[uv_tx_size], &skip_uv[uv_tx_size],
|
2013-12-09 20:03:02 +01:00
|
|
|
bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize,
|
|
|
|
uv_tx_size);
|
2013-07-16 19:12:34 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-24 21:26:57 +01:00
|
|
|
assert((cm->interp_filter == SWITCHABLE) ||
|
|
|
|
(cm->interp_filter == best_mbmode.interp_filter) ||
|
2013-11-20 05:25:55 +01:00
|
|
|
!is_inter_block(&best_mbmode));
|
2012-12-20 23:56:19 +01:00
|
|
|
|
Adaptive mode search scheduling
This commit enables an adaptive mode search order scheduling scheme
in the rate-distortion optimization. It changes the compression
performance by -0.433% and -0.420% for derf and stdhd respectively.
It provides speed improvement for speed 3:
bus CIF 1000 kbps
24590 b/f, 35.513 dB, 7864 ms ->
24696 b/f, 35.491 dB, 7408 ms (6% speed-up)
stockholm 720p 1000 kbps
8983 b/f, 35.078 dB, 65698 ms ->
8962 b/f, 35.054 dB, 60298 ms (8%)
old_town_cross 720p 1000 kbps
11804 b/f, 35.666 dB, 62492 ms ->
11778 b/f, 35.609 dB, 56040 ms (10%)
blue_sky 1080p 1500 kbps
57173 b/f, 36.179 dB, 77879 ms ->
57199 b/f, 36.131 dB, 69821 ms (10%)
pedestrian_area 1080p 2000 kbps
74241 b/f, 41.105 dB, 144031 ms ->
74271 b/f, 41.091 dB, 133614 ms (8%)
Change-Id: Iaad28cbc99399030fc5f9951eb5aa7fa633f320e
2014-09-18 22:37:20 +02:00
|
|
|
if (!cpi->rc.is_src_frame_alt_ref)
|
2014-10-30 01:37:54 +01:00
|
|
|
vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact,
|
2017-04-24 21:06:49 +02:00
|
|
|
sf->adaptive_rd_thresh, bsize, best_mode_index);
|
2013-06-03 13:51:29 +02:00
|
|
|
|
2012-08-20 23:43:34 +02:00
|
|
|
// macroblock modes
|
2016-01-20 01:40:20 +01:00
|
|
|
*mi = best_mbmode;
|
2013-06-28 02:41:54 +02:00
|
|
|
x->skip |= best_skip2;
|
2012-11-08 20:03:00 +01:00
|
|
|
|
2013-11-23 01:35:37 +01:00
|
|
|
for (i = 0; i < REFERENCE_MODES; ++i) {
|
2013-09-28 01:02:49 +02:00
|
|
|
if (best_pred_rd[i] == INT64_MAX)
|
|
|
|
best_pred_diff[i] = INT_MIN;
|
|
|
|
else
|
|
|
|
best_pred_diff[i] = best_rd - best_pred_rd[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!x->skip) {
|
2013-10-30 22:40:34 +01:00
|
|
|
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
|
2013-09-28 01:02:49 +02:00
|
|
|
if (best_filter_rd[i] == INT64_MAX)
|
|
|
|
best_filter_diff[i] = 0;
|
|
|
|
else
|
|
|
|
best_filter_diff[i] = best_rd - best_filter_rd[i];
|
|
|
|
}
|
2014-01-24 21:26:57 +01:00
|
|
|
if (cm->interp_filter == SWITCHABLE)
|
2013-09-28 01:02:49 +02:00
|
|
|
assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
|
|
|
|
} else {
|
2014-02-28 02:01:48 +01:00
|
|
|
vp9_zero(best_filter_diff);
|
2013-09-28 01:02:49 +02:00
|
|
|
}
|
|
|
|
|
2014-09-16 01:50:19 +02:00
|
|
|
// TODO(yunqingwang): Moving this line in front of the above best_filter_diff
|
|
|
|
// updating code causes PSNR loss. Need to figure out the confliction.
|
|
|
|
x->skip |= best_mode_skippable;
|
|
|
|
|
2014-10-12 07:34:02 +02:00
|
|
|
if (!x->skip && !x->select_tx_size) {
|
2014-10-09 00:02:37 +02:00
|
|
|
int has_high_freq_coeff = 0;
|
|
|
|
int plane;
|
2016-07-27 05:43:23 +02:00
|
|
|
int max_plane = is_inter_block(xd->mi[0]) ? MAX_MB_PLANE : 1;
|
2014-10-09 00:02:37 +02:00
|
|
|
for (plane = 0; plane < max_plane; ++plane) {
|
|
|
|
x->plane[plane].eobs = ctx->eobs_pbuf[plane][1];
|
|
|
|
has_high_freq_coeff |= vp9_has_high_freq_in_plane(x, bsize, plane);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (plane = max_plane; plane < MAX_MB_PLANE; ++plane) {
|
|
|
|
x->plane[plane].eobs = ctx->eobs_pbuf[plane][2];
|
|
|
|
has_high_freq_coeff |= vp9_has_high_freq_in_plane(x, bsize, plane);
|
|
|
|
}
|
|
|
|
|
|
|
|
best_mode_skippable |= !has_high_freq_coeff;
|
|
|
|
}
|
|
|
|
|
2014-11-15 00:29:18 +01:00
|
|
|
assert(best_mode_index >= 0);
|
|
|
|
|
Early termination in encoding partition search
In the partition search, the encoder checks all possible
partitionings in the superblock's partition search tree.
This patch proposed a set of criteria for partition search
early termination, which effectively decided whether or
not to terminate the search in current branch based on the
"skippable" result of the quantized transform coefficients.
The "skippable" information was gathered during the
partition mode search, and no overhead calculations were
introduced.
This patch gives significant encoding speed gains without
sacrificing the quality.
Borg test results:
1. At speed 1,
stdhd set: psnr: +0.074%, ssim: +0.093%;
derf set: psnr: -0.024%, ssim: +0.011%;
2. At speed 2,
stdhd set: psnr: +0.033%, ssim: +0.100%;
derf set: psnr: -0.062%, ssim: +0.003%;
3. At speed 3,
stdhd set: psnr: +0.060%, ssim: +0.190%;
derf set: psnr: -0.064%, ssim: -0.002%;
4. At speed 4,
stdhd set: psnr: +0.070%, ssim: +0.143%;
derf set: psnr: -0.104%, ssim: +0.039%;
The speedup ranges from several percent to 60+%.
speed1 speed2 speed3 speed4
(1080p, 100f):
old_town_cross: 48.2% 23.9% 20.8% 16.5%
park_joy: 11.4% 17.8% 29.4% 18.2%
pedestrian_area: 10.7% 4.0% 4.2% 2.4%
(720p, 200f):
mobcal: 68.1% 36.3% 34.4% 17.7%
parkrun: 15.8% 24.2% 37.1% 16.8%
shields: 45.1% 32.8% 30.1% 9.6%
(cif, 300f)
bus: 3.7% 10.4% 14.0% 7.9%
deadline: 13.6% 14.8% 12.6% 10.9%
mobile: 5.3% 11.5% 14.7% 10.7%
Change-Id: I246c38fb952ad762ce5e365711235b605f470a66
2014-08-15 02:25:21 +02:00
|
|
|
store_coding_context(x, ctx, best_mode_index, best_pred_diff,
|
2015-07-29 22:37:41 +02:00
|
|
|
best_filter_diff, best_mode_skippable);
|
2013-09-28 01:02:49 +02:00
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
void vp9_rd_pick_inter_mode_sb_seg_skip(VP9_COMP *cpi, TileDataEnc *tile_data,
|
|
|
|
MACROBLOCK *x, RD_COST *rd_cost,
|
2014-10-14 00:56:37 +02:00
|
|
|
BLOCK_SIZE bsize,
|
|
|
|
PICK_MODE_CONTEXT *ctx,
|
|
|
|
int64_t best_rd_so_far) {
|
2014-05-31 01:02:46 +02:00
|
|
|
VP9_COMMON *const cm = &cpi->common;
|
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO *const mi = xd->mi[0];
|
|
|
|
unsigned char segment_id = mi->segment_id;
|
2014-05-31 01:02:46 +02:00
|
|
|
const int comp_pred = 0;
|
|
|
|
int i;
|
|
|
|
int64_t best_pred_diff[REFERENCE_MODES];
|
|
|
|
int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
|
|
|
|
unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
|
2015-07-20 22:49:15 +02:00
|
|
|
vpx_prob comp_mode_p;
|
2014-05-31 01:02:46 +02:00
|
|
|
INTERP_FILTER best_filter = SWITCHABLE;
|
|
|
|
int64_t this_rd = INT64_MAX;
|
|
|
|
int rate2 = 0;
|
|
|
|
const int64_t distortion2 = 0;
|
|
|
|
|
|
|
|
x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
|
|
|
|
|
|
|
|
estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
|
|
|
|
&comp_mode_p);
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
for (i = 0; i < MAX_REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX;
|
|
|
|
for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i) x->pred_mv_sad[i] = INT_MAX;
|
2014-05-31 01:02:46 +02:00
|
|
|
|
2014-10-09 21:32:56 +02:00
|
|
|
rd_cost->rate = INT_MAX;
|
2014-05-31 01:02:46 +02:00
|
|
|
|
2015-06-11 13:20:55 +02:00
|
|
|
assert(segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP));
|
2014-05-31 01:02:46 +02:00
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->mode = ZEROMV;
|
|
|
|
mi->uv_mode = DC_PRED;
|
|
|
|
mi->ref_frame[0] = LAST_FRAME;
|
|
|
|
mi->ref_frame[1] = NONE;
|
|
|
|
mi->mv[0].as_int = 0;
|
2014-05-31 01:02:46 +02:00
|
|
|
x->skip = 1;
|
|
|
|
|
2017-03-16 23:45:07 +01:00
|
|
|
ctx->sum_y_eobs = 0;
|
|
|
|
|
2014-05-31 01:02:46 +02:00
|
|
|
if (cm->interp_filter != BILINEAR) {
|
|
|
|
best_filter = EIGHTTAP;
|
|
|
|
if (cm->interp_filter == SWITCHABLE &&
|
|
|
|
x->source_variance >= cpi->sf.disable_filter_search_var_thresh) {
|
|
|
|
int rs;
|
|
|
|
int best_rs = INT_MAX;
|
|
|
|
for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->interp_filter = i;
|
2014-11-21 20:11:06 +01:00
|
|
|
rs = vp9_get_switchable_rate(cpi, xd);
|
2014-05-31 01:02:46 +02:00
|
|
|
if (rs < best_rs) {
|
|
|
|
best_rs = rs;
|
2016-01-20 01:40:20 +01:00
|
|
|
best_filter = mi->interp_filter;
|
2014-05-31 01:02:46 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Set the appropriate filter
|
|
|
|
if (cm->interp_filter == SWITCHABLE) {
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->interp_filter = best_filter;
|
2014-11-21 20:11:06 +01:00
|
|
|
rate2 += vp9_get_switchable_rate(cpi, xd);
|
2014-05-31 01:02:46 +02:00
|
|
|
} else {
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->interp_filter = cm->interp_filter;
|
2014-05-31 01:02:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (cm->reference_mode == REFERENCE_MODE_SELECT)
|
|
|
|
rate2 += vp9_cost_bit(comp_mode_p, comp_pred);
|
|
|
|
|
|
|
|
// Estimate the reference frame signaling cost and add it
|
|
|
|
// to the rolling cost variable.
|
|
|
|
rate2 += ref_costs_single[LAST_FRAME];
|
|
|
|
this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
|
|
|
|
|
2014-10-09 21:32:56 +02:00
|
|
|
rd_cost->rate = rate2;
|
|
|
|
rd_cost->dist = distortion2;
|
|
|
|
rd_cost->rdcost = this_rd;
|
2014-05-31 01:02:46 +02:00
|
|
|
|
2014-10-09 21:32:56 +02:00
|
|
|
if (this_rd >= best_rd_so_far) {
|
2014-10-17 18:31:42 +02:00
|
|
|
rd_cost->rate = INT_MAX;
|
2014-10-09 21:32:56 +02:00
|
|
|
rd_cost->rdcost = INT64_MAX;
|
2014-10-14 00:56:37 +02:00
|
|
|
return;
|
2014-10-09 21:32:56 +02:00
|
|
|
}
|
2014-05-31 01:02:46 +02:00
|
|
|
|
|
|
|
assert((cm->interp_filter == SWITCHABLE) ||
|
2016-01-20 01:40:20 +01:00
|
|
|
(cm->interp_filter == mi->interp_filter));
|
2014-05-31 01:02:46 +02:00
|
|
|
|
2014-10-30 01:37:54 +01:00
|
|
|
vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact,
|
2017-04-24 21:06:49 +02:00
|
|
|
cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
|
2014-05-31 01:02:46 +02:00
|
|
|
|
|
|
|
vp9_zero(best_pred_diff);
|
|
|
|
vp9_zero(best_filter_diff);
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE);
|
|
|
|
store_coding_context(x, ctx, THR_ZEROMV, best_pred_diff, best_filter_diff, 0);
|
2014-05-31 01:02:46 +02:00
|
|
|
}
|
2013-09-28 01:02:49 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, TileDataEnc *tile_data,
|
|
|
|
MACROBLOCK *x, int mi_row, int mi_col,
|
|
|
|
RD_COST *rd_cost, BLOCK_SIZE bsize,
|
2014-10-14 00:56:37 +02:00
|
|
|
PICK_MODE_CONTEXT *ctx,
|
|
|
|
int64_t best_rd_so_far) {
|
2014-04-09 20:15:59 +02:00
|
|
|
VP9_COMMON *const cm = &cpi->common;
|
2014-04-10 00:00:14 +02:00
|
|
|
RD_OPT *const rd_opt = &cpi->rd;
|
2014-10-14 01:13:59 +02:00
|
|
|
SPEED_FEATURES *const sf = &cpi->sf;
|
2014-04-09 20:15:59 +02:00
|
|
|
MACROBLOCKD *const xd = &x->e_mbd;
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO *const mi = xd->mi[0];
|
2014-04-09 20:15:59 +02:00
|
|
|
const struct segmentation *const seg = &cm->seg;
|
2013-09-28 01:02:49 +02:00
|
|
|
MV_REFERENCE_FRAME ref_frame, second_ref_frame;
|
2016-01-20 01:40:20 +01:00
|
|
|
unsigned char segment_id = mi->segment_id;
|
2013-09-28 01:02:49 +02:00
|
|
|
int comp_pred, i;
|
|
|
|
int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES];
|
|
|
|
struct buf_2d yv12_mb[4][MAX_MB_PLANE];
|
|
|
|
static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
|
|
|
|
VP9_ALT_FLAG };
|
|
|
|
int64_t best_rd = best_rd_so_far;
|
|
|
|
int64_t best_yrd = best_rd_so_far; // FIXME(rbultje) more precise
|
2013-11-23 01:35:37 +01:00
|
|
|
int64_t best_pred_diff[REFERENCE_MODES];
|
|
|
|
int64_t best_pred_rd[REFERENCE_MODES];
|
2013-10-30 22:40:34 +01:00
|
|
|
int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS];
|
|
|
|
int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO best_mbmode;
|
2014-04-17 17:40:47 +02:00
|
|
|
int ref_index, best_ref_index = 0;
|
2013-09-28 01:02:49 +02:00
|
|
|
unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES];
|
2015-07-20 22:49:15 +02:00
|
|
|
vpx_prob comp_mode_p;
|
2014-01-24 21:26:57 +01:00
|
|
|
INTERP_FILTER tmp_best_filter = SWITCHABLE;
|
2014-04-10 02:58:12 +02:00
|
|
|
int rate_uv_intra, rate_uv_tokenonly;
|
|
|
|
int64_t dist_uv;
|
|
|
|
int skip_uv;
|
2014-04-12 00:26:24 +02:00
|
|
|
PREDICTION_MODE mode_uv = DC_PRED;
|
2014-09-24 15:36:34 +02:00
|
|
|
const int intra_cost_penalty = vp9_get_intra_cost_penalty(
|
2016-07-27 05:43:23 +02:00
|
|
|
cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
|
2013-09-28 01:02:49 +02:00
|
|
|
int_mv seg_mvs[4][MAX_REF_FRAMES];
|
2013-10-03 02:05:31 +02:00
|
|
|
b_mode_info best_bmodes[4];
|
2013-09-28 01:02:49 +02:00
|
|
|
int best_skip2 = 0;
|
2014-09-12 20:00:15 +02:00
|
|
|
int ref_frame_skip_mask[2] = { 0 };
|
2014-11-20 18:41:49 +01:00
|
|
|
int64_t mask_filter = 0;
|
2014-11-20 21:42:36 +01:00
|
|
|
int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
|
2015-06-24 12:36:51 +02:00
|
|
|
int internal_active_edge =
|
2016-07-27 05:43:23 +02:00
|
|
|
vp9_active_edge_sb(cpi, mi_row, mi_col) && vp9_internal_image_edge(cpi);
|
2017-02-10 11:55:50 +01:00
|
|
|
const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
|
2013-09-28 01:02:49 +02:00
|
|
|
|
2014-10-14 01:13:59 +02:00
|
|
|
x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
|
2015-04-24 05:47:40 +02:00
|
|
|
memset(x->zcoeff_blk[TX_4X4], 0, 4);
|
2014-05-13 20:18:25 +02:00
|
|
|
vp9_zero(best_mbmode);
|
2013-09-28 01:02:49 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
|
2014-11-20 21:42:36 +01:00
|
|
|
|
2013-09-28 01:02:49 +02:00
|
|
|
for (i = 0; i < 4; i++) {
|
|
|
|
int j;
|
2016-07-27 05:43:23 +02:00
|
|
|
for (j = 0; j < MAX_REF_FRAMES; j++) seg_mvs[i][j].as_int = INVALID_MV;
|
2013-09-28 01:02:49 +02:00
|
|
|
}
|
|
|
|
|
2014-04-19 01:56:43 +02:00
|
|
|
estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp,
|
2013-09-28 01:02:49 +02:00
|
|
|
&comp_mode_p);
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX;
|
2013-10-30 22:40:34 +01:00
|
|
|
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
|
2013-09-28 01:02:49 +02:00
|
|
|
best_filter_rd[i] = INT64_MAX;
|
2014-04-10 02:58:12 +02:00
|
|
|
rate_uv_intra = INT_MAX;
|
2013-09-28 01:02:49 +02:00
|
|
|
|
2014-10-09 21:32:56 +02:00
|
|
|
rd_cost->rate = INT_MAX;
|
2013-09-28 01:02:49 +02:00
|
|
|
|
|
|
|
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) {
|
|
|
|
if (cpi->ref_frame_flags & flag_list[ref_frame]) {
|
2015-06-16 15:38:34 +02:00
|
|
|
setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col,
|
2016-07-27 05:43:23 +02:00
|
|
|
frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb);
|
2014-09-12 20:00:15 +02:00
|
|
|
} else {
|
|
|
|
ref_frame_skip_mask[0] |= (1 << ref_frame);
|
|
|
|
ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
|
2013-09-28 01:02:49 +02:00
|
|
|
}
|
|
|
|
frame_mv[NEWMV][ref_frame].as_int = INVALID_MV;
|
|
|
|
frame_mv[ZEROMV][ref_frame].as_int = 0;
|
|
|
|
}
|
|
|
|
|
2014-04-17 17:40:47 +02:00
|
|
|
for (ref_index = 0; ref_index < MAX_REFS; ++ref_index) {
|
2013-09-28 01:02:49 +02:00
|
|
|
int mode_excluded = 0;
|
|
|
|
int64_t this_rd = INT64_MAX;
|
|
|
|
int disable_skip = 0;
|
|
|
|
int compmode_cost = 0;
|
|
|
|
int rate2 = 0, rate_y = 0, rate_uv = 0;
|
|
|
|
int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0;
|
|
|
|
int skippable = 0;
|
|
|
|
int i;
|
|
|
|
int this_skip2 = 0;
|
|
|
|
int64_t total_sse = INT_MAX;
|
|
|
|
int early_term = 0;
|
2015-09-08 18:53:23 +02:00
|
|
|
struct buf_2d backup_yv12[2][MAX_MB_PLANE];
|
2013-09-28 01:02:49 +02:00
|
|
|
|
2014-04-17 17:40:47 +02:00
|
|
|
ref_frame = vp9_ref_order[ref_index].ref_frame[0];
|
|
|
|
second_ref_frame = vp9_ref_order[ref_index].ref_frame[1];
|
2013-09-28 01:02:49 +02:00
|
|
|
|
2017-03-16 23:45:07 +01:00
|
|
|
vp9_zero(x->sum_y_eobs);
|
|
|
|
|
2016-01-07 18:43:26 +01:00
|
|
|
#if CONFIG_BETTER_HW_COMPATIBILITY
|
|
|
|
// forbid 8X4 and 4X8 partitions if any reference frame is scaled.
|
|
|
|
if (bsize == BLOCK_8X4 || bsize == BLOCK_4X8) {
|
|
|
|
int ref_scaled = vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf);
|
|
|
|
if (second_ref_frame > INTRA_FRAME)
|
|
|
|
ref_scaled += vp9_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf);
|
2016-07-27 05:43:23 +02:00
|
|
|
if (ref_scaled) continue;
|
2016-01-07 18:43:26 +01:00
|
|
|
}
|
|
|
|
#endif
|
2013-10-04 17:51:22 +02:00
|
|
|
// Look at the reference frame of the best mode so far and set the
|
|
|
|
// skip mask to look at a subset of the remaining modes.
|
2014-10-14 01:13:59 +02:00
|
|
|
if (ref_index > 2 && sf->mode_skip_start < MAX_MODES) {
|
2014-04-17 17:40:47 +02:00
|
|
|
if (ref_index == 3) {
|
2014-09-15 18:59:20 +02:00
|
|
|
switch (best_mbmode.ref_frame[0]) {
|
2016-07-27 05:43:23 +02:00
|
|
|
case INTRA_FRAME: break;
|
2013-10-04 17:51:22 +02:00
|
|
|
case LAST_FRAME:
|
2014-09-12 20:00:15 +02:00
|
|
|
ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME);
|
|
|
|
ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
|
2013-10-04 17:51:22 +02:00
|
|
|
break;
|
|
|
|
case GOLDEN_FRAME:
|
2014-09-12 20:00:15 +02:00
|
|
|
ref_frame_skip_mask[0] |= (1 << LAST_FRAME) | (1 << ALTREF_FRAME);
|
|
|
|
ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK;
|
2013-10-04 17:51:22 +02:00
|
|
|
break;
|
|
|
|
case ALTREF_FRAME:
|
2014-09-12 20:00:15 +02:00
|
|
|
ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << LAST_FRAME);
|
2013-10-04 17:51:22 +02:00
|
|
|
break;
|
|
|
|
case NONE:
|
2016-07-27 05:43:23 +02:00
|
|
|
case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break;
|
2013-10-04 17:51:22 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-01-23 20:47:15 +01:00
|
|
|
if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
|
2015-08-18 03:19:22 +02:00
|
|
|
(ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame))))
|
2014-09-12 20:00:15 +02:00
|
|
|
continue;
|
|
|
|
|
2013-09-28 01:02:49 +02:00
|
|
|
// Test best rd so far against threshold for trying this mode.
|
2015-06-24 12:36:51 +02:00
|
|
|
if (!internal_active_edge &&
|
|
|
|
rd_less_than_thresh(best_rd,
|
2014-04-17 17:40:47 +02:00
|
|
|
rd_opt->threshes[segment_id][bsize][ref_index],
|
2017-02-10 11:55:50 +01:00
|
|
|
&rd_thresh_freq_fact[ref_index]))
|
2013-09-28 01:02:49 +02:00
|
|
|
continue;
|
|
|
|
|
2017-03-31 19:39:57 +02:00
|
|
|
// This is only used in motion vector unit test.
|
|
|
|
if (cpi->oxcf.motion_vector_unit_test && ref_frame == INTRA_FRAME) continue;
|
|
|
|
|
2013-09-28 01:02:49 +02:00
|
|
|
comp_pred = second_ref_frame > INTRA_FRAME;
|
|
|
|
if (comp_pred) {
|
2016-07-27 05:43:23 +02:00
|
|
|
if (!cpi->allow_comp_inter_inter) continue;
|
|
|
|
if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue;
|
2014-04-09 20:15:59 +02:00
|
|
|
// Do not allow compound prediction if the segment level reference frame
|
|
|
|
// feature is in use as in this case there can only be one reference.
|
2016-07-27 05:43:23 +02:00
|
|
|
if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue;
|
2014-09-15 19:12:05 +02:00
|
|
|
|
2014-10-14 01:13:59 +02:00
|
|
|
if ((sf->mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) &&
|
2014-09-15 18:59:20 +02:00
|
|
|
best_mbmode.ref_frame[0] == INTRA_FRAME)
|
2014-04-09 20:15:59 +02:00
|
|
|
continue;
|
2013-09-28 01:02:49 +02:00
|
|
|
}
|
|
|
|
|
2014-07-24 23:43:06 +02:00
|
|
|
if (comp_pred)
|
|
|
|
mode_excluded = cm->reference_mode == SINGLE_REFERENCE;
|
|
|
|
else if (ref_frame != INTRA_FRAME)
|
|
|
|
mode_excluded = cm->reference_mode == COMPOUND_REFERENCE;
|
2013-09-28 01:02:49 +02:00
|
|
|
|
|
|
|
// If the segment reference frame feature is enabled....
|
|
|
|
// then do nothing if the current ref frame is not allowed..
|
2015-06-11 13:20:55 +02:00
|
|
|
if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
|
2015-06-11 18:52:00 +02:00
|
|
|
get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) {
|
2013-09-28 01:02:49 +02:00
|
|
|
continue;
|
2016-07-27 05:43:23 +02:00
|
|
|
// Disable this drop out case if the ref frame
|
|
|
|
// segment level feature is enabled for this segment. This is to
|
|
|
|
// prevent the possibility that we end up unable to pick any mode.
|
2015-06-11 13:20:55 +02:00
|
|
|
} else if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) {
|
2013-09-28 01:02:49 +02:00
|
|
|
// Only consider ZEROMV/ALTREF_FRAME for alt ref frame,
|
|
|
|
// unless ARNR filtering is enabled in which case we want
|
|
|
|
// an unfiltered alternative. We allow near/nearest as well
|
|
|
|
// because they may result in zero-zero MVs but be cheaper.
|
2013-12-13 18:32:05 +01:00
|
|
|
if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0))
|
2013-09-28 01:02:49 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->tx_size = TX_4X4;
|
|
|
|
mi->uv_mode = DC_PRED;
|
|
|
|
mi->ref_frame[0] = ref_frame;
|
|
|
|
mi->ref_frame[1] = second_ref_frame;
|
2014-04-09 20:15:59 +02:00
|
|
|
// Evaluate all sub-pel filters irrespective of whether we can use
|
|
|
|
// them for this frame.
|
2016-07-27 05:43:23 +02:00
|
|
|
mi->interp_filter =
|
|
|
|
cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter;
|
2014-04-09 20:15:59 +02:00
|
|
|
x->skip = 0;
|
|
|
|
set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
|
|
|
|
|
|
|
|
// Select prediction reference frames.
|
|
|
|
for (i = 0; i < MAX_MB_PLANE; i++) {
|
|
|
|
xd->plane[i].pre[0] = yv12_mb[ref_frame][i];
|
2016-07-27 05:43:23 +02:00
|
|
|
if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i];
|
2014-04-09 20:15:59 +02:00
|
|
|
}
|
|
|
|
|
2013-09-28 01:02:49 +02:00
|
|
|
if (ref_frame == INTRA_FRAME) {
|
|
|
|
int rate;
|
2016-07-27 05:43:23 +02:00
|
|
|
if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y, &distortion_y,
|
|
|
|
best_rd) >= best_rd)
|
2013-09-28 01:02:49 +02:00
|
|
|
continue;
|
|
|
|
rate2 += rate;
|
|
|
|
rate2 += intra_cost_penalty;
|
|
|
|
distortion2 += distortion_y;
|
|
|
|
|
2014-04-10 02:58:12 +02:00
|
|
|
if (rate_uv_intra == INT_MAX) {
|
2016-07-27 05:43:23 +02:00
|
|
|
choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4, &rate_uv_intra,
|
|
|
|
&rate_uv_tokenonly, &dist_uv, &skip_uv, &mode_uv);
|
2013-09-28 01:02:49 +02:00
|
|
|
}
|
2014-04-10 02:58:12 +02:00
|
|
|
rate2 += rate_uv_intra;
|
|
|
|
rate_uv = rate_uv_tokenonly;
|
|
|
|
distortion2 += dist_uv;
|
|
|
|
distortion_uv = dist_uv;
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->uv_mode = mode_uv;
|
2013-09-28 01:02:49 +02:00
|
|
|
} else {
|
|
|
|
int rate;
|
|
|
|
int64_t distortion;
|
|
|
|
int64_t this_rd_thresh;
|
|
|
|
int64_t tmp_rd, tmp_best_rd = INT64_MAX, tmp_best_rdu = INT64_MAX;
|
|
|
|
int tmp_best_rate = INT_MAX, tmp_best_ratey = INT_MAX;
|
|
|
|
int64_t tmp_best_distortion = INT_MAX, tmp_best_sse, uv_sse;
|
|
|
|
int tmp_best_skippable = 0;
|
|
|
|
int switchable_filter_index;
|
2016-07-27 05:43:23 +02:00
|
|
|
int_mv *second_ref =
|
|
|
|
comp_pred ? &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL;
|
2013-10-03 02:05:31 +02:00
|
|
|
b_mode_info tmp_best_bmodes[16];
|
2016-01-20 01:40:20 +01:00
|
|
|
MODE_INFO tmp_best_mbmode;
|
2013-09-28 01:02:49 +02:00
|
|
|
BEST_SEG_INFO bsi[SWITCHABLE_FILTERS];
|
|
|
|
int pred_exists = 0;
|
|
|
|
int uv_skippable;
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
YV12_BUFFER_CONFIG *scaled_ref_frame[2] = { NULL, NULL };
|
2015-09-09 21:29:57 +02:00
|
|
|
int ref;
|
|
|
|
|
2015-09-08 18:53:23 +02:00
|
|
|
for (ref = 0; ref < 2; ++ref) {
|
2016-07-27 05:43:23 +02:00
|
|
|
scaled_ref_frame[ref] =
|
|
|
|
mi->ref_frame[ref] > INTRA_FRAME
|
|
|
|
? vp9_get_scaled_ref_frame(cpi, mi->ref_frame[ref])
|
|
|
|
: NULL;
|
2015-09-09 21:29:57 +02:00
|
|
|
|
2015-09-08 18:53:23 +02:00
|
|
|
if (scaled_ref_frame[ref]) {
|
|
|
|
int i;
|
|
|
|
// Swap out the reference frame for a version that's been scaled to
|
|
|
|
// match the resolution of the current frame, allowing the existing
|
|
|
|
// motion search code to be used without additional modifications.
|
|
|
|
for (i = 0; i < MAX_MB_PLANE; i++)
|
|
|
|
backup_yv12[ref][i] = xd->plane[i].pre[ref];
|
|
|
|
vp9_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
|
|
|
|
NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
this_rd_thresh = (ref_frame == LAST_FRAME)
|
|
|
|
? rd_opt->threshes[segment_id][bsize][THR_LAST]
|
|
|
|
: rd_opt->threshes[segment_id][bsize][THR_ALTR];
|
|
|
|
this_rd_thresh = (ref_frame == GOLDEN_FRAME)
|
|
|
|
? rd_opt->threshes[segment_id][bsize][THR_GOLD]
|
|
|
|
: this_rd_thresh;
|
2013-12-16 23:38:45 +01:00
|
|
|
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
|
2014-11-20 21:42:36 +01:00
|
|
|
filter_cache[i] = INT64_MAX;
|
2013-12-16 23:38:45 +01:00
|
|
|
|
2014-01-24 21:26:57 +01:00
|
|
|
if (cm->interp_filter != BILINEAR) {
|
2013-09-28 01:02:49 +02:00
|
|
|
tmp_best_filter = EIGHTTAP;
|
2014-10-14 01:13:59 +02:00
|
|
|
if (x->source_variance < sf->disable_filter_search_var_thresh) {
|
2013-09-28 01:02:49 +02:00
|
|
|
tmp_best_filter = EIGHTTAP;
|
2014-10-14 01:13:59 +02:00
|
|
|
} else if (sf->adaptive_pred_interp_filter == 1 &&
|
2014-01-24 21:26:57 +01:00
|
|
|
ctx->pred_interp_filter < SWITCHABLE) {
|
|
|
|
tmp_best_filter = ctx->pred_interp_filter;
|
2014-10-14 01:13:59 +02:00
|
|
|
} else if (sf->adaptive_pred_interp_filter == 2) {
|
2016-07-27 05:43:23 +02:00
|
|
|
tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE
|
|
|
|
? ctx->pred_interp_filter
|
|
|
|
: 0;
|
2013-09-28 01:02:49 +02:00
|
|
|
} else {
|
|
|
|
for (switchable_filter_index = 0;
|
|
|
|
switchable_filter_index < SWITCHABLE_FILTERS;
|
|
|
|
++switchable_filter_index) {
|
|
|
|
int newbest, rs;
|
|
|
|
int64_t rs_rd;
|
2015-06-29 18:27:11 +02:00
|
|
|
MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext;
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->interp_filter = switchable_filter_index;
|
2016-07-27 05:43:23 +02:00
|
|
|
tmp_rd = rd_pick_best_sub8x8_mode(
|
|
|
|
cpi, x, &mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd,
|
|
|
|
&rate, &rate_y, &distortion, &skippable, &total_sse,
|
|
|
|
(int)this_rd_thresh, seg_mvs, bsi, switchable_filter_index,
|
|
|
|
mi_row, mi_col);
|
|
|
|
|
|
|
|
if (tmp_rd == INT64_MAX) continue;
|
2014-11-21 20:11:06 +01:00
|
|
|
rs = vp9_get_switchable_rate(cpi, xd);
|
2013-09-28 01:02:49 +02:00
|
|
|
rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
|
2014-11-20 21:42:36 +01:00
|
|
|
filter_cache[switchable_filter_index] = tmp_rd;
|
|
|
|
filter_cache[SWITCHABLE_FILTERS] =
|
2015-08-18 03:19:22 +02:00
|
|
|
VPXMIN(filter_cache[SWITCHABLE_FILTERS], tmp_rd + rs_rd);
|
2016-07-27 05:43:23 +02:00
|
|
|
if (cm->interp_filter == SWITCHABLE) tmp_rd += rs_rd;
|
2013-09-28 01:02:49 +02:00
|
|
|
|
2015-08-18 03:19:22 +02:00
|
|
|
mask_filter = VPXMAX(mask_filter, tmp_rd);
|
2013-12-16 23:38:45 +01:00
|
|
|
|
2013-09-28 01:02:49 +02:00
|
|
|
newbest = (tmp_rd < tmp_best_rd);
|
|
|
|
if (newbest) {
|
2016-01-20 01:40:20 +01:00
|
|
|
tmp_best_filter = mi->interp_filter;
|
2013-09-28 01:02:49 +02:00
|
|
|
tmp_best_rd = tmp_rd;
|
|
|
|
}
|
2014-01-24 21:26:57 +01:00
|
|
|
if ((newbest && cm->interp_filter == SWITCHABLE) ||
|
2016-01-20 01:40:20 +01:00
|
|
|
(mi->interp_filter == cm->interp_filter &&
|
2014-01-24 21:26:57 +01:00
|
|
|
cm->interp_filter != SWITCHABLE)) {
|
2013-09-28 01:02:49 +02:00
|
|
|
tmp_best_rdu = tmp_rd;
|
|
|
|
tmp_best_rate = rate;
|
|
|
|
tmp_best_ratey = rate_y;
|
|
|
|
tmp_best_distortion = distortion;
|
|
|
|
tmp_best_sse = total_sse;
|
|
|
|
tmp_best_skippable = skippable;
|
2016-01-20 01:40:20 +01:00
|
|
|
tmp_best_mbmode = *mi;
|
2013-10-30 21:52:55 +01:00
|
|
|
for (i = 0; i < 4; i++) {
|
2015-04-21 14:36:58 +02:00
|
|
|
tmp_best_bmodes[i] = xd->mi[0]->bmi[i];
|
2013-12-04 02:59:32 +01:00
|
|
|
x->zcoeff_blk[TX_4X4][i] = !x->plane[0].eobs[i];
|
2017-03-16 23:45:07 +01:00
|
|
|
x->sum_y_eobs[TX_4X4] += x->plane[0].eobs[i];
|
2013-10-30 21:52:55 +01:00
|
|
|
}
|
2013-09-28 01:02:49 +02:00
|
|
|
pred_exists = 1;
|
2016-07-27 05:43:23 +02:00
|
|
|
if (switchable_filter_index == 0 && sf->use_rd_breakout &&
|
2013-09-28 01:02:49 +02:00
|
|
|
best_rd < INT64_MAX) {
|
|
|
|
if (tmp_best_rdu / 2 > best_rd) {
|
|
|
|
// skip searching the other filters if the first is
|
|
|
|
// already substantially larger than the best so far
|
2016-01-20 01:40:20 +01:00
|
|
|
tmp_best_filter = mi->interp_filter;
|
2013-09-28 01:02:49 +02:00
|
|
|
tmp_best_rdu = INT64_MAX;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} // switchable_filter_index loop
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (tmp_best_rdu == INT64_MAX && pred_exists) continue;
|
2013-09-28 01:02:49 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
mi->interp_filter = (cm->interp_filter == SWITCHABLE ? tmp_best_filter
|
|
|
|
: cm->interp_filter);
|
2013-09-28 01:02:49 +02:00
|
|
|
if (!pred_exists) {
|
|
|
|
// Handles the special case when a filter that is not in the
|
|
|
|
// switchable list (bilinear, 6-tap) is indicated at the frame level
|
2016-07-27 05:43:23 +02:00
|
|
|
tmp_rd = rd_pick_best_sub8x8_mode(
|
|
|
|
cpi, x, &x->mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd,
|
|
|
|
&rate, &rate_y, &distortion, &skippable, &total_sse,
|
|
|
|
(int)this_rd_thresh, seg_mvs, bsi, 0, mi_row, mi_col);
|
|
|
|
if (tmp_rd == INT64_MAX) continue;
|
2013-09-28 01:02:49 +02:00
|
|
|
} else {
|
|
|
|
total_sse = tmp_best_sse;
|
|
|
|
rate = tmp_best_rate;
|
|
|
|
rate_y = tmp_best_ratey;
|
|
|
|
distortion = tmp_best_distortion;
|
|
|
|
skippable = tmp_best_skippable;
|
2016-01-20 01:40:20 +01:00
|
|
|
*mi = tmp_best_mbmode;
|
2016-07-27 05:43:23 +02:00
|
|
|
for (i = 0; i < 4; i++) xd->mi[0]->bmi[i] = tmp_best_bmodes[i];
|
2013-09-28 01:02:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
rate2 += rate;
|
|
|
|
distortion2 += distortion;
|
|
|
|
|
2014-01-24 21:26:57 +01:00
|
|
|
if (cm->interp_filter == SWITCHABLE)
|
2014-11-21 20:11:06 +01:00
|
|
|
rate2 += vp9_get_switchable_rate(cpi, xd);
|
2013-09-28 01:02:49 +02:00
|
|
|
|
2013-12-10 00:13:34 +01:00
|
|
|
if (!mode_excluded)
|
2014-01-07 02:29:16 +01:00
|
|
|
mode_excluded = comp_pred ? cm->reference_mode == SINGLE_REFERENCE
|
|
|
|
: cm->reference_mode == COMPOUND_REFERENCE;
|
2013-12-10 00:13:34 +01:00
|
|
|
|
2013-10-03 23:00:42 +02:00
|
|
|
compmode_cost = vp9_cost_bit(comp_mode_p, comp_pred);
|
2013-06-01 01:00:32 +02:00
|
|
|
|
2015-08-18 03:19:22 +02:00
|
|
|
tmp_best_rdu =
|
|
|
|
best_rd - VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
|
|
|
|
RDCOST(x->rdmult, x->rddiv, 0, total_sse));
|
2013-09-28 01:02:49 +02:00
|
|
|
|
|
|
|
if (tmp_best_rdu > 0) {
|
|
|
|
// If even the 'Y' rd value of split is higher than best so far
|
|
|
|
// then dont bother looking at UV
|
2016-07-27 05:43:23 +02:00
|
|
|
vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8);
|
2015-07-30 20:52:28 +02:00
|
|
|
memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
|
2014-10-14 02:06:22 +02:00
|
|
|
if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
|
2015-09-08 18:53:23 +02:00
|
|
|
&uv_sse, BLOCK_8X8, tmp_best_rdu)) {
|
|
|
|
for (ref = 0; ref < 2; ++ref) {
|
|
|
|
if (scaled_ref_frame[ref]) {
|
|
|
|
int i;
|
|
|
|
for (i = 0; i < MAX_MB_PLANE; ++i)
|
|
|
|
xd->plane[i].pre[ref] = backup_yv12[ref][i];
|
|
|
|
}
|
|
|
|
}
|
2013-09-28 01:02:49 +02:00
|
|
|
continue;
|
2015-09-08 18:53:23 +02:00
|
|
|
}
|
2014-10-14 02:06:22 +02:00
|
|
|
|
2013-09-28 01:02:49 +02:00
|
|
|
rate2 += rate_uv;
|
|
|
|
distortion2 += distortion_uv;
|
|
|
|
skippable = skippable && uv_skippable;
|
|
|
|
total_sse += uv_sse;
|
|
|
|
}
|
|
|
|
|
2015-09-09 21:29:57 +02:00
|
|
|
for (ref = 0; ref < 2; ++ref) {
|
|
|
|
if (scaled_ref_frame[ref]) {
|
|
|
|
// Restore the prediction frame pointers to their unscaled versions.
|
|
|
|
int i;
|
|
|
|
for (i = 0; i < MAX_MB_PLANE; ++i)
|
|
|
|
xd->plane[i].pre[ref] = backup_yv12[ref][i];
|
|
|
|
}
|
2015-09-08 18:53:23 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
|
2013-09-28 01:02:49 +02:00
|
|
|
|
|
|
|
// Estimate the reference frame signaling cost and add it
|
|
|
|
// to the rolling cost variable.
|
|
|
|
if (second_ref_frame > INTRA_FRAME) {
|
|
|
|
rate2 += ref_costs_comp[ref_frame];
|
|
|
|
} else {
|
|
|
|
rate2 += ref_costs_single[ref_frame];
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!disable_skip) {
|
2016-02-08 18:56:34 +01:00
|
|
|
const vpx_prob skip_prob = vp9_get_skip_prob(cm, xd);
|
|
|
|
const int skip_cost0 = vp9_cost_bit(skip_prob, 0);
|
|
|
|
const int skip_cost1 = vp9_cost_bit(skip_prob, 1);
|
|
|
|
|
2014-05-29 20:27:07 +02:00
|
|
|
// Skip is never coded at the segment level for sub8x8 blocks and instead
|
|
|
|
// always coded in the bitstream at the mode info level.
|
|
|
|
if (ref_frame != INTRA_FRAME && !xd->lossless) {
|
2016-07-27 05:43:23 +02:00
|
|
|
if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv + skip_cost0,
|
|
|
|
distortion2) <
|
2016-02-08 18:56:34 +01:00
|
|
|
RDCOST(x->rdmult, x->rddiv, skip_cost1, total_sse)) {
|
2013-09-28 01:02:49 +02:00
|
|
|
// Add in the cost of the no skip flag.
|
2016-02-08 18:56:34 +01:00
|
|
|
rate2 += skip_cost0;
|
2013-09-28 01:02:49 +02:00
|
|
|
} else {
|
|
|
|
// FIXME(rbultje) make this work for splitmv also
|
2016-02-08 18:56:34 +01:00
|
|
|
rate2 += skip_cost1;
|
2013-09-28 01:02:49 +02:00
|
|
|
distortion2 = total_sse;
|
|
|
|
assert(total_sse >= 0);
|
|
|
|
rate2 -= (rate_y + rate_uv);
|
|
|
|
rate_y = 0;
|
|
|
|
rate_uv = 0;
|
|
|
|
this_skip2 = 1;
|
|
|
|
}
|
2014-05-29 20:27:07 +02:00
|
|
|
} else {
|
2013-09-28 01:02:49 +02:00
|
|
|
// Add in the cost of the no skip flag.
|
2016-02-08 18:56:34 +01:00
|
|
|
rate2 += skip_cost0;
|
2013-09-28 01:02:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Calculate the final RD estimate for this mode.
|
|
|
|
this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!disable_skip && ref_frame == INTRA_FRAME) {
|
2013-11-23 01:35:37 +01:00
|
|
|
for (i = 0; i < REFERENCE_MODES; ++i)
|
2015-08-18 03:19:22 +02:00
|
|
|
best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd);
|
2013-10-30 22:40:34 +01:00
|
|
|
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
|
2015-08-18 03:19:22 +02:00
|
|
|
best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd);
|
2013-09-28 01:02:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Did this mode help.. i.e. is it the new best mode
|
|
|
|
if (this_rd < best_rd || x->skip) {
|
|
|
|
if (!mode_excluded) {
|
2013-11-07 23:56:58 +01:00
|
|
|
int max_plane = MAX_MB_PLANE;
|
2013-09-28 01:02:49 +02:00
|
|
|
// Note index of best mode so far
|
2014-04-17 17:40:47 +02:00
|
|
|
best_ref_index = ref_index;
|
2013-09-28 01:02:49 +02:00
|
|
|
|
|
|
|
if (ref_frame == INTRA_FRAME) {
|
|
|
|
/* required for left and above block mv */
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->mv[0].as_int = 0;
|
2013-11-07 23:56:58 +01:00
|
|
|
max_plane = 1;
|
2016-07-21 20:47:51 +02:00
|
|
|
// Initialize interp_filter here so we do not have to check for
|
|
|
|
// inter block modes in get_pred_context_switchable_interp()
|
|
|
|
mi->interp_filter = SWITCHABLE_FILTERS;
|
2013-09-28 01:02:49 +02:00
|
|
|
}
|
|
|
|
|
2014-10-09 21:32:56 +02:00
|
|
|
rd_cost->rate = rate2;
|
|
|
|
rd_cost->dist = distortion2;
|
|
|
|
rd_cost->rdcost = this_rd;
|
2013-09-28 01:02:49 +02:00
|
|
|
best_rd = this_rd;
|
2016-07-27 05:43:23 +02:00
|
|
|
best_yrd =
|
|
|
|
best_rd - RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv);
|
2016-01-20 01:40:20 +01:00
|
|
|
best_mbmode = *mi;
|
2013-09-28 01:02:49 +02:00
|
|
|
best_skip2 = this_skip2;
|
2016-07-27 05:43:23 +02:00
|
|
|
if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
|
2015-04-24 05:42:19 +02:00
|
|
|
memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4],
|
2015-08-12 19:41:51 +02:00
|
|
|
sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk);
|
2017-03-16 23:45:07 +01:00
|
|
|
ctx->sum_y_eobs = x->sum_y_eobs[TX_4X4];
|
2013-09-28 01:02:49 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
for (i = 0; i < 4; i++) best_bmodes[i] = xd->mi[0]->bmi[i];
|
2013-09-28 01:02:49 +02:00
|
|
|
|
|
|
|
// TODO(debargha): enhance this test with a better distortion prediction
|
|
|
|
// based on qp, activity mask and history
|
2014-10-14 01:13:59 +02:00
|
|
|
if ((sf->mode_search_skip_flags & FLAG_EARLY_TERMINATE) &&
|
2014-04-17 17:40:47 +02:00
|
|
|
(ref_index > MIN_EARLY_TERM_INDEX)) {
|
2014-09-24 15:36:34 +02:00
|
|
|
int qstep = xd->plane[0].dequant[1];
|
2013-09-28 01:02:49 +02:00
|
|
|
// TODO(debargha): Enhance this by specializing for each mode_index
|
|
|
|
int scale = 4;
|
2014-09-24 15:36:34 +02:00
|
|
|
#if CONFIG_VP9_HIGHBITDEPTH
|
|
|
|
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
|
|
|
|
qstep >>= (xd->bd - 8);
|
|
|
|
}
|
|
|
|
#endif // CONFIG_VP9_HIGHBITDEPTH
|
2013-09-28 01:02:49 +02:00
|
|
|
if (x->source_variance < UINT_MAX) {
|
|
|
|
const int var_adjust = (x->source_variance < 16);
|
|
|
|
scale -= var_adjust;
|
|
|
|
}
|
2016-07-27 05:43:23 +02:00
|
|
|
if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) {
|
2013-09-28 01:02:49 +02:00
|
|
|
early_term = 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* keep record of best compound/single-only prediction */
|
|
|
|
if (!disable_skip && ref_frame != INTRA_FRAME) {
|
2013-11-25 21:45:51 +01:00
|
|
|
int64_t single_rd, hybrid_rd, single_rate, hybrid_rate;
|
2013-09-28 01:02:49 +02:00
|
|
|
|
2014-01-07 02:29:16 +01:00
|
|
|
if (cm->reference_mode == REFERENCE_MODE_SELECT) {
|
2013-09-28 01:02:49 +02:00
|
|
|
single_rate = rate2 - compmode_cost;
|
|
|
|
hybrid_rate = rate2;
|
|
|
|
} else {
|
|
|
|
single_rate = rate2;
|
|
|
|
hybrid_rate = rate2 + compmode_cost;
|
|
|
|
}
|
|
|
|
|
|
|
|
single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2);
|
|
|
|
hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2);
|
|
|
|
|
2014-09-15 18:43:54 +02:00
|
|
|
if (!comp_pred && single_rd < best_pred_rd[SINGLE_REFERENCE])
|
2013-11-23 01:35:37 +01:00
|
|
|
best_pred_rd[SINGLE_REFERENCE] = single_rd;
|
2014-09-15 18:43:54 +02:00
|
|
|
else if (comp_pred && single_rd < best_pred_rd[COMPOUND_REFERENCE])
|
2013-11-23 01:35:37 +01:00
|
|
|
best_pred_rd[COMPOUND_REFERENCE] = single_rd;
|
2014-09-15 18:43:54 +02:00
|
|
|
|
2013-11-23 01:35:37 +01:00
|
|
|
if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT])
|
|
|
|
best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd;
|
2013-09-28 01:02:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* keep record of best filter type */
|
|
|
|
if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME &&
|
2014-01-24 21:26:57 +01:00
|
|
|
cm->interp_filter != BILINEAR) {
|
2016-07-27 05:43:23 +02:00
|
|
|
int64_t ref =
|
|
|
|
filter_cache[cm->interp_filter == SWITCHABLE ? SWITCHABLE_FILTERS
|
|
|
|
: cm->interp_filter];
|
2013-12-16 23:38:45 +01:00
|
|
|
int64_t adj_rd;
|
2013-10-30 22:40:34 +01:00
|
|
|
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
|
2013-12-16 23:38:45 +01:00
|
|
|
if (ref == INT64_MAX)
|
|
|
|
adj_rd = 0;
|
2014-11-20 21:42:36 +01:00
|
|
|
else if (filter_cache[i] == INT64_MAX)
|
2013-12-17 02:52:59 +01:00
|
|
|
// when early termination is triggered, the encoder does not have
|
|
|
|
// access to the rate-distortion cost. it only knows that the cost
|
|
|
|
// should be above the maximum valid value. hence it takes the known
|
|
|
|
// maximum plus an arbitrary constant as the rate-distortion cost.
|
2014-11-20 18:41:49 +01:00
|
|
|
adj_rd = mask_filter - ref + 10;
|
2013-12-16 23:38:45 +01:00
|
|
|
else
|
2014-11-20 21:42:36 +01:00
|
|
|
adj_rd = filter_cache[i] - ref;
|
2013-12-16 23:38:45 +01:00
|
|
|
|
|
|
|
adj_rd += this_rd;
|
2015-08-18 03:19:22 +02:00
|
|
|
best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd);
|
2013-09-28 01:02:49 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (early_term) break;
|
2013-09-28 01:02:49 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
if (x->skip && !comp_pred) break;
|
2013-09-28 01:02:49 +02:00
|
|
|
}
|
|
|
|
|
2014-10-14 00:56:37 +02:00
|
|
|
if (best_rd >= best_rd_so_far) {
|
2014-10-17 18:31:42 +02:00
|
|
|
rd_cost->rate = INT_MAX;
|
2014-10-14 00:56:37 +02:00
|
|
|
rd_cost->rdcost = INT64_MAX;
|
|
|
|
return;
|
|
|
|
}
|
2013-09-28 01:02:49 +02:00
|
|
|
|
|
|
|
// If we used an estimate for the uv intra rd in the loop above...
|
2014-10-14 01:13:59 +02:00
|
|
|
if (sf->use_uv_intra_rd_estimate) {
|
2013-09-28 01:02:49 +02:00
|
|
|
// Do Intra UV best rd mode selection if best mode choice above was intra.
|
2014-09-15 18:59:20 +02:00
|
|
|
if (best_mbmode.ref_frame[0] == INTRA_FRAME) {
|
2016-01-20 01:40:20 +01:00
|
|
|
*mi = best_mbmode;
|
2016-07-27 05:43:23 +02:00
|
|
|
rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra, &rate_uv_tokenonly,
|
|
|
|
&dist_uv, &skip_uv, BLOCK_8X8, TX_4X4);
|
2013-09-28 01:02:49 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-09 20:15:59 +02:00
|
|
|
if (best_rd == INT64_MAX) {
|
2014-10-09 21:32:56 +02:00
|
|
|
rd_cost->rate = INT_MAX;
|
|
|
|
rd_cost->dist = INT64_MAX;
|
|
|
|
rd_cost->rdcost = INT64_MAX;
|
2014-10-14 00:56:37 +02:00
|
|
|
return;
|
2013-09-28 01:02:49 +02:00
|
|
|
}
|
|
|
|
|
2014-01-24 21:26:57 +01:00
|
|
|
assert((cm->interp_filter == SWITCHABLE) ||
|
|
|
|
(cm->interp_filter == best_mbmode.interp_filter) ||
|
2013-11-20 05:25:55 +01:00
|
|
|
!is_inter_block(&best_mbmode));
|
2013-09-28 01:02:49 +02:00
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact, sf->adaptive_rd_thresh,
|
2017-04-24 21:06:49 +02:00
|
|
|
bsize, best_ref_index);
|
2013-09-28 01:02:49 +02:00
|
|
|
|
|
|
|
// macroblock modes
|
2016-01-20 01:40:20 +01:00
|
|
|
*mi = best_mbmode;
|
2013-09-28 01:02:49 +02:00
|
|
|
x->skip |= best_skip2;
|
2013-11-20 05:25:55 +01:00
|
|
|
if (!is_inter_block(&best_mbmode)) {
|
2016-07-27 05:43:23 +02:00
|
|
|
for (i = 0; i < 4; i++) xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode;
|
2013-09-28 01:02:49 +02:00
|
|
|
} else {
|
2013-10-07 20:20:50 +02:00
|
|
|
for (i = 0; i < 4; ++i)
|
2015-04-24 05:42:19 +02:00
|
|
|
memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i], sizeof(b_mode_info));
|
2013-05-01 23:43:33 +02:00
|
|
|
|
2016-01-20 01:40:20 +01:00
|
|
|
mi->mv[0].as_int = xd->mi[0]->bmi[3].as_mv[0].as_int;
|
|
|
|
mi->mv[1].as_int = xd->mi[0]->bmi[3].as_mv[1].as_int;
|
2013-05-01 23:43:33 +02:00
|
|
|
}
|
2013-05-05 07:09:43 +02:00
|
|
|
|
2013-11-23 01:35:37 +01:00
|
|
|
for (i = 0; i < REFERENCE_MODES; ++i) {
|
2013-01-14 20:49:30 +01:00
|
|
|
if (best_pred_rd[i] == INT64_MAX)
|
2012-11-08 20:03:00 +01:00
|
|
|
best_pred_diff[i] = INT_MIN;
|
|
|
|
else
|
|
|
|
best_pred_diff[i] = best_rd - best_pred_rd[i];
|
|
|
|
}
|
|
|
|
|
2013-07-08 23:49:33 +02:00
|
|
|
if (!x->skip) {
|
2013-10-30 22:40:34 +01:00
|
|
|
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
|
2013-07-08 23:49:33 +02:00
|
|
|
if (best_filter_rd[i] == INT64_MAX)
|
|
|
|
best_filter_diff[i] = 0;
|
|
|
|
else
|
|
|
|
best_filter_diff[i] = best_rd - best_filter_rd[i];
|
|
|
|
}
|
2014-01-24 21:26:57 +01:00
|
|
|
if (cm->interp_filter == SWITCHABLE)
|
2013-08-23 03:40:34 +02:00
|
|
|
assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
|
2012-11-08 20:03:00 +01:00
|
|
|
} else {
|
2014-04-09 20:15:59 +02:00
|
|
|
vp9_zero(best_filter_diff);
|
2012-11-08 20:03:00 +01:00
|
|
|
}
|
|
|
|
|
2016-07-27 05:43:23 +02:00
|
|
|
store_coding_context(x, ctx, best_ref_index, best_pred_diff, best_filter_diff,
|
|
|
|
0);
|
2012-08-20 23:43:34 +02:00
|
|
|
}
|