/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdio.h>
#include <limits.h>

#include "vpx/vpx_encoder.h"
#include "vpx_mem/vpx_mem.h"

#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_tile_common.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_treecoder.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_pragmas.h"

#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_bitstream.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_write_bit_buffer.h"

#if defined(SECTIONBITS_OUTPUT)
unsigned __int64 Sectionbits[500];
#endif

#ifdef ENTROPY_STATS
int intra_mode_stats[VP9_BINTRAMODES]
                    [VP9_BINTRAMODES]
                    [VP9_BINTRAMODES];
vp9_coeff_stats tree_update_hist_4x4[BLOCK_TYPES];
vp9_coeff_stats tree_update_hist_8x8[BLOCK_TYPES];
vp9_coeff_stats tree_update_hist_16x16[BLOCK_TYPES];
vp9_coeff_stats tree_update_hist_32x32[BLOCK_TYPES];

extern unsigned int active_section;
#endif

#define vp9_cost_upd ((int)(vp9_cost_one(upd) - vp9_cost_zero(upd)) >> 8)
#define vp9_cost_upd256 ((int)(vp9_cost_one(upd) - vp9_cost_zero(upd)))

static int update_bits[255];

static INLINE void write_le16(uint8_t *p, int value) {
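  // Write 'value' to p as a 16-bit little-endian integer (low byte first).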
  p[0] = value;
  p[1] = value >> 8;
}

static INLINE void write_le32(uint8_t *p, int value) {
  p[0] = value;
  p[1] = value >> 8;
  p[2] = value >> 16;
  p[3] = value >> 24;
}

void vp9_encode_unsigned_max(vp9_writer *br, int data, int max) {
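  // Write 'data' one bit at a time, LSB first, emitting as many bits as are
  // needed to represent 'max'.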
  assert(data <= max);
  while (max) {
    vp9_write_bit(br, data & 1);
    data >>= 1;
    max >>= 1;
  }
}

int recenter_nonneg(int v, int m) {
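  // Recenter v around m: values near m map to small non-negative codes,
  // while values beyond 2*m are passed through unchanged.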
  if (v > (m << 1))
    return v;
  else if (v >= m)
    return ((v - m) << 1);
  else
    return ((m - v) << 1) - 1;
}

static int get_unsigned_bits(unsigned num_values) {
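  // Return the number of bits needed to code a value in [0, num_values - 1].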
  int cat = 0;
  if ((num_values--) <= 1) return 0;
  while (num_values > 0) {
    cat++;
    num_values >>= 1;
  }
  return cat;
}

void encode_uniform(vp9_writer *w, int v, int n) {
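  // Nearly-uniform code for v in [0, n - 1]: the first m = 2^l - n values
  // take l - 1 bits, the remaining values take l bits.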
  int l = get_unsigned_bits(n);
  int m;
  if (l == 0)
    return;
  m = (1 << l) - n;
  if (v < m) {
    vp9_write_literal(w, v, l - 1);
  } else {
    vp9_write_literal(w, m + ((v - m) >> 1), l - 1);
    vp9_write_literal(w, (v - m) & 1, 1);
  }
}

int count_uniform(int v, int n) {
  int l = get_unsigned_bits(n);
  int m;
  if (l == 0) return 0;
  m = (1 << l) - n;
  if (v < m)
    return l - 1;
  else
    return l;
}

void encode_term_subexp(vp9_writer *w, int word, int k, int num_syms) {
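  // Terminated sub-exponential code: each '1' flag bit moves on to the next,
  // geometrically growing range; the final range is coded with
  // encode_uniform().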
  int i = 0;
  int mk = 0;
  while (1) {
    int b = (i ? k + i - 1 : k);
    int a = (1 << b);
    if (num_syms <= mk + 3 * a) {
      encode_uniform(w, word - mk, num_syms - mk);
      break;
    } else {
      int t = (word >= mk + a);
      vp9_write_literal(w, t, 1);
      if (t) {
        i = i + 1;
        mk += a;
      } else {
        vp9_write_literal(w, word - mk, b);
        break;
      }
    }
  }
}

int count_term_subexp(int word, int k, int num_syms) {
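  // Return the number of bits encode_term_subexp() would write for 'word'.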
  int count = 0;
  int i = 0;
  int mk = 0;
  while (1) {
    int b = (i ? k + i - 1 : k);
    int a = (1 << b);
    if (num_syms <= mk + 3 * a) {
      count += count_uniform(word - mk, num_syms - mk);
      break;
    } else {
      int t = (word >= mk + a);
      count++;
      if (t) {
        i = i + 1;
        mk += a;
      } else {
        count += b;
        break;
      }
    }
  }
  return count;
}

static void compute_update_table() {
  int i;
  for (i = 0; i < 255; i++)
    update_bits[i] = count_term_subexp(i, SUBEXP_PARAM, 255);
}

static int split_index(int i, int n, int modulus) {
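  // Reorder index i so that entries lying on the modulus grid (offset by
  // modulus / 2) come first and therefore receive shorter codes.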
  int max1 = (n - 1 - modulus / 2) / modulus + 1;
  if (i % modulus == modulus / 2)
    i = i / modulus;
  else
    i = max1 + i - (i + modulus - modulus / 2) / modulus;
  return i;
}

static int remap_prob(int v, int m) {
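  // Map the new probability v, given the old probability m, to an index in
  // which small changes from m produce small values (and short codes).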
  const int n = 256;
  const int modulus = MODULUS_PARAM;
  int i;
  if ((m << 1) <= n)
    i = recenter_nonneg(v, m) - 1;
  else
    i = recenter_nonneg(n - 1 - v, n - 1 - m) - 1;

  i = split_index(i, n - 1, modulus);
  return i;
}

static void write_prob_diff_update(vp9_writer *w,
                                   vp9_prob newp, vp9_prob oldp) {
  int delp = remap_prob(newp, oldp);
  encode_term_subexp(w, delp, SUBEXP_PARAM, 255);
}

static int prob_diff_update_cost(vp9_prob newp, vp9_prob oldp) {
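  // Cost of signalling this probability update, in 1/256-bit units.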
  int delp = remap_prob(newp, oldp);
  return update_bits[delp] * 256;
}

static void update_mode(
  vp9_writer *w,
  int n,
  const struct vp9_token tok[/* n */],
  vp9_tree tree,
  vp9_prob Pnew[/* n-1 */],
  vp9_prob Pcur[/* n-1 */],
  unsigned int bct[/* n-1 */][2],
  const unsigned int num_events[/* n */]
) {
  unsigned int new_b = 0, old_b = 0;
  int i = 0;

  vp9_tree_probs_from_distribution(tree, Pnew, bct, num_events, 0);
  n--;

  do {
    new_b += cost_branch(bct[i], Pnew[i]);
    old_b += cost_branch(bct[i], Pcur[i]);
  } while (++i < n);

  if (new_b + (n << 8) < old_b) {
    int i = 0;

    vp9_write_bit(w, 1);

    do {
      const vp9_prob p = Pnew[i];

      vp9_write_literal(w, Pcur[i] = p ? p : 1, 8);
    } while (++i < n);
  } else
    vp9_write_bit(w, 0);
}

static void update_mbintra_mode_probs(VP9_COMP* const cpi,
                                      vp9_writer* const bc) {
  VP9_COMMON *const cm = &cpi->common;

  vp9_prob pnew[VP9_YMODES - 1];
  unsigned int bct[VP9_YMODES - 1][2];

  update_mode(bc, VP9_YMODES, vp9_intra_mode_encodings,
              vp9_intra_mode_tree, pnew,
              cm->fc.y_mode_prob, bct, (unsigned int *)cpi->y_mode_count);
}

void vp9_update_skip_probs(VP9_COMP *cpi) {
  VP9_COMMON *const pc = &cpi->common;
  int k;

  for (k = 0; k < MBSKIP_CONTEXTS; ++k)
    pc->mbskip_pred_probs[k] = get_binary_prob(cpi->skip_false_count[k],
                                               cpi->skip_true_count[k]);
}

static void update_switchable_interp_probs(VP9_COMP *cpi,
                                           vp9_writer* const bc) {
  VP9_COMMON *const pc = &cpi->common;
  unsigned int branch_ct[32][2];
  int i, j;
  for (j = 0; j <= VP9_SWITCHABLE_FILTERS; ++j) {
    vp9_tree_probs_from_distribution(
        vp9_switchable_interp_tree,
        pc->fc.switchable_interp_prob[j], branch_ct,
        cpi->switchable_interp_count[j], 0);
    for (i = 0; i < VP9_SWITCHABLE_FILTERS - 1; ++i) {
      if (pc->fc.switchable_interp_prob[j][i] < 1)
        pc->fc.switchable_interp_prob[j][i] = 1;
      vp9_write_prob(bc, pc->fc.switchable_interp_prob[j][i]);
    }
  }
}

// This function updates the reference frame prediction stats
static void update_refpred_stats(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  int i;
  vp9_prob new_pred_probs[PREDICTION_PROBS];
  int old_cost, new_cost;

  // Set the prediction probability structures to defaults
  if (cm->frame_type != KEY_FRAME) {
    // From the prediction counts set the probabilities for each context
    for (i = 0; i < PREDICTION_PROBS; i++) {
      const int c0 = cpi->ref_pred_count[i][0];
      const int c1 = cpi->ref_pred_count[i][1];

      new_pred_probs[i] = get_binary_prob(c0, c1);

      // Decide whether or not to update the reference frame probs.
      // Returned costs are in 1/256 bit units.
      old_cost = c0 * vp9_cost_zero(cm->ref_pred_probs[i]) +
                 c1 * vp9_cost_one(cm->ref_pred_probs[i]);

      new_cost = c0 * vp9_cost_zero(new_pred_probs[i]) +
                 c1 * vp9_cost_one(new_pred_probs[i]);

      // Cost saving must be >= 8 bits (2048 in these units)
      if ((old_cost - new_cost) >= 2048) {
        cpi->ref_pred_probs_update[i] = 1;
        cm->ref_pred_probs[i] = new_pred_probs[i];
      } else
        cpi->ref_pred_probs_update[i] = 0;
    }
  }
}

// This function is called to update the mode probability context used to
// encode inter modes. It assumes the branch counts table has already been
// populated prior to the actual packing of the bitstream (in rd stage or
// dummy pack).
//
// The branch counts table is re-populated during the actual pack stage and in
// the decoder to facilitate backwards update of the context.
static void update_inter_mode_probs(VP9_COMMON *cm,
                                    int mode_context[INTER_MODE_CONTEXTS][4]) {
  int i, j;
  unsigned int (*mv_ref_ct)[4][2] = cm->fc.mv_ref_ct;

  vpx_memcpy(mode_context, cm->fc.vp9_mode_contexts,
             sizeof(cm->fc.vp9_mode_contexts));

  for (i = 0; i < INTER_MODE_CONTEXTS; i++) {
    for (j = 0; j < 4; j++) {
      int new_prob, old_cost, new_cost;

      // Work out cost of coding branches with the old and optimal probability
      old_cost = cost_branch256(mv_ref_ct[i][j], mode_context[i][j]);
      new_prob = get_binary_prob(mv_ref_ct[i][j][0], mv_ref_ct[i][j][1]);
      new_cost = cost_branch256(mv_ref_ct[i][j], new_prob);

      // If cost saving is >= 14 bits then update the mode probability.
      // This is the approximate net cost of updating one probability given
      // that the no update case is much more common than the update case.
      if (new_cost <= (old_cost - (14 << 8))) {
        mode_context[i][j] = new_prob;
      }
    }
  }
}

static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) {
  write_token(bc, vp9_intra_mode_tree, p, vp9_intra_mode_encodings + m);
}

static int prob_update_savings(const unsigned int *ct,
                               const vp9_prob oldp, const vp9_prob newp,
                               const vp9_prob upd) {
  const int old_b = cost_branch256(ct, oldp);
  const int new_b = cost_branch256(ct, newp);
  const int update_b = 2048 + vp9_cost_upd256;
  return old_b - new_b - update_b;
}

static int prob_diff_update_savings_search(const unsigned int *ct,
                                           const vp9_prob oldp, vp9_prob *bestp,
                                           const vp9_prob upd) {
  const int old_b = cost_branch256(ct, oldp);
  int new_b, update_b, savings, bestsavings, step;
  vp9_prob newp, bestnewp;

  bestsavings = 0;
  bestnewp = oldp;

  step = (*bestp > oldp ? -1 : 1);
  for (newp = *bestp; newp != oldp; newp += step) {
    new_b = cost_branch256(ct, newp);
    update_b = prob_diff_update_cost(newp, oldp) + vp9_cost_upd256;
    savings = old_b - new_b - update_b;
    if (savings > bestsavings) {
      bestsavings = savings;
      bestnewp = newp;
    }
  }
  *bestp = bestnewp;
  return bestsavings;
}

static int prob_diff_update_savings_search_model(const unsigned int *ct,
                                                 const vp9_prob *oldp,
                                                 vp9_prob *bestp,
                                                 const vp9_prob upd,
                                                 int b, int r) {
  int i, old_b, new_b, update_b, savings, bestsavings, step;
  int newp;
  vp9_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
  vp9_model_to_full_probs(oldp, oldplist);
  vpx_memcpy(newplist, oldp, sizeof(vp9_prob) * UNCONSTRAINED_NODES);
  for (i = UNCONSTRAINED_NODES, old_b = 0; i < ENTROPY_NODES; ++i)
    old_b += cost_branch256(ct + 2 * i, oldplist[i]);
  old_b += cost_branch256(ct + 2 * PIVOT_NODE, oldplist[PIVOT_NODE]);

  bestsavings = 0;
  bestnewp = oldp[PIVOT_NODE];

  step = (*bestp > oldp[PIVOT_NODE] ? -1 : 1);
  newp = *bestp;
  for (; newp != oldp[PIVOT_NODE]; newp += step) {
    if (newp < 1 || newp > 255) continue;
    newplist[PIVOT_NODE] = newp;
    vp9_model_to_full_probs(newplist, newplist);
    for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
      new_b += cost_branch256(ct + 2 * i, newplist[i]);
    new_b += cost_branch256(ct + 2 * PIVOT_NODE, newplist[PIVOT_NODE]);
    update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) +
               vp9_cost_upd256;
    savings = old_b - new_b - update_b;
    if (savings > bestsavings) {
      bestsavings = savings;
      bestnewp = newp;
    }
  }
  *bestp = bestnewp;
  return bestsavings;
}

static void vp9_cond_prob_update(vp9_writer *bc, vp9_prob *oldp, vp9_prob upd,
                                 unsigned int *ct) {
  vp9_prob newp;
  int savings;
  newp = get_binary_prob(ct[0], ct[1]);
  savings = prob_update_savings(ct, *oldp, newp, upd);
  if (savings > 0) {
    vp9_write(bc, 1, upd);
    vp9_write_prob(bc, newp);
    *oldp = newp;
  } else {
    vp9_write(bc, 0, upd);
  }
}

static void pack_mb_tokens(vp9_writer* const bc,
                           TOKENEXTRA **tp,
const TOKENEXTRA *const stop) {
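  // Walk the token list up to 'stop' (or an EOSB_TOKEN), writing each
  // coefficient token through its probability tree plus any extra-bit suffix.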
  TOKENEXTRA *p = *tp;

  while (p < stop) {
    const int t = p->token;
    const struct vp9_token *const a = vp9_coef_encodings + t;
    const vp9_extra_bit *const b = vp9_extra_bits + t;
    int i = 0;
    const vp9_prob *pp;
    int v = a->value;
    int n = a->len;
    vp9_prob probs[ENTROPY_NODES];

    if (t == EOSB_TOKEN) {
      ++p;
      break;
    }
    if (t >= TWO_TOKEN) {
      vp9_model_to_full_probs(p->context_tree, probs);
      pp = probs;
    } else {
      pp = p->context_tree;
    }
    assert(pp != 0);

    /* skip one or two nodes */
#if !CONFIG_BALANCED_COEFTREE
    if (p->skip_eob_node) {
      n -= p->skip_eob_node;
      i = 2 * p->skip_eob_node;
    }
#endif

    do {
      const int bb = (v >> --n) & 1;
#if CONFIG_BALANCED_COEFTREE
      if (i == 2 && p->skip_eob_node) {
        i += 2;
        assert(bb == 1);
        continue;
      }
#endif
      vp9_write(bc, bb, pp[i >> 1]);
      i = vp9_coef_tree[i + bb];
    } while (n);

    if (b->base_val) {
      const int e = p->extra, l = b->len;

      if (l) {
        const unsigned char *pb = b->prob;
        int v = e >> 1;
        int n = l; /* number of bits in v, assumed nonzero */
        int i = 0;

        do {
          const int bb = (v >> --n) & 1;
          vp9_write(bc, bb, pb[i >> 1]);
          i = b->tree[i + bb];
        } while (n);
      }

      vp9_write_bit(bc, e & 1);
    }
    ++p;
  }

  *tp = p;
}

static void write_sb_mv_ref(vp9_writer *bc, MB_PREDICTION_MODE m,
                            const vp9_prob *p) {
#if CONFIG_DEBUG
  assert(NEARESTMV <= m && m < SPLITMV);
#endif
  write_token(bc, vp9_sb_mv_ref_tree, p,
              vp9_sb_mv_ref_encoding_array - NEARESTMV + m);
}

// This function writes the current macroblock's segment id to the bitstream.
// It should only be called if a segment map update is indicated.
static void write_mb_segid(vp9_writer *bc,
                           const MB_MODE_INFO *mi, const MACROBLOCKD *xd) {
  if (xd->segmentation_enabled && xd->update_mb_segmentation_map)
    treed_write(bc, vp9_segment_tree, xd->mb_segment_tree_probs,
                mi->segment_id, 3);
}

// This function encodes the reference frame
static void encode_ref_frame(vp9_writer *const bc,
                             VP9_COMMON *const cm,
                             MACROBLOCKD *xd,
                             int segment_id,
                             MV_REFERENCE_FRAME rf) {
  int seg_ref_active;
  int seg_ref_count = 0;
  seg_ref_active = vp9_segfeature_active(xd,
                                         segment_id,
                                         SEG_LVL_REF_FRAME);

  if (seg_ref_active) {
    seg_ref_count = vp9_check_segref(xd, segment_id, INTRA_FRAME) +
                    vp9_check_segref(xd, segment_id, LAST_FRAME) +
                    vp9_check_segref(xd, segment_id, GOLDEN_FRAME) +
                    vp9_check_segref(xd, segment_id, ALTREF_FRAME);
  }

  // If segment level coding of this signal is disabled...
  // or the segment allows multiple reference frame options
  if (!seg_ref_active || (seg_ref_count > 1)) {
    // Values used in prediction model coding
    unsigned char prediction_flag;
    vp9_prob pred_prob;
    MV_REFERENCE_FRAME pred_rf;

    // Get the context probability for the prediction flag
    pred_prob = vp9_get_pred_prob(cm, xd, PRED_REF);

    // Get the predicted value.
    pred_rf = vp9_get_pred_ref(cm, xd);

    // Did the chosen reference frame match its predicted value.
    prediction_flag =
      (xd->mode_info_context->mbmi.ref_frame == pred_rf);

    vp9_set_pred_flag(xd, PRED_REF, prediction_flag);
    vp9_write(bc, prediction_flag, pred_prob);

    // If not predicted correctly then code value explicitly
    if (!prediction_flag) {
      vp9_prob mod_refprobs[PREDICTION_PROBS];

      vpx_memcpy(mod_refprobs,
                 cm->mod_refprobs[pred_rf], sizeof(mod_refprobs));

      // If segment coding enabled blank out options that can't occur by
      // setting the branch probability to 0.
      if (seg_ref_active) {
        mod_refprobs[INTRA_FRAME] *=
          vp9_check_segref(xd, segment_id, INTRA_FRAME);
        mod_refprobs[LAST_FRAME] *=
          vp9_check_segref(xd, segment_id, LAST_FRAME);
        mod_refprobs[GOLDEN_FRAME] *=
          (vp9_check_segref(xd, segment_id, GOLDEN_FRAME) *
           vp9_check_segref(xd, segment_id, ALTREF_FRAME));
      }

      if (mod_refprobs[0]) {
        vp9_write(bc, (rf != INTRA_FRAME), mod_refprobs[0]);
      }

      // Inter coded
      if (rf != INTRA_FRAME) {
        if (mod_refprobs[1]) {
          vp9_write(bc, (rf != LAST_FRAME), mod_refprobs[1]);
        }

        if (rf != LAST_FRAME) {
          if (mod_refprobs[2]) {
            vp9_write(bc, (rf != GOLDEN_FRAME), mod_refprobs[2]);
          }
        }
      }
    }
  }

  // if using the prediction model we have nothing further to do because
  // the reference frame is fully coded by the segment
}

// Update the probabilities used to encode reference frame data
static void update_ref_probs(VP9_COMP *const cpi) {
  VP9_COMMON *const cm = &cpi->common;

  const int *const rfct = cpi->count_mb_ref_frame_usage;
  const int rf_intra = rfct[INTRA_FRAME];
  const int rf_inter = rfct[LAST_FRAME] +
                       rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];

  cm->prob_intra_coded = get_binary_prob(rf_intra, rf_inter);
  cm->prob_last_coded = get_prob(rfct[LAST_FRAME], rf_inter);
  cm->prob_gf_coded = get_binary_prob(rfct[GOLDEN_FRAME], rfct[ALTREF_FRAME]);

  // Compute a modified set of probabilities to use when prediction of the
  // reference frame fails
  vp9_compute_mod_refprobs(cm);
}

static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
                                vp9_writer *bc, int mi_row, int mi_col) {
  VP9_COMMON *const pc = &cpi->common;
  const nmv_context *nmvc = &pc->fc.nmvc;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mi = &m->mbmi;
  const MV_REFERENCE_FRAME rf = mi->ref_frame;
  const MB_PREDICTION_MODE mode = mi->mode;
  const int segment_id = mi->segment_id;
  int skip_coeff;

  xd->prev_mode_info_context = pc->prev_mi + (m - pc->mi);
  x->partition_info = x->pi + (m - pc->mi);

#ifdef ENTROPY_STATS
  active_section = 9;
#endif

  if (cpi->mb.e_mbd.update_mb_segmentation_map) {
    // Is temporal coding of the segment map enabled
    if (pc->temporal_update) {
      unsigned char prediction_flag = vp9_get_pred_flag(xd, PRED_SEG_ID);
      vp9_prob pred_prob = vp9_get_pred_prob(pc, xd, PRED_SEG_ID);

      // Code the segment id prediction flag for this mb
      vp9_write(bc, prediction_flag, pred_prob);

      // If the mb segment id wasn't predicted code explicitly
      if (!prediction_flag)
        write_mb_segid(bc, mi, &cpi->mb.e_mbd);
    } else {
      // Normal unpredicted coding
      write_mb_segid(bc, mi, &cpi->mb.e_mbd);
    }
  }

  if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
    skip_coeff = 1;
  } else {
    skip_coeff = m->mbmi.mb_skip_coeff;
    vp9_write(bc, skip_coeff,
              vp9_get_pred_prob(pc, xd, PRED_MBSKIP));
  }

  // Encode the reference frame.
  encode_ref_frame(bc, pc, xd, segment_id, rf);

  if (mi->sb_type >= BLOCK_SIZE_SB8X8 && pc->txfm_mode == TX_MODE_SELECT &&
      !(rf != INTRA_FRAME &&
        (skip_coeff || vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)))) {
    TX_SIZE sz = mi->txfm_size;
    // FIXME(rbultje) code ternary symbol once all experiments are merged
    vp9_write(bc, sz != TX_4X4, pc->prob_tx[0]);
    if (mi->sb_type >= BLOCK_SIZE_MB16X16 && sz != TX_4X4) {
      vp9_write(bc, sz != TX_8X8, pc->prob_tx[1]);
      if (mi->sb_type >= BLOCK_SIZE_SB32X32 && sz != TX_8X8)
        vp9_write(bc, sz != TX_16X16, pc->prob_tx[2]);
    }
  }

  if (rf == INTRA_FRAME) {
#ifdef ENTROPY_STATS
    active_section = 6;
#endif

    if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
      write_intra_mode(bc, mode, pc->fc.y_mode_prob);
    } else {
      int idx, idy;
      int bw = 1 << b_width_log2(mi->sb_type);
      int bh = 1 << b_height_log2(mi->sb_type);
      for (idy = 0; idy < 2; idy += bh)
        for (idx = 0; idx < 2; idx += bw)
          write_intra_mode(bc, m->bmi[idy * 2 + idx].as_mode.first,
                           pc->fc.y_mode_prob);
    }
    write_intra_mode(bc, mi->uv_mode,
                     pc->fc.uv_mode_prob[mode]);
  } else {
    vp9_prob mv_ref_p[VP9_MVREFS - 1];

    vp9_mv_ref_probs(&cpi->common, mv_ref_p, mi->mb_mode_context[rf]);

#ifdef ENTROPY_STATS
    active_section = 3;
#endif

    // If segment skip is not enabled code the mode.
    if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
      if (mi->sb_type >= BLOCK_SIZE_SB8X8)
        write_sb_mv_ref(bc, mode, mv_ref_p);
      vp9_accum_mv_refs(&cpi->common, mode, mi->mb_mode_context[rf]);
    }

    if (is_inter_mode(mode)) {
      if (cpi->common.mcomp_filter_type == SWITCHABLE) {
        write_token(bc, vp9_switchable_interp_tree,
                    vp9_get_pred_probs(&cpi->common, xd,
                                       PRED_SWITCHABLE_INTERP),
                    vp9_switchable_interp_encodings +
                    vp9_switchable_interp_map[mi->interp_filter]);
      } else {
        assert(mi->interp_filter == cpi->common.mcomp_filter_type);
      }
    }

    // does the feature use compound prediction or not
    // (if not specified at the frame/segment level)
    if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
      vp9_write(bc, mi->second_ref_frame > INTRA_FRAME,
                vp9_get_pred_prob(pc, xd, PRED_COMP));
    }

    switch (mode) { /* new, split require MVs */
      case NEWMV:
#ifdef ENTROPY_STATS
        active_section = 5;
#endif
        vp9_encode_mv(bc,
                      &mi->mv[0].as_mv, &mi->best_mv.as_mv,
                      nmvc, xd->allow_high_precision_mv);

        if (mi->second_ref_frame > 0)
          vp9_encode_mv(bc,
                        &mi->mv[1].as_mv, &mi->best_second_mv.as_mv,
                        nmvc, xd->allow_high_precision_mv);
        break;
      case SPLITMV: {
        int j;
        MB_PREDICTION_MODE blockmode;
        int_mv blockmv;
        int bwl = b_width_log2(mi->sb_type), bw = 1 << bwl;
        int bhl = b_height_log2(mi->sb_type), bh = 1 << bhl;
        int idx, idy;
        for (idy = 0; idy < 2; idy += bh) {
          for (idx = 0; idx < 2; idx += bw) {
            j = idy * 2 + idx;
            blockmode = cpi->mb.partition_info->bmi[j].mode;
            blockmv = cpi->mb.partition_info->bmi[j].mv;
            write_sb_mv_ref(bc, blockmode, mv_ref_p);
            vp9_accum_mv_refs(&cpi->common, blockmode, mi->mb_mode_context[rf]);
            if (blockmode == NEWMV) {
#ifdef ENTROPY_STATS
              active_section = 11;
#endif
              vp9_encode_mv(bc, &blockmv.as_mv, &mi->best_mv.as_mv,
                            nmvc, xd->allow_high_precision_mv);

              if (mi->second_ref_frame > 0)
                vp9_encode_mv(bc,
                              &cpi->mb.partition_info->bmi[j].second_mv.as_mv,
                              &mi->best_second_mv.as_mv,
                              nmvc, xd->allow_high_precision_mv);
            }
          }
        }

#ifdef MODE_STATS
        ++count_mb_seg[mi->partitioning];
#endif
        break;
      }
      default:
        break;
    }
  }
}

static void write_mb_modes_kf(const VP9_COMP *cpi,
                              MODE_INFO *m,
                              vp9_writer *bc, int mi_row, int mi_col) {
  const VP9_COMMON *const c = &cpi->common;
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  const int ym = m->mbmi.mode;
  const int mis = c->mode_info_stride;
  const int segment_id = m->mbmi.segment_id;
  int skip_coeff;

  if (xd->update_mb_segmentation_map)
    write_mb_segid(bc, &m->mbmi, xd);

  if (vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
    skip_coeff = 1;
  } else {
    skip_coeff = m->mbmi.mb_skip_coeff;
    vp9_write(bc, skip_coeff, vp9_get_pred_prob(c, xd, PRED_MBSKIP));
  }

  if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8 && c->txfm_mode == TX_MODE_SELECT) {
    TX_SIZE sz = m->mbmi.txfm_size;
    // FIXME(rbultje) code ternary symbol once all experiments are merged
    vp9_write(bc, sz != TX_4X4, c->prob_tx[0]);
    if (m->mbmi.sb_type >= BLOCK_SIZE_MB16X16 && sz != TX_4X4) {
      vp9_write(bc, sz != TX_8X8, c->prob_tx[1]);
      if (m->mbmi.sb_type >= BLOCK_SIZE_SB32X32 && sz != TX_8X8)
        vp9_write(bc, sz != TX_16X16, c->prob_tx[2]);
    }
  }

  if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
    const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis);
    const MB_PREDICTION_MODE L = xd->left_available ?
                                 left_block_mode(m, 0) : DC_PRED;
    write_intra_mode(bc, ym, c->kf_y_mode_prob[A][L]);
  } else {
    int idx, idy;
    int bw = 1 << b_width_log2(m->mbmi.sb_type);
    int bh = 1 << b_height_log2(m->mbmi.sb_type);
    for (idy = 0; idy < 2; idy += bh) {
      for (idx = 0; idx < 2; idx += bw) {
        int i = idy * 2 + idx;
        const MB_PREDICTION_MODE A = above_block_mode(m, i, mis);
        const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
                                     left_block_mode(m, i) : DC_PRED;
        const int bm = m->bmi[i].as_mode.first;
#ifdef ENTROPY_STATS
        ++intra_mode_stats[A][L][bm];
#endif
        write_intra_mode(bc, bm, c->kf_y_mode_prob[A][L]);
      }
    }
  }

  write_intra_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
}

static void write_modes_b(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
                          TOKENEXTRA **tok, TOKENEXTRA *tok_end,
                          int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;

  if (m->mbmi.sb_type < BLOCK_SIZE_SB8X8)
    if (xd->ab_index > 0)
      return;
  xd->mode_info_context = m;
  set_mi_row_col(&cpi->common, xd, mi_row,
                 1 << mi_height_log2(m->mbmi.sb_type),
                 mi_col, 1 << mi_width_log2(m->mbmi.sb_type));
  if (cm->frame_type == KEY_FRAME) {
    write_mb_modes_kf(cpi, m, bc, mi_row, mi_col);
#ifdef ENTROPY_STATS
    active_section = 8;
#endif
  } else {
    pack_inter_mode_mvs(cpi, m, bc, mi_row, mi_col);
#ifdef ENTROPY_STATS
    active_section = 1;
#endif
  }

  assert(*tok < tok_end);
  pack_mb_tokens(bc, tok, tok_end);
}

static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
                           TOKENEXTRA **tok, TOKENEXTRA *tok_end,
                           int mi_row, int mi_col,
BLOCK_SIZE_TYPE bsize) {
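  // Recursively code this block: write the partition type, code the modes
  // and tokens of the resulting sub-blocks, then update the partition
  // context.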
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *xd = &cpi->mb.e_mbd;
  const int mis = cm->mode_info_stride;
  int bwl, bhl;
  int bsl = b_width_log2(bsize);
  int bs = (1 << bsl) / 4;  // mode_info step for subsize
  int n;
  PARTITION_TYPE partition;
  BLOCK_SIZE_TYPE subsize;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  bwl = b_width_log2(m->mbmi.sb_type);
  bhl = b_height_log2(m->mbmi.sb_type);

  // parse the partition type
  if ((bwl == bsl) && (bhl == bsl))
    partition = PARTITION_NONE;
  else if ((bwl == bsl) && (bhl < bsl))
    partition = PARTITION_HORZ;
  else if ((bwl < bsl) && (bhl == bsl))
    partition = PARTITION_VERT;
  else if ((bwl < bsl) && (bhl < bsl))
    partition = PARTITION_SPLIT;
  else
    assert(0);

  if (bsize < BLOCK_SIZE_SB8X8)
    if (xd->ab_index > 0)
      return;

  if (bsize >= BLOCK_SIZE_SB8X8) {
    int pl;
    xd->left_seg_context = cm->left_seg_context + (mi_row & MI_MASK);
    xd->above_seg_context = cm->above_seg_context + mi_col;
    pl = partition_plane_context(xd, bsize);
    // encode the partition information
    write_token(bc, vp9_partition_tree, cm->fc.partition_prob[pl],
                vp9_partition_encodings + partition);
  }

  subsize = get_subsize(bsize, partition);
  *(get_sb_index(xd, subsize)) = 0;

  switch (partition) {
    case PARTITION_NONE:
      write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col);
      break;
    case PARTITION_HORZ:
      write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col);
      *(get_sb_index(xd, subsize)) = 1;
      if ((mi_row + bs) < cm->mi_rows)
        write_modes_b(cpi, m + bs * mis, bc, tok, tok_end, mi_row + bs, mi_col);
      break;
    case PARTITION_VERT:
      write_modes_b(cpi, m, bc, tok, tok_end, mi_row, mi_col);
      *(get_sb_index(xd, subsize)) = 1;
      if ((mi_col + bs) < cm->mi_cols)
        write_modes_b(cpi, m + bs, bc, tok, tok_end, mi_row, mi_col + bs);
      break;
    case PARTITION_SPLIT:
      for (n = 0; n < 4; n++) {
        int j = n >> 1, i = n & 0x01;
        *(get_sb_index(xd, subsize)) = n;
        write_modes_sb(cpi, m + j * bs * mis + i * bs, bc, tok, tok_end,
                       mi_row + j * bs, mi_col + i * bs, subsize);
      }
      break;
    default:
      assert(0);
  }

  // update partition context
  if (bsize >= BLOCK_SIZE_SB8X8 &&
      (bsize == BLOCK_SIZE_SB8X8 || partition != PARTITION_SPLIT)) {
    set_partition_seg_context(cm, xd, mi_row, mi_col);
    update_partition_context(xd, subsize, bsize);
  }
}

static void write_modes(VP9_COMP *cpi, vp9_writer* const bc,
TOKENEXTRA **tok, TOKENEXTRA *tok_end) {
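  // Mode/token pass for the current tile: walk the 64x64 superblocks in
  // raster order and code each of them with write_modes_sb().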
  VP9_COMMON *const c = &cpi->common;
  const int mis = c->mode_info_stride;
  MODE_INFO *m, *m_ptr = c->mi;
  int mi_row, mi_col;

  m_ptr += c->cur_tile_mi_col_start + c->cur_tile_mi_row_start * mis;
  vpx_memset(c->above_seg_context, 0, sizeof(PARTITION_CONTEXT) *
             mi_cols_aligned_to_sb(c));

  for (mi_row = c->cur_tile_mi_row_start;
       mi_row < c->cur_tile_mi_row_end;
       mi_row += 8, m_ptr += 8 * mis) {
    m = m_ptr;
    vpx_memset(c->left_seg_context, 0, sizeof(c->left_seg_context));
    for (mi_col = c->cur_tile_mi_col_start;
         mi_col < c->cur_tile_mi_col_end;
         mi_col += 64 / MI_SIZE, m += 64 / MI_SIZE)
      write_modes_sb(cpi, m, bc, tok, tok_end, mi_row, mi_col,
                     BLOCK_SIZE_SB64X64);
  }
}

/* This function is used for debugging probability trees. */
static void print_prob_tree(vp9_coeff_probs *coef_probs, int block_types) {
  /* print coef probability tree */
  int i, j, k, l, m;
  FILE *f = fopen("enc_tree_probs.txt", "a");
  fprintf(f, "{\n");
  for (i = 0; i < block_types; i++) {
    fprintf(f, " {\n");
    for (j = 0; j < REF_TYPES; ++j) {
      fprintf(f, " {\n");
      for (k = 0; k < COEF_BANDS; k++) {
        fprintf(f, " {\n");
        for (l = 0; l < PREV_COEF_CONTEXTS; l++) {
          fprintf(f, " {");
          for (m = 0; m < ENTROPY_NODES; m++) {
            fprintf(f, "%3u, ",
                    (unsigned int)(coef_probs[i][j][k][l][m]));
          }
        }
        fprintf(f, " }\n");
      }
      fprintf(f, " }\n");
    }
    fprintf(f, " }\n");
  }
  fprintf(f, "}\n");
  fclose(f);
}

static void build_tree_distribution(vp9_coeff_probs_model *coef_probs,
                                    vp9_coeff_count *coef_counts,
                                    unsigned int (*eob_branch_ct)[REF_TYPES]
                                                                 [COEF_BANDS]
                                                                 [PREV_COEF_CONTEXTS],
#ifdef ENTROPY_STATS
                                    VP9_COMP *cpi,
                                    vp9_coeff_accum *context_counters,
#endif
                                    vp9_coeff_stats_model *coef_branch_ct,
                                    int block_types) {
  int i, j, k, l;
#ifdef ENTROPY_STATS
  int t = 0;
#endif
  unsigned int model_counts[UNCONSTRAINED_NODES + 1];

  for (i = 0; i < block_types; ++i) {
    for (j = 0; j < REF_TYPES; ++j) {
      for (k = 0; k < COEF_BANDS; ++k) {
        for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
          if (l >= 3 && k == 0)
            continue;
          vp9_full_to_model_count(model_counts, coef_counts[i][j][k][l]);
          vp9_tree_probs_from_distribution(vp9_coefmodel_tree,
                                           coef_probs[i][j][k][l],
                                           coef_branch_ct[i][j][k][l],
                                           model_counts, 0);
#if CONFIG_BALANCED_COEFTREE
          coef_branch_ct[i][j][k][l][1][1] = eob_branch_ct[i][j][k][l] -
                                             coef_branch_ct[i][j][k][l][1][0];
          coef_probs[i][j][k][l][1] =
              get_binary_prob(coef_branch_ct[i][j][k][l][1][0],
                              coef_branch_ct[i][j][k][l][1][1]);
#else
          coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
                                             coef_branch_ct[i][j][k][l][0][0];
          coef_probs[i][j][k][l][0] =
              get_binary_prob(coef_branch_ct[i][j][k][l][0][0],
                              coef_branch_ct[i][j][k][l][0][1]);
#endif
#ifdef ENTROPY_STATS
          if (!cpi->dummy_packing) {
            for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
              context_counters[i][j][k][l][t] += coef_counts[i][j][k][l][t];
            context_counters[i][j][k][l][MAX_ENTROPY_TOKENS] +=
                eob_branch_ct[i][j][k][l];
          }
#endif
        }
      }
    }
  }
}

static void build_coeff_contexts(VP9_COMP *cpi) {
  build_tree_distribution(cpi->frame_coef_probs_4x4,
                          cpi->coef_counts_4x4,
                          cpi->common.fc.eob_branch_counts[TX_4X4],
#ifdef ENTROPY_STATS
                          cpi, context_counters_4x4,
#endif
                          cpi->frame_branch_ct_4x4, BLOCK_TYPES);
  build_tree_distribution(cpi->frame_coef_probs_8x8,
                          cpi->coef_counts_8x8,
                          cpi->common.fc.eob_branch_counts[TX_8X8],
#ifdef ENTROPY_STATS
                          cpi, context_counters_8x8,
#endif
                          cpi->frame_branch_ct_8x8, BLOCK_TYPES);
  build_tree_distribution(cpi->frame_coef_probs_16x16,
                          cpi->coef_counts_16x16,
                          cpi->common.fc.eob_branch_counts[TX_16X16],
#ifdef ENTROPY_STATS
                          cpi, context_counters_16x16,
#endif
                          cpi->frame_branch_ct_16x16, BLOCK_TYPES);
  build_tree_distribution(cpi->frame_coef_probs_32x32,
                          cpi->coef_counts_32x32,
                          cpi->common.fc.eob_branch_counts[TX_32X32],
#ifdef ENTROPY_STATS
                          cpi, context_counters_32x32,
#endif
                          cpi->frame_branch_ct_32x32, BLOCK_TYPES);
}

static void update_coef_probs_common(
    vp9_writer* const bc,
    VP9_COMP *cpi,
#ifdef ENTROPY_STATS
    vp9_coeff_stats *tree_update_hist,
#endif
    vp9_coeff_probs_model *new_frame_coef_probs,
    vp9_coeff_probs_model *old_frame_coef_probs,
    vp9_coeff_stats_model *frame_branch_ct,
    TX_SIZE tx_size) {
  int i, j, k, l, t;
  int update[2] = {0, 0};
  int savings;

  const int entropy_nodes_update = UNCONSTRAINED_NODES;
  // vp9_prob bestupd = find_coef_update_prob(cpi);

  const int tstart = 0;
  /* dry run to see if there is any update at all needed */
  savings = 0;
  for (i = 0; i < BLOCK_TYPES; ++i) {
    for (j = 0; j < REF_TYPES; ++j) {
      for (k = 0; k < COEF_BANDS; ++k) {
        // int prev_coef_savings[ENTROPY_NODES] = {0};
        for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
          for (t = tstart; t < entropy_nodes_update; ++t) {
            vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
            const vp9_prob oldp = old_frame_coef_probs[i][j][k][l][t];
            const vp9_prob upd = vp9_coef_update_prob[t];
            int s;  // = prev_coef_savings[t];
            int u = 0;

            if (l >= 3 && k == 0)
              continue;
            if (t == PIVOT_NODE)
              s = prob_diff_update_savings_search_model(
                  frame_branch_ct[i][j][k][l][0],
                  old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
            else
              s = prob_diff_update_savings_search(
                  frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
            if (s > 0 && newp != oldp)
              u = 1;
            if (u)
              savings += s - (int)(vp9_cost_zero(upd));
            else
              savings -= (int)(vp9_cost_zero(upd));
            update[u]++;
          }
        }
      }
    }
  }

  // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
  /* Is coef updated at all */
  if (update[1] == 0 || savings < 0) {
    vp9_write_bit(bc, 0);
    return;
  }
  vp9_write_bit(bc, 1);
  for (i = 0; i < BLOCK_TYPES; ++i) {
    for (j = 0; j < REF_TYPES; ++j) {
      for (k = 0; k < COEF_BANDS; ++k) {
        // int prev_coef_savings[ENTROPY_NODES] = {0};
        for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
          // calc probs and branch cts for this frame only
          for (t = tstart; t < entropy_nodes_update; ++t) {
            vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
            vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
            const vp9_prob upd = vp9_coef_update_prob[t];
            int s;  // = prev_coef_savings[t];
            int u = 0;
            if (l >= 3 && k == 0)
              continue;

            if (t == PIVOT_NODE)
              s = prob_diff_update_savings_search_model(
                  frame_branch_ct[i][j][k][l][0],
                  old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
            else
              s = prob_diff_update_savings_search(
                  frame_branch_ct[i][j][k][l][t],
                  *oldp, &newp, upd);
            if (s > 0 && newp != *oldp)
              u = 1;
            vp9_write(bc, u, upd);
#ifdef ENTROPY_STATS
            if (!cpi->dummy_packing)
              ++tree_update_hist[i][j][k][l][t][u];
#endif
            if (u) {
              /* send/use new probability */
              write_prob_diff_update(bc, newp, *oldp);
              *oldp = newp;
            }
          }
        }
      }
    }
  }
}

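/*
 * Note on update_coef_probs_common() above: it makes two passes over the
 * coefficient probability tables.  The first pass is a dry run that only
 * accumulates the potential rate savings of every candidate node update; if
 * nothing is worth its signalling cost, a single "no update" bit is written
 * and the function returns early.  The second pass repeats the same search,
 * this time emitting the per-node update flags and, for accepted nodes, the
 * probability differences via write_prob_diff_update().
 */
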
static void update_coef_probs(VP9_COMP* const cpi, vp9_writer* const bc) {
  vp9_clear_system_state();

  // Build the coefficient contexts based on counts collected in encode loop
  build_coeff_contexts(cpi);

  update_coef_probs_common(bc,
                           cpi,
#ifdef ENTROPY_STATS
                           tree_update_hist_4x4,
#endif
                           cpi->frame_coef_probs_4x4,
                           cpi->common.fc.coef_probs_4x4,
                           cpi->frame_branch_ct_4x4,
                           TX_4X4);

  /* do not do this if not even allowed */
  if (cpi->common.txfm_mode != ONLY_4X4) {
    update_coef_probs_common(bc,
                             cpi,
#ifdef ENTROPY_STATS
                             tree_update_hist_8x8,
#endif
                             cpi->frame_coef_probs_8x8,
                             cpi->common.fc.coef_probs_8x8,
                             cpi->frame_branch_ct_8x8,
                             TX_8X8);
  }

  if (cpi->common.txfm_mode > ALLOW_8X8) {
    update_coef_probs_common(bc,
                             cpi,
#ifdef ENTROPY_STATS
                             tree_update_hist_16x16,
#endif
                             cpi->frame_coef_probs_16x16,
                             cpi->common.fc.coef_probs_16x16,
                             cpi->frame_branch_ct_16x16,
                             TX_16X16);
  }

  if (cpi->common.txfm_mode > ALLOW_16X16) {
    update_coef_probs_common(bc,
                             cpi,
#ifdef ENTROPY_STATS
                             tree_update_hist_32x32,
#endif
                             cpi->frame_coef_probs_32x32,
                             cpi->common.fc.coef_probs_32x32,
                             cpi->frame_branch_ct_32x32,
                             TX_32X32);
  }
}

static void segment_reference_frames(VP9_COMP *cpi) {
  VP9_COMMON *oci = &cpi->common;
  MODE_INFO *mi = oci->mi;
  int ref[MAX_MB_SEGMENTS] = {0};
  int i, j;
  int mb_index = 0;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;

  for (i = 0; i < oci->mb_rows; i++) {
    for (j = 0; j < oci->mb_cols; j++, mb_index++)
      ref[mi[mb_index].mbmi.segment_id] |= (1 << mi[mb_index].mbmi.ref_frame);
    mb_index++;
  }
  for (i = 0; i < MAX_MB_SEGMENTS; i++) {
    vp9_enable_segfeature(xd, i, SEG_LVL_REF_FRAME);
    vp9_set_segdata(xd, i, SEG_LVL_REF_FRAME, ref[i]);
  }
}

static void encode_loopfilter(VP9_COMMON *pc, MACROBLOCKD *xd, vp9_writer *w) {
  int i;

  // Encode the loop filter level and type
  vp9_write_literal(w, pc->filter_level, 6);
  vp9_write_literal(w, pc->sharpness_level, 3);

  // Write out loop filter deltas applied at the MB level based on mode or
  // ref frame (if they are enabled).
  vp9_write_bit(w, xd->mode_ref_lf_delta_enabled);

  if (xd->mode_ref_lf_delta_enabled) {
    // Do the deltas need to be updated
    vp9_write_bit(w, xd->mode_ref_lf_delta_update);
    if (xd->mode_ref_lf_delta_update) {
      // Send update
      for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
        const int delta = xd->ref_lf_deltas[i];

        // Frame level data
        if (delta != xd->last_ref_lf_deltas[i]) {
          xd->last_ref_lf_deltas[i] = delta;
          vp9_write_bit(w, 1);

          if (delta > 0) {
            vp9_write_literal(w, delta & 0x3F, 6);
            vp9_write_bit(w, 0);  // sign
          } else {
            assert(delta < 0);
            vp9_write_literal(w, (-delta) & 0x3F, 6);
            vp9_write_bit(w, 1);  // sign
          }
        } else {
          vp9_write_bit(w, 0);
        }
      }

      // Send update
      for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
        const int delta = xd->mode_lf_deltas[i];
        if (delta != xd->last_mode_lf_deltas[i]) {
          xd->last_mode_lf_deltas[i] = delta;
          vp9_write_bit(w, 1);

          if (delta > 0) {
            vp9_write_literal(w, delta & 0x3F, 6);
            vp9_write_bit(w, 0);  // sign
          } else {
            assert(delta < 0);
            vp9_write_literal(w, (-delta) & 0x3F, 6);
            vp9_write_bit(w, 1);  // sign
          }
        } else {
          vp9_write_bit(w, 0);
        }
      }
    }
  }
}

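/* Hypothetical refactoring sketch, not part of the tree: both delta-update
 * loops in encode_loopfilter() above emit the same magnitude/sign pattern, so
 * a helper along these lines could replace the duplicated branches.  It only
 * relies on vp9_write_literal() and vp9_write_bit(), which are already used
 * above. */
static void write_lf_delta_sketch(vp9_writer *w, int delta) {
  vp9_write_literal(w, abs(delta) & 0x3F, 6);  // 6-bit magnitude
  vp9_write_bit(w, delta < 0);                 // sign bit, 1 means negative
}
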
static void put_delta_q(vp9_writer *bc, int delta_q) {
  if (delta_q != 0) {
    vp9_write_bit(bc, 1);
    vp9_write_literal(bc, abs(delta_q), 4);
    vp9_write_bit(bc, delta_q < 0);
  } else {
    vp9_write_bit(bc, 0);
  }
}

static void encode_quantization(VP9_COMMON *pc, vp9_writer *w) {
  vp9_write_literal(w, pc->base_qindex, QINDEX_BITS);
  put_delta_q(w, pc->y_dc_delta_q);
  put_delta_q(w, pc->uv_dc_delta_q);
  put_delta_q(w, pc->uv_ac_delta_q);
}

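/* Illustrative sketch only (not part of the encoder): the bit cost of the
 * delta-q syntax written by put_delta_q() above -- a single flag bit when the
 * delta is zero, otherwise the flag plus a 4-bit magnitude and a sign bit. */
static int delta_q_bits_sketch(int delta_q) {
  return delta_q == 0 ? 1 : 1 + 4 + 1;
}
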
static void encode_segmentation(VP9_COMP *cpi, vp9_writer *w) {
  int i, j;
  VP9_COMMON *const pc = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;

  vp9_write_bit(w, xd->segmentation_enabled);
  if (!xd->segmentation_enabled)
    return;

  // Segmentation map
  vp9_write_bit(w, xd->update_mb_segmentation_map);
#if CONFIG_IMPLICIT_SEGMENTATION
  vp9_write_bit(w, xd->allow_implicit_segment_update);
#endif
  if (xd->update_mb_segmentation_map) {
    // Select the coding strategy (temporal or spatial)
    vp9_choose_segmap_coding_method(cpi);
    // Write out probabilities used to decode unpredicted macro-block segments
    for (i = 0; i < MB_SEG_TREE_PROBS; i++) {
      const int prob = xd->mb_segment_tree_probs[i];
      if (prob != MAX_PROB) {
        vp9_write_bit(w, 1);
        vp9_write_prob(w, prob);
      } else {
        vp9_write_bit(w, 0);
      }
    }

    // Write out the chosen coding method.
    vp9_write_bit(w, pc->temporal_update);
    if (pc->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++) {
        const int prob = pc->segment_pred_probs[i];
        if (prob != MAX_PROB) {
          vp9_write_bit(w, 1);
          vp9_write_prob(w, prob);
        } else {
          vp9_write_bit(w, 0);
        }
      }
    }
  }

  // Segmentation data
  vp9_write_bit(w, xd->update_mb_segmentation_data);
  // segment_reference_frames(cpi);
  if (xd->update_mb_segmentation_data) {
    vp9_write_bit(w, xd->mb_segment_abs_delta);

    for (i = 0; i < MAX_MB_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        const int data = vp9_get_segdata(xd, i, j);
        const int data_max = vp9_seg_feature_data_max(j);

        if (vp9_segfeature_active(xd, i, j)) {
          vp9_write_bit(w, 1);

          if (vp9_is_segfeature_signed(j)) {
            if (data < 0) {
              vp9_encode_unsigned_max(w, -data, data_max);
              vp9_write_bit(w, 1);
            } else {
              vp9_encode_unsigned_max(w, data, data_max);
              vp9_write_bit(w, 0);
            }
          } else {
            vp9_encode_unsigned_max(w, data, data_max);
          }
        } else {
          vp9_write_bit(w, 0);
        }
      }
    }
  }
}

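/*
 * Per-feature syntax written by encode_segmentation() above: one bit signals
 * whether the feature is active for the segment; if it is, the magnitude is
 * coded with vp9_encode_unsigned_max() against the feature's data_max, and
 * signed features append a sign bit (1 = negative).  An inactive feature
 * therefore costs exactly one bit.
 */
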
void write_uncompressed_header(VP9_COMMON *cm,
                               struct vp9_write_bit_buffer *wb) {
  const int scaling_active = cm->width != cm->display_width ||
                             cm->height != cm->display_height;

  vp9_wb_write_bit(wb, cm->frame_type);
  vp9_wb_write_literal(wb, cm->version, 3);
  vp9_wb_write_bit(wb, cm->show_frame);
  vp9_wb_write_bit(wb, scaling_active);
  vp9_wb_write_bit(wb, cm->subsampling_x);
  vp9_wb_write_bit(wb, cm->subsampling_y);

  if (cm->frame_type == KEY_FRAME) {
    vp9_wb_write_literal(wb, SYNC_CODE_0, 8);
    vp9_wb_write_literal(wb, SYNC_CODE_1, 8);
    vp9_wb_write_literal(wb, SYNC_CODE_2, 8);
  }

  if (scaling_active) {
    vp9_wb_write_literal(wb, cm->display_width, 16);
    vp9_wb_write_literal(wb, cm->display_height, 16);
  }

  vp9_wb_write_literal(wb, cm->width, 16);
  vp9_wb_write_literal(wb, cm->height, 16);

  vp9_wb_write_literal(wb, cm->frame_context_idx, NUM_FRAME_CONTEXTS_LG2);
  vp9_wb_write_bit(wb, cm->clr_type);

  vp9_wb_write_bit(wb, cm->error_resilient_mode);
  if (!cm->error_resilient_mode) {
    vp9_wb_write_bit(wb, cm->refresh_frame_context);
    vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
  }
}

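/* Illustrative sketch only (not part of the encoder): the number of bits the
 * uncompressed header occupies, following the exact write order of
 * write_uncompressed_header() above.  Handy as a cross-check when budgeting
 * the fixed-size part of a packed frame. */
static int uncompressed_header_bits_sketch(const VP9_COMMON *cm) {
  const int scaling_active = cm->width != cm->display_width ||
                             cm->height != cm->display_height;
  int bits = 1 + 3 + 1 + 1 + 1 + 1;    // frame type, version, show_frame,
                                       // scaling flag, subsampling x and y
  if (cm->frame_type == KEY_FRAME)
    bits += 3 * 8;                     // sync code
  if (scaling_active)
    bits += 2 * 16;                    // display width/height
  bits += 2 * 16;                      // coded width/height
  bits += NUM_FRAME_CONTEXTS_LG2 + 1;  // frame context index, clr_type
  bits += 1;                           // error resilient mode flag
  if (!cm->error_resilient_mode)
    bits += 2;                         // refresh context, parallel decoding
  return bits;
}
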
void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
  int i, bytes_packed;
  VP9_COMMON *const pc = &cpi->common;
  vp9_writer header_bc, residual_bc;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;

  uint8_t *cx_data = dest;
  struct vp9_write_bit_buffer wb = {dest, 0};
  struct vp9_write_bit_buffer first_partition_size_wb;

  write_uncompressed_header(pc, &wb);
  first_partition_size_wb = wb;
  vp9_wb_write_literal(&wb, 0, 16);  // don't know in advance first part. size

  bytes_packed = vp9_rb_bytes_written(&wb);
  cx_data += bytes_packed;

  compute_update_table();

  vp9_start_encode(&header_bc, cx_data);

  encode_loopfilter(pc, xd, &header_bc);

  encode_quantization(pc, &header_bc);

  // When there is a key frame all reference buffers are updated using the new key frame
  if (pc->frame_type != KEY_FRAME) {
    int refresh_mask;

    // Should the GF or ARF be updated using the transmitted frame or buffer
#if CONFIG_MULTIPLE_ARF
    if (!cpi->multi_arf_enabled && cpi->refresh_golden_frame &&
        !cpi->refresh_alt_ref_frame) {
#else
    if (cpi->refresh_golden_frame && !cpi->refresh_alt_ref_frame) {
#endif
      /* Preserve the previously existing golden frame and update the frame in
       * the alt ref slot instead. This is highly specific to the use of
       * alt-ref as a forward reference, and this needs to be generalized as
       * other uses are implemented (like RTC/temporal scaling)
       *
       * gld_fb_idx and alt_fb_idx need to be swapped for future frames, but
       * that happens in vp9_onyx_if.c:update_reference_frames() so that it can
       * be done outside of the recode loop.
       */
      refresh_mask = (cpi->refresh_last_frame << cpi->lst_fb_idx) |
                     (cpi->refresh_golden_frame << cpi->alt_fb_idx);
    } else {
      int arf_idx = cpi->alt_fb_idx;
#if CONFIG_MULTIPLE_ARF
      // Determine which ARF buffer to use to encode this ARF frame.
      if (cpi->multi_arf_enabled) {
        int sn = cpi->sequence_number;
        arf_idx = (cpi->frame_coding_order[sn] < 0) ?
                  cpi->arf_buffer_idx[sn + 1] :
                  cpi->arf_buffer_idx[sn];
      }
#endif
      refresh_mask = (cpi->refresh_last_frame << cpi->lst_fb_idx) |
                     (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
                     (cpi->refresh_alt_ref_frame << arf_idx);
    }

    vp9_write_literal(&header_bc, refresh_mask, NUM_REF_FRAMES);
    vp9_write_literal(&header_bc, cpi->lst_fb_idx, NUM_REF_FRAMES_LG2);
    vp9_write_literal(&header_bc, cpi->gld_fb_idx, NUM_REF_FRAMES_LG2);
    vp9_write_literal(&header_bc, cpi->alt_fb_idx, NUM_REF_FRAMES_LG2);

    // Indicate the sign bias for each reference frame buffer.
    for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
      vp9_write_bit(&header_bc, pc->ref_frame_sign_bias[LAST_FRAME + i]);
    }

    // Signal whether to allow high MV precision
    vp9_write_bit(&header_bc, (xd->allow_high_precision_mv) ? 1 : 0);
    if (pc->mcomp_filter_type == SWITCHABLE) {
      /* Check to see if only one of the filters is actually used */
      int count[VP9_SWITCHABLE_FILTERS];
      int i, j, c = 0;
      for (i = 0; i < VP9_SWITCHABLE_FILTERS; ++i) {
        count[i] = 0;
        for (j = 0; j <= VP9_SWITCHABLE_FILTERS; ++j)
          count[i] += cpi->switchable_interp_count[j][i];
        c += (count[i] > 0);
      }
      if (c == 1) {
        /* Only one filter is used. So set the filter at frame level */
        for (i = 0; i < VP9_SWITCHABLE_FILTERS; ++i) {
          if (count[i]) {
            pc->mcomp_filter_type = vp9_switchable_interp[i];
            break;
          }
        }
      }
    }
    // Signal the type of subpel filter to use
    vp9_write_bit(&header_bc, (pc->mcomp_filter_type == SWITCHABLE));
    if (pc->mcomp_filter_type != SWITCHABLE)
      vp9_write_literal(&header_bc, (pc->mcomp_filter_type), 2);
  }

#ifdef ENTROPY_STATS
  if (pc->frame_type == INTER_FRAME)
    active_section = 0;
  else
    active_section = 7;
#endif

  encode_segmentation(cpi, &header_bc);

  // Encode the common prediction model status flag probability updates for
  // the reference frame
  update_refpred_stats(cpi);
  if (pc->frame_type != KEY_FRAME) {
    for (i = 0; i < PREDICTION_PROBS; i++) {
      if (cpi->ref_pred_probs_update[i]) {
        vp9_write_bit(&header_bc, 1);
        vp9_write_prob(&header_bc, pc->ref_pred_probs[i]);
      } else {
        vp9_write_bit(&header_bc, 0);
      }
    }
  }

  if (cpi->mb.e_mbd.lossless) {
    pc->txfm_mode = ONLY_4X4;
  } else {
    if (pc->txfm_mode == TX_MODE_SELECT) {
      pc->prob_tx[0] = get_prob(cpi->txfm_count_32x32p[TX_4X4] +
                                cpi->txfm_count_16x16p[TX_4X4] +
                                cpi->txfm_count_8x8p[TX_4X4],
                                cpi->txfm_count_32x32p[TX_4X4] +
                                cpi->txfm_count_32x32p[TX_8X8] +
                                cpi->txfm_count_32x32p[TX_16X16] +
                                cpi->txfm_count_32x32p[TX_32X32] +
                                cpi->txfm_count_16x16p[TX_4X4] +
                                cpi->txfm_count_16x16p[TX_8X8] +
                                cpi->txfm_count_16x16p[TX_16X16] +
                                cpi->txfm_count_8x8p[TX_4X4] +
                                cpi->txfm_count_8x8p[TX_8X8]);
      pc->prob_tx[1] = get_prob(cpi->txfm_count_32x32p[TX_8X8] +
                                cpi->txfm_count_16x16p[TX_8X8],
                                cpi->txfm_count_32x32p[TX_8X8] +
                                cpi->txfm_count_32x32p[TX_16X16] +
                                cpi->txfm_count_32x32p[TX_32X32] +
                                cpi->txfm_count_16x16p[TX_8X8] +
                                cpi->txfm_count_16x16p[TX_16X16]);
      pc->prob_tx[2] = get_prob(cpi->txfm_count_32x32p[TX_16X16],
                                cpi->txfm_count_32x32p[TX_16X16] +
                                cpi->txfm_count_32x32p[TX_32X32]);
    } else {
      pc->prob_tx[0] = 128;
      pc->prob_tx[1] = 128;
      pc->prob_tx[2] = 128;
    }
    vp9_write_literal(&header_bc, pc->txfm_mode <= 3 ? pc->txfm_mode : 3, 2);
    if (pc->txfm_mode > ALLOW_16X16) {
      vp9_write_bit(&header_bc, pc->txfm_mode == TX_MODE_SELECT);
    }
    if (pc->txfm_mode == TX_MODE_SELECT) {
      vp9_write_prob(&header_bc, pc->prob_tx[0]);
      vp9_write_prob(&header_bc, pc->prob_tx[1]);
      vp9_write_prob(&header_bc, pc->prob_tx[2]);
    }
  }

  // If appropriate update the inter mode probability context and code the
  // changes in the bitstream.
  if (pc->frame_type != KEY_FRAME) {
    int i, j;
    int new_context[INTER_MODE_CONTEXTS][4];
    if (!cpi->dummy_packing) {
      update_inter_mode_probs(pc, new_context);
    } else {
      // In dummy pack assume context unchanged.
      vpx_memcpy(new_context, pc->fc.vp9_mode_contexts,
                 sizeof(pc->fc.vp9_mode_contexts));
    }

    for (i = 0; i < INTER_MODE_CONTEXTS; i++) {
      for (j = 0; j < 4; j++) {
        if (new_context[i][j] != pc->fc.vp9_mode_contexts[i][j]) {
          vp9_write(&header_bc, 1, 252);
          vp9_write_prob(&header_bc, new_context[i][j]);

          // Only update the persistent copy if this is the "real pack"
          if (!cpi->dummy_packing) {
            pc->fc.vp9_mode_contexts[i][j] = new_context[i][j];
          }
        } else {
          vp9_write(&header_bc, 0, 252);
        }
      }
    }
  }

  vp9_clear_system_state();  // __asm emms;

  vp9_copy(cpi->common.fc.pre_coef_probs_4x4,
           cpi->common.fc.coef_probs_4x4);
  vp9_copy(cpi->common.fc.pre_coef_probs_8x8,
           cpi->common.fc.coef_probs_8x8);
  vp9_copy(cpi->common.fc.pre_coef_probs_16x16,
           cpi->common.fc.coef_probs_16x16);
  vp9_copy(cpi->common.fc.pre_coef_probs_32x32,
           cpi->common.fc.coef_probs_32x32);

  vp9_copy(cpi->common.fc.pre_y_mode_prob, cpi->common.fc.y_mode_prob);
  vp9_copy(cpi->common.fc.pre_uv_mode_prob, cpi->common.fc.uv_mode_prob);
  vp9_copy(cpi->common.fc.pre_partition_prob, cpi->common.fc.partition_prob);
  cpi->common.fc.pre_nmvc = cpi->common.fc.nmvc;
  vp9_zero(cpi->common.fc.mv_ref_ct);

  update_coef_probs(cpi, &header_bc);

#ifdef ENTROPY_STATS
  active_section = 2;
#endif

  vp9_update_skip_probs(cpi);
  for (i = 0; i < MBSKIP_CONTEXTS; ++i) {
    vp9_write_prob(&header_bc, pc->mbskip_pred_probs[i]);
  }

  if (pc->frame_type != KEY_FRAME) {
    // Update the probabilities used to encode reference frame data
    update_ref_probs(cpi);

#ifdef ENTROPY_STATS
    active_section = 1;
#endif

    if (pc->mcomp_filter_type == SWITCHABLE)
      update_switchable_interp_probs(cpi, &header_bc);

    vp9_write_prob(&header_bc, pc->prob_intra_coded);
    vp9_write_prob(&header_bc, pc->prob_last_coded);
    vp9_write_prob(&header_bc, pc->prob_gf_coded);

    {
      const int comp_pred_mode = cpi->common.comp_pred_mode;
      const int use_compound_pred = (comp_pred_mode != SINGLE_PREDICTION_ONLY);
      const int use_hybrid_pred = (comp_pred_mode == HYBRID_PREDICTION);

      vp9_write_bit(&header_bc, use_compound_pred);
      if (use_compound_pred) {
        vp9_write_bit(&header_bc, use_hybrid_pred);
        if (use_hybrid_pred) {
          for (i = 0; i < COMP_PRED_CONTEXTS; i++) {
            pc->prob_comppred[i] = get_binary_prob(cpi->single_pred_count[i],
                                                   cpi->comp_pred_count[i]);
            vp9_write_prob(&header_bc, pc->prob_comppred[i]);
          }
        }
      }
    }
    update_mbintra_mode_probs(cpi, &header_bc);

    for (i = 0; i < NUM_PARTITION_CONTEXTS; ++i) {
      vp9_prob Pnew[PARTITION_TYPES - 1];
      unsigned int bct[PARTITION_TYPES - 1][2];
      update_mode(&header_bc, PARTITION_TYPES, vp9_partition_encodings,
                  vp9_partition_tree, Pnew, pc->fc.partition_prob[i], bct,
                  (unsigned int *)cpi->partition_count[i]);
    }

    vp9_write_nmv_probs(cpi, xd->allow_high_precision_mv, &header_bc);
  }

  /* tiling */
  {
    int min_log2_tiles, delta_log2_tiles, n_tile_bits, n;

    vp9_get_tile_n_bits(pc, &min_log2_tiles, &delta_log2_tiles);
    n_tile_bits = pc->log2_tile_columns - min_log2_tiles;
    for (n = 0; n < delta_log2_tiles; n++) {
      if (n_tile_bits--) {
        vp9_write_bit(&header_bc, 1);
      } else {
        vp9_write_bit(&header_bc, 0);
        break;
      }
    }
    vp9_write_bit(&header_bc, pc->log2_tile_rows != 0);
    if (pc->log2_tile_rows != 0)
      vp9_write_bit(&header_bc, pc->log2_tile_rows != 1);
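    // The loop above codes log2_tile_columns as a unary offset from the
    // minimum allowed value: one '1' bit per step above the minimum,
    // terminated by a '0' unless the maximum (min_log2_tiles +
    // delta_log2_tiles) is reached.  Row tiling then takes one bit, plus a
    // second bit to distinguish two from four row-tiles when enabled.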
  }

  vp9_stop_encode(&header_bc);

  // first partition size
  assert(header_bc.pos <= 0xffff);
  vp9_wb_write_literal(&first_partition_size_wb, header_bc.pos, 16);
  *size = bytes_packed + header_bc.pos;

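  // The 16-bit value patched into first_partition_size_wb above is the size
  // in bytes of the compressed header that was just finished; the assert
  // checks that it fits the reserved field.  Everything that follows is
  // per-tile residual data.
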
  {
    int tile_row, tile_col, total_size = 0;
    unsigned char *data_ptr = cx_data + header_bc.pos;
    TOKENEXTRA *tok[1 << 6], *tok_end;

    tok[0] = cpi->tok;
    for (tile_col = 1; tile_col < pc->tile_columns; tile_col++)
      tok[tile_col] = tok[tile_col - 1] + cpi->tok_count[tile_col - 1];

    for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
      vp9_get_tile_row_offsets(pc, tile_row);
      tok_end = cpi->tok + cpi->tok_count[0];
      for (tile_col = 0; tile_col < pc->tile_columns;
           tile_col++, tok_end += cpi->tok_count[tile_col]) {
        vp9_get_tile_col_offsets(pc, tile_col);

        if (tile_col < pc->tile_columns - 1 || tile_row < pc->tile_rows - 1)
          vp9_start_encode(&residual_bc, data_ptr + total_size + 4);
        else
          vp9_start_encode(&residual_bc, data_ptr + total_size);
        write_modes(cpi, &residual_bc, &tok[tile_col], tok_end);
        vp9_stop_encode(&residual_bc);
        if (tile_col < pc->tile_columns - 1 || tile_row < pc->tile_rows - 1) {
          // size of this tile
          write_le32(data_ptr + total_size, residual_bc.pos);
          total_size += 4;
        }

        total_size += residual_bc.pos;
      }
    }

    assert((unsigned int)(tok[0] - cpi->tok) == cpi->tok_count[0]);
    for (tile_col = 1; tile_col < pc->tile_columns; tile_col++)
      assert((unsigned int)(tok[tile_col] - tok[tile_col - 1]) ==
             cpi->tok_count[tile_col]);

    *size += total_size;
  }
}

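/* Layout sketch (illustrative only, mirroring the packing above): after the
 * uncompressed header -- which embeds the 16-bit compressed-header size -- and
 * the compressed header itself, the tiles follow in raster order, each but the
 * very last one prefixed by its size as a 4-byte value.  The helper below
 * assumes write_le32() used above stores the least-significant byte first; it
 * is a sketch for walking the tile data, not code from the tree. */
static const unsigned char *next_tile_sketch(const unsigned char *tile_start,
                                             int is_last_tile,
                                             unsigned int data_left,
                                             unsigned int *tile_size) {
  if (is_last_tile) {
    *tile_size = data_left;      // the last tile runs to the end of the frame
    return tile_start + data_left;
  } else {
    const unsigned int sz = tile_start[0] | (tile_start[1] << 8) |
                            (tile_start[2] << 16) |
                            ((unsigned int)tile_start[3] << 24);
    *tile_size = sz;
    return tile_start + 4 + sz;  // skip the size field and the tile itself
  }
}
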
#ifdef ENTROPY_STATS
static void print_tree_update_for_type(FILE *f,
                                       vp9_coeff_stats *tree_update_hist,
                                       int block_types, const char *header) {
  int i, j, k, l, m;

  fprintf(f, "const vp9_coeff_prob %s = {\n", header);
  for (i = 0; i < block_types; i++) {
    fprintf(f, "  { \n");
    for (j = 0; j < REF_TYPES; j++) {
      fprintf(f, "  { \n");
      for (k = 0; k < COEF_BANDS; k++) {
        fprintf(f, "    {\n");
        for (l = 0; l < PREV_COEF_CONTEXTS; l++) {
          fprintf(f, "      {");
          for (m = 0; m < ENTROPY_NODES; m++) {
            fprintf(f, "%3d, ",
                    get_binary_prob(tree_update_hist[i][j][k][l][m][0],
                                    tree_update_hist[i][j][k][l][m][1]));
          }
          fprintf(f, "},\n");
        }
        fprintf(f, "},\n");
      }
      fprintf(f, "  },\n");
    }
    fprintf(f, " },\n");
  }
  fprintf(f, "};\n");
}

void print_tree_update_probs() {
  FILE *f = fopen("coefupdprob.h", "w");
  fprintf(f, "\n/* Update probabilities for token entropy tree. */\n\n");

  print_tree_update_for_type(f, tree_update_hist_4x4, BLOCK_TYPES,
                             "vp9_coef_update_probs_4x4[BLOCK_TYPES]");
  print_tree_update_for_type(f, tree_update_hist_8x8, BLOCK_TYPES,
                             "vp9_coef_update_probs_8x8[BLOCK_TYPES]");
  print_tree_update_for_type(f, tree_update_hist_16x16, BLOCK_TYPES,
                             "vp9_coef_update_probs_16x16[BLOCK_TYPES]");
  print_tree_update_for_type(f, tree_update_hist_32x32, BLOCK_TYPES,
                             "vp9_coef_update_probs_32x32[BLOCK_TYPES]");

  fclose(f);
  f = fopen("treeupdate.bin", "wb");
  fwrite(tree_update_hist_4x4, sizeof(tree_update_hist_4x4), 1, f);
  fwrite(tree_update_hist_8x8, sizeof(tree_update_hist_8x8), 1, f);
  fwrite(tree_update_hist_16x16, sizeof(tree_update_hist_16x16), 1, f);
  fwrite(tree_update_hist_32x32, sizeof(tree_update_hist_32x32), 1, f);
  fclose(f);
}
#endif