2010-05-18 17:58:33 +02:00
|
|
|
/*
|
2010-09-09 14:16:39 +02:00
|
|
|
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
|
2010-05-18 17:58:33 +02:00
|
|
|
*
|
2010-06-18 18:39:21 +02:00
|
|
|
* Use of this source code is governed by a BSD-style license
|
2010-06-04 22:19:40 +02:00
|
|
|
* that can be found in the LICENSE file in the root of the source
|
|
|
|
* tree. An additional intellectual property rights grant can be found
|
2010-06-18 18:39:21 +02:00
|
|
|
* in the file PATENTS. All contributing project authors may
|
2010-06-04 22:19:40 +02:00
|
|
|
* be found in the AUTHORS file in the root of the source tree.
|
2010-05-18 17:58:33 +02:00
|
|
|
*/
|
|
|
|
|
2013-04-09 04:07:29 +02:00
|
|
|
#include <assert.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <limits.h>
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2013-05-29 03:07:54 +02:00
|
|
|
#include "vpx/vpx_encoder.h"
|
|
|
|
#include "vpx_mem/vpx_mem.h"
|
|
|
|
|
2012-11-27 22:59:17 +01:00
|
|
|
#include "vp9/common/vp9_entropymode.h"
|
2012-12-10 13:38:48 +01:00
|
|
|
#include "vp9/common/vp9_entropymv.h"
|
2012-11-27 22:59:17 +01:00
|
|
|
#include "vp9/common/vp9_findnearmv.h"
|
2013-02-07 00:30:21 +01:00
|
|
|
#include "vp9/common/vp9_tile_common.h"
|
2012-11-27 22:59:17 +01:00
|
|
|
#include "vp9/common/vp9_seg_common.h"
|
|
|
|
#include "vp9/common/vp9_pred_common.h"
|
|
|
|
#include "vp9/common/vp9_entropy.h"
|
|
|
|
#include "vp9/common/vp9_mvref_common.h"
|
Consistently use get_prob(), clip_prob() and newly added clip_pixel().
Add a function clip_pixel() to clip a pixel value to the [0,255] range
of allowed values, and use this where-ever appropriate (e.g. prediction,
reconstruction). Likewise, consistently use the recently added function
clip_prob(), which calculates a binary probability in the [1,255] range.
If possible, try to use get_prob() or its sister get_binary_prob() to
calculate binary probabilities, for consistency.
Since in some places, this means that binary probability calculations
are changed (we use {255,256}*count0/(total) in a range of places,
and all of these are now changed to use 256*count0+(total>>1)/total),
this changes the encoding result, so this patch warrants some extensive
testing.
Change-Id: Ibeeff8d886496839b8e0c0ace9ccc552351f7628
2012-12-10 21:09:07 +01:00
|
|
|
#include "vp9/common/vp9_treecoder.h"
|
2013-05-29 03:07:54 +02:00
|
|
|
#include "vp9/common/vp9_systemdependent.h"
|
|
|
|
#include "vp9/common/vp9_pragmas.h"
|
|
|
|
|
|
|
|
#include "vp9/encoder/vp9_mcomp.h"
|
|
|
|
#include "vp9/encoder/vp9_encodemv.h"
|
|
|
|
#include "vp9/encoder/vp9_bitstream.h"
|
|
|
|
#include "vp9/encoder/vp9_segmentation.h"
|
2013-06-29 20:50:45 +02:00
|
|
|
#include "vp9/encoder/vp9_subexp.h"
|
2013-05-29 03:07:54 +02:00
|
|
|
#include "vp9/encoder/vp9_write_bit_buffer.h"
|
|
|
|
|
2012-08-24 16:44:01 +02:00
|
|
|
|
2010-05-18 17:58:33 +02:00
|
|
|
#if defined(SECTIONBITS_OUTPUT)
|
|
|
|
unsigned __int64 Sectionbits[500];
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef ENTROPY_STATS
|
2013-08-23 03:40:34 +02:00
|
|
|
int intra_mode_stats[INTRA_MODES]
|
|
|
|
[INTRA_MODES]
|
|
|
|
[INTRA_MODES];
|
2013-07-27 02:15:37 +02:00
|
|
|
vp9_coeff_stats tree_update_hist[TX_SIZES][BLOCK_TYPES];
|
2012-05-15 01:21:01 +02:00
|
|
|
|
2010-05-18 17:58:33 +02:00
|
|
|
extern unsigned int active_section;
|
|
|
|
#endif
|
|
|
|
|
2012-04-12 18:24:03 +02:00
|
|
|
|
2013-06-06 20:14:04 +02:00
|
|
|
#ifdef MODE_STATS
// Cumulative transform-size selection counts, accumulated across frames by
// update_tx_count_stats(). The 16x16 and 8x8 tables have fewer columns
// because larger transform sizes are not selectable at those block sizes.
int64_t tx_count_32x32p_stats[TX_SIZE_CONTEXTS][TX_SIZES];
int64_t tx_count_16x16p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 1];
int64_t tx_count_8x8p_stats[TX_SIZE_CONTEXTS][TX_SIZES - 2];
// Cumulative switchable interpolation filter selection counts.
int64_t switchable_interp_stats[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS];
|
2013-06-06 20:14:04 +02:00
|
|
|
|
|
|
|
void init_tx_count_stats() {
|
|
|
|
vp9_zero(tx_count_32x32p_stats);
|
|
|
|
vp9_zero(tx_count_16x16p_stats);
|
|
|
|
vp9_zero(tx_count_8x8p_stats);
|
|
|
|
}
|
|
|
|
|
2013-06-10 21:00:43 +02:00
|
|
|
void init_switchable_interp_stats() {
|
|
|
|
vp9_zero(switchable_interp_stats);
|
|
|
|
}
|
|
|
|
|
2013-06-06 20:14:04 +02:00
|
|
|
// Accumulate this frame's transform-size selection counts (from the frame
// context in `cm`) into the global cumulative MODE_STATS tables.
static void update_tx_count_stats(VP9_COMMON *cm) {
  int i, j;
  // 32x32-capable blocks: all TX_SIZES columns are valid.
  for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
    for (j = 0; j < TX_SIZES; j++) {
      tx_count_32x32p_stats[i][j] += cm->fc.tx_count_32x32p[i][j];
    }
  }
  // 16x16-capable blocks: one fewer selectable transform size.
  for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
    for (j = 0; j < TX_SIZES - 1; j++) {
      tx_count_16x16p_stats[i][j] += cm->fc.tx_count_16x16p[i][j];
    }
  }
  // 8x8-capable blocks: two fewer selectable transform sizes.
  for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
    for (j = 0; j < TX_SIZES - 2; j++) {
      tx_count_8x8p_stats[i][j] += cm->fc.tx_count_8x8p[i][j];
    }
  }
}
|
|
|
|
|
2013-06-10 21:00:43 +02:00
|
|
|
// Accumulate this frame's switchable interpolation filter counts into the
// global cumulative MODE_STATS table.
static void update_switchable_interp_stats(VP9_COMMON *cm) {
  int i, j;
  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
    for (j = 0; j < SWITCHABLE_FILTERS; ++j)
      switchable_interp_stats[i][j] += cm->fc.switchable_interp_count[i][j];
}
|
|
|
|
|
2013-06-06 20:14:04 +02:00
|
|
|
void write_tx_count_stats() {
|
|
|
|
int i, j;
|
|
|
|
FILE *fp = fopen("tx_count.bin", "wb");
|
|
|
|
fwrite(tx_count_32x32p_stats, sizeof(tx_count_32x32p_stats), 1, fp);
|
|
|
|
fwrite(tx_count_16x16p_stats, sizeof(tx_count_16x16p_stats), 1, fp);
|
|
|
|
fwrite(tx_count_8x8p_stats, sizeof(tx_count_8x8p_stats), 1, fp);
|
|
|
|
fclose(fp);
|
|
|
|
|
2013-06-08 09:09:44 +02:00
|
|
|
printf(
|
2013-07-27 02:15:37 +02:00
|
|
|
"vp9_default_tx_count_32x32p[TX_SIZE_CONTEXTS][TX_SIZES] = {\n");
|
2013-06-08 09:09:44 +02:00
|
|
|
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
|
|
|
|
printf(" { ");
|
2013-07-27 02:15:37 +02:00
|
|
|
for (j = 0; j < TX_SIZES; j++) {
|
2013-06-06 20:14:04 +02:00
|
|
|
printf("%"PRId64", ", tx_count_32x32p_stats[i][j]);
|
|
|
|
}
|
|
|
|
printf("},\n");
|
|
|
|
}
|
|
|
|
printf("};\n");
|
2013-06-08 09:09:44 +02:00
|
|
|
printf(
|
2013-07-27 02:15:37 +02:00
|
|
|
"vp9_default_tx_count_16x16p[TX_SIZE_CONTEXTS][TX_SIZES-1] = {\n");
|
2013-06-08 09:09:44 +02:00
|
|
|
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
|
|
|
|
printf(" { ");
|
2013-07-27 02:15:37 +02:00
|
|
|
for (j = 0; j < TX_SIZES - 1; j++) {
|
2013-06-06 20:14:04 +02:00
|
|
|
printf("%"PRId64", ", tx_count_16x16p_stats[i][j]);
|
|
|
|
}
|
|
|
|
printf("},\n");
|
|
|
|
}
|
|
|
|
printf("};\n");
|
2013-06-08 09:09:44 +02:00
|
|
|
printf(
|
2013-07-27 02:15:37 +02:00
|
|
|
"vp9_default_tx_count_8x8p[TX_SIZE_CONTEXTS][TX_SIZES-2] = {\n");
|
2013-06-08 09:09:44 +02:00
|
|
|
for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
|
|
|
|
printf(" { ");
|
2013-07-27 02:15:37 +02:00
|
|
|
for (j = 0; j < TX_SIZES - 2; j++) {
|
2013-06-06 20:14:04 +02:00
|
|
|
printf("%"PRId64", ", tx_count_8x8p_stats[i][j]);
|
|
|
|
}
|
|
|
|
printf("},\n");
|
|
|
|
}
|
|
|
|
printf("};\n");
|
|
|
|
}
|
2013-06-10 21:00:43 +02:00
|
|
|
|
|
|
|
void write_switchable_interp_stats() {
|
|
|
|
int i, j;
|
|
|
|
FILE *fp = fopen("switchable_interp.bin", "wb");
|
|
|
|
fwrite(switchable_interp_stats, sizeof(switchable_interp_stats), 1, fp);
|
|
|
|
fclose(fp);
|
|
|
|
|
|
|
|
printf(
|
2013-10-30 22:40:34 +01:00
|
|
|
"vp9_default_switchable_filter_count[SWITCHABLE_FILTER_CONTEXTS]"
|
2013-08-23 03:40:34 +02:00
|
|
|
"[SWITCHABLE_FILTERS] = {\n");
|
2013-10-30 22:40:34 +01:00
|
|
|
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
|
2013-06-10 21:00:43 +02:00
|
|
|
printf(" { ");
|
2013-08-23 03:40:34 +02:00
|
|
|
for (j = 0; j < SWITCHABLE_FILTERS; j++) {
|
2013-06-10 21:00:43 +02:00
|
|
|
printf("%"PRId64", ", switchable_interp_stats[i][j]);
|
|
|
|
}
|
|
|
|
printf("},\n");
|
|
|
|
}
|
|
|
|
printf("};\n");
|
|
|
|
}
|
2013-06-06 20:14:04 +02:00
|
|
|
#endif
|
|
|
|
|
2013-06-11 01:13:08 +02:00
|
|
|
// Store the low 32 bits of `value` into p[0..3] in big-endian byte order
// (most significant byte first).
static INLINE void write_be32(uint8_t *p, int value) {
  int byte;
  for (byte = 0; byte < 4; ++byte)
    p[byte] = (uint8_t)(value >> (24 - 8 * byte));
}
|
|
|
|
|
2013-06-06 21:33:12 +02:00
|
|
|
// Write `data` to the bit buffer using the minimum number of bits needed to
// represent any value in [0, max], as computed by get_unsigned_bits().
void vp9_encode_unsigned_max(struct vp9_write_bit_buffer *wb,
                             int data, int max) {
  vp9_wb_write_literal(wb, data, get_unsigned_bits(max));
}
|
|
|
|
|
2013-11-01 23:09:43 +01:00
|
|
|
// Derive branch probabilities for an n-symbol tree from the observed symbol
// counts, then signal a conditional differential update for each of the
// n-1 internal-node probabilities in Pcur.
// `bct` is caller-provided scratch for the per-branch {0,1} counts.
static void update_mode(vp9_writer *w, int n, vp9_tree tree,
                        vp9_prob Pcur[/* n-1 */],
                        unsigned int bct[/* n-1 */][2],
                        const unsigned int num_events[/* n */]) {
  int i = 0;

  vp9_tree_probs_from_distribution(tree, bct, num_events);
  for (i = 0; i < n - 1; ++i)
    vp9_cond_prob_diff_update(w, &Pcur[i], bct[i]);
}
|
|
|
|
|
|
|
|
// Update the luma intra mode probabilities for every block-size group,
// using the counts the encoder gathered in cpi->y_mode_count.
static void update_mbintra_mode_probs(VP9_COMP* const cpi,
                                      vp9_writer* const bc) {
  VP9_COMMON *const cm = &cpi->common;
  int j;
  unsigned int bct[INTRA_MODES - 1][2];  // scratch for update_mode()

  for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
    update_mode(bc, INTRA_MODES, vp9_intra_mode_tree,
                cm->fc.y_mode_prob[j], bct,
                (unsigned int *)cpi->y_mode_count[j]);
}
|
|
|
|
|
2013-09-11 19:45:44 +02:00
|
|
|
// Signal the selected transform size for a block as a sequence of up to
// three binary decisions, stopping early once the size is determined or the
// block's maximum transform size rules out larger options.
// NOTE(review): parameter `m` is unused here; kept for signature
// compatibility with callers.
static void write_selected_tx_size(const VP9_COMP *cpi, MODE_INFO *m,
                                   TX_SIZE tx_size, BLOCK_SIZE bsize,
                                   vp9_writer *w) {
  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
                                                 &cpi->common.fc.tx_probs);
  vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
    vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
      vp9_write(w, tx_size != TX_16X16, tx_probs[2]);
  }
}
|
|
|
|
|
2013-07-03 22:23:47 +02:00
|
|
|
// Code the "skip coefficients" flag for a block and return its value.
// If the segment forces skip (SEG_LVL_SKIP), nothing is written and 1 is
// returned — the flag is implied by the segment.
static int write_skip_coeff(const VP9_COMP *cpi, int segment_id, MODE_INFO *m,
                            vp9_writer *w) {
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  if (vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int skip_coeff = m->mbmi.skip_coeff;
    vp9_write(w, skip_coeff, vp9_get_pred_prob_mbskip(&cpi->common, xd));
    return skip_coeff;
  }
}
|
|
|
|
|
2013-07-12 03:39:10 +02:00
|
|
|
// Signal conditional differential updates of the per-context mb-skip
// probabilities, based on this frame's skip counts.
void vp9_update_skip_probs(VP9_COMP *cpi, vp9_writer *w) {
  VP9_COMMON *cm = &cpi->common;
  int k;

  for (k = 0; k < MBSKIP_CONTEXTS; ++k)
    vp9_cond_prob_diff_update(w, &cm->fc.mbskip_probs[k], cm->counts.mbskip[k]);
}
|
|
|
|
|
|
|
|
// Write intra prediction mode `m` using the intra-mode tree and the given
// probability set `p`.
static void write_intra_mode(vp9_writer *bc, int m, const vp9_prob *p) {
  write_token(bc, vp9_intra_mode_tree, p, vp9_intra_mode_encodings + m);
}
|
|
|
|
|
2013-11-01 23:09:43 +01:00
|
|
|
// For each switchable-filter context, derive branch counts from this
// frame's filter-selection counts and signal conditional differential
// updates of the switchable interpolation filter probabilities.
static void update_switchable_interp_probs(VP9_COMP *cpi, vp9_writer *w) {
  VP9_COMMON *const cm = &cpi->common;
  unsigned int branch_ct[SWITCHABLE_FILTERS - 1][2];
  int i, j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) {
    vp9_tree_probs_from_distribution(vp9_switchable_interp_tree, branch_ct,
                                     cm->counts.switchable_interp[j]);

    for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
      vp9_cond_prob_diff_update(w, &cm->fc.switchable_interp_prob[j][i],
                                branch_ct[i]);
  }

#ifdef MODE_STATS
  // Only accumulate stats on the real packing pass, not the trial pass.
  if (!cpi->dummy_packing)
    update_switchable_interp_stats(cm);
#endif
}
|
|
|
|
|
2013-11-01 23:09:43 +01:00
|
|
|
// For each inter-mode context, derive branch counts from this frame's
// inter-mode counts and signal conditional differential updates of the
// inter mode probabilities.
static void update_inter_mode_probs(VP9_COMMON *cm, vp9_writer *w) {
  int i, j;

  for (i = 0; i < INTER_MODE_CONTEXTS; ++i) {
    unsigned int branch_ct[INTER_MODES - 1][2];
    vp9_tree_probs_from_distribution(vp9_inter_mode_tree, branch_ct,
                                     cm->counts.inter_mode[i]);

    for (j = 0; j < INTER_MODES - 1; ++j)
      vp9_cond_prob_diff_update(w, &cm->fc.inter_mode_probs[i][j],
                                branch_ct[j]);
  }
}
|
|
|
|
|
2013-11-12 23:21:15 +01:00
|
|
|
// Pack the coefficient tokens for one macroblock into the bitstream.
// Consumes tokens from *tp up to (but not past) `stop` or an EOSB_TOKEN,
// then advances *tp past the EOSB marker if one terminated the run.
static void pack_mb_tokens(vp9_writer* const w,
                           TOKENEXTRA **tp,
                           const TOKENEXTRA *const stop) {
  TOKENEXTRA *p = *tp;

  while (p < stop && p->token != EOSB_TOKEN) {
    const int t = p->token;
    const struct vp9_token *const a = &vp9_coef_encodings[t];
    const vp9_extra_bit *const b = &vp9_extra_bits[t];
    int i = 0;       // start index into the coefficient tree
    int v = a->value;
    int n = a->len;  // number of tree bits for this token

    /* skip one or two nodes */
    if (p->skip_eob_node) {
      n -= p->skip_eob_node;
      i = 2 * p->skip_eob_node;
    }

    // TODO(jbb): expanding this can lead to big gains. It allows
    // much better branch prediction and would enable us to avoid numerous
    // lookups and compares.

    // If we have a token that's in the constrained set, the coefficient tree
    // is split into two treed writes. The first treed write takes care of the
    // unconstrained nodes. The second treed write takes care of the
    // constrained nodes.
    if (t >= TWO_TOKEN && t < DCT_EOB_TOKEN) {
      int len = UNCONSTRAINED_NODES - p->skip_eob_node;
      int bits = v >> (n - len);
      treed_write(w, vp9_coef_tree, p->context_tree, bits, len, i);
      treed_write(w, vp9_coef_con_tree,
                  vp9_pareto8_full[p->context_tree[PIVOT_NODE] - 1], v, n - len,
                  0);
    } else {
      treed_write(w, vp9_coef_tree, p->context_tree, v, n, i);
    }

    // Tokens with a base value carry extra magnitude bits plus a sign bit.
    if (b->base_val) {
      const int e = p->extra, l = b->len;

      if (l) {
        const unsigned char *pb = b->prob;
        int v = e >> 1;  // magnitude bits (sign is the LSB of e)
        int n = l;       /* number of bits in v, assumed nonzero */
        int i = 0;

        // Walk the extra-bits tree, emitting one bit per level.
        do {
          const int bb = (v >> --n) & 1;
          vp9_write(w, bb, pb[i >> 1]);
          i = b->tree[i + bb];
        } while (n);
      }

      vp9_write_bit(w, e & 1);  // sign bit
    }
    ++p;
  }

  // Step over the end-of-superblock marker, if that is what stopped us.
  *tp = p + (p->token == EOSB_TOKEN);
}
|
|
|
|
|
2013-07-11 23:14:47 +02:00
|
|
|
// Write an inter prediction mode using the inter-mode tree and the given
// probability set `p`. `mode` must be an inter mode.
static void write_sb_mv_ref(vp9_writer *w, MB_PREDICTION_MODE mode,
                            const vp9_prob *p) {
  assert(is_inter_mode(mode));
  write_token(w, vp9_inter_mode_tree, p,
              &vp9_inter_mode_encodings[INTER_OFFSET(mode)]);
}
|
|
|
|
|
2013-06-26 19:27:28 +02:00
|
|
|
|
2013-07-10 21:29:43 +02:00
|
|
|
// Write a segment id (3 tree bits) — but only when segmentation is enabled
// and the segment map is being updated this frame; otherwise nothing is
// coded.
static void write_segment_id(vp9_writer *w, const struct segmentation *seg,
                             int segment_id) {
  if (seg->enabled && seg->update_map)
    treed_write(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0);
}
|
|
|
|
|
2011-11-02 14:30:10 +01:00
|
|
|
// This function encodes the reference frame for the current block:
// first the compound-vs-single prediction decision (when the frame uses
// hybrid prediction), then which reference frame(s) are used. When the
// segment fixes the reference frame, nothing is written.
static void encode_ref_frame(VP9_COMP *cpi, vp9_writer *bc) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mi = &xd->mi_8x8[0]->mbmi;
  const int segment_id = mi->segment_id;
  int seg_ref_active = vp9_segfeature_active(&cm->seg, segment_id,
                                             SEG_LVL_REF_FRAME);
  // If segment level coding of this signal is disabled...
  // or the segment allows multiple reference frame options
  if (!seg_ref_active) {
    // does the feature use compound prediction or not
    // (if not specified at the frame/segment level)
    if (cm->comp_pred_mode == HYBRID_PREDICTION) {
      vp9_write(bc, mi->ref_frame[1] > INTRA_FRAME,
                vp9_get_pred_prob_comp_inter_inter(cm, xd));
    } else {
      // Frame-level prediction mode already implies single/compound.
      assert((mi->ref_frame[1] <= INTRA_FRAME) ==
             (cm->comp_pred_mode == SINGLE_PREDICTION_ONLY));
    }

    if (mi->ref_frame[1] > INTRA_FRAME) {
      // Compound prediction: one bit distinguishes the reference pair.
      vp9_write(bc, mi->ref_frame[0] == GOLDEN_FRAME,
                vp9_get_pred_prob_comp_ref_p(cm, xd));
    } else {
      // Single prediction: up to two bits select LAST/GOLDEN/ALTREF.
      vp9_write(bc, mi->ref_frame[0] != LAST_FRAME,
                vp9_get_pred_prob_single_ref_p1(cm, xd));
      if (mi->ref_frame[0] != LAST_FRAME)
        vp9_write(bc, mi->ref_frame[0] != GOLDEN_FRAME,
                  vp9_get_pred_prob_single_ref_p2(cm, xd));
    }
  } else {
    assert(mi->ref_frame[1] <= INTRA_FRAME);
    assert(vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) ==
           mi->ref_frame[0]);
  }

  // If using the prediction model we have nothing further to do because
  // the reference frame is fully coded by the segment.
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2013-08-01 01:59:15 +02:00
|
|
|
// Pack the mode, reference frame, interpolation filter, and motion vector
// information for one (possibly sub-8x8 partitioned) block on a non-key
// frame. The write order here defines the bitstream syntax: segment id,
// skip flag, intra/inter flag, transform size, then mode-specific data.
static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
  VP9_COMMON *const cm = &cpi->common;
  const nmv_context *nmvc = &cm->fc.nmvc;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct segmentation *seg = &cm->seg;
  MB_MODE_INFO *const mi = &m->mbmi;
  const MV_REFERENCE_FRAME rf = mi->ref_frame[0];
  const MB_PREDICTION_MODE mode = mi->mode;
  const int segment_id = mi->segment_id;
  int skip_coeff;
  const BLOCK_SIZE bsize = mi->sb_type;
  const int allow_hp = cm->allow_high_precision_mv;

#ifdef ENTROPY_STATS
  active_section = 9;
#endif

  if (seg->update_map) {
    if (seg->temporal_update) {
      // Temporal update: code a "predicted" flag; only code the explicit
      // segment id when prediction fails.
      const int pred_flag = mi->seg_id_predicted;
      vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
      vp9_write(bc, pred_flag, pred_prob);
      if (!pred_flag)
        write_segment_id(bc, seg, segment_id);
    } else {
      write_segment_id(bc, seg, segment_id);
    }
  }

  skip_coeff = write_skip_coeff(cpi, segment_id, m, bc);

  // Intra/inter flag, unless the segment fixes the reference frame.
  if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
    vp9_write(bc, rf != INTRA_FRAME,
              vp9_get_pred_prob_intra_inter(cm, xd));

  // Transform size is coded only when selectable per block and not implied
  // by a skipped inter block.
  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
      !(rf != INTRA_FRAME &&
        (skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
    write_selected_tx_size(cpi, m, mi->tx_size, bsize, bc);
  }

  if (rf == INTRA_FRAME) {
#ifdef ENTROPY_STATS
    active_section = 6;
#endif

    if (bsize >= BLOCK_8X8) {
      write_intra_mode(bc, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]);
    } else {
      // Sub-8x8: one intra mode per 4x4-aligned sub-block.
      int idx, idy;
      const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
      for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
        for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
          const MB_PREDICTION_MODE bm = m->bmi[idy * 2 + idx].as_mode;
          write_intra_mode(bc, bm, cm->fc.y_mode_prob[0]);
        }
      }
    }
    write_intra_mode(bc, mi->uv_mode, cm->fc.uv_mode_prob[mode]);
  } else {
    vp9_prob *mv_ref_p;
    encode_ref_frame(cpi, bc);
    mv_ref_p = cpi->common.fc.inter_mode_probs[mi->mode_context[rf]];

#ifdef ENTROPY_STATS
    active_section = 3;
#endif

    // If segment skip is not enabled code the mode.
    if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
      if (bsize >= BLOCK_8X8) {
        write_sb_mv_ref(bc, mode, mv_ref_p);
        ++cm->counts.inter_mode[mi->mode_context[rf]]
                               [INTER_OFFSET(mode)];
      }
    }

    if (cm->mcomp_filter_type == SWITCHABLE) {
      const int ctx = vp9_get_pred_context_switchable_interp(xd);
      write_token(bc, vp9_switchable_interp_tree,
                  cm->fc.switchable_interp_prob[ctx],
                  &vp9_switchable_interp_encodings[mi->interp_filter]);
    } else {
      assert(mi->interp_filter == cm->mcomp_filter_type);
    }

    if (bsize < BLOCK_8X8) {
      // Sub-8x8: a mode (and possibly MVs) per 4x4-aligned sub-block.
      const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
      int idx, idy;
      for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
        for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
          const int j = idy * 2 + idx;
          const MB_PREDICTION_MODE blockmode = m->bmi[j].as_mode;
          write_sb_mv_ref(bc, blockmode, mv_ref_p);
          ++cm->counts.inter_mode[mi->mode_context[rf]]
                                 [INTER_OFFSET(blockmode)];

          if (blockmode == NEWMV) {
#ifdef ENTROPY_STATS
            active_section = 11;
#endif
            vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[0].as_mv,
                          &mi->best_mv[0].as_mv, nmvc, allow_hp);

            if (has_second_ref(mi))
              vp9_encode_mv(cpi, bc, &m->bmi[j].as_mv[1].as_mv,
                            &mi->best_mv[1].as_mv, nmvc, allow_hp);
          }
        }
      }
    } else if (mode == NEWMV) {
#ifdef ENTROPY_STATS
      active_section = 5;
#endif
      vp9_encode_mv(cpi, bc, &mi->mv[0].as_mv,
                    &mi->best_mv[0].as_mv, nmvc, allow_hp);

      if (has_second_ref(mi))
        vp9_encode_mv(cpi, bc, &mi->mv[1].as_mv,
                      &mi->best_mv[1].as_mv, nmvc, allow_hp);
    }
  }
}
|
2012-02-02 18:04:40 +01:00
|
|
|
|
2013-09-11 19:45:44 +02:00
|
|
|
// Pack the mode information for one block on a key (intra-only) frame:
// segment id, skip flag, transform size, then intra modes. Key-frame intra
// mode probabilities are conditioned on the above and left neighbors'
// modes.
static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8,
                              vp9_writer *bc) {
  const VP9_COMMON *const cm = &cpi->common;
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  const struct segmentation *const seg = &cm->seg;
  MODE_INFO *m = mi_8x8[0];
  const int ym = m->mbmi.mode;
  const int segment_id = m->mbmi.segment_id;
  MODE_INFO *above_mi = mi_8x8[-xd->mode_info_stride];
  // Left neighbor only exists when not at the tile/frame left edge.
  MODE_INFO *left_mi = xd->left_available ? mi_8x8[-1] : NULL;

  if (seg->update_map)
    write_segment_id(bc, seg, m->mbmi.segment_id);

  write_skip_coeff(cpi, segment_id, m, bc);

  if (m->mbmi.sb_type >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
    write_selected_tx_size(cpi, m, m->mbmi.tx_size, m->mbmi.sb_type, bc);

  if (m->mbmi.sb_type >= BLOCK_8X8) {
    const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, 0);
    const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, 0);
    write_intra_mode(bc, ym, vp9_kf_y_mode_prob[A][L]);
  } else {
    // Sub-8x8: one intra mode per 4x4-aligned sub-block, each conditioned
    // on its own neighbors.
    int idx, idy;
    const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[m->mbmi.sb_type];
    const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[m->mbmi.sb_type];
    for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
      for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
        int i = idy * 2 + idx;
        const MB_PREDICTION_MODE A = above_block_mode(m, above_mi, i);
        const MB_PREDICTION_MODE L = left_block_mode(m, left_mi, i);
        const int bm = m->bmi[i].as_mode;
#ifdef ENTROPY_STATS
        ++intra_mode_stats[A][L][bm];
#endif
        write_intra_mode(bc, bm, vp9_kf_y_mode_prob[A][L]);
      }
    }
  }

  write_intra_mode(bc, m->mbmi.uv_mode, vp9_kf_uv_mode_prob[ym]);
}
|
|
|
|
|
2013-10-25 17:18:04 +02:00
|
|
|
// Write one coding block at (mi_row, mi_col): point xd at the block's mode
// info, set up row/column availability, write its mode info (key-frame or
// inter path), then pack its coefficient tokens.
static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,
                          vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end,
                          int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  MODE_INFO *m;

  xd->mi_8x8 = cm->mi_grid_visible + (mi_row * cm->mode_info_stride + mi_col);
  m = xd->mi_8x8[0];

  set_mi_row_col(xd, tile,
                 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
                 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
                 cm->mi_rows, cm->mi_cols);
  if (frame_is_intra_only(cm)) {
    write_mb_modes_kf(cpi, xd->mi_8x8, w);
#ifdef ENTROPY_STATS
    active_section = 8;
#endif
  } else {
    pack_inter_mode_mvs(cpi, m, w);
#ifdef ENTROPY_STATS
    active_section = 1;
#endif
  }

  assert(*tok < tok_end);
  pack_mb_tokens(w, tok, tok_end);
}
|
|
|
|
|
2013-11-02 02:23:06 +01:00
|
|
|
// Signal the partition type for a block. When the block extends past the
// frame's right or bottom edge, only the decisions that remain ambiguous
// are coded (or nothing, when both edges force a split). `hbs` is half the
// block size in mode-info units.
static void write_partition(VP9_COMP *cpi, int hbs, int mi_row, int mi_col,
                            PARTITION_TYPE p, BLOCK_SIZE bsize, vp9_writer *w) {
  VP9_COMMON *const cm = &cpi->common;
  const int ctx = partition_plane_context(cpi->above_seg_context,
                                          cpi->left_seg_context,
                                          mi_row, mi_col, bsize);
  const vp9_prob *const probs = get_partition_probs(cm, ctx);
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;

  if (has_rows && has_cols) {
    // Fully inside the frame: code the full partition token.
    write_token(w, vp9_partition_tree, probs, &vp9_partition_encodings[p]);
  } else if (!has_rows && has_cols) {
    // Bottom edge: only HORZ or SPLIT are possible; one bit decides.
    assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
    vp9_write(w, p == PARTITION_SPLIT, probs[1]);
  } else if (has_rows && !has_cols) {
    // Right edge: only VERT or SPLIT are possible; one bit decides.
    assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
    vp9_write(w, p == PARTITION_SPLIT, probs[2]);
  } else {
    // Both edges: SPLIT is implied, nothing is coded.
    assert(p == PARTITION_SPLIT);
  }
}
|
|
|
|
|
2013-10-25 17:18:04 +02:00
|
|
|
// Recursively writes the partition tree and block modes/tokens for the
// superblock (or sub-block) of size `bsize` rooted at (mi_row, mi_col).
// Blocks whose top-left corner is outside the frame are skipped entirely;
// partial HORZ/VERT halves are only emitted when they start inside the frame.
static void write_modes_sb(VP9_COMP *cpi, const TileInfo *const tile,
                           vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end,
                           int mi_row, int mi_col, BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  const int bsl = b_width_log2(bsize);
  // bs is half the block's width/height in mode-info (8x8) units.
  const int bs = (1 << bsl) / 4;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  MODE_INFO *m = cm->mi_grid_visible[mi_row * cm->mode_info_stride + mi_col];

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  // The partition is recovered from the stored block size of the top-left
  // mode info, exactly as the decoder will infer it.
  partition = partition_lookup[bsl][m->mbmi.sb_type];
  write_partition(cpi, bs, mi_row, mi_col, partition, bsize, w);
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    // Sub-8x8 partitions are coded inside a single block's mode info.
    write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        break;
      case PARTITION_HORZ:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_row + bs < cm->mi_rows)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col);
        break;
      case PARTITION_VERT:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_col + bs < cm->mi_cols)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs);
        break;
      case PARTITION_SPLIT:
        // Recurse into the four quadrants in raster order.
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
                       subsize);
        break;
      default:
        assert(0);
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(cpi->above_seg_context, cpi->left_seg_context,
                             mi_row, mi_col, subsize, bsize);
}
|
|
|
|
|
2013-10-25 17:18:04 +02:00
|
|
|
static void write_modes(VP9_COMP *cpi, const TileInfo *const tile,
|
2013-11-12 02:29:31 +01:00
|
|
|
vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end) {
|
2013-04-26 20:57:17 +02:00
|
|
|
int mi_row, mi_col;
|
2013-04-23 19:12:18 +02:00
|
|
|
|
2013-10-25 17:18:04 +02:00
|
|
|
for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
|
2013-11-12 02:29:31 +01:00
|
|
|
mi_row += MI_BLOCK_SIZE) {
|
|
|
|
vp9_zero(cpi->left_seg_context);
|
2013-10-25 17:18:04 +02:00
|
|
|
for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
|
2013-11-12 02:29:31 +01:00
|
|
|
mi_col += MI_BLOCK_SIZE)
|
|
|
|
write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, BLOCK_64X64);
|
2012-07-14 00:21:29 +02:00
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
2012-02-02 18:04:40 +01:00
|
|
|
|
2013-07-29 23:35:55 +02:00
|
|
|
// Converts the raw coefficient token counts gathered during encoding into
// per-context branch counts and model probabilities for one transform size.
// The results land in cpi->frame_branch_ct / cpi->frame_coef_probs and are
// later used to cost and signal probability updates.
static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size) {
  vp9_coeff_probs_model *coef_probs = cpi->frame_coef_probs[tx_size];
  vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size];
  unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] =
      cpi->common.counts.eob_branch[tx_size];
  vp9_coeff_stats *coef_branch_ct = cpi->frame_branch_ct[tx_size];
  int i, j, k, l, m;

  for (i = 0; i < BLOCK_TYPES; ++i) {
    for (j = 0; j < REF_TYPES; ++j) {
      for (k = 0; k < COEF_BANDS; ++k) {
        for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
          // Band 0 only uses the first three coefficient contexts; the rest
          // are skipped here and in every other per-context loop.
          if (l >= 3 && k == 0)
            continue;
          vp9_tree_probs_from_distribution(vp9_coef_tree,
                                           coef_branch_ct[i][j][k][l],
                                           coef_counts[i][j][k][l]);
          // Node 0's "one" count is rebuilt from the separately tracked
          // end-of-block branch counts.
          coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
                                             coef_branch_ct[i][j][k][l][0][0];
          for (m = 0; m < UNCONSTRAINED_NODES; ++m)
            coef_probs[i][j][k][l][m] = get_binary_prob(
                                            coef_branch_ct[i][j][k][l][m][0],
                                            coef_branch_ct[i][j][k][l][m][1]);
#ifdef ENTROPY_STATS
          // Accumulate global statistics only on the real packing pass.
          if (!cpi->dummy_packing) {
            int t;
            for (t = 0; t < MAX_ENTROPY_TOKENS; ++t)
              context_counters[tx_size][i][j][k][l][t] +=
                  coef_counts[i][j][k][l][t];
            context_counters[tx_size][i][j][k][l][MAX_ENTROPY_TOKENS] +=
                eob_branch_ct[i][j][k][l];
          }
#endif
        }
      }
    }
  }
}
|
|
|
|
|
|
|
|
// Builds the coefficient probability model for every transform size the
// codec supports, from the counts collected during the encode loop.
static void build_coeff_contexts(VP9_COMP *cpi) {
  TX_SIZE tx_size;

  for (tx_size = TX_4X4; tx_size <= TX_32X32; ++tx_size)
    build_tree_distribution(cpi, tx_size);
}
|
|
|
|
|
2013-05-31 18:18:59 +02:00
|
|
|
// Decides which coefficient probabilities to update for one transform size
// and writes the update flags/deltas into the bitstream.
// Three strategies, selected by cpi->sf.use_fast_coef_updates:
//   0 - exact: a full dry-run cost search first, then a second pass that
//       actually writes the chosen updates;
//   1/2 - fast: a single pass; mode 2 additionally restricts the search to
//       the lower half of the bands/contexts to save encode time.
static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi,
                                     TX_SIZE tx_size) {
  vp9_coeff_probs_model *new_frame_coef_probs = cpi->frame_coef_probs[tx_size];
  vp9_coeff_probs_model *old_frame_coef_probs =
      cpi->common.fc.coef_probs[tx_size];
  vp9_coeff_stats *frame_branch_ct = cpi->frame_branch_ct[tx_size];
  const vp9_prob upd = DIFF_UPDATE_PROB;
  const int entropy_nodes_update = UNCONSTRAINED_NODES;
  int i, j, k, l, t;
  switch (cpi->sf.use_fast_coef_updates) {
    case 0: {
      /* dry run to see if there is any update at all needed */
      int savings = 0;
      int update[2] = {0, 0};
      for (i = 0; i < BLOCK_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
                const vp9_prob oldp = old_frame_coef_probs[i][j][k][l][t];
                int s;
                int u = 0;

                // Band 0 only has three valid contexts.
                if (l >= 3 && k == 0)
                  continue;
                // The pivot node is searched against the whole model; other
                // nodes are searched independently.
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
                if (s > 0 && newp != oldp)
                  u = 1;
                // Every node costs at least the "no update" flag; an update
                // only pays off by its measured savings beyond that flag.
                if (u)
                  savings += s - (int)(vp9_cost_zero(upd));
                else
                  savings -= (int)(vp9_cost_zero(upd));
                update[u]++;
              }
            }
          }
        }
      }

      // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
      /* Is coef updated at all */
      if (update[1] == 0 || savings < 0) {
        vp9_write_bit(bc, 0);
        return;
      }
      vp9_write_bit(bc, 1);
      // Second pass: repeat the search and actually emit each per-node
      // update flag plus the probability delta where chosen.
      for (i = 0; i < BLOCK_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
                const vp9_prob upd = DIFF_UPDATE_PROB;
                int s;
                int u = 0;
                if (l >= 3 && k == 0)
                  continue;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t],
                      *oldp, &newp, upd);
                if (s > 0 && newp != *oldp)
                  u = 1;
                vp9_write(bc, u, upd);
#ifdef ENTROPY_STATS
                if (!cpi->dummy_packing)
                  ++tree_update_hist[tx_size][i][j][k][l][t][u];
#endif
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      return;
    }

    case 1:
    case 2: {
      // Mode 2 halves the search space; mode 1 searches everything but
      // still in a single pass.
      const int prev_coef_contexts_to_update =
          (cpi->sf.use_fast_coef_updates == 2 ?
           PREV_COEF_CONTEXTS >> 1 : PREV_COEF_CONTEXTS);
      const int coef_band_to_update =
          (cpi->sf.use_fast_coef_updates == 2 ?
           COEF_BANDS >> 1 : COEF_BANDS);
      int updates = 0;
      int noupdates_before_first = 0;
      for (i = 0; i < BLOCK_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < PREV_COEF_CONTEXTS; ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_frame_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_frame_coef_probs[i][j][k][l] + t;
                int s;
                int u = 0;
                if (l >= 3 && k == 0)
                  continue;
                if (l >= prev_coef_contexts_to_update ||
                    k >= coef_band_to_update) {
                  u = 0;
                } else {
                  if (t == PIVOT_NODE)
                    s = vp9_prob_diff_update_savings_search_model(
                        frame_branch_ct[i][j][k][l][0],
                        old_frame_coef_probs[i][j][k][l], &newp, upd, i, j);
                  else
                    s = vp9_prob_diff_update_savings_search(
                        frame_branch_ct[i][j][k][l][t],
                        *oldp, &newp, upd);
                  if (s > 0 && newp != *oldp)
                    u = 1;
                }
                updates += u;
                // Until the first update is found, nothing is written:
                // the "no update" flags are deferred in case the whole
                // pass turns out to need no updates at all.
                if (u == 0 && updates == 0) {
                  noupdates_before_first++;
#ifdef ENTROPY_STATS
                  if (!cpi->dummy_packing)
                    ++tree_update_hist[tx_size][i][j][k][l][t][u];
#endif
                  continue;
                }
                if (u == 1 && updates == 1) {
                  int v;
                  // first update
                  vp9_write_bit(bc, 1);
                  // Flush the deferred "no update" flags.
                  for (v = 0; v < noupdates_before_first; ++v)
                    vp9_write(bc, 0, upd);
                }
                vp9_write(bc, u, upd);
#ifdef ENTROPY_STATS
                if (!cpi->dummy_packing)
                  ++tree_update_hist[tx_size][i][j][k][l][t][u];
#endif
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      if (updates == 0) {
        vp9_write_bit(bc, 0);  // no updates
      }
      return;
    }

    default:
      assert(0);
  }
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-10-31 22:40:53 +01:00
|
|
|
// Builds the per-frame coefficient probability model and writes the
// probability updates for every transform size the frame's tx mode allows.
static void update_coef_probs(VP9_COMP* const cpi, vp9_writer* const bc) {
  const TX_MODE tx_mode = cpi->common.tx_mode;

  vp9_clear_system_state();

  // Build the coefficient contexts based on counts collected in encode loop.
  build_coeff_contexts(cpi);

  // Transform sizes are signaled smallest first; the TX_MODE enum is
  // ordered, so once the mode rules out one size it rules out all larger
  // ones and we can return early.
  update_coef_probs_common(bc, cpi, TX_4X4);

  if (tx_mode <= ONLY_4X4)
    return;
  update_coef_probs_common(bc, cpi, TX_8X8);

  if (tx_mode <= ALLOW_8X8)
    return;
  update_coef_probs_common(bc, cpi, TX_16X16);

  if (tx_mode <= ALLOW_16X16)
    return;
  update_coef_probs_common(bc, cpi, TX_32X32);
}
|
2012-04-12 18:24:03 +02:00
|
|
|
|
2013-07-18 03:37:45 +02:00
|
|
|
static void encode_loopfilter(struct loopfilter *lf,
|
2013-06-03 19:50:57 +02:00
|
|
|
struct vp9_write_bit_buffer *wb) {
|
2013-05-02 23:23:56 +02:00
|
|
|
int i;
|
|
|
|
|
2013-05-14 03:05:20 +02:00
|
|
|
// Encode the loop filter level and type
|
2013-07-18 03:37:45 +02:00
|
|
|
vp9_wb_write_literal(wb, lf->filter_level, 6);
|
|
|
|
vp9_wb_write_literal(wb, lf->sharpness_level, 3);
|
2013-05-14 03:05:20 +02:00
|
|
|
|
2013-05-02 23:23:56 +02:00
|
|
|
// Write out loop filter deltas applied at the MB level based on mode or
|
|
|
|
// ref frame (if they are enabled).
|
2013-07-18 03:37:45 +02:00
|
|
|
vp9_wb_write_bit(wb, lf->mode_ref_delta_enabled);
|
2013-05-02 23:23:56 +02:00
|
|
|
|
2013-07-18 03:37:45 +02:00
|
|
|
if (lf->mode_ref_delta_enabled) {
|
2013-05-02 23:23:56 +02:00
|
|
|
// Do the deltas need to be updated
|
2013-07-18 03:37:45 +02:00
|
|
|
vp9_wb_write_bit(wb, lf->mode_ref_delta_update);
|
|
|
|
if (lf->mode_ref_delta_update) {
|
2013-05-02 23:23:56 +02:00
|
|
|
// Send update
|
|
|
|
for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
|
2013-07-18 03:37:45 +02:00
|
|
|
const int delta = lf->ref_deltas[i];
|
2013-05-02 23:23:56 +02:00
|
|
|
|
|
|
|
// Frame level data
|
2013-07-18 03:37:45 +02:00
|
|
|
if (delta != lf->last_ref_deltas[i]) {
|
|
|
|
lf->last_ref_deltas[i] = delta;
|
2013-06-03 19:50:57 +02:00
|
|
|
vp9_wb_write_bit(wb, 1);
|
2013-05-02 23:23:56 +02:00
|
|
|
|
2013-06-03 19:50:57 +02:00
|
|
|
assert(delta != 0);
|
|
|
|
vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
|
|
|
|
vp9_wb_write_bit(wb, delta < 0);
|
2013-05-02 23:23:56 +02:00
|
|
|
} else {
|
2013-06-03 19:50:57 +02:00
|
|
|
vp9_wb_write_bit(wb, 0);
|
2013-05-02 23:23:56 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Send update
|
|
|
|
for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
|
2013-07-18 03:37:45 +02:00
|
|
|
const int delta = lf->mode_deltas[i];
|
|
|
|
if (delta != lf->last_mode_deltas[i]) {
|
|
|
|
lf->last_mode_deltas[i] = delta;
|
2013-06-03 19:50:57 +02:00
|
|
|
vp9_wb_write_bit(wb, 1);
|
2013-05-02 23:23:56 +02:00
|
|
|
|
2013-06-03 19:50:57 +02:00
|
|
|
assert(delta != 0);
|
|
|
|
vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
|
|
|
|
vp9_wb_write_bit(wb, delta < 0);
|
2013-05-02 23:23:56 +02:00
|
|
|
} else {
|
2013-06-03 19:50:57 +02:00
|
|
|
vp9_wb_write_bit(wb, 0);
|
2013-05-02 23:23:56 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-06-03 19:50:57 +02:00
|
|
|
// Writes one optional quantizer delta: a presence flag, then (if non-zero)
// a 4-bit magnitude and a sign bit.
static void write_delta_q(struct vp9_write_bit_buffer *wb, int delta_q) {
  const int present = delta_q != 0;

  vp9_wb_write_bit(wb, present);
  if (present) {
    vp9_wb_write_literal(wb, abs(delta_q), 4);
    vp9_wb_write_bit(wb, delta_q < 0);
  }
}
|
|
|
|
|
2013-06-03 19:50:57 +02:00
|
|
|
// Writes the quantization parameters: the base quantizer index followed by
// the three optional deltas in the order the decoder expects
// (luma DC, chroma DC, chroma AC).
static void encode_quantization(VP9_COMMON *cm,
                                struct vp9_write_bit_buffer *wb) {
  const int delta_qs[3] = { cm->y_dc_delta_q, cm->uv_dc_delta_q,
                            cm->uv_ac_delta_q };
  int i;

  vp9_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
  for (i = 0; i < 3; ++i)
    write_delta_q(wb, delta_qs[i]);
}
|
|
|
|
|
|
|
|
|
2013-06-06 21:33:12 +02:00
|
|
|
// Writes the segmentation section of the uncompressed header: the enabled
// flag, the (optionally updated) segment-map coding probabilities, the
// temporal-prediction choice and probabilities, and the per-segment feature
// data. The write order must match the decoder's parse order exactly.
static void encode_segmentation(VP9_COMP *cpi,
                                struct vp9_write_bit_buffer *wb) {
  int i, j;

  struct segmentation *seg = &cpi->common.seg;

  vp9_wb_write_bit(wb, seg->enabled);
  if (!seg->enabled)
    return;

  // Segmentation map
  vp9_wb_write_bit(wb, seg->update_map);
  if (seg->update_map) {
    // Select the coding strategy (temporal or spatial)
    vp9_choose_segmap_coding_method(cpi);
    // Write out probabilities used to decode unpredicted macro-block segments
    for (i = 0; i < SEG_TREE_PROBS; i++) {
      const int prob = seg->tree_probs[i];
      // MAX_PROB doubles as the "not updated" sentinel.
      const int update = prob != MAX_PROB;
      vp9_wb_write_bit(wb, update);
      if (update)
        vp9_wb_write_literal(wb, prob, 8);
    }

    // Write out the chosen coding method.
    vp9_wb_write_bit(wb, seg->temporal_update);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++) {
        const int prob = seg->pred_probs[i];
        const int update = prob != MAX_PROB;
        vp9_wb_write_bit(wb, update);
        if (update)
          vp9_wb_write_literal(wb, prob, 8);
      }
    }
  }

  // Segmentation data
  vp9_wb_write_bit(wb, seg->update_data);
  if (seg->update_data) {
    vp9_wb_write_bit(wb, seg->abs_delta);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        const int active = vp9_segfeature_active(seg, i, j);
        vp9_wb_write_bit(wb, active);
        if (active) {
          const int data = vp9_get_segdata(seg, i, j);
          const int data_max = vp9_seg_feature_data_max(j);

          // Signed features carry magnitude plus a sign bit; unsigned
          // features carry only the magnitude.
          if (vp9_is_segfeature_signed(j)) {
            vp9_encode_unsigned_max(wb, abs(data), data_max);
            vp9_wb_write_bit(wb, data < 0);
          } else {
            vp9_encode_unsigned_max(wb, data, data_max);
          }
        }
      }
    }
  }
}
|
|
|
|
|
2013-05-30 23:50:41 +02:00
|
|
|
|
2013-06-06 20:14:04 +02:00
|
|
|
// Writes the frame's transform-size mode and, when the mode is
// TX_MODE_SELECT, the conditional updates to the per-context transform-size
// probabilities for each of the 8x8/16x16/32x32 decision trees.
static void encode_txfm_probs(VP9_COMP *cpi, vp9_writer *w) {
  VP9_COMMON *const cm = &cpi->common;

  // Mode: 2 bits cover ONLY_4X4..ALLOW_32X32; one extra bit distinguishes
  // ALLOW_32X32 from TX_MODE_SELECT.
  vp9_write_literal(w, MIN(cm->tx_mode, ALLOW_32X32), 2);
  if (cm->tx_mode >= ALLOW_32X32)
    vp9_write_bit(w, cm->tx_mode == TX_MODE_SELECT);

  // Probabilities
  if (cm->tx_mode == TX_MODE_SELECT) {
    int i, j;
    // Branch counts per context: each decision tree has one fewer node than
    // the number of sizes it distinguishes.
    unsigned int ct_8x8p[TX_SIZES - 3][2];
    unsigned int ct_16x16p[TX_SIZES - 2][2];
    unsigned int ct_32x32p[TX_SIZES - 1][2];

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_8x8(cm->counts.tx.p8x8[i], ct_8x8p);
      for (j = 0; j < TX_SIZES - 3; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j], ct_8x8p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_16x16(cm->counts.tx.p16x16[i], ct_16x16p);
      for (j = 0; j < TX_SIZES - 2; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j],
                                  ct_16x16p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p);
      for (j = 0; j < TX_SIZES - 1; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j],
                                  ct_32x32p[j]);
    }
#ifdef MODE_STATS
    // Stats only on the real packing pass, not the rate-estimation dry run.
    if (!cpi->dummy_packing)
      update_tx_count_stats(cm);
#endif
  }
}
|
|
|
|
|
2013-10-24 02:45:52 +02:00
|
|
|
// Signals the frame's interpolation filter: a switchable flag, then (for a
// fixed filter) a 2-bit code. The bitstream codes differ from the enum
// ordering, so the value is remapped through a table before writing.
static void write_interp_filter_type(INTERPOLATION_TYPE type,
                                     struct vp9_write_bit_buffer *wb) {
  static const int literal_for_type[] = { 1, 0, 2, 3 };
  const int is_switchable = type == SWITCHABLE;

  vp9_wb_write_bit(wb, is_switchable);
  if (!is_switchable)
    vp9_wb_write_literal(wb, literal_for_type[type], 2);
}
|
|
|
|
|
|
|
|
// If the frame is marked SWITCHABLE but the encoder actually used only one
// interpolation filter, demote to that fixed filter so the per-block filter
// signaling cost is avoided.
static void fix_mcomp_filter_type(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  int used_filters = 0;
  int last_used = 0;
  int i, j;

  if (cm->mcomp_filter_type != SWITCHABLE)
    return;

  // Tally how many distinct filters were ever chosen across all contexts.
  for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
    int total = 0;

    for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
      total += cm->counts.switchable_interp[j][i];

    if (total > 0) {
      ++used_filters;
      last_used = i;
    }
  }

  // Exactly one filter used: pin it at the frame level.
  if (used_filters == 1)
    cm->mcomp_filter_type = last_used;
}
|
|
|
|
|
2013-06-06 21:33:12 +02:00
|
|
|
// Writes the tile configuration: the column count as a unary run of 1-bits
// above the minimum (terminated by a 0 unless already at the maximum), and
// the row count as up to two bits (0, 1 or 2 log2 rows).
static void write_tile_info(VP9_COMMON *cm, struct vp9_write_bit_buffer *wb) {
  int min_log2_tile_cols, max_log2_tile_cols, i;

  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  for (i = min_log2_tile_cols; i < cm->log2_tile_cols; ++i)
    vp9_wb_write_bit(wb, 1);

  if (cm->log2_tile_cols < max_log2_tile_cols)
    vp9_wb_write_bit(wb, 0);

  // rows
  vp9_wb_write_bit(wb, cm->log2_tile_rows != 0);
  if (cm->log2_tile_rows != 0)
    vp9_wb_write_bit(wb, cm->log2_tile_rows != 1);
}
|
|
|
|
|
2013-06-07 22:41:44 +02:00
|
|
|
// Returns the reference-buffer refresh bitmask written into the frame
// header: one bit per frame buffer slot that this frame overwrites.
static int get_refresh_mask(VP9_COMP *cpi) {
  // Should the GF or ARF be updated using the transmitted frame or buffer
#if CONFIG_MULTIPLE_ARF
  if (!cpi->multi_arf_enabled && cpi->refresh_golden_frame &&
      !cpi->refresh_alt_ref_frame) {
#else
  if (cpi->refresh_golden_frame && !cpi->refresh_alt_ref_frame &&
      !cpi->use_svc) {
#endif
    // Preserve the previously existing golden frame and update the frame in
    // the alt ref slot instead. This is highly specific to the use of
    // alt-ref as a forward reference, and this needs to be generalized as
    // other uses are implemented (like RTC/temporal scaling)
    //
    // gld_fb_idx and alt_fb_idx need to be swapped for future frames, but
    // that happens in vp9_onyx_if.c:update_reference_frames() so that it can
    // be done outside of the recode loop.
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->alt_fb_idx);
  } else {
    int arf_idx = cpi->alt_fb_idx;
#if CONFIG_MULTIPLE_ARF
    // Determine which ARF buffer to use to encode this ARF frame.
    if (cpi->multi_arf_enabled) {
      int sn = cpi->sequence_number;
      arf_idx = (cpi->frame_coding_order[sn] < 0) ?
          cpi->arf_buffer_idx[sn + 1] :
          cpi->arf_buffer_idx[sn];
    }
#endif
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
           (cpi->refresh_alt_ref_frame << arf_idx);
  }
}
|
|
|
|
|
2013-07-10 00:59:19 +02:00
|
|
|
// Packs every tile's compressed data into data_ptr and returns the total
// number of bytes written. Each tile except the last is preceded by a 4-byte
// big-endian size field so the decoder can locate tile boundaries.
static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
  VP9_COMMON *const cm = &cpi->common;
  vp9_writer residual_bc;

  int tile_row, tile_col;
  // Per-tile cursors into the shared token buffer; dimensions bound the
  // maximum tile grid (4 rows x 64 columns).
  TOKENEXTRA *tok[4][1 << 6], *tok_end;
  size_t total_size = 0;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;

  vpx_memset(cpi->above_seg_context, 0, sizeof(*cpi->above_seg_context) *
             mi_cols_aligned_to_sb(cm->mi_cols));

  // Lay out each tile's token start by accumulating the per-tile token
  // counts in raster order.
  tok[0][0] = cpi->tok;
  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    if (tile_row)
      tok[tile_row][0] = tok[tile_row - 1][tile_cols - 1] +
                         cpi->tok_count[tile_row - 1][tile_cols - 1];

    for (tile_col = 1; tile_col < tile_cols; tile_col++)
      tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] +
                                cpi->tok_count[tile_row][tile_col - 1];
  }

  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
      TileInfo tile;

      vp9_tile_init(&tile, cm, tile_row, tile_col);
      tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col];

      // Every tile but the last reserves 4 bytes for its size prefix.
      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
        vp9_start_encode(&residual_bc, data_ptr + total_size + 4);
      else
        vp9_start_encode(&residual_bc, data_ptr + total_size);

      write_modes(cpi, &tile, &residual_bc, &tok[tile_row][tile_col], tok_end);
      // The tile must consume exactly its share of the token buffer.
      assert(tok[tile_row][tile_col] == tok_end);
      vp9_stop_encode(&residual_bc);
      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
        // size of this tile
        write_be32(data_ptr + total_size, residual_bc.pos);
        total_size += 4;
      }

      total_size += residual_bc.pos;
    }
  }

  return total_size;
}
|
|
|
|
|
2013-06-07 22:41:44 +02:00
|
|
|
// Signals whether the display (render) size differs from the coded size,
// and if so writes the display dimensions (each minus one, in 16 bits).
static void write_display_size(VP9_COMP *cpi, struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;
  const int has_display_size = cm->width != cm->display_width ||
                               cm->height != cm->display_height;

  vp9_wb_write_bit(wb, has_display_size);
  if (has_display_size) {
    vp9_wb_write_literal(wb, cm->display_width - 1, 16);
    vp9_wb_write_literal(wb, cm->display_height - 1, 16);
  }
}
|
|
|
|
|
2013-06-08 03:09:29 +02:00
|
|
|
// Writes the coded frame size (each dimension minus one, 16 bits) followed
// by the optional display-size override.
static void write_frame_size(VP9_COMP *cpi,
                             struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;
  const int coded_width = cm->width - 1;
  const int coded_height = cm->height - 1;

  vp9_wb_write_literal(wb, coded_width, 16);
  vp9_wb_write_literal(wb, coded_height, 16);

  write_display_size(cpi, wb);
}
|
|
|
|
|
|
|
|
// For inter frames: walks the three active reference buffers, emitting one
// "matches this ref" bit per candidate and stopping at the first match, so
// the decoder can reuse that reference's dimensions. If no reference
// matches, the width/height are coded explicitly. Display size follows in
// either case.
static void write_frame_size_with_refs(VP9_COMP *cpi,
                                       struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;
  int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx,
                                      cpi->alt_fb_idx};
  int i, found = 0;

  for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
    YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->ref_frame_map[refs[i]]];
    found = cm->width == cfg->y_crop_width &&
            cm->height == cfg->y_crop_height;

    // TODO(ivan): This prevents a bug while more than 3 buffers are used. Do
    // it in a better way.
    if (cpi->use_svc) {
      found = 0;  // SVC layers must always code the size explicitly.
    }
    vp9_wb_write_bit(wb, found);
    if (found) {
      break;
    }
  }

  if (!found) {
    vp9_wb_write_literal(wb, cm->width - 1, 16);
    vp9_wb_write_literal(wb, cm->height - 1, 16);
  }

  write_display_size(cpi, wb);
}
|
|
|
|
|
|
|
|
static void write_sync_code(struct vp9_write_bit_buffer *wb) {
|
2013-10-24 02:24:17 +02:00
|
|
|
vp9_wb_write_literal(wb, VP9_SYNC_CODE_0, 8);
|
|
|
|
vp9_wb_write_literal(wb, VP9_SYNC_CODE_1, 8);
|
|
|
|
vp9_wb_write_literal(wb, VP9_SYNC_CODE_2, 8);
|
2013-06-08 03:09:29 +02:00
|
|
|
}
|
|
|
|
|
2013-06-07 22:41:44 +02:00
|
|
|
// Writes the uncompressed (raw-bit) part of the frame header: frame
// marker, profile/version, frame type, sync and color info for key frames,
// reference signaling for inter frames, frame size, and the loop-filter /
// quantizer / segmentation / tile configuration.
// NOTE: the write order below is the bitstream syntax; it must not change.
static void write_uncompressed_header(VP9_COMP *cpi,
                                      struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;

  vp9_wb_write_literal(wb, VP9_FRAME_MARKER, 2);

  // Two-bit bitstream version:
  //   00 - profile 0: 4:2:0 only
  //   10 - profile 1: adds 4:4:4, 4:2:2, alpha
  vp9_wb_write_bit(wb, cm->version);
  vp9_wb_write_bit(wb, 0);

  vp9_wb_write_bit(wb, 0);  // show_existing_frame: never set by the encoder
  vp9_wb_write_bit(wb, cm->frame_type);
  vp9_wb_write_bit(wb, cm->show_frame);
  vp9_wb_write_bit(wb, cm->error_resilient_mode);

  if (cm->frame_type == KEY_FRAME) {
    const COLOR_SPACE cs = UNKNOWN;
    write_sync_code(wb);
    vp9_wb_write_literal(wb, cs, 3);
    if (cs != SRGB) {
      vp9_wb_write_bit(wb, 0);  // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
      if (cm->version == 1) {
        vp9_wb_write_bit(wb, cm->subsampling_x);
        vp9_wb_write_bit(wb, cm->subsampling_y);
        vp9_wb_write_bit(wb, 0);  // has extra plane
      }
    } else {
      // sRGB implies 4:4:4 sampling, only available in profile 1.
      assert(cm->version == 1);
      vp9_wb_write_bit(wb, 0);  // has extra plane
    }

    write_frame_size(cpi, wb);
  } else {
    const int refs[ALLOWED_REFS_PER_FRAME] = {cpi->lst_fb_idx, cpi->gld_fb_idx,
                                              cpi->alt_fb_idx};
    if (!cm->show_frame)
      vp9_wb_write_bit(wb, cm->intra_only);

    if (!cm->error_resilient_mode)
      vp9_wb_write_literal(wb, cm->reset_frame_context, 2);

    if (cm->intra_only) {
      write_sync_code(wb);

      vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES);
      write_frame_size(cpi, wb);
    } else {
      int i;
      vp9_wb_write_literal(wb, get_refresh_mask(cpi), NUM_REF_FRAMES);
      // For each active reference: its slot index plus its sign-bias bit.
      for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
        vp9_wb_write_literal(wb, refs[i], NUM_REF_FRAMES_LOG2);
        vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[LAST_FRAME + i]);
      }

      write_frame_size_with_refs(cpi, wb);

      vp9_wb_write_bit(wb, cm->allow_high_precision_mv);

      fix_mcomp_filter_type(cpi);
      write_interp_filter_type(cm->mcomp_filter_type, wb);
    }
  }

  if (!cm->error_resilient_mode) {
    vp9_wb_write_bit(wb, cm->refresh_frame_context);
    vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
  }

  vp9_wb_write_literal(wb, cm->frame_context_idx, NUM_FRAME_CONTEXTS_LOG2);

  encode_loopfilter(&cm->lf, wb);
  encode_quantization(cm, wb);
  encode_segmentation(cpi, wb);

  write_tile_info(cm, wb);
}
|
|
|
|
|
2013-07-11 00:08:34 +02:00
|
|
|
// Packs the arithmetically-coded part of the frame header at |data|:
// transform-size probabilities, coefficient and skip probability updates
// and, for non-intra frames, inter-mode / interp-filter / prediction /
// reference / partition / motion-vector probability updates.
// Returns the number of bytes written (must fit the 16-bit size field).
// NOTE: the update order below is the bitstream syntax; it must not change.
static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  FRAME_CONTEXT *const fc = &cm->fc;
  vp9_writer header_bc;

  vp9_start_encode(&header_bc, data);

  if (xd->lossless)
    cm->tx_mode = ONLY_4X4;  // lossless coding is restricted to 4x4
  else
    encode_txfm_probs(cpi, &header_bc);

  update_coef_probs(cpi, &header_bc);

#ifdef ENTROPY_STATS
  active_section = 2;
#endif

  vp9_update_skip_probs(cpi, &header_bc);

  if (!frame_is_intra_only(cm)) {
    int i;
#ifdef ENTROPY_STATS
    active_section = 1;
#endif

    update_inter_mode_probs(cm, &header_bc);
    vp9_zero(cm->counts.inter_mode);

    if (cm->mcomp_filter_type == SWITCHABLE)
      update_switchable_interp_probs(cpi, &header_bc);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
                                cpi->intra_inter_count[i]);

    if (cm->allow_comp_inter_inter) {
      const int comp_pred_mode = cpi->common.comp_pred_mode;
      const int use_compound_pred = comp_pred_mode != SINGLE_PREDICTION_ONLY;
      const int use_hybrid_pred = comp_pred_mode == HYBRID_PREDICTION;

      vp9_write_bit(&header_bc, use_compound_pred);
      if (use_compound_pred) {
        vp9_write_bit(&header_bc, use_hybrid_pred);
        if (use_hybrid_pred)
          for (i = 0; i < COMP_INTER_CONTEXTS; i++)
            vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                      cpi->comp_inter_count[i]);
      }
    }

    if (cm->comp_pred_mode != COMP_PREDICTION_ONLY) {
      for (i = 0; i < REF_CONTEXTS; i++) {
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
                                  cpi->single_ref_count[i][0]);
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
                                  cpi->single_ref_count[i][1]);
      }
    }

    if (cm->comp_pred_mode != SINGLE_PREDICTION_ONLY)
      for (i = 0; i < REF_CONTEXTS; i++)
        vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
                                  cpi->comp_ref_count[i]);

    update_mbintra_mode_probs(cpi, &header_bc);

    for (i = 0; i < PARTITION_CONTEXTS; ++i) {
      unsigned int bct[PARTITION_TYPES - 1][2];
      update_mode(&header_bc, PARTITION_TYPES, vp9_partition_tree,
                  fc->partition_prob[i], bct,
                  (unsigned int *)cpi->partition_count[i]);
    }

    vp9_write_nmv_probs(cpi, cm->allow_high_precision_mv, &header_bc);
  }

  vp9_stop_encode(&header_bc);
  assert(header_bc.pos <= 0xffff);  // must fit the 16-bit first-partition size

  return header_bc.pos;
}
|
2011-03-11 11:34:57 +01:00
|
|
|
|
2013-07-11 00:08:34 +02:00
|
|
|
// Assembles the complete frame packet at |dest|:
//   [uncompressed header][16-bit first-partition size][compressed header]
//   [tile data...]
// On return *size holds the total number of bytes written.
void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, unsigned long *size) {
  uint8_t *data = dest;
  size_t first_part_size;
  struct vp9_write_bit_buffer wb = {data, 0};
  struct vp9_write_bit_buffer saved_wb;

  write_uncompressed_header(cpi, &wb);
  saved_wb = wb;
  // Reserve the 16-bit first-partition size; patched in below once known.
  vp9_wb_write_literal(&wb, 0, 16);

  data += vp9_rb_bytes_written(&wb);

  vp9_compute_update_table();

#ifdef ENTROPY_STATS
  // Fix: this function has no local |cm|; the previous cm->frame_type broke
  // ENTROPY_STATS builds. Use cpi->common directly.
  if (cpi->common.frame_type == INTER_FRAME)
    active_section = 0;
  else
    active_section = 7;
#endif

  vp9_clear_system_state();  // __asm emms;

  first_part_size = write_compressed_header(cpi, data);
  data += first_part_size;
  // Back-patch the size of the compressed header (fits: asserted <= 0xffff
  // inside write_compressed_header).
  vp9_wb_write_literal(&saved_wb, first_part_size, 16);

  data += encode_tiles(cpi, data);

  *size = data - dest;
}
|
|
|
|
|