vpx/vp8/encoder/bitstream.c

/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "vp8/common/header.h"
#include "encodemv.h"
#include "vp8/common/entropymode.h"
#include "vp8/common/findnearmv.h"
#include "mcomp.h"
#include "vp8/common/systemdependent.h"
#include <assert.h>
#include <stdio.h>
#include <limits.h>
#include "vp8/common/pragmas.h"
#include "vpx/vpx_encoder.h"
#include "vpx_mem/vpx_mem.h"
#include "bitstream.h"
#include "defaultcoefcounts.h"
#include "vp8/common/seg_common.h"
#include "vp8/common/pred_common.h"
#if defined(SECTIONBITS_OUTPUT)
unsigned __int64 Sectionbits[500];
#endif
#ifdef ENTROPY_STATS
int intra_mode_stats[10][10][10];
static unsigned int tree_update_hist [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] [2];
#if CONFIG_T8X8
static unsigned int tree_update_hist_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] [2];
#endif
extern unsigned int active_section;
#endif
#ifdef MODE_STATS
int count_mb_seg[4] = { 0, 0, 0, 0 };
#endif
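/* Decide whether to transmit an updated probability set for a mode tree.
 * New probabilities are derived from this frame's event counts; they are
 * written as 8-bit literals (preceded by a single update bit) only when the
 * estimated bit saving clearly exceeds the overhead of sending them,
 * otherwise a single 0 bit keeps the previous probabilities. */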
static void update_mode(
vp8_writer *const w,
int n,
vp8_token tok [/* n */],
vp8_tree tree,
vp8_prob Pnew [/* n-1 */],
vp8_prob Pcur [/* n-1 */],
unsigned int bct [/* n-1 */] [2],
const unsigned int num_events[/* n */]
)
{
unsigned int new_b = 0, old_b = 0;
int i = 0;
vp8_tree_probs_from_distribution(
n--, tok, tree,
Pnew, bct, num_events,
256, 1
);
do
{
new_b += vp8_cost_branch(bct[i], Pnew[i]);
old_b += vp8_cost_branch(bct[i], Pcur[i]);
}
while (++i < n);
if (new_b + (n << 8) < old_b)
{
int i = 0;
vp8_write_bit(w, 1);
do
{
const vp8_prob p = Pnew[i];
vp8_write_literal(w, Pcur[i] = p ? p : 1, 8);
}
while (++i < n);
}
else
vp8_write_bit(w, 0);
}
static void update_mbintra_mode_probs(VP8_COMP *cpi)
{
VP8_COMMON *const x = & cpi->common;
vp8_writer *const w = & cpi->bc;
{
vp8_prob Pnew [VP8_YMODES-1];
unsigned int bct [VP8_YMODES-1] [2];
update_mode(
w, VP8_YMODES, vp8_ymode_encodings, vp8_ymode_tree,
Pnew, x->fc.ymode_prob, bct, (unsigned int *)cpi->ymode_count
);
}
{
#if CONFIG_UVINTRA
//vp8_write_bit(w, 0);
#else
vp8_prob Pnew [VP8_UV_MODES-1];
unsigned int bct [VP8_UV_MODES-1] [2];
update_mode(
w, VP8_UV_MODES, vp8_uv_mode_encodings, vp8_uv_mode_tree,
Pnew, x->fc.uv_mode_prob, bct, (unsigned int *)cpi->uv_mode_count
);
#endif
}
}
static void write_ymode(vp8_writer *bc, int m, const vp8_prob *p)
{
vp8_write_token(bc, vp8_ymode_tree, p, vp8_ymode_encodings + m);
}
static void kfwrite_ymode(vp8_writer *bc, int m, const vp8_prob *p)
{
vp8_write_token(bc, vp8_kf_ymode_tree, p, vp8_kf_ymode_encodings + m);
}
static void write_i8x8_mode(vp8_writer *bc, int m, const vp8_prob *p)
{
vp8_write_token(bc,vp8_i8x8_mode_tree, p, vp8_i8x8_mode_encodings + m);
}
static void write_uv_mode(vp8_writer *bc, int m, const vp8_prob *p)
{
vp8_write_token(bc, vp8_uv_mode_tree, p, vp8_uv_mode_encodings + m);
}
static void write_bmode(vp8_writer *bc, int m, const vp8_prob *p)
{
vp8_write_token(bc, vp8_bmode_tree, p, vp8_bmode_encodings + m);
}
static void write_split(vp8_writer *bc, int x)
{
vp8_write_token(
bc, vp8_mbsplit_tree, vp8_mbsplit_probs, vp8_mbsplit_encodings + x
);
}
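/* Hand-inlined boolean coder: for each token this walks its path through
 * vp8_coef_tree (and, for tokens with extra bits, the token's own tree),
 * renormalising the range with vp8_norm and propagating any carry back
 * through 0xff bytes already written to the output buffer. */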
static void pack_tokens_c(vp8_writer *w, const TOKENEXTRA *p, int xcount)
{
const TOKENEXTRA *const stop = p + xcount;
unsigned int split;
unsigned int shift;
int count = w->count;
unsigned int range = w->range;
unsigned int lowvalue = w->lowvalue;
while (p < stop)
{
const int t = p->Token;
vp8_token *const a = vp8_coef_encodings + t;
const vp8_extra_bit_struct *const b = vp8_extra_bits + t;
int i = 0;
const unsigned char *pp = p->context_tree;
int v = a->value;
int n = a->Len;
if (p->skip_eob_node)
{
n--;
i = 2;
}
do
{
const int bb = (v >> --n) & 1;
split = 1 + (((range - 1) * pp[i>>1]) >> 8);
i = vp8_coef_tree[i+bb];
if (bb)
{
lowvalue += split;
range = range - split;
}
else
{
range = split;
}
shift = vp8_norm[range];
range <<= shift;
count += shift;
if (count >= 0)
{
int offset = shift - count;
if ((lowvalue << (offset - 1)) & 0x80000000)
{
int x = w->pos - 1;
while (x >= 0 && w->buffer[x] == 0xff)
{
w->buffer[x] = (unsigned char)0;
x--;
}
w->buffer[x] += 1;
}
w->buffer[w->pos++] = (lowvalue >> (24 - offset));
lowvalue <<= offset;
shift = count;
lowvalue &= 0xffffff;
count -= 8 ;
}
lowvalue <<= shift;
}
while (n);
if (b->base_val)
{
const int e = p->Extra, L = b->Len;
if (L)
{
const unsigned char *pp = b->prob;
int v = e >> 1;
int n = L; /* number of bits in v, assumed nonzero */
int i = 0;
do
{
const int bb = (v >> --n) & 1;
split = 1 + (((range - 1) * pp[i>>1]) >> 8);
i = b->tree[i+bb];
if (bb)
{
lowvalue += split;
range = range - split;
}
else
{
range = split;
}
shift = vp8_norm[range];
range <<= shift;
count += shift;
if (count >= 0)
{
int offset = shift - count;
if ((lowvalue << (offset - 1)) & 0x80000000)
{
int x = w->pos - 1;
while (x >= 0 && w->buffer[x] == 0xff)
{
w->buffer[x] = (unsigned char)0;
x--;
}
w->buffer[x] += 1;
}
w->buffer[w->pos++] = (lowvalue >> (24 - offset));
lowvalue <<= offset;
shift = count;
lowvalue &= 0xffffff;
count -= 8 ;
}
lowvalue <<= shift;
}
while (n);
}
{
split = (range + 1) >> 1;
if (e & 1)
{
lowvalue += split;
range = range - split;
}
else
{
range = split;
}
range <<= 1;
if ((lowvalue & 0x80000000))
{
int x = w->pos - 1;
while (x >= 0 && w->buffer[x] == 0xff)
{
w->buffer[x] = (unsigned char)0;
x--;
}
w->buffer[x] += 1;
}
lowvalue <<= 1;
if (!++count)
{
count = -8;
w->buffer[w->pos++] = (lowvalue >> 24);
lowvalue &= 0xffffff;
}
}
}
++p;
}
w->count = count;
w->lowvalue = lowvalue;
w->range = range;
}
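/* Partition sizes are stored as 3-byte little-endian values in the header
 * that precedes the token partitions. */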
static void write_partition_size(unsigned char *cx_data, int size)
{
signed char csize;
csize = size & 0xff;
*cx_data = csize;
csize = (size >> 8) & 0xff;
*(cx_data + 1) = csize;
csize = (size >> 16) & 0xff;
*(cx_data + 2) = csize;
}
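/* Pack the tokens into num_part separate partitions: partition i takes every
 * num_part-th macroblock row starting at row i.  Three bytes per partition
 * (except the last) are reserved at the start of cx_data for the sizes
 * written by write_partition_size() above. */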
static void pack_tokens_into_partitions_c(VP8_COMP *cpi, unsigned char *cx_data, int num_part, int *size)
{
int i;
unsigned char *ptr = cx_data;
unsigned int shift;
vp8_writer *w = &cpi->bc2;
*size = 3 * (num_part - 1);
cpi->partition_sz[0] += *size;
ptr = cx_data + (*size);
for (i = 0; i < num_part; i++)
{
vp8_start_encode(w, ptr);
{
unsigned int split;
int count = w->count;
unsigned int range = w->range;
unsigned int lowvalue = w->lowvalue;
int mb_row;
for (mb_row = i; mb_row < cpi->common.mb_rows; mb_row += num_part)
{
TOKENEXTRA *p = cpi->tplist[mb_row].start;
TOKENEXTRA *stop = cpi->tplist[mb_row].stop;
while (p < stop)
{
const int t = p->Token;
vp8_token *const a = vp8_coef_encodings + t;
const vp8_extra_bit_struct *const b = vp8_extra_bits + t;
int i = 0;
const unsigned char *pp = p->context_tree;
int v = a->value;
int n = a->Len;
if (p->skip_eob_node)
{
n--;
i = 2;
}
do
{
const int bb = (v >> --n) & 1;
split = 1 + (((range - 1) * pp[i>>1]) >> 8);
i = vp8_coef_tree[i+bb];
if (bb)
{
lowvalue += split;
range = range - split;
}
else
{
range = split;
}
shift = vp8_norm[range];
range <<= shift;
count += shift;
if (count >= 0)
{
int offset = shift - count;
if ((lowvalue << (offset - 1)) & 0x80000000)
{
int x = w->pos - 1;
while (x >= 0 && w->buffer[x] == 0xff)
{
w->buffer[x] = (unsigned char)0;
x--;
}
w->buffer[x] += 1;
}
w->buffer[w->pos++] = (lowvalue >> (24 - offset));
lowvalue <<= offset;
shift = count;
lowvalue &= 0xffffff;
count -= 8 ;
}
lowvalue <<= shift;
}
while (n);
if (b->base_val)
{
const int e = p->Extra, L = b->Len;
if (L)
{
const unsigned char *pp = b->prob;
int v = e >> 1;
int n = L; /* number of bits in v, assumed nonzero */
int i = 0;
do
{
const int bb = (v >> --n) & 1;
split = 1 + (((range - 1) * pp[i>>1]) >> 8);
i = b->tree[i+bb];
if (bb)
{
lowvalue += split;
range = range - split;
}
else
{
range = split;
}
shift = vp8_norm[range];
range <<= shift;
count += shift;
if (count >= 0)
{
int offset = shift - count;
if ((lowvalue << (offset - 1)) & 0x80000000)
{
int x = w->pos - 1;
while (x >= 0 && w->buffer[x] == 0xff)
{
w->buffer[x] = (unsigned char)0;
x--;
}
w->buffer[x] += 1;
}
w->buffer[w->pos++] = (lowvalue >> (24 - offset));
lowvalue <<= offset;
shift = count;
lowvalue &= 0xffffff;
count -= 8 ;
}
lowvalue <<= shift;
}
while (n);
}
{
split = (range + 1) >> 1;
if (e & 1)
{
lowvalue += split;
range = range - split;
}
else
{
range = split;
}
range <<= 1;
if ((lowvalue & 0x80000000))
{
int x = w->pos - 1;
while (x >= 0 && w->buffer[x] == 0xff)
{
w->buffer[x] = (unsigned char)0;
x--;
}
w->buffer[x] += 1;
}
lowvalue <<= 1;
if (!++count)
{
count = -8;
w->buffer[w->pos++] = (lowvalue >> 24);
lowvalue &= 0xffffff;
}
}
}
++p;
}
}
w->count = count;
w->lowvalue = lowvalue;
w->range = range;
}
vp8_stop_encode(w);
*size += w->pos;
/* The first partition size is set earlier */
cpi->partition_sz[i + 1] = w->pos;
if (i < (num_part - 1))
{
write_partition_size(cx_data, w->pos);
cx_data += 3;
ptr += w->pos;
}
}
}
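/* Same coding loop as pack_tokens_c, but driven by the per-row token lists
 * in cpi->tplist so that the whole frame can be packed row by row into a
 * single partition. */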
static void pack_mb_row_tokens_c(VP8_COMP *cpi, vp8_writer *w)
{
unsigned int split;
int count = w->count;
unsigned int range = w->range;
unsigned int lowvalue = w->lowvalue;
unsigned int shift;
int mb_row;
for (mb_row = 0; mb_row < cpi->common.mb_rows; mb_row++)
{
TOKENEXTRA *p = cpi->tplist[mb_row].start;
TOKENEXTRA *stop = cpi->tplist[mb_row].stop;
while (p < stop)
{
const int t = p->Token;
vp8_token *const a = vp8_coef_encodings + t;
const vp8_extra_bit_struct *const b = vp8_extra_bits + t;
int i = 0;
const unsigned char *pp = p->context_tree;
int v = a->value;
int n = a->Len;
if (p->skip_eob_node)
{
n--;
i = 2;
}
do
{
const int bb = (v >> --n) & 1;
split = 1 + (((range - 1) * pp[i>>1]) >> 8);
i = vp8_coef_tree[i+bb];
if (bb)
{
lowvalue += split;
range = range - split;
}
else
{
range = split;
}
shift = vp8_norm[range];
range <<= shift;
count += shift;
if (count >= 0)
{
int offset = shift - count;
if ((lowvalue << (offset - 1)) & 0x80000000)
{
int x = w->pos - 1;
while (x >= 0 && w->buffer[x] == 0xff)
{
w->buffer[x] = (unsigned char)0;
x--;
}
w->buffer[x] += 1;
}
w->buffer[w->pos++] = (lowvalue >> (24 - offset));
lowvalue <<= offset;
shift = count;
lowvalue &= 0xffffff;
count -= 8 ;
}
lowvalue <<= shift;
}
while (n);
if (b->base_val)
{
const int e = p->Extra, L = b->Len;
if (L)
{
const unsigned char *pp = b->prob;
int v = e >> 1;
int n = L; /* number of bits in v, assumed nonzero */
int i = 0;
do
{
const int bb = (v >> --n) & 1;
split = 1 + (((range - 1) * pp[i>>1]) >> 8);
i = b->tree[i+bb];
if (bb)
{
lowvalue += split;
range = range - split;
}
else
{
range = split;
}
shift = vp8_norm[range];
range <<= shift;
count += shift;
if (count >= 0)
{
int offset = shift - count;
if ((lowvalue << (offset - 1)) & 0x80000000)
{
int x = w->pos - 1;
while (x >= 0 && w->buffer[x] == 0xff)
{
w->buffer[x] = (unsigned char)0;
x--;
}
w->buffer[x] += 1;
}
w->buffer[w->pos++] = (lowvalue >> (24 - offset));
lowvalue <<= offset;
shift = count;
lowvalue &= 0xffffff;
count -= 8 ;
}
lowvalue <<= shift;
}
while (n);
}
{
split = (range + 1) >> 1;
if (e & 1)
{
lowvalue += split;
range = range - split;
}
else
{
range = split;
}
range <<= 1;
if ((lowvalue & 0x80000000))
{
int x = w->pos - 1;
while (x >= 0 && w->buffer[x] == 0xff)
{
w->buffer[x] = (unsigned char)0;
x--;
}
w->buffer[x] += 1;
}
lowvalue <<= 1;
if (!++count)
{
count = -8;
w->buffer[w->pos++] = (lowvalue >> 24);
lowvalue &= 0xffffff;
}
}
}
++p;
}
}
w->count = count;
w->lowvalue = lowvalue;
w->range = range;
}
static void write_mv_ref
(
vp8_writer *w, MB_PREDICTION_MODE m, const vp8_prob *p
)
{
#if CONFIG_DEBUG
assert(NEARESTMV <= m && m <= SPLITMV);
#endif
vp8_write_token(w, vp8_mv_ref_tree, p,
vp8_mv_ref_encoding_array - NEARESTMV + m);
}
static void write_sub_mv_ref
(
vp8_writer *w, B_PREDICTION_MODE m, const vp8_prob *p
)
{
#if CONFIG_DEBUG
assert(LEFT4X4 <= m && m <= NEW4X4);
#endif
vp8_write_token(w, vp8_sub_mv_ref_tree, p,
vp8_sub_mv_ref_encoding_array - LEFT4X4 + m);
}
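/* Motion vectors are coded as a delta from the best reference vector chosen
 * by vp8_find_near_mvs(). */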
static void write_mv
(
vp8_writer *w, const MV *mv, const int_mv *ref, const MV_CONTEXT *mvc
)
{
MV e;
e.row = mv->row - ref->as_mv.row;
e.col = mv->col - ref->as_mv.col;
vp8_encode_motion_vector(w, &e, mvc);
}
// This function writes the current macro block's segment id to the bitstream
// It should only be called if a segment map update is indicated.
static void write_mb_segid(vp8_writer *w,
const MB_MODE_INFO *mi, const MACROBLOCKD *x)
{
// Encode the MB segment id.
if (x->segmentation_enabled && x->update_mb_segmentation_map)
{
switch (mi->segment_id)
{
case 0:
vp8_write(w, 0, x->mb_segment_tree_probs[0]);
vp8_write(w, 0, x->mb_segment_tree_probs[1]);
break;
case 1:
vp8_write(w, 0, x->mb_segment_tree_probs[0]);
vp8_write(w, 1, x->mb_segment_tree_probs[1]);
break;
case 2:
vp8_write(w, 1, x->mb_segment_tree_probs[0]);
vp8_write(w, 0, x->mb_segment_tree_probs[2]);
break;
case 3:
vp8_write(w, 1, x->mb_segment_tree_probs[0]);
vp8_write(w, 1, x->mb_segment_tree_probs[2]);
break;
// TRAP.. This should not happen
default:
vp8_write(w, 0, x->mb_segment_tree_probs[0]);
vp8_write(w, 0, x->mb_segment_tree_probs[1]);
break;
}
}
}
// This function encodes the reference frame
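// When segment level reference frame coding is inactive (or the segment
// permits more than one reference frame), the choice is coded relative to a
// context based prediction: a flag signals whether the predicted reference
// was used, and only mismatches are then coded explicitly with modified
// probabilities (impossible options are zeroed out for active segments).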
static void encode_ref_frame( vp8_writer *const w,
VP8_COMMON *const cm,
MACROBLOCKD *xd,
int segment_id,
MV_REFERENCE_FRAME rf )
{
int seg_ref_active;
int seg_ref_count = 0;
seg_ref_active = segfeature_active( xd,
segment_id,
SEG_LVL_REF_FRAME );
if ( seg_ref_active )
{
seg_ref_count = check_segref( xd, segment_id, INTRA_FRAME ) +
check_segref( xd, segment_id, LAST_FRAME ) +
check_segref( xd, segment_id, GOLDEN_FRAME ) +
check_segref( xd, segment_id, ALTREF_FRAME );
}
// If segment level coding of this signal is disabled...
// or the segment allows multiple reference frame options
if ( !seg_ref_active || (seg_ref_count > 1) )
{
// Values used in prediction model coding
unsigned char prediction_flag;
vp8_prob pred_prob;
MV_REFERENCE_FRAME pred_rf;
// Get the context probability the prediction flag
pred_prob = get_pred_prob( cm, xd, PRED_REF );
// Get the predicted value.
pred_rf = get_pred_ref( cm, xd );
// Did the chosen reference frame match its predicted value.
prediction_flag =
( xd->mode_info_context->mbmi.ref_frame == pred_rf );
set_pred_flag( xd, PRED_REF, prediction_flag );
vp8_write( w, prediction_flag, pred_prob );
// If not predicted correctly then code value explicitly
if ( !prediction_flag )
{
vp8_prob mod_refprobs[PREDICTION_PROBS];
vpx_memcpy( mod_refprobs,
cm->mod_refprobs[pred_rf], sizeof(mod_refprobs) );
// If segment coding is enabled, blank out options that can't occur by
// setting the branch probability to 0.
if ( seg_ref_active )
{
mod_refprobs[INTRA_FRAME] *=
check_segref( xd, segment_id, INTRA_FRAME );
mod_refprobs[LAST_FRAME] *=
check_segref( xd, segment_id, LAST_FRAME );
mod_refprobs[GOLDEN_FRAME] *=
( check_segref( xd, segment_id, GOLDEN_FRAME ) *
check_segref( xd, segment_id, ALTREF_FRAME ) );
}
if ( mod_refprobs[0] )
{
vp8_write(w, (rf != INTRA_FRAME), mod_refprobs[0] );
}
// Inter coded
if (rf != INTRA_FRAME)
{
if ( mod_refprobs[1] )
{
vp8_write(w, (rf != LAST_FRAME), mod_refprobs[1] );
}
if (rf != LAST_FRAME)
{
if ( mod_refprobs[2] )
{
vp8_write(w, (rf != GOLDEN_FRAME), mod_refprobs[2] );
}
}
}
}
}
// If using the prediction model we have nothing further to do because
// the reference frame is fully coded by the segment
}
// Update the probabilities used to encode reference frame data
static void update_ref_probs( VP8_COMP *const cpi )
{
VP8_COMMON *const cm = & cpi->common;
const int *const rfct = cpi->count_mb_ref_frame_usage;
const int rf_intra = rfct[INTRA_FRAME];
const int rf_inter = rfct[LAST_FRAME] +
rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
cm->prob_intra_coded = (rf_intra + rf_inter)
? rf_intra * 255 / (rf_intra + rf_inter) : 1;
if (!cm->prob_intra_coded)
cm->prob_intra_coded = 1;
cm->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;
if (!cm->prob_last_coded)
cm->prob_last_coded = 1;
cm->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
? (rfct[GOLDEN_FRAME] * 255) /
(rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128;
if (!cm->prob_gf_coded)
cm->prob_gf_coded = 1;
// Compute a modified set of probabilities to use when prediction of the
// reference frame fails
compute_mod_refprobs( cm );
}
#if CONFIG_SUPERBLOCKS
static void pack_inter_mode_mvs(VP8_COMP *const cpi)
{
VP8_COMMON *const pc = & cpi->common;
vp8_writer *const w = & cpi->bc;
const MV_CONTEXT *mvc = pc->fc.mvc;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
int i;
int pred_context;
MODE_INFO *m = pc->mi;
MODE_INFO *prev_m = pc->prev_mi;
const int mis = pc->mode_info_stride;
int mb_row, mb_col;
int row, col;
int prob_skip_false = 0;
// Values used in prediction model coding
vp8_prob pred_prob;
unsigned char prediction_flag;
int row_delta[4] = { 0, +1, 0, -1};
int col_delta[4] = {+1, -1, +1, +1};
cpi->mb.partition_info = cpi->mb.pi;
// Update the probabilities used to encode reference frame data
update_ref_probs( cpi );
#ifdef ENTROPY_STATS
active_section = 1;
#endif
if (pc->mb_no_coeff_skip)
{
// Divide by 0 check. 0 case possible with segment features
if ( (cpi->skip_false_count + cpi->skip_true_count) )
{
prob_skip_false = cpi->skip_false_count * 256 /
(cpi->skip_false_count + cpi->skip_true_count);
if (prob_skip_false <= 1)
prob_skip_false = 1;
if (prob_skip_false > 255)
prob_skip_false = 255;
}
else
prob_skip_false = 255;
cpi->prob_skip_false = prob_skip_false;
vp8_write_literal(w, prob_skip_false, 8);
}
vp8_write_literal(w, pc->prob_intra_coded, 8);
vp8_write_literal(w, pc->prob_last_coded, 8);
vp8_write_literal(w, pc->prob_gf_coded, 8);
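// Signal the dual prediction mode: a 0 bit means single prediction only,
// 1 then 0 means dual prediction only, and 1 then 1 selects hybrid
// prediction, in which case a probability is also sent for each dual
// prediction context.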
if (cpi->common.dual_pred_mode == HYBRID_PREDICTION)
{
vp8_write(w, 1, 128);
vp8_write(w, 1, 128);
for (i = 0; i < DUAL_PRED_CONTEXTS; i++)
{
if (cpi->single_pred_count[i] + cpi->dual_pred_count[i])
{
pc->prob_dualpred[i] = cpi->single_pred_count[i] * 255 /
(cpi->single_pred_count[i] + cpi->dual_pred_count[i]);
if (pc->prob_dualpred[i] < 1)
pc->prob_dualpred[i] = 1;
}
else
{
pc->prob_dualpred[i] = 128;
}
vp8_write_literal(w, pc->prob_dualpred[i], 8);
}
}
else if (cpi->common.dual_pred_mode == SINGLE_PREDICTION_ONLY)
{
vp8_write(w, 0, 128);
}
else /* dual prediction only */
{
vp8_write(w, 1, 128);
vp8_write(w, 0, 128);
}
update_mbintra_mode_probs(cpi);
vp8_write_mvprobs(cpi);
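// Macroblocks are traversed in 2x2 groups; within each group the
// row_delta/col_delta tables step through top-left, top-right, bottom-left
// and bottom-right before moving on to the next group.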
mb_row = 0;
for (row=0; row < pc->mb_rows; row += 2)
{
m = pc->mi + row * mis;
mb_col = 0;
for (col=0; col < pc->mb_cols; col += 2)
{
int i;
for (i=0; i<4; i++)
{
const MB_MODE_INFO *const mi = & m->mbmi;
const MV_REFERENCE_FRAME rf = mi->ref_frame;
const MB_PREDICTION_MODE mode = mi->mode;
const int segment_id = mi->segment_id;
int dy = row_delta[i];
int dx = col_delta[i];
int offset_extended = dy * mis + dx;
if ((mb_row >= pc->mb_rows) || (mb_col >= pc->mb_cols))
{
mb_row += dy;
mb_col += dx;
m += offset_extended;
cpi->mb.partition_info += offset_extended;
continue;
}
// Distance of Mb to the various image edges.
// These are specified to 1/8th pel as they are always compared to MV
// values that are in 1/8th pel units
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
xd->mb_to_top_edge = -((mb_row * 16)) << 3;
xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
// Make sure the MacroBlockD mode info pointer is set correctly
xd->mode_info_context = m;
xd->prev_mode_info_context = prev_m;
#ifdef ENTROPY_STATS
active_section = 9;
#endif
if (cpi->mb.e_mbd.update_mb_segmentation_map)
{
// Is temporal coding of the segment map enabled
if (pc->temporal_update)
{
prediction_flag =
get_pred_flag( xd, PRED_SEG_ID );
pred_prob =
get_pred_prob( pc, xd, PRED_SEG_ID);
// Code the segment id prediction flag for this mb
vp8_write( w, prediction_flag, pred_prob );
// If the mb's segment id was not predicted, code it explicitly
if (!prediction_flag)
write_mb_segid(w, mi, &cpi->mb.e_mbd);
}
else
{
// Normal unpredicted coding
write_mb_segid(w, mi, &cpi->mb.e_mbd);
}
}
if ( pc->mb_no_coeff_skip &&
( !segfeature_active( xd, segment_id, SEG_LVL_EOB ) ||
( get_segdata( xd, segment_id, SEG_LVL_EOB ) != 0 ) ) )
{
vp8_encode_bool(w, mi->mb_skip_coeff, prob_skip_false);
}
// Encode the reference frame.
encode_ref_frame( w, pc, xd,
segment_id, rf );
if (rf == INTRA_FRAME)
{
#ifdef ENTROPY_STATS
active_section = 6;
#endif
if ( !segfeature_active( xd, segment_id, SEG_LVL_MODE ) )
write_ymode(w, mode, pc->fc.ymode_prob);
if (mode == B_PRED)
{
int j = 0;
do
write_bmode(w, m->bmi[j].as_mode, pc->fc.bmode_prob);
while (++j < 16);
}
if(mode == I8X8_PRED)
{
write_i8x8_mode(w, m->bmi[0].as_mode, pc->i8x8_mode_prob);
write_i8x8_mode(w, m->bmi[2].as_mode, pc->i8x8_mode_prob);
write_i8x8_mode(w, m->bmi[8].as_mode, pc->i8x8_mode_prob);
write_i8x8_mode(w, m->bmi[10].as_mode, pc->i8x8_mode_prob);
}
else
{
#if CONFIG_UVINTRA
write_uv_mode(w, mi->uv_mode, pc->fc.uv_mode_prob[mode]);
#ifdef MODE_STATS
if(mode!=B_PRED)
++cpi->y_uv_mode_count[mode][mi->uv_mode];
#endif
#else
write_uv_mode(w, mi->uv_mode, pc->fc.uv_mode_prob);
#endif /*CONFIG_UVINTRA*/
}
}
else
{
int_mv best_mv;
vp8_prob mv_ref_p [VP8_MVREFS-1];
{
int_mv n1, n2;
int ct[4];
vp8_find_near_mvs(xd, m,
prev_m,
&n1, &n2, &best_mv, ct, rf,
cpi->common.ref_frame_sign_bias);
vp8_mv_ref_probs(&cpi->common, mv_ref_p, ct);
#ifdef ENTROPY_STATS
accum_mv_refs(mode, ct);
#endif
}
#ifdef ENTROPY_STATS
active_section = 3;
#endif
// Is the segment coding of mode enabled
if ( !segfeature_active( xd, segment_id, SEG_LVL_MODE ) )
{
write_mv_ref(w, mode, mv_ref_p);
vp8_accum_mv_refs(&cpi->common, mode, ct);
}
{
switch (mode) /* new, split require MVs */
{
case NEWMV:
#ifdef ENTROPY_STATS
active_section = 5;
#endif
write_mv(w, &mi->mv.as_mv, &best_mv, mvc);
if (cpi->common.dual_pred_mode == HYBRID_PREDICTION)
{
vp8_write(w,
mi->second_ref_frame != INTRA_FRAME,
get_pred_prob( pc, xd, PRED_DUAL ) );
}
if (mi->second_ref_frame)
{
const int second_rf = mi->second_ref_frame;
int_mv n1, n2;
int ct[4];
vp8_find_near_mvs(xd, m,
prev_m,
&n1, &n2, &best_mv,
ct, second_rf,
cpi->common.ref_frame_sign_bias);
write_mv(w, &mi->second_mv.as_mv, &best_mv, mvc);
}
break;
case SPLITMV:
{
int j = 0;
#ifdef MODE_STATS
++count_mb_seg [mi->partitioning];
#endif
write_split(w, mi->partitioning);
do
{
B_PREDICTION_MODE blockmode;
int_mv blockmv;
const int *const L = vp8_mbsplits [mi->partitioning];
int k = -1; /* first block in subset j */
int mv_contz;
int_mv leftmv, abovemv;
blockmode = cpi->mb.partition_info->bmi[j].mode;
blockmv = cpi->mb.partition_info->bmi[j].mv;
#if CONFIG_DEBUG
while (j != L[++k])
if (k >= 16)
assert(0);
#else
while (j != L[++k]);
#endif
leftmv.as_int = left_block_mv(m, k);
abovemv.as_int = above_block_mv(m, k, mis);
mv_contz = vp8_mv_cont(&leftmv, &abovemv);
write_sub_mv_ref(w, blockmode, vp8_sub_mv_ref_prob2 [mv_contz]);
if (blockmode == NEW4X4)
{
#ifdef ENTROPY_STATS
active_section = 11;
#endif
write_mv(w, &blockmv.as_mv, &best_mv, (const MV_CONTEXT *) mvc);
}
}
while (++j < cpi->mb.partition_info->count);
}
break;
default:
if (cpi->common.dual_pred_mode == HYBRID_PREDICTION)
{
vp8_write(w,
mi->second_ref_frame != INTRA_FRAME,
get_pred_prob( pc, xd, PRED_DUAL ) );
}
break;
}
}
}
prev_m += offset_extended;
assert((prev_m-cpi->common.prev_mip)==(m-cpi->common.mip));
assert((prev_m-cpi->common.prev_mi)==(m-cpi->common.mi));
// skip to next MB
mb_row += dy;
mb_col += dx;
m += offset_extended;
cpi->mb.partition_info += offset_extended;
}
}
mb_row += 2;
m += mis + (1- (pc->mb_cols & 0x1));
cpi->mb.partition_info += mis + (1- (pc->mb_cols & 0x1));
}
}
#else
static void pack_inter_mode_mvs(VP8_COMP *const cpi)
{
VP8_COMMON *const pc = & cpi->common;
vp8_writer *const w = & cpi->bc;
const MV_CONTEXT *mvc = pc->fc.mvc;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
int i;
int pred_context;
MODE_INFO *m = pc->mi;
MODE_INFO *prev_m = pc->prev_mi;
const int mis = pc->mode_info_stride;
int mb_row = -1;
int prob_skip_false = 0;
// Values used in prediction model coding
vp8_prob pred_prob;
unsigned char prediction_flag;
cpi->mb.partition_info = cpi->mb.pi;
// Update the probabilities used to encode reference frame data
update_ref_probs( cpi );
#ifdef ENTROPY_STATS
active_section = 1;
#endif
if (pc->mb_no_coeff_skip)
{
// Divide by 0 check. 0 case possible with segment features
if ( (cpi->skip_false_count + cpi->skip_true_count) )
{
prob_skip_false = cpi->skip_false_count * 256 /
(cpi->skip_false_count + cpi->skip_true_count);
if (prob_skip_false <= 1)
prob_skip_false = 1;
if (prob_skip_false > 255)
prob_skip_false = 255;
}
else
prob_skip_false = 255;
cpi->prob_skip_false = prob_skip_false;
vp8_write_literal(w, prob_skip_false, 8);
}
vp8_write_literal(w, pc->prob_intra_coded, 8);
vp8_write_literal(w, pc->prob_last_coded, 8);
vp8_write_literal(w, pc->prob_gf_coded, 8);
if (cpi->common.dual_pred_mode == HYBRID_PREDICTION)
{
vp8_write(w, 1, 128);
vp8_write(w, 1, 128);
for (i = 0; i < DUAL_PRED_CONTEXTS; i++)
{
if (cpi->single_pred_count[i] + cpi->dual_pred_count[i])
{
pc->prob_dualpred[i] = cpi->single_pred_count[i] * 255 /
(cpi->single_pred_count[i] + cpi->dual_pred_count[i]);
if (pc->prob_dualpred[i] < 1)
pc->prob_dualpred[i] = 1;
}
else
{
pc->prob_dualpred[i] = 128;
}
vp8_write_literal(w, pc->prob_dualpred[i], 8);
}
}
else if (cpi->common.dual_pred_mode == SINGLE_PREDICTION_ONLY)
{
vp8_write(w, 0, 128);
}
else /* dual prediction only */
{
vp8_write(w, 1, 128);
vp8_write(w, 0, 128);
}
update_mbintra_mode_probs(cpi);
vp8_write_mvprobs(cpi);
while (++mb_row < pc->mb_rows)
{
int mb_col = -1;
while (++mb_col < pc->mb_cols)
{
const MB_MODE_INFO *const mi = & m->mbmi;
const MV_REFERENCE_FRAME rf = mi->ref_frame;
const MB_PREDICTION_MODE mode = mi->mode;
const int segment_id = mi->segment_id;
// Distance of Mb to the various image edges.
// These are specified to 1/8th pel as they are always compared to MV values that are in 1/8th pel units
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
xd->mb_to_top_edge = -((mb_row * 16)) << 3;
xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
// Make sure the MacroBlockD mode info pointer is set correctly
xd->mode_info_context = m;
xd->prev_mode_info_context = prev_m;
#ifdef ENTROPY_STATS
active_section = 9;
#endif
if (cpi->mb.e_mbd.update_mb_segmentation_map)
{
// Is temporal coding of the segment map enabled
if (pc->temporal_update)
{
prediction_flag =
get_pred_flag( xd, PRED_SEG_ID );
pred_prob =
get_pred_prob( pc, xd, PRED_SEG_ID);
// Code the segment id prediction flag for this mb
vp8_write( w, prediction_flag, pred_prob );
// If the mb's segment id was not predicted, code it explicitly
if (!prediction_flag)
write_mb_segid(w, mi, &cpi->mb.e_mbd);
}
else
{
// Normal unpredicted coding
write_mb_segid(w, mi, &cpi->mb.e_mbd);
}
}
if ( pc->mb_no_coeff_skip &&
( !segfeature_active( xd, segment_id, SEG_LVL_EOB ) ||
( get_segdata( xd, segment_id, SEG_LVL_EOB ) != 0 ) ) )
{
vp8_encode_bool(w, mi->mb_skip_coeff, prob_skip_false);
}
// Encode the reference frame.
encode_ref_frame( w, pc, xd,
segment_id, rf );
if (rf == INTRA_FRAME)
{
#ifdef ENTROPY_STATS
active_section = 6;
#endif
if ( !segfeature_active( xd, segment_id, SEG_LVL_MODE ) )
write_ymode(w, mode, pc->fc.ymode_prob);
if (mode == B_PRED)
{
int j = 0;
do
write_bmode(w, m->bmi[j].as_mode, pc->fc.bmode_prob);
while (++j < 16);
}
if(mode == I8X8_PRED)
{
write_i8x8_mode(w, m->bmi[0].as_mode, pc->i8x8_mode_prob);
write_i8x8_mode(w, m->bmi[2].as_mode, pc->i8x8_mode_prob);
write_i8x8_mode(w, m->bmi[8].as_mode, pc->i8x8_mode_prob);
write_i8x8_mode(w, m->bmi[10].as_mode, pc->i8x8_mode_prob);
}
else
{
#if CONFIG_UVINTRA
write_uv_mode(w, mi->uv_mode, pc->fc.uv_mode_prob[mode]);
#ifdef MODE_STATS
if(mode!=B_PRED)
++cpi->y_uv_mode_count[mode][mi->uv_mode];
#endif
#else
write_uv_mode(w, mi->uv_mode, pc->fc.uv_mode_prob);
#endif /*CONFIG_UVINTRA*/
}
}
else
{
int_mv best_mv;
int ct[4];
vp8_prob mv_ref_p [VP8_MVREFS-1];
{
int_mv n1, n2;
vp8_find_near_mvs(xd, m,
prev_m,
&n1, &n2, &best_mv, ct, rf, cpi->common.ref_frame_sign_bias);
vp8_mv_ref_probs(&cpi->common, mv_ref_p, ct);
#ifdef ENTROPY_STATS
accum_mv_refs(mode, ct);
#endif
}
#ifdef ENTROPY_STATS
active_section = 3;
#endif
// Is the segment coding of mode enabled
if ( !segfeature_active( xd, segment_id, SEG_LVL_MODE ) )
{
write_mv_ref(w, mode, mv_ref_p);
vp8_accum_mv_refs(&cpi->common, mode, ct);
}
{
switch (mode) /* new, split require MVs */
{
case NEWMV:
#ifdef ENTROPY_STATS
active_section = 5;
#endif
write_mv(w, &mi->mv.as_mv, &best_mv, mvc);
if (cpi->common.dual_pred_mode == HYBRID_PREDICTION)
{
vp8_write(w, mi->second_ref_frame != INTRA_FRAME,
get_pred_prob( pc, xd, PRED_DUAL ) );
}
if (mi->second_ref_frame)
{
const int second_rf = mi->second_ref_frame;
int_mv n1, n2;
int ct[4];
vp8_find_near_mvs(xd, m,
prev_m,
&n1, &n2, &best_mv,
ct, second_rf,
cpi->common.ref_frame_sign_bias);
write_mv(w, &mi->second_mv.as_mv, &best_mv, mvc);
}
break;
case SPLITMV:
{
int j = 0;
#ifdef MODE_STATS
++count_mb_seg [mi->partitioning];
#endif
write_split(w, mi->partitioning);
do
{
B_PREDICTION_MODE blockmode;
int_mv blockmv;
const int *const L = vp8_mbsplits [mi->partitioning];
int k = -1; /* first block in subset j */
int mv_contz;
int_mv leftmv, abovemv;
blockmode = cpi->mb.partition_info->bmi[j].mode;
blockmv = cpi->mb.partition_info->bmi[j].mv;
#if CONFIG_DEBUG
while (j != L[++k])
if (k >= 16)
assert(0);
#else
while (j != L[++k]);
#endif
leftmv.as_int = left_block_mv(m, k);
abovemv.as_int = above_block_mv(m, k, mis);
mv_contz = vp8_mv_cont(&leftmv, &abovemv);
write_sub_mv_ref(w, blockmode, vp8_sub_mv_ref_prob2 [mv_contz]);
if (blockmode == NEW4X4)
{
#ifdef ENTROPY_STATS
active_section = 11;
#endif
write_mv(w, &blockmv.as_mv, &best_mv, (const MV_CONTEXT *) mvc);
}
}
while (++j < cpi->mb.partition_info->count);
}
break;
default:
if (cpi->common.dual_pred_mode == HYBRID_PREDICTION)
{
vp8_write(w, mi->second_ref_frame != INTRA_FRAME,
get_pred_prob( pc, xd, PRED_DUAL ) );
}
break;
}
}
}
++m;
++prev_m;
assert((prev_m-cpi->common.prev_mip)==(m-cpi->common.mip));
assert((prev_m-cpi->common.prev_mi)==(m-cpi->common.mi));
cpi->mb.partition_info++;
}
++m; /* skip L prediction border */
++prev_m;
cpi->mb.partition_info++;
}
}
#endif // CONFIG_SUPERBLOCKS
#if CONFIG_SUPERBLOCKS
static void write_kfmodes(VP8_COMP *cpi)
{
vp8_writer *const bc = & cpi->bc;
const VP8_COMMON *const c = & cpi->common;
MODE_INFO *m;
int i;
int row, col;
int mb_row, mb_col;
int prob_skip_false = 0;
int row_delta[4] = { 0, +1, 0, -1};
int col_delta[4] = {+1, -1, +1, +1};
const int mis = c->mode_info_stride;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
if (c->mb_no_coeff_skip)
{
// Divide by 0 check. 0 case possible with segment features
if ( (cpi->skip_false_count + cpi->skip_true_count) )
{
prob_skip_false = cpi->skip_false_count * 256 /
(cpi->skip_false_count + cpi->skip_true_count);
if (prob_skip_false <= 1)
prob_skip_false = 1;
if (prob_skip_false > 255)
prob_skip_false = 255;
}
else
prob_skip_false = 255;
cpi->prob_skip_false = prob_skip_false;
vp8_write_literal(bc, prob_skip_false, 8);
}
#if CONFIG_QIMODE
if(!c->kf_ymode_probs_update)
{
vp8_write_literal(bc, c->kf_ymode_probs_index, 3);
}
#endif
mb_row = 0;
for (row=0; row < c->mb_rows; row += 2)
{
m = c->mi + row * mis;
mb_col = 0;
for (col=0; col < c->mb_cols; col += 2)
{
for (i=0; i<4; i++)
{
int ym;
int segment_id;
int dy = row_delta[i];
int dx = col_delta[i];
int offset_extended = dy * mis + dx;
if ((mb_row >= c->mb_rows) || (mb_col >= c->mb_cols))
{
mb_row += dy;
mb_col += dx;
m += offset_extended;
continue;
}
ym = m->mbmi.mode;
segment_id = m->mbmi.segment_id;
if (cpi->mb.e_mbd.update_mb_segmentation_map)
{
write_mb_segid(bc, &m->mbmi, &cpi->mb.e_mbd);
}
if ( c->mb_no_coeff_skip &&
( !segfeature_active( xd, segment_id, SEG_LVL_EOB ) ||
(get_segdata( xd, segment_id, SEG_LVL_EOB ) != 0) ) )
{
vp8_encode_bool(bc, m->mbmi.mb_skip_coeff, prob_skip_false);
}
#if CONFIG_QIMODE
kfwrite_ymode(bc, ym, c->kf_ymode_prob[c->kf_ymode_probs_index]);
#else
kfwrite_ymode(bc, ym, c->kf_ymode_prob);
#endif
if (ym == B_PRED)
{
int i = 0;
do
{
const B_PREDICTION_MODE A = above_block_mode(m, i, mis);
const B_PREDICTION_MODE L = left_block_mode(m, i);
const int bm = m->bmi[i].as_mode;
#ifdef ENTROPY_STATS
++intra_mode_stats [A] [L] [bm];
#endif
write_bmode(bc, bm, c->kf_bmode_prob [A] [L]);
}
while (++i < 16);
}
if(ym == I8X8_PRED)
{
write_i8x8_mode(bc, m->bmi[0].as_mode, c->i8x8_mode_prob);
write_i8x8_mode(bc, m->bmi[2].as_mode, c->i8x8_mode_prob);
write_i8x8_mode(bc, m->bmi[8].as_mode, c->i8x8_mode_prob);
write_i8x8_mode(bc, m->bmi[10].as_mode, c->i8x8_mode_prob);
}
else
#if CONFIG_UVINTRA
write_uv_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
#else
write_uv_mode(bc, m->mbmi.uv_mode, c->kf_uv_mode_prob);
#endif
// skip to next MB
mb_row += dy;
mb_col += dx;
m += offset_extended;
}
}
mb_row += 2;
}
}
#else
static void write_kfmodes(VP8_COMP *cpi)
{
vp8_writer *const bc = & cpi->bc;
const VP8_COMMON *const c = & cpi->common;
/* const */
MODE_INFO *m = c->mi;
int mb_row = -1;
int prob_skip_false = 0;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
if (c->mb_no_coeff_skip)
{
// Divide by 0 check. 0 case possible with segment features
if ( (cpi->skip_false_count + cpi->skip_true_count) )
{
prob_skip_false = cpi->skip_false_count * 256 /
(cpi->skip_false_count + cpi->skip_true_count);
if (prob_skip_false <= 1)
prob_skip_false = 1;
if (prob_skip_false > 255)
prob_skip_false = 255;
}
else
prob_skip_false = 255;
cpi->prob_skip_false = prob_skip_false;
vp8_write_literal(bc, prob_skip_false, 8);
}
#if CONFIG_QIMODE
if(!c->kf_ymode_probs_update)
{
vp8_write_literal(bc, c->kf_ymode_probs_index, 3);
}
#endif
while (++mb_row < c->mb_rows)
{
int mb_col = -1;
while (++mb_col < c->mb_cols)
{
const int ym = m->mbmi.mode;
int segment_id = m->mbmi.segment_id;
if (cpi->mb.e_mbd.update_mb_segmentation_map)
{
write_mb_segid(bc, &m->mbmi, &cpi->mb.e_mbd);
}
if ( c->mb_no_coeff_skip &&
( !segfeature_active( xd, segment_id, SEG_LVL_EOB ) ||
(get_segdata( xd, segment_id, SEG_LVL_EOB ) != 0) ) )
{
vp8_encode_bool(bc, m->mbmi.mb_skip_coeff, prob_skip_false);
}
#if CONFIG_QIMODE
kfwrite_ymode(bc, ym, c->kf_ymode_prob[c->kf_ymode_probs_index]);
#else
kfwrite_ymode(bc, ym, c->kf_ymode_prob);
#endif
if (ym == B_PRED)
{
const int mis = c->mode_info_stride;
int i = 0;
do
{
const B_PREDICTION_MODE A = above_block_mode(m, i, mis);
const B_PREDICTION_MODE L = left_block_mode(m, i);
const int bm = m->bmi[i].as_mode;
#ifdef ENTROPY_STATS
++intra_mode_stats [A] [L] [bm];
#endif
write_bmode(bc, bm, c->kf_bmode_prob [A] [L]);
}
while (++i < 16);
}
if(ym == I8X8_PRED)
{
write_i8x8_mode(bc, m->bmi[0].as_mode, c->i8x8_mode_prob);
write_i8x8_mode(bc, m->bmi[2].as_mode, c->i8x8_mode_prob);
write_i8x8_mode(bc, m->bmi[8].as_mode, c->i8x8_mode_prob);
write_i8x8_mode(bc, m->bmi[10].as_mode, c->i8x8_mode_prob);
m++;
}
else
#if CONFIG_UVINTRA
write_uv_mode(bc, (m++)->mbmi.uv_mode, c->kf_uv_mode_prob[ym]);
#else
write_uv_mode(bc, (m++)->mbmi.uv_mode, c->kf_uv_mode_prob);
#endif
}
//printf("\n");
m++; // skip L prediction border
}
}
#endif /* CONFIG_SUPERBLOCKS */
/* This function is used for debugging probability trees. */
static void print_prob_tree(vp8_prob
coef_probs[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES])
{
/* print coef probability tree */
int i,j,k,l;
FILE* f = fopen("enc_tree_probs.txt", "a");
fprintf(f, "{\n");
for (i = 0; i < BLOCK_TYPES; i++)
{
fprintf(f, " {\n");
for (j = 0; j < COEF_BANDS; j++)
{
fprintf(f, " {\n");
for (k = 0; k < PREV_COEF_CONTEXTS; k++)
{
fprintf(f, " {");
for (l = 0; l < ENTROPY_NODES; l++)
{
fprintf(f, "%3u, ",
(unsigned int)(coef_probs [i][j][k][l]));
}
fprintf(f, " }\n");
}
fprintf(f, " }\n");
}
fprintf(f, " }\n");
}
fprintf(f, "}\n");
fclose(f);
}
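/* Sum token counts over all previous-coefficient contexts, saturating at
 * UINT_MAX; used when error-resilient partitioning forces the coefficient
 * probabilities to be shared across those contexts. */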
static void sum_probs_over_prev_coef_context(
const unsigned int probs[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS],
unsigned int* out)
{
int i, j;
for (i=0; i < MAX_ENTROPY_TOKENS; ++i)
{
for (j=0; j < PREV_COEF_CONTEXTS; ++j)
{
const int tmp = out[i];
out[i] += probs[j][i];
/* check for wrap */
if (out[i] < tmp)
out[i] = UINT_MAX;
}
}
}
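/* Bit saving from switching one branch probability from oldp to newp, net of
 * the roughly 8-bit cost of sending the new value plus the extra cost of the
 * update flag.  A positive result means the update pays for itself. */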
static int prob_update_savings(const unsigned int *ct,
const vp8_prob oldp, const vp8_prob newp,
const vp8_prob upd)
{
const int old_b = vp8_cost_branch(ct, oldp);
const int new_b = vp8_cost_branch(ct, newp);
const int update_b = 8 +
((vp8_cost_one(upd) - vp8_cost_zero(upd)) >> 8);
return old_b - new_b - update_b;
}
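/* Savings estimate for VPX_ERROR_RESILIENT_PARTITIONS: counts are first
 * summed over the previous-coefficient contexts so that a single probability
 * set can be used for all of them, then the per-node update savings are
 * evaluated under that constraint. */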
static int independent_coef_context_savings(VP8_COMP *cpi)
{
int savings = 0;
int i = 0;
do
{
int j = 0;
do
{
int k = 0;
unsigned int prev_coef_count_sum[MAX_ENTROPY_TOKENS] = {0};
int prev_coef_savings[MAX_ENTROPY_TOKENS] = {0};
/* Calculate new probabilities given the constraint that
* they must be equal over the prev coef contexts
*/
if (cpi->common.frame_type == KEY_FRAME)
{
/* Reset to default probabilities at key frames */
sum_probs_over_prev_coef_context(default_coef_counts[i][j],
prev_coef_count_sum);
}
else
{
sum_probs_over_prev_coef_context(cpi->coef_counts[i][j],
prev_coef_count_sum);
}
do
{
/* at every context */
/* calc probs and branch cts for this frame only */
//vp8_prob new_p [ENTROPY_NODES];
//unsigned int branch_ct [ENTROPY_NODES] [2];
int t = 0; /* token/prob index */
vp8_tree_probs_from_distribution(
MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
cpi->frame_coef_probs[i][j][k],
cpi->frame_branch_ct [i][j][k],
prev_coef_count_sum,
256, 1);
do
{
const unsigned int *ct = cpi->frame_branch_ct [i][j][k][t];
const vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
const vp8_prob oldp = cpi->common.fc.coef_probs [i][j][k][t];
const vp8_prob upd = vp8_coef_update_probs [i][j][k][t];
const int s = prob_update_savings(ct, oldp, newp, upd);
if (cpi->common.frame_type != KEY_FRAME ||
(cpi->common.frame_type == KEY_FRAME && newp != oldp))
prev_coef_savings[t] += s;
}
while (++t < ENTROPY_NODES);
}
while (++k < PREV_COEF_CONTEXTS);
k = 0;
do
{
/* We only update probabilities if we can save bits, except
* for key frames where we have to update all probabilities
* to get the equal probabilities across the prev coef
* contexts.
*/
if (prev_coef_savings[k] > 0 ||
cpi->common.frame_type == KEY_FRAME)
savings += prev_coef_savings[k];
}
while (++k < ENTROPY_NODES);
}
while (++j < COEF_BANDS);
}
while (++i < BLOCK_TYPES);
return savings;
}
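/* Default savings estimate: every previous-coefficient context keeps its own
 * probabilities, and a node only contributes when updating it is a net win. */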
static int default_coef_context_savings(VP8_COMP *cpi)
{
int savings = 0;
int i = 0;
do
{
int j = 0;
do
{
int k = 0;
do
{
/* at every context */
/* calc probs and branch cts for this frame only */
//vp8_prob new_p [ENTROPY_NODES];
//unsigned int branch_ct [ENTROPY_NODES] [2];
int t = 0; /* token/prob index */
vp8_tree_probs_from_distribution(
MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
cpi->frame_coef_probs [i][j][k],
cpi->frame_branch_ct [i][j][k],
cpi->coef_counts [i][j][k],
256, 1
);
do
{
const unsigned int *ct = cpi->frame_branch_ct [i][j][k][t];
const vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
const vp8_prob oldp = cpi->common.fc.coef_probs [i][j][k][t];
const vp8_prob upd = vp8_coef_update_probs [i][j][k][t];
const int s = prob_update_savings(ct, oldp, newp, upd);
if (s > 0)
{
savings += s;
}
}
while (++t < ENTROPY_NODES);
}
while (++k < PREV_COEF_CONTEXTS);
}
while (++j < COEF_BANDS);
}
while (++i < BLOCK_TYPES);
return savings;
}
int vp8_estimate_entropy_savings(VP8_COMP *cpi)
{
int savings = 0;
#if CONFIG_T8X8
int i=0;
#endif
VP8_COMMON *const cm = & cpi->common;
const int *const rfct = cpi->count_mb_ref_frame_usage;
const int rf_intra = rfct[INTRA_FRAME];
const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
int new_intra, new_last, new_gf_alt, oldtotal, newtotal;
int ref_frame_cost[MAX_REF_FRAMES];
vp8_clear_system_state(); //__asm emms;
// Estimate reference frame cost savings.
// For now this is just based on projected overall frequency of
// each reference frame coded using an unpredicted coding tree.
if (cpi->common.frame_type != KEY_FRAME)
{
new_intra = (rf_intra + rf_inter)
? rf_intra * 255 / (rf_intra + rf_inter) : 1;
new_intra += !new_intra;
new_last = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;
new_last += !new_last;
new_gf_alt = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
? (rfct[GOLDEN_FRAME] * 255) /
(rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128;
new_gf_alt += !new_gf_alt;
// new costs
ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(new_intra);
ref_frame_cost[LAST_FRAME] = vp8_cost_one(new_intra)
+ vp8_cost_zero(new_last);
ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(new_intra)
+ vp8_cost_one(new_last)
+ vp8_cost_zero(new_gf_alt);
ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(new_intra)
+ vp8_cost_one(new_last)
+ vp8_cost_one(new_gf_alt);
newtotal =
rfct[INTRA_FRAME] * ref_frame_cost[INTRA_FRAME] +
rfct[LAST_FRAME] * ref_frame_cost[LAST_FRAME] +
rfct[GOLDEN_FRAME] * ref_frame_cost[GOLDEN_FRAME] +
rfct[ALTREF_FRAME] * ref_frame_cost[ALTREF_FRAME];
// old costs
ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(cm->prob_intra_coded);
ref_frame_cost[LAST_FRAME] = vp8_cost_one(cm->prob_intra_coded)
+ vp8_cost_zero(cm->prob_last_coded);
ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(cm->prob_intra_coded)
+ vp8_cost_one(cm->prob_last_coded)
+ vp8_cost_zero(cm->prob_gf_coded);
ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(cm->prob_intra_coded)
+ vp8_cost_one(cm->prob_last_coded)
+ vp8_cost_one(cm->prob_gf_coded);
oldtotal =
rfct[INTRA_FRAME] * ref_frame_cost[INTRA_FRAME] +
rfct[LAST_FRAME] * ref_frame_cost[LAST_FRAME] +
rfct[GOLDEN_FRAME] * ref_frame_cost[GOLDEN_FRAME] +
rfct[ALTREF_FRAME] * ref_frame_cost[ALTREF_FRAME];
savings += (oldtotal - newtotal) / 256;
// Update the reference frame probability numbers to reflect
// the observed counts in this frame. Doing this here ensures
// that if there are multiple recode iterations the baseline
// probabilities used are updated in each iteration.
cm->prob_intra_coded = new_intra;
cm->prob_last_coded = new_last;
cm->prob_gf_coded = new_gf_alt;
}
if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS)
savings += independent_coef_context_savings(cpi);
else
savings += default_coef_context_savings(cpi);
#if CONFIG_T8X8
do
{
int j = 0;
do
{
int k = 0;
do
{
/* at every context */
/* calc probs and branch cts for this frame only */
//vp8_prob new_p [ENTROPY_NODES];
//unsigned int branch_ct [ENTROPY_NODES] [2];
int t = 0; /* token/prob index */
vp8_tree_probs_from_distribution(
MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
cpi->frame_coef_probs_8x8 [i][j][k], cpi->frame_branch_ct_8x8 [i][j][k], cpi->coef_counts_8x8 [i][j][k],
256, 1
);
do
{
const unsigned int *ct = cpi->frame_branch_ct_8x8 [i][j][k][t];
const vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
const vp8_prob old = cpi->common.fc.coef_probs_8x8 [i][j][k][t];
const vp8_prob upd = vp8_coef_update_probs_8x8 [i][j][k][t];
const int old_b = vp8_cost_branch(ct, old);
const int new_b = vp8_cost_branch(ct, newp);
const int update_b = 8 +
((vp8_cost_one(upd) - vp8_cost_zero(upd)) >> 8);
const int s = old_b - new_b - update_b;
if (s > 0)
savings += s;
}
while (++t < MAX_ENTROPY_TOKENS - 1);
}
while (++k < PREV_COEF_CONTEXTS);
}
while (++j < COEF_BANDS);
}
while (++i < BLOCK_TYPES);
#endif
return savings;
}
static void update_coef_probs(VP8_COMP *cpi)
{
int i = 0;
vp8_writer *const w = & cpi->bc;
int savings = 0;
vp8_clear_system_state(); //__asm emms;
do
{
int j = 0;
do
{
int k = 0;
int prev_coef_savings[ENTROPY_NODES] = {0};
if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS)
{
for (k = 0; k < PREV_COEF_CONTEXTS; ++k)
{
int t; /* token/prob index */
for (t = 0; t < ENTROPY_NODES; ++t)
{
const unsigned int *ct = cpi->frame_branch_ct [i][j]
[k][t];
const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t];
const vp8_prob oldp = cpi->common.fc.coef_probs[i][j]
[k][t];
const vp8_prob upd = vp8_coef_update_probs[i][j][k][t];
prev_coef_savings[t] +=
prob_update_savings(ct, oldp, newp, upd);
}
}
k = 0;
}
do
{
//note: use result from vp8_estimate_entropy_savings, so no need to call vp8_tree_probs_from_distribution here.
/* at every context */
/* calc probs and branch cts for this frame only */
//vp8_prob new_p [ENTROPY_NODES];
//unsigned int branch_ct [ENTROPY_NODES] [2];
int t = 0; /* token/prob index */
//vp8_tree_probs_from_distribution(
// MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
// new_p, branch_ct, (unsigned int *)cpi->coef_counts [i][j][k],
// 256, 1
// );
do
{
const vp8_prob newp = cpi->frame_coef_probs [i][j][k][t];
vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t;
const vp8_prob upd = vp8_coef_update_probs [i][j][k][t];
int s = prev_coef_savings[t];
int u = 0;
if (!(cpi->oxcf.error_resilient_mode &
VPX_ERROR_RESILIENT_PARTITIONS))
{
s = prob_update_savings(
cpi->frame_branch_ct [i][j][k][t],
*Pold, newp, upd);
}
if (s > 0)
u = 1;
/* Force updates on key frames if the new is different,
* so that we can be sure we end up with equal probabilities
* over the prev coef contexts.
*/
if ((cpi->oxcf.error_resilient_mode &
VPX_ERROR_RESILIENT_PARTITIONS) &&
cpi->common.frame_type == KEY_FRAME && newp != *Pold)
u = 1;
vp8_write(w, u, upd);
#ifdef ENTROPY_STATS
++ tree_update_hist [i][j][k][t] [u];
#endif
if (u)
{
/* send/use new probability */
*Pold = newp;
vp8_write_literal(w, newp, 8);
savings += s;
}
}
while (++t < ENTROPY_NODES);
/* Accum token counts for generation of default statistics */
#ifdef ENTROPY_STATS
t = 0;
do
{
context_counters [i][j][k][t] += cpi->coef_counts [i][j][k][t];
}
while (++t < MAX_ENTROPY_TOKENS);
#endif
}
while (++k < PREV_COEF_CONTEXTS);
}
while (++j < COEF_BANDS);
}
while (++i < BLOCK_TYPES);
#if CONFIG_T8X8
i = 0;
do
{
int j = 0;
do
{
int k = 0;
do
{
//note: use result from vp8_estimate_entropy_savings, so no need to call vp8_tree_probs_from_distribution here.
/* at every context */
/* calc probs and branch cts for this frame only */
//vp8_prob new_p [ENTROPY_NODES];
//unsigned int branch_ct [ENTROPY_NODES] [2];
int t = 0; /* token/prob index */
//vp8_tree_probs_from_distribution(
// MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree,
// new_p, branch_ct, (unsigned int *)cpi->coef_counts [i][j][k],
// 256, 1
// );
do
{
const unsigned int *ct = cpi->frame_branch_ct_8x8 [i][j][k][t];
const vp8_prob newp = cpi->frame_coef_probs_8x8 [i][j][k][t];
vp8_prob *Pold = cpi->common.fc.coef_probs_8x8 [i][j][k] + t;
const vp8_prob old = *Pold;
const vp8_prob upd = vp8_coef_update_probs_8x8 [i][j][k][t];
const int old_b = vp8_cost_branch(ct, old);
const int new_b = vp8_cost_branch(ct, newp);
const int update_b = 8 +
((vp8_cost_one(upd) - vp8_cost_zero(upd)) >> 8);
const int s = old_b - new_b - update_b;
const int u = s > 0 ? 1 : 0;
vp8_write(w, u, upd);
#ifdef ENTROPY_STATS
++ tree_update_hist_8x8 [i][j][k][t] [u];
#endif
if (u)
{
/* send/use new probability */
*Pold = newp;
vp8_write_literal(w, newp, 8);
savings += s;
}
}
while (++t < MAX_ENTROPY_TOKENS - 1);
/* Accum token counts for generation of default statistics */
#ifdef ENTROPY_STATS
t = 0;
do
{
context_counters_8x8 [i][j][k][t] += cpi->coef_counts_8x8 [i][j][k][t];
}
while (++t < MAX_ENTROPY_TOKENS);
#endif
}
while (++k < PREV_COEF_CONTEXTS);
}
while (++j < COEF_BANDS);
}
while (++i < BLOCK_TYPES);
#endif
}
#ifdef PACKET_TESTING
FILE *vpxlogc = 0;
#endif
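/* A non-zero delta Q is coded as a set bit followed by a 4-bit magnitude and
 * a sign bit; a zero delta is a single clear bit. */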
static void put_delta_q(vp8_writer *bc, int delta_q)
{
if (delta_q != 0)
{
vp8_write_bit(bc, 1);
vp8_write_literal(bc, abs(delta_q), 4);
if (delta_q < 0)
vp8_write_bit(bc, 1);
else
vp8_write_bit(bc, 0);
}
else
vp8_write_bit(bc, 0);
}
#if CONFIG_QIMODE
extern const unsigned int kf_y_mode_cts[8][VP8_YMODES];
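/* Pick, from the 8 candidate key frame y-mode probability sets, the one that
 * minimises the total coded cost of this frame's y-mode counts. */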
static void decide_kf_ymode_entropy(VP8_COMP *cpi)
{
int mode_cost[MB_MODE_COUNT];
int cost;
int bestcost = INT_MAX;
int bestindex = 0;
int i, j;
    for (i = 0; i < 8; i++)
    {
        vp8_cost_tokens(mode_cost, cpi->common.kf_ymode_prob[i], vp8_kf_ymode_tree);
        cost = 0;
        for (j = 0; j < VP8_YMODES; j++)
        {
            cost += mode_cost[j] * cpi->ymode_count[j];
        }
        if (cost < bestcost)
        {
            bestindex = i;
            bestcost = cost;
        }
    }
cpi->common.kf_ymode_probs_index = bestindex;
}
#endif
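/* Build, for each segment, a bit mask of the reference frames used by the
 * macroblocks assigned to that segment, and store the mask as that segment's
 * SEG_LVL_REF_FRAME feature data.
 */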
static void segment_reference_frames(VP8_COMP *cpi)
{
VP8_COMMON *oci = &cpi->common;
MODE_INFO *mi = oci->mi;
int ref[MAX_MB_SEGMENTS]={0};
int i,j;
int mb_index=0;
MACROBLOCKD *const xd = & cpi->mb.e_mbd;
for (i = 0; i < oci->mb_rows; i++)
{
for (j = 0; j < oci->mb_cols; j++, mb_index++)
{
            ref[mi[mb_index].mbmi.segment_id] |= (1 << mi[mb_index].mbmi.ref_frame);
}
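        // The mode-info array is allocated one entry wider than mb_cols
        // (a border column), so step past that extra entry at the end of
        // each row.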
mb_index++;
}
for (i = 0; i < MAX_MB_SEGMENTS; i++)
{
        enable_segfeature(xd, i, SEG_LVL_REF_FRAME);
        set_segdata(xd, i, SEG_LVL_REF_FRAME, ref[i]);
}
}
void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
{
int i, j;
VP8_HEADER oh;
VP8_COMMON *const pc = & cpi->common;
vp8_writer *const bc = & cpi->bc;
MACROBLOCKD *const xd = & cpi->mb.e_mbd;
int extra_bytes_packed = 0;
unsigned char *cx_data = dest;
oh.show_frame = (int) pc->show_frame;
oh.type = (int)pc->frame_type;
oh.version = pc->version;
oh.first_partition_length_in_bytes = 0;
cx_data += 3;
#if defined(SECTIONBITS_OUTPUT)
Sectionbits[active_section = 1] += sizeof(VP8_HEADER) * 8 * 256;
#endif
    // vp8_kf_default_bmode_probs() is called in vp8_setup_key_frame() once for
    // each key frame before the frame is encoded, and pc->kf_bmode_prob is not
    // changed anywhere else, so there is no need to call it again here. --yw
    //vp8_kf_default_bmode_probs( pc->kf_bmode_prob);
    // Every key frame sends the start code, width, height, scale factors,
    // clamping type and colour type.
if (oh.type == KEY_FRAME)
{
int v;
// Start / synch code
cx_data[0] = 0x9D;
cx_data[1] = 0x01;
cx_data[2] = 0x2a;
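        // Width and height: each is a 14-bit dimension plus a 2-bit scale
        // factor, packed as a 16-bit little-endian value.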
v = (pc->horiz_scale << 14) | pc->Width;
cx_data[3] = v;
cx_data[4] = v >> 8;
v = (pc->vert_scale << 14) | pc->Height;
cx_data[5] = v;
cx_data[6] = v >> 8;
extra_bytes_packed = 7;
cx_data += extra_bytes_packed ;
vp8_start_encode(bc, cx_data);
        // Signal the colour space (clr_type) and pixel value clamping type
vp8_write_bit(bc, pc->clr_type);
vp8_write_bit(bc, pc->clamp_type);
}
else
vp8_start_encode(bc, cx_data);
// Signal whether or not Segmentation is enabled
vp8_write_bit(bc, (xd->segmentation_enabled) ? 1 : 0);
// Indicate which features are enabled
if (xd->segmentation_enabled)
{
// Indicate whether or not the segmentation map is being updated.
vp8_write_bit(bc, (xd->update_mb_segmentation_map) ? 1 : 0);
// If it is, then indicate the method that will be used.
if ( xd->update_mb_segmentation_map )
vp8_write_bit(bc, (pc->temporal_update) ? 1:0);
vp8_write_bit(bc, (xd->update_mb_segmentation_data) ? 1 : 0);
//segment_reference_frames(cpi);
if (xd->update_mb_segmentation_data)
{
signed char Data;
vp8_write_bit(bc, (xd->mb_segement_abs_delta) ? 1 : 0);
            // For each segment id...
for (i = 0; i < MAX_MB_SEGMENTS; i++)
{
// For each segmentation codable feature...
for (j = 0; j < SEG_LVL_MAX; j++)
{
Data = get_segdata( xd, i, j );
// If the feature is enabled...
if ( segfeature_active( xd, i, j ) )
{
vp8_write_bit(bc, 1);
                        // Is the segment data signed?
if ( is_segfeature_signed(j) )
{
// Encode the relevant feature data
if (Data < 0)
{
Data = - Data;
vp8_write_literal(bc, Data,
seg_feature_data_bits(j));
vp8_write_bit(bc, 1);
}
else
{
vp8_write_literal(bc, Data,
seg_feature_data_bits(j));
vp8_write_bit(bc, 0);
}
}
// Unsigned data element so no sign bit needed
else
vp8_write_literal(bc, Data,
seg_feature_data_bits(j));
}
else
vp8_write_bit(bc, 0);
}
}
}
if (xd->update_mb_segmentation_map)
{
// Send the tree probabilities used to decode unpredicted
// macro-block segments
for (i = 0; i < MB_FEATURE_TREE_PROBS; i++)
{
int Data = xd->mb_segment_tree_probs[i];
if (Data != 255)
{
vp8_write_bit(bc, 1);
vp8_write_literal(bc, Data, 8);
}
else
vp8_write_bit(bc, 0);
}
        // If predictive coding of the segment map is enabled, send the
// prediction probabilities.
if ( pc->temporal_update )
{
for (i = 0; i < PREDICTION_PROBS; i++)
{
int Data = pc->segment_pred_probs[i];
if (Data != 255)
{
vp8_write_bit(bc, 1);
vp8_write_literal(bc, Data, 8);
}
else
vp8_write_bit(bc, 0);
}
}
}
}
// Encode the common prediction model status flag probability updates for
// the reference frame
if ( pc->frame_type != KEY_FRAME )
{
for (i = 0; i < PREDICTION_PROBS; i++)
{
if ( cpi->ref_pred_probs_update[i] )
{
vp8_write_bit(bc, 1);
vp8_write_literal(bc, pc->ref_pred_probs[i], 8);
}
else
vp8_write_bit(bc, 0);
}
}
// Encode the loop filter level and type
vp8_write_bit(bc, pc->filter_type);
vp8_write_literal(bc, pc->filter_level, 6);
vp8_write_literal(bc, pc->sharpness_level, 3);
// Write out loop filter deltas applied at the MB level based on mode or ref frame (if they are enabled).
vp8_write_bit(bc, (xd->mode_ref_lf_delta_enabled) ? 1 : 0);
if (xd->mode_ref_lf_delta_enabled)
{
// Do the deltas need to be updated
int send_update = xd->mode_ref_lf_delta_update
|| cpi->oxcf.error_resilient_mode;
vp8_write_bit(bc, send_update);
if (send_update)
{
int Data;
// Send update
for (i = 0; i < MAX_REF_LF_DELTAS; i++)
{
Data = xd->ref_lf_deltas[i];
// Frame level data
if (xd->ref_lf_deltas[i] != xd->last_ref_lf_deltas[i]
|| cpi->oxcf.error_resilient_mode)
{
xd->last_ref_lf_deltas[i] = xd->ref_lf_deltas[i];
vp8_write_bit(bc, 1);
if (Data > 0)
{
vp8_write_literal(bc, (Data & 0x3F), 6);
vp8_write_bit(bc, 0); // sign
}
else
{
Data = -Data;
vp8_write_literal(bc, (Data & 0x3F), 6);
vp8_write_bit(bc, 1); // sign
}
}
else
vp8_write_bit(bc, 0);
}
// Send update
for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
{
Data = xd->mode_lf_deltas[i];
if (xd->mode_lf_deltas[i] != xd->last_mode_lf_deltas[i]
|| cpi->oxcf.error_resilient_mode)
{
xd->last_mode_lf_deltas[i] = xd->mode_lf_deltas[i];
vp8_write_bit(bc, 1);
if (Data > 0)
{
vp8_write_literal(bc, (Data & 0x3F), 6);
vp8_write_bit(bc, 0); // sign
}
else
{
Data = -Data;
vp8_write_literal(bc, (Data & 0x3F), 6);
vp8_write_bit(bc, 1); // sign
}
}
else
vp8_write_bit(bc, 0);
}
}
}
    // Signal the token partitioning: a 2-bit code selecting 1, 2, 4 or 8
    // residual token partitions.
vp8_write_literal(bc, pc->multi_token_partition, 2);
// Frame Q baseline quantizer index
vp8_write_literal(bc, pc->base_qindex, QINDEX_BITS);
    // Transmit the DC, second-order and UV quantizer delta information
put_delta_q(bc, pc->y1dc_delta_q);
put_delta_q(bc, pc->y2dc_delta_q);
put_delta_q(bc, pc->y2ac_delta_q);
put_delta_q(bc, pc->uvdc_delta_q);
put_delta_q(bc, pc->uvac_delta_q);
    // On a key frame all reference buffers are refreshed from the new key
    // frame, so the buffer-update signalling below only applies to inter frames.
if (pc->frame_type != KEY_FRAME)
{
// Should the GF or ARF be updated using the transmitted frame or buffer
vp8_write_bit(bc, pc->refresh_golden_frame);
vp8_write_bit(bc, pc->refresh_alt_ref_frame);
        // If not refreshed from the current frame, should the GF or ARF instead
        // be updated by copying one of the other reference buffers?
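        // Per the VP8 frame header semantics: copy_buffer_to_gf is 0 (no copy),
        // 1 (copy LAST) or 2 (copy ALTREF); copy_buffer_to_arf is 0 (no copy),
        // 1 (copy LAST) or 2 (copy GOLDEN).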
if (!pc->refresh_golden_frame)
vp8_write_literal(bc, pc->copy_buffer_to_gf, 2);
if (!pc->refresh_alt_ref_frame)
vp8_write_literal(bc, pc->copy_buffer_to_arf, 2);
// Indicate reference frame sign bias for Golden and ARF frames (always 0 for last frame buffer)
vp8_write_bit(bc, pc->ref_frame_sign_bias[GOLDEN_FRAME]);
vp8_write_bit(bc, pc->ref_frame_sign_bias[ALTREF_FRAME]);
}
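    // In error-resilient partitions mode, entropy probability updates persist
    // only across key frames, so the coding context of an inter frame never
    // depends on earlier inter frames that may have been lost.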
if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS)
{
if (pc->frame_type == KEY_FRAME)
pc->refresh_entropy_probs = 1;
else
pc->refresh_entropy_probs = 0;
}
vp8_write_bit(bc, pc->refresh_entropy_probs);
if (pc->frame_type != KEY_FRAME)
vp8_write_bit(bc, pc->refresh_last_frame);
#ifdef ENTROPY_STATS
if (pc->frame_type == INTER_FRAME)
active_section = 0;
else
active_section = 7;
#endif
vp8_clear_system_state(); //__asm emms;
update_coef_probs(cpi);
#ifdef ENTROPY_STATS
active_section = 2;
#endif
// Write out the mb_no_coeff_skip flag
vp8_write_bit(bc, pc->mb_no_coeff_skip);
if (pc->frame_type == KEY_FRAME)
{
#if CONFIG_QIMODE
decide_kf_ymode_entropy(cpi);
#endif
write_kfmodes(cpi);
#ifdef ENTROPY_STATS
active_section = 8;
#endif
}
else
{
pack_inter_mode_mvs(cpi);
vp8_update_mode_context(&cpi->common);
#ifdef ENTROPY_STATS
active_section = 1;
#endif
}
vp8_stop_encode(bc);
oh.first_partition_length_in_bytes = cpi->bc.pos;
/* update frame tag */
{
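        /* The 3-byte frame tag, stored little endian:
         *   bit  0     frame type (0 = key frame, 1 = inter frame)
         *   bits 1-3   version / profile
         *   bit  4     show_frame flag
         *   bits 5-23  first partition length in bytes
         */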
int v = (oh.first_partition_length_in_bytes << 5) |
(oh.show_frame << 4) |
(oh.version << 1) |
oh.type;
dest[0] = v;
dest[1] = v >> 8;
dest[2] = v >> 16;
}
*size = VP8_HEADER_SIZE + extra_bytes_packed + cpi->bc.pos;
cpi->partition_sz[0] = *size;
if (pc->multi_token_partition != ONE_PARTITION)
{
int num_part;
int asize;
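        // Number of residual token partitions is 2 ^ multi_token_partition
        // (2, 4 or 8, since ONE_PARTITION is handled in the else branch).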
num_part = 1 << pc->multi_token_partition;
pack_tokens_into_partitions(cpi, cx_data + bc->pos, num_part, &asize);
*size += asize;
}
else
{
vp8_start_encode(&cpi->bc2, cx_data + bc->pos);
#if CONFIG_MULTITHREAD
if (cpi->b_multi_threaded)
pack_mb_row_tokens(cpi, &cpi->bc2);
else
#endif
pack_tokens(&cpi->bc2, cpi->tok, cpi->tok_count);
vp8_stop_encode(&cpi->bc2);
*size += cpi->bc2.pos;
cpi->partition_sz[1] = cpi->bc2.pos;
}
}
#ifdef ENTROPY_STATS
void print_tree_update_probs()
{
int i, j, k, l;
FILE *f = fopen("context.c", "a");
int Sum;
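    // Each emitted value is the observed frequency, scaled to the range 1-255,
    // with which the corresponding tree node was left un-updated:
    // hist[0] * 255 / (hist[0] + hist[1]), clamped to at least 1, or 128 when
    // the node was never visited.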
fprintf(f, "\n/* Update probabilities for token entropy tree. */\n\n");
fprintf(f, "const vp8_prob tree_update_probs[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] = {\n");
for (i = 0; i < BLOCK_TYPES; i++)
{
fprintf(f, " { \n");
for (j = 0; j < COEF_BANDS; j++)
{
fprintf(f, " {\n");
for (k = 0; k < PREV_COEF_CONTEXTS; k++)
{
fprintf(f, " {");
for (l = 0; l < ENTROPY_NODES; l++)
{
Sum = tree_update_hist[i][j][k][l][0] + tree_update_hist[i][j][k][l][1];
if (Sum > 0)
{
if (((tree_update_hist[i][j][k][l][0] * 255) / Sum) > 0)
fprintf(f, "%3ld, ", (tree_update_hist[i][j][k][l][0] * 255) / Sum);
else
fprintf(f, "%3ld, ", 1);
}
else
fprintf(f, "%3ld, ", 128);
}
fprintf(f, "},\n");
}
fprintf(f, " },\n");
}
fprintf(f, " },\n");
}
fprintf(f, "};\n");
#if CONFIG_T8X8
fprintf(f, "const vp8_prob tree_update_probs_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] = {\n");
for (i = 0; i < BLOCK_TYPES; i++)
{
fprintf(f, " { \n");
for (j = 0; j < COEF_BANDS; j++)
{
fprintf(f, " {\n");
for (k = 0; k < PREV_COEF_CONTEXTS; k++)
{
fprintf(f, " {");
for (l = 0; l < MAX_ENTROPY_TOKENS - 1; l++)
{
Sum = tree_update_hist_8x8[i][j][k][l][0] + tree_update_hist_8x8[i][j][k][l][1];
if (Sum > 0)
{
if (((tree_update_hist_8x8[i][j][k][l][0] * 255) / Sum) > 0)
fprintf(f, "%3ld, ", (tree_update_hist_8x8[i][j][k][l][0] * 255) / Sum);
else
fprintf(f, "%3ld, ", 1);
}
else
fprintf(f, "%3ld, ", 128);
}
fprintf(f, "},\n");
}
fprintf(f, " },\n");
}
fprintf(f, " },\n");
    }
    fprintf(f, "};\n");
#endif
fclose(f);
}
#endif