Further segment feature extensions.

This fairly large check-in includes the following:

Merges in some code from Ronald (mbgraph.c) that scans a GF/ARF group.
This is used as the basis for a simple segmentation of the normal frames
in a GF/ARF group. This code also uses SATD functions from Yaowu.
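
As a quick reference, here is a minimal, self-contained restatement of the
per-MB zero-motion test that the scan applies; the helper name is made up
for illustration, but the thresholds are the ones used by separate_arf_mbs()
in the new mbgraph.c further down:

    /* An MB fails the test if its alt ref prediction moved, or if the alt
     * ref error compares poorly against the scaled intra and golden errors. */
    static int mb_fails_arf_zero_motion_test(int altref_mv_as_int,
                                             int altref_err,
                                             int intra_err,
                                             int golden_err)
    {
        int intra_thresh  = (intra_err * 9) >> 3;           /* intra * 1.125 */
        int golden_thresh = 250 + ((golden_err * 9) >> 3);  /* golden * 1.125 + 250 */

        return altref_mv_as_int ||
               ( (altref_err > 500) &&
                 ( (altref_err > (intra_thresh >> 2)) ||
                   (altref_err > golden_thresh) ) );
    }

Macroblocks that never fail this test across the scanned frames are placed in
the "static" segment 1; everything else goes to segment 0, and segmentation is
only enabled when the static segment covers more than roughly 10% of the MBs.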

Adds functionality for coding the latest possible position of an EOB for
blocks in the segment (currently 0-15 only, hence just for the 4x4 DCT).
Where the EOB position is 0 this acts like "skip", and the normal coding
of skip at the per-MB level is disabled.
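
A sketch of the corresponding decoder-side handling, condensed from the
decodemv.c changes below; the helper function and its exact signature are
illustrative, while vp8_read(), BOOL_DECODER and vp8_prob are the existing
bool-decoder interfaces seen in the diff:

    static int read_mb_skip_coeff(BOOL_DECODER *bc,
                                  int mb_no_coeff_skip,
                                  int eob_feature_active,
                                  int seg_eob,            /* 0..16 */
                                  vp8_prob prob_skip_false)
    {
        /* The skip flag is only coded when the EOB feature does not imply it. */
        if (mb_no_coeff_skip && (!eob_feature_active || seg_eob != 0))
            return vp8_read(bc, prob_skip_false);

        /* A segment EOB limit of 0 means no coefficients can be coded, so
         * the per-MB skip flag is implied (1) rather than transmitted. */
        return (eob_feature_active && seg_eob == 0) ? 1 : 0;
    }

On the token decode side the same segment value is used as seg_eob, and
decoding of a block stops as soon as the coefficient index reaches it (see
the detokenize.c changes).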

Adds functions (seg_common.c) for setting and reading segment feature
elements. These may be optimized away at some point, but while the
mechanism is in a state of flux they provide a single location for making
changes and keep things a bit cleaner.
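
Typical usage of the new helpers looks roughly like this (the wrapper
function and the feature/value choices are purely illustrative):

    #include "vp8/common/seg_common.h"

    static void example_segfeature_setup(MACROBLOCKD *xd)
    {
        /* Flag the alternate quantizer feature as active for segment 1
         * and supply its (delta) value. */
        enable_segfeature(xd, 1, SEG_LVL_ALT_Q);
        xd->segment_feature_data[1][SEG_LVL_ALT_Q] = 10;

        /* Consumers test segmentation_enabled and the mask bit in one call. */
        if (segfeature_active(xd, 1, SEG_LVL_ALT_Q))
        {
            /* ... apply the per-segment quantizer adjustment ... */
        }

        /* Clearing the mask bit switches the feature off again. */
        disable_segfeature(xd, 1, SEG_LVL_ALT_Q);
    }

Note that segfeature_active() also folds in the segmentation_enabled check,
so call sites lose the double test they carried before this change.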

This is still proof-of-concept code. Currently the tested feature set (a configuration sketch follows the list):-

Quantizer,
Loop Filter level,
Reference frame,
Prediction Mode,
EOB end stop.
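
For reference, this is how those five features are exercised for segment 1 on
the first normal frame of an ARF group, condensed from init_seg_features() in
onyx_if.c below (xd is the encoder's MACROBLOCKD; values are the ones in the
patch):

    xd->segment_feature_data[1][SEG_LVL_REF_FRAME] = LAST_FRAME;
    xd->segment_feature_data[1][SEG_LVL_MODE]      = ZEROMV;
    xd->segment_feature_data[1][SEG_LVL_EOB]       = 10;
    xd->segment_feature_data[1][SEG_LVL_ALT_Q]     = 10;
    xd->segment_feature_data[1][SEG_LVL_ALT_LF]    = -5;

    enable_segfeature(xd, 1, SEG_LVL_REF_FRAME);
    enable_segfeature(xd, 1, SEG_LVL_MODE);
    enable_segfeature(xd, 1, SEG_LVL_EOB);
    enable_segfeature(xd, 1, SEG_LVL_ALT_Q);
    enable_segfeature(xd, 1, SEG_LVL_ALT_LF);

    /* Feature data is treated as deltas, not absolute values. */
    xd->mb_segement_abs_delta = SEGMENT_DELTADATA;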

TBD:-

Add functions for setting and reading the feature data with range
and validity checking.

Handling of signed and unsigned feature data. At the moment all data is
assumed to be signed and a sign bit is coded, but many features cannot be
negative.

Correct handling of the EOB feature with intra-coded blocks.

Testing/trapping of legal/illegal ref frame and mode combinations.

Transform size switch, plus merge and test with the 8x8 DCT work.

Merge and test with Suman's segmentation coding optimizations.

Change-Id: Iee12e83661c7abbd1e0ce6810915eb4ec35e2d8e
Paul Wilkins 2011-10-05 11:26:00 +01:00
parent 152ce6b2b9
commit 01ce04bc06
28 changed files with 1119 additions and 264 deletions

View File

@ -108,7 +108,7 @@ typedef enum
SEG_LVL_REF_FRAME = 2, // Optional Segment reference frame
SEG_LVL_MODE = 3, // Optional Segment mode
SEG_LVL_EOB = 4, // EOB end stop marker.
SEG_LVL_TRANSFORM = 6, // Block transform size.
SEG_LVL_TRANSFORM = 5, // Block transform size.
SEG_LVL_MAX = 6 // Number of MB level features supported
#else

View File

@ -14,6 +14,10 @@
#include "onyxc_int.h"
#include "vpx_mem/vpx_mem.h"
#if CONFIG_SEGFEATURES
#include "vp8/common/seg_common.h"
#endif
typedef unsigned char uc;
prototype_loopfilter(vp8_loop_filter_horizontal_edge_c);
@ -194,7 +198,7 @@ void vp8_loop_filter_init(VP8_COMMON *cm)
}
void vp8_loop_filter_frame_init(VP8_COMMON *cm,
MACROBLOCKD *mbd,
MACROBLOCKD *xd,
int default_filt_lvl)
{
int seg, /* segment number */
@ -218,25 +222,24 @@ void vp8_loop_filter_frame_init(VP8_COMMON *cm,
// Set the baseline filter values for each segment
#if CONFIG_SEGFEATURES
if ( mbd->segmentation_enabled &&
( mbd->segment_feature_mask[seg] & (1 << SEG_LVL_ALT_LF) ) )
if ( segfeature_active( xd, seg, SEG_LVL_ALT_LF ) )
#else
if ( mbd->segmentation_enabled )
if ( xd->segmentation_enabled )
#endif
{
/* Abs value */
if (mbd->mb_segement_abs_delta == SEGMENT_ABSDATA)
if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
{
lvl_seg = mbd->segment_feature_data[seg][SEG_LVL_ALT_LF];
lvl_seg = xd->segment_feature_data[seg][SEG_LVL_ALT_LF];
}
else /* Delta Value */
{
lvl_seg += mbd->segment_feature_data[seg][SEG_LVL_ALT_LF];
lvl_seg += xd->segment_feature_data[seg][SEG_LVL_ALT_LF];
lvl_seg = (lvl_seg > 0) ? ((lvl_seg > 63) ? 63: lvl_seg) : 0;
}
}
if (!mbd->mode_ref_lf_delta_enabled)
if (!xd->mode_ref_lf_delta_enabled)
{
/* we could get rid of this if we assume that deltas are set to
* zero when not in use; encoder always uses deltas
@ -251,12 +254,12 @@ void vp8_loop_filter_frame_init(VP8_COMMON *cm,
ref = INTRA_FRAME;
/* Apply delta for reference frame */
lvl_ref += mbd->ref_lf_deltas[ref];
lvl_ref += xd->ref_lf_deltas[ref];
/* Apply delta for Intra modes */
mode = 0; /* B_PRED */
/* Only the split mode BPRED has a further special case */
lvl_mode = lvl_ref + mbd->mode_lf_deltas[mode];
lvl_mode = lvl_ref + xd->mode_lf_deltas[mode];
lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0; /* clamp */
lfi->lvl[seg][ref][mode] = lvl_mode;
@ -271,12 +274,12 @@ void vp8_loop_filter_frame_init(VP8_COMMON *cm,
int lvl_ref = lvl_seg;
/* Apply delta for reference frame */
lvl_ref += mbd->ref_lf_deltas[ref];
lvl_ref += xd->ref_lf_deltas[ref];
/* Apply delta for Inter modes */
for (mode = 1; mode < 4; mode++)
{
lvl_mode = lvl_ref + mbd->mode_lf_deltas[mode];
lvl_mode = lvl_ref + xd->mode_lf_deltas[mode];
lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0; /* clamp */
lfi->lvl[seg][ref][mode] = lvl_mode;

41
vp8/common/seg_common.c Normal file
View File

@ -0,0 +1,41 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "vp8/common/seg_common.h"
// These functions provide access to new segment level features.
// Eventually these function may be "optimized out" but for the moment,
// the coding mechanism is still subject to change so these provide a
// convenient single point of change.
int segfeature_active( MACROBLOCKD *xd,
int segment_id,
SEG_LVL_FEATURES feature_id )
{
// Return true if mask bit set and segmentation enabled.
return ( xd->segmentation_enabled &&
( xd->segment_feature_mask[segment_id] &
(0x01 << feature_id) ) );
}
void enable_segfeature( MACROBLOCKD *xd,
int segment_id,
SEG_LVL_FEATURES feature_id )
{
xd->segment_feature_mask[segment_id] |= (0x01 << feature_id);
}
void disable_segfeature( MACROBLOCKD *xd,
int segment_id,
SEG_LVL_FEATURES feature_id )
{
xd->segment_feature_mask[segment_id] &= ~(1 << feature_id);
}
// TBD? Functions to read and write segment data with range / validity checking

30
vp8/common/seg_common.h Normal file
View File

@ -0,0 +1,30 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "type_aliases.h"
#include "vp8/common/blockd.h"
#ifndef __INC_SEG_COMMON_H__
#define __INC_SEG_COMMON_H__ 1
int segfeature_active( MACROBLOCKD *xd,
int segment_id,
SEG_LVL_FEATURES feature_id );
void enable_segfeature( MACROBLOCKD *xd,
int segment_id,
SEG_LVL_FEATURES feature_id );
void disable_segfeature( MACROBLOCKD *xd,
int segment_id,
SEG_LVL_FEATURES feature_id );
#endif /* __INC_SEG_COMMON_H__ */

View File

@ -15,6 +15,14 @@
#include "onyxd_int.h"
#include "vp8/common/findnearmv.h"
#if CONFIG_SEGFEATURES
#include "vp8/common/seg_common.h"
#endif
#if CONFIG_SEGMENTATION
#include "vp8/common/seg_common.h"
#endif
#if CONFIG_DEBUG
#include <assert.h>
#endif
@ -85,11 +93,33 @@ static void vp8_kfread_modes(VP8D_COMP *pbi, MODE_INFO *m, int mb_row, int mb_co
if (pbi->mb.update_mb_segmentation_map)
vp8_read_mb_features(bc, &m->mbmi, &pbi->mb);
/* Read the macroblock coeff skip flag if this feature is in use, else default to 0 */
#if CONFIG_SEGFEATURES
if ( pbi->common.mb_no_coeff_skip &&
( !segfeature_active( &pbi->mb,
m->mbmi.segment_id, SEG_LVL_EOB ) ||
(pbi->mb.segment_feature_data[m->mbmi.segment_id]
[SEG_LVL_EOB] != 0) ) )
#else
// Read the macroblock coeff skip flag if this feature is in use,
// else default to 0
if (pbi->common.mb_no_coeff_skip)
#endif
m->mbmi.mb_skip_coeff = vp8_read(bc, pbi->prob_skip_false);
else
m->mbmi.mb_skip_coeff = 0;
{
#if CONFIG_SEGFEATURES
if ( segfeature_active( &pbi->mb,
m->mbmi.segment_id, SEG_LVL_EOB ) &&
(pbi->mb.segment_feature_data[m->mbmi.segment_id]
[SEG_LVL_EOB] == 0) )
{
m->mbmi.mb_skip_coeff = 1;
}
else
#endif
m->mbmi.mb_skip_coeff = 0;
}
#if CONFIG_QIMODE
y_mode = (MB_PREDICTION_MODE) vp8_kfread_ymode(bc,
pbi->common.kf_ymode_prob[pbi->common.kf_ymode_probs_index]);
@ -210,7 +240,7 @@ static MV_REFERENCE_FRAME read_ref_frame( VP8D_COMP *pbi,
MACROBLOCKD *const xd = &pbi->mb;
// Is the segment level refernce frame feature enabled for this segment
if ( xd->segment_feature_mask[segment_id] & (0x01 << SEG_LVL_REF_FRAME) )
if ( segfeature_active( xd, segment_id, SEG_LVL_REF_FRAME ) )
{
ref_frame =
xd->segment_feature_data[segment_id][SEG_LVL_REF_FRAME];
@ -398,12 +428,34 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
#endif
}
/* Read the macroblock coeff skip flag if this feature is in use, else default to 0 */
#if CONFIG_SEGFEATURES
if ( pbi->common.mb_no_coeff_skip &&
( !segfeature_active( xd,
mbmi->segment_id, SEG_LVL_EOB ) ||
(xd->segment_feature_data[mbmi->segment_id]
[SEG_LVL_EOB] != 0) ) )
#else
if (pbi->common.mb_no_coeff_skip)
#endif
{
// Read the macroblock coeff skip flag if this feature is in use,
// else default to 0
mbmi->mb_skip_coeff = vp8_read(bc, pbi->prob_skip_false);
}
else
mbmi->mb_skip_coeff = 0;
{
#if CONFIG_SEGFEATURES
if ( segfeature_active( xd,
mbmi->segment_id, SEG_LVL_EOB ) &&
(xd->segment_feature_data[mbmi->segment_id]
[SEG_LVL_EOB] == 0) )
{
mbmi->mb_skip_coeff = 1;
}
else
#endif
mbmi->mb_skip_coeff = 0;
}
// Read the reference frame
mbmi->ref_frame = read_ref_frame( pbi, mbmi->segment_id );
@ -421,8 +473,7 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
#if CONFIG_SEGFEATURES
// Is the segment level mode feature enabled for this segment
if ( xd->segment_feature_mask[mbmi->segment_id] &
(0x01 << SEG_LVL_MODE) )
if ( segfeature_active( xd, mbmi->segment_id, SEG_LVL_MODE ) )
{
mbmi->mode =
xd->segment_feature_data[mbmi->segment_id][SEG_LVL_MODE];

View File

@ -37,6 +37,10 @@
#include "decoderthreading.h"
#include "dboolhuff.h"
#if CONFIG_SEGFEATURES
#include "vp8/common/seg_common.h"
#endif
#include <assert.h>
#include <stdio.h>
@ -78,8 +82,7 @@ void mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd)
// Set the Q baseline allowing for any segment level adjustment
#if CONFIG_SEGFEATURES
if ( xd->segmentation_enabled &&
( xd->segment_feature_mask[segment_id] & (1 << SEG_LVL_ALT_Q) ) )
if ( segfeature_active( xd, segment_id, SEG_LVL_ALT_Q ) )
#else
if ( xd->segmentation_enabled )
#endif
@ -1056,9 +1059,8 @@ int vp8_decode_frame(VP8D_COMP *pbi)
{
#if CONFIG_SEGFEATURES
// Update the feature data and mask
xd->segment_feature_mask[j] |= (1 << i);
enable_segfeature(xd, j, i);
#endif
xd->segment_feature_data[j][i] = (signed char)vp8_read_literal(bc, mb_feature_data_bits[i]);
if (vp8_read_bit(bc))

View File

@ -16,6 +16,10 @@
#include "vpx_ports/mem.h"
#include "detokenize.h"
#if CONFIG_SEGFEATURES
#include "vp8/common/seg_common.h"
#endif
#define BOOL_DATA UINT8
#define OCB_X PREV_COEF_CONTEXTS * ENTROPY_NODES
@ -611,15 +615,15 @@ BLOCK_FINISHED_8x8:
}
#endif
int vp8_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *x)
int vp8_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *xd)
{
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)x->left_context;
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
const FRAME_CONTEXT * const fc = &dx->common.fc;
BOOL_DECODER *bc = x->current_bc;
BOOL_DECODER *bc = xd->current_bc;
char *eobs = x->eobs;
char *eobs = xd->eobs;
ENTROPY_CONTEXT *a;
ENTROPY_CONTEXT *l;
@ -647,17 +651,27 @@ int vp8_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *x)
INT16 v;
const vp8_prob *Prob;
#if CONFIG_SEGFEATURES
int seg_eob = 16;
int segment_id = xd->mode_info_context->mbmi.segment_id;
if ( segfeature_active( xd, segment_id, SEG_LVL_EOB ) )
{
seg_eob = xd->segment_feature_data[segment_id][SEG_LVL_EOB];
}
#endif
type = 3;
i = 0;
stop = 16;
scan = vp8_default_zig_zag1d;
qcoeff_ptr = &x->qcoeff[0];
if (x->mode_info_context->mbmi.mode != B_PRED &&
qcoeff_ptr = &xd->qcoeff[0];
if (xd->mode_info_context->mbmi.mode != B_PRED &&
#if CONFIG_I8X8
x->mode_info_context->mbmi.mode != I8X8_PRED &&
xd->mode_info_context->mbmi.mode != I8X8_PRED &&
#endif
x->mode_info_context->mbmi.mode != SPLITMV)
xd->mode_info_context->mbmi.mode != SPLITMV)
{
i = 24;
stop = 24;
@ -687,6 +701,11 @@ BLOCK_LOOP:
Prob += v * ENTROPY_NODES;
DO_WHILE:
#if CONFIG_SEGFEATURES
if ( c == seg_eob )
goto BLOCK_FINISHED;
#endif
Prob += coef_bands_x[c];
DECODE_AND_BRANCH_IF_ZERO(Prob[EOB_CONTEXT_NODE], BLOCK_FINISHED);

View File

@ -24,9 +24,15 @@
#include "bitstream.h"
#include "defaultcoefcounts.h"
#if CONFIG_SEGFEATURES
#include "vp8/common/seg_common.h"
#endif
#if CONFIG_SEGMENTATION
static int segment_cost = 0;
#endif
const int vp8cx_base_skip_false_prob[128] =
{
255, 255, 255, 255, 255, 255, 255, 255,
@ -954,6 +960,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
const MB_MODE_INFO *const mi = & m->mbmi;
const MV_REFERENCE_FRAME rf = mi->ref_frame;
const MB_PREDICTION_MODE mode = mi->mode;
const int segment_id = mi->segment_id;
//MACROBLOCKD *xd = &cpi->mb.e_mbd;
@ -973,7 +980,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
#ifdef MODE_STATS
#if CONFIG_SEGMENTATION
segment_modes_inter[mi->segment_id]++;
segment_modes_inter[segment_id]++;
#endif
#endif
if (cpi->mb.e_mbd.update_mb_segmentation_map)
@ -997,13 +1004,13 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
vp8_write(w,1,xd->mb_segment_tree_probs[3+sum]);
segment_cost += vp8_cost_one(xd->mb_segment_tree_probs[3+sum]);
write_mb_features(w, mi, &cpi->mb.e_mbd);
cpi->segmentation_map[index] = mi->segment_id;
cpi->segmentation_map[index] = segment_id;
}
}
else
{
write_mb_features(w, mi, &cpi->mb.e_mbd);
cpi->segmentation_map[index] = mi->segment_id;
cpi->segmentation_map[index] = segment_id;
}
index++;
#else
@ -1011,15 +1018,22 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
#endif
}
#if CONFIG_SEGFEATURES
if ( pc->mb_no_coeff_skip &&
( !segfeature_active( xd, segment_id, SEG_LVL_EOB ) ||
(xd->segment_feature_data[segment_id][SEG_LVL_EOB] != 0) ) )
#else
if (pc->mb_no_coeff_skip)
vp8_encode_bool(w, m->mbmi.mb_skip_coeff, prob_skip_false);
#endif
{
vp8_encode_bool(w, mi->mb_skip_coeff, prob_skip_false);
}
if (rf == INTRA_FRAME)
{
#if CONFIG_SEGFEATURES
// Is the segment coding of reference frame enabled
if ( !( xd->segment_feature_mask[mi->segment_id] &
(0x01 << SEG_LVL_REF_FRAME) ) )
if ( !segfeature_active( xd, segment_id, SEG_LVL_REF_FRAME ) )
#endif
{
vp8_write(w, 0, cpi->prob_intra_coded);
@ -1047,9 +1061,8 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
vp8_prob mv_ref_p [VP8_MVREFS-1];
#if CONFIG_SEGFEATURES
// Is the segment coding of reference frame enabled
if ( !( xd->segment_feature_mask[mi->segment_id] &
(0x01 << SEG_LVL_REF_FRAME) ) )
// Test to see if segment level coding of ref frame is enabled
if ( !segfeature_active( xd, segment_id, SEG_LVL_REF_FRAME ) )
#endif
{
vp8_write(w, 1, cpi->prob_intra_coded);
@ -1082,8 +1095,7 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi)
#if CONFIG_SEGFEATURES
// Is the segment coding of reference frame enabled
if ( !( xd->segment_feature_mask[mi->segment_id] &
(0x01 << SEG_LVL_MODE) ) )
if ( !segfeature_active( xd, segment_id, SEG_LVL_MODE ) )
#endif
{
write_mv_ref(w, mode, mv_ref_p);
@ -1174,6 +1186,10 @@ static void write_kfmodes(VP8_COMP *cpi)
int mb_row = -1;
int prob_skip_false = 0;
#if CONFIG_SEGFEATURES
MACROBLOCKD *xd = &cpi->mb.e_mbd;
#endif
if (c->mb_no_coeff_skip)
{
prob_skip_false = cpi->skip_false_count * 256 / (cpi->skip_false_count + cpi->skip_true_count);
@ -1202,6 +1218,8 @@ static void write_kfmodes(VP8_COMP *cpi)
while (++mb_col < c->mb_cols)
{
const int ym = m->mbmi.mode;
int segment_id = m->mbmi.segment_id;
#if CONFIG_SEGMENTATION
MACROBLOCKD *xd = &cpi->mb.e_mbd;
xd->up_available = (mb_row != 0);
@ -1209,7 +1227,7 @@ static void write_kfmodes(VP8_COMP *cpi)
#endif
#ifdef MODE_STATS
#if CONFIG_SEGMENTATION
segment_modes_intra[m->mbmi.segment_id]++;
segment_modes_intra[segment_id]++;
#endif
#endif
@ -1218,15 +1236,23 @@ static void write_kfmodes(VP8_COMP *cpi)
#if CONFIG_SEGMENTATION
write_mb_features(bc, &m->mbmi, &cpi->mb.e_mbd);
cpi->segmentation_map[index] = m->mbmi.segment_id;
cpi->segmentation_map[index] = segment_id;
index++;
#else
write_mb_features(bc, &m->mbmi, &cpi->mb.e_mbd);
#endif
}
#if CONFIG_SEGFEATURES
if ( c->mb_no_coeff_skip &&
( !segfeature_active( xd, segment_id, SEG_LVL_EOB ) ||
(xd->segment_feature_data[segment_id][SEG_LVL_EOB] != 0) ) )
#else
if (c->mb_no_coeff_skip)
#endif
{
vp8_encode_bool(bc, m->mbmi.mb_skip_coeff, prob_skip_false);
}
#if CONFIG_QIMODE
kfwrite_ymode(bc, ym, c->kf_ymode_prob[c->kf_ymode_probs_index]);
#else
@ -1952,7 +1978,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
#if CONFIG_SEGFEATURES
// If the feature is enabled...
if ( xd->segment_feature_mask[j] & (0x01 << i))
if ( segfeature_active( xd, j, i ) )
#else
// If the feature is enabled...Indicated by non zero
// value in VP8

View File

@ -46,8 +46,9 @@ typedef struct
int src;
int src_stride;
// MV enc_mv;
int force_empty;
#if CONFIG_SEGFEATURES
int eob_max_offset;
#endif
} BLOCK;

View File

@ -31,6 +31,9 @@
#include "vp8/common/subpixel.h"
#include "vpx_ports/vpx_timer.h"
#if CONFIG_SEGFEATURES
//#define DBG_PRNT_SEGMAP 1
#endif
#if CONFIG_RUNTIME_CPU_DETECT
#define RTCD(x) &cpi->common.rtcd.x
@ -750,18 +753,7 @@ void encode_mb_row(VP8_COMP *cpi,
else
cpi->cyclic_refresh_map[map_index+mb_col] = 1;
}
#if CONFIG_SEGFEATURES
else if ( cm->refresh_alt_ref_frame &&
(cm->frame_type != KEY_FRAME) )
{
// Update the global segmentation map to reflect
// the segment choice made for this MB.
cpi->segmentation_map[map_index+mb_col] =
xd->mode_info_context->mbmi.segment_id;
}
#endif
}
}
cpi->tplist[mb_row].stop = *tp;
@ -856,7 +848,7 @@ void encode_mb_row(VP8_COMP *cpi,
#if CONFIG_SEGFEATURES
// debug output
#if 0
#if DBG_PRNT_SEGMAP
{
FILE *statsfile;
statsfile = fopen("segmap2.stt", "a");
@ -867,30 +859,6 @@ void encode_mb_row(VP8_COMP *cpi,
#endif
}
#if CONFIG_SEGFEATURES
// Funtion to test out new segment features
void segfeature_test_function(VP8_COMP *cpi, MACROBLOCKD * xd)
{
VP8_COMMON *const cm = & cpi->common;
// Only update segment map for a frame that is an arf but not a kf.
if ( cm->refresh_alt_ref_frame && (cm->frame_type != KEY_FRAME) )
{
// Test code to code features at the segment level
if ( (xd->mode_info_context->mbmi.mode ==
cpi->segment_feature_data[1][SEG_LVL_MODE]) &&
(xd->mode_info_context->mbmi.ref_frame ==
cpi->segment_feature_data[1][SEG_LVL_REF_FRAME]) )
{
xd->mode_info_context->mbmi.segment_id = 1;
}
else
xd->mode_info_context->mbmi.segment_id = 0;
}
}
#endif
void init_encode_frame_mb_context(VP8_COMP *cpi)
{
MACROBLOCK *const x = & cpi->mb;
@ -1695,8 +1663,8 @@ int vp8cx_encode_inter_macroblock
#if CONFIG_SEGFEATURES
else
{
segfeature_test_function(cpi, xd);
#if 0
//segfeature_test_function(cpi, xd);
#if DBG_PRNT_SEGMAP
// Debug output
{
FILE *statsfile;

View File

@ -355,9 +355,10 @@ static void setup_mbby_copy(MACROBLOCK *mbdst, MACROBLOCK *mbsrc)
/*
z->block[i].src = x->block[i].src;
*/
z->block[i].src_stride = x->block[i].src_stride;
z->block[i].force_empty = x->block[i].force_empty;
z->block[i].src_stride = x->block[i].src_stride;
#if CONFIG_SEGFEATURES
z->block[i].eob_max_offset = x->block[i].eob_max_offset;
#endif
}
{

View File

@ -110,7 +110,7 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi)
#endif
#endif
// Pure C:
cpi->rtcd.variance.satd16x16 = vp8_satd16x16_c;
vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;
#if ARCH_X86 || ARCH_X86_64

450
vp8/encoder/mbgraph.c Normal file
View File

@ -0,0 +1,450 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <limits.h>
#include <vp8/encoder/encodeintra.h>
#include <vp8/encoder/rdopt.h>
#include <vp8/common/setupintrarecon.h>
#include <vp8/common/blockd.h>
#include <vp8/common/reconinter.h>
#include <vp8/common/systemdependent.h>
#include <vpx_mem/vpx_mem.h>
#include <vp8/encoder/segmentation.h>
static unsigned int do_16x16_motion_iteration
(
VP8_COMP *cpi,
int_mv *ref_mv,
int_mv *dst_mv
)
{
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
BLOCK *b = &x->block[0];
BLOCKD *d = &xd->block[0];
vp8_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16];
unsigned int best_err;
int step_param, further_steps;
static int dummy_cost[2*mv_max+1];
int *mvcost[2] = { &dummy_cost[mv_max+1], &dummy_cost[mv_max+1] };
int *mvsadcost[2] = { &dummy_cost[mv_max+1], &dummy_cost[mv_max+1] };
// Further step/diamond searches as necessary
if (cpi->Speed < 8)
{
step_param = cpi->sf.first_step + ((cpi->Speed > 5) ? 1 : 0);
further_steps = (cpi->sf.max_step_search_steps - 1) - step_param;
}
else
{
step_param = cpi->sf.first_step + 2;
further_steps = 0;
}
/*cpi->sf.search_method == HEX*/
best_err = vp8_hex_search(x, b, d,
ref_mv, dst_mv,
step_param,
x->errorperbit,
&v_fn_ptr,
mvsadcost, mvcost, ref_mv);
// Try sub-pixel MC
//if (bestsme > error_thresh && bestsme < INT_MAX)
{
int distortion;
unsigned int sse;
best_err = cpi->find_fractional_mv_step(x, b, d,
dst_mv, ref_mv,
x->errorperbit, &v_fn_ptr,
mvcost, &distortion, &sse);
}
vp8_set_mbmode_and_mvs(x, NEWMV, dst_mv);
vp8_build_inter16x16_predictors_mby(xd);
VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
(xd->dst.y_buffer, xd->dst.y_stride,
xd->predictor, 16, &best_err);
return best_err;
}
static int do_16x16_motion_search
(
VP8_COMP *cpi,
int_mv *ref_mv,
int_mv *dst_mv,
YV12_BUFFER_CONFIG *buf,
int buf_mb_y_offset,
YV12_BUFFER_CONFIG *ref,
int mb_y_offset
)
{
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
unsigned int err, tmp_err;
int_mv tmp_mv;
int n;
for (n = 0; n < 16; n++) {
BLOCKD *d = &xd->block[n];
BLOCK *b = &x->block[n];
b->base_src = &buf->y_buffer;
b->src_stride = buf->y_stride;
b->src = buf->y_stride * (n & 12) + (n & 3) * 4 + buf_mb_y_offset;
d->base_pre = &ref->y_buffer;
d->pre_stride = ref->y_stride;
d->pre = ref->y_stride * (n & 12) + (n & 3) * 4 + mb_y_offset;
}
// Try zero MV first
// FIXME should really use something like near/nearest MV and/or MV prediction
xd->pre.y_buffer = ref->y_buffer + mb_y_offset;
xd->pre.y_stride = ref->y_stride;
VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
(ref->y_buffer + mb_y_offset,
ref->y_stride, xd->dst.y_buffer,
xd->dst.y_stride, &err);
dst_mv->as_int = 0;
// Test last reference frame using the previous best mv as the
// starting point (best reference) for the search
tmp_err = do_16x16_motion_iteration(cpi, ref_mv, &tmp_mv);
if (tmp_err < err)
{
err = tmp_err;
dst_mv->as_int = tmp_mv.as_int;
}
// If the current best reference mv is not centred on 0,0 then do a 0,0 based search as well
if (ref_mv->as_int)
{
int tmp_err;
int_mv zero_ref_mv, tmp_mv;
zero_ref_mv.as_int = 0;
tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv);
if (tmp_err < err)
{
dst_mv->as_int = tmp_mv.as_int;
err = tmp_err;
}
}
return err;
}
static int find_best_16x16_intra
(
VP8_COMP *cpi,
YV12_BUFFER_CONFIG *buf,
int mb_y_offset,
MB_PREDICTION_MODE *pbest_mode
)
{
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
MB_PREDICTION_MODE best_mode = -1, mode;
int best_err = INT_MAX;
// calculate SATD for each intra prediction mode;
// we're intentionally not doing 4x4, we just want a rough estimate
for (mode = DC_PRED; mode <= TM_PRED; mode++)
{
unsigned int err;
xd->mode_info_context->mbmi.mode = mode;
RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mby)(xd);
VARIANCE_INVOKE(&cpi->rtcd.variance, satd16x16)
(xd->predictor, 16,
buf->y_buffer + mb_y_offset,
buf->y_stride, &err);
// find best
if (err < best_err)
{
best_err = err;
best_mode = mode;
}
}
if (pbest_mode)
*pbest_mode = best_mode;
return best_err;
}
static void update_mbgraph_mb_stats
(
VP8_COMP *cpi,
MBGRAPH_MB_STATS *stats,
YV12_BUFFER_CONFIG *buf,
int mb_y_offset,
YV12_BUFFER_CONFIG *golden_ref,
int_mv *prev_golden_ref_mv,
int gld_y_offset,
YV12_BUFFER_CONFIG *alt_ref,
int_mv *prev_alt_ref_mv,
int arf_y_offset
)
{
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
int intra_error;
// FIXME in practice we're completely ignoring chroma here
xd->dst.y_buffer = buf->y_buffer + mb_y_offset;
// do intra 16x16 prediction
intra_error = find_best_16x16_intra(cpi, buf, mb_y_offset, &stats->ref[INTRA_FRAME].m.mode);
if (intra_error <= 0)
intra_error = 1;
stats->ref[INTRA_FRAME].err = intra_error;
// Golden frame MV search, if it exists and is different than last frame
if (golden_ref)
{
int g_motion_error = do_16x16_motion_search(cpi, prev_golden_ref_mv,
&stats->ref[GOLDEN_FRAME].m.mv,
buf, mb_y_offset,
golden_ref, gld_y_offset);
stats->ref[GOLDEN_FRAME].err = g_motion_error;
}
else
{
stats->ref[GOLDEN_FRAME].err = INT_MAX;
stats->ref[GOLDEN_FRAME].m.mv.as_int = 0;
}
// Alt-ref frame MV search, if it exists and is different than last/golden frame
if (alt_ref)
{
int a_motion_error = do_16x16_motion_search(cpi, prev_alt_ref_mv,
&stats->ref[ALTREF_FRAME].m.mv,
buf, mb_y_offset,
alt_ref, arf_y_offset);
stats->ref[ALTREF_FRAME].err = a_motion_error;
}
else
{
stats->ref[ALTREF_FRAME].err = INT_MAX;
stats->ref[ALTREF_FRAME].m.mv.as_int = 0;
}
}
static void update_mbgraph_frame_stats
(
VP8_COMP *cpi,
MBGRAPH_FRAME_STATS *stats,
YV12_BUFFER_CONFIG *buf,
YV12_BUFFER_CONFIG *golden_ref,
YV12_BUFFER_CONFIG *alt_ref
)
{
MACROBLOCK * const x = &cpi->mb;
VP8_COMMON * const cm = &cpi->common;
MACROBLOCKD * const xd = &x->e_mbd;
int mb_col, mb_row, offset = 0;
int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
int_mv arf_top_mv, gld_top_mv;
// Set up limit values for motion vectors to prevent them extending outside the UMV borders
arf_top_mv.as_int = 0;
gld_top_mv.as_int = 0;
x->mv_row_min = -(VP8BORDERINPIXELS - 19);
x->mv_row_max = (cm->mb_rows - 1) * 16 + VP8BORDERINPIXELS - 19;
xd->up_available = 0;
xd->dst.y_stride = buf->y_stride;
xd->pre.y_stride = buf->y_stride;
xd->dst.uv_stride = buf->uv_stride;
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
{
int_mv arf_left_mv, gld_left_mv;
int mb_y_in_offset = mb_y_offset;
int arf_y_in_offset = arf_y_offset;
int gld_y_in_offset = gld_y_offset;
// Set up limit values for motion vectors to prevent them extending outside the UMV borders
arf_left_mv.as_int = arf_top_mv.as_int;
gld_left_mv.as_int = gld_top_mv.as_int;
x->mv_col_min = -(VP8BORDERINPIXELS - 19);
x->mv_col_max = (cm->mb_cols - 1) * 16 + VP8BORDERINPIXELS - 19;
xd->left_available = 0;
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col];
update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset,
golden_ref, &gld_left_mv, gld_y_in_offset,
alt_ref, &arf_left_mv, arf_y_in_offset);
arf_left_mv.as_int = mb_stats->ref[ALTREF_FRAME].m.mv.as_int;
gld_left_mv.as_int = mb_stats->ref[GOLDEN_FRAME].m.mv.as_int;
if (mb_col == 0)
{
arf_top_mv.as_int = arf_left_mv.as_int;
gld_top_mv.as_int = gld_left_mv.as_int;
}
xd->left_available = 1;
mb_y_in_offset += 16;
gld_y_in_offset += 16;
arf_y_in_offset += 16;
x->mv_col_min -= 16;
x->mv_col_max -= 16;
}
xd->up_available = 1;
mb_y_offset += buf->y_stride * 16;
gld_y_offset += golden_ref->y_stride * 16;
if (alt_ref)
arf_y_offset += alt_ref->y_stride * 16;
x->mv_row_min -= 16;
x->mv_row_max -= 16;
offset += cm->mb_cols;
}
}
//void separate_arf_mbs_byzz
void separate_arf_mbs
(
VP8_COMP *cpi
)
{
VP8_COMMON * const cm = &cpi->common;
int mb_col, mb_row, offset, i;
int ncnt[4];
int n_frames = cpi->mbgraph_n_frames;
int * arf_not_zz;
CHECK_MEM_ERROR(arf_not_zz,
vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), 1));
vpx_memset(arf_not_zz, 0, sizeof(arf_not_zz));
// We are not interested in results beyond the alt ref itself.
if ( n_frames > cpi->frames_till_gf_update_due )
n_frames = cpi->frames_till_gf_update_due;
// defer cost to reference frames
for (i = n_frames - 1; i >= 0; i--)
{
MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
offset += cm->mb_cols, mb_row++)
{
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
MBGRAPH_MB_STATS *mb_stats =
&frame_stats->mb_stats[offset + mb_col];
int altref_err = mb_stats->ref[ALTREF_FRAME].err;
int intra_err =
((mb_stats->ref[INTRA_FRAME ].err * 9) >> 3);
int golden_err =
250 + ((mb_stats->ref[GOLDEN_FRAME].err * 9) >> 3);
// Test for altref vs intra and gf and that its mv was 0,0.
if ( mb_stats->ref[ALTREF_FRAME].m.mv.as_int ||
( (altref_err > 500) &&
( (altref_err > (intra_err >> 2)) ||
(altref_err > golden_err) ) ) )
{
arf_not_zz[offset + mb_col]++;
}
}
}
}
vpx_memset(ncnt, 0, sizeof(ncnt));
for (offset = 0, mb_row = 0; mb_row < cm->mb_rows;
offset += cm->mb_cols, mb_row++)
{
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
{
// If any of the blocks in the sequence failed then the MB
// goes in segment 0
if ( arf_not_zz[offset + mb_col] )
{
ncnt[0]++;
cpi->segmentation_map[offset + mb_col] = 0;
}
else
{
ncnt[1]++;
cpi->segmentation_map[offset + mb_col] = 1;
}
}
}
// Only bother with segmentation if over 10% of the MBs in static segment
if ( ncnt[1] && (ncnt[0] / ncnt[1] < 10) )
{
cpi->mbgraph_use_arf_segmentation = ncnt[1];
vp8_enable_segmentation((VP8_PTR) cpi);
}
else
{
cpi->mbgraph_use_arf_segmentation = 0;
vp8_disable_segmentation((VP8_PTR) cpi);
}
// Free localy allocated storage
vpx_free(arf_not_zz);
}
void vp8_update_mbgraph_stats
(
VP8_COMP *cpi
)
{
VP8_COMMON * const cm = &cpi->common;
int i, n_frames = vp8_lookahead_depth(cpi->lookahead);
YV12_BUFFER_CONFIG *golden_ref = &cm->yv12_fb[cm->gld_fb_idx];
// we need to look ahead beyond where the ARF transitions into
// being a GF - so exit if we don't look ahead beyond that
if (n_frames <= cpi->frames_till_gf_update_due)
return;
if (n_frames > MAX_LAG_BUFFERS)
n_frames = MAX_LAG_BUFFERS;
cpi->mbgraph_n_frames = n_frames;
for (i = 0; i < n_frames; i++)
{
MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
vpx_memset(frame_stats->mb_stats, 0,
cm->mb_rows * cm->mb_cols * sizeof(*cpi->mbgraph_stats[i].mb_stats));
}
// do motion search to find contribution of each reference to data
// later on in this GF group
// FIXME really, the GF/last MC search should be done forward, and
// the ARF MC search backwards, to get optimal results for MV caching
for (i = 0; i < n_frames; i++)
{
MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
struct lookahead_entry *q_cur =
vp8_lookahead_peek(cpi->lookahead, i);
assert(q_cur != NULL);
update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img,
golden_ref, cpi->Source);
}
vp8_clear_system_state(); //__asm emms;
separate_arf_mbs(cpi);
}

16
vp8/encoder/mbgraph.h Normal file
View File

@ -0,0 +1,16 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef __INC_MBGRAPH_H__
#define __INC_MBGRAPH_H__ 1
extern void vp8_update_mbgraph_stats(VP8_COMP *cpi);
#endif /* __INC_MBGRAPH_H__ */

View File

@ -33,6 +33,12 @@
#include "vp8/common/threading.h"
#include "vpx_ports/vpx_timer.h"
#include "temporal_filter.h"
#if CONFIG_SEGFEATURES
#include "vp8/common/seg_common.h"
#include "mbgraph.h"
#endif
#if ARCH_ARM
#include "vpx_ports/arm.h"
#endif
@ -404,58 +410,15 @@ static void dealloc_compressor_data(VP8_COMP *cpi)
#endif
}
static void enable_segmentation(VP8_PTR ptr)
{
VP8_COMP *cpi = (VP8_COMP *)(ptr);
// Set the appropriate feature bit
cpi->mb.e_mbd.segmentation_enabled = 1;
cpi->mb.e_mbd.update_mb_segmentation_map = 1;
cpi->mb.e_mbd.update_mb_segmentation_data = 1;
}
static void disable_segmentation(VP8_PTR ptr)
{
VP8_COMP *cpi = (VP8_COMP *)(ptr);
// Clear the appropriate feature bit
cpi->mb.e_mbd.segmentation_enabled = 0;
}
// Valid values for a segment are 0 to 3
// Segmentation map is arrange as [Rows][Columns]
static void set_segmentation_map(VP8_PTR ptr, unsigned char *segmentation_map)
{
VP8_COMP *cpi = (VP8_COMP *)(ptr);
// Copy in the new segmentation map
vpx_memcpy(cpi->segmentation_map, segmentation_map, (cpi->common.mb_rows * cpi->common.mb_cols));
// Signal that the map should be updated.
cpi->mb.e_mbd.update_mb_segmentation_map = 1;
cpi->mb.e_mbd.update_mb_segmentation_data = 1;
}
static void set_segment_data(VP8_PTR ptr, signed char *feature_data, unsigned char abs_delta)
{
VP8_COMP *cpi = (VP8_COMP *)(ptr);
cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
vpx_memcpy(cpi->segment_feature_data, feature_data,
sizeof(cpi->segment_feature_data));
#if CONFIG_SEGFEATURES
// TBD ?? Set the feature mask
// vpx_memset(xd->segment_feature_mask, 0, sizeof(xd->segment_feature_mask));
#endif
}
static void segmentation_test_function(VP8_PTR ptr)
{
VP8_COMP *cpi = (VP8_COMP *)(ptr);
unsigned char *seg_map;
signed char feature_data[SEG_LVL_MAX][MAX_MB_SEGMENTS];
MACROBLOCKD *xd = &cpi->mb.e_mbd;
CHECK_MEM_ERROR(seg_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
// Create a temporary map for segmentation data.
// MB loop to set local segmentation map
@ -479,10 +442,10 @@ static void segmentation_test_function(VP8_PTR ptr)
}*/
// Set the segmentation Map
set_segmentation_map(ptr, seg_map);
vp8_set_segmentation_map(ptr, seg_map);
// Activate segmentation.
enable_segmentation(ptr);
vp8_enable_segmentation(ptr);
// Set up the quant segment data
feature_data[SEG_LVL_ALT_Q][0] = 0;
@ -495,12 +458,17 @@ static void segmentation_test_function(VP8_PTR ptr)
feature_data[SEG_LVL_ALT_LF][2] = 0;
feature_data[SEG_LVL_ALT_LF][3] = 0;
#if CONFIG_SEGFEATURES
// Enable features as required
enable_segfeature(xd, 1, SEG_LVL_ALT_Q);
#endif
// Initialise the feature data structure
// SEGMENT_DELTADATA 0, SEGMENT_ABSDATA 1
set_segment_data(ptr, &feature_data[0][0], SEGMENT_DELTADATA);
vp8_set_segment_data(ptr, &feature_data[0][0], SEGMENT_DELTADATA);
// Delete sementation map
vpx_free(seg_map);
vpx_free(seg_map);
seg_map = 0;
@ -510,39 +478,60 @@ static void segmentation_test_function(VP8_PTR ptr)
static void init_seg_features(VP8_COMP *cpi)
{
VP8_COMMON *cm = &cpi->common;
MACROBLOCKD *mbd = &cpi->mb.e_mbd;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
// For now at least dont enable seg features alongside cyclic refresh.
if (cpi->cyclic_refresh_mode_enabled)
return;
// No updates for key frames
if ( cm->frame_type == KEY_FRAME )
if ( cpi->cyclic_refresh_mode_enabled ||
(cpi->pass != 2) )
{
cpi->mb.e_mbd.update_mb_segmentation_map = 0;
cpi->mb.e_mbd.update_mb_segmentation_data = 0;
vp8_disable_segmentation((VP8_PTR)cpi);
vpx_memset( cpi->segmentation_map, 0, (cm->mb_rows * cm->mb_cols));
return;
}
// Arf but not a key frame.
else if ( cm->refresh_alt_ref_frame )
// Disable and clear down for KF,ARF and low Q
if ( cm->frame_type == KEY_FRAME || cm->refresh_alt_ref_frame )
{
// Clear down the global segmentation map
vpx_memset( cpi->segmentation_map, 0, (cm->mb_rows * cm->mb_cols));
xd->update_mb_segmentation_map = 0;
xd->update_mb_segmentation_data = 0;
// Activate segmentation.
enable_segmentation((VP8_PTR)cpi);
// For now set GF, (0,0) MV in segment 1
cpi->segment_feature_data[1][SEG_LVL_REF_FRAME] = LAST_FRAME;
cpi->segment_feature_data[1][SEG_LVL_MODE] = ZEROMV;
mbd->segment_feature_data[1][SEG_LVL_REF_FRAME] = LAST_FRAME;
mbd->segment_feature_data[1][SEG_LVL_MODE] = ZEROMV;
// Enable target features is the segment feature mask
mbd->segment_feature_mask[1] |= (0x01 << SEG_LVL_REF_FRAME);
mbd->segment_feature_mask[1] |= (0x01 << SEG_LVL_MODE);
// Disable segmentation
vp8_disable_segmentation((VP8_PTR)cpi);
}
else
// First normal frame in a valid alt ref group and we dont have low Q
else if ( cpi->source_alt_ref_active &&
(cpi->common.frames_since_golden == 1) )
{
// Low Q test (only use segmentation at high q)
if ( ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) &&
(cpi->cq_target_quality > 56 ) ) ||
(cpi->ni_av_qi > 64) )
{
xd->segment_feature_data[1][SEG_LVL_REF_FRAME] = LAST_FRAME;
xd->segment_feature_data[1][SEG_LVL_MODE] = ZEROMV;
xd->segment_feature_data[1][SEG_LVL_EOB] = 10;
xd->segment_feature_data[1][SEG_LVL_ALT_Q] = 10;
xd->segment_feature_data[1][SEG_LVL_ALT_LF] = -5;
// Enable target features is the segment feature mask
enable_segfeature(xd, 1, SEG_LVL_REF_FRAME);
enable_segfeature(xd, 1, SEG_LVL_MODE);
enable_segfeature(xd, 1, SEG_LVL_EOB);
enable_segfeature(xd, 1, SEG_LVL_ALT_Q);
enable_segfeature(xd, 1, SEG_LVL_ALT_LF);
// Where relevant assume segment data is delta data
xd->mb_segement_abs_delta = SEGMENT_DELTADATA;
// Scan frames from current to arf frame and define segmentation
vp8_update_mbgraph_stats(cpi);
}
}
// Normal frames if segmentation got enabled.
else if ( xd->segmentation_enabled )
{
// Special case where we are coding over the top of a previous
// alt ref frame
@ -550,21 +539,20 @@ static void init_seg_features(VP8_COMP *cpi)
{
if ( cpi->source_alt_ref_pending )
{
cpi->mb.e_mbd.update_mb_segmentation_data = 1;
cpi->segment_feature_data[1][SEG_LVL_REF_FRAME] = ALTREF_FRAME;
mbd->segment_feature_data[1][SEG_LVL_REF_FRAME] = ALTREF_FRAME;
xd->update_mb_segmentation_data = 1;
xd->segment_feature_data[1][SEG_LVL_REF_FRAME] = ALTREF_FRAME;
}
else
{
vpx_memset( cpi->segmentation_map, 0,
(cm->mb_rows * cm->mb_cols));
cpi->mb.e_mbd.update_mb_segmentation_map = 1;
cpi->mb.e_mbd.update_mb_segmentation_data = 1;
xd->update_mb_segmentation_map = 1;
xd->update_mb_segmentation_data = 1;
}
}
else
{
cpi->mb.e_mbd.update_mb_segmentation_data = 0;
xd->update_mb_segmentation_data = 0;
}
}
}
@ -607,6 +595,7 @@ static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment)
int i;
int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe;
int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
// Create a temporary map for segmentation data.
CHECK_MEM_ERROR(seg_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1));
@ -664,10 +653,10 @@ static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment)
}
// Set the segmentation Map
set_segmentation_map((VP8_PTR)cpi, seg_map);
vp8_set_segmentation_map((VP8_PTR)cpi, seg_map);
// Activate segmentation.
enable_segmentation((VP8_PTR)cpi);
vp8_enable_segmentation((VP8_PTR)cpi);
// Set up the quant segment data
feature_data[SEG_LVL_ALT_Q][0] = 0;
@ -681,9 +670,15 @@ static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment)
feature_data[SEG_LVL_ALT_LF][2] = 0;
feature_data[SEG_LVL_ALT_LF][3] = 0;
#if CONFIG_SEGFEATURES
// Enable the loop and quant changes in the feature mask
enable_segfeature(xd, 1, SEG_LVL_ALT_Q);
enable_segfeature(xd, 1, SEG_LVL_ALT_LF);
#endif
// Initialise the feature data structure
// SEGMENT_DELTADATA 0, SEGMENT_ABSDATA 1
set_segment_data((VP8_PTR)cpi, &feature_data[0][0], SEGMENT_DELTADATA);
vp8_set_segment_data((VP8_PTR)cpi, &feature_data[0][0], SEGMENT_DELTADATA);
// Delete sementation map
vpx_free(seg_map);
@ -2067,6 +2062,17 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
}
#endif
#if CONFIG_SEGFEATURES
for (i = 0; i < ( sizeof(cpi->mbgraph_stats) /
sizeof(cpi->mbgraph_stats[0]) ); i++)
{
CHECK_MEM_ERROR(cpi->mbgraph_stats[i].mb_stats,
vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols *
sizeof(*cpi->mbgraph_stats[i].mb_stats),
1));
}
#endif
// Should we use the cyclic refresh method.
// Currently this is tied to error resilliant mode
cpi->cyclic_refresh_mode_enabled = cpi->oxcf.error_resilient_mode;
@ -2305,6 +2311,7 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
void vp8_remove_compressor(VP8_PTR *ptr)
{
VP8_COMP *cpi = (VP8_COMP *)(*ptr);
int i;
if (!cpi)
return;
@ -2520,6 +2527,13 @@ void vp8_remove_compressor(VP8_PTR *ptr)
vpx_free(cpi->tok);
vpx_free(cpi->cyclic_refresh_map);
#if CONFIG_SEGFEATURES
for (i = 0; i < sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]); i++)
{
vpx_free(cpi->mbgraph_stats[i].mb_stats);
}
#endif
vp8_remove_common(&cpi->common);
vpx_free(cpi);
*ptr = 0;
@ -3550,22 +3564,6 @@ static void encode_frame_to_data_rate
cm->frame_type = KEY_FRAME;
}
// Test code for segmentation of gf/arf (0,0)
//segmentation_test_function((VP8_PTR) cpi);
#if CONFIG_SEGMENTATION
cpi->mb.e_mbd.segmentation_enabled = 1;
cpi->mb.e_mbd.update_mb_segmentation_map = 1;
#else
#if CONFIG_SEGFEATURES
// Test code for new segment features
init_seg_features( cpi );
#else
// Set default state for segment update flags
cpi->mb.e_mbd.update_mb_segmentation_map = 0;
cpi->mb.e_mbd.update_mb_segmentation_data = 0;
#endif
#endif
// Set default state for segment based loop filter update flags
cpi->mb.e_mbd.mode_ref_lf_delta_update = 0;
@ -3597,9 +3595,9 @@ static void encode_frame_to_data_rate
// Test code for segmentation
//if ( (cm->frame_type == KEY_FRAME) || ((cm->current_video_frame % 2) == 0))
//if ( (cm->current_video_frame % 2) == 0 )
// enable_segmentation((VP8_PTR)cpi);
// vp8_enable_segmentation((VP8_PTR)cpi);
//else
// disable_segmentation((VP8_PTR)cpi);
// vp8_disable_segmentation((VP8_PTR)cpi);
#if 0
// Experimental code for lagged compress and one pass
@ -3623,6 +3621,22 @@ static void encode_frame_to_data_rate
update_rd_ref_frame_probs(cpi);
// Test code for segmentation of gf/arf (0,0)
//segmentation_test_function((VP8_PTR) cpi);
#if CONFIG_SEGMENTATION
cpi->mb.e_mbd.segmentation_enabled = 1;
cpi->mb.e_mbd.update_mb_segmentation_map = 1;
#else
#if CONFIG_SEGFEATURES
// Test code for new segment features
init_seg_features( cpi );
#else
// Set default state for segment update flags
cpi->mb.e_mbd.update_mb_segmentation_map = 0;
cpi->mb.e_mbd.update_mb_segmentation_data = 0;
#endif
#endif
if (cpi->drop_frames_allowed)
{
// The reset to decimation 0 is only done here for one pass.
@ -5325,21 +5339,23 @@ int vp8_set_roimap(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned
{
VP8_COMP *cpi = (VP8_COMP *) comp;
signed char feature_data[SEG_LVL_MAX][MAX_MB_SEGMENTS];
MACROBLOCKD *xd = &cpi->mb.e_mbd;
int i;
if (cpi->common.mb_rows != rows || cpi->common.mb_cols != cols)
return -1;
if (!map)
{
disable_segmentation((VP8_PTR)cpi);
vp8_disable_segmentation((VP8_PTR)cpi);
return 0;
}
// Set the segmentation Map
set_segmentation_map((VP8_PTR)cpi, map);
vp8_set_segmentation_map((VP8_PTR)cpi, map);
// Activate segmentation.
enable_segmentation((VP8_PTR)cpi);
vp8_enable_segmentation((VP8_PTR)cpi);
// Set up the quant segment data
feature_data[SEG_LVL_ALT_Q][0] = delta_q[0];
@ -5358,9 +5374,25 @@ int vp8_set_roimap(VP8_PTR comp, unsigned char *map, unsigned int rows, unsigned
cpi->segment_encode_breakout[2] = threshold[2];
cpi->segment_encode_breakout[3] = threshold[3];
#if CONFIG_SEGFEATURES
// Enable the loop and quant changes in the feature mask
for ( i = 0; i < 4; i++ )
{
if (delta_q[i])
enable_segfeature(xd, i, SEG_LVL_ALT_Q);
else
disable_segfeature(xd, i, SEG_LVL_ALT_Q);
if (delta_lf[i])
enable_segfeature(xd, i, SEG_LVL_ALT_LF);
else
disable_segfeature(xd, i, SEG_LVL_ALT_LF);
}
#endif
// Initialise the feature data structure
// SEGMENT_DELTADATA 0, SEGMENT_ABSDATA 1
set_segment_data((VP8_PTR)cpi, &feature_data[0][0], SEGMENT_DELTADATA);
vp8_set_segment_data((VP8_PTR)cpi, &feature_data[0][0], SEGMENT_DELTADATA);
return 0;
}

View File

@ -133,6 +133,21 @@ typedef struct
} ONEPASS_FRAMESTATS;
typedef struct
{
struct {
int err;
union {
int_mv mv;
MB_PREDICTION_MODE mode;
} m;
} ref[MAX_REF_FRAMES];
} MBGRAPH_MB_STATS;
typedef struct
{
MBGRAPH_MB_STATS *mb_stats;
} MBGRAPH_FRAME_STATS;
typedef enum
{
@ -431,6 +446,13 @@ typedef struct VP8_COMP
ONEPASS_FRAMESTATS one_pass_frame_stats[MAX_LAG_BUFFERS];
int one_pass_frame_index;
#endif
MBGRAPH_FRAME_STATS mbgraph_stats[MAX_LAG_BUFFERS];
int mbgraph_n_frames; // number of frames filled in the above
int mbgraph_use_arf_segmentation; // set if part of an ARF is considered to be a
// poor predictor, and thus coeffs are skipped
// or coded at a higher Q using MB-segmentation
// this value is the number of MBs that are
// poor predictors (> 0 and < common.MBs)
int decimation_factor;
int decimation_count;
@ -483,12 +505,6 @@ typedef struct VP8_COMP
unsigned char *segmentation_map;
// Segment data (can be deltas or absolute values)
signed char segment_feature_data[MAX_MB_SEGMENTS][SEG_LVL_MAX];
#if CONFIG_SEGFEATURES
unsigned int segment_feature_mask[MAX_MB_SEGMENTS];
#endif
// segment threashold for encode breakout
int segment_encode_breakout[MAX_MB_SEGMENTS];

View File

@ -27,6 +27,10 @@
#include "rdopt.h"
#include "vpx_mem/vpx_mem.h"
#if CONFIG_SEGFEATURES
#include "vp8/common/seg_common.h"
#endif
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
#else
@ -517,21 +521,19 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
#if CONFIG_SEGFEATURES
// Experimental use of Segment features.
if ( !cm->refresh_alt_ref_frame )
if ( xd->segmentation_enabled && !cm->refresh_alt_ref_frame )
{
unsigned char segment_id = xd->mode_info_context->mbmi.segment_id;
int feature_mask = xd->segment_feature_mask[segment_id];
if ( (feature_mask & (0x01 << SEG_LVL_REF_FRAME)) &&
( x->e_mbd.mode_info_context->mbmi.ref_frame !=
cpi->segment_feature_data[segment_id][SEG_LVL_REF_FRAME]))
if ( segfeature_active( xd, segment_id, SEG_LVL_REF_FRAME ) &&
( xd->mode_info_context->mbmi.ref_frame !=
xd->segment_feature_data[segment_id][SEG_LVL_REF_FRAME]))
{
continue;
}
if ( (feature_mask & (0x01 << SEG_LVL_MODE)) &&
if ( segfeature_active( xd, segment_id, SEG_LVL_MODE ) &&
( this_mode !=
cpi->segment_feature_data[segment_id][SEG_LVL_MODE]))
xd->segment_feature_data[segment_id][SEG_LVL_MODE]))
{
continue;
}

View File

@ -256,20 +256,6 @@ void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
// Stub function for now Alt LF not used
void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val)
{
MACROBLOCKD *mbd = &cpi->mb.e_mbd;
int i;
for ( i = 0; i < MAX_MB_SEGMENTS; i++ )
{
mbd->segment_feature_data[i][SEG_LVL_ALT_LF] =
cpi->segment_feature_data[i][SEG_LVL_ALT_LF];
#if CONFIG_SEGFEATURES
mbd->segment_feature_mask[i] &= ~(1 << SEG_LVL_ALT_LF);
mbd->segment_feature_mask[i] |=
cpi->segment_feature_mask[i] & (1 << SEG_LVL_ALT_LF);
#endif
}
}
void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)

View File

@ -16,6 +16,10 @@
#include "quantize.h"
#include "vp8/common/quant_common.h"
#if CONFIG_SEGFEATURES
#include "vp8/common/seg_common.h"
#endif
#ifdef ENC_DEBUG
extern int enc_debug;
#endif
@ -132,7 +136,11 @@ void vp8_regular_quantize_b(BLOCK *b, BLOCKD *d)
eob = -1;
#if CONFIG_SEGFEATURES
for (i = 0; i < b->eob_max_offset; i++)
#else
for (i = 0; i < 16; i++)
#endif
{
rc = vp8_default_zig_zag1d[i];
z = coeff_ptr[rc];
@ -1171,8 +1179,7 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
// Select the baseline MB Q index allowing for any segment level change.
#if CONFIG_SEGFEATURES
if ( xd->segmentation_enabled &&
( xd->segment_feature_mask[segment_id] & (0x01 << SEG_LVL_ALT_Q) ) )
if ( segfeature_active( xd, segment_id, SEG_LVL_ALT_Q ) )
#else
if ( xd->segmentation_enabled )
#endif
@ -1210,6 +1217,16 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
x->block[i].zbin_extra = (short)zbin_extra;
#if CONFIG_SEGFEATURES
// Segment max eob offset feature.
if ( segfeature_active( xd, segment_id, SEG_LVL_EOB ) )
{
x->block[i].eob_max_offset =
xd->segment_feature_data[segment_id][SEG_LVL_EOB];
}
else
x->block[i].eob_max_offset = 16;
#endif
}
// UV
@ -1228,6 +1245,16 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
x->block[i].zbin_extra = (short)zbin_extra;
#if CONFIG_SEGFEATURES
// Segment max eob offset feature.
if ( segfeature_active( xd, segment_id, SEG_LVL_EOB ) )
{
x->block[i].eob_max_offset =
xd->segment_feature_data[segment_id][SEG_LVL_EOB];
}
else
x->block[i].eob_max_offset = 16;
#endif
}
// Y2
@ -1245,6 +1272,18 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
x->block[24].zbin_extra = (short)zbin_extra;
#if CONFIG_SEGFEATURES
// TBD perhaps not use for Y2
// Segment max eob offset feature.
if ( segfeature_active( xd, segment_id, SEG_LVL_EOB ) )
{
x->block[24].eob_max_offset =
xd->segment_feature_data[segment_id][SEG_LVL_EOB];
}
else
x->block[24].eob_max_offset = 16;
#endif
/* save this macroblock QIndex for vp8_update_zbin_extra() */
x->q_index = QIndex;
}
@ -1324,23 +1363,8 @@ void vp8_set_quantizer(struct VP8_COMP *cpi, int Q)
update |= cm->y2dc_delta_q != new_delta_q;
cm->y2dc_delta_q = new_delta_q;
// Set Segment specific quatizers if enabled
for ( i = 0; i < MAX_MB_SEGMENTS; i++ )
{
mbd->segment_feature_data[i][SEG_LVL_ALT_Q] =
cpi->segment_feature_data[i][SEG_LVL_ALT_Q];
#if CONFIG_SEGFEATURES
mbd->segment_feature_mask[i] &= ~(1 << SEG_LVL_ALT_Q);
mbd->segment_feature_mask[i] |=
cpi->segment_feature_mask[i] & (1 << SEG_LVL_ALT_Q);
#endif
}
/* quantizer has to be reinitialized for any delta_q changes */
if(update)
vp8cx_init_quantizer(cpi);
}

View File

@ -36,6 +36,10 @@
#include "dct.h"
#include "vp8/common/systemdependent.h"
#if CONFIG_SEGFEATURES
#include "vp8/common/seg_common.h"
#endif
#if CONFIG_RUNTIME_CPU_DETECT
#define IF_RTCD(x) (x)
#else
@ -2122,21 +2126,19 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
#if CONFIG_SEGFEATURES
// Experimental use of Segment features.
if ( !cm->refresh_alt_ref_frame )
if ( xd->segmentation_enabled && !cm->refresh_alt_ref_frame )
{
unsigned char segment_id = xd->mode_info_context->mbmi.segment_id;
int feature_mask = xd->segment_feature_mask[segment_id];
if ( (feature_mask & (0x01 << SEG_LVL_REF_FRAME)) &&
( x->e_mbd.mode_info_context->mbmi.ref_frame !=
cpi->segment_feature_data[segment_id][SEG_LVL_REF_FRAME]))
if ( segfeature_active( xd, segment_id, SEG_LVL_REF_FRAME ) &&
( xd->mode_info_context->mbmi.ref_frame !=
xd->segment_feature_data[segment_id][SEG_LVL_REF_FRAME]))
{
continue;
}
if ( (feature_mask & (0x01 << SEG_LVL_MODE)) &&
( this_mode !=
cpi->segment_feature_data[segment_id][SEG_LVL_MODE]))
if ( segfeature_active( xd, segment_id, SEG_LVL_MODE ) &&
( this_mode !=
xd->segment_feature_data[segment_id][SEG_LVL_MODE]))
{
continue;
}

53
vp8/encoder/satd_c.c Normal file
View File

@ -0,0 +1,53 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdlib.h>
#include "dct.h"
#include "vpx_ports/mem.h"
unsigned int vp8_satd16x16_c(const unsigned char *src_ptr,
int src_stride,
const unsigned char *ref_ptr,
int ref_stride,
unsigned int *psatd)
{
int r, c, i;
unsigned int satd = 0;
DECLARE_ALIGNED(16, short, diff_in[256]);
DECLARE_ALIGNED(16, short, diff_out[16]);
short *in;
for (r = 0; r < 16; r++)
{
for (c = 0; c < 16; c++)
{
diff_in[r * 16 + c] = src_ptr[c] - ref_ptr[c];
}
src_ptr += src_stride;
ref_ptr += ref_stride;
}
in = diff_in;
for (r = 0; r < 16; r += 4)
{
for (c = 0; c < 16; c+=4)
{
vp8_short_walsh4x4_c(in + c, diff_out, 32);
for(i = 0; i < 16; i++)
satd += abs(diff_out[i]);
}
in += 64;
}
if (psatd)
*psatd = satd;
return satd;
}

View File

@ -37,8 +37,10 @@ void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x)
// If using golden then set GF active flag if not already set.
// If using last frame 0,0 mode then leave flag as it is
// else if using non 0,0 motion or intra modes then clear flag if it is currently set
if ((this_mb_mode_info->mbmi.ref_frame == GOLDEN_FRAME) || (this_mb_mode_info->mbmi.ref_frame == ALTREF_FRAME))
// else if using non 0,0 motion or intra modes then clear
// flag if it is currently set
if ((this_mb_mode_info->mbmi.ref_frame == GOLDEN_FRAME) ||
(this_mb_mode_info->mbmi.ref_frame == ALTREF_FRAME))
{
if (*(x->gf_active_ptr) == 0)
{
@ -46,7 +48,8 @@ void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x)
cpi->gf_active_count ++;
}
}
else if ((this_mb_mode_info->mbmi.mode != ZEROMV) && *(x->gf_active_ptr))
else if ((this_mb_mode_info->mbmi.mode != ZEROMV) &&
*(x->gf_active_ptr))
{
*(x->gf_active_ptr) = 0;
cpi->gf_active_count--;
@ -62,3 +65,54 @@ void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x)
}
}
}
void vp8_enable_segmentation(VP8_PTR ptr)
{
VP8_COMP *cpi = (VP8_COMP *)(ptr);
// Set the appropriate feature bit
cpi->mb.e_mbd.segmentation_enabled = 1;
cpi->mb.e_mbd.update_mb_segmentation_map = 1;
cpi->mb.e_mbd.update_mb_segmentation_data = 1;
}
void vp8_disable_segmentation(VP8_PTR ptr)
{
VP8_COMP *cpi = (VP8_COMP *)(ptr);
// Clear the appropriate feature bit
cpi->mb.e_mbd.segmentation_enabled = 0;
}
void vp8_set_segmentation_map(VP8_PTR ptr,
unsigned char *segmentation_map)
{
VP8_COMP *cpi = (VP8_COMP *)(ptr);
// Copy in the new segmentation map
vpx_memcpy( cpi->segmentation_map, segmentation_map,
(cpi->common.mb_rows * cpi->common.mb_cols) );
// Signal that the map should be updated.
cpi->mb.e_mbd.update_mb_segmentation_map = 1;
cpi->mb.e_mbd.update_mb_segmentation_data = 1;
}
void vp8_set_segment_data(VP8_PTR ptr,
signed char *feature_data,
unsigned char abs_delta)
{
VP8_COMP *cpi = (VP8_COMP *)(ptr);
cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta;
vpx_memcpy(cpi->mb.e_mbd.segment_feature_data, feature_data,
sizeof(cpi->mb.e_mbd.segment_feature_data));
#if CONFIG_SEGFEATURES
// TBD ?? Set the feature mask
// vpx_memcpy(cpi->mb.e_mbd.segment_feature_mask, 0,
// sizeof(cpi->mb.e_mbd.segment_feature_mask));
#endif
}

View File

@ -13,4 +13,29 @@
#include "vp8/common/blockd.h"
#include "onyx_int.h"
#ifndef __INC_SEGMENTATION_H__
#define __INC_SEGMENTATION_H__ 1
extern void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x);
extern void vp8_enable_segmentation(VP8_PTR ptr);
extern void vp8_disable_segmentation(VP8_PTR ptr);
// Valid values for a segment are 0 to 3
// Segmentation map is arrange as [Rows][Columns]
extern void vp8_set_segmentation_map(VP8_PTR ptr, unsigned char *segmentation_map);
// The values given for each segment can be either deltas (from the default
// value chosen for the frame) or absolute values.
//
// Valid range for abs values is (0-127 for MB_LVL_ALT_Q) , (0-63 for
// SEGMENT_ALT_LF)
// Valid range for delta values are (+/-127 for MB_LVL_ALT_Q) , (+/-63 for
// SEGMENT_ALT_LF)
//
// abs_delta = SEGMENT_DELTADATA (deltas) abs_delta = SEGMENT_ABSDATA (use
// the absolute values given).
//
extern void vp8_set_segment_data(VP8_PTR ptr, signed char *feature_data, unsigned char abs_delta);
#endif /* __INC_SEGMENTATION_H__ */

View File

@ -17,6 +17,10 @@
#include "tokenize.h"
#include "vpx_mem/vpx_mem.h"
#if CONFIG_SEGFEATURES
#include "vp8/common/seg_common.h"
#endif
/* Global event counters used for accumulating statistics across several
compressions, then generating context.c = initial stats. */
@ -167,7 +171,7 @@ static void tokenize2nd_order_b_8x8
static void tokenize2nd_order_b
(
MACROBLOCKD *x,
MACROBLOCKD *xd,
TOKENEXTRA **tp,
VP8_COMP *cpi
)
@ -181,10 +185,20 @@ static void tokenize2nd_order_b
ENTROPY_CONTEXT * l;
int band, rc, v, token;
b = x->block + 24;
#if CONFIG_SEGFEATURES
int seg_eob = 16;
int segment_id = xd->mode_info_context->mbmi.segment_id;
if ( segfeature_active( xd, segment_id, SEG_LVL_EOB ) )
{
seg_eob = xd->segment_feature_data[segment_id][SEG_LVL_EOB];
}
#endif
b = xd->block + 24;
qcoeff_ptr = b->qcoeff;
a = (ENTROPY_CONTEXT *)x->above_context + 8;
l = (ENTROPY_CONTEXT *)x->left_context + 8;
a = (ENTROPY_CONTEXT *)xd->above_context + 8;
l = (ENTROPY_CONTEXT *)xd->left_context + 8;
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
@ -207,7 +221,12 @@ static void tokenize2nd_order_b
pt = vp8_prev_token_class[token];
t++;
}
#if CONFIG_SEGFEATURES
if (c < seg_eob)
#else
if (c < 16)
#endif
{
band = vp8_coef_bands[c];
t->Token = DCT_EOB_TOKEN;
@ -288,7 +307,7 @@ static void tokenize1st_order_b_8x8
static void tokenize1st_order_b
(
MACROBLOCKD *x,
MACROBLOCKD *xd,
TOKENEXTRA **tp,
int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */
VP8_COMP *cpi
@ -306,15 +325,25 @@ static void tokenize1st_order_b
int band, rc, v;
int tmp1, tmp2;
b = x->block;
#if CONFIG_SEGFEATURES
int seg_eob = 16;
int segment_id = xd->mode_info_context->mbmi.segment_id;
if ( segfeature_active( xd, segment_id, SEG_LVL_EOB ) )
{
seg_eob = xd->segment_feature_data[segment_id][SEG_LVL_EOB];
}
#endif
b = xd->block;
/* Luma */
for (block = 0; block < 16; block++, b++)
{
tmp1 = vp8_block2above[block];
tmp2 = vp8_block2left[block];
qcoeff_ptr = b->qcoeff;
a = (ENTROPY_CONTEXT *)x->above_context + tmp1;
l = (ENTROPY_CONTEXT *)x->left_context + tmp2;
a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
@ -340,7 +369,12 @@ static void tokenize1st_order_b
pt = vp8_prev_token_class[token];
t++;
}
#if CONFIG_SEGFEATURES
if (c < seg_eob)
#else
if (c < 16)
#endif
{
band = vp8_coef_bands[c];
t->Token = DCT_EOB_TOKEN;
@ -364,8 +398,8 @@ static void tokenize1st_order_b
tmp1 = vp8_block2above[block];
tmp2 = vp8_block2left[block];
qcoeff_ptr = b->qcoeff;
a = (ENTROPY_CONTEXT *)x->above_context + tmp1;
l = (ENTROPY_CONTEXT *)x->left_context + tmp2;
a = (ENTROPY_CONTEXT *)xd->above_context + tmp1;
l = (ENTROPY_CONTEXT *)xd->left_context + tmp2;
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
@ -388,7 +422,11 @@ static void tokenize1st_order_b
pt = vp8_prev_token_class[token];
t++;
}
#if CONFIG_SEGFEATURES
if (c < seg_eob)
#else
if (c < 16)
#endif
{
band = vp8_coef_bands[c];
t->Token = DCT_EOB_TOKEN;

View File

@ -330,6 +330,11 @@ extern prototype_ssimpf(vp8_ssimpf_8x8)
#endif
extern prototype_ssimpf(vp8_ssimpf_16x16)
#ifndef vp8_variance_satd16x16
#define vp8_variance_satd16x16 vp8_satd16x16_c
#endif
extern prototype_variance(vp8_variance_satd16x16);
typedef prototype_sad(*vp8_sad_fn_t);
typedef prototype_sad_multi_same_address(*vp8_sad_multi_fn_t);
typedef prototype_sad_multi_same_address_1(*vp8_sad_multi1_fn_t);
@ -397,6 +402,7 @@ typedef struct
vp8_ssimpf_fn_t ssimpf_16x16;
#endif
vp8_variance_fn_t satd16x16;
} vp8_variance_rtcd_vtable_t;
typedef struct

View File

@ -217,7 +217,9 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
cpi->rtcd.encodemb.submby = vp8_subtract_mby_sse2;
cpi->rtcd.encodemb.submbuv = vp8_subtract_mbuv_sse2;
#if !CONFIG_SEGFEATURES
cpi->rtcd.quantize.quantb = vp8_regular_quantize_b_sse2;
#endif
cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_sse2;
#if !(CONFIG_REALTIME_ONLY)
@ -279,7 +281,9 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
cpi->rtcd.variance.sad4x4x8 = vp8_sad4x4x8_sse4;
cpi->rtcd.search.full_search = vp8_full_search_sadx8;
#if !CONFIG_SEGFEATURES
cpi->rtcd.quantize.quantb = vp8_regular_quantize_b_sse4;
#endif
}
#endif

View File

@ -50,6 +50,8 @@ VP8_COMMON_SRCS-yes += common/recon.h
VP8_COMMON_SRCS-yes += common/reconinter.h
VP8_COMMON_SRCS-yes += common/reconintra.h
VP8_COMMON_SRCS-yes += common/reconintra4x4.h
VP8_COMMON_SRCS-yes += common/seg_common.h
VP8_COMMON_SRCS-yes += common/seg_common.c
VP8_COMMON_SRCS-yes += common/setupintrarecon.h
VP8_COMMON_SRCS-yes += common/subpixel.h
VP8_COMMON_SRCS-yes += common/swapyv12buffer.h

View File

@ -76,6 +76,7 @@ VP8_CX_SRCS-yes += encoder/quantize.c
VP8_CX_SRCS-yes += encoder/ratectrl.c
VP8_CX_SRCS-yes += encoder/rdopt.c
VP8_CX_SRCS-yes += encoder/sad_c.c
VP8_CX_SRCS-yes += encoder/satd_c.c
VP8_CX_SRCS-yes += encoder/segmentation.c
VP8_CX_SRCS-yes += encoder/segmentation.h
VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/ssim.c
@ -86,6 +87,8 @@ VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.h
VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.c
VP8_CX_SRCS-yes += encoder/temporal_filter.c
VP8_CX_SRCS-yes += encoder/temporal_filter.h
VP8_CX_SRCS-yes += encoder/mbgraph.c
VP8_CX_SRCS-yes += encoder/mbgraph.h
ifeq ($(CONFIG_REALTIME_ONLY),yes)
VP8_CX_SRCS_REMOVE-yes += encoder/firstpass.c