2010-05-18 17:58:33 +02:00
|
|
|
/*
|
2010-09-09 14:16:39 +02:00
|
|
|
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
|
2010-05-18 17:58:33 +02:00
|
|
|
*
|
2010-06-18 18:39:21 +02:00
|
|
|
* Use of this source code is governed by a BSD-style license
|
2010-06-04 22:19:40 +02:00
|
|
|
* that can be found in the LICENSE file in the root of the source
|
|
|
|
* tree. An additional intellectual property rights grant can be found
|
2010-06-18 18:39:21 +02:00
|
|
|
* in the file PATENTS. All contributing project authors may
|
2010-06-04 22:19:40 +02:00
|
|
|
* be found in the AUTHORS file in the root of the source tree.
|
2010-05-18 17:58:33 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
|
2011-09-15 14:34:12 +02:00
|
|
|
#include "vpx_config.h"
|
2010-05-18 17:58:33 +02:00
|
|
|
#include "encodemb.h"
|
|
|
|
#include "encodemv.h"
|
2011-02-10 20:41:38 +01:00
|
|
|
#include "vp8/common/common.h"
|
2010-05-18 17:58:33 +02:00
|
|
|
#include "onyx_int.h"
|
2011-02-10 20:41:38 +01:00
|
|
|
#include "vp8/common/extend.h"
|
|
|
|
#include "vp8/common/entropymode.h"
|
|
|
|
#include "vp8/common/quant_common.h"
|
2010-08-13 20:50:51 +02:00
|
|
|
#include "segmentation.h"
|
2011-02-10 20:41:38 +01:00
|
|
|
#include "vp8/common/setupintrarecon.h"
|
2010-05-18 17:58:33 +02:00
|
|
|
#include "encodeintra.h"
|
2011-02-10 20:41:38 +01:00
|
|
|
#include "vp8/common/reconinter.h"
|
2010-05-18 17:58:33 +02:00
|
|
|
#include "rdopt.h"
|
|
|
|
#include "pickinter.h"
|
2011-02-10 20:41:38 +01:00
|
|
|
#include "vp8/common/findnearmv.h"
|
2010-05-18 17:58:33 +02:00
|
|
|
#include <stdio.h>
|
|
|
|
#include <limits.h>
|
2011-11-09 16:41:05 +01:00
|
|
|
#include "vp8/common/invtrans.h"
|
2010-05-18 17:58:33 +02:00
|
|
|
#include "vpx_ports/vpx_timer.h"
|
2012-02-09 11:37:03 +01:00
|
|
|
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
|
|
|
|
#include "bitstream.h"
|
|
|
|
#endif
|
2012-05-04 18:46:57 +02:00
|
|
|
#include "encodeframe.h"
|
2010-05-18 17:58:33 +02:00
|
|
|
|
|
|
|
extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ;
|
2011-12-10 01:56:18 +01:00
|
|
|
extern void vp8_calc_ref_frame_costs(int *ref_frame_cost,
|
|
|
|
int prob_intra,
|
|
|
|
int prob_last,
|
|
|
|
int prob_garf
|
|
|
|
);
|
|
|
|
extern void vp8_convert_rfct_to_prob(VP8_COMP *const cpi);
|
2010-05-18 17:58:33 +02:00
|
|
|
extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
|
|
|
|
extern void vp8_auto_select_speed(VP8_COMP *cpi);
|
|
|
|
extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
|
|
|
|
MACROBLOCK *x,
|
|
|
|
MB_ROW_COMP *mbr_ei,
|
|
|
|
int mb_row,
|
|
|
|
int count);
|
2011-06-08 17:00:59 +02:00
|
|
|
static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x );
|
2010-05-18 17:58:33 +02:00
|
|
|
|
|
|
|
#ifdef MODE_STATS
/* Per-frame mode usage histograms, only compiled in for statistics builds.
 * An empty-brace-equivalent {0} initializer zero-fills every element.
 */
unsigned int inter_y_modes[10]  = {0};
unsigned int inter_uv_modes[4]  = {0};
unsigned int inter_b_modes[15]  = {0};
unsigned int y_modes[5]         = {0};
unsigned int uv_modes[4]        = {0};
unsigned int b_modes[14]        = {0};
#endif
|
|
|
|
|
|
|
|
|
2010-10-11 23:37:27 +02:00
|
|
|
/* activity_avg must be positive, or flat regions could get a zero weight
 * (infinite lambda), which confounds analysis.
 * This also avoids the need for divide by zero checks in
 * vp8_activity_masking().
 */
#define VP8_ACTIVITY_AVG_MIN (64)

/* This is used as a reference when computing the source variance for the
 * purposes of activity masking.
 * Eventually this should be replaced by custom no-reference routines,
 * which will be faster.
 */
static const unsigned char VP8_VAR_OFFS[16] =
{
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128
};
|
|
|
|
|
2011-05-12 18:01:55 +02:00
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Original activity measure from Tim T's code. */
|
2011-05-19 17:14:13 +02:00
|
|
|
static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
|
2010-10-11 23:37:27 +02:00
|
|
|
{
|
|
|
|
unsigned int act;
|
|
|
|
unsigned int sse;
|
|
|
|
/* TODO: This could also be done over smaller areas (8x8), but that would
|
|
|
|
* require extensive changes elsewhere, as lambda is assumed to be fixed
|
|
|
|
* over an entire MB in most of the code.
|
|
|
|
* Another option is to compute four 8x8 variances, and pick a single
|
|
|
|
* lambda using a non-linear combination (e.g., the smallest, or second
|
|
|
|
* smallest, etc.).
|
|
|
|
*/
|
2012-01-13 01:55:44 +01:00
|
|
|
act = vp8_variance16x16(x->src.y_buffer,
|
2011-06-07 01:42:58 +02:00
|
|
|
x->src.y_stride, VP8_VAR_OFFS, 0, &sse);
|
|
|
|
act = act<<4;
|
2011-05-12 18:01:55 +02:00
|
|
|
|
2010-10-11 23:37:27 +02:00
|
|
|
/* If the region is flat, lower the activity some more. */
|
|
|
|
if (act < 8<<12)
|
|
|
|
act = act < 5<<12 ? act : 5<<12;
|
2011-05-12 18:01:55 +02:00
|
|
|
|
2010-10-11 23:37:27 +02:00
|
|
|
return act;
|
|
|
|
}
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Stub for alternative experimental activity measures. */
|
2011-06-08 17:00:59 +02:00
|
|
|
static unsigned int alt_activity_measure( VP8_COMP *cpi,
|
|
|
|
MACROBLOCK *x, int use_dc_pred )
|
2011-05-12 18:01:55 +02:00
|
|
|
{
|
2011-06-14 12:39:06 +02:00
|
|
|
return vp8_encode_intra(cpi,x, use_dc_pred);
|
2011-05-12 18:01:55 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Measure the activity of the current macroblock
|
|
|
|
* What we measure here is TBD so abstracted to this function
|
|
|
|
*/
|
2011-06-08 17:00:59 +02:00
|
|
|
#define ALT_ACT_MEASURE 1
|
|
|
|
static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x,
|
|
|
|
int mb_row, int mb_col)
|
2011-05-12 18:01:55 +02:00
|
|
|
{
|
|
|
|
unsigned int mb_activity;
|
|
|
|
|
2011-06-08 17:00:59 +02:00
|
|
|
if ( ALT_ACT_MEASURE )
|
2011-05-12 18:01:55 +02:00
|
|
|
{
|
2011-06-08 17:00:59 +02:00
|
|
|
int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Or use and alternative. */
|
2011-06-08 17:00:59 +02:00
|
|
|
mb_activity = alt_activity_measure( cpi, x, use_dc_pred );
|
2011-05-12 18:01:55 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Original activity measure from Tim T's code. */
|
2011-06-08 17:00:59 +02:00
|
|
|
mb_activity = tt_activity_measure( cpi, x );
|
2011-05-12 18:01:55 +02:00
|
|
|
}
|
|
|
|
|
2011-06-08 17:00:59 +02:00
|
|
|
if ( mb_activity < VP8_ACTIVITY_AVG_MIN )
|
|
|
|
mb_activity = VP8_ACTIVITY_AVG_MIN;
|
|
|
|
|
2011-05-12 18:01:55 +02:00
|
|
|
return mb_activity;
|
|
|
|
}
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Calculate an "average" mb activity value for the frame */
|
2011-06-08 17:00:59 +02:00
|
|
|
#define ACT_MEDIAN 0
|
2011-07-26 03:44:59 +02:00
|
|
|
static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
|
2011-05-12 18:01:55 +02:00
|
|
|
{
|
2011-06-08 17:00:59 +02:00
|
|
|
#if ACT_MEDIAN
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Find median: Simple n^2 algorithm for experimentation */
|
2011-06-08 17:00:59 +02:00
|
|
|
{
|
|
|
|
unsigned int median;
|
|
|
|
unsigned int i,j;
|
|
|
|
unsigned int * sortlist;
|
|
|
|
unsigned int tmp;
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Create a list to sort to */
|
2011-06-08 17:00:59 +02:00
|
|
|
CHECK_MEM_ERROR(sortlist,
|
|
|
|
vpx_calloc(sizeof(unsigned int),
|
|
|
|
cpi->common.MBs));
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Copy map to sort list */
|
2011-06-08 17:00:59 +02:00
|
|
|
vpx_memcpy( sortlist, cpi->mb_activity_map,
|
|
|
|
sizeof(unsigned int) * cpi->common.MBs );
|
|
|
|
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Ripple each value down to its correct position */
|
2011-06-08 17:00:59 +02:00
|
|
|
for ( i = 1; i < cpi->common.MBs; i ++ )
|
|
|
|
{
|
|
|
|
for ( j = i; j > 0; j -- )
|
|
|
|
{
|
|
|
|
if ( sortlist[j] < sortlist[j-1] )
|
|
|
|
{
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Swap values */
|
2011-06-08 17:00:59 +02:00
|
|
|
tmp = sortlist[j-1];
|
|
|
|
sortlist[j-1] = sortlist[j];
|
|
|
|
sortlist[j] = tmp;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Even number MBs so estimate median as mean of two either side. */
|
2011-06-08 17:00:59 +02:00
|
|
|
median = ( 1 + sortlist[cpi->common.MBs >> 1] +
|
|
|
|
sortlist[(cpi->common.MBs >> 1) + 1] ) >> 1;
|
|
|
|
|
|
|
|
cpi->activity_avg = median;
|
|
|
|
|
|
|
|
vpx_free(sortlist);
|
|
|
|
}
|
|
|
|
#else
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Simple mean for now */
|
2011-05-12 18:01:55 +02:00
|
|
|
cpi->activity_avg = (unsigned int)(activity_sum/cpi->common.MBs);
|
2011-06-08 17:00:59 +02:00
|
|
|
#endif
|
|
|
|
|
2011-05-12 18:01:55 +02:00
|
|
|
if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN)
|
|
|
|
cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;
|
2011-06-08 17:00:59 +02:00
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Experimental code: return fixed value normalized for several clips */
|
2011-06-08 17:00:59 +02:00
|
|
|
if ( ALT_ACT_MEASURE )
|
|
|
|
cpi->activity_avg = 100000;
|
2011-05-12 18:01:55 +02:00
|
|
|
}
|
|
|
|
|
2011-06-08 17:00:59 +02:00
|
|
|
#define USE_ACT_INDEX 0
#define OUTPUT_NORM_ACT_STATS 0

#if USE_ACT_INDEX
/* Calculate an activity index for each mb. */
static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x )
{
    VP8_COMMON *const cm = &cpi->common;
    int mb_row, mb_col;

    int64_t act;
    int64_t a;
    int64_t b;

#if OUTPUT_NORM_ACT_STATS
    FILE *f = fopen("norm_act.stt", "a");
    fprintf(f, "\n%12d\n", cpi->activity_avg);
#endif

    /* Reset pointer to start of activity map. */
    x->mb_activity_ptr = cpi->mb_activity_map;

    /* Calculate normalized mb activity number. */
    for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
    {
        for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
        {
            /* Raw activity from the map. */
            act = *(x->mb_activity_ptr);

            /* Normalize against the frame average:
             * ratio rounded to nearest, offset so 0 means "average".
             */
            a = act + 4 * cpi->activity_avg;
            b = 4 * act + cpi->activity_avg;

            /* NOTE(review): this writes x->activity_ptr but reads
             * x->mb_activity_ptr; this code is compiled out
             * (USE_ACT_INDEX == 0) — confirm the destination pointer
             * before ever enabling it.
             */
            if (b >= a)
                *(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
            else
                *(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);

#if OUTPUT_NORM_ACT_STATS
            fprintf(f, " %6d", *(x->mb_activity_ptr));
#endif

            /* Advance through the activity map. */
            x->mb_activity_ptr++;
        }

#if OUTPUT_NORM_ACT_STATS
        fprintf(f, "\n");
#endif
    }

#if OUTPUT_NORM_ACT_STATS
    fclose(f);
#endif
}
#endif
|
2011-05-12 18:01:55 +02:00
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Loop through all MBs. Note activity of each, average activity and
|
|
|
|
* calculate a normalized activity for each
|
|
|
|
*/
|
2011-05-19 17:14:13 +02:00
|
|
|
static void build_activity_map( VP8_COMP *cpi )
|
2011-05-12 18:01:55 +02:00
|
|
|
{
|
|
|
|
MACROBLOCK *const x = & cpi->mb;
|
2011-06-08 17:00:59 +02:00
|
|
|
MACROBLOCKD *xd = &x->e_mbd;
|
2011-05-12 18:01:55 +02:00
|
|
|
VP8_COMMON *const cm = & cpi->common;
|
|
|
|
|
2011-06-08 17:00:59 +02:00
|
|
|
#if ALT_ACT_MEASURE
|
|
|
|
YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
|
|
|
|
int recon_yoffset;
|
|
|
|
int recon_y_stride = new_yv12->y_stride;
|
|
|
|
#endif
|
|
|
|
|
2011-05-12 18:01:55 +02:00
|
|
|
int mb_row, mb_col;
|
|
|
|
unsigned int mb_activity;
|
2011-07-26 03:44:59 +02:00
|
|
|
int64_t activity_sum = 0;
|
2011-05-12 18:01:55 +02:00
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* for each macroblock row in image */
|
2011-05-12 18:01:55 +02:00
|
|
|
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
|
|
|
|
{
|
2011-06-08 17:00:59 +02:00
|
|
|
#if ALT_ACT_MEASURE
|
2012-05-21 23:30:56 +02:00
|
|
|
/* reset above block coeffs */
|
2011-06-08 17:00:59 +02:00
|
|
|
xd->up_available = (mb_row != 0);
|
|
|
|
recon_yoffset = (mb_row * recon_y_stride * 16);
|
|
|
|
#endif
|
2012-05-21 23:30:56 +02:00
|
|
|
/* for each macroblock col in image */
|
2011-05-12 18:01:55 +02:00
|
|
|
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
|
|
|
|
{
|
2011-06-08 17:00:59 +02:00
|
|
|
#if ALT_ACT_MEASURE
|
|
|
|
xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
|
|
|
|
xd->left_available = (mb_col != 0);
|
|
|
|
recon_yoffset += 16;
|
|
|
|
#endif
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Copy current mb to a buffer */
|
2012-01-13 01:55:44 +01:00
|
|
|
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
|
2011-06-23 19:54:02 +02:00
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* measure activity */
|
2011-06-08 17:00:59 +02:00
|
|
|
mb_activity = mb_activity_measure( cpi, x, mb_row, mb_col );
|
2011-05-12 18:01:55 +02:00
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Keep frame sum */
|
2011-05-12 18:01:55 +02:00
|
|
|
activity_sum += mb_activity;
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Store MB level activity details. */
|
2011-05-12 18:01:55 +02:00
|
|
|
*x->mb_activity_ptr = mb_activity;
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Increment activity map pointer */
|
2011-05-12 18:01:55 +02:00
|
|
|
x->mb_activity_ptr++;
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* adjust to the next column of source macroblocks */
|
2011-05-12 18:01:55 +02:00
|
|
|
x->src.y_buffer += 16;
|
|
|
|
}
|
|
|
|
|
2011-06-08 17:00:59 +02:00
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* adjust to the next row of mbs */
|
2011-05-12 18:01:55 +02:00
|
|
|
x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
|
2011-06-08 17:00:59 +02:00
|
|
|
|
|
|
|
#if ALT_ACT_MEASURE
|
2012-05-21 23:30:56 +02:00
|
|
|
/* extend the recon for intra prediction */
|
2011-06-08 17:00:59 +02:00
|
|
|
vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16,
|
|
|
|
xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
|
|
|
|
#endif
|
|
|
|
|
2011-05-12 18:01:55 +02:00
|
|
|
}
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Calculate an "average" MB activity */
|
2011-05-12 18:01:55 +02:00
|
|
|
calc_av_activity(cpi, activity_sum);
|
|
|
|
|
2011-06-08 17:00:59 +02:00
|
|
|
#if USE_ACT_INDEX
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Calculate an activity index number of each mb */
|
2011-06-08 17:00:59 +02:00
|
|
|
calc_activity_index( cpi, x );
|
|
|
|
#endif
|
|
|
|
|
2011-05-12 18:01:55 +02:00
|
|
|
}
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Macroblock activity masking */
|
2011-05-12 18:01:55 +02:00
|
|
|
void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
|
|
|
|
{
|
2011-06-08 17:00:59 +02:00
|
|
|
#if USE_ACT_INDEX
|
|
|
|
x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
|
2011-06-21 01:30:26 +02:00
|
|
|
x->errorperbit = x->rdmult * 100 /(110 * x->rddiv);
|
|
|
|
x->errorperbit += (x->errorperbit==0);
|
2011-06-08 17:00:59 +02:00
|
|
|
#else
|
2011-07-26 03:44:59 +02:00
|
|
|
int64_t a;
|
|
|
|
int64_t b;
|
|
|
|
int64_t act = *(x->mb_activity_ptr);
|
2011-05-12 18:01:55 +02:00
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Apply the masking to the RD multiplier. */
|
2011-06-08 17:00:59 +02:00
|
|
|
a = act + (2*cpi->activity_avg);
|
|
|
|
b = (2*act) + cpi->activity_avg;
|
2011-05-12 18:01:55 +02:00
|
|
|
|
2011-07-26 03:44:59 +02:00
|
|
|
x->rdmult = (unsigned int)(((int64_t)x->rdmult*b + (a>>1))/a);
|
2011-06-21 01:30:26 +02:00
|
|
|
x->errorperbit = x->rdmult * 100 /(110 * x->rddiv);
|
|
|
|
x->errorperbit += (x->errorperbit==0);
|
2011-06-08 17:00:59 +02:00
|
|
|
#endif
|
2011-05-12 18:01:55 +02:00
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Activity based Zbin adjustment */
|
2011-06-08 17:00:59 +02:00
|
|
|
adjust_act_zbin(cpi, x);
|
2011-05-12 18:01:55 +02:00
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
|
|
|
static
|
|
|
|
void encode_mb_row(VP8_COMP *cpi,
|
|
|
|
VP8_COMMON *cm,
|
|
|
|
int mb_row,
|
|
|
|
MACROBLOCK *x,
|
|
|
|
MACROBLOCKD *xd,
|
|
|
|
TOKENEXTRA **tp,
|
|
|
|
int *segment_counts,
|
|
|
|
int *totalrate)
|
|
|
|
{
|
|
|
|
int recon_yoffset, recon_uvoffset;
|
|
|
|
int mb_col;
|
2010-07-22 14:07:32 +02:00
|
|
|
int ref_fb_idx = cm->lst_fb_idx;
|
|
|
|
int dst_fb_idx = cm->new_fb_idx;
|
|
|
|
int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
|
|
|
|
int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
|
2011-05-12 18:01:55 +02:00
|
|
|
int map_index = (mb_row * cpi->common.mb_cols);
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-02-09 11:37:03 +01:00
|
|
|
#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
|
|
|
|
const int num_part = (1 << cm->multi_token_partition);
|
|
|
|
TOKENEXTRA * tp_start = cpi->tok;
|
|
|
|
vp8_writer *w;
|
|
|
|
#endif
|
|
|
|
|
2011-01-26 09:29:46 +01:00
|
|
|
#if CONFIG_MULTITHREAD
|
|
|
|
const int nsync = cpi->mt_sync_range;
|
2012-02-09 11:37:03 +01:00
|
|
|
const int rightmost_col = cm->mb_cols + nsync;
|
2011-01-26 09:29:46 +01:00
|
|
|
volatile const int *last_row_current_mb_col;
|
2012-02-09 11:37:03 +01:00
|
|
|
volatile int *current_mb_col = &cpi->mt_current_mb_col[mb_row];
|
2011-01-26 09:29:46 +01:00
|
|
|
|
|
|
|
if ((cpi->b_multi_threaded != 0) && (mb_row != 0))
|
|
|
|
last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];
|
|
|
|
else
|
|
|
|
last_row_current_mb_col = &rightmost_col;
|
|
|
|
#endif
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-02-09 11:37:03 +01:00
|
|
|
#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
|
|
|
|
if(num_part > 1)
|
|
|
|
w= &cpi->bc[1 + (mb_row % num_part)];
|
|
|
|
else
|
|
|
|
w = &cpi->bc[1];
|
|
|
|
#endif
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* reset above block coeffs */
|
2010-08-31 16:49:57 +02:00
|
|
|
xd->above_context = cm->above_context;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
|
|
|
xd->up_available = (mb_row != 0);
|
|
|
|
recon_yoffset = (mb_row * recon_y_stride * 16);
|
|
|
|
recon_uvoffset = (mb_row * recon_uv_stride * 8);
|
|
|
|
|
|
|
|
cpi->tplist[mb_row].start = *tp;
|
2012-05-21 23:30:56 +02:00
|
|
|
/* printf("Main mb_row = %d\n", mb_row); */
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Distance of Mb to the top & bottom edges, specified in 1/8th pel
|
|
|
|
* units as they are always compared to values that are in 1/8th pel
|
|
|
|
*/
|
2010-09-29 14:03:07 +02:00
|
|
|
xd->mb_to_top_edge = -((mb_row * 16) << 3);
|
|
|
|
xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Set up limit values for vertical motion vector components
|
|
|
|
* to prevent them extending beyond the UMV borders
|
|
|
|
*/
|
2010-09-29 14:03:07 +02:00
|
|
|
x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
|
2010-11-11 18:41:07 +01:00
|
|
|
x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
|
2010-09-29 14:03:07 +02:00
|
|
|
+ (VP8BORDERINPIXELS - 16);
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Set the mb activity pointer to the start of the row. */
|
2011-05-12 18:01:55 +02:00
|
|
|
x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* for each macroblock col in image */
|
2010-05-18 17:58:33 +02:00
|
|
|
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
|
|
|
|
{
|
2012-02-09 11:37:03 +01:00
|
|
|
|
|
|
|
#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
|
|
|
|
*tp = cpi->tok;
|
|
|
|
#endif
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Distance of Mb to the left & right edges, specified in
|
|
|
|
* 1/8th pel units as they are always compared to values
|
|
|
|
* that are in 1/8th pel units
|
|
|
|
*/
|
2010-05-18 17:58:33 +02:00
|
|
|
xd->mb_to_left_edge = -((mb_col * 16) << 3);
|
|
|
|
xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Set up limit values for horizontal motion vector components
|
|
|
|
* to prevent them extending beyond the UMV borders
|
|
|
|
*/
|
2010-05-18 17:58:33 +02:00
|
|
|
x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
|
2010-11-11 18:41:07 +01:00
|
|
|
x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16)
|
2010-09-29 14:03:07 +02:00
|
|
|
+ (VP8BORDERINPIXELS - 16);
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2010-07-22 14:07:32 +02:00
|
|
|
xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
|
|
|
|
xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
|
|
|
|
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
|
2010-05-18 17:58:33 +02:00
|
|
|
xd->left_available = (mb_col != 0);
|
|
|
|
|
2010-10-11 23:37:27 +02:00
|
|
|
x->rddiv = cpi->RDDIV;
|
|
|
|
x->rdmult = cpi->RDMULT;
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Copy current mb to a buffer */
|
2012-01-13 01:55:44 +01:00
|
|
|
vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
|
2011-06-23 19:54:02 +02:00
|
|
|
|
2011-01-26 09:29:46 +01:00
|
|
|
#if CONFIG_MULTITHREAD
|
2012-02-09 11:37:03 +01:00
|
|
|
if (cpi->b_multi_threaded != 0)
|
2011-01-26 09:29:46 +01:00
|
|
|
{
|
2012-05-21 23:30:56 +02:00
|
|
|
*current_mb_col = mb_col - 1; /* set previous MB done */
|
2012-02-09 11:37:03 +01:00
|
|
|
|
2011-01-26 09:29:46 +01:00
|
|
|
if ((mb_col & (nsync - 1)) == 0)
|
|
|
|
{
|
2012-02-09 11:37:03 +01:00
|
|
|
while (mb_col > (*last_row_current_mb_col - nsync))
|
2011-01-26 09:29:46 +01:00
|
|
|
{
|
|
|
|
x86_pause_hint();
|
|
|
|
thread_sleep(0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2010-12-17 15:43:39 +01:00
|
|
|
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
|
2011-05-12 18:01:55 +02:00
|
|
|
vp8_activity_masking(cpi, x);
|
2010-10-11 23:37:27 +02:00
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Is segmentation enabled */
|
|
|
|
/* MB level adjustment to quantizer */
|
2010-05-18 17:58:33 +02:00
|
|
|
if (xd->segmentation_enabled)
|
|
|
|
{
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Code to set segment id in xd->mbmi.segment_id for current MB
|
|
|
|
* (with range checking)
|
|
|
|
*/
|
2011-05-12 18:01:55 +02:00
|
|
|
if (cpi->segmentation_map[map_index+mb_col] <= 3)
|
|
|
|
xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[map_index+mb_col];
|
2010-05-18 17:58:33 +02:00
|
|
|
else
|
2010-08-12 22:25:43 +02:00
|
|
|
xd->mode_info_context->mbmi.segment_id = 0;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2011-11-08 18:11:48 +01:00
|
|
|
vp8cx_mb_init_quantizer(cpi, x, 1);
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
|
|
|
else
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Set to Segment 0 by default */
|
|
|
|
xd->mode_info_context->mbmi.segment_id = 0;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2011-05-12 18:01:55 +02:00
|
|
|
x->active_ptr = cpi->active_map + map_index + mb_col;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
|
|
|
if (cm->frame_type == KEY_FRAME)
|
|
|
|
{
|
2012-05-04 12:32:43 +02:00
|
|
|
*totalrate += vp8cx_encode_intra_macroblock(cpi, x, tp);
|
2010-05-18 17:58:33 +02:00
|
|
|
#ifdef MODE_STATS
|
|
|
|
y_modes[xd->mbmi.mode] ++;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2011-10-25 21:14:16 +02:00
|
|
|
*totalrate += vp8cx_encode_inter_macroblock(cpi, x, tp, recon_yoffset, recon_uvoffset, mb_row, mb_col);
|
2010-05-18 17:58:33 +02:00
|
|
|
|
|
|
|
#ifdef MODE_STATS
|
|
|
|
inter_y_modes[xd->mbmi.mode] ++;
|
|
|
|
|
|
|
|
if (xd->mbmi.mode == SPLITMV)
|
|
|
|
{
|
|
|
|
int b;
|
|
|
|
|
|
|
|
for (b = 0; b < xd->mbmi.partition_count; b++)
|
|
|
|
{
|
2010-09-02 22:17:52 +02:00
|
|
|
inter_b_modes[x->partition->bmi[b].mode] ++;
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Special case code for cyclic refresh
|
|
|
|
* If cyclic update enabled then copy xd->mbmi.segment_id; (which
|
|
|
|
* may have been updated based on mode during
|
|
|
|
* vp8cx_encode_inter_macroblock()) back into the global
|
|
|
|
* segmentation map
|
|
|
|
*/
|
2011-10-07 00:49:11 +02:00
|
|
|
if ((cpi->current_layer == 0) &&
|
|
|
|
(cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled))
|
2010-05-18 17:58:33 +02:00
|
|
|
{
|
2011-05-12 18:01:55 +02:00
|
|
|
cpi->segmentation_map[map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* If the block has been refreshed mark it as clean (the
|
|
|
|
* magnitude of the -ve influences how long it will be before
|
|
|
|
* we consider another refresh):
|
|
|
|
* Else if it was coded (last frame 0,0) and has not already
|
|
|
|
* been refreshed then mark it as a candidate for cleanup
|
|
|
|
* next time (marked 0) else mark it as dirty (1).
|
|
|
|
*/
|
2010-08-12 22:25:43 +02:00
|
|
|
if (xd->mode_info_context->mbmi.segment_id)
|
2011-05-12 18:01:55 +02:00
|
|
|
cpi->cyclic_refresh_map[map_index+mb_col] = -1;
|
2010-08-12 22:25:43 +02:00
|
|
|
else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
|
2010-05-18 17:58:33 +02:00
|
|
|
{
|
2011-05-12 18:01:55 +02:00
|
|
|
if (cpi->cyclic_refresh_map[map_index+mb_col] == 1)
|
|
|
|
cpi->cyclic_refresh_map[map_index+mb_col] = 0;
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
|
|
|
else
|
2011-05-12 18:01:55 +02:00
|
|
|
cpi->cyclic_refresh_map[map_index+mb_col] = 1;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
cpi->tplist[mb_row].stop = *tp;
|
|
|
|
|
2012-02-09 11:37:03 +01:00
|
|
|
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
|
|
|
|
/* pack tokens for this MB */
|
|
|
|
{
|
|
|
|
int tok_count = *tp - tp_start;
|
|
|
|
pack_tokens(w, tp_start, tok_count);
|
|
|
|
}
|
|
|
|
#endif
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Increment pointer into gf usage flags structure. */
|
2011-05-12 18:01:55 +02:00
|
|
|
x->gf_active_ptr++;
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Increment the activity mask pointers. */
|
2011-05-12 18:01:55 +02:00
|
|
|
x->mb_activity_ptr++;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* adjust to the next column of macroblocks */
|
2010-05-18 17:58:33 +02:00
|
|
|
x->src.y_buffer += 16;
|
|
|
|
x->src.u_buffer += 8;
|
|
|
|
x->src.v_buffer += 8;
|
|
|
|
|
|
|
|
recon_yoffset += 16;
|
|
|
|
recon_uvoffset += 8;
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Keep track of segment usage */
|
2010-08-12 22:25:43 +02:00
|
|
|
segment_counts[xd->mode_info_context->mbmi.segment_id] ++;
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* skip to next mb */
|
2010-05-18 17:58:33 +02:00
|
|
|
xd->mode_info_context++;
|
2010-09-02 22:17:52 +02:00
|
|
|
x->partition_info++;
|
2010-08-31 16:49:57 +02:00
|
|
|
xd->above_context++;
|
2010-05-18 17:58:33 +02:00
|
|
|
}
|
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* extend the recon for intra prediction */
|
2012-02-09 11:37:03 +01:00
|
|
|
vp8_extend_mb_row( &cm->yv12_fb[dst_fb_idx],
|
|
|
|
xd->dst.y_buffer + 16,
|
|
|
|
xd->dst.u_buffer + 8,
|
|
|
|
xd->dst.v_buffer + 8);
|
|
|
|
|
|
|
|
#if CONFIG_MULTITHREAD
|
|
|
|
if (cpi->b_multi_threaded != 0)
|
|
|
|
*current_mb_col = rightmost_col;
|
|
|
|
#endif
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* this is to account for the border */
|
2010-05-18 17:58:33 +02:00
|
|
|
xd->mode_info_context++;
|
2010-09-02 22:17:52 +02:00
|
|
|
x->partition_info++;
|
2011-01-26 09:29:46 +01:00
|
|
|
}
|
2010-05-18 17:58:33 +02:00
|
|
|
|
2012-05-04 18:46:57 +02:00
|
|
|
/* Per-frame initialization of the shared macroblock encoding context:
 * resets map/activity pointers, wires up source and reconstruction
 * buffers, and primes the reference frame cost tables.
 */
static void init_encode_frame_mb_context(VP8_COMP *cpi)
{
    MACROBLOCK *const x = &cpi->mb;
    VP8_COMMON *const cm = &cpi->common;
    MACROBLOCKD *const xd = &x->e_mbd;

    /* GF active flags data structure. */
    x->gf_active_ptr = (signed char *)cpi->gf_active_flags;

    /* Activity map pointer. */
    x->mb_activity_ptr = cpi->mb_activity_map;

    x->act_zbin_adj = 0;

    x->partition_info = x->pi;

    xd->mode_info_context = cm->mi;
    xd->mode_info_stride = cm->mode_info_stride;

    xd->frame_type = cm->frame_type;

    /* reset intra mode contexts */
    if (cm->frame_type == KEY_FRAME)
        vp8_init_mbmode_probs(cm);

    /* Copy data over into macro block data structures. */
    x->src = *cpi->Source;
    xd->pre = cm->yv12_fb[cm->lst_fb_idx];
    xd->dst = cm->yv12_fb[cm->new_fb_idx];

    /* set up frame for intra coded blocks */
    vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);

    vp8_build_block_offsets(x);

    xd->mode_info_context->mbmi.mode = DC_PRED;
    xd->mode_info_context->mbmi.uv_mode = DC_PRED;

    xd->left_context = &cm->left_context;

    vp8_zero(cpi->count_mb_ref_frame_usage)
    vp8_zero(cpi->ymode_count)
    vp8_zero(cpi->uv_mode_count)

    x->mvc = cm->fc.mvc;

    vpx_memset(cm->above_context, 0,
               sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);

    /* Special case treatment when GF and ARF are not sensible options
     * for reference.
     */
    if (cpi->ref_frame_flags == VP8_LAST_FRAME)
        vp8_calc_ref_frame_costs(x->ref_frame_cost,
                                 cpi->prob_intra_coded, 255, 128);
    else if ((cpi->oxcf.number_of_layers > 1) &&
             (cpi->ref_frame_flags == VP8_GOLD_FRAME))
        vp8_calc_ref_frame_costs(x->ref_frame_cost,
                                 cpi->prob_intra_coded, 1, 255);
    else if ((cpi->oxcf.number_of_layers > 1) &&
             (cpi->ref_frame_flags == VP8_ALTR_FRAME))
        vp8_calc_ref_frame_costs(x->ref_frame_cost,
                                 cpi->prob_intra_coded, 1, 1);
    else
        vp8_calc_ref_frame_costs(x->ref_frame_cost,
                                 cpi->prob_intra_coded,
                                 cpi->prob_last_coded,
                                 cpi->prob_gf_coded);

    /* Full-pixel-only coding masks off sub-pel MV bits. */
    xd->fullpixel_mask = 0xffffffff;
    if (cm->full_pixel)
        xd->fullpixel_mask = 0xfffffff8;
}
|
|
|
|
|
2010-05-18 17:58:33 +02:00
|
|
|
/* Top-level per-frame encode driver.
 *
 * Selects the sub-pixel prediction functions for the frame, resets the
 * per-frame statistics, initializes the quantizer / RD / motion-estimation
 * constants, encodes every macroblock row (either on this thread or
 * interleaved with the worker threads when CONFIG_MULTITHREAD is active),
 * and finally derives the frame-level segment tree probabilities and the
 * percentage of intra-coded macroblocks.
 */
void vp8_encode_frame(VP8_COMP *cpi)
{
    int mb_row;
    MACROBLOCK *const x = & cpi->mb;
    VP8_COMMON *const cm = & cpi->common;
    MACROBLOCKD *const xd = & x->e_mbd;
    TOKENEXTRA *tp = cpi->tok;
    int segment_counts[MAX_MB_SEGMENTS];
    int totalrate;
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
    BOOL_CODER * bc = &cpi->bc[1]; /* bc[0] is for control partition */
    const int num_part = (1 << cm->multi_token_partition);
#endif

    vpx_memset(segment_counts, 0, sizeof(segment_counts));
    totalrate = 0;

    /* Speed 2: either honor a forced speed (negative cpu_used) or let the
     * encoder adapt its speed to the measured encode time.
     */
    if (cpi->compressor_speed == 2)
    {
        if (cpi->oxcf.cpu_used < 0)
            cpi->Speed = -(cpi->oxcf.cpu_used);
        else
            vp8_auto_select_speed(cpi);
    }

    /* Functions setup for all frame types so we can use MC in AltRef */
    if(!cm->use_bilinear_mc_filter)
    {
        xd->subpixel_predict        = vp8_sixtap_predict4x4;
        xd->subpixel_predict8x4     = vp8_sixtap_predict8x4;
        xd->subpixel_predict8x8     = vp8_sixtap_predict8x8;
        xd->subpixel_predict16x16   = vp8_sixtap_predict16x16;
    }
    else
    {
        xd->subpixel_predict        = vp8_bilinear_predict4x4;
        xd->subpixel_predict8x4     = vp8_bilinear_predict8x4;
        xd->subpixel_predict8x8     = vp8_bilinear_predict8x8;
        xd->subpixel_predict16x16   = vp8_bilinear_predict16x16;
    }

    /* Reset per-frame accumulators before the MB loop runs. */
    cpi->prediction_error = 0;
    cpi->intra_error = 0;
    cpi->skip_true_count = 0;
    cpi->tok_count = 0;

#if 0
    /* Experimental code */
    cpi->frame_distortion = 0;
    cpi->last_mb_distortion = 0;
#endif

    xd->mode_info_context = cm->mi;

    vp8_zero(cpi->MVcount);

    vp8_zero(cpi->coef_counts);

    vp8cx_frame_init_quantizer(cpi);

    vp8_initialize_rd_consts(cpi,
                             vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));

    vp8cx_initialize_me_consts(cpi, cm->base_qindex);

    if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
    {
        /* Initialize encode frame context. */
        init_encode_frame_mb_context(cpi);

        /* Build a frame level activity map (consumed by adjust_act_zbin). */
        build_activity_map(cpi);
    }

    /* re-init encode frame context (build_activity_map walks the frame and
     * perturbs the context, so it must be reset before the real pass). */
    init_encode_frame_mb_context(cpi);

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
    /* Open one boolean coder per residual token partition. */
    {
        int i;
        for(i = 0; i < num_part; i++)
        {
            vp8_start_encode(&bc[i], cpi->partition_d[i + 1],
                             cpi->partition_d_end[i + 1]);
            bc[i].error = &cm->error;
        }
    }

#endif

    {
        struct vpx_usec_timer  emr_timer;
        vpx_usec_timer_start(&emr_timer);

#if CONFIG_MULTITHREAD
        if (cpi->b_multi_threaded)
        {
            int i;

            vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, 1, cpi->encoding_thread_count);

            /* -1 marks every row as "no MB finished yet" for the row-level
             * sync between neighbouring threads. */
            for (i = 0; i < cm->mb_rows; i++)
                cpi->mt_current_mb_col[i] = -1;

            for (i = 0; i < cpi->encoding_thread_count; i++)
            {
                sem_post(&cpi->h_event_start_encoding[i]);
            }

            /* This (main) thread encodes rows 0, T+1, 2(T+1), ... while the
             * T workers take the rows in between. */
            for (mb_row = 0; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1))
            {
                vp8_zero(cm->left_context)

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
                tp = cpi->tok;
#else
                tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
#endif

                encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);

                /* adjust to the next row of mbs */
                x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols;
                x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
                x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;

                xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count;
                x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;
                x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count;

                if(mb_row == cm->mb_rows - 1)
                {
                    sem_post(&cpi->h_event_end_encoding); /* signal frame encoding end */
                }
            }

            sem_wait(&cpi->h_event_end_encoding); /* wait for other threads to finish */

            for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++)
            {
                cpi->tok_count += (unsigned int)
                    (cpi->tplist[mb_row].stop - cpi->tplist[mb_row].start);
            }

            /* Merge the per-thread segment counts back into the frame totals. */
            if (xd->segmentation_enabled)
            {
                int i, j;

                /* NOTE(review): this inner check duplicates the outer
                 * condition and is redundant. */
                if (xd->segmentation_enabled)
                {

                    for (i = 0; i < cpi->encoding_thread_count; i++)
                    {
                        for (j = 0; j < 4; j++)
                            segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
                    }
                }
            }

            /* Merge the per-thread rate totals. */
            for (i = 0; i < cpi->encoding_thread_count; i++)
            {
                totalrate += cpi->mb_row_ei[i].totalrate;
            }

        }
        else
#endif
        {
            /* for each macroblock row in image */
            for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
            {
                vp8_zero(cm->left_context)

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
                tp = cpi->tok;
#endif

                encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);

                /* adjust to the next row of mbs */
                x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
                x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
                x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
            }

            cpi->tok_count = (unsigned int)(tp - cpi->tok);
        }

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
        /* Flush the per-partition boolean coders and record their sizes. */
        {
            int i;
            for(i = 0; i < num_part; i++)
            {
                vp8_stop_encode(&bc[i]);
                cpi->partition_sz[i+1] = bc[i].pos;
            }
        }
#endif

        vpx_usec_timer_mark(&emr_timer);
        cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
    }


    // Work out the segment probabilities if segmentation is enabled
    // and needs to be updated
    if (xd->segmentation_enabled && xd->update_mb_segmentation_map)
    {
        int tot_count;
        int i;

        /* Set to defaults */
        vpx_memset(xd->mb_segment_tree_probs, 255 , sizeof(xd->mb_segment_tree_probs));

        tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3];

        if (tot_count)
        {
            /* Probability of the first tree split: segments {0,1} vs {2,3}. */
            xd->mb_segment_tree_probs[0] = ((segment_counts[0] + segment_counts[1]) * 255) / tot_count;

            tot_count = segment_counts[0] + segment_counts[1];

            if (tot_count > 0)
            {
                xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count;
            }

            tot_count = segment_counts[2] + segment_counts[3];

            if (tot_count > 0)
                xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count;

            /* Zero probabilities not allowed */
            for (i = 0; i < MB_FEATURE_TREE_PROBS; i ++)
            {
                if (xd->mb_segment_tree_probs[i] == 0)
                    xd->mb_segment_tree_probs[i] = 1;
            }
        }
    }

    /* projected_frame_size in units of BYTES */
    cpi->projected_frame_size = totalrate >> 8;

    /* Make a note of the percentage MBs coded Intra. */
    if (cm->frame_type == KEY_FRAME)
    {
        cpi->this_frame_percent_intra = 100;
    }
    else
    {
        int tot_modes;

        tot_modes = cpi->count_mb_ref_frame_usage[INTRA_FRAME]
                    + cpi->count_mb_ref_frame_usage[LAST_FRAME]
                    + cpi->count_mb_ref_frame_usage[GOLDEN_FRAME]
                    + cpi->count_mb_ref_frame_usage[ALTREF_FRAME];

        if (tot_modes)
            cpi->this_frame_percent_intra = cpi->count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;

    }

#if ! CONFIG_REALTIME_ONLY
    /* Adjust the projected reference frame usage probability numbers to
     * reflect what we have just seen. This may be useful when we make
     * multiple iterations of the recode loop rather than continuing to use
     * values from the previous frame.
     */
    if ((cm->frame_type != KEY_FRAME) && ((cpi->oxcf.number_of_layers > 1) ||
        (!cm->refresh_alt_ref_frame && !cm->refresh_golden_frame)))
    {
        vp8_convert_rfct_to_prob(cpi);
    }
#endif
}
|
|
|
|
/* Point every BLOCK's src_diff and coeff pointers into the macroblock's
 * packed difference and coefficient buffers.
 *
 * Buffer layout inside x->src_diff: 16 luma 4x4 blocks first (offset 0,
 * stride 16), then four U blocks at offset 256 and four V blocks at
 * offset 320 (stride 8), then the second-order Y2 block at offset 384.
 */
void vp8_setup_block_ptrs(MACROBLOCK *x)
{
    int row, col, idx;

    /* Luma: blocks 0..15, laid out 4x4 within a 16-wide buffer. */
    for (row = 0; row < 4; row++)
    {
        for (col = 0; col < 4; col++)
        {
            x->block[row * 4 + col].src_diff =
                x->src_diff + row * 4 * 16 + col * 4;
        }
    }

    /* Chroma: U blocks 16..19 (base 256) and V blocks 20..23 (base 320)
     * share the same 2x2 layout within an 8-wide buffer. */
    for (row = 0; row < 2; row++)
    {
        for (col = 0; col < 2; col++)
        {
            const int off = row * 4 * 8 + col * 4;

            x->block[16 + row * 2 + col].src_diff = x->src_diff + 256 + off;
            x->block[20 + row * 2 + col].src_diff = x->src_diff + 320 + off;
        }
    }

    /* Second-order (Y2) block. */
    x->block[24].src_diff = x->src_diff + 384;

    /* Each of the 25 blocks owns 16 coefficients. */
    for (idx = 0; idx < 25; idx++)
    {
        x->block[idx].coeff = x->coeff + idx * 16;
    }
}
|
|
|
|
|
|
|
|
/* Set up each BLOCK's source pointer, stride and offset so the encoder can
 * address the 4x4 sub-blocks of the current macroblock: luma blocks read
 * from the packed x->thismb buffer (stride 16), chroma blocks read directly
 * from the source U/V planes.
 */
void vp8_build_block_offsets(MACROBLOCK *x)
{
    int row, col;
    int idx = 0;

    vp8_build_block_doffsets(&x->e_mbd);

    /* Luma: blocks 0..15 source from x->thismb via thismb_ptr. */
    x->thismb_ptr = &x->thismb[0];

    for (row = 0; row < 4; row++)
    {
        for (col = 0; col < 4; col++)
        {
            BLOCK *b = &x->block[idx];

            b->base_src = &x->thismb_ptr;
            b->src_stride = 16;
            b->src = 4 * row * 16 + 4 * col;
            idx++;
        }
    }

    /* Chroma: U blocks 16..19 and V blocks 20..23 share the same 2x2
     * layout and uv stride; only the base plane pointer differs. */
    for (row = 0; row < 2; row++)
    {
        for (col = 0; col < 2; col++)
        {
            BLOCK *ub = &x->block[16 + row * 2 + col];
            BLOCK *vb = &x->block[20 + row * 2 + col];

            ub->base_src = &x->src.u_buffer;
            ub->src_stride = x->src.uv_stride;
            ub->src = 4 * row * ub->src_stride + 4 * col;

            vb->base_src = &x->src.v_buffer;
            vb->src_stride = x->src.uv_stride;
            vb->src = 4 * row * vb->src_stride + 4 * col;
        }
    }
}
|
|
|
|
|
|
|
|
/* Accumulate per-frame intra mode usage counts for the current macroblock:
 * the chosen luma mode and chroma mode histograms, plus (under MODE_STATS)
 * the global key/inter mode tables and per-4x4 B_PRED sub-mode counts.
 */
static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
{
    const MACROBLOCKD *xd = &x->e_mbd;
    const MB_PREDICTION_MODE y_mode = xd->mode_info_context->mbmi.mode;
    const MB_PREDICTION_MODE uv_mode = xd->mode_info_context->mbmi.uv_mode;

#ifdef MODE_STATS
    const int is_key = cpi->common.frame_type == KEY_FRAME;

    /* Global debug histograms, split by frame type. */
    ++(is_key ? uv_modes : inter_uv_modes)[uv_mode];

    if (y_mode == B_PRED)
    {
        unsigned int *const bct = is_key ? b_modes : inter_b_modes;
        int b;

        /* One sub-mode count per 4x4 luma block. */
        for (b = 0; b < 16; b++)
            ++bct[xd->block[b].bmi.mode];
    }

#endif

    ++cpi->ymode_count[y_mode];
    ++cpi->uv_mode_count[uv_mode];
}
|
2011-05-12 18:01:55 +02:00
|
|
|
|
2012-05-21 23:30:56 +02:00
|
|
|
/* Experimental stub function to create a per MB zbin adjustment based on
|
|
|
|
* some previously calculated measure of MB activity.
|
|
|
|
*/
|
2011-06-08 17:00:59 +02:00
|
|
|
/* Experimental stub function to create a per MB zbin adjustment based on
 * some previously calculated measure of MB activity: MBs more active than
 * the frame average get a positive adjustment, quieter MBs a negative one,
 * both computed with rounded integer division.
 */
static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x)
{
#if USE_ACT_INDEX
    x->act_zbin_adj = *(x->mb_activity_ptr);
#else
    const int64_t act = *(x->mb_activity_ptr);
    const int64_t avg = cpi->activity_avg;

    /* Apply the masking to the RD multiplier: the two weighted sums bias
     * the ratio towards/away from the frame-average activity. */
    const int64_t lo = act + 4 * avg;
    const int64_t hi = 4 * act + avg;

    if (act > avg)
        x->act_zbin_adj = (int)((hi + (lo >> 1)) / lo) - 1;
    else
        x->act_zbin_adj = 1 - (int)((lo + (hi >> 1)) / hi);
#endif
}
|
|
|
|
|
2012-05-04 12:32:43 +02:00
|
|
|
/* Encode one macroblock of a frame coded entirely intra (key frame path).
 *
 * Picks the best intra mode (RD search unless running at realtime speed),
 * optionally applies SSIM-tuned zbin adjustment, encodes luma and chroma,
 * tokenizes the result, and reconstructs via the inverse transforms.
 *
 * Returns the rate estimate produced by the mode selection.
 */
int vp8cx_encode_intra_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
{
    MACROBLOCKD *xd = &x->e_mbd;
    int rate;

    /* Full RD mode search, unless compressor_speed 2 forces the faster pick. */
    if (cpi->sf.RD && cpi->compressor_speed != 2)
        vp8_rd_pick_intra_mode(cpi, x, &rate);
    else
        vp8_pick_intra_mode(cpi, x, &rate);

    if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
    {
        adjust_act_zbin( cpi, x );
        vp8_update_zbin_extra(cpi, x);
    }

    /* B_PRED codes luma as 16 independently predicted 4x4 blocks. */
    if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
        vp8_encode_intra4x4mby(x);
    else
        vp8_encode_intra16x16mby(x);

    vp8_encode_intra16x16mbuv(x);

    sum_intra_stats(cpi, x);

    vp8_tokenize_mb(cpi, &x->e_mbd, t);

    /* Reconstruction: B_PRED already inverse-transformed per 4x4 block
     * during vp8_encode_intra4x4mby. */
    if (xd->mode_info_context->mbmi.mode != B_PRED)
        vp8_inverse_transform_mby(xd);

    vp8_dequant_idct_add_uv_block
                    (xd->qcoeff+16*16, xd->dequant_uv,
                     xd->dst.u_buffer, xd->dst.v_buffer,
                     xd->dst.uv_stride, xd->eobs+16);
    return rate;
}
|
|
|
|
#ifdef SPEEDSTATS
|
|
|
|
extern int cnt_pm;
|
|
|
|
#endif
|
|
|
|
|
2010-08-31 16:49:57 +02:00
|
|
|
extern void vp8_fix_contexts(MACROBLOCKD *x);
|
2010-05-18 17:58:33 +02:00
|
|
|
|
|
|
|
/* Encode one macroblock of an inter frame.
 *
 * Runs inter/intra mode selection (RD or fast pick), applies the per-MB
 * quantizer adjustments (segment cyclic refresh, zbin mode boost, SSIM
 * activity masking), encodes the chosen prediction, and either tokenizes
 * the residual or records the block as skipped.
 *
 * Returns the rate estimate produced by the mode selection.
 */
int vp8cx_encode_inter_macroblock
(
    VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
    int recon_yoffset, int recon_uvoffset,
    int mb_row, int mb_col
)
{
    MACROBLOCKD *const xd = &x->e_mbd;
    int intra_error = 0;
    int rate;
    int distortion;

    x->skip = 0;

    /* Encode-breakout threshold can be overridden per segment. */
    if (xd->segmentation_enabled)
        x->encode_breakout = cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
    else
        x->encode_breakout = cpi->oxcf.encode_breakout;

#if CONFIG_TEMPORAL_DENOISING
    /* Reset the best sse mode/mv for each macroblock. */
    x->best_reference_frame = INTRA_FRAME;
    x->best_zeromv_reference_frame = INTRA_FRAME;
    x->best_sse_inter_mode = 0;
    x->best_sse_mv.as_int = 0;
    x->need_to_clamp_best_mvs = 0;
#endif

    if (cpi->sf.RD)
    {
        int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled;

        /* Are we using the fast quantizer for the mode selection? */
        if(cpi->sf.use_fastquant_for_pick)
        {
            cpi->mb.quantize_b      = vp8_fast_quantize_b;
            cpi->mb.quantize_b_pair = vp8_fast_quantize_b_pair;

            /* the fast quantizer does not use zbin_extra, so
             * do not recalculate */
            cpi->zbin_mode_boost_enabled = 0;
        }
        vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
                               &distortion, &intra_error);

        /* switch back to the regular quantizer for the encode */
        if (cpi->sf.improved_quant)
        {
            cpi->mb.quantize_b      = vp8_regular_quantize_b;
            cpi->mb.quantize_b_pair = vp8_regular_quantize_b_pair;
        }

        /* restore cpi->zbin_mode_boost_enabled */
        cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled;

    }
    else
    {
        vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
                            &distortion, &intra_error, mb_row, mb_col);
    }

    cpi->prediction_error += distortion;
    cpi->intra_error += intra_error;

    if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
    {
        /* Adjust the zbin based on this MB rate. */
        adjust_act_zbin( cpi, x );
    }

#if 0
    /* Experimental RD code */
    cpi->frame_distortion += distortion;
    cpi->last_mb_distortion = distortion;
#endif

    /* MB level adjustment to quantizer setup */
    if (xd->segmentation_enabled)
    {
        /* If cyclic update enabled */
        if (cpi->current_layer == 0 && cpi->cyclic_refresh_mode_enabled)
        {
            /* Clear segment_id back to 0 if not coded (last frame 0,0) */
            if ((xd->mode_info_context->mbmi.segment_id == 1) &&
                ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV)))
            {
                xd->mode_info_context->mbmi.segment_id = 0;

                /* segment_id changed, so update */
                vp8cx_mb_init_quantizer(cpi, x, 1);
            }
        }
    }

    {
        /* Experimental code. Special case for gf and arf zeromv modes.
         * Increase zbin size to supress noise
         */
        cpi->zbin_mode_boost = 0;
        if (cpi->zbin_mode_boost_enabled)
        {
            if ( xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME )
            {
                if (xd->mode_info_context->mbmi.mode == ZEROMV)
                {
                    if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
                        cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
                    else
                        cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
                }
                else if (xd->mode_info_context->mbmi.mode == SPLITMV)
                    cpi->zbin_mode_boost = 0;
                else
                    cpi->zbin_mode_boost = MV_ZBIN_BOOST;
            }
        }

        /* The fast quantizer doesn't use zbin_extra, only do so with
         * the regular quantizer. */
        if (cpi->sf.improved_quant)
            vp8_update_zbin_extra(cpi, x);
    }

    cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;

    if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
    {
        /* Mode selection chose an intra mode: encode it as such. */
        vp8_encode_intra16x16mbuv(x);

        if (xd->mode_info_context->mbmi.mode == B_PRED)
        {
            vp8_encode_intra4x4mby(x);
        }
        else
        {
            vp8_encode_intra16x16mby(x);
        }

        sum_intra_stats(cpi, x);
    }
    else
    {
        int ref_fb_idx;

        /* Select the frame buffer holding the chosen reference frame. */
        if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
            ref_fb_idx = cpi->common.lst_fb_idx;
        else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
            ref_fb_idx = cpi->common.gld_fb_idx;
        else
            ref_fb_idx = cpi->common.alt_fb_idx;

        xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
        xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
        xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;

        /* Skipped MBs still need the prediction built for reconstruction. */
        if (!x->skip)
        {
            vp8_encode_inter16x16(x);
        }
        else
            vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
                                           xd->dst.u_buffer, xd->dst.v_buffer,
                                           xd->dst.y_stride, xd->dst.uv_stride);

    }

    if (!x->skip)
    {
        vp8_tokenize_mb(cpi, xd, t);

        /* B_PRED already inverse-transformed its 4x4 blocks during encode. */
        if (xd->mode_info_context->mbmi.mode != B_PRED)
            vp8_inverse_transform_mby(xd);

        vp8_dequant_idct_add_uv_block
                        (xd->qcoeff+16*16, xd->dequant_uv,
                         xd->dst.u_buffer, xd->dst.v_buffer,
                         xd->dst.uv_stride, xd->eobs+16);
    }
    else
    {
        /* always set mb_skip_coeff as it is needed by the loopfilter */
        xd->mode_info_context->mbmi.mb_skip_coeff = 1;

        if (cpi->common.mb_no_coeff_skip)
        {
            cpi->skip_true_count ++;
            vp8_fix_contexts(xd);
        }
        else
        {
            /* Skip signalling is off: emit explicit empty-block tokens. */
            vp8_stuff_mb(cpi, xd, t);
        }
    }

    return rate;
}
|