Merge t8x8 experiments

Change-Id: I8e9b6b154e1a0d0cb42d596366380d69c00ac15f
This commit is contained in:
Yaowu Xu 2012-02-28 17:11:12 -08:00
parent 3ceb43104f
commit 89ee68b1f7
40 changed files with 57 additions and 815 deletions

View File

@ -209,9 +209,7 @@ void vp8_create_common(VP8_COMMON *oci)
vp8_default_bmode_probs(oci->fc.bmode_prob);
#if CONFIG_T8X8
oci->txfm_mode = ONLY_4X4;
#endif
oci->mb_no_coeff_skip = 1;
oci->comp_pred_mode = HYBRID_PREDICTION;
oci->no_lpf = 0;

View File

@ -22,7 +22,6 @@ const unsigned char vp8_block2above[25] =
0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8
};
#if CONFIG_T8X8
const unsigned char vp8_block2left_8x8[25] =
{
0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8
@ -31,6 +30,4 @@ const unsigned char vp8_block2above_8x8[25] =
{
0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8
};
#endif

View File

@ -183,9 +183,7 @@ typedef struct
MB_PREDICTION_MODE second_mode, second_uv_mode;
#endif
MV_REFERENCE_FRAME ref_frame, second_ref_frame;
#if CONFIG_T8X8
TX_SIZE txfm_size;
#endif
int_mv mv, second_mv;
unsigned char partitioning;
unsigned char mb_skip_coeff; /* does this mb have coefficients at all, 1=no coefficients, 0=need decode tokens */

View File

@ -183,7 +183,6 @@ const vp8_prob vp8_coef_update_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTE
},
},
};
#if CONFIG_T8X8
const vp8_prob vp8_coef_update_probs_8x8 [BLOCK_TYPES]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
@ -359,4 +358,3 @@ const vp8_prob vp8_coef_update_probs_8x8 [BLOCK_TYPES]
},
};
#endif

View File

@ -60,7 +60,6 @@ DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]) =
9, 12, 13, 10,
7, 11, 14, 15,
};
#if CONFIG_T8X8
DECLARE_ALIGNED(64, cuchar, vp8_coef_bands_8x8[64]) = { 0, 1, 2, 3, 5, 4, 4, 5,
5, 3, 6, 3, 5, 4, 6, 6,
6, 5, 5, 6, 6, 6, 6, 6,
@ -77,7 +76,6 @@ DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]) =
35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51,
58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63,
};
#endif
DECLARE_ALIGNED(16, const short, vp8_default_inv_zig_zag[16]) =
{
@ -88,9 +86,7 @@ DECLARE_ALIGNED(16, const short, vp8_default_inv_zig_zag[16]) =
};
DECLARE_ALIGNED(16, short, vp8_default_zig_zag_mask[16]);
#if CONFIG_T8X8
DECLARE_ALIGNED(64, short, vp8_default_zig_zag_mask_8x8[64]);//int64_t
#endif
/* Array indices are identical to previously-existing CONTEXT_NODE indices */
@ -132,12 +128,10 @@ void vp8_init_scan_order_mask()
{
vp8_default_zig_zag_mask[vp8_default_zig_zag1d[i]] = 1 << i;
}
#if CONFIG_T8X8
for (i = 0; i < 64; i++)
{
vp8_default_zig_zag_mask_8x8[vp8_default_zig_zag1d_8x8[i]] = 1 << i;
}
#endif
}
static void init_bit_tree(vp8_tree_index *p, int n)
@ -184,12 +178,9 @@ vp8_extra_bit_struct vp8_extra_bits[12] =
void vp8_default_coef_probs(VP8_COMMON *pc)
{
#if CONFIG_T8X8
int h;
#endif
vpx_memcpy(pc->fc.coef_probs, default_coef_probs,
sizeof(default_coef_probs));
#if CONFIG_T8X8
h = 0;
do
{
@ -213,7 +204,7 @@ void vp8_default_coef_probs(VP8_COMMON *pc)
while (++i < COEF_BANDS);
}
while (++h < BLOCK_TYPES);
#endif
}
void vp8_coef_tree_initialize()

View File

@ -63,9 +63,7 @@ extern vp8_extra_bit_struct vp8_extra_bits[12]; /* indexed by token value */
#define COEF_BANDS 8
extern DECLARE_ALIGNED(16, const unsigned char, vp8_coef_bands[16]);
#if CONFIG_T8X8
extern DECLARE_ALIGNED(64, const unsigned char, vp8_coef_bands_8x8[64]);
#endif
/* Inside dimension is 3-valued measure of nearby complexity, that is,
the extent to which nearby coefficients are nonzero. For the first
@ -89,18 +87,14 @@ extern DECLARE_ALIGNED(64, const unsigned char, vp8_coef_bands_8x8[64]);
extern DECLARE_ALIGNED(16, const unsigned char, vp8_prev_token_class[MAX_ENTROPY_TOKENS]);
extern const vp8_prob vp8_coef_update_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#if CONFIG_T8X8
extern const vp8_prob vp8_coef_update_probs_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
struct VP8Common;
void vp8_default_coef_probs(struct VP8Common *);
extern DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]);
extern DECLARE_ALIGNED(16, const short, vp8_default_inv_zig_zag[16]);
extern short vp8_default_zig_zag_mask[16];
#if CONFIG_T8X8
extern DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]);
extern short vp8_default_zig_zag_mask_8x8[64];//int64_t
#endif
void vp8_coef_tree_initialize(void);
#endif

View File

@ -30,11 +30,9 @@ void vp8_machine_specific_config(VP8_COMMON *ctx)
rtcd->idct.idct1_scalar_add = vp8_dc_only_idct_add_c;
rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_c;
rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_c;
#if CONFIG_T8X8
rtcd->idct.idct8 = vp8_short_idct8x8_c;
rtcd->idct.idct1_scalar_add_8x8 = vp8_dc_only_idct_add_8x8_c;
rtcd->idct.ihaar2 = vp8_short_ihaar2x2_c;
#endif
rtcd->recon.copy16x16 = vp8_copy_mem16x16_c;
rtcd->recon.copy8x8 = vp8_copy_mem8x8_c;
rtcd->recon.avg16x16 = vp8_avg_mem16x16_c;

View File

@ -31,7 +31,7 @@
#include "arm/idct_arm.h"
#endif
#if CONFIG_T8X8
#ifndef vp8_idct_idct8
#define vp8_idct_idct8 vp8_short_idct8x8_c
#endif
@ -57,7 +57,7 @@ extern prototype_idct(vp8_idct_ihaar2_1);
#endif
extern prototype_idct_scalar_add(vp8_idct_idct1_scalar_add_8x8);
#endif
#ifndef vp8_idct_idct1
#define vp8_idct_idct1 vp8_short_idct4x4llm_1_c
@ -98,13 +98,11 @@ typedef struct
vp8_second_order_fn_t iwalsh1;
vp8_second_order_fn_t iwalsh16;
#if CONFIG_T8X8
vp8_idct_fn_t idct8;
vp8_idct_fn_t idct8_1;
vp8_idct_scalar_add_fn_t idct1_scalar_add_8x8;
vp8_idct_fn_t ihaar2;
vp8_idct_fn_t ihaar2_1;
#endif
} vp8_idct_rtcd_vtable_t;
#if CONFIG_RUNTIME_CPU_DETECT

View File

@ -200,7 +200,7 @@ void vp8_short_inv_walsh4x4_1_c(short *input, short *output)
}
}
#if CONFIG_T8X8
void vp8_dc_only_idct_add_8x8_c(short input_dc,
unsigned char *pred_ptr,
unsigned char *dst_ptr,
@ -408,4 +408,3 @@ void vp8_short_ihaar2x2_c(short *input, short *output, int pitch)
op[8] = (ip[0] - ip[1] - ip[4] + ip[8])>>1;
}
#endif

View File

@ -24,7 +24,6 @@ static void recon_dcblock(MACROBLOCKD *x)
}
}
#if CONFIG_T8X8
static void recon_dcblock_8x8(MACROBLOCKD *x)
{
BLOCKD *b = &x->block[24]; //for coeff 0, 2, 8, 10
@ -34,7 +33,7 @@ static void recon_dcblock_8x8(MACROBLOCKD *x)
x->block[12].dqcoeff[0] = b->diff[8];
}
#endif
void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch)
{
@ -99,7 +98,7 @@ void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x
}
#if CONFIG_T8X8
void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd, short *input_dqcoeff, short *output_coeff, int pitch)//pay attention to use when 8x8
{
// int b,i;
@ -171,4 +170,4 @@ void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCK
}
}
#endif

View File

@ -20,11 +20,9 @@ extern void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBL
extern void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
extern void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
#if CONFIG_T8X8
extern void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd, short *input_dqcoeff, short *output_coeff, int pitch);
extern void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
extern void vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
extern void vp8_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
#endif
#endif

View File

@ -72,7 +72,6 @@ void vp8_loop_filter_bh_c(unsigned char *y_ptr, unsigned char *u_ptr,
vp8_loop_filter_horizontal_edge_c(v_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
}
#if CONFIG_T8X8
void vp8_loop_filter_bh8x8_c(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
loop_filter_info *lfi)
@ -80,7 +79,6 @@ void vp8_loop_filter_bh8x8_c(unsigned char *y_ptr, unsigned char *u_ptr,
vp8_mbloop_filter_horizontal_edge_c(
y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
}
#endif
void vp8_loop_filter_bhs_c(unsigned char *y_ptr, int y_stride,
const unsigned char *blimit)
@ -106,7 +104,6 @@ void vp8_loop_filter_bv_c(unsigned char *y_ptr, unsigned char *u_ptr,
vp8_loop_filter_vertical_edge_c(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
}
#if CONFIG_T8X8
void vp8_loop_filter_bv8x8_c(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
loop_filter_info *lfi)
@ -115,8 +112,6 @@ void vp8_loop_filter_bv8x8_c(unsigned char *y_ptr, unsigned char *u_ptr,
y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
}
#endif
void vp8_loop_filter_bvs_c(unsigned char *y_ptr, int y_stride,
const unsigned char *blimit)
{
@ -348,9 +343,7 @@ void vp8_loop_filter_frame
const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode];
const int seg = mode_info_context->mbmi.segment_id;
const int ref_frame = mode_info_context->mbmi.ref_frame;
#if CONFIG_T8X8
int tx_type = mode_info_context->mbmi.txfm_size;
#endif
filter_level = lfi_n->lvl[seg][ref_frame][mode_index];
if (filter_level)
@ -369,12 +362,10 @@ void vp8_loop_filter_frame
if (!skip_lf)
{
#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_loop_filter_bv8x8_c
(y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
else
#endif
LF_INVOKE(&cm->rtcd.loopfilter, normal_b_v)
(y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
@ -387,12 +378,10 @@ void vp8_loop_filter_frame
if (!skip_lf)
{
#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_loop_filter_bh8x8_c
(y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
else
#endif
LF_INVOKE(&cm->rtcd.loopfilter, normal_b_h)
(y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
}
@ -479,9 +468,7 @@ void vp8_loop_filter_frame_yonly
const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode];
const int seg = mode_info_context->mbmi.segment_id;
const int ref_frame = mode_info_context->mbmi.ref_frame;
#if CONFIG_T8X8
int tx_type = mode_info_context->mbmi.txfm_size;
#endif
filter_level = lfi_n->lvl[seg][ref_frame][mode_index];
@ -501,12 +488,10 @@ void vp8_loop_filter_frame_yonly
if (!skip_lf)
{
#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_loop_filter_bv8x8_c
(y_ptr, 0, 0, post->y_stride, 0, &lfi);
else
#endif
LF_INVOKE(&cm->rtcd.loopfilter, normal_b_v)
(y_ptr, 0, 0, post->y_stride, 0, &lfi);
}
@ -518,12 +503,10 @@ void vp8_loop_filter_frame_yonly
if (!skip_lf)
{
#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_loop_filter_bh8x8_c
(y_ptr, 0, 0, post->y_stride, 0, &lfi);
else
#endif
LF_INVOKE(&cm->rtcd.loopfilter, normal_b_h)
(y_ptr, 0, 0, post->y_stride, 0, &lfi);
}

View File

@ -53,9 +53,7 @@ typedef struct frame_contexts
#endif
vp8_prob sub_mv_ref_prob [VP8_SUBMVREFS-1];
vp8_prob coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#if CONFIG_T8X8
vp8_prob coef_probs_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
MV_CONTEXT mvc[2];
MV_CONTEXT pre_mvc[2]; /* not to calculate the mvcost for the frame if mvc doesn't change. */
#if CONFIG_HIGH_PRECISION_MV
@ -84,14 +82,12 @@ typedef enum
NB_PREDICTION_TYPES = 3,
} COMPPREDMODE_TYPE;
#if CONFIG_T8X8
/* TODO: allows larger transform */
typedef enum
{
ONLY_4X4 = 0,
ALLOW_8X8 = 1
} TXFM_MODE;
#endif /* CONFIG_T8X8 */
typedef struct VP8_COMMON_RTCD
{
@ -150,9 +146,7 @@ typedef struct VP8Common
/* profile settings */
int experimental;
int mb_no_coeff_skip;
#if CONFIG_T8X8
TXFM_MODE txfm_mode;
#endif
COMPPREDMODE_TYPE comp_pred_mode;
int no_lpf;
int use_bilinear_mc_filter;

View File

@ -158,7 +158,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
MB_PREDICTION_MODE mode;
int i;
#if CONFIG_T8X8
int tx_type;
if( pbi->common.txfm_mode==ONLY_4X4 )
{
@ -175,7 +174,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
}
tx_type = xd->mode_info_context->mbmi.txfm_size;
#endif
if (xd->mode_info_context->mbmi.mb_skip_coeff)
{
@ -183,19 +181,14 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
}
else if (!vp8dx_bool_error(xd->current_bc))
{
#if CONFIG_T8X8
for(i = 0; i < 25; i++)
{
xd->block[i].eob = 0;
xd->eobs[i] = 0;
}
if ( tx_type == TX_8X8 )
{
eobtotal = vp8_decode_mb_tokens_8x8(pbi, xd);
}
else
#endif
eobtotal = vp8_decode_mb_tokens(pbi, xd);
#ifdef DEC_DEBUG
if (dec_debug) {
@ -360,7 +353,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
{
BLOCKD *b = &xd->block[24];
#if CONFIG_T8X8
if( tx_type == TX_8X8 )
{
DEQUANT_INVOKE(&pbi->dequant, block_2x2)(b);
@ -388,11 +381,8 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
(xd->qcoeff, xd->block[0].dequant,
xd->predictor, xd->dst.y_buffer,
xd->dst.y_stride, xd->eobs, xd->block[24].diff, xd);
}
else
#endif
{
DEQUANT_INVOKE(&pbi->dequant, block)(b);
if (xd->eobs[24] > 1)
@ -419,18 +409,13 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
xd->dst.y_stride, xd->eobs, xd->block[24].diff);
}
}
#if CONFIG_T8X8
if( tx_type == TX_8X8 )
{
DEQUANT_INVOKE (&pbi->dequant, idct_add_uv_block_8x8)//
(xd->qcoeff+16*16, xd->block[16].dequant,
xd->predictor+16*16, xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.uv_stride, xd->eobs+16, xd);//
}
else
#endif
if(xd->mode_info_context->mbmi.mode!=I8X8_PRED)
else if(xd->mode_info_context->mbmi.mode!=I8X8_PRED)
DEQUANT_INVOKE (&pbi->dequant, idct_add_uv_block)
(xd->qcoeff+16*16, xd->block[16].dequant,
xd->predictor+16*16, xd->dst.u_buffer, xd->dst.v_buffer,
@ -1086,9 +1071,7 @@ int vp8_decode_frame(VP8D_COMP *pbi)
}
/* Read the loop filter level and type */
#if CONFIG_T8X8
pc->txfm_mode = (TXFM_MODE) vp8_read_bit(bc);
#endif
pc->filter_type = (LOOPFILTERTYPE) vp8_read_bit(bc);
pc->filter_level = vp8_read_literal(bc, 6);
@ -1242,7 +1225,7 @@ int vp8_decode_frame(VP8D_COMP *pbi)
}
}
}
#if CONFIG_T8X8
if(pbi->common.txfm_mode == ALLOW_8X8 && vp8_read_bit(bc))
{
// read coef probability tree
@ -1261,7 +1244,7 @@ int vp8_decode_frame(VP8D_COMP *pbi)
}
}
}
#endif
vpx_memcpy(&xd->pre, &pc->yv12_fb[pc->lst_fb_idx], sizeof(YV12_BUFFER_CONFIG));
vpx_memcpy(&xd->dst, &pc->yv12_fb[pc->new_fb_idx], sizeof(YV12_BUFFER_CONFIG));

View File

@ -17,10 +17,8 @@
extern void vp8_short_idct4x4llm_c(short *input, short *output, int pitch) ;
extern void vp8_short_idct4x4llm_1_c(short *input, short *output, int pitch);
#if CONFIG_T8X8
extern void vp8_short_idct8x8_c(short *input, short *output, int pitch);
extern void vp8_short_idct8x8_1_c(short *input, short *output, int pitch);
#endif
#ifdef DEC_DEBUG
extern int dec_debug;
@ -121,7 +119,6 @@ void vp8_dequant_dc_idct_add_c(short *input, short *dq, unsigned char *pred,
}
}
#if CONFIG_T8X8
void vp8_dequantize_b_2x2_c(BLOCKD *d)
{
int i;
@ -330,4 +327,3 @@ void vp8_dequant_dc_idct_add_8x8_c(short *input, short *dq, unsigned char *pred,
#endif
}
#endif

View File

@ -42,7 +42,6 @@
unsigned char *pre, unsigned char *dst_u, \
unsigned char *dst_v, int stride, char *eobs)
#if CONFIG_T8X8
#define prototype_dequant_dc_idct_add_y_block_8x8(sym) \
void sym(short *q, short *dq, \
unsigned char *pre, unsigned char *dst, \
@ -59,8 +58,6 @@
unsigned char *dst_v, int stride, char *eobs, \
MACROBLOCKD *xd)
#endif
#if ARCH_X86 || ARCH_X86_64
#include "x86/dequantize_x86.h"
#endif
@ -99,7 +96,7 @@ extern prototype_dequant_idct_add_y_block(vp8_dequant_idct_add_y_block);
#endif
extern prototype_dequant_idct_add_uv_block(vp8_dequant_idct_add_uv_block);
#if CONFIG_T8X8
#ifndef vp8_dequant_block_2x2
#define vp8_dequant_block_2x2 vp8_dequantize_b_2x2_c
#endif
@ -130,7 +127,7 @@ extern prototype_dequant_idct_add_y_block_8x8(vp8_dequant_idct_add_y_block_8x8);
#endif
extern prototype_dequant_idct_add_uv_block_8x8(vp8_dequant_idct_add_uv_block_8x8);
#endif
typedef prototype_dequant_block((*vp8_dequant_block_fn_t));
@ -144,13 +141,12 @@ typedef prototype_dequant_idct_add_y_block((*vp8_dequant_idct_add_y_block_fn_t))
typedef prototype_dequant_idct_add_uv_block((*vp8_dequant_idct_add_uv_block_fn_t));
#if CONFIG_T8X8
typedef prototype_dequant_dc_idct_add_y_block_8x8((*vp8_dequant_dc_idct_add_y_block_fn_t_8x8));
typedef prototype_dequant_idct_add_y_block_8x8((*vp8_dequant_idct_add_y_block_fn_t_8x8));
typedef prototype_dequant_idct_add_uv_block_8x8((*vp8_dequant_idct_add_uv_block_fn_t_8x8));
#endif
typedef struct
{
vp8_dequant_block_fn_t block;
@ -159,14 +155,12 @@ typedef struct
vp8_dequant_dc_idct_add_y_block_fn_t dc_idct_add_y_block;
vp8_dequant_idct_add_y_block_fn_t idct_add_y_block;
vp8_dequant_idct_add_uv_block_fn_t idct_add_uv_block;
#if CONFIG_T8X8
vp8_dequant_block_fn_t block_2x2;
vp8_dequant_idct_add_fn_t idct_add_8x8;
vp8_dequant_dc_idct_add_fn_t dc_idct_add_8x8;
vp8_dequant_dc_idct_add_y_block_fn_t_8x8 dc_idct_add_y_block_8x8;
vp8_dequant_idct_add_y_block_fn_t_8x8 idct_add_y_block_8x8;
vp8_dequant_idct_add_uv_block_fn_t_8x8 idct_add_uv_block_8x8;
#endif
} vp8_dequant_rtcd_vtable_t;
#if CONFIG_RUNTIME_CPU_DETECT

View File

@ -28,7 +28,6 @@ DECLARE_ALIGNED(16, static const unsigned char, coef_bands_x[16]) =
6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X,
6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 7 * OCB_X
};
#if CONFIG_T8X8
DECLARE_ALIGNED(64, static const unsigned char, coef_bands_x_8x8[64]) = {
0 * OCB_X, 1 * OCB_X, 2 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 4 * OCB_X, 5 * OCB_X,
5 * OCB_X, 3 * OCB_X, 6 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 6 * OCB_X, 6 * OCB_X,
@ -39,7 +38,7 @@ DECLARE_ALIGNED(64, static const unsigned char, coef_bands_x_8x8[64]) = {
7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X,
7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X,
};
#endif
#define EOB_CONTEXT_NODE 0
#define ZERO_CONTEXT_NODE 1
#define ONE_CONTEXT_NODE 2
@ -166,7 +165,7 @@ DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]);
range = range - split; \
NORMALIZE \
}
#if CONFIG_T8X8
#define DECODE_AND_LOOP_IF_ZERO_8x8_2(probability,branch) \
{ \
split = 1 + ((( probability*(range-1) ) ) >> 8); \
@ -207,7 +206,7 @@ DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]);
range = range - split; \
NORMALIZE \
}
#endif
#define DECODE_SIGN_WRITE_COEFF_AND_CHECK_EXIT(val) \
DECODE_AND_APPLYSIGN(val) \
Prob = coef_probs + (ENTROPY_NODES*2); \
@ -218,7 +217,7 @@ DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]);
qcoeff_ptr [ 15 ] = (INT16) v; \
goto BLOCK_FINISHED;
#if CONFIG_T8X8
#define DECODE_SIGN_WRITE_COEFF_AND_CHECK_EXIT_8x8_2(val) \
DECODE_AND_APPLYSIGN(val) \
Prob = coef_probs + (ENTROPY_NODES*2); \
@ -237,7 +236,7 @@ DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]);
goto DO_WHILE_8x8; }\
qcoeff_ptr [ scan[63] ] = (INT16) v; \
goto BLOCK_FINISHED_8x8;
#endif
#define DECODE_EXTRABIT_AND_ADJUST_VAL(prob, bits_count)\
split = 1 + (((range-1) * prob) >> 8); \
@ -255,7 +254,7 @@ DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]);
}\
NORMALIZE
#if CONFIG_T8X8
int vp8_decode_mb_tokens_8x8(VP8D_COMP *dx, MACROBLOCKD *x)
{
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context;
@ -580,7 +579,7 @@ BLOCK_FINISHED_8x8:
return eobtotal;
}
#endif
int vp8_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *xd)
{
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;

View File

@ -16,8 +16,6 @@
void vp8_reset_mb_tokens_context(MACROBLOCKD *x);
int vp8_decode_mb_tokens(VP8D_COMP *, MACROBLOCKD *);
#if CONFIG_T8X8
int vp8_decode_mb_tokens_8x8(VP8D_COMP *, MACROBLOCKD *);
#endif
#endif /* DETOKENIZE_H */

View File

@ -21,17 +21,12 @@ void vp8_dmachine_specific_config(VP8D_COMP *pbi)
/* Pure C: */
#if CONFIG_RUNTIME_CPU_DETECT
pbi->mb.rtcd = &pbi->common.rtcd;
#if CONFIG_T8X8
pbi->dequant.block_2x2 = vp8_dequantize_b_2x2_c;
pbi->dequant.idct_add_8x8 = vp8_dequant_idct_add_8x8_c;
pbi->dequant.dc_idct_add_8x8 = vp8_dequant_dc_idct_add_8x8_c;
pbi->dequant.dc_idct_add_y_block_8x8 = vp8_dequant_dc_idct_add_y_block_8x8_c;
pbi->dequant.idct_add_y_block_8x8 = vp8_dequant_idct_add_y_block_8x8_c;
pbi->dequant.idct_add_uv_block_8x8 = vp8_dequant_idct_add_uv_block_8x8_c;
#endif
pbi->dequant.block = vp8_dequantize_b_c;
pbi->dequant.idct_add = vp8_dequant_idct_add_c;
pbi->dequant.dc_idct_add = vp8_dequant_dc_idct_add_c;

View File

@ -123,7 +123,7 @@ void vp8_dequant_idct_add_uv_block_c
}
}
#if CONFIG_T8X8
void vp8_dequant_dc_idct_add_y_block_8x8_c
(short *q, short *dq, unsigned char *pre,
unsigned char *dst, int stride, char *eobs, short *dc, MACROBLOCKD *xd)
@ -163,4 +163,4 @@ void vp8_dequant_idct_add_uv_block_8x8_c
vp8_dequant_idct_add_8x8_c (q, dq, pre, dstv, 8, stride);
}
#endif

View File

@ -43,16 +43,12 @@ typedef struct
typedef struct
{
int const *scan;
#if CONFIG_T8X8
int const *scan_8x8;
#endif
UINT8 const *ptr_block2leftabove;
vp8_tree_index const *vp8_coef_tree_ptr;
unsigned char *norm_ptr;
UINT8 *ptr_coef_bands_x;
#if CONFIG_T8X8
UINT8 *ptr_coef_bands_x_8x8;
#endif
ENTROPY_CONTEXT_PLANES *A;
ENTROPY_CONTEXT_PLANES *L;
@ -61,9 +57,7 @@ typedef struct
BOOL_DECODER *current_bc;
vp8_prob const *coef_probs[4];
#if CONFIG_T8X8
vp8_prob const *coef_probs_8x8[4];
#endif
UINT8 eob[25];

View File

@ -35,9 +35,7 @@ unsigned __int64 Sectionbits[500];
#ifdef ENTROPY_STATS
int intra_mode_stats[10][10][10];
static unsigned int tree_update_hist [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] [2];
#if CONFIG_T8X8
static unsigned int tree_update_hist_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] [2];
#endif
extern unsigned int active_section;
#endif
@ -1685,9 +1683,7 @@ static int default_coef_context_savings(VP8_COMP *cpi)
int vp8_estimate_entropy_savings(VP8_COMP *cpi)
{
int savings = 0;
#if CONFIG_T8X8
int i=0;
#endif
VP8_COMMON *const cm = & cpi->common;
const int *const rfct = cpi->count_mb_ref_frame_usage;
const int rf_intra = rfct[INTRA_FRAME];
@ -1761,7 +1757,7 @@ int vp8_estimate_entropy_savings(VP8_COMP *cpi)
savings += default_coef_context_savings(cpi);
#if CONFIG_T8X8
/* do not do this if not even allowed */
if(cpi->common.txfm_mode == ALLOW_8X8)
{
@ -1820,8 +1816,6 @@ int vp8_estimate_entropy_savings(VP8_COMP *cpi)
savings += savings8x8 >> 8;
}
#endif
return savings;
}
@ -1955,7 +1949,6 @@ static void update_coef_probs(VP8_COMP *cpi)
}
#if CONFIG_T8X8
/* do not do this if not even allowed */
if(cpi->common.txfm_mode == ALLOW_8X8)
{
@ -2090,8 +2083,6 @@ static void update_coef_probs(VP8_COMP *cpi)
while (++i < BLOCK_TYPES);
}
}
#endif
}
#ifdef PACKET_TESTING
FILE *vpxlogc = 0;
@ -2400,9 +2391,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
}
}
#if CONFIG_T8X8
vp8_write_bit(bc, pc->txfm_mode);
#endif
// Encode the loop filter level and type
vp8_write_bit(bc, pc->filter_type);
@ -2636,7 +2625,6 @@ void print_tree_update_probs()
fprintf(f, "};\n");
#if CONFIG_T8X8
fprintf(f, "const vp8_prob tree_update_probs_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] = {\n");
for (i = 0; i < BLOCK_TYPES; i++)
@ -2674,7 +2662,6 @@ void print_tree_update_probs()
fprintf(f, " },\n");
}
#endif
fclose(f);
}
#endif

View File

@ -47,9 +47,7 @@ typedef struct
int src_stride;
int eob_max_offset;
#if CONFIG_T8X8
int eob_max_offset_8x8;
#endif
} BLOCK;
@ -131,11 +129,8 @@ typedef struct
unsigned int token_costs[BLOCK_TYPES] [COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
#if CONFIG_T8X8
unsigned int token_costs_8x8[BLOCK_TYPES] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
int optimize;
int q_index;
@ -145,12 +140,10 @@ typedef struct
void (*short_walsh4x4)(short *input, short *output, int pitch);
void (*quantize_b)(BLOCK *b, BLOCKD *d);
void (*quantize_b_pair)(BLOCK *b1, BLOCK *b2, BLOCKD *d0, BLOCKD *d1);
#if CONFIG_T8X8
void (*vp8_short_fdct8x8)(short *input, short *output, int pitch);
void (*short_fhaar2x2)(short *input, short *output, int pitch);
void (*quantize_b_8x8)(BLOCK *b, BLOCKD *d);
void (*quantize_b_2x2)(BLOCK *b, BLOCKD *d);
#endif
} MACROBLOCK;

View File

@ -16,7 +16,7 @@
#if CONFIG_T8X8
void vp8_short_fdct8x8_c(short *block, short *coefs, int pitch)
{
int j1, i, j, k;
@ -126,7 +126,7 @@ void vp8_short_fhaar2x2_c(short *input, short *output, int pitch) //pitch = 8
op1[8]=(ip1[0] - ip1[1] - ip1[4] + ip1[8])>>1;
}
#endif
void vp8_short_fdct4x4_c(short *input, short *output, int pitch)
{
int i;

View File

@ -22,7 +22,7 @@
#include "arm/dct_arm.h"
#endif
#if CONFIG_T8X8
#ifndef vp8_fdct_short8x8
#define vp8_fdct_short8x8 vp8_short_fdct8x8_c
@ -34,7 +34,6 @@ extern prototype_fdct(vp8_fdct_short8x8);
#endif
extern prototype_fdct(vp8_fhaar_short2x2);
#endif
#ifndef vp8_fdct_short4x4
#define vp8_fdct_short4x4 vp8_short_fdct4x4_c
@ -63,10 +62,8 @@ extern prototype_fdct(vp8_fdct_walsh_short4x4);
typedef prototype_fdct(*vp8_fdct_fn_t);
typedef struct
{
#if CONFIG_T8X8
vp8_fdct_fn_t short8x8;
vp8_fdct_fn_t haar_short2x2;
#endif
vp8_fdct_fn_t short4x4;
vp8_fdct_fn_t short8x4;
vp8_fdct_fn_t fast4x4;

View File

@ -223,7 +223,6 @@ static const unsigned int default_coef_counts[BLOCK_TYPES]
};
#if CONFIG_T8X8
const unsigned int vp8_default_coef_counts_8x8[BLOCK_TYPES]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
@ -399,4 +398,4 @@ const unsigned int vp8_default_coef_counts_8x8[BLOCK_TYPES]
}
}
};
#endif

View File

@ -102,187 +102,6 @@ static const unsigned char VP8_VAR_OFFS[16]=
};
#if CONFIG_T8X8
//INTRA mode transform size
//When all three criteria are off the default is 4x4
//#define INTRA_VARIANCE_ENTROPY_CRITERIA
#define INTRA_WTD_SSE_ENTROPY_CRITERIA
//#define INTRA_TEST_8X8_ONLY
//
//INTER mode transform size
//When all three criteria are off the default is 4x4
//#define INTER_VARIANCE_ENTROPY_CRITERIA
#define INTER_WTD_SSE_ENTROPY_CRITERIA
//#define INTER_TEST_8X8_ONLY
/* Population variance of a dimension x dimension block of residual samples.
 * b1 points at the top-left sample; pitch is the row stride in shorts.
 * NOTE(review): assumes dimension <= 8, matching the callers in this file. */
double variance_Block(short *b1, int pitch, int dimension)
{
    const int count = dimension * dimension;
    const short *row;
    double mean = 0.0;
    double variance = 0.0;
    int r, c;

    /* First pass: block mean. */
    row = b1;
    for (r = 0; r < dimension; r++)
    {
        for (c = 0; c < dimension; c++)
            mean += row[c];
        row += pitch;
    }
    mean /= count;

    /* Second pass: average squared deviation from the mean. */
    row = b1;
    for (r = 0; r < dimension; r++)
    {
        for (c = 0; c < dimension; c++)
        {
            double d = row[c] - mean;
            variance += d * d;
        }
        row += pitch;
    }
    return variance / count;
}
/* Arithmetic mean of a dimension x dimension block of residual samples.
 * b points at the top-left sample; pitch is the row stride in shorts. */
double mean_Block(short *b, int pitch, int dimension)
{
    double total = 0.0;
    int r, c;

    for (r = 0; r < dimension; r++)
    {
        for (c = 0; c < dimension; c++)
            total += b[c];
        b += pitch;
    }
    return total / (dimension * dimension);
}
/* Sum of squared sample values (energy) over a dimension x dimension block.
 * b points at the top-left sample; pitch is the row stride in shorts. */
int SSE_Block(short *b, int pitch, int dimension)
{
    int sse = 0;
    int r, c;

    for (r = 0; r < dimension; r++)
    {
        for (c = 0; c < dimension; c++)
            sse += b[c] * b[c];
        b += pitch;
    }
    return sse;
}
/* Entropy (in bits) of the variance distribution over the four 8x8
 * quadrants of the 16x16 residual (blocks 0, 2, 8, 10; row stride 16).
 * Returns 0 when the whole residual has zero variance. */
double Compute_Variance_Entropy(MACROBLOCK *x)
{
    double v[4];
    double total;
    double entropy = 0.0;
    int k;

    v[0] = variance_Block(x->block[0].src_diff, 16, 8);
    v[1] = variance_Block(x->block[2].src_diff, 16, 8);
    v[2] = variance_Block(x->block[8].src_diff, 16, 8);
    v[3] = variance_Block(x->block[10].src_diff, 16, 8);
    total = v[0] + v[1] + v[2] + v[3];

    if (total)
    {
        for (k = 0; k < 4; k++)
        {
            if (v[k])
            {
                /* Fraction of total variance in this quadrant. */
                double p = v[k] / total;
                entropy -= p * log(p);
            }
        }
    }
    /* Convert from nats to bits. */
    return entropy / log(2);
}
/* Variance-weighted SSE sub-entropy used to choose between the 4x4 and 8x8
 * transform. For each of the four 8x8 quadrants of the 16x16 residual
 * (blocks 0, 2, 8, 10; row stride 16 shorts), the energy split across its
 * four 4x4 sub-blocks is converted to an entropy in bits; the quadrant
 * entropies are then averaged, weighted by quadrant variance.
 * Returns 0 when no quadrant has any variance. */
double Compute_Wtd_SSE_SubEntropy(MACROBLOCK *x)
{
    double variance_8[4] = {0.0, 0.0, 0.0, 0.0};
    double entropy_8[4] = {0.0, 0.0, 0.0, 0.0};
    double sse_1, sse_2, sse_3, sse_4, sse_0;
    double wsum;
    int i;

    /* Top half: quadrants at blocks 0 and 2 -> slots 0 and 2. */
    for (i = 0; i < 3; i += 2)
    {
        sse_0 = SSE_Block(x->block[i].src_diff, 16, 8);
        if (sse_0)
        {
            /* Energy fractions of the quadrant's four 4x4 sub-blocks. */
            sse_1 = SSE_Block(x->block[i].src_diff, 16, 4) / sse_0;
            sse_2 = SSE_Block(x->block[i + 1].src_diff, 16, 4) / sse_0;
            sse_3 = SSE_Block(x->block[i + 4].src_diff, 16, 4) / sse_0;
            sse_4 = SSE_Block(x->block[i + 5].src_diff, 16, 4) / sse_0;
            variance_8[i] = variance_Block(x->block[i].src_diff, 16, 8);
            if (sse_1 && sse_2 && sse_3 && sse_4)
                entropy_8[i] = (-sse_1 * log(sse_1)
                                - sse_2 * log(sse_2)
                                - sse_3 * log(sse_3)
                                - sse_4 * log(sse_4)) / log(2);
        }
    }

    /* Bottom half: quadrants at blocks 8 and 10 -> slots 1 and 3.
     * Bug fix: the quadrant SSE must be computed BEFORE it is tested. The
     * original tested the stale sse_0 left over from the loop above, which
     * could both skip valid quadrants and divide by a zero sse_0. */
    for (i = 8; i < 11; i += 2)
    {
        sse_0 = SSE_Block(x->block[i].src_diff, 16, 8);
        if (sse_0)
        {
            sse_1 = SSE_Block(x->block[i].src_diff, 16, 4) / sse_0;
            sse_2 = SSE_Block(x->block[i + 1].src_diff, 16, 4) / sse_0;
            sse_3 = SSE_Block(x->block[i + 4].src_diff, 16, 4) / sse_0;
            sse_4 = SSE_Block(x->block[i + 5].src_diff, 16, 4) / sse_0;
            variance_8[i - 7] = variance_Block(x->block[i].src_diff, 16, 8);
            if (sse_1 && sse_2 && sse_3 && sse_4)
                entropy_8[i - 7] = (-sse_1 * log(sse_1)
                                    - sse_2 * log(sse_2)
                                    - sse_3 * log(sse_3)
                                    - sse_4 * log(sse_4)) / log(2);
        }
    }

    /* Variance-weighted average of the four quadrant entropies. */
    wsum = variance_8[0] + variance_8[1] + variance_8[2] + variance_8[3];
    if (wsum)
        return (entropy_8[0] * variance_8[0] +
                entropy_8[1] * variance_8[1] +
                entropy_8[2] * variance_8[2] +
                entropy_8[3] * variance_8[3]) / wsum;
    else
        return 0;
}
/* Decide whether an intra-coded macroblock should use the 8x8 transform.
 * Returns 1 to select 8x8, 0 to keep the default 4x4. The criterion is
 * chosen at build time by the INTRA_* defines near the top of this file;
 * with none of them defined, 8x8 is never selected. */
int vp8_8x8_selection_intra(MACROBLOCK *x)
{
#ifdef INTRA_VARIANCE_ENTROPY_CRITERIA
/* Quadrant-variance entropy above 1.2 bits -> energy spread out -> 8x8. */
return (Compute_Variance_Entropy(x) > 1.2);
#elif defined(INTRA_WTD_SSE_ENTROPY_CRITERIA)
/* Variance-weighted 4x4 sub-block SSE entropy above 1.2 bits -> 8x8. */
return (Compute_Wtd_SSE_SubEntropy(x) > 1.2);
#elif defined(INTRA_TEST_8X8_ONLY)
/* Test build: force 8x8 for every intra macroblock. */
return 1;
#else
return 0; //when all criteria are off use the default 4x4 only
#endif
}
/* Decide whether an inter-coded macroblock should use the 8x8 transform.
 * Returns 1 to select 8x8, 0 to keep the default 4x4. The criterion is
 * chosen at build time by the INTER_* defines near the top of this file;
 * note the 1.5-bit threshold is stricter than the intra 1.2. */
int vp8_8x8_selection_inter(MACROBLOCK *x)
{
#ifdef INTER_VARIANCE_ENTROPY_CRITERIA
/* Quadrant-variance entropy above 1.5 bits -> energy spread out -> 8x8. */
return (Compute_Variance_Entropy(x) > 1.5);
#elif defined(INTER_WTD_SSE_ENTROPY_CRITERIA)
/* Variance-weighted 4x4 sub-block SSE entropy above 1.5 bits -> 8x8. */
return (Compute_Wtd_SSE_SubEntropy(x) > 1.5);
#elif defined(INTER_TEST_8X8_ONLY)
/* Test build: force 8x8 for every inter macroblock. */
return 1;
#else
return 0; //when all criteria are off use the default 4x4 only
#endif
}
#endif
// Original activity measure from Tim T's code.
static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
{
@ -876,10 +695,8 @@ void encode_mb_row(VP8_COMP *cpi,
x->active_ptr = cpi->active_map + map_index + mb_col;
#if CONFIG_T8X8
/* force 4x4 transform for mode selection */
xd->mode_info_context->mbmi.txfm_size = TX_4X4;
#endif
if (cm->frame_type == KEY_FRAME)
{
@ -1485,7 +1302,6 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
vp8_update_zbin_extra(cpi, x);
}
#if CONFIG_T8X8
/* test code: set transform size based on mode selection */
if(cpi->common.txfm_mode == ALLOW_8X8
&& x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
@ -1499,7 +1315,6 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
cpi->t4x4_count ++;
}
#endif
if(x->e_mbd.mode_info_context->mbmi.mode == I8X8_PRED)
{
@ -1583,7 +1398,7 @@ int vp8cx_encode_inter_macroblock
cpi->comp_pred_count[pred_context]++;
}
#if CONFIG_T8X8
/* test code: set transform size based on mode selection */
if( cpi->common.txfm_mode == ALLOW_8X8
&& x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
@ -1598,7 +1413,7 @@ int vp8cx_encode_inter_macroblock
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
cpi->t4x4_count++;
}
#endif
/* switch back to the regular quantizer for the encode */
if (cpi->sf.improved_quant)
{

View File

@ -114,9 +114,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
BLOCK *b = &x->block[0];
#if CONFIG_T8X8
int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
#endif
#if CONFIG_COMP_INTRA_PRED
if (x->e_mbd.mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE) (DC_PRED - 1))
@ -129,35 +127,27 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_transform_intra_mby_8x8(x);
else
#endif
vp8_transform_intra_mby(x);
vp8_transform_intra_mby(x);
#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_quantize_mby_8x8(x);
else
#endif
vp8_quantize_mby(x);
if (x->optimize)
{
#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_optimize_mby_8x8(x, rtcd);
else
#endif
vp8_optimize_mby(x, rtcd);
}
#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
else
#endif
vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
#ifdef ENC_DEBUG
@ -198,9 +188,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
#if CONFIG_T8X8
int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
#endif
#if CONFIG_COMP_INTRA_PRED
if (x->e_mbd.mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE) (DC_PRED - 1))
{
@ -215,18 +203,14 @@ void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
#endif
ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_transform_mbuv_8x8(x);
else
#endif
vp8_transform_mbuv(x);
#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_quantize_mbuv_8x8(x);
else
#endif
vp8_quantize_mbuv(x);
#ifdef ENC_DEBUG
@ -262,20 +246,16 @@ void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
#endif
if (x->optimize)
{
#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_optimize_mbuv_8x8(x, rtcd);
else
#endif
vp8_optimize_mbuv(x, rtcd);
}
#if CONFIG_T8X8
if(tx_type == TX_8X8)
vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
else
#endif
vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
}

View File

@ -142,7 +142,6 @@ static void build_dcblock(MACROBLOCK *x)
src_diff_ptr[i] = x->coeff[i * 16];
}
}
#if CONFIG_T8X8
void vp8_build_dcblock_8x8(MACROBLOCK *x)
{
short *src_diff_ptr = &x->src_diff[384];
@ -156,7 +155,7 @@ void vp8_build_dcblock_8x8(MACROBLOCK *x)
src_diff_ptr[4] = x->coeff[8 * 16];
src_diff_ptr[8] = x->coeff[12 * 16];
}
#endif
void vp8_transform_mbuv(MACROBLOCK *x)
{
int i;
@ -236,8 +235,6 @@ static void transform_mby(MACROBLOCK *x)
}
}
#if CONFIG_T8X8
void vp8_transform_mbuv_8x8(MACROBLOCK *x)
{
int i;
@ -338,7 +335,6 @@ void vp8_transform_mby_8x8(MACROBLOCK *x)
}
}
#endif
#define RDTRUNC(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
#define RDTRUNC_8x8(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
@ -798,7 +794,6 @@ void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
}
}
#if CONFIG_T8X8
void optimize_b_8x8(MACROBLOCK *mb, int i, int type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
const VP8_ENCODER_RTCD *rtcd)
@ -1150,50 +1145,37 @@ void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
}
}
#endif
void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
#if CONFIG_T8X8
int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
#endif
vp8_build_inter_predictors_mb(&x->e_mbd);
vp8_subtract_mb(rtcd, x);
#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_transform_mb_8x8(x);
else
#endif
transform_mb(x);
#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_quantize_mb_8x8(x);
else
#endif
vp8_quantize_mb(x);
if (x->optimize)
{
#if CONFIG_T8X8
if( tx_type == TX_8X8 )
optimize_mb_8x8(x, rtcd);
else
#endif
optimize_mb(x, rtcd);
}
#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
else
#endif
vp8_inverse_transform_mb(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
#if CONFIG_T8X8
if( tx_type == TX_8X8 )
{
#ifdef ENC_DEBUG
@ -1225,7 +1207,6 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
}
#endif
}
#endif
RECON_INVOKE(&rtcd->common->recon, recon_mb)
(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
@ -1251,9 +1232,7 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
/* this function is used by first pass only */
void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
{
#if CONFIG_T8X8
int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
#endif
BLOCK *b = &x->block[0];
@ -1261,19 +1240,16 @@ void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_transform_mby_8x8(x);
else
#endif
transform_mby(x);
vp8_quantize_mby(x);
#if CONFIG_T8X8
if( tx_type == TX_8X8 )
vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
else
#endif
vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
RECON_INVOKE(&rtcd->common->recon, recon_mby)

View File

@ -104,7 +104,6 @@ void vp8_optimize_mby(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_optimize_mbuv(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_encode_inter16x16y(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
#if CONFIG_T8X8
void vp8_transform_mb_8x8(MACROBLOCK *mb);
void vp8_transform_mby_8x8(MACROBLOCK *x);
void vp8_transform_mbuv_8x8(MACROBLOCK *x);
@ -112,7 +111,6 @@ void vp8_transform_intra_mby_8x8(MACROBLOCK *x);
void vp8_build_dcblock_8x8(MACROBLOCK *b);
void vp8_optimize_mby_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
#endif
void vp8_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch);

View File

@ -69,10 +69,8 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi)
cpi->rtcd.variance.mse16x16 = vp8_mse16x16_c;
cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;
#if CONFIG_T8X8
cpi->rtcd.fdct.short8x8 = vp8_short_fdct8x8_c;
cpi->rtcd.fdct.haar_short2x2 = vp8_short_fhaar2x2_c;
#endif
cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;
cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c;
@ -90,12 +88,10 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi)
cpi->rtcd.quantize.quantb_pair = vp8_regular_quantize_b_pair;
cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_c;
cpi->rtcd.quantize.fastquantb_pair = vp8_fast_quantize_b_pair_c;
#if CONFIG_T8X8
cpi->rtcd.quantize.quantb_8x8 = vp8_regular_quantize_b_8x8;
cpi->rtcd.quantize.fastquantb_8x8 = vp8_fast_quantize_b_8x8_c;
cpi->rtcd.quantize.quantb_2x2 = vp8_regular_quantize_b_2x2;
cpi->rtcd.quantize.fastquantb_2x2 = vp8_fast_quantize_b_2x2_c;
#endif
cpi->rtcd.search.full_search = vp8_full_search_sad;
cpi->rtcd.search.refining_search = vp8_refining_search_sad;
cpi->rtcd.search.diamond_search = vp8_diamond_search_sad;

View File

@ -964,25 +964,19 @@ void vp8_set_speed_features(VP8_COMP *cpi)
if (cpi->sf.improved_dct)
{
#if CONFIG_T8X8
cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8);
#endif
cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x4);
cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short4x4);
}
else
{
#if CONFIG_T8X8
cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8);
#endif
cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast8x4);
cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast4x4);
}
cpi->mb.short_walsh4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, walsh_short4x4);
#if CONFIG_T8X8
cpi->mb.short_fhaar2x2 = FDCT_INVOKE(&cpi->rtcd.fdct, haar_short2x2);
#endif
if (cpi->sf.improved_quant)
{
@ -990,10 +984,8 @@ void vp8_set_speed_features(VP8_COMP *cpi)
quantb);
cpi->mb.quantize_b_pair = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
quantb_pair);
#if CONFIG_T8X8
cpi->mb.quantize_b_8x8 = QUANTIZE_INVOKE(&cpi->rtcd.quantize, quantb_8x8);
cpi->mb.quantize_b_2x2 = QUANTIZE_INVOKE(&cpi->rtcd.quantize, quantb_2x2);
#endif
}
else
{
@ -1001,10 +993,8 @@ void vp8_set_speed_features(VP8_COMP *cpi)
fastquantb);
cpi->mb.quantize_b_pair = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
fastquantb_pair);
#if CONFIG_T8X8
cpi->mb.quantize_b_8x8 = QUANTIZE_INVOKE(&cpi->rtcd.quantize, fastquantb_8x8);
cpi->mb.quantize_b_2x2 = QUANTIZE_INVOKE(&cpi->rtcd.quantize, fastquantb_2x2);
#endif
}
if (cpi->sf.improved_quant != last_improved_quant)
vp8cx_init_quantizer(cpi);
@ -1863,9 +1853,8 @@ void vp8_remove_compressor(VP8_PTR *ptr)
#if CONFIG_INTERNAL_STATS
vp8_clear_system_state();
#if CONFIG_T8X8
printf("\n8x8-4x4:%d-%d\n", cpi->t8x8_count, cpi->t4x4_count);
#endif
if (cpi->pass != 1)
{
FILE *f = fopen("opsnr.stt", "a");

View File

@ -453,11 +453,9 @@ typedef struct VP8_COMP
//save vp8_tree_probs_from_distribution result for each frame to avoid repeat calculation
vp8_prob frame_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
#if CONFIG_T8X8
unsigned int coef_counts_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
#endif
int gfu_boost;
int kf_boost;
@ -514,10 +512,8 @@ typedef struct VP8_COMP
int gf_update_recommended;
int skip_true_count;
int skip_false_count;
#if CONFIG_T8X8
int t4x4_count;
int t8x8_count;
#endif
#if CONFIG_UVINTRA
int y_uv_mode_count[VP8_YMODES][VP8_UV_MODES];

View File

@ -354,11 +354,9 @@ void vp8cx_pick_filter_level_sg(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi, int segme
if (cpi->twopass.section_intra_rating < 20)
Bias = Bias * cpi->twopass.section_intra_rating / 20;
#if CONFIG_T8X8
// yx, bias less for large block size
if(cpi->common.txfm_mode == ALLOW_8X8)
Bias >>= 1;
#endif
filt_high = ((filt_mid + filter_step) > max_filter_level) ? max_filter_level : (filt_mid + filter_step);
filt_low = ((filt_mid - filter_step) < min_filter_level) ? min_filter_level : (filt_mid - filter_step);
@ -578,11 +576,9 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi)
if (cpi->twopass.section_intra_rating < 20)
Bias = Bias * cpi->twopass.section_intra_rating / 20;
#if CONFIG_T8X8
// yx, bias less for large block size
if(cpi->common.txfm_mode == ALLOW_8X8)
Bias >>= 1;
#endif
filt_high = ((filt_mid + filter_step) > max_filter_level) ? max_filter_level : (filt_mid + filter_step);
filt_low = ((filt_mid - filter_step) < min_filter_level) ? min_filter_level : (filt_mid - filter_step);

View File

@ -23,56 +23,6 @@ extern int enc_debug;
#endif
#define EXACT_QUANT
#ifdef EXACT_FASTQUANT
/* Fast 4x4 quantizer (exact-quant variant).
 *
 * Walks the 16 coefficients of block b in zig-zag order, quantizes each
 * against its zero-bin threshold, writes the quantized and dequantized
 * values into d, and stores the end-of-block position (last nonzero
 * zig-zag index + 1) in d->eob.
 */
void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
{
    const short *coeff = b->coeff;
    const short *zbin = b->zbin;
    const short *round = b->round;
    const short *quant = b->quant_fast;
    const unsigned char *shift = b->quant_shift;
    const short *dequant = d->dequant;
    short *qcoeff = d->qcoeff;
    short *dqcoeff = d->dqcoeff;
    int last_nonzero = -1;
    int i;

    /* 16 shorts = 32 bytes: clear both output arrays up front. */
    vpx_memset(qcoeff, 0, 32);
    vpx_memset(dqcoeff, 0, 32);

    for (i = 0; i < 16; i++)
    {
        const int rc = vp8_default_zig_zag1d[i]; /* raster position */
        const int z = coeff[rc];
        const int sign = z >> 31;                /* 0 or -1 */
        int mag = (z ^ sign) - sign;             /* abs(z) */

        if (mag >= zbin[rc])
        {
            int q, signed_q;

            mag += round[rc];
            q = (((mag * quant[rc]) >> 16) + mag) >> shift[rc];
            signed_q = (q ^ sign) - sign;        /* restore the sign */
            qcoeff[rc] = signed_q;
            dqcoeff[rc] = signed_q * dequant[rc];

            if (q)
                last_nonzero = i;
        }
    }

    d->eob = last_nonzero + 1;
}
#else
void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
{
int i, rc, eob, nonzeros;
@ -83,11 +33,10 @@ void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
short *qcoeff_ptr = d->qcoeff;
short *dqcoeff_ptr = d->dqcoeff;
short *dequant_ptr = d->dequant;
#if CONFIG_T8X8
vpx_memset(qcoeff_ptr, 0, 32);
vpx_memset(dqcoeff_ptr, 0, 32);
#endif
eob = -1;
for (i = 0; i < 16; i++)
{
@ -110,7 +59,7 @@ void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d)
d->eob = eob + 1;
}
#endif
#ifdef EXACT_QUANT
void vp8_regular_quantize_b(BLOCK *b, BLOCKD *d)
@ -277,7 +226,8 @@ void vp8_regular_quantize_b(BLOCK *b, BLOCKD *d)
d->eob = eob + 1;
}
#endif //EXACT_QUANT
#endif
//EXACT_QUANT
void vp8_quantize_mby_c(MACROBLOCK *x)
@ -314,113 +264,8 @@ void vp8_quantize_mbuv_c(MACROBLOCK *x)
x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
}
#if CONFIG_T8X8
#ifdef EXACT_FASTQUANT
/* Fast quantizer for the 2x2 second-order (haar) block, exact-quant
 * variant.  Only the first four zig-zag positions carry data; each is
 * quantized against its zero-bin, the results are written into d, and
 * d->eob records the last nonzero position + 1.
 */
void vp8_fast_quantize_b_2x2_c(BLOCK *b, BLOCKD *d)
{
    const short *coeff = b->coeff;
    const short *zbin = b->zbin;
    const short *round = b->round;
    const short *quant = b->quant;
    const short *shift = b->quant_shift;
    const short *dequant = d->dequant;
    short *qcoeff = d->qcoeff;
    short *dqcoeff = d->dqcoeff;
    int last_nonzero = -1;
    int i;

    vpx_memset(qcoeff, 0, 32);
    vpx_memset(dqcoeff, 0, 32);

    /* Only four coefficients exist in the 2x2 block. */
    for (i = 0; i < 4; i++)
    {
        const int rc = vp8_default_zig_zag1d[i];
        const int z = coeff[rc];
        const int sign = z >> 31;        /* 0 or -1 */
        int mag = (z ^ sign) - sign;     /* abs(z) */

        if (mag >= zbin[rc])
        {
            int q, signed_q;

            mag += round[rc];
            q = ((int)((int)(mag * quant[rc]) >> 16) + mag) >> shift[rc];
            signed_q = (q ^ sign) - sign; /* restore the sign */
            qcoeff[rc] = signed_q;
            dqcoeff[rc] = signed_q * dequant[rc];

            if (q)
                last_nonzero = i;
        }
    }

    d->eob = last_nonzero + 1;
}
/* Fast 8x8 quantizer, exact-quant variant.  The quantizer tables hold
 * separate DC and AC entries only (index 0 = DC, index 1 = all AC), so
 * zbin/quant/shift/dequant are indexed with (rc != 0).
 *
 * NOTE(review): round is indexed by the raster position rc here, not by
 * the DC/AC split used for the other tables -- preserved as written.
 */
void vp8_fast_quantize_b_8x8_c(BLOCK *b, BLOCKD *d)
{
    const short *coeff = b->coeff;
    const short *zbin = b->zbin;
    const short *round = b->round;
    const short *quant = b->quant;
    const short *shift = b->quant_shift;
    const short *dequant = d->dequant;
    short *qcoeff = d->qcoeff;
    short *dqcoeff = d->dqcoeff;
    int last_nonzero = -1;
    int i;

    vpx_memset(qcoeff, 0, 64 * sizeof(short));
    vpx_memset(dqcoeff, 0, 64 * sizeof(short));

    for (i = 0; i < 64; i++)
    {
        const int rc = vp8_default_zig_zag1d_8x8[i];
        const int z = coeff[rc];
        const int ac = (rc != 0);        /* 0 selects DC entry, 1 the AC entry */
        const int sign = z >> 31;        /* 0 or -1 */
        int mag = (z ^ sign) - sign;     /* abs(z) */

        if (mag >= zbin[ac])
        {
            int q, signed_q;

            mag += round[rc];
            q = ((int)(((int)(mag * quant[ac])) >> 16) + mag) >> shift[ac];
            signed_q = (q ^ sign) - sign; /* restore the sign */
            qcoeff[rc] = signed_q;
            dqcoeff[rc] = signed_q * dequant[ac];

            if (q)
                last_nonzero = i;
        }
    }

    d->eob = last_nonzero + 1;
}
#else
void vp8_fast_quantize_b_2x2_c(BLOCK *b, BLOCKD *d)
{
@ -520,9 +365,9 @@ void vp8_fast_quantize_b_8x8_c(BLOCK *b, BLOCKD *d)
d->eob = eob + 1;
}
#endif //EXACT_FASTQUANT
#ifdef EXACT_QUANT
void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d)
{
int i, rc, eob;
@ -757,107 +602,7 @@ void vp8_strict_quantize_b_8x8(BLOCK *b, BLOCKD *d)
d->eob = eob + 1;
}
#else
/* Regular (zbin-boosted) quantizer for the 2x2 second-order block.
 *
 * The effective zero bin grows with the current run of zeros
 * (zrun_zbin_boost) plus the per-macroblock extra (zbin_extra); emitting
 * a nonzero coefficient resets the boost to the start of the table.
 * Results go into d, with the end-of-block position in d->eob.
 */
void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d)
{
    short *boost = b->zrun_zbin_boost;
    const short *coeff = b->coeff;
    const short *zbin = b->zbin;
    const short *round = b->round;
    const short *quant = b->quant;
    const short *dequant = d->dequant;
    short *qcoeff = d->qcoeff;
    short *dqcoeff = d->dqcoeff;
    const short zbin_extra = b->zbin_extra;
    int last_nonzero = -1;
    int i;

    vpx_memset(qcoeff, 0, 32);
    vpx_memset(dqcoeff, 0, 32);

    for (i = 0; i < 4; i++)
    {
        const int rc = vp8_default_zig_zag1d[i];
        const int z = coeff[rc];
        const int threshold = zbin[rc] + *boost + zbin_extra;
        const int sign = z >> 31;        /* 0 or -1 */
        int mag = (z ^ sign) - sign;     /* abs(z) */

        boost++; /* boost keeps growing while coefficients stay zero */

        if (mag >= threshold)
        {
            const int q = ((mag + round[rc]) * quant[rc]) >> 16;

            mag = (q ^ sign) - sign;     /* restore the sign */
            qcoeff[rc] = mag;
            dqcoeff[rc] = mag * dequant[rc];

            if (q)
            {
                last_nonzero = i;
                boost = &b->zrun_zbin_boost[0]; /* reset zero runlength */
            }
        }
    }

    d->eob = last_nonzero + 1;
}
/* Regular (zbin-boosted) 8x8 quantizer.  Tables hold one DC entry
 * (index 0) and one shared AC entry (index 1), selected with (rc != 0).
 * The effective zero bin grows with the current zero run and resets when
 * a nonzero coefficient is emitted.  Results go into d, with the
 * end-of-block position in d->eob.
 */
void vp8_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d)
{
    short *boost = b->zrun_zbin_boost;
    const short *coeff = b->coeff;
    const short *zbin = b->zbin;
    const short *round = b->round;
    const short *quant = b->quant;
    const short *dequant = d->dequant;
    short *qcoeff = d->qcoeff;
    short *dqcoeff = d->dqcoeff;
    const short zbin_extra = b->zbin_extra;
    int last_nonzero = -1;
    int i;

    vpx_memset(qcoeff, 0, 64 * sizeof(short));
    vpx_memset(dqcoeff, 0, 64 * sizeof(short));

    for (i = 0; i < 64; i++)
    {
        const int rc = vp8_default_zig_zag1d_8x8[i];
        const int z = coeff[rc];
        const int ac = (rc != 0);        /* 0 selects DC entry, 1 the AC entry */
        const int threshold = zbin[ac] + *boost + zbin_extra;
        const int sign = z >> 31;        /* 0 or -1 */
        int mag = (z ^ sign) - sign;     /* abs(z) */

        boost++; /* boost keeps growing while coefficients stay zero */

        if (mag >= threshold)
        {
            const int q = ((mag + round[ac]) * quant[ac]) >> 16;

            mag = (q ^ sign) - sign;     /* restore the sign */
            qcoeff[rc] = mag;
            dqcoeff[rc] = mag * dequant[ac];

            if (q)
            {
                last_nonzero = i;
                boost = &b->zrun_zbin_boost[0]; /* reset zero runlength */
            }
        }
    }

    d->eob = last_nonzero + 1;
}
#endif //EXACT_QUANT
void vp8_quantize_mby_8x8(MACROBLOCK *x)
{
@ -905,7 +650,7 @@ void vp8_quantize_mbuv_8x8(MACROBLOCK *x)
x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]);
}
#endif //CONFIG_T8X8
/* quantize_b_pair function pointer in MACROBLOCK structure is set to one of
* these two C functions if corresponding optimized routine is not available.
@ -1023,69 +768,6 @@ void vp8cx_init_quantizer(VP8_COMP *cpi)
}
}
}
#else
/* Fallback (non-EXACT_QUANT) initialization of the per-Q quantizer tables.
 *
 * For every base Q index, fills in the forward quantizer multipliers
 * ((1 << 16) / quant), zero-bin thresholds, rounding offsets, dequant
 * values and zero-run zbin boosts for the Y1, Y2 and UV planes; DC
 * (position 0) first, then the 15 AC positions in zig-zag order.
 *
 * Fixes relative to the previous revision:
 *  - "vp8_dc_quant(Q,0) < 148 )" had unbalanced parentheses (would not
 *    compile);
 *  - qzbin_factors / qrounding_factors were used but never declared
 *    (the declared names are qzbin_factor / qrounding_factor);
 *  - a stray double semicolon after the UVzbin DC assignment.
 */
void vp8cx_init_quantizer(VP8_COMP *cpi)
{
    int i;
    int quant_val;
    int Q;
    int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44};
    int qrounding_factor = 48;

    for (Q = 0; Q < QINDEX_RANGE; Q++)
    {
        /* Use a slightly wider zero bin at low quantizer values. */
        int qzbin_factor = (vp8_dc_quant(Q, 0) < 148) ? 84 : 80;

        // dc values
        quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q);
        cpi->Y1quant[Q][0] = (1 << 16) / quant_val;
        cpi->Y1zbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
        cpi->Y1round[Q][0] = (qrounding_factor * quant_val) >> 7;
        cpi->common.Y1dequant[Q][0] = quant_val;
        cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
        cpi->Y2quant[Q][0] = (1 << 16) / quant_val;
        cpi->Y2zbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
        cpi->Y2round[Q][0] = (qrounding_factor * quant_val) >> 7;
        cpi->common.Y2dequant[Q][0] = quant_val;
        cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
        cpi->UVquant[Q][0] = (1 << 16) / quant_val;
        cpi->UVzbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
        cpi->UVround[Q][0] = (qrounding_factor * quant_val) >> 7;
        cpi->common.UVdequant[Q][0] = quant_val;
        cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;

        // all the ac values
        for (i = 1; i < 16; i++)
        {
            int rc = vp8_default_zig_zag1d[i];

            quant_val = vp8_ac_yquant(Q);
            cpi->Y1quant[Q][rc] = (1 << 16) / quant_val;
            cpi->Y1zbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
            cpi->Y1round[Q][rc] = (qrounding_factor * quant_val) >> 7;
            cpi->common.Y1dequant[Q][rc] = quant_val;
            cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7;

            quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q);
            cpi->Y2quant[Q][rc] = (1 << 16) / quant_val;
            cpi->Y2zbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
            cpi->Y2round[Q][rc] = (qrounding_factor * quant_val) >> 7;
            cpi->common.Y2dequant[Q][rc] = quant_val;
            cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7;

            quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q);
            cpi->UVquant[Q][rc] = (1 << 16) / quant_val;
            cpi->UVzbin[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
            cpi->UVround[Q][rc] = (qrounding_factor * quant_val) >> 7;
            cpi->common.UVdequant[Q][rc] = quant_val;
            cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7;
        }
    }
}
#endif
@ -1139,17 +821,13 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
{
x->block[i].eob_max_offset =
get_segdata( xd, segment_id, SEG_LVL_EOB );
#if CONFIG_T8X8
x->block[i].eob_max_offset_8x8 =
get_segdata( xd, segment_id, SEG_LVL_EOB );
#endif
}
else
{
x->block[i].eob_max_offset = 16;
#if CONFIG_T8X8
x->block[i].eob_max_offset_8x8 = 64;
#endif
}
}
@ -1175,19 +853,13 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
{
x->block[i].eob_max_offset =
get_segdata( xd, segment_id, SEG_LVL_EOB );
#if CONFIG_T8X8
x->block[i].eob_max_offset_8x8 =
get_segdata( xd, segment_id, SEG_LVL_EOB );
#endif
}
else
{
x->block[i].eob_max_offset = 16;
#if CONFIG_T8X8
x->block[i].eob_max_offset_8x8 = 64;
#endif
}
}
@ -1212,17 +884,13 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x)
{
x->block[24].eob_max_offset =
get_segdata( xd, segment_id, SEG_LVL_EOB );
#if CONFIG_T8X8
x->block[24].eob_max_offset_8x8 =
get_segdata( xd, segment_id, SEG_LVL_EOB );
#endif
}
else
{
x->block[24].eob_max_offset = 16;
#if CONFIG_T8X8
x->block[24].eob_max_offset_8x8 = 4;
#endif
}
/* save this macroblock QIndex for vp8_update_zbin_extra() */

View File

@ -45,7 +45,7 @@ extern prototype_quantize_block_pair(vp8_quantize_quantb_pair);
#define vp8_quantize_fastquantb vp8_fast_quantize_b_c
#endif
extern prototype_quantize_block(vp8_quantize_fastquantb);
#if CONFIG_T8X8
#ifndef vp8_quantize_quantb_8x8
#define vp8_quantize_quantb_8x8 vp8_regular_quantize_b_8x8
#endif
@ -65,7 +65,7 @@ extern prototype_quantize_block(vp8_quantize_quantb_2x2);
#define vp8_quantize_fastquantb_2x2 vp8_fast_quantize_b_2x2_c
#endif
extern prototype_quantize_block(vp8_quantize_fastquantb_2x2);
#endif
#ifndef vp8_quantize_fastquantb_pair
#define vp8_quantize_fastquantb_pair vp8_fast_quantize_b_pair_c
@ -77,12 +77,10 @@ typedef struct
prototype_quantize_block(*quantb);
prototype_quantize_block_pair(*quantb_pair);
prototype_quantize_block(*fastquantb);
#if CONFIG_T8X8
prototype_quantize_block(*quantb_8x8);
prototype_quantize_block(*fastquantb_8x8);
prototype_quantize_block(*quantb_2x2);
prototype_quantize_block(*fastquantb_2x2);
#endif
prototype_quantize_block_pair(*fastquantb_pair);
} vp8_quantize_rtcd_vtable_t;
@ -108,10 +106,8 @@ extern prototype_quantize_mb(vp8_quantize_mby);
#endif
extern void vp8_strict_quantize_b(BLOCK *b,BLOCKD *d);
#if CONFIG_T8X8
extern void vp8_strict_quantize_b_8x8(BLOCK *b,BLOCKD *d);
extern void vp8_strict_quantize_b_2x2(BLOCK *b,BLOCKD *d);
#endif
struct VP8_COMP;
extern void vp8_set_quantizer(struct VP8_COMP *cpi, int Q);
extern void vp8cx_frame_init_quantizer(struct VP8_COMP *cpi);

View File

@ -242,9 +242,7 @@ void vp8_setup_key_frame(VP8_COMP *cpi)
#endif
#if CONFIG_T8X8
cpi->common.txfm_mode = ONLY_4X4;
#endif
//cpi->common.filter_level = 0; // Reset every key frame.
cpi->common.filter_level = cpi->common.base_qindex * 3 / 8 ;
@ -268,13 +266,12 @@ void vp8_setup_key_frame(VP8_COMP *cpi)
}
void vp8_setup_inter_frame(VP8_COMP *cpi)
{
#if CONFIG_T8X8
if(cpi->common.Width * cpi->common.Height > 640*360)
//||cpi->this_frame_target < 7 * cpi->common.MBs)
cpi->common.txfm_mode = ALLOW_8X8;
else
cpi->common.txfm_mode = ONLY_4X4;
#endif
if(cpi->common.refresh_alt_ref_frame)
{

View File

@ -353,12 +353,10 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex)
(const vp8_prob( *)[8][3][11]) cpi->common.fc.coef_probs
);
#if CONFIG_T8X8
fill_token_costs(
cpi->mb.token_costs_8x8,
(const vp8_prob( *)[8][3][11]) cpi->common.fc.coef_probs_8x8
);
#endif
#if CONFIG_QIMODE
//rough estimate for costing
cpi->common.kf_ymode_probs_index = cpi->common.base_qindex>>4;
@ -664,7 +662,6 @@ static void macro_block_yrd( MACROBLOCK *mb,
*Rate = vp8_rdcost_mby(mb);
}
#if CONFIG_T8X8
static int cost_coeffs_2x2(MACROBLOCK *mb,
BLOCKD *b, int type,
@ -794,7 +791,6 @@ static void macro_block_yrd_8x8( MACROBLOCK *mb,
// rate
*Rate = vp8_rdcost_mby_8x8(mb);
}
#endif
static void copy_predictor(unsigned char *dst, const unsigned char *predictor)
{
@ -1311,7 +1307,7 @@ static int rd_inter16x16_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
#if CONFIG_T8X8
static int rd_cost_mbuv_8x8(MACROBLOCK *mb)
{
int b;
@ -1351,7 +1347,7 @@ static int rd_inter16x16_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}
#endif
static int rd_inter4x4_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
int *distortion, int fullpixel)
@ -2470,10 +2466,8 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
int rate2, distortion2;
int uv_intra_rate, uv_intra_distortion, uv_intra_rate_tokenonly;
int uv_intra_tteob = 0;
#if CONFIG_T8X8
int uv_intra_rate_8x8, uv_intra_distortion_8x8, uv_intra_rate_tokenonly_8x8;
int uv_intra_tteob_8x8=0;
#endif
int rate_y, UNINITIALIZED_IS_SAFE(rate_uv);
int distortion_uv;
int best_yrd = INT_MAX;
@ -2564,9 +2558,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
for(i=16; i<24; i++)
uv_intra_tteob += x->e_mbd.block[i].eob;
#if CONFIG_T8X8
uv_intra_tteob_8x8 = uv_intra_tteob;
#endif
// Get estimates of reference frame costs for each reference frame
// that depend on the current prediction etc.
@ -2770,12 +2762,10 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// FIXME compound intra prediction
RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
(&x->e_mbd);
#if CONFIG_T8X8
if(cpi->common.txfm_mode == ALLOW_8X8)
macro_block_yrd_8x8(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd)) ;
else
#endif
macro_block_yrd(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd.encodemb)) ;
rate2 += rate_y;
@ -3014,12 +3004,10 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
rate2 += vp8_cost_mv_ref(&cpi->common, this_mode, mdcounts);
// Y cost and distortion
#if CONFIG_T8X8
if(cpi->common.txfm_mode == ALLOW_8X8)
macro_block_yrd_8x8(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd));
else
#endif
macro_block_yrd(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd.encodemb));
@ -3029,13 +3017,11 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
// UV cost and distortion
vp8_build_inter16x16_predictors_mbuv(&x->e_mbd);
#if CONFIG_T8X8
if(cpi->common.txfm_mode == ALLOW_8X8)
rd_inter16x16_uv_8x8(cpi, x, &rate_uv,
&distortion_uv,
cpi->common.full_pixel);
else
#endif
rd_inter16x16_uv(cpi, x, &rate_uv,
&distortion_uv,
cpi->common.full_pixel);
@ -3126,12 +3112,10 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
&x->e_mbd.predictor[320], 16, 8);
/* Y cost and distortion */
#if CONFIG_T8X8
if(cpi->common.txfm_mode == ALLOW_8X8)
macro_block_yrd_8x8(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd));
else
#endif
macro_block_yrd(x, &rate_y, &distortion,
IF_RTCD(&cpi->rtcd.encodemb));
@ -3139,13 +3123,11 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
distortion2 += distortion;
/* UV cost and distortion */
#if CONFIG_T8X8
if(cpi->common.txfm_mode == ALLOW_8X8)
rd_inter16x16_uv_8x8(cpi, x, &rate_uv,
&distortion_uv,
cpi->common.full_pixel);
else
#endif
rd_inter16x16_uv(cpi, x, &rate_uv,
&distortion_uv,
cpi->common.full_pixel);
@ -3195,7 +3177,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
if(has_y2_block)
tteob += x->e_mbd.block[24].eob;
#if CONFIG_T8X8
if(cpi->common.txfm_mode ==ALLOW_8X8 && has_y2_block)
{
for (i = 0; i < 16; i+=4)
@ -3211,7 +3192,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
}
}
else
#endif
{
for (i = 0; i < 16; i++)
tteob += (x->e_mbd.block[i].eob > has_y2_block);
@ -3467,9 +3447,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate_)
{
#if CONFIG_T8X8
MACROBLOCKD *xd = &x->e_mbd;
#endif
int error4x4, error16x16;
int rate4x4, rate16x16 = 0, rateuv;
int dist4x4, dist16x16, distuv;

View File

@ -24,14 +24,10 @@
#ifdef ENTROPY_STATS
_int64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#if CONFIG_T8X8
_int64 context_counters_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
#endif
void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ;
#if CONFIG_T8X8
void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ;
#endif
void vp8_fix_contexts(MACROBLOCKD *x);
static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE*2];
@ -104,7 +100,6 @@ static void fill_value_tokens()
vp8_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
}
#if CONFIG_T8X8
static void tokenize2nd_order_b_8x8
(
MACROBLOCKD *xd,
@ -176,7 +171,6 @@ static void tokenize2nd_order_b_8x8
*a = *l = pt;
}
#endif
static void tokenize2nd_order_b
(
@ -247,7 +241,7 @@ static void tokenize2nd_order_b
*a = *l = pt;
}
#if CONFIG_T8X8
static void tokenize1st_order_b_8x8
(
MACROBLOCKD *xd,
@ -313,7 +307,7 @@ static void tokenize1st_order_b_8x8
*a = *l = pt;
}
#endif
static void tokenize1st_order_b
@ -465,7 +459,7 @@ static int mb_is_skippable(MACROBLOCKD *x, int has_y2_block)
return skip;
}
#if CONFIG_T8X8
static int mb_is_skippable_8x8(MACROBLOCKD *x)
{
int has_y2_block;
@ -485,17 +479,14 @@ static int mb_is_skippable_8x8(MACROBLOCKD *x)
return skip;
}
#endif
void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
{
int plane_type;
int has_y2_block;
int b;
#if CONFIG_T8X8
int tx_type = x->mode_info_context->mbmi.txfm_size;
#endif
// If the MB is going to be skipped because of a segment level flag
// exclude this from the skip count stats used to calculate the
@ -516,13 +507,9 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
&& x->mode_info_context->mbmi.mode != SPLITMV);
x->mode_info_context->mbmi.mb_skip_coeff =
#if CONFIG_T8X8
(( tx_type == TX_8X8 ) ?
mb_is_skippable_8x8(x) :
mb_is_skippable(x, has_y2_block));
#else
mb_is_skippable(x, has_y2_block);
#endif
if (x->mode_info_context->mbmi.mb_skip_coeff)
{
@ -530,11 +517,9 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
if (!cpi->common.mb_no_coeff_skip)
{
#if CONFIG_T8X8
if ( tx_type == TX_8X8 )
vp8_stuff_mb_8x8(cpi, x, t) ;
else
#endif
vp8_stuff_mb(cpi, x, t) ;
}
else
@ -550,7 +535,6 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
plane_type = 3;
if(has_y2_block)
{
#if CONFIG_T8X8
if ( tx_type == TX_8X8 )
{
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context;
@ -561,13 +545,12 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
L + vp8_block2left_8x8[24], cpi);
}
else
#endif
tokenize2nd_order_b(x, t, cpi);
plane_type = 0;
}
#if CONFIG_T8X8
if ( tx_type == TX_8X8 )
{
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context;
@ -594,7 +577,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
}
}
else
#endif
tokenize1st_order_b(x, t, plane_type, cpi);
}
@ -604,9 +587,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
void init_context_counters(void)
{
vpx_memset(context_counters, 0, sizeof(context_counters));
#if CONFIG_T8X8
vpx_memset(context_counters_8x8, 0, sizeof(context_counters_8x8));
#endif
}
void print_context_counters()
@ -670,7 +651,6 @@ void print_context_counters()
}
while (++type < BLOCK_TYPES);
#if CONFIG_T8X8
fprintf(f, "int Contexts_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];\n\n");
fprintf(f, "const int default_contexts_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS] = {");
@ -718,7 +698,6 @@ void print_context_counters()
fprintf(f, "\n }");
}
while (++type < BLOCK_TYPES);
#endif
fprintf(f, "\n};\n");
fclose(f);
@ -731,7 +710,7 @@ void vp8_tokenize_initialize()
fill_value_tokens();
}
#if CONFIG_T8X8
static __inline void stuff2nd_order_b_8x8
(
const BLOCKD *const b,
@ -857,7 +836,7 @@ void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t)
*(L + vp8_block2left_8x8[b]+1 ) = *(L + vp8_block2left_8x8[b]);
}
}
#endif
static __inline void stuff2nd_order_b
(

View File

@ -38,10 +38,8 @@ void init_context_counters();
void print_context_counters();
extern _int64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#if CONFIG_T8X8
extern _int64 context_counters_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
#endif
extern const int *vp8_dct_value_cost_ptr;
/* TODO: The Token field should be broken out into a separate char array to
* improve cache locality, since it's needed for costing when the rest of the