From 89ee68b1f719a40f698d5793901da52fb8a3a852 Mon Sep 17 00:00:00 2001
From: Yaowu Xu
Date: Tue, 28 Feb 2012 17:11:12 -0800
Subject: [PATCH] Merge t8x8 experiments

Change-Id: I8e9b6b154e1a0d0cb42d596366380d69c00ac15f
---
 vp8/common/alloccommon.c | 2 -
 vp8/common/blockd.c | 3 -
 vp8/common/blockd.h | 2 -
 vp8/common/coefupdateprobs.h | 2 -
 vp8/common/entropy.c | 11 +-
 vp8/common/entropy.h | 6 -
 vp8/common/generic/systemdependent.c | 2 -
 vp8/common/idct.h | 6 +-
 vp8/common/idctllm.c | 3 +-
 vp8/common/invtrans.c | 7 +-
 vp8/common/invtrans.h | 2 -
 vp8/common/loopfilter.c | 17 --
 vp8/common/onyxc_int.h | 6 -
 vp8/decoder/decodframe.c | 27 +-
 vp8/decoder/dequantize.c | 4 -
 vp8/decoder/dequantize.h | 12 +-
 vp8/decoder/detokenize.c | 15 +-
 vp8/decoder/detokenize.h | 2 -
 vp8/decoder/generic/dsystemdependent.c | 5 -
 vp8/decoder/idct_blk.c | 4 +-
 vp8/decoder/onyxd_int.h | 6 -
 vp8/encoder/bitstream.c | 15 +-
 vp8/encoder/block.h | 7 -
 vp8/encoder/dct.c | 4 +-
 vp8/encoder/dct.h | 5 +-
 vp8/encoder/defaultcoefcounts.h | 3 +-
 vp8/encoder/encodeframe.c | 189 +-------------
 vp8/encoder/encodeintra.c | 26 +-
 vp8/encoder/encodemb.c | 28 +-
 vp8/encoder/encodemb.h | 2 -
 vp8/encoder/generic/csystemdependent.c | 4 -
 vp8/encoder/onyx_if.c | 13 +-
 vp8/encoder/onyx_int.h | 4 -
 vp8/encoder/picklpf.c | 4 -
 vp8/encoder/quantize.c | 346 +------------------------
 vp8/encoder/quantize.h | 8 +-
 vp8/encoder/ratectrl.c | 5 +-
 vp8/encoder/rdopt.c | 26 +-
 vp8/encoder/tokenize.c | 37 +--
 vp8/encoder/tokenize.h | 2 -
 40 files changed, 57 insertions(+), 815 deletions(-)

diff --git a/vp8/common/alloccommon.c b/vp8/common/alloccommon.c
index 4d15fcb8c..5a7c79c8f 100644
--- a/vp8/common/alloccommon.c
+++ b/vp8/common/alloccommon.c
@@ -209,9 +209,7 @@ void vp8_create_common(VP8_COMMON *oci)
 
     vp8_default_bmode_probs(oci->fc.bmode_prob);
 
-#if CONFIG_T8X8
     oci->txfm_mode = ONLY_4X4;
-#endif
     oci->mb_no_coeff_skip = 1;
     oci->comp_pred_mode = HYBRID_PREDICTION;
     oci->no_lpf = 0;
diff --git a/vp8/common/blockd.c b/vp8/common/blockd.c
index 843a19635..60ef31217 100644
--- a/vp8/common/blockd.c
+++ b/vp8/common/blockd.c
@@ -22,7 +22,6 @@ const unsigned char vp8_block2above[25] =
     0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8
 };
-#if CONFIG_T8X8
 const unsigned char vp8_block2left_8x8[25] =
 {
     0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8
 };
@@ -31,6 +30,4 @@ const unsigned char vp8_block2above_8x8[25] =
 {
     0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8
 };
-#endif
-
 
diff --git a/vp8/common/blockd.h b/vp8/common/blockd.h
index 30ea8c05b..89fe03516 100644
--- a/vp8/common/blockd.h
+++ b/vp8/common/blockd.h
@@ -183,9 +183,7 @@ typedef struct
     MB_PREDICTION_MODE second_mode, second_uv_mode;
 #endif
     MV_REFERENCE_FRAME ref_frame, second_ref_frame;
-#if CONFIG_T8X8
     TX_SIZE txfm_size;
-#endif
     int_mv mv, second_mv;
     unsigned char partitioning;
     unsigned char mb_skip_coeff;    /* does this mb has coefficients at all, 1=no coefficients, 0=need decode tokens */
diff --git a/vp8/common/coefupdateprobs.h b/vp8/common/coefupdateprobs.h
index 6fe5fcc6f..05e456393 100644
--- a/vp8/common/coefupdateprobs.h
+++ b/vp8/common/coefupdateprobs.h
@@ -183,7 +183,6 @@ const vp8_prob vp8_coef_update_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTE
         },
     },
 };
-#if CONFIG_T8X8
 const vp8_prob vp8_coef_update_probs_8x8 [BLOCK_TYPES]
                                          [COEF_BANDS]
                                          [PREV_COEF_CONTEXTS]
@@ -359,4 +358,3 @@ const vp8_prob vp8_coef_update_probs_8x8 [BLOCK_TYPES]
 
     },
 };
-#endif
\ No newline at end of file
diff --git a/vp8/common/entropy.c
b/vp8/common/entropy.c index b2cfb354f..2c1667225 100644 --- a/vp8/common/entropy.c +++ b/vp8/common/entropy.c @@ -60,7 +60,6 @@ DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]) = 9, 12, 13, 10, 7, 11, 14, 15, }; -#if CONFIG_T8X8 DECLARE_ALIGNED(64, cuchar, vp8_coef_bands_8x8[64]) = { 0, 1, 2, 3, 5, 4, 4, 5, 5, 3, 6, 3, 5, 4, 6, 6, 6, 5, 5, 6, 6, 6, 6, 6, @@ -77,7 +76,6 @@ DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]) = 35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63, }; -#endif DECLARE_ALIGNED(16, const short, vp8_default_inv_zig_zag[16]) = { @@ -88,9 +86,7 @@ DECLARE_ALIGNED(16, const short, vp8_default_inv_zig_zag[16]) = }; DECLARE_ALIGNED(16, short, vp8_default_zig_zag_mask[16]); -#if CONFIG_T8X8 DECLARE_ALIGNED(64, short, vp8_default_zig_zag_mask_8x8[64]);//int64_t -#endif /* Array indices are identical to previously-existing CONTEXT_NODE indices */ @@ -132,12 +128,10 @@ void vp8_init_scan_order_mask() { vp8_default_zig_zag_mask[vp8_default_zig_zag1d[i]] = 1 << i; } -#if CONFIG_T8X8 for (i = 0; i < 64; i++) { vp8_default_zig_zag_mask_8x8[vp8_default_zig_zag1d_8x8[i]] = 1 << i; } -#endif } static void init_bit_tree(vp8_tree_index *p, int n) @@ -184,12 +178,9 @@ vp8_extra_bit_struct vp8_extra_bits[12] = void vp8_default_coef_probs(VP8_COMMON *pc) { -#if CONFIG_T8X8 int h; -#endif vpx_memcpy(pc->fc.coef_probs, default_coef_probs, sizeof(default_coef_probs)); -#if CONFIG_T8X8 h = 0; do { @@ -213,7 +204,7 @@ void vp8_default_coef_probs(VP8_COMMON *pc) while (++i < COEF_BANDS); } while (++h < BLOCK_TYPES); -#endif + } void vp8_coef_tree_initialize() diff --git a/vp8/common/entropy.h b/vp8/common/entropy.h index 541aa4d4c..c7a41f58b 100644 --- a/vp8/common/entropy.h +++ b/vp8/common/entropy.h @@ -63,9 +63,7 @@ extern vp8_extra_bit_struct vp8_extra_bits[12]; /* indexed by token value */ #define COEF_BANDS 8 extern DECLARE_ALIGNED(16, const unsigned char, vp8_coef_bands[16]); -#if CONFIG_T8X8 extern DECLARE_ALIGNED(64, const unsigned char, vp8_coef_bands_8x8[64]); -#endif /* Inside dimension is 3-valued measure of nearby complexity, that is, the extent to which nearby coefficients are nonzero. 
For the first @@ -89,18 +87,14 @@ extern DECLARE_ALIGNED(64, const unsigned char, vp8_coef_bands_8x8[64]); extern DECLARE_ALIGNED(16, const unsigned char, vp8_prev_token_class[MAX_ENTROPY_TOKENS]); extern const vp8_prob vp8_coef_update_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; -#if CONFIG_T8X8 extern const vp8_prob vp8_coef_update_probs_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; -#endif struct VP8Common; void vp8_default_coef_probs(struct VP8Common *); extern DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]); extern DECLARE_ALIGNED(16, const short, vp8_default_inv_zig_zag[16]); extern short vp8_default_zig_zag_mask[16]; -#if CONFIG_T8X8 extern DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]); extern short vp8_default_zig_zag_mask_8x8[64];//int64_t -#endif void vp8_coef_tree_initialize(void); #endif diff --git a/vp8/common/generic/systemdependent.c b/vp8/common/generic/systemdependent.c index 0adefe54c..6168dc5ef 100644 --- a/vp8/common/generic/systemdependent.c +++ b/vp8/common/generic/systemdependent.c @@ -30,11 +30,9 @@ void vp8_machine_specific_config(VP8_COMMON *ctx) rtcd->idct.idct1_scalar_add = vp8_dc_only_idct_add_c; rtcd->idct.iwalsh1 = vp8_short_inv_walsh4x4_1_c; rtcd->idct.iwalsh16 = vp8_short_inv_walsh4x4_c; -#if CONFIG_T8X8 rtcd->idct.idct8 = vp8_short_idct8x8_c; rtcd->idct.idct1_scalar_add_8x8 = vp8_dc_only_idct_add_8x8_c; rtcd->idct.ihaar2 = vp8_short_ihaar2x2_c; -#endif rtcd->recon.copy16x16 = vp8_copy_mem16x16_c; rtcd->recon.copy8x8 = vp8_copy_mem8x8_c; rtcd->recon.avg16x16 = vp8_avg_mem16x16_c; diff --git a/vp8/common/idct.h b/vp8/common/idct.h index d1890b9e5..e8ca23d66 100644 --- a/vp8/common/idct.h +++ b/vp8/common/idct.h @@ -31,7 +31,7 @@ #include "arm/idct_arm.h" #endif -#if CONFIG_T8X8 + #ifndef vp8_idct_idct8 #define vp8_idct_idct8 vp8_short_idct8x8_c #endif @@ -57,7 +57,7 @@ extern prototype_idct(vp8_idct_ihaar2_1); #endif extern prototype_idct_scalar_add(vp8_idct_idct1_scalar_add_8x8); -#endif + #ifndef vp8_idct_idct1 #define vp8_idct_idct1 vp8_short_idct4x4llm_1_c @@ -98,13 +98,11 @@ typedef struct vp8_second_order_fn_t iwalsh1; vp8_second_order_fn_t iwalsh16; -#if CONFIG_T8X8 vp8_idct_fn_t idct8; vp8_idct_fn_t idct8_1; vp8_idct_scalar_add_fn_t idct1_scalar_add_8x8; vp8_idct_fn_t ihaar2; vp8_idct_fn_t ihaar2_1; -#endif } vp8_idct_rtcd_vtable_t; #if CONFIG_RUNTIME_CPU_DETECT diff --git a/vp8/common/idctllm.c b/vp8/common/idctllm.c index c87012466..f0536d5e4 100644 --- a/vp8/common/idctllm.c +++ b/vp8/common/idctllm.c @@ -200,7 +200,7 @@ void vp8_short_inv_walsh4x4_1_c(short *input, short *output) } } -#if CONFIG_T8X8 + void vp8_dc_only_idct_add_8x8_c(short input_dc, unsigned char *pred_ptr, unsigned char *dst_ptr, @@ -408,4 +408,3 @@ void vp8_short_ihaar2x2_c(short *input, short *output, int pitch) op[8] = (ip[0] - ip[1] - ip[4] + ip[8])>>1; } -#endif diff --git a/vp8/common/invtrans.c b/vp8/common/invtrans.c index 11361c16c..eed8363a3 100644 --- a/vp8/common/invtrans.c +++ b/vp8/common/invtrans.c @@ -24,7 +24,6 @@ static void recon_dcblock(MACROBLOCKD *x) } } -#if CONFIG_T8X8 static void recon_dcblock_8x8(MACROBLOCKD *x) { BLOCKD *b = &x->block[24]; //for coeff 0, 2, 8, 10 @@ -34,7 +33,7 @@ static void recon_dcblock_8x8(MACROBLOCKD *x) x->block[12].dqcoeff[0] = b->diff[8]; } -#endif + void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch) { @@ -99,7 +98,7 @@ void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x } -#if CONFIG_T8X8 
+ void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd, short *input_dqcoeff, short *output_coeff, int pitch)//pay attention to use when 8x8 { // int b,i; @@ -171,4 +170,4 @@ void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCK } } -#endif + diff --git a/vp8/common/invtrans.h b/vp8/common/invtrans.h index 1466a5844..4c4f0d3d2 100644 --- a/vp8/common/invtrans.h +++ b/vp8/common/invtrans.h @@ -20,11 +20,9 @@ extern void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBL extern void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x); extern void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x); -#if CONFIG_T8X8 extern void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd, short *input_dqcoeff, short *output_coeff, int pitch); extern void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x); extern void vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x); extern void vp8_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x); -#endif #endif diff --git a/vp8/common/loopfilter.c b/vp8/common/loopfilter.c index 759133423..1cac063e0 100644 --- a/vp8/common/loopfilter.c +++ b/vp8/common/loopfilter.c @@ -72,7 +72,6 @@ void vp8_loop_filter_bh_c(unsigned char *y_ptr, unsigned char *u_ptr, vp8_loop_filter_horizontal_edge_c(v_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1); } -#if CONFIG_T8X8 void vp8_loop_filter_bh8x8_c(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, loop_filter_info *lfi) @@ -80,7 +79,6 @@ void vp8_loop_filter_bh8x8_c(unsigned char *y_ptr, unsigned char *u_ptr, vp8_mbloop_filter_horizontal_edge_c( y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); } -#endif void vp8_loop_filter_bhs_c(unsigned char *y_ptr, int y_stride, const unsigned char *blimit) @@ -106,7 +104,6 @@ void vp8_loop_filter_bv_c(unsigned char *y_ptr, unsigned char *u_ptr, vp8_loop_filter_vertical_edge_c(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1); } -#if CONFIG_T8X8 void vp8_loop_filter_bv8x8_c(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, loop_filter_info *lfi) @@ -115,8 +112,6 @@ void vp8_loop_filter_bv8x8_c(unsigned char *y_ptr, unsigned char *u_ptr, y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); } -#endif - void vp8_loop_filter_bvs_c(unsigned char *y_ptr, int y_stride, const unsigned char *blimit) { @@ -348,9 +343,7 @@ void vp8_loop_filter_frame const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode]; const int seg = mode_info_context->mbmi.segment_id; const int ref_frame = mode_info_context->mbmi.ref_frame; -#if CONFIG_T8X8 int tx_type = mode_info_context->mbmi.txfm_size; -#endif filter_level = lfi_n->lvl[seg][ref_frame][mode_index]; if (filter_level) @@ -369,12 +362,10 @@ void vp8_loop_filter_frame if (!skip_lf) { -#if CONFIG_T8X8 if(tx_type == TX_8X8) vp8_loop_filter_bv8x8_c (y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi); else -#endif LF_INVOKE(&cm->rtcd.loopfilter, normal_b_v) (y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi); @@ -387,12 +378,10 @@ void vp8_loop_filter_frame if (!skip_lf) { -#if CONFIG_T8X8 if(tx_type == TX_8X8) vp8_loop_filter_bh8x8_c (y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi); else -#endif LF_INVOKE(&cm->rtcd.loopfilter, normal_b_h) (y_ptr, u_ptr, v_ptr, 
post->y_stride, post->uv_stride, &lfi); } @@ -479,9 +468,7 @@ void vp8_loop_filter_frame_yonly const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode]; const int seg = mode_info_context->mbmi.segment_id; const int ref_frame = mode_info_context->mbmi.ref_frame; -#if CONFIG_T8X8 int tx_type = mode_info_context->mbmi.txfm_size; -#endif filter_level = lfi_n->lvl[seg][ref_frame][mode_index]; @@ -501,12 +488,10 @@ void vp8_loop_filter_frame_yonly if (!skip_lf) { -#if CONFIG_T8X8 if(tx_type == TX_8X8) vp8_loop_filter_bv8x8_c (y_ptr, 0, 0, post->y_stride, 0, &lfi); else -#endif LF_INVOKE(&cm->rtcd.loopfilter, normal_b_v) (y_ptr, 0, 0, post->y_stride, 0, &lfi); } @@ -518,12 +503,10 @@ void vp8_loop_filter_frame_yonly if (!skip_lf) { -#if CONFIG_T8X8 if(tx_type == TX_8X8) vp8_loop_filter_bh8x8_c (y_ptr, 0, 0, post->y_stride, 0, &lfi); else -#endif LF_INVOKE(&cm->rtcd.loopfilter, normal_b_h) (y_ptr, 0, 0, post->y_stride, 0, &lfi); } diff --git a/vp8/common/onyxc_int.h b/vp8/common/onyxc_int.h index a65123b23..535eabb42 100644 --- a/vp8/common/onyxc_int.h +++ b/vp8/common/onyxc_int.h @@ -53,9 +53,7 @@ typedef struct frame_contexts #endif vp8_prob sub_mv_ref_prob [VP8_SUBMVREFS-1]; vp8_prob coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; -#if CONFIG_T8X8 vp8_prob coef_probs_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; -#endif MV_CONTEXT mvc[2]; MV_CONTEXT pre_mvc[2]; /* not to caculate the mvcost for the frame if mvc doesn't change. */ #if CONFIG_HIGH_PRECISION_MV @@ -84,14 +82,12 @@ typedef enum NB_PREDICTION_TYPES = 3, } COMPPREDMODE_TYPE; -#if CONFIG_T8X8 /* TODO: allows larger transform */ typedef enum { ONLY_4X4 = 0, ALLOW_8X8 = 1 } TXFM_MODE; -#endif /* CONFIG_T8X8 */ typedef struct VP8_COMMON_RTCD { @@ -150,9 +146,7 @@ typedef struct VP8Common /* profile settings */ int experimental; int mb_no_coeff_skip; -#if CONFIG_T8X8 TXFM_MODE txfm_mode; -#endif COMPPREDMODE_TYPE comp_pred_mode; int no_lpf; int use_bilinear_mc_filter; diff --git a/vp8/decoder/decodframe.c b/vp8/decoder/decodframe.c index 3e3f75e32..27f1377e1 100644 --- a/vp8/decoder/decodframe.c +++ b/vp8/decoder/decodframe.c @@ -158,7 +158,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, MB_PREDICTION_MODE mode; int i; -#if CONFIG_T8X8 int tx_type; if( pbi->common.txfm_mode==ONLY_4X4 ) { @@ -175,7 +174,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, } tx_type = xd->mode_info_context->mbmi.txfm_size; -#endif if (xd->mode_info_context->mbmi.mb_skip_coeff) { @@ -183,19 +181,14 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, } else if (!vp8dx_bool_error(xd->current_bc)) { - -#if CONFIG_T8X8 for(i = 0; i < 25; i++) { xd->block[i].eob = 0; xd->eobs[i] = 0; } if ( tx_type == TX_8X8 ) - { eobtotal = vp8_decode_mb_tokens_8x8(pbi, xd); - } else -#endif eobtotal = vp8_decode_mb_tokens(pbi, xd); #ifdef DEC_DEBUG if (dec_debug) { @@ -360,7 +353,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, { BLOCKD *b = &xd->block[24]; -#if CONFIG_T8X8 + if( tx_type == TX_8X8 ) { DEQUANT_INVOKE(&pbi->dequant, block_2x2)(b); @@ -388,11 +381,8 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, (xd->qcoeff, xd->block[0].dequant, xd->predictor, xd->dst.y_buffer, xd->dst.y_stride, xd->eobs, xd->block[24].diff, xd); - } - else -#endif { DEQUANT_INVOKE(&pbi->dequant, block)(b); if (xd->eobs[24] > 1) @@ -419,18 +409,13 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, xd->dst.y_stride, xd->eobs, 
xd->block[24].diff); } } -#if CONFIG_T8X8 + if( tx_type == TX_8X8 ) - { DEQUANT_INVOKE (&pbi->dequant, idct_add_uv_block_8x8)// (xd->qcoeff+16*16, xd->block[16].dequant, xd->predictor+16*16, xd->dst.u_buffer, xd->dst.v_buffer, xd->dst.uv_stride, xd->eobs+16, xd);// - - } - else -#endif - if(xd->mode_info_context->mbmi.mode!=I8X8_PRED) + else if(xd->mode_info_context->mbmi.mode!=I8X8_PRED) DEQUANT_INVOKE (&pbi->dequant, idct_add_uv_block) (xd->qcoeff+16*16, xd->block[16].dequant, xd->predictor+16*16, xd->dst.u_buffer, xd->dst.v_buffer, @@ -1086,9 +1071,7 @@ int vp8_decode_frame(VP8D_COMP *pbi) } /* Read the loop filter level and type */ -#if CONFIG_T8X8 pc->txfm_mode = (TXFM_MODE) vp8_read_bit(bc); -#endif pc->filter_type = (LOOPFILTERTYPE) vp8_read_bit(bc); pc->filter_level = vp8_read_literal(bc, 6); @@ -1242,7 +1225,7 @@ int vp8_decode_frame(VP8D_COMP *pbi) } } } -#if CONFIG_T8X8 + if(pbi->common.txfm_mode == ALLOW_8X8 && vp8_read_bit(bc)) { // read coef probability tree @@ -1261,7 +1244,7 @@ int vp8_decode_frame(VP8D_COMP *pbi) } } } -#endif + vpx_memcpy(&xd->pre, &pc->yv12_fb[pc->lst_fb_idx], sizeof(YV12_BUFFER_CONFIG)); vpx_memcpy(&xd->dst, &pc->yv12_fb[pc->new_fb_idx], sizeof(YV12_BUFFER_CONFIG)); diff --git a/vp8/decoder/dequantize.c b/vp8/decoder/dequantize.c index c48f5c23f..f83032766 100644 --- a/vp8/decoder/dequantize.c +++ b/vp8/decoder/dequantize.c @@ -17,10 +17,8 @@ extern void vp8_short_idct4x4llm_c(short *input, short *output, int pitch) ; extern void vp8_short_idct4x4llm_1_c(short *input, short *output, int pitch); -#if CONFIG_T8X8 extern void vp8_short_idct8x8_c(short *input, short *output, int pitch); extern void vp8_short_idct8x8_1_c(short *input, short *output, int pitch); -#endif #ifdef DEC_DEBUG extern int dec_debug; @@ -121,7 +119,6 @@ void vp8_dequant_dc_idct_add_c(short *input, short *dq, unsigned char *pred, } } -#if CONFIG_T8X8 void vp8_dequantize_b_2x2_c(BLOCKD *d) { int i; @@ -330,4 +327,3 @@ void vp8_dequant_dc_idct_add_8x8_c(short *input, short *dq, unsigned char *pred, #endif } -#endif \ No newline at end of file diff --git a/vp8/decoder/dequantize.h b/vp8/decoder/dequantize.h index c7344d394..d0f162b48 100644 --- a/vp8/decoder/dequantize.h +++ b/vp8/decoder/dequantize.h @@ -42,7 +42,6 @@ unsigned char *pre, unsigned char *dst_u, \ unsigned char *dst_v, int stride, char *eobs) -#if CONFIG_T8X8 #define prototype_dequant_dc_idct_add_y_block_8x8(sym) \ void sym(short *q, short *dq, \ unsigned char *pre, unsigned char *dst, \ @@ -59,8 +58,6 @@ unsigned char *dst_v, int stride, char *eobs, \ MACROBLOCKD *xd) -#endif - #if ARCH_X86 || ARCH_X86_64 #include "x86/dequantize_x86.h" #endif @@ -99,7 +96,7 @@ extern prototype_dequant_idct_add_y_block(vp8_dequant_idct_add_y_block); #endif extern prototype_dequant_idct_add_uv_block(vp8_dequant_idct_add_uv_block); -#if CONFIG_T8X8 + #ifndef vp8_dequant_block_2x2 #define vp8_dequant_block_2x2 vp8_dequantize_b_2x2_c #endif @@ -130,7 +127,7 @@ extern prototype_dequant_idct_add_y_block_8x8(vp8_dequant_idct_add_y_block_8x8); #endif extern prototype_dequant_idct_add_uv_block_8x8(vp8_dequant_idct_add_uv_block_8x8); -#endif + typedef prototype_dequant_block((*vp8_dequant_block_fn_t)); @@ -144,13 +141,12 @@ typedef prototype_dequant_idct_add_y_block((*vp8_dequant_idct_add_y_block_fn_t)) typedef prototype_dequant_idct_add_uv_block((*vp8_dequant_idct_add_uv_block_fn_t)); -#if CONFIG_T8X8 typedef prototype_dequant_dc_idct_add_y_block_8x8((*vp8_dequant_dc_idct_add_y_block_fn_t_8x8)); typedef 
prototype_dequant_idct_add_y_block_8x8((*vp8_dequant_idct_add_y_block_fn_t_8x8)); typedef prototype_dequant_idct_add_uv_block_8x8((*vp8_dequant_idct_add_uv_block_fn_t_8x8)); -#endif + typedef struct { vp8_dequant_block_fn_t block; @@ -159,14 +155,12 @@ typedef struct vp8_dequant_dc_idct_add_y_block_fn_t dc_idct_add_y_block; vp8_dequant_idct_add_y_block_fn_t idct_add_y_block; vp8_dequant_idct_add_uv_block_fn_t idct_add_uv_block; -#if CONFIG_T8X8 vp8_dequant_block_fn_t block_2x2; vp8_dequant_idct_add_fn_t idct_add_8x8; vp8_dequant_dc_idct_add_fn_t dc_idct_add_8x8; vp8_dequant_dc_idct_add_y_block_fn_t_8x8 dc_idct_add_y_block_8x8; vp8_dequant_idct_add_y_block_fn_t_8x8 idct_add_y_block_8x8; vp8_dequant_idct_add_uv_block_fn_t_8x8 idct_add_uv_block_8x8; -#endif } vp8_dequant_rtcd_vtable_t; #if CONFIG_RUNTIME_CPU_DETECT diff --git a/vp8/decoder/detokenize.c b/vp8/decoder/detokenize.c index d7dc143c7..d3566fae5 100644 --- a/vp8/decoder/detokenize.c +++ b/vp8/decoder/detokenize.c @@ -28,7 +28,6 @@ DECLARE_ALIGNED(16, static const unsigned char, coef_bands_x[16]) = 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 7 * OCB_X }; -#if CONFIG_T8X8 DECLARE_ALIGNED(64, static const unsigned char, coef_bands_x_8x8[64]) = { 0 * OCB_X, 1 * OCB_X, 2 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 4 * OCB_X, 5 * OCB_X, 5 * OCB_X, 3 * OCB_X, 6 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 6 * OCB_X, 6 * OCB_X, @@ -39,7 +38,7 @@ DECLARE_ALIGNED(64, static const unsigned char, coef_bands_x_8x8[64]) = { 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, }; -#endif + #define EOB_CONTEXT_NODE 0 #define ZERO_CONTEXT_NODE 1 #define ONE_CONTEXT_NODE 2 @@ -166,7 +165,7 @@ DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]); range = range - split; \ NORMALIZE \ } -#if CONFIG_T8X8 + #define DECODE_AND_LOOP_IF_ZERO_8x8_2(probability,branch) \ { \ split = 1 + ((( probability*(range-1) ) ) >> 8); \ @@ -207,7 +206,7 @@ DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]); range = range - split; \ NORMALIZE \ } -#endif + #define DECODE_SIGN_WRITE_COEFF_AND_CHECK_EXIT(val) \ DECODE_AND_APPLYSIGN(val) \ Prob = coef_probs + (ENTROPY_NODES*2); \ @@ -218,7 +217,7 @@ DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]); qcoeff_ptr [ 15 ] = (INT16) v; \ goto BLOCK_FINISHED; -#if CONFIG_T8X8 + #define DECODE_SIGN_WRITE_COEFF_AND_CHECK_EXIT_8x8_2(val) \ DECODE_AND_APPLYSIGN(val) \ Prob = coef_probs + (ENTROPY_NODES*2); \ @@ -237,7 +236,7 @@ DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]); goto DO_WHILE_8x8; }\ qcoeff_ptr [ scan[63] ] = (INT16) v; \ goto BLOCK_FINISHED_8x8; -#endif + #define DECODE_EXTRABIT_AND_ADJUST_VAL(prob, bits_count)\ split = 1 + (((range-1) * prob) >> 8); \ @@ -255,7 +254,7 @@ DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]); }\ NORMALIZE -#if CONFIG_T8X8 + int vp8_decode_mb_tokens_8x8(VP8D_COMP *dx, MACROBLOCKD *x) { ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context; @@ -580,7 +579,7 @@ BLOCK_FINISHED_8x8: return eobtotal; } -#endif + int vp8_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *xd) { ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context; diff --git a/vp8/decoder/detokenize.h b/vp8/decoder/detokenize.h index c5305bb67..caedf2f37 100644 --- a/vp8/decoder/detokenize.h +++ b/vp8/decoder/detokenize.h @@ -16,8 +16,6 @@ void vp8_reset_mb_tokens_context(MACROBLOCKD *x); int 
vp8_decode_mb_tokens(VP8D_COMP *, MACROBLOCKD *); -#if CONFIG_T8X8 int vp8_decode_mb_tokens_8x8(VP8D_COMP *, MACROBLOCKD *); -#endif #endif /* DETOKENIZE_H */ diff --git a/vp8/decoder/generic/dsystemdependent.c b/vp8/decoder/generic/dsystemdependent.c index 237bc6bc7..fbba1b50a 100644 --- a/vp8/decoder/generic/dsystemdependent.c +++ b/vp8/decoder/generic/dsystemdependent.c @@ -21,17 +21,12 @@ void vp8_dmachine_specific_config(VP8D_COMP *pbi) /* Pure C: */ #if CONFIG_RUNTIME_CPU_DETECT pbi->mb.rtcd = &pbi->common.rtcd; - -#if CONFIG_T8X8 - pbi->dequant.block_2x2 = vp8_dequantize_b_2x2_c; pbi->dequant.idct_add_8x8 = vp8_dequant_idct_add_8x8_c; pbi->dequant.dc_idct_add_8x8 = vp8_dequant_dc_idct_add_8x8_c; pbi->dequant.dc_idct_add_y_block_8x8 = vp8_dequant_dc_idct_add_y_block_8x8_c; pbi->dequant.idct_add_y_block_8x8 = vp8_dequant_idct_add_y_block_8x8_c; pbi->dequant.idct_add_uv_block_8x8 = vp8_dequant_idct_add_uv_block_8x8_c; - -#endif pbi->dequant.block = vp8_dequantize_b_c; pbi->dequant.idct_add = vp8_dequant_idct_add_c; pbi->dequant.dc_idct_add = vp8_dequant_dc_idct_add_c; diff --git a/vp8/decoder/idct_blk.c b/vp8/decoder/idct_blk.c index 2015d5255..16ac46c48 100644 --- a/vp8/decoder/idct_blk.c +++ b/vp8/decoder/idct_blk.c @@ -123,7 +123,7 @@ void vp8_dequant_idct_add_uv_block_c } } -#if CONFIG_T8X8 + void vp8_dequant_dc_idct_add_y_block_8x8_c (short *q, short *dq, unsigned char *pre, unsigned char *dst, int stride, char *eobs, short *dc, MACROBLOCKD *xd) @@ -163,4 +163,4 @@ void vp8_dequant_idct_add_uv_block_8x8_c vp8_dequant_idct_add_8x8_c (q, dq, pre, dstv, 8, stride); } -#endif + diff --git a/vp8/decoder/onyxd_int.h b/vp8/decoder/onyxd_int.h index d48f40083..289808e28 100644 --- a/vp8/decoder/onyxd_int.h +++ b/vp8/decoder/onyxd_int.h @@ -43,16 +43,12 @@ typedef struct typedef struct { int const *scan; -#if CONFIG_T8X8 int const *scan_8x8; -#endif UINT8 const *ptr_block2leftabove; vp8_tree_index const *vp8_coef_tree_ptr; unsigned char *norm_ptr; UINT8 *ptr_coef_bands_x; -#if CONFIG_T8X8 UINT8 *ptr_coef_bands_x_8x8; -#endif ENTROPY_CONTEXT_PLANES *A; ENTROPY_CONTEXT_PLANES *L; @@ -61,9 +57,7 @@ typedef struct BOOL_DECODER *current_bc; vp8_prob const *coef_probs[4]; -#if CONFIG_T8X8 vp8_prob const *coef_probs_8x8[4]; -#endif UINT8 eob[25]; diff --git a/vp8/encoder/bitstream.c b/vp8/encoder/bitstream.c index 5be404167..8e22926e1 100644 --- a/vp8/encoder/bitstream.c +++ b/vp8/encoder/bitstream.c @@ -35,9 +35,7 @@ unsigned __int64 Sectionbits[500]; #ifdef ENTROPY_STATS int intra_mode_stats[10][10][10]; static unsigned int tree_update_hist [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] [2]; -#if CONFIG_T8X8 static unsigned int tree_update_hist_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] [2]; -#endif extern unsigned int active_section; #endif @@ -1685,9 +1683,7 @@ static int default_coef_context_savings(VP8_COMP *cpi) int vp8_estimate_entropy_savings(VP8_COMP *cpi) { int savings = 0; -#if CONFIG_T8X8 int i=0; -#endif VP8_COMMON *const cm = & cpi->common; const int *const rfct = cpi->count_mb_ref_frame_usage; const int rf_intra = rfct[INTRA_FRAME]; @@ -1761,7 +1757,7 @@ int vp8_estimate_entropy_savings(VP8_COMP *cpi) savings += default_coef_context_savings(cpi); -#if CONFIG_T8X8 + /* do not do this if not evena allowed */ if(cpi->common.txfm_mode == ALLOW_8X8) { @@ -1820,8 +1816,6 @@ int vp8_estimate_entropy_savings(VP8_COMP *cpi) savings += savings8x8 >> 8; } -#endif - return savings; } @@ -1955,7 +1949,6 @@ static void update_coef_probs(VP8_COMP *cpi) } 
-#if CONFIG_T8X8 /* do not do this if not evena allowed */ if(cpi->common.txfm_mode == ALLOW_8X8) { @@ -2090,8 +2083,6 @@ static void update_coef_probs(VP8_COMP *cpi) while (++i < BLOCK_TYPES); } } - -#endif } #ifdef PACKET_TESTING FILE *vpxlogc = 0; @@ -2400,9 +2391,7 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size) } } -#if CONFIG_T8X8 vp8_write_bit(bc, pc->txfm_mode); -#endif // Encode the loop filter level and type vp8_write_bit(bc, pc->filter_type); @@ -2636,7 +2625,6 @@ void print_tree_update_probs() fprintf(f, "};\n"); -#if CONFIG_T8X8 fprintf(f, "const vp8_prob tree_update_probs_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] = {\n"); for (i = 0; i < BLOCK_TYPES; i++) @@ -2674,7 +2662,6 @@ void print_tree_update_probs() fprintf(f, " },\n"); } -#endif fclose(f); } #endif diff --git a/vp8/encoder/block.h b/vp8/encoder/block.h index 54f38590c..25d2398ce 100644 --- a/vp8/encoder/block.h +++ b/vp8/encoder/block.h @@ -47,9 +47,7 @@ typedef struct int src_stride; int eob_max_offset; -#if CONFIG_T8X8 int eob_max_offset_8x8; -#endif } BLOCK; @@ -131,11 +129,8 @@ typedef struct unsigned int token_costs[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS]; - -#if CONFIG_T8X8 unsigned int token_costs_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; -#endif int optimize; int q_index; @@ -145,12 +140,10 @@ typedef struct void (*short_walsh4x4)(short *input, short *output, int pitch); void (*quantize_b)(BLOCK *b, BLOCKD *d); void (*quantize_b_pair)(BLOCK *b1, BLOCK *b2, BLOCKD *d0, BLOCKD *d1); - #if CONFIG_T8X8 void (*vp8_short_fdct8x8)(short *input, short *output, int pitch); void (*short_fhaar2x2)(short *input, short *output, int pitch); void (*quantize_b_8x8)(BLOCK *b, BLOCKD *d); void (*quantize_b_2x2)(BLOCK *b, BLOCKD *d); -#endif } MACROBLOCK; diff --git a/vp8/encoder/dct.c b/vp8/encoder/dct.c index b1e461e87..c2f2d1117 100644 --- a/vp8/encoder/dct.c +++ b/vp8/encoder/dct.c @@ -16,7 +16,7 @@ -#if CONFIG_T8X8 + void vp8_short_fdct8x8_c(short *block, short *coefs, int pitch) { int j1, i, j, k; @@ -126,7 +126,7 @@ void vp8_short_fhaar2x2_c(short *input, short *output, int pitch) //pitch = 8 op1[8]=(ip1[0] - ip1[1] - ip1[4] + ip1[8])>>1; } -#endif + void vp8_short_fdct4x4_c(short *input, short *output, int pitch) { int i; diff --git a/vp8/encoder/dct.h b/vp8/encoder/dct.h index c37d47aca..7ab525c0b 100644 --- a/vp8/encoder/dct.h +++ b/vp8/encoder/dct.h @@ -22,7 +22,7 @@ #include "arm/dct_arm.h" #endif -#if CONFIG_T8X8 + #ifndef vp8_fdct_short8x8 #define vp8_fdct_short8x8 vp8_short_fdct8x8_c @@ -34,7 +34,6 @@ extern prototype_fdct(vp8_fdct_short8x8); #endif extern prototype_fdct(vp8_fhaar_short2x2); -#endif #ifndef vp8_fdct_short4x4 #define vp8_fdct_short4x4 vp8_short_fdct4x4_c @@ -63,10 +62,8 @@ extern prototype_fdct(vp8_fdct_walsh_short4x4); typedef prototype_fdct(*vp8_fdct_fn_t); typedef struct { -#if CONFIG_T8X8 vp8_fdct_fn_t short8x8; vp8_fdct_fn_t haar_short2x2; -#endif vp8_fdct_fn_t short4x4; vp8_fdct_fn_t short8x4; vp8_fdct_fn_t fast4x4; diff --git a/vp8/encoder/defaultcoefcounts.h b/vp8/encoder/defaultcoefcounts.h index 3b54c823c..f2729d9ce 100644 --- a/vp8/encoder/defaultcoefcounts.h +++ b/vp8/encoder/defaultcoefcounts.h @@ -223,7 +223,6 @@ static const unsigned int default_coef_counts[BLOCK_TYPES] }; -#if CONFIG_T8X8 const unsigned int vp8_default_coef_counts_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] @@ -399,4 +398,4 @@ const unsigned int vp8_default_coef_counts_8x8[BLOCK_TYPES] } } 
}; -#endif + diff --git a/vp8/encoder/encodeframe.c b/vp8/encoder/encodeframe.c index d6910677f..114c7b648 100644 --- a/vp8/encoder/encodeframe.c +++ b/vp8/encoder/encodeframe.c @@ -102,187 +102,6 @@ static const unsigned char VP8_VAR_OFFS[16]= }; - -#if CONFIG_T8X8 - -//INTRA mode transform size -//When all three criteria are off the default is 4x4 -//#define INTRA_VARIANCE_ENTROPY_CRITERIA -#define INTRA_WTD_SSE_ENTROPY_CRITERIA -//#define INTRA_TEST_8X8_ONLY -// -//INTER mode transform size -//When all three criteria are off the default is 4x4 -//#define INTER_VARIANCE_ENTROPY_CRITERIA -#define INTER_WTD_SSE_ENTROPY_CRITERIA -//#define INTER_TEST_8X8_ONLY - -double variance_Block(short *b1, int pitch, int dimension) -{ - short ip[8][8]={{0}}; - short *b = b1; - int i, j = 0; - double mean = 0.0, variance = 0.0; - for (i = 0; i < dimension; i++) - { - for (j = 0; j < dimension; j++) - { - ip[i][j] = b[j]; - mean += ip[i][j]; - } - b += pitch; - } - mean /= (dimension*dimension); - - for (i = 0; i < dimension; i++) - { - for (j = 0; j < dimension; j++) - { - variance += (ip[i][j]-mean)*(ip[i][j]-mean); - } - } - variance /= (dimension*dimension); - return variance; -} - -double mean_Block(short *b, int pitch, int dimension) -{ - short ip[8][8]={{0}}; - int i, j = 0; - double mean = 0; - for (i = 0; i < dimension; i++) - { - for (j = 0; j < dimension; j++) - { - ip[i][j] = b[j]; - mean += ip[i][j]; - } - b += pitch; - } - mean /= (dimension*dimension); - - return mean; -} - -int SSE_Block(short *b, int pitch, int dimension) -{ - int i, j, sse_block = 0; - for (i = 0; i < dimension; i++) - { - for (j = 0; j < dimension; j++) - { - sse_block += b[j]*b[j]; - } - b += pitch; - } - return sse_block; -} - -double Compute_Variance_Entropy(MACROBLOCK *x) -{ - double variance_8[4] = {0.0, 0.0, 0.0, 0.0}, sum_var = 0.0, all_entropy = 0.0; - variance_8[0] = variance_Block(x->block[0].src_diff, 16, 8); - variance_8[1] = variance_Block(x->block[2].src_diff, 16, 8); - variance_8[2] = variance_Block(x->block[8].src_diff, 16, 8); - variance_8[3] = variance_Block(x->block[10].src_diff, 16, 8); - sum_var = variance_8[0] + variance_8[1] + variance_8[2] + variance_8[3]; - if(sum_var) - { - int i; - for(i = 0; i <4; i++) - { - if(variance_8[i]) - { - variance_8[i] /= sum_var; - all_entropy -= variance_8[i]*log(variance_8[i]); - } - } - } - return (all_entropy /log(2)); -} - -double Compute_Wtd_SSE_SubEntropy(MACROBLOCK *x) -{ - double variance_8[4] = {0.0, 0.0, 0.0, 0.0}; - double entropy_8[4] = {0.0, 0.0, 0.0, 0.0}; - double sse_1, sse_2, sse_3, sse_4, sse_0; - int i; - for (i=0;i<3;i+=2) - { - sse_0 = SSE_Block(x->block[i].src_diff, 16, 8); - if(sse_0) - { - sse_1 = SSE_Block(x->block[i].src_diff, 16, 4)/sse_0; - sse_2 = SSE_Block(x->block[i+1].src_diff, 16, 4)/sse_0; - sse_3 = SSE_Block(x->block[i+4].src_diff, 16, 4)/sse_0; - sse_4 = SSE_Block(x->block[i+5].src_diff, 16, 4)/sse_0; - variance_8[i]= variance_Block(x->block[i].src_diff, 16, 8); - if(sse_1 && sse_2 && sse_3 && sse_4) - entropy_8[i]= (-sse_1*log(sse_1) - -sse_2*log(sse_2) - -sse_3*log(sse_3) - -sse_4*log(sse_4))/log(2); - } - } - for (i=8;i<11;i+=2) - { - if(sse_0) - { - sse_0 = SSE_Block(x->block[i].src_diff, 16, 8); - sse_1 = SSE_Block(x->block[i].src_diff, 16, 4)/sse_0; - sse_2 = SSE_Block(x->block[i+1].src_diff, 16, 4)/sse_0; - sse_3 = SSE_Block(x->block[i+4].src_diff, 16, 4)/sse_0; - sse_4 = SSE_Block(x->block[i+5].src_diff, 16, 4)/sse_0; - variance_8[i-7]= variance_Block(x->block[i].src_diff, 16, 8); - if(sse_1 && sse_2 && sse_3 && sse_4) 
- entropy_8[i-7]= (-sse_1*log(sse_1) - -sse_2*log(sse_2) - -sse_3*log(sse_3) - -sse_4*log(sse_4))/log(2); - } - } - - if(variance_8[0]+variance_8[1]+variance_8[2]+variance_8[3]) - return (entropy_8[0]*variance_8[0]+ - entropy_8[1]*variance_8[1]+ - entropy_8[2]*variance_8[2]+ - entropy_8[3]*variance_8[3])/ - (variance_8[0]+ - variance_8[1]+ - variance_8[2]+ - variance_8[3]); - else - return 0; -} - -int vp8_8x8_selection_intra(MACROBLOCK *x) -{ -#ifdef INTRA_VARIANCE_ENTROPY_CRITERIA - return (Compute_Variance_Entropy(x) > 1.2); -#elif defined(INTRA_WTD_SSE_ENTROPY_CRITERIA) - return (Compute_Wtd_SSE_SubEntropy(x) > 1.2); -#elif defined(INTRA_TEST_8X8_ONLY) - return 1; -#else - return 0; //when all criteria are off use the default 4x4 only -#endif -} - -int vp8_8x8_selection_inter(MACROBLOCK *x) -{ -#ifdef INTER_VARIANCE_ENTROPY_CRITERIA - return (Compute_Variance_Entropy(x) > 1.5); -#elif defined(INTER_WTD_SSE_ENTROPY_CRITERIA) - return (Compute_Wtd_SSE_SubEntropy(x) > 1.5); -#elif defined(INTER_TEST_8X8_ONLY) - return 1; -#else - return 0; //when all criteria are off use the default 4x4 only -#endif -} - -#endif - // Original activity measure from Tim T's code. static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x ) { @@ -876,10 +695,8 @@ void encode_mb_row(VP8_COMP *cpi, x->active_ptr = cpi->active_map + map_index + mb_col; -#if CONFIG_T8X8 /* force 4x4 transform for mode selection */ xd->mode_info_context->mbmi.txfm_size = TX_4X4; -#endif if (cm->frame_type == KEY_FRAME) { @@ -1485,7 +1302,6 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) vp8_update_zbin_extra(cpi, x); } -#if CONFIG_T8X8 /* test code: set transform size based on mode selection */ if(cpi->common.txfm_mode == ALLOW_8X8 && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED @@ -1499,7 +1315,6 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4; cpi->t4x4_count ++; } -#endif if(x->e_mbd.mode_info_context->mbmi.mode == I8X8_PRED) { @@ -1583,7 +1398,7 @@ int vp8cx_encode_inter_macroblock cpi->comp_pred_count[pred_context]++; } -#if CONFIG_T8X8 + /* test code: set transform size based on mode selection */ if( cpi->common.txfm_mode == ALLOW_8X8 && x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED @@ -1598,7 +1413,7 @@ int vp8cx_encode_inter_macroblock x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4; cpi->t4x4_count++; } -#endif + /* switch back to the regular quantizer for the encode */ if (cpi->sf.improved_quant) { diff --git a/vp8/encoder/encodeintra.c b/vp8/encoder/encodeintra.c index 63a146551..c791762ad 100644 --- a/vp8/encoder/encodeintra.c +++ b/vp8/encoder/encodeintra.c @@ -114,9 +114,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) { BLOCK *b = &x->block[0]; -#if CONFIG_T8X8 int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size; -#endif #if CONFIG_COMP_INTRA_PRED if (x->e_mbd.mode_info_context->mbmi.second_mode == (MB_PREDICTION_MODE) (DC_PRED - 1)) @@ -129,35 +127,27 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride); -#if CONFIG_T8X8 if( tx_type == TX_8X8 ) vp8_transform_intra_mby_8x8(x); else -#endif - vp8_transform_intra_mby(x); + vp8_transform_intra_mby(x); -#if CONFIG_T8X8 if(tx_type == TX_8X8) vp8_quantize_mby_8x8(x); else -#endif vp8_quantize_mby(x); if (x->optimize) { -#if CONFIG_T8X8 if( tx_type == TX_8X8 ) 
vp8_optimize_mby_8x8(x, rtcd); else -#endif vp8_optimize_mby(x, rtcd); } -#if CONFIG_T8X8 if(tx_type == TX_8X8) vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd); else -#endif vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd); #ifdef ENC_DEBUG @@ -198,9 +188,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) { -#if CONFIG_T8X8 int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size; -#endif #if CONFIG_COMP_INTRA_PRED if (x->e_mbd.mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE) (DC_PRED - 1)) { @@ -215,18 +203,14 @@ void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) #endif ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride); -#if CONFIG_T8X8 if(tx_type == TX_8X8) vp8_transform_mbuv_8x8(x); else -#endif vp8_transform_mbuv(x); -#if CONFIG_T8X8 if(tx_type == TX_8X8) vp8_quantize_mbuv_8x8(x); else -#endif vp8_quantize_mbuv(x); #ifdef ENC_DEBUG @@ -262,20 +246,16 @@ void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) #endif if (x->optimize) { -#if CONFIG_T8X8 if(tx_type == TX_8X8) vp8_optimize_mbuv_8x8(x, rtcd); else -#endif vp8_optimize_mbuv(x, rtcd); } -#if CONFIG_T8X8 if(tx_type == TX_8X8) - vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd); + vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd); else -#endif - vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd); + vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd); vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd); } diff --git a/vp8/encoder/encodemb.c b/vp8/encoder/encodemb.c index 22bf92b07..ab0f1c13e 100644 --- a/vp8/encoder/encodemb.c +++ b/vp8/encoder/encodemb.c @@ -142,7 +142,6 @@ static void build_dcblock(MACROBLOCK *x) src_diff_ptr[i] = x->coeff[i * 16]; } } -#if CONFIG_T8X8 void vp8_build_dcblock_8x8(MACROBLOCK *x) { short *src_diff_ptr = &x->src_diff[384]; @@ -156,7 +155,7 @@ void vp8_build_dcblock_8x8(MACROBLOCK *x) src_diff_ptr[4] = x->coeff[8 * 16]; src_diff_ptr[8] = x->coeff[12 * 16]; } -#endif + void vp8_transform_mbuv(MACROBLOCK *x) { int i; @@ -236,8 +235,6 @@ static void transform_mby(MACROBLOCK *x) } } -#if CONFIG_T8X8 - void vp8_transform_mbuv_8x8(MACROBLOCK *x) { int i; @@ -338,7 +335,6 @@ void vp8_transform_mby_8x8(MACROBLOCK *x) } } -#endif #define RDTRUNC(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF ) #define RDTRUNC_8x8(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF ) @@ -798,7 +794,6 @@ void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) } } -#if CONFIG_T8X8 void optimize_b_8x8(MACROBLOCK *mb, int i, int type, ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l, const VP8_ENCODER_RTCD *rtcd) @@ -1150,50 +1145,37 @@ void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) } } -#endif void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) { -#if CONFIG_T8X8 int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size; -#endif - vp8_build_inter_predictors_mb(&x->e_mbd); vp8_subtract_mb(rtcd, x); -#if CONFIG_T8X8 if( tx_type == TX_8X8 ) vp8_transform_mb_8x8(x); else -#endif transform_mb(x); -#if CONFIG_T8X8 if( tx_type == TX_8X8 ) vp8_quantize_mb_8x8(x); else -#endif vp8_quantize_mb(x); if (x->optimize) { -#if CONFIG_T8X8 if( tx_type == TX_8X8 ) optimize_mb_8x8(x, rtcd); else -#endif optimize_mb(x, rtcd); } -#if CONFIG_T8X8 if( tx_type == TX_8X8 ) 
vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd); else -#endif vp8_inverse_transform_mb(IF_RTCD(&rtcd->common->idct), &x->e_mbd); -#if CONFIG_T8X8 if( tx_type == TX_8X8 ) { #ifdef ENC_DEBUG @@ -1225,7 +1207,6 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) } #endif } -#endif RECON_INVOKE(&rtcd->common->recon, recon_mb) (IF_RTCD(&rtcd->common->recon), &x->e_mbd); @@ -1251,9 +1232,7 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) /* this function is used by first pass only */ void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) { -#if CONFIG_T8X8 int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size; -#endif BLOCK *b = &x->block[0]; @@ -1261,19 +1240,16 @@ void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride); -#if CONFIG_T8X8 if( tx_type == TX_8X8 ) vp8_transform_mby_8x8(x); else -#endif transform_mby(x); vp8_quantize_mby(x); -#if CONFIG_T8X8 + if( tx_type == TX_8X8 ) vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd); else -#endif vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd); RECON_INVOKE(&rtcd->common->recon, recon_mby) diff --git a/vp8/encoder/encodemb.h b/vp8/encoder/encodemb.h index 995ce4f0a..e211eea65 100644 --- a/vp8/encoder/encodemb.h +++ b/vp8/encoder/encodemb.h @@ -104,7 +104,6 @@ void vp8_optimize_mby(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd); void vp8_optimize_mbuv(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd); void vp8_encode_inter16x16y(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x); -#if CONFIG_T8X8 void vp8_transform_mb_8x8(MACROBLOCK *mb); void vp8_transform_mby_8x8(MACROBLOCK *x); void vp8_transform_mbuv_8x8(MACROBLOCK *x); @@ -112,7 +111,6 @@ void vp8_transform_intra_mby_8x8(MACROBLOCK *x); void vp8_build_dcblock_8x8(MACROBLOCK *b); void vp8_optimize_mby_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd); void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd); -#endif void vp8_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch); diff --git a/vp8/encoder/generic/csystemdependent.c b/vp8/encoder/generic/csystemdependent.c index b6fc0dc9b..ebb16e3f0 100644 --- a/vp8/encoder/generic/csystemdependent.c +++ b/vp8/encoder/generic/csystemdependent.c @@ -69,10 +69,8 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi) cpi->rtcd.variance.mse16x16 = vp8_mse16x16_c; cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c; -#if CONFIG_T8X8 cpi->rtcd.fdct.short8x8 = vp8_short_fdct8x8_c; cpi->rtcd.fdct.haar_short2x2 = vp8_short_fhaar2x2_c; -#endif cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c; cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c; cpi->rtcd.fdct.fast4x4 = vp8_short_fdct4x4_c; @@ -90,12 +88,10 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi) cpi->rtcd.quantize.quantb_pair = vp8_regular_quantize_b_pair; cpi->rtcd.quantize.fastquantb = vp8_fast_quantize_b_c; cpi->rtcd.quantize.fastquantb_pair = vp8_fast_quantize_b_pair_c; -#if CONFIG_T8X8 cpi->rtcd.quantize.quantb_8x8 = vp8_regular_quantize_b_8x8; cpi->rtcd.quantize.fastquantb_8x8 = vp8_fast_quantize_b_8x8_c; cpi->rtcd.quantize.quantb_2x2 = vp8_regular_quantize_b_2x2; cpi->rtcd.quantize.fastquantb_2x2 = vp8_fast_quantize_b_2x2_c; -#endif cpi->rtcd.search.full_search = vp8_full_search_sad; cpi->rtcd.search.refining_search = vp8_refining_search_sad; cpi->rtcd.search.diamond_search = vp8_diamond_search_sad; diff --git 
a/vp8/encoder/onyx_if.c b/vp8/encoder/onyx_if.c index 3184932d6..925e24755 100644 --- a/vp8/encoder/onyx_if.c +++ b/vp8/encoder/onyx_if.c @@ -964,25 +964,19 @@ void vp8_set_speed_features(VP8_COMP *cpi) if (cpi->sf.improved_dct) { -#if CONFIG_T8X8 cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8); -#endif cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x4); cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short4x4); } else { -#if CONFIG_T8X8 cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8); -#endif cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast8x4); cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast4x4); } cpi->mb.short_walsh4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, walsh_short4x4); -#if CONFIG_T8X8 cpi->mb.short_fhaar2x2 = FDCT_INVOKE(&cpi->rtcd.fdct, haar_short2x2); -#endif if (cpi->sf.improved_quant) { @@ -990,10 +984,8 @@ void vp8_set_speed_features(VP8_COMP *cpi) quantb); cpi->mb.quantize_b_pair = QUANTIZE_INVOKE(&cpi->rtcd.quantize, quantb_pair); -#if CONFIG_T8X8 cpi->mb.quantize_b_8x8 = QUANTIZE_INVOKE(&cpi->rtcd.quantize, quantb_8x8); cpi->mb.quantize_b_2x2 = QUANTIZE_INVOKE(&cpi->rtcd.quantize, quantb_2x2); -#endif } else { @@ -1001,10 +993,8 @@ void vp8_set_speed_features(VP8_COMP *cpi) fastquantb); cpi->mb.quantize_b_pair = QUANTIZE_INVOKE(&cpi->rtcd.quantize, fastquantb_pair); -#if CONFIG_T8X8 cpi->mb.quantize_b_8x8 = QUANTIZE_INVOKE(&cpi->rtcd.quantize, fastquantb_8x8); cpi->mb.quantize_b_2x2 = QUANTIZE_INVOKE(&cpi->rtcd.quantize, fastquantb_2x2); -#endif } if (cpi->sf.improved_quant != last_improved_quant) vp8cx_init_quantizer(cpi); @@ -1863,9 +1853,8 @@ void vp8_remove_compressor(VP8_PTR *ptr) #if CONFIG_INTERNAL_STATS vp8_clear_system_state(); -#if CONFIG_T8X8 + printf("\n8x8-4x4:%d-%d\n", cpi->t8x8_count, cpi->t4x4_count); -#endif if (cpi->pass != 1) { FILE *f = fopen("opsnr.stt", "a"); diff --git a/vp8/encoder/onyx_int.h b/vp8/encoder/onyx_int.h index fa2da1dd6..9c655b845 100644 --- a/vp8/encoder/onyx_int.h +++ b/vp8/encoder/onyx_int.h @@ -453,11 +453,9 @@ typedef struct VP8_COMP //save vp8_tree_probs_from_distribution result for each frame to avoid repeat calculation vp8_prob frame_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; unsigned int frame_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2]; -#if CONFIG_T8X8 unsigned int coef_counts_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */ vp8_prob frame_coef_probs_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; unsigned int frame_branch_ct_8x8 [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2]; -#endif int gfu_boost; int kf_boost; @@ -514,10 +512,8 @@ typedef struct VP8_COMP int gf_update_recommended; int skip_true_count; int skip_false_count; -#if CONFIG_T8X8 int t4x4_count; int t8x8_count; -#endif #if CONFIG_UVINTRA int y_uv_mode_count[VP8_YMODES][VP8_UV_MODES]; diff --git a/vp8/encoder/picklpf.c b/vp8/encoder/picklpf.c index 54a50fb08..171ec3a4d 100644 --- a/vp8/encoder/picklpf.c +++ b/vp8/encoder/picklpf.c @@ -354,11 +354,9 @@ void vp8cx_pick_filter_level_sg(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi, int segme if (cpi->twopass.section_intra_rating < 20) Bias = Bias * cpi->twopass.section_intra_rating / 20; -#if CONFIG_T8X8 // yx, bias less for large block size if(cpi->common.txfm_mode == ALLOW_8X8) Bias >>= 1; -#endif filt_high = ((filt_mid + filter_step) > max_filter_level) ? 
max_filter_level : (filt_mid + filter_step); filt_low = ((filt_mid - filter_step) < min_filter_level) ? min_filter_level : (filt_mid - filter_step); @@ -578,11 +576,9 @@ void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) if (cpi->twopass.section_intra_rating < 20) Bias = Bias * cpi->twopass.section_intra_rating / 20; -#if CONFIG_T8X8 // yx, bias less for large block size if(cpi->common.txfm_mode == ALLOW_8X8) Bias >>= 1; -#endif filt_high = ((filt_mid + filter_step) > max_filter_level) ? max_filter_level : (filt_mid + filter_step); filt_low = ((filt_mid - filter_step) < min_filter_level) ? min_filter_level : (filt_mid - filter_step); diff --git a/vp8/encoder/quantize.c b/vp8/encoder/quantize.c index 3d1c2ff35..636ed3a67 100644 --- a/vp8/encoder/quantize.c +++ b/vp8/encoder/quantize.c @@ -23,56 +23,6 @@ extern int enc_debug; #endif #define EXACT_QUANT - -#ifdef EXACT_FASTQUANT -void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d) -{ - int i, rc, eob; - int zbin; - int x, y, z, sz; - short *coeff_ptr = b->coeff; - short *zbin_ptr = b->zbin; - short *round_ptr = b->round; - short *quant_ptr = b->quant_fast; - unsigned char *quant_shift_ptr = b->quant_shift; - short *qcoeff_ptr = d->qcoeff; - short *dqcoeff_ptr = d->dqcoeff; - short *dequant_ptr = d->dequant; - - vpx_memset(qcoeff_ptr, 0, 32); - vpx_memset(dqcoeff_ptr, 0, 32); - - eob = -1; - - for (i = 0; i < 16; i++) - { - rc = vp8_default_zig_zag1d[i]; - z = coeff_ptr[rc]; - zbin = zbin_ptr[rc] ; - - sz = (z >> 31); // sign of z - x = (z ^ sz) - sz; // x = abs(z) - - if (x >= zbin) - { - x += round_ptr[rc]; - y = (((x * quant_ptr[rc]) >> 16) + x) - >> quant_shift_ptr[rc]; // quantize (x) - x = (y ^ sz) - sz; // get the sign back - qcoeff_ptr[rc] = x; // write to destination - dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value - - if (y) - { - eob = i; // last nonzero coeffs - } - } - } - d->eob = eob + 1; -} - -#else - void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d) { int i, rc, eob, nonzeros; @@ -83,11 +33,10 @@ void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d) short *qcoeff_ptr = d->qcoeff; short *dqcoeff_ptr = d->dqcoeff; short *dequant_ptr = d->dequant; -#if CONFIG_T8X8 vpx_memset(qcoeff_ptr, 0, 32); vpx_memset(dqcoeff_ptr, 0, 32); -#endif + eob = -1; for (i = 0; i < 16; i++) { @@ -110,7 +59,7 @@ void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d) d->eob = eob + 1; } -#endif + #ifdef EXACT_QUANT void vp8_regular_quantize_b(BLOCK *b, BLOCKD *d) @@ -277,7 +226,8 @@ void vp8_regular_quantize_b(BLOCK *b, BLOCKD *d) d->eob = eob + 1; } -#endif //EXACT_QUANT +#endif +//EXACT_QUANT void vp8_quantize_mby_c(MACROBLOCK *x) @@ -314,113 +264,8 @@ void vp8_quantize_mbuv_c(MACROBLOCK *x) x->quantize_b(&x->block[i], &x->e_mbd.block[i]); } -#if CONFIG_T8X8 - -#ifdef EXACT_FASTQUANT -void vp8_fast_quantize_b_2x2_c(BLOCK *b, BLOCKD *d) -{ - int i, rc, eob; - int zbin; - int x, y, z, sz; - short *coeff_ptr = b->coeff; - short *zbin_ptr = b->zbin; - short *round_ptr = b->round; - short *quant_ptr = b->quant; - short *quant_shift_ptr = b->quant_shift; - short *qcoeff_ptr = d->qcoeff; - short *dqcoeff_ptr = d->dqcoeff; - short *dequant_ptr = d->dequant; - //double q2nd = 4; - vpx_memset(qcoeff_ptr, 0, 32); - vpx_memset(dqcoeff_ptr, 0, 32); - - eob = -1; - - for (i = 0; i < 4; i++) - { - rc = vp8_default_zig_zag1d[i]; - z = coeff_ptr[rc]; - //zbin = zbin_ptr[rc]/q2nd ; - zbin = zbin_ptr[rc] ; - - sz = (z >> 31); // sign of z - x = (z ^ sz) - sz; // x = abs(z) - - if (x >= zbin) - { - //x += (round_ptr[rc]/q2nd); - x += (round_ptr[rc]); - //y = 
((int)((int)(x * quant_ptr[rc] * q2nd) >> 16) + x) - // >> quant_shift_ptr[rc]; // quantize (x) - y = ((int)((int)(x * quant_ptr[rc]) >> 16) + x) - >> quant_shift_ptr[rc]; // quantize (x) - x = (y ^ sz) - sz; // get the sign back - qcoeff_ptr[rc] = x; // write to destination - dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value - - if (y) - { - eob = i; // last nonzero coeffs - } - } - } - d->eob = eob + 1; -} - -void vp8_fast_quantize_b_8x8_c(BLOCK *b, BLOCKD *d)// only ac and dc difference, no difference among ac -{ - int i, rc, eob; - int zbin; - int x, y, z, sz; - short *coeff_ptr = b->coeff; - short *zbin_ptr = b->zbin; - short *round_ptr = b->round; - short *quant_ptr = b->quant; - short *quant_shift_ptr = b->quant_shift; - short *qcoeff_ptr = d->qcoeff; - short *dqcoeff_ptr = d->dqcoeff; - short *dequant_ptr = d->dequant; - //double q1st = 2; - vpx_memset(qcoeff_ptr, 0, 64*sizeof(short)); - vpx_memset(dqcoeff_ptr, 0, 64*sizeof(short)); - - eob = -1; - - for (i = 0; i < 64; i++) - { - rc = vp8_default_zig_zag1d_8x8[i]; - z = coeff_ptr[rc]; - //zbin = zbin_ptr[rc!=0]/q1st ; - zbin = zbin_ptr[rc!=0] ; - - sz = (z >> 31); // sign of z - x = (z ^ sz) - sz; // x = abs(z) - - if (x >= zbin) - { - //x += round_ptr[rc]/q1st; - //y = ((int)(((int)((x * quant_ptr[rc!=0] * q1st)) >> 16) + x)) - // >> quant_shift_ptr[rc!=0]; // quantize (x) - x += round_ptr[rc]; - y = ((int)(((int)((x * quant_ptr[rc!=0])) >> 16) + x)) - >> quant_shift_ptr[rc!=0]; // quantize (x) - x = (y ^ sz) - sz; // get the sign back - qcoeff_ptr[rc] = x; // write to destination - //dqcoeff_ptr[rc] = x * dequant_ptr[rc!=0] / q1st; // dequantized value - dqcoeff_ptr[rc] = x * dequant_ptr[rc!=0]; // dequantized value - - if (y) - { - eob = i; // last nonzero coeffs - } - } - } - d->eob = eob + 1; -} - -#else void vp8_fast_quantize_b_2x2_c(BLOCK *b, BLOCKD *d) { @@ -520,9 +365,9 @@ void vp8_fast_quantize_b_8x8_c(BLOCK *b, BLOCKD *d) d->eob = eob + 1; } -#endif //EXACT_FASTQUANT -#ifdef EXACT_QUANT + + void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d) { int i, rc, eob; @@ -757,107 +602,7 @@ void vp8_strict_quantize_b_8x8(BLOCK *b, BLOCKD *d) d->eob = eob + 1; } -#else -void vp8_regular_quantize_b_2x2(BLOCK *b, BLOCKD *d) -{ - int i, rc, eob; - int zbin; - int x, y, z, sz; - short *zbin_boost_ptr = b->zrun_zbin_boost; - short *coeff_ptr = b->coeff; - short *zbin_ptr = b->zbin; - short *round_ptr = b->round; - short *quant_ptr = b->quant; - short *qcoeff_ptr = d->qcoeff; - short *dqcoeff_ptr = d->dqcoeff; - short *dequant_ptr = d->dequant; - short zbin_oq_value = b->zbin_extra; - //double q2nd = 4; - vpx_memset(qcoeff_ptr, 0, 32); - vpx_memset(dqcoeff_ptr, 0, 32); - - eob = -1; - for (i = 0; i < 4; i++) - { - rc = vp8_default_zig_zag1d[i]; - z = coeff_ptr[rc]; - //zbin = (zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value)/q2nd; - zbin = (zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value); - zbin_boost_ptr ++; - sz = (z >> 31); // sign of z - x = (z ^ sz) - sz; // x = abs(z) - - if (x >= zbin) - { - //y = (((x + round_ptr[rc]/q2nd) * quant_ptr[rc]*q2nd)) >> 16; // quantize (x) - y = (((x + round_ptr[rc]) * quant_ptr[rc])) >> 16; // quantize (x) - x = (y ^ sz) - sz; // get the sign back - qcoeff_ptr[rc] = x; // write to destination - //dqcoeff_ptr[rc] = x * dequant_ptr[rc]/q2nd; // dequantized value - dqcoeff_ptr[rc] = x * dequant_ptr[rc]; // dequantized value - - if (y) - { - eob = i; // last nonzero coeffs - zbin_boost_ptr = &b->zrun_zbin_boost[0]; // reset zero runlength - } - } - } - - d->eob = eob + 1; -} - -void 
vp8_regular_quantize_b_8x8(BLOCK *b, BLOCKD *d) -{ - int i, rc, eob; - int zbin; - int x, y, z, sz; - short *zbin_boost_ptr = b->zrun_zbin_boost; - short *coeff_ptr = b->coeff; - short *zbin_ptr = b->zbin; - short *round_ptr = b->round; - short *quant_ptr = b->quant; - short *qcoeff_ptr = d->qcoeff; - short *dqcoeff_ptr = d->dqcoeff; - short *dequant_ptr = d->dequant; - short zbin_oq_value = b->zbin_extra; - //double q1st = 2; - vpx_memset(qcoeff_ptr, 0, 64*sizeof(short)); - vpx_memset(dqcoeff_ptr, 0, 64*sizeof(short)); - - eob = -1; - for (i = 0; i < 64; i++) - { - - rc = vp8_default_zig_zag1d_8x8[i]; - z = coeff_ptr[rc]; - //zbin = (zbin_ptr[rc!=0] + *zbin_boost_ptr + zbin_oq_value)/q1st; - zbin = (zbin_ptr[rc!=0] + *zbin_boost_ptr + zbin_oq_value); - zbin_boost_ptr ++; - sz = (z >> 31); // sign of z - x = (z ^ sz) - sz; // x = abs(z) - - if (x >= zbin) - { - //y = ((x + round_ptr[rc!=0]/q1st) * quant_ptr[rc!=0] * q1st) >> 16; - y = ((x + round_ptr[rc!=0]) * quant_ptr[rc!=0]) >> 16; - x = (y ^ sz) - sz; // get the sign back - qcoeff_ptr[rc] = x; // write to destination - //dqcoeff_ptr[rc] = x * dequant_ptr[rc!=0]/q1st; // dequantized value - dqcoeff_ptr[rc] = x * dequant_ptr[rc!=0]; // dequantized value - - if (y) - { - eob = i; // last nonzero coeffs - zbin_boost_ptr = &b->zrun_zbin_boost[0]; // reset zero runlength - } - } - } - d->eob = eob + 1; -} - -#endif //EXACT_QUANT void vp8_quantize_mby_8x8(MACROBLOCK *x) { @@ -905,7 +650,7 @@ void vp8_quantize_mbuv_8x8(MACROBLOCK *x) x->quantize_b_8x8(&x->block[i], &x->e_mbd.block[i]); } -#endif //CONFIG_T8X8 + /* quantize_b_pair function pointer in MACROBLOCK structure is set to one of * these two C functions if corresponding optimized routine is not available. @@ -1023,69 +768,6 @@ void vp8cx_init_quantizer(VP8_COMP *cpi) } } } -#else -void vp8cx_init_quantizer(VP8_COMP *cpi) -{ - int i; - int quant_val; - int Q; - int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, 44, 44}; - int qrounding_factor = 48; - - for (Q = 0; Q < QINDEX_RANGE; Q++) - { - int qzbin_factor = vp8_dc_quant(Q,0) < 148 ) ? 
84: 80; - - // dc values - quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q); - cpi->Y1quant[Q][0] = (1 << 16) / quant_val; - cpi->Y1zbin[Q][0] = ((qzbin_factors * quant_val) + 64) >> 7; - cpi->Y1round[Q][0] = (qrounding_factor * quant_val) >> 7; - cpi->common.Y1dequant[Q][0] = quant_val; - cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7; - - quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q); - cpi->Y2quant[Q][0] = (1 << 16) / quant_val; - cpi->Y2zbin[Q][0] = ((qzbin_factors * quant_val) + 64) >> 7; - cpi->Y2round[Q][0] = (qrounding_factor * quant_val) >> 7; - cpi->common.Y2dequant[Q][0] = quant_val; - cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7; - - quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q); - cpi->UVquant[Q][0] = (1 << 16) / quant_val; - cpi->UVzbin[Q][0] = ((qzbin_factors * quant_val) + 64) >> 7;; - cpi->UVround[Q][0] = (qrounding_factor * quant_val) >> 7; - cpi->common.UVdequant[Q][0] = quant_val; - cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7; - - // all the ac values = ; - for (i = 1; i < 16; i++) - { - int rc = vp8_default_zig_zag1d[i]; - - quant_val = vp8_ac_yquant(Q); - cpi->Y1quant[Q][rc] = (1 << 16) / quant_val; - cpi->Y1zbin[Q][rc] = ((qzbin_factors * quant_val) + 64) >> 7; - cpi->Y1round[Q][rc] = (qrounding_factor * quant_val) >> 7; - cpi->common.Y1dequant[Q][rc] = quant_val; - cpi->zrun_zbin_boost_y1[Q][i] = (quant_val * zbin_boost[i]) >> 7; - - quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q); - cpi->Y2quant[Q][rc] = (1 << 16) / quant_val; - cpi->Y2zbin[Q][rc] = ((qzbin_factors * quant_val) + 64) >> 7; - cpi->Y2round[Q][rc] = (qrounding_factors * quant_val) >> 7; - cpi->common.Y2dequant[Q][rc] = quant_val; - cpi->zrun_zbin_boost_y2[Q][i] = (quant_val * zbin_boost[i]) >> 7; - - quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q); - cpi->UVquant[Q][rc] = (1 << 16) / quant_val; - cpi->UVzbin[Q][rc] = ((qzbin_factors * quant_val) + 64) >> 7; - cpi->UVround[Q][rc] = (qrounding_factors * quant_val) >> 7; - cpi->common.UVdequant[Q][rc] = quant_val; - cpi->zrun_zbin_boost_uv[Q][i] = (quant_val * zbin_boost[i]) >> 7; - } - } -} #endif @@ -1139,17 +821,13 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) { x->block[i].eob_max_offset = get_segdata( xd, segment_id, SEG_LVL_EOB ); -#if CONFIG_T8X8 x->block[i].eob_max_offset_8x8 = get_segdata( xd, segment_id, SEG_LVL_EOB ); -#endif } else { x->block[i].eob_max_offset = 16; -#if CONFIG_T8X8 x->block[i].eob_max_offset_8x8 = 64; -#endif } } @@ -1175,19 +853,13 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) { x->block[i].eob_max_offset = get_segdata( xd, segment_id, SEG_LVL_EOB ); -#if CONFIG_T8X8 x->block[i].eob_max_offset_8x8 = get_segdata( xd, segment_id, SEG_LVL_EOB ); -#endif - } else { x->block[i].eob_max_offset = 16; -#if CONFIG_T8X8 x->block[i].eob_max_offset_8x8 = 64; -#endif - } } @@ -1212,17 +884,13 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) { x->block[24].eob_max_offset = get_segdata( xd, segment_id, SEG_LVL_EOB ); -#if CONFIG_T8X8 x->block[24].eob_max_offset_8x8 = get_segdata( xd, segment_id, SEG_LVL_EOB ); -#endif } else { x->block[24].eob_max_offset = 16; -#if CONFIG_T8X8 x->block[24].eob_max_offset_8x8 = 4; -#endif } /* save this macroblock QIndex for vp8_update_zbin_extra() */ diff --git a/vp8/encoder/quantize.h b/vp8/encoder/quantize.h index 1a2bad667..93a1b714c 100644 --- a/vp8/encoder/quantize.h +++ b/vp8/encoder/quantize.h @@ -45,7 +45,7 @@ extern 
prototype_quantize_block_pair(vp8_quantize_quantb_pair); #define vp8_quantize_fastquantb vp8_fast_quantize_b_c #endif extern prototype_quantize_block(vp8_quantize_fastquantb); -#if CONFIG_T8X8 + #ifndef vp8_quantize_quantb_8x8 #define vp8_quantize_quantb_8x8 vp8_regular_quantize_b_8x8 #endif @@ -65,7 +65,7 @@ extern prototype_quantize_block(vp8_quantize_quantb_2x2); #define vp8_quantize_fastquantb_2x2 vp8_fast_quantize_b_2x2_c #endif extern prototype_quantize_block(vp8_quantize_fastquantb_2x2); -#endif + #ifndef vp8_quantize_fastquantb_pair #define vp8_quantize_fastquantb_pair vp8_fast_quantize_b_pair_c @@ -77,12 +77,10 @@ typedef struct prototype_quantize_block(*quantb); prototype_quantize_block_pair(*quantb_pair); prototype_quantize_block(*fastquantb); -#if CONFIG_T8X8 prototype_quantize_block(*quantb_8x8); prototype_quantize_block(*fastquantb_8x8); prototype_quantize_block(*quantb_2x2); prototype_quantize_block(*fastquantb_2x2); -#endif prototype_quantize_block_pair(*fastquantb_pair); } vp8_quantize_rtcd_vtable_t; @@ -108,10 +106,8 @@ extern prototype_quantize_mb(vp8_quantize_mby); #endif extern void vp8_strict_quantize_b(BLOCK *b,BLOCKD *d); -#if CONFIG_T8X8 extern void vp8_strict_quantize_b_8x8(BLOCK *b,BLOCKD *d); extern void vp8_strict_quantize_b_2x2(BLOCK *b,BLOCKD *d); -#endif struct VP8_COMP; extern void vp8_set_quantizer(struct VP8_COMP *cpi, int Q); extern void vp8cx_frame_init_quantizer(struct VP8_COMP *cpi); diff --git a/vp8/encoder/ratectrl.c b/vp8/encoder/ratectrl.c index 47609306c..bf06f32e1 100644 --- a/vp8/encoder/ratectrl.c +++ b/vp8/encoder/ratectrl.c @@ -242,9 +242,7 @@ void vp8_setup_key_frame(VP8_COMP *cpi) #endif -#if CONFIG_T8X8 cpi->common.txfm_mode = ONLY_4X4; -#endif //cpi->common.filter_level = 0; // Reset every key frame. 
cpi->common.filter_level = cpi->common.base_qindex * 3 / 8 ; @@ -268,13 +266,12 @@ void vp8_setup_key_frame(VP8_COMP *cpi) } void vp8_setup_inter_frame(VP8_COMP *cpi) { -#if CONFIG_T8X8 + if(cpi->common.Width * cpi->common.Height > 640*360) //||cpi->this_frame_target < 7 * cpi->common.MBs) cpi->common.txfm_mode = ALLOW_8X8; else cpi->common.txfm_mode = ONLY_4X4; -#endif if(cpi->common.refresh_alt_ref_frame) { diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c index 609b70f8e..58b3216d6 100644 --- a/vp8/encoder/rdopt.c +++ b/vp8/encoder/rdopt.c @@ -353,12 +353,10 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) (const vp8_prob( *)[8][3][11]) cpi->common.fc.coef_probs ); -#if CONFIG_T8X8 fill_token_costs( cpi->mb.token_costs_8x8, (const vp8_prob( *)[8][3][11]) cpi->common.fc.coef_probs_8x8 ); -#endif #if CONFIG_QIMODE //rough estimate for costing cpi->common.kf_ymode_probs_index = cpi->common.base_qindex>>4; @@ -664,7 +662,6 @@ static void macro_block_yrd( MACROBLOCK *mb, *Rate = vp8_rdcost_mby(mb); } -#if CONFIG_T8X8 static int cost_coeffs_2x2(MACROBLOCK *mb, BLOCKD *b, int type, @@ -794,7 +791,6 @@ static void macro_block_yrd_8x8( MACROBLOCK *mb, // rate *Rate = vp8_rdcost_mby_8x8(mb); } -#endif static void copy_predictor(unsigned char *dst, const unsigned char *predictor) { @@ -1311,7 +1307,7 @@ static int rd_inter16x16_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate, return RDCOST(x->rdmult, x->rddiv, *rate, *distortion); } -#if CONFIG_T8X8 + static int rd_cost_mbuv_8x8(MACROBLOCK *mb) { int b; @@ -1351,7 +1347,7 @@ static int rd_inter16x16_uv_8x8(VP8_COMP *cpi, MACROBLOCK *x, int *rate, return RDCOST(x->rdmult, x->rddiv, *rate, *distortion); } -#endif + static int rd_inter4x4_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate, int *distortion, int fullpixel) @@ -2470,10 +2466,8 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int int rate2, distortion2; int uv_intra_rate, uv_intra_distortion, uv_intra_rate_tokenonly; int uv_intra_tteob = 0; -#if CONFIG_T8X8 int uv_intra_rate_8x8, uv_intra_distortion_8x8, uv_intra_rate_tokenonly_8x8; int uv_intra_tteob_8x8=0; -#endif int rate_y, UNINITIALIZED_IS_SAFE(rate_uv); int distortion_uv; int best_yrd = INT_MAX; @@ -2564,9 +2558,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int for(i=16; i<24; i++) uv_intra_tteob += x->e_mbd.block[i].eob; -#if CONFIG_T8X8 uv_intra_tteob_8x8 = uv_intra_tteob; -#endif // Get estimates of reference frame costs for each reference frame // that depend on the current prediction etc. 
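Note on the transform-size selection made unconditional above: key frames are reset to the 4x4 transform in vp8_setup_key_frame, while vp8_setup_inter_frame allows the 8x8 transform only when the frame is larger than 640x360. The following self-contained C sketch mirrors that rule for illustration; the type sketch_txfm_mode, the constants SKETCH_ONLY_4X4 / SKETCH_ALLOW_8X8, and the helper select_txfm_mode are local stand-ins invented for this example, not part of the encoder's actual interface.

#include <stdio.h>

/* Local stand-ins for the encoder's ONLY_4X4 / ALLOW_8X8 modes,
 * defined here only so the sketch compiles on its own. */
typedef enum { SKETCH_ONLY_4X4, SKETCH_ALLOW_8X8 } sketch_txfm_mode;

/* Mirrors the rule in the hunks above: key frames always fall back to
 * the 4x4 transform; inter frames larger than 640x360 may use 8x8. */
static sketch_txfm_mode select_txfm_mode(int width, int height, int is_key_frame)
{
    if (is_key_frame)
        return SKETCH_ONLY_4X4;
    return (width * height > 640 * 360) ? SKETCH_ALLOW_8X8 : SKETCH_ONLY_4X4;
}

int main(void)
{
    printf("1280x720 inter frame: %s\n",
           select_txfm_mode(1280, 720, 0) == SKETCH_ALLOW_8X8 ? "8x8 allowed" : "4x4 only");
    printf(" 320x240 inter frame: %s\n",
           select_txfm_mode(320, 240, 0) == SKETCH_ALLOW_8X8 ? "8x8 allowed" : "4x4 only");
    return 0;
}

With this rule, the rdopt hunks that follow simply dispatch on txfm_mode: when 8x8 is allowed, macro_block_yrd_8x8 and rd_inter16x16_uv_8x8 are used for rate-distortion costing; otherwise the existing 4x4 paths are taken.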
@@ -2770,12 +2762,10 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int // FIXME compound intra prediction RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby) (&x->e_mbd); -#if CONFIG_T8X8 if(cpi->common.txfm_mode == ALLOW_8X8) macro_block_yrd_8x8(x, &rate_y, &distortion, IF_RTCD(&cpi->rtcd)) ; else -#endif macro_block_yrd(x, &rate_y, &distortion, IF_RTCD(&cpi->rtcd.encodemb)) ; rate2 += rate_y; @@ -3014,12 +3004,10 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rate2 += vp8_cost_mv_ref(&cpi->common, this_mode, mdcounts); // Y cost and distortion -#if CONFIG_T8X8 if(cpi->common.txfm_mode == ALLOW_8X8) macro_block_yrd_8x8(x, &rate_y, &distortion, IF_RTCD(&cpi->rtcd)); else -#endif macro_block_yrd(x, &rate_y, &distortion, IF_RTCD(&cpi->rtcd.encodemb)); @@ -3029,13 +3017,11 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int // UV cost and distortion vp8_build_inter16x16_predictors_mbuv(&x->e_mbd); -#if CONFIG_T8X8 if(cpi->common.txfm_mode == ALLOW_8X8) rd_inter16x16_uv_8x8(cpi, x, &rate_uv, &distortion_uv, cpi->common.full_pixel); else -#endif rd_inter16x16_uv(cpi, x, &rate_uv, &distortion_uv, cpi->common.full_pixel); @@ -3126,12 +3112,10 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int &x->e_mbd.predictor[320], 16, 8); /* Y cost and distortion */ -#if CONFIG_T8X8 if(cpi->common.txfm_mode == ALLOW_8X8) macro_block_yrd_8x8(x, &rate_y, &distortion, IF_RTCD(&cpi->rtcd)); else -#endif macro_block_yrd(x, &rate_y, &distortion, IF_RTCD(&cpi->rtcd.encodemb)); @@ -3139,13 +3123,11 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int distortion2 += distortion; /* UV cost and distortion */ -#if CONFIG_T8X8 if(cpi->common.txfm_mode == ALLOW_8X8) rd_inter16x16_uv_8x8(cpi, x, &rate_uv, &distortion_uv, cpi->common.full_pixel); else -#endif rd_inter16x16_uv(cpi, x, &rate_uv, &distortion_uv, cpi->common.full_pixel); @@ -3195,7 +3177,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int if(has_y2_block) tteob += x->e_mbd.block[24].eob; -#if CONFIG_T8X8 if(cpi->common.txfm_mode ==ALLOW_8X8 && has_y2_block) { for (i = 0; i < 16; i+=4) @@ -3211,7 +3192,6 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int } } else -#endif { for (i = 0; i < 16; i++) tteob += (x->e_mbd.block[i].eob > has_y2_block); @@ -3467,9 +3447,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int void vp8_rd_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate_) { -#if CONFIG_T8X8 MACROBLOCKD *xd = &x->e_mbd; -#endif int error4x4, error16x16; int rate4x4, rate16x16 = 0, rateuv; int dist4x4, dist16x16, distuv; diff --git a/vp8/encoder/tokenize.c b/vp8/encoder/tokenize.c index d496bbde1..e2c9dc9a7 100644 --- a/vp8/encoder/tokenize.c +++ b/vp8/encoder/tokenize.c @@ -24,14 +24,10 @@ #ifdef ENTROPY_STATS _int64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; -#if CONFIG_T8X8 _int64 context_counters_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; #endif -#endif void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ; -#if CONFIG_T8X8 void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ; -#endif void vp8_fix_contexts(MACROBLOCKD *x); static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE*2]; @@ -104,7 +100,6 @@ static void fill_value_tokens() vp8_dct_value_cost_ptr = dct_value_cost + 
DCT_MAX_VALUE; } -#if CONFIG_T8X8 static void tokenize2nd_order_b_8x8 ( MACROBLOCKD *xd, @@ -176,7 +171,6 @@ static void tokenize2nd_order_b_8x8 *a = *l = pt; } -#endif static void tokenize2nd_order_b ( @@ -247,7 +241,7 @@ static void tokenize2nd_order_b *a = *l = pt; } -#if CONFIG_T8X8 + static void tokenize1st_order_b_8x8 ( MACROBLOCKD *xd, @@ -313,7 +307,7 @@ static void tokenize1st_order_b_8x8 *a = *l = pt; } -#endif + static void tokenize1st_order_b @@ -465,7 +459,7 @@ static int mb_is_skippable(MACROBLOCKD *x, int has_y2_block) return skip; } -#if CONFIG_T8X8 + static int mb_is_skippable_8x8(MACROBLOCKD *x) { int has_y2_block; @@ -485,17 +479,14 @@ static int mb_is_skippable_8x8(MACROBLOCKD *x) return skip; } -#endif + void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) { int plane_type; int has_y2_block; int b; - -#if CONFIG_T8X8 int tx_type = x->mode_info_context->mbmi.txfm_size; -#endif // If the MB is going to be skipped because of a segment level flag // exclude this from the skip count stats used to calculate the @@ -516,13 +507,9 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) && x->mode_info_context->mbmi.mode != SPLITMV); x->mode_info_context->mbmi.mb_skip_coeff = -#if CONFIG_T8X8 (( tx_type == TX_8X8 ) ? mb_is_skippable_8x8(x) : mb_is_skippable(x, has_y2_block)); -#else - mb_is_skippable(x, has_y2_block); -#endif if (x->mode_info_context->mbmi.mb_skip_coeff) { @@ -530,11 +517,9 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) if (!cpi->common.mb_no_coeff_skip) { -#if CONFIG_T8X8 if ( tx_type == TX_8X8 ) vp8_stuff_mb_8x8(cpi, x, t) ; else -#endif vp8_stuff_mb(cpi, x, t) ; } else @@ -550,7 +535,6 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) plane_type = 3; if(has_y2_block) { -#if CONFIG_T8X8 if ( tx_type == TX_8X8 ) { ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context; @@ -561,13 +545,12 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) L + vp8_block2left_8x8[24], cpi); } else -#endif tokenize2nd_order_b(x, t, cpi); plane_type = 0; } -#if CONFIG_T8X8 + if ( tx_type == TX_8X8 ) { ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context; @@ -594,7 +577,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) } } else -#endif + tokenize1st_order_b(x, t, plane_type, cpi); } @@ -604,9 +587,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) void init_context_counters(void) { vpx_memset(context_counters, 0, sizeof(context_counters)); -#if CONFIG_T8X8 vpx_memset(context_counters_8x8, 0, sizeof(context_counters_8x8)); -#endif } void print_context_counters() @@ -670,7 +651,6 @@ void print_context_counters() } while (++type < BLOCK_TYPES); -#if CONFIG_T8X8 fprintf(f, "int Contexts_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];\n\n"); fprintf(f, "const int default_contexts_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS] = {"); @@ -718,7 +698,6 @@ void print_context_counters() fprintf(f, "\n }"); } while (++type < BLOCK_TYPES); -#endif fprintf(f, "\n};\n"); fclose(f); @@ -731,7 +710,7 @@ void vp8_tokenize_initialize() fill_value_tokens(); } -#if CONFIG_T8X8 + static __inline void stuff2nd_order_b_8x8 ( const BLOCKD *const b, @@ -857,7 +836,7 @@ void vp8_stuff_mb_8x8(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) *(L + vp8_block2left_8x8[b]+1 ) = *(L + vp8_block2left_8x8[b]); } } -#endif + static __inline void stuff2nd_order_b ( diff --git a/vp8/encoder/tokenize.h b/vp8/encoder/tokenize.h 
index cd122f19c..545c5d045 100644 --- a/vp8/encoder/tokenize.h +++ b/vp8/encoder/tokenize.h @@ -38,10 +38,8 @@ void init_context_counters(); void print_context_counters(); extern _int64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; -#if CONFIG_T8X8 extern _int64 context_counters_8x8[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; #endif -#endif extern const int *vp8_dct_value_cost_ptr; /* TODO: The Token field should be broken out into a separate char array to * improve cache locality, since it's needed for costing when the rest of the