Merge of the TX_16X16 experiment

Change-Id: I22aa803ffff330622cdb77277e7b196a9766f882
Author: Deb Mukherjee
Date: 2012-10-10 11:20:33 -07:00
Parent: 239b6a8f30
Commit: a7333b0a5b
39 changed files with 25 additions and 380 deletions
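This is a mechanical flag-removal commit: tx16x16 is dropped from EXPERIMENT_LIST in configure, and every #if CONFIG_TX16X16 or #if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16 guard in the tree is unwrapped so the 16x16 transform path is always compiled in. A minimal sketch of the recurring pattern, using the default-probability copy from the vp8_default_coef_probs() hunk below:

/* Before the merge: compiled only when the experiment was enabled. */
#if CONFIG_TX16X16
  vpx_memcpy(pc->fc.coef_probs_16x16, default_coef_probs_16x16,
             sizeof(pc->fc.coef_probs_16x16));
#endif

/* After the merge: the same call, now unconditional. */
  vpx_memcpy(pc->fc.coef_probs_16x16, default_coef_probs_16x16,
             sizeof(pc->fc.coef_probs_16x16));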

configure
View File

@ -224,7 +224,6 @@ EXPERIMENT_LIST="
hybridtransform
hybridtransform8x8
switchable_interp
tx16x16
newbestrefmv
new_mvref
hybridtransform16x16

View File

@ -1,7 +1,7 @@
LIBVPX_TEST_SRCS-yes += test.mk
LIBVPX_TEST_SRCS-yes += acm_random.h
LIBVPX_TEST_SRCS-yes += boolcoder_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_TX16X16) += dct16x16_test.cc
LIBVPX_TEST_SRCS-yes += dct16x16_test.cc
LIBVPX_TEST_SRCS-yes += fdct4x4_test.cc
LIBVPX_TEST_SRCS-yes += fdct8x8_test.cc
LIBVPX_TEST_SRCS-yes += idct8x8_test.cc

View File

@ -129,9 +129,7 @@ typedef enum {
typedef enum {
TX_4X4, // 4x4 dct transform
TX_8X8, // 8x8 dct transform
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
TX_16X16, // 16x16 dct transform
#endif
TX_SIZE_MAX // Number of different transforms available
} TX_SIZE;
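With the guard gone, TX_16X16 is always a member and TX_SIZE_MAX is unconditionally 3, so arrays indexed by transform size (for example the per-size token_costs tables filled in vp8_initialize_rd_consts() later in this diff) always carry a 16x16 slot. The resolved enum implied by this hunk:

typedef enum {
  TX_4X4,      // 4x4 dct transform
  TX_8X8,      // 8x8 dct transform
  TX_16X16,    // 16x16 dct transform
  TX_SIZE_MAX  // Number of different transforms available
} TX_SIZE;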

View File

@ -13,7 +13,4 @@
Generated file included by entropy.c */
#define COEF_UPDATE_PROB 252
#define COEF_UPDATE_PROB_8X8 252
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
#define COEF_UPDATE_PROB_16X16 252
#endif

View File

@ -966,7 +966,6 @@ default_hybrid_coef_probs_8x8[BLOCK_TYPES_8X8]
};
#endif
#if CONFIG_TX16X16
static const vp8_prob
default_coef_probs_16x16[BLOCK_TYPES_16X16]
[COEF_BANDS]
@ -1382,4 +1381,3 @@ static const vp8_prob
}
};
#endif
#endif

View File

@ -97,7 +97,6 @@ DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]) = {
58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63,
};
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
// Table can be optimized.
DECLARE_ALIGNED(16, const int, vp8_coef_bands_16x16[256]) = {
0, 1, 2, 3, 5, 4, 4, 5, 5, 3, 6, 3, 5, 4, 6, 6,
@ -135,7 +134,6 @@ DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d_16x16[256]) = {
203, 218, 233, 248, 249, 234, 219, 204, 189, 174, 159, 175, 190, 205, 220, 235,
250, 251, 236, 221, 206, 191, 207, 222, 237, 252, 253, 238, 223, 239, 254, 255,
};
#endif
/* Array indices are identical to previously-existing CONTEXT_NODE indices */
@ -222,7 +220,6 @@ void vp8_default_coef_probs(VP8_COMMON *pc) {
sizeof(pc->fc.hybrid_coef_probs_8x8));
#endif
#if CONFIG_TX16X16
vpx_memcpy(pc->fc.coef_probs_16x16, default_coef_probs_16x16,
sizeof(pc->fc.coef_probs_16x16));
#if CONFIG_HYBRIDTRANSFORM16X16
@ -230,7 +227,6 @@ void vp8_default_coef_probs(VP8_COMMON *pc) {
default_hybrid_coef_probs_16x16,
sizeof(pc->fc.hybrid_coef_probs_16x16));
#endif
#endif
}
void vp8_coef_tree_initialize() {
@ -419,7 +415,6 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) {
}
#endif
#if CONFIG_TX16X16
for (i = 0; i < BLOCK_TYPES_16X16; ++i)
for (j = 0; j < COEF_BANDS; ++j)
for (k = 0; k < PREV_COEF_CONTEXTS; ++k) {
@ -464,5 +459,4 @@ void vp8_adapt_coef_probs(VP8_COMMON *cm) {
}
}
#endif
#endif
}

View File

@ -74,9 +74,7 @@ extern vp8_extra_bit_struct vp8_extra_bits[12]; /* indexed by token value */
#define COEF_BANDS 8
extern DECLARE_ALIGNED(16, const int, vp8_coef_bands[16]);
extern DECLARE_ALIGNED(64, const int, vp8_coef_bands_8x8[64]);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
extern DECLARE_ALIGNED(16, const int, vp8_coef_bands_16x16[256]);
#endif
/* Inside dimension is 3-valued measure of nearby complexity, that is,
the extent to which nearby coefficients are nonzero. For the first
@ -115,9 +113,7 @@ extern short vp8_default_zig_zag_mask[16];
extern DECLARE_ALIGNED(64, const int, vp8_default_zig_zag1d_8x8[64]);
void vp8_coef_tree_initialize(void);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
extern DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d_16x16[256]);
#endif
void vp8_adapt_coef_probs(struct VP8Common *);
#endif

View File

@ -33,9 +33,8 @@ void vp8_machine_specific_config(VP8_COMMON *ctx) {
rtcd->idct.idct8 = vp8_short_idct8x8_c;
rtcd->idct.idct1_scalar_add_8x8 = vp8_dc_only_idct_add_8x8_c;
rtcd->idct.ihaar2 = vp8_short_ihaar2x2_c;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
rtcd->idct.idct16x16 = vp8_short_idct16x16_c;
#endif
rtcd->recon.copy16x16 = vp8_copy_mem16x16_c;
rtcd->recon.copy8x8 = vp8_copy_mem8x8_c;
rtcd->recon.avg16x16 = vp8_avg_mem16x16_c;
rtcd->recon.avg8x8 = vp8_avg_mem8x8_c;

View File

@ -43,12 +43,10 @@
#define Y2_WHT_UPSCALE_FACTOR 2
#endif
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
#ifndef vp8_idct_idct16x16
#define vp8_idct_idct16x16 vp8_short_idct16x16_c
#endif
extern prototype_idct(vp8_idct_idct16x16);
#endif
#ifndef vp8_idct_idct8
#define vp8_idct_idct8 vp8_short_idct8x8_c
@ -136,9 +134,7 @@ typedef struct {
vp8_idct_fn_t ihaar2;
vp8_idct_fn_t ihaar2_1;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
vp8_idct_fn_t idct16x16;
#endif
} vp8_idct_rtcd_vtable_t;
#if CONFIG_RUNTIME_CPU_DETECT
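idct16x16 is now an unconditional member of the IDCT vtable. Under the RTCD scheme, call sites go through the vtable when runtime CPU detection is enabled and bind straight to the default symbol otherwise; a sketch of the dispatch macro, its shape assumed from the IF_RTCD()/FDCT_INVOKE() uses elsewhere in this diff rather than quoted from it:

#if CONFIG_RUNTIME_CPU_DETECT
#define IDCT_INVOKE(ctx, fn) (ctx)->fn      /* runtime dispatch via vtable */
#else
#define IDCT_INVOKE(ctx, fn) vp8_idct_##fn  /* compile-time binding        */
#endif

/* Either way the call site reads the same: */
IDCT_INVOKE(&rtcd->common->idct, idct16x16)(input, output, pitch);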

View File

@ -779,7 +779,6 @@ void vp8_short_ihaar2x2_c(short *input, short *output, int pitch) {
}
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
#if 0
// Keep a really bad float version as reference for now.
void vp8_short_idct16x16_c(short *input, short *output, int pitch) {
@ -1070,4 +1069,3 @@ void vp8_short_idct16x16_c(short *input, short *output, int pitch) {
}
vp8_clear_system_state(); // Make it simd safe : __asm emms;
}
#endif

View File

@ -171,7 +171,6 @@ void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
}
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void vp8_inverse_transform_b_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
short *input_dqcoeff,
short *output_coeff, int pitch) {
@ -210,4 +209,3 @@ void vp8_inverse_transform_mb_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
&blockd[i].diff[0], 16);
}
#endif

View File

@ -30,7 +30,6 @@ extern void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MAC
extern void vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
extern void vp8_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
extern void vp8_inverse_transform_b_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
short *input_dqcoeff, short *output_coeff,
int pitch);
@ -38,4 +37,3 @@ extern void vp8_inverse_transform_mb_16x16(const vp8_idct_rtcd_vtable_t *rtcd, M
extern void vp8_inverse_transform_mby_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
extern void vp8_inverse_transform_mbuv_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
#endif
#endif

View File

@ -335,11 +335,7 @@ void vp8_loop_filter_frame
vp8_loop_filter_mbv_c
(y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
if (!skip_lf
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
&& tx_type != TX_16X16
#endif
) {
if (!skip_lf && tx_type != TX_16X16) {
if (tx_type == TX_8X8)
vp8_loop_filter_bv8x8_c
(y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
@ -360,11 +356,7 @@ void vp8_loop_filter_frame
vp8_loop_filter_mbh_c
(y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
if (!skip_lf
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
&& tx_type != TX_16X16
#endif
) {
if (!skip_lf && tx_type != TX_16X16) {
if (tx_type == TX_8X8)
vp8_loop_filter_bh8x8_c
(y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
@ -478,11 +470,7 @@ void vp8_loop_filter_frame_yonly
vp8_loop_filter_mbv_c
(y_ptr, 0, 0, post->y_stride, 0, &lfi);
if (!skip_lf
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
&& tx_type != TX_16X16
#endif
) {
if (!skip_lf && tx_type != TX_16X16) {
if (tx_type == TX_8X8)
vp8_loop_filter_bv8x8_c
(y_ptr, 0, 0, post->y_stride, 0, &lfi);
@ -496,11 +484,7 @@ void vp8_loop_filter_frame_yonly
vp8_loop_filter_mbh_c
(y_ptr, 0, 0, post->y_stride, 0, &lfi);
if (!skip_lf
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
&& tx_type != TX_16X16
#endif
) {
if (!skip_lf && tx_type != TX_16X16) {
if (tx_type == TX_8X8)
vp8_loop_filter_bh8x8_c
(y_ptr, 0, 0, post->y_stride, 0, &lfi);

View File

@ -59,12 +59,10 @@ typedef struct frame_contexts {
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_prob hybrid_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
#if CONFIG_TX16X16
vp8_prob coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_prob hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
#endif
#if CONFIG_NEWMVENTROPY
nmv_context nmvc;
@ -102,13 +100,11 @@ typedef struct frame_contexts {
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
#if CONFIG_TX16X16
vp8_prob pre_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_prob pre_hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
#endif
unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS]
@ -125,14 +121,12 @@ typedef struct frame_contexts {
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
#if CONFIG_TX16X16
unsigned int coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#if CONFIG_HYBRIDTRANSFORM16X16
unsigned int hybrid_coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
#endif
#if CONFIG_NEWMVENTROPY
nmv_context_counts NMVcount;
@ -167,13 +161,11 @@ typedef enum {
typedef enum {
ONLY_4X4 = 0,
ALLOW_8X8 = 1,
#if CONFIG_TX16X16
ALLOW_16X16 = 2,
#endif
#if CONFIG_TX_SELECT
TX_MODE_SELECT = 2 + CONFIG_TX16X16,
TX_MODE_SELECT = 3,
#endif
NB_TXFM_MODES = 2 + CONFIG_TX16X16 + CONFIG_TX_SELECT,
NB_TXFM_MODES = 3 + CONFIG_TX_SELECT,
} TXFM_MODE;
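With TX16X16 folded in, the transform-mode ladder becomes fixed: ONLY_4X4 = 0, ALLOW_8X8 = 1, ALLOW_16X16 = 2, and TX_MODE_SELECT = 3 when the TX_SELECT experiment is enabled, which is why the constants above collapse from 2 + CONFIG_TX16X16 to plain 3. The resolved enum, as implied by this hunk:

typedef enum {
  ONLY_4X4 = 0,
  ALLOW_8X8 = 1,
  ALLOW_16X16 = 2,
#if CONFIG_TX_SELECT
  TX_MODE_SELECT = 3,
#endif
  NB_TXFM_MODES = 3 + CONFIG_TX_SELECT,
} TXFM_MODE;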
typedef struct VP8_COMMON_RTCD {

View File

@ -175,18 +175,13 @@ static void vp8_kfread_modes(VP8D_COMP *pbi,
m->mbmi.mode <= TM_PRED) {
// FIXME(rbultje) code ternary symbol once all experiments are merged
m->mbmi.txfm_size = vp8_read(bc, cm->prob_tx[0]);
#if CONFIG_TX16X16
if (m->mbmi.txfm_size != TX_4X4)
m->mbmi.txfm_size += vp8_read(bc, cm->prob_tx[1]);
#endif
} else
#endif
#if CONFIG_TX16X16
if (cm->txfm_mode >= ALLOW_16X16 && m->mbmi.mode <= TM_PRED) {
m->mbmi.txfm_size = TX_16X16;
} else
#endif
if (cm->txfm_mode >= ALLOW_8X8 && m->mbmi.mode != B_PRED) {
} else if (cm->txfm_mode >= ALLOW_8X8 && m->mbmi.mode != B_PRED) {
m->mbmi.txfm_size = TX_8X8;
} else {
m->mbmi.txfm_size = TX_4X4;
@ -1290,20 +1285,15 @@ static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) {
// FIXME(rbultje) code ternary symbol once all experiments are merged
mbmi->txfm_size = vp8_read(bc, cm->prob_tx[0]);
#if CONFIG_TX16X16
if (mbmi->txfm_size != TX_4X4)
mbmi->txfm_size += vp8_read(bc, cm->prob_tx[1]);
#endif
} else
#endif
#if CONFIG_TX16X16
if (cm->txfm_mode >= ALLOW_16X16 &&
((mbmi->ref_frame == INTRA_FRAME && mbmi->mode <= TM_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) {
mbmi->txfm_size = TX_16X16;
} else
#endif
if (cm->txfm_mode >= ALLOW_8X8 &&
} else if (cm->txfm_mode >= ALLOW_8X8 &&
((mbmi->ref_frame == INTRA_FRAME && mbmi->mode != B_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) {
mbmi->txfm_size = TX_8X8;
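The FIXME above notes that this should eventually be a single ternary symbol; until then the transform size is coded as up to two binary decisions: one bit under prob_tx[0] separates TX_4X4 from the larger sizes, and a second bit under prob_tx[1] splits TX_8X8 from TX_16X16. A sketch of the resolved read path, relying on TX_4X4, TX_8X8, TX_16X16 being consecutive values starting at 0:

/* 4x4 is coded as 0, 8x8 as 1 0, 16x16 as 1 1. */
mbmi->txfm_size = vp8_read(bc, cm->prob_tx[0]);     /* 0: TX_4X4, 1: larger   */
if (mbmi->txfm_size != TX_4X4)
  mbmi->txfm_size += vp8_read(bc, cm->prob_tx[1]);  /* 0: TX_8X8, 1: TX_16X16 */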

View File

@ -290,12 +290,9 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
xd->block[i].eob = 0;
xd->eobs[i] = 0;
}
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type == TX_16X16)
eobtotal = vp8_decode_mb_tokens_16x16(pbi, xd);
else
#endif
if (tx_type == TX_8X8)
else if (tx_type == TX_8X8)
eobtotal = vp8_decode_mb_tokens_8x8(pbi, xd);
else
eobtotal = vp8_decode_mb_tokens(pbi, xd);
@ -457,7 +454,6 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
} else {
BLOCKD *b = &xd->block[24];
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type == TX_16X16) {
#if CONFIG_HYBRIDTRANSFORM16X16
if (mode < I8X8_PRED && active_ht16) {
@ -480,9 +476,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
16, xd->dst.y_stride);
#endif
}
else
#endif
if (tx_type == TX_8X8) {
else if (tx_type == TX_8X8) {
#if CONFIG_SUPERBLOCKS
void *orig = xd->mode_info_context;
int n, num = xd->mode_info_context->mbmi.encoded_as_sb ? 4 : 1;
@ -569,9 +563,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
#endif
if ((tx_type == TX_8X8 &&
xd->mode_info_context->mbmi.mode != I8X8_PRED)
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
|| tx_type == TX_16X16
#endif
)
DEQUANT_INVOKE(&pbi->dequant, idct_add_uv_block_8x8) //
(xd->qcoeff + 16 * 16, xd->block[16].dequant,
@ -1012,7 +1004,6 @@ static void read_coef_probs(VP8D_COMP *pbi) {
}
#endif
#if CONFIG_TX16X16
// 16x16
if (pbi->common.txfm_mode > ALLOW_8X8 && vp8_read_bit(bc)) {
// read coef probability tree
@ -1052,7 +1043,6 @@ static void read_coef_probs(VP8D_COMP *pbi) {
}
}
#endif
#endif
}
int vp8_decode_frame(VP8D_COMP *pbi) {
@ -1282,25 +1272,15 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
/* Read the loop filter level and type */
#if CONFIG_TX_SELECT
#if CONFIG_TX16X16
pc->txfm_mode = vp8_read_literal(bc, 2);
#else
pc->txfm_mode = vp8_read_bit(bc);
if (pc->txfm_mode)
pc->txfm_mode += vp8_read_bit(bc);
#endif
if (pc->txfm_mode == TX_MODE_SELECT) {
pc->prob_tx[0] = vp8_read_literal(bc, 8);
#if CONFIG_TX16X16
pc->prob_tx[1] = vp8_read_literal(bc, 8);
#endif
}
#else
pc->txfm_mode = (TXFM_MODE) vp8_read_bit(bc);
#if CONFIG_TX16X16
if (pc->txfm_mode == ALLOW_8X8)
pc->txfm_mode = ALLOW_16X16;
#endif
#endif
pc->filter_type = (LOOPFILTERTYPE) vp8_read_bit(bc);
@ -1445,13 +1425,11 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
vp8_copy(pbi->common.fc.pre_hybrid_coef_probs_8x8,
pbi->common.fc.hybrid_coef_probs_8x8);
#endif
#if CONFIG_TX16X16
vp8_copy(pbi->common.fc.pre_coef_probs_16x16,
pbi->common.fc.coef_probs_16x16);
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(pbi->common.fc.pre_hybrid_coef_probs_16x16,
pbi->common.fc.hybrid_coef_probs_16x16);
#endif
#endif
vp8_copy(pbi->common.fc.pre_ymode_prob, pbi->common.fc.ymode_prob);
vp8_copy(pbi->common.fc.pre_uv_mode_prob, pbi->common.fc.uv_mode_prob);
@ -1473,11 +1451,9 @@ int vp8_decode_frame(VP8D_COMP *pbi) {
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_zero(pbi->common.fc.hybrid_coef_counts_8x8);
#endif
#if CONFIG_TX16X16
vp8_zero(pbi->common.fc.coef_counts_16x16);
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_zero(pbi->common.fc.hybrid_coef_counts_16x16);
#endif
#endif
vp8_zero(pbi->common.fc.ymode_counts);
vp8_zero(pbi->common.fc.uv_mode_counts);

View File

@ -509,7 +509,6 @@ void vp8_ht_dequant_idct_add_16x16_c(TX_TYPE tx_type, short *input, short *dq,
}
#endif
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void vp8_dequant_idct_add_16x16_c(short *input, short *dq, unsigned char *pred,
unsigned char *dest, int pitch, int stride) {
short output[256];
@ -544,4 +543,3 @@ void vp8_dequant_idct_add_16x16_c(short *input, short *dq, unsigned char *pred,
pred += pitch;
}
}
#endif

View File

@ -145,12 +145,10 @@ extern prototype_dequant_idct_add_y_block_8x8(vp8_dequant_idct_add_y_block_8x8);
#endif
extern prototype_dequant_idct_add_uv_block_8x8(vp8_dequant_idct_add_uv_block_8x8);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
#ifndef vp8_dequant_idct_add_16x16
#define vp8_dequant_idct_add_16x16 vp8_dequant_idct_add_16x16_c
#endif
extern prototype_dequant_idct_add(vp8_dequant_idct_add_16x16);
#endif
typedef prototype_dequant_block((*vp8_dequant_block_fn_t));
@ -184,9 +182,7 @@ typedef struct {
vp8_dequant_dc_idct_add_y_block_fn_t_8x8 dc_idct_add_y_block_8x8;
vp8_dequant_idct_add_y_block_fn_t_8x8 idct_add_y_block_8x8;
vp8_dequant_idct_add_uv_block_fn_t_8x8 idct_add_uv_block_8x8;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
vp8_dequant_idct_add_fn_t idct_add_16x16;
#endif
} vp8_dequant_rtcd_vtable_t;
#if CONFIG_RUNTIME_CPU_DETECT

View File

@ -39,7 +39,6 @@ DECLARE_ALIGNED(16, const int, coef_bands_x_8x8[64]) = {
7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X,
};
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
DECLARE_ALIGNED(16, const int, coef_bands_x_16x16[256]) = {
0 * OCB_X, 1 * OCB_X, 2 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 4 * OCB_X, 5 * OCB_X, 5 * OCB_X, 3 * OCB_X, 6 * OCB_X, 3 * OCB_X, 5 * OCB_X, 4 * OCB_X, 6 * OCB_X, 6 * OCB_X,
6 * OCB_X, 5 * OCB_X, 5 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X, 6 * OCB_X,
@ -58,7 +57,6 @@ DECLARE_ALIGNED(16, const int, coef_bands_x_16x16[256]) = {
7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X,
7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X, 7 * OCB_X
};
#endif
#define EOB_CONTEXT_NODE 0
#define ZERO_CONTEXT_NODE 1
@ -105,9 +103,7 @@ void vp8_reset_mb_tokens_context(MACROBLOCKD *xd) {
if ((xd->mode_info_context->mbmi.mode != B_PRED &&
xd->mode_info_context->mbmi.mode != I8X8_PRED &&
xd->mode_info_context->mbmi.mode != SPLITMV)
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
|| xd->mode_info_context->mbmi.txfm_size == TX_16X16
#endif
) {
vpx_memset(xd->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
@ -237,7 +233,6 @@ void static count_tokens_8x8(INT16 *qcoeff_ptr, int block, int type,
}
}
#if CONFIG_TX16X16
void static count_tokens_16x16(INT16 *qcoeff_ptr, int block, int type,
#if CONFIG_HYBRIDTRANSFORM16X16
TX_TYPE tx_type,
@ -269,8 +264,6 @@ void static count_tokens_16x16(INT16 *qcoeff_ptr, int block, int type,
fc->coef_counts_16x16[type][band][pt][DCT_EOB_TOKEN]++;
}
}
#endif
static int vp8_get_signed(BOOL_DECODER *br, int value_to_sign) {
const int split = (br->range + 1) >> 1;
@ -338,7 +331,6 @@ static int vp8_decode_coefs(VP8D_COMP *dx, const MACROBLOCKD *xd,
#endif
fc->coef_probs_8x8[type][0][0];
break;
#if CONFIG_TX16X16
case TX_16X16:
coef_probs =
#if CONFIG_HYBRIDTRANSFORM16X16
@ -346,7 +338,6 @@ static int vp8_decode_coefs(VP8D_COMP *dx, const MACROBLOCKD *xd,
#endif
fc->coef_probs_16x16[type][0][0];
break;
#endif
}
VP8_COMBINEENTROPYCONTEXTS(tmp, *a, *l);
@ -445,18 +436,15 @@ SKIP_START:
tx_type,
#endif
a, l, c, seg_eob, fc);
#if CONFIG_TX16X16
else
count_tokens_16x16(qcoeff_ptr, i, type,
#if CONFIG_HYBRIDTRANSFORM16X16
tx_type,
#endif
a, l, c, seg_eob, fc);
#endif
return c;
}
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
int vp8_decode_mb_tokens_16x16(VP8D_COMP *pbi, MACROBLOCKD *xd) {
ENTROPY_CONTEXT* const A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT* const L = (ENTROPY_CONTEXT *)xd->left_context;
@ -532,8 +520,6 @@ int vp8_decode_mb_tokens_16x16(VP8D_COMP *pbi, MACROBLOCKD *xd) {
vpx_memset(&L[8], 0, sizeof(L[8]));
return eobtotal;
}
#endif
int vp8_decode_mb_tokens_8x8(VP8D_COMP *pbi, MACROBLOCKD *xd) {
ENTROPY_CONTEXT *const A = (ENTROPY_CONTEXT *)xd->above_context;

View File

@ -17,8 +17,6 @@
void vp8_reset_mb_tokens_context(MACROBLOCKD *xd);
int vp8_decode_mb_tokens(VP8D_COMP *, MACROBLOCKD *);
int vp8_decode_mb_tokens_8x8(VP8D_COMP *, MACROBLOCKD *);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
int vp8_decode_mb_tokens_16x16(VP8D_COMP *, MACROBLOCKD *);
#endif
#endif /* DETOKENIZE_H */

View File

@ -22,9 +22,7 @@ void vp8_dmachine_specific_config(VP8D_COMP *pbi) {
pbi->mb.rtcd = &pbi->common.rtcd;
pbi->dequant.block_2x2 = vp8_dequantize_b_2x2_c;
pbi->dequant.idct_add_8x8 = vp8_dequant_idct_add_8x8_c;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
pbi->dequant.idct_add_16x16 = vp8_dequant_idct_add_16x16_c;
#endif
pbi->dequant.dc_idct_add_8x8 = vp8_dequant_dc_idct_add_8x8_c;
pbi->dequant.dc_idct_add_y_block_8x8 = vp8_dequant_dc_idct_add_y_block_8x8_c;
pbi->dequant.idct_add_y_block_8x8 = vp8_dequant_idct_add_y_block_8x8_c;

View File

@ -49,9 +49,7 @@ typedef struct {
vp8_prob const *coef_probs[BLOCK_TYPES];
vp8_prob const *coef_probs_8x8[BLOCK_TYPES_8X8];
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
vp8_prob const *coef_probs_16X16[BLOCK_TYPES_16X16];
#endif
UINT8 eob[25];

View File

@ -61,7 +61,6 @@ unsigned int hybrid_tree_update_hist_8x8 [BLOCK_TYPES_8X8]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
#endif
#if CONFIG_TX16X16
unsigned int tree_update_hist_16x16 [BLOCK_TYPES_16X16]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
@ -72,7 +71,6 @@ unsigned int hybrid_tree_update_hist_16x16 [BLOCK_TYPES_16X16]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] [2];
#endif
#endif
extern unsigned int active_section;
#endif
@ -1300,10 +1298,8 @@ static void pack_inter_mode_mvs(VP8_COMP *const cpi) {
TX_SIZE sz = mi->txfm_size;
// FIXME(rbultje) code ternary symbol once all experiments are merged
vp8_write(w, sz != TX_4X4, pc->prob_tx[0]);
#if CONFIG_TX16X16
if (sz != TX_4X4)
vp8_write(w, sz != TX_8X8, pc->prob_tx[1]);
#endif
}
#endif
@ -1478,10 +1474,8 @@ static void write_kfmodes(VP8_COMP *cpi) {
TX_SIZE sz = m->mbmi.txfm_size;
// FIXME(rbultje) code ternary symbol once all experiments are merged
vp8_write(bc, sz != TX_4X4, c->prob_tx[0]);
#if CONFIG_TX16X16
if (sz != TX_4X4)
vp8_write(bc, sz != TX_8X8, c->prob_tx[1]);
#endif
}
#endif
@ -1634,7 +1628,6 @@ void build_coeff_contexts(VP8_COMP *cpi) {
#endif
}
#if CONFIG_TX16X16
if (cpi->common.txfm_mode > ALLOW_8X8) {
for (i = 0; i < BLOCK_TYPES_16X16; ++i) {
for (j = 0; j < COEF_BANDS; ++j) {
@ -1675,7 +1668,6 @@ void build_coeff_contexts(VP8_COMP *cpi) {
}
}
#endif
#endif
}
#if 0
@ -2193,7 +2185,6 @@ static void update_coef_probs(VP8_COMP *cpi) {
#endif
}
#if CONFIG_TX16X16
if (cpi->common.txfm_mode > ALLOW_8X8) {
/* dry run to see if update is necessary */
update[0] = update[1] = 0;
@ -2345,7 +2336,6 @@ static void update_coef_probs(VP8_COMP *cpi) {
}
#endif
}
#endif
}
#ifdef PACKET_TESTING
@ -2636,7 +2626,6 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
#if CONFIG_TX_SELECT
{
#if CONFIG_TX16X16
int cnt = cpi->txfm_count[0] + cpi->txfm_count[1] + cpi->txfm_count[2];
if (cnt && pc->txfm_mode == TX_MODE_SELECT) {
int prob = (255 * (cpi->txfm_count[1] + cpi->txfm_count[2]) + (cnt >> 1)) / cnt;
@ -2670,28 +2659,6 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
vp8_write_literal(bc, pc->prob_tx[0], 8);
vp8_write_literal(bc, pc->prob_tx[1], 8);
}
#else
int cnt = cpi->txfm_count[0] + cpi->txfm_count[1];
if (cnt && pc->txfm_mode == TX_MODE_SELECT) {
int prob = (255 * cpi->txfm_count[1] + (cnt >> 1)) / cnt;
if (prob <= 1) {
pc->prob_tx[0] = 1;
} else if (prob >= 255) {
pc->prob_tx[0] = 255;
} else {
pc->prob_tx[0] = prob;
}
pc->prob_tx[0] = 256 - pc->prob_tx[0];
} else {
pc->prob_tx[0] = 128;
}
vp8_write_bit(bc, pc->txfm_mode != 0);
if (pc->txfm_mode)
vp8_write_bit(bc, pc->txfm_mode - 1);
if (pc->txfm_mode == TX_MODE_SELECT) {
vp8_write_literal(bc, pc->prob_tx[0], 8);
}
#endif
}
#else
vp8_write_bit(bc, !!pc->txfm_mode);
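The deleted branch above estimated the single transform-size probability for the two-mode case without 16x16; the surviving code does the same per node of the now-ternary code. The estimate is a rounded count ratio clamped to the legal 8-bit range, then flipped to a probability of the zero branch. A self-contained sketch mirroring the deleted code:

/* Estimate P(bit == 0) in [1, 255] from how often the bit was 1. */
static int estimate_prob_tx(int one_count, int total) {
  int prob;
  if (!total)
    return 128;                                     /* no statistics: even odds */
  prob = (255 * one_count + (total >> 1)) / total;  /* rounded ratio            */
  if (prob < 1) prob = 1;                           /* keep inside [1, 255]     */
  if (prob > 255) prob = 255;
  return 256 - prob;                                /* convert P(1) to P(0)     */
}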
@ -2846,11 +2813,9 @@ void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned long *size)
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cpi->common.fc.pre_hybrid_coef_probs_8x8, cpi->common.fc.hybrid_coef_probs_8x8);
#endif
#if CONFIG_TX16X16
vp8_copy(cpi->common.fc.pre_coef_probs_16x16, cpi->common.fc.coef_probs_16x16);
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cpi->common.fc.pre_hybrid_coef_probs_16x16, cpi->common.fc.hybrid_coef_probs_16x16);
#endif
#endif
vp8_copy(cpi->common.fc.pre_ymode_prob, cpi->common.fc.ymode_prob);
vp8_copy(cpi->common.fc.pre_uv_mode_prob, cpi->common.fc.uv_mode_prob);
@ -2986,7 +2951,6 @@ void print_tree_update_probs() {
fprintf(f, " },\n");
}
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
fprintf(f, "const vp8_prob\n"
"vp8_coef_update_probs_16x16[BLOCK_TYPES_16X16]\n"
" [COEF_BANDS]\n"
@ -3014,15 +2978,12 @@ void print_tree_update_probs() {
}
fprintf(f, " },\n");
}
#endif
fclose(f);
f = fopen("treeupdate.bin", "wb");
fwrite(tree_update_hist, sizeof(tree_update_hist), 1, f);
fwrite(tree_update_hist_8x8, sizeof(tree_update_hist_8x8), 1, f);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
fwrite(tree_update_hist_16x16, sizeof(tree_update_hist_16x16), 1, f);
#endif
fclose(f);
}
#endif

View File

@ -35,14 +35,10 @@ typedef struct {
unsigned char *quant_shift;
short *zbin;
short *zbin_8x8;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
short *zbin_16x16;
#endif
short *zrun_zbin_boost;
short *zrun_zbin_boost_8x8;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
short *zrun_zbin_boost_16x16;
#endif
short *round;
// Zbin Over Quant value
@ -55,9 +51,7 @@ typedef struct {
int eob_max_offset;
int eob_max_offset_8x8;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
int eob_max_offset_16x16;
#endif
} BLOCK;
typedef struct {
@ -192,13 +186,9 @@ typedef struct {
void (*quantize_b)(BLOCK *b, BLOCKD *d);
void (*quantize_b_pair)(BLOCK *b1, BLOCK *b2, BLOCKD *d0, BLOCKD *d1);
void (*vp8_short_fdct8x8)(short *input, short *output, int pitch);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void (*vp8_short_fdct16x16)(short *input, short *output, int pitch);
#endif
void (*short_fhaar2x2)(short *input, short *output, int pitch);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void (*quantize_b_16x16)(BLOCK *b, BLOCKD *d);
#endif
void (*quantize_b_8x8)(BLOCK *b, BLOCKD *d);
void (*quantize_b_2x2)(BLOCK *b, BLOCKD *d);

View File

@ -693,7 +693,6 @@ void vp8_short_walsh8x4_x8_c(short *input, short *output, int pitch) {
}
#endif
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
static const double C1 = 0.995184726672197;
static const double C2 = 0.98078528040323;
static const double C3 = 0.956940335732209;
@ -900,4 +899,3 @@ void vp8_short_fdct16x16_c(short *input, short *out, int pitch) {
}
vp8_clear_system_state(); // Make it simd safe : __asm emms;
}
#endif

View File

@ -31,12 +31,10 @@ void vp8_fht_c(short *input, short *output, int pitch,
TX_TYPE tx_type, int tx_dim);
#endif
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
#ifndef vp8_fdct_short16x16
#define vp8_fdct_short16x16 vp8_short_fdct16x16_c
#endif
extern prototype_fdct(vp8_fdct_short16x16);
#endif
#ifndef vp8_fdct_short8x8
#define vp8_fdct_short8x8 vp8_short_fdct8x8_c
@ -81,9 +79,7 @@ extern prototype_fdct(vp8_short_walsh4x4_lossless_c);
typedef prototype_fdct(*vp8_fdct_fn_t);
typedef struct {
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
vp8_fdct_fn_t short16x16;
#endif
vp8_fdct_fn_t short8x8;
vp8_fdct_fn_t haar_short2x2;
vp8_fdct_fn_t short4x4;

View File

@ -1370,11 +1370,9 @@ static void encode_frame_internal(VP8_COMP *cpi) {
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_zero(cpi->hybrid_coef_counts_8x8);
#endif
#if CONFIG_TX16X16
vp8_zero(cpi->coef_counts_16x16);
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_zero(cpi->hybrid_coef_counts_16x16);
#endif
#endif
vp8cx_frame_init_quantizer(cpi);
@ -1516,11 +1514,7 @@ void vp8_encode_frame(VP8_COMP *cpi) {
* keyframe's probabilities as an estimate of what the current keyframe's
* coefficient cost distributions may look like. */
if (frame_type == 0) {
#if CONFIG_TX16X16
txfm_type = ALLOW_16X16;
#else
txfm_type = ALLOW_8X8;
#endif
} else
#if 0
/* FIXME (rbultje)
@ -1534,51 +1528,35 @@ void vp8_encode_frame(VP8_COMP *cpi) {
* progresses. */
if (cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] &&
#if CONFIG_TX16X16
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] &&
#endif
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]) {
txfm_type = TX_MODE_SELECT;
} else if (cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] >
cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]
#if CONFIG_TX16X16
&& cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] >
cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16]
#endif
) {
txfm_type = ONLY_4X4;
#if CONFIG_TX16X16
} else if (cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] >=
cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]) {
txfm_type = ALLOW_16X16;
#endif
} else
txfm_type = ALLOW_8X8;
#else
#if CONFIG_TX16X16
txfm_type = cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] >=
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
ALLOW_16X16 : TX_MODE_SELECT;
#else
txfm_type = cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8] >=
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
ALLOW_8X8 : TX_MODE_SELECT;
#endif
#endif
#elif CONFIG_TX16X16
txfm_type = ALLOW_16X16;
#else
txfm_type = ALLOW_8X8;
txfm_type = ALLOW_16X16;
#endif // CONFIG_TX_SELECT
cpi->common.txfm_mode = txfm_type;
#if CONFIG_TX_SELECT
if (txfm_type != TX_MODE_SELECT) {
cpi->common.prob_tx[0] = 128;
#if CONFIG_TX16X16
cpi->common.prob_tx[1] = 128;
#endif
}
#endif
cpi->common.comp_pred_mode = pred_type;
@ -1622,20 +1600,14 @@ void vp8_encode_frame(VP8_COMP *cpi) {
if (cpi->common.txfm_mode == TX_MODE_SELECT) {
const int count4x4 = cpi->txfm_count[TX_4X4];
const int count8x8 = cpi->txfm_count[TX_8X8];
#if CONFIG_TX16X16
const int count16x16 = cpi->txfm_count[TX_16X16];
#else
const int count16x16 = 0;
#endif
if (count4x4 == 0 && count16x16 == 0) {
cpi->common.txfm_mode = ALLOW_8X8;
} else if (count8x8 == 0 && count16x16 == 0) {
cpi->common.txfm_mode = ONLY_4X4;
#if CONFIG_TX16X16
} else if (count8x8 == 0 && count4x4 == 0) {
cpi->common.txfm_mode = ALLOW_16X16;
#endif
}
}
#endif
@ -1977,11 +1949,9 @@ void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
}
} else
#endif
#if CONFIG_TX16X16
if (cpi->common.txfm_mode >= ALLOW_16X16 && mbmi->mode <= TM_PRED) {
mbmi->txfm_size = TX_16X16;
} else
#endif
if (cpi->common.txfm_mode >= ALLOW_8X8 && mbmi->mode != B_PRED) {
mbmi->txfm_size = TX_8X8;
} else {
@ -2171,13 +2141,10 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
}
} else
#endif
#if CONFIG_TX16X16
if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED &&
mbmi->mode != SPLITMV && cpi->common.txfm_mode >= ALLOW_16X16) {
mbmi->txfm_size = TX_16X16;
} else
#endif
if (mbmi->mode != B_PRED && mbmi->mode != SPLITMV &&
} else if (mbmi->mode != B_PRED && mbmi->mode != SPLITMV &&
cpi->common.txfm_mode >= ALLOW_8X8) {
mbmi->txfm_size = TX_8X8;
} else {

View File

@ -138,7 +138,6 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type == TX_16X16)
#if CONFIG_HYBRIDTRANSFORM16X16
{
@ -154,36 +153,27 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
#else
vp8_transform_intra_mby_16x16(x);
#endif
else
#endif
if (tx_type == TX_8X8)
else if (tx_type == TX_8X8)
vp8_transform_intra_mby_8x8(x);
else
vp8_transform_intra_mby(x);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type == TX_16X16)
vp8_quantize_mby_16x16(x);
else
#endif
if (tx_type == TX_8X8)
else if (tx_type == TX_8X8)
vp8_quantize_mby_8x8(x);
else
vp8_quantize_mby(x);
if (x->optimize) {
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type == TX_16X16)
vp8_optimize_mby_16x16(x, rtcd);
else
#endif
if (tx_type == TX_8X8)
else if (tx_type == TX_8X8)
vp8_optimize_mby_8x8(x, rtcd);
else
vp8_optimize_mby(x, rtcd);
}
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type == TX_16X16)
#if CONFIG_HYBRIDTRANSFORM16X16
{
@ -197,9 +187,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
#else
vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
#endif
else
#endif
if (tx_type == TX_8X8)
else if (tx_type == TX_8X8)
vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
else
vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
@ -211,9 +199,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type == TX_16X16) tx_type = TX_8X8; // 16x16 for U and V should default to 8x8 behavior.
#endif
#if CONFIG_COMP_INTRA_PRED
if (x->e_mbd.mode_info_context->mbmi.second_uv_mode == (MB_PREDICTION_MODE)(DC_PRED - 1)) {
#endif

View File

@ -300,7 +300,6 @@ void vp8_transform_mby_8x8(MACROBLOCK *x) {
}
}
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void vp8_transform_mbuv_16x16(MACROBLOCK *x) {
int i;
@ -335,7 +334,6 @@ void vp8_transform_mby_16x16(MACROBLOCK *x) {
vp8_clear_system_state();
x->vp8_short_fdct16x16(&x->block[0].src_diff[0], &x->block[0].coeff[0], 32);
}
#endif
#define RDTRUNC(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
#define RDTRUNC_8x8(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
@ -880,7 +878,6 @@ void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void optimize_b_16x16(MACROBLOCK *mb, int i, int type,
ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
const VP8_ENCODER_RTCD *rtcd) {
@ -1097,7 +1094,6 @@ void optimize_mb_16x16(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
*(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
}
}
#endif
void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
int tx_type = x->e_mbd.mode_info_context->mbmi.txfm_size;
@ -1105,43 +1101,32 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
vp8_subtract_mb(rtcd, x);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type == TX_16X16)
vp8_transform_mb_16x16(x);
else
#endif
if (tx_type == TX_8X8)
else if (tx_type == TX_8X8)
vp8_transform_mb_8x8(x);
else
transform_mb(x);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type == TX_16X16)
vp8_quantize_mb_16x16(x);
else
#endif
if (tx_type == TX_8X8)
else if (tx_type == TX_8X8)
vp8_quantize_mb_8x8(x);
else
vp8_quantize_mb(x);
if (x->optimize) {
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type == TX_16X16)
optimize_mb_16x16(x, rtcd);
else
#endif
if (tx_type == TX_8X8)
else if (tx_type == TX_8X8)
optimize_mb_8x8(x, rtcd);
else
optimize_mb(x, rtcd);
}
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type == TX_16X16)
vp8_inverse_transform_mb_16x16(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
else
#endif
if (tx_type == TX_8X8)
vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
else
@ -1214,23 +1199,18 @@ void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type == TX_16X16)
vp8_transform_mby_16x16(x);
else
#endif
if (tx_type == TX_8X8)
else if (tx_type == TX_8X8)
vp8_transform_mby_8x8(x);
else
transform_mby(x);
vp8_quantize_mby(x);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_type == TX_16X16)
vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
else
#endif
if (tx_type == TX_8X8)
vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
else

View File

@ -121,14 +121,12 @@ void vp8_build_dcblock_8x8(MACROBLOCK *b);
void vp8_optimize_mby_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
void vp8_optimize_mbuv_8x8(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void vp8_transform_mb_16x16(MACROBLOCK *mb);
void vp8_transform_mby_16x16(MACROBLOCK *x);
void vp8_transform_mbuv_16x16(MACROBLOCK *x);
void vp8_transform_intra_mby_16x16(MACROBLOCK *x);
void vp8_build_dcblock_16x16(MACROBLOCK *b);
void vp8_optimize_mby_16x16(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
#endif
void vp8_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch);

View File

@ -99,9 +99,7 @@ void vp8_cmachine_specific_config(VP8_COMP *cpi) {
cpi->rtcd.variance.getmbss = vp8_get_mb_ss_c;
cpi->rtcd.fdct.short8x8 = vp8_short_fdct8x8_c;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->rtcd.fdct.short16x16 = vp8_short_fdct16x16_c;
#endif
cpi->rtcd.fdct.haar_short2x2 = vp8_short_fhaar2x2_c;
cpi->rtcd.fdct.short4x4 = vp8_short_fdct4x4_c;
cpi->rtcd.fdct.short8x4 = vp8_short_fdct8x4_c;

View File

@ -1203,16 +1203,12 @@ void vp8_set_speed_features(VP8_COMP *cpi) {
}
if (cpi->sf.improved_dct) {
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->mb.vp8_short_fdct16x16 = FDCT_INVOKE(&cpi->rtcd.fdct, short16x16);
#endif
cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8);
cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x4);
cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, short4x4);
} else {
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->mb.vp8_short_fdct16x16 = FDCT_INVOKE(&cpi->rtcd.fdct, short16x16);
#endif
cpi->mb.vp8_short_fdct8x8 = FDCT_INVOKE(&cpi->rtcd.fdct, short8x8);
cpi->mb.vp8_short_fdct8x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast8x4);
cpi->mb.vp8_short_fdct4x4 = FDCT_INVOKE(&cpi->rtcd.fdct, fast4x4);
@ -1225,9 +1221,7 @@ void vp8_set_speed_features(VP8_COMP *cpi) {
cpi->mb.quantize_b = vp8_regular_quantize_b;
cpi->mb.quantize_b_pair = vp8_regular_quantize_b_pair;
cpi->mb.quantize_b_8x8 = vp8_regular_quantize_b_8x8;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->mb.quantize_b_16x16= vp8_regular_quantize_b_16x16;
#endif
cpi->mb.quantize_b_2x2 = vp8_regular_quantize_b_2x2;
vp8cx_init_quantizer(cpi);
@ -3770,12 +3764,10 @@ static void encode_frame_to_data_rate
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cpi->common.fc.hybrid_coef_counts_8x8, cpi->hybrid_coef_counts_8x8);
#endif
#if CONFIG_TX16X16
vp8_copy(cpi->common.fc.coef_counts_16x16, cpi->coef_counts_16x16);
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cpi->common.fc.hybrid_coef_counts_16x16,
cpi->hybrid_coef_counts_16x16);
#endif
#endif
vp8_adapt_coef_probs(&cpi->common);
if (cpi->common.frame_type != KEY_FRAME) {

View File

@ -109,13 +109,11 @@ typedef struct {
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
#endif
#if CONFIG_TX16X16
vp8_prob coef_probs_16x16[BLOCK_TYPES_16X16]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_prob hybrid_coef_probs_16x16[BLOCK_TYPES_16X16]
[COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES];
#endif
#endif
vp8_prob ymode_prob [VP8_YMODES - 1]; /* interframe intra mode probs */
@ -413,14 +411,12 @@ typedef struct VP8_COMP {
DECLARE_ALIGNED(64, short, zrun_zbin_boost_y2_8x8[QINDEX_RANGE][64]);
DECLARE_ALIGNED(64, short, zrun_zbin_boost_uv_8x8[QINDEX_RANGE][64]);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
DECLARE_ALIGNED(16, short, Y1zbin_16x16[QINDEX_RANGE][256]);
DECLARE_ALIGNED(16, short, Y2zbin_16x16[QINDEX_RANGE][256]);
DECLARE_ALIGNED(16, short, UVzbin_16x16[QINDEX_RANGE][256]);
DECLARE_ALIGNED(16, short, zrun_zbin_boost_y1_16x16[QINDEX_RANGE][256]);
DECLARE_ALIGNED(16, short, zrun_zbin_boost_y2_16x16[QINDEX_RANGE][256]);
DECLARE_ALIGNED(16, short, zrun_zbin_boost_uv_16x16[QINDEX_RANGE][256]);
#endif
MACROBLOCK mb;
VP8_COMMON common;
@ -594,7 +590,6 @@ typedef struct VP8_COMP {
unsigned int frame_hybrid_branch_ct_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
#endif
#if CONFIG_TX16X16
unsigned int coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_branch_ct_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
@ -602,7 +597,6 @@ typedef struct VP8_COMP {
unsigned int hybrid_coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; /* for this frame */
vp8_prob frame_hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
unsigned int frame_hybrid_branch_ct_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2];
#endif
#endif
int gfu_boost;

View File

@ -311,7 +311,6 @@ void vp8_quantize_mbuv_8x8(MACROBLOCK *x) {
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void vp8_quantize_mby_16x16(MACROBLOCK *x) {
int i;
for (i = 0; i < 16; i++)
@ -385,7 +384,6 @@ void vp8_regular_quantize_b_16x16(BLOCK *b, BLOCKD *d) {
d->eob = eob + 1;
}
#endif
@ -428,7 +426,6 @@ void vp8cx_init_quantizer(VP8_COMP *cpi) {
48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48
};
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
static const int zbin_boost_16x16[256] = {
0, 0, 0, 8, 8, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28,
30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 48, 48, 48, 48, 48, 48,
@ -447,7 +444,6 @@ void vp8cx_init_quantizer(VP8_COMP *cpi) {
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
};
#endif
int qrounding_factor = 48;
@ -469,17 +465,13 @@ void vp8cx_init_quantizer(VP8_COMP *cpi) {
cpi->Y1quant_shift[Q] + 0, quant_val);
cpi->Y1zbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->Y1zbin_8x8[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->Y1zbin_16x16[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
#endif
cpi->Y1round[Q][0] = (qrounding_factor * quant_val) >> 7;
cpi->common.Y1dequant[Q][0] = quant_val;
cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7;
cpi->zrun_zbin_boost_y1_8x8[Q][0] =
((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->zrun_zbin_boost_y1_16x16[Q][0] = ((quant_val * zbin_boost_16x16[0]) + 64) >> 7;
#endif
quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q);
@ -487,34 +479,26 @@ void vp8cx_init_quantizer(VP8_COMP *cpi) {
cpi->Y2quant_shift[Q] + 0, quant_val);
cpi->Y2zbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->Y2zbin_8x8[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->Y2zbin_16x16[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
#endif
cpi->Y2round[Q][0] = (qrounding_factor * quant_val) >> 7;
cpi->common.Y2dequant[Q][0] = quant_val;
cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7;
cpi->zrun_zbin_boost_y2_8x8[Q][0] =
((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->zrun_zbin_boost_y2_16x16[Q][0] = ((quant_val * zbin_boost_16x16[0]) + 64) >> 7;
#endif
quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q);
invert_quant(cpi->UVquant[Q] + 0,
cpi->UVquant_shift[Q] + 0, quant_val);
cpi->UVzbin[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->UVzbin_8x8[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->UVzbin_16x16[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
#endif
cpi->UVround[Q][0] = (qrounding_factor * quant_val) >> 7;
cpi->common.UVdequant[Q][0] = quant_val;
cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7;
cpi->zrun_zbin_boost_uv_8x8[Q][0] =
((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
cpi->zrun_zbin_boost_uv_16x16[Q][0] = ((quant_val * zbin_boost_16x16[0]) + 64) >> 7;
#endif
// all the 4x4 ac values =;
for (i = 1; i < 16; i++) {
@ -570,7 +554,6 @@ void vp8cx_init_quantizer(VP8_COMP *cpi) {
((quant_val * zbin_boost_8x8[i]) + 64) >> 7;
}
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
// 16x16 structures. Same comment above applies.
for (i = 1; i < 256; i++) {
int rc = vp8_default_zig_zag1d_16x16[i];
@ -587,7 +570,6 @@ void vp8cx_init_quantizer(VP8_COMP *cpi) {
cpi->UVzbin_16x16[Q][rc] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->zrun_zbin_boost_uv_16x16[Q][i] = ((quant_val * zbin_boost_16x16[i]) + 64) >> 7;
}
#endif
}
}
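Every 16x16 zero-bin and zbin-boost table is filled with the same Q7 fixed-point rounding as its 4x4 and 8x8 counterparts: multiply, add 64 (half of 2^7), shift right by 7. A worked example with hypothetical values, not taken from this commit:

/* Hypothetical inputs for illustration only. */
int quant_val = 68;                                 /* assumed dequant step  */
int qzbin_factor = 84;                              /* assumed zbin scale    */
int zbin = ((qzbin_factor * quant_val) + 64) >> 7;  /* (5712 + 64) >> 7 = 45 */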
@ -626,16 +608,12 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) {
x->block[i].quant_shift = cpi->Y1quant_shift[QIndex];
x->block[i].zbin = cpi->Y1zbin[QIndex];
x->block[i].zbin_8x8 = cpi->Y1zbin_8x8[QIndex];
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
x->block[i].zbin_16x16 = cpi->Y1zbin_16x16[QIndex];
#endif
x->block[i].round = cpi->Y1round[QIndex];
x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
x->block[i].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_y1_8x8[QIndex];
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
x->block[i].zrun_zbin_boost_16x16 = cpi->zrun_zbin_boost_y1_16x16[QIndex];
#endif
x->block[i].zbin_extra = (short)zbin_extra;
// Segment max eob offset feature.
@ -644,16 +622,12 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) {
get_segdata(xd, segment_id, SEG_LVL_EOB);
x->block[i].eob_max_offset_8x8 =
get_segdata(xd, segment_id, SEG_LVL_EOB);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
x->block[i].eob_max_offset_16x16 =
get_segdata(xd, segment_id, SEG_LVL_EOB);
#endif
} else {
x->block[i].eob_max_offset = 16;
x->block[i].eob_max_offset_8x8 = 64;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
x->block[i].eob_max_offset_16x16 = 256;
#endif
}
}
@ -668,16 +642,12 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) {
x->block[i].quant_shift = cpi->UVquant_shift[QIndex];
x->block[i].zbin = cpi->UVzbin[QIndex];
x->block[i].zbin_8x8 = cpi->UVzbin_8x8[QIndex];
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
x->block[i].zbin_16x16 = cpi->UVzbin_16x16[QIndex];
#endif
x->block[i].round = cpi->UVround[QIndex];
x->e_mbd.block[i].dequant = cpi->common.UVdequant[QIndex];
x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex];
x->block[i].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_uv_8x8[QIndex];
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
x->block[i].zrun_zbin_boost_16x16 = cpi->zrun_zbin_boost_uv_16x16[QIndex];
#endif
x->block[i].zbin_extra = (short)zbin_extra;
@ -703,16 +673,12 @@ void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x) {
x->block[24].quant_shift = cpi->Y2quant_shift[QIndex];
x->block[24].zbin = cpi->Y2zbin[QIndex];
x->block[24].zbin_8x8 = cpi->Y2zbin_8x8[QIndex];
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
x->block[24].zbin_16x16 = cpi->Y2zbin_16x16[QIndex];
#endif
x->block[24].round = cpi->Y2round[QIndex];
x->e_mbd.block[24].dequant = cpi->common.Y2dequant[QIndex];
x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex];
x->block[24].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_y2_8x8[QIndex];
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
x->block[24].zrun_zbin_boost_16x16 = cpi->zrun_zbin_boost_y2_16x16[QIndex];
#endif
x->block[24].zbin_extra = (short)zbin_extra;
// TBD perhaps not use for Y2

View File

@ -46,12 +46,10 @@ extern prototype_quantize_block_pair(vp8_quantize_quantb_pair);
#endif
extern prototype_quantize_block(vp8_quantize_quantb_8x8);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
#ifndef vp8_quantize_quantb_16x16
#define vp8_quantize_quantb_16x16 vp8_regular_quantize_b_16x16
#endif
extern prototype_quantize_block(vp8_quantize_quantb_16x16);
#endif
#ifndef vp8_quantize_quantb_2x2
#define vp8_quantize_quantb_2x2 vp8_regular_quantize_b_2x2
@ -77,12 +75,10 @@ extern prototype_quantize_mb(vp8_quantize_mby);
extern prototype_quantize_mb(vp8_quantize_mby_8x8);
extern prototype_quantize_mb(vp8_quantize_mbuv_8x8);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
void vp8_quantize_mb_16x16(MACROBLOCK *x);
extern prototype_quantize_block(vp8_quantize_quantb_16x16);
extern prototype_quantize_mb(vp8_quantize_mby_16x16);
extern prototype_quantize_mb(vp8_quantize_mbuv_16x16);
#endif
struct VP8_COMP;
extern void vp8_set_quantizer(struct VP8_COMP *cpi, int Q);

View File

@ -185,12 +185,10 @@ void vp8_save_coding_context(VP8_COMP *cpi) {
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cc->hybrid_coef_probs_8x8, cm->fc.hybrid_coef_probs_8x8);
#endif
#if CONFIG_TX16X16
vp8_copy(cc->coef_probs_16x16, cm->fc.coef_probs_16x16);
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cc->hybrid_coef_probs_16x16, cm->fc.hybrid_coef_probs_16x16);
#endif
#endif
#if CONFIG_SWITCHABLE_INTERP
vp8_copy(cc->switchable_interp_prob, cm->fc.switchable_interp_prob);
#endif
@ -258,12 +256,10 @@ void vp8_restore_coding_context(VP8_COMP *cpi) {
#if CONFIG_HYBRIDTRANSFORM8X8
vp8_copy(cm->fc.hybrid_coef_probs_8x8, cc->hybrid_coef_probs_8x8);
#endif
#if CONFIG_TX16X16
vp8_copy(cm->fc.coef_probs_16x16, cc->coef_probs_16x16);
#if CONFIG_HYBRIDTRANSFORM16X16
vp8_copy(cm->fc.hybrid_coef_probs_16x16, cc->hybrid_coef_probs_16x16);
#endif
#endif
#if CONFIG_SWITCHABLE_INTERP
vp8_copy(cm->fc.switchable_interp_prob, cc->switchable_interp_prob);
#endif

View File

@ -379,7 +379,6 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {
BLOCK_TYPES_8X8);
#endif
#if CONFIG_TX16X16
fill_token_costs(
cpi->mb.token_costs[TX_16X16],
(const vp8_prob(*)[8][PREV_COEF_CONTEXTS][11]) cpi->common.fc.coef_probs_16x16,
@ -390,7 +389,6 @@ void vp8_initialize_rd_consts(VP8_COMP *cpi, int QIndex) {
(const vp8_prob(*)[8][PREV_COEF_CONTEXTS][11])
cpi->common.fc.hybrid_coef_probs_16x16,
BLOCK_TYPES_16X16);
#endif
#endif
/*rough estimate for costing*/
@ -661,7 +659,6 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type,
}
#endif
break;
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
case TX_16X16:
scan = vp8_default_zig_zag1d_16x16;
band = vp8_coef_bands_16x16;
@ -673,7 +670,6 @@ static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type,
tx_type = b->bmi.as_mode.tx_type;
#endif
break;
#endif
default:
break;
}
@ -859,7 +855,6 @@ static void macro_block_yrd_8x8(MACROBLOCK *mb,
*skippable = mby_is_skippable_8x8(&mb->e_mbd, 1);
}
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
static int vp8_rdcost_mby_16x16(MACROBLOCK *mb) {
int cost;
MACROBLOCKD *xd = &mb->e_mbd;
@ -915,7 +910,6 @@ static void macro_block_yrd_16x16(MACROBLOCK *mb, int *Rate, int *Distortion,
*Rate = vp8_rdcost_mby_16x16(mb);
*skippable = mby_is_skippable_16x16(&mb->e_mbd);
}
#endif
static void macro_block_yrd(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
int *distortion, int *skippable,
@ -931,17 +925,14 @@ static void macro_block_yrd(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
int s0, s1;
int r4x4, r4x4s, r8x8, r8x8s, d4x4, d8x8, s4x4, s8x8;
int64_t rd4x4, rd8x8, rd4x4s, rd8x8s;
#if CONFIG_TX16X16
int d16x16, r16x16, r16x16s, s16x16;
int64_t rd16x16, rd16x16s;
#endif
// FIXME don't do sub x3
if (skip_prob == 0)
skip_prob = 1;
s0 = vp8_cost_bit(skip_prob, 0);
s1 = vp8_cost_bit(skip_prob, 1);
#if CONFIG_TX16X16
macro_block_yrd_16x16(x, &r16x16, &d16x16, IF_RTCD(&cpi->rtcd), &s16x16);
if (can_skip) {
if (s16x16) {
@ -962,7 +953,6 @@ static void macro_block_yrd(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
} else {
rd16x16s = RDCOST(x->rdmult, x->rddiv, r16x16s, d16x16);
}
#endif
macro_block_yrd_8x8(x, &r8x8, &d8x8, IF_RTCD(&cpi->rtcd), &s8x8);
if (can_skip) {
if (s8x8) {
@ -974,9 +964,7 @@ static void macro_block_yrd(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
rd8x8 = RDCOST(x->rdmult, x->rddiv, r8x8, d8x8);
}
r8x8s = r8x8 + vp8_cost_one(cm->prob_tx[0]);
#if CONFIG_TX16X16
r8x8s += vp8_cost_zero(cm->prob_tx[1]);
#endif
if (can_skip) {
if (s8x8) {
rd8x8s = RDCOST(x->rdmult, x->rddiv, s1, d8x8);
@ -1007,7 +995,6 @@ static void macro_block_yrd(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
rd4x4s = RDCOST(x->rdmult, x->rddiv, r4x4s, d4x4);
}
#if CONFIG_TX16X16
if ( cpi->common.txfm_mode == ALLOW_16X16 ||
(cpi->common.txfm_mode == TX_MODE_SELECT &&
rd16x16s < rd8x8s && rd16x16s < rd4x4s)) {
@ -1016,7 +1003,6 @@ static void macro_block_yrd(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
*distortion = d16x16;
*rate = (cpi->common.txfm_mode == ALLOW_16X16) ? r16x16 : r16x16s;
} else
#endif
if ( cpi->common.txfm_mode == ALLOW_8X8 ||
(cpi->common.txfm_mode == TX_MODE_SELECT && rd8x8s < rd4x4s)) {
mbmi->txfm_size = TX_8X8;
@ -1034,23 +1020,19 @@ static void macro_block_yrd(VP8_COMP *cpi, MACROBLOCK *x, int *rate,
txfm_cache[ONLY_4X4] = rd4x4;
txfm_cache[ALLOW_8X8] = rd8x8;
#if CONFIG_TX16X16
txfm_cache[ALLOW_16X16] = rd16x16;
if (rd16x16s < rd8x8s && rd16x16s < rd4x4s)
txfm_cache[TX_MODE_SELECT] = rd16x16s;
else
#endif
txfm_cache[TX_MODE_SELECT] = rd4x4s < rd8x8s ? rd4x4s : rd8x8s;
#else /* CONFIG_TX_SELECT */
switch (cpi->common.txfm_mode) {
#if CONFIG_TX16X16
case ALLOW_16X16:
macro_block_yrd_16x16(x, rate, distortion, IF_RTCD(&cpi->rtcd), skippable);
mbmi->txfm_size = TX_16X16;
break;
#endif
case ALLOW_8X8:
macro_block_yrd_8x8(x, rate, distortion, IF_RTCD(&cpi->rtcd), skippable);
mbmi->txfm_size = TX_8X8;
@ -4108,11 +4090,7 @@ void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int
mbmi->txfm_size = cm->txfm_mode;
else
#endif
#if CONFIG_TX16X16
mbmi->txfm_size = TX_16X16;
#else
mbmi->txfm_size = TX_8X8;
#endif
mbmi->ref_frame = ALTREF_FRAME;
mbmi->mv[0].as_int = 0;
mbmi->uv_mode = DC_PRED;

View File

@ -35,12 +35,10 @@ INT64 context_counters_8x8[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [M
INT64 hybrid_context_counters_8x8[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
#if CONFIG_TX16X16
INT64 context_counters_16x16[BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#if CONFIG_HYBRIDTRANSFORM16X16
INT64 hybrid_context_counters_16x16[BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
#endif
extern unsigned int tree_update_hist[BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES][2];
@ -54,25 +52,22 @@ extern unsigned int tree_update_hist_8x8[BLOCK_TYPES_8X8][COEF_BANDS]
extern unsigned int hybrid_tree_update_hist_8x8[BLOCK_TYPES_8X8][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
#endif
#if CONFIG_TX16X16
extern unsigned int tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
#if CONFIG_HYBRIDTRANSFORM16X16
extern unsigned int hybrid_tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
[PREV_COEF_CONTEXTS][ENTROPY_NODES] [2];
#endif
#endif
#endif
#endif /* ENTROPY_STATS */
void vp8_stuff_mb(VP8_COMP *cpi,
MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
void vp8_stuff_mb_8x8(VP8_COMP *cpi,
MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
void vp8_stuff_mb_8x8_4x4uv(VP8_COMP *cpi,
MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
#if CONFIG_TX16X16
void vp8_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *xd,
TOKENEXTRA **t, int dry_run);
#endif
void vp8_fix_contexts(MACROBLOCKD *xd);
static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE * 2];
@ -133,7 +128,6 @@ static void fill_value_tokens() {
vp8_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
}
#if CONFIG_TX16X16
static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
const BLOCKD *const b,
TOKENEXTRA **tp,
@ -201,7 +195,6 @@ static void tokenize1st_order_b_16x16(MACROBLOCKD *xd,
pt = (c != !type); /* 0 <-> all coeff data is zero */
*a = *l = pt;
}
#endif
static void tokenize2nd_order_b_8x8
(
@ -870,7 +863,6 @@ int mb_is_skippable_8x8_4x4uv(MACROBLOCKD *xd, int has_y2_block) {
mbuv_is_skippable(xd));
}
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
int mby_is_skippable_16x16(MACROBLOCKD *xd) {
int skip = 1;
//skip &= (xd->block[0].eob < 2); // I think this should be commented? No second order == DC must be coded
@ -883,7 +875,6 @@ int mby_is_skippable_16x16(MACROBLOCKD *xd) {
int mb_is_skippable_16x16(MACROBLOCKD *xd) {
return (mby_is_skippable_16x16(xd) & mbuv_is_skippable_8x8(xd));
}
#endif
void vp8_tokenize_mb(VP8_COMP *cpi,
MACROBLOCKD *xd,
@ -917,16 +908,12 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
has_y2_block = (xd->mode_info_context->mbmi.mode != B_PRED
&& xd->mode_info_context->mbmi.mode != I8X8_PRED
&& xd->mode_info_context->mbmi.mode != SPLITMV);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
if (tx_size == TX_16X16) has_y2_block = 0; // Because of inter frames
#endif
switch (tx_size) {
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
case TX_16X16:
xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_16x16(xd);
break;
#endif
case TX_8X8:
if (xd->mode_info_context->mbmi.mode == I8X8_PRED)
xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8_4x4uv(xd, 0);
@ -943,11 +930,9 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
if (!dry_run)
cpi->skip_true_count[mb_skip_context] += skip_inc;
if (!cpi->common.mb_no_coeff_skip) {
#if CONFIG_TX16X16 && CONFIG_HYBRIDTRANSFORM16X16
if (tx_size == TX_16X16)
vp8_stuff_mb_16x16(cpi, xd, t, dry_run);
else
#endif
if (tx_size == TX_8X8) {
if (xd->mode_info_context->mbmi.mode == I8X8_PRED)
vp8_stuff_mb_8x8_4x4uv(cpi, xd, t, dry_run);
@ -982,7 +967,6 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
plane_type = 0;
}
#if CONFIG_TX16X16
if (tx_size == TX_16X16) {
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)xd->left_context;
@ -1003,9 +987,7 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
vpx_memset(&A[8], 0, sizeof(A[8]));
vpx_memset(&L[8], 0, sizeof(L[8]));
}
else
#endif
if (tx_size == TX_8X8) {
else if (tx_size == TX_8X8) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
@ -1053,15 +1035,11 @@ void init_context_counters(void) {
if (!f) {
vpx_memset(context_counters, 0, sizeof(context_counters));
vpx_memset(context_counters_8x8, 0, sizeof(context_counters_8x8));
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
vpx_memset(context_counters_16x16, 0, sizeof(context_counters_16x16));
#endif
} else {
fread(context_counters, sizeof(context_counters), 1, f);
fread(context_counters_8x8, sizeof(context_counters_8x8), 1, f);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
fread(context_counters_16x16, sizeof(context_counters_16x16), 1, f);
#endif
fclose(f);
}
@ -1069,15 +1047,11 @@ void init_context_counters(void) {
if (!f) {
vpx_memset(tree_update_hist, 0, sizeof(tree_update_hist));
vpx_memset(tree_update_hist_8x8, 0, sizeof(tree_update_hist_8x8));
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
vpx_memset(tree_update_hist_16x16, 0, sizeof(tree_update_hist_16x16));
#endif
} else {
fread(tree_update_hist, sizeof(tree_update_hist), 1, f);
fread(tree_update_hist_8x8, sizeof(tree_update_hist_8x8), 1, f);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
fread(tree_update_hist_16x16, sizeof(tree_update_hist_16x16), 1, f);
#endif
fclose(f);
}
}
@ -1153,7 +1127,6 @@ void print_context_counters() {
} while (++type < BLOCK_TYPES_8X8);
fprintf(f, "\n};\n");
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
fprintf(f, "static const unsigned int\nvp8_default_coef_counts_16x16"
"[BLOCK_TYPES_16X16] [COEF_BANDS]"
"[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS] = {");
@ -1186,7 +1159,6 @@ void print_context_counters() {
fprintf(f, "\n }");
} while (++type < BLOCK_TYPES_16X16);
fprintf(f, "\n};\n");
#endif
fprintf(f, "static const vp8_prob\n"
"vp8_default_coef_probs[BLOCK_TYPES] [COEF_BANDS] \n"
@ -1256,7 +1228,6 @@ void print_context_counters() {
} while (++type < BLOCK_TYPES_8X8);
fprintf(f, "\n};\n");
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
fprintf(f, "static const vp8_prob\n"
"vp8_default_coef_probs_16x16[BLOCK_TYPES_16X16] [COEF_BANDS]\n"
"[PREV_COEF_CONTEXTS] [ENTROPY_NODES] = {");
@ -1289,16 +1260,13 @@ void print_context_counters() {
fprintf(f, "\n }");
} while (++type < BLOCK_TYPES_16X16);
fprintf(f, "\n};\n");
#endif
fclose(f);
f = fopen("context.bin", "wb");
fwrite(context_counters, sizeof(context_counters), 1, f);
fwrite(context_counters_8x8, sizeof(context_counters_8x8), 1, f);
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
fwrite(context_counters_16x16, sizeof(context_counters_16x16), 1, f);
#endif
fclose(f);
}
#endif
@ -1456,7 +1424,6 @@ void vp8_stuff_mb_8x8(VP8_COMP *cpi,
*t = t_backup;
}
#if CONFIG_TX16X16
static __inline
void stuff1st_order_b_16x16(MACROBLOCKD *xd,
const BLOCKD *const b,
@ -1524,7 +1491,6 @@ void vp8_stuff_mb_16x16(VP8_COMP *cpi,
if (dry_run)
*t = t_backup;
}
#endif
static __inline void stuff2nd_order_b
(
@ -1680,9 +1646,7 @@ void vp8_fix_contexts(MACROBLOCKD *xd) {
if ((xd->mode_info_context->mbmi.mode != B_PRED
&& xd->mode_info_context->mbmi.mode != I8X8_PRED
&& xd->mode_info_context->mbmi.mode != SPLITMV)
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
|| xd->mode_info_context->mbmi.txfm_size == TX_16X16
#endif
) {
vpx_memset(xd->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));

View File

@ -49,11 +49,9 @@ extern INT64 context_counters[BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
extern INT64 context_counters_8x8[BLOCK_TYPES_8X8][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
extern INT64 context_counters_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
#endif
#endif
extern const int *vp8_dct_value_cost_ptr;
/* TODO: The Token field should be broken out into a separate char array to