Merge tx32x32 experiment.
Change-Id: I615651e4c7b09e576a341ad425cf80c393637833
parent 460501fe84
commit aa2effa954

configure (vendored)
@@ -246,7 +246,6 @@ EXPERIMENT_LIST="
implicit_segmentation
newbintramodes
comp_interintra_pred
tx32x32
tx64x64
dwtdcthybrid
cnvcontext
@@ -36,7 +36,7 @@ static int round(double x) {
}
#endif

#if !CONFIG_DWT32X32HYBRID
#if !CONFIG_DWTDCTHYBRID
static const double kPi = 3.141592653589793238462643383279502884;
static void reference2_32x32_idct_2d(double *input, double *output) {
double x;
@@ -127,9 +127,9 @@ TEST(VP9Idct32x32Test, AccuracyCheck) {
}
}
}
#else // CONFIG_DWT32X32HYBRID
#else // CONFIG_DWTDCTHYBRID
// TODO(rbultje/debargha): add DWT-specific tests
#endif // CONFIG_DWT32X32HYBRID
#endif // CONFIG_DWTDCTHYBRID
TEST(VP9Fdct32x32Test, AccuracyCheck) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
unsigned int max_error = 0;
@@ -69,9 +69,7 @@ LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += fdct4x4_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += fdct8x8_test.cc
#LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += dct16x16_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += variance_test.cc
ifeq ($(CONFIG_VP9_ENCODER)$(CONFIG_TX32X32),yesyes)
LIBVPX_TEST_SRCS-yes += dct32x32_test.cc
endif
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += dct32x32_test.cc
endif # VP9
@@ -16,15 +16,11 @@ const uint8_t vp9_block2left[TX_SIZE_MAX_SB][25] = {
{0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8},
{0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 6, 6, 6, 6, 8},
#if CONFIG_TX32X32
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 6, 6, 6, 6, 8}
#endif
};
const uint8_t vp9_block2above[TX_SIZE_MAX_SB][25] = {
{0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8},
{0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 8},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 6, 6, 6, 6, 8},
#if CONFIG_TX32X32
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 6, 6, 6, 6, 8}
#endif
};
@@ -125,12 +125,8 @@ typedef enum {
TX_8X8 = 1, // 8x8 dct transform
TX_16X16 = 2, // 16x16 dct transform
TX_SIZE_MAX_MB = 3, // Number of different transforms available
#if CONFIG_TX32X32
TX_32X32 = TX_SIZE_MAX_MB, // 32x32 dct transform
TX_SIZE_MAX_SB, // Number of transforms available to SBs
#else
TX_SIZE_MAX_SB = TX_SIZE_MAX_MB,
#endif
} TX_SIZE;

typedef enum {
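(For reference, a minimal sketch of how the TX_SIZE enum above presumably reads once the CONFIG_TX32X32 guard is folded in by this merge; the TX_4X4 entry is assumed from context rather than shown in the hunk, and this is inferred, not quoted from the merged file.)

typedef enum {
  TX_4X4 = 0,                 // 4x4 dct transform (assumed, not shown in the hunk)
  TX_8X8 = 1,                 // 8x8 dct transform
  TX_16X16 = 2,               // 16x16 dct transform
  TX_SIZE_MAX_MB = 3,         // Number of different transforms available to MBs
  TX_32X32 = TX_SIZE_MAX_MB,  // 32x32 dct transform, now always compiled in
  TX_SIZE_MAX_SB,             // Number of transforms available to SBs
} TX_SIZE;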
@@ -301,14 +297,12 @@ typedef struct blockd {
union b_mode_info bmi;
} BLOCKD;

#if CONFIG_TX32X32
typedef struct superblockd {
/* 32x32 Y and 16x16 U/V. No 2nd order transform yet. */
DECLARE_ALIGNED(16, int16_t, diff[32*32+16*16*2]);
DECLARE_ALIGNED(16, int16_t, qcoeff[32*32+16*16*2]);
DECLARE_ALIGNED(16, int16_t, dqcoeff[32*32+16*16*2]);
} SUPERBLOCKD;
#endif

typedef struct macroblockd {
DECLARE_ALIGNED(16, int16_t, diff[400]); /* from idct diff */
@@ -317,9 +311,7 @@ typedef struct macroblockd {
DECLARE_ALIGNED(16, int16_t, dqcoeff[400]);
DECLARE_ALIGNED(16, uint16_t, eobs[25]);

#if CONFIG_TX32X32
SUPERBLOCKD sb_coeff_data;
#endif

/* 16 Y blocks, 4 U, 4 V, 1 DC 2nd order block, each with 16 entries. */
BLOCKD block[25];
@@ -1038,7 +1038,6 @@ static const vp9_coeff_probs default_hybrid_coef_probs_16x16[BLOCK_TYPES_16X16]
}
}
};
#if CONFIG_TX32X32
static const vp9_coeff_probs default_coef_probs_32x32[BLOCK_TYPES_32X32] = {
{ /* block Type 0 */
{ /* Coeff Band 0 */
@@ -1210,4 +1209,3 @@ static const vp9_coeff_probs default_coef_probs_32x32[BLOCK_TYPES_32X32] = {
}
}
};
#endif // CONFIG_TX32X32
@@ -142,7 +142,6 @@ DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d_16x16[256]) = {
237, 252, 253, 238, 223, 239, 254, 255,
};

#if CONFIG_TX32X32
#if CONFIG_DWTDCTHYBRID
DECLARE_ALIGNED(16, const int, vp9_coef_bands_32x32[1024]) = {
0, 1, 2, 3, 5, 4, 4, 5, 5, 3, 6, 3, 5, 4, 6,
@@ -352,7 +351,7 @@ DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d_32x32[1024]) = {
975, 991, 510, 1006, 1022, 511, 1007, 1023,
};

#else
#else // CONFIG_DWTDCTHYBRID

DECLARE_ALIGNED(16, const int, vp9_coef_bands_32x32[1024]) = {
0, 1, 2, 3, 5, 4, 4, 5, 5, 3, 6, 3, 5, 4, 6, 6,
@@ -459,7 +458,6 @@ DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d_32x32[1024]) = {
923, 954, 985, 1016, 1017, 986, 955, 924, 893, 862, 831, 863, 894, 925, 956, 987, 1018, 1019, 988, 957, 926, 895, 927, 958, 989, 1020, 1021, 990, 959, 991, 1022, 1023,
};
#endif // CONFIG_DWTDCTHYBRID
#endif

/* Array indices are identical to previously-existing CONTEXT_NODE indices */
@@ -547,10 +545,8 @@ DECLARE_ALIGNED(16, int,
vp9_default_zig_zag1d_8x8_neighbors[64 * MAX_NEIGHBORS]);
DECLARE_ALIGNED(16, int,
vp9_default_zig_zag1d_16x16_neighbors[256 * MAX_NEIGHBORS]);
#if CONFIG_TX32X32
DECLARE_ALIGNED(16, int,
vp9_default_zig_zag1d_32x32_neighbors[1024 * MAX_NEIGHBORS]);
#endif

static int find_in_scan(const int *scan, int l, int m) {
int i, l2 = l * l;
@@ -628,10 +624,8 @@ void vp9_init_neighbors() {
vp9_default_zig_zag1d_8x8_neighbors);
init_scan_neighbors(vp9_default_zig_zag1d_16x16, 16,
vp9_default_zig_zag1d_16x16_neighbors);
#if CONFIG_TX32X32
init_scan_neighbors(vp9_default_zig_zag1d_32x32, 32,
vp9_default_zig_zag1d_32x32_neighbors);
#endif
}

const int *vp9_get_coef_neighbors_handle(const int *scan) {
@@ -645,10 +639,8 @@ const int *vp9_get_coef_neighbors_handle(const int *scan) {
return vp9_default_zig_zag1d_8x8_neighbors;
} else if (scan == vp9_default_zig_zag1d_16x16) {
return vp9_default_zig_zag1d_16x16_neighbors;
#if CONFIG_TX32X32
} else if (scan == vp9_default_zig_zag1d_32x32) {
return vp9_default_zig_zag1d_32x32_neighbors;
#endif
}
return vp9_default_zig_zag1d_4x4_neighbors;
}
@@ -693,10 +685,8 @@ void vp9_default_coef_probs(VP9_COMMON *pc) {
vpx_memcpy(pc->fc.hybrid_coef_probs_16x16,
default_hybrid_coef_probs_16x16,
sizeof(pc->fc.hybrid_coef_probs_16x16));
#if CONFIG_TX32X32
vpx_memcpy(pc->fc.coef_probs_32x32, default_coef_probs_32x32,
sizeof(pc->fc.coef_probs_32x32));
#endif
}

void vp9_coef_tree_initialize() {
@@ -840,9 +830,7 @@ void vp9_adapt_coef_probs(VP9_COMMON *cm) {
cm->fc.pre_hybrid_coef_probs_16x16,
BLOCK_TYPES_16X16, cm->fc.hybrid_coef_counts_16x16,
count_sat, update_factor);
#if CONFIG_TX32X32
update_coef_probs(cm->fc.coef_probs_32x32, cm->fc.pre_coef_probs_32x32,
BLOCK_TYPES_32X32, cm->fc.coef_counts_32x32,
count_sat, update_factor);
#endif
}
@@ -66,9 +66,7 @@ extern vp9_extra_bit_struct vp9_extra_bits[12]; /* indexed by token value */

#define BLOCK_TYPES_16X16 4

#if CONFIG_TX32X32
#define BLOCK_TYPES_32X32 4
#endif

/* Middle dimension is a coarsening of the coefficient's
position within the 4x4 DCT. */
@@ -77,9 +75,7 @@ extern vp9_extra_bit_struct vp9_extra_bits[12]; /* indexed by token value */
extern DECLARE_ALIGNED(16, const int, vp9_coef_bands_4x4[16]);
extern DECLARE_ALIGNED(64, const int, vp9_coef_bands_8x8[64]);
extern DECLARE_ALIGNED(16, const int, vp9_coef_bands_16x16[256]);
#if CONFIG_TX32X32
extern DECLARE_ALIGNED(16, const int, vp9_coef_bands_32x32[1024]);
#endif

/* Inside dimension is 3-valued measure of nearby complexity, that is,
the extent to which nearby coefficients are nonzero. For the first
@@ -122,9 +118,7 @@ extern DECLARE_ALIGNED(16, const int, vp9_row_scan_4x4[16]);

extern DECLARE_ALIGNED(64, const int, vp9_default_zig_zag1d_8x8[64]);
extern DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d_16x16[256]);
#if CONFIG_TX32X32
extern DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d_32x32[1024]);
#endif

void vp9_coef_tree_initialize(void);
void vp9_adapt_coef_probs(struct VP9Common *);
@@ -154,9 +148,7 @@ extern DECLARE_ALIGNED(16, int, vp9_default_zig_zag1d_8x8_neighbors[
64 * MAX_NEIGHBORS]);
extern DECLARE_ALIGNED(16, int, vp9_default_zig_zag1d_16x16_neighbors[
256 * MAX_NEIGHBORS]);
#if CONFIG_TX32X32
extern DECLARE_ALIGNED(16, int, vp9_default_zig_zag1d_32x32_neighbors[
1024 * MAX_NEIGHBORS]);
#endif
#endif // CONFIG_NEWCOEFCONTEXT
#endif // VP9_COMMON_VP9_ENTROPY_H_
@@ -1533,7 +1533,6 @@ void vp9_short_idct10_16x16_c(int16_t *input, int16_t *output, int pitch) {
#undef RIGHT_ROUNDING
#endif

#if CONFIG_TX32X32
#if !CONFIG_DWTDCTHYBRID
#define DownshiftMultiplyBy2(x) x * 2
#define DownshiftMultiply(x) x
@@ -2505,4 +2504,3 @@ void vp9_short_idct64x64_c(int16_t *input, int16_t *output, int pitch) {
#endif
}
#endif // CONFIG_DWTDCTHYBRID
#endif // CONFIG_TX32X32
@@ -146,7 +146,6 @@ void vp9_inverse_transform_mb_16x16(MACROBLOCKD *xd) {
vp9_inverse_transform_mbuv_8x8(xd);
}

#if CONFIG_TX32X32
void vp9_inverse_transform_sby_32x32(SUPERBLOCKD *xd_sb) {
vp9_short_idct32x32(xd_sb->dqcoeff, xd_sb->diff, 64);
}
@@ -157,4 +156,3 @@ void vp9_inverse_transform_sbuv_16x16(SUPERBLOCKD *xd_sb) {
vp9_inverse_transform_b_16x16(xd_sb->dqcoeff + 1280,
xd_sb->diff + 1280, 32);
}
#endif
@@ -39,9 +39,7 @@ extern void vp9_inverse_transform_mb_16x16(MACROBLOCKD *xd);

extern void vp9_inverse_transform_mby_16x16(MACROBLOCKD *xd);

#if CONFIG_TX32X32
extern void vp9_inverse_transform_sby_32x32(SUPERBLOCKD *xd_sb);
extern void vp9_inverse_transform_sbuv_16x16(SUPERBLOCKD *xd_sb);
#endif

#endif // VP9_COMMON_VP9_INVTRANS_H_
@@ -187,9 +187,7 @@ static int sb_mb_lf_skip(const MODE_INFO *const mip0,
const MODE_INFO *const mip1) {
return mb_lf_skip(&mip0->mbmi) &&
mb_lf_skip(&mip1->mbmi) &&
#if CONFIG_TX32X32
mip0->mbmi.txfm_size >= TX_32X32 &&
#endif
mip0->mbmi.ref_frame;
}
void vp9_loop_filter_frame(VP9_COMMON *cm,
@@ -55,9 +55,7 @@ typedef struct frame_contexts {
vp9_coeff_probs hybrid_coef_probs_8x8[BLOCK_TYPES_8X8];
vp9_coeff_probs coef_probs_16x16[BLOCK_TYPES_16X16];
vp9_coeff_probs hybrid_coef_probs_16x16[BLOCK_TYPES_16X16];
#if CONFIG_TX32X32
vp9_coeff_probs coef_probs_32x32[BLOCK_TYPES_32X32];
#endif

nmv_context nmvc;
nmv_context pre_nmvc;
@@ -82,9 +80,7 @@ typedef struct frame_contexts {
vp9_coeff_probs pre_hybrid_coef_probs_8x8[BLOCK_TYPES_8X8];
vp9_coeff_probs pre_coef_probs_16x16[BLOCK_TYPES_16X16];
vp9_coeff_probs pre_hybrid_coef_probs_16x16[BLOCK_TYPES_16X16];
#if CONFIG_TX32X32
vp9_coeff_probs pre_coef_probs_32x32[BLOCK_TYPES_32X32];
#endif

vp9_coeff_count coef_counts_4x4[BLOCK_TYPES_4X4];
vp9_coeff_count hybrid_coef_counts_4x4[BLOCK_TYPES_4X4];
@@ -92,9 +88,7 @@ typedef struct frame_contexts {
vp9_coeff_count hybrid_coef_counts_8x8[BLOCK_TYPES_8X8];
vp9_coeff_count coef_counts_16x16[BLOCK_TYPES_16X16];
vp9_coeff_count hybrid_coef_counts_16x16[BLOCK_TYPES_16X16];
#if CONFIG_TX32X32
vp9_coeff_count coef_counts_32x32[BLOCK_TYPES_32X32];
#endif

nmv_context_counts NMVcount;
vp9_prob switchable_interp_prob[VP9_SWITCHABLE_FILTERS + 1]
@@ -125,11 +119,9 @@ typedef enum {
ONLY_4X4 = 0,
ALLOW_8X8 = 1,
ALLOW_16X16 = 2,
#if CONFIG_TX32X32
ALLOW_32X32 = 3,
#endif
TX_MODE_SELECT = 3 + CONFIG_TX32X32,
NB_TXFM_MODES = 4 + CONFIG_TX32X32,
TX_MODE_SELECT = 4,
NB_TXFM_MODES = 5,
} TXFM_MODE;

typedef struct VP9Common {
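(For reference, a sketch of the TXFM_MODE enum implied by the hunk above once the guard lines are dropped and the new constants replace the CONFIG_TX32X32 arithmetic; inferred from the diff, not quoted from the merged file.)

typedef enum {
  ONLY_4X4 = 0,        // only 4x4 transforms allowed
  ALLOW_8X8 = 1,
  ALLOW_16X16 = 2,
  ALLOW_32X32 = 3,     // previously guarded by CONFIG_TX32X32
  TX_MODE_SELECT = 4,  // was 3 + CONFIG_TX32X32
  NB_TXFM_MODES = 5,   // was 4 + CONFIG_TX32X32
} TXFM_MODE;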
@@ -115,7 +115,6 @@ void vp9_recon_mbuv_s_c(MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst) {
}
}

#if CONFIG_TX32X32
void vp9_recon_sby_s_c(MACROBLOCKD *xd, uint8_t *dst) {
int x, y, stride = xd->block[0].dst_stride;
int16_t *diff = xd->sb_coeff_data.diff;
@@ -145,7 +144,6 @@ void vp9_recon_sbuv_s_c(MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst) {
vdiff += 16;
}
}
#endif // CONFIG_TX32X32

void vp9_recon_mby_c(MACROBLOCKD *xd) {
int i;
@@ -77,16 +77,12 @@ specialize vp9_dequant_idct_add_y_block
prototype void vp9_dequant_idct_add_uv_block "int16_t *q, const int16_t *dq, uint8_t *pre, uint8_t *dstu, uint8_t *dstv, int stride, uint16_t *eobs"
specialize vp9_dequant_idct_add_uv_block

if [ "$CONFIG_TX32X32" = "yes" ]; then

prototype void vp9_dequant_idct_add_32x32 "int16_t *q, const int16_t *dq, uint8_t *pre, uint8_t *dst, int pitch, int stride, int eob"
specialize vp9_dequant_idct_add_32x32

prototype void vp9_dequant_idct_add_uv_block_16x16 "int16_t *q, const int16_t *dq, uint8_t *dstu, uint8_t *dstv, int stride, uint16_t *eobs"
specialize vp9_dequant_idct_add_uv_block_16x16

fi

#
# RECON
#
@@ -135,16 +131,12 @@ specialize vp9_recon_mby_s
prototype void vp9_recon_mbuv_s "struct macroblockd *x, uint8_t *udst, uint8_t *vdst"
specialize void vp9_recon_mbuv_s

if [ "$CONFIG_TX32X32" = "yes" ]; then

prototype void vp9_recon_sby_s "struct macroblockd *x, uint8_t *dst"
specialize vp9_recon_sby_s

prototype void vp9_recon_sbuv_s "struct macroblockd *x, uint8_t *udst, uint8_t *vdst"
specialize void vp9_recon_sbuv_s

fi

prototype void vp9_build_intra_predictors_mby_s "struct macroblockd *x"
specialize vp9_build_intra_predictors_mby_s
@@ -222,15 +222,11 @@ static void kfread_modes(VP9D_COMP *pbi,
m->mbmi.txfm_size = vp9_read(bc, cm->prob_tx[0]);
if (m->mbmi.txfm_size != TX_4X4 && m->mbmi.mode != I8X8_PRED) {
m->mbmi.txfm_size += vp9_read(bc, cm->prob_tx[1]);
#if CONFIG_TX32X32
if (m->mbmi.txfm_size != TX_8X8 && m->mbmi.sb_type)
m->mbmi.txfm_size += vp9_read(bc, cm->prob_tx[2]);
#endif
}
#if CONFIG_TX32X32
} else if (cm->txfm_mode >= ALLOW_32X32 && m->mbmi.sb_type) {
m->mbmi.txfm_size = TX_32X32;
#endif
} else if (cm->txfm_mode >= ALLOW_16X16 && m->mbmi.mode <= TM_PRED) {
m->mbmi.txfm_size = TX_16X16;
} else if (cm->txfm_mode >= ALLOW_8X8 && m->mbmi.mode != B_PRED) {
@@ -1208,15 +1204,11 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
if (mbmi->txfm_size != TX_4X4 && mbmi->mode != I8X8_PRED &&
mbmi->mode != SPLITMV) {
mbmi->txfm_size += vp9_read(bc, cm->prob_tx[1]);
#if CONFIG_TX32X32
if (mbmi->sb_type && mbmi->txfm_size != TX_8X8)
mbmi->txfm_size += vp9_read(bc, cm->prob_tx[2]);
#endif
}
#if CONFIG_TX32X32
} else if (mbmi->sb_type && cm->txfm_mode >= ALLOW_32X32) {
mbmi->txfm_size = TX_32X32;
#endif
} else if (cm->txfm_mode >= ALLOW_16X16 &&
((mbmi->ref_frame == INTRA_FRAME && mbmi->mode <= TM_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) {
@@ -747,7 +747,6 @@ static void decode_superblock64(VP9D_COMP *pbi, MACROBLOCKD *xd,
}

/* dequantization and idct */
#if CONFIG_TX32X32
if (xd->mode_info_context->mbmi.txfm_size == TX_32X32) {
for (n = 0; n < 4; n++) {
const int x_idx = n & 1, y_idx = n >> 1;
@@ -787,7 +786,6 @@ static void decode_superblock64(VP9D_COMP *pbi, MACROBLOCKD *xd,
}
}
} else {
#endif
for (n = 0; n < 16; n++) {
int x_idx = n & 3, y_idx = n >> 2;

@@ -816,9 +814,7 @@ static void decode_superblock64(VP9D_COMP *pbi, MACROBLOCKD *xd,
decode_4x4_sb(pbi, xd, bc, n, 3, 2);
}
}
#if CONFIG_TX32X32
}
#endif

xd->above_context = pc->above_context + mb_col;
xd->left_context = pc->left_context;
@@ -873,7 +869,6 @@ static void decode_superblock32(VP9D_COMP *pbi, MACROBLOCKD *xd,
}

/* dequantization and idct */
#if CONFIG_TX32X32
if (xd->mode_info_context->mbmi.txfm_size == TX_32X32) {
eobtotal = vp9_decode_sb_tokens(pbi, xd, bc);
if (eobtotal == 0) { // skip loopfilter
@@ -895,9 +890,7 @@ static void decode_superblock32(VP9D_COMP *pbi, MACROBLOCKD *xd,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.uv_stride, xd->eobs + 16);
}
} else
#endif
{
} else {
for (n = 0; n < 4; n++) {
int x_idx = n & 1, y_idx = n >> 1;

@@ -1396,11 +1389,9 @@ static void read_coef_probs(VP9D_COMP *pbi, BOOL_DECODER* const bc) {
read_coef_probs_common(bc, pc->fc.hybrid_coef_probs_16x16,
BLOCK_TYPES_16X16);
}
#if CONFIG_TX32X32
if (pbi->common.txfm_mode > ALLOW_16X16) {
read_coef_probs_common(bc, pc->fc.coef_probs_32x32, BLOCK_TYPES_32X32);
}
#endif
}

int vp9_decode_frame(VP9D_COMP *pbi, const unsigned char **p_data_end) {
@@ -1590,16 +1581,12 @@ int vp9_decode_frame(VP9D_COMP *pbi, const unsigned char **p_data_end) {

/* Read the loop filter level and type */
pc->txfm_mode = vp9_read_literal(&header_bc, 2);
#if CONFIG_TX32X32
if (pc->txfm_mode == 3)
pc->txfm_mode += vp9_read_bit(&header_bc);
#endif
if (pc->txfm_mode == TX_MODE_SELECT) {
pc->prob_tx[0] = vp9_read_literal(&header_bc, 8);
pc->prob_tx[1] = vp9_read_literal(&header_bc, 8);
#if CONFIG_TX32X32
pc->prob_tx[2] = vp9_read_literal(&header_bc, 8);
#endif
}

pc->filter_type = (LOOPFILTERTYPE) vp9_read_bit(&header_bc);
@@ -1782,10 +1769,8 @@ int vp9_decode_frame(VP9D_COMP *pbi, const unsigned char **p_data_end) {
pbi->common.fc.coef_probs_16x16);
vp9_copy(pbi->common.fc.pre_hybrid_coef_probs_16x16,
pbi->common.fc.hybrid_coef_probs_16x16);
#if CONFIG_TX32X32
vp9_copy(pbi->common.fc.pre_coef_probs_32x32,
pbi->common.fc.coef_probs_32x32);
#endif
vp9_copy(pbi->common.fc.pre_ymode_prob, pbi->common.fc.ymode_prob);
vp9_copy(pbi->common.fc.pre_sb_ymode_prob, pbi->common.fc.sb_ymode_prob);
vp9_copy(pbi->common.fc.pre_uv_mode_prob, pbi->common.fc.uv_mode_prob);
@@ -1803,9 +1788,7 @@ int vp9_decode_frame(VP9D_COMP *pbi, const unsigned char **p_data_end) {
vp9_zero(pbi->common.fc.hybrid_coef_counts_8x8);
vp9_zero(pbi->common.fc.coef_counts_16x16);
vp9_zero(pbi->common.fc.hybrid_coef_counts_16x16);
#if CONFIG_TX32X32
vp9_zero(pbi->common.fc.coef_counts_32x32);
#endif
vp9_zero(pbi->common.fc.ymode_counts);
vp9_zero(pbi->common.fc.sb_ymode_counts);
vp9_zero(pbi->common.fc.uv_mode_counts);
@@ -348,7 +348,6 @@ void vp9_dequant_idct_add_16x16_c(int16_t *input, const int16_t *dq,
}
}

#if CONFIG_TX32X32
void vp9_dequant_idct_add_32x32_c(int16_t *input, const int16_t *dq,
uint8_t *pred, uint8_t *dest, int pitch,
int stride, int eob) {
@@ -373,4 +372,3 @@ void vp9_dequant_idct_add_uv_block_16x16_c(int16_t *q, const int16_t *dq,
vp9_dequant_idct_add_16x16_c(q + 256, dq,
dstv, dstv, stride, stride, eobs[4]);
}
#endif
@@ -144,12 +144,10 @@ static int decode_coefs(VP9D_COMP *dx, const MACROBLOCKD *xd,
coef_counts = fc->hybrid_coef_counts_16x16;
}
break;
#if CONFIG_TX32X32
case TX_32X32:
coef_probs = fc->coef_probs_32x32;
coef_counts = fc->coef_counts_32x32;
break;
#endif
}

VP9_COMBINEENTROPYCONTEXTS(pt, *a, *l);
@@ -249,7 +247,6 @@ static int get_eob(MACROBLOCKD* const xd, int segment_id, int eob_max) {
return eob;
}

#if CONFIG_TX32X32
int vp9_decode_sb_tokens(VP9D_COMP* const pbi,
MACROBLOCKD* const xd,
BOOL_DECODER* const bc) {
@@ -316,7 +313,6 @@ int vp9_decode_sb_tokens(VP9D_COMP* const pbi,
A[8] = L[8] = A1[8] = L1[8] = 0;
return eobtotal;
}
#endif

static int vp9_decode_mb_tokens_16x16(VP9D_COMP* const pbi,
MACROBLOCKD* const xd,
@@ -23,11 +23,9 @@ int vp9_decode_coefs_4x4(VP9D_COMP *dx, MACROBLOCKD *xd,
int vp9_decode_mb_tokens(VP9D_COMP* const, MACROBLOCKD* const,
BOOL_DECODER* const);

#if CONFIG_TX32X32
int vp9_decode_sb_tokens(VP9D_COMP* const pbi,
MACROBLOCKD* const xd,
BOOL_DECODER* const bc);
#endif

int vp9_decode_mb_tokens_4x4_uv(VP9D_COMP* const dx, MACROBLOCKD* const xd,
BOOL_DECODER* const bc);
@@ -972,10 +972,8 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m,
vp9_write(bc, sz != TX_4X4, pc->prob_tx[0]);
if (sz != TX_4X4 && mode != I8X8_PRED && mode != SPLITMV) {
vp9_write(bc, sz != TX_8X8, pc->prob_tx[1]);
#if CONFIG_TX32X32
if (mi->sb_type && sz != TX_8X8)
vp9_write(bc, sz != TX_16X16, pc->prob_tx[2]);
#endif
}
}
}
@@ -1078,10 +1076,8 @@ static void write_mb_modes_kf(const VP9_COMP *cpi,
vp9_write(bc, sz != TX_4X4, c->prob_tx[0]);
if (sz != TX_4X4 && ym <= TM_PRED) {
vp9_write(bc, sz != TX_8X8, c->prob_tx[1]);
#if CONFIG_TX32X32
if (m->mbmi.sb_type && sz != TX_8X8)
vp9_write(bc, sz != TX_16X16, c->prob_tx[2]);
#endif
}
}
}
@@ -1262,14 +1258,12 @@ static void build_coeff_contexts(VP9_COMP *cpi) {
cpi, hybrid_context_counters_16x16,
#endif
cpi->frame_hybrid_branch_ct_16x16, BLOCK_TYPES_16X16);
#if CONFIG_TX32X32
build_tree_distribution(cpi->frame_coef_probs_32x32,
cpi->coef_counts_32x32,
#ifdef ENTROPY_STATS
cpi, context_counters_32x32,
#endif
cpi->frame_branch_ct_32x32, BLOCK_TYPES_32X32);
#endif
}

static void update_coef_probs_common(vp9_writer* const bc,
@@ -1446,7 +1440,6 @@ static void update_coef_probs(VP9_COMP* const cpi, vp9_writer* const bc) {
BLOCK_TYPES_16X16);
}

#if CONFIG_TX32X32
if (cpi->common.txfm_mode > ALLOW_16X16) {
update_coef_probs_common(bc,
#ifdef ENTROPY_STATS
@@ -1458,7 +1451,6 @@ static void update_coef_probs(VP9_COMP* const cpi, vp9_writer* const bc) {
cpi->frame_branch_ct_32x32,
BLOCK_TYPES_32X32);
}
#endif
}

#ifdef PACKET_TESTING
@@ -1699,9 +1691,7 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
cpi->txfm_count_32x32p[TX_4X4] +
cpi->txfm_count_32x32p[TX_8X8] +
cpi->txfm_count_32x32p[TX_16X16] +
#if CONFIG_TX32X32
cpi->txfm_count_32x32p[TX_32X32] +
#endif
cpi->txfm_count_16x16p[TX_4X4] +
cpi->txfm_count_16x16p[TX_8X8] +
cpi->txfm_count_16x16p[TX_16X16] +
@@ -1711,35 +1701,25 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
cpi->txfm_count_16x16p[TX_8X8],
cpi->txfm_count_32x32p[TX_8X8] +
cpi->txfm_count_32x32p[TX_16X16] +
#if CONFIG_TX32X32
cpi->txfm_count_32x32p[TX_32X32] +
#endif
cpi->txfm_count_16x16p[TX_8X8] +
cpi->txfm_count_16x16p[TX_16X16]);
#if CONFIG_TX32X32
pc->prob_tx[2] = get_prob(cpi->txfm_count_32x32p[TX_16X16],
cpi->txfm_count_32x32p[TX_16X16] +
cpi->txfm_count_32x32p[TX_32X32]);
#endif
} else {
pc->prob_tx[0] = 128;
pc->prob_tx[1] = 128;
#if CONFIG_TX32X32
pc->prob_tx[2] = 128;
#endif
}
vp9_write_literal(&header_bc, pc->txfm_mode <= 3 ? pc->txfm_mode : 3, 2);
#if CONFIG_TX32X32
if (pc->txfm_mode > ALLOW_16X16) {
vp9_write_bit(&header_bc, pc->txfm_mode == TX_MODE_SELECT);
}
#endif
if (pc->txfm_mode == TX_MODE_SELECT) {
vp9_write_literal(&header_bc, pc->prob_tx[0], 8);
vp9_write_literal(&header_bc, pc->prob_tx[1], 8);
#if CONFIG_TX32X32
vp9_write_literal(&header_bc, pc->prob_tx[2], 8);
#endif
}
}

@@ -1960,10 +1940,8 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
cpi->common.fc.coef_probs_16x16);
vp9_copy(cpi->common.fc.pre_hybrid_coef_probs_16x16,
cpi->common.fc.hybrid_coef_probs_16x16);
#if CONFIG_TX32X32
vp9_copy(cpi->common.fc.pre_coef_probs_32x32,
cpi->common.fc.coef_probs_32x32);
#endif
vp9_copy(cpi->common.fc.pre_sb_ymode_prob, cpi->common.fc.sb_ymode_prob);
vp9_copy(cpi->common.fc.pre_ymode_prob, cpi->common.fc.ymode_prob);
vp9_copy(cpi->common.fc.pre_uv_mode_prob, cpi->common.fc.uv_mode_prob);
@@ -2125,10 +2103,8 @@ void print_tree_update_probs() {
print_tree_update_for_type(f, hybrid_tree_update_hist_16x16,
BLOCK_TYPES_16X16,
"vp9_coef_update_probs_16x16[BLOCK_TYPES_16X16]");
#if CONFIG_TX32X32
print_tree_update_for_type(f, tree_update_hist_32x32, BLOCK_TYPES_32X32,
"vp9_coef_update_probs_32x32[BLOCK_TYPES_32X32]");
#endif

fclose(f);
f = fopen("treeupdate.bin", "wb");
@@ -35,15 +35,11 @@ typedef struct block {
int16_t *zbin;
int16_t *zbin_8x8;
int16_t *zbin_16x16;
#if CONFIG_TX32X32
int16_t *zbin_32x32;
#endif
int16_t *zrun_zbin_boost;
int16_t *zrun_zbin_boost_8x8;
int16_t *zrun_zbin_boost_16x16;
#if CONFIG_TX32X32
int16_t *zrun_zbin_boost_32x32;
#endif
int16_t *round;

// Zbin Over Quant value
@@ -57,9 +53,7 @@ typedef struct block {
int eob_max_offset;
int eob_max_offset_8x8;
int eob_max_offset_16x16;
#if CONFIG_TX32X32
int eob_max_offset_32x32;
#endif
} BLOCK;

typedef struct {
@@ -92,12 +86,10 @@ typedef struct {
int64_t txfm_rd_diff[NB_TXFM_MODES];
} PICK_MODE_CONTEXT;

#if CONFIG_TX32X32
typedef struct superblock {
DECLARE_ALIGNED(16, int16_t, src_diff[32*32+16*16*2]);
DECLARE_ALIGNED(16, int16_t, coeff[32*32+16*16*2]);
} SUPERBLOCK;
#endif

typedef struct macroblock {
DECLARE_ALIGNED(16, int16_t, src_diff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
@@ -106,9 +98,7 @@ typedef struct macroblock {
// 1 DC 2nd order block each with 16 entries
BLOCK block[25];

#if CONFIG_TX32X32
SUPERBLOCK sb_coeff_data;
#endif

YV12_BUFFER_CONFIG src;
@@ -1332,9 +1332,7 @@ void vp9_short_fdct16x16_c(int16_t *input, int16_t *out, int pitch) {
#undef ROUNDING
#endif

#if CONFIG_TX32X32 || CONFIG_TX64X64
#if !CONFIG_DWTDCTHYBRID
#if CONFIG_TX32X32
static void dct32_1d(double *input, double *output, int stride) {
static const double C1 = 0.998795456205; // cos(pi * 1 / 64)
static const double C2 = 0.995184726672; // cos(pi * 2 / 64)
@@ -1685,7 +1683,6 @@ void vp9_short_fdct32x32_c(int16_t *input, int16_t *out, int pitch) {

vp9_clear_system_state(); // Make it simd safe : __asm emms;
}
#endif // CONFIG_TX32X32

#else // CONFIG_DWTDCTHYBRID

@@ -2142,7 +2139,6 @@ static void vp9_short_fdct16x16_c_f(short *input, short *out, int pitch,
vp9_clear_system_state(); // Make it simd safe : __asm emms;
}

#if CONFIG_TX32X32
void vp9_short_fdct32x32_c(short *input, short *out, int pitch) {
// assume out is a 32x32 buffer
short buffer[16 * 16];
@@ -2173,7 +2169,6 @@ void vp9_short_fdct32x32_c(short *input, short *out, int pitch) {
for (i = 0; i < 16; ++i)
vpx_memcpy(out + i * 32 + 33 * 16, buffer + i * 16, sizeof(short) * 16);
}
#endif // CONFIG_TX32X32

#if CONFIG_TX64X64
void vp9_short_fdct64x64_c(short *input, short *out, int pitch) {
@@ -2235,4 +2230,3 @@ void vp9_short_fdct64x64_c(short *input, short *out, int pitch) {
}
#endif // CONFIG_TX64X64
#endif // CONFIG_DWTDCTHYBRID
#endif // CONFIG_TX32X32 || CONFIG_TX64X64
@@ -461,11 +461,9 @@ static void update_state(VP9_COMP *cpi,
}
}
}
#if CONFIG_TX32X32
if (block_size == 16) {
ctx->txfm_rd_diff[ALLOW_32X32] = ctx->txfm_rd_diff[ALLOW_16X16];
}
#endif

if (mb_mode == B_PRED) {
for (i = 0; i < 16; i++) {
@@ -1299,9 +1297,7 @@ static void encode_frame_internal(VP9_COMP *cpi) {
vp9_zero(cpi->hybrid_coef_counts_8x8);
vp9_zero(cpi->coef_counts_16x16);
vp9_zero(cpi->hybrid_coef_counts_16x16);
#if CONFIG_TX32X32
vp9_zero(cpi->coef_counts_32x32);
#endif
#if CONFIG_NEW_MVREF
vp9_zero(cpi->mb_mv_ref_count);
#endif
@@ -1570,11 +1566,7 @@ void vp9_encode_frame(VP9_COMP *cpi) {
* keyframe's probabilities as an estimate of what the current keyframe's
* coefficient cost distributions may look like. */
if (frame_type == 0) {
#if CONFIG_TX32X32
txfm_type = ALLOW_32X32;
#else
txfm_type = ALLOW_16X16;
#endif
} else
#if 0
/* FIXME (rbultje)
@@ -1605,15 +1597,9 @@ void vp9_encode_frame(VP9_COMP *cpi) {
} else
txfm_type = ALLOW_8X8;
#else
#if CONFIG_TX32X32
txfm_type = cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32] >=
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
ALLOW_32X32 : TX_MODE_SELECT;
#else
txfm_type = cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] >=
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
ALLOW_16X16 : TX_MODE_SELECT;
#endif
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
ALLOW_32X32 : TX_MODE_SELECT;
#endif
cpi->common.txfm_mode = txfm_type;
if (txfm_type != TX_MODE_SELECT) {
@@ -1665,11 +1651,7 @@ void vp9_encode_frame(VP9_COMP *cpi) {
const int count8x8_8x8p = cpi->txfm_count_8x8p[TX_8X8];
const int count16x16_16x16p = cpi->txfm_count_16x16p[TX_16X16];
const int count16x16_lp = cpi->txfm_count_32x32p[TX_16X16];
#if CONFIG_TX32X32
const int count32x32 = cpi->txfm_count_32x32p[TX_32X32];
#else
const int count32x32 = 0;
#endif

if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
count32x32 == 0) {
@@ -1679,15 +1661,11 @@ void vp9_encode_frame(VP9_COMP *cpi) {
count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
cpi->common.txfm_mode = ONLY_4X4;
reset_skip_txfm_size(cpi, TX_4X4);
#if CONFIG_TX32X32
} else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
cpi->common.txfm_mode = ALLOW_32X32;
#endif
} else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
cpi->common.txfm_mode = ALLOW_16X16;
#if CONFIG_TX32X32
reset_skip_txfm_size(cpi, TX_16X16);
#endif
}
}

@@ -1913,7 +1891,6 @@ static void update_sb64_skip_coeff_state(VP9_COMP *cpi,
int skip[16], int output_enabled) {
MACROBLOCK *const x = &cpi->mb;

#if CONFIG_TX32X32
if (x->e_mbd.mode_info_context->mbmi.txfm_size == TX_32X32) {
TOKENEXTRA tokens[4][1024+512];
int n_tokens[4], n;
@@ -1961,9 +1938,7 @@ static void update_sb64_skip_coeff_state(VP9_COMP *cpi,
(*tp) += n_tokens[n];
}
}
} else
#endif // CONFIG_TX32X32
{
} else {
TOKENEXTRA tokens[16][16 * 25];
int n_tokens[16], n;

@@ -2388,7 +2363,6 @@ static void encode_superblock32(VP9_COMP *cpi, TOKENEXTRA **t,
xd->dst.y_stride, xd->dst.uv_stride);
}

#if CONFIG_TX32X32
if (xd->mode_info_context->mbmi.txfm_size == TX_32X32) {
if (!x->skip) {
vp9_subtract_sby_s_c(x->sb_coeff_data.src_diff, src, src_y_stride,
@@ -2435,9 +2409,7 @@ static void encode_superblock32(VP9_COMP *cpi, TOKENEXTRA **t,
mi[mis + 1].mbmi.mb_skip_coeff = mi->mbmi.mb_skip_coeff;
}
skip[0] = skip[2] = skip[1] = skip[3] = mi->mbmi.mb_skip_coeff;
} else
#endif
{
} else {
for (n = 0; n < 4; n++) {
int x_idx = n & 1, y_idx = n >> 1;

@@ -2502,11 +2474,7 @@ static void encode_superblock32(VP9_COMP *cpi, TOKENEXTRA **t,
cpi->txfm_count_32x32p[mi->mbmi.txfm_size]++;
} else {
TX_SIZE sz = (cm->txfm_mode == TX_MODE_SELECT) ?
#if CONFIG_TX32X32
TX_32X32 :
#else
TX_16X16 :
#endif
cm->txfm_mode;
mi->mbmi.txfm_size = sz;
if (mb_col < cm->mb_cols - 1)
@@ -2634,7 +2602,6 @@ static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
xd->dst.y_stride, xd->dst.uv_stride);
}

#if CONFIG_TX32X32
if (xd->mode_info_context->mbmi.txfm_size == TX_32X32) {
int n;

@@ -2705,9 +2672,7 @@ static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
}
skip[n] = xd->mode_info_context->mbmi.mb_skip_coeff;
}
} else
#endif
{
} else {
for (n = 0; n < 16; n++) {
const int x_idx = n & 3, y_idx = n >> 2;

@@ -2766,15 +2731,9 @@ static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
if (output_enabled) {
if (cm->txfm_mode == TX_MODE_SELECT &&
!((cm->mb_no_coeff_skip &&
(
#if CONFIG_TX32X32
(mi->mbmi.txfm_size == TX_32X32 &&
((mi->mbmi.txfm_size == TX_32X32 &&
skip[0] && skip[1] && skip[2] && skip[3]) ||
#endif // CONFIG_TX32X32
(
#if CONFIG_TX32X32
mi->mbmi.txfm_size != TX_32X32 &&
#endif // CONFIG_TX32X32
(mi->mbmi.txfm_size != TX_32X32 &&
skip[0] && skip[1] && skip[2] && skip[3] &&
skip[4] && skip[5] && skip[6] && skip[7] &&
skip[8] && skip[9] && skip[10] && skip[11] &&
@@ -2785,11 +2744,7 @@ static void encode_superblock64(VP9_COMP *cpi, TOKENEXTRA **t,
} else {
int x, y;
TX_SIZE sz = (cm->txfm_mode == TX_MODE_SELECT) ?
#if CONFIG_TX32X32
TX_32X32 :
#else
TX_16X16 :
#endif
cm->txfm_mode;
for (y = 0; y < 4; y++) {
for (x = 0; x < 4; x++) {
@@ -108,7 +108,6 @@ void vp9_subtract_mby_s_c(int16_t *diff, const uint8_t *src, int src_stride,
}
}

#if CONFIG_TX32X32
void vp9_subtract_sby_s_c(int16_t *diff, const uint8_t *src, int src_stride,
const uint8_t *pred, int dst_stride) {
int r, c;
@@ -152,7 +151,6 @@ void vp9_subtract_sbuv_s_c(int16_t *diff, const uint8_t *usrc,
vsrc += src_stride;
}
}
#endif

void vp9_subtract_mby_c(int16_t *diff, uint8_t *src,
uint8_t *pred, int stride) {
@@ -311,7 +309,6 @@ void vp9_transform_mb_16x16(MACROBLOCK *x) {
vp9_transform_mbuv_8x8(x);
}

#if CONFIG_TX32X32
void vp9_transform_sby_32x32(MACROBLOCK *x) {
SUPERBLOCK * const x_sb = &x->sb_coeff_data;
vp9_short_fdct32x32(x_sb->src_diff, x_sb->coeff, 64);
@@ -325,7 +322,6 @@ void vp9_transform_sbuv_16x16(MACROBLOCK *x) {
x->vp9_short_fdct16x16(x_sb->src_diff + 1280,
x_sb->coeff + 1280, 32);
}
#endif

#define RDTRUNC(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
#define RDTRUNC_8x8(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
@@ -43,10 +43,8 @@ void vp9_transform_mb_16x16(MACROBLOCK *mb);
void vp9_transform_mby_16x16(MACROBLOCK *x);
void vp9_optimize_mby_16x16(MACROBLOCK *x);

#if CONFIG_TX32X32
void vp9_transform_sby_32x32(MACROBLOCK *x);
void vp9_transform_sbuv_16x16(MACROBLOCK *x);
#endif

void vp9_fidct_mb(MACROBLOCK *x);

@@ -59,13 +57,11 @@ void vp9_subtract_mbuv_s_c(int16_t *diff, const uint8_t *usrc,
void vp9_subtract_mby_s_c(int16_t *diff, const uint8_t *src,
int src_stride, const uint8_t *pred,
int dst_stride);
#if CONFIG_TX32X32
void vp9_subtract_sby_s_c(int16_t *diff, const uint8_t *src, int src_stride,
const uint8_t *pred, int dst_stride);
void vp9_subtract_sbuv_s_c(int16_t *diff, const uint8_t *usrc,
const uint8_t *vsrc, int src_stride,
const uint8_t *upred,
const uint8_t *vpred, int dst_stride);
#endif // CONFIG_TX32X32

#endif // VP9_ENCODER_VP9_ENCODEMB_H_
@@ -3461,9 +3461,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi,
vp9_copy(cpi->common.fc.coef_counts_16x16, cpi->coef_counts_16x16);
vp9_copy(cpi->common.fc.hybrid_coef_counts_16x16,
cpi->hybrid_coef_counts_16x16);
#if CONFIG_TX32X32
vp9_copy(cpi->common.fc.coef_counts_32x32, cpi->coef_counts_32x32);
#endif
vp9_adapt_coef_probs(&cpi->common);
if (cpi->common.frame_type != KEY_FRAME) {
vp9_copy(cpi->common.fc.sb_ymode_counts, cpi->sb_ymode_count);
@@ -92,9 +92,7 @@ typedef struct {
vp9_coeff_probs hybrid_coef_probs_8x8[BLOCK_TYPES_8X8];
vp9_coeff_probs coef_probs_16x16[BLOCK_TYPES_16X16];
vp9_coeff_probs hybrid_coef_probs_16x16[BLOCK_TYPES_16X16];
#if CONFIG_TX32X32
vp9_coeff_probs coef_probs_32x32[BLOCK_TYPES_32X32];
#endif

vp9_prob sb_ymode_prob[VP9_I32X32_MODES - 1];
vp9_prob ymode_prob[VP9_YMODES - 1]; /* interframe intra mode probs */
@@ -331,14 +329,12 @@ typedef struct VP9_COMP {
DECLARE_ALIGNED(16, short, zrun_zbin_boost_y2_16x16[QINDEX_RANGE][256]);
DECLARE_ALIGNED(16, short, zrun_zbin_boost_uv_16x16[QINDEX_RANGE][256]);

#if CONFIG_TX32X32
DECLARE_ALIGNED(16, short, Y1zbin_32x32[QINDEX_RANGE][1024]);
DECLARE_ALIGNED(16, short, Y2zbin_32x32[QINDEX_RANGE][1024]);
DECLARE_ALIGNED(16, short, UVzbin_32x32[QINDEX_RANGE][1024]);
DECLARE_ALIGNED(16, short, zrun_zbin_boost_y1_32x32[QINDEX_RANGE][1024]);
DECLARE_ALIGNED(16, short, zrun_zbin_boost_y2_32x32[QINDEX_RANGE][1024]);
DECLARE_ALIGNED(16, short, zrun_zbin_boost_uv_32x32[QINDEX_RANGE][1024]);
#endif

MACROBLOCK mb;
VP9_COMMON common;
@@ -509,11 +505,9 @@ typedef struct VP9_COMP {
vp9_coeff_probs frame_hybrid_coef_probs_16x16[BLOCK_TYPES_16X16];
vp9_coeff_stats frame_hybrid_branch_ct_16x16[BLOCK_TYPES_16X16];

#if CONFIG_TX32X32
vp9_coeff_count coef_counts_32x32[BLOCK_TYPES_32X32];
vp9_coeff_probs frame_coef_probs_32x32[BLOCK_TYPES_32X32];
vp9_coeff_stats frame_branch_ct_32x32[BLOCK_TYPES_32X32];
#endif

int gfu_boost;
int last_boost;
@@ -379,7 +379,6 @@ void vp9_regular_quantize_b_16x16(BLOCK *b, BLOCKD *d) {
&d->eob, vp9_default_zig_zag1d_16x16, 1);
}

#if CONFIG_TX32X32
void vp9_quantize_sby_32x32(MACROBLOCK *x) {
x->e_mbd.block[0].eob = 0;
quantize(x->block[0].zrun_zbin_boost_32x32,
@@ -413,7 +412,6 @@ void vp9_quantize_sbuv_16x16(MACROBLOCK *x) {
&x->e_mbd.block[i].eob,
vp9_default_zig_zag1d_16x16, 1);
}
#endif

/* quantize_b_pair function pointer in MACROBLOCK structure is set to one of
* these two C functions if corresponding optimized routine is not available.
@@ -472,7 +470,6 @@ void vp9_init_quantizer(VP9_COMP *cpi) {
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
};
#if CONFIG_TX32X32
static const int zbin_boost_32x32[1024] = {
0, 0, 0, 8, 8, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28,
30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 48, 48, 48, 48, 48, 48,
@@ -539,7 +536,6 @@ void vp9_init_quantizer(VP9_COMP *cpi) {
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
};
#endif
int qrounding_factor = 48;

@@ -569,11 +565,9 @@ void vp9_init_quantizer(VP9_COMP *cpi) {
((quant_val * zbin_boost_8x8[0]) + 64) >> 7;
cpi->zrun_zbin_boost_y1_16x16[Q][0] =
((quant_val * zbin_boost_16x16[0]) + 64) >> 7;
#if CONFIG_TX32X32
cpi->Y1zbin_32x32[Q][0] = ((qzbin_factor * quant_val) + 64) >> 7;
cpi->zrun_zbin_boost_y1_32x32[Q][0] =
((quant_val * zbin_boost_32x32[0]) + 64) >> 7;
#endif

quant_val = vp9_dc2quant(Q, cpi->common.y2dc_delta_q);
@@ -677,7 +671,6 @@ void vp9_init_quantizer(VP9_COMP *cpi) {
cpi->zrun_zbin_boost_uv_16x16[Q][i] =
((quant_val * zbin_boost_16x16[i]) + 64) >> 7;
}
#if CONFIG_TX32X32
// 32x32 structures. Same comment above applies.
for (i = 1; i < 1024; i++) {
int rc = vp9_default_zig_zag1d_32x32[i];
@@ -687,7 +680,6 @@ void vp9_init_quantizer(VP9_COMP *cpi) {
cpi->zrun_zbin_boost_y1_32x32[Q][i] =
((quant_val * zbin_boost_32x32[i]) + 64) >> 7;
}
#endif
}
}

@@ -727,17 +719,13 @@ void vp9_mb_init_quantizer(VP9_COMP *cpi, MACROBLOCK *x) {
x->block[i].zbin = cpi->Y1zbin[QIndex];
x->block[i].zbin_8x8 = cpi->Y1zbin_8x8[QIndex];
x->block[i].zbin_16x16 = cpi->Y1zbin_16x16[QIndex];
#if CONFIG_TX32X32
x->block[i].zbin_32x32 = cpi->Y1zbin_32x32[QIndex];
#endif
x->block[i].round = cpi->Y1round[QIndex];
x->e_mbd.block[i].dequant = cpi->common.Y1dequant[QIndex];
x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex];
x->block[i].zrun_zbin_boost_8x8 = cpi->zrun_zbin_boost_y1_8x8[QIndex];
x->block[i].zrun_zbin_boost_16x16 = cpi->zrun_zbin_boost_y1_16x16[QIndex];
#if CONFIG_TX32X32
x->block[i].zrun_zbin_boost_32x32 = cpi->zrun_zbin_boost_y1_32x32[QIndex];
#endif
x->block[i].zbin_extra = (int16_t)zbin_extra;

// Segment max eob offset feature.
@@ -748,17 +736,13 @@ void vp9_mb_init_quantizer(VP9_COMP *cpi, MACROBLOCK *x) {
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB);
x->block[i].eob_max_offset_16x16 =
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB);
#if CONFIG_TX32X32
x->block[i].eob_max_offset_32x32 =
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB);
#endif
} else {
x->block[i].eob_max_offset = 16;
x->block[i].eob_max_offset_8x8 = 64;
x->block[i].eob_max_offset_16x16 = 256;
#if CONFIG_TX32X32
x->block[i].eob_max_offset_32x32 = 1024;
#endif
}
}
@@ -78,10 +78,8 @@ void vp9_quantize_mb_16x16(MACROBLOCK *x);
extern prototype_quantize_block(vp9_quantize_quantb_16x16);
extern prototype_quantize_mb(vp9_quantize_mby_16x16);

#if CONFIG_TX32X32
void vp9_quantize_sby_32x32(MACROBLOCK *x);
void vp9_quantize_sbuv_16x16(MACROBLOCK *x);
#endif

struct VP9_COMP;
@@ -173,9 +173,7 @@ void vp9_save_coding_context(VP9_COMP *cpi) {
vp9_copy(cc->hybrid_coef_probs_8x8, cm->fc.hybrid_coef_probs_8x8);
vp9_copy(cc->coef_probs_16x16, cm->fc.coef_probs_16x16);
vp9_copy(cc->hybrid_coef_probs_16x16, cm->fc.hybrid_coef_probs_16x16);
#if CONFIG_TX32X32
vp9_copy(cc->coef_probs_32x32, cm->fc.coef_probs_32x32);
#endif
vp9_copy(cc->switchable_interp_prob, cm->fc.switchable_interp_prob);
#if CONFIG_COMP_INTERINTRA_PRED
cc->interintra_prob = cm->fc.interintra_prob;
@@ -233,9 +231,7 @@ void vp9_restore_coding_context(VP9_COMP *cpi) {
vp9_copy(cm->fc.hybrid_coef_probs_8x8, cc->hybrid_coef_probs_8x8);
vp9_copy(cm->fc.coef_probs_16x16, cc->coef_probs_16x16);
vp9_copy(cm->fc.hybrid_coef_probs_16x16, cc->hybrid_coef_probs_16x16);
#if CONFIG_TX32X32
vp9_copy(cm->fc.coef_probs_32x32, cc->coef_probs_32x32);
#endif
vp9_copy(cm->fc.switchable_interp_prob, cc->switchable_interp_prob);
#if CONFIG_COMP_INTERINTRA_PRED
cm->fc.interintra_prob = cc->interintra_prob;
@@ -293,10 +293,8 @@ void vp9_initialize_rd_consts(VP9_COMP *cpi, int QIndex) {
fill_token_costs(cpi->mb.hybrid_token_costs[TX_16X16],
cpi->common.fc.hybrid_coef_probs_16x16, BLOCK_TYPES_16X16);

#if CONFIG_TX32X32
fill_token_costs(cpi->mb.token_costs[TX_32X32],
cpi->common.fc.coef_probs_32x32, BLOCK_TYPES_32X32);
#endif

/*rough estimate for costing*/
cpi->common.kf_ymode_probs_index = cpi->common.base_qindex >> 4;
@@ -435,9 +433,7 @@ static int cost_coeffs(MACROBLOCK *mb,
int pt;
const int eob = b->eob;
MACROBLOCKD *xd = &mb->e_mbd;
#if CONFIG_TX32X32
const int ib = (int)(b - xd->block);
#endif
int c = (type == PLANE_TYPE_Y_NO_DC) ? 1 : 0;
int cost = 0, seg_eob;
const int segment_id = xd->mode_info_context->mbmi.segment_id;
@@ -480,21 +476,17 @@ static int cost_coeffs(MACROBLOCK *mb,
scan = vp9_default_zig_zag1d_16x16;
band = vp9_coef_bands_16x16;
seg_eob = 256;
#if CONFIG_TX32X32
if (type == PLANE_TYPE_UV) {
const int uv_idx = ib - 16;
qcoeff_ptr = xd->sb_coeff_data.qcoeff + 1024 + 64 * uv_idx;
}
#endif
break;
#if CONFIG_TX32X32
case TX_32X32:
scan = vp9_default_zig_zag1d_32x32;
band = vp9_coef_bands_32x32;
seg_eob = 1024;
qcoeff_ptr = xd->sb_coeff_data.qcoeff;
break;
#endif
default:
abort();
break;
@@ -761,21 +753,17 @@ static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
}
}

#if CONFIG_TX32X32
if (max_txfm_size == TX_32X32 &&
(cm->txfm_mode == ALLOW_32X32 ||
(cm->txfm_mode == TX_MODE_SELECT &&
rd[TX_32X32][1] < rd[TX_16X16][1] && rd[TX_32X32][1] < rd[TX_8X8][1] &&
rd[TX_32X32][1] < rd[TX_4X4][1]))) {
mbmi->txfm_size = TX_32X32;
} else
#endif
if ( cm->txfm_mode == ALLOW_16X16 ||
#if CONFIG_TX32X32
(max_txfm_size == TX_16X16 && cm->txfm_mode == ALLOW_32X32) ||
#endif
(cm->txfm_mode == TX_MODE_SELECT &&
rd[TX_16X16][1] < rd[TX_8X8][1] && rd[TX_16X16][1] < rd[TX_4X4][1])) {
} else if ( cm->txfm_mode == ALLOW_16X16 ||
(max_txfm_size == TX_16X16 && cm->txfm_mode == ALLOW_32X32) ||
(cm->txfm_mode == TX_MODE_SELECT &&
rd[TX_16X16][1] < rd[TX_8X8][1] &&
rd[TX_16X16][1] < rd[TX_4X4][1])) {
mbmi->txfm_size = TX_16X16;
} else if (cm->txfm_mode == ALLOW_8X8 ||
(cm->txfm_mode == TX_MODE_SELECT && rd[TX_8X8][1] < rd[TX_4X4][1])) {
@@ -792,15 +780,12 @@ static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
txfm_cache[ONLY_4X4] = rd[TX_4X4][0];
txfm_cache[ALLOW_8X8] = rd[TX_8X8][0];
txfm_cache[ALLOW_16X16] = rd[TX_16X16][0];
#if CONFIG_TX32X32
txfm_cache[ALLOW_32X32] = rd[max_txfm_size][0];
if (max_txfm_size == TX_32X32 &&
rd[TX_32X32][1] < rd[TX_16X16][1] && rd[TX_32X32][1] < rd[TX_8X8][1] &&
rd[TX_32X32][1] < rd[TX_4X4][1])
txfm_cache[TX_MODE_SELECT] = rd[TX_32X32][1];
else
#endif
if (rd[TX_16X16][1] < rd[TX_8X8][1] && rd[TX_16X16][1] < rd[TX_4X4][1])
else if (rd[TX_16X16][1] < rd[TX_8X8][1] && rd[TX_16X16][1] < rd[TX_4X4][1])
txfm_cache[TX_MODE_SELECT] = rd[TX_16X16][1];
else
txfm_cache[TX_MODE_SELECT] = rd[TX_4X4][1] < rd[TX_8X8][1] ?
@@ -833,7 +818,6 @@ static void copy_predictor(uint8_t *dst, const uint8_t *predictor) {
d[12] = p[12];
}

#if CONFIG_TX32X32
static int rdcost_sby_32x32(MACROBLOCK *x, int backup) {
MACROBLOCKD * const xd = &x->e_mbd;
ENTROPY_CONTEXT_PLANES t_above, t_left;
@@ -895,7 +879,6 @@ static void super_block_yrd_32x32(MACROBLOCK *x,
*rate = rdcost_sby_32x32(x, backup);
*skippable = vp9_sby_is_skippable_32x32(&x->e_mbd);
}
#endif

static void super_block_yrd(VP9_COMP *cpi,
MACROBLOCK *x, int *rate, int *distortion,
@@ -918,11 +901,9 @@ static void super_block_yrd(VP9_COMP *cpi,
s[n] = 1;
}

#if CONFIG_TX32X32
vp9_subtract_sby_s_c(x->sb_coeff_data.src_diff, src, src_y_stride,
dst, dst_y_stride);
super_block_yrd_32x32(x, &r[TX_32X32][0], &d[TX_32X32], &s[TX_32X32], 1);
#endif

#if DEBUG_ERROR
int err[3] = { 0, 0, 0 };
@@ -1003,7 +984,6 @@ static void super_block_64_yrd(VP9_COMP *cpi,
s[n] = 1;
}

#if CONFIG_TX32X32
for (n = 0; n < 4; n++) {
int x_idx = n & 1, y_idx = n >> 1;
int r_tmp, d_tmp, s_tmp;
@@ -1020,7 +1000,6 @@ static void super_block_64_yrd(VP9_COMP *cpi,
d[TX_32X32] += d_tmp;
s[TX_32X32] = s[TX_32X32] && s_tmp;
}
#endif

#if DEBUG_ERROR
int err[3] = { 0, 0, 0 };
@@ -1784,7 +1763,6 @@ static int64_t rd_inter16x16_uv_8x8(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
}

#if CONFIG_TX32X32
static int rd_cost_sbuv_16x16(MACROBLOCK *x, int backup) {
int b;
int cost = 0;
@@ -1824,7 +1802,6 @@ static void rd_inter32x32_uv_16x16(MACROBLOCK *x, int *rate,
xd->sb_coeff_data.dqcoeff + 1024, 512) >> 2;
*skip = vp9_sbuv_is_skippable_16x16(xd);
}
#endif

static int64_t rd_inter32x32_uv(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
int *distortion, int fullpixel, int *skip) {
@@ -1834,15 +1811,12 @@ static int64_t rd_inter32x32_uv(VP9_COMP *cpi, MACROBLOCK *x, int *rate,
const uint8_t *vsrc = x->src.v_buffer, *vdst = xd->dst.v_buffer;
int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->dst.uv_stride;

#if CONFIG_TX32X32
if (mbmi->txfm_size == TX_32X32) {
vp9_subtract_sbuv_s_c(x->sb_coeff_data.src_diff,
usrc, vsrc, src_uv_stride,
udst, vdst, dst_uv_stride);
rd_inter32x32_uv_16x16(x, rate, distortion, skip, 1);
} else
#endif
{
} else {
int n, r = 0, d = 0;
int skippable = 1;
ENTROPY_CONTEXT_PLANES t_above[2], t_left[2];
@@ -2040,15 +2014,12 @@ static void super_block_uvrd(MACROBLOCK *x,
const uint8_t *vsrc = x->src.v_buffer, *vdst = xd->dst.v_buffer;
int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->dst.uv_stride;

#if CONFIG_TX32X32
if (mbmi->txfm_size == TX_32X32) {
vp9_subtract_sbuv_s_c(x->sb_coeff_data.src_diff,
usrc, vsrc, src_uv_stride,
udst, vdst, dst_uv_stride);
rd_inter32x32_uv_16x16(x, rate, distortion, skippable, 1);
} else
#endif
{
} else {
int d = 0, r = 0, n, s = 1;
ENTROPY_CONTEXT_PLANES t_above[2], t_left[2];
ENTROPY_CONTEXT_PLANES *ta_orig = xd->above_context;
@@ -2113,7 +2084,6 @@ static void super_block_64_uvrd(MACROBLOCK *x,
memcpy(t_above, xd->above_context, sizeof(t_above));
memcpy(t_left, xd->left_context, sizeof(t_left));

#if CONFIG_TX32X32
if (mbmi->txfm_size == TX_32X32) {
int n;

@@ -2136,9 +2106,7 @@ static void super_block_64_uvrd(MACROBLOCK *x,
d += d_tmp;
s = s && s_tmp;
}
} else
#endif
{
} else {
for (n = 0; n < 16; n++) {
int x_idx = n & 3, y_idx = n >> 2;

@@ -4749,11 +4717,9 @@ static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
int dist_uv_4x4 = 0, dist_uv_8x8 = 0, uv_skip_4x4 = 0, uv_skip_8x8 = 0;
MB_PREDICTION_MODE mode_uv_4x4 = NEARESTMV, mode_uv_8x8 = NEARESTMV;
int switchable_filter_index = 0;
#if CONFIG_TX32X32
int rate_uv_16x16 = 0, rate_uv_tokenonly_16x16 = 0;
int dist_uv_16x16 = 0, uv_skip_16x16 = 0;
MB_PREDICTION_MODE mode_uv_16x16 = NEARESTMV;
#endif

x->skip = 0;
xd->mode_info_context->mbmi.segment_id = segment_id;
@@ -4790,7 +4756,6 @@ static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
&dist_uv_8x8, &uv_skip_8x8);
mode_uv_8x8 = mbmi->uv_mode;
}
#if CONFIG_TX32X32
if (cm->txfm_mode >= ALLOW_32X32) {
mbmi->txfm_size = TX_32X32;
rd_pick_intra_sb64uv_mode(cpi, x, &rate_uv_16x16,
@@ -4798,7 +4763,6 @@ static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
&dist_uv_16x16, &uv_skip_16x16);
mode_uv_16x16 = mbmi->uv_mode;
}
#endif // CONFIG_TX32X32
} else {
assert(block_size == BLOCK_32X32);
mbmi->mode = DC_PRED;
@@ -4814,14 +4778,12 @@ static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
&dist_uv_8x8, &uv_skip_8x8);
mode_uv_8x8 = mbmi->uv_mode;
}
#if CONFIG_TX32X32
if (cm->txfm_mode >= ALLOW_32X32) {
mbmi->txfm_size = TX_32X32;
rd_pick_intra_sbuv_mode(cpi, x, &rate_uv_16x16, &rate_uv_tokenonly_16x16,
&dist_uv_16x16, &uv_skip_16x16);
mode_uv_16x16 = mbmi->uv_mode;
}
#endif // CONFIG_TX32X32
}

for (mode_index = 0; mode_index < MAX_MODES;
@@ -4965,13 +4927,11 @@ static int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
distortion_uv = dist_uv_4x4;
skippable = skippable && uv_skip_4x4;
mbmi->uv_mode = mode_uv_4x4;
#if CONFIG_TX32X32
} else if (mbmi->txfm_size == TX_32X32) {
rate_uv = rate_uv_16x16;
distortion_uv = dist_uv_16x16;
skippable = skippable && uv_skip_16x16;
mbmi->uv_mode = mode_uv_16x16;
#endif // CONFIG_TX32X32
} else {
rate_uv = rate_uv_8x8;
distortion_uv = dist_uv_8x8;
@@ -31,9 +31,7 @@ vp9_coeff_accum context_counters_8x8[BLOCK_TYPES_8X8];
vp9_coeff_accum hybrid_context_counters_8x8[BLOCK_TYPES_8X8];
vp9_coeff_accum context_counters_16x16[BLOCK_TYPES_16X16];
vp9_coeff_accum hybrid_context_counters_16x16[BLOCK_TYPES_16X16];
#if CONFIG_TX32X32
vp9_coeff_accum context_counters_32x32[BLOCK_TYPES_32X32];
#endif

extern vp9_coeff_stats tree_update_hist_4x4[BLOCK_TYPES_4X4];
extern vp9_coeff_stats hybrid_tree_update_hist_4x4[BLOCK_TYPES_4X4];
@@ -41,9 +39,7 @@ extern vp9_coeff_stats tree_update_hist_8x8[BLOCK_TYPES_8X8];
extern vp9_coeff_stats hybrid_tree_update_hist_8x8[BLOCK_TYPES_8X8];
extern vp9_coeff_stats tree_update_hist_16x16[BLOCK_TYPES_16X16];
extern vp9_coeff_stats hybrid_tree_update_hist_16x16[BLOCK_TYPES_16X16];
#if CONFIG_TX32X32
extern vp9_coeff_stats tree_update_hist_32x32[BLOCK_TYPES_32X32];
#endif
#endif /* ENTROPY_STATS */

static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE * 2];
@@ -141,12 +137,10 @@ static void tokenize_b(VP9_COMP *cpi,
vp9_block2left[tx_size][ib];
ENTROPY_CONTEXT a_ec = *a, l_ec = *l;

#if CONFIG_TX32X32
ENTROPY_CONTEXT *const a1 = (ENTROPY_CONTEXT *)(&xd->above_context[1]) +
vp9_block2above[tx_size][ib];
ENTROPY_CONTEXT *const l1 = (ENTROPY_CONTEXT *)(&xd->left_context[1]) +
vp9_block2left[tx_size][ib];
#endif

switch (tx_size) {
@@ -195,11 +189,9 @@ static void tokenize_b(VP9_COMP *cpi,
if (type != PLANE_TYPE_UV) {
a_ec = (a[0] + a[1] + a[2] + a[3]) != 0;
l_ec = (l[0] + l[1] + l[2] + l[3]) != 0;
#if CONFIG_TX32X32
} else {
a_ec = (a[0] + a[1] + a1[0] + a1[1]) != 0;
l_ec = (l[0] + l[1] + l1[0] + l1[1]) != 0;
#endif
}
#endif
seg_eob = 256;
@@ -212,14 +204,11 @@ static void tokenize_b(VP9_COMP *cpi,
counts = cpi->coef_counts_16x16;
probs = cpi->common.fc.coef_probs_16x16;
}
#if CONFIG_TX32X32
if (type == PLANE_TYPE_UV) {
int uv_idx = (ib - 16) >> 2;
qcoeff_ptr = xd->sb_coeff_data.qcoeff + 1024 + 256 * uv_idx;
}
#endif
break;
#if CONFIG_TX32X32
case TX_32X32:
#if CONFIG_CNVCONTEXT
a_ec = a[0] + a[1] + a[2] + a[3] +
@@ -236,7 +225,6 @@ static void tokenize_b(VP9_COMP *cpi,
probs = cpi->common.fc.coef_probs_32x32;
qcoeff_ptr = xd->sb_coeff_data.qcoeff;
break;
#endif
}

VP9_COMBINEENTROPYCONTEXTS(pt, a_ec, l_ec);
@@ -294,19 +282,15 @@ static void tokenize_b(VP9_COMP *cpi,
if (type != PLANE_TYPE_UV) {
a[1] = a[2] = a[3] = a_ec;
l[1] = l[2] = l[3] = l_ec;
#if CONFIG_TX32X32
} else {
a1[0] = a1[1] = a[1] = a_ec;
l1[0] = l1[1] = l[1] = l_ec;
#endif
}
#if CONFIG_TX32X32
} else if (tx_size == TX_32X32) {
a[1] = a[2] = a[3] = a_ec;
l[1] = l[2] = l[3] = l_ec;
a1[0] = a1[1] = a1[2] = a1[3] = a_ec;
l1[0] = l1[1] = l1[2] = l1[3] = l_ec;
#endif
}
}

@@ -378,7 +362,6 @@ static int mb_is_skippable_16x16(MACROBLOCKD *xd) {
return (vp9_mby_is_skippable_16x16(xd) & vp9_mbuv_is_skippable_8x8(xd));
}

#if CONFIG_TX32X32
int vp9_sby_is_skippable_32x32(MACROBLOCKD *xd) {
int skip = 1;
skip &= !xd->block[0].eob;
@@ -440,7 +423,6 @@ void vp9_tokenize_sb(VP9_COMP *cpi,
if (dry_run)
*t = t_backup;
}
#endif

void vp9_tokenize_mb(VP9_COMP *cpi,
MACROBLOCKD *xd,
@ -557,9 +539,7 @@ void init_context_counters(void) {
|
||||
vpx_memset(context_counters_16x16, 0, sizeof(context_counters_16x16));
|
||||
vpx_memset(hybrid_context_counters_16x16, 0,
|
||||
sizeof(hybrid_context_counters_16x16));
|
||||
#if CONFIG_TX32X32
|
||||
vpx_memset(context_counters_32x32, 0, sizeof(context_counters_32x32));
|
||||
#endif
|
||||
} else {
|
||||
fread(context_counters_4x4, sizeof(context_counters_4x4), 1, f);
|
||||
fread(hybrid_context_counters_4x4,
|
||||
@ -570,9 +550,7 @@ void init_context_counters(void) {
|
||||
fread(context_counters_16x16, sizeof(context_counters_16x16), 1, f);
|
||||
fread(hybrid_context_counters_16x16,
|
||||
sizeof(hybrid_context_counters_16x16), 1, f);
|
||||
#if CONFIG_TX32X32
|
||||
fread(context_counters_32x32, sizeof(context_counters_32x32), 1, f);
|
||||
#endif
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
@ -587,9 +565,7 @@ void init_context_counters(void) {
|
||||
vpx_memset(tree_update_hist_16x16, 0, sizeof(tree_update_hist_16x16));
|
||||
vpx_memset(hybrid_tree_update_hist_16x16, 0,
|
||||
sizeof(hybrid_tree_update_hist_16x16));
|
||||
#if CONFIG_TX32X32
|
||||
vpx_memset(tree_update_hist_32x32, 0, sizeof(tree_update_hist_32x32));
|
||||
#endif
|
||||
} else {
|
||||
fread(tree_update_hist_4x4, sizeof(tree_update_hist_4x4), 1, f);
|
||||
fread(hybrid_tree_update_hist_4x4,
|
||||
@ -600,9 +576,7 @@ void init_context_counters(void) {
|
||||
fread(tree_update_hist_16x16, sizeof(tree_update_hist_16x16), 1, f);
|
||||
fread(hybrid_tree_update_hist_16x16,
|
||||
sizeof(hybrid_tree_update_hist_16x16), 1, f);
|
||||
#if CONFIG_TX32X32
|
||||
fread(tree_update_hist_32x32, sizeof(tree_update_hist_32x32), 1, f);
|
||||
#endif
|
||||
fclose(f);
|
||||
}
|
||||
}
|
||||
@ -702,10 +676,8 @@ void print_context_counters() {
|
||||
"vp9_default_coef_counts_16x16[BLOCK_TYPES_16X16]");
|
||||
print_counter(f, hybrid_context_counters_16x16, BLOCK_TYPES_16X16,
|
||||
"vp9_default_hybrid_coef_counts_16x16[BLOCK_TYPES_16X16]");
|
||||
#if CONFIG_TX32X32
|
||||
print_counter(f, context_counters_32x32, BLOCK_TYPES_32X32,
|
||||
"vp9_default_coef_counts_32x32[BLOCK_TYPES_32X32]");
|
||||
#endif
|
||||
|
||||
/* print coefficient probabilities */
|
||||
print_probs(f, context_counters_4x4, BLOCK_TYPES_4X4,
|
||||
@ -720,10 +692,8 @@ void print_context_counters() {
|
||||
"default_coef_probs_16x16[BLOCK_TYPES_16X16]");
|
||||
print_probs(f, hybrid_context_counters_16x16, BLOCK_TYPES_16X16,
|
||||
"default_hybrid_coef_probs_16x16[BLOCK_TYPES_16X16]");
|
||||
#if CONFIG_TX32X32
|
||||
print_probs(f, context_counters_32x32, BLOCK_TYPES_32X32,
|
||||
"default_coef_probs_32x32[BLOCK_TYPES_32X32]");
|
||||
#endif
|
||||
|
||||
fclose(f);
|
||||
|
||||
@ -737,9 +707,7 @@ void print_context_counters() {
|
||||
fwrite(context_counters_16x16, sizeof(context_counters_16x16), 1, f);
|
||||
fwrite(hybrid_context_counters_16x16,
|
||||
sizeof(hybrid_context_counters_16x16), 1, f);
|
||||
#if CONFIG_TX32X32
|
||||
fwrite(context_counters_32x32, sizeof(context_counters_32x32), 1, f);
|
||||
#endif
|
||||
fclose(f);
|
||||
}
|
||||
#endif
|
||||
@ -768,12 +736,10 @@ static __inline void stuff_b(VP9_COMP *cpi,
|
||||
ENTROPY_CONTEXT *const l = (ENTROPY_CONTEXT *)xd->left_context +
|
||||
vp9_block2left[tx_size][ib];
|
||||
ENTROPY_CONTEXT a_ec = *a, l_ec = *l;
|
||||
#if CONFIG_TX32X32
|
||||
ENTROPY_CONTEXT *const a1 = (ENTROPY_CONTEXT *)(&xd->above_context[1]) +
|
||||
vp9_block2above[tx_size][ib];
|
||||
ENTROPY_CONTEXT *const l1 = (ENTROPY_CONTEXT *)(&xd->left_context[1]) +
|
||||
vp9_block2left[tx_size][ib];
|
||||
#endif
|
||||
|
||||
switch (tx_size) {
|
||||
default:
|
||||
@ -808,11 +774,9 @@ static __inline void stuff_b(VP9_COMP *cpi,
|
||||
if (type != PLANE_TYPE_UV) {
|
||||
a_ec = (a[0] + a[1] + a[2] + a[3]) != 0;
|
||||
l_ec = (l[0] + l[1] + l[2] + l[3]) != 0;
|
||||
#if CONFIG_TX32X32
|
||||
} else {
|
||||
a_ec = (a[0] + a[1] + a1[0] + a1[1]) != 0;
|
||||
l_ec = (l[0] + l[1] + l1[0] + l1[1]) != 0;
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
bands = vp9_coef_bands_16x16;
|
||||
@ -824,7 +788,6 @@ static __inline void stuff_b(VP9_COMP *cpi,
|
||||
probs = cpi->common.fc.coef_probs_16x16;
|
||||
}
|
||||
break;
|
||||
#if CONFIG_TX32X32
|
||||
case TX_32X32:
|
||||
#if CONFIG_CNVCONTEXT
|
||||
a_ec = a[0] + a[1] + a[2] + a[3] +
|
||||
@ -838,7 +801,6 @@ static __inline void stuff_b(VP9_COMP *cpi,
|
||||
counts = cpi->coef_counts_32x32;
|
||||
probs = cpi->common.fc.coef_probs_32x32;
|
||||
break;
|
||||
#endif
|
||||
}
|
||||
|
||||
VP9_COMBINEENTROPYCONTEXTS(pt, a_ec, l_ec);
|
||||
@ -857,19 +819,15 @@ static __inline void stuff_b(VP9_COMP *cpi,
|
||||
if (type != PLANE_TYPE_UV) {
|
||||
a[1] = a[2] = a[3] = 0;
|
||||
l[1] = l[2] = l[3] = 0;
|
||||
#if CONFIG_TX32X32
|
||||
} else {
|
||||
a1[0] = a1[1] = a[1] = a_ec;
|
||||
l1[0] = l1[1] = l[1] = l_ec;
|
||||
#endif
|
||||
}
|
||||
#if CONFIG_TX32X32
|
||||
} else if (tx_size == TX_32X32) {
|
||||
a[1] = a[2] = a[3] = a_ec;
|
||||
l[1] = l[2] = l[3] = l_ec;
|
||||
a1[0] = a1[1] = a1[2] = a1[3] = a_ec;
|
||||
l1[0] = l1[1] = l1[2] = l1[3] = l_ec;
|
||||
#endif
|
||||
}
|
||||
|
||||
if (!dry_run) {
|
||||
@ -983,7 +941,6 @@ void vp9_stuff_mb(VP9_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) {
|
||||
}
|
||||
}
|
||||
|
||||
#if CONFIG_TX32X32
|
||||
static void stuff_sb_32x32(VP9_COMP *cpi, MACROBLOCKD *xd,
|
||||
TOKENEXTRA **t, int dry_run) {
|
||||
int b;
|
||||
@ -1003,11 +960,8 @@ void vp9_stuff_sb(VP9_COMP *cpi, MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run) {
|
||||
*t = t_backup;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#if CONFIG_TX32X32
|
||||
void vp9_fix_contexts_sb(MACROBLOCKD *xd) {
|
||||
vpx_memset(xd->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * 2);
|
||||
vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * 2);
|
||||
}
|
||||
#endif
|
||||
|
@ -36,10 +36,8 @@ extern int vp9_mbuv_is_skippable_4x4(MACROBLOCKD *xd);
extern int vp9_mby_is_skippable_8x8(MACROBLOCKD *xd, int has_y2_block);
extern int vp9_mbuv_is_skippable_8x8(MACROBLOCKD *xd);
extern int vp9_mby_is_skippable_16x16(MACROBLOCKD *xd);
#if CONFIG_TX32X32
extern int vp9_sby_is_skippable_32x32(MACROBLOCKD *xd);
extern int vp9_sbuv_is_skippable_16x16(MACROBLOCKD *xd);
#endif

struct VP9_COMP;

@ -50,14 +48,10 @@ extern void vp9_tokenize_sb(struct VP9_COMP *cpi, MACROBLOCKD *xd,

extern void vp9_stuff_mb(struct VP9_COMP *cpi, MACROBLOCKD *xd,
TOKENEXTRA **t, int dry_run);
#if CONFIG_TX32X32
extern void vp9_stuff_sb(struct VP9_COMP *cpi, MACROBLOCKD *xd,
TOKENEXTRA **t, int dry_run);
#endif

#if CONFIG_TX32X32
extern void vp9_fix_contexts_sb(MACROBLOCKD *xd);
#endif
#ifdef ENTROPY_STATS
void init_context_counters();
void print_context_counters();