Code clean up.

Further cases of inconsistent naming convention.

Change-Id: Id3411ecec6f01a4c889268a00f0c9fd5a92ea143

parent a1168155a7
commit 77dc5c65f2
@@ -15,14 +15,15 @@
 
 extern void vp8_recon16x16mb_neon(unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int ystride, unsigned char *udst_ptr, unsigned char *vdst_ptr);
 
-void vp8_recon_mb_neon(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x) {
-  unsigned char *pred_ptr = &x->predictor[0];
-  short *diff_ptr = &x->diff[0];
-  unsigned char *dst_ptr = x->dst.y_buffer;
-  unsigned char *udst_ptr = x->dst.u_buffer;
-  unsigned char *vdst_ptr = x->dst.v_buffer;
-  int ystride = x->dst.y_stride;
-  /*int uv_stride = x->dst.uv_stride;*/
+void vp8_recon_mb_neon(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd) {
+  unsigned char *pred_ptr = &xd->predictor[0];
+  short *diff_ptr = &xd->diff[0];
+  unsigned char *dst_ptr = xd->dst.y_buffer;
+  unsigned char *udst_ptr = xd->dst.u_buffer;
+  unsigned char *vdst_ptr = xd->dst.v_buffer;
+  int ystride = xd->dst.y_stride;
+  /*int uv_stride = xd->dst.uv_stride;*/
 
-  vp8_recon16x16mb_neon(pred_ptr, diff_ptr, dst_ptr, ystride, udst_ptr, vdst_ptr);
+  vp8_recon16x16mb_neon(pred_ptr, diff_ptr, dst_ptr, ystride,
+                        udst_ptr, vdst_ptr);
 }
@@ -24,15 +24,16 @@ extern void vp8_build_intra_predictors_mby_neon_func(
     int Up,
     int Left);
 
-void vp8_build_intra_predictors_mby_neon(MACROBLOCKD *x) {
-  unsigned char *y_buffer = x->dst.y_buffer;
-  unsigned char *ypred_ptr = x->predictor;
-  int y_stride = x->dst.y_stride;
-  int mode = x->mode_info_context->mbmi.mode;
-  int Up = x->up_available;
-  int Left = x->left_available;
+void vp8_build_intra_predictors_mby_neon(MACROBLOCKD *xd) {
+  unsigned char *y_buffer = xd->dst.y_buffer;
+  unsigned char *ypred_ptr = xd->predictor;
+  int y_stride = xd->dst.y_stride;
+  int mode = xd->mode_info_context->mbmi.mode;
+  int Up = xd->up_available;
+  int Left = xd->left_available;
 
-  vp8_build_intra_predictors_mby_neon_func(y_buffer, ypred_ptr, y_stride, mode, Up, Left);
+  vp8_build_intra_predictors_mby_neon_func(y_buffer, ypred_ptr,
+                                           y_stride, mode, Up, Left);
 }
 #endif
 
@@ -46,15 +47,16 @@ extern void vp8_build_intra_predictors_mby_s_neon_func(
     int Up,
     int Left);
 
-void vp8_build_intra_predictors_mby_s_neon(MACROBLOCKD *x) {
-  unsigned char *y_buffer = x->dst.y_buffer;
-  unsigned char *ypred_ptr = x->predictor;
-  int y_stride = x->dst.y_stride;
-  int mode = x->mode_info_context->mbmi.mode;
-  int Up = x->up_available;
-  int Left = x->left_available;
+void vp8_build_intra_predictors_mby_s_neon(MACROBLOCKD *xd) {
+  unsigned char *y_buffer = xd->dst.y_buffer;
+  unsigned char *ypred_ptr = xd->predictor;
+  int y_stride = xd->dst.y_stride;
+  int mode = xd->mode_info_context->mbmi.mode;
+  int Up = xd->up_available;
+  int Left = xd->left_available;
 
-  vp8_build_intra_predictors_mby_s_neon_func(y_buffer, ypred_ptr, y_stride, mode, Up, Left);
+  vp8_build_intra_predictors_mby_s_neon_func(y_buffer, ypred_ptr,
+                                             y_stride, mode, Up, Left);
 }
 
 #endif
 
@@ -463,8 +463,8 @@ static void txfm_map(BLOCKD *b, B_PREDICTION_MODE bmode) {
 }
 #endif
 
-extern void vp8_build_block_doffsets(MACROBLOCKD *x);
-extern void vp8_setup_block_dptrs(MACROBLOCKD *x);
+extern void vp8_build_block_doffsets(MACROBLOCKD *xd);
+extern void vp8_setup_block_dptrs(MACROBLOCKD *xd);
 
 static void update_blockd_bmi(MACROBLOCKD *xd) {
   int i;
@@ -13,21 +13,21 @@
 
 
-static void recon_dcblock(MACROBLOCKD *x) {
-  BLOCKD *b = &x->block[24];
+static void recon_dcblock(MACROBLOCKD *xd) {
+  BLOCKD *b = &xd->block[24];
   int i;
 
   for (i = 0; i < 16; i++) {
-    x->block[i].dqcoeff[0] = b->diff[i];
+    xd->block[i].dqcoeff[0] = b->diff[i];
   }
 
 }
-static void recon_dcblock_8x8(MACROBLOCKD *x) {
-  BLOCKD *b = &x->block[24]; // for coeff 0, 2, 8, 10
-  x->block[0].dqcoeff[0] = b->diff[0];
-  x->block[4].dqcoeff[0] = b->diff[1];
-  x->block[8].dqcoeff[0] = b->diff[4];
-  x->block[12].dqcoeff[0] = b->diff[8];
+static void recon_dcblock_8x8(MACROBLOCKD *xd) {
+  BLOCKD *b = &xd->block[24]; // for coeff 0, 2, 8, 10
+  xd->block[0].dqcoeff[0] = b->diff[0];
+  xd->block[4].dqcoeff[0] = b->diff[1];
+  xd->block[8].dqcoeff[0] = b->diff[4];
+  xd->block[12].dqcoeff[0] = b->diff[8];
 
 }
@@ -45,24 +45,28 @@ void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int
 }
 
 
-void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) {
+void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd,
+                               MACROBLOCKD *xd) {
   int i;
+  BLOCKD *blockd = xd->block;
 
   /* do 2nd order transform on the dc block */
-  IDCT_INVOKE(rtcd, iwalsh16)(x->block[24].dqcoeff, x->block[24].diff);
+  IDCT_INVOKE(rtcd, iwalsh16)(blockd[24].dqcoeff, blockd[24].diff);
 
-  recon_dcblock(x);
+  recon_dcblock(xd);
 
   for (i = 0; i < 16; i++) {
-    vp8_inverse_transform_b(rtcd, &x->block[i], 32);
+    vp8_inverse_transform_b(rtcd, &blockd[i], 32);
   }
 
 }
-void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) {
+void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd,
+                                MACROBLOCKD *xd) {
   int i;
+  BLOCKD *blockd = xd->block;
 
   for (i = 16; i < 24; i++) {
-    vp8_inverse_transform_b(rtcd, &x->block[i], 16);
+    vp8_inverse_transform_b(rtcd, &blockd[i], 16);
   }
 
 }
@@ -21,21 +21,21 @@ extern void vp8_inverse_htransform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD
 #endif
 
 extern void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
-extern void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
-extern void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
-extern void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
+extern void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
 
 extern void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd, short *input_dqcoeff, short *output_coeff, int pitch);
-extern void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
-extern void vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
-extern void vp8_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
+extern void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
 
 #if CONFIG_TX16X16
 extern void vp8_inverse_transform_b_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
                                           short *input_dqcoeff, short *output_coeff,
                                           int pitch);
-extern void vp8_inverse_transform_mb_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
-extern void vp8_inverse_transform_mby_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
-extern void vp8_inverse_transform_mbuv_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
+extern void vp8_inverse_transform_mb_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_mby_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_mbuv_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
 #endif
 #endif
@@ -124,36 +124,36 @@ void vp8_recon2b_c
   }
 }
 
-void vp8_recon_mby_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x) {
+void vp8_recon_mby_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd) {
 #if ARCH_ARM
-  BLOCKD *b = &x->block[0];
+  BLOCKD *b = &xd->block[0];
   RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
 
-  /*b = &x->block[4];*/
+  /*b = &xd->block[4];*/
   b += 4;
   RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
 
-  /*b = &x->block[8];*/
+  /*b = &xd->block[8];*/
   b += 4;
   RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
 
-  /*b = &x->block[12];*/
+  /*b = &xd->block[12];*/
   b += 4;
   RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
 #else
   int i;
 
   for (i = 0; i < 16; i += 4) {
-    BLOCKD *b = &x->block[i];
+    BLOCKD *b = &xd->block[i];
 
     RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
   }
 #endif
 }
 
-void vp8_recon_mb_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x) {
+void vp8_recon_mb_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd) {
 #if ARCH_ARM
-  BLOCKD *b = &x->block[0];
+  BLOCKD *b = &xd->block[0];
 
   RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
   b += 4;
@@ -164,7 +164,7 @@ void vp8_recon_mb_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x) {
   RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
   b += 4;
 
-  /*b = &x->block[16];*/
+  /*b = &xd->block[16];*/
 
   RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
   b++;
@@ -180,13 +180,13 @@ void vp8_recon_mb_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x) {
   int i;
 
   for (i = 0; i < 16; i += 4) {
-    BLOCKD *b = &x->block[i];
+    BLOCKD *b = &xd->block[i];
 
     RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
   }
 
   for (i = 16; i < 24; i += 2) {
-    BLOCKD *b = &x->block[i];
+    BLOCKD *b = &xd->block[i];
 
     RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
   }
@@ -21,10 +21,10 @@
   void sym(unsigned char *pred, short *diff, unsigned char *dst, int pitch)
 
 #define prototype_recon_macroblock(sym) \
-  void sym(const struct vp8_recon_rtcd_vtable *rtcd, MACROBLOCKD *x)
+  void sym(const struct vp8_recon_rtcd_vtable *rtcd, MACROBLOCKD *xd)
 
 #define prototype_build_intra_predictors(sym) \
-  void sym(MACROBLOCKD *x)
+  void sym(MACROBLOCKD *xd)
 
 #define prototype_intra4x4_predict(sym) \
   void sym(BLOCKD *x, int b_mode, unsigned char *predictor)
@@ -244,5 +244,6 @@ typedef struct vp8_recon_rtcd_vtable {
 #define RECON_INVOKE(ctx,fn) vp8_recon_##fn
 #endif
 
-void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x);
+void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd,
+                          MACROBLOCKD *xd);
 #endif
@@ -295,7 +295,7 @@ void vp8_build_2nd_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf
   }
 }
 
-static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, int pitch) {
+static void build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
   unsigned char *ptr_base;
   unsigned char *ptr;
   unsigned char *pred_ptr = d->predictor;
@@ -303,12 +303,15 @@ static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, int pitch) {
 
   ptr_base = *(d->base_pre);
   mv.as_int = d->bmi.as_mv.first.as_int;
-  ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
+  ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
+        (mv.as_mv.col >> 3);
 
   if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
-    x->subpixel_predict8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1, (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
+    xd->subpixel_predict8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
+                            (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
   } else {
-    RECON_INVOKE(&x->rtcd->recon, copy8x8)(ptr, d->pre_stride, pred_ptr, pitch);
+    RECON_INVOKE(&xd->rtcd->recon, copy8x8)
+    (ptr, d->pre_stride, pred_ptr, pitch);
   }
 }
 
@@ -318,7 +321,8 @@ static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, int pitch) {
  * come from an earlier call to build_inter_predictors_4b()) with the
  * predictor of the second reference frame / motion vector.
  */
-static void build_2nd_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, int pitch) {
+static void build_2nd_inter_predictors4b(MACROBLOCKD *xd,
+                                         BLOCKD *d, int pitch) {
   unsigned char *ptr_base;
   unsigned char *ptr;
   unsigned char *pred_ptr = d->predictor;
@@ -326,16 +330,19 @@ static void build_2nd_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, int pitch) {
 
   ptr_base = *(d->base_second_pre);
   mv.as_int = d->bmi.as_mv.second.as_int;
-  ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
+  ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
+        (mv.as_mv.col >> 3);
 
   if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
-    x->subpixel_predict_avg8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1, (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
+    xd->subpixel_predict_avg8x8(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
+                                (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
   } else {
-    RECON_INVOKE(&x->rtcd->recon, avg8x8)(ptr, d->pre_stride, pred_ptr, pitch);
+    RECON_INVOKE(&xd->rtcd->recon, avg8x8)
+    (ptr, d->pre_stride, pred_ptr, pitch);
   }
 }
 
-static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, int pitch) {
+static void build_inter_predictors2b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
   unsigned char *ptr_base;
   unsigned char *ptr;
   unsigned char *pred_ptr = d->predictor;
@@ -343,12 +350,14 @@ static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, int pitch) {
 
   ptr_base = *(d->base_pre);
   mv.as_int = d->bmi.as_mv.first.as_int;
-  ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride + (mv.as_mv.col >> 3);
+  ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
+        (mv.as_mv.col >> 3);
 
   if (mv.as_mv.row & 7 || mv.as_mv.col & 7) {
-    x->subpixel_predict8x4(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1, (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
+    xd->subpixel_predict8x4(ptr, d->pre_stride, (mv.as_mv.col & 7) << 1,
+                            (mv.as_mv.row & 7) << 1, pred_ptr, pitch);
   } else {
-    RECON_INVOKE(&x->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch);
+    RECON_INVOKE(&xd->rtcd->recon, copy8x4)(ptr, d->pre_stride, pred_ptr, pitch);
   }
 }
 
@@ -16,40 +16,44 @@
 #include "onyxc_int.h"
 #endif
 
-extern void vp8_build_1st_inter16x16_predictors_mby(MACROBLOCKD *x,
+extern void vp8_build_1st_inter16x16_predictors_mby(MACROBLOCKD *xd,
                                                     unsigned char *dst_y,
                                                     int dst_ystride);
-extern void vp8_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *x,
+extern void vp8_build_1st_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
                                                      unsigned char *dst_u,
                                                      unsigned char *dst_v,
                                                      int dst_uvstride);
-extern void vp8_build_1st_inter16x16_predictors_mb(MACROBLOCKD *x,
+extern void vp8_build_1st_inter16x16_predictors_mb(MACROBLOCKD *xd,
                                                    unsigned char *dst_y,
                                                    unsigned char *dst_u,
                                                    unsigned char *dst_v,
                                                    int dst_ystride,
                                                    int dst_uvstride);
 
-extern void vp8_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *x,
+extern void vp8_build_2nd_inter16x16_predictors_mby(MACROBLOCKD *xd,
                                                     unsigned char *dst_y,
                                                     int dst_ystride);
-extern void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *x,
+extern void vp8_build_2nd_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
                                                      unsigned char *dst_u,
                                                      unsigned char *dst_v,
                                                      int dst_uvstride);
-extern void vp8_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *x,
+extern void vp8_build_2nd_inter16x16_predictors_mb(MACROBLOCKD *xd,
                                                    unsigned char *dst_y,
                                                    unsigned char *dst_u,
                                                    unsigned char *dst_v,
                                                    int dst_ystride,
                                                    int dst_uvstride);
 
-extern void vp8_build_inter_predictors_mb(MACROBLOCKD *x);
+extern void vp8_build_inter_predictors_mb(MACROBLOCKD *xd);
 
-extern void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf);
-extern void vp8_build_2nd_inter_predictors_b(BLOCKD *d, int pitch, vp8_subpix_fn_t sppf);
+extern void vp8_build_inter_predictors_b(BLOCKD *d, int pitch,
+                                         vp8_subpix_fn_t sppf);
+extern void vp8_build_2nd_inter_predictors_b(BLOCKD *d, int pitch,
+                                             vp8_subpix_fn_t sppf);
 
-extern void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x);
-extern void vp8_setup_interp_filters(MACROBLOCKD *x, INTERPOLATIONFILTERTYPE filter, VP8_COMMON *cm);
+extern void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd);
+extern void vp8_setup_interp_filters(MACROBLOCKD *xd,
+                                     INTERPOLATIONFILTERTYPE filter,
+                                     VP8_COMMON *cm);
 
 #endif
@@ -14,8 +14,8 @@
 #include "reconintra.h"
 #include "vpx_mem/vpx_mem.h"
 
-/* For skip_recon_mb(), add vp8_build_intra_predictors_mby_s(MACROBLOCKD *x) and
- * vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x).
+/* For skip_recon_mb(), add vp8_build_intra_predictors_mby_s(MACROBLOCKD *xd)
+ * and vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *xd).
  */
 
 void d27_predictor(unsigned char *ypred_ptr, int y_stride, int n,
@@ -26,6 +26,6 @@ void d27_predictor(unsigned char *ypred_ptr, int y_stride, int n,
 void d64_predictor(unsigned char *ypred_ptr, int y_stride, int n,
                    unsigned char *yabove_row, unsigned char *yleft_col);
 
-extern void init_intra_left_above_pixels(MACROBLOCKD *x);
+extern void init_intra_left_above_pixels(MACROBLOCKD *xd);
 
 #endif
@@ -296,13 +296,17 @@ void vp8_comp_intra4x4_predict(BLOCKD *x,
 /* copy 4 bytes from the above right down so that the 4x4 prediction modes using pixels above and
  * to the right prediction have filled in pixels to use.
  */
-void vp8_intra_prediction_down_copy(MACROBLOCKD *x) {
-  unsigned char *above_right = *(x->block[0].base_dst) + x->block[0].dst - x->block[0].dst_stride + 16;
+void vp8_intra_prediction_down_copy(MACROBLOCKD *xd) {
+  unsigned char *above_right = *(xd->block[0].base_dst) + xd->block[0].dst -
+                               xd->block[0].dst_stride + 16;
 
   unsigned int *src_ptr = (unsigned int *)above_right;
-  unsigned int *dst_ptr0 = (unsigned int *)(above_right + 4 * x->block[0].dst_stride);
-  unsigned int *dst_ptr1 = (unsigned int *)(above_right + 8 * x->block[0].dst_stride);
-  unsigned int *dst_ptr2 = (unsigned int *)(above_right + 12 * x->block[0].dst_stride);
+  unsigned int *dst_ptr0 =
+    (unsigned int *)(above_right + 4 * xd->block[0].dst_stride);
+  unsigned int *dst_ptr1 =
+    (unsigned int *)(above_right + 8 * xd->block[0].dst_stride);
+  unsigned int *dst_ptr2 =
+    (unsigned int *)(above_right + 12 * xd->block[0].dst_stride);
 
   *dst_ptr0 = *src_ptr;
   *dst_ptr1 = *src_ptr;
@@ -12,6 +12,6 @@
 #ifndef __INC_RECONINTRA4x4_H
 #define __INC_RECONINTRA4x4_H
 
-extern void vp8_intra_prediction_down_copy(MACROBLOCKD *x);
+extern void vp8_intra_prediction_down_copy(MACROBLOCKD *xd);
 
 #endif
@@ -28,15 +28,15 @@ extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_ve_mmx);
 extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_tm_sse2);
 extern build_intra_predictors_mbuv_prototype(vp8_intra_pred_uv_tm_ssse3);
 
-static void vp8_build_intra_predictors_mbuv_x86(MACROBLOCKD *x,
+static void vp8_build_intra_predictors_mbuv_x86(MACROBLOCKD *xd,
                                                 unsigned char *dst_u,
                                                 unsigned char *dst_v,
                                                 int dst_stride,
                                                 build_intra_predictors_mbuv_fn_t tm_func,
                                                 build_intra_predictors_mbuv_fn_t ho_func) {
-  int mode = x->mode_info_context->mbmi.uv_mode;
+  int mode = xd->mode_info_context->mbmi.uv_mode;
   build_intra_predictors_mbuv_fn_t fn;
-  int src_stride = x->dst.uv_stride;
+  int src_stride = xd->dst.uv_stride;
 
   switch (mode) {
     case V_PRED:
@@ -49,15 +49,15 @@ static void vp8_build_intra_predictors_mbuv_x86(MACROBLOCKD *x,
       fn = tm_func;
       break;
     case DC_PRED:
-      if (x->up_available) {
-        if (x->left_available) {
+      if (xd->up_available) {
+        if (xd->left_available) {
          fn = vp8_intra_pred_uv_dc_mmx2;
          break;
        } else {
          fn = vp8_intra_pred_uv_dctop_mmx2;
          break;
        }
-      } else if (x->left_available) {
+      } else if (xd->left_available) {
        fn = vp8_intra_pred_uv_dcleft_mmx2;
        break;
      } else {
@@ -69,34 +69,34 @@ static void vp8_build_intra_predictors_mbuv_x86(MACROBLOCKD *x,
       return;
   }
 
-  fn(dst_u, dst_stride, x->dst.u_buffer, src_stride);
-  fn(dst_v, dst_stride, x->dst.v_buffer, src_stride);
+  fn(dst_u, dst_stride, xd->dst.u_buffer, src_stride);
+  fn(dst_v, dst_stride, xd->dst.v_buffer, src_stride);
 }
 
-void vp8_build_intra_predictors_mbuv_sse2(MACROBLOCKD *x) {
-  vp8_build_intra_predictors_mbuv_x86(x, &x->predictor[256],
-                                      &x->predictor[320], 8,
+void vp8_build_intra_predictors_mbuv_sse2(MACROBLOCKD *xd) {
+  vp8_build_intra_predictors_mbuv_x86(xd, &xd->predictor[256],
+                                      &xd->predictor[320], 8,
                                       vp8_intra_pred_uv_tm_sse2,
                                       vp8_intra_pred_uv_ho_mmx2);
 }
 
-void vp8_build_intra_predictors_mbuv_ssse3(MACROBLOCKD *x) {
-  vp8_build_intra_predictors_mbuv_x86(x, &x->predictor[256],
-                                      &x->predictor[320], 8,
+void vp8_build_intra_predictors_mbuv_ssse3(MACROBLOCKD *xd) {
+  vp8_build_intra_predictors_mbuv_x86(xd, &xd->predictor[256],
+                                      &xd->predictor[320], 8,
                                       vp8_intra_pred_uv_tm_ssse3,
                                       vp8_intra_pred_uv_ho_ssse3);
 }
 
-void vp8_build_intra_predictors_mbuv_s_sse2(MACROBLOCKD *x) {
-  vp8_build_intra_predictors_mbuv_x86(x, x->dst.u_buffer,
-                                      x->dst.v_buffer, x->dst.uv_stride,
+void vp8_build_intra_predictors_mbuv_s_sse2(MACROBLOCKD *xd) {
+  vp8_build_intra_predictors_mbuv_x86(xd, xd->dst.u_buffer,
+                                      xd->dst.v_buffer, xd->dst.uv_stride,
                                       vp8_intra_pred_uv_tm_sse2,
                                       vp8_intra_pred_uv_ho_mmx2);
 }
 
-void vp8_build_intra_predictors_mbuv_s_ssse3(MACROBLOCKD *x) {
-  vp8_build_intra_predictors_mbuv_x86(x, x->dst.u_buffer,
-                                      x->dst.v_buffer, x->dst.uv_stride,
+void vp8_build_intra_predictors_mbuv_s_ssse3(MACROBLOCKD *xd) {
+  vp8_build_intra_predictors_mbuv_x86(xd, xd->dst.u_buffer,
+                                      xd->dst.v_buffer, xd->dst.uv_stride,
                                       vp8_intra_pred_uv_tm_ssse3,
                                       vp8_intra_pred_uv_ho_ssse3);
 }
@@ -61,14 +61,17 @@ static int vp8_read_uv_mode(vp8_reader *bc, const vp8_prob *p) {
 
 // This function reads the current macro block's segnent id from the bitstream
 // It should only be called if a segment map update is indicated.
-static void vp8_read_mb_segid(vp8_reader *r, MB_MODE_INFO *mi, MACROBLOCKD *x) {
+static void vp8_read_mb_segid(vp8_reader *r, MB_MODE_INFO *mi,
+                              MACROBLOCKD *xd) {
   /* Is segmentation enabled */
-  if (x->segmentation_enabled && x->update_mb_segmentation_map) {
+  if (xd->segmentation_enabled && xd->update_mb_segmentation_map) {
     /* If so then read the segment id. */
-    if (vp8_read(r, x->mb_segment_tree_probs[0]))
-      mi->segment_id = (unsigned char)(2 + vp8_read(r, x->mb_segment_tree_probs[2]));
+    if (vp8_read(r, xd->mb_segment_tree_probs[0]))
+      mi->segment_id =
+        (unsigned char)(2 + vp8_read(r, xd->mb_segment_tree_probs[2]));
     else
-      mi->segment_id = (unsigned char)(vp8_read(r, x->mb_segment_tree_probs[1]));
+      mi->segment_id =
+        (unsigned char)(vp8_read(r, xd->mb_segment_tree_probs[1]));
   }
 }
 
@@ -14,7 +14,7 @@
 
 #include "onyxd_int.h"
 
-void vp8_reset_mb_tokens_context(MACROBLOCKD *x);
+void vp8_reset_mb_tokens_context(MACROBLOCKD *xd);
 int vp8_decode_mb_tokens(VP8D_COMP *, MACROBLOCKD *);
 int vp8_decode_mb_tokens_8x8(VP8D_COMP *, MACROBLOCKD *);
 #if CONFIG_TX16X16
@@ -15,33 +15,34 @@
 #include "vpx_mem/vpx_mem.h"
 #include "onyxd_int.h"
 
-/* For skip_recon_mb(), add vp8_build_intra_predictors_mby_s(MACROBLOCKD *x) and
- * vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x).
+/* For skip_recon_mb(), add vp8_build_intra_predictors_mby_s(MACROBLOCKD *xd)
+ * and vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *xd).
  */
 
-void vp8mt_build_intra_predictors_mby(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col) {
-  unsigned char *yabove_row; /* = x->dst.y_buffer - x->dst.y_stride; */
+void vp8mt_build_intra_predictors_mby(VP8D_COMP *pbi, MACROBLOCKD *xd,
+                                      int mb_row, int mb_col) {
+  unsigned char *yabove_row; /* = xd->dst.y_buffer - xd->dst.y_stride; */
   unsigned char *yleft_col;
   unsigned char yleft_buf[16];
   unsigned char ytop_left; /* = yabove_row[-1]; */
-  unsigned char *ypred_ptr = x->predictor;
+  unsigned char *ypred_ptr = xd->predictor;
   int r, c, i;
 
   if (pbi->common.filter_level) {
     yabove_row = pbi->mt_yabove_row[mb_row] + mb_col * 16 + 32;
     yleft_col = pbi->mt_yleft_col[mb_row];
   } else {
-    yabove_row = x->dst.y_buffer - x->dst.y_stride;
+    yabove_row = xd->dst.y_buffer - xd->dst.y_stride;
 
     for (i = 0; i < 16; i++)
-      yleft_buf[i] = x->dst.y_buffer [i * x->dst.y_stride - 1];
+      yleft_buf[i] = xd->dst.y_buffer [i * xd->dst.y_stride - 1];
     yleft_col = yleft_buf;
   }
 
   ytop_left = yabove_row[-1];
 
   /* for Y */
-  switch (x->mode_info_context->mbmi.mode) {
+  switch (xd->mode_info_context->mbmi.mode) {
     case DC_PRED: {
       int expected_dc;
       int i;
@@ -49,14 +50,14 @@ void vp8mt_build_intra_predictors_mby(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row
       int average = 0;
 
 
-      if (x->up_available || x->left_available) {
-        if (x->up_available) {
+      if (xd->up_available || xd->left_available) {
+        if (xd->up_available) {
          for (i = 0; i < 16; i++) {
            average += yabove_row[i];
          }
        }
 
-        if (x->left_available) {
+        if (xd->left_available) {
 
          for (i = 0; i < 16; i++) {
            average += yleft_col[i];
@@ -66,7 +67,7 @@ void vp8mt_build_intra_predictors_mby(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row
 
 
 
-      shift = 3 + x->up_available + x->left_available;
+      shift = 3 + xd->up_available + xd->left_available;
       expected_dc = (average + (1 << (shift - 1))) >> shift;
     } else {
       expected_dc = 128;
@@ -128,32 +129,33 @@ void vp8mt_build_intra_predictors_mby(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row
   }
 }
 
-void vp8mt_build_intra_predictors_mby_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col) {
-  unsigned char *yabove_row; /* = x->dst.y_buffer - x->dst.y_stride; */
+void vp8mt_build_intra_predictors_mby_s(VP8D_COMP *pbi, MACROBLOCKD *xd,
+                                        int mb_row, int mb_col) {
+  unsigned char *yabove_row; /* = xd->dst.y_buffer - xd->dst.y_stride; */
   unsigned char *yleft_col;
   unsigned char yleft_buf[16];
   unsigned char ytop_left; /* = yabove_row[-1]; */
-  unsigned char *ypred_ptr = x->predictor;
+  unsigned char *ypred_ptr = xd->predictor;
   int r, c, i;
 
-  int y_stride = x->dst.y_stride;
-  ypred_ptr = x->dst.y_buffer; /*x->predictor;*/
+  int y_stride = xd->dst.y_stride;
+  ypred_ptr = xd->dst.y_buffer; /*xd->predictor;*/
 
   if (pbi->common.filter_level) {
     yabove_row = pbi->mt_yabove_row[mb_row] + mb_col * 16 + 32;
     yleft_col = pbi->mt_yleft_col[mb_row];
   } else {
-    yabove_row = x->dst.y_buffer - x->dst.y_stride;
+    yabove_row = xd->dst.y_buffer - xd->dst.y_stride;
 
     for (i = 0; i < 16; i++)
-      yleft_buf[i] = x->dst.y_buffer [i * x->dst.y_stride - 1];
+      yleft_buf[i] = xd->dst.y_buffer [i * xd->dst.y_stride - 1];
     yleft_col = yleft_buf;
   }
 
   ytop_left = yabove_row[-1];
 
   /* for Y */
-  switch (x->mode_info_context->mbmi.mode) {
+  switch (xd->mode_info_context->mbmi.mode) {
     case DC_PRED: {
       int expected_dc;
       int i;
@@ -161,14 +163,14 @@ void vp8mt_build_intra_predictors_mby_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_r
       int average = 0;
 
 
-      if (x->up_available || x->left_available) {
-        if (x->up_available) {
+      if (xd->up_available || xd->left_available) {
+        if (xd->up_available) {
          for (i = 0; i < 16; i++) {
            average += yabove_row[i];
          }
        }
 
-        if (x->left_available) {
+        if (xd->left_available) {
 
          for (i = 0; i < 16; i++) {
            average += yleft_col[i];
@@ -178,7 +180,7 @@ void vp8mt_build_intra_predictors_mby_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_r
 
 
 
-      shift = 3 + x->up_available + x->left_available;
+      shift = 3 + xd->up_available + xd->left_available;
       expected_dc = (average + (1 << (shift - 1))) >> shift;
     } else {
       expected_dc = 128;
@@ -244,17 +246,18 @@ void vp8mt_build_intra_predictors_mby_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_r
   }
 }
 
-void vp8mt_build_intra_predictors_mbuv(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col) {
-  unsigned char *uabove_row; /* = x->dst.u_buffer - x->dst.uv_stride; */
+void vp8mt_build_intra_predictors_mbuv(VP8D_COMP *pbi, MACROBLOCKD *xd,
+                                       int mb_row, int mb_col) {
+  unsigned char *uabove_row; /* = xd->dst.u_buffer - xd->dst.uv_stride; */
   unsigned char *uleft_col; /*[16];*/
   unsigned char uleft_buf[8];
   unsigned char utop_left; /* = uabove_row[-1]; */
-  unsigned char *vabove_row; /* = x->dst.v_buffer - x->dst.uv_stride; */
+  unsigned char *vabove_row; /* = xd->dst.v_buffer - xd->dst.uv_stride; */
   unsigned char *vleft_col; /*[20];*/
   unsigned char vleft_buf[8];
   unsigned char vtop_left; /* = vabove_row[-1]; */
-  unsigned char *upred_ptr = &x->predictor[256];
-  unsigned char *vpred_ptr = &x->predictor[320];
+  unsigned char *upred_ptr = &xd->predictor[256];
+  unsigned char *vpred_ptr = &xd->predictor[320];
   int i, j;
 
   if (pbi->common.filter_level) {
@@ -263,12 +266,12 @@ void vp8mt_build_intra_predictors_mbuv(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_ro
     uleft_col = pbi->mt_uleft_col[mb_row];
     vleft_col = pbi->mt_vleft_col[mb_row];
   } else {
-    uabove_row = x->dst.u_buffer - x->dst.uv_stride;
-    vabove_row = x->dst.v_buffer - x->dst.uv_stride;
+    uabove_row = xd->dst.u_buffer - xd->dst.uv_stride;
+    vabove_row = xd->dst.v_buffer - xd->dst.uv_stride;
 
     for (i = 0; i < 8; i++) {
-      uleft_buf[i] = x->dst.u_buffer [i * x->dst.uv_stride - 1];
-      vleft_buf[i] = x->dst.v_buffer [i * x->dst.uv_stride - 1];
+      uleft_buf[i] = xd->dst.u_buffer [i * xd->dst.uv_stride - 1];
+      vleft_buf[i] = xd->dst.v_buffer [i * xd->dst.uv_stride - 1];
     }
     uleft_col = uleft_buf;
     vleft_col = vleft_buf;
@@ -276,7 +279,7 @@ void vp8mt_build_intra_predictors_mbuv(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_ro
   utop_left = uabove_row[-1];
   vtop_left = vabove_row[-1];
 
-  switch (x->mode_info_context->mbmi.uv_mode) {
+  switch (xd->mode_info_context->mbmi.uv_mode) {
     case DC_PRED: {
       int expected_udc;
       int expected_vdc;
@@ -285,25 +288,25 @@ void vp8mt_build_intra_predictors_mbuv(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_ro
       int Uaverage = 0;
       int Vaverage = 0;
 
-      if (x->up_available) {
+      if (xd->up_available) {
        for (i = 0; i < 8; i++) {
          Uaverage += uabove_row[i];
          Vaverage += vabove_row[i];
        }
      }
 
-      if (x->left_available) {
+      if (xd->left_available) {
        for (i = 0; i < 8; i++) {
          Uaverage += uleft_col[i];
          Vaverage += vleft_col[i];
        }
      }
 
-      if (!x->up_available && !x->left_available) {
+      if (!xd->up_available && !xd->left_available) {
        expected_udc = 128;
        expected_vdc = 128;
      } else {
-        shift = 2 + x->up_available + x->left_available;
+        shift = 2 + xd->up_available + xd->left_available;
        expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
        expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
      }
@@ -380,18 +383,19 @@ void vp8mt_build_intra_predictors_mbuv(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_ro
   }
 }
 
-void vp8mt_build_intra_predictors_mbuv_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col) {
-  unsigned char *uabove_row; /* = x->dst.u_buffer - x->dst.uv_stride; */
+void vp8mt_build_intra_predictors_mbuv_s(VP8D_COMP *pbi, MACROBLOCKD *xd,
+                                         int mb_row, int mb_col) {
+  unsigned char *uabove_row; /* = xd->dst.u_buffer - xd->dst.uv_stride; */
   unsigned char *uleft_col; /*[16];*/
   unsigned char uleft_buf[8];
   unsigned char utop_left; /* = uabove_row[-1]; */
-  unsigned char *vabove_row; /* = x->dst.v_buffer - x->dst.uv_stride; */
+  unsigned char *vabove_row; /* = xd->dst.v_buffer - xd->dst.uv_stride; */
   unsigned char *vleft_col; /*[20];*/
   unsigned char vleft_buf[8];
   unsigned char vtop_left; /* = vabove_row[-1]; */
-  unsigned char *upred_ptr = x->dst.u_buffer; /*&x->predictor[256];*/
-  unsigned char *vpred_ptr = x->dst.v_buffer; /*&x->predictor[320];*/
-  int uv_stride = x->dst.uv_stride;
+  unsigned char *upred_ptr = xd->dst.u_buffer; /*&xd->predictor[256];*/
+  unsigned char *vpred_ptr = xd->dst.v_buffer; /*&xd->predictor[320];*/
+  int uv_stride = xd->dst.uv_stride;
   int i, j;
 
   if (pbi->common.filter_level) {
@@ -400,12 +404,12 @@ void vp8mt_build_intra_predictors_mbuv_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_
     uleft_col = pbi->mt_uleft_col[mb_row];
     vleft_col = pbi->mt_vleft_col[mb_row];
   } else {
-    uabove_row = x->dst.u_buffer - x->dst.uv_stride;
-    vabove_row = x->dst.v_buffer - x->dst.uv_stride;
+    uabove_row = xd->dst.u_buffer - xd->dst.uv_stride;
+    vabove_row = xd->dst.v_buffer - xd->dst.uv_stride;
 
     for (i = 0; i < 8; i++) {
-      uleft_buf[i] = x->dst.u_buffer [i * x->dst.uv_stride - 1];
-      vleft_buf[i] = x->dst.v_buffer [i * x->dst.uv_stride - 1];
+      uleft_buf[i] = xd->dst.u_buffer [i * xd->dst.uv_stride - 1];
+      vleft_buf[i] = xd->dst.v_buffer [i * xd->dst.uv_stride - 1];
     }
     uleft_col = uleft_buf;
     vleft_col = vleft_buf;
@@ -413,7 +417,7 @@ void vp8mt_build_intra_predictors_mbuv_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_
   utop_left = uabove_row[-1];
   vtop_left = vabove_row[-1];
 
-  switch (x->mode_info_context->mbmi.uv_mode) {
+  switch (xd->mode_info_context->mbmi.uv_mode) {
     case DC_PRED: {
       int expected_udc;
       int expected_vdc;
@@ -422,25 +426,25 @@ void vp8mt_build_intra_predictors_mbuv_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_
       int Uaverage = 0;
       int Vaverage = 0;
 
-      if (x->up_available) {
+      if (xd->up_available) {
        for (i = 0; i < 8; i++) {
          Uaverage += uabove_row[i];
          Vaverage += vabove_row[i];
        }
      }
 
-      if (x->left_available) {
+      if (xd->left_available) {
        for (i = 0; i < 8; i++) {
          Uaverage += uleft_col[i];
          Vaverage += vleft_col[i];
        }
      }
 
-      if (!x->up_available && !x->left_available) {
+      if (!xd->up_available && !xd->left_available) {
        expected_udc = 128;
        expected_vdc = 128;
      } else {
-        shift = 2 + x->up_available + x->left_available;
+        shift = 2 + xd->up_available + xd->left_available;
        expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
        expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
      }
@@ -531,26 +535,26 @@ void vp8mt_predict_intra4x4(VP8D_COMP *pbi,
                             int num) {
   int i, r, c;
 
-  unsigned char *Above; /* = *(x->base_dst) + x->dst - x->dst_stride; */
+  unsigned char *Above; /* = *(xd->base_dst) + xd->dst - xd->dst_stride; */
   unsigned char Left[4];
   unsigned char top_left; /* = Above[-1]; */
 
-  BLOCKD *x = &xd->block[num];
+  BLOCKD *blockd = &xd->block[num];
 
   /*Caution: For some b_mode, it needs 8 pixels (4 above + 4 above-right).*/
   if (num < 4 && pbi->common.filter_level)
     Above = pbi->mt_yabove_row[mb_row] + mb_col * 16 + num * 4 + 32;
   else
-    Above = *(x->base_dst) + x->dst - x->dst_stride;
+    Above = *(blockd->base_dst) + blockd->dst - blockd->dst_stride;
 
   if (num % 4 == 0 && pbi->common.filter_level) {
     for (i = 0; i < 4; i++)
       Left[i] = pbi->mt_yleft_col[mb_row][num + i];
   } else {
-    Left[0] = (*(x->base_dst))[x->dst - 1];
-    Left[1] = (*(x->base_dst))[x->dst - 1 + x->dst_stride];
-    Left[2] = (*(x->base_dst))[x->dst - 1 + 2 * x->dst_stride];
-    Left[3] = (*(x->base_dst))[x->dst - 1 + 3 * x->dst_stride];
+    Left[0] = (*(blockd->base_dst))[blockd->dst - 1];
+    Left[1] = (*(blockd->base_dst))[blockd->dst - 1 + blockd->dst_stride];
+    Left[2] = (*(blockd->base_dst))[blockd->dst - 1 + 2 * blockd->dst_stride];
+    Left[3] = (*(blockd->base_dst))[blockd->dst - 1 + 3 * blockd->dst_stride];
   }
 
   if ((num == 4 || num == 8 || num == 12) && pbi->common.filter_level)
@@ -808,8 +812,11 @@ void vp8mt_predict_intra4x4(VP8D_COMP *pbi,
 /* copy 4 bytes from the above right down so that the 4x4 prediction modes using pixels above and
  * to the right prediction have filled in pixels to use.
  */
-void vp8mt_intra_prediction_down_copy(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col) {
-  unsigned char *above_right; /* = *(x->block[0].base_dst) + x->block[0].dst - x->block[0].dst_stride + 16; */
+void vp8mt_intra_prediction_down_copy(VP8D_COMP *pbi, MACROBLOCKD *xd,
+                                      int mb_row, int mb_col) {
+  unsigned char *above_right; // = *(xd->block[0].base_dst) +
+                              //   xd->block[0].dst -
+                              //   xd->block[0].dst_stride + 16; */
   unsigned int *src_ptr;
   unsigned int *dst_ptr0;
   unsigned int *dst_ptr1;
@@ -818,15 +825,19 @@ void vp8mt_intra_prediction_down_copy(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row
   if (pbi->common.filter_level)
     above_right = pbi->mt_yabove_row[mb_row] + mb_col * 16 + 32 + 16;
   else
-    above_right = *(x->block[0].base_dst) + x->block[0].dst - x->block[0].dst_stride + 16;
+    above_right = *(xd->block[0].base_dst) + xd->block[0].dst -
+                  xd->block[0].dst_stride + 16;
 
   src_ptr = (unsigned int *)above_right;
-  /*dst_ptr0 = (unsigned int *)(above_right + 4 * x->block[0].dst_stride);
-  dst_ptr1 = (unsigned int *)(above_right + 8 * x->block[0].dst_stride);
-  dst_ptr2 = (unsigned int *)(above_right + 12 * x->block[0].dst_stride);*/
-  dst_ptr0 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 3 * x->block[0].dst_stride);
-  dst_ptr1 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 7 * x->block[0].dst_stride);
-  dst_ptr2 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 11 * x->block[0].dst_stride);
+  /*dst_ptr0 = (unsigned int *)(above_right + 4 * xd->block[0].dst_stride);
+  dst_ptr1 = (unsigned int *)(above_right + 8 * xd->block[0].dst_stride);
+  dst_ptr2 = (unsigned int *)(above_right + 12 * xd->block[0].dst_stride);*/
+  dst_ptr0 = (unsigned int *)(*(xd->block[0].base_dst) + xd->block[0].dst +
+                              16 + 3 * xd->block[0].dst_stride);
+  dst_ptr1 = (unsigned int *)(*(xd->block[0].base_dst) + xd->block[0].dst +
+                              16 + 7 * xd->block[0].dst_stride);
+  dst_ptr2 = (unsigned int *)(*(xd->block[0].base_dst) + xd->block[0].dst +
+                              16 + 11 * xd->block[0].dst_stride);
   *dst_ptr0 = *src_ptr;
   *dst_ptr1 = *src_ptr;
   *dst_ptr2 = *src_ptr;
@@ -138,7 +138,7 @@ static void update_mode(
 }
 
 static void update_mbintra_mode_probs(VP8_COMP *cpi) {
-  VP8_COMMON *const x = & cpi->common;
+  VP8_COMMON *const cm = & cpi->common;
 
   vp8_writer *const w = & cpi->bc;
 
@@ -148,7 +148,7 @@ static void update_mbintra_mode_probs(VP8_COMP *cpi) {
 
     update_mode(
       w, VP8_YMODES, vp8_ymode_encodings, vp8_ymode_tree,
-      Pnew, x->fc.ymode_prob, bct, (unsigned int *)cpi->ymode_count
+      Pnew, cm->fc.ymode_prob, bct, (unsigned int *)cpi->ymode_count
     );
   }
 }
@@ -569,31 +569,31 @@ static void write_mv_hp
 // This function writes the current macro block's segnment id to the bitstream
 // It should only be called if a segment map update is indicated.
 static void write_mb_segid(vp8_writer *w,
-                           const MB_MODE_INFO *mi, const MACROBLOCKD *x) {
+                           const MB_MODE_INFO *mi, const MACROBLOCKD *xd) {
   // Encode the MB segment id.
-  if (x->segmentation_enabled && x->update_mb_segmentation_map) {
+  if (xd->segmentation_enabled && xd->update_mb_segmentation_map) {
     switch (mi->segment_id) {
       case 0:
-        vp8_write(w, 0, x->mb_segment_tree_probs[0]);
-        vp8_write(w, 0, x->mb_segment_tree_probs[1]);
+        vp8_write(w, 0, xd->mb_segment_tree_probs[0]);
+        vp8_write(w, 0, xd->mb_segment_tree_probs[1]);
        break;
      case 1:
-        vp8_write(w, 0, x->mb_segment_tree_probs[0]);
-        vp8_write(w, 1, x->mb_segment_tree_probs[1]);
+        vp8_write(w, 0, xd->mb_segment_tree_probs[0]);
+        vp8_write(w, 1, xd->mb_segment_tree_probs[1]);
        break;
      case 2:
-        vp8_write(w, 1, x->mb_segment_tree_probs[0]);
-        vp8_write(w, 0, x->mb_segment_tree_probs[2]);
+        vp8_write(w, 1, xd->mb_segment_tree_probs[0]);
+        vp8_write(w, 0, xd->mb_segment_tree_probs[2]);
        break;
      case 3:
-        vp8_write(w, 1, x->mb_segment_tree_probs[0]);
-        vp8_write(w, 1, x->mb_segment_tree_probs[2]);
+        vp8_write(w, 1, xd->mb_segment_tree_probs[0]);
+        vp8_write(w, 1, xd->mb_segment_tree_probs[2]);
        break;
 
      // TRAP.. This should not happen
      default:
-        vp8_write(w, 0, x->mb_segment_tree_probs[0]);
-        vp8_write(w, 0, x->mb_segment_tree_probs[1]);
+        vp8_write(w, 0, xd->mb_segment_tree_probs[0]);
+        vp8_write(w, 0, xd->mb_segment_tree_probs[1]);
        break;
    }
  }
@@ -47,7 +47,7 @@ int enc_debug = 0;
 int mb_row_debug, mb_col_debug;
 #endif
 
-extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x,
+extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd,
                          TOKENEXTRA **t, int dry_run);
 
 extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
@@ -1471,7 +1471,7 @@ void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
 extern int cnt_pm;
 #endif
 
-extern void vp8_fix_contexts(MACROBLOCKD *x);
+extern void vp8_fix_contexts(MACROBLOCKD *xd);
 
 void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
                                     TOKENEXTRA **t, int recon_yoffset,
@@ -113,9 +113,9 @@ void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb) {
   int i;
 
 #if 0
-  MACROBLOCKD *x = &mb->e_mbd;
+  MACROBLOCKD *xd = &mb->e_mbd;
   // Intra modes requiring top-right MB reconstructed data have been disabled
-  vp8_intra_prediction_down_copy(x);
+  vp8_intra_prediction_down_copy(xd);
 #endif
 
   for (i = 0; i < 16; i++)
@@ -607,11 +607,11 @@ fall between -65 and +65.
 **************************************************************************/
 #define SUM_2ND_COEFF_THRESH 65
 
-static void check_reset_2nd_coeffs(MACROBLOCKD *x, int type,
+static void check_reset_2nd_coeffs(MACROBLOCKD *xd, int type,
                                    ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) {
   int sum = 0;
   int i;
-  BLOCKD *bd = &x->block[24];
+  BLOCKD *bd = &xd->block[24];
   if (bd->dequant[0] >= SUM_2ND_COEFF_THRESH
       && bd->dequant[1] >= SUM_2ND_COEFF_THRESH)
     return;
@@ -634,10 +634,10 @@ static void check_reset_2nd_coeffs(MACROBLOCKD *x, int type,
   }
 }
 #define SUM_2ND_COEFF_THRESH_8X8 32
-static void check_reset_8x8_2nd_coeffs(MACROBLOCKD *x, int type,
+static void check_reset_8x8_2nd_coeffs(MACROBLOCKD *xd, int type,
                                        ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) {
   int sum = 0;
-  BLOCKD *bd = &x->block[24];
+  BLOCKD *bd = &xd->block[24];
   int coef;
 
   coef = bd->dqcoeff[0];
@@ -39,7 +39,7 @@
 
 static void vp8_temporal_filter_predictors_mb_c
 (
-  MACROBLOCKD *x,
+  MACROBLOCKD *xd,
   unsigned char *y_mb_ptr,
   unsigned char *u_mb_ptr,
   unsigned char *v_mb_ptr,
@@ -56,10 +56,10 @@ static void vp8_temporal_filter_predictors_mb_c
   yptr = y_mb_ptr + (mv_row >> 3) * stride + (mv_col >> 3);
 
   if ((mv_row | mv_col) & 7) {
-    x->subpixel_predict16x16(yptr, stride,
+    xd->subpixel_predict16x16(yptr, stride,
                              (mv_col & 7) << 1, (mv_row & 7) << 1, &pred[0], 16);
   } else {
-    RECON_INVOKE(&x->rtcd->recon, copy16x16)(yptr, stride, &pred[0], 16);
+    RECON_INVOKE(&xd->rtcd->recon, copy16x16)(yptr, stride, &pred[0], 16);
   }
 
   // U & V
@@ -73,14 +73,14 @@ static void vp8_temporal_filter_predictors_mb_c
   vptr = v_mb_ptr + offset;
 
   if ((omv_row | omv_col) & 15) {
-    x->subpixel_predict8x8(uptr, stride,
+    xd->subpixel_predict8x8(uptr, stride,
                            (omv_col & 15), (omv_row & 15), &pred[256], 8);
-    x->subpixel_predict8x8(vptr, stride,
+    xd->subpixel_predict8x8(vptr, stride,
                            (omv_col & 15), (omv_row & 15), &pred[320], 8);
   }
   else {
-    RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, stride, &pred[256], 8);
-    RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, stride, &pred[320], 8);
+    RECON_INVOKE(&xd->rtcd->recon, copy8x8)(uptr, stride, &pred[256], 8);
+    RECON_INVOKE(&xd->rtcd->recon, copy8x8)(vptr, stride, &pred[320], 8);
   }
 }
 void vp8_temporal_filter_apply_c
@@ -39,14 +39,14 @@ extern unsigned int tree_update_hist_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
 #endif
 #endif
 void vp8_stuff_mb(VP8_COMP *cpi,
-                  MACROBLOCKD *x, TOKENEXTRA **t, int dry_run);
+                  MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
 void vp8_stuff_mb_8x8(VP8_COMP *cpi,
-                      MACROBLOCKD *x, TOKENEXTRA **t, int dry_run);
+                      MACROBLOCKD *xd, TOKENEXTRA **t, int dry_run);
 #if CONFIG_TX16X16
-void vp8_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *x,
+void vp8_stuff_mb_16x16(VP8_COMP *cpi, MACROBLOCKD *xd,
                         TOKENEXTRA **t, int dry_run);
 #endif
-void vp8_fix_contexts(MACROBLOCKD *x);
+void vp8_fix_contexts(MACROBLOCKD *xd);
 
 static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE * 2];
 const TOKENVALUE *vp8_dct_value_tokens_ptr;
@@ -717,100 +717,100 @@ static void tokenize1st_order_b
 }
 
 
-int mby_is_skippable(MACROBLOCKD *x, int has_y2_block) {
+int mby_is_skippable(MACROBLOCKD *xd, int has_y2_block) {
   int skip = 1;
   int i = 0;
 
   if (has_y2_block) {
     for (i = 0; i < 16; i++)
-      skip &= (x->block[i].eob < 2);
-    skip &= (!x->block[24].eob);
+      skip &= (xd->block[i].eob < 2);
+    skip &= (!xd->block[24].eob);
   } else {
     for (i = 0; i < 16; i++)
-      skip &= (!x->block[i].eob);
+      skip &= (!xd->block[i].eob);
   }
   return skip;
 }
 
-int mbuv_is_skippable(MACROBLOCKD *x) {
+int mbuv_is_skippable(MACROBLOCKD *xd) {
   int skip = 1;
   int i;
 
   for (i = 16; i < 24; i++)
-    skip &= (!x->block[i].eob);
+    skip &= (!xd->block[i].eob);
   return skip;
 }
 
-int mb_is_skippable(MACROBLOCKD *x, int has_y2_block) {
-  return (mby_is_skippable(x, has_y2_block) &
-          mbuv_is_skippable(x));
+int mb_is_skippable(MACROBLOCKD *xd, int has_y2_block) {
+  return (mby_is_skippable(xd, has_y2_block) &
+          mbuv_is_skippable(xd));
 }
 
-int mby_is_skippable_8x8(MACROBLOCKD *x) {
+int mby_is_skippable_8x8(MACROBLOCKD *xd) {
   int skip = 1;
   int i = 0;
 
   for (i = 0; i < 16; i += 4)
-    skip &= (x->block[i].eob < 2);
-  skip &= (!x->block[24].eob);
+    skip &= (xd->block[i].eob < 2);
+  skip &= (!xd->block[24].eob);
   return skip;
 }
 
-int mbuv_is_skippable_8x8(MACROBLOCKD *x) {
-  return (!x->block[16].eob) & (!x->block[20].eob);
+int mbuv_is_skippable_8x8(MACROBLOCKD *xd) {
+  return (!xd->block[16].eob) & (!xd->block[20].eob);
}
 
-int mb_is_skippable_8x8(MACROBLOCKD *x) {
-  return (mby_is_skippable_8x8(x) & mbuv_is_skippable_8x8(x));
+int mb_is_skippable_8x8(MACROBLOCKD *xd) {
+  return (mby_is_skippable_8x8(xd) & mbuv_is_skippable_8x8(xd));
 }
 
 #if CONFIG_TX16X16
-int mby_is_skippable_16x16(MACROBLOCKD *x) {
+int mby_is_skippable_16x16(MACROBLOCKD *xd) {
   int skip = 1;
-  //skip &= (x->block[0].eob < 2); // I think this should be commented? No second order == DC must be coded
-  //skip &= (x->block[0].eob < 1);
-  //skip &= (!x->block[24].eob);
-  skip &= !x->block[0].eob;
+  //skip &= (xd->block[0].eob < 2); // I think this should be commented? No second order == DC must be coded
+  //skip &= (xd->block[0].eob < 1);
+  //skip &= (!xd->block[24].eob);
+  skip &= !xd->block[0].eob;
   return skip;
 }
 
-int mb_is_skippable_16x16(MACROBLOCKD *x) {
-  return (mby_is_skippable_16x16(x) & mbuv_is_skippable_8x8(x));
+int mb_is_skippable_16x16(MACROBLOCKD *xd) {
+  return (mby_is_skippable_16x16(xd) & mbuv_is_skippable_8x8(xd));
 }
 #endif
 
 void vp8_tokenize_mb(VP8_COMP *cpi,
-                     MACROBLOCKD *x,
+                     MACROBLOCKD *xd,
                      TOKENEXTRA **t,
                      int dry_run) {
   int plane_type;
   int has_y2_block;
   int b;
-  int tx_type = x->mode_info_context->mbmi.txfm_size;
-  int mb_skip_context = get_pred_context(&cpi->common, x, PRED_MBSKIP);
+  int tx_type = xd->mode_info_context->mbmi.txfm_size;
+  int mb_skip_context = get_pred_context(&cpi->common, xd, PRED_MBSKIP);
   TOKENEXTRA *t_backup = *t;
 
   // If the MB is going to be skipped because of a segment level flag
   // exclude this from the skip count stats used to calculate the
   // transmitted skip probability;
   int skip_inc;
-  int segment_id = x->mode_info_context->mbmi.segment_id;
+  int segment_id = xd->mode_info_context->mbmi.segment_id;
 
 #if CONFIG_HYBRIDTRANSFORM
   int QIndex = cpi->mb.q_index;
   int active_ht = (QIndex < ACTIVE_HT) &&
-                  (x->mode_info_context->mbmi.mode == B_PRED);
+                  (xd->mode_info_context->mbmi.mode == B_PRED);
 #endif
 
-  if (!segfeature_active(x, segment_id, SEG_LVL_EOB) ||
-      (get_segdata(x, segment_id, SEG_LVL_EOB) != 0)) {
+  if (!segfeature_active(xd, segment_id, SEG_LVL_EOB) ||
+      (get_segdata(xd, segment_id, SEG_LVL_EOB) != 0)) {
    skip_inc = 1;
  } else
    skip_inc = 0;
 
-  has_y2_block = (x->mode_info_context->mbmi.mode != B_PRED
-                  && x->mode_info_context->mbmi.mode != I8X8_PRED
-                  && x->mode_info_context->mbmi.mode != SPLITMV);
+  has_y2_block = (xd->mode_info_context->mbmi.mode != B_PRED
+                  && xd->mode_info_context->mbmi.mode != I8X8_PRED
+                  && xd->mode_info_context->mbmi.mode != SPLITMV);
 #if CONFIG_TX16X16
   if (tx_type == TX_16X16) has_y2_block = 0; // Because of inter frames
 #endif
@@ -818,18 +818,18 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
   switch (tx_type) {
 #if CONFIG_TX16X16
     case TX_16X16:
-      x->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_16x16(x);
+      xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_16x16(xd);
      break;
 #endif
    case TX_8X8:
-      x->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8(x);
+      xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable_8x8(xd);
      break;
    default:
-      x->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable(x, has_y2_block);
+      xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable(xd, has_y2_block);
      break;
  }
 
-  if (x->mode_info_context->mbmi.mb_skip_coeff) {
+  if (xd->mode_info_context->mbmi.mb_skip_coeff) {
    if (!dry_run)
      cpi->skip_true_count[mb_skip_context] += skip_inc;
    if (!cpi->common.mb_no_coeff_skip) {
@@ -839,11 +839,11 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
       else
 #endif
       if (tx_type == TX_8X8)
-        vp8_stuff_mb_8x8(cpi, x, t, dry_run);
+        vp8_stuff_mb_8x8(cpi, xd, t, dry_run);
       else
-        vp8_stuff_mb(cpi, x, t, dry_run);
+        vp8_stuff_mb(cpi, xd, t, dry_run);
     } else {
-      vp8_fix_contexts(x);
+      vp8_fix_contexts(xd);
     }
     if (dry_run)
       *t = t_backup;
@ -856,31 +856,31 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
|
||||
plane_type = 3;
|
||||
if (has_y2_block) {
|
||||
if (tx_type == TX_8X8) {
|
||||
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context;
|
||||
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)x->left_context;
|
||||
tokenize2nd_order_b_8x8(x,
|
||||
x->block + 24, t, 1, x->frame_type,
|
||||
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
|
||||
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
|
||||
tokenize2nd_order_b_8x8(xd,
|
||||
xd->block + 24, t, 1, xd->frame_type,
|
||||
A + vp8_block2above_8x8[24],
|
||||
L + vp8_block2left_8x8[24],
|
||||
cpi, dry_run);
|
||||
} else
|
||||
tokenize2nd_order_b(x, t, cpi, dry_run);
|
||||
tokenize2nd_order_b(xd, t, cpi, dry_run);
|
||||
|
||||
plane_type = 0;
|
||||
}
|
||||
|
||||
#if CONFIG_TX16X16
|
||||
if (tx_type == TX_16X16) {
|
||||
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context;
|
||||
ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)x->left_context;
|
||||
tokenize1st_order_b_16x16(x, x->block, t, 3,
|
||||
x->frame_type, A, L, cpi, dry_run);
|
||||
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)xd->above_context;
|
||||
ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)xd->left_context;
|
||||
tokenize1st_order_b_16x16(xd, xd->block, t, 3,
|
||||
xd->frame_type, A, L, cpi, dry_run);
|
||||
for (b = 1; b < 16; b++) {
|
||||
*(A + vp8_block2above[b]) = *(A);
|
||||
*(L + vp8_block2left[b] ) = *(L);
|
||||
}
|
||||
for (b = 16; b < 24; b += 4) {
|
||||
tokenize1st_order_b_8x8(x, x->block + b, t, 2, x->frame_type,
|
||||
tokenize1st_order_b_8x8(xd, xd->block + b, t, 2, xd->frame_type,
|
||||
A + vp8_block2above_8x8[b], L + vp8_block2left_8x8[b], cpi, dry_run);
|
||||
*(A + vp8_block2above_8x8[b]+1) = *(A + vp8_block2above_8x8[b]);
|
||||
*(L + vp8_block2left_8x8[b]+1 ) = *(L + vp8_block2left_8x8[b]);
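
The copies into A and L after each call keep the per-4x4 entropy contexts coherent for larger transforms: a 16x16 (or 8x8) transform produces one above/left context value, which is then replicated to every 4x4 position it covers so that neighbours, which look contexts up at 4x4 granularity, all see the same value. The loops above do this through the vp8_block2above/vp8_block2left index maps; stripped of the maps, the fan-out is simply:

    /* Illustrative fan-out: replicate one context value across the n 4x4
     * positions covered by a larger transform. */
    static void fan_out_context(ENTROPY_CONTEXT *ctx, int n) {
      int i;
      for (i = 1; i < n; i++)
        ctx[i] = ctx[0];
    }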
@ -891,11 +891,11 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
else
#endif
if (tx_type == TX_8X8) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)x->left_context;
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
for (b = 0; b < 16; b += 4) {
tokenize1st_order_b_8x8(x,
x->block + b, t, plane_type, x->frame_type,
tokenize1st_order_b_8x8(xd,
xd->block + b, t, plane_type, xd->frame_type,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
@ -903,8 +903,8 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
*(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
}
for (b = 16; b < 24; b += 4) {
tokenize1st_order_b_8x8(x,
x->block + b, t, 2, x->frame_type,
tokenize1st_order_b_8x8(xd,
xd->block + b, t, 2, xd->frame_type,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
@ -914,34 +914,34 @@ void vp8_tokenize_mb(VP8_COMP *cpi,
} else {
#if CONFIG_HYBRIDTRANSFORM
if (active_ht) {
tokenize1st_order_ht(x, t, plane_type, cpi, dry_run);
tokenize1st_order_ht(xd, t, plane_type, cpi, dry_run);
} else {

#if CONFIG_HYBRIDTRANSFORM8X8
if (x->mode_info_context->mbmi.mode == I8X8_PRED) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)x->left_context;
if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
for (b = 0; b < 16; b += 4) {
tokenize1st_order_b_8x8(x,
x->block + b, t, PLANE_TYPE_Y_WITH_DC,
x->frame_type,
tokenize1st_order_b_8x8(xd,
xd->block + b, t, PLANE_TYPE_Y_WITH_DC,
xd->frame_type,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
*(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
*(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
}
tokenize1st_order_chroma(x, t, PLANE_TYPE_UV, cpi, dry_run);
tokenize1st_order_chroma(xd, t, PLANE_TYPE_UV, cpi, dry_run);
} else {
tokenize1st_order_b(x, t, plane_type, cpi, dry_run);
tokenize1st_order_b(xd, t, plane_type, cpi, dry_run);
}
#else
tokenize1st_order_b(x, t, plane_type, cpi, dry_run);
tokenize1st_order_b(xd, t, plane_type, cpi, dry_run);
#endif

}
#else
tokenize1st_order_b(x, t, plane_type, cpi, dry_run);
tokenize1st_order_b(xd, t, plane_type, cpi, dry_run);
#endif
}
if (dry_run)
@ -1304,22 +1304,22 @@ void stuff1st_order_buv_8x8
}

void vp8_stuff_mb_8x8(VP8_COMP *cpi,
MACROBLOCKD *x,
MACROBLOCKD *xd,
TOKENEXTRA **t,
int dry_run) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)x->left_context;
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
int plane_type;
int b;
TOKENEXTRA *t_backup = *t;

stuff2nd_order_b_8x8(x->block + 24, t, 1, x->frame_type,
stuff2nd_order_b_8x8(xd->block + 24, t, 1, xd->frame_type,
A + vp8_block2above_8x8[24],
L + vp8_block2left_8x8[24], cpi, dry_run);
plane_type = 0;

for (b = 0; b < 16; b += 4) {
stuff1st_order_b_8x8(x->block + b, t, plane_type, x->frame_type,
stuff1st_order_b_8x8(xd->block + b, t, plane_type, xd->frame_type,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
@ -1328,7 +1328,7 @@ void vp8_stuff_mb_8x8(VP8_COMP *cpi,
}

for (b = 16; b < 24; b += 4) {
stuff1st_order_buv_8x8(x->block + b, t, 2, x->frame_type,
stuff1st_order_buv_8x8(xd->block + b, t, 2, xd->frame_type,
A + vp8_block2above[b],
L + vp8_block2left[b],
cpi, dry_run);
@ -1367,21 +1367,21 @@ void stuff1st_order_b_16x16(const BLOCKD *const b,
}

void vp8_stuff_mb_16x16(VP8_COMP *cpi,
MACROBLOCKD *x,
MACROBLOCKD *xd,
TOKENEXTRA **t,
int dry_run) {
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)x->above_context;
ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)x->left_context;
ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)xd->left_context;
int b, i;
TOKENEXTRA *t_backup = *t;

stuff1st_order_b_16x16(x->block, t, x->frame_type, A, L, cpi, dry_run);
stuff1st_order_b_16x16(xd->block, t, xd->frame_type, A, L, cpi, dry_run);
for (i = 1; i < 16; i++) {
*(A + vp8_block2above[i]) = *(A);
*(L + vp8_block2left[i]) = *(L);
}
for (b = 16; b < 24; b += 4) {
stuff1st_order_buv_8x8(x->block + b, t, 2, x->frame_type,
stuff1st_order_buv_8x8(xd->block + b, t, 2, xd->frame_type,
A + vp8_block2above[b],
L + vp8_block2left[b],
cpi, dry_run);
@ -1462,10 +1462,10 @@ void stuff1st_order_buv
*a = *l = pt;
}

void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x,
void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd,
TOKENEXTRA **t, int dry_run) {
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)x->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)x->left_context;
ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context;
ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context;
int plane_type;
int b;
TOKENEXTRA *t_backup = *t;
@ -1491,19 +1491,19 @@ void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x,
if (dry_run)
*t = t_backup;
}
void vp8_fix_contexts(MACROBLOCKD *x) {
void vp8_fix_contexts(MACROBLOCKD *xd) {
/* Clear entropy contexts for Y2 blocks */
if ((x->mode_info_context->mbmi.mode != B_PRED
&& x->mode_info_context->mbmi.mode != I8X8_PRED
&& x->mode_info_context->mbmi.mode != SPLITMV)
if ((xd->mode_info_context->mbmi.mode != B_PRED
&& xd->mode_info_context->mbmi.mode != I8X8_PRED
&& xd->mode_info_context->mbmi.mode != SPLITMV)
#if CONFIG_TX16X16
|| x->mode_info_context->mbmi.txfm_size == TX_16X16
|| xd->mode_info_context->mbmi.txfm_size == TX_16X16
#endif
) {
vpx_memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
vpx_memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
vpx_memset(xd->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
} else {
vpx_memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1);
vpx_memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1);
vpx_memset(xd->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1);
vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1);
}
}
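
The sizeof(ENTROPY_CONTEXT_PLANES) - 1 in the else branch relies on two things: ENTROPY_CONTEXT is a char, so the arithmetic is in bytes, and the Y2 context is the last member of the struct, so clearing one byte less leaves it intact for macroblocks that code no Y2 block. The layout, declared along these lines in entropy.h:

    /* ENTROPY_CONTEXT_PLANES as laid out in entropy.h (reproduced here for
     * reference). y2 is last, so sizeof(...) - 1 clears y1/u/v only. */
    typedef struct {
      ENTROPY_CONTEXT y1[4];  /* luma, at 4x4 granularity */
      ENTROPY_CONTEXT u[2];
      ENTROPY_CONTEXT v[2];
      ENTROPY_CONTEXT y2[1];  /* second-order (Y2) block */
    } ENTROPY_CONTEXT_PLANES;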
@ -31,22 +31,25 @@ typedef struct {

int rd_cost_mby(MACROBLOCKD *);

extern int mby_is_skippable(MACROBLOCKD *x, int has_y2_block);
extern int mbuv_is_skippable(MACROBLOCKD *x);
extern int mb_is_skippable(MACROBLOCKD *x, int has_y2_block);
extern int mby_is_skippable_8x8(MACROBLOCKD *x);
extern int mbuv_is_skippable_8x8(MACROBLOCKD *x);
extern int mb_is_skippable_8x8(MACROBLOCKD *x);
extern int mb_is_skippable_16x16(MACROBLOCKD *x);
extern int mby_is_skippable(MACROBLOCKD *xd, int has_y2_block);
extern int mbuv_is_skippable(MACROBLOCKD *xd);
extern int mb_is_skippable(MACROBLOCKD *xd, int has_y2_block);
extern int mby_is_skippable_8x8(MACROBLOCKD *xd);
extern int mbuv_is_skippable_8x8(MACROBLOCKD *xd);
extern int mb_is_skippable_8x8(MACROBLOCKD *xd);
extern int mb_is_skippable_16x16(MACROBLOCKD *xd);

#ifdef ENTROPY_STATS
void init_context_counters();
void print_context_counters();

extern INT64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
extern INT64 context_counters_8x8[BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
extern INT64 context_counters[BLOCK_TYPES][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
extern INT64 context_counters_8x8[BLOCK_TYPES_8X8][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
#if CONFIG_TX16X16
extern INT64 context_counters_16x16[BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
extern INT64 context_counters_16x16[BLOCK_TYPES_16X16][COEF_BANDS]
[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS];
#endif
#endif
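
The ENTROPY_STATS counters are indexed as [block type][coefficient band][previous-token context][token]; instrumented builds bump one entry per emitted token and later print the tables so the default coefficient probabilities can be re-derived. A hedged sketch of the accumulation step (illustrative, not a quote from tokenize.c):

    #ifdef ENTROPY_STATS
    /* Illustrative: one increment per coded token, keyed by plane type,
     * coefficient band, and previous-token context. */
    static void count_token(int type, int band, int pt, int token) {
      ++context_counters[type][band][pt][token];
    }
    #endif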