WIP: removing predictor buffer usage from decoder

This patch uses the dest buffer instead of the predictor
buffer. This will allow us, in future commits, to remove the
extra memory copy that the dequant functions perform when
eob == 0, and to drop the extra parameters that are passed
into the dequant functions.

Change-Id: I7241bc1ab797a430418b1f3a95b5476db7455f6a
Scott LaVarnway 2013-04-10 19:46:52 -04:00 committed by John Koleszar
parent 2dc6acc0fc
commit 6189f2bcb1
11 changed files with 252 additions and 209 deletions
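
For context, a minimal sketch (not code from this patch) of why predicting straight into the dest buffer pays off. Today, when a block has no coded coefficients (eob == 0), the dequant/idct/add helpers simply copy the predictor buffer into dest; once the prediction is built directly in dest, that copy becomes a self-copy and can be dropped, together with the pred/pitch parameters. The first signature mirrors vp9_dequant_idct_add from the diff below; the bodies and the "_sketch" names are simplified assumptions.

/*
 * Illustrative sketch only -- not code from this patch.
 */
#include <stdint.h>
#include <string.h>

/* Before: prediction lives in a separate predictor buffer, so even an
 * all-zero 4x4 block (eob == 0) costs a copy from pred into dest. */
static void dequant_idct_add_sketch(int16_t *input, const int16_t *dq,
                                    uint8_t *pred, uint8_t *dest,
                                    int pitch, int stride, int eob) {
  (void)input;
  (void)dq;
  if (eob == 0) {
    int r;
    for (r = 0; r < 4; ++r)
      memcpy(dest + r * stride, pred + r * pitch, 4);  /* pure copy */
    return;
  }
  /* ... dequantize, inverse transform, add prediction, store to dest ... */
}

/* After this patch the callers pass the dest buffer as the prediction
 * source (pred == dest, pitch == stride), so the eob == 0 branch copies
 * a buffer onto itself. A hypothetical follow-up can then drop both the
 * copy and the pred/pitch parameters. */
static void dequant_idct_add_inplace_sketch(int16_t *input, const int16_t *dq,
                                            uint8_t *dest, int stride,
                                            int eob) {
  (void)input;
  (void)dq;
  (void)dest;
  (void)stride;
  if (eob == 0)
    return;  /* prediction already lives in dest; nothing to copy */
  /* ... dequantize, inverse transform, add in place into dest ... */
}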

View File

@@ -425,9 +425,10 @@ typedef struct macroblockd {
void (*itxm_add)(int16_t *input, const int16_t *dq,
uint8_t *pred, uint8_t *output, int pitch, int stride, int eob);
void (*itxm_add_y_block)(int16_t *q, const int16_t *dq,
uint8_t *pre, uint8_t *dst, int stride, struct macroblockd *xd);
uint8_t *pre, int pre_stride, uint8_t *dst, int stride,
struct macroblockd *xd);
void (*itxm_add_uv_block)(int16_t *q, const int16_t *dq,
uint8_t *pre, uint8_t *dst, int stride,
uint8_t *pre, int pre_stride, uint8_t *dst, int stride,
uint16_t *eobs);
struct subpix_fn_table subpix;

View File

@@ -473,8 +473,11 @@ static void build_2x1_inter_predictor(const BLOCKD *d0, const BLOCKD *d1,
int block_size, int stride,
int which_mv, int weight,
const struct subpix_fn_table *subpix,
int row, int col) {
assert(d1->predictor - d0->predictor == block_size);
int row, int col, int use_dst) {
uint8_t *d0_predictor = use_dst ? *(d0->base_dst) + d0->dst : d0->predictor;
uint8_t *d1_predictor = use_dst ? *(d1->base_dst) + d1->dst : d1->predictor;
stride = use_dst ? d0->dst_stride : stride;
assert(d1_predictor - d0_predictor == block_size);
assert(d1->pre == d0->pre + block_size);
set_scaled_offsets(&scale[which_mv], row, col);
@@ -484,19 +487,18 @@ static void build_2x1_inter_predictor(const BLOCKD *d0, const BLOCKD *d1,
vp9_build_inter_predictor(*base_pre + d0->pre,
d0->pre_stride,
d0->predictor, stride,
d0_predictor, stride,
&d0->bmi.as_mv[which_mv],
&scale[which_mv],
2 * block_size, block_size,
weight, subpix);
} else {
uint8_t **base_pre0 = which_mv ? d0->base_second_pre : d0->base_pre;
uint8_t **base_pre1 = which_mv ? d1->base_second_pre : d1->base_pre;
vp9_build_inter_predictor(*base_pre0 + d0->pre,
d0->pre_stride,
d0->predictor, stride,
d0_predictor, stride,
&d0->bmi.as_mv[which_mv],
&scale[which_mv],
block_size, block_size,
@@ -506,7 +508,7 @@ static void build_2x1_inter_predictor(const BLOCKD *d0, const BLOCKD *d1,
vp9_build_inter_predictor(*base_pre1 + d1->pre,
d1->pre_stride,
d1->predictor, stride,
d1_predictor, stride,
&d1->bmi.as_mv[which_mv],
&scale[which_mv],
block_size, block_size,
@@ -1533,7 +1535,8 @@ void vp9_build_inter64x64_predictors_sb(MACROBLOCKD *mb,
}
static void build_inter4x4_predictors_mb(MACROBLOCKD *xd,
int mb_row, int mb_col) {
int mb_row, int mb_col,
int use_dst) {
int i;
MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
BLOCKD *blockd = xd->block;
@@ -1562,7 +1565,8 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd,
build_2x1_inter_predictor(d0, d1, xd->scale_factor, 8, 16, which_mv,
which_mv ? weight : 0,
&xd->subpix, mb_row * 16 + y, mb_col * 16);
&xd->subpix, mb_row * 16 + y, mb_col * 16,
use_dst);
}
}
} else {
@@ -1579,7 +1583,8 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd,
build_2x1_inter_predictor(d0, d1, xd->scale_factor, 4, 16, which_mv,
which_mv ? weight : 0,
&xd->subpix,
mb_row * 16 + y, mb_col * 16 + x);
mb_row * 16 + y, mb_col * 16 + x,
use_dst);
}
}
}
@@ -1597,7 +1602,8 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd,
for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
build_2x1_inter_predictor(d0, d1, xd->scale_factor_uv, 4, 8, which_mv,
which_mv ? weight : 0, &xd->subpix,
mb_row * 8 + y, mb_col * 8 + x);
mb_row * 8 + y, mb_col * 8 + x,
use_dst);
}
}
}
@@ -1714,10 +1720,26 @@ void vp9_build_inter_predictors_mb(MACROBLOCKD *xd,
} else {
build_4x4uvmvs(xd);
build_inter4x4_predictors_mb(xd, mb_row, mb_col);
build_inter4x4_predictors_mb(xd, mb_row, mb_col, 0);
}
}
void vp9_build_inter_predictors_mb_s(MACROBLOCKD *xd,
int mb_row,
int mb_col) {
if (xd->mode_info_context->mbmi.mode != SPLITMV) {
vp9_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
xd->dst.u_buffer,
xd->dst.v_buffer,
xd->dst.y_stride,
xd->dst.uv_stride,
mb_row, mb_col);
} else {
build_4x4uvmvs(xd);
build_inter4x4_predictors_mb(xd, mb_row, mb_col, 1);
}
}
/*encoder only*/
void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd,
int mb_row, int mb_col) {
@@ -1766,7 +1788,8 @@ void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd,
for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
build_2x1_inter_predictor(d0, d1, xd->scale_factor_uv, 4, 8, which_mv,
which_mv ? weight : 0,
&xd->subpix, mb_row * 8 + y, mb_col * 8 + x);
&xd->subpix, mb_row * 8 + y, mb_col * 8 + x,
0);
}
}
}

View File

@@ -48,6 +48,10 @@ void vp9_build_inter_predictors_mb(MACROBLOCKD *xd,
int mb_row,
int mb_col);
void vp9_build_inter_predictors_mb_s(MACROBLOCKD *xd,
int mb_row,
int mb_col);
void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd,
int mb_row,
int mb_col);

View File

@@ -751,7 +751,7 @@ void vp9_build_intra_predictors_sb64uv_s(MACROBLOCKD *xd) {
void vp9_intra8x8_predict(MACROBLOCKD *xd,
BLOCKD *b,
int mode,
uint8_t *predictor) {
uint8_t *predictor, int pre_stride) {
const int block4x4_idx = (b - xd->block);
const int block_idx = (block4x4_idx >> 2) | !!(block4x4_idx & 2);
const int have_top = (block_idx >> 1) || xd->up_available;
@@ -759,7 +759,7 @@ void vp9_intra8x8_predict(MACROBLOCKD *xd,
const int have_right = !(block_idx & 1) || xd->right_available;
vp9_build_intra_predictors_internal(*(b->base_dst) + b->dst,
b->dst_stride, predictor, 16,
b->dst_stride, predictor, pre_stride,
mode, 8, have_top, have_left,
have_right);
}
@@ -767,14 +767,14 @@ void vp9_intra8x8_predict(MACROBLOCKD *xd,
void vp9_intra_uv4x4_predict(MACROBLOCKD *xd,
BLOCKD *b,
int mode,
uint8_t *predictor) {
uint8_t *predictor, int pre_stride) {
const int block_idx = (b - xd->block) & 3;
const int have_top = (block_idx >> 1) || xd->up_available;
const int have_left = (block_idx & 1) || xd->left_available;
const int have_right = !(block_idx & 1) || xd->right_available;
vp9_build_intra_predictors_internal(*(b->base_dst) + b->dst,
b->dst_stride, predictor, 8,
b->dst_stride, predictor, pre_stride,
mode, 4, have_top, have_left,
have_right);
}

View File

@@ -164,7 +164,8 @@ B_PREDICTION_MODE vp9_find_bpred_context(MACROBLOCKD *xd, BLOCKD *x) {
void vp9_intra4x4_predict(MACROBLOCKD *xd,
BLOCKD *x,
int b_mode,
uint8_t *predictor) {
uint8_t *predictor,
int ps) {
int i, r, c;
const int block_idx = x - xd->block;
const int have_top = (block_idx >> 2) || xd->up_available;
@@ -276,7 +277,7 @@ void vp9_intra4x4_predict(MACROBLOCKD *xd,
predictor[c] = expected_dc;
}
predictor += 16;
predictor += ps;
}
}
break;
@@ -287,7 +288,7 @@ void vp9_intra4x4_predict(MACROBLOCKD *xd,
predictor[c] = clip_pixel(above[c] - top_left + left[r]);
}
predictor += 16;
predictor += ps;
}
}
break;
@@ -305,7 +306,7 @@ void vp9_intra4x4_predict(MACROBLOCKD *xd,
predictor[c] = ap[c];
}
predictor += 16;
predictor += ps;
}
}
break;
@@ -323,29 +324,29 @@ void vp9_intra4x4_predict(MACROBLOCKD *xd,
predictor[c] = lp[r];
}
predictor += 16;
predictor += ps;
}
}
break;
case B_LD_PRED: {
uint8_t *ptr = above;
predictor[0 * 16 + 0] = (ptr[0] + ptr[1] * 2 + ptr[2] + 2) >> 2;
predictor[0 * 16 + 1] =
predictor[1 * 16 + 0] = (ptr[1] + ptr[2] * 2 + ptr[3] + 2) >> 2;
predictor[0 * 16 + 2] =
predictor[1 * 16 + 1] =
predictor[2 * 16 + 0] = (ptr[2] + ptr[3] * 2 + ptr[4] + 2) >> 2;
predictor[0 * 16 + 3] =
predictor[1 * 16 + 2] =
predictor[2 * 16 + 1] =
predictor[3 * 16 + 0] = (ptr[3] + ptr[4] * 2 + ptr[5] + 2) >> 2;
predictor[1 * 16 + 3] =
predictor[2 * 16 + 2] =
predictor[3 * 16 + 1] = (ptr[4] + ptr[5] * 2 + ptr[6] + 2) >> 2;
predictor[2 * 16 + 3] =
predictor[3 * 16 + 2] = (ptr[5] + ptr[6] * 2 + ptr[7] + 2) >> 2;
predictor[3 * 16 + 3] = (ptr[6] + ptr[7] * 2 + ptr[7] + 2) >> 2;
predictor[0 * ps + 0] = (ptr[0] + ptr[1] * 2 + ptr[2] + 2) >> 2;
predictor[0 * ps + 1] =
predictor[1 * ps + 0] = (ptr[1] + ptr[2] * 2 + ptr[3] + 2) >> 2;
predictor[0 * ps + 2] =
predictor[1 * ps + 1] =
predictor[2 * ps + 0] = (ptr[2] + ptr[3] * 2 + ptr[4] + 2) >> 2;
predictor[0 * ps + 3] =
predictor[1 * ps + 2] =
predictor[2 * ps + 1] =
predictor[3 * ps + 0] = (ptr[3] + ptr[4] * 2 + ptr[5] + 2) >> 2;
predictor[1 * ps + 3] =
predictor[2 * ps + 2] =
predictor[3 * ps + 1] = (ptr[4] + ptr[5] * 2 + ptr[6] + 2) >> 2;
predictor[2 * ps + 3] =
predictor[3 * ps + 2] = (ptr[5] + ptr[6] * 2 + ptr[7] + 2) >> 2;
predictor[3 * ps + 3] = (ptr[6] + ptr[7] * 2 + ptr[7] + 2) >> 2;
}
break;
@@ -362,22 +363,22 @@ void vp9_intra4x4_predict(MACROBLOCKD *xd,
pp[7] = above[2];
pp[8] = above[3];
predictor[3 * 16 + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
predictor[3 * 16 + 1] =
predictor[2 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
predictor[3 * 16 + 2] =
predictor[2 * 16 + 1] =
predictor[1 * 16 + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
predictor[3 * 16 + 3] =
predictor[2 * 16 + 2] =
predictor[1 * 16 + 1] =
predictor[0 * 16 + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
predictor[2 * 16 + 3] =
predictor[1 * 16 + 2] =
predictor[0 * 16 + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
predictor[1 * 16 + 3] =
predictor[0 * 16 + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
predictor[0 * 16 + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;
predictor[3 * ps + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
predictor[3 * ps + 1] =
predictor[2 * ps + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
predictor[3 * ps + 2] =
predictor[2 * ps + 1] =
predictor[1 * ps + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
predictor[3 * ps + 3] =
predictor[2 * ps + 2] =
predictor[1 * ps + 1] =
predictor[0 * ps + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
predictor[2 * ps + 3] =
predictor[1 * ps + 2] =
predictor[0 * ps + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
predictor[1 * ps + 3] =
predictor[0 * ps + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
predictor[0 * ps + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;
}
break;
@@ -394,44 +395,44 @@ void vp9_intra4x4_predict(MACROBLOCKD *xd,
pp[7] = above[2];
pp[8] = above[3];
predictor[3 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
predictor[2 * 16 + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
predictor[3 * 16 + 1] =
predictor[1 * 16 + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
predictor[2 * 16 + 1] =
predictor[0 * 16 + 0] = (pp[4] + pp[5] + 1) >> 1;
predictor[3 * 16 + 2] =
predictor[1 * 16 + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
predictor[2 * 16 + 2] =
predictor[0 * 16 + 1] = (pp[5] + pp[6] + 1) >> 1;
predictor[3 * 16 + 3] =
predictor[1 * 16 + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
predictor[2 * 16 + 3] =
predictor[0 * 16 + 2] = (pp[6] + pp[7] + 1) >> 1;
predictor[1 * 16 + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;
predictor[0 * 16 + 3] = (pp[7] + pp[8] + 1) >> 1;
predictor[3 * ps + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
predictor[2 * ps + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
predictor[3 * ps + 1] =
predictor[1 * ps + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
predictor[2 * ps + 1] =
predictor[0 * ps + 0] = (pp[4] + pp[5] + 1) >> 1;
predictor[3 * ps + 2] =
predictor[1 * ps + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
predictor[2 * ps + 2] =
predictor[0 * ps + 1] = (pp[5] + pp[6] + 1) >> 1;
predictor[3 * ps + 3] =
predictor[1 * ps + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
predictor[2 * ps + 3] =
predictor[0 * ps + 2] = (pp[6] + pp[7] + 1) >> 1;
predictor[1 * ps + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;
predictor[0 * ps + 3] = (pp[7] + pp[8] + 1) >> 1;
}
break;
case B_VL_PRED: {
uint8_t *pp = above;
predictor[0 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
predictor[1 * 16 + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
predictor[2 * 16 + 0] =
predictor[0 * 16 + 1] = (pp[1] + pp[2] + 1) >> 1;
predictor[1 * 16 + 1] =
predictor[3 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
predictor[2 * 16 + 1] =
predictor[0 * 16 + 2] = (pp[2] + pp[3] + 1) >> 1;
predictor[3 * 16 + 1] =
predictor[1 * 16 + 2] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
predictor[0 * 16 + 3] =
predictor[2 * 16 + 2] = (pp[3] + pp[4] + 1) >> 1;
predictor[1 * 16 + 3] =
predictor[3 * 16 + 2] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
predictor[2 * 16 + 3] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
predictor[3 * 16 + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
predictor[0 * ps + 0] = (pp[0] + pp[1] + 1) >> 1;
predictor[1 * ps + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
predictor[2 * ps + 0] =
predictor[0 * ps + 1] = (pp[1] + pp[2] + 1) >> 1;
predictor[1 * ps + 1] =
predictor[3 * ps + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
predictor[2 * ps + 1] =
predictor[0 * ps + 2] = (pp[2] + pp[3] + 1) >> 1;
predictor[3 * ps + 1] =
predictor[1 * ps + 2] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
predictor[0 * ps + 3] =
predictor[2 * ps + 2] = (pp[3] + pp[4] + 1) >> 1;
predictor[1 * ps + 3] =
predictor[3 * ps + 2] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
predictor[2 * ps + 3] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
predictor[3 * ps + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
}
break;
@@ -449,44 +450,44 @@ void vp9_intra4x4_predict(MACROBLOCKD *xd,
pp[8] = above[3];
predictor[3 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
predictor[3 * 16 + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
predictor[2 * 16 + 0] =
predictor[3 * 16 + 2] = (pp[1] + pp[2] + 1) >> 1;
predictor[2 * 16 + 1] =
predictor[3 * 16 + 3] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
predictor[2 * 16 + 2] =
predictor[1 * 16 + 0] = (pp[2] + pp[3] + 1) >> 1;
predictor[2 * 16 + 3] =
predictor[1 * 16 + 1] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
predictor[1 * 16 + 2] =
predictor[0 * 16 + 0] = (pp[3] + pp[4] + 1) >> 1;
predictor[1 * 16 + 3] =
predictor[0 * 16 + 1] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
predictor[0 * 16 + 2] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
predictor[0 * 16 + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
predictor[3 * ps + 0] = (pp[0] + pp[1] + 1) >> 1;
predictor[3 * ps + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
predictor[2 * ps + 0] =
predictor[3 * ps + 2] = (pp[1] + pp[2] + 1) >> 1;
predictor[2 * ps + 1] =
predictor[3 * ps + 3] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
predictor[2 * ps + 2] =
predictor[1 * ps + 0] = (pp[2] + pp[3] + 1) >> 1;
predictor[2 * ps + 3] =
predictor[1 * ps + 1] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
predictor[1 * ps + 2] =
predictor[0 * ps + 0] = (pp[3] + pp[4] + 1) >> 1;
predictor[1 * ps + 3] =
predictor[0 * ps + 1] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
predictor[0 * ps + 2] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
predictor[0 * ps + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
}
break;
case B_HU_PRED: {
uint8_t *pp = left;
predictor[0 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
predictor[0 * 16 + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
predictor[0 * 16 + 2] =
predictor[1 * 16 + 0] = (pp[1] + pp[2] + 1) >> 1;
predictor[0 * 16 + 3] =
predictor[1 * 16 + 1] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
predictor[1 * 16 + 2] =
predictor[2 * 16 + 0] = (pp[2] + pp[3] + 1) >> 1;
predictor[1 * 16 + 3] =
predictor[2 * 16 + 1] = (pp[2] + pp[3] * 2 + pp[3] + 2) >> 2;
predictor[2 * 16 + 2] =
predictor[2 * 16 + 3] =
predictor[3 * 16 + 0] =
predictor[3 * 16 + 1] =
predictor[3 * 16 + 2] =
predictor[3 * 16 + 3] = pp[3];
predictor[0 * ps + 0] = (pp[0] + pp[1] + 1) >> 1;
predictor[0 * ps + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
predictor[0 * ps + 2] =
predictor[1 * ps + 0] = (pp[1] + pp[2] + 1) >> 1;
predictor[0 * ps + 3] =
predictor[1 * ps + 1] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
predictor[1 * ps + 2] =
predictor[2 * ps + 0] = (pp[2] + pp[3] + 1) >> 1;
predictor[1 * ps + 3] =
predictor[2 * ps + 1] = (pp[2] + pp[3] * 2 + pp[3] + 2) >> 2;
predictor[2 * ps + 2] =
predictor[2 * ps + 3] =
predictor[3 * ps + 0] =
predictor[3 * ps + 1] =
predictor[3 * ps + 2] =
predictor[3 * ps + 3] = pp[3];
}
break;

View File

@@ -27,7 +27,7 @@ forward_decls vp9_common_forward_decls
#
# Dequant
#
prototype void vp9_dequant_idct_add_y_block_8x8 "int16_t *q, const int16_t *dq, uint8_t *pre, uint8_t *dst, int stride, struct macroblockd *xd"
prototype void vp9_dequant_idct_add_y_block_8x8 "int16_t *q, const int16_t *dq, uint8_t *pre, int pre_stride, uint8_t *dst, int stride, struct macroblockd *xd"
specialize vp9_dequant_idct_add_y_block_8x8
prototype void vp9_dequant_idct_add_16x16 "int16_t *input, const int16_t *dq, uint8_t *pred, uint8_t *dest, int pitch, int stride, int eob"
@@ -39,10 +39,10 @@ specialize vp9_dequant_idct_add_8x8
prototype void vp9_dequant_idct_add "int16_t *input, const int16_t *dq, uint8_t *pred, uint8_t *dest, int pitch, int stride, int eob"
specialize vp9_dequant_idct_add
prototype void vp9_dequant_idct_add_y_block "int16_t *q, const int16_t *dq, uint8_t *pre, uint8_t *dst, int stride, struct macroblockd *xd"
prototype void vp9_dequant_idct_add_y_block "int16_t *q, const int16_t *dq, uint8_t *pre, int pre_stride, uint8_t *dst, int stride, struct macroblockd *xd"
specialize vp9_dequant_idct_add_y_block
prototype void vp9_dequant_idct_add_uv_block "int16_t *q, const int16_t *dq, uint8_t *pre, uint8_t *dst, int stride, uint16_t *eobs"
prototype void vp9_dequant_idct_add_uv_block "int16_t *q, const int16_t *dq, uint8_t *pre, int pre_stride, uint8_t *dst, int stride, uint16_t *eobs"
specialize vp9_dequant_idct_add_uv_block
prototype void vp9_dequant_idct_add_32x32 "int16_t *q, const int16_t *dq, uint8_t *pre, uint8_t *dst, int pitch, int stride, int eob"
@@ -119,13 +119,13 @@ specialize vp9_build_intra_predictors_sb64y_s;
prototype void vp9_build_intra_predictors_sb64uv_s "struct macroblockd *x"
specialize vp9_build_intra_predictors_sb64uv_s;
prototype void vp9_intra4x4_predict "struct macroblockd *xd, struct blockd *x, int b_mode, uint8_t *predictor"
prototype void vp9_intra4x4_predict "struct macroblockd *xd, struct blockd *x, int b_mode, uint8_t *predictor, int pre_stride"
specialize vp9_intra4x4_predict;
prototype void vp9_intra8x8_predict "struct macroblockd *xd, struct blockd *x, int b_mode, uint8_t *predictor"
prototype void vp9_intra8x8_predict "struct macroblockd *xd, struct blockd *x, int b_mode, uint8_t *predictor, int pre_stride"
specialize vp9_intra8x8_predict;
prototype void vp9_intra_uv4x4_predict "struct macroblockd *xd, struct blockd *x, int b_mode, uint8_t *predictor"
prototype void vp9_intra_uv4x4_predict "struct macroblockd *xd, struct blockd *x, int b_mode, uint8_t *predictor, int pre_stride"
specialize vp9_intra_uv4x4_predict;
if [ "$CONFIG_VP9_DECODER" = "yes" ]; then

View File

@@ -226,24 +226,28 @@ static void skip_recon_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
static void decode_16x16(VP9D_COMP *pbi, MACROBLOCKD *xd,
BOOL_DECODER* const bc) {
const TX_TYPE tx_type = get_tx_type_16x16(xd, 0);
if (tx_type != DCT_DCT) {
vp9_dequant_iht_add_16x16_c(tx_type, xd->plane[0].qcoeff,
xd->block[0].dequant, xd->predictor,
xd->dst.y_buffer, 16, xd->dst.y_stride,
xd->plane[0].eobs[0]);
xd->block[0].dequant, xd->dst.y_buffer,
xd->dst.y_buffer, xd->dst.y_stride,
xd->dst.y_stride, xd->plane[0].eobs[0]);
} else {
vp9_dequant_idct_add_16x16(xd->plane[0].qcoeff, xd->block[0].dequant,
xd->predictor, xd->dst.y_buffer,
16, xd->dst.y_stride, xd->plane[0].eobs[0]);
xd->dst.y_buffer, xd->dst.y_buffer,
xd->dst.y_stride, xd->dst.y_stride,
xd->plane[0].eobs[0]);
}
vp9_dequant_idct_add_8x8(xd->plane[1].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16, xd->dst.u_buffer, 8,
xd->dst.uv_stride, xd->plane[1].eobs[0]);
xd->dst.u_buffer, xd->dst.u_buffer,
xd->dst.uv_stride, xd->dst.uv_stride,
xd->plane[1].eobs[0]);
vp9_dequant_idct_add_8x8(xd->plane[2].qcoeff, xd->block[20].dequant,
xd->predictor + 16 * 16 + 64, xd->dst.v_buffer, 8,
xd->dst.uv_stride, xd->plane[2].eobs[0]);
xd->dst.v_buffer, xd->dst.v_buffer,
xd->dst.uv_stride, xd->dst.uv_stride,
xd->plane[2].eobs[0]);
}
static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
@@ -259,27 +263,27 @@ static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
int idx = (ib & 0x02) ? (ib + 2) : ib;
int16_t *q = BLOCK_OFFSET(xd->plane[0].qcoeff, idx, 16);
int16_t *dq = xd->block[0].dequant;
uint8_t *pre = xd->block[ib].predictor;
uint8_t *dst = *(xd->block[ib].base_dst) + xd->block[ib].dst;
int stride = xd->dst.y_stride;
BLOCKD *b = &xd->block[ib];
if (mode == I8X8_PRED) {
BLOCKD *b = &xd->block[ib];
int i8x8mode = b->bmi.as_mode.first;
vp9_intra8x8_predict(xd, b, i8x8mode, b->predictor);
vp9_intra8x8_predict(xd, b, i8x8mode, dst, stride);
}
tx_type = get_tx_type_8x8(xd, ib);
if (tx_type != DCT_DCT) {
vp9_dequant_iht_add_8x8_c(tx_type, q, dq, pre, dst, 16, stride,
vp9_dequant_iht_add_8x8_c(tx_type, q, dq, dst, dst, stride, stride,
xd->plane[0].eobs[idx]);
} else {
vp9_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride,
vp9_dequant_idct_add_8x8_c(q, dq, dst, dst, stride, stride,
xd->plane[0].eobs[idx]);
}
}
} else {
vp9_dequant_idct_add_y_block_8x8(xd->plane[0].qcoeff,
xd->block[0].dequant,
xd->predictor,
xd->dst.y_buffer,
xd->dst.y_stride,
xd->dst.y_buffer,
xd->dst.y_stride,
xd);
@@ -294,34 +298,38 @@ static void decode_8x8(VP9D_COMP *pbi, MACROBLOCKD *xd,
int i8x8mode = b->bmi.as_mode.first;
b = &xd->block[16 + i];
vp9_intra_uv4x4_predict(xd, b, i8x8mode, b->predictor);
vp9_intra_uv4x4_predict(xd, b, i8x8mode, *(b->base_dst) + b->dst,
b->dst_stride);
xd->itxm_add(BLOCK_OFFSET(xd->plane[1].qcoeff, i, 16),
b->dequant, b->predictor,
*(b->base_dst) + b->dst, 8, b->dst_stride,
b->dequant, *(b->base_dst) + b->dst,
*(b->base_dst) + b->dst, b->dst_stride, b->dst_stride,
xd->plane[1].eobs[i]);
b = &xd->block[20 + i];
vp9_intra_uv4x4_predict(xd, b, i8x8mode, b->predictor);
vp9_intra_uv4x4_predict(xd, b, i8x8mode, *(b->base_dst) + b->dst,
b->dst_stride);
xd->itxm_add(BLOCK_OFFSET(xd->plane[2].qcoeff, i, 16),
b->dequant, b->predictor,
*(b->base_dst) + b->dst, 8, b->dst_stride,
b->dequant, *(b->base_dst) + b->dst,
*(b->base_dst) + b->dst, b->dst_stride, b->dst_stride,
xd->plane[2].eobs[i]);
}
} else if (mode == SPLITMV) {
xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16, xd->dst.u_buffer,
xd->dst.u_buffer, xd->dst.uv_stride, xd->dst.u_buffer,
xd->dst.uv_stride, xd->plane[1].eobs);
xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16 + 64, xd->dst.v_buffer,
xd->dst.v_buffer, xd->dst.uv_stride, xd->dst.v_buffer,
xd->dst.uv_stride, xd->plane[2].eobs);
} else {
vp9_dequant_idct_add_8x8(xd->plane[1].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16, xd->dst.u_buffer, 8,
xd->dst.uv_stride, xd->plane[1].eobs[0]);
xd->dst.u_buffer, xd->dst.u_buffer,
xd->dst.uv_stride, xd->dst.uv_stride,
xd->plane[1].eobs[0]);
vp9_dequant_idct_add_8x8(xd->plane[2].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16 + 64, xd->dst.v_buffer, 8,
xd->dst.uv_stride, xd->plane[2].eobs[0]);
xd->dst.v_buffer, xd->dst.v_buffer,
xd->dst.uv_stride, xd->dst.uv_stride,
xd->plane[2].eobs[0]);
}
}
@@ -337,35 +345,38 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
int j;
BLOCKD *b = &xd->block[ib];
int i8x8mode = b->bmi.as_mode.first;
vp9_intra8x8_predict(xd, b, i8x8mode, b->predictor);
vp9_intra8x8_predict(xd, b, i8x8mode, *(b->base_dst) + b->dst,
b->dst_stride);
for (j = 0; j < 4; j++) {
b = &xd->block[ib + iblock[j]];
tx_type = get_tx_type_4x4(xd, ib + iblock[j]);
if (tx_type != DCT_DCT) {
vp9_dequant_iht_add_c(tx_type,
BLOCK_OFFSET(xd->plane[0].qcoeff, ib + iblock[j], 16),
b->dequant, b->predictor,
*(b->base_dst) + b->dst, 16,
b->dequant, *(b->base_dst) + b->dst,
*(b->base_dst) + b->dst, b->dst_stride,
b->dst_stride,
xd->plane[0].eobs[ib + iblock[j]]);
} else {
xd->itxm_add(BLOCK_OFFSET(xd->plane[0].qcoeff, ib + iblock[j], 16),
b->dequant, b->predictor,
*(b->base_dst) + b->dst, 16, b->dst_stride,
b->dequant, *(b->base_dst) + b->dst,
*(b->base_dst) + b->dst, b->dst_stride, b->dst_stride,
xd->plane[0].eobs[ib + iblock[j]]);
}
}
b = &xd->block[16 + i];
vp9_intra_uv4x4_predict(xd, b, i8x8mode, b->predictor);
vp9_intra_uv4x4_predict(xd, b, i8x8mode, *(b->base_dst) + b->dst,
b->dst_stride);
xd->itxm_add(BLOCK_OFFSET(xd->plane[1].qcoeff, i, 16),
b->dequant, b->predictor,
*(b->base_dst) + b->dst, 8, b->dst_stride,
b->dequant, *(b->base_dst) + b->dst,
*(b->base_dst) + b->dst, b->dst_stride, b->dst_stride,
xd->plane[1].eobs[i]);
b = &xd->block[20 + i];
vp9_intra_uv4x4_predict(xd, b, i8x8mode, b->predictor);
vp9_intra_uv4x4_predict(xd, b, i8x8mode, *(b->base_dst) + b->dst,
b->dst_stride);
xd->itxm_add(BLOCK_OFFSET(xd->plane[2].qcoeff, i, 16),
b->dequant, b->predictor,
*(b->base_dst) + b->dst, 8, b->dst_stride,
b->dequant, *(b->base_dst) + b->dst,
*(b->base_dst) + b->dst, b->dst_stride, b->dst_stride,
xd->plane[2].eobs[i]);
}
} else if (mode == B_PRED) {
@@ -378,18 +389,19 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
if (!xd->mode_info_context->mbmi.mb_skip_coeff)
vp9_decode_coefs_4x4(pbi, xd, bc, PLANE_TYPE_Y_WITH_DC, i);
#endif
vp9_intra4x4_predict(xd, b, b_mode, b->predictor);
vp9_intra4x4_predict(xd, b, b_mode, *(b->base_dst) + b->dst,
b->dst_stride);
tx_type = get_tx_type_4x4(xd, i);
if (tx_type != DCT_DCT) {
vp9_dequant_iht_add_c(tx_type,
BLOCK_OFFSET(xd->plane[0].qcoeff, i, 16),
b->dequant, b->predictor,
*(b->base_dst) + b->dst, 16, b->dst_stride,
xd->plane[0].eobs[i]);
b->dequant, *(b->base_dst) + b->dst,
*(b->base_dst) + b->dst, b->dst_stride,
b->dst_stride, xd->plane[0].eobs[i]);
} else {
xd->itxm_add(BLOCK_OFFSET(xd->plane[0].qcoeff, i, 16),
b->dequant, b->predictor,
*(b->base_dst) + b->dst, 16, b->dst_stride,
b->dequant, *(b->base_dst) + b->dst,
*(b->base_dst) + b->dst, b->dst_stride, b->dst_stride,
xd->plane[0].eobs[i]);
}
}
@@ -397,25 +409,25 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
if (!xd->mode_info_context->mbmi.mb_skip_coeff)
vp9_decode_mb_tokens_4x4_uv(pbi, xd, bc);
#endif
vp9_build_intra_predictors_mbuv(xd);
vp9_build_intra_predictors_mbuv_s(xd);
xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16, xd->dst.u_buffer,
xd->dst.u_buffer, xd->dst.uv_stride, xd->dst.u_buffer,
xd->dst.uv_stride, xd->plane[1].eobs);
xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16 + 64, xd->dst.v_buffer,
xd->dst.v_buffer, xd->dst.uv_stride, xd->dst.v_buffer,
xd->dst.uv_stride, xd->plane[2].eobs);
} else if (mode == SPLITMV || get_tx_type_4x4(xd, 0) == DCT_DCT) {
xd->itxm_add_y_block(xd->plane[0].qcoeff,
xd->block[0].dequant,
xd->predictor,
xd->dst.y_buffer, xd->dst.y_stride,
xd->dst.y_buffer,
xd->dst.y_stride,
xd);
xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16, xd->dst.u_buffer,
xd->dst.u_buffer, xd->dst.uv_stride, xd->dst.u_buffer,
xd->dst.uv_stride, xd->plane[1].eobs);
xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16 + 64, xd->dst.v_buffer,
xd->dst.v_buffer, xd->dst.uv_stride, xd->dst.v_buffer,
xd->dst.uv_stride, xd->plane[2].eobs);
} else {
for (i = 0; i < 16; i++) {
@@ -424,21 +436,21 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd,
if (tx_type != DCT_DCT) {
vp9_dequant_iht_add_c(tx_type,
BLOCK_OFFSET(xd->plane[0].qcoeff, i, 16),
b->dequant, b->predictor,
*(b->base_dst) + b->dst, 16,
b->dequant, *(b->base_dst) + b->dst,
*(b->base_dst) + b->dst, b->dst_stride,
b->dst_stride, xd->plane[0].eobs[i]);
} else {
xd->itxm_add(BLOCK_OFFSET(xd->plane[0].qcoeff, i, 16),
b->dequant, b->predictor,
*(b->base_dst) + b->dst, 16, b->dst_stride,
b->dequant, *(b->base_dst) + b->dst,
*(b->base_dst) + b->dst, b->dst_stride, b->dst_stride,
xd->plane[0].eobs[i]);
}
}
xd->itxm_add_uv_block(xd->plane[1].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16, xd->dst.u_buffer,
xd->dst.u_buffer, xd->dst.uv_stride, xd->dst.u_buffer,
xd->dst.uv_stride, xd->plane[1].eobs);
xd->itxm_add_uv_block(xd->plane[2].qcoeff, xd->block[16].dequant,
xd->predictor + 16 * 16 + 64, xd->dst.v_buffer,
xd->dst.v_buffer, xd->dst.uv_stride, xd->dst.v_buffer,
xd->dst.uv_stride, xd->plane[2].eobs);
}
}
@@ -807,9 +819,9 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
// do prediction
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
if (mode != I8X8_PRED) {
vp9_build_intra_predictors_mbuv(xd);
vp9_build_intra_predictors_mbuv_s(xd);
if (mode != B_PRED)
vp9_build_intra_predictors_mby(xd);
vp9_build_intra_predictors_mby_s(xd);
}
} else {
#if 0 // def DEC_DEBUG
@@ -818,7 +830,7 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
xd->mode_info_context->mbmi.mode, tx_size,
xd->mode_info_context->mbmi.interp_filter);
#endif
vp9_build_inter_predictors_mb(xd, mb_row, mb_col);
vp9_build_inter_predictors_mb_s(xd, mb_row, mb_col);
}
if (tx_size == TX_16X16) {

View File

@@ -33,13 +33,14 @@ void vp9_dequant_dc_idct_add_y_block_lossless_c(int16_t *q,
const int16_t *dc);
void vp9_dequant_idct_add_y_block_lossless_c(int16_t *q, const int16_t *dq,
unsigned char *pre,
unsigned char *pre, int pre_stride,
unsigned char *dst,
int stride,
struct macroblockd *xd);
void vp9_dequant_idct_add_uv_block_lossless_c(int16_t *q, const int16_t *dq,
unsigned char *pre,
int pre_stride,
unsigned char *dst,
int stride,
uint16_t *eobs);

View File

@@ -13,85 +13,86 @@
#include "vp9/decoder/vp9_dequantize.h"
void vp9_dequant_idct_add_y_block_c(int16_t *q, const int16_t *dq,
uint8_t *pre,
uint8_t *pre, int pre_stride,
uint8_t *dst,
int stride, MACROBLOCKD *xd) {
int i, j;
for (i = 0; i < 4; i++) {
for (j = 0; j < 4; j++) {
vp9_dequant_idct_add(q, dq, pre, dst, 16, stride,
vp9_dequant_idct_add(q, dq, pre, dst, pre_stride, stride,
xd->plane[0].eobs[i * 4 + j]);
q += 16;
pre += 4;
dst += 4;
}
pre += 64 - 16;
pre += 4 * pre_stride - 16;
dst += 4 * stride - 16;
}
}
void vp9_dequant_idct_add_uv_block_c(int16_t *q, const int16_t *dq,
uint8_t *pre, uint8_t *dst,
uint8_t *pre, int pre_stride, uint8_t *dst,
int stride, uint16_t *eobs) {
int i, j;
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++) {
vp9_dequant_idct_add(q, dq, pre, dst, 8, stride, eobs[i * 2 + j]);
vp9_dequant_idct_add(q, dq, pre, dst, pre_stride, stride,
eobs[i * 2 + j]);
q += 16;
pre += 4;
dst += 4;
}
pre += 32 - 8;
pre += 4 * pre_stride - 8;
dst += 4 * stride - 8;
}
}
void vp9_dequant_idct_add_y_block_8x8_c(int16_t *q, const int16_t *dq,
uint8_t *pre,
uint8_t *pre, int pre_stride,
uint8_t *dst,
int stride, MACROBLOCKD *xd) {
uint8_t *origdest = dst;
uint8_t *origpred = pre;
vp9_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride,
vp9_dequant_idct_add_8x8_c(q, dq, pre, dst, pre_stride, stride,
xd->plane[0].eobs[0]);
vp9_dequant_idct_add_8x8_c(&q[64], dq, origpred + 8,
origdest + 8, 16, stride,
origdest + 8, pre_stride, stride,
xd->plane[0].eobs[4]);
vp9_dequant_idct_add_8x8_c(&q[128], dq, origpred + 8 * 16,
origdest + 8 * stride, 16, stride,
vp9_dequant_idct_add_8x8_c(&q[128], dq, origpred + 8 * pre_stride,
origdest + 8 * stride, pre_stride, stride,
xd->plane[0].eobs[8]);
vp9_dequant_idct_add_8x8_c(&q[192], dq, origpred + 8 * 16 + 8,
origdest + 8 * stride + 8, 16, stride,
vp9_dequant_idct_add_8x8_c(&q[192], dq, origpred + 8 * pre_stride + 8,
origdest + 8 * stride + 8, pre_stride, stride,
xd->plane[0].eobs[12]);
}
void vp9_dequant_idct_add_y_block_lossless_c(int16_t *q, const int16_t *dq,
uint8_t *pre,
uint8_t *pre, int pre_stride,
uint8_t *dst,
int stride, MACROBLOCKD *xd) {
int i, j;
for (i = 0; i < 4; i++) {
for (j = 0; j < 4; j++) {
vp9_dequant_idct_add_lossless_c(q, dq, pre, dst, 16, stride,
vp9_dequant_idct_add_lossless_c(q, dq, pre, dst, pre_stride, stride,
xd->plane[0].eobs[i * 4 + j]);
q += 16;
pre += 4;
dst += 4;
}
pre += 64 - 16;
pre += 4 * pre_stride - 16;
dst += 4 * stride - 16;
}
}
void vp9_dequant_idct_add_uv_block_lossless_c(int16_t *q, const int16_t *dq,
uint8_t *pre,
uint8_t *pre, int pre_stride,
uint8_t *dst,
int stride,
uint16_t *eobs) {
@@ -99,14 +100,14 @@ void vp9_dequant_idct_add_uv_block_lossless_c(int16_t *q, const int16_t *dq,
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++) {
vp9_dequant_idct_add_lossless_c(q, dq, pre, dst, 8, stride,
vp9_dequant_idct_add_lossless_c(q, dq, pre, dst, pre_stride, stride,
eobs[i * 2 + j]);
q += 16;
pre += 4;
dst += 4;
}
pre += 32 - 8;
pre += 4 * pre_stride - 8;
dst += 4 * stride - 8;
}
}

View File

@@ -52,7 +52,7 @@ static void encode_intra4x4block(MACROBLOCK *x, int ib) {
b->bmi.as_mode.context = vp9_find_bpred_context(&x->e_mbd, b);
#endif
vp9_intra4x4_predict(&x->e_mbd, b, b->bmi.as_mode.first, b->predictor);
vp9_intra4x4_predict(&x->e_mbd, b, b->bmi.as_mode.first, b->predictor, 16);
vp9_subtract_b(be, b, 16);
tx_type = get_tx_type_4x4(&x->e_mbd, ib);
@@ -152,7 +152,7 @@ void vp9_encode_intra8x8(MACROBLOCK *x, int ib) {
int i;
TX_TYPE tx_type;
vp9_intra8x8_predict(xd, b, b->bmi.as_mode.first, b->predictor);
vp9_intra8x8_predict(xd, b, b->bmi.as_mode.first, b->predictor, 16);
// generate residual blocks
vp9_subtract_4b_c(be, b, 16);
@@ -227,7 +227,7 @@ static void encode_intra_uv4x4(MACROBLOCK *x, int ib, int mode) {
const int block = ib < 20 ? ib - 16 : ib - 20;
assert(ib >= 16 && ib < 24);
vp9_intra_uv4x4_predict(&x->e_mbd, b, mode, b->predictor);
vp9_intra_uv4x4_predict(&x->e_mbd, b, mode, b->predictor, 8);
vp9_subtract_b(be, b, 8);

View File

@@ -970,7 +970,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
rate = bmode_costs[mode];
#endif
vp9_intra4x4_predict(xd, b, mode, b->predictor);
vp9_intra4x4_predict(xd, b, mode, b->predictor, 16);
vp9_subtract_b(be, b, 16);
b->bmi.as_mode.first = mode;
@@ -1180,7 +1180,7 @@ static int64_t rd_pick_intra8x8block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
rate = mode_costs[mode];
b->bmi.as_mode.first = mode;
vp9_intra8x8_predict(xd, b, mode, b->predictor);
vp9_intra8x8_predict(xd, b, mode, b->predictor, 16);
vp9_subtract_4b_c(be, b, 16);