Eliminated vp8mt_build_intra_predictors_mbuv_s
Reworked the code to use vp8_build_intra_predictors_mbuv_s instead. This is
WIP with the goal of eliminating all functions in reconintra_mt.h

Change-Id: I61c4a132684544b24a38c4a90044597c6ec0dd52
parent b0a12a2880
commit f2bd11faa4
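With the reworked prototype, vp8_build_intra_predictors_mbuv_s no longer reads the above row and left column out of MACROBLOCKD; the caller supplies them, together with the left-column stride and the output pointers. That is what lets the multi-threaded decoder pass its saved per-row border buffers (pbi->mt_uabove_row and friends, with a left stride of 1) while the single-threaded paths keep passing pointers into the reconstructed frame. Below is a minimal illustrative sketch of the common non-MT call pattern; the wrapper name and the include path are assumptions, not part of this patch, and the prototype is repeated only to keep the sketch self-contained (it normally comes from the generated RTCD header).

    /* Illustrative sketch only -- not part of the patch. */
    #include "vp8/common/blockd.h"   /* MACROBLOCKD; header path assumed */

    /* Matches the rtcd prototype changed in this commit. */
    extern void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x,
                                                  unsigned char *uabove_row,
                                                  unsigned char *vabove_row,
                                                  unsigned char *uleft,
                                                  unsigned char *vleft,
                                                  int left_stride,
                                                  unsigned char *upred_ptr,
                                                  unsigned char *vpred_ptr);

    /* Hypothetical wrapper: feed the chroma predictor from the
     * reconstructed frame borders and write the prediction in place. */
    static void predict_uv_from_reconstructed_frame(MACROBLOCKD *xd)
    {
        vp8_build_intra_predictors_mbuv_s(xd,
                                          xd->dst.u_buffer - xd->dst.uv_stride, /* U row above */
                                          xd->dst.v_buffer - xd->dst.uv_stride, /* V row above */
                                          xd->dst.u_buffer - 1,                 /* U column to the left */
                                          xd->dst.v_buffer - 1,                 /* V column to the left */
                                          xd->dst.uv_stride,                    /* stride of the left column */
                                          xd->dst.u_buffer, xd->dst.v_buffer);  /* output pointers */
    }

The threading.c hunk further down shows the other branch: when the loop filter is enabled, the saved MT border buffers are passed instead, with a left_stride of 1.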
@@ -400,24 +400,27 @@ void vp8_build_intra_predictors_mbuv_c(MACROBLOCKD *x)
     }
 }
 
-void vp8_build_intra_predictors_mbuv_s_c(MACROBLOCKD *x)
+void vp8_build_intra_predictors_mbuv_s_c(MACROBLOCKD *x,
+                                         unsigned char * uabove_row,
+                                         unsigned char * vabove_row,
+                                         unsigned char * uleft,
+                                         unsigned char * vleft,
+                                         int left_stride,
+                                         unsigned char * upred_ptr,
+                                         unsigned char * vpred_ptr)
 {
-    unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride;
-    unsigned char uleft_col[16];
+    unsigned char uleft_col[8];
     unsigned char utop_left = uabove_row[-1];
-    unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride;
-    unsigned char vleft_col[20];
+    unsigned char vleft_col[8];
     unsigned char vtop_left = vabove_row[-1];
-    unsigned char *upred_ptr = x->dst.u_buffer; /*&x->predictor[256];*/
-    unsigned char *vpred_ptr = x->dst.v_buffer; /*&x->predictor[320];*/
     int uv_stride = x->dst.uv_stride;
 
     int i, j;
 
     for (i = 0; i < 8; i++)
     {
-        uleft_col[i] = x->dst.u_buffer [i* x->dst.uv_stride -1];
-        vleft_col[i] = x->dst.v_buffer [i* x->dst.uv_stride -1];
+        uleft_col[i] = uleft [i* left_stride];
+        vleft_col[i] = vleft [i* left_stride];
     }
 
     switch (x->mode_info_context->mbmi.uv_mode)
@@ -131,8 +131,8 @@ specialize vp8_build_intra_predictors_mby_s sse2 ssse3 neon
 prototype void vp8_build_intra_predictors_mbuv "struct macroblockd *x"
 specialize vp8_build_intra_predictors_mbuv sse2 ssse3
 
-prototype void vp8_build_intra_predictors_mbuv_s "struct macroblockd *x"
-specialize vp8_build_intra_predictors_mbuv_s sse2 ssse3
+prototype void vp8_build_intra_predictors_mbuv_s "struct macroblockd *x, unsigned char * uabove_row, unsigned char * vabove_row, unsigned char *uleft, unsigned char *vleft, int left_stride, unsigned char * upred_ptr, unsigned char * vpred_ptr"
+#TODO: fix assembly --- specialize vp8_build_intra_predictors_mbuv_s sse2 ssse3
 
 prototype void vp8_intra4x4_predict "unsigned char *src, int src_stride, int b_mode, unsigned char *dst, int dst_stride"
 specialize vp8_intra4x4_predict media
@@ -156,7 +156,13 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
     /* do prediction */
     if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
     {
-        vp8_build_intra_predictors_mbuv_s(xd);
+        vp8_build_intra_predictors_mbuv_s(xd,
+                                          xd->dst.u_buffer - xd->dst.uv_stride,
+                                          xd->dst.v_buffer - xd->dst.uv_stride,
+                                          xd->dst.u_buffer - 1,
+                                          xd->dst.v_buffer - 1,
+                                          xd->dst.uv_stride,
+                                          xd->dst.u_buffer, xd->dst.v_buffer);
 
         if (mode != B_PRED)
         {
@@ -14,138 +14,6 @@
 #include "vpx_mem/vpx_mem.h"
 #include "onyxd_int.h"
 
-/* For skip_recon_mb(), add vp8_build_intra_predictors_mby_s(MACROBLOCKD *x) and
- * vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x).
- */
-
-void vp8mt_build_intra_predictors_mby(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
-{
-    unsigned char *yabove_row; /* = x->dst.y_buffer - x->dst.y_stride; */
-    unsigned char *yleft_col;
-    unsigned char yleft_buf[16];
-    unsigned char ytop_left; /* = yabove_row[-1]; */
-    unsigned char *ypred_ptr = x->predictor;
-    int r, c, i;
-
-    if (pbi->common.filter_level)
-    {
-        yabove_row = pbi->mt_yabove_row[mb_row] + mb_col*16 +32;
-        yleft_col = pbi->mt_yleft_col[mb_row];
-    } else
-    {
-        yabove_row = x->dst.y_buffer - x->dst.y_stride;
-
-        for (i = 0; i < 16; i++)
-            yleft_buf[i] = x->dst.y_buffer [i* x->dst.y_stride -1];
-        yleft_col = yleft_buf;
-    }
-
-    ytop_left = yabove_row[-1];
-
-    /* for Y */
-    switch (x->mode_info_context->mbmi.mode)
-    {
-    case DC_PRED:
-    {
-        int expected_dc;
-        int i;
-        int shift;
-        int average = 0;
-
-
-        if (x->up_available || x->left_available)
-        {
-            if (x->up_available)
-            {
-                for (i = 0; i < 16; i++)
-                {
-                    average += yabove_row[i];
-                }
-            }
-
-            if (x->left_available)
-            {
-
-                for (i = 0; i < 16; i++)
-                {
-                    average += yleft_col[i];
-                }
-
-            }
-
-
-
-            shift = 3 + x->up_available + x->left_available;
-            expected_dc = (average + (1 << (shift - 1))) >> shift;
-        }
-        else
-        {
-            expected_dc = 128;
-        }
-
-        vpx_memset(ypred_ptr, expected_dc, 256);
-    }
-    break;
-    case V_PRED:
-    {
-
-        for (r = 0; r < 16; r++)
-        {
-
-            ((int *)ypred_ptr)[0] = ((int *)yabove_row)[0];
-            ((int *)ypred_ptr)[1] = ((int *)yabove_row)[1];
-            ((int *)ypred_ptr)[2] = ((int *)yabove_row)[2];
-            ((int *)ypred_ptr)[3] = ((int *)yabove_row)[3];
-            ypred_ptr += 16;
-        }
-    }
-    break;
-    case H_PRED:
-    {
-
-        for (r = 0; r < 16; r++)
-        {
-
-            vpx_memset(ypred_ptr, yleft_col[r], 16);
-            ypred_ptr += 16;
-        }
-
-    }
-    break;
-    case TM_PRED:
-    {
-
-        for (r = 0; r < 16; r++)
-        {
-            for (c = 0; c < 16; c++)
-            {
-                int pred = yleft_col[r] + yabove_row[ c] - ytop_left;
-
-                if (pred < 0)
-                    pred = 0;
-
-                if (pred > 255)
-                    pred = 255;
-
-                ypred_ptr[c] = pred;
-            }
-
-            ypred_ptr += 16;
-        }
-
-    }
-    break;
-    case B_PRED:
-    case NEARESTMV:
-    case NEARMV:
-    case ZEROMV:
-    case NEWMV:
-    case SPLITMV:
-    case MB_MODE_COUNT:
-        break;
-    }
-}
-
 void vp8mt_build_intra_predictors_mby_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
 {
     unsigned char *yabove_row; /* = x->dst.y_buffer - x->dst.y_stride; */
@@ -282,325 +150,6 @@ void vp8mt_build_intra_predictors_mby_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_r
     }
 }
 
-void vp8mt_build_intra_predictors_mbuv(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
-{
-    unsigned char *uabove_row; /* = x->dst.u_buffer - x->dst.uv_stride; */
-    unsigned char *uleft_col; /*[16];*/
-    unsigned char uleft_buf[8];
-    unsigned char utop_left; /* = uabove_row[-1]; */
-    unsigned char *vabove_row; /* = x->dst.v_buffer - x->dst.uv_stride; */
-    unsigned char *vleft_col; /*[20];*/
-    unsigned char vleft_buf[8];
-    unsigned char vtop_left; /* = vabove_row[-1]; */
-    unsigned char *upred_ptr = &x->predictor[256];
-    unsigned char *vpred_ptr = &x->predictor[320];
-    int i, j;
-
-    if (pbi->common.filter_level)
-    {
-        uabove_row = pbi->mt_uabove_row[mb_row] + mb_col*8 +16;
-        vabove_row = pbi->mt_vabove_row[mb_row] + mb_col*8 +16;
-        uleft_col = pbi->mt_uleft_col[mb_row];
-        vleft_col = pbi->mt_vleft_col[mb_row];
-    } else
-    {
-        uabove_row = x->dst.u_buffer - x->dst.uv_stride;
-        vabove_row = x->dst.v_buffer - x->dst.uv_stride;
-
-        for (i = 0; i < 8; i++)
-        {
-            uleft_buf[i] = x->dst.u_buffer [i* x->dst.uv_stride -1];
-            vleft_buf[i] = x->dst.v_buffer [i* x->dst.uv_stride -1];
-        }
-        uleft_col = uleft_buf;
-        vleft_col = vleft_buf;
-    }
-    utop_left = uabove_row[-1];
-    vtop_left = vabove_row[-1];
-
-    switch (x->mode_info_context->mbmi.uv_mode)
-    {
-    case DC_PRED:
-    {
-        int expected_udc;
-        int expected_vdc;
-        int i;
-        int shift;
-        int Uaverage = 0;
-        int Vaverage = 0;
-
-        if (x->up_available)
-        {
-            for (i = 0; i < 8; i++)
-            {
-                Uaverage += uabove_row[i];
-                Vaverage += vabove_row[i];
-            }
-        }
-
-        if (x->left_available)
-        {
-            for (i = 0; i < 8; i++)
-            {
-                Uaverage += uleft_col[i];
-                Vaverage += vleft_col[i];
-            }
-        }
-
-        if (!x->up_available && !x->left_available)
-        {
-            expected_udc = 128;
-            expected_vdc = 128;
-        }
-        else
-        {
-            shift = 2 + x->up_available + x->left_available;
-            expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
-            expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
-        }
-
-
-        vpx_memset(upred_ptr, expected_udc, 64);
-        vpx_memset(vpred_ptr, expected_vdc, 64);
-
-
-    }
-    break;
-    case V_PRED:
-    {
-        int i;
-
-        for (i = 0; i < 8; i++)
-        {
-            vpx_memcpy(upred_ptr, uabove_row, 8);
-            vpx_memcpy(vpred_ptr, vabove_row, 8);
-            upred_ptr += 8;
-            vpred_ptr += 8;
-        }
-
-    }
-    break;
-    case H_PRED:
-    {
-        int i;
-
-        for (i = 0; i < 8; i++)
-        {
-            vpx_memset(upred_ptr, uleft_col[i], 8);
-            vpx_memset(vpred_ptr, vleft_col[i], 8);
-            upred_ptr += 8;
-            vpred_ptr += 8;
-        }
-    }
-
-    break;
-    case TM_PRED:
-    {
-        int i;
-
-        for (i = 0; i < 8; i++)
-        {
-            for (j = 0; j < 8; j++)
-            {
-                int predu = uleft_col[i] + uabove_row[j] - utop_left;
-                int predv = vleft_col[i] + vabove_row[j] - vtop_left;
-
-                if (predu < 0)
-                    predu = 0;
-
-                if (predu > 255)
-                    predu = 255;
-
-                if (predv < 0)
-                    predv = 0;
-
-                if (predv > 255)
-                    predv = 255;
-
-                upred_ptr[j] = predu;
-                vpred_ptr[j] = predv;
-            }
-
-            upred_ptr += 8;
-            vpred_ptr += 8;
-        }
-
-    }
-    break;
-    case B_PRED:
-    case NEARESTMV:
-    case NEARMV:
-    case ZEROMV:
-    case NEWMV:
-    case SPLITMV:
-    case MB_MODE_COUNT:
-        break;
-    }
-}
-
-void vp8mt_build_intra_predictors_mbuv_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
-{
-    unsigned char *uabove_row; /* = x->dst.u_buffer - x->dst.uv_stride; */
-    unsigned char *uleft_col; /*[16];*/
-    unsigned char uleft_buf[8];
-    unsigned char utop_left; /* = uabove_row[-1]; */
-    unsigned char *vabove_row; /* = x->dst.v_buffer - x->dst.uv_stride; */
-    unsigned char *vleft_col; /*[20];*/
-    unsigned char vleft_buf[8];
-    unsigned char vtop_left; /* = vabove_row[-1]; */
-    unsigned char *upred_ptr = x->dst.u_buffer; /*&x->predictor[256];*/
-    unsigned char *vpred_ptr = x->dst.v_buffer; /*&x->predictor[320];*/
-    int uv_stride = x->dst.uv_stride;
-    int i, j;
-
-    if (pbi->common.filter_level)
-    {
-        uabove_row = pbi->mt_uabove_row[mb_row] + mb_col*8 +16;
-        vabove_row = pbi->mt_vabove_row[mb_row] + mb_col*8 +16;
-        uleft_col = pbi->mt_uleft_col[mb_row];
-        vleft_col = pbi->mt_vleft_col[mb_row];
-    } else
-    {
-        uabove_row = x->dst.u_buffer - x->dst.uv_stride;
-        vabove_row = x->dst.v_buffer - x->dst.uv_stride;
-
-        for (i = 0; i < 8; i++)
-        {
-            uleft_buf[i] = x->dst.u_buffer [i* x->dst.uv_stride -1];
-            vleft_buf[i] = x->dst.v_buffer [i* x->dst.uv_stride -1];
-        }
-        uleft_col = uleft_buf;
-        vleft_col = vleft_buf;
-    }
-    utop_left = uabove_row[-1];
-    vtop_left = vabove_row[-1];
-
-    switch (x->mode_info_context->mbmi.uv_mode)
-    {
-    case DC_PRED:
-    {
-        int expected_udc;
-        int expected_vdc;
-        int i;
-        int shift;
-        int Uaverage = 0;
-        int Vaverage = 0;
-
-        if (x->up_available)
-        {
-            for (i = 0; i < 8; i++)
-            {
-                Uaverage += uabove_row[i];
-                Vaverage += vabove_row[i];
-            }
-        }
-
-        if (x->left_available)
-        {
-            for (i = 0; i < 8; i++)
-            {
-                Uaverage += uleft_col[i];
-                Vaverage += vleft_col[i];
-            }
-        }
-
-        if (!x->up_available && !x->left_available)
-        {
-            expected_udc = 128;
-            expected_vdc = 128;
-        }
-        else
-        {
-            shift = 2 + x->up_available + x->left_available;
-            expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
-            expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
-        }
-
-
-        /*vpx_memset(upred_ptr,expected_udc,64);
-        vpx_memset(vpred_ptr,expected_vdc,64);*/
-        for (i = 0; i < 8; i++)
-        {
-            vpx_memset(upred_ptr, expected_udc, 8);
-            vpx_memset(vpred_ptr, expected_vdc, 8);
-            upred_ptr += uv_stride; /*8;*/
-            vpred_ptr += uv_stride; /*8;*/
-        }
-    }
-    break;
-    case V_PRED:
-    {
-        int i;
-
-        for (i = 0; i < 8; i++)
-        {
-            vpx_memcpy(upred_ptr, uabove_row, 8);
-            vpx_memcpy(vpred_ptr, vabove_row, 8);
-            upred_ptr += uv_stride; /*8;*/
-            vpred_ptr += uv_stride; /*8;*/
-        }
-
-    }
-    break;
-    case H_PRED:
-    {
-        int i;
-
-        for (i = 0; i < 8; i++)
-        {
-            vpx_memset(upred_ptr, uleft_col[i], 8);
-            vpx_memset(vpred_ptr, vleft_col[i], 8);
-            upred_ptr += uv_stride; /*8;*/
-            vpred_ptr += uv_stride; /*8;*/
-        }
-    }
-
-    break;
-    case TM_PRED:
-    {
-        int i;
-
-        for (i = 0; i < 8; i++)
-        {
-            for (j = 0; j < 8; j++)
-            {
-                int predu = uleft_col[i] + uabove_row[j] - utop_left;
-                int predv = vleft_col[i] + vabove_row[j] - vtop_left;
-
-                if (predu < 0)
-                    predu = 0;
-
-                if (predu > 255)
-                    predu = 255;
-
-                if (predv < 0)
-                    predv = 0;
-
-                if (predv > 255)
-                    predv = 255;
-
-                upred_ptr[j] = predu;
-                vpred_ptr[j] = predv;
-            }
-
-            upred_ptr += uv_stride; /*8;*/
-            vpred_ptr += uv_stride; /*8;*/
-        }
-
-    }
-    break;
-    case B_PRED:
-    case NEARESTMV:
-    case NEARMV:
-    case ZEROMV:
-    case NEWMV:
-    case SPLITMV:
-    case MB_MODE_COUNT:
-        break;
-    }
-}
-
-
 void vp8mt_predict_intra4x4(VP8D_COMP *pbi,
                             MACROBLOCKD *xd,
                             int b_mode,
@@ -14,10 +14,7 @@
 
 /* reconintra functions used in multi-threaded decoder */
 #if CONFIG_MULTITHREAD
-extern void vp8mt_build_intra_predictors_mby(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col);
 extern void vp8mt_build_intra_predictors_mby_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col);
-extern void vp8mt_build_intra_predictors_mbuv(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col);
-extern void vp8mt_build_intra_predictors_mbuv_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col);
 
 extern void vp8mt_predict_intra4x4(VP8D_COMP *pbi, MACROBLOCKD *x, int b_mode, unsigned char *predictor, int stride, int mb_row, int mb_col, int num);
 extern void vp8mt_intra_prediction_down_copy(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col);
@@ -95,31 +95,11 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int m
     }
     else if (!vp8dx_bool_error(xd->current_bc))
    {
+        int eobtotal;
         eobtotal = vp8_decode_mb_tokens(pbi, xd);
-    }
 
-    eobtotal |= (xd->mode_info_context->mbmi.mode == B_PRED ||
-                 xd->mode_info_context->mbmi.mode == SPLITMV);
-    if (!eobtotal && !vp8dx_bool_error(xd->current_bc))
-    {
-        /* Special case: Force the loopfilter to skip when eobtotal and
-         * mb_skip_coeff are zero.
-         * */
-        xd->mode_info_context->mbmi.mb_skip_coeff = 1;
-
-        /*mt_skip_recon_mb(pbi, xd, mb_row, mb_col);*/
-        if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
-        {
-            vp8mt_build_intra_predictors_mbuv_s(pbi, xd, mb_row, mb_col);
-            vp8mt_build_intra_predictors_mby_s(pbi, xd, mb_row, mb_col);
-        }
-        else
-        {
-            vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
-                                               xd->dst.u_buffer, xd->dst.v_buffer,
-                                               xd->dst.y_stride, xd->dst.uv_stride);
-        }
-        return;
+        /* Special case: Force the loopfilter to skip when eobtotal is zero */
+        xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal==0);
     }
 
     if (xd->segmentation_enabled)
@@ -128,7 +108,33 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int m
     /* do prediction */
     if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
     {
-        vp8mt_build_intra_predictors_mbuv_s(pbi, xd, mb_row, mb_col);
+        if (pbi->common.filter_level)
+        {
+            unsigned char *uabove_row;
+            unsigned char *vabove_row;
+            unsigned char * uleft_col;
+            unsigned char * vleft_col;
+            uabove_row = pbi->mt_uabove_row[mb_row] + mb_col*8 +16;
+            vabove_row = pbi->mt_vabove_row[mb_row] + mb_col*8 +16;
+            uleft_col = pbi->mt_uleft_col[mb_row];
+            vleft_col = pbi->mt_vleft_col[mb_row];
+            vp8_build_intra_predictors_mbuv_s(xd, uabove_row,
+                                              vabove_row,
+                                              uleft_col,
+                                              vleft_col,
+                                              1,
+                                              xd->dst.u_buffer, xd->dst.v_buffer);
+        }
+        else
+        {
+            vp8_build_intra_predictors_mbuv_s(xd,
+                                              xd->dst.u_buffer - xd->dst.uv_stride,
+                                              xd->dst.v_buffer - xd->dst.uv_stride,
+                                              xd->dst.u_buffer - 1,
+                                              xd->dst.v_buffer - 1,
+                                              xd->dst.uv_stride,
+                                              xd->dst.u_buffer, xd->dst.v_buffer);
+        }
 
         if (xd->mode_info_context->mbmi.mode != B_PRED)
         {
@@ -112,7 +112,12 @@ void vp8_encode_intra16x16mbuv(MACROBLOCK *x)
 {
     MACROBLOCKD *xd = &x->e_mbd;
 
-    vp8_build_intra_predictors_mbuv_s(&x->e_mbd);
+    vp8_build_intra_predictors_mbuv_s(xd, xd->dst.u_buffer - xd->dst.uv_stride,
+                                      xd->dst.v_buffer - xd->dst.uv_stride,
+                                      xd->dst.u_buffer - 1,
+                                      xd->dst.v_buffer - 1,
+                                      xd->dst.uv_stride,
+                                      xd->dst.u_buffer, xd->dst.v_buffer);
 
     vp8_subtract_mbuv(x->src_diff, x->src.u_buffer,
                       x->src.v_buffer, x->src.uv_stride, xd->dst.u_buffer,