mpegvideo_enc: drop support for reusing the input motion vectors.

This misfeature is most likely completely useless and conflicts with
removing the mpegvideo-specific fields from AVFrame. In the improbable
case it is actually useful, it should be reimplemented in a better way.
Anton Khirnov 2013-03-03 20:43:43 +01:00
parent 7e350379f8
commit 6e7b50b427
2 changed files with 1 addition and 252 deletions
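For context: the removed path let an application supply its own motion vectors on the input AVFrame whenever avctx->me_threshold was non-zero. copy_picture_attributes() copied AVFrame.mb_type, motion_val and ref_index into the encoder's current Picture, and check_input_motion() scored those vectors against me_threshold instead of running a full motion search (both removed below). The following is a minimal, hypothetical sketch of the caller side, assuming caller-allocated buffers and one vector per 8x8 block; only the field names and me_threshold come from the removed code.

#include <libavcodec/avcodec.h>

/* Hypothetical sketch only (not an API example from this tree): attach
 * precomputed forward motion vectors to a frame before encoding, so the
 * pre-removal me_threshold path could reuse them.  mb_types, fwd_mvs and
 * fwd_refs are assumed caller-allocated buffers; their exact layout is
 * glossed over here. */
static void attach_input_motion(AVCodecContext *avctx, AVFrame *frame,
                                uint32_t *mb_types, int16_t (*fwd_mvs)[2],
                                int8_t *fwd_refs)
{
    avctx->me_threshold          = 1;        /* non-zero enabled the reuse path   */
    frame->mb_type               = mb_types; /* one entry per macroblock          */
    frame->motion_val[0]         = fwd_mvs;  /* forward vectors                   */
    frame->ref_index[0]          = fwd_refs; /* reference / field-select indices  */
    frame->motion_subsample_log2 = 3;        /* assumed: one vector per 8x8 block */
}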

@@ -825,152 +825,6 @@ static int interlaced_search(MpegEncContext *s, int ref_index,
    }
}

static void clip_input_mv(MpegEncContext * s, int16_t *mv, int interlaced){
    int ymax= s->me.ymax>>interlaced;
    int ymin= s->me.ymin>>interlaced;

    if(mv[0] < s->me.xmin) mv[0] = s->me.xmin;
    if(mv[0] > s->me.xmax) mv[0] = s->me.xmax;
    if(mv[1] < ymin) mv[1] = ymin;
    if(mv[1] > ymax) mv[1] = ymax;
}

static inline int check_input_motion(MpegEncContext * s, int mb_x, int mb_y, int p_type){
    MotionEstContext * const c= &s->me;
    Picture *p= s->current_picture_ptr;
    int mb_xy= mb_x + mb_y*s->mb_stride;
    int xy= 2*mb_x + 2*mb_y*s->b8_stride;
    int mb_type= s->current_picture.f.mb_type[mb_xy];
    int flags= c->flags;
    int shift= (flags&FLAG_QPEL) + 1;
    int mask= (1<<shift)-1;
    int x, y, i;
    int d=0;
    me_cmp_func cmpf= s->dsp.sse[0];
    me_cmp_func chroma_cmpf= s->dsp.sse[1];

    if(p_type && USES_LIST(mb_type, 1)){
        av_log(c->avctx, AV_LOG_ERROR, "backward motion vector in P frame\n");
        return INT_MAX/2;
    }
    assert(IS_INTRA(mb_type) || USES_LIST(mb_type,0) || USES_LIST(mb_type,1));

    for(i=0; i<4; i++){
        int xy= s->block_index[i];
        clip_input_mv(s, p->f.motion_val[0][xy], !!IS_INTERLACED(mb_type));
        clip_input_mv(s, p->f.motion_val[1][xy], !!IS_INTERLACED(mb_type));
    }

    if(IS_INTERLACED(mb_type)){
        int xy2= xy + s->b8_stride;
        s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTRA;
        c->stride<<=1;
        c->uvstride<<=1;

        if(!(s->flags & CODEC_FLAG_INTERLACED_ME)){
            av_log(c->avctx, AV_LOG_ERROR, "Interlaced macroblock selected but interlaced motion estimation disabled\n");
            return INT_MAX/2;
        }

        if(USES_LIST(mb_type, 0)){
            int field_select0= p->f.ref_index[0][4*mb_xy ];
            int field_select1= p->f.ref_index[0][4*mb_xy+2];
            assert(field_select0==0 ||field_select0==1);
            assert(field_select1==0 ||field_select1==1);
            init_interlaced_ref(s, 0);

            if(p_type){
                s->p_field_select_table[0][mb_xy]= field_select0;
                s->p_field_select_table[1][mb_xy]= field_select1;
                *(uint32_t*)s->p_field_mv_table[0][field_select0][mb_xy] = *(uint32_t*)p->f.motion_val[0][xy ];
                *(uint32_t*)s->p_field_mv_table[1][field_select1][mb_xy] = *(uint32_t*)p->f.motion_val[0][xy2];
                s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER_I;
            }else{
                s->b_field_select_table[0][0][mb_xy]= field_select0;
                s->b_field_select_table[0][1][mb_xy]= field_select1;
                *(uint32_t*)s->b_field_mv_table[0][0][field_select0][mb_xy] = *(uint32_t*)p->f.motion_val[0][xy ];
                *(uint32_t*)s->b_field_mv_table[0][1][field_select1][mb_xy] = *(uint32_t*)p->f.motion_val[0][xy2];
                s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_FORWARD_I;
            }

            x = p->f.motion_val[0][xy ][0];
            y = p->f.motion_val[0][xy ][1];
            d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select0, 0, cmpf, chroma_cmpf, flags);
            x = p->f.motion_val[0][xy2][0];
            y = p->f.motion_val[0][xy2][1];
            d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select1, 1, cmpf, chroma_cmpf, flags);
        }
        if(USES_LIST(mb_type, 1)){
            int field_select0 = p->f.ref_index[1][4 * mb_xy ];
            int field_select1 = p->f.ref_index[1][4 * mb_xy + 2];
            assert(field_select0==0 ||field_select0==1);
            assert(field_select1==0 ||field_select1==1);
            init_interlaced_ref(s, 2);

            s->b_field_select_table[1][0][mb_xy]= field_select0;
            s->b_field_select_table[1][1][mb_xy]= field_select1;
            *(uint32_t*)s->b_field_mv_table[1][0][field_select0][mb_xy] = *(uint32_t*)p->f.motion_val[1][xy ];
            *(uint32_t*)s->b_field_mv_table[1][1][field_select1][mb_xy] = *(uint32_t*)p->f.motion_val[1][xy2];
            if(USES_LIST(mb_type, 0)){
                s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_BIDIR_I;
            }else{
                s->mb_type[mb_xy]= CANDIDATE_MB_TYPE_BACKWARD_I;
            }

            x = p->f.motion_val[1][xy ][0];
            y = p->f.motion_val[1][xy ][1];
            d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select0+2, 0, cmpf, chroma_cmpf, flags);
            x = p->f.motion_val[1][xy2][0];
            y = p->f.motion_val[1][xy2][1];
            d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 8, field_select1+2, 1, cmpf, chroma_cmpf, flags);
            //FIXME bidir scores
        }
        c->stride>>=1;
        c->uvstride>>=1;
    }else if(IS_8X8(mb_type)){
        if(!(s->flags & CODEC_FLAG_4MV)){
            av_log(c->avctx, AV_LOG_ERROR, "4MV macroblock selected but 4MV encoding disabled\n");
            return INT_MAX/2;
        }
        cmpf= s->dsp.sse[1];
        chroma_cmpf= s->dsp.sse[1];
        init_mv4_ref(c);
        for(i=0; i<4; i++){
            xy= s->block_index[i];
            x= p->f.motion_val[0][xy][0];
            y= p->f.motion_val[0][xy][1];
            d+= cmp(s, x>>shift, y>>shift, x&mask, y&mask, 1, 8, i, i, cmpf, chroma_cmpf, flags);
        }
        s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER4V;
    }else{
        if(USES_LIST(mb_type, 0)){
            if(p_type){
                *(uint32_t*)s->p_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[0][xy];
                s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTER;
            }else if(USES_LIST(mb_type, 1)){
                *(uint32_t*)s->b_bidir_forw_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[0][xy];
                *(uint32_t*)s->b_bidir_back_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[1][xy];
                s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_BIDIR;
            }else{
                *(uint32_t*)s->b_forw_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[0][xy];
                s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_FORWARD;
            }
            x = p->f.motion_val[0][xy][0];
            y = p->f.motion_val[0][xy][1];
            d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 16, 0, 0, cmpf, chroma_cmpf, flags);
        }else if(USES_LIST(mb_type, 1)){
            *(uint32_t*)s->b_back_mv_table[mb_xy] = *(uint32_t*)p->f.motion_val[1][xy];
            s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_BACKWARD;

            x = p->f.motion_val[1][xy][0];
            y = p->f.motion_val[1][xy][1];
            d = cmp(s, x>>shift, y>>shift, x&mask, y&mask, 0, 16, 2, 0, cmpf, chroma_cmpf, flags);
        }else
            s->mb_type[mb_xy]=CANDIDATE_MB_TYPE_INTRA;
    }
    return d;
}

static inline int get_penalty_factor(int lambda, int lambda2, int type){
    switch(type&0xFF){
    default:
@@ -1027,21 +881,6 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
    pic->mb_var [s->mb_stride * mb_y + mb_x] = (varc+128)>>8;
    c->mb_var_sum_temp += (varc+128)>>8;

    if(c->avctx->me_threshold){
        vard= check_input_motion(s, mb_x, mb_y, 1);

        if((vard+128)>>8 < c->avctx->me_threshold){
            int p_score= FFMIN(vard, varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*100);
            int i_score= varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*20;
            pic->mc_mb_var[s->mb_stride * mb_y + mb_x] = (vard+128)>>8;
            c->mc_mb_var_sum_temp += (vard+128)>>8;
            c->scene_change_score+= ff_sqrt(p_score) - ff_sqrt(i_score);
            return;
        }
        if((vard+128)>>8 < c->avctx->mb_threshold)
            mb_type= s->mb_type[mb_x + mb_y*s->mb_stride];
    }

    switch(s->me_method) {
    case ME_ZERO:
    default:
@@ -1675,57 +1514,6 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
        return;
    }

    if(c->avctx->me_threshold){
        int vard= check_input_motion(s, mb_x, mb_y, 0);

        if((vard+128)>>8 < c->avctx->me_threshold){
            // pix = c->src[0][0];
            // sum = s->dsp.pix_sum(pix, s->linesize);
            // varc = s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500;
            // pic->mb_var [s->mb_stride * mb_y + mb_x] = (varc+128)>>8;
            s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] = (vard+128)>>8;
            /* pic->mb_mean [s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
               c->mb_var_sum_temp += (varc+128)>>8;*/
            c->mc_mb_var_sum_temp += (vard+128)>>8;
            /* if (vard <= 64<<8 || vard < varc) {
                   c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
               }else{
                   c->scene_change_score+= s->qscale * s->avctx->scenechange_factor;
               }*/
            return;
        }

        if((vard+128)>>8 < c->avctx->mb_threshold){
            type= s->mb_type[mb_y*s->mb_stride + mb_x];
            if(type == CANDIDATE_MB_TYPE_DIRECT){
                direct_search(s, mb_x, mb_y);
            }
            if(type == CANDIDATE_MB_TYPE_FORWARD || type == CANDIDATE_MB_TYPE_BIDIR){
                c->skip=0;
                ff_estimate_motion_b(s, mb_x, mb_y, s->b_forw_mv_table, 0, s->f_code);
            }
            if(type == CANDIDATE_MB_TYPE_BACKWARD || type == CANDIDATE_MB_TYPE_BIDIR){
                c->skip=0;
                ff_estimate_motion_b(s, mb_x, mb_y, s->b_back_mv_table, 2, s->b_code);
            }
            if(type == CANDIDATE_MB_TYPE_FORWARD_I || type == CANDIDATE_MB_TYPE_BIDIR_I){
                c->skip=0;
                c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_MV;
                interlaced_search(s, 0,
                                  s->b_field_mv_table[0], s->b_field_select_table[0],
                                  s->b_forw_mv_table[xy][0], s->b_forw_mv_table[xy][1], 1);
            }
            if(type == CANDIDATE_MB_TYPE_BACKWARD_I || type == CANDIDATE_MB_TYPE_BIDIR_I){
                c->skip=0;
                c->current_mv_penalty= c->mv_penalty[s->b_code] + MAX_MV;
                interlaced_search(s, 2,
                                  s->b_field_mv_table[1], s->b_field_select_table[1],
                                  s->b_back_mv_table[xy][0], s->b_back_mv_table[xy][1], 1);
            }
            return;
        }
    }

    if (s->codec_id == AV_CODEC_ID_MPEG4)
        dmin= direct_search(s, mb_x, mb_y);
    else

@@ -191,8 +191,6 @@ void ff_init_qscale_tab(MpegEncContext *s)
static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst,
                                    const AVFrame *src)
{
    int i;

    dst->pict_type = src->pict_type;
    dst->quality = src->quality;
    dst->coded_picture_number = src->coded_picture_number;
@@ -201,38 +199,6 @@ static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst,
    dst->pts = src->pts;
    dst->interlaced_frame = src->interlaced_frame;
    dst->top_field_first = src->top_field_first;

    if (s->avctx->me_threshold) {
        if (!src->motion_val[0])
            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
        if (!src->mb_type)
            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
        if (!src->ref_index[0])
            av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
        if (src->motion_subsample_log2 != dst->motion_subsample_log2)
            av_log(s->avctx, AV_LOG_ERROR,
                   "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
                   src->motion_subsample_log2, dst->motion_subsample_log2);

        memcpy(dst->mb_type, src->mb_type,
               s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));

        for (i = 0; i < 2; i++) {
            int stride = ((16 * s->mb_width ) >>
                          src->motion_subsample_log2) + 1;
            int height = ((16 * s->mb_height) >> src->motion_subsample_log2);

            if (src->motion_val[i] &&
                src->motion_val[i] != dst->motion_val[i]) {
                memcpy(dst->motion_val[i], src->motion_val[i],
                       2 * stride * height * sizeof(int16_t));
            }
            if (src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]) {
                memcpy(dst->ref_index[i], src->ref_index[i],
                       s->mb_stride * 4 * s->mb_height * sizeof(int8_t));
            }
        }
    }
}
static void update_duplicate_context_after_me(MpegEncContext *dst,
@@ -562,11 +528,6 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
    }

    i = (INT_MAX / 2 + 128) >> 8;
    if (avctx->me_threshold >= i) {
        av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n",
               i - 1);
        return -1;
    }
    if (avctx->mb_threshold >= i) {
        av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
               i - 1);
@@ -3164,7 +3125,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
    if(s->pict_type != AV_PICTURE_TYPE_I){
        s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
        s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
        if(s->pict_type != AV_PICTURE_TYPE_B && s->avctx->me_threshold==0){
        if (s->pict_type != AV_PICTURE_TYPE_B) {
            if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
                s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
}