mpegvideo.c: K&R formatting and cosmetics.

Signed-off-by: Ronald S. Bultje <rsbultje@gmail.com>
Konstantin Todorov 2011-12-25 09:58:01 -08:00 committed by Ronald S. Bultje
parent 04a14d4d25
commit c65dfac466

@@ -1132,7 +1132,8 @@ int ff_find_unused_picture(MpegEncContext *s, int shared)
return AVERROR_INVALIDDATA;
}
static void update_noise_reduction(MpegEncContext *s){
static void update_noise_reduction(MpegEncContext *s)
{
int intra, i;
for (intra = 0; intra < 2; intra++) {
@@ -1144,13 +1145,17 @@ static void update_noise_reduction(MpegEncContext *s){
}
for (i = 0; i < 64; i++) {
s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
s->dct_offset[intra][i] = (s->avctx->noise_reduction *
s->dct_count[intra] +
s->dct_error_sum[intra][i] / 2) /
(s->dct_error_sum[intra][i] + 1);
}
}
}
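The reflowed dct_offset expression keeps the usual rounded-division idiom: adding half the divisor to the numerator before a truncating divide gives round-to-nearest, and the "+ 1" on the divisor guards against division by zero. A minimal standalone illustration of the idiom (plain C, no libavcodec types assumed):

#include <stdio.h>

/* Truncating vs. rounded integer division, the idiom used for dct_offset[]. */
static unsigned div_round(unsigned num, unsigned den)
{
    return (num + den / 2) / den;
}

int main(void)
{
    printf("7/2 truncated: %u\n", 7u / 2u);          /* prints 3 */
    printf("7/2 rounded:   %u\n", div_round(7, 2));  /* prints 4 */
    return 0;
}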
/**
* generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
* generic function for encode/decode called after coding/decoding
* the header and before a frame is coded/decoded.
*/
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
@@ -1158,10 +1163,13 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
Picture *pic;
s->mb_skipped = 0;
assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
s->codec_id == CODEC_ID_SVQ3);
/* mark & release old frames */
if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
s->last_picture_ptr != s->next_picture_ptr &&
s->last_picture_ptr->f.data[0]) {
if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
if (s->last_picture_ptr->owner2 == s)
free_frame_buffer(s, s->last_picture_ptr);
@@ -1170,9 +1178,12 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
/* if (mpeg124/h263) */
if (!s->encoding) {
for (i = 0; i < s->picture_count; i++) {
if (s->picture[i].owner2 == s && s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
&s->picture[i] != s->next_picture_ptr &&
s->picture[i].f.reference) {
if (!(avctx->active_thread_type & FF_THREAD_FRAME))
av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
av_log(avctx, AV_LOG_ERROR,
"releasing zombie picture\n");
free_frame_buffer(s, &s->picture[i]);
}
}
@@ -1183,12 +1194,13 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
if (!s->encoding) {
ff_release_unused_pictures(s, 1);
if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
else{
if (s->current_picture_ptr &&
s->current_picture_ptr->f.data[0] == NULL) {
// we already have an unused image
// (maybe it was set before reading the header)
pic = s->current_picture_ptr;
} else {
i = ff_find_unused_picture(s, 0);
if (i < 0)
return i;
pic = &s->picture[i];
}
@@ -1208,11 +1220,14 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
s->current_picture_ptr = pic;
// FIXME use only the vars from current_pic
s->current_picture_ptr->f.top_field_first = s->top_field_first;
if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
s->codec_id == CODEC_ID_MPEG2VIDEO) {
if (s->picture_structure != PICT_FRAME)
s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
s->current_picture_ptr->f.top_field_first =
(s->picture_structure == PICT_TOP_FIELD) == s->first_field;
}
s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
!s->progressive_sequence;
s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
}
@@ -1228,53 +1243,64 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
if (!s->dropable)
s->next_picture_ptr = s->current_picture_ptr;
}
/* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
/* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
s->pict_type, s->dropable); */
if (s->codec_id != CODEC_ID_H264) {
if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
(s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
if ((s->last_picture_ptr == NULL ||
s->last_picture_ptr->f.data[0] == NULL) &&
(s->pict_type != AV_PICTURE_TYPE_I ||
s->picture_structure != PICT_FRAME)) {
if (s->pict_type != AV_PICTURE_TYPE_I)
av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
av_log(avctx, AV_LOG_ERROR,
"warning: first frame is no keyframe\n");
else if (s->picture_structure != PICT_FRAME)
av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
av_log(avctx, AV_LOG_INFO,
"allocate dummy last picture for field based first keyframe\n");
/* Allocate a dummy frame */
i = ff_find_unused_picture(s, 0);
if (i < 0)
return i;
s->last_picture_ptr = &s->picture[i];
if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
return -1;
ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
INT_MAX, 0);
ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
INT_MAX, 1);
}
if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
if ((s->next_picture_ptr == NULL ||
s->next_picture_ptr->f.data[0] == NULL) &&
s->pict_type == AV_PICTURE_TYPE_B) {
/* Allocate a dummy frame */
i = ff_find_unused_picture(s, 0);
if (i < 0)
return i;
s->next_picture_ptr = &s->picture[i];
if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
return -1;
ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
INT_MAX, 0);
ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
INT_MAX, 1);
}
}
if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
if (s->last_picture_ptr)
ff_copy_picture(&s->last_picture, s->last_picture_ptr);
if (s->next_picture_ptr)
ff_copy_picture(&s->next_picture, s->next_picture_ptr);
assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));
assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
s->last_picture_ptr->f.data[0]));
if (s->picture_structure != PICT_FRAME && s->out_format != FMT_H264) {
int i;
for (i = 0; i < 4; i++) {
if (s->picture_structure == PICT_BOTTOM_FIELD) {
s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
s->current_picture.f.data[i] +=
s->current_picture.f.linesize[i];
}
s->current_picture.f.linesize[i] *= 2;
s->last_picture.f.linesize[i] *= 2;
@@ -1284,8 +1310,9 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
s->err_recognition = avctx->err_recognition;
/* set dequantizer, we can't do it during init as it might change for mpeg4
and we can't do it in the header decode as init is not called for mpeg4 there yet */
/* set dequantizer, we can't do it during init as
* it might change for mpeg4 and we can't do it in the header
* decode as init is not called for mpeg4 there yet */
if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
@@ -1299,7 +1326,6 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
if (s->dct_error_sum) {
assert(s->avctx->noise_reduction && s->encoding);
update_noise_reduction(s);
}
@@ -1309,7 +1335,8 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
return 0;
}
/* generic function for encode/decode called after a frame has been coded/decoded */
/* generic function for encode/decode called after a
* frame has been coded/decoded. */
void MPV_frame_end(MpegEncContext *s)
{
int i;
@@ -1317,24 +1344,27 @@ void MPV_frame_end(MpegEncContext *s)
// just to make sure that all data is rendered.
if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
ff_xvmc_field_end(s);
}else if((s->error_count || s->encoding)
&& !s->avctx->hwaccel
&& !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
&& s->unrestricted_mv
&& s->current_picture.f.reference
&& !s->intra_only
&& !(s->flags&CODEC_FLAG_EMU_EDGE)) {
} else if ((s->error_count || s->encoding) &&
!s->avctx->hwaccel &&
!(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
s->unrestricted_mv &&
s->current_picture.f.reference &&
!s->intra_only &&
!(s->flags & CODEC_FLAG_EMU_EDGE)) {
int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
s->h_edge_pos, s->v_edge_pos,
EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
EDGE_WIDTH, EDGE_WIDTH,
EDGE_TOP | EDGE_BOTTOM);
s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
EDGE_TOP | EDGE_BOTTOM);
s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
EDGE_TOP | EDGE_BOTTOM);
}
emms_c();
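For context, draw_edges() pads the reconstructed frame with an EDGE_WIDTH border of replicated pixels so that unrestricted motion vectors may reference samples outside the visible area; the chroma planes get narrower borders via the log2_chroma_w/h shifts. A rough scalar sketch of this kind of padding, assuming an allocation that already reserves the border (hypothetical helper, not the dsputil implementation):

#include <stdint.h>
#include <string.h>

/* Replicate the outermost pixels of a plane into an edge-wide border.
 * buf points at the top-left visible pixel; the buffer is assumed to
 * extend edge pixels beyond every side. Sketch only. */
static void pad_edges_sketch(uint8_t *buf, int linesize,
                             int width, int height, int edge)
{
    int y;
    /* left and right columns */
    for (y = 0; y < height; y++) {
        memset(buf + y * linesize - edge,  buf[y * linesize],             edge);
        memset(buf + y * linesize + width, buf[y * linesize + width - 1], edge);
    }
    /* top and bottom rows, corners included */
    for (y = 1; y <= edge; y++) {
        memcpy(buf - y * linesize - edge,
               buf - edge, width + 2 * edge);
        memcpy(buf + (height - 1 + y) * linesize - edge,
               buf + (height - 1) * linesize - edge, width + 2 * edge);
    }
}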
@@ -1358,7 +1388,8 @@ void MPV_frame_end(MpegEncContext *s)
if (s->encoding) {
/* release non-reference frames */
for (i = 0; i < s->picture_count; i++) {
if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
if (s->picture[i].f.data[0] && !s->picture[i].f.reference
/* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
free_frame_buffer(s, &s->picture[i]);
}
}
@@ -1372,7 +1403,8 @@ void MPV_frame_end(MpegEncContext *s)
s->avctx->coded_frame = (AVFrame *) s->current_picture_ptr;
if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
ff_thread_report_progress((AVFrame *) s->current_picture_ptr,
s->mb_height - 1, 0);
}
}
@@ -1383,7 +1415,9 @@ void MPV_frame_end(MpegEncContext *s)
* @param stride stride/linesize of the image
* @param color color of the arrow
*/
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
int w, int h, int stride, int color)
{
int x, y, fr, f;
sx = av_clip(sx, 0, w - 1);
@@ -1401,7 +1435,7 @@ static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h
buf += sx + sy * stride;
ex -= sx;
f = ((ey - sy) << 16) / ex;
for(x= 0; x <= ex; x++){
for (x = 0; x <= ex; x++) {
y = (x * f) >> 16;
fr = (x * f) & 0xFFFF;
buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
@@ -1414,9 +1448,11 @@ static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h
}
buf += sx + sy * stride;
ey -= sy;
if(ey) f= ((ex-sx)<<16)/ey;
else f= 0;
for(y= 0; y <= ey; y++){
if (ey)
f = ((ex - sx) << 16) / ey;
else
f = 0;
for (y = 0; y <= ey; y++) {
x = (y * f) >> 16;
fr = (y * f) & 0xFFFF;
buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
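Both branches of draw_line() step along the major axis in 16.16 fixed point: f holds the slope, the high 16 bits of the product pick the minor-axis coordinate, and the low 16 bits split the colour between two adjacent pixels, giving a crudely anti-aliased line. A standalone sketch of the arithmetic for the horizontal case:

#include <stdio.h>

int main(void)
{
    int ex = 10, ey = 3;           /* mostly horizontal segment */
    int f  = (ey << 16) / ex;      /* slope as a 16.16 fixed-point value */
    int x;

    for (x = 0; x <= ex; x++) {
        int y  = (x * f) >> 16;    /* integer row, as in draw_line() */
        int fr = (x * f) & 0xFFFF; /* fractional part -> blend weight */
        printf("x=%2d  y=%d  weight=%5d/65536\n", x, y, fr);
    }
    return 0;
}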
@@ -1432,7 +1468,9 @@ static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h
* @param stride stride/linesize of the image
* @param color color of the arrow
*/
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
int ey, int w, int h, int stride, int color)
{
int dx,dy;
sx = av_clip(sx, -100, w + 100);
@@ -1459,33 +1497,48 @@ static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int
}
/**
* Print debuging info for the given picture.
* Print debugging info for the given picture.
*/
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
{
if (s->avctx->hwaccel || !pict || !pict->mb_type)
return;
if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
int x,y;
av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
switch (pict->pict_type) {
case AV_PICTURE_TYPE_I: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
case AV_PICTURE_TYPE_P: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
case AV_PICTURE_TYPE_B: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
case AV_PICTURE_TYPE_S: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
case AV_PICTURE_TYPE_I:
av_log(s->avctx, AV_LOG_DEBUG, "I\n");
break;
case AV_PICTURE_TYPE_P:
av_log(s->avctx, AV_LOG_DEBUG, "P\n");
break;
case AV_PICTURE_TYPE_B:
av_log(s->avctx, AV_LOG_DEBUG, "B\n");
break;
case AV_PICTURE_TYPE_S:
av_log(s->avctx, AV_LOG_DEBUG, "S\n");
break;
case AV_PICTURE_TYPE_SI:
av_log(s->avctx, AV_LOG_DEBUG, "SI\n");
break;
case AV_PICTURE_TYPE_SP:
av_log(s->avctx, AV_LOG_DEBUG, "SP\n");
break;
}
for (y = 0; y < s->mb_height; y++) {
for (x = 0; x < s->mb_width; x++) {
if (s->avctx->debug & FF_DEBUG_SKIP) {
int count = s->mbskip_table[x + y * s->mb_stride];
if(count>9) count=9;
if (count > 9)
count = 9;
av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
}
if (s->avctx->debug & FF_DEBUG_QP) {
av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
av_log(s->avctx, AV_LOG_DEBUG, "%2d",
pict->qscale_table[x + y * s->mb_stride]);
}
if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
int mb_type = pict->mb_type[x + y * s->mb_stride];
@@ -1542,7 +1595,7 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
}
if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
s->avctx->debug_mv) {
(s->avctx->debug_mv)) {
const int shift = 1 + s->quarter_sample;
int mb_y;
uint8_t *ptr;
@@ -1551,12 +1604,16 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
const int width = s->avctx->width;
const int height = s->avctx->height;
const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
const int mv_stride = (s->mb_width << mv_sample_log2) +
(s->codec_id == CODEC_ID_H264 ? 0 : 1);
s->low_delay = 0; // needed to see the vectors without trashing the buffers
avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
&h_chroma_shift, &v_chroma_shift);
for (i = 0; i < 3; i++) {
memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
memcpy(s->visualization_buffer[i], pict->data[i],
(i == 0) ? pict->linesize[i] * height :
pict->linesize[i] * height >> v_chroma_shift);
pict->data[i] = s->visualization_buffer[i];
}
pict->type = FF_BUFFER_TYPE_COPY;
@@ -1567,20 +1624,26 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
int mb_x;
for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
const int mb_index = mb_x + mb_y * s->mb_stride;
if (s->avctx->debug_mv && pict->motion_val) {
if ((s->avctx->debug_mv) && pict->motion_val) {
int type;
for (type = 0; type < 3; type++) {
int direction = 0;
switch (type) {
case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
case 0:
if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
(pict->pict_type != AV_PICTURE_TYPE_P))
continue;
direction = 0;
break;
case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
case 1:
if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
(pict->pict_type != AV_PICTURE_TYPE_B))
continue;
direction = 0;
break;
case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
case 2:
if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
(pict->pict_type != AV_PICTURE_TYPE_B))
continue;
direction = 1;
break;
@@ -1593,10 +1656,12 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
for (i = 0; i < 4; i++) {
int sx = mb_x * 16 + 4 + 8 * (i & 1);
int sy = mb_y * 16 + 4 + 8 * (i >> 1);
int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
int xy = (mb_x * 2 + (i & 1) +
(mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
draw_arrow(ptr, sx, sy, mx, my, width,
height, s->linesize, 100);
}
} else if (IS_16X8(pict->mb_type[mb_index])) {
int i;
@@ -1610,7 +1675,8 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
if (IS_INTERLACED(pict->mb_type[mb_index]))
my *= 2;
draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
height, s->linesize, 100);
}
} else if (IS_8X16(pict->mb_type[mb_index])) {
int i;
@@ -1618,33 +1684,40 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
int sx = mb_x * 16 + 4 + 8 * i;
int sy = mb_y * 16 + 8;
int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
int mx=(pict->motion_val[direction][xy][0]>>shift);
int my=(pict->motion_val[direction][xy][1]>>shift);
int mx = pict->motion_val[direction][xy][0] >> shift;
int my = pict->motion_val[direction][xy][1] >> shift;
if (IS_INTERLACED(pict->mb_type[mb_index]))
my *= 2;
draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
height, s->linesize, 100);
}
} else {
int sx = mb_x * 16 + 8;
int sy = mb_y * 16 + 8;
int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
}
}
}
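The parentheses around the shift above are load-bearing: in C the additive operators bind tighter than the shifts, so v >> shift + sx parses as v >> (shift + sx), not (v >> shift) + sx. A two-line sanity check:

#include <stdio.h>

int main(void)
{
    int v = 64, shift = 2, sx = 3;
    printf("%d\n", (v >> shift) + sx); /* 19: shift first, then add  */
    printf("%d\n", v >> shift + sx);   /* 2:  i.e. v >> (shift + sx) */
    return 0;
}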
if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
0x0101010101010101ULL;
int y;
for (y = 0; y < block_height; y++) {
*(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
*(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
*(uint64_t *)(pict->data[1] + 8 * mb_x +
(block_height * mb_y + y) *
pict->linesize[1]) = c;
*(uint64_t *)(pict->data[2] + 8 * mb_x +
(block_height * mb_y + y) *
pict->linesize[2]) = c;
}
}
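Multiplying a byte by 0x0101010101010101ULL replicates it into all eight lanes of a 64-bit word, so each of the stores above paints eight chroma pixels at once:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t c = 0x2a * 0x0101010101010101ULL;   /* byte 0x2a in every lane */
    printf("%016llx\n", (unsigned long long) c); /* prints 2a2a2a2a2a2a2a2a */
    return 0;
}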
if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
pict->motion_val) {
int mb_type = pict->mb_type[mb_index];
uint64_t u,v;
int y;
@@ -1656,7 +1729,8 @@ v= (int)(128 + r*sin(theta*3.141592/180));
u = v = 128;
if (IS_PCM(mb_type)) {
COLOR(120, 48)
}else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
} else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
IS_INTRA16x16(mb_type)) {
COLOR(30, 48)
} else if (IS_INTRA4x4(mb_type)) {
COLOR(90, 48)
@@ -1682,36 +1756,45 @@ v= (int)(128 + r*sin(theta*3.141592/180));
u *= 0x0101010101010101ULL;
v *= 0x0101010101010101ULL;
for (y = 0; y < block_height; y++) {
*(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
*(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
*(uint64_t *)(pict->data[1] + 8 * mb_x +
(block_height * mb_y + y) * pict->linesize[1]) = u;
*(uint64_t *)(pict->data[2] + 8 * mb_x +
(block_height * mb_y + y) * pict->linesize[2]) = v;
}
// segmentation
if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
*(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
*(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
*(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
(16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
*(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
(16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
}
if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
for (y = 0; y < 16; y++)
pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
pict->linesize[0]] ^= 0x80;
}
if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
int dm = 1 << (mv_sample_log2 - 2);
for (i = 0; i < 4; i++) {
int sx = mb_x * 16 + 8 * (i & 1);
int sy = mb_y * 16 + 8 * (i >> 1);
int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
int xy = (mb_x * 2 + (i & 1) +
(mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
// FIXME bidir
int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
if (mv[0] != mv[dm] ||
mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
for (y = 0; y < 8; y++)
pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
*(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
*(uint64_t *)(pict->data[0] + sx + (sy + 4) *
pict->linesize[0]) ^= 0x8080808080808080ULL;
}
}
if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
if (IS_INTERLACED(mb_type) &&
s->codec_id == CODEC_ID_H264) {
// hmm
}
}
@@ -1743,15 +1826,18 @@ static inline int hpel_motion_lowres(MpegEncContext *s,
sx = motion_x & s_mask;
sy = motion_y & s_mask;
src_x += motion_x >> (lowres+1);
src_y += motion_y >> (lowres+1);
src_x += motion_x >> (lowres + 1);
src_y += motion_y >> (lowres + 1);
src += src_y * stride + src_x;
if( (unsigned)src_x > h_edge_pos - (!!sx) - w
|| (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
if ((unsigned)src_x > h_edge_pos - (!!sx) - w ||
(unsigned)src_y > (v_edge_pos >> field_based) - (!!sy) - h) {
s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
(h + 1) << field_based, src_x,
src_y << field_based,
h_edge_pos,
v_edge_pos);
src = s->edge_emu_buffer;
emu = 1;
}
@@ -1766,13 +1852,20 @@ static inline int hpel_motion_lowres(MpegEncContext *s,
/* apply one mpeg motion vector to the three components */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
int field_based, int bottom_field, int field_select,
uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
int motion_x, int motion_y, int h, int mb_y)
uint8_t *dest_y,
uint8_t *dest_cb,
uint8_t *dest_cr,
int field_based,
int bottom_field,
int field_select,
uint8_t **ref_picture,
h264_chroma_mc_func *pix_op,
int motion_x, int motion_y,
int h, int mb_y)
{
uint8_t *ptr_y, *ptr_cb, *ptr_cr;
int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
uvsx, uvsy;
const int lowres = s->avctx->lowres;
const int op_index = FFMIN(lowres, 2);
const int block_s = 8>>lowres;
@@ -1782,26 +1875,28 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
linesize = s->current_picture.f.linesize[0] << field_based;
uvlinesize = s->current_picture.f.linesize[1] << field_based;
if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
// FIXME obviously not perfect but qpel will not work in lowres anyway
if (s->quarter_sample) {
motion_x /= 2;
motion_y /= 2;
}
if (field_based) {
motion_y += (bottom_field - field_select)*((1<<lowres)-1);
motion_y += (bottom_field - field_select) * ((1 << lowres) - 1);
}
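Here the parentheses change the value: (1 << lowres) - 1 is a mask of the low lowres bits, whereas the unparenthesized 1 << lowres - 1 parses as 1 << (lowres - 1), a single bit. For example:

#include <stdio.h>

int main(void)
{
    int lowres = 3;
    printf("%d\n", (1 << lowres) - 1); /* 7: mask of the low three bits    */
    printf("%d\n", 1 << (lowres - 1)); /* 4: what 1 << lowres - 1 would be */
    return 0;
}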
sx = motion_x & s_mask;
sy = motion_y & s_mask;
src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
src_x = s->mb_x * 2 * block_s + (motion_x >> (lowres + 1));
src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> (lowres + 1));
if (s->out_format == FMT_H263) {
uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
uvsrc_x = src_x >> 1;
uvsrc_y = src_y >> 1;
}else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
} else if (s->out_format == FMT_H261) {
// even chroma mv's are full pel in H261
mx = motion_x / 4;
my = motion_y / 4;
uvsx = (2 * mx) & s_mask;
@@ -1813,31 +1908,38 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
my = motion_y / 2;
uvsx = mx & s_mask;
uvsy = my & s_mask;
uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
uvsrc_x = s->mb_x * block_s + (mx >> (lowres + 1));
uvsrc_y = (mb_y * block_s >> field_based) + (my >> (lowres + 1));
}
ptr_y = ref_picture[0] + src_y * linesize + src_x;
ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
|| (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
if ((unsigned) src_x > h_edge_pos - (!!sx) - 2 * block_s ||
(unsigned) src_y > (v_edge_pos >> field_based) - (!!sy) - h) {
s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
s->linesize, 17, 17 + field_based,
src_x, src_y << field_based, h_edge_pos,
v_edge_pos);
ptr_y = s->edge_emu_buffer;
if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
9 + field_based,
uvsrc_x, uvsrc_y << field_based,
h_edge_pos >> 1, v_edge_pos >> 1);
s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
9 + field_based,
uvsrc_x, uvsrc_y << field_based,
h_edge_pos >> 1, v_edge_pos >> 1);
ptr_cb = uvbuf;
ptr_cr = uvbuf + 16;
}
}
if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
// FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
if (bottom_field) {
dest_y += s->linesize;
dest_cb += s->uvlinesize;
dest_cr += s->uvlinesize;
@@ -1856,8 +1958,10 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
uvsx = (uvsx << 2) >> lowres;
uvsy = (uvsy << 2) >> lowres;
pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift,
uvsx, uvsy);
pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift,
uvsx, uvsy);
}
// FIXME h261 lowres loop filter
}
@@ -1866,13 +1970,14 @@ static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
uint8_t *dest_cb, uint8_t *dest_cr,
uint8_t **ref_picture,
h264_chroma_mc_func * pix_op,
int mx, int my){
int mx, int my)
{
const int lowres = s->avctx->lowres;
const int op_index = FFMIN(lowres, 2);
const int block_s = 8 >> lowres;
const int s_mask = (2 << lowres) - 1;
const int h_edge_pos = s->h_edge_pos >> (lowres+1);
const int v_edge_pos = s->v_edge_pos >> (lowres+1);
const int h_edge_pos = s->h_edge_pos >> (lowres + 1);
const int v_edge_pos = s->v_edge_pos >> (lowres + 1);
int emu = 0, src_x, src_y, offset, sx, sy;
uint8_t *ptr;
@@ -1888,15 +1993,16 @@ static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
sx = mx & s_mask;
sy = my & s_mask;
src_x = s->mb_x*block_s + (mx >> (lowres+1));
src_y = s->mb_y*block_s + (my >> (lowres+1));
src_x = s->mb_x * block_s + (mx >> (lowres + 1));
src_y = s->mb_y * block_s + (my >> (lowres + 1));
offset = src_y * s->uvlinesize + src_x;
ptr = ref_picture[1] + offset;
if (s->flags & CODEC_FLAG_EMU_EDGE) {
if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
|| (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
if ((unsigned) src_x > h_edge_pos - (!!sx) - block_s ||
(unsigned) src_y > v_edge_pos - (!!sy) - block_s) {
s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
ptr = s->edge_emu_buffer;
emu = 1;
}
@@ -1907,7 +2013,8 @@ static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
ptr = ref_picture[2] + offset;
if (emu) {
s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
src_x, src_y, h_edge_pos, v_edge_pos);
ptr = s->edge_emu_buffer;
}
pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
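emulated_edge_mc() is the slow path taken when the (lowres-scaled) source block would read past h_edge_pos/v_edge_pos: it assembles a copy with coordinates clamped to the valid area in edge_emu_buffer, and the interpolation then runs on that copy. The rough idea, with a simplified hypothetical signature (not the dsputil one):

#include <stdint.h>

/* Build a private w x h copy of a block whose source coordinates are
 * clamped to the valid picture area [0,edge_w) x [0,edge_h), so the
 * caller can safely "read" past the real picture edges. dst is packed
 * with w bytes per row; src points at the plane origin. Sketch only. */
static void emulate_edge_sketch(uint8_t *dst, const uint8_t *src,
                                int linesize, int w, int h,
                                int x, int y, int edge_w, int edge_h)
{
    int i, j;
    for (j = 0; j < h; j++) {
        int sy = y + j;
        sy = sy < 0 ? 0 : sy >= edge_h ? edge_h - 1 : sy;
        for (i = 0; i < w; i++) {
            int sx = x + i;
            sx = sx < 0 ? 0 : sx >= edge_w ? edge_w - 1 : sx;
            dst[j * w + i] = src[sy * linesize + sx];
        }
    }
}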
@@ -1925,7 +2032,8 @@ static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
* the motion vectors are taken from s->mv and the MV type from s->mv_type
*/
static inline void MPV_motion_lowres(MpegEncContext *s,
uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
uint8_t *dest_y, uint8_t *dest_cb,
uint8_t *dest_cr,
int dir, uint8_t **ref_picture,
h264_chroma_mc_func *pix_op)
{
@@ -1942,15 +2050,18 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
0, 0, 0,
ref_picture, pix_op,
s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
s->mv[dir][0][0], s->mv[dir][0][1],
2 * block_s, mb_y);
break;
case MV_TYPE_8X8:
mx = 0;
my = 0;
for (i = 0; i < 4; i++) {
hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
s->linesize) * block_s,
ref_picture[0], 0, 0,
(2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
(2 * mb_x + (i & 1)) * block_s,
(2 * mb_y + (i >> 1)) * block_s,
s->width, s->height, s->linesize,
s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
block_s, block_s, pix_op,
@@ -1961,7 +2072,8 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
}
if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
pix_op, mx, my);
break;
case MV_TYPE_FIELD:
if (s->picture_structure == PICT_FRAME) {
@@ -1969,28 +2081,33 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1, 0, s->field_select[dir][0],
ref_picture, pix_op,
s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
s->mv[dir][0][0], s->mv[dir][0][1],
block_s, mb_y);
/* bottom field */
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1, 1, s->field_select[dir][1],
ref_picture, pix_op,
s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
s->mv[dir][1][0], s->mv[dir][1][1],
block_s, mb_y);
} else {
if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
if (s->picture_structure != s->field_select[dir][0] + 1 &&
s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
ref_picture = s->current_picture_ptr->f.data;
}
}
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
0, 0, s->field_select[dir][0],
ref_picture, pix_op,
s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
s->mv[dir][0][0],
s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
}
break;
case MV_TYPE_16X8:
for (i = 0; i < 2; i++) {
uint8_t **ref2picture;
if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
if (s->picture_structure == s->field_select[dir][i] + 1 ||
s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
ref2picture = ref_picture;
} else {
ref2picture = s->current_picture_ptr->f.data;
@@ -1999,7 +2116,8 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
0, 0, s->field_select[dir][i],
ref2picture, pix_op,
s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
s->mv[dir][i][0], s->mv[dir][i][1] +
2 * block_s * i, block_s, mb_y >> 1);
dest_y += 2 * block_s * s->linesize;
dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
@@ -2014,7 +2132,9 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1, j, j ^ i,
ref_picture, pix_op,
s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
s->mv[dir][2 * i + j][0],
s->mv[dir][2 * i + j][1],
block_s, mb_y);
}
pix_op = s->dsp.avg_h264_chroma_pixels_tab;
}
@@ -2023,19 +2143,22 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
0, 0, s->picture_structure != i + 1,
ref_picture, pix_op,
s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
2 * block_s, mb_y >> 1);
// after put we make avg of the same block
pix_op = s->dsp.avg_h264_chroma_pixels_tab;
//opposite parity is always in the same frame if this is second field
// opposite parity is always in the same
// frame if this is second field
if (!s->first_field) {
ref_picture = s->current_picture_ptr->f.data;
}
}
}
break;
default: assert(0);
default:
assert(0);
}
}