commit e2d110d8d2

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  rv34: Handle only complete frames in frame-mt.
  MPV: set reference frame pointers to NULL when allocation of dummy pictures fails
  configure: die if x11grab dependencies are unavailable
  zerocodec: factorize loop
  avconv: fix the resampling safety factors for output audio buffer allocation
  avconv: move audio output buffer allocation to a separate function
  avconv: make the async buffer global and free it in exit_program()

Conflicts:
  ffmpeg.c
  libavcodec/mpegvideo.c
  libavcodec/rv34.c
  libavcodec/zerocodec.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
configure (12 lines changed)
@@ -1636,7 +1636,6 @@ v4l2_indev_deps_any="linux_videodev2_h sys_videoio_h"
 vfwcap_indev_deps="capCreateCaptureWindow vfwcap_defines"
 vfwcap_indev_extralibs="-lavicap32"
 x11_grab_device_indev_deps="x11grab XShmCreateImage"
-x11_grab_device_indev_extralibs="-lX11 -lXext -lXfixes"
 
 # protocols
 bluray_protocol_deps="libbluray"
@@ -3279,13 +3278,10 @@ enabled_any sndio_indev sndio_outdev && check_lib2 sndio.h sio_open -lsndio
 enabled libcdio &&
     check_lib2 "cdio/cdda.h cdio/paranoia.h" cdio_cddap_open "-lcdio_paranoia -lcdio_cdda -lcdio"
 
-enabled x11grab &&
-    check_header X11/Xlib.h &&
-    check_header X11/extensions/XShm.h &&
-    check_header X11/extensions/Xfixes.h &&
-    check_func XOpenDisplay -lX11 &&
-    check_func XShmCreateImage -lX11 -lXext &&
-    check_func XFixesGetCursorImage -lX11 -lXext -lXfixes
+enabled x11grab &&
+    require X11 X11/Xlib.h XOpenDisplay -lX11 &&
+    require Xext X11/extensions/XShm.h XShmCreateImage -lXext &&
+    require Xfixes X11/extensions/Xfixes.h XFixesGetCursorImage -lXfixes
 
 if ! disabled vaapi; then
     check_lib va/va.h vaInitialize -lva && {
ffmpeg.c (74 lines changed)
@@ -179,8 +179,8 @@ static int debug_ts = 0;
 
 static uint8_t *audio_buf;
 static unsigned int allocated_audio_buf_size;
-
-static uint8_t *input_tmp= NULL;
+static uint8_t *async_buf;
+static unsigned int allocated_async_buf_size;
 
 #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
 
@@ -868,16 +868,16 @@ void av_noreturn exit_program(int ret)
     av_freep(&output_files);
 
     uninit_opts();
-    av_free(audio_buf);
+    av_freep(&audio_buf);
     allocated_audio_buf_size = 0;
+    av_freep(&async_buf);
+    allocated_async_buf_size = 0;
 
 #if CONFIG_AVFILTER
     avfilter_uninit();
 #endif
     avformat_network_deinit();
 
-    av_freep(&input_tmp);
-
     if (received_sigterm) {
         av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
                (int) received_sigterm);
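The cleanup above replaces av_free(audio_buf) with av_freep(&audio_buf): av_freep() takes the address of the pointer, frees the buffer and then resets the pointer itself to NULL. A minimal standalone sketch of that pattern in plain C (free_and_clear is a made-up name, not the libavutil implementation):

    #include <stdlib.h>

    /* Freeing through a pointer-to-pointer lets the helper also clear the
     * caller's pointer, so a later cleanup pass cannot double-free it or
     * reuse a stale address. */
    static void free_and_clear(void **p)
    {
        free(*p);
        *p = NULL;
    }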
@@ -1126,11 +1126,38 @@ static int encode_audio_frame(AVFormatContext *s, OutputStream *ost,
     return ret;
 }
 
+static int alloc_audio_output_buf(AVCodecContext *dec, AVCodecContext *enc,
+                                  int nb_samples)
+{
+    int64_t audio_buf_samples;
+    int audio_buf_size;
+
+    /* calculate required number of samples to allocate */
+    audio_buf_samples = ((int64_t)nb_samples * enc->sample_rate + dec->sample_rate) /
+                        dec->sample_rate;
+    audio_buf_samples = 4 * audio_buf_samples + 10000; // safety factors for resampling
+    audio_buf_samples = FFMAX(audio_buf_samples, enc->frame_size);
+    if (audio_buf_samples > INT_MAX)
+        return AVERROR(EINVAL);
+
+    audio_buf_size = av_samples_get_buffer_size(NULL, enc->channels,
+                                                audio_buf_samples,
+                                                enc->sample_fmt, 32);
+    if (audio_buf_size < 0)
+        return audio_buf_size;
+
+    av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
+    if (!audio_buf)
+        return AVERROR(ENOMEM);
+
+    return 0;
+}
+
 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
                          InputStream *ist, AVFrame *decoded_frame)
 {
     uint8_t *buftmp;
-    int64_t audio_buf_size, size_out;
+    int64_t size_out;
 
     int frame_bytes, resample_changed;
     AVCodecContext *enc = ost->st->codec;
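The new alloc_audio_output_buf() above sizes the output buffer from a worst-case estimate: scale the decoded sample count by the output/input rate ratio (rounding up), apply the resampling safety margin of a factor 4 plus 10000 samples, never go below one full encoder frame, and reject anything that no longer fits in an int. A small self-contained restatement of just that arithmetic (worst_case_out_samples is a hypothetical name; the real helper then converts samples to bytes with av_samples_get_buffer_size() and grows the global buffer with av_fast_malloc()):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    static int64_t worst_case_out_samples(int nb_samples, int in_rate,
                                          int out_rate, int min_frame_size)
    {
        /* samples after resampling, rounded up */
        int64_t out = ((int64_t)nb_samples * out_rate + in_rate) / in_rate;
        out = 4 * out + 10000;        /* safety factors for resampling */
        if (out < min_frame_size)
            out = min_frame_size;     /* at least one full encoder frame */
        return out;
    }

    int main(void)
    {
        /* e.g. 1024 decoded samples at 44100 Hz, encoding at 48000 Hz */
        int64_t n = worst_case_out_samples(1024, 44100, 48000, 1152);
        if (n > INT_MAX)
            return 1;                 /* the helper reports this as EINVAL */
        printf("room needed: %lld samples per channel\n", (long long)n);
        return 0;
    }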
@@ -1140,7 +1167,6 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
     uint8_t *buf[AV_NUM_DATA_POINTERS];
     int size = decoded_frame->nb_samples * dec->channels * isize;
     int planes = av_sample_fmt_is_planar(dec->sample_fmt) ? dec->channels : 1;
-    int64_t allocated_for_size = size;
     int i;
 
     av_assert0(planes <= AV_NUM_DATA_POINTERS);
@@ -1148,21 +1174,8 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
     for(i=0; i<planes; i++)
         buf[i]= decoded_frame->data[i];
 
-need_realloc:
-    audio_buf_size = (allocated_for_size + isize * dec->channels - 1) / (isize * dec->channels);
-    audio_buf_size = (audio_buf_size * enc->sample_rate + dec->sample_rate) / dec->sample_rate;
-    audio_buf_size = audio_buf_size * 2 + 10000; // safety factors for the deprecated resampling API
-    audio_buf_size = FFMAX(audio_buf_size, enc->frame_size);
-    audio_buf_size *= osize * enc->channels;
-
-    if (audio_buf_size > INT_MAX) {
-        av_log(NULL, AV_LOG_FATAL, "Buffer sizes too large\n");
-        exit_program(1);
-    }
-
-    av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
-    if (!audio_buf) {
-        av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n");
+    if (alloc_audio_output_buf(dec, enc, decoded_frame->nb_samples) < 0) {
+        av_log(NULL, AV_LOG_FATAL, "Error allocating audio buffer\n");
         exit_program(1);
     }
 
@@ -1251,16 +1264,21 @@ need_realloc:
                 return;
             ist->is_start = 0;
         } else {
-            input_tmp = av_realloc(input_tmp, byte_delta + size);
+            av_fast_malloc(&async_buf, &allocated_async_buf_size,
+                           byte_delta + size);
+            if (!async_buf) {
+                av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n");
+                exit_program(1);
+            }
 
-            if (byte_delta > allocated_for_size - size) {
-                allocated_for_size = byte_delta + (int64_t)size;
-                goto need_realloc;
+            if (alloc_audio_output_buf(dec, enc, decoded_frame->nb_samples + idelta) < 0) {
+                av_log(NULL, AV_LOG_FATAL, "Error allocating audio buffer\n");
+                exit_program(1);
             }
             ist->is_start = 0;
 
             for (i=0; i<planes; i++) {
-                uint8_t *t = input_tmp + i*((byte_delta + size)/planes);
+                uint8_t *t = async_buf + i*((byte_delta + size)/planes);
                 generate_silence(t, dec->sample_fmt, byte_delta/planes);
                 memcpy(t + byte_delta/planes, buf[i], size/planes);
                 buf[i] = t;
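Instead of growing input_tmp with av_realloc() and retrying via goto need_realloc, the silence-padding path now reuses one global async_buf through av_fast_malloc(), which reallocates only when the requested size exceeds the current allocation and does not preserve the old contents. A rough standalone illustration of that grow-only pattern (grow_only_buffer is a made-up helper, not the libavutil code):

    #include <stdlib.h>

    static void *grow_only_buffer(void **buf, unsigned int *allocated,
                                  size_t needed)
    {
        if (needed <= *allocated)
            return *buf;                      /* big enough, reuse as-is */
        free(*buf);                           /* contents are not preserved */
        *buf = malloc(needed);
        *allocated = *buf ? (unsigned int)needed : 0;
        return *buf;                          /* NULL on allocation failure */
    }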
@@ -1283,7 +1301,7 @@ need_realloc:
 
     if (ost->audio_resample || ost->audio_channels_mapped) {
         buftmp = audio_buf;
-        size_out = swr_convert(ost->swr, ( uint8_t*[]){buftmp}, audio_buf_size / (enc->channels * osize),
+        size_out = swr_convert(ost->swr, ( uint8_t*[]){buftmp}, allocated_audio_buf_size / (enc->channels * osize),
                                buf, size / (dec->channels * isize));
         if (size_out < 0) {
             av_log(NULL, AV_LOG_FATAL, "swr_convert failed\n");
libavcodec/mpegvideo.c

@@ -1237,10 +1237,12 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
         i = ff_find_unused_picture(s, 0);
         if (i < 0)
             return i;
-        s->last_picture_ptr= &s->picture[i];
+        s->last_picture_ptr = &s->picture[i];
         s->last_picture_ptr->f.key_frame = 0;
-        if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
+        if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
+            s->last_picture_ptr = NULL;
             return -1;
+        }
 
         if(s->codec_id == CODEC_ID_FLV1 || s->codec_id == CODEC_ID_H263){
             for(i=0; i<avctx->height; i++)
@@ -1258,10 +1260,12 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
         i = ff_find_unused_picture(s, 0);
         if (i < 0)
             return i;
-        s->next_picture_ptr= &s->picture[i];
+        s->next_picture_ptr = &s->picture[i];
         s->next_picture_ptr->f.key_frame = 0;
-        if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
+        if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
+            s->next_picture_ptr = NULL;
             return -1;
+        }
         ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
         ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
         s->next_picture_ptr->f.reference = 3;
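Both mpegvideo.c hunks wrap the ff_alloc_picture() failure path so that the reference-frame pointer assigned just above is reset to NULL before returning; otherwise a later call would find a non-NULL last/next picture pointer referring to a picture that was never allocated. A minimal sketch of that rule with hypothetical names (this is not mpegvideo.c code):

    #include <stdio.h>

    typedef struct Picture { int dummy; } Picture;

    static Picture  pool[4];
    static Picture *last_picture;        /* stands in for s->last_picture_ptr */

    static int alloc_picture(Picture *p)
    {
        (void)p;
        return -1;                       /* pretend the allocation failed */
    }

    static int frame_start(void)
    {
        last_picture = &pool[0];
        if (alloc_picture(last_picture) < 0) {
            last_picture = NULL;         /* the line the patch adds */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        frame_start();
        printf("last_picture is %s\n", last_picture ? "dangling" : "NULL");
        return 0;
    }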
libavcodec/rv34.c

@@ -1396,7 +1396,7 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
 {
     MpegEncContext *s = &r->s;
     GetBitContext *gb = &s->gb;
-    int mb_pos;
+    int mb_pos, slice_type;
     int res;
 
     init_get_bits(&r->s.gb, buf, buf_size*8);
@@ -1406,64 +1406,14 @@ static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
         return -1;
     }
 
-    if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
-        if (s->width != r->si.width || s->height != r->si.height) {
-            int err;
-
-            av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
-                   r->si.width, r->si.height);
-            ff_MPV_common_end(s);
-            s->width = r->si.width;
-            s->height = r->si.height;
-            avcodec_set_dimensions(s->avctx, s->width, s->height);
-            if ((err = ff_MPV_common_init(s)) < 0)
-                return err;
-            if ((err = rv34_decoder_realloc(r)) < 0)
-                return err;
-        }
-        s->pict_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
-        if(ff_MPV_frame_start(s, s->avctx) < 0)
-            return -1;
-        ff_er_frame_start(s);
-        if (!r->tmp_b_block_base) {
-            int i;
-
-            r->tmp_b_block_base = av_malloc(s->linesize * 48);
-            for (i = 0; i < 2; i++)
-                r->tmp_b_block_y[i] = r->tmp_b_block_base + i * 16 * s->linesize;
-            for (i = 0; i < 4; i++)
-                r->tmp_b_block_uv[i] = r->tmp_b_block_base + 32 * s->linesize
-                                       + (i >> 1) * 8 * s->uvlinesize + (i & 1) * 16;
-        }
-        r->cur_pts = r->si.pts;
-        if(s->pict_type != AV_PICTURE_TYPE_B){
-            r->last_pts = r->next_pts;
-            r->next_pts = r->cur_pts;
-        }else{
-            int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);
-            int dist0 = GET_PTS_DIFF(r->cur_pts, r->last_pts);
-            int dist1 = GET_PTS_DIFF(r->next_pts, r->cur_pts);
-
-            if(!refdist){
-                r->weight1 = r->weight2 = 8192;
-            }else{
-                r->weight1 = (dist0 << 14) / refdist;
-                r->weight2 = (dist1 << 14) / refdist;
-            }
-        }
-        s->mb_x = s->mb_y = 0;
-        ff_thread_finish_setup(s->avctx);
-    } else {
-        int slice_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
-
-        if (slice_type != s->pict_type) {
-            av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
-            return AVERROR_INVALIDDATA;
-        }
-        if (s->width != r->si.width || s->height != r->si.height) {
-            av_log(s->avctx, AV_LOG_ERROR, "Size mismatch\n");
-            return AVERROR_INVALIDDATA;
-        }
-    }
+    slice_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
+    if (slice_type != s->pict_type) {
+        av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
+        return AVERROR_INVALIDDATA;
+    }
+    if (s->width != r->si.width || s->height != r->si.height) {
+        av_log(s->avctx, AV_LOG_ERROR, "Size mismatch\n");
+        return AVERROR_INVALIDDATA;
+    }
 
     r->si.end = end;
@@ -1613,10 +1563,6 @@ int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecConte
 
     memset(&r->si, 0, sizeof(r->si));
 
-    /* necessary since it is it the condition checked for in decode_slice
-     * to call ff_MPV_frame_start. cmp. comment at the end of decode_frame */
-    s->current_picture_ptr = NULL;
-
     return 0;
 }
 
@@ -1626,8 +1572,33 @@ static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n)
     else return AV_RL32(buf + n*8 - 4) == 1 ? AV_RL32(buf + n*8) : AV_RB32(buf + n*8);
 }
 
+static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
+{
+    RV34DecContext *r = avctx->priv_data;
+    MpegEncContext *s = &r->s;
+    int got_picture = 0;
+
+    ff_er_frame_end(s);
+    ff_MPV_frame_end(s);
+
+    if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
+        ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
+
+    if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
+        *pict = s->current_picture_ptr->f;
+        got_picture = 1;
+    } else if (s->last_picture_ptr != NULL) {
+        *pict = s->last_picture_ptr->f;
+        got_picture = 1;
+    }
+    if (got_picture)
+        ff_print_debug_info(s, pict);
+
+    return got_picture;
+}
+
 int ff_rv34_decode_frame(AVCodecContext *avctx,
-                         void *data, int *data_size,
+                         void *data, int *got_picture_ptr,
                          AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
@@ -1648,7 +1619,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
             *pict = s->next_picture_ptr->f;
             s->next_picture_ptr = NULL;
 
-            *data_size = sizeof(AVFrame);
+            *got_picture_ptr = 1;
         }
         return 0;
     }
@@ -1683,6 +1654,70 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
         || avctx->skip_frame >= AVDISCARD_ALL)
         return avpkt->size;
 
+    /* first slice */
+    if (si.start == 0) {
+        if (s->mb_num_left > 0) {
+            av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.",
+                   s->mb_num_left);
+            ff_er_frame_end(s);
+            ff_MPV_frame_end(s);
+        }
+
+        if (s->width != si.width || s->height != si.height) {
+            int err;
+
+            av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
+                   si.width, si.height);
+            ff_MPV_common_end(s);
+            s->width = si.width;
+            s->height = si.height;
+            avcodec_set_dimensions(s->avctx, s->width, s->height);
+            if ((err = ff_MPV_common_init(s)) < 0)
+                return err;
+            if ((err = rv34_decoder_realloc(r)) < 0)
+                return err;
+        }
+        s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I;
+        if (ff_MPV_frame_start(s, s->avctx) < 0)
+            return -1;
+        ff_er_frame_start(s);
+        if (!r->tmp_b_block_base) {
+            int i;
+
+            r->tmp_b_block_base = av_malloc(s->linesize * 48);
+            for (i = 0; i < 2; i++)
+                r->tmp_b_block_y[i] = r->tmp_b_block_base
+                                      + i * 16 * s->linesize;
+            for (i = 0; i < 4; i++)
+                r->tmp_b_block_uv[i] = r->tmp_b_block_base + 32 * s->linesize
+                                       + (i >> 1) * 8 * s->uvlinesize
+                                       + (i & 1) * 16;
+        }
+        r->cur_pts = si.pts;
+        if (s->pict_type != AV_PICTURE_TYPE_B) {
+            r->last_pts = r->next_pts;
+            r->next_pts = r->cur_pts;
+        } else {
+            int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);
+            int dist0 = GET_PTS_DIFF(r->cur_pts, r->last_pts);
+            int dist1 = GET_PTS_DIFF(r->next_pts, r->cur_pts);
+
+            if (!refdist) {
+                r->weight1 = r->weight2 = 8192;
+            } else {
+                r->weight1 = (dist0 << 14) / refdist;
+                r->weight2 = (dist1 << 14) / refdist;
+            }
+        }
+        s->mb_x = s->mb_y = 0;
+        ff_thread_finish_setup(s->avctx);
+    } else if (HAVE_THREADS &&
+               (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
+        av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames in frame "
+               "multithreading mode (start MB is %d).\n", si.start);
+        return AVERROR_INVALIDDATA;
+    }
+
     for(i = 0; i < slice_count; i++){
         int offset = get_slice_offset(avctx, slices_hdr, i);
         int size;
@@ -1697,6 +1732,8 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
         }
 
         r->si.end = s->mb_width * s->mb_height;
+        s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;
+
         if(i+1 < slice_count){
             if (get_slice_offset(avctx, slices_hdr, i+1) < 0 ||
                 get_slice_offset(avctx, slices_hdr, i+1) > buf_size) {
@@ -1717,32 +1754,28 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
             break;
         }
         last = rv34_decode_slice(r, r->si.end, buf + offset, size);
-        s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;
         if(last)
             break;
     }
 
-    if(last && s->current_picture_ptr){
-        if(r->loop_filter)
-            r->loop_filter(r, s->mb_height - 1);
-        ff_er_frame_end(s);
-        ff_MPV_frame_end(s);
+    if (s->current_picture_ptr) {
+        if (last) {
+            if(r->loop_filter)
+                r->loop_filter(r, s->mb_height - 1);
 
-        if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
+            *got_picture_ptr = finish_frame(avctx, pict);
+        } else if (HAVE_THREADS &&
+                   (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
+            av_log(avctx, AV_LOG_INFO, "marking unfished frame as finished\n");
+            /* always mark the current frame as finished, frame-mt supports
+             * only complete frames */
+            ff_er_frame_end(s);
+            ff_MPV_frame_end(s);
             ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
 
-        if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
-            *pict = s->current_picture_ptr->f;
-        } else if (s->last_picture_ptr != NULL) {
-            *pict = s->last_picture_ptr->f;
+            return AVERROR_INVALIDDATA;
         }
-
-        if(s->last_picture_ptr || s->low_delay){
-            *data_size = sizeof(AVFrame);
-            ff_print_debug_info(s, pict);
-        }
-        s->current_picture_ptr = NULL; //so we can detect if frame_end wasnt called (find some nicer solution...)
     }
 
     return avpkt->size;
 }
libavcodec/zerocodec.c

@@ -62,27 +62,20 @@ static int zerocodec_decode_frame(AVCodecContext *avctx, void *data,
      */
 
     if (avpkt->flags & AV_PKT_FLAG_KEY) {
-
         pic->key_frame = 1;
         pic->pict_type = AV_PICTURE_TYPE_I;
-
     } else {
-
         pic->key_frame = 0;
         pic->pict_type = AV_PICTURE_TYPE_P;
-
     }
 
     for (i = 0; i < avctx->height; i++) {
-
         zstream->next_out = dst;
         zstream->avail_out = avctx->width << 1;
-
         zret = inflate(zstream, Z_SYNC_FLUSH);
-
         if (zret != Z_OK && zret != Z_STREAM_END) {
             av_log(avctx, AV_LOG_ERROR,
-               "Inflate failed with return code: %d\n", zret);
+                   "Inflate failed with return code: %d\n", zret);
             return AVERROR(EINVAL);
         }
 
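The zerocodec hunk only shows the tail of the decode loop; the overall shape is a single zlib stream fed once with the packet and drained one image row at a time by re-pointing next_out/avail_out. A hedged sketch of that row-by-row inflate pattern (inflate_rows and the 2-bytes-per-pixel row size are assumptions for illustration; the z_stream is assumed to be initialized or reset by the caller, and the file links against zlib with -lz):

    #include <zlib.h>

    static int inflate_rows(z_stream *zs, const unsigned char *in,
                            unsigned in_size, unsigned char *dst,
                            int width, int height, int stride)
    {
        int i, zret;

        zs->next_in  = (Bytef *)in;          /* whole packet as input */
        zs->avail_in = in_size;

        for (i = 0; i < height; i++) {
            zs->next_out  = dst;             /* one output row per iteration */
            zs->avail_out = width * 2;       /* e.g. 2 bytes per pixel */

            zret = inflate(zs, Z_SYNC_FLUSH);
            if (zret != Z_OK && zret != Z_STREAM_END)
                return -1;                   /* decoder logs and bails here */

            dst += stride;                   /* advance to the next row */
        }
        return 0;
    }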