Merge remote-tracking branch 'qatar/master'

* qatar/master:
  rtpdec_asf: Set the no_resync_search option for the chained asf demuxer
  asfdec: Add an option for not searching for the packet markers
  cosmetics: Clean up the tiffenc pix_fmts declaration to match the style of others
  cosmetics: Align codec declarations
  cosmetics: Convert mimic.c to utf-8
  avconv: remove an unused function parameter.
  avconv: remove now pointless variables.
  avconv: drop support for building without libavfilter.
  nellymoserenc: fix crash due to memsetting the wrong area.
  libavformat: Only require first packet to be known for audio/video streams
  avplay: Don't try to scale timestamps if the tb isn't set

Conflicts:
	Changelog
	configure
	ffmpeg.c
	libavcodec/aacenc.c
	libavcodec/bmpenc.c
	libavcodec/dnxhddec.c
	libavcodec/dnxhdenc.c
	libavcodec/ffv1.c
	libavcodec/flacenc.c
	libavcodec/fraps.c
	libavcodec/huffyuv.c
	libavcodec/libopenjpegdec.c
	libavcodec/mpeg12enc.c
	libavcodec/mpeg4videodec.c
	libavcodec/pamenc.c
	libavcodec/pgssubdec.c
	libavcodec/pngenc.c
	libavcodec/qtrleenc.c
	libavcodec/rawdec.c
	libavcodec/sgienc.c
	libavcodec/tiffenc.c
	libavcodec/v210dec.c
	libavcodec/wmv2dec.c
	libavformat/utils.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 6101e5322f
Michael Niedermayer, 2012-04-07 22:41:37 +02:00
207 changed files with 771 additions and 756 deletions
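
Much of the diff below comes from the cosmetic commits listed above ("Align codec declarations", "Clean up the tiffenc pix_fmts declaration"), which reformat AVCodec declarations across libavcodec. As a rough sketch of the target style only (the codec name and callbacks here are hypothetical, not part of this merge), an aligned declaration looks like:

/* Hypothetical codec, shown only to illustrate the aligned-initializer layout. */
AVCodec ff_example_encoder = {
    .name           = "example",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_RAWVIDEO,
    .init           = example_encode_init,   /* hypothetical callbacks */
    .encode2        = example_encode_frame,
    .close          = example_encode_close,
    .pix_fmts       = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("example encoder"),
};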


@@ -22,6 +22,7 @@ version next:
 - Metal Gear Solid: The Twin Snakes demuxer
 - OpenEXR image decoder
 - removelogo filter
+- drop support for ffmpeg without libavfilter
 version 0.10:

configure

@@ -1704,7 +1704,7 @@ ffplay_select="buffersink_filter rdft"
 ffprobe_deps="avcodec avformat"
 ffserver_deps="avformat ffm_muxer fork rtp_protocol rtsp_demuxer"
 ffserver_extralibs='$ldl'
-ffmpeg_deps="avcodec avformat swscale swresample"
+ffmpeg_deps="avcodec avfilter avformat swscale swresample"
 ffmpeg_select="buffersink_filter"
 doc_deps="texi2html"

ffmpeg.c

@@ -55,14 +55,12 @@
 #include "libavformat/ffm.h" // not public API
-#if CONFIG_AVFILTER
 # include "libavfilter/avcodec.h"
 # include "libavfilter/avfilter.h"
 # include "libavfilter/avfiltergraph.h"
 # include "libavfilter/buffersink.h"
 # include "libavfilter/buffersrc.h"
 # include "libavfilter/vsrc_buffer.h"
-#endif
 #if HAVE_SYS_RESOURCE_H
 #include <sys/types.h>
@@ -245,8 +243,6 @@ typedef struct OutputStream {
 /* video only */
 int video_resample;
-AVFrame resample_frame; /* temporary frame for image resampling */
-struct SwsContext *img_resample_ctx; /* for image resampling */
 int resample_height;
 int resample_width;
 int resample_pix_fmt;
@@ -274,13 +270,11 @@ typedef struct OutputStream {
 SwrContext *swr;
-#if CONFIG_AVFILTER
 AVFilterContext *output_video_filter;
 AVFilterContext *input_video_filter;
 AVFilterBufferRef *picref;
 char *avfilter;
 AVFilterGraph *graph;
-#endif
 int64_t sws_flags;
 AVDictionary *opts;
@@ -405,10 +399,8 @@ typedef struct OptionsContext {
 int nb_presets;
 SpecifierOpt *copy_initial_nonkeyframes;
 int nb_copy_initial_nonkeyframes;
-#if CONFIG_AVFILTER
 SpecifierOpt *filters;
 int nb_filters;
-#endif
 } OptionsContext;
 #define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
@@ -597,8 +589,6 @@ static void filter_release_buffer(AVFilterBuffer *fb)
 unref_buffer(buf->ist, buf);
 }
-#if CONFIG_AVFILTER
 static int configure_video_filters(InputStream *ist, OutputStream *ost)
 {
 AVFilterContext *last_filter, *filter;
@@ -693,7 +683,6 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost)
 return 0;
 }
-#endif /* CONFIG_AVFILTER */
 static void term_exit(void)
 {
@@ -863,9 +852,7 @@ void av_noreturn exit_program(int ret)
 av_freep(&async_buf);
 allocated_async_buf_size = 0;
-#if CONFIG_AVFILTER
 avfilter_uninit();
-#endif
 avformat_network_deinit();
 if (received_sigterm) {
@@ -1439,65 +1426,6 @@ static void do_subtitle_out(AVFormatContext *s,
 }
 }
-static void do_video_resample(OutputStream *ost,
-InputStream *ist,
-AVFrame *in_picture,
-AVFrame **out_picture)
-{
-#if CONFIG_AVFILTER
-*out_picture = in_picture;
-#else
-AVCodecContext *dec = ist->st->codec;
-AVCodecContext *enc = ost->st->codec;
-int resample_changed = ost->resample_width != in_picture->width ||
-ost->resample_height != in_picture->height ||
-ost->resample_pix_fmt != in_picture->format;
-*out_picture = in_picture;
-if (resample_changed) {
-av_log(NULL, AV_LOG_INFO,
-"Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s / frm size:%dx%d fmt:%s\n",
-ist->file_index, ist->st->index,
-ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt),
-dec->width , dec->height , av_get_pix_fmt_name(dec->pix_fmt),
-in_picture->width, in_picture->height, av_get_pix_fmt_name(in_picture->format));
-ost->resample_width = in_picture->width;
-ost->resample_height = in_picture->height;
-ost->resample_pix_fmt = in_picture->format;
-}
-ost->video_resample = in_picture->width != enc->width ||
-in_picture->height != enc->height ||
-in_picture->format != enc->pix_fmt;
-if (ost->video_resample) {
-*out_picture = &ost->resample_frame;
-if (!ost->img_resample_ctx || resample_changed) {
-/* initialize the destination picture */
-if (!ost->resample_frame.data[0]) {
-avcodec_get_frame_defaults(&ost->resample_frame);
-if (avpicture_alloc((AVPicture *)&ost->resample_frame, enc->pix_fmt,
-enc->width, enc->height)) {
-av_log(NULL, AV_LOG_FATAL, "Cannot allocate temp picture, check pix fmt\n");
-exit_program(1);
-}
-}
-/* initialize a new scaler context */
-sws_freeContext(ost->img_resample_ctx);
-ost->img_resample_ctx = sws_getContext(in_picture->width, in_picture->height, in_picture->format,
-enc->width, enc->height, enc->pix_fmt,
-ost->sws_flags, NULL, NULL, NULL);
-if (ost->img_resample_ctx == NULL) {
-av_log(NULL, AV_LOG_FATAL, "Cannot get resampling context\n");
-exit_program(1);
-}
-}
-sws_scale(ost->img_resample_ctx, in_picture->data, in_picture->linesize,
-0, ost->resample_height, (*out_picture)->data, (*out_picture)->linesize);
-}
-#endif
-}
 static double psnr(double d)
 {
 return -10.0 * log(d) / log(10.0);
@@ -1545,7 +1473,6 @@ static void do_video_out(AVFormatContext *s, OutputStream *ost,
 InputStream *ist, AVFrame *in_picture)
 {
 int nb_frames, i, ret, format_video_sync;
-AVFrame *final_picture;
 AVCodecContext *enc;
 double sync_ipts, delta;
 double duration = 0;
@@ -1607,8 +1534,6 @@ static void do_video_out(AVFormatContext *s, OutputStream *ost,
 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
 }
-do_video_resample(ost, ist, in_picture, &final_picture);
 /* duplicates frame if needed */
 for (i = 0; i < nb_frames; i++) {
 AVPacket pkt;
@@ -1623,7 +1548,7 @@ static void do_video_out(AVFormatContext *s, OutputStream *ost,
 method. */
 enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
 enc->coded_frame->top_field_first = in_picture->top_field_first;
-pkt.data = (uint8_t *)final_picture;
+pkt.data = (uint8_t *)in_picture;
 pkt.size = sizeof(AVPicture);
 pkt.pts = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
 pkt.flags |= AV_PKT_FLAG_KEY;
@@ -1633,7 +1558,7 @@ static void do_video_out(AVFormatContext *s, OutputStream *ost,
 int got_packet;
 AVFrame big_picture;
-big_picture = *final_picture;
+big_picture = *in_picture;
 /* better than nothing: use input picture interlaced
 settings */
 big_picture.interlaced_frame = in_picture->interlaced_frame;
@@ -2157,7 +2082,6 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int
 pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
-#if CONFIG_AVFILTER
 frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
 for(i=0;i<nb_output_streams;i++) {
 OutputStream *ost = ost = &output_streams[i];
@@ -2189,7 +2113,6 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int
 }
 }
 }
-#endif
 rate_emu_sleep(ist);
@@ -2199,7 +2122,6 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int
 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
 continue;
-#if CONFIG_AVFILTER
 while (av_buffersink_poll_frame(ost->output_video_filter)) {
 AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
 AVFrame *filtered_frame;
@@ -2222,9 +2144,6 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int
 cont:
 avfilter_unref_buffer(ost->picref);
 }
-#else
-do_video_out(output_files[ost->file_index].ctx, ost, ist, decoded_frame);
-#endif
 }
 fail:
@@ -2679,12 +2598,10 @@ static int transcode_init(OutputFile *output_files, int nb_output_files,
 AV_TIME_BASE_Q,
 codec->time_base);
-#if CONFIG_AVFILTER
 if (configure_video_filters(ist, ost)) {
 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
 exit_program(1);
 }
-#endif
 break;
 case AVMEDIA_TYPE_SUBTITLE:
 codec->time_base = (AVRational){1, 1000};
@@ -2924,7 +2841,6 @@ static int transcode(OutputFile *output_files, int nb_output_files,
 do_pkt_dump = 1;
 av_log_set_level(AV_LOG_DEBUG);
 }
-#if CONFIG_AVFILTER
 if (key == 'c' || key == 'C'){
 char buf[4096], target[64], command[256], arg[256] = {0};
 double time;
@@ -2957,7 +2873,6 @@ static int transcode(OutputFile *output_files, int nb_output_files,
 "only %d given in string '%s'\n", n, buf);
 }
 }
-#endif
 if (key == 'd' || key == 'D'){
 int debug=0;
 if(key == 'D') {
@@ -3170,9 +3085,7 @@ static int transcode(OutputFile *output_files, int nb_output_files,
 av_freep(&ost->st->codec->stats_in);
 avcodec_close(ost->st->codec);
 }
-#if CONFIG_AVFILTER
 avfilter_graph_free(&ost->graph);
-#endif
 }
 /* close each decoder */
@@ -3202,10 +3115,7 @@ static int transcode(OutputFile *output_files, int nb_output_files,
 av_fifo_free(ost->fifo); /* works even if fifo is not
 initialized but set to zero */
 av_freep(&ost->st->codec->subtitle_header);
-av_free(ost->resample_frame.data[0]);
 av_free(ost->forced_kf_pts);
-if (ost->video_resample)
-sws_freeContext(ost->img_resample_ctx);
 swr_free(&ost->swr);
 av_dict_free(&ost->opts);
 }
@@ -4141,11 +4051,9 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
 ost->top_field_first = -1;
 MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);
-#if CONFIG_AVFILTER
 MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
 if (filters)
 ost->avfilter = av_strdup(filters);
-#endif
 } else {
 MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc ,st);
 }
@@ -5080,9 +4988,7 @@ static const OptionDef options[] = {
 { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
 { "qscale", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_qscale}, "use fixed quality scale (VBR)", "q" },
 { "profile", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_profile}, "set profile", "profile" },
-#if CONFIG_AVFILTER
 { "filter", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(filters)}, "set stream filterchain", "filter_list" },
-#endif
 { "stats", OPT_BOOL, {&print_stats}, "print progress report during encoding", },
 { "attach", HAS_ARG | OPT_FUNC2, {(void*)opt_attach}, "add an attachment to the output file", "filename" },
 { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(dump_attachment)}, "extract an attachment into a file", "filename" },
@@ -5119,9 +5025,7 @@ static const OptionDef options[] = {
 { "psnr", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_psnr}, "calculate PSNR of compressed frames" },
 { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" },
 { "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" },
-#if CONFIG_AVFILTER
 { "vf", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_filters}, "video filters", "filter list" },
-#endif
 { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(intra_matrices)}, "specify intra matrix coeffs", "matrix" },
 { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(inter_matrices)}, "specify inter matrix coeffs", "matrix" },
 { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_INT| OPT_SPEC, {.off = OFFSET(top_field_first)}, "top=1/bottom=0/auto=-1 field first", "" },
@@ -5196,9 +5100,7 @@ int main(int argc, char **argv)
 #if CONFIG_AVDEVICE
 avdevice_register_all();
 #endif
-#if CONFIG_AVFILTER
 avfilter_register_all();
-#endif
 av_register_all();
 avformat_network_init();


@@ -1844,7 +1844,7 @@ static int video_thread(void *arg)
 frame->opaque = picref;
 }
-if (av_cmp_q(tb, is->video_st->time_base)) {
+if (ret >= 0 && av_cmp_q(tb, is->video_st->time_base)) {
 av_unused int64_t pts1 = pts_int;
 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
 av_dlog(NULL, "video_thread(): "


@@ -817,8 +817,10 @@ AVCodec ff_aac_encoder = {
 .encode2 = aac_encode_frame,
 .close = aac_encode_end,
 .supported_samplerates = avpriv_mpeg4audio_sample_rates,
-.capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL,
-.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
+.capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY |
+CODEC_CAP_EXPERIMENTAL,
+.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
+AV_SAMPLE_FMT_NONE },
 .long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
 .priv_class = &aacenc_class,
 };


@@ -154,7 +154,8 @@ AVCodec ff_ac3_fixed_encoder = {
 .init = ac3_fixed_encode_init,
 .encode2 = ff_ac3_fixed_encode_frame,
 .close = ff_ac3_encode_close,
-.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+AV_SAMPLE_FMT_NONE },
 .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
 .priv_class = &ac3enc_class,
 .channel_layouts = ff_ac3_channel_layouts,


@@ -152,7 +152,8 @@ AVCodec ff_ac3_encoder = {
 .init = ff_ac3_encode_init,
 .encode2 = ff_ac3_float_encode_frame,
 .close = ff_ac3_encode_close,
-.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
+.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
+AV_SAMPLE_FMT_NONE },
 .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
 .priv_class = &ac3enc_class,
 .channel_layouts = ff_ac3_channel_layouts,


@@ -728,7 +728,7 @@ AVCodec ff_ ## name_ ## _encoder = { \
 .encode2 = adpcm_encode_frame, \
 .close = adpcm_encode_close, \
 .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16, \
-AV_SAMPLE_FMT_NONE}, \
+AV_SAMPLE_FMT_NONE }, \
 .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
 }


@@ -1064,5 +1064,6 @@ AVCodec ff_amrnb_decoder = {
 .decode = amrnb_decode_frame,
 .capabilities = CODEC_CAP_DR1,
 .long_name = NULL_IF_CONFIG_SMALL("Adaptive Multi-Rate NarrowBand"),
-.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
+.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
+AV_SAMPLE_FMT_NONE },
 };


@@ -1243,5 +1243,6 @@ AVCodec ff_amrwb_decoder = {
 .decode = amrwb_decode_frame,
 .capabilities = CODEC_CAP_DR1,
 .long_name = NULL_IF_CONFIG_SMALL("Adaptive Multi-Rate WideBand"),
-.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
+.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
+AV_SAMPLE_FMT_NONE },
 };


@@ -624,7 +624,7 @@ AVCodec ff_asv1_decoder = {
 .close = decode_end,
 .decode = decode_frame,
 .capabilities = CODEC_CAP_DR1,
-.long_name= NULL_IF_CONFIG_SMALL("ASUS V1"),
+.long_name = NULL_IF_CONFIG_SMALL("ASUS V1"),
 };
 AVCodec ff_asv2_decoder = {
@@ -636,7 +636,7 @@ AVCodec ff_asv2_decoder = {
 .close = decode_end,
 .decode = decode_frame,
 .capabilities = CODEC_CAP_DR1,
-.long_name= NULL_IF_CONFIG_SMALL("ASUS V2"),
+.long_name = NULL_IF_CONFIG_SMALL("ASUS V2"),
 };
 #if CONFIG_ASV1_ENCODER
@@ -647,9 +647,8 @@ AVCodec ff_asv1_encoder = {
 .priv_data_size = sizeof(ASV1Context),
 .init = encode_init,
 .encode2 = encode_frame,
-//encode_end,
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
-.long_name= NULL_IF_CONFIG_SMALL("ASUS V1"),
+.pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
+.long_name = NULL_IF_CONFIG_SMALL("ASUS V1"),
 };
 #endif
@@ -661,8 +660,7 @@ AVCodec ff_asv2_encoder = {
 .priv_data_size = sizeof(ASV1Context),
 .init = encode_init,
 .encode2 = encode_frame,
-//encode_end,
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
-.long_name= NULL_IF_CONFIG_SMALL("ASUS V2"),
+.pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
+.long_name = NULL_IF_CONFIG_SMALL("ASUS V2"),
 };
 #endif


@@ -177,6 +177,7 @@ AVCodec ff_bmp_encoder = {
 PIX_FMT_RGB565, PIX_FMT_RGB555, PIX_FMT_RGB444,
 PIX_FMT_RGB8, PIX_FMT_BGR8, PIX_FMT_RGB4_BYTE, PIX_FMT_BGR4_BYTE, PIX_FMT_GRAY8, PIX_FMT_PAL8,
 PIX_FMT_MONOBLACK,
-PIX_FMT_NONE},
+PIX_FMT_NONE
+},
 .long_name = NULL_IF_CONFIG_SMALL("BMP image"),
 };


@@ -738,6 +738,6 @@ AVCodec ff_cavs_decoder = {
 .close = ff_cavs_end,
 .decode = cavs_decode_frame,
 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
-.flush= cavs_flush,
-.long_name= NULL_IF_CONFIG_SMALL("Chinese AVS video (AVS1-P2, JiZhun profile)"),
+.flush = cavs_flush,
+.long_name = NULL_IF_CONFIG_SMALL("Chinese AVS video (AVS1-P2, JiZhun profile)"),
 };


@@ -1008,7 +1008,9 @@ AVCodec ff_dnxhd_encoder = {
 .encode2 = dnxhd_encode_picture,
 .close = dnxhd_encode_end,
 .capabilities = CODEC_CAP_SLICE_THREADS,
-.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_YUV422P10, PIX_FMT_NONE},
+.pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV422P,
+PIX_FMT_YUV422P10,
+PIX_FMT_NONE },
 .long_name = NULL_IF_CONFIG_SMALL("VC3/DNxHD"),
 .priv_class = &class,
 .defaults = dnxhd_defaults,


@@ -958,7 +958,9 @@ AVCodec ff_dvvideo_encoder = {
 .init = dvvideo_init_encoder,
 .encode2 = dvvideo_encode_frame,
 .capabilities = CODEC_CAP_SLICE_THREADS,
-.pix_fmts = (const enum PixelFormat[]) {PIX_FMT_YUV411P, PIX_FMT_YUV422P, PIX_FMT_YUV420P, PIX_FMT_NONE},
+.pix_fmts = (const enum PixelFormat[]) {
+PIX_FMT_YUV411P, PIX_FMT_YUV422P, PIX_FMT_YUV420P, PIX_FMT_NONE
+},
 .long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
 };
 #endif // CONFIG_DVVIDEO_ENCODER


@@ -254,7 +254,8 @@ AVCodec ff_eac3_encoder = {
 .init = ff_ac3_encode_init,
 .encode2 = ff_ac3_float_encode_frame,
 .close = ff_ac3_encode_close,
-.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
+.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
+AV_SAMPLE_FMT_NONE },
 .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52 E-AC-3"),
 .priv_class = &eac3enc_class,
 .channel_layouts = ff_ac3_channel_layouts,


@@ -1851,8 +1851,9 @@ AVCodec ff_ffv1_decoder = {
 .init = decode_init,
 .close = common_end,
 .decode = decode_frame,
-.capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ | CODEC_CAP_SLICE_THREADS,
-.long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
+.capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ |
+CODEC_CAP_SLICE_THREADS,
+.long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
 };
 #if CONFIG_FFV1_ENCODER
@@ -1866,6 +1867,6 @@ AVCodec ff_ffv1_encoder = {
 .close = common_end,
 .capabilities = CODEC_CAP_SLICE_THREADS,
 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUVA420P, PIX_FMT_YUV444P, PIX_FMT_YUVA444P, PIX_FMT_YUV440P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_0RGB32, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_YUV444P9, PIX_FMT_YUV422P9, PIX_FMT_YUV420P9, PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_YUV444P10, PIX_FMT_GRAY16, PIX_FMT_GRAY8, PIX_FMT_NONE},
-.long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
+.long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
 };
 #endif


@@ -689,5 +689,5 @@ AVCodec ff_flac_decoder = {
 .close = flac_decode_close,
 .decode = flac_decode_frame,
 .capabilities = CODEC_CAP_DR1,
-.long_name= NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"),
+.long_name = NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"),
 };


@@ -1327,7 +1327,8 @@ AVCodec ff_flac_encoder = {
 .encode2 = flac_encode_frame,
 .close = flac_encode_close,
 .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_LOSSLESS,
-.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+AV_SAMPLE_FMT_NONE },
 .long_name = NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"),
 .priv_class = &flac_encoder_class,
 };


@@ -462,7 +462,7 @@ AVCodec ff_flashsv_decoder = {
 .close = flashsv_decode_end,
 .decode = flashsv_decode_frame,
 .capabilities = CODEC_CAP_DR1,
-.pix_fmts = (const enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_NONE},
+.pix_fmts = (const enum PixelFormat[]){ PIX_FMT_BGR24, PIX_FMT_NONE },
 .long_name = NULL_IF_CONFIG_SMALL("Flash Screen Video v1"),
 };
 #endif /* CONFIG_FLASHSV_DECODER */
@@ -525,7 +525,7 @@ AVCodec ff_flashsv2_decoder = {
 .close = flashsv2_decode_end,
 .decode = flashsv_decode_frame,
 .capabilities = CODEC_CAP_DR1,
-.pix_fmts = (const enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_NONE},
+.pix_fmts = (const enum PixelFormat[]){ PIX_FMT_BGR24, PIX_FMT_NONE },
 .long_name = NULL_IF_CONFIG_SMALL("Flash Screen Video v2"),
 };
 #endif /* CONFIG_FLASHSV2_DECODER */


@@ -283,6 +283,6 @@ AVCodec ff_flashsv_encoder = {
 .init = flashsv_encode_init,
 .encode2 = flashsv_encode_frame,
 .close = flashsv_encode_end,
-.pix_fmts = (const enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_NONE},
+.pix_fmts = (const enum PixelFormat[]){ PIX_FMT_BGR24, PIX_FMT_NONE },
 .long_name = NULL_IF_CONFIG_SMALL("Flash Screen Video"),
 };


@@ -127,7 +127,7 @@ AVCodec ff_flv_decoder = {
 .close = ff_h263_decode_end,
 .decode = ff_h263_decode_frame,
 .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
-.max_lowres= 3,
-.long_name= NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"),
-.pix_fmts= ff_pixfmt_list_420,
+.max_lowres = 3,
+.long_name = NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"),
+.pix_fmts = ff_pixfmt_list_420,
 };


@@ -94,7 +94,7 @@ AVCodec ff_flv_encoder = {
 .init = ff_MPV_encode_init,
 .encode2 = ff_MPV_encode_picture,
 .close = ff_MPV_encode_end,
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
-.long_name= NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"),
+.pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
+.long_name = NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"),
 .priv_class = &flv_class,
 };


@@ -398,5 +398,6 @@ AVCodec ff_adpcm_g722_encoder = {
 .encode2 = g722_encode_frame,
 .capabilities = CODEC_CAP_SMALL_LAST_FRAME,
 .long_name = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
-.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+AV_SAMPLE_FMT_NONE },
 };


@@ -405,7 +405,8 @@ AVCodec ff_adpcm_g726_encoder = {
 .close = g726_encode_close,
 #endif
 .capabilities = CODEC_CAP_SMALL_LAST_FRAME,
-.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+AV_SAMPLE_FMT_NONE },
 .long_name = NULL_IF_CONFIG_SMALL("G.726 ADPCM"),
 .priv_class = &class,
 .defaults = defaults,


@@ -199,6 +199,9 @@ AVCodec ff_gif_encoder = {
 .init = gif_encode_init,
 .encode2 = gif_encode_frame,
 .close = gif_encode_close,
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_RGB8, PIX_FMT_BGR8, PIX_FMT_RGB4_BYTE, PIX_FMT_BGR4_BYTE, PIX_FMT_GRAY8, PIX_FMT_PAL8, PIX_FMT_NONE},
-.long_name= NULL_IF_CONFIG_SMALL("GIF (Graphics Interchange Format)"),
+.pix_fmts = (const enum PixelFormat[]){
+PIX_FMT_RGB8, PIX_FMT_BGR8, PIX_FMT_RGB4_BYTE, PIX_FMT_BGR4_BYTE,
+PIX_FMT_GRAY8, PIX_FMT_PAL8, PIX_FMT_NONE
+},
+.long_name = NULL_IF_CONFIG_SMALL("GIF (Graphics Interchange Format)"),
 };


@@ -331,7 +331,7 @@ AVCodec ff_h261_encoder = {
 .init = ff_MPV_encode_init,
 .encode2 = ff_MPV_encode_picture,
 .close = ff_MPV_encode_end,
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
-.long_name= NULL_IF_CONFIG_SMALL("H.261"),
+.pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
+.long_name = NULL_IF_CONFIG_SMALL("H.261"),
 .priv_class = &h261_class,
 };


@@ -753,9 +753,10 @@ AVCodec ff_h263_decoder = {
 .init = ff_h263_decode_init,
 .close = ff_h263_decode_end,
 .decode = ff_h263_decode_frame,
-.capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
-.flush= ff_mpeg_flush,
-.max_lowres= 3,
-.long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996, H.263+ / H.263-1998 / H.263 version 2"),
-.pix_fmts= ff_hwaccel_pixfmt_list_420,
+.capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 |
+CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
+.flush = ff_mpeg_flush,
+.max_lowres = 3,
+.long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996, H.263+ / H.263-1998 / H.263 version 2"),
+.pix_fmts = ff_hwaccel_pixfmt_list_420,
 };


@@ -1518,7 +1518,8 @@ AVCodec ff_huffyuv_decoder = {
 .init = decode_init,
 .close = decode_end,
 .decode = decode_frame,
-.capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
+.capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
+CODEC_CAP_FRAME_THREADS,
 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
 };
@@ -1533,7 +1534,8 @@ AVCodec ff_ffvhuff_decoder = {
 .init = decode_init,
 .close = decode_end,
 .decode = decode_frame,
-.capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
+.capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
+CODEC_CAP_FRAME_THREADS,
 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
 };
@@ -1548,7 +1550,9 @@ AVCodec ff_huffyuv_encoder = {
 .init = encode_init,
 .encode2 = encode_frame,
 .close = encode_end,
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE},
+.pix_fmts = (const enum PixelFormat[]){
+PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE
+},
 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
 };
 #endif
@@ -1562,7 +1566,9 @@ AVCodec ff_ffvhuff_encoder = {
 .init = encode_init,
 .encode2 = encode_frame,
 .close = encode_end,
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE},
+.pix_fmts = (const enum PixelFormat[]){
+PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE
+},
 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
 };
 #endif


@@ -134,5 +134,5 @@ AVCodec ff_h263i_decoder = {
 .decode = ff_h263_decode_frame,
 .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
 .long_name = NULL_IF_CONFIG_SMALL("Intel H.263"),
-.pix_fmts= ff_pixfmt_list_420,
+.pix_fmts = ff_pixfmt_list_420,
 };


@@ -399,6 +399,9 @@ AVCodec ff_jpegls_encoder = { //FIXME avoid MPV_* lossless JPEG should not need
 .priv_data_size = sizeof(JpeglsContext),
 .init = encode_init_ls,
 .encode2 = encode_picture_ls,
-.pix_fmts = (const enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_RGB24, PIX_FMT_GRAY8, PIX_FMT_GRAY16, PIX_FMT_NONE},
+.pix_fmts = (const enum PixelFormat[]){
+PIX_FMT_BGR24, PIX_FMT_RGB24, PIX_FMT_GRAY8, PIX_FMT_GRAY16,
+PIX_FMT_NONE
+},
 .long_name = NULL_IF_CONFIG_SMALL("JPEG-LS"),
 };


@@ -230,7 +230,8 @@ AVCodec ff_libfaac_encoder = {
 .encode2 = Faac_encode_frame,
 .close = Faac_encode_close,
 .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY,
-.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+AV_SAMPLE_FMT_NONE },
 .long_name = NULL_IF_CONFIG_SMALL("libfaac AAC (Advanced Audio Codec)"),
 .profiles = NULL_IF_CONFIG_SMALL(profiles),
 };


@@ -124,7 +124,8 @@ AVCodec ff_libgsm_encoder = {
 .init = libgsm_encode_init,
 .encode2 = libgsm_encode_frame,
 .close = libgsm_encode_close,
-.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+AV_SAMPLE_FMT_NONE },
 .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM"),
 };
@@ -135,7 +136,8 @@ AVCodec ff_libgsm_ms_encoder = {
 .init = libgsm_encode_init,
 .encode2 = libgsm_encode_frame,
 .close = libgsm_encode_close,
-.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+AV_SAMPLE_FMT_NONE },
 .long_name = NULL_IF_CONFIG_SMALL("libgsm GSM Microsoft variant"),
 };


@@ -295,7 +295,8 @@ AVCodec ff_libopencore_amrnb_encoder = {
 .encode2 = amr_nb_encode_frame,
 .close = amr_nb_encode_close,
 .capabilities = CODEC_CAP_DELAY | CODEC_CAP_SMALL_LAST_FRAME,
-.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+AV_SAMPLE_FMT_NONE },
 .long_name = NULL_IF_CONFIG_SMALL("OpenCORE Adaptive Multi-Rate (AMR) Narrow-Band"),
 .priv_class = &class,
 };


@@ -370,5 +370,5 @@ AVCodec ff_libopenjpeg_decoder = {
 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
 .max_lowres = 5,
 .long_name = NULL_IF_CONFIG_SMALL("OpenJPEG JPEG 2000"),
-.init_thread_copy = ONLY_IF_THREADS_ENABLED(libopenjpeg_decode_init_thread_copy)
+.init_thread_copy = ONLY_IF_THREADS_ENABLED(libopenjpeg_decode_init_thread_copy),
 };


@@ -447,6 +447,8 @@ AVCodec ff_libschroedinger_encoder = {
 .encode2 = libschroedinger_encode_frame,
 .close = libschroedinger_encode_close,
 .capabilities = CODEC_CAP_DELAY,
-.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE},
+.pix_fmts = (const enum PixelFormat[]){
+PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE
+},
 .long_name = NULL_IF_CONFIG_SMALL("libschroedinger Dirac 2.2"),
 };


@@ -330,7 +330,8 @@ AVCodec ff_libspeex_encoder = {
 .encode2 = encode_frame,
 .close = encode_close,
 .capabilities = CODEC_CAP_DELAY,
-.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE },
+.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+AV_SAMPLE_FMT_NONE },
 .long_name = NULL_IF_CONFIG_SMALL("libspeex Speex"),
 .priv_class = &class,
 .defaults = defaults,


@@ -367,6 +367,8 @@ AVCodec ff_libtheora_encoder = {
 .close = encode_close,
 .encode2 = encode_frame,
 .capabilities = CODEC_CAP_DELAY, // needed to get the statsfile summary
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE},
+.pix_fmts = (const enum PixelFormat[]){
+PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE
+},
 .long_name = NULL_IF_CONFIG_SMALL("libtheora Theora"),
 };


@@ -195,6 +195,7 @@ AVCodec ff_libvo_aacenc_encoder = {
 .close = aac_encode_close,
 .supported_samplerates = avpriv_mpeg4audio_sample_rates,
 .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY,
-.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+AV_SAMPLE_FMT_NONE },
 .long_name = NULL_IF_CONFIG_SMALL("Android VisualOn AAC"),
 };


@@ -146,8 +146,9 @@ AVCodec ff_libvo_amrwbenc_encoder = {
 .init = amr_wb_encode_init,
 .encode2 = amr_wb_encode_frame,
 .close = amr_wb_encode_close,
-.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
-.long_name = NULL_IF_CONFIG_SMALL("Android VisualOn Adaptive Multi-Rate "
-"(AMR) Wide-Band"),
+.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+AV_SAMPLE_FMT_NONE },
+.long_name = NULL_IF_CONFIG_SMALL("Android VisualOn Adaptive "
+"Multi-Rate (AMR) Wide-Band"),
 .priv_class = &class,
 };


@@ -600,7 +600,7 @@ AVCodec ff_libvpx_encoder = {
 .encode2 = vp8_encode,
 .close = vp8_free,
 .capabilities = CODEC_CAP_DELAY | CODEC_CAP_AUTO_THREADS,
-.pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
+.pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
 .long_name = NULL_IF_CONFIG_SMALL("libvpx VP8"),
 .priv_class = &class,
 .defaults = defaults,


@@ -788,8 +788,8 @@ AVCodec ff_libxvid_encoder = {
 .init = xvid_encode_init,
 .encode2 = xvid_encode_frame,
 .close = xvid_encode_close,
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
-.long_name= NULL_IF_CONFIG_SMALL("libxvidcore MPEG-4 part 2"),
+.pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
+.long_name = NULL_IF_CONFIG_SMALL("libxvidcore MPEG-4 part 2"),
 };
 #endif /* CONFIG_LIBXVID_ENCODER */


@@ -275,6 +275,6 @@ AVCodec ff_mdec_decoder = {
 .close = decode_end,
 .decode = decode_frame,
 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
-.long_name= NULL_IF_CONFIG_SMALL("Sony PlayStation MDEC (Motion DECoder)"),
-.init_thread_copy= ONLY_IF_THREADS_ENABLED(decode_init_thread_copy)
+.long_name = NULL_IF_CONFIG_SMALL("Sony PlayStation MDEC (Motion DECoder)"),
+.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy)
 };


@@ -1,5 +1,5 @@
 /*
-* Copyright (C) 2005 Ole André Vadla Ravnås <oleavr@gmail.com>
+* Copyright (C) 2005 Ole André Vadla Ravnås <oleavr@gmail.com>
 * Copyright (C) 2008 Ramiro Polla
 *
 * This file is part of FFmpeg.


@@ -480,8 +480,10 @@ AVCodec ff_mjpeg_encoder = {
 .init = ff_MPV_encode_init,
 .encode2 = ff_MPV_encode_picture,
 .close = ff_MPV_encode_end,
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_NONE},
-.long_name= NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
+.pix_fmts = (const enum PixelFormat[]){
+PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_NONE
+},
+.long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
 };
 AVCodec ff_amv_encoder = {


@@ -2568,7 +2568,9 @@ AVCodec ff_mpeg1video_decoder = {
 .init = mpeg_decode_init,
 .close = mpeg_decode_end,
 .decode = mpeg_decode_frame,
-.capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
+.capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 |
+CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
+CODEC_CAP_SLICE_THREADS,
 .flush = flush,
 .max_lowres = 3,
 .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
@@ -2583,7 +2585,9 @@ AVCodec ff_mpeg2video_decoder = {
 .init = mpeg_decode_init,
 .close = mpeg_decode_end,
 .decode = mpeg_decode_frame,
-.capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
+.capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 |
+CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
+CODEC_CAP_SLICE_THREADS,
 .flush = flush,
 .max_lowres = 3,
 .long_name = NULL_IF_CONFIG_SMALL("MPEG-2 video"),
@@ -2631,7 +2635,8 @@ AVCodec ff_mpeg_xvmc_decoder = {
 .init = mpeg_mc_decode_init,
 .close = mpeg_decode_end,
 .decode = mpeg_decode_frame,
-.capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED| CODEC_CAP_HWACCEL | CODEC_CAP_DELAY,
+.capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 |
+CODEC_CAP_TRUNCATED| CODEC_CAP_HWACCEL | CODEC_CAP_DELAY,
 .flush = flush,
 .long_name = NULL_IF_CONFIG_SMALL("MPEG-1/2 video XvMC (X-Video Motion Compensation)"),
 };
@@ -2647,7 +2652,8 @@ AVCodec ff_mpeg_vdpau_decoder = {
 .init = mpeg_decode_init,
 .close = mpeg_decode_end,
 .decode = mpeg_decode_frame,
-.capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY,
+.capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED |
+CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY,
 .flush = flush,
 .long_name = NULL_IF_CONFIG_SMALL("MPEG-1/2 video (VDPAU acceleration)"),
 };
@@ -2662,7 +2668,8 @@ AVCodec ff_mpeg1_vdpau_decoder = {
 .init = mpeg_decode_init,
 .close = mpeg_decode_end,
 .decode = mpeg_decode_frame,
-.capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY,
+.capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED |
+CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY,
 .flush = flush,
 .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video (VDPAU acceleration)"),
 };


@@ -970,10 +970,11 @@ AVCodec ff_mpeg1video_encoder = {
 .init = encode_init,
 .encode2 = ff_MPV_encode_picture,
 .close = ff_MPV_encode_end,
-.supported_framerates= avpriv_frame_rate_tab+1,
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
-.capabilities= CODEC_CAP_DELAY,
-.long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"),
+.supported_framerates = avpriv_frame_rate_tab+1,
+.pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P,
+PIX_FMT_NONE },
+.capabilities = CODEC_CAP_DELAY,
+.long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
 .priv_class = &mpeg1_class,
 };
@@ -985,9 +986,11 @@ AVCodec ff_mpeg2video_encoder = {
 .init = encode_init,
 .encode2 = ff_MPV_encode_picture,
 .close = ff_MPV_encode_end,
-.supported_framerates= avpriv_frame_rate_tab+1,
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE},
-.capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
-.long_name= NULL_IF_CONFIG_SMALL("MPEG-2 video"),
+.supported_framerates = avpriv_frame_rate_tab + 1,
+.pix_fmts = (const enum PixelFormat[]){
+PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE
+},
+.capabilities = CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
+.long_name = NULL_IF_CONFIG_SMALL("MPEG-2 video"),
 .priv_class = &mpeg2_class,
 };


@@ -2322,13 +2322,15 @@ AVCodec ff_mpeg4_decoder = {
 .init = decode_init,
 .close = ff_h263_decode_end,
 .decode = ff_h263_decode_frame,
-.capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_FRAME_THREADS,
-.flush= ff_mpeg_flush,
-.max_lowres= 3,
-.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
-.pix_fmts= ff_hwaccel_pixfmt_list_420,
+.capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 |
+CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
+CODEC_CAP_FRAME_THREADS,
+.flush = ff_mpeg_flush,
+.max_lowres = 3,
+.long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
+.pix_fmts = ff_hwaccel_pixfmt_list_420,
 .profiles = NULL_IF_CONFIG_SMALL(mpeg4_video_profiles),
-.update_thread_context= ONLY_IF_THREADS_ENABLED(ff_mpeg_update_thread_context),
+.update_thread_context = ONLY_IF_THREADS_ENABLED(ff_mpeg_update_thread_context),
 .priv_class = &mpeg4_class,
 };
@@ -2342,9 +2344,11 @@ AVCodec ff_mpeg4_vdpau_decoder = {
 .init = decode_init,
 .close = ff_h263_decode_end,
 .decode = ff_h263_decode_frame,
-.capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
-.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 (VDPAU)"),
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_VDPAU_MPEG4, PIX_FMT_NONE},
+.capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
+CODEC_CAP_HWACCEL_VDPAU,
+.long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 (VDPAU)"),
+.pix_fmts = (const enum PixelFormat[]){ PIX_FMT_VDPAU_MPEG4,
+PIX_FMT_NONE },
 .priv_class = &mpeg4_vdpau_class,
 };
 #endif


@@ -1349,8 +1349,8 @@ AVCodec ff_mpeg4_encoder = {
 .init = encode_init,
 .encode2 = ff_MPV_encode_picture,
 .close = ff_MPV_encode_end,
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
-.capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
-.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
+.pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
+.capabilities = CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
+.long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
 .priv_class = &mpeg4enc_class,
 };


@@ -787,8 +787,11 @@ AVCodec ff_mp2_encoder = {
 .init = MPA_encode_init,
 .encode2 = MPA_encode_frame,
 .close = MPA_encode_close,
-.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
-.supported_samplerates= (const int[]){44100, 48000, 32000, 22050, 24000, 16000, 0},
+.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+AV_SAMPLE_FMT_NONE },
+.supported_samplerates = (const int[]){
+44100, 48000, 32000, 22050, 24000, 16000, 0
+},
 .long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
 .defaults = mp2_defaults,
 };


@@ -4206,8 +4206,8 @@ AVCodec ff_h263p_encoder = {
 .encode2 = ff_MPV_encode_picture,
 .close = ff_MPV_encode_end,
 .capabilities = CODEC_CAP_SLICE_THREADS,
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
-.long_name= NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
+.pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
+.long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
 .priv_class = &h263p_class,
 };
@@ -4221,8 +4221,8 @@ AVCodec ff_msmpeg4v2_encoder = {
 .init = ff_MPV_encode_init,
 .encode2 = ff_MPV_encode_picture,
 .close = ff_MPV_encode_end,
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
-.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
+.pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
+.long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
 .priv_class = &msmpeg4v2_class,
 };
@@ -4236,8 +4236,8 @@ AVCodec ff_msmpeg4v3_encoder = {
 .init = ff_MPV_encode_init,
 .encode2 = ff_MPV_encode_picture,
 .close = ff_MPV_encode_end,
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
-.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
+.pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
+.long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
 .priv_class = &msmpeg4v3_class,
 };
@@ -4251,7 +4251,7 @@ AVCodec ff_wmv1_encoder = {
 .init = ff_MPV_encode_init,
 .encode2 = ff_MPV_encode_picture,
 .close = ff_MPV_encode_end,
-.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
-.long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
+.pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
+.long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
 .priv_class = &wmv1_class,
 };

View File

@@ -1216,9 +1216,9 @@ AVCodec ff_msmpeg4v1_decoder = {
     .close = ff_h263_decode_end,
     .decode = ff_h263_decode_frame,
     .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
-    .max_lowres= 3,
-    .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 1"),
-    .pix_fmts= ff_pixfmt_list_420,
+    .max_lowres = 3,
+    .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 1"),
+    .pix_fmts = ff_pixfmt_list_420,
 };

 AVCodec ff_msmpeg4v2_decoder = {
@@ -1230,9 +1230,9 @@ AVCodec ff_msmpeg4v2_decoder = {
     .close = ff_h263_decode_end,
     .decode = ff_h263_decode_frame,
     .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
-    .max_lowres= 3,
-    .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
-    .pix_fmts= ff_pixfmt_list_420,
+    .max_lowres = 3,
+    .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
+    .pix_fmts = ff_pixfmt_list_420,
 };

 AVCodec ff_msmpeg4v3_decoder = {
@@ -1244,9 +1244,9 @@ AVCodec ff_msmpeg4v3_decoder = {
     .close = ff_h263_decode_end,
     .decode = ff_h263_decode_frame,
     .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
-    .max_lowres= 3,
-    .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
-    .pix_fmts= ff_pixfmt_list_420,
+    .max_lowres = 3,
+    .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
+    .pix_fmts = ff_pixfmt_list_420,
 };

 AVCodec ff_wmv1_decoder = {
@@ -1258,7 +1258,7 @@ AVCodec ff_wmv1_decoder = {
     .close = ff_h263_decode_end,
     .decode = ff_h263_decode_frame,
     .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
-    .max_lowres= 3,
-    .long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
-    .pix_fmts= ff_pixfmt_list_420,
+    .max_lowres = 3,
+    .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
+    .pix_fmts = ff_pixfmt_list_420,
 };

View File

@@ -157,5 +157,5 @@ AVCodec ff_msrle_decoder = {
     .close = msrle_decode_end,
     .decode = msrle_decode_frame,
     .capabilities = CODEC_CAP_DR1,
-    .long_name= NULL_IF_CONFIG_SMALL("Microsoft RLE"),
+    .long_name = NULL_IF_CONFIG_SMALL("Microsoft RLE"),
 };

View File

@@ -343,5 +343,5 @@ AVCodec ff_msvideo1_decoder = {
     .close = msvideo1_decode_end,
     .decode = msvideo1_decode_frame,
     .capabilities = CODEC_CAP_DR1,
-    .long_name= NULL_IF_CONFIG_SMALL("Microsoft Video 1"),
+    .long_name = NULL_IF_CONFIG_SMALL("Microsoft Video 1"),
 };

View File

@@ -339,5 +339,5 @@ AVCodec ff_mxpeg_decoder = {
     .close = mxpeg_decode_end,
     .decode = mxpeg_decode_frame,
     .capabilities = CODEC_CAP_DR1,
-    .max_lowres = 3
+    .max_lowres = 3,
 };

View File

@@ -423,5 +423,6 @@ AVCodec ff_nellymoser_encoder = {
     .close = encode_end,
     .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY,
     .long_name = NULL_IF_CONFIG_SMALL("Nellymoser Asao"),
-    .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
+    .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
+                                                  AV_SAMPLE_FMT_NONE },
 };

View File

@@ -136,6 +136,8 @@ AVCodec ff_pam_encoder = {
     .priv_data_size = sizeof(PNMContext),
     .init = ff_pnm_init,
     .encode2 = pam_encode_frame,
-    .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA, PIX_FMT_RGB48BE, PIX_FMT_RGBA64BE, PIX_FMT_GRAY8, PIX_FMT_GRAY8A, PIX_FMT_GRAY16BE, PIX_FMT_MONOBLACK, PIX_FMT_NONE},
+    .pix_fmts = (const enum PixelFormat[]){
+        PIX_FMT_RGB24, PIX_FMT_RGBA, PIX_FMT_RGB48BE, PIX_FMT_RGBA64BE, PIX_FMT_GRAY8, PIX_FMT_GRAY8A, PIX_FMT_GRAY16BE, PIX_FMT_MONOBLACK, PIX_FMT_NONE
+    },
     .long_name = NULL_IF_CONFIG_SMALL("PAM (Portable AnyMap) image"),
 };

View File

@@ -323,7 +323,8 @@ AVCodec ff_pcm_bluray_decoder = {
     .init = pcm_bluray_decode_init,
     .decode = pcm_bluray_decode_frame,
     .capabilities = CODEC_CAP_DR1,
-    .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32,
-                                                 AV_SAMPLE_FMT_NONE},
+    .sample_fmts = (const enum AVSampleFormat[]){
+        AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_NONE
+    },
     .long_name = NULL_IF_CONFIG_SMALL("PCM signed 16|20|24-bit big-endian for Blu-ray media"),
 };

View File

@@ -216,8 +216,10 @@ AVCodec ff_pcx_encoder = {
     .encode2 = pcx_encode_frame,
     .pix_fmts = (const enum PixelFormat[]){
         PIX_FMT_RGB24,
-        PIX_FMT_RGB8, PIX_FMT_BGR8, PIX_FMT_RGB4_BYTE, PIX_FMT_BGR4_BYTE, PIX_FMT_GRAY8, PIX_FMT_PAL8,
+        PIX_FMT_RGB8, PIX_FMT_BGR8, PIX_FMT_RGB4_BYTE, PIX_FMT_BGR4_BYTE,
+        PIX_FMT_GRAY8, PIX_FMT_PAL8,
         PIX_FMT_MONOBLACK,
-        PIX_FMT_NONE},
+        PIX_FMT_NONE
+    },
     .long_name = NULL_IF_CONFIG_SMALL("PC Paintbrush PCX image"),
 };

View File

@@ -443,11 +443,13 @@ AVCodec ff_png_encoder = {
     .priv_data_size = sizeof(PNGEncContext),
     .init = png_enc_init,
     .encode2 = encode_frame,
-    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA,
+    .pix_fmts = (const enum PixelFormat[]){
+        PIX_FMT_RGB24, PIX_FMT_RGBA,
         PIX_FMT_RGB48BE, PIX_FMT_RGBA64BE,
         PIX_FMT_PAL8,
         PIX_FMT_GRAY8, PIX_FMT_GRAY8A,
         PIX_FMT_GRAY16BE,
-        PIX_FMT_MONOBLACK, PIX_FMT_NONE},
-    .long_name= NULL_IF_CONFIG_SMALL("PNG (Portable Network Graphics) image"),
+        PIX_FMT_MONOBLACK, PIX_FMT_NONE
+    },
+    .long_name = NULL_IF_CONFIG_SMALL("PNG (Portable Network Graphics) image"),
 };

View File

@@ -127,7 +127,9 @@ AVCodec ff_pgm_encoder = {
     .priv_data_size = sizeof(PNMContext),
     .init = ff_pnm_init,
     .encode2 = pnm_encode_frame,
-    .pix_fmts = (const enum PixelFormat[]){PIX_FMT_GRAY8, PIX_FMT_GRAY16BE, PIX_FMT_NONE},
+    .pix_fmts = (const enum PixelFormat[]){
+        PIX_FMT_GRAY8, PIX_FMT_GRAY16BE, PIX_FMT_NONE
+    },
     .long_name = NULL_IF_CONFIG_SMALL("PGM (Portable GrayMap) image"),
 };
 #endif
@@ -140,7 +142,7 @@ AVCodec ff_pgmyuv_encoder = {
     .priv_data_size = sizeof(PNMContext),
     .init = ff_pnm_init,
     .encode2 = pnm_encode_frame,
-    .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
+    .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
     .long_name = NULL_IF_CONFIG_SMALL("PGMYUV (Portable GrayMap YUV) image"),
 };
 #endif
@@ -153,7 +155,9 @@ AVCodec ff_ppm_encoder = {
     .priv_data_size = sizeof(PNMContext),
     .init = ff_pnm_init,
     .encode2 = pnm_encode_frame,
-    .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB48BE, PIX_FMT_NONE},
+    .pix_fmts = (const enum PixelFormat[]){
+        PIX_FMT_RGB24, PIX_FMT_RGB48BE, PIX_FMT_NONE
+    },
     .long_name = NULL_IF_CONFIG_SMALL("PPM (Portable PixelMap) image"),
 };
 #endif
@@ -166,7 +170,8 @@ AVCodec ff_pbm_encoder = {
     .priv_data_size = sizeof(PNMContext),
     .init = ff_pnm_init,
     .encode2 = pnm_encode_frame,
-    .pix_fmts = (const enum PixelFormat[]){PIX_FMT_MONOWHITE, PIX_FMT_NONE},
+    .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_MONOWHITE,
+                                            PIX_FMT_NONE },
     .long_name = NULL_IF_CONFIG_SMALL("PBM (Portable BitMap) image"),
 };
 #endif

View File

@@ -354,6 +354,8 @@ AVCodec ff_qtrle_encoder = {
     .init = qtrle_encode_init,
     .encode2 = qtrle_encode_frame,
     .close = qtrle_encode_end,
-    .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB555BE, PIX_FMT_ARGB, PIX_FMT_GRAY8, PIX_FMT_NONE},
+    .pix_fmts = (const enum PixelFormat[]){
+        PIX_FMT_RGB24, PIX_FMT_RGB555BE, PIX_FMT_ARGB, PIX_FMT_GRAY8, PIX_FMT_NONE
+    },
     .long_name = NULL_IF_CONFIG_SMALL("QuickTime Animation (RLE) video"),
 };

View File

@@ -532,5 +532,5 @@ AVCodec ff_ralf_decoder = {
     .decode = decode_frame,
     .flush = decode_flush,
     .capabilities = CODEC_CAP_DR1,
-    .long_name = NULL_IF_CONFIG_SMALL("RealAudio Lossless")
+    .long_name = NULL_IF_CONFIG_SMALL("RealAudio Lossless"),
 };

View File

@@ -257,5 +257,5 @@ AVCodec ff_rawvideo_decoder = {
     .close = raw_close_decoder,
     .decode = raw_decode,
     .long_name = NULL_IF_CONFIG_SMALL("raw video"),
-    .priv_class= &class,
+    .priv_class = &class,
 };

View File

@@ -209,6 +209,7 @@ AVCodec ff_roq_dpcm_encoder = {
     .encode2 = roq_dpcm_encode_frame,
     .close = roq_dpcm_encode_close,
     .capabilities = CODEC_CAP_DELAY,
-    .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+    .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+                                                  AV_SAMPLE_FMT_NONE },
     .long_name = NULL_IF_CONFIG_SMALL("id RoQ DPCM"),
 };

View File

@@ -1078,7 +1078,8 @@ AVCodec ff_roq_encoder = {
     .init = roq_encode_init,
     .encode2 = roq_encode_frame,
     .close = roq_encode_end,
-    .supported_framerates = (const AVRational[]){{30,1}, {0,0}},
-    .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV444P, PIX_FMT_NONE},
+    .supported_framerates = (const AVRational[]){ {30,1}, {0,0} },
+    .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV444P,
+                                            PIX_FMT_NONE },
     .long_name = NULL_IF_CONFIG_SMALL("id RoQ video"),
 };

View File

@@ -748,7 +748,7 @@ AVCodec ff_rv10_decoder = {
     .capabilities = CODEC_CAP_DR1,
     .max_lowres = 3,
     .long_name = NULL_IF_CONFIG_SMALL("RealVideo 1.0"),
-    .pix_fmts= ff_pixfmt_list_420,
+    .pix_fmts = ff_pixfmt_list_420,
 };

 AVCodec ff_rv20_decoder = {
@@ -760,8 +760,8 @@ AVCodec ff_rv20_decoder = {
     .close = rv10_decode_end,
     .decode = rv10_decode_frame,
     .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
-    .flush= ff_mpeg_flush,
+    .flush = ff_mpeg_flush,
     .max_lowres = 3,
     .long_name = NULL_IF_CONFIG_SMALL("RealVideo 2.0"),
-    .pix_fmts= ff_pixfmt_list_420,
+    .pix_fmts = ff_pixfmt_list_420,
 };

View File

@@ -66,7 +66,7 @@ AVCodec ff_rv10_encoder = {
     .init = ff_MPV_encode_init,
     .encode2 = ff_MPV_encode_picture,
     .close = ff_MPV_encode_end,
-    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
-    .long_name= NULL_IF_CONFIG_SMALL("RealVideo 1.0"),
+    .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
+    .long_name = NULL_IF_CONFIG_SMALL("RealVideo 1.0"),
     .priv_class = &rv10_class,
 };

View File

@@ -67,7 +67,7 @@ AVCodec ff_rv20_encoder = {
     .init = ff_MPV_encode_init,
     .encode2 = ff_MPV_encode_picture,
     .close = ff_MPV_encode_end,
-    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
-    .long_name= NULL_IF_CONFIG_SMALL("RealVideo 2.0"),
+    .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
+    .long_name = NULL_IF_CONFIG_SMALL("RealVideo 2.0"),
     .priv_class = &rv20_class,
 };

View File

@@ -279,7 +279,8 @@ AVCodec ff_rv30_decoder = {
     .init = rv30_decode_init,
     .close = ff_rv34_decode_end,
     .decode = ff_rv34_decode_frame,
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_FRAME_THREADS,
+    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY |
+                    CODEC_CAP_FRAME_THREADS,
     .flush = ff_mpeg_flush,
     .long_name = NULL_IF_CONFIG_SMALL("RealVideo 3.0"),
     .pix_fmts = ff_pixfmt_list_420,

View File

@@ -567,7 +567,8 @@ AVCodec ff_rv40_decoder = {
     .init = rv40_decode_init,
     .close = ff_rv34_decode_end,
     .decode = ff_rv34_decode_frame,
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_FRAME_THREADS,
+    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY |
+                    CODEC_CAP_FRAME_THREADS,
     .flush = ff_mpeg_flush,
     .long_name = NULL_IF_CONFIG_SMALL("RealVideo 4.0"),
     .pix_fmts = ff_pixfmt_list_420,

View File

@@ -212,10 +212,12 @@ AVCodec ff_sgi_encoder = {
     .priv_data_size = sizeof(SgiContext),
     .init = encode_init,
     .encode2 = encode_frame,
-    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA,
+    .pix_fmts = (const enum PixelFormat[]){
+        PIX_FMT_RGB24, PIX_FMT_RGBA,
         PIX_FMT_RGB48LE, PIX_FMT_RGB48BE,
         PIX_FMT_RGBA64LE, PIX_FMT_RGBA64BE,
         PIX_FMT_GRAY16LE, PIX_FMT_GRAY16BE,
-        PIX_FMT_GRAY8, PIX_FMT_NONE},
-    .long_name= NULL_IF_CONFIG_SMALL("SGI image"),
+        PIX_FMT_GRAY8, PIX_FMT_NONE
+    },
+    .long_name = NULL_IF_CONFIG_SMALL("SGI image"),
 };

View File

@@ -638,5 +638,5 @@ AVCodec ff_shorten_decoder = {
     .close = shorten_decode_close,
     .decode = shorten_decode_frame,
     .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
-    .long_name= NULL_IF_CONFIG_SMALL("Shorten"),
+    .long_name = NULL_IF_CONFIG_SMALL("Shorten"),
 };

View File

@@ -818,7 +818,7 @@ AVCodec ff_svq1_decoder = {
     .close = svq1_decode_end,
     .decode = svq1_decode_frame,
     .capabilities = CODEC_CAP_DR1,
-    .flush= ff_mpeg_flush,
-    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV410P, PIX_FMT_NONE},
-    .long_name= NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
+    .flush = ff_mpeg_flush,
+    .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV410P, PIX_FMT_NONE },
+    .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
 };

View File

@@ -587,6 +587,6 @@ AVCodec ff_svq1_encoder = {
     .init = svq1_encode_init,
     .encode2 = svq1_encode_frame,
     .close = svq1_encode_end,
-    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV410P, PIX_FMT_NONE},
-    .long_name= NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
+    .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV410P, PIX_FMT_NONE },
+    .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
 };

View File

@@ -1139,7 +1139,8 @@ AVCodec ff_svq3_decoder = {
     .init = svq3_decode_init,
     .close = svq3_decode_end,
     .decode = svq3_decode_frame,
-    .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+    .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 |
+                    CODEC_CAP_DELAY,
     .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
-    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_NONE},
+    .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUVJ420P, PIX_FMT_NONE },
 };

View File

@@ -166,6 +166,9 @@ AVCodec ff_targa_encoder = {
     .priv_data_size = sizeof(TargaContext),
     .init = targa_encode_init,
     .encode2 = targa_encode_frame,
-    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_BGRA, PIX_FMT_RGB555LE, PIX_FMT_GRAY8, PIX_FMT_NONE},
+    .pix_fmts = (const enum PixelFormat[]){
+        PIX_FMT_BGR24, PIX_FMT_BGRA, PIX_FMT_RGB555LE, PIX_FMT_GRAY8,
+        PIX_FMT_NONE
+    },
     .long_name= NULL_IF_CONFIG_SMALL("Truevision Targa image"),
 };

View File

@@ -491,13 +491,14 @@ AVCodec ff_tiff_encoder = {
     .id = CODEC_ID_TIFF,
     .priv_data_size = sizeof(TiffEncoderContext),
     .encode2 = encode_frame,
-    .pix_fmts =
-        (const enum PixelFormat[]) {PIX_FMT_RGB24, PIX_FMT_PAL8, PIX_FMT_GRAY8,
+    .pix_fmts = (const enum PixelFormat[]) {
+        PIX_FMT_RGB24, PIX_FMT_PAL8, PIX_FMT_GRAY8,
         PIX_FMT_MONOBLACK, PIX_FMT_MONOWHITE,
-        PIX_FMT_YUV420P, PIX_FMT_YUV422P,
-        PIX_FMT_YUV444P, PIX_FMT_YUV410P,
-        PIX_FMT_YUV411P, PIX_FMT_RGB48LE,
-        PIX_FMT_RGBA, PIX_FMT_RGBA64LE, PIX_FMT_NONE},
+        PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P,
+        PIX_FMT_YUV410P, PIX_FMT_YUV411P, PIX_FMT_RGB48LE,
+        PIX_FMT_RGBA, PIX_FMT_RGBA64LE,
+        PIX_FMT_NONE
+    },
     .long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
     .priv_class = &tiffenc_class,
 };

View File

@@ -121,6 +121,6 @@ AVCodec ff_v210_encoder = {
     .init = encode_init,
     .encode2 = encode_frame,
     .close = encode_close,
-    .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV422P10, PIX_FMT_NONE},
+    .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV422P10, PIX_FMT_NONE },
     .long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
 };

View File

@@ -5721,7 +5721,7 @@ AVCodec ff_wmv3_vdpau_decoder = {
     .decode = vc1_decode_frame,
     .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
     .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
-    .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_WMV3, PIX_FMT_NONE},
+    .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_VDPAU_WMV3, PIX_FMT_NONE },
     .profiles = NULL_IF_CONFIG_SMALL(profiles)
 };
 #endif
@@ -5737,7 +5737,7 @@ AVCodec ff_vc1_vdpau_decoder = {
     .decode = vc1_decode_frame,
     .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
     .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
-    .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_VC1, PIX_FMT_NONE},
+    .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_VDPAU_VC1, PIX_FMT_NONE },
     .profiles = NULL_IF_CONFIG_SMALL(profiles)
 };
 #endif

View File

@@ -1210,7 +1210,8 @@ AVCodec ff_vorbis_encoder = {
     .init = vorbis_encode_init,
     .encode2 = vorbis_encode_frame,
     .close = vorbis_encode_close,
-    .capabilities= CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL,
-    .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+    .capabilities = CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL,
+    .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+                                                  AV_SAMPLE_FMT_NONE },
     .long_name = NULL_IF_CONFIG_SMALL("Vorbis"),
 };

View File

@@ -2376,7 +2376,8 @@ AVCodec ff_theora_decoder = {
     .init = theora_decode_init,
     .close = vp3_decode_end,
     .decode = vp3_decode_frame,
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
+    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
+                    CODEC_CAP_FRAME_THREADS,
     .flush = vp3_decode_flush,
     .long_name = NULL_IF_CONFIG_SMALL("Theora"),
     .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
@@ -2392,9 +2393,10 @@ AVCodec ff_vp3_decoder = {
     .init = vp3_decode_init,
     .close = vp3_decode_end,
     .decode = vp3_decode_frame,
-    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
+    .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
+                    CODEC_CAP_FRAME_THREADS,
     .flush = vp3_decode_flush,
     .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"),
     .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy),
-    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context)
+    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
 };

View File

@@ -425,7 +425,8 @@ AVCodec ff_wmav1_encoder = {
     .init = encode_init,
     .encode2 = encode_superframe,
     .close = ff_wma_end,
-    .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+    .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+                                                  AV_SAMPLE_FMT_NONE },
     .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 1"),
 };
@@ -437,6 +438,7 @@ AVCodec ff_wmav2_encoder = {
     .init = encode_init,
     .encode2 = encode_superframe,
     .close = ff_wma_end,
-    .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+    .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+                                                  AV_SAMPLE_FMT_NONE },
     .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 2"),
 };

View File

@@ -1624,6 +1624,6 @@ AVCodec ff_wmapro_decoder = {
     .close = decode_end,
     .decode = decode_packet,
     .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
-    .flush= flush,
+    .flush = flush,
     .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Professional"),
 };

View File

@@ -480,5 +480,5 @@ AVCodec ff_wmv2_decoder = {
     .decode = ff_h263_decode_frame,
     .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
     .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 8"),
-    .pix_fmts= ff_pixfmt_list_420,
+    .pix_fmts = ff_pixfmt_list_420,
 };

View File

@@ -219,6 +219,6 @@ AVCodec ff_wmv2_encoder = {
     .init = wmv2_encode_init,
     .encode2 = ff_MPV_encode_picture,
     .close = ff_MPV_encode_end,
-    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
-    .long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 8"),
+    .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
+    .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 8"),
 };

View File

@@ -342,6 +342,6 @@ AVCodec ff_zmbv_encoder = {
     .init = encode_init,
     .encode2 = encode_frame,
     .close = encode_end,
-    .pix_fmts = (const enum PixelFormat[]){PIX_FMT_PAL8, PIX_FMT_NONE},
+    .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_PAL8, PIX_FMT_NONE },
     .long_name = NULL_IF_CONFIG_SMALL("Zip Motion Blocks Video"),
 };

View File

@@ -26,6 +26,7 @@
 #include "libavutil/avstring.h"
 #include "libavutil/dict.h"
 #include "libavutil/mathematics.h"
+#include "libavutil/opt.h"
 #include "avformat.h"
 #include "internal.h"
 #include "avio_internal.h"
@@ -35,6 +36,7 @@
 #include "avlanguage.h"

 typedef struct {
+    const AVClass *class;
     int asfid2avid[128];            ///< conversion table from asf ID 2 AVStream ID
     ASFStream streams[128];         ///< it's max number and it's not that big
     uint32_t stream_bitrates[128];  ///< max number of streams, bitrate for each (for streaming)
@@ -72,8 +74,22 @@ typedef struct {
     int stream_index;

     ASFStream* asf_st;              ///< currently decoded stream
+
+    int no_resync_search;
 } ASFContext;

+static const AVOption options[] = {
+    {"no_resync_search", "Don't try to resynchronize by looking for a certain optional start code", offsetof(ASFContext, no_resync_search), AV_OPT_TYPE_INT, {.dbl = 0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM },
+    { NULL },
+};
+
+static const AVClass asf_class = {
+    .class_name = "asf demuxer",
+    .item_name = av_default_item_name,
+    .option = options,
+    .version = LIBAVUTIL_VERSION_INT,
+};
+
 #undef NDEBUG
 #include <assert.h>
@@ -713,7 +729,9 @@ static int ff_asf_get_packet(AVFormatContext *s, AVIOContext *pb)
     // if we do not know packet size, allow skipping up to 32 kB
     off= 32768;
-    if (s->packet_size > 0)
+    if (asf->no_resync_search)
+        off = 3;
+    else if (s->packet_size > 0)
         off= (avio_tell(pb) - s->data_offset) % s->packet_size + 3;
     c=d=e=-1;
@@ -1302,4 +1320,5 @@ AVInputFormat ff_asf_demuxer = {
     .read_seek = asf_read_seek,
     .read_timestamp = asf_read_pts,
     .flags = AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH,
+    .priv_class = &asf_class,
 };
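The hunks above wire a private demuxer option into the generic AVOption/AVClass machinery: the option table is resolved via `offsetof()` into `ASFContext`, and the `AVClass` pointer in the struct's first field is what lets `av_opt_*` find that table. The toy sketch below only illustrates that mechanism; the `ToyContext` struct, option table and class name are made up for this example and are not part of the commit.

```c
/* Sketch of the AVOption/AVClass pattern used by the asf demuxer change.
 * Everything named "toy" here is hypothetical; only the mechanism
 * (offsetof-based options resolved through the AVClass in the first field)
 * mirrors the real code. */
#include <stddef.h>
#include <libavutil/opt.h>
#include <libavutil/log.h>

typedef struct ToyContext {
    const AVClass *class;       /* must be the first field */
    int no_resync_search;
} ToyContext;

static const AVOption toy_options[] = {
    { "no_resync_search", "disable packet-marker resync",
      offsetof(ToyContext, no_resync_search), AV_OPT_TYPE_INT,
      { .dbl = 0 }, 0, 1, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};

static const AVClass toy_class = {
    .class_name = "toy demuxer",
    .item_name  = av_default_item_name,
    .option     = toy_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

int main(void)
{
    ToyContext ctx = { .class = &toy_class };
    /* av_opt_set() walks toy_options through ctx.class and writes the
     * parsed value into ctx.no_resync_search. */
    av_opt_set(&ctx, "no_resync_search", "1", 0);
    return ctx.no_resync_search == 1 ? 0 : 1;
}
```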

View File

@@ -99,6 +99,7 @@ int ff_wms_parse_sdp_a_line(AVFormatContext *s, const char *p)
     if (av_strstart(p, "pgmpu:data:application/vnd.ms.wms-hdr.asfv1;base64,", &p)) {
         AVIOContext pb;
         RTSPState *rt = s->priv_data;
+        AVDictionary *opts = NULL;
         int len = strlen(p) * 6 / 8;
         char *buf = av_mallocz(len);
         av_base64_decode(buf, p, len);
@@ -113,7 +114,9 @@ int ff_wms_parse_sdp_a_line(AVFormatContext *s, const char *p)
         if (!(rt->asf_ctx = avformat_alloc_context()))
             return AVERROR(ENOMEM);
         rt->asf_ctx->pb = &pb;
-        ret = avformat_open_input(&rt->asf_ctx, "", &ff_asf_demuxer, NULL);
+        av_dict_set(&opts, "no_resync_search", "1", 0);
+        ret = avformat_open_input(&rt->asf_ctx, "", &ff_asf_demuxer, &opts);
+        av_dict_free(&opts);
         if (ret < 0)
             return ret;
         av_dict_copy(&s->metadata, rt->asf_ctx->metadata, 0);
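The chained RTP/ASF demuxer passes the new option through the same dictionary interface that public callers use. A minimal sketch of that caller-side pattern is below; the file path and helper name are placeholders, `av_register_all()` is assumed to have been called, and error handling is abbreviated. Note that `avformat_open_input()` removes the options it recognizes from the dictionary, so leftover entries indicate option names the demuxer did not accept.

```c
/* Sketch: pass the asf demuxer's "no_resync_search" private option via an
 * AVDictionary when opening an input, then report any unconsumed options. */
#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

static int open_asf_no_resync(const char *filename)   /* hypothetical helper */
{
    AVFormatContext *ctx = NULL;
    AVDictionary *opts = NULL;
    AVDictionaryEntry *e = NULL;
    int ret;

    av_dict_set(&opts, "no_resync_search", "1", 0);
    ret = avformat_open_input(&ctx, filename, NULL, &opts);

    /* Anything left in opts was not recognized by the chosen demuxer. */
    while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)))
        fprintf(stderr, "Option '%s' was not consumed\n", e->key);
    av_dict_free(&opts);

    if (ret >= 0) {
        ret = avformat_find_stream_info(ctx, NULL);
        avformat_close_input(&ctx);
    }
    return ret;
}
```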

View File

@@ -2483,7 +2483,9 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
                 break;
             if(st->parser && st->parser->parser->split && !st->codec->extradata)
                 break;
-            if(st->first_dts == AV_NOPTS_VALUE && (st->codec->codec_type == AVMEDIA_TYPE_VIDEO || st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
+            if (st->first_dts == AV_NOPTS_VALUE &&
+                (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
+                 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
                 break;
         }
         if (i == ic->nb_streams) {