avfilter: various cosmetics
Signed-off-by: Paul B Mahol <onemda@gmail.com>
commit b211607b5c
parent ba5e77814e
@@ -47,7 +47,7 @@ typedef struct {
static const AVOption aconvert_options[] = {
{ "sample_fmt", "", OFFSET(format_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ "channel_layout", "", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ NULL },
{ NULL }
};

AVFILTER_DEFINE_CLASS(aconvert);

@@ -19,10 +19,10 @@
*
*/

#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "libavutil/avassert.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
@@ -52,7 +52,7 @@ static const AVOption aecho_options[] = {
{ "out_gain", "set signal output gain", OFFSET(out_gain), AV_OPT_TYPE_FLOAT, {.dbl=0.3}, 0, 1, A },
{ "delays", "set list of signal delays", OFFSET(delays), AV_OPT_TYPE_STRING, {.str="1000"}, 0, 0, A },
{ "decays", "set list of signal decays", OFFSET(decays), AV_OPT_TYPE_STRING, {.str="0.5"}, 0, 0, A },
{ NULL },
{ NULL }
};

AVFILTER_DEFINE_CLASS(aecho);
@@ -333,7 +333,7 @@ static const AVFilterPad aecho_inputs[] = {
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL },
{ NULL }
};

static const AVFilterPad aecho_outputs[] = {
@@ -343,7 +343,7 @@ static const AVFilterPad aecho_outputs[] = {
.config_props = config_output,
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL },
{ NULL }
};

AVFilter avfilter_af_aecho = {

@@ -72,7 +72,7 @@ static const AVOption afade_options[] = {
{ "cub", "cubic", 0, AV_OPT_TYPE_CONST, {.i64 = CUB }, 0, 0, FLAGS, "curve" },
{ "squ", "square root", 0, AV_OPT_TYPE_CONST, {.i64 = SQU }, 0, 0, FLAGS, "curve" },
{ "cbr", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64 = CBR }, 0, 0, FLAGS, "curve" },
{NULL},
{ NULL }
};

AVFILTER_DEFINE_CLASS(afade);
@@ -281,8 +281,8 @@ static const AVFilterPad avfilter_af_afade_inputs[] = {

static const AVFilterPad avfilter_af_afade_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};

@@ -52,7 +52,7 @@ static const AVOption aformat_options[] = {
{ "sample_fmts", "A comma-separated list of sample formats.", OFFSET(formats_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ "sample_rates", "A comma-separated list of sample rates.", OFFSET(sample_rates_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ "channel_layouts", "A comma-separated list of channel layouts.", OFFSET(channel_layouts_str), AV_OPT_TYPE_STRING, .flags = A|F },
{ NULL },
{ NULL }
};

AVFILTER_DEFINE_CLASS(aformat);
@@ -142,7 +142,6 @@ AVFilter avfilter_af_aformat = {
.query_formats = query_formats,
.priv_size = sizeof(AFormatContext),
.priv_class = &aformat_class,

.inputs = avfilter_af_aformat_inputs,
.outputs = avfilter_af_aformat_outputs,
};

@@ -52,7 +52,7 @@ typedef struct {
static const AVOption amerge_options[] = {
{ "inputs", "specify the number of inputs", OFFSET(nb_inputs),
AV_OPT_TYPE_INT, { .i64 = 2 }, 2, SWR_CH_MAX, FLAGS },
{0}
{ NULL }
};

AVFILTER_DEFINE_CLASS(amerge);

@@ -187,7 +187,7 @@ static const AVOption amix_options[] = {
{ "dropout_transition", "Transition time, in seconds, for volume "
"renormalization when an input stream ends.",
OFFSET(dropout_transition), AV_OPT_TYPE_FLOAT, { .dbl = 2.0 }, 0, INT_MAX, A|F },
{ NULL },
{ NULL }
};

AVFILTER_DEFINE_CLASS(amix);
@@ -547,17 +547,14 @@ static const AVFilterPad avfilter_af_amix_outputs[] = {
};

AVFilter avfilter_af_amix = {
.name = "amix",
.description = NULL_IF_CONFIG_SMALL("Audio mixing."),
.priv_size = sizeof(MixContext),
.priv_class = &amix_class,

.name = "amix",
.description = NULL_IF_CONFIG_SMALL("Audio mixing."),
.priv_size = sizeof(MixContext),
.priv_class = &amix_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,

.inputs = NULL,
.outputs = avfilter_af_amix_outputs,

.flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
.inputs = NULL,
.outputs = avfilter_af_amix_outputs,
.flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};

@@ -29,8 +29,8 @@

static const AVFilterPad avfilter_af_anull_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
@@ -44,9 +44,9 @@ static const AVFilterPad avfilter_af_anull_outputs[] = {
};

AVFilter avfilter_af_anull = {
.name = "anull",
.description = NULL_IF_CONFIG_SMALL("Pass the source unchanged to the output."),
.name = "anull",
.description = NULL_IF_CONFIG_SMALL("Pass the source unchanged to the output."),
.query_formats = ff_query_formats_all,
.inputs = avfilter_af_anull_inputs,
.outputs = avfilter_af_anull_outputs,
.inputs = avfilter_af_anull_inputs,
.outputs = avfilter_af_anull_outputs,
};

@@ -51,7 +51,7 @@ static const AVOption apad_options[] = {
{ "packet_size", "set silence packet size", OFFSET(packet_size), AV_OPT_TYPE_INT, { .i64 = 4096 }, 0, INT_MAX, A },
{ "pad_len", "number of samples of silence to add", OFFSET(pad_len), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, A },
{ "whole_len", "target number of samples in the audio stream", OFFSET(whole_len), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, A },
{ NULL },
{ NULL }
};

AVFILTER_DEFINE_CLASS(apad);
@@ -132,7 +132,7 @@ static const AVFilterPad apad_inputs[] = {
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL },
{ NULL }
};

static const AVFilterPad apad_outputs[] = {
@@ -141,7 +141,7 @@ static const AVFilterPad apad_outputs[] = {
.request_frame = request_frame,
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL },
{ NULL }
};

AVFilter avfilter_af_apad = {

@@ -71,7 +71,7 @@ static const AVOption aphaser_options[] = {
{ "t", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_TRI}, 0, 0, FLAGS, "type" },
{ "sinusoidal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_SIN}, 0, 0, FLAGS, "type" },
{ "s", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_SIN}, 0, 0, FLAGS, "type" },
{ NULL },
{ NULL }
};

AVFILTER_DEFINE_CLASS(aphaser);

@@ -283,7 +283,7 @@ static const AVFilterPad aresample_inputs[] = {
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL },
{ NULL }
};

static const AVFilterPad aresample_outputs[] = {
@@ -293,7 +293,7 @@ static const AVFilterPad aresample_outputs[] = {
.request_frame = request_frame,
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL },
{ NULL }
};

AVFilter avfilter_af_aresample = {

@@ -167,11 +167,11 @@ static int request_frame(AVFilterLink *outlink)

static const AVFilterPad asetnsamples_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
{ NULL }
};

static const AVFilterPad asetnsamples_outputs[] = {
@@ -185,12 +185,12 @@ static const AVFilterPad asetnsamples_outputs[] = {
};

AVFilter avfilter_af_asetnsamples = {
.name = "asetnsamples",
.description = NULL_IF_CONFIG_SMALL("Set the number of samples for each output audio frames."),
.priv_size = sizeof(ASNSContext),
.init = init,
.uninit = uninit,
.inputs = asetnsamples_inputs,
.outputs = asetnsamples_outputs,
.priv_class = &asetnsamples_class,
.name = "asetnsamples",
.description = NULL_IF_CONFIG_SMALL("Set the number of samples for each output audio frames."),
.priv_size = sizeof(ASNSContext),
.priv_class = &asetnsamples_class,
.init = init,
.uninit = uninit,
.inputs = asetnsamples_inputs,
.outputs = asetnsamples_outputs,
};

@@ -101,11 +101,11 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)

static const AVFilterPad inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL },
{ NULL }
};

static const AVFilterPad outputs[] = {
@@ -113,7 +113,7 @@ static const AVFilterPad outputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL },
{ NULL }
};

AVFilter avfilter_af_ashowinfo = {

@@ -51,7 +51,7 @@ typedef struct {

static const AVOption astats_options[] = {
{ "length", "set the window length", OFFSET(time_constant), AV_OPT_TYPE_DOUBLE, {.dbl=.05}, .01, 10, FLAGS },
{NULL},
{ NULL }
};

AVFILTER_DEFINE_CLASS(astats);

@@ -56,7 +56,7 @@ static const AVOption asyncts_options[] = {
"(in seconds) to trigger padding/trimmin the data.", OFFSET(min_delta_sec), AV_OPT_TYPE_FLOAT, { .dbl = 0.1 }, 0, INT_MAX, A|F },
{ "max_comp", "Maximum compensation in samples per second.", OFFSET(max_comp), AV_OPT_TYPE_INT, { .i64 = 500 }, 0, INT_MAX, A|F },
{ "first_pts", "Assume the first pts should be this value.", OFFSET(first_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, A|F },
{ NULL },
{ NULL }
};

AVFILTER_DEFINE_CLASS(asyncts);
@@ -292,9 +292,9 @@ fail:

static const AVFilterPad avfilter_af_asyncts_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame
},
{ NULL }
};
@@ -312,13 +312,10 @@ static const AVFilterPad avfilter_af_asyncts_outputs[] = {
AVFilter avfilter_af_asyncts = {
.name = "asyncts",
.description = NULL_IF_CONFIG_SMALL("Sync audio data to timestamps"),

.init = init,
.uninit = uninit,

.priv_size = sizeof(ASyncContext),
.priv_class = &asyncts_class,

.inputs = avfilter_af_asyncts_inputs,
.outputs = avfilter_af_asyncts_outputs,
};

@@ -62,8 +62,8 @@
* V
*/

#include "libavutil/opt.h"
#include "libavutil/avassert.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
@@ -479,7 +479,7 @@ static const AVOption equalizer_options[] = {
{"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 999, FLAGS},
{"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{NULL},
{NULL}
};

DEFINE_BIQUAD_FILTER(equalizer, "Apply two-pole peaking equalization (EQ) filter.");
@@ -497,7 +497,7 @@ static const AVOption bass_options[] = {
{"w", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
{"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{NULL},
{NULL}
};

DEFINE_BIQUAD_FILTER(bass, "Boost or cut lower frequencies.");
@@ -515,7 +515,7 @@ static const AVOption treble_options[] = {
{"w", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
{"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{NULL},
{NULL}
};

DEFINE_BIQUAD_FILTER(treble, "Boost or cut upper frequencies.");
@@ -532,7 +532,7 @@ static const AVOption bandpass_options[] = {
{"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
{"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
{"csg", "use constant skirt gain", OFFSET(csg), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS},
{NULL},
{NULL}
};

DEFINE_BIQUAD_FILTER(bandpass, "Apply a two-pole Butterworth band-pass filter.");
@@ -548,7 +548,7 @@ static const AVOption bandreject_options[] = {
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
{"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 999, FLAGS},
{NULL},
{NULL}
};

DEFINE_BIQUAD_FILTER(bandreject, "Apply a two-pole Butterworth band-reject filter.");
@@ -566,7 +566,7 @@ static const AVOption lowpass_options[] = {
{"w", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
{"poles", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
{"p", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
{NULL},
{NULL}
};

DEFINE_BIQUAD_FILTER(lowpass, "Apply a low-pass filter with 3dB point frequency.");
@@ -584,7 +584,7 @@ static const AVOption highpass_options[] = {
{"w", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
{"poles", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
{"p", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
{NULL},
{NULL}
};

DEFINE_BIQUAD_FILTER(highpass, "Apply a high-pass filter with 3dB point frequency.");
@@ -600,7 +600,7 @@ static const AVOption allpass_options[] = {
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"width", "set filter-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=707.1}, 0, 99999, FLAGS},
{"w", "set filter-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=707.1}, 0, 99999, FLAGS},
{NULL},
{NULL}
};

DEFINE_BIQUAD_FILTER(allpass, "Apply a two-pole all-pass filter.");
@@ -613,7 +613,7 @@ static const AVOption biquad_options[] = {
{"b0", NULL, OFFSET(b0), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
{"b1", NULL, OFFSET(b1), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
{"b2", NULL, OFFSET(b2), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT16_MIN, INT16_MAX, FLAGS},
{NULL},
{NULL}
};

DEFINE_BIQUAD_FILTER(biquad, "Apply a biquad IIR filter with the given coefficients.");

@ -74,7 +74,7 @@ static const AVOption channelmap_options[] = {
|
||||
OFFSET(mapping_str), AV_OPT_TYPE_STRING, .flags = A|F },
|
||||
{ "channel_layout", "Output channel layout.",
|
||||
OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A|F },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(channelmap);
|
||||
@ -404,7 +404,6 @@ AVFilter avfilter_af_channelmap = {
|
||||
.query_formats = channelmap_query_formats,
|
||||
.priv_size = sizeof(ChannelMapContext),
|
||||
.priv_class = &channelmap_class,
|
||||
|
||||
.inputs = avfilter_af_channelmap_inputs,
|
||||
.outputs = avfilter_af_channelmap_outputs,
|
||||
};
|
||||
|
@ -45,7 +45,7 @@ typedef struct ChannelSplitContext {
|
||||
#define F AV_OPT_FLAG_FILTERING_PARAM
|
||||
static const AVOption channelsplit_options[] = {
|
||||
{ "channel_layout", "Input channel layout.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, { .str = "stereo" }, .flags = A|F },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(channelsplit);
|
||||
@ -129,9 +129,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
|
||||
|
||||
static const AVFilterPad avfilter_af_channelsplit_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -141,12 +141,9 @@ AVFilter avfilter_af_channelsplit = {
|
||||
.description = NULL_IF_CONFIG_SMALL("Split audio into per-channel streams."),
|
||||
.priv_size = sizeof(ChannelSplitContext),
|
||||
.priv_class = &channelsplit_class,
|
||||
|
||||
.init = init,
|
||||
.query_formats = query_formats,
|
||||
|
||||
.inputs = avfilter_af_channelsplit_inputs,
|
||||
.outputs = NULL,
|
||||
|
||||
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
|
||||
.inputs = avfilter_af_channelsplit_inputs,
|
||||
.outputs = NULL,
|
||||
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
|
||||
};
|
||||
|
@ -72,7 +72,7 @@ static const AVOption compand_options[] = {
|
||||
{ "gain", "set output gain", OFFSET(gain_dB), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, A },
|
||||
{ "volume", "set initial volume", OFFSET(initial_volume), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 0, A },
|
||||
{ "delay", "set delay for samples before sending them to volume adjuster", OFFSET(delay), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 20, A },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(compand);
|
||||
@ -492,7 +492,7 @@ static const AVFilterPad compand_inputs[] = {
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static const AVFilterPad compand_outputs[] = {
|
||||
@ -502,7 +502,7 @@ static const AVFilterPad compand_outputs[] = {
|
||||
.config_props = config_output,
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
},
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFilter avfilter_af_compand = {
|
||||
|
@ -1,5 +1,4 @@
|
||||
/*
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
@ -74,7 +73,7 @@ static const AVOption join_options[] = {
|
||||
{ "map", "A comma-separated list of channels maps in the format "
|
||||
"'input_stream.input_channel-output_channel.",
|
||||
OFFSET(map), AV_OPT_TYPE_STRING, .flags = A|F },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(join);
|
||||
@ -513,13 +512,10 @@ AVFilter avfilter_af_join = {
|
||||
"multi-channel output."),
|
||||
.priv_size = sizeof(JoinContext),
|
||||
.priv_class = &join_class,
|
||||
|
||||
.init = join_init,
|
||||
.uninit = join_uninit,
|
||||
.query_formats = join_query_formats,
|
||||
|
||||
.inputs = NULL,
|
||||
.outputs = avfilter_af_join_outputs,
|
||||
|
||||
.flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
|
||||
.inputs = NULL,
|
||||
.outputs = avfilter_af_join_outputs,
|
||||
.flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
|
||||
};
|
||||
|
@ -391,7 +391,6 @@ static const AVOption pan_options[] = {
|
||||
|
||||
AVFILTER_DEFINE_CLASS(pan);
|
||||
|
||||
|
||||
static const AVFilterPad pan_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
|
@ -1,5 +1,4 @@
|
||||
/*
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
@ -298,9 +297,9 @@ static const AVClass resample_class = {
|
||||
|
||||
static const AVFilterPad avfilter_af_resample_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -320,11 +319,9 @@ AVFilter avfilter_af_resample = {
|
||||
.description = NULL_IF_CONFIG_SMALL("Audio resampling and conversion."),
|
||||
.priv_size = sizeof(ResampleContext),
|
||||
.priv_class = &resample_class,
|
||||
|
||||
.init_dict = init,
|
||||
.uninit = uninit,
|
||||
.query_formats = query_formats,
|
||||
|
||||
.inputs = avfilter_af_resample_inputs,
|
||||
.outputs = avfilter_af_resample_outputs,
|
||||
.init_dict = init,
|
||||
.uninit = uninit,
|
||||
.query_formats = query_formats,
|
||||
.inputs = avfilter_af_resample_inputs,
|
||||
.outputs = avfilter_af_resample_outputs,
|
||||
};
|
||||
|
@ -48,7 +48,7 @@ static const AVOption silencedetect_options[] = {
|
||||
{ "noise", "set noise tolerance", OFFSET(noise), AV_OPT_TYPE_DOUBLE, {.dbl=0.001}, 0, DBL_MAX, FLAGS },
|
||||
{ "d", "set minimum duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl=2.}, 0, 24*60*60, FLAGS },
|
||||
{ "duration", "set minimum duration in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, {.dbl=2.}, 0, 24*60*60, FLAGS },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(silencedetect);
|
||||
@ -139,9 +139,9 @@ static int query_formats(AVFilterContext *ctx)
|
||||
|
||||
static const AVFilterPad silencedetect_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -51,7 +51,7 @@ static const AVOption volume_options[] = {
|
||||
{ "fixed", "select 8-bit fixed-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FIXED }, INT_MIN, INT_MAX, A|F, "precision" },
|
||||
{ "float", "select 32-bit floating-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FLOAT }, INT_MIN, INT_MAX, A|F, "precision" },
|
||||
{ "double", "select 64-bit floating-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_DOUBLE }, INT_MIN, INT_MAX, A|F, "precision" },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(volume);
|
||||
|
@ -133,9 +133,9 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
|
||||
static const AVFilterPad volumedetect_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -151,7 +151,6 @@ static const AVFilterPad volumedetect_outputs[] = {
|
||||
AVFilter avfilter_af_volumedetect = {
|
||||
.name = "volumedetect",
|
||||
.description = NULL_IF_CONFIG_SMALL("Detect audio volume."),
|
||||
|
||||
.priv_size = sizeof(VolDetectContext),
|
||||
.query_formats = query_formats,
|
||||
.uninit = uninit,
|
||||
|
@ -76,7 +76,7 @@ static const AVOption aevalsrc_options[]= {
|
||||
{ "d", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },
|
||||
{ "channel_layout", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
|
||||
{ "c", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
|
||||
{NULL},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(aevalsrc);
|
||||
@ -230,14 +230,13 @@ static const AVFilterPad aevalsrc_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_asrc_aevalsrc = {
|
||||
.name = "aevalsrc",
|
||||
.description = NULL_IF_CONFIG_SMALL("Generate an audio signal generated by an expression."),
|
||||
|
||||
.name = "aevalsrc",
|
||||
.description = NULL_IF_CONFIG_SMALL("Generate an audio signal generated by an expression."),
|
||||
.query_formats = query_formats,
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.priv_size = sizeof(EvalContext),
|
||||
.inputs = NULL,
|
||||
.outputs = aevalsrc_outputs,
|
||||
.priv_class = &aevalsrc_class,
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.priv_size = sizeof(EvalContext),
|
||||
.inputs = NULL,
|
||||
.outputs = aevalsrc_outputs,
|
||||
.priv_class = &aevalsrc_class,
|
||||
};
|
||||
|
@ -54,7 +54,7 @@ static const AVOption anullsrc_options[]= {
|
||||
{ "r", "set sample rate", OFFSET(sample_rate_str) , AV_OPT_TYPE_STRING, {.str = "44100"}, 0, 0, FLAGS },
|
||||
{ "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
|
||||
{ "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(anullsrc);
|
||||
@ -135,15 +135,12 @@ static const AVFilterPad avfilter_asrc_anullsrc_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_asrc_anullsrc = {
|
||||
.name = "anullsrc",
|
||||
.description = NULL_IF_CONFIG_SMALL("Null audio source, return empty audio frames."),
|
||||
|
||||
.init = init,
|
||||
.name = "anullsrc",
|
||||
.description = NULL_IF_CONFIG_SMALL("Null audio source, return empty audio frames."),
|
||||
.init = init,
|
||||
.query_formats = query_formats,
|
||||
.priv_size = sizeof(ANullContext),
|
||||
|
||||
.inputs = NULL,
|
||||
|
||||
.outputs = avfilter_asrc_anullsrc_outputs,
|
||||
.priv_class = &anullsrc_class,
|
||||
.priv_size = sizeof(ANullContext),
|
||||
.inputs = NULL,
|
||||
.outputs = avfilter_asrc_anullsrc_outputs,
|
||||
.priv_class = &anullsrc_class,
|
||||
};
|
||||
|
@ -271,13 +271,13 @@ static const AVFilterPad flite_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_asrc_flite = {
|
||||
.name = "flite",
|
||||
.description = NULL_IF_CONFIG_SMALL("Synthesize voice from text using libflite."),
|
||||
.name = "flite",
|
||||
.description = NULL_IF_CONFIG_SMALL("Synthesize voice from text using libflite."),
|
||||
.query_formats = query_formats,
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.priv_size = sizeof(FliteContext),
|
||||
.inputs = NULL,
|
||||
.outputs = flite_outputs,
|
||||
.priv_class = &flite_class,
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.priv_size = sizeof(FliteContext),
|
||||
.inputs = NULL,
|
||||
.outputs = flite_outputs,
|
||||
.priv_class = &flite_class,
|
||||
};
|
||||
|
@ -71,7 +71,7 @@ static const AVOption sine_options[] = {
|
||||
OPT_DUR("duration", duration, 0, 0, INT64_MAX, "set the audio duration"),
|
||||
OPT_DUR("d", duration, 0, 0, INT64_MAX, "set the audio duration"),
|
||||
OPT_INT("samples_per_frame", samples_per_frame, 1024, 0, INT_MAX, "set the number of samples per frame"),
|
||||
{NULL},
|
||||
{NULL}
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(sine);
|
||||
|
@ -70,7 +70,7 @@ static const AVOption avectorscope_options[] = {
|
||||
{ "gf", "set green fade", OFFSET(fade[1]), AV_OPT_TYPE_INT, {.i64=10}, 0, 255, FLAGS },
|
||||
{ "bf", "set blue fade", OFFSET(fade[2]), AV_OPT_TYPE_INT, {.i64=5}, 0, 255, FLAGS },
|
||||
{ "zoom", "set zoom factor", OFFSET(zoom), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 1, 10, FLAGS },
|
||||
{NULL},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(avectorscope);
|
||||
@ -254,9 +254,9 @@ static const AVFilterPad audiovectorscope_inputs[] = {
|
||||
|
||||
static const AVFilterPad audiovectorscope_outputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_output,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_output,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -69,7 +69,7 @@ static const AVOption concat_options[] = {
|
||||
{ "unsafe", "enable unsafe mode",
|
||||
OFFSET(unsafe),
|
||||
AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, V|A|F},
|
||||
{ 0 }
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(concat);
|
||||
|
@ -79,7 +79,7 @@ static const AVOption showspectrum_options[] = {
|
||||
{ "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, FLAGS, "scale" },
|
||||
{ "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" },
|
||||
{ "saturation", "color saturation multiplier", OFFSET(saturation), AV_OPT_TYPE_FLOAT, {.dbl = 1}, -10, 10, FLAGS },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(showspectrum);
|
||||
@ -491,12 +491,12 @@ static const AVFilterPad showspectrum_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_avf_showspectrum = {
|
||||
.name = "showspectrum",
|
||||
.description = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output."),
|
||||
.uninit = uninit,
|
||||
.query_formats = query_formats,
|
||||
.priv_size = sizeof(ShowSpectrumContext),
|
||||
.inputs = showspectrum_inputs,
|
||||
.outputs = showspectrum_outputs,
|
||||
.priv_class = &showspectrum_class,
|
||||
.name = "showspectrum",
|
||||
.description = NULL_IF_CONFIG_SMALL("Convert input audio to a spectrum video output."),
|
||||
.uninit = uninit,
|
||||
.query_formats = query_formats,
|
||||
.priv_size = sizeof(ShowSpectrumContext),
|
||||
.inputs = showspectrum_inputs,
|
||||
.outputs = showspectrum_outputs,
|
||||
.priv_class = &showspectrum_class,
|
||||
};
|
||||
|
@ -62,7 +62,7 @@ static const AVOption showwaves_options[] = {
|
||||
{ "n", "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
|
||||
{ "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
|
||||
{ "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(showwaves);
|
||||
@ -245,12 +245,12 @@ static const AVFilterPad showwaves_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_avf_showwaves = {
|
||||
.name = "showwaves",
|
||||
.description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
|
||||
.uninit = uninit,
|
||||
.query_formats = query_formats,
|
||||
.priv_size = sizeof(ShowWavesContext),
|
||||
.inputs = showwaves_inputs,
|
||||
.outputs = showwaves_outputs,
|
||||
.priv_class = &showwaves_class,
|
||||
.name = "showwaves",
|
||||
.description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
|
||||
.uninit = uninit,
|
||||
.query_formats = query_formats,
|
||||
.priv_size = sizeof(ShowWavesContext),
|
||||
.inputs = showwaves_inputs,
|
||||
.outputs = showwaves_outputs,
|
||||
.priv_class = &showwaves_class,
|
||||
};
|
||||
|
@ -142,7 +142,7 @@ static const AVOption ebur128_options[] = {
|
||||
{ "info", "information logging level", 0, AV_OPT_TYPE_CONST, {.i64 = AV_LOG_INFO}, INT_MIN, INT_MAX, A|V|F, "level" },
|
||||
{ "verbose", "verbose logging level", 0, AV_OPT_TYPE_CONST, {.i64 = AV_LOG_VERBOSE}, INT_MIN, INT_MAX, A|V|F, "level" },
|
||||
{ "metadata", "inject metadata in the filtergraph", OFFSET(metadata), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, A|V|F },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(ebur128);
|
||||
@ -777,10 +777,10 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
|
||||
static const AVFilterPad ebur128_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
.config_props = config_audio_input,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
.config_props = config_audio_input,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -45,7 +45,7 @@ typedef struct {
|
||||
static const AVOption filt_name##_options[] = { \
|
||||
{ "nb_inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
|
||||
{ "n", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
|
||||
{ NULL }, \
|
||||
{ NULL } \
|
||||
}
|
||||
|
||||
inline static int push_frame(AVFilterContext *ctx)
|
||||
|
@ -468,23 +468,23 @@ static av_cold int aselect_init(AVFilterContext *ctx)
|
||||
|
||||
static const AVFilterPad avfilter_af_aselect_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.config_props = config_input,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.config_props = config_input,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFilter avfilter_af_aselect = {
|
||||
.name = "aselect",
|
||||
.name = "aselect",
|
||||
.description = NULL_IF_CONFIG_SMALL("Select audio frames to pass in output."),
|
||||
.init = aselect_init,
|
||||
.uninit = uninit,
|
||||
.priv_size = sizeof(SelectContext),
|
||||
.inputs = avfilter_af_aselect_inputs,
|
||||
.priv_class = &aselect_class,
|
||||
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
|
||||
.init = aselect_init,
|
||||
.uninit = uninit,
|
||||
.priv_size = sizeof(SelectContext),
|
||||
.inputs = avfilter_af_aselect_inputs,
|
||||
.priv_class = &aselect_class,
|
||||
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
|
||||
};
|
||||
#endif /* CONFIG_ASELECT_FILTER */
|
||||
|
||||
@ -511,25 +511,23 @@ static av_cold int select_init(AVFilterContext *ctx)
|
||||
|
||||
static const AVFilterPad avfilter_vf_select_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_input,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_input,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_select = {
|
||||
.name = "select",
|
||||
.description = NULL_IF_CONFIG_SMALL("Select video frames to pass in output."),
|
||||
.init = select_init,
|
||||
.uninit = uninit,
|
||||
.name = "select",
|
||||
.description = NULL_IF_CONFIG_SMALL("Select video frames to pass in output."),
|
||||
.init = select_init,
|
||||
.uninit = uninit,
|
||||
.query_formats = query_formats,
|
||||
|
||||
.priv_size = sizeof(SelectContext),
|
||||
.priv_class = &select_class,
|
||||
|
||||
.inputs = avfilter_vf_select_inputs,
|
||||
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
|
||||
.priv_size = sizeof(SelectContext),
|
||||
.priv_class = &select_class,
|
||||
.inputs = avfilter_vf_select_inputs,
|
||||
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
|
||||
};
|
||||
#endif /* CONFIG_SELECT_FILTER */
|
||||
|
@ -86,7 +86,7 @@ static const AVOption options[] = {
|
||||
{ "c", "set commands", OFFSET(commands_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
|
||||
{ "filename", "set commands file", OFFSET(commands_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
|
||||
{ "f", "set commands file", OFFSET(commands_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
|
||||
{NULL},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
#define SPACES " \f\t\n\r"
|
||||
@ -512,9 +512,9 @@ AVFILTER_DEFINE_CLASS(sendcmd);
|
||||
|
||||
static const AVFilterPad sendcmd_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -528,14 +528,14 @@ static const AVFilterPad sendcmd_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_sendcmd = {
|
||||
.name = "sendcmd",
|
||||
.name = "sendcmd",
|
||||
.description = NULL_IF_CONFIG_SMALL("Send commands to filters."),
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.priv_size = sizeof(SendCmdContext),
|
||||
.inputs = sendcmd_inputs,
|
||||
.outputs = sendcmd_outputs,
|
||||
.priv_class = &sendcmd_class,
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.priv_size = sizeof(SendCmdContext),
|
||||
.inputs = sendcmd_inputs,
|
||||
.outputs = sendcmd_outputs,
|
||||
.priv_class = &sendcmd_class,
|
||||
};
|
||||
|
||||
#endif
|
||||
@ -547,9 +547,9 @@ AVFILTER_DEFINE_CLASS(asendcmd);
|
||||
|
||||
static const AVFilterPad asendcmd_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -563,14 +563,14 @@ static const AVFilterPad asendcmd_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_af_asendcmd = {
|
||||
.name = "asendcmd",
|
||||
.name = "asendcmd",
|
||||
.description = NULL_IF_CONFIG_SMALL("Send commands to filters."),
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.priv_size = sizeof(SendCmdContext),
|
||||
.inputs = asendcmd_inputs,
|
||||
.outputs = asendcmd_outputs,
|
||||
.priv_class = &asendcmd_class,
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.priv_size = sizeof(SendCmdContext),
|
||||
.inputs = asendcmd_inputs,
|
||||
.outputs = asendcmd_outputs,
|
||||
.priv_class = &asendcmd_class,
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@ -127,9 +127,9 @@ AVFILTER_DEFINE_CLASS(settb);
|
||||
|
||||
static const AVFilterPad avfilter_vf_settb_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -144,14 +144,12 @@ static const AVFilterPad avfilter_vf_settb_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_settb = {
|
||||
.name = "settb",
|
||||
.name = "settb",
|
||||
.description = NULL_IF_CONFIG_SMALL("Set timebase for the video output link."),
|
||||
|
||||
.priv_size = sizeof(SetTBContext),
|
||||
.priv_class = &settb_class,
|
||||
|
||||
.inputs = avfilter_vf_settb_inputs,
|
||||
.outputs = avfilter_vf_settb_outputs,
|
||||
.priv_size = sizeof(SetTBContext),
|
||||
.priv_class = &settb_class,
|
||||
.inputs = avfilter_vf_settb_inputs,
|
||||
.outputs = avfilter_vf_settb_outputs,
|
||||
};
|
||||
#endif
|
||||
|
||||
@ -162,9 +160,9 @@ AVFILTER_DEFINE_CLASS(asettb);
|
||||
|
||||
static const AVFilterPad avfilter_af_asettb_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -179,12 +177,11 @@ static const AVFilterPad avfilter_af_asettb_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_af_asettb = {
|
||||
.name = "asettb",
|
||||
.name = "asettb",
|
||||
.description = NULL_IF_CONFIG_SMALL("Set timebase for the audio output link."),
|
||||
|
||||
.priv_size = sizeof(SetTBContext),
|
||||
.inputs = avfilter_af_asettb_inputs,
|
||||
.outputs = avfilter_af_asettb_outputs,
|
||||
.priv_class = &asettb_class,
|
||||
.priv_size = sizeof(SetTBContext),
|
||||
.inputs = avfilter_af_asettb_inputs,
|
||||
.outputs = avfilter_af_asettb_outputs,
|
||||
.priv_class = &asettb_class,
|
||||
};
|
||||
#endif
|
||||
|
@ -211,9 +211,9 @@ AVFILTER_DEFINE_CLASS(zmq);
|
||||
|
||||
static const AVFilterPad zmq_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -246,9 +246,9 @@ AVFILTER_DEFINE_CLASS(azmq);
|
||||
|
||||
static const AVFilterPad azmq_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -221,7 +221,7 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
|
||||
static const AVOption options[] = {
|
||||
{ "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = FLAGS },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
#if CONFIG_SETPTS_FILTER
|
||||
@ -230,10 +230,10 @@ AVFILTER_DEFINE_CLASS(setpts);
|
||||
|
||||
static const AVFilterPad avfilter_vf_setpts_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_input,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_input,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -267,10 +267,10 @@ AVFILTER_DEFINE_CLASS(asetpts);
|
||||
|
||||
static const AVFilterPad asetpts_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.config_props = config_input,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.config_props = config_input,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -95,7 +95,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
|
||||
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_VIDEO_PARAM
|
||||
static const AVOption options[] = {
|
||||
{ "outputs", "set number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, FLAGS },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
#define split_options options
|
||||
@ -106,34 +106,30 @@ AVFILTER_DEFINE_CLASS(asplit);
|
||||
|
||||
static const AVFilterPad avfilter_vf_split_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_split = {
|
||||
.name = "split",
|
||||
.name = "split",
|
||||
.description = NULL_IF_CONFIG_SMALL("Pass on the input to N video outputs."),
|
||||
|
||||
.priv_size = sizeof(SplitContext),
|
||||
.priv_class = &split_class,
|
||||
|
||||
.init = split_init,
|
||||
.uninit = split_uninit,
|
||||
|
||||
.inputs = avfilter_vf_split_inputs,
|
||||
.outputs = NULL,
|
||||
|
||||
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
|
||||
.priv_size = sizeof(SplitContext),
|
||||
.priv_class = &split_class,
|
||||
.init = split_init,
|
||||
.uninit = split_uninit,
|
||||
.inputs = avfilter_vf_split_inputs,
|
||||
.outputs = NULL,
|
||||
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
|
||||
};
|
||||
|
||||
static const AVFilterPad avfilter_af_asplit_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -141,15 +137,11 @@ static const AVFilterPad avfilter_af_asplit_inputs[] = {
|
||||
AVFilter avfilter_af_asplit = {
|
||||
.name = "asplit",
|
||||
.description = NULL_IF_CONFIG_SMALL("Pass on the audio input to N audio outputs."),
|
||||
|
||||
.priv_size = sizeof(SplitContext),
|
||||
.priv_class = &asplit_class,
|
||||
|
||||
.init = split_init,
|
||||
.uninit = split_uninit,
|
||||
|
||||
.inputs = avfilter_af_asplit_inputs,
|
||||
.outputs = NULL,
|
||||
|
||||
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
|
||||
.priv_size = sizeof(SplitContext),
|
||||
.priv_class = &asplit_class,
|
||||
.init = split_init,
|
||||
.uninit = split_uninit,
|
||||
.inputs = avfilter_af_asplit_inputs,
|
||||
.outputs = NULL,
|
||||
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
|
||||
};
|
||||
|
@ -199,4 +199,3 @@ int avfilter_transform(const uint8_t *src, uint8_t *dst,
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -202,7 +202,7 @@ static const AVOption trim_options[] = {
|
||||
{ "end_frame", "Number of the first frame that should be dropped "
|
||||
"again", OFFSET(end_frame), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
|
||||
COMPAT_OPTS
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
#undef FLAGS
|
||||
|
||||
@ -220,9 +220,9 @@ static const AVFilterPad trim_inputs[] = {
|
||||
|
||||
static const AVFilterPad trim_outputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_output,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_output,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -230,12 +230,9 @@ static const AVFilterPad trim_outputs[] = {
|
||||
AVFilter avfilter_vf_trim = {
|
||||
.name = "trim",
|
||||
.description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
|
||||
|
||||
.init = init,
|
||||
|
||||
.priv_size = sizeof(TrimContext),
|
||||
.priv_class = &trim_class,
|
||||
|
||||
.inputs = trim_inputs,
|
||||
.outputs = trim_outputs,
|
||||
};
|
||||
@ -360,7 +357,7 @@ static const AVOption atrim_options[] = {
|
||||
{ "end_sample", "Number of the first audio sample that should be "
|
||||
"dropped again", OFFSET(end_sample), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
|
||||
COMPAT_OPTS
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
#undef FLAGS
|
||||
|
||||
@ -378,9 +375,9 @@ static const AVFilterPad atrim_inputs[] = {
|
||||
|
||||
static const AVFilterPad atrim_outputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.config_props = config_output,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_AUDIO,
|
||||
.config_props = config_output,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -388,12 +385,9 @@ static const AVFilterPad atrim_outputs[] = {
|
||||
AVFilter avfilter_af_atrim = {
|
||||
.name = "atrim",
|
||||
.description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
|
||||
|
||||
.init = init,
|
||||
|
||||
.priv_size = sizeof(TrimContext),
|
||||
.priv_class = &atrim_class,
|
||||
|
||||
.inputs = atrim_inputs,
|
||||
.outputs = atrim_outputs,
|
||||
};
|
||||
|
@ -140,10 +140,10 @@ AVFILTER_DEFINE_CLASS(setdar);
|
||||
|
||||
static const AVFilterPad avfilter_vf_setdar_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = setdar_config_props,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = setdar_config_props,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -157,15 +157,13 @@ static const AVFilterPad avfilter_vf_setdar_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_setdar = {
|
||||
.name = "setdar",
|
||||
.name = "setdar",
|
||||
.description = NULL_IF_CONFIG_SMALL("Set the frame display aspect ratio."),
|
||||
.init = init,
|
||||
.priv_size = sizeof(AspectContext),
|
||||
.priv_class = &setdar_class,
|
||||
|
||||
.inputs = avfilter_vf_setdar_inputs,
|
||||
|
||||
.outputs = avfilter_vf_setdar_outputs,
|
||||
.init = init,
|
||||
.priv_size = sizeof(AspectContext),
|
||||
.priv_class = &setdar_class,
|
||||
.inputs = avfilter_vf_setdar_inputs,
|
||||
.outputs = avfilter_vf_setdar_outputs,
|
||||
};
|
||||
|
||||
#endif /* CONFIG_SETDAR_FILTER */
|
||||
@ -204,10 +202,10 @@ AVFILTER_DEFINE_CLASS(setsar);
|
||||
|
||||
static const AVFilterPad avfilter_vf_setsar_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = setsar_config_props,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = setsar_config_props,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -221,15 +219,13 @@ static const AVFilterPad avfilter_vf_setsar_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_setsar = {
|
||||
.name = "setsar",
|
||||
.name = "setsar",
|
||||
.description = NULL_IF_CONFIG_SMALL("Set the pixel sample aspect ratio."),
|
||||
.init = init,
|
||||
.priv_size = sizeof(AspectContext),
|
||||
.priv_class = &setsar_class,
|
||||
|
||||
.inputs = avfilter_vf_setsar_inputs,
|
||||
|
||||
.outputs = avfilter_vf_setsar_outputs,
|
||||
.init = init,
|
||||
.priv_size = sizeof(AspectContext),
|
||||
.priv_class = &setsar_class,
|
||||
.inputs = avfilter_vf_setsar_inputs,
|
||||
.outputs = avfilter_vf_setsar_outputs,
|
||||
};
|
||||
|
||||
#endif /* CONFIG_SETSAR_FILTER */
|
||||
|
@ -107,9 +107,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
|
||||
|
||||
static const AVFilterPad bbox_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -56,7 +56,7 @@ static const AVOption blackdetect_options[] = {
|
||||
{ "pic_th", "set the picture black ratio threshold", OFFSET(picture_black_ratio_th), AV_OPT_TYPE_DOUBLE, {.dbl=.98}, 0, 1, FLAGS },
|
||||
{ "pixel_black_th", "set the pixel black threshold", OFFSET(pixel_black_th), AV_OPT_TYPE_DOUBLE, {.dbl=.10}, 0, 1, FLAGS },
|
||||
{ "pix_th", "set the pixel black threshold", OFFSET(pixel_black_th), AV_OPT_TYPE_DOUBLE, {.dbl=.10}, 0, 1, FLAGS },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(blackdetect);
|
||||
@ -176,10 +176,10 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
|
||||
|
||||
static const AVFilterPad blackdetect_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_input,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_input,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -98,16 +98,16 @@ static const AVOption blackframe_options[] = {
|
||||
OFFSET(bthresh), AV_OPT_TYPE_INT, { .i64 = 32 }, 0, 255, FLAGS },
|
||||
{ "thresh", "threshold below which a pixel value is considered black",
|
||||
OFFSET(bthresh), AV_OPT_TYPE_INT, { .i64 = 32 }, 0, 255, FLAGS },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(blackframe);
|
||||
|
||||
static const AVFilterPad avfilter_vf_blackframe_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -121,15 +121,11 @@ static const AVFilterPad avfilter_vf_blackframe_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_blackframe = {
|
||||
.name = "blackframe",
|
||||
.description = NULL_IF_CONFIG_SMALL("Detect frames that are (almost) black."),
|
||||
|
||||
.priv_size = sizeof(BlackFrameContext),
|
||||
.priv_class = &blackframe_class,
|
||||
|
||||
.name = "blackframe",
|
||||
.description = NULL_IF_CONFIG_SMALL("Detect frames that are (almost) black."),
|
||||
.priv_size = sizeof(BlackFrameContext),
|
||||
.priv_class = &blackframe_class,
|
||||
.query_formats = query_formats,
|
||||
|
||||
.inputs = avfilter_vf_blackframe_inputs,
|
||||
|
||||
.outputs = avfilter_vf_blackframe_outputs,
|
||||
.inputs = avfilter_vf_blackframe_inputs,
|
||||
.outputs = avfilter_vf_blackframe_outputs,
|
||||
};
|
||||
|
@ -142,7 +142,7 @@ static const AVOption blend_options[] = {
|
||||
{ "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
|
||||
{ "shortest", "force termination when the shortest input terminates", OFFSET(dinput.shortest), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
|
||||
{ "repeatlast", "repeat last bottom frame", OFFSET(dinput.repeatlast), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(blend);
|
||||
@ -432,13 +432,13 @@ static int filter_frame_bottom(AVFilterLink *inlink, AVFrame *buf)
|
||||
|
||||
static const AVFilterPad blend_inputs[] = {
|
||||
{
|
||||
.name = "top",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame_top,
|
||||
.name = "top",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame_top,
|
||||
},{
|
||||
.name = "bottom",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame_bottom,
|
||||
.name = "bottom",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame_bottom,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -380,8 +380,7 @@ AVFilter avfilter_vf_boxblur = {
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.query_formats = query_formats,

.inputs = avfilter_vf_boxblur_inputs,
.outputs = avfilter_vf_boxblur_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
.inputs = avfilter_vf_boxblur_inputs,
.outputs = avfilter_vf_boxblur_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};

@ -359,10 +359,10 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)

static const AVFilterPad colormatrix_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};

@ -378,7 +378,6 @@ static const AVFilterPad colormatrix_outputs[] = {
AVFilter avfilter_vf_colormatrix = {
.name = "colormatrix",
.description = NULL_IF_CONFIG_SMALL("Convert color matrix."),

.priv_size = sizeof(ColorMatrixContext),
.init = init,
.query_formats = query_formats,

@ -46,9 +46,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)

static const AVFilterPad avfilter_vf_copy_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};

@ -62,9 +62,8 @@ static const AVFilterPad avfilter_vf_copy_outputs[] = {
};

AVFilter avfilter_vf_copy = {
.name = "copy",
.name = "copy",
.description = NULL_IF_CONFIG_SMALL("Copy the input video unchanged to the output."),

.inputs = avfilter_vf_copy_inputs,
.outputs = avfilter_vf_copy_outputs,
.inputs = avfilter_vf_copy_inputs,
.outputs = avfilter_vf_copy_outputs,
};

@ -308,17 +308,17 @@ static const AVOption crop_options[] = {
{ "x", "set the x crop area expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "(in_w-out_w)/2"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "y", "set the y crop area expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "(in_h-out_h)/2"}, CHAR_MIN, CHAR_MAX, FLAGS },
{ "keep_aspect", "keep aspect ratio", OFFSET(keep_aspect), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
{NULL}
{ NULL }
};

AVFILTER_DEFINE_CLASS(crop);

static const AVFilterPad avfilter_vf_crop_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
@ -333,15 +333,12 @@ static const AVFilterPad avfilter_vf_crop_outputs[] = {
};

AVFilter avfilter_vf_crop = {
.name = "crop",
.description = NULL_IF_CONFIG_SMALL("Crop the input video to width:height:x:y."),

.priv_size = sizeof(CropContext),
.priv_class = &crop_class,

.name = "crop",
.description = NULL_IF_CONFIG_SMALL("Crop the input video to width:height:x:y."),
.priv_size = sizeof(CropContext),
.priv_class = &crop_class,
.query_formats = query_formats,
.uninit = uninit,

.inputs = avfilter_vf_crop_inputs,
.outputs = avfilter_vf_crop_outputs,
.inputs = avfilter_vf_crop_inputs,
.outputs = avfilter_vf_crop_outputs,
};

@ -216,17 +216,17 @@ static const AVOption cropdetect_options[] = {
{ "round", "Value by which the width/height should be divisible", OFFSET(round), AV_OPT_TYPE_INT, { .i64 = 16 }, 0, INT_MAX, FLAGS },
{ "reset", "Recalculate the crop area after this many frames", OFFSET(reset_count), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
{ "reset_count", "Recalculate the crop area after this many frames",OFFSET(reset_count),AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, INT_MAX, FLAGS },
{ NULL },
{ NULL }
};

AVFILTER_DEFINE_CLASS(cropdetect);

static const AVFilterPad avfilter_vf_cropdetect_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
},
{ NULL }
};
@ -240,14 +240,13 @@ static const AVFilterPad avfilter_vf_cropdetect_outputs[] = {
};

AVFilter avfilter_vf_cropdetect = {
.name = "cropdetect",
.description = NULL_IF_CONFIG_SMALL("Auto-detect crop size."),

.priv_size = sizeof(CropDetectContext),
.priv_class = &cropdetect_class,
.init = init,
.name = "cropdetect",
.description = NULL_IF_CONFIG_SMALL("Auto-detect crop size."),
.priv_size = sizeof(CropDetectContext),
.priv_class = &cropdetect_class,
.init = init,
.query_formats = query_formats,
.inputs = avfilter_vf_cropdetect_inputs,
.outputs = avfilter_vf_cropdetect_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
.inputs = avfilter_vf_cropdetect_inputs,
.outputs = avfilter_vf_cropdetect_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};

@ -164,7 +164,7 @@ static const AVOption delogo_options[]= {
{ "band", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 4 }, 1, INT_MAX, FLAGS },
{ "t", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 4 }, 1, INT_MAX, FLAGS },
{ "show", "show delogo area", OFFSET(show), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
{ NULL },
{ NULL }
};

AVFILTER_DEFINE_CLASS(delogo);
@ -262,9 +262,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)

static const AVFilterPad avfilter_vf_delogo_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{ NULL }
};
@ -284,8 +284,7 @@ AVFilter avfilter_vf_delogo = {
.priv_class = &delogo_class,
.init = init,
.query_formats = query_formats,

.inputs = avfilter_vf_delogo_inputs,
.outputs = avfilter_vf_delogo_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
.inputs = avfilter_vf_delogo_inputs,
.outputs = avfilter_vf_delogo_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};

@ -254,11 +254,11 @@ AVFILTER_DEFINE_CLASS(drawbox);

static const AVFilterPad drawbox_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
.needs_writable = 1,
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
.needs_writable = 1,
},
{ NULL }
};
@ -272,16 +272,15 @@ static const AVFilterPad drawbox_outputs[] = {
};

AVFilter avfilter_vf_drawbox = {
.name = "drawbox",
.description = NULL_IF_CONFIG_SMALL("Draw a colored box on the input video."),
.priv_size = sizeof(DrawBoxContext),
.priv_class = &drawbox_class,
.init = init,

.query_formats = query_formats,
.inputs = drawbox_inputs,
.outputs = drawbox_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
.name = "drawbox",
.description = NULL_IF_CONFIG_SMALL("Draw a colored box on the input video."),
.priv_size = sizeof(DrawBoxContext),
.priv_class = &drawbox_class,
.init = init,
.query_formats = query_formats,
.inputs = drawbox_inputs,
.outputs = drawbox_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
#endif /* CONFIG_DRAWBOX_FILTER */

@ -223,7 +223,7 @@ static const AVOption drawtext_options[]= {
{ "monochrome", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_MONOCHROME }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "linear_design", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_LINEAR_DESIGN }, .flags = FLAGS, .unit = "ft_load_flags" },
{ "no_autohint", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FT_LOAD_NO_AUTOHINT }, .flags = FLAGS, .unit = "ft_load_flags" },
{ NULL},
{ NULL }
};

AVFILTER_DEFINE_CLASS(drawtext);
@ -1039,11 +1039,11 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)

static const AVFilterPad avfilter_vf_drawtext_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
.needs_writable = 1,
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
.needs_writable = 1,
},
{ NULL }
};
@ -1064,13 +1064,12 @@ AVFilter avfilter_vf_drawtext = {
.init = init,
.uninit = uninit,
.query_formats = query_formats,

.inputs = avfilter_vf_drawtext_inputs,
.outputs = avfilter_vf_drawtext_outputs,
.inputs = avfilter_vf_drawtext_inputs,
.outputs = avfilter_vf_drawtext_outputs,
.process_command = command,
#if FF_API_DRAWTEXT_OLD_TIMELINE
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
#else
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
#endif
};

@ -45,7 +45,7 @@ typedef struct {
static const AVOption edgedetect_options[] = {
{ "high", "set high threshold", OFFSET(high), AV_OPT_TYPE_DOUBLE, {.dbl=50/255.}, 0, 1, FLAGS },
{ "low", "set low threshold", OFFSET(low), AV_OPT_TYPE_DOUBLE, {.dbl=20/255.}, 0, 1, FLAGS },
{ NULL },
{ NULL }
};

AVFILTER_DEFINE_CLASS(edgedetect);

@ -316,18 +316,18 @@ static const AVOption fade_options[] = {
OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
{ "d", "Duration of the effect in seconds.",
OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT32_MAX, FLAGS },
{ NULL },
{ NULL }
};

AVFILTER_DEFINE_CLASS(fade);

static const AVFilterPad avfilter_vf_fade_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_props,
.filter_frame = filter_frame,
.needs_writable = 1,
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_props,
.filter_frame = filter_frame,
.needs_writable = 1,
},
{ NULL }
};
@ -347,8 +347,7 @@ AVFilter avfilter_vf_fade = {
.priv_size = sizeof(FadeContext),
.priv_class = &fade_class,
.query_formats = query_formats,

.inputs = avfilter_vf_fade_inputs,
.outputs = avfilter_vf_fade_outputs,
.flags = AVFILTER_FLAG_SLICE_THREADS,
.inputs = avfilter_vf_fade_inputs,
.outputs = avfilter_vf_fade_outputs,
.flags = AVFILTER_FLAG_SLICE_THREADS,
};

@ -44,7 +44,6 @@ static const AVOption field_options[] = {
|
||||
{"type", "set field type (top or bottom)", OFFSET(type), AV_OPT_TYPE_INT, {.i64=FIELD_TYPE_TOP}, 0, 1, FLAGS, "field_type" },
|
||||
{"top", "select top field", 0, AV_OPT_TYPE_CONST, {.i64=FIELD_TYPE_TOP}, INT_MIN, INT_MAX, FLAGS, "field_type"},
|
||||
{"bottom", "select bottom field", 0, AV_OPT_TYPE_CONST, {.i64=FIELD_TYPE_BOTTOM}, INT_MIN, INT_MAX, FLAGS, "field_type"},
|
||||
|
||||
{NULL}
|
||||
};
|
||||
|
||||
@ -86,28 +85,27 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
|
||||
|
||||
static const AVFilterPad field_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static const AVFilterPad field_outputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_props_output,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_props_output,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_field = {
|
||||
.name = "field",
|
||||
.description = NULL_IF_CONFIG_SMALL("Extract a field from the input video."),
|
||||
|
||||
.priv_size = sizeof(FieldContext),
|
||||
.inputs = field_inputs,
|
||||
.outputs = field_outputs,
|
||||
.priv_class = &field_class,
|
||||
.name = "field",
|
||||
.description = NULL_IF_CONFIG_SMALL("Extract a field from the input video."),
|
||||
.priv_size = sizeof(FieldContext),
|
||||
.inputs = field_inputs,
|
||||
.outputs = field_outputs,
|
||||
.priv_class = &field_class,
|
||||
};
|
||||
|
@ -157,17 +157,17 @@ static const AVOption fieldorder_options[] = {
|
||||
{ "order", "output field order", OFFSET(dst_tff), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, FLAGS, "order" },
|
||||
{ "bff", "bottom field first", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, .flags=FLAGS, .unit = "order" },
|
||||
{ "tff", "top field first", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, .flags=FLAGS, .unit = "order" },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(fieldorder);
|
||||
|
||||
static const AVFilterPad avfilter_vf_fieldorder_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_input,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_input,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -99,7 +99,7 @@ static AVFilterFormats *make_format_list(FormatContext *s, int flag)
|
||||
#define OFFSET(x) offsetof(FormatContext, x)
|
||||
static const AVOption options[] = {
|
||||
{ "pix_fmts", "A '|'-separated list of pixel formats", OFFSET(pix_fmts), AV_OPT_TYPE_STRING, .flags = AV_OPT_FLAG_VIDEO_PARAM },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
#if CONFIG_FORMAT_FILTER
|
||||
@ -130,18 +130,14 @@ static const AVFilterPad avfilter_vf_format_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_format = {
|
||||
.name = "format",
|
||||
.description = NULL_IF_CONFIG_SMALL("Convert the input video to one of the specified pixel formats."),
|
||||
|
||||
.init = init,
|
||||
|
||||
.name = "format",
|
||||
.description = NULL_IF_CONFIG_SMALL("Convert the input video to one of the specified pixel formats."),
|
||||
.init = init,
|
||||
.query_formats = query_formats_format,
|
||||
|
||||
.priv_size = sizeof(FormatContext),
|
||||
.priv_class = &format_class,
|
||||
|
||||
.inputs = avfilter_vf_format_inputs,
|
||||
.outputs = avfilter_vf_format_outputs,
|
||||
.priv_size = sizeof(FormatContext),
|
||||
.priv_class = &format_class,
|
||||
.inputs = avfilter_vf_format_inputs,
|
||||
.outputs = avfilter_vf_format_outputs,
|
||||
};
|
||||
#endif /* CONFIG_FORMAT_FILTER */
|
||||
|
||||
@ -173,17 +169,13 @@ static const AVFilterPad avfilter_vf_noformat_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_noformat = {
|
||||
.name = "noformat",
|
||||
.description = NULL_IF_CONFIG_SMALL("Force libavfilter not to use any of the specified pixel formats for the input to the next filter."),
|
||||
|
||||
.init = init,
|
||||
|
||||
.name = "noformat",
|
||||
.description = NULL_IF_CONFIG_SMALL("Force libavfilter not to use any of the specified pixel formats for the input to the next filter."),
|
||||
.init = init,
|
||||
.query_formats = query_formats_noformat,
|
||||
|
||||
.priv_size = sizeof(FormatContext),
|
||||
.priv_class = &noformat_class,
|
||||
|
||||
.inputs = avfilter_vf_noformat_inputs,
|
||||
.outputs = avfilter_vf_noformat_outputs,
|
||||
.priv_size = sizeof(FormatContext),
|
||||
.priv_class = &noformat_class,
|
||||
.inputs = avfilter_vf_noformat_inputs,
|
||||
.outputs = avfilter_vf_noformat_outputs,
|
||||
};
|
||||
#endif /* CONFIG_NOFORMAT_FILTER */
|
||||
|
@ -70,7 +70,7 @@ static const AVOption fps_options[] = {
|
||||
{ "down", "round towards -infty", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_DOWN }, 0, 5, V|F, "round" },
|
||||
{ "up", "round towards +infty", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_UP }, 0, 5, V|F, "round" },
|
||||
{ "near", "round to nearest", OFFSET(rounding), AV_OPT_TYPE_CONST, { .i64 = AV_ROUND_NEAR_INF }, 0, 5, V|F, "round" },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(fps);
|
||||
@ -273,8 +273,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
|
||||
|
||||
static const AVFilterPad avfilter_vf_fps_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
@ -293,13 +293,10 @@ static const AVFilterPad avfilter_vf_fps_outputs[] = {
|
||||
AVFilter avfilter_vf_fps = {
|
||||
.name = "fps",
|
||||
.description = NULL_IF_CONFIG_SMALL("Force constant framerate."),
|
||||
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
|
||||
.priv_size = sizeof(FPSContext),
|
||||
.priv_class = &fps_class,
|
||||
|
||||
.inputs = avfilter_vf_fps_inputs,
|
||||
.outputs = avfilter_vf_fps_outputs,
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.priv_size = sizeof(FPSContext),
|
||||
.priv_class = &fps_class,
|
||||
.inputs = avfilter_vf_fps_inputs,
|
||||
.outputs = avfilter_vf_fps_outputs,
|
||||
};
|
||||
|
@ -38,7 +38,7 @@ typedef struct {
|
||||
|
||||
static const AVOption framestep_options[] = {
|
||||
{ "step", "set frame step", OFFSET(frame_step), AV_OPT_TYPE_INT, {.i64=1}, 1, INT_MAX, FLAGS},
|
||||
{NULL},
|
||||
{ NULL },
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(framestep);
|
||||
@ -74,18 +74,18 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
|
||||
|
||||
static const AVFilterPad framestep_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static const AVFilterPad framestep_outputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_output_props,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_output_props,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -412,7 +412,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
|
||||
static const AVOption frei0r_options[] = {
|
||||
{ "filter_name", NULL, OFFSET(dl_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
|
||||
{ "filter_params", NULL, OFFSET(params), AV_OPT_TYPE_STRING, .flags = FLAGS },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(frei0r);
|
||||
@ -436,19 +436,15 @@ static const AVFilterPad avfilter_vf_frei0r_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_frei0r = {
|
||||
.name = "frei0r",
|
||||
.description = NULL_IF_CONFIG_SMALL("Apply a frei0r effect."),
|
||||
|
||||
.name = "frei0r",
|
||||
.description = NULL_IF_CONFIG_SMALL("Apply a frei0r effect."),
|
||||
.query_formats = query_formats,
|
||||
.init = filter_init,
|
||||
.uninit = uninit,
|
||||
|
||||
.priv_size = sizeof(Frei0rContext),
|
||||
.priv_class = &frei0r_class,
|
||||
|
||||
.inputs = avfilter_vf_frei0r_inputs,
|
||||
|
||||
.outputs = avfilter_vf_frei0r_outputs,
|
||||
.init = filter_init,
|
||||
.uninit = uninit,
|
||||
.priv_size = sizeof(Frei0rContext),
|
||||
.priv_class = &frei0r_class,
|
||||
.inputs = avfilter_vf_frei0r_inputs,
|
||||
.outputs = avfilter_vf_frei0r_outputs,
|
||||
};
|
||||
|
||||
static av_cold int source_init(AVFilterContext *ctx)
|
||||
@ -521,17 +517,13 @@ static const AVFilterPad avfilter_vsrc_frei0r_src_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vsrc_frei0r_src = {
|
||||
.name = "frei0r_src",
|
||||
.description = NULL_IF_CONFIG_SMALL("Generate a frei0r source."),
|
||||
|
||||
.priv_size = sizeof(Frei0rContext),
|
||||
.priv_class = &frei0r_src_class,
|
||||
.init = source_init,
|
||||
.uninit = uninit,
|
||||
|
||||
.name = "frei0r_src",
|
||||
.description = NULL_IF_CONFIG_SMALL("Generate a frei0r source."),
|
||||
.priv_size = sizeof(Frei0rContext),
|
||||
.priv_class = &frei0r_src_class,
|
||||
.init = source_init,
|
||||
.uninit = uninit,
|
||||
.query_formats = query_formats,
|
||||
|
||||
.inputs = NULL,
|
||||
|
||||
.outputs = avfilter_vsrc_frei0r_src_outputs,
|
||||
.inputs = NULL,
|
||||
.outputs = avfilter_vsrc_frei0r_src_outputs,
|
||||
};
|
||||
|
@ -228,7 +228,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
|
||||
static const AVOption gradfun_options[] = {
|
||||
{ "strength", "The maximum amount by which the filter will change any one pixel.", OFFSET(strength), AV_OPT_TYPE_FLOAT, { .dbl = 1.2 }, 0.51, 64, FLAGS },
|
||||
{ "radius", "The neighborhood to fit the gradient to.", OFFSET(radius), AV_OPT_TYPE_INT, { .i64 = 16 }, 4, 32, FLAGS },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(gradfun);
|
||||
|
@ -166,11 +166,10 @@ static const AVFilterPad avfilter_vf_hflip_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_hflip = {
|
||||
.name = "hflip",
|
||||
.description = NULL_IF_CONFIG_SMALL("Horizontally flip the input video."),
|
||||
.priv_size = sizeof(FlipContext),
|
||||
.name = "hflip",
|
||||
.description = NULL_IF_CONFIG_SMALL("Horizontally flip the input video."),
|
||||
.priv_size = sizeof(FlipContext),
|
||||
.query_formats = query_formats,
|
||||
|
||||
.inputs = avfilter_vf_hflip_inputs,
|
||||
.outputs = avfilter_vf_hflip_outputs,
|
||||
.inputs = avfilter_vf_hflip_inputs,
|
||||
.outputs = avfilter_vf_hflip_outputs,
|
||||
};
|
||||
|
@ -252,18 +252,18 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
|
||||
|
||||
static const AVFilterPad histeq_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_input,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_input,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static const AVFilterPad histeq_outputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -274,7 +274,6 @@ AVFilter avfilter_vf_histeq = {
|
||||
.priv_size = sizeof(HisteqContext),
|
||||
.init = init,
|
||||
.query_formats = query_formats,
|
||||
|
||||
.inputs = histeq_inputs,
|
||||
.outputs = histeq_outputs,
|
||||
.priv_class = &histeq_class,
|
||||
|
@ -71,7 +71,7 @@ static const AVOption histogram_options[] = {
|
||||
{ "levels_mode", "set levels mode", OFFSET(levels_mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "levels_mode"},
|
||||
{ "linear", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "levels_mode" },
|
||||
{ "logarithmic", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "levels_mode" },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(histogram);
|
||||
|
@ -325,7 +325,7 @@ static const AVOption hqdn3d_options[] = {
|
||||
{ "chroma_spatial", "spatial chroma strength", OFFSET(strength[CHROMA_SPATIAL]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
|
||||
{ "luma_tmp", "temporal luma strength", OFFSET(strength[LUMA_TMP]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
|
||||
{ "chroma_tmp", "temporal chroma strength", OFFSET(strength[CHROMA_TMP]), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, 0, DBL_MAX, FLAGS },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(hqdn3d);
|
||||
@ -352,14 +352,12 @@ static const AVFilterPad avfilter_vf_hqdn3d_outputs[] = {
|
||||
AVFilter avfilter_vf_hqdn3d = {
|
||||
.name = "hqdn3d",
|
||||
.description = NULL_IF_CONFIG_SMALL("Apply a High Quality 3D Denoiser."),
|
||||
|
||||
.priv_size = sizeof(HQDN3DContext),
|
||||
.priv_class = &hqdn3d_class,
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.query_formats = query_formats,
|
||||
|
||||
.inputs = avfilter_vf_hqdn3d_inputs,
|
||||
.outputs = avfilter_vf_hqdn3d_outputs,
|
||||
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
|
||||
.inputs = avfilter_vf_hqdn3d_inputs,
|
||||
.outputs = avfilter_vf_hqdn3d_outputs,
|
||||
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
|
||||
};
|
||||
|
@ -435,14 +435,12 @@ static const AVFilterPad hue_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_hue = {
|
||||
.name = "hue",
|
||||
.description = NULL_IF_CONFIG_SMALL("Adjust the hue and saturation of the input video."),
|
||||
|
||||
.priv_size = sizeof(HueContext),
|
||||
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.query_formats = query_formats,
|
||||
.name = "hue",
|
||||
.description = NULL_IF_CONFIG_SMALL("Adjust the hue and saturation of the input video."),
|
||||
.priv_size = sizeof(HueContext),
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.query_formats = query_formats,
|
||||
.process_command = process_command,
|
||||
.inputs = hue_inputs,
|
||||
.outputs = hue_outputs,
|
||||
|
@ -288,9 +288,9 @@ static const AVFilterPad idet_inputs[] = {
|
||||
|
||||
static const AVFilterPad idet_outputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_output,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_output,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -298,7 +298,6 @@ static const AVFilterPad idet_outputs[] = {
|
||||
AVFilter avfilter_vf_idet = {
|
||||
.name = "idet",
|
||||
.description = NULL_IF_CONFIG_SMALL("Interlace detect Filter."),
|
||||
|
||||
.priv_size = sizeof(IDETContext),
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
|
@ -184,18 +184,18 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
|
||||
|
||||
static const AVFilterPad inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.config_props = config_input,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.config_props = config_input,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static const AVFilterPad outputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -214,9 +214,9 @@ static const AVFilterPad inputs[] = {
|
||||
|
||||
static const AVFilterPad outputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_out_props,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_out_props,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -225,12 +225,9 @@ AVFilter avfilter_vf_interlace = {
|
||||
.name = "interlace",
|
||||
.description = NULL_IF_CONFIG_SMALL("Convert progressive video into interlaced."),
|
||||
.uninit = uninit,
|
||||
|
||||
.priv_class = &interlace_class,
|
||||
.priv_size = sizeof(InterlaceContext),
|
||||
.query_formats = query_formats,
|
||||
|
||||
.inputs = inputs,
|
||||
.outputs = outputs,
|
||||
};
|
||||
|
||||
|
@ -310,11 +310,9 @@ AVFilter avfilter_vf_kerndeint = {
|
||||
.name = "kerndeint",
|
||||
.description = NULL_IF_CONFIG_SMALL("Apply kernel deinterlacing to the input."),
|
||||
.priv_size = sizeof(KerndeintContext),
|
||||
.priv_class = &kerndeint_class,
|
||||
.uninit = uninit,
|
||||
.query_formats = query_formats,
|
||||
|
||||
.inputs = kerndeint_inputs,
|
||||
.outputs = kerndeint_outputs,
|
||||
|
||||
.priv_class = &kerndeint_class,
|
||||
};
|
||||
|
@ -381,15 +381,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
|
||||
static const AVOption ocv_options[] = {
|
||||
{ "filter_name", NULL, OFFSET(name), AV_OPT_TYPE_STRING, .flags = FLAGS },
|
||||
{ "filter_params", NULL, OFFSET(params), AV_OPT_TYPE_STRING, .flags = FLAGS },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(ocv);
|
||||
|
||||
static const AVFilterPad avfilter_vf_ocv_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
@ -404,17 +404,13 @@ static const AVFilterPad avfilter_vf_ocv_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_ocv = {
|
||||
.name = "ocv",
|
||||
.description = NULL_IF_CONFIG_SMALL("Apply transform using libopencv."),
|
||||
|
||||
.priv_size = sizeof(OCVContext),
|
||||
.priv_class = &ocv_class,
|
||||
|
||||
.name = "ocv",
|
||||
.description = NULL_IF_CONFIG_SMALL("Apply transform using libopencv."),
|
||||
.priv_size = sizeof(OCVContext),
|
||||
.priv_class = &ocv_class,
|
||||
.query_formats = query_formats,
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
|
||||
.inputs = avfilter_vf_ocv_inputs,
|
||||
|
||||
.outputs = avfilter_vf_ocv_outputs,
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.inputs = avfilter_vf_ocv_inputs,
|
||||
.outputs = avfilter_vf_ocv_outputs,
|
||||
};
|
||||
|
@ -92,7 +92,7 @@ static const AVOption options[] = {
|
||||
{ "g", "set G expression", OFFSET(comp_expr_str[G]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
|
||||
{ "b", "set B expression", OFFSET(comp_expr_str[B]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
|
||||
{ "a", "set A expression", OFFSET(comp_expr_str[A]), AV_OPT_TYPE_STRING, { .str = "val" }, .flags = FLAGS },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static av_cold void uninit(AVFilterContext *ctx)
|
||||
@ -331,17 +331,18 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
|
||||
}
|
||||
|
||||
static const AVFilterPad inputs[] = {
|
||||
{ .name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.config_props = config_props,
|
||||
{ .name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.config_props = config_props,
|
||||
},
|
||||
{ .name = NULL}
|
||||
{ NULL }
|
||||
};
|
||||
static const AVFilterPad outputs[] = {
|
||||
{ .name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO, },
|
||||
{ .name = NULL}
|
||||
{ .name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
#define DEFINE_LUT_FILTER(name_, description_) \
|
||||
@ -350,11 +351,9 @@ static const AVFilterPad outputs[] = {
|
||||
.description = NULL_IF_CONFIG_SMALL(description_), \
|
||||
.priv_size = sizeof(LutContext), \
|
||||
.priv_class = &name_ ## _class, \
|
||||
\
|
||||
.init = name_##_init, \
|
||||
.uninit = uninit, \
|
||||
.query_formats = query_formats, \
|
||||
\
|
||||
.inputs = inputs, \
|
||||
.outputs = outputs, \
|
||||
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, \
|
||||
@ -411,7 +410,7 @@ DEFINE_LUT_FILTER(lutrgb, "Compute and apply a lookup table to the RGB input vid
|
||||
|
||||
static const AVOption negate_options[] = {
|
||||
{ "negate_alpha", NULL, OFFSET(negate_alpha), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(negate);
|
||||
|
@ -775,10 +775,10 @@ static const AVFilterPad haldclut_inputs[] = {
|
||||
|
||||
static const AVFilterPad haldclut_outputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.request_frame = request_frame,
|
||||
.config_props = config_output,
|
||||
.config_props = config_output,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -784,11 +784,11 @@ static const AVFilterPad mp_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_mp = {
|
||||
.name = "mp",
|
||||
.description = NULL_IF_CONFIG_SMALL("Apply a libmpcodecs filter to the input video."),
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.priv_size = sizeof(MPContext),
|
||||
.name = "mp",
|
||||
.description = NULL_IF_CONFIG_SMALL("Apply a libmpcodecs filter to the input video."),
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.priv_size = sizeof(MPContext),
|
||||
.query_formats = query_formats,
|
||||
.inputs = mp_inputs,
|
||||
.outputs = mp_outputs,
|
||||
|
@ -227,10 +227,10 @@ static int request_frame(AVFilterLink *outlink)
|
||||
|
||||
static const AVFilterPad mpdecimate_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_input,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_input,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -245,14 +245,13 @@ static const AVFilterPad mpdecimate_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_mpdecimate = {
|
||||
.name = "mpdecimate",
|
||||
.description = NULL_IF_CONFIG_SMALL("Remove near-duplicate frames."),
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
|
||||
.priv_size = sizeof(DecimateContext),
|
||||
.name = "mpdecimate",
|
||||
.description = NULL_IF_CONFIG_SMALL("Remove near-duplicate frames."),
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.priv_size = sizeof(DecimateContext),
|
||||
.priv_class = &mpdecimate_class,
|
||||
.query_formats = query_formats,
|
||||
.inputs = mpdecimate_inputs,
|
||||
.outputs = mpdecimate_outputs,
|
||||
.priv_class = &mpdecimate_class,
|
||||
};
|
||||
|
@ -455,18 +455,18 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
|
||||
static const AVFilterPad noise_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.config_props = config_input,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.config_props = config_input,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static const AVFilterPad noise_outputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -28,8 +28,8 @@
|
||||
|
||||
static const AVFilterPad avfilter_vf_null_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -43,8 +43,8 @@ static const AVFilterPad avfilter_vf_null_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_null = {
|
||||
.name = "null",
|
||||
.name = "null",
|
||||
.description = NULL_IF_CONFIG_SMALL("Pass the source unchanged to the output."),
|
||||
.inputs = avfilter_vf_null_inputs,
|
||||
.outputs = avfilter_vf_null_outputs,
|
||||
.inputs = avfilter_vf_null_inputs,
|
||||
.outputs = avfilter_vf_null_outputs,
|
||||
};
|
||||
|
@ -625,19 +625,15 @@ static const AVFilterPad avfilter_vf_overlay_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_overlay = {
|
||||
.name = "overlay",
|
||||
.description = NULL_IF_CONFIG_SMALL("Overlay a video source on top of the input."),
|
||||
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
|
||||
.priv_size = sizeof(OverlayContext),
|
||||
.priv_class = &overlay_class,
|
||||
|
||||
.name = "overlay",
|
||||
.description = NULL_IF_CONFIG_SMALL("Overlay a video source on top of the input."),
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.priv_size = sizeof(OverlayContext),
|
||||
.priv_class = &overlay_class,
|
||||
.query_formats = query_formats,
|
||||
.process_command = process_command,
|
||||
|
||||
.inputs = avfilter_vf_overlay_inputs,
|
||||
.outputs = avfilter_vf_overlay_outputs,
|
||||
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
|
||||
.inputs = avfilter_vf_overlay_inputs,
|
||||
.outputs = avfilter_vf_overlay_outputs,
|
||||
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
|
||||
};
|
||||
|
@ -367,7 +367,7 @@ static const AVOption pad_options[] = {
|
||||
{ "x", "set the x offset expression for the input image position", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
|
||||
{ "y", "set the y offset expression for the input image position", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "0"}, CHAR_MIN, CHAR_MAX, FLAGS },
|
||||
{ "color", "set the color of the padded area border", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str = "black"}, .flags = FLAGS },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(pad);
|
||||
@ -395,12 +395,9 @@ static const AVFilterPad avfilter_vf_pad_outputs[] = {
|
||||
AVFilter avfilter_vf_pad = {
|
||||
.name = "pad",
|
||||
.description = NULL_IF_CONFIG_SMALL("Pad input image to width:height[:x:y[:color]] (default x and y: 0, default color: black)."),
|
||||
|
||||
.priv_size = sizeof(PadContext),
|
||||
.priv_class = &pad_class,
|
||||
.query_formats = query_formats,
|
||||
|
||||
.inputs = avfilter_vf_pad_inputs,
|
||||
|
||||
.outputs = avfilter_vf_pad_outputs,
|
||||
.inputs = avfilter_vf_pad_inputs,
|
||||
.outputs = avfilter_vf_pad_outputs,
|
||||
};
|
||||
|
@ -102,7 +102,7 @@ static inline double get_coeff(double d)
|
||||
return coeff;
|
||||
}
|
||||
|
||||
static const char *const var_names[] = { "W", "H", NULL };
|
||||
static const char *const var_names[] = { "W", "H", NULL };
|
||||
enum { VAR_W, VAR_H, VAR_VARS_NB };
|
||||
|
||||
static int config_input(AVFilterLink *inlink)
|
||||
@ -373,18 +373,18 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
|
||||
static const AVFilterPad perspective_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.config_props = config_input,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.config_props = config_input,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static const AVFilterPad perspective_outputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -292,10 +292,10 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
|
||||
static const AVFilterPad phase_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.config_props = config_input,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.config_props = config_input,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -128,11 +128,8 @@ static const AVFilterPad avfilter_vf_pixdesctest_outputs[] = {
|
||||
AVFilter avfilter_vf_pixdesctest = {
|
||||
.name = "pixdesctest",
|
||||
.description = NULL_IF_CONFIG_SMALL("Test pixel format definitions."),
|
||||
|
||||
.priv_size = sizeof(PixdescTestContext),
|
||||
.uninit = uninit,
|
||||
|
||||
.inputs = avfilter_vf_pixdesctest_inputs,
|
||||
|
||||
.outputs = avfilter_vf_pixdesctest_outputs,
|
||||
.priv_size = sizeof(PixdescTestContext),
|
||||
.uninit = uninit,
|
||||
.inputs = avfilter_vf_pixdesctest_inputs,
|
||||
.outputs = avfilter_vf_pixdesctest_outputs,
|
||||
};
|
||||
|
@ -61,7 +61,7 @@ typedef struct PSNRContext {
|
||||
static const AVOption psnr_options[] = {
|
||||
{"stats_file", "Set file where to store per-frame difference information", OFFSET(stats_file_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
|
||||
{"f", "Set file where to store per-frame difference information", OFFSET(stats_file_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(psnr);
|
||||
@ -351,14 +351,14 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
|
||||
static const AVFilterPad psnr_inputs[] = {
|
||||
{
|
||||
.name = "main",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame_main,
|
||||
.name = "main",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame_main,
|
||||
},{
|
||||
.name = "reference",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame_ref,
|
||||
.config_props = config_input_ref,
|
||||
.name = "reference",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame_ref,
|
||||
.config_props = config_input_ref,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -374,13 +374,13 @@ static const AVFilterPad psnr_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_psnr = {
|
||||
.name = "psnr",
|
||||
.description = NULL_IF_CONFIG_SMALL("Calculate the PSNR between two video streams."),
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.query_formats = query_formats,
|
||||
.priv_size = sizeof(PSNRContext),
|
||||
.priv_class = &psnr_class,
|
||||
.inputs = psnr_inputs,
|
||||
.outputs = psnr_outputs,
|
||||
.name = "psnr",
|
||||
.description = NULL_IF_CONFIG_SMALL("Calculate the PSNR between two video streams."),
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.query_formats = query_formats,
|
||||
.priv_size = sizeof(PSNRContext),
|
||||
.priv_class = &psnr_class,
|
||||
.inputs = psnr_inputs,
|
||||
.outputs = psnr_outputs,
|
||||
};
|
||||
|
@ -551,10 +551,10 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
|
||||
static const AVFilterPad removelogo_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_props_input,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_props_input,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -538,7 +538,7 @@ static const AVOption scale_options[] = {
|
||||
{ "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
|
||||
{ "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
|
||||
{ "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static const AVClass scale_class = {
|
||||
@ -551,8 +551,8 @@ static const AVClass scale_class = {
|
||||
|
||||
static const AVFilterPad avfilter_vf_scale_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
@ -568,17 +568,13 @@ static const AVFilterPad avfilter_vf_scale_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_scale = {
|
||||
.name = "scale",
|
||||
.description = NULL_IF_CONFIG_SMALL("Scale the input video to width:height size and/or convert the image format."),
|
||||
|
||||
.init_dict = init_dict,
|
||||
.uninit = uninit,
|
||||
|
||||
.name = "scale",
|
||||
.description = NULL_IF_CONFIG_SMALL("Scale the input video to width:height size and/or convert the image format."),
|
||||
.init_dict = init_dict,
|
||||
.uninit = uninit,
|
||||
.query_formats = query_formats,
|
||||
|
||||
.priv_size = sizeof(ScaleContext),
|
||||
.priv_class = &scale_class,
|
||||
|
||||
.inputs = avfilter_vf_scale_inputs,
|
||||
.outputs = avfilter_vf_scale_outputs,
|
||||
.priv_size = sizeof(ScaleContext),
|
||||
.priv_class = &scale_class,
|
||||
.inputs = avfilter_vf_scale_inputs,
|
||||
.outputs = avfilter_vf_scale_outputs,
|
||||
};
|
||||
|
@ -88,26 +88,26 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
|
||||
|
||||
static const AVFilterPad separatefields_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static const AVFilterPad separatefields_outputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_props_output,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_props_output,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_separatefields = {
|
||||
.name = "separatefields",
|
||||
.description = NULL_IF_CONFIG_SMALL("Split input video frames into fields."),
|
||||
.priv_size = sizeof(SeparateFieldsContext),
|
||||
.inputs = separatefields_inputs,
|
||||
.outputs = separatefields_outputs,
|
||||
.name = "separatefields",
|
||||
.description = NULL_IF_CONFIG_SMALL("Split input video frames into fields."),
|
||||
.priv_size = sizeof(SeparateFieldsContext),
|
||||
.inputs = separatefields_inputs,
|
||||
.outputs = separatefields_outputs,
|
||||
};
|
||||
|
@ -69,9 +69,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
|
||||
|
||||
static const AVFilterPad setfield_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -85,10 +85,10 @@ static const AVFilterPad setfield_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_setfield = {
|
||||
.name = "setfield",
|
||||
.name = "setfield",
|
||||
.description = NULL_IF_CONFIG_SMALL("Force field for the output video frame."),
|
||||
.priv_size = sizeof(SetFieldContext),
|
||||
.inputs = setfield_inputs,
|
||||
.outputs = setfield_outputs,
|
||||
.priv_class = &setfield_class,
|
||||
.priv_size = sizeof(SetFieldContext),
|
||||
.priv_class = &setfield_class,
|
||||
.inputs = setfield_inputs,
|
||||
.outputs = setfield_outputs,
|
||||
};
|
||||
|
@ -77,9 +77,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
|
||||
|
||||
static const AVFilterPad avfilter_vf_showinfo_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -95,8 +95,6 @@ static const AVFilterPad avfilter_vf_showinfo_outputs[] = {
|
||||
AVFilter avfilter_vf_showinfo = {
|
||||
.name = "showinfo",
|
||||
.description = NULL_IF_CONFIG_SMALL("Show textual information for each video frame."),
|
||||
|
||||
.inputs = avfilter_vf_showinfo_inputs,
|
||||
|
||||
.outputs = avfilter_vf_showinfo_outputs,
|
||||
.inputs = avfilter_vf_showinfo_inputs,
|
||||
.outputs = avfilter_vf_showinfo_outputs,
|
||||
};
|
||||
|
@ -291,11 +291,9 @@ static const AVFilterPad smartblur_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_smartblur = {
|
||||
.name = "smartblur",
|
||||
.description = NULL_IF_CONFIG_SMALL("Blur the input video without impacting the outlines."),
|
||||
|
||||
.priv_size = sizeof(SmartblurContext),
|
||||
|
||||
.name = "smartblur",
|
||||
.description = NULL_IF_CONFIG_SMALL("Blur the input video without impacting the outlines."),
|
||||
.priv_size = sizeof(SmartblurContext),
|
||||
.init = init,
|
||||
.uninit = uninit,
|
||||
.query_formats = query_formats,
|
||||
|
@ -187,7 +187,7 @@ static const AVOption stereo3d_options[] = {
|
||||
{ "sbs2r", "side by side half width right first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_2_RL}, 0, 0, FLAGS, "out" },
|
||||
{ "sbsl", "side by side left first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_LR}, 0, 0, FLAGS, "out" },
|
||||
{ "sbsr", "side by side right first", 0, AV_OPT_TYPE_CONST, {.i64=SIDE_BY_SIDE_RL}, 0, 0, FLAGS, "out" },
|
||||
{NULL},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(stereo3d);
|
||||
@ -628,9 +628,9 @@ static av_cold void uninit(AVFilterContext *ctx)
|
||||
|
||||
static const AVFilterPad stereo3d_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -343,9 +343,9 @@ static const AVFilterPad super2xsai_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_super2xsai = {
|
||||
.name = "super2xsai",
|
||||
.description = NULL_IF_CONFIG_SMALL("Scale the input by 2x using the Super2xSaI pixel art algorithm."),
|
||||
.priv_size = sizeof(Super2xSaIContext),
|
||||
.name = "super2xsai",
|
||||
.description = NULL_IF_CONFIG_SMALL("Scale the input by 2x using the Super2xSaI pixel art algorithm."),
|
||||
.priv_size = sizeof(Super2xSaIContext),
|
||||
.query_formats = query_formats,
|
||||
.inputs = super2xsai_inputs,
|
||||
.outputs = super2xsai_outputs,
|
||||
|
@ -102,9 +102,8 @@ static const AVFilterPad swapuv_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_swapuv = {
|
||||
.name = "swapuv",
|
||||
.description = NULL_IF_CONFIG_SMALL("Swap U and V components."),
|
||||
.priv_size = 0,
|
||||
.name = "swapuv",
|
||||
.description = NULL_IF_CONFIG_SMALL("Swap U and V components."),
|
||||
.query_formats = query_formats,
|
||||
.inputs = swapuv_inputs,
|
||||
.outputs = swapuv_outputs,
|
||||
|
@ -209,10 +209,10 @@ static int query_formats(AVFilterContext *ctx)
|
||||
|
||||
static const AVFilterPad thumbnail_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_props,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_props,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -59,7 +59,7 @@ static const AVOption tile_options[] = {
|
||||
{ "padding", "set inner border thickness in pixels", OFFSET(padding),
|
||||
AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1024, FLAGS },
|
||||
{ "color", "set the color of the unused area", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str = "black"}, .flags = FLAGS },
|
||||
{NULL},
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(tile);
|
||||
|
@ -363,9 +363,9 @@ static const AVFilterPad tinterlace_inputs[] = {
|
||||
|
||||
static const AVFilterPad tinterlace_outputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_out_props,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.config_props = config_out_props,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
|
@ -267,17 +267,17 @@ static const AVOption transpose_options[] = {
|
||||
{ "portrait", "preserve portrait geometry", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_PORTRAIT}, INT_MIN, INT_MAX, FLAGS, "passthrough" },
|
||||
{ "landscape", "preserve landscape geometry", 0, AV_OPT_TYPE_CONST, {.i64=TRANSPOSE_PT_TYPE_LANDSCAPE}, INT_MIN, INT_MAX, FLAGS, "passthrough" },
|
||||
|
||||
{ NULL },
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(transpose);
|
||||
|
||||
static const AVFilterPad avfilter_vf_transpose_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.get_video_buffer= get_video_buffer,
|
||||
.filter_frame = filter_frame,
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.get_video_buffer = get_video_buffer,
|
||||
.filter_frame = filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@ -292,15 +292,12 @@ static const AVFilterPad avfilter_vf_transpose_outputs[] = {
|
||||
};
|
||||
|
||||
AVFilter avfilter_vf_transpose = {
|
||||
.name = "transpose",
|
||||
.description = NULL_IF_CONFIG_SMALL("Transpose input video."),
|
||||
|
||||
.priv_size = sizeof(TransContext),
|
||||
.priv_class = &transpose_class,
|
||||
|
||||
.name = "transpose",
|
||||
.description = NULL_IF_CONFIG_SMALL("Transpose input video."),
|
||||
.priv_size = sizeof(TransContext),
|
||||
.priv_class = &transpose_class,
|
||||
.query_formats = query_formats,
|
||||
|
||||
.inputs = avfilter_vf_transpose_inputs,
|
||||
.outputs = avfilter_vf_transpose_outputs,
|
||||
.flags = AVFILTER_FLAG_SLICE_THREADS,
|
||||
.inputs = avfilter_vf_transpose_inputs,
|
||||
.outputs = avfilter_vf_transpose_outputs,
|
||||
.flags = AVFILTER_FLAG_SLICE_THREADS,
|
||||
};
|
||||
|
@ -275,7 +275,7 @@ static const AVOption unsharp_options[] = {
{ "chroma_amount", "set chroma effect strength", OFFSET(camount), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, -2, 5, FLAGS },
{ "ca", "set chroma effect strength", OFFSET(camount), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, -2, 5, FLAGS },
{ "opencl", "use OpenCL filtering capabilities", OFFSET(opencl), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS },
{ NULL },
{ NULL }
};

AVFILTER_DEFINE_CLASS(unsharp);
@ -299,17 +299,14 @@ static const AVFilterPad avfilter_vf_unsharp_outputs[] = {
};

AVFilter avfilter_vf_unsharp = {
.name = "unsharp",
.description = NULL_IF_CONFIG_SMALL("Sharpen or blur the input video."),

.priv_size = sizeof(UnsharpContext),
.priv_class = &unsharp_class,

.init = init,
.uninit = uninit,
.name = "unsharp",
.description = NULL_IF_CONFIG_SMALL("Sharpen or blur the input video."),
.priv_size = sizeof(UnsharpContext),
.priv_class = &unsharp_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,

.inputs = avfilter_vf_unsharp_inputs,
.outputs = avfilter_vf_unsharp_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
.inputs = avfilter_vf_unsharp_inputs,
.outputs = avfilter_vf_unsharp_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
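
For reference, every hunk above normalizes libavfilter's option and pad tables to the same shape: a { NULL } sentinel entry with no trailing comma, and the AVFilter definition written as a single block of designated initializers with no blank lines in between. The sketch below only illustrates that layout, assuming the usual libavfilter headers; the filter name, context struct and pass-through callback are placeholders invented for the example, not taken from any file in this commit.

/* Minimal pass-through filter sketch showing the table layout used above.
 * "example" and ExampleContext are hypothetical names for illustration. */
#include "avfilter.h"
#include "internal.h"

typedef struct ExampleContext {
    int dummy;                               /* placeholder private field */
} ExampleContext;

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    /* forward the frame unchanged to the first output */
    return ff_filter_frame(inlink->dst->outputs[0], frame);
}

static const AVFilterPad example_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }                                 /* sentinel, no trailing comma */
};

static const AVFilterPad example_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter avfilter_vf_example = {
    .name        = "example",
    .description = NULL_IF_CONFIG_SMALL("Pass the input through unchanged (layout illustration)."),
    .priv_size   = sizeof(ExampleContext),
    .inputs      = example_inputs,
    .outputs     = example_outputs,
};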