Merge remote-tracking branch 'qatar/master'
* qatar/master:
  rtmp: Add a new option 'rtmp_buffer', for setting the client buffer time
  rtmp: Set the client buffer time to 3s instead of 0.26s
  rtmp: Handle server bandwidth packets
  rtmp: Display a verbose message when an unknown packet type is received
  lavfi/audio: use av_samples_copy() instead of custom code.
  configure: add all filters hardcoded into avconv to avconv_deps
  avfiltergraph: remove a redundant call to avfilter_get_by_name().
  lavfi: allow building without swscale.
  build: Do not delete tests/vsynth2 directory, which is no longer created.
  lavfi: replace AVFilterContext.input/output_count with nb_inputs/outputs
  lavfi: make AVFilterPad opaque after two major bumps.
  lavfi: add avfilter_pad_get_type() and avfilter_pad_get_name().
  lavfi: make avfilter_get_video_buffer() private on next bump.
  jack: update to new latency range API as the old one has been deprecated
  rtmp: Tokenize the AMF connection parameters manually instead of using strtok_r
  ppc: Rename H.264 optimization template file for consistency.
  lavfi: add channelsplit audio filter.
  golomb: check remaining bits during unary decoding in get_ur_golomb_jpegls()
  sws: fix planar RGB input conversions for 9/10/16 bpp.

Conflicts:
	Changelog
	configure
	doc/APIchanges
	ffmpeg.c
	libavcodec/golomb.h
	libavcodec/v210dec.h
	libavfilter/Makefile
	libavfilter/allfilters.c
	libavfilter/asrc_anullsrc.c
	libavfilter/audio.c
	libavfilter/avfilter.c
	libavfilter/avfilter.h
	libavfilter/avfiltergraph.c
	libavfilter/buffersrc.c
	libavfilter/formats.c
	libavfilter/version.h
	libavfilter/vf_frei0r.c
	libavfilter/vf_pad.c
	libavfilter/vf_scale.c
	libavfilter/video.h
	libavfilter/vsrc_color.c
	libavformat/rtmpproto.c
	libswscale/input.c
	tests/Makefile

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit c7b9eab2be

.gitignore
@@ -50,7 +50,6 @@ tests/rotozoom
 tests/tiny_psnr
 tests/videogen
 tests/vsynth1
-tests/vsynth2
 tools/aviocat
 tools/cws2fws
 tools/ffeval
Changelog
@@ -5,6 +5,7 @@ version next:
 - INI and flat output in ffprobe
 - Scene detection in libavfilter
 - Indeo Audio decoder
+- channelsplit audio filter


 version 0.11:
configure
@@ -1210,6 +1210,7 @@ HAVE_LIST="
     inet_aton
     inline_asm
     isatty
+    jack_port_get_latency_range
     kbhit
     ldbrx
     libdc1394_1
@@ -1730,7 +1731,6 @@ yadif_filter_deps="gpl"

 # libraries
 avdevice_deps="avcodec avformat"
-avfilter_deps="swscale"
 avformat_deps="avcodec"
 postproc_deps="gpl"

@@ -3354,7 +3354,8 @@ check_header soundcard.h

 enabled_any alsa_indev alsa_outdev && check_lib2 alsa/asoundlib.h snd_pcm_htimestamp -lasound

-enabled jack_indev && check_lib2 jack/jack.h jack_client_open -ljack && check_func sem_timedwait
+enabled jack_indev && check_lib2 jack/jack.h jack_client_open -ljack && check_func sem_timedwait &&
+    check_func jack_port_get_latency_range -ljack

 enabled_any sndio_indev sndio_outdev && check_lib2 sndio.h sio_open -lsndio

doc/APIchanges
@@ -38,6 +38,15 @@ API changes, most recent first:
 2012-03-26 - a67d9cf - lavfi 2.66.100
   Add avfilter_fill_frame_from_{audio_,}buffer_ref() functions.

+2012-xx-xx - xxxxxxx - lavfi 2.23.0 - avfilter.h
+  Add AVFilterContext.nb_inputs/outputs. Deprecate
+  AVFilterContext.input/output_count.
+
+2012-xx-xx - xxxxxxx - lavfi 2.22.0 - avfilter.h
+  Add avfilter_pad_get_type() and avfilter_pad_get_name(). Those
+  should now be used instead of accessing AVFilterPad members
+  directly.
+
 2012-xx-xx - xxxxxxx - lavu 51.32.0 - audioconvert.h
   Add av_get_channel_layout_channel_index(), av_get_channel_name()
   and av_channel_layout_extract_channel().
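As a rough illustration of the accessor API described in the entries above (this sketch is not part of the diff; the helper function and variable names are made up for the example), caller code that used to read AVFilterPad.name/type directly can be written against the new functions like this:

#include <stdio.h>
#include "libavfilter/avfilter.h"

/* Minimal sketch: list a filter instance's input pads using
 * avfilter_pad_get_name()/avfilter_pad_get_type() and nb_inputs,
 * instead of touching AVFilterPad members or input_count directly. */
static void print_input_pads(AVFilterContext *ctx)
{
    unsigned i;
    for (i = 0; i < ctx->nb_inputs; i++)
        printf("in pad %u: name=%s type=%d\n", i,
               avfilter_pad_get_name(ctx->input_pads, i),
               (int)avfilter_pad_get_type(ctx->input_pads, i));
}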
doc/filters.texi
@@ -576,6 +576,31 @@ Maximum compensation in samples per second.

 @end table

+@section channelsplit
+Split each channel in input audio stream into a separate output stream.
+
+This filter accepts the following named parameters:
+@table @option
+@item channel_layout
+Channel layout of the input stream. Default is "stereo".
+@end table
+
+For example, assuming a stereo input MP3 file
+@example
+ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv
+@end example
+will create an output Matroska file with two audio streams, one containing only
+the left channel and the other the right channel.
+
+To split a 5.1 WAV file into per-channel files
+@example
+ffmpeg -i in.wav -filter_complex
+'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]'
+-map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]'
+front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]'
+side_right.wav
+@end example
+
 @section resample
 Convert the audio sample format, sample rate and channel layout. This filter is
 not meant to be used directly.
doc/protocols.texi
@@ -228,6 +228,9 @@ Additionally, the following parameters can be set via command line options
 Name of application to connect on the RTMP server. This option
 overrides the parameter specified in the URI.

+@item rtmp_buffer
+Set the client buffer time in milliseconds. The default is 3000.
+
 @item rtmp_conn
 Extra arbitrary AMF connection parameters, parsed from a string,
 e.g. like @code{B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0}.
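For context, the same rtmp_buffer option can also be supplied programmatically through the AVOption dictionary when opening the stream. This is a minimal sketch under that assumption (error handling omitted; the function name open_rtmp_input and the 5000 ms value are made up for the example):

#include "libavformat/avio.h"
#include "libavutil/dict.h"

/* Open an RTMP URL for reading with a 5 second client buffer by passing
 * the protocol-private "rtmp_buffer" option (milliseconds) to avio_open2(). */
static AVIOContext *open_rtmp_input(const char *url)
{
    AVIOContext  *io   = NULL;
    AVDictionary *opts = NULL;

    av_dict_set(&opts, "rtmp_buffer", "5000", 0);
    avio_open2(&io, url, AVIO_FLAG_READ, NULL, &opts);
    av_dict_free(&opts);
    return io;
}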
ffmpeg.c
@@ -708,7 +708,7 @@ static FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
 static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
 {
     InputStream *ist = NULL;
-    enum AVMediaType type = in->filter_ctx->input_pads[in->pad_idx].type;
+    enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
     int i;

     // TODO: support other filter types
@@ -978,7 +978,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
                                                              \
     avio_printf(pb, "%s", ctx->filter->name);                \
     if (nb_pads > 1)                                          \
-        avio_printf(pb, ":%s", pads[inout->pad_idx].name);   \
+        avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));\
     avio_w8(pb, 0);                                           \
     avio_close_dyn_buf(pb, &f->name);                         \
 }
@@ -988,7 +988,7 @@ static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFil
     av_freep(&ofilter->name);
     DESCRIBE_FILTER_LINK(ofilter, out, 0);

-    switch (out->filter_ctx->output_pads[out->pad_idx].type) {
+    switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
     case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
     case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
     default: av_assert0(0);
@@ -1132,7 +1132,7 @@ static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
     av_freep(&ifilter->name);
     DESCRIBE_FILTER_LINK(ifilter, in, 1);

-    switch (in->filter_ctx->input_pads[in->pad_idx].type) {
+    switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
     case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
     case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
     default: av_assert0(0);
@@ -4899,7 +4899,8 @@ static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
 {
     OutputStream *ost;

-    switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) {
+    switch (avfilter_pad_get_type(ofilter->out_tmp->filter_ctx->output_pads,
+                                  ofilter->out_tmp->pad_idx)) {
     case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc, -1); break;
     case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc, -1); break;
     default:
@@ -4961,7 +4962,8 @@ static void opt_output_file(void *optctx, const char *filename)
             if (!ofilter->out_tmp || ofilter->out_tmp->name)
                 continue;

-            switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) {
+            switch (avfilter_pad_get_type(ofilter->out_tmp->filter_ctx->output_pads,
+                                          ofilter->out_tmp->pad_idx)) {
             case AVMEDIA_TYPE_VIDEO:    o->video_disable    = 1; break;
             case AVMEDIA_TYPE_AUDIO:    o->audio_disable    = 1; break;
             case AVMEDIA_TYPE_SUBTITLE: o->subtitle_disable = 1; break;
libavcodec/ppc/h264_altivec.c
@@ -39,7 +39,7 @@
 #define PREFIX_h264_qpel16_v_lowpass_num altivec_put_h264_qpel16_v_lowpass_num
 #define PREFIX_h264_qpel16_hv_lowpass_altivec put_h264_qpel16_hv_lowpass_altivec
 #define PREFIX_h264_qpel16_hv_lowpass_num altivec_put_h264_qpel16_hv_lowpass_num
-#include "h264_template_altivec.c"
+#include "h264_altivec_template.c"
 #undef OP_U8_ALTIVEC
 #undef PREFIX_h264_chroma_mc8_altivec
 #undef PREFIX_h264_chroma_mc8_num
@@ -59,7 +59,7 @@
 #define PREFIX_h264_qpel16_v_lowpass_num altivec_avg_h264_qpel16_v_lowpass_num
 #define PREFIX_h264_qpel16_hv_lowpass_altivec avg_h264_qpel16_hv_lowpass_altivec
 #define PREFIX_h264_qpel16_hv_lowpass_num altivec_avg_h264_qpel16_hv_lowpass_num
-#include "h264_template_altivec.c"
+#include "h264_altivec_template.c"
 #undef OP_U8_ALTIVEC
 #undef PREFIX_h264_chroma_mc8_altivec
 #undef PREFIX_h264_chroma_mc8_num

libavcodec/ppc/vc1dsp_altivec.c
@@ -325,13 +325,13 @@ static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, DCTELEM *block)

 #define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC
 #define PREFIX_no_rnd_vc1_chroma_mc8_altivec put_no_rnd_vc1_chroma_mc8_altivec
-#include "h264_template_altivec.c"
+#include "h264_altivec_template.c"
 #undef OP_U8_ALTIVEC
 #undef PREFIX_no_rnd_vc1_chroma_mc8_altivec

 #define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC
 #define PREFIX_no_rnd_vc1_chroma_mc8_altivec avg_no_rnd_vc1_chroma_mc8_altivec
-#include "h264_template_altivec.c"
+#include "h264_altivec_template.c"
 #undef OP_U8_ALTIVEC
 #undef PREFIX_no_rnd_vc1_chroma_mc8_altivec

libavcodec/v210dec.h
@@ -22,6 +22,7 @@
 #include "libavutil/log.h"
 #include "libavutil/opt.h"

 typedef struct {
     AVClass *av_class;
     int custom_stride;
libavdevice/jack_audio.c
@@ -92,7 +92,13 @@ static int process_callback(jack_nframes_t nframes, void *arg)

     /* Copy and interleave audio data from the JACK buffer into the packet */
     for (i = 0; i < self->nports; i++) {
+#if HAVE_JACK_PORT_GET_LATENCY_RANGE
+        jack_latency_range_t range;
+        jack_port_get_latency_range(self->ports[i], JackCaptureLatency, &range);
+        latency += range.max;
+#else
         latency += jack_port_get_total_latency(self->client, self->ports[i]);
+#endif
         buffer = jack_port_get_buffer(self->ports[i], self->buffer_size);
         for (j = 0; j < self->buffer_size; j++)
             pkt_data[j * self->nports + i] = buffer[j];
libavfilter/Makefile
@@ -1,9 +1,10 @@
 include $(SUBDIR)../config.mak

 NAME = avfilter
-FFLIBS = avutil swscale
+FFLIBS = avutil
 FFLIBS-$(CONFIG_ASYNCTS_FILTER)  += avresample
 FFLIBS-$(CONFIG_RESAMPLE_FILTER) += avresample
+FFLIBS-$(CONFIG_SCALE_FILTER)    += swscale

 FFLIBS-$(CONFIG_ACONVERT_FILTER) += swresample
 FFLIBS-$(CONFIG_AMOVIE_FILTER)   += avformat avcodec
@@ -54,6 +55,7 @@ OBJS-$(CONFIG_ASHOWINFO_FILTER) += af_ashowinfo.o
 OBJS-$(CONFIG_ASPLIT_FILTER)       += split.o
 OBJS-$(CONFIG_ASTREAMSYNC_FILTER)  += af_astreamsync.o
 OBJS-$(CONFIG_ASYNCTS_FILTER)      += af_asyncts.o
+OBJS-$(CONFIG_CHANNELSPLIT_FILTER) += af_channelsplit.o
 OBJS-$(CONFIG_EARWAX_FILTER)       += af_earwax.o
 OBJS-$(CONFIG_PAN_FILTER)          += af_pan.o
 OBJS-$(CONFIG_RESAMPLE_FILTER)     += af_resample.o
@@ -102,6 +104,7 @@ OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o
 OBJS-$(CONFIG_PAD_FILTER)          += vf_pad.o
 OBJS-$(CONFIG_PIXDESCTEST_FILTER)  += vf_pixdesctest.o
 OBJS-$(CONFIG_REMOVELOGO_FILTER)   += bbox.o lswsutils.o lavfutils.o vf_removelogo.o
+OBJS-$(CONFIG_SCALE_FILTER)        += vf_scale.o
 OBJS-$(CONFIG_SELECT_FILTER)       += vf_select.o
 OBJS-$(CONFIG_SETDAR_FILTER)       += vf_aspect.o
 OBJS-$(CONFIG_SETFIELD_FILTER)     += vf_setfield.o
libavfilter/af_amerge.c
@@ -118,7 +118,7 @@ static int query_formats(AVFilterContext *ctx)
             if ((inlayout[i] >> c) & 1)
                 *(route[i]++) = out_ch_number++;
     }
-    formats = avfilter_make_format_list(ff_packed_sample_fmts);
+    formats = avfilter_make_format_list(ff_packed_sample_fmts_array);
     avfilter_set_common_sample_formats(ctx, formats);
     for (i = 0; i < am->nb_inputs; i++) {
         layouts = NULL;
libavfilter/af_amix.c
@@ -454,10 +454,10 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
     AVFilterLink *outlink = ctx->outputs[0];
     int i;

-    for (i = 0; i < ctx->input_count; i++)
+    for (i = 0; i < ctx->nb_inputs; i++)
         if (ctx->inputs[i] == inlink)
             break;
-    if (i >= ctx->input_count) {
+    if (i >= ctx->nb_inputs) {
         av_log(ctx, AV_LOG_ERROR, "unknown input link\n");
         return;
     }
@@ -518,7 +518,7 @@ static void uninit(AVFilterContext *ctx)
     av_freep(&s->input_state);
     av_freep(&s->input_scale);

-    for (i = 0; i < ctx->input_count; i++)
+    for (i = 0; i < ctx->nb_inputs; i++)
         av_freep(&ctx->input_pads[i].name);
 }

libavfilter/af_anull.c
@@ -24,6 +24,7 @@

 #include "audio.h"
 #include "avfilter.h"
+#include "internal.h"

 AVFilter avfilter_af_anull = {
     .name          = "anull",
libavfilter/af_channelsplit.c (new file, 146 lines)
@@ -0,0 +1,146 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Channel split filter
+ *
+ * Split an audio stream into per-channel streams.
+ */
+
+#include "libavutil/audioconvert.h"
+#include "libavutil/opt.h"
+
+#include "audio.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+typedef struct ChannelSplitContext {
+    const AVClass *class;
+
+    uint64_t channel_layout;
+    char    *channel_layout_str;
+} ChannelSplitContext;
+
+#define OFFSET(x) offsetof(ChannelSplitContext, x)
+#define A AV_OPT_FLAG_AUDIO_PARAM
+static const AVOption options[] = {
+    { "channel_layout", "Input channel layout.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, { .str = "stereo" }, .flags = A },
+    { NULL },
+};
+
+static const AVClass channelsplit_class = {
+    .class_name = "channelsplit filter",
+    .item_name  = av_default_item_name,
+    .option     = options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+
+static int init(AVFilterContext *ctx, const char *arg, void *opaque)
+{
+    ChannelSplitContext *s = ctx->priv;
+    int nb_channels;
+    int ret = 0, i;
+
+    s->class = &channelsplit_class;
+    av_opt_set_defaults(s);
+    if ((ret = av_set_options_string(s, arg, "=", ":")) < 0) {
+        av_log(ctx, AV_LOG_ERROR, "Error parsing options string '%s'.\n", arg);
+        return ret;
+    }
+    if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) {
+        av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n",
+               s->channel_layout_str);
+        ret = AVERROR(EINVAL);
+        goto fail;
+    }
+
+    nb_channels = av_get_channel_layout_nb_channels(s->channel_layout);
+    for (i = 0; i < nb_channels; i++) {
+        uint64_t channel = av_channel_layout_extract_channel(s->channel_layout, i);
+        AVFilterPad pad  = { 0 };
+
+        pad.type = AVMEDIA_TYPE_AUDIO;
+        pad.name = av_get_channel_name(channel);
+
+        ff_insert_outpad(ctx, i, &pad);
+    }
+
+fail:
+    av_opt_free(s);
+    return ret;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+    ChannelSplitContext *s = ctx->priv;
+    AVFilterChannelLayouts *in_layouts = NULL;
+    int i;
+
+    ff_set_common_formats    (ctx, ff_planar_sample_fmts());
+    ff_set_common_samplerates(ctx, ff_all_samplerates());
+
+    ff_add_channel_layout(&in_layouts, s->channel_layout);
+    ff_channel_layouts_ref(in_layouts, &ctx->inputs[0]->out_channel_layouts);
+
+    for (i = 0; i < ctx->nb_outputs; i++) {
+        AVFilterChannelLayouts *out_layouts = NULL;
+        uint64_t channel = av_channel_layout_extract_channel(s->channel_layout, i);
+
+        ff_add_channel_layout(&out_layouts, channel);
+        ff_channel_layouts_ref(out_layouts, &ctx->outputs[i]->in_channel_layouts);
+    }
+
+    return 0;
+}
+
+static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
+{
+    AVFilterContext *ctx = inlink->dst;
+    int i;
+
+    for (i = 0; i < ctx->nb_outputs; i++) {
+        AVFilterBufferRef *buf_out = avfilter_ref_buffer(buf, ~AV_PERM_WRITE);
+
+        if (!buf_out)
+            return;
+
+        buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i];
+        buf_out->audio->channel_layout =
+            av_channel_layout_extract_channel(buf->audio->channel_layout, i);
+
+        ff_filter_samples(ctx->outputs[i], buf_out);
+    }
+    avfilter_unref_buffer(buf);
+}
+
+AVFilter avfilter_af_channelsplit = {
+    .name           = "channelsplit",
+    .description    = NULL_IF_CONFIG_SMALL("Split audio into per-channel streams"),
+    .priv_size      = sizeof(ChannelSplitContext),
+
+    .init           = init,
+    .query_formats  = query_formats,
+
+    .inputs  = (const AVFilterPad[]){{ .name           = "default",
+                                       .type           = AVMEDIA_TYPE_AUDIO,
+                                       .filter_samples = filter_samples, },
+                                     { NULL }},
+    .outputs = (const AVFilterPad[]){{ NULL }},
+};
libavfilter/allfilters.c
@@ -44,6 +44,7 @@ void avfilter_register_all(void)
     REGISTER_FILTER (ASPLIT,      asplit,      af);
     REGISTER_FILTER (ASTREAMSYNC, astreamsync, af);
    REGISTER_FILTER (ASYNCTS,     asyncts,     af);
+    REGISTER_FILTER (CHANNELSPLIT,channelsplit,af);
     REGISTER_FILTER (EARWAX,      earwax,      af);
     REGISTER_FILTER (PAN,         pan,         af);
     REGISTER_FILTER (SILENCEDETECT, silencedetect, af);
@@ -92,6 +93,7 @@ void avfilter_register_all(void)
     REGISTER_FILTER (PAD,         pad,         vf);
     REGISTER_FILTER (PIXDESCTEST, pixdesctest, vf);
     REGISTER_FILTER (REMOVELOGO,  removelogo,  vf);
+    REGISTER_FILTER (SCALE,       scale,       vf);
     REGISTER_FILTER (SELECT,      select,      vf);
     REGISTER_FILTER (SETDAR,      setdar,      vf);
     REGISTER_FILTER (SETFIELD,    setfield,    vf);
@@ -143,8 +145,4 @@ void avfilter_register_all(void)
         extern AVFilter avfilter_asink_abuffer;
         avfilter_register(&avfilter_asink_abuffer);
     }
-    {
-        extern AVFilter avfilter_vf_scale;
-        avfilter_register(&avfilter_vf_scale);
-    }
 }
libavfilter/asink_anullsink.c
@@ -19,6 +19,7 @@
  */

 #include "avfilter.h"
+#include "internal.h"

 static void null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref) { }

libavfilter/asrc_anullsrc.c
@@ -24,6 +24,7 @@
  * null audio source
  */

+#include "internal.h"
 #include "libavutil/audioconvert.h"
 #include "libavutil/opt.h"

libavfilter/audio.c
@@ -160,7 +160,7 @@ void ff_default_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesr
 {
     AVFilterLink *outlink = NULL;

-    if (inlink->dst->output_count)
+    if (inlink->dst->nb_outputs)
         outlink = inlink->dst->outputs[0];

     if (outlink) {
@@ -190,10 +190,7 @@ void ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
     /* prepare to copy the samples if the buffer has insufficient permissions */
     if ((dst->min_perms & samplesref->perms) != dst->min_perms ||
         dst->rej_perms & samplesref->perms) {
-        int i, size, planar = av_sample_fmt_is_planar(samplesref->format);
-        int planes = !planar ? 1:
-                     av_get_channel_layout_nb_channels(samplesref->audio->channel_layout);
-
+        int size;
         av_log(link->dst, AV_LOG_DEBUG,
                "Copying audio data in avfilter (have perms %x, need %x, reject %x)\n",
                samplesref->perms, link->dstpad->min_perms, link->dstpad->rej_perms);
@@ -204,13 +201,10 @@ void ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
         link->cur_buf->audio->sample_rate = samplesref->audio->sample_rate;

         /* Copy actual data into new samples buffer */
-        /* src can be larger than dst if it was allocated larger than necessary.
-           dst can be slightly larger due to extra alignment padding. */
-        size = FFMIN(samplesref->linesize[0], link->cur_buf->linesize[0]);
-        for (i = 0; samplesref->data[i] && i < 8; i++)
-            memcpy(link->cur_buf->data[i], samplesref->data[i], size);
-        for (i = 0; i < planes; i++)
-            memcpy(link->cur_buf->extended_data[i], samplesref->extended_data[i], size);
+        av_samples_copy(link->cur_buf->extended_data, samplesref->extended_data,
+                        0, 0, samplesref->audio->nb_samples,
+                        av_get_channel_layout_nb_channels(link->channel_layout),
+                        link->format);

         avfilter_unref_buffer(samplesref);
     } else
libavfilter/audio.h
@@ -24,7 +24,7 @@

 #include "avfilter.h"

-static const enum AVSampleFormat ff_packed_sample_fmts[] = {
+static const enum AVSampleFormat ff_packed_sample_fmts_array[] = {
     AV_SAMPLE_FMT_U8,
     AV_SAMPLE_FMT_S16,
     AV_SAMPLE_FMT_S32,
@@ -33,7 +33,7 @@ static const enum AVSampleFormat ff_packed_sample_fmts[] = {
     AV_SAMPLE_FMT_NONE
 };

-static const enum AVSampleFormat ff_planar_sample_fmts[] = {
+static const enum AVSampleFormat ff_planar_sample_fmts_array[] = {
     AV_SAMPLE_FMT_U8P,
     AV_SAMPLE_FMT_S16P,
     AV_SAMPLE_FMT_S32P,
libavfilter/avfilter.c
@@ -120,8 +120,8 @@ int avfilter_link(AVFilterContext *src, unsigned srcpad,
 {
     AVFilterLink *link;

-    if (src->output_count <= srcpad || dst->input_count <= dstpad ||
+    if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
         src->outputs[srcpad] || dst->inputs[dstpad])
         return -1;

     if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
@@ -200,9 +200,9 @@ int avfilter_config_links(AVFilterContext *filter)
     unsigned i;
     int ret;

-    for (i = 0; i < filter->input_count; i ++) {
+    for (i = 0; i < filter->nb_inputs; i ++) {
         AVFilterLink *link = filter->inputs[i];
-        AVFilterLink *inlink = link->src->input_count ?
+        AVFilterLink *inlink = link->src->nb_inputs ?
             link->src->inputs[0] : NULL;

         if (!link) continue;
@@ -222,7 +222,7 @@ int avfilter_config_links(AVFilterContext *filter)
                 return ret;

             if (!(config_link = link->srcpad->config_props)) {
-                if (link->src->input_count != 1) {
+                if (link->src->nb_inputs != 1) {
                     av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
                                                     "with more than one input "
                                                     "must set config_props() "
@@ -335,7 +335,7 @@ int ff_poll_frame(AVFilterLink *link)
     if (link->srcpad->poll_frame)
         return link->srcpad->poll_frame(link);

-    for (i = 0; i < link->src->input_count; i++) {
+    for (i = 0; i < link->src->nb_inputs; i++) {
         int val;
         if (!link->src->inputs[i])
             return -1;
@@ -450,27 +450,31 @@ int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *in
         goto err;
     }

-    ret->input_count = pad_count(filter->inputs);
-    if (ret->input_count) {
-        ret->input_pads = av_malloc(sizeof(AVFilterPad) * ret->input_count);
+    ret->nb_inputs = pad_count(filter->inputs);
+    if (ret->nb_inputs ) {
+        ret->input_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_inputs);
         if (!ret->input_pads)
             goto err;
-        memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->input_count);
-        ret->inputs = av_mallocz(sizeof(AVFilterLink*) * ret->input_count);
+        memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
+        ret->inputs = av_mallocz(sizeof(AVFilterLink*) * ret->nb_inputs);
         if (!ret->inputs)
             goto err;
     }

-    ret->output_count = pad_count(filter->outputs);
-    if (ret->output_count) {
-        ret->output_pads = av_malloc(sizeof(AVFilterPad) * ret->output_count);
+    ret->nb_outputs = pad_count(filter->outputs);
+    if (ret->nb_outputs) {
+        ret->output_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_outputs);
         if (!ret->output_pads)
             goto err;
-        memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->output_count);
-        ret->outputs = av_mallocz(sizeof(AVFilterLink*) * ret->output_count);
+        memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
+        ret->outputs = av_mallocz(sizeof(AVFilterLink*) * ret->nb_outputs);
         if (!ret->outputs)
             goto err;
     }
+#if FF_API_FOO_COUNT
+    ret->output_count = ret->nb_outputs;
+    ret->input_count  = ret->nb_inputs;
+#endif

     *filter_ctx = ret;
     return 0;
@@ -478,10 +482,10 @@ int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *in
 err:
     av_freep(&ret->inputs);
     av_freep(&ret->input_pads);
-    ret->input_count = 0;
+    ret->nb_inputs = 0;
     av_freep(&ret->outputs);
     av_freep(&ret->output_pads);
-    ret->output_count = 0;
+    ret->nb_outputs = 0;
     av_freep(&ret->priv);
     av_free(ret);
     return AVERROR(ENOMEM);
@@ -498,7 +502,7 @@ void avfilter_free(AVFilterContext *filter)
     if (filter->filter->uninit)
         filter->filter->uninit(filter);

-    for (i = 0; i < filter->input_count; i++) {
+    for (i = 0; i < filter->nb_inputs; i++) {
         if ((link = filter->inputs[i])) {
             if (link->src)
                 link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
@@ -511,7 +515,7 @@ void avfilter_free(AVFilterContext *filter)
         }
         avfilter_link_free(&link);
     }
-    for (i = 0; i < filter->output_count; i++) {
+    for (i = 0; i < filter->nb_outputs; i++) {
         if ((link = filter->outputs[i])) {
             if (link->dst)
                 link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
@@ -546,6 +550,16 @@ int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque
     return ret;
 }

+const char *avfilter_pad_get_name(AVFilterPad *pads, int pad_idx)
+{
+    return pads[pad_idx].name;
+}
+
+enum AVMediaType avfilter_pad_get_type(AVFilterPad *pads, int pad_idx)
+{
+    return pads[pad_idx].type;
+}
+
 #if FF_API_DEFAULT_CONFIG_OUTPUT_LINK
 void avfilter_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
                          AVFilterPad **pads, AVFilterLink ***links,
@@ -556,14 +570,20 @@ void avfilter_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
 void avfilter_insert_inpad(AVFilterContext *f, unsigned index,
                            AVFilterPad *p)
 {
-    ff_insert_pad(index, &f->input_count, offsetof(AVFilterLink, dstpad),
+    ff_insert_pad(index, &f->nb_inputs, offsetof(AVFilterLink, dstpad),
                   &f->input_pads, &f->inputs, p);
+#if FF_API_FOO_COUNT
+    f->input_count = f->nb_inputs;
+#endif
 }
 void avfilter_insert_outpad(AVFilterContext *f, unsigned index,
                             AVFilterPad *p)
 {
-    ff_insert_pad(index, &f->output_count, offsetof(AVFilterLink, srcpad),
+    ff_insert_pad(index, &f->nb_outputs, offsetof(AVFilterLink, srcpad),
                   &f->output_pads, &f->outputs, p);
+#if FF_API_FOO_COUNT
+    f->output_count = f->nb_outputs;
+#endif
 }
 int avfilter_poll_frame(AVFilterLink *link)
 {
libavfilter/avfilter.h
@@ -369,10 +369,16 @@ void avfilter_set_common_packing_formats(AVFilterContext *ctx, AVFilterFormats *
  */
 #endif

+#if FF_API_AVFILTERPAD_PUBLIC
 /**
  * A filter pad used for either input or output.
  *
  * See doc/filter_design.txt for details on how to implement the methods.
+ *
+ * @warning this struct might be removed from public API.
+ * users should call avfilter_pad_get_name() and avfilter_pad_get_type()
+ * to access the name and type fields; there should be no need to access
+ * any other fields from outside of libavfilter.
  */
 struct AVFilterPad {
     /**
@@ -499,6 +505,29 @@ struct AVFilterPad {
      */
     int (*config_props)(AVFilterLink *link);
 };
+#endif
+
+/**
+ * Get the name of an AVFilterPad.
+ *
+ * @param pads an array of AVFilterPads
+ * @param pad_idx index of the pad in the array it; is the caller's
+ *                responsibility to ensure the index is valid
+ *
+ * @return name of the pad_idx'th pad in pads
+ */
+const char *avfilter_pad_get_name(AVFilterPad *pads, int pad_idx);
+
+/**
+ * Get the type of an AVFilterPad.
+ *
+ * @param pads an array of AVFilterPads
+ * @param pad_idx index of the pad in the array; it is the caller's
+ *                responsibility to ensure the index is valid
+ *
+ * @return type of the pad_idx'th pad in pads
+ */
+enum AVMediaType avfilter_pad_get_type(AVFilterPad *pads, int pad_idx);

 #if FF_API_FILTERS_PUBLIC
 /** default handler for start_frame() for video inputs */
@@ -608,16 +637,23 @@ struct AVFilterContext {

     char *name;                     ///< name of this filter instance

-    unsigned input_count;           ///< number of input pads
+#if FF_API_FOO_COUNT
+    unsigned input_count;           ///< @deprecated use nb_inputs
+#endif
     AVFilterPad   *input_pads;      ///< array of input pads
     AVFilterLink **inputs;          ///< array of pointers to input links

-    unsigned output_count;          ///< number of output pads
+#if FF_API_FOO_COUNT
+    unsigned output_count;          ///< @deprecated use nb_outputs
+#endif
     AVFilterPad   *output_pads;     ///< array of output pads
     AVFilterLink **outputs;         ///< array of pointers to output links

     void *priv;                     ///< private data for use by the filter

+    unsigned nb_inputs;             ///< number of input pads
+    unsigned nb_outputs;            ///< number of output pads
+
     struct AVFilterCommand *command_queue;
 };

@@ -777,19 +813,11 @@ void avfilter_link_free(AVFilterLink **link);
  */
 int avfilter_config_links(AVFilterContext *filter);

-/**
- * Request a picture buffer with a specific set of permissions.
- *
- * @param link the output link to the filter from which the buffer will
- * be requested
- * @param perms the required access permissions
- * @param w the minimum width of the buffer to allocate
- * @param h the minimum height of the buffer to allocate
- * @return A reference to the buffer. This must be unreferenced with
- * avfilter_unref_buffer when you are finished with it.
- */
+#if FF_API_FILTERS_PUBLIC
+attribute_deprecated
 AVFilterBufferRef *avfilter_get_video_buffer(AVFilterLink *link, int perms,
                                              int w, int h);
+#endif

 /**
  * Create a buffer reference wrapped around an already allocated image
libavfilter/avfiltergraph.c
@@ -118,7 +118,7 @@ static int graph_check_validity(AVFilterGraph *graph, AVClass *log_ctx)
     for (i = 0; i < graph->filter_count; i++) {
         filt = graph->filters[i];

-        for (j = 0; j < filt->input_count; j++) {
+        for (j = 0; j < filt->nb_inputs; j++) {
             if (!filt->inputs[j] || !filt->inputs[j]->src) {
                 av_log(log_ctx, AV_LOG_ERROR,
                        "Input pad \"%s\" for the filter \"%s\" of type \"%s\" not connected to any source\n",
@@ -127,7 +127,7 @@ static int graph_check_validity(AVFilterGraph *graph, AVClass *log_ctx)
             }
         }

-        for (j = 0; j < filt->output_count; j++) {
+        for (j = 0; j < filt->nb_outputs; j++) {
             if (!filt->outputs[j] || !filt->outputs[j]->dst) {
                 av_log(log_ctx, AV_LOG_ERROR,
                        "Output pad \"%s\" for the filter \"%s\" of type \"%s\" not connected to any destination\n",
@@ -153,7 +153,7 @@ static int graph_config_links(AVFilterGraph *graph, AVClass *log_ctx)
     for (i=0; i < graph->filter_count; i++) {
         filt = graph->filters[i];

-        if (!filt->output_count) {
+        if (!filt->nb_outputs) {
             if ((ret = avfilter_config_links(filt)))
                 return ret;
         }
@@ -271,7 +271,7 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
         /* Call query_formats on sources first.
           This is a temporary workaround for amerge,
           until format renegociation is implemented. */
-        if (!graph->filters[i]->input_count == j)
+        if (!graph->filters[i]->nb_inputs == j)
             continue;
         if (graph->filters[i]->filter->query_formats)
             ret = filter_query_formats(graph->filters[i]);
@@ -286,7 +286,7 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
     for (i = 0; i < graph->filter_count; i++) {
         AVFilterContext *filter = graph->filters[i];

-        for (j = 0; j < filter->input_count; j++) {
+        for (j = 0; j < filter->nb_inputs; j++) {
             AVFilterLink *link = filter->inputs[j];
 #if 0
             if (!link) continue;
@@ -348,11 +348,16 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
             /* couldn't merge format lists. auto-insert conversion filter */
             switch (link->type) {
             case AVMEDIA_TYPE_VIDEO:
+                if (!(filter = avfilter_get_by_name("scale"))) {
+                    av_log(log_ctx, AV_LOG_ERROR, "'scale' filter "
+                           "not present, cannot convert pixel formats.\n");
+                    return AVERROR(EINVAL);
+                }
+
                 snprintf(inst_name, sizeof(inst_name), "auto-inserted scaler %d",
                          scaler_count++);
                 snprintf(scale_args, sizeof(scale_args), "0:0:%s", graph->scale_sws_opts);
-                if ((ret = avfilter_graph_create_filter(&convert,
-                                                        avfilter_get_by_name("scale"),
+                if ((ret = avfilter_graph_create_filter(&convert, filter,
                                                         inst_name, scale_args, NULL,
                                                         graph)) < 0)
                     return ret;
@@ -366,8 +371,7 @@ static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)

                 snprintf(inst_name, sizeof(inst_name), "auto-inserted resampler %d",
                          resampler_count++);
-                if ((ret = avfilter_graph_create_filter(&convert,
-                                                        avfilter_get_by_name("aresample"),
+                if ((ret = avfilter_graph_create_filter(&convert, filter,
                                                         inst_name, NULL, NULL, graph)) < 0)
                     return ret;
                 break;
@@ -464,7 +468,7 @@ static int pick_format(AVFilterLink *link, AVFilterLink *ref)

 #define REDUCE_FORMATS(fmt_type, list_type, list, var, nb, add_format) \
 do {                                                                   \
-    for (i = 0; i < filter->input_count; i++) {                        \
+    for (i = 0; i < filter->nb_inputs; i++) {                          \
         AVFilterLink *link = filter->inputs[i];                        \
         fmt_type fmt;                                                  \
                                                                        \
@@ -472,7 +476,7 @@ do {                                                                   \
             continue;                                                  \
         fmt = link->out_ ## list->var[0];                              \
                                                                        \
-        for (j = 0; j < filter->output_count; j++) {                   \
+        for (j = 0; j < filter->nb_outputs; j++) {                     \
             AVFilterLink *out_link = filter->outputs[j];               \
             list_type *fmts;                                           \
                                                                        \
@@ -529,19 +533,19 @@ static void swap_samplerates_on_filter(AVFilterContext *filter)
     int sample_rate;
     int i, j;

-    for (i = 0; i < filter->input_count; i++) {
+    for (i = 0; i < filter->nb_inputs; i++) {
         link = filter->inputs[i];

         if (link->type == AVMEDIA_TYPE_AUDIO &&
             link->out_samplerates->format_count == 1)
             break;
     }
-    if (i == filter->input_count)
+    if (i == filter->nb_inputs)
         return;

     sample_rate = link->out_samplerates->formats[0];

-    for (i = 0; i < filter->output_count; i++) {
+    for (i = 0; i < filter->nb_outputs; i++) {
         AVFilterLink *outlink = filter->outputs[i];
         int best_idx, best_diff = INT_MAX;

@@ -576,19 +580,19 @@ static void swap_channel_layouts_on_filter(AVFilterContext *filter)
     uint64_t chlayout;
     int i, j;

-    for (i = 0; i < filter->input_count; i++) {
+    for (i = 0; i < filter->nb_inputs; i++) {
         link = filter->inputs[i];

         if (link->type == AVMEDIA_TYPE_AUDIO &&
             link->out_channel_layouts->nb_channel_layouts == 1)
             break;
     }
-    if (i == filter->input_count)
+    if (i == filter->nb_inputs)
         return;

     chlayout = link->out_channel_layouts->channel_layouts[0];

-    for (i = 0; i < filter->output_count; i++) {
+    for (i = 0; i < filter->nb_outputs; i++) {
         AVFilterLink *outlink = filter->outputs[i];
         int best_idx, best_score = INT_MIN;

@@ -629,20 +633,20 @@ static void swap_sample_fmts_on_filter(AVFilterContext *filter)
     int format, bps;
     int i, j;

-    for (i = 0; i < filter->input_count; i++) {
+    for (i = 0; i < filter->nb_inputs; i++) {
         link = filter->inputs[i];

         if (link->type == AVMEDIA_TYPE_AUDIO &&
             link->out_formats->format_count == 1)
             break;
     }
-    if (i == filter->input_count)
+    if (i == filter->nb_inputs)
         return;

     format = link->out_formats->formats[0];
     bps    = av_get_bytes_per_sample(format);

-    for (i = 0; i < filter->output_count; i++) {
+    for (i = 0; i < filter->nb_outputs; i++) {
         AVFilterLink *outlink = filter->outputs[i];
         int best_idx, best_score = INT_MIN;

@@ -700,24 +704,24 @@ static int pick_formats(AVFilterGraph *graph)
         change = 0;
         for (i = 0; i < graph->filter_count; i++) {
             AVFilterContext *filter = graph->filters[i];
-            if (filter->input_count){
-                for (j = 0; j < filter->input_count; j++){
+            if (filter->nb_inputs){
+                for (j = 0; j < filter->nb_inputs; j++){
                     if(filter->inputs[j]->in_formats && filter->inputs[j]->in_formats->format_count == 1) {
                         pick_format(filter->inputs[j], NULL);
                         change = 1;
                     }
                 }
             }
-            if (filter->output_count){
-                for (j = 0; j < filter->output_count; j++){
+            if (filter->nb_outputs){
+                for (j = 0; j < filter->nb_outputs; j++){
                     if(filter->outputs[j]->in_formats && filter->outputs[j]->in_formats->format_count == 1) {
                         pick_format(filter->outputs[j], NULL);
                         change = 1;
                     }
                 }
             }
-            if (filter->input_count && filter->output_count && filter->inputs[0]->format>=0) {
-                for (j = 0; j < filter->output_count; j++) {
+            if (filter->nb_inputs && filter->nb_outputs && filter->inputs[0]->format>=0) {
+                for (j = 0; j < filter->nb_outputs; j++) {
                     if(filter->outputs[j]->format<0) {
                         pick_format(filter->outputs[j], filter->inputs[0]);
                         change = 1;
@@ -730,10 +734,10 @@ static int pick_formats(AVFilterGraph *graph)
     for (i = 0; i < graph->filter_count; i++) {
         AVFilterContext *filter = graph->filters[i];

-        for (j = 0; j < filter->input_count; j++)
+        for (j = 0; j < filter->nb_inputs; j++)
             if ((ret = pick_format(filter->inputs[j], NULL)) < 0)
                 return ret;
-        for (j = 0; j < filter->output_count; j++)
+        for (j = 0; j < filter->nb_outputs; j++)
             if ((ret = pick_format(filter->outputs[j], NULL)) < 0)
                 return ret;
     }
@@ -778,18 +782,18 @@ static int ff_avfilter_graph_config_pointers(AVFilterGraph *graph,

     for (i = 0; i < graph->filter_count; i++) {
         f = graph->filters[i];
-        for (j = 0; j < f->input_count; j++) {
+        for (j = 0; j < f->nb_inputs; j++) {
             f->inputs[j]->graph     = graph;
             f->inputs[j]->age_index = -1;
         }
-        for (j = 0; j < f->output_count; j++) {
+        for (j = 0; j < f->nb_outputs; j++) {
             f->outputs[j]->graph    = graph;
             f->outputs[j]->age_index= -1;
         }
-        if (!f->output_count) {
-            if (f->input_count > INT_MAX - sink_links_count)
+        if (!f->nb_outputs) {
+            if (f->nb_inputs > INT_MAX - sink_links_count)
                 return AVERROR(EINVAL);
-            sink_links_count += f->input_count;
+            sink_links_count += f->nb_inputs;
         }
     }
     sinks = av_calloc(sink_links_count, sizeof(*sinks));
@@ -797,8 +801,8 @@ static int ff_avfilter_graph_config_pointers(AVFilterGraph *graph,
         return AVERROR(ENOMEM);
     for (i = 0; i < graph->filter_count; i++) {
         f = graph->filters[i];
-        if (!f->output_count) {
-            for (j = 0; j < f->input_count; j++) {
+        if (!f->nb_outputs) {
+            for (j = 0; j < f->nb_inputs; j++) {
                 sinks[n] = f->inputs[j];
                 f->inputs[j]->age_index = n++;
             }
|
|||||||
switch (outlink->type) {
|
switch (outlink->type) {
|
||||||
|
|
||||||
case AVMEDIA_TYPE_VIDEO:
|
case AVMEDIA_TYPE_VIDEO:
|
||||||
buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
|
buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
|
||||||
ref->video->w, ref->video->h);
|
ref->video->w, ref->video->h);
|
||||||
if(!buf)
|
if(!buf)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
@ -303,6 +303,18 @@ AVFilterFormats *avfilter_make_all_packing_formats(void)
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
AVFilterFormats *ff_planar_sample_fmts(void)
|
||||||
|
{
|
||||||
|
AVFilterFormats *ret = NULL;
|
||||||
|
int fmt;
|
||||||
|
|
||||||
|
for (fmt = 0; fmt < AV_SAMPLE_FMT_NB; fmt++)
|
||||||
|
if (av_sample_fmt_is_planar(fmt))
|
||||||
|
ff_add_format(&ret, fmt);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
AVFilterFormats *ff_all_samplerates(void)
|
AVFilterFormats *ff_all_samplerates(void)
|
||||||
{
|
{
|
||||||
AVFilterFormats *ret = av_mallocz(sizeof(*ret));
|
AVFilterFormats *ret = av_mallocz(sizeof(*ret));
|
||||||
@ -401,13 +413,13 @@ void ff_formats_changeref(AVFilterFormats **oldref, AVFilterFormats **newref)
|
|||||||
{ \
|
{ \
|
||||||
int count = 0, i; \
|
int count = 0, i; \
|
||||||
\
|
\
|
||||||
for (i = 0; i < ctx->input_count; i++) { \
|
for (i = 0; i < ctx->nb_inputs; i++) { \
|
||||||
if (ctx->inputs[i] && !ctx->inputs[i]->out_fmts) { \
|
if (ctx->inputs[i] && !ctx->inputs[i]->out_fmts) { \
|
||||||
ref(fmts, &ctx->inputs[i]->out_fmts); \
|
ref(fmts, &ctx->inputs[i]->out_fmts); \
|
||||||
count++; \
|
count++; \
|
||||||
} \
|
} \
|
||||||
} \
|
} \
|
||||||
for (i = 0; i < ctx->output_count; i++) { \
|
for (i = 0; i < ctx->nb_outputs; i++) { \
|
||||||
if (ctx->outputs[i] && !ctx->outputs[i]->in_fmts) { \
|
if (ctx->outputs[i] && !ctx->outputs[i]->in_fmts) { \
|
||||||
ref(fmts, &ctx->outputs[i]->in_fmts); \
|
ref(fmts, &ctx->outputs[i]->in_fmts); \
|
||||||
count++; \
|
count++; \
|
||||||
|
@ -162,6 +162,11 @@ int ff_add_format(AVFilterFormats **avff, int64_t fmt);
|
|||||||
*/
|
*/
|
||||||
AVFilterFormats *ff_all_formats(enum AVMediaType type);
|
AVFilterFormats *ff_all_formats(enum AVMediaType type);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Construct a formats list containing all planar sample formats.
|
||||||
|
*/
|
||||||
|
AVFilterFormats *ff_planar_sample_fmts(void);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Return a format list which contains the intersection of the formats of
|
* Return a format list which contains the intersection of the formats of
|
||||||
* a and b. Also, all the references of a, all the references of b, and
|
* a and b. Also, all the references of a, all the references of b, and
|
||||||
|
@ -226,7 +226,7 @@ static int link_filter_inouts(AVFilterContext *filt_ctx,
|
|||||||
{
|
{
|
||||||
int pad, ret;
|
int pad, ret;
|
||||||
|
|
||||||
for (pad = 0; pad < filt_ctx->input_count; pad++) {
|
for (pad = 0; pad < filt_ctx->nb_inputs; pad++) {
|
||||||
AVFilterInOut *p = *curr_inputs;
|
AVFilterInOut *p = *curr_inputs;
|
||||||
|
|
||||||
if (p) {
|
if (p) {
|
||||||
@ -254,7 +254,7 @@ static int link_filter_inouts(AVFilterContext *filt_ctx,
|
|||||||
return AVERROR(EINVAL);
|
return AVERROR(EINVAL);
|
||||||
}
|
}
|
||||||
|
|
||||||
pad = filt_ctx->output_count;
|
pad = filt_ctx->nb_outputs;
|
||||||
while (pad--) {
|
while (pad--) {
|
||||||
AVFilterInOut *currlinkn = av_mallocz(sizeof(AVFilterInOut));
|
AVFilterInOut *currlinkn = av_mallocz(sizeof(AVFilterInOut));
|
||||||
if (!currlinkn)
|
if (!currlinkn)
|
||||||
|
@ -50,6 +50,132 @@ typedef struct AVFilterCommand {
|
|||||||
*/
|
*/
|
||||||
void ff_avfilter_graph_update_heap(AVFilterGraph *graph, AVFilterLink *link);
|
void ff_avfilter_graph_update_heap(AVFilterGraph *graph, AVFilterLink *link);
|
||||||
|
|
||||||
|
#if !FF_API_AVFILTERPAD_PUBLIC
|
||||||
|
/**
|
||||||
|
* A filter pad used for either input or output.
|
||||||
|
*/
|
||||||
|
struct AVFilterPad {
|
||||||
|
/**
|
||||||
|
* Pad name. The name is unique among inputs and among outputs, but an
|
||||||
|
* input may have the same name as an output. This may be NULL if this
|
||||||
|
* pad has no need to ever be referenced by name.
|
||||||
|
*/
|
||||||
|
const char *name;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* AVFilterPad type.
|
||||||
|
*/
|
||||||
|
enum AVMediaType type;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Minimum required permissions on incoming buffers. Any buffer with
|
||||||
|
* insufficient permissions will be automatically copied by the filter
|
||||||
|
* system to a new buffer which provides the needed access permissions.
|
||||||
|
*
|
||||||
|
* Input pads only.
|
||||||
|
*/
|
||||||
|
int min_perms;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Permissions which are not accepted on incoming buffers. Any buffer
|
||||||
|
* which has any of these permissions set will be automatically copied
|
||||||
|
* by the filter system to a new buffer which does not have those
|
||||||
|
* permissions. This can be used to easily disallow buffers with
|
||||||
|
* AV_PERM_REUSE.
|
||||||
|
*
|
||||||
|
* Input pads only.
|
||||||
|
*/
|
||||||
|
int rej_perms;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Callback called before passing the first slice of a new frame. If
|
||||||
|
* NULL, the filter layer will default to storing a reference to the
|
||||||
|
* picture inside the link structure.
|
||||||
|
*
|
||||||
|
* Input video pads only.
|
||||||
|
*/
|
||||||
|
void (*start_frame)(AVFilterLink *link, AVFilterBufferRef *picref);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Callback function to get a video buffer. If NULL, the filter system will
|
||||||
|
* use avfilter_default_get_video_buffer().
|
||||||
|
*
|
||||||
|
* Input video pads only.
|
||||||
|
*/
|
||||||
|
AVFilterBufferRef *(*get_video_buffer)(AVFilterLink *link, int perms, int w, int h);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Callback function to get an audio buffer. If NULL, the filter system will
|
||||||
|
* use avfilter_default_get_audio_buffer().
|
||||||
|
*
|
||||||
|
* Input audio pads only.
|
||||||
|
*/
|
||||||
|
AVFilterBufferRef *(*get_audio_buffer)(AVFilterLink *link, int perms,
|
||||||
|
int nb_samples);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Callback called after the slices of a frame are completely sent. If
|
||||||
|
* NULL, the filter layer will default to releasing the reference stored
|
||||||
|
* in the link structure during start_frame().
|
||||||
|
*
|
||||||
|
* Input video pads only.
|
||||||
|
*/
|
||||||
|
void (*end_frame)(AVFilterLink *link);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Slice drawing callback. This is where a filter receives video data
|
||||||
|
* and should do its processing.
|
||||||
|
*
|
||||||
|
* Input video pads only.
|
||||||
|
*/
|
||||||
|
void (*draw_slice)(AVFilterLink *link, int y, int height, int slice_dir);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Samples filtering callback. This is where a filter receives audio data
|
||||||
|
* and should do its processing.
|
||||||
|
*
|
||||||
|
* Input audio pads only.
|
||||||
|
*/
|
||||||
|
void (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Frame poll callback. This returns the number of immediately available
|
||||||
|
* samples. It should return a positive value if the next request_frame()
|
||||||
|
* is guaranteed to return one frame (with no delay).
|
||||||
|
*
|
||||||
|
* Defaults to just calling the source poll_frame() method.
|
||||||
|
*
|
||||||
|
* Output pads only.
|
||||||
|
*/
|
||||||
|
int (*poll_frame)(AVFilterLink *link);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Frame request callback. A call to this should result in at least one
|
||||||
|
* frame being output over the given link. This should return zero on
|
||||||
|
* success, and another value on error.
|
||||||
|
*
|
||||||
|
* Output pads only.
|
||||||
|
*/
|
||||||
|
int (*request_frame)(AVFilterLink *link);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Link configuration callback.
|
||||||
|
*
|
||||||
|
* For output pads, this should set the link properties such as
|
||||||
|
* width/height. This should NOT set the format property - that is
|
||||||
|
* negotiated between filters by the filter system using the
|
||||||
|
* query_formats() callback before this function is called.
|
||||||
|
*
|
||||||
|
* For input pads, this should check the properties of the link, and update
|
||||||
|
* the filter's internal state as necessary.
|
||||||
|
*
|
||||||
|
* For both input and output filters, this should return zero on success,
|
||||||
|
* and another value on error.
|
||||||
|
*/
|
||||||
|
int (*config_props)(AVFilterLink *link);
|
||||||
|
};
|
||||||
|
#endif
|
||||||
|
|
||||||
/** default handler for freeing audio/video buffer when there are no references left */
|
/** default handler for freeing audio/video buffer when there are no references left */
|
||||||
void ff_avfilter_default_free_buffer(AVFilterBuffer *buf);
|
void ff_avfilter_default_free_buffer(AVFilterBuffer *buf);
|
||||||
|
|
||||||
@ -165,16 +291,22 @@ void ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
|
|||||||
static inline void ff_insert_inpad(AVFilterContext *f, unsigned index,
|
static inline void ff_insert_inpad(AVFilterContext *f, unsigned index,
|
||||||
AVFilterPad *p)
|
AVFilterPad *p)
|
||||||
{
|
{
|
||||||
ff_insert_pad(index, &f->input_count, offsetof(AVFilterLink, dstpad),
|
ff_insert_pad(index, &f->nb_inputs, offsetof(AVFilterLink, dstpad),
|
||||||
&f->input_pads, &f->inputs, p);
|
&f->input_pads, &f->inputs, p);
|
||||||
|
#if FF_API_FOO_COUNT
|
||||||
|
f->input_count = f->nb_inputs;
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Insert a new output pad for the filter. */
|
/** Insert a new output pad for the filter. */
|
||||||
static inline void ff_insert_outpad(AVFilterContext *f, unsigned index,
|
static inline void ff_insert_outpad(AVFilterContext *f, unsigned index,
|
||||||
AVFilterPad *p)
|
AVFilterPad *p)
|
||||||
{
|
{
|
||||||
ff_insert_pad(index, &f->output_count, offsetof(AVFilterLink, srcpad),
|
ff_insert_pad(index, &f->nb_outputs, offsetof(AVFilterLink, srcpad),
|
||||||
&f->output_pads, &f->outputs, p);
|
&f->output_pads, &f->outputs, p);
|
||||||
|
#if FF_API_FOO_COUNT
|
||||||
|
f->output_count = f->nb_outputs;
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@@ -59,7 +59,7 @@ static void split_uninit(AVFilterContext *ctx)
 {
 int i;

-for (i = 0; i < ctx->output_count; i++)
+for (i = 0; i < ctx->nb_outputs; i++)
 av_freep(&ctx->output_pads[i].name);
 }

@@ -68,7 +68,7 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
 AVFilterContext *ctx = inlink->dst;
 int i;

-for (i = 0; i < ctx->output_count; i++)
+for (i = 0; i < ctx->nb_outputs; i++)
 ff_start_frame(ctx->outputs[i],
 avfilter_ref_buffer(picref, ~AV_PERM_WRITE));
 }
@@ -78,7 +78,7 @@ static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
 AVFilterContext *ctx = inlink->dst;
 int i;

-for (i = 0; i < ctx->output_count; i++)
+for (i = 0; i < ctx->nb_outputs; i++)
 ff_draw_slice(ctx->outputs[i], y, h, slice_dir);
 }

@@ -87,7 +87,7 @@ static void end_frame(AVFilterLink *inlink)
 AVFilterContext *ctx = inlink->dst;
 int i;

-for (i = 0; i < ctx->output_count; i++)
+for (i = 0; i < ctx->nb_outputs; i++)
 ff_end_frame(ctx->outputs[i]);

 avfilter_unref_buffer(inlink->cur_buf);
@@ -115,7 +115,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
 AVFilterContext *ctx = inlink->dst;
 int i;

-for (i = 0; i < ctx->output_count; i++)
+for (i = 0; i < ctx->nb_outputs; i++)
 ff_filter_samples(inlink->dst->outputs[i],
 avfilter_ref_buffer(samplesref, ~AV_PERM_WRITE));
 }
@@ -39,6 +39,7 @@
 #include "avcodec.h"
 #include "avfilter.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"

 typedef struct {
@@ -272,8 +273,8 @@ static int movie_get_frame(AVFilterLink *outlink)

 if (frame_decoded) {
 /* FIXME: avoid the memcpy */
-movie->picref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE | AV_PERM_PRESERVE |
+movie->picref = ff_get_video_buffer(outlink, AV_PERM_WRITE | AV_PERM_PRESERVE |
 AV_PERM_REUSE2, outlink->w, outlink->h);
 av_image_copy(movie->picref->data, movie->picref->linesize,
 (void*)movie->frame->data, movie->frame->linesize,
 movie->picref->format, outlink->w, outlink->h);
@@ -29,8 +29,8 @@
 #include "libavutil/avutil.h"

 #define LIBAVFILTER_VERSION_MAJOR  2
-#define LIBAVFILTER_VERSION_MINOR 78
+#define LIBAVFILTER_VERSION_MINOR 79
-#define LIBAVFILTER_VERSION_MICRO 101
+#define LIBAVFILTER_VERSION_MICRO 100

 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
 LIBAVFILTER_VERSION_MINOR, \
@@ -62,5 +62,11 @@
 #ifndef FF_API_FILTERS_PUBLIC
 #define FF_API_FILTERS_PUBLIC (LIBAVFILTER_VERSION_MAJOR < 3)
 #endif
+#ifndef FF_API_AVFILTERPAD_PUBLIC
+#define FF_API_AVFILTERPAD_PUBLIC (LIBAVFILTER_VERSION_MAJOR < 4)
+#endif
+#ifndef FF_API_FOO_COUNT
+#define FF_API_FOO_COUNT (LIBAVFILTER_VERSION_MAJOR < 4)
+#endif

 #endif // AVFILTER_VERSION_H
@@ -26,6 +26,7 @@
 #include "libavutil/mathematics.h"
 #include "libavutil/parseutils.h"
 #include "avfilter.h"
+#include "internal.h"
 #include "video.h"

 typedef struct {
@@ -30,6 +30,7 @@
 #include "avfilter.h"
 #include "internal.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"

 typedef struct {
@@ -30,6 +30,7 @@
 #include "libavutil/pixdesc.h"
 #include "avfilter.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"

 static const char *const var_names[] = {
@@ -22,6 +22,7 @@
 */

 #include "avfilter.h"
+#include "internal.h"
 #include "video.h"

 AVFilter avfilter_vf_copy = {
@@ -27,6 +27,7 @@

 #include "avfilter.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"
 #include "libavutil/eval.h"
 #include "libavutil/avstring.h"
@@ -26,6 +26,7 @@
 #include "libavutil/imgutils.h"
 #include "avfilter.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"

 typedef struct {
@@ -30,6 +30,7 @@
 #include "libavutil/pixdesc.h"
 #include "avfilter.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"

 /**
@@ -218,8 +219,8 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
 AVFilterBufferRef *outpicref;

 if (inpicref->perms & AV_PERM_PRESERVE) {
-outpicref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
+outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE,
 outlink->w, outlink->h);
 avfilter_copy_buffer_ref_props(outpicref, inpicref);
 outpicref->video->w = outlink->w;
 outpicref->video->h = outlink->h;
@@ -29,6 +29,7 @@
 #include "libavutil/parseutils.h"
 #include "avfilter.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"

 enum { Y, U, V, A };
@@ -42,6 +42,7 @@
 #include "avfilter.h"
 #include "drawutils.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"

 #undef time
@@ -33,6 +33,7 @@
 #include "drawutils.h"
 #include "internal.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"

 #define R 0
@@ -29,6 +29,7 @@
 #include "libavutil/pixdesc.h"
 #include "avfilter.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"

 typedef struct
@@ -112,7 +113,7 @@ static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, int
 AVFilterContext *ctx = inlink->dst;
 AVFilterLink *outlink = ctx->outputs[0];

-return avfilter_get_video_buffer(outlink, perms, w, h);
+return ff_get_video_buffer(outlink, perms, w, h);
 }

 static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
@@ -27,6 +27,7 @@
 #include "avfilter.h"
 #include "internal.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"

 typedef struct {
@@ -32,6 +32,7 @@
 #include "libavutil/parseutils.h"
 #include "avfilter.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"

 typedef f0r_instance_t (*f0r_construct_f)(unsigned int width, unsigned int height);
@@ -433,7 +434,7 @@ static int source_config_props(AVFilterLink *outlink)
 static int source_request_frame(AVFilterLink *outlink)
 {
 Frei0rContext *frei0r = outlink->src->priv;
-AVFilterBufferRef *picref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+AVFilterBufferRef *picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
 picref->video->sample_aspect_ratio = (AVRational) {1, 1};
 picref->pts = frei0r->pts++;
 picref->pos = -1;
@@ -38,6 +38,7 @@
 #include "avfilter.h"
 #include "formats.h"
 #include "gradfun.h"
+#include "internal.h"
 #include "video.h"

 DECLARE_ALIGNED(16, static const uint16_t, dither)[8][8] = {
@@ -190,7 +191,7 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
 AVFilterBufferRef *outpicref;

 if (inpicref->perms & AV_PERM_PRESERVE) {
-outpicref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
 avfilter_copy_buffer_ref_props(outpicref, inpicref);
 outpicref->video->w = outlink->w;
 outpicref->video->h = outlink->h;
@@ -26,6 +26,7 @@

 #include "avfilter.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"
 #include "libavutil/pixdesc.h"
 #include "libavutil/intreadwrite.h"
@@ -28,6 +28,7 @@
 #include "libavutil/pixdesc.h"
 #include "avfilter.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"

 typedef struct {
@@ -22,6 +22,7 @@
 */

 #include "avfilter.h"
+#include "internal.h"
 #include "video.h"

 AVFilter avfilter_vf_null = {
@@ -303,7 +303,7 @@ static int config_output(AVFilterLink *outlink)

 static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms, int w, int h)
 {
-return avfilter_get_video_buffer(link->dst->outputs[0], perms, w, h);
+return ff_get_video_buffer(link->dst->outputs[0], perms, w, h);
 }

 // divide by 255 and round to nearest
@@ -26,6 +26,7 @@

 #include "avfilter.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"
 #include "libavutil/avstring.h"
 #include "libavutil/eval.h"
@@ -220,9 +221,9 @@ static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms, int
 PadContext *pad = inlink->dst->priv;
 int align = (perms&AV_PERM_ALIGN) ? AVFILTER_ALIGN : 1;

-AVFilterBufferRef *picref = avfilter_get_video_buffer(inlink->dst->outputs[0], perms,
+AVFilterBufferRef *picref = ff_get_video_buffer(inlink->dst->outputs[0], perms,
 w + (pad->w - pad->in_w) + 4*align,
 h + (pad->h - pad->in_h));
 int plane;

 picref->video->w = w;
@@ -287,9 +288,9 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
 if(pad->needs_copy){
 av_log(inlink->dst, AV_LOG_DEBUG, "Direct padding impossible allocating new frame\n");
 avfilter_unref_buffer(outpicref);
-outpicref = avfilter_get_video_buffer(inlink->dst->outputs[0], AV_PERM_WRITE | AV_PERM_NEG_LINESIZES,
+outpicref = ff_get_video_buffer(inlink->dst->outputs[0], AV_PERM_WRITE | AV_PERM_NEG_LINESIZES,
 FFMAX(inlink->w, pad->w),
 FFMAX(inlink->h, pad->h));
 avfilter_copy_buffer_ref_props(outpicref, inpicref);
 }

@@ -25,6 +25,7 @@

 #include "libavutil/pixdesc.h"
 #include "avfilter.h"
+#include "internal.h"
 #include "video.h"

 typedef struct {
@@ -57,8 +58,8 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
 AVFilterBufferRef *outpicref;
 int i;

-outlink->out_buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
+outlink->out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
 outlink->w, outlink->h);
 outpicref = outlink->out_buf;
 avfilter_copy_buffer_ref_props(outpicref, picref);

@@ -25,6 +25,7 @@

 #include "avfilter.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"
 #include "libavutil/avstring.h"
 #include "libavutil/eval.h"
@@ -302,7 +303,7 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
 scale->hsub = av_pix_fmt_descriptors[link->format].log2_chroma_w;
 scale->vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;

-outpicref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, outlink->w, outlink->h);
+outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN, outlink->w, outlink->h);
 avfilter_copy_buffer_ref_props(outpicref, picref);
 outpicref->video->w = outlink->w;
 outpicref->video->h = outlink->h;
@@ -29,6 +29,7 @@
 #include "libavutil/eval.h"
 #include "libavutil/mathematics.h"
 #include "avfilter.h"
+#include "internal.h"
 #include "video.h"

 static const char *const var_names[] = {
@@ -24,6 +24,7 @@
 */

 #include "avfilter.h"
+#include "internal.h"
 #include "video.h"
 #include "libavutil/pixdesc.h"

@@ -30,6 +30,7 @@
 #include "libavutil/imgutils.h"
 #include "avfilter.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"

 typedef struct {
@@ -117,8 +118,8 @@ static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
 {
 AVFilterLink *outlink = inlink->dst->outputs[0];

-outlink->out_buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
+outlink->out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
 outlink->w, outlink->h);
 outlink->out_buf->pts = picref->pts;

 if (picref->video->sample_aspect_ratio.num == 0) {
@@ -38,6 +38,7 @@

 #include "avfilter.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"
 #include "libavutil/common.h"
 #include "libavutil/mem.h"
@@ -25,6 +25,7 @@

 #include "libavutil/pixdesc.h"
 #include "avfilter.h"
+#include "internal.h"
 #include "video.h"

 typedef struct {
@@ -50,7 +51,7 @@ static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms,
 if (!(perms & AV_PERM_NEG_LINESIZES))
 return ff_default_get_video_buffer(link, perms, w, h);

-picref = avfilter_get_video_buffer(link->dst->outputs[0], perms, w, h);
+picref = ff_get_video_buffer(link->dst->outputs[0], perms, w, h);
 for (i = 0; i < 4; i ++) {
 int vsub = i == 1 || i == 2 ? flip->vsub : 0;

@@ -207,8 +207,8 @@ static void return_frame(AVFilterContext *ctx, int is_second)
 }

 if (is_second) {
-yadif->out = avfilter_get_video_buffer(link, AV_PERM_WRITE | AV_PERM_PRESERVE |
+yadif->out = ff_get_video_buffer(link, AV_PERM_WRITE | AV_PERM_PRESERVE |
 AV_PERM_REUSE, link->w, link->h);
 avfilter_copy_buffer_ref_props(yadif->out, yadif->cur);
 yadif->out->video->interlaced = 0;
 }
@@ -269,8 +269,8 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
 if (!yadif->prev)
 yadif->prev = avfilter_ref_buffer(yadif->cur, AV_PERM_READ);

-yadif->out = avfilter_get_video_buffer(ctx->outputs[0], AV_PERM_WRITE | AV_PERM_PRESERVE |
+yadif->out = ff_get_video_buffer(ctx->outputs[0], AV_PERM_WRITE | AV_PERM_PRESERVE |
 AV_PERM_REUSE, link->w, link->h);

 avfilter_copy_buffer_ref_props(yadif->out, yadif->cur);
 yadif->out->video->interlaced = 0;
@@ -28,7 +28,7 @@

 AVFilterBufferRef *ff_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
 {
-return avfilter_get_video_buffer(link->dst->outputs[0], perms, w, h);
+return ff_get_video_buffer(link->dst->outputs[0], perms, w, h);
 }

 AVFilterBufferRef *ff_default_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
@@ -127,7 +127,7 @@ fail:
 return NULL;
 }

-AVFilterBufferRef *avfilter_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
+AVFilterBufferRef *ff_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
 {
 AVFilterBufferRef *ret = NULL;

@@ -158,11 +158,11 @@ static void default_start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
 {
 AVFilterLink *outlink = NULL;

-if (inlink->dst->output_count)
+if (inlink->dst->nb_outputs)
 outlink = inlink->dst->outputs[0];

 if (outlink) {
-outlink->out_buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+outlink->out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
 avfilter_copy_buffer_ref_props(outlink->out_buf, picref);
 ff_start_frame(outlink, avfilter_ref_buffer(outlink->out_buf, ~0));
 }
@@ -191,7 +191,7 @@ void ff_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
 picref->perms,
 link->dstpad->min_perms, link->dstpad->rej_perms);

-link->cur_buf = avfilter_get_video_buffer(link, dst->min_perms, link->w, link->h);
+link->cur_buf = ff_get_video_buffer(link, dst->min_perms, link->w, link->h);
 link->src_buf = picref;
 avfilter_copy_buffer_ref_props(link->cur_buf, link->src_buf);

@@ -230,7 +230,7 @@ static void default_end_frame(AVFilterLink *inlink)
 {
 AVFilterLink *outlink = NULL;

-if (inlink->dst->output_count)
+if (inlink->dst->nb_outputs)
 outlink = inlink->dst->outputs[0];

 avfilter_unref_buffer(inlink->cur_buf);
@@ -271,7 +271,7 @@ static void default_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir
 {
 AVFilterLink *outlink = NULL;

-if (inlink->dst->output_count)
+if (inlink->dst->nb_outputs)
 outlink = inlink->dst->outputs[0];

 if (outlink)
@@ -364,4 +364,8 @@ void avfilter_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
 {
 ff_draw_slice(link, y, h, slice_dir);
 }
+AVFilterBufferRef *avfilter_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
+{
+return ff_get_video_buffer(link, perms, w, h);
+}
 #endif
@@ -28,6 +28,19 @@ AVFilterBufferRef *ff_default_get_video_buffer(AVFilterLink *link,
 int perms, int w, int h);
 AVFilterBufferRef *ff_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h);

+/**
+* Request a picture buffer with a specific set of permissions.
+*
+* @param link the output link to the filter from which the buffer will
+* be requested
+* @param perms the required access permissions
+* @param w the minimum width of the buffer to allocate
+* @param h the minimum height of the buffer to allocate
+* @return A reference to the buffer. This must be unreferenced with
+* avfilter_unref_buffer when you are finished with it.
+*/
+AVFilterBufferRef *ff_get_video_buffer(AVFilterLink *link, int perms,
+int w, int h);

 void ff_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref);
 void ff_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir);
@@ -17,6 +17,7 @@
 */

 #include "avfilter.h"
+#include "internal.h"

 static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
 {
@@ -25,6 +25,7 @@

 #include "avfilter.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"
 #include "libavutil/pixdesc.h"
 #include "libavutil/colorspace.h"
@@ -105,7 +106,7 @@ static int color_config_props(AVFilterLink *inlink)
 static int color_request_frame(AVFilterLink *link)
 {
 ColorContext *color = link->src->priv;
-AVFilterBufferRef *picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h);
+AVFilterBufferRef *picref = ff_get_video_buffer(link, AV_PERM_WRITE, color->w, color->h);
 picref->video->sample_aspect_ratio = (AVRational) {1, 1};
 picref->pts = color->pts++;
 picref->pos = -1;
@@ -37,6 +37,7 @@
 #include "libavutil/parseutils.h"
 #include "avfilter.h"
 #include "formats.h"
+#include "internal.h"
 #include "video.h"

 typedef struct {
@@ -137,8 +138,7 @@ static int request_frame(AVFilterLink *outlink)

 if (test->max_pts >= 0 && test->pts >= test->max_pts)
 return AVERROR_EOF;
-picref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
-test->w, test->h);
+picref = ff_get_video_buffer(outlink, AV_PERM_WRITE, test->w, test->h);
 picref->pts = test->pts++;
 picref->pos = -1;
 picref->video->key_frame = 1;
@@ -88,6 +88,8 @@ typedef struct RTMPContext {
 char* tcurl;                              ///< url of the target stream
 char* flashver;                           ///< version of the flash plugin
 char* swfurl;                             ///< url of the swf player
+int server_bw;                            ///< server bandwidth
+int client_buffer_time;                   ///< client buffer time in ms
 } RTMPContext;

 #define PLAYER_KEY_OPEN_PART_LEN 30   ///< length of partial key used for first client digest signing
@@ -115,7 +117,7 @@ static const uint8_t rtmp_server_key[] = {

 static int rtmp_write_amf_data(URLContext *s, char *param, uint8_t **p)
 {
-char *field, *value, *saveptr;
+char *field, *value;
 char type;

 /* The type must be B for Boolean, N for number, S for string, O for
@@ -130,8 +132,12 @@ static int rtmp_write_amf_data(URLContext *s, char *param, uint8_t **p)
 value = param + 2;
 } else if (param[0] == 'N' && param[1] && param[2] == ':') {
 type = param[1];
-field = av_strtok(param + 3, ":", &saveptr);
+field = param + 3;
-value = av_strtok(NULL, ":", &saveptr);
+value = strchr(field, ':');
+if (!value)
+goto fail;
+*value = '\0';
+value++;

 if (!field || !value)
 goto fail;
@@ -226,18 +232,27 @@ static int gen_connect(URLContext *s, RTMPContext *rt)
 ff_amf_write_object_end(&p);

 if (rt->conn) {
-char *param, *saveptr;
+char *param = rt->conn;

 // Write arbitrary AMF data to the Connect message.
-param = av_strtok(rt->conn, " ", &saveptr);
 while (param != NULL) {
+char *sep;
+param += strspn(param, " ");
+if (!*param)
+break;
+sep = strchr(param, ' ');
+if (sep)
+*sep = '\0';
 if ((ret = rtmp_write_amf_data(s, param, &p)) < 0) {
 // Invalid AMF parameter.
 ff_rtmp_packet_destroy(&pkt);
 return ret;
 }

-param = av_strtok(NULL, " ", &saveptr);
+if (sep)
+param = sep + 1;
+else
+break;
 }
 }

@@ -393,6 +408,31 @@ static int gen_delete_stream(URLContext *s, RTMPContext *rt)
 return ret;
 }

+/**
+* Generate client buffer time and send it to the server.
+*/
+static int gen_buffer_time(URLContext *s, RTMPContext *rt)
+{
+RTMPPacket pkt;
+uint8_t *p;
+int ret;
+
+if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING,
+1, 10)) < 0)
+return ret;
+
+p = pkt.data;
+bytestream_put_be16(&p, 3);
+bytestream_put_be32(&p, rt->main_channel_id);
+bytestream_put_be32(&p, rt->client_buffer_time);
+
+ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
+rt->prev_pkt[1]);
+ff_rtmp_packet_destroy(&pkt);
+
+return ret;
+}
+
 /**
 * Generate 'play' call and send it to the server, then ping the server
 * to start actual playing.
@@ -422,23 +462,6 @@ static int gen_play(URLContext *s, RTMPContext *rt)
 rt->prev_pkt[1]);
 ff_rtmp_packet_destroy(&pkt);

-if (ret < 0)
-return ret;
-
-// set client buffer time disguised in ping packet
-if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING,
-1, 10)) < 0)
-return ret;
-
-p = pkt.data;
-bytestream_put_be16(&p, 3);
-bytestream_put_be32(&p, 1);
-bytestream_put_be32(&p, 256); //TODO: what is a good value here?
-
-ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
-rt->prev_pkt[1]);
-ff_rtmp_packet_destroy(&pkt);
-
 return ret;
 }

@@ -510,7 +533,7 @@ static int gen_server_bw(URLContext *s, RTMPContext *rt)
 return ret;

 p = pkt.data;
-bytestream_put_be32(&p, 2500000);
+bytestream_put_be32(&p, rt->server_bw);
 ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
 rt->prev_pkt[1]);
 ff_rtmp_packet_destroy(&pkt);
@@ -838,6 +861,14 @@ static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
 av_log(s, AV_LOG_DEBUG, "Client bandwidth = %d\n", AV_RB32(pkt->data));
 rt->client_report_size = AV_RB32(pkt->data) >> 1;
 break;
+case RTMP_PT_SERVER_BW:
+rt->server_bw = AV_RB32(pkt->data);
+if (rt->server_bw <= 0) {
+av_log(s, AV_LOG_ERROR, "Incorrect server bandwidth %d\n", rt->server_bw);
+return AVERROR(EINVAL);
+}
+av_log(s, AV_LOG_DEBUG, "Server bandwidth = %d\n", rt->server_bw);
+break;
 case RTMP_PT_INVOKE:
 //TODO: check for the messages sent for wrong state?
 if (!memcmp(pkt->data, "\002\000\006_error", 9)) {
@@ -888,6 +919,8 @@ static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
 if (rt->is_input) {
 if ((ret = gen_play(s, rt)) < 0)
 return ret;
+if ((ret = gen_buffer_time(s, rt)) < 0)
+return ret;
 } else {
 if ((ret = gen_publish(s, rt)) < 0)
 return ret;
@@ -924,6 +957,9 @@ static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
 return ret;
 }
 break;
+default:
+av_log(s, AV_LOG_VERBOSE, "Unknown packet type received 0x%02X\n", pkt->type);
+break;
 }
 return 0;
 }
@@ -1182,6 +1218,7 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
 rt->client_report_size = 1048576;
 rt->bytes_read = 0;
 rt->last_bytes_read = 0;
+rt->server_bw = 2500000;

 av_log(s, AV_LOG_DEBUG, "Proto = %s, path = %s, app = %s, fname = %s\n",
 proto, path, rt->app, rt->playpath);
@@ -1328,6 +1365,7 @@ static int rtmp_write(URLContext *s, const uint8_t *buf, int size)

 static const AVOption rtmp_options[] = {
 {"rtmp_app", "Name of application to connect to on the RTMP server", OFFSET(app), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
+{"rtmp_buffer", "Set buffer time in milliseconds. The default is 3000.", OFFSET(client_buffer_time), AV_OPT_TYPE_INT, {3000}, 0, INT_MAX, DEC|ENC},
 {"rtmp_conn", "Append arbitrary AMF data to the Connect message", OFFSET(conn), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
 {"rtmp_flashver", "Version of the Flash plugin used to run the SWF player.", OFFSET(flashver), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
 {"rtmp_live", "Specify that the media is a live stream.", OFFSET(live), AV_OPT_TYPE_INT, {-2}, INT_MIN, INT_MAX, DEC, "rtmp_live"},
@ -677,34 +677,6 @@ static void planar_rgb_to_y(uint16_t *dst, const uint8_t *src[4], int width)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void planar_rgb16le_to_y(uint8_t *_dst, const uint8_t *_src[4], int width)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
const uint16_t **src = (const uint16_t **)_src;
|
|
||||||
uint16_t *dst = (uint16_t *)_dst;
|
|
||||||
for (i = 0; i < width; i++) {
|
|
||||||
int g = AV_RL16(src[0] + i);
|
|
||||||
int b = AV_RL16(src[1] + i);
|
|
||||||
int r = AV_RL16(src[2] + i);
|
|
||||||
|
|
||||||
dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static void planar_rgb16be_to_y(uint8_t *_dst, const uint8_t *_src[4], int width)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
const uint16_t **src = (const uint16_t **)_src;
|
|
||||||
uint16_t *dst = (uint16_t *)_dst;
|
|
||||||
for (i = 0; i < width; i++) {
|
|
||||||
int g = AV_RB16(src[0] + i);
|
|
||||||
int b = AV_RB16(src[1] + i);
|
|
||||||
int r = AV_RB16(src[2] + i);
|
|
||||||
|
|
||||||
dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static void planar_rgb_to_uv(uint16_t *dstU, uint16_t *dstV, const uint8_t *src[4], int width)
|
static void planar_rgb_to_uv(uint16_t *dstU, uint16_t *dstV, const uint8_t *src[4], int width)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
@ -718,39 +690,107 @@ static void planar_rgb_to_uv(uint16_t *dstU, uint16_t *dstV, const uint8_t *src[
-static void planar_rgb16le_to_uv(uint8_t *_dstU, uint8_t *_dstV,
-                                 const uint8_t *_src[4], int width)
-{
-    int i;
-    const uint16_t **src = (const uint16_t **)_src;
-    uint16_t *dstU = (uint16_t *)_dstU;
-    uint16_t *dstV = (uint16_t *)_dstV;
-    for (i = 0; i < width; i++) {
-        int g = AV_RL16(src[0] + i);
-        int b = AV_RL16(src[1] + i);
-        int r = AV_RL16(src[2] + i);
-
-        dstU[i] = (RU * r + GU * g + BU * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1);
-        dstV[i] = (RV * r + GV * g + BV * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1);
-    }
-}
-
-static void planar_rgb16be_to_uv(uint8_t *_dstU, uint8_t *_dstV,
-                                 const uint8_t *_src[4], int width)
-{
-    int i;
-    const uint16_t **src = (const uint16_t **)_src;
-    uint16_t *dstU = (uint16_t *)_dstU;
-    uint16_t *dstV = (uint16_t *)_dstV;
-    for (i = 0; i < width; i++) {
-        int g = AV_RB16(src[0] + i);
-        int b = AV_RB16(src[1] + i);
-        int r = AV_RB16(src[2] + i);
-
-        dstU[i] = (RU * r + GU * g + BU * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1);
-        dstV[i] = (RV * r + GV * g + BV * b + (257 << RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT + 1);
-    }
-}
-
+#define rdpx(src) \
+    is_be ? AV_RB16(src) : AV_RL16(src)
+
+static av_always_inline void planar_rgb16_to_y(uint8_t *_dst, const uint8_t *_src[4],
+                                               int width, int bpc, int is_be)
+{
+    int i;
+    const uint16_t **src = (const uint16_t **)_src;
+    uint16_t *dst = (uint16_t *)_dst;
+    for (i = 0; i < width; i++) {
+        int g = rdpx(src[0] + i);
+        int b = rdpx(src[1] + i);
+        int r = rdpx(src[2] + i);
+
+        dst[i] = ((RY * r + GY * g + BY * b + (33 << (RGB2YUV_SHIFT + bpc - 9))) >> RGB2YUV_SHIFT);
+    }
+}
+
+static void planar_rgb9le_to_y(uint8_t *dst, const uint8_t *src[4], int w)
+{
+    planar_rgb16_to_y(dst, src, w, 9, 0);
+}
+
+static void planar_rgb9be_to_y(uint8_t *dst, const uint8_t *src[4], int w)
+{
+    planar_rgb16_to_y(dst, src, w, 9, 1);
+}
+
+static void planar_rgb10le_to_y(uint8_t *dst, const uint8_t *src[4], int w)
+{
+    planar_rgb16_to_y(dst, src, w, 10, 0);
+}
+
+static void planar_rgb10be_to_y(uint8_t *dst, const uint8_t *src[4], int w)
+{
+    planar_rgb16_to_y(dst, src, w, 10, 1);
+}
+
+static void planar_rgb16le_to_y(uint8_t *dst, const uint8_t *src[4], int w)
+{
+    planar_rgb16_to_y(dst, src, w, 16, 0);
+}
+
+static void planar_rgb16be_to_y(uint8_t *dst, const uint8_t *src[4], int w)
+{
+    planar_rgb16_to_y(dst, src, w, 16, 1);
+}
+
+static av_always_inline void planar_rgb16_to_uv(uint8_t *_dstU, uint8_t *_dstV,
+                                                const uint8_t *_src[4], int width,
+                                                int bpc, int is_be)
+{
+    int i;
+    const uint16_t **src = (const uint16_t **)_src;
+    uint16_t *dstU = (uint16_t *)_dstU;
+    uint16_t *dstV = (uint16_t *)_dstV;
+    for (i = 0; i < width; i++) {
+        int g = rdpx(src[0] + i);
+        int b = rdpx(src[1] + i);
+        int r = rdpx(src[2] + i);
+
+        dstU[i] = (RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> RGB2YUV_SHIFT;
+        dstV[i] = (RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT + bpc - 9))) >> RGB2YUV_SHIFT;
+    }
+}
+#undef rdpx
+
+static void planar_rgb9le_to_uv(uint8_t *dstU, uint8_t *dstV,
+                                const uint8_t *src[4], int w)
+{
+    planar_rgb16_to_uv(dstU, dstV, src, w, 9, 0);
+}
+
+static void planar_rgb9be_to_uv(uint8_t *dstU, uint8_t *dstV,
+                                const uint8_t *src[4], int w)
+{
+    planar_rgb16_to_uv(dstU, dstV, src, w, 9, 1);
+}
+
+static void planar_rgb10le_to_uv(uint8_t *dstU, uint8_t *dstV,
+                                 const uint8_t *src[4], int w)
+{
+    planar_rgb16_to_uv(dstU, dstV, src, w, 10, 0);
+}
+
+static void planar_rgb10be_to_uv(uint8_t *dstU, uint8_t *dstV,
+                                 const uint8_t *src[4], int w)
+{
+    planar_rgb16_to_uv(dstU, dstV, src, w, 10, 1);
+}
+
+static void planar_rgb16le_to_uv(uint8_t *dstU, uint8_t *dstV,
+                                 const uint8_t *src[4], int w)
+{
+    planar_rgb16_to_uv(dstU, dstV, src, w, 16, 0);
+}
+
+static void planar_rgb16be_to_uv(uint8_t *dstU, uint8_t *dstV,
+                                 const uint8_t *src[4], int w)
+{
+    planar_rgb16_to_uv(dstU, dstV, src, w, 16, 1);
+}
+
 av_cold void ff_sws_init_input_funcs(SwsContext *c)
 {
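The hunk above replaces per-depth, per-endianness copies with one always-inline template parameterized on bpc and is_be, plus trivial wrappers that the compiler specializes into straight-line code; the rounding bias is shifted by (bpc - 9) so it tracks the source bit depth. The standalone sketch below mirrors that pattern only for illustration: the coefficient values, SHIFT constant and function names are made-up placeholders, not libswscale's RY/GY/BY or RGB2YUV_SHIFT.

/* Sketch of a bit-depth/endianness-templated planar reader.
 * All constants are illustrative placeholders, not swscale's. */
#include <stdint.h>
#include <stdio.h>

#define SHIFT 15            /* placeholder fixed-point shift */
#define CY_R   9798         /* placeholder Q15 luma weights (~0.299/0.587/0.114) */
#define CY_G  19235
#define CY_B   3735

#define RD16(p, is_be) ((is_be) ? (uint16_t)(((p)[0] << 8) | (p)[1]) \
                                : (uint16_t)(((p)[1] << 8) | (p)[0]))

static inline void rgb_to_y_template(uint16_t *dst, const uint8_t *src[3],
                                     int width, int bpc, int is_be)
{
    int i;
    for (i = 0; i < width; i++) {
        /* read one 16-bit sample per plane in the requested byte order */
        int g = RD16(src[0] + 2 * i, is_be);
        int b = RD16(src[1] + 2 * i, is_be);
        int r = RD16(src[2] + 2 * i, is_be);
        /* rounding bias scales with bit depth, mirroring the
         * (33 << (RGB2YUV_SHIFT + bpc - 9)) term in the patch */
        dst[i] = (CY_R * r + CY_G * g + CY_B * b + (33 << (SHIFT + bpc - 9))) >> SHIFT;
    }
}

/* per-format wrappers: only the constant arguments differ */
static void rgb9le_to_y(uint16_t *dst, const uint8_t *src[3], int w)  { rgb_to_y_template(dst, src, w,  9, 0); }
static void rgb10be_to_y(uint16_t *dst, const uint8_t *src[3], int w) { rgb_to_y_template(dst, src, w, 10, 1); }

int main(void)
{
    uint8_t g[2] = { 0x00, 0x01 }, b[2] = { 0x00, 0x01 }, r[2] = { 0x00, 0x01 };
    const uint8_t *planes[3] = { g, b, r };
    uint16_t y;
    rgb9le_to_y(&y, planes, 1);   /* mid-grey 9-bit input */
    printf("Y = %u\n", y);
    return 0;
}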
@ -778,12 +818,20 @@ av_cold void ff_sws_init_input_funcs(SwsContext *c)
         c->chrToYV12 = palToUV_c;
         break;
     case PIX_FMT_GBRP9LE:
+        c->readChrPlanar = planar_rgb9le_to_uv;
+        break;
     case PIX_FMT_GBRP10LE:
+        c->readChrPlanar = planar_rgb10le_to_uv;
+        break;
     case PIX_FMT_GBRP16LE:
         c->readChrPlanar = planar_rgb16le_to_uv;
         break;
     case PIX_FMT_GBRP9BE:
+        c->readChrPlanar = planar_rgb9be_to_uv;
+        break;
     case PIX_FMT_GBRP10BE:
+        c->readChrPlanar = planar_rgb10be_to_uv;
+        break;
     case PIX_FMT_GBRP16BE:
         c->readChrPlanar = planar_rgb16be_to_uv;
         break;
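The chroma readers registered above all funnel into planar_rgb16_to_uv, whose bias term now scales with the source bit depth. Assuming RGB2YUV_SHIFT is 15 (an assumption about swscale's internal constant, not something shown in this diff), a quick check of what the bias alone contributes for black input (r = g = b = 0) suggests chroma is centered near 2^(bpc-1), i.e. mid-range for each depth; the small excess over the exact mid-point appears to be the rounding term folded into the same constant.

/* Sanity check of the depth-dependent chroma bias in the hunk above.
 * RGB2YUV_SHIFT = 15 is assumed here, not taken from this diff. */
#include <stdio.h>

#define RGB2YUV_SHIFT 15

int main(void)
{
    const int depths[] = { 9, 10, 16 };
    int i;
    for (i = 0; i < 3; i++) {
        int bpc = depths[i];
        /* with r = g = b = 0, only the bias survives the shift */
        long long bias = (257LL << (RGB2YUV_SHIFT + bpc - 9)) >> RGB2YUV_SHIFT;
        printf("bpc=%2d  bias>>shift=%lld  mid-range=%d\n",
               bpc, bias, 1 << (bpc - 1));
    }
    return 0;
}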
@ -975,12 +1023,20 @@ av_cold void ff_sws_init_input_funcs(SwsContext *c)
     c->alpToYV12 = NULL;
     switch (srcFormat) {
     case PIX_FMT_GBRP9LE:
+        c->readLumPlanar = planar_rgb9le_to_y;
+        break;
     case PIX_FMT_GBRP10LE:
+        c->readLumPlanar = planar_rgb10le_to_y;
+        break;
     case PIX_FMT_GBRP16LE:
         c->readLumPlanar = planar_rgb16le_to_y;
         break;
     case PIX_FMT_GBRP9BE:
+        c->readLumPlanar = planar_rgb9be_to_y;
+        break;
     case PIX_FMT_GBRP10BE:
+        c->readLumPlanar = planar_rgb10be_to_y;
+        break;
     case PIX_FMT_GBRP16BE:
         c->readLumPlanar = planar_rgb16be_to_y;
         break;
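Both init hunks follow the same shape: ff_sws_init_input_funcs() inspects the source pixel format once and stores a reader in c->readLumPlanar / c->readChrPlanar, which the scaler core can then call per line without re-checking the format. Below is a minimal sketch of that dispatch pattern; the context struct, enum values and reader body are simplified stand-ins of my own, not the real SwsContext or PIX_FMT_GBRP* handling.

/* Toy pixel-format -> reader dispatch, illustrating the pattern only. */
#include <stdint.h>
#include <stdio.h>

enum fmt { FMT_GBRP9LE, FMT_GBRP10LE, FMT_GBRP16LE };

typedef void (*planar_reader)(uint8_t *dst, const uint8_t *src[4], int width);

struct ctx {
    planar_reader read_lum_planar;   /* stands in for c->readLumPlanar */
};

static void read_y_9le(uint8_t *dst, const uint8_t *src[4], int width)
{
    /* toy reader: copy the low byte of each 16-bit sample from plane 0 */
    int i;
    for (i = 0; i < width; i++)
        dst[i] = src[0][2 * i];
}

static void init_input_funcs(struct ctx *c, enum fmt f)
{
    /* chosen once per conversion, like ff_sws_init_input_funcs() */
    switch (f) {
    case FMT_GBRP9LE:
        c->read_lum_planar = read_y_9le;
        break;
    default:
        c->read_lum_planar = NULL;   /* format handled elsewhere */
        break;
    }
}

int main(void)
{
    uint8_t gplane[4] = { 0x12, 0x00, 0x34, 0x00 };
    const uint8_t *planes[4] = { gplane, NULL, NULL, NULL };
    uint8_t y[2];
    struct ctx c;

    init_input_funcs(&c, FMT_GBRP9LE);
    if (c.read_lum_planar)
        c.read_lum_planar(y, planes, 2);   /* called per row by the core loop */
    printf("%02x %02x\n", y[0], y[1]);
    return 0;
}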
@ -148,7 +148,7 @@ fate-list:
 clean:: testclean
 
 testclean:
-	$(RM) -r tests/vsynth1 tests/vsynth2 tests/data tools/lavfi-showfiltfmts$(EXESUF)
+	$(RM) -r tests/vsynth1 tests/data tools/lavfi-showfiltfmts$(EXESUF)
 	$(RM) $(CLEANSUFFIXES:%=tests/%)
 	$(RM) $(TESTTOOLS:%=tests/%$(HOSTEXESUF))
 