Compare commits

1 commit: n1.0 ... n0.11

Author:   Michael Niedermayer
SHA1:     734cfa8e8b
Message:  Update for 0.11
          Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Date:     2012-05-25 19:48:47 +02:00
2078 changed files with 39284 additions and 101731 deletions

.gitignore

@@ -1,10 +1,12 @@
.config
.version
*.a
*.o
*.d
*.def
*.dll
*.exe
*.h.c
*.ho
*.lib
*.pc
*.so
@@ -13,48 +15,50 @@
*-example
*-test
*_g
/.config
/.version
/ffmpeg
/ffplay
/ffprobe
/ffserver
/config.*
/version.h
/doc/*.1
/doc/*.html
/doc/*.pod
/doc/avoptions_codec.texi
/doc/avoptions_format.texi
/doc/examples/decoding_encoding
/doc/examples/demuxing
/doc/examples/filtering_audio
/doc/examples/filtering_video
/doc/examples/metadata
/doc/examples/muxing
/doc/examples/scaling_video
/doc/fate.txt
/doc/print_options
/doxy/
/libavcodec/*_tablegen
/libavcodec/*_tables.c
/libavcodec/*_tables.h
/libavutil/avconfig.h
/tests/audiogen
/tests/base64
/tests/data/
/tests/rotozoom
/tests/tiny_psnr
/tests/videogen
/tests/vsynth1/
/tools/aviocat
/tools/ffbisect
/tools/bisect.need
/tools/cws2fws
/tools/ffeval
/tools/graph2dot
/tools/ismindex
/tools/pktdumper
/tools/probetest
/tools/qt-faststart
/tools/trasher
*.def
*.dll
*.lib
*.exp
config.*
doc/*.1
doc/*.html
doc/*.pod
doc/fate.txt
doxy
ffmpeg
ffplay
ffprobe
ffserver
avconv
doc/avoptions_codec.texi
doc/avoptions_format.texi
doc/print_options
doc/examples/decoding_encoding
doc/examples/filtering_audio
doc/examples/filtering_video
doc/examples/metadata
doc/examples/muxing
libavcodec/*_tablegen
libavcodec/*_tables.c
libavcodec/*_tables.h
libavcodec/codec_names.h
libavutil/avconfig.h
tests/audiogen
tests/base64
tests/data
tests/rotozoom
tests/tiny_psnr
tests/videogen
tests/vsynth1
tests/vsynth2
tools/aviocat
tools/cws2fws
tools/ffeval
tools/graph2dot
tools/ismindex
tools/lavfi-showfiltfmts
tools/pktdumper
tools/probetest
tools/qt-faststart
tools/trasher
version.h

Changelog

@@ -3,82 +3,8 @@ releases are sorted from youngest to oldest.
version next:
version 1.0:
- INI and flat output in ffprobe
- Scene detection in libavfilter
- Indeo Audio decoder
- channelsplit audio filter
- setnsamples audio filter
- atempo filter
- ffprobe -show_data option
- RTMPT protocol support
- iLBC encoding/decoding via libilbc
- Microsoft Screen 1 decoder
- join audio filter
- audio channel mapping filter
- Microsoft ATC Screen decoder
- RTSP listen mode
- TechSmith Screen Codec 2 decoder
- AAC encoding via libfdk-aac
- Microsoft Expression Encoder Screen decoder
- RTMPS protocol support
- RTMPTS protocol support
- RTMPE protocol support
- RTMPTE protocol support
- showwaves and showspectrum filter
- LucasArts SMUSH playback support
- SAMI, RealText and SubViewer demuxers and decoders
- Heart Of Darkness PAF playback support
- iec61883 device
- asettb filter
- new option: -progress
- 3GPP Timed Text encoder/decoder
- GeoTIFF decoder support
- ffmpeg -(no)stdin option
- Opus decoder using libopus
- caca output device using libcaca
- alphaextract and alphamerge filters
- concat filter
- flite filter
- Canopus Lossless Codec decoder
- bitmap subtitles in filters (experimental and temporary)
- MP2 encoding via TwoLAME
- bmp parser
- smptebars source
- asetpts filter
- hue filter
- ICO muxer
- SubRip encoder and decoder without embedded timing
- edge detection filter
- framestep filter
- ffmpeg -shortest option is now per-output file
-pass and -passlogfile are now per-output stream
- volume measurement filter
- Ut Video encoder
- Microsoft Screen 2 decoder
- Matroska demuxer now identifies SRT subtitles as AV_CODEC_ID_SUBRIP
instead of AV_CODEC_ID_TEXT
- smartblur filter ported from MPlayer
- CPiA decoder
- decimate filter ported from MPlayer
- RTP depacketization of JPEG
- Smooth Streaming live segmenter muxer
- F4V muxer
- sendcmd and asendcmd filters
- WebVTT demuxer and decoder (simple tags supported)
- RTP packetization of JPEG
- faststart option in the MOV/MP4 muxer
version 0.11:
- Fixes: CVE-2012-2772, CVE-2012-2774, CVE-2012-2775, CVE-2012-2776, CVE-2012-2777,
CVE-2012-2779, CVE-2012-2782, CVE-2012-2783, CVE-2012-2784, CVE-2012-2785,
CVE-2012-2786, CVE-2012-2787, CVE-2012-2788, CVE-2012-2789, CVE-2012-2790,
CVE-2012-2791, CVE-2012-2792, CVE-2012-2793, CVE-2012-2794, CVE-2012-2795,
CVE-2012-2796, CVE-2012-2797, CVE-2012-2798, CVE-2012-2799, CVE-2012-2800,
CVE-2012-2801, CVE-2012-2802, CVE-2012-2803, CVE-2012-2804,
- Fixes CVE-2012-2771 ... 2805
- v408 Quicktime and Microsoft AYUV Uncompressed 4:4:4:4 encoder and decoder
- setfield filter
- CDXL demuxer and decoder
@@ -107,15 +33,15 @@ version 0.11:
- MicroDVD decoder
- Avid Meridien (AVUI) encoder and decoder
- accept + prefix to -pix_fmt option to disable automatic conversions.
- complete audio filtering in libavfilter and ffmpeg
- audio filters support in libavfilter and avconv
- add fps filter
- audio split filter
- vorbis parser
- png parser
- audio mix filter
version 0.10:
- Fixes: CVE-2011-3929, CVE-2011-3934, CVE-2011-3935, CVE-2011-3936,
CVE-2011-3937, CVE-2011-3940, CVE-2011-3941, CVE-2011-3944,
CVE-2011-3945, CVE-2011-3946, CVE-2011-3947, CVE-2011-3949,

Doxyfile

@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.
PROJECT_NUMBER = 1.0
PROJECT_NUMBER = 0.11
# With the PROJECT_LOGO tag one can specify an logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
@@ -1378,7 +1378,7 @@ PREDEFINED = "__attribute__(x)=" \
"DEF(x)=x ## _TMPL" \
HAVE_AV_CONFIG_H \
HAVE_MMX \
HAVE_MMXEXT \
HAVE_MMX2 \
HAVE_AMD3DNOW \
"DECLARE_ALIGNED(a,t,n)=t n" \
"offsetof(x,y)=0x42"

LICENSE

@@ -1,4 +1,5 @@
FFmpeg:
-------
Most files in FFmpeg are under the GNU Lesser General Public License version 2.1
or later (LGPL v2.1+). Read the file COPYING.LGPLv2.1 for details. Some other
@@ -13,27 +14,9 @@ configure to activate them. In this case, FFmpeg's license changes to GPL v2+.
Specifically, the GPL parts of FFmpeg are
- libpostproc
- libmpcodecs
- optional x86 optimizations in the files
libavcodec/x86/idct_mmx.c
- libutvideo encoding/decoding wrappers in
libavcodec/libutvideo*.cpp
- the X11 grabber in libavdevice/x11grab.c
- the swresample test app in
libswresample/swresample-test.c
- the texi2pod.pl tool
- the following filters in libavfilter:
- vf_blackframe.c
- vf_boxblur.c
- vf_colormatrix.c
- vf_cropdetect.c
- vf_delogo.c
- vf_hqdn3d.c
- vf_mp.c
- vf_super2xsai.c
- vf_tinterlace.c
- vf_yadif.c
- vsrc_mptestsrc.c
There are a handful of files under other licensing terms, namely:
@@ -50,32 +33,18 @@ for you. Read the file COPYING.LGPLv3 or, if you have enabled GPL parts,
COPYING.GPLv3 to learn the exact legal terms that apply in this case.
external libraries
==================
external libraries:
-------------------
FFmpeg can be combined with a number of external libraries, which sometimes
affect the licensing of binaries resulting from the combination.
Some external libraries, e.g. libx264, are under GPL and can be used in
conjunction with FFmpeg. They require --enable-gpl to be passed to configure
as well.
compatible libraries
--------------------
The OpenCORE external libraries are under the Apache License 2.0. That license
is incompatible with the LGPL v2.1 and the GPL v2, but not with version 3 of
those licenses. So to combine the OpenCORE libraries with FFmpeg, the license
version needs to be upgraded by passing --enable-version3 to configure.
The libcdio, libx264, libxavs and libxvid libraries are under GPL. When
combining them with FFmpeg, FFmpeg needs to be licensed as GPL as well by
passing --enable-gpl to configure.
The OpenCORE and VisualOn libraries are under the Apache License 2.0. That
license is incompatible with the LGPL v2.1 and the GPL v2, but not with
version 3 of those licenses. So to combine these libraries with FFmpeg, the
license version needs to be upgraded by passing --enable-version3 to configure.
incompatible libraries
----------------------
The Fraunhofer AAC library, FAAC and aacplus are under licenses which
are incompatible with the GPLv2 and v3. We do not know for certain if their
licenses are compatible with the LGPL.
If you wish to enable these libraries, pass --enable-nonfree to configure.
But note that if you enable any of these libraries the resulting binary will
be under a complex license mix that is more restrictive than the LGPL and that
may result in additional obligations. It is possible that these
restrictions cause the resulting binary to be unredistributeable.
The nonfree external libraries libfaac and libaacplus can be hooked up in FFmpeg.
You need to pass --enable-nonfree to configure to enable it. Employ this option
with care as FFmpeg then becomes nonfree and unredistributable.

MAINTAINERS

@@ -132,9 +132,7 @@ Codecs:
celp_filters.* Vitor Sessak
cinepak.c Roberto Togni
cljr Alex Beregszaszi
cllc.c Derek Buitenhuis
cook.c, cookdata.h Benjamin Larsson
cpia.c Stephan Hilb
crystalhd.c Philip Langdale
cscd.c Reimar Doeffinger
dca.c Kostya Shishkov, Benjamin Larsson
@@ -231,7 +229,6 @@ Codecs:
vble.c Derek Buitenhuis
vc1* Kostya Shishkov
vcr1.c Michael Niedermayer
vda_h264_dec.c Xidorn Quan
vmnc.c Kostya Shishkov
vorbis_enc.c Oded Shimon
vorbis_dec.c Denes Balatoni, David Conrad
@@ -266,7 +263,6 @@ libavdevice
libavdevice/avdevice.h
iec61883.c Georg Lippitsch
libdc1394.c Roman Shaposhnik
v4l2.c Luca Abeni
vfwcap.c Ramiro Polla
@@ -275,18 +271,14 @@ libavdevice
libavfilter
===========
Generic parts:
Video filters:
graphdump.c Nicolas George
Filters:
af_amerge.c Nicolas George
af_astreamsync.c Nicolas George
af_atempo.c Pavel Koshevoy
af_pan.c Nicolas George
vsrc_mandelbrot.c Michael Niedermayer
vf_yadif.c Michael Niedermayer
Sources:
vsrc_mandelbrot.c Michael Niedermayer
libavformat
===========
@@ -408,7 +400,6 @@ x86 Michael Niedermayer
Releases
========
1.0 Michael Niedermayer
0.11 Michael Niedermayer
0.10 Michael Niedermayer
@@ -423,7 +414,6 @@ Attila Kinali 11F0 F9A6 A1D2 11F6 C745 D10C 6520 BCDD F2DF E765
Baptiste Coudurier 8D77 134D 20CC 9220 201F C5DB 0AC9 325C 5C1A BAAA
Ben Littler 3EE3 3723 E560 3214 A8CD 4DEB 2CDB FCE7 768C 8D2C
Benoit Fouet B22A 4F4F 43EF 636B BB66 FCDC 0023 AE1E 2985 49C8
Bœsch Clément 52D0 3A82 D445 F194 DB8B 2B16 87EE 2CB8 F4B8 FCF9
Daniel Verkamp 78A6 07ED 782C 653E C628 B8B9 F0EB 8DD8 2F0E 21C7
Diego Biurrun 8227 1E31 B6D9 4994 7427 E220 9CAE D6CC 4757 FCC5
Gwenole Beauchesne 2E63 B3A6 3E44 37E2 017D 2704 53C7 6266 B153 99C4
@@ -442,5 +432,4 @@ Reynaldo H. Verdejo Pinochet 6E27 CD34 170C C78E 4D4F 5F40 C18E 077F 3114 452A
Robert Swain EE7A 56EA 4A81 A7B5 2001 A521 67FA 362D A2FC 3E71
Sascha Sommer 38A0 F88B 868E 9D3A 97D4 D6A0 E823 706F 1E07 0D3C
Stefano Sabatini 9A43 10F8 D32C D33C 48E7 C52C 5DF2 8E4D B2EE 066B
Stephan Hilb 4F38 0B3A 5F39 B99B F505 E562 8D5C 5554 4E17 8863
Tomas Härdin D133 29CA 4EEC 9DB4 7076 F697 B04B 7403 3313 41FD

Makefile

@@ -15,10 +15,9 @@ PROGS-$(CONFIG_FFPLAY) += ffplay
PROGS-$(CONFIG_FFPROBE) += ffprobe
PROGS-$(CONFIG_FFSERVER) += ffserver
PROGS := $(PROGS-yes:%=%$(PROGSSUF)$(EXESUF))
PROGS := $(PROGS-yes:%=%$(EXESUF))
INSTPROGS = $(PROGS-yes:%=%$(PROGSSUF)$(EXESUF))
OBJS = cmdutils.o
OBJS-ffmpeg = ffmpeg_opt.o ffmpeg_filter.o
OBJS = $(PROGS-yes:%=%.o) cmdutils.o
TESTTOOLS = audiogen videogen rotozoom tiny_psnr base64
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
TOOLS = qt-faststart trasher
@@ -52,14 +51,14 @@ FF_DEP_LIBS := $(DEP_LIBS)
all: $(PROGS)
$(PROGS): %$(EXESUF): %_g$(EXESUF)
$(CP) $< $@
$(STRIP) $@
$(PROGS): %$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
$(CP) $< $@$(PROGSSUF)
$(STRIP) $@$(PROGSSUF)
$(TOOLS): %$(EXESUF): %.o
$(LD) $(LDFLAGS) $(LD_O) $< $(ELIBS)
$(LD) $(LDFLAGS) -o $@ $< $(ELIBS)
tools/cws2fws$(EXESUF): ELIBS = $(ZLIB)
tools/cws2fws$(EXESUF): ELIBS = -lz
config.h: .config
.config: $(wildcard $(FFLIBS:%=$(SRC_PATH)/lib%/all*.c))
@@ -69,11 +68,9 @@ config.h: .config
SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS \
ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
ARMV5TE-OBJS ARMV6-OBJS ARMVFP-OBJS NEON-OBJS \
MMI-OBJS ALTIVEC-OBJS VIS-OBJS \
MMX-OBJS YASM-OBJS \
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS MIPS32R2-OBJS \
OBJS HOSTOBJS TESTOBJS
ALTIVEC-OBJS ARMV5TE-OBJS ARMV6-OBJS ARMVFP-OBJS MMI-OBJS \
MMX-OBJS NEON-OBJS VIS-OBJS YASM-OBJS \
OBJS TESTOBJS
define RESET
$(1) :=
@@ -90,19 +87,12 @@ endef
$(foreach D,$(FFLIBS),$(eval $(call DOSUBDIR,lib$(D))))
define DOPROG
OBJS-$(1) += $(1).o
$(1)$(PROGSSUF)_g$(EXESUF): $(OBJS-$(1))
$$(OBJS-$(1)): CFLAGS += $(CFLAGS-$(1))
$(1)$(PROGSSUF)_g$(EXESUF): LDFLAGS += $(LDFLAGS-$(1))
$(1)$(PROGSSUF)_g$(EXESUF): FF_EXTRALIBS += $(LIBS-$(1))
-include $$(OBJS-$(1):.o=.d)
endef
$(foreach P,$(PROGS-yes),$(eval $(call DOPROG,$(P))))
ffplay.o: CFLAGS += $(SDL_CFLAGS)
ffplay_g$(EXESUF): FF_EXTRALIBS += $(SDL_LIBS)
ffserver_g$(EXESUF): LDFLAGS += $(FFSERVERLDFLAGS)
%$(PROGSSUF)_g$(EXESUF): %.o cmdutils.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LD_O) $(OBJS-$*) cmdutils.o $(FF_EXTRALIBS)
$(LD) $(LDFLAGS) -o $@ $< cmdutils.o $(FF_EXTRALIBS)
OBJDIRS += tools
@@ -159,7 +149,7 @@ clean::
distclean::
$(RM) $(DISTCLEANSUFFIXES)
$(RM) config.* .version version.h libavutil/avconfig.h libavcodec/codec_names.h
$(RM) config.* .version version.h libavutil/avconfig.h
config:
$(SRC_PATH)/configure $(value FFMPEG_CONFIGURATION)
@@ -173,7 +163,7 @@ coverage-html: coverage.info
$(Q)genhtml -o $@ $<
$(Q)touch $@
check: all alltools examples testprogs fate
check: all alltools checkheaders examples testprogs fate
include $(SRC_PATH)/doc/Makefile
include $(SRC_PATH)/tests/Makefile

(unnamed single-line version file)

@@ -1 +1 @@
1.0.git
0.11

(unnamed single-line version file)

@@ -1 +1 @@
1.0
0.11

arch.mak

@@ -4,10 +4,6 @@ OBJS-$(HAVE_ARMVFP) += $(ARMVFP-OBJS) $(ARMVFP-OBJS-yes)
OBJS-$(HAVE_NEON) += $(NEON-OBJS) $(NEON-OBJS-yes)
OBJS-$(HAVE_MMI) += $(MMI-OBJS) $(MMI-OBJS-yes)
OBJS-$(HAVE_MIPSFPU) += $(MIPSFPU-OBJS) $(MIPSFPU-OBJS-yes)
OBJS-$(HAVE_MIPS32R2) += $(MIPS32R2-OBJS) $(MIPS32R2-OBJS-yes)
OBJS-$(HAVE_MIPSDSPR1) += $(MIPSDSPR1-OBJS) $(MIPSDSPR1-OBJS-yes)
OBJS-$(HAVE_MIPSDSPR2) += $(MIPSDSPR2-OBJS) $(MIPSDSPR2-OBJS-yes)
OBJS-$(HAVE_ALTIVEC) += $(ALTIVEC-OBJS) $(ALTIVEC-OBJS-yes)

cmdutils.c

@@ -29,7 +29,6 @@
references to libraries that are not being built. */
#include "config.h"
#include "compat/va_copy.h"
#include "libavformat/avformat.h"
#include "libavfilter/avfilter.h"
#include "libavdevice/avdevice.h"
@@ -42,7 +41,6 @@
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/mathematics.h"
#include "libavutil/imgutils.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/eval.h"
@@ -67,13 +65,11 @@ static FILE *report_file;
void init_opts(void)
{
if(CONFIG_SWSCALE)
sws_opts = sws_getContext(16, 16, 0, 16, 16, 0, SWS_BICUBIC,
#if CONFIG_SWSCALE
sws_opts = sws_getContext(16, 16, 0, 16, 16, 0, SWS_BICUBIC,
NULL, NULL, NULL);
if(CONFIG_SWRESAMPLE)
swr_opts = swr_alloc();
#endif
swr_opts = swr_alloc();
}
void uninit_opts(void)
@@ -82,10 +78,7 @@ void uninit_opts(void)
sws_freeContext(sws_opts);
sws_opts = NULL;
#endif
if(CONFIG_SWRESAMPLE)
swr_free(&swr_opts);
swr_free(&swr_opts);
av_dict_free(&format_opts);
av_dict_free(&codec_opts);
}
@@ -142,8 +135,8 @@ int64_t parse_time_or_die(const char *context, const char *timestr,
return us;
}
void show_help_options(const OptionDef *options, const char *msg, int req_flags,
int rej_flags, int alt_flags)
void show_help_options(const OptionDef *options, const char *msg, int mask,
int value)
{
const OptionDef *po;
int first;
@@ -151,33 +144,26 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
first = 1;
for (po = options; po->name != NULL; po++) {
char buf[64];
if (((po->flags & req_flags) != req_flags) ||
(alt_flags && !(po->flags & alt_flags)) ||
(po->flags & rej_flags))
continue;
if (first) {
printf("%s\n", msg);
first = 0;
if ((po->flags & mask) == value) {
if (first) {
printf("%s", msg);
first = 0;
}
av_strlcpy(buf, po->name, sizeof(buf));
if (po->flags & HAS_ARG) {
av_strlcat(buf, " ", sizeof(buf));
av_strlcat(buf, po->argname, sizeof(buf));
}
printf("-%-17s %s\n", buf, po->help);
}
av_strlcpy(buf, po->name, sizeof(buf));
if (po->argname) {
av_strlcat(buf, " ", sizeof(buf));
av_strlcat(buf, po->argname, sizeof(buf));
}
printf("-%-17s %s\n", buf, po->help);
}
printf("\n");
}
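
The two selection schemes above differ as follows: the n0.11 form prints an option when its masked flag bits equal value exactly, while the n1.0 form checks required, rejected and alternative flag sets independently. A minimal self-contained sketch of the two predicates (the OPT_VIDEO/OPT_AUDIO values come from the cmdutils.h diff further down; OPT_EXPERT's value is an assumption):

    #include <stdio.h>

    #define OPT_VIDEO  0x0010  /* from cmdutils.h below */
    #define OPT_AUDIO  0x0020  /* from cmdutils.h below */
    #define OPT_EXPERT 0x0004  /* assumed value, not shown in this diff */

    /* n0.11: print when the masked bits match exactly */
    static int match_mask(int flags, int mask, int value)
    {
        return (flags & mask) == value;
    }

    /* n1.0: all req flags set, no rej flags set, and at least one
     * alt flag set whenever alt is nonzero */
    static int match_sets(int flags, int req, int rej, int alt)
    {
        return (flags & req) == req && !(flags & rej) && (!alt || (flags & alt));
    }

    int main(void)
    {
        int flags = OPT_VIDEO | OPT_EXPERT;
        printf("%d\n", match_mask(flags, OPT_VIDEO | OPT_AUDIO, OPT_VIDEO)); /* 1 */
        printf("%d\n", match_sets(flags, OPT_VIDEO, OPT_EXPERT, 0));         /* 0: expert rejected */
        return 0;
    }
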
void show_help_children(const AVClass *class, int flags)
{
const AVClass *child = NULL;
if (class->option) {
av_opt_show2(&class, NULL, flags, 0);
printf("\n");
}
av_opt_show2(&class, NULL, flags, 0);
printf("\n");
while (child = av_opt_child_class_next(class, child))
show_help_children(child, flags);
@@ -198,7 +184,6 @@ static const OptionDef *find_option(const OptionDef *po, const char *name)
#if defined(_WIN32) && !defined(__MINGW32CE__)
#include <windows.h>
#include <shellapi.h>
/* Will be leaked on exit */
static char** win32_argv_utf8 = NULL;
static int win32_argc = 0;
@@ -302,7 +287,6 @@ int parse_option(void *optctx, const char *opt, const char *arg,
if (po->flags & OPT_STRING) {
char *str;
str = av_strdup(arg);
// av_freep(dst);
*(char **)dst = str;
} else if (po->flags & OPT_BOOL) {
*(int *)dst = bool_val;
@@ -317,7 +301,8 @@ int parse_option(void *optctx, const char *opt, const char *arg,
} else if (po->flags & OPT_DOUBLE) {
*(double *)dst = parse_number_or_die(opt, arg, OPT_DOUBLE, -INFINITY, INFINITY);
} else if (po->u.func_arg) {
int ret = po->u.func_arg(optctx, opt, arg);
int ret = po->flags & OPT_FUNC2 ? po->u.func2_arg(optctx, opt, arg)
: po->u.func_arg(opt, arg);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR,
"Failed to set value '%s' for option '%s'\n", arg, opt);
@@ -416,7 +401,7 @@ void parse_loglevel(int argc, char **argv, const OptionDef *options)
if (!idx)
idx = locate_option(argc, argv, options, "v");
if (idx && argv[idx + 1])
opt_loglevel(NULL, "loglevel", argv[idx + 1]);
opt_loglevel("loglevel", argv[idx + 1]);
idx = locate_option(argc, argv, options, "report");
if (idx || getenv("FFREPORT")) {
opt_report("report");
@@ -432,10 +417,10 @@ void parse_loglevel(int argc, char **argv, const OptionDef *options)
}
}
#define FLAGS (o->type == AV_OPT_TYPE_FLAGS) ? AV_DICT_APPEND : 0
int opt_default(void *optctx, const char *opt, const char *arg)
#define FLAGS(o) ((o)->type == AV_OPT_TYPE_FLAGS) ? AV_DICT_APPEND : 0
int opt_default(const char *opt, const char *arg)
{
const AVOption *o;
const AVOption *oc, *of, *os, *oswr = NULL;
char opt_stripped[128];
const char *p;
const AVClass *cc = avcodec_get_class(), *fc = avformat_get_class(), *sc, *swr_class;
@@ -444,18 +429,18 @@ int opt_default(void *optctx, const char *opt, const char *arg)
p = opt + strlen(opt);
av_strlcpy(opt_stripped, opt, FFMIN(sizeof(opt_stripped), p - opt + 1));
if ((o = av_opt_find(&cc, opt_stripped, NULL, 0,
if ((oc = av_opt_find(&cc, opt_stripped, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) ||
((opt[0] == 'v' || opt[0] == 'a' || opt[0] == 's') &&
(o = av_opt_find(&cc, opt + 1, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ))))
av_dict_set(&codec_opts, opt, arg, FLAGS);
else if ((o = av_opt_find(&fc, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)))
av_dict_set(&format_opts, opt, arg, FLAGS);
(oc = av_opt_find(&cc, opt + 1, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ))))
av_dict_set(&codec_opts, opt, arg, FLAGS(oc));
if ((of = av_opt_find(&fc, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)))
av_dict_set(&format_opts, opt, arg, FLAGS(of));
#if CONFIG_SWSCALE
sc = sws_get_class();
if (!o && (o = av_opt_find(&sc, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
if ((os = av_opt_find(&sc, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
// XXX we only support sws_flags, not arbitrary sws options
int ret = av_opt_set(sws_opts, opt, arg, 0);
if (ret < 0) {
@@ -464,25 +449,23 @@ int opt_default(void *optctx, const char *opt, const char *arg)
}
}
#endif
#if CONFIG_SWRESAMPLE
swr_class = swr_get_class();
if (!o && (o = av_opt_find(&swr_class, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
if (!oc && !of && !os && (oswr = av_opt_find(&swr_class, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
int ret = av_opt_set(swr_opts, opt, arg, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error setting option %s.\n", opt);
return ret;
}
}
#endif
if (o)
if (oc || of || os || oswr)
return 0;
av_log(NULL, AV_LOG_ERROR, "Unrecognized option '%s'\n", opt);
return AVERROR_OPTION_NOT_FOUND;
}
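
One behavioural difference in the opt_default() rewrite: the n1.0 body keeps a single match variable o and chains the class lookups with else-if, so the first class that knows the name wins, whereas the n0.11 body looks up the codec and format classes independently (oc, of, os, oswr) and can record one option name in several dictionaries. A standalone sketch, with hypothetical membership tests standing in for av_opt_find():

    #include <stdio.h>
    #include <string.h>

    /* hypothetical stand-ins for av_opt_find() on the codec/format class */
    static int in_codec_class(const char *opt)  { return !strcmp(opt, "both") || !strcmp(opt, "codec_only"); }
    static int in_format_class(const char *opt) { return !strcmp(opt, "both") || !strcmp(opt, "format_only"); }

    int main(void)
    {
        const char *opt = "both"; /* hypothetical name known to both classes */

        /* n1.0 style: first match wins */
        if (in_codec_class(opt))
            printf("n1.0:  -> codec_opts only\n");
        else if (in_format_class(opt))
            printf("n1.0:  -> format_opts\n");

        /* n0.11 style: independent lookups, may land in both dicts */
        if (in_codec_class(opt))
            printf("n0.11: -> codec_opts\n");
        if (in_format_class(opt))
            printf("n0.11: -> format_opts\n");
        return 0;
    }
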
int opt_loglevel(void *optctx, const char *opt, const char *arg)
int opt_loglevel(const char *opt, const char *arg)
{
const struct { const char *name; int level; } log_levels[] = {
{ "quiet" , AV_LOG_QUIET },
@@ -549,7 +532,7 @@ int opt_report(const char *opt)
return 0;
}
int opt_max_alloc(void *optctx, const char *opt, const char *arg)
int opt_max_alloc(const char *opt, const char *arg)
{
char *tail;
size_t max;
@@ -563,7 +546,7 @@ int opt_max_alloc(void *optctx, const char *opt, const char *arg)
return 0;
}
int opt_cpuflags(void *optctx, const char *opt, const char *arg)
int opt_cpuflags(const char *opt, const char *arg)
{
int ret;
unsigned flags = av_get_cpu_flags();
@@ -575,13 +558,13 @@ int opt_cpuflags(void *optctx, const char *opt, const char *arg)
return 0;
}
int opt_codec_debug(void *optctx, const char *opt, const char *arg)
int opt_codec_debug(const char *opt, const char *arg)
{
av_log_set_level(AV_LOG_DEBUG);
return opt_default(NULL, opt, arg);
return opt_default(opt, arg);
}
int opt_timelimit(void *optctx, const char *opt, const char *arg)
int opt_timelimit(const char *opt, const char *arg)
{
#if HAVE_SETRLIMIT
int lim = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
@@ -663,9 +646,8 @@ static void print_program_info(int flags, int level)
av_log(NULL, level, " Copyright (c) %d-%d the FFmpeg developers",
program_birth_year, this_year);
av_log(NULL, level, "\n");
av_log(NULL, level, "%sbuilt on %s %s with %s\n",
indent, __DATE__, __TIME__, CC_IDENT);
av_log(NULL, level, "%sbuilt on %s %s with %s %s\n",
indent, __DATE__, __TIME__, CC_TYPE, CC_VERSION);
av_log(NULL, level, "%sconfiguration: " FFMPEG_CONFIGURATION "\n", indent);
}
@@ -680,16 +662,14 @@ void show_banner(int argc, char **argv, const OptionDef *options)
print_all_libs_info(INDENT|SHOW_VERSION, AV_LOG_INFO);
}
int show_version(void *optctx, const char *opt, const char *arg)
{
int opt_version(const char *opt, const char *arg) {
av_log_set_callback(log_callback_help);
print_program_info (0 , AV_LOG_INFO);
print_all_libs_info(SHOW_VERSION, AV_LOG_INFO);
return 0;
}
int show_license(void *optctx, const char *opt, const char *arg)
int opt_license(const char *opt, const char *arg)
{
printf(
#if CONFIG_NONFREE
@@ -756,11 +736,10 @@ int show_license(void *optctx, const char *opt, const char *arg)
program_name, program_name, program_name
#endif
);
return 0;
}
int show_formats(void *optctx, const char *opt, const char *arg)
int opt_formats(const char *opt, const char *arg)
{
AVInputFormat *ifmt = NULL;
AVOutputFormat *ofmt = NULL;
@@ -808,233 +787,82 @@ int show_formats(void *optctx, const char *opt, const char *arg)
return 0;
}
#define PRINT_CODEC_SUPPORTED(codec, field, type, list_name, term, get_name) \
if (codec->field) { \
const type *p = c->field; \
\
printf(" Supported " list_name ":"); \
while (*p != term) { \
get_name(*p); \
printf(" %s", name); \
p++; \
} \
printf("\n"); \
} \
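
PRINT_CODEC_SUPPORTED stamps out one terminator-scanned print loop per capability list (as written it tests codec->field but walks c->field, which only holds together because every call site passes c). Expanded by hand for the pixel-format case, using GET_PIX_FMT_NAME from the cmdutils.h diff further down, the invocation becomes roughly:

    #include <stdio.h>
    #include "libavcodec/avcodec.h"
    #include "libavutil/pixdesc.h"

    /* sketch of what PRINT_CODEC_SUPPORTED(c, pix_fmts, enum PixelFormat,
     * "pixel formats", PIX_FMT_NONE, GET_PIX_FMT_NAME) expands to */
    static void print_pix_fmts(const AVCodec *c)
    {
        if (c->pix_fmts) {
            const enum PixelFormat *p = c->pix_fmts;

            printf(" Supported pixel formats:");
            while (*p != PIX_FMT_NONE) {
                const char *name = av_get_pix_fmt_name(*p); /* GET_PIX_FMT_NAME */
                printf(" %s", name);
                p++;
            }
            printf("\n");
        }
    }
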
static void print_codec(const AVCodec *c)
{
int encoder = av_codec_is_encoder(c);
printf("%s %s [%s]:\n", encoder ? "Encoder" : "Decoder", c->name,
c->long_name ? c->long_name : "");
if (c->type == AVMEDIA_TYPE_VIDEO) {
printf(" Threading capabilities: ");
switch (c->capabilities & (CODEC_CAP_FRAME_THREADS |
CODEC_CAP_SLICE_THREADS)) {
case CODEC_CAP_FRAME_THREADS |
CODEC_CAP_SLICE_THREADS: printf("frame and slice"); break;
case CODEC_CAP_FRAME_THREADS: printf("frame"); break;
case CODEC_CAP_SLICE_THREADS: printf("slice"); break;
default: printf("no"); break;
}
printf("\n");
}
if (c->supported_framerates) {
const AVRational *fps = c->supported_framerates;
printf(" Supported framerates:");
while (fps->num) {
printf(" %d/%d", fps->num, fps->den);
fps++;
}
printf("\n");
}
PRINT_CODEC_SUPPORTED(c, pix_fmts, enum PixelFormat, "pixel formats",
PIX_FMT_NONE, GET_PIX_FMT_NAME);
PRINT_CODEC_SUPPORTED(c, supported_samplerates, int, "sample rates", 0,
GET_SAMPLE_RATE_NAME);
PRINT_CODEC_SUPPORTED(c, sample_fmts, enum AVSampleFormat, "sample formats",
AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME);
PRINT_CODEC_SUPPORTED(c, channel_layouts, uint64_t, "channel layouts",
0, GET_CH_LAYOUT_DESC);
if (c->priv_class) {
show_help_children(c->priv_class,
AV_OPT_FLAG_ENCODING_PARAM |
AV_OPT_FLAG_DECODING_PARAM);
}
}
static char get_media_type_char(enum AVMediaType type)
{
switch (type) {
case AVMEDIA_TYPE_VIDEO: return 'V';
case AVMEDIA_TYPE_AUDIO: return 'A';
case AVMEDIA_TYPE_DATA: return 'D';
case AVMEDIA_TYPE_SUBTITLE: return 'S';
case AVMEDIA_TYPE_ATTACHMENT:return 'T';
default: return '?';
}
static const char map[AVMEDIA_TYPE_NB] = {
[AVMEDIA_TYPE_VIDEO] = 'V',
[AVMEDIA_TYPE_AUDIO] = 'A',
[AVMEDIA_TYPE_DATA] = 'D',
[AVMEDIA_TYPE_SUBTITLE] = 'S',
[AVMEDIA_TYPE_ATTACHMENT] = 'T',
};
return type >= 0 && type < AVMEDIA_TYPE_NB && map[type] ? map[type] : '?';
}
static const AVCodec *next_codec_for_id(enum AVCodecID id, const AVCodec *prev,
int encoder)
int opt_codecs(const char *opt, const char *arg)
{
while ((prev = av_codec_next(prev))) {
if (prev->id == id &&
(encoder ? av_codec_is_encoder(prev) : av_codec_is_decoder(prev)))
return prev;
}
return NULL;
}
static int compare_codec_desc(const void *a, const void *b)
{
const AVCodecDescriptor * const *da = a;
const AVCodecDescriptor * const *db = b;
return (*da)->type != (*db)->type ? (*da)->type - (*db)->type :
strcmp((*da)->name, (*db)->name);
}
static unsigned get_codecs_sorted(const AVCodecDescriptor ***rcodecs)
{
const AVCodecDescriptor *desc = NULL;
const AVCodecDescriptor **codecs;
unsigned nb_codecs = 0, i = 0;
while ((desc = avcodec_descriptor_next(desc)))
nb_codecs++;
if (!(codecs = av_calloc(nb_codecs, sizeof(*codecs)))) {
av_log(0, AV_LOG_ERROR, "Out of memory\n");
exit_program(1);
}
desc = NULL;
while ((desc = avcodec_descriptor_next(desc)))
codecs[i++] = desc;
av_assert0(i == nb_codecs);
qsort(codecs, nb_codecs, sizeof(*codecs), compare_codec_desc);
*rcodecs = codecs;
return nb_codecs;
}
static void print_codecs_for_id(enum AVCodecID id, int encoder)
{
const AVCodec *codec = NULL;
printf(" (%s: ", encoder ? "encoders" : "decoders");
while ((codec = next_codec_for_id(id, codec, encoder)))
printf("%s ", codec->name);
printf(")");
}
int show_codecs(void *optctx, const char *opt, const char *arg)
{
const AVCodecDescriptor **codecs;
unsigned i, nb_codecs = get_codecs_sorted(&codecs);
AVCodec *p = NULL, *p2;
const char *last_name;
printf("Codecs:\n"
" D..... = Decoding supported\n"
" .E.... = Encoding supported\n"
" ..V... = Video codec\n"
" ..A... = Audio codec\n"
" ..S... = Subtitle codec\n"
" ...I.. = Intra frame-only codec\n"
" ....L. = Lossy compression\n"
" .....S = Lossless compression\n"
" -------\n");
for (i = 0; i < nb_codecs; i++) {
const AVCodecDescriptor *desc = codecs[i];
const AVCodec *codec = NULL;
" ...S.. = Supports draw_horiz_band\n"
" ....D. = Supports direct rendering method 1\n"
" .....T = Supports weird frame truncation\n"
" ------\n");
last_name= "000";
for (;;) {
int decode = 0;
int encode = 0;
int cap = 0;
printf(" ");
printf(avcodec_find_decoder(desc->id) ? "D" : ".");
printf(avcodec_find_encoder(desc->id) ? "E" : ".");
printf("%c", get_media_type_char(desc->type));
printf((desc->props & AV_CODEC_PROP_INTRA_ONLY) ? "I" : ".");
printf((desc->props & AV_CODEC_PROP_LOSSY) ? "L" : ".");
printf((desc->props & AV_CODEC_PROP_LOSSLESS) ? "S" : ".");
printf(" %-20s %s", desc->name, desc->long_name ? desc->long_name : "");
/* print decoders/encoders when there's more than one or their
* names are different from codec name */
while ((codec = next_codec_for_id(desc->id, codec, 0))) {
if (strcmp(codec->name, desc->name)) {
print_codecs_for_id(desc->id, 0);
break;
}
}
codec = NULL;
while ((codec = next_codec_for_id(desc->id, codec, 1))) {
if (strcmp(codec->name, desc->name)) {
print_codecs_for_id(desc->id, 1);
break;
p2 = NULL;
while ((p = av_codec_next(p))) {
if ((p2 == NULL || strcmp(p->name, p2->name) < 0) &&
strcmp(p->name, last_name) > 0) {
p2 = p;
decode = encode = cap = 0;
}
if (p2 && strcmp(p->name, p2->name) == 0) {
if (av_codec_is_decoder(p))
decode = 1;
if (av_codec_is_encoder(p))
encode = 1;
cap |= p->capabilities;
}
}
if (p2 == NULL)
break;
last_name = p2->name;
printf(" %s%s%c%s%s%s %-15s %s",
decode ? "D" : (/* p2->decoder ? "d" : */ " "),
encode ? "E" : " ",
get_media_type_char(p2->type),
cap & CODEC_CAP_DRAW_HORIZ_BAND ? "S" : " ",
cap & CODEC_CAP_DR1 ? "D" : " ",
cap & CODEC_CAP_TRUNCATED ? "T" : " ",
p2->name,
p2->long_name ? p2->long_name : "");
#if 0
if (p2->decoder && decode == 0)
printf(" use %s for decoding", p2->decoder->name);
#endif
printf("\n");
}
av_free(codecs);
printf("\n");
printf("Note, the names of encoders and decoders do not always match, so there are\n"
"several cases where the above table shows encoder only or decoder only entries\n"
"even though both encoding and decoding are supported. For example, the h263\n"
"decoder corresponds to the h263 and h263p encoders, for file formats it is even\n"
"worse.\n");
return 0;
}
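
The n1.0 listing walks the descriptor table (one row per codec ID, with implementations looked up separately through next_codec_for_id()), while the n0.11 loop repeatedly rescans av_codec_next() for the alphabetically next AVCodec. The two enumeration APIs in isolation, as a sketch:

    #include <stdio.h>
    #include "libavcodec/avcodec.h"

    static void list_ids(void)    /* n1.0: one entry per codec ID */
    {
        const AVCodecDescriptor *desc = NULL;
        while ((desc = avcodec_descriptor_next(desc)))
            printf("%s\n", desc->name);
    }

    static void list_impls(void)  /* n0.11: one entry per registered AVCodec */
    {
        AVCodec *c = NULL;
        while ((c = av_codec_next(c)))
            printf("%s (%s)\n", c->name,
                   av_codec_is_encoder(c) ? "encoder" : "decoder");
    }

    int main(void)
    {
        avcodec_register_all(); /* av_codec_next() only sees registered codecs */
        list_ids();
        list_impls();
        return 0;
    }
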
static void print_codecs(int encoder)
{
const AVCodecDescriptor **codecs;
unsigned i, nb_codecs = get_codecs_sorted(&codecs);
printf("%s:\n"
" V..... = Video\n"
" A..... = Audio\n"
" S..... = Subtitle\n"
" .F.... = Frame-level multithreading\n"
" ..S... = Slice-level multithreading\n"
" ...X.. = Codec is experimental\n"
" ....B. = Supports draw_horiz_band\n"
" .....D = Supports direct rendering method 1\n"
" ------\n",
encoder ? "Encoders" : "Decoders");
for (i = 0; i < nb_codecs; i++) {
const AVCodecDescriptor *desc = codecs[i];
const AVCodec *codec = NULL;
while ((codec = next_codec_for_id(desc->id, codec, encoder))) {
printf(" %c", get_media_type_char(desc->type));
printf((codec->capabilities & CODEC_CAP_FRAME_THREADS) ? "F" : ".");
printf((codec->capabilities & CODEC_CAP_SLICE_THREADS) ? "S" : ".");
printf((codec->capabilities & CODEC_CAP_EXPERIMENTAL) ? "X" : ".");
printf((codec->capabilities & CODEC_CAP_DRAW_HORIZ_BAND)?"B" : ".");
printf((codec->capabilities & CODEC_CAP_DR1) ? "D" : ".");
printf(" %-20s %s", codec->name, codec->long_name ? codec->long_name : "");
if (strcmp(codec->name, desc->name))
printf(" (codec %s)", desc->name);
printf("\n");
}
}
av_free(codecs);
}
int show_decoders(void *optctx, const char *opt, const char *arg)
{
print_codecs(0);
return 0;
}
int show_encoders(void *optctx, const char *opt, const char *arg)
{
print_codecs(1);
return 0;
}
int show_bsfs(void *optctx, const char *opt, const char *arg)
int opt_bsfs(const char *opt, const char *arg)
{
AVBitStreamFilter *bsf = NULL;
@@ -1045,7 +873,7 @@ int show_bsfs(void *optctx, const char *opt, const char *arg)
return 0;
}
int show_protocols(void *optctx, const char *opt, const char *arg)
int opt_protocols(const char *opt, const char *arg)
{
void *opaque = NULL;
const char *name;
@@ -1060,7 +888,7 @@ int show_protocols(void *optctx, const char *opt, const char *arg)
return 0;
}
int show_filters(void *optctx, const char *opt, const char *arg)
int opt_filters(const char *opt, const char *arg)
{
AVFilter av_unused(**filter) = NULL;
char descr[64], *descr_cur;
@@ -1077,7 +905,7 @@ int show_filters(void *optctx, const char *opt, const char *arg)
*(descr_cur++) = '>';
}
pad = i ? (*filter)->outputs : (*filter)->inputs;
for (j = 0; pad && pad[j].name; j++) {
for (j = 0; pad[j].name; j++) {
if (descr_cur >= descr + sizeof(descr) - 4)
break;
*(descr_cur++) = get_media_type_char(pad[j].type);
@@ -1092,7 +920,7 @@ int show_filters(void *optctx, const char *opt, const char *arg)
return 0;
}
int show_pix_fmts(void *optctx, const char *opt, const char *arg)
int opt_pix_fmts(const char *opt, const char *arg)
{
enum PixelFormat pix_fmt;
@@ -1127,36 +955,7 @@ int show_pix_fmts(void *optctx, const char *opt, const char *arg)
return 0;
}
int show_layouts(void *optctx, const char *opt, const char *arg)
{
int i = 0;
uint64_t layout, j;
const char *name, *descr;
printf("Individual channels:\n"
"NAME DESCRIPTION\n");
for (i = 0; i < 63; i++) {
name = av_get_channel_name((uint64_t)1 << i);
if (!name)
continue;
descr = av_get_channel_description((uint64_t)1 << i);
printf("%-12s%s\n", name, descr);
}
printf("\nStandard channel layouts:\n"
"NAME DECOMPOSITION\n");
for (i = 0; !av_get_standard_channel_layout(i, &layout, &name); i++) {
if (name) {
printf("%-12s", name);
for (j = 1; j; j <<= 1)
if ((layout & j))
printf("%s%s", (layout & (j - 1)) ? "+" : "", av_get_channel_name(j));
printf("\n");
}
}
return 0;
}
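
The decomposition loop above tests one channel bit at a time and joins the names with '+'. Applied to a single layout it looks like this (a sketch; the header name assumes the era's libavutil, where the channel helpers live in audioconvert.h):

    #include <stdio.h>
    #include <stdint.h>
    #include "libavutil/audioconvert.h"

    int main(void)
    {
        uint64_t layout = AV_CH_LAYOUT_5POINT1, j;

        for (j = 1; j; j <<= 1)
            if (layout & j)
                printf("%s%s", (layout & (j - 1)) ? "+" : "",
                       av_get_channel_name(j));
        printf("\n"); /* FL+FR+FC+LFE+SL+SR */
        return 0;
    }
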
int show_sample_fmts(void *optctx, const char *opt, const char *arg)
int show_sample_fmts(const char *opt, const char *arg)
{
int i;
char fmt_str[128];
@@ -1165,120 +964,6 @@ int show_sample_fmts(void *optctx, const char *opt, const char *arg)
return 0;
}
static void show_help_codec(const char *name, int encoder)
{
const AVCodecDescriptor *desc;
const AVCodec *codec;
if (!name) {
av_log(NULL, AV_LOG_ERROR, "No codec name specified.\n");
return;
}
codec = encoder ? avcodec_find_encoder_by_name(name) :
avcodec_find_decoder_by_name(name);
if (codec)
print_codec(codec);
else if ((desc = avcodec_descriptor_get_by_name(name))) {
int printed = 0;
while ((codec = next_codec_for_id(desc->id, codec, encoder))) {
printed = 1;
print_codec(codec);
}
if (!printed) {
av_log(NULL, AV_LOG_ERROR, "Codec '%s' is known to FFmpeg, "
"but no %s for it are available. FFmpeg might need to be "
"recompiled with additional external libraries.\n",
name, encoder ? "encoders" : "decoders");
}
} else {
av_log(NULL, AV_LOG_ERROR, "Codec '%s' is not recognized by FFmpeg.\n",
name);
}
}
static void show_help_demuxer(const char *name)
{
const AVInputFormat *fmt = av_find_input_format(name);
if (!fmt) {
av_log(NULL, AV_LOG_ERROR, "Unknown format '%s'.\n", name);
return;
}
printf("Demuxer %s [%s]:\n", fmt->name, fmt->long_name);
if (fmt->extensions)
printf(" Common extensions: %s.\n", fmt->extensions);
if (fmt->priv_class)
show_help_children(fmt->priv_class, AV_OPT_FLAG_DECODING_PARAM);
}
static void show_help_muxer(const char *name)
{
const AVCodecDescriptor *desc;
const AVOutputFormat *fmt = av_guess_format(name, NULL, NULL);
if (!fmt) {
av_log(NULL, AV_LOG_ERROR, "Unknown format '%s'.\n", name);
return;
}
printf("Muxer %s [%s]:\n", fmt->name, fmt->long_name);
if (fmt->extensions)
printf(" Common extensions: %s.\n", fmt->extensions);
if (fmt->mime_type)
printf(" Mime type: %s.\n", fmt->mime_type);
if (fmt->video_codec != AV_CODEC_ID_NONE &&
(desc = avcodec_descriptor_get(fmt->video_codec))) {
printf(" Default video codec: %s.\n", desc->name);
}
if (fmt->audio_codec != AV_CODEC_ID_NONE &&
(desc = avcodec_descriptor_get(fmt->audio_codec))) {
printf(" Default audio codec: %s.\n", desc->name);
}
if (fmt->subtitle_codec != AV_CODEC_ID_NONE &&
(desc = avcodec_descriptor_get(fmt->subtitle_codec))) {
printf(" Default subtitle codec: %s.\n", desc->name);
}
if (fmt->priv_class)
show_help_children(fmt->priv_class, AV_OPT_FLAG_ENCODING_PARAM);
}
int show_help(void *optctx, const char *opt, const char *arg)
{
char *topic, *par;
av_log_set_callback(log_callback_help);
topic = av_strdup(arg ? arg : "");
par = strchr(topic, '=');
if (par)
*par++ = 0;
if (!*topic) {
show_help_default(topic, par);
} else if (!strcmp(topic, "decoder")) {
show_help_codec(par, 0);
} else if (!strcmp(topic, "encoder")) {
show_help_codec(par, 1);
} else if (!strcmp(topic, "demuxer")) {
show_help_demuxer(par);
} else if (!strcmp(topic, "muxer")) {
show_help_muxer(par);
} else {
show_help_default(topic, par);
}
av_freep(&topic);
return 0;
}
int read_yesno(void)
{
int c = getchar();
@@ -1379,14 +1064,66 @@ FILE *get_preset_file(char *filename, size_t filename_size,
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
{
int ret = avformat_match_stream_specifier(s, st, spec);
if (ret < 0)
av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec);
return ret;
if (*spec <= '9' && *spec >= '0') /* opt:index */
return strtol(spec, NULL, 0) == st->index;
else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' ||
*spec == 't') { /* opt:[vasdt] */
enum AVMediaType type;
switch (*spec++) {
case 'v': type = AVMEDIA_TYPE_VIDEO; break;
case 'a': type = AVMEDIA_TYPE_AUDIO; break;
case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
case 'd': type = AVMEDIA_TYPE_DATA; break;
case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
default: av_assert0(0);
}
if (type != st->codec->codec_type)
return 0;
if (*spec++ == ':') { /* possibly followed by :index */
int i, index = strtol(spec, NULL, 0);
for (i = 0; i < s->nb_streams; i++)
if (s->streams[i]->codec->codec_type == type && index-- == 0)
return i == st->index;
return 0;
}
return 1;
} else if (*spec == 'p' && *(spec + 1) == ':') {
int prog_id, i, j;
char *endptr;
spec += 2;
prog_id = strtol(spec, &endptr, 0);
for (i = 0; i < s->nb_programs; i++) {
if (s->programs[i]->id != prog_id)
continue;
if (*endptr++ == ':') {
int stream_idx = strtol(endptr, NULL, 0);
return stream_idx >= 0 &&
stream_idx < s->programs[i]->nb_stream_indexes &&
st->index == s->programs[i]->stream_index[stream_idx];
}
for (j = 0; j < s->programs[i]->nb_stream_indexes; j++)
if (st->index == s->programs[i]->stream_index[j])
return 1;
}
return 0;
} else if (*spec == '#') {
int sid;
char *endptr;
sid = strtol(spec + 1, &endptr, 0);
if (!*endptr)
return st->id == sid;
} else if (!*spec) /* empty specifier, matches everything */
return 1;
av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec);
return AVERROR(EINVAL);
}
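
Both variants accept the same specifier grammar, n1.0 merely delegating the parsing to avformat_match_stream_specifier(): a plain stream index, a media type v/a/s/d/t optionally followed by :index, p:program_id optionally followed by :stream_index, #stream_id, or the empty string, which matches everything. A hedged usage sketch (return value: 1 on match, 0 on no match, negative on a malformed specifier):

    #include "libavformat/avformat.h"
    #include "cmdutils.h"

    static void specifier_demo(AVFormatContext *s, AVStream *st)
    {
        check_stream_specifier(s, st, "3");       /* the stream with index 3     */
        check_stream_specifier(s, st, "a:1");     /* the second audio stream     */
        check_stream_specifier(s, st, "p:101:0"); /* first stream of program 101 */
        check_stream_specifier(s, st, "#256");    /* stream with container id 256 */
        check_stream_specifier(s, st, "");        /* matches every stream        */
    }
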
AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
AVFormatContext *s, AVStream *st, AVCodec *codec)
AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec,
AVFormatContext *s, AVStream *st)
{
AVDictionary *ret = NULL;
AVDictionaryEntry *t = NULL;
@@ -1395,9 +1132,6 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
char prefix = 0;
const AVClass *cc = avcodec_get_class();
if (!codec)
codec = s->oformat ? avcodec_find_encoder(codec_id)
: avcodec_find_decoder(codec_id);
if (!codec)
return NULL;
@@ -1458,8 +1192,8 @@ AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,
return NULL;
}
for (i = 0; i < s->nb_streams; i++)
opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codec->codec_id,
s, s->streams[i], NULL);
opts[i] = filter_codec_opts(codec_opts, avcodec_find_decoder(s->streams[i]->codec->codec_id),
s, s->streams[i]);
return opts;
}
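
A sketch of how the per-stream dictionaries built here are consumed around avformat_find_stream_info(), assuming an already-opened input and the global codec_opts dictionary that opt_default() fills:

    #include "libavformat/avformat.h"
    #include "libavutil/mem.h"
    #include "cmdutils.h"

    static int probe_input(AVFormatContext *ic)
    {
        int i, ret;
        AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts);

        if (!opts)
            return 0; /* no streams, or allocation failure */

        ret = avformat_find_stream_info(ic, opts);

        for (i = 0; i < ic->nb_streams; i++) /* options the demuxer did not */
            av_dict_free(&opts[i]);          /* consume must still be freed */
        av_freep(&opts);
        return ret;
    }
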
@@ -1481,149 +1215,3 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
}
return array;
}
static int alloc_buffer(FrameBuffer **pool, AVCodecContext *s, FrameBuffer **pbuf)
{
FrameBuffer *buf = av_mallocz(sizeof(*buf));
int i, ret;
const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
int h_chroma_shift, v_chroma_shift;
int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
int w = s->width, h = s->height;
if (!buf)
return AVERROR(ENOMEM);
avcodec_align_dimensions(s, &w, &h);
if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
w += 2*edge;
h += 2*edge;
}
if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
s->pix_fmt, 32)) < 0) {
av_freep(&buf);
av_log(s, AV_LOG_ERROR, "alloc_buffer: av_image_alloc() failed\n");
return ret;
}
/* XXX this shouldn't be needed, but some tests break without this line
* those decoders are buggy and need to be fixed.
* the following tests fail:
* cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
*/
memset(buf->base[0], 128, ret);
avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
const int h_shift = i==0 ? 0 : h_chroma_shift;
const int v_shift = i==0 ? 0 : v_chroma_shift;
if ((s->flags & CODEC_FLAG_EMU_EDGE) || !buf->linesize[i] || !buf->base[i])
buf->data[i] = buf->base[i];
else
buf->data[i] = buf->base[i] +
FFALIGN((buf->linesize[i]*edge >> v_shift) +
(pixel_size*edge >> h_shift), 32);
}
buf->w = s->width;
buf->h = s->height;
buf->pix_fmt = s->pix_fmt;
buf->pool = pool;
*pbuf = buf;
return 0;
}
int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
{
FrameBuffer **pool = s->opaque;
FrameBuffer *buf;
int ret, i;
if(av_image_check_size(s->width, s->height, 0, s) || s->pix_fmt<0) {
av_log(s, AV_LOG_ERROR, "codec_get_buffer: image parameters invalid\n");
return -1;
}
if (!*pool && (ret = alloc_buffer(pool, s, pool)) < 0)
return ret;
buf = *pool;
*pool = buf->next;
buf->next = NULL;
if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
av_freep(&buf->base[0]);
av_free(buf);
if ((ret = alloc_buffer(pool, s, &buf)) < 0)
return ret;
}
av_assert0(!buf->refcount);
buf->refcount++;
frame->opaque = buf;
frame->type = FF_BUFFER_TYPE_USER;
frame->extended_data = frame->data;
frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
frame->width = buf->w;
frame->height = buf->h;
frame->format = buf->pix_fmt;
frame->sample_aspect_ratio = s->sample_aspect_ratio;
for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't
frame->data[i] = buf->data[i];
frame->linesize[i] = buf->linesize[i];
}
return 0;
}
static void unref_buffer(FrameBuffer *buf)
{
FrameBuffer **pool = buf->pool;
av_assert0(buf->refcount > 0);
buf->refcount--;
if (!buf->refcount) {
FrameBuffer *tmp;
for(tmp= *pool; tmp; tmp= tmp->next)
av_assert1(tmp != buf);
buf->next = *pool;
*pool = buf;
}
}
void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
{
FrameBuffer *buf = frame->opaque;
int i;
if(frame->type!=FF_BUFFER_TYPE_USER) {
avcodec_default_release_buffer(s, frame);
return;
}
for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
frame->data[i] = NULL;
unref_buffer(buf);
}
void filter_release_buffer(AVFilterBuffer *fb)
{
FrameBuffer *buf = fb->priv;
av_free(fb);
unref_buffer(buf);
}
void free_buffer_pool(FrameBuffer **pool)
{
FrameBuffer *buf = *pool;
while (buf) {
*pool = buf->next;
av_freep(&buf->base[0]);
av_free(buf);
buf = *pool;
}
}
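
These removed helpers form a get_buffer()/release_buffer() pair backed by a singly-linked free list of FrameBuffer entries (the struct is declared in the cmdutils.h diff below). A sketch of the wiring, modelled on how the ffmpeg.c of this era attaches the pool to a decoder:

    #include "libavcodec/avcodec.h"
    #include "cmdutils.h" /* FrameBuffer, codec_get_buffer(), ... */

    static FrameBuffer *pool; /* head of the free list, NULL when empty */

    static void attach_pool(AVCodecContext *dec)
    {
        dec->opaque         = &pool; /* codec_get_buffer() reads the head here */
        dec->get_buffer     = codec_get_buffer;
        dec->release_buffer = codec_release_buffer;
    }

    /* ... decode ...; then, once every frame has been released: */
    static void drain_pool(void)
    {
        free_buffer_pool(&pool);
    }
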

cmdutils.h

@@ -75,25 +75,25 @@ void log_callback_help(void* ptr, int level, const char* fmt, va_list vl);
* Fallback for options that are not explicitly handled, these will be
* parsed through AVOptions.
*/
int opt_default(void *optctx, const char *opt, const char *arg);
int opt_default(const char *opt, const char *arg);
/**
* Set the libav* libraries log level.
*/
int opt_loglevel(void *optctx, const char *opt, const char *arg);
int opt_loglevel(const char *opt, const char *arg);
int opt_report(const char *opt);
int opt_max_alloc(void *optctx, const char *opt, const char *arg);
int opt_max_alloc(const char *opt, const char *arg);
int opt_cpuflags(void *optctx, const char *opt, const char *arg);
int opt_cpuflags(const char *opt, const char *arg);
int opt_codec_debug(void *optctx, const char *opt, const char *arg);
int opt_codec_debug(const char *opt, const char *arg);
/**
* Limit the execution time.
*/
int opt_timelimit(void *optctx, const char *opt, const char *arg);
int opt_timelimit(const char *opt, const char *arg);
/**
* Parse a string and return its corresponding value as a double.
@@ -148,14 +148,14 @@ typedef struct {
#define OPT_STRING 0x0008
#define OPT_VIDEO 0x0010
#define OPT_AUDIO 0x0020
#define OPT_GRAB 0x0040
#define OPT_INT 0x0080
#define OPT_FLOAT 0x0100
#define OPT_SUBTITLE 0x0200
#define OPT_INT64 0x0400
#define OPT_EXIT 0x0800
#define OPT_DATA 0x1000
#define OPT_PERFILE 0x2000 /* the option is per-file (currently ffmpeg-only).
implied by OPT_OFFSET or OPT_SPEC */
#define OPT_FUNC2 0x2000
#define OPT_OFFSET 0x4000 /* option is specified as an offset in a passed optctx */
#define OPT_SPEC 0x8000 /* option is to be stored in an array of SpecifierOpt.
Implies OPT_OFFSET. Next element after the offset is
@@ -164,24 +164,16 @@ typedef struct {
#define OPT_DOUBLE 0x20000
union {
void *dst_ptr;
int (*func_arg)(void *, const char *, const char *);
int (*func_arg)(const char *, const char *);
int (*func2_arg)(void *, const char *, const char *);
size_t off;
} u;
const char *help;
const char *argname;
} OptionDef;
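
What a table row looks like under each union layout, sketched with the real sample_fmts entry plus a hypothetical per-file handler opt_map (the flags and argname here are illustrative):

    /* n0.11: context-aware handlers are flagged OPT_FUNC2, stored in func2_arg */
    { "sample_fmts", OPT_EXIT,            {.func_arg  = show_sample_fmts}, "show available audio sample formats" },
    { "map",         HAS_ARG | OPT_FUNC2, {.func2_arg = opt_map},          "set input stream mapping", "spec" },

    /* n1.0: one signature for everything; per-file semantics move to OPT_PERFILE */
    { "sample_fmts", OPT_EXIT,              {.func_arg = show_sample_fmts}, "show available audio sample formats" },
    { "map",         HAS_ARG | OPT_PERFILE, {.func_arg = opt_map},          "set input stream mapping", "spec" },
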
/**
* Print help for all options matching specified flags.
*
* @param options a list of options
* @param msg title of this group. Only printed if at least one option matches.
* @param req_flags print only options which have all those flags set.
* @param rej_flags don't print options which have any of those flags set.
* @param alt_flags print only options that have at least one of those flags set
*/
void show_help_options(const OptionDef *options, const char *msg, int req_flags,
int rej_flags, int alt_flags);
void show_help_options(const OptionDef *options, const char *msg, int mask,
int value);
/**
* Show help for all options with given flags in class and all its
@@ -189,17 +181,6 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
*/
void show_help_children(const AVClass *class, int flags);
/**
* Per-avtool specific help handler. Implemented in each
* avtool, called by show_help().
*/
void show_help_default(const char *opt, const char *arg);
/**
* Generic -h handler common to all avtools.
*/
int show_help(void *optctx, const char *opt, const char *arg);
/**
* Parse the command line arguments.
*
@@ -251,12 +232,10 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec);
*
* @param s Corresponding format context.
* @param st A stream from s for which the options should be filtered.
* @param codec The particular codec for which the options should be filtered.
* If null, the default one is looked up according to the codec id.
* @return a pointer to the created dictionary
*/
AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
AVFormatContext *s, AVStream *st, AVCodec *codec);
AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec,
AVFormatContext *s, AVStream *st);
/**
* Setup AVCodecContext options for avformat_find_stream_info().
@@ -296,81 +275,62 @@ void show_banner(int argc, char **argv, const OptionDef *options);
* libraries.
* This option processing function does not utilize the arguments.
*/
int show_version(void *optctx, const char *opt, const char *arg);
int opt_version(const char *opt, const char *arg);
/**
* Print the license of the program to stdout. The license depends on
* the license of the libraries compiled into the program.
* This option processing function does not utilize the arguments.
*/
int show_license(void *optctx, const char *opt, const char *arg);
int opt_license(const char *opt, const char *arg);
/**
* Print a listing containing all the formats supported by the
* program.
* This option processing function does not utilize the arguments.
*/
int show_formats(void *optctx, const char *opt, const char *arg);
int opt_formats(const char *opt, const char *arg);
/**
* Print a listing containing all the codecs supported by the
* program.
* This option processing function does not utilize the arguments.
*/
int show_codecs(void *optctx, const char *opt, const char *arg);
/**
* Print a listing containing all the decoders supported by the
* program.
*/
int show_decoders(void *optctx, const char *opt, const char *arg);
/**
* Print a listing containing all the encoders supported by the
* program.
*/
int show_encoders(void *optctx, const char *opt, const char *arg);
int opt_codecs(const char *opt, const char *arg);
/**
* Print a listing containing all the filters supported by the
* program.
* This option processing function does not utilize the arguments.
*/
int show_filters(void *optctx, const char *opt, const char *arg);
int opt_filters(const char *opt, const char *arg);
/**
* Print a listing containing all the bit stream filters supported by the
* program.
* This option processing function does not utilize the arguments.
*/
int show_bsfs(void *optctx, const char *opt, const char *arg);
int opt_bsfs(const char *opt, const char *arg);
/**
* Print a listing containing all the protocols supported by the
* program.
* This option processing function does not utilize the arguments.
*/
int show_protocols(void *optctx, const char *opt, const char *arg);
int opt_protocols(const char *opt, const char *arg);
/**
* Print a listing containing all the pixel formats supported by the
* program.
* This option processing function does not utilize the arguments.
*/
int show_pix_fmts(void *optctx, const char *opt, const char *arg);
/**
* Print a listing containing all the standard channel layouts supported by
* the program.
* This option processing function does not utilize the arguments.
*/
int show_layouts(void *optctx, const char *opt, const char *arg);
int opt_pix_fmts(const char *opt, const char *arg);
/**
* Print a listing containing all the sample formats supported by the
* program.
*/
int show_sample_fmts(void *optctx, const char *opt, const char *arg);
int show_sample_fmts(const char *opt, const char *arg);
/**
* Return a positive value if a line read from standard input
@@ -414,7 +374,7 @@ FILE *get_preset_file(char *filename, size_t filename_size,
* Do all the necessary cleanup and abort.
* This function is implemented in the avtools, not cmdutils.
*/
av_noreturn void exit_program(int ret);
void exit_program(int ret);
/**
* Realloc array to hold new_size elements of elem_size.
@@ -426,65 +386,4 @@ av_noreturn void exit_program(int ret);
*/
void *grow_array(void *array, int elem_size, int *size, int new_size);
typedef struct FrameBuffer {
uint8_t *base[4];
uint8_t *data[4];
int linesize[4];
int h, w;
enum PixelFormat pix_fmt;
int refcount;
struct FrameBuffer **pool; ///< head of the buffer pool
struct FrameBuffer *next;
} FrameBuffer;
/**
* Get a frame from the pool. This is intended to be used as a callback for
* AVCodecContext.get_buffer.
*
* @param s codec context. s->opaque must be a pointer to the head of the
* buffer pool.
* @param frame frame->opaque will be set to point to the FrameBuffer
* containing the frame data.
*/
int codec_get_buffer(AVCodecContext *s, AVFrame *frame);
/**
* A callback to be used for AVCodecContext.release_buffer along with
* codec_get_buffer().
*/
void codec_release_buffer(AVCodecContext *s, AVFrame *frame);
/**
* A callback to be used for AVFilterBuffer.free.
* @param fb buffer to free. fb->priv must be a pointer to the FrameBuffer
* containing the buffer data.
*/
void filter_release_buffer(AVFilterBuffer *fb);
/**
* Free all the buffers in the pool. This must be called after all the
* buffers have been released.
*/
void free_buffer_pool(FrameBuffer **pool);
#define GET_PIX_FMT_NAME(pix_fmt)\
const char *name = av_get_pix_fmt_name(pix_fmt);
#define GET_SAMPLE_FMT_NAME(sample_fmt)\
const char *name = av_get_sample_fmt_name(sample_fmt)
#define GET_SAMPLE_RATE_NAME(rate)\
char name[16];\
snprintf(name, sizeof(name), "%d", rate);
#define GET_CH_LAYOUT_NAME(ch_layout)\
char name[16];\
snprintf(name, sizeof(name), "0x%"PRIx64, ch_layout);
#define GET_CH_LAYOUT_DESC(ch_layout)\
char name[128];\
av_get_channel_layout_string(name, sizeof(name), 0, ch_layout);
#endif /* CMDUTILS_H */

cmdutils_common_opts.h

@@ -1,23 +1,20 @@
{ "L" , OPT_EXIT, {.func_arg = show_license}, "show license" },
{ "h" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
{ "?" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
{ "help" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
{ "-help" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
{ "version" , OPT_EXIT, {.func_arg = show_version}, "show version" },
{ "formats" , OPT_EXIT, {.func_arg = show_formats }, "show available formats" },
{ "codecs" , OPT_EXIT, {.func_arg = show_codecs }, "show available codecs" },
{ "decoders" , OPT_EXIT, {.func_arg = show_decoders }, "show available decoders" },
{ "encoders" , OPT_EXIT, {.func_arg = show_encoders }, "show available encoders" },
{ "bsfs" , OPT_EXIT, {.func_arg = show_bsfs }, "show available bit stream filters" },
{ "protocols" , OPT_EXIT, {.func_arg = show_protocols}, "show available protocols" },
{ "filters" , OPT_EXIT, {.func_arg = show_filters }, "show available filters" },
{ "pix_fmts" , OPT_EXIT, {.func_arg = show_pix_fmts }, "show available pixel formats" },
{ "layouts" , OPT_EXIT, {.func_arg = show_layouts }, "show standard channel layouts" },
{ "L", OPT_EXIT, {(void*)opt_license}, "show license" },
{ "h", OPT_EXIT, {(void*)opt_help}, "show help" },
{ "?", OPT_EXIT, {(void*)opt_help}, "show help" },
{ "help", OPT_EXIT, {(void*)opt_help}, "show help" },
{ "-help", OPT_EXIT, {(void*)opt_help}, "show help" },
{ "version", OPT_EXIT, {(void*)opt_version}, "show version" },
{ "formats" , OPT_EXIT, {(void*)opt_formats }, "show available formats" },
{ "codecs" , OPT_EXIT, {(void*)opt_codecs }, "show available codecs" },
{ "bsfs" , OPT_EXIT, {(void*)opt_bsfs }, "show available bit stream filters" },
{ "protocols", OPT_EXIT, {(void*)opt_protocols}, "show available protocols" },
{ "filters", OPT_EXIT, {(void*)opt_filters }, "show available filters" },
{ "pix_fmts" , OPT_EXIT, {(void*)opt_pix_fmts }, "show available pixel formats" },
{ "sample_fmts", OPT_EXIT, {.func_arg = show_sample_fmts }, "show available audio sample formats" },
{ "loglevel" , HAS_ARG, {.func_arg = opt_loglevel}, "set libav* logging level", "loglevel" },
{ "v", HAS_ARG, {.func_arg = opt_loglevel}, "set libav* logging level", "loglevel" },
{ "debug" , HAS_ARG, {.func_arg = opt_codec_debug}, "set debug flags", "flags" },
{ "fdebug" , HAS_ARG, {.func_arg = opt_codec_debug}, "set debug flags", "flags" },
{ "report" , 0, {(void*)opt_report}, "generate a report" },
{ "max_alloc" , HAS_ARG, {.func_arg = opt_max_alloc}, "set maximum size of a single allocated block", "bytes" },
{ "cpuflags" , HAS_ARG | OPT_EXPERT, {.func_arg = opt_cpuflags}, "force specific cpu flags", "flags" },
{ "loglevel", HAS_ARG, {(void*)opt_loglevel}, "set libav* logging level", "loglevel" },
{ "v", HAS_ARG, {(void*)opt_loglevel}, "set libav* logging level", "loglevel" },
{ "debug", HAS_ARG, {(void*)opt_codec_debug}, "set debug flags", "flags" },
{ "fdebug", HAS_ARG, {(void*)opt_codec_debug}, "set debug flags", "flags" },
{ "report", 0, {(void*)opt_report}, "generate a report" },
{ "max_alloc", HAS_ARG, {(void*)opt_max_alloc}, "set maximum size of a single allocated block", "bytes" },
{ "cpuflags", HAS_ARG | OPT_EXPERT, {(void*)opt_cpuflags}, "force specific cpu flags", "flags" },

common.mak

@@ -10,9 +10,8 @@ ifndef SUBDIR
ifndef V
Q = @
ECHO = printf "$(1)\t%s\n" $(2)
BRIEF = CC CXX HOSTCC HOSTLD AS YASM AR LD STRIP CP
SILENT = DEPCC DEPHOSTCC DEPAS DEPYASM RANLIB RM
BRIEF = CC CXX AS YASM AR LD HOSTCC STRIP CP
SILENT = DEPCC YASMDEP RM RANLIB
MSG = $@
M = @$(call ECHO,$(TAG),$@);
$(foreach VAR,$(BRIEF), \
@@ -27,17 +26,15 @@ ALLFFLIBS = avcodec avdevice avfilter avformat avresample avutil postproc swscal
IFLAGS := -I. -I$(SRC_PATH)/
CPPFLAGS := $(IFLAGS) $(CPPFLAGS)
CFLAGS += $(ECFLAGS)
CCFLAGS = $(CPPFLAGS) $(CFLAGS)
ASFLAGS := $(CPPFLAGS) $(ASFLAGS)
CXXFLAGS += $(CPPFLAGS) $(CFLAGS)
YASMFLAGS += $(IFLAGS:%=%/) -I$(SRC_PATH)/libavutil/x86/ -Pconfig.asm
HOSTCCFLAGS = $(IFLAGS) $(HOSTCFLAGS)
LDFLAGS := $(ALLFFLIBS:%=$(LD_PATH)lib%) $(LDFLAGS)
CCFLAGS = $(CFLAGS)
CXXFLAGS := $(CFLAGS) $(CXXFLAGS)
YASMFLAGS += $(IFLAGS) -I$(SRC_PATH)/libavutil/x86/ -Pconfig.asm
HOSTCFLAGS += $(IFLAGS)
LDFLAGS := $(ALLFFLIBS:%=-Llib%) $(LDFLAGS)
define COMPILE
$(call $(1)DEP,$(1))
$($(1)) $($(1)FLAGS) $($(1)_DEPFLAGS) $($(1)_C) $($(1)_O) $<
$($(1)DEP)
$($(1)) $(CPPFLAGS) $($(1)FLAGS) $($(1)_DEPFLAGS) -c $($(1)_O) $<
endef
COMPILE_C = $(call COMPILE,CC)
@@ -56,8 +53,8 @@ COMPILE_S = $(call COMPILE,AS)
%.o: %.S
$(COMPILE_S)
%.h.c:
$(Q)echo '#include "$*.h"' >$@
%.ho: %.h
$(CC) $(CPPFLAGS) $(CFLAGS) -Wno-unused -c -o $@ -x c $<
%.ver: %.v
$(Q)sed 's/$$MAJOR/$($(basename $(@F))_VERSION_MAJOR)/' $^ > $@
@@ -82,8 +79,7 @@ OBJS += $(OBJS-yes)
FFLIBS := $(FFLIBS-yes) $(FFLIBS)
TESTPROGS += $(TESTPROGS-yes)
LDLIBS = $(FFLIBS:%=%$(BUILDSUF))
FFEXTRALIBS := $(LDLIBS:%=$(LD_LIB)) $(EXTRALIBS)
FFEXTRALIBS := $(FFLIBS:%=-l%$(BUILDSUF)) $(EXTRALIBS)
EXAMPLES := $(EXAMPLES:%=$(SUBDIR)%-example$(EXESUF))
OBJS := $(sort $(OBJS:%=$(SUBDIR)%))
@@ -100,31 +96,25 @@ DEP_LIBS := $(foreach NAME,$(FFLIBS),lib$(NAME)/$($(CONFIG_SHARED:yes=S)LIBNAME)
ALLHEADERS := $(subst $(SRC_DIR)/,$(SUBDIR),$(wildcard $(SRC_DIR)/*.h $(SRC_DIR)/$(ARCH)/*.h))
SKIPHEADERS += $(ARCH_HEADERS:%=$(ARCH)/%) $(SKIPHEADERS-)
SKIPHEADERS := $(SKIPHEADERS:%=$(SUBDIR)%)
HOBJS = $(filter-out $(SKIPHEADERS:.h=.h.o),$(ALLHEADERS:.h=.h.o))
checkheaders: $(HOBJS)
.SECONDARY: $(HOBJS:.o=.c)
checkheaders: $(filter-out $(SKIPHEADERS:.h=.ho),$(ALLHEADERS:.h=.ho))
alltools: $(TOOLS)
$(HOSTOBJS): %.o: %.c
$(call COMPILE,HOSTCC)
$(HOSTCC) $(HOSTCFLAGS) -c -o $@ $<
$(HOSTPROGS): %$(HOSTEXESUF): %.o
$(HOSTLD) $(HOSTLDFLAGS) $(HOSTLD_O) $< $(HOSTLIBS)
$(HOSTCC) $(HOSTLDFLAGS) -o $@ $< $(HOSTLIBS)
$(OBJS): | $(sort $(dir $(OBJS)))
$(HOSTOBJS): | $(sort $(dir $(HOSTOBJS)))
$(TESTOBJS): | $(sort $(dir $(TESTOBJS)))
$(HOBJS): | $(sort $(dir $(HOBJS)))
$(TOOLOBJS): | tools
OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOSTOBJS) $(TESTOBJS) $(HOBJS))
OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOSTOBJS) $(TESTOBJS))
CLEANSUFFIXES = *.d *.o *~ *.h.c *.map *.ver *.ho *.gcno *.gcda
CLEANSUFFIXES = *.d *.o *~ *.ho *.map *.ver *.gcno *.gcda
DISTCLEANSUFFIXES = *.pc
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a
clean::
$(RM) $(OBJS) $(OBJS:.o=.d)
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d))
-include $(wildcard $(OBJS:.o=.d) $(TESTOBJS:.o=.d))


@@ -1,85 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* This file was copied from the following newsgroup posting:
*
* Newsgroups: mod.std.unix
* Subject: public domain AT&T getopt source
* Date: 3 Nov 85 19:34:15 GMT
*
* Here's something you've all been waiting for: the AT&T public domain
* source for getopt(3). It is the code which was given out at the 1985
* UNIFORUM conference in Dallas. I obtained it by electronic mail
* directly from AT&T. The people there assure me that it is indeed
* in the public domain.
*/
#include <stdio.h>
#include <string.h>
static int opterr = 1;
static int optind = 1;
static int optopt;
static char *optarg;
#undef fprintf
static int getopt(int argc, char *argv[], char *opts)
{
static int sp = 1;
int c;
char *cp;
if (sp == 1) {
if (optind >= argc ||
argv[optind][0] != '-' || argv[optind][1] == '\0')
return EOF;
else if (!strcmp(argv[optind], "--")) {
optind++;
return EOF;
}
}
optopt = c = argv[optind][sp];
if (c == ':' || (cp = strchr(opts, c)) == NULL) {
fprintf(stderr, ": illegal option -- %c\n", c);
if (argv[optind][++sp] == '\0') {
optind++;
sp = 1;
}
return '?';
}
if (*++cp == ':') {
if (argv[optind][sp+1] != '\0')
optarg = &argv[optind++][sp+1];
else if(++optind >= argc) {
fprintf(stderr, ": option requires an argument -- %c\n", c);
sp = 1;
return '?';
} else
optarg = argv[optind++];
sp = 1;
} else {
if (argv[optind][++sp] == '\0') {
sp = 1;
optind++;
}
optarg = NULL;
}
return c;
}
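Since getopt(), optarg and optind above are all declared static, a caller has to live in the same translation unit; a minimal, purely illustrative driver (the option string "ab:" — flag -a, -b with argument — is an assumption) would look like:

int main(int argc, char *argv[])
{
    int c;
    /* "ab:" means: -a is a plain flag, -b takes an argument */
    while ((c = getopt(argc, argv, "ab:")) != EOF) {
        switch (c) {
        case 'a': printf("flag -a set\n");             break;
        case 'b': printf("-b argument: %s\n", optarg); break;
        case '?': return 1; /* error already reported above */
        }
    }
    /* optind now indexes the first non-option operand */
    for (; optind < argc; optind++)
        printf("operand: %s\n", argv[optind]);
    return 0;
}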


@@ -1,71 +0,0 @@
/*
* C99-compatible snprintf() and vsnprintf() implementations
* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdio.h>
#include <stdarg.h>
#include <limits.h>
#include <string.h>
#include "compat/va_copy.h"
#include "libavutil/error.h"
#if defined(__MINGW32__)
#define EOVERFLOW EFBIG
#endif
int avpriv_snprintf(char *s, size_t n, const char *fmt, ...)
{
va_list ap;
int ret;
va_start(ap, fmt);
ret = avpriv_vsnprintf(s, n, fmt, ap);
va_end(ap);
return ret;
}
int avpriv_vsnprintf(char *s, size_t n, const char *fmt,
va_list ap)
{
int ret;
va_list ap_copy;
if (n == 0)
return _vscprintf(fmt, ap);
else if (n > INT_MAX)
return AVERROR(EOVERFLOW);
/* we use n - 1 here because if the buffer is not big enough, the MS
* runtime libraries don't add a terminating zero at the end. MSDN
* recommends to provide _snprintf/_vsnprintf() a buffer size that
* is one less than the actual buffer, and zero it before calling
* _snprintf/_vsnprintf() to workaround this problem.
* See http://msdn.microsoft.com/en-us/library/1kt27hek(v=vs.80).aspx */
memset(s, 0, n);
va_copy(ap_copy, ap);
ret = _vsnprintf(s, n - 1, fmt, ap_copy);
va_end(ap_copy);
if (ret == -1)
ret = _vscprintf(fmt, ap);
return ret;
}
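The point of the shim shows up in a truncating call; the expected values below follow directly from the memset()/_vscprintf() logic above, while the test program itself is an illustration, not part of the tree:

#include <stdio.h>

int avpriv_snprintf(char *s, size_t n, const char *fmt, ...);

int main(void)
{
    char buf[8];
    /* C99 semantics restored on the MS runtime: buf is always
     * 0-terminated and ret is the full untruncated length. */
    int ret = avpriv_snprintf(buf, sizeof(buf), "%s", "hello, world");
    printf("ret=%d buf=\"%s\"\n", ret, buf); /* ret=12, buf="hello, " */
    return 0;
}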


@@ -1,38 +0,0 @@
/*
* C99-compatible snprintf() and vsnprintf() implementations
* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef COMPAT_SNPRINTF_H
#define COMPAT_SNPRINTF_H
#include <stdarg.h>
#include <stdio.h>
int avpriv_snprintf(char *s, size_t n, const char *fmt, ...);
int avpriv_vsnprintf(char *s, size_t n, const char *fmt, va_list ap);
#undef snprintf
#undef _snprintf
#undef vsnprintf
#define snprintf avpriv_snprintf
#define _snprintf avpriv_snprintf
#define vsnprintf avpriv_vsnprintf
#endif /* COMPAT_SNPRINTF_H */


@@ -1,94 +0,0 @@
/*
* C99-compatible strtod() implementation
* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <ctype.h>
#include <limits.h>
#include <stdlib.h>
#include "libavutil/avstring.h"
#include "libavutil/mathematics.h"
static char *check_nan_suffix(char *s)
{
char *start = s;
if (*s++ != '(')
return start;
while ((*s >= 'a' && *s <= 'z') || (*s >= 'A' && *s <= 'Z') ||
(*s >= '0' && *s <= '9') || *s == '_')
s++;
return *s == ')' ? s + 1 : start;
}
#undef strtod
double strtod(const char *, char **);
double avpriv_strtod(const char *nptr, char **endptr)
{
char *end;
double res;
/* Skip leading spaces */
while (isspace(*nptr))
nptr++;
if (!av_strncasecmp(nptr, "infinity", 8)) {
end = nptr + 8;
res = INFINITY;
} else if (!av_strncasecmp(nptr, "inf", 3)) {
end = nptr + 3;
res = INFINITY;
} else if (!av_strncasecmp(nptr, "+infinity", 9)) {
end = nptr + 9;
res = INFINITY;
} else if (!av_strncasecmp(nptr, "+inf", 4)) {
end = nptr + 4;
res = INFINITY;
} else if (!av_strncasecmp(nptr, "-infinity", 9)) {
end = nptr + 9;
res = -INFINITY;
} else if (!av_strncasecmp(nptr, "-inf", 4)) {
end = nptr + 4;
res = -INFINITY;
} else if (!av_strncasecmp(nptr, "nan", 3)) {
end = check_nan_suffix(nptr + 3);
res = NAN;
} else if (!av_strncasecmp(nptr, "+nan", 4) ||
!av_strncasecmp(nptr, "-nan", 4)) {
end = check_nan_suffix(nptr + 4);
res = NAN;
} else if (!av_strncasecmp(nptr, "0x", 2) ||
!av_strncasecmp(nptr, "-0x", 3) ||
!av_strncasecmp(nptr, "+0x", 3)) {
/* FIXME this doesn't handle exponents, non-integers (float/double)
* and numbers too large for long long */
res = strtoll(nptr, &end, 16);
} else {
res = strtod(nptr, &end);
}
if (endptr)
*endptr = end;
return res;
}
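A few sample inputs and the branch of the replacement that handles each, assuming the function above is linked in (the test scaffold is illustrative only):

#include <stdio.h>

double avpriv_strtod(const char *nptr, char **endptr);

int main(void)
{
    char *end;
    printf("%g\n", avpriv_strtod("1.5e3", &end));      /* 1500, plain strtod() path */
    printf("%g\n", avpriv_strtod("-infinity", &end));  /* -inf */
    printf("%g\n", avpriv_strtod("nan(chars)", &end)); /* nan, "(chars)" suffix consumed */
    printf("%g\n", avpriv_strtod("0x1A", &end));       /* 26, hex path via strtoll() */
    return 0;
}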

configure (vendored), 1557 changed lines: file diff suppressed because it is too large.


@@ -4,7 +4,7 @@ since the last major version increase.
The last version increases were:
libavcodec: 2012-01-27
libavdevice: 2011-04-18
libavfilter: 2012-06-22
libavfilter: 2011-04-18
libavformat: 2012-01-27
libavresample: 2012-04-24
libpostproc: 2011-04-18
@@ -15,58 +15,6 @@ libavutil: 2011-04-18
API changes, most recent first:
2012-09-01 - xxxxxxx - lavu 51.72.100 - parseutils.h
Add av_small_strptime() time parsing function.
Can be used as a stripped-down replacement for strptime(), on
systems which do not support it.
2012-08-13 - xxxxxxx - lavfi 3.8.100 - avfilter.h
Add avfilter_get_class() function, and priv_class field to AVFilter
struct.
2012-08-13 - xxxxxxx - lavu 51.69.100 - opt.h
Add AV_OPT_FLAG_FILTERING_PARAM symbol in opt.h.
2012-07-31 - xxxxxxx - lavc 54.46.100
Add channels field to AVFrame.
2012-07-30 - xxxxxxx - lavu 51.66.100
Add av_get_channel_description()
and av_get_standard_channel_layout() functions.
2012-07-20 - xxxxxxx - lavc 54.43.100
Add decode_error_flags field to AVFrame.
2012-07-20 - xxxxxxx - lavf 54.18.100
Add avformat_match_stream_specifier() function.
2012-07-14 - xxxxxxx - lavc 54.38.100 - avcodec.h
Add metadata to AVFrame, and the accessor functions
av_frame_get_metadata() and av_frame_set_metadata().
2012-07-10 - xxxxxxx - lavc 54.33.100
Add av_fast_padded_mallocz().
2012-07-10 - xxxxxxx - lavfi 3.2.0 - avfilter.h
Add init_opaque() callback to AVFilter struct.
2012-06-26 - xxxxxxx - lavu 51.63.100 - imgutils.h
Add functions to libavutil/imgutils.h:
av_image_get_buffer_size()
av_image_fill_arrays()
av_image_copy_to_buffer()
2012-06-24 - xxxxxxx - lavu 51.62.100 - version.h
version moved from avutil.h to version.h
2012-04-11 - xxxxxxx - lavu 51.58.100 - error.h
Add av_make_error_string() and av_err2str() utilities to
libavutil/error.h.
2012-06-05 - xxxxxxx - lavc 54.24.100
Add pkt_duration field to AVFrame.
2012-05-24 - xxxxxxx - lavu 51.54.100
Move AVPALETTE_SIZE and AVPALETTE_COUNT macros from
libavcodec/avcodec.h to libavutil/pixfmt.h.
@@ -83,76 +31,6 @@ API changes, most recent first:
2012-03-26 - a67d9cf - lavfi 2.66.100
Add avfilter_fill_frame_from_{audio_,}buffer_ref() functions.
2012-09-23 - xxxxxxx - lavc 54.28.0 - avcodec.h
Add avcodec_free_frame(). This function must now
be used for freeing an AVFrame.
2012-09-12 - xxxxxxx - lavu 51.41.0 - audioconvert.h
Added AV_CH_LOW_FREQUENCY_2 channel mask value.
2012-09-04 - xxxxxxx - lavu 51.40.0 - opt.h
Reordered the fields in default_val in AVOption, changed which
default_val field is used for which AVOptionType.
2012-xx-xx - xxxxxxx - lavc 54.26.1 - avcodec.h
Add codec descriptor properties AV_CODEC_PROP_LOSSY and
AV_CODEC_PROP_LOSSLESS.
2012-08-18 - lavc 54.26 - avcodec.h
Add codec descriptors for accessing codec properties without having
to refer to a specific decoder or encoder.
c223d79 - Add an AVCodecDescriptor struct and functions
avcodec_descriptor_get() and avcodec_descriptor_next().
51efed1 - Add AVCodecDescriptor.props and AV_CODEC_PROP_INTRA_ONLY.
91e59fe - Add avcodec_descriptor_get_by_name().
2012-08-08 - 987170c - lavu 51.38 - dict.h
Add av_dict_count().
2012-08-07 - 104e10f - lavc 54.25 - avcodec.h
Rename CodecID to AVCodecID and all CODEC_ID_* to AV_CODEC_ID_*.
To provide backwards compatibility, CodecID is now #defined as AVCodecID.
Note that this can break user code that includes avcodec.h and uses the
'CodecID' identifier. Such code should either #undef CodecID or stop using the
CodecID name.
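An illustrative sketch of the breakage described above: avcodec.h now carries "#define CodecID AVCodecID" for compatibility, so user code that used the name for itself must undefine it first (the enum below is a hypothetical user type, not an FFmpeg one):

#include <libavcodec/avcodec.h>
#undef CodecID                              /* drop the compatibility macro */
enum CodecID { MY_CODEC_FOO, MY_CODEC_BAR }; /* user's own identifier again */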
2012-08-03 - 239fdf1 - lavu 51.37.1 - cpu.h
lsws 2.1.1 - swscale.h
Rename AV_CPU_FLAG_MMX2 ---> AV_CPU_FLAG_MMXEXT.
Rename SWS_CPU_CAPS_MMX2 ---> SWS_CPU_CAPS_MMXEXT.
2012-07-29 - 681ed00 - lavf 54.13.0 - avformat.h
Add AVFMT_FLAG_NOBUFFER for low latency use cases.
2012-07-10 - 5fade8a - lavu 51.37.0
Add av_malloc_array() and av_mallocz_array()
2012-06-22 - d3d3a32 - lavu 51.34.0
Add av_usleep()
2012-06-20 - ae0a301 - lavu 51.33.0
Move av_gettime() to libavutil, add libavutil/time.h
2012-06-09 - 3971be0 - lavr 0.0.3
Add a parameter to avresample_build_matrix() for Dolby/DPLII downmixing.
2012-06-12 - 9baeff9 - lavfi 2.23.0 - avfilter.h
Add AVFilterContext.nb_inputs/outputs. Deprecate
AVFilterContext.input/output_count.
2012-06-12 - 84b9fbe - lavfi 2.22.0 - avfilter.h
Add avfilter_pad_get_type() and avfilter_pad_get_name(). Those
should now be used instead of accessing AVFilterPad members
directly.
2012-06-12 - b0f0dfc - lavu 51.32.0 - audioconvert.h
Add av_get_channel_layout_channel_index(), av_get_channel_name()
and av_channel_layout_extract_channel().
2012-05-25 - 154486f - lavu 51.31.0 - opt.h
Add av_opt_set_bin()
2012-05-15 - lavfi 2.17.0
Add support for audio filters
ac71230/a2cd9be - add video/audio buffer sink in a new installed


@@ -6,21 +6,19 @@ HTMLPAGES = $(PROGS-yes:%=doc/%.html) \
doc/fate.html \
doc/general.html \
doc/git-howto.html \
doc/libavfilter.html \
doc/platform.html \
doc/syntax.html \
TXTPAGES = doc/fate.txt \
DOCS-$(CONFIG_HTMLPAGES) += $(HTMLPAGES)
DOCS-$(CONFIG_PODPAGES) += $(PODPAGES)
DOCS-$(CONFIG_MANPAGES) += $(MANPAGES)
DOCS-$(CONFIG_TXTPAGES) += $(TXTPAGES)
DOCS = $(DOCS-yes)
DOCS = $(HTMLPAGES) $(MANPAGES) $(PODPAGES)
ifdef HAVE_MAKEINFO
DOCS += $(TXTPAGES)
endif
all-$(CONFIG_DOC): doc
doc: documentation
all-$(CONFIG_DOC): documentation
documentation: $(DOCS)
@@ -31,11 +29,13 @@ doc/%.txt: doc/%.texi
$(Q)$(TEXIDEP)
$(M)makeinfo --force --no-headers -o $@ $< 2>/dev/null
doc/print_options.o: libavformat/options_table.h libavcodec/options_table.h
GENTEXI = format codec
GENTEXI := $(GENTEXI:%=doc/avoptions_%.texi)
$(GENTEXI): TAG = GENTEXI
$(GENTEXI): doc/avoptions_%.texi: doc/print_options$(HOSTEXESUF)
$(GENTEXI): doc/avoptions_%.texi: doc/print_options
$(M)doc/print_options $* > $@
doc/%.html: TAG = HTML
@@ -46,7 +46,7 @@ doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
doc/%.pod: TAG = POD
doc/%.pod: doc/%.texi $(GENTEXI)
$(Q)$(TEXIDEP)
$(M)perl $(SRC_PATH)/doc/texi2pod.pl -Idoc $< $@
$(M)$(SRC_PATH)/doc/texi2pod.pl -Idoc $< $@
doc/%.1: TAG = MAN
doc/%.1: doc/%.pod $(GENTEXI)
@@ -54,15 +54,11 @@ doc/%.1: doc/%.pod $(GENTEXI)
$(DOCS): | doc/
install-man:
ifdef CONFIG_MANPAGES
install-progs-$(CONFIG_DOC): install-man
install-man: $(MANPAGES)
$(Q)mkdir -p "$(MANDIR)/man1"
$(INSTALL) -m 644 $(MANPAGES) "$(MANDIR)/man1"
endif
uninstall: uninstall-man
@@ -74,4 +70,4 @@ clean::
-include $(wildcard $(DOCS:%=%.d))
.PHONY: doc documentation
.PHONY: documentation


@@ -1,7 +1,7 @@
Release Notes
=============
* 1.0 "Angel" September, 2012
* 0.11 "Happiness" May, 2012
General notes


@@ -18,10 +18,10 @@ are used to precisely specify which stream(s) a given option belongs to.
A stream specifier is a string generally appended to the option name and
separated from it by a colon. E.g. @code{-codec:a:1 ac3} option contains
@code{a:1} stream specifier, which matches the second audio stream. Therefore it
@code{a:1} stream specifer, which matches the second audio stream. Therefore it
would select the ac3 codec for the second audio stream.
A stream specifier can match several streams, the option is then applied to all
A stream specifier can match several stream, the option is then applied to all
of them. E.g. the stream specifier in @code{-b:a 128k} matches all audio
streams.
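For instance, both mechanisms can be combined in one command line, using only the options already quoted above:
@example
ffmpeg -i INPUT -codec:a:1 ac3 -b:a 128k OUTPUT
@end example
Here the ac3 codec is forced for the second audio stream only, while the 128k bitrate applies to every audio stream.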
@@ -44,7 +44,6 @@ program with id @var{program_id}. Otherwise matches all streams in this program.
@item #@var{stream_id}
Matches the stream by format-specific ID.
@end table
@section Generic options
These options are shared amongst the av* tools.
@@ -54,29 +53,8 @@ These options are shared amongst the av* tools.
@item -L
Show license.
@item -h, -?, -help, --help [@var{arg}]
Show help. An optional parameter may be specified to print help about a specific
item.
Possible values of @var{arg} are:
@table @option
@item decoder=@var{decoder_name}
Print detailed information about the decoder named @var{decoder_name}. Use the
@option{-decoders} option to get a list of all decoders.
@item encoder=@var{encoder_name}
Print detailed information about the encoder named @var{encoder_name}. Use the
@option{-encoders} option to get a list of all encoders.
@item demuxer=@var{demuxer_name}
Print detailed information about the demuxer named @var{demuxer_name}. Use the
@option{-formats} option to get a list of all demuxers and muxers.
@item muxer=@var{muxer_name}
Print detailed information about the muxer named @var{muxer_name}. Use the
@option{-formats} option to get a list of all muxers and demuxers.
@end table
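As an illustration, detailed help for a single decoder can be requested with:
@example
ffmpeg -h decoder=mpeg4
@end example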
@item -h, -?, -help, --help
Show help.
@item -version
Show version.
@@ -93,16 +71,23 @@ Encoding available
@end table
@item -codecs
Show all codecs known to libavcodec.
Show available codecs.
Note that the term 'codec' is used throughout this documentation as a shortcut
for what is more correctly called a media bitstream format.
@item -decoders
Show available decoders.
@item -encoders
Show all available encoders.
The fields preceding the codec names have the following meanings:
@table @samp
@item D
Decoding available
@item E
Encoding available
@item V/A/S
Video/audio/subtitle codec
@item S
Codec supports slices
@item D
Codec supports direct rendering
@item T
Codec can handle input truncated at random locations instead of only at frame boundaries
@end table
@item -bsfs
Show available bitstream filters.
@@ -119,9 +104,6 @@ Show available pixel formats.
@item -sample_fmts
Show available sample formats.
@item -layouts
Show channel names and standard channel layouts.
@item -loglevel @var{loglevel} | -v @var{loglevel}
Set the logging level used by the library.
@var{loglevel} is a number or a string containing one of the following values:


@@ -23,31 +23,8 @@ The description of some of the currently available demuxers follows.
Image file demuxer.
This demuxer reads from a list of image files specified by a pattern.
The syntax and meaning of the pattern is specified by the
option @var{pattern_type}.
The pattern may contain a suffix which is used to automatically
determine the format of the images contained in the files.
The size, the pixel format, and the format of each image must be the
same for all the files in the sequence.
This demuxer accepts the following options:
@table @option
@item framerate
Set the framerate for the video stream. It defaults to 25.
@item loop
If set to 1, loop over the input. Default value is 0.
@item pattern_type
Select the pattern type used to interpret the provided filename.
@var{pattern_type} accepts one of the following values.
@table @option
@item sequence
Select a sequence pattern type, used to specify a sequence of files
indexed by sequential numbers.
A sequence pattern may contain the string "%d" or "%0@var{N}d", which
The pattern may contain the string "%d" or "%0@var{N}d", which
specifies the position of the characters representing a sequential
number in each filename matched by the pattern. If the form
"%d0@var{N}d" is used, the string representing the number in each
@@ -55,11 +32,13 @@ filename is 0-padded and @var{N} is the total number of 0-padded
digits representing the number. The literal character '%' can be
specified in the pattern with the string "%%".
If the sequence pattern contains "%d" or "%0@var{N}d", the first filename of
If the pattern contains "%d" or "%0@var{N}d", the first filename of
the file list specified by the pattern must contain a number
inclusively contained between @var{start_number} and
@var{start_number}+@var{start_number_range}-1, and all the following
numbers must be sequential.
inclusively contained between 0 and 4, all the following numbers must
be sequential. This limitation will hopefully be fixed.
The pattern may contain a suffix which is used to automatically
determine the format of the images contained in the files.
For example the pattern "img-%03d.bmp" will match a sequence of
filenames of the form @file{img-001.bmp}, @file{img-002.bmp}, ...,
@@ -67,6 +46,17 @@ filenames of the form @file{img-001.bmp}, @file{img-002.bmp}, ...,
sequence of filenames of the form @file{i%m%g-1.jpg},
@file{i%m%g-2.jpg}, ..., @file{i%m%g-10.jpg}, etc.
The size, the pixel format, and the format of each image must be the
same for all the files in the sequence.
The following example shows how to use @command{ffmpeg} for creating a
video from the images in the file sequence @file{img-001.jpeg},
@file{img-002.jpeg}, ..., assuming an input frame rate of 10 frames per
second:
@example
ffmpeg -i 'img-%03d.jpeg' -r 10 out.mkv
@end example
Note that the pattern does not necessarily have to contain "%d" or
"%0@var{N}d", for example to convert a single image file
@file{img.jpeg} you can employ the command:
@@ -74,75 +64,6 @@ Note that the pattern does not necessarily have to contain "%d" or
ffmpeg -i img.jpeg img.png
@end example
@item glob
Select a glob wildcard pattern type.
The pattern is interpreted like a @code{glob()} pattern. This is only
selectable if libavformat was compiled with globbing support.
@item glob_sequence @emph{(deprecated, will be removed)}
Select a mixed glob wildcard/sequence pattern.
If your version of libavformat was compiled with globbing support, and
the provided pattern contains at least one glob meta character among
@code{%*?[]@{@}} that is preceded by an unescaped "%", the pattern is
interpreted like a @code{glob()} pattern, otherwise it is interpreted
like a sequence pattern.
All glob special characters @code{%*?[]@{@}} must be prefixed
with "%". To escape a literal "%" you shall use "%%".
For example the pattern @code{foo-%*.jpeg} will match all the
filenames prefixed by "foo-" and terminating with ".jpeg", and
@code{foo-%?%?%?.jpeg} will match all the filenames prefixed with
"foo-", followed by a sequence of three characters, and terminating
with ".jpeg".
This pattern type is deprecated in favor of @var{glob} and
@var{sequence}.
@end table
Default value is @var{glob_sequence}.
@item pixel_format
Set the pixel format of the images to read. If not specified the pixel
format is guessed from the first image file in the sequence.
@item start_number
Set the index of the file matched by the image file pattern to start
to read from. Default value is 0.
@item start_number_range
Set the index interval range to check when looking for the first image
file in the sequence, starting from @var{start_number}. Default value
is 5.
@item video_size
Set the video size of the images to read. If not specified the video
size is guessed from the first image file in the sequence.
@end table
@subsection Examples
@itemize
@item
Use @command{ffmpeg} for creating a video from the images in the file
sequence @file{img-001.jpeg}, @file{img-002.jpeg}, ..., assuming an
input frame rate of 10 frames per second:
@example
ffmpeg -i 'img-%03d.jpeg' -r 10 out.mkv
@end example
@item
As above, but start by reading from a file with index 100 in the sequence:
@example
ffmpeg -start_number 100 -i 'img-%03d.jpeg' -r 10 out.mkv
@end example
@item
Read images matching the "*.png" glob pattern, that is all the files
terminating with the ".png" suffix:
@example
ffmpeg -pattern_type glob -i "*.png" -r 10 out.mkv
@end example
@end itemize
@section applehttp
Apple HTTP Live Streaming demuxer.


@@ -204,8 +204,8 @@ For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
(c-add-style "ffmpeg"
'("k&r"
(c-basic-offset . 4)
(indent-tabs-mode . nil)
(show-trailing-whitespace . t)
(indent-tabs-mode nil)
(show-trailing-whitespace t)
(c-offsets-alist
(statement-cont . (c-lineup-assignments +)))
)
@@ -393,9 +393,7 @@ send a reminder by email. Your patch should eventually be dealt with.
@item
Did you register it in @file{allcodecs.c} or @file{allformats.c}?
@item
Did you add the AVCodecID to @file{avcodec.h}?
When adding new codec IDs, also add an entry to the codec descriptor
list in @file{libavcodec/codec_desc.c}.
Did you add the CodecID to @file{avcodec.h}?
@item
If it has a fourCC, did you add it to @file{libavformat/riff.c},
even if it is only a decoder?
@@ -491,10 +489,6 @@ send a reminder by email. Your patch should eventually be dealt with.
Consider adding a regression test for your code.
@item
If you added YASM code, please check that things still work with --disable-yasm
@item
Make sure you check the return values of function and return appropriate
error codes. Especially memory allocation functions like @code{av_malloc()}
are notoriously left unchecked, which is a serious problem.
@end enumerate
@section Patch review process
@@ -535,16 +529,4 @@ Running 'make fate' accomplishes this, please see @url{fate.html} for details.
this case, the reference results of the regression tests shall be modified
accordingly].
@subsection Adding files to the fate-suite dataset
When there is no muxer or encoder available to generate test media for a
specific test, the media has to be included in the fate-suite.
First please make sure that the sample file is as small as possible to test the
respective decoder or demuxer sufficiently. Large files increase network
bandwidth and disk space requirements.
Once you have a working fate test and fate sample, provide, in the commit
message or introductory message for the patch series that you post to
the ffmpeg-devel mailing list, a direct link to download the sample media.
@bye


@@ -511,12 +511,6 @@ rc_2pass_vbr_minsection_pct
@item slices
@code{VP8E_SET_TOKEN_PARTITIONS}
@item max-intra-rate
@code{VP8E_SET_MAX_INTRA_BITRATE_PCT}
@item force_key_frames
@code{VPX_EFLAG_FORCE_KF}
@item Alternate reference frame related
@table @option
@item vp8flags altref


@@ -21,78 +21,30 @@ The following unary operators are available: @code{+}, @code{-}.
The following functions are available:
@table @option
@item sinh(x)
Compute hyperbolic sine of @var{x}.
@item cosh(x)
Compute hyperbolic cosine of @var{x}.
@item tanh(x)
Compute hyperbolic tangent of @var{x}.
@item sin(x)
Compute sine of @var{x}.
@item cos(x)
Compute cosine of @var{x}.
@item tan(x)
Compute tangent of @var{x}.
@item atan(x)
Compute arctangent of @var{x}.
@item asin(x)
Compute arcsine of @var{x}.
@item acos(x)
Compute arccosine of @var{x}.
@item exp(x)
Compute exponential of @var{x} (with base @code{e}, Euler's number).
@item log(x)
Compute natural logarithm of @var{x}.
@item abs(x)
Compute absolute value of @var{x}.
@item squish(x)
Compute expression @code{1/(1 + exp(4*x))}.
@item gauss(x)
Compute Gauss function of @var{x}, corresponding to
@code{exp(-x*x/2) / sqrt(2*PI)}.
@item isinf(x)
Return 1.0 if @var{x} is +/-INFINITY, 0.0 otherwise.
@item isnan(x)
Return 1.0 if @var{x} is NAN, 0.0 otherwise.
@item mod(x, y)
Compute the remainder of division of @var{x} by @var{y}.
@item max(x, y)
Return the maximum between @var{x} and @var{y}.
@item min(x, y)
Return the minimum between @var{x} and @var{y}.
@item eq(x, y)
Return 1 if @var{x} and @var{y} are equivalent, 0 otherwise.
@item gte(x, y)
Return 1 if @var{x} is greater than or equal to @var{y}, 0 otherwise.
@item gt(x, y)
Return 1 if @var{x} is greater than @var{y}, 0 otherwise.
@item lte(x, y)
Return 1 if @var{x} is lesser than or equal to @var{y}, 0 otherwise.
@item lt(x, y)
Return 1 if @var{x} is lesser than @var{y}, 0 otherwise.
@item st(var, expr)
Store the value of the expression @var{expr} in an internal
variable. @var{var} specifies the number of the variable where to

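These expressions are evaluated by libavutil's public eval API; a minimal standalone sketch (the expression string is arbitrary, and the st()/ld() use is shown only for illustration):

#include <stdio.h>
#include <libavutil/eval.h>

int main(void)
{
    double v;
    /* store max(3,4) in internal variable 0, then load it and add 1: v = 5 */
    int ret = av_expr_parse_and_eval(&v, "st(0, max(3,4)); ld(0) + 1",
                                     NULL, NULL,             /* no user constants */
                                     NULL, NULL, NULL, NULL, /* no user functions */
                                     NULL, 0, NULL);
    if (ret >= 0)
        printf("result: %f\n", v); /* 5.000000 */
    return 0;
}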

@@ -3,21 +3,20 @@ FFMPEG_LIBS= libavdevice \
libavformat \
libavfilter \
libavcodec \
libavresample \
libswresample \
libswscale \
libavutil \
CFLAGS += -Wall -O2 -g
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
CFLAGS += $(shell pkg-config --cflags $(FFMPEG_LIBS))
LDLIBS += $(shell pkg-config --libs $(FFMPEG_LIBS))
EXAMPLES= decoding_encoding \
demuxing \
filtering_video \
filtering_audio \
metadata \
muxing \
scaling_video \
OBJS=$(addsuffix .o,$(EXAMPLES))
@@ -25,12 +24,9 @@ OBJS=$(addsuffix .o,$(EXAMPLES))
decoding_encoding: LDLIBS += -lm
muxing: LDLIBS += -lm
.phony: all clean-test clean
.phony: all clean
all: $(OBJS) $(EXAMPLES)
clean-test:
$(RM) test*.pgm test.h264 test.mp2 test.sw test.mpg
clean: clean-test
$(RM) $(EXAMPLES) $(OBJS)
clean:
rm -rf $(EXAMPLES) $(OBJS)


@@ -31,11 +31,9 @@
#include <math.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/audioconvert.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
@@ -43,59 +41,6 @@
#define AUDIO_INBUF_SIZE 20480
#define AUDIO_REFILL_THRESH 4096
/* check that a given sample format is supported by the encoder */
static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
{
const enum AVSampleFormat *p = codec->sample_fmts;
while (*p != AV_SAMPLE_FMT_NONE) {
if (*p == sample_fmt)
return 1;
p++;
}
return 0;
}
/* just pick the highest supported samplerate */
static int select_sample_rate(AVCodec *codec)
{
const int *p;
int best_samplerate = 0;
if (!codec->supported_samplerates)
return 44100;
p = codec->supported_samplerates;
while (*p) {
best_samplerate = FFMAX(*p, best_samplerate);
p++;
}
return best_samplerate;
}
/* select layout with the highest channel count */
static uint64_t select_channel_layout(AVCodec *codec)
{
const uint64_t *p;
uint64_t best_ch_layout = 0;
int best_nb_channels = 0;
if (!codec->channel_layouts)
return AV_CH_LAYOUT_STEREO;
p = codec->channel_layouts;
while (*p) {
int nb_channels = av_get_channel_layout_nb_channels(*p);
if (nb_channels > best_nb_channels) {
best_ch_layout = *p;
best_nb_channels = nb_channels;
}
p++;
}
return best_ch_layout;
}
/*
* Audio encoding example
*/
@@ -103,20 +48,18 @@ static void audio_encode_example(const char *filename)
{
AVCodec *codec;
AVCodecContext *c= NULL;
AVFrame *frame;
AVPacket pkt;
int i, j, k, ret, got_output;
int buffer_size;
int frame_size, i, j, out_size, outbuf_size;
FILE *f;
uint16_t *samples;
short *samples;
float t, tincr;
uint8_t *outbuf;
printf("Encode audio file %s\n", filename);
/* find the MP2 encoder */
codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
codec = avcodec_find_encoder(CODEC_ID_MP2);
if (!codec) {
fprintf(stderr, "Codec not found\n");
fprintf(stderr, "codec not found\n");
exit(1);
}
@@ -124,58 +67,25 @@ static void audio_encode_example(const char *filename)
/* put sample parameters */
c->bit_rate = 64000;
/* check that the encoder supports s16 pcm input */
c->sample_rate = 44100;
c->channels = 2;
c->sample_fmt = AV_SAMPLE_FMT_S16;
if (!check_sample_fmt(codec, c->sample_fmt)) {
fprintf(stderr, "Encoder does not support sample format %s",
av_get_sample_fmt_name(c->sample_fmt));
exit(1);
}
/* select other audio parameters supported by the encoder */
c->sample_rate = select_sample_rate(codec);
c->channel_layout = select_channel_layout(codec);
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
fprintf(stderr, "could not open codec\n");
exit(1);
}
/* the codec gives us the frame size, in samples */
frame_size = c->frame_size;
samples = malloc(frame_size * 2 * c->channels);
outbuf_size = 10000;
outbuf = malloc(outbuf_size);
f = fopen(filename, "wb");
if (!f) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
/* frame containing input raw audio */
frame = avcodec_alloc_frame();
if (!frame) {
fprintf(stderr, "Could not allocate audio frame\n");
exit(1);
}
frame->nb_samples = c->frame_size;
frame->format = c->sample_fmt;
frame->channel_layout = c->channel_layout;
/* the codec gives us the frame size, in samples,
* we calculate the size of the samples buffer in bytes */
buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
c->sample_fmt, 0);
samples = av_malloc(buffer_size);
if (!samples) {
fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
buffer_size);
exit(1);
}
/* setup the data pointers in the AVFrame */
ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
(const uint8_t*)samples, buffer_size, 0);
if (ret < 0) {
fprintf(stderr, "Could not setup audio frame\n");
fprintf(stderr, "could not open %s\n", filename);
exit(1);
}
@@ -183,46 +93,19 @@ static void audio_encode_example(const char *filename)
t = 0;
tincr = 2 * M_PI * 440.0 / c->sample_rate;
for(i=0;i<200;i++) {
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
for (j = 0; j < c->frame_size; j++) {
for(j=0;j<frame_size;j++) {
samples[2*j] = (int)(sin(t) * 10000);
for (k = 1; k < c->channels; k++)
samples[2*j + k] = samples[2*j];
samples[2*j+1] = samples[2*j];
t += tincr;
}
/* encode the samples */
ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding audio frame\n");
exit(1);
}
if (got_output) {
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
}
/* get the delayed frames */
for (got_output = 1; got_output; i++) {
ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
out_size = avcodec_encode_audio(c, outbuf, outbuf_size, samples);
fwrite(outbuf, 1, out_size, f);
}
fclose(f);
free(outbuf);
free(samples);
av_freep(&samples);
avcodec_free_frame(&frame);
avcodec_close(c);
av_free(c);
}
@@ -242,12 +125,12 @@ static void audio_decode_example(const char *outfilename, const char *filename)
av_init_packet(&avpkt);
printf("Decode audio file %s to %s\n", filename, outfilename);
printf("Decode audio file %s\n", filename);
/* find the mpeg audio decoder */
codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
codec = avcodec_find_decoder(CODEC_ID_MP2);
if (!codec) {
fprintf(stderr, "Codec not found\n");
fprintf(stderr, "codec not found\n");
exit(1);
}
@@ -255,13 +138,13 @@ static void audio_decode_example(const char *outfilename, const char *filename)
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
fprintf(stderr, "could not open codec\n");
exit(1);
}
f = fopen(filename, "rb");
if (!f) {
fprintf(stderr, "Could not open %s\n", filename);
fprintf(stderr, "could not open %s\n", filename);
exit(1);
}
outfile = fopen(outfilename, "wb");
@@ -279,7 +162,7 @@ static void audio_decode_example(const char *outfilename, const char *filename)
if (!decoded_frame) {
if (!(decoded_frame = avcodec_alloc_frame())) {
fprintf(stderr, "Could not allocate audio frame\n");
fprintf(stderr, "out of memory\n");
exit(1);
}
} else
@@ -320,7 +203,7 @@ static void audio_decode_example(const char *outfilename, const char *filename)
avcodec_close(c);
av_free(c);
avcodec_free_frame(&decoded_frame);
av_free(decoded_frame);
}
/*
@@ -330,22 +213,23 @@ static void video_encode_example(const char *filename, int codec_id)
{
AVCodec *codec;
AVCodecContext *c= NULL;
int i, ret, x, y, got_output;
int i, out_size, x, y, outbuf_size;
FILE *f;
AVFrame *frame;
AVPacket pkt;
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
AVFrame *picture;
uint8_t *outbuf;
int had_output=0;
printf("Encode video file %s\n", filename);
/* find the video encoder */
codec = avcodec_find_encoder(codec_id);
if (!codec) {
fprintf(stderr, "Codec not found\n");
fprintf(stderr, "codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
picture= avcodec_alloc_frame();
/* put sample parameters */
c->bit_rate = 400000;
@@ -358,103 +242,79 @@ static void video_encode_example(const char *filename, int codec_id)
c->max_b_frames=1;
c->pix_fmt = PIX_FMT_YUV420P;
if(codec_id == AV_CODEC_ID_H264)
if(codec_id == CODEC_ID_H264)
av_opt_set(c->priv_data, "preset", "slow", 0);
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
fprintf(stderr, "could not open codec\n");
exit(1);
}
f = fopen(filename, "wb");
if (!f) {
fprintf(stderr, "Could not open %s\n", filename);
fprintf(stderr, "could not open %s\n", filename);
exit(1);
}
frame = avcodec_alloc_frame();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
/* alloc image and output buffer */
outbuf_size = 100000 + 12*c->width*c->height;
outbuf = malloc(outbuf_size);
/* the image can be allocated by any means and av_image_alloc() is
* just the most convenient way if av_malloc() is to be used */
ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
c->pix_fmt, 32);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw picture buffer\n");
exit(1);
}
av_image_alloc(picture->data, picture->linesize,
c->width, c->height, c->pix_fmt, 1);
/* encode 1 second of video */
for(i=0;i<25;i++) {
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
fflush(stdout);
/* prepare a dummy image */
/* Y */
for(y=0;y<c->height;y++) {
for(x=0;x<c->width;x++) {
frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
}
}
/* Cb and Cr */
for(y=0;y<c->height/2;y++) {
for(x=0;x<c->width/2;x++) {
frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
}
}
frame->pts = i;
/* encode the image */
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
had_output |= out_size;
printf("encoding frame %3d (size=%5d)\n", i, out_size);
fwrite(outbuf, 1, out_size, f);
}
/* get the delayed frames */
for (got_output = 1; got_output; i++) {
for(; out_size || !had_output; i++) {
fflush(stdout);
ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
had_output |= out_size;
printf("write frame %3d (size=%5d)\n", i, out_size);
fwrite(outbuf, 1, out_size, f);
}
/* add sequence end code to have a real mpeg file */
fwrite(endcode, 1, sizeof(endcode), f);
outbuf[0] = 0x00;
outbuf[1] = 0x00;
outbuf[2] = 0x01;
outbuf[3] = 0xb7;
fwrite(outbuf, 1, 4, f);
fclose(f);
free(outbuf);
avcodec_close(c);
av_free(c);
av_freep(&frame->data[0]);
avcodec_free_frame(&frame);
av_free(picture->data[0]);
av_free(picture);
printf("\n");
}
@@ -491,16 +351,18 @@ static void video_decode_example(const char *outfilename, const char *filename)
/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
printf("Decode video file %s to %s\n", filename, outfilename);
printf("Decode video file %s\n", filename);
/* find the mpeg1 video decoder */
codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);
if (!codec) {
fprintf(stderr, "Codec not found\n");
fprintf(stderr, "codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
picture= avcodec_alloc_frame();
if(codec->capabilities&CODEC_CAP_TRUNCATED)
c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
@@ -510,7 +372,7 @@ static void video_decode_example(const char *outfilename, const char *filename)
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
fprintf(stderr, "could not open codec\n");
exit(1);
}
@@ -518,13 +380,7 @@ static void video_decode_example(const char *outfilename, const char *filename)
f = fopen(filename, "rb");
if (!f) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
picture = avcodec_alloc_frame();
if (!picture) {
fprintf(stderr, "Could not allocate video frame\n");
fprintf(stderr, "could not open %s\n", filename);
exit(1);
}
@@ -557,7 +413,7 @@ static void video_decode_example(const char *outfilename, const char *filename)
exit(1);
}
if (got_picture) {
printf("Saving frame %3d\n", frame);
printf("saving frame %3d\n", frame);
fflush(stdout);
/* the picture is allocated by the decoder. no need to
@@ -579,7 +435,7 @@ static void video_decode_example(const char *outfilename, const char *filename)
avpkt.size = 0;
len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
if (got_picture) {
printf("Saving last frame %3d\n", frame);
printf("saving last frame %3d\n", frame);
fflush(stdout);
/* the picture is allocated by the decoder. no need to
@@ -594,42 +450,30 @@ static void video_decode_example(const char *outfilename, const char *filename)
avcodec_close(c);
av_free(c);
avcodec_free_frame(&picture);
av_free(picture);
printf("\n");
}
int main(int argc, char **argv)
{
const char *output_type;
const char *filename;
/* register all the codecs */
avcodec_register_all();
if (argc < 2) {
printf("usage: %s output_type\n"
"API example program to decode/encode a media stream with libavcodec.\n"
"This program generates a synthetic stream and encodes it to a file\n"
"named test.h264, test.mp2 or test.mpg depending on output_type.\n"
"The encoded stream is then decoded and written to a raw data output\n."
"output_type must be choosen between 'h264', 'mp2', 'mpg'\n",
argv[0]);
return 1;
}
output_type = argv[1];
if (argc <= 1) {
audio_encode_example("/tmp/test.mp2");
audio_decode_example("/tmp/test.sw", "/tmp/test.mp2");
if (!strcmp(output_type, "h264")) {
video_encode_example("test.h264", AV_CODEC_ID_H264);
} else if (!strcmp(output_type, "mp2")) {
audio_encode_example("test.mp2");
audio_decode_example("test.sw", "test.mp2");
} else if (!strcmp(output_type, "mpg")) {
video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO);
video_decode_example("test%02d.pgm", "test.mpg");
video_encode_example("/tmp/test.h264", CODEC_ID_H264);
video_encode_example("/tmp/test.mpg", CODEC_ID_MPEG1VIDEO);
filename = "/tmp/test.mpg";
} else {
fprintf(stderr, "Invalid output type '%s', choose between 'h264', 'mp2', or 'mpg'\n",
output_type);
return 1;
filename = argv[1];
}
// audio_decode_example("/tmp/test.sw", filename);
video_decode_example("/tmp/test%d.pgm", filename);
return 0;
}


@@ -1,339 +0,0 @@
/*
* Copyright (c) 2012 Stefano Sabatini
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* libavformat demuxing API use example.
*
* Show how to use the libavformat and libavcodec API to demux and
* decode audio and video data.
*/
#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
static AVFormatContext *fmt_ctx = NULL;
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
static AVStream *video_stream = NULL, *audio_stream = NULL;
static const char *src_filename = NULL;
static const char *video_dst_filename = NULL;
static const char *audio_dst_filename = NULL;
static FILE *video_dst_file = NULL;
static FILE *audio_dst_file = NULL;
static uint8_t *video_dst_data[4] = {NULL};
static int video_dst_linesize[4];
static int video_dst_bufsize;
static uint8_t **audio_dst_data = NULL;
static int audio_dst_linesize;
static int audio_dst_bufsize;
static int video_stream_idx = -1, audio_stream_idx = -1;
static AVFrame *frame = NULL;
static AVPacket pkt;
static int video_frame_count = 0;
static int audio_frame_count = 0;
static int decode_packet(int *got_frame, int cached)
{
int ret = 0;
if (pkt.stream_index == video_stream_idx) {
/* decode video frame */
ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
if (ret < 0) {
fprintf(stderr, "Error decoding video frame\n");
return ret;
}
if (*got_frame) {
printf("video_frame%s n:%d coded_n:%d pts:%s\n",
cached ? "(cached)" : "",
video_frame_count++, frame->coded_picture_number,
av_ts2timestr(frame->pts, &video_dec_ctx->time_base));
/* copy decoded frame to destination buffer:
* this is required since rawvideo expects non aligned data */
av_image_copy(video_dst_data, video_dst_linesize,
(const uint8_t **)(frame->data), frame->linesize,
video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);
/* write to rawvideo file */
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
}
} else if (pkt.stream_index == audio_stream_idx) {
/* decode audio frame */
ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
if (ret < 0) {
fprintf(stderr, "Error decoding audio frame\n");
return ret;
}
if (*got_frame) {
printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
cached ? "(cached)" : "",
audio_frame_count++, frame->nb_samples,
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
ret = av_samples_alloc(audio_dst_data, &audio_dst_linesize, frame->channels,
frame->nb_samples, frame->format, 1);
if (ret < 0) {
fprintf(stderr, "Could not allocate audio buffer\n");
return AVERROR(ENOMEM);
}
/* TODO: extend return code of the av_samples_* functions so that this call is not needed */
audio_dst_bufsize =
av_samples_get_buffer_size(NULL, frame->channels,
frame->nb_samples, frame->format, 1);
/* copy audio data to destination buffer:
* this is required since rawaudio expects non aligned data */
av_samples_copy(audio_dst_data, frame->data, 0, 0,
frame->nb_samples, frame->channels, frame->format);
/* write to rawaudio file */
fwrite(audio_dst_data[0], 1, audio_dst_bufsize, audio_dst_file);
av_freep(&audio_dst_data[0]);
}
}
return ret;
}
static int open_codec_context(int *stream_idx,
AVFormatContext *fmt_ctx, enum AVMediaType type)
{
int ret;
AVStream *st;
AVCodecContext *dec_ctx = NULL;
AVCodec *dec = NULL;
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
if (ret < 0) {
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
av_get_media_type_string(type), src_filename);
return ret;
} else {
*stream_idx = ret;
st = fmt_ctx->streams[*stream_idx];
/* find decoder for the stream */
dec_ctx = st->codec;
dec = avcodec_find_decoder(dec_ctx->codec_id);
if (!dec) {
fprintf(stderr, "Failed to find %s codec\n",
av_get_media_type_string(type));
return AVERROR(EINVAL);
}
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
fprintf(stderr, "Failed to open %s codec\n",
av_get_media_type_string(type));
return ret;
}
}
return 0;
}
static int get_format_from_sample_fmt(const char **fmt,
enum AVSampleFormat sample_fmt)
{
int i;
struct sample_fmt_entry {
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
} sample_fmt_entries[] = {
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
};
*fmt = NULL;
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
if (sample_fmt == entry->sample_fmt) {
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
return 0;
}
}
fprintf(stderr,
"sample format %s is not supported as output format\n",
av_get_sample_fmt_name(sample_fmt));
return -1;
}
int main (int argc, char **argv)
{
int ret = 0, got_frame;
if (argc != 4) {
fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
"API example program to show how to read frames from an input file.\n"
"This program reads frames from a file, decodes them, and writes decoded\n"
"video frames to a rawvideo file named video_output_file, and decoded\n"
"audio frames to a rawaudio file named audio_output_file.\n"
"\n", argv[0]);
exit(1);
}
src_filename = argv[1];
video_dst_filename = argv[2];
audio_dst_filename = argv[3];
/* register all formats and codecs */
av_register_all();
/* open input file, and allocate format context */
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
fprintf(stderr, "Could not open source file %s\n", src_filename);
exit(1);
}
/* retrieve stream information */
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
fprintf(stderr, "Could not find stream information\n");
exit(1);
}
if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
video_stream = fmt_ctx->streams[video_stream_idx];
video_dec_ctx = video_stream->codec;
video_dst_file = fopen(video_dst_filename, "wb");
if (!video_dst_file) {
fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
ret = 1;
goto end;
}
/* allocate image where the decoded image will be put */
ret = av_image_alloc(video_dst_data, video_dst_linesize,
video_dec_ctx->width, video_dec_ctx->height,
video_dec_ctx->pix_fmt, 1);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw video buffer\n");
goto end;
}
video_dst_bufsize = ret;
}
/* dump input information to stderr */
av_dump_format(fmt_ctx, 0, src_filename, 0);
if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
int nb_planes;
audio_stream = fmt_ctx->streams[audio_stream_idx];
audio_dec_ctx = audio_stream->codec;
audio_dst_file = fopen(audio_dst_filename, "wb");
if (!audio_dst_file) {
fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
ret = 1;
goto end;
}
nb_planes = av_sample_fmt_is_planar(audio_dec_ctx->sample_fmt) ?
audio_dec_ctx->channels : 1;
audio_dst_data = av_mallocz(sizeof(uint8_t *) * nb_planes);
if (!audio_dst_data) {
fprintf(stderr, "Could not allocate audio data buffers\n");
ret = AVERROR(ENOMEM);
goto end;
}
}
if (!audio_stream && !video_stream) {
fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
ret = 1;
goto end;
}
frame = avcodec_alloc_frame();
if (!frame) {
fprintf(stderr, "Could not allocate frame\n");
ret = AVERROR(ENOMEM);
goto end;
}
/* initialize packet, set data to NULL, let the demuxer fill it */
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
if (video_stream)
printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
if (audio_stream)
printf("Demuxing video from file '%s' into '%s'\n", src_filename, audio_dst_filename);
/* read frames from the file */
while (av_read_frame(fmt_ctx, &pkt) >= 0)
decode_packet(&got_frame, 0);
/* flush cached frames */
pkt.data = NULL;
pkt.size = 0;
do {
decode_packet(&got_frame, 1);
} while (got_frame);
printf("Demuxing succeeded.\n");
if (video_stream) {
printf("Play the output video file with the command:\n"
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
video_dst_filename);
}
if (audio_stream) {
const char *fmt;
if ((ret = get_format_from_sample_fmt(&fmt, audio_dec_ctx->sample_fmt)) < 0)
goto end;
printf("Play the output audio file with the command:\n"
"ffplay -f %s -ac %d -ar %d %s\n",
fmt, audio_dec_ctx->channels, audio_dec_ctx->sample_rate,
audio_dst_filename);
}
end:
if (video_dec_ctx)
avcodec_close(video_dec_ctx);
if (audio_dec_ctx)
avcodec_close(audio_dec_ctx);
avformat_close_input(&fmt_ctx);
if (video_dst_file)
fclose(video_dst_file);
if (audio_dst_file)
fclose(audio_dst_file);
av_free(frame);
av_free(video_dst_data[0]);
av_free(audio_dst_data);
return ret < 0;
}


@@ -84,23 +84,21 @@ static int init_filters(const char *filters_descr)
char args[512];
int ret;
AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
AVFilter *abuffersink = avfilter_get_by_name("ffabuffersink");
AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
const int64_t *chlayouts = avfilter_all_channel_layouts;
AVABufferSinkParams *abuffersink_params;
const AVFilterLink *outlink;
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
filter_graph = avfilter_graph_alloc();
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
if (!dec_ctx->channel_layout)
dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
snprintf(args, sizeof(args),
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
time_base.num, time_base.den, dec_ctx->sample_rate,
av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
snprintf(args, sizeof(args), "%d:%d:0x%"PRIx64,
dec_ctx->sample_rate, dec_ctx->sample_fmt, dec_ctx->channel_layout);
ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
args, NULL, filter_graph);
if (ret < 0) {
@@ -111,6 +109,7 @@ static int init_filters(const char *filters_descr)
/* buffer audio sink: to terminate the filter chain. */
abuffersink_params = av_abuffersink_params_alloc();
abuffersink_params->sample_fmts = sample_fmts;
abuffersink_params->channel_layouts = chlayouts;
ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
NULL, abuffersink_params, filter_graph);
av_free(abuffersink_params);
@@ -195,6 +194,7 @@ int main(int argc, char **argv)
avcodec_get_frame_defaults(&frame);
got_frame = 0;
ret = avcodec_decode_audio4(dec_ctx, &frame, &got_frame, &packet);
av_free_packet(&packet);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
continue;
@@ -208,20 +208,15 @@ int main(int argc, char **argv)
}
/* pull filtered audio from the filtergraph */
while (1) {
ret = av_buffersink_get_buffer_ref(buffersink_ctx, &samplesref, 0);
if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
if(ret < 0)
goto end;
while (avfilter_poll_frame(buffersink_ctx->inputs[0])) {
av_buffersink_get_buffer_ref(buffersink_ctx, &samplesref, 0);
if (samplesref) {
print_samplesref(samplesref);
avfilter_unref_bufferp(&samplesref);
avfilter_unref_buffer(samplesref);
}
}
}
}
av_free_packet(&packet);
}
end:
avfilter_graph_free(&filter_graph);


@@ -34,7 +34,6 @@
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
const char *filter_descr = "scale=78:24";
@@ -84,21 +83,17 @@ static int init_filters(const char *filters_descr)
char args[512];
int ret;
AVFilter *buffersrc = avfilter_get_by_name("buffer");
AVFilter *buffersink = avfilter_get_by_name("ffbuffersink");
AVFilter *buffersink = avfilter_get_by_name("buffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
enum PixelFormat pix_fmts[] = { PIX_FMT_GRAY8, PIX_FMT_NONE };
AVBufferSinkParams *buffersink_params;
filter_graph = avfilter_graph_alloc();
/* buffer video source: the decoded frames from the decoder will be inserted here. */
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
dec_ctx->time_base.num, dec_ctx->time_base.den,
dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d",
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
dec_ctx->time_base.num, dec_ctx->time_base.den,
dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, NULL, filter_graph);
if (ret < 0) {
@@ -107,11 +102,8 @@ static int init_filters(const char *filters_descr)
}
/* buffer video sink: to terminate the filter chain. */
buffersink_params = av_buffersink_params_alloc();
buffersink_params->pixel_fmts = pix_fmts;
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
NULL, buffersink_params, filter_graph);
av_free(buffersink_params);
NULL, pix_fmts, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
return ret;
@@ -199,6 +191,7 @@ int main(int argc, char **argv)
avcodec_get_frame_defaults(&frame);
got_frame = 0;
ret = avcodec_decode_video2(dec_ctx, &frame, &got_frame, &packet);
av_free_packet(&packet);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
break;
@@ -208,27 +201,18 @@ int main(int argc, char **argv)
frame.pts = av_frame_get_best_effort_timestamp(&frame);
/* push the decoded frame into the filtergraph */
if (av_buffersrc_add_frame(buffersrc_ctx, &frame, 0) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
break;
}
av_vsrc_buffer_add_frame(buffersrc_ctx, &frame, 0);
/* pull filtered pictures from the filtergraph */
while (1) {
ret = av_buffersink_get_buffer_ref(buffersink_ctx, &picref, 0);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
if (ret < 0)
goto end;
while (avfilter_poll_frame(buffersink_ctx->inputs[0])) {
av_buffersink_get_buffer_ref(buffersink_ctx, &picref, 0);
if (picref) {
display_picref(picref, buffersink_ctx->inputs[0]->time_base);
avfilter_unref_bufferp(&picref);
avfilter_unref_buffer(picref);
}
}
}
}
av_free_packet(&packet);
}
end:
avfilter_graph_free(&filter_graph);


@@ -37,6 +37,8 @@
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#undef exit
/* 5 seconds stream duration */
#define STREAM_DURATION 200.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
@@ -55,22 +57,22 @@ static int audio_input_frame_size;
/*
* add an audio output stream
*/
static AVStream *add_audio_stream(AVFormatContext *oc, AVCodec **codec,
enum AVCodecID codec_id)
static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
{
AVCodecContext *c;
AVStream *st;
AVCodec *codec;
/* find the audio encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
fprintf(stderr, "Could not find codec\n");
codec = avcodec_find_encoder(codec_id);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
st = avformat_new_stream(oc, *codec);
st = avformat_new_stream(oc, codec);
if (!st) {
fprintf(stderr, "Could not allocate stream\n");
fprintf(stderr, "Could not alloc stream\n");
exit(1);
}
st->id = 1;
@@ -90,14 +92,14 @@ static AVStream *add_audio_stream(AVFormatContext *oc, AVCodec **codec,
return st;
}
static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
static void open_audio(AVFormatContext *oc, AVStream *st)
{
AVCodecContext *c;
c = st->codec;
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
if (avcodec_open2(c, NULL, NULL) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
@@ -163,7 +165,6 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
fprintf(stderr, "Error while writing audio frame\n");
exit(1);
}
avcodec_free_frame(&frame);
}
static void close_audio(AVFormatContext *oc, AVStream *st)
@@ -176,26 +177,25 @@ static void close_audio(AVFormatContext *oc, AVStream *st)
/**************************************************************/
/* video output */
static AVFrame *frame;
static AVPicture src_picture, dst_picture;
static AVFrame *picture, *tmp_picture;
static uint8_t *video_outbuf;
static int frame_count, video_outbuf_size;
/* Add a video output stream. */
static AVStream *add_video_stream(AVFormatContext *oc, AVCodec **codec,
enum AVCodecID codec_id)
static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
{
AVCodecContext *c;
AVStream *st;
AVCodec *codec;
/* find the video encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
codec = avcodec_find_encoder(codec_id);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
st = avformat_new_stream(oc, *codec);
st = avformat_new_stream(oc, codec);
if (!st) {
fprintf(stderr, "Could not alloc stream\n");
exit(1);
@@ -203,7 +203,13 @@ static AVStream *add_video_stream(AVFormatContext *oc, AVCodec **codec,
c = st->codec;
avcodec_get_context_defaults3(c, *codec);
/* find the video encoder */
codec = avcodec_find_encoder(codec_id);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
avcodec_get_context_defaults3(c, codec);
c->codec_id = codec_id;
@@ -220,11 +226,11 @@ static AVStream *add_video_stream(AVFormatContext *oc, AVCodec **codec,
c->time_base.num = 1;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
/* just for testing, we also add B frames */
c->max_b_frames = 2;
}
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
if (c->codec_id == CODEC_ID_MPEG1VIDEO) {
/* Needed to avoid using macroblocks in which some coeffs overflow.
* This does not happen with normal video, it just happens here as
* the motion of the chroma plane does not match the luma plane. */
@@ -237,14 +243,35 @@ static AVStream *add_video_stream(AVFormatContext *oc, AVCodec **codec,
return st;
}
static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
{
int ret;
AVCodecContext *c = st->codec;
AVFrame *picture;
uint8_t *picture_buf;
int size;
picture = avcodec_alloc_frame();
if (!picture)
return NULL;
size = avpicture_get_size(pix_fmt, width, height);
picture_buf = av_malloc(size);
if (!picture_buf) {
av_free(picture);
return NULL;
}
avpicture_fill((AVPicture *)picture, picture_buf,
pix_fmt, width, height);
return picture;
}
static void open_video(AVFormatContext *oc, AVStream *st)
{
AVCodecContext *c;
c = st->codec;
/* open the codec */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
if (avcodec_open2(c, NULL, NULL) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
@@ -260,16 +287,9 @@ static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
video_outbuf = av_malloc(video_outbuf_size);
}
/* allocate and init a re-usable frame */
frame = avcodec_alloc_frame();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
/* Allocate the encoded raw picture. */
ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
if (ret < 0) {
picture = alloc_picture(c->pix_fmt, c->width, c->height);
if (!picture) {
fprintf(stderr, "Could not allocate picture\n");
exit(1);
}
@@ -277,20 +297,18 @@ static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
/* If the output format is not YUV420P, then a temporary YUV420P
* picture is needed too. It is then converted to the required
* output format. */
tmp_picture = NULL;
if (c->pix_fmt != PIX_FMT_YUV420P) {
ret = avpicture_alloc(&src_picture, PIX_FMT_YUV420P, c->width, c->height);
if (ret < 0) {
tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
if (!tmp_picture) {
fprintf(stderr, "Could not allocate temporary picture\n");
exit(1);
}
}
/* copy data and linesize picture pointers to frame */
*((AVPicture *)frame) = dst_picture;
}
/* Prepare a dummy image. */
static void fill_yuv_image(AVPicture *pict, int frame_index,
static void fill_yuv_image(AVFrame *pict, int frame_index,
int width, int height)
{
int x, y, i;
@@ -313,9 +331,11 @@ static void fill_yuv_image(AVPicture *pict, int frame_index,
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
int ret;
static struct SwsContext *sws_ctx;
AVCodecContext *c = st->codec;
int out_size, ret;
AVCodecContext *c;
static struct SwsContext *img_convert_ctx;
c = st->codec;
if (frame_count >= STREAM_NB_FRAMES) {
/* No more frames to compress. The codec has a latency of a few
@@ -325,22 +345,23 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
if (c->pix_fmt != PIX_FMT_YUV420P) {
/* as we only generate a YUV420P picture, we must convert it
* to the codec pixel format if needed */
if (!sws_ctx) {
sws_ctx = sws_getContext(c->width, c->height, PIX_FMT_YUV420P,
c->width, c->height, c->pix_fmt,
sws_flags, NULL, NULL, NULL);
if (!sws_ctx) {
if (img_convert_ctx == NULL) {
img_convert_ctx = sws_getContext(c->width, c->height,
PIX_FMT_YUV420P,
c->width, c->height,
c->pix_fmt,
sws_flags, NULL, NULL, NULL);
if (img_convert_ctx == NULL) {
fprintf(stderr,
"Could not initialize the conversion context\n");
"Cannot initialize the conversion context\n");
exit(1);
}
}
fill_yuv_image(&src_picture, frame_count, c->width, c->height);
sws_scale(sws_ctx,
(const uint8_t * const *)src_picture.data, src_picture.linesize,
0, c->height, dst_picture.data, dst_picture.linesize);
fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
0, c->height, picture->data, picture->linesize);
} else {
fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
fill_yuv_image(picture, frame_count, c->width, c->height);
}
}
@@ -352,27 +373,19 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = st->index;
pkt.data = dst_picture.data[0];
pkt.data = (uint8_t *)picture;
pkt.size = sizeof(AVPicture);
ret = av_interleaved_write_frame(oc, &pkt);
} else {
/* encode the image */
AVPacket pkt;
int got_output;
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding video frame\n");
exit(1);
}
out_size = avcodec_encode_video(c, video_outbuf,
video_outbuf_size, picture);
/* If size is zero, it means the image was buffered. */
if (got_output) {
if (out_size > 0) {
AVPacket pkt;
av_init_packet(&pkt);
if (c->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(c->coded_frame->pts,
c->time_base, st->time_base);
@@ -380,6 +393,8 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = st->index;
pkt.data = video_outbuf;
pkt.size = out_size;
/* Write the compressed frame to the media file. */
ret = av_interleaved_write_frame(oc, &pkt);
@@ -397,9 +412,12 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
static void close_video(AVFormatContext *oc, AVStream *st)
{
avcodec_close(st->codec);
av_free(src_picture.data[0]);
av_free(dst_picture.data[0]);
av_free(frame);
av_free(picture->data[0]);
av_free(picture);
if (tmp_picture) {
av_free(tmp_picture->data[0]);
av_free(tmp_picture);
}
av_free(video_outbuf);
}
@@ -412,7 +430,6 @@ int main(int argc, char **argv)
AVOutputFormat *fmt;
AVFormatContext *oc;
AVStream *audio_st, *video_st;
AVCodec *audio_codec, *video_codec;
double audio_pts, video_pts;
int i;
@@ -445,19 +462,19 @@ int main(int argc, char **argv)
* and initialize the codecs. */
video_st = NULL;
audio_st = NULL;
if (fmt->video_codec != AV_CODEC_ID_NONE) {
video_st = add_video_stream(oc, &video_codec, fmt->video_codec);
if (fmt->video_codec != CODEC_ID_NONE) {
video_st = add_video_stream(oc, fmt->video_codec);
}
if (fmt->audio_codec != AV_CODEC_ID_NONE) {
audio_st = add_audio_stream(oc, &audio_codec, fmt->audio_codec);
if (fmt->audio_codec != CODEC_ID_NONE) {
audio_st = add_audio_stream(oc, fmt->audio_codec);
}
/* Now that all the parameters are set, we can open the audio and
* video codecs and allocate the necessary encode buffers. */
if (video_st)
open_video(oc, video_codec, video_st);
open_video(oc, video_st);
if (audio_st)
open_audio(oc, audio_codec, audio_st);
open_audio(oc, audio_st);
av_dump_format(oc, 0, filename, 1);
@@ -470,12 +487,9 @@ int main(int argc, char **argv)
}
/* Write the stream header, if any. */
if (avformat_write_header(oc, NULL) < 0) {
fprintf(stderr, "Error occurred when opening output file\n");
return 1;
}
avformat_write_header(oc, NULL);
frame->pts = 0;
picture->pts = 0;
for (;;) {
/* Compute current audio and video time. */
if (audio_st)
@@ -498,7 +512,7 @@ int main(int argc, char **argv)
write_audio_frame(oc, audio_st);
} else {
write_video_frame(oc, video_st);
frame->pts++;
picture->pts++;
}
}
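Pieced together, the surrounding hunks amount to the following main loop; this
is a sketch assuming the STREAM_DURATION macro and the helper functions defined
earlier in the example (on the 1.0 side, frame->pts++ replaces picture->pts++):

for (;;) {
    /* compute the current presentation time of each stream */
    audio_pts = audio_st ? (double)audio_st->pts.val * audio_st->time_base.num /
                           audio_st->time_base.den : 0.0;
    video_pts = video_st ? (double)video_st->pts.val * video_st->time_base.num /
                           video_st->time_base.den : 0.0;
    if ((!audio_st || audio_pts >= STREAM_DURATION) &&
        (!video_st || video_pts >= STREAM_DURATION))
        break;                       /* both streams are long enough */
    /* encode whichever stream lags behind, so they stay interleaved */
    if (audio_st && (!video_st || audio_pts < video_pts)) {
        write_audio_frame(oc, audio_st);
    } else {
        write_video_frame(oc, video_st);
        picture->pts++;
    }
}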


@@ -1,142 +0,0 @@
/*
* Copyright (c) 2012 Stefano Sabatini
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* libswscale API use example.
*/
#include <libavutil/imgutils.h>
#include <libavutil/parseutils.h>
#include <libswscale/swscale.h>
static void fill_yuv_image(uint8_t *data[4], int linesize[4],
int width, int height, int frame_index)
{
int x, y, i;
i = frame_index;
/* Y */
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
data[0][y * linesize[0] + x] = x + y + i * 3;
/* Cb and Cr */
for (y = 0; y < height / 2; y++) {
for (x = 0; x < width / 2; x++) {
data[1][y * linesize[1] + x] = 128 + y + i * 2;
data[2][y * linesize[2] + x] = 64 + x + i * 5;
}
}
}
int main(int argc, char **argv)
{
uint8_t *src_data[4], *dst_data[4];
int src_linesize[4], dst_linesize[4];
int src_w = 320, src_h = 240, dst_w, dst_h;
enum PixelFormat src_pix_fmt = PIX_FMT_YUV420P, dst_pix_fmt = PIX_FMT_RGB24;
const char *dst_size = NULL;
const char *dst_filename = NULL;
FILE *dst_file;
int dst_bufsize;
struct SwsContext *sws_ctx;
int i, ret;
if (argc != 3) {
fprintf(stderr, "Usage: %s output_file output_size\n"
"API example program to show how to scale an image with libswscale.\n"
"This program generates a series of pictures, rescales them to the given "
"output_size and saves them to an output file named output_file\n."
"\n", argv[0]);
exit(1);
}
dst_filename = argv[1];
dst_size = argv[2];
if (av_parse_video_size(&dst_w, &dst_h, dst_size) < 0) {
fprintf(stderr,
"Invalid size '%s', must be in the form WxH or a valid size abbreviation\n",
dst_size);
exit(1);
}
dst_file = fopen(dst_filename, "wb");
if (!dst_file) {
fprintf(stderr, "Could not open destination file %s\n", dst_filename);
exit(1);
}
/* create scaling context */
sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
dst_w, dst_h, dst_pix_fmt,
SWS_BILINEAR, NULL, NULL, NULL);
if (!sws_ctx) {
fprintf(stderr,
"Impossible to create scale context for the conversion "
"fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
ret = AVERROR(EINVAL);
goto end;
}
/* allocate source and destination image buffers */
if ((ret = av_image_alloc(src_data, src_linesize,
src_w, src_h, src_pix_fmt, 16)) < 0) {
fprintf(stderr, "Could not allocate source image\n");
goto end;
}
/* buffer is going to be written to rawvideo file, no alignment */
if ((ret = av_image_alloc(dst_data, dst_linesize,
dst_w, dst_h, dst_pix_fmt, 1)) < 0) {
fprintf(stderr, "Could not allocate destination image\n");
goto end;
}
dst_bufsize = ret;
for (i = 0; i < 100; i++) {
/* generate synthetic video */
fill_yuv_image(src_data, src_linesize, src_w, src_h, i);
/* convert to destination format */
sws_scale(sws_ctx, (const uint8_t * const*)src_data,
src_linesize, 0, src_h, dst_data, dst_linesize);
/* write scaled image to file */
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
}
fprintf(stderr, "Scaling succeeded. Play the output file with the command:\n"
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);
end:
if (dst_file)
fclose(dst_file);
av_freep(&src_data[0]);
av_freep(&dst_data[0]);
sws_freeContext(sws_ctx);
return ret < 0;
}


@@ -213,56 +213,8 @@ For ANY other help on Avisynth, please visit the
@section How can I join video files?
To "join" video files is quite ambiguous. The following list explains the
different kinds of "joining" and points out how those are addressed in
FFmpeg. To join video files may mean:
@itemize
@item
To put them one after the other: this is called to @emph{concatenate} them
(in short: concat) and is addressed
@ref{How can I concatenate video files, in this very faq}.
@item
To put them together in the same file, to let the user choose between the
different versions (example: different audio languages): this is called to
@emph{multiplex} them together (in short: mux), and is done by simply
invoking ffmpeg with several @option{-i} options, as shown in the example
just after this list.
@item
For audio, to put all channels together in a single stream (example: two
mono streams into one stereo stream): this is sometimes called to
@emph{merge} them, and can be done using the
@url{http://ffmpeg.org/ffmpeg.html#amerge, @code{amerge}} filter.
@item
For audio, to play one on top of the other: this is called to @emph{mix}
them, and can be done by first merging them into a single stream and then
using the @url{http://ffmpeg.org/ffmpeg.html#pan, @code{pan}} filter to mix
the channels at will.
@item
For video, to display both together, side by side or one on top of a part of
the other; it can be done using the
@url{http://ffmpeg.org/ffmpeg.html#overlay, @code{overlay}} video filter.
@end itemize
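As an illustration of the multiplexing case above (all file names are purely
hypothetical):

@example
ffmpeg -i video.mkv -i english.wav -i french.wav \
  -map 0:v -map 1:a -map 2:a -c copy output.mkv
@end example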
@anchor{How can I concatenate video files}
@section How can I concatenate video files?
There are several solutions, depending on the exact circumstances.
@subsection Concatenating using filters
FFmpeg has a @url{http://ffmpeg.org/ffmpeg.html#concat-1, @code{concat}}
filter designed specifically for that, with examples in the documentation.
@subsection Concatenating at the file level
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to concatenate
video by merely concatenating the files containing them.
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to join video files by
merely concatenating them.
Hence you may concatenate your multimedia files by first transcoding them to
these privileged formats, then using the humble @code{cat} command (or the
@@ -270,38 +222,28 @@ equally humble @code{copy} under Windows), and finally transcoding back to your
format of choice.
@example
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
ffmpeg -i input1.avi -same_quant intermediate1.mpg
ffmpeg -i input2.avi -same_quant intermediate2.mpg
cat intermediate1.mpg intermediate2.mpg > intermediate_all.mpg
ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
ffmpeg -i intermediate_all.mpg -same_quant output.avi
@end example
Additionally, you can use the @code{concat} protocol instead of @code{cat} or
@code{copy} which will avoid creation of a potentially huge intermediate file.
Notice that you should either use @code{-same_quant} or set a reasonably high
bitrate for your intermediate and output files, if you want to preserve
video quality.
@example
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
ffmpeg -i concat:"intermediate1.mpg|intermediate2.mpg" -c copy intermediate_all.mpg
ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
@end example
Note that you may need to escape the character "|" which is special for many
shells.
Another option is to use named pipes, should your platform support them:
Also notice that you may avoid the huge intermediate files by taking advantage
of named pipes, should your platform support it:
@example
mkfifo intermediate1.mpg
mkfifo intermediate2.mpg
ffmpeg -i input1.avi -qscale:v 1 -y intermediate1.mpg < /dev/null &
ffmpeg -i input2.avi -qscale:v 1 -y intermediate2.mpg < /dev/null &
ffmpeg -i input1.avi -same_quant -y intermediate1.mpg < /dev/null &
ffmpeg -i input2.avi -same_quant -y intermediate2.mpg < /dev/null &
cat intermediate1.mpg intermediate2.mpg |\
ffmpeg -f mpeg -i - -qscale:v 2 -c:v mpeg4 -acodec libmp3lame -q:a 4 output.avi
ffmpeg -f mpeg -i - -same_quant -c:v mpeg4 -acodec libmp3lame output.avi
@end example
@subsection Concatenating using raw audio and video
Similarly, the yuv4mpegpipe format, and the raw video, raw audio codecs also
allow concatenation, and the transcoding step is almost lossless.
When using multiple yuv4mpegpipe(s), the first line needs to be discarded
@@ -309,8 +251,7 @@ from all but the first stream. This can be accomplished by piping through
@code{tail} as seen below. Note that when piping through @code{tail} you
must use command grouping, @code{@{ ;@}}, to background properly.
For example, let's say we want to concatenate two FLV files into an
output.flv file:
For example, let's say we want to join two FLV files into an output.flv file:
@example
mkfifo temp1.a
@@ -327,7 +268,7 @@ cat temp1.a temp2.a > all.a &
cat temp1.v temp2.v > all.v &
ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
-f yuv4mpegpipe -i all.v \
-qscale:v 2 -y output.flv
-same_quant -y output.flv
rm temp[12].[av] all.[av]
@end example
@@ -369,34 +310,11 @@ specifying the exact format.
aconvert=s16:stereo:packed
@end example
@section Why does FFmpeg not see the subtitles in my VOB file?
VOB and a few other formats do not have a global header that describes
everything present in the file. Instead, applications are supposed to scan
the file to see what it contains. Since VOB files are frequently large, only
the beginning is scanned. If the subtitles happen only later in the file,
they will not be initially detected.
Some applications, including the @code{ffmpeg} command-line tool, can only
work with streams that were detected during the initial scan; streams that
are detected later are ignored.
The size of the initial scan is controlled by two options: @code{probesize}
(default ~5 MB) and @code{analyzeduration} (default 5,000,000 µs = 5 s). For
the subtitle stream to be detected, both values must be large enough.
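For example, to enlarge both limits (the values below are only illustrative)
so that a subtitle stream starting late in the file is detected:

@example
ffmpeg -probesize 50M -analyzeduration 100M -i input.vob output.mkv
@end example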
@chapter Development
@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?
Yes. Check the @file{doc/examples} directory in the source
repository, also available online at:
@url{https://github.com/FFmpeg/FFmpeg/tree/master/doc/examples}.
Examples are also installed by default, usually in
@code{$PREFIX/share/ffmpeg/examples}.
Also you may read the Developers Guide of the FFmpeg documentation. Alternatively,
Yes. Read the Developers Guide of the FFmpeg documentation. Alternatively,
examine the source code for one of the many open source projects that
already incorporate FFmpeg at (@url{projects.html}).
@@ -457,24 +375,6 @@ Yes, as long as the code is optional and can easily and cleanly be placed
under #if CONFIG_GPL without breaking anything. So, for example, a new codec
or filter would be OK under GPL while a bug fix to LGPL code would not.
@section I'm using FFmpeg from within my C application but the linker complains about missing symbols from the libraries themselves.
FFmpeg builds static libraries by default. In static libraries, dependencies
are not handled. That has two consequences. First, you must specify the
libraries in dependency order: @code{-lavdevice} must come before
@code{-lavformat}, @code{-lavutil} must come after everything else, etc.
Second, external libraries that are used in FFmpeg have to be specified too.
An easy way to get the full list of required libraries in dependency order
is to use @code{pkg-config}.
@example
c99 -o program program.c $(pkg-config --cflags --libs libavformat libavcodec)
@end example
See @file{doc/example/Makefile} and @file{doc/example/pc-uninstalled} for
more details.
@section I'm using FFmpeg from within my C++ application but the linker complains about missing symbols which seem to be available.
FFmpeg is a pure C project, so to use the libraries within your C++ application


@@ -1,8 +1,8 @@
\input texinfo @c -*- texinfo -*-
@settitle FFmpeg Automated Testing Environment
@settitle FATE Automated Testing Environment
@titlepage
@center @titlefont{FFmpeg Automated Testing Environment}
@center @titlefont{FATE Automated Testing Environment}
@end titlepage
@node Top
@@ -78,14 +78,11 @@ Do not put a '~' character in the samples path to indicate a home
directory. Because of shell nuances, this will cause FATE to fail.
@end float
To use a custom wrapper to run the test, pass @option{--target-exec} to
@command{configure} or set the @var{TARGET_EXEC} Make variable.
@chapter Submitting the results to the FFmpeg result aggregation server
To submit your results to the server you should run fate through the
shell script @file{tests/fate.sh} from the FFmpeg sources. This script needs
shell script tests/fate.sh from the FFmpeg sources. This script needs
to be invoked with a configuration file as its first argument.
@example
@@ -121,9 +118,8 @@ present in $workdir as specified in the configuration file:
@item version
@end itemize
When you have everything working properly you can create an SSH key pair
and send the public key to the FATE server administrator who can be contacted
at the email address @email{fate-admin@@ffmpeg.org}.
When you have everything working properly you can create an SSH key and
send its public part to the FATE server administrator.
Configure your SSH client to use public key authentication with that key
when connecting to the FATE server. Also do not forget to check the identity
@@ -133,11 +129,6 @@ The FATE server's fingerprint is:
b1:31:c8:79:3f:04:1d:f8:f2:23:26:5a:fd:55:fa:92
If you have problems connecting to the FATE server, it may help to try out
the @command{ssh} command with one or more @option{-v} options. You should
get detailed output concerning your SSH configuration and the authentication
process.
The only thing left is to automate the execution of the fate.sh script and
the synchronisation of the samples directory.
@@ -175,20 +166,11 @@ the synchronisation of the samples directory.
@item THREADS
Specify how many threads to use while running regression tests, it is
quite useful to detect thread-related regressions.
@item THREAD_TYPE
Specify which threading strategy test, either @var{slice} or @var{frame},
by default @var{slice+frame}
@item CPUFLAGS
Specify CPU flags.
@item TARGET_EXEC
Specify or override the wrapper used to run the tests.
The @var{TARGET_EXEC} option provides a way to run FATE wrapped in
@command{valgrind}, @command{qemu-user} or @command{wine} or on remote targets
through @command{ssh}.
@end table
@section Examples
Example:
@example
make V=1 SAMPLES=/var/fate/samples THREADS=2 CPUFLAGS=mmx fate
@end example
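As a further illustration, @var{TARGET_EXEC} can wrap the whole run in an
emulator; the exact @command{qemu} invocation below is only an example:

@example
make fate TARGET_EXEC="qemu-arm -L /usr/arm-linux-gnueabi" SAMPLES=/var/fate/samples
@end example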


@@ -79,126 +79,6 @@ The format option may be needed for raw input files.
@c man end DESCRIPTION
@chapter Detailed description
@c man begin DETAILED DESCRIPTION
The transcoding process in @command{ffmpeg} for each output can be described by
the following diagram:
@example
_______ ______________ _________ ______________ ________
| | | | | | | | | |
| input | demuxer | encoded data | decoder | decoded | encoder | encoded data | muxer | output |
| file | ---------> | packets | ---------> | frames | ---------> | packets | -------> | file |
|_______| |______________| |_________| |______________| |________|
@end example
@command{ffmpeg} calls the libavformat library (containing demuxers) to read
input files and get packets containing encoded data from them. When there are
multiple input files, @command{ffmpeg} tries to keep them synchronized by
tracking lowest timestamp on any active input stream.
Encoded packets are then passed to the decoder (unless streamcopy is selected
for the stream, see further for a description). The decoder produces
uncompressed frames (raw video/PCM audio/...) which can be processed further by
filtering (see next section). After filtering the frames are passed to the
encoder, which encodes them and outputs encoded packets again. Finally those are
passed to the muxer, which writes the encoded packets to the output file.
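For example, the following command (the codec choices are purely
illustrative) exercises every stage of the diagram: demuxing, decoding,
encoding and muxing:

@example
ffmpeg -i input.avi -c:v mpeg4 -c:a libmp3lame output.mkv
@end example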
@section Filtering
Before encoding, @command{ffmpeg} can process raw audio and video frames using
filters from the libavfilter library. Several chained filters form a filter
graph. @command{ffmpeg} distinguishes between two types of filtergraphs -
simple and complex.
@subsection Simple filtergraphs
Simple filtergraphs are those that have exactly one input and output, both of
the same type. In the above diagram they can be represented by simply inserting
an additional step between decoding and encoding:
@example
_________ __________ ______________
| | | | | |
| decoded | simple filtergraph | filtered | encoder | encoded data |
| frames | -------------------> | frames | ---------> | packets |
|_________| |__________| |______________|
@end example
Simple filtergraphs are configured with the per-stream @option{-filter} option
(with @option{-vf} and @option{-af} aliases for video and audio respectively).
A simple filtergraph for video can look for example like this:
@example
_______ _____________ _______ _____ ________
| | | | | | | | | |
| input | ---> | deinterlace | ---> | scale | ---> | fps | ---> | output |
|_______| |_____________| |_______| |_____| |________|
@end example
Note that some filters change frame properties but not frame contents. E.g. the
@code{fps} filter in the example above changes number of frames, but does not
touch the frame contents. Another example is the @code{setpts} filter, which
only sets timestamps and otherwise passes the frames unchanged.
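The graph pictured above could be expressed roughly as follows, with
@code{yadif} standing in for the generic ``deinterlace'' box:

@example
ffmpeg -i input.avi -vf "yadif,scale=640:480,fps=25" output.avi
@end example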
@subsection Complex filtergraphs
Complex filtergraphs are those which cannot be described as simply a linear
processing chain applied to one stream. This is the case e.g. when the graph has
more than one input and/or output, or when output stream type is different from
input. They can be represented with the following diagram:
@example
_________
| |
| input 0 |\ __________
|_________| \ | |
\ _________ /| output 0 |
\ | | / |__________|
_________ \| complex | /
| | | |/
| input 1 |---->| filter |\
|_________| | | \ __________
/| graph | \ | |
/ | | \| output 1 |
_________ / |_________| |__________|
| | /
| input 2 |/
|_________|
@end example
Complex filtergraphs are configured with the @option{-filter_complex} option.
Note that this option is global, since a complex filtergraph by its nature
cannot be unambiguously associated with a single stream or file.
A trivial example of a complex filtergraph is the @code{overlay} filter, which
has two video inputs and one video output, containing one video overlaid on top
of the other. Its audio counterpart is the @code{amix} filter.
@section Stream copy
Stream copy is a mode selected by supplying the @code{copy} parameter to the
@option{-codec} option. It makes @command{ffmpeg} omit the decoding and encoding
step for the specified stream, so it does only demuxing and muxing. It is useful
for changing the container format or modifying container-level metadata. The
diagram above will in this case simplify to this:
@example
_______ ______________ ________
| | | | | |
| input | demuxer | encoded data | muxer | output |
| file | ---------> | packets | -------> | file |
|_______| |______________| |________|
@end example
Since there is no decoding or encoding, it is very fast and there is no quality
loss. However it might not work in some cases because of many factors. Applying
filters is obviously also impossible, since filters work on uncompressed data.
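For example, to change the container from AVI to Matroska without touching
the streams themselves:

@example
ffmpeg -i input.avi -c copy output.mkv
@end example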
@c man end DETAILED DESCRIPTION
@chapter Stream selection
@c man begin STREAM SELECTION
@@ -353,24 +233,6 @@ Specify the preset for matching stream(s).
@item -stats (@emph{global})
Print encoding progress/statistics. On by default.
@item -progress @var{url} (@emph{global})
Send program-friendly progress information to @var{url}.
Progress information is written approximately every second and at the end of
the encoding process. It is made of "@var{key}=@var{value}" lines. @var{key}
consists of only alphanumeric characters. The last key of a sequence of
progress information is always "progress".
@item -stdin
Enable interaction on standard input. On by default unless standard input is
used as an input. To explicitly disable interaction you need to specify
@code{-nostdin}.
Disabling interaction on standard input is useful, for example, if
ffmpeg is in the background process group. Roughly the same result can
be achieved with @code{ffmpeg ... < /dev/null} but it requires a
shell.
@item -debug_ts (@emph{global})
Print timestamp information. It is off by default. This option is
mostly useful for testing and debugging purposes, and the output
@@ -420,27 +282,10 @@ attachments.
@item -vframes @var{number} (@emph{output})
Set the number of video frames to record. This is an alias for @code{-frames:v}.
@item -r[:@var{stream_specifier}] @var{fps} (@emph{input/output,per-stream})
Set frame rate (Hz value, fraction or abbreviation).
As an input option, ignore any timestamps stored in the file and instead
generate timestamps assuming constant frame rate @var{fps}.
As an output option, duplicate or drop input frames to achieve constant output
frame rate @var{fps} (note that this actually causes the @code{fps} filter to be
inserted to the end of the corresponding filtergraph).
Set frame rate (Hz value, fraction or abbreviation), (default = 25). For output
streams implies @code{-vsync cfr}.
@item -s[:@var{stream_specifier}] @var{size} (@emph{input/output,per-stream})
Set frame size.
As an input option, this is a shortcut for the @option{video_size} private
option, recognized by some demuxers for which the frame size is either not
stored in the file or is configurable -- e.g. raw video or video grabbers.
As an output option, this inserts the @code{scale} video filter to the
@emph{end} of the corresponding filtergraph. Please use the @code{scale} filter
directly to insert it at the beginning or some other place.
The format is @samp{wxh} (default - same as source).
Set frame size. The format is @samp{wxh} (default - same as source).
@item -aspect[:@var{stream_specifier}] @var{aspect} (@emph{output,per-stream})
Set the video display aspect ratio specified by @var{aspect}.
@@ -476,7 +321,7 @@ Use same quantizer as source (implies VBR).
Note that this is NOT SAME QUALITY. Do not use this option unless you know you
need it.
@item -pass[:@var{stream_specifier}] @var{n} (@emph{output,per-stream})
@item -pass @var{n}
Select the pass number (1 or 2). It is used to do two-pass
video encoding. The statistics of the video are recorded in the first
pass into a log file (see also the option -passlogfile),
@@ -489,7 +334,7 @@ ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y NUL
ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y /dev/null
@end example
@item -passlogfile[:@var{stream_specifier}] @var{prefix} (@emph{output,per-stream})
@item -passlogfile @var{prefix} (@emph{global})
Set two-pass log file name prefix to @var{prefix}, the default file name
prefix is ``ffmpeg2pass''. The complete file name will be
@file{PREFIX-N.log}, where N is a number specific to the output
@@ -622,23 +467,11 @@ Disable subtitle recording.
Deprecated, see -bsf
@end table
@section Advanced Subtitle options:
@section Audio/Video grab options
@table @option
@item -fix_sub_duration
Fix subtitles durations. For each subtitle, wait for the next packet in the
same stream and adjust the duration of the first to avoid overlap. This is
necessary with some subtitles codecs, especially DVB subtitles, because the
duration in the original packet is only a rough estimate and the end is
actually marked by an empty subtitle frame. Failing to use this option when
necessary can result in exaggerated durations or muxing failures due to
non-monotonic timestamps.
Note that this option will delay the output of all data until the next
subtitle packet is decoded: it may increase memory consumption and latency a
lot.
@item -isync (@emph{global})
Synchronize read on input.
@end table
@section Advanced options
@@ -752,7 +585,10 @@ filter. For example, if you need to merge a media (here @file{input.mkv}) with 2
mono audio streams into one single stereo channel audio stream (and keep the
video stream), you can use the following command:
@example
ffmpeg -i input.mkv -filter_complex "[0:1] [0:2] amerge" -c:a pcm_s16le -c:v copy output.mkv
ffmpeg -i input.mkv -f lavfi -i "
amovie=input.mkv:si=1 [a1];
amovie=input.mkv:si=2 [a2];
[a1][a2] amerge" -c:a pcm_s16le -c:v copy output.mkv
@end example
@item -map_metadata[:@var{metadata_spec_out}] @var{infile}[:@var{metadata_spec_in}] (@emph{output,per-metadata})
@@ -850,13 +686,6 @@ Dump each input packet to stderr.
When dumping packets, also dump the payload.
@item -re (@emph{input})
Read input at native frame rate. Mainly used to simulate a grab device.
By default @command{ffmpeg} attempts to read the input(s) as fast as possible.
This option will slow down the reading of the input(s) to the native frame rate
of the input(s). It is useful for real-time output (e.g. live streaming). If
your input(s) is coming from some other live streaming source (through HTTP or
UDP for example) the server might already be in real-time, thus the option will
likely not be required. On the other hand, this is meaningful if your input(s)
is a file you are trying to push in real-time.
@item -loop_input
Loop over the input stream. Currently it works only for image
streams. This option is used for automatic FFserver testing.
@@ -923,7 +752,7 @@ Try to make the choice automatically, in order to generate a sane output.
Default value is -1.
@item -shortest (@emph{output})
@item -shortest
Finish encoding when the shortest input stream ends.
@item -dts_delta_threshold
Timestamp discontinuity delta threshold.
@@ -979,9 +808,6 @@ the matching type.
Output link labels are referred to with @option{-map}. Unlabeled outputs are
added to the first output file.
Note that with this option it is possible to use only lavfi sources without
normal input files.
For example, to overlay an image over video
@example
ffmpeg -i video.mkv -i image.png -filter_complex '[0:v][1:v]overlay[out]' -map
@@ -1004,29 +830,8 @@ graph will be added to the output file automatically, so we can simply write
@example
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay' out.mkv
@end example
To generate 5 seconds of pure red video using lavfi @code{color} source:
@example
ffmpeg -filter_complex 'color=red' -t 5 out.mkv
@end example
@end table
As a special exception, you can use a bitmap subtitle stream as input: it
will be converted into a video with the same size as the largest video in
the file, or 720×576 if no video is present. Note that this is an
experimental and temporary solution. It will be removed once libavfilter has
proper support for subtitles.
For example, to hardcode subtitles on top of a DVB-T recording stored in
MPEG-TS format, delaying the subtitles by 1 second:
@example
ffmpeg -i input.ts -filter_complex \
'[#0x2ef] setpts=PTS+1/TB [sub] ; [#0x2d0] [sub] overlay' \
-sn -map '#0x2dc' output.mkv
@end example
(0x2d0, 0x2dc and 0x2ef are the MPEG-TS PIDs of respectively the video,
audio and subtitles streams; 0:0, 0:3 and 0:7 would have worked too)
@section Preset files
A preset file contains a sequence of @var{option}=@var{value} pairs,
one for each line, specifying a sequence of options which would be
@@ -1050,15 +855,15 @@ First ffmpeg searches for a file named @var{arg}.ffpreset in the
directories @file{$FFMPEG_DATADIR} (if set), and @file{$HOME/.ffmpeg}, and in
the datadir defined at configuration time (usually @file{PREFIX/share/ffmpeg})
or in a @file{ffpresets} folder along the executable on win32,
in that order. For example, if the argument is @code{libvpx-1080p}, it will
search for the file @file{libvpx-1080p.ffpreset}.
in that order. For example, if the argument is @code{libx264-max}, it will
search for the file @file{libx264-max.ffpreset}.
If no such file is found, then ffmpeg will search for a file named
@var{codec_name}-@var{arg}.ffpreset in the above-mentioned
directories, where @var{codec_name} is the name of the codec to which
the preset file options will be applied. For example, if you select
the video codec with @code{-vcodec libvpx} and use @code{-vpre 1080p},
then it will search for the file @file{libvpx-1080p.ffpreset}.
the video codec with @code{-vcodec libx264} and use @code{-vpre max},
then it will search for the file @file{libx264-max.ffpreset}.
@c man end OPTIONS
@chapter Tips
@@ -1275,15 +1080,17 @@ composed of three digits padded with zeroes to express the sequence
number. It is the same syntax supported by the C printf function, but
only formats accepting a normal integer are suitable.
When importing an image sequence, -i also supports expanding
shell-like wildcard patterns (globbing) internally, by selecting the
image2-specific @code{-pattern_type glob} option.
For example, for creating a video from filenames matching the glob pattern
@code{foo-*.jpeg}:
@example
ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
@end example
When importing an image sequence, -i also supports expanding shell-like
wildcard patterns (globbing) internally. To lower the chance of interfering
with your actual file names and the shell's glob expansion, you are required
to activate glob meta characters by prefixing them with a single @code{%}
character, like in @code{foo-%*.jpeg}, @code{foo-%?%?%?.jpeg} or
@code{foo-00%[234%]%*.jpeg}.
If your filename actually contains a character sequence of a @code{%} character
followed by a glob character, you must double the @code{%} character to escape
it. Imagine your files begin with @code{%?-foo-}, then you could use a glob
pattern like @code{%%?-foo-%*.jpeg}. For input patterns that could be read as
either a printf format string or a glob pattern, ffmpeg will assume it is a glob pattern.
@item
You can put many streams of the same type in the output:


@@ -80,7 +80,7 @@ Use sexagesimal format HH:MM:SS.MICROSECONDS for time values.
Prettify the format of the displayed values, it corresponds to the
options "-unit -prefix -byte_binary_prefix -sexagesimal".
@item -of, -print_format @var{writer_name}[=@var{writer_options}]
@item -print_format @var{writer_name}[=@var{writer_options}]
Set the output printing format.
@var{writer_name} specifies the name of the writer, and
@@ -94,13 +94,6 @@ For example for printing the output in JSON format, specify:
For more details on the available output printing formats, see the
Writers section below.
@item -show_data
Show payload data, as an hexadecimal and ASCII dump. Coupled with
@option{-show_packets}, it will dump the packets' data. Coupled with
@option{-show_streams}, it will dump the codec extradata.
The dump is printed as the "data" field. It may contain newlines.
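For example, to dump the packets' payloads while probing (the input name is
purely illustrative):

@example
ffprobe -show_packets -show_data input.mpg
@end example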
@item -show_error
Show information about the error found when trying to probe the input.
@@ -170,10 +163,6 @@ Show information related to program and library versions. This is the
equivalent of setting both @option{-show_program_version} and
@option{-show_library_versions} options.
@item -bitexact
Force bitexact output, useful to produce output which is not dependent
on the specific build.
@item -i @var{input_file}
Read @var{input_file}.
@@ -222,11 +211,8 @@ If set to 1 specify not to print the section header and footer.
Default value is 0.
@end table
@section compact, csv
Compact and CSV format.
The @code{csv} writer is equivalent to @code{compact}, but supports
different defaults.
@section compact
Compact format.
Each section is printed on a single line.
If no option is specified, the output has the form:
@@ -247,23 +233,21 @@ The description of the accepted options follows.
@item item_sep, s
Specify the character to use for separating fields in the output line.
It must be a single printable character, it is "|" by default ("," for
the @code{csv} writer).
It must be a single printable character, it is "|" by default.
@item nokey, nk
If set to 1 specify not to print the key of each field. Its default
value is 0 (1 for the @code{csv} writer).
value is 0.
@item escape, e
Set the escape mode to use, default to "c" ("csv" for the @code{csv}
writer).
Set the escape mode to use, default to "c".
It can assume one of the following values:
@table @option
@item c
Perform C-like escaping. Strings containing a newline ('\n'), carriage
return ('\r'), a tab ('\t'), a form feed ('\f'), the escaping
character ('\') or the item separator character @var{SEP} are escaped using C-like fashioned
Perform C-like escaping. Strings containing a newline ('\n') or
carriage return ('\r'), the escaping character ('\') or the item
separator character @var{SEP} are escaped using C-like fashioned
escaping, so that a newline is converted to the sequence "\n", a
carriage return to "\r", '\' to "\\" and the separator @var{SEP} is
converted to "\@var{SEP}".
@@ -277,80 +261,13 @@ containing a newline ('\n'), a carriage return ('\r'), a double quote
Perform no escaping.
@end table
@item print_section, p
Print the section name at the begin of each line if the value is
@code{1}, disable it with value set to @code{0}. Default value is
@code{1}.
@end table
@section flat
Flat format.
@section csv
CSV format.
A free-form output where each line contains an explicit key=value, such as
"streams.stream.3.tags.foo=bar". The output is shell escaped, so it can be
directly embedded in sh scripts as long as the separator character is an
alphanumeric character or an underscore (see @var{sep_char} option).
This writer accepts options as a list of @var{key}=@var{value} pairs,
separated by ":".
The description of the accepted options follows.
@table @option
@item sep_char, s
Separator character used to separate the chapter, the section name, IDs and
potential tags in the printed field key.
Default value is '.'.
@item hierarchical, h
Specify if the section name specification should be hierarchical. If
set to 1, and if there is more than one section in the current
chapter, the section name will be prefixed by the name of the
chapter. A value of 0 will disable this behavior.
Default value is 1.
@end table
@section ini
INI format output.
Print output in an INI based format.
The following conventions are adopted:
@itemize
@item
all key and values are UTF-8
@item
'.' is the subgroup separator
@item
newline, '\t', '\f', '\b' and the following characters are escaped
@item
'\' is the escape character
@item
'#' is the comment indicator
@item
'=' is the key/value separator
@item
':' is not used but usually parsed as key/value separator
@end itemize
This writer accepts options as a list of @var{key}=@var{value} pairs,
separated by ":".
The description of the accepted options follows.
@table @option
@item hierarchical, h
Specify if the section name specification should be hierarchical. If
set to 1, and if there is more than one section in the current
chapter, the section name will be prefixed by the name of the
chapter. A value of 0 will disable this behavior.
Default value is 1.
@end table
This writer is equivalent to
@code{compact=item_sep=,:nokey=1:escape=csv}.
@section json
JSON based format.
@@ -425,7 +342,7 @@ MOV timecode is extracted from tmcd track, so is available in the tmcd
stream metadata (@option{-show_streams}, see @var{TAG:timecode}).
@item
DV, GXF and AVI timecodes are available in format metadata
DV and GXF timecodes are available in format metadata
(@option{-show_format}, see @var{TAG:timecode}).
@end itemize


@@ -39,12 +39,9 @@
<xsd:attribute name="dts_time" type="xsd:float" />
<xsd:attribute name="duration" type="xsd:long" />
<xsd:attribute name="duration_time" type="xsd:float" />
<xsd:attribute name="convergence_duration" type="xsd:long" />
<xsd:attribute name="convergence_duration_time" type="xsd:float" />
<xsd:attribute name="size" type="xsd:long" use="required" />
<xsd:attribute name="pos" type="xsd:long" />
<xsd:attribute name="flags" type="xsd:string" use="required" />
<xsd:attribute name="data" type="xsd:string" />
</xsd:complexType>
<xsd:complexType name="frameType">
@@ -56,15 +53,11 @@
<xsd:attribute name="pkt_pts_time" type="xsd:float"/>
<xsd:attribute name="pkt_dts" type="xsd:long" />
<xsd:attribute name="pkt_dts_time" type="xsd:float"/>
<xsd:attribute name="pkt_duration" type="xsd:long" />
<xsd:attribute name="pkt_duration_time" type="xsd:float"/>
<xsd:attribute name="pkt_pos" type="xsd:long" />
<!-- audio attributes -->
<xsd:attribute name="sample_fmt" type="xsd:string"/>
<xsd:attribute name="nb_samples" type="xsd:long" />
<xsd:attribute name="channels" type="xsd:int" />
<xsd:attribute name="channel_layout" type="xsd:string"/>
<!-- video attributes -->
<xsd:attribute name="width" type="xsd:long" />
@@ -87,19 +80,13 @@
</xsd:complexType>
<xsd:complexType name="streamType">
<xsd:sequence>
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
</xsd:sequence>
<xsd:attribute name="index" type="xsd:int" use="required"/>
<xsd:attribute name="codec_name" type="xsd:string" />
<xsd:attribute name="codec_long_name" type="xsd:string" />
<xsd:attribute name="profile" type="xsd:string" />
<xsd:attribute name="codec_type" type="xsd:string" />
<xsd:attribute name="codec_time_base" type="xsd:string" use="required"/>
<xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
<xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
<xsd:attribute name="extradata" type="xsd:string" />
<!-- video attributes -->
<xsd:attribute name="width" type="xsd:int"/>
@@ -121,9 +108,7 @@
<xsd:attribute name="r_frame_rate" type="xsd:string" use="required"/>
<xsd:attribute name="avg_frame_rate" type="xsd:string" use="required"/>
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
<xsd:attribute name="start_pts" type="xsd:long"/>
<xsd:attribute name="start_time" type="xsd:float"/>
<xsd:attribute name="duration_ts" type="xsd:long"/>
<xsd:attribute name="duration" type="xsd:float"/>
<xsd:attribute name="bit_rate" type="xsd:int"/>
<xsd:attribute name="nb_frames" type="xsd:int"/>
@@ -139,7 +124,7 @@
<xsd:attribute name="filename" type="xsd:string" use="required"/>
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
<xsd:attribute name="format_name" type="xsd:string" use="required"/>
<xsd:attribute name="format_long_name" type="xsd:string"/>
<xsd:attribute name="format_long_name" type="xsd:string" use="required"/>
<xsd:attribute name="start_time" type="xsd:float"/>
<xsd:attribute name="duration" type="xsd:float"/>
<xsd:attribute name="size" type="xsd:long"/>


@@ -12,7 +12,7 @@ Format negotiation
==================
The query_formats method should set, for each input and each output links,
the list of supported formats.
the list supported formats.
For video links, that means pixel format. For audio links, that means
channel layout, and sample format (the sample packing is implied by the
@@ -26,167 +26,14 @@ Format negotiation
references to the list are updated.
That means that if a filter requires that its input and output have the
same format amongst a supported list, all it has to do is use a reference
same format amongst a supported list, all it have to do is use a reference
to the same list of formats.
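A sketch of that technique follows; the helper names (ff_make_format_list,
ff_set_common_formats) are the internal libavfilter helpers as best recalled
for this period and should be treated as assumptions, not as a definitive API:

static int query_formats(AVFilterContext *ctx)
{
    /* PIX_FMT_NONE-terminated list of the formats this filter handles */
    static const enum PixelFormat pix_fmts[] = {
        PIX_FMT_YUV420P, PIX_FMT_GRAY8, PIX_FMT_NONE
    };
    /* one shared list for every input and output link: negotiation then
       necessarily settles on the same format on both sides */
    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
    return 0;
}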
Buffer references ownership and permissions
===========================================
Principle
---------
Audio and video data are voluminous; the buffer and buffer reference
mechanism is intended to avoid, as much as possible, expensive copies of
that data while still allowing the filters to produce correct results.
The data is stored in buffers represented by AVFilterBuffer structures.
They must not be accessed directly, but through references stored in
AVFilterBufferRef structures. Several references can point to the
same buffer; the buffer is automatically deallocated once all
corresponding references have been destroyed.
The characteristics of the data (resolution, sample rate, etc.) are
stored in the reference; different references for the same buffer can
show different characteristics. In particular, a video reference can
point to only a part of a video buffer.
A reference is usually obtained as input to the start_frame or
filter_samples method or requested using the ff_get_video_buffer or
ff_get_audio_buffer functions. A new reference on an existing buffer can
be created with the avfilter_ref_buffer. A reference is destroyed using
the avfilter_unref_bufferp function.
Reference ownership
-------------------
At any time, a reference “belongs” to a particular piece of code,
usually a filter. With a few caveats that will be explained below, only
that piece of code is allowed to access it. It is also responsible for
destroying it, although this is sometimes done automatically (see the
section on link reference fields).
Here are the (fairly obvious) rules for reference ownership; a short sketch
follows the list:
* A reference received by the start_frame or filter_samples method
belong to the corresponding filter.
Special exception: for video references: the reference may be used
internally for automatic copying and must not be destroyed before
end_frame; it can be given away to ff_start_frame.
* A reference passed to ff_start_frame or ff_filter_samples is given
away and must no longer be used.
* A reference created with avfilter_ref_buffer belongs to the code that
created it.
* A reference obtained with ff_get_video_buffer or ff_get_audio_buffer
belongs to the code that requested it.
* A reference given as return value by the get_video_buffer or
get_audio_buffer method is given away and must no longer be used.
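A sketch applying these rules in a pass-through-style filter_samples method;
the signatures are as recalled for this period and should be checked against
the actual headers:

static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    /* 'insamples' now belongs to this filter (first rule above) */
    AVFilterBufferRef *out = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
                                                 insamples->audio->nb_samples);
    /* ... fill out->data[] from insamples->data[] ... */
    /* 'out' is given away here and must not be used afterwards */
    ff_filter_samples(outlink, out);
    /* still our reference, so we are responsible for destroying it */
    avfilter_unref_bufferp(&insamples);
}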
Link reference fields
---------------------
The AVFilterLink structure has a few AVFilterBufferRef fields. Here are
the rules to handle them:
* cur_buf is set before the start_frame and filter_samples methods to
the same reference given as argument to the methods and belongs to the
destination filter of the link. If it has not been cleared after
end_frame or filter_samples, libavfilter will automatically destroy
the reference; therefore, any filter that needs to keep the reference
for longer must set cur_buf to NULL.
* out_buf belongs to the source filter of the link and can be used to
store a reference to the buffer that has been sent to the destination.
If it is not NULL after end_frame or filter_samples, libavfilter will
automatically destroy the reference.
If a video input pad does not have a start_frame method, the default
method will request a buffer on the first output of the filter, store
the reference in out_buf and push a second reference to the output.
* src_buf, cur_buf_copy and partial_buf are used by libavfilter
internally and must not be accessed by filters.
Reference permissions
---------------------
The AVFilterBufferRef structure has a perms field that describes what
the code that owns the reference is allowed to do to the buffer data.
Different references for the same buffer can have different permissions.
For video filters, the permissions only apply to the parts of the buffer
that have already been covered by the draw_slice method.
The value is a binary OR of the following constants:
* AV_PERM_READ: the owner can read the buffer data; this is essentially
always true and is there for self-documentation.
* AV_PERM_WRITE: the owner can modify the buffer data.
* AV_PERM_PRESERVE: the owner can rely on the fact that the buffer data
will not be modified by previous filters.
* AV_PERM_REUSE: the owner can output the buffer several times, without
modifying the data in between.
* AV_PERM_REUSE2: the owner can output the buffer several times and
modify the data in between (useless without the WRITE permissions).
* AV_PERM_ALIGN: the owner can access the data using fast operations
that require data alignment.
The READ, WRITE and PRESERVE permissions are about sharing the same
buffer between several filters to avoid expensive copies without them
doing conflicting changes on the data.
The REUSE and REUSE2 permissions are about special memory for direct
rendering. For example a buffer directly allocated in video memory must
not be modified once it is displayed on screen, or it will cause tearing;
it will therefore not have the REUSE2 permission.
The ALIGN permission is about extracting part of the buffer, for
copy-less padding or cropping for example.
References received on input pads are guaranteed to have all the
permissions stated in the min_perms field and none of the permissions
stated in the rej_perms.
References obtained by ff_get_video_buffer and ff_get_audio_buffer are
guaranteed to have at least all the permissions requested as argument.
References created by avfilter_ref_buffer have the same permissions as
the original reference minus the ones explicitly masked; the mask is
usually ~0 to keep the same permissions.
Filters should remove permissions on references they give to output
whenever necessary. This can be done automatically by setting the
rej_perms field on the output pad.
Here are a few guidelines corresponding to common situations:
* Filters that modify and forward their frame (like drawtext) need the
WRITE permission.
* Filters that read their input to produce a new frame on output (like
scale) need the READ permission on input and must request a buffer
with the WRITE permission.
* Filters that intend to keep a reference after the filtering process
is finished (after end_frame or filter_samples returns) must have the
PRESERVE permission on it and remove the WRITE permission if they
create a new reference to give it away.
* Filters that intend to modify a reference they have kept after the end
of the filtering process need the REUSE2 permission and must remove
the PRESERVE permission if they create a new reference to give it
away.
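As an illustration of these guidelines, here is a sketch of the common
"read input, produce a new frame" pattern. The AV_PERM_* constants and the
ff_get_video_buffer/ff_start_frame helpers are the ones described in this
document; outlink and the surrounding filter code are assumed context.

    AVFilterBufferRef *out;

    /* Request a writable, aligned buffer on the output link. */
    out = ff_get_video_buffer(outlink, AV_PERM_WRITE | AV_PERM_ALIGN,
                              outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);

    /* ... read from the input reference (requires AV_PERM_READ), write
     * the result into out, then give out away with
     * ff_start_frame(outlink, out) ... */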
TODO
Frame scheduling
@@ -223,7 +70,7 @@ Frame scheduling
request_frame method or the application.
If a filter has several inputs, the filter must be ready for frames
arriving randomly on any input. Therefore, any filter with several inputs
will most likely require some kind of queuing mechanism. It is perfectly
acceptable to have a limited queue and to drop frames when the inputs
are too unbalanced.
@@ -238,7 +85,7 @@ Frame scheduling
For a filter, if there are queued frames already ready, one of these
frames should be pushed. If not, the filter should request a frame on
one of its inputs, repeatedly until at least one frame has been pushed.
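A rough sketch of that logic, for a filter with an internal frame queue;
apart from ff_request_frame, the names are hypothetical helpers used only
for illustration:

    static int request_frame(AVFilterLink *outlink)
    {
        MyContext *s = outlink->src->priv;
        int ret;

        while (!queue_has_frame(s)) {
            ret = ff_request_frame(outlink->src->inputs[0]);
            if (ret < 0)
                return ret;   /* EOF or error from upstream */
        }
        /* Push exactly one queued frame to the output. */
        return push_one_frame(s, outlink);
    }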
Return values:
if request_frame could produce a frame, it should return 0;

File diff suppressed because it is too large.

@@ -26,8 +26,8 @@ instructions. To enable using OpenJPEG in FFmpeg, pass @code{--enable-libopenjp
@section OpenCORE and VisualOn libraries
Spun off from Google Android sources, the OpenCore, VisualOn and Fraunhofer
libraries provide encoders for a number of audio codecs.
@float NOTE
OpenCORE and VisualOn libraries are under the Apache License 2.0
@@ -63,14 +63,6 @@ Go to @url{http://sourceforge.net/projects/opencore-amr/} and follow the
instructions for installing the library.
Then pass @code{--enable-libvo-amrwbenc} to configure to enable it.
@subsection Fraunhofer AAC library
FFmpeg can make use of the Fraunhofer AAC library for AAC encoding.
Go to @url{http://sourceforge.net/projects/opencore-amr/} and follow the
instructions for installing the library.
Then pass @code{--enable-libfdk-aac} to configure to enable it.
@section LAME
FFmpeg can make use of the LAME library for MP3 encoding.
@@ -79,14 +71,6 @@ Go to @url{http://lame.sourceforge.net/} and follow the
instructions for installing the library.
Then pass @code{--enable-libmp3lame} to configure to enable it.
@section TwoLAME
FFmpeg can make use of the TwoLAME library for MP2 encoding.
Go to @url{http://www.twolame.org/} and follow the
instructions for installing the library.
Then pass @code{--enable-libtwolame} to configure to enable it.
@section libvpx
FFmpeg can make use of the libvpx library for VP8 encoding.
@@ -109,17 +93,6 @@ x264 is under the GNU Public License Version 2 or later
details), you must upgrade FFmpeg's license to GPL in order to use it.
@end float
@section libilbc
iLBC is a narrowband speech codec that has been made freely available
by Google as part of the WebRTC project. libilbc is a packaging-friendly
copy of the iLBC codec. FFmpeg can make use of the libilbc library for
iLBC encoding and decoding.
Go to @url{https://github.com/dekkers/libilbc} and follow the instructions for
installing the library. Then pass @code{--enable-libilbc} to configure to
enable it.
@chapter Supported File Formats, Codecs or Features
@@ -143,8 +116,6 @@ library:
@item American Laser Games MM @tab @tab X
@tab Multimedia format used in games like Mad Dog McCree.
@item 3GPP AMR @tab X @tab X
@item Amazing Studio Packed Animation File @tab @tab X
@tab Multimedia format used in game Heart Of Darkness.
@item Apple HTTP Live Streaming @tab @tab X
@item Artworx Data Format @tab @tab X
@item ASF @tab X @tab X
@@ -212,7 +183,7 @@ library:
@tab General eXchange Format SMPTE 360M, used by Thomson Grass Valley
playout servers.
@item iCEDraw File @tab @tab X
@item ICO @tab X @tab X
@tab Microsoft Windows ICO
@item id Quake II CIN video @tab @tab X
@item id RoQ @tab X @tab X
@@ -220,7 +191,6 @@ library:
@item IEC61937 encapsulation @tab X @tab X
@item IFF @tab @tab X
@tab Interchange File Format
@item iLBC @tab X @tab X
@item Interplay MVE @tab @tab X
@tab Format used in various Interplay computer games.
@item IV8 @tab @tab X
@@ -353,8 +323,6 @@ library:
@tab Multimedia format used by many games.
@item SMJPEG @tab X @tab X
@tab Used in certain Loki game ports.
@item Smush @tab @tab X
@tab Multimedia format used in some LucasArts games.
@item Sony OpenMG (OMA) @tab X @tab X
@tab Audio format used in Sony Sonic Stage and Sony Vegas.
@item Sony PlayStation STR @tab @tab X
@@ -369,7 +337,7 @@ library:
@item True Audio @tab @tab X
@item VC-1 test bitstream @tab X @tab X
@item WAV @tab X @tab X
@item WavPack @tab X @tab X
@item WebM @tab X @tab X
@item Windows Television (WTV) @tab X @tab X
@item Wing Commander III movie @tab @tab X
@@ -385,6 +353,7 @@ library:
@item eXtended BINary text (XBIN) @tab @tab X
@item YUV4MPEG pipe @tab X @tab X
@item Psygnosis YOP @tab @tab X
@item ZeroCodec Lossless Video @tab @tab X
@end multitable
@code{X} means that encoding (resp. decoding) is supported.
@@ -458,7 +427,6 @@ following image formats are supported:
@item 8SVX fibonacci @tab @tab X
@item A64 multicolor @tab X @tab
@tab Creates video suitable to be played on a Commodore 64 (multicolor mode).
@item Amazing Studio PAF Video @tab @tab X
@item American Laser Games MM @tab @tab X
@tab Used in games like Mad Dog McCree.
@item AMV Video @tab X @tab X
@@ -508,11 +476,9 @@ following image formats are supported:
@item Delphine Software International CIN video @tab @tab X
@tab Codec used in Delphine Software International games.
@item Discworld II BMV Video @tab @tab X
@item Canopus Lossless Codec @tab @tab X
@item Cinepak @tab @tab X
@item Cirrus Logic AccuPak @tab X @tab X
@tab fourcc: CLJR
@item CPiA Video Format @tab @tab X
@item Creative YUV (CYUV) @tab @tab X
@item DFA @tab @tab X
@tab Codec used in Chronomaster game.
@@ -582,18 +548,8 @@ following image formats are supported:
@item LCL (LossLess Codec Library) MSZH @tab @tab X
@item LCL (LossLess Codec Library) ZLIB @tab E @tab E
@item LOCO @tab @tab X
@item LucasArts Smush @tab @tab X
@tab Used in LucasArts games.
@item lossless MJPEG @tab X @tab X
@item Microsoft ATC Screen @tab @tab X
@tab Also known as Microsoft Screen 3.
@item Microsoft Expression Encoder Screen @tab @tab X
@tab Also known as Microsoft Titanium Screen 2.
@item Microsoft RLE @tab @tab X
@item Microsoft Screen 1 @tab @tab X
@tab Also known as Windows Media Video V7 Screen.
@item Microsoft Screen 2 @tab @tab X
@tab Also known as Windows Media Video V9 Screen.
@item Microsoft Video 1 @tab @tab X
@item Mimic @tab @tab X
@tab Used in MSN Messenger Webcam streams.
@@ -622,6 +578,8 @@ following image formats are supported:
@tab fourcc: VP60,VP61,VP62
@item VP8 @tab E @tab X
@tab fourcc: VP80, encoding supported through external library libvpx
@item planar RGB @tab @tab X
@tab fourcc: 8BPS
@item Prores @tab @tab X
@tab fourcc: apch,apcn,apcs,apco
@item Q-team QPEG @tab @tab X
@@ -661,13 +619,11 @@ following image formats are supported:
@tab fourcc: SP5X
@item TechSmith Screen Capture Codec @tab @tab X
@tab fourcc: TSCC
@item TechSmith Screen Capture Codec 2 @tab @tab X
@tab fourcc: TSC2
@item Theora @tab E @tab X
@tab encoding supported through external library libtheora
@item Tiertex Limited SEQ video @tab @tab X
@tab Codec used in DOS CD-ROM FlashBack game.
@item Ut Video @tab X @tab X
@item v210 QuickTime uncompressed 4:2:2 10-bit @tab X @tab X
@item v308 QuickTime uncompressed 4:4:4 @tab X @tab X
@item v408 QuickTime uncompressed 4:4:4:4 @tab X @tab X
@@ -691,7 +647,6 @@ following image formats are supported:
@item Psygnosis YOP Video @tab @tab X
@item yuv4 @tab X @tab X
@tab libquicktime uncompressed packed 4:2:0
@item ZeroCodec Lossless Video @tab @tab X
@item ZLIB @tab X @tab X
@tab part of LCL, encoder experimental
@item Zip Motion Blocks Video @tab X @tab X
@@ -760,7 +715,6 @@ following image formats are supported:
@tab encoding supported through external library libopencore-amrnb
@item AMR-WB @tab E @tab X
@tab encoding supported through external library libvo-amrwbenc
@item Amazing Studio PAF Audio @tab @tab X
@item Apple lossless audio @tab X @tab X
@tab QuickTime fourcc 'alac'
@item Atrac 1 @tab @tab X
@@ -794,9 +748,6 @@ following image formats are supported:
@tab encoding supported through external library libgsm
@item GSM Microsoft variant @tab E @tab X
@tab encoding supported through external library libgsm
@item IAC (Indeo Audio Coder) @tab @tab X
@item iLBC (Internet Low Bitrate Codec) @tab E @tab E
@tab encoding and decoding supported through external library libilbc
@item IMC (Intel Music Coder) @tab @tab X
@item MACE (Macintosh Audio Compression/Expansion) 3:1 @tab @tab X
@item MACE (Macintosh Audio Compression/Expansion) 6:1 @tab @tab X
@@ -806,7 +757,6 @@ following image formats are supported:
@tab Only versions 3.97-3.99 are supported.
@item MP1 (MPEG audio layer 1) @tab @tab IX
@item MP2 (MPEG audio layer 2) @tab IX @tab IX
@tab libtwolame can be used alternatively for encoding.
@item MP3 (MPEG audio layer 3) @tab E @tab IX
@tab encoding supported through external library LAME, ADU MP3 and MP3onMP4 also supported
@item MPEG-4 Audio Lossless Coding (ALS) @tab @tab X
@@ -863,8 +813,6 @@ following image formats are supported:
@item TrueHD @tab @tab X
@tab Used in HD-DVD and Blu-Ray discs.
@item TwinVQ (VQF flavor) @tab @tab X
@item VIMA @tab @tab X
@tab Used in LucasArts SMUSH animations.
@item Vorbis @tab E @tab X
@tab A native but very primitive encoder exists.
@item WavPack @tab @tab X
@@ -887,19 +835,14 @@ performance on systems without hardware floating point support).
@multitable @columnfractions .4 .1 .1 .1 .1
@item Name @tab Muxing @tab Demuxing @tab Encoding @tab Decoding
@item SSA/ASS @tab X @tab X @tab X @tab X
@item DVB @tab X @tab X @tab X @tab X
@item DVD @tab X @tab X @tab X @tab X
@item JACOsub @tab X @tab X @tab @tab X
@item MicroDVD @tab X @tab X @tab @tab X
@item PGS @tab @tab @tab @tab X
@item RealText @tab @tab X @tab @tab X
@item SAMI @tab @tab X @tab @tab X
@item SubRip (SRT) @tab X @tab X @tab X @tab X
@item SubViewer @tab @tab X @tab @tab X
@item 3GPP Timed Text @tab @tab @tab X @tab X
@item WebVTT @tab @tab X @tab @tab X
@item XSUB @tab @tab @tab X @tab X
@end multitable
@code{X} means that the feature is supported.
@@ -908,31 +851,19 @@ performance on systems without hardware floating point support).
@multitable @columnfractions .4 .1
@item Name @tab Support
@item Apple HTTP Live Streaming @tab X
@item file @tab X
@item Gopher @tab X
@item HLS @tab X
@item HTTP @tab X
@item HTTPS @tab X
@item MMSH @tab X
@item MMST @tab X
@item pipe @tab X
@item RTMP @tab X
@item RTMPE @tab X
@item RTMPS @tab X
@item RTMPT @tab X
@item RTMPTE @tab X
@item RTMPTS @tab X
@item RTP @tab X
@item SCTP @tab X
@item TCP @tab X
@item TLS @tab X
@item UDP @tab X
@end multitable
@code{X} means that the protocol is supported.
@code{E} means that support is provided through an external library.
@section Input/Output Devices
@@ -940,17 +871,13 @@ performance on systems without hardware floating point support).
@item Name @tab Input @tab Output
@item ALSA @tab X @tab X
@item BKTR @tab X @tab
@item caca @tab @tab X
@item DV1394 @tab X @tab
@item Lavfi virtual device @tab X @tab
@item Linux framebuffer @tab X @tab
@item JACK @tab X @tab
@item LIBCDIO @tab X
@item LIBDC1394 @tab X @tab
@item OpenAL @tab X
@item OSS @tab X @tab X
@item Pulseaudio @tab X @tab
@item SDL @tab @tab X
@item Video4Linux2 @tab X @tab
@item VfW capture @tab X @tab
@item X11 grabbing @tab X @tab
@@ -962,10 +889,9 @@ performance on systems without hardware floating point support).
@multitable @columnfractions .4 .1 .1
@item Codec/format @tab Read @tab Write
@item AVI @tab X @tab X
@item DV @tab X @tab X
@item GXF @tab X @tab X
@item MOV @tab X @tab X
@item MPEG1/2 @tab X @tab X
@item MXF @tab X @tab X
@end multitable


@@ -258,32 +258,6 @@ git commit
@end example
@chapter Git configuration
In order to simplify a few workflows, it is advisable to configure both
your personal Git installation and your local FFmpeg repository.
@section Personal Git installation
Add the following to your @file{~/.gitconfig} to help @command{git send-email}
and @command{git format-patch} detect renames:
@example
[diff]
renames = copy
@end example
@section Repository configuration
In order to have @command{git send-email} automatically send patches
to the ffmpeg-devel mailing list, add the following stanza
to @file{/path/to/ffmpeg/repository/.git/config}:
@example
[sendemail]
to = ffmpeg-devel@@ffmpeg.org
@end example
@chapter FFmpeg specific
@section Reverting broken commits
@@ -372,43 +346,6 @@ git checkout -b svn_23456 $SHA1
where @var{$SHA1} is the commit hash from the @command{git log} output.
@chapter pre-push checklist
Once you have a set of commits that you feel are ready for pushing,
work through the following checklist to double-check that everything is in
proper order. This list tries to be exhaustive. In case you are just
pushing a typo in a comment, some of the steps may be unnecessary.
Apply your common sense, but if in doubt, err on the side of caution.
First, make sure that the commits and branches you are going to push
match what you want pushed and that nothing is missing, extraneous or
wrong. You can see what will be pushed by running the git push command
with --dry-run first, and then inspecting the commits listed with
@command{git log -p 1234567..987654}. The @command{git status} command
may help in finding local changes that you have forgotten to add.
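For instance, using the example range from above:
@example
git push --dry-run
git log -p 1234567..987654
git status
@end example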
Next let the code pass through a full run of our testsuite.
@itemize
@item @command{make distclean}
@item @command{/path/to/ffmpeg/configure}
@item @command{make check}
@item if fate fails due to missing samples run @command{make fate-rsync} and retry
@end itemize
Make sure all your changes have been checked before pushing them; the
testsuite only checks against regressions, and that only to some extent. It
obviously does not check that newly added features/code work, unless you
have added a test for that (which is recommended).
Also note that every single commit should pass the test suite, not just
the result of a series of patches.
Once everything has passed, push the changes to your public ffmpeg clone and post a
merge request to ffmpeg-devel. You can also push them directly but this is not
recommended.
@chapter Server Issues
Contact the project admins @email{root@@ffmpeg.org} if you have technical


@@ -112,19 +112,6 @@ defaults to 0).
Set audio device number for devices with same name (starts at 0,
defaults to 0).
@item pixel_format
Select pixel format to be used by DirectShow. This may only be set when
the video codec is not set or set to rawvideo.
@item audio_buffer_size
Set audio device buffer size in milliseconds (which can directly
impact latency, depending on the device).
Defaults to using the audio device's
default buffer size (typically some multiple of 500ms).
Setting this value too low can degrade performance.
See also
@url{http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx}
@end table
@subsection Examples
@@ -192,59 +179,6 @@ ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
See also @url{http://linux-fbdev.sourceforge.net/}, and fbset(1).
@section iec61883
FireWire DV/HDV input device using libiec61883.
To enable this input device, you need libiec61883, libraw1394 and
libavc1394 installed on your system. Use the configure option
@code{--enable-libiec61883} to compile with the device enabled.
The iec61883 capture device supports capturing from a video device
connected via IEEE1394 (FireWire), using libiec61883 and the new Linux
FireWire stack (juju). This is the default DV/HDV input method in Linux
Kernel 2.6.37 and later, since the old FireWire stack was removed.
Specify the FireWire port to be used as input file, or "auto"
to choose the first port connected.
@subsection Options
@table @option
@item dvtype
Override autodetection of DV/HDV. This should only be used if auto
detection does not work, or if usage of a different device type
should be prohibited. Treating a DV device as HDV (or vice versa) will
not work and will result in undefined behavior.
The values @option{auto}, @option{dv} and @option{hdv} are supported.
@item dvbuffer
Set maximum size of buffer for incoming data, in frames. For DV, this
is an exact value. For HDV, it is not frame exact, since HDV does
not have a fixed frame size.
@end table
@subsection Examples
@itemize
@item
Grab and show the input of a FireWire DV/HDV device.
@example
ffplay -f iec61883 -i auto
@end example
@item
Grab and record the input of a FireWire DV/HDV device,
using a packet buffer of 100000 packets if the source is HDV.
@example
ffmpeg -f iec61883 -i auto -dvbuffer 100000 out.mpg
@end example
@end itemize
@section jack
JACK input device.
@@ -643,23 +577,17 @@ properties of your X11 display (e.g. grep for "name" or "dimensions").
For example to grab from @file{:0.0} using @command{ffmpeg}:
@example
ffmpeg -f x11grab -r 25 -s cif -i :0.0 out.mpg
@end example
Grab at position @code{10,20}:
@example
ffmpeg -f x11grab -r 25 -s cif -i :0.0+10,20 out.mpg
@end example
@subsection Options
@table @option
@item draw_mouse
Specify whether to draw the mouse pointer. A value of @code{0} specifies
not to draw the pointer. Default value is @code{1}.
@item follow_mouse
Make the grabbed area follow the mouse. The argument can be
@code{centered} or a number of pixels @var{PIXELS}.
The syntax is:
@example
-follow_mouse centered|@var{PIXELS}
@end example
When it is specified with "centered", the grabbing region follows the mouse
pointer and keeps the pointer at the center of region; otherwise, the region
@@ -669,36 +597,28 @@ zero) to the edge of region.
For example:
@example
ffmpeg -f x11grab -follow_mouse centered -r 25 -s cif -i :0.0 out.mpg
@end example
To follow only when the mouse pointer reaches within 100 pixels of the edge:
@example
ffmpeg -f x11grab -follow_mouse 100 -r 25 -s cif -i :0.0 out.mpg
@end example
@item framerate
Set the grabbing frame rate. Default value is @code{ntsc},
corresponding to a framerate of @code{30000/1001}.
@item show_region
Show grabbed region on screen.
The syntax is:
@example
-show_region 1
@end example
If @var{show_region} is specified with @code{1}, then the grabbing
region will be indicated on screen. With this option, it is easy to
know what is being grabbed if only a portion of the screen is grabbed.
For example:
@example
ffmpeg -f x11grab -show_region 1 -r 25 -s cif -i :0.0+10,20 out.mpg
@end example
With @var{follow_mouse}:
@example
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -r 25 -s cif -i :0.0 out.mpg
@end example
@item video_size
Set the video frame size. Default value is @code{vga}.
@end table
@c man end INPUT DEVICES

doc/libavfilter.texi (new file)

@@ -0,0 +1,92 @@
\input texinfo @c -*- texinfo -*-
@settitle Libavfilter Documentation
@titlepage
@center @titlefont{Libavfilter Documentation}
@end titlepage
@top
@contents
@chapter Introduction
Libavfilter is the filtering API of FFmpeg. It is the substitute for the
now deprecated 'vhooks' and started as a Google Summer of Code project.
Audio filtering integration into the main FFmpeg repository is a work in
progress, so audio API and ABI should not be considered stable yet.
@chapter Tutorial
In libavfilter, it is possible for filters to have multiple inputs and
multiple outputs.
To illustrate the sorts of things that are possible, we can
use a complex filter graph. For example, the following one:
@example
input --> split --> fifo -----------------------> overlay --> output
            |                                        ^
            |                                        |
            +------> fifo --> crop --> vflip --------+
@end example
splits the stream into two streams, sends one stream through the crop filter
and the vflip filter before merging it back with the other stream by
overlaying it on top. You can use the following command to achieve this:
@example
ffmpeg -i input -vf "[in] split [T1], fifo, [T2] overlay=0:H/2 [out]; [T1] fifo, crop=iw:ih/2:0:ih/2, vflip [T2]" output
@end example
The result will be that in output the top half of the video is mirrored
onto the bottom half.
Video filters are loaded using the @var{-vf} option passed to
@command{ffmpeg} or to @command{ffplay}. Filters in the same linear
chain are separated by commas. In our example, @var{split, fifo,
overlay} are in one linear chain, and @var{fifo, crop, vflip} are in
another. The points where the linear chains join are labeled by names
enclosed in square brackets. In our example, that is @var{[T1]} and
@var{[T2]}. The magic labels @var{[in]} and @var{[out]} are the points
where video is input and output.
Some filters take a list of parameters as input: they are specified
after the filter name and an equal sign, and are separated from each
other by a colon.
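For example:
@example
scale=200:100
@end example
instructs the @code{scale} filter to scale its input to 200x100 pixels, the
two parameters being the output width and height.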
There exist so-called @var{source filters} that do not have a video
input, and we expect in the future some @var{sink filters} that will
not have video output.
@chapter graph2dot
The @file{graph2dot} program included in the FFmpeg @file{tools}
directory can be used to parse a filter graph description and issue a
corresponding textual representation in the dot language.
Invoke the command:
@example
graph2dot -h
@end example
to see how to use @file{graph2dot}.
You can then pass the dot description to the @file{dot} program (from
the graphviz suite of programs) and obtain a graphical representation
of the filter graph.
For example the sequence of commands:
@example
echo @var{GRAPH_DESCRIPTION} | \
tools/graph2dot -o graph.tmp && \
dot -Tpng graph.tmp -o graph.png && \
display graph.png
@end example
can be used to create and display an image representing the graph
described by the @var{GRAPH_DESCRIPTION} string.
@include filters.texi
@bye


@@ -1,65 +0,0 @@
MIPS optimizations info
===============================================
MIPS optimizations of codecs target the MIPS 74k family of
CPUs. Some of these optimizations rely heavily on properties of
this architecture and some rely on them less (and can be used on most
MIPS architectures without degradation in performance).
Along with the FFmpeg copyright notice, there is a MIPS copyright notice in
all the files that were created by people from MIPS Technologies.
Example of copyright notice:
===============================================
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Author Name (author_name@@mips.com)
*/
Files that have MIPS copyright notice in them:
===============================================
* libavutil/mips/
libm_mips.h
* libavcodec/mips/
acelp_filters_mips.c
acelp_vectors_mips.c
amrwbdec_mips.c
amrwbdec_mips.h
celp_filters_mips.c
celp_math_mips.c
compute_antialias_fixed.h
compute_antialias_float.h
lsp_mips.h
dsputil_mips.c
fft_mips.c
fft_table.h
fft_init_table.c
fmtconvert_mips.c
mpegaudiodsp_mips_fixed.c
mpegaudiodsp_mips_float.c


@@ -129,39 +129,6 @@ ffmpeg -i INPUT -f framemd5 -
See also the @ref{md5} muxer.
@anchor{ico}
@section ico
ICO file muxer.
Microsoft's icon file format (ICO) has some strict limitations that should be noted:
@itemize
@item
Size cannot exceed 256 pixels in any dimension
@item
Only BMP and PNG images can be stored
@item
If a BMP image is used, it must be one of the following pixel formats:
@example
BMP Bit Depth FFmpeg Pixel Format
1bit pal8
4bit pal8
8bit pal8
16bit rgb555le
24bit bgr24
32bit bgra
@end example
@item
If a BMP image is used, it must use the BITMAPINFOHEADER DIB header
@item
If a PNG image is used, it must use the rgba pixel format
@end itemize
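For example, assuming a PNG input no larger than 256x256 (the file names
are illustrative), the following command should produce a valid ICO file:
@example
ffmpeg -i icon.png out.ico
@end example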
@anchor{image2}
@section image2
@@ -444,7 +411,7 @@ For example a 3D WebM clip can be created using the following command line:
ffmpeg -i sample_left_right_clip.mpg -an -c:v libvpx -metadata stereo_mode=left_right -y stereo_clip.webm
@end example
@section segment, stream_segment, ssegment
Basic stream segmenter.
@@ -452,166 +419,30 @@ The segmenter muxer outputs streams to a number of separate files of nearly
fixed duration. Output filename pattern can be set in a fashion similar to
@ref{image2}.
@code{stream_segment} is a variant of the muxer used to write to
streaming output formats, i.e. which do not require global headers,
and is recommended for outputting e.g. to MPEG transport stream segments.
@code{ssegment} is a shorter alias for @code{stream_segment}.
Every segment starts with a video keyframe, if a video stream is present.
Note that if you want accurate splitting for a video file, you need to
make the input key frames correspond to the exact splitting times
expected by the segmenter, or the segment muxer will start the new
segment with the key frame found next after the specified start
time.
The segment muxer works best with a single constant frame rate video.
Optionally it can generate a list of the created segments, by setting
the option @var{segment_list}. The list type is specified by the
@var{segment_list_type} option.
The segment muxer supports the following options:
@table @option
@item segment_format @var{format}
Override the inner container format, by default it is guessed by the filename
extension.
@item segment_list @var{name}
Generate also a listfile named @var{name}. If not specified no
listfile is generated.
@item segment_list_flags @var{flags}
Set flags affecting the segment list generation.
It currently supports the following flags:
@table @var
@item cache
Allow caching (only affects M3U8 list files).
@item live
Allow live-friendly file generation.
This currently only affects M3U8 lists. In particular, write a fake
EXT-X-TARGETDURATION duration field at the top of the file, based on
the specified @var{segment_time}.
@end table
Default value is @code{cache}.
@item segment_list_size @var{size}
Overwrite the listfile once it reaches @var{size} entries. If 0
the listfile is never overwritten. Default value is 0.
@item segment_list_type @var{type}
Specify the format for the segment list file.
The following values are recognized:
@table @option
@item flat
Generate a flat list for the created segments, one segment per line.
@item csv, ext
Generate a list for the created segments, one segment per line,
each line matching the format (comma-separated values):
@example
@var{segment_filename},@var{segment_start_time},@var{segment_end_time}
@end example
@var{segment_filename} is the name of the output file generated by the
muxer according to the provided pattern. CSV escaping (according to
RFC4180) is applied if required.
@var{segment_start_time} and @var{segment_end_time} specify
the segment start and end time expressed in seconds.
A list file with the suffix @code{".csv"} or @code{".ext"} will
auto-select this format.
@code{ext} is deprecated in favor of @code{csv}.
@item m3u8
Generate an extended M3U8 file, version 4, compliant with
@url{http://tools.ietf.org/id/draft-pantos-http-live-streaming-08.txt}.
A list file with the suffix @code{".m3u8"} will auto-select this format.
@end table
If not specified the type is guessed from the list file name suffix.
@item segment_time @var{time}
Set segment duration to @var{time}. Default value is "2".
@item segment_time_delta @var{delta}
Specify the accuracy time when selecting the start time for a
segment. Default value is "0".
When delta is specified a key-frame will start a new segment if its
PTS satisfies the relation:
@example
PTS >= start_time - time_delta
@end example
This option is useful when splitting video content, which is always
split at GOP boundaries, in case a key frame is found just before the
specified split time.
In particular it may be used in combination with the @file{ffmpeg} option
@var{force_key_frames}. The key frame times specified by
@var{force_key_frames} may not be set accurately because of rounding
issues, with the consequence that a key frame time may end up set just
before the specified time. For constant frame rate videos a value of
1/2*@var{frame_rate} should address the worst case mismatch between
the specified time and the time set by @var{force_key_frames}.
@item segment_times @var{times}
Specify a list of split points. @var{times} contains a list of comma
separated duration specifications, in increasing order.
@item segment_wrap @var{limit}
Wrap around segment index once it reaches @var{limit}.
@end table
Some examples follow.
@itemize
@item
To remux the content of file @file{in.mkv} to a list of segments
@file{out-000.nut}, @file{out-001.nut}, etc., and write the list of
generated segments to @file{out.list}:
@example
ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.list out%03d.nut
@end example
@item
As in the example above, but segment the input file according to the split
points specified by the @var{segment_times} option:
@example
ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_times 1,2,3,5,8,13,21 out%03d.nut
@end example
@item
As in the example above, but use the @code{ffmpeg} @var{force_key_frames}
option to force key frames in the input at the specified locations, together
with the segment option @var{segment_time_delta} to account for
possible rounding performed when setting key frame times.
@example
ffmpeg -i in.mkv -force_key_frames 1,2,3,5,8,13,21 -vcodec mpeg4 -acodec pcm_s16le -map 0 \
-f segment -segment_list out.csv -segment_times 1,2,3,5,8,13,21 -segment_time_delta 0.05 out%03d.nut
@end example
In order to force key frames on the input file, transcoding is
required.
@item
To convert the @file{in.mkv} to TS segments using the @code{libx264}
and @code{libfaac} encoders:
@example
ffmpeg -i in.mkv -map 0 -codec:v libx264 -codec:a libfaac -f ssegment -segment_list out.list out%03d.ts
@end example
@item
Segment the input file, and create an M3U8 live playlist (can be used
as live HLS source):
@example
ffmpeg -re -i in.mkv -codec copy -map 0 -f segment -segment_list playlist.m3u8 \
-segment_list_flags +live -segment_time 10 out%03d.mkv
@end example
@end itemize
@section mp3
The MP3 muxer writes a raw MP3 stream with an ID3v2 header at the beginning and


@@ -22,88 +22,6 @@ A description of the currently available output devices follows.
ALSA (Advanced Linux Sound Architecture) output device.
@section caca
CACA output device.
This output device allows one to show a video stream in a CACA window.
Only one CACA window is allowed per application, so you can
have only one instance of this output device in an application.
To enable this output device you need to configure FFmpeg with
@code{--enable-libcaca}.
libcaca is a graphics library that outputs text instead of pixels.
For more information about libcaca, check:
@url{http://caca.zoy.org/wiki/libcaca}
@subsection Options
@table @option
@item window_title
Set the CACA window title. If not specified it defaults to the filename
specified for the output device.
@item window_size
Set the CACA window size, can be a string of the form
@var{width}x@var{height} or a video size abbreviation.
If not specified it defaults to the size of the input video.
@item driver
Set display driver.
@item algorithm
Set dithering algorithm. Dithering is necessary
because the picture being rendered usually has far more colours than
the available palette.
The accepted values are listed with @code{-list_dither algorithms}.
@item antialias
Set antialias method. Antialiasing smooths the rendered
image and avoids the commonly seen staircase effect.
The accepted values are listed with @code{-list_dither antialiases}.
@item charset
Set which characters are going to be used when rendering text.
The accepted values are listed with @code{-list_dither charsets}.
@item color
Set color to be used when rendering text.
The accepted values are listed with @code{-list_dither colors}.
@item list_drivers
If set to @option{true}, print a list of available drivers and exit.
@item list_dither
List available dither options related to the argument.
The argument must be one of @code{algorithms}, @code{antialiases},
@code{charsets}, @code{colors}.
@end table
@subsection Examples
@itemize
@item
The following command shows the @command{ffmpeg} output in a
CACA window, forcing its size to 80x25:
@example
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt rgb24 -window_size 80x25 -f caca -
@end example
@item
Show the list of available drivers and exit:
@example
ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_drivers true -
@end example
@item
Show the list of available dither colors and exit:
@example
ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_dither colors -
@end example
@end itemize
@section oss
OSS (Open Sound System) output device.
@@ -137,8 +55,7 @@ to the same value of @var{window_title}.
@item window_size
Set the SDL window size, can be a string of the form
@var{width}x@var{height} or a video size abbreviation.
If not specified it defaults to the size of the input video,
downscaled according to the aspect ratio.
@end table
@subsection Examples


@@ -318,9 +318,24 @@ following "Devel" ones:
binutils, gcc4-core, make, git, mingw-runtime, texi2html
@end example
In order to run FATE you will also need the following "Utils" packages:
@example
bc, diffutils
@end example
Then run
@example
./configure
@end example
to make a static build.
To build shared libraries add a special compiler flag to work around current
@code{gcc4-core} package bugs in addition to the normal configure flags:
@example
./configure --enable-shared --disable-static --extra-cflags=-fno-reorder-functions
@end example
If you want to build FFmpeg with additional libraries, download Cygwin


@@ -194,7 +194,7 @@ content across a TCP/IP network.
The required syntax is:
@example
rtmp://@var{server}[:@var{port}][/@var{app}][/@var{instance}][/@var{playpath}]
@end example
The accepted parameters are:
@@ -209,88 +209,11 @@ The number of the TCP port to use (by default is 1935).
@item app
It is the name of the application to access. It usually corresponds to
the path where the application is installed on the RTMP server
(e.g. @file{/ondemand/}, @file{/flash/live/}, etc.). You can override
the value parsed from the URI through the @code{rtmp_app} option, too.
@item playpath
It is the path or name of the resource to play with reference to the
application specified in @var{app}, may be prefixed by "mp4:". You
can override the value parsed from the URI through the @code{rtmp_playpath}
option, too.
@item listen
Act as a server, listening for an incoming connection.
@item timeout
Maximum time to wait for the incoming connection. Implies listen.
@end table
Additionally, the following parameters can be set via command line options
(or in code via @code{AVOption}s):
@table @option
@item rtmp_app
Name of application to connect on the RTMP server. This option
overrides the parameter specified in the URI.
@item rtmp_buffer
Set the client buffer time in milliseconds. The default is 3000.
@item rtmp_conn
Extra arbitrary AMF connection parameters, parsed from a string,
e.g. like @code{B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0}.
Each value is prefixed by a single character denoting the type,
B for Boolean, N for number, S for string, O for object, or Z for null,
followed by a colon. For Booleans the data must be either 0 or 1 for
FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or
1 to end or begin an object, respectively. Data items in subobjects may
be named, by prefixing the type with 'N' and specifying the name before
the value (i.e. @code{NB:myFlag:1}). This option may be used multiple
times to construct arbitrary AMF sequences.
@item rtmp_flashver
Version of the Flash plugin used to run the SWF player. The default
is LNX 9,0,124,2.
@item rtmp_flush_interval
Number of packets flushed in the same request (RTMPT only). The default
is 10.
@item rtmp_live
Specify that the media is a live stream. No resuming or seeking in
live streams is possible. The default value is @code{any}, which means the
subscriber first tries to play the live stream specified in the
playpath. If a live stream of that name is not found, it plays the
recorded stream. The other possible values are @code{live} and
@code{recorded}.
@item rtmp_pageurl
URL of the web page in which the media was embedded. By default no
value will be sent.
@item rtmp_playpath
Stream identifier to play or to publish. This option overrides the
parameter specified in the URI.
@item rtmp_subscribe
Name of live stream to subscribe to. By default no value will be sent.
It is only sent if the option is specified or if rtmp_live
is set to live.
@item rtmp_swfhash
SHA256 hash of the decompressed SWF file (32 bytes).
@item rtmp_swfsize
Size of the decompressed SWF file, required for SWFVerification.
@item rtmp_swfurl
URL of the SWF player for the media. By default no value will be sent.
@item rtmp_swfverify
URL to player swf file, compute hash/size automatically.
@item rtmp_tcurl
URL of the target stream. Defaults to proto://host[:port]/app.
@end table
@@ -300,46 +223,6 @@ For example to read with @command{ffplay} a multimedia resource named
ffplay rtmp://myserver/vod/sample
@end example
@section rtmpe
Encrypted Real-Time Messaging Protocol.
The Encrypted Real-Time Messaging Protocol (RTMPE) is used for
streaming multimedia content within standard cryptographic primitives,
consisting of Diffie-Hellman key exchange and HMACSHA256, generating
a pair of RC4 keys.
@section rtmps
Real-Time Messaging Protocol over a secure SSL connection.
The Real-Time Messaging Protocol (RTMPS) is used for streaming
multimedia content across an encrypted connection.
@section rtmpt
Real-Time Messaging Protocol tunneled through HTTP.
The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used
for streaming multimedia content within HTTP requests to traverse
firewalls.
@section rtmpte
Encrypted Real-Time Messaging Protocol tunneled through HTTP.
The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE)
is used for streaming multimedia content within HTTP requests to traverse
firewalls.
@section rtmpts
Real-Time Messaging Protocol tunneled through HTTPS.
The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used
for streaming multimedia content within HTTPS requests to traverse
firewalls.
@section rtmp, rtmpe, rtmps, rtmpt, rtmpte
Real-Time Messaging Protocol and its variants supported through
@@ -432,8 +315,6 @@ Flags for @code{rtsp_flags}:
@table @option
@item filter_src
Accept packets only from negotiated peer address and port.
@item listen
Act as a server, listening for an incoming connection.
@end table
When receiving data over UDP, the demuxer tries to reorder received packets
@@ -466,12 +347,6 @@ To send a stream in realtime to a RTSP server, for others to watch:
ffmpeg -re -i @var{input} -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
@end example
To receive a stream in realtime:
@example
ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp @var{output}
@end example
@section sap
Session Announcement Protocol (RFC 2974). This is not technically a
@@ -578,11 +453,6 @@ tcp://@var{hostname}:@var{port}[?@var{options}]
@item listen
Listen for an incoming connection
@item timeout=@var{microseconds}
In read mode: if no data arrives within this time interval, raise an error.
In write mode: if the socket cannot be written within this time interval, raise an error.
This also sets the timeout for establishing the TCP connection.
@example
ffmpeg -i @var{input} -f @var{format} tcp://@var{hostname}:@var{port}?listen
ffplay tcp://@var{hostname}:@var{port}
@@ -590,48 +460,6 @@ ffplay tcp://@var{hostname}:@var{port}
@end table
@section tls
Transport Layer Security/Secure Sockets Layer
The required syntax for a TLS/SSL url is:
@example
tls://@var{hostname}:@var{port}[?@var{options}]
@end example
@table @option
@item listen
Act as a server, listening for an incoming connection.
@item cafile=@var{filename}
Certificate authority file. The file must be in OpenSSL PEM format.
@item cert=@var{filename}
Certificate file. The file must be in OpenSSL PEM format.
@item key=@var{filename}
Private key file.
@item verify=@var{0|1}
Verify the peer's certificate.
@end table
Example command lines:
To create a TLS/SSL server that serves an input stream.
@example
ffmpeg -i @var{input} -f @var{format} tls://@var{hostname}:@var{port}?listen&cert=@var{server.crt}&key=@var{server.key}
@end example
To play back a stream from the TLS/SSL server using @command{ffplay}:
@example
ffplay tls://@var{hostname}:@var{port}
@end example
@section udp
User Datagram Protocol.
@@ -641,23 +469,16 @@ The required syntax for a UDP url is:
udp://@var{hostname}:@var{port}[?@var{options}]
@end example
@var{options} contains a list of &-separated options of the form @var{key}=@var{val}.
In case threading is enabled on the system, a circular buffer is used
to store the incoming data, which helps to reduce loss of data due to
UDP socket buffer overruns. The @var{fifo_size} and
@var{overrun_nonfatal} options are related to this buffer.
The list of supported options follows.
@table @option
@item buffer_size=@var{size}
Set the UDP socket buffer size in bytes. This is used both for the
receiving and the sending buffer size.
@item localport=@var{port}
Override the local UDP port to bind with.
@item localaddr=@var{addr}
Choose the local IP address. This is useful e.g. if sending multicast
@@ -665,13 +486,13 @@ and the host has multiple interfaces, where the user can choose
which interface to send on by specifying the IP address of that interface.
@item pkt_size=@var{size}
Set the size in bytes of UDP packets.
@item reuse=@var{1|0}
Explicitly allow or disallow reusing UDP sockets.
@item ttl=@var{ttl}
Set the time to live value (for multicast only).
@item connect=@var{1|0}
Initialize the UDP socket with @code{connect()}. In this case, the
@@ -683,28 +504,9 @@ and makes writes return with AVERROR(ECONNREFUSED) if "destination
unreachable" is received.
For receiving, this gives the benefit of only receiving packets from
the specified peer address/port.
@item sources=@var{address}[,@var{address}]
Only receive packets sent to the multicast group from one of the
specified sender IP addresses.
@item block=@var{address}[,@var{address}]
Ignore packets sent to the multicast group from the specified
sender IP addresses.
@item fifo_size=@var{units}
Set the UDP receiving circular buffer size, expressed as a number of
packets with size of 188 bytes. If not specified defaults to 7*4096.
@item overrun_nonfatal=@var{1|0}
Survive in case of UDP receiving circular buffer overrun. Default
value is 0.
@item timeout=@var{microseconds}
In read mode: if no data arrives within this time interval, raise an error.
@end table
Some usage examples of the UDP protocol with @command{ffmpeg} follow.
To stream over UDP to a remote endpoint:
@example
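ffmpeg -i @var{input} -f @var{format} udp://@var{hostname}:@var{port}
@end example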


@@ -144,7 +144,7 @@ The undefined value can be expressed using the "0:0" string.
@section Color
It can be the name of a color (case insensitive match) or a
[0x|#]RRGGBB[AA] sequence, possibly followed by "@@" and a string
[0x|#]RRGGBB[AA] sequence, possibly followed by "@" and a string
representing the alpha component.
The alpha component may be a string composed by "0x" followed by an


@@ -158,9 +158,6 @@ $AFTER_BODY_OPEN
EOT
}
# declare encoding in header
$IN_ENCODING = $ENCODING = "utf-8";
# no navigation elements
$SECTION_NAVIGATION = 0;
# the same for texi2html 5.0


@@ -1,4 +1,4 @@
#! /usr/bin/perl
# Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
@@ -23,8 +23,6 @@
# markup to Perl POD format. It's intended to be used to extract
# something suitable for a manpage from a Texinfo document.
use warnings;
$output = 0;
$skipping = 0;
%sects = ();

ffmpeg.c
File diff suppressed because it is too large.

ffmpeg.h

@@ -1,409 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef FFMPEG_H
#define FFMPEG_H
#include "config.h"
#include <stdint.h>
#include <stdio.h>
#include <signal.h>
#if HAVE_PTHREADS
#include <pthread.h>
#endif
#include "cmdutils.h"
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavcodec/avcodec.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/avfiltergraph.h"
#include "libavutil/avutil.h"
#include "libavutil/dict.h"
#include "libavutil/fifo.h"
#include "libavutil/pixfmt.h"
#include "libavutil/rational.h"
#include "libswresample/swresample.h"
#define VSYNC_AUTO        -1   /* choose a sync method automatically */
#define VSYNC_PASSTHROUGH  0   /* pass frame timestamps through unchanged */
#define VSYNC_CFR          1   /* constant frame rate: duplicate or drop frames as needed */
#define VSYNC_VFR          2   /* variable frame rate: drop duplicate-timestamp frames only */
#define VSYNC_DROP         0xff /* discard timestamps, let the muxer generate new ones */
#define MAX_STREAMS 1024 /* arbitrary sanity check value */
/* select an input stream for an output stream */
typedef struct StreamMap {
int disabled; /** 1 if this mapping is disabled by a negative map */
int file_index;
int stream_index;
int sync_file_index;
int sync_stream_index;
char *linklabel; /** name of an output link, for mapping lavfi outputs */
} StreamMap;
typedef struct {
int file_idx, stream_idx, channel_idx; // input
int ofile_idx, ostream_idx; // output
} AudioChannelMap;
typedef struct OptionsContext {
/* input/output options */
int64_t start_time;
const char *format;
SpecifierOpt *codec_names;
int nb_codec_names;
SpecifierOpt *audio_channels;
int nb_audio_channels;
SpecifierOpt *audio_sample_rate;
int nb_audio_sample_rate;
SpecifierOpt *frame_rates;
int nb_frame_rates;
SpecifierOpt *frame_sizes;
int nb_frame_sizes;
SpecifierOpt *frame_pix_fmts;
int nb_frame_pix_fmts;
/* input options */
int64_t input_ts_offset;
int rate_emu;
SpecifierOpt *ts_scale;
int nb_ts_scale;
SpecifierOpt *dump_attachment;
int nb_dump_attachment;
/* output options */
StreamMap *stream_maps;
int nb_stream_maps;
AudioChannelMap *audio_channel_maps; /* one info entry per -map_channel */
int nb_audio_channel_maps; /* number of (valid) -map_channel settings */
int metadata_global_manual;
int metadata_streams_manual;
int metadata_chapters_manual;
const char **attachments;
int nb_attachments;
int chapters_input_file;
int64_t recording_time;
uint64_t limit_filesize;
float mux_preload;
float mux_max_delay;
int shortest;
int video_disable;
int audio_disable;
int subtitle_disable;
int data_disable;
/* indexed by output file stream index */
int *streamid_map;
int nb_streamid_map;
SpecifierOpt *metadata;
int nb_metadata;
SpecifierOpt *max_frames;
int nb_max_frames;
SpecifierOpt *bitstream_filters;
int nb_bitstream_filters;
SpecifierOpt *codec_tags;
int nb_codec_tags;
SpecifierOpt *sample_fmts;
int nb_sample_fmts;
SpecifierOpt *qscale;
int nb_qscale;
SpecifierOpt *forced_key_frames;
int nb_forced_key_frames;
SpecifierOpt *force_fps;
int nb_force_fps;
SpecifierOpt *frame_aspect_ratios;
int nb_frame_aspect_ratios;
SpecifierOpt *rc_overrides;
int nb_rc_overrides;
SpecifierOpt *intra_matrices;
int nb_intra_matrices;
SpecifierOpt *inter_matrices;
int nb_inter_matrices;
SpecifierOpt *top_field_first;
int nb_top_field_first;
SpecifierOpt *metadata_map;
int nb_metadata_map;
SpecifierOpt *presets;
int nb_presets;
SpecifierOpt *copy_initial_nonkeyframes;
int nb_copy_initial_nonkeyframes;
SpecifierOpt *copy_prior_start;
int nb_copy_prior_start;
SpecifierOpt *filters;
int nb_filters;
SpecifierOpt *fix_sub_duration;
int nb_fix_sub_duration;
SpecifierOpt *pass;
int nb_pass;
SpecifierOpt *passlogfiles;
int nb_passlogfiles;
} OptionsContext;
typedef struct InputFilter {
AVFilterContext *filter;
struct InputStream *ist;
struct FilterGraph *graph;
uint8_t *name;
} InputFilter;
typedef struct OutputFilter {
AVFilterContext *filter;
struct OutputStream *ost;
struct FilterGraph *graph;
uint8_t *name;
/* temporary storage until stream maps are processed */
AVFilterInOut *out_tmp;
} OutputFilter;
typedef struct FilterGraph {
int index;
const char *graph_desc;
AVFilterGraph *graph;
InputFilter **inputs;
int nb_inputs;
OutputFilter **outputs;
int nb_outputs;
} FilterGraph;
typedef struct InputStream {
int file_index;
AVStream *st;
int discard; /* true if stream data should be discarded */
int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
AVCodec *dec;
AVFrame *decoded_frame;
int64_t start; /* time when read started */
/* predicted dts of the next packet read for this stream or (when there are
* several frames in a packet) of the next frame in current packet (in AV_TIME_BASE units) */
int64_t next_dts;
int64_t dts; ///< dts of the last packet read for this stream (in AV_TIME_BASE units)
int64_t next_pts; ///< synthetic pts for the next decode frame (in AV_TIME_BASE units)
int64_t pts; ///< current pts of the decoded frame (in AV_TIME_BASE units)
int wrap_correction_done;
double ts_scale;
int is_start; /* is 1 at the start and after a discontinuity */
int saw_first_ts;
int showed_multi_packet_warning;
AVDictionary *opts;
AVRational framerate; /* framerate forced with -r */
int top_field_first;
int resample_height;
int resample_width;
int resample_pix_fmt;
int resample_sample_fmt;
int resample_sample_rate;
int resample_channels;
uint64_t resample_channel_layout;
int fix_sub_duration;
struct { /* previous decoded subtitle and related variables */
int got_output;
int ret;
AVSubtitle subtitle;
} prev_sub;
struct sub2video {
int64_t last_pts;
AVFilterBufferRef *ref;
int w, h;
} sub2video;
/* a pool of free buffers for decoded data */
FrameBuffer *buffer_pool;
int dr1;
/* decoded data from this stream goes into all those filters
* currently video and audio only */
InputFilter **filters;
int nb_filters;
} InputStream;
typedef struct InputFile {
AVFormatContext *ctx;
int eof_reached; /* true if eof reached */
int eagain; /* true if last read attempt returned EAGAIN */
int ist_index; /* index of first stream in input_streams */
int64_t ts_offset;
int nb_streams; /* number of streams that ffmpeg is aware of; may be different
from ctx.nb_streams if new streams appear during av_read_frame() */
int nb_streams_warn; /* number of streams that the user was warned of */
int rate_emu;
#if HAVE_PTHREADS
pthread_t thread; /* thread reading from this file */
int finished; /* the thread has exited */
int joined; /* the thread has been joined */
pthread_mutex_t fifo_lock; /* lock for access to fifo */
pthread_cond_t fifo_cond; /* the main thread will signal on this cond after reading from fifo */
AVFifoBuffer *fifo; /* demuxed packets are stored here; freed by the main thread */
#endif
} InputFile;
typedef struct OutputStream {
int file_index; /* file index */
int index; /* stream index in the output file */
int source_index; /* InputStream index */
AVStream *st; /* stream in the output file */
int encoding_needed; /* true if encoding needed for this stream */
int frame_number;
/* input pts and corresponding output pts
for A/V sync */
struct InputStream *sync_ist; /* input stream to sync against */
int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
/* pts of the first frame encoded for this stream, used for limiting
* recording time */
int64_t first_pts;
AVBitStreamFilterContext *bitstream_filters;
AVCodec *enc;
int64_t max_frames;
AVFrame *filtered_frame;
/* video only */
AVRational frame_rate;
int force_fps;
int top_field_first;
float frame_aspect_ratio;
float last_quality;
/* forced key frames */
int64_t *forced_kf_pts;
int forced_kf_count;
int forced_kf_index;
char *forced_keyframes;
/* audio only */
int audio_channels_map[SWR_CH_MAX]; /* list of the channels id to pick from the source stream */
int audio_channels_mapped; /* number of channels in audio_channels_map */
char *logfile_prefix;
FILE *logfile;
OutputFilter *filter;
char *avfilter;
int64_t sws_flags;
int64_t swr_dither_method;
double swr_dither_scale;
AVDictionary *opts;
int finished; /* no more packets should be written for this stream */
int unavailable; /* true if the stream is unavailable (possibly temporarily) */
int stream_copy;
const char *attachment_filename;
int copy_initial_nonkeyframes;
int copy_prior_start;
int keep_pix_fmt;
} OutputStream;
typedef struct OutputFile {
AVFormatContext *ctx;
AVDictionary *opts;
int ost_index; /* index of the first stream in output_streams */
int64_t recording_time; ///< desired length of the resulting file in microseconds == AV_TIME_BASE units
int64_t start_time; ///< start time in microseconds == AV_TIME_BASE units
uint64_t limit_filesize; /* filesize limit expressed in bytes */
int shortest;
} OutputFile;
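/* Sketch, not in the original header: recording_time and start_time are in
 * AV_TIME_BASE units, so a pts rescaled to AV_TIME_BASE (relative to the
 * output start) can be bounds-checked directly. Simplified from the real
 * check, which uses av_compare_ts(); assumes the INT64_MAX "unlimited"
 * default for recording_time. */
static inline int past_recording_time_sketch(OutputFile *of, int64_t pts_av_tb)
{
    return of->recording_time != INT64_MAX && pts_av_tb >= of->recording_time;
}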
extern InputStream **input_streams;
extern int nb_input_streams;
extern InputFile **input_files;
extern int nb_input_files;
extern OutputStream **output_streams;
extern int nb_output_streams;
extern OutputFile **output_files;
extern int nb_output_files;
extern FilterGraph **filtergraphs;
extern int nb_filtergraphs;
extern char *vstats_filename;
extern float audio_drift_threshold;
extern float dts_delta_threshold;
extern float dts_error_threshold;
extern int audio_volume;
extern int audio_sync_method;
extern int video_sync_method;
extern int do_benchmark;
extern int do_benchmark_all;
extern int do_deinterlace;
extern int do_hex_dump;
extern int do_pkt_dump;
extern int copy_ts;
extern int copy_tb;
extern int debug_ts;
extern int exit_on_error;
extern int print_stats;
extern int qp_hist;
extern int same_quant;
extern int stdin_interaction;
extern int frame_bits_per_raw_sample;
extern AVIOContext *progress_avio;
extern const AVIOInterruptCB int_cb;
extern const OptionDef options[];
void term_init(void);
void term_exit(void);
void reset_options(OptionsContext *o, int is_input);
void show_usage(void);
void opt_output_file(void *optctx, const char *filename);
void assert_avoptions(AVDictionary *m);
int guess_input_channel_layout(InputStream *ist);
enum PixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum PixelFormat target);
void choose_sample_fmt(AVStream *st, AVCodec *codec);
int configure_filtergraph(FilterGraph *fg);
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out);
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist);
FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost);
#endif /* FFMPEG_H */


@@ -1,774 +0,0 @@
/*
* ffmpeg filter configuration
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "ffmpeg.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavutil/audioconvert.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixfmt.h"
#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"
enum PixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum PixelFormat target)
{
if (codec && codec->pix_fmts) {
const enum PixelFormat *p = codec->pix_fmts;
int has_alpha= av_pix_fmt_descriptors[target].nb_components % 2 == 0;
enum PixelFormat best= PIX_FMT_NONE;
if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
if (st->codec->codec_id == AV_CODEC_ID_MJPEG) {
p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE };
} else if (st->codec->codec_id == AV_CODEC_ID_LJPEG) {
p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P,
PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE };
}
}
for (; *p != PIX_FMT_NONE; p++) {
best= avcodec_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
if (*p == target)
break;
}
if (*p == PIX_FMT_NONE) {
if (target != PIX_FMT_NONE)
av_log(NULL, AV_LOG_WARNING,
"Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
av_pix_fmt_descriptors[target].name,
codec->name,
av_pix_fmt_descriptors[best].name);
return best;
}
}
return target;
}
void choose_sample_fmt(AVStream *st, AVCodec *codec)
{
if (codec && codec->sample_fmts) {
const enum AVSampleFormat *p = codec->sample_fmts;
for (; *p != -1; p++) {
if (*p == st->codec->sample_fmt)
break;
}
if (*p == -1) {
if((codec->capabilities & CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codec->sample_fmt) > av_get_sample_fmt_name(codec->sample_fmts[0]))
av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
if(av_get_sample_fmt_name(st->codec->sample_fmt))
av_log(NULL, AV_LOG_WARNING,
"Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
av_get_sample_fmt_name(st->codec->sample_fmt),
codec->name,
av_get_sample_fmt_name(codec->sample_fmts[0]));
st->codec->sample_fmt = codec->sample_fmts[0];
}
}
}
static char *choose_pix_fmts(OutputStream *ost)
{
if (ost->keep_pix_fmt) {
if (ost->filter)
avfilter_graph_set_auto_convert(ost->filter->graph->graph,
AVFILTER_AUTO_CONVERT_NONE);
if (ost->st->codec->pix_fmt == PIX_FMT_NONE)
return NULL;
return av_strdup(av_get_pix_fmt_name(ost->st->codec->pix_fmt));
}
if (ost->st->codec->pix_fmt != PIX_FMT_NONE) {
return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc, ost->st->codec->pix_fmt)));
} else if (ost->enc && ost->enc->pix_fmts) {
const enum PixelFormat *p;
AVIOContext *s = NULL;
uint8_t *ret;
int len;
if (avio_open_dyn_buf(&s) < 0)
exit_program(1);
p = ost->enc->pix_fmts;
if (ost->st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
if (ost->st->codec->codec_id == AV_CODEC_ID_MJPEG) {
p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE };
} else if (ost->st->codec->codec_id == AV_CODEC_ID_LJPEG) {
p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P,
PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE };
}
}
for (; *p != PIX_FMT_NONE; p++) {
const char *name = av_get_pix_fmt_name(*p);
avio_printf(s, "%s:", name);
}
len = avio_close_dyn_buf(s, &ret);
ret[len - 1] = 0;
return ret;
} else
return NULL;
}
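/* Note on the dynamic-buffer idiom above (and in DEF_CHOOSE_FORMAT below):
 * every name is written with a trailing separator, then ret[len - 1] = 0
 * turns the final separator into the string terminator, so a two-entry list
 * comes out as e.g. "yuv420p:yuvj420p". */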
/**
* Define a function for building a string containing a list of
* allowed formats.
*/
#define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator)\
static char *choose_ ## var ## s(OutputStream *ost) \
{ \
if (ost->st->codec->var != none) { \
get_name(ost->st->codec->var); \
return av_strdup(name); \
} else if (ost->enc->supported_list) { \
const type *p; \
AVIOContext *s = NULL; \
uint8_t *ret; \
int len; \
\
if (avio_open_dyn_buf(&s) < 0) \
exit_program(1); \
\
for (p = ost->enc->supported_list; *p != none; p++) { \
get_name(*p); \
avio_printf(s, "%s" separator, name); \
} \
len = avio_close_dyn_buf(s, &ret); \
ret[len - 1] = 0; \
return ret; \
} else \
return NULL; \
}
// DEF_CHOOSE_FORMAT(enum PixelFormat, pix_fmt, pix_fmts, PIX_FMT_NONE,
// GET_PIX_FMT_NAME, ":")
DEF_CHOOSE_FORMAT(enum AVSampleFormat, sample_fmt, sample_fmts,
AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME, ",")
DEF_CHOOSE_FORMAT(int, sample_rate, supported_samplerates, 0,
GET_SAMPLE_RATE_NAME, ",")
DEF_CHOOSE_FORMAT(uint64_t, channel_layout, channel_layouts, 0,
GET_CH_LAYOUT_NAME, ",")
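/* Hand-expanded for reference, not in the original source: the sample_rate
 * instantiation above behaves roughly like the function below, where
 * GET_SAMPLE_RATE_NAME is a small snprintf-into-a-local-buffer helper
 * macro. */
static char *choose_sample_rates_sketch(OutputStream *ost)
{
    if (ost->st->codec->sample_rate != 0) {
        char name[16];
        snprintf(name, sizeof(name), "%d", ost->st->codec->sample_rate);
        return av_strdup(name);
    } else if (ost->enc->supported_samplerates) {
        const int *p;
        AVIOContext *s = NULL;
        uint8_t *ret;
        int len;

        if (avio_open_dyn_buf(&s) < 0)
            exit_program(1);
        for (p = ost->enc->supported_samplerates; *p != 0; p++)
            avio_printf(s, "%d,", *p);
        len = avio_close_dyn_buf(s, &ret);
        ret[len - 1] = 0; /* turn the trailing ',' into the terminator */
        return (char *)ret;
    } else
        return NULL;
}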
FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
{
FilterGraph *fg = av_mallocz(sizeof(*fg));
if (!fg)
exit_program(1);
fg->index = nb_filtergraphs;
fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs), &fg->nb_outputs,
fg->nb_outputs + 1);
if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
exit_program(1);
fg->outputs[0]->ost = ost;
fg->outputs[0]->graph = fg;
ost->filter = fg->outputs[0];
fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs), &fg->nb_inputs,
fg->nb_inputs + 1);
if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
exit_program(1);
fg->inputs[0]->ist = ist;
fg->inputs[0]->graph = fg;
ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
&ist->nb_filters, ist->nb_filters + 1);
ist->filters[ist->nb_filters - 1] = fg->inputs[0];
filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
&nb_filtergraphs, nb_filtergraphs + 1);
filtergraphs[nb_filtergraphs - 1] = fg;
return fg;
}
static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
{
InputStream *ist = NULL;
enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
int i;
// TODO: support other filter types
if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
"currently.\n");
exit_program(1);
}
if (in->name) {
AVFormatContext *s;
AVStream *st = NULL;
char *p;
int file_idx = strtol(in->name, &p, 0);
if (file_idx < 0 || file_idx >= nb_input_files) {
av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
file_idx, fg->graph_desc);
exit_program(1);
}
s = input_files[file_idx]->ctx;
for (i = 0; i < s->nb_streams; i++) {
enum AVMediaType stream_type = s->streams[i]->codec->codec_type;
if (stream_type != type &&
!(stream_type == AVMEDIA_TYPE_SUBTITLE &&
type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
continue;
if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
st = s->streams[i];
break;
}
}
if (!st) {
av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
"matches no streams.\n", p, fg->graph_desc);
exit_program(1);
}
ist = input_streams[input_files[file_idx]->ist_index + st->index];
} else {
/* find the first unused stream of corresponding type */
for (i = 0; i < nb_input_streams; i++) {
ist = input_streams[i];
if (ist->st->codec->codec_type == type && ist->discard)
break;
}
if (i == nb_input_streams) {
av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
"unlabeled input pad %d on filter %s\n", in->pad_idx,
in->filter_ctx->name);
exit_program(1);
}
}
av_assert0(ist);
ist->discard = 0;
ist->decoding_needed++;
ist->st->discard = AVDISCARD_NONE;
fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs),
&fg->nb_inputs, fg->nb_inputs + 1);
if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
exit_program(1);
fg->inputs[fg->nb_inputs - 1]->ist = ist;
fg->inputs[fg->nb_inputs - 1]->graph = fg;
ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
&ist->nb_filters, ist->nb_filters + 1);
ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
}
static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
char *pix_fmts;
OutputStream *ost = ofilter->ost;
AVCodecContext *codec = ost->st->codec;
AVFilterContext *last_filter = out->filter_ctx;
int pad_idx = out->pad_idx;
int ret;
char name[255];
AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
ret = avfilter_graph_create_filter(&ofilter->filter,
avfilter_get_by_name("ffbuffersink"),
name, NULL, NULL, fg->graph);
av_freep(&buffersink_params);
if (ret < 0)
return ret;
if (codec->width || codec->height) {
char args[255];
AVFilterContext *filter;
snprintf(args, sizeof(args), "%d:%d:flags=0x%X",
codec->width,
codec->height,
(unsigned)ost->sws_flags);
snprintf(name, sizeof(name), "scaler for output stream %d:%d",
ost->file_index, ost->index);
if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
name, args, NULL, fg->graph)) < 0)
return ret;
if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
return ret;
last_filter = filter;
pad_idx = 0;
}
if ((pix_fmts = choose_pix_fmts(ost))) {
AVFilterContext *filter;
snprintf(name, sizeof(name), "pixel format for output stream %d:%d",
ost->file_index, ost->index);
if ((ret = avfilter_graph_create_filter(&filter,
avfilter_get_by_name("format"),
"format", pix_fmts, NULL,
fg->graph)) < 0)
return ret;
if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
return ret;
last_filter = filter;
pad_idx = 0;
av_freep(&pix_fmts);
}
if (ost->frame_rate.num && 0) {
AVFilterContext *fps;
char args[255];
snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
ost->frame_rate.den);
snprintf(name, sizeof(name), "fps for output stream %d:%d",
ost->file_index, ost->index);
ret = avfilter_graph_create_filter(&fps, avfilter_get_by_name("fps"),
name, args, NULL, fg->graph);
if (ret < 0)
return ret;
ret = avfilter_link(last_filter, pad_idx, fps, 0);
if (ret < 0)
return ret;
last_filter = fps;
pad_idx = 0;
}
if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
return ret;
return 0;
}
static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
OutputStream *ost = ofilter->ost;
AVCodecContext *codec = ost->st->codec;
AVFilterContext *last_filter = out->filter_ctx;
int pad_idx = out->pad_idx;
char *sample_fmts, *sample_rates, *channel_layouts;
char name[255];
int ret;
snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
ret = avfilter_graph_create_filter(&ofilter->filter,
avfilter_get_by_name("ffabuffersink"),
name, NULL, NULL, fg->graph);
if (ret < 0)
return ret;
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
AVFilterContext *filt_ctx; \
\
av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
"similarly to -af " filter_name "=%s.\n", arg); \
\
ret = avfilter_graph_create_filter(&filt_ctx, \
avfilter_get_by_name(filter_name), \
filter_name, arg, NULL, fg->graph); \
if (ret < 0) \
return ret; \
\
ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
if (ret < 0) \
return ret; \
\
last_filter = filt_ctx; \
pad_idx = 0; \
} while (0)
if (ost->audio_channels_mapped) {
int i;
AVBPrint pan_buf;
av_bprint_init(&pan_buf, 256, 8192);
av_bprintf(&pan_buf, "0x%"PRIx64,
av_get_default_channel_layout(ost->audio_channels_mapped));
for (i = 0; i < ost->audio_channels_mapped; i++)
if (ost->audio_channels_map[i] != -1)
av_bprintf(&pan_buf, ":c%d=c%d", i, ost->audio_channels_map[i]);
AUTO_INSERT_FILTER("-map_channel", "pan", pan_buf.str);
av_bprint_finalize(&pan_buf, NULL);
}
if (codec->channels && !codec->channel_layout)
codec->channel_layout = av_get_default_channel_layout(codec->channels);
sample_fmts = choose_sample_fmts(ost);
sample_rates = choose_sample_rates(ost);
channel_layouts = choose_channel_layouts(ost);
if (sample_fmts || sample_rates || channel_layouts) {
AVFilterContext *format;
char args[256];
args[0] = 0;
if (sample_fmts)
av_strlcatf(args, sizeof(args), "sample_fmts=%s:",
sample_fmts);
if (sample_rates)
av_strlcatf(args, sizeof(args), "sample_rates=%s:",
sample_rates);
if (channel_layouts)
av_strlcatf(args, sizeof(args), "channel_layouts=%s:",
channel_layouts);
av_freep(&sample_fmts);
av_freep(&sample_rates);
av_freep(&channel_layouts);
snprintf(name, sizeof(name), "audio format for output stream %d:%d",
ost->file_index, ost->index);
ret = avfilter_graph_create_filter(&format,
avfilter_get_by_name("aformat"),
name, args, NULL, fg->graph);
if (ret < 0)
return ret;
ret = avfilter_link(last_filter, pad_idx, format, 0);
if (ret < 0)
return ret;
last_filter = format;
pad_idx = 0;
}
if (audio_volume != 256 && 0) {
char args[256];
snprintf(args, sizeof(args), "%f", audio_volume / 256.);
AUTO_INSERT_FILTER("-vol", "volume", args);
}
if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
return ret;
return 0;
}
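/* Example, illustrative only: for an s16, 44100 Hz, stereo encoder the
 * aformat args assembled above come out as
 *     "sample_fmts=s16:sample_rates=44100:channel_layouts=0x3:"
 * -- the trailing ':' is simply left in place and tolerated by the filter. */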
#define DESCRIBE_FILTER_LINK(f, inout, in) \
{ \
AVFilterContext *ctx = inout->filter_ctx; \
AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads; \
int nb_pads = in ? ctx->input_count : ctx->output_count; \
AVIOContext *pb; \
\
if (avio_open_dyn_buf(&pb) < 0) \
exit_program(1); \
\
avio_printf(pb, "%s", ctx->filter->name); \
if (nb_pads > 1) \
avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));\
avio_w8(pb, 0); \
avio_close_dyn_buf(pb, &f->name); \
}
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
av_freep(&ofilter->name);
DESCRIBE_FILTER_LINK(ofilter, out, 0);
switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
default: av_assert0(0);
}
}
static int sub2video_prepare(InputStream *ist)
{
AVFormatContext *avf = input_files[ist->file_index]->ctx;
int i, ret, w, h;
uint8_t *image[4];
int linesize[4];
/* Compute the size of the canvas for the subtitles stream.
If the subtitles codec has set a size, use it. Otherwise use the
maximum dimensions of the video streams in the same file. */
w = ist->st->codec->width;
h = ist->st->codec->height;
if (!(w && h)) {
for (i = 0; i < avf->nb_streams; i++) {
if (avf->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
w = FFMAX(w, avf->streams[i]->codec->width);
h = FFMAX(h, avf->streams[i]->codec->height);
}
}
if (!(w && h)) {
w = FFMAX(w, 720);
h = FFMAX(h, 576);
}
av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
}
ist->sub2video.w = ist->st->codec->width = w;
ist->sub2video.h = ist->st->codec->height = h;
/* rectangles are PIX_FMT_PAL8, but we have no guarantee that the
palettes for all rectangles are identical or compatible */
ist->st->codec->pix_fmt = PIX_FMT_RGB32;
ret = av_image_alloc(image, linesize, w, h, PIX_FMT_RGB32, 32);
if (ret < 0)
return ret;
memset(image[0], 0, h * linesize[0]);
ist->sub2video.ref = avfilter_get_video_buffer_ref_from_arrays(
image, linesize, AV_PERM_READ | AV_PERM_PRESERVE,
w, h, PIX_FMT_RGB32);
if (!ist->sub2video.ref) {
av_free(image[0]);
return AVERROR(ENOMEM);
}
return 0;
}
static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
AVFilterInOut *in)
{
AVFilterContext *first_filter = in->filter_ctx;
AVFilter *filter = avfilter_get_by_name("buffer");
InputStream *ist = ifilter->ist;
AVRational tb = ist->framerate.num ? av_inv_q(ist->framerate) :
ist->st->time_base;
AVRational fr = ist->framerate.num ? ist->framerate :
ist->st->r_frame_rate;
AVRational sar;
AVBPrint args;
char name[255];
int pad_idx = in->pad_idx;
int ret;
if (ist->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
ret = sub2video_prepare(ist);
if (ret < 0)
return ret;
}
sar = ist->st->sample_aspect_ratio.num ?
ist->st->sample_aspect_ratio :
ist->st->codec->sample_aspect_ratio;
if(!sar.den)
sar = (AVRational){0,1};
av_bprint_init(&args, 0, 1);
av_bprintf(&args,
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
"pixel_aspect=%d/%d:sws_param=flags=%d", ist->st->codec->width,
ist->st->codec->height, ist->st->codec->pix_fmt,
tb.num, tb.den, sar.num, sar.den,
SWS_BILINEAR + ((ist->st->codec->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
if (fr.num && fr.den)
av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
ist->file_index, ist->st->index);
if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter, name,
args.str, NULL, fg->graph)) < 0)
return ret;
if (ist->framerate.num) {
AVFilterContext *setpts;
snprintf(name, sizeof(name), "force CFR for input from stream %d:%d",
ist->file_index, ist->st->index);
if ((ret = avfilter_graph_create_filter(&setpts,
avfilter_get_by_name("setpts"),
name, "N", NULL,
fg->graph)) < 0)
return ret;
if ((ret = avfilter_link(setpts, 0, first_filter, pad_idx)) < 0)
return ret;
first_filter = setpts;
pad_idx = 0;
}
if ((ret = avfilter_link(ifilter->filter, 0, first_filter, pad_idx)) < 0)
return ret;
return 0;
}
static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
AVFilterInOut *in)
{
AVFilterContext *first_filter = in->filter_ctx;
AVFilter *filter = avfilter_get_by_name("abuffer");
InputStream *ist = ifilter->ist;
int pad_idx = in->pad_idx;
char args[255], name[255];
int ret;
snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s"
":channel_layout=0x%"PRIx64,
1, ist->st->codec->sample_rate,
ist->st->codec->sample_rate,
av_get_sample_fmt_name(ist->st->codec->sample_fmt),
ist->st->codec->channel_layout);
snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
ist->file_index, ist->st->index);
if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter,
name, args, NULL,
fg->graph)) < 0)
return ret;
#define AUTO_INSERT_FILTER_INPUT(opt_name, filter_name, arg) do { \
AVFilterContext *filt_ctx; \
\
av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
"similarly to -af " filter_name "=%s.\n", arg); \
\
snprintf(name, sizeof(name), "graph %d %s for input stream %d:%d", \
fg->index, filter_name, ist->file_index, ist->st->index); \
ret = avfilter_graph_create_filter(&filt_ctx, \
avfilter_get_by_name(filter_name), \
name, arg, NULL, fg->graph); \
if (ret < 0) \
return ret; \
\
ret = avfilter_link(filt_ctx, 0, first_filter, pad_idx); \
if (ret < 0) \
return ret; \
\
first_filter = filt_ctx; \
} while (0)
if (audio_sync_method > 0) {
char args[256] = {0};
av_strlcatf(args, sizeof(args), "min_comp=0.001:min_hard_comp=%f", audio_drift_threshold);
if (audio_sync_method > 1)
av_strlcatf(args, sizeof(args), ":max_soft_comp=%f", audio_sync_method/(double)ist->st->codec->sample_rate);
AUTO_INSERT_FILTER_INPUT("-async", "aresample", args);
}
// if (ost->audio_channels_mapped) {
// int i;
// AVBPrint pan_buf;
// av_bprint_init(&pan_buf, 256, 8192);
// av_bprintf(&pan_buf, "0x%"PRIx64,
// av_get_default_channel_layout(ost->audio_channels_mapped));
// for (i = 0; i < ost->audio_channels_mapped; i++)
// if (ost->audio_channels_map[i] != -1)
// av_bprintf(&pan_buf, ":c%d=c%d", i, ost->audio_channels_map[i]);
// AUTO_INSERT_FILTER_INPUT("-map_channel", "pan", pan_buf.str);
// av_bprint_finalize(&pan_buf, NULL);
// }
if (audio_volume != 256) {
char args[256];
snprintf(args, sizeof(args), "%f", audio_volume / 256.);
AUTO_INSERT_FILTER_INPUT("-vol", "volume", args);
}
if ((ret = avfilter_link(ifilter->filter, 0, first_filter, pad_idx)) < 0)
return ret;
return 0;
}
static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
AVFilterInOut *in)
{
av_freep(&ifilter->name);
DESCRIBE_FILTER_LINK(ifilter, in, 1);
switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
default: av_assert0(0);
}
}
int configure_filtergraph(FilterGraph *fg)
{
AVFilterInOut *inputs, *outputs, *cur;
int ret, i, init = !fg->graph, simple = !fg->graph_desc;
const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
fg->graph_desc;
avfilter_graph_free(&fg->graph);
if (!(fg->graph = avfilter_graph_alloc()))
return AVERROR(ENOMEM);
if (simple) {
OutputStream *ost = fg->outputs[0]->ost;
char args[255];
snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
fg->graph->scale_sws_opts = av_strdup(args);
}
if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
return ret;
if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
av_log(NULL, AV_LOG_ERROR, "Simple filtergraph '%s' does not have "
"exactly one input and output.\n", graph_desc);
return AVERROR(EINVAL);
}
for (cur = inputs; !simple && init && cur; cur = cur->next)
init_input_filter(fg, cur);
for (cur = inputs, i = 0; cur; cur = cur->next, i++)
if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0)
return ret;
avfilter_inout_free(&inputs);
if (!init || simple) {
/* we already know the mappings between lavfi outputs and output streams,
* so we can finish the setup */
for (cur = outputs, i = 0; cur; cur = cur->next, i++)
configure_output_filter(fg, fg->outputs[i], cur);
avfilter_inout_free(&outputs);
if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
return ret;
} else {
/* wait until output mappings are processed */
for (cur = outputs; cur;) {
fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs),
&fg->nb_outputs, fg->nb_outputs + 1);
if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
exit_program(1);
fg->outputs[fg->nb_outputs - 1]->graph = fg;
fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
cur = cur->next;
fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
}
}
return 0;
}
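/* Usage sketch, illustrative only: for a simple one-input/one-output graph
 * the two entry points above combine as follows; ost->avfilter carries the
 * -vf/-af chain (a trivial pass-through when the user gave none). */
static int setup_simple_graph_sketch(InputStream *ist, OutputStream *ost)
{
    FilterGraph *fg = init_simple_filtergraph(ist, ost);
    return configure_filtergraph(fg);
}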
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
{
int i;
for (i = 0; i < fg->nb_inputs; i++)
if (fg->inputs[i]->ist == ist)
return 1;
return 0;
}
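/* Sketch, illustrative only: a typical caller walks the global filtergraph
 * list to find every graph fed by a given input stream, e.g. to reconfigure
 * them when stream parameters change. */
static void reconfigure_graphs_for_ist_sketch(InputStream *ist)
{
    int i;

    for (i = 0; i < nb_filtergraphs; i++)
        if (ist_in_filtergraph(filtergraphs[i], ist) &&
            configure_filtergraph(filtergraphs[i]) < 0)
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
}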

File diff suppressed because it is too large.

ffplay.c (1051 changed lines)

File diff suppressed because it is too large.

ffprobe.c (1452 changed lines)

File diff suppressed because it is too large.

@@ -40,7 +40,6 @@
#include "libavformat/internal.h"
#include "libavformat/url.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/lfg.h"
#include "libavutil/dict.h"
@@ -48,8 +47,6 @@
#include "libavutil/random_seed.h"
#include "libavutil/parseutils.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include <stdarg.h>
#include <unistd.h>
#include <fcntl.h>
@@ -58,6 +55,7 @@
#include <poll.h>
#endif
#include <errno.h>
#include <sys/time.h>
#include <time.h>
#include <sys/wait.h>
#include <signal.h>
@@ -563,11 +561,9 @@ static void start_multicast(void)
default_port = 6000;
for(stream = first_stream; stream != NULL; stream = stream->next) {
if (stream->is_multicast) {
unsigned random0 = av_lfg_get(&random_state);
unsigned random1 = av_lfg_get(&random_state);
/* open the RTP connection */
snprintf(session_id, sizeof(session_id), "%08x%08x",
random0, random1);
av_lfg_get(&random_state), av_lfg_get(&random_state));
/* choose a port if none given */
if (stream->multicast_port == 0) {
@@ -765,7 +761,7 @@ static void start_wait_request(HTTPContext *c, int is_rtsp)
static void http_send_too_busy_reply(int fd)
{
char buffer[400];
char buffer[300];
int len = snprintf(buffer, sizeof(buffer),
"HTTP/1.0 503 Server too busy\r\n"
"Content-type: text/html\r\n"
@@ -775,7 +771,6 @@ static void http_send_too_busy_reply(int fd)
"<p>The number of current connections is %d, and this exceeds the limit of %d.</p>\r\n"
"</body></html>\r\n",
nb_connections, nb_max_connections);
av_assert0(len < sizeof(buffer));
send(fd, buffer, len, 0);
}
@@ -1568,7 +1563,7 @@ static int http_parse_request(HTTPContext *c)
if (stream->stream_type == STREAM_TYPE_REDIRECT) {
c->http_error = 301;
q = c->buffer;
snprintf(q, c->buffer_size,
q += snprintf(q, c->buffer_size,
"HTTP/1.0 301 Moved\r\n"
"Location: %s\r\n"
"Content-type: text/html\r\n"
@@ -1576,7 +1571,6 @@ static int http_parse_request(HTTPContext *c)
"<html><head><title>Moved</title></head><body>\r\n"
"You should be <a href=\"%s\">redirected</a>.\r\n"
"</body></html>\r\n", stream->feed_filename, stream->feed_filename);
q += strlen(q);
/* prepare output buffer */
c->buffer_ptr = c->buffer;
c->buffer_end = q;
@@ -1607,7 +1601,7 @@ static int http_parse_request(HTTPContext *c)
if (c->post == 0 && max_bandwidth < current_bandwidth) {
c->http_error = 503;
q = c->buffer;
snprintf(q, c->buffer_size,
q += snprintf(q, c->buffer_size,
"HTTP/1.0 503 Server too busy\r\n"
"Content-type: text/html\r\n"
"\r\n"
@@ -1616,7 +1610,6 @@ static int http_parse_request(HTTPContext *c)
"<p>The bandwidth being served (including your stream) is %"PRIu64"kbit/sec, "
"and this exceeds the limit of %"PRIu64"kbit/sec.</p>\r\n"
"</body></html>\r\n", current_bandwidth, max_bandwidth);
q += strlen(q);
/* prepare output buffer */
c->buffer_ptr = c->buffer;
c->buffer_end = q;
@@ -1659,7 +1652,7 @@ static int http_parse_request(HTTPContext *c)
q = c->buffer;
switch(redir_type) {
case REDIR_ASX:
snprintf(q, c->buffer_size,
q += snprintf(q, c->buffer_size,
"HTTP/1.0 200 ASX Follows\r\n"
"Content-type: video/x-ms-asf\r\n"
"\r\n"
@@ -1667,25 +1660,22 @@ static int http_parse_request(HTTPContext *c)
//"<!-- Autogenerated by ffserver -->\r\n"
"<ENTRY><REF HREF=\"http://%s/%s%s\"/></ENTRY>\r\n"
"</ASX>\r\n", hostbuf, filename, info);
q += strlen(q);
break;
case REDIR_RAM:
snprintf(q, c->buffer_size,
q += snprintf(q, c->buffer_size,
"HTTP/1.0 200 RAM Follows\r\n"
"Content-type: audio/x-pn-realaudio\r\n"
"\r\n"
"# Autogenerated by ffserver\r\n"
"http://%s/%s%s\r\n", hostbuf, filename, info);
q += strlen(q);
break;
case REDIR_ASF:
snprintf(q, c->buffer_size,
q += snprintf(q, c->buffer_size,
"HTTP/1.0 200 ASF Redirect follows\r\n"
"Content-type: video/x-ms-asf\r\n"
"\r\n"
"[Reference]\r\n"
"Ref1=http://%s/%s%s\r\n", hostbuf, filename, info);
q += strlen(q);
break;
case REDIR_RTSP:
{
@@ -1695,13 +1685,12 @@ static int http_parse_request(HTTPContext *c)
p = strrchr(hostname, ':');
if (p)
*p = '\0';
snprintf(q, c->buffer_size,
q += snprintf(q, c->buffer_size,
"HTTP/1.0 200 RTSP Redirect follows\r\n"
/* XXX: incorrect mime type ? */
"Content-type: application/x-rtsp\r\n"
"\r\n"
"rtsp://%s:%d/%s\r\n", hostname, ntohs(my_rtsp_addr.sin_port), filename);
q += strlen(q);
}
break;
case REDIR_SDP:
@@ -1710,11 +1699,10 @@ static int http_parse_request(HTTPContext *c)
int sdp_data_size, len;
struct sockaddr_in my_addr;
snprintf(q, c->buffer_size,
q += snprintf(q, c->buffer_size,
"HTTP/1.0 200 OK\r\n"
"Content-type: application/sdp\r\n"
"\r\n");
q += strlen(q);
len = sizeof(my_addr);
getsockname(c->fd, (struct sockaddr *)&my_addr, &len);
@@ -1833,12 +1821,12 @@ static int http_parse_request(HTTPContext *c)
}
/* prepare http header */
c->buffer[0] = 0;
av_strlcatf(c->buffer, c->buffer_size, "HTTP/1.0 200 OK\r\n");
q = c->buffer;
q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "HTTP/1.0 200 OK\r\n");
mime_type = c->stream->fmt->mime_type;
if (!mime_type)
mime_type = "application/x-octet-stream";
av_strlcatf(c->buffer, c->buffer_size, "Pragma: no-cache\r\n");
q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Pragma: no-cache\r\n");
/* for asf, we need extra headers */
if (!strcmp(c->stream->fmt->name,"asf_stream")) {
@@ -1846,11 +1834,10 @@ static int http_parse_request(HTTPContext *c)
c->wmp_client_id = av_lfg_get(&random_state);
av_strlcatf(c->buffer, c->buffer_size, "Server: Cougar 4.1.0.3923\r\nCache-Control: no-cache\r\nPragma: client-id=%d\r\nPragma: features=\"broadcast\"\r\n", c->wmp_client_id);
q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Server: Cougar 4.1.0.3923\r\nCache-Control: no-cache\r\nPragma: client-id=%d\r\nPragma: features=\"broadcast\"\r\n", c->wmp_client_id);
}
av_strlcatf(c->buffer, c->buffer_size, "Content-Type: %s\r\n", mime_type);
av_strlcatf(c->buffer, c->buffer_size, "\r\n");
q = c->buffer + strlen(c->buffer);
q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Content-Type: %s\r\n", mime_type);
q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "\r\n");
/* prepare output buffer */
c->http_error = 0;
@@ -1861,7 +1848,7 @@ static int http_parse_request(HTTPContext *c)
send_error:
c->http_error = 404;
q = c->buffer;
snprintf(q, c->buffer_size,
q += snprintf(q, c->buffer_size,
"HTTP/1.0 404 Not Found\r\n"
"Content-type: text/html\r\n"
"\r\n"
@@ -1869,7 +1856,6 @@ static int http_parse_request(HTTPContext *c)
"<head><title>404 Not Found</title></head>\n"
"<body>%s</body>\n"
"</html>\n", msg);
q += strlen(q);
/* prepare output buffer */
c->buffer_ptr = c->buffer;
c->buffer_end = q;
@@ -3107,12 +3093,9 @@ static void rtsp_cmd_setup(HTTPContext *c, const char *url,
found:
/* generate session id if needed */
if (h->session_id[0] == '\0') {
unsigned random0 = av_lfg_get(&random_state);
unsigned random1 = av_lfg_get(&random_state);
if (h->session_id[0] == '\0')
snprintf(h->session_id, sizeof(h->session_id), "%08x%08x",
random0, random1);
}
av_lfg_get(&random_state), av_lfg_get(&random_state));
/* find rtp session, and create it if none found */
rtp_c = find_rtp_session(h->session_id);
@@ -3570,7 +3553,7 @@ static void extract_mpeg4_header(AVFormatContext *infile)
mpeg4_count = 0;
for(i=0;i<infile->nb_streams;i++) {
st = infile->streams[i];
if (st->codec->codec_id == AV_CODEC_ID_MPEG4 &&
if (st->codec->codec_id == CODEC_ID_MPEG4 &&
st->codec->extradata_size == 0) {
mpeg4_count++;
}
@@ -3583,7 +3566,7 @@ static void extract_mpeg4_header(AVFormatContext *infile)
if (av_read_packet(infile, &pkt) < 0)
break;
st = infile->streams[pkt.stream_index];
if (st->codec->codec_id == AV_CODEC_ID_MPEG4 &&
if (st->codec->codec_id == CODEC_ID_MPEG4 &&
st->codec->extradata_size == 0) {
av_freep(&st->codec->extradata);
/* fill extradata with the header */
@@ -3774,7 +3757,7 @@ static void build_feed_streams(void)
s->nb_streams = feed->nb_streams;
s->streams = feed->streams;
if (avformat_write_header(s, NULL) < 0) {
http_log("Container doesn't support the required parameters\n");
http_log("Container doesn't supports the required parameters\n");
exit(1);
}
/* XXX: need better api */
@@ -3899,22 +3882,22 @@ static void add_codec(FFStream *stream, AVCodecContext *av)
memcpy(st->codec, av, sizeof(AVCodecContext));
}
static enum AVCodecID opt_audio_codec(const char *arg)
static enum CodecID opt_audio_codec(const char *arg)
{
AVCodec *p= avcodec_find_encoder_by_name(arg);
if (p == NULL || p->type != AVMEDIA_TYPE_AUDIO)
return AV_CODEC_ID_NONE;
return CODEC_ID_NONE;
return p->id;
}
static enum AVCodecID opt_video_codec(const char *arg)
static enum CodecID opt_video_codec(const char *arg)
{
AVCodec *p= avcodec_find_encoder_by_name(arg);
if (p == NULL || p->type != AVMEDIA_TYPE_VIDEO)
return AV_CODEC_ID_NONE;
return CODEC_ID_NONE;
return p->id;
}
@@ -3957,7 +3940,7 @@ static int ffserver_opt_default(const char *opt, const char *arg,
static int ffserver_opt_preset(const char *arg,
AVCodecContext *avctx, int type,
enum AVCodecID *audio_id, enum AVCodecID *video_id)
enum CodecID *audio_id, enum CodecID *video_id)
{
FILE *f=NULL;
char filename[1000], tmp[1000], tmp2[1000], line[1000];
@@ -4039,7 +4022,7 @@ static int parse_ffconfig(const char *filename)
FFStream **last_stream, *stream, *redirect;
FFStream **last_feed, *feed, *s;
AVCodecContext audio_enc, video_enc;
enum AVCodecID audio_id, video_id;
enum CodecID audio_id, video_id;
f = fopen(filename, "r");
if (!f) {
@@ -4056,8 +4039,8 @@ static int parse_ffconfig(const char *filename)
stream = NULL;
feed = NULL;
redirect = NULL;
audio_id = AV_CODEC_ID_NONE;
video_id = AV_CODEC_ID_NONE;
audio_id = CODEC_ID_NONE;
video_id = CODEC_ID_NONE;
#define ERROR(...) report_config_error(filename, line_num, &errors, __VA_ARGS__)
for(;;) {
@@ -4250,8 +4233,8 @@ static int parse_ffconfig(const char *filename)
avcodec_get_context_defaults3(&video_enc, NULL);
avcodec_get_context_defaults3(&audio_enc, NULL);
audio_id = AV_CODEC_ID_NONE;
video_id = AV_CODEC_ID_NONE;
audio_id = CODEC_ID_NONE;
video_id = CODEC_ID_NONE;
if (stream->fmt) {
audio_id = stream->fmt->audio_codec;
video_id = stream->fmt->video_codec;
@@ -4333,13 +4316,13 @@ static int parse_ffconfig(const char *filename)
} else if (!av_strcasecmp(cmd, "AudioCodec")) {
get_arg(arg, sizeof(arg), &p);
audio_id = opt_audio_codec(arg);
if (audio_id == AV_CODEC_ID_NONE) {
if (audio_id == CODEC_ID_NONE) {
ERROR("Unknown AudioCodec: %s\n", arg);
}
} else if (!av_strcasecmp(cmd, "VideoCodec")) {
get_arg(arg, sizeof(arg), &p);
video_id = opt_video_codec(arg);
if (video_id == AV_CODEC_ID_NONE) {
if (video_id == CODEC_ID_NONE) {
ERROR("Unknown VideoCodec: %s\n", arg);
}
} else if (!av_strcasecmp(cmd, "MaxTime")) {
@@ -4530,9 +4513,9 @@ static int parse_ffconfig(const char *filename)
if (stream)
video_enc.dark_masking = atof(arg);
} else if (!av_strcasecmp(cmd, "NoVideo")) {
video_id = AV_CODEC_ID_NONE;
video_id = CODEC_ID_NONE;
} else if (!av_strcasecmp(cmd, "NoAudio")) {
audio_id = AV_CODEC_ID_NONE;
audio_id = CODEC_ID_NONE;
} else if (!av_strcasecmp(cmd, "ACL")) {
parse_acl_row(stream, feed, NULL, p, filename, line_num);
} else if (!av_strcasecmp(cmd, "DynamicACL")) {
@@ -4570,12 +4553,12 @@ static int parse_ffconfig(const char *filename)
ERROR("No corresponding <Stream> for </Stream>\n");
} else {
if (stream->feed && stream->fmt && strcmp(stream->fmt->name, "ffm") != 0) {
if (audio_id != AV_CODEC_ID_NONE) {
if (audio_id != CODEC_ID_NONE) {
audio_enc.codec_type = AVMEDIA_TYPE_AUDIO;
audio_enc.codec_id = audio_id;
add_codec(stream, &audio_enc);
}
if (video_id != AV_CODEC_ID_NONE) {
if (video_id != CODEC_ID_NONE) {
video_enc.codec_type = AVMEDIA_TYPE_VIDEO;
video_enc.codec_id = video_id;
add_codec(stream, &video_enc);
@@ -4663,12 +4646,13 @@ static void opt_debug(void)
logfilename[0] = '-';
}
void show_help_default(const char *opt, const char *arg)
static int opt_help(const char *opt, const char *arg)
{
printf("usage: ffserver [options]\n"
"Hyper fast multi format Audio/Video streaming server\n");
printf("\n");
show_help_options(options, "Main options:", 0, 0, 0);
show_help_options(options, "Main options:\n", 0, 0);
return 0;
}
static const OptionDef options[] = {


@@ -30,7 +30,8 @@
#include "dsputil.h"
#include "get_bits.h"
#include "libavutil/avassert.h"
//#undef NDEBUG
//#include <assert.h>
#define BLOCK_TYPE_VLC_BITS 5
#define ACDC_VLC_BITS 9
@@ -327,7 +328,7 @@ static inline void mcdc(uint16_t *dst, const uint16_t *src, int log2w,
}
break;
default:
av_assert2(0);
assert(0);
}
}
@@ -342,7 +343,7 @@ static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
uint16_t *start = (uint16_t *)f->last_picture.data[0];
uint16_t *end = start + stride * (f->avctx->height - h + 1) - (1 << log2w);
av_assert2(code >= 0 && code <= 6);
assert(code >= 0 && code <= 6);
if (code == 0) {
if (f->g.buffer_end - f->g.buffer < 1) {
@@ -836,7 +837,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
cfrm->size + data_size + FF_INPUT_BUFFER_PADDING_SIZE);
// explicit check needed as memcpy below might not catch a NULL
if (!cfrm->data) {
av_log(f->avctx, AV_LOG_ERROR, "realloc falure\n");
av_log(f->avctx, AV_LOG_ERROR, "realloc falure");
return -1;
}
@@ -981,7 +982,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
AVCodec ff_fourxm_decoder = {
.name = "4xm",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_4XM,
.id = CODEC_ID_4XM,
.priv_data_size = sizeof(FourXContext),
.init = decode_init,
.close = decode_end,


@@ -33,9 +33,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
@@ -222,7 +220,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
AVCodec ff_eightbps_decoder = {
.name = "8bps",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_8BPS,
.id = CODEC_ID_8BPS,
.priv_data_size = sizeof(EightBpsContext),
.init = decode_init,
.close = decode_end,


@@ -37,9 +37,7 @@
* http://aminet.net/mods/smpl/
*/
#include "libavutil/avassert.h"
#include "avcodec.h"
#include "libavutil/common.h"
/** decoder context */
typedef struct EightSvxContext {
@@ -49,7 +47,7 @@ typedef struct EightSvxContext {
/* buffer used to store the whole audio decoded/interleaved chunk,
* which is sent with the first packet */
uint8_t *samples;
int64_t samples_size;
size_t samples_size;
int samples_idx;
} EightSvxContext;
@@ -113,25 +111,17 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
/* decode and interleave the first packet */
if (!esc->samples && avpkt) {
uint8_t *deinterleaved_samples, *p = NULL;
int packet_size = avpkt->size;
if (packet_size % avctx->channels) {
av_log(avctx, AV_LOG_WARNING, "Packet with odd size, ignoring last byte\n");
if (packet_size < avctx->channels)
return packet_size;
packet_size -= packet_size % avctx->channels;
}
esc->samples_size = !esc->table ?
packet_size : avctx->channels + (packet_size-avctx->channels) * 2;
esc->samples_size = avctx->codec->id == CODEC_ID_8SVX_RAW || avctx->codec->id ==CODEC_ID_PCM_S8_PLANAR?
avpkt->size : avctx->channels + (avpkt->size-avctx->channels) * 2;
if (!(esc->samples = av_malloc(esc->samples_size)))
return AVERROR(ENOMEM);
/* decompress */
if (esc->table) {
if (avctx->codec->id == CODEC_ID_8SVX_FIB || avctx->codec->id == CODEC_ID_8SVX_EXP) {
const uint8_t *buf = avpkt->data;
uint8_t *dst;
int buf_size = avpkt->size;
int i, n = esc->samples_size;
int n = esc->samples_size;
if (buf_size < 2) {
av_log(avctx, AV_LOG_ERROR, "packet size is too small\n");
@@ -139,15 +129,15 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
}
if (!(deinterleaved_samples = av_mallocz(n)))
return AVERROR(ENOMEM);
dst = p = deinterleaved_samples;
p = deinterleaved_samples;
/* the uncompressed starting value is contained in the first byte */
dst = deinterleaved_samples;
for (i = 0; i < avctx->channels; i++) {
delta_decode(dst, buf + 1, buf_size / avctx->channels - 1, buf[0], esc->table);
buf += buf_size / avctx->channels;
dst += n / avctx->channels - 1;
}
if (avctx->channels == 2) {
delta_decode(deinterleaved_samples , buf+1, buf_size/2-1, buf[0], esc->table);
buf += buf_size/2;
delta_decode(deinterleaved_samples+n/2-1, buf+1, buf_size/2-1, buf[0], esc->table);
} else
delta_decode(deinterleaved_samples , buf+1, buf_size-1 , buf[0], esc->table);
} else {
deinterleaved_samples = avpkt->data;
}
@@ -160,8 +150,7 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
}
/* get output buffer */
av_assert1(!(esc->samples_size % avctx->channels || esc->samples_idx % avctx->channels));
esc->frame.nb_samples = FFMIN(MAX_FRAME_SIZE, esc->samples_size - esc->samples_idx) / avctx->channels;
esc->frame.nb_samples = (FFMIN(MAX_FRAME_SIZE, esc->samples_size - esc->samples_idx) +avctx->channels-1) / avctx->channels;
if ((ret = avctx->get_buffer(avctx, &esc->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
@@ -177,7 +166,7 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
*dst++ = *src++ + 128;
esc->samples_idx += out_data_size;
return esc->table ?
return avctx->codec->id == CODEC_ID_8SVX_FIB || avctx->codec->id == CODEC_ID_8SVX_EXP ?
(avctx->frame_number == 0)*2 + out_data_size / 2 :
out_data_size;
}
@@ -192,10 +181,10 @@ static av_cold int eightsvx_decode_init(AVCodecContext *avctx)
}
switch (avctx->codec->id) {
case AV_CODEC_ID_8SVX_FIB: esc->table = fibonacci; break;
case AV_CODEC_ID_8SVX_EXP: esc->table = exponential; break;
case AV_CODEC_ID_PCM_S8_PLANAR:
case AV_CODEC_ID_8SVX_RAW: esc->table = NULL; break;
case CODEC_ID_8SVX_FIB: esc->table = fibonacci; break;
case CODEC_ID_8SVX_EXP: esc->table = exponential; break;
case CODEC_ID_PCM_S8_PLANAR:
case CODEC_ID_8SVX_RAW: esc->table = NULL; break;
default:
av_log(avctx, AV_LOG_ERROR, "Invalid codec id %d.\n", avctx->codec->id);
return AVERROR_INVALIDDATA;
@@ -219,11 +208,10 @@ static av_cold int eightsvx_decode_close(AVCodecContext *avctx)
return 0;
}
#if CONFIG_EIGHTSVX_FIB_DECODER
AVCodec ff_eightsvx_fib_decoder = {
.name = "8svx_fib",
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_8SVX_FIB,
.id = CODEC_ID_8SVX_FIB,
.priv_data_size = sizeof (EightSvxContext),
.init = eightsvx_decode_init,
.decode = eightsvx_decode_frame,
@@ -231,12 +219,11 @@ AVCodec ff_eightsvx_fib_decoder = {
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("8SVX fibonacci"),
};
#endif
#if CONFIG_EIGHTSVX_EXP_DECODER
AVCodec ff_eightsvx_exp_decoder = {
.name = "8svx_exp",
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_8SVX_EXP,
.id = CODEC_ID_8SVX_EXP,
.priv_data_size = sizeof (EightSvxContext),
.init = eightsvx_decode_init,
.decode = eightsvx_decode_frame,
@@ -244,12 +231,11 @@ AVCodec ff_eightsvx_exp_decoder = {
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("8SVX exponential"),
};
#endif
#if CONFIG_PCM_S8_PLANAR_DECODER
AVCodec ff_pcm_s8_planar_decoder = {
.name = "pcm_s8_planar",
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_PCM_S8_PLANAR,
.id = CODEC_ID_PCM_S8_PLANAR,
.priv_data_size = sizeof(EightSvxContext),
.init = eightsvx_decode_init,
.close = eightsvx_decode_close,
@@ -257,4 +243,3 @@ AVCodec ff_pcm_s8_planar_decoder = {
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 8-bit planar"),
};
#endif


@@ -6,7 +6,6 @@ FFLIBS = avutil
HEADERS = avcodec.h \
avfft.h \
dxva2.h \
old_codec_ids.h \
vaapi.h \
vda.h \
vdpau.h \
@@ -18,7 +17,6 @@ OBJS = allcodecs.o \
avpacket.o \
bitstream.o \
bitstream_filter.o \
codec_desc.o \
dsputil.o \
faanidct.o \
fmtconvert.o \
@@ -34,14 +32,13 @@ OBJS = allcodecs.o \
utils.o \
# parts needed for many different codecs
OBJS-$(CONFIG_AANDCTTABLES) += aandcttab.o
OBJS-$(CONFIG_AANDCT) += aandcttab.o
OBJS-$(CONFIG_AC3DSP) += ac3dsp.o
OBJS-$(CONFIG_CRYSTALHD) += crystalhd.o
OBJS-$(CONFIG_DCT) += dct.o dct32_fixed.o dct32_float.o
OBJS-$(CONFIG_DWT) += dwt.o snow.o
OBJS-$(CONFIG_DXVA2) += dxva2.o
OBJS-$(CONFIG_ENCODERS) += faandct.o jfdctfst.o jfdctint.o
OBJS-$(CONFIG_ERROR_RESILIENCE) += error_resilience.o
OBJS-$(CONFIG_DCT) += dct.o dct32_fixed.o dct32_float.o
OBJS-$(CONFIG_DWT) += dwt.o
OBJS-$(CONFIG_DXVA2) += dxva2.o
FFT-OBJS-$(CONFIG_HARDCODED_TABLES) += cos_tables.o cos_fixed_tables.o
OBJS-$(CONFIG_FFT) += avfft.o fft_fixed.o fft_float.o \
$(FFT-OBJS-yes)
@@ -54,19 +51,14 @@ OBJS-$(CONFIG_LPC) += lpc.o
OBJS-$(CONFIG_LSP) += lsp.o
OBJS-$(CONFIG_MDCT) += mdct_fixed.o mdct_float.o
OBJS-$(CONFIG_MPEGAUDIODSP) += mpegaudiodsp.o \
mpegaudiodsp_data.o \
mpegaudiodsp_fixed.o \
mpegaudiodsp_float.o
OBJS-$(CONFIG_MPEGVIDEO) += mpegvideo.o mpegvideo_motion.o
OBJS-$(CONFIG_MPEGVIDEOENC) += mpegvideo_enc.o mpeg12data.o \
motion_est.o ratecontrol.o
OBJS-$(CONFIG_RANGECODER) += rangecoder.o
RDFT-OBJS-$(CONFIG_HARDCODED_TABLES) += sin_tables.o
OBJS-$(CONFIG_RDFT) += rdft.o $(RDFT-OBJS-yes)
OBJS-$(CONFIG_SINEWIN) += sinewin.o
OBJS-$(CONFIG_VAAPI) += vaapi.o
OBJS-$(CONFIG_VDA) += vda.o
OBJS-$(CONFIG_VDPAU) += vdpau.o
OBJS-$(CONFIG_VP3DSP) += vp3dsp.o
# decoders/encoders/hardware accelerators
OBJS-$(CONFIG_A64MULTI_ENCODER) += a64multienc.o elbg.o
@@ -94,12 +86,8 @@ OBJS-$(CONFIG_AMRNB_DECODER) += amrnbdec.o celp_filters.o \
OBJS-$(CONFIG_AMRWB_DECODER) += amrwbdec.o celp_filters.o \
celp_math.o acelp_filters.o \
acelp_vectors.o \
acelp_pitch_delay.o
acelp_pitch_delay.o lsp.o
OBJS-$(CONFIG_AMV_DECODER) += sp5xdec.o mjpegdec.o mjpeg.o
OBJS-$(CONFIG_AMV_ENCODER) += mjpegenc.o mjpeg.o \
mpegvideo_enc.o motion_est.o \
ratecontrol.o mpeg12data.o \
mpegvideo.o
OBJS-$(CONFIG_ANM_DECODER) += anm.o
OBJS-$(CONFIG_ANSI_DECODER) += ansi.o cga_data.o
OBJS-$(CONFIG_APE_DECODER) += apedec.o
@@ -113,7 +101,6 @@ OBJS-$(CONFIG_ATRAC1_DECODER) += atrac1.o atrac.o
OBJS-$(CONFIG_ATRAC3_DECODER) += atrac3.o atrac.o
OBJS-$(CONFIG_AURA_DECODER) += cyuv.o
OBJS-$(CONFIG_AURA2_DECODER) += aura.o
OBJS-$(CONFIG_AVRP_DECODER) += avrndec.o
OBJS-$(CONFIG_AVRP_DECODER) += r210dec.o
OBJS-$(CONFIG_AVRP_ENCODER) += r210enc.o
OBJS-$(CONFIG_AVS_DECODER) += avs.o
@@ -133,25 +120,26 @@ OBJS-$(CONFIG_BMV_VIDEO_DECODER) += bmv.o
OBJS-$(CONFIG_BMV_AUDIO_DECODER) += bmv.o
OBJS-$(CONFIG_C93_DECODER) += c93.o
OBJS-$(CONFIG_CAVS_DECODER) += cavs.o cavsdec.o cavsdsp.o \
cavsdata.o mpeg12data.o
mpeg12data.o mpegvideo.o
OBJS-$(CONFIG_CDGRAPHICS_DECODER) += cdgraphics.o
OBJS-$(CONFIG_CDXL_DECODER) += cdxl.o
OBJS-$(CONFIG_CINEPAK_DECODER) += cinepak.o
OBJS-$(CONFIG_CLJR_DECODER) += cljr.o
OBJS-$(CONFIG_CLJR_ENCODER) += cljr.o
OBJS-$(CONFIG_CLLC_DECODER) += cllc.o
OBJS-$(CONFIG_COOK_DECODER) += cook.o
OBJS-$(CONFIG_CPIA_DECODER) += cpia.o
OBJS-$(CONFIG_CSCD_DECODER) += cscd.o
OBJS-$(CONFIG_CYUV_DECODER) += cyuv.o
OBJS-$(CONFIG_DCA_DECODER) += dcadec.o dca.o dcadsp.o \
dca_parser.o synth_filter.o
OBJS-$(CONFIG_DCA_DECODER) += dca.o synth_filter.o dcadsp.o \
dca_parser.o
OBJS-$(CONFIG_DCA_ENCODER) += dcaenc.o
OBJS-$(CONFIG_DIRAC_DECODER) += diracdec.o dirac.o diracdsp.o \
dirac_arith.o mpeg12data.o dwt.o
OBJS-$(CONFIG_DFA_DECODER) += dfa.o
OBJS-$(CONFIG_DNXHD_DECODER) += dnxhddec.o dnxhddata.o
OBJS-$(CONFIG_DNXHD_ENCODER) += dnxhdenc.o dnxhddata.o
OBJS-$(CONFIG_DNXHD_ENCODER) += dnxhdenc.o dnxhddata.o \
mpegvideo_enc.o motion_est.o \
ratecontrol.o mpeg12data.o \
mpegvideo.o
OBJS-$(CONFIG_DPX_DECODER) += dpx.o
OBJS-$(CONFIG_DPX_ENCODER) += dpxenc.o
OBJS-$(CONFIG_DSICINAUDIO_DECODER) += dsicinav.o
@@ -165,26 +153,30 @@ OBJS-$(CONFIG_DVVIDEO_ENCODER) += dv.o dvdata.o dv_profile.o
OBJS-$(CONFIG_DXA_DECODER) += dxa.o
OBJS-$(CONFIG_DXTORY_DECODER) += dxtory.o
OBJS-$(CONFIG_EAC3_DECODER) += eac3dec.o eac3_data.o
OBJS-$(CONFIG_EAC3_ENCODER) += eac3enc.o eac3_data.o
OBJS-$(CONFIG_EAC3_ENCODER) += eac3enc.o ac3enc.o ac3enc_float.o \
ac3tab.o ac3.o kbdwin.o eac3_data.o
OBJS-$(CONFIG_EACMV_DECODER) += eacmv.o
OBJS-$(CONFIG_EAMAD_DECODER) += eamad.o eaidct.o mpeg12.o \
mpeg12data.o
mpeg12data.o mpegvideo.o \
error_resilience.o
OBJS-$(CONFIG_EATGQ_DECODER) += eatgq.o eaidct.o
OBJS-$(CONFIG_EATGV_DECODER) += eatgv.o
OBJS-$(CONFIG_EATQI_DECODER) += eatqi.o eaidct.o mpeg12.o \
mpeg12data.o
mpeg12data.o mpegvideo.o \
error_resilience.o
OBJS-$(CONFIG_EIGHTBPS_DECODER) += 8bps.o
OBJS-$(CONFIG_EIGHTSVX_EXP_DECODER) += 8svx.o
OBJS-$(CONFIG_EIGHTSVX_FIB_DECODER) += 8svx.o
OBJS-$(CONFIG_EIGHTSVX_RAW_DECODER) += 8svx.o
OBJS-$(CONFIG_ESCAPE124_DECODER) += escape124.o
OBJS-$(CONFIG_ESCAPE130_DECODER) += escape130.o
OBJS-$(CONFIG_EXR_DECODER) += exr.o
OBJS-$(CONFIG_FFV1_DECODER) += ffv1.o
OBJS-$(CONFIG_FFV1_ENCODER) += ffv1.o
OBJS-$(CONFIG_FFV1_DECODER) += ffv1.o rangecoder.o
OBJS-$(CONFIG_FFV1_ENCODER) += ffv1.o rangecoder.o
OBJS-$(CONFIG_FFVHUFF_DECODER) += huffyuv.o
OBJS-$(CONFIG_FFVHUFF_ENCODER) += huffyuv.o
OBJS-$(CONFIG_FFWAVESYNTH_DECODER) += ffwavesynth.o
OBJS-$(CONFIG_FLAC_DECODER) += flacdec.o flacdata.o flac.o flacdsp.o
OBJS-$(CONFIG_FLAC_DECODER) += flacdec.o flacdata.o flac.o
OBJS-$(CONFIG_FLAC_ENCODER) += flacenc.o flacdata.o flac.o vorbis_data.o
OBJS-$(CONFIG_FLASHSV_DECODER) += flashsv.o
OBJS-$(CONFIG_FLASHSV_ENCODER) += flashsvenc.o
@@ -195,32 +187,39 @@ OBJS-$(CONFIG_FOURXM_DECODER) += 4xm.o
OBJS-$(CONFIG_FRAPS_DECODER) += fraps.o
OBJS-$(CONFIG_FRWU_DECODER) += frwu.o
OBJS-$(CONFIG_G723_1_DECODER) += g723_1.o acelp_vectors.o \
celp_filters.o
OBJS-$(CONFIG_G723_1_ENCODER) += g723_1.o acelp_vectors.o celp_math.o
celp_filters.o celp_math.o
OBJS-$(CONFIG_G723_1_ENCODER) += g723_1.o
OBJS-$(CONFIG_G729_DECODER) += g729dec.o lsp.o celp_math.o acelp_filters.o acelp_pitch_delay.o acelp_vectors.o g729postfilter.o
OBJS-$(CONFIG_GIF_DECODER) += gifdec.o lzw.o
OBJS-$(CONFIG_GIF_ENCODER) += gif.o lzwenc.o
OBJS-$(CONFIG_GSM_DECODER) += gsmdec.o gsmdec_data.o msgsmdec.o
OBJS-$(CONFIG_GSM_MS_DECODER) += gsmdec.o gsmdec_data.o msgsmdec.o
OBJS-$(CONFIG_H261_DECODER) += h261dec.o h261.o h261data.o
OBJS-$(CONFIG_H261_ENCODER) += h261enc.o h261.o h261data.o
OBJS-$(CONFIG_H261_DECODER) += h261dec.o h261.o h261data.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_H261_ENCODER) += h261enc.o h261.o h261data.o \
mpegvideo_enc.o motion_est.o \
ratecontrol.o mpeg12data.o \
mpegvideo.o
OBJS-$(CONFIG_H263_DECODER) += h263dec.o h263.o ituh263dec.o \
mpeg4video.o mpeg4videodec.o flvdec.o\
intelh263dec.o
intelh263dec.o mpegvideo.o \
error_resilience.o
OBJS-$(CONFIG_H263_VAAPI_HWACCEL) += vaapi_mpeg4.o
OBJS-$(CONFIG_H263_ENCODER) += mpeg4videoenc.o mpeg4video.o \
h263.o ituh263enc.o flvenc.o
OBJS-$(CONFIG_H263_ENCODER) += mpegvideo_enc.o mpeg4video.o \
mpeg4videoenc.o motion_est.o \
ratecontrol.o h263.o ituh263enc.o \
flvenc.o mpeg12data.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_H264_DECODER) += h264.o \
h264_loopfilter.o h264_direct.o \
cabac.o h264_sei.o h264_ps.o \
h264_refs.o h264_cavlc.o h264_cabac.o
h264_refs.o h264_cavlc.o h264_cabac.o\
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_H264_DXVA2_HWACCEL) += dxva2_h264.o
OBJS-$(CONFIG_H264_VAAPI_HWACCEL) += vaapi_h264.o
OBJS-$(CONFIG_H264_VDA_HWACCEL) += vda_h264.o
OBJS-$(CONFIG_H264_VDA_DECODER) += vda_h264_dec.o
OBJS-$(CONFIG_HUFFYUV_DECODER) += huffyuv.o
OBJS-$(CONFIG_HUFFYUV_ENCODER) += huffyuv.o
OBJS-$(CONFIG_IAC_DECODER) += imc.o
OBJS-$(CONFIG_IDCIN_DECODER) += idcinvideo.o
OBJS-$(CONFIG_IDF_DECODER) += bintext.o cga_data.o
OBJS-$(CONFIG_IFF_BYTERUN1_DECODER) += iff.o
@@ -242,21 +241,26 @@ OBJS-$(CONFIG_JV_DECODER) += jvdec.o
OBJS-$(CONFIG_KGV1_DECODER) += kgv1dec.o
OBJS-$(CONFIG_KMVC_DECODER) += kmvc.o
OBJS-$(CONFIG_LAGARITH_DECODER) += lagarith.o lagarithrac.o
OBJS-$(CONFIG_LJPEG_ENCODER) += ljpegenc.o mjpegenc.o mjpeg.o
OBJS-$(CONFIG_LJPEG_ENCODER) += ljpegenc.o mjpegenc.o mjpeg.o \
mpegvideo_enc.o motion_est.o \
ratecontrol.o mpeg12data.o \
mpegvideo.o
OBJS-$(CONFIG_LOCO_DECODER) += loco.o
OBJS-$(CONFIG_MACE3_DECODER) += mace.o
OBJS-$(CONFIG_MACE6_DECODER) += mace.o
OBJS-$(CONFIG_MDEC_DECODER) += mdec.o mpeg12.o mpeg12data.o
OBJS-$(CONFIG_MDEC_DECODER) += mdec.o mpeg12.o mpeg12data.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_MICRODVD_DECODER) += microdvddec.o ass.o
OBJS-$(CONFIG_MIMIC_DECODER) += mimic.o
OBJS-$(CONFIG_MJPEG_DECODER) += mjpegdec.o mjpeg.o
OBJS-$(CONFIG_MJPEG_ENCODER) += mjpegenc.o mjpeg.o
OBJS-$(CONFIG_MJPEG_ENCODER) += mjpegenc.o mjpeg.o \
mpegvideo_enc.o motion_est.o \
ratecontrol.o mpeg12data.o \
mpegvideo.o
OBJS-$(CONFIG_MJPEGB_DECODER) += mjpegbdec.o mjpegdec.o mjpeg.o
OBJS-$(CONFIG_MLP_DECODER) += mlpdec.o mlpdsp.o
OBJS-$(CONFIG_MMVIDEO_DECODER) += mmvideo.o
OBJS-$(CONFIG_MOTIONPIXELS_DECODER) += motionpixels.o
OBJS-$(CONFIG_MOVTEXT_DECODER) += movtextdec.o ass.o
OBJS-$(CONFIG_MOVTEXT_ENCODER) += movtextenc.o ass_split.o
OBJS-$(CONFIG_MP1_DECODER) += mpegaudiodec.o mpegaudiodecheader.o \
mpegaudio.o mpegaudiodata.o
OBJS-$(CONFIG_MP1FLOAT_DECODER) += mpegaudiodec_float.o mpegaudiodecheader.o \
@@ -264,7 +268,7 @@ OBJS-$(CONFIG_MP1FLOAT_DECODER) += mpegaudiodec_float.o mpegaudiodecheade
OBJS-$(CONFIG_MP2_DECODER) += mpegaudiodec.o mpegaudiodecheader.o \
mpegaudio.o mpegaudiodata.o
OBJS-$(CONFIG_MP2_ENCODER) += mpegaudioenc.o mpegaudio.o \
mpegaudiodata.o mpegaudiodsp_data.o
mpegaudiodata.o
OBJS-$(CONFIG_MP2FLOAT_DECODER) += mpegaudiodec_float.o mpegaudiodecheader.o \
mpegaudio.o mpegaudiodata.o
OBJS-$(CONFIG_MP3ADU_DECODER) += mpegaudiodec.o mpegaudiodecheader.o \
@@ -281,19 +285,31 @@ OBJS-$(CONFIG_MP3_DECODER) += mpegaudiodec.o mpegaudiodecheader.o \
mpegaudio.o mpegaudiodata.o
OBJS-$(CONFIG_MP3FLOAT_DECODER) += mpegaudiodec_float.o mpegaudiodecheader.o \
mpegaudio.o mpegaudiodata.o
OBJS-$(CONFIG_MPC7_DECODER) += mpc7.o mpc.o
OBJS-$(CONFIG_MPC8_DECODER) += mpc8.o mpc.o
OBJS-$(CONFIG_MPC7_DECODER) += mpc7.o mpc.o mpegaudiodec.o \
mpegaudiodecheader.o mpegaudio.o \
mpegaudiodata.o
OBJS-$(CONFIG_MPC8_DECODER) += mpc8.o mpc.o mpegaudiodec.o \
mpegaudiodecheader.o mpegaudio.o \
mpegaudiodata.o
OBJS-$(CONFIG_MPEGVIDEO_DECODER) += mpeg12.o mpeg12data.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_MPEG_XVMC_DECODER) += mpegvideo_xvmc.o
OBJS-$(CONFIG_MPEG1VIDEO_DECODER) += mpeg12.o mpeg12data.o
OBJS-$(CONFIG_MPEG1VIDEO_ENCODER) += mpeg12enc.o mpeg12.o \
timecode.o
OBJS-$(CONFIG_MPEG1VIDEO_DECODER) += mpeg12.o mpeg12data.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_MPEG1VIDEO_ENCODER) += mpeg12enc.o mpegvideo_enc.o \
timecode.o \
motion_est.o ratecontrol.o \
mpeg12.o mpeg12data.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_MPEG2_DXVA2_HWACCEL) += dxva2_mpeg2.o
OBJS-$(CONFIG_MPEG2_VAAPI_HWACCEL) += vaapi_mpeg2.o
OBJS-$(CONFIG_MPEG2VIDEO_DECODER) += mpeg12.o mpeg12data.o
OBJS-$(CONFIG_MPEG2VIDEO_ENCODER) += mpeg12enc.o mpeg12.o \
timecode.o
OBJS-$(CONFIG_MPEG2VIDEO_DECODER) += mpeg12.o mpeg12data.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_MPEG2VIDEO_ENCODER) += mpeg12enc.o mpegvideo_enc.o \
timecode.o \
motion_est.o ratecontrol.o \
mpeg12.o mpeg12data.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_MPEG4_VAAPI_HWACCEL) += vaapi_mpeg4.o
OBJS-$(CONFIG_MSMPEG4V1_DECODER) += msmpeg4.o msmpeg4data.o
OBJS-$(CONFIG_MSMPEG4V2_DECODER) += msmpeg4.o msmpeg4data.o h263dec.o \
@@ -307,20 +323,14 @@ OBJS-$(CONFIG_MSMPEG4V3_ENCODER) += msmpeg4.o msmpeg4enc.o msmpeg4data.o \
h263dec.o h263.o ituh263dec.o \
mpeg4videodec.o
OBJS-$(CONFIG_MSRLE_DECODER) += msrle.o msrledec.o
OBJS-$(CONFIG_MSA1_DECODER) += mss3.o mss34dsp.o
OBJS-$(CONFIG_MSS1_DECODER) += mss1.o mss12.o
OBJS-$(CONFIG_MSS2_DECODER) += mss2.o mss12.o mss2dsp.o
OBJS-$(CONFIG_MSVIDEO1_DECODER) += msvideo1.o
OBJS-$(CONFIG_MSVIDEO1_ENCODER) += msvideo1enc.o elbg.o
OBJS-$(CONFIG_MSZH_DECODER) += lcldec.o
OBJS-$(CONFIG_MTS2_DECODER) += mss4.o mss34dsp.o
OBJS-$(CONFIG_MXPEG_DECODER) += mxpegdec.o mjpegdec.o mjpeg.o
OBJS-$(CONFIG_NELLYMOSER_DECODER) += nellymoserdec.o nellymoser.o
OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o \
audio_frame_queue.o
OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
OBJS-$(CONFIG_PAF_VIDEO_DECODER) += paf.o
OBJS-$(CONFIG_PAF_AUDIO_DECODER) += paf.o
OBJS-$(CONFIG_PAM_DECODER) += pnmdec.o pnm.o
OBJS-$(CONFIG_PAM_ENCODER) += pamenc.o pnm.o
OBJS-$(CONFIG_PBM_DECODER) += pnmdec.o pnm.o
@@ -337,16 +347,18 @@ OBJS-$(CONFIG_PNG_DECODER) += png.o pngdec.o pngdsp.o
OBJS-$(CONFIG_PNG_ENCODER) += png.o pngenc.o
OBJS-$(CONFIG_PPM_DECODER) += pnmdec.o pnm.o
OBJS-$(CONFIG_PPM_ENCODER) += pnmenc.o pnm.o
OBJS-$(CONFIG_PRORES_DECODER) += proresdec2.o proresdsp.o
OBJS-$(CONFIG_PRORES_DECODER) += proresdec2.o
OBJS-$(CONFIG_PRORES_LGPL_DECODER) += proresdec_lgpl.o proresdsp.o proresdata.o
OBJS-$(CONFIG_PRORES_ENCODER) += proresenc_anatoliy.o
OBJS-$(CONFIG_PRORES_ANATOLIY_ENCODER) += proresenc_anatoliy.o
OBJS-$(CONFIG_PRORES_KOSTYA_ENCODER) += proresenc_kostya.o proresdata.o proresdsp.o
OBJS-$(CONFIG_PTX_DECODER) += ptx.o
OBJS-$(CONFIG_QCELP_DECODER) += qcelpdec.o \
OBJS-$(CONFIG_QCELP_DECODER) += qcelpdec.o celp_math.o \
celp_filters.o acelp_vectors.o \
acelp_filters.o
OBJS-$(CONFIG_QDM2_DECODER) += qdm2.o
OBJS-$(CONFIG_QDM2_DECODER) += qdm2.o mpegaudiodec.o \
mpegaudiodecheader.o mpegaudio.o \
mpegaudiodata.o
OBJS-$(CONFIG_QDRAW_DECODER) += qdrw.o
OBJS-$(CONFIG_QPEG_DECODER) += qpeg.o
OBJS-$(CONFIG_QTRLE_DECODER) += qtrle.o
@@ -358,11 +370,10 @@ OBJS-$(CONFIG_R210_ENCODER) += r210enc.o
OBJS-$(CONFIG_RA_144_DECODER) += ra144dec.o ra144.o celp_filters.o
OBJS-$(CONFIG_RA_144_ENCODER) += ra144enc.o ra144.o celp_filters.o \
audio_frame_queue.o
OBJS-$(CONFIG_RA_288_DECODER) += ra288.o celp_filters.o
OBJS-$(CONFIG_RA_288_DECODER) += ra288.o celp_math.o celp_filters.o
OBJS-$(CONFIG_RALF_DECODER) += ralf.o
OBJS-$(CONFIG_RAWVIDEO_DECODER) += rawdec.o
OBJS-$(CONFIG_RAWVIDEO_ENCODER) += rawenc.o
OBJS-$(CONFIG_REALTEXT_DECODER) += realtextdec.o ass.o
OBJS-$(CONFIG_RL2_DECODER) += rl2.o
OBJS-$(CONFIG_ROQ_DECODER) += roqvideodec.o roqvideo.o
OBJS-$(CONFIG_ROQ_ENCODER) += roqvideoenc.o roqvideo.o elbg.o
@@ -373,11 +384,11 @@ OBJS-$(CONFIG_RV10_DECODER) += rv10.o
OBJS-$(CONFIG_RV10_ENCODER) += rv10enc.o
OBJS-$(CONFIG_RV20_DECODER) += rv10.o
OBJS-$(CONFIG_RV20_ENCODER) += rv20enc.o
OBJS-$(CONFIG_RV30_DECODER) += rv30.o rv34.o rv30dsp.o rv34dsp.o
OBJS-$(CONFIG_RV40_DECODER) += rv40.o rv34.o rv34dsp.o rv40dsp.o
OBJS-$(CONFIG_SAMI_DECODER) += samidec.o ass.o
OBJS-$(CONFIG_RV30_DECODER) += rv30.o rv34.o rv30dsp.o rv34dsp.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_RV40_DECODER) += rv40.o rv34.o rv34dsp.o rv40dsp.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_S302M_DECODER) += s302m.o
OBJS-$(CONFIG_SANM_DECODER) += sanm.o
OBJS-$(CONFIG_SGI_DECODER) += sgidec.o
OBJS-$(CONFIG_SGI_ENCODER) += sgienc.o rle.o
OBJS-$(CONFIG_SHORTEN_DECODER) += shorten.o
@@ -388,9 +399,12 @@ OBJS-$(CONFIG_SIPR_DECODER) += sipr.o acelp_pitch_delay.o \
OBJS-$(CONFIG_SMACKAUD_DECODER) += smacker.o
OBJS-$(CONFIG_SMACKER_DECODER) += smacker.o
OBJS-$(CONFIG_SMC_DECODER) += smc.o
OBJS-$(CONFIG_SNOW_DECODER) += snowdec.o snow.o
OBJS-$(CONFIG_SNOW_ENCODER) += snowenc.o snow.o \
h263.o ituh263enc.o
OBJS-$(CONFIG_SNOW_DECODER) += snowdec.o snow.o rangecoder.o
OBJS-$(CONFIG_SNOW_ENCODER) += snowenc.o snow.o rangecoder.o \
motion_est.o ratecontrol.o \
h263.o mpegvideo.o \
error_resilience.o ituh263enc.o \
mpegvideo_enc.o mpeg12data.o
OBJS-$(CONFIG_SOL_DPCM_DECODER) += dpcm.o
OBJS-$(CONFIG_SONIC_DECODER) += sonic.o
OBJS-$(CONFIG_SONIC_ENCODER) += sonic.o
@@ -398,38 +412,38 @@ OBJS-$(CONFIG_SONIC_LS_ENCODER) += sonic.o
OBJS-$(CONFIG_SP5X_DECODER) += sp5xdec.o mjpegdec.o mjpeg.o
OBJS-$(CONFIG_SRT_DECODER) += srtdec.o ass.o
OBJS-$(CONFIG_SRT_ENCODER) += srtenc.o ass_split.o
OBJS-$(CONFIG_SUBRIP_DECODER) += srtdec.o ass.o
OBJS-$(CONFIG_SUBRIP_ENCODER) += srtenc.o ass_split.o
OBJS-$(CONFIG_SUBVIEWER_DECODER) += subviewerdec.o ass.o
OBJS-$(CONFIG_SUNRAST_DECODER) += sunrast.o
OBJS-$(CONFIG_SUNRAST_ENCODER) += sunrastenc.o
OBJS-$(CONFIG_SVQ1_DECODER) += svq1dec.o svq1.o svq13.o h263.o
OBJS-$(CONFIG_SVQ1_DECODER) += svq1dec.o svq1.o h263.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_SVQ1_ENCODER) += svq1enc.o svq1.o \
h263.o ituh263enc.o
OBJS-$(CONFIG_SVQ3_DECODER) += svq3.o svq13.o h263.o h264.o \
motion_est.o h263.o \
mpegvideo.o error_resilience.o \
ituh263enc.o mpegvideo_enc.o \
ratecontrol.o mpeg12data.o
OBJS-$(CONFIG_SVQ3_DECODER) += h264.o svq3.o \
h264_loopfilter.o h264_direct.o \
h264_sei.o h264_ps.o h264_refs.o \
h264_cavlc.o h264_cabac.o cabac.o
h264_cavlc.o h264_cabac.o cabac.o \
mpegvideo.o error_resilience.o \
svq1dec.o svq1.o h263.o
OBJS-$(CONFIG_TARGA_DECODER) += targa.o
OBJS-$(CONFIG_TARGA_ENCODER) += targaenc.o rle.o
OBJS-$(CONFIG_THEORA_DECODER) += xiph.o
OBJS-$(CONFIG_THP_DECODER) += mjpegdec.o mjpeg.o
OBJS-$(CONFIG_TIERTEXSEQVIDEO_DECODER) += tiertexseqv.o
OBJS-$(CONFIG_TIFF_DECODER) += tiff.o lzw.o faxcompr.o tiff_data.o
OBJS-$(CONFIG_TIFF_ENCODER) += tiffenc.o rle.o lzwenc.o tiff_data.o
OBJS-$(CONFIG_TIFF_DECODER) += tiff.o lzw.o faxcompr.o
OBJS-$(CONFIG_TIFF_ENCODER) += tiffenc.o rle.o lzwenc.o
OBJS-$(CONFIG_TMV_DECODER) += tmv.o cga_data.o
OBJS-$(CONFIG_TRUEHD_DECODER) += mlpdec.o mlpdsp.o
OBJS-$(CONFIG_TRUEMOTION1_DECODER) += truemotion1.o
OBJS-$(CONFIG_TRUEMOTION2_DECODER) += truemotion2.o
OBJS-$(CONFIG_TRUESPEECH_DECODER) += truespeech.o
OBJS-$(CONFIG_TSCC_DECODER) += tscc.o msrledec.o
OBJS-$(CONFIG_TSCC2_DECODER) += tscc2.o
OBJS-$(CONFIG_TTA_DECODER) += tta.o
OBJS-$(CONFIG_TWINVQ_DECODER) += twinvq.o
OBJS-$(CONFIG_TWINVQ_DECODER) += twinvq.o celp_math.o
OBJS-$(CONFIG_TXD_DECODER) += txd.o s3tc.o
OBJS-$(CONFIG_ULTI_DECODER) += ulti.o
OBJS-$(CONFIG_UTVIDEO_DECODER) += utvideodec.o utvideo.o
OBJS-$(CONFIG_UTVIDEO_ENCODER) += utvideoenc.o utvideo.o
OBJS-$(CONFIG_UTVIDEO_DECODER) += utvideo.o
OBJS-$(CONFIG_V210_DECODER) += v210dec.o
OBJS-$(CONFIG_V210_ENCODER) += v210enc.o
OBJS-$(CONFIG_V308_DECODER) += v308dec.o
@@ -455,15 +469,14 @@ OBJS-$(CONFIG_VORBIS_DECODER) += vorbisdec.o vorbis.o \
vorbis_data.o xiph.o
OBJS-$(CONFIG_VORBIS_ENCODER) += vorbisenc.o vorbis.o \
vorbis_data.o
OBJS-$(CONFIG_VP3_DECODER) += vp3.o
OBJS-$(CONFIG_VP3_DECODER) += vp3.o vp3dsp.o
OBJS-$(CONFIG_VP5_DECODER) += vp5.o vp56.o vp56data.o vp56dsp.o \
vp56rac.o
vp3dsp.o vp56rac.o
OBJS-$(CONFIG_VP6_DECODER) += vp6.o vp56.o vp56data.o vp56dsp.o \
vp6dsp.o vp56rac.o
vp3dsp.o vp6dsp.o vp56rac.o
OBJS-$(CONFIG_VP8_DECODER) += vp8.o vp8dsp.o vp56rac.o
OBJS-$(CONFIG_VQA_DECODER) += vqavideo.o
OBJS-$(CONFIG_WAVPACK_DECODER) += wavpack.o
OBJS-$(CONFIG_WEBVTT_DECODER) += webvttdec.o
OBJS-$(CONFIG_WMALOSSLESS_DECODER) += wmalosslessdec.o wma_common.o
OBJS-$(CONFIG_WMAPRO_DECODER) += wmaprodec.o wma.o wma_common.o
OBJS-$(CONFIG_WMAV1_DECODER) += wmadec.o wma.o wma_common.o aactab.o
@@ -471,7 +484,7 @@ OBJS-$(CONFIG_WMAV1_ENCODER) += wmaenc.o wma.o wma_common.o aactab.o
OBJS-$(CONFIG_WMAV2_DECODER) += wmadec.o wma.o wma_common.o aactab.o
OBJS-$(CONFIG_WMAV2_ENCODER) += wmaenc.o wma.o wma_common.o aactab.o
OBJS-$(CONFIG_WMAVOICE_DECODER) += wmavoice.o \
celp_filters.o \
celp_math.o celp_filters.o \
acelp_vectors.o acelp_filters.o
OBJS-$(CONFIG_WMV1_DECODER) += msmpeg4.o msmpeg4data.o
OBJS-$(CONFIG_WMV2_DECODER) += wmv2dec.o wmv2.o \
@@ -593,61 +606,53 @@ OBJS-$(CONFIG_ADPCM_THP_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_XA_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_YAMAHA_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_YAMAHA_ENCODER) += adpcmenc.o adpcm_data.o
OBJS-$(CONFIG_VIMA_DECODER) += vima.o adpcm_data.o
# libavformat dependencies
OBJS-$(CONFIG_ADTS_MUXER) += mpeg4audio.o
OBJS-$(CONFIG_ADX_DEMUXER) += adx.o
OBJS-$(CONFIG_CAF_DEMUXER) += mpeg4audio.o mpegaudiodata.o \
ac3tab.o
OBJS-$(CONFIG_CAF_DEMUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_DV_DEMUXER) += dv_profile.o
OBJS-$(CONFIG_DV_MUXER) += dv_profile.o timecode.o
OBJS-$(CONFIG_FLAC_DEMUXER) += flac.o flacdata.o vorbis_data.o \
OBJS-$(CONFIG_FLAC_DEMUXER) += flacdec.o flacdata.o flac.o vorbis_data.o \
vorbis_parser.o xiph.o
OBJS-$(CONFIG_FLAC_MUXER) += flac.o flacdata.o vorbis_data.o
OBJS-$(CONFIG_FLAC_MUXER) += flacdec.o flacdata.o flac.o vorbis_data.o
OBJS-$(CONFIG_FLV_DEMUXER) += mpeg4audio.o
OBJS-$(CONFIG_GXF_DEMUXER) += mpeg12data.o
OBJS-$(CONFIG_IFF_DEMUXER) += iff.o
OBJS-$(CONFIG_ISMV_MUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_LATM_MUXER) += mpeg4audio.o
OBJS-$(CONFIG_MATROSKA_AUDIO_MUXER) += xiph.o mpeg4audio.o vorbis_data.o \
flac.o flacdata.o
flacdec.o flacdata.o flac.o
OBJS-$(CONFIG_MATROSKA_DEMUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MATROSKA_MUXER) += mpeg4audio.o mpegaudiodata.o \
flac.o flacdata.o vorbis_data.o xiph.o
OBJS-$(CONFIG_MP2_MUXER) += mpegaudiodata.o mpegaudiodecheader.o
OBJS-$(CONFIG_MATROSKA_MUXER) += xiph.o mpeg4audio.o \
flacdec.o flacdata.o flac.o \
mpegaudiodata.o vorbis_data.o
OBJS-$(CONFIG_MP3_MUXER) += mpegaudiodata.o mpegaudiodecheader.o
OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o ac3tab.o timecode.o
OBJS-$(CONFIG_MOV_MUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MPEGTS_MUXER) += mpeg4audio.o
OBJS-$(CONFIG_MPEGTS_MUXER) += mpegvideo.o mpeg4audio.o
OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MXF_MUXER) += timecode.o
OBJS-$(CONFIG_NUT_MUXER) += mpegaudiodata.o
OBJS-$(CONFIG_OGG_DEMUXER) += xiph.o flac.o flacdata.o \
mpeg12data.o vorbis_parser.o \
dirac.o vorbis_data.o
OBJS-$(CONFIG_OGG_MUXER) += xiph.o flac.o flacdata.o \
OBJS-$(CONFIG_OGG_DEMUXER) += flacdec.o flacdata.o flac.o \
dirac.o mpeg12data.o vorbis_parser.o \
xiph.o vorbis_data.o
OBJS-$(CONFIG_OGG_MUXER) += xiph.o flacdec.o flacdata.o flac.o \
vorbis_data.o
OBJS-$(CONFIG_RTP_MUXER) += mpeg4audio.o xiph.o
OBJS-$(CONFIG_RTPDEC) += mjpeg.o
OBJS-$(CONFIG_RTP_MUXER) += mpeg4audio.o mpegvideo.o xiph.o
OBJS-$(CONFIG_SPDIF_DEMUXER) += aacadtsdec.o mpeg4audio.o
OBJS-$(CONFIG_SPDIF_MUXER) += dca.o
OBJS-$(CONFIG_WEBM_MUXER) += mpeg4audio.o mpegaudiodata.o \
xiph.o flac.o flacdata.o \
vorbis_data.o
OBJS-$(CONFIG_WEBM_MUXER) += xiph.o mpeg4audio.o \
flacdec.o flacdata.o flac.o \
mpegaudiodata.o vorbis_data.o
OBJS-$(CONFIG_WTV_DEMUXER) += mpeg4audio.o mpegaudiodata.o
# external codec libraries
OBJS-$(CONFIG_LIBAACPLUS_ENCODER) += libaacplus.o
OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o audio_frame_queue.o
OBJS-$(CONFIG_LIBFDK_AAC_ENCODER) += libfdk-aacenc.o audio_frame_queue.o
OBJS-$(CONFIG_LIBGSM_DECODER) += libgsm.o
OBJS-$(CONFIG_LIBGSM_ENCODER) += libgsm.o
OBJS-$(CONFIG_LIBGSM_MS_DECODER) += libgsm.o
OBJS-$(CONFIG_LIBGSM_MS_ENCODER) += libgsm.o
OBJS-$(CONFIG_LIBILBC_DECODER) += libilbc.o
OBJS-$(CONFIG_LIBILBC_ENCODER) += libilbc.o
OBJS-$(CONFIG_LIBMP3LAME_ENCODER) += libmp3lame.o mpegaudiodecheader.o \
audio_frame_queue.o
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_DECODER) += libopencore-amr.o \
@@ -657,7 +662,6 @@ OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER) += libopencore-amr.o \
OBJS-$(CONFIG_LIBOPENCORE_AMRWB_DECODER) += libopencore-amr.o
OBJS-$(CONFIG_LIBOPENJPEG_DECODER) += libopenjpegdec.o
OBJS-$(CONFIG_LIBOPENJPEG_ENCODER) += libopenjpegenc.o
OBJS-$(CONFIG_LIBOPUS_DECODER) += libopus_dec.o vorbis_data.o
OBJS-$(CONFIG_LIBSCHROEDINGER_DECODER) += libschroedingerdec.o \
libschroedinger.o
OBJS-$(CONFIG_LIBSCHROEDINGER_ENCODER) += libschroedingerenc.o \
@@ -666,15 +670,13 @@ OBJS-$(CONFIG_LIBSPEEX_DECODER) += libspeexdec.o
OBJS-$(CONFIG_LIBSPEEX_ENCODER) += libspeexenc.o audio_frame_queue.o
OBJS-$(CONFIG_LIBSTAGEFRIGHT_H264_DECODER)+= libstagefright.o
OBJS-$(CONFIG_LIBTHEORA_ENCODER) += libtheoraenc.o
OBJS-$(CONFIG_LIBTWOLAME_ENCODER) += libtwolame.o
OBJS-$(CONFIG_LIBUTVIDEO_DECODER) += libutvideodec.o
OBJS-$(CONFIG_LIBUTVIDEO_ENCODER) += libutvideoenc.o
OBJS-$(CONFIG_LIBVO_AACENC_ENCODER) += libvo-aacenc.o mpeg4audio.o \
audio_frame_queue.o
OBJS-$(CONFIG_LIBVO_AMRWBENC_ENCODER) += libvo-amrwbenc.o
OBJS-$(CONFIG_LIBVORBIS_DECODER) += libvorbisdec.o
OBJS-$(CONFIG_LIBVORBIS_ENCODER) += libvorbisenc.o audio_frame_queue.o \
vorbis_data.o vorbis_parser.o xiph.o
OBJS-$(CONFIG_LIBVORBIS_ENCODER) += libvorbis.o audio_frame_queue.o \
vorbis_data.o vorbis_parser.o
OBJS-$(CONFIG_LIBVPX_DECODER) += libvpxdec.o
OBJS-$(CONFIG_LIBVPX_ENCODER) += libvpxenc.o
OBJS-$(CONFIG_LIBX264_ENCODER) += libx264.o
@@ -684,14 +686,12 @@ OBJS-$(CONFIG_LIBXVID_ENCODER) += libxvid.o
# parsers
OBJS-$(CONFIG_AAC_PARSER) += aac_parser.o aac_ac3_parser.o \
aacadtsdec.o mpeg4audio.o
OBJS-$(CONFIG_AAC_LATM_PARSER) += latm_parser.o
OBJS-$(CONFIG_AC3_PARSER) += ac3_parser.o ac3tab.o \
aac_ac3_parser.o
OBJS-$(CONFIG_ADX_PARSER) += adx_parser.o adx.o
OBJS-$(CONFIG_BMP_PARSER) += bmp_parser.o
OBJS-$(CONFIG_CAVSVIDEO_PARSER) += cavs_parser.o
OBJS-$(CONFIG_COOK_PARSER) += cook_parser.o
OBJS-$(CONFIG_DCA_PARSER) += dca_parser.o dca.o
OBJS-$(CONFIG_DCA_PARSER) += dca_parser.o
OBJS-$(CONFIG_DIRAC_PARSER) += dirac_parser.o
OBJS-$(CONFIG_DNXHD_PARSER) += dnxhd_parser.o
OBJS-$(CONFIG_DVBSUB_PARSER) += dvbsub_parser.o
@@ -705,23 +705,27 @@ OBJS-$(CONFIG_H264_PARSER) += h264_parser.o h264.o \
cabac.o \
h264_refs.o h264_sei.o h264_direct.o \
h264_loopfilter.o h264_cabac.o \
h264_cavlc.o h264_ps.o
h264_cavlc.o h264_ps.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_AAC_LATM_PARSER) += latm_parser.o
OBJS-$(CONFIG_MJPEG_PARSER) += mjpeg_parser.o
OBJS-$(CONFIG_MLP_PARSER) += mlp_parser.o mlp.o
OBJS-$(CONFIG_MPEG4VIDEO_PARSER) += mpeg4video_parser.o h263.o \
mpegvideo.o error_resilience.o \
mpeg4videodec.o mpeg4video.o \
ituh263dec.o h263dec.o
OBJS-$(CONFIG_PNG_PARSER) += png_parser.o
OBJS-$(CONFIG_MPEGAUDIO_PARSER) += mpegaudio_parser.o \
mpegaudiodecheader.o mpegaudiodata.o
OBJS-$(CONFIG_MPEGVIDEO_PARSER) += mpegvideo_parser.o \
mpeg12.o mpeg12data.o
mpeg12.o mpeg12data.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_PNM_PARSER) += pnm_parser.o pnm.o
OBJS-$(CONFIG_RV30_PARSER) += rv34_parser.o
OBJS-$(CONFIG_RV40_PARSER) += rv34_parser.o
OBJS-$(CONFIG_VC1_PARSER) += vc1_parser.o vc1.o vc1data.o \
msmpeg4.o msmpeg4data.o mpeg4video.o \
h263.o
h263.o mpegvideo.o error_resilience.o
OBJS-$(CONFIG_VORBIS_PARSER) += vorbis_parser.o xiph.o
OBJS-$(CONFIG_VP3_PARSER) += vp3_parser.o
OBJS-$(CONFIG_VP8_PARSER) += vp8_parser.o
@@ -744,9 +748,9 @@ OBJS-$(CONFIG_REMOVE_EXTRADATA_BSF) += remove_extradata_bsf.o
OBJS-$(CONFIG_TEXT2MOVSUB_BSF) += movsub_bsf.o
# thread libraries
OBJS-$(HAVE_PTHREADS) += pthread.o frame_thread_encoder.o
OBJS-$(HAVE_W32THREADS) += pthread.o frame_thread_encoder.o
OBJS-$(HAVE_OS2THREADS) += pthread.o frame_thread_encoder.o
OBJS-$(HAVE_PTHREADS) += pthread.o
OBJS-$(HAVE_W32THREADS) += pthread.o
OBJS-$(HAVE_OS2THREADS) += pthread.o
# inverse.o contains the ff_inverse table definition, which is used by
# the FASTDIV macro (from libavutil); since referencing the external
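The comment above refers to FFmpeg's FASTDIV trick: division by a small run-time divisor is replaced by a 64-bit multiply with a precomputed per-divisor reciprocal from the ff_inverse table, followed by a 32-bit shift. A minimal standalone sketch of the idea, using an illustrative ceil(2^32/b) reciprocal rather than the exact ff_inverse values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t b   = 7;                                       /* divisor */
    uint32_t inv = (uint32_t)(((1ULL << 32) + b - 1) / b);  /* illustrative reciprocal, ceil(2^32 / b) */
    uint32_t a   = 1000;                                    /* dividend */
    uint32_t q   = (uint32_t)(((uint64_t)a * inv) >> 32);   /* behaves like a / b */
    printf("%u / %u = %u\n", a, b, q);                      /* prints 142 */
    return 0;
}

The multiply-by-reciprocal form is only exact over a bounded operand range; the table exists so the reciprocal never has to be computed at run time.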
@@ -757,8 +761,8 @@ OBJS-$(!CONFIG_SMALL) += inverse.o
SKIPHEADERS += %_tablegen.h \
%_tables.h \
aac_tablegen_decl.h \
codec_names.h \
fft-internal.h \
old_codec_ids.h \
tableprint.h \
$(ARCH)/vp56_arith.h \
@@ -767,7 +771,7 @@ SKIPHEADERS-$(CONFIG_LIBSCHROEDINGER) += libschroedinger.h
SKIPHEADERS-$(CONFIG_LIBUTVIDEO) += libutvideo.h
SKIPHEADERS-$(CONFIG_MPEG_XVMC_DECODER) += xvmc.h
SKIPHEADERS-$(CONFIG_VAAPI) += vaapi_internal.h
SKIPHEADERS-$(CONFIG_VDA) += vda.h
SKIPHEADERS-$(CONFIG_VDA) += vda.h vda_internal.h
SKIPHEADERS-$(CONFIG_VDPAU) += vdpau.h
SKIPHEADERS-$(HAVE_OS2THREADS) += os2threads.h
SKIPHEADERS-$(HAVE_W32THREADS) += w32pthreads.h
@@ -784,8 +788,6 @@ TESTPROGS = cabac \
TESTPROGS-$(HAVE_MMX) += motion
TESTOBJS = dctref.o
TOOLS = fourcc2pixfmt
HOSTPROGS = aac_tablegen \
aacps_tablegen \
cbrt_tablegen \
@@ -799,7 +801,7 @@ HOSTPROGS = aac_tablegen \
CLEANFILES = *_tables.c *_tables.h *_tablegen$(HOSTEXESUF)
$(SUBDIR)dct-test$(EXESUF): $(SUBDIR)dctref.o $(SUBDIR)aandcttab.o
$(SUBDIR)dct-test$(EXESUF): $(SUBDIR)dctref.o
TRIG_TABLES = cos cos_fixed sin
TRIG_TABLES := $(TRIG_TABLES:%=$(SUBDIR)%_tables.c)
@@ -833,3 +835,10 @@ $(SUBDIR)motionpixels.o: $(SUBDIR)motionpixels_tables.h
$(SUBDIR)pcm.o: $(SUBDIR)pcm_tables.h
$(SUBDIR)qdm2.o: $(SUBDIR)qdm2_tables.h
endif
CODEC_NAMES_SH := $(SRC_PATH)/$(SUBDIR)codec_names.sh
AVCODEC_H := $(SRC_PATH)/$(SUBDIR)avcodec.h
$(SUBDIR)codec_names.h: $(CODEC_NAMES_SH) config.h $(AVCODEC_H)
$(CC) $(CPPFLAGS) $(CFLAGS) -E $(AVCODEC_H) | \
$(CODEC_NAMES_SH) config.h $@
$(SUBDIR)utils.o: $(SUBDIR)codec_names.h


@@ -29,7 +29,6 @@
#include "a64tables.h"
#include "elbg.h"
#include "internal.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#define DITHERSTEPS 8
@@ -188,7 +187,7 @@ static av_cold int a64multi_init_encoder(AVCodecContext *avctx)
av_log(avctx, AV_LOG_INFO, "charset lifetime set to %d frame(s)\n", c->mc_lifetime);
c->mc_frame_counter = 0;
c->mc_use_5col = avctx->codec->id == AV_CODEC_ID_A64_MULTI5;
c->mc_use_5col = avctx->codec->id == CODEC_ID_A64_MULTI5;
c->mc_pal_size = 4 + c->mc_use_5col;
/* precalc luma values for later use */
@@ -369,11 +368,10 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
return 0;
}
#if CONFIG_A64MULTI_ENCODER
AVCodec ff_a64multi_encoder = {
.name = "a64multi",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_A64_MULTI,
.id = CODEC_ID_A64_MULTI,
.priv_data_size = sizeof(A64Context),
.init = a64multi_init_encoder,
.encode2 = a64multi_encode_frame,
@@ -382,12 +380,11 @@ AVCodec ff_a64multi_encoder = {
.long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64"),
.capabilities = CODEC_CAP_DELAY,
};
#endif
#if CONFIG_A64MULTI5_ENCODER
AVCodec ff_a64multi5_encoder = {
.name = "a64multi5",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_A64_MULTI5,
.id = CODEC_ID_A64_MULTI5,
.priv_data_size = sizeof(A64Context),
.init = a64multi_init_encoder,
.encode2 = a64multi_encode_frame,
@@ -396,4 +393,3 @@ AVCodec ff_a64multi5_encoder = {
.long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64, extended with 5th color (colram)"),
.capabilities = CODEC_CAP_DELAY,
};
#endif


@@ -30,7 +30,6 @@
#ifndef AVCODEC_AAC_H
#define AVCODEC_AAC_H
#include "libavutil/float_dsp.h"
#include "avcodec.h"
#include "dsputil.h"
#include "fft.h"
@@ -293,7 +292,6 @@ typedef struct {
FFTContext mdct_ltp;
DSPContext dsp;
FmtConvertContext fmt_conv;
AVFloatDSPContext fdsp;
int random_state;
/** @} */
@@ -304,15 +302,6 @@ typedef struct {
float *output_data[MAX_CHANNELS]; ///< Points to each element's 'ret' buffer (PCM output).
/** @} */
/**
* @name Japanese DTV specific extension
* @{
*/
int enable_jp_dmono; ///< enable japanese DTV specific 'dual mono'
int dmono_mode; ///< select the channel to decode in dual mono.
/** @} */
DECLARE_ALIGNED(32, float, temp)[128];
OutputConfiguration oc[2];


@@ -20,7 +20,6 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/common.h"
#include "parser.h"
#include "aac_ac3_parser.h"
@@ -79,7 +78,7 @@ get_next:
and total number of samples found in an AAC ADTS header are not
reliable. Bit rate is still accurate because the total frame duration in
seconds is still correct (as is the number of bits in the frame). */
if (avctx->codec_id != AV_CODEC_ID_AAC) {
if (avctx->codec_id != CODEC_ID_AAC) {
avctx->sample_rate = s->sample_rate;
/* allow downmixing to stereo (or mono for AC-3) */
@@ -87,8 +86,8 @@ get_next:
avctx->request_channels < s->channels &&
(avctx->request_channels <= 2 ||
(avctx->request_channels == 1 &&
(avctx->codec_id == AV_CODEC_ID_AC3 ||
avctx->codec_id == AV_CODEC_ID_EAC3)))) {
(avctx->codec_id == CODEC_ID_AC3 ||
avctx->codec_id == CODEC_ID_EAC3)))) {
avctx->channels = avctx->request_channels;
} else {
avctx->channels = s->channels;


@@ -55,7 +55,7 @@ typedef struct AACAC3ParseContext {
uint64_t state;
int need_next_header;
enum AVCodecID codec_id;
enum CodecID codec_id;
} AACAC3ParseContext;
int ff_aac_ac3_parse(AVCodecParserContext *s1,


@@ -61,7 +61,7 @@ static av_cold int aac_parse_init(AVCodecParserContext *s1)
AVCodecParser ff_aac_parser = {
.codec_ids = { AV_CODEC_ID_AAC },
.codec_ids = { CODEC_ID_AAC },
.priv_data_size = sizeof(AACAC3ParseContext),
.parser_init = aac_parse_init,
.parser_parse = ff_aac_ac3_parse,


@@ -506,7 +506,7 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
idx = cb;
ppos = max_sfb;
while (ppos > 0) {
av_assert1(idx >= 0);
assert(idx >= 0);
cb = idx;
stackrun[stack_len] = path[ppos][cb].run;
stackcb [stack_len] = cb;
@@ -878,7 +878,7 @@ static void search_for_quantizers_faac(AVCodecContext *avctx, AACEncContext *s,
} else {
for (w = 0; w < 8; w++) {
const float *coeffs = sce->coeffs + w*128;
curband = start = 0;
start = 0;
for (i = 0; i < 128; i++) {
if (i - start >= sce->ics.swb_sizes[curband]) {
start += sce->ics.swb_sizes[curband];


@@ -79,7 +79,7 @@
Parametric Stereo.
*/
#include "libavutil/float_dsp.h"
#include "avcodec.h"
#include "internal.h"
#include "get_bits.h"
@@ -369,10 +369,12 @@ static void push_output_configuration(AACContext *ac) {
* configuration is unlocked.
*/
static void pop_output_configuration(AACContext *ac) {
if (ac->oc[1].status != OC_LOCKED && ac->oc[0].status != OC_NONE) {
ac->oc[1] = ac->oc[0];
ac->avctx->channels = ac->oc[1].channels;
ac->avctx->channel_layout = ac->oc[1].channel_layout;
if (ac->oc[1].status != OC_LOCKED) {
if (ac->oc[0].status == OC_LOCKED) {
ac->oc[1] = ac->oc[0];
ac->avctx->channels = ac->oc[1].channels;
ac->avctx->channel_layout = ac->oc[1].channel_layout;
}
}
}
@@ -485,7 +487,6 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
return NULL;
ac->oc[1].m4ac.chan_config = 2;
ac->oc[1].m4ac.ps = 0;
}
// And vice-versa
if (!ac->tags_mapped && type == TYPE_SCE && ac->oc[1].m4ac.chan_config == 2) {
@@ -503,8 +504,6 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
return NULL;
ac->oc[1].m4ac.chan_config = 1;
if (ac->oc[1].m4ac.sbr)
ac->oc[1].m4ac.ps = -1;
}
// For indexed channel configurations map the channels solely based on position.
switch (ac->oc[1].m4ac.chan_config) {
@@ -573,8 +572,6 @@ static void decode_channel_map(uint8_t layout_map[][3],
case AAC_CHANNEL_LFE:
syn_ele = TYPE_LFE;
break;
default:
av_assert0(0);
}
layout_map[0][0] = syn_ele;
layout_map[0][1] = get_bits(gb, 4);
@@ -781,7 +778,7 @@ static int decode_audio_specific_config(AACContext *ac,
*
* @return Returns a 32-bit pseudorandom integer
*/
static av_always_inline int lcg_random(unsigned previous_val)
static av_always_inline int lcg_random(int previous_val)
{
return previous_val * 1664525 + 1013904223;
}
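The constants 1664525 and 1013904223 above are the classic Numerical Recipes linear congruential generator; the 1.0-side change of the parameter to unsigned makes the wrap-around multiply well defined, since signed overflow is undefined behaviour in C. A minimal sketch of how the noise state advances, seeded with the value aac_decode_init uses:

#include <stdio.h>

static int lcg_random(unsigned previous_val)
{
    return previous_val * 1664525 + 1013904223;
}

int main(void)
{
    int state = 0x1f2e3d4c;          /* the seed set in aac_decode_init */
    for (int i = 0; i < 4; i++) {
        state = lcg_random(state);   /* each call yields the next noise word */
        printf("%d\n", state);
    }
    return 0;
}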
@@ -840,14 +837,6 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
ac->avctx = avctx;
ac->oc[1].m4ac.sample_rate = avctx->sample_rate;
if (avctx->request_sample_fmt == AV_SAMPLE_FMT_FLT) {
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
output_scale_factor = 1.0 / 32768.0;
} else {
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
output_scale_factor = 1.0;
}
if (avctx->extradata_size > 0) {
if (decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
avctx->extradata,
@@ -883,6 +872,14 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
}
}
if (avctx->request_sample_fmt == AV_SAMPLE_FMT_FLT) {
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
output_scale_factor = 1.0 / 32768.0;
} else {
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
output_scale_factor = 1.0;
}
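Both branches decode to the same internal amplitudes; the decoder works at 16-bit scale, so float output is simply divided by 32768 to land in the conventional ±1.0 range. A trivial illustrative check:

#include <stdio.h>

int main(void)
{
    float output_scale_factor = 1.0f / 32768.0f;
    /* a full-scale 16-bit sample maps to just under 1.0 in float output */
    printf("%f\n", 32767.0f * output_scale_factor);
    return 0;
}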
AAC_INIT_VLC_STATIC( 0, 304);
AAC_INIT_VLC_STATIC( 1, 270);
AAC_INIT_VLC_STATIC( 2, 550);
@@ -899,7 +896,6 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
ff_dsputil_init(&ac->dsp, avctx);
ff_fmt_convert_init(&ac->fmt_conv, avctx);
avpriv_float_dsp_init(&ac->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
ac->random_state = 0x1f2e3d4c;
@@ -1306,7 +1302,7 @@ static inline float *VMUL4S(float *dst, const float *v, unsigned idx,
t.i = s.i ^ (sign & 1U<<31);
*dst++ = v[idx>>4 & 3] * t.f;
sign <<= nz & 1;
sign <<= nz & 1; nz >>= 1;
t.i = s.i ^ (sign & 1U<<31);
*dst++ = v[idx>>6 & 3] * t.f;
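The code above applies coded signs without branching: XORing bit 31 of an IEEE-754 single flips its sign, and shifting the sign word left by one bit per nonzero coefficient (the "sign <<= nz & 1" lines) queues up the next coded sign. A self-contained sketch of the core flip, with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    union { float f; uint32_t i; } s, t;
    s.f = 2.5f;
    uint32_t sign = 1U << 31;        /* pretend the bitstream coded "negative" */
    t.i = s.i ^ (sign & 1U << 31);   /* flip the sign bit iff it is set */
    printf("%f\n", t.f);             /* prints -2.500000 */
    return 0;
}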
@@ -2068,10 +2064,10 @@ static void windowing_and_mdct_ltp(AACContext *ac, float *out,
const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
if (ics->window_sequence[0] != LONG_STOP_SEQUENCE) {
ac->fdsp.vector_fmul(in, in, lwindow_prev, 1024);
ac->dsp.vector_fmul(in, in, lwindow_prev, 1024);
} else {
memset(in, 0, 448 * sizeof(float));
ac->fdsp.vector_fmul(in + 448, in + 448, swindow_prev, 128);
ac->dsp.vector_fmul(in + 448, in + 448, swindow_prev, 128);
}
if (ics->window_sequence[0] != LONG_START_SEQUENCE) {
ac->dsp.vector_fmul_reverse(in + 1024, in + 1024, lwindow, 1024);
@@ -2375,21 +2371,6 @@ static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
return -7;
} else {
ac->oc[1].m4ac.chan_config = 0;
/**
* dual mono frames in Japanese DTV can have chan_config 0
* WITHOUT specifying PCE.
* thus, set dual mono as default.
*/
if (ac->enable_jp_dmono && ac->oc[0].status == OC_NONE) {
layout_map_tags = 2;
layout_map[0][0] = layout_map[1][0] = TYPE_SCE;
layout_map[0][2] = layout_map[1][2] = AAC_CHANNEL_FRONT;
layout_map[0][1] = 0;
layout_map[1][1] = 1;
if (output_configure(ac, layout_map, layout_map_tags,
0, OC_TRIAL_FRAME))
return -7;
}
}
ac->oc[1].m4ac.sample_rate = hdr_info.sample_rate;
ac->oc[1].m4ac.sampling_index = hdr_info.sampling_index;
@@ -2407,15 +2388,13 @@ static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
}
static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
int *got_frame_ptr, GetBitContext *gb, AVPacket *avpkt)
int *got_frame_ptr, GetBitContext *gb)
{
AACContext *ac = avctx->priv_data;
ChannelElement *che = NULL, *che_prev = NULL;
enum RawDataBlockType elem_type, elem_type_prev = TYPE_END;
int err, elem_id;
int samples = 0, multiplier, audio_found = 0, pce_found = 0;
int is_dmono, sce_count = 0;
float *tmp = NULL;
if (show_bits(gb, 12) == 0xfff) {
if (parse_adts_frame_header(ac, gb) < 0) {
@@ -2450,7 +2429,6 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
case TYPE_SCE:
err = decode_ics(ac, &che->ch[0], gb, 0, 0);
audio_found = 1;
sce_count++;
break;
case TYPE_CPE:
@@ -2529,20 +2507,6 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
multiplier = (ac->oc[1].m4ac.sbr == 1) ? ac->oc[1].m4ac.ext_sample_rate > ac->oc[1].m4ac.sample_rate : 0;
samples <<= multiplier;
/* for dual-mono audio (SCE + SCE) */
is_dmono = ac->enable_jp_dmono && sce_count == 2 &&
ac->oc[1].channel_layout == (AV_CH_FRONT_LEFT | AV_CH_FRONT_RIGHT);
if (is_dmono) {
if (ac->dmono_mode == 0) {
tmp = ac->output_data[1];
ac->output_data[1] = ac->output_data[0];
} else if (ac->dmono_mode == 1) {
tmp = ac->output_data[0];
ac->output_data[0] = ac->output_data[1];
}
}
if (samples) {
/* get output buffer */
ac->frame.nb_samples = samples;
@@ -2565,25 +2529,12 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
}
*got_frame_ptr = !!samples;
if (is_dmono) {
if (ac->dmono_mode == 0)
ac->output_data[1] = tmp;
else if (ac->dmono_mode == 1)
ac->output_data[0] = tmp;
}
if (ac->oc[1].status && audio_found) {
avctx->sample_rate = ac->oc[1].m4ac.sample_rate << multiplier;
avctx->frame_size = samples;
ac->oc[1].status = OC_LOCKED;
}
if (multiplier) {
int side_size;
uint32_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
if (side && side_size>=4)
AV_WL32(side, 2*AV_RL32(side));
}
return 0;
fail:
pop_output_configuration(ac);
@@ -2604,10 +2555,6 @@ static int aac_decode_frame(AVCodecContext *avctx, void *data,
const uint8_t *new_extradata = av_packet_get_side_data(avpkt,
AV_PKT_DATA_NEW_EXTRADATA,
&new_extradata_size);
int jp_dualmono_size;
const uint8_t *jp_dualmono = av_packet_get_side_data(avpkt,
AV_PKT_DATA_JP_DUALMONO,
&jp_dualmono_size);
if (new_extradata && 0) {
av_free(avctx->extradata);
@@ -2626,14 +2573,9 @@ static int aac_decode_frame(AVCodecContext *avctx, void *data,
}
}
ac->enable_jp_dmono = !!jp_dualmono;
ac->dmono_mode = 0;
if (jp_dualmono && jp_dualmono_size > 0)
ac->dmono_mode = *jp_dualmono;
init_get_bits(&gb, buf, buf_size * 8);
if ((err = aac_decode_frame_int(avctx, data, got_frame_ptr, &gb, avpkt)) < 0)
if ((err = aac_decode_frame_int(avctx, data, got_frame_ptr, &gb)) < 0)
return err;
buf_consumed = (get_bits_count(&gb) + 7) >> 3;
@@ -2668,7 +2610,7 @@ static av_cold int aac_decode_close(AVCodecContext *avctx)
struct LATMContext {
AACContext aac_ctx; ///< containing AACContext
int initialized; ///< initialized after a valid extradata was seen
int initialized; ///< initilized after a valid extradata was seen
// parser data
int audio_mux_version_A; ///< LATM syntax version
@@ -2713,15 +2655,10 @@ static int latm_decode_audio_specific_config(struct LATMContext *latmctx,
if (bits_consumed < 0)
return AVERROR_INVALIDDATA;
if (!latmctx->initialized ||
ac->oc[1].m4ac.sample_rate != m4ac.sample_rate ||
if (ac->oc[1].m4ac.sample_rate != m4ac.sample_rate ||
ac->oc[1].m4ac.chan_config != m4ac.chan_config) {
if(latmctx->initialized) {
av_log(avctx, AV_LOG_INFO, "audio config changed\n");
} else {
av_log(avctx, AV_LOG_INFO, "initializing latmctx\n");
}
av_log(avctx, AV_LOG_INFO, "audio config changed\n");
latmctx->initialized = 0;
esize = (bits_consumed+7) / 8;
@@ -2765,9 +2702,9 @@ static int read_stream_mux_config(struct LATMContext *latmctx,
return AVERROR_PATCHWELCOME;
}
// for each program (which there is only one in DVB)
// for each program (which there is only on in DVB)
// for each layer (which there is only one in DVB)
// for each layer (which there is only on in DVB)
if (get_bits(gb, 3)) { // numLayer
av_log_missing_feature(latmctx->aac_ctx.avctx,
"multiple layers are not supported\n", 1);
@@ -2888,7 +2825,7 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out,
return AVERROR_INVALIDDATA;
muxlength = get_bits(&gb, 13) + 3;
// not enough data, the parser should have sorted this out
// not enough data, the parser should have sorted this
if (muxlength > avpkt->size)
return AVERROR_INVALIDDATA;
@@ -2918,7 +2855,7 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out,
return AVERROR_INVALIDDATA;
}
if ((err = aac_decode_frame_int(avctx, out, got_frame_ptr, &gb, avpkt)) < 0)
if ((err = aac_decode_frame_int(avctx, out, got_frame_ptr, &gb)) < 0)
return err;
return muxlength;
@@ -2939,12 +2876,12 @@ static av_cold int latm_decode_init(AVCodecContext *avctx)
AVCodec ff_aac_decoder = {
.name = "aac",
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_AAC,
.id = CODEC_ID_AAC,
.priv_data_size = sizeof(AACContext),
.init = aac_decode_init,
.close = aac_decode_close,
.decode = aac_decode_frame,
.long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"),
.long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
.sample_fmts = (const enum AVSampleFormat[]) {
AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
},
@@ -2961,12 +2898,12 @@ AVCodec ff_aac_decoder = {
AVCodec ff_aac_latm_decoder = {
.name = "aac_latm",
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_AAC_LATM,
.id = CODEC_ID_AAC_LATM,
.priv_data_size = sizeof(struct LATMContext),
.init = latm_decode_init,
.close = aac_decode_close,
.decode = latm_decode_frame,
.long_name = NULL_IF_CONFIG_SMALL("AAC LATM (Advanced Audio Coding LATM syntax)"),
.long_name = NULL_IF_CONFIG_SMALL("AAC LATM (Advanced Audio Codec LATM syntax)"),
.sample_fmts = (const enum AVSampleFormat[]) {
AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
},


@@ -30,7 +30,6 @@
* add temporal noise shaping
***********************************/
#include "libavutil/float_dsp.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "put_bits.h"
@@ -183,9 +182,7 @@ static void put_audio_specific_config(AVCodecContext *avctx)
}
#define WINDOW_FUNC(type) \
static void apply_ ##type ##_window(DSPContext *dsp, AVFloatDSPContext *fdsp, \
SingleChannelElement *sce, \
const float *audio)
static void apply_ ##type ##_window(DSPContext *dsp, SingleChannelElement *sce, const float *audio)
WINDOW_FUNC(only_long)
{
@@ -193,7 +190,7 @@ WINDOW_FUNC(only_long)
const float *pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
float *out = sce->ret;
fdsp->vector_fmul (out, audio, lwindow, 1024);
dsp->vector_fmul (out, audio, lwindow, 1024);
dsp->vector_fmul_reverse(out + 1024, audio + 1024, pwindow, 1024);
}
@@ -203,7 +200,7 @@ WINDOW_FUNC(long_start)
const float *swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
float *out = sce->ret;
fdsp->vector_fmul(out, audio, lwindow, 1024);
dsp->vector_fmul(out, audio, lwindow, 1024);
memcpy(out + 1024, audio + 1024, sizeof(out[0]) * 448);
dsp->vector_fmul_reverse(out + 1024 + 448, audio + 1024 + 448, swindow, 128);
memset(out + 1024 + 576, 0, sizeof(out[0]) * 448);
@@ -216,7 +213,7 @@ WINDOW_FUNC(long_stop)
float *out = sce->ret;
memset(out, 0, sizeof(out[0]) * 448);
fdsp->vector_fmul(out + 448, audio + 448, swindow, 128);
dsp->vector_fmul(out + 448, audio + 448, swindow, 128);
memcpy(out + 576, audio + 576, sizeof(out[0]) * 448);
dsp->vector_fmul_reverse(out + 1024, audio + 1024, lwindow, 1024);
}
@@ -230,7 +227,7 @@ WINDOW_FUNC(eight_short)
int w;
for (w = 0; w < 8; w++) {
fdsp->vector_fmul (out, in, w ? pwindow : swindow, 128);
dsp->vector_fmul (out, in, w ? pwindow : swindow, 128);
out += 128;
in += 128;
dsp->vector_fmul_reverse(out, in, swindow, 128);
@@ -238,9 +235,7 @@ WINDOW_FUNC(eight_short)
}
}
static void (*const apply_window[4])(DSPContext *dsp, AVFloatDSPContext *fdsp,
SingleChannelElement *sce,
const float *audio) = {
static void (*const apply_window[4])(DSPContext *dsp, SingleChannelElement *sce, const float *audio) = {
[ONLY_LONG_SEQUENCE] = apply_only_long_window,
[LONG_START_SEQUENCE] = apply_long_start_window,
[EIGHT_SHORT_SEQUENCE] = apply_eight_short_window,
@@ -253,7 +248,7 @@ static void apply_window_and_mdct(AACEncContext *s, SingleChannelElement *sce,
int i;
float *output = sce->ret;
apply_window[sce->ics.window_sequence[0]](&s->dsp, &s->fdsp, sce, audio);
apply_window[sce->ics.window_sequence[0]](&s->dsp, sce, audio);
if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE)
s->mdct1024.mdct_calc(&s->mdct1024, sce->coeffs, output);
@@ -698,7 +693,6 @@ static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
int ret = 0;
ff_dsputil_init(&s->dsp, avctx);
avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
// window init
ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
@@ -801,11 +795,11 @@ fail:
#define AACENC_FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM
static const AVOption aacenc_options[] = {
{"stereo_mode", "Stereo coding method", offsetof(AACEncContext, options.stereo_mode), AV_OPT_TYPE_INT, {.i64 = 0}, -1, 1, AACENC_FLAGS, "stereo_mode"},
{"auto", "Selected by the Encoder", 0, AV_OPT_TYPE_CONST, {.i64 = -1 }, INT_MIN, INT_MAX, AACENC_FLAGS, "stereo_mode"},
{"ms_off", "Disable Mid/Side coding", 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, INT_MIN, INT_MAX, AACENC_FLAGS, "stereo_mode"},
{"ms_force", "Force Mid/Side for the whole frame if possible", 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, INT_MIN, INT_MAX, AACENC_FLAGS, "stereo_mode"},
{"aac_coder", "", offsetof(AACEncContext, options.aac_coder), AV_OPT_TYPE_INT, {.i64 = 2}, 0, AAC_CODER_NB-1, AACENC_FLAGS},
{"stereo_mode", "Stereo coding method", offsetof(AACEncContext, options.stereo_mode), AV_OPT_TYPE_INT, {.dbl = 0}, -1, 1, AACENC_FLAGS, "stereo_mode"},
{"auto", "Selected by the Encoder", 0, AV_OPT_TYPE_CONST, {.dbl = -1 }, INT_MIN, INT_MAX, AACENC_FLAGS, "stereo_mode"},
{"ms_off", "Disable Mid/Side coding", 0, AV_OPT_TYPE_CONST, {.dbl = 0 }, INT_MIN, INT_MAX, AACENC_FLAGS, "stereo_mode"},
{"ms_force", "Force Mid/Side for the whole frame if possible", 0, AV_OPT_TYPE_CONST, {.dbl = 1 }, INT_MIN, INT_MAX, AACENC_FLAGS, "stereo_mode"},
{"aac_coder", "", offsetof(AACEncContext, options.aac_coder), AV_OPT_TYPE_INT, {.dbl = 2}, 0, AAC_CODER_NB-1, AACENC_FLAGS},
{NULL}
};
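Every option table in this diff flips between {.i64 = ...} and {.dbl = ...}: AVOption.default_val is a union, and the member used to seed integer-typed options changed between these two releases, so every designated initializer follows suit. A minimal sketch of the 1.0-side pattern, assuming the libavutil headers of that era; ExampleContext and the option itself are illustrative, not taken from the diff:

#include <stddef.h>
#include "libavutil/opt.h"

typedef struct ExampleContext {
    const AVClass *class;    /* AVOptions-enabled structs start with an AVClass */
    int level;               /* illustrative field, not from the diff */
} ExampleContext;

static const AVOption example_options[] = {
    { "level", "illustrative integer option", offsetof(ExampleContext, level),
      AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 10,
      AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM },
    { NULL }
};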
@@ -819,7 +813,7 @@ static const AVClass aacenc_class = {
AVCodec ff_aac_encoder = {
.name = "aac",
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_AAC,
.id = CODEC_ID_AAC,
.priv_data_size = sizeof(AACEncContext),
.init = aac_encode_init,
.encode2 = aac_encode_frame,
@@ -829,6 +823,6 @@ AVCodec ff_aac_encoder = {
CODEC_CAP_EXPERIMENTAL,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_NONE },
.long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"),
.long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
.priv_class = &aacenc_class,
};


@@ -22,7 +22,6 @@
#ifndef AVCODEC_AACENC_H
#define AVCODEC_AACENC_H
#include "libavutil/float_dsp.h"
#include "avcodec.h"
#include "put_bits.h"
#include "dsputil.h"
@@ -62,7 +61,6 @@ typedef struct AACEncContext {
FFTContext mdct1024; ///< long (1024 samples) frame transform context
FFTContext mdct128; ///< short (128 samples) frame transform context
DSPContext dsp;
AVFloatDSPContext fdsp;
float *planar_samples[6]; ///< saved preprocessed input
int samplerate_index; ///< MPEG-4 samplerate index


@@ -23,7 +23,6 @@
#ifndef AACPS_TABLEGEN_H
#define AACPS_TABLEGEN_H
#include <math.h>
#include <stdint.h>
#if CONFIG_HARDCODED_TABLES


@@ -24,8 +24,6 @@
* AAC encoder psychoacoustic model
*/
#include "libavutil/libm.h"
#include "avcodec.h"
#include "aactab.h"
#include "psymodel.h"
@@ -294,7 +292,7 @@ static av_cold int psy_3gpp_init(FFPsyContext *ctx) {
int i, j, g, start;
float prev, minscale, minath, minsnr, pe_min;
const int chan_bitrate = ctx->avctx->bit_rate / ctx->avctx->channels;
const int bandwidth = ctx->avctx->cutoff ? ctx->avctx->cutoff : AAC_CUTOFF(ctx->avctx);
const int bandwidth = ctx->avctx->cutoff ? ctx->avctx->cutoff : ctx->avctx->sample_rate / 2;
const float num_bark = calc_bark((float)bandwidth);
ctx->model_priv_data = av_mallocz(sizeof(AacPsyContext));
@@ -335,7 +333,7 @@ static av_cold int psy_3gpp_init(FFPsyContext *ctx) {
coeff->spread_low[1] = pow(10.0, -bark_width * en_spread_low);
coeff->spread_hi [1] = pow(10.0, -bark_width * en_spread_hi);
pe_min = bark_pe * bark_width;
minsnr = exp2(pe_min / band_sizes[g]) - 1.5f;
minsnr = pow(2.0f, pe_min / band_sizes[g]) - 1.5f;
coeff->min_snr = av_clipf(1.0f / minsnr, PSY_SNR_25DB, PSY_SNR_1DB);
}
start = 0;
@@ -526,11 +524,8 @@ static float calc_reduction_3gpp(float a, float desired_pe, float pe,
{
float thr_avg, reduction;
if(active_lines == 0.0)
return 0;
thr_avg = exp2f((a - pe) / (4.0f * active_lines));
reduction = exp2f((a - desired_pe) / (4.0f * active_lines)) - thr_avg;
thr_avg = powf(2.0f, (a - pe) / (4.0f * active_lines));
reduction = powf(2.0f, (a - desired_pe) / (4.0f * active_lines)) - thr_avg;
return FFMAX(reduction, 0.0f);
}
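The psychoacoustic-model hunks in this file swap exp2f(x) for powf(2.0f, x) and back; the two are mathematically identical, the exp2 spelling merely names the base-2 exponential directly. An illustrative check with made-up inputs:

#include <math.h>
#include <stdio.h>

int main(void)
{
    float a = 12.0f, pe = 7.0f, active_lines = 4.0f;   /* made-up values */
    printf("%f\n", exp2f((a - pe) / (4.0f * active_lines)));
    printf("%f\n", powf(2.0f, (a - pe) / (4.0f * active_lines)));
    return 0;
}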
@@ -568,7 +563,7 @@ static void psy_3gpp_analyze_channel(FFPsyContext *ctx, int channel,
AacPsyChannel *pch = &pctx->ch[channel];
int start = 0;
int i, w, g;
float desired_bits, desired_pe, delta_pe, reduction= NAN, spread_en[128] = {0};
float desired_bits, desired_pe, delta_pe, reduction, spread_en[128] = {0};
float a = 0.0f, active_lines = 0.0f, norm_fac = 0.0f;
float pe = pctx->chan_bitrate > 32000 ? 0.0f : FFMAX(50.0f, 100.0f - pctx->chan_bitrate * 100.0f / 32000.0f);
const int num_bands = ctx->num_bands[wi->num_windows == 8];
@@ -588,7 +583,7 @@ static void psy_3gpp_analyze_channel(FFPsyContext *ctx, int channel,
form_factor += sqrtf(fabs(coefs[start+i]));
}
band->thr = band->energy * 0.001258925f;
band->nz_lines = band->energy>0 ? form_factor / powf(band->energy / band_sizes[g], 0.25f) : 0;
band->nz_lines = form_factor / powf(band->energy / band_sizes[g], 0.25f);
start += band_sizes[g];
}
@@ -708,7 +703,7 @@ static void psy_3gpp_analyze_channel(FFPsyContext *ctx, int channel,
float delta_sfb_pe = band->norm_fac * norm_fac * delta_pe;
float thr = band->thr;
thr *= exp2f(delta_sfb_pe / band->active_lines);
thr *= powf(2.0f, delta_sfb_pe / band->active_lines);
if (thr > coeffs[g].min_snr * band->energy && band->avoid_holes == PSY_3GPP_AH_INACTIVE)
thr = FFMAX(band->thr, coeffs[g].min_snr * band->energy);
band->thr = thr;


@@ -1578,6 +1578,10 @@ static void sbr_hf_assemble(float Y1[38][64][2],
0.11516383427084,
0.03183050093751,
};
static const int8_t phi[2][4] = {
{ 1, 0, -1, 0}, // real
{ 0, 1, 0, -1}, // imaginary
};
float (*g_temp)[48] = ch_data->g_temp, (*q_temp)[48] = ch_data->q_temp;
int indexnoise = ch_data->f_indexnoise;
int indexsine = ch_data->f_indexsine;
@@ -1601,6 +1605,7 @@ static void sbr_hf_assemble(float Y1[38][64][2],
for (e = 0; e < ch_data->bs_num_env; e++) {
for (i = 2 * ch_data->t_env[e]; i < 2 * ch_data->t_env[e + 1]; i++) {
int phi_sign = (1 - 2*(kx & 1));
LOCAL_ALIGNED_16(float, g_filt_tab, [48]);
LOCAL_ALIGNED_16(float, q_filt_tab, [48]);
float *g_filt, *q_filt;
@@ -1630,17 +1635,13 @@ static void sbr_hf_assemble(float Y1[38][64][2],
q_filt, indexnoise,
kx, m_max);
} else {
int idx = indexsine&1;
int A = (1-((indexsine+(kx & 1))&2));
int B = (A^(-idx)) + idx;
float *out = &Y1[i][kx][idx];
float *in = sbr->s_m[e];
for (m = 0; m+1 < m_max; m+=2) {
out[2*m ] += in[m ] * A;
out[2*m+2] += in[m+1] * B;
for (m = 0; m < m_max; m++) {
Y1[i][m + kx][0] +=
sbr->s_m[e][m] * phi[0][indexsine];
Y1[i][m + kx][1] +=
sbr->s_m[e][m] * (phi[1][indexsine] * phi_sign);
phi_sign = -phi_sign;
}
if(m_max&1)
out[2*m ] += in[m ] * A;
}
indexnoise = (indexnoise + m_max) & 0x1ff;
indexsine = (indexsine + 1) & 3;


@@ -36,29 +36,14 @@ typedef struct AascContext {
AVCodecContext *avctx;
GetByteContext gb;
AVFrame frame;
uint32_t palette[AVPALETTE_COUNT];
int palette_size;
} AascContext;
static av_cold int aasc_decode_init(AVCodecContext *avctx)
{
AascContext *s = avctx->priv_data;
uint8_t *ptr;
int i;
s->avctx = avctx;
switch (avctx->bits_per_coded_sample) {
case 8:
avctx->pix_fmt = PIX_FMT_PAL8;
ptr = avctx->extradata;
s->palette_size = FFMIN(avctx->extradata_size, AVPALETTE_SIZE);
for (i = 0; i < s->palette_size / 4; i++) {
s->palette[i] = 0xFFU << 24 | AV_RL32(ptr);
ptr += 4;
}
break;
case 16:
avctx->pix_fmt = PIX_FMT_RGB555;
break;
@@ -81,7 +66,7 @@ static int aasc_decode_frame(AVCodecContext *avctx,
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
AascContext *s = avctx->priv_data;
int compr, i, stride, psize;
int compr, i, stride;
s->frame.reference = 3;
s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
@@ -93,7 +78,6 @@ static int aasc_decode_frame(AVCodecContext *avctx,
compr = AV_RL32(buf);
buf += 4;
buf_size -= 4;
psize = avctx->bits_per_coded_sample / 8;
switch (avctx->codec_tag) {
case MKTAG('A', 'A', 'S', '4'):
bytestream2_init(&s->gb, buf - 4, buf_size + 4);
@@ -102,13 +86,13 @@ static int aasc_decode_frame(AVCodecContext *avctx,
case MKTAG('A', 'A', 'S', 'C'):
switch(compr){
case 0:
stride = (avctx->width * psize + psize) & ~psize;
stride = (avctx->width * 3 + 3) & ~3;
for(i = avctx->height - 1; i >= 0; i--){
if(avctx->width * psize > buf_size){
if(avctx->width*3 > buf_size){
av_log(avctx, AV_LOG_ERROR, "Next line is beyond buffer bounds\n");
break;
}
memcpy(s->frame.data[0] + i*s->frame.linesize[0], buf, avctx->width * psize);
memcpy(s->frame.data[0] + i*s->frame.linesize[0], buf, avctx->width*3);
buf += stride;
buf_size -= stride;
}
@@ -127,9 +111,6 @@ static int aasc_decode_frame(AVCodecContext *avctx,
return -1;
}
if (avctx->pix_fmt == PIX_FMT_PAL8)
memcpy(s->frame.data[1], s->palette, s->palette_size);
*data_size = sizeof(AVFrame);
*(AVFrame*)data = s->frame;
@@ -151,7 +132,7 @@ static av_cold int aasc_decode_end(AVCodecContext *avctx)
AVCodec ff_aasc_decoder = {
.name = "aasc",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_AASC,
.id = CODEC_ID_AASC,
.priv_data_size = sizeof(AascContext),
.init = aasc_decode_init,
.close = aasc_decode_end,


@@ -167,9 +167,9 @@ static int ac3_sync(uint64_t state, AACAC3ParseContext *hdr_info,
if (hdr.bitstream_mode == 0x7 && hdr.channels > 1)
hdr_info->service_type = AV_AUDIO_SERVICE_TYPE_KARAOKE;
if(hdr.bitstream_id>10)
hdr_info->codec_id = AV_CODEC_ID_EAC3;
else if (hdr_info->codec_id == AV_CODEC_ID_NONE)
hdr_info->codec_id = AV_CODEC_ID_AC3;
hdr_info->codec_id = CODEC_ID_EAC3;
else if (hdr_info->codec_id == CODEC_ID_NONE)
hdr_info->codec_id = CODEC_ID_AC3;
*need_next_header = (hdr.frame_type != EAC3_FRAME_TYPE_AC3_CONVERT);
*new_frame_start = (hdr.frame_type != EAC3_FRAME_TYPE_DEPENDENT);
@@ -186,7 +186,7 @@ static av_cold int ac3_parse_init(AVCodecParserContext *s1)
AVCodecParser ff_ac3_parser = {
.codec_ids = { AV_CODEC_ID_AC3, AV_CODEC_ID_EAC3 },
.codec_ids = { CODEC_ID_AC3, CODEC_ID_EAC3 },
.priv_data_size = sizeof(AACAC3ParseContext),
.parser_init = ac3_parse_init,
.parser_parse = ff_aac_ac3_parse,


@@ -620,6 +620,34 @@ static inline void do_imdct(AC3DecodeContext *s, int channels)
}
}
/**
* Downmix the output to mono or stereo.
*/
void ff_ac3_downmix_c(float (*samples)[256], float (*matrix)[2],
int out_ch, int in_ch, int len)
{
int i, j;
float v0, v1;
if (out_ch == 2) {
for (i = 0; i < len; i++) {
v0 = v1 = 0.0f;
for (j = 0; j < in_ch; j++) {
v0 += samples[j][i] * matrix[j][0];
v1 += samples[j][i] * matrix[j][1];
}
samples[0][i] = v0;
samples[1][i] = v1;
}
} else if (out_ch == 1) {
for (i = 0; i < len; i++) {
v0 = 0.0f;
for (j = 0; j < in_ch; j++)
v0 += samples[j][i] * matrix[j][0];
samples[0][i] = v0;
}
}
}
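The routine folds in_ch planar channels into mono or stereo in place, with one weight per input channel and output lane. A hypothetical usage sketch, folding L/C/R into stereo with a -3 dB centre; the matrix values are illustrative rather than the decoder's, and the definition above is assumed to be linked in:

#include <math.h>
#include <stdio.h>

void ff_ac3_downmix_c(float (*samples)[256], float (*matrix)[2],
                      int out_ch, int in_ch, int len);

int main(void)
{
    static float samples[3][256];            /* planar L, C, R */
    const float m3db = (float)M_SQRT1_2;     /* -3 dB */
    float matrix[3][2] = {                   /* illustrative fold-down matrix */
        { 1.0f, 0.0f },                      /* L -> left only   */
        { m3db, m3db },                      /* C -> both, -3 dB */
        { 0.0f, 1.0f },                      /* R -> right only  */
    };
    int i;
    for (i = 0; i < 256; i++) {
        samples[0][i] =  1.0f;
        samples[1][i] =  0.5f;
        samples[2][i] = -1.0f;
    }
    ff_ac3_downmix_c(samples, matrix, 2, 3, 256);
    /* left = 1.0 + 0.5/sqrt(2), right = -1.0 + 0.5/sqrt(2) */
    printf("%f %f\n", samples[0][0], samples[1][0]);
    return 0;
}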
/**
* Upmix delay samples from stereo to original channel layout.
*/
@@ -1238,19 +1266,19 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
do_imdct(s, s->channels);
if (downmix_output) {
s->ac3dsp.downmix(s->output, s->downmix_coeffs,
s->out_channels, s->fbw_channels, 256);
s->dsp.ac3_downmix(s->output, s->downmix_coeffs,
s->out_channels, s->fbw_channels, 256);
}
} else {
if (downmix_output) {
s->ac3dsp.downmix(s->transform_coeffs + 1, s->downmix_coeffs,
s->out_channels, s->fbw_channels, 256);
s->dsp.ac3_downmix(s->transform_coeffs + 1, s->downmix_coeffs,
s->out_channels, s->fbw_channels, 256);
}
if (downmix_output && !s->downmixed) {
s->downmixed = 1;
s->ac3dsp.downmix(s->delay, s->downmix_coeffs, s->out_channels,
s->fbw_channels, 128);
s->dsp.ac3_downmix(s->delay, s->downmix_coeffs, s->out_channels,
s->fbw_channels, 128);
}
do_imdct(s, s->out_channels);
@@ -1405,8 +1433,6 @@ static int ac3_decode_frame(AVCodecContext * avctx, void *data,
}
}
s->frame.decode_error_flags = err ? FF_DECODE_ERROR_INVALID_BITSTREAM : 0;
*got_frame_ptr = 1;
*(AVFrame *)data = s->frame;
@@ -1428,9 +1454,9 @@ static av_cold int ac3_decode_end(AVCodecContext *avctx)
#define OFFSET(x) offsetof(AC3DecodeContext, x)
#define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM)
static const AVOption options[] = {
{ "drc_scale", "percentage of dynamic range compression to apply", OFFSET(drc_scale), AV_OPT_TYPE_FLOAT, {.dbl = 1.0}, 0.0, 1.0, PAR },
{ "drc_scale", "percentage of dynamic range compression to apply", OFFSET(drc_scale), AV_OPT_TYPE_FLOAT, {1.0}, 0.0, 1.0, PAR },
{"dmix_mode", "Preferred Stereo Downmix Mode", OFFSET(preferred_stereo_downmix), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 2, 0, "dmix_mode"},
{"dmix_mode", "Preferred Stereo Downmix Mode", OFFSET(preferred_stereo_downmix), AV_OPT_TYPE_INT, {.dbl = -1 }, -1, 2, 0, "dmix_mode"},
{"ltrt_cmixlev", "Lt/Rt Center Mix Level", OFFSET(ltrt_center_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, 0},
{"ltrt_surmixlev", "Lt/Rt Surround Mix Level", OFFSET(ltrt_surround_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, 0},
{"loro_cmixlev", "Lo/Ro Center Mix Level", OFFSET(loro_center_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, 0},
@@ -1449,7 +1475,7 @@ static const AVClass ac3_decoder_class = {
AVCodec ff_ac3_decoder = {
.name = "ac3",
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_AC3,
.id = CODEC_ID_AC3,
.priv_data_size = sizeof (AC3DecodeContext),
.init = ac3_decode_init,
.close = ac3_decode_end,
@@ -1473,7 +1499,7 @@ static const AVClass eac3_decoder_class = {
AVCodec ff_eac3_decoder = {
.name = "eac3",
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_EAC3,
.id = CODEC_ID_EAC3,
.priv_data_size = sizeof (AC3DecodeContext),
.init = ac3_decode_init,
.close = ac3_decode_end,


@@ -227,6 +227,9 @@ int ff_eac3_parse_header(AC3DecodeContext *s);
*/
void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch);
void ff_ac3_downmix_c(float (*samples)[256], float (*matrix)[2],
int out_ch, int in_ch, int len);
/**
* Apply spectral extension to each channel by copying lower frequency
* coefficients to higher frequency bins and applying side information to


@@ -214,31 +214,6 @@ static void ac3_sum_square_butterfly_float_c(float sum[4],
}
}
static void ac3_downmix_c(float (*samples)[256], float (*matrix)[2],
int out_ch, int in_ch, int len)
{
int i, j;
float v0, v1;
if (out_ch == 2) {
for (i = 0; i < len; i++) {
v0 = v1 = 0.0f;
for (j = 0; j < in_ch; j++) {
v0 += samples[j][i] * matrix[j][0];
v1 += samples[j][i] * matrix[j][1];
}
samples[0][i] = v0;
samples[1][i] = v1;
}
} else if (out_ch == 1) {
for (i = 0; i < len; i++) {
v0 = 0.0f;
for (j = 0; j < in_ch; j++)
v0 += samples[j][i] * matrix[j][0];
samples[0][i] = v0;
}
}
}
av_cold void ff_ac3dsp_init(AC3DSPContext *c, int bit_exact)
{
c->ac3_exponent_min = ac3_exponent_min_c;
@@ -252,7 +227,6 @@ av_cold void ff_ac3dsp_init(AC3DSPContext *c, int bit_exact)
c->extract_exponents = ac3_extract_exponents_c;
c->sum_square_butterfly_int32 = ac3_sum_square_butterfly_int32_c;
c->sum_square_butterfly_float = ac3_sum_square_butterfly_float_c;
c->downmix = ac3_downmix_c;
if (ARCH_ARM)
ff_ac3dsp_init_arm(c, bit_exact);


@@ -131,9 +131,6 @@ typedef struct AC3DSPContext {
void (*sum_square_butterfly_float)(float sum[4], const float *coef0,
const float *coef1, int len);
void (*downmix)(float (*samples)[256], float (*matrix)[2], int out_ch,
int in_ch, int len);
} AC3DSPContext;
void ff_ac3dsp_init (AC3DSPContext *c, int bit_exact);


@@ -2427,7 +2427,7 @@ av_cold int ff_ac3_encode_init(AVCodecContext *avctx)
s->avctx = avctx;
s->eac3 = avctx->codec_id == AV_CODEC_ID_EAC3;
s->eac3 = avctx->codec_id == CODEC_ID_EAC3;
ff_ac3_common_init();
@@ -2491,7 +2491,6 @@ av_cold int ff_ac3_encode_init(AVCodecContext *avctx)
#endif
ff_dsputil_init(&s->dsp, avctx);
avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
ff_ac3dsp_init(&s->ac3dsp, avctx->flags & CODEC_FLAG_BITEXACT);
dprint_options(s);


@@ -29,8 +29,6 @@
#define AVCODEC_AC3ENC_H
#include <stdint.h>
#include "libavutil/float_dsp.h"
#include "ac3.h"
#include "ac3dsp.h"
#include "avcodec.h"
@@ -160,7 +158,6 @@ typedef struct AC3EncodeContext {
AVCodecContext *avctx; ///< parent AVCodecContext
PutBitContext pb; ///< bitstream writer context
DSPContext dsp;
AVFloatDSPContext fdsp;
AC3DSPContext ac3dsp; ///< AC-3 optimized functions
FFTContext mdct; ///< FFT context for MDCT calculation
const SampleType *mdct_window; ///< MDCT window function array


@@ -34,13 +34,8 @@
#define AC3ENC_TYPE AC3ENC_TYPE_AC3_FIXED
#include "ac3enc_opts_template.c"
static const AVClass ac3enc_class = {
.class_name = "Fixed-Point AC-3 Encoder",
.item_name = av_default_item_name,
.option = ac3_options,
.version = LIBAVUTIL_VERSION_INT,
};
static const AVClass ac3enc_class = { "Fixed-Point AC-3 Encoder", av_default_item_name,
ac3fixed_options, LIBAVUTIL_VERSION_INT };
#include "ac3enc_template.c"
@@ -73,11 +68,10 @@ av_cold int AC3_NAME(mdct_init)(AC3EncodeContext *s)
/*
* Apply KBD window to input samples prior to MDCT.
*/
static void apply_window(void *dsp, int16_t *output, const int16_t *input,
static void apply_window(DSPContext *dsp, int16_t *output, const int16_t *input,
const int16_t *window, unsigned int len)
{
DSPContext *dsp0 = dsp;
dsp0->apply_window_int16(output, input, window, len);
dsp->apply_window_int16(output, input, window, len);
}
@@ -155,7 +149,7 @@ static av_cold int ac3_fixed_encode_init(AVCodecContext *avctx)
AVCodec ff_ac3_fixed_encoder = {
.name = "ac3_fixed",
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_AC3,
.id = CODEC_ID_AC3,
.priv_data_size = sizeof(AC3EncodeContext),
.init = ac3_fixed_encode_init,
.encode2 = ff_ac3_fixed_encode_frame,


@@ -36,12 +36,8 @@
#if CONFIG_AC3_ENCODER
#define AC3ENC_TYPE AC3ENC_TYPE_AC3
#include "ac3enc_opts_template.c"
static const AVClass ac3enc_class = {
.class_name = "AC-3 Encoder",
.item_name = av_default_item_name,
.option = ac3_options,
.version = LIBAVUTIL_VERSION_INT,
};
static const AVClass ac3enc_class = { "AC-3 Encoder", av_default_item_name,
ac3_options, LIBAVUTIL_VERSION_INT };
#endif
#include "ac3enc_template.c"
@@ -90,12 +86,10 @@ av_cold int ff_ac3_float_mdct_init(AC3EncodeContext *s)
/*
* Apply KBD window to input samples prior to MDCT.
*/
static void apply_window(void *dsp, float *output,
const float *input, const float *window,
unsigned int len)
static void apply_window(DSPContext *dsp, float *output, const float *input,
const float *window, unsigned int len)
{
AVFloatDSPContext *fdsp = dsp;
fdsp->vector_fmul(output, input, window, len);
dsp->vector_fmul(output, input, window, len);
}
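The 1.0-side signature change is what lets the shared ac3enc_template.c serve both encoders: the template calls apply_window() through an opaque pointer, and the fixed-point variant recovers a DSPContext while the float variant recovers an AVFloatDSPContext. A minimal sketch of that pattern with illustrative stand-in types:

#include <stdio.h>

typedef struct { const char *kind; } FixedDSP;   /* stands in for DSPContext */
typedef struct { const char *kind; } FloatDSP;   /* stands in for AVFloatDSPContext */

static void apply_window_fixed(void *dsp)
{
    FixedDSP *c = dsp;               /* each variant casts back to its own type */
    printf("windowing with %s\n", c->kind);
}

static void apply_window_float(void *dsp)
{
    FloatDSP *c = dsp;
    printf("windowing with %s\n", c->kind);
}

int main(void)
{
    FixedDSP f = { "fixed-point DSP" };
    FloatDSP g = { "float DSP" };
    apply_window_fixed(&f);
    apply_window_float(&g);
    return 0;
}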
@@ -153,7 +147,7 @@ static CoefType calc_cpl_coord(CoefSumType energy_ch, CoefSumType energy_cpl)
AVCodec ff_ac3_encoder = {
.name = "ac3",
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_AC3,
.id = CODEC_ID_AC3,
.priv_data_size = sizeof(AC3EncodeContext),
.init = ff_ac3_encode_init,
.encode2 = ff_ac3_float_encode_frame,


@@ -23,54 +23,60 @@
#include "internal.h"
#include "ac3.h"
#if AC3ENC_TYPE == AC3ENC_TYPE_AC3_FIXED
static const AVOption ac3fixed_options[] = {
#elif AC3ENC_TYPE == AC3ENC_TYPE_AC3
static const AVOption ac3_options[] = {
#else /* AC3ENC_TYPE_EAC3 */
static const AVOption eac3_options[] = {
#endif
/* Metadata Options */
{"per_frame_metadata", "Allow Changing Metadata Per-Frame", OFFSET(allow_per_frame_metadata), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, AC3ENC_PARAM},
{"per_frame_metadata", "Allow Changing Metadata Per-Frame", OFFSET(allow_per_frame_metadata), AV_OPT_TYPE_INT, {.dbl = 0 }, 0, 1, AC3ENC_PARAM},
#if AC3ENC_TYPE != AC3ENC_TYPE_EAC3
/* AC-3 downmix levels */
{"center_mixlev", "Center Mix Level", OFFSET(center_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = LEVEL_MINUS_4POINT5DB }, 0.0, 1.0, AC3ENC_PARAM},
{"surround_mixlev", "Surround Mix Level", OFFSET(surround_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = LEVEL_MINUS_6DB }, 0.0, 1.0, AC3ENC_PARAM},
#endif
/* audio production information */
{"mixing_level", "Mixing Level", OFFSET(mixing_level), AV_OPT_TYPE_INT, {.i64 = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, 111, AC3ENC_PARAM},
{"room_type", "Room Type", OFFSET(room_type), AV_OPT_TYPE_INT, {.i64 = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_SMALL_ROOM, AC3ENC_PARAM, "room_type"},
{"notindicated", "Not Indicated (default)", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_NOT_INDICATED }, INT_MIN, INT_MAX, AC3ENC_PARAM, "room_type"},
{"large", "Large Room", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_LARGE_ROOM }, INT_MIN, INT_MAX, AC3ENC_PARAM, "room_type"},
{"small", "Small Room", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_SMALL_ROOM }, INT_MIN, INT_MAX, AC3ENC_PARAM, "room_type"},
{"mixing_level", "Mixing Level", OFFSET(mixing_level), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, 111, AC3ENC_PARAM},
{"room_type", "Room Type", OFFSET(room_type), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_SMALL_ROOM, AC3ENC_PARAM, "room_type"},
{"notindicated", "Not Indicated (default)", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_NOT_INDICATED }, INT_MIN, INT_MAX, AC3ENC_PARAM, "room_type"},
{"large", "Large Room", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_LARGE_ROOM }, INT_MIN, INT_MAX, AC3ENC_PARAM, "room_type"},
{"small", "Small Room", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_SMALL_ROOM }, INT_MIN, INT_MAX, AC3ENC_PARAM, "room_type"},
/* other metadata options */
{"copyright", "Copyright Bit", OFFSET(copyright), AV_OPT_TYPE_INT, {.i64 = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, 1, AC3ENC_PARAM},
{"dialnorm", "Dialogue Level (dB)", OFFSET(dialogue_level), AV_OPT_TYPE_INT, {.i64 = -31 }, -31, -1, AC3ENC_PARAM},
{"dsur_mode", "Dolby Surround Mode", OFFSET(dolby_surround_mode), AV_OPT_TYPE_INT, {.i64 = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_MODE_ON, AC3ENC_PARAM, "dsur_mode"},
{"notindicated", "Not Indicated (default)", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_NOT_INDICATED }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"},
{"on", "Dolby Surround Encoded", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_MODE_ON }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"},
{"off", "Not Dolby Surround Encoded", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_MODE_OFF }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"},
{"original", "Original Bit Stream", OFFSET(original), AV_OPT_TYPE_INT, {.i64 = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, 1, AC3ENC_PARAM},
{"copyright", "Copyright Bit", OFFSET(copyright), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, 1, AC3ENC_PARAM},
{"dialnorm", "Dialogue Level (dB)", OFFSET(dialogue_level), AV_OPT_TYPE_INT, {.dbl = -31 }, -31, -1, AC3ENC_PARAM},
{"dsur_mode", "Dolby Surround Mode", OFFSET(dolby_surround_mode), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_MODE_ON, AC3ENC_PARAM, "dsur_mode"},
{"notindicated", "Not Indicated (default)", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_NOT_INDICATED }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"},
{"on", "Dolby Surround Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_ON }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"},
{"off", "Not Dolby Surround Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_OFF }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"},
{"original", "Original Bit Stream", OFFSET(original), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, 1, AC3ENC_PARAM},
/* extended bitstream information */
{"dmix_mode", "Preferred Stereo Downmix Mode", OFFSET(preferred_stereo_downmix), AV_OPT_TYPE_INT, {.i64 = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_DOWNMIX_LORO, AC3ENC_PARAM, "dmix_mode"},
{"notindicated", "Not Indicated (default)", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_NOT_INDICATED }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dmix_mode"},
{"ltrt", "Lt/Rt Downmix Preferred", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_DOWNMIX_LTRT }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dmix_mode"},
{"loro", "Lo/Ro Downmix Preferred", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_DOWNMIX_LORO }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dmix_mode"},
{"dmix_mode", "Preferred Stereo Downmix Mode", OFFSET(preferred_stereo_downmix), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_DOWNMIX_LORO, AC3ENC_PARAM, "dmix_mode"},
{"notindicated", "Not Indicated (default)", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_NOT_INDICATED }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dmix_mode"},
{"ltrt", "Lt/Rt Downmix Preferred", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_DOWNMIX_LTRT }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dmix_mode"},
{"loro", "Lo/Ro Downmix Preferred", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_DOWNMIX_LORO }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dmix_mode"},
{"ltrt_cmixlev", "Lt/Rt Center Mix Level", OFFSET(ltrt_center_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, AC3ENC_PARAM},
{"ltrt_surmixlev", "Lt/Rt Surround Mix Level", OFFSET(ltrt_surround_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, AC3ENC_PARAM},
{"loro_cmixlev", "Lo/Ro Center Mix Level", OFFSET(loro_center_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, AC3ENC_PARAM},
{"loro_surmixlev", "Lo/Ro Surround Mix Level", OFFSET(loro_surround_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, AC3ENC_PARAM},
{"dsurex_mode", "Dolby Surround EX Mode", OFFSET(dolby_surround_ex_mode), AV_OPT_TYPE_INT, {.i64 = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_MODE_ON, AC3ENC_PARAM, "dsurex_mode"},
{"notindicated", "Not Indicated (default)", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_NOT_INDICATED }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"},
{"on", "Dolby Surround EX Encoded", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_MODE_ON }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"},
{"off", "Not Dolby Surround EX Encoded", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_MODE_OFF }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"},
{"dheadphone_mode", "Dolby Headphone Mode", OFFSET(dolby_headphone_mode), AV_OPT_TYPE_INT, {.i64 = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_MODE_ON, AC3ENC_PARAM, "dheadphone_mode"},
{"notindicated", "Not Indicated (default)", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_NOT_INDICATED }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"},
{"on", "Dolby Headphone Encoded", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_MODE_ON }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"},
{"off", "Not Dolby Headphone Encoded", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_MODE_OFF }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"},
{"ad_conv_type", "A/D Converter Type", OFFSET(ad_converter_type), AV_OPT_TYPE_INT, {.i64 = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_ADCONV_HDCD, AC3ENC_PARAM, "ad_conv_type"},
{"standard", "Standard (default)", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_ADCONV_STANDARD }, INT_MIN, INT_MAX, AC3ENC_PARAM, "ad_conv_type"},
{"hdcd", "HDCD", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_ADCONV_HDCD }, INT_MIN, INT_MAX, AC3ENC_PARAM, "ad_conv_type"},
{"dsurex_mode", "Dolby Surround EX Mode", OFFSET(dolby_surround_ex_mode), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_MODE_ON, AC3ENC_PARAM, "dsurex_mode"},
{"notindicated", "Not Indicated (default)", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_NOT_INDICATED }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"},
{"on", "Dolby Surround EX Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_ON }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"},
{"off", "Not Dolby Surround EX Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_OFF }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"},
{"dheadphone_mode", "Dolby Headphone Mode", OFFSET(dolby_headphone_mode), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_MODE_ON, AC3ENC_PARAM, "dheadphone_mode"},
{"notindicated", "Not Indicated (default)", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_NOT_INDICATED }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"},
{"on", "Dolby Headphone Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_ON }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"},
{"off", "Not Dolby Headphone Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_OFF }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"},
{"ad_conv_type", "A/D Converter Type", OFFSET(ad_converter_type), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_ADCONV_HDCD, AC3ENC_PARAM, "ad_conv_type"},
{"standard", "Standard (default)", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_ADCONV_STANDARD }, INT_MIN, INT_MAX, AC3ENC_PARAM, "ad_conv_type"},
{"hdcd", "HDCD", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_ADCONV_HDCD }, INT_MIN, INT_MAX, AC3ENC_PARAM, "ad_conv_type"},
/* Other Encoding Options */
{"stereo_rematrixing", "Stereo Rematrixing", OFFSET(stereo_rematrixing), AV_OPT_TYPE_INT, {.i64 = AC3ENC_OPT_ON }, AC3ENC_OPT_OFF, AC3ENC_OPT_ON, AC3ENC_PARAM},
{"channel_coupling", "Channel Coupling", OFFSET(channel_coupling), AV_OPT_TYPE_INT, {.i64 = AC3ENC_OPT_AUTO }, AC3ENC_OPT_AUTO, AC3ENC_OPT_ON, AC3ENC_PARAM, "channel_coupling"},
{"auto", "Selected by the Encoder", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_AUTO }, INT_MIN, INT_MAX, AC3ENC_PARAM, "channel_coupling"},
{"cpl_start_band", "Coupling Start Band", OFFSET(cpl_start), AV_OPT_TYPE_INT, {.i64 = AC3ENC_OPT_AUTO }, AC3ENC_OPT_AUTO, 15, AC3ENC_PARAM, "cpl_start_band"},
{"auto", "Selected by the Encoder", 0, AV_OPT_TYPE_CONST, {.i64 = AC3ENC_OPT_AUTO }, INT_MIN, INT_MAX, AC3ENC_PARAM, "cpl_start_band"},
{"stereo_rematrixing", "Stereo Rematrixing", OFFSET(stereo_rematrixing), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_ON }, AC3ENC_OPT_OFF, AC3ENC_OPT_ON, AC3ENC_PARAM},
{"channel_coupling", "Channel Coupling", OFFSET(channel_coupling), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_AUTO }, AC3ENC_OPT_AUTO, AC3ENC_OPT_ON, AC3ENC_PARAM, "channel_coupling"},
{"auto", "Selected by the Encoder", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_AUTO }, INT_MIN, INT_MAX, AC3ENC_PARAM, "channel_coupling"},
{"cpl_start_band", "Coupling Start Band", OFFSET(cpl_start), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_AUTO }, AC3ENC_OPT_AUTO, 15, AC3ENC_PARAM, "cpl_start_band"},
{"auto", "Selected by the Encoder", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_AUTO }, INT_MIN, INT_MAX, AC3ENC_PARAM, "cpl_start_band"},
{NULL}
};
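
The paired entries above show the AVOption default-value change between these releases: integer-typed options (AV_OPT_TYPE_INT, AV_OPT_TYPE_CONST) initialize the default_val union through .i64 on the newer side and through .dbl on the older one. A minimal sketch of the newer pattern — DemoContext and the "demo_mode" option are hypothetical, not part of this diff:

#include <stddef.h>
#include "libavutil/opt.h"

typedef struct DemoContext {
    const AVClass *class;
    int demo_mode;
} DemoContext;

#define OFFSET(x) offsetof(DemoContext, x)

static const AVOption demo_options[] = {
    /* integer option: the default goes through the .i64 union member */
    { "demo_mode", "Demo Mode", OFFSET(demo_mode), AV_OPT_TYPE_INT,
      { .i64 = 0 }, 0, 1, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM },
    { NULL }
};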

View File

@@ -33,7 +33,7 @@
static void scale_coefficients(AC3EncodeContext *s);
static void apply_window(void *dsp, SampleType *output,
static void apply_window(DSPContext *dsp, SampleType *output,
const SampleType *input, const SampleType *window,
unsigned int len);
@@ -110,13 +110,8 @@ static void apply_mdct(AC3EncodeContext *s)
AC3Block *block = &s->blocks[blk];
const SampleType *input_samples = &s->planar_samples[ch][blk * AC3_BLOCK_SIZE];
#if CONFIG_AC3ENC_FLOAT
apply_window(&s->fdsp, s->windowed_samples, input_samples,
s->mdct_window, AC3_WINDOW_SIZE);
#else
apply_window(&s->dsp, s->windowed_samples, input_samples,
s->mdct_window, AC3_WINDOW_SIZE);
#endif
if (s->fixed_point)
block->coeff_shift[ch+1] = normalize_samples(s);
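
Taking the DSP context as void * lets the shared encoder template route either DSP structure through one apply_window() entry point, as the #if above shows. A sketch of the dispatch, assuming (as the surrounding code suggests) that the float build uses AVFloatDSPContext's vector_fmul() and the fixed-point build uses DSPContext's apply_window_int16():

static void apply_window(void *dsp, SampleType *output,
                         const SampleType *input, const SampleType *window,
                         unsigned int len)
{
#if CONFIG_AC3ENC_FLOAT
    AVFloatDSPContext *fdsp = dsp;   /* float build: called with &s->fdsp */
    fdsp->vector_fmul(output, input, window, len);
#else
    DSPContext *dspc = dsp;          /* fixed-point build: called with &s->dsp */
    dspc->apply_window_int16(output, input, window, len);
#endif
}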

View File

@@ -22,8 +22,6 @@
#include <inttypes.h>
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "avcodec.h"
#include "acelp_filters.h"
@@ -47,7 +45,7 @@ void ff_acelp_interpolate(int16_t* out, const int16_t* in,
{
int n, i;
av_assert1(frac_pos >= 0 && frac_pos < precision);
assert(frac_pos >= 0 && frac_pos < precision);
for (n = 0; n < length; n++) {
int idx = 0;
@@ -144,12 +142,3 @@ void ff_tilt_compensation(float *mem, float tilt, float *samples, int size)
samples[0] -= tilt * *mem;
*mem = new_tilt_mem;
}
void ff_acelp_filter_init(ACELPFContext *c)
{
c->acelp_interpolatef = ff_acelp_interpolatef;
c->acelp_apply_order_2_transfer_function = ff_acelp_apply_order_2_transfer_function;
if(HAVE_MIPSFPU)
ff_acelp_filter_init_mips(c);
}
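
The init function above follows the usual lavc function-pointer pattern: install the C implementations, then let an arch-specific init override whatever it can accelerate. A usage sketch (buffers and parameters assumed declared, argument names as in the header):

ACELPFContext ctx;
ff_acelp_filter_init(&ctx);   /* picks the MIPS FPU versions when built in */
ctx.acelp_interpolatef(out, in, filter_coeffs, precision,
                       frac_pos, filter_length, length);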

View File

@@ -25,39 +25,6 @@
#include <stdint.h>
typedef struct ACELPFContext {
/**
* Floating point version of ff_acelp_interpolate()
*/
void (*acelp_interpolatef)(float *out, const float *in,
const float *filter_coeffs, int precision,
int frac_pos, int filter_length, int length);
/**
* Apply an order 2 rational transfer function in-place.
*
* @param out output buffer for filtered speech samples
* @param in input buffer containing speech data (may be the same as out)
* @param zero_coeffs z^-1 and z^-2 coefficients of the numerator
* @param pole_coeffs z^-1 and z^-2 coefficients of the denominator
* @param gain scale factor for final output
* @param mem intermediate values used by filter (should be 0 initially)
* @param n number of samples (should be a multiple of eight)
*/
void (*acelp_apply_order_2_transfer_function)(float *out, const float *in,
const float zero_coeffs[2],
const float pole_coeffs[2],
float gain,
float mem[2], int n);
}ACELPFContext;
/**
* Initialize ACELPFContext.
*/
void ff_acelp_filter_init(ACELPFContext *c);
void ff_acelp_filter_init_mips(ACELPFContext *c);
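
In transfer-function terms, the order-2 routine documented above is a biquad: with numerator coefficients b_1, b_2 (zero_coeffs), denominator coefficients a_1, a_2 (pole_coeffs) and gain g, it applies roughly

    H(z) = g \frac{1 + b_1 z^{-1} + b_2 z^{-2}}{1 + a_1 z^{-1} + a_2 z^{-2}}

with two samples of filter state carried in mem[2].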
/**
* low-pass Finite Impulse Response filter coefficients.
*

View File

@@ -20,8 +20,6 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/common.h"
#include "libavutil/libm.h"
#include "libavutil/mathematics.h"
#include "avcodec.h"
#include "dsputil.h"
@@ -132,7 +130,7 @@ float ff_amr_set_fixed_gain(float fixed_gain_factor, float fixed_mean_energy,
// Note 10^(0.05 * -10log(average x2)) = 1/sqrt((average x2)).
float val = fixed_gain_factor *
exp2f(M_LOG2_10 * 0.05 *
(ff_scalarproduct_float_c(pred_table, prediction_error, 4) +
(ff_dot_productf(pred_table, prediction_error, 4) +
energy_mean)) /
sqrtf(fixed_mean_energy);
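
Since exp2f(M_LOG2_10 * x) is just 10^x, the gain computed above (with either spelling of the dot product) works out to

    g = \hat{g} \cdot \frac{10^{0.05 (\langle p, e \rangle + \bar{E})}}{\sqrt{E_{\mathrm{fixed}}}}

where \langle p, e \rangle is the 4-tap dot product of pred_table and prediction_error, \bar{E} is energy_mean and E_fixed is fixed_mean_energy — matching the 1/sqrt(average x^2) note in the comment.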

View File

@@ -21,11 +21,9 @@
*/
#include <inttypes.h>
#include "libavutil/common.h"
#include "avcodec.h"
#include "dsputil.h"
#include "acelp_vectors.h"
#include "celp_math.h"
const uint8_t ff_fc_2pulses_9bits_track1[16] =
{
@@ -203,7 +201,7 @@ void ff_adaptive_gain_control(float *out, const float *in, float speech_energ,
int size, float alpha, float *gain_mem)
{
int i;
float postfilter_energ = ff_scalarproduct_float_c(in, in, size);
float postfilter_energ = ff_dot_productf(in, in, size);
float gain_scale_factor = 1.0;
float mem = *gain_mem;
@@ -224,7 +222,7 @@ void ff_scale_vector_to_given_sum_of_squares(float *out, const float *in,
float sum_of_squares, const int n)
{
int i;
float scalefactor = ff_scalarproduct_float_c(in, in, n);
float scalefactor = ff_dot_productf(in, in, n);
if (scalefactor)
scalefactor = sqrt(sum_of_squares / scalefactor);
for (i = 0; i < n; i++)
@@ -262,11 +260,3 @@ void ff_clear_fixed_vector(float *out, const AMRFixed *in, int size)
} while (x < size && repeats);
}
}
void ff_acelp_vectors_init(ACELPVContext *c)
{
c->weighted_vector_sumf = ff_weighted_vector_sumf;
if(HAVE_MIPSFPU)
ff_acelp_vectors_init_mips(c);
}
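
The scaling routine above solves for the factor that gives the output the requested energy:

    s = \sqrt{S / \textstyle\sum_{i=0}^{n-1} x_i^2}, \qquad y_i = s\, x_i

so that \sum_i y_i^2 = S (with s = 0 kept as-is when the input is all zeros).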

View File

@@ -25,30 +25,6 @@
#include <stdint.h>
typedef struct ACELPVContext {
/**
* float implementation of weighted sum of two vectors.
* @param[out] out result of addition
* @param in_a first vector
* @param in_b second vector
* @param weight_coeff_a first vector weight coefficient
* @param weight_coeff_b second vector weight coefficient
* @param length vectors length (should be a multiple of two)
*
* @note It is safe to pass the same buffer for out and in_a or in_b.
*/
void (*weighted_vector_sumf)(float *out, const float *in_a, const float *in_b,
float weight_coeff_a, float weight_coeff_b,
int length);
}ACELPVContext;
/**
* Initialize ACELPVContext.
*/
void ff_acelp_vectors_init(ACELPVContext *c);
void ff_acelp_vectors_init_mips(ACELPVContext *c);
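
The weighted sum documented above is simply

    y_i = w_a a_i + w_b b_i, \quad i = 0 \ldots \mathrm{length}-1

which is also why out may alias either input: each element is fully consumed before it is overwritten.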
/** Sparse representation for the algebraic codebook (fixed) vector */
typedef struct {
int n;

View File

@@ -96,13 +96,13 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
unsigned int max_channels = 2;
switch(avctx->codec->id) {
case AV_CODEC_ID_ADPCM_EA:
case CODEC_ID_ADPCM_EA:
min_channels = 2;
break;
case AV_CODEC_ID_ADPCM_EA_R1:
case AV_CODEC_ID_ADPCM_EA_R2:
case AV_CODEC_ID_ADPCM_EA_R3:
case AV_CODEC_ID_ADPCM_EA_XAS:
case CODEC_ID_ADPCM_EA_R1:
case CODEC_ID_ADPCM_EA_R2:
case CODEC_ID_ADPCM_EA_R3:
case CODEC_ID_ADPCM_EA_XAS:
max_channels = 6;
break;
}
@@ -112,22 +112,22 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
}
switch(avctx->codec->id) {
case AV_CODEC_ID_ADPCM_CT:
case CODEC_ID_ADPCM_CT:
c->status[0].step = c->status[1].step = 511;
break;
case AV_CODEC_ID_ADPCM_IMA_WAV:
case CODEC_ID_ADPCM_IMA_WAV:
if (avctx->bits_per_coded_sample != 4) {
av_log(avctx, AV_LOG_ERROR, "Only 4-bit ADPCM IMA WAV files are supported\n");
return -1;
}
break;
case AV_CODEC_ID_ADPCM_IMA_APC:
case CODEC_ID_ADPCM_IMA_APC:
if (avctx->extradata && avctx->extradata_size >= 8) {
c->status[0].predictor = AV_RL32(avctx->extradata);
c->status[1].predictor = AV_RL32(avctx->extradata + 4);
}
break;
case AV_CODEC_ID_ADPCM_IMA_WS:
case CODEC_ID_ADPCM_IMA_WS:
if (avctx->extradata && avctx->extradata_size >= 2)
c->vqa_version = AV_RL16(avctx->extradata);
break;
@@ -423,22 +423,22 @@ static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
switch (avctx->codec->id) {
/* constant, only check buf_size */
case AV_CODEC_ID_ADPCM_EA_XAS:
case CODEC_ID_ADPCM_EA_XAS:
if (buf_size < 76 * ch)
return 0;
nb_samples = 128;
break;
case AV_CODEC_ID_ADPCM_IMA_QT:
case CODEC_ID_ADPCM_IMA_QT:
if (buf_size < 34 * ch)
return 0;
nb_samples = 64;
break;
/* simple 4-bit adpcm */
case AV_CODEC_ID_ADPCM_CT:
case AV_CODEC_ID_ADPCM_IMA_APC:
case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
case AV_CODEC_ID_ADPCM_IMA_WS:
case AV_CODEC_ID_ADPCM_YAMAHA:
case CODEC_ID_ADPCM_CT:
case CODEC_ID_ADPCM_IMA_APC:
case CODEC_ID_ADPCM_IMA_EA_SEAD:
case CODEC_ID_ADPCM_IMA_WS:
case CODEC_ID_ADPCM_YAMAHA:
nb_samples = buf_size * 2 / ch;
break;
}
@@ -448,46 +448,46 @@ static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
/* simple 4-bit adpcm, with header */
header_size = 0;
switch (avctx->codec->id) {
case AV_CODEC_ID_ADPCM_4XM:
case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
case AV_CODEC_ID_ADPCM_IMA_AMV: header_size = 8; break;
case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4; break;
case CODEC_ID_ADPCM_4XM:
case CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
case CODEC_ID_ADPCM_IMA_AMV: header_size = 8; break;
case CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4; break;
}
if (header_size > 0)
return (buf_size - header_size) * 2 / ch;
/* more complex formats */
switch (avctx->codec->id) {
case AV_CODEC_ID_ADPCM_EA:
case CODEC_ID_ADPCM_EA:
has_coded_samples = 1;
*coded_samples = bytestream2_get_le32(gb);
*coded_samples -= *coded_samples % 28;
nb_samples = (buf_size - 12) / 30 * 28;
break;
case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
case CODEC_ID_ADPCM_IMA_EA_EACS:
has_coded_samples = 1;
*coded_samples = bytestream2_get_le32(gb);
nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
break;
case AV_CODEC_ID_ADPCM_EA_MAXIS_XA:
case CODEC_ID_ADPCM_EA_MAXIS_XA:
nb_samples = (buf_size - ch) / ch * 2;
break;
case AV_CODEC_ID_ADPCM_EA_R1:
case AV_CODEC_ID_ADPCM_EA_R2:
case AV_CODEC_ID_ADPCM_EA_R3:
case CODEC_ID_ADPCM_EA_R1:
case CODEC_ID_ADPCM_EA_R2:
case CODEC_ID_ADPCM_EA_R3:
/* maximum number of samples */
/* has internal offsets and a per-frame switch to signal raw 16-bit */
has_coded_samples = 1;
switch (avctx->codec->id) {
case AV_CODEC_ID_ADPCM_EA_R1:
case CODEC_ID_ADPCM_EA_R1:
header_size = 4 + 9 * ch;
*coded_samples = bytestream2_get_le32(gb);
break;
case AV_CODEC_ID_ADPCM_EA_R2:
case CODEC_ID_ADPCM_EA_R2:
header_size = 4 + 5 * ch;
*coded_samples = bytestream2_get_le32(gb);
break;
case AV_CODEC_ID_ADPCM_EA_R3:
case CODEC_ID_ADPCM_EA_R3:
header_size = 4 + 5 * ch;
*coded_samples = bytestream2_get_be32(gb);
break;
@@ -496,35 +496,35 @@ static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
nb_samples = (buf_size - header_size) * 2 / ch;
nb_samples -= nb_samples % 28;
break;
case AV_CODEC_ID_ADPCM_IMA_DK3:
case CODEC_ID_ADPCM_IMA_DK3:
if (avctx->block_align > 0)
buf_size = FFMIN(buf_size, avctx->block_align);
nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
break;
case AV_CODEC_ID_ADPCM_IMA_DK4:
case CODEC_ID_ADPCM_IMA_DK4:
if (avctx->block_align > 0)
buf_size = FFMIN(buf_size, avctx->block_align);
nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
break;
case AV_CODEC_ID_ADPCM_IMA_WAV:
case CODEC_ID_ADPCM_IMA_WAV:
if (avctx->block_align > 0)
buf_size = FFMIN(buf_size, avctx->block_align);
nb_samples = 1 + (buf_size - 4 * ch) / (4 * ch) * 8;
break;
case AV_CODEC_ID_ADPCM_MS:
case CODEC_ID_ADPCM_MS:
if (avctx->block_align > 0)
buf_size = FFMIN(buf_size, avctx->block_align);
nb_samples = 2 + (buf_size - 7 * ch) * 2 / ch;
break;
case AV_CODEC_ID_ADPCM_SBPRO_2:
case AV_CODEC_ID_ADPCM_SBPRO_3:
case AV_CODEC_ID_ADPCM_SBPRO_4:
case CODEC_ID_ADPCM_SBPRO_2:
case CODEC_ID_ADPCM_SBPRO_3:
case CODEC_ID_ADPCM_SBPRO_4:
{
int samples_per_byte;
switch (avctx->codec->id) {
case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
case CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
case CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
case CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
}
if (!s->status[0].step_index) {
nb_samples++;
@@ -533,7 +533,7 @@ static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
nb_samples += buf_size * samples_per_byte / ch;
break;
}
case AV_CODEC_ID_ADPCM_SWF:
case CODEC_ID_ADPCM_SWF:
{
int buf_bits = buf_size * 8 - 2;
int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
@@ -546,14 +546,14 @@ static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
break;
}
case AV_CODEC_ID_ADPCM_THP:
case CODEC_ID_ADPCM_THP:
has_coded_samples = 1;
bytestream2_skip(gb, 4); // channel size
*coded_samples = bytestream2_get_be32(gb);
*coded_samples -= *coded_samples % 14;
nb_samples = (buf_size - 80) / (8 * ch) * 14;
break;
case AV_CODEC_ID_ADPCM_XA:
case CODEC_ID_ADPCM_XA:
nb_samples = (buf_size / 128) * 224 / ch;
break;
}
@@ -605,7 +605,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
st = avctx->channels == 2 ? 1 : 0;
switch(avctx->codec->id) {
case AV_CODEC_ID_ADPCM_IMA_QT:
case CODEC_ID_ADPCM_IMA_QT:
/* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
Channel data is interleaved per-chunk. */
for (channel = 0; channel < avctx->channels; channel++) {
@@ -648,7 +648,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
}
}
break;
case AV_CODEC_ID_ADPCM_IMA_WAV:
case CODEC_ID_ADPCM_IMA_WAV:
for(i=0; i<avctx->channels; i++){
cs = &(c->status[i]);
cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
@@ -676,7 +676,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
samples += 7 * avctx->channels;
}
break;
case AV_CODEC_ID_ADPCM_4XM:
case CODEC_ID_ADPCM_4XM:
for (i = 0; i < avctx->channels; i++)
c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
@@ -701,7 +701,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
}
}
break;
case AV_CODEC_ID_ADPCM_MS:
case CODEC_ID_ADPCM_MS:
{
int block_predictor;
@@ -744,7 +744,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
}
break;
}
case AV_CODEC_ID_ADPCM_IMA_DK4:
case CODEC_ID_ADPCM_IMA_DK4:
for (channel = 0; channel < avctx->channels; channel++) {
cs = &c->status[channel];
cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
@@ -761,7 +761,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
*samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
}
break;
case AV_CODEC_ID_ADPCM_IMA_DK3:
case CODEC_ID_ADPCM_IMA_DK3:
{
int last_byte = 0;
int nibble;
@@ -822,7 +822,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
}
break;
}
case AV_CODEC_ID_ADPCM_IMA_ISS:
case CODEC_ID_ADPCM_IMA_ISS:
for (channel = 0; channel < avctx->channels; channel++) {
cs = &c->status[channel];
cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
@@ -849,14 +849,14 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
*samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
}
break;
case AV_CODEC_ID_ADPCM_IMA_APC:
case CODEC_ID_ADPCM_IMA_APC:
while (bytestream2_get_bytes_left(&gb) > 0) {
int v = bytestream2_get_byteu(&gb);
*samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
}
break;
case AV_CODEC_ID_ADPCM_IMA_WS:
case CODEC_ID_ADPCM_IMA_WS:
if (c->vqa_version == 3) {
for (channel = 0; channel < avctx->channels; channel++) {
int16_t *smp = samples + channel;
@@ -881,7 +881,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
}
bytestream2_seek(&gb, 0, SEEK_END);
break;
case AV_CODEC_ID_ADPCM_XA:
case CODEC_ID_ADPCM_XA:
while (bytestream2_get_bytes_left(&gb) >= 128) {
if ((ret = xa_decode(avctx, samples, buf + bytestream2_tell(&gb), &c->status[0],
&c->status[1], avctx->channels)) < 0)
@@ -890,7 +890,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
samples += 28 * 8;
}
break;
case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
case CODEC_ID_ADPCM_IMA_EA_EACS:
for (i=0; i<=st; i++) {
c->status[i].step_index = bytestream2_get_le32u(&gb);
if (c->status[i].step_index > 88u) {
@@ -908,14 +908,14 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
*samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
}
break;
case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
case CODEC_ID_ADPCM_IMA_EA_SEAD:
for (n = nb_samples >> (1 - st); n > 0; n--) {
int byte = bytestream2_get_byteu(&gb);
*samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
*samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
}
break;
case AV_CODEC_ID_ADPCM_EA:
case CODEC_ID_ADPCM_EA:
{
int previous_left_sample, previous_right_sample;
int current_left_sample, current_right_sample;
@@ -970,7 +970,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
break;
}
case AV_CODEC_ID_ADPCM_EA_MAXIS_XA:
case CODEC_ID_ADPCM_EA_MAXIS_XA:
{
int coeff[2][2], shift[2];
@@ -1000,14 +1000,14 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
bytestream2_seek(&gb, 0, SEEK_END);
break;
}
case AV_CODEC_ID_ADPCM_EA_R1:
case AV_CODEC_ID_ADPCM_EA_R2:
case AV_CODEC_ID_ADPCM_EA_R3: {
case CODEC_ID_ADPCM_EA_R1:
case CODEC_ID_ADPCM_EA_R2:
case CODEC_ID_ADPCM_EA_R3: {
/* channel numbering
2chan: 0=fl, 1=fr
4chan: 0=fl, 1=rl, 2=fr, 3=rr
6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
const int big_endian = avctx->codec->id == CODEC_ID_ADPCM_EA_R3;
int previous_sample, current_sample, next_sample;
int coeff1, coeff2;
int shift;
@@ -1025,7 +1025,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
bytestream2_seek(&gb, offsets[channel], SEEK_SET);
samplesC = samples + channel;
if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
if (avctx->codec->id == CODEC_ID_ADPCM_EA_R1) {
current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
} else {
@@ -1074,7 +1074,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
count = FFMAX(count, count1);
}
if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
if (avctx->codec->id != CODEC_ID_ADPCM_EA_R1) {
c->status[channel].predictor = current_sample;
c->status[channel].prev_sample = previous_sample;
}
@@ -1084,7 +1084,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
bytestream2_seek(&gb, 0, SEEK_END);
break;
}
case AV_CODEC_ID_ADPCM_EA_XAS:
case CODEC_ID_ADPCM_EA_XAS:
for (channel=0; channel<avctx->channels; channel++) {
int coeff[2][4], shift[4];
short *s2, *s = &samples[channel];
@@ -1113,9 +1113,9 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
}
}
break;
case AV_CODEC_ID_ADPCM_IMA_AMV:
case AV_CODEC_ID_ADPCM_IMA_SMJPEG:
if (avctx->codec->id == AV_CODEC_ID_ADPCM_IMA_AMV) {
case CODEC_ID_ADPCM_IMA_AMV:
case CODEC_ID_ADPCM_IMA_SMJPEG:
if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV) {
c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
c->status[0].step_index = bytestream2_get_le16u(&gb);
bytestream2_skipu(&gb, 4);
@@ -1133,7 +1133,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
for (n = nb_samples >> (1 - st); n > 0; n--) {
int hi, lo, v = bytestream2_get_byteu(&gb);
if (avctx->codec->id == AV_CODEC_ID_ADPCM_IMA_AMV) {
if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV) {
hi = v & 0x0F;
lo = v >> 4;
} else {
@@ -1145,16 +1145,16 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
*samples++ = adpcm_ima_expand_nibble(&c->status[0], hi, 3);
}
break;
case AV_CODEC_ID_ADPCM_CT:
case CODEC_ID_ADPCM_CT:
for (n = nb_samples >> (1 - st); n > 0; n--) {
int v = bytestream2_get_byteu(&gb);
*samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
*samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
}
break;
case AV_CODEC_ID_ADPCM_SBPRO_4:
case AV_CODEC_ID_ADPCM_SBPRO_3:
case AV_CODEC_ID_ADPCM_SBPRO_2:
case CODEC_ID_ADPCM_SBPRO_4:
case CODEC_ID_ADPCM_SBPRO_3:
case CODEC_ID_ADPCM_SBPRO_2:
if (!c->status[0].step_index) {
/* the first byte is a raw sample */
*samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
@@ -1163,7 +1163,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
c->status[0].step_index = 1;
nb_samples--;
}
if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_4) {
for (n = nb_samples >> (1 - st); n > 0; n--) {
int byte = bytestream2_get_byteu(&gb);
*samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
@@ -1171,7 +1171,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
*samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
byte & 0x0F, 4, 0);
}
} else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
} else if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_3) {
for (n = nb_samples / 3; n > 0; n--) {
int byte = bytestream2_get_byteu(&gb);
*samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
@@ -1195,18 +1195,18 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
}
}
break;
case AV_CODEC_ID_ADPCM_SWF:
case CODEC_ID_ADPCM_SWF:
adpcm_swf_decode(avctx, buf, buf_size, samples);
bytestream2_seek(&gb, 0, SEEK_END);
break;
case AV_CODEC_ID_ADPCM_YAMAHA:
case CODEC_ID_ADPCM_YAMAHA:
for (n = nb_samples >> (1 - st); n > 0; n--) {
int v = bytestream2_get_byteu(&gb);
*samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
*samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
}
break;
case AV_CODEC_ID_ADPCM_THP:
case CODEC_ID_ADPCM_THP:
{
int table[2][16];
int prev[2][2];
@@ -1282,30 +1282,30 @@ AVCodec ff_ ## name_ ## _decoder = { \
}
/* Note: Do not forget to add new entries to the Makefile as well. */
ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM, adpcm_4xm, "ADPCM 4X Movie");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT, adpcm_ct, "ADPCM Creative Technology");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA, adpcm_ea, "ADPCM Electronic Arts");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1, adpcm_ea_r1, "ADPCM Electronic Arts R1");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2, adpcm_ea_r2, "ADPCM Electronic Arts R2");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3, adpcm_ea_r3, "ADPCM Electronic Arts R3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV, adpcm_ima_amv, "ADPCM IMA AMV");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC, adpcm_ima_apc, "ADPCM IMA CRYO APC");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws, "ADPCM IMA Westwood");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP, adpcm_thp, "ADPCM Nintendo Gamecube THP");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA, adpcm_xa, "ADPCM CDROM XA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");
ADPCM_DECODER(CODEC_ID_ADPCM_4XM, adpcm_4xm, "ADPCM 4X Movie");
ADPCM_DECODER(CODEC_ID_ADPCM_CT, adpcm_ct, "ADPCM Creative Technology");
ADPCM_DECODER(CODEC_ID_ADPCM_EA, adpcm_ea, "ADPCM Electronic Arts");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_MAXIS_XA, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R1, adpcm_ea_r1, "ADPCM Electronic Arts R1");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R2, adpcm_ea_r2, "ADPCM Electronic Arts R2");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R3, adpcm_ea_r3, "ADPCM Electronic Arts R3");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_XAS, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_AMV, adpcm_ima_amv, "ADPCM IMA AMV");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_APC, adpcm_ima_apc, "ADPCM IMA CRYO APC");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_SEAD, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_ISS, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws, "ADPCM IMA Westwood");
ADPCM_DECODER(CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_2, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_3, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_4, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
ADPCM_DECODER(CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash");
ADPCM_DECODER(CODEC_ID_ADPCM_THP, adpcm_thp, "ADPCM Nintendo Gamecube THP");
ADPCM_DECODER(CODEC_ID_ADPCM_XA, adpcm_xa, "ADPCM CDROM XA");
ADPCM_DECODER(CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");
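
Most of the sample-count logic above is pure packet arithmetic. For the simple 4-bit cases each byte carries two nibbles split across channels, so a hypothetical 1024-byte stereo packet gives nb_samples = 1024 * 2 / 2 = 1024 samples per channel; for IMA DK4, which spends 4 bytes per channel on a header that also encodes one seed sample, the same packet gives nb_samples = 1 + (1024 - 4 * 2) * 2 / 2 = 1017.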

View File

@@ -94,7 +94,7 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
avctx->bits_per_coded_sample = av_get_bits_per_sample(avctx->codec->id);
switch (avctx->codec->id) {
case AV_CODEC_ID_ADPCM_IMA_WAV:
case CODEC_ID_ADPCM_IMA_WAV:
/* each 16 bits sample gives one nibble
and we have 4 bytes per channel overhead */
avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 /
@@ -104,11 +104,11 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
avctx->block_align = BLKSIZE;
avctx->bits_per_coded_sample = 4;
break;
case AV_CODEC_ID_ADPCM_IMA_QT:
case CODEC_ID_ADPCM_IMA_QT:
avctx->frame_size = 64;
avctx->block_align = 34 * avctx->channels;
break;
case AV_CODEC_ID_ADPCM_MS:
case CODEC_ID_ADPCM_MS:
/* each 16 bits sample gives one nibble
and we have 7 bytes per channel overhead */
avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2;
@@ -125,11 +125,11 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff2[i] * 4);
}
break;
case AV_CODEC_ID_ADPCM_YAMAHA:
case CODEC_ID_ADPCM_YAMAHA:
avctx->frame_size = BLKSIZE * 2 / avctx->channels;
avctx->block_align = BLKSIZE;
break;
case AV_CODEC_ID_ADPCM_SWF:
case CODEC_ID_ADPCM_SWF:
if (avctx->sample_rate != 11025 &&
avctx->sample_rate != 22050 &&
avctx->sample_rate != 44100) {
@@ -297,13 +297,13 @@ static void adpcm_compress_trellis(AVCodecContext *avctx,
nodes[0]->step = c->step_index;
nodes[0]->sample1 = c->sample1;
nodes[0]->sample2 = c->sample2;
if (version == AV_CODEC_ID_ADPCM_IMA_WAV ||
version == AV_CODEC_ID_ADPCM_IMA_QT ||
version == AV_CODEC_ID_ADPCM_SWF)
if (version == CODEC_ID_ADPCM_IMA_WAV ||
version == CODEC_ID_ADPCM_IMA_QT ||
version == CODEC_ID_ADPCM_SWF)
nodes[0]->sample1 = c->prev_sample;
if (version == AV_CODEC_ID_ADPCM_MS)
if (version == CODEC_ID_ADPCM_MS)
nodes[0]->step = c->idelta;
if (version == AV_CODEC_ID_ADPCM_YAMAHA) {
if (version == CODEC_ID_ADPCM_YAMAHA) {
if (c->step == 0) {
nodes[0]->step = 127;
nodes[0]->sample1 = 0;
@@ -325,7 +325,7 @@ static void adpcm_compress_trellis(AVCodecContext *avctx,
const int range = (j < frontier / 2) ? 1 : 0;
const int step = nodes[j]->step;
int nidx;
if (version == AV_CODEC_ID_ADPCM_MS) {
if (version == CODEC_ID_ADPCM_MS) {
const int predictor = ((nodes[j]->sample1 * c->coeff1) +
(nodes[j]->sample2 * c->coeff2)) / 64;
const int div = (sample - predictor) / step;
@@ -401,9 +401,9 @@ static void adpcm_compress_trellis(AVCodecContext *avctx,
STORE_NODE(ms, FFMAX(16,
(ff_adpcm_AdaptationTable[nibble] * step) >> 8));
}
} else if (version == AV_CODEC_ID_ADPCM_IMA_WAV ||
version == AV_CODEC_ID_ADPCM_IMA_QT ||
version == AV_CODEC_ID_ADPCM_SWF) {
} else if (version == CODEC_ID_ADPCM_IMA_WAV ||
version == CODEC_ID_ADPCM_IMA_QT ||
version == CODEC_ID_ADPCM_SWF) {
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
const int predictor = nodes[j]->sample1;\
const int div = (sample - predictor) * 4 / STEP_TABLE;\
@@ -422,7 +422,7 @@ static void adpcm_compress_trellis(AVCodecContext *avctx,
}
LOOP_NODES(ima, ff_adpcm_step_table[step],
av_clip(step + ff_adpcm_index_table[nibble], 0, 88));
} else { //AV_CODEC_ID_ADPCM_YAMAHA
} else { //CODEC_ID_ADPCM_YAMAHA
LOOP_NODES(yamaha, step,
av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8,
127, 24567));
@@ -490,7 +490,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
samples = (const int16_t *)frame->data[0];
st = avctx->channels == 2;
if (avctx->codec_id == AV_CODEC_ID_ADPCM_SWF)
if (avctx->codec_id == CODEC_ID_ADPCM_SWF)
pkt_size = (2 + avctx->channels * (22 + 4 * (frame->nb_samples - 1)) + 7) / 8;
else
pkt_size = avctx->block_align;
@@ -499,7 +499,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
dst = avpkt->data;
switch(avctx->codec->id) {
case AV_CODEC_ID_ADPCM_IMA_WAV:
case CODEC_ID_ADPCM_IMA_WAV:
n = frame->nb_samples / 8;
c->status[0].prev_sample = samples[0];
/* c->status[0].step_index = 0;
@@ -564,7 +564,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
}
}
break;
case AV_CODEC_ID_ADPCM_IMA_QT:
case CODEC_ID_ADPCM_IMA_QT:
{
int ch, i;
PutBitContext pb;
@@ -594,7 +594,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
flush_put_bits(&pb);
break;
}
case AV_CODEC_ID_ADPCM_SWF:
case CODEC_ID_ADPCM_SWF:
{
int i;
PutBitContext pb;
@@ -616,11 +616,10 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
if (avctx->trellis > 0) {
FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error);
adpcm_compress_trellis(avctx, samples + avctx->channels, buf,
&c->status[0], n);
adpcm_compress_trellis(avctx, samples + 2, buf, &c->status[0], n);
if (avctx->channels == 2)
adpcm_compress_trellis(avctx, samples + avctx->channels + 1,
buf + n, &c->status[1], n);
adpcm_compress_trellis(avctx, samples + 3, buf + n,
&c->status[1], n);
for (i = 0; i < n; i++) {
put_bits(&pb, 4, buf[i]);
if (avctx->channels == 2)
@@ -639,7 +638,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
flush_put_bits(&pb);
break;
}
case AV_CODEC_ID_ADPCM_MS:
case CODEC_ID_ADPCM_MS:
for (i = 0; i < avctx->channels; i++) {
int predictor = 0;
*dst++ = predictor;
@@ -683,7 +682,7 @@ static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
}
}
break;
case AV_CODEC_ID_ADPCM_YAMAHA:
case CODEC_ID_ADPCM_YAMAHA:
n = frame->nb_samples / 2;
if (avctx->trellis > 0) {
FF_ALLOC_OR_GOTO(avctx, buf, 2 * n * 2, error);
@@ -718,9 +717,6 @@ error:
return AVERROR(ENOMEM);
}
static const enum AVSampleFormat sample_fmts[] = {
AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
};
#define ADPCM_ENCODER(id_, name_, long_name_) \
AVCodec ff_ ## name_ ## _encoder = { \
@@ -731,12 +727,13 @@ AVCodec ff_ ## name_ ## _encoder = { \
.init = adpcm_encode_init, \
.encode2 = adpcm_encode_frame, \
.close = adpcm_encode_close, \
.sample_fmts = sample_fmts, \
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16, \
AV_SAMPLE_FMT_NONE }, \
.long_name = NULL_IF_CONFIG_SMALL(long_name_), \
}
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");
ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime");
ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
ADPCM_ENCODER(CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft");
ADPCM_ENCODER(CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash");
ADPCM_ENCODER(CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");
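
The SWF branch above sizes packets in bits before rounding up to bytes: 2 header bits, then per channel 22 bits of seed data plus 4 bits for each remaining sample. For a hypothetical stereo frame of 4096 samples: 2 + 2 * (22 + 4 * 4095) = 32806 bits, so pkt_size = (32806 + 7) / 8 = 4101 bytes.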

View File

@@ -18,7 +18,6 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "adx.h"

View File

@@ -89,7 +89,7 @@ static int adx_parse(AVCodecParserContext *s1,
}
AVCodecParser ff_adx_parser = {
.codec_ids = { AV_CODEC_ID_ADPCM_ADX },
.codec_ids = { CODEC_ID_ADPCM_ADX },
.priv_data_size = sizeof(ADXParseContext),
.parser_parse = adx_parse,
.parser_close = ff_parse_close,

View File

@@ -176,7 +176,7 @@ static void adx_decode_flush(AVCodecContext *avctx)
AVCodec ff_adpcm_adx_decoder = {
.name = "adpcm_adx",
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_ADPCM_ADX,
.id = CODEC_ID_ADPCM_ADX,
.priv_data_size = sizeof(ADXContext),
.init = adx_decode_init,
.decode = adx_decode_frame,

View File

@@ -164,7 +164,7 @@ static int adx_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
AVCodec ff_adpcm_adx_encoder = {
.name = "adpcm_adx",
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_ADPCM_ADX,
.id = CODEC_ID_ADPCM_ADX,
.priv_data_size = sizeof(ADXContext),
.init = adx_encode_init,
.encode2 = adx_encode_frame,

View File

@@ -37,7 +37,7 @@
* 8bit sample size
* 8bit history mult (40)
* 8bit initial history (14)
* 8bit rice param limit (10)
* 8bit kmodifier (10)
* 8bit channels
* 16bit maxRun (255)
* 32bit max coded frame size (0 means unknown)
@@ -45,7 +45,7 @@
* 32bit samplerate
*/
#include "libavutil/audioconvert.h"
#include "avcodec.h"
#include "get_bits.h"
#include "bytestream.h"
@@ -53,137 +53,128 @@
#include "mathops.h"
#define ALAC_EXTRADATA_SIZE 36
#define MAX_CHANNELS 8
#define MAX_CHANNELS 2
typedef struct {
AVCodecContext *avctx;
AVFrame frame;
GetBitContext gb;
int channels;
int32_t *predict_error_buffer[2];
int32_t *output_samples_buffer[2];
int32_t *extra_bits_buffer[2];
int numchannels;
uint32_t max_samples_per_frame;
uint8_t sample_size;
uint8_t rice_history_mult;
uint8_t rice_initial_history;
uint8_t rice_limit;
/* buffers */
int32_t *predicterror_buffer[MAX_CHANNELS];
int extra_bits; /**< number of extra bits beyond 16-bit */
int nb_samples; /**< number of samples in the current frame */
int32_t *outputsamples_buffer[MAX_CHANNELS];
int direct_output;
int32_t *extra_bits_buffer[MAX_CHANNELS];
/* stuff from setinfo */
uint32_t setinfo_max_samples_per_frame; /* 0x1000 = 4096 */ /* max samples per frame? */
uint8_t setinfo_sample_size; /* 0x10 */
uint8_t setinfo_rice_historymult; /* 0x28 */
uint8_t setinfo_rice_initialhistory; /* 0x0a */
uint8_t setinfo_rice_kmodifier; /* 0x0e */
/* end setinfo stuff */
int extra_bits; /**< number of extra bits beyond 16-bit */
} ALACContext;
enum RawDataBlockType {
/* At the moment, only SCE, CPE, LFE, and END are recognized. */
TYPE_SCE,
TYPE_CPE,
TYPE_CCE,
TYPE_LFE,
TYPE_DSE,
TYPE_PCE,
TYPE_FIL,
TYPE_END
};
static const uint8_t alac_channel_layout_offsets[8][8] = {
{ 0 },
{ 0, 1 },
{ 2, 0, 1 },
{ 2, 0, 1, 3 },
{ 2, 0, 1, 3, 4 },
{ 2, 0, 1, 4, 5, 3 },
{ 2, 0, 1, 4, 5, 6, 3 },
{ 2, 6, 7, 0, 1, 4, 5, 3 }
};
static const uint16_t alac_channel_layouts[8] = {
AV_CH_LAYOUT_MONO,
AV_CH_LAYOUT_STEREO,
AV_CH_LAYOUT_SURROUND,
AV_CH_LAYOUT_4POINT0,
AV_CH_LAYOUT_5POINT0_BACK,
AV_CH_LAYOUT_5POINT1_BACK,
AV_CH_LAYOUT_6POINT1_BACK,
AV_CH_LAYOUT_7POINT1_WIDE_BACK
};
static inline unsigned int decode_scalar(GetBitContext *gb, int k, int bps)
{
unsigned int x = get_unary_0_9(gb);
static inline int decode_scalar(GetBitContext *gb, int k, int limit, int readsamplesize){
/* read x - the number of 1s before the 0 gives the rice prefix */
int x = get_unary_0_9(gb);
if (x > 8) { /* RICE THRESHOLD */
/* use alternative encoding */
x = get_bits_long(gb, bps);
} else if (k != 1) {
int extrabits = show_bits(gb, k);
x = get_bits(gb, readsamplesize);
} else {
if (k >= limit)
k = limit;
/* multiply x by 2^k - 1, as part of their strange algorithm */
x = (x << k) - x;
if (k != 1) {
int extrabits = show_bits(gb, k);
if (extrabits > 1) {
x += extrabits - 1;
skip_bits(gb, k);
} else
skip_bits(gb, k - 1);
/* multiply x by 2^k - 1, as part of their strange algorithm */
x = (x << k) - x;
if (extrabits > 1) {
x += extrabits - 1;
skip_bits(gb, k);
} else
skip_bits(gb, k - 1);
}
}
return x;
}
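
Whatever the variable names, both versions of decode_scalar implement the same Rice read: a unary prefix x (1-bits before the terminating 0), an escape to a raw bps/readsamplesize-bit value when x exceeds the threshold of 8, and otherwise x * (2^k - 1) — which is what (x << k) - x computes — plus an adjusted k-bit remainder. The caller then undoes the zigzag map (x >> 1) ^ -(x & 1), which sends 0, 1, 2, 3, 4, 5 to 0, -1, 1, -2, 2, -3.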
static int rice_decompress(ALACContext *alac, int32_t *output_buffer,
int nb_samples, int bps, int rice_history_mult)
static int bastardized_rice_decompress(ALACContext *alac,
int32_t *output_buffer,
int output_size,
int readsamplesize, /* arg_10 */
int rice_initialhistory, /* arg424->b */
int rice_kmodifier, /* arg424->d */
int rice_historymult, /* arg424->c */
int rice_kmodifier_mask /* arg424->e */
)
{
int i;
unsigned int history = alac->rice_initial_history;
int output_count;
unsigned int history = rice_initialhistory;
int sign_modifier = 0;
for (i = 0; i < nb_samples; i++) {
int k;
unsigned int x;
for (output_count = 0; output_count < output_size; output_count++) {
int32_t x;
int32_t x_modified;
int32_t final_val;
/* standard rice encoding */
int k; /* size of extra bits */
if(get_bits_left(&alac->gb) <= 0)
return -1;
/* calculate rice param and decode next value */
/* read k, that is bits as is */
k = av_log2((history >> 9) + 3);
k = FFMIN(k, alac->rice_limit);
x = decode_scalar(&alac->gb, k, bps);
x += sign_modifier;
sign_modifier = 0;
output_buffer[i] = (x >> 1) ^ -(x & 1);
x= decode_scalar(&alac->gb, k, rice_kmodifier, readsamplesize);
/* update the history */
if (x > 0xffff)
x_modified = sign_modifier + x;
final_val = (x_modified + 1) / 2;
if (x_modified & 1) final_val *= -1;
output_buffer[output_count] = final_val;
sign_modifier = 0;
/* now update the history */
history += x_modified * rice_historymult
- ((history * rice_historymult) >> 9);
if (x_modified > 0xffff)
history = 0xffff;
else
history += x * rice_history_mult -
((history * rice_history_mult) >> 9);
/* special case: there may be compressed blocks of 0 */
if ((history < 128) && (i + 1 < nb_samples)) {
int block_size;
if ((history < 128) && (output_count+1 < output_size)) {
int k;
unsigned int block_size;
/* calculate rice param and decode block size */
k = 7 - av_log2(history) + ((history + 16) >> 6);
k = FFMIN(k, alac->rice_limit);
block_size = decode_scalar(&alac->gb, k, 16);
sign_modifier = 1;
k = 7 - av_log2(history) + ((history + 16) >> 6 /* / 64 */);
block_size= decode_scalar(&alac->gb, k, rice_kmodifier, 16);
if (block_size > 0) {
if (block_size >= nb_samples - i) {
av_log(alac->avctx, AV_LOG_ERROR,
"invalid zero block size of %d %d %d\n", block_size,
nb_samples, i);
block_size = nb_samples - i - 1;
if(block_size >= output_size - output_count){
av_log(alac->avctx, AV_LOG_ERROR, "invalid zero block size of %d %d %d\n", block_size, output_size, output_count);
block_size= output_size - output_count - 1;
}
memset(&output_buffer[i + 1], 0,
block_size * sizeof(*output_buffer));
i += block_size;
memset(&output_buffer[output_count+1], 0, block_size * 4);
output_count += block_size;
}
if (block_size <= 0xffff)
sign_modifier = 1;
if (block_size > 0xffff)
sign_modifier = 0;
history = 0;
}
}
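
The history update above is an exponential moving average with weight m/512 (m being the history multiplier): h \leftarrow h + m x - \lfloor h m / 512 \rfloor, clamped at 0xffff for huge residuals, with steady state h \approx 512 \bar{x}. The Rice parameter k = av_log2((h >> 9) + 3) therefore tracks \log_2(\bar{x} + 3), growing and shrinking with the recent residual magnitude; the history < 128 branch handles runs of near-zero residuals with an explicit zero-block length instead.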
@@ -195,81 +186,131 @@ static inline int sign_only(int v)
return v ? FFSIGN(v) : 0;
}
static void lpc_prediction(int32_t *error_buffer, int32_t *buffer_out,
int nb_samples, int bps, int16_t *lpc_coefs,
int lpc_order, int lpc_quant)
static void predictor_decompress_fir_adapt(int32_t *error_buffer,
int32_t *buffer_out,
int output_size,
int readsamplesize,
int16_t *predictor_coef_table,
int predictor_coef_num,
int predictor_quantitization)
{
int i;
int32_t *pred = buffer_out;
/* first sample always copies */
*buffer_out = *error_buffer;
if (nb_samples <= 1)
return;
if (!predictor_coef_num) {
if (output_size <= 1)
return;
if (!lpc_order) {
memcpy(&buffer_out[1], &error_buffer[1],
(nb_samples - 1) * sizeof(*buffer_out));
memcpy(buffer_out+1, error_buffer+1, (output_size-1) * 4);
return;
}
if (lpc_order == 31) {
/* simple 1st-order prediction */
for (i = 1; i < nb_samples; i++) {
buffer_out[i] = sign_extend(buffer_out[i - 1] + error_buffer[i],
bps);
if (predictor_coef_num == 0x1f) { /* 11111 - max value of predictor_coef_num */
/* second-best case scenario for fir decompression,
* error describes a small difference from the previous sample only
*/
if (output_size <= 1)
return;
for (i = 0; i < output_size - 1; i++) {
int32_t prev_value;
int32_t error_value;
prev_value = buffer_out[i];
error_value = error_buffer[i+1];
buffer_out[i+1] =
sign_extend((prev_value + error_value), readsamplesize);
}
return;
}
/* read warm-up samples */
for (i = 1; i <= lpc_order; i++)
buffer_out[i] = sign_extend(buffer_out[i - 1] + error_buffer[i], bps);
if (predictor_coef_num > 0)
for (i = 0; i < predictor_coef_num; i++) {
int32_t val;
/* NOTE: 4 and 8 are very common cases that could be optimized. */
val = buffer_out[i] + error_buffer[i+1];
val = sign_extend(val, readsamplesize);
buffer_out[i+1] = val;
}
for (; i < nb_samples; i++) {
int j;
int val = 0;
int error_val = error_buffer[i];
int error_sign;
int d = *pred++;
/* 4 and 8 are very common cases (the only ones i've seen). these
* should be unrolled and optimized
*/
/* LPC prediction */
for (j = 0; j < lpc_order; j++)
val += (pred[j] - d) * lpc_coefs[j];
val = (val + (1 << (lpc_quant - 1))) >> lpc_quant;
val += d + error_val;
buffer_out[i] = sign_extend(val, bps);
/* general case */
if (predictor_coef_num > 0) {
for (i = predictor_coef_num + 1; i < output_size; i++) {
int j;
int sum = 0;
int outval;
int error_val = error_buffer[i];
/* adapt LPC coefficients */
error_sign = sign_only(error_val);
if (error_sign) {
for (j = 0; j < lpc_order && error_val * error_sign > 0; j++) {
int sign;
val = d - pred[j];
sign = sign_only(val) * error_sign;
lpc_coefs[j] -= sign;
val *= sign;
error_val -= (val >> lpc_quant) * (j + 1);
for (j = 0; j < predictor_coef_num; j++) {
sum += (buffer_out[predictor_coef_num-j] - buffer_out[0]) *
predictor_coef_table[j];
}
outval = (1 << (predictor_quantitization-1)) + sum;
outval = outval >> predictor_quantitization;
outval = outval + buffer_out[0] + error_val;
outval = sign_extend(outval, readsamplesize);
buffer_out[predictor_coef_num+1] = outval;
if (error_val > 0) {
int predictor_num = predictor_coef_num - 1;
while (predictor_num >= 0 && error_val > 0) {
int val = buffer_out[0] - buffer_out[predictor_coef_num - predictor_num];
int sign = sign_only(val);
predictor_coef_table[predictor_num] -= sign;
val *= sign; /* absolute value */
error_val -= ((val >> predictor_quantitization) *
(predictor_coef_num - predictor_num));
predictor_num--;
}
} else if (error_val < 0) {
int predictor_num = predictor_coef_num - 1;
while (predictor_num >= 0 && error_val < 0) {
int val = buffer_out[0] - buffer_out[predictor_coef_num - predictor_num];
int sign = - sign_only(val);
predictor_coef_table[predictor_num] -= sign;
val *= sign; /* neg value */
error_val -= ((val >> predictor_quantitization) *
(predictor_coef_num - predictor_num));
predictor_num--;
}
}
buffer_out++;
}
}
}
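
Stripped of naming differences, both versions of the adaptive FIR predictor compute, for order p, quantizer q and reference sample d = x_{i-p-1}:

    x_i = \mathrm{sign\_extend}\!\left(d + e_i + \left\lfloor \frac{2^{q-1} + \sum_{j=0}^{p-1} c_j (x_{i-p+j} - d)}{2^{q}} \right\rfloor\right)

after which each coefficient is nudged by \pm 1 in the direction that shrinks the remaining residual, making the predictor adaptive without transmitting coefficient updates.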
static void decorrelate_stereo(int32_t *buffer[2], int nb_samples,
int decorr_shift, int decorr_left_weight)
static void decorrelate_stereo(int32_t *buffer[MAX_CHANNELS],
int numsamples, uint8_t interlacing_shift,
uint8_t interlacing_leftweight)
{
int i;
for (i = 0; i < nb_samples; i++) {
for (i = 0; i < numsamples; i++) {
int32_t a, b;
a = buffer[0][i];
b = buffer[1][i];
a -= (b * decorr_left_weight) >> decorr_shift;
a -= (b * interlacing_leftweight) >> interlacing_shift;
b += a;
buffer[0][i] = b;
@@ -277,90 +318,141 @@ static void decorrelate_stereo(int32_t *buffer[2], int nb_samples,
}
}
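
With weight w (interlacing_leftweight / decorr_left_weight) and shift s, the loop above inverts the encoder's channel decorrelation: from the stored pair (s_0, s_1) it forms d = s_0 - \lfloor s_1 w / 2^s \rfloor and b = s_1 + d, recovering the two output channels. A weight of 0 means the channels were stored independently, which is why both versions run this pass only when the left weight is nonzero.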
static void append_extra_bits(int32_t *buffer[2], int32_t *extra_bits_buffer[2],
int extra_bits, int channels, int nb_samples)
static void append_extra_bits(int32_t *buffer[MAX_CHANNELS],
int32_t *extra_bits_buffer[MAX_CHANNELS],
int extra_bits, int numchannels, int numsamples)
{
int i, ch;
for (ch = 0; ch < channels; ch++)
for (i = 0; i < nb_samples; i++)
for (ch = 0; ch < numchannels; ch++)
for (i = 0; i < numsamples; i++)
buffer[ch][i] = (buffer[ch][i] << extra_bits) | extra_bits_buffer[ch][i];
}
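
The extra-bits pass above reassembles each sample from a coarser core plus extra_bits literal low bits — extra_bits is the 2-bit header field times 8, so e.g. 24-bit audio can be coded as a 16-bit core with 8 literal low bits: value = (core << extra_bits) | extra, per channel and sample.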
static int decode_element(AVCodecContext *avctx, void *data, int ch_index,
int channels)
static void interleave_stereo_16(int32_t *buffer[MAX_CHANNELS],
int16_t *buffer_out, int numsamples)
{
int i;
for (i = 0; i < numsamples; i++) {
*buffer_out++ = buffer[0][i];
*buffer_out++ = buffer[1][i];
}
}
static void interleave_stereo_24(int32_t *buffer[MAX_CHANNELS],
int32_t *buffer_out, int numsamples)
{
int i;
for (i = 0; i < numsamples; i++) {
*buffer_out++ = buffer[0][i] << 8;
*buffer_out++ = buffer[1][i] << 8;
}
}
static void interleave_stereo_32(int32_t *buffer[MAX_CHANNELS],
int32_t *buffer_out, int numsamples)
{
int i;
for (i = 0; i < numsamples; i++) {
*buffer_out++ = buffer[0][i];
*buffer_out++ = buffer[1][i];
}
}
static int alac_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *inbuffer = avpkt->data;
int input_buffer_size = avpkt->size;
ALACContext *alac = avctx->priv_data;
int has_size, bps, is_compressed, decorr_shift, decorr_left_weight, ret;
uint32_t output_samples;
int i, ch;
skip_bits(&alac->gb, 4); /* element instance tag */
skip_bits(&alac->gb, 12); /* unused header bits */
int channels;
unsigned int outputsamples;
int hassize;
unsigned int readsamplesize;
int isnotcompressed;
uint8_t interlacing_shift;
uint8_t interlacing_leftweight;
int i, ch, ret;
/* the number of output samples is stored in the frame */
has_size = get_bits1(&alac->gb);
init_get_bits(&alac->gb, inbuffer, input_buffer_size * 8);
channels = get_bits(&alac->gb, 3) + 1;
if (channels != avctx->channels) {
av_log(avctx, AV_LOG_ERROR, "frame header channel count mismatch\n");
return AVERROR_INVALIDDATA;
}
/* 2^result = something to do with output waiting.
* perhaps matters if we read > 1 frame in a pass?
*/
skip_bits(&alac->gb, 4);
skip_bits(&alac->gb, 12); /* unknown, skip 12 bits */
/* the output sample size is stored soon */
hassize = get_bits1(&alac->gb);
alac->extra_bits = get_bits(&alac->gb, 2) << 3;
bps = alac->sample_size - alac->extra_bits + channels - 1;
if (bps > 32) {
av_log(avctx, AV_LOG_ERROR, "bps is unsupported: %d\n", bps);
return AVERROR_PATCHWELCOME;
}
/* whether the frame is compressed */
is_compressed = !get_bits1(&alac->gb);
isnotcompressed = get_bits1(&alac->gb);
if (has_size)
output_samples = get_bits_long(&alac->gb, 32);
else
output_samples = alac->max_samples_per_frame;
if (!output_samples || output_samples > alac->max_samples_per_frame) {
av_log(avctx, AV_LOG_ERROR, "invalid samples per frame: %d\n",
output_samples);
return AVERROR_INVALIDDATA;
}
if (!alac->nb_samples) {
/* get output buffer */
alac->frame.nb_samples = output_samples;
if ((ret = avctx->get_buffer(avctx, &alac->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
if (hassize) {
/* now read the number of samples as a 32bit integer */
outputsamples = get_bits_long(&alac->gb, 32);
if(outputsamples > alac->setinfo_max_samples_per_frame){
av_log(avctx, AV_LOG_ERROR, "outputsamples %d > %d\n", outputsamples, alac->setinfo_max_samples_per_frame);
return -1;
}
} else if (output_samples != alac->nb_samples) {
av_log(avctx, AV_LOG_ERROR, "sample count mismatch: %u != %d\n",
output_samples, alac->nb_samples);
} else
outputsamples = alac->setinfo_max_samples_per_frame;
/* get output buffer */
if (outputsamples > INT32_MAX) {
av_log(avctx, AV_LOG_ERROR, "unsupported block size: %u\n", outputsamples);
return AVERROR_INVALIDDATA;
}
alac->nb_samples = output_samples;
if (alac->direct_output) {
for (ch = 0; ch < channels; ch++)
alac->output_samples_buffer[ch] = (int32_t *)alac->frame.extended_data[ch_index + ch];
alac->frame.nb_samples = outputsamples;
if ((ret = avctx->get_buffer(avctx, &alac->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
if (is_compressed) {
int16_t lpc_coefs[2][32];
int lpc_order[2];
int prediction_type[2];
int lpc_quant[2];
int rice_history_mult[2];
readsamplesize = alac->setinfo_sample_size - alac->extra_bits + channels - 1;
if (readsamplesize > MIN_CACHE_BITS) {
av_log(avctx, AV_LOG_ERROR, "readsamplesize too big (%d)\n", readsamplesize);
return -1;
}
decorr_shift = get_bits(&alac->gb, 8);
decorr_left_weight = get_bits(&alac->gb, 8);
if (!isnotcompressed) {
/* so it is compressed */
int16_t predictor_coef_table[MAX_CHANNELS][32];
int predictor_coef_num[MAX_CHANNELS];
int prediction_type[MAX_CHANNELS];
int prediction_quantitization[MAX_CHANNELS];
int ricemodifier[MAX_CHANNELS];
interlacing_shift = get_bits(&alac->gb, 8);
interlacing_leftweight = get_bits(&alac->gb, 8);
for (ch = 0; ch < channels; ch++) {
prediction_type[ch] = get_bits(&alac->gb, 4);
lpc_quant[ch] = get_bits(&alac->gb, 4);
rice_history_mult[ch] = get_bits(&alac->gb, 3);
lpc_order[ch] = get_bits(&alac->gb, 5);
prediction_type[ch] = get_bits(&alac->gb, 4);
prediction_quantitization[ch] = get_bits(&alac->gb, 4);
ricemodifier[ch] = get_bits(&alac->gb, 3);
predictor_coef_num[ch] = get_bits(&alac->gb, 5);
/* read the predictor table */
for (i = lpc_order[ch] - 1; i >= 0; i--)
lpc_coefs[ch][i] = get_sbits(&alac->gb, 16);
for (i = 0; i < predictor_coef_num[ch]; i++)
predictor_coef_table[ch][i] = (int16_t)get_bits(&alac->gb, 16);
}
if (alac->extra_bits) {
for (i = 0; i < alac->nb_samples; i++) {
for (i = 0; i < outputsamples; i++) {
if(get_bits_left(&alac->gb) <= 0)
return -1;
for (ch = 0; ch < channels; ch++)
@@ -368,9 +460,14 @@ static int decode_element(AVCodecContext *avctx, void *data, int ch_index,
}
}
for (ch = 0; ch < channels; ch++) {
int ret=rice_decompress(alac, alac->predict_error_buffer[ch],
alac->nb_samples, bps,
rice_history_mult[ch] * alac->rice_history_mult / 4);
int ret = bastardized_rice_decompress(alac,
alac->predicterror_buffer[ch],
outputsamples,
readsamplesize,
alac->setinfo_rice_initialhistory,
alac->setinfo_rice_kmodifier,
ricemodifier[ch] * alac->setinfo_rice_historymult / 4,
(1 << alac->setinfo_rice_kmodifier) - 1);
if(ret<0)
return ret;
@@ -383,145 +480,89 @@ static int decode_element(AVCodecContext *avctx, void *data, int ch_index,
* However, this prediction type is not currently used by the
* reference encoder.
*/
lpc_prediction(alac->predict_error_buffer[ch],
alac->predict_error_buffer[ch],
alac->nb_samples, bps, NULL, 31, 0);
predictor_decompress_fir_adapt(alac->predicterror_buffer[ch],
alac->predicterror_buffer[ch],
outputsamples, readsamplesize,
NULL, 31, 0);
} else if (prediction_type[ch] > 0) {
av_log(avctx, AV_LOG_WARNING, "unknown prediction type: %i\n",
prediction_type[ch]);
}
lpc_prediction(alac->predict_error_buffer[ch],
alac->output_samples_buffer[ch], alac->nb_samples,
bps, lpc_coefs[ch], lpc_order[ch], lpc_quant[ch]);
predictor_decompress_fir_adapt(alac->predicterror_buffer[ch],
alac->outputsamples_buffer[ch],
outputsamples, readsamplesize,
predictor_coef_table[ch],
predictor_coef_num[ch],
prediction_quantitization[ch]);
}
} else {
/* not compressed, easy case */
- for (i = 0; i < alac->nb_samples; i++) {
+ for (i = 0; i < outputsamples; i++) {
if(get_bits_left(&alac->gb) <= 0)
return -1;
for (ch = 0; ch < channels; ch++) {
-     alac->output_samples_buffer[ch][i] =
-         get_sbits_long(&alac->gb, alac->sample_size);
+     alac->outputsamples_buffer[ch][i] = get_sbits_long(&alac->gb,
+                                                        alac->setinfo_sample_size);
}
}
- alac->extra_bits = 0;
- decorr_shift = 0;
- decorr_left_weight = 0;
+ alac->extra_bits = 0;
+ interlacing_shift = 0;
+ interlacing_leftweight = 0;
}
+ if (get_bits(&alac->gb, 3) != 7)
+     av_log(avctx, AV_LOG_ERROR, "Error : Wrong End Of Frame\n");
- if (channels == 2 && decorr_left_weight) {
-     decorrelate_stereo(alac->output_samples_buffer, alac->nb_samples,
-                        decorr_shift, decorr_left_weight);
+ if (channels == 2 && interlacing_leftweight) {
+     decorrelate_stereo(alac->outputsamples_buffer, outputsamples,
+                        interlacing_shift, interlacing_leftweight);
}
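decorrelate_stereo() itself is unchanged between the two versions; only its argument names differ (decorr_* in n1.0, interlacing_* in n0.11). A sketch of what it does, matching the call sites above; weight and shift were read from the bitstream at the start of the compressed block:

    static void decorrelate_stereo_sketch(int32_t *buffer[2], int nb_samples,
                                          int decorr_shift, int decorr_left_weight)
    {
        int i;
        for (i = 0; i < nb_samples; i++) {
            int32_t a = buffer[0][i];        /* first decoded channel */
            int32_t b = buffer[1][i];        /* difference channel    */
            a -= (b * decorr_left_weight) >> decorr_shift;
            b += a;
            buffer[0][i] = b;                /* reconstructed left  */
            buffer[1][i] = a;                /* reconstructed right */
        }
    }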
if (alac->extra_bits) {
- append_extra_bits(alac->output_samples_buffer, alac->extra_bits_buffer,
-                   alac->extra_bits, channels, alac->nb_samples);
+ append_extra_bits(alac->outputsamples_buffer, alac->extra_bits_buffer,
+                   alac->extra_bits, alac->numchannels, outputsamples);
}
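append_extra_bits() re-attaches the low-order bits that were split off before prediction (used for 24- and 32-bit streams). A minimal sketch of the operation, with the same parameter names as the call above:

    static void append_extra_bits_sketch(int32_t *buffer[2],
                                         int32_t *extra_bits_buffer[2],
                                         int extra_bits, int channels,
                                         int nb_samples)
    {
        int i, ch;
        /* shift each predicted sample up and OR the stored LSBs back in */
        for (ch = 0; ch < channels; ch++)
            for (i = 0; i < nb_samples; i++)
                buffer[ch][i] = (buffer[ch][i] << extra_bits) |
                                extra_bits_buffer[ch][i];
    }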
- if(av_sample_fmt_is_planar(avctx->sample_fmt)) {
- switch(alac->sample_size) {
- case 16: {
-     for (ch = 0; ch < channels; ch++) {
-         int16_t *outbuffer = (int16_t *)alac->frame.extended_data[ch_index + ch];
-         for (i = 0; i < alac->nb_samples; i++)
-             *outbuffer++ = alac->output_samples_buffer[ch][i];
-     }}
+ switch(alac->setinfo_sample_size) {
+ case 16:
+     if (channels == 2) {
+         interleave_stereo_16(alac->outputsamples_buffer,
+                              (int16_t *)alac->frame.data[0], outputsamples);
+     } else {
+         int16_t *outbuffer = (int16_t *)alac->frame.data[0];
+         for (i = 0; i < outputsamples; i++) {
+             outbuffer[i] = alac->outputsamples_buffer[0][i];
+         }
+     }
break;
- case 24: {
-     for (ch = 0; ch < channels; ch++) {
-         for (i = 0; i < alac->nb_samples; i++)
-             alac->output_samples_buffer[ch][i] <<= 8;
-     }}
+ case 24:
+     if (channels == 2) {
+         interleave_stereo_24(alac->outputsamples_buffer,
+                              (int32_t *)alac->frame.data[0], outputsamples);
+     } else {
+         int32_t *outbuffer = (int32_t *)alac->frame.data[0];
+         for (i = 0; i < outputsamples; i++)
+             outbuffer[i] = alac->outputsamples_buffer[0][i] << 8;
+     }
break;
+ case 32:
+     if (channels == 2) {
+         interleave_stereo_32(alac->outputsamples_buffer,
+                              (int32_t *)alac->frame.data[0], outputsamples);
+     } else {
+         int32_t *outbuffer = (int32_t *)alac->frame.data[0];
+         for (i = 0; i < outputsamples; i++)
+             outbuffer[i] = alac->outputsamples_buffer[0][i];
+     }
+     break;
}
- }else{
- switch(alac->sample_size) {
- case 16: {
-     int16_t *outbuffer = ((int16_t *)alac->frame.extended_data[0]) + ch_index;
-     for (i = 0; i < alac->nb_samples; i++) {
-         for (ch = 0; ch < channels; ch++)
-             *outbuffer++ = alac->output_samples_buffer[ch][i];
-         outbuffer += alac->channels - channels;
-     }
- }
- break;
- case 24: {
-     int32_t *outbuffer = ((int32_t *)alac->frame.extended_data[0]) + ch_index;
-     for (i = 0; i < alac->nb_samples; i++) {
-         for (ch = 0; ch < channels; ch++)
-             *outbuffer++ = alac->output_samples_buffer[ch][i] << 8;
-         outbuffer += alac->channels - channels;
-     }
- }
- break;
- case 32: {
-     int32_t *outbuffer = ((int32_t *)alac->frame.extended_data[0]) + ch_index;
-     for (i = 0; i < alac->nb_samples; i++) {
-         for (ch = 0; ch < channels; ch++)
-             *outbuffer++ = alac->output_samples_buffer[ch][i];
-         outbuffer += alac->channels - channels;
-     }
- }
- break;
- }
- }
- return 0;
}
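One detail shared by the 24-bit paths on both sides of this hunk: 24-bit samples travel in 32-bit containers, so each version shifts them left by 8 (n1.0 in its per-channel conversion loops, n0.11 inside interleave_stereo_24()), placing the significant bits at the top of the S32 sample.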
static int alac_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
ALACContext *alac = avctx->priv_data;
- enum RawDataBlockType element;
- int channels;
- int ch, ret, got_end;
- init_get_bits(&alac->gb, avpkt->data, avpkt->size * 8);
- got_end = 0;
- alac->nb_samples = 0;
- ch = 0;
- while (get_bits_left(&alac->gb) >= 3) {
-     element = get_bits(&alac->gb, 3);
-     if (element == TYPE_END) {
-         got_end = 1;
-         break;
-     }
-     if (element > TYPE_CPE && element != TYPE_LFE) {
-         av_log(avctx, AV_LOG_ERROR, "syntax element unsupported: %d\n", element);
-         return AVERROR_PATCHWELCOME;
-     }
-     channels = (element == TYPE_CPE) ? 2 : 1;
-     if (ch + channels > alac->channels) {
-         av_log(avctx, AV_LOG_ERROR, "invalid element channel count\n");
-         return AVERROR_INVALIDDATA;
-     }
-     ret = decode_element(avctx, data,
-                          alac_channel_layout_offsets[alac->channels - 1][ch],
-                          channels);
-     if (ret < 0 && get_bits_left(&alac->gb))
-         return ret;
-     ch += channels;
- }
- if (!got_end) {
-     av_log(avctx, AV_LOG_ERROR, "no end tag found. incomplete packet.\n");
-     return AVERROR_INVALIDDATA;
- }
- if (avpkt->size * 8 - get_bits_count(&alac->gb) > 8) {
-     av_log(avctx, AV_LOG_ERROR, "Error : %d bits left\n",
-            avpkt->size * 8 - get_bits_count(&alac->gb));
- }
+ if (input_buffer_size * 8 - get_bits_count(&alac->gb) > 8)
+     av_log(avctx, AV_LOG_ERROR, "Error : %d bits left\n", input_buffer_size * 8 - get_bits_count(&alac->gb));
*got_frame_ptr = 1;
*(AVFrame *)data = alac->frame;
- return avpkt->size;
+ return input_buffer_size;
}
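The n1.0 element loop above reuses the AAC raw-data-block tags; for reference, the values behind TYPE_CPE, TYPE_LFE and TYPE_END as declared in libavcodec/aac.h of that era:

    enum RawDataBlockType {
        TYPE_SCE,   /* 0: single channel element   */
        TYPE_CPE,   /* 1: channel pair element     */
        TYPE_CCE,   /* 2: coupling channel element */
        TYPE_LFE,   /* 3: low frequency element    */
        TYPE_DSE,   /* 4: data stream element      */
        TYPE_PCE,   /* 5: program config element   */
        TYPE_FIL,   /* 6: fill element             */
        TYPE_END,   /* 7: frame end marker         */
    };

This is also why the n0.11 code checks get_bits(&alac->gb, 3) != 7 at the end of a frame: the trailing 3-bit tag it expects is the same TYPE_END marker that n1.0 consumes in its element loop.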
static av_cold int alac_decode_close(AVCodecContext *avctx)
@@ -529,10 +570,9 @@ static av_cold int alac_decode_close(AVCodecContext *avctx)
ALACContext *alac = avctx->priv_data;
int ch;
- for (ch = 0; ch < FFMIN(alac->channels, 2); ch++) {
-     av_freep(&alac->predict_error_buffer[ch]);
-     if (!alac->direct_output)
-         av_freep(&alac->output_samples_buffer[ch]);
+ for (ch = 0; ch < alac->numchannels; ch++) {
+     av_freep(&alac->predicterror_buffer[ch]);
+     av_freep(&alac->outputsamples_buffer[ch]);
av_freep(&alac->extra_bits_buffer[ch]);
}
@@ -542,17 +582,14 @@ static av_cold int alac_decode_close(AVCodecContext *avctx)
static int allocate_buffers(ALACContext *alac)
{
int ch;
- int buf_size = alac->max_samples_per_frame * sizeof(int32_t);
+ for (ch = 0; ch < alac->numchannels; ch++) {
+     int buf_size = alac->setinfo_max_samples_per_frame * sizeof(int32_t);
- for (ch = 0; ch < FFMIN(alac->channels, 2); ch++) {
-     FF_ALLOC_OR_GOTO(alac->avctx, alac->predict_error_buffer[ch],
+     FF_ALLOC_OR_GOTO(alac->avctx, alac->predicterror_buffer[ch],
buf_size, buf_alloc_fail);
-     alac->direct_output = alac->sample_size > 16 && av_sample_fmt_is_planar(alac->avctx->sample_fmt);
-     if (!alac->direct_output) {
-         FF_ALLOC_OR_GOTO(alac->avctx, alac->output_samples_buffer[ch],
-                          buf_size, buf_alloc_fail);
-     }
+     FF_ALLOC_OR_GOTO(alac->avctx, alac->outputsamples_buffer[ch],
+                      buf_size, buf_alloc_fail);
FF_ALLOC_OR_GOTO(alac->avctx, alac->extra_bits_buffer[ch],
buf_size, buf_alloc_fail);
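The allocation change above reflects n1.0's direct_output optimization: when the output format is planar and wider than 16 bits, decoded samples are written straight into the AVFrame's channel planes, so no intermediate output_samples_buffer is allocated or freed for those channels. n0.11 always decodes into its own per-channel buffers and interleaves into frame.data[0] afterwards.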
@@ -572,18 +609,19 @@ static int alac_set_info(ALACContext *alac)
bytestream2_skipu(&gb, 12); // size:4, alac:4, version:4
- alac->max_samples_per_frame = bytestream2_get_be32u(&gb);
- if (!alac->max_samples_per_frame || alac->max_samples_per_frame > INT_MAX) {
-     av_log(alac->avctx, AV_LOG_ERROR, "max samples per frame invalid: %u\n",
-            alac->max_samples_per_frame);
+ /* buffer size / 2 ? */
+ alac->setinfo_max_samples_per_frame = bytestream2_get_be32u(&gb);
+ if (alac->setinfo_max_samples_per_frame >= UINT_MAX/4){
+     av_log(alac->avctx, AV_LOG_ERROR,
+            "setinfo_max_samples_per_frame too large\n");
return AVERROR_INVALIDDATA;
}
bytestream2_skipu(&gb, 1); // compatible version
- alac->sample_size = bytestream2_get_byteu(&gb);
- alac->rice_history_mult = bytestream2_get_byteu(&gb);
- alac->rice_initial_history = bytestream2_get_byteu(&gb);
- alac->rice_limit = bytestream2_get_byteu(&gb);
- alac->channels = bytestream2_get_byteu(&gb);
+ alac->setinfo_sample_size = bytestream2_get_byteu(&gb);
+ alac->setinfo_rice_historymult = bytestream2_get_byteu(&gb);
+ alac->setinfo_rice_initialhistory = bytestream2_get_byteu(&gb);
+ alac->setinfo_rice_kmodifier = bytestream2_get_byteu(&gb);
+ alac->numchannels = bytestream2_get_byteu(&gb);
bytestream2_get_be16u(&gb); // maxRun
bytestream2_get_be32u(&gb); // max coded frame size
bytestream2_get_be32u(&gb); // average bitrate
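Both versions parse the same 36-byte ALAC "magic cookie" carried in extradata; the diff only renames the struct fields. The layout implied by the bytestream2 reads above (ALAC_EXTRADATA_SIZE = 36):

    /* bytes  0-3   atom size
     * bytes  4-7   'alac' tag
     * bytes  8-11  version/flags          (skipped above)
     * bytes 12-15  max samples per frame  (big endian)
     * byte  16     compatible version     (skipped)
     * byte  17     sample size (bit depth)
     * byte  18     Rice history multiplier
     * byte  19     Rice initial history
     * byte  20     Rice k modifier / limit
     * byte  21     channel count
     * bytes 22-23  maxRun
     * bytes 24-27  max coded frame size
     * bytes 28-31  average bitrate
     * bytes 32-35  sample rate            (big endian)
     */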
@@ -595,48 +633,45 @@ static int alac_set_info(ALACContext *alac)
static av_cold int alac_decode_init(AVCodecContext * avctx)
{
int ret;
- int req_packed;
ALACContext *alac = avctx->priv_data;
alac->avctx = avctx;
/* initialize from the extradata */
if (alac->avctx->extradata_size != ALAC_EXTRADATA_SIZE) {
av_log(avctx, AV_LOG_ERROR, "expected %d extradata bytes\n",
av_log(avctx, AV_LOG_ERROR, "alac: expected %d extradata bytes\n",
ALAC_EXTRADATA_SIZE);
return -1;
}
if (alac_set_info(alac)) {
av_log(avctx, AV_LOG_ERROR, "set_info failed\n");
av_log(avctx, AV_LOG_ERROR, "alac: set_info failed\n");
return -1;
}
- req_packed = LIBAVCODEC_VERSION_MAJOR < 55 && !av_sample_fmt_is_planar(avctx->request_sample_fmt);
- switch (alac->sample_size) {
- case 16: avctx->sample_fmt = req_packed ? AV_SAMPLE_FMT_S16 : AV_SAMPLE_FMT_S16P;
+ switch (alac->setinfo_sample_size) {
+ case 16: avctx->sample_fmt = AV_SAMPLE_FMT_S16;
break;
- case 24:
- case 32: avctx->sample_fmt = req_packed ? AV_SAMPLE_FMT_S32 : AV_SAMPLE_FMT_S32P;
+ case 32:
+ case 24: avctx->sample_fmt = AV_SAMPLE_FMT_S32;
break;
default: av_log_ask_for_sample(avctx, "Sample depth %d is not supported.\n",
-     alac->sample_size);
+     alac->setinfo_sample_size);
return AVERROR_PATCHWELCOME;
}
- if (alac->channels < 1) {
+ if (alac->numchannels < 1) {
av_log(avctx, AV_LOG_WARNING, "Invalid channel count\n");
-     alac->channels = avctx->channels;
+     alac->numchannels = avctx->channels;
} else {
-     if (alac->channels > MAX_CHANNELS)
-         alac->channels = avctx->channels;
+     if (alac->numchannels > MAX_CHANNELS)
+         alac->numchannels = avctx->channels;
else
-         avctx->channels = alac->channels;
+         avctx->channels = alac->numchannels;
}
if (avctx->channels > MAX_CHANNELS) {
av_log(avctx, AV_LOG_ERROR, "Unsupported channel count: %d\n",
avctx->channels);
return AVERROR_PATCHWELCOME;
}
- avctx->channel_layout = alac_channel_layouts[alac->channels - 1];
if ((ret = allocate_buffers(alac)) < 0) {
av_log(avctx, AV_LOG_ERROR, "Error allocating buffers\n");
@@ -652,7 +687,7 @@ static av_cold int alac_decode_init(AVCodecContext * avctx)
AVCodec ff_alac_decoder = {
.name = "alac",
.type = AVMEDIA_TYPE_AUDIO,
- .id = AV_CODEC_ID_ALAC,
+ .id = CODEC_ID_ALAC,
.priv_data_size = sizeof(ALACContext),
.init = alac_decode_init,
.close = alac_decode_close,

libavcodec/alacenc.c
View File
@@ -78,15 +78,17 @@ typedef struct AlacEncodeContext {
} AlacEncodeContext;
- static void init_sample_buffers(AlacEncodeContext *s, int16_t **input_samples)
+ static void init_sample_buffers(AlacEncodeContext *s,
+                                 const int16_t *input_samples)
{
int ch, i;
for (ch = 0; ch < s->avctx->channels; ch++) {
-     int32_t *bptr = s->sample_buf[ch];
-     const int16_t *sptr = input_samples[ch];
-     for (i = 0; i < s->frame_size; i++)
-         bptr[i] = sptr[i];
+     const int16_t *sptr = input_samples + ch;
+     for (i = 0; i < s->frame_size; i++) {
+         s->sample_buf[ch][i] = *sptr;
+         sptr += s->avctx->channels;
+     }
}
}
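The change to init_sample_buffers() is purely one of input layout: n1.0 receives planar audio (one int16_t array per channel via extended_data), while n0.11 deinterleaves packed L R L R ... samples itself, stepping through the buffer with a channel-count stride as shown above.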
@@ -345,7 +347,8 @@ static void alac_entropy_coder(AlacEncodeContext *s)
}
}
- static int write_frame(AlacEncodeContext *s, AVPacket *avpkt, int16_t **samples)
+ static int write_frame(AlacEncodeContext *s, AVPacket *avpkt,
+                        const int16_t *samples)
{
int i, j;
int prediction_type = 0;
@@ -355,10 +358,8 @@ static int write_frame(AlacEncodeContext *s, AVPacket *avpkt, int16_t **samples)
if (s->verbatim) {
write_frame_header(s);
/* samples are channel-interleaved in verbatim mode */
-         for (i = 0; i < s->frame_size; i++)
-             for (j = 0; j < s->avctx->channels; j++)
-                 put_sbits(pb, 16, samples[j][i]);
+         for (i = 0; i < s->frame_size * s->avctx->channels; i++)
+             put_sbits(pb, 16, *samples++);
} else {
init_sample_buffers(s, samples);
write_frame_header(s);
@@ -425,6 +426,11 @@ static av_cold int alac_encode_init(AVCodecContext *avctx)
avctx->frame_size = s->frame_size = DEFAULT_FRAME_SIZE;
+ if (avctx->sample_fmt != AV_SAMPLE_FMT_S16) {
+     av_log(avctx, AV_LOG_ERROR, "only pcm_s16 input samples are supported\n");
+     return -1;
+ }
/* TODO: Correctly implement multi-channel ALAC.
It is similar to multi-channel AAC, in that it has a series of
single-channel (SCE), channel-pair (CPE), and LFE elements. */
@@ -536,11 +542,11 @@ static int alac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
{
AlacEncodeContext *s = avctx->priv_data;
int out_bytes, max_frame_size, ret;
- int16_t **samples = (int16_t **)frame->extended_data;
+ const int16_t *samples = (const int16_t *)frame->data[0];
s->frame_size = frame->nb_samples;
- if (frame->nb_samples < DEFAULT_FRAME_SIZE)
+ if (avctx->frame_size < DEFAULT_FRAME_SIZE)
max_frame_size = get_max_frame_size(s->frame_size, avctx->channels,
DEFAULT_SAMPLE_SIZE);
else
@@ -568,13 +574,13 @@ static int alac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
AVCodec ff_alac_encoder = {
.name = "alac",
.type = AVMEDIA_TYPE_AUDIO,
- .id = AV_CODEC_ID_ALAC,
+ .id = CODEC_ID_ALAC,
.priv_data_size = sizeof(AlacEncodeContext),
.init = alac_encode_init,
.encode2 = alac_encode_frame,
.close = alac_encode_close,
.capabilities = CODEC_CAP_SMALL_LAST_FRAME,
- .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16P,
+ .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
.long_name = NULL_IF_CONFIG_SMALL("ALAC (Apple Lossless Audio Codec)"),
};
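With either version the encoder is exercised the same way from the command line, e.g. ffmpeg -i input.wav -acodec alac output.m4a. The practical difference is in format negotiation: n1.0 advertises only planar AV_SAMPLE_FMT_S16P in sample_fmts and lets the generic layer convert, whereas n0.11 advertises packed AV_SAMPLE_FMT_S16 and additionally guards against other formats at init time (the check added in alac_encode_init above).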

Some files were not shown because too many files have changed in this diff.