Compare commits


1 Commit

Author:  Michael Niedermayer
SHA1:    9f8d8c57fb
Message: update for 0.9
         Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Date:    2011-12-11 21:24:41 +01:00
2174 changed files with 108796 additions and 159445 deletions

.gitignore (vendored): 29 changes

@@ -1,17 +1,9 @@
.config
.version
*.a
*.o
*.d
*.def
*.dll
*.exe
*.ho
*.lib
*.pc
*.so
*.so.*
*.ver
*-example
*-test
*_g
@@ -23,26 +15,26 @@ config.*
doc/*.1
doc/*.html
doc/*.pod
doc/fate.txt
doxy
ffmpeg
ffplay
ffprobe
ffserver
avconv
doc/avoptions_codec.texi
doc/avoptions_format.texi
doc/print_options
doc/examples/decoding_encoding
doc/examples/filtering_audio
doc/examples/filtering_video
doc/examples/metadata
doc/examples/muxing
libavcodec/*_tablegen
libavcodec/*_tables.c
libavcodec/*_tables.h
libavcodec/codec_names.h
libavcodec/libavcodec*
libavcore/libavcore*
libavdevice/libavdevice*
libavfilter/libavfilter*
libavformat/libavformat*
libavutil/avconfig.h
libavutil/libavutil*
libpostproc/libpostproc*
libswresample/libswresample*
libswscale/libswscale*
tests/audiogen
tests/base64
tests/data
@@ -51,11 +43,8 @@ tests/tiny_psnr
tests/videogen
tests/vsynth1
tests/vsynth2
tools/aviocat
tools/cws2fws
tools/ffeval
tools/graph2dot
tools/ismindex
tools/lavfi-showfiltfmts
tools/pktdumper
tools/probetest


@@ -500,3 +500,5 @@ necessary. Here is a sample; alter the names:
Ty Coon, President of Vice
That's all there is to it!


@@ -1,89 +1,6 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.
version next:
version 0.11:
Fixes: CVE-2012-2772, CVE-2012-2774, CVE-2012-2775, CVE-2012-2776, CVE-2012-2777,
CVE-2012-2779, CVE-2012-2782, CVE-2012-2783, CVE-2012-2784, CVE-2012-2785,
CVE-2012-2786, CVE-2012-2787, CVE-2012-2788, CVE-2012-2789, CVE-2012-2790,
CVE-2012-2791, CVE-2012-2792, CVE-2012-2793, CVE-2012-2794, CVE-2012-2795,
CVE-2012-2796, CVE-2012-2797, CVE-2012-2798, CVE-2012-2799, CVE-2012-2800,
CVE-2012-2801, CVE-2012-2802, CVE-2012-2803, CVE-2012-2804,
- v408 Quicktime and Microsoft AYUV Uncompressed 4:4:4:4 encoder and decoder
- setfield filter
- CDXL demuxer and decoder
- Apple ProRes encoder
- ffprobe -count_packets and -count_frames options
- Sun Rasterfile Encoder
- ID3v2 attached pictures reading and writing
- WMA Lossless decoder
- bluray protocol
- blackdetect filter
- libutvideo encoder wrapper (--enable-libutvideo)
- swapuv filter
- bbox filter
- XBM encoder and decoder
- RealAudio Lossless decoder
- ZeroCodec decoder
- tile video filter
- Metal Gear Solid: The Twin Snakes demuxer
- OpenEXR image decoder
- removelogo filter
- drop support for ffmpeg without libavfilter
- drawtext video filter: fontconfig support
- ffmpeg -benchmark_all option
- super2xsai filter ported from libmpcodecs
- add libavresample audio conversion library for compatibility
- MicroDVD decoder
- Avid Meridien (AVUI) encoder and decoder
- accept + prefix to -pix_fmt option to disable automatic conversions.
- complete audio filtering in libavfilter and ffmpeg
- add fps filter
- audio split filter
- vorbis parser
- png parser
- audio mix filter
version 0.10:
- Fixes: CVE-2011-3929, CVE-2011-3934, CVE-2011-3935, CVE-2011-3936,
CVE-2011-3937, CVE-2011-3940, CVE-2011-3941, CVE-2011-3944,
CVE-2011-3945, CVE-2011-3946, CVE-2011-3947, CVE-2011-3949,
CVE-2011-3950, CVE-2011-3951, CVE-2011-3952
- v410 Quicktime Uncompressed 4:4:4 10-bit encoder and decoder
- SBaGen (SBG) binaural beats script demuxer
- OpenMG Audio muxer
- Timecode extraction in DV and MOV
- thumbnail video filter
- XML output in ffprobe
- asplit audio filter
- tinterlace video filter
- astreamsync audio filter
- amerge audio filter
- ISMV (Smooth Streaming) muxer
- GSM audio parser
- SMJPEG muxer
- XWD encoder and decoder
- Automatic thread count based on the detected number of (available) CPU cores
- y41p Brooktree Uncompressed 4:1:1 12-bit encoder and decoder
- ffprobe -show_error option
- Avid 1:1 10-bit RGB Packer codec
- v308 Quicktime Uncompressed 4:4:4 encoder and decoder
- yuv4 libquicktime packed 4:2:0 encoder and decoder
- ffprobe -show_frames option
- silencedetect audio filter
- ffprobe -show_program_version, -show_library_versions, -show_versions options
- rv34: frame-level multi-threading
- optimized iMDCT transform on x86 using SSE for mpegaudiodec
- Improved PGS subtitle decoder
- dumpgraph option to lavfi device
- r210 and r10k encoders
- ffwavesynth decoder
- aviocat tool
- ffeval tool
version 0.9:
- openal input device added
@@ -208,7 +125,7 @@ easier to use. The changes are:
- pan audio filter
- IFF Amiga Continuous Bitmap (ACBM) decoder
- ass filter
- CRI ADX audio format muxer and demuxer
- CRI ADX audio format demuxer
- Playstation Portable PMP format demuxer
- Microsoft Windows ICO demuxer
- life source
@@ -217,13 +134,11 @@ easier to use. The changes are:
- new option: -report
- Dxtory capture format decoder
- cellauto source
- Simple segmenting muxer
- Indeo 4 decoder
- SMJPEG demuxer
version 0.8:
- many many things we forgot because we rather write code than changelogs
- WebM support in Matroska de/muxer
- low overhead Ogg muxing
@@ -787,7 +702,7 @@ version 0.4.5:
- MPEG-4 vol header fixes (Jonathan Marsden <snmjbm at pacbell.net>)
- ARM optimizations (Lionel Ulmer <lionel.ulmer at free.fr>).
- Windows porting of file converter
- added MJPEG raw format (input/output)
- added MJPEG raw format (input/ouput)
- added JPEG image format support (input/output)


@@ -31,13 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.
PROJECT_NUMBER = 0.11.1
# With the PROJECT_LOGO tag one can specify a logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
# pixels and the maximum width should not exceed 200 pixels. Doxygen will
# copy the logo to the output directory.
PROJECT_LOGO =
PROJECT_NUMBER = 0.9
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
# base path where the generated documentation will be put.
@@ -631,7 +625,8 @@ EXCLUDE_SYMLINKS = NO
# for example use the pattern */test/*
EXCLUDE_PATTERNS = *.git \
*.d
*.d \
avconv.c
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
@@ -766,7 +761,7 @@ ALPHABETICAL_INDEX = YES
# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
# in which this list will be split (can be a number in the range [1..20])
COLS_IN_ALPHA_INDEX = 2
COLS_IN_ALPHA_INDEX = 5
# In case all classes in a project start with a common prefix, all
# classes will be put under the same header in the alphabetical index.
@@ -800,13 +795,13 @@ HTML_FILE_EXTENSION = .html
# each generated HTML page. If it is left blank doxygen will generate a
# standard header.
HTML_HEADER = doc/doxy/header.html
HTML_HEADER =
# The HTML_FOOTER tag can be used to specify a personal HTML footer for
# each generated HTML page. If it is left blank doxygen will generate a
# standard footer.
HTML_FOOTER = doc/doxy/footer.html
HTML_FOOTER =
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
# style sheet that is used by each HTML page. It can be used to
@@ -815,7 +810,7 @@ HTML_FOOTER = doc/doxy/footer.html
# the style sheet file to the HTML output directory, so don't put your own
# stylesheet in the HTML output directory as well, or it will be erased!
HTML_STYLESHEET = doc/doxy/doxy_stylesheet.css
HTML_STYLESHEET =
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
# Doxygen will adjust the colors in the stylesheet and background images


@@ -20,12 +20,8 @@ Specifically, the GPL parts of FFmpeg are
There are a handful of files under other licensing terms, namely:
* The files libavcodec/jfdctfst.c, libavcodec/jfdctint_template.c and
libavcodec/jrevdct.c are taken from libjpeg, see the top of the files for
licensing details. Specifically note that you must credit the IJG in the
documentation accompanying your program if you only distribute executables.
You must also indicate any changes including additions and deletions to
those three files in the documentation.
* The files libavcodec/jfdctfst.c, libavcodec/jfdctint.c, libavcodec/jrevdct.c
are taken from libjpeg, see the top of the files for licensing details.
Should you, for whatever reason, prefer to use version 3 of the (L)GPL, then
the configure parameter --enable-version3 will activate this licensing option


@@ -4,7 +4,7 @@ FFmpeg maintainers
Below is a list of the people maintaining different parts of the
FFmpeg code.
Please try to keep entries where you are the maintainer up to date!
Please try to keep entries where you are the maintainer upto date!
Names in () mean that the maintainer currently has no time to maintain the code.
A CC after the name means that the maintainer prefers to be CC-ed on patches
@@ -87,8 +87,6 @@ Generic Parts:
bitstream.c, bitstream.h Michael Niedermayer
CABAC:
cabac.h, cabac.c Michael Niedermayer
codec names:
codec_names.sh Nicolas George
DSP utilities:
dsputils.c, dsputils.h Michael Niedermayer
entropy coding:
@@ -142,7 +140,6 @@ Codecs:
dv.c Roman Shaposhnik
eacmv*, eaidct*, eat* Peter Ross
ffv1.c Michael Niedermayer
ffwavesynth.c Nicolas George
flac* Justin Ruggles
flashsv* Benjamin Larsson
flicvideo.c Mike Melanson
@@ -159,12 +156,10 @@ Codecs:
indeo5* Kostya Shishkov
interplayvideo.c Mike Melanson
ivi* Kostya Shishkov
jacosub* Clément Bœsch
jpeg_ls.c Kostya Shishkov
jvdec.c Peter Ross
kmvc.c Kostya Shishkov
lcl*.c Roberto Togni, Reimar Doeffinger
libcelt_dec.c Nicolas George
libgsm.c Michel Bardiaux
libdirac* David Conrad
libopenjpeg.c Jaikrishnan Menon
@@ -172,7 +167,6 @@ Codecs:
libschroedinger* David Conrad
libspeexdec.c Justin Ruggles
libtheoraenc.c David Conrad
libutvideo* Derek Buitenhuis
libvorbis.c David Conrad
libxavs.c Stefan Gehrer
libx264.c Mans Rullgard, Jason Garrett-Glaser
@@ -224,7 +218,6 @@ Codecs:
tta.c Alex Beregszaszi, Jaikrishnan Menon
txd.c Ivo van Poorten
ulti* Kostya Shishkov
v410*.c Derek Buitenhuis
vb.c Kostya Shishkov
vble.c Derek Buitenhuis
vc1* Kostya Shishkov
@@ -245,7 +238,6 @@ Codecs:
xan.c Mike Melanson
xl.c Kostya Shishkov
xvmc.c Ivan Kalvachev
zerocodec.c Derek Buitenhuis
zmbv* Kostya Shishkov
Hardware acceleration:
@@ -272,10 +264,6 @@ libavfilter
===========
Video filters:
graphdump.c Nicolas George
af_amerge.c Nicolas George
af_astreamsync.c Nicolas George
af_pan.c Nicolas George
vsrc_mandelbrot.c Michael Niedermayer
vf_yadif.c Michael Niedermayer
@@ -316,7 +304,6 @@ Muxers/Demuxers:
ipmovie.c Mike Melanson
img2.c Michael Niedermayer
iss.c Stefan Gehrer
jacosub* Clément Bœsch
jvdec.c Peter Ross
libmodplug.c Clément Bœsch
libnut.c Oded Shimon
@@ -337,7 +324,6 @@ Muxers/Demuxers:
msnwc_tcp.c Ramiro Polla
mtv.c Reynaldo H. Verdejo Pinochet
mxf* Baptiste Coudurier
mxfdec.c Tomas Härdin
nsvdec.c Francois Revol
nut.c Michael Niedermayer
nuv.c Reimar Doeffinger
@@ -357,7 +343,6 @@ Muxers/Demuxers:
rtpdec_asf.* Ronald S. Bultje
rtpenc_mpv.*, rtpenc_aac.* Martin Storsjo
rtsp.c Luca Barbato
sbgdec.c Nicolas George
sdp.c Martin Storsjo
segafilm.c Mike Melanson
siff.c Kostya Shishkov
@@ -374,7 +359,6 @@ Muxers/Demuxers:
wv.c Kostya Shishkov
Protocols:
bluray.c Petri Hintukainen
http.c Ronald S. Bultje
mms*.c Ronald S. Bultje
udp.c Luca Abeni
@@ -400,8 +384,10 @@ x86 Michael Niedermayer
Releases
========
0.11 Michael Niedermayer
0.10 Michael Niedermayer
0.5 *Deprecated/Unmaintained*
0.6 *Deprecated/Unmaintained*
0.7 Michael Niedermayer
0.8 Michael Niedermayer
@@ -423,7 +409,6 @@ Loren Merritt ABD9 08F4 C920 3F65 D8BE 35D7 1540 DAA7 060F 56DE
Lou Logan 7D68 DC73 CBEF EABB 671A B6CF 621C 2E28 82F8 DC3A
Luca Barbato 6677 4209 213C 8843 5B67 29E7 E84C 78C2 84E9 0E34
Michael Niedermayer 9FF2 128B 147E F673 0BAD F133 611E C787 040B 0FAB
Nicolas George 24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93
Panagiotis Issaris 6571 13A3 33D9 3726 F728 AA98 F643 B12E ECF3 E029
Peter Ross A907 E02F A6E5 0CD2 34CD 20D2 6760 79C5 AC40 DD6B
Reimar Döffinger C61D 16E5 9E2C D10C 8958 38A4 0899 A2B9 06D4 D9C7


@@ -8,9 +8,9 @@ vpath %.S $(SRC_PATH)
vpath %.asm $(SRC_PATH)
vpath %.v $(SRC_PATH)
vpath %.texi $(SRC_PATH)
vpath %/fate_config.sh.template $(SRC_PATH)
PROGS-$(CONFIG_FFMPEG) += ffmpeg
PROGS-$(CONFIG_AVCONV) += avconv
PROGS-$(CONFIG_FFPLAY) += ffplay
PROGS-$(CONFIG_FFPROBE) += ffprobe
PROGS-$(CONFIG_FFSERVER) += ffserver
@@ -19,11 +19,11 @@ PROGS := $(PROGS-yes:%=%$(EXESUF))
INSTPROGS = $(PROGS-yes:%=%$(PROGSSUF)$(EXESUF))
OBJS = $(PROGS-yes:%=%.o) cmdutils.o
TESTTOOLS = audiogen videogen rotozoom tiny_psnr base64
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
HOSTPROGS := $(TESTTOOLS:%=tests/%)
TOOLS = qt-faststart trasher
TOOLS-$(CONFIG_ZLIB) += cws2fws
BASENAMES = ffmpeg ffplay ffprobe ffserver
BASENAMES = ffmpeg avconv ffplay ffprobe ffserver
ALLPROGS = $(BASENAMES:%=%$(PROGSSUF)$(EXESUF))
ALLPROGS_G = $(BASENAMES:%=%$(PROGSSUF)_g$(EXESUF))
ALLMANPAGES = $(BASENAMES:%=%.1)
@@ -31,7 +31,6 @@ ALLMANPAGES = $(BASENAMES:%=%.1)
FFLIBS-$(CONFIG_AVDEVICE) += avdevice
FFLIBS-$(CONFIG_AVFILTER) += avfilter
FFLIBS-$(CONFIG_AVFORMAT) += avformat
FFLIBS-$(CONFIG_AVRESAMPLE) += avresample
FFLIBS-$(CONFIG_AVCODEC) += avcodec
FFLIBS-$(CONFIG_POSTPROC) += postproc
FFLIBS-$(CONFIG_SWRESAMPLE)+= swresample
@@ -39,8 +38,7 @@ FFLIBS-$(CONFIG_SWSCALE) += swscale
FFLIBS := avutil
DATA_FILES := $(wildcard $(SRC_PATH)/presets/*.ffpreset) $(SRC_PATH)/doc/ffprobe.xsd
EXAMPLES_FILES := $(wildcard $(SRC_PATH)/doc/examples/*.c) $(SRC_PATH)/doc/examples/Makefile
DATA_FILES := $(wildcard $(SRC_PATH)/presets/*.ffpreset)
SKIPHEADERS = cmdutils_common_opts.h
@@ -49,7 +47,7 @@ include $(SRC_PATH)/common.mak
FF_EXTRALIBS := $(FFEXTRALIBS)
FF_DEP_LIBS := $(DEP_LIBS)
all: $(PROGS)
all: $(filter-out avconv, $(PROGS))
$(PROGS): %$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
$(CP) $< $@$(PROGSSUF)
@@ -66,11 +64,9 @@ config.h: .config
@-printf '\nWARNING: $(?F) newer than config.h, rerun configure\n\n'
@-tput sgr0 2>/dev/null
SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS \
ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
ALTIVEC-OBJS ARMV5TE-OBJS ARMV6-OBJS ARMVFP-OBJS MMI-OBJS \
MMX-OBJS NEON-OBJS VIS-OBJS YASM-OBJS \
OBJS TESTOBJS
SUBDIR_VARS := OBJS FFLIBS CLEANFILES DIRS TESTPROGS EXAMPLES SKIPHEADERS \
ALTIVEC-OBJS MMX-OBJS NEON-OBJS X86-OBJS YASM-OBJS-FFT YASM-OBJS \
HOSTPROGS BUILT_HEADERS TESTOBJS ARCH_HEADERS ARMV6-OBJS TOOLS
define RESET
$(1) :=
@@ -81,8 +77,6 @@ define DOSUBDIR
$(foreach V,$(SUBDIR_VARS),$(eval $(call RESET,$(V))))
SUBDIR := $(1)/
include $(SRC_PATH)/$(1)/Makefile
-include $(SRC_PATH)/$(1)/$(ARCH)/Makefile
include $(SRC_PATH)/library.mak
endef
$(foreach D,$(FFLIBS),$(eval $(call DOSUBDIR,lib$(D))))
@@ -126,10 +120,9 @@ install-progs: install-progs-yes $(PROGS)
$(Q)mkdir -p "$(BINDIR)"
$(INSTALL) -c -m 755 $(INSTPROGS) "$(BINDIR)"
install-data: $(DATA_FILES) $(EXAMPLES_FILES)
$(Q)mkdir -p "$(DATADIR)/examples"
install-data: $(DATA_FILES)
$(Q)mkdir -p "$(DATADIR)"
$(INSTALL) -m 644 $(DATA_FILES) "$(DATADIR)"
$(INSTALL) -m 644 $(EXAMPLES_FILES) "$(DATADIR)/examples"
uninstall: uninstall-libs uninstall-headers uninstall-progs uninstall-data
@@ -163,8 +156,6 @@ coverage-html: coverage.info
$(Q)genhtml -o $@ $<
$(Q)touch $@
check: all alltools checkheaders examples testprogs fate
include $(SRC_PATH)/doc/Makefile
include $(SRC_PATH)/tests/Makefile
@@ -179,5 +170,5 @@ $(sort $(OBJDIRS)):
# so this saves some time on slow systems.
.SUFFIXES:
.PHONY: all all-yes alltools check *clean config install*
.PHONY: all all-yes alltools *clean config examples install*
.PHONY: testprogs uninstall*

README: 8 changes

@@ -4,15 +4,9 @@ FFmpeg README
1) Documentation
----------------
* Read the documentation in the doc/ directory in git.
You can also view it online at http://ffmpeg.org/documentation.html
* Read the documentation in the doc/ directory.
2) Licensing
------------
* See the LICENSE file.
3) Build and Install
--------------------
* See the INSTALL file.


@@ -1 +1 @@
0.11.1
0.9


@@ -1 +1 @@
0.11.1
0.9


@@ -1,13 +0,0 @@
OBJS-$(HAVE_ARMV5TE) += $(ARMV5TE-OBJS) $(ARMV5TE-OBJS-yes)
OBJS-$(HAVE_ARMV6) += $(ARMV6-OBJS) $(ARMV6-OBJS-yes)
OBJS-$(HAVE_ARMVFP) += $(ARMVFP-OBJS) $(ARMVFP-OBJS-yes)
OBJS-$(HAVE_NEON) += $(NEON-OBJS) $(NEON-OBJS-yes)
OBJS-$(HAVE_MMI) += $(MMI-OBJS) $(MMI-OBJS-yes)
OBJS-$(HAVE_ALTIVEC) += $(ALTIVEC-OBJS) $(ALTIVEC-OBJS-yes)
OBJS-$(HAVE_VIS) += $(VIS-OBJS) $(VIS-OBJS-yes)
OBJS-$(HAVE_MMX) += $(MMX-OBJS) $(MMX-OBJS-yes)
OBJS-$(HAVE_YASM) += $(YASM-OBJS) $(YASM-OBJS-yes)

avconv.c (new file): 4397 additions; diff suppressed because it is too large


@@ -32,13 +32,8 @@
#include "libavformat/avformat.h"
#include "libavfilter/avfilter.h"
#include "libavdevice/avdevice.h"
#include "libavresample/avresample.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#if CONFIG_POSTPROC
#include "libpostproc/postprocess.h"
#endif
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/mathematics.h"
#include "libavutil/parseutils.h"
@@ -56,20 +51,17 @@
#endif
struct SwsContext *sws_opts;
SwrContext *swr_opts;
AVDictionary *format_opts, *codec_opts;
const int this_year = 2012;
static const int this_year = 2011;
static FILE *report_file;
void init_opts(void)
{
#if CONFIG_SWSCALE
sws_opts = sws_getContext(16, 16, 0, 16, 16, 0, SWS_BICUBIC,
NULL, NULL, NULL);
sws_opts = sws_getContext(16, 16, 0, 16, 16, 0, SWS_BICUBIC, NULL, NULL, NULL);
#endif
swr_opts = swr_alloc();
}
void uninit_opts(void)
@@ -78,12 +70,11 @@ void uninit_opts(void)
sws_freeContext(sws_opts);
sws_opts = NULL;
#endif
swr_free(&swr_opts);
av_dict_free(&format_opts);
av_dict_free(&codec_opts);
}
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
void log_callback_help(void* ptr, int level, const char* fmt, va_list vl)
{
vfprintf(stdout, fmt, vl);
}
@@ -102,20 +93,19 @@ static void log_callback_report(void *ptr, int level, const char *fmt, va_list v
fflush(report_file);
}
double parse_number_or_die(const char *context, const char *numstr, int type,
double min, double max)
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
{
char *tail;
const char *error;
double d = av_strtod(numstr, &tail);
if (*tail)
error = "Expected number for %s but found: %s\n";
error= "Expected number for %s but found: %s\n";
else if (d < min || d > max)
error = "The value for %s was %s which is not within %f - %f\n";
else if (type == OPT_INT64 && (int64_t)d != d)
error = "Expected int64 for %s but found %s\n";
error= "The value for %s was %s which is not within %f - %f\n";
else if(type == OPT_INT64 && (int64_t)d != d)
error= "Expected int64 for %s but found %s\n";
else if (type == OPT_INT && (int)d != d)
error = "Expected int for %s but found %s\n";
error= "Expected int for %s but found %s\n";
else
return d;
av_log(NULL, AV_LOG_FATAL, error, context, numstr, min, max);
@@ -123,8 +113,7 @@ double parse_number_or_die(const char *context, const char *numstr, int type,
return 0;
}
int64_t parse_time_or_die(const char *context, const char *timestr,
int is_duration)
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
{
int64_t us;
if (av_parse_time(&us, timestr, is_duration) < 0) {
@@ -135,14 +124,13 @@ int64_t parse_time_or_die(const char *context, const char *timestr,
return us;
}
void show_help_options(const OptionDef *options, const char *msg, int mask,
int value)
void show_help_options(const OptionDef *options, const char *msg, int mask, int value)
{
const OptionDef *po;
int first;
first = 1;
for (po = options; po->name != NULL; po++) {
for(po = options; po->name != NULL; po++) {
char buf[64];
if ((po->flags & mask) == value) {
if (first) {
@@ -169,8 +157,7 @@ void show_help_children(const AVClass *class, int flags)
show_help_children(child, flags);
}
static const OptionDef *find_option(const OptionDef *po, const char *name)
{
static const OptionDef* find_option(const OptionDef *po, const char *name){
const char *p = strchr(name, ':');
int len = p ? p - name : strlen(name);
@@ -217,8 +204,8 @@ static void prepare_app_arguments(int *argc_ptr, char ***argv_ptr)
buffsize += WideCharToMultiByte(CP_UTF8, 0, argv_w[i], -1,
NULL, 0, NULL, NULL);
win32_argv_utf8 = av_mallocz(sizeof(char *) * (win32_argc + 1) + buffsize);
argstr_flat = (char *)win32_argv_utf8 + sizeof(char *) * (win32_argc + 1);
win32_argv_utf8 = av_mallocz(sizeof(char*) * (win32_argc + 1) + buffsize);
argstr_flat = (char*)win32_argv_utf8 + sizeof(char*) * (win32_argc + 1);
if (win32_argv_utf8 == NULL) {
LocalFree(argv_w);
return;
@@ -243,8 +230,8 @@ static inline void prepare_app_arguments(int *argc_ptr, char ***argv_ptr)
}
#endif /* WIN32 && !__MINGW32CE__ */
int parse_option(void *optctx, const char *opt, const char *arg,
const OptionDef *options)
int parse_option(void *optctx, const char *opt, const char *arg, const OptionDef *options)
{
const OptionDef *po;
int bool_val = 1;
@@ -255,12 +242,14 @@ int parse_option(void *optctx, const char *opt, const char *arg,
if (!po->name && opt[0] == 'n' && opt[1] == 'o') {
/* handle 'no' bool option */
po = find_option(options, opt + 2);
if ((po->name && (po->flags & OPT_BOOL)))
bool_val = 0;
if (!(po->name && (po->flags & OPT_BOOL)))
goto unknown_opt;
bool_val = 0;
}
if (!po->name)
po = find_option(options, "default");
if (!po->name) {
unknown_opt:
av_log(NULL, AV_LOG_ERROR, "Unrecognized option '%s'\n", opt);
return AVERROR(EINVAL);
}
@@ -271,14 +260,13 @@ int parse_option(void *optctx, const char *opt, const char *arg,
/* new-style options contain an offset into optctx, old-style address of
* a global var*/
dst = po->flags & (OPT_OFFSET | OPT_SPEC) ? (uint8_t *)optctx + po->u.off
: po->u.dst_ptr;
dst = po->flags & (OPT_OFFSET|OPT_SPEC) ? (uint8_t*)optctx + po->u.off : po->u.dst_ptr;
if (po->flags & OPT_SPEC) {
SpecifierOpt **so = dst;
char *p = strchr(opt, ':');
dstcount = (int *)(so + 1);
dstcount = (int*)(so + 1);
*so = grow_array(*so, sizeof(**so), dstcount, *dstcount + 1);
(*so)[*dstcount - 1].specifier = av_strdup(p ? p + 1 : "");
dst = &(*so)[*dstcount - 1].u;
@@ -287,25 +275,24 @@ int parse_option(void *optctx, const char *opt, const char *arg,
if (po->flags & OPT_STRING) {
char *str;
str = av_strdup(arg);
*(char **)dst = str;
*(char**)dst = str;
} else if (po->flags & OPT_BOOL) {
*(int *)dst = bool_val;
*(int*)dst = bool_val;
} else if (po->flags & OPT_INT) {
*(int *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
*(int*)dst = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
} else if (po->flags & OPT_INT64) {
*(int64_t *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT64_MIN, INT64_MAX);
*(int64_t*)dst = parse_number_or_die(opt, arg, OPT_INT64, INT64_MIN, INT64_MAX);
} else if (po->flags & OPT_TIME) {
*(int64_t *)dst = parse_time_or_die(opt, arg, 1);
*(int64_t*)dst = parse_time_or_die(opt, arg, 1);
} else if (po->flags & OPT_FLOAT) {
*(float *)dst = parse_number_or_die(opt, arg, OPT_FLOAT, -INFINITY, INFINITY);
*(float*)dst = parse_number_or_die(opt, arg, OPT_FLOAT, -INFINITY, INFINITY);
} else if (po->flags & OPT_DOUBLE) {
*(double *)dst = parse_number_or_die(opt, arg, OPT_DOUBLE, -INFINITY, INFINITY);
*(double*)dst = parse_number_or_die(opt, arg, OPT_DOUBLE, -INFINITY, INFINITY);
} else if (po->u.func_arg) {
int ret = po->flags & OPT_FUNC2 ? po->u.func2_arg(optctx, opt, arg)
: po->u.func_arg(opt, arg);
int ret = po->flags & OPT_FUNC2 ? po->u.func2_arg(optctx, opt, arg) :
po->u.func_arg(opt, arg);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR,
"Failed to set value '%s' for option '%s'\n", arg, opt);
av_log(NULL, AV_LOG_ERROR, "Failed to set value '%s' for option '%s'\n", arg, opt);
return ret;
}
}
@@ -315,7 +302,7 @@ int parse_option(void *optctx, const char *opt, const char *arg,
}
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options,
void (*parse_arg_function)(void *, const char*))
void (* parse_arg_function)(void *, const char*))
{
const char *opt;
int optindex, handleoptions = 1, ret;
@@ -345,8 +332,10 @@ void parse_options(void *optctx, int argc, char **argv, const OptionDef *options
}
}
int locate_option(int argc, char **argv, const OptionDef *options,
const char *optname)
/*
* Return index of option opt in argv or 0 if not found.
*/
static int locate_option(int argc, char **argv, const OptionDef *options, const char *optname)
{
const OptionDef *po;
int i;
@@ -420,27 +409,24 @@ void parse_loglevel(int argc, char **argv, const OptionDef *options)
#define FLAGS(o) ((o)->type == AV_OPT_TYPE_FLAGS) ? AV_DICT_APPEND : 0
int opt_default(const char *opt, const char *arg)
{
const AVOption *oc, *of, *os, *oswr = NULL;
const AVOption *oc, *of, *os;
char opt_stripped[128];
const char *p;
const AVClass *cc = avcodec_get_class(), *fc = avformat_get_class(), *sc, *swr_class;
const AVClass *cc = avcodec_get_class(), *fc = avformat_get_class(), *sc;
if (!(p = strchr(opt, ':')))
p = opt + strlen(opt);
av_strlcpy(opt_stripped, opt, FFMIN(sizeof(opt_stripped), p - opt + 1));
if ((oc = av_opt_find(&cc, opt_stripped, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) ||
((opt[0] == 'v' || opt[0] == 'a' || opt[0] == 's') &&
(oc = av_opt_find(&cc, opt + 1, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ))))
if ((oc = av_opt_find(&cc, opt_stripped, NULL, 0, AV_OPT_SEARCH_CHILDREN|AV_OPT_SEARCH_FAKE_OBJ)) ||
((opt[0] == 'v' || opt[0] == 'a' || opt[0] == 's') &&
(oc = av_opt_find(&cc, opt+1, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ))))
av_dict_set(&codec_opts, opt, arg, FLAGS(oc));
if ((of = av_opt_find(&fc, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)))
if ((of = av_opt_find(&fc, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)))
av_dict_set(&format_opts, opt, arg, FLAGS(of));
#if CONFIG_SWSCALE
sc = sws_get_class();
if ((os = av_opt_find(&sc, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
if ((os = av_opt_find(&sc, opt, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
// XXX we only support sws_flags, not arbitrary sws options
int ret = av_opt_set(sws_opts, opt, arg, 0);
if (ret < 0) {
@@ -449,17 +435,8 @@ int opt_default(const char *opt, const char *arg)
}
}
#endif
swr_class = swr_get_class();
if (!oc && !of && !os && (oswr = av_opt_find(&swr_class, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
int ret = av_opt_set(swr_opts, opt, arg, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error setting option %s.\n", opt);
return ret;
}
}
if (oc || of || os || oswr)
if (oc || of || os)
return 0;
av_log(NULL, AV_LOG_ERROR, "Unrecognized option '%s'\n", opt);
return AVERROR_OPTION_NOT_FOUND;
@@ -532,32 +509,6 @@ int opt_report(const char *opt)
return 0;
}
int opt_max_alloc(const char *opt, const char *arg)
{
char *tail;
size_t max;
max = strtol(arg, &tail, 10);
if (*tail) {
av_log(NULL, AV_LOG_FATAL, "Invalid max_alloc \"%s\".\n", arg);
exit_program(1);
}
av_max_alloc(max);
return 0;
}
int opt_cpuflags(const char *opt, const char *arg)
{
int ret;
unsigned flags = av_get_cpu_flags();
if ((ret = av_parse_cpu_caps(&flags, arg)) < 0)
return ret;
av_force_cpu_flags(flags);
return 0;
}
int opt_codec_debug(const char *opt, const char *arg)
{
av_log_set_level(AV_LOG_DEBUG);
@@ -592,15 +543,13 @@ static int warned_cfg = 0;
#define INDENT 1
#define SHOW_VERSION 2
#define SHOW_CONFIG 4
#define SHOW_COPYRIGHT 8
#define PRINT_LIB_INFO(libname, LIBNAME, flags, level) \
if (CONFIG_##LIBNAME) { \
const char *indent = flags & INDENT? " " : ""; \
if (flags & SHOW_VERSION) { \
unsigned int version = libname##_version(); \
av_log(NULL, level, \
"%slib%-11s %2d.%3d.%3d / %2d.%3d.%3d\n", \
av_log(NULL, level, "%slib%-9s %2d.%3d.%2d / %2d.%3d.%2d\n",\
indent, #libname, \
LIB##LIBNAME##_VERSION_MAJOR, \
LIB##LIBNAME##_VERSION_MINOR, \
@@ -629,42 +578,24 @@ static void print_all_libs_info(int flags, int level)
PRINT_LIB_INFO(avformat, AVFORMAT, flags, level);
PRINT_LIB_INFO(avdevice, AVDEVICE, flags, level);
PRINT_LIB_INFO(avfilter, AVFILTER, flags, level);
// PRINT_LIB_INFO(avresample, AVRESAMPLE, flags, level);
PRINT_LIB_INFO(swscale, SWSCALE, flags, level);
PRINT_LIB_INFO(swresample,SWRESAMPLE, flags, level);
#if CONFIG_POSTPROC
PRINT_LIB_INFO(postproc, POSTPROC, flags, level);
#endif
}
static void print_program_info(int flags, int level)
void show_banner(void)
{
const char *indent = flags & INDENT? " " : "";
av_log(NULL, level, "%s version " FFMPEG_VERSION, program_name);
if (flags & SHOW_COPYRIGHT)
av_log(NULL, level, " Copyright (c) %d-%d the FFmpeg developers",
program_birth_year, this_year);
av_log(NULL, level, "\n");
av_log(NULL, level, "%sbuilt on %s %s with %s %s\n",
indent, __DATE__, __TIME__, CC_TYPE, CC_VERSION);
av_log(NULL, level, "%sconfiguration: " FFMPEG_CONFIGURATION "\n", indent);
}
void show_banner(int argc, char **argv, const OptionDef *options)
{
int idx = locate_option(argc, argv, options, "version");
if (idx)
return;
print_program_info (INDENT|SHOW_COPYRIGHT, AV_LOG_INFO);
av_log(NULL, AV_LOG_INFO, "%s version " FFMPEG_VERSION ", Copyright (c) %d-%d the FFmpeg developers\n",
program_name, program_birth_year, this_year);
av_log(NULL, AV_LOG_INFO, " built on %s %s with %s %s\n",
__DATE__, __TIME__, CC_TYPE, CC_VERSION);
av_log(NULL, AV_LOG_INFO, " configuration: " FFMPEG_CONFIGURATION "\n");
print_all_libs_info(INDENT|SHOW_CONFIG, AV_LOG_INFO);
print_all_libs_info(INDENT|SHOW_VERSION, AV_LOG_INFO);
}
int opt_version(const char *opt, const char *arg) {
av_log_set_callback(log_callback_help);
print_program_info (0 , AV_LOG_INFO);
printf("%s " FFMPEG_VERSION "\n", program_name);
print_all_libs_info(SHOW_VERSION, AV_LOG_INFO);
return 0;
}
@@ -741,133 +672,137 @@ int opt_license(const char *opt, const char *arg)
int opt_formats(const char *opt, const char *arg)
{
AVInputFormat *ifmt = NULL;
AVOutputFormat *ofmt = NULL;
AVInputFormat *ifmt=NULL;
AVOutputFormat *ofmt=NULL;
const char *last_name;
printf("File formats:\n"
" D. = Demuxing supported\n"
" .E = Muxing supported\n"
" --\n");
last_name = "000";
for (;;) {
int decode = 0;
int encode = 0;
const char *name = NULL;
const char *long_name = NULL;
printf(
"File formats:\n"
" D. = Demuxing supported\n"
" .E = Muxing supported\n"
" --\n");
last_name= "000";
for(;;){
int decode=0;
int encode=0;
const char *name=NULL;
const char *long_name=NULL;
while ((ofmt = av_oformat_next(ofmt))) {
if ((name == NULL || strcmp(ofmt->name, name) < 0) &&
strcmp(ofmt->name, last_name) > 0) {
name = ofmt->name;
long_name = ofmt->long_name;
encode = 1;
while((ofmt= av_oformat_next(ofmt))) {
if((name == NULL || strcmp(ofmt->name, name)<0) &&
strcmp(ofmt->name, last_name)>0){
name= ofmt->name;
long_name= ofmt->long_name;
encode=1;
}
}
while ((ifmt = av_iformat_next(ifmt))) {
if ((name == NULL || strcmp(ifmt->name, name) < 0) &&
strcmp(ifmt->name, last_name) > 0) {
name = ifmt->name;
long_name = ifmt->long_name;
encode = 0;
while((ifmt= av_iformat_next(ifmt))) {
if((name == NULL || strcmp(ifmt->name, name)<0) &&
strcmp(ifmt->name, last_name)>0){
name= ifmt->name;
long_name= ifmt->long_name;
encode=0;
}
if (name && strcmp(ifmt->name, name) == 0)
decode = 1;
if(name && strcmp(ifmt->name, name)==0)
decode=1;
}
if (name == NULL)
if(name==NULL)
break;
last_name = name;
last_name= name;
printf(" %s%s %-15s %s\n",
decode ? "D" : " ",
encode ? "E" : " ",
name,
printf(
" %s%s %-15s %s\n",
decode ? "D":" ",
encode ? "E":" ",
name,
long_name ? long_name:" ");
}
return 0;
}
static char get_media_type_char(enum AVMediaType type)
{
static const char map[AVMEDIA_TYPE_NB] = {
[AVMEDIA_TYPE_VIDEO] = 'V',
[AVMEDIA_TYPE_AUDIO] = 'A',
[AVMEDIA_TYPE_DATA] = 'D',
[AVMEDIA_TYPE_SUBTITLE] = 'S',
[AVMEDIA_TYPE_ATTACHMENT] = 'T',
};
return type >= 0 && type < AVMEDIA_TYPE_NB && map[type] ? map[type] : '?';
}
int opt_codecs(const char *opt, const char *arg)
{
AVCodec *p = NULL, *p2;
AVCodec *p=NULL, *p2;
const char *last_name;
printf("Codecs:\n"
" D..... = Decoding supported\n"
" .E.... = Encoding supported\n"
" ..V... = Video codec\n"
" ..A... = Audio codec\n"
" ..S... = Subtitle codec\n"
" ...S.. = Supports draw_horiz_band\n"
" ....D. = Supports direct rendering method 1\n"
" .....T = Supports weird frame truncation\n"
" ------\n");
printf(
"Codecs:\n"
" D..... = Decoding supported\n"
" .E.... = Encoding supported\n"
" ..V... = Video codec\n"
" ..A... = Audio codec\n"
" ..S... = Subtitle codec\n"
" ...S.. = Supports draw_horiz_band\n"
" ....D. = Supports direct rendering method 1\n"
" .....T = Supports weird frame truncation\n"
" ------\n");
last_name= "000";
for (;;) {
int decode = 0;
int encode = 0;
int cap = 0;
for(;;){
int decode=0;
int encode=0;
int cap=0;
const char *type_str;
p2 = NULL;
while ((p = av_codec_next(p))) {
if ((p2 == NULL || strcmp(p->name, p2->name) < 0) &&
strcmp(p->name, last_name) > 0) {
p2 = p;
decode = encode = cap = 0;
p2=NULL;
while((p= av_codec_next(p))) {
if((p2==NULL || strcmp(p->name, p2->name)<0) &&
strcmp(p->name, last_name)>0){
p2= p;
decode= encode= cap=0;
}
if (p2 && strcmp(p->name, p2->name) == 0) {
if (av_codec_is_decoder(p))
decode = 1;
if (av_codec_is_encoder(p))
encode = 1;
if(p2 && strcmp(p->name, p2->name)==0){
if(p->decode) decode=1;
if(p->encode) encode=1;
cap |= p->capabilities;
}
}
if (p2 == NULL)
if(p2==NULL)
break;
last_name = p2->name;
last_name= p2->name;
printf(" %s%s%c%s%s%s %-15s %s",
decode ? "D" : (/* p2->decoder ? "d" : */ " "),
encode ? "E" : " ",
get_media_type_char(p2->type),
cap & CODEC_CAP_DRAW_HORIZ_BAND ? "S" : " ",
cap & CODEC_CAP_DR1 ? "D" : " ",
cap & CODEC_CAP_TRUNCATED ? "T" : " ",
p2->name,
p2->long_name ? p2->long_name : "");
#if 0
if (p2->decoder && decode == 0)
printf(" use %s for decoding", p2->decoder->name);
#endif
switch(p2->type) {
case AVMEDIA_TYPE_VIDEO:
type_str = "V";
break;
case AVMEDIA_TYPE_AUDIO:
type_str = "A";
break;
case AVMEDIA_TYPE_SUBTITLE:
type_str = "S";
break;
default:
type_str = "?";
break;
}
printf(
" %s%s%s%s%s%s %-15s %s",
decode ? "D": (/*p2->decoder ? "d":*/" "),
encode ? "E":" ",
type_str,
cap & CODEC_CAP_DRAW_HORIZ_BAND ? "S":" ",
cap & CODEC_CAP_DR1 ? "D":" ",
cap & CODEC_CAP_TRUNCATED ? "T":" ",
p2->name,
p2->long_name ? p2->long_name : "");
/* if(p2->decoder && decode==0)
printf(" use %s for decoding", p2->decoder->name);*/
printf("\n");
}
printf("\n");
printf("Note, the names of encoders and decoders do not always match, so there are\n"
"several cases where the above table shows encoder only or decoder only entries\n"
"even though both encoding and decoding are supported. For example, the h263\n"
"decoder corresponds to the h263 and h263p encoders, for file formats it is even\n"
"worse.\n");
printf(
"Note, the names of encoders and decoders do not always match, so there are\n"
"several cases where the above table shows encoder only or decoder only entries\n"
"even though both encoding and decoding are supported. For example, the h263\n"
"decoder corresponds to the h263 and h263p encoders, for file formats it is even\n"
"worse.\n");
return 0;
}
int opt_bsfs(const char *opt, const char *arg)
{
AVBitStreamFilter *bsf = NULL;
AVBitStreamFilter *bsf=NULL;
printf("Bitstream filters:\n");
while ((bsf = av_bitstream_filter_next(bsf)))
while((bsf = av_bitstream_filter_next(bsf)))
printf("%s\n", bsf->name);
printf("\n");
return 0;
@@ -875,47 +810,31 @@ int opt_bsfs(const char *opt, const char *arg)
int opt_protocols(const char *opt, const char *arg)
{
void *opaque = NULL;
const char *name;
URLProtocol *up=NULL;
printf("Supported file protocols:\n"
"Input:\n");
while ((name = avio_enum_protocols(&opaque, 0)))
printf("%s\n", name);
printf("Output:\n");
while ((name = avio_enum_protocols(&opaque, 1)))
printf("%s\n", name);
"I.. = Input supported\n"
".O. = Output supported\n"
"..S = Seek supported\n"
"FLAGS NAME\n"
"----- \n");
while((up = av_protocol_next(up)))
printf("%c%c%c %s\n",
up->url_read ? 'I' : '.',
up->url_write ? 'O' : '.',
up->url_seek ? 'S' : '.',
up->name);
return 0;
}
int opt_filters(const char *opt, const char *arg)
{
AVFilter av_unused(**filter) = NULL;
char descr[64], *descr_cur;
int i, j;
const AVFilterPad *pad;
printf("Filters:\n");
#if CONFIG_AVFILTER
while ((filter = av_filter_next(filter)) && *filter) {
descr_cur = descr;
for (i = 0; i < 2; i++) {
if (i) {
*(descr_cur++) = '-';
*(descr_cur++) = '>';
}
pad = i ? (*filter)->outputs : (*filter)->inputs;
for (j = 0; pad[j].name; j++) {
if (descr_cur >= descr + sizeof(descr) - 4)
break;
*(descr_cur++) = get_media_type_char(pad[j].type);
}
if (!j)
*(descr_cur++) = '|';
}
*descr_cur = 0;
printf("%-16s %-10s %s\n", (*filter)->name, descr, (*filter)->description);
}
while ((filter = av_filter_next(filter)) && *filter)
printf("%-16s %s\n", (*filter)->name, (*filter)->description);
#endif
return 0;
}
@@ -924,14 +843,15 @@ int opt_pix_fmts(const char *opt, const char *arg)
{
enum PixelFormat pix_fmt;
printf("Pixel formats:\n"
"I.... = Supported Input format for conversion\n"
".O... = Supported Output format for conversion\n"
"..H.. = Hardware accelerated format\n"
"...P. = Paletted format\n"
"....B = Bitstream format\n"
"FLAGS NAME NB_COMPONENTS BITS_PER_PIXEL\n"
"-----\n");
printf(
"Pixel formats:\n"
"I.... = Supported Input format for conversion\n"
".O... = Supported Output format for conversion\n"
"..H.. = Hardware accelerated format\n"
"...P. = Paletted format\n"
"....B = Bitstream format\n"
"FLAGS NAME NB_COMPONENTS BITS_PER_PIXEL\n"
"-----\n");
#if !CONFIG_SWSCALE
# define sws_isSupportedInput(x) 0
@@ -981,8 +901,7 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
FILE *f = fopen(filename, "rb");
if (!f) {
av_log(NULL, AV_LOG_ERROR, "Cannot read file '%s': %s\n", filename,
strerror(errno));
av_log(NULL, AV_LOG_ERROR, "Cannot read file '%s': %s\n", filename, strerror(errno));
return AVERROR(errno);
}
fseek(f, 0, SEEK_END);
@@ -1013,14 +932,14 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
}
FILE *get_preset_file(char *filename, size_t filename_size,
const char *preset_name, int is_path,
const char *codec_name)
const char *preset_name, int is_path, const char *codec_name)
{
FILE *f = NULL;
int i;
const char *base[3] = { getenv("FFMPEG_DATADIR"),
getenv("HOME"),
FFMPEG_DATADIR, };
const char *base[3]= { getenv("FFMPEG_DATADIR"),
getenv("HOME"),
FFMPEG_DATADIR,
};
if (is_path) {
av_strlcpy(filename, preset_name, filename_size);
@@ -1046,14 +965,11 @@ FILE *get_preset_file(char *filename, size_t filename_size,
for (i = 0; i < 3 && !f; i++) {
if (!base[i])
continue;
snprintf(filename, filename_size, "%s%s/%s.ffpreset", base[i],
i != 1 ? "" : "/.ffmpeg", preset_name);
snprintf(filename, filename_size, "%s%s/%s.ffpreset", base[i], i != 1 ? "" : "/.ffmpeg", preset_name);
f = fopen(filename, "r");
if (!f && codec_name) {
snprintf(filename, filename_size,
"%s%s/%s-%s.ffpreset",
base[i], i != 1 ? "" : "/.ffmpeg", codec_name,
preset_name);
"%s%s/%s-%s.ffpreset", base[i], i != 1 ? "" : "/.ffmpeg", codec_name, preset_name);
f = fopen(filename, "r");
}
}
@@ -1064,23 +980,22 @@ FILE *get_preset_file(char *filename, size_t filename_size,
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
{
if (*spec <= '9' && *spec >= '0') /* opt:index */
if (*spec <= '9' && *spec >= '0') /* opt:index */
return strtol(spec, NULL, 0) == st->index;
else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' ||
*spec == 't') { /* opt:[vasdt] */
else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' || *spec == 't') { /* opt:[vasdt] */
enum AVMediaType type;
switch (*spec++) {
case 'v': type = AVMEDIA_TYPE_VIDEO; break;
case 'a': type = AVMEDIA_TYPE_AUDIO; break;
case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
case 'd': type = AVMEDIA_TYPE_DATA; break;
case 'v': type = AVMEDIA_TYPE_VIDEO; break;
case 'a': type = AVMEDIA_TYPE_AUDIO; break;
case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
case 'd': type = AVMEDIA_TYPE_DATA; break;
case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
default: av_assert0(0);
default: abort(); // never reached, silence warning
}
if (type != st->codec->codec_type)
return 0;
if (*spec++ == ':') { /* possibly followed by :index */
if (*spec++ == ':') { /* possibly followed by :index */
int i, index = strtol(spec, NULL, 0);
for (i = 0; i < s->nb_streams; i++)
if (s->streams[i]->codec->codec_type == type && index-- == 0)
@@ -1099,9 +1014,8 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
if (*endptr++ == ':') {
int stream_idx = strtol(endptr, NULL, 0);
return stream_idx >= 0 &&
stream_idx < s->programs[i]->nb_stream_indexes &&
st->index == s->programs[i]->stream_index[stream_idx];
return (stream_idx >= 0 && stream_idx < s->programs[i]->nb_stream_indexes &&
st->index == s->programs[i]->stream_index[stream_idx]);
}
for (j = 0; j < s->programs[i]->nb_stream_indexes; j++)
@@ -1109,12 +1023,6 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
return 1;
}
return 0;
} else if (*spec == '#') {
int sid;
char *endptr;
sid = strtol(spec + 1, &endptr, 0);
if (!*endptr)
return st->id == sid;
} else if (!*spec) /* empty specifier, matches everything */
return 1;
@@ -1122,13 +1030,11 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
return AVERROR(EINVAL);
}
AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec,
AVFormatContext *s, AVStream *st)
AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec, AVFormatContext *s, AVStream *st)
{
AVDictionary *ret = NULL;
AVDictionaryEntry *t = NULL;
int flags = s->oformat ? AV_OPT_FLAG_ENCODING_PARAM
: AV_OPT_FLAG_DECODING_PARAM;
int flags = s->oformat ? AV_OPT_FLAG_ENCODING_PARAM : AV_OPT_FLAG_DECODING_PARAM;
char prefix = 0;
const AVClass *cc = avcodec_get_class();
@@ -1136,18 +1042,9 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec,
return NULL;
switch (codec->type) {
case AVMEDIA_TYPE_VIDEO:
prefix = 'v';
flags |= AV_OPT_FLAG_VIDEO_PARAM;
break;
case AVMEDIA_TYPE_AUDIO:
prefix = 'a';
flags |= AV_OPT_FLAG_AUDIO_PARAM;
break;
case AVMEDIA_TYPE_SUBTITLE:
prefix = 's';
flags |= AV_OPT_FLAG_SUBTITLE_PARAM;
break;
case AVMEDIA_TYPE_VIDEO: prefix = 'v'; flags |= AV_OPT_FLAG_VIDEO_PARAM; break;
case AVMEDIA_TYPE_AUDIO: prefix = 'a'; flags |= AV_OPT_FLAG_AUDIO_PARAM; break;
case AVMEDIA_TYPE_SUBTITLE: prefix = 's'; flags |= AV_OPT_FLAG_SUBTITLE_PARAM; break;
}
while (t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX)) {
@@ -1162,14 +1059,10 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec,
}
if (av_opt_find(&cc, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ) ||
(codec && codec->priv_class &&
av_opt_find(&codec->priv_class, t->key, NULL, flags,
AV_OPT_SEARCH_FAKE_OBJ)))
(codec && codec->priv_class && av_opt_find(&codec->priv_class, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ)))
av_dict_set(&ret, t->key, t->value, 0);
else if (t->key[0] == prefix &&
av_opt_find(&cc, t->key + 1, NULL, flags,
AV_OPT_SEARCH_FAKE_OBJ))
av_dict_set(&ret, t->key + 1, t->value, 0);
else if (t->key[0] == prefix && av_opt_find(&cc, t->key+1, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ))
av_dict_set(&ret, t->key+1, t->value, 0);
if (p)
*p = ':';
@@ -1177,8 +1070,7 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec,
return ret;
}
AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,
AVDictionary *codec_opts)
AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
{
int i;
AVDictionary **opts;
@@ -1187,13 +1079,11 @@ AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,
return NULL;
opts = av_mallocz(s->nb_streams * sizeof(*opts));
if (!opts) {
av_log(NULL, AV_LOG_ERROR,
"Could not alloc memory for stream options.\n");
av_log(NULL, AV_LOG_ERROR, "Could not alloc memory for stream options.\n");
return NULL;
}
for (i = 0; i < s->nb_streams; i++)
opts[i] = filter_codec_opts(codec_opts, avcodec_find_decoder(s->streams[i]->codec->codec_id),
s, s->streams[i]);
opts[i] = filter_codec_opts(codec_opts, avcodec_find_decoder(s->streams[i]->codec->codec_id), s, s->streams[i]);
return opts;
}
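
To make the stream-specifier syntax handled by check_stream_specifier() above concrete, here is a minimal standalone C sketch. It is not FFmpeg's code: the names are illustrative and index matching is simplified to a single global stream index, whereas the real function resolves per-type and per-program indices against the whole stream list. A specifier starting with a digit matches by stream index; a leading v/a/s/d/t matches by media type, optionally followed by ':' and an index.

    #include <stdio.h>
    #include <stdlib.h>

    enum MediaType { TYPE_VIDEO, TYPE_AUDIO, TYPE_SUBTITLE,
                     TYPE_DATA, TYPE_ATTACHMENT };

    static int match_spec(const char *spec, enum MediaType st_type, int st_index)
    {
        if (*spec >= '0' && *spec <= '9')            /* opt:index */
            return strtol(spec, NULL, 0) == st_index;
        enum MediaType type;
        switch (*spec++) {                           /* opt:[vasdt] */
        case 'v': type = TYPE_VIDEO;      break;
        case 'a': type = TYPE_AUDIO;      break;
        case 's': type = TYPE_SUBTITLE;   break;
        case 'd': type = TYPE_DATA;       break;
        case 't': type = TYPE_ATTACHMENT; break;
        default:  return 0;                          /* unknown specifier */
        }
        if (type != st_type)
            return 0;
        if (*spec == ':')                            /* possibly followed by :index */
            return strtol(spec + 1, NULL, 0) == st_index;
        return 1;                                    /* bare type matches any index */
    }

    int main(void)
    {
        printf("%d\n", match_spec("a:1", TYPE_AUDIO, 1)); /* prints 1 */
        printf("%d\n", match_spec("v",   TYPE_AUDIO, 1)); /* prints 0 */
        return 0;
    }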


@@ -43,15 +43,9 @@ extern const char program_name[];
*/
extern const int program_birth_year;
/**
* this year, defined by the program for show_banner()
*/
extern const int this_year;
extern AVCodecContext *avcodec_opts[AVMEDIA_TYPE_NB];
extern AVFormatContext *avformat_opts;
extern struct SwsContext *sws_opts;
extern struct SwrContext *swr_opts;
extern AVDictionary *format_opts, *codec_opts;
/**
@@ -84,10 +78,6 @@ int opt_loglevel(const char *opt, const char *arg);
int opt_report(const char *opt);
int opt_max_alloc(const char *opt, const char *arg);
int opt_cpuflags(const char *opt, const char *arg);
int opt_codec_debug(const char *opt, const char *arg);
/**
@@ -101,15 +91,14 @@ int opt_timelimit(const char *opt, const char *arg);
* parsed or the corresponding value is invalid.
*
* @param context the context of the value to be set (e.g. the
* corresponding command line option name)
* corresponding commandline option name)
* @param numstr the string to be parsed
* @param type the type (OPT_INT64 or OPT_FLOAT) as which the
* string should be parsed
* @param min the minimum valid accepted value
* @param max the maximum valid accepted value
*/
double parse_number_or_die(const char *context, const char *numstr, int type,
double min, double max);
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max);
/**
* Parse a string specifying a time and return its corresponding
@@ -117,7 +106,7 @@ double parse_number_or_die(const char *context, const char *numstr, int type,
* the string cannot be correctly parsed.
*
* @param context the context of the value to be set (e.g. the
* corresponding command line option name)
* corresponding commandline option name)
* @param timestr the string to be parsed
* @param is_duration a flag which tells how to interpret timestr, if
* not zero timestr is interpreted as a duration, otherwise as a
@@ -125,8 +114,7 @@ double parse_number_or_die(const char *context, const char *numstr, int type,
*
* @see parse_date()
*/
int64_t parse_time_or_die(const char *context, const char *timestr,
int is_duration);
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration);
typedef struct SpecifierOpt {
char *specifier; /**< stream/chapter/program/... specifier */
@@ -172,8 +160,7 @@ typedef struct {
const char *argname;
} OptionDef;
void show_help_options(const OptionDef *options, const char *msg, int mask,
int value);
void show_help_options(const OptionDef *options, const char *msg, int mask, int value);
/**
* Show help for all options with given flags in class and all its
@@ -199,20 +186,13 @@ void parse_options(void *optctx, int argc, char **argv, const OptionDef *options
*
* @return on success 1 if arg was consumed, 0 otherwise; negative number on error
*/
int parse_option(void *optctx, const char *opt, const char *arg,
const OptionDef *options);
int parse_option(void *optctx, const char *opt, const char *arg, const OptionDef *options);
/**
* Find the '-loglevel' option in the command line args and apply it.
* Find the '-loglevel' option in the commandline args and apply it.
*/
void parse_loglevel(int argc, char **argv, const OptionDef *options);
/**
* Return index of option opt in argv or 0 if not found.
*/
int locate_option(int argc, char **argv, const OptionDef *options,
const char *optname);
/**
* Check if the given stream matches a stream specifier.
*
@@ -234,8 +214,7 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec);
* @param st A stream from s for which the options should be filtered.
* @return a pointer to the created dictionary
*/
AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec,
AVFormatContext *s, AVStream *st);
AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec, AVFormatContext *s, AVStream *st);
/**
* Setup AVCodecContext options for avformat_find_stream_info().
@@ -248,8 +227,7 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec,
* @return pointer to the created array of dictionaries, NULL if it
* cannot be created
*/
AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,
AVDictionary *codec_opts);
AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts);
/**
* Print an error message to stderr, indicating filename and a human
@@ -267,7 +245,7 @@ void print_error(const char *filename, int err);
* current version of the repository and of the libav* libraries used by
* the program.
*/
void show_banner(int argc, char **argv, const OptionDef *options);
void show_banner(void);
/**
* Print the version of the program to stdout. The version message
@@ -358,7 +336,7 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size);
* at configuration time or in a "ffpresets" folder along the executable
* on win32, in that order. If no such file is found and
* codec_name is defined, then search for a file named
* codec_name-preset_name.avpreset in the above-mentioned directories.
* codec_name-preset_name.ffpreset in the above-mentioned directories.
*
* @param filename buffer where the name of the found filename is written
* @param filename_size size in bytes of the filename buffer
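
The get_preset_file() doc comment above spells out the preset lookup order. The following standalone C sketch mirrors it under stated assumptions: the compile-time fallback directory and the preset/codec names in main() are illustrative placeholders, and error reporting is trimmed.

    #include <stdio.h>
    #include <stdlib.h>

    #ifndef FFMPEG_DATADIR
    #define FFMPEG_DATADIR "/usr/local/share/ffmpeg"  /* assumed default */
    #endif

    static FILE *find_preset(char *path, size_t size,
                             const char *preset, const char *codec)
    {
        /* Search order from the code above: $FFMPEG_DATADIR, $HOME/.ffmpeg,
         * then the compile-time data directory. */
        const char *base[3] = { getenv("FFMPEG_DATADIR"), getenv("HOME"),
                                FFMPEG_DATADIR };
        for (int i = 0; i < 3; i++) {
            if (!base[i])
                continue;
            const char *sub = i == 1 ? "/.ffmpeg" : "";  /* only $HOME gets /.ffmpeg */
            snprintf(path, size, "%s%s/%s.ffpreset", base[i], sub, preset);
            FILE *f = fopen(path, "r");
            if (!f && codec) {  /* fall back to <codec>-<preset>.ffpreset */
                snprintf(path, size, "%s%s/%s-%s.ffpreset",
                         base[i], sub, codec, preset);
                f = fopen(path, "r");
            }
            if (f)
                return f;       /* path now holds the winning filename */
        }
        return NULL;
    }

    int main(void)
    {
        char path[512];
        FILE *f = find_preset(path, sizeof(path), "fast", "libx264");
        if (f) { printf("found %s\n", path); fclose(f); }
        else   { printf("no preset found\n"); }
        return 0;
    }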


@@ -14,7 +14,4 @@
{ "loglevel", HAS_ARG, {(void*)opt_loglevel}, "set libav* logging level", "loglevel" },
{ "v", HAS_ARG, {(void*)opt_loglevel}, "set libav* logging level", "loglevel" },
{ "debug", HAS_ARG, {(void*)opt_codec_debug}, "set debug flags", "flags" },
{ "fdebug", HAS_ARG, {(void*)opt_codec_debug}, "set debug flags", "flags" },
{ "report", 0, {(void*)opt_report}, "generate a report" },
{ "max_alloc", HAS_ARG, {(void*)opt_max_alloc}, "set maximum size of a single allocated block", "bytes" },
{ "cpuflags", HAS_ARG | OPT_EXPERT, {(void*)opt_cpuflags}, "force specific cpu flags", "flags" },
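
These entries follow the table-driven OptionDef pattern used throughout cmdutils: each row names an option, says whether it takes an argument, and points at a handler. A self-contained sketch of that dispatch idea (simplified types and flags, not FFmpeg's actual OptionDef) might look like:

    #include <stdio.h>
    #include <string.h>

    typedef struct {
        const char *name;
        int has_arg;                                   /* HAS_ARG analogue */
        int (*func)(const char *opt, const char *arg); /* handler */
        const char *help;
    } Opt;

    static int set_loglevel(const char *opt, const char *arg)
    {
        printf("loglevel := %s (via -%s)\n", arg, opt);
        return 0;
    }

    static const Opt options[] = {
        { "loglevel", 1, set_loglevel, "set logging level" },
        { "v",        1, set_loglevel, "set logging level" },
        { 0 }                                          /* table terminator */
    };

    int main(int argc, char **argv)
    {
        for (int i = 1; i < argc; i++) {
            if (argv[i][0] != '-')
                continue;
            for (const Opt *o = options; o->name; o++)
                if (!strcmp(argv[i] + 1, o->name)) {
                    const char *arg = o->has_arg && i + 1 < argc ? argv[++i] : "";
                    o->func(o->name, arg);
                }
        }
        return 0;
    }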


@@ -20,7 +20,7 @@ $(foreach VAR,$(SILENT),$(eval override $(VAR) = @$($(VAR))))
$(eval INSTALL = @$(call ECHO,INSTALL,$$(^:$(SRC_DIR)/%=%)); $(INSTALL))
endif
ALLFFLIBS = avcodec avdevice avfilter avformat avresample avutil postproc swscale swresample
ALLFFLIBS = avcodec avdevice avfilter avformat avutil postproc swscale swresample
# NASM requires -I path terminated with /
IFLAGS := -I. -I$(SRC_PATH)/
@@ -73,7 +73,7 @@ COMPILE_S = $(call COMPILE,AS)
$(OBJS):
endif
include $(SRC_PATH)/arch.mak
OBJS-$(HAVE_MMX) += $(MMX-OBJS-yes)
OBJS += $(OBJS-yes)
FFLIBS := $(FFLIBS-yes) $(FFLIBS)
@@ -115,6 +115,6 @@ OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOSTOBJS) $(TESTOBJS))
CLEANSUFFIXES = *.d *.o *~ *.ho *.map *.ver *.gcno *.gcda
DISTCLEANSUFFIXES = *.pc
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a *.exp
-include $(wildcard $(OBJS:.o=.d) $(TESTOBJS:.o=.d))

configure (vendored): 578 changes; diff suppressed because it is too large


@@ -2,185 +2,27 @@ Never assume the API of libav* to be stable unless at least 1 month has passed
since the last major version increase.
The last version increases were:
libavcodec: 2012-01-27
libavdevice: 2011-04-18
libavfilter: 2011-04-18
libavformat: 2012-01-27
libavresample: 2012-04-24
libpostproc: 2011-04-18
libswresample: 2011-09-19
libswscale: 2011-06-20
libavutil: 2011-04-18
libavcodec: 2011-04-18
libavdevice: 2011-04-18
libavfilter: 2011-04-18
libavformat: 2011-04-18
libpostproc: 2011-04-18
libswscale: 2011-06-20
libavutil: 2011-04-18
API changes, most recent first:
2012-05-24 - xxxxxxx - lavu 51.54.100
Move AVPALETTE_SIZE and AVPALETTE_COUNT macros from
libavcodec/avcodec.h to libavutil/pixfmt.h.
2012-05-07 - xxxxxxx - lavf 54.5.100
Add av_guess_sample_aspect_ratio() function.
2012-04-20 - xxxxxxx - lavfi 2.70.100
Add avfilter_unref_bufferp() to avfilter.h.
2012-04-12 - xxxxxxx - lavfi 2.68.100
Install libavfilter/asrc_abuffer.h public header.
2012-03-26 - a67d9cf - lavfi 2.66.100
Add avfilter_fill_frame_from_{audio_,}buffer_ref() functions.
2012-05-15 - lavfi 2.17.0
Add support for audio filters
ac71230/a2cd9be - add video/audio buffer sink in a new installed
header buffersink.h
720c6b7 - add av_buffersrc_write_frame(), deprecate
av_vsrc_buffer_add_frame()
ab16504 - add avfilter_copy_buf_props()
9453c9e - add extended_data to AVFilterBuffer
1b8c927 - add avfilter_get_audio_buffer_ref_from_arrays()
2012-05-09 - lavu 51.30.0 - samplefmt.h
142e740 - add av_samples_copy()
6d7f617 - add av_samples_set_silence()
2012-05-09 - a5117a2 - lavc 54.13.1
For audio formats with fixed frame size, the last frame
no longer needs to be padded with silence, libavcodec
will handle this internally (effectively all encoders
behave as if they had CODEC_CAP_SMALL_LAST_FRAME set).
2012-05-07 - 828bd08 - lavc 54.13.0 - avcodec.h
Add sample_rate and channel_layout fields to AVFrame.
2012-05-01 - 4010d72 - lavr 0.0.1
Change AV_MIX_COEFF_TYPE_Q6 to AV_MIX_COEFF_TYPE_Q8.
2012-04-25 - 3527a73 - lavu 51.29.0 - cpu.h
Add av_parse_cpu_flags()
2012-04-24 - c8af852 - lavr 0.0.0
Add libavresample audio conversion library
2012-04-20 - 0c0d1bc - lavu 51.28.0 - audio_fifo.h
Add audio FIFO functions:
av_audio_fifo_free()
av_audio_fifo_alloc()
av_audio_fifo_realloc()
av_audio_fifo_write()
av_audio_fifo_read()
av_audio_fifo_drain()
av_audio_fifo_reset()
av_audio_fifo_size()
av_audio_fifo_space()
2012-04-14 - lavfi 2.16.0 - avfiltergraph.h
d7bcc71 Add avfilter_graph_parse2().
2012-04-08 - 4d693b0 - lavu 51.27.0 - samplefmt.h
Add av_get_packed_sample_fmt() and av_get_planar_sample_fmt()
2012-03-21 - b75c67d - lavu 51.43.100
Add bprint.h for bprint API.
2012-02-21 - 9cbf17e - lavc 54.4.100
Add av_get_pcm_codec() function.
2012-02-16 - 560b224 - libswr 0.7.100
Add swr_set_matrix() function.
2012-02-09 - c28e7af - lavu 51.39.100
Add a new installed header libavutil/timestamp.h with timestamp
utilities.
2012-02-06 - 70ffda3 - lavu 51.38.100
Add av_parse_ratio() function to parseutils.h.
2012-02-06 - 70ffda3 - lavu 51.38.100
Add AV_LOG_MAX_OFFSET macro to log.h.
2012-02-02 - 0eaa123 - lavu 51.37.100
Add public timecode helpers.
2012-01-24 - 0c3577b - lavfi 2.60.100
Add avfilter_graph_dump.
2012-03-05 - lavc 54.8.0
6699d07 Add av_get_exact_bits_per_sample()
9524cf7 Add av_get_audio_frame_duration()
2012-03-04 - 44fe77b - lavc 54.7.0 - avcodec.h
Add av_codec_is_encoder/decoder().
2012-03-01 - 442c132 - lavc 54.3.0 - avcodec.h
Add av_packet_shrink_side_data.
2012-02-29 - dd2a4bc - lavf 54.2.0 - avformat.h
Add AVStream.attached_pic and AV_DISPOSITION_ATTACHED_PIC,
used for dealing with attached pictures/cover art.
2012-02-25 - c9bca80 - lavu 51.24.0 - error.h
Add AVERROR_UNKNOWN
NOTE: this was backported to 0.8
2012-02-20 - e9cda85 - lavc 54.2.0
Add duration field to AVCodecParserContext
2012-02-20 - 0b42a93 - lavu 51.23.1 - mathematics.h
Add av_rescale_q_rnd()
2012-02-08 - 38d5533 - lavu 51.22.1 - pixdesc.h
Add PIX_FMT_PSEUDOPAL flag.
2012-02-08 - 52f82a1 - lavc 54.01.0
Add avcodec_encode_video2() and deprecate avcodec_encode_video().
2012-02-01 - 316fc74 - lavc 54.01.0
Add av_fast_padded_malloc() as alternative for av_realloc() when aligned
memory is required. The buffer will always have FF_INPUT_BUFFER_PADDING_SIZE
zero-padded bytes at the end.
2012-01-31 - dd6d3b0 - lavf 54.01.0
Add avformat_get_riff_video_tags() and avformat_get_riff_audio_tags().
NOTE: this was backported to 0.8
2012-01-31 - af08d9a - lavc 54.01.0
Add avcodec_is_open() function.
NOTE: this was backported to 0.8
2012-01-30 - 8b93312 - lavu 51.22.0 - intfloat.h
Add a new installed header libavutil/intfloat.h with int/float punning
functions.
NOTE: this was backported to 0.8
2012-01-25 - lavf 53.22.0
f1caf01 Allow doing av_write_frame(ctx, NULL) for flushing possible
buffered data within a muxer. Added AVFMT_ALLOW_FLUSH for
muxers supporting it (av_write_frame makes sure it is called
only for muxers with this flag).
2012-01-15 - lavc 53.34.0
New audio encoding API:
b2c75b6 Add CODEC_CAP_VARIABLE_FRAME_SIZE capability for use by audio
encoders.
5ee5fa0 Add avcodec_fill_audio_frame() as a convenience function.
b2c75b6 Add avcodec_encode_audio2() and deprecate avcodec_encode_audio().
Add AVCodec.encode2().
2012-01-12 - 3167dc9 - lavfi 2.15.0
Add a new installed header -- libavfilter/version.h -- with version macros.
2011-12-08 - a502939 - lavfi 2.52.0
Add av_buffersink_poll_frame() to buffersink.h.
2011-12-08 - 26c6fec - lavu 51.31.0
2011-12-08 - xxxxxxx - lavu 51.31.0
Add av_log_format_line.
2011-12-03 - 976b095 - lavu 51.30.0
2011-12-03 - xxxxxxx - lavu 51.30.0
Add AVERROR_BUG.
2011-11-24 - 573ffbb - lavu 51.28.1
2011-xx-xx - xxxxxxx - lavu 51.28.1
Add av_get_alt_sample_fmt() to samplefmt.h.
2011-11-03 - 96949da - lavu 51.23.0
@@ -189,24 +31,14 @@ API changes, most recent first:
2011-10-20 - b35e9e1 - lavu 51.22.0
Add av_strtok() to avstring.h.
2011-01-03 - b73ec05 - lavu 51.21.0
Add av_popcount64
2011-12-18 - 8400b12 - lavc 53.28.1
Deprecate AVFrame.age. The field is unused.
2011-12-12 - 5266045 - lavf 53.17.0
Add avformat_close_input().
Deprecate av_close_input_file() and av_close_input_stream().
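The replacement takes the address of the context pointer and resets it to
NULL; a sketch:

    AVFormatContext *ic = NULL;

    if (avformat_open_input(&ic, filename, NULL, NULL) < 0)
        exit(1);
    /* ... read packets ... */
    avformat_close_input(&ic); /* was: av_close_input_file(ic); */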
2011-12-02 - 0eea212 - lavc 53.25.0
2011-xx-xx - xxxxxxx - lavc 53.25.0
Add nb_samples and extended_data fields to AVFrame.
Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE.
Deprecate avcodec_decode_audio3() in favor of avcodec_decode_audio4().
avcodec_decode_audio4() writes output samples to an AVFrame, which allows
audio decoders to use get_buffer().
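A sketch of the AVFrame-based decode call (c is an opened decoder and
avpkt a filled input packet):

    AVFrame frame;
    int got_frame = 0, len;

    avcodec_get_frame_defaults(&frame);
    len = avcodec_decode_audio4(c, &frame, &got_frame, &avpkt);
    if (len >= 0 && got_frame) {
        /* frame.nb_samples samples are in frame.extended_data */
    }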
2011-12-04 - 560f773 - lavc 53.24.0
2011-xx-xx - xxxxxxx - lavc 53.24.0
Change AVFrame.data[4]/base[4]/linesize[4]/error[4] to [8] at next major bump.
Change AVPicture.data[4]/linesize[4] to [8] at next major bump.
Change AVCodecContext.error[4] to [8] at next major bump.
@@ -247,7 +79,7 @@ API changes, most recent first:
Add av_toupper()/av_tolower()
2011-11-05 - b6d08f4 - lavf 53.13.0
Add avformat_network_init()/avformat_network_deinit()
Add avformat_network_init()/avformat_network_uninit()
2011-10-27 - 512557b - lavc 53.15.0
Remove avcodec_parse_frame.
@@ -332,10 +164,6 @@ API changes, most recent first:
2011-08-14 - 323b930 - lavu 51.12.0
Add av_fifo_peek2(), deprecate av_fifo_peek().
2011-08-26 - lavu 51.9.0
- add41de..abc78a5 Do not include intfloat_readwrite.h,
mathematics.h, rational.h, pixfmt.h, or log.h from avutil.h.
2011-08-16 - 48f9e45 - lavf 53.8.0
Add avformat_query_codec().
@@ -358,14 +186,9 @@ API changes, most recent first:
2011-07-10 - a67c061 - lavf 53.6.0
Add avformat_find_stream_info(), deprecate av_find_stream_info().
NOTE: this was backported to 0.7
2011-07-10 - 0b950fe - lavc 53.8.0
Add avcodec_open2(), deprecate avcodec_open().
NOTE: this was backported to 0.7
Add avcodec_alloc_context3. Deprecate avcodec_alloc_context() and
avcodec_alloc_context2().
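Together, the two replacements look like this (codec comes from
avcodec_find_decoder() or similar):

    AVCodecContext *c = avcodec_alloc_context3(codec);

    if (!c)
        exit(1);
    if (avcodec_open2(c, codec, NULL) < 0) { /* was: avcodec_open(c, codec) */
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }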
2011-07-01 - b442ca6 - lavf 53.5.0 - avformat.h
Add function av_get_output_timestamp().
@@ -683,39 +506,39 @@ API changes, most recent first:
2011-01-31 - 910b5b8 - lavfi 1.75.0 - AVFilterLink sample_aspect_ratio
Add sample_aspect_ratio field to AVFilterLink.
2011-01-15 - a242ac3 - lavfi 1.74.0 - AVFilterBufferRefAudioProps
2011-01-15 - r26374 - lavfi 1.74.0 - AVFilterBufferRefAudioProps
Rename AVFilterBufferRefAudioProps.samples_nb to nb_samples.
2011-01-14 - 7f88a5b - lavf 52.93.0 - av_metadata_copy()
2011-01-14 - r26330 - lavf 52.93.0 - av_metadata_copy()
Add av_metadata_copy() in avformat.h.
2011-01-07 - 81c623f - lavc 52.107.0 - deprecate reordered_opaque
2011-01-07 - r26262 - lavc 52.107.0 - deprecate reordered_opaque
Deprecate reordered_opaque in favor of pkt_pts/dts.
2011-01-07 - 1919fea - lavc 52.106.0 - pkt_dts
2011-01-07 - r26261 - lavc 52.106.0 - pkt_dts
Add pkt_dts to AVFrame; in the future this will allow multithreaded decoders
not to mess up dts.
2011-01-07 - 393cbb9 - lavc 52.105.0 - pkt_pts
2011-01-07 - r26260 - lavc 52.105.0 - pkt_pts
Add pkt_pts to AVFrame.
2011-01-07 - 060ec0a - lavc 52.104.0 - av_get_profile_name()
2011-01-07 - r26259 - lavc 52.104.0 - av_get_profile_name()
Add av_get_profile_name to libavcodec/avcodec.h.
2010-12-27 - 0ccabee - lavfi 1.71.0 - AV_PERM_NEG_LINESIZES
2010-12-27 - r26108 - lavfi 1.71.0 - AV_PERM_NEG_LINESIZES
Add AV_PERM_NEG_LINESIZES in avfilter.h.
2010-12-27 - 9128ae0 - lavf 52.91.0 - av_find_best_stream()
2010-12-27 - r26104 - lavf 52.91.0 - av_find_best_stream()
Add av_find_best_stream to libavformat/avformat.h.
2010-12-27 - 107a7e3 - lavf 52.90.0
2010-12-27 - r26103 - lavf 52.90.0
Add AVFMT_NOSTREAMS flag for formats with no streams,
like e.g. text metadata.
2010-12-22 - 0328b9e - lavu 50.36.0 - file.h
2010-12-22 - r26073 - lavu 50.36.0 - file.h
Add functions av_file_map() and av_file_unmap() in file.h.
2010-12-19 - 0bc55f5 - lavu 50.35.0 - error.h
2010-12-19 - r26056 - lavu 50.35.0 - error.h
Add "not found" error codes:
AVERROR_DEMUXER_NOT_FOUND
AVERROR_MUXER_NOT_FOUND
@@ -726,28 +549,28 @@ API changes, most recent first:
AVERROR_BSF_NOT_FOUND
AVERROR_STREAM_NOT_FOUND
2010-12-09 - c61cdd0 - lavcore 0.16.0 - avcore.h
2010-12-09 - r25923 - lavcore 0.16.0 - avcore.h
Move AV_NOPTS_VALUE, AV_TIME_BASE, AV_TIME_BASE_Q symbols from
avcodec.h to avcore.h.
2010-12-04 - 16cfc96 - lavc 52.98.0 - CODEC_CAP_NEG_LINESIZES
2010-12-04 - r25886 - lavc 52.98.0 - CODEC_CAP_NEG_LINESIZES
Add CODEC_CAP_NEG_LINESIZES codec capability flag in avcodec.h.
2010-12-04 - bb4afa1 - lavu 50.34.0 - av_get_pix_fmt_string()
2010-12-04 - r25879 - lavu 50.34.0 - av_get_pix_fmt_string()
Deprecate avcodec_pix_fmt_string() in favor of
pixdesc.h/av_get_pix_fmt_string().
2010-12-04 - 4da12e3 - lavcore 0.15.0 - av_image_alloc()
2010-12-04 - r25878 - lavcore 0.15.0 - av_image_alloc()
Add av_image_alloc() to libavcore/imgutils.h.
2010-12-02 - 037be76 - lavfi 1.67.0 - avfilter_graph_create_filter()
2010-12-02 - r25862 - lavfi 1.67.0 - avfilter_graph_create_filter()
Add function avfilter_graph_create_filter() in avfiltergraph.h.
2010-11-25 - 4723bc2 - lavfi 1.65.0 - avfilter_get_video_buffer_ref_from_arrays()
2010-11-25 - r25826 - lavfi 1.65.0 - avfilter_get_video_buffer_ref_from_arrays()
Add function avfilter_get_video_buffer_ref_from_arrays() in
avfilter.h.
2010-11-21 - 176a615 - lavcore 0.14.0 - audioconvert.h
2010-11-21 - r25787 - lavcore 0.14.0 - audioconvert.h
Add a public audio channel API in audioconvert.h, and deprecate the
corresponding functions in libavcodec:
avcodec_get_channel_name()
@@ -756,23 +579,23 @@ API changes, most recent first:
avcodec_channel_layout_num_channels()
and the CH_* macros defined in libavcodec/avcodec.h.
2010-11-21 - 6bfc268 - lavf 52.85.0 - avformat.h
2010-11-21 - r25777 - lavf 52.85.0 - avformat.h
Add av_append_packet().
2010-11-21 - a08d918 - lavc 52.97.0 - avcodec.h
2010-11-21 - r25776 - lavc 52.97.0 - avcodec.h
Add av_grow_packet().
2010-11-17 - 0985e1a - lavcore 0.13.0 - parseutils.h
2010-11-17 - r25761 - lavcore 0.13.0 - parseutils.h
Add av_parse_color() declared in libavcore/parseutils.h.
2010-11-13 - cb2c971 - lavc 52.95.0 - AVCodecContext
2010-11-13 - r25745 - lavc 52.95.0 - AVCodecContext
Add AVCodecContext.subtitle_header and AVCodecContext.subtitle_header_size
fields.
2010-11-13 - 5aaea02 - lavfi 1.62.0 - avfiltergraph.h
2010-11-13 - r25740 - lavfi 1.62.0 - avfiltergraph.h
Make avfiltergraph.h public.
2010-11-13 - 4fcbb2a - lavfi 1.61.0 - avfiltergraph.h
2010-11-13 - r25737 - lavfi 1.61.0 - avfiltergraph.h
Remove declarations from avfiltergraph.h for the functions:
avfilter_graph_check_validity()
avfilter_graph_config_links()
@@ -780,7 +603,7 @@ API changes, most recent first:
which are now internal.
Use avfilter_graph_config() instead.
2010-11-08 - d2af720 - lavu 50.33.0 - eval.h
2010-11-08 - r25708 - lavu 50.33.0 - eval.h
Deprecate functions:
av_parse_and_eval_expr(),
av_parse_expr(),
@@ -792,30 +615,30 @@ API changes, most recent first:
av_expr_eval(),
av_expr_free().
2010-11-08 - 24de0ed - lavfi 1.59.0 - avfilter_free()
2010-11-08 - r25707 - lavfi 1.59.0 - avfilter_free()
Rename avfilter_destroy() to avfilter_free().
This change breaks libavfilter API/ABI.
2010-11-07 - 1e80a0e - lavfi 1.58.0 - avfiltergraph.h
2010-11-07 - r25705 - lavfi 1.58.0 - avfiltergraph.h
Remove graphparser.h header, move AVFilterInOut and
avfilter_graph_parse() declarations to libavfilter/avfiltergraph.h.
2010-11-07 - 7313132 - lavfi 1.57.0 - AVFilterInOut
2010-11-07 - r25700 - lavfi 1.57.0 - AVFilterInOut
Rename field AVFilterInOut.filter to AVFilterInOut.filter_ctx.
This change breaks libavfilter API.
2010-11-04 - 97dd1e4 - lavfi 1.56.0 - avfilter_graph_free()
2010-11-04 - r25674 - lavfi 1.56.0 - avfilter_graph_free()
Rename avfilter_graph_destroy() to avfilter_graph_free().
This change breaks libavfilter API/ABI.
2010-11-04 - e15aeea - lavfi 1.55.0 - avfilter_graph_alloc()
2010-11-04 - r25673 - lavfi 1.55.0 - avfilter_graph_alloc()
Add avfilter_graph_alloc() to libavfilter/avfiltergraph.h.
2010-11-02 - 6f84cd1 - lavcore 0.12.0 - av_get_bits_per_sample_fmt()
2010-11-02 - r25654 - lavcore 0.12.0 - av_get_bits_per_sample_fmt()
Add av_get_bits_per_sample_fmt() to libavcore/samplefmt.h and
deprecate av_get_bits_per_sample_format().
2010-11-02 - d63e456 - lavcore 0.11.0 - samplefmt.h
2010-11-02 - r25653 - lavcore 0.11.0 - samplefmt.h
Add sample format functions in libavcore/samplefmt.h:
av_get_sample_fmt_name(),
av_get_sample_fmt(),
@@ -825,149 +648,149 @@ API changes, most recent first:
avcodec_get_sample_fmt(),
avcodec_sample_fmt_string().
2010-11-02 - 262d1c5 - lavcore 0.10.0 - samplefmt.h
2010-11-02 - r25652 - lavcore 0.10.0 - samplefmt.h
Define enum AVSampleFormat in libavcore/samplefmt.h, deprecate enum
SampleFormat.
2010-10-16 - 2a24df9 - lavfi 1.52.0 - avfilter_graph_config()
2010-10-16 - r25502 - lavfi 1.52.0 - avfilter_graph_config()
Add the function avfilter_graph_config() in avfiltergraph.h.
2010-10-15 - 03700d3 - lavf 52.83.0 - metadata API
2010-10-15 - r25493 - lavf 52.83.0 - metadata API
Change demuxers to export metadata in generic format and
muxers to accept generic format. Deprecate the public
conversion API.
2010-10-10 - 867ae7a - lavfi 1.49.0 - AVFilterLink.time_base
2010-10-10 - r25441 - lavfi 1.49.0 - AVFilterLink.time_base
Add time_base field to AVFilterLink.
2010-09-27 - c85eef4 - lavu 50.31.0 - av_set_options_string()
2010-09-27 - r25236 - lavu 50.31.0 - av_set_options_string()
Move av_set_options_string() from libavfilter/parseutils.h to
libavutil/opt.h.
2010-09-27 - acc0490 - lavfi 1.47.0 - AVFilterLink
2010-09-27 - r25227 - lavfi 1.47.0 - AVFilterLink
Make the AVFilterLink fields srcpad and dstpad store the pointers to
the source and destination pads, rather than their indexes.
2010-09-27 - 372e288 - lavu 50.30.0 - av_get_token()
2010-09-27 - r25225 - lavu 50.30.0 - av_get_token()
Move av_get_token() from libavfilter/parseutils.h to
libavutil/avstring.h.
2010-09-26 - 635d4ae - lsws 0.12.0 - swscale.h
2010-09-26 - r32368 - lsws 0.12.0 - swscale.h
Add the functions sws_alloc_context() and sws_init_context().
2010-09-26 - 6ed0404 - lavu 50.29.0 - opt.h
2010-09-26 - r25210 - lavu 50.29.0 - opt.h
Move libavcodec/opt.h to libavutil/opt.h.
2010-09-24 - 1c1c80f - lavu 50.28.0 - av_log_set_flags()
2010-09-24 - r25174 - lavu 50.28.0 - av_log_set_flags()
The default behavior of av_log() changed due to many problems with the old
no-repeat detection. Read the docs of AV_LOG_SKIP_REPEATED in log.h before
enabling it for your app!
2010-09-24 - f66eb58 - lavc 52.90.0 - av_opt_show2()
2010-09-24 - r25167 - lavc 52.90.0 - av_opt_show2()
Deprecate av_opt_show() in favor of av_opt_show2().
2010-09-14 - bc6f0af - lavu 50.27.0 - av_popcount()
2010-09-14 - r25120 - lavu 50.27.0 - av_popcount()
Add av_popcount() to libavutil/common.h.
2010-09-08 - c6c98d0 - lavu 50.26.0 - av_get_cpu_flags()
2010-09-08 - r25076 - lavu 50.26.0 - av_get_cpu_flags()
Add av_get_cpu_flags().
2010-09-07 - 34017fd - lavcore 0.9.0 - av_image_copy()
2010-09-07 - r25067 - lavcore 0.9.0 - av_image_copy()
Add av_image_copy().
2010-09-07 - 9686abb - lavcore 0.8.0 - av_image_copy_plane()
2010-09-07 - r25064 - lavcore 0.8.0 - av_image_copy_plane()
Add av_image_copy_plane().
2010-09-07 - 9b7269e - lavcore 0.7.0 - imgutils.h
2010-09-07 - r25057 - lavcore 0.7.0 - imgutils.h
Adopt hierarchical scheme for the imgutils.h function names,
deprecate the old names.
2010-09-04 - 7160bb7 - lavu 50.25.0 - AV_CPU_FLAG_*
2010-09-04 - r25040 - lavu 50.25.0 - AV_CPU_FLAG_*
Deprecate the FF_MM_* flags defined in libavcodec/avcodec.h in favor
of the AV_CPU_FLAG_* flags defined in libavutil/cpu.h.
2010-08-26 - 5da19b5 - lavc 52.87.0 - avcodec_get_channel_layout()
2010-08-26 - r24936 - lavc 52.87.0 - avcodec_get_channel_layout()
Add avcodec_get_channel_layout() in audioconvert.h.
2010-08-20 - e344336 - lavcore 0.6.0 - av_fill_image_max_pixsteps()
2010-08-20 - r24851 - lavcore 0.6.0 - av_fill_image_max_pixsteps()
Rename av_fill_image_max_pixstep() to av_fill_image_max_pixsteps().
2010-08-18 - a6ddf8b - lavcore 0.5.0 - av_fill_image_max_pixstep()
2010-08-18 - r24827 - lavcore 0.5.0 - av_fill_image_max_pixstep()
Add av_fill_image_max_pixstep() in imgutils.h.
2010-08-17 - 4f2d2e4 - lavu 50.24.0 - AV_NE()
2010-08-17 - r24814 - lavu 50.24.0 - AV_NE()
Add the AV_NE macro.
2010-08-17 - ad2c950 - lavfi 1.36.0 - audio framework
2010-08-17 - r24811 - lavfi 1.36.0 - audio framework
Implement AVFilterBufferRefAudioProps struct for audio properties,
get_audio_buffer(), filter_samples() functions and related changes.
2010-08-12 - 81c1eca - lavcore 0.4.0 - av_get_image_linesize()
2010-08-12 - r24787 - lavcore 0.4.0 - av_get_image_linesize()
Add av_get_image_linesize() in imgutils.h.
2010-08-11 - c1db7bf - lavfi 1.34.0 - AVFilterBufferRef
2010-08-11 - r24773 - lavfi 1.34.0 - AVFilterBufferRef
Resize data and linesize arrays in AVFilterBufferRef to 8.
This change breaks libavfilter API/ABI.
2010-08-11 - 9f08d80 - lavc 52.85.0 - av_picture_data_copy()
2010-08-11 - r24768 - lavc 52.85.0 - av_picture_data_copy()
Add av_picture_data_copy in avcodec.h.
2010-08-11 - 84c0386 - lavfi 1.33.0 - avfilter_open()
2010-08-11 - r24765 - lavfi 1.33.0 - avfilter_open()
Change avfilter_open() signature:
AVFilterContext *avfilter_open(AVFilter *filter, const char *inst_name) ->
int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name);
This change breaks libavfilter API/ABI.
2010-08-11 - cc80caf - lavfi 1.32.0 - AVFilterBufferRef
2010-08-11 - r24763 - lavfi 1.32.0 - AVFilterBufferRef
Add a type field to AVFilterBufferRef, and move video specific
properties to AVFilterBufferRefVideoProps.
This change breaks libavfilter API/ABI.
2010-08-07 - 5d4890d - lavfi 1.31.0 - AVFilterLink
2010-08-07 - r24732 - lavfi 1.31.0 - AVFilterLink
Rename AVFilterLink fields:
AVFilterLink.srcpic -> AVFilterLink.src_buf
AVFilterLink.cur_pic -> AVFilterLink.cur_buf
AVFilterLink.outpic -> AVFilterLink.out_buf
2010-08-07 - 7fce481 - lavfi 1.30.0
2010-08-07 - r24731 - lavfi 1.30.0
Rename functions and fields:
avfilter_(un)ref_pic -> avfilter_(un)ref_buffer
avfilter_copy_picref_props -> avfilter_copy_buffer_ref_props
AVFilterBufferRef.pic -> AVFilterBufferRef.buffer
2010-08-07 - ecc8dad - lavfi 1.29.0 - AVFilterBufferRef
2010-08-07 - r24730 - lavfi 1.29.0 - AVFilterBufferRef
Rename AVFilterPicRef to AVFilterBufferRef.
2010-08-07 - d54e094 - lavfi 1.28.0 - AVFilterBuffer
2010-08-07 - r24728 - lavfi 1.28.0 - AVFilterBuffer
Move format field from AVFilterBuffer to AVFilterPicRef.
2010-08-06 - bf176f5 - lavcore 0.3.0 - av_check_image_size()
2010-08-06 - r24709 - lavcore 0.3.0 - av_check_image_size()
Deprecate avcodec_check_dimensions() in favor of the function
av_check_image_size() defined in libavcore/imgutils.h.
2010-07-30 - 56b5e9d - lavfi 1.27.0 - AVFilterBuffer
2010-07-30 - r24592 - lavfi 1.27.0 - AVFilterBuffer
Increase size of the arrays AVFilterBuffer.data and
AVFilterBuffer.linesize from 4 to 8.
This change breaks libavfilter ABI.
2010-07-29 - e7bd48a - lavcore 0.2.0 - imgutils.h
2010-07-29 - r24583 - lavcore 0.2.0 - imgutils.h
Add functions av_fill_image_linesizes() and
av_fill_image_pointers(), declared in libavcore/imgutils.h.
2010-07-27 - 126b638 - lavcore 0.1.0 - parseutils.h
2010-07-27 - r24518 - lavcore 0.1.0 - parseutils.h
Deprecate av_parse_video_frame_size() and av_parse_video_frame_rate()
defined in libavcodec in favor of the newly added functions
av_parse_video_size() and av_parse_video_rate() declared in
libavcore/parseutils.h.
2010-07-23 - 4485247 - lavu 50.23.0 - mathematics.h
2010-07-23 - r24439 - lavu 50.23.0 - mathematics.h
Add the M_PHI constant definition.
2010-07-22 - bdab614 - lavfi 1.26.0 - media format generalization
2010-07-22 - r24424 - lavfi 1.26.0 - media format generalization
Add a type field to AVFilterLink.
Change the field types:
@@ -987,235 +810,235 @@ API changes, most recent first:
This change breaks libavfilter API/ABI.
2010-07-21 - aac6ca6 - lavcore 0.0.0
2010-07-21 - r24393 - lavcore 0.0.0
Add libavcore.
2010-07-17 - b5c582f - lavfi 1.25.0 - AVFilterBuffer
2010-07-17 - r24291 - lavfi 1.25.0 - AVFilterBuffer
Remove w and h fields from AVFilterBuffer.
2010-07-17 - f0d77b2 - lavfi 1.24.0 - AVFilterBuffer
2010-07-17 - r24284 - lavfi 1.24.0 - AVFilterBuffer
Rename AVFilterPic to AVFilterBuffer.
2010-07-17 - 57fe80f - lavf 52.74.0 - url_fskip()
2010-07-17 - r24278 - lavf 52.74.0 - url_fskip()
Make url_fskip() return an int error code instead of void.
2010-07-11 - 23940f1 - lavc 52.83.0
2010-07-11 - r24199 - lavc 52.83.0
Add AVCodecContext.lpc_type and AVCodecContext.lpc_passes fields.
Add AVLPCType enum.
Deprecate AVCodecContext.use_lpc.
2010-07-11 - e1d7c88 - lavc 52.82.0 - avsubtitle_free()
2010-07-11 - r24185 - lavc 52.82.0 - avsubtitle_free()
Add a function for freeing the contents of an AVSubtitle generated by
avcodec_decode_subtitle.
2010-07-11 - b91d08f - lavu 50.22.0 - bswap.h and intreadwrite.h
2010-07-11 - r24174 - lavu 50.22.0 - bswap.h and intreadwrite.h
Make the bswap.h and intreadwrite.h API public.
2010-07-08 - ce1cd1c - lavu 50.21.0 - pixdesc.h
2010-07-08 - r24101 - lavu 50.21.0 - pixdesc.h
Rename read/write_line() to av_read/write_image_line().
2010-07-07 - 4d508e4 - lavfi 1.21.0 - avfilter_copy_picref_props()
2010-07-07 - r24091 - lavfi 1.21.0 - avfilter_copy_picref_props()
Add avfilter_copy_picref_props().
2010-07-03 - 2d525ef - lavc 52.79.0
2010-07-03 - r24021 - lavc 52.79.0
Add FF_COMPLIANCE_UNOFFICIAL and change all instances of
FF_COMPLIANCE_INOFFICIAL to use FF_COMPLIANCE_UNOFFICIAL.
2010-07-02 - 89eec74 - lavu 50.20.0 - lfg.h
2010-07-02 - r23985 - lavu 50.20.0 - lfg.h
Export av_lfg_init(), av_lfg_get(), av_mlfg_get(), and av_bmg_get() through
lfg.h.
2010-06-28 - a52e2c3 - lavfi 1.20.1 - av_parse_color()
2010-06-28 - r23835 - lavfi 1.20.1 - av_parse_color()
Extend av_parse_color() syntax, make it accept an alpha value specifier and
set the alpha value to 255 by default.
2010-06-22 - 735cf6b - lavf 52.71.0 - URLProtocol.priv_data_size, priv_data_class
2010-06-22 - r23706 - lavf 52.71.0 - URLProtocol.priv_data_size, priv_data_class
Add priv_data_size and priv_data_class to URLProtocol.
2010-06-22 - ffbb289 - lavf 52.70.0 - url_alloc(), url_connect()
2010-06-22 - r23704 - lavf 52.70.0 - url_alloc(), url_connect()
Add url_alloc() and url_connect().
2010-06-22 - 9b07a2d - lavf 52.69.0 - av_register_protocol2()
2010-06-22 - r23702 - lavf 52.69.0 - av_register_protocol2()
Add av_register_protocol2(), deprecating av_register_protocol().
2010-06-09 - 65db058 - lavu 50.19.0 - av_compare_mod()
2010-06-09 - r23551 - lavu 50.19.0 - av_compare_mod()
Add av_compare_mod() to libavutil/mathematics.h.
2010-06-05 - 0b99215 - lavu 50.18.0 - eval API
2010-06-05 - r23485 - lavu 50.18.0 - eval API
Make the eval API public.
2010-06-04 - 31878fc - lavu 50.17.0 - AV_BASE64_SIZE
2010-06-04 - r23461 - lavu 50.17.0 - AV_BASE64_SIZE
Add AV_BASE64_SIZE() macro.
2010-06-02 - 7e566bb - lavc 52.73.0 - av_get_codec_tag_string()
2010-06-02 - r23421 - lavc 52.73.0 - av_get_codec_tag_string()
Add av_get_codec_tag_string().
2010-06-01 - 2b99142 - lsws 0.11.0 - convertPalette API
2010-06-01 - r31301 - lsws 0.11.0 - convertPalette API
Add sws_convertPalette8ToPacked32() and sws_convertPalette8ToPacked24().
2010-05-26 - 93ebfee - lavc 52.72.0 - CODEC_CAP_EXPERIMENTAL
2010-05-26 - r23334 - lavc 52.72.0 - CODEC_CAP_EXPERIMENTAL
Add CODEC_CAP_EXPERIMENTAL flag.
NOTE: this was backported to 0.6
2010-05-23 - 9977863 - lavu 50.16.0 - av_get_random_seed()
2010-05-23 - r23255 - lavu 50.16.0 - av_get_random_seed()
Add av_get_random_seed().
2010-05-18 - 796ac23 - lavf 52.63.0 - AVFMT_FLAG_RTP_HINT
2010-05-18 - r23161 - lavf 52.63.0 - AVFMT_FLAG_RTP_HINT
Add AVFMT_FLAG_RTP_HINT as possible value for AVFormatContext.flags.
NOTE: this was backported to 0.6
2010-05-09 - b6bc205 - lavfi 1.20.0 - AVFilterPicRef
2010-05-09 - r23066 - lavfi 1.20.0 - AVFilterPicRef
Add interlaced and top_field_first fields to AVFilterPicRef.
------------------------------8<-------------------------------------
0.6 branch was cut here
----------------------------->8--------------------------------------
2010-05-01 - 8e2ee18 - lavf 52.62.0 - probe function
2010-05-01 - r23002 - lavf 52.62.0 - probe function
Add av_probe_input_format2 to the API; it allows ignoring probe
results below a given score and returns the actual probe score.
2010-04-01 - 3dd6180 - lavf 52.61.0 - metadata API
2010-04-01 - r22806 - lavf 52.61.0 - metadata API
Add a flag for av_metadata_set2() to disable overwriting of
existing tags.
2010-04-01 - 0fb49b5 - lavc 52.66.0
2010-04-01 - r22753 - lavc 52.66.0
Add avcodec_get_edge_width().
2010-03-31 - d103218 - lavc 52.65.0
2010-03-31 - r22750 - lavc 52.65.0
Add avcodec_copy_context().
2010-03-31 - 1a70d12 - lavf 52.60.0 - av_match_ext()
2010-03-31 - r22748 - lavf 52.60.0 - av_match_ext()
Make av_match_ext() public.
2010-03-31 - 1149150 - lavu 50.14.0 - AVMediaType
2010-03-31 - r22736 - lavu 50.14.0 - AVMediaType
Move AVMediaType enum from libavcodec to libavutil.
2010-03-31 - 72415b2 - lavc 52.64.0 - AVMediaType
2010-03-31 - r22735 - lavc 52.64.0 - AVMediaType
Define AVMediaType enum, and use it instead of enum CodecType, which
is deprecated and will be dropped at the next major bump.
2010-03-25 - 8795823 - lavu 50.13.0 - av_strerror()
2010-03-25 - r22684 - lavu 50.13.0 - av_strerror()
Implement av_strerror().
2010-03-23 - e1484eb - lavc 52.60.0 - av_dct_init()
2010-03-23 - r22649 - lavc 52.60.0 - av_dct_init()
Support DCT-I and DST-I.
2010-03-15 - b8819c8 - lavf 52.56.0 - AVFormatContext.start_time_realtime
2010-03-15 - r22540 - lavf 52.56.0 - AVFormatContext.start_time_realtime
Add AVFormatContext.start_time_realtime field.
2010-03-13 - 5bb5c1d - lavfi 1.18.0 - AVFilterPicRef.pos
2010-03-13 - r22506 - lavfi 1.18.0 - AVFilterPicRef.pos
Add AVFilterPicRef.pos field.
2010-03-13 - 60c144f - lavu 50.12.0 - error.h
2010-03-13 - r22501 - lavu 50.12.0 - error.h
Move error code definitions from libavcodec/avcodec.h to
the new public header libavutil/error.h.
2010-03-07 - c709483 - lavc 52.56.0 - avfft.h
2010-03-07 - r22291 - lavc 52.56.0 - avfft.h
Add public FFT interface.
2010-03-06 - ac6ef86 - lavu 50.11.0 - av_stristr()
2010-03-06 - r22251 - lavu 50.11.0 - av_stristr()
Add av_stristr().
2010-03-03 - 4b83fc0 - lavu 50.10.0 - av_tree_enumerate()
2010-03-03 - r22174 - lavu 50.10.0 - av_tree_enumerate()
Add av_tree_enumerate().
2010-02-07 - b687c1a - lavu 50.9.0 - av_compare_ts()
2010-02-07 - r21673 - lavu 50.9.0 - av_compare_ts()
Add av_compare_ts().
2010-02-05 - 3f3dc76 - lsws 0.10.0 - sws_getCoefficients()
2010-02-05 - r30513 - lsws 0.10.0 - sws_getCoefficients()
Add sws_getCoefficients().
2010-02-01 - ca76a11 - lavf 52.50.0 - metadata API
2010-02-01 - r21587 - lavf 52.50.0 - metadata API
Add a list of generic tag names, change 'author' -> 'artist',
'year' -> 'date'.
2010-01-30 - 80a07f6 - lavu 50.8.0 - av_get_pix_fmt()
2010-01-30 - r21545 - lavu 50.8.0 - av_get_pix_fmt()
Add av_get_pix_fmt().
2010-01-21 - 01cc47d - lsws 0.9.0 - sws_scale()
2010-01-21 - r30381 - lsws 0.9.0 - sws_scale()
Change constness attributes of sws_scale() parameters.
2010-01-10 - 3fb8e77 - lavfi 1.15.0 - avfilter_graph_config_links()
2010-01-10 - r21121 - lavfi 1.15.0 - avfilter_graph_config_links()
Add a log_ctx parameter to avfilter_graph_config_links().
2010-01-07 - 8e9767f - lsws 0.8.0 - sws_isSupported{In,Out}put()
2010-01-07 - r30236 - lsws 0.8.0 - sws_isSupported{In,Out}put()
Add sws_isSupportedInput() and sws_isSupportedOutput() functions.
2010-01-06 - c1d662f - lavfi 1.14.0 - avfilter_add_colorspace()
2010-01-06 - r21035 - lavfi 1.14.0 - avfilter_add_colorspace()
Change the avfilter_add_colorspace() signature, make it accept an
(AVFilterFormats **) rather than an (AVFilterFormats *) as before.
2010-01-03 - 4fd1f18 - lavfi 1.13.0 - avfilter_add_colorspace()
2010-01-03 - r21007 - lavfi 1.13.0 - avfilter_add_colorspace()
Add avfilter_add_colorspace().
2010-01-02 - 8eb631f - lavf 52.46.0 - av_match_ext()
2010-01-02 - r20998 - lavf 52.46.0 - av_match_ext()
Add av_match_ext(), it should be used in place of match_ext().
2010-01-01 - a1f547b - lavf 52.45.0 - av_guess_format()
2010-01-01 - r20991 - lavf 52.45.0 - av_guess_format()
Add av_guess_format(), it should be used in place of guess_format().
2009-12-13 - a181981 - lavf 52.43.0 - metadata API
2009-12-13 - r20834 - lavf 52.43.0 - metadata API
Add av_metadata_set2(), AV_METADATA_DONT_STRDUP_KEY and
AV_METADATA_DONT_STRDUP_VAL.
2009-12-13 - 277c733 - lavu 50.7.0 - avstring.h API
2009-12-13 - r20829 - lavu 50.7.0 - avstring.h API
Add av_d2str().
2009-12-13 - 02b398e - lavc 52.42.0 - AVStream
2009-12-13 - r20826 - lavc 52.42.0 - AVStream
Add avg_frame_rate.
2009-12-12 - 3ba69a1 - lavu 50.6.0 - av_bmg_next()
2009-12-12 - r20808 - lavu 50.6.0 - av_bmg_next()
Introduce the av_bmg_next() function.
2009-12-05 - a13a543 - lavfi 1.12.0 - avfilter_draw_slice()
2009-12-05 - r20734 - lavfi 1.12.0 - avfilter_draw_slice()
Add a slice_dir parameter to avfilter_draw_slice().
2009-11-26 - 4cc3f6a - lavfi 1.11.0 - AVFilter
2009-11-26 - r20611 - lavfi 1.11.0 - AVFilter
Remove the next field from AVFilter; it is no longer required.
2009-11-25 - 1433c4a - lavfi 1.10.0 - avfilter_next()
2009-11-25 - r20607 - lavfi 1.10.0 - avfilter_next()
Introduce the avfilter_next() function.
2009-11-25 - 86a60fa - lavfi 1.9.0 - avfilter_register()
2009-11-25 - r20605 - lavfi 1.9.0 - avfilter_register()
Change the signature of avfilter_register() to make it return an
int. This is required since the registration operation may now fail.
2009-11-25 - 74a0059 - lavu 50.5.0 - pixdesc.h API
2009-11-25 - r20603 - lavu 50.5.0 - pixdesc.h API
Make the pixdesc.h API public.
2009-10-27 - 243110f - lavfi 1.5.0 - AVFilter.next
2009-10-27 - r20385 - lavfi 1.5.0 - AVFilter.next
Add a next field to AVFilter, this is used for simplifying the
registration and management of the registered filters.
2009-10-23 - cccd292 - lavfi 1.4.1 - AVFilter.description
2009-10-23 - r20356 - lavfi 1.4.1 - AVFilter.description
Add a description field to AVFilter.
2009-10-19 - 6b5dc05 - lavfi 1.3.0 - avfilter_make_format_list()
2009-10-19 - r20302 - lavfi 1.3.0 - avfilter_make_format_list()
Change the interface of avfilter_make_format_list() from
avfilter_make_format_list(int n, ...) to
avfilter_make_format_list(enum PixelFormat *pix_fmts).
2009-10-18 - 0eb4ff9 - lavfi 1.0.0 - avfilter_get_video_buffer()
2009-10-18 - r20272 - lavfi 1.0.0 - avfilter_get_video_buffer()
Make avfilter_get_video_buffer() recursive and add the w and h
parameters to it.
2009-10-07 - 46c40e4 - lavfi 0.5.1 - AVFilterPic
2009-10-07 - r20189 - lavfi 0.5.1 - AVFilterPic
Add w and h fields to AVFilterPic.
2009-06-22 - 92400be - lavf 52.34.1 - AVFormatContext.packet_size
2009-06-22 - r19250 - lavf 52.34.1 - AVFormatContext.packet_size
This is now an unsigned int instead of a signed int.
2009-06-19 - a4276ba - lavc 52.32.0 - AVSubtitle.pts
2009-06-19 - r19222 - lavc 52.32.0 - AVSubtitle.pts
Add a pts field to AVSubtitle which gives the subtitle packet pts
in AV_TIME_BASE. Some subtitle de-/encoders (e.g. XSUB) will
not work right without this.
2009-06-03 - 8f3f2e0 - lavc 52.30.2 - AV_PKT_FLAG_KEY
2009-06-03 - r19078 - lavc 52.30.2 - AV_PKT_FLAG_KEY
PKT_FLAG_KEY has been deprecated and will be dropped at the next
major version. Use AV_PKT_FLAG_KEY instead.
2009-06-01 - f988ce6 - lavc 52.30.0 - av_lockmgr_register()
2009-06-01 - r19025 - lavc 52.30.0 - av_lockmgr_register()
av_lockmgr_register() can be used to register a callback function
that lavc (and in the future, libraries that depend on lavc) can use
to implement mutexes. The application should provide a callback function
@@ -1223,27 +1046,27 @@ API changes, most recent first:
When the lock manager is registered, FFmpeg is guaranteed to behave
correctly in a multi-threaded application.
2009-04-30 - ce1d9c8 - lavc 52.28.0 - av_free_packet()
2009-04-30 - r18719 - lavc 52.28.0 - av_free_packet()
av_free_packet() is no longer an inline function. It is now exported.
2009-04-11 - 80d403f - lavc 52.25.0 - deprecate av_destruct_packet_nofree()
2009-04-11 - r18431 - lavc 52.25.0 - deprecate av_destruct_packet_nofree()
Please use NULL instead. This has been supported since r16506
(lavf > 52.23.1, lavc > 52.10.0).
2009-04-07 - 7a00bba - lavc 52.23.0 - avcodec_decode_video/audio/subtitle
2009-04-07 - r18351 - lavc 52.23.0 - avcodec_decode_video/audio/subtitle
The old decoding functions are deprecated, all new code should use the
new functions avcodec_decode_video2(), avcodec_decode_audio3() and
avcodec_decode_subtitle2(). These new functions take an AVPacket *pkt
argument instead of a const uint8_t *buf / int buf_size pair.
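For example, the video decode call now receives its input wrapped in an
AVPacket (a sketch; c, picture, inbuf and inbuf_size are hypothetical):

    AVPacket avpkt;
    int got_picture = 0, len;

    av_init_packet(&avpkt);
    avpkt.data = inbuf;
    avpkt.size = inbuf_size;
    len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);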
2009-04-03 - 7b09db3 - lavu 50.3.0 - av_fifo_space()
2009-04-03 - r18321 - lavu 50.3.0 - av_fifo_space()
Introduce the av_fifo_space() function.
2009-04-02 - fabd246 - lavc 52.23.0 - AVPacket
2009-04-02 - r18317 - lavc 52.23.0 - AVPacket
Move AVPacket declaration from libavformat/avformat.h to
libavcodec/avcodec.h.
2009-03-22 - 6e08ca9 - lavu 50.2.0 - RGB32 pixel formats
2009-03-22 - r18163 - lavu 50.2.0 - RGB32 pixel formats
Convert the pixel formats PIX_FMT_ARGB, PIX_FMT_RGBA, PIX_FMT_ABGR,
PIX_FMT_BGRA, which were defined as macros, into enum PixelFormat values.
Conversely PIX_FMT_RGB32, PIX_FMT_RGB32_1, PIX_FMT_BGR32 and
@@ -1252,17 +1075,17 @@ API changes, most recent first:
Re-sort the enum PixelFormat list accordingly.
This change breaks API/ABI backward compatibility.
2009-03-22 - f82674e - lavu 50.1.0 - PIX_FMT_RGB5X5 endian variants
2009-03-22 - r18133 - lavu 50.1.0 - PIX_FMT_RGB5X5 endian variants
Add the enum PixelFormat values:
PIX_FMT_RGB565BE, PIX_FMT_RGB565LE, PIX_FMT_RGB555BE, PIX_FMT_RGB555LE,
PIX_FMT_BGR565BE, PIX_FMT_BGR565LE, PIX_FMT_BGR555BE, PIX_FMT_BGR555LE.
2009-03-21 - ee6624e - lavu 50.0.0 - av_random*
2009-03-21 - r18116 - lavu 50.0.0 - av_random*
The Mersenne Twister PRNG implemented through the av_random* functions
was removed. Use the lagged Fibonacci PRNG through the av_lfg* functions
instead.
2009-03-08 - 41dd680 - lavu 50.0.0 - AVFifoBuffer
2009-03-08 - r17869 - lavu 50.0.0 - AVFifoBuffer
av_fifo_init, av_fifo_read, av_fifo_write and av_fifo_realloc were dropped
and replaced by av_fifo_alloc, av_fifo_generic_read, av_fifo_generic_write
and av_fifo_realloc2.
@@ -1271,7 +1094,7 @@ API changes, most recent first:
The AVFifoBuffer/struct AVFifoBuffer may only be used in an opaque way by
applications; they may not use sizeof() or directly access members.
2009-03-01 - ec26457 - lavf 52.31.0 - Generic metadata API
2009-03-01 - r17682 - lavf 52.31.0 - Generic metadata API
Introduce a new metadata API (see av_metadata_get() and friends).
The old API is now deprecated and should not be used anymore. This especially
includes the following structure fields:


@@ -8,51 +8,31 @@ HTMLPAGES = $(PROGS-yes:%=doc/%.html) \
doc/git-howto.html \
doc/libavfilter.html \
doc/platform.html \
doc/syntax.html \
TXTPAGES = doc/fate.txt \
DOCS = $(HTMLPAGES) $(MANPAGES) $(PODPAGES)
ifdef HAVE_MAKEINFO
DOCS += $(TXTPAGES)
endif
all-$(CONFIG_DOC): documentation
documentation: $(DOCS)
TEXIDEP = awk '/^@(verbatim)?include/ { printf "$@: $(@D)/%s\n", $$2 }' <$< >$(@:%=%.d)
doc/%.txt: TAG = TXT
doc/%.txt: doc/%.texi
$(Q)$(TEXIDEP)
$(M)makeinfo --force --no-headers -o $@ $< 2>/dev/null
doc/print_options.o: libavformat/options_table.h libavcodec/options_table.h
GENTEXI = format codec
GENTEXI := $(GENTEXI:%=doc/avoptions_%.texi)
$(GENTEXI): TAG = GENTEXI
$(GENTEXI): doc/avoptions_%.texi: doc/print_options
$(M)doc/print_options $* > $@
TEXIDEP = awk '/^@include/ { printf "$@: $(@D)/%s\n", $$2 }' <$< >$(@:%=%.d)
doc/%.html: TAG = HTML
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init
$(Q)$(TEXIDEP)
$(M)texi2html -I doc -monolithic --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
$(M)texi2html -monolithic --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
doc/%.pod: TAG = POD
doc/%.pod: doc/%.texi $(GENTEXI)
doc/%.pod: doc/%.texi
$(Q)$(TEXIDEP)
$(M)$(SRC_PATH)/doc/texi2pod.pl -Idoc $< $@
$(M)$(SRC_PATH)/doc/texi2pod.pl $< $@
doc/%.1: TAG = MAN
doc/%.1: doc/%.pod $(GENTEXI)
doc/%.1: doc/%.pod
$(M)pod2man --section=1 --center=" " --release=" " $< > $@
$(DOCS): | doc/
$(DOCS): | doc
OBJDIRS += doc
install-progs-$(CONFIG_DOC): install-man
@@ -66,7 +46,7 @@ uninstall-man:
$(RM) $(addprefix "$(MANDIR)/man1/",$(ALLMANPAGES))
clean::
$(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 $(CLEANSUFFIXES:%=doc/%) doc/avoptions_*.texi
$(RM) doc/*.html doc/*.pod doc/*.1 $(CLEANSUFFIXES:%=doc/%)
-include $(wildcard $(DOCS:%=%.d))


@@ -1,11 +1,12 @@
Release Notes
=============
* 0.11 "Happiness" May, 2012
* 0.9 "Harmony" December, 2011
General notes
-------------
See the Changelog file for a list of significant changes. Note: there
are many more new features and bugfixes than what's listed there.

doc/avconv.texi (new file, 1041 lines): diff suppressed because it is too large


@@ -11,7 +11,6 @@ corresponding value to true. They can be set to false by prefixing
the option name with "no"; for example, using "-nofoo" on the
command line will set the boolean option named "foo" to false.
@anchor{Stream specifiers}
@section Stream specifiers
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
are used to precisely specify which stream(s) a given option belongs to.
@@ -41,8 +40,6 @@ streams of this type.
@item p:@var{program_id}[:@var{stream_index}]
If @var{stream_index} is given, then matches stream number @var{stream_index} in
program with id @var{program_id}. Otherwise matches all streams in this program.
@item #@var{stream_id}
Matches the stream by format-specific ID.
@end table
@section Generic options
@@ -121,8 +118,8 @@ Set the logging level used by the library.
By default the program logs to stderr. If coloring is supported by the
terminal, colors are used to mark errors and warnings. Log coloring
can be disabled by setting the environment variable
@env{AV_LOG_FORCE_NOCOLOR} or @env{NO_COLOR}, or can be forced by setting
the environment variable @env{AV_LOG_FORCE_COLOR}.
@env{FFMPEG_FORCE_NOCOLOR} or @env{NO_COLOR}, or can be forced by setting
the environment variable @env{FFMPEG_FORCE_COLOR}.
The use of the environment variable @env{NO_COLOR} is deprecated and
will be dropped in a following FFmpeg version.
@@ -136,15 +133,6 @@ It also implies @code{-loglevel verbose}.
Note: setting the environment variable @code{FFREPORT} to any value has the
same effect.
@item -cpuflags flags (@emph{global})
Allows setting and clearing CPU flags. This option is intended
for testing. Do not use it unless you know what you're doing.
@example
ffmpeg -cpuflags -sse+mmx ...
ffmpeg -cpuflags mmx ...
ffmpeg -cpuflags 0 ...
@end example
@end table
@section AVOptions
@@ -177,6 +165,3 @@ use @option{-option 0}/@option{-option 1}.
Note 2: the old undocumented way of specifying per-stream AVOptions by
prepending v/a/s to the option name is now obsolete and will be removed soon.
@include avoptions_codec.texi
@include avoptions_format.texi


@@ -23,20 +23,6 @@ Below is a description of the currently available bitstream filters.
@section h264_mp4toannexb
Convert an H.264 bitstream from length prefixed mode to start code
prefixed mode (as defined in Annex B of the ITU-T H.264
specification).
This is required by some streaming formats, typically the MPEG-2
transport stream format ("mpegts").
For example, to remux an MP4 file containing an H.264 stream to the
mpegts format with @command{ffmpeg}, you can use the command:
@example
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
@end example
@section imx_dump_header
@section mjpeg2jpeg
@@ -71,7 +57,7 @@ stream (carrying the AVI1 header ID and lacking a DHT segment) to
produce fully qualified JPEG images.
@example
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
ffmpeg -i mjpeg-movie.avi -c:v copy -vbsf mjpeg2jpeg frame_%d.jpg
exiftran -i -9 frame*.jpg
ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
@end example


@@ -48,16 +48,3 @@ top-field-first is assumed
@end table
@c man end VIDEO DECODERS
@chapter Audio Decoders
@c man begin AUDIO DECODERS
@section ffwavesynth
Internal wave synthesizer.
This decoder generates wave patterns according to predefined sequences. Its
use is purely internal and the format of the data it accepts is not publicly
documented.
@c man end AUDIO DECODERS


@@ -75,34 +75,4 @@ the caller can decide which variant streams to actually receive.
The total bitrate of the variant that the stream belongs to is
available in a metadata key named "variant_bitrate".
@section sbg
SBaGen script demuxer.
This demuxer reads the script language used by SBaGen
@url{http://uazu.net/sbagen/} to generate binaural beats sessions. An SBG
script looks like this:
@example
-SE
a: 300-2.5/3 440+4.5/0
b: 300-2.5/0 440+4.5/3
off: -
NOW == a
+0:07:00 == b
+0:14:00 == a
+0:21:00 == b
+0:30:00 off
@end example
An SBG script can mix absolute and relative timestamps. If the script uses
either only absolute timestamps (including the script start time) or only
relative ones, then its layout is fixed, and the conversion is
straightforward. On the other hand, if the script mixes both kinds of
timestamps, then the @var{NOW} reference for relative timestamps will be
taken from the current time of day at the time the script is read, and the
script layout will be frozen according to that reference. That means that if
the script is directly played, the actual times will match the absolute
timestamps up to the sound controller's clock accuracy, but if the user
somehow pauses the playback or seeks, all times will be shifted accordingly.
@c man end INPUT DEVICES


@@ -14,13 +14,12 @@
@section API
@itemize @bullet
@item libavcodec is the library containing the codecs (both encoding and
decoding). Look at @file{doc/examples/decoding_encoding.c} to see how to use
it.
decoding). Look at @file{libavcodec/apiexample.c} to see how to use it.
@item libavformat is the library containing the file format handling (mux and
demux code for several formats). Look at @file{ffplay.c} to use it in a
player. See @file{doc/examples/muxing.c} to use it to generate audio or video
streams.
player. See @file{libavformat/output-example.c} to use it to generate
audio or video streams.
@end itemize
@@ -188,8 +187,6 @@ the following snippet into your @file{.vimrc}:
set expandtab
set shiftwidth=4
set softtabstop=4
set cindent
set cinoptions=(0
" allow tabs in Makefiles
autocmd FileType make set noexpandtab shiftwidth=8 softtabstop=8
" Trailing whitespace and tabs are forbidden, so highlight them.
@@ -201,16 +198,10 @@ autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@@<!$/
For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
@example
(c-add-style "ffmpeg"
'("k&r"
(c-basic-offset . 4)
(indent-tabs-mode nil)
(show-trailing-whitespace t)
(c-offsets-alist
(statement-cont . (c-lineup-assignments +)))
)
)
(setq c-default-style "ffmpeg")
(setq c-default-style "k&r")
(setq-default c-basic-offset 4)
(setq-default indent-tabs-mode nil)
(setq-default show-trailing-whitespace t)
@end example
@section Development Policy
@@ -354,7 +345,7 @@ for us and greatly increases your chances of getting your patch applied.
Use the patcheck tool of FFmpeg to check your patch.
The tool is located in the tools directory.
Run the @ref{Regression tests} before submitting a patch in order to verify
Run the @ref{Regression Tests} before submitting a patch in order to verify
it does not cause unexpected problems.
Patches should be posted as base64 encoded attachments (or any other
@@ -517,13 +508,12 @@ not related to the comments received during review. Such patches will
be rejected. Instead, submit significant changes or new features as
separate patches.
@anchor{Regression tests}
@section Regression tests
Before submitting a patch (or committing to the repository), you should at least
test that you did not break anything.
Running 'make fate' accomplishes this; please see @url{fate.html} for details.
Running 'make fate' accomplishes this; please see @file{doc/fate.txt} for details.
[Of course, some patches may change the results of the regression tests. In
this case, the reference results of the regression tests shall be modified

File diff suppressed because it is too large


@@ -1,10 +0,0 @@
</div>
<div id="footer">
Generated on $datetime for $projectname by&#160;<a href="http://www.doxygen.org/index.html">doxygen</a> $doxygenversion
</div>
</div>
</body>
</html>


@@ -1,14 +0,0 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<link href="$relpath$doxy_stylesheet.css" rel="stylesheet" type="text/css" />
</head>
<div id="container">
<div id="body">
<div>


@@ -577,7 +577,7 @@ Allow setting any x264 option; see x264 --fullhelp for a list.
":".
@end table
For example to specify libx264 encoding options with @command{ffmpeg}:
For example to specify libx264 encoding options with @file{ffmpeg}:
@example
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
@end example


@@ -1,174 +0,0 @@
The following table lists most error codes found in various operating
systems supported by FFmpeg.
Code            Std   F   OS(LBMWwb)   Text (YMMV)
E2BIG POSIX ++++++ Argument list too long
EACCES POSIX ++++++ Permission denied
EADDRINUSE POSIX +++..+ Address in use
EADDRNOTAVAIL POSIX +++..+ Cannot assign requested address
EADV +..... Advertise error
EAFNOSUPPORT POSIX +++..+ Address family not supported
EAGAIN POSIX + ++++++ Resource temporarily unavailable
EALREADY POSIX +++..+ Operation already in progress
EAUTH .++... Authentication error
EBADARCH ..+... Bad CPU type in executable
EBADE +..... Invalid exchange
EBADEXEC ..+... Bad executable
EBADF POSIX ++++++ Bad file descriptor
EBADFD +..... File descriptor in bad state
EBADMACHO ..+... Malformed Macho file
EBADMSG POSIX ++4... Bad message
EBADR +..... Invalid request descriptor
EBADRPC .++... RPC struct is bad
EBADRQC +..... Invalid request code
EBADSLT +..... Invalid slot
EBFONT +..... Bad font file format
EBUSY POSIX - ++++++ Device or resource busy
ECANCELED POSIX +++... Operation canceled
ECHILD POSIX ++++++ No child processes
ECHRNG +..... Channel number out of range
ECOMM +..... Communication error on send
ECONNABORTED POSIX +++..+ Software caused connection abort
ECONNREFUSED POSIX - +++ss+ Connection refused
ECONNRESET POSIX +++..+ Connection reset
EDEADLK POSIX ++++++ Resource deadlock avoided
EDEADLOCK +..++. File locking deadlock error
EDESTADDRREQ POSIX +++... Destination address required
EDEVERR ..+... Device error
EDOM C89 - ++++++ Numerical argument out of domain
EDOOFUS .F.... Programming error
EDOTDOT +..... RFS specific error
EDQUOT POSIX +++... Disc quota exceeded
EEXIST POSIX ++++++ File exists
EFAULT POSIX - ++++++ Bad address
EFBIG POSIX - ++++++ File too large
EFTYPE .++... Inappropriate file type or format
EHOSTDOWN +++... Host is down
EHOSTUNREACH POSIX +++..+ No route to host
EHWPOISON +..... Memory page has hardware error
EIDRM POSIX +++... Identifier removed
EILSEQ C99 ++++++ Illegal byte sequence
EINPROGRESS POSIX - +++ss+ Operation in progress
EINTR POSIX - ++++++ Interrupted system call
EINVAL POSIX + ++++++ Invalid argument
EIO POSIX + ++++++ I/O error
EISCONN POSIX +++..+ Socket is already connected
EISDIR POSIX ++++++ Is a directory
EISNAM +..... Is a named type file
EKEYEXPIRED +..... Key has expired
EKEYREJECTED +..... Key was rejected by service
EKEYREVOKED +..... Key has been revoked
EL2HLT +..... Level 2 halted
EL2NSYNC +..... Level 2 not synchronized
EL3HLT +..... Level 3 halted
EL3RST +..... Level 3 reset
ELIBACC +..... Can not access a needed shared library
ELIBBAD +..... Accessing a corrupted shared library
ELIBEXEC +..... Cannot exec a shared library directly
ELIBMAX +..... Too many shared libraries
ELIBSCN +..... .lib section in a.out corrupted
ELNRNG +..... Link number out of range
ELOOP POSIX +++..+ Too many levels of symbolic links
EMEDIUMTYPE +..... Wrong medium type
EMFILE POSIX ++++++ Too many open files
EMLINK POSIX ++++++ Too many links
EMSGSIZE POSIX +++..+ Message too long
EMULTIHOP POSIX ++4... Multihop attempted
ENAMETOOLONG POSIX - ++++++ File name too long
ENAVAIL +..... No XENIX semaphores available
ENEEDAUTH .++... Need authenticator
ENETDOWN POSIX +++..+ Network is down
ENETRESET SUSv3 +++..+ Network dropped connection on reset
ENETUNREACH POSIX +++..+ Network unreachable
ENFILE POSIX ++++++ Too many open files in system
ENOANO +..... No anode
ENOATTR .++... Attribute not found
ENOBUFS POSIX - +++..+ No buffer space available
ENOCSI +..... No CSI structure available
ENODATA XSR +N4... No message available
ENODEV POSIX - ++++++ No such device
ENOENT POSIX - ++++++ No such file or directory
ENOEXEC POSIX ++++++ Exec format error
ENOFILE ...++. No such file or directory
ENOKEY +..... Required key not available
ENOLCK POSIX ++++++ No locks available
ENOLINK POSIX ++4... Link has been severed
ENOMEDIUM +..... No medium found
ENOMEM POSIX ++++++ Not enough space
ENOMSG POSIX +++..+ No message of desired type
ENONET +..... Machine is not on the network
ENOPKG +..... Package not installed
ENOPROTOOPT POSIX +++..+ Protocol not available
ENOSPC POSIX ++++++ No space left on device
ENOSR XSR +N4... No STREAM resources
ENOSTR XSR +N4... Not a STREAM
ENOSYS POSIX + ++++++ Function not implemented
ENOTBLK +++... Block device required
ENOTCONN POSIX +++..+ Socket is not connected
ENOTDIR POSIX ++++++ Not a directory
ENOTEMPTY POSIX ++++++ Directory not empty
ENOTNAM +..... Not a XENIX named type file
ENOTRECOVERABLE SUSv4 - +..... State not recoverable
ENOTSOCK POSIX +++..+ Socket operation on non-socket
ENOTSUP POSIX +++... Operation not supported
ENOTTY POSIX ++++++ Inappropriate I/O control operation
ENOTUNIQ +..... Name not unique on network
ENXIO POSIX ++++++ No such device or address
EOPNOTSUPP POSIX +++..+ Operation not supported (on socket)
EOVERFLOW POSIX +++..+ Value too large to be stored in data type
EOWNERDEAD SUSv4 +..... Owner died
EPERM POSIX - ++++++ Operation not permitted
EPFNOSUPPORT +++..+ Protocol family not supported
EPIPE POSIX - ++++++ Broken pipe
EPROCLIM .++... Too many processes
EPROCUNAVAIL .++... Bad procedure for program
EPROGMISMATCH .++... Program version wrong
EPROGUNAVAIL .++... RPC prog. not avail
EPROTO POSIX ++4... Protocol error
EPROTONOSUPPORT POSIX - +++ss+ Protocol not supported
EPROTOTYPE POSIX +++..+ Protocol wrong type for socket
EPWROFF ..+... Device power is off
ERANGE C89 - ++++++ Result too large
EREMCHG +..... Remote address changed
EREMOTE +++... Object is remote
EREMOTEIO +..... Remote I/O error
ERESTART +..... Interrupted system call should be restarted
ERFKILL +..... Operation not possible due to RF-kill
EROFS POSIX ++++++ Read-only file system
ERPCMISMATCH .++... RPC version wrong
ESHLIBVERS ..+... Shared library version mismatch
ESHUTDOWN +++..+ Cannot send after socket shutdown
ESOCKTNOSUPPORT +++... Socket type not supported
ESPIPE POSIX ++++++ Illegal seek
ESRCH POSIX ++++++ No such process
ESRMNT +..... Srmount error
ESTALE POSIX +++..+ Stale NFS file handle
ESTRPIPE +..... Streams pipe error
ETIME XSR +N4... Stream ioctl timeout
ETIMEDOUT POSIX - +++ss+ Connection timed out
ETOOMANYREFS +++... Too many references: cannot splice
ETXTBSY POSIX +++... Text file busy
EUCLEAN +..... Structure needs cleaning
EUNATCH +..... Protocol driver not attached
EUSERS +++... Too many users
EWOULDBLOCK POSIX +++..+ Operation would block
EXDEV POSIX ++++++ Cross-device link
EXFULL +..... Exchange full
Notations:
F: used in FFmpeg (-: a few times, +: a lot)
SUSv3: Single Unix Specification, version 3
SUSv4: Single Unix Specification, version 4
XSR: XSI STREAMS (obsolete)
OS: availability on some supported operating systems
L: GNU/Linux
B: BSD (F: FreeBSD, N: NetBSD)
M: MacOS X
W: Microsoft Windows (s: emulated with winsock, see libavformat/network.h)
w: Mingw32 (3.17) and Mingw64 (2.0.1)
b: BeOS


@@ -98,26 +98,6 @@ point (@var{x}, @var{y}) from the origin.
@item gcd(x, y)
Return the greatest common divisor of @var{x} and @var{y}. If both @var{x} and
@var{y} are 0 or either or both are less than zero then behavior is undefined.
@item if(x, y)
Evaluate @var{x}, and if the result is non-zero return the result of
the evaluation of @var{y}, return 0 otherwise.
@item ifnot(x, y)
Evaluate @var{x}, and if the result is zero return the result of the
evaluation of @var{y}, return 0 otherwise.
@item taylor(expr, x) taylor(expr, x, id)
Evaluate a Taylor series at x.
expr represents the LD(id)-th derivative of f(x) at 0. If id is not specified
then 0 is assumed.
Note: when you have the derivatives at y instead of 0,
taylor(expr, x-y) can be used.
When the series does not converge the results are undefined.
@item root(expr, max)
Find x where f(x)=0 in the interval 0..max.
f() must be continuous, or the result is undefined.
@end table
The following constants are available:
@@ -130,20 +110,19 @@ exp(1) (Euler's number), approximately 2.718
golden ratio (1+sqrt(5))/2, approximately 1.618
@end table
Assuming that an expression is considered "true" if it has a non-zero
value, note that:
Note that:
@code{*} works like AND
@code{+} works like OR
and the construct:
thus
@example
if A then B else C
@end example
is equivalent to
@example
if(A,B) + ifnot(A,C)
A*B + not(A)*C
@end example
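For instance, an expression selecting 2 when some variable x is greater
than 10 and 1 otherwise (assuming x is available in the evaluation
context) can be written as:
@example
gt(x,10)*2 + not(gt(x,10))*1
@end example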
In your C code, you can extend the list of unary and binary functions,


@@ -1,28 +1,17 @@
# use pkg-config for getting CFLAGS and LDLIBS
FFMPEG_LIBS= libavdevice \
libavformat \
libavfilter \
libavcodec \
libavresample \
libswresample \
libswscale \
libavutil \
# use pkg-config for getting CFLAGS and LDFLAGS
FFMPEG_LIBS=libavdevice libavformat libavfilter libavcodec libswscale libavutil
CFLAGS+=$(shell pkg-config --cflags $(FFMPEG_LIBS))
LDFLAGS+=$(shell pkg-config --libs $(FFMPEG_LIBS))
CFLAGS += -Wall -O2 -g
CFLAGS += $(shell pkg-config --cflags $(FFMPEG_LIBS))
LDLIBS += $(shell pkg-config --libs $(FFMPEG_LIBS))
EXAMPLES= decoding_encoding \
filtering_video \
filtering_audio \
metadata \
muxing \
EXAMPLES=decoding_encoding filtering metadata muxing
OBJS=$(addsuffix .o,$(EXAMPLES))
# the following examples make explicit use of the math library
decoding_encoding: LDLIBS += -lm
muxing: LDLIBS += -lm
%: %.o
$(CC) $< $(LDFLAGS) -o $@
%.o: %.c
$(CC) $< $(CFLAGS) -c -o $@
.PHONY: all clean


@@ -29,13 +29,11 @@
* format handling
*/
#include <math.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavcodec/avcodec.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"
#define INBUF_SIZE 4096
#define AUDIO_INBUF_SIZE 20480
@@ -54,7 +52,7 @@ static void audio_encode_example(const char *filename)
float t, tincr;
uint8_t *outbuf;
printf("Encode audio file %s\n", filename);
printf("Audio encoding\n");
/* find the MP2 encoder */
codec = avcodec_find_encoder(CODEC_ID_MP2);
@@ -72,7 +70,7 @@ static void audio_encode_example(const char *filename)
c->sample_fmt = AV_SAMPLE_FMT_S16;
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
if (avcodec_open(c, codec) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
@@ -125,7 +123,7 @@ static void audio_decode_example(const char *outfilename, const char *filename)
av_init_packet(&avpkt);
printf("Decode audio file %s\n", filename);
printf("Audio decoding\n");
/* find the mpeg audio decoder */
codec = avcodec_find_decoder(CODEC_ID_MP2);
@@ -137,7 +135,7 @@ static void audio_decode_example(const char *outfilename, const char *filename)
c = avcodec_alloc_context3(codec);
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
if (avcodec_open(c, codec) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
@@ -182,8 +180,6 @@ static void audio_decode_example(const char *outfilename, const char *filename)
}
avpkt.size -= len;
avpkt.data += len;
avpkt.dts =
avpkt.pts = AV_NOPTS_VALUE;
if (avpkt.size < AUDIO_REFILL_THRESH) {
/* Refill the input buffer, to avoid trying to decode
* incomplete frames. Instead of this, one could also use
@@ -213,13 +209,12 @@ static void video_encode_example(const char *filename, int codec_id)
{
AVCodec *codec;
AVCodecContext *c= NULL;
int i, out_size, x, y, outbuf_size;
int i, out_size, size, x, y, outbuf_size;
FILE *f;
AVFrame *picture;
uint8_t *outbuf;
int had_output=0;
printf("Encode video file %s\n", filename);
printf("Video encoding\n");
/* find the mpeg1 video encoder */
codec = avcodec_find_encoder(codec_id);
@@ -246,7 +241,7 @@ static void video_encode_example(const char *filename, int codec_id)
av_opt_set(c->priv_data, "preset", "slow", 0);
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
if (avcodec_open(c, codec) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
@@ -258,7 +253,7 @@ static void video_encode_example(const char *filename, int codec_id)
}
/* alloc image and output buffer */
outbuf_size = 100000 + 12*c->width*c->height;
outbuf_size = 100000;
outbuf = malloc(outbuf_size);
/* the image can be allocated by any means and av_image_alloc() is
@@ -287,17 +282,15 @@ static void video_encode_example(const char *filename, int codec_id)
/* encode the image */
out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
had_output |= out_size;
printf("encoding frame %3d (size=%5d)\n", i, out_size);
fwrite(outbuf, 1, out_size, f);
}
/* get the delayed frames */
for(; out_size || !had_output; i++) {
for(; out_size; i++) {
fflush(stdout);
out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
had_output |= out_size;
printf("write frame %3d (size=%5d)\n", i, out_size);
fwrite(outbuf, 1, out_size, f);
}
@@ -351,7 +344,7 @@ static void video_decode_example(const char *outfilename, const char *filename)
/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
printf("Decode video file %s\n", filename);
printf("Video decoding\n");
/* find the mpeg1 video decoder */
codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);
@@ -371,7 +364,7 @@ static void video_decode_example(const char *outfilename, const char *filename)
available in the bitstream. */
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
if (avcodec_open(c, codec) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
@@ -458,6 +451,9 @@ int main(int argc, char **argv)
{
const char *filename;
/* must be called before using avcodec lib */
avcodec_init();
/* register all the codecs */
avcodec_register_all();


@@ -27,13 +27,11 @@
*/
#define _XOPEN_SOURCE 600 /* for usleep */
#include <unistd.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/vsrc_buffer.h>
const char *filter_descr = "scale=78:24";
@@ -47,7 +45,7 @@ static int64_t last_pts = AV_NOPTS_VALUE;
static int open_input_file(const char *filename)
{
int ret;
int ret, i;
AVCodec *dec;
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
@@ -55,7 +53,7 @@ static int open_input_file(const char *filename)
return ret;
}
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
if ((ret = av_find_stream_info(fmt_ctx)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
return ret;
}
@@ -70,7 +68,7 @@ static int open_input_file(const char *filename)
dec_ctx = fmt_ctx->streams[video_stream_index]->codec;
/* init the video decoder */
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
if ((ret = avcodec_open(dec_ctx, dec)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
return ret;
}
@@ -120,13 +118,12 @@ static int init_filters(const char *filters_descr)
inputs->pad_idx = 0;
inputs->next = NULL;
if ((ret = avfilter_graph_parse(filter_graph, filters_descr,
if ((ret = avfilter_graph_parse(filter_graph, filter_descr,
&inputs, &outputs, NULL)) < 0)
return ret;
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
return ret;
return 0;
}
static void display_picref(AVFilterBufferRef *picref, AVRational time_base)
@@ -198,14 +195,15 @@ int main(int argc, char **argv)
}
if (got_frame) {
frame.pts = av_frame_get_best_effort_timestamp(&frame);
if (frame.pts == AV_NOPTS_VALUE)
frame.pts = frame.pkt_dts == AV_NOPTS_VALUE ?
frame.pkt_dts : frame.pkt_pts;
/* push the decoded frame into the filtergraph */
av_vsrc_buffer_add_frame(buffersrc_ctx, &frame, 0);
av_vsrc_buffer_add_frame(buffersrc_ctx, &frame);
/* pull filtered pictures from the filtergraph */
while (avfilter_poll_frame(buffersink_ctx->inputs[0])) {
av_buffersink_get_buffer_ref(buffersink_ctx, &picref, 0);
av_vsink_buffer_get_video_buffer_ref(buffersink_ctx, &picref, 0);
if (picref) {
display_picref(picref, buffersink_ctx->inputs[0]->time_base);
avfilter_unref_buffer(picref);
@@ -218,7 +216,7 @@ end:
avfilter_graph_free(&filter_graph);
if (dec_ctx)
avcodec_close(dec_ctx);
avformat_close_input(&fmt_ctx);
av_close_input_file(fmt_ctx);
if (ret < 0 && ret != AVERROR_EOF) {
char buf[1024];
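
For reference, a sketch of the frame pull loop touched above, collapsed into one helper; display_picref and the buffer-ref API names are those of the surrounding example:

/* Hypothetical helper gathering the poll/fetch/display/unref steps
 * shown in the hunks above. */
static void drain_video_sink(AVFilterContext *buffersink_ctx)
{
    AVFilterBufferRef *picref;

    while (avfilter_poll_frame(buffersink_ctx->inputs[0])) {
        av_buffersink_get_buffer_ref(buffersink_ctx, &picref, 0);
        if (picref) {
            display_picref(picref, buffersink_ctx->inputs[0]->time_base);
            avfilter_unref_buffer(picref);   /* release our reference */
        }
    }
}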


@@ -1,235 +0,0 @@
/*
* Copyright (c) 2010 Nicolas George
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2012 Clément Bœsch
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* API example for audio decoding and filtering
*/
#include <unistd.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
const char *filter_descr = "aresample=8000,aconvert=s16:mono";
const char *player = "ffplay -f s16le -ar 8000 -ac 1 -";
static AVFormatContext *fmt_ctx;
static AVCodecContext *dec_ctx;
AVFilterContext *buffersink_ctx;
AVFilterContext *buffersrc_ctx;
AVFilterGraph *filter_graph;
static int audio_stream_index = -1;
static int open_input_file(const char *filename)
{
int ret;
AVCodec *dec;
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
return ret;
}
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
return ret;
}
/* select the audio stream */
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find a audio stream in the input file\n");
return ret;
}
audio_stream_index = ret;
dec_ctx = fmt_ctx->streams[audio_stream_index]->codec;
/* init the audio decoder */
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
return ret;
}
return 0;
}
static int init_filters(const char *filters_descr)
{
char args[512];
int ret;
AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
const int64_t *chlayouts = avfilter_all_channel_layouts;
AVABufferSinkParams *abuffersink_params;
const AVFilterLink *outlink;
filter_graph = avfilter_graph_alloc();
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
if (!dec_ctx->channel_layout)
dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
snprintf(args, sizeof(args), "%d:%d:0x%"PRIx64,
dec_ctx->sample_rate, dec_ctx->sample_fmt, dec_ctx->channel_layout);
ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
args, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
return ret;
}
/* buffer audio sink: to terminate the filter chain. */
abuffersink_params = av_abuffersink_params_alloc();
abuffersink_params->sample_fmts = sample_fmts;
abuffersink_params->channel_layouts = chlayouts;
ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
NULL, abuffersink_params, filter_graph);
av_free(abuffersink_params);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
return ret;
}
/* Endpoints for the filter graph. */
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
if ((ret = avfilter_graph_parse(filter_graph, filters_descr,
&inputs, &outputs, NULL)) < 0)
return ret;
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
return ret;
/* Print summary of the sink buffer
* Note: args buffer is reused to store channel layout string */
outlink = buffersink_ctx->inputs[0];
av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
(int)outlink->sample_rate,
(char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
args);
return 0;
}
static void print_samplesref(AVFilterBufferRef *samplesref)
{
const AVFilterBufferRefAudioProps *props = samplesref->audio;
const int n = props->nb_samples * av_get_channel_layout_nb_channels(props->channel_layout);
const uint16_t *p = (uint16_t*)samplesref->data[0];
const uint16_t *p_end = p + n;
while (p < p_end) {
fputc(*p & 0xff, stdout);
fputc(*p>>8 & 0xff, stdout);
p++;
}
fflush(stdout);
}
int main(int argc, char **argv)
{
int ret;
AVPacket packet;
AVFrame frame;
int got_frame;
if (argc != 2) {
fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
exit(1);
}
avcodec_register_all();
av_register_all();
avfilter_register_all();
if ((ret = open_input_file(argv[1])) < 0)
goto end;
if ((ret = init_filters(filter_descr)) < 0)
goto end;
/* read all packets */
while (1) {
AVFilterBufferRef *samplesref;
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
break;
if (packet.stream_index == audio_stream_index) {
avcodec_get_frame_defaults(&frame);
got_frame = 0;
ret = avcodec_decode_audio4(dec_ctx, &frame, &got_frame, &packet);
av_free_packet(&packet);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
continue;
}
if (got_frame) {
/* push the audio data from decoded frame into the filtergraph */
if (av_buffersrc_add_frame(buffersrc_ctx, &frame, 0) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
break;
}
/* pull filtered audio from the filtergraph */
while (avfilter_poll_frame(buffersink_ctx->inputs[0])) {
av_buffersink_get_buffer_ref(buffersink_ctx, &samplesref, 0);
if (samplesref) {
print_samplesref(samplesref);
avfilter_unref_buffer(samplesref);
}
}
}
}
}
end:
avfilter_graph_free(&filter_graph);
if (dec_ctx)
avcodec_close(dec_ctx);
avformat_close_input(&fmt_ctx);
if (ret < 0 && ret != AVERROR_EOF) {
char buf[1024];
av_strerror(ret, buf, sizeof(buf));
fprintf(stderr, "Error occurred: %s\n", buf);
exit(1);
}
exit(0);
}
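
A hedged usage sketch (the input filename is a placeholder): the program writes raw s16le mono samples to stdout, matching the player command embedded in the source:

# ./filtering_audio input.mp3 | ffplay -f s16le -ar 8000 -ac 1 -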


@@ -50,6 +50,6 @@ int main (int argc, char **argv)
while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
printf("%s=%s\n", tag->key, tag->value);
avformat_close_input(&fmt_ctx);
avformat_free_context(fmt_ctx);
return 0;
}
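
For reference, a self-contained sketch assembled around this hunk; the argument check and the av_register_all() call are standard additions, not shown in the diff:

#include <stdio.h>
#include <libavformat/avformat.h>

int main(int argc, char **argv)
{
    AVFormatContext *fmt_ctx = NULL;
    AVDictionaryEntry *tag = NULL;

    if (argc != 2) {
        fprintf(stderr, "usage: %s <input file>\n", argv[0]);
        return 1;
    }

    av_register_all();
    if (avformat_open_input(&fmt_ctx, argv[1], NULL, NULL) < 0)
        return 1;

    while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
        printf("%s=%s\n", tag->key, tag->value);

    avformat_close_input(&fmt_ctx);   /* closes the file and frees the context */
    return 0;
}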


@@ -33,9 +33,9 @@
#include <string.h>
#include <math.h>
#include <libavutil/mathematics.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include "libavutil/mathematics.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#undef exit
@@ -43,7 +43,7 @@
#define STREAM_DURATION 200.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
static int sws_flags = SWS_BICUBIC;
@@ -52,6 +52,8 @@ static int sws_flags = SWS_BICUBIC;
static float t, tincr, tincr2;
static int16_t *samples;
static uint8_t *audio_outbuf;
static int audio_outbuf_size;
static int audio_input_frame_size;
/*
@@ -61,16 +63,8 @@ static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
{
AVCodecContext *c;
AVStream *st;
AVCodec *codec;
/* find the audio encoder */
codec = avcodec_find_encoder(codec_id);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
st = avformat_new_stream(oc, codec);
st = avformat_new_stream(oc, NULL);
if (!st) {
fprintf(stderr, "Could not alloc stream\n");
exit(1);
@@ -78,12 +72,14 @@ static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
st->id = 1;
c = st->codec;
c->codec_id = codec_id;
c->codec_type = AVMEDIA_TYPE_AUDIO;
/* put sample parameters */
c->sample_fmt = AV_SAMPLE_FMT_S16;
c->bit_rate = 64000;
c->sample_fmt = AV_SAMPLE_FMT_S16;
c->bit_rate = 64000;
c->sample_rate = 44100;
c->channels = 2;
c->channels = 2;
// some formats want stream headers to be separate
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
@@ -95,32 +91,54 @@ static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
static void open_audio(AVFormatContext *oc, AVStream *st)
{
AVCodecContext *c;
AVCodec *codec;
c = st->codec;
/* find the audio encoder */
codec = avcodec_find_encoder(c->codec_id);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
/* open it */
if (avcodec_open2(c, NULL, NULL) < 0) {
if (avcodec_open(c, codec) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
/* init signal generator */
t = 0;
t = 0;
tincr = 2 * M_PI * 110.0 / c->sample_rate;
/* increment frequency by 110 Hz per second */
tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
audio_input_frame_size = 10000;
else
audio_outbuf_size = 10000;
audio_outbuf = av_malloc(audio_outbuf_size);
/* ugly hack for PCM codecs (will be removed ASAP with new PCM
   support) to compute the input frame size in samples */
if (c->frame_size <= 1) {
audio_input_frame_size = audio_outbuf_size / c->channels;
switch(st->codec->codec_id) {
case CODEC_ID_PCM_S16LE:
case CODEC_ID_PCM_S16BE:
case CODEC_ID_PCM_U16LE:
case CODEC_ID_PCM_U16BE:
audio_input_frame_size >>= 1;
break;
default:
break;
}
} else {
audio_input_frame_size = c->frame_size;
samples = av_malloc(audio_input_frame_size *
av_get_bytes_per_sample(c->sample_fmt) *
c->channels);
}
samples = av_malloc(audio_input_frame_size * 2 * c->channels);
}
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
* 'nb_channels' channels. */
/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
'nb_channels' channels */
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
int j, i, v;
@@ -129,9 +147,9 @@ static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
q = samples;
for (j = 0; j < frame_size; j++) {
v = (int)(sin(t) * 10000);
for (i = 0; i < nb_channels; i++)
for(i = 0; i < nb_channels; i++)
*q++ = v;
t += tincr;
t += tincr;
tincr += tincr2;
}
}
@@ -139,28 +157,22 @@ static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
AVCodecContext *c;
AVPacket pkt = { 0 }; // data and size must be 0;
AVFrame *frame = avcodec_alloc_frame();
int got_packet;
AVPacket pkt;
av_init_packet(&pkt);
c = st->codec;
get_audio_frame(samples, audio_input_frame_size, c->channels);
frame->nb_samples = audio_input_frame_size;
avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
(uint8_t *)samples,
audio_input_frame_size *
av_get_bytes_per_sample(c->sample_fmt) *
c->channels, 1);
avcodec_encode_audio2(c, &pkt, frame, &got_packet);
if (!got_packet)
return;
pkt.size = avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = st->index;
pkt.data = audio_outbuf;
/* Write the compressed frame to the media file. */
/* write the compressed frame in the media file */
if (av_interleaved_write_frame(oc, &pkt) != 0) {
fprintf(stderr, "Error while writing audio frame\n");
exit(1);
@@ -172,6 +184,7 @@ static void close_audio(AVFormatContext *oc, AVStream *st)
avcodec_close(st->codec);
av_free(samples);
av_free(audio_outbuf);
}
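
For reference, the AVFrame-based encode path can be reassembled from the lines above into one function; the file-scope names (samples, audio_input_frame_size, get_audio_frame) are those of the example itself:

/* Hypothetical reassembly of the avcodec_encode_audio2() path from
 * the hunk above: wrap the raw samples in an AVFrame and let the
 * encoder fill the packet. */
static void write_audio_frame_frame_api(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c = st->codec;
    AVFrame *frame = avcodec_alloc_frame();
    AVPacket pkt = { 0 };                 /* data and size must be 0 */
    int got_packet;

    av_init_packet(&pkt);
    get_audio_frame(samples, audio_input_frame_size, c->channels);

    frame->nb_samples = audio_input_frame_size;
    avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                             (uint8_t *)samples,
                             audio_input_frame_size *
                             av_get_bytes_per_sample(c->sample_fmt) *
                             c->channels, 1);

    avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (!got_packet)
        return;                           /* frame was buffered by the encoder */

    pkt.stream_index = st->index;
    /* write the compressed frame to the media file */
    if (av_interleaved_write_frame(oc, &pkt) != 0) {
        fprintf(stderr, "Error while writing audio frame\n");
        exit(1);
    }
}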
/**************************************************************/
@@ -181,21 +194,14 @@ static AVFrame *picture, *tmp_picture;
static uint8_t *video_outbuf;
static int frame_count, video_outbuf_size;
/* Add a video output stream. */
/* add a video output stream */
static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
{
AVCodecContext *c;
AVStream *st;
AVCodec *codec;
/* find the video encoder */
codec = avcodec_find_encoder(codec_id);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
st = avformat_new_stream(oc, codec);
st = avformat_new_stream(oc, NULL);
if (!st) {
fprintf(stderr, "Could not alloc stream\n");
exit(1);
@@ -213,30 +219,30 @@ static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
c->codec_id = codec_id;
/* Put sample parameters. */
/* put sample parameters */
c->bit_rate = 400000;
/* Resolution must be a multiple of two. */
c->width = 352;
c->height = 288;
/* timebase: This is the fundamental unit of time (in seconds) in terms
* of which frame timestamps are represented. For fixed-fps content,
* timebase should be 1/framerate and timestamp increments should be
* identical to 1. */
/* resolution must be a multiple of two */
c->width = 352;
c->height = 288;
/* time base: this is the fundamental unit of time (in seconds) in terms
of which frame timestamps are represented. for fixed-fps content,
timebase should be 1/framerate and timestamp increments should be
identically 1. */
c->time_base.den = STREAM_FRAME_RATE;
c->time_base.num = 1;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
/* just for testing, we also add B frames */
c->max_b_frames = 2;
}
if (c->codec_id == CODEC_ID_MPEG1VIDEO) {
if (c->codec_id == CODEC_ID_MPEG1VIDEO){
/* Needed to avoid using macroblocks in which some coeffs overflow.
* This does not happen with normal video, it just happens here as
* the motion of the chroma plane does not match the luma plane. */
c->mb_decision = 2;
This does not happen with normal video, it just happens here as
the motion of the chroma plane does not match the luma plane. */
c->mb_decision=2;
}
/* Some formats want stream headers to be separate. */
// some formats want stream headers to be separate
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
@@ -252,7 +258,7 @@ static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
picture = avcodec_alloc_frame();
if (!picture)
return NULL;
size = avpicture_get_size(pix_fmt, width, height);
size = avpicture_get_size(pix_fmt, width, height);
picture_buf = av_malloc(size);
if (!picture_buf) {
av_free(picture);
@@ -265,38 +271,46 @@ static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
static void open_video(AVFormatContext *oc, AVStream *st)
{
AVCodec *codec;
AVCodecContext *c;
c = st->codec;
/* find the video encoder */
codec = avcodec_find_encoder(c->codec_id);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
/* open the codec */
if (avcodec_open2(c, NULL, NULL) < 0) {
if (avcodec_open(c, codec) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
video_outbuf = NULL;
if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
/* Allocate output buffer. */
/* XXX: API change will be done. */
/* Buffers passed into lav* can be allocated any way you prefer,
* as long as they're aligned enough for the architecture, and
* they're freed appropriately (such as using av_free for buffers
* allocated with av_malloc). */
/* allocate output buffer */
/* XXX: API change will be done */
/* buffers passed into lav* can be allocated any way you prefer,
as long as they're aligned enough for the architecture, and
they're freed appropriately (such as using av_free for buffers
allocated with av_malloc) */
video_outbuf_size = 200000;
video_outbuf = av_malloc(video_outbuf_size);
video_outbuf = av_malloc(video_outbuf_size);
}
/* Allocate the encoded raw picture. */
/* allocate the encoded raw picture */
picture = alloc_picture(c->pix_fmt, c->width, c->height);
if (!picture) {
fprintf(stderr, "Could not allocate picture\n");
exit(1);
}
/* If the output format is not YUV420P, then a temporary YUV420P
* picture is needed too. It is then converted to the required
* output format. */
/* if the output format is not YUV420P, then a temporary YUV420P
picture is needed too. It is then converted to the required
output format */
tmp_picture = NULL;
if (c->pix_fmt != PIX_FMT_YUV420P) {
tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
@@ -307,22 +321,23 @@ static void open_video(AVFormatContext *oc, AVStream *st)
}
}
/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
int width, int height)
/* prepare a dummy image */
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
int x, y, i;
i = frame_index;
/* Y */
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++) {
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
}
}
/* Cb and Cr */
for (y = 0; y < height / 2; y++) {
for (x = 0; x < width / 2; x++) {
for (y = 0; y < height/2; y++) {
for (x = 0; x < width/2; x++) {
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
}
@@ -338,13 +353,13 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
c = st->codec;
if (frame_count >= STREAM_NB_FRAMES) {
/* No more frames to compress. The codec has a latency of a few
* frames if using B-frames, so we get the last frames by
* passing the same picture again. */
/* no more frame to compress. The codec has a latency of a few
frames if using B frames, so we get the last frames by
passing the same picture again */
} else {
if (c->pix_fmt != PIX_FMT_YUV420P) {
/* as we only generate a YUV420P picture, we must convert it
* to the codec pixel format if needed */
to the codec pixel format if needed */
if (img_convert_ctx == NULL) {
img_convert_ctx = sws_getContext(c->width, c->height,
PIX_FMT_YUV420P,
@@ -352,8 +367,7 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
c->pix_fmt,
sws_flags, NULL, NULL, NULL);
if (img_convert_ctx == NULL) {
fprintf(stderr,
"Cannot initialize the conversion context\n");
fprintf(stderr, "Cannot initialize the conversion context\n");
exit(1);
}
}
@@ -365,38 +379,36 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
}
}
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
/* Raw video case - the API will change slightly in the near
* future for that. */
/* raw video case. The API will change slightly in the near
future for that. */
AVPacket pkt;
av_init_packet(&pkt);
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = st->index;
pkt.data = (uint8_t *)picture;
pkt.size = sizeof(AVPicture);
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = st->index;
pkt.data = (uint8_t *)picture;
pkt.size = sizeof(AVPicture);
ret = av_interleaved_write_frame(oc, &pkt);
} else {
/* encode the image */
out_size = avcodec_encode_video(c, video_outbuf,
video_outbuf_size, picture);
/* If size is zero, it means the image was buffered. */
out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
/* if zero size, it means the image was buffered */
if (out_size > 0) {
AVPacket pkt;
av_init_packet(&pkt);
if (c->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(c->coded_frame->pts,
c->time_base, st->time_base);
if (c->coded_frame->key_frame)
pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
if(c->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = st->index;
pkt.data = video_outbuf;
pkt.size = out_size;
pkt.data = video_outbuf;
pkt.size = out_size;
/* Write the compressed frame to the media file. */
/* write the compressed frame in the media file */
ret = av_interleaved_write_frame(oc, &pkt);
} else {
ret = 0;
@@ -433,7 +445,7 @@ int main(int argc, char **argv)
double audio_pts, video_pts;
int i;
/* Initialize libavcodec, and register all codecs and formats. */
/* initialize libavcodec, and register all codecs and formats */
av_register_all();
if (argc != 2) {
@@ -458,8 +470,8 @@ int main(int argc, char **argv)
}
fmt = oc->oformat;
/* Add the audio and video streams using the default format codecs
* and initialize the codecs. */
/* add the audio and video streams using the default format codecs
and initialize the codecs */
video_st = NULL;
audio_st = NULL;
if (fmt->video_codec != CODEC_ID_NONE) {
@@ -469,15 +481,15 @@ int main(int argc, char **argv)
audio_st = add_audio_stream(oc, fmt->audio_codec);
}
/* Now that all the parameters are set, we can open the audio and
* video codecs and allocate the necessary encode buffers. */
av_dump_format(oc, 0, filename, 1);
/* now that all the parameters are set, we can open the audio and
video codecs and allocate the necessary encode buffers */
if (video_st)
open_video(oc, video_st);
if (audio_st)
open_audio(oc, audio_st);
av_dump_format(oc, 0, filename, 1);
/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
@@ -486,20 +498,18 @@ int main(int argc, char **argv)
}
}
/* Write the stream header, if any. */
avformat_write_header(oc, NULL);
/* write the stream header, if any */
av_write_header(oc);
picture->pts = 0;
for (;;) {
/* Compute current audio and video time. */
for(;;) {
/* compute current audio and video time */
if (audio_st)
audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
else
audio_pts = 0.0;
if (video_st)
video_pts = (double)video_st->pts.val * video_st->time_base.num /
video_st->time_base.den;
video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
else
video_pts = 0.0;
@@ -516,27 +526,28 @@ int main(int argc, char **argv)
}
}
/* Write the trailer, if any. The trailer must be written before you
* close the CodecContexts open when you wrote the header; otherwise
* av_write_trailer() may try to use memory that was freed on
* av_codec_close(). */
/* write the trailer, if any. the trailer must be written
* before you close the CodecContexts open when you wrote the
* header; otherwise write_trailer may try to use memory that
* was freed on av_codec_close() */
av_write_trailer(oc);
/* Close each codec. */
/* close each codec */
if (video_st)
close_video(oc, video_st);
if (audio_st)
close_audio(oc, audio_st);
/* Free the streams. */
for (i = 0; i < oc->nb_streams; i++) {
/* free the streams */
for(i = 0; i < oc->nb_streams; i++) {
av_freep(&oc->streams[i]->codec);
av_freep(&oc->streams[i]);
}
if (!(fmt->flags & AVFMT_NOFILE))
/* Close the output file. */
if (!(fmt->flags & AVFMT_NOFILE)) {
/* close the output file */
avio_close(oc->pb);
}
/* free the stream */
av_free(oc);


@@ -272,44 +272,6 @@ ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
rm temp[12].[av] all.[av]
@end example
@section -profile option fails when encoding H.264 video with AAC audio
@command{ffmpeg} prints an error like
@example
Undefined constant or missing '(' in 'baseline'
Unable to parse option value "baseline"
Error setting option profile to value baseline.
@end example
Short answer: write @option{-profile:v} instead of @option{-profile}.
Long answer: this happens because the @option{-profile} option can apply to both
video and audio. Specifically the AAC encoder also defines some profiles, none
of which are named @var{baseline}.
The solution is to apply the @option{-profile} option to the video stream only
by using @url{http://ffmpeg.org/ffmpeg.html#Stream-specifiers-1, Stream specifiers}.
Appending @code{:v} to it will do exactly that.
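
For example, the following hypothetical command line (filenames are placeholders) applies the profile to the video stream only:

@example
ffmpeg -i input.mov -c:v libx264 -profile:v baseline output.mp4
@end example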
@section Using @option{-f lavfi}, audio becomes mono for no apparent reason.
Use @option{-dumpgraph -} to find out exactly where the channel layout is
lost.
Most likely, it is through @code{auto-inserted aconvert}. Try to understand
why the converting filter was needed at that place.
Just before the output is a likely place, as @option{-f lavfi} currently
only support packed S16.
Then insert the correct @code{aconvert} explicitly in the filter graph,
specifying the exact format.
@example
aconvert=s16:stereo:packed
@end example
@chapter Development
@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?
@@ -415,16 +377,4 @@ wrong if it is larger than the average!
For example, if you have mixed 25 and 30 fps content, then r_frame_rate
will be 150.
@section Why is @code{make fate} not running all tests?
Make sure you have the fate-suite samples and the @code{SAMPLES} Make variable
or @code{FATE_SAMPLES} environment variable or the @code{--samples}
@command{configure} option is set to the right path.
@section Why is @code{make fate} not finding the samples?
Do you happen to have a @code{~} character in the samples path to indicate a
home directory? The value is used in ways where the shell cannot expand it,
causing FATE to not find files. Just replace @code{~} by the full path.
@bye


@@ -5,172 +5,131 @@
@center @titlefont{FATE Automated Testing Environment}
@end titlepage
@node Top
@top
@contents
@chapter Introduction
FATE is an extended regression suite on the client-side and a means
for results aggregation and presentation on the server-side.
FATE provides a regression testsuite embedded within the FFmpeg build system.
It can be run locally and optionally configured to send reports to a web
aggregator and viewer @url{http://fate.ffmpeg.org}.
The first part of this document explains how you can use FATE from
your FFmpeg source directory to test your ffmpeg binary. The second
part describes how you can run FATE to submit the results to FFmpeg's
FATE server.
It is advised to run FATE before submitting patches to the current codebase
and to provide new tests when submitting patches that add additional features.
In any case, you can have a look at the publicly viewable FATE results
by visiting this website:
@chapter Running FATE
@url{http://fate.ffmpeg.org/}
@section Samples and References
In order to run, FATE needs a large amount of data (samples and references)
that is provided separately from the actual source distribution.
This is especially recommended for all people contributing source
code to FFmpeg, as it makes it possible to see whether some test on some
platform broke with their recent contribution. This usually happens on
the platforms the developers could not test on.
To inform the build system about the testsuite location, pass
@option{--samples=<path to the samples>} to @command{configure} or set the
@var{SAMPLES} Make variable or the @var{FATE_SAMPLES} environment variable
to a suitable value.
The second part of this document describes how you can run FATE to
submit your results to FFmpeg's FATE server. If you want to submit your
results be sure to check that your combination of CPU, OS and compiler
is not already listed on the above mentioned website.
In the third part you can find a comprehensive listing of FATE makefile
targets and variables.
@chapter Using FATE from your FFmpeg source directory
If you want to run FATE on your machine you need to have the samples
in place. You can get the samples via the build target fate-rsync.
Use this command from the top-level source directory:
The dataset is available through @command{rsync}; it is possible to fetch
the current samples either with a plain rsync command or through a specific
@ref{Makefile target}.
@example
make fate-rsync SAMPLES=fate-suite/
make fate SAMPLES=fate-suite/
# rsync -aL rsync://fate.ffmpeg.org/fate-suite/ fate-suite
@end example
The above commands set the samples location by passing a makefile
variable via command line. It is also possible to set the samples
location at source configuration time by invoking configure with
`--samples=<path to the samples directory>'. Afterwards you can
invoke the makefile targets without setting the SAMPLES makefile
variable. This is illustrated by the following commands:
@example
./configure --samples=fate-suite/
make fate-rsync
make fate
# make fate-rsync SAMPLES=fate-suite
@end example
Yet another way to tell FATE about the location of the sample
directory is by making sure the environment variable FATE_SAMPLES
contains the path to your samples directory. This can be achieved
by e.g. putting that variable in your shell profile or by setting
it in your interactive session.
@example
FATE_SAMPLES=fate-suite/ make fate
@end example
@float NOTE
Do not put a '~' character in the samples path to indicate a home
directory. Because of shell nuances, this will cause FATE to fail.
@end float
@chapter Submitting the results to the FFmpeg result aggregation server
To submit your results to the server you should run fate through the
shell script tests/fate.sh from the FFmpeg sources. This script needs
to be invoked with a configuration file as its first argument.
@example
tests/fate.sh /path/to/fate_config
@end example
A configuration file template with comments describing the individual
configuration variables can be found at @file{tests/fate_config.sh.template}.
@ifhtml
The mentioned configuration template is also available here:
@verbatiminclude ../tests/fate_config.sh.template
@end ifhtml
Create a configuration that suits your needs, based on the configuration
template. The `slot' configuration variable can be any string that is not
yet used, but it is suggested that you name it adhering to the following
pattern <arch>-<os>-<compiler>-<compiler version>. The configuration file
itself will be sourced in a shell script, therefore all shell features may
be used. This enables you to setup the environment as you need it for your
build.
For your first test runs the `fate_recv' variable should be empty or
commented out. This will run everything as normal except that it will omit
the submission of the results to the server. The following files should be
present in $workdir as specified in the configuration file:
@itemize
@item configure.log
@item compile.log
@item test.log
@item report
@item version
@end itemize
When you have everything working properly you can create an SSH key and
send its public part to the FATE server administrator.
Configure your SSH client to use public key authentication with that key
when connecting to the FATE server. Also do not forget to check the identity
of the server and to accept its host key. This can usually be achieved by
running your SSH client manually and killing it after you accepted the key.
The FATE server's fingerprint is:
b1:31:c8:79:3f:04:1d:f8:f2:23:26:5a:fd:55:fa:92
The only thing left is to automate the execution of the fate.sh script and
the synchronisation of the samples directory.
@chapter FATE makefile targets and variables
@section Makefile targets
@chapter Manual Run
The FATE regression tests can be run through @command{make}.
Specific Makefile targets and variables are available:
@anchor{Makefile target}
@section FATE Makefile targets
@table @option
@item fate-rsync
Download/synchronize sample files to the configured samples directory.
@item fate-list
Will list all fate/regression test targets.
List all fate/regression test targets.
@item fate-rsync
Shortcut to download the fate test samples to the specified testsuite location.
@item fate
Run the FATE test suite (requires the fate-suite dataset).
Run the FATE test suite (requires the fate-suite dataset).
@end table
@section Makefile variables
@section Fate Makefile variables
@table @option
@item V
Verbosity level, can be set to 0, 1 or 2.
@itemize
@item 0: show just the test arguments
@item 1: show just the command used in the test
@item 2: show everything
@end itemize
Verbosity level, can be set to 0, 1 or 2.
@table @option
@item 0
show just the test arguments
@item 1
show just the command used in the test
@item 2
show everything
@end table
@item SAMPLES
Specify or override the path to the FATE samples at make time, it has a
meaning only while running the regression tests.
Specify or override the path to the FATE samples at make time, it has a
meaning only while running the regression tests.
@item THREADS
Specify how many threads to use while running regression tests, it is
quite useful to detect thread-related regressions.
@item CPUFLAGS
Specify CPU flags.
Specify how many threads to use while running regression tests, it is
quite useful to detect thread-related regressions.
@end table
Example:
@example
make V=1 SAMPLES=/var/fate/samples THREADS=2 CPUFLAGS=mmx fate
make V=1 SAMPLES=/var/fate/samples THREADS=2 fate
@end example
@chapter Automated Tests
In order to automatically test specific configurations, e.g. multiple
compilers, @command{tests/fate.sh} is provided.
This shell script builds FFmpeg, runs the regression tests and prepares a
report that can be sent to @url{fate.ffmpeg.org} or directly examined locally.
@section Testing Profiles
The configuration file passed to @command{fate.sh} is a shell script as well.
It must provide at least a @var{slot} identifier, the @var{repo} from
which to fetch the sources, the @var{samples} directory, and a @var{workdir}
with enough space to build and run all the tests.
An optional submit command @var{fate_recv} and a @var{comment} describing
the testing profile are also available.
Additional optional parameters to tune the FFmpeg build and reporting process
can be passed.
@example
slot= # some unique identifier
repo=git://source.ffmpeg.org/ffmpeg.git # the source repository
samples=/path/to/fate/samples
workdir= # directory in which to do all the work
fate_recv="ssh -T fate@fate.ffmpeg.org" # command to submit report
comment= # optional description
# the following are optional and map to configure options
arch=
cpu=
cross_prefix=
cc=
target_os=
sysroot=
target_exec=
target_path=
extra_cflags=
extra_ldflags=
extra_libs=
extra_conf= # extra configure options not covered above
#make= # name of GNU make if not 'make'
makeopts= # extra options passed to 'make'
#tar= # command to create a tar archive from its arguments on
# stdout, defaults to 'tar c'
@end example
@section Submitting Reports
In order to send reports you need to create an @command{ssh} key and send it
to the fate server administrator.
The current server fingerprint is @var{b1:31:c8:79:3f:04:1d:f8:f2:23:26:5a:fd:55:fa:92}

doc/fate.txt Normal file

@@ -0,0 +1,139 @@
FATE Automated Testing Environment
==================================
FATE is an extended regression suite on the client-side and a means
for results aggregation and presentation on the server-side.
The first part of this document explains how you can use FATE from
your FFmpeg source directory to test your ffmpeg binary. The second
part describes how you can run FATE to submit the results to FFmpeg's
FATE server.
In any case you can have a look at the publicly viewable FATE results
by visiting this website:
http://fate.ffmpeg.org/
This is especially recommended for all people contributing source
code to FFmpeg, as it makes it possible to see whether some test on some
platform broke with their recent contribution. This usually happens on
the platforms the developers could not test on.
The second part of this document describes how you can run FATE to
submit your results to FFmpeg's FATE server. If you want to submit your
results be sure to check that your combination of CPU, OS and compiler
is not already listed on the above mentioned website.
In the third part you can find a comprehensive listing of FATE makefile
targets and variables.
1. Using FATE from your FFmpeg source directory
-----------------------------------------------
If you want to run FATE on your machine you need to have the samples
in place. You can get the samples via the build target fate-rsync.
Use this command from the top-level source directory:
# make fate-rsync SAMPLES=fate-suite/
# make fate SAMPLES=fate-suite/
The above commands set the samples location by passing a makefile
variable via command line. It is also possible to set the samples
location at source configuration time by invoking configure with
`--samples=<path to the samples directory>'. Afterwards you can
invoke the makefile targets without setting the SAMPLES makefile
variable. This is illustrated by the following commands:
# ./configure --samples=fate-suite/
# make fate-rsync
# make fate
Yet another way to tell FATE about the location of the sample
directory is by making sure the environment variable FATE_SAMPLES
contains the path to your samples directory. This can be achieved
by e.g. putting that variable in your shell profile or by setting
it in your interactive session.
# FATE_SAMPLES=fate-suite/ make fate
NOTE:
Do not put a '~' character in the samples path to indicate a home
directory. Because of shell nuances, this will cause FATE to fail.
2. Submitting the results to the FFmpeg result aggregation server
-----------------------------------------------------------------
To submit your results to the server you should run fate through the
shell script tests/fate.sh from the FFmpeg sources. This script needs
to be invoked with a configuration file as its first argument.
# tests/fate.sh /path/to/fate_config
A configuration file template with comments describing the individual
configuration variables can be found at tests/fate_config.sh.template .
Create a configuration that suits your needs, based on the configuration
template. The `slot' configuration variable can be any string that is not
yet used, but it is suggested that you name it adhering to the following
pattern <arch>-<os>-<compiler>-<compiler version>. The configuration file
itself will be sourced in a shell script, therefore all shell features may
be used. This enables you to setup the environment as you need it for your
build.
For your first test runs the `fate_recv' variable should be empty or
commented out. This will run everything as normal except that it will omit
the submission of the results to the server. The following files should be
present in $workdir as specified in the configuration file:
- configure.log
- compile.log
- test.log
- report
- version
When you have everything working properly you can create an SSH key and
send its public part to the FATE server administrator.
Configure your SSH client to use public key authentication with that key
when connecting to the FATE server. Also do not forget to check the identity
of the server and to accept its host key. This can usually be achieved by
running your SSH client manually and killing it after you accepted the key.
The FATE server's fingerprint is:
b1:31:c8:79:3f:04:1d:f8:f2:23:26:5a:fd:55:fa:92
The only thing left is to automate the execution of the fate.sh script and
the synchronisation of the samples directory.
3. FATE makefile targets and variables
--------------------------------------
FATE Makefile targets:
fate-list
Will list all fate/regression test targets.
fate
Run the FATE test suite (requires the fate-suite dataset).
FATE Makefile variables:
V
Verbosity level, can be set to 0, 1 or 2.
* 0: show just the test arguments
* 1: show just the command used in the test
* 2: show everything
SAMPLES
Specify or override the path to the FATE samples at make time, it has a
meaning only while running the regression tests.
THREADS
Specify how many threads to use while running regression tests, it is
quite useful to detect thread-related regressions.
Example:
make V=1 SAMPLES=/var/fate/samples THREADS=2 fate

doc/ffmpeg-mt-authorship.txt Normal file

File diff suppressed because it is too large


@@ -29,7 +29,7 @@ rates and resize video on the fly with a high quality polyphase filter.
ffmpeg reads from an arbitrary number of input "files" (which can be regular
files, pipes, network streams, grabbing devices, etc.), specified by the
@code{-i} option, and writes to an arbitrary number of output "files", which are
specified by a plain output filename. Anything found on the command line which
specified by a plain output filename. Anything found on the commandline which
cannot be interpreted as an option is considered to be an output filename.
Each input or output file can in principle contain any number of streams of
@@ -143,7 +143,7 @@ Stop writing the output after its duration reaches @var{duration}.
@var{duration} may be a number in seconds, or in @code{hh:mm:ss[.xxx]} form.
@item -fs @var{limit_size} (@emph{output})
Set the file size limit, expressed in bytes.
Set the file size limit.
@item -ss @var{position} (@emph{input/output})
When used as an input option (before @code{-i}), seeks in this input file to
@@ -164,7 +164,7 @@ streams are delayed by @var{offset} seconds.
Set the recording timestamp in the container.
The syntax for @var{time} is:
@example
now|([(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...])|(HHMMSS[.m...]))[Z|z])
now|([(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH[:MM[:SS[.m...]]])|(HH[MM[SS[.m...]]]))[Z|z])
@end example
If the value is "now" it takes the current time.
Time is local time unless 'Z' or 'z' is appended, in which case it is
@@ -187,9 +187,9 @@ For example, for setting the title in the output file:
ffmpeg -i in.avi -metadata title="my title" out.flv
@end example
To set the language of the first audio stream:
To set the language of the second stream:
@example
ffmpeg -i INPUT -metadata:s:a:1 language=eng OUTPUT
ffmpeg -i INPUT -metadata:s:1 language=eng OUTPUT
@end example
@item -target @var{type} (@emph{output})
@@ -224,23 +224,12 @@ codec-dependent.
@var{filter_graph} is a description of the filter graph to apply to
the stream. Use @code{-filters} to show all the available filters
(including also sources and sinks).
See also the @option{-filter_complex} option if you want to create filter graphs
with multiple inputs and/or outputs.
@item -pre[:@var{stream_specifier}] @var{preset_name} (@emph{output,per-stream})
Specify the preset for matching stream(s).
@item -stats (@emph{global})
Print encoding progress/statistics. On by default.
@item -debug_ts (@emph{global})
Print timestamp information. It is off by default. This option is
mostly useful for testing and debugging purposes, and the output
format may change from one version to another, so it should not be
employed by portable scripts.
See also the option @code{-fdebug ts}.
@item -attach @var{filename} (@emph{output})
Add an attachment to the output file. This is supported by a few formats
like Matroska for e.g. fonts used in rendering subtitles. Attachments
@@ -282,10 +271,70 @@ attachments.
@item -vframes @var{number} (@emph{output})
Set the number of video frames to record. This is an alias for @code{-frames:v}.
@item -r[:@var{stream_specifier}] @var{fps} (@emph{input/output,per-stream})
Set frame rate (Hz value, fraction or abbreviation), (default = 25). For output
streams implies @code{-vsync cfr}.
Set frame rate (Hz value, fraction or abbreviation), (default = 25).
@item -s[:@var{stream_specifier}] @var{size} (@emph{input/output,per-stream})
Set frame size. The format is @samp{wxh} (default - same as source).
The following abbreviations are recognized:
@table @samp
@item sqcif
128x96
@item qcif
176x144
@item cif
352x288
@item 4cif
704x576
@item 16cif
1408x1152
@item qqvga
160x120
@item qvga
320x240
@item vga
640x480
@item svga
800x600
@item xga
1024x768
@item uxga
1600x1200
@item qxga
2048x1536
@item sxga
1280x1024
@item qsxga
2560x2048
@item hsxga
5120x4096
@item wvga
852x480
@item wxga
1366x768
@item wsxga
1600x1024
@item wuxga
1920x1200
@item woxga
2560x1600
@item wqsxga
3200x2048
@item wquxga
3840x2400
@item whsxga
6400x4096
@item whuxga
7680x4800
@item cga
320x200
@item ega
640x350
@item hd480
852x480
@item hd720
1280x720
@item hd1080
1920x1080
@end table
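
For example, a hypothetical command selecting one of the abbreviations above:

@example
ffmpeg -i input.mpg -s hd720 output.mp4
@end example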
@item -aspect[:@var{stream_specifier}] @var{aspect} (@emph{output,per-stream})
Set the video display aspect ratio specified by @var{aspect}.
@@ -312,7 +361,25 @@ pad=width:height:x:y:color instead.
@item -vn (@emph{output})
Disable video recording.
@item -bt @var{tolerance}
Set video bitrate tolerance (in bits, default 4000k).
Has a minimum value of: (target_bitrate/target_framerate).
In 1-pass mode, bitrate tolerance specifies how far ratecontrol is
willing to deviate from the target average bitrate value. This is
not related to min/max bitrate. Lowering tolerance too much has
an adverse effect on quality.
@item -maxrate @var{bitrate}
Set max video bitrate (in bit/s).
Requires -bufsize to be set.
@item -minrate @var{bitrate}
Set min video bitrate (in bit/s).
Most useful in setting up a CBR encode:
@example
ffmpeg -i myfile.avi -b:v 4000k -minrate 4000k -maxrate 4000k -bufsize 1835k out.m2v
@end example
It is of little use otherwise.
@item -bufsize @var{size}
Set video buffer verifier buffer size (in bits).
@item -vcodec @var{codec} (@emph{output})
Set the video codec. This is an alias for @code{-codec:v}.
@item -same_quant
@@ -357,29 +424,202 @@ also sources and sinks). This is an alias for @code{-filter:v}.
@item -pix_fmt[:@var{stream_specifier}] @var{format} (@emph{input/output,per-stream})
Set pixel format. Use @code{-pix_fmts} to show all the supported
pixel formats.
If the requested pixel format can not be selected, ffmpeg will print a
warning and select the best pixel format supported by the encoder.
If @var{pix_fmt} is prefixed by a @code{+}, ffmpeg will exit with an error
if the requested pixel format can not be selected, and automatic conversions
inside filter graphs are disabled.
If @var{pix_fmt} is a single @code{+}, ffmpeg selects the same pixel format
as the input (or graph output) and automatic conversions are disabled.
@item -sws_flags @var{flags} (@emph{input/output})
Set SwScaler flags.
@item -g @var{gop_size}
Set the group of pictures size.
@item -intra
deprecated, use -g 1
@item -vdt @var{n}
Discard threshold.
@item -qmin @var{q}
minimum video quantizer scale (VBR)
@item -qmax @var{q}
maximum video quantizer scale (VBR)
@item -qdiff @var{q}
maximum difference between the quantizer scales (VBR)
@item -qblur @var{blur}
video quantizer scale blur (VBR) (range 0.0 - 1.0)
@item -qcomp @var{compression}
video quantizer scale compression (VBR) (default 0.5).
Constant of ratecontrol equation. Recommended range for default rc_eq: 0.0-1.0
@item -lmin @var{lambda}
minimum video lagrange factor (VBR)
@item -lmax @var{lambda}
max video lagrange factor (VBR)
@item -mblmin @var{lambda}
minimum macroblock quantizer scale (VBR)
@item -mblmax @var{lambda}
maximum macroblock quantizer scale (VBR)
These four options (lmin, lmax, mblmin, mblmax) use 'lambda' units,
but you may use the QP2LAMBDA constant to easily convert from 'q' units:
@example
ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
@end example
@item -rc_init_cplx @var{complexity}
initial complexity for single pass encoding
@item -b_qfactor @var{factor}
qp factor between P- and B-frames
@item -i_qfactor @var{factor}
qp factor between P- and I-frames
@item -b_qoffset @var{offset}
qp offset between P- and B-frames
@item -i_qoffset @var{offset}
qp offset between P- and I-frames
@item -rc_eq @var{equation}
Set rate control equation (see section "Expression Evaluation")
(default = @code{tex^qComp}).
When computing the rate control equation expression, besides the
standard functions defined in the section "Expression Evaluation", the
following functions are available:
@table @var
@item bits2qp(bits)
@item qp2bits(qp)
@end table
and the following constants are available:
@table @var
@item iTex
@item pTex
@item tex
@item mv
@item fCode
@item iCount
@item mcVar
@item var
@item isI
@item isP
@item isB
@item avgQP
@item qComp
@item avgIITex
@item avgPITex
@item avgPPTex
@item avgBPTex
@item avgTex
@end table
@item -rc_override[:@var{stream_specifier}] @var{override} (@emph{output,per-stream})
Rate control override for specific intervals, formatted as "int,int,int"
list separated with slashes. Two first values are the beginning and
end frame numbers, last one is quantizer to use if positive, or quality
factor if negative.
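
A hypothetical example, encoding frames 0-100 with quantizer 24 and frames
101-200 with quality factor 2:

@example
ffmpeg -i input.avi -rc_override 0,100,24/101,200,-2 output.mp4
@end example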
@item -me_method @var{method}
Set motion estimation method to @var{method}.
Available methods are (from lowest to best quality):
@table @samp
@item zero
Try just the (0, 0) vector.
@item phods
@item log
@item x1
@item hex
@item umh
@item epzs
(default method)
@item full
exhaustive search (slow and marginally better than epzs)
@end table
@item -dct_algo @var{algo}
Set DCT algorithm to @var{algo}. Available values are:
@table @samp
@item 0
FF_DCT_AUTO (default)
@item 1
FF_DCT_FASTINT
@item 2
FF_DCT_INT
@item 3
FF_DCT_MMX
@item 4
FF_DCT_MLIB
@item 5
FF_DCT_ALTIVEC
@end table
@item -idct_algo @var{algo}
Set IDCT algorithm to @var{algo}. Available values are:
@table @samp
@item 0
FF_IDCT_AUTO (default)
@item 1
FF_IDCT_INT
@item 2
FF_IDCT_SIMPLE
@item 3
FF_IDCT_SIMPLEMMX
@item 4
FF_IDCT_LIBMPEG2MMX
@item 5
FF_IDCT_PS2
@item 6
FF_IDCT_MLIB
@item 7
FF_IDCT_ARM
@item 8
FF_IDCT_ALTIVEC
@item 9
FF_IDCT_SH4
@item 10
FF_IDCT_SIMPLEARM
@end table
@item -er @var{n}
Set error resilience to @var{n}.
@table @samp
@item 1
FF_ER_CAREFUL (default)
@item 2
FF_ER_COMPLIANT
@item 3
FF_ER_AGGRESSIVE
@item 4
FF_ER_VERY_AGGRESSIVE
@end table
@item -ec @var{bit_mask}
Set error concealment to @var{bit_mask}. @var{bit_mask} is a bit mask of
the following values:
@table @samp
@item 1
FF_EC_GUESS_MVS (default = enabled)
@item 2
FF_EC_DEBLOCK (default = enabled)
@end table
@item -bf @var{frames}
Use 'frames' B-frames (supported for MPEG-1, MPEG-2 and MPEG-4).
@item -mbd @var{mode}
macroblock decision
@table @samp
@item 0
FF_MB_DECISION_SIMPLE: Use mb_cmp (cannot change it yet in ffmpeg).
@item 1
FF_MB_DECISION_BITS: Choose the one which needs the fewest bits.
@item 2
FF_MB_DECISION_RD: rate distortion
@end table
@item -4mv
Use four motion vector by macroblock (MPEG-4 only).
@item -part
Use data partitioning (MPEG-4 only).
@item -bug @var{param}
Work around encoder bugs that are not auto-detected.
@item -strict @var{strictness}
How strictly to follow the standards.
@item -aic
Enable Advanced intra coding (h263+).
@item -umv
Enable Unlimited Motion Vector (h263+)
@item -deinterlace
Deinterlace pictures.
This option is deprecated since the deinterlacing is very low quality.
Use the yadif filter with @code{-filter:v yadif}.
@item -ilme
Force interlacing support in encoder (MPEG-2 and MPEG-4 only).
Use this option if your input file is interlaced and you want
@@ -438,11 +678,6 @@ Set the audio codec. This is an alias for @code{-codec:a}.
@item -sample_fmt[:@var{stream_specifier}] @var{sample_fmt} (@emph{output,per-stream})
Set the audio sample format. Use @code{-sample_fmts} to get a list
of supported sample formats.
@item -af @var{filter_graph} (@emph{output})
@var{filter_graph} is a description of the filter graph to apply to
the input audio.
Use the option "-filters" to show all the available filters (including
also sources and sinks). This is an alias for @code{-filter:a}.
@end table
@section Advanced Audio options:
@@ -450,6 +685,28 @@ also sources and sinks). This is an alias for @code{-filter:a}.
@table @option
@item -atag @var{fourcc/tag} (@emph{output})
Force audio tag/fourcc. This is an alias for @code{-tag:a}.
@item -audio_service_type @var{type}
Set the type of service that the audio stream contains.
@table @option
@item ma
Main Audio Service (default)
@item ef
Effects
@item vi
Visually Impaired
@item hi
Hearing Impaired
@item di
Dialogue
@item co
Commentary
@item em
Emergency
@item vo
Voice Over
@item ka
Karaoke
@end table
@item -absf @var{bitstream_filter}
Deprecated, see -bsf
@end table
@@ -477,7 +734,7 @@ Synchronize read on input.
@section Advanced options
@table @option
@item -map [-]@var{input_file_id}[:@var{stream_specifier}][,@var{sync_file_id}[:@var{stream_specifier}]] | @var{[linklabel]} (@emph{output})
@item -map [-]@var{input_file_id}[:@var{stream_specifier}][,@var{sync_file_id}[:@var{stream_specifier}]] (@emph{output})
Designate one or more input streams as a source for the output file. Each input
stream is identified by the input file index @var{input_file_id} and
@@ -493,10 +750,6 @@ the source for output stream 1, etc.
A @code{-} character before the stream identifier creates a "negative" mapping.
It disables matching streams from already created mappings.
An alternative @var{[linklabel]} form will map outputs from complex filter
graphs (see the @option{-filter_complex} option) to the output file.
@var{linklabel} must correspond to a defined output link label in the graph.
For example, to map ALL streams from the first input file to output
@example
ffmpeg -i INPUT -map 0 output
@@ -534,7 +787,7 @@ Note that using this option disables the default mappings for this output file.
@item -map_channel [@var{input_file_id}.@var{stream_specifier}.@var{channel_id}|-1][:@var{output_file_id}.@var{stream_specifier}]
Map an audio channel from a given input to an output. If
@var{output_file_id}.@var{stream_specifier} is not set, the audio channel will
@var{output_file_id}.@var{stream_specifier} are not set, the audio channel will
be mapped on all the audio streams.
Using "-1" instead of
@@ -556,63 +809,34 @@ The order of the "-map_channel" option specifies the order of the channels in
the output stream. The output channel layout is guessed from the number of
channels mapped (mono if one "-map_channel", stereo if two, etc.). Using "-ac"
in combination of "-map_channel" makes the channel gain levels to be updated if
input and output channel layouts don't match (for instance two "-map_channel"
options and "-ac 6").
channel layouts don't match (for instance two "-map_channel" options and "-ac
6").
You can also extract each channel of an input to specific outputs; the following
command extracts two channels of the @var{INPUT} audio stream (file 0, stream 0)
to the respective @var{OUTPUT_CH0} and @var{OUTPUT_CH1} outputs:
You can also extract each channel of an @var{INPUT} to specific outputs; the
following command extracts each channel of the audio stream (file 0, stream 0)
to the respective @var{OUTPUT_CH0} and @var{OUTPUT_CH1}:
@example
ffmpeg -i INPUT -map_channel 0.0.0 OUTPUT_CH0 -map_channel 0.0.1 OUTPUT_CH1
@end example
The following example splits the channels of a stereo input into two separate
streams, which are put into the same output file:
The following example splits the channels of a stereo input into streams:
@example
ffmpeg -i stereo.wav -map 0:0 -map 0:0 -map_channel 0.0.0:0.0 -map_channel 0.0.1:0.1 -y out.ogg
@end example
Note that currently each output stream can only contain channels from a single
input stream; you can't for example use "-map_channel" to pick multiple input
audio channels contained in different streams (from the same or different files)
and merge them into a single output stream. It is therefore not currently
possible, for example, to turn two separate mono streams into a single stereo
stream. However splitting a stereo stream into two single channel mono streams
is possible.
Note that "-map_channel" is currently limited to the scope of one input for
each output; you can't for example use it to pick multiple input audio files
and mix them into one single output.
If you need this feature, a possible workaround is to use the @emph{amerge}
filter. For example, if you need to merge a media (here @file{input.mkv}) with 2
mono audio streams into one single stereo channel audio stream (and keep the
video stream), you can use the following command:
@example
ffmpeg -i input.mkv -f lavfi -i "
amovie=input.mkv:si=1 [a1];
amovie=input.mkv:si=2 [a2];
[a1][a2] amerge" -c:a pcm_s16le -c:v copy output.mkv
@end example
@item -map_metadata[:@var{metadata_spec_out}] @var{infile}[:@var{metadata_spec_in}] (@emph{output,per-metadata})
@item -map_metadata[:@var{metadata_type}][:@var{index}] @var{infile}[:@var{metadata_type}][:@var{index}] (@emph{output,per-metadata})
Set metadata information of the next output file from @var{infile}. Note that
those are file indices (zero-based), not filenames.
Optional @var{metadata_spec_in/out} parameters specify, which metadata to copy.
A metadata specifier can have the following forms:
@table @option
@item @var{g}
global metadata, i.e. metadata that applies to the whole file
@item @var{s}[:@var{stream_spec}]
per-stream metadata. @var{stream_spec} is a stream specifier as described
in the @ref{Stream specifiers} chapter. In an input metadata specifier, the first
matching stream is copied from. In an output metadata specifier, all matching
streams are copied to.
@item @var{c}:@var{chapter_index}
per-chapter metadata. @var{chapter_index} is the zero-based chapter index.
@item @var{p}:@var{program_index}
per-program metadata. @var{program_index} is the zero-based program index.
@end table
If metadata specifier is omitted, it defaults to global.
Optional @var{metadata_type} parameters specify which metadata to copy: (g)lobal
(i.e. metadata that applies to the whole file), per-(s)tream, per-(c)hapter or
per-(p)rogram. All metadata specifiers other than global must be followed by the
stream/chapter/program index. If the metadata specifier is omitted, it defaults to
global.
By default, global metadata is copied from the first input file,
per-stream and per-chapter metadata is copied along with streams/chapters. These
@@ -624,14 +848,6 @@ of the output file:
@example
ffmpeg -i in.ogg -map_metadata 0:s:0 out.mp3
@end example
To do the reverse, i.e. copy global metadata to all audio streams:
@example
ffmpeg -i in.mkv -map_metadata:s:a 0:g out.mkv
@end example
Note that simple @code{0} would work as well in this example, since global
metadata is assumed by default.
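Per-chapter metadata can be addressed in the same way. The following is an
untested sketch using the @code{c} specifier described above; it assumes the
output actually contains chapters:
@example
ffmpeg -i in.mkv -map_metadata:c:0 0:g out.mkv
@end example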
@item -map_chapters @var{input_file_index} (@emph{output})
Copy chapters from input file with index @var{input_file_index} to the next
output file. If no chapter mapping is specified, then chapters are copied from
@@ -675,15 +891,14 @@ Show benchmarking information at the end of an encode.
Shows CPU time used and maximum memory consumption.
Maximum memory consumption is not supported on all systems;
it will usually display as 0 if not supported.
@item -benchmark_all (@emph{global})
Show benchmarking information during the encode.
Shows CPU time used in various steps (audio/video encode/decode).
@item -timelimit @var{duration} (@emph{global})
Exit after ffmpeg has been running for @var{duration} seconds.
@item -dump (@emph{global})
Dump each input packet to stderr.
@item -hex (@emph{global})
When dumping packets, also dump the payload.
@item -ps @var{size}
Set RTP payload size in bytes.
@item -re (@emph{input})
Read input at native frame rate. Mainly used to simulate a grab device.
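For instance, a sketch (destination address and output format are
illustrative) that streams a file in real time over UDP:
@example
ffmpeg -re -i input.avi -c copy -f mpegts udp://127.0.0.1:1234
@end example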
@item -loop_input
@@ -694,24 +909,21 @@ This option is deprecated, use -loop 1.
Repeatedly loop output for formats that support looping such as animated GIF
(0 will loop the output infinitely).
This option is deprecated, use -loop.
@item -threads @var{count}
Thread count.
@item -vsync @var{parameter}
Video sync method.
For compatibility reasons old values can be specified as numbers.
Newly added values must always be specified as strings.
@table @option
@item 0, passthrough
@item 0
Each frame is passed with its timestamp from the demuxer to the muxer.
@item 1, cfr
@item 1
Frames will be duplicated and dropped to achieve exactly the requested
constant framerate.
@item 2, vfr
@item 2
Frames are passed through with their timestamp or dropped so as to
prevent 2 frames from having the same timestamp.
@item drop
As passthrough but destroys all timestamps, making the muxer generate
fresh timestamps based on frame-rate.
@item -1, auto
@item -1
Chooses between 1 and 2 depending on muxer capabilities. This is the
default method.
@end table
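As an illustration (not one of the original examples), to duplicate and drop
frames as needed for a constant 25 fps output:
@example
ffmpeg -i INPUT -vsync 1 -r 25 OUTPUT.avi
@end example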
@@ -725,33 +937,10 @@ Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps
the parameter is the maximum samples per second by which the audio is changed.
-async 1 is a special case where only the start of the audio stream is corrected
without any later correction.
This option has been deprecated. Use the @code{asyncts} audio filter instead.
@item -copyts
Copy timestamps from input to output.
@item -copytb @var{mode}
Specify how to set the encoder timebase when stream copying. @var{mode} is an
integer numeric value, and can assume one of the following values:
@table @option
@item 1
Use the demuxer timebase.
The time base is copied to the output encoder from the corresponding input
demuxer. This is sometimes required to avoid non-monotonically increasing
timestamps when copying video streams with variable frame rate.
@item 0
Use the decoder timebase.
The time base is copied to the output encoder from the corresponding input
decoder.
@item -1
Try to make the choice automatically, in order to generate a sane output.
@end table
Default value is -1.
@item -copytb
Copy input stream time base from input to output when stream copying.
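For instance, a sketch that keeps the demuxer time base while stream copying
variable frame rate video:
@example
ffmpeg -i INPUT -c:v copy -copytb 1 OUTPUT.mkv
@end example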
@item -shortest
Finish encoding when the shortest input stream ends.
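For example (hypothetical file names), to mux a video file with an audio file
and stop when the shorter of the two ends:
@example
ffmpeg -i video.avi -i audio.wav -map 0:v -map 1:a -shortest out.avi
@end example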
@item -dts_delta_threshold
@@ -777,59 +966,14 @@ Set bitstream filters for matching streams. @var{bistream_filters} is
a comma-separated list of bitstream filters. Use the @code{-bsfs} option
to get the list of bitstream filters.
@example
ffmpeg -i h264.mp4 -c:v copy -bsf:v h264_mp4toannexb -an out.h264
ffmpeg -i h264.mp4 -c:v copy -vbsf h264_mp4toannexb -an out.h264
@end example
@example
ffmpeg -i file.mov -an -vn -bsf:s mov2textsub -c:s copy -f rawvideo sub.txt
ffmpeg -i file.mov -an -vn -sbsf mov2textsub -c:s copy -f rawvideo sub.txt
@end example
@item -tag[:@var{stream_specifier}] @var{codec_tag} (@emph{per-stream})
Force a tag/fourcc for matching streams.
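For example, a sketch forcing the @code{XVID} fourcc on a copied video stream:
@example
ffmpeg -i INPUT -c:v copy -tag:v XVID OUTPUT.avi
@end example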
@item -timecode @var{hh}:@var{mm}:@var{ss}SEP@var{ff}
Specify Timecode for writing. @var{SEP} is ':' for non drop timecode and ';'
(or '.') for drop.
@example
ffmpeg -i input.mpg -timecode 01:02:03.04 -r 30000/1001 -s ntsc output.mpg
@end example
@item -filter_complex @var{filtergraph} (@emph{global})
Define a complex filter graph, i.e. one with an arbitrary number of inputs and/or
outputs. For simple graphs -- those with one input and one output of the same
type -- see the @option{-filter} options. @var{filtergraph} is a description of
the filter graph, as described in @ref{Filtergraph syntax}.
Input link labels must refer to input streams using the
@code{[file_index:stream_specifier]} syntax (i.e. the same as @option{-map}
uses). If @var{stream_specifier} matches multiple streams, the first one will be
used. An unlabeled input will be connected to the first unused input stream of
the matching type.
Output link labels are referred to with @option{-map}. Unlabeled outputs are
added to the first output file.
For example, to overlay an image over video
@example
ffmpeg -i video.mkv -i image.png -filter_complex '[0:v][1:v]overlay[out]' -map
'[out]' out.mkv
@end example
Here @code{[0:v]} refers to the first video stream in the first input file,
which is linked to the first (main) input of the overlay filter. Similarly the
first video stream in the second input is linked to the second (overlay) input
of overlay.
Assuming there is only one video stream in each input file, we can omit input
labels, so the above is equivalent to
@example
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay[out]' -map
'[out]' out.mkv
@end example
Furthermore we can omit the output label and the single output from the filter
graph will be added to the output file automatically, so we can simply write
@example
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay' out.mkv
@end example
@end table
@section Preset files
@@ -891,7 +1035,7 @@ frame rate or decrease the frame size.
@item
If your computer is not fast enough, you can speed up the
compression at the expense of the compression ratio. You can use
'-me zero' to speed up motion estimation, and '-g 0' to disable
'-me zero' to speed up motion estimation, and '-intra' to disable
motion estimation completely (you have only I-frames, which means it
is about as good as JPEG compression).
@@ -1080,18 +1224,6 @@ composed of three digits padded with zeroes to express the sequence
number. It is the same syntax supported by the C printf function, but
only formats accepting a normal integer are suitable.
When importing an image sequence, -i also supports expanding shell-like
wildcard patterns (globbing) internally. To lower the chance of interfering
with your actual file names and the shell's glob expansion, you are required
to activate glob meta characters by prefixing them with a single @code{%}
character, like in @code{foo-%*.jpeg}, @code{foo-%?%?%?.jpeg} or
@code{foo-00%[234%]%*.jpeg}.
If your filename actually contains a character sequence of a @code{%} character
followed by a glob character, you must double the @code{%} character to escape
it. Imagine your files begin with @code{%?-foo-}, then you could use a glob
pattern like @code{%%?-foo-%*.jpeg}. For input patterns that could be both a
printf or a glob pattern, ffmpeg will assume it is a glob pattern.
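For instance, a sketch (hypothetical file names) reading all frames matching
@file{foo-*.jpeg} via the escaped glob form described above:
@example
ffmpeg -i 'foo-%*.jpeg' out.avi
@end example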
@item
You can put many streams of the same type in the output:
@@ -1102,23 +1234,9 @@ ffmpeg -i test1.avi -i test2.avi -map 0.3 -map 0.2 -map 0.1 -map 0.0 -c copy tes
The resulting output file @file{test12.avi} will contain the first four streams
from the input file in reverse order.
@item
To force CBR video output:
@example
ffmpeg -i myfile.avi -b 4000k -minrate 4000k -maxrate 4000k -bufsize 1835k out.m2v
@end example
@item
The four options lmin, lmax, mblmin and mblmax use 'lambda' units,
but you may use the QP2LAMBDA constant to easily convert from 'q' units:
@example
ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
@end example
@end itemize
@c man end EXAMPLES
@include syntax.texi
@include eval.texi
@include decoders.texi
@include encoders.texi

View File

@@ -29,7 +29,7 @@
\ / :
+======\======================/======+ ^ :
------> 0 | : source_index : st-:--- | : :
OutputFile output_files[] / +------------------------------------+ : :
OuputFile output_files[] / +------------------------------------+ : :
/ 1 | : : : | : :
^ +------+------------+-----+ / +------------------------------------+ : :
: | : ost_index -:-----:------/ 2 | : : : | : :

View File

@@ -168,9 +168,6 @@ Seek backward/forward 10 seconds.
@item down/up
Seek backward/forward 1 minute.
@item page down/page up
Seek backward/forward 10 minutes.
@item mouse click
Seek to percentage in file corresponding to fraction of width.
@@ -178,7 +175,6 @@ Seek to percentage in file corresponding to fraction of width.
@c man end
@include syntax.texi
@include eval.texi
@include decoders.texi
@include demuxers.texi

View File

@@ -94,11 +94,6 @@ For example for printing the output in JSON format, specify:
For more details on the available output printing formats, see the
Writers section below.
@item -show_error
Show information about the error found when trying to probe the input.
The error information is printed within a section with name "ERROR".
@item -show_format
Show information about the container format of the input multimedia
stream.
@@ -106,11 +101,6 @@ stream.
All the container format information is printed within a section with
name "FORMAT".
@item -show_format_entry @var{name}
Like @option{-show_format}, but only prints the specified entry of the
container format information, rather than all. This option may be given more
than once, then all specified entries will be shown.
@item -show_packets
Show information about each packet contained in the input multimedia
stream.
@@ -118,13 +108,6 @@ stream.
The information for each single packet is printed within a dedicated
section with name "PACKET".
@item -show_frames
Show information about each frame contained in the input multimedia
stream.
The information for each single frame is printed within a dedicated
section with name "FRAME".
@item -show_streams
Show information about each media stream contained in the input
multimedia stream.
@@ -132,37 +115,6 @@ multimedia stream.
Each media stream information is printed within a dedicated section
with name "STREAM".
@item -count_frames
Count the number of frames per stream and report it in the
corresponding stream section.
@item -count_packets
Count the number of packets per stream and report it in the
corresponding stream section.
@item -show_private_data, -private
Show private data, that is data depending on the format of the
particular shown element.
This option is enabled by default, but you may need to disable it
for specific uses, for example when creating XSD-compliant XML output.
@item -show_program_version
Show information related to program version.
Version information is printed within a section with name
"PROGRAM_VERSION".
@item -show_library_versions
Show information related to library versions.
Version information for each library is printed within a section with
name "LIBRARY_VERSION".
@item -show_versions
Show information related to program and library versions. This is the
equivalent of setting both @option{-show_program_version} and
@option{-show_library_versions} options.
@item -i @var{input_file}
Read @var{input_file}.
@@ -172,7 +124,7 @@ Read @var{input_file}.
@chapter Writers
@c man begin WRITERS
A writer defines the output format adopted by @command{ffprobe}, and will be
A writer defines the output format adopted by @file{ffprobe}, and will be
used for printing all the parts of the output.
A writer may accept one or more arguments, which specify the options to
@@ -195,22 +147,6 @@ keyN=valN
Metadata tags are printed as a line in the corresponding FORMAT or
STREAM section, and are prefixed by the string "TAG:".
This writer accepts options as a list of @var{key}=@var{value} pairs,
separated by ":".
A description of the accepted options follows.
@table @option
@item nokey, nk
If set to 1, do not print the key of each field. Default value
is 0.
@item noprint_wrappers, nw
If set to 1, do not print the section header and footer.
Default value is 0.
@end table
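A possible invocation (a sketch; it assumes the writer is selected with the
@option{-print_format} option):
@example
ffprobe -print_format default=nokey=1:noprint_wrappers=1 -show_format INPUT
@end example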
@section compact
Compact format.
@@ -274,81 +210,10 @@ JSON based format.
Each section is printed using JSON notation.
This writer accepts options as a list of @var{key}=@var{value} pairs,
separated by ":".
The description of the accepted options follows.
@table @option
@item compact, c
If set to 1, enable compact output, that is, each section will be
printed on a single line. Default value is 0.
@end table
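A possible invocation (a sketch, under the same assumption that the writer is
selected with @option{-print_format}):
@example
ffprobe -print_format json=compact=1 -show_streams INPUT
@end example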
For more information about JSON, see @url{http://www.json.org/}.
@section xml
XML based format.
The XML output is described in the XML schema description file
@file{ffprobe.xsd} installed in the FFmpeg datadir.
An updated version of the schema can be retrieved at the url
@url{http://www.ffmpeg.org/schema/ffprobe.xsd}, which redirects to the
latest schema committed into the FFmpeg development source code tree.
Note that the output issued will be compliant with the
@file{ffprobe.xsd} schema only when no special global output options
(@option{unit}, @option{prefix}, @option{byte_binary_prefix},
@option{sexagesimal} etc.) are specified.
This writer accepts options as a list of @var{key}=@var{value} pairs,
separated by ":".
The description of the accepted options follows.
@table @option
@item fully_qualified, q
If set to 1, the output will be fully qualified. Default
value is 0.
This is required for generating an XML file which can be validated
through an XSD file.
@item xsd_compliant, x
If set to 1, perform additional checks to ensure that the output is XSD
compliant. Default value is 0.
This option automatically sets @option{fully_qualified} to 1.
@end table
For more information about the XML format, see
@url{http://www.w3.org/XML/}.
@c man end WRITERS
@chapter Timecode
@c man begin TIMECODE
@command{ffprobe} supports Timecode extraction:
@itemize
@item
MPEG1/2 timecode is extracted from the GOP, and is available in the video
stream details (@option{-show_streams}, see @var{timecode}).
@item
MOV timecode is extracted from tmcd track, so is available in the tmcd
stream metadata (@option{-show_streams}, see @var{TAG:timecode}).
@item
DV and GXF timecodes are available in format metadata
(@option{-show_format}, see @var{TAG:timecode}).
@end itemize
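For example, a sketch (hypothetical file name) for inspecting the timecode of
a MOV file:
@example
ffprobe -show_streams input.mov
@end example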
@c man end TIMECODE
@include syntax.texi
@include decoders.texi
@include demuxers.texi
@include protocols.texi

View File

@@ -1,167 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"
            targetNamespace="http://www.ffmpeg.org/schema/ffprobe"
            xmlns:ffprobe="http://www.ffmpeg.org/schema/ffprobe">
    <xsd:element name="ffprobe" type="ffprobe:ffprobeType"/>
    <xsd:complexType name="ffprobeType">
        <xsd:sequence>
            <xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
            <xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" />
            <xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
            <xsd:element name="format" type="ffprobe:formatType" minOccurs="0" maxOccurs="1" />
            <xsd:element name="error" type="ffprobe:errorType" minOccurs="0" maxOccurs="1" />
            <xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
            <xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
        </xsd:sequence>
    </xsd:complexType>
    <xsd:complexType name="packetsType">
        <xsd:sequence>
            <xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
        </xsd:sequence>
    </xsd:complexType>
    <xsd:complexType name="framesType">
        <xsd:sequence>
            <xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
        </xsd:sequence>
    </xsd:complexType>
    <xsd:complexType name="packetType">
        <xsd:attribute name="codec_type" type="xsd:string" use="required" />
        <xsd:attribute name="stream_index" type="xsd:int" use="required" />
        <xsd:attribute name="pts" type="xsd:long" />
        <xsd:attribute name="pts_time" type="xsd:float" />
        <xsd:attribute name="dts" type="xsd:long" />
        <xsd:attribute name="dts_time" type="xsd:float" />
        <xsd:attribute name="duration" type="xsd:long" />
        <xsd:attribute name="duration_time" type="xsd:float" />
        <xsd:attribute name="size" type="xsd:long" use="required" />
        <xsd:attribute name="pos" type="xsd:long" />
        <xsd:attribute name="flags" type="xsd:string" use="required" />
    </xsd:complexType>
    <xsd:complexType name="frameType">
        <xsd:attribute name="media_type" type="xsd:string" use="required"/>
        <xsd:attribute name="key_frame" type="xsd:int" use="required"/>
        <xsd:attribute name="pts" type="xsd:long" />
        <xsd:attribute name="pts_time" type="xsd:float"/>
        <xsd:attribute name="pkt_pts" type="xsd:long" />
        <xsd:attribute name="pkt_pts_time" type="xsd:float"/>
        <xsd:attribute name="pkt_dts" type="xsd:long" />
        <xsd:attribute name="pkt_dts_time" type="xsd:float"/>
        <xsd:attribute name="pkt_pos" type="xsd:long" />
        <!-- audio attributes -->
        <xsd:attribute name="sample_fmt" type="xsd:string"/>
        <xsd:attribute name="nb_samples" type="xsd:long" />
        <!-- video attributes -->
        <xsd:attribute name="width" type="xsd:long" />
        <xsd:attribute name="height" type="xsd:long" />
        <xsd:attribute name="pix_fmt" type="xsd:string"/>
        <xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
        <xsd:attribute name="pict_type" type="xsd:string"/>
        <xsd:attribute name="coded_picture_number" type="xsd:long" />
        <xsd:attribute name="display_picture_number" type="xsd:long" />
        <xsd:attribute name="interlaced_frame" type="xsd:int" />
        <xsd:attribute name="top_field_first" type="xsd:int" />
        <xsd:attribute name="repeat_pict" type="xsd:int" />
        <xsd:attribute name="reference" type="xsd:int" />
    </xsd:complexType>
    <xsd:complexType name="streamsType">
        <xsd:sequence>
            <xsd:element name="stream" type="ffprobe:streamType" minOccurs="0" maxOccurs="unbounded"/>
        </xsd:sequence>
    </xsd:complexType>
    <xsd:complexType name="streamType">
        <xsd:attribute name="index" type="xsd:int" use="required"/>
        <xsd:attribute name="codec_name" type="xsd:string" />
        <xsd:attribute name="codec_long_name" type="xsd:string" />
        <xsd:attribute name="codec_type" type="xsd:string" />
        <xsd:attribute name="codec_time_base" type="xsd:string" use="required"/>
        <xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
        <xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
        <!-- video attributes -->
        <xsd:attribute name="width" type="xsd:int"/>
        <xsd:attribute name="height" type="xsd:int"/>
        <xsd:attribute name="has_b_frames" type="xsd:int"/>
        <xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
        <xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
        <xsd:attribute name="pix_fmt" type="xsd:string"/>
        <xsd:attribute name="level" type="xsd:int"/>
        <xsd:attribute name="timecode" type="xsd:string"/>
        <!-- audio attributes -->
        <xsd:attribute name="sample_fmt" type="xsd:string"/>
        <xsd:attribute name="sample_rate" type="xsd:int"/>
        <xsd:attribute name="channels" type="xsd:int"/>
        <xsd:attribute name="bits_per_sample" type="xsd:int"/>
        <xsd:attribute name="id" type="xsd:string"/>
        <xsd:attribute name="r_frame_rate" type="xsd:string" use="required"/>
        <xsd:attribute name="avg_frame_rate" type="xsd:string" use="required"/>
        <xsd:attribute name="time_base" type="xsd:string" use="required"/>
        <xsd:attribute name="start_time" type="xsd:float"/>
        <xsd:attribute name="duration" type="xsd:float"/>
        <xsd:attribute name="bit_rate" type="xsd:int"/>
        <xsd:attribute name="nb_frames" type="xsd:int"/>
        <xsd:attribute name="nb_read_frames" type="xsd:int"/>
        <xsd:attribute name="nb_read_packets" type="xsd:int"/>
    </xsd:complexType>
    <xsd:complexType name="formatType">
        <xsd:sequence>
            <xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
        </xsd:sequence>
        <xsd:attribute name="filename" type="xsd:string" use="required"/>
        <xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
        <xsd:attribute name="format_name" type="xsd:string" use="required"/>
        <xsd:attribute name="format_long_name" type="xsd:string" use="required"/>
        <xsd:attribute name="start_time" type="xsd:float"/>
        <xsd:attribute name="duration" type="xsd:float"/>
        <xsd:attribute name="size" type="xsd:long"/>
        <xsd:attribute name="bit_rate" type="xsd:long"/>
    </xsd:complexType>
    <xsd:complexType name="tagType">
        <xsd:attribute name="key" type="xsd:string" use="required"/>
        <xsd:attribute name="value" type="xsd:string" use="required"/>
    </xsd:complexType>
    <xsd:complexType name="errorType">
        <xsd:attribute name="code" type="xsd:int" use="required"/>
        <xsd:attribute name="string" type="xsd:string" use="required"/>
    </xsd:complexType>
    <xsd:complexType name="programVersionType">
        <xsd:attribute name="version" type="xsd:string" use="required"/>
        <xsd:attribute name="copyright" type="xsd:string" use="required"/>
        <xsd:attribute name="build_date" type="xsd:string" use="required"/>
        <xsd:attribute name="build_time" type="xsd:string" use="required"/>
        <xsd:attribute name="compiler_type" type="xsd:string" use="required"/>
        <xsd:attribute name="compiler_version" type="xsd:string" use="required"/>
        <xsd:attribute name="configuration" type="xsd:string" use="required"/>
    </xsd:complexType>
    <xsd:complexType name="libraryVersionType">
        <xsd:attribute name="name" type="xsd:string" use="required"/>
        <xsd:attribute name="major" type="xsd:int" use="required"/>
        <xsd:attribute name="minor" type="xsd:int" use="required"/>
        <xsd:attribute name="micro" type="xsd:int" use="required"/>
        <xsd:attribute name="version" type="xsd:int" use="required"/>
    </xsd:complexType>
    <xsd:complexType name="libraryVersionsType">
        <xsd:sequence>
            <xsd:element name="library_version" type="ffprobe:libraryVersionType" minOccurs="0" maxOccurs="unbounded"/>
        </xsd:sequence>
    </xsd:complexType>
</xsd:schema>

View File

@@ -373,3 +373,5 @@ ACL allow 192.168.0.0 192.168.255.255
<Redirect index.html>
URL http://www.ffmpeg.org/
</Redirect>

View File

@@ -147,7 +147,7 @@ that only captures in stereo and also requires that one channel be flipped.
If you are one of these people, then export 'AUDIO_FLIP_LEFT=1' before
starting ffmpeg.
@subsection The audio and video lose sync after a while.
@subsection The audio and video loose sync after a while.
Yes, they do.

View File

@@ -1,116 +0,0 @@
Filter design
=============
This document explains guidelines that should be observed (or ignored with
good reason) when writing filters for libavfilter.
In this document, the word “frame” indicates either a video frame or a group
of audio samples, as stored in an AVFilterBuffer structure.
Format negotiation
==================
The query_formats method should set, for each input and each output link,
the list of supported formats.
For video links, that means pixel format. For audio links, that means
channel layout, and sample format (the sample packing is implied by the
sample format).
The lists are not just lists; they are references to shared objects. When
the negotiation mechanism computes the intersection of the formats
supported at each end of a link, all references to both lists are
replaced with a reference to the intersection. And when a single format is
eventually chosen for a link amongst the remaining list, again, all
references to the list are updated.
That means that if a filter requires that its input and output have the
same format amongst a supported list, all it has to do is use a reference
to the same list of formats.
Buffer references ownership and permissions
===========================================
TODO
Frame scheduling
================
The purpose of these rules is to ensure that frames flow in the filter
graph without getting stuck and accumulating somewhere.
Simple filters that output one frame for each input frame should not have
to worry about it.
start_frame / filter_samples
----------------------------
These methods are called when a frame is pushed to the filter's input.
They can be called at any time except in a reentrant way.
If the input frame is enough to produce output, then the filter should
push the output frames on the output link immediately.
As an exception to the previous rule, if the input frame is enough to
produce several output frames, then the filter needs to output only at
least one frame per link. The additional frames can be left buffered in the
filter; these buffered frames must be flushed immediately if a new input
produces new output.
(Example: framerate-doubling filter: start_frame must (1) flush the
second copy of the previous frame, if it is still there, (2) push the
first copy of the incoming frame, (3) keep the second copy for later.)
If the input frame is not enough to produce output, the filter must not
call request_frame to get more. It must just process the frame or queue
it. The task of requesting more frames is left to the filter's
request_frame method or the application.
If a filter has several inputs, the filter must be ready for frames
arriving randomly on any input. Therefore, any filter with several inputs
will most likely require some kind of queuing mechanism. It is perfectly
acceptable to have a limited queue and to drop frames when the inputs
are too unbalanced.
request_frame
-------------
This method is called when a frame is wanted on an output.
For an input, it should directly call start_frame or filter_samples on
the corresponding output.
For a filter, if there are queued frames already ready, one of these
frames should be pushed. If not, the filter should request a frame on
one of its inputs, repeatedly until at least one frame has been pushed.
Return values:
if request_frame could produce a frame, it should return 0;
if it could not for temporary reasons, it should return AVERROR(EAGAIN);
if it could not because there are no more frames, it should return
AVERROR_EOF.
The typical implementation of request_frame for a filter with several
inputs will look like this:
if (frames_queued) {
    push_one_frame();
    return 0;
}
while (!frame_pushed) {
    input = input_where_a_frame_is_most_needed();
    ret = avfilter_request_frame(input);
    if (ret == AVERROR_EOF) {
        process_eof_on_input();
    } else if (ret < 0) {
        return ret;
    }
}
return 0;
Note that, except for filters that can have queued frames, request_frame
does not push frames: it requests them to its input, and as a reaction,
the start_frame / filter_samples method will be called and do the work.

File diff suppressed because it is too large

View File

@@ -13,8 +13,7 @@
FFmpeg can be hooked up with a number of external libraries to add support
for more formats. None of them are used by default, their use has to be
explicitly requested by passing the appropriate flags to
@command{./configure}.
explicitly requested by passing the appropriate flags to @file{./configure}.
@section OpenJPEG
@@ -95,7 +94,7 @@ details), you must upgrade FFmpeg's license to GPL in order to use it.
@chapter Supported File Formats, Codecs or Features
@chapter Supported File Formats and Codecs
You can use the @code{-formats} and @code{-codecs} options to have an exhaustive list.
@@ -135,7 +134,7 @@ library:
@item Brute Force & Ignorance @tab @tab X
@tab Used in the game Flash Traffic: City of Angels.
@item BWF @tab X @tab X
@item CRI ADX @tab X @tab X
@item CRI ADX @tab @tab X
@tab Audio-only format used in console video games.
@item Discworld II BMV @tab @tab X
@item Interplay C93 @tab @tab X
@@ -144,8 +143,6 @@ library:
@tab Multimedia format used by Delphine Software games.
@item CD+G @tab @tab X
@tab Video format used by CD+G karaoke disks
@item Commodore CDXL @tab @tab X
@tab Amiga CD video format
@item Core Audio Format @tab X @tab X
@tab Apple Core Audio Format
@item CRC testing format @tab X @tab
@@ -211,7 +208,6 @@ library:
@item MAXIS XA @tab @tab X
@tab Used in Sim City 3000; file extension .xa.
@item MD Studio @tab @tab X
@item Metal Gear Solid: The Twin Snakes @tab @tab X
@item Mobotix .mxg @tab @tab X
@item Monkey's Audio @tab @tab X
@item Motion Pixels MVI @tab @tab X
@@ -311,7 +307,6 @@ library:
@item RTP @tab X @tab X
@item RTSP @tab X @tab X
@item SAP @tab X @tab X
@item SBG @tab @tab X
@item SDP @tab @tab X
@item Sega FILM/CPK @tab @tab X
@tab Used in many Sega Saturn console games.
@@ -321,9 +316,7 @@ library:
@tab Used in Sierra CD-ROM games.
@item Smacker @tab @tab X
@tab Multimedia format used by many games.
@item SMJPEG @tab X @tab X
@tab Used in certain Loki game ports.
@item Sony OpenMG (OMA) @tab X @tab X
@item Sony OpenMG (OMA) @tab @tab X
@tab Audio format used in Sony Sonic Stage and Sony Vegas.
@item Sony PlayStation STR @tab @tab X
@item Sony Wave64 (W64) @tab @tab X
@@ -353,7 +346,6 @@ library:
@item eXtended BINary text (XBIN) @tab @tab X
@item YUV4MPEG pipe @tab X @tab X
@item Psygnosis YOP @tab @tab X
@item ZeroCodec Lossless Video @tab @tab X
@end multitable
@code{X} means that encoding (resp. decoding) is supported.
@@ -373,8 +365,6 @@ following image formats are supported:
@tab Microsoft BMP image
@item DPX @tab X @tab X
@tab Digital Picture Exchange
@item EXR @tab @tab X
@tab OpenEXR
@item JPEG @tab X @tab X
@tab Progressive JPEG is not supported.
@item JPEG 2000 @tab X @tab X
@@ -400,16 +390,12 @@ following image formats are supported:
@tab V.Flash PTX format
@item SGI @tab X @tab X
@tab SGI RGB image format
@item Sun Rasterfile @tab X @tab X
@item Sun Rasterfile @tab @tab X
@tab Sun RAS image format
@item TIFF @tab X @tab X
@tab YUV, JPEG and some extension is not supported yet.
@item Truevision Targa @tab X @tab X
@tab Targa (.TGA) image format
@item XBM @tab X @tab X
@tab X BitMap image format
@item XWD @tab X @tab X
@tab X Window Dump image format
@end multitable
@code{X} means that encoding (resp. decoding) is supported.
@@ -449,18 +435,13 @@ following image formats are supported:
@item Autodesk Animator Flic video @tab @tab X
@item Autodesk RLE @tab @tab X
@tab fourcc: AASC
@item Avid 1:1 10-bit RGB Packer @tab X @tab X
@tab fourcc: AVrp
@item AVS (Audio Video Standard) video @tab @tab X
@tab Video encoding used by the Creature Shock game.
@item AYUV @tab X @tab X
@tab Microsoft uncompressed packed 4:4:4:4
@item Beam Software VB @tab @tab X
@item Bethesda VID video @tab @tab X
@tab Used in some games from Bethesda Softworks.
@item Bink Video @tab @tab X
@item Bitmap Brothers JV video @tab @tab X
@item y41p Brooktree uncompressed 4:1:1 12-bit @tab X @tab X
@item Brute Force & Ignorance @tab @tab X
@tab Used in the game Flash Traffic: City of Angels.
@item C93 video @tab @tab X
@@ -469,8 +450,6 @@ following image formats are supported:
@tab fourcc: CSCD
@item CD+G @tab @tab X
@tab Video codec for CD+G karaoke disks
@item CDXL @tab @tab X
@tab Amiga CD video codec
@item Chinese AVS video @tab E @tab X
@tab AVS1-P2, JiZhun profile, encoding through external library libxavs
@item Delphine Software International CIN video @tab @tab X
@@ -483,7 +462,7 @@ following image formats are supported:
@item DFA @tab @tab X
@tab Codec used in Chronomaster game.
@item Dirac @tab E @tab X
@tab supported through external library libschroedinger
@tab supported through external libdirac/libschroedinger libraries
@item Deluxe Paint Animation @tab @tab X
@item DNxHD @tab X @tab X
@tab aka SMPTE VC3
@@ -504,13 +483,12 @@ following image formats are supported:
@item Escape 124 @tab @tab X
@item Escape 130 @tab @tab X
@item FFmpeg video codec #1 @tab X @tab X
@tab lossless codec (fourcc: FFV1)
@tab experimental lossless codec (fourcc: FFV1)
@item Flash Screen Video v1 @tab X @tab X
@tab fourcc: FSV1
@item Flash Screen Video v2 @tab X @tab X
@item Flash Video (FLV) @tab X @tab X
@tab Sorenson H.263 used in Flash
@item Forward Uncompressed @tab @tab X
@item Fraps @tab @tab X
@item H.261 @tab X @tab X
@item H.263 / H.263-1996 @tab X @tab X
@@ -533,7 +511,6 @@ following image formats are supported:
@item Intel H.263 @tab @tab X
@item Intel Indeo 2 @tab @tab X
@item Intel Indeo 3 @tab @tab X
@item Intel Indeo 4 @tab @tab X
@item Intel Indeo 5 @tab @tab X
@item Interplay C93 @tab @tab X
@tab Used in the game Cyberia from Interplay.
@@ -591,8 +568,8 @@ following image formats are supported:
@tab fourcc: 'smc '
@item QuickTime video (RPZA) @tab @tab X
@tab fourcc: rpza
@item R10K AJA Kona 10-bit RGB Codec @tab X @tab X
@item R210 Quicktime Uncompressed RGB 10-bit @tab X @tab X
@item R10K AJA Kona 10-bit RGB Codec @tab @tab X
@item R210 Quicktime Uncompressed RGB 10-bit @tab @tab X
@item Raw Video @tab X @tab X
@item RealVideo 1.0 @tab X @tab X
@item RealVideo 2.0 @tab X @tab X
@@ -624,10 +601,7 @@ following image formats are supported:
@item Tiertex Limited SEQ video @tab @tab X
@tab Codec used in DOS CD-ROM FlashBack game.
@item Ut Video @tab @tab X
@item v210 QuickTime uncompressed 4:2:2 10-bit @tab X @tab X
@item v308 QuickTime uncompressed 4:4:4 @tab X @tab X
@item v408 QuickTime uncompressed 4:4:4:4 @tab X @tab X
@item v410 QuickTime uncompressed 4:4:4 10-bit @tab X @tab X
@item V210 Quicktime Uncompressed 4:2:2 10-bit @tab X @tab X
@item VBLE Lossless Codec @tab @tab X
@item VMware Screen Codec / VMware Video @tab @tab X
@tab Codec used in videos captured by VMware.
@@ -645,8 +619,6 @@ following image formats are supported:
@item WMV7 @tab X @tab X
@item YAMAHA SMAF @tab X @tab X
@item Psygnosis YOP Video @tab @tab X
@item yuv4 @tab X @tab X
@tab libquicktime uncompressed packed 4:2:0
@item ZLIB @tab X @tab X
@tab part of LCL, encoder experimental
@item Zip Motion Blocks Video @tab X @tab X
@@ -796,7 +768,6 @@ following image formats are supported:
@tab Real 28800 bit/s codec
@item RealAudio 3.0 (dnet) @tab IX @tab X
@tab Real low bitrate AC-3 codec
@item RealAudio Lossless @tab @tab X
@item RealAudio SIPR / ACELP.NET @tab @tab X
@item Shorten @tab @tab X
@item Sierra VMD audio @tab @tab X
@@ -819,7 +790,6 @@ following image formats are supported:
@item Westwood Audio (SND1) @tab @tab X
@item Windows Media Audio 1 @tab X @tab X
@item Windows Media Audio 2 @tab X @tab X
@item Windows Media Audio Lossless @tab @tab X
@item Windows Media Audio Pro @tab @tab X
@item Windows Media Audio Voice @tab @tab X
@end multitable
@@ -838,8 +808,7 @@ performance on systems without hardware floating point support).
@item SSA/ASS @tab X @tab X @tab X @tab X
@item DVB @tab X @tab X @tab X @tab X
@item DVD @tab X @tab X @tab X @tab X
@item JACOsub @tab X @tab X @tab @tab X
@item MicroDVD @tab X @tab X @tab @tab X
@item MicroDVD @tab X @tab X @tab @tab
@item PGS @tab @tab @tab @tab X
@item SubRip (SRT) @tab X @tab X @tab X @tab X
@item XSUB @tab @tab @tab X @tab X
@@ -872,12 +841,11 @@ performance on systems without hardware floating point support).
@item ALSA @tab X @tab X
@item BKTR @tab X @tab
@item DV1394 @tab X @tab
@item Linux framebuffer @tab X @tab
@item JACK @tab X @tab
@item LIBCDIO @tab X
@item LIBDC1394 @tab X @tab
@item OSS @tab X @tab X
@item Pulseaudio @tab X @tab
@item Video4Linux @tab X @tab
@item Video4Linux2 @tab X @tab
@item VfW capture @tab X @tab
@item X11 grabbing @tab X @tab
@@ -885,15 +853,4 @@ performance on systems without hardware floating point support).
@code{X} means that input/output is supported.
@section Timecode
@multitable @columnfractions .4 .1 .1
@item Codec/format @tab Read @tab Write
@item DV @tab X @tab X
@item GXF @tab X @tab X
@item MOV @tab X @tab
@item MPEG1/2 @tab X @tab X
@item MXF @tab X @tab X
@end multitable
@bye

View File

@@ -65,14 +65,6 @@ git clone git@@source.ffmpeg.org:ffmpeg <target>
This will put the FFmpeg sources into the directory @var{<target>} and let
you push back your changes to the remote repository.
Make sure that you do not have Windows line endings in your checkouts,
otherwise you may experience spurious compilation failures. One way to
achieve this is to run
@example
git config --global core.autocrlf false
@end example
@section Updating the source tree to the latest revision

View File

@@ -59,7 +59,7 @@ BSD video input device.
Windows DirectShow input device.
DirectShow support is enabled when FFmpeg is built with the mingw-w64 project.
DirectShow support is enabled when FFmpeg is built with mingw-w64.
Currently only audio and video devices are supported.
Multiple devices may be opened as separate inputs, but they may also be
@@ -196,12 +196,12 @@ device.
Once you have created one or more JACK readable clients, you need to
connect them to one or more JACK writable clients.
To connect or disconnect JACK clients you can use the @command{jack_connect}
and @command{jack_disconnect} programs, or do it through a graphical interface,
for example with @command{qjackctl}.
To connect or disconnect JACK clients you can use the
@file{jack_connect} and @file{jack_disconnect} programs, or do it
through a graphical interface, for example with @file{qjackctl}.
To list the JACK clients and their properties you can invoke the command
@command{jack_lsp}.
@file{jack_lsp}.
The following example shows how to capture a JACK readable client
with @command{ffmpeg}.
@@ -260,7 +260,7 @@ device.
@itemize
@item
Create a color video stream and play it back with @command{ffplay}:
Create a color video stream and play it back with @file{ffplay}:
@example
ffplay -f lavfi -graph "color=pink [out0]" dummy
@end example
@@ -280,14 +280,14 @@ ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [ou
@item
Read an audio stream from a file using the amovie source and play it
back with @command{ffplay}:
back with @file{ffplay}:
@example
ffplay -f lavfi "amovie=test.wav"
@end example
@item
Read an audio stream and a video stream and play it back with
@command{ffplay}:
@file{ffplay}:
@example
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
@end example
@@ -380,7 +380,7 @@ $ ffmpeg -f openal -i '' out.ogg
@end example
Capture from two devices simultaneously, writing to two different files,
within the same @command{ffmpeg} command:
within the same @file{ffmpeg} command:
@example
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
@end example
@@ -415,7 +415,7 @@ The filename to provide to the input device is a source device or the
string "default"
To list the pulse source devices and their properties you can invoke
the command @command{pactl list sources}.
the command @file{pactl list sources}.
@example
ffmpeg -f pulse -i default /tmp/pulse.wav
@@ -504,9 +504,9 @@ command:
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
@end example
@section video4linux2
@section video4linux and video4linux2
Video4Linux2 input video device.
Video4Linux and Video4Linux2 input video devices.
The name of the device to grab is a file device node, usually Linux
systems tend to automatically create such nodes when the device
systems tend to automatically create such nodes when the device (e.g. an
USB webcam) is plugged into the system, and has a name of the
kind @file{/dev/video@var{N}}, where @var{N} is a number associated to
the device.
Video4Linux2 devices usually support a limited set of
@var{width}x@var{height} sizes and framerates. You can check which are
supported using @command{-list_formats all} for Video4Linux2 devices.
Video4Linux and Video4Linux2 devices only support a limited set of
@var{width}x@var{height} sizes and frame rates. You can check which are
supported for example with the command @file{dov4l} for Video4Linux
devices and the command @file{v4l-info} for Video4Linux2 devices.
Some usage examples of the video4linux2 devices with ffmpeg and ffplay:
If the size for the device is set to 0x0, the input device will
try to auto-detect the size to use.
Only for the video4linux2 device, if the frame rate is set to 0/0 the
input device will use the frame rate value already set in the driver.
The time base for the timestamps is 1 microsecond. Depending on the kernel
version and configuration, the timestamps may be derived from the real time
clock (origin at the Unix Epoch) or the monotonic clock (origin usually at
boot time, unaffected by NTP or manual changes to the clock). The
@option{-timestamps abs} or @option{-ts abs} option can be used to force
conversion into the real time clock.
Video4Linux support is deprecated since Linux 2.6.30, and will be
dropped in later versions.
Note that if FFmpeg is built with v4l-utils support ("--enable-libv4l2"
option), it will always be used.
@example
# Grab and show the input of a video4linux2 device.
ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
# Grab and record the input of a video4linux2 device, leave the
framerate and size as previously set.
ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
Some usage examples of the video4linux devices with the ff*
tools follow.
@example
# Grab and show the input of a video4linux device, frame rate is set
# to the default of 25/1.
ffplay -s 320x240 -f video4linux /dev/video0
# Grab and show the input of a video4linux2 device, auto-adjust size.
ffplay -f video4linux2 /dev/video0
# Grab and record the input of a video4linux2 device, auto-adjust size,
# frame rate value defaults to 0/0 so it is read from the video4linux2
# driver.
ffmpeg -f video4linux2 -i /dev/video0 out.mpeg
@end example
"v4l" and "v4l2" can be used as aliases for the respective "video4linux" and
@@ -571,7 +579,7 @@ default to 0.
Check the X11 documentation (e.g. man X) for more detailed information.
Use the @command{dpyinfo} program for getting basic information about the
Use the @file{dpyinfo} program for getting basic information about the
properties of your X11 display (e.g. grep for "name" or "dimensions").
For example to grab from @file{:0.0} using @command{ffmpeg}:
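(A reconstructed sketch; the exact options may vary.)
@example
ffmpeg -f x11grab -r 25 -s cif -i :0.0 /tmp/out.mpg
@end example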

View File

@@ -43,13 +43,13 @@ The result will be that in output the top half of the video is mirrored
onto the bottom half.
Video filters are loaded using the @var{-vf} option passed to
@command{ffmpeg} or to @command{ffplay}. Filters in the same linear
chain are separated by commas. In our example, @var{split, fifo,
overlay} are in one linear chain, and @var{fifo, crop, vflip} are in
another. The points where the linear chains join are labeled by names
enclosed in square brackets. In our example, that is @var{[T1]} and
@var{[T2]}. The magic labels @var{[in]} and @var{[out]} are the points
where video is input and output.
ffmpeg or to ffplay. Filters in the same linear chain are separated by
commas. In our example, @var{split, fifo, overlay} are in one linear
chain, and @var{fifo, crop, vflip} are in another. The points where
the linear chains join are labeled by names enclosed in square
brackets. In our example, that is @var{[T1]} and @var{[T2]}. The magic
labels @var{[in]} and @var{[out]} are the points where video is input
and output.
Some filters take a list of parameters as input: they are specified
after the filter name and an equal sign, and are separated from each other

View File

@@ -56,37 +56,31 @@ See also the @ref{framecrc} muxer.
@anchor{framecrc}
@section framecrc
Per-packet CRC (Cyclic Redundancy Check) testing format.
Per-frame CRC (Cyclic Redundancy Check) testing format.
This muxer computes and prints the Adler-32 CRC for each audio
and video packet. By default audio frames are converted to signed
This muxer computes and prints the Adler-32 CRC for each decoded audio
and video frame. By default audio frames are converted to signed
16-bit raw audio and video frames to raw video before computing the
CRC.
The output of the muxer consists of a line for each audio and video
packet of the form:
@example
@var{stream_index}, @var{packet_dts}, @var{packet_pts}, @var{packet_duration}, @var{packet_size}, 0x@var{CRC}
@end example
frame of the form: @var{stream_index}, @var{frame_dts},
@var{frame_size}, 0x@var{CRC}, where @var{CRC} is a hexadecimal
number 0-padded to 8 digits containing the CRC of the decoded frame.
@var{CRC} is a hexadecimal number 0-padded to 8 digits containing the
CRC of the packet.
For example to compute the CRC of the audio and video frames in
@file{INPUT}, converted to raw audio and video packets, and store it
in the file @file{out.crc}:
For example to compute the CRC of each decoded frame in the input, and
store it in the file @file{out.crc}:
@example
ffmpeg -i INPUT -f framecrc out.crc
@end example
To print the information to stdout, use the command:
You can print the CRC of each decoded frame to stdout with the command:
@example
ffmpeg -i INPUT -f framecrc -
@end example
With @command{ffmpeg}, you can select the output format to which the
audio and video frames are encoded before computing the CRC for each
packet by specifying the audio and video codec. For example, to
You can select the output format of each frame with @command{ffmpeg} by
specifying the audio and video codec and format. For example, to
compute the CRC of each decoded input audio frame converted to PCM
unsigned 8-bit and of each decoded input video frame converted to
MPEG-2 video, use the command:
@@ -96,40 +90,6 @@ ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f framecrc -
See also the @ref{crc} muxer.
@anchor{framemd5}
@section framemd5
Per-packet MD5 testing format.
This muxer computes and prints the MD5 hash for each audio
and video packet. By default audio frames are converted to signed
16-bit raw audio and video frames to raw video before computing the
hash.
The output of the muxer consists of a line for each audio and video
packet of the form:
@example
@var{stream_index}, @var{packet_dts}, @var{packet_pts}, @var{packet_duration}, @var{packet_size}, @var{MD5}
@end example
@var{MD5} is a hexadecimal number representing the computed MD5 hash
for the packet.
For example to compute the MD5 of the audio and video frames in
@file{INPUT}, converted to raw audio and video packets, and store it
in the file @file{out.md5}:
@example
ffmpeg -i INPUT -f framemd5 out.md5
@end example
To print the information to stdout, use the command:
@example
ffmpeg -i INPUT -f framemd5 -
@end example
See also the @ref{md5} muxer.
@anchor{image2}
@section image2
Image file muxer.
@@ -187,104 +147,18 @@ each of the YUV420P components. To read or write this image file format,
specify the name of the '.Y' file. The muxer will automatically open the
'.U' and '.V' files as required.
@anchor{md5}
@section md5
@section mov
MD5 testing format.
MOV / MP4 muxer
This muxer computes and prints the MD5 hash of all the input audio
and video frames. By default audio frames are converted to signed
16-bit raw audio and video frames to raw video before computing the
hash.
The output of the muxer consists of a single line of the form:
MD5=@var{MD5}, where @var{MD5} is a hexadecimal number representing
the computed MD5 hash.
For example to compute the MD5 hash of the input converted to raw
audio and video, and store it in the file @file{out.md5}:
@example
ffmpeg -i INPUT -f md5 out.md5
@end example
You can print the MD5 to stdout with the command:
@example
ffmpeg -i INPUT -f md5 -
@end example
See also the @ref{framemd5} muxer.
@section MOV/MP4/ISMV
The mov/mp4/ismv muxer supports fragmentation. Normally, a MOV/MP4
file has all the metadata about all packets stored in one location
(written at the end of the file, it can be moved to the start for
better playback using the @command{qt-faststart} tool). A fragmented
file consists of a number of fragments, where packets and metadata
about these packets are stored together. Writing a fragmented
file has the advantage that the file is decodable even if the
writing is interrupted (while a normal MOV/MP4 is undecodable if
it is not properly finished), and it requires less memory when writing
very long files (since writing normal MOV/MP4 files stores info about
every single packet in memory until the file is closed). The downside
is that it is less compatible with other applications.
Fragmentation is enabled by setting one of the AVOptions that define
how to cut the file into fragments:
The muxer options are:
@table @option
@item -moov_size @var{bytes}
Reserves space for the moov atom at the beginning of the file instead of placing the
moov atom at the end. If the space reserved is insufficient, muxing will fail.
@item -movflags frag_keyframe
Start a new fragment at each video keyframe.
@item -frag_duration @var{duration}
Create fragments that are @var{duration} microseconds long.
@item -frag_size @var{size}
Create fragments that contain up to @var{size} bytes of payload data.
@item -movflags frag_custom
Allow the caller to manually choose when to cut fragments, by
calling @code{av_write_frame(ctx, NULL)} to write a fragment with
the packets written so far. (This is only useful with other
applications integrating libavformat, not from @command{ffmpeg}.)
@item -min_frag_duration @var{duration}
Don't create fragments that are shorter than @var{duration} microseconds long.
@end table
If more than one condition is specified, fragments are cut when
one of the specified conditions is fulfilled. The exception to this is
@code{-min_frag_duration}, which has to be fulfilled for any of the other
conditions to apply.
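For example, a sketch that writes a fragmented file with a new fragment
starting at each video keyframe, while stream copying:
@example
ffmpeg -i INPUT -c copy -movflags frag_keyframe out.mp4
@end example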
Additionally, the way the output file is written can be adjusted
through a few other options:
@table @option
@item -movflags empty_moov
Write an initial moov atom directly at the start of the file, without
describing any samples in it. Generally, an mdat/moov pair is written
at the start of the file, as a normal MOV/MP4 file, containing only
a short portion of the file. With this option set, there is no initial
mdat atom, and the moov atom only describes the tracks but has
a zero duration.
Files written with this option set do not work in QuickTime.
This option is implicitly set when writing ismv (Smooth Streaming) files.
@item -movflags separate_moof
Write a separate moof (movie fragment) atom for each track. Normally,
packets for all tracks are written in a moof atom (which is slightly
more efficient), but with this option set, the muxer writes one moof/mdat
pair for each track, making it easier to separate tracks.
This option is implicitly set when writing ismv (Smooth Streaming) files.
@end table
Smooth Streaming content can be pushed in real time to a publishing
point on IIS with this muxer. Example:
@example
ffmpeg -re @var{<normal input/transcoding options>} -movflags isml+frag_keyframe -f ismv http://server/publishingpoint.isml/Streams(Encoder1)
@end example
@section mpegts
MPEG transport stream muxer.
@@ -411,71 +285,4 @@ For example a 3D WebM clip can be created using the following command line:
ffmpeg -i sample_left_right_clip.mpg -an -c:v libvpx -metadata stereo_mode=left_right -y stereo_clip.webm
@end example
@section segment
Basic stream segmenter.
The segmenter muxer outputs streams to a number of separate files of nearly
fixed duration. The output filename pattern can be set in a fashion similar to
@ref{image2}.
Every segment starts with a video keyframe, if a video stream is present.
The segment muxer works best with a single constant frame rate video.
Optionally it can generate a flat list of the created segments, one segment
per line.
@table @option
@item segment_format @var{format}
Override the inner container format; by default it is guessed from the filename
extension.
@item segment_time @var{t}
Set segment duration to @var{t} seconds.
@item segment_list @var{name}
Also generate a listfile named @var{name}.
@item segment_list_size @var{size}
Overwrite the listfile once it reaches @var{size} entries.
@item segment_wrap @var{limit}
Wrap around segment index once it reaches @var{limit}.
@end table
@example
ffmpeg -i in.mkv -c copy -map 0 -f segment -list out.list out%03d.nut
@end example
@section mp3
The MP3 muxer writes a raw MP3 stream with an ID3v2 header at the beginning and
optionally an ID3v1 tag at the end. ID3v2.3 and ID3v2.4 are supported, the
@code{id3v2_version} option controls which one is used. The legacy ID3v1 tag is
not written by default, but may be enabled with the @code{write_id3v1} option.
For seekable output the muxer also writes a Xing frame at the beginning, which
contains the number of frames in the file. It is useful for computing duration
of VBR files.
The muxer supports writing ID3v2 attached pictures (APIC frames). The pictures
are supplied to the muxer in form of a video stream with a single packet. There
can be any number of those streams, each will correspond to a single APIC frame.
The stream metadata tags @var{title} and @var{comment} map to APIC
@var{description} and @var{picture type} respectively. See
@url{http://id3.org/id3v2.4.0-frames} for allowed picture types.
Note that the APIC frames must be written at the beginning, so the muxer will
buffer the audio frames until it gets all the pictures. It is therefore advised
to provide the pictures as soon as possible to avoid excessive buffering.
Examples:
Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
@example
ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3
@end example
Attach a picture to an mp3:
@example
ffmpeg -i input.mp3 -i cover.png -c copy -metadata:s:v title="Album cover"
-metadata:s:v comment="Cover (Front)" out.mp3
@end example
@c man end MUXERS

View File

@@ -60,7 +60,7 @@ If not specified it defaults to the size of the input video.
@subsection Examples
The following command shows the @command{ffmpeg} output in an
The following command shows the @file{ffmpeg} output in an
SDL window, forcing its size to the qcif format:
@example
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL output"

View File

@@ -27,11 +27,11 @@ to configure.
@section BSD
BSD make will not build FFmpeg; you need to install and use GNU Make
(@command{gmake}).
(@file{gmake}).
@section (Open)Solaris
GNU Make is required to build FFmpeg, so you have to invoke (@command{gmake}),
GNU Make is required to build FFmpeg, so you have to invoke (@file{gmake}),
standard Solaris Make will not work. When building with a non-c99 front-end
(gcc, generic suncc) add either @code{--extra-libs=/usr/lib/values-xpg6.o}
or @code{--extra-libs=/usr/lib/64/values-xpg6.o} to the configure options
@@ -89,7 +89,7 @@ section and the FAQ.
FFmpeg does not build out-of-the-box with the packages the automated MinGW
installer provides. It also requires coreutils to be installed and many other
packages updated to the latest version. The minimum versions for some packages
packages updated to the latest version. The minimum version for some packages
are listed below:
@itemize
@@ -109,11 +109,14 @@ Notes:
@item Building natively using MSYS can be sped up by disabling implicit rules
in the Makefile by calling @code{make -r} instead of plain @code{make}. This
speed up is close to non-existent for normal one-off builds and is only
noticeable when running make for a second time (for example during
noticeable when running make for a second time (for example in
@code{make install}).
@item In order to compile FFplay, you must have the MinGW development library
of @uref{http://www.libsdl.org/, SDL} and @code{pkg-config} installed.
of @uref{http://www.libsdl.org/, SDL}.
Edit the @file{bin/sdl-config} script so that it points to the correct prefix
where SDL was installed. Verify that @file{sdl-config} can be launched from
the MSYS command line.
@item By using @code{./configure --enable-shared} when configuring FFmpeg,
you can build the FFmpeg libraries (e.g. libavutil, libavcodec,
@@ -137,7 +140,7 @@ you might have to modify the procedures slightly.
@subsection Using static libraries
Assuming you have just built and installed FFmpeg in @file{/usr/local}:
Assuming you have just built and installed FFmpeg in @file{/usr/local}.
@enumerate
@@ -232,8 +235,6 @@ make install
Your install path (@file{/usr/local/} by default) should now have the
necessary DLL and LIB files under the @file{bin} directory.
@end enumerate
Alternatively, build the libraries with a cross compiler, according to
the instructions below in @ref{Cross compilation for Windows with Linux}.
@@ -272,7 +273,7 @@ To create import libraries that work with the @code{/OPT:REF} option
@enumerate
@item Open @emph{Visual Studio 2005 Command Prompt}.
@item Open @file{Visual Studio 2005 Command Prompt}.
Alternatively, in a normal command line prompt, call @file{vcvars32.bat}
which sets up the environment variables for the Visual C++ tools
@@ -282,14 +283,17 @@ which sets up the environment variables for the Visual C++ tools
@item Enter the @file{bin} directory where the created LIB and DLL files
are stored.
@item Generate new import libraries with @command{lib.exe}:
@item Generate new import libraries with @file{lib.exe}:
@example
lib /machine:i386 /def:..\lib\foo-version.def /out:foo.lib
lib /machine:i386 /def:..\lib\avcodec-53.def /out:avcodec.lib
lib /machine:i386 /def:..\lib\avdevice-53.def /out:avdevice.lib
lib /machine:i386 /def:..\lib\avfilter-2.def /out:avfilter.lib
lib /machine:i386 /def:..\lib\avformat-53.def /out:avformat.lib
lib /machine:i386 /def:..\lib\avutil-51.def /out:avutil.lib
lib /machine:i386 /def:..\lib\swscale-2.def /out:swscale.lib
@end example
Replace @code{foo-version} and @code{foo} with the respective library names.
@end enumerate
@anchor{Cross compilation for Windows with Linux}
@@ -331,8 +335,8 @@ Then run
to make a static build.
To build shared libraries add a special compiler flag to work around current
@code{gcc4-core} package bugs in addition to the normal configure flags:
The current @code{gcc4-core} package is buggy and needs this flag to build
shared libraries:
@example
./configure --enable-shared --disable-static --extra-cflags=-fno-reorder-functions
@@ -348,12 +352,16 @@ These library packages are only available from
@uref{http://sourceware.org/cygwinports/, Cygwin Ports}:
@example
yasm, libSDL-devel, libfaac-devel, libaacplus-devel, libgsm-devel, libmp3lame-devel,
libschroedinger1.0-devel, speex-devel, libtheora-devel, libxvidcore-devel
yasm, libSDL-devel, libdirac-devel, libfaac-devel, libaacplus-devel, libgsm-devel,
libmp3lame-devel, libschroedinger1.0-devel, speex-devel, libtheora-devel,
libxvidcore-devel
@end example
The recommendation for x264 is to build it from source, as it evolves too
quickly for Cygwin Ports to be up to date.
The recommendation for libnut and x264 is to build them from source by
yourself, as they evolve too quickly for Cygwin Ports to be up to date.
Cygwin 1.7.x has IPv6 support. You can add IPv6 to Cygwin 1.5.x by means
of the @code{libgetaddrinfo-devel} package, available at Cygwin Ports.
@section Crosscompilation for Windows under Cygwin

@@ -1,125 +0,0 @@
/*
* Copyright (c) 2012 Anton Khirnov
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* generate texinfo manpages for avoptions
*/
#include <stddef.h>
#include <string.h>
#include <float.h>
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libavutil/opt.h"
static void print_usage(void)
{
fprintf(stderr, "Usage: enum_options type\n"
"type: format codec\n");
exit(1);
}
static void print_option(const AVOption *opts, const AVOption *o, int per_stream)
{
printf("@item -%s%s @var{", o->name, per_stream ? "[:stream_specifier]" : "");
switch (o->type) {
case AV_OPT_TYPE_BINARY: printf("hexadecimal string"); break;
case AV_OPT_TYPE_STRING: printf("string"); break;
case AV_OPT_TYPE_INT:
case AV_OPT_TYPE_INT64: printf("integer"); break;
case AV_OPT_TYPE_FLOAT:
case AV_OPT_TYPE_DOUBLE: printf("float"); break;
case AV_OPT_TYPE_RATIONAL: printf("rational number"); break;
case AV_OPT_TYPE_FLAGS: printf("flags"); break;
default: printf("value"); break;
}
printf("} (@emph{");
if (o->flags & AV_OPT_FLAG_DECODING_PARAM) {
printf("input");
if (o->flags & AV_OPT_FLAG_ENCODING_PARAM)
printf("/");
}
if (o->flags & AV_OPT_FLAG_ENCODING_PARAM) printf("output");
if (o->flags & AV_OPT_FLAG_AUDIO_PARAM) printf(",audio");
if (o->flags & AV_OPT_FLAG_VIDEO_PARAM) printf(",video");
if (o->flags & AV_OPT_FLAG_SUBTITLE_PARAM) printf(",subtitles");
printf("})\n");
if (o->help)
printf("%s\n", o->help);
if (o->unit) {
const AVOption *u;
printf("\nPossible values:\n@table @samp\n");
for (u = opts; u->name; u++) {
if (u->type == AV_OPT_TYPE_CONST && u->unit && !strcmp(u->unit, o->unit))
printf("@item %s\n%s\n", u->name, u->help ? u->help : "");
}
printf("@end table\n");
}
}
static void show_opts(const AVOption *opts, int per_stream)
{
const AVOption *o;
printf("@table @option\n");
for (o = opts; o->name; o++) {
if (o->type != AV_OPT_TYPE_CONST)
print_option(opts, o, per_stream);
}
printf("@end table\n");
}
static void show_format_opts(void)
{
#include "libavformat/options_table.h"
printf("@section Format AVOptions\n");
show_opts(options, 0);
}
static void show_codec_opts(void)
{
#include "libavcodec/options_table.h"
printf("@section Codec AVOptions\n");
show_opts(options, 1);
}
int main(int argc, char **argv)
{
if (argc < 2)
print_usage();
printf("@c DO NOT EDIT THIS FILE!\n"
"@c It was generated by print_options.\n\n");
if (!strcmp(argv[1], "format"))
show_format_opts();
else if (!strcmp(argv[1], "codec"))
show_codec_opts();
else
print_usage();
return 0;
}
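(For context: this helper prints the texinfo @section/@table fragments that the
documentation build includes; presumably it is run once per table, along the
lines of

  print_options format > doc/avoptions_format.texi
  print_options codec  > doc/avoptions_codec.texi

where the exact output paths are an assumption, not taken from this diff.)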

@@ -19,34 +19,20 @@ supported protocols.
A description of the currently available protocols follows.
@section bluray
@section applehttp
Read BluRay playlist.
Read Apple HTTP Live Streaming compliant segmented stream as
a uniform one. The M3U8 playlists describing the segments can be
remote HTTP resources or local files, accessed using the standard
file protocol.
HTTP is default, specific protocol can be declared by specifying
"+@var{proto}" after the applehttp URI scheme name, where @var{proto}
is either "file" or "http".
The accepted options are:
@table @option
@item angle
BluRay angle
@item chapter
Start chapter (1...N)
@item playlist
Playlist to read (BDMV/PLAYLIST/?????.mpls)
@end table
Examples:
Read longest playlist from BluRay mounted to /mnt/bluray:
@example
bluray:/mnt/bluray
@end example
Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2:
@example
-playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
applehttp://host/path/to/remote/resource.m3u8
applehttp+http://host/path/to/remote/resource.m3u8
applehttp+file://path/to/local/resource.m3u8
@end example
@section concat
@@ -66,7 +52,7 @@ resource to be concatenated, each one possibly specifying a distinct
protocol.
For example to read a sequence of files @file{split1.mpeg},
@file{split2.mpeg}, @file{split3.mpeg} with @command{ffplay} use the
@file{split2.mpeg}, @file{split3.mpeg} with @file{ffplay} use the
command:
@example
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
@@ -95,26 +81,6 @@ specified with the name "FILE.mpeg" is interpreted as the URL
Gopher protocol.
@section hls
Read Apple HTTP Live Streaming compliant segmented stream as
a uniform one. The M3U8 playlists describing the segments can be
remote HTTP resources or local files, accessed using the standard
file protocol.
The nested protocol is declared by specifying
"+@var{proto}" after the hls URI scheme name, where @var{proto}
is either "file" or "http".
@example
hls+http://host/path/to/remote/resource.m3u8
hls+file://path/to/local/resource.m3u8
@end example
Using this protocol is discouraged - the hls demuxer should work
just as well (if not, please report the issues) and is more complete.
To use the hls demuxer instead, simply use the direct URLs to the
m3u8 files.
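For example, instead of the @code{hls+http:} form shown above:
@example
ffplay http://host/path/to/remote/resource.m3u8
@end example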
@section http
HTTP (Hyper Text Transfer Protocol).
@@ -189,8 +155,8 @@ be seekable, so they will fail with the pipe output protocol.
Real-Time Messaging Protocol.
The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia
content across a TCP/IP network.
The Real-Time Messaging Protocol (RTMP) is used for streaming
multimedia content across a TCP/IP network.
The required syntax is:
@example
@@ -217,7 +183,7 @@ application specified in @var{app}, may be prefixed by "mp4:".
@end table
For example to read with @command{ffplay} a multimedia resource named
For example to read with @file{ffplay} a multimedia resource named
"sample" from the application "vod" from an RTMP server "myserver":
@example
ffplay rtmp://myserver/vod/sample
@@ -258,7 +224,7 @@ For example, to stream a file in real-time to an RTMP server using
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
@end example
To play the same stream using @command{ffplay}:
To play the same stream using @file{ffplay}:
@example
ffplay "rtmp://myserver/live/mystream live=1"
@end example
@@ -283,7 +249,7 @@ The required syntax for a RTSP url is:
rtsp://@var{hostname}[:@var{port}]/@var{path}
@end example
The following options (set on the @command{ffmpeg}/@command{ffplay} command
The following options (set on the @command{ffmpeg}/@file{ffplay} command
line, or set in code via @code{AVOption}s or in @code{avformat_open_input}),
are supported:
@@ -318,11 +284,11 @@ Accept packets only from negotiated peer address and port.
@end table
When receiving data over UDP, the demuxer tries to reorder received packets
(since they may arrive out of order, or packets may get lost totally). This
can be disabled by setting the maximum demuxing delay to zero (via
the @code{max_delay} field of AVFormatContext).
(since they may arrive out of order, or packets may get lost totally). In
order for this to be enabled, a maximum delay must be specified in the
@code{max_delay} field of AVFormatContext.
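As a minimal sketch in code (error handling trimmed; the URL is a
placeholder), disabling the reordering described above amounts to:

  AVFormatContext *s = avformat_alloc_context();
  s->max_delay = 0; /* zero disables the UDP packet reordering */
  if (avformat_open_input(&s, "rtsp://example.com/stream", NULL, NULL) < 0)
      return -1;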
When watching multi-bitrate Real-RTSP streams with @command{ffplay}, the
When watching multi-bitrate Real-RTSP streams with @file{ffplay}, the
streams to display can be chosen with @code{-vst} @var{n} and
@code{-ast} @var{n} for video and audio respectively, and can be switched
on the fly by pressing @code{v} and @code{a}.
@@ -399,13 +365,13 @@ To broadcast a stream on the local subnet, for watching in VLC:
ffmpeg -re -i @var{input} -f sap sap://224.0.0.255?same_port=1
@end example
Similarly, for watching in @command{ffplay}:
Similarly, for watching in ffplay:
@example
ffmpeg -re -i @var{input} -f sap sap://224.0.0.255
@end example
And for watching in @command{ffplay}, over IPv6:
And for watching in ffplay, over IPv6:
@example
ffmpeg -re -i @var{input} -f sap sap://[ff0e::1:2:3:4]

@@ -18,7 +18,7 @@ essential that changes to their codebase are publicly visible, clean and
easily reviewable, which again leads us to:
* use of a revision control system like git
* separation of cosmetic from non-cosmetic changes (this is almost entirely
ignored by mentors and students in soc 2006 which might lead to a surprise
ignored by mentors and students in soc 2006 which might lead to a suprise
when the code will be reviewed at the end before a possible inclusion in
FFmpeg, individual changes were generally not reviewable due to cosmetics).
* frequent commits, so that comments can be provided early

@@ -32,7 +32,7 @@ Special Converter v
Output
Planar/packed conversion is done when needed during sample format conversion.
Every step can be skipped without memcpy when it's not needed.
Every step can be skiped without memcpy when it's not needed.
Either resampling or rematrixing can be performed first, depending on
which way is faster.
The buffers are needed for resampling due to resampling being a process that
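As a hedged illustration of the pipeline above (a sketch only; the concrete
layouts, formats and rates are assumptions made for the example):

  SwrContext *swr = swr_alloc_set_opts(NULL,
          AV_CH_LAYOUT_STEREO,  AV_SAMPLE_FMT_S16, 44100,  /* output side */
          AV_CH_LAYOUT_5POINT1, AV_SAMPLE_FMT_FLT, 48000,  /* input side  */
          0, NULL);
  swr_init(swr);
  /* swr_convert() then rematrixes (5.1 -> stereo), resamples (48000 -> 44100)
   * and converts the sample format (float -> s16), skipping any step that is
   * not needed. */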

@@ -96,3 +96,4 @@ would benefit from it.
Also, as already hinted at, initFilter() accepts an optional convolutional
filter as input that can be used for contrast, saturation, blur, sharpening,
shift, chroma vs. luma shift, ...
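A hedged sketch of supplying such a filter (sws_getDefaultFilter() builds the
convolution vectors; the width/height variables and pixel formats are
assumptions):

  SwsFilter *filter = sws_getDefaultFilter(0.5, 0.0,  /* luma/chroma blur    */
                                           0.0, 0.0,  /* luma/chroma sharpen */
                                           0.0, 0.0,  /* chroma h/v shift    */
                                           0);
  struct SwsContext *sws = sws_getContext(width, height, PIX_FMT_YUV420P,
                                          width, height, PIX_FMT_RGB24,
                                          SWS_BICUBIC, filter, NULL, NULL);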

@@ -1,158 +0,0 @@
@chapter Syntax
@c man begin SYNTAX
When evaluating specific formats, FFmpeg uses internal library parsing
functions, shared by the tools. This section documents the syntax of
some of these formats.
@anchor{date syntax}
@section Date
The accepted syntax is:
@example
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...])|(HHMMSS[.m...]))[Z]
now
@end example
If the value is "now" it takes the current time.
Time is local time unless Z is appended, in which case it is
interpreted as UTC.
If the year-month-day part is not specified it takes the current
year-month-day.
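For example, the following are all valid dates:
@example
now
2011-12-11 21:24:41
20111211T212441.5Z
@end example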
@anchor{time duration syntax}
@section Time duration
The accepted syntax is:
@example
[-]HH:MM:SS[.m...]
[-]S+[.m...]
@end example
@var{HH} expresses the number of hours, @var{MM} the number of minutes
and @var{SS} the number of seconds.
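For example, both of the following express a duration of 12 hours, 3 minutes
and 45 seconds:
@example
12:03:45
43425
@end example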
@anchor{video size syntax}
@section Video size
Specify the size of the sourced video; it may be a string of the form
@var{width}x@var{height}, or the name of a size abbreviation.
The following abbreviations are recognized:
@table @samp
@item sqcif
128x96
@item qcif
176x144
@item cif
352x288
@item 4cif
704x576
@item 16cif
1408x1152
@item qqvga
160x120
@item qvga
320x240
@item vga
640x480
@item svga
800x600
@item xga
1024x768
@item uxga
1600x1200
@item qxga
2048x1536
@item sxga
1280x1024
@item qsxga
2560x2048
@item hsxga
5120x4096
@item wvga
852x480
@item wxga
1366x768
@item wsxga
1600x1024
@item wuxga
1920x1200
@item woxga
2560x1600
@item wqsxga
3200x2048
@item wquxga
3840x2400
@item whsxga
6400x4096
@item whuxga
7680x4800
@item cga
320x200
@item ega
640x350
@item hd480
852x480
@item hd720
1280x720
@item hd1080
1920x1080
@end table
@anchor{video rate syntax}
@section Video rate
Specify the frame rate of a video, expressed as the number of frames
generated per second. It has to be a string in the format
@var{frame_rate_num}/@var{frame_rate_den}, an integer number, a float
number or a valid video frame rate abbreviation.
The following abbreviations are recognized:
@table @samp
@item ntsc
30000/1001
@item pal
25/1
@item qntsc
30000/1001
@item qpal
25/1
@item sntsc
30000/1001
@item spal
25/1
@item film
24/1
@item ntsc-film
24000/1001
@end table
@anchor{ratio syntax}
@section Ratio
A ratio can be expressed as an expression, or in the form
@var{numerator}:@var{denominator}.
Note that a ratio with infinite (1/0) or negative value is
considered valid, so you should check the returned value if you
want to exclude those values.
The undefined value can be expressed using the "0:0" string.
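For example, both of the following express the 16:9 aspect ratio (the second
one as an expression):
@example
16:9
16/9
@end example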
@anchor{color syntax}
@section Color
It can be the name of a color (case-insensitive match) or a
[0x|#]RRGGBB[AA] sequence, possibly followed by "@@" and a string
representing the alpha component.
The alpha component may be a string composed of "0x" followed by a
hexadecimal number, or a decimal number between 0.0 and 1.0 that
represents the opacity value (0x00/0.0 means completely transparent,
0xff/1.0 completely opaque).
If the alpha component is not specified then 0xff is assumed.
The string "random" will result in a random color.
@c man end SYNTAX

@@ -36,7 +36,7 @@ $shift = "";
%defs = ();
$fnno = 1;
$inf = "";
@ibase = ();
$ibase = "";
while ($_ = shift) {
if (/^-D(.*)$/) {
@@ -52,8 +52,6 @@ while ($_ = shift) {
die "flags may only contain letters, digits, hyphens, dashes and underscores\n"
unless $flag =~ /^[a-zA-Z0-9_-]+$/;
$defs{$flag} = $value;
} elsif (/^-I(.*)$/) {
push @ibase, $1 ne "" ? $1 : shift;
} elsif (/^-/) {
usage();
} else {
@@ -63,12 +61,10 @@ while ($_ = shift) {
}
}
push @ibase, ".";
if (defined $in) {
$inf = gensym();
open($inf, "<$in") or die "opening \"$in\": $!\n";
push @ibase, $1 if $in =~ m|^(.+)/[^/]+$|;
$ibase = $1 if $in =~ m|^(.+)/[^/]+$|;
} else {
$inf = \*STDIN;
}
@@ -78,7 +74,7 @@ if (defined $out) {
}
while(defined $inf) {
INF: while(<$inf>) {
while(<$inf>) {
# Certain commands are discarded without further processing.
/^\@(?:
[a-z]+index # @*index: useful only in complete manual
@@ -108,10 +104,11 @@ INF: while(<$inf>) {
push @instack, $inf;
$inf = gensym();
for (@ibase) {
open($inf, "<" . $_ . "/" . $1) and next INF;
}
die "cannot open $1: $!\n";
# Try cwd and $ibase.
open($inf, "<" . $1)
or open($inf, "<" . $ibase . "/" . $1)
or die "cannot open $1 or $ibase/$1: $!\n";
next;
};
# Look for blocks surrounded by @c man begin SECTION ... @c man end.

@@ -107,3 +107,4 @@ one with score 3)
Author: Michael Niedermayer
Copyright LGPL

ffmpeg.c: 4273 changed lines (diff suppressed because it is too large)

ffplay.c: 957 changed lines (diff suppressed because it is too large)

@@ -0,0 +1,17 @@
vcodec=libvpx
g=120
rc_lookahead=16
quality=good
speed=0
profile=1
qmax=51
qmin=11
slices=4
vb=2M
#ignored unless using -pass 2
maxrate=24M
minrate=100k
arnr_max_frames=7
arnr_strength=5
arnr_type=3

@@ -0,0 +1,17 @@
vcodec=libvpx
g=120
rc_lookahead=25
quality=good
speed=0
profile=1
qmax=51
qmin=11
slices=4
vb=2M
#ignored unless using -pass 2
maxrate=24M
minrate=100k
arnr_max_frames=7
arnr_strength=5
arnr_type=3

@@ -0,0 +1,16 @@
vcodec=libvpx
g=120
rc_lookahead=16
quality=good
speed=0
profile=0
qmax=63
qmin=0
vb=768k
#ignored unless using -pass 2
maxrate=1.5M
minrate=40k
arnr_max_frames=7
arnr_strength=5
arnr_type=3

@@ -0,0 +1,17 @@
vcodec=libvpx
g=120
rc_lookahead=16
quality=good
speed=0
profile=0
qmax=51
qmin=11
slices=4
vb=2M
#ignored unless using -pass 2
maxrate=24M
minrate=100k
arnr_max_frames=7
arnr_strength=5
arnr_type=3

@@ -0,0 +1,17 @@
vcodec=libvpx
g=120
rc_lookahead=25
quality=good
speed=0
profile=0
qmax=51
qmin=11
slices=4
vb=2M
#ignored unless using -pass 2
maxrate=24M
minrate=100k
arnr_max_frames=7
arnr_strength=5
arnr_type=3
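These key=value files are ffpreset files; assuming one of them were installed
as, say, "libvpx-720p.ffpreset" (a hypothetical name, since the real file
names are not visible in this view), it would be applied with:

  ffmpeg -i INPUT -vcodec libvpx -vpre 720p OUTPUT.webm

The -vpre argument is resolved against the name of the selected codec.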

ffprobe.c: 1205 changed lines (diff suppressed because it is too large)

@@ -1,4 +1,5 @@
/*
* Multiple format streaming server
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
*
* This file is part of FFmpeg.
@@ -18,11 +19,6 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* multiple format streaming server based on the FFmpeg libraries
*/
#include "config.h"
#if !HAVE_CLOSESOCKET
#define closesocket close
@@ -30,16 +26,13 @@
#include <string.h>
#include <stdlib.h>
#include "libavformat/avformat.h"
// FIXME those are internal headers, ffserver _really_ shouldn't use them
#include "libavformat/ffm.h"
#include "libavformat/network.h"
#include "libavformat/os_support.h"
#include "libavformat/rtpdec.h"
#include "libavformat/rtsp.h"
// XXX for ffio_open_dyn_packet_buffer, to be removed
#include "libavformat/avio_internal.h"
#include "libavformat/internal.h"
#include "libavformat/url.h"
#include "libavutil/avstring.h"
#include "libavutil/lfg.h"
#include "libavutil/dict.h"
@@ -339,7 +332,8 @@ static int resolve_host(struct in_addr *sin_addr, const char *hostname)
if (!ff_inet_aton(hostname, sin_addr)) {
#if HAVE_GETADDRINFO
struct addrinfo *ai, *cur;
struct addrinfo hints = { 0 };
struct addrinfo hints;
memset(&hints, 0, sizeof(hints));
hints.ai_family = AF_INET;
if (getaddrinfo(hostname, NULL, &hints, &ai))
return -1;
@@ -481,7 +475,7 @@ static void start_children(FFStream *feed)
slash++;
strcpy(slash, "ffmpeg");
http_log("Launch command line: ");
http_log("Launch commandline: ");
http_log("%s ", pathname);
for (i = 1; feed->child_argv[i] && feed->child_argv[i][0]; i++)
http_log("%s ", feed->child_argv[i]);
@@ -501,10 +495,7 @@ static void start_children(FFStream *feed)
}
/* This is needed to make relative pathnames work */
if (chdir(my_program_dir) < 0) {
http_log("chdir failed\n");
exit(1);
}
chdir(my_program_dir);
signal(SIGPIPE, SIG_DFL);
@@ -858,7 +849,7 @@ static void close_connection(HTTPContext *c)
if (st->codec->codec)
avcodec_close(st->codec);
}
avformat_close_input(&c->fmt_in);
av_close_input_file(c->fmt_in);
}
/* free RTP output streams if any */
@@ -876,7 +867,7 @@ static void close_connection(HTTPContext *c)
}
h = c->rtp_handles[i];
if (h)
ffurl_close(h);
url_close(h);
}
ctx = &c->fmt_ctx;
@@ -1871,7 +1862,7 @@ static int http_parse_request(HTTPContext *c)
static void fmt_bytecount(AVIOContext *pb, int64_t count)
{
static const char suffix[] = " kMGTP";
static const char *suffix = " kMGTP";
const char *s;
for (s = suffix; count >= 100000 && s[1]; count /= 1000, s++);
@@ -2121,18 +2112,33 @@ static void compute_status(HTTPContext *c)
c->buffer_end = c->pb_buffer + len;
}
/* check if the parser needs to be opened for stream i */
static void open_parser(AVFormatContext *s, int i)
{
AVStream *st = s->streams[i];
AVCodec *codec;
if (!st->codec->codec) {
codec = avcodec_find_decoder(st->codec->codec_id);
if (codec && (codec->capabilities & CODEC_CAP_PARSE_ONLY)) {
st->codec->parse_only = 1;
if (avcodec_open2(st->codec, codec, NULL) < 0)
st->codec->parse_only = 0;
}
}
}
static int open_input_stream(HTTPContext *c, const char *info)
{
char buf[128];
char input_filename[1024];
AVFormatContext *s = NULL;
int buf_size, i, ret;
int i, ret;
int64_t stream_pos;
/* find file name */
if (c->stream->feed) {
strcpy(input_filename, c->stream->feed->feed_filename);
buf_size = FFM_PACKET_SIZE;
/* compute position (absolute time) */
if (av_find_info_tag(buf, sizeof(buf), "date", info)) {
if ((ret = av_parse_time(&stream_pos, buf, 0)) < 0)
@@ -2144,7 +2150,6 @@ static int open_input_stream(HTTPContext *c, const char *info)
stream_pos = av_gettime() - c->stream->prebuffer * (int64_t)1000;
} else {
strcpy(input_filename, c->stream->feed_filename);
buf_size = 0;
/* compute position (relative time) */
if (av_find_info_tag(buf, sizeof(buf), "date", info)) {
if ((ret = av_parse_time(&stream_pos, buf, 1)) < 0)
@@ -2160,18 +2165,18 @@ static int open_input_stream(HTTPContext *c, const char *info)
http_log("could not open %s: %d\n", input_filename, ret);
return -1;
}
/* set buffer size */
if (buf_size > 0) ffio_set_buf_size(s->pb, buf_size);
s->flags |= AVFMT_FLAG_GENPTS;
c->fmt_in = s;
if (strcmp(s->iformat->name, "ffm") && avformat_find_stream_info(c->fmt_in, NULL) < 0) {
http_log("Could not find stream info '%s'\n", input_filename);
avformat_close_input(&s);
av_close_input_file(s);
return -1;
}
/* open each parser */
for(i=0;i<s->nb_streams;i++)
open_parser(s, i);
/* choose stream as clock source (we favor the video stream if
present) for packet sending */
c->pts_stream_index = 0;
@@ -2263,6 +2268,7 @@ static int http_prepare_data(HTTPContext *c)
* Default value from FFmpeg
* Try to set it via a configuration option
*/
c->fmt_ctx.preload = (int)(0.5*AV_TIME_BASE);
c->fmt_ctx.max_delay = (int)(0.7*AV_TIME_BASE);
if (avformat_write_header(&c->fmt_ctx, NULL) < 0) {
@@ -2305,7 +2311,8 @@ static int http_prepare_data(HTTPContext *c)
return 0;
} else {
if (c->stream->loop) {
avformat_close_input(&c->fmt_in);
av_close_input_file(c->fmt_in);
c->fmt_in = NULL;
if (open_input_stream(c, "") < 0)
goto no_loop;
goto redo;
@@ -2381,7 +2388,7 @@ static int http_prepare_data(HTTPContext *c)
if (c->rtp_protocol == RTSP_LOWER_TRANSPORT_TCP)
max_packet_size = RTSP_TCP_MAX_PACKET_SIZE;
else
max_packet_size = c->rtp_handles[c->packet_stream_index]->max_packet_size;
max_packet_size = url_get_max_packet_size(c->rtp_handles[c->packet_stream_index]);
ret = ffio_open_dyn_packet_buf(&ctx->pb, max_packet_size);
} else {
ret = avio_open_dyn_buf(&ctx->pb);
@@ -2534,8 +2541,8 @@ static int http_send_data(HTTPContext *c)
} else {
/* send RTP packet directly in UDP */
c->buffer_ptr += 4;
ffurl_write(c->rtp_handles[c->packet_stream_index],
c->buffer_ptr, len);
url_write(c->rtp_handles[c->packet_stream_index],
c->buffer_ptr, len);
c->buffer_ptr += len;
/* here we continue as we can send several packets per 10 ms slot */
}
@@ -2585,11 +2592,8 @@ static int http_start_receive_data(HTTPContext *c)
if (c->stream->truncate) {
/* truncate feed file */
ffm_write_write_index(c->feed_fd, FFM_PACKET_SIZE);
ftruncate(c->feed_fd, FFM_PACKET_SIZE);
http_log("Truncating feed file '%s'\n", c->stream->feed_filename);
if (ftruncate(c->feed_fd, FFM_PACKET_SIZE) < 0) {
http_log("Error truncating feed file: %s\n", strerror(errno));
return -1;
}
} else {
if ((c->stream->feed_write_index = ffm_read_write_index(fd)) < 0) {
http_log("Error reading write index from feed file: %s\n", strerror(errno));
@@ -2732,7 +2736,7 @@ static int http_receive_data(HTTPContext *c)
/* Now we have the actual streams */
if (s->nb_streams != feed->nb_streams) {
avformat_close_input(&s);
av_close_input_stream(s);
av_free(pb);
http_log("Feed '%s' stream number does not match registered feed\n",
c->stream->feed_filename);
@@ -2745,7 +2749,7 @@ static int http_receive_data(HTTPContext *c)
avcodec_copy_context(fst->codec, st->codec);
}
avformat_close_input(&s);
av_close_input_stream(s);
av_free(pb);
}
c->buffer_ptr = c->buffer;
@@ -2837,7 +2841,7 @@ static int rtsp_parse_request(HTTPContext *c)
char protocol[32];
char line[1024];
int len;
RTSPMessageHeader header1 = { 0 }, *header = &header1;
RTSPMessageHeader header1, *header = &header1;
c->buffer_ptr[0] = '\0';
p = c->buffer;
@@ -2863,6 +2867,7 @@ static int rtsp_parse_request(HTTPContext *c)
}
/* parse each header line */
memset(header, 0, sizeof(*header));
/* skip to next line */
while (*p != '\n' && *p != '\0')
p++;
@@ -3420,10 +3425,10 @@ static int rtp_new_av_stream(HTTPContext *c,
"rtp://%s:%d", ipaddr, ntohs(dest_addr->sin_port));
}
if (ffurl_open(&h, ctx->filename, AVIO_FLAG_WRITE, NULL, NULL) < 0)
if (url_open(&h, ctx->filename, AVIO_FLAG_WRITE) < 0)
goto fail;
c->rtp_handles[stream_index] = h;
max_packet_size = h->max_packet_size;
max_packet_size = url_get_max_packet_size(h);
break;
case RTSP_LOWER_TRANSPORT_TCP:
/* RTP/TCP case */
@@ -3446,7 +3451,7 @@ static int rtp_new_av_stream(HTTPContext *c,
if (avformat_write_header(ctx, NULL) < 0) {
fail:
if (h)
ffurl_close(h);
url_close(h);
av_free(ctx);
return -1;
}
@@ -3483,7 +3488,7 @@ static AVStream *add_av_stream1(FFStream *stream, AVCodecContext *codec, int cop
}
fst->priv_data = av_mallocz(sizeof(FeedData));
fst->index = stream->nb_streams;
avpriv_set_pts_info(fst, 33, 1, 90000);
av_set_pts_info(fst, 33, 1, 90000);
fst->sample_aspect_ratio = codec->sample_aspect_ratio;
stream->streams[stream->nb_streams++] = fst;
return fst;
@@ -3624,7 +3629,7 @@ static void build_file_streams(void)
if (avformat_find_stream_info(infile, NULL) < 0) {
http_log("Could not find codec parameters from '%s'\n",
stream->feed_filename);
avformat_close_input(&infile);
av_close_input_file(infile);
goto fail;
}
extract_mpeg4_header(infile);
@@ -3632,7 +3637,7 @@ static void build_file_streams(void)
for(i=0;i<infile->nb_streams;i++)
add_av_stream1(stream, infile->streams[i]->codec, 1);
avformat_close_input(&infile);
av_close_input_file(infile);
}
}
}
@@ -3669,8 +3674,6 @@ static void build_feed_streams(void)
int matches = 0;
if (avformat_open_input(&s, feed->feed_filename, NULL, NULL) >= 0) {
/* set buffer size */
ffio_set_buf_size(s->pb, FFM_PACKET_SIZE);
/* Now see if it matches */
if (s->nb_streams == feed->nb_streams) {
matches = 1;
@@ -3724,7 +3727,7 @@ static void build_feed_streams(void)
http_log("Deleting feed file '%s' as stream counts differ (%d != %d)\n",
feed->feed_filename, s->nb_streams, feed->nb_streams);
avformat_close_input(&s);
av_close_input_file(s);
} else
http_log("Deleting feed file '%s' as it appears to be corrupt\n",
feed->feed_filename);
@@ -4230,8 +4233,8 @@ static int parse_ffconfig(const char *filename)
}
stream->fmt = ffserver_guess_format(NULL, stream->filename, NULL);
avcodec_get_context_defaults3(&video_enc, NULL);
avcodec_get_context_defaults3(&audio_enc, NULL);
avcodec_get_context_defaults2(&video_enc, AVMEDIA_TYPE_VIDEO);
avcodec_get_context_defaults2(&audio_enc, AVMEDIA_TYPE_AUDIO);
audio_id = CODEC_ID_NONE;
video_id = CODEC_ID_NONE;
@@ -4665,13 +4668,13 @@ static const OptionDef options[] = {
int main(int argc, char **argv)
{
struct sigaction sigact = { { 0 } };
struct sigaction sigact;
parse_loglevel(argc, argv, options);
av_register_all();
avformat_network_init();
show_banner(argc, argv, options);
show_banner();
my_program_name = argv[0];
my_program_dir = getcwd(0, 0);
@@ -4683,6 +4686,7 @@ int main(int argc, char **argv)
av_lfg_init(&random_state, av_get_random_seed());
memset(&sigact, 0, sizeof(sigact));
sigact.sa_handler = handle_child_exit;
sigact.sa_flags = SA_NOCLDSTOP | SA_RESTART;
sigaction(SIGCHLD, &sigact, 0);
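A recurring pattern throughout this file is the move between the deprecated
lavf entry points and their replacements; a minimal sketch of the close path
as it appears in the hunks above:

  /* deprecated call, paired in the hunks above with ... */
  av_close_input_file(s);
  s = NULL;

  /* ... the replacement, which closes the input and NULLs the pointer: */
  avformat_close_input(&s);

The url_open()/url_close()/url_write() calls likewise map to
ffurl_open()/ffurl_close()/ffurl_write(), and url_get_max_packet_size(h)
to reading h->max_packet_size directly, as shown above.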

File diff suppressed because it is too large.

@@ -38,20 +38,20 @@
#include "avcodec.h"
static const enum PixelFormat pixfmt_rgb24[] = {
PIX_FMT_BGR24, PIX_FMT_RGB32, PIX_FMT_NONE };
static const enum PixelFormat pixfmt_rgb24[] = {PIX_FMT_BGR24, PIX_FMT_RGB32, PIX_FMT_NONE};
/*
* Decoder context
*/
typedef struct EightBpsContext {
AVCodecContext *avctx;
AVFrame pic;
unsigned char planes;
unsigned char planemap[4];
AVCodecContext *avctx;
AVFrame pic;
uint32_t pal[256];
unsigned char planes;
unsigned char planemap[4];
uint32_t pal[256];
} EightBpsContext;
@@ -60,90 +60,87 @@ typedef struct EightBpsContext {
* Decode a frame
*
*/
static int decode_frame(AVCodecContext *avctx, void *data,
int *data_size, AVPacket *avpkt)
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
EightBpsContext * const c = avctx->priv_data;
const unsigned char *encoded = buf;
unsigned char *pixptr, *pixptr_end;
unsigned int height = avctx->height; // Real image height
unsigned int dlen, p, row;
const unsigned char *lp, *dp;
unsigned char count;
unsigned int planes = c->planes;
unsigned char *planemap = c->planemap;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
EightBpsContext * const c = avctx->priv_data;
const unsigned char *encoded = buf;
unsigned char *pixptr, *pixptr_end;
unsigned int height = avctx->height; // Real image height
unsigned int dlen, p, row;
const unsigned char *lp, *dp;
unsigned char count;
unsigned int planes = c->planes;
unsigned char *planemap = c->planemap;
if (c->pic.data[0])
avctx->release_buffer(avctx, &c->pic);
if(c->pic.data[0])
avctx->release_buffer(avctx, &c->pic);
c->pic.reference = 0;
c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
if (avctx->get_buffer(avctx, &c->pic) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
c->pic.reference = 0;
c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
if(avctx->get_buffer(avctx, &c->pic) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
/* Set data pointer after line lengths */
dp = encoded + planes * (height << 1);
/* Set data pointer after line lengths */
dp = encoded + planes * (height << 1);
for (p = 0; p < planes; p++) {
/* Lines length pointer for this plane */
lp = encoded + p * (height << 1);
for (p = 0; p < planes; p++) {
/* Lines length pointer for this plane */
lp = encoded + p * (height << 1);
/* Decode a plane */
for (row = 0; row < height; row++) {
pixptr = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p];
pixptr_end = pixptr + c->pic.linesize[0];
dlen = av_be2ne16(*(const unsigned short *)(lp + row * 2));
/* Decode a row of this plane */
while (dlen > 0) {
if (dp + 1 >= buf + buf_size)
return -1;
if ((count = *dp++) <= 127) {
count++;
dlen -= count + 1;
if (pixptr + count * planes > pixptr_end)
break;
if (dp + count > buf + buf_size)
return -1;
while (count--) {
*pixptr = *dp++;
pixptr += planes;
}
} else {
count = 257 - count;
if (pixptr + count * planes > pixptr_end)
break;
while (count--) {
*pixptr = *dp;
pixptr += planes;
}
dp++;
dlen -= 2;
/* Decode a plane */
for(row = 0; row < height; row++) {
pixptr = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p];
pixptr_end = pixptr + c->pic.linesize[0];
dlen = av_be2ne16(*(const unsigned short *)(lp+row*2));
/* Decode a row of this plane */
while(dlen > 0) {
if(dp + 1 >= buf+buf_size) return -1;
if ((count = *dp++) <= 127) {
count++;
dlen -= count + 1;
if (pixptr + count * planes > pixptr_end)
break;
if(dp + count > buf+buf_size) return -1;
while(count--) {
*pixptr = *dp++;
pixptr += planes;
}
} else {
count = 257 - count;
if (pixptr + count * planes > pixptr_end)
break;
while(count--) {
*pixptr = *dp;
pixptr += planes;
}
dp++;
dlen -= 2;
}
}
}
}
}
}
if (avctx->bits_per_coded_sample <= 8) {
const uint8_t *pal = av_packet_get_side_data(avpkt,
AV_PKT_DATA_PALETTE,
NULL);
if (pal) {
c->pic.palette_has_changed = 1;
memcpy(c->pal, pal, AVPALETTE_SIZE);
}
memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
}
if (avctx->bits_per_coded_sample <= 8) {
const uint8_t *pal = av_packet_get_side_data(avpkt,
AV_PKT_DATA_PALETTE,
NULL);
if (pal) {
c->pic.palette_has_changed = 1;
memcpy(c->pal, pal, AVPALETTE_SIZE);
}
*data_size = sizeof(AVFrame);
*(AVFrame*)data = c->pic;
memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
}
/* always report that the buffer was completely consumed */
return buf_size;
*data_size = sizeof(AVFrame);
*(AVFrame*)data = c->pic;
/* always report that the buffer was completely consumed */
return buf_size;
}
@@ -154,47 +151,47 @@ static int decode_frame(AVCodecContext *avctx, void *data,
*/
static av_cold int decode_init(AVCodecContext *avctx)
{
EightBpsContext * const c = avctx->priv_data;
EightBpsContext * const c = avctx->priv_data;
c->avctx = avctx;
c->pic.data[0] = NULL;
c->avctx = avctx;
avcodec_get_frame_defaults(&c->pic);
switch (avctx->bits_per_coded_sample) {
case 8:
avctx->pix_fmt = PIX_FMT_PAL8;
c->planes = 1;
c->planemap[0] = 0; // 1st plane is palette indexes
break;
case 24:
avctx->pix_fmt = avctx->get_format(avctx, pixfmt_rgb24);
c->planes = 3;
c->planemap[0] = 2; // 1st plane is red
c->planemap[1] = 1; // 2nd plane is green
c->planemap[2] = 0; // 3rd plane is blue
break;
case 32:
avctx->pix_fmt = PIX_FMT_RGB32;
c->planes = 4;
avcodec_get_frame_defaults(&c->pic);
c->pic.data[0] = NULL;
switch (avctx->bits_per_coded_sample) {
case 8:
avctx->pix_fmt = PIX_FMT_PAL8;
c->planes = 1;
c->planemap[0] = 0; // 1st plane is palette indexes
break;
case 24:
avctx->pix_fmt = avctx->get_format(avctx, pixfmt_rgb24);
c->planes = 3;
c->planemap[0] = 2; // 1st plane is red
c->planemap[1] = 1; // 2nd plane is green
c->planemap[2] = 0; // 3rd plane is blue
break;
case 32:
avctx->pix_fmt = PIX_FMT_RGB32;
c->planes = 4;
#if HAVE_BIGENDIAN
c->planemap[0] = 1; // 1st plane is red
c->planemap[1] = 2; // 2nd plane is green
c->planemap[2] = 3; // 3rd plane is blue
c->planemap[3] = 0; // 4th plane is alpha
c->planemap[0] = 1; // 1st plane is red
c->planemap[1] = 2; // 2nd plane is green
c->planemap[2] = 3; // 3rd plane is blue
c->planemap[3] = 0; // 4th plane is alpha
#else
c->planemap[0] = 2; // 1st plane is red
c->planemap[1] = 1; // 2nd plane is green
c->planemap[2] = 0; // 3rd plane is blue
c->planemap[3] = 3; // 4th plane is alpha
c->planemap[0] = 2; // 1st plane is red
c->planemap[1] = 1; // 2nd plane is green
c->planemap[2] = 0; // 3rd plane is blue
c->planemap[3] = 3; // 4th plane is alpha
#endif
break;
default:
av_log(avctx, AV_LOG_ERROR, "Error: Unsupported color depth: %u.\n",
avctx->bits_per_coded_sample);
return -1;
}
break;
default:
av_log(avctx, AV_LOG_ERROR, "Error: Unsupported color depth: %u.\n", avctx->bits_per_coded_sample);
return -1;
}
return 0;
return 0;
}
@@ -207,12 +204,12 @@ static av_cold int decode_init(AVCodecContext *avctx)
*/
static av_cold int decode_end(AVCodecContext *avctx)
{
EightBpsContext * const c = avctx->priv_data;
EightBpsContext * const c = avctx->priv_data;
if (c->pic.data[0])
avctx->release_buffer(avctx, &c->pic);
if (c->pic.data[0])
avctx->release_buffer(avctx, &c->pic);
return 0;
return 0;
}

@@ -47,7 +47,7 @@ typedef struct EightSvxContext {
/* buffer used to store the whole audio decoded/interleaved chunk,
* which is sent with the first packet */
uint8_t *samples;
int64_t samples_size;
size_t samples_size;
int samples_idx;
} EightSvxContext;
@@ -106,11 +106,12 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
{
EightSvxContext *esc = avctx->priv_data;
int n, out_data_size, ret;
uint8_t *out_date;
uint8_t *src, *dst;
/* decode and interleave the first packet */
if (!esc->samples && avpkt) {
uint8_t *deinterleaved_samples, *p = NULL;
uint8_t *deinterleaved_samples;
esc->samples_size = avctx->codec->id == CODEC_ID_8SVX_RAW || avctx->codec->id ==CODEC_ID_PCM_S8_PLANAR?
avpkt->size : avctx->channels + (avpkt->size-avctx->channels) * 2;
@@ -129,7 +130,6 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
}
if (!(deinterleaved_samples = av_mallocz(n)))
return AVERROR(ENOMEM);
p = deinterleaved_samples;
/* the uncompressed starting value is contained in the first byte */
if (avctx->channels == 2) {
@@ -146,7 +146,6 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
interleave_stereo(esc->samples, deinterleaved_samples, esc->samples_size);
else
memcpy(esc->samples, deinterleaved_samples, esc->samples_size);
av_freep(&p);
}
/* get output buffer */

@@ -3,14 +3,7 @@ include $(SUBDIR)../config.mak
NAME = avcodec
FFLIBS = avutil
HEADERS = avcodec.h \
avfft.h \
dxva2.h \
vaapi.h \
vda.h \
vdpau.h \
version.h \
xvmc.h \
HEADERS = avcodec.h avfft.h dxva2.h opt.h vaapi.h vda.h vdpau.h version.h xvmc.h
OBJS = allcodecs.o \
audioconvert.o \
@@ -46,7 +39,6 @@ OBJS-$(CONFIG_GOLOMB) += golomb.o
OBJS-$(CONFIG_H264DSP) += h264dsp.o h264idct.o
OBJS-$(CONFIG_H264PRED) += h264pred.o
OBJS-$(CONFIG_HUFFMAN) += huffman.o
OBJS-$(CONFIG_LIBXVID) += libxvid_rc.o
OBJS-$(CONFIG_LPC) += lpc.o
OBJS-$(CONFIG_LSP) += lsp.o
OBJS-$(CONFIG_MDCT) += mdct_fixed.o mdct_float.o
@@ -64,13 +56,11 @@ OBJS-$(CONFIG_VDPAU) += vdpau.o
OBJS-$(CONFIG_A64MULTI_ENCODER) += a64multienc.o elbg.o
OBJS-$(CONFIG_A64MULTI5_ENCODER) += a64multienc.o elbg.o
OBJS-$(CONFIG_AAC_DECODER) += aacdec.o aactab.o aacsbr.o aacps.o \
aacadtsdec.o mpeg4audio.o kbdwin.o \
sbrdsp.o aacpsdsp.o
aacadtsdec.o mpeg4audio.o kbdwin.o
OBJS-$(CONFIG_AAC_ENCODER) += aacenc.o aaccoder.o \
aacpsy.o aactab.o \
psymodel.o iirfilter.o \
mpeg4audio.o kbdwin.o \
audio_frame_queue.o
mpeg4audio.o kbdwin.o
OBJS-$(CONFIG_AASC_DECODER) += aasc.o msrledec.o
OBJS-$(CONFIG_AC3_DECODER) += ac3dec.o ac3dec_data.o ac3.o kbdwin.o
OBJS-$(CONFIG_AC3_ENCODER) += ac3enc_float.o ac3enc.o ac3tab.o \
@@ -101,18 +91,12 @@ OBJS-$(CONFIG_ATRAC1_DECODER) += atrac1.o atrac.o
OBJS-$(CONFIG_ATRAC3_DECODER) += atrac3.o atrac.o
OBJS-$(CONFIG_AURA_DECODER) += cyuv.o
OBJS-$(CONFIG_AURA2_DECODER) += aura.o
OBJS-$(CONFIG_AVRP_DECODER) += r210dec.o
OBJS-$(CONFIG_AVRP_ENCODER) += r210enc.o
OBJS-$(CONFIG_AVS_DECODER) += avs.o
OBJS-$(CONFIG_AVUI_DECODER) += avuidec.o
OBJS-$(CONFIG_AVUI_ENCODER) += avuienc.o
OBJS-$(CONFIG_AYUV_DECODER) += v408dec.o
OBJS-$(CONFIG_AYUV_ENCODER) += v408enc.o
OBJS-$(CONFIG_BETHSOFTVID_DECODER) += bethsoftvideo.o
OBJS-$(CONFIG_BFI_DECODER) += bfi.o
OBJS-$(CONFIG_BINK_DECODER) += bink.o binkdsp.o
OBJS-$(CONFIG_BINKAUDIO_DCT_DECODER) += binkaudio.o wma.o wma_common.o
OBJS-$(CONFIG_BINKAUDIO_RDFT_DECODER) += binkaudio.o wma.o wma_common.o
OBJS-$(CONFIG_BINKAUDIO_DCT_DECODER) += binkaudio.o wma.o
OBJS-$(CONFIG_BINKAUDIO_RDFT_DECODER) += binkaudio.o wma.o
OBJS-$(CONFIG_BINTEXT_DECODER) += bintext.o cga_data.o
OBJS-$(CONFIG_BMP_DECODER) += bmp.o msrledec.o
OBJS-$(CONFIG_BMP_ENCODER) += bmpenc.o
@@ -122,15 +106,13 @@ OBJS-$(CONFIG_C93_DECODER) += c93.o
OBJS-$(CONFIG_CAVS_DECODER) += cavs.o cavsdec.o cavsdsp.o \
mpeg12data.o mpegvideo.o
OBJS-$(CONFIG_CDGRAPHICS_DECODER) += cdgraphics.o
OBJS-$(CONFIG_CDXL_DECODER) += cdxl.o
OBJS-$(CONFIG_CINEPAK_DECODER) += cinepak.o
OBJS-$(CONFIG_CLJR_DECODER) += cljr.o
OBJS-$(CONFIG_CLJR_ENCODER) += cljr.o
OBJS-$(CONFIG_COOK_DECODER) += cook.o
OBJS-$(CONFIG_CSCD_DECODER) += cscd.o
OBJS-$(CONFIG_CYUV_DECODER) += cyuv.o
OBJS-$(CONFIG_DCA_DECODER) += dca.o synth_filter.o dcadsp.o \
dca_parser.o
OBJS-$(CONFIG_DCA_DECODER) += dca.o synth_filter.o dcadsp.o
OBJS-$(CONFIG_DCA_ENCODER) += dcaenc.o
OBJS-$(CONFIG_DIRAC_DECODER) += diracdec.o dirac.o diracdsp.o \
dirac_arith.o mpeg12data.o dwt.o
@@ -148,8 +130,8 @@ OBJS-$(CONFIG_DVBSUB_DECODER) += dvbsubdec.o
OBJS-$(CONFIG_DVBSUB_ENCODER) += dvbsub.o
OBJS-$(CONFIG_DVDSUB_DECODER) += dvdsubdec.o
OBJS-$(CONFIG_DVDSUB_ENCODER) += dvdsubenc.o
OBJS-$(CONFIG_DVVIDEO_DECODER) += dvdec.o dv.o dvdata.o dv_profile.o
OBJS-$(CONFIG_DVVIDEO_ENCODER) += dv.o dvdata.o dv_profile.o
OBJS-$(CONFIG_DVVIDEO_DECODER) += dv.o dvdata.o
OBJS-$(CONFIG_DVVIDEO_ENCODER) += dv.o dvdata.o
OBJS-$(CONFIG_DXA_DECODER) += dxa.o
OBJS-$(CONFIG_DXTORY_DECODER) += dxtory.o
OBJS-$(CONFIG_EAC3_DECODER) += eac3dec.o eac3_data.o
@@ -170,13 +152,11 @@ OBJS-$(CONFIG_EIGHTSVX_FIB_DECODER) += 8svx.o
OBJS-$(CONFIG_EIGHTSVX_RAW_DECODER) += 8svx.o
OBJS-$(CONFIG_ESCAPE124_DECODER) += escape124.o
OBJS-$(CONFIG_ESCAPE130_DECODER) += escape130.o
OBJS-$(CONFIG_EXR_DECODER) += exr.o
OBJS-$(CONFIG_FFV1_DECODER) += ffv1.o rangecoder.o
OBJS-$(CONFIG_FFV1_ENCODER) += ffv1.o rangecoder.o
OBJS-$(CONFIG_FFVHUFF_DECODER) += huffyuv.o
OBJS-$(CONFIG_FFVHUFF_ENCODER) += huffyuv.o
OBJS-$(CONFIG_FFWAVESYNTH_DECODER) += ffwavesynth.o
OBJS-$(CONFIG_FLAC_DECODER) += flacdec.o flacdata.o flac.o
OBJS-$(CONFIG_FLAC_DECODER) += flacdec.o flacdata.o flac.o vorbis_data.o
OBJS-$(CONFIG_FLAC_ENCODER) += flacenc.o flacdata.o flac.o vorbis_data.o
OBJS-$(CONFIG_FLASHSV_DECODER) += flashsv.o
OBJS-$(CONFIG_FLASHSV_ENCODER) += flashsvenc.o
@@ -188,15 +168,15 @@ OBJS-$(CONFIG_FRAPS_DECODER) += fraps.o
OBJS-$(CONFIG_FRWU_DECODER) += frwu.o
OBJS-$(CONFIG_G723_1_DECODER) += g723_1.o acelp_vectors.o \
celp_filters.o celp_math.o
OBJS-$(CONFIG_G723_1_ENCODER) += g723_1.o acelp_vectors.o celp_math.o
OBJS-$(CONFIG_G723_1_ENCODER) += g723_1.o
OBJS-$(CONFIG_G729_DECODER) += g729dec.o lsp.o celp_math.o acelp_filters.o acelp_pitch_delay.o acelp_vectors.o g729postfilter.o
OBJS-$(CONFIG_GIF_DECODER) += gifdec.o lzw.o
OBJS-$(CONFIG_GIF_ENCODER) += gif.o lzwenc.o
OBJS-$(CONFIG_GSM_DECODER) += gsmdec.o gsmdec_data.o msgsmdec.o
OBJS-$(CONFIG_GSM_MS_DECODER) += gsmdec.o gsmdec_data.o msgsmdec.o
OBJS-$(CONFIG_H261_DECODER) += h261dec.o h261.o h261data.o \
OBJS-$(CONFIG_H261_DECODER) += h261dec.o h261.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_H261_ENCODER) += h261enc.o h261.o h261data.o \
OBJS-$(CONFIG_H261_ENCODER) += h261enc.o h261.o \
mpegvideo_enc.o motion_est.o \
ratecontrol.o mpeg12data.o \
mpegvideo.o
@@ -227,11 +207,9 @@ OBJS-$(CONFIG_IFF_ILBM_DECODER) += iff.o
OBJS-$(CONFIG_IMC_DECODER) += imc.o
OBJS-$(CONFIG_INDEO2_DECODER) += indeo2.o
OBJS-$(CONFIG_INDEO3_DECODER) += indeo3.o
OBJS-$(CONFIG_INDEO4_DECODER) += indeo4.o ivi_common.o ivi_dsp.o
OBJS-$(CONFIG_INDEO5_DECODER) += indeo5.o ivi_common.o ivi_dsp.o
OBJS-$(CONFIG_INTERPLAY_DPCM_DECODER) += dpcm.o
OBJS-$(CONFIG_INTERPLAY_VIDEO_DECODER) += interplayvideo.o
OBJS-$(CONFIG_JACOSUB_DECODER) += jacosubdec.o ass.o
OBJS-$(CONFIG_JPEG2000_DECODER) += j2kdec.o mqcdec.o mqc.o j2k.o j2k_dwt.o
OBJS-$(CONFIG_JPEG2000_ENCODER) += j2kenc.o mqcenc.o mqc.o j2k.o j2k_dwt.o
OBJS-$(CONFIG_JPEGLS_DECODER) += jpeglsdec.o jpegls.o \
@@ -250,7 +228,6 @@ OBJS-$(CONFIG_MACE3_DECODER) += mace.o
OBJS-$(CONFIG_MACE6_DECODER) += mace.o
OBJS-$(CONFIG_MDEC_DECODER) += mdec.o mpeg12.o mpeg12data.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_MICRODVD_DECODER) += microdvddec.o ass.o
OBJS-$(CONFIG_MIMIC_DECODER) += mimic.o
OBJS-$(CONFIG_MJPEG_DECODER) += mjpegdec.o mjpeg.o
OBJS-$(CONFIG_MJPEG_ENCODER) += mjpegenc.o mjpeg.o \
@@ -314,22 +291,19 @@ OBJS-$(CONFIG_MPEG4_VAAPI_HWACCEL) += vaapi_mpeg4.o
OBJS-$(CONFIG_MSMPEG4V1_DECODER) += msmpeg4.o msmpeg4data.o
OBJS-$(CONFIG_MSMPEG4V2_DECODER) += msmpeg4.o msmpeg4data.o h263dec.o \
h263.o ituh263dec.o mpeg4videodec.o
OBJS-$(CONFIG_MSMPEG4V2_ENCODER) += msmpeg4.o msmpeg4enc.o msmpeg4data.o \
h263dec.o h263.o ituh263dec.o \
mpeg4videodec.o
OBJS-$(CONFIG_MSMPEG4V2_ENCODER) += msmpeg4.o msmpeg4data.o h263dec.o \
h263.o ituh263dec.o mpeg4videodec.o
OBJS-$(CONFIG_MSMPEG4V3_DECODER) += msmpeg4.o msmpeg4data.o h263dec.o \
h263.o ituh263dec.o mpeg4videodec.o
OBJS-$(CONFIG_MSMPEG4V3_ENCODER) += msmpeg4.o msmpeg4enc.o msmpeg4data.o \
h263dec.o h263.o ituh263dec.o \
mpeg4videodec.o
OBJS-$(CONFIG_MSMPEG4V3_ENCODER) += msmpeg4.o msmpeg4data.o h263dec.o \
h263.o ituh263dec.o mpeg4videodec.o
OBJS-$(CONFIG_MSRLE_DECODER) += msrle.o msrledec.o
OBJS-$(CONFIG_MSVIDEO1_DECODER) += msvideo1.o
OBJS-$(CONFIG_MSVIDEO1_ENCODER) += msvideo1enc.o elbg.o
OBJS-$(CONFIG_MSZH_DECODER) += lcldec.o
OBJS-$(CONFIG_MXPEG_DECODER) += mxpegdec.o mjpegdec.o mjpeg.o
OBJS-$(CONFIG_NELLYMOSER_DECODER) += nellymoserdec.o nellymoser.o
OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o \
audio_frame_queue.o
OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o
OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
OBJS-$(CONFIG_PAM_DECODER) += pnmdec.o pnm.o
OBJS-$(CONFIG_PAM_ENCODER) += pamenc.o pnm.o
@@ -343,15 +317,13 @@ OBJS-$(CONFIG_PGMYUV_DECODER) += pnmdec.o pnm.o
OBJS-$(CONFIG_PGMYUV_ENCODER) += pnmenc.o pnm.o
OBJS-$(CONFIG_PGSSUB_DECODER) += pgssubdec.o
OBJS-$(CONFIG_PICTOR_DECODER) += pictordec.o cga_data.o
OBJS-$(CONFIG_PNG_DECODER) += png.o pngdec.o pngdsp.o
OBJS-$(CONFIG_PNG_DECODER) += png.o pngdec.o
OBJS-$(CONFIG_PNG_ENCODER) += png.o pngenc.o
OBJS-$(CONFIG_PPM_DECODER) += pnmdec.o pnm.o
OBJS-$(CONFIG_PPM_ENCODER) += pnmenc.o pnm.o
OBJS-$(CONFIG_PRORES_DECODER) += proresdec2.o proresdsp.o
OBJS-$(CONFIG_PRORES_LGPL_DECODER) += proresdec_lgpl.o proresdsp.o proresdata.o
OBJS-$(CONFIG_PRORES_ENCODER) += proresenc_anatoliy.o
OBJS-$(CONFIG_PRORES_ANATOLIY_ENCODER) += proresenc_anatoliy.o
OBJS-$(CONFIG_PRORES_KOSTYA_ENCODER) += proresenc_kostya.o proresdata.o proresdsp.o
OBJS-$(CONFIG_PRORES_DECODER) += proresdec2.o
OBJS-$(CONFIG_PRORES_LGPL_DECODER) += proresdec_lgpl.o proresdsp.o
OBJS-$(CONFIG_PRORES_ENCODER) += proresenc.o
OBJS-$(CONFIG_PTX_DECODER) += ptx.o
OBJS-$(CONFIG_QCELP_DECODER) += qcelpdec.o celp_math.o \
celp_filters.o acelp_vectors.o \
@@ -364,14 +336,10 @@ OBJS-$(CONFIG_QPEG_DECODER) += qpeg.o
OBJS-$(CONFIG_QTRLE_DECODER) += qtrle.o
OBJS-$(CONFIG_QTRLE_ENCODER) += qtrleenc.o
OBJS-$(CONFIG_R10K_DECODER) += r210dec.o
OBJS-$(CONFIG_R10K_ENCODER) += r210enc.o
OBJS-$(CONFIG_R210_DECODER) += r210dec.o
OBJS-$(CONFIG_R210_ENCODER) += r210enc.o
OBJS-$(CONFIG_RA_144_DECODER) += ra144dec.o ra144.o celp_filters.o
OBJS-$(CONFIG_RA_144_ENCODER) += ra144enc.o ra144.o celp_filters.o \
audio_frame_queue.o
OBJS-$(CONFIG_RA_144_ENCODER) += ra144enc.o ra144.o celp_filters.o
OBJS-$(CONFIG_RA_288_DECODER) += ra288.o celp_math.o celp_filters.o
OBJS-$(CONFIG_RALF_DECODER) += ralf.o
OBJS-$(CONFIG_RAWVIDEO_DECODER) += rawdec.o
OBJS-$(CONFIG_RAWVIDEO_ENCODER) += rawenc.o
OBJS-$(CONFIG_RL2_DECODER) += rl2.o
@@ -413,7 +381,6 @@ OBJS-$(CONFIG_SP5X_DECODER) += sp5xdec.o mjpegdec.o mjpeg.o
OBJS-$(CONFIG_SRT_DECODER) += srtdec.o ass.o
OBJS-$(CONFIG_SRT_ENCODER) += srtenc.o ass_split.o
OBJS-$(CONFIG_SUNRAST_DECODER) += sunrast.o
OBJS-$(CONFIG_SUNRAST_ENCODER) += sunrastenc.o
OBJS-$(CONFIG_SVQ1_DECODER) += svq1dec.o svq1.o h263.o \
mpegvideo.o error_resilience.o
OBJS-$(CONFIG_SVQ1_ENCODER) += svq1enc.o svq1.o \
@@ -446,12 +413,6 @@ OBJS-$(CONFIG_ULTI_DECODER) += ulti.o
OBJS-$(CONFIG_UTVIDEO_DECODER) += utvideo.o
OBJS-$(CONFIG_V210_DECODER) += v210dec.o
OBJS-$(CONFIG_V210_ENCODER) += v210enc.o
OBJS-$(CONFIG_V308_DECODER) += v308dec.o
OBJS-$(CONFIG_V308_ENCODER) += v308enc.o
OBJS-$(CONFIG_V408_DECODER) += v408dec.o
OBJS-$(CONFIG_V408_ENCODER) += v408enc.o
OBJS-$(CONFIG_V410_DECODER) += v410dec.o
OBJS-$(CONFIG_V410_ENCODER) += v410enc.o
OBJS-$(CONFIG_V210X_DECODER) += v210x.o
OBJS-$(CONFIG_VB_DECODER) += vb.o
OBJS-$(CONFIG_VBLE_DECODER) += vble.o
@@ -477,12 +438,12 @@ OBJS-$(CONFIG_VP6_DECODER) += vp6.o vp56.o vp56data.o vp56dsp.o \
OBJS-$(CONFIG_VP8_DECODER) += vp8.o vp8dsp.o vp56rac.o
OBJS-$(CONFIG_VQA_DECODER) += vqavideo.o
OBJS-$(CONFIG_WAVPACK_DECODER) += wavpack.o
OBJS-$(CONFIG_WMALOSSLESS_DECODER) += wmalosslessdec.o wma_common.o
OBJS-$(CONFIG_WMAPRO_DECODER) += wmaprodec.o wma.o wma_common.o
OBJS-$(CONFIG_WMAV1_DECODER) += wmadec.o wma.o wma_common.o aactab.o
OBJS-$(CONFIG_WMAV1_ENCODER) += wmaenc.o wma.o wma_common.o aactab.o
OBJS-$(CONFIG_WMAV2_DECODER) += wmadec.o wma.o wma_common.o aactab.o
OBJS-$(CONFIG_WMAV2_ENCODER) += wmaenc.o wma.o wma_common.o aactab.o
OBJS-$(CONFIG_WMALOSSLESS_DECODER) += wmalosslessdec.o wma.o
OBJS-$(CONFIG_WMAPRO_DECODER) += wmaprodec.o wma.o
OBJS-$(CONFIG_WMAV1_DECODER) += wmadec.o wma.o aactab.o
OBJS-$(CONFIG_WMAV1_ENCODER) += wmaenc.o wma.o aactab.o
OBJS-$(CONFIG_WMAV2_DECODER) += wmadec.o wma.o aactab.o
OBJS-$(CONFIG_WMAV2_ENCODER) += wmaenc.o wma.o aactab.o
OBJS-$(CONFIG_WMAVOICE_DECODER) += wmavoice.o \
celp_math.o celp_filters.o \
acelp_vectors.o acelp_filters.o
@@ -491,7 +452,7 @@ OBJS-$(CONFIG_WMV2_DECODER) += wmv2dec.o wmv2.o \
msmpeg4.o msmpeg4data.o \
intrax8.o intrax8dsp.o
OBJS-$(CONFIG_WMV2_ENCODER) += wmv2enc.o wmv2.o \
msmpeg4.o msmpeg4enc.o msmpeg4data.o \
msmpeg4.o msmpeg4data.o \
mpeg4videodec.o ituh263dec.o h263dec.o
OBJS-$(CONFIG_WNV1_DECODER) += wnv1.o
OBJS-$(CONFIG_WS_SND1_DECODER) += ws-snd1.o
@@ -499,19 +460,10 @@ OBJS-$(CONFIG_XAN_DPCM_DECODER) += dpcm.o
OBJS-$(CONFIG_XAN_WC3_DECODER) += xan.o
OBJS-$(CONFIG_XAN_WC4_DECODER) += xxan.o
OBJS-$(CONFIG_XBIN_DECODER) += bintext.o cga_data.o
OBJS-$(CONFIG_XBM_DECODER) += xbmdec.o
OBJS-$(CONFIG_XBM_ENCODER) += xbmenc.o
OBJS-$(CONFIG_XL_DECODER) += xl.o
OBJS-$(CONFIG_XSUB_DECODER) += xsubdec.o
OBJS-$(CONFIG_XSUB_ENCODER) += xsubenc.o
OBJS-$(CONFIG_XWD_DECODER) += xwddec.o
OBJS-$(CONFIG_XWD_ENCODER) += xwdenc.o
OBJS-$(CONFIG_Y41P_DECODER) += y41pdec.o
OBJS-$(CONFIG_Y41P_ENCODER) += y41penc.o
OBJS-$(CONFIG_YOP_DECODER) += yop.o
OBJS-$(CONFIG_YUV4_DECODER) += yuv4dec.o
OBJS-$(CONFIG_YUV4_ENCODER) += yuv4enc.o
OBJS-$(CONFIG_ZEROCODEC_DECODER) += zerocodec.o
OBJS-$(CONFIG_ZLIB_DECODER) += lcldec.o
OBJS-$(CONFIG_ZLIB_ENCODER) += lclenc.o
OBJS-$(CONFIG_ZMBV_DECODER) += zmbv.o
@@ -570,20 +522,19 @@ OBJS-$(CONFIG_PCM_ZORK_DECODER) += pcm.o
OBJS-$(CONFIG_ADPCM_4XM_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_ADX_DECODER) += adxdec.o adx.o
OBJS-$(CONFIG_ADPCM_ADX_ENCODER) += adxenc.o adx.o
OBJS-$(CONFIG_ADPCM_ADX_ENCODER) += adxenc.o
OBJS-$(CONFIG_ADPCM_CT_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_EA_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_EA_MAXIS_XA_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_EA_R1_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_EA_R2_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_EA_R3_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_EA_XAS_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_EA_DECODER) += adpcm.o
OBJS-$(CONFIG_ADPCM_EA_MAXIS_XA_DECODER) += adpcm.o
OBJS-$(CONFIG_ADPCM_EA_R1_DECODER) += adpcm.o
OBJS-$(CONFIG_ADPCM_EA_R2_DECODER) += adpcm.o
OBJS-$(CONFIG_ADPCM_EA_R3_DECODER) += adpcm.o
OBJS-$(CONFIG_ADPCM_EA_XAS_DECODER) += adpcm.o
OBJS-$(CONFIG_ADPCM_G722_DECODER) += g722.o g722dec.o
OBJS-$(CONFIG_ADPCM_G722_ENCODER) += g722.o g722enc.o
OBJS-$(CONFIG_ADPCM_G726_DECODER) += g726.o
OBJS-$(CONFIG_ADPCM_G726_ENCODER) += g726.o
OBJS-$(CONFIG_ADPCM_IMA_AMV_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_IMA_APC_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_IMA_DK3_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_IMA_DK4_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_IMA_EA_EACS_DECODER) += adpcm.o adpcm_data.o
@@ -597,13 +548,13 @@ OBJS-$(CONFIG_ADPCM_IMA_WAV_ENCODER) += adpcmenc.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_IMA_WS_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_MS_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_MS_ENCODER) += adpcmenc.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_SBPRO_2_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_SBPRO_3_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_SBPRO_4_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_SBPRO_2_DECODER) += adpcm.o
OBJS-$(CONFIG_ADPCM_SBPRO_3_DECODER) += adpcm.o
OBJS-$(CONFIG_ADPCM_SBPRO_4_DECODER) += adpcm.o
OBJS-$(CONFIG_ADPCM_SWF_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_SWF_ENCODER) += adpcmenc.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_THP_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_XA_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_THP_DECODER) += adpcm.o
OBJS-$(CONFIG_ADPCM_XA_DECODER) += adpcm.o
OBJS-$(CONFIG_ADPCM_YAMAHA_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_YAMAHA_ENCODER) += adpcmenc.o adpcm_data.o
@@ -611,10 +562,9 @@ OBJS-$(CONFIG_ADPCM_YAMAHA_ENCODER) += adpcmenc.o adpcm_data.o
OBJS-$(CONFIG_ADTS_MUXER) += mpeg4audio.o
OBJS-$(CONFIG_ADX_DEMUXER) += adx.o
OBJS-$(CONFIG_CAF_DEMUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_DV_DEMUXER) += dv_profile.o
OBJS-$(CONFIG_DV_MUXER) += dv_profile.o timecode.o
OBJS-$(CONFIG_FLAC_DEMUXER) += flacdec.o flacdata.o flac.o vorbis_data.o \
vorbis_parser.o xiph.o
OBJS-$(CONFIG_DV_DEMUXER) += dvdata.o
OBJS-$(CONFIG_DV_MUXER) += dvdata.o timecode.o
OBJS-$(CONFIG_FLAC_DEMUXER) += flacdec.o flacdata.o flac.o vorbis_data.o
OBJS-$(CONFIG_FLAC_MUXER) += flacdec.o flacdata.o flac.o vorbis_data.o
OBJS-$(CONFIG_FLV_DEMUXER) += mpeg4audio.o
OBJS-$(CONFIG_GXF_DEMUXER) += mpeg12data.o
@@ -626,17 +576,15 @@ OBJS-$(CONFIG_MATROSKA_DEMUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MATROSKA_MUXER) += xiph.o mpeg4audio.o \
flacdec.o flacdata.o flac.o \
mpegaudiodata.o vorbis_data.o
OBJS-$(CONFIG_MP2_MUXER) += mpegaudiodata.o mpegaudiodecheader.o
OBJS-$(CONFIG_MP3_MUXER) += mpegaudiodata.o mpegaudiodecheader.o
OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o ac3tab.o timecode.o
OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MOV_MUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MPEGTS_MUXER) += mpegvideo.o mpeg4audio.o
OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MXF_MUXER) += timecode.o
OBJS-$(CONFIG_NUT_MUXER) += mpegaudiodata.o
OBJS-$(CONFIG_OGG_DEMUXER) += flacdec.o flacdata.o flac.o \
dirac.o mpeg12data.o vorbis_parser.o \
xiph.o vorbis_data.o
dirac.o mpeg12data.o vorbis_data.o
OBJS-$(CONFIG_OGG_MUXER) += xiph.o flacdec.o flacdata.o flac.o \
vorbis_data.o
OBJS-$(CONFIG_RTP_MUXER) += mpeg4audio.o mpegvideo.o xiph.o
@@ -649,40 +597,38 @@ OBJS-$(CONFIG_WTV_DEMUXER) += mpeg4audio.o mpegaudiodata.o
# external codec libraries
OBJS-$(CONFIG_LIBAACPLUS_ENCODER) += libaacplus.o
OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o audio_frame_queue.o
OBJS-$(CONFIG_LIBDIRAC_DECODER) += libdiracdec.o
OBJS-$(CONFIG_LIBDIRAC_ENCODER) += libdiracenc.o libdirac_libschro.o
OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o
OBJS-$(CONFIG_LIBGSM_DECODER) += libgsm.o
OBJS-$(CONFIG_LIBGSM_ENCODER) += libgsm.o
OBJS-$(CONFIG_LIBGSM_MS_DECODER) += libgsm.o
OBJS-$(CONFIG_LIBGSM_MS_ENCODER) += libgsm.o
OBJS-$(CONFIG_LIBMP3LAME_ENCODER) += libmp3lame.o mpegaudiodecheader.o \
audio_frame_queue.o
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_DECODER) += libopencore-amr.o \
audio_frame_queue.o
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER) += libopencore-amr.o \
audio_frame_queue.o
OBJS-$(CONFIG_LIBMP3LAME_ENCODER) += libmp3lame.o
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_DECODER) += libopencore-amr.o
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER) += libopencore-amr.o
OBJS-$(CONFIG_LIBOPENCORE_AMRWB_DECODER) += libopencore-amr.o
OBJS-$(CONFIG_LIBOPENJPEG_DECODER) += libopenjpegdec.o
OBJS-$(CONFIG_LIBOPENJPEG_ENCODER) += libopenjpegenc.o
OBJS-$(CONFIG_LIBSCHROEDINGER_DECODER) += libschroedingerdec.o \
libschroedinger.o
libschroedinger.o \
libdirac_libschro.o
OBJS-$(CONFIG_LIBSCHROEDINGER_ENCODER) += libschroedingerenc.o \
libschroedinger.o
libschroedinger.o \
libdirac_libschro.o
OBJS-$(CONFIG_LIBSPEEX_DECODER) += libspeexdec.o
OBJS-$(CONFIG_LIBSPEEX_ENCODER) += libspeexenc.o audio_frame_queue.o
OBJS-$(CONFIG_LIBSPEEX_ENCODER) += libspeexenc.o
OBJS-$(CONFIG_LIBSTAGEFRIGHT_H264_DECODER)+= libstagefright.o
OBJS-$(CONFIG_LIBTHEORA_ENCODER) += libtheoraenc.o
OBJS-$(CONFIG_LIBUTVIDEO_DECODER) += libutvideodec.o
OBJS-$(CONFIG_LIBUTVIDEO_ENCODER) += libutvideoenc.o
OBJS-$(CONFIG_LIBVO_AACENC_ENCODER) += libvo-aacenc.o mpeg4audio.o \
audio_frame_queue.o
OBJS-$(CONFIG_LIBUTVIDEO_DECODER) += libutvideo.o
OBJS-$(CONFIG_LIBVO_AACENC_ENCODER) += libvo-aacenc.o mpeg4audio.o
OBJS-$(CONFIG_LIBVO_AMRWBENC_ENCODER) += libvo-amrwbenc.o
OBJS-$(CONFIG_LIBVORBIS_ENCODER) += libvorbis.o audio_frame_queue.o \
vorbis_data.o vorbis_parser.o
OBJS-$(CONFIG_LIBVORBIS_ENCODER) += libvorbis.o vorbis_data.o
OBJS-$(CONFIG_LIBVPX_DECODER) += libvpxdec.o
OBJS-$(CONFIG_LIBVPX_ENCODER) += libvpxenc.o
OBJS-$(CONFIG_LIBX264_ENCODER) += libx264.o
OBJS-$(CONFIG_LIBXAVS_ENCODER) += libxavs.o
OBJS-$(CONFIG_LIBXVID_ENCODER) += libxvid.o
OBJS-$(CONFIG_LIBXVID) += libxvidff.o libxvid_rc.o
# parsers
OBJS-$(CONFIG_AAC_PARSER) += aac_parser.o aac_ac3_parser.o \
@@ -691,7 +637,6 @@ OBJS-$(CONFIG_AC3_PARSER) += ac3_parser.o ac3tab.o \
aac_ac3_parser.o
OBJS-$(CONFIG_ADX_PARSER) += adx_parser.o adx.o
OBJS-$(CONFIG_CAVSVIDEO_PARSER) += cavs_parser.o
OBJS-$(CONFIG_COOK_PARSER) += cook_parser.o
OBJS-$(CONFIG_DCA_PARSER) += dca_parser.o
OBJS-$(CONFIG_DIRAC_PARSER) += dirac_parser.o
OBJS-$(CONFIG_DNXHD_PARSER) += dnxhd_parser.o
@@ -699,7 +644,6 @@ OBJS-$(CONFIG_DVBSUB_PARSER) += dvbsub_parser.o
OBJS-$(CONFIG_DVDSUB_PARSER) += dvdsub_parser.o
OBJS-$(CONFIG_FLAC_PARSER) += flac_parser.o flacdata.o flac.o \
vorbis_data.o
OBJS-$(CONFIG_GSM_PARSER) += gsm_parser.o
OBJS-$(CONFIG_H261_PARSER) += h261_parser.o
OBJS-$(CONFIG_H263_PARSER) += h263_parser.o
OBJS-$(CONFIG_H264_PARSER) += h264_parser.o h264.o \
@@ -715,7 +659,6 @@ OBJS-$(CONFIG_MPEG4VIDEO_PARSER) += mpeg4video_parser.o h263.o \
mpegvideo.o error_resilience.o \
mpeg4videodec.o mpeg4video.o \
ituh263dec.o h263dec.o
OBJS-$(CONFIG_PNG_PARSER) += png_parser.o
OBJS-$(CONFIG_MPEGAUDIO_PARSER) += mpegaudio_parser.o \
mpegaudiodecheader.o mpegaudiodata.o
OBJS-$(CONFIG_MPEGVIDEO_PARSER) += mpegvideo_parser.o \
@@ -727,7 +670,6 @@ OBJS-$(CONFIG_RV40_PARSER) += rv34_parser.o
OBJS-$(CONFIG_VC1_PARSER) += vc1_parser.o vc1.o vc1data.o \
msmpeg4.o msmpeg4data.o mpeg4video.o \
h263.o mpegvideo.o error_resilience.o
OBJS-$(CONFIG_VORBIS_PARSER) += vorbis_parser.o xiph.o
OBJS-$(CONFIG_VP3_PARSER) += vp3_parser.o
OBJS-$(CONFIG_VP8_PARSER) += vp8_parser.o
@@ -753,55 +695,45 @@ OBJS-$(HAVE_PTHREADS) += pthread.o
OBJS-$(HAVE_W32THREADS) += pthread.o
OBJS-$(HAVE_OS2THREADS) += pthread.o
OBJS-$(CONFIG_MLIB) += mlib/dsputil_mlib.o \
# inverse.o contains the ff_inverse table definition, which is used by
# the FASTDIV macro (from libavutil); since referencing the external
# table has a negative effect on performance, copy it in libavcodec as
# well.
OBJS-$(!CONFIG_SMALL) += inverse.o
-include $(SRC_PATH)/$(SUBDIR)$(ARCH)/Makefile
SKIPHEADERS += %_tablegen.h \
%_tables.h \
aac_tablegen_decl.h \
codec_names.h \
fft-internal.h \
tableprint.h \
$(ARCH)/vp56_arith.h \
$(ARCH)/vp56_arith.h
SKIPHEADERS-$(CONFIG_DXVA2) += dxva2.h dxva2_internal.h
SKIPHEADERS-$(CONFIG_LIBDIRAC) += libdirac.h
SKIPHEADERS-$(CONFIG_LIBSCHROEDINGER) += libschroedinger.h
SKIPHEADERS-$(CONFIG_LIBUTVIDEO) += libutvideo.h
SKIPHEADERS-$(CONFIG_MPEG_XVMC_DECODER) += xvmc.h
SKIPHEADERS-$(CONFIG_VAAPI) += vaapi_internal.h
SKIPHEADERS-$(CONFIG_VDA) += vda.h vda_internal.h
SKIPHEADERS-$(CONFIG_VDA) += vda_internal.h
SKIPHEADERS-$(CONFIG_VDPAU) += vdpau.h
SKIPHEADERS-$(HAVE_OS2THREADS) += os2threads.h
SKIPHEADERS-$(CONFIG_XVMC) += xvmc.h
SKIPHEADERS-$(HAVE_W32THREADS) += w32pthreads.h
TESTPROGS = cabac \
dct \
fft \
fft-fixed \
golomb \
iirfilter \
rangecoder \
snowenc \
TESTPROGS = cabac dct fft fft-fixed h264 iirfilter rangecoder snowenc
TESTPROGS-$(HAVE_MMX) += motion
TESTOBJS = dctref.o
HOSTPROGS = aac_tablegen \
aacps_tablegen \
cbrt_tablegen \
cos_tablegen \
dv_tablegen \
motionpixels_tablegen \
mpegaudio_tablegen \
pcm_tablegen \
qdm2_tablegen \
sinewin_tablegen \
HOSTPROGS = aac_tablegen aacps_tablegen cbrt_tablegen cos_tablegen \
dv_tablegen motionpixels_tablegen mpegaudio_tablegen \
pcm_tablegen qdm2_tablegen sinewin_tablegen
DIRS = alpha arm bfin mlib ppc ps2 sh4 sparc x86
CLEANFILES = *_tables.c *_tables.h *_tablegen$(HOSTEXESUF)
include $(SRC_PATH)/subdir.mak
$(SUBDIR)dct-test$(EXESUF): $(SUBDIR)dctref.o
TRIG_TABLES = cos cos_fixed sin
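
The inverse.o note above is the one place this Makefile explains itself: FASTDIV turns division by a runtime-variable b into a single multiply by a precomputed ceil(2^32/b) reciprocal plus a shift, and keeping a local copy of the table avoids the cross-library load the comment warns about. A standalone toy of the trick; the table here is rebuilt locally, and libavutil's real ff_inverse is, as far as I can tell, generated along the same lines:

    #include <stdint.h>
    #include <stdio.h>

    /* Divide a by b with one multiply by m = ceil(2^32 / b).  The result
     * is exact whenever a * (m*b - 2^32) < 2^32, which covers the small
     * numerators codecs feed it; this toy also requires b >= 2. */
    static uint32_t inv[257];

    #define FASTDIV_TOY(a, b) ((uint32_t)(((uint64_t)(a) * inv[b]) >> 32))

    int main(void)
    {
        for (uint32_t b = 2; b <= 256; b++)
            inv[b] = (uint32_t)((0x100000000ULL + b - 1) / b); /* ceil(2^32/b) */
        printf("%u %u\n", FASTDIV_TOY(100000u, 7), 100000u / 7); /* both 14285 */
        return 0;
    }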


@@ -50,9 +50,6 @@ typedef struct A64Context {
uint8_t *mc_colram;
uint8_t *mc_palette;
int mc_pal_size;
/* pts of the next packet that will be output */
int64_t next_pts;
} A64Context;
#endif /* AVCODEC_A64ENC_H */


@@ -28,7 +28,6 @@
#include "a64colors.h"
#include "a64tables.h"
#include "elbg.h"
#include "internal.h"
#include "libavutil/intreadwrite.h"
#define DITHERSTEPS 8
@@ -222,8 +221,6 @@ static av_cold int a64multi_init_encoder(AVCodecContext *avctx)
if (!avctx->codec_tag)
avctx->codec_tag = AV_RL32("a64m");
c->next_pts = AV_NOPTS_VALUE;
return 0;
}
@@ -242,19 +239,19 @@ static void a64_compress_colram(unsigned char *buf, int *charmap, uint8_t *colra
}
}
static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf,
int buf_size, void *data)
{
A64Context *c = avctx->priv_data;
AVFrame *const p = &c->picture;
AVFrame *pict = data;
AVFrame *const p = (AVFrame *) & c->picture;
int frame;
int x, y;
int b_height;
int b_width;
int req_size, ret;
uint8_t *buf = NULL;
int req_size;
int *charmap = c->mc_charmap;
uint8_t *colram = c->mc_colram;
@@ -277,7 +274,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
}
/* no data, means end encoding asap */
if (!pict) {
if (!data) {
/* all done, end encoding */
if (!c->mc_lifetime) return 0;
/* no more frames in queue, prepare to flush remaining frames */
@@ -295,8 +292,6 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
p->key_frame = 1;
to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
c->mc_frame_counter++;
if (c->next_pts == AV_NOPTS_VALUE)
c->next_pts = pict->pts;
/* lifetime is not reached so wait for next frame first */
return 0;
}
@@ -307,11 +302,6 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
req_size = 0;
/* any frames to encode? */
if (c->mc_lifetime) {
req_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
if ((ret = ff_alloc_packet2(avctx, pkt, req_size)) < 0)
return ret;
buf = pkt->data;
/* calc optimal new charset + charmaps */
ff_init_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);
ff_do_elbg (meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);
@@ -320,12 +310,15 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
render_charset(avctx, charset, colram);
/* copy charset to buf */
memcpy(buf, charset, charset_size);
memcpy(buf,charset, charset_size);
/* advance pointers */
buf += charset_size;
charset += charset_size;
req_size += charset_size;
}
/* no charset so clean buf */
else memset(buf, 0, charset_size);
/* write x frames to buf */
for (frame = 0; frame < c->mc_lifetime; frame++) {
@@ -358,12 +351,11 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
/* reset counter */
c->mc_frame_counter = 0;
pkt->pts = pkt->dts = c->next_pts;
c->next_pts = AV_NOPTS_VALUE;
pkt->size = req_size;
pkt->flags |= AV_PKT_FLAG_KEY;
*got_packet = !!req_size;
if (req_size > buf_size) {
av_log(avctx, AV_LOG_ERROR, "buf size too small (need %d, got %d)\n", req_size, buf_size);
return -1;
}
return req_size;
}
return 0;
}
@@ -374,7 +366,7 @@ AVCodec ff_a64multi_encoder = {
.id = CODEC_ID_A64_MULTI,
.priv_data_size = sizeof(A64Context),
.init = a64multi_init_encoder,
.encode2 = a64multi_encode_frame,
.encode = a64multi_encode_frame,
.close = a64multi_close_encoder,
.pix_fmts = (const enum PixelFormat[]) {PIX_FMT_GRAY8, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64"),
@@ -387,7 +379,7 @@ AVCodec ff_a64multi5_encoder = {
.id = CODEC_ID_A64_MULTI5,
.priv_data_size = sizeof(A64Context),
.init = a64multi_init_encoder,
.encode2 = a64multi_encode_frame,
.encode = a64multi_encode_frame,
.close = a64multi_close_encoder,
.pix_fmts = (const enum PixelFormat[]) {PIX_FMT_GRAY8, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64, extended with 5th color (colram)"),
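
The a64multi hunks above are part of the encode→encode2 migration that runs through this whole diff: instead of writing into a caller-supplied buf/buf_size and returning a byte count (or a bare -1 on a too-small buffer), the encoder sizes and allocates its own AVPacket and reports output through got_packet. A minimal in-tree sketch of the new convention, using only the calls the hunks themselves use; the bitstream writing and pts queueing are elided, so this is not a drop-in encoder:

    #include "avcodec.h"
    #include "internal.h"   /* ff_alloc_packet2(), per the hunks above */

    static int sketch_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                   const AVFrame *frame, int *got_packet)
    {
        int req_size = 1024;   /* placeholder: compute the real payload size */
        int ret;

        if ((ret = ff_alloc_packet2(avctx, pkt, req_size)) < 0)
            return ret;        /* allocation errors propagate; no bare -1 */

        /* ... write req_size bytes into pkt->data ... */

        pkt->size   = req_size;
        pkt->flags |= AV_PKT_FLAG_KEY;
        *got_packet = 1;       /* 0 would mean "no output yet" (delay/flush) */
        return 0;
    }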


@@ -84,7 +84,6 @@ enum BandType {
#define IS_CODEBOOK_UNSIGNED(x) ((x - 1) & 10)
enum ChannelPosition {
AAC_CHANNEL_OFF = 0,
AAC_CHANNEL_FRONT = 1,
AAC_CHANNEL_SIDE = 2,
AAC_CHANNEL_BACK = 3,
@@ -112,15 +111,6 @@ enum OCStatus {
OC_LOCKED, ///< Output configuration locked in place
};
typedef struct {
MPEG4AudioConfig m4ac;
uint8_t layout_map[MAX_ELEM_ID*4][3];
int layout_map_tags;
int channels;
uint64_t channel_layout;
enum OCStatus status;
} OutputConfiguration;
/**
* Predictor State
*/
@@ -263,6 +253,8 @@ typedef struct {
AVCodecContext *avctx;
AVFrame frame;
MPEG4AudioConfig m4ac;
int is_saved; ///< Set if elements have stored overlap from previous frame.
DynamicRangeControl che_drc;
@@ -270,6 +262,9 @@ typedef struct {
* @name Channel element related data
* @{
*/
enum ChannelPosition che_pos[4][MAX_ELEM_ID]; /**< channel element channel mapping with the
* first index as the first 4 raw data block types
*/
ChannelElement *che[4][MAX_ELEM_ID];
ChannelElement *tag_che_map[4][MAX_ELEM_ID];
int tags_mapped;
@@ -304,8 +299,7 @@ typedef struct {
DECLARE_ALIGNED(32, float, temp)[128];
OutputConfiguration oc[2];
int warned_num_aac_frames;
enum OCStatus output_configured;
} AACContext;
#endif /* AVCODEC_AAC_H */


@@ -93,7 +93,7 @@ get_next:
avctx->channels = s->channels;
avctx->channel_layout = s->channel_layout;
}
s1->duration = s->samples;
avctx->frame_size = s->samples;
avctx->audio_service_type = s->service_type;
}


@@ -110,15 +110,14 @@ static av_always_inline float quantize_and_encode_band_cost_template(
int *bits, int BT_ZERO, int BT_UNSIGNED,
int BT_PAIR, int BT_ESC)
{
const int q_idx = POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512;
const float Q = ff_aac_pow2sf_tab [q_idx];
const float Q34 = ff_aac_pow34sf_tab[q_idx];
const float IQ = ff_aac_pow2sf_tab [POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
const float IQ = ff_aac_pow2sf_tab[POW_SF2_ZERO + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
const float Q = ff_aac_pow2sf_tab[POW_SF2_ZERO - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
const float CLIPPED_ESCAPE = 165140.0f*IQ;
int i, j;
float cost = 0;
const int dim = BT_PAIR ? 2 : 4;
int resbits = 0;
const float Q34 = sqrtf(Q * sqrtf(Q));
const int range = aac_cb_range[cb];
const int maxval = aac_cb_maxval[cb];
int off;
@@ -421,7 +420,7 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
const int run_esc = (1 << run_bits) - 1;
int idx, ppos, count;
int stackrun[120], stackcb[120], stack_len;
float next_minbits = INFINITY;
float next_minrd = INFINITY;
int next_mincb = 0;
abs_pow34_v(s->scoefs, sce->coeffs, 1024);
@@ -435,7 +434,7 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
size = sce->ics.swb_sizes[swb];
if (sce->zeroes[win*16 + swb]) {
float cost_stay_here = path[swb][0].cost;
float cost_get_here = next_minbits + run_bits + 4;
float cost_get_here = next_minrd + run_bits + 4;
if ( run_value_bits[sce->ics.num_windows == 8][path[swb][0].run]
!= run_value_bits[sce->ics.num_windows == 8][path[swb][0].run+1])
cost_stay_here += run_bits;
@@ -448,7 +447,7 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
path[swb+1][0].cost = cost_stay_here;
path[swb+1][0].run = path[swb][0].run + 1;
}
next_minbits = path[swb+1][0].cost;
next_minrd = path[swb+1][0].cost;
next_mincb = 0;
for (cb = 1; cb < 12; cb++) {
path[swb+1][cb].cost = 61450;
@@ -456,10 +455,10 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
path[swb+1][cb].run = 0;
}
} else {
float minbits = next_minbits;
float minrd = next_minrd;
int mincb = next_mincb;
int startcb = sce->band_type[win*16+swb];
next_minbits = INFINITY;
next_minrd = INFINITY;
next_mincb = 0;
for (cb = 0; cb < startcb; cb++) {
path[swb+1][cb].cost = 61450;
@@ -468,15 +467,15 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
}
for (cb = startcb; cb < 12; cb++) {
float cost_stay_here, cost_get_here;
float bits = 0.0f;
float rd = 0.0f;
for (w = 0; w < group_len; w++) {
bits += quantize_band_cost(s, sce->coeffs + start + w*128,
s->scoefs + start + w*128, size,
sce->sf_idx[(win+w)*16+swb], cb,
0, INFINITY, NULL);
rd += quantize_band_cost(s, sce->coeffs + start + w*128,
s->scoefs + start + w*128, size,
sce->sf_idx[(win+w)*16+swb], cb,
0, INFINITY, NULL);
}
cost_stay_here = path[swb][cb].cost + bits;
cost_get_here = minbits + bits + run_bits + 4;
cost_stay_here = path[swb][cb].cost + rd;
cost_get_here = minrd + rd + run_bits + 4;
if ( run_value_bits[sce->ics.num_windows == 8][path[swb][cb].run]
!= run_value_bits[sce->ics.num_windows == 8][path[swb][cb].run+1])
cost_stay_here += run_bits;
@@ -489,8 +488,8 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
path[swb+1][cb].cost = cost_stay_here;
path[swb+1][cb].run = path[swb][cb].run + 1;
}
if (path[swb+1][cb].cost < next_minbits) {
next_minbits = path[swb+1][cb].cost;
if (path[swb+1][cb].cost < next_minrd) {
next_minrd = path[swb+1][cb].cost;
next_mincb = cb;
}
}
@@ -714,17 +713,15 @@ static void search_for_quantizers_twoloop(AVCodecContext *avctx,
{
int start = 0, i, w, w2, g;
int destbits = avctx->bit_rate * 1024.0 / avctx->sample_rate / avctx->channels;
float dists[128] = { 0 }, uplims[128];
float dists[128], uplims[128];
float maxvals[128];
int fflag, minscaler;
int its = 0;
int allz = 0;
float minthr = INFINITY;
// for values above this the decoder might end up in an endless loop
// due to always having more bits than what can be encoded.
destbits = FFMIN(destbits, 5800);
//XXX: some heuristic to determine initial quantizers will reduce search time
memset(dists, 0, sizeof(dists));
//determine zero bands and upper limits
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
for (g = 0; g < sce->ics.num_swb; g++) {
@@ -878,7 +875,7 @@ static void search_for_quantizers_faac(AVCodecContext *avctx, AACEncContext *s,
} else {
for (w = 0; w < 8; w++) {
const float *coeffs = sce->coeffs + w*128;
curband = start = 0;
start = 0;
for (i = 0; i < 128; i++) {
if (i - start >= sce->ics.swb_sizes[curband]) {
start += sce->ics.swb_sizes[curband];
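
For orientation, the search those renamed variables belong to is a plain dynamic program over (scalefactor band, codebook): at each band a codebook either extends its current run or pays run_bits + 4 (the run escape plus the 4-bit codebook field) to open a new run from the cheapest predecessor. A toy with made-up bit costs that keeps only that recurrence; the real coder also tracks run lengths and back-pointers:

    #include <stdio.h>

    #define NB 4   /* bands (120 in the real coder) */
    #define NC 3   /* codebooks (12 in the real coder) */

    static float bits_cost(int s, int c) { return 1.0f + 0.5f * s + c; } /* stub */

    int main(void)
    {
        const float run_bits = 5;
        float cost[NB + 1][NC];
        for (int c = 0; c < NC; c++)
            cost[0][c] = 0;
        for (int s = 0; s < NB; s++) {
            float minbits = cost[s][0];          /* cheapest way into band s */
            for (int c = 1; c < NC; c++)
                if (cost[s][c] < minbits) minbits = cost[s][c];
            for (int c = 0; c < NC; c++) {
                float b    = bits_cost(s, c);
                float stay = cost[s][c] + b;              /* extend the run */
                float sw   = minbits + b + run_bits + 4;  /* open a new run */
                cost[s + 1][c] = stay < sw ? stay : sw;
            }
        }
        for (int c = 0; c < NC; c++)
            printf("cb %d: %g\n", c, cost[NB][c]);
        return 0;
    }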

File diff suppressed because it is too large


@@ -80,14 +80,14 @@ static const float * const tns_tmp2_map[4] = {
static const int8_t tags_per_config[16] = { 0, 1, 1, 2, 3, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0 };
static const uint8_t aac_channel_layout_map[7][5][3] = {
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, },
{ { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, },
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, },
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, { TYPE_SCE, 1, AAC_CHANNEL_BACK }, },
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 1, AAC_CHANNEL_BACK }, },
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 1, AAC_CHANNEL_BACK }, { TYPE_LFE, 0, AAC_CHANNEL_LFE }, },
{ { TYPE_SCE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 0, AAC_CHANNEL_FRONT }, { TYPE_CPE, 1, AAC_CHANNEL_FRONT }, { TYPE_CPE, 2, AAC_CHANNEL_BACK }, { TYPE_LFE, 0, AAC_CHANNEL_LFE }, },
static const uint8_t aac_channel_layout_map[7][5][2] = {
{ { TYPE_SCE, 0 }, },
{ { TYPE_CPE, 0 }, },
{ { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, },
{ { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_SCE, 1 }, },
{ { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_CPE, 1 }, },
{ { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_LFE, 0 }, { TYPE_CPE, 1 }, },
{ { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_LFE, 0 }, { TYPE_CPE, 2 }, { TYPE_CPE, 1 }, },
};
static const uint64_t aac_channel_layout[8] = {
@@ -97,7 +97,7 @@ static const uint64_t aac_channel_layout[8] = {
AV_CH_LAYOUT_4POINT0,
AV_CH_LAYOUT_5POINT0_BACK,
AV_CH_LAYOUT_5POINT1_BACK,
AV_CH_LAYOUT_7POINT1_WIDE_BACK,
AV_CH_LAYOUT_7POINT1_WIDE,
0,
};


@@ -34,7 +34,6 @@
#include "avcodec.h"
#include "put_bits.h"
#include "dsputil.h"
#include "internal.h"
#include "mpeg4audio.h"
#include "kbdwin.h"
#include "sinewin.h"
@@ -47,14 +46,6 @@
#define AAC_MAX_CHANNELS 6
#define ERROR_IF(cond, ...) \
if (cond) { \
av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \
return AVERROR(EINVAL); \
}
float ff_aac_pow34sf_tab[428];
static const uint8_t swb_size_1024_96[] = {
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8,
12, 12, 12, 12, 12, 16, 16, 24, 28, 36, 44,
@@ -144,10 +135,7 @@ static const uint8_t aac_chan_configs[6][5] = {
{4, TYPE_SCE, TYPE_CPE, TYPE_CPE, TYPE_LFE}, // 6 channels - front center + stereo + back stereo + LFE
};
/**
* Table to remap channels from libavcodec's default order to AAC order.
*/
static const uint8_t aac_chan_maps[AAC_MAX_CHANNELS][AAC_MAX_CHANNELS] = {
static const uint8_t channel_maps[][AAC_MAX_CHANNELS] = {
{ 0 },
{ 0, 1 },
{ 2, 0, 1 },
@@ -168,7 +156,7 @@ static void put_audio_specific_config(AVCodecContext *avctx)
init_put_bits(&pb, avctx->extradata, avctx->extradata_size*8);
put_bits(&pb, 5, 2); //object type - AAC-LC
put_bits(&pb, 4, s->samplerate_index); //sample rate index
put_bits(&pb, 4, s->channels);
put_bits(&pb, 4, avctx->channels);
//GASpecificConfig
put_bits(&pb, 1, 0); //frame length - 1024 samples
put_bits(&pb, 1, 0); //does not depend on core coder
@@ -181,81 +169,117 @@ static void put_audio_specific_config(AVCodecContext *avctx)
flush_put_bits(&pb);
}
#define WINDOW_FUNC(type) \
static void apply_ ##type ##_window(DSPContext *dsp, SingleChannelElement *sce, const float *audio)
WINDOW_FUNC(only_long)
{
const float *lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
const float *pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
float *out = sce->ret;
dsp->vector_fmul (out, audio, lwindow, 1024);
dsp->vector_fmul_reverse(out + 1024, audio + 1024, pwindow, 1024);
}
WINDOW_FUNC(long_start)
{
const float *lwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
const float *swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
float *out = sce->ret;
dsp->vector_fmul(out, audio, lwindow, 1024);
memcpy(out + 1024, audio + 1024, sizeof(out[0]) * 448);
dsp->vector_fmul_reverse(out + 1024 + 448, audio + 1024 + 448, swindow, 128);
memset(out + 1024 + 576, 0, sizeof(out[0]) * 448);
}
WINDOW_FUNC(long_stop)
{
const float *lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
const float *swindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
float *out = sce->ret;
memset(out, 0, sizeof(out[0]) * 448);
dsp->vector_fmul(out + 448, audio + 448, swindow, 128);
memcpy(out + 576, audio + 576, sizeof(out[0]) * 448);
dsp->vector_fmul_reverse(out + 1024, audio + 1024, lwindow, 1024);
}
WINDOW_FUNC(eight_short)
{
const float *swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
const float *pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
const float *in = audio + 448;
float *out = sce->ret;
int w;
for (w = 0; w < 8; w++) {
dsp->vector_fmul (out, in, w ? pwindow : swindow, 128);
out += 128;
in += 128;
dsp->vector_fmul_reverse(out, in, swindow, 128);
out += 128;
}
}
static void (*const apply_window[4])(DSPContext *dsp, SingleChannelElement *sce, const float *audio) = {
[ONLY_LONG_SEQUENCE] = apply_only_long_window,
[LONG_START_SEQUENCE] = apply_long_start_window,
[EIGHT_SHORT_SEQUENCE] = apply_eight_short_window,
[LONG_STOP_SEQUENCE] = apply_long_stop_window
};
static void apply_window_and_mdct(AACEncContext *s, SingleChannelElement *sce,
float *audio)
static av_cold int aac_encode_init(AVCodecContext *avctx)
{
AACEncContext *s = avctx->priv_data;
int i;
const uint8_t *sizes[2];
uint8_t grouping[AAC_MAX_CHANNELS];
int lengths[2];
avctx->frame_size = 1024;
for (i = 0; i < 16; i++)
if (avctx->sample_rate == avpriv_mpeg4audio_sample_rates[i])
break;
if (i == 16) {
av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate %d\n", avctx->sample_rate);
return -1;
}
if (avctx->channels > AAC_MAX_CHANNELS) {
av_log(avctx, AV_LOG_ERROR, "Unsupported number of channels: %d\n", avctx->channels);
return -1;
}
if (avctx->profile != FF_PROFILE_UNKNOWN && avctx->profile != FF_PROFILE_AAC_LOW) {
av_log(avctx, AV_LOG_ERROR, "Unsupported profile %d\n", avctx->profile);
return -1;
}
if (1024.0 * avctx->bit_rate / avctx->sample_rate > 6144 * avctx->channels) {
av_log(avctx, AV_LOG_ERROR, "Too many bits per frame requested\n");
return -1;
}
s->samplerate_index = i;
dsputil_init(&s->dsp, avctx);
ff_mdct_init(&s->mdct1024, 11, 0, 1.0);
ff_mdct_init(&s->mdct128, 8, 0, 1.0);
// window init
ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
ff_init_ff_sine_windows(10);
ff_init_ff_sine_windows(7);
s->chan_map = aac_chan_configs[avctx->channels-1];
s->samples = av_malloc(2 * 1024 * avctx->channels * sizeof(s->samples[0]));
s->cpe = av_mallocz(sizeof(ChannelElement) * s->chan_map[0]);
avctx->extradata = av_mallocz(5 + FF_INPUT_BUFFER_PADDING_SIZE);
avctx->extradata_size = 5;
put_audio_specific_config(avctx);
sizes[0] = swb_size_1024[i];
sizes[1] = swb_size_128[i];
lengths[0] = ff_aac_num_swb_1024[i];
lengths[1] = ff_aac_num_swb_128[i];
for (i = 0; i < s->chan_map[0]; i++)
grouping[i] = s->chan_map[i + 1] == TYPE_CPE;
ff_psy_init(&s->psy, avctx, 2, sizes, lengths, s->chan_map[0], grouping);
s->psypp = ff_psy_preprocess_init(avctx);
s->coder = &ff_aac_coders[s->options.aac_coder];
s->lambda = avctx->global_quality ? avctx->global_quality : 120;
ff_aac_tableinit();
return 0;
}
static void apply_window_and_mdct(AVCodecContext *avctx, AACEncContext *s,
SingleChannelElement *sce, short *audio)
{
int i, k;
const int chans = avctx->channels;
const float * lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
const float * swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
const float * pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
float *output = sce->ret;
apply_window[sce->ics.window_sequence[0]](&s->dsp, sce, audio);
if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE)
if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
memcpy(output, sce->saved, sizeof(float)*1024);
if (sce->ics.window_sequence[0] == LONG_STOP_SEQUENCE) {
memset(output, 0, sizeof(output[0]) * 448);
for (i = 448; i < 576; i++)
output[i] = sce->saved[i] * pwindow[i - 448];
for (i = 576; i < 704; i++)
output[i] = sce->saved[i];
}
if (sce->ics.window_sequence[0] != LONG_START_SEQUENCE) {
for (i = 0; i < 1024; i++) {
output[i+1024] = audio[i * chans] * lwindow[1024 - i - 1];
sce->saved[i] = audio[i * chans] * lwindow[i];
}
} else {
for (i = 0; i < 448; i++)
output[i+1024] = audio[i * chans];
for (; i < 576; i++)
output[i+1024] = audio[i * chans] * swindow[576 - i - 1];
memset(output+1024+576, 0, sizeof(output[0]) * 448);
for (i = 0; i < 1024; i++)
sce->saved[i] = audio[i * chans];
}
s->mdct1024.mdct_calc(&s->mdct1024, sce->coeffs, output);
else
for (i = 0; i < 1024; i += 128)
s->mdct128.mdct_calc(&s->mdct128, sce->coeffs + i, output + i*2);
memcpy(audio, audio + 1024, sizeof(audio[0]) * 1024);
} else {
for (k = 0; k < 1024; k += 128) {
for (i = 448 + k; i < 448 + k + 256; i++)
output[i - 448 - k] = (i < 1024)
? sce->saved[i]
: audio[(i-1024)*chans];
s->dsp.vector_fmul (output, output, k ? swindow : pwindow, 128);
s->dsp.vector_fmul_reverse(output+128, output+128, swindow, 128);
s->mdct128.mdct_calc(&s->mdct128, sce->coeffs + k, output);
}
for (i = 0; i < 1024; i++)
sce->saved[i] = audio[i * chans];
}
}
/**
@@ -464,69 +488,57 @@ static void put_bitstream_info(AVCodecContext *avctx, AACEncContext *s,
put_bits(&s->pb, 3, TYPE_FIL);
put_bits(&s->pb, 4, FFMIN(namelen, 15));
if (namelen >= 15)
put_bits(&s->pb, 8, namelen - 14);
put_bits(&s->pb, 8, namelen - 16);
put_bits(&s->pb, 4, 0); //extension type - filler
padbits = -put_bits_count(&s->pb) & 7;
padbits = 8 - (put_bits_count(&s->pb) & 7);
avpriv_align_put_bits(&s->pb);
for (i = 0; i < namelen - 2; i++)
put_bits(&s->pb, 8, name[i]);
put_bits(&s->pb, 12 - padbits, 0);
}
/*
* Deinterleave input samples.
* Channels are reordered from libavcodec's default order to AAC order.
*/
static void deinterleave_input_samples(AACEncContext *s, const AVFrame *frame)
{
int ch, i;
const int sinc = s->channels;
const uint8_t *channel_map = aac_chan_maps[sinc - 1];
/* deinterleave and remap input samples */
for (ch = 0; ch < sinc; ch++) {
/* copy last 1024 samples of previous frame to the start of the current frame */
memcpy(&s->planar_samples[ch][1024], &s->planar_samples[ch][2048], 1024 * sizeof(s->planar_samples[0][0]));
/* deinterleave */
i = 2048;
if (frame) {
const float *sptr = ((const float *)frame->data[0]) + channel_map[ch];
for (; i < 2048 + frame->nb_samples; i++) {
s->planar_samples[ch][i] = *sptr;
sptr += sinc;
}
}
memset(&s->planar_samples[ch][i], 0,
(3072 - i) * sizeof(s->planar_samples[0][0]));
}
}
static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
const AVFrame *frame, int *got_packet_ptr)
static int aac_encode_frame(AVCodecContext *avctx,
uint8_t *frame, int buf_size, void *data)
{
AACEncContext *s = avctx->priv_data;
float **samples = s->planar_samples, *samples2, *la, *overlap;
int16_t *samples = s->samples, *samples2, *la;
ChannelElement *cpe;
int i, ch, w, g, chans, tag, start_ch, ret;
int i, ch, w, g, chans, tag, start_ch;
int chan_el_counter[4];
FFPsyWindowInfo windows[AAC_MAX_CHANNELS];
if (s->last_frame == 2)
if (s->last_frame)
return 0;
/* add current frame to queue */
if (frame) {
if ((ret = ff_af_queue_add(&s->afq, frame) < 0))
return ret;
if (data) {
if (!s->psypp) {
if (avctx->channels <= 2) {
memcpy(s->samples + 1024 * avctx->channels, data,
1024 * avctx->channels * sizeof(s->samples[0]));
} else {
for (i = 0; i < 1024; i++)
for (ch = 0; ch < avctx->channels; ch++)
s->samples[(i + 1024) * avctx->channels + ch] =
((int16_t*)data)[i * avctx->channels +
channel_maps[avctx->channels-1][ch]];
}
} else {
start_ch = 0;
samples2 = s->samples + 1024 * avctx->channels;
for (i = 0; i < s->chan_map[0]; i++) {
tag = s->chan_map[i+1];
chans = tag == TYPE_CPE ? 2 : 1;
ff_psy_preprocess(s->psypp,
(uint16_t*)data + channel_maps[avctx->channels-1][start_ch],
samples2 + start_ch, start_ch, chans);
start_ch += chans;
}
}
}
deinterleave_input_samples(s, frame);
if (s->psypp)
ff_psy_preprocess(s->psypp, s->planar_samples, s->channels);
if (!avctx->frame_number)
if (!avctx->frame_number) {
memcpy(s->samples, s->samples + 1024 * avctx->channels,
1024 * avctx->channels * sizeof(s->samples[0]));
return 0;
}
start_ch = 0;
for (i = 0; i < s->chan_map[0]; i++) {
@@ -537,10 +549,9 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
for (ch = 0; ch < chans; ch++) {
IndividualChannelStream *ics = &cpe->ch[ch].ics;
int cur_channel = start_ch + ch;
overlap = &samples[cur_channel][0];
samples2 = overlap + 1024;
la = samples2 + (448+64);
if (!frame)
samples2 = samples + cur_channel;
la = samples2 + (448+64) * avctx->channels;
if (!data)
la = NULL;
if (tag == TYPE_LFE) {
wi[ch].window_type[0] = ONLY_LONG_SEQUENCE;
@@ -567,19 +578,13 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
for (w = 0; w < ics->num_windows; w++)
ics->group_len[w] = wi[ch].grouping[w];
apply_window_and_mdct(s, &cpe->ch[ch], overlap);
apply_window_and_mdct(avctx, s, &cpe->ch[ch], samples2);
}
start_ch += chans;
}
if ((ret = ff_alloc_packet2(avctx, avpkt, 768 * s->channels))) {
av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
return ret;
}
do {
int frame_bits;
init_put_bits(&s->pb, avpkt->data, avpkt->size);
init_put_bits(&s->pb, frame, buf_size*8);
if ((avctx->frame_number & 0xFF)==1 && !(avctx->flags & CODEC_FLAG_BITEXACT))
put_bitstream_info(avctx, s, LIBAVCODEC_IDENT);
start_ch = 0;
@@ -639,8 +644,8 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
}
frame_bits = put_bits_count(&s->pb);
if (frame_bits <= 6144 * s->channels - 3) {
s->psy.bitres.bits = frame_bits / s->channels;
if (frame_bits <= 6144 * avctx->channels - 3) {
s->psy.bitres.bits = frame_bits / avctx->channels;
break;
}
@@ -659,15 +664,11 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
s->lambda = FFMIN(s->lambda, 65536.f);
}
if (!frame)
s->last_frame++;
ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts,
&avpkt->duration);
avpkt->size = put_bits_count(&s->pb) >> 3;
*got_packet_ptr = 1;
return 0;
if (!data)
s->last_frame = 1;
memcpy(s->samples, s->samples + 1024 * avctx->channels,
1024 * avctx->channels * sizeof(s->samples[0]));
return put_bits_count(&s->pb)>>3;
}
static av_cold int aac_encode_end(AVCodecContext *avctx)
@@ -677,122 +678,12 @@ static av_cold int aac_encode_end(AVCodecContext *avctx)
ff_mdct_end(&s->mdct1024);
ff_mdct_end(&s->mdct128);
ff_psy_end(&s->psy);
if (s->psypp)
ff_psy_preprocess_end(s->psypp);
av_freep(&s->buffer.samples);
ff_psy_preprocess_end(s->psypp);
av_freep(&s->samples);
av_freep(&s->cpe);
ff_af_queue_close(&s->afq);
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
return 0;
}
static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
{
int ret = 0;
ff_dsputil_init(&s->dsp, avctx);
// window init
ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
ff_init_ff_sine_windows(10);
ff_init_ff_sine_windows(7);
if (ret = ff_mdct_init(&s->mdct1024, 11, 0, 32768.0))
return ret;
if (ret = ff_mdct_init(&s->mdct128, 8, 0, 32768.0))
return ret;
return 0;
}
static av_cold int alloc_buffers(AVCodecContext *avctx, AACEncContext *s)
{
int ch;
FF_ALLOCZ_OR_GOTO(avctx, s->buffer.samples, 3 * 1024 * s->channels * sizeof(s->buffer.samples[0]), alloc_fail);
FF_ALLOCZ_OR_GOTO(avctx, s->cpe, sizeof(ChannelElement) * s->chan_map[0], alloc_fail);
FF_ALLOCZ_OR_GOTO(avctx, avctx->extradata, 5 + FF_INPUT_BUFFER_PADDING_SIZE, alloc_fail);
for(ch = 0; ch < s->channels; ch++)
s->planar_samples[ch] = s->buffer.samples + 3 * 1024 * ch;
#if FF_API_OLD_ENCODE_AUDIO
if (!(avctx->coded_frame = avcodec_alloc_frame()))
goto alloc_fail;
#endif
return 0;
alloc_fail:
return AVERROR(ENOMEM);
}
static av_cold int aac_encode_init(AVCodecContext *avctx)
{
AACEncContext *s = avctx->priv_data;
int i, ret = 0;
const uint8_t *sizes[2];
uint8_t grouping[AAC_MAX_CHANNELS];
int lengths[2];
avctx->frame_size = 1024;
for (i = 0; i < 16; i++)
if (avctx->sample_rate == avpriv_mpeg4audio_sample_rates[i])
break;
s->channels = avctx->channels;
ERROR_IF(i == 16,
"Unsupported sample rate %d\n", avctx->sample_rate);
ERROR_IF(s->channels > AAC_MAX_CHANNELS,
"Unsupported number of channels: %d\n", s->channels);
ERROR_IF(avctx->profile != FF_PROFILE_UNKNOWN && avctx->profile != FF_PROFILE_AAC_LOW,
"Unsupported profile %d\n", avctx->profile);
ERROR_IF(1024.0 * avctx->bit_rate / avctx->sample_rate > 6144 * s->channels,
"Too many bits per frame requested\n");
s->samplerate_index = i;
s->chan_map = aac_chan_configs[s->channels-1];
if (ret = dsp_init(avctx, s))
goto fail;
if (ret = alloc_buffers(avctx, s))
goto fail;
avctx->extradata_size = 5;
put_audio_specific_config(avctx);
sizes[0] = swb_size_1024[i];
sizes[1] = swb_size_128[i];
lengths[0] = ff_aac_num_swb_1024[i];
lengths[1] = ff_aac_num_swb_128[i];
for (i = 0; i < s->chan_map[0]; i++)
grouping[i] = s->chan_map[i + 1] == TYPE_CPE;
if (ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths, s->chan_map[0], grouping))
goto fail;
s->psypp = ff_psy_preprocess_init(avctx);
s->coder = &ff_aac_coders[s->options.aac_coder];
s->lambda = avctx->global_quality ? avctx->global_quality : 120;
ff_aac_tableinit();
for (i = 0; i < 428; i++)
ff_aac_pow34sf_tab[i] = sqrt(ff_aac_pow2sf_tab[i] * sqrt(ff_aac_pow2sf_tab[i]));
avctx->delay = 1024;
ff_af_queue_init(avctx, &s->afq);
return 0;
fail:
aac_encode_end(avctx);
return ret;
}
#define AACENC_FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM
static const AVOption aacenc_options[] = {
{"stereo_mode", "Stereo coding method", offsetof(AACEncContext, options.stereo_mode), AV_OPT_TYPE_INT, {.dbl = 0}, -1, 1, AACENC_FLAGS, "stereo_mode"},
@@ -816,13 +707,10 @@ AVCodec ff_aac_encoder = {
.id = CODEC_ID_AAC,
.priv_data_size = sizeof(AACEncContext),
.init = aac_encode_init,
.encode2 = aac_encode_frame,
.encode = aac_encode_frame,
.close = aac_encode_end,
.supported_samplerates = avpriv_mpeg4audio_sample_rates,
.capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY |
CODEC_CAP_EXPERIMENTAL,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_NONE },
.long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
.priv_class = &aacenc_class,
.capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL,
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
.priv_class = &aacenc_class,
};
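
Both versions of apply_window_and_mdct above assemble the 2048-sample MDCT input from two window halves: vector_fmul applies the rising half over the saved samples and vector_fmul_reverse the time-reversed falling half over the new ones. A scalar toy of those two dsputil primitives, with N shrunk from 1024 to 8 and the window shape following the sin((i+0.5)*pi/(2N)) form I take ff_sine_* to use:

    #include <stdio.h>
    #include <math.h>

    #ifndef M_PI
    #define M_PI 3.14159265358979323846
    #endif
    #define N 8

    int main(void)
    {
        float win[N], in[2 * N], out[2 * N];
        for (int i = 0; i < N; i++)
            win[i] = sinf((i + 0.5f) * (float)(M_PI / (2 * N)));
        for (int i = 0; i < 2 * N; i++)
            in[i] = 1.0f;
        for (int i = 0; i < N; i++)                  /* vector_fmul: rising  */
            out[i] = in[i] * win[i];                 /* half over saved data */
        for (int i = 0; i < N; i++)                  /* vector_fmul_reverse: */
            out[N + i] = in[N + i] * win[N - 1 - i]; /* falling half         */
        for (int i = 0; i < 2 * N; i++)
            printf("%.3f%c", out[i], i == 2 * N - 1 ? '\n' : ' ');
        return 0;
    }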


@@ -27,7 +27,7 @@
#include "dsputil.h"
#include "aac.h"
#include "audio_frame_queue.h"
#include "psymodel.h"
#define AAC_CODER_NB 4
@@ -61,10 +61,9 @@ typedef struct AACEncContext {
FFTContext mdct1024; ///< long (1024 samples) frame transform context
FFTContext mdct128; ///< short (128 samples) frame transform context
DSPContext dsp;
float *planar_samples[6]; ///< saved preprocessed input
int16_t *samples; ///< saved preprocessed input
int samplerate_index; ///< MPEG-4 samplerate index
int channels; ///< channel count
const uint8_t *chan_map; ///< channel configuration map
ChannelElement *cpe; ///< channel elements
@@ -74,15 +73,8 @@ typedef struct AACEncContext {
int cur_channel;
int last_frame;
float lambda;
AudioFrameQueue afq;
DECLARE_ALIGNED(16, int, qcoefs)[96]; ///< quantized coefficients
DECLARE_ALIGNED(32, float, scoefs)[1024]; ///< scaled coefficients
struct {
float *samples;
} buffer;
} AACEncContext;
extern float ff_aac_pow34sf_tab[428];
#endif /* AVCODEC_AACENC_H */


@@ -27,7 +27,6 @@
#include "aacps.h"
#include "aacps_tablegen.h"
#include "aacpsdata.c"
#include "dsputil.h"
#define PS_BASELINE 0 ///< Operate in Baseline PS mode
///< Baseline implies 10 or 20 stereo bands,
@@ -224,7 +223,7 @@ int ff_ps_read_data(AVCodecContext *avctx, GetBitContext *gb_host, PSContext *ps
cnt -= 2 + ps_read_extension_data(gb, ps, ps_extension_id);
}
if (cnt < 0) {
av_log(avctx, AV_LOG_ERROR, "ps extension overflow %d\n", cnt);
av_log(avctx, AV_LOG_ERROR, "ps extension overflow %d", cnt);
goto err;
}
skip_bits(gb, cnt);
@@ -276,16 +275,12 @@ int ff_ps_read_data(AVCodecContext *avctx, GetBitContext *gb_host, PSContext *ps
err:
ps->start = 0;
skip_bits_long(gb_host, bits_left);
memset(ps->iid_par, 0, sizeof(ps->iid_par));
memset(ps->icc_par, 0, sizeof(ps->icc_par));
memset(ps->ipd_par, 0, sizeof(ps->ipd_par));
memset(ps->opd_par, 0, sizeof(ps->opd_par));
return bits_left;
}
/** Split one subband into 2 subsubbands with a symmetric real filter.
* The filter must have its non-center even coefficients equal to zero. */
static void hybrid2_re(float (*in)[2], float (*out)[32][2], const float filter[8], int len, int reverse)
static void hybrid2_re(float (*in)[2], float (*out)[32][2], const float filter[7], int len, int reverse)
{
int i, j;
for (i = 0; i < len; i++, in++) {
@@ -305,14 +300,26 @@ static void hybrid2_re(float (*in)[2], float (*out)[32][2], const float filter[8
}
/** Split one subband into 6 subsubbands with a complex filter */
static void hybrid6_cx(PSDSPContext *dsp, float (*in)[2], float (*out)[32][2], const float (*filter)[8][2], int len)
static void hybrid6_cx(float (*in)[2], float (*out)[32][2], const float (*filter)[7][2], int len)
{
int i;
int i, j, ssb;
int N = 8;
LOCAL_ALIGNED_16(float, temp, [8], [2]);
float temp[8][2];
for (i = 0; i < len; i++, in++) {
dsp->hybrid_analysis(temp, in, filter, 1, N);
for (ssb = 0; ssb < N; ssb++) {
float sum_re = filter[ssb][6][0] * in[6][0], sum_im = filter[ssb][6][0] * in[6][1];
for (j = 0; j < 6; j++) {
float in0_re = in[j][0];
float in0_im = in[j][1];
float in1_re = in[12-j][0];
float in1_im = in[12-j][1];
sum_re += filter[ssb][j][0] * (in0_re + in1_re) - filter[ssb][j][1] * (in0_im - in1_im);
sum_im += filter[ssb][j][0] * (in0_im + in1_im) + filter[ssb][j][1] * (in0_re - in1_re);
}
temp[ssb][0] = sum_re;
temp[ssb][1] = sum_im;
}
out[0][i][0] = temp[6][0];
out[0][i][1] = temp[6][1];
out[1][i][0] = temp[7][0];
@@ -328,18 +335,28 @@ static void hybrid6_cx(PSDSPContext *dsp, float (*in)[2], float (*out)[32][2], c
}
}
static void hybrid4_8_12_cx(PSDSPContext *dsp, float (*in)[2], float (*out)[32][2], const float (*filter)[8][2], int N, int len)
static void hybrid4_8_12_cx(float (*in)[2], float (*out)[32][2], const float (*filter)[7][2], int N, int len)
{
int i;
int i, j, ssb;
for (i = 0; i < len; i++, in++) {
dsp->hybrid_analysis(out[0] + i, in, filter, 32, N);
for (ssb = 0; ssb < N; ssb++) {
float sum_re = filter[ssb][6][0] * in[6][0], sum_im = filter[ssb][6][0] * in[6][1];
for (j = 0; j < 6; j++) {
float in0_re = in[j][0];
float in0_im = in[j][1];
float in1_re = in[12-j][0];
float in1_im = in[12-j][1];
sum_re += filter[ssb][j][0] * (in0_re + in1_re) - filter[ssb][j][1] * (in0_im - in1_im);
sum_im += filter[ssb][j][0] * (in0_im + in1_im) + filter[ssb][j][1] * (in0_re - in1_re);
}
out[ssb][i][0] = sum_re;
out[ssb][i][1] = sum_im;
}
}
}
static void hybrid_analysis(PSDSPContext *dsp, float out[91][32][2],
float in[5][44][2], float L[2][38][64],
int is34, int len)
static void hybrid_analysis(float out[91][32][2], float in[5][44][2], float L[2][38][64], int is34, int len)
{
int i, j;
for (i = 0; i < 5; i++) {
@@ -349,17 +366,27 @@ static void hybrid_analysis(PSDSPContext *dsp, float out[91][32][2],
}
}
if (is34) {
hybrid4_8_12_cx(dsp, in[0], out, f34_0_12, 12, len);
hybrid4_8_12_cx(dsp, in[1], out+12, f34_1_8, 8, len);
hybrid4_8_12_cx(dsp, in[2], out+20, f34_2_4, 4, len);
hybrid4_8_12_cx(dsp, in[3], out+24, f34_2_4, 4, len);
hybrid4_8_12_cx(dsp, in[4], out+28, f34_2_4, 4, len);
dsp->hybrid_analysis_ileave(out + 27, L, 5, len);
hybrid4_8_12_cx(in[0], out, f34_0_12, 12, len);
hybrid4_8_12_cx(in[1], out+12, f34_1_8, 8, len);
hybrid4_8_12_cx(in[2], out+20, f34_2_4, 4, len);
hybrid4_8_12_cx(in[3], out+24, f34_2_4, 4, len);
hybrid4_8_12_cx(in[4], out+28, f34_2_4, 4, len);
for (i = 0; i < 59; i++) {
for (j = 0; j < len; j++) {
out[i+32][j][0] = L[0][j][i+5];
out[i+32][j][1] = L[1][j][i+5];
}
}
} else {
hybrid6_cx(dsp, in[0], out, f20_0_8, len);
hybrid6_cx(in[0], out, f20_0_8, len);
hybrid2_re(in[1], out+6, g1_Q2, len, 1);
hybrid2_re(in[2], out+8, g1_Q2, len, 0);
dsp->hybrid_analysis_ileave(out + 7, L, 3, len);
for (i = 0; i < 61; i++) {
for (j = 0; j < len; j++) {
out[i+10][j][0] = L[0][j][i+3];
out[i+10][j][1] = L[1][j][i+3];
}
}
}
//update in_buf
for (i = 0; i < 5; i++) {
@@ -367,8 +394,7 @@ static void hybrid_analysis(PSDSPContext *dsp, float out[91][32][2],
}
}
static void hybrid_synthesis(PSDSPContext *dsp, float out[2][38][64],
float in[91][32][2], int is34, int len)
static void hybrid_synthesis(float out[2][38][64], float in[91][32][2], int is34, int len)
{
int i, n;
if (is34) {
@@ -392,7 +418,12 @@ static void hybrid_synthesis(PSDSPContext *dsp, float out[2][38][64],
out[1][n][4] += in[28+i][n][1];
}
}
dsp->hybrid_synthesis_deint(out, in + 27, 5, len);
for (i = 0; i < 59; i++) {
for (n = 0; n < len; n++) {
out[0][n][i+5] = in[i+32][n][0];
out[1][n][i+5] = in[i+32][n][1];
}
}
} else {
for (n = 0; n < len; n++) {
out[0][n][0] = in[0][n][0] + in[1][n][0] + in[2][n][0] +
@@ -404,7 +435,12 @@ static void hybrid_synthesis(PSDSPContext *dsp, float out[2][38][64],
out[0][n][2] = in[8][n][0] + in[9][n][0];
out[1][n][2] = in[8][n][1] + in[9][n][1];
}
dsp->hybrid_synthesis_deint(out, in + 7, 3, len);
for (i = 0; i < 61; i++) {
for (n = 0; n < len; n++) {
out[0][n][i+3] = in[i+10][n][0];
out[1][n][i+3] = in[i+10][n][1];
}
}
}
}
@@ -608,8 +644,8 @@ static void map_val_20_to_34(float par[PS_MAX_NR_IIDICC])
static void decorrelation(PSContext *ps, float (*out)[32][2], const float (*s)[32][2], int is34)
{
LOCAL_ALIGNED_16(float, power, [34], [PS_QMF_TIME_SLOTS]);
LOCAL_ALIGNED_16(float, transient_gain, [34], [PS_QMF_TIME_SLOTS]);
float power[34][PS_QMF_TIME_SLOTS] = {{0}};
float transient_gain[34][PS_QMF_TIME_SLOTS];
float *peak_decay_nrg = ps->peak_decay_nrg;
float *power_smooth = ps->power_smooth;
float *peak_decay_diff_smooth = ps->peak_decay_diff_smooth;
@@ -621,8 +657,10 @@ static void decorrelation(PSContext *ps, float (*out)[32][2], const float (*s)[3
const float a_smooth = 0.25f; ///< Smoothing coefficient
int i, k, m, n;
int n0 = 0, nL = 32;
memset(power, 0, 34 * sizeof(*power));
static const int link_delay[] = { 3, 4, 5 };
static const float a[] = { 0.65143905753106f,
0.56471812200776f,
0.48954165955695f };
if (is34 != ps->is34bands_old) {
memset(ps->peak_decay_nrg, 0, sizeof(ps->peak_decay_nrg));
@@ -632,9 +670,11 @@ static void decorrelation(PSContext *ps, float (*out)[32][2], const float (*s)[3
memset(ps->ap_delay, 0, sizeof(ps->ap_delay));
}
for (k = 0; k < NR_BANDS[is34]; k++) {
int i = k_to_i[k];
ps->dsp.add_squares(power[i], s[k], nL - n0);
for (n = n0; n < nL; n++) {
for (k = 0; k < NR_BANDS[is34]; k++) {
int i = k_to_i[k];
power[i][n] += s[k][n][0] * s[k][n][0] + s[k][n][1] * s[k][n][1];
}
}
//Transient detection
@@ -662,31 +702,54 @@ static void decorrelation(PSContext *ps, float (*out)[32][2], const float (*s)[3
for (k = 0; k < NR_ALLPASS_BANDS[is34]; k++) {
int b = k_to_i[k];
float g_decay_slope = 1.f - DECAY_SLOPE * (k - DECAY_CUTOFF[is34]);
float ag[PS_AP_LINKS];
g_decay_slope = av_clipf(g_decay_slope, 0.f, 1.f);
memcpy(delay[k], delay[k]+nL, PS_MAX_DELAY*sizeof(delay[k][0]));
memcpy(delay[k]+PS_MAX_DELAY, s[k], numQMFSlots*sizeof(delay[k][0]));
for (m = 0; m < PS_AP_LINKS; m++) {
memcpy(ap_delay[k][m], ap_delay[k][m]+numQMFSlots, 5*sizeof(ap_delay[k][m][0]));
ag[m] = a[m] * g_decay_slope;
}
for (n = n0; n < nL; n++) {
float in_re = delay[k][n+PS_MAX_DELAY-2][0] * phi_fract[is34][k][0] -
delay[k][n+PS_MAX_DELAY-2][1] * phi_fract[is34][k][1];
float in_im = delay[k][n+PS_MAX_DELAY-2][0] * phi_fract[is34][k][1] +
delay[k][n+PS_MAX_DELAY-2][1] * phi_fract[is34][k][0];
for (m = 0; m < PS_AP_LINKS; m++) {
float a_re = ag[m] * in_re;
float a_im = ag[m] * in_im;
float link_delay_re = ap_delay[k][m][n+5-link_delay[m]][0];
float link_delay_im = ap_delay[k][m][n+5-link_delay[m]][1];
float fractional_delay_re = Q_fract_allpass[is34][k][m][0];
float fractional_delay_im = Q_fract_allpass[is34][k][m][1];
ap_delay[k][m][n+5][0] = in_re;
ap_delay[k][m][n+5][1] = in_im;
in_re = link_delay_re * fractional_delay_re - link_delay_im * fractional_delay_im - a_re;
in_im = link_delay_re * fractional_delay_im + link_delay_im * fractional_delay_re - a_im;
ap_delay[k][m][n+5][0] += ag[m] * in_re;
ap_delay[k][m][n+5][1] += ag[m] * in_im;
}
out[k][n][0] = transient_gain[b][n] * in_re;
out[k][n][1] = transient_gain[b][n] * in_im;
}
ps->dsp.decorrelate(out[k], delay[k] + PS_MAX_DELAY - 2, ap_delay[k],
phi_fract[is34][k], Q_fract_allpass[is34][k],
transient_gain[b], g_decay_slope, nL - n0);
}
for (; k < SHORT_DELAY_BAND[is34]; k++) {
int i = k_to_i[k];
memcpy(delay[k], delay[k]+nL, PS_MAX_DELAY*sizeof(delay[k][0]));
memcpy(delay[k]+PS_MAX_DELAY, s[k], numQMFSlots*sizeof(delay[k][0]));
//H = delay 14
ps->dsp.mul_pair_single(out[k], delay[k] + PS_MAX_DELAY - 14,
transient_gain[i], nL - n0);
for (n = n0; n < nL; n++) {
//H = delay 14
out[k][n][0] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-14][0];
out[k][n][1] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-14][1];
}
}
for (; k < NR_BANDS[is34]; k++) {
int i = k_to_i[k];
memcpy(delay[k], delay[k]+nL, PS_MAX_DELAY*sizeof(delay[k][0]));
memcpy(delay[k]+PS_MAX_DELAY, s[k], numQMFSlots*sizeof(delay[k][0]));
//H = delay 1
ps->dsp.mul_pair_single(out[k], delay[k] + PS_MAX_DELAY - 1,
transient_gain[i], nL - n0);
for (n = n0; n < nL; n++) {
//H = delay 1
out[k][n][0] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-1][0];
out[k][n][1] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-1][1];
}
}
}
@@ -730,7 +793,7 @@ static void remap20(int8_t (**p_par_mapped)[PS_MAX_NR_IIDICC],
static void stereo_processing(PSContext *ps, float (*l)[32][2], float (*r)[32][2], int is34)
{
int e, b, k;
int e, b, k, n;
float (*H11)[PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC] = ps->H11;
float (*H12)[PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC] = ps->H12;
@@ -842,52 +905,78 @@ static void stereo_processing(PSContext *ps, float (*l)[32][2], float (*r)[32][2
H22[0][e+1][b] = h22;
}
for (k = 0; k < NR_BANDS[is34]; k++) {
float h[2][4];
float h_step[2][4];
float h11r, h12r, h21r, h22r;
float h11i, h12i, h21i, h22i;
float h11r_step, h12r_step, h21r_step, h22r_step;
float h11i_step, h12i_step, h21i_step, h22i_step;
int start = ps->border_position[e];
int stop = ps->border_position[e+1];
float width = 1.f / (stop - start);
b = k_to_i[k];
h[0][0] = H11[0][e][b];
h[0][1] = H12[0][e][b];
h[0][2] = H21[0][e][b];
h[0][3] = H22[0][e][b];
h11r = H11[0][e][b];
h12r = H12[0][e][b];
h21r = H21[0][e][b];
h22r = H22[0][e][b];
if (!PS_BASELINE && ps->enable_ipdopd) {
//Is this necessary? ps_04_new seems unchanged
if ((is34 && k <= 13 && k >= 9) || (!is34 && k <= 1)) {
h[1][0] = -H11[1][e][b];
h[1][1] = -H12[1][e][b];
h[1][2] = -H21[1][e][b];
h[1][3] = -H22[1][e][b];
h11i = -H11[1][e][b];
h12i = -H12[1][e][b];
h21i = -H21[1][e][b];
h22i = -H22[1][e][b];
} else {
h[1][0] = H11[1][e][b];
h[1][1] = H12[1][e][b];
h[1][2] = H21[1][e][b];
h[1][3] = H22[1][e][b];
h11i = H11[1][e][b];
h12i = H12[1][e][b];
h21i = H21[1][e][b];
h22i = H22[1][e][b];
}
}
//Interpolation
h_step[0][0] = (H11[0][e+1][b] - h[0][0]) * width;
h_step[0][1] = (H12[0][e+1][b] - h[0][1]) * width;
h_step[0][2] = (H21[0][e+1][b] - h[0][2]) * width;
h_step[0][3] = (H22[0][e+1][b] - h[0][3]) * width;
h11r_step = (H11[0][e+1][b] - h11r) * width;
h12r_step = (H12[0][e+1][b] - h12r) * width;
h21r_step = (H21[0][e+1][b] - h21r) * width;
h22r_step = (H22[0][e+1][b] - h22r) * width;
if (!PS_BASELINE && ps->enable_ipdopd) {
h_step[1][0] = (H11[1][e+1][b] - h[1][0]) * width;
h_step[1][1] = (H12[1][e+1][b] - h[1][1]) * width;
h_step[1][2] = (H21[1][e+1][b] - h[1][2]) * width;
h_step[1][3] = (H22[1][e+1][b] - h[1][3]) * width;
h11i_step = (H11[1][e+1][b] - h11i) * width;
h12i_step = (H12[1][e+1][b] - h12i) * width;
h21i_step = (H21[1][e+1][b] - h21i) * width;
h22i_step = (H22[1][e+1][b] - h22i) * width;
}
for (n = start + 1; n <= stop; n++) {
//l is s, r is d
float l_re = l[k][n][0];
float l_im = l[k][n][1];
float r_re = r[k][n][0];
float r_im = r[k][n][1];
h11r += h11r_step;
h12r += h12r_step;
h21r += h21r_step;
h22r += h22r_step;
if (!PS_BASELINE && ps->enable_ipdopd) {
h11i += h11i_step;
h12i += h12i_step;
h21i += h21i_step;
h22i += h22i_step;
l[k][n][0] = h11r*l_re + h21r*r_re - h11i*l_im - h21i*r_im;
l[k][n][1] = h11r*l_im + h21r*r_im + h11i*l_re + h21i*r_re;
r[k][n][0] = h12r*l_re + h22r*r_re - h12i*l_im - h22i*r_im;
r[k][n][1] = h12r*l_im + h22r*r_im + h12i*l_re + h22i*r_re;
} else {
l[k][n][0] = h11r*l_re + h21r*r_re;
l[k][n][1] = h11r*l_im + h21r*r_im;
r[k][n][0] = h12r*l_re + h22r*r_re;
r[k][n][1] = h12r*l_im + h22r*r_im;
}
}
ps->dsp.stereo_interpolate[!PS_BASELINE && ps->enable_ipdopd](
l[k] + start + 1, r[k] + start + 1,
h, h_step, stop - start);
}
}
}
int ff_ps_apply(AVCodecContext *avctx, PSContext *ps, float L[2][38][64], float R[2][38][64], int top)
{
LOCAL_ALIGNED_16(float, Lbuf, [91], [32][2]);
LOCAL_ALIGNED_16(float, Rbuf, [91], [32][2]);
float Lbuf[91][32][2];
float Rbuf[91][32][2];
const int len = 32;
int is34 = ps->is34bands;
@@ -896,11 +985,11 @@ int ff_ps_apply(AVCodecContext *avctx, PSContext *ps, float L[2][38][64], float
if (top < NR_ALLPASS_BANDS[is34])
memset(ps->ap_delay + top, 0, (NR_ALLPASS_BANDS[is34] - top)*sizeof(ps->ap_delay[0]));
hybrid_analysis(&ps->dsp, Lbuf, ps->in_buf, L, is34, len);
hybrid_analysis(Lbuf, ps->in_buf, L, is34, len);
decorrelation(ps, Rbuf, Lbuf, is34);
stereo_processing(ps, Lbuf, Rbuf, is34);
hybrid_synthesis(&ps->dsp, L, Lbuf, is34, len);
hybrid_synthesis(&ps->dsp, R, Rbuf, is34, len);
hybrid_synthesis(L, Lbuf, is34, len);
hybrid_synthesis(R, Rbuf, is34, len);
return 0;
}
@@ -948,5 +1037,4 @@ av_cold void ff_ps_init(void) {
av_cold void ff_ps_ctx_init(PSContext *ps)
{
ff_psdsp_init(&ps->dsp);
}
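
Written out, what the stereo_processing loop does in both variants above is a per-band 2x2 mix whose coefficients are linearly interpolated across the envelope; this is my reading of the code, not a spec quote:

    width  = 1 / (stop - start)
    h(n)   = h(start) + (n - start) * (H[e+1] - h(start)) * width,  start < n <= stop
    l'(n)  = h11(n) * l(n) + h21(n) * r(n)
    r'(n)  = h12(n) * l(n) + h22(n) * r(n)

The ipdopd variant carries the imaginary parts h11i..h22i alongside, turning the same interpolation into a complex 2x2 mix with the cross terms shown in the hunk.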


@@ -24,7 +24,6 @@
#include <stdint.h>
#include "aacpsdsp.h"
#include "avcodec.h"
#include "get_bits.h"
@@ -61,19 +60,18 @@ typedef struct {
int is34bands;
int is34bands_old;
DECLARE_ALIGNED(16, float, in_buf)[5][44][2];
DECLARE_ALIGNED(16, float, delay)[PS_MAX_SSB][PS_QMF_TIME_SLOTS + PS_MAX_DELAY][2];
DECLARE_ALIGNED(16, float, ap_delay)[PS_MAX_AP_BANDS][PS_AP_LINKS][PS_QMF_TIME_SLOTS + PS_MAX_AP_DELAY][2];
DECLARE_ALIGNED(16, float, peak_decay_nrg)[34];
DECLARE_ALIGNED(16, float, power_smooth)[34];
DECLARE_ALIGNED(16, float, peak_decay_diff_smooth)[34];
DECLARE_ALIGNED(16, float, H11)[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
DECLARE_ALIGNED(16, float, H12)[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
DECLARE_ALIGNED(16, float, H21)[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
DECLARE_ALIGNED(16, float, H22)[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
float in_buf[5][44][2];
float delay[PS_MAX_SSB][PS_QMF_TIME_SLOTS + PS_MAX_DELAY][2];
float ap_delay[PS_MAX_AP_BANDS][PS_AP_LINKS][PS_QMF_TIME_SLOTS + PS_MAX_AP_DELAY][2];
float peak_decay_nrg[34];
float power_smooth[34];
float peak_decay_diff_smooth[34];
float H11[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
float H12[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
float H21[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
float H22[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
int8_t opd_hist[PS_MAX_NR_IIDICC];
int8_t ipd_hist[PS_MAX_NR_IIDICC];
PSDSPContext dsp;
} PSContext;
void ff_ps_init(void);
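
The DECLARE_ALIGNED churn above is about storage that SIMD code can hit with aligned loads: every buffer the new DSP routines touch gets 16-byte alignment. On GCC/Clang the macro reduces to an aligned attribute; a toy with my spelling of the expansion, not a verbatim copy of libavutil/mem.h:

    #include <stdio.h>
    #include <stdint.h>

    #define DECLARE_ALIGNED(n, t, v) t __attribute__((aligned(n))) v

    static DECLARE_ALIGNED(16, float, buf)[8];

    int main(void)
    {
        /* 16-byte alignment lets e.g. SSE movaps or NEON 128-bit loads hit
         * buf directly instead of falling back to unaligned accesses. */
        printf("buf %% 16 == %u\n", (unsigned)((uintptr_t)buf & 15)); /* 0 */
        return 0;
    }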


@@ -69,23 +69,23 @@ int main(void)
write_float_3d_array(HB, 46, 8, 4);
printf("};\n");
printf("static const DECLARE_ALIGNED(16, float, f20_0_8)[8][8][2] = {\n");
write_float_3d_array(f20_0_8, 8, 8, 2);
printf("static const float f20_0_8[8][7][2] = {\n");
write_float_3d_array(f20_0_8, 8, 7, 2);
printf("};\n");
printf("static const DECLARE_ALIGNED(16, float, f34_0_12)[12][8][2] = {\n");
write_float_3d_array(f34_0_12, 12, 8, 2);
printf("static const float f34_0_12[12][7][2] = {\n");
write_float_3d_array(f34_0_12, 12, 7, 2);
printf("};\n");
printf("static const DECLARE_ALIGNED(16, float, f34_1_8)[8][8][2] = {\n");
write_float_3d_array(f34_1_8, 8, 8, 2);
printf("static const float f34_1_8[8][7][2] = {\n");
write_float_3d_array(f34_1_8, 8, 7, 2);
printf("};\n");
printf("static const DECLARE_ALIGNED(16, float, f34_2_4)[4][8][2] = {\n");
write_float_3d_array(f34_2_4, 4, 8, 2);
printf("static const float f34_2_4[4][7][2] = {\n");
write_float_3d_array(f34_2_4, 4, 7, 2);
printf("};\n");
printf("static const DECLARE_ALIGNED(16, float, Q_fract_allpass)[2][50][3][2] = {\n");
printf("static const float Q_fract_allpass[2][50][3][2] = {\n");
write_float_4d_array(Q_fract_allpass, 2, 50, 3, 2);
printf("};\n");
printf("static const DECLARE_ALIGNED(16, float, phi_fract)[2][50][2] = {\n");
printf("static const float phi_fract[2][50][2] = {\n");
write_float_3d_array(phi_fract, 2, 50, 2);
printf("};\n");


@@ -31,7 +31,6 @@
#else
#include "libavutil/common.h"
#include "libavutil/mathematics.h"
#include "libavutil/mem.h"
#define NR_ALLPASS_BANDS20 30
#define NR_ALLPASS_BANDS34 50
#define PS_AP_LINKS 3
@@ -39,12 +38,12 @@ static float pd_re_smooth[8*8*8];
static float pd_im_smooth[8*8*8];
static float HA[46][8][4];
static float HB[46][8][4];
static DECLARE_ALIGNED(16, float, f20_0_8) [ 8][8][2];
static DECLARE_ALIGNED(16, float, f34_0_12)[12][8][2];
static DECLARE_ALIGNED(16, float, f34_1_8) [ 8][8][2];
static DECLARE_ALIGNED(16, float, f34_2_4) [ 4][8][2];
static DECLARE_ALIGNED(16, float, Q_fract_allpass)[2][50][3][2];
static DECLARE_ALIGNED(16, float, phi_fract)[2][50][2];
static float f20_0_8 [ 8][7][2];
static float f34_0_12[12][7][2];
static float f34_1_8 [ 8][7][2];
static float f34_2_4 [ 4][7][2];
static float Q_fract_allpass[2][50][3][2];
static float phi_fract[2][50][2];
static const float g0_Q8[] = {
0.00746082949812f, 0.02270420949825f, 0.04546865930473f, 0.07266113929591f,
@@ -66,7 +65,7 @@ static const float g2_Q4[] = {
0.16486303567403f, 0.23279856662996f, 0.25f
};
static void make_filters_from_proto(float (*filter)[8][2], const float *proto, int bands)
static void make_filters_from_proto(float (*filter)[7][2], const float *proto, int bands)
{
int q, n;
for (q = 0; q < bands; q++) {


@@ -1,214 +0,0 @@
/*
* Copyright (c) 2010 Alex Converse <alex.converse@gmail.com>
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/attributes.h"
#include "aacpsdsp.h"
static void ps_add_squares_c(float *dst, const float (*src)[2], int n)
{
int i;
for (i = 0; i < n; i++)
dst[i] += src[i][0] * src[i][0] + src[i][1] * src[i][1];
}
static void ps_mul_pair_single_c(float (*dst)[2], float (*src0)[2], float *src1,
int n)
{
int i;
for (i = 0; i < n; i++) {
dst[i][0] = src0[i][0] * src1[i];
dst[i][1] = src0[i][1] * src1[i];
}
}
static void ps_hybrid_analysis_c(float (*out)[2], float (*in)[2],
const float (*filter)[8][2],
int stride, int n)
{
int i, j;
for (i = 0; i < n; i++) {
float sum_re = filter[i][6][0] * in[6][0];
float sum_im = filter[i][6][0] * in[6][1];
for (j = 0; j < 6; j++) {
float in0_re = in[j][0];
float in0_im = in[j][1];
float in1_re = in[12-j][0];
float in1_im = in[12-j][1];
sum_re += filter[i][j][0] * (in0_re + in1_re) -
filter[i][j][1] * (in0_im - in1_im);
sum_im += filter[i][j][0] * (in0_im + in1_im) +
filter[i][j][1] * (in0_re - in1_re);
}
out[i * stride][0] = sum_re;
out[i * stride][1] = sum_im;
}
}
static void ps_hybrid_analysis_ileave_c(float (*out)[32][2], float L[2][38][64],
int i, int len)
{
int j;
for (; i < 64; i++) {
for (j = 0; j < len; j++) {
out[i][j][0] = L[0][j][i];
out[i][j][1] = L[1][j][i];
}
}
}
static void ps_hybrid_synthesis_deint_c(float out[2][38][64],
float (*in)[32][2],
int i, int len)
{
int n;
for (; i < 64; i++) {
for (n = 0; n < len; n++) {
out[0][n][i] = in[i][n][0];
out[1][n][i] = in[i][n][1];
}
}
}
static void ps_decorrelate_c(float (*out)[2], float (*delay)[2],
float (*ap_delay)[PS_QMF_TIME_SLOTS + PS_MAX_AP_DELAY][2],
const float phi_fract[2], float (*Q_fract)[2],
const float *transient_gain,
float g_decay_slope,
int len)
{
static const float a[] = { 0.65143905753106f,
0.56471812200776f,
0.48954165955695f };
float ag[PS_AP_LINKS];
int m, n;
for (m = 0; m < PS_AP_LINKS; m++)
ag[m] = a[m] * g_decay_slope;
for (n = 0; n < len; n++) {
float in_re = delay[n][0] * phi_fract[0] - delay[n][1] * phi_fract[1];
float in_im = delay[n][0] * phi_fract[1] + delay[n][1] * phi_fract[0];
for (m = 0; m < PS_AP_LINKS; m++) {
float a_re = ag[m] * in_re;
float a_im = ag[m] * in_im;
float link_delay_re = ap_delay[m][n+2-m][0];
float link_delay_im = ap_delay[m][n+2-m][1];
float fractional_delay_re = Q_fract[m][0];
float fractional_delay_im = Q_fract[m][1];
float apd_re = in_re;
float apd_im = in_im;
in_re = link_delay_re * fractional_delay_re -
link_delay_im * fractional_delay_im - a_re;
in_im = link_delay_re * fractional_delay_im +
link_delay_im * fractional_delay_re - a_im;
ap_delay[m][n+5][0] = apd_re + ag[m] * in_re;
ap_delay[m][n+5][1] = apd_im + ag[m] * in_im;
}
out[n][0] = transient_gain[n] * in_re;
out[n][1] = transient_gain[n] * in_im;
}
}
static void ps_stereo_interpolate_c(float (*l)[2], float (*r)[2],
float h[2][4], float h_step[2][4],
int len)
{
float h0 = h[0][0];
float h1 = h[0][1];
float h2 = h[0][2];
float h3 = h[0][3];
float hs0 = h_step[0][0];
float hs1 = h_step[0][1];
float hs2 = h_step[0][2];
float hs3 = h_step[0][3];
int n;
for (n = 0; n < len; n++) {
//l is s, r is d
float l_re = l[n][0];
float l_im = l[n][1];
float r_re = r[n][0];
float r_im = r[n][1];
h0 += hs0;
h1 += hs1;
h2 += hs2;
h3 += hs3;
l[n][0] = h0 * l_re + h2 * r_re;
l[n][1] = h0 * l_im + h2 * r_im;
r[n][0] = h1 * l_re + h3 * r_re;
r[n][1] = h1 * l_im + h3 * r_im;
}
}
static void ps_stereo_interpolate_ipdopd_c(float (*l)[2], float (*r)[2],
float h[2][4], float h_step[2][4],
int len)
{
float h00 = h[0][0], h10 = h[1][0];
float h01 = h[0][1], h11 = h[1][1];
float h02 = h[0][2], h12 = h[1][2];
float h03 = h[0][3], h13 = h[1][3];
float hs00 = h_step[0][0], hs10 = h_step[1][0];
float hs01 = h_step[0][1], hs11 = h_step[1][1];
float hs02 = h_step[0][2], hs12 = h_step[1][2];
float hs03 = h_step[0][3], hs13 = h_step[1][3];
int n;
for (n = 0; n < len; n++) {
//l is s, r is d
float l_re = l[n][0];
float l_im = l[n][1];
float r_re = r[n][0];
float r_im = r[n][1];
h00 += hs00;
h01 += hs01;
h02 += hs02;
h03 += hs03;
h10 += hs10;
h11 += hs11;
h12 += hs12;
h13 += hs13;
l[n][0] = h00 * l_re + h02 * r_re - h10 * l_im - h12 * r_im;
l[n][1] = h00 * l_im + h02 * r_im + h10 * l_re + h12 * r_re;
r[n][0] = h01 * l_re + h03 * r_re - h11 * l_im - h13 * r_im;
r[n][1] = h01 * l_im + h03 * r_im + h11 * l_re + h13 * r_re;
}
}
av_cold void ff_psdsp_init(PSDSPContext *s)
{
s->add_squares = ps_add_squares_c;
s->mul_pair_single = ps_mul_pair_single_c;
s->hybrid_analysis = ps_hybrid_analysis_c;
s->hybrid_analysis_ileave = ps_hybrid_analysis_ileave_c;
s->hybrid_synthesis_deint = ps_hybrid_synthesis_deint_c;
s->decorrelate = ps_decorrelate_c;
s->stereo_interpolate[0] = ps_stereo_interpolate_c;
s->stereo_interpolate[1] = ps_stereo_interpolate_ipdopd_c;
if (ARCH_ARM)
ff_psdsp_init_arm(s);
}
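/*
 * Illustrative sketch, not part of the file (psdsp_example is a
 * hypothetical caller): typical use of the dispatch table built above.
 * ARCH_ARM is a compile-time constant, so on other architectures the
 * branch is dead code and the C versions remain in place.
 */
#include <string.h>
static void psdsp_example(void)
{
    PSDSPContext dsp;
    float dst[32] = { 0 };
    float src[32][2];
    memset(src, 0, sizeof(src));
    ff_psdsp_init(&dsp);
    dsp.add_squares(dst, src, 32); /* accumulates |src[i]|^2 into dst[i] */
}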

View File

@@ -1,53 +0,0 @@
/*
* Copyright (c) 2012 Mans Rullgard
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef LIBAVCODEC_AACPSDSP_H
#define LIBAVCODEC_AACPSDSP_H
#define PS_QMF_TIME_SLOTS 32
#define PS_AP_LINKS 3
#define PS_MAX_AP_DELAY 5
typedef struct PSDSPContext {
void (*add_squares)(float *dst, const float (*src)[2], int n);
void (*mul_pair_single)(float (*dst)[2], float (*src0)[2], float *src1,
int n);
void (*hybrid_analysis)(float (*out)[2], float (*in)[2],
const float (*filter)[8][2],
int stride, int n);
void (*hybrid_analysis_ileave)(float (*out)[32][2], float L[2][38][64],
int i, int len);
void (*hybrid_synthesis_deint)(float out[2][38][64], float (*in)[32][2],
int i, int len);
void (*decorrelate)(float (*out)[2], float (*delay)[2],
float (*ap_delay)[PS_QMF_TIME_SLOTS+PS_MAX_AP_DELAY][2],
const float phi_fract[2], float (*Q_fract)[2],
const float *transient_gain,
float g_decay_slope,
int len);
void (*stereo_interpolate[2])(float (*l)[2], float (*r)[2],
float h[2][4], float h_step[2][4],
int len);
} PSDSPContext;
void ff_psdsp_init(PSDSPContext *s);
void ff_psdsp_init_arm(PSDSPContext *s);
#endif /* LIBAVCODEC_AACPSDSP_H */

View File

@@ -216,7 +216,7 @@ static const float psy_fir_coeffs[] = {
};
/**
* Calculate the ABR attack threshold from the above LAME psymodel table.
* calculates the attack threshold for ABR from the above table for the LAME psy model
*/
static float lame_calc_attack_threshold(int bitrate)
{
@@ -389,8 +389,9 @@ static av_unused FFPsyWindowInfo psy_3gpp_window(FFPsyContext *ctx,
AacPsyChannel *pch = &pctx->ch[channel];
uint8_t grouping = 0;
int next_type = pch->next_window_seq;
FFPsyWindowInfo wi = { { 0 } };
FFPsyWindowInfo wi;
memset(&wi, 0, sizeof(wi));
if (la) {
float s[8], v;
int switch_to_eight = 0;
@@ -399,7 +400,7 @@ static av_unused FFPsyWindowInfo psy_3gpp_window(FFPsyContext *ctx,
int stay_short = 0;
for (i = 0; i < 8; i++) {
for (j = 0; j < 128; j++) {
v = iir_filter(la[i*128+j], pch->iir_state);
v = iir_filter(la[(i*128+j)*ctx->avctx->channels], pch->iir_state);
sum += v*v;
}
s[i] = sum;
@@ -775,8 +776,9 @@ static void lame_apply_block_type(AacPsyChannel *ctx, FFPsyWindowInfo *wi, int u
ctx->next_window_seq = blocktype;
}
static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx, const float *audio,
const float *la, int channel, int prev_type)
static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx,
const int16_t *audio, const int16_t *la,
int channel, int prev_type)
{
AacPsyContext *pctx = (AacPsyContext*) ctx->model_priv_data;
AacPsyChannel *pch = &pctx->ch[channel];
@@ -784,28 +786,29 @@ static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx, const float *audio,
int uselongblock = 1;
int attacks[AAC_NUM_BLOCKS_SHORT + 1] = { 0 };
int i;
FFPsyWindowInfo wi = { { 0 } };
FFPsyWindowInfo wi;
memset(&wi, 0, sizeof(wi));
if (la) {
float hpfsmpl[AAC_BLOCK_SIZE_LONG];
float const *pf = hpfsmpl;
float attack_intensity[(AAC_NUM_BLOCKS_SHORT + 1) * PSY_LAME_NUM_SUBBLOCKS];
float energy_subshort[(AAC_NUM_BLOCKS_SHORT + 1) * PSY_LAME_NUM_SUBBLOCKS];
float energy_short[AAC_NUM_BLOCKS_SHORT + 1] = { 0 };
const float *firbuf = la + (AAC_BLOCK_SIZE_SHORT/4 - PSY_LAME_FIR_LEN);
int chans = ctx->avctx->channels;
const int16_t *firbuf = la + (AAC_BLOCK_SIZE_SHORT/4 - PSY_LAME_FIR_LEN) * chans;
int j, att_sum = 0;
/* LAME comment: apply high pass filter of fs/4 */
for (i = 0; i < AAC_BLOCK_SIZE_LONG; i++) {
float sum1, sum2;
sum1 = firbuf[i + (PSY_LAME_FIR_LEN - 1) / 2];
sum1 = firbuf[(i + ((PSY_LAME_FIR_LEN - 1) / 2)) * chans];
sum2 = 0.0;
for (j = 0; j < ((PSY_LAME_FIR_LEN - 1) / 2) - 1; j += 2) {
sum1 += psy_fir_coeffs[j] * (firbuf[i + j] + firbuf[i + PSY_LAME_FIR_LEN - j]);
sum2 += psy_fir_coeffs[j + 1] * (firbuf[i + j + 1] + firbuf[i + PSY_LAME_FIR_LEN - j - 1]);
sum1 += psy_fir_coeffs[j] * (firbuf[(i + j) * chans] + firbuf[(i + PSY_LAME_FIR_LEN - j) * chans]);
sum2 += psy_fir_coeffs[j + 1] * (firbuf[(i + j + 1) * chans] + firbuf[(i + PSY_LAME_FIR_LEN - j - 1) * chans]);
}
/* NOTE: The LAME psymodel expects its input in the range -32768 to 32768. Tuning this for normalized floats would be difficult. */
hpfsmpl[i] = (sum1 + sum2) * 32768.0f;
hpfsmpl[i] = sum1 + sum2;
}
/* Calculate the energies of each sub-shortblock */
@@ -820,15 +823,16 @@ static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx, const float *audio,
float const *const pfe = pf + AAC_BLOCK_SIZE_LONG / (AAC_NUM_BLOCKS_SHORT * PSY_LAME_NUM_SUBBLOCKS);
float p = 1.0f;
for (; pf < pfe; pf++)
p = FFMAX(p, fabsf(*pf));
if (p < fabsf(*pf))
p = fabsf(*pf);
pch->prev_energy_subshort[i] = energy_subshort[i + PSY_LAME_NUM_SUBBLOCKS] = p;
energy_short[1 + i / PSY_LAME_NUM_SUBBLOCKS] += p;
/* NOTE: The indexes below are [i + 3 - 2] in the LAME source.
* Obviously the 3 and 2 have some significance, or this would be just [i + 1]
* (which is what we use here). What the 3 stands for is ambiguous, as it is both
* number of short blocks, and the number of sub-short blocks.
* It seems that LAME is comparing each sub-block to sub-block + 1 in the
* previous block.
/* FIXME: The indexes below are [i + 3 - 2] in the LAME source.
* Obviously the 3 and 2 have some significance, or this would be just [i + 1]
* (which is what we use here). What the 3 stands for is ambiguous, as it is both
* number of short blocks, and the number of sub-short blocks.
* It seems that LAME is comparing each sub-block to sub-block + 1 in the
* previous block.
*/
if (p > energy_subshort[i + 1])
p = p / energy_subshort[i + 1];
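/*
 * Illustrative sketch, not part of the diff (is_attack and its
 * parameters are hypothetical; the full comparison, including the
 * threshold from lame_calc_attack_threshold(), lies outside this
 * hunk): the ratio formed above is LAME's attack-strength measure --
 * a sub-block counts as an attack when its energy ratio against the
 * neighbouring sub-block exceeds the bitrate-dependent threshold.
 */
static int is_attack(float p, float neighbour_energy, float threshold)
{
    return p / neighbour_energy > threshold;
}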

View File

@@ -32,7 +32,6 @@
#include "aacsbrdata.h"
#include "fft.h"
#include "aacps.h"
#include "sbrdsp.h"
#include "libavutil/libm.h"
#include "libavutil/avassert.h"
@@ -129,24 +128,13 @@ av_cold void ff_aac_sbr_init(void)
ff_ps_init();
}
/** Places SBR in pure upsampling mode. */
static void sbr_turnoff(SpectralBandReplication *sbr) {
sbr->start = 0;
// Init defaults used in pure upsampling mode
sbr->kx[1] = 32; //Typo in spec, kx' inits to 32
sbr->m[1] = 0;
// Reset values for first SBR header
sbr->data[0].e_a[1] = sbr->data[1].e_a[1] = -1;
memset(&sbr->spectrum_params, -1, sizeof(SpectrumParameters));
}
av_cold void ff_aac_sbr_ctx_init(AACContext *ac, SpectralBandReplication *sbr)
{
float mdct_scale;
if(sbr->mdct.mdct_bits)
return;
sbr->kx[0] = sbr->kx[1];
sbr_turnoff(sbr);
sbr->kx[0] = sbr->kx[1] = 32; //Typo in spec, kx' inits to 32
sbr->data[0].e_a[1] = sbr->data[1].e_a[1] = -1;
sbr->data[0].synthesis_filterbank_samples_offset = SBR_SYNTHESIS_BUF_SIZE - (1280 - 128);
sbr->data[1].synthesis_filterbank_samples_offset = SBR_SYNTHESIS_BUF_SIZE - (1280 - 128);
/* SBR requires samples to be scaled to +/-32768.0 to work correctly.
@@ -156,7 +144,6 @@ av_cold void ff_aac_sbr_ctx_init(AACContext *ac, SpectralBandReplication *sbr)
ff_mdct_init(&sbr->mdct, 7, 1, 1.0 / (64 * mdct_scale));
ff_mdct_init(&sbr->mdct_ana, 7, 1, -2.0 * mdct_scale);
ff_ps_ctx_init(&sbr->ps);
ff_sbrdsp_init(&sbr->dsp);
}
av_cold void ff_aac_sbr_ctx_close(SpectralBandReplication *sbr)
@@ -918,7 +905,7 @@ static void read_sbr_extension(AACContext *ac, SpectralBandReplication *sbr,
{
switch (bs_extension_id) {
case EXTENSION_ID_PS:
if (!ac->oc[1].m4ac.ps) {
if (!ac->m4ac.ps) {
av_log(ac->avctx, AV_LOG_ERROR, "Parametric Stereo signaled to be not-present but was found in the bitstream.\n");
skip_bits_long(gb, *num_bits_left); // bs_fill_bits
*num_bits_left = 0;
@@ -933,9 +920,7 @@ static void read_sbr_extension(AACContext *ac, SpectralBandReplication *sbr,
}
break;
default:
// some files contain 0-padding
if (bs_extension_id || *num_bits_left > 16 || show_bits(gb, *num_bits_left))
av_log_missing_feature(ac->avctx, "Reserved SBR extensions are", 1);
av_log_missing_feature(ac->avctx, "Reserved SBR extensions are", 1);
skip_bits_long(gb, *num_bits_left); // bs_fill_bits
*num_bits_left = 0;
break;
@@ -1011,18 +996,18 @@ static unsigned int read_sbr_data(AACContext *ac, SpectralBandReplication *sbr,
if (id_aac == TYPE_SCE || id_aac == TYPE_CCE) {
if (read_sbr_single_channel_element(ac, sbr, gb)) {
sbr_turnoff(sbr);
sbr->start = 0;
return get_bits_count(gb) - cnt;
}
} else if (id_aac == TYPE_CPE) {
if (read_sbr_channel_pair_element(ac, sbr, gb)) {
sbr_turnoff(sbr);
sbr->start = 0;
return get_bits_count(gb) - cnt;
}
} else {
av_log(ac->avctx, AV_LOG_ERROR,
"Invalid bitstream - cannot apply SBR to element type %d\n", id_aac);
sbr_turnoff(sbr);
sbr->start = 0;
return get_bits_count(gb) - cnt;
}
if (get_bits1(gb)) { // bs_extended_data
@@ -1054,7 +1039,7 @@ static void sbr_reset(AACContext *ac, SpectralBandReplication *sbr)
if (err < 0) {
av_log(ac->avctx, AV_LOG_ERROR,
"SBR reset failed. Switching SBR to pure upsampling mode.\n");
sbr_turnoff(sbr);
sbr->start = 0;
}
}
@@ -1077,9 +1062,9 @@ int ff_decode_sbr_extension(AACContext *ac, SpectralBandReplication *sbr,
sbr->reset = 0;
if (!sbr->sample_rate)
sbr->sample_rate = 2 * ac->oc[1].m4ac.sample_rate; //TODO use the nominal sample rate for arbitrary sample rate support
if (!ac->oc[1].m4ac.ext_sample_rate)
ac->oc[1].m4ac.ext_sample_rate = 2 * ac->oc[1].m4ac.sample_rate;
sbr->sample_rate = 2 * ac->m4ac.sample_rate; //TODO use the nominal sample rate for arbitrary sample rate support
if (!ac->m4ac.ext_sample_rate)
ac->m4ac.ext_sample_rate = 2 * ac->m4ac.sample_rate;
if (crc) {
skip_bits(gb, 10); // bs_sbr_crc_bits; TODO - implement CRC check
@@ -1089,7 +1074,6 @@ int ff_decode_sbr_extension(AACContext *ac, SpectralBandReplication *sbr,
//Save some state from the previous frame.
sbr->kx[0] = sbr->kx[1];
sbr->m[0] = sbr->m[1];
sbr->kx_and_m_pushed = 1;
num_sbr_bits++;
if (get_bits1(gb)) // bs_header_flag
@@ -1159,21 +1143,33 @@ static void sbr_dequant(SpectralBandReplication *sbr, int id_aac)
* @param x pointer to the beginning of the first sample window
* @param W array of complex-valued samples split into subbands
*/
static void sbr_qmf_analysis(DSPContext *dsp, FFTContext *mdct,
SBRDSPContext *sbrdsp, const float *in, float *x,
static void sbr_qmf_analysis(DSPContext *dsp, FFTContext *mdct, const float *in, float *x,
float z[320], float W[2][32][32][2])
{
int i;
int i, k;
memcpy(W[0], W[1], sizeof(W[0]));
memcpy(x , x+1024, (320-32)*sizeof(x[0]));
memcpy(x+288, in, 1024*sizeof(x[0]));
for (i = 0; i < 32; i++) { // numTimeSlots*RATE = 16*2 as 960 sample frames
// are not supported
dsp->vector_fmul_reverse(z, sbr_qmf_window_ds, x, 320);
sbrdsp->sum64x5(z);
sbrdsp->qmf_pre_shuffle(z);
for (k = 0; k < 64; k++) {
float f = z[k] + z[k + 64] + z[k + 128] + z[k + 192] + z[k + 256];
z[k] = f;
}
//Shuffle to IMDCT
z[64] = z[0];
for (k = 1; k < 32; k++) {
z[64+2*k-1] = z[ k];
z[64+2*k ] = -z[64-k];
}
z[64+63] = z[32];
mdct->imdct_half(mdct, z, z+64);
sbrdsp->qmf_post_shuffle(W[1][i], z);
for (k = 0; k < 32; k++) {
W[1][i][k][0] = -z[63-k];
W[1][i][k][1] = z[k];
}
x += 32;
}
}
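/*
 * Illustrative sketch, not part of the diff (qmf_fold_320_to_64 is a
 * hypothetical name): the 5-term sums above are the polyphase fold of
 * the 320-sample windowed buffer into the 64 QMF subband inputs,
 * u[k] = sum_{r=0..4} z[k + 64*r], restated directly:
 */
static void qmf_fold_320_to_64(float u[64], const float z[320])
{
    int k, r;
    for (k = 0; k < 64; k++) {
        u[k] = 0.0f;
        for (r = 0; r < 5; r++)
            u[k] += z[k + 64 * r];
    }
}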
@@ -1183,22 +1179,20 @@ static void sbr_qmf_analysis(DSPContext *dsp, FFTContext *mdct,
* (14496-3 sp04 p206)
*/
static void sbr_qmf_synthesis(DSPContext *dsp, FFTContext *mdct,
SBRDSPContext *sbrdsp,
float *out, float X[2][38][64],
float mdct_buf[2][64],
float *v0, int *v_off, const unsigned int div)
{
int i, n;
const float *sbr_qmf_window = div ? sbr_qmf_window_ds : sbr_qmf_window_us;
const int step = 128 >> div;
float *v;
for (i = 0; i < 32; i++) {
if (*v_off < step) {
if (*v_off == 0) {
int saved_samples = (1280 - 128) >> div;
memcpy(&v0[SBR_SYNTHESIS_BUF_SIZE - saved_samples], v0, saved_samples * sizeof(float));
*v_off = SBR_SYNTHESIS_BUF_SIZE - saved_samples - step;
*v_off = SBR_SYNTHESIS_BUF_SIZE - saved_samples - (128 >> div);
} else {
*v_off -= step;
*v_off -= 128 >> div;
}
v = v0 + *v_off;
if (div) {
@@ -1207,12 +1201,20 @@ static void sbr_qmf_synthesis(DSPContext *dsp, FFTContext *mdct,
X[0][i][32+n] = X[1][i][31-n];
}
mdct->imdct_half(mdct, mdct_buf[0], X[0][i]);
sbrdsp->qmf_deint_neg(v, mdct_buf[0]);
for (n = 0; n < 32; n++) {
v[ n] = mdct_buf[0][63 - 2*n];
v[63 - n] = -mdct_buf[0][62 - 2*n];
}
} else {
sbrdsp->neg_odd_64(X[1][i]);
for (n = 1; n < 64; n+=2) {
X[1][i][n] = -X[1][i][n];
}
mdct->imdct_half(mdct, mdct_buf[0], X[0][i]);
mdct->imdct_half(mdct, mdct_buf[1], X[1][i]);
sbrdsp->qmf_deint_bfly(v, mdct_buf[1], mdct_buf[0]);
for (n = 0; n < 64; n++) {
v[ n] = -mdct_buf[0][63 - n] + mdct_buf[1][ n ];
v[127 - n] = mdct_buf[0][63 - n] + mdct_buf[1][ n ];
}
}
dsp->vector_fmul_add(out, v , sbr_qmf_window , zero64, 64 >> div);
dsp->vector_fmul_add(out, v + ( 192 >> div), sbr_qmf_window + ( 64 >> div), out , 64 >> div);
@@ -1228,20 +1230,45 @@ static void sbr_qmf_synthesis(DSPContext *dsp, FFTContext *mdct,
}
}
static void autocorrelate(const float x[40][2], float phi[3][2][2], int lag)
{
int i;
float real_sum = 0.0f;
float imag_sum = 0.0f;
if (lag) {
for (i = 1; i < 38; i++) {
real_sum += x[i][0] * x[i+lag][0] + x[i][1] * x[i+lag][1];
imag_sum += x[i][0] * x[i+lag][1] - x[i][1] * x[i+lag][0];
}
phi[2-lag][1][0] = real_sum + x[ 0][0] * x[lag][0] + x[ 0][1] * x[lag][1];
phi[2-lag][1][1] = imag_sum + x[ 0][0] * x[lag][1] - x[ 0][1] * x[lag][0];
if (lag == 1) {
phi[0][0][0] = real_sum + x[38][0] * x[39][0] + x[38][1] * x[39][1];
phi[0][0][1] = imag_sum + x[38][0] * x[39][1] - x[38][1] * x[39][0];
}
} else {
for (i = 1; i < 38; i++) {
real_sum += x[i][0] * x[i][0] + x[i][1] * x[i][1];
}
phi[2][1][0] = real_sum + x[ 0][0] * x[ 0][0] + x[ 0][1] * x[ 0][1];
phi[1][0][0] = real_sum + x[38][0] * x[38][0] + x[38][1] * x[38][1];
}
}
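/*
 * Illustrative sketch, not part of the diff (autocorr_real is a
 * hypothetical name): a real-valued analogue of the three
 * autocorrelate() calls above.  r[lag] accumulates x[n]*x[n-lag] over
 * the analysis window; the 2x2 system built from these lags is solved
 * for the prediction coefficients alpha0/alpha1, with dk below as its
 * determinant (the 1.000001 divisor keeps it from vanishing).
 */
static void autocorr_real(const float x[40], float r[3])
{
    int i, lag;
    for (lag = 0; lag <= 2; lag++) {
        r[lag] = 0.0f;
        for (i = lag; i < 40; i++)
            r[lag] += x[i] * x[i - lag];
    }
}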
/** High Frequency Generation (14496-3 sp04 p214+) and Inverse Filtering
* (14496-3 sp04 p214)
* Warning: This routine does not seem numerically stable.
*/
static void sbr_hf_inverse_filter(SBRDSPContext *dsp,
float (*alpha0)[2], float (*alpha1)[2],
static void sbr_hf_inverse_filter(float (*alpha0)[2], float (*alpha1)[2],
const float X_low[32][40][2], int k0)
{
int k;
for (k = 0; k < k0; k++) {
LOCAL_ALIGNED_16(float, phi, [3], [2][2]);
float dk;
float phi[3][2][2], dk;
dsp->autocorrelate(X_low[k], phi);
autocorrelate(X_low[k], phi, 0);
autocorrelate(X_low[k], phi, 1);
autocorrelate(X_low[k], phi, 2);
dk = phi[2][1][0] * phi[1][0][0] -
(phi[1][1][0] * phi[1][1][0] + phi[1][1][1] * phi[1][1][1]) / 1.000001f;
@@ -1337,11 +1364,12 @@ static int sbr_hf_gen(AACContext *ac, SpectralBandReplication *sbr,
const float bw_array[5], const uint8_t *t_env,
int bs_num_env)
{
int j, x;
int i, j, x;
int g = 0;
int k = sbr->kx[1];
for (j = 0; j < sbr->num_patches; j++) {
for (x = 0; x < sbr->patch_num_subbands[j]; x++, k++) {
float alpha[4];
const int p = sbr->patch_start_subband[j] + x;
while (g <= sbr->n_q && k >= sbr->f_tablenoise[g])
g++;
@@ -1353,10 +1381,26 @@ static int sbr_hf_gen(AACContext *ac, SpectralBandReplication *sbr,
return -1;
}
sbr->dsp.hf_gen(X_high[k] + ENVELOPE_ADJUSTMENT_OFFSET,
X_low[p] + ENVELOPE_ADJUSTMENT_OFFSET,
alpha0[p], alpha1[p], bw_array[g],
2 * t_env[0], 2 * t_env[bs_num_env]);
alpha[0] = alpha1[p][0] * bw_array[g] * bw_array[g];
alpha[1] = alpha1[p][1] * bw_array[g] * bw_array[g];
alpha[2] = alpha0[p][0] * bw_array[g];
alpha[3] = alpha0[p][1] * bw_array[g];
for (i = 2 * t_env[0]; i < 2 * t_env[bs_num_env]; i++) {
const int idx = i + ENVELOPE_ADJUSTMENT_OFFSET;
X_high[k][idx][0] =
X_low[p][idx - 2][0] * alpha[0] -
X_low[p][idx - 2][1] * alpha[1] +
X_low[p][idx - 1][0] * alpha[2] -
X_low[p][idx - 1][1] * alpha[3] +
X_low[p][idx][0];
X_high[k][idx][1] =
X_low[p][idx - 2][1] * alpha[0] +
X_low[p][idx - 2][0] * alpha[1] +
X_low[p][idx - 1][1] * alpha[2] +
X_low[p][idx - 1][0] * alpha[3] +
X_low[p][idx][1];
}
}
}
if (k < sbr->m[1] + sbr->kx[1])
@@ -1367,8 +1411,8 @@ static int sbr_hf_gen(AACContext *ac, SpectralBandReplication *sbr,
/// Generate the subband filtered lowband
static int sbr_x_gen(SpectralBandReplication *sbr, float X[2][38][64],
const float Y0[38][64][2], const float Y1[38][64][2],
const float X_low[32][40][2], int ch)
const float X_low[32][40][2], const float Y[2][38][64][2],
int ch)
{
int k, i;
const int i_f = 32;
@@ -1382,8 +1426,8 @@ static int sbr_x_gen(SpectralBandReplication *sbr, float X[2][38][64],
}
for (; k < sbr->kx[0] + sbr->m[0]; k++) {
for (i = 0; i < i_Temp; i++) {
X[0][i][k] = Y0[i + i_f][k][0];
X[1][i][k] = Y0[i + i_f][k][1];
X[0][i][k] = Y[0][i + i_f][k][0];
X[1][i][k] = Y[0][i + i_f][k][1];
}
}
@@ -1395,8 +1439,8 @@ static int sbr_x_gen(SpectralBandReplication *sbr, float X[2][38][64],
}
for (; k < sbr->kx[1] + sbr->m[1]; k++) {
for (i = i_Temp; i < i_f; i++) {
X[0][i][k] = Y1[i][k][0];
X[1][i][k] = Y1[i][k][1];
X[0][i][k] = Y[1][i][k][0];
X[1][i][k] = Y[1][i][k][1];
}
}
return 0;
@@ -1405,7 +1449,7 @@ static int sbr_x_gen(SpectralBandReplication *sbr, float X[2][38][64],
/** High Frequency Adjustment (14496-3 sp04 p217) and Mapping
* (14496-3 sp04 p217)
*/
static int sbr_mapping(AACContext *ac, SpectralBandReplication *sbr,
static void sbr_mapping(AACContext *ac, SpectralBandReplication *sbr,
SBRData *ch_data, int e_a[2])
{
int e, i, m;
@@ -1416,12 +1460,7 @@ static int sbr_mapping(AACContext *ac, SpectralBandReplication *sbr,
uint16_t *table = ch_data->bs_freq_res[e + 1] ? sbr->f_tablehigh : sbr->f_tablelow;
int k;
if (sbr->kx[1] != table[0]) {
av_log(ac->avctx, AV_LOG_ERROR, "kx != f_table{high,low}[0]. "
"Derived frequency tables were not regenerated.\n");
sbr_turnoff(sbr);
return AVERROR_BUG;
}
av_assert0(sbr->kx[1] <= table[0]);
for (i = 0; i < ilim; i++)
for (m = table[i]; m < table[i + 1]; m++)
sbr->e_origmapped[e][m - sbr->kx[1]] = ch_data->env_facs[e+1][i];
@@ -1456,15 +1495,13 @@ static int sbr_mapping(AACContext *ac, SpectralBandReplication *sbr,
}
memcpy(ch_data->s_indexmapped[0], ch_data->s_indexmapped[ch_data->bs_num_env], sizeof(ch_data->s_indexmapped[0]));
return 0;
}
/// Estimation of current envelope (14496-3 sp04 p218)
static void sbr_env_estimate(float (*e_curr)[48], float X_high[64][40][2],
SpectralBandReplication *sbr, SBRData *ch_data)
{
int e, m;
int kx1 = sbr->kx[1];
int e, i, m;
if (sbr->bs_interpol_freq) {
for (e = 0; e < ch_data->bs_num_env; e++) {
@@ -1473,7 +1510,12 @@ static void sbr_env_estimate(float (*e_curr)[48], float X_high[64][40][2],
int iub = ch_data->t_env[e + 1] * 2 + ENVELOPE_ADJUSTMENT_OFFSET;
for (m = 0; m < sbr->m[1]; m++) {
float sum = sbr->dsp.sum_square(X_high[m+kx1] + ilb, iub - ilb);
float sum = 0.0f;
for (i = ilb; i < iub; i++) {
sum += X_high[m + sbr->kx[1]][i][0] * X_high[m + sbr->kx[1]][i][0] +
X_high[m + sbr->kx[1]][i][1] * X_high[m + sbr->kx[1]][i][1];
}
e_curr[e][m] = sum * recip_env_size;
}
}
@@ -1491,11 +1533,14 @@ static void sbr_env_estimate(float (*e_curr)[48], float X_high[64][40][2],
const int den = env_size * (table[p + 1] - table[p]);
for (k = table[p]; k < table[p + 1]; k++) {
sum += sbr->dsp.sum_square(X_high[k] + ilb, iub - ilb);
for (i = ilb; i < iub; i++) {
sum += X_high[k][i][0] * X_high[k][i][0] +
X_high[k][i][1] * X_high[k][i][1];
}
}
sum /= den;
for (k = table[p]; k < table[p + 1]; k++) {
e_curr[e][k - kx1] = sum;
e_curr[e][k - sbr->kx[1]] = sum;
}
}
}
@@ -1562,8 +1607,7 @@ static void sbr_gain_calc(AACContext *ac, SpectralBandReplication *sbr,
}
/// Assembling HF Signals (14496-3 sp04 p220)
static void sbr_hf_assemble(float Y1[38][64][2],
const float X_high[64][40][2],
static void sbr_hf_assemble(float Y[2][38][64][2], const float X_high[64][40][2],
SpectralBandReplication *sbr, SBRData *ch_data,
const int e_a[2])
{
@@ -1585,6 +1629,7 @@ static void sbr_hf_assemble(float Y1[38][64][2],
float (*g_temp)[48] = ch_data->g_temp, (*q_temp)[48] = ch_data->q_temp;
int indexnoise = ch_data->f_indexnoise;
int indexsine = ch_data->f_indexsine;
memcpy(Y[0], Y[1], sizeof(Y[0]));
if (sbr->reset) {
for (i = 0; i < h_SL; i++) {
@@ -1606,44 +1651,63 @@ static void sbr_hf_assemble(float Y1[38][64][2],
for (e = 0; e < ch_data->bs_num_env; e++) {
for (i = 2 * ch_data->t_env[e]; i < 2 * ch_data->t_env[e + 1]; i++) {
int phi_sign = (1 - 2*(kx & 1));
LOCAL_ALIGNED_16(float, g_filt_tab, [48]);
LOCAL_ALIGNED_16(float, q_filt_tab, [48]);
float *g_filt, *q_filt;
if (h_SL && e != e_a[0] && e != e_a[1]) {
g_filt = g_filt_tab;
q_filt = q_filt_tab;
for (m = 0; m < m_max; m++) {
const int idx1 = i + h_SL;
g_filt[m] = 0.0f;
q_filt[m] = 0.0f;
for (j = 0; j <= h_SL; j++) {
g_filt[m] += g_temp[idx1 - j][m] * h_smooth[j];
q_filt[m] += q_temp[idx1 - j][m] * h_smooth[j];
}
float g_filt = 0.0f;
for (j = 0; j <= h_SL; j++)
g_filt += g_temp[idx1 - j][m] * h_smooth[j];
Y[1][i][m + kx][0] =
X_high[m + kx][i + ENVELOPE_ADJUSTMENT_OFFSET][0] * g_filt;
Y[1][i][m + kx][1] =
X_high[m + kx][i + ENVELOPE_ADJUSTMENT_OFFSET][1] * g_filt;
}
} else {
g_filt = g_temp[i + h_SL];
q_filt = q_temp[i];
for (m = 0; m < m_max; m++) {
const float g_filt = g_temp[i + h_SL][m];
Y[1][i][m + kx][0] =
X_high[m + kx][i + ENVELOPE_ADJUSTMENT_OFFSET][0] * g_filt;
Y[1][i][m + kx][1] =
X_high[m + kx][i + ENVELOPE_ADJUSTMENT_OFFSET][1] * g_filt;
}
}
sbr->dsp.hf_g_filt(Y1[i] + kx, X_high + kx, g_filt, m_max,
i + ENVELOPE_ADJUSTMENT_OFFSET);
if (e != e_a[0] && e != e_a[1]) {
sbr->dsp.hf_apply_noise[indexsine](Y1[i] + kx, sbr->s_m[e],
q_filt, indexnoise,
kx, m_max);
} else {
for (m = 0; m < m_max; m++) {
Y1[i][m + kx][0] +=
indexnoise = (indexnoise + 1) & 0x1ff;
if (sbr->s_m[e][m]) {
Y[1][i][m + kx][0] +=
sbr->s_m[e][m] * phi[0][indexsine];
Y[1][i][m + kx][1] +=
sbr->s_m[e][m] * (phi[1][indexsine] * phi_sign);
} else {
float q_filt;
if (h_SL) {
const int idx1 = i + h_SL;
q_filt = 0.0f;
for (j = 0; j <= h_SL; j++)
q_filt += q_temp[idx1 - j][m] * h_smooth[j];
} else {
q_filt = q_temp[i][m];
}
Y[1][i][m + kx][0] +=
q_filt * sbr_noise_table[indexnoise][0];
Y[1][i][m + kx][1] +=
q_filt * sbr_noise_table[indexnoise][1];
}
phi_sign = -phi_sign;
}
} else {
indexnoise = (indexnoise + m_max) & 0x1ff;
for (m = 0; m < m_max; m++) {
Y[1][i][m + kx][0] +=
sbr->s_m[e][m] * phi[0][indexsine];
Y1[i][m + kx][1] +=
Y[1][i][m + kx][1] +=
sbr->s_m[e][m] * (phi[1][indexsine] * phi_sign);
phi_sign = -phi_sign;
}
}
indexnoise = (indexnoise + m_max) & 0x1ff;
indexsine = (indexsine + 1) & 3;
}
}
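/*
 * Illustrative sketch, not part of the diff (wrap_index is a
 * hypothetical helper): both index updates above walk power-of-two
 * sized tables, so wrap-around is a mask rather than a modulo -- the
 * 512-entry complex noise table uses & 0x1ff and the period-4
 * sinusoid pattern uses & 3.
 */
static inline int wrap_index(int idx, int step, int table_len_pow2)
{
    return (idx + step) & (table_len_pow2 - 1);
}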
@@ -1654,54 +1718,39 @@ static void sbr_hf_assemble(float Y1[38][64][2],
void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
float* L, float* R)
{
int downsampled = ac->oc[1].m4ac.ext_sample_rate < sbr->sample_rate;
int downsampled = ac->m4ac.ext_sample_rate < sbr->sample_rate;
int ch;
int nch = (id_aac == TYPE_CPE) ? 2 : 1;
int err;
if (!sbr->kx_and_m_pushed) {
sbr->kx[0] = sbr->kx[1];
sbr->m[0] = sbr->m[1];
} else {
sbr->kx_and_m_pushed = 0;
}
if (sbr->start) {
sbr_dequant(sbr, id_aac);
}
for (ch = 0; ch < nch; ch++) {
/* decode channel */
sbr_qmf_analysis(&ac->dsp, &sbr->mdct_ana, &sbr->dsp, ch ? R : L, sbr->data[ch].analysis_filterbank_samples,
sbr_qmf_analysis(&ac->dsp, &sbr->mdct_ana, ch ? R : L, sbr->data[ch].analysis_filterbank_samples,
(float*)sbr->qmf_filter_scratch,
sbr->data[ch].W);
sbr_lf_gen(ac, sbr, sbr->X_low, sbr->data[ch].W);
sbr->data[ch].Ypos ^= 1;
if (sbr->start) {
sbr_hf_inverse_filter(&sbr->dsp, sbr->alpha0, sbr->alpha1, sbr->X_low, sbr->k[0]);
sbr_hf_inverse_filter(sbr->alpha0, sbr->alpha1, sbr->X_low, sbr->k[0]);
sbr_chirp(sbr, &sbr->data[ch]);
sbr_hf_gen(ac, sbr, sbr->X_high, sbr->X_low, sbr->alpha0, sbr->alpha1,
sbr->data[ch].bw_array, sbr->data[ch].t_env,
sbr->data[ch].bs_num_env);
// hf_adj
err = sbr_mapping(ac, sbr, &sbr->data[ch], sbr->data[ch].e_a);
if (!err) {
sbr_env_estimate(sbr->e_curr, sbr->X_high, sbr, &sbr->data[ch]);
sbr_gain_calc(ac, sbr, &sbr->data[ch], sbr->data[ch].e_a);
sbr_hf_assemble(sbr->data[ch].Y[sbr->data[ch].Ypos],
sbr->X_high, sbr, &sbr->data[ch],
sbr->data[ch].e_a);
}
sbr_mapping(ac, sbr, &sbr->data[ch], sbr->data[ch].e_a);
sbr_env_estimate(sbr->e_curr, sbr->X_high, sbr, &sbr->data[ch]);
sbr_gain_calc(ac, sbr, &sbr->data[ch], sbr->data[ch].e_a);
sbr_hf_assemble(sbr->data[ch].Y, sbr->X_high, sbr, &sbr->data[ch],
sbr->data[ch].e_a);
}
/* synthesis */
sbr_x_gen(sbr, sbr->X[ch],
sbr->data[ch].Y[1-sbr->data[ch].Ypos],
sbr->data[ch].Y[ sbr->data[ch].Ypos],
sbr->X_low, ch);
sbr_x_gen(sbr, sbr->X[ch], sbr->X_low, sbr->data[ch].Y, ch);
}
if (ac->oc[1].m4ac.ps == 1) {
if (ac->m4ac.ps == 1) {
if (sbr->ps.start) {
ff_ps_apply(ac->avctx, &sbr->ps, sbr->X[0], sbr->X[1], sbr->kx[1] + sbr->m[1]);
} else {
@@ -1710,12 +1759,12 @@ void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
nch = 2;
}
sbr_qmf_synthesis(&ac->dsp, &sbr->mdct, &sbr->dsp, L, sbr->X[0], sbr->qmf_filter_scratch,
sbr_qmf_synthesis(&ac->dsp, &sbr->mdct, L, sbr->X[0], sbr->qmf_filter_scratch,
sbr->data[0].synthesis_filterbank_samples,
&sbr->data[0].synthesis_filterbank_samples_offset,
downsampled);
if (nch == 2)
sbr_qmf_synthesis(&ac->dsp, &sbr->mdct, &sbr->dsp, R, sbr->X[1], sbr->qmf_filter_scratch,
sbr_qmf_synthesis(&ac->dsp, &sbr->mdct, R, sbr->X[1], sbr->qmf_filter_scratch,
sbr->data[1].synthesis_filterbank_samples,
&sbr->data[1].synthesis_filterbank_samples_offset,
downsampled);

View File

@@ -267,8 +267,8 @@ static const int8_t sbr_offset[6][16] = {
};
///< window coefficients for analysis/synthesis QMF banks
static DECLARE_ALIGNED(32, float, sbr_qmf_window_ds)[320];
static DECLARE_ALIGNED(32, float, sbr_qmf_window_us)[640] = {
static DECLARE_ALIGNED(16, float, sbr_qmf_window_ds)[320];
static DECLARE_ALIGNED(16, float, sbr_qmf_window_us)[640] = {
0.0000000000, -0.0005525286, -0.0005617692, -0.0004947518,
-0.0004875227, -0.0004893791, -0.0005040714, -0.0005226564,
-0.0005466565, -0.0005677802, -0.0005870930, -0.0006132747,
@@ -352,8 +352,7 @@ static DECLARE_ALIGNED(32, float, sbr_qmf_window_us)[640] = {
0.8537385600,
};
/* First two entries repeated at end to simplify SIMD implementations. */
const DECLARE_ALIGNED(16, float, ff_sbr_noise_table)[][2] = {
static const float sbr_noise_table[512][2] = {
{-0.99948153278296, -0.59483417516607}, { 0.97113454393991, -0.67528515225647},
{ 0.14130051758487, -0.95090983575689}, {-0.47005496701697, -0.37340549728647},
{ 0.80705063769351, 0.29653668284408}, {-0.38981478896926, 0.89572605717087},
@@ -610,7 +609,6 @@ const DECLARE_ALIGNED(16, float, ff_sbr_noise_table)[][2] = {
{-0.93412041758744, 0.41374052024363}, { 0.96063943315511, 0.93116709541280},
{ 0.97534253457837, 0.86150930812689}, { 0.99642466504163, 0.70190043427512},
{-0.94705089665984, -0.29580042814306}, { 0.91599807087376, -0.98147830385781},
{-0.99948153278296, -0.59483417516607}, { 0.97113454393991, -0.67528515225647},
};
#endif /* AVCODEC_AACSBRDATA_H */

View File

@@ -33,8 +33,8 @@
#include <stdint.h>
DECLARE_ALIGNED(32, float, ff_aac_kbd_long_1024)[1024];
DECLARE_ALIGNED(32, float, ff_aac_kbd_short_128)[128];
DECLARE_ALIGNED(16, float, ff_aac_kbd_long_1024)[1024];
DECLARE_ALIGNED(16, float, ff_aac_kbd_short_128)[128];
const uint8_t ff_aac_num_swb_1024[] = {
41, 41, 47, 49, 49, 51, 47, 47, 43, 43, 43, 40, 40

View File

@@ -44,8 +44,8 @@
/* @name window coefficients
* @{
*/
DECLARE_ALIGNED(32, extern float, ff_aac_kbd_long_1024)[1024];
DECLARE_ALIGNED(32, extern float, ff_aac_kbd_short_128)[128];
DECLARE_ALIGNED(16, extern float, ff_aac_kbd_long_1024)[1024];
DECLARE_ALIGNED(16, extern float, ff_aac_kbd_short_128)[128];
// @}
/* @name number of scalefactor window bands for long and short transform windows respectively

View File

@@ -34,26 +34,23 @@
typedef struct AascContext {
AVCodecContext *avctx;
GetByteContext gb;
AVFrame frame;
} AascContext;
#define FETCH_NEXT_STREAM_BYTE() \
if (stream_ptr >= buf_size) \
{ \
av_log(s->avctx, AV_LOG_ERROR, " AASC: stream ptr just went out of bounds (fetch)\n"); \
break; \
} \
stream_byte = buf[stream_ptr++];
static av_cold int aasc_decode_init(AVCodecContext *avctx)
{
AascContext *s = avctx->priv_data;
s->avctx = avctx;
switch (avctx->bits_per_coded_sample) {
case 16:
avctx->pix_fmt = PIX_FMT_RGB555;
break;
case 24:
avctx->pix_fmt = PIX_FMT_BGR24;
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unsupported bit depth: %d\n", avctx->bits_per_coded_sample);
return -1;
}
avctx->pix_fmt = PIX_FMT_BGR24;
avcodec_get_frame_defaults(&s->frame);
return 0;
@@ -78,38 +75,21 @@ static int aasc_decode_frame(AVCodecContext *avctx,
compr = AV_RL32(buf);
buf += 4;
buf_size -= 4;
switch (avctx->codec_tag) {
case MKTAG('A', 'A', 'S', '4'):
bytestream2_init(&s->gb, buf - 4, buf_size + 4);
ff_msrle_decode(avctx, (AVPicture*)&s->frame, 8, &s->gb);
break;
case MKTAG('A', 'A', 'S', 'C'):
switch(compr){
case 0:
stride = (avctx->width * 3 + 3) & ~3;
for(i = avctx->height - 1; i >= 0; i--){
if(avctx->width*3 > buf_size){
av_log(avctx, AV_LOG_ERROR, "Next line is beyond buffer bounds\n");
break;
}
memcpy(s->frame.data[0] + i*s->frame.linesize[0], buf, avctx->width*3);
buf += stride;
buf_size -= stride;
}
break;
case 1:
bytestream2_init(&s->gb, buf, buf_size);
ff_msrle_decode(avctx, (AVPicture*)&s->frame, 8, &s->gb);
ff_msrle_decode(avctx, (AVPicture*)&s->frame, 8, buf - 4, buf_size + 4);
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unknown compression type %d\n", compr);
return -1;
}
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unknown FourCC: %X\n", avctx->codec_tag);
return -1;
}
*data_size = sizeof(AVFrame);
*(AVFrame*)data = s->frame;
@@ -138,5 +118,5 @@ AVCodec ff_aasc_decoder = {
.close = aasc_decode_end,
.decode = aasc_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Autodesk RLE"),
.long_name = NULL_IF_CONFIG_SMALL("Autodesk RLE"),
};

Some files were not shown because too many files have changed in this diff.