Compare commits
1 commit

Author | SHA1 | Date
---|---|---
 | 9f8d8c57fb |
119 .gitignore (vendored)
@@ -1,78 +1,53 @@
*.a
.config
.version
*.o
*.d
*.def
*.dll
*.dylib
*.exe
*.exp
*.gcda
*.gcno
*.h.c
*.ilk
*.lib
*.pc
*.pdb
*.so
*.so.*
*.ver
*.ho
*-example
*-test
*_g
/.config
/.version
/ffmpeg
/ffplay
/ffprobe
/ffserver
/config.*
/coverage.info
/version.h
/doc/*.1
/doc/*.3
/doc/*.html
/doc/*.pod
/doc/config.texi
/doc/avoptions_codec.texi
/doc/avoptions_format.texi
/doc/examples/decoding_encoding
/doc/examples/demuxing
/doc/examples/filtering_audio
/doc/examples/filtering_video
/doc/examples/metadata
/doc/examples/muxing
/doc/examples/pc-uninstalled
/doc/examples/resampling_audio
/doc/examples/scaling_video
/doc/fate.txt
/doc/doxy/html/
/doc/print_options
/lcov/
/libavcodec/*_tablegen
/libavcodec/*_tables.c
/libavcodec/*_tables.h
/libavutil/avconfig.h
/tests/audiogen
/tests/base64
/tests/data/
/tests/rotozoom
/tests/tiny_psnr
/tests/tiny_ssim
/tests/videogen
/tests/vsynth1/
/tools/aviocat
/tools/ffbisect
/tools/bisect.need
/tools/cws2fws
/tools/fourcc2pixfmt
/tools/ffescape
/tools/ffeval
/tools/ffhash
/tools/graph2dot
/tools/ismindex
/tools/pktdumper
/tools/probetest
/tools/qt-faststart
/tools/trasher
/tools/seek_print
/tools/zmqsend
*.def
*.dll
*.lib
*.exp
config.*
doc/*.1
doc/*.html
doc/*.pod
doxy
ffmpeg
ffplay
ffprobe
ffserver
avconv
libavcodec/*_tablegen
libavcodec/*_tables.c
libavcodec/*_tables.h
libavcodec/codec_names.h
libavcodec/libavcodec*
libavcore/libavcore*
libavdevice/libavdevice*
libavfilter/libavfilter*
libavformat/libavformat*
libavutil/avconfig.h
libavutil/libavutil*
libpostproc/libpostproc*
libswresample/libswresample*
libswscale/libswscale*
tests/audiogen
tests/base64
tests/data
tests/rotozoom
tests/tiny_psnr
tests/videogen
tests/vsynth1
tests/vsynth2
tools/cws2fws
tools/graph2dot
tools/lavfi-showfiltfmts
tools/pktdumper
tools/probetest
tools/qt-faststart
tools/trasher
version.h

@@ -500,3 +500,5 @@ necessary. Here is a sample; alter the names:
Ty Coon, President of Vice

That's all there is to it!
59 CREDITS
@@ -1,6 +1,55 @@
See the Git history of the project (git://source.ffmpeg.org/ffmpeg) to
get the names of people who have contributed to FFmpeg.
This file contains the names of some of the people who have contributed to
FFmpeg. The names are sorted alphabetically by last name. As this file is
currently quite outdated and git serves as a much better tool for determining
authorship, it remains here for historical reasons only.

To check the log, you can type the command "git log" in the FFmpeg
source directory, or browse the online repository at
http://source.ffmpeg.org.
Dénes Balatoni
Michel Bardiaux
Fabrice Bellard
Patrice Bensoussan
Alex Beregszaszi
BERO
Thilo Borgmann
Mario Brito
Ronald Bultje
Alex Converse
Maarten Daniels
Reimar Doeffinger
Tim Ferguson
Brian Foley
Arpad Gereoffy
Philip Gladstone
Vladimir Gneushev
Roine Gustafsson
David Hammerton
Wolfgang Hesseler
Marc Hoffman
Falk Hueffner
Aurélien Jacobs
Steven Johnson
Zdenek Kabelac
Robin Kay
Todd Kirby
Nick Kurshev
Benjamin Larsson
Loïc Le Loarer
Daniel Maas
Mike Melanson
Loren Merritt
Jeff Muizelaar
Michael Niedermayer
François Revol
Peter Ross
Måns Rullgård
Stefano Sabatini
Roman Shaposhnik
Oded Shimon
Dieter Shirley
Konstantin Shishkov
Juan J. Sierralta
Ewald Snel
Sascha Sommer
Leon van Stuivenberg
Roberto Togni
Lionel Ulmer
Reynaldo Verdejo
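The CREDITS text above defers to the git history for an authoritative contributor list. A minimal sketch of that lookup from a checkout (the repository URL is the one quoted above; `git shortlog` is offered here only as a common companion to the plain `git log` the file suggests):

```sh
# inside a clone of git://source.ffmpeg.org/ffmpeg
git log            # full history, including author names, as the file suggests
git shortlog -sn   # one line per contributor, sorted by number of commits
```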
316 Changelog
@@ -1,314 +1,6 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.

version 2.0:

- curves filter
- reference-counting for AVFrame and AVPacket data
- ffmpeg now fails when input options are used for output file
  or vice versa
- support for Monkey's Audio versions from 3.93
- perms and aperms filters
- audio filtering support in ffplay
- 10% faster aac encoding on x86 and MIPS
- sine audio filter source
- WebP demuxing and decoding support
- new ffmpeg options -filter_script and -filter_complex_script, which allow a
  filtergraph description to be read from a file
- OpenCL support
- audio phaser filter
- separatefields filter
- libquvi demuxer
- uniform options syntax across all filters
- telecine filter
- new interlace filter
- smptehdbars source
- inverse telecine filters (fieldmatch and decimate)
- colorbalance filter
- colorchannelmixer filter
- The matroska demuxer can now output proper verbatim ASS packets. It will
  become the default at the next libavformat major bump.
- decent native animated GIF encoding
- asetrate filter
- interleave filter
- timeline editing with filters
- vidstabdetect and vidstabtransform filters for video stabilization using
  the vid.stab library
- astats filter
- trim and atrim filters
- ffmpeg -t and -ss (output-only) options are now sample-accurate when
  transcoding audio
- Matroska muxer can now put the index at the beginning of the file.
- extractplanes filter
- avectorscope filter
- ADPCM DTK decoder
- ADP demuxer
- RSD demuxer
- RedSpark demuxer
- ADPCM IMA Radical decoder
- zmq filters
- DCT denoiser filter (dctdnoiz)
- Wavelet denoiser filter ported from libmpcodecs as owdenoise (formerly "ow")
- Apple Intermediate Codec decoder
- Escape 130 video decoder
- FTP protocol support
- V4L2 output device
- 3D LUT filter (lut3d)
- SMPTE 302M audio encoder
- support for slice multithreading in libavfilter
- Hald CLUT support (generation and filtering)
- VC-1 interlaced B-frame support
- support for WavPack muxing (raw and in Matroska)
- XVideo output device
- vignette filter
- True Audio (TTA) encoder
- Go2Webinar decoder
- mcdeint filter ported from libmpcodecs
- sab filter ported from libmpcodecs
- ffprobe -show_chapters option
- WavPack encoding through libwavpack
- rotate filter
- spp filter ported from libmpcodecs
- libgme support
- psnr filter


version 1.2:

- VDPAU hardware acceleration through normal hwaccel
- SRTP support
- Error diffusion dither in Swscale
- Chained Ogg support
- Theora Midstream reconfiguration support
- EVRC decoder
- audio fade filter
- filtering audio with unknown channel layout
- allpass, bass, bandpass, bandreject, biquad, equalizer, highpass, lowpass
  and treble audio filter
- improved showspectrum filter, with multichannel support and sox-like colors
- histogram filter
- tee muxer
- il filter ported from libmpcodecs
- support ID3v2 tags in ASF files
- encrypted TTA stream decoding support
- RF64 support in WAV muxer
- noise filter ported from libmpcodecs
- Subtitles character encoding conversion
- blend filter
- stereo3d filter ported from libmpcodecs


version 1.1:

- stream disposition information printing in ffprobe
- filter for loudness analysis following EBU R128
- Opus encoder using libopus
- ffprobe -select_streams option
- Pinnacle TARGA CineWave YUV16 decoder
- TAK demuxer, decoder and parser
- DTS-HD demuxer
- remove -same_quant, it hasn't worked for years
- FFM2 support
- X-Face image encoder and decoder
- 24-bit FLAC encoding
- multi-channel ALAC encoding up to 7.1
- metadata (INFO tag) support in WAV muxer
- subtitles raw text decoder
- support for building DLLs using MSVC
- LVF demuxer
- ffescape tool
- metadata (info chunk) support in CAF muxer
- field filter ported from libmpcodecs
- AVR demuxer
- geq filter ported from libmpcodecs
- remove ffserver daemon mode
- AST muxer/demuxer
- new expansion syntax for drawtext
- BRender PIX image decoder
- ffprobe -show_entries option
- ffprobe -sections option
- ADPCM IMA Dialogic decoder
- BRSTM demuxer
- animated GIF decoder and demuxer
- PVF demuxer
- subtitles filter
- IRCAM muxer/demuxer
- Paris Audio File demuxer
- Virtual concatenation demuxer
- VobSub demuxer
- JSON captions for TED talks decoding support
- SOX Resampler support in libswresample
- aselect filter
- SGI RLE 8-bit decoder
- Silicon Graphics Motion Video Compressor 1 & 2 decoder
- Silicon Graphics Movie demuxer
- apad filter
- Resolution & pixel format change support with multithreading for H.264
- documentation split into per-component manuals
- pp (postproc) filter ported from MPlayer
- NIST Sphere demuxer
- MPL2, VPlayer, MPlayer, AQTitle, PJS and SubViewer v1 subtitles demuxers and decoders
- Sony Wave64 muxer
- adobe and limelight publisher authentication in RTMP
- data: URI scheme
- support building on the Plan 9 operating system
- kerndeint filter ported from MPlayer
- histeq filter ported from VirtualDub
- Megalux Frame demuxer
- 012v decoder
- Improved AVC Intra decoding support


version 1.0:

- INI and flat output in ffprobe
- Scene detection in libavfilter
- Indeo Audio decoder
- channelsplit audio filter
- setnsamples audio filter
- atempo filter
- ffprobe -show_data option
- RTMPT protocol support
- iLBC encoding/decoding via libilbc
- Microsoft Screen 1 decoder
- join audio filter
- audio channel mapping filter
- Microsoft ATC Screen decoder
- RTSP listen mode
- TechSmith Screen Codec 2 decoder
- AAC encoding via libfdk-aac
- Microsoft Expression Encoder Screen decoder
- RTMPS protocol support
- RTMPTS protocol support
- RTMPE protocol support
- RTMPTE protocol support
- showwaves and showspectrum filter
- LucasArts SMUSH playback support
- SAMI, RealText and SubViewer demuxers and decoders
- Heart Of Darkness PAF playback support
- iec61883 device
- asettb filter
- new option: -progress
- 3GPP Timed Text encoder/decoder
- GeoTIFF decoder support
- ffmpeg -(no)stdin option
- Opus decoder using libopus
- caca output device using libcaca
- alphaextract and alphamerge filters
- concat filter
- flite filter
- Canopus Lossless Codec decoder
- bitmap subtitles in filters (experimental and temporary)
- MP2 encoding via TwoLAME
- bmp parser
- smptebars source
- asetpts filter
- hue filter
- ICO muxer
- SubRip encoder and decoder without embedded timing
- edge detection filter
- framestep filter
- ffmpeg -shortest option is now per-output file
- -pass and -passlogfile are now per-output stream
- volume measurement filter
- Ut Video encoder
- Microsoft Screen 2 decoder
- smartblur filter ported from MPlayer
- CPiA decoder
- decimate filter ported from MPlayer
- RTP depacketization of JPEG
- Smooth Streaming live segmenter muxer
- F4V muxer
- sendcmd and asendcmd filters
- WebVTT demuxer and decoder (simple tags supported)
- RTP packetization of JPEG
- faststart option in the MOV/MP4 muxer
- support for building with MSVC


version 0.11:

- Fixes: CVE-2012-2772, CVE-2012-2774, CVE-2012-2775, CVE-2012-2776, CVE-2012-2777,
  CVE-2012-2779, CVE-2012-2782, CVE-2012-2783, CVE-2012-2784, CVE-2012-2785,
  CVE-2012-2786, CVE-2012-2787, CVE-2012-2788, CVE-2012-2789, CVE-2012-2790,
  CVE-2012-2791, CVE-2012-2792, CVE-2012-2793, CVE-2012-2794, CVE-2012-2795,
  CVE-2012-2796, CVE-2012-2797, CVE-2012-2798, CVE-2012-2799, CVE-2012-2800,
  CVE-2012-2801, CVE-2012-2802, CVE-2012-2803, CVE-2012-2804,
- v408 Quicktime and Microsoft AYUV Uncompressed 4:4:4:4 encoder and decoder
- setfield filter
- CDXL demuxer and decoder
- Apple ProRes encoder
- ffprobe -count_packets and -count_frames options
- Sun Rasterfile Encoder
- ID3v2 attached pictures reading and writing
- WMA Lossless decoder
- bluray protocol
- blackdetect filter
- libutvideo encoder wrapper (--enable-libutvideo)
- swapuv filter
- bbox filter
- XBM encoder and decoder
- RealAudio Lossless decoder
- ZeroCodec decoder
- tile video filter
- Metal Gear Solid: The Twin Snakes demuxer
- OpenEXR image decoder
- removelogo filter
- drop support for ffmpeg without libavfilter
- drawtext video filter: fontconfig support
- ffmpeg -benchmark_all option
- super2xsai filter ported from libmpcodecs
- add libavresample audio conversion library for compatibility
- MicroDVD decoder
- Avid Meridien (AVUI) encoder and decoder
- accept + prefix to -pix_fmt option to disable automatic conversions.
- complete audio filtering in libavfilter and ffmpeg
- add fps filter
- vorbis parser
- png parser
- audio mix filter
- ffv1: support (draft) version 1.3


version 0.10:

- Fixes: CVE-2011-3929, CVE-2011-3934, CVE-2011-3935, CVE-2011-3936,
  CVE-2011-3937, CVE-2011-3940, CVE-2011-3941, CVE-2011-3944,
  CVE-2011-3945, CVE-2011-3946, CVE-2011-3947, CVE-2011-3949,
  CVE-2011-3950, CVE-2011-3951, CVE-2011-3952
- v410 Quicktime Uncompressed 4:4:4 10-bit encoder and decoder
- SBaGen (SBG) binaural beats script demuxer
- OpenMG Audio muxer
- Timecode extraction in DV and MOV
- thumbnail video filter
- XML output in ffprobe
- asplit audio filter
- tinterlace video filter
- astreamsync audio filter
- amerge audio filter
- ISMV (Smooth Streaming) muxer
- GSM audio parser
- SMJPEG muxer
- XWD encoder and decoder
- Automatic thread count based on detection number of (available) CPU cores
- y41p Brooktree Uncompressed 4:1:1 12-bit encoder and decoder
- ffprobe -show_error option
- Avid 1:1 10-bit RGB Packer codec
- v308 Quicktime Uncompressed 4:4:4 encoder and decoder
- yuv4 libquicktime packed 4:2:0 encoder and decoder
- ffprobe -show_frames option
- silencedetect audio filter
- ffprobe -show_program_version, -show_library_versions, -show_versions options
- rv34: frame-level multi-threading
- optimized iMDCT transform on x86 using SSE for mpegaudiodec
- Improved PGS subtitle decoder
- dumpgraph option to lavfi device
- r210 and r10k encoders
- ffwavesynth decoder
- aviocat tool
- ffeval tool


version 0.9:

- openal input device added
@@ -433,7 +125,7 @@ easier to use. The changes are:
- pan audio filter
- IFF Amiga Continuous Bitmap (ACBM) decoder
- ass filter
- CRI ADX audio format muxer and demuxer
- CRI ADX audio format demuxer
- Playstation Portable PMP format demuxer
- Microsoft Windows ICO demuxer
- life source
@@ -442,13 +134,11 @@ easier to use. The changes are:
- new option: -report
- Dxtory capture format decoder
- cellauto source
- Simple segmenting muxer
- Indeo 4 decoder
- SMJPEG demuxer


version 0.8:

- many many things we forgot because we rather write code than changelogs
- WebM support in Matroska de/muxer
- low overhead Ogg muxing
@@ -1012,7 +702,7 @@ version 0.4.5:
- MPEG-4 vol header fixes (Jonathan Marsden <snmjbm at pacbell.net>)
- ARM optimizations (Lionel Ulmer <lionel.ulmer at free.fr>).
- Windows porting of file converter
- added MJPEG raw format (input/output)
- added MJPEG raw format (input/ouput)
- added JPEG image format support (input/output)
Doxyfile

@@ -31,20 +31,14 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 2.0.2

# With the PROJECT_LOGO tag one can specify a logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
# pixels and the maximum width should not exceed 200 pixels. Doxygen will
# copy the logo to the output directory.
PROJECT_LOGO =
PROJECT_NUMBER = 0.9

# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
# base path where the generated documentation will be put.
# If a relative path is entered, it will be relative to the location
# where doxygen was started. If left blank the current directory will be used.

OUTPUT_DIRECTORY = doc/doxy
OUTPUT_DIRECTORY = doxy

# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
# 4096 sub-directories (in 2 levels) under the output directory of each output
@@ -277,7 +271,7 @@ SUBGROUPING = YES
# be useful for C code in case the coding convention dictates that all compound
# types are typedef'ed and only the typedef is referenced, never the tag name.

TYPEDEF_HIDES_STRUCT = YES
TYPEDEF_HIDES_STRUCT = NO

# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
# determine which symbols to keep in memory and which to flush to disk.
@@ -288,7 +282,7 @@ TYPEDEF_HIDES_STRUCT = YES
# causing a significant performance penalty.
# If the system has enough physical memory increasing the cache will improve the
# performance by keeping more symbols in memory. Note that the value works on
# a logarithmic scale so increasing the size by one will roughly double the
# a logarithmic scale so increasing the size by one will rougly double the
# memory usage. The cache size is given by this formula:
# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
# corresponding to a cache size of 2^16 = 65536 symbols
@@ -409,7 +403,7 @@ INLINE_INFO = YES
# alphabetically by member name. If set to NO the members will appear in
# declaration order.

SORT_MEMBER_DOCS = NO
SORT_MEMBER_DOCS = YES

# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
# brief documentation of file, namespace and class members alphabetically
@@ -489,6 +483,12 @@ MAX_INITIALIZER_LINES = 30

SHOW_USED_FILES = YES

# If the sources in your project are distributed over multiple directories
# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
# in the documentation. The default is NO.

SHOW_DIRECTORIES = NO

# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
# This will remove the Files entry from the Quick Index and from the
# Folder Tree View (if specified). The default is YES.
@@ -625,7 +625,8 @@ EXCLUDE_SYMLINKS = NO
# for example use the pattern */test/*

EXCLUDE_PATTERNS = *.git \
                   *.d
                   *.d \
                   avconv.c

# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
@@ -639,14 +640,15 @@ EXCLUDE_SYMBOLS =
# directories that contain example code fragments that are included (see
# the \include command).

EXAMPLE_PATH = doc/examples/
EXAMPLE_PATH = libavcodec/ \
               libavformat/

# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
# and *.h) to filter out the source-files in the directories. If left
# blank all files are included.

EXAMPLE_PATTERNS = *.c
EXAMPLE_PATTERNS = *-example.c

# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude
@@ -709,7 +711,7 @@ INLINE_SOURCES = NO
# doxygen to hide any special comment blocks from generated source code
# fragments. Normal C and C++ comments will always remain visible.

STRIP_CODE_COMMENTS = NO
STRIP_CODE_COMMENTS = YES

# If the REFERENCED_BY_RELATION tag is set to YES
# then for each documented function all documented
@@ -759,7 +761,7 @@ ALPHABETICAL_INDEX = YES
# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
# in which this list will be split (can be a number in the range [1..20])

COLS_IN_ALPHA_INDEX = 2
COLS_IN_ALPHA_INDEX = 5

# In case all classes in a project start with a common prefix, all
# classes will be put under the same header in the alphabetical index.
@@ -793,13 +795,13 @@ HTML_FILE_EXTENSION = .html
# each generated HTML page. If it is left blank doxygen will generate a
# standard header.

#HTML_HEADER = doc/doxy/header.html
HTML_HEADER =

# The HTML_FOOTER tag can be used to specify a personal HTML footer for
# each generated HTML page. If it is left blank doxygen will generate a
# standard footer.

#HTML_FOOTER = doc/doxy/footer.html
HTML_FOOTER =

# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
# style sheet that is used by each HTML page. It can be used to
@@ -808,7 +810,7 @@ HTML_FILE_EXTENSION = .html
# the style sheet file to the HTML output directory, so don't put your own
# stylesheet in the HTML output directory as well, or it will be erased!

#HTML_STYLESHEET = doc/doxy/doxy_stylesheet.css
HTML_STYLESHEET =

# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
# Doxygen will adjust the colors in the stylesheet and background images
@@ -818,7 +820,7 @@ HTML_FILE_EXTENSION = .html
# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
# The allowed range is 0 to 359.

#HTML_COLORSTYLE_HUE = 120
HTML_COLORSTYLE_HUE = 120

# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
# the colors in the HTML output. For a value of 0 the output will use
@@ -841,6 +843,12 @@ HTML_COLORSTYLE_GAMMA = 80

HTML_TIMESTAMP = YES

# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
# files or namespaces will be aligned in HTML using tables. If set to
# NO a bullet list will be used.

HTML_ALIGN_MEMBERS = YES

# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded. For this to work a browser that supports
@@ -1021,6 +1029,11 @@ ENUM_VALUES_PER_LINE = 4

GENERATE_TREEVIEW = NO

# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
# and Class Hierarchy pages using a tree view instead of an ordered list.

USE_INLINE_TREES = NO

# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
# used to set the initial width (in pixels) of the frame in which the tree
# is shown.
@@ -1356,9 +1369,14 @@ INCLUDE_FILE_PATTERNS =
# instead of the = operator.

PREDEFINED = "__attribute__(x)=" \
             "RENAME(x)=x ## _TMPL" \
             "DEF(x)=x ## _TMPL" \
             HAVE_AV_CONFIG_H \
             HAVE_MMX \
             HAVE_MMX2 \
             HAVE_AMD3DNOW \
             "DECLARE_ALIGNED(a,t,n)=t n" \
             "offsetof(x,y)=0x42" \
             av_alloc_size \
             "offsetof(x,y)=0x42"

# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
# this tag can be used to specify a list of macro names that should be expanded.
92 LICENSE
@@ -1,4 +1,5 @@
FFmpeg:
-------

Most files in FFmpeg are under the GNU Lesser General Public License version 2.1
or later (LGPL v2.1+). Read the file COPYING.LGPLv2.1 for details. Some other
@@ -13,90 +14,33 @@ configure to activate them. In this case, FFmpeg's license changes to GPL v2+.
Specifically, the GPL parts of FFmpeg are

- libpostproc
- libmpcodecs
- optional x86 optimizations in the files
  libavcodec/x86/idct_mmx.c
- libutvideo encoding/decoding wrappers in
  libavcodec/libutvideo*.cpp
- the X11 grabber in libavdevice/x11grab.c
- the swresample test app in
  libswresample/swresample-test.c
- the texi2pod.pl tool
- the following filters in libavfilter:
    - f_ebur128.c
    - vf_blackframe.c
    - vf_boxblur.c
    - vf_colormatrix.c
    - vf_cropdetect.c
    - vf_decimate.c
    - vf_delogo.c
    - vf_geq.c
    - vf_histeq.c
    - vf_hqdn3d.c
    - vf_hue.c
    - vf_kerndeint.c
    - vf_mcdeint.c
    - vf_mp.c
    - vf_noise.c
    - vf_owdenoise.c
    - vf_pp.c
    - vf_sab.c
    - vf_smartblur.c
    - vf_spp.c
    - vf_stereo3d.c
    - vf_super2xsai.c
    - vf_tinterlace.c
    - vf_yadif.c
    - vsrc_mptestsrc.c

There are a handful of files under other licensing terms, namely:

* The files libavcodec/jfdctfst.c, libavcodec/jfdctint.c, libavcodec/jrevdct.c
  are taken from libjpeg, see the top of the files for licensing details.

Should you, for whatever reason, prefer to use version 3 of the (L)GPL, then
the configure parameter --enable-version3 will activate this licensing option
for you. Read the file COPYING.LGPLv3 or, if you have enabled GPL parts,
COPYING.GPLv3 to learn the exact legal terms that apply in this case.

There are a handful of files under other licensing terms, namely:

* The files libavcodec/jfdctfst.c, libavcodec/jfdctint_template.c and
  libavcodec/jrevdct.c are taken from libjpeg, see the top of the files for
  licensing details. Specifically note that you must credit the IJG in the
  documentation accompanying your program if you only distribute executables.
  You must also indicate any changes including additions and deletions to
  those three files in the documentation.
external libraries:
-------------------

Some external libraries, e.g. libx264, are under GPL and can be used in
conjunction with FFmpeg. They require --enable-gpl to be passed to configure
as well.

external libraries
==================
The OpenCORE external libraries are under the Apache License 2.0. That license
is incompatible with the LGPL v2.1 and the GPL v2, but not with version 3 of
those licenses. So to combine the OpenCORE libraries with FFmpeg, the license
version needs to be upgraded by passing --enable-version3 to configure.

FFmpeg can be combined with a number of external libraries, which sometimes
affect the licensing of binaries resulting from the combination.

compatible libraries
--------------------

The following libraries are under GPL:
- frei0r
- libcdio
- libutvideo
- libvidstab
- libx264
- libxavs
- libxvid
When combining them with FFmpeg, FFmpeg needs to be licensed as GPL as well by
passing --enable-gpl to configure.

The OpenCORE and VisualOn libraries are under the Apache License 2.0. That
license is incompatible with the LGPL v2.1 and the GPL v2, but not with
version 3 of those licenses. So to combine these libraries with FFmpeg, the
license version needs to be upgraded by passing --enable-version3 to configure.

incompatible libraries
----------------------

The Fraunhofer AAC library, FAAC and aacplus are under licenses which
are incompatible with the GPLv2 and v3. We do not know for certain if their
licenses are compatible with the LGPL.
If you wish to enable these libraries, pass --enable-nonfree to configure.
But note that if you enable any of these libraries the resulting binary will
be under a complex license mix that is more restrictive than the LGPL and that
may result in additional obligations. It is possible that these
restrictions cause the resulting binary to be unredistributable.
The nonfree external libraries libfaac and libaacplus can be hooked up in FFmpeg.
You need to pass --enable-nonfree to configure to enable it. Employ this option
with care as FFmpeg then becomes nonfree and unredistributable.
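As a rough illustration of the licensing switches described in the text above, here is a hedged sketch of how the different license levels might be selected at configure time; --enable-gpl, --enable-version3 and --enable-nonfree are quoted from the text, while the specific external-library flags (--enable-libx264, --enable-libfdk-aac) are assumptions based on the libraries named above:

```sh
./configure                                        # plain LGPL v2.1+ build
./configure --enable-gpl --enable-libx264          # GPL build, needed for GPL libraries
./configure --enable-gpl --enable-version3         # upgrade to (L)GPL v3, e.g. for OpenCORE/VisualOn
./configure --enable-nonfree --enable-libfdk-aac   # nonfree, unredistributable build
```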
138 MAINTAINERS
@@ -4,11 +4,11 @@ FFmpeg maintainers
Below is a list of the people maintaining different parts of the
FFmpeg code.

Please try to keep entries where you are the maintainer up to date!
Please try to keep entries where you are the maintainer upto date!

Names in () mean that the maintainer currently has no time to maintain the code.
A (CC <address>) after the name means that the maintainer prefers to be CC-ed on
patches and related discussions.
A CC after the name means that the maintainer prefers to be CC-ed on patches
and related discussions.


Project Leader
@@ -46,7 +46,7 @@ Miscellaneous Areas
|
||||
documentation Mike Melanson
|
||||
website Robert Swain, Lou Logan
|
||||
build system (configure,Makefiles) Diego Biurrun, Mans Rullgard
|
||||
project server Árpád Gereöffy, Michael Niedermayer, Reimar Döffinger, Alexander Strasser
|
||||
project server Árpád Gereöffy, Michael Niedermayer, Reimar Döffinger
|
||||
mailinglists Michael Niedermayer, Baptiste Coudurier, Lou Logan
|
||||
presets Robert Swain
|
||||
metadata subsystem Aurelien Jacobs
|
||||
@@ -62,20 +62,11 @@ Internal Interfaces:
|
||||
libavutil/common.h Michael Niedermayer
|
||||
|
||||
Other:
|
||||
bprint Nicolas George
|
||||
bswap.h
|
||||
des Reimar Doeffinger
|
||||
float_dsp Loren Merritt
|
||||
hash Reimar Doeffinger
|
||||
intfloat* Michael Niedermayer
|
||||
integer.c, integer.h Michael Niedermayer
|
||||
lzo Reimar Doeffinger
|
||||
mathematics.c, mathematics.h Michael Niedermayer
|
||||
opencl.c, opencl.h Wei Gao
|
||||
rational.c, rational.h Michael Niedermayer
|
||||
rc4 Reimar Doeffinger
|
||||
ripemd.c, ripemd.h James Almer
|
||||
timecode Clément Bœsch
|
||||
mathematics.c, mathematics.h Michael Niedermayer
|
||||
integer.c, integer.h Michael Niedermayer
|
||||
bswap.h
|
||||
|
||||
|
||||
libavcodec
|
||||
@@ -96,8 +87,6 @@ Generic Parts:
|
||||
bitstream.c, bitstream.h Michael Niedermayer
|
||||
CABAC:
|
||||
cabac.h, cabac.c Michael Niedermayer
|
||||
codec names:
|
||||
codec_names.sh Nicolas George
|
||||
DSP utilities:
|
||||
dsputils.c, dsputils.h Michael Niedermayer
|
||||
entropy coding:
|
||||
@@ -138,32 +127,28 @@ Codecs:
|
||||
binkaudio.c Peter Ross
|
||||
bmp.c Mans Rullgard, Kostya Shishkov
|
||||
cavs* Stefan Gehrer
|
||||
cdxl.c Paul B Mahol
|
||||
celp_filters.* Vitor Sessak
|
||||
cinepak.c Roberto Togni
|
||||
cljr Alex Beregszaszi
|
||||
cllc.c Derek Buitenhuis
|
||||
cook.c, cookdata.h Benjamin Larsson
|
||||
cpia.c Stephan Hilb
|
||||
crystalhd.c Philip Langdale
|
||||
cscd.c Reimar Doeffinger
|
||||
dca.c Kostya Shishkov, Benjamin Larsson
|
||||
dnxhd* Baptiste Coudurier
|
||||
dpcm.c Mike Melanson
|
||||
dv.c Roman Shaposhnik
|
||||
dxa.c Kostya Shishkov
|
||||
dv.c Roman Shaposhnik
|
||||
eacmv*, eaidct*, eat* Peter Ross
|
||||
ffv1.c Michael Niedermayer
|
||||
ffwavesynth.c Nicolas George
|
||||
flac* Justin Ruggles
|
||||
flashsv* Benjamin Larsson
|
||||
flicvideo.c Mike Melanson
|
||||
g722.c Martin Storsjo
|
||||
g726.c Roman Shaposhnik
|
||||
gifdec.c Baptiste Coudurier
|
||||
h264* Loren Merritt, Michael Niedermayer
|
||||
h261* Michael Niedermayer
|
||||
h263* Michael Niedermayer
|
||||
h264* Loren Merritt, Michael Niedermayer
|
||||
huffyuv.c Michael Niedermayer
|
||||
idcinvideo.c Mike Melanson
|
||||
imc* Benjamin Larsson
|
||||
@@ -171,41 +156,35 @@ Codecs:
|
||||
indeo5* Kostya Shishkov
|
||||
interplayvideo.c Mike Melanson
|
||||
ivi* Kostya Shishkov
|
||||
jacosub* Clément Bœsch
|
||||
jpeg2000* Nicolas Bertrand
|
||||
jpeg_ls.c Kostya Shishkov
|
||||
jvdec.c Peter Ross
|
||||
kmvc.c Kostya Shishkov
|
||||
lcl*.c Roberto Togni, Reimar Doeffinger
|
||||
libcelt_dec.c Nicolas George
|
||||
libdirac* David Conrad
|
||||
libgsm.c Michel Bardiaux
|
||||
libdirac* David Conrad
|
||||
libopenjpeg.c Jaikrishnan Menon
|
||||
libopenjpegenc.c Michael Bradshaw
|
||||
libschroedinger* David Conrad
|
||||
libspeexdec.c Justin Ruggles
|
||||
libtheoraenc.c David Conrad
|
||||
libutvideo* Derek Buitenhuis
|
||||
libvorbis.c David Conrad
|
||||
libx264.c Mans Rullgard, Jason Garrett-Glaser
|
||||
libxavs.c Stefan Gehrer
|
||||
libx264.c Mans Rullgard, Jason Garrett-Glaser
|
||||
loco.c Kostya Shishkov
|
||||
lzo.h, lzo.c Reimar Doeffinger
|
||||
mdec.c Michael Niedermayer
|
||||
mimic.c Ramiro Polla
|
||||
mjpeg*.c Michael Niedermayer
|
||||
mjpeg.c Michael Niedermayer
|
||||
mlp* Ramiro Polla
|
||||
mmvideo.c Peter Ross
|
||||
mpc* Kostya Shishkov
|
||||
mpeg12.c, mpeg12data.h Michael Niedermayer
|
||||
mpegvideo.c, mpegvideo.h Michael Niedermayer
|
||||
mqc* Nicolas Bertrand
|
||||
msmpeg4.c, msmpeg4data.h Michael Niedermayer
|
||||
msrle.c Mike Melanson
|
||||
msvideo1.c Mike Melanson
|
||||
nellymoserdec.c Benjamin Larsson
|
||||
nuv.c Reimar Doeffinger
|
||||
paf.* Paul B Mahol
|
||||
pcx.c Ivo van Poorten
|
||||
pgssubdec.c Reimar Doeffinger
|
||||
ptx.c Ivo van Poorten
|
||||
@@ -225,13 +204,11 @@ Codecs:
|
||||
s3tc* Ivo van Poorten
|
||||
smacker.c Kostya Shishkov
|
||||
smc.c Mike Melanson
|
||||
smvjpegdec.c Ash Hughes
|
||||
snow.c Michael Niedermayer, Loren Merritt
|
||||
sonic.c Alex Beregszaszi
|
||||
srt* Aurelien Jacobs
|
||||
sunrast.c Ivo van Poorten
|
||||
svq3.c Michael Niedermayer
|
||||
tak* Paul B Mahol
|
||||
targa.c Kostya Shishkov
|
||||
tiff.c Kostya Shishkov
|
||||
truemotion1* Mike Melanson
|
||||
@@ -239,19 +216,15 @@ Codecs:
|
||||
truespeech.c Kostya Shishkov
|
||||
tscc.c Kostya Shishkov
|
||||
tta.c Alex Beregszaszi, Jaikrishnan Menon
|
||||
ttaenc.c Paul B Mahol
|
||||
txd.c Ivo van Poorten
|
||||
ulti* Kostya Shishkov
|
||||
v410*.c Derek Buitenhuis
|
||||
vb.c Kostya Shishkov
|
||||
vble.c Derek Buitenhuis
|
||||
vc1* Kostya Shishkov
|
||||
vcr1.c Michael Niedermayer
|
||||
vda_h264_dec.c Xidorn Quan
|
||||
vima.c Paul B Mahol
|
||||
vmnc.c Kostya Shishkov
|
||||
vorbis_dec.c Denes Balatoni, David Conrad
|
||||
vorbis_enc.c Oded Shimon
|
||||
vorbis_dec.c Denes Balatoni, David Conrad
|
||||
vp3* Mike Melanson
|
||||
vp5 Aurelien Jacobs
|
||||
vp6 Aurelien Jacobs
|
||||
@@ -263,11 +236,8 @@ Codecs:
|
||||
wmv2.c Michael Niedermayer
|
||||
wnv1.c Kostya Shishkov
|
||||
xan.c Mike Melanson
|
||||
xbm* Paul B Mahol
|
||||
xl.c Kostya Shishkov
|
||||
xvmc.c Ivan Kalvachev
|
||||
xwd* Paul B Mahol
|
||||
zerocodec.c Derek Buitenhuis
|
||||
zmbv* Kostya Shishkov
|
||||
|
||||
Hardware acceleration:
|
||||
@@ -285,31 +255,18 @@ libavdevice
|
||||
libavdevice/avdevice.h
|
||||
|
||||
|
||||
dshow.c Roger Pack
|
||||
iec61883.c Georg Lippitsch
|
||||
libdc1394.c Roman Shaposhnik
|
||||
v4l2.c Luca Abeni
|
||||
vfwcap.c Ramiro Polla
|
||||
|
||||
|
||||
libavfilter
|
||||
===========
|
||||
|
||||
Generic parts:
|
||||
graphdump.c Nicolas George
|
||||
|
||||
Filters:
|
||||
af_amerge.c Nicolas George
|
||||
af_aresample.c Michael Niedermayer
|
||||
af_astreamsync.c Nicolas George
|
||||
af_atempo.c Pavel Koshevoy
|
||||
af_pan.c Nicolas George
|
||||
vf_delogo.c Jean Delvare (CC <khali@linux-fr.org>)
|
||||
vf_drawbox.c/drawgrid Andrey Utkin
|
||||
vf_scale.c Michael Niedermayer
|
||||
Video filters:
|
||||
vsrc_mandelbrot.c Michael Niedermayer
|
||||
vf_yadif.c Michael Niedermayer
|
||||
|
||||
Sources:
|
||||
vsrc_mandelbrot.c Michael Niedermayer
|
||||
|
||||
libavformat
|
||||
===========
|
||||
@@ -324,27 +281,17 @@ Generic parts:
|
||||
Muxers/Demuxers:
|
||||
4xm.c Mike Melanson
|
||||
adtsenc.c Robert Swain
|
||||
afc.c Paul B Mahol
|
||||
aiffdec.c Baptiste Coudurier, Matthieu Bouron
|
||||
aiffenc.c Baptiste Coudurier, Matthieu Bouron
|
||||
aiff.c Baptiste Coudurier
|
||||
ape.c Kostya Shishkov
|
||||
ass* Aurelien Jacobs
|
||||
astdec.c Paul B Mahol
|
||||
astenc.c James Almer
|
||||
avi* Michael Niedermayer
|
||||
avisynth.c AvxSynth Team (avxsynth.testing at gmail dot com)
|
||||
avr.c Paul B Mahol
|
||||
bink.c Peter Ross
|
||||
brstm.c Paul B Mahol
|
||||
caf* Peter Ross
|
||||
cdxl.c Paul B Mahol
|
||||
crc.c Michael Niedermayer
|
||||
daud.c Reimar Doeffinger
|
||||
dtshddec.c Paul B Mahol
|
||||
dv.c Roman Shaposhnik
|
||||
dxa.c Kostya Shishkov
|
||||
electronicarts.c Peter Ross
|
||||
epafdec.c Paul B Mahol
|
||||
ffm* Baptiste Coudurier
|
||||
flac* Justin Ruggles
|
||||
flic.c Mike Melanson
|
||||
@@ -354,26 +301,22 @@ Muxers/Demuxers:
|
||||
idcin.c Mike Melanson
|
||||
idroqdec.c Mike Melanson
|
||||
iff.c Jaikrishnan Menon
|
||||
img2*.c Michael Niedermayer
|
||||
ipmovie.c Mike Melanson
|
||||
ircam* Paul B Mahol
|
||||
img2.c Michael Niedermayer
|
||||
iss.c Stefan Gehrer
|
||||
jacosub* Clément Bœsch
|
||||
jvdec.c Peter Ross
|
||||
libmodplug.c Clément Bœsch
|
||||
libnut.c Oded Shimon
|
||||
lmlm4.c Ivo van Poorten
|
||||
lvfdec.c Paul B Mahol
|
||||
lxfdec.c Tomas Härdin
|
||||
matroska.c Aurelien Jacobs
|
||||
matroskadec.c Aurelien Jacobs
|
||||
matroskaenc.c David Conrad
|
||||
metadata* Aurelien Jacobs
|
||||
mgsts.c Paul B Mahol
|
||||
microdvd* Aurelien Jacobs
|
||||
mm.c Peter Ross
|
||||
mov.c Michael Niedermayer, Baptiste Coudurier
|
||||
movenc.c Baptiste Coudurier, Matthieu Bouron
|
||||
movenc.c Michael Niedermayer, Baptiste Coudurier
|
||||
mpc.c Kostya Shishkov
|
||||
mpeg.c Michael Niedermayer
|
||||
mpegenc.c Michael Niedermayer
|
||||
@@ -381,8 +324,6 @@ Muxers/Demuxers:
|
||||
msnwc_tcp.c Ramiro Polla
|
||||
mtv.c Reynaldo H. Verdejo Pinochet
|
||||
mxf* Baptiste Coudurier
|
||||
mxfdec.c Tomas Härdin
|
||||
nistspheredec.c Paul B Mahol
|
||||
nsvdec.c Francois Revol
|
||||
nut.c Michael Niedermayer
|
||||
nuv.c Reimar Doeffinger
|
||||
@@ -390,10 +331,8 @@ Muxers/Demuxers:
|
||||
oggenc.c Baptiste Coudurier
|
||||
oggparse*.c David Conrad
|
||||
oma.c Maxim Poliakovski
|
||||
paf.c Paul B Mahol
|
||||
psxstr.c Mike Melanson
|
||||
pva.c Ivo van Poorten
|
||||
pvfdec.c Paul B Mahol
|
||||
r3d.c Baptiste Coudurier
|
||||
raw.c Michael Niedermayer
|
||||
rdt.c Ronald S. Bultje
|
||||
@@ -404,48 +343,27 @@ Muxers/Demuxers:
|
||||
rtpdec_asf.* Ronald S. Bultje
|
||||
rtpenc_mpv.*, rtpenc_aac.* Martin Storsjo
|
||||
rtsp.c Luca Barbato
|
||||
sbgdec.c Nicolas George
|
||||
sdp.c Martin Storsjo
|
||||
segafilm.c Mike Melanson
|
||||
siff.c Kostya Shishkov
|
||||
smacker.c Kostya Shishkov
|
||||
smjpeg* Paul B Mahol
|
||||
srtdec.c Aurelien Jacobs
|
||||
swf.c Baptiste Coudurier
|
||||
takdec.c Paul B Mahol
|
||||
tta.c Alex Beregszaszi
|
||||
txd.c Ivo van Poorten
|
||||
voc.c Aurelien Jacobs
|
||||
wav.c Michael Niedermayer
|
||||
wc3movie.c Mike Melanson
|
||||
webvtt* Matthew J Heaney
|
||||
westwood.c Mike Melanson
|
||||
wtv.c Peter Ross
|
||||
wv.c Kostya Shishkov
|
||||
wvenc.c Paul B Mahol
|
||||
|
||||
Protocols:
|
||||
bluray.c Petri Hintukainen
|
||||
ftp.c Lukasz Marek
|
||||
http.c Ronald S. Bultje
|
||||
mms*.c Ronald S. Bultje
|
||||
udp.c Luca Abeni
|
||||
|
||||
|
||||
libswresample
|
||||
=============
|
||||
|
||||
Generic parts:
|
||||
audioconvert.c Michael Niedermayer
|
||||
dither.c Michael Niedermayer
|
||||
rematrix*.c Michael Niedermayer
|
||||
swresample*.c Michael Niedermayer
|
||||
|
||||
Resamplers:
|
||||
resample*.c Michael Niedermayer
|
||||
soxr_resample.c Rob Sykes
|
||||
|
||||
|
||||
Operating systems / CPU architectures
|
||||
=====================================
|
||||
|
||||
@@ -466,10 +384,11 @@ x86 Michael Niedermayer
|
||||
Releases
|
||||
========
|
||||
|
||||
2.0 Michael Niedermayer
|
||||
1.2 Michael Niedermayer
|
||||
0.5 *Deprecated/Unmaintained*
|
||||
0.6 *Deprecated/Unmaintained*
|
||||
0.7 Michael Niedermayer
|
||||
0.8 Michael Niedermayer
|
||||
|
||||
If you want to maintain an older release, please contact us
|
||||
|
||||
|
||||
GnuPG Fingerprints of maintainers and contributors
|
||||
@@ -477,24 +396,19 @@ GnuPG Fingerprints of maintainers and contributors
|
||||
|
||||
Anssi Hannula 1A92 FF42 2DD9 8D2E 8AF7 65A9 4278 C520 513D F3CB
|
||||
Anton Khirnov 6D0C 6625 56F8 65D1 E5F5 814B B50A 1241 C067 07AB
|
||||
Ash Hughes 694D 43D2 D180 C7C7 6421 ABD3 A641 D0B7 623D 6029
|
||||
Attila Kinali 11F0 F9A6 A1D2 11F6 C745 D10C 6520 BCDD F2DF E765
|
||||
Baptiste Coudurier 8D77 134D 20CC 9220 201F C5DB 0AC9 325C 5C1A BAAA
|
||||
Ben Littler 3EE3 3723 E560 3214 A8CD 4DEB 2CDB FCE7 768C 8D2C
|
||||
Benoit Fouet B22A 4F4F 43EF 636B BB66 FCDC 0023 AE1E 2985 49C8
|
||||
Bœsch Clément 52D0 3A82 D445 F194 DB8B 2B16 87EE 2CB8 F4B8 FCF9
|
||||
Daniel Verkamp 78A6 07ED 782C 653E C628 B8B9 F0EB 8DD8 2F0E 21C7
|
||||
Diego Biurrun 8227 1E31 B6D9 4994 7427 E220 9CAE D6CC 4757 FCC5
|
||||
FFmpeg release signing key FCF9 86EA 15E6 E293 A564 4F10 B432 2F04 D676 58D8
|
||||
Gwenole Beauchesne 2E63 B3A6 3E44 37E2 017D 2704 53C7 6266 B153 99C4
|
||||
Jaikrishnan Menon 61A1 F09F 01C9 2D45 78E1 C862 25DC 8831 AF70 D368
|
||||
Jean Delvare 7CA6 9F44 60F1 BDC4 1FD2 C858 A552 6B9B B3CD 4E6A
|
||||
Justin Ruggles 3136 ECC0 C10D 6C04 5F43 CA29 FCBE CD2A 3787 1EBF
|
||||
Loren Merritt ABD9 08F4 C920 3F65 D8BE 35D7 1540 DAA7 060F 56DE
|
||||
Lou Logan 7D68 DC73 CBEF EABB 671A B6CF 621C 2E28 82F8 DC3A
|
||||
Luca Barbato 6677 4209 213C 8843 5B67 29E7 E84C 78C2 84E9 0E34
|
||||
Michael Niedermayer 9FF2 128B 147E F673 0BAD F133 611E C787 040B 0FAB
|
||||
Nicolas George 24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93
|
||||
Panagiotis Issaris 6571 13A3 33D9 3726 F728 AA98 F643 B12E ECF3 E029
|
||||
Peter Ross A907 E02F A6E5 0CD2 34CD 20D2 6760 79C5 AC40 DD6B
|
||||
Reimar Döffinger C61D 16E5 9E2C D10C 8958 38A4 0899 A2B9 06D4 D9C7
|
||||
@@ -502,7 +416,5 @@ Reinhard Tartler 9300 5DC2 7E87 6C37 ED7B CA9A 9808 3544 9453 48A4
|
||||
Reynaldo H. Verdejo Pinochet 6E27 CD34 170C C78E 4D4F 5F40 C18E 077F 3114 452A
|
||||
Robert Swain EE7A 56EA 4A81 A7B5 2001 A521 67FA 362D A2FC 3E71
|
||||
Sascha Sommer 38A0 F88B 868E 9D3A 97D4 D6A0 E823 706F 1E07 0D3C
|
||||
Stefano Sabatini 0D0B AD6B 5330 BBAD D3D6 6A0C 719C 2839 FC43 2D5F
|
||||
Stephan Hilb 4F38 0B3A 5F39 B99B F505 E562 8D5C 5554 4E17 8863
|
||||
Tomas Härdin A79D 4E3D F38F 763F 91F5 8B33 A01E 8AE0 41BB 2551
|
||||
Wei Gao 4269 7741 857A 0E60 9EC5 08D2 4744 4EFA 62C1 87B9
|
||||
Stefano Sabatini 9A43 10F8 D32C D33C 48E7 C52C 5DF2 8E4D B2EE 066B
|
||||
Tomas Härdin D133 29CA 4EEC 9DB4 7076 F697 B04B 7403 3313 41FD
|
||||
|
85 Makefile
@@ -8,31 +8,29 @@ vpath %.S $(SRC_PATH)
vpath %.asm $(SRC_PATH)
vpath %.v $(SRC_PATH)
vpath %.texi $(SRC_PATH)
vpath %/fate_config.sh.template $(SRC_PATH)

PROGS-$(CONFIG_FFMPEG) += ffmpeg
PROGS-$(CONFIG_AVCONV) += avconv
PROGS-$(CONFIG_FFPLAY) += ffplay
PROGS-$(CONFIG_FFPROBE) += ffprobe
PROGS-$(CONFIG_FFSERVER) += ffserver

PROGS := $(PROGS-yes:%=%$(PROGSSUF)$(EXESUF))
PROGS := $(PROGS-yes:%=%$(EXESUF))
INSTPROGS = $(PROGS-yes:%=%$(PROGSSUF)$(EXESUF))

OBJS = cmdutils.o $(EXEOBJS)
OBJS-ffmpeg = ffmpeg_opt.o ffmpeg_filter.o
TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
OBJS = $(PROGS-yes:%=%.o) cmdutils.o
TESTTOOLS = audiogen videogen rotozoom tiny_psnr base64
HOSTPROGS := $(TESTTOOLS:%=tests/%)
TOOLS = qt-faststart trasher
TOOLS-$(CONFIG_ZLIB) += cws2fws

BASENAMES = ffmpeg ffplay ffprobe ffserver
BASENAMES = ffmpeg avconv ffplay ffprobe ffserver
ALLPROGS = $(BASENAMES:%=%$(PROGSSUF)$(EXESUF))
ALLPROGS_G = $(BASENAMES:%=%$(PROGSSUF)_g$(EXESUF))
ALLMANPAGES = $(BASENAMES:%=%.1)

FFLIBS-$(CONFIG_AVDEVICE) += avdevice
FFLIBS-$(CONFIG_AVFILTER) += avfilter
FFLIBS-$(CONFIG_AVFORMAT) += avformat
FFLIBS-$(CONFIG_AVRESAMPLE) += avresample
FFLIBS-$(CONFIG_AVCODEC) += avcodec
FFLIBS-$(CONFIG_POSTPROC) += postproc
FFLIBS-$(CONFIG_SWRESAMPLE)+= swresample
@@ -40,26 +38,25 @@ FFLIBS-$(CONFIG_SWSCALE) += swscale

FFLIBS := avutil

DATA_FILES := $(wildcard $(SRC_PATH)/presets/*.ffpreset) $(SRC_PATH)/doc/ffprobe.xsd
EXAMPLES_FILES := $(wildcard $(SRC_PATH)/doc/examples/*.c) $(SRC_PATH)/doc/examples/Makefile $(SRC_PATH)/doc/examples/README
DATA_FILES := $(wildcard $(SRC_PATH)/presets/*.ffpreset)

SKIPHEADERS = cmdutils_common_opts.h compat/w32pthreads.h
SKIPHEADERS = cmdutils_common_opts.h

include $(SRC_PATH)/common.mak

FF_EXTRALIBS := $(FFEXTRALIBS)
FF_DEP_LIBS := $(DEP_LIBS)

all: $(PROGS)
all: $(filter-out avconv, $(PROGS))

$(PROGS): %$(EXESUF): %_g$(EXESUF)
	$(CP) $< $@
	$(STRIP) $@
$(PROGS): %$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
	$(CP) $< $@$(PROGSSUF)
	$(STRIP) $@$(PROGSSUF)

$(TOOLS): %$(EXESUF): %.o $(EXEOBJS)
	$(LD) $(LDFLAGS) $(LD_O) $^ $(ELIBS)
$(TOOLS): %$(EXESUF): %.o
	$(LD) $(LDFLAGS) -o $@ $< $(ELIBS)

tools/cws2fws$(EXESUF): ELIBS = $(ZLIB)
tools/cws2fws$(EXESUF): ELIBS = -lz

config.h: .config
.config: $(wildcard $(FFLIBS:%=$(SRC_PATH)/lib%/all*.c))
@@ -67,13 +64,9 @@ config.h: .config
	@-printf '\nWARNING: $(?F) newer than config.h, rerun configure\n\n'
	@-tput sgr0 2>/dev/null

SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS \
               HEADERS ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
               ARMV5TE-OBJS ARMV6-OBJS VFP-OBJS NEON-OBJS \
               ALTIVEC-OBJS VIS-OBJS \
               MMX-OBJS YASM-OBJS \
               MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS MIPS32R2-OBJS \
               OBJS HOSTOBJS TESTOBJS
SUBDIR_VARS := OBJS FFLIBS CLEANFILES DIRS TESTPROGS EXAMPLES SKIPHEADERS \
               ALTIVEC-OBJS MMX-OBJS NEON-OBJS X86-OBJS YASM-OBJS-FFT YASM-OBJS \
               HOSTPROGS BUILT_HEADERS TESTOBJS ARCH_HEADERS ARMV6-OBJS TOOLS

define RESET
$(1) :=
@@ -84,25 +77,16 @@ define DOSUBDIR
$(foreach V,$(SUBDIR_VARS),$(eval $(call RESET,$(V))))
SUBDIR := $(1)/
include $(SRC_PATH)/$(1)/Makefile
-include $(SRC_PATH)/$(1)/$(ARCH)/Makefile
include $(SRC_PATH)/library.mak
endef

$(foreach D,$(FFLIBS),$(eval $(call DOSUBDIR,lib$(D))))

define DOPROG
OBJS-$(1) += $(1).o cmdutils.o $(EXEOBJS)
$(1)$(PROGSSUF)_g$(EXESUF): $$(OBJS-$(1))
$$(OBJS-$(1)): CFLAGS += $(CFLAGS-$(1))
$(1)$(PROGSSUF)_g$(EXESUF): LDFLAGS += $(LDFLAGS-$(1))
$(1)$(PROGSSUF)_g$(EXESUF): FF_EXTRALIBS += $(LIBS-$(1))
-include $$(OBJS-$(1):.o=.d)
endef
ffplay.o: CFLAGS += $(SDL_CFLAGS)
ffplay_g$(EXESUF): FF_EXTRALIBS += $(SDL_LIBS)
ffserver_g$(EXESUF): LDFLAGS += $(FFSERVERLDFLAGS)

$(foreach P,$(PROGS-yes),$(eval $(call DOPROG,$(P))))

%$(PROGSSUF)_g$(EXESUF): %.o $(FF_DEP_LIBS)
	$(LD) $(LDFLAGS) $(LD_O) $(OBJS-$*) $(FF_EXTRALIBS)
%$(PROGSSUF)_g$(EXESUF): %.o cmdutils.o $(FF_DEP_LIBS)
	$(LD) $(LDFLAGS) -o $@ $< cmdutils.o $(FF_EXTRALIBS)

OBJDIRS += tools

@@ -136,10 +120,9 @@ install-progs: install-progs-yes $(PROGS)
	$(Q)mkdir -p "$(BINDIR)"
	$(INSTALL) -c -m 755 $(INSTPROGS) "$(BINDIR)"

install-data: $(DATA_FILES) $(EXAMPLES_FILES)
	$(Q)mkdir -p "$(DATADIR)/examples"
install-data: $(DATA_FILES)
	$(Q)mkdir -p "$(DATADIR)"
	$(INSTALL) -m 644 $(DATA_FILES) "$(DATADIR)"
	$(INSTALL) -m 644 $(EXAMPLES_FILES) "$(DATADIR)/examples"

uninstall: uninstall-libs uninstall-headers uninstall-progs uninstall-data

@@ -152,18 +135,26 @@ uninstall-data:
clean::
	$(RM) $(ALLPROGS) $(ALLPROGS_G)
	$(RM) $(CLEANSUFFIXES)
	$(RM) $(TOOLS)
	$(RM) $(CLEANSUFFIXES:%=tools/%)
	$(RM) coverage.info
	$(RM) -r coverage-html
	$(RM) -rf coverage.info lcov

distclean::
	$(RM) $(DISTCLEANSUFFIXES)
	$(RM) config.* .config libavutil/avconfig.h .version version.h libavcodec/codec_names.h
	$(RM) config.* .version version.h libavutil/avconfig.h

config:
	$(SRC_PATH)/configure $(value FFMPEG_CONFIGURATION)

check: all alltools examples testprogs fate
# Without the sed genhtml thinks "libavutil" and "./libavutil" are two different things
coverage.info: $(wildcard *.gcda *.gcno */*.gcda */*.gcno */*/*.gcda */*/*.gcno)
	$(Q)lcov -c -d . -b . | sed -e 's#/./#/#g' > $@

coverage-html: coverage.info
	$(Q)mkdir -p $@
	$(Q)genhtml -o $@ $<
	$(Q)touch $@

include $(SRC_PATH)/doc/Makefile
include $(SRC_PATH)/tests/Makefile
@@ -179,5 +170,5 @@ $(sort $(OBJDIRS)):
# so this saves some time on slow systems.
.SUFFIXES:

.PHONY: all all-yes alltools check *clean config install*
.PHONY: all all-yes alltools *clean config examples install*
.PHONY: testprogs uninstall*
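The coverage.info and coverage-html rules above wrap lcov and genhtml. A hedged sketch of how they might be exercised; only the make targets themselves come from the Makefile, while the instrumentation flags are an assumption about how the tree would be built for coverage:

```sh
# assumed: build instrumented with GCC coverage flags so .gcda/.gcno files appear
./configure --extra-cflags="-fprofile-arcs -ftest-coverage" --extra-ldflags="-lgcov"
make && make fate      # build, then run a workload that exercises the code
make coverage-html     # runs lcov into coverage.info, then genhtml, as defined above
```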
8 README
@@ -4,15 +4,9 @@ FFmpeg README
1) Documentation
----------------

* Read the documentation in the doc/ directory in git.
  You can also view it online at http://ffmpeg.org/documentation.html
* Read the documentation in the doc/ directory.

2) Licensing
------------

* See the LICENSE file.

3) Build and Install
--------------------

* See the INSTALL file.
16 arch.mak
@@ -1,16 +0,0 @@
OBJS-$(HAVE_ARMV5TE) += $(ARMV5TE-OBJS) $(ARMV5TE-OBJS-yes)
OBJS-$(HAVE_ARMV6) += $(ARMV6-OBJS) $(ARMV6-OBJS-yes)
OBJS-$(HAVE_VFP) += $(VFP-OBJS) $(VFP-OBJS-yes)
OBJS-$(HAVE_NEON) += $(NEON-OBJS) $(NEON-OBJS-yes)

OBJS-$(HAVE_MIPSFPU) += $(MIPSFPU-OBJS) $(MIPSFPU-OBJS-yes)
OBJS-$(HAVE_MIPS32R2) += $(MIPS32R2-OBJS) $(MIPS32R2-OBJS-yes)
OBJS-$(HAVE_MIPSDSPR1) += $(MIPSDSPR1-OBJS) $(MIPSDSPR1-OBJS-yes)
OBJS-$(HAVE_MIPSDSPR2) += $(MIPSDSPR2-OBJS) $(MIPSDSPR2-OBJS-yes)

OBJS-$(HAVE_ALTIVEC) += $(ALTIVEC-OBJS) $(ALTIVEC-OBJS-yes)

OBJS-$(HAVE_VIS) += $(VIS-OBJS) $(VIS-OBJS-yes)

OBJS-$(HAVE_MMX) += $(MMX-OBJS) $(MMX-OBJS-yes)
OBJS-$(HAVE_YASM) += $(YASM-OBJS) $(YASM-OBJS-yes)
1548 cmdutils.c (file diff suppressed because it is too large)
271 cmdutils.h
@@ -43,26 +43,10 @@ extern const char program_name[];
|
||||
*/
|
||||
extern const int program_birth_year;
|
||||
|
||||
/**
|
||||
* this year, defined by the program for show_banner()
|
||||
*/
|
||||
extern const int this_year;
|
||||
|
||||
extern AVCodecContext *avcodec_opts[AVMEDIA_TYPE_NB];
|
||||
extern AVFormatContext *avformat_opts;
|
||||
extern struct SwsContext *sws_opts;
|
||||
extern AVDictionary *swr_opts;
|
||||
extern AVDictionary *format_opts, *codec_opts, *resample_opts;
|
||||
|
||||
/**
|
||||
* Register a program-specific cleanup routine.
|
||||
*/
|
||||
void register_exit(void (*cb)(int ret));
|
||||
|
||||
/**
|
||||
* Wraps exit with a program-specific cleanup routine.
|
||||
*/
|
||||
void exit_program(int ret);
|
||||
extern AVDictionary *format_opts, *codec_opts;
|
||||
|
||||
/**
|
||||
* Initialize the cmdutils option system, in particular
|
||||
@@ -85,27 +69,21 @@ void log_callback_help(void* ptr, int level, const char* fmt, va_list vl);
|
||||
* Fallback for options that are not explicitly handled, these will be
|
||||
* parsed through AVOptions.
|
||||
*/
|
||||
int opt_default(void *optctx, const char *opt, const char *arg);
|
||||
int opt_default(const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Set the libav* libraries log level.
|
||||
*/
|
||||
int opt_loglevel(void *optctx, const char *opt, const char *arg);
|
||||
int opt_loglevel(const char *opt, const char *arg);
|
||||
|
||||
int opt_report(const char *opt);
|
||||
|
||||
int opt_max_alloc(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
int opt_cpuflags(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
int opt_codec_debug(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
int opt_opencl(void *optctx, const char *opt, const char *arg);
|
||||
int opt_codec_debug(const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Limit the execution time.
|
||||
*/
|
||||
int opt_timelimit(void *optctx, const char *opt, const char *arg);
|
||||
int opt_timelimit(const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Parse a string and return its corresponding value as a double.
|
||||
@@ -113,15 +91,14 @@ int opt_timelimit(void *optctx, const char *opt, const char *arg);
|
||||
* parsed or the corresponding value is invalid.
|
||||
*
|
||||
* @param context the context of the value to be set (e.g. the
|
||||
* corresponding command line option name)
|
||||
* corresponding commandline option name)
|
||||
* @param numstr the string to be parsed
|
||||
* @param type the type (OPT_INT64 or OPT_FLOAT) as which the
|
||||
* string should be parsed
|
||||
* @param min the minimum valid accepted value
|
||||
* @param max the maximum valid accepted value
|
||||
*/
|
||||
double parse_number_or_die(const char *context, const char *numstr, int type,
|
||||
double min, double max);
|
||||
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max);
|
||||
|
||||
/**
|
||||
* Parse a string specifying a time and return its corresponding
|
||||
@@ -129,16 +106,15 @@ double parse_number_or_die(const char *context, const char *numstr, int type,
|
||||
* the string cannot be correctly parsed.
|
||||
*
|
||||
* @param context the context of the value to be set (e.g. the
|
||||
* corresponding command line option name)
|
||||
* corresponding commandline option name)
|
||||
* @param timestr the string to be parsed
|
||||
* @param is_duration a flag which tells how to interpret timestr, if
|
||||
* not zero timestr is interpreted as a duration, otherwise as a
|
||||
* date
|
||||
*
|
||||
* @see av_parse_time()
|
||||
* @see parse_date()
|
||||
*/
|
||||
int64_t parse_time_or_die(const char *context, const char *timestr,
|
||||
int is_duration);
|
||||
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration);
|
||||
|
||||
typedef struct SpecifierOpt {
|
||||
char *specifier; /**< stream/chapter/program/... specifier */
|
||||
@@ -151,7 +127,7 @@ typedef struct SpecifierOpt {
|
||||
} u;
|
||||
} SpecifierOpt;
|
||||
|
||||
typedef struct OptionDef {
|
||||
typedef struct {
|
||||
const char *name;
|
||||
int flags;
|
||||
#define HAS_ARG 0x0001
|
||||
@@ -160,42 +136,31 @@ typedef struct OptionDef {
|
||||
#define OPT_STRING 0x0008
|
||||
#define OPT_VIDEO 0x0010
|
||||
#define OPT_AUDIO 0x0020
|
||||
#define OPT_GRAB 0x0040
|
||||
#define OPT_INT 0x0080
|
||||
#define OPT_FLOAT 0x0100
|
||||
#define OPT_SUBTITLE 0x0200
|
||||
#define OPT_INT64 0x0400
|
||||
#define OPT_EXIT 0x0800
|
||||
#define OPT_DATA 0x1000
|
||||
#define OPT_PERFILE 0x2000 /* the option is per-file (currently ffmpeg-only).
|
||||
implied by OPT_OFFSET or OPT_SPEC */
|
||||
#define OPT_FUNC2 0x2000
|
||||
#define OPT_OFFSET 0x4000 /* option is specified as an offset in a passed optctx */
|
||||
#define OPT_SPEC 0x8000 /* option is to be stored in an array of SpecifierOpt.
|
||||
Implies OPT_OFFSET. Next element after the offset is
|
||||
an int containing element count in the array. */
|
||||
#define OPT_TIME 0x10000
|
||||
#define OPT_DOUBLE 0x20000
|
||||
#define OPT_INPUT 0x40000
|
||||
#define OPT_OUTPUT 0x80000
|
||||
union {
|
||||
void *dst_ptr;
|
||||
int (*func_arg)(void *, const char *, const char *);
|
||||
int (*func_arg)(const char *, const char *);
|
||||
int (*func2_arg)(void *, const char *, const char *);
|
||||
size_t off;
|
||||
} u;
|
||||
const char *help;
|
||||
const char *argname;
|
||||
} OptionDef;
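For reference, a minimal sketch of how a tool might fill in such a table with the newer union members. Everything here that is not declared in this header — OptionsContext, its fields, OFFSET(), opt_map() and the option names — is a hypothetical stand-in, and "cmdutils.h" is assumed to be included:

#include <stddef.h>   /* offsetof */
#include <stdint.h>

/* hypothetical per-file context; OPT_SPEC expects the element count
 * immediately after the SpecifierOpt array pointer */
typedef struct OptionsContext {
    int64_t       recording_time;
    SpecifierOpt *codec_names;
    int        nb_codec_names;
} OptionsContext;

#define OFFSET(x) offsetof(OptionsContext, x)

static int opt_map(void *optctx, const char *opt, const char *arg)
{
    return 0;  /* a per-file callback would record the mapping here */
}

static const OptionDef sample_options[] = {
    { "t",     HAS_ARG | OPT_TIME | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT,
               { .off = OFFSET(recording_time) }, "limit the duration read/written", "duration" },
    { "codec", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_INPUT | OPT_OUTPUT,
               { .off = OFFSET(codec_names) }, "select encoder/decoder", "codec" },
    { "map",   HAS_ARG | OPT_PERFILE | OPT_OUTPUT,
               { .func_arg = opt_map }, "set input stream mapping", "spec" },
    { NULL },
};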
|
||||
|
||||
/**
|
||||
* Print help for all options matching specified flags.
|
||||
*
|
||||
* @param options a list of options
|
||||
* @param msg title of this group. Only printed if at least one option matches.
|
||||
* @param req_flags print only options which have all those flags set.
|
||||
* @param rej_flags don't print options which have any of those flags set.
|
||||
* @param alt_flags print only options that have at least one of those flags set
|
||||
*/
|
||||
void show_help_options(const OptionDef *options, const char *msg, int req_flags,
|
||||
int rej_flags, int alt_flags);
|
||||
void show_help_options(const OptionDef *options, const char *msg, int mask, int value);
|
||||
|
||||
/**
|
||||
* Show help for all options with given flags in class and all its
|
||||
@@ -203,23 +168,10 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
|
||||
*/
|
||||
void show_help_children(const AVClass *class, int flags);
|
||||
|
||||
/**
|
||||
* Per-fftool specific help handler. Implemented in each
|
||||
* fftool, called by show_help().
|
||||
*/
|
||||
void show_help_default(const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Generic -h handler common to all fftools.
|
||||
*/
|
||||
int show_help(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Parse the command line arguments.
|
||||
*
|
||||
* @param optctx an opaque options context
|
||||
* @param argc number of command line arguments
|
||||
* @param argv values of command line arguments
|
||||
* @param options Array with the definitions required to interpret every
|
||||
* option of the form: -option_name [argument]
|
||||
* @param parse_arg_function Name of the function called to process every
|
||||
@@ -234,115 +186,13 @@ void parse_options(void *optctx, int argc, char **argv, const OptionDef *options
|
||||
*
|
||||
* @return on success 1 if arg was consumed, 0 otherwise; negative number on error
|
||||
*/
|
||||
int parse_option(void *optctx, const char *opt, const char *arg,
|
||||
const OptionDef *options);
|
||||
int parse_option(void *optctx, const char *opt, const char *arg, const OptionDef *options);
|
||||
|
||||
/**
|
||||
* An option extracted from the commandline.
|
||||
* Cannot use AVDictionary because of options like -map which can be
|
||||
* used multiple times.
|
||||
*/
|
||||
typedef struct Option {
|
||||
const OptionDef *opt;
|
||||
const char *key;
|
||||
const char *val;
|
||||
} Option;
|
||||
|
||||
typedef struct OptionGroupDef {
|
||||
/**< group name */
|
||||
const char *name;
|
||||
/**
|
||||
* Option to be used as group separator. Can be NULL for groups which
|
||||
* are terminated by a non-option argument (e.g. ffmpeg output files)
|
||||
*/
|
||||
const char *sep;
|
||||
/**
|
||||
* Option flags that must be set on each option that is
|
||||
* applied to this group
|
||||
*/
|
||||
int flags;
|
||||
} OptionGroupDef;
|
||||
|
||||
typedef struct OptionGroup {
|
||||
const OptionGroupDef *group_def;
|
||||
const char *arg;
|
||||
|
||||
Option *opts;
|
||||
int nb_opts;
|
||||
|
||||
AVDictionary *codec_opts;
|
||||
AVDictionary *format_opts;
|
||||
AVDictionary *resample_opts;
|
||||
struct SwsContext *sws_opts;
|
||||
AVDictionary *swr_opts;
|
||||
} OptionGroup;
|
||||
|
||||
/**
|
||||
* A list of option groups that all have the same group type
|
||||
* (e.g. input files or output files)
|
||||
*/
|
||||
typedef struct OptionGroupList {
|
||||
const OptionGroupDef *group_def;
|
||||
|
||||
OptionGroup *groups;
|
||||
int nb_groups;
|
||||
} OptionGroupList;
|
||||
|
||||
typedef struct OptionParseContext {
|
||||
OptionGroup global_opts;
|
||||
|
||||
OptionGroupList *groups;
|
||||
int nb_groups;
|
||||
|
||||
/* parsing state */
|
||||
OptionGroup cur_group;
|
||||
} OptionParseContext;
|
||||
|
||||
/**
|
||||
* Parse an options group and write results into optctx.
|
||||
*
|
||||
* @param optctx an app-specific options context. NULL for global options group
|
||||
*/
|
||||
int parse_optgroup(void *optctx, OptionGroup *g);
|
||||
|
||||
/**
|
||||
* Split the commandline into an intermediate form convenient for further
|
||||
* processing.
|
||||
*
|
||||
* The commandline is assumed to be composed of options which either belong to a
|
||||
* group (those with OPT_SPEC, OPT_OFFSET or OPT_PERFILE) or are global
|
||||
* (everything else).
|
||||
*
|
||||
* A group (defined by an OptionGroupDef struct) is a sequence of options
|
||||
* terminated by either a group separator option (e.g. -i) or a parameter that
|
||||
* is not an option (doesn't start with -). A group without a separator option
|
||||
* must always be first in the supplied groups list.
|
||||
*
|
||||
* All options within the same group are stored in one OptionGroup struct in an
|
||||
* OptionGroupList, all groups with the same group definition are stored in one
|
||||
* OptionGroupList in OptionParseContext.groups. The order of group lists is the
|
||||
* same as the order of group definitions.
|
||||
*/
|
||||
int split_commandline(OptionParseContext *octx, int argc, char *argv[],
|
||||
const OptionDef *options,
|
||||
const OptionGroupDef *groups, int nb_groups);
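A hedged sketch of how the two-pass parse described above might be driven: split first, then apply the global group, then each per-file group. The group definitions and the missing per-file context are placeholders, and an OptionDef table like the one sketched earlier is assumed:

static const OptionGroupDef groups[] = {
    /* the group without a separator option must come first */
    { "output file", NULL, OPT_OUTPUT },
    { "input file",  "i",  OPT_INPUT  },
};

static int parse_cmdline(int argc, char **argv, const OptionDef *options)
{
    OptionParseContext octx;
    int i, j, ret;

    ret = split_commandline(&octx, argc, argv, options, groups,
                            sizeof(groups) / sizeof(groups[0]));
    if (ret < 0)
        goto end;

    /* global options are applied with a NULL app context */
    ret = parse_optgroup(NULL, &octx.global_opts);
    if (ret < 0)
        goto end;

    for (i = 0; i < octx.nb_groups; i++)
        for (j = 0; j < octx.groups[i].nb_groups; j++)
            /* a real tool would allocate an app-specific per-file context here */
            ret = parse_optgroup(NULL, &octx.groups[i].groups[j]);

end:
    uninit_parse_context(&octx);
    return ret;
}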
|
||||
|
||||
/**
|
||||
* Free all allocated memory in an OptionParseContext.
|
||||
*/
|
||||
void uninit_parse_context(OptionParseContext *octx);
|
||||
|
||||
/**
|
||||
* Find the '-loglevel' option in the command line args and apply it.
|
||||
* Find the '-loglevel' option in the commandline args and apply it.
|
||||
*/
|
||||
void parse_loglevel(int argc, char **argv, const OptionDef *options);
|
||||
|
||||
/**
|
||||
* Return index of option opt in argv or 0 if not found.
|
||||
*/
|
||||
int locate_option(int argc, char **argv, const OptionDef *options,
|
||||
const char *optname);
|
||||
|
||||
/**
|
||||
* Check if the given stream matches a stream specifier.
|
||||
*
|
||||
@@ -360,16 +210,11 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec);
|
||||
* Create a new options dictionary containing only the options from
|
||||
* opts which apply to the codec with ID codec_id.
|
||||
*
|
||||
* @param opts dictionary to place options in
|
||||
* @param codec_id ID of the codec that should be filtered for
|
||||
* @param s Corresponding format context.
|
||||
* @param st A stream from s for which the options should be filtered.
|
||||
* @param codec The particular codec for which the options should be filtered.
|
||||
* If null, the default one is looked up according to the codec id.
|
||||
* @return a pointer to the created dictionary
|
||||
*/
|
||||
AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
|
||||
AVFormatContext *s, AVStream *st, AVCodec *codec);
|
||||
AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec, AVFormatContext *s, AVStream *st);
|
||||
|
||||
/**
|
||||
* Setup AVCodecContext options for avformat_find_stream_info().
|
||||
@@ -382,8 +227,7 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
|
||||
* @return pointer to the created array of dictionaries, NULL if it
|
||||
* cannot be created
|
||||
*/
|
||||
AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,
|
||||
AVDictionary *codec_opts);
|
||||
AVDictionary **setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts);
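A hedged sketch of how these helpers are typically combined when probing an input: setup_find_stream_info_opts() builds one pre-filtered dictionary per stream (internally via filter_codec_opts) from the global codec_opts declared in this header. The filename is a placeholder and error handling is abbreviated:

static int probe_input(const char *filename)
{
    AVFormatContext *ic = NULL;
    AVDictionary **opts;
    unsigned i;
    int err;

    err = avformat_open_input(&ic, filename, NULL, NULL);
    if (err < 0)
        return err;

    /* one pre-filtered options dictionary per stream */
    opts = setup_find_stream_info_opts(ic, codec_opts);
    err  = avformat_find_stream_info(ic, opts);

    for (i = 0; i < ic->nb_streams; i++)
        av_dict_free(&opts[i]);
    av_freep(&opts);
    avformat_close_input(&ic);
    return err;
}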
|
||||
|
||||
/**
|
||||
* Print an error message to stderr, indicating filename and a human
|
||||
@@ -401,7 +245,7 @@ void print_error(const char *filename, int err);
|
||||
* current version of the repository and of the libav* libraries used by
|
||||
* the program.
|
||||
*/
|
||||
void show_banner(int argc, char **argv, const OptionDef *options);
|
||||
void show_banner(void);
|
||||
|
||||
/**
|
||||
* Print the version of the program to stdout. The version message
|
||||
@@ -409,81 +253,62 @@ void show_banner(int argc, char **argv, const OptionDef *options);
|
||||
* libraries.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_version(void *optctx, const char *opt, const char *arg);
|
||||
int opt_version(const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print the license of the program to stdout. The license depends on
|
||||
* the license of the libraries compiled into the program.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_license(void *optctx, const char *opt, const char *arg);
|
||||
int opt_license(const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the formats supported by the
|
||||
* program.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_formats(void *optctx, const char *opt, const char *arg);
|
||||
int opt_formats(const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the codecs supported by the
|
||||
* program.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_codecs(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the decoders supported by the
|
||||
* program.
|
||||
*/
|
||||
int show_decoders(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the encoders supported by the
|
||||
* program.
|
||||
*/
|
||||
int show_encoders(void *optctx, const char *opt, const char *arg);
|
||||
int opt_codecs(const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the filters supported by the
|
||||
* program.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_filters(void *optctx, const char *opt, const char *arg);
|
||||
int opt_filters(const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the bit stream filters supported by the
|
||||
* program.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_bsfs(void *optctx, const char *opt, const char *arg);
|
||||
int opt_bsfs(const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the protocols supported by the
|
||||
* program.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_protocols(void *optctx, const char *opt, const char *arg);
|
||||
int opt_protocols(const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the pixel formats supported by the
|
||||
* program.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_pix_fmts(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the standard channel layouts supported by
|
||||
* the program.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_layouts(void *optctx, const char *opt, const char *arg);
|
||||
int opt_pix_fmts(const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the sample formats supported by the
|
||||
* program.
|
||||
*/
|
||||
int show_sample_fmts(void *optctx, const char *opt, const char *arg);
|
||||
int show_sample_fmts(const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Return a positive value if a line read from standard input
|
||||
@@ -495,7 +320,6 @@ int read_yesno(void);
|
||||
* Read the file with name filename, and put its content in a newly
|
||||
* allocated 0-terminated buffer.
|
||||
*
|
||||
* @param filename file to read from
|
||||
* @param bufptr location where pointer to buffer is returned
|
||||
* @param size location where size of buffer is returned
|
||||
* @return 0 in case of success, a negative value corresponding to an
|
||||
@@ -512,7 +336,7 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size);
|
||||
* at configuration time or in a "ffpresets" folder along the executable
|
||||
* on win32, in that order. If no such file is found and
|
||||
* codec_name is defined, then search for a file named
|
||||
* codec_name-preset_name.avpreset in the above-mentioned directories.
|
||||
* codec_name-preset_name.ffpreset in the above-mentioned directories.
|
||||
*
|
||||
* @param filename buffer where the name of the found filename is written
|
||||
* @param filename_size size in bytes of the filename buffer
|
||||
@@ -524,39 +348,20 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size);
|
||||
FILE *get_preset_file(char *filename, size_t filename_size,
|
||||
const char *preset_name, int is_path, const char *codec_name);
|
||||
|
||||
/**
|
||||
* Do all the necessary cleanup and abort.
|
||||
* This function is implemented in the avtools, not cmdutils.
|
||||
*/
|
||||
void exit_program(int ret);
|
||||
|
||||
/**
|
||||
* Realloc array to hold new_size elements of elem_size.
|
||||
* Calls exit() on failure.
|
||||
* Calls exit_program() on failure.
|
||||
*
|
||||
* @param array array to reallocate
|
||||
* @param elem_size size in bytes of each element
|
||||
* @param size new element count will be written here
|
||||
* @param new_size number of elements to place in reallocated array
|
||||
* @return reallocated array
|
||||
*/
|
||||
void *grow_array(void *array, int elem_size, int *size, int new_size);
|
||||
|
||||
#define media_type_string av_get_media_type_string
|
||||
|
||||
#define GROW_ARRAY(array, nb_elems)\
|
||||
array = grow_array(array, sizeof(*array), &nb_elems, nb_elems + 1)
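A minimal usage sketch for the macro above; "Item" is a placeholder type, and the helper exits the program on allocation failure, so no error path is needed here:

typedef struct Item { int id; } Item;

static Item **items    = NULL;
static int    nb_items = 0;

static void add_item(Item *it)
{
    GROW_ARRAY(items, nb_items);   /* reallocates and bumps nb_items by one */
    items[nb_items - 1] = it;      /* the new slot is the last element */
}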
|
||||
|
||||
#define GET_PIX_FMT_NAME(pix_fmt)\
|
||||
const char *name = av_get_pix_fmt_name(pix_fmt);
|
||||
|
||||
#define GET_SAMPLE_FMT_NAME(sample_fmt)\
|
||||
const char *name = av_get_sample_fmt_name(sample_fmt)
|
||||
|
||||
#define GET_SAMPLE_RATE_NAME(rate)\
|
||||
char name[16];\
|
||||
snprintf(name, sizeof(name), "%d", rate);
|
||||
|
||||
#define GET_CH_LAYOUT_NAME(ch_layout)\
|
||||
char name[16];\
|
||||
snprintf(name, sizeof(name), "0x%"PRIx64, ch_layout);
|
||||
|
||||
#define GET_CH_LAYOUT_DESC(ch_layout)\
|
||||
char name[128];\
|
||||
av_get_channel_layout_string(name, sizeof(name), 0, ch_layout);
|
||||
|
||||
#endif /* CMDUTILS_H */
|
||||
|
@@ -1,24 +1,17 @@
|
||||
{ "L" , OPT_EXIT, {.func_arg = show_license}, "show license" },
|
||||
{ "h" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
|
||||
{ "?" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
|
||||
{ "help" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
|
||||
{ "-help" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
|
||||
{ "version" , OPT_EXIT, {.func_arg = show_version}, "show version" },
|
||||
{ "formats" , OPT_EXIT, {.func_arg = show_formats }, "show available formats" },
|
||||
{ "codecs" , OPT_EXIT, {.func_arg = show_codecs }, "show available codecs" },
|
||||
{ "decoders" , OPT_EXIT, {.func_arg = show_decoders }, "show available decoders" },
|
||||
{ "encoders" , OPT_EXIT, {.func_arg = show_encoders }, "show available encoders" },
|
||||
{ "bsfs" , OPT_EXIT, {.func_arg = show_bsfs }, "show available bit stream filters" },
|
||||
{ "protocols" , OPT_EXIT, {.func_arg = show_protocols}, "show available protocols" },
|
||||
{ "filters" , OPT_EXIT, {.func_arg = show_filters }, "show available filters" },
|
||||
{ "pix_fmts" , OPT_EXIT, {.func_arg = show_pix_fmts }, "show available pixel formats" },
|
||||
{ "layouts" , OPT_EXIT, {.func_arg = show_layouts }, "show standard channel layouts" },
|
||||
{ "L", OPT_EXIT, {(void*)opt_license}, "show license" },
|
||||
{ "h", OPT_EXIT, {(void*)opt_help}, "show help" },
|
||||
{ "?", OPT_EXIT, {(void*)opt_help}, "show help" },
|
||||
{ "help", OPT_EXIT, {(void*)opt_help}, "show help" },
|
||||
{ "-help", OPT_EXIT, {(void*)opt_help}, "show help" },
|
||||
{ "version", OPT_EXIT, {(void*)opt_version}, "show version" },
|
||||
{ "formats" , OPT_EXIT, {(void*)opt_formats }, "show available formats" },
|
||||
{ "codecs" , OPT_EXIT, {(void*)opt_codecs }, "show available codecs" },
|
||||
{ "bsfs" , OPT_EXIT, {(void*)opt_bsfs }, "show available bit stream filters" },
|
||||
{ "protocols", OPT_EXIT, {(void*)opt_protocols}, "show available protocols" },
|
||||
{ "filters", OPT_EXIT, {(void*)opt_filters }, "show available filters" },
|
||||
{ "pix_fmts" , OPT_EXIT, {(void*)opt_pix_fmts }, "show available pixel formats" },
|
||||
{ "sample_fmts", OPT_EXIT, {.func_arg = show_sample_fmts }, "show available audio sample formats" },
|
||||
{ "loglevel" , HAS_ARG, {.func_arg = opt_loglevel}, "set logging level", "loglevel" },
|
||||
{ "v", HAS_ARG, {.func_arg = opt_loglevel}, "set logging level", "loglevel" },
|
||||
{ "report" , 0, {(void*)opt_report}, "generate a report" },
|
||||
{ "max_alloc" , HAS_ARG, {.func_arg = opt_max_alloc}, "set maximum size of a single allocated block", "bytes" },
|
||||
{ "cpuflags" , HAS_ARG | OPT_EXPERT, {.func_arg = opt_cpuflags}, "force specific cpu flags", "flags" },
|
||||
#if CONFIG_OPENCL
|
||||
{ "opencl_options", HAS_ARG, {.func_arg = opt_opencl}, "set OpenCL environment options" },
|
||||
#endif
|
||||
{ "loglevel", HAS_ARG, {(void*)opt_loglevel}, "set libav* logging level", "loglevel" },
|
||||
{ "v", HAS_ARG, {(void*)opt_loglevel}, "set libav* logging level", "loglevel" },
|
||||
{ "debug", HAS_ARG, {(void*)opt_codec_debug}, "set debug flags", "flags" },
|
||||
{ "report", 0, {(void*)opt_report}, "generate a report" },
|
||||
|
63 common.mak
@@ -10,9 +10,8 @@ ifndef SUBDIR
|
||||
ifndef V
|
||||
Q = @
|
||||
ECHO = printf "$(1)\t%s\n" $(2)
|
||||
BRIEF = CC CXX HOSTCC HOSTLD AS YASM AR LD STRIP CP
|
||||
SILENT = DEPCC DEPHOSTCC DEPAS DEPYASM RANLIB RM
|
||||
|
||||
BRIEF = CC CXX AS YASM AR LD HOSTCC STRIP CP
|
||||
SILENT = DEPCC YASMDEP RM RANLIB
|
||||
MSG = $@
|
||||
M = @$(call ECHO,$(TAG),$@);
|
||||
$(foreach VAR,$(BRIEF), \
|
||||
@@ -21,23 +20,21 @@ $(foreach VAR,$(SILENT),$(eval override $(VAR) = @$($(VAR))))
|
||||
$(eval INSTALL = @$(call ECHO,INSTALL,$$(^:$(SRC_DIR)/%=%)); $(INSTALL))
|
||||
endif
|
||||
|
||||
ALLFFLIBS = avcodec avdevice avfilter avformat avresample avutil postproc swscale swresample
|
||||
ALLFFLIBS = avcodec avdevice avfilter avformat avutil postproc swscale swresample
|
||||
|
||||
# NASM requires -I path terminated with /
|
||||
IFLAGS := -I. -I$(SRC_PATH)/
|
||||
CPPFLAGS := $(IFLAGS) $(CPPFLAGS)
|
||||
CFLAGS += $(ECFLAGS)
|
||||
CCFLAGS = $(CPPFLAGS) $(CFLAGS)
|
||||
ASFLAGS := $(CPPFLAGS) $(ASFLAGS)
|
||||
CXXFLAGS += $(CPPFLAGS) $(CFLAGS)
|
||||
YASMFLAGS += $(IFLAGS:%=%/) -Pconfig.asm
|
||||
|
||||
HOSTCCFLAGS = $(IFLAGS) $(HOSTCPPFLAGS) $(HOSTCFLAGS)
|
||||
LDFLAGS := $(ALLFFLIBS:%=$(LD_PATH)lib%) $(LDFLAGS)
|
||||
CCFLAGS = $(CFLAGS)
|
||||
CXXFLAGS := $(CFLAGS) $(CXXFLAGS)
|
||||
YASMFLAGS += $(IFLAGS) -I$(SRC_PATH)/libavutil/x86/ -Pconfig.asm
|
||||
HOSTCFLAGS += $(IFLAGS)
|
||||
LDFLAGS := $(ALLFFLIBS:%=-Llib%) $(LDFLAGS)
|
||||
|
||||
define COMPILE
|
||||
$(call $(1)DEP,$(1))
|
||||
$($(1)) $($(1)FLAGS) $($(1)_DEPFLAGS) $($(1)_C) $($(1)_O) $<
|
||||
$($(1)DEP)
|
||||
$($(1)) $(CPPFLAGS) $($(1)FLAGS) $($(1)_DEPFLAGS) -c $($(1)_O) $<
|
||||
endef
|
||||
|
||||
COMPILE_C = $(call COMPILE,CC)
|
||||
@@ -56,11 +53,8 @@ COMPILE_S = $(call COMPILE,AS)
|
||||
%.o: %.S
|
||||
$(COMPILE_S)
|
||||
|
||||
%.i: %.c
|
||||
$(CC) $(CCFLAGS) $(CC_E) $<
|
||||
|
||||
%.h.c:
|
||||
$(Q)echo '#include "$*.h"' >$@
|
||||
%.ho: %.h
|
||||
$(CC) $(CPPFLAGS) $(CFLAGS) -Wno-unused -c -o $@ -x c $<
|
||||
|
||||
%.ver: %.v
|
||||
$(Q)sed 's/$$MAJOR/$($(basename $(@F))_VERSION_MAJOR)/' $^ > $@
|
||||
@@ -79,14 +73,13 @@ COMPILE_S = $(call COMPILE,AS)
|
||||
$(OBJS):
|
||||
endif
|
||||
|
||||
include $(SRC_PATH)/arch.mak
|
||||
OBJS-$(HAVE_MMX) += $(MMX-OBJS-yes)
|
||||
|
||||
OBJS += $(OBJS-yes)
|
||||
FFLIBS := $(FFLIBS-yes) $(FFLIBS)
|
||||
TESTPROGS += $(TESTPROGS-yes)
|
||||
|
||||
LDLIBS = $(FFLIBS:%=%$(BUILDSUF))
|
||||
FFEXTRALIBS := $(LDLIBS:%=$(LD_LIB)) $(EXTRALIBS)
|
||||
FFEXTRALIBS := $(FFLIBS:%=-l%$(BUILDSUF)) $(EXTRALIBS)
|
||||
|
||||
EXAMPLES := $(EXAMPLES:%=$(SUBDIR)%-example$(EXESUF))
|
||||
OBJS := $(sort $(OBJS:%=$(SUBDIR)%))
|
||||
@@ -97,45 +90,31 @@ HOSTPROGS := $(HOSTPROGS:%=$(SUBDIR)%$(HOSTEXESUF))
|
||||
TOOLS += $(TOOLS-yes)
|
||||
TOOLOBJS := $(TOOLS:%=tools/%.o)
|
||||
TOOLS := $(TOOLS:%=tools/%$(EXESUF))
|
||||
HEADERS += $(HEADERS-yes)
|
||||
|
||||
DEP_LIBS := $(foreach NAME,$(FFLIBS),lib$(NAME)/$($(CONFIG_SHARED:yes=S)LIBNAME))
|
||||
|
||||
SRC_DIR := $(SRC_PATH)/lib$(NAME)
|
||||
ALLHEADERS := $(subst $(SRC_DIR)/,$(SUBDIR),$(wildcard $(SRC_DIR)/*.h $(SRC_DIR)/$(ARCH)/*.h))
|
||||
SKIPHEADERS += $(ARCH_HEADERS:%=$(ARCH)/%) $(SKIPHEADERS-)
|
||||
SKIPHEADERS := $(SKIPHEADERS:%=$(SUBDIR)%)
|
||||
HOBJS = $(filter-out $(SKIPHEADERS:.h=.h.o),$(ALLHEADERS:.h=.h.o))
|
||||
checkheaders: $(HOBJS)
|
||||
.SECONDARY: $(HOBJS:.o=.c)
|
||||
checkheaders: $(filter-out $(SKIPHEADERS:.h=.ho),$(ALLHEADERS:.h=.ho))
|
||||
|
||||
alltools: $(TOOLS)
|
||||
|
||||
$(HOSTOBJS): %.o: %.c
|
||||
$(call COMPILE,HOSTCC)
|
||||
$(HOSTCC) $(HOSTCFLAGS) -c -o $@ $<
|
||||
|
||||
$(HOSTPROGS): %$(HOSTEXESUF): %.o
|
||||
$(HOSTLD) $(HOSTLDFLAGS) $(HOSTLD_O) $< $(HOSTLIBS)
|
||||
$(HOSTCC) $(HOSTLDFLAGS) -o $@ $< $(HOSTLIBS)
|
||||
|
||||
$(OBJS): | $(sort $(dir $(OBJS)))
|
||||
$(HOBJS): | $(sort $(dir $(HOBJS)))
|
||||
$(HOSTOBJS): | $(sort $(dir $(HOSTOBJS)))
|
||||
$(TESTOBJS): | $(sort $(dir $(TESTOBJS)))
|
||||
$(TOOLOBJS): | tools
|
||||
|
||||
OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(TESTOBJS))
|
||||
OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOSTOBJS) $(TESTOBJS))
|
||||
|
||||
CLEANSUFFIXES = *.d *.o *~ *.h.c *.map *.ver *.ho *.gcno *.gcda
|
||||
CLEANSUFFIXES = *.d *.o *~ *.ho *.map *.ver *.gcno *.gcda
|
||||
DISTCLEANSUFFIXES = *.pc
|
||||
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a
|
||||
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a *.exp
|
||||
|
||||
define RULES
|
||||
clean::
|
||||
$(RM) $(OBJS) $(OBJS:.o=.d)
|
||||
$(RM) $(HOSTPROGS)
|
||||
$(RM) $(TOOLS)
|
||||
endef
|
||||
|
||||
$(eval $(RULES))
|
||||
|
||||
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d))
|
||||
-include $(wildcard $(OBJS:.o=.d) $(TESTOBJS:.o=.d))
|
||||
|
@@ -1,14 +0,0 @@
|
||||
/*
|
||||
* Workaround aix-specific class() function clashing with ffmpeg class usage
|
||||
*/
|
||||
|
||||
#ifndef COMPAT_AIX_MATH_H
|
||||
#define COMPAT_AIX_MATH_H
|
||||
|
||||
#define class class_in_math_h_causes_problems
|
||||
|
||||
#include_next <math.h>
|
||||
|
||||
#undef class
|
||||
|
||||
#endif /* COMPAT_AIX_MATH_H */
|
@@ -1,879 +0,0 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
// NOTE: this is a partial update of the Avisynth C interface to recognize
|
||||
// new color spaces added in Avisynth 2.60. By no means is this document
|
||||
// completely Avisynth 2.60 compliant.
|
||||
|
||||
#ifndef __AVISYNTH_C__
|
||||
#define __AVISYNTH_C__
|
||||
|
||||
#ifdef __cplusplus
|
||||
# define EXTERN_C extern "C"
|
||||
#else
|
||||
# define EXTERN_C
|
||||
#endif
|
||||
|
||||
#define AVSC_USE_STDCALL 1
|
||||
|
||||
#ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
#else
|
||||
# define AVSC_CC __stdcall
|
||||
#endif
|
||||
|
||||
#define AVSC_INLINE static __inline
|
||||
|
||||
#ifdef AVISYNTH_C_EXPORTS
|
||||
# define AVSC_EXPORT EXTERN_C
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllexport) ret AVSC_CC name
|
||||
#else
|
||||
# define AVSC_EXPORT EXTERN_C __declspec(dllexport)
|
||||
# ifndef AVSC_NO_DECLSPEC
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name
|
||||
# else
|
||||
# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
typedef unsigned char BYTE;
|
||||
#ifdef __GNUC__
|
||||
typedef long long int INT64;
|
||||
#else
|
||||
typedef __int64 INT64;
|
||||
#endif
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Constants
|
||||
//
|
||||
|
||||
#ifndef __AVISYNTH_H__
|
||||
enum { AVISYNTH_INTERFACE_VERSION = 4 };
|
||||
#endif
|
||||
|
||||
enum {AVS_SAMPLE_INT8 = 1<<0,
|
||||
AVS_SAMPLE_INT16 = 1<<1,
|
||||
AVS_SAMPLE_INT24 = 1<<2,
|
||||
AVS_SAMPLE_INT32 = 1<<3,
|
||||
AVS_SAMPLE_FLOAT = 1<<4};
|
||||
|
||||
enum {AVS_PLANAR_Y=1<<0,
|
||||
AVS_PLANAR_U=1<<1,
|
||||
AVS_PLANAR_V=1<<2,
|
||||
AVS_PLANAR_ALIGNED=1<<3,
|
||||
AVS_PLANAR_Y_ALIGNED=AVS_PLANAR_Y|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_U_ALIGNED=AVS_PLANAR_U|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_V_ALIGNED=AVS_PLANAR_V|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_A=1<<4,
|
||||
AVS_PLANAR_R=1<<5,
|
||||
AVS_PLANAR_G=1<<6,
|
||||
AVS_PLANAR_B=1<<7,
|
||||
AVS_PLANAR_A_ALIGNED=AVS_PLANAR_A|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_R_ALIGNED=AVS_PLANAR_R|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_G_ALIGNED=AVS_PLANAR_G|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_B_ALIGNED=AVS_PLANAR_B|AVS_PLANAR_ALIGNED};
|
||||
|
||||
// Colorspace properties.
|
||||
enum {AVS_CS_BGR = 1<<28,
|
||||
AVS_CS_YUV = 1<<29,
|
||||
AVS_CS_INTERLEAVED = 1<<30,
|
||||
AVS_CS_PLANAR = 1<<31,
|
||||
|
||||
AVS_CS_SHIFT_SUB_WIDTH = 0,
|
||||
AVS_CS_SHIFT_SUB_HEIGHT = 1 << 3,
|
||||
AVS_CS_SHIFT_SAMPLE_BITS = 1 << 4,
|
||||
|
||||
AVS_CS_SUB_WIDTH_MASK = 7 << AVS_CS_SHIFT_SUB_WIDTH,
|
||||
AVS_CS_SUB_WIDTH_1 = 3 << AVS_CS_SHIFT_SUB_WIDTH, // YV24
|
||||
AVS_CS_SUB_WIDTH_2 = 0 << AVS_CS_SHIFT_SUB_WIDTH, // YV12, I420, YV16
|
||||
AVS_CS_SUB_WIDTH_4 = 1 << AVS_CS_SHIFT_SUB_WIDTH, // YUV9, YV411
|
||||
|
||||
AVS_CS_VPLANEFIRST = 1 << 3, // YV12, YV16, YV24, YV411, YUV9
|
||||
AVS_CS_UPLANEFIRST = 1 << 4, // I420
|
||||
|
||||
AVS_CS_SUB_HEIGHT_MASK = 7 << AVS_CS_SHIFT_SUB_HEIGHT,
|
||||
AVS_CS_SUB_HEIGHT_1 = 3 << AVS_CS_SHIFT_SUB_HEIGHT, // YV16, YV24, YV411
|
||||
AVS_CS_SUB_HEIGHT_2 = 0 << AVS_CS_SHIFT_SUB_HEIGHT, // YV12, I420
|
||||
AVS_CS_SUB_HEIGHT_4 = 1 << AVS_CS_SHIFT_SUB_HEIGHT, // YUV9
|
||||
|
||||
AVS_CS_SAMPLE_BITS_MASK = 7 << AVS_CS_SHIFT_SAMPLE_BITS,
|
||||
AVS_CS_SAMPLE_BITS_8 = 0 << AVS_CS_SHIFT_SAMPLE_BITS,
|
||||
AVS_CS_SAMPLE_BITS_16 = 1 << AVS_CS_SHIFT_SAMPLE_BITS,
|
||||
AVS_CS_SAMPLE_BITS_32 = 2 << AVS_CS_SHIFT_SAMPLE_BITS,
|
||||
|
||||
AVS_CS_PLANAR_MASK = AVS_CS_PLANAR | AVS_CS_INTERLEAVED | AVS_CS_YUV | AVS_CS_BGR | AVS_CS_SAMPLE_BITS_MASK | AVS_CS_SUB_HEIGHT_MASK | AVS_CS_SUB_WIDTH_MASK,
|
||||
AVS_CS_PLANAR_FILTER = ~( AVS_CS_VPLANEFIRST | AVS_CS_UPLANEFIRST )};
|
||||
|
||||
// Specific colorformats
|
||||
enum {
|
||||
AVS_CS_UNKNOWN = 0,
|
||||
AVS_CS_BGR24 = 1<<0 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_BGR32 = 1<<1 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_YUY2 = 1<<2 | AVS_CS_YUV | AVS_CS_INTERLEAVED,
|
||||
// AVS_CS_YV12 = 1<<3 Reserved
|
||||
// AVS_CS_I420 = 1<<4 Reserved
|
||||
AVS_CS_RAW32 = 1<<5 | AVS_CS_INTERLEAVED,
|
||||
|
||||
AVS_CS_YV24 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_1, // YVU 4:4:4 planar
|
||||
AVS_CS_YV16 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_2, // YVU 4:2:2 planar
|
||||
AVS_CS_YV12 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_2 | AVS_CS_SUB_WIDTH_2, // YVU 4:2:0 planar
|
||||
AVS_CS_I420 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_UPLANEFIRST | AVS_CS_SUB_HEIGHT_2 | AVS_CS_SUB_WIDTH_2, // YUV 4:2:0 planar
|
||||
AVS_CS_IYUV = AVS_CS_I420,
|
||||
AVS_CS_YV411 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_4, // YVU 4:1:1 planar
|
||||
AVS_CS_YUV9 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_4 | AVS_CS_SUB_WIDTH_4, // YVU 4:1:0 planar
|
||||
AVS_CS_Y8 = AVS_CS_PLANAR | AVS_CS_INTERLEAVED | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 // Y 4:0:0 planar
|
||||
};
|
||||
|
||||
enum {
|
||||
AVS_IT_BFF = 1<<0,
|
||||
AVS_IT_TFF = 1<<1,
|
||||
AVS_IT_FIELDBASED = 1<<2};
|
||||
|
||||
enum {
|
||||
AVS_FILTER_TYPE=1,
|
||||
AVS_FILTER_INPUT_COLORSPACE=2,
|
||||
AVS_FILTER_OUTPUT_TYPE=9,
|
||||
AVS_FILTER_NAME=4,
|
||||
AVS_FILTER_AUTHOR=5,
|
||||
AVS_FILTER_VERSION=6,
|
||||
AVS_FILTER_ARGS=7,
|
||||
AVS_FILTER_ARGS_INFO=8,
|
||||
AVS_FILTER_ARGS_DESCRIPTION=10,
|
||||
AVS_FILTER_DESCRIPTION=11};
|
||||
|
||||
enum { //SUBTYPES
|
||||
AVS_FILTER_TYPE_AUDIO=1,
|
||||
AVS_FILTER_TYPE_VIDEO=2,
|
||||
AVS_FILTER_OUTPUT_TYPE_SAME=3,
|
||||
AVS_FILTER_OUTPUT_TYPE_DIFFERENT=4};
|
||||
|
||||
enum {
|
||||
AVS_CACHE_NOTHING=0,
|
||||
AVS_CACHE_RANGE=1,
|
||||
AVS_CACHE_ALL=2,
|
||||
AVS_CACHE_AUDIO=3,
|
||||
AVS_CACHE_AUDIO_NONE=4,
|
||||
AVS_CACHE_AUDIO_AUTO=5
|
||||
};
|
||||
|
||||
#define AVS_FRAME_ALIGN 16
|
||||
|
||||
typedef struct AVS_Clip AVS_Clip;
|
||||
typedef struct AVS_ScriptEnvironment AVS_ScriptEnvironment;
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_VideoInfo
|
||||
//
|
||||
|
||||
// AVS_VideoInfo is laid out identically to VideoInfo
|
||||
typedef struct AVS_VideoInfo {
|
||||
int width, height; // width=0 means no video
|
||||
unsigned fps_numerator, fps_denominator;
|
||||
int num_frames;
|
||||
|
||||
int pixel_type;
|
||||
|
||||
int audio_samples_per_second; // 0 means no audio
|
||||
int sample_type;
|
||||
INT64 num_audio_samples;
|
||||
int nchannels;
|
||||
|
||||
// Imagetype properties
|
||||
|
||||
int image_type;
|
||||
} AVS_VideoInfo;
|
||||
|
||||
// useful functions of the above
|
||||
AVSC_INLINE int avs_has_video(const AVS_VideoInfo * p)
|
||||
{ return (p->width!=0); }
|
||||
|
||||
AVSC_INLINE int avs_has_audio(const AVS_VideoInfo * p)
|
||||
{ return (p->audio_samples_per_second!=0); }
|
||||
|
||||
AVSC_INLINE int avs_is_rgb(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type&AVS_CS_BGR); }
|
||||
|
||||
AVSC_INLINE int avs_is_rgb24(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type&AVS_CS_BGR24)==AVS_CS_BGR24; } // Clear out additional properties
|
||||
|
||||
AVSC_INLINE int avs_is_rgb32(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_BGR32) == AVS_CS_BGR32 ; }
|
||||
|
||||
AVSC_INLINE int avs_is_yuv(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type&AVS_CS_YUV ); }
|
||||
|
||||
AVSC_INLINE int avs_is_yuy2(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_YUY2) == AVS_CS_YUY2; }
|
||||
|
||||
AVSC_INLINE int avs_is_yv24(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_YV24 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_INLINE int avs_is_yv16(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_YV16 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_INLINE int avs_is_yv12(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_YV12 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_INLINE int avs_is_yv411(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_YV411 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_INLINE int avs_is_y8(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_Y8 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_INLINE int avs_is_property(const AVS_VideoInfo * p, int property)
|
||||
{ return ((p->pixel_type & property)==property ); }
|
||||
|
||||
AVSC_INLINE int avs_is_planar(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type & AVS_CS_PLANAR); }
|
||||
|
||||
AVSC_INLINE int avs_is_color_space(const AVS_VideoInfo * p, int c_space)
|
||||
{ return avs_is_planar(p) ? ((p->pixel_type & AVS_CS_PLANAR_MASK) == (c_space & AVS_CS_PLANAR_FILTER)) : ((p->pixel_type & c_space) == c_space); }
|
||||
|
||||
AVSC_INLINE int avs_is_field_based(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_FIELDBASED); }
|
||||
|
||||
AVSC_INLINE int avs_is_parity_known(const AVS_VideoInfo * p)
|
||||
{ return ((p->image_type & AVS_IT_FIELDBASED)&&(p->image_type & (AVS_IT_BFF | AVS_IT_TFF))); }
|
||||
|
||||
AVSC_INLINE int avs_is_bff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_BFF); }
|
||||
|
||||
AVSC_INLINE int avs_is_tff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_TFF); }
|
||||
|
||||
AVSC_INLINE int avs_bits_per_pixel(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->pixel_type) {
|
||||
case AVS_CS_BGR24: return 24;
|
||||
case AVS_CS_BGR32: return 32;
|
||||
case AVS_CS_YUY2: return 16;
|
||||
case AVS_CS_YV12:
|
||||
case AVS_CS_I420: return 12;
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_from_pixels(const AVS_VideoInfo * p, int pixels)
|
||||
{ return pixels * (avs_bits_per_pixel(p)>>3); } // Will work on planar images, but will return only luma planes
|
||||
|
||||
AVSC_INLINE int avs_row_size(const AVS_VideoInfo * p)
|
||||
{ return avs_bytes_from_pixels(p,p->width); } // Also only returns first plane on planar images
|
||||
|
||||
AVSC_INLINE int avs_bmp_size(const AVS_VideoInfo * vi)
|
||||
{ if (avs_is_planar(vi)) {int p = vi->height * ((avs_row_size(vi)+3) & ~3); p+=p>>1; return p; } return vi->height * ((avs_row_size(vi)+3) & ~3); }
|
||||
|
||||
AVSC_INLINE int avs_samples_per_second(const AVS_VideoInfo * p)
|
||||
{ return p->audio_samples_per_second; }
|
||||
|
||||
|
||||
AVSC_INLINE int avs_bytes_per_channel_sample(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->sample_type) {
|
||||
case AVS_SAMPLE_INT8: return sizeof(signed char);
|
||||
case AVS_SAMPLE_INT16: return sizeof(signed short);
|
||||
case AVS_SAMPLE_INT24: return 3;
|
||||
case AVS_SAMPLE_INT32: return sizeof(signed int);
|
||||
case AVS_SAMPLE_FLOAT: return sizeof(float);
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_per_audio_sample(const AVS_VideoInfo * p)
|
||||
{ return p->nchannels*avs_bytes_per_channel_sample(p);}
|
||||
|
||||
AVSC_INLINE INT64 avs_audio_samples_from_frames(const AVS_VideoInfo * p, INT64 frames)
|
||||
{ return ((INT64)(frames) * p->audio_samples_per_second * p->fps_denominator / p->fps_numerator); }
|
||||
|
||||
AVSC_INLINE int avs_frames_from_audio_samples(const AVS_VideoInfo * p, INT64 samples)
|
||||
{ return (int)(samples * (INT64)p->fps_numerator / (INT64)p->fps_denominator / (INT64)p->audio_samples_per_second); }
|
||||
|
||||
AVSC_INLINE INT64 avs_audio_samples_from_bytes(const AVS_VideoInfo * p, INT64 bytes)
|
||||
{ return bytes / avs_bytes_per_audio_sample(p); }
|
||||
|
||||
AVSC_INLINE INT64 avs_bytes_from_audio_samples(const AVS_VideoInfo * p, INT64 samples)
|
||||
{ return samples * avs_bytes_per_audio_sample(p); }
|
||||
|
||||
AVSC_INLINE int avs_audio_channels(const AVS_VideoInfo * p)
|
||||
{ return p->nchannels; }
|
||||
|
||||
AVSC_INLINE int avs_sample_type(const AVS_VideoInfo * p)
|
||||
{ return p->sample_type;}
|
||||
|
||||
// useful mutator
|
||||
AVSC_INLINE void avs_set_property(AVS_VideoInfo * p, int property)
|
||||
{ p->image_type|=property; }
|
||||
|
||||
AVSC_INLINE void avs_clear_property(AVS_VideoInfo * p, int property)
|
||||
{ p->image_type&=~property; }
|
||||
|
||||
AVSC_INLINE void avs_set_field_based(AVS_VideoInfo * p, int isfieldbased)
|
||||
{ if (isfieldbased) p->image_type|=AVS_IT_FIELDBASED; else p->image_type&=~AVS_IT_FIELDBASED; }
|
||||
|
||||
AVSC_INLINE void avs_set_fps(AVS_VideoInfo * p, unsigned numerator, unsigned denominator)
|
||||
{
|
||||
unsigned x=numerator, y=denominator;
|
||||
while (y) { // find gcd
|
||||
unsigned t = x%y; x = y; y = t;
|
||||
}
|
||||
p->fps_numerator = numerator/x;
|
||||
p->fps_denominator = denominator/x;
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_same_colorspace(AVS_VideoInfo * x, AVS_VideoInfo * y)
|
||||
{
|
||||
return (x->pixel_type == y->pixel_type)
|
||||
|| (avs_is_yv12(x) && avs_is_yv12(y));
|
||||
}
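A hedged sketch of inspecting a clip's properties with the inline helpers above; it assumes this header and <stdio.h> are included, and the AVS_VideoInfo pointer would normally come from avs_get_video_info():

static void describe_clip(const AVS_VideoInfo *vi)
{
    if (avs_has_video(vi))
        printf("%dx%d, %u/%u fps, %d bpp, %s\n",
               vi->width, vi->height,
               vi->fps_numerator, vi->fps_denominator,
               avs_bits_per_pixel(vi),
               avs_is_planar(vi) ? "planar" : "interleaved");
    if (avs_has_audio(vi))
        printf("%d Hz, %d channel(s)\n",
               avs_samples_per_second(vi), avs_audio_channels(vi));
}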
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_VideoFrame
|
||||
//
|
||||
|
||||
// VideoFrameBuffer holds information about a memory block which is used
|
||||
// for video data. For efficiency, instances of this class are not deleted
|
||||
// when the refcount reaches zero; instead they're stored in a linked list
|
||||
// to be reused. The instances are deleted when the corresponding AVS
|
||||
// file is closed.
|
||||
|
||||
// AVS_VideoFrameBuffer is laid out identically to VideoFrameBuffer
|
||||
// DO NOT USE THIS STRUCTURE DIRECTLY
|
||||
typedef struct AVS_VideoFrameBuffer {
|
||||
BYTE * data;
|
||||
int data_size;
|
||||
// sequence_number is incremented every time the buffer is changed, so
|
||||
// that stale views can tell they're no longer valid.
|
||||
volatile long sequence_number;
|
||||
|
||||
volatile long refcount;
|
||||
} AVS_VideoFrameBuffer;
|
||||
|
||||
// VideoFrame holds a "window" into a VideoFrameBuffer.
|
||||
|
||||
// AVS_VideoFrame is laid out identically to IVideoFrame
|
||||
// DO NOT USE THIS STRUCTURE DIRECTLY
|
||||
typedef struct AVS_VideoFrame {
|
||||
volatile long refcount;
|
||||
AVS_VideoFrameBuffer * vfb;
|
||||
int offset, pitch, row_size, height, offsetU, offsetV, pitchUV; // U&V offsets are from top of picture.
|
||||
int row_sizeUV, heightUV;
|
||||
} AVS_VideoFrame;
|
||||
|
||||
// Access functions for AVS_VideoFrame
|
||||
AVSC_INLINE int avs_get_pitch(const AVS_VideoFrame * p) {
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_pitch_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V: return p->pitchUV;}
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_row_size(const AVS_VideoFrame * p) {
|
||||
return p->row_size; }
|
||||
|
||||
AVSC_INLINE int avs_get_row_size_p(const AVS_VideoFrame * p, int plane) {
|
||||
int r;
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->row_sizeUV;
|
||||
else return 0;
|
||||
case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED:
|
||||
if (p->pitchUV) {
|
||||
r = (p->row_sizeUV+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r < p->pitchUV)
|
||||
return r;
|
||||
return p->row_sizeUV;
|
||||
} else return 0;
|
||||
case AVS_PLANAR_Y_ALIGNED:
|
||||
r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r <= p->pitch)
|
||||
return r;
|
||||
return p->row_size;
|
||||
}
|
||||
return p->row_size;
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_get_height(const AVS_VideoFrame * p) {
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE int avs_get_height_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->heightUV;
|
||||
return 0;
|
||||
}
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE const BYTE* avs_get_read_ptr(const AVS_VideoFrame * p) {
|
||||
return p->vfb->data + p->offset;}
|
||||
|
||||
AVSC_INLINE const BYTE* avs_get_read_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;}
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_writable(const AVS_VideoFrame * p) {
|
||||
return (p->refcount == 1 && p->vfb->refcount == 1);}
|
||||
|
||||
AVSC_INLINE BYTE* avs_get_write_ptr(const AVS_VideoFrame * p)
|
||||
{
|
||||
if (avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else
|
||||
return 0;
|
||||
}
|
||||
|
||||
AVSC_INLINE BYTE* avs_get_write_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
if (plane==AVS_PLANAR_Y && avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else if (plane==AVS_PLANAR_Y) {
|
||||
return 0;
|
||||
} else {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
AVSC_API(void, avs_release_video_frame)(AVS_VideoFrame *);
|
||||
// makes a shallow copy of a video frame
|
||||
AVSC_API(AVS_VideoFrame *, avs_copy_video_frame)(AVS_VideoFrame *);
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE void avs_release_frame(AVS_VideoFrame * f)
|
||||
{avs_release_video_frame(f);}
|
||||
AVSC_INLINE AVS_VideoFrame * avs_copy_frame(AVS_VideoFrame * f)
|
||||
{return avs_copy_video_frame(f);}
|
||||
#endif
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_Value
|
||||
//
|
||||
|
||||
// Treat AVS_Value as a fat pointer. That is, use avs_copy_value
// and avs_release_value appropriately, as you would if AVS_Value were
// a pointer.
|
||||
|
||||
// To maintain source code compatibility with future versions of the
|
||||
// avisynth_c API don't use the AVS_Value directly. Use the helper
|
||||
// functions below.
|
||||
|
||||
// AVS_Value is laid out identically to AVSValue
|
||||
typedef struct AVS_Value AVS_Value;
|
||||
struct AVS_Value {
|
||||
short type; // 'a'rray, 'c'lip, 'b'ool, 'i'nt, 'f'loat, 's'tring, 'v'oid, or 'l'ong
|
||||
// for some function e'rror
|
||||
short array_size;
|
||||
union {
|
||||
void * clip; // do not use directly, use avs_take_clip
|
||||
char boolean;
|
||||
int integer;
|
||||
float floating_pt;
|
||||
const char * string;
|
||||
const AVS_Value * array;
|
||||
} d;
|
||||
};
|
||||
|
||||
// AVS_Value should be initialized with avs_void.
// It should also be set back to avs_void after the value is released
// with avs_copy_value. Consider it the equivalent of setting
// a pointer to NULL.
|
||||
static const AVS_Value avs_void = {'v'};
|
||||
|
||||
AVSC_API(void, avs_copy_value)(AVS_Value * dest, AVS_Value src);
|
||||
AVSC_API(void, avs_release_value)(AVS_Value);
|
||||
|
||||
AVSC_INLINE int avs_defined(AVS_Value v) { return v.type != 'v'; }
|
||||
AVSC_INLINE int avs_is_clip(AVS_Value v) { return v.type == 'c'; }
|
||||
AVSC_INLINE int avs_is_bool(AVS_Value v) { return v.type == 'b'; }
|
||||
AVSC_INLINE int avs_is_int(AVS_Value v) { return v.type == 'i'; }
|
||||
AVSC_INLINE int avs_is_float(AVS_Value v) { return v.type == 'f' || v.type == 'i'; }
|
||||
AVSC_INLINE int avs_is_string(AVS_Value v) { return v.type == 's'; }
|
||||
AVSC_INLINE int avs_is_array(AVS_Value v) { return v.type == 'a'; }
|
||||
AVSC_INLINE int avs_is_error(AVS_Value v) { return v.type == 'e'; }
|
||||
|
||||
AVSC_API(AVS_Clip *, avs_take_clip)(AVS_Value, AVS_ScriptEnvironment *);
|
||||
AVSC_API(void, avs_set_to_clip)(AVS_Value *, AVS_Clip *);
|
||||
|
||||
AVSC_INLINE int avs_as_bool(AVS_Value v)
|
||||
{ return v.d.boolean; }
|
||||
AVSC_INLINE int avs_as_int(AVS_Value v)
|
||||
{ return v.d.integer; }
|
||||
AVSC_INLINE const char * avs_as_string(AVS_Value v)
|
||||
{ return avs_is_error(v) || avs_is_string(v) ? v.d.string : 0; }
|
||||
AVSC_INLINE double avs_as_float(AVS_Value v)
|
||||
{ return avs_is_int(v) ? v.d.integer : v.d.floating_pt; }
|
||||
AVSC_INLINE const char * avs_as_error(AVS_Value v)
|
||||
{ return avs_is_error(v) ? v.d.string : 0; }
|
||||
AVSC_INLINE const AVS_Value * avs_as_array(AVS_Value v)
|
||||
{ return v.d.array; }
|
||||
AVSC_INLINE int avs_array_size(AVS_Value v)
|
||||
{ return avs_is_array(v) ? v.array_size : 1; }
|
||||
AVSC_INLINE AVS_Value avs_array_elt(AVS_Value v, int index)
|
||||
{ return avs_is_array(v) ? v.d.array[index] : v; }
|
||||
|
||||
// only use these functions on an AVS_Value that does not already have
|
||||
// an active value. Remember, treat AVS_Value as a fat pointer.
|
||||
AVSC_INLINE AVS_Value avs_new_value_bool(int v0)
|
||||
{ AVS_Value v; v.type = 'b'; v.d.boolean = v0 == 0 ? 0 : 1; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_int(int v0)
|
||||
{ AVS_Value v; v.type = 'i'; v.d.integer = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_string(const char * v0)
|
||||
{ AVS_Value v; v.type = 's'; v.d.string = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_float(float v0)
|
||||
{ AVS_Value v; v.type = 'f'; v.d.floating_pt = v0; return v;}
|
||||
AVSC_INLINE AVS_Value avs_new_value_error(const char * v0)
|
||||
{ AVS_Value v; v.type = 'e'; v.d.string = v0; return v; }
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE AVS_Value avs_new_value_clip(AVS_Clip * v0)
|
||||
{ AVS_Value v; avs_set_to_clip(&v, v0); return v; }
|
||||
#endif
|
||||
AVSC_INLINE AVS_Value avs_new_value_array(AVS_Value * v0, int size)
|
||||
{ AVS_Value v; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
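A hedged sketch of building an argument array with the constructors above and invoking a named script function; "BlankClip" and its argument names are only examples, and <stdio.h> plus this header are assumed:

static AVS_Value make_blank_clip(AVS_ScriptEnvironment *env)
{
    AVS_Value args[2], arr, ret;
    const char *names[2] = { "length", "pixel_type" };

    args[0] = avs_new_value_int(240);
    args[1] = avs_new_value_string("YV12");
    arr     = avs_new_value_array(args, 2);

    ret = avs_invoke(env, "BlankClip", arr, names);
    if (avs_is_error(ret))
        fprintf(stderr, "invoke failed: %s\n", avs_as_error(ret));

    /* the caller owns ret and must eventually avs_release_value() it */
    return ret;
}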
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_Clip
|
||||
//
|
||||
|
||||
AVSC_API(void, avs_release_clip)(AVS_Clip *);
|
||||
AVSC_API(AVS_Clip *, avs_copy_clip)(AVS_Clip *);
|
||||
|
||||
AVSC_API(const char *, avs_clip_get_error)(AVS_Clip *); // return 0 if no error
|
||||
|
||||
AVSC_API(const AVS_VideoInfo *, avs_get_video_info)(AVS_Clip *);
|
||||
|
||||
AVSC_API(int, avs_get_version)(AVS_Clip *);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_get_frame)(AVS_Clip *, int n);
|
||||
// The returned video frame must be released with avs_release_video_frame
|
||||
|
||||
AVSC_API(int, avs_get_parity)(AVS_Clip *, int n);
|
||||
// return field parity if field_based, else parity of first field in frame
|
||||
|
||||
AVSC_API(int, avs_get_audio)(AVS_Clip *, void * buf,
|
||||
INT64 start, INT64 count);
|
||||
// start and count are in samples
|
||||
|
||||
AVSC_API(int, avs_set_cache_hints)(AVS_Clip *,
|
||||
int cachehints, int frame_range);
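A hedged sketch of the typical frame loop built on the clip API above: fetch each frame, read the luma plane through the accessor functions, and release the frame again. The consumer step is left as a comment:

static void read_frames(AVS_Clip *clip)
{
    const AVS_VideoInfo *vi = avs_get_video_info(clip);
    int n;

    for (n = 0; n < vi->num_frames; n++) {
        AVS_VideoFrame *frame = avs_get_frame(clip, n);
        const BYTE *y     = avs_get_read_ptr_p(frame, AVS_PLANAR_Y);
        int         pitch = avs_get_pitch_p(frame, AVS_PLANAR_Y);
        int         rows  = avs_get_height_p(frame, AVS_PLANAR_Y);
        /* consume 'rows' rows of 'pitch' bytes starting at 'y' here */
        (void)y; (void)pitch; (void)rows;
        avs_release_video_frame(frame);
    }
}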
|
||||
|
||||
// This is the callback type used by avs_add_function
|
||||
typedef AVS_Value (AVSC_CC * AVS_ApplyFunc)
|
||||
(AVS_ScriptEnvironment *, AVS_Value args, void * user_data);
|
||||
|
||||
typedef struct AVS_FilterInfo AVS_FilterInfo;
|
||||
struct AVS_FilterInfo
|
||||
{
|
||||
// these members should not be modified outside of the AVS_ApplyFunc callback
|
||||
AVS_Clip * child;
|
||||
AVS_VideoInfo vi;
|
||||
AVS_ScriptEnvironment * env;
|
||||
AVS_VideoFrame * (AVSC_CC * get_frame)(AVS_FilterInfo *, int n);
|
||||
int (AVSC_CC * get_parity)(AVS_FilterInfo *, int n);
|
||||
int (AVSC_CC * get_audio)(AVS_FilterInfo *, void * buf,
|
||||
INT64 start, INT64 count);
|
||||
int (AVSC_CC * set_cache_hints)(AVS_FilterInfo *, int cachehints,
|
||||
int frame_range);
|
||||
void (AVSC_CC * free_filter)(AVS_FilterInfo *);
|
||||
|
||||
// Should be set whenever there is an error to report.
|
||||
// It is cleared before any of the above methods are called
|
||||
const char * error;
|
||||
// this is to store whatever and may be modified at will
|
||||
void * user_data;
|
||||
};
|
||||
|
||||
// Create a new filter
|
||||
// fi is set to point to the AVS_FilterInfo so that you can
|
||||
// modify it once it is initialized.
|
||||
// store_child should generally be set to true. If it is not
// set, then ALL methods (the function pointers) must be defined.
// If it is set, then you do not need to worry about freeing the child
// clip.
|
||||
AVSC_API(AVS_Clip *, avs_new_c_filter)(AVS_ScriptEnvironment * e,
|
||||
AVS_FilterInfo * * fi,
|
||||
AVS_Value child, int store_child);
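A hedged sketch of a pass-through filter created inside an AVS_ApplyFunc callback, relying on store_child=1 so unset callbacks fall back to the child as described above; a real plugin would register create_passthrough() from avisynth_c_plugin_init() via avs_add_function():

static AVS_VideoFrame * AVSC_CC my_get_frame(AVS_FilterInfo *fi, int n)
{
    return avs_get_frame(fi->child, n);   /* just forward the request */
}

static AVS_Value AVSC_CC create_passthrough(AVS_ScriptEnvironment *env,
                                            AVS_Value args, void *user_data)
{
    AVS_FilterInfo *fi;
    AVS_Clip *clip = avs_new_c_filter(env, &fi, avs_array_elt(args, 0), 1);
    AVS_Value ret;

    fi->get_frame = my_get_frame;

    avs_set_to_clip(&ret, clip);
    avs_release_clip(clip);   /* ret now holds the reference */
    return ret;
}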
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_ScriptEnvironment
|
||||
//
|
||||
|
||||
// For GetCPUFlags. These are backwards-compatible with those in VirtualDub.
|
||||
enum {
|
||||
/* slowest CPU to support extension */
|
||||
AVS_CPU_FORCE = 0x01, // N/A
|
||||
AVS_CPU_FPU = 0x02, // 386/486DX
|
||||
AVS_CPU_MMX = 0x04, // P55C, K6, PII
|
||||
AVS_CPU_INTEGER_SSE = 0x08, // PIII, Athlon
|
||||
AVS_CPU_SSE = 0x10, // PIII, Athlon XP/MP
|
||||
AVS_CPU_SSE2 = 0x20, // PIV, Hammer
|
||||
AVS_CPU_3DNOW = 0x40, // K6-2
|
||||
AVS_CPU_3DNOW_EXT = 0x80, // Athlon
|
||||
AVS_CPU_X86_64 = 0xA0, // Hammer (note: equiv. to 3DNow + SSE2,
|
||||
// which only Hammer will have anyway)
|
||||
AVS_CPUF_SSE3 = 0x100, // PIV+, K8 Venice
|
||||
AVS_CPUF_SSSE3 = 0x200, // Core 2
|
||||
AVS_CPUF_SSE4 = 0x400, // Penryn, Wolfdale, Yorkfield
|
||||
AVS_CPUF_SSE4_1 = 0x400,
|
||||
AVS_CPUF_SSE4_2 = 0x800, // Nehalem
|
||||
};
|
||||
|
||||
AVSC_API(const char *, avs_get_error)(AVS_ScriptEnvironment *); // return 0 if no error
|
||||
|
||||
AVSC_API(long, avs_get_cpu_flags)(AVS_ScriptEnvironment *);
|
||||
AVSC_API(int, avs_check_version)(AVS_ScriptEnvironment *, int version);
|
||||
|
||||
AVSC_API(char *, avs_save_string)(AVS_ScriptEnvironment *, const char* s, int length);
|
||||
AVSC_API(char *, avs_sprintf)(AVS_ScriptEnvironment *, const char * fmt, ...);
|
||||
|
||||
AVSC_API(char *, avs_vsprintf)(AVS_ScriptEnvironment *, const char * fmt, void* val);
|
||||
// note: val is really a va_list; I hope everyone typedefs va_list to a pointer
|
||||
|
||||
AVSC_API(int, avs_add_function)(AVS_ScriptEnvironment *,
|
||||
const char * name, const char * params,
|
||||
AVS_ApplyFunc apply, void * user_data);
|
||||
|
||||
AVSC_API(int, avs_function_exists)(AVS_ScriptEnvironment *, const char * name);
|
||||
|
||||
AVSC_API(AVS_Value, avs_invoke)(AVS_ScriptEnvironment *, const char * name,
|
||||
AVS_Value args, const char** arg_names);
|
||||
// The returned value must be released with avs_release_value
|
||||
|
||||
AVSC_API(AVS_Value, avs_get_var)(AVS_ScriptEnvironment *, const char* name);
|
||||
// The returned value must be released with avs_release_value
|
||||
|
||||
AVSC_API(int, avs_set_var)(AVS_ScriptEnvironment *, const char* name, AVS_Value val);
|
||||
|
||||
AVSC_API(int, avs_set_global_var)(AVS_ScriptEnvironment *, const char* name, const AVS_Value val);
|
||||
|
||||
//void avs_push_context(AVS_ScriptEnvironment *, int level=0);
|
||||
//void avs_pop_context(AVS_ScriptEnvironment *);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_new_video_frame_a)(AVS_ScriptEnvironment *,
|
||||
const AVS_VideoInfo * vi, int align);
|
||||
// align should be at least 16
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_video_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
#endif
|
||||
|
||||
|
||||
AVSC_API(int, avs_make_writable)(AVS_ScriptEnvironment *, AVS_VideoFrame * * pvf);
|
||||
|
||||
AVSC_API(void, avs_bit_blt)(AVS_ScriptEnvironment *, BYTE* dstp, int dst_pitch, const BYTE* srcp, int src_pitch, int row_size, int height);
|
||||
|
||||
typedef void (AVSC_CC *AVS_ShutdownFunc)(void* user_data, AVS_ScriptEnvironment * env);
|
||||
AVSC_API(void, avs_at_exit)(AVS_ScriptEnvironment *, AVS_ShutdownFunc function, void * user_data);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_subframe)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height);
|
||||
// The returned video frame must be released
|
||||
|
||||
AVSC_API(int, avs_set_memory_max)(AVS_ScriptEnvironment *, int mem);
|
||||
|
||||
AVSC_API(int, avs_set_working_dir)(AVS_ScriptEnvironment *, const char * newdir);
|
||||
|
||||
// avisynth.dll exports this; it's a way to use it as a library, without
|
||||
// writing an AVS script or without going through AVIFile.
|
||||
AVSC_API(AVS_ScriptEnvironment *, avs_create_script_environment)(int version);
|
||||
|
||||
// this symbol is the entry point for the plugin and must
|
||||
// be defined
|
||||
AVSC_EXPORT
|
||||
const char * AVSC_CC avisynth_c_plugin_init(AVS_ScriptEnvironment* env);
|
||||
|
||||
|
||||
AVSC_API(void, avs_delete_script_environment)(AVS_ScriptEnvironment *);
|
||||
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_subframe_planar)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height, int rel_offsetU, int rel_offsetV, int new_pitchUV);
|
||||
// The returned video frame must be released
|
||||
|
||||
#ifdef AVSC_NO_DECLSPEC
|
||||
// use LoadLibrary and related functions to dynamically load Avisynth instead of declspec(dllimport)
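A hedged sketch of how a client might drive the avs_load_library() helper defined further down in this block; library-name resolution and error reporting are deliberately simplified:

static int open_avisynth(void)
{
    AVS_Library *avs = avs_load_library();
    AVS_ScriptEnvironment *env;

    if (!avs)
        return -1;   /* DLL not found or an entry point missing */

    env = avs->avs_create_script_environment(AVISYNTH_INTERFACE_VERSION);
    if (avs->avs_get_error(env))
        return -1;

    /* ... use avs->avs_invoke(), avs->avs_get_frame(), etc. here ... */

    avs->avs_delete_script_environment(env);
    return 0;
}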
|
||||
/*
|
||||
The following functions need to have been declared, probably from windows.h
|
||||
|
||||
void* malloc(size_t)
|
||||
void free(void*);
|
||||
|
||||
HMODULE LoadLibrary(const char*);
|
||||
void* GetProcAddress(HMODULE, const char*);
|
||||
FreeLibrary(HMODULE);
|
||||
*/
|
||||
|
||||
|
||||
typedef struct AVS_Library AVS_Library;
|
||||
|
||||
#define AVSC_DECLARE_FUNC(name) name##_func name
|
||||
|
||||
struct AVS_Library {
|
||||
HMODULE handle;
|
||||
|
||||
AVSC_DECLARE_FUNC(avs_add_function);
|
||||
AVSC_DECLARE_FUNC(avs_at_exit);
|
||||
AVSC_DECLARE_FUNC(avs_bit_blt);
|
||||
AVSC_DECLARE_FUNC(avs_check_version);
|
||||
AVSC_DECLARE_FUNC(avs_clip_get_error);
|
||||
AVSC_DECLARE_FUNC(avs_copy_clip);
|
||||
AVSC_DECLARE_FUNC(avs_copy_value);
|
||||
AVSC_DECLARE_FUNC(avs_copy_video_frame);
|
||||
AVSC_DECLARE_FUNC(avs_create_script_environment);
|
||||
AVSC_DECLARE_FUNC(avs_delete_script_environment);
|
||||
AVSC_DECLARE_FUNC(avs_function_exists);
|
||||
AVSC_DECLARE_FUNC(avs_get_audio);
|
||||
AVSC_DECLARE_FUNC(avs_get_cpu_flags);
|
||||
AVSC_DECLARE_FUNC(avs_get_error);
|
||||
AVSC_DECLARE_FUNC(avs_get_frame);
|
||||
AVSC_DECLARE_FUNC(avs_get_parity);
|
||||
AVSC_DECLARE_FUNC(avs_get_var);
|
||||
AVSC_DECLARE_FUNC(avs_get_version);
|
||||
AVSC_DECLARE_FUNC(avs_get_video_info);
|
||||
AVSC_DECLARE_FUNC(avs_invoke);
|
||||
AVSC_DECLARE_FUNC(avs_make_writable);
|
||||
AVSC_DECLARE_FUNC(avs_new_c_filter);
|
||||
AVSC_DECLARE_FUNC(avs_new_video_frame_a);
|
||||
AVSC_DECLARE_FUNC(avs_release_clip);
|
||||
AVSC_DECLARE_FUNC(avs_release_value);
|
||||
AVSC_DECLARE_FUNC(avs_release_video_frame);
|
||||
AVSC_DECLARE_FUNC(avs_save_string);
|
||||
AVSC_DECLARE_FUNC(avs_set_cache_hints);
|
||||
AVSC_DECLARE_FUNC(avs_set_global_var);
|
||||
AVSC_DECLARE_FUNC(avs_set_memory_max);
|
||||
AVSC_DECLARE_FUNC(avs_set_to_clip);
|
||||
AVSC_DECLARE_FUNC(avs_set_var);
|
||||
AVSC_DECLARE_FUNC(avs_set_working_dir);
|
||||
AVSC_DECLARE_FUNC(avs_sprintf);
|
||||
AVSC_DECLARE_FUNC(avs_subframe);
|
||||
AVSC_DECLARE_FUNC(avs_subframe_planar);
|
||||
AVSC_DECLARE_FUNC(avs_take_clip);
|
||||
AVSC_DECLARE_FUNC(avs_vsprintf);
|
||||
};
|
||||
|
||||
#undef AVSC_DECLARE_FUNC
|
||||
|
||||
|
||||
AVSC_INLINE AVS_Library * avs_load_library() {
|
||||
AVS_Library *library = (AVS_Library *)malloc(sizeof(AVS_Library));
|
||||
if (library == NULL)
|
||||
return NULL;
|
||||
library->handle = LoadLibrary("avisynth");
|
||||
if (library->handle == NULL)
|
||||
goto fail;
|
||||
|
||||
#define __AVSC_STRINGIFY(x) #x
|
||||
#define AVSC_STRINGIFY(x) __AVSC_STRINGIFY(x)
|
||||
#define AVSC_LOAD_FUNC(name) {\
|
||||
library->name = (name##_func) GetProcAddress(library->handle, AVSC_STRINGIFY(name));\
|
||||
if (library->name == NULL)\
|
||||
goto fail;\
|
||||
}
|
||||
|
||||
AVSC_LOAD_FUNC(avs_add_function);
|
||||
AVSC_LOAD_FUNC(avs_at_exit);
|
||||
AVSC_LOAD_FUNC(avs_bit_blt);
|
||||
AVSC_LOAD_FUNC(avs_check_version);
|
||||
AVSC_LOAD_FUNC(avs_clip_get_error);
|
||||
AVSC_LOAD_FUNC(avs_copy_clip);
|
||||
AVSC_LOAD_FUNC(avs_copy_value);
|
||||
AVSC_LOAD_FUNC(avs_copy_video_frame);
|
||||
AVSC_LOAD_FUNC(avs_create_script_environment);
|
||||
AVSC_LOAD_FUNC(avs_delete_script_environment);
|
||||
AVSC_LOAD_FUNC(avs_function_exists);
|
||||
AVSC_LOAD_FUNC(avs_get_audio);
|
||||
AVSC_LOAD_FUNC(avs_get_cpu_flags);
|
||||
AVSC_LOAD_FUNC(avs_get_error);
|
||||
AVSC_LOAD_FUNC(avs_get_frame);
|
||||
AVSC_LOAD_FUNC(avs_get_parity);
|
||||
AVSC_LOAD_FUNC(avs_get_var);
|
||||
AVSC_LOAD_FUNC(avs_get_version);
|
||||
AVSC_LOAD_FUNC(avs_get_video_info);
|
||||
AVSC_LOAD_FUNC(avs_invoke);
|
||||
AVSC_LOAD_FUNC(avs_make_writable);
|
||||
AVSC_LOAD_FUNC(avs_new_c_filter);
|
||||
AVSC_LOAD_FUNC(avs_new_video_frame_a);
|
||||
AVSC_LOAD_FUNC(avs_release_clip);
|
||||
AVSC_LOAD_FUNC(avs_release_value);
|
||||
AVSC_LOAD_FUNC(avs_release_video_frame);
|
||||
AVSC_LOAD_FUNC(avs_save_string);
|
||||
AVSC_LOAD_FUNC(avs_set_cache_hints);
|
||||
AVSC_LOAD_FUNC(avs_set_global_var);
|
||||
AVSC_LOAD_FUNC(avs_set_memory_max);
|
||||
AVSC_LOAD_FUNC(avs_set_to_clip);
|
||||
AVSC_LOAD_FUNC(avs_set_var);
|
||||
AVSC_LOAD_FUNC(avs_set_working_dir);
|
||||
AVSC_LOAD_FUNC(avs_sprintf);
|
||||
AVSC_LOAD_FUNC(avs_subframe);
|
||||
AVSC_LOAD_FUNC(avs_subframe_planar);
|
||||
AVSC_LOAD_FUNC(avs_take_clip);
|
||||
AVSC_LOAD_FUNC(avs_vsprintf);
|
||||
|
||||
#undef __AVSC_STRINGIFY
|
||||
#undef AVSC_STRINGIFY
|
||||
#undef AVSC_LOAD_FUNC
|
||||
|
||||
return library;
|
||||
|
||||
fail:
|
||||
free(library);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
AVSC_INLINE void avs_free_library(AVS_Library *library) {
|
||||
if (library == NULL)
|
||||
return;
|
||||
FreeLibrary(library->handle);
|
||||
free(library);
|
||||
}
|
||||
#endif
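/* Example (sketch): how a client might drive the function table above when
 * AVSC_NO_DECLSPEC is defined. Deliberately minimal and illustrative only:
 * "run_script" is not part of the API, error handling is trimmed, and the
 * AVS_Value helpers and AVISYNTH_INTERFACE_VERSION defined earlier in this
 * header are assumed. */
#include <stdio.h>

static int run_script(const char *path)
{
    AVS_Library *lib = avs_load_library();
    if (!lib)
        return -1;                       /* DLL missing or a symbol failed to resolve */

    AVS_ScriptEnvironment *env =
        lib->avs_create_script_environment(AVISYNTH_INTERFACE_VERSION);
    AVS_Value arg = avs_new_value_string(path);
    AVS_Value res = lib->avs_invoke(env, "Import", arg, NULL);

    if (avs_is_error(res))
        fprintf(stderr, "Import failed: %s\n", avs_as_error(res));

    lib->avs_release_value(res);         /* treat AVS_Value like a fat pointer */
    lib->avs_delete_script_environment(env);
    avs_free_library(lib);
    return 0;
}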
|
||||
|
||||
#endif
|
@@ -1,68 +0,0 @@
|
||||
// Copyright (c) 2011 FFmpegSource Project
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
|
||||
/* these are defines/functions that are used and were changed in the switch to 2.6
|
||||
* and are needed to maintain full compatibility with 2.5 */
|
||||
|
||||
enum {
|
||||
AVS_CS_YV12_25 = 1<<3 | AVS_CS_YUV | AVS_CS_PLANAR, // y-v-u, planar
|
||||
AVS_CS_I420_25 = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR, // y-u-v, planar
|
||||
};
|
||||
|
||||
AVSC_INLINE int avs_get_height_p_25(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane)
|
||||
{
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV)
|
||||
return p->height>>1;
|
||||
return 0;
|
||||
}
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE int avs_get_row_size_p_25(const AVS_VideoFrame * p, int plane) {
|
||||
int r;
|
||||
switch (plane)
|
||||
{
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV)
|
||||
return p->row_size>>1;
|
||||
else
|
||||
return 0;
|
||||
case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED:
|
||||
if (p->pitchUV)
|
||||
{
|
||||
r = ((p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)) )>>1; // Aligned rowsize
|
||||
if (r < p->pitchUV)
|
||||
return r;
|
||||
return p->row_size>>1;
|
||||
}
|
||||
else
|
||||
return 0;
|
||||
case AVS_PLANAR_Y_ALIGNED:
|
||||
r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r <= p->pitch)
|
||||
return r;
|
||||
return p->row_size;
|
||||
}
|
||||
return p->row_size;
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_yv12_25(const AVS_VideoInfo * p)
|
||||
{ return ((p->pixel_type & AVS_CS_YV12_25) == AVS_CS_YV12_25)||((p->pixel_type & AVS_CS_I420_25) == AVS_CS_I420_25); }
|
@@ -1,727 +0,0 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef __AVXSYNTH_C__
|
||||
#define __AVXSYNTH_C__
|
||||
|
||||
#include "windowsPorts/windows2linux.h"
|
||||
#include <stdarg.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
# define EXTERN_C extern "C"
|
||||
#else
|
||||
# define EXTERN_C
|
||||
#endif
|
||||
|
||||
#define AVSC_USE_STDCALL 1
|
||||
|
||||
#ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
#else
|
||||
# define AVSC_CC __stdcall
|
||||
#endif
|
||||
|
||||
#define AVSC_INLINE static __inline
|
||||
|
||||
#ifdef AVISYNTH_C_EXPORTS
|
||||
# define AVSC_EXPORT EXTERN_C
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllexport) ret AVSC_CC name
|
||||
#else
|
||||
# define AVSC_EXPORT EXTERN_C __declspec(dllexport)
|
||||
# ifndef AVSC_NO_DECLSPEC
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name
|
||||
# else
|
||||
# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifdef __GNUC__
|
||||
typedef long long int INT64;
|
||||
#else
|
||||
typedef __int64 INT64;
|
||||
#endif
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Constants
|
||||
//
|
||||
|
||||
#ifndef __AVXSYNTH_H__
|
||||
enum { AVISYNTH_INTERFACE_VERSION = 3 };
|
||||
#endif
|
||||
|
||||
enum {AVS_SAMPLE_INT8 = 1<<0,
|
||||
AVS_SAMPLE_INT16 = 1<<1,
|
||||
AVS_SAMPLE_INT24 = 1<<2,
|
||||
AVS_SAMPLE_INT32 = 1<<3,
|
||||
AVS_SAMPLE_FLOAT = 1<<4};
|
||||
|
||||
enum {AVS_PLANAR_Y=1<<0,
|
||||
AVS_PLANAR_U=1<<1,
|
||||
AVS_PLANAR_V=1<<2,
|
||||
AVS_PLANAR_ALIGNED=1<<3,
|
||||
AVS_PLANAR_Y_ALIGNED=AVS_PLANAR_Y|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_U_ALIGNED=AVS_PLANAR_U|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_V_ALIGNED=AVS_PLANAR_V|AVS_PLANAR_ALIGNED};
|
||||
|
||||
// Colorspace properties.
|
||||
enum {AVS_CS_BGR = 1<<28,
|
||||
AVS_CS_YUV = 1<<29,
|
||||
AVS_CS_INTERLEAVED = 1<<30,
|
||||
AVS_CS_PLANAR = 1<<31};
|
||||
|
||||
// Specific colorformats
|
||||
enum {
|
||||
AVS_CS_UNKNOWN = 0,
|
||||
AVS_CS_BGR24 = 1<<0 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_BGR32 = 1<<1 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_YUY2 = 1<<2 | AVS_CS_YUV | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_YV12 = 1<<3 | AVS_CS_YUV | AVS_CS_PLANAR, // y-v-u, planar
|
||||
AVS_CS_I420 = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR, // y-u-v, planar
|
||||
AVS_CS_IYUV = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR // same as above
|
||||
};
|
||||
|
||||
enum {
|
||||
AVS_IT_BFF = 1<<0,
|
||||
AVS_IT_TFF = 1<<1,
|
||||
AVS_IT_FIELDBASED = 1<<2};
|
||||
|
||||
enum {
|
||||
AVS_FILTER_TYPE=1,
|
||||
AVS_FILTER_INPUT_COLORSPACE=2,
|
||||
AVS_FILTER_OUTPUT_TYPE=9,
|
||||
AVS_FILTER_NAME=4,
|
||||
AVS_FILTER_AUTHOR=5,
|
||||
AVS_FILTER_VERSION=6,
|
||||
AVS_FILTER_ARGS=7,
|
||||
AVS_FILTER_ARGS_INFO=8,
|
||||
AVS_FILTER_ARGS_DESCRIPTION=10,
|
||||
AVS_FILTER_DESCRIPTION=11};
|
||||
|
||||
enum { //SUBTYPES
|
||||
AVS_FILTER_TYPE_AUDIO=1,
|
||||
AVS_FILTER_TYPE_VIDEO=2,
|
||||
AVS_FILTER_OUTPUT_TYPE_SAME=3,
|
||||
AVS_FILTER_OUTPUT_TYPE_DIFFERENT=4};
|
||||
|
||||
enum {
|
||||
AVS_CACHE_NOTHING=0,
|
||||
AVS_CACHE_RANGE=1,
|
||||
AVS_CACHE_ALL=2,
|
||||
AVS_CACHE_AUDIO=3,
|
||||
AVS_CACHE_AUDIO_NONE=4,
|
||||
AVS_CACHE_AUDIO_AUTO=5
|
||||
};
|
||||
|
||||
#define AVS_FRAME_ALIGN 16
|
||||
|
||||
typedef struct AVS_Clip AVS_Clip;
|
||||
typedef struct AVS_ScriptEnvironment AVS_ScriptEnvironment;
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_VideoInfo
|
||||
//
|
||||
|
||||
// AVS_VideoInfo is laid out identically to VideoInfo
|
||||
typedef struct AVS_VideoInfo {
|
||||
int width, height; // width=0 means no video
|
||||
unsigned fps_numerator, fps_denominator;
|
||||
int num_frames;
|
||||
|
||||
int pixel_type;
|
||||
|
||||
int audio_samples_per_second; // 0 means no audio
|
||||
int sample_type;
|
||||
INT64 num_audio_samples;
|
||||
int nchannels;
|
||||
|
||||
// Imagetype properties
|
||||
|
||||
int image_type;
|
||||
} AVS_VideoInfo;
|
||||
|
||||
// useful functions of the above
|
||||
AVSC_INLINE int avs_has_video(const AVS_VideoInfo * p)
|
||||
{ return (p->width!=0); }
|
||||
|
||||
AVSC_INLINE int avs_has_audio(const AVS_VideoInfo * p)
|
||||
{ return (p->audio_samples_per_second!=0); }
|
||||
|
||||
AVSC_INLINE int avs_is_rgb(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type&AVS_CS_BGR); }
|
||||
|
||||
AVSC_INLINE int avs_is_rgb24(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type&AVS_CS_BGR24)==AVS_CS_BGR24; } // Clear out additional properties
|
||||
|
||||
AVSC_INLINE int avs_is_rgb32(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_BGR32) == AVS_CS_BGR32 ; }
|
||||
|
||||
AVSC_INLINE int avs_is_yuv(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type&AVS_CS_YUV ); }
|
||||
|
||||
AVSC_INLINE int avs_is_yuy2(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_YUY2) == AVS_CS_YUY2; }
|
||||
|
||||
AVSC_INLINE int avs_is_yv12(const AVS_VideoInfo * p)
|
||||
{ return ((p->pixel_type & AVS_CS_YV12) == AVS_CS_YV12)||((p->pixel_type & AVS_CS_I420) == AVS_CS_I420); }
|
||||
|
||||
AVSC_INLINE int avs_is_color_space(const AVS_VideoInfo * p, int c_space)
|
||||
{ return ((p->pixel_type & c_space) == c_space); }
|
||||
|
||||
AVSC_INLINE int avs_is_property(const AVS_VideoInfo * p, int property)
|
||||
{ return ((p->pixel_type & property)==property ); }
|
||||
|
||||
AVSC_INLINE int avs_is_planar(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type & AVS_CS_PLANAR); }
|
||||
|
||||
AVSC_INLINE int avs_is_field_based(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_FIELDBASED); }
|
||||
|
||||
AVSC_INLINE int avs_is_parity_known(const AVS_VideoInfo * p)
|
||||
{ return ((p->image_type & AVS_IT_FIELDBASED)&&(p->image_type & (AVS_IT_BFF | AVS_IT_TFF))); }
|
||||
|
||||
AVSC_INLINE int avs_is_bff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_BFF); }
|
||||
|
||||
AVSC_INLINE int avs_is_tff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_TFF); }
|
||||
|
||||
AVSC_INLINE int avs_bits_per_pixel(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->pixel_type) {
|
||||
case AVS_CS_BGR24: return 24;
|
||||
case AVS_CS_BGR32: return 32;
|
||||
case AVS_CS_YUY2: return 16;
|
||||
case AVS_CS_YV12:
|
||||
case AVS_CS_I420: return 12;
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_from_pixels(const AVS_VideoInfo * p, int pixels)
|
||||
{ return pixels * (avs_bits_per_pixel(p)>>3); } // Will work on planar images, but will return only luma planes
|
||||
|
||||
AVSC_INLINE int avs_row_size(const AVS_VideoInfo * p)
|
||||
{ return avs_bytes_from_pixels(p,p->width); } // Also only returns first plane on planar images
|
||||
|
||||
AVSC_INLINE int avs_bmp_size(const AVS_VideoInfo * vi)
|
||||
{ if (avs_is_planar(vi)) {int p = vi->height * ((avs_row_size(vi)+3) & ~3); p+=p>>1; return p; } return vi->height * ((avs_row_size(vi)+3) & ~3); }
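/* Worked example (sketch): for a hypothetical 640x480 YV12 clip the helpers
 * above compose as follows. */
AVS_VideoInfo vi = { .width = 640, .height = 480, .pixel_type = AVS_CS_YV12 };
int bpp  = avs_bits_per_pixel(&vi);   /* 12                                          */
int row  = avs_row_size(&vi);         /* 640 * (12 >> 3) = 640 bytes (luma row only) */
int size = avs_bmp_size(&vi);         /* 480*640 + (480*640)/2 = 460800 bytes        */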
|
||||
|
||||
AVSC_INLINE int avs_samples_per_second(const AVS_VideoInfo * p)
|
||||
{ return p->audio_samples_per_second; }
|
||||
|
||||
|
||||
AVSC_INLINE int avs_bytes_per_channel_sample(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->sample_type) {
|
||||
case AVS_SAMPLE_INT8: return sizeof(signed char);
|
||||
case AVS_SAMPLE_INT16: return sizeof(signed short);
|
||||
case AVS_SAMPLE_INT24: return 3;
|
||||
case AVS_SAMPLE_INT32: return sizeof(signed int);
|
||||
case AVS_SAMPLE_FLOAT: return sizeof(float);
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_per_audio_sample(const AVS_VideoInfo * p)
|
||||
{ return p->nchannels*avs_bytes_per_channel_sample(p);}
|
||||
|
||||
AVSC_INLINE INT64 avs_audio_samples_from_frames(const AVS_VideoInfo * p, INT64 frames)
|
||||
{ return ((INT64)(frames) * p->audio_samples_per_second * p->fps_denominator / p->fps_numerator); }
|
||||
|
||||
AVSC_INLINE int avs_frames_from_audio_samples(const AVS_VideoInfo * p, INT64 samples)
|
||||
{ return (int)(samples * (INT64)p->fps_numerator / (INT64)p->fps_denominator / (INT64)p->audio_samples_per_second); }
|
||||
|
||||
AVSC_INLINE INT64 avs_audio_samples_from_bytes(const AVS_VideoInfo * p, INT64 bytes)
|
||||
{ return bytes / avs_bytes_per_audio_sample(p); }
|
||||
|
||||
AVSC_INLINE INT64 avs_bytes_from_audio_samples(const AVS_VideoInfo * p, INT64 samples)
|
||||
{ return samples * avs_bytes_per_audio_sample(p); }
|
||||
|
||||
AVSC_INLINE int avs_audio_channels(const AVS_VideoInfo * p)
|
||||
{ return p->nchannels; }
|
||||
|
||||
AVSC_INLINE int avs_sample_type(const AVS_VideoInfo * p)
|
||||
{ return p->sample_type;}
|
||||
|
||||
// useful mutators
|
||||
AVSC_INLINE void avs_set_property(AVS_VideoInfo * p, int property)
|
||||
{ p->image_type|=property; }
|
||||
|
||||
AVSC_INLINE void avs_clear_property(AVS_VideoInfo * p, int property)
|
||||
{ p->image_type&=~property; }
|
||||
|
||||
AVSC_INLINE void avs_set_field_based(AVS_VideoInfo * p, int isfieldbased)
|
||||
{ if (isfieldbased) p->image_type|=AVS_IT_FIELDBASED; else p->image_type&=~AVS_IT_FIELDBASED; }
|
||||
|
||||
AVSC_INLINE void avs_set_fps(AVS_VideoInfo * p, unsigned numerator, unsigned denominator)
|
||||
{
|
||||
unsigned x=numerator, y=denominator;
|
||||
while (y) { // find gcd
|
||||
unsigned t = x%y; x = y; y = t;
|
||||
}
|
||||
p->fps_numerator = numerator/x;
|
||||
p->fps_denominator = denominator/x;
|
||||
}
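/* Example (sketch): the GCD reduction means equivalent rates are stored
 * identically. */
AVS_VideoInfo vi2 = { 0 };
avs_set_fps(&vi2, 60000, 2000);   /* stored as fps_numerator = 30, fps_denominator = 1 */
avs_set_fps(&vi2, 30, 1);         /* same stored pair */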
|
||||
|
||||
AVSC_INLINE int avs_is_same_colorspace(AVS_VideoInfo * x, AVS_VideoInfo * y)
|
||||
{
|
||||
return (x->pixel_type == y->pixel_type)
|
||||
|| (avs_is_yv12(x) && avs_is_yv12(y));
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_VideoFrame
|
||||
//
|
||||
|
||||
// VideoFrameBuffer holds information about a memory block which is used
|
||||
// for video data. For efficiency, instances of this class are not deleted
|
||||
// when the refcount reaches zero; instead they're stored in a linked list
|
||||
// to be reused. The instances are deleted when the corresponding AVS
|
||||
// file is closed.
|
||||
|
||||
// AVS_VideoFrameBuffer is laid out identically to VideoFrameBuffer
|
||||
// DO NOT USE THIS STRUCTURE DIRECTLY
|
||||
typedef struct AVS_VideoFrameBuffer {
|
||||
unsigned char * data;
|
||||
int data_size;
|
||||
// sequence_number is incremented every time the buffer is changed, so
|
||||
// that stale views can tell they're no longer valid.
|
||||
long sequence_number;
|
||||
|
||||
long refcount;
|
||||
} AVS_VideoFrameBuffer;
|
||||
|
||||
// VideoFrame holds a "window" into a VideoFrameBuffer.
|
||||
|
||||
// AVS_VideoFrame is laid out identically to IVideoFrame
|
||||
// DO NOT USE THIS STRUCTURE DIRECTLY
|
||||
typedef struct AVS_VideoFrame {
|
||||
int refcount;
|
||||
AVS_VideoFrameBuffer * vfb;
|
||||
int offset, pitch, row_size, height, offsetU, offsetV, pitchUV; // U&V offsets are from top of picture.
|
||||
} AVS_VideoFrame;
|
||||
|
||||
// Access functions for AVS_VideoFrame
|
||||
AVSC_INLINE int avs_get_pitch(const AVS_VideoFrame * p) {
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_pitch_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V: return p->pitchUV;}
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_row_size(const AVS_VideoFrame * p) {
|
||||
return p->row_size; }
|
||||
|
||||
AVSC_INLINE int avs_get_row_size_p(const AVS_VideoFrame * p, int plane) {
|
||||
int r;
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->row_size>>1;
|
||||
else return 0;
|
||||
case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED:
|
||||
if (p->pitchUV) {
|
||||
r = ((p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)) )>>1; // Aligned rowsize
|
||||
if (r < p->pitchUV)
|
||||
return r;
|
||||
return p->row_size>>1;
|
||||
} else return 0;
|
||||
case AVS_PLANAR_Y_ALIGNED:
|
||||
r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r <= p->pitch)
|
||||
return r;
|
||||
return p->row_size;
|
||||
}
|
||||
return p->row_size;
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_get_height(const AVS_VideoFrame * p) {
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE int avs_get_height_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->height>>1;
|
||||
return 0;
|
||||
}
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE const unsigned char* avs_get_read_ptr(const AVS_VideoFrame * p) {
|
||||
return p->vfb->data + p->offset;}
|
||||
|
||||
AVSC_INLINE const unsigned char* avs_get_read_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;}
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_writable(const AVS_VideoFrame * p) {
|
||||
return (p->refcount == 1 && p->vfb->refcount == 1);}
|
||||
|
||||
AVSC_INLINE unsigned char* avs_get_write_ptr(const AVS_VideoFrame * p)
|
||||
{
|
||||
if (avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else
|
||||
return 0;
|
||||
}
|
||||
|
||||
AVSC_INLINE unsigned char* avs_get_write_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
if (plane==AVS_PLANAR_Y && avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else if (plane==AVS_PLANAR_Y) {
|
||||
return 0;
|
||||
} else {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;
|
||||
}
|
||||
}
|
||||
}
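/* Example (sketch): copying one chroma plane between frames of identical
 * format using the accessors above. memcpy() from <string.h> is assumed;
 * the avs_bit_blt() call declared further down does the same job through
 * the script environment. */
static void copy_u_plane(AVS_VideoFrame *dst, const AVS_VideoFrame *src)
{
    unsigned char       *d = avs_get_write_ptr_p(dst, AVS_PLANAR_U);
    const unsigned char *s = avs_get_read_ptr_p(src, AVS_PLANAR_U);
    int dst_pitch = avs_get_pitch_p(dst, AVS_PLANAR_U);
    int src_pitch = avs_get_pitch_p(src, AVS_PLANAR_U);
    int row_size  = avs_get_row_size_p(src, AVS_PLANAR_U);
    int height    = avs_get_height_p(src, AVS_PLANAR_U);
    int y;

    for (y = 0; y < height; y++) {
        memcpy(d, s, row_size);
        d += dst_pitch;
        s += src_pitch;
    }
}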
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_release_video_frame)(AVS_VideoFrame *);
|
||||
// makes a shallow copy of a video frame
|
||||
AVSC_API(AVS_VideoFrame *, avs_copy_video_frame)(AVS_VideoFrame *);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE void avs_release_frame(AVS_VideoFrame * f)
|
||||
{avs_release_video_frame(f);}
|
||||
AVSC_INLINE AVS_VideoFrame * avs_copy_frame(AVS_VideoFrame * f)
|
||||
{return avs_copy_video_frame(f);}
|
||||
#endif
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_Value
|
||||
//
|
||||
|
||||
// Treat AVS_Value as a fat pointer. That is, use avs_copy_value
|
||||
// and avs_release_value appropriately, as you would if AVS_Value were
|
||||
// a pointer.
|
||||
|
||||
// To maintain source code compatibility with future versions of the
|
||||
// avisynth_c API, don't use AVS_Value directly. Use the helper
|
||||
// functions below.
|
||||
|
||||
// AVS_Value is laid out identically to AVSValue
|
||||
typedef struct AVS_Value AVS_Value;
|
||||
struct AVS_Value {
|
||||
short type; // 'a'rray, 'c'lip, 'b'ool, 'i'nt, 'f'loat, 's'tring, 'v'oid, or 'l'ong
|
||||
// (for some functions, 'e'rror)
|
||||
short array_size;
|
||||
union {
|
||||
void * clip; // do not use directly, use avs_take_clip
|
||||
char boolean;
|
||||
int integer;
|
||||
INT64 integer64; // match addition of __int64 to avxplugin.h
|
||||
float floating_pt;
|
||||
const char * string;
|
||||
const AVS_Value * array;
|
||||
} d;
|
||||
};
|
||||
|
||||
// AVS_Value should be initialized with avs_void.
|
||||
// It should also be set to avs_void after the value is released
|
||||
// with avs_copy_value. Consider it the equivalent of setting
|
||||
// a pointer to NULL
|
||||
static const AVS_Value avs_void = {'v'};
|
||||
|
||||
AVSC_API(void, avs_copy_value)(AVS_Value * dest, AVS_Value src);
|
||||
AVSC_API(void, avs_release_value)(AVS_Value);
|
||||
|
||||
AVSC_INLINE int avs_defined(AVS_Value v) { return v.type != 'v'; }
|
||||
AVSC_INLINE int avs_is_clip(AVS_Value v) { return v.type == 'c'; }
|
||||
AVSC_INLINE int avs_is_bool(AVS_Value v) { return v.type == 'b'; }
|
||||
AVSC_INLINE int avs_is_int(AVS_Value v) { return v.type == 'i'; }
|
||||
AVSC_INLINE int avs_is_float(AVS_Value v) { return v.type == 'f' || v.type == 'i'; }
|
||||
AVSC_INLINE int avs_is_string(AVS_Value v) { return v.type == 's'; }
|
||||
AVSC_INLINE int avs_is_array(AVS_Value v) { return v.type == 'a'; }
|
||||
AVSC_INLINE int avs_is_error(AVS_Value v) { return v.type == 'e'; }
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(AVS_Clip *, avs_take_clip)(AVS_Value, AVS_ScriptEnvironment *);
|
||||
AVSC_API(void, avs_set_to_clip)(AVS_Value *, AVS_Clip *);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
AVSC_INLINE int avs_as_bool(AVS_Value v)
|
||||
{ return v.d.boolean; }
|
||||
AVSC_INLINE int avs_as_int(AVS_Value v)
|
||||
{ return v.d.integer; }
|
||||
AVSC_INLINE const char * avs_as_string(AVS_Value v)
|
||||
{ return avs_is_error(v) || avs_is_string(v) ? v.d.string : 0; }
|
||||
AVSC_INLINE double avs_as_float(AVS_Value v)
|
||||
{ return avs_is_int(v) ? v.d.integer : v.d.floating_pt; }
|
||||
AVSC_INLINE const char * avs_as_error(AVS_Value v)
|
||||
{ return avs_is_error(v) ? v.d.string : 0; }
|
||||
AVSC_INLINE const AVS_Value * avs_as_array(AVS_Value v)
|
||||
{ return v.d.array; }
|
||||
AVSC_INLINE int avs_array_size(AVS_Value v)
|
||||
{ return avs_is_array(v) ? v.array_size : 1; }
|
||||
AVSC_INLINE AVS_Value avs_array_elt(AVS_Value v, int index)
|
||||
{ return avs_is_array(v) ? v.d.array[index] : v; }
|
||||
|
||||
// only use these functions on an AVS_Value that does not already have
|
||||
// an active value. Remember, treat AVS_Value as a fat pointer.
|
||||
AVSC_INLINE AVS_Value avs_new_value_bool(int v0)
|
||||
{ AVS_Value v; v.type = 'b'; v.d.boolean = v0 == 0 ? 0 : 1; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_int(int v0)
|
||||
{ AVS_Value v; v.type = 'i'; v.d.integer = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_string(const char * v0)
|
||||
{ AVS_Value v; v.type = 's'; v.d.string = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_float(float v0)
|
||||
{ AVS_Value v; v.type = 'f'; v.d.floating_pt = v0; return v;}
|
||||
AVSC_INLINE AVS_Value avs_new_value_error(const char * v0)
|
||||
{ AVS_Value v; v.type = 'e'; v.d.string = v0; return v; }
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE AVS_Value avs_new_value_clip(AVS_Clip * v0)
|
||||
{ AVS_Value v; avs_set_to_clip(&v, v0); return v; }
|
||||
#endif
|
||||
AVSC_INLINE AVS_Value avs_new_value_array(AVS_Value * v0, int size)
|
||||
{ AVS_Value v; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
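/* Example (sketch): calling a script function with the constructors above.
 * "BicubicResize" and the numbers are placeholders; "env" and "clip_val"
 * (a 'c'lip value) are assumed to exist, and avs_invoke() is declared
 * further down in this header. */
AVS_Value args[3], array, result;

args[0] = clip_val;
args[1] = avs_new_value_int(640);
args[2] = avs_new_value_int(480);
array   = avs_new_value_array(args, 3);

result = avs_invoke(env, "BicubicResize", array, NULL);
if (avs_is_error(result))
    fprintf(stderr, "%s\n", avs_as_error(result));   /* <stdio.h> assumed */
avs_release_value(result);                           /* release it like a fat pointer */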
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_Clip
|
||||
//
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_release_clip)(AVS_Clip *);
|
||||
AVSC_API(AVS_Clip *, avs_copy_clip)(AVS_Clip *);
|
||||
|
||||
AVSC_API(const char *, avs_clip_get_error)(AVS_Clip *); // return 0 if no error
|
||||
|
||||
AVSC_API(const AVS_VideoInfo *, avs_get_video_info)(AVS_Clip *);
|
||||
|
||||
AVSC_API(int, avs_get_version)(AVS_Clip *);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_get_frame)(AVS_Clip *, int n);
|
||||
// The returned video frame must be released with avs_release_video_frame
|
||||
|
||||
AVSC_API(int, avs_get_parity)(AVS_Clip *, int n);
|
||||
// return field parity if field_based, else parity of first field in frame
|
||||
|
||||
AVSC_API(int, avs_get_audio)(AVS_Clip *, void * buf,
|
||||
INT64 start, INT64 count);
|
||||
// start and count are in samples
|
||||
|
||||
AVSC_API(int, avs_set_cache_hints)(AVS_Clip *,
|
||||
int cachehints, size_t frame_range);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
// This is the callback type used by avs_add_function
|
||||
typedef AVS_Value (AVSC_CC * AVS_ApplyFunc)
|
||||
(AVS_ScriptEnvironment *, AVS_Value args, void * user_data);
|
||||
|
||||
typedef struct AVS_FilterInfo AVS_FilterInfo;
|
||||
struct AVS_FilterInfo
|
||||
{
|
||||
// these members should not be modified outside of the AVS_ApplyFunc callback
|
||||
AVS_Clip * child;
|
||||
AVS_VideoInfo vi;
|
||||
AVS_ScriptEnvironment * env;
|
||||
AVS_VideoFrame * (AVSC_CC * get_frame)(AVS_FilterInfo *, int n);
|
||||
int (AVSC_CC * get_parity)(AVS_FilterInfo *, int n);
|
||||
int (AVSC_CC * get_audio)(AVS_FilterInfo *, void * buf,
|
||||
INT64 start, INT64 count);
|
||||
int (AVSC_CC * set_cache_hints)(AVS_FilterInfo *, int cachehints,
|
||||
int frame_range);
|
||||
void (AVSC_CC * free_filter)(AVS_FilterInfo *);
|
||||
|
||||
// Should be set whenever there is an error to report.
|
||||
// It is cleared before any of the above methods are called
|
||||
const char * error;
|
||||
// this is to store whatever and may be modified at will
|
||||
void * user_data;
|
||||
};
|
||||
|
||||
// Create a new filter
|
||||
// fi is set to point to the AVS_FilterInfo so that you can
|
||||
// modify it once it is initialized.
|
||||
// store_child should generally be set to true. If it is not
|
||||
// set then ALL methods (the function pointers) must be defined.
|
||||
// If it is set then you do not need to worry about freeing the child
|
||||
// clip.
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(AVS_Clip *, avs_new_c_filter)(AVS_ScriptEnvironment * e,
|
||||
AVS_FilterInfo * * fi,
|
||||
AVS_Value child, int store_child);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
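/* Example (sketch): the creation side of a C filter. "create_my_filter" and
 * "my_get_frame" are placeholder names, not part of the API. */
static AVS_Value AVSC_CC create_my_filter(AVS_ScriptEnvironment *env,
                                          AVS_Value args, void *user_data)
{
    AVS_FilterInfo *fi;
    AVS_Clip *clip = avs_new_c_filter(env, &fi, avs_array_elt(args, 0), 1);
    AVS_Value ret;

    fi->get_frame = my_get_frame;   /* hypothetical callback defined elsewhere */
                                    /* store_child == 1, so the child clip does
                                     * not have to be freed here (see above)   */
    avs_set_to_clip(&ret, clip);
    avs_release_clip(clip);         /* ret now holds its own reference */
    return ret;
}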
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_ScriptEnvironment
|
||||
//
|
||||
|
||||
// For GetCPUFlags. These are backwards-compatible with those in VirtualDub.
|
||||
enum {
|
||||
/* slowest CPU to support extension */
|
||||
AVS_CPU_FORCE = 0x01, // N/A
|
||||
AVS_CPU_FPU = 0x02, // 386/486DX
|
||||
AVS_CPU_MMX = 0x04, // P55C, K6, PII
|
||||
AVS_CPU_INTEGER_SSE = 0x08, // PIII, Athlon
|
||||
AVS_CPU_SSE = 0x10, // PIII, Athlon XP/MP
|
||||
AVS_CPU_SSE2 = 0x20, // PIV, Hammer
|
||||
AVS_CPU_3DNOW = 0x40, // K6-2
|
||||
AVS_CPU_3DNOW_EXT = 0x80, // Athlon
|
||||
AVS_CPU_X86_64 = 0xA0, // Hammer (note: equiv. to 3DNow + SSE2,
|
||||
// which only Hammer will have anyway)
|
||||
};
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(const char *, avs_get_error)(AVS_ScriptEnvironment *); // return 0 if no error
|
||||
|
||||
AVSC_API(long, avs_get_cpu_flags)(AVS_ScriptEnvironment *);
|
||||
AVSC_API(int, avs_check_version)(AVS_ScriptEnvironment *, int version);
|
||||
|
||||
AVSC_API(char *, avs_save_string)(AVS_ScriptEnvironment *, const char* s, int length);
|
||||
AVSC_API(char *, avs_sprintf)(AVS_ScriptEnvironment *, const char * fmt, ...);
|
||||
|
||||
AVSC_API(char *, avs_vsprintf)(AVS_ScriptEnvironment *, const char * fmt, va_list val);
|
||||
// note: val is really a va_list; I hope everyone typedefs va_list to a pointer
|
||||
|
||||
AVSC_API(int, avs_add_function)(AVS_ScriptEnvironment *,
|
||||
const char * name, const char * params,
|
||||
AVS_ApplyFunc apply, void * user_data);
|
||||
|
||||
AVSC_API(int, avs_function_exists)(AVS_ScriptEnvironment *, const char * name);
|
||||
|
||||
AVSC_API(AVS_Value, avs_invoke)(AVS_ScriptEnvironment *, const char * name,
|
||||
AVS_Value args, const char** arg_names);
|
||||
// The returned value must be released with avs_release_value
|
||||
|
||||
AVSC_API(AVS_Value, avs_get_var)(AVS_ScriptEnvironment *, const char* name);
|
||||
// The returned value must be released with avs_release_value
|
||||
|
||||
AVSC_API(int, avs_set_var)(AVS_ScriptEnvironment *, const char* name, AVS_Value val);
|
||||
|
||||
AVSC_API(int, avs_set_global_var)(AVS_ScriptEnvironment *, const char* name, const AVS_Value val);
|
||||
|
||||
//void avs_push_context(AVS_ScriptEnvironment *, int level=0);
|
||||
//void avs_pop_context(AVS_ScriptEnvironment *);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_new_video_frame_a)(AVS_ScriptEnvironment *,
|
||||
const AVS_VideoInfo * vi, int align);
|
||||
// align should be at least 16
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_video_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
#endif
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(int, avs_make_writable)(AVS_ScriptEnvironment *, AVS_VideoFrame * * pvf);
|
||||
|
||||
AVSC_API(void, avs_bit_blt)(AVS_ScriptEnvironment *, unsigned char* dstp, int dst_pitch, const unsigned char* srcp, int src_pitch, int row_size, int height);
|
||||
|
||||
typedef void (AVSC_CC *AVS_ShutdownFunc)(void* user_data, AVS_ScriptEnvironment * env);
|
||||
AVSC_API(void, avs_at_exit)(AVS_ScriptEnvironment *, AVS_ShutdownFunc function, void * user_data);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_subframe)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height);
|
||||
// The returned video frame must be released
|
||||
|
||||
AVSC_API(int, avs_set_memory_max)(AVS_ScriptEnvironment *, int mem);
|
||||
|
||||
AVSC_API(int, avs_set_working_dir)(AVS_ScriptEnvironment *, const char * newdir);
|
||||
|
||||
// avisynth.dll exports this; it's a way to use it as a library, without
|
||||
// writing an AVS script or going through AVIFile.
|
||||
AVSC_API(AVS_ScriptEnvironment *, avs_create_script_environment)(int version);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
// this symbol is the entry point for the plugin and must
|
||||
// be defined
|
||||
AVSC_EXPORT
|
||||
const char * AVSC_CC avisynth_c_plugin_init(AVS_ScriptEnvironment* env);
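/* Example (sketch): a registration-only entry point. The filter name and
 * parameter string are made up; create_my_filter is the placeholder
 * callback from the sketch above. */
AVSC_EXPORT
const char * AVSC_CC avisynth_c_plugin_init(AVS_ScriptEnvironment *env)
{
    avs_add_function(env, "MyFilter", "c[strength]i", create_my_filter, NULL);
    return "MyFilter plugin";
}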
|
||||
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_delete_script_environment)(AVS_ScriptEnvironment *);
|
||||
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_subframe_planar)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height, int rel_offsetU, int rel_offsetV, int new_pitchUV);
|
||||
// The returned video frame must be released
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif //__AVXSYNTH_C__
|
@@ -1,85 +0,0 @@
|
||||
#ifndef __DATA_TYPE_CONVERSIONS_H__
|
||||
#define __DATA_TYPE_CONVERSIONS_H__
|
||||
|
||||
#include <stdint.h>
|
||||
#include <wchar.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
namespace avxsynth {
|
||||
#endif // __cplusplus
|
||||
|
||||
typedef int64_t __int64;
|
||||
typedef int32_t __int32;
|
||||
#ifdef __cplusplus
|
||||
typedef bool BOOL;
|
||||
#else
|
||||
typedef uint32_t BOOL;
|
||||
#endif // __cplusplus
|
||||
typedef void* HMODULE;
|
||||
typedef void* LPVOID;
|
||||
typedef void* PVOID;
|
||||
typedef PVOID HANDLE;
|
||||
typedef HANDLE HWND;
|
||||
typedef HANDLE HINSTANCE;
|
||||
typedef void* HDC;
|
||||
typedef void* HBITMAP;
|
||||
typedef void* HICON;
|
||||
typedef void* HFONT;
|
||||
typedef void* HGDIOBJ;
|
||||
typedef void* HBRUSH;
|
||||
typedef void* HMMIO;
|
||||
typedef void* HACMSTREAM;
|
||||
typedef void* HACMDRIVER;
|
||||
typedef void* HIC;
|
||||
typedef void* HACMOBJ;
|
||||
typedef HACMSTREAM* LPHACMSTREAM;
|
||||
typedef void* HACMDRIVERID;
|
||||
typedef void* LPHACMDRIVER;
|
||||
typedef unsigned char BYTE;
|
||||
typedef BYTE* LPBYTE;
|
||||
typedef char TCHAR;
|
||||
typedef TCHAR* LPTSTR;
|
||||
typedef const TCHAR* LPCTSTR;
|
||||
typedef char* LPSTR;
|
||||
typedef LPSTR LPOLESTR;
|
||||
typedef const char* LPCSTR;
|
||||
typedef LPCSTR LPCOLESTR;
|
||||
typedef wchar_t WCHAR;
|
||||
typedef unsigned short WORD;
|
||||
typedef unsigned int UINT;
|
||||
typedef UINT MMRESULT;
|
||||
typedef uint32_t DWORD;
|
||||
typedef DWORD COLORREF;
|
||||
typedef DWORD FOURCC;
|
||||
typedef DWORD HRESULT;
|
||||
typedef DWORD* LPDWORD;
|
||||
typedef DWORD* DWORD_PTR;
|
||||
typedef int32_t LONG;
|
||||
typedef int32_t* LONG_PTR;
|
||||
typedef LONG_PTR LRESULT;
|
||||
typedef uint32_t ULONG;
|
||||
typedef uint32_t* ULONG_PTR;
|
||||
//typedef __int64_t intptr_t;
|
||||
typedef uint64_t _fsize_t;
|
||||
|
||||
|
||||
//
|
||||
// Structures
|
||||
//
|
||||
|
||||
typedef struct _GUID {
|
||||
DWORD Data1;
|
||||
WORD Data2;
|
||||
WORD Data3;
|
||||
BYTE Data4[8];
|
||||
} GUID;
|
||||
|
||||
typedef GUID REFIID;
|
||||
typedef GUID CLSID;
|
||||
typedef CLSID* LPCLSID;
|
||||
typedef GUID IID;
|
||||
|
||||
#ifdef __cplusplus
|
||||
}; // namespace avxsynth
|
||||
#endif // __cplusplus
|
||||
#endif // __DATA_TYPE_CONVERSIONS_H__
|
@@ -1,77 +0,0 @@
|
||||
#ifndef __WINDOWS2LINUX_H__
|
||||
#define __WINDOWS2LINUX_H__
|
||||
|
||||
/*
|
||||
* LINUX SPECIFIC DEFINITIONS
|
||||
*/
|
||||
//
|
||||
// Data types conversions
|
||||
//
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include "basicDataTypeConversions.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
namespace avxsynth {
|
||||
#endif // __cplusplus
|
||||
//
|
||||
// purposefully define the following MSFT definitions
|
||||
// to mean nothing (as they do not mean anything on Linux)
|
||||
//
|
||||
#define __stdcall
|
||||
#define __cdecl
|
||||
#define noreturn
|
||||
#define __declspec(x)
|
||||
#define STDAPI extern "C" HRESULT
|
||||
#define STDMETHODIMP HRESULT __stdcall
|
||||
#define STDMETHODIMP_(x) x __stdcall
|
||||
|
||||
#define STDMETHOD(x) virtual HRESULT x
|
||||
#define STDMETHOD_(a, x) virtual a x
|
||||
|
||||
#ifndef TRUE
|
||||
#define TRUE true
|
||||
#endif
|
||||
|
||||
#ifndef FALSE
|
||||
#define FALSE false
|
||||
#endif
|
||||
|
||||
#define S_OK (0x00000000)
|
||||
#define S_FALSE (0x00000001)
|
||||
#define E_NOINTERFACE (0X80004002)
|
||||
#define E_POINTER (0x80004003)
|
||||
#define E_FAIL (0x80004005)
|
||||
#define E_OUTOFMEMORY (0x8007000E)
|
||||
|
||||
#define INVALID_HANDLE_VALUE ((HANDLE)((LONG_PTR)-1))
|
||||
#define FAILED(hr) ((hr) & 0x80000000)
|
||||
#define SUCCEEDED(hr) (!FAILED(hr))
|
||||
|
||||
|
||||
//
|
||||
// Functions
|
||||
//
|
||||
#define MAKEDWORD(a,b,c,d) ((a << 24) | (b << 16) | (c << 8) | (d))
|
||||
#define MAKEWORD(a,b) ((a << 8) | (b))
|
||||
|
||||
#define lstrlen strlen
|
||||
#define lstrcpy strcpy
|
||||
#define lstrcmpi strcasecmp
|
||||
#define _stricmp strcasecmp
|
||||
#define InterlockedIncrement(x) __sync_fetch_and_add((x), 1)
|
||||
#define InterlockedDecrement(x) __sync_fetch_and_sub((x), 1)
|
||||
// Windows uses (new, old) ordering but GCC has (old, new)
|
||||
#define InterlockedCompareExchange(x,y,z) __sync_val_compare_and_swap(x,z,y)
|
||||
|
||||
#define UInt32x32To64(a, b) ( (uint64_t) ( ((uint64_t)((uint32_t)(a))) * ((uint32_t)(b)) ) )
|
||||
#define Int64ShrlMod32(a, b) ( (uint64_t) ( (uint64_t)(a) >> (b) ) )
|
||||
#define Int32x32To64(a, b) ((__int64)(((__int64)((long)(a))) * ((long)(b))))
|
||||
|
||||
#define MulDiv(nNumber, nNumerator, nDenominator) (int32_t) (((int64_t) (nNumber) * (int64_t) (nNumerator) + (int64_t) ((nDenominator)/2)) / (int64_t) (nDenominator))
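/* Example (sketch): the "+ (nDenominator)/2" term makes MulDiv() round to
 * nearest instead of truncating. */
int32_t muldiv_example = MulDiv(5, 3, 2);   /* (5*3 + 1) / 2 = 8, where plain 5*3/2 gives 7 */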
|
||||
|
||||
#ifdef __cplusplus
|
||||
}; // namespace avxsynth
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif // __WINDOWS2LINUX_H__
|
@@ -1,86 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file was copied from the following newsgroup posting:
|
||||
*
|
||||
* Newsgroups: mod.std.unix
|
||||
* Subject: public domain AT&T getopt source
|
||||
* Date: 3 Nov 85 19:34:15 GMT
|
||||
*
|
||||
* Here's something you've all been waiting for: the AT&T public domain
|
||||
* source for getopt(3). It is the code which was given out at the 1985
|
||||
* UNIFORUM conference in Dallas. I obtained it by electronic mail
|
||||
* directly from AT&T. The people there assure me that it is indeed
|
||||
* in the public domain.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
|
||||
static int opterr = 1;
|
||||
static int optind = 1;
|
||||
static int optopt;
|
||||
static char *optarg;
|
||||
|
||||
#undef fprintf
|
||||
|
||||
static int getopt(int argc, char *argv[], char *opts)
|
||||
{
|
||||
static int sp = 1;
|
||||
int c;
|
||||
char *cp;
|
||||
|
||||
if (sp == 1) {
|
||||
if (optind >= argc ||
|
||||
argv[optind][0] != '-' || argv[optind][1] == '\0')
|
||||
return EOF;
|
||||
else if (!strcmp(argv[optind], "--")) {
|
||||
optind++;
|
||||
return EOF;
|
||||
}
|
||||
}
|
||||
optopt = c = argv[optind][sp];
|
||||
if (c == ':' || (cp = strchr(opts, c)) == NULL) {
|
||||
fprintf(stderr, ": illegal option -- %c\n", c);
|
||||
if (argv[optind][++sp] == '\0') {
|
||||
optind++;
|
||||
sp = 1;
|
||||
}
|
||||
return '?';
|
||||
}
|
||||
if (*++cp == ':') {
|
||||
if (argv[optind][sp+1] != '\0')
|
||||
optarg = &argv[optind++][sp+1];
|
||||
else if(++optind >= argc) {
|
||||
fprintf(stderr, ": option requires an argument -- %c\n", c);
|
||||
sp = 1;
|
||||
return '?';
|
||||
} else
|
||||
optarg = argv[optind++];
|
||||
sp = 1;
|
||||
} else {
|
||||
if (argv[optind][++sp] == '\0') {
|
||||
sp = 1;
|
||||
optind++;
|
||||
}
|
||||
optarg = NULL;
|
||||
}
|
||||
|
||||
return c;
|
||||
}
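/* Example (sketch) of a typical caller; the option letters are arbitrary,
 * and because optind/optarg are file-level statics here the loop has to
 * live in the same translation unit. */
static int parse_args(int argc, char *argv[])
{
    int c, verbose = 0;
    const char *outfile = NULL;

    while ((c = getopt(argc, argv, "vo:")) != EOF) {
        switch (c) {
        case 'v': verbose = 1;      break;
        case 'o': outfile = optarg; break;
        default:  return 1;         /* getopt() already printed a diagnostic */
        }
    }
    return 0;
}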
|
@@ -1,71 +0,0 @@
|
||||
/*
|
||||
* C99-compatible snprintf() and vsnprintf() implementations
|
||||
* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdarg.h>
|
||||
#include <limits.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "compat/va_copy.h"
|
||||
#include "libavutil/error.h"
|
||||
|
||||
#if defined(__MINGW32__)
|
||||
#define EOVERFLOW EFBIG
|
||||
#endif
|
||||
|
||||
int avpriv_snprintf(char *s, size_t n, const char *fmt, ...)
|
||||
{
|
||||
va_list ap;
|
||||
int ret;
|
||||
|
||||
va_start(ap, fmt);
|
||||
ret = avpriv_vsnprintf(s, n, fmt, ap);
|
||||
va_end(ap);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int avpriv_vsnprintf(char *s, size_t n, const char *fmt,
|
||||
va_list ap)
|
||||
{
|
||||
int ret;
|
||||
va_list ap_copy;
|
||||
|
||||
if (n == 0)
|
||||
return _vscprintf(fmt, ap);
|
||||
else if (n > INT_MAX)
|
||||
return AVERROR(EOVERFLOW);
|
||||
|
||||
/* we use n - 1 here because if the buffer is not big enough, the MS
|
||||
* runtime libraries don't add a terminating zero at the end. MSDN
|
||||
* recommends to provide _snprintf/_vsnprintf() a buffer size that
|
||||
* is one less than the actual buffer, and zero it before calling
|
||||
* _snprintf/_vsnprintf() to work around this problem.
|
||||
* See http://msdn.microsoft.com/en-us/library/1kt27hek(v=vs.80).aspx */
|
||||
memset(s, 0, n);
|
||||
va_copy(ap_copy, ap);
|
||||
ret = _vsnprintf(s, n - 1, fmt, ap_copy);
|
||||
va_end(ap_copy);
|
||||
if (ret == -1)
|
||||
ret = _vscprintf(fmt, ap);
|
||||
|
||||
return ret;
|
||||
}
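/* Example (sketch): the n == 0 branch keeps the C99 "size query" idiom
 * working. */
int need = avpriv_snprintf(NULL, 0, "%d/%d", 30000, 1001);   /* returns 10, buffer untouched */
char buf[32];
avpriv_snprintf(buf, sizeof(buf), "%d/%d", 30000, 1001);     /* buf = "30000/1001" */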
|
@@ -1,38 +0,0 @@
|
||||
/*
|
||||
* C99-compatible snprintf() and vsnprintf() implementations
|
||||
* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef COMPAT_SNPRINTF_H
|
||||
#define COMPAT_SNPRINTF_H
|
||||
|
||||
#include <stdarg.h>
|
||||
#include <stdio.h>
|
||||
|
||||
int avpriv_snprintf(char *s, size_t n, const char *fmt, ...);
|
||||
int avpriv_vsnprintf(char *s, size_t n, const char *fmt, va_list ap);
|
||||
|
||||
#undef snprintf
|
||||
#undef _snprintf
|
||||
#undef vsnprintf
|
||||
#define snprintf avpriv_snprintf
|
||||
#define _snprintf avpriv_snprintf
|
||||
#define vsnprintf avpriv_vsnprintf
|
||||
|
||||
#endif /* COMPAT_SNPRINTF_H */
|
@@ -1,10 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
n=10
|
||||
|
||||
case "$1" in
|
||||
-n) n=$2; shift 2 ;;
|
||||
-n*) n=${1#-n}; shift ;;
|
||||
esac
|
||||
|
||||
exec sed ${n}q "$@"
|
@@ -1,34 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
int plan9_main(int argc, char **argv);
|
||||
|
||||
#undef main
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
/* The setfcr() function in lib9 is broken, must use asm. */
|
||||
#ifdef __i386
|
||||
short fcr;
|
||||
__asm__ volatile ("fstcw %0 \n"
|
||||
"or $63, %0 \n"
|
||||
"fldcw %0 \n"
|
||||
: "=m"(fcr));
|
||||
#endif
|
||||
|
||||
return plan9_main(argc, argv);
|
||||
}
|
@@ -1,2 +0,0 @@
|
||||
#!/bin/sh
|
||||
exec awk "BEGIN { for (i = 2; i < ARGC; i++) printf \"$1\", ARGV[i] }" "$@"
|
@@ -1,93 +0,0 @@
|
||||
/*
|
||||
* C99-compatible strtod() implementation
|
||||
* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include <limits.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/mathematics.h"
|
||||
|
||||
static char *check_nan_suffix(char *s)
|
||||
{
|
||||
char *start = s;
|
||||
|
||||
if (*s++ != '(')
|
||||
return start;
|
||||
|
||||
while ((*s >= 'a' && *s <= 'z') || (*s >= 'A' && *s <= 'Z') ||
|
||||
(*s >= '0' && *s <= '9') || *s == '_')
|
||||
s++;
|
||||
|
||||
return *s == ')' ? s + 1 : start;
|
||||
}
|
||||
|
||||
#undef strtod
|
||||
double strtod(const char *, char **);
|
||||
|
||||
double avpriv_strtod(const char *nptr, char **endptr)
|
||||
{
|
||||
char *end;
|
||||
double res;
|
||||
|
||||
/* Skip leading spaces */
|
||||
while (av_isspace(*nptr))
|
||||
nptr++;
|
||||
|
||||
if (!av_strncasecmp(nptr, "infinity", 8)) {
|
||||
end = nptr + 8;
|
||||
res = INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "inf", 3)) {
|
||||
end = nptr + 3;
|
||||
res = INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "+infinity", 9)) {
|
||||
end = nptr + 9;
|
||||
res = INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "+inf", 4)) {
|
||||
end = nptr + 4;
|
||||
res = INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "-infinity", 9)) {
|
||||
end = nptr + 9;
|
||||
res = -INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "-inf", 4)) {
|
||||
end = nptr + 4;
|
||||
res = -INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "nan", 3)) {
|
||||
end = check_nan_suffix(nptr + 3);
|
||||
res = NAN;
|
||||
} else if (!av_strncasecmp(nptr, "+nan", 4) ||
|
||||
!av_strncasecmp(nptr, "-nan", 4)) {
|
||||
end = check_nan_suffix(nptr + 4);
|
||||
res = NAN;
|
||||
} else if (!av_strncasecmp(nptr, "0x", 2) ||
|
||||
!av_strncasecmp(nptr, "-0x", 3) ||
|
||||
!av_strncasecmp(nptr, "+0x", 3)) {
|
||||
/* FIXME this doesn't handle exponents, non-integers (float/double)
|
||||
* and numbers too large for long long */
|
||||
res = strtoll(nptr, &end, 16);
|
||||
} else {
|
||||
res = strtod(nptr, &end);
|
||||
}
|
||||
|
||||
if (endptr)
|
||||
*endptr = end;
|
||||
|
||||
return res;
|
||||
}
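/* Example (sketch): inputs the wrapper above handles beyond plain strtod(). */
char *end;
double a = avpriv_strtod("-inf", &end);        /* -INFINITY, end points past the "f"  */
double b = avpriv_strtod("nan(0x7f)", &end);   /* NAN, the "(...)" suffix is consumed  */
double c = avpriv_strtod("0x1A", &end);        /* 26.0 via strtoll(); no hex exponents */
double d = avpriv_strtod("  1.5e3", &end);     /* falls through to strtod(): 1500.0    */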
|
@@ -1,7 +0,0 @@
|
||||
#include_next <math.h>
|
||||
|
||||
#undef INFINITY
|
||||
#undef NAN
|
||||
|
||||
#define INFINITY (*(const float*)((const unsigned []){ 0x7f800000 }))
|
||||
#define NAN (*(const float*)((const unsigned []){ 0x7fc00000 }))
|
@@ -1,26 +0,0 @@
|
||||
/*
|
||||
* MSVC Compatible va_copy macro
|
||||
* Copyright (c) 2012 Derek Buitenhuis
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include <stdarg.h>
|
||||
|
||||
#if !defined(va_copy) && defined(_MSC_VER)
|
||||
#define va_copy(dst, src) ((dst) = (src))
|
||||
#endif
|
doc/APIchanges: 1072 lines changed (diff suppressed because it is too large)
doc/Makefile: 107 lines changed
@@ -1,120 +1,53 @@
|
||||
LIBRARIES-$(CONFIG_AVUTIL) += libavutil
|
||||
LIBRARIES-$(CONFIG_SWSCALE) += libswscale
|
||||
LIBRARIES-$(CONFIG_SWRESAMPLE) += libswresample
|
||||
LIBRARIES-$(CONFIG_AVCODEC) += libavcodec
|
||||
LIBRARIES-$(CONFIG_AVFORMAT) += libavformat
|
||||
LIBRARIES-$(CONFIG_AVDEVICE) += libavdevice
|
||||
LIBRARIES-$(CONFIG_AVFILTER) += libavfilter
|
||||
|
||||
COMPONENTS-$(CONFIG_AVUTIL) += ffmpeg-utils
|
||||
COMPONENTS-$(CONFIG_SWSCALE) += ffmpeg-scaler
|
||||
COMPONENTS-$(CONFIG_SWRESAMPLE) += ffmpeg-resampler
|
||||
COMPONENTS-$(CONFIG_AVCODEC) += ffmpeg-codecs ffmpeg-bitstream-filters
|
||||
COMPONENTS-$(CONFIG_AVFORMAT) += ffmpeg-formats ffmpeg-protocols
|
||||
COMPONENTS-$(CONFIG_AVDEVICE) += ffmpeg-devices
|
||||
COMPONENTS-$(CONFIG_AVFILTER) += ffmpeg-filters
|
||||
|
||||
MANPAGES1 = $(PROGS-yes:%=doc/%.1) $(PROGS-yes:%=doc/%-all.1) $(COMPONENTS-yes:%=doc/%.1)
|
||||
MANPAGES3 = $(LIBRARIES-yes:%=doc/%.3)
|
||||
MANPAGES = $(MANPAGES1) $(MANPAGES3)
|
||||
PODPAGES = $(PROGS-yes:%=doc/%.pod) $(PROGS-yes:%=doc/%-all.pod) $(COMPONENTS-yes:%=doc/%.pod) $(LIBRARIES-yes:%=doc/%.pod)
|
||||
HTMLPAGES = $(PROGS-yes:%=doc/%.html) $(PROGS-yes:%=doc/%-all.html) $(COMPONENTS-yes:%=doc/%.html) $(LIBRARIES-yes:%=doc/%.html) \
|
||||
MANPAGES = $(PROGS-yes:%=doc/%.1)
|
||||
PODPAGES = $(PROGS-yes:%=doc/%.pod)
|
||||
HTMLPAGES = $(PROGS-yes:%=doc/%.html) \
|
||||
doc/developer.html \
|
||||
doc/faq.html \
|
||||
doc/fate.html \
|
||||
doc/general.html \
|
||||
doc/git-howto.html \
|
||||
doc/nut.html \
|
||||
doc/libavfilter.html \
|
||||
doc/platform.html \
|
||||
|
||||
TXTPAGES = doc/fate.txt \
|
||||
DOCS = $(HTMLPAGES) $(MANPAGES) $(PODPAGES)
|
||||
|
||||
all-$(CONFIG_DOC): documentation
|
||||
|
||||
DOCS-$(CONFIG_HTMLPAGES) += $(HTMLPAGES)
|
||||
DOCS-$(CONFIG_PODPAGES) += $(PODPAGES)
|
||||
DOCS-$(CONFIG_MANPAGES) += $(MANPAGES)
|
||||
DOCS-$(CONFIG_TXTPAGES) += $(TXTPAGES)
|
||||
DOCS = $(DOCS-yes)
|
||||
|
||||
all-$(CONFIG_DOC): doc
|
||||
|
||||
doc: documentation
|
||||
|
||||
apidoc: doc/doxy/html
|
||||
documentation: $(DOCS)
|
||||
|
||||
TEXIDEP = awk '/^@(verbatim)?include/ { printf "$@: $(@D)/%s\n", $$2 }' <$< >$(@:%=%.d)
|
||||
|
||||
doc/%.txt: TAG = TXT
|
||||
doc/%.txt: doc/%.texi
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)makeinfo --force --no-headers -o $@ $< 2>/dev/null
|
||||
|
||||
GENTEXI = format codec
|
||||
GENTEXI := $(GENTEXI:%=doc/avoptions_%.texi)
|
||||
|
||||
$(GENTEXI): TAG = GENTEXI
|
||||
$(GENTEXI): doc/avoptions_%.texi: doc/print_options$(HOSTEXESUF)
|
||||
$(M)doc/print_options $* > $@
|
||||
TEXIDEP = awk '/^@include/ { printf "$@: $(@D)/%s\n", $$2 }' <$< >$(@:%=%.d)
|
||||
|
||||
doc/%.html: TAG = HTML
|
||||
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
|
||||
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)texi2html -I doc -monolithic --D=config-not-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
|
||||
|
||||
doc/%-all.html: TAG = HTML
|
||||
doc/%-all.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)texi2html -I doc -monolithic --D=config-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
|
||||
$(M)texi2html -monolithic --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
|
||||
|
||||
doc/%.pod: TAG = POD
|
||||
doc/%.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI)
|
||||
doc/%.pod: doc/%.texi
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)perl $(SRC_PATH)/doc/texi2pod.pl -Dconfig-not-all=yes -Idoc $< $@
|
||||
$(M)$(SRC_PATH)/doc/texi2pod.pl $< $@
|
||||
|
||||
doc/%-all.pod: TAG = POD
|
||||
doc/%-all.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)perl $(SRC_PATH)/doc/texi2pod.pl -Dconfig-all=yes -Idoc $< $@
|
||||
|
||||
doc/%.1 doc/%.3: TAG = MAN
|
||||
doc/%.1: doc/%.pod $(GENTEXI)
|
||||
doc/%.1: TAG = MAN
|
||||
doc/%.1: doc/%.pod
|
||||
$(M)pod2man --section=1 --center=" " --release=" " $< > $@
|
||||
doc/%.3: doc/%.pod $(GENTEXI)
|
||||
$(M)pod2man --section=3 --center=" " --release=" " $< > $@
|
||||
|
||||
$(DOCS) doc/doxy/html: | doc/
|
||||
$(DOCS): | doc
|
||||
OBJDIRS += doc
|
||||
|
||||
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(INSTHEADERS)
|
||||
$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $(SRC_PATH) $^
|
||||
|
||||
install-man:
|
||||
|
||||
ifdef CONFIG_MANPAGES
|
||||
install-progs-$(CONFIG_DOC): install-man
|
||||
|
||||
install-man: $(MANPAGES)
|
||||
$(Q)mkdir -p "$(MANDIR)/man1"
|
||||
$(INSTALL) -m 644 $(MANPAGES1) "$(MANDIR)/man1"
|
||||
$(Q)mkdir -p "$(MANDIR)/man3"
|
||||
$(INSTALL) -m 644 $(MANPAGES3) "$(MANDIR)/man3"
|
||||
endif
|
||||
$(INSTALL) -m 644 $(MANPAGES) "$(MANDIR)/man1"
|
||||
|
||||
uninstall: uninstall-man
|
||||
|
||||
uninstall-man:
|
||||
$(RM) $(addprefix "$(MANDIR)/man1/",$(MANPAGES1))
|
||||
$(RM) $(addprefix "$(MANDIR)/man3/",$(MANPAGES3))
|
||||
$(RM) $(addprefix "$(MANDIR)/man1/",$(ALLMANPAGES))
|
||||
|
||||
clean:: docclean
|
||||
|
||||
distclean:: docclean
|
||||
$(RM) doc/config.texi
|
||||
|
||||
docclean:
|
||||
$(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 doc/*.3 $(CLEANSUFFIXES:%=doc/%) doc/avoptions_*.texi
|
||||
$(RM) -r doc/doxy/html
|
||||
clean::
|
||||
$(RM) doc/*.html doc/*.pod doc/*.1 $(CLEANSUFFIXES:%=doc/%)
|
||||
|
||||
-include $(wildcard $(DOCS:%=%.d))
|
||||
|
||||
.PHONY: apidoc doc documentation
|
||||
.PHONY: documentation
|
||||
|
@@ -1,11 +1,12 @@
Release Notes
=============

  * 2.0 "Nameless" July, 2013
  * 0.9 "Harmony" December, 2011


General notes
-------------

See the Changelog file for a list of significant changes. Note, there
are many more new features and bugfixes than what's listed there.

@@ -1,11 +0,0 @@
@chapter Authors

The FFmpeg developers.

For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
@command{git log} in the FFmpeg source directory, or browsing the
online repository at @url{http://source.ffmpeg.org}.

Maintainers for the specific components are listed in the file
@file{MAINTAINERS} in the source code tree.
1041  doc/avconv.texi  (new file; diff suppressed because it is too large)
@@ -1,33 +1,30 @@
|
||||
All the numerical options, if not specified otherwise, accept a string
|
||||
representing a number as input, which may be followed by one of the SI
|
||||
unit prefixes, for example: 'K', 'M', or 'G'.
|
||||
|
||||
If 'i' is appended to the SI unit prefix, the complete prefix will be
|
||||
interpreted as a unit prefix for binary multiplies, which are based on
|
||||
powers of 1024 instead of powers of 1000. Appending 'B' to the SI unit
|
||||
prefix multiplies the value by 8. This allows using, for example:
|
||||
'KB', 'MiB', 'G' and 'B' as number suffixes.
|
||||
All the numerical options, if not specified otherwise, accept in input
|
||||
a string representing a number, which may contain one of the
|
||||
International System number postfixes, for example 'K', 'M', 'G'.
|
||||
If 'i' is appended after the postfix, powers of 2 are used instead of
|
||||
powers of 10. The 'B' postfix multiplies the value for 8, and can be
|
||||
appended after another postfix or used alone. This allows using for
|
||||
example 'KB', 'MiB', 'G' and 'B' as postfix.
|
||||
|
||||
Options which do not take arguments are boolean options, and set the
|
||||
corresponding value to true. They can be set to false by prefixing
|
||||
the option name with "no". For example using "-nofoo"
|
||||
will set the boolean option with name "foo" to false.
|
||||
with "no" the option name, for example using "-nofoo" in the
|
||||
command line will set to false the boolean option with name "foo".
|
||||
|
||||
@anchor{Stream specifiers}
|
||||
@section Stream specifiers
|
||||
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
|
||||
are used to precisely specify which stream(s) a given option belongs to.
|
||||
are used to precisely specify which stream(s) does a given option belong to.
|
||||
|
||||
A stream specifier is a string generally appended to the option name and
|
||||
separated from it by a colon. E.g. @code{-codec:a:1 ac3} contains the
|
||||
@code{a:1} stream specifier, which matches the second audio stream. Therefore, it
|
||||
separated from it by a colon. E.g. @code{-codec:a:1 ac3} option contains
|
||||
@code{a:1} stream specifer, which matches the second audio stream. Therefore it
|
||||
would select the ac3 codec for the second audio stream.
|
||||
|
||||
A stream specifier can match several streams, so that the option is applied to all
|
||||
A stream specifier can match several stream, the option is then applied to all
|
||||
of them. E.g. the stream specifier in @code{-b:a 128k} matches all audio
|
||||
streams.
|
||||
|
||||
An empty stream specifier matches all streams. For example, @code{-codec copy}
|
||||
An empty stream specifier matches all streams, for example @code{-codec copy}
|
||||
or @code{-codec: copy} would copy all the streams without reencoding.
|
||||
|
||||
Possible forms of stream specifiers are:
|
||||
@@ -36,54 +33,25 @@ Possible forms of stream specifiers are:
|
||||
Matches the stream with this index. E.g. @code{-threads:1 4} would set the
|
||||
thread count for the second stream to 4.
|
||||
@item @var{stream_type}[:@var{stream_index}]
|
||||
@var{stream_type} is one of following: 'v' for video, 'a' for audio, 's' for subtitle,
|
||||
'd' for data, and 't' for attachments. If @var{stream_index} is given, then it matches
|
||||
stream number @var{stream_index} of this type. Otherwise, it matches all
|
||||
@var{stream_type} is one of: 'v' for video, 'a' for audio, 's' for subtitle,
|
||||
'd' for data and 't' for attachments. If @var{stream_index} is given, then
|
||||
matches stream number @var{stream_index} of this type. Otherwise matches all
|
||||
streams of this type.
|
||||
@item p:@var{program_id}[:@var{stream_index}]
|
||||
If @var{stream_index} is given, then it matches the stream with number @var{stream_index}
|
||||
in the program with the id @var{program_id}. Otherwise, it matches all streams in the
|
||||
program.
|
||||
@item #@var{stream_id}
|
||||
Matches the stream by a format-specific ID.
|
||||
If @var{stream_index} is given, then matches stream number @var{stream_index} in
|
||||
program with id @var{program_id}. Otherwise matches all streams in this program.
|
||||
@end table
|
||||
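As an illustration of the above (the file name and stream layout here are assumed, not taken from this document), the following command copies every stream but re-encodes only the second audio stream:

@example
# copy all streams, but transcode audio stream #1 to AC-3 at 160 kb/s
ffmpeg -i INPUT.mkv -map 0 -c copy -c:a:1 ac3 -b:a:1 160k OUTPUT.mkv
@end example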
|
||||
@section Generic options
|
||||
|
||||
These options are shared amongst the ff* tools.
|
||||
These options are shared amongst the av* tools.
|
||||
|
||||
@table @option
|
||||
|
||||
@item -L
|
||||
Show license.
|
||||
|
||||
@item -h, -?, -help, --help [@var{arg}]
|
||||
Show help. An optional parameter may be specified to print help about a specific
|
||||
item.
|
||||
|
||||
Possible values of @var{arg} are:
|
||||
@table @option
|
||||
@item decoder=@var{decoder_name}
|
||||
Print detailed information about the decoder named @var{decoder_name}. Use the
|
||||
@option{-decoders} option to get a list of all decoders.
|
||||
|
||||
@item encoder=@var{encoder_name}
|
||||
Print detailed information about the encoder named @var{encoder_name}. Use the
|
||||
@option{-encoders} option to get a list of all encoders.
|
||||
|
||||
@item demuxer=@var{demuxer_name}
|
||||
Print detailed information about the demuxer named @var{demuxer_name}. Use the
|
||||
@option{-formats} option to get a list of all demuxers and muxers.
|
||||
|
||||
@item muxer=@var{muxer_name}
|
||||
Print detailed information about the muxer named @var{muxer_name}. Use the
|
||||
@option{-formats} option to get a list of all muxers and demuxers.
|
||||
|
||||
@item filter=@var{filter_name}
|
||||
Print detailed information about the filter name @var{filter_name}. Use the
|
||||
@option{-filters} option to get a list of all filters.
|
||||
|
||||
@end table
|
||||
@item -h, -?, -help, --help
|
||||
Show help.
|
||||
|
||||
@item -version
|
||||
Show version.
|
||||
@@ -91,17 +59,32 @@ Show version.
|
||||
@item -formats
|
||||
Show available formats.
|
||||
|
||||
The fields preceding the format names have the following meanings:
|
||||
@table @samp
|
||||
@item D
|
||||
Decoding available
|
||||
@item E
|
||||
Encoding available
|
||||
@end table
|
||||
|
||||
@item -codecs
|
||||
Show all codecs known to libavcodec.
|
||||
Show available codecs.
|
||||
|
||||
Note that the term 'codec' is used throughout this documentation as a shortcut
|
||||
for what is more correctly called a media bitstream format.
|
||||
|
||||
@item -decoders
|
||||
Show available decoders.
|
||||
|
||||
@item -encoders
|
||||
Show all available encoders.
|
||||
The fields preceding the codec names have the following meanings:
|
||||
@table @samp
|
||||
@item D
|
||||
Decoding available
|
||||
@item E
|
||||
Encoding available
|
||||
@item V/A/S
|
||||
Video/audio/subtitle codec
|
||||
@item S
|
||||
Codec supports slices
|
||||
@item D
|
||||
Codec supports direct rendering
|
||||
@item T
|
||||
Codec can handle input truncated at random locations instead of only at frame boundaries
|
||||
@end table
|
||||
|
||||
@item -bsfs
|
||||
Show available bitstream filters.
|
||||
@@ -118,46 +101,25 @@ Show available pixel formats.
|
||||
@item -sample_fmts
|
||||
Show available sample formats.
|
||||
|
||||
@item -layouts
|
||||
Show channel names and standard channel layouts.
|
||||
|
||||
@item -loglevel [repeat+]@var{loglevel} | -v [repeat+]@var{loglevel}
|
||||
@item -loglevel @var{loglevel} | -v @var{loglevel}
|
||||
Set the logging level used by the library.
|
||||
Adding "repeat+" indicates that repeated log output should not be compressed
|
||||
to the first line and the "Last message repeated n times" line will be
|
||||
omitted. "repeat" can also be used alone.
|
||||
If "repeat" is used alone, and with no prior loglevel set, the default
|
||||
loglevel will be used. If multiple loglevel parameters are given, using
|
||||
'repeat' will not change the loglevel.
|
||||
@var{loglevel} is a number or a string containing one of the following values:
|
||||
@table @samp
|
||||
@item quiet
|
||||
Show nothing at all; be silent.
|
||||
@item panic
|
||||
Only show fatal errors which could lead the process to crash, such as
|
||||
an assert failure. This is not currently used for anything.
|
||||
@item fatal
|
||||
Only show fatal errors. These are errors after which the process absolutely
|
||||
cannot continue.
|
||||
@item error
|
||||
Show all errors, including ones which can be recovered from.
|
||||
@item warning
|
||||
Show all warnings and errors. Any message related to possibly
|
||||
incorrect or unexpected events will be shown.
|
||||
@item info
|
||||
Show informative messages during processing. This is in addition to
|
||||
warnings and errors. This is the default value.
|
||||
@item verbose
|
||||
Same as @code{info}, except more verbose.
|
||||
@item debug
|
||||
Show everything, including debugging information.
|
||||
@end table
|
||||
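For illustration only (input and output names are placeholders), typical invocations might look like:

@example
# only warnings and errors
ffmpeg -loglevel warning -i INPUT.avi OUTPUT.mkv
# full debug output, without collapsing repeated messages
ffmpeg -v repeat+debug -i INPUT.avi OUTPUT.mkv
@end example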
|
||||
By default the program logs to stderr, if coloring is supported by the
|
||||
terminal, colors are used to mark errors and warnings. Log coloring
|
||||
can be disabled setting the environment variable
|
||||
@env{AV_LOG_FORCE_NOCOLOR} or @env{NO_COLOR}, or can be forced setting
|
||||
the environment variable @env{AV_LOG_FORCE_COLOR}.
|
||||
@env{FFMPEG_FORCE_NOCOLOR} or @env{NO_COLOR}, or can be forced setting
|
||||
the environment variable @env{FFMPEG_FORCE_COLOR}.
|
||||
The use of the environment variable @env{NO_COLOR} is deprecated and
|
||||
will be dropped in a following FFmpeg version.
|
||||
|
||||
@@ -168,85 +130,9 @@ directory.
|
||||
This file can be useful for bug reports.
|
||||
It also implies @code{-loglevel verbose}.
|
||||
|
||||
Setting the environment variable @code{FFREPORT} to any value has the
|
||||
same effect. If the value is a ':'-separated key=value sequence, these
|
||||
options will affect the report; options values must be escaped if they
|
||||
contain special characters or the options delimiter ':' (see the
|
||||
``Quoting and escaping'' section in the ffmpeg-utils manual). The
|
||||
following option is recognized:
|
||||
@table @option
|
||||
@item file
|
||||
set the file name to use for the report; @code{%p} is expanded to the name
|
||||
of the program, @code{%t} is expanded to a timestamp, @code{%%} is expanded
|
||||
to a plain @code{%}
|
||||
@end table
|
||||
Note: setting the environment variable @code{FFREPORT} to any value has the
|
||||
same effect.
|
||||
|
||||
Errors in parsing the environment variable are not fatal, and will not
|
||||
appear in the report.
|
||||
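As a sketch (the log file name pattern is chosen here purely for illustration), a report with a custom file name could be requested like this:

@example
FFREPORT=file=ffreport-%p-%t.log ffmpeg -i INPUT.avi OUTPUT.mkv
@end example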
|
||||
@item -cpuflags flags (@emph{global})
|
||||
Allows setting and clearing cpu flags. This option is intended
|
||||
for testing. Do not use it unless you know what you're doing.
|
||||
@example
|
||||
ffmpeg -cpuflags -sse+mmx ...
|
||||
ffmpeg -cpuflags mmx ...
|
||||
ffmpeg -cpuflags 0 ...
|
||||
@end example
|
||||
Possible flags for this option are:
|
||||
@table @samp
|
||||
@item x86
|
||||
@table @samp
|
||||
@item mmx
|
||||
@item mmxext
|
||||
@item sse
|
||||
@item sse2
|
||||
@item sse2slow
|
||||
@item sse3
|
||||
@item sse3slow
|
||||
@item ssse3
|
||||
@item atom
|
||||
@item sse4.1
|
||||
@item sse4.2
|
||||
@item avx
|
||||
@item xop
|
||||
@item fma4
|
||||
@item 3dnow
|
||||
@item 3dnowext
|
||||
@item cmov
|
||||
@end table
|
||||
@item ARM
|
||||
@table @samp
|
||||
@item armv5te
|
||||
@item armv6
|
||||
@item armv6t2
|
||||
@item vfp
|
||||
@item vfpv3
|
||||
@item neon
|
||||
@end table
|
||||
@item PowerPC
|
||||
@table @samp
|
||||
@item altivec
|
||||
@end table
|
||||
@item Specific Processors
|
||||
@table @samp
|
||||
@item pentium2
|
||||
@item pentium3
|
||||
@item pentium4
|
||||
@item k6
|
||||
@item k62
|
||||
@item athlon
|
||||
@item athlonxp
|
||||
@item k8
|
||||
@end table
|
||||
@end table
|
||||
|
||||
@item -opencl_options options (@emph{global})
|
||||
Set OpenCL environment options. This option is only available when
|
||||
FFmpeg has been compiled with @code{--enable-opencl}.
|
||||
|
||||
@var{options} must be a list of @var{key}=@var{value} option pairs
|
||||
separated by ':'. See the ``OpenCL Options'' section in the
|
||||
ffmpeg-utils manual for the list of supported options.
|
||||
@end table
|
||||
|
||||
@section AVOptions
|
||||
|
@@ -17,37 +17,12 @@ Below is a description of the currently available bitstream filters.
|
||||
|
||||
@section aac_adtstoasc
|
||||
|
||||
Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
|
||||
bitstream filter.
|
||||
|
||||
This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
|
||||
ADTS header and removes the ADTS header.
|
||||
|
||||
This is required for example when copying an AAC stream from a raw
|
||||
ADTS AAC container to a FLV or a MOV/MP4 file.
|
||||
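For example (file names assumed, and assuming the input really is raw ADTS AAC), a stream copy into MP4 would likely be invoked as:

@example
ffmpeg -i INPUT.aac -c:a copy -bsf:a aac_adtstoasc OUTPUT.mp4
@end example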
|
||||
@section chomp
|
||||
|
||||
Remove zero padding at the end of a packet.
|
||||
|
||||
@section dump_extradata
|
||||
|
||||
@section h264_mp4toannexb
|
||||
|
||||
Convert an H.264 bitstream from length prefixed mode to start code
|
||||
prefixed mode (as defined in the Annex B of the ITU-T H.264
|
||||
specification).
|
||||
|
||||
This is required by some streaming formats, typically the MPEG-2
|
||||
transport stream format ("mpegts").
|
||||
|
||||
For example to remux an MP4 file containing an H.264 stream to mpegts
|
||||
format with @command{ffmpeg}, you can use the command:
|
||||
|
||||
@example
|
||||
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
|
||||
@end example
|
||||
|
||||
@section imx_dump_header
|
||||
|
||||
@section mjpeg2jpeg
|
||||
@@ -82,7 +57,7 @@ stream (carrying the AVI1 header ID and lacking a DHT segment) to
|
||||
produce fully qualified JPEG images.
|
||||
|
||||
@example
|
||||
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
|
||||
ffmpeg -i mjpeg-movie.avi -c:v copy -vbsf mjpeg2jpeg frame_%d.jpg
|
||||
exiftran -i -9 frame*.jpg
|
||||
ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
|
||||
@end example
|
||||
|
1073  doc/codecs.texi  (file diff suppressed because it is too large)
@@ -48,114 +48,3 @@ top-field-first is assumed
|
||||
@end table
|
||||
|
||||
@c man end VIDEO DECODERS
|
||||
|
||||
@chapter Audio Decoders
|
||||
@c man begin AUDIO DECODERS
|
||||
|
||||
@section ffwavesynth
|
||||
|
||||
Internal wave synthesizer.
|
||||
|
||||
This decoder generates wave patterns according to predefined sequences. Its
|
||||
use is purely internal and the format of the data it accepts is not publicly
|
||||
documented.
|
||||
|
||||
@section libcelt
|
||||
|
||||
libcelt decoder wrapper.
|
||||
|
||||
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
|
||||
Requires the presence of the libcelt headers and library during configuration.
|
||||
You need to explicitly configure the build with @code{--enable-libcelt}.
|
||||
|
||||
@section libgsm
|
||||
|
||||
libgsm decoder wrapper.
|
||||
|
||||
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
|
||||
the presence of the libgsm headers and library during configuration. You need
|
||||
to explicitly configure the build with @code{--enable-libgsm}.
|
||||
|
||||
This decoder supports both the ordinary GSM and the Microsoft variant.
|
||||
|
||||
@section libilbc
|
||||
|
||||
libilbc decoder wrapper.
|
||||
|
||||
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
|
||||
audio codec. Requires the presence of the libilbc headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libilbc}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following option is supported by the libilbc wrapper.
|
||||
|
||||
@table @option
|
||||
@item enhance
|
||||
|
||||
Enable the enhancement of the decoded audio when set to 1. The default
|
||||
value is 0 (disabled).
|
||||
|
||||
@end table
|
||||
|
||||
@section libopencore-amrnb
|
||||
|
||||
libopencore-amrnb decoder wrapper.
|
||||
|
||||
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
|
||||
Narrowband audio codec. Using it requires the presence of the
|
||||
libopencore-amrnb headers and library during configuration. You need to
|
||||
explicitly configure the build with @code{--enable-libopencore-amrnb}.
|
||||
|
||||
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
|
||||
without this library.
|
||||
|
||||
@section libopencore-amrwb
|
||||
|
||||
libopencore-amrwb decoder wrapper.
|
||||
|
||||
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
|
||||
Wideband audio codec. Using it requires the presence of the
|
||||
libopencore-amrwb headers and library during configuration. You need to
|
||||
explicitly configure the build with @code{--enable-libopencore-amrwb}.
|
||||
|
||||
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
|
||||
without this library.
|
||||
|
||||
@section libopus
|
||||
|
||||
libopus decoder wrapper.
|
||||
|
||||
libopus allows libavcodec to decode the Opus Interactive Audio Codec.
|
||||
Requires the presence of the libopus headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libopus}.
|
||||
|
||||
@c man end AUDIO DECODERS
|
||||
|
||||
@chapter Subtitles Decoders
|
||||
@c man begin SUBTILES DECODERS
|
||||
|
||||
@section dvdsub
|
||||
|
||||
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
|
||||
also be found in VobSub file pairs and in some Matroska files.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item palette
|
||||
Specify the global palette used by the bitmaps. When stored in VobSub, the
|
||||
palette is normally specified in the index file; in Matroska, the palette is
|
||||
stored in the codec extra-data in the same format as in VobSub. In DVDs, the
|
||||
palette is stored in the IFO file, and therefore not available when reading
|
||||
from dumped VOB files.
|
||||
|
||||
The format for this option is a string containing 16 24-bits hexadecimal
|
||||
numbers (without 0x prefix) separated by commas, for example @code{0d00ee,
|
||||
ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
|
||||
7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b}.
|
||||
@end table
|
||||
|
||||
@c man end SUBTILES DECODERS
|
||||
|
149  doc/default.css
@@ -1,149 +0,0 @@
|
||||
a {
|
||||
color: #2D6198;
|
||||
}
|
||||
|
||||
a:visited {
|
||||
color: #884488;
|
||||
}
|
||||
|
||||
#banner {
|
||||
background-color: white;
|
||||
position: relative;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
#banner img {
|
||||
padding-bottom: 1px;
|
||||
padding-top: 5px;
|
||||
}
|
||||
|
||||
#body {
|
||||
margin-left: 1em;
|
||||
margin-right: 1em;
|
||||
}
|
||||
|
||||
body {
|
||||
background-color: #313131;
|
||||
margin: 0;
|
||||
text-align: justify;
|
||||
}
|
||||
|
||||
.center {
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
#container {
|
||||
background-color: white;
|
||||
color: #202020;
|
||||
margin-left: 1em;
|
||||
margin-right: 1em;
|
||||
}
|
||||
|
||||
#footer {
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
h1, h2, h3 {
|
||||
padding-left: 0.4em;
|
||||
border-radius: 4px;
|
||||
padding-bottom: 0.2em;
|
||||
padding-top: 0.2em;
|
||||
border: 1px solid #6A996A;
|
||||
}
|
||||
|
||||
h1 {
|
||||
background-color: #7BB37B;
|
||||
color: #151515;
|
||||
font-size: 1.2em;
|
||||
padding-bottom: 0.3em;
|
||||
padding-top: 0.3em;
|
||||
}
|
||||
|
||||
h2 {
|
||||
color: #313131;
|
||||
font-size: 0.9em;
|
||||
background-color: #ABE3AB;
|
||||
}
|
||||
|
||||
h3 {
|
||||
color: #313131;
|
||||
font-size: 0.8em;
|
||||
margin-bottom: -8px;
|
||||
background-color: #BBF3BB;
|
||||
}
|
||||
|
||||
img {
|
||||
border: 0;
|
||||
}
|
||||
|
||||
#navbar {
|
||||
background-color: #738073;
|
||||
border-bottom: 1px solid #5C665C;
|
||||
border-top: 1px solid #5C665C;
|
||||
margin-top: 12px;
|
||||
padding: 0.3em;
|
||||
position: relative;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
#navbar a, #navbar_secondary a {
|
||||
color: white;
|
||||
padding: 0.3em;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
#navbar a:hover, #navbar_secondary a:hover {
|
||||
background-color: #313131;
|
||||
color: white;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
#navbar_secondary {
|
||||
background-color: #738073;
|
||||
border-bottom: 1px solid #5C665C;
|
||||
border-left: 1px solid #5C665C;
|
||||
border-right: 1px solid #5C665C;
|
||||
padding: 0.3em;
|
||||
position: relative;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
p {
|
||||
margin-left: 1em;
|
||||
margin-right: 1em;
|
||||
}
|
||||
|
||||
pre {
|
||||
margin-left: 3em;
|
||||
margin-right: 3em;
|
||||
padding: 0.3em;
|
||||
border: 1px solid #bbb;
|
||||
background-color: #f7f7f7;
|
||||
}
|
||||
|
||||
dl dt {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
#proj_desc {
|
||||
font-size: 1.2em;
|
||||
}
|
||||
|
||||
#repos {
|
||||
margin-left: 1em;
|
||||
margin-right: 1em;
|
||||
border-collapse: collapse;
|
||||
border: solid 1px #6A996A;
|
||||
}
|
||||
|
||||
#repos th {
|
||||
background-color: #7BB37B;
|
||||
border: solid 1px #6A996A;
|
||||
}
|
||||
|
||||
#repos td {
|
||||
padding: 0.2em;
|
||||
border: solid 1px #6A996A;
|
||||
}
|
@@ -6,18 +6,64 @@ multimedia streams from a particular type of file.
|
||||
|
||||
When you configure your FFmpeg build, all the supported demuxers
|
||||
are enabled by default. You can list all available ones using the
|
||||
configure option @code{--list-demuxers}.
|
||||
configure option "--list-demuxers".
|
||||
|
||||
You can disable all the demuxers using the configure option
|
||||
@code{--disable-demuxers}, and selectively enable a single demuxer with
|
||||
the option @code{--enable-demuxer=@var{DEMUXER}}, or disable it
|
||||
with the option @code{--disable-demuxer=@var{DEMUXER}}.
|
||||
"--disable-demuxers", and selectively enable a single demuxer with
|
||||
the option "--enable-demuxer=@var{DEMUXER}", or disable it
|
||||
with the option "--disable-demuxer=@var{DEMUXER}".
|
||||
|
||||
The option @code{-formats} of the ff* tools will display the list of
|
||||
The option "-formats" of the ff* tools will display the list of
|
||||
enabled demuxers.
|
||||
|
||||
The description of some of the currently available demuxers follows.
|
||||
|
||||
@section image2
|
||||
|
||||
Image file demuxer.
|
||||
|
||||
This demuxer reads from a list of image files specified by a pattern.
|
||||
|
||||
The pattern may contain the string "%d" or "%0@var{N}d", which
|
||||
specifies the position of the characters representing a sequential
|
||||
number in each filename matched by the pattern. If the form
"%0@var{N}d" is used, the string representing the number in each
|
||||
filename is 0-padded and @var{N} is the total number of 0-padded
|
||||
digits representing the number. The literal character '%' can be
|
||||
specified in the pattern with the string "%%".
|
||||
|
||||
If the pattern contains "%d" or "%0@var{N}d", the first filename of
|
||||
the file list specified by the pattern must contain a number
|
||||
inclusively contained between 0 and 4; all the following numbers must
be sequential. This limitation will hopefully be fixed.
|
||||
|
||||
The pattern may contain a suffix which is used to automatically
|
||||
determine the format of the images contained in the files.
|
||||
|
||||
For example the pattern "img-%03d.bmp" will match a sequence of
|
||||
filenames of the form @file{img-001.bmp}, @file{img-002.bmp}, ...,
|
||||
@file{img-010.bmp}, etc.; the pattern "i%%m%%g-%d.jpg" will match a
|
||||
sequence of filenames of the form @file{i%m%g-1.jpg},
|
||||
@file{i%m%g-2.jpg}, ..., @file{i%m%g-10.jpg}, etc.
|
||||
|
||||
The size, the pixel format, and the format of each image must be the
|
||||
same for all the files in the sequence.
|
||||
|
||||
The following example shows how to use @command{ffmpeg} for creating a
|
||||
video from the images in the file sequence @file{img-001.jpeg},
|
||||
@file{img-002.jpeg}, ..., assuming an input frame rate of 10 frames per
|
||||
second:
|
||||
@example
|
||||
ffmpeg -i 'img-%03d.jpeg' -r 10 out.mkv
|
||||
@end example
|
||||
|
||||
Note that the pattern must not necessarily contain "%d" or
|
||||
"%0@var{N}d", for example to convert a single image file
|
||||
@file{img.jpeg} you can employ the command:
|
||||
@example
|
||||
ffmpeg -i img.jpeg img.png
|
||||
@end example
|
||||
|
||||
@section applehttp
|
||||
|
||||
Apple HTTP Live Streaming demuxer.
|
||||
@@ -29,312 +75,4 @@ the caller can decide which variant streams to actually receive.
|
||||
The total bitrate of the variant that the stream belongs to is
|
||||
available in a metadata key named "variant_bitrate".
|
||||
|
||||
@anchor{concat}
|
||||
@section concat
|
||||
|
||||
Virtual concatenation script demuxer.
|
||||
|
||||
This demuxer reads a list of files and other directives from a text file and
|
||||
demuxes them one after the other, as if all their packets had been muxed
|
||||
together.
|
||||
|
||||
The timestamps in the files are adjusted so that the first file starts at 0
|
||||
and each next file starts where the previous one finishes. Note that it is
|
||||
done globally and may cause gaps if all streams do not have exactly the same
|
||||
length.
|
||||
|
||||
All files must have the same streams (same codecs, same time base, etc.).
|
||||
|
||||
The duration of each file is used to adjust the timestamps of the next file:
|
||||
if the duration is incorrect (because it was computed using the bit-rate or
|
||||
because the file is truncated, for example), it can cause artifacts. The
|
||||
@code{duration} directive can be used to override the duration stored in
|
||||
each file.
|
||||
|
||||
@subsection Syntax
|
||||
|
||||
The script is a text file in extended-ASCII, with one directive per line.
|
||||
Empty lines, leading spaces and lines starting with '#' are ignored. The
|
||||
following directive is recognized:
|
||||
|
||||
@table @option
|
||||
|
||||
@item @code{file @var{path}}
|
||||
Path to a file to read; special characters and spaces must be escaped with
|
||||
backslash or single quotes.
|
||||
|
||||
All subsequent directives apply to that file.
|
||||
|
||||
@item @code{ffconcat version 1.0}
|
||||
Identify the script type and version. It also sets the @option{safe} option
|
||||
to 1 if it was set to its default -1.
|
||||
|
||||
To make FFmpeg recognize the format automatically, this directive must
|
||||
appear exactly as is (no extra space or byte-order-mark) on the very first
|
||||
line of the script.
|
||||
|
||||
@item @code{duration @var{dur}}
|
||||
Duration of the file. This information can be specified from the file;
|
||||
specifying it here may be more efficient or help if the information from the
|
||||
file is not available or accurate.
|
||||
|
||||
If the duration is set for all files, then it is possible to seek in the
|
||||
whole concatenated video.
|
||||
|
||||
@end table
|
||||
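As an illustrative sketch (the file names here are invented), a minimal script and the matching command could look like:

@example
# list.ffconcat
ffconcat version 1.0
file intro.mkv
duration 10.0
file main.mkv
@end example

@example
ffmpeg -f concat -i list.ffconcat -c copy OUTPUT.mkv
@end example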
|
||||
@subsection Options
|
||||
|
||||
This demuxer accepts the following option:
|
||||
|
||||
@table @option
|
||||
|
||||
@item safe
|
||||
If set to 1, reject unsafe file paths. A file path is considered safe if it
|
||||
does not contain a protocol specification and is relative and all components
|
||||
only contain characters from the portable character set (letters, digits,
|
||||
period, underscore and hyphen) and have no period at the beginning of a
|
||||
component.
|
||||
|
||||
If set to 0, any file name is accepted.
|
||||
|
||||
The default is -1, it is equivalent to 1 if the format was automatically
|
||||
probed and 0 otherwise.
|
||||
|
||||
@end table
|
||||
|
||||
@section libgme
|
||||
|
||||
The Game Music Emu library is a collection of video game music file emulators.
|
||||
|
||||
See @url{http://code.google.com/p/game-music-emu/} for more information.
|
||||
|
||||
Some files have multiple tracks. The demuxer will pick the first track by
|
||||
default. The @option{track_index} option can be used to select a different
|
||||
track. Track indexes start at 0. The demuxer exports the number of tracks as
|
||||
@var{tracks} meta data entry.
|
||||
|
||||
For very large files, the @option{max_size} option may have to be adjusted.
|
||||
|
||||
@section libquvi
|
||||
|
||||
Play media from Internet services using the quvi project.
|
||||
|
||||
The demuxer accepts a @option{format} option to request a specific quality. It
|
||||
is by default set to @var{best}.
|
||||
|
||||
See @url{http://quvi.sourceforge.net/} for more information.
|
||||
|
||||
FFmpeg needs to be built with @code{--enable-libquvi} for this demuxer to be
|
||||
enabled.
|
||||
|
||||
@section image2
|
||||
|
||||
Image file demuxer.
|
||||
|
||||
This demuxer reads from a list of image files specified by a pattern.
|
||||
The syntax and meaning of the pattern is specified by the
|
||||
option @var{pattern_type}.
|
||||
|
||||
The pattern may contain a suffix which is used to automatically
|
||||
determine the format of the images contained in the files.
|
||||
|
||||
The size, the pixel format, and the format of each image must be the
|
||||
same for all the files in the sequence.
|
||||
|
||||
This demuxer accepts the following options:
|
||||
@table @option
|
||||
@item framerate
|
||||
Set the frame rate for the video stream. It defaults to 25.
|
||||
@item loop
|
||||
If set to 1, loop over the input. Default value is 0.
|
||||
@item pattern_type
|
||||
Select the pattern type used to interpret the provided filename.
|
||||
|
||||
@var{pattern_type} accepts one of the following values.
|
||||
@table @option
|
||||
@item sequence
|
||||
Select a sequence pattern type, used to specify a sequence of files
|
||||
indexed by sequential numbers.
|
||||
|
||||
A sequence pattern may contain the string "%d" or "%0@var{N}d", which
|
||||
specifies the position of the characters representing a sequential
|
||||
number in each filename matched by the pattern. If the form
"%0@var{N}d" is used, the string representing the number in each
|
||||
filename is 0-padded and @var{N} is the total number of 0-padded
|
||||
digits representing the number. The literal character '%' can be
|
||||
specified in the pattern with the string "%%".
|
||||
|
||||
If the sequence pattern contains "%d" or "%0@var{N}d", the first filename of
|
||||
the file list specified by the pattern must contain a number
|
||||
inclusively contained between @var{start_number} and
|
||||
@var{start_number}+@var{start_number_range}-1, and all the following
|
||||
numbers must be sequential.
|
||||
|
||||
For example the pattern "img-%03d.bmp" will match a sequence of
|
||||
filenames of the form @file{img-001.bmp}, @file{img-002.bmp}, ...,
|
||||
@file{img-010.bmp}, etc.; the pattern "i%%m%%g-%d.jpg" will match a
|
||||
sequence of filenames of the form @file{i%m%g-1.jpg},
|
||||
@file{i%m%g-2.jpg}, ..., @file{i%m%g-10.jpg}, etc.
|
||||
|
||||
Note that the pattern must not necessarily contain "%d" or
|
||||
"%0@var{N}d", for example to convert a single image file
|
||||
@file{img.jpeg} you can employ the command:
|
||||
@example
|
||||
ffmpeg -i img.jpeg img.png
|
||||
@end example
|
||||
|
||||
@item glob
|
||||
Select a glob wildcard pattern type.
|
||||
|
||||
The pattern is interpreted like a @code{glob()} pattern. This is only
|
||||
selectable if libavformat was compiled with globbing support.
|
||||
|
||||
@item glob_sequence @emph{(deprecated, will be removed)}
|
||||
Select a mixed glob wildcard/sequence pattern.
|
||||
|
||||
If your version of libavformat was compiled with globbing support, and
|
||||
the provided pattern contains at least one glob meta character among
|
||||
@code{%*?[]@{@}} that is preceded by an unescaped "%", the pattern is
|
||||
interpreted like a @code{glob()} pattern, otherwise it is interpreted
|
||||
like a sequence pattern.
|
||||
|
||||
All glob special characters @code{%*?[]@{@}} must be prefixed
|
||||
with "%". To escape a literal "%" you shall use "%%".
|
||||
|
||||
For example the pattern @code{foo-%*.jpeg} will match all the
|
||||
filenames prefixed by "foo-" and terminating with ".jpeg", and
|
||||
@code{foo-%?%?%?.jpeg} will match all the filenames prefixed with
|
||||
"foo-", followed by a sequence of three characters, and terminating
|
||||
with ".jpeg".
|
||||
|
||||
This pattern type is deprecated in favor of @var{glob} and
|
||||
@var{sequence}.
|
||||
@end table
|
||||
|
||||
Default value is @var{glob_sequence}.
|
||||
@item pixel_format
|
||||
Set the pixel format of the images to read. If not specified the pixel
|
||||
format is guessed from the first image file in the sequence.
|
||||
@item start_number
|
||||
Set the index of the file matched by the image file pattern to start
|
||||
to read from. Default value is 0.
|
||||
@item start_number_range
|
||||
Set the index interval range to check when looking for the first image
|
||||
file in the sequence, starting from @var{start_number}. Default value
|
||||
is 5.
|
||||
@item ts_from_file
|
||||
If set to 1, will set frame timestamp to modification time of image file. Note
|
||||
that monotonicity of timestamps is not provided: images go in the same order as
|
||||
without this option. Default value is 0.
|
||||
@item video_size
|
||||
Set the video size of the images to read. If not specified the video
|
||||
size is guessed from the first image file in the sequence.
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Use @command{ffmpeg} for creating a video from the images in the file
|
||||
sequence @file{img-001.jpeg}, @file{img-002.jpeg}, ..., assuming an
|
||||
input frame rate of 10 frames per second:
|
||||
@example
|
||||
ffmpeg -i 'img-%03d.jpeg' -r 10 out.mkv
|
||||
@end example
|
||||
|
||||
@item
|
||||
As above, but start by reading from a file with index 100 in the sequence:
|
||||
@example
|
||||
ffmpeg -start_number 100 -i 'img-%03d.jpeg' -r 10 out.mkv
|
||||
@end example
|
||||
|
||||
@item
|
||||
Read images matching the "*.png" glob pattern, that is all the files
|
||||
terminating with the ".png" suffix:
|
||||
@example
|
||||
ffmpeg -pattern_type glob -i "*.png" -r 10 out.mkv
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@section rawvideo
|
||||
|
||||
Raw video demuxer.
|
||||
|
||||
This demuxer allows reading raw video data. Since there is no header
|
||||
specifying the assumed video parameters, the user must specify them
|
||||
in order to be able to decode the data correctly.
|
||||
|
||||
This demuxer accepts the following options:
|
||||
@table @option
|
||||
|
||||
@item framerate
|
||||
Set input video frame rate. Default value is 25.
|
||||
|
||||
@item pixel_format
|
||||
Set the input video pixel format. Default value is @code{yuv420p}.
|
||||
|
||||
@item video_size
|
||||
Set the input video size. This value must be specified explicitly.
|
||||
@end table
|
||||
|
||||
For example to read a rawvideo file @file{input.raw} with
|
||||
@command{ffplay}, assuming a pixel format of @code{rgb24}, a video
|
||||
size of @code{320x240}, and a frame rate of 10 images per second, use
|
||||
the command:
|
||||
@example
|
||||
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
|
||||
@end example
|
||||
|
||||
@section sbg
|
||||
|
||||
SBaGen script demuxer.
|
||||
|
||||
This demuxer reads the script language used by SBaGen
|
||||
@url{http://uazu.net/sbagen/} to generate binaural beats sessions. An SBG
script looks like this:
|
||||
@example
|
||||
-SE
|
||||
a: 300-2.5/3 440+4.5/0
|
||||
b: 300-2.5/0 440+4.5/3
|
||||
off: -
|
||||
NOW == a
|
||||
+0:07:00 == b
|
||||
+0:14:00 == a
|
||||
+0:21:00 == b
|
||||
+0:30:00 off
|
||||
@end example
|
||||
|
||||
An SBG script can mix absolute and relative timestamps. If the script uses
|
||||
either only absolute timestamps (including the script start time) or only
|
||||
relative ones, then its layout is fixed, and the conversion is
|
||||
straightforward. On the other hand, if the script mixes both kinds of
|
||||
timestamps, then the @var{NOW} reference for relative timestamps will be
|
||||
taken from the current time of day at the time the script is read, and the
|
||||
script layout will be frozen according to that reference. That means that if
|
||||
the script is directly played, the actual times will match the absolute
|
||||
timestamps up to the sound controller's clock accuracy, but if the user
|
||||
somehow pauses the playback or seeks, all times will be shifted accordingly.
|
||||
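Assuming the script above is saved as @file{session.sbg} (the name is chosen for illustration), it can presumably be played directly:

@example
ffplay -f sbg session.sbg
@end example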
|
||||
@section tedcaptions
|
||||
|
||||
JSON captions used for @url{http://www.ted.com/, TED Talks}.
|
||||
|
||||
TED does not provide links to the captions, but they can be guessed from the
|
||||
page. The file @file{tools/bookmarklets.html} from the FFmpeg source tree
|
||||
contains a bookmarklet to expose them.
|
||||
|
||||
This demuxer accepts the following option:
|
||||
@table @option
|
||||
@item start_time
|
||||
Set the start time of the TED talk, in milliseconds. The default is 15000
|
||||
(15s). It is used to sync the captions with the downloadable videos, because
|
||||
they include a 15s intro.
|
||||
@end table
|
||||
|
||||
Example: convert the captions to a format most players understand:
|
||||
@example
|
||||
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
|
||||
@end example
|
||||
|
||||
@c man end DEMUXERS
|
||||
@c man end INPUT DEVICES
|
||||
|
@@ -14,13 +14,12 @@
|
||||
@section API
|
||||
@itemize @bullet
|
||||
@item libavcodec is the library containing the codecs (both encoding and
|
||||
decoding). Look at @file{doc/examples/decoding_encoding.c} to see how to use
|
||||
it.
|
||||
decoding). Look at @file{libavcodec/apiexample.c} to see how to use it.
|
||||
|
||||
@item libavformat is the library containing the file format handling (mux and
|
||||
demux code for several formats). Look at @file{ffplay.c} to use it in a
|
||||
player. See @file{doc/examples/muxing.c} to use it to generate audio or video
|
||||
streams.
|
||||
player. See @file{libavformat/output-example.c} to use it to generate
|
||||
audio or video streams.
|
||||
|
||||
@end itemize
|
||||
|
||||
@@ -147,45 +146,30 @@ GCC statement expressions (@samp{(x = (@{ int y = 4; y; @})}).
|
||||
@end itemize
|
||||
|
||||
@subsection Naming conventions
|
||||
All names should be composed with underscores (_), not CamelCase. For example,
|
||||
@samp{avfilter_get_video_buffer} is an acceptable function name and
|
||||
@samp{AVFilterGetVideo} is not. The exception from this are type names, like
|
||||
All names are using underscores (_), not CamelCase. For example, @samp{avfilter_get_video_buffer} is
|
||||
a valid function name and @samp{AVFilterGetVideo} is not. The exception from this are type names, like
|
||||
for example structs and enums; they should always be in the CamelCase
|
||||
|
||||
There are the following conventions for naming variables and functions:
|
||||
|
||||
There are following conventions for naming variables and functions:
|
||||
@itemize @bullet
|
||||
@item
|
||||
For local variables no prefix is required.
|
||||
@item
|
||||
For file-scope variables and functions declared as @code{static}, no prefix
|
||||
is required.
|
||||
For variables and functions declared as @code{static} no prefixes are required.
|
||||
@item
|
||||
For variables and functions visible outside of file scope, but only used
|
||||
internally by a library, an @code{ff_} prefix should be used,
|
||||
e.g. @samp{ff_w64_demuxer}.
|
||||
For variables and functions used internally by the library, @code{ff_} prefix
|
||||
should be used.
|
||||
For example, @samp{ff_w64_demuxer}.
|
||||
@item
|
||||
For variables and functions visible outside of file scope, used internally
|
||||
across multiple libraries, use @code{avpriv_} as prefix, for example,
|
||||
@samp{avpriv_aac_parse_header}.
|
||||
For variables and functions used internally across multiple libraries, use
|
||||
@code{avpriv_}. For example, @samp{avpriv_aac_parse_header}.
|
||||
@item
|
||||
Each library has its own prefix for public symbols, in addition to the
|
||||
commonly used @code{av_} (@code{avformat_} for libavformat,
|
||||
@code{avcodec_} for libavcodec, @code{swr_} for libswresample, etc).
|
||||
Check the existing code and choose names accordingly.
|
||||
Note that some symbols without these prefixes are also exported for
|
||||
retro-compatibility reasons. These exceptions are declared in the
|
||||
@code{lib<name>/lib<name>.v} files.
|
||||
For exported names, each library has its own prefixes. Just check the existing
|
||||
code and name accordingly.
|
||||
@end itemize
|
||||
|
||||
Furthermore, name space reserved for the system should not be invaded.
|
||||
Identifiers ending in @code{_t} are reserved by
|
||||
@url{http://pubs.opengroup.org/onlinepubs/007904975/functions/xsh_chap02_02.html#tag_02_02_02, POSIX}.
|
||||
Also avoid names starting with @code{__} or @code{_} followed by an uppercase
|
||||
letter as they are reserved by the C standard. Names starting with @code{_}
|
||||
are reserved at the file level and may not be used for externally visible
|
||||
symbols. If in doubt, just avoid names starting with @code{_} altogether.
|
||||
|
||||
@subsection Miscellaneous conventions
|
||||
@subsection Miscellanous conventions
|
||||
@itemize @bullet
|
||||
@item
|
||||
fprintf and printf are forbidden in libavformat and libavcodec,
|
||||
@@ -203,10 +187,8 @@ the following snippet into your @file{.vimrc}:
|
||||
set expandtab
|
||||
set shiftwidth=4
|
||||
set softtabstop=4
|
||||
set cindent
|
||||
set cinoptions=(0
|
||||
" Allow tabs in Makefiles.
|
||||
autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
|
||||
" allow tabs in Makefiles
|
||||
autocmd FileType make set noexpandtab shiftwidth=8 softtabstop=8
|
||||
" Trailing whitespace and tabs are forbidden, so highlight them.
|
||||
highlight ForbiddenWhitespace ctermbg=red guibg=red
|
||||
match ForbiddenWhitespace /\s\+$\|\t/
|
||||
@@ -216,29 +198,18 @@ autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@@<!$/
|
||||
|
||||
For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
@example
|
||||
(c-add-style "ffmpeg"
|
||||
'("k&r"
|
||||
(c-basic-offset . 4)
|
||||
(indent-tabs-mode . nil)
|
||||
(show-trailing-whitespace . t)
|
||||
(c-offsets-alist
|
||||
(statement-cont . (c-lineup-assignments +)))
|
||||
)
|
||||
)
|
||||
(setq c-default-style "ffmpeg")
|
||||
(setq c-default-style "k&r")
|
||||
(setq-default c-basic-offset 4)
|
||||
(setq-default indent-tabs-mode nil)
|
||||
(setq-default show-trailing-whitespace t)
|
||||
@end example
|
||||
|
||||
@section Development Policy
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
Contributions should be licensed under the
|
||||
@uref{http://www.gnu.org/licenses/lgpl-2.1.html, LGPL 2.1},
|
||||
including an "or any later version" clause, or, if you prefer
|
||||
a gift-style license, the
|
||||
@uref{http://www.isc.org/software/license/, ISC} or
|
||||
@uref{http://mit-license.org/, MIT} license.
|
||||
@uref{http://www.gnu.org/licenses/gpl-2.0.html, GPL 2} including
|
||||
Contributions should be licensed under the LGPL 2.1, including an
|
||||
"or any later version" clause, or the MIT license. GPL 2 including
|
||||
an "or any later version" clause is also acceptable, but LGPL is
|
||||
preferred.
|
||||
@item
|
||||
@@ -248,13 +219,6 @@ For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
You can commit unfinished stuff (for testing etc), but it must be disabled
|
||||
(#ifdef etc) by default so it does not interfere with other developers'
|
||||
work.
|
||||
@item
|
||||
The commit message should have a short first line in the form of
|
||||
a @samp{topic: short description} as a header, separated by a newline
|
||||
from the body consisting of an explanation of why the change is necessary.
|
||||
If the commit fixes a known bug on the bug tracker, the commit message
|
||||
should include its bug ID. Referring to the issue on the bug tracker does
|
||||
not exempt you from writing an excerpt of the bug in the commit message.
|
||||
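A purely invented example of the expected shape (component, function and ticket number are hypothetical):

@example
lavc/foodec: fix hypothetical overflow in foo_parse_header()

foo_parse_header() trusted the size field read from the bitstream,
so a crafted file could overflow the output buffer. Validate the
size before using it. Fixes ticket #NNNN.
@end example
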
@item
|
||||
You do not have to over-test things. If it works for you, and you think it
|
||||
should work for others, then commit. If your code has problems
|
||||
@@ -344,8 +308,7 @@ For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
(e.g. addition of a function to the public API or extension of an
|
||||
existing data structure).
|
||||
Incrementing the third component means a noteworthy binary compatible
|
||||
change (e.g. encoder bug fix that matters for the decoder). The third
|
||||
component always starts at 100 to distinguish FFmpeg from Libav.
|
||||
change (e.g. encoder bug fix that matters for the decoder).
|
||||
@item
|
||||
Compiler warnings indicate potential bugs or code with bad style. If a type of
|
||||
warning always points to correct and clean code, that warning should
|
||||
@@ -361,6 +324,8 @@ For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
|
||||
We think our rules are not too hard. If you have comments, contact us.
|
||||
|
||||
Note, these rules are mostly borrowed from the MPlayer project.
|
||||
|
||||
@anchor{Submitting patches}
|
||||
@section Submitting patches
|
||||
|
||||
@@ -380,9 +345,14 @@ for us and greatly increases your chances of getting your patch applied.
|
||||
Use the patcheck tool of FFmpeg to check your patch.
|
||||
The tool is located in the tools directory.
|
||||
|
||||
Run the @ref{Regression tests} before submitting a patch in order to verify
|
||||
Run the @ref{Regression Tests} before submitting a patch in order to verify
|
||||
it does not cause unexpected problems.
|
||||
|
||||
Patches should be posted as base64 encoded attachments (or any other
|
||||
encoding which ensures that the patch will not be trashed during
|
||||
transmission) to the ffmpeg-devel mailing list, see
|
||||
@url{http://lists.ffmpeg.org/mailman/listinfo/ffmpeg-devel}
|
||||
|
||||
It also helps quite a bit if you tell us what the patch does (for example
|
||||
'replaces lrint by lrintf'), and why (for example '*BSD isn't C99 compliant
|
||||
and has no lrint()')
|
||||
@@ -390,13 +360,6 @@ and has no lrint()')
|
||||
Also please if you send several patches, send each patch as a separate mail,
|
||||
do not attach several unrelated patches to the same mail.
|
||||
|
||||
Patches should be posted to the
|
||||
@uref{http://lists.ffmpeg.org/mailman/listinfo/ffmpeg-devel, ffmpeg-devel}
|
||||
mailing list. Use @code{git send-email} when possible since it will properly
|
||||
send patches without requiring extra care. If you cannot, then send patches
|
||||
as base64-encoded attachments, so your patch is not trashed during
|
||||
transmission.
|
||||
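A possible workflow (the list address is the one published above; the branch name is assumed):

@example
git format-patch -o outgoing origin/master
git send-email --to ffmpeg-devel@ffmpeg.org outgoing/*.patch
@end example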
|
||||
Your patch will be reviewed on the mailing list. You will likely be asked
|
||||
to make some changes and are expected to send in an improved version that
|
||||
incorporates the requests from the review. This process may go through
|
||||
@@ -421,11 +384,9 @@ send a reminder by email. Your patch should eventually be dealt with.
|
||||
@item
|
||||
Did you register it in @file{allcodecs.c} or @file{allformats.c}?
|
||||
@item
|
||||
Did you add the AVCodecID to @file{avcodec.h}?
|
||||
When adding new codec IDs, also add an entry to the codec descriptor
|
||||
list in @file{libavcodec/codec_desc.c}.
|
||||
Did you add the CodecID to @file{avcodec.h}?
|
||||
@item
|
||||
If it has a FourCC, did you add it to @file{libavformat/riff.c},
|
||||
If it has a fourCC, did you add it to @file{libavformat/riff.c},
|
||||
even if it is only a decoder?
|
||||
@item
|
||||
Did you add a rule to compile the appropriate files in the Makefile?
|
||||
@@ -457,7 +418,7 @@ send a reminder by email. Your patch should eventually be dealt with.
|
||||
Was the patch generated with git format-patch or send-email?
|
||||
@item
|
||||
Did you sign off your patch? (git commit -s)
|
||||
See @url{http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob_plain;f=Documentation/SubmittingPatches} for the meaning
|
||||
See @url{http://kerneltrap.org/files/Jeremy/DCO.txt} for the meaning
|
||||
of sign off.
|
||||
@item
|
||||
Did you provide a clear git commit log message?
|
||||
@@ -478,10 +439,8 @@ send a reminder by email. Your patch should eventually be dealt with.
|
||||
other security issues?
|
||||
@item
|
||||
Did you test your decoder or demuxer against damaged data? If no, see
|
||||
tools/trasher, the noise bitstream filter, and
|
||||
@uref{http://caca.zoy.org/wiki/zzuf, zzuf}. Your decoder or demuxer
|
||||
should not crash, end in a (near) infinite loop, or allocate ridiculous
|
||||
amounts of memory when fed damaged data.
|
||||
tools/trasher and the noise bitstream filter. Your decoder or demuxer
|
||||
should not crash or end in a (near) infinite loop when fed damaged data.
|
||||
@item
|
||||
Does the patch not mix functional and cosmetic changes?
|
||||
@item
|
||||
@@ -521,13 +480,6 @@ send a reminder by email. Your patch should eventually be dealt with.
|
||||
Consider adding a regression test for your code.
|
||||
@item
|
||||
If you added YASM code please check that things still work with --disable-yasm
|
||||
@item
|
||||
Make sure you check the return values of functions and return appropriate
|
||||
error codes. Especially memory allocation functions like @code{av_malloc()}
|
||||
are notoriously left unchecked, which is a serious problem.
|
||||
@item
|
||||
Test your code with valgrind and or Address Sanitizer to ensure it's free
|
||||
of leaks, out of array accesses, etc.
|
||||
@end enumerate
|
||||
|
||||
@section Patch review process
|
||||
@@ -556,153 +508,15 @@ not related to the comments received during review. Such patches will
|
||||
be rejected. Instead, submit significant changes or new features as
|
||||
separate patches.
|
||||
|
||||
@anchor{Regression tests}
|
||||
@section Regression tests
|
||||
|
||||
Before submitting a patch (or committing to the repository), you should at least
|
||||
test that you did not break anything.
|
||||
|
||||
Running 'make fate' accomplishes this, please see @url{fate.html} for details.
|
||||
Running 'make fate' accomplishes this, please see @file{doc/fate.txt} for details.
|
||||
|
||||
[Of course, some patches may change the results of the regression tests. In
|
||||
this case, the reference results of the regression tests shall be modified
|
||||
accordingly].
|
||||
|
||||
@subsection Adding files to the fate-suite dataset
|
||||
|
||||
When there is no muxer or encoder available to generate test media for a
|
||||
specific test, then the media has to be included in the fate-suite.
|
||||
First please make sure that the sample file is as small as possible to test the
|
||||
respective decoder or demuxer sufficiently. Large files increase network
|
||||
bandwidth and disk space requirements.
|
||||
Once you have a working fate test and fate sample, provide in the commit
|
||||
message or introductory message for the patch series that you post to
|
||||
the ffmpeg-devel mailing list, a direct link to download the sample media.
|
||||
|
||||
|
||||
@subsection Visualizing Test Coverage
|
||||
|
||||
The FFmpeg build system allows visualizing the test coverage in an easy
|
||||
manner with the coverage tools @code{gcov}/@code{lcov}. This involves
|
||||
the following steps:
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
Configure to compile with instrumentation enabled:
|
||||
@code{configure --toolchain=gcov}.
|
||||
@item
|
||||
Run your test case, either manually or via FATE. This can be either
|
||||
the full FATE regression suite, or any arbitrary invocation of any
|
||||
front-end tool provided by FFmpeg, in any combination.
|
||||
@item
|
||||
Run @code{make lcov} to generate coverage data in HTML format.
|
||||
@item
|
||||
View @code{lcov/index.html} in your preferred HTML viewer.
|
||||
@end enumerate
|
||||
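Put together, a coverage run might look roughly like this (the job count is arbitrary):

@example
./configure --toolchain=gcov
make -j4 fate
make lcov
# then open lcov/index.html in a browser
@end example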
|
||||
You can use the command @code{make lcov-reset} to reset the coverage
|
||||
measurements. You will need to rerun @code{make lcov} after running a
|
||||
new test.
|
||||
|
||||
@subsection Using Valgrind
|
||||
|
||||
The configure script provides a shortcut for using valgrind to spot bugs
|
||||
related to memory handling. Just add the option
|
||||
@code{--toolchain=valgrind-memcheck} or @code{--toolchain=valgrind-massif}
|
||||
to your configure line, and reasonable defaults will be set for running
|
||||
FATE under the supervision of either the @strong{memcheck} or the
|
||||
@strong{massif} tool of the valgrind suite.
|
||||
|
||||
In case you need finer control over how valgrind is invoked, use the
|
||||
@code{--target-exec='valgrind <your_custom_valgrind_options>} option in
|
||||
your configure line instead.
|
||||
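For instance, a memcheck-supervised FATE run would presumably be set up as:

@example
./configure --toolchain=valgrind-memcheck
make fate
@end example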
|
||||
@anchor{Release process}
|
||||
@section Release process
|
||||
|
||||
FFmpeg maintains a set of @strong{release branches}, which are the
|
||||
recommended deliverable for system integrators and distributors (such as
|
||||
Linux distributions, etc.). At regular times, a @strong{release
|
||||
manager} prepares, tests and publishes tarballs on the
|
||||
@url{http://ffmpeg.org} website.
|
||||
|
||||
There are two kinds of releases:
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
@strong{Major releases} always include the latest and greatest
|
||||
features and functionality.
|
||||
@item
|
||||
@strong{Point releases} are cut from @strong{release} branches,
|
||||
which are named @code{release/X}, with @code{X} being the release
|
||||
version number.
|
||||
@end enumerate
|
||||
|
||||
Note that we promise to our users that shared libraries from any FFmpeg
|
||||
release never break programs that have been @strong{compiled} against
|
||||
previous versions of @strong{the same release series} in any case!
|
||||
|
||||
However, from time to time, we do make API changes that require adaptations
|
||||
in applications. Such changes are only allowed in (new) major releases and
|
||||
require further steps such as bumping library version numbers and/or
|
||||
adjustments to the symbol versioning file. Please discuss such changes
|
||||
on the @strong{ffmpeg-devel} mailing list in time to allow forward planning.
|
||||
|
||||
@anchor{Criteria for Point Releases}
|
||||
@subsection Criteria for Point Releases
|
||||
|
||||
Changes that match the following criteria are valid candidates for
|
||||
inclusion into a point release:
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
Fixes a security issue, preferably identified by a @strong{CVE
|
||||
number} issued by @url{http://cve.mitre.org/}.
|
||||
@item
|
||||
Fixes a documented bug in @url{https://trac.ffmpeg.org}.
|
||||
@item
|
||||
Improves the included documentation.
|
||||
@item
|
||||
Retains both source code and binary compatibility with previous
|
||||
point releases of the same release branch.
|
||||
@end enumerate
|
||||
|
||||
The order for checking the rules is (1 OR 2 OR 3) AND 4.
|
||||
|
||||
|
||||
@subsection Release Checklist
|
||||
|
||||
The release process involves the following steps:
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
Ensure that the @file{RELEASE} file contains the version number for
|
||||
the upcoming release.
|
||||
@item
|
||||
Add the release at @url{https://trac.ffmpeg.org/admin/ticket/versions}.
|
||||
@item
|
||||
Announce the intent to do a release to the mailing list.
|
||||
@item
|
||||
Make sure all relevant security fixes have been backported. See
|
||||
@url{https://ffmpeg.org/security.html}.
|
||||
@item
|
||||
Ensure that the FATE regression suite still passes in the release
|
||||
branch on at least @strong{i386} and @strong{amd64}
|
||||
(cf. @ref{Regression tests}).
|
||||
@item
|
||||
Prepare the release tarballs in @code{bz2} and @code{gz} formats, and
supplementary files that contain @code{gpg} signatures.
|
||||
@item
|
||||
Publish the tarballs at @url{http://ffmpeg.org/releases}. Create and
|
||||
push an annotated tag in the form @code{nX}, with @code{X}
|
||||
containing the version number.
|
||||
@item
|
||||
Propose and send a patch to the @strong{ffmpeg-devel} mailing list
|
||||
with a news entry for the website.
|
||||
@item
|
||||
Publish the news entry.
|
||||
@item
|
||||
Send announcement to the mailing list.
|
||||
@end enumerate
|
||||
|
||||
@bye
|
||||
|
@@ -1,21 +0,0 @@
|
||||
@chapter Device Options
|
||||
@c man begin DEVICE OPTIONS
|
||||
|
||||
The libavdevice library provides the same interface as
libavformat. Namely, an input device is treated like a demuxer and
an output device like a muxer, and the interface and generic device
options are the same as those provided by libavformat (see the ffmpeg-formats
manual).
|
||||
|
||||
In addition each input or output device may support so-called private
|
||||
options, which are specific for that component.
|
||||
|
||||
Options may be set by specifying -@var{option} @var{value} in the
FFmpeg tools, by setting the value explicitly in the device
@code{AVFormatContext} options, or by using the @file{libavutil/opt.h} API
for programmatic use (a sketch follows below).
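For the programmatic case, the following is a minimal sketch; the device name
@code{video4linux2}, the URL @file{/dev/video0} and the option values are
purely illustrative, so consult the input device documentation for what your
device actually accepts:

@example
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

/* Open an input device, passing private device options through an
 * AVDictionary; entries that were not recognized stay in the dictionary. */
static int open_input_device(AVFormatContext **ctx)
{
    AVInputFormat *ifmt;
    AVDictionary *opts = NULL;
    int ret;

    avdevice_register_all();
    ifmt = av_find_input_format("video4linux2");
    if (!ifmt)
        return AVERROR_DEMUXER_NOT_FOUND;

    av_dict_set(&opts, "framerate",  "25",      0);
    av_dict_set(&opts, "video_size", "640x480", 0);

    ret = avformat_open_input(ctx, "/dev/video0", ifmt, &opts);
    av_dict_free(&opts);
    return ret;
}
@end example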
|
||||
|
||||
@c man end DEVICE OPTIONS
|
||||
|
||||
@include indevs.texi
|
||||
@include outdevs.texi
|
@@ -1,14 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
SRC_PATH="${1}"
|
||||
DOXYFILE="${2}"
|
||||
|
||||
shift 2
|
||||
|
||||
doxygen - <<EOF
|
||||
@INCLUDE = ${DOXYFILE}
|
||||
INPUT = $@
|
||||
HTML_HEADER = ${SRC_PATH}/doc/doxy/header.html
|
||||
HTML_FOOTER = ${SRC_PATH}/doc/doxy/footer.html
|
||||
HTML_STYLESHEET = ${SRC_PATH}/doc/doxy/doxy_stylesheet.css
|
||||
EOF
|
File diff suppressed because it is too large
@@ -1,9 +0,0 @@
|
||||
|
||||
<footer class="footer pagination-right">
|
||||
<span class="label label-info">
|
||||
Generated on $datetime for $projectname by <a href="http://www.doxygen.org/index.html">doxygen</a> $doxygenversion
|
||||
</span>
|
||||
</footer>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
@@ -1,16 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
|
||||
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
|
||||
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
|
||||
<link href="$relpath$doxy_stylesheet.css" rel="stylesheet" type="text/css" />
|
||||
<!--Header replace -->
|
||||
|
||||
</head>
|
||||
|
||||
<div class="container">
|
||||
|
||||
<!--Header replace -->
|
||||
<div class="menu">
|
File diff suppressed because it is too large
174 doc/errno.txt
@@ -1,174 +0,0 @@
|
||||
The following table lists most error codes found in various operating
|
||||
systems supported by FFmpeg.
|
||||
|
||||
OS
|
||||
Code Std F LBMWwb Text (YMMV)
|
||||
|
||||
E2BIG POSIX ++++++ Argument list too long
|
||||
EACCES POSIX ++++++ Permission denied
|
||||
EADDRINUSE POSIX +++..+ Address in use
|
||||
EADDRNOTAVAIL POSIX +++..+ Cannot assign requested address
|
||||
EADV +..... Advertise error
|
||||
EAFNOSUPPORT POSIX +++..+ Address family not supported
|
||||
EAGAIN POSIX + ++++++ Resource temporarily unavailable
|
||||
EALREADY POSIX +++..+ Operation already in progress
|
||||
EAUTH .++... Authentication error
|
||||
EBADARCH ..+... Bad CPU type in executable
|
||||
EBADE +..... Invalid exchange
|
||||
EBADEXEC ..+... Bad executable
|
||||
EBADF POSIX ++++++ Bad file descriptor
|
||||
EBADFD +..... File descriptor in bad state
|
||||
EBADMACHO ..+... Malformed Macho file
|
||||
EBADMSG POSIX ++4... Bad message
|
||||
EBADR +..... Invalid request descriptor
|
||||
EBADRPC .++... RPC struct is bad
|
||||
EBADRQC +..... Invalid request code
|
||||
EBADSLT +..... Invalid slot
|
||||
EBFONT +..... Bad font file format
|
||||
EBUSY POSIX - ++++++ Device or resource busy
|
||||
ECANCELED POSIX +++... Operation canceled
|
||||
ECHILD POSIX ++++++ No child processes
|
||||
ECHRNG +..... Channel number out of range
|
||||
ECOMM +..... Communication error on send
|
||||
ECONNABORTED POSIX +++..+ Software caused connection abort
|
||||
ECONNREFUSED POSIX - +++ss+ Connection refused
|
||||
ECONNRESET POSIX +++..+ Connection reset
|
||||
EDEADLK POSIX ++++++ Resource deadlock avoided
|
||||
EDEADLOCK +..++. File locking deadlock error
|
||||
EDESTADDRREQ POSIX +++... Destination address required
|
||||
EDEVERR ..+... Device error
|
||||
EDOM C89 - ++++++ Numerical argument out of domain
|
||||
EDOOFUS .F.... Programming error
|
||||
EDOTDOT +..... RFS specific error
|
||||
EDQUOT POSIX +++... Disc quota exceeded
|
||||
EEXIST POSIX ++++++ File exists
|
||||
EFAULT POSIX - ++++++ Bad address
|
||||
EFBIG POSIX - ++++++ File too large
|
||||
EFTYPE .++... Inappropriate file type or format
|
||||
EHOSTDOWN +++... Host is down
|
||||
EHOSTUNREACH POSIX +++..+ No route to host
|
||||
EHWPOISON +..... Memory page has hardware error
|
||||
EIDRM POSIX +++... Identifier removed
|
||||
EILSEQ C99 ++++++ Illegal byte sequence
|
||||
EINPROGRESS POSIX - +++ss+ Operation in progress
|
||||
EINTR POSIX - ++++++ Interrupted system call
|
||||
EINVAL POSIX + ++++++ Invalid argument
|
||||
EIO POSIX + ++++++ I/O error
|
||||
EISCONN POSIX +++..+ Socket is already connected
|
||||
EISDIR POSIX ++++++ Is a directory
|
||||
EISNAM +..... Is a named type file
|
||||
EKEYEXPIRED +..... Key has expired
|
||||
EKEYREJECTED +..... Key was rejected by service
|
||||
EKEYREVOKED +..... Key has been revoked
|
||||
EL2HLT +..... Level 2 halted
|
||||
EL2NSYNC +..... Level 2 not synchronized
|
||||
EL3HLT +..... Level 3 halted
|
||||
EL3RST +..... Level 3 reset
|
||||
ELIBACC +..... Can not access a needed shared library
|
||||
ELIBBAD +..... Accessing a corrupted shared library
|
||||
ELIBEXEC +..... Cannot exec a shared library directly
|
||||
ELIBMAX +..... Too many shared libraries
|
||||
ELIBSCN +..... .lib section in a.out corrupted
|
||||
ELNRNG +..... Link number out of range
|
||||
ELOOP POSIX +++..+ Too many levels of symbolic links
|
||||
EMEDIUMTYPE +..... Wrong medium type
|
||||
EMFILE POSIX ++++++ Too many open files
|
||||
EMLINK POSIX ++++++ Too many links
|
||||
EMSGSIZE POSIX +++..+ Message too long
|
||||
EMULTIHOP POSIX ++4... Multihop attempted
|
||||
ENAMETOOLONG POSIX - ++++++ File name too long
|
||||
ENAVAIL +..... No XENIX semaphores available
|
||||
ENEEDAUTH .++... Need authenticator
|
||||
ENETDOWN POSIX +++..+ Network is down
|
||||
ENETRESET SUSv3 +++..+ Network dropped connection on reset
|
||||
ENETUNREACH POSIX +++..+ Network unreachable
|
||||
ENFILE POSIX ++++++ Too many open files in system
|
||||
ENOANO +..... No anode
|
||||
ENOATTR .++... Attribute not found
|
||||
ENOBUFS POSIX - +++..+ No buffer space available
|
||||
ENOCSI +..... No CSI structure available
|
||||
ENODATA XSR +N4... No message available
|
||||
ENODEV POSIX - ++++++ No such device
|
||||
ENOENT POSIX - ++++++ No such file or directory
|
||||
ENOEXEC POSIX ++++++ Exec format error
|
||||
ENOFILE ...++. No such file or directory
|
||||
ENOKEY +..... Required key not available
|
||||
ENOLCK POSIX ++++++ No locks available
|
||||
ENOLINK POSIX ++4... Link has been severed
|
||||
ENOMEDIUM +..... No medium found
|
||||
ENOMEM POSIX ++++++ Not enough space
|
||||
ENOMSG POSIX +++..+ No message of desired type
|
||||
ENONET +..... Machine is not on the network
|
||||
ENOPKG +..... Package not installed
|
||||
ENOPROTOOPT POSIX +++..+ Protocol not available
|
||||
ENOSPC POSIX ++++++ No space left on device
|
||||
ENOSR XSR +N4... No STREAM resources
|
||||
ENOSTR XSR +N4... Not a STREAM
|
||||
ENOSYS POSIX + ++++++ Function not implemented
|
||||
ENOTBLK +++... Block device required
|
||||
ENOTCONN POSIX +++..+ Socket is not connected
|
||||
ENOTDIR POSIX ++++++ Not a directory
|
||||
ENOTEMPTY POSIX ++++++ Directory not empty
|
||||
ENOTNAM +..... Not a XENIX named type file
|
||||
ENOTRECOVERABLE SUSv4 - +..... State not recoverable
|
||||
ENOTSOCK POSIX +++..+ Socket operation on non-socket
|
||||
ENOTSUP POSIX +++... Operation not supported
|
||||
ENOTTY POSIX ++++++ Inappropriate I/O control operation
|
||||
ENOTUNIQ +..... Name not unique on network
|
||||
ENXIO POSIX ++++++ No such device or address
|
||||
EOPNOTSUPP POSIX +++..+ Operation not supported (on socket)
|
||||
EOVERFLOW POSIX +++..+ Value too large to be stored in data type
|
||||
EOWNERDEAD SUSv4 +..... Owner died
|
||||
EPERM POSIX - ++++++ Operation not permitted
|
||||
EPFNOSUPPORT +++..+ Protocol family not supported
|
||||
EPIPE POSIX - ++++++ Broken pipe
|
||||
EPROCLIM .++... Too many processes
|
||||
EPROCUNAVAIL .++... Bad procedure for program
|
||||
EPROGMISMATCH .++... Program version wrong
|
||||
EPROGUNAVAIL .++... RPC prog. not avail
|
||||
EPROTO POSIX ++4... Protocol error
|
||||
EPROTONOSUPPORT POSIX - +++ss+ Protocol not supported
|
||||
EPROTOTYPE POSIX +++..+ Protocol wrong type for socket
|
||||
EPWROFF ..+... Device power is off
|
||||
ERANGE C89 - ++++++ Result too large
|
||||
EREMCHG +..... Remote address changed
|
||||
EREMOTE +++... Object is remote
|
||||
EREMOTEIO +..... Remote I/O error
|
||||
ERESTART +..... Interrupted system call should be restarted
|
||||
ERFKILL +..... Operation not possible due to RF-kill
|
||||
EROFS POSIX ++++++ Read-only file system
|
||||
ERPCMISMATCH .++... RPC version wrong
|
||||
ESHLIBVERS ..+... Shared library version mismatch
|
||||
ESHUTDOWN +++..+ Cannot send after socket shutdown
|
||||
ESOCKTNOSUPPORT +++... Socket type not supported
|
||||
ESPIPE POSIX ++++++ Illegal seek
|
||||
ESRCH POSIX ++++++ No such process
|
||||
ESRMNT +..... Srmount error
|
||||
ESTALE POSIX +++..+ Stale NFS file handle
|
||||
ESTRPIPE +..... Streams pipe error
|
||||
ETIME XSR +N4... Stream ioctl timeout
|
||||
ETIMEDOUT POSIX - +++ss+ Connection timed out
|
||||
ETOOMANYREFS +++... Too many references: cannot splice
|
||||
ETXTBSY POSIX +++... Text file busy
|
||||
EUCLEAN +..... Structure needs cleaning
|
||||
EUNATCH +..... Protocol driver not attached
|
||||
EUSERS +++... Too many users
|
||||
EWOULDBLOCK POSIX +++..+ Operation would block
|
||||
EXDEV POSIX ++++++ Cross-device link
|
||||
EXFULL +..... Exchange full
|
||||
|
||||
Notations:
|
||||
|
||||
F: used in FFmpeg (-: a few times, +: a lot)
|
||||
|
||||
SUSv3: Single Unix Specification, version 3
|
||||
SUSv4: Single Unix Specification, version 4
|
||||
XSR: XSI STREAMS (obsolete)
|
||||
|
||||
OS: availability on some supported operating systems
|
||||
L: GNU/Linux
|
||||
B: BSD (F: FreeBSD, N: NetBSD)
|
||||
M: MacOS X
|
||||
W: Microsoft Windows (s: emulated with winsock, see libavformat/network.h)
|
||||
w: Mingw32 (3.17) and Mingw64 (2.0.1)
|
||||
b: BeOS
|
183 doc/eval.texi Normal file
@@ -0,0 +1,183 @@
|
||||
@chapter Expression Evaluation
|
||||
@c man begin EXPRESSION EVALUATION
|
||||
|
||||
When evaluating an arithmetic expression, FFmpeg uses an internal
|
||||
formula evaluator, implemented through the @file{libavutil/eval.h}
|
||||
interface.
|
||||
|
||||
An expression may contain unary, binary operators, constants, and
|
||||
functions.
|
||||
|
||||
Two expressions @var{expr1} and @var{expr2} can be combined to form
|
||||
another expression "@var{expr1};@var{expr2}".
|
||||
@var{expr1} and @var{expr2} are evaluated in turn, and the new
|
||||
expression evaluates to the value of @var{expr2}.
|
||||
|
||||
The following binary operators are available: @code{+}, @code{-},
|
||||
@code{*}, @code{/}, @code{^}.
|
||||
|
||||
The following unary operators are available: @code{+}, @code{-}.
|
||||
|
||||
The following functions are available:
|
||||
@table @option
|
||||
@item sinh(x)
|
||||
@item cosh(x)
|
||||
@item tanh(x)
|
||||
@item sin(x)
|
||||
@item cos(x)
|
||||
@item tan(x)
|
||||
@item atan(x)
|
||||
@item asin(x)
|
||||
@item acos(x)
|
||||
@item exp(x)
|
||||
@item log(x)
|
||||
@item abs(x)
|
||||
@item squish(x)
|
||||
@item gauss(x)
|
||||
@item isnan(x)
|
||||
Return 1.0 if @var{x} is NAN, 0.0 otherwise.
|
||||
|
||||
@item mod(x, y)
|
||||
@item max(x, y)
|
||||
@item min(x, y)
|
||||
@item eq(x, y)
|
||||
@item gte(x, y)
|
||||
@item gt(x, y)
|
||||
@item lte(x, y)
|
||||
@item lt(x, y)
|
||||
@item st(var, expr)
Store the value of the expression @var{expr} in an internal
variable. @var{var} specifies the number of the variable where the
value is stored; it must be a value ranging from 0 to 9. The function
returns the value stored in the internal variable.
Note: variables are currently not shared between expressions.
|
||||
|
||||
@item ld(var)
Load the value of the internal variable with number
@var{var}, which was previously stored with st(@var{var}, @var{expr}).
The function returns the loaded value.
|
||||
|
||||
@item while(cond, expr)
Evaluate expression @var{expr} while the expression @var{cond} is
non-zero, and return the value of the last @var{expr} evaluation, or
NAN if @var{cond} was always false.
|
||||
|
||||
@item ceil(expr)
|
||||
Round the value of expression @var{expr} upwards to the nearest
|
||||
integer. For example, "ceil(1.5)" is "2.0".
|
||||
|
||||
@item floor(expr)
|
||||
Round the value of expression @var{expr} downwards to the nearest
|
||||
integer. For example, "floor(-1.5)" is "-2.0".
|
||||
|
||||
@item trunc(expr)
|
||||
Round the value of expression @var{expr} towards zero to the nearest
|
||||
integer. For example, "trunc(-1.5)" is "-1.0".
|
||||
|
||||
@item sqrt(expr)
|
||||
Compute the square root of @var{expr}. This is equivalent to
|
||||
"(@var{expr})^.5".
|
||||
|
||||
@item not(expr)
|
||||
Return 1.0 if @var{expr} is zero, 0.0 otherwise.
|
||||
|
||||
@item pow(x, y)
Compute @var{x} raised to the power of @var{y}; it is equivalent to
"(@var{x})^(@var{y})".
|
||||
|
||||
@item random(x)
|
||||
Return a pseudo random value between 0.0 and 1.0. @var{x} is the index of the
|
||||
internal variable which will be used to save the seed/state.
|
||||
|
||||
@item hypot(x, y)
|
||||
This function is similar to the C function with the same name; it returns
|
||||
"sqrt(@var{x}*@var{x} + @var{y}*@var{y})", the length of the hypotenuse of a
|
||||
right triangle with sides of length @var{x} and @var{y}, or the distance of the
|
||||
point (@var{x}, @var{y}) from the origin.
|
||||
|
||||
@item gcd(x, y)
|
||||
Return the greatest common divisor of @var{x} and @var{y}. If both @var{x} and
|
||||
@var{y} are 0 or either or both are less than zero then behavior is undefined.
|
||||
@end table
|
||||
|
||||
The following constants are available:
|
||||
@table @option
|
||||
@item PI
|
||||
area of the unit disc, approximately 3.14
|
||||
@item E
|
||||
exp(1) (Euler's number), approximately 2.718
|
||||
@item PHI
|
||||
golden ratio (1+sqrt(5))/2, approximately 1.618
|
||||
@end table
|
||||
|
||||
Note that:
|
||||
|
||||
@code{*} works like AND
|
||||
|
||||
@code{+} works like OR
|
||||
|
||||
thus
|
||||
@example
|
||||
if A then B else C
|
||||
@end example
|
||||
is equivalent to
|
||||
@example
|
||||
A*B + not(A)*C
|
||||
@end example
|
||||
|
||||
In your C code, you can extend the list of unary and binary functions,
|
||||
and define recognized constants, so that they are available for your
|
||||
expressions.
|
||||
|
||||
The evaluator also recognizes the International System number
postfixes. If 'i' is appended after the postfix, powers of 2 are used
instead of powers of 10. The 'B' postfix multiplies the value by 8,
and can be appended after another postfix or used alone. This allows
using, for example, 'KB', 'MiB', 'G' and 'B' as postfixes.
|
||||
|
||||
The list of available International System postfixes follows, with an
indication of the corresponding powers of 10 and of 2.
|
||||
@table @option
|
||||
@item y
|
||||
-24 / -80
|
||||
@item z
|
||||
-21 / -70
|
||||
@item a
|
||||
-18 / -60
|
||||
@item f
|
||||
-15 / -50
|
||||
@item p
|
||||
-12 / -40
|
||||
@item n
|
||||
-9 / -30
|
||||
@item u
|
||||
-6 / -20
|
||||
@item m
|
||||
-3 / -10
|
||||
@item c
|
||||
-2
|
||||
@item d
|
||||
-1
|
||||
@item h
|
||||
2
|
||||
@item k
|
||||
3 / 10
|
||||
@item K
|
||||
3 / 10
|
||||
@item M
|
||||
6 / 20
|
||||
@item G
|
||||
9 / 30
|
||||
@item T
|
||||
12 / 40
|
||||
@item P
15 / 50
@item E
18 / 60
@item Z
21 / 70
@item Y
24 / 80
|
||||
@end table
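As an illustrative sketch of driving the evaluator from C through the
@file{libavutil/eval.h} interface described above (the expression string is
arbitrary and only exercises @code{st()}, @code{ld()} and the built-in
@code{PI} constant):

@example
#include <stdio.h>
#include <libavutil/eval.h>

int main(void)
{
    double res;
    /* "expr1;expr2" evaluates both and yields the value of expr2 */
    int ret = av_expr_parse_and_eval(&res, "st(0, PI/3); sin(ld(0)) * 2",
                                     NULL, NULL, /* no extra constants  */
                                     NULL, NULL, /* no unary functions  */
                                     NULL, NULL, /* no binary functions */
                                     NULL, 0, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not parse or evaluate the expression\n");
        return 1;
    }
    printf("result: %f\n", res); /* about 1.732051, i.e. sqrt(3) */
    return 0;
}
@end example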
|
||||
|
||||
@c man end
|
@@ -1,37 +1,21 @@
|
||||
# use pkg-config for getting CFLAGS and LDLIBS
|
||||
FFMPEG_LIBS= libavdevice \
|
||||
libavformat \
|
||||
libavfilter \
|
||||
libavcodec \
|
||||
libswresample \
|
||||
libswscale \
|
||||
libavutil \
|
||||
# use pkg-config for getting CFLAGS and LDFLAGS
|
||||
FFMPEG_LIBS=libavdevice libavformat libavfilter libavcodec libswscale libavutil
|
||||
CFLAGS+=$(shell pkg-config --cflags $(FFMPEG_LIBS))
|
||||
LDFLAGS+=$(shell pkg-config --libs $(FFMPEG_LIBS))
|
||||
|
||||
CFLAGS += -Wall -g
|
||||
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
|
||||
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
|
||||
|
||||
EXAMPLES= decoding_encoding \
|
||||
demuxing \
|
||||
filtering_video \
|
||||
filtering_audio \
|
||||
metadata \
|
||||
muxing \
|
||||
resampling_audio \
|
||||
scaling_video \
|
||||
EXAMPLES=decoding_encoding filtering metadata muxing
|
||||
|
||||
OBJS=$(addsuffix .o,$(EXAMPLES))
|
||||
|
||||
# the following examples make explicit use of the math library
|
||||
decoding_encoding: LDLIBS += -lm
|
||||
muxing: LDLIBS += -lm
|
||||
%: %.o
|
||||
$(CC) $< $(LDFLAGS) -o $@
|
||||
|
||||
.phony: all clean-test clean
|
||||
%.o: %.c
|
||||
$(CC) $< $(CFLAGS) -c -o $@
|
||||
|
||||
.phony: all clean
|
||||
|
||||
all: $(OBJS) $(EXAMPLES)
|
||||
|
||||
clean-test:
|
||||
$(RM) test*.pgm test.h264 test.mp2 test.sw test.mpg
|
||||
|
||||
clean: clean-test
|
||||
$(RM) $(EXAMPLES) $(OBJS)
|
||||
clean:
|
||||
rm -rf $(EXAMPLES) $(OBJS)
|
||||
|
@@ -1,18 +0,0 @@
|
||||
FFmpeg examples README
|
||||
----------------------
|
||||
|
||||
Both of the following use cases rely on pkg-config and make, so make sure
that you have them installed and working on your system.
|
||||
|
||||
|
||||
1) Build the installed examples in a generic read/write user directory
|
||||
|
||||
Copy to a read/write user directory and just use "make", it will link
|
||||
to the libraries on your system, assuming the PKG_CONFIG_PATH is
|
||||
correctly configured.
|
||||
|
||||
2) Build the examples in-tree
|
||||
|
||||
Assuming you are in the FFmpeg source checkout directory, you need to build
FFmpeg (no need to make install into any prefix). Then you can go into
doc/examples and run a command such as PKG_CONFIG_PATH=pc-uninstalled make.
|
@@ -27,76 +27,18 @@
|
||||
* Note that libavcodec only handles codecs (mpeg, mpeg4, etc...),
|
||||
* not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...). See library 'libavformat' for the
|
||||
* format handling
|
||||
* @example doc/examples/decoding_encoding.c
|
||||
*/
|
||||
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/common.h>
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavcodec/avcodec.h"
|
||||
#include "libavutil/mathematics.h"
|
||||
#include "libavutil/samplefmt.h"
|
||||
|
||||
#define INBUF_SIZE 4096
|
||||
#define AUDIO_INBUF_SIZE 20480
|
||||
#define AUDIO_REFILL_THRESH 4096
|
||||
|
||||
/* check that a given sample format is supported by the encoder */
|
||||
static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
const enum AVSampleFormat *p = codec->sample_fmts;
|
||||
|
||||
while (*p != AV_SAMPLE_FMT_NONE) {
|
||||
if (*p == sample_fmt)
|
||||
return 1;
|
||||
p++;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* just pick the highest supported samplerate */
|
||||
static int select_sample_rate(AVCodec *codec)
|
||||
{
|
||||
const int *p;
|
||||
int best_samplerate = 0;
|
||||
|
||||
if (!codec->supported_samplerates)
|
||||
return 44100;
|
||||
|
||||
p = codec->supported_samplerates;
|
||||
while (*p) {
|
||||
best_samplerate = FFMAX(*p, best_samplerate);
|
||||
p++;
|
||||
}
|
||||
return best_samplerate;
|
||||
}
|
||||
|
||||
/* select layout with the highest channel count */
|
||||
static int select_channel_layout(AVCodec *codec)
|
||||
{
|
||||
const uint64_t *p;
|
||||
uint64_t best_ch_layout = 0;
|
||||
int best_nb_channels = 0;
|
||||
|
||||
if (!codec->channel_layouts)
|
||||
return AV_CH_LAYOUT_STEREO;
|
||||
|
||||
p = codec->channel_layouts;
|
||||
while (*p) {
|
||||
int nb_channels = av_get_channel_layout_nb_channels(*p);
|
||||
|
||||
if (nb_channels > best_nb_channels) {
|
||||
best_ch_layout = *p;
|
||||
best_nb_channels = nb_channels;
|
||||
}
|
||||
p++;
|
||||
}
|
||||
return best_ch_layout;
|
||||
}
|
||||
|
||||
/*
|
||||
* Audio encoding example
|
||||
*/
|
||||
@@ -104,83 +46,44 @@ static void audio_encode_example(const char *filename)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
AVFrame *frame;
|
||||
AVPacket pkt;
|
||||
int i, j, k, ret, got_output;
|
||||
int buffer_size;
|
||||
int frame_size, i, j, out_size, outbuf_size;
|
||||
FILE *f;
|
||||
uint16_t *samples;
|
||||
short *samples;
|
||||
float t, tincr;
|
||||
uint8_t *outbuf;
|
||||
|
||||
printf("Encode audio file %s\n", filename);
|
||||
printf("Audio encoding\n");
|
||||
|
||||
/* find the MP2 encoder */
|
||||
codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
|
||||
codec = avcodec_find_encoder(CODEC_ID_MP2);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
fprintf(stderr, "codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate audio codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = 64000;
|
||||
|
||||
/* check that the encoder supports s16 pcm input */
|
||||
c->sample_rate = 44100;
|
||||
c->channels = 2;
|
||||
c->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
if (!check_sample_fmt(codec, c->sample_fmt)) {
|
||||
fprintf(stderr, "Encoder does not support sample format %s",
|
||||
av_get_sample_fmt_name(c->sample_fmt));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* select other audio parameters supported by the encoder */
|
||||
c->sample_rate = select_sample_rate(codec);
|
||||
c->channel_layout = select_channel_layout(codec);
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
if (avcodec_open(c, codec) < 0) {
|
||||
fprintf(stderr, "could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* the codec gives us the frame size, in samples */
|
||||
frame_size = c->frame_size;
|
||||
samples = malloc(frame_size * 2 * c->channels);
|
||||
outbuf_size = 10000;
|
||||
outbuf = malloc(outbuf_size);
|
||||
|
||||
f = fopen(filename, "wb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* frame containing input raw audio */
|
||||
frame = avcodec_alloc_frame();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame->nb_samples = c->frame_size;
|
||||
frame->format = c->sample_fmt;
|
||||
frame->channel_layout = c->channel_layout;
|
||||
|
||||
/* the codec gives us the frame size, in samples,
|
||||
* we calculate the size of the samples buffer in bytes */
|
||||
buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
|
||||
c->sample_fmt, 0);
|
||||
samples = av_malloc(buffer_size);
|
||||
if (!samples) {
|
||||
fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
|
||||
buffer_size);
|
||||
exit(1);
|
||||
}
|
||||
/* setup the data pointers in the AVFrame */
|
||||
ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
|
||||
(const uint8_t*)samples, buffer_size, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not setup audio frame\n");
|
||||
fprintf(stderr, "could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
@@ -188,46 +91,19 @@ static void audio_encode_example(const char *filename)
|
||||
t = 0;
|
||||
tincr = 2 * M_PI * 440.0 / c->sample_rate;
|
||||
for(i=0;i<200;i++) {
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL; // packet data will be allocated by the encoder
|
||||
pkt.size = 0;
|
||||
|
||||
for (j = 0; j < c->frame_size; j++) {
|
||||
for(j=0;j<frame_size;j++) {
|
||||
samples[2*j] = (int)(sin(t) * 10000);
|
||||
|
||||
for (k = 1; k < c->channels; k++)
|
||||
samples[2*j + k] = samples[2*j];
|
||||
samples[2*j+1] = samples[2*j];
|
||||
t += tincr;
|
||||
}
|
||||
/* encode the samples */
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
if (got_output) {
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
|
||||
/* get the delayed frames */
|
||||
for (got_output = 1; got_output; i++) {
|
||||
ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
out_size = avcodec_encode_audio(c, outbuf, outbuf_size, samples);
|
||||
fwrite(outbuf, 1, out_size, f);
|
||||
}
|
||||
fclose(f);
|
||||
free(outbuf);
|
||||
free(samples);
|
||||
|
||||
av_freep(&samples);
|
||||
avcodec_free_frame(&frame);
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
}
|
||||
@@ -247,30 +123,26 @@ static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
|
||||
av_init_packet(&avpkt);
|
||||
|
||||
printf("Decode audio file %s to %s\n", filename, outfilename);
|
||||
printf("Audio decoding\n");
|
||||
|
||||
/* find the mpeg audio decoder */
|
||||
codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
|
||||
codec = avcodec_find_decoder(CODEC_ID_MP2);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
fprintf(stderr, "codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate audio codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
if (avcodec_open(c, codec) < 0) {
|
||||
fprintf(stderr, "could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "rb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
fprintf(stderr, "could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
outfile = fopen(outfilename, "wb");
|
||||
@@ -288,7 +160,7 @@ static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
|
||||
if (!decoded_frame) {
|
||||
if (!(decoded_frame = avcodec_alloc_frame())) {
|
||||
fprintf(stderr, "Could not allocate audio frame\n");
|
||||
fprintf(stderr, "out of memory\n");
|
||||
exit(1);
|
||||
}
|
||||
} else
|
||||
@@ -308,8 +180,6 @@ static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
}
|
||||
avpkt.size -= len;
|
||||
avpkt.data += len;
|
||||
avpkt.dts =
|
||||
avpkt.pts = AV_NOPTS_VALUE;
|
||||
if (avpkt.size < AUDIO_REFILL_THRESH) {
|
||||
/* Refill the input buffer, to avoid trying to decode
|
||||
* incomplete frames. Instead of this, one could also use
|
||||
@@ -329,7 +199,7 @@ static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
avcodec_free_frame(&decoded_frame);
|
||||
av_free(decoded_frame);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -339,26 +209,22 @@ static void video_encode_example(const char *filename, int codec_id)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int i, ret, x, y, got_output;
|
||||
int i, out_size, size, x, y, outbuf_size;
|
||||
FILE *f;
|
||||
AVFrame *frame;
|
||||
AVPacket pkt;
|
||||
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
|
||||
AVFrame *picture;
|
||||
uint8_t *outbuf;
|
||||
|
||||
printf("Encode video file %s\n", filename);
|
||||
printf("Video encoding\n");
|
||||
|
||||
/* find the mpeg1 video encoder */
|
||||
codec = avcodec_find_encoder(codec_id);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
fprintf(stderr, "codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate video codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
picture= avcodec_alloc_frame();
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = 400000;
|
||||
@@ -369,105 +235,79 @@ static void video_encode_example(const char *filename, int codec_id)
|
||||
c->time_base= (AVRational){1,25};
|
||||
c->gop_size = 10; /* emit one intra frame every ten frames */
|
||||
c->max_b_frames=1;
|
||||
c->pix_fmt = AV_PIX_FMT_YUV420P;
|
||||
c->pix_fmt = PIX_FMT_YUV420P;
|
||||
|
||||
if(codec_id == AV_CODEC_ID_H264)
|
||||
if(codec_id == CODEC_ID_H264)
|
||||
av_opt_set(c->priv_data, "preset", "slow", 0);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
if (avcodec_open(c, codec) < 0) {
|
||||
fprintf(stderr, "could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "wb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
fprintf(stderr, "could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame = avcodec_alloc_frame();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
frame->format = c->pix_fmt;
|
||||
frame->width = c->width;
|
||||
frame->height = c->height;
|
||||
/* alloc image and output buffer */
|
||||
outbuf_size = 100000;
|
||||
outbuf = malloc(outbuf_size);
|
||||
|
||||
/* the image can be allocated by any means and av_image_alloc() is
|
||||
* just the most convenient way if av_malloc() is to be used */
|
||||
ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
|
||||
c->pix_fmt, 32);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate raw picture buffer\n");
|
||||
exit(1);
|
||||
}
|
||||
av_image_alloc(picture->data, picture->linesize,
|
||||
c->width, c->height, c->pix_fmt, 1);
|
||||
|
||||
/* encode 1 second of video */
|
||||
for(i=0;i<25;i++) {
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL; // packet data will be allocated by the encoder
|
||||
pkt.size = 0;
|
||||
|
||||
fflush(stdout);
|
||||
/* prepare a dummy image */
|
||||
/* Y */
|
||||
for(y=0;y<c->height;y++) {
|
||||
for(x=0;x<c->width;x++) {
|
||||
frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
|
||||
picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
|
||||
}
|
||||
}
|
||||
|
||||
/* Cb and Cr */
|
||||
for(y=0;y<c->height/2;y++) {
|
||||
for(x=0;x<c->width/2;x++) {
|
||||
frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
|
||||
frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
|
||||
picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
|
||||
picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
|
||||
}
|
||||
}
|
||||
|
||||
frame->pts = i;
|
||||
|
||||
/* encode the image */
|
||||
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
|
||||
printf("encoding frame %3d (size=%5d)\n", i, out_size);
|
||||
fwrite(outbuf, 1, out_size, f);
|
||||
}
|
||||
|
||||
/* get the delayed frames */
|
||||
for (got_output = 1; got_output; i++) {
|
||||
for(; out_size; i++) {
|
||||
fflush(stdout);
|
||||
|
||||
ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
|
||||
printf("write frame %3d (size=%5d)\n", i, out_size);
|
||||
fwrite(outbuf, 1, out_size, f);
|
||||
}
|
||||
|
||||
/* add sequence end code to have a real mpeg file */
|
||||
fwrite(endcode, 1, sizeof(endcode), f);
|
||||
outbuf[0] = 0x00;
|
||||
outbuf[1] = 0x00;
|
||||
outbuf[2] = 0x01;
|
||||
outbuf[3] = 0xb7;
|
||||
fwrite(outbuf, 1, 4, f);
|
||||
fclose(f);
|
||||
free(outbuf);
|
||||
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
av_freep(&frame->data[0]);
|
||||
avcodec_free_frame(&frame);
|
||||
av_free(picture->data[0]);
|
||||
av_free(picture);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
@@ -488,42 +328,15 @@ static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
|
||||
AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
|
||||
{
|
||||
int len, got_frame;
|
||||
char buf[1024];
|
||||
|
||||
len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
|
||||
if (len < 0) {
|
||||
fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
|
||||
return len;
|
||||
}
|
||||
if (got_frame) {
|
||||
printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
|
||||
fflush(stdout);
|
||||
|
||||
/* the picture is allocated by the decoder, no need to free it */
|
||||
snprintf(buf, sizeof(buf), outfilename, *frame_count);
|
||||
pgm_save(frame->data[0], frame->linesize[0],
|
||||
avctx->width, avctx->height, buf);
|
||||
(*frame_count)++;
|
||||
}
|
||||
if (pkt->data) {
|
||||
pkt->size -= len;
|
||||
pkt->data += len;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void video_decode_example(const char *outfilename, const char *filename)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int frame_count;
|
||||
int frame, got_picture, len;
|
||||
FILE *f;
|
||||
AVFrame *frame;
|
||||
AVFrame *picture;
|
||||
uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
|
||||
char buf[1024];
|
||||
AVPacket avpkt;
|
||||
|
||||
av_init_packet(&avpkt);
|
||||
@@ -531,20 +344,17 @@ static void video_decode_example(const char *outfilename, const char *filename)
|
||||
/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
|
||||
memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
|
||||
printf("Decode video file %s to %s\n", filename, outfilename);
|
||||
printf("Video decoding\n");
|
||||
|
||||
/* find the mpeg1 video decoder */
|
||||
codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
|
||||
codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
fprintf(stderr, "codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate video codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
picture= avcodec_alloc_frame();
|
||||
|
||||
if(codec->capabilities&CODEC_CAP_TRUNCATED)
|
||||
c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
|
||||
@@ -554,24 +364,20 @@ static void video_decode_example(const char *outfilename, const char *filename)
|
||||
available in the bitstream. */
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
if (avcodec_open(c, codec) < 0) {
|
||||
fprintf(stderr, "could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* the codec gives us the frame size, in samples */
|
||||
|
||||
f = fopen(filename, "rb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
fprintf(stderr, "could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame = avcodec_alloc_frame();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame_count = 0;
|
||||
frame = 0;
|
||||
for(;;) {
|
||||
avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
|
||||
if (avpkt.size == 0)
|
||||
@@ -593,9 +399,26 @@ static void video_decode_example(const char *outfilename, const char *filename)
|
||||
/* here, we use a stream based decoder (mpeg1video), so we
|
||||
feed decoder and see if it could decode a frame */
|
||||
avpkt.data = inbuf;
|
||||
while (avpkt.size > 0)
|
||||
if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
|
||||
while (avpkt.size > 0) {
|
||||
len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
|
||||
if (len < 0) {
|
||||
fprintf(stderr, "Error while decoding frame %d\n", frame);
|
||||
exit(1);
|
||||
}
|
||||
if (got_picture) {
|
||||
printf("saving frame %3d\n", frame);
|
||||
fflush(stdout);
|
||||
|
||||
/* the picture is allocated by the decoder. no need to
|
||||
free it */
|
||||
snprintf(buf, sizeof(buf), outfilename, frame);
|
||||
pgm_save(picture->data[0], picture->linesize[0],
|
||||
c->width, c->height, buf);
|
||||
frame++;
|
||||
}
|
||||
avpkt.size -= len;
|
||||
avpkt.data += len;
|
||||
}
|
||||
}
|
||||
|
||||
/* some codecs, such as MPEG, transmit the I and P frame with a
|
||||
@@ -603,48 +426,50 @@ static void video_decode_example(const char *outfilename, const char *filename)
|
||||
chance to get the last frame of the video */
|
||||
avpkt.data = NULL;
|
||||
avpkt.size = 0;
|
||||
decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);
|
||||
len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
|
||||
if (got_picture) {
|
||||
printf("saving last frame %3d\n", frame);
|
||||
fflush(stdout);
|
||||
|
||||
/* the picture is allocated by the decoder. no need to
|
||||
free it */
|
||||
snprintf(buf, sizeof(buf), outfilename, frame);
|
||||
pgm_save(picture->data[0], picture->linesize[0],
|
||||
c->width, c->height, buf);
|
||||
frame++;
|
||||
}
|
||||
|
||||
fclose(f);
|
||||
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
avcodec_free_frame(&frame);
|
||||
av_free(picture);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
const char *output_type;
|
||||
const char *filename;
|
||||
|
||||
/* must be called before using avcodec lib */
|
||||
avcodec_init();
|
||||
|
||||
/* register all the codecs */
|
||||
avcodec_register_all();
|
||||
|
||||
if (argc < 2) {
|
||||
printf("usage: %s output_type\n"
|
||||
"API example program to decode/encode a media stream with libavcodec.\n"
|
||||
"This program generates a synthetic stream and encodes it to a file\n"
|
||||
"named test.h264, test.mp2 or test.mpg depending on output_type.\n"
|
||||
"The encoded stream is then decoded and written to a raw data output.\n"
|
||||
"output_type must be choosen between 'h264', 'mp2', 'mpg'.\n",
|
||||
argv[0]);
|
||||
return 1;
|
||||
}
|
||||
output_type = argv[1];
|
||||
if (argc <= 1) {
|
||||
audio_encode_example("/tmp/test.mp2");
|
||||
audio_decode_example("/tmp/test.sw", "/tmp/test.mp2");
|
||||
|
||||
if (!strcmp(output_type, "h264")) {
|
||||
video_encode_example("test.h264", AV_CODEC_ID_H264);
|
||||
} else if (!strcmp(output_type, "mp2")) {
|
||||
audio_encode_example("test.mp2");
|
||||
audio_decode_example("test.sw", "test.mp2");
|
||||
} else if (!strcmp(output_type, "mpg")) {
|
||||
video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO);
|
||||
video_decode_example("test%02d.pgm", "test.mpg");
|
||||
video_encode_example("/tmp/test.h264", CODEC_ID_H264);
|
||||
video_encode_example("/tmp/test.mpg", CODEC_ID_MPEG1VIDEO);
|
||||
filename = "/tmp/test.mpg";
|
||||
} else {
|
||||
fprintf(stderr, "Invalid output type '%s', choose between 'h264', 'mp2', or 'mpg'\n",
|
||||
output_type);
|
||||
return 1;
|
||||
filename = argv[1];
|
||||
}
|
||||
|
||||
// audio_decode_example("/tmp/test.sw", filename);
|
||||
video_decode_example("/tmp/test%d.pgm", filename);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -1,342 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavformat demuxing API use example.
|
||||
*
|
||||
* Show how to use the libavformat and libavcodec API to demux and
|
||||
* decode audio and video data.
|
||||
* @example doc/examples/demuxing.c
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
|
||||
static AVStream *video_stream = NULL, *audio_stream = NULL;
|
||||
static const char *src_filename = NULL;
|
||||
static const char *video_dst_filename = NULL;
|
||||
static const char *audio_dst_filename = NULL;
|
||||
static FILE *video_dst_file = NULL;
|
||||
static FILE *audio_dst_file = NULL;
|
||||
|
||||
static uint8_t *video_dst_data[4] = {NULL};
|
||||
static int video_dst_linesize[4];
|
||||
static int video_dst_bufsize;
|
||||
|
||||
static uint8_t **audio_dst_data = NULL;
|
||||
static int audio_dst_linesize;
|
||||
static int audio_dst_bufsize;
|
||||
|
||||
static int video_stream_idx = -1, audio_stream_idx = -1;
|
||||
static AVFrame *frame = NULL;
|
||||
static AVPacket pkt;
|
||||
static int video_frame_count = 0;
|
||||
static int audio_frame_count = 0;
|
||||
|
||||
static int decode_packet(int *got_frame, int cached)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (pkt.stream_index == video_stream_idx) {
|
||||
/* decode video frame */
|
||||
ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding video frame\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (*got_frame) {
|
||||
printf("video_frame%s n:%d coded_n:%d pts:%s\n",
|
||||
cached ? "(cached)" : "",
|
||||
video_frame_count++, frame->coded_picture_number,
|
||||
av_ts2timestr(frame->pts, &video_dec_ctx->time_base));
|
||||
|
||||
/* copy decoded frame to destination buffer:
|
||||
* this is required since rawvideo expects non aligned data */
|
||||
av_image_copy(video_dst_data, video_dst_linesize,
|
||||
(const uint8_t **)(frame->data), frame->linesize,
|
||||
video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);
|
||||
|
||||
/* write to rawvideo file */
|
||||
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
|
||||
}
|
||||
} else if (pkt.stream_index == audio_stream_idx) {
|
||||
/* decode audio frame */
|
||||
ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding audio frame\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (*got_frame) {
|
||||
printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
|
||||
cached ? "(cached)" : "",
|
||||
audio_frame_count++, frame->nb_samples,
|
||||
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
|
||||
|
||||
ret = av_samples_alloc(audio_dst_data, &audio_dst_linesize, av_frame_get_channels(frame),
|
||||
frame->nb_samples, frame->format, 1);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate audio buffer\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* TODO: extend return code of the av_samples_* functions so that this call is not needed */
|
||||
audio_dst_bufsize =
|
||||
av_samples_get_buffer_size(NULL, av_frame_get_channels(frame),
|
||||
frame->nb_samples, frame->format, 1);
|
||||
|
||||
/* copy audio data to destination buffer:
|
||||
* this is required since rawaudio expects non aligned data */
|
||||
av_samples_copy(audio_dst_data, frame->data, 0, 0,
|
||||
frame->nb_samples, av_frame_get_channels(frame), frame->format);
|
||||
|
||||
/* write to rawaudio file */
|
||||
fwrite(audio_dst_data[0], 1, audio_dst_bufsize, audio_dst_file);
|
||||
av_freep(&audio_dst_data[0]);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int open_codec_context(int *stream_idx,
|
||||
AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
{
|
||||
int ret;
|
||||
AVStream *st;
|
||||
AVCodecContext *dec_ctx = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
|
||||
av_get_media_type_string(type), src_filename);
|
||||
return ret;
|
||||
} else {
|
||||
*stream_idx = ret;
|
||||
st = fmt_ctx->streams[*stream_idx];
|
||||
|
||||
/* find decoder for the stream */
|
||||
dec_ctx = st->codec;
|
||||
dec = avcodec_find_decoder(dec_ctx->codec_id);
|
||||
if (!dec) {
|
||||
fprintf(stderr, "Failed to find %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
|
||||
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
|
||||
} sample_fmt_entries[] = {
|
||||
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
|
||||
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
|
||||
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
|
||||
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
|
||||
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
|
||||
};
|
||||
*fmt = NULL;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
|
||||
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
|
||||
if (sample_fmt == entry->sample_fmt) {
|
||||
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr,
|
||||
"sample format %s is not supported as output format\n",
|
||||
av_get_sample_fmt_name(sample_fmt));
|
||||
return -1;
|
||||
}
|
||||
|
||||
int main (int argc, char **argv)
|
||||
{
|
||||
int ret = 0, got_frame;
|
||||
|
||||
if (argc != 4) {
|
||||
fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
|
||||
"API example program to show how to read frames from an input file.\n"
|
||||
"This program reads frames from a file, decodes them, and writes decoded\n"
|
||||
"video frames to a rawvideo file named video_output_file, and decoded\n"
|
||||
"audio frames to a rawaudio file named audio_output_file.\n"
|
||||
"\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
src_filename = argv[1];
|
||||
video_dst_filename = argv[2];
|
||||
audio_dst_filename = argv[3];
|
||||
|
||||
/* register all formats and codecs */
|
||||
av_register_all();
|
||||
|
||||
/* open input file, and allocate format context */
|
||||
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open source file %s\n", src_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* retrieve stream information */
|
||||
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
|
||||
fprintf(stderr, "Could not find stream information\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
|
||||
video_stream = fmt_ctx->streams[video_stream_idx];
|
||||
video_dec_ctx = video_stream->codec;
|
||||
|
||||
video_dst_file = fopen(video_dst_filename, "wb");
|
||||
if (!video_dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate image where the decoded image will be put */
|
||||
ret = av_image_alloc(video_dst_data, video_dst_linesize,
|
||||
video_dec_ctx->width, video_dec_ctx->height,
|
||||
video_dec_ctx->pix_fmt, 1);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate raw video buffer\n");
|
||||
goto end;
|
||||
}
|
||||
video_dst_bufsize = ret;
|
||||
}
|
||||
|
||||
if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
|
||||
int nb_planes;
|
||||
|
||||
audio_stream = fmt_ctx->streams[audio_stream_idx];
|
||||
audio_dec_ctx = audio_stream->codec;
|
||||
audio_dst_file = fopen(audio_dst_filename, "wb");
|
||||
if (!audio_dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
nb_planes = av_sample_fmt_is_planar(audio_dec_ctx->sample_fmt) ?
|
||||
audio_dec_ctx->channels : 1;
|
||||
audio_dst_data = av_mallocz(sizeof(uint8_t *) * nb_planes);
|
||||
if (!audio_dst_data) {
|
||||
fprintf(stderr, "Could not allocate audio data buffers\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
/* dump input information to stderr */
|
||||
av_dump_format(fmt_ctx, 0, src_filename, 0);
|
||||
|
||||
if (!audio_stream && !video_stream) {
|
||||
fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
frame = avcodec_alloc_frame();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate frame\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* initialize packet, set data to NULL, let the demuxer fill it */
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
if (video_stream)
|
||||
printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
|
||||
if (audio_stream)
|
||||
printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
decode_packet(&got_frame, 0);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
|
||||
/* flush cached frames */
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
do {
|
||||
decode_packet(&got_frame, 1);
|
||||
} while (got_frame);
|
||||
|
||||
printf("Demuxing succeeded.\n");
|
||||
|
||||
if (video_stream) {
|
||||
printf("Play the output video file with the command:\n"
|
||||
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
|
||||
av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
|
||||
video_dst_filename);
|
||||
}
|
||||
|
||||
if (audio_stream) {
|
||||
const char *fmt;
|
||||
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, audio_dec_ctx->sample_fmt)) < 0)
|
||||
goto end;
|
||||
printf("Play the output audio file with the command:\n"
|
||||
"ffplay -f %s -ac %d -ar %d %s\n",
|
||||
fmt, audio_dec_ctx->channels, audio_dec_ctx->sample_rate,
|
||||
audio_dst_filename);
|
||||
}
|
||||
|
||||
end:
|
||||
if (video_dec_ctx)
|
||||
avcodec_close(video_dec_ctx);
|
||||
if (audio_dec_ctx)
|
||||
avcodec_close(audio_dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
if (video_dst_file)
|
||||
fclose(video_dst_file);
|
||||
if (audio_dst_file)
|
||||
fclose(audio_dst_file);
|
||||
av_free(frame);
|
||||
av_free(video_dst_data[0]);
|
||||
av_free(audio_dst_data);
|
||||
|
||||
return ret < 0;
|
||||
}
|
@@ -24,18 +24,14 @@
|
||||
/**
|
||||
* @file
|
||||
* API example for decoding and filtering
|
||||
* @example doc/examples/filtering_video.c
|
||||
*/
|
||||
|
||||
#define _XOPEN_SOURCE 600 /* for usleep */
|
||||
#include <unistd.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/avfiltergraph.h>
|
||||
#include <libavfilter/avcodec.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavfilter/vsrc_buffer.h>
|
||||
|
||||
const char *filter_descr = "scale=78:24";
|
||||
|
||||
@@ -49,7 +45,7 @@ static int64_t last_pts = AV_NOPTS_VALUE;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
int ret, i;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
@@ -57,7 +53,7 @@ static int open_input_file(const char *filename)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
if ((ret = av_find_stream_info(fmt_ctx)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
@@ -72,7 +68,7 @@ static int open_input_file(const char *filename)
|
||||
dec_ctx = fmt_ctx->streams[video_stream_index]->codec;
|
||||
|
||||
/* init the video decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
if ((ret = avcodec_open(dec_ctx, dec)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
|
||||
return ret;
|
||||
}
|
||||
@@ -88,18 +84,14 @@ static int init_filters(const char *filters_descr)
|
||||
AVFilter *buffersink = avfilter_get_by_name("buffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
|
||||
AVBufferSinkParams *buffersink_params;
|
||||
|
||||
enum PixelFormat pix_fmts[] = { PIX_FMT_GRAY8, PIX_FMT_NONE };
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
|
||||
/* buffer video source: the decoded frames from the decoder will be inserted here. */
|
||||
snprintf(args, sizeof(args),
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
|
||||
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
|
||||
dec_ctx->time_base.num, dec_ctx->time_base.den,
|
||||
dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
|
||||
|
||||
snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d",
|
||||
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
|
||||
dec_ctx->time_base.num, dec_ctx->time_base.den,
|
||||
dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
@@ -108,11 +100,8 @@ static int init_filters(const char *filters_descr)
|
||||
}
|
||||
|
||||
/* buffer video sink: to terminate the filter chain. */
|
||||
buffersink_params = av_buffersink_params_alloc();
|
||||
buffersink_params->pixel_fmts = pix_fmts;
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
|
||||
NULL, buffersink_params, filter_graph);
|
||||
av_free(buffersink_params);
|
||||
NULL, pix_fmts, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
|
||||
return ret;
|
||||
@@ -129,42 +118,41 @@ static int init_filters(const char *filters_descr)
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
|
||||
if ((ret = avfilter_graph_parse(filter_graph, filter_descr,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
return ret;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
return ret;
|
||||
return 0;
|
||||
}
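The init_filters() hunks above interleave two generations of graph construction: one side restricts the sink through AVBufferSinkParams and parses with avfilter_graph_parse_ptr(), the other passes the pix_fmts array directly and parses with avfilter_graph_parse(). As a reading aid, here is a condensed outline of the common sequence; error handling and the pixel-format restriction are left out, so this is a sketch rather than either exact version, and build_graph_sketch is an invented name:

#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/mem.h>

static int build_graph_sketch(AVFilterGraph **graph,
                              AVFilterContext **src, AVFilterContext **sink,
                              const char *args, const char *filters_descr)
{
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    int ret;

    *graph = avfilter_graph_alloc();

    /* "in": decoded frames enter here; args describes their geometry and time base. */
    avfilter_graph_create_filter(src, avfilter_get_by_name("buffer"),
                                 "in", args, NULL, *graph);
    /* "out": filtered frames are pulled from here. */
    avfilter_graph_create_filter(sink, avfilter_get_by_name("buffersink"),
                                 "out", NULL, NULL, *graph);

    /* Wire the named endpoints to the textual description (e.g. "scale=78:24"). */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = *src;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;
    inputs->name        = av_strdup("out");
    inputs->filter_ctx  = *sink;
    inputs->pad_idx     = 0;
    inputs->next        = NULL;

    if ((ret = avfilter_graph_parse_ptr(*graph, filters_descr,
                                        &inputs, &outputs, NULL)) < 0)
        return ret;
    return avfilter_graph_config(*graph, NULL); /* validate the complete graph */
}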
|
||||
|
||||
static void display_frame(const AVFrame *frame, AVRational time_base)
|
||||
static void display_picref(AVFilterBufferRef *picref, AVRational time_base)
|
||||
{
|
||||
int x, y;
|
||||
uint8_t *p0, *p;
|
||||
int64_t delay;
|
||||
|
||||
if (frame->pts != AV_NOPTS_VALUE) {
|
||||
if (picref->pts != AV_NOPTS_VALUE) {
|
||||
if (last_pts != AV_NOPTS_VALUE) {
|
||||
/* sleep roughly the right amount of time;
|
||||
* usleep is in microseconds, just like AV_TIME_BASE. */
|
||||
delay = av_rescale_q(frame->pts - last_pts,
|
||||
delay = av_rescale_q(picref->pts - last_pts,
|
||||
time_base, AV_TIME_BASE_Q);
|
||||
if (delay > 0 && delay < 1000000)
|
||||
usleep(delay);
|
||||
}
|
||||
last_pts = frame->pts;
|
||||
last_pts = picref->pts;
|
||||
}
|
||||
|
||||
/* Trivial ASCII grayscale display. */
|
||||
p0 = frame->data[0];
|
||||
p0 = picref->data[0];
|
||||
puts("\033c");
|
||||
for (y = 0; y < frame->height; y++) {
|
||||
for (y = 0; y < picref->video->h; y++) {
|
||||
p = p0;
|
||||
for (x = 0; x < frame->width; x++)
|
||||
for (x = 0; x < picref->video->w; x++)
|
||||
putchar(" .-+#"[*(p++) / 52]);
|
||||
putchar('\n');
|
||||
p0 += frame->linesize[0];
|
||||
p0 += picref->linesize[0];
|
||||
}
|
||||
fflush(stdout);
|
||||
}
|
||||
@@ -173,14 +161,9 @@ int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket packet;
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
AVFrame *filt_frame = av_frame_alloc();
|
||||
AVFrame frame;
|
||||
int got_frame;
|
||||
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file\n", argv[0]);
|
||||
exit(1);
|
||||
@@ -197,48 +180,43 @@ int main(int argc, char **argv)
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
AVFilterBufferRef *picref;
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (packet.stream_index == video_stream_index) {
|
||||
avcodec_get_frame_defaults(frame);
|
||||
avcodec_get_frame_defaults(&frame);
|
||||
got_frame = 0;
|
||||
ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
|
||||
ret = avcodec_decode_video2(dec_ctx, &frame, &got_frame, &packet);
|
||||
av_free_packet(&packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
|
||||
break;
|
||||
}
|
||||
|
||||
if (got_frame) {
|
||||
frame->pts = av_frame_get_best_effort_timestamp(frame);
|
||||
|
||||
if (frame.pts == AV_NOPTS_VALUE)
|
||||
frame.pts = frame.pkt_dts == AV_NOPTS_VALUE ?
|
||||
frame.pkt_dts : frame.pkt_pts;
|
||||
/* push the decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
break;
|
||||
}
|
||||
av_vsrc_buffer_add_frame(buffersrc_ctx, &frame);
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
|
||||
av_frame_unref(filt_frame);
|
||||
/* pull filtered pictures from the filtergraph */
|
||||
while (avfilter_poll_frame(buffersink_ctx->inputs[0])) {
|
||||
av_vsink_buffer_get_video_buffer_ref(buffersink_ctx, &picref, 0);
|
||||
if (picref) {
|
||||
display_picref(picref, buffersink_ctx->inputs[0]->time_base);
|
||||
avfilter_unref_buffer(picref);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
av_free_packet(&packet);
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
if (dec_ctx)
|
||||
avcodec_close(dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
av_close_input_file(fmt_ctx);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
char buf[1024];
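The read loop above mixes the frame-based push/pull calls (av_buffersrc_add_frame_flags(), av_buffersink_get_frame()) with the older buffer-ref calls (av_vsrc_buffer_add_frame(), avfilter_poll_frame()). A small sketch of the frame-based pattern on its own; display_frame() is the ASCII renderer shown earlier, filter_one_frame is an invented name, and the contexts and frames are assumed to be set up as in the example:

#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/frame.h>

/* Push one decoded frame, then drain every frame the graph can emit.
 * EAGAIN means "feed more input"; EOF means the graph is finished. */
static int filter_one_frame(AVFilterContext *src, AVFilterContext *sink,
                            AVFrame *in, AVFrame *out)
{
    int ret = av_buffersrc_add_frame_flags(src, in, AV_BUFFERSRC_FLAG_KEEP_REF);
    if (ret < 0)
        return ret;
    while (1) {
        ret = av_buffersink_get_frame(sink, out);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;          /* nothing more for this input frame */
        if (ret < 0)
            return ret;        /* real error */
        display_frame(out, sink->inputs[0]->time_base);
        av_frame_unref(out);   /* reuse 'out' for the next filtered frame */
    }
}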
|
@@ -1,265 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
* Copyright (c) 2012 Clément Bœsch
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* API example for audio decoding and filtering
|
||||
* @example doc/examples/filtering_audio.c
|
||||
*/
|
||||
|
||||
#include <unistd.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/avfiltergraph.h>
|
||||
#include <libavfilter/avcodec.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
|
||||
const char *player = "ffplay -f s16le -ar 8000 -ac 1 -";
|
||||
|
||||
static AVFormatContext *fmt_ctx;
|
||||
static AVCodecContext *dec_ctx;
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
static int audio_stream_index = -1;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* select the audio stream */
|
||||
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find a audio stream in the input file\n");
|
||||
return ret;
|
||||
}
|
||||
audio_stream_index = ret;
|
||||
dec_ctx = fmt_ctx->streams[audio_stream_index]->codec;
|
||||
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
/* init the audio decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filters(const char *filters_descr)
|
||||
{
|
||||
char args[512];
|
||||
int ret;
|
||||
AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
|
||||
AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
|
||||
const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
|
||||
const int out_sample_rates[] = { 8000, -1 };
|
||||
const AVFilterLink *outlink;
|
||||
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
|
||||
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
|
||||
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
|
||||
if (!dec_ctx->channel_layout)
|
||||
dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
|
||||
snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
|
||||
time_base.num, time_base.den, dec_ctx->sample_rate,
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* buffer audio sink: to terminate the filter chain. */
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Endpoints for the filter graph. */
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
return ret;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
return ret;
|
||||
|
||||
/* Print summary of the sink buffer
|
||||
* Note: args buffer is reused to store channel layout string */
|
||||
outlink = buffersink_ctx->inputs[0];
|
||||
av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
|
||||
av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
|
||||
(int)outlink->sample_rate,
|
||||
(char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
|
||||
args);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void print_frame(const AVFrame *frame)
|
||||
{
|
||||
const int n = frame->nb_samples * av_get_channel_layout_nb_channels(av_frame_get_channel_layout(frame));
|
||||
const uint16_t *p = (uint16_t*)frame->data[0];
|
||||
const uint16_t *p_end = p + n;
|
||||
|
||||
while (p < p_end) {
|
||||
fputc(*p & 0xff, stdout);
|
||||
fputc(*p>>8 & 0xff, stdout);
|
||||
p++;
|
||||
}
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket packet;
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
AVFrame *filt_frame = av_frame_alloc();
|
||||
int got_frame;
|
||||
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
avcodec_register_all();
|
||||
av_register_all();
|
||||
avfilter_register_all();
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters(filter_descr)) < 0)
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (packet.stream_index == audio_stream_index) {
|
||||
avcodec_get_frame_defaults(frame);
|
||||
got_frame = 0;
|
||||
ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
if (got_frame) {
|
||||
/* push the audio data from decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* pull filtered audio from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if(ret < 0)
|
||||
goto end;
|
||||
print_frame(filt_frame);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
}
|
||||
}
|
||||
av_free_packet(&packet);
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
if (dec_ctx)
|
||||
avcodec_close(dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
char buf[1024];
|
||||
av_strerror(ret, buf, sizeof(buf));
|
||||
fprintf(stderr, "Error occurred: %s\n", buf);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
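The audio example above pins the abuffersink's output to s16 / mono / 8000 Hz with three av_opt_set_int_list() calls, so whatever the aresample/aformat chain negotiates matches the ffplay command it prints. The same three calls collapsed into one hypothetical helper for readability (constrain_sink is not a name from the example):

#include <libavfilter/avfilter.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/samplefmt.h>

static int constrain_sink(AVFilterContext *sink)
{
    /* -1 terminates each list, as expected by av_opt_set_int_list(). */
    static const enum AVSampleFormat fmts[]    = { AV_SAMPLE_FMT_S16, -1 };
    static const int64_t             layouts[] = { AV_CH_LAYOUT_MONO, -1 };
    static const int                 rates[]   = { 8000, -1 };
    int ret;

    if ((ret = av_opt_set_int_list(sink, "sample_fmts", fmts, -1,
                                   AV_OPT_SEARCH_CHILDREN)) < 0)
        return ret;
    if ((ret = av_opt_set_int_list(sink, "channel_layouts", layouts, -1,
                                   AV_OPT_SEARCH_CHILDREN)) < 0)
        return ret;
    return av_opt_set_int_list(sink, "sample_rates", rates, -1,
                               AV_OPT_SEARCH_CHILDREN);
}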
@@ -23,7 +23,6 @@
/**
 * @file
 * Shows how the metadata API can be used in application programs.
 * @example doc/examples/metadata.c
 */

#include <stdio.h>
@@ -51,6 +50,6 @@ int main (int argc, char **argv)
    while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
        printf("%s=%s\n", tag->key, tag->value);

    avformat_close_input(&fmt_ctx);
    avformat_free_context(fmt_ctx);
    return 0;
}
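The while loop in the second hunk is the standard way to walk an AVDictionary: passing the previously returned entry back in, with an empty key and AV_DICT_IGNORE_SUFFIX, turns av_dict_get() into an iterator. A self-contained sketch of the idiom (dump_dict is an illustrative name):

#include <stdio.h>
#include <libavutil/dict.h>

/* Print every key=value pair in a dictionary. The previously returned entry
 * acts as the cursor; AV_DICT_IGNORE_SUFFIX makes "" match every key. */
static void dump_dict(const AVDictionary *d)
{
    AVDictionaryEntry *tag = NULL;
    while ((tag = av_dict_get(d, "", tag, AV_DICT_IGNORE_SUFFIX)))
        printf("%s=%s\n", tag->key, tag->value);
}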
@@ -26,7 +26,6 @@
|
||||
*
|
||||
* Output a media file in any supported libavformat format.
|
||||
* The default codecs are used.
|
||||
* @example doc/examples/muxing.c
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
@@ -34,174 +33,112 @@
|
||||
#include <string.h>
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libswscale/swscale.h>
|
||||
#include <libswresample/swresample.h>
|
||||
#include "libavutil/mathematics.h"
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libswscale/swscale.h"
|
||||
|
||||
#undef exit
|
||||
|
||||
/* 5 seconds stream duration */
|
||||
#define STREAM_DURATION 200.0
|
||||
#define STREAM_FRAME_RATE 25 /* 25 images/s */
|
||||
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
|
||||
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
|
||||
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
|
||||
|
||||
static int sws_flags = SWS_BICUBIC;
|
||||
|
||||
/* Add an output stream. */
|
||||
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
|
||||
enum AVCodecID codec_id)
|
||||
/**************************************************************/
|
||||
/* audio output */
|
||||
|
||||
static float t, tincr, tincr2;
|
||||
static int16_t *samples;
|
||||
static uint8_t *audio_outbuf;
|
||||
static int audio_outbuf_size;
|
||||
static int audio_input_frame_size;
|
||||
|
||||
/*
|
||||
* add an audio output stream
|
||||
*/
|
||||
static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVStream *st;
|
||||
|
||||
/* find the encoder */
|
||||
*codec = avcodec_find_encoder(codec_id);
|
||||
if (!(*codec)) {
|
||||
fprintf(stderr, "Could not find encoder for '%s'\n",
|
||||
avcodec_get_name(codec_id));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
st = avformat_new_stream(oc, *codec);
|
||||
st = avformat_new_stream(oc, NULL);
|
||||
if (!st) {
|
||||
fprintf(stderr, "Could not allocate stream\n");
|
||||
fprintf(stderr, "Could not alloc stream\n");
|
||||
exit(1);
|
||||
}
|
||||
st->id = oc->nb_streams-1;
|
||||
st->id = 1;
|
||||
|
||||
c = st->codec;
|
||||
c->codec_id = codec_id;
|
||||
c->codec_type = AVMEDIA_TYPE_AUDIO;
|
||||
|
||||
switch ((*codec)->type) {
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
c->sample_fmt = AV_SAMPLE_FMT_FLTP;
|
||||
c->bit_rate = 64000;
|
||||
c->sample_rate = 44100;
|
||||
c->channels = 2;
|
||||
break;
|
||||
/* put sample parameters */
|
||||
c->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
c->bit_rate = 64000;
|
||||
c->sample_rate = 44100;
|
||||
c->channels = 2;
|
||||
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
c->codec_id = codec_id;
|
||||
|
||||
c->bit_rate = 400000;
|
||||
/* Resolution must be a multiple of two. */
|
||||
c->width = 352;
|
||||
c->height = 288;
|
||||
/* timebase: This is the fundamental unit of time (in seconds) in terms
|
||||
* of which frame timestamps are represented. For fixed-fps content,
|
||||
* timebase should be 1/framerate and timestamp increments should be
|
||||
* identical to 1. */
|
||||
c->time_base.den = STREAM_FRAME_RATE;
|
||||
c->time_base.num = 1;
|
||||
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
|
||||
c->pix_fmt = STREAM_PIX_FMT;
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
|
||||
/* just for testing, we also add B frames */
|
||||
c->max_b_frames = 2;
|
||||
}
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
|
||||
/* Needed to avoid using macroblocks in which some coeffs overflow.
|
||||
* This does not happen with normal video, it just happens here as
|
||||
* the motion of the chroma plane does not match the luma plane. */
|
||||
c->mb_decision = 2;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
/* Some formats want stream headers to be separate. */
|
||||
// some formats want stream headers to be separate
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
return st;
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* audio output */
|
||||
|
||||
static float t, tincr, tincr2;
|
||||
|
||||
static uint8_t **src_samples_data;
|
||||
static int src_samples_linesize;
|
||||
static int src_nb_samples;
|
||||
|
||||
static int max_dst_nb_samples;
|
||||
uint8_t **dst_samples_data;
|
||||
int dst_samples_linesize;
|
||||
int dst_samples_size;
|
||||
|
||||
struct SwrContext *swr_ctx = NULL;
|
||||
|
||||
static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
|
||||
static void open_audio(AVFormatContext *oc, AVStream *st)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int ret;
|
||||
AVCodec *codec;
|
||||
|
||||
c = st->codec;
|
||||
|
||||
/* find the audio encoder */
|
||||
codec = avcodec_find_encoder(c->codec_id);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* open it */
|
||||
ret = avcodec_open2(c, codec, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
|
||||
if (avcodec_open(c, codec) < 0) {
|
||||
fprintf(stderr, "could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* init signal generator */
|
||||
t = 0;
|
||||
t = 0;
|
||||
tincr = 2 * M_PI * 110.0 / c->sample_rate;
|
||||
/* increment frequency by 110 Hz per second */
|
||||
tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
|
||||
|
||||
src_nb_samples = c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ?
|
||||
10000 : c->frame_size;
|
||||
audio_outbuf_size = 10000;
|
||||
audio_outbuf = av_malloc(audio_outbuf_size);
|
||||
|
||||
ret = av_samples_alloc_array_and_samples(&src_samples_data, &src_samples_linesize, c->channels,
|
||||
src_nb_samples, c->sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate source samples\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* create resampler context */
|
||||
if (c->sample_fmt != AV_SAMPLE_FMT_S16) {
|
||||
swr_ctx = swr_alloc();
|
||||
if (!swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_int (swr_ctx, "in_channel_count", c->channels, 0);
|
||||
av_opt_set_int (swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_int (swr_ctx, "out_channel_count", c->channels, 0);
|
||||
av_opt_set_int (swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
exit(1);
|
||||
/* ugly hack for PCM codecs (will be removed ASAP with new PCM
|
||||
support to compute the input frame size in samples */
|
||||
if (c->frame_size <= 1) {
|
||||
audio_input_frame_size = audio_outbuf_size / c->channels;
|
||||
switch(st->codec->codec_id) {
|
||||
case CODEC_ID_PCM_S16LE:
|
||||
case CODEC_ID_PCM_S16BE:
|
||||
case CODEC_ID_PCM_U16LE:
|
||||
case CODEC_ID_PCM_U16BE:
|
||||
audio_input_frame_size >>= 1;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
audio_input_frame_size = c->frame_size;
|
||||
}
|
||||
|
||||
/* compute the number of converted samples: buffering is avoided
|
||||
* ensuring that the output buffer will contain at least all the
|
||||
* converted input samples */
|
||||
max_dst_nb_samples = src_nb_samples;
|
||||
ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize, c->channels,
|
||||
max_dst_nb_samples, c->sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate destination samples\n");
|
||||
exit(1);
|
||||
}
|
||||
dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, max_dst_nb_samples,
|
||||
c->sample_fmt, 0);
|
||||
samples = av_malloc(audio_input_frame_size * 2 * c->channels);
|
||||
}
|
||||
|
||||
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
|
||||
* 'nb_channels' channels. */
|
||||
/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
|
||||
'nb_channels' channels */
|
||||
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
|
||||
{
|
||||
int j, i, v;
|
||||
@@ -210,9 +147,9 @@ static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
|
||||
q = samples;
|
||||
for (j = 0; j < frame_size; j++) {
|
||||
v = (int)(sin(t) * 10000);
|
||||
for (i = 0; i < nb_channels; i++)
|
||||
for(i = 0; i < nb_channels; i++)
|
||||
*q++ = v;
|
||||
t += tincr;
|
||||
t += tincr;
|
||||
tincr += tincr2;
|
||||
}
|
||||
}
|
||||
@@ -220,141 +157,187 @@ static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
|
||||
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVPacket pkt = { 0 }; // data and size must be 0;
|
||||
AVFrame *frame = avcodec_alloc_frame();
|
||||
int got_packet, ret, dst_nb_samples;
|
||||
|
||||
AVPacket pkt;
|
||||
av_init_packet(&pkt);
|
||||
|
||||
c = st->codec;
|
||||
|
||||
get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);
|
||||
get_audio_frame(samples, audio_input_frame_size, c->channels);
|
||||
|
||||
/* convert samples from native format to destination codec format, using the resampler */
|
||||
if (swr_ctx) {
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
|
||||
c->sample_rate, c->sample_rate, AV_ROUND_UP);
|
||||
if (dst_nb_samples > max_dst_nb_samples) {
|
||||
av_free(dst_samples_data[0]);
|
||||
ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels,
|
||||
dst_nb_samples, c->sample_fmt, 0);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
max_dst_nb_samples = dst_nb_samples;
|
||||
dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
|
||||
c->sample_fmt, 0);
|
||||
}
|
||||
|
||||
/* convert to destination format */
|
||||
ret = swr_convert(swr_ctx,
|
||||
dst_samples_data, dst_nb_samples,
|
||||
(const uint8_t **)src_samples_data, src_nb_samples);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while converting\n");
|
||||
exit(1);
|
||||
}
|
||||
} else {
|
||||
dst_samples_data[0] = src_samples_data[0];
|
||||
dst_nb_samples = src_nb_samples;
|
||||
}
|
||||
|
||||
frame->nb_samples = dst_nb_samples;
|
||||
avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
|
||||
dst_samples_data[0], dst_samples_size, 0);
|
||||
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (!got_packet)
|
||||
return;
|
||||
pkt.size = avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
|
||||
|
||||
if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
|
||||
pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
|
||||
pkt.flags |= AV_PKT_FLAG_KEY;
|
||||
pkt.stream_index = st->index;
|
||||
pkt.data = audio_outbuf;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
ret = av_interleaved_write_frame(oc, &pkt);
|
||||
if (ret != 0) {
|
||||
fprintf(stderr, "Error while writing audio frame: %s\n",
|
||||
av_err2str(ret));
|
||||
/* write the compressed frame in the media file */
|
||||
if (av_interleaved_write_frame(oc, &pkt) != 0) {
|
||||
fprintf(stderr, "Error while writing audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
avcodec_free_frame(&frame);
|
||||
}
|
||||
|
||||
static void close_audio(AVFormatContext *oc, AVStream *st)
|
||||
{
|
||||
avcodec_close(st->codec);
|
||||
av_free(src_samples_data[0]);
|
||||
av_free(dst_samples_data[0]);
|
||||
|
||||
av_free(samples);
|
||||
av_free(audio_outbuf);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* video output */
|
||||
|
||||
static AVFrame *frame;
|
||||
static AVPicture src_picture, dst_picture;
|
||||
static int frame_count;
|
||||
static AVFrame *picture, *tmp_picture;
|
||||
static uint8_t *video_outbuf;
|
||||
static int frame_count, video_outbuf_size;
|
||||
|
||||
static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
|
||||
/* add a video output stream */
|
||||
static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c = st->codec;
|
||||
AVCodecContext *c;
|
||||
AVStream *st;
|
||||
AVCodec *codec;
|
||||
|
||||
st = avformat_new_stream(oc, NULL);
|
||||
if (!st) {
|
||||
fprintf(stderr, "Could not alloc stream\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = st->codec;
|
||||
|
||||
/* find the video encoder */
|
||||
codec = avcodec_find_encoder(codec_id);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
avcodec_get_context_defaults3(c, codec);
|
||||
|
||||
c->codec_id = codec_id;
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = 400000;
|
||||
/* resolution must be a multiple of two */
|
||||
c->width = 352;
|
||||
c->height = 288;
|
||||
/* time base: this is the fundamental unit of time (in seconds) in terms
|
||||
of which frame timestamps are represented. for fixed-fps content,
|
||||
timebase should be 1/framerate and timestamp increments should be
|
||||
identically 1. */
|
||||
c->time_base.den = STREAM_FRAME_RATE;
|
||||
c->time_base.num = 1;
|
||||
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
|
||||
c->pix_fmt = STREAM_PIX_FMT;
|
||||
if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
|
||||
/* just for testing, we also add B frames */
|
||||
c->max_b_frames = 2;
|
||||
}
|
||||
if (c->codec_id == CODEC_ID_MPEG1VIDEO){
|
||||
/* Needed to avoid using macroblocks in which some coeffs overflow.
|
||||
This does not happen with normal video, it just happens here as
|
||||
the motion of the chroma plane does not match the luma plane. */
|
||||
c->mb_decision=2;
|
||||
}
|
||||
// some formats want stream headers to be separate
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
return st;
|
||||
}
|
||||
|
||||
static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
AVFrame *picture;
|
||||
uint8_t *picture_buf;
|
||||
int size;
|
||||
|
||||
picture = avcodec_alloc_frame();
|
||||
if (!picture)
|
||||
return NULL;
|
||||
size = avpicture_get_size(pix_fmt, width, height);
|
||||
picture_buf = av_malloc(size);
|
||||
if (!picture_buf) {
|
||||
av_free(picture);
|
||||
return NULL;
|
||||
}
|
||||
avpicture_fill((AVPicture *)picture, picture_buf,
|
||||
pix_fmt, width, height);
|
||||
return picture;
|
||||
}
|
||||
|
||||
static void open_video(AVFormatContext *oc, AVStream *st)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c;
|
||||
|
||||
c = st->codec;
|
||||
|
||||
/* find the video encoder */
|
||||
codec = avcodec_find_encoder(c->codec_id);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* open the codec */
|
||||
ret = avcodec_open2(c, codec, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
|
||||
if (avcodec_open(c, codec) < 0) {
|
||||
fprintf(stderr, "could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* allocate and init a re-usable frame */
|
||||
frame = avcodec_alloc_frame();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
video_outbuf = NULL;
|
||||
if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
|
||||
/* allocate output buffer */
|
||||
/* XXX: API change will be done */
|
||||
/* buffers passed into lav* can be allocated any way you prefer,
|
||||
as long as they're aligned enough for the architecture, and
|
||||
they're freed appropriately (such as using av_free for buffers
|
||||
allocated with av_malloc) */
|
||||
video_outbuf_size = 200000;
|
||||
video_outbuf = av_malloc(video_outbuf_size);
|
||||
}
|
||||
|
||||
/* allocate the encoded raw picture */
|
||||
picture = alloc_picture(c->pix_fmt, c->width, c->height);
|
||||
if (!picture) {
|
||||
fprintf(stderr, "Could not allocate picture\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* Allocate the encoded raw picture. */
|
||||
ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* If the output format is not YUV420P, then a temporary YUV420P
|
||||
* picture is needed too. It is then converted to the required
|
||||
* output format. */
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate temporary picture: %s\n",
|
||||
av_err2str(ret));
|
||||
/* if the output format is not YUV420P, then a temporary YUV420P
|
||||
picture is needed too. It is then converted to the required
|
||||
output format */
|
||||
tmp_picture = NULL;
|
||||
if (c->pix_fmt != PIX_FMT_YUV420P) {
|
||||
tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
|
||||
if (!tmp_picture) {
|
||||
fprintf(stderr, "Could not allocate temporary picture\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* copy data and linesize picture pointers to frame */
|
||||
*((AVPicture *)frame) = dst_picture;
|
||||
}
|
||||
|
||||
/* Prepare a dummy image. */
|
||||
static void fill_yuv_image(AVPicture *pict, int frame_index,
|
||||
int width, int height)
|
||||
/* prepare a dummy image */
|
||||
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
|
||||
{
|
||||
int x, y, i;
|
||||
|
||||
i = frame_index;
|
||||
|
||||
/* Y */
|
||||
for (y = 0; y < height; y++)
|
||||
for (x = 0; x < width; x++)
|
||||
for (y = 0; y < height; y++) {
|
||||
for (x = 0; x < width; x++) {
|
||||
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
|
||||
}
|
||||
}
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < height / 2; y++) {
|
||||
for (x = 0; x < width / 2; x++) {
|
||||
for (y = 0; y < height/2; y++) {
|
||||
for (x = 0; x < width/2; x++) {
|
||||
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
|
||||
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
|
||||
}
|
||||
@@ -363,72 +346,76 @@ static void fill_yuv_image(AVPicture *pict, int frame_index,
|
||||
|
||||
static void write_video_frame(AVFormatContext *oc, AVStream *st)
|
||||
{
|
||||
int ret;
|
||||
static struct SwsContext *sws_ctx;
|
||||
AVCodecContext *c = st->codec;
|
||||
int out_size, ret;
|
||||
AVCodecContext *c;
|
||||
static struct SwsContext *img_convert_ctx;
|
||||
|
||||
c = st->codec;
|
||||
|
||||
if (frame_count >= STREAM_NB_FRAMES) {
|
||||
/* No more frames to compress. The codec has a latency of a few
|
||||
* frames if using B-frames, so we get the last frames by
|
||||
* passing the same picture again. */
|
||||
/* no more frame to compress. The codec has a latency of a few
|
||||
frames if using B frames, so we get the last frames by
|
||||
passing the same picture again */
|
||||
} else {
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
if (c->pix_fmt != PIX_FMT_YUV420P) {
|
||||
/* as we only generate a YUV420P picture, we must convert it
|
||||
* to the codec pixel format if needed */
|
||||
if (!sws_ctx) {
|
||||
sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
|
||||
c->width, c->height, c->pix_fmt,
|
||||
sws_flags, NULL, NULL, NULL);
|
||||
if (!sws_ctx) {
|
||||
fprintf(stderr,
|
||||
"Could not initialize the conversion context\n");
|
||||
to the codec pixel format if needed */
|
||||
if (img_convert_ctx == NULL) {
|
||||
img_convert_ctx = sws_getContext(c->width, c->height,
|
||||
PIX_FMT_YUV420P,
|
||||
c->width, c->height,
|
||||
c->pix_fmt,
|
||||
sws_flags, NULL, NULL, NULL);
|
||||
if (img_convert_ctx == NULL) {
|
||||
fprintf(stderr, "Cannot initialize the conversion context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
fill_yuv_image(&src_picture, frame_count, c->width, c->height);
|
||||
sws_scale(sws_ctx,
|
||||
(const uint8_t * const *)src_picture.data, src_picture.linesize,
|
||||
0, c->height, dst_picture.data, dst_picture.linesize);
|
||||
fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
|
||||
sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
|
||||
0, c->height, picture->data, picture->linesize);
|
||||
} else {
|
||||
fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
|
||||
fill_yuv_image(picture, frame_count, c->width, c->height);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
|
||||
/* Raw video case - directly store the picture in the packet */
|
||||
/* raw video case. The API will change slightly in the near
|
||||
future for that. */
|
||||
AVPacket pkt;
|
||||
av_init_packet(&pkt);
|
||||
|
||||
pkt.flags |= AV_PKT_FLAG_KEY;
|
||||
pkt.stream_index = st->index;
|
||||
pkt.data = dst_picture.data[0];
|
||||
pkt.size = sizeof(AVPicture);
|
||||
pkt.flags |= AV_PKT_FLAG_KEY;
|
||||
pkt.stream_index = st->index;
|
||||
pkt.data = (uint8_t *)picture;
|
||||
pkt.size = sizeof(AVPicture);
|
||||
|
||||
ret = av_interleaved_write_frame(oc, &pkt);
|
||||
} else {
|
||||
AVPacket pkt = { 0 };
|
||||
int got_packet;
|
||||
av_init_packet(&pkt);
|
||||
|
||||
/* encode the image */
|
||||
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
/* If size is zero, it means the image was buffered. */
|
||||
out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
|
||||
/* if zero size, it means the image was buffered */
|
||||
if (out_size > 0) {
|
||||
AVPacket pkt;
|
||||
av_init_packet(&pkt);
|
||||
|
||||
if (!ret && got_packet && pkt.size) {
|
||||
if (c->coded_frame->pts != AV_NOPTS_VALUE)
|
||||
pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
|
||||
if(c->coded_frame->key_frame)
|
||||
pkt.flags |= AV_PKT_FLAG_KEY;
|
||||
pkt.stream_index = st->index;
|
||||
pkt.data = video_outbuf;
|
||||
pkt.size = out_size;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
/* write the compressed frame in the media file */
|
||||
ret = av_interleaved_write_frame(oc, &pkt);
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
if (ret != 0) {
|
||||
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
|
||||
fprintf(stderr, "Error while writing video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
frame_count++;
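This hunk swaps between avcodec_encode_video2(), which fills an AVPacket and reports got_packet, and the older avcodec_encode_video(), which writes into a caller-supplied byte buffer and returns its size. A hedged sketch of the packet-based call only, assuming the example's c, frame, st and oc; the timestamps the encoder returns are in the codec time base and are rescaled to the stream time base before muxing:

/* Sketch: encode one frame with the packet-based API and mux it. */
AVPacket pkt = { 0 };   /* data and size must start at zero */
int got_packet = 0;
int ret;

av_init_packet(&pkt);
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
if (ret < 0)
    exit(1);
if (got_packet) {
    /* encoder timestamps use c->time_base; the muxer expects st->time_base */
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts = av_rescale_q(pkt.pts, c->time_base, st->time_base);
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts = av_rescale_q(pkt.dts, c->time_base, st->time_base);
    pkt.stream_index = st->index;
    ret = av_interleaved_write_frame(oc, &pkt);
}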
|
||||
@@ -437,9 +424,13 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
|
||||
static void close_video(AVFormatContext *oc, AVStream *st)
|
||||
{
|
||||
avcodec_close(st->codec);
|
||||
av_free(src_picture.data[0]);
|
||||
av_free(dst_picture.data[0]);
|
||||
av_free(frame);
|
||||
av_free(picture->data[0]);
|
||||
av_free(picture);
|
||||
if (tmp_picture) {
|
||||
av_free(tmp_picture->data[0]);
|
||||
av_free(tmp_picture);
|
||||
}
|
||||
av_free(video_outbuf);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
@@ -451,20 +442,17 @@ int main(int argc, char **argv)
|
||||
AVOutputFormat *fmt;
|
||||
AVFormatContext *oc;
|
||||
AVStream *audio_st, *video_st;
|
||||
AVCodec *audio_codec, *video_codec;
|
||||
double audio_time, video_time;
|
||||
int ret;
|
||||
double audio_pts, video_pts;
|
||||
int i;
|
||||
|
||||
/* Initialize libavcodec, and register all codecs and formats. */
|
||||
/* initialize libavcodec, and register all codecs and formats */
|
||||
av_register_all();
|
||||
|
||||
if (argc != 2) {
|
||||
printf("usage: %s output_file\n"
|
||||
"API example program to output a media file with libavformat.\n"
|
||||
"This program generates a synthetic audio and video stream, encodes and\n"
|
||||
"muxes them into a file named output_file.\n"
|
||||
"The output format is automatically guessed according to the file extension.\n"
|
||||
"Raw images can also be output by using '%%d' in the filename.\n"
|
||||
"Raw images can also be output by using '%%d' in the filename\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
@@ -482,83 +470,87 @@ int main(int argc, char **argv)
|
||||
}
|
||||
fmt = oc->oformat;
|
||||
|
||||
/* Add the audio and video streams using the default format codecs
|
||||
* and initialize the codecs. */
|
||||
/* add the audio and video streams using the default format codecs
|
||||
and initialize the codecs */
|
||||
video_st = NULL;
|
||||
audio_st = NULL;
|
||||
|
||||
if (fmt->video_codec != AV_CODEC_ID_NONE) {
|
||||
video_st = add_stream(oc, &video_codec, fmt->video_codec);
|
||||
if (fmt->video_codec != CODEC_ID_NONE) {
|
||||
video_st = add_video_stream(oc, fmt->video_codec);
|
||||
}
|
||||
if (fmt->audio_codec != AV_CODEC_ID_NONE) {
|
||||
audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
|
||||
if (fmt->audio_codec != CODEC_ID_NONE) {
|
||||
audio_st = add_audio_stream(oc, fmt->audio_codec);
|
||||
}
|
||||
|
||||
/* Now that all the parameters are set, we can open the audio and
|
||||
* video codecs and allocate the necessary encode buffers. */
|
||||
if (video_st)
|
||||
open_video(oc, video_codec, video_st);
|
||||
if (audio_st)
|
||||
open_audio(oc, audio_codec, audio_st);
|
||||
|
||||
av_dump_format(oc, 0, filename, 1);
|
||||
|
||||
/* now that all the parameters are set, we can open the audio and
|
||||
video codecs and allocate the necessary encode buffers */
|
||||
if (video_st)
|
||||
open_video(oc, video_st);
|
||||
if (audio_st)
|
||||
open_audio(oc, audio_st);
|
||||
|
||||
/* open the output file, if needed */
|
||||
if (!(fmt->flags & AVFMT_NOFILE)) {
|
||||
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open '%s': %s\n", filename,
|
||||
av_err2str(ret));
|
||||
if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
|
||||
fprintf(stderr, "Could not open '%s'\n", filename);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the stream header, if any. */
|
||||
ret = avformat_write_header(oc, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred when opening output file: %s\n",
|
||||
av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
/* write the stream header, if any */
|
||||
av_write_header(oc);
|
||||
picture->pts = 0;
|
||||
for(;;) {
|
||||
/* compute current audio and video time */
|
||||
if (audio_st)
|
||||
audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
|
||||
else
|
||||
audio_pts = 0.0;
|
||||
|
||||
if (frame)
|
||||
frame->pts = 0;
|
||||
for (;;) {
|
||||
/* Compute current audio and video time. */
|
||||
audio_time = audio_st ? audio_st->pts.val * av_q2d(audio_st->time_base) : 0.0;
|
||||
video_time = video_st ? video_st->pts.val * av_q2d(video_st->time_base) : 0.0;
|
||||
if (video_st)
|
||||
video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
|
||||
else
|
||||
video_pts = 0.0;
|
||||
|
||||
if ((!audio_st || audio_time >= STREAM_DURATION) &&
|
||||
(!video_st || video_time >= STREAM_DURATION))
|
||||
if ((!audio_st || audio_pts >= STREAM_DURATION) &&
|
||||
(!video_st || video_pts >= STREAM_DURATION))
|
||||
break;
|
||||
|
||||
/* write interleaved audio and video frames */
|
||||
if (!video_st || (video_st && audio_st && audio_time < video_time)) {
|
||||
if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
|
||||
write_audio_frame(oc, audio_st);
|
||||
} else {
|
||||
write_video_frame(oc, video_st);
|
||||
frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
|
||||
picture->pts++;
|
||||
}
|
||||
}
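Both variants of the loop above implement the same interleaving rule: convert each stream's current position to seconds and encode whichever stream is behind. One side uses av_q2d(), the other multiplies by time_base.num/den explicitly; the quantity is identical. A short worked sketch of the comparison, assuming the example's oc, audio_st, video_st and write_* helpers (the numbers in the comment are illustrative):

/* Positions in seconds: pts.val counts in units of the stream time base. */
double audio_time = audio_st ? audio_st->pts.val * av_q2d(audio_st->time_base) : 0.0;
double video_time = video_st ? video_st->pts.val * av_q2d(video_st->time_base) : 0.0;

/* e.g. audio pts.val = 44100 with time_base 1/44100 -> 1.00 s
 *      video pts.val = 25    with time_base 1/25    -> 1.00 s
 * Always writing the stream that is behind keeps the output interleaved. */
if (!video_st || (audio_st && audio_time < video_time))
    write_audio_frame(oc, audio_st);
else
    write_video_frame(oc, video_st);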
|
||||
|
||||
/* Write the trailer, if any. The trailer must be written before you
|
||||
* close the CodecContexts open when you wrote the header; otherwise
|
||||
* av_write_trailer() may try to use memory that was freed on
|
||||
* av_codec_close(). */
|
||||
/* write the trailer, if any. the trailer must be written
|
||||
* before you close the CodecContexts open when you wrote the
|
||||
* header; otherwise write_trailer may try to use memory that
|
||||
* was freed on av_codec_close() */
|
||||
av_write_trailer(oc);
|
||||
|
||||
/* Close each codec. */
|
||||
/* close each codec */
|
||||
if (video_st)
|
||||
close_video(oc, video_st);
|
||||
if (audio_st)
|
||||
close_audio(oc, audio_st);
|
||||
|
||||
if (!(fmt->flags & AVFMT_NOFILE))
|
||||
/* Close the output file. */
|
||||
/* free the streams */
|
||||
for(i = 0; i < oc->nb_streams; i++) {
|
||||
av_freep(&oc->streams[i]->codec);
|
||||
av_freep(&oc->streams[i]);
|
||||
}
|
||||
|
||||
if (!(fmt->flags & AVFMT_NOFILE)) {
|
||||
/* close the output file */
|
||||
avio_close(oc->pb);
|
||||
}
|
||||
|
||||
/* free the stream */
|
||||
avformat_free_context(oc);
|
||||
av_free(oc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -1,211 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @example doc/examples/resampling_audio.c
|
||||
* libswresample API use example.
|
||||
*/
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libswresample/swresample.h>
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
|
||||
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
|
||||
} sample_fmt_entries[] = {
|
||||
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
|
||||
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
|
||||
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
|
||||
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
|
||||
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
|
||||
};
|
||||
*fmt = NULL;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
|
||||
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
|
||||
if (sample_fmt == entry->sample_fmt) {
|
||||
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr,
|
||||
"Sample format %s not supported as output format\n",
|
||||
av_get_sample_fmt_name(sample_fmt));
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/**
|
||||
* Fill dst buffer with nb_samples, generated starting from t.
|
||||
*/
|
||||
void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t)
|
||||
{
|
||||
int i, j;
|
||||
double tincr = 1.0 / sample_rate, *dstp = dst;
|
||||
const double c = 2 * M_PI * 440.0;
|
||||
|
||||
/* generate sin tone with 440Hz frequency and duplicated channels */
|
||||
for (i = 0; i < nb_samples; i++) {
|
||||
*dstp = sin(c * *t);
|
||||
for (j = 1; j < nb_channels; j++)
|
||||
dstp[j] = dstp[0];
|
||||
dstp += nb_channels;
|
||||
*t += tincr;
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
|
||||
int src_rate = 48000, dst_rate = 44100;
|
||||
uint8_t **src_data = NULL, **dst_data = NULL;
|
||||
int src_nb_channels = 0, dst_nb_channels = 0;
|
||||
int src_linesize, dst_linesize;
|
||||
int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples;
|
||||
enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
const char *dst_filename = NULL;
|
||||
FILE *dst_file;
|
||||
int dst_bufsize;
|
||||
const char *fmt;
|
||||
struct SwrContext *swr_ctx;
|
||||
double t;
|
||||
int ret;
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s output_file\n"
|
||||
"API example program to show how to resample an audio stream with libswresample.\n"
|
||||
"This program generates a series of audio frames, resamples them to a specified "
|
||||
"output format and rate and saves them to an output file named output_file.\n",
|
||||
argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
dst_filename = argv[1];
|
||||
|
||||
dst_file = fopen(dst_filename, "wb");
|
||||
if (!dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", dst_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* create resampler context */
|
||||
swr_ctx = swr_alloc();
|
||||
if (!swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);
|
||||
|
||||
av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate source and destination samples buffers */
|
||||
|
||||
src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
|
||||
ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
|
||||
src_nb_samples, src_sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate source samples\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* compute the number of converted samples: buffering is avoided
|
||||
* ensuring that the output buffer will contain at least all the
|
||||
* converted input samples */
|
||||
max_dst_nb_samples = dst_nb_samples =
|
||||
av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
|
||||
|
||||
/* buffer is going to be directly written to a rawaudio file, no alignment */
|
||||
dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
|
||||
ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
|
||||
dst_nb_samples, dst_sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate destination samples\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
t = 0;
|
||||
do {
|
||||
/* generate synthetic audio */
|
||||
fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t);
|
||||
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
|
||||
src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
|
||||
if (dst_nb_samples > max_dst_nb_samples) {
|
||||
av_free(dst_data[0]);
|
||||
ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
|
||||
dst_nb_samples, dst_sample_fmt, 1);
|
||||
if (ret < 0)
|
||||
break;
|
||||
max_dst_nb_samples = dst_nb_samples;
|
||||
}
|
||||
|
||||
/* convert to destination format */
|
||||
ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while converting\n");
|
||||
goto end;
|
||||
}
|
||||
dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
|
||||
ret, dst_sample_fmt, 1);
|
||||
printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
|
||||
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
|
||||
} while (t < 10);
|
||||
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
|
||||
goto end;
|
||||
fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
|
||||
"ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
|
||||
fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);
|
||||
|
||||
end:
|
||||
if (dst_file)
|
||||
fclose(dst_file);
|
||||
|
||||
if (src_data)
|
||||
av_freep(&src_data[0]);
|
||||
av_freep(&src_data);
|
||||
|
||||
if (dst_data)
|
||||
av_freep(&dst_data[0]);
|
||||
av_freep(&dst_data);
|
||||
|
||||
swr_free(&swr_ctx);
|
||||
return ret < 0;
|
||||
}
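The conversion loop in the file above grows the destination buffer from the bound dst_nb_samples = ceil((delay + src_nb_samples) * dst_rate / src_rate), where the delay is the resampler's internal backlog, so swr_convert() never has to queue output internally. The computation on its own, with the example's variable names assumed to be in scope:

/* Upper bound on output samples for this call: account for samples the
 * resampler is still holding (swr_get_delay(), expressed at src_rate)
 * and round up so the output buffer is always large enough. */
int64_t delay = swr_get_delay(swr_ctx, src_rate);
int dst_nb_samples = av_rescale_rnd(delay + src_nb_samples,
                                    dst_rate, src_rate, AV_ROUND_UP);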
|
@@ -1,141 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libswscale API use example.
|
||||
* @example doc/examples/scaling_video.c
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/parseutils.h>
|
||||
#include <libswscale/swscale.h>
|
||||
|
||||
static void fill_yuv_image(uint8_t *data[4], int linesize[4],
|
||||
int width, int height, int frame_index)
|
||||
{
|
||||
int x, y;
|
||||
|
||||
/* Y */
|
||||
for (y = 0; y < height; y++)
|
||||
for (x = 0; x < width; x++)
|
||||
data[0][y * linesize[0] + x] = x + y + frame_index * 3;
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < height / 2; y++) {
|
||||
for (x = 0; x < width / 2; x++) {
|
||||
data[1][y * linesize[1] + x] = 128 + y + frame_index * 2;
|
||||
data[2][y * linesize[2] + x] = 64 + x + frame_index * 5;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
uint8_t *src_data[4], *dst_data[4];
|
||||
int src_linesize[4], dst_linesize[4];
|
||||
int src_w = 320, src_h = 240, dst_w, dst_h;
|
||||
enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_RGB24;
|
||||
const char *dst_size = NULL;
|
||||
const char *dst_filename = NULL;
|
||||
FILE *dst_file;
|
||||
int dst_bufsize;
|
||||
struct SwsContext *sws_ctx;
|
||||
int i, ret;
|
||||
|
||||
if (argc != 3) {
|
||||
fprintf(stderr, "Usage: %s output_file output_size\n"
|
||||
"API example program to show how to scale an image with libswscale.\n"
|
||||
"This program generates a series of pictures, rescales them to the given "
|
||||
"output_size and saves them to an output file named output_file\n."
|
||||
"\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
dst_filename = argv[1];
|
||||
dst_size = argv[2];
|
||||
|
||||
if (av_parse_video_size(&dst_w, &dst_h, dst_size) < 0) {
|
||||
fprintf(stderr,
|
||||
"Invalid size '%s', must be in the form WxH or a valid size abbreviation\n",
|
||||
dst_size);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
dst_file = fopen(dst_filename, "wb");
|
||||
if (!dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", dst_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* create scaling context */
|
||||
sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
|
||||
dst_w, dst_h, dst_pix_fmt,
|
||||
SWS_BILINEAR, NULL, NULL, NULL);
|
||||
if (!sws_ctx) {
|
||||
fprintf(stderr,
|
||||
"Impossible to create scale context for the conversion "
|
||||
"fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
|
||||
av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
|
||||
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
|
||||
ret = AVERROR(EINVAL);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate source and destination image buffers */
|
||||
if ((ret = av_image_alloc(src_data, src_linesize,
|
||||
src_w, src_h, src_pix_fmt, 16)) < 0) {
|
||||
fprintf(stderr, "Could not allocate source image\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer is going to be written to rawvideo file, no alignment */
|
||||
if ((ret = av_image_alloc(dst_data, dst_linesize,
|
||||
dst_w, dst_h, dst_pix_fmt, 1)) < 0) {
|
||||
fprintf(stderr, "Could not allocate destination image\n");
|
||||
goto end;
|
||||
}
|
||||
dst_bufsize = ret;
|
||||
|
||||
for (i = 0; i < 100; i++) {
|
||||
/* generate synthetic video */
|
||||
fill_yuv_image(src_data, src_linesize, src_w, src_h, i);
|
||||
|
||||
/* convert to destination format */
|
||||
sws_scale(sws_ctx, (const uint8_t * const*)src_data,
|
||||
src_linesize, 0, src_h, dst_data, dst_linesize);
|
||||
|
||||
/* write scaled image to file */
|
||||
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
|
||||
}
|
||||
|
||||
fprintf(stderr, "Scaling succeeded. Play the output file with the command:\n"
|
||||
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
|
||||
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);
|
||||
|
||||
end:
|
||||
if (dst_file)
|
||||
fclose(dst_file);
|
||||
av_freep(&src_data[0]);
|
||||
av_freep(&dst_data[0]);
|
||||
sws_freeContext(sws_ctx);
|
||||
return ret < 0;
|
||||
}
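
/* Not part of the example source: assuming an installed FFmpeg with
 * pkg-config files, this program can typically be built with
 * a command along these lines:
 *
 *   gcc -o scaling_video scaling_video.c \
 *       $(pkg-config --cflags --libs libswscale libavutil)
 */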
|
266
doc/faq.texi
@@ -79,17 +79,6 @@ not a bug they should fix:
|
||||
Then again, some of them do not know the difference between an undecidable
|
||||
problem and an NP-hard problem...
|
||||
|
||||
@section I have installed this library with my distro's package manager. Why does @command{configure} not see it?
|
||||
|
||||
Distributions usually split libraries in several packages. The main package
|
||||
contains the files necessary to run programs using the library. The
|
||||
development package contains the files necessary to build programs using the
|
||||
library. Sometimes, docs and/or data are in a separate package too.
|
||||
|
||||
To build FFmpeg, you need to install the development package. It is usually
|
||||
called @file{libfoo-dev} or @file{libfoo-devel}. You can remove it after the
|
||||
build is finished, but be sure to keep the main package.
|
||||
|
||||
@chapter Usage
|
||||
|
||||
@section ffmpeg does not work; what is wrong?
|
||||
@@ -110,16 +99,7 @@ Then you may run:
|
||||
|
||||
Notice that @samp{%d} is replaced by the image number.
|
||||
|
||||
@file{img%03d.jpg} means the sequence @file{img001.jpg}, @file{img002.jpg}, etc.
|
||||
|
||||
Use the @option{-start_number} option to declare a starting number for
|
||||
the sequence. This is useful if your sequence does not start with
|
||||
@file{img001.jpg} but is still in a numerical order. The following
|
||||
example will start with @file{img100.jpg}:
|
||||
|
||||
@example
|
||||
ffmpeg -f image2 -start_number 100 -i img%d.jpg /tmp/a.mpg
|
||||
@end example
|
||||
@file{img%03d.jpg} means the sequence @file{img001.jpg}, @file{img002.jpg}, etc...
|
||||
|
||||
If you have a large number of pictures to rename, you can use the
following command to ease the burden. The command, using the Bourne
|
||||
@@ -142,12 +122,6 @@ Then run:
|
||||
|
||||
The same logic is used for any image format that ffmpeg reads.
|
||||
|
||||
You can also use @command{cat} to pipe images to ffmpeg:
|
||||
|
||||
@example
|
||||
cat *.jpg | ffmpeg -f image2pipe -c:v mjpeg -i - output.mpg
|
||||
@end example
|
||||
|
||||
@section How do I encode a movie to single pictures?
|
||||
|
||||
Use:
|
||||
@@ -239,67 +213,8 @@ For ANY other help on Avisynth, please visit the
|
||||
|
||||
@section How can I join video files?
|
||||
|
||||
To "join" video files is quite ambiguous. The following list explains the
|
||||
different kinds of "joining" and points out how those are addressed in
|
||||
FFmpeg. To join video files may mean:
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
To put them one after the other: this is called to @emph{concatenate} them
|
||||
(in short: concat) and is addressed
|
||||
@ref{How can I concatenate video files, in this very faq}.
|
||||
|
||||
@item
|
||||
To put them together in the same file, to let the user choose between the
|
||||
different versions (example: different audio languages): this is called to
|
||||
@emph{multiplex} them together (in short: mux), and is done by simply
|
||||
invoking ffmpeg with several @option{-i} options (see the sketch just after this list).
|
||||
|
||||
@item
|
||||
For audio, to put all channels together in a single stream (example: two
|
||||
mono streams into one stereo stream): this is sometimes called to
|
||||
@emph{merge} them, and can be done using the
|
||||
@url{http://ffmpeg.org/ffmpeg-filters.html#amerge, @code{amerge}} filter.
|
||||
|
||||
@item
|
||||
For audio, to play one on top of the other: this is called to @emph{mix}
|
||||
them, and can be done by first merging them into a single stream and then
|
||||
using the @url{http://ffmpeg.org/ffmpeg-filters.html#pan, @code{pan}} filter to mix
|
||||
the channels at will.
|
||||
|
||||
@item
|
||||
For video, to display both together, side by side or one on top of a part of
|
||||
the other; it can be done using the
|
||||
@url{http://ffmpeg.org/ffmpeg-filters.html#overlay, @code{overlay}} video filter.
|
||||
|
||||
@end itemize
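
As a sketch of the multiplexing case mentioned above (the file names and
stream layout are placeholders, not a recommendation):

@example
ffmpeg -i video.mkv -i audio_en.mka -i audio_fr.mka \
       -map 0:v -map 1:a -map 2:a -c copy output.mkv
@end example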
|
||||
|
||||
@anchor{How can I concatenate video files}
|
||||
@section How can I concatenate video files?
|
||||
|
||||
There are several solutions, depending on the exact circumstances.
|
||||
|
||||
@subsection Concatenating using the concat @emph{filter}
|
||||
|
||||
FFmpeg has a @url{http://ffmpeg.org/ffmpeg-filters.html#concat,
|
||||
@code{concat}} filter designed specifically for that, with examples in the
|
||||
documentation. This operation is recommended if you need to re-encode.
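
For illustration only (file names are placeholders; see the filter
documentation for the authoritative syntax):

@example
ffmpeg -i input1.mp4 -i input2.mp4 -filter_complex \
       "[0:v][0:a][1:v][1:a]concat=n=2:v=1:a=1[v][a]" \
       -map "[v]" -map "[a]" output.mp4
@end example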
|
||||
|
||||
@subsection Concatenating using the concat @emph{demuxer}
|
||||
|
||||
FFmpeg has a @url{http://www.ffmpeg.org/ffmpeg-formats.html#concat,
|
||||
@code{concat}} demuxer which you can use when you want to avoid a re-encode and
|
||||
your format doesn't support file level concatenation.
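
For illustration (file names are placeholders): list the inputs in a text
file and hand it to the demuxer:

@example
# mylist.txt contains lines such as:
#   file 'input1.mp4'
#   file 'input2.mp4'
ffmpeg -f concat -i mylist.txt -c copy output.mp4
@end example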
|
||||
|
||||
@subsection Concatenating using the concat @emph{protocol} (file level)
|
||||
|
||||
FFmpeg has a @url{http://ffmpeg.org/ffmpeg-protocols.html#concat,
|
||||
@code{concat}} protocol designed specifically for that, with examples in the
|
||||
documentation.
|
||||
|
||||
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow concatenating
video by merely concatenating the files containing them.
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow joining video files by
merely concatenating them.
|
||||
|
||||
Hence you may concatenate your multimedia files by first transcoding them to
|
||||
these privileged formats, then using the humble @code{cat} command (or the
|
||||
@@ -307,38 +222,28 @@ equally humble @code{copy} under Windows), and finally transcoding back to your
|
||||
format of choice.
|
||||
|
||||
@example
|
||||
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
|
||||
ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
|
||||
ffmpeg -i input1.avi -same_quant intermediate1.mpg
|
||||
ffmpeg -i input2.avi -same_quant intermediate2.mpg
|
||||
cat intermediate1.mpg intermediate2.mpg > intermediate_all.mpg
|
||||
ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
|
||||
ffmpeg -i intermediate_all.mpg -same_quant output.avi
|
||||
@end example
|
||||
|
||||
Additionally, you can use the @code{concat} protocol instead of @code{cat} or
|
||||
@code{copy} which will avoid creation of a potentially huge intermediate file.
|
||||
Notice that you should either use @code{-same_quant} or set a reasonably high
|
||||
bitrate for your intermediate and output files, if you want to preserve
|
||||
video quality.
|
||||
|
||||
@example
|
||||
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
|
||||
ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
|
||||
ffmpeg -i concat:"intermediate1.mpg|intermediate2.mpg" -c copy intermediate_all.mpg
|
||||
ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
|
||||
@end example
|
||||
|
||||
Note that you may need to escape the character "|" which is special for many
|
||||
shells.
|
||||
|
||||
Another option is to use named pipes, should your platform support them:
|
||||
Also notice that you may avoid the huge intermediate files by taking advantage
|
||||
of named pipes, should your platform support it:
|
||||
|
||||
@example
|
||||
mkfifo intermediate1.mpg
|
||||
mkfifo intermediate2.mpg
|
||||
ffmpeg -i input1.avi -qscale:v 1 -y intermediate1.mpg < /dev/null &
|
||||
ffmpeg -i input2.avi -qscale:v 1 -y intermediate2.mpg < /dev/null &
|
||||
ffmpeg -i input1.avi -same_quant -y intermediate1.mpg < /dev/null &
|
||||
ffmpeg -i input2.avi -same_quant -y intermediate2.mpg < /dev/null &
|
||||
cat intermediate1.mpg intermediate2.mpg |\
|
||||
ffmpeg -f mpeg -i - -c:v mpeg4 -acodec libmp3lame output.avi
|
||||
ffmpeg -f mpeg -i - -same_quant -c:v mpeg4 -acodec libmp3lame output.avi
|
||||
@end example
|
||||
|
||||
@subsection Concatenating using raw audio and video
|
||||
|
||||
Similarly, the yuv4mpegpipe format and the raw video and raw audio codecs also
allow concatenation, and the transcoding step is almost lossless.
|
||||
When using multiple yuv4mpegpipe(s), the first line needs to be discarded
|
||||
@@ -346,8 +251,7 @@ from all but the first stream. This can be accomplished by piping through
|
||||
@code{tail} as seen below. Note that when piping through @code{tail} you
|
||||
must use command grouping, @code{@{ ;@}}, to background properly.
|
||||
|
||||
For example, let's say we want to concatenate two FLV files into an
|
||||
output.flv file:
|
||||
For example, let's say we want to join two FLV files into an output.flv file:
|
||||
|
||||
@example
|
||||
mkfifo temp1.a
|
||||
@@ -364,90 +268,15 @@ cat temp1.a temp2.a > all.a &
|
||||
cat temp1.v temp2.v > all.v &
|
||||
ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
|
||||
-f yuv4mpegpipe -i all.v \
|
||||
-y output.flv
|
||||
-same_quant -y output.flv
|
||||
rm temp[12].[av] all.[av]
|
||||
@end example
|
||||
|
||||
@section -profile option fails when encoding H.264 video with AAC audio
|
||||
|
||||
@command{ffmpeg} prints an error like
|
||||
|
||||
@example
|
||||
Undefined constant or missing '(' in 'baseline'
|
||||
Unable to parse option value "baseline"
|
||||
Error setting option profile to value baseline.
|
||||
@end example
|
||||
|
||||
Short answer: write @option{-profile:v} instead of @option{-profile}.
|
||||
|
||||
Long answer: this happens because the @option{-profile} option can apply to both
|
||||
video and audio. Specifically the AAC encoder also defines some profiles, none
|
||||
of which are named @var{baseline}.
|
||||
|
||||
The solution is to apply the @option{-profile} option to the video stream only
|
||||
by using @url{http://ffmpeg.org/ffmpeg.html#Stream-specifiers-1, Stream specifiers}.
|
||||
Appending @code{:v} to it will do exactly that.
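
For instance, a command of this shape applies the profile to the video
stream only (file names are placeholders and audio options are omitted):

@example
ffmpeg -i input.mov -c:v libx264 -profile:v baseline output.mp4
@end example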
|
||||
|
||||
@section Using @option{-f lavfi}, audio becomes mono for no apparent reason.
|
||||
|
||||
Use @option{-dumpgraph -} to find out exactly where the channel layout is
|
||||
lost.
|
||||
|
||||
Most likely, it is through @code{auto-inserted aresample}. Try to understand
|
||||
why the converting filter was needed at that place.
|
||||
|
||||
Just before the output is a likely place, as @option{-f lavfi} currently
|
||||
only supports packed S16.
|
||||
|
||||
Then insert the correct @code{aformat} explicitly in the filtergraph,
|
||||
specifying the exact format.
|
||||
|
||||
@example
|
||||
aformat=sample_fmts=s16:channel_layouts=stereo
|
||||
@end example
|
||||
|
||||
@section Why does FFmpeg not see the subtitles in my VOB file?
|
||||
|
||||
VOB and a few other formats do not have a global header that describes
|
||||
everything present in the file. Instead, applications are supposed to scan
|
||||
the file to see what it contains. Since VOB files are frequently large, only
|
||||
the beginning is scanned. If the subtitles only appear later in the file,
they will not be initially detected.
|
||||
|
||||
Some applications, including the @code{ffmpeg} command-line tool, can only
|
||||
work with streams that were detected during the initial scan; streams that
|
||||
are detected later are ignored.
|
||||
|
||||
The size of the initial scan is controlled by two options: @code{probesize}
|
||||
(default about 5 MB) and @code{analyzeduration} (default 5,000,000 µs = 5 s). For
|
||||
the subtitle stream to be detected, both values must be large enough.
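
For example, to make the initial scan cover a larger part of the file, you
might raise both values (the numbers below are only an illustration):

@example
ffmpeg -probesize 50M -analyzeduration 100M -i input.vob output.mkv
@end example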
|
||||
|
||||
@section Why was the @command{ffmpeg} @option{-sameq} option removed? What to use instead?
|
||||
|
||||
The @option{-sameq} option meant "same quantizer", and made sense only in a
|
||||
very limited set of cases. Unfortunately, a lot of people mistook it for
|
||||
"same quality" and used it in places where it did not make sense: it had
|
||||
roughly the expected visible effect, but achieved it in a very inefficient
|
||||
way.
|
||||
|
||||
Each encoder has its own set of options to set the quality-vs-size balance;
use the options for the encoder you are using to set the quality level to a
|
||||
point acceptable for your tastes. The most common options to do that are
|
||||
@option{-qscale} and @option{-qmax}, but you should peruse the documentation
|
||||
of the encoder you chose.
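
For example, with an MPEG-4 style encoder you might request a roughly
constant quantizer (the value 2 is only an illustration; check your
encoder's documentation for sensible ranges):

@example
ffmpeg -i input.avi -c:v mpeg4 -qscale:v 2 output.avi
@end example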
|
||||
|
||||
@chapter Development
|
||||
|
||||
@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?
|
||||
|
||||
Yes. Check the @file{doc/examples} directory in the source
|
||||
repository, also available online at:
|
||||
@url{https://github.com/FFmpeg/FFmpeg/tree/master/doc/examples}.
|
||||
|
||||
Examples are also installed by default, usually in
|
||||
@code{$PREFIX/share/ffmpeg/examples}.
|
||||
|
||||
You may also read the Developers Guide of the FFmpeg documentation. Alternatively,
|
||||
Yes. Read the Developers Guide of the FFmpeg documentation. Alternatively,
|
||||
examine the source code for one of the many open source projects that
|
||||
already incorporate FFmpeg at (@url{projects.html}).
|
||||
|
||||
@@ -459,8 +288,31 @@ with @code{#ifdef}s related to the compiler.
|
||||
|
||||
@section Is Microsoft Visual C++ supported?
|
||||
|
||||
Yes. Please see the @uref{platform.html, Microsoft Visual C++}
|
||||
section in the FFmpeg documentation.
|
||||
No. Microsoft Visual C++ is not compliant with the C99 standard and does
not, among other things, support the inline assembly used in FFmpeg.
|
||||
If you wish to use MSVC++ for your
|
||||
project then you can link the MSVC++ code with libav* as long as
|
||||
you compile the latter with a working C compiler. For more information, see
|
||||
the @emph{Microsoft Visual C++ compatibility} section in the FFmpeg
|
||||
documentation.
|
||||
|
||||
There have been efforts to make FFmpeg compatible with MSVC++ in the
|
||||
past. However, they have all been rejected as too intrusive, especially
|
||||
since MinGW does the job adequately. None of the core developers
|
||||
work with MSVC++ and thus this item is low priority. Should you find
|
||||
the silver bullet that solves this problem, feel free to shoot it at us.
|
||||
|
||||
We strongly recommend that you move from MSVC++ to the MinGW tools.
|
||||
|
||||
@section Can I use FFmpeg or libavcodec under Windows?
|
||||
|
||||
Yes, but the Cygwin or MinGW tools @emph{must} be used to compile FFmpeg.
|
||||
Read the @emph{Windows} section in the FFmpeg documentation to find more
|
||||
information.
|
||||
|
||||
To get help and instructions for building FFmpeg under Windows, check out
|
||||
the FFmpeg Windows Help Forum at
|
||||
@url{http://ffmpeg.arrozcru.org/}.
|
||||
|
||||
@section Can you add automake, libtool or autoconf support?
|
||||
|
||||
@@ -485,24 +337,6 @@ Yes, as long as the code is optional and can easily and cleanly be placed
|
||||
under #if CONFIG_GPL without breaking anything. So, for example, a new codec
|
||||
or filter would be OK under GPL while a bug fix to LGPL code would not.
|
||||
|
||||
@section I'm using FFmpeg from within my C application but the linker complains about missing symbols from the libraries themselves.
|
||||
|
||||
FFmpeg builds static libraries by default. In static libraries, dependencies
|
||||
are not handled. That has two consequences. First, you must specify the
|
||||
libraries in dependency order: @code{-lavdevice} must come before
|
||||
@code{-lavformat}, @code{-lavutil} must come after everything else, etc.
|
||||
Second, external libraries that are used in FFmpeg have to be specified too.
|
||||
|
||||
An easy way to get the full list of required libraries in dependency order
|
||||
is to use @code{pkg-config}.
|
||||
|
||||
@example
|
||||
c99 -o program program.c $(pkg-config --cflags --libs libavformat libavcodec)
|
||||
@end example
|
||||
|
||||
See @file{doc/example/Makefile} and @file{doc/example/pc-uninstalled} for
|
||||
more details.
|
||||
|
||||
@section I'm using FFmpeg from within my C++ application but the linker complains about missing symbols which seem to be available.
|
||||
|
||||
FFmpeg is a pure C project, so to use the libraries within your C++ application
|
||||
@@ -518,8 +352,8 @@ to use them you have to append -D__STDC_CONSTANT_MACROS to your CXXFLAGS
|
||||
|
||||
@section I have a file in memory / an API different from *open/*read/ libc, how do I use it with libavformat?
|
||||
|
||||
You have to create a custom AVIOContext using @code{avio_alloc_context},
|
||||
see @file{libavformat/aviobuf.c} in FFmpeg and @file{libmpdemux/demux_lavf.c} in MPlayer or MPlayer2 sources.
|
||||
You have to implement a URLProtocol, see @file{libavformat/file.c} in
|
||||
FFmpeg and @file{libmpdemux/demux_lavf.c} in MPlayer sources.
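
As a rough sketch of the AVIOContext approach (this code is not taken from
the FFmpeg sources; @code{buffer_data}, @code{read_packet} and the
@code{in_memory_*} variables are invented names, and error handling is
omitted):

@example
#include <string.h>
#include <libavformat/avformat.h>
#include <libavutil/mem.h>

struct buffer_data {
    const uint8_t *ptr;  /* next byte to hand out */
    size_t         left; /* bytes remaining */
};

static int read_packet(void *opaque, uint8_t *buf, int buf_size)
{
    struct buffer_data *bd = opaque;
    if (!bd->left)
        return AVERROR_EOF;
    if ((size_t)buf_size > bd->left)
        buf_size = bd->left;
    memcpy(buf, bd->ptr, buf_size);
    bd->ptr  += buf_size;
    bd->left -= buf_size;
    return buf_size;
}

/* in the caller, with in_memory_data / in_memory_size already filled in: */
struct buffer_data bd = { in_memory_data, in_memory_size };
uint8_t *avio_buf = av_malloc(4096);
AVIOContext *avio_ctx =
    avio_alloc_context(avio_buf, 4096, 0, &bd, read_packet, NULL, NULL);
AVFormatContext *fmt_ctx = avformat_alloc_context();
fmt_ctx->pb = avio_ctx;
int ret = avformat_open_input(&fmt_ctx, NULL, NULL, NULL);
@end example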
|
||||
|
||||
@section Where can I find libav* headers for Pascal/Delphi?
|
||||
|
||||
@@ -543,16 +377,4 @@ wrong if it is larger than the average!
|
||||
For example, if you have mixed 25 and 30 fps content, then r_frame_rate
|
||||
will be 150.
|
||||
|
||||
@section Why is @code{make fate} not running all tests?
|
||||
|
||||
Make sure you have the fate-suite samples and the @code{SAMPLES} Make variable
|
||||
or @code{FATE_SAMPLES} environment variable or the @code{--samples}
|
||||
@command{configure} option is set to the right path.
|
||||
|
||||
@section Why is @code{make fate} not finding the samples?
|
||||
|
||||
Do you happen to have a @code{~} character in the samples path to indicate a
|
||||
home directory? The value is used in ways where the shell cannot expand it,
|
||||
causing FATE to not find files. Just replace @code{~} by the full path.
|
||||
|
||||
@bye
|
||||
|
258
doc/fate.texi
@@ -1,201 +1,135 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Automated Testing Environment
|
||||
@settitle FATE Automated Testing Environment
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Automated Testing Environment}
|
||||
@center @titlefont{FATE Automated Testing Environment}
|
||||
@end titlepage
|
||||
|
||||
@node Top
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Introduction
|
||||
|
||||
FATE is an extended regression suite on the client-side and a means
|
||||
for results aggregation and presentation on the server-side.
|
||||
FATE provides a regression testsuite embedded within the FFmpeg build system.
|
||||
It can be run locally and optionally configured to send reports to a web
|
||||
aggregator and viewer at @url{http://fate.ffmpeg.org}.
|
||||
|
||||
The first part of this document explains how you can use FATE from
|
||||
your FFmpeg source directory to test your ffmpeg binary. The second
|
||||
part describes how you can run FATE to submit the results to FFmpeg's
|
||||
FATE server.
|
||||
It is advised to run FATE before submitting patches to the current codebase
|
||||
and to provide new tests when submitting patches that add additional features.
|
||||
|
||||
In any way you can have a look at the publicly viewable FATE results
|
||||
by visiting this website:
|
||||
@chapter Running FATE
|
||||
|
||||
@url{http://fate.ffmpeg.org/}
|
||||
@section Samples and References
|
||||
In order to run, FATE needs a large amount of data (samples and references)
|
||||
that is provided separately from the actual source distribution.
|
||||
|
||||
This is especially recommended for all people contributing source
|
||||
code to FFmpeg, as it can be seen if some test on some platform broke
|
||||
with their recent contribution. This usually happens on the platforms
|
||||
the developers could not test on.
|
||||
To inform the build system about the testsuite location, pass
|
||||
@option{--samples=<path to the samples>} to @command{configure} or set the
|
||||
@var{SAMPLES} Make variable or the @var{FATE_SAMPLES} environment variable
|
||||
to a suitable value.
|
||||
|
||||
The second part of this document describes how you can run FATE to
|
||||
submit your results to FFmpeg's FATE server. If you want to submit your
|
||||
results be sure to check that your combination of CPU, OS and compiler
|
||||
is not already listed on the above mentioned website.
|
||||
|
||||
In the third part you can find a comprehensive listing of FATE makefile
|
||||
targets and variables.
|
||||
|
||||
|
||||
@chapter Using FATE from your FFmpeg source directory
|
||||
|
||||
If you want to run FATE on your machine you need to have the samples
|
||||
in place. You can get the samples via the build target fate-rsync.
|
||||
Use this command from the top-level source directory:
|
||||
The dataset is available through @command{rsync}; it is possible to fetch
the current samples using the rsync command directly or through a specific
|
||||
@ref{Makefile target}.
|
||||
|
||||
@example
|
||||
make fate-rsync SAMPLES=fate-suite/
|
||||
make fate SAMPLES=fate-suite/
|
||||
# rsync -aL rsync://fate.ffmpeg.org/fate-suite/ fate-suite
|
||||
@end example
|
||||
|
||||
The above commands set the samples location by passing a makefile
|
||||
variable via command line. It is also possible to set the samples
|
||||
location at source configuration time by invoking configure with
|
||||
`--samples=<path to the samples directory>'. Afterwards you can
|
||||
invoke the makefile targets without setting the SAMPLES makefile
|
||||
variable. This is illustrated by the following commands:
|
||||
|
||||
@example
|
||||
./configure --samples=fate-suite/
|
||||
make fate-rsync
|
||||
make fate
|
||||
# make fate-rsync SAMPLES=fate-suite
|
||||
@end example
|
||||
|
||||
Yet another way to tell FATE about the location of the sample
|
||||
directory is by making sure the environment variable FATE_SAMPLES
|
||||
contains the path to your samples directory. This can be achieved
|
||||
by e.g. putting that variable in your shell profile or by setting
|
||||
it in your interactive session.
|
||||
|
||||
@example
|
||||
FATE_SAMPLES=fate-suite/ make fate
|
||||
@end example
|
||||
|
||||
@float NOTE
|
||||
Do not put a '~' character in the samples path to indicate a home
|
||||
directory. Because of shell nuances, this will cause FATE to fail.
|
||||
@end float
|
||||
|
||||
To use a custom wrapper to run the test, pass @option{--target-exec} to
|
||||
@command{configure} or set the @var{TARGET_EXEC} Make variable.
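
For example, a cross-compiled build could be tested through
@command{qemu-arm} like this (the sysroot path is only a placeholder):

@example
make fate TARGET_EXEC="qemu-arm -L /usr/arm-linux-gnueabi"
@end example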
|
||||
|
||||
|
||||
@chapter Submitting the results to the FFmpeg result aggregation server
|
||||
|
||||
To submit your results to the server you should run fate through the
|
||||
shell script @file{tests/fate.sh} from the FFmpeg sources. This script needs
|
||||
to be invoked with a configuration file as its first argument.
|
||||
|
||||
@example
|
||||
tests/fate.sh /path/to/fate_config
|
||||
@end example
|
||||
|
||||
A configuration file template with comments describing the individual
|
||||
configuration variables can be found at @file{doc/fate_config.sh.template}.
|
||||
|
||||
@ifhtml
|
||||
The mentioned configuration template is also available here:
|
||||
@verbatiminclude fate_config.sh.template
|
||||
@end ifhtml
|
||||
|
||||
Create a configuration that suits your needs, based on the configuration
|
||||
template. The `slot' configuration variable can be any string that is not
|
||||
yet used, but it is suggested that you name it adhering to the following
|
||||
pattern <arch>-<os>-<compiler>-<compiler version>. The configuration file
|
||||
itself will be sourced in a shell script, therefore all shell features may
|
||||
be used. This enables you to setup the environment as you need it for your
|
||||
build.
|
||||
|
||||
For your first test runs the `fate_recv' variable should be empty or
|
||||
commented out. This will run everything as normal except that it will omit
|
||||
the submission of the results to the server. The following files should be
|
||||
present in $workdir as specified in the configuration file:
|
||||
|
||||
@itemize
|
||||
@item configure.log
|
||||
@item compile.log
|
||||
@item test.log
|
||||
@item report
|
||||
@item version
|
||||
@end itemize
|
||||
|
||||
When you have everything working properly you can create an SSH key pair
|
||||
and send the public key to the FATE server administrator who can be contacted
|
||||
at the email address @email{fate-admin@@ffmpeg.org}.
|
||||
|
||||
Configure your SSH client to use public key authentication with that key
|
||||
when connecting to the FATE server. Also do not forget to check the identity
|
||||
of the server and to accept its host key. This can usually be achieved by
|
||||
running your SSH client manually and killing it after you accepted the key.
|
||||
The FATE server's fingerprint is:
|
||||
@chapter Manual Run
|
||||
The FATE regression tests can be run through @command{make}.
|
||||
Specific Makefile targets and Makefile variables are available:
|
||||
|
||||
@anchor{Makefile target}
|
||||
@section FATE Makefile targets
|
||||
@table @option
|
||||
@item RSA
|
||||
d3:f1:83:97:a4:75:2b:a6:fb:d6:e8:aa:81:93:97:51
|
||||
@item ECDSA
|
||||
76:9f:68:32:04:1e:d5:d4:ec:47:3f:dc:fc:18:17:86
|
||||
@end table
|
||||
|
||||
If you have problems connecting to the FATE server, it may help to try out
|
||||
the @command{ssh} command with one or more @option{-v} options. You should
|
||||
get detailed output concerning your SSH configuration and the authentication
|
||||
process.
|
||||
|
||||
The only thing left is to automate the execution of the fate.sh script and
|
||||
the synchronisation of the samples directory.
|
||||
|
||||
|
||||
@chapter FATE makefile targets and variables
|
||||
|
||||
@section Makefile targets
|
||||
|
||||
@table @option
|
||||
@item fate-rsync
|
||||
Download/synchronize sample files to the configured samples directory.
|
||||
|
||||
@item fate-list
|
||||
Will list all fate/regression test targets.
|
||||
|
||||
List all fate/regression test targets.
|
||||
@item fate-rsync
|
||||
Shortcut to download the fate test samples to the specified testsuite location.
|
||||
@item fate
|
||||
Run the FATE test suite (requires the fate-suite dataset).
|
||||
Run the FATE test suite (requires the fate-suite dataset).
|
||||
@end table
|
||||
|
||||
@section Makefile variables
|
||||
|
||||
@section Fate Makefile variables
|
||||
@table @option
|
||||
@item V
|
||||
Verbosity level, can be set to 0, 1 or 2.
|
||||
@itemize
|
||||
@item 0: show just the test arguments
|
||||
@item 1: show just the command used in the test
|
||||
@item 2: show everything
|
||||
@end itemize
|
||||
|
||||
Verbosity level, can be set to 0, 1 or 2.
|
||||
@table @option
|
||||
@item 0
|
||||
show just the test arguments
|
||||
@item 1
|
||||
show just the command used in the test
|
||||
@item 2
|
||||
show everything
|
||||
@end table
|
||||
@item SAMPLES
|
||||
Specify or override the path to the FATE samples at make time, it has a
|
||||
meaning only while running the regression tests.
|
||||
|
||||
Specify or override the path to the FATE samples at make time, it has a
|
||||
meaning only while running the regression tests.
|
||||
@item THREADS
|
||||
Specify how many threads to use while running regression tests, it is
|
||||
quite useful to detect thread-related regressions.
|
||||
@item THREAD_TYPE
|
||||
Specify which threading strategy to test, either @var{slice} or @var{frame};
by default @var{slice+frame}.
|
||||
@item CPUFLAGS
|
||||
Specify CPU flags.
|
||||
@item TARGET_EXEC
|
||||
Specify or override the wrapper used to run the tests.
|
||||
The @var{TARGET_EXEC} option provides a way to run FATE wrapped in
|
||||
@command{valgrind}, @command{qemu-user} or @command{wine} or on remote targets
|
||||
through @command{ssh}.
|
||||
@item GEN
|
||||
Set to @var{1} to generate the missing or mismatched references.
|
||||
Specify how many threads to use while running regression tests, it is
|
||||
quite useful to detect thread-related regressions.
|
||||
@end table
|
||||
|
||||
@section Examples
|
||||
@example
|
||||
make V=1 SAMPLES=/var/fate/samples THREADS=2 fate
|
||||
@end example
|
||||
|
||||
@chapter Automated Tests
|
||||
In order to automatically test specific configurations, e.g. multiple
|
||||
compilers, @command{tests/fate.sh} is provided.
|
||||
|
||||
This shell script builds FFmpeg, runs the regression tests and prepares a
|
||||
report that can be sent to @url{fate.ffmpeg.org} or directly examined locally.
|
||||
|
||||
@section Testing Profiles
|
||||
The configuration file passed to @command{fate.sh} is a shell script as well.
|
||||
|
||||
It must provide at least a @var{slot} identifier, the @var{repo} from
which to fetch the sources, the @var{samples} directory, and a @var{workdir} with
enough space to build and run all the tests.
An optional submit command @var{fate_recv} and a @var{comment} describing
the testing profile are also available.
|
||||
|
||||
Additional optional parameters to tune the FFmpeg build and reporting process
|
||||
can be passed.
|
||||
|
||||
@example
|
||||
make V=1 SAMPLES=/var/fate/samples THREADS=2 CPUFLAGS=mmx fate
|
||||
slot= # some unique identifier
|
||||
repo=git://source.ffmpeg.org/ffmpeg.git # the source repository
|
||||
samples=/path/to/fate/samples
|
||||
workdir= # directory in which to do all the work
|
||||
fate_recv="ssh -T fate@fate.ffmpeg.org" # command to submit report
|
||||
comment= # optional description
|
||||
|
||||
# the following are optional and map to configure options
|
||||
arch=
|
||||
cpu=
|
||||
cross_prefix=
|
||||
cc=
|
||||
target_os=
|
||||
sysroot=
|
||||
target_exec=
|
||||
target_path=
|
||||
extra_cflags=
|
||||
extra_ldflags=
|
||||
extra_libs=
|
||||
extra_conf= # extra configure options not covered above
|
||||
|
||||
#make= # name of GNU make if not 'make'
|
||||
makeopts= # extra options passed to 'make'
|
||||
#tar= # command to create a tar archive from its arguments on
|
||||
# stdout, defaults to 'tar c'
|
||||
@end example
|
||||
|
||||
@section Submitting Reports
|
||||
In order to send reports you need to create an @command{ssh} key and send it
|
||||
to the fate server administrator.
|
||||
The current server fingerprint is @var{b1:31:c8:79:3f:04:1d:f8:f2:23:26:5a:fd:55:fa:92}
|
||||
|
||||
|
139
doc/fate.txt
Normal file
@@ -0,0 +1,139 @@
|
||||
FATE Automated Testing Environment
|
||||
==================================
|
||||
|
||||
FATE is an extended regression suite on the client-side and a means
|
||||
for results aggregation and presentation on the server-side.
|
||||
|
||||
The first part of this document explains how you can use FATE from
|
||||
your FFmpeg source directory to test your ffmpeg binary. The second
|
||||
part describes how you can run FATE to submit the results to FFmpeg's
|
||||
FATE server.
|
||||
|
||||
In any way you can have a look at the publicly viewable FATE results
|
||||
by visiting this website:
|
||||
|
||||
http://fate.ffmpeg.org/
|
||||
|
||||
This is especially recommended for all people contributing source
|
||||
code to FFmpeg, as it can be seen if some test on some platform broke
|
||||
with their recent contribution. This usually happens on the platforms
|
||||
the developers could not test on.
|
||||
|
||||
The second part of this document describes how you can run FATE to
|
||||
submit your results to FFmpeg's FATE server. If you want to submit your
|
||||
results be sure to check that your combination of CPU, OS and compiler
|
||||
is not already listed on the above mentioned website.
|
||||
|
||||
In the third part you can find a comprehensive listing of FATE makefile
|
||||
targets and variables.
|
||||
|
||||
|
||||
1. Using FATE from your FFmpeg source directory
|
||||
-----------------------------------------------
|
||||
|
||||
If you want to run FATE on your machine you need to have the samples
|
||||
in place. You can get the samples via the build target fate-rsync.
|
||||
Use this command from the top-level source directory:
|
||||
|
||||
# make fate-rsync SAMPLES=fate-suite/
|
||||
# make fate SAMPLES=fate-suite/
|
||||
|
||||
The above commands set the samples location by passing a makefile
|
||||
variable via command line. It is also possible to set the samples
|
||||
location at source configuration time by invoking configure with
|
||||
`--samples=<path to the samples directory>'. Afterwards you can
|
||||
invoke the makefile targets without setting the SAMPLES makefile
|
||||
variable. This is illustrated by the following commands:
|
||||
|
||||
# ./configure --samples=fate-suite/
|
||||
# make fate-rsync
|
||||
# make fate
|
||||
|
||||
Yet another way to tell FATE about the location of the sample
|
||||
directory is by making sure the environment variable FATE_SAMPLES
|
||||
contains the path to your samples directory. This can be achieved
|
||||
by e.g. putting that variable in your shell profile or by setting
|
||||
it in your interactive session.
|
||||
|
||||
# FATE_SAMPLES=fate-suite/ make fate
|
||||
|
||||
NOTE:
|
||||
Do not put a '~' character in the samples path to indicate a home
|
||||
directory. Because of shell nuances, this will cause FATE to fail.
|
||||
|
||||
|
||||
2. Submitting the results to the FFmpeg result aggregation server
|
||||
-----------------------------------------------------------------
|
||||
|
||||
To submit your results to the server you should run fate through the
|
||||
shell script tests/fate.sh from the FFmpeg sources. This script needs
|
||||
to be invoked with a configuration file as its first argument.
|
||||
|
||||
# tests/fate.sh /path/to/fate_config
|
||||
|
||||
A configuration file template with comments describing the individual
|
||||
configuration variables can be found at tests/fate_config.sh.template .
|
||||
|
||||
Create a configuration that suits your needs, based on the configuration
|
||||
template. The `slot' configuration variable can be any string that is not
|
||||
yet used, but it is suggested that you name it adhering to the following
|
||||
pattern <arch>-<os>-<compiler>-<compiler version>. The configuration file
|
||||
itself will be sourced in a shell script, therefore all shell features may
|
||||
be used. This enables you to setup the environment as you need it for your
|
||||
build.
|
||||
|
||||
For your first test runs the `fate_recv' variable should be empty or
|
||||
commented out. This will run everything as normal except that it will omit
|
||||
the submission of the results to the server. The following files should be
|
||||
present in $workdir as specified in the configuration file:
|
||||
|
||||
- configure.log
|
||||
- compile.log
|
||||
- test.log
|
||||
- report
|
||||
- version
|
||||
|
||||
When you have everything working properly you can create an SSH key and
|
||||
send its public part to the FATE server administrator.
|
||||
|
||||
Configure your SSH client to use public key authentication with that key
|
||||
when connecting to the FATE server. Also do not forget to check the identity
|
||||
of the server and to accept its host key. This can usually be achieved by
|
||||
running your SSH client manually and killing it after you accepted the key.
|
||||
The FATE server's fingerprint is:
|
||||
|
||||
b1:31:c8:79:3f:04:1d:f8:f2:23:26:5a:fd:55:fa:92
|
||||
|
||||
The only thing left is to automate the execution of the fate.sh script and
|
||||
the synchronisation of the samples directory.
|
||||
|
||||
|
||||
3. FATE makefile targets and variables
|
||||
--------------------------------------
|
||||
|
||||
FATE Makefile targets:
|
||||
|
||||
fate-list
|
||||
Will list all fate/regression test targets.
|
||||
|
||||
fate
|
||||
Run the FATE test suite (requires the fate-suite dataset).
|
||||
|
||||
FATE Makefile variables:
|
||||
|
||||
V
|
||||
Verbosity level, can be set to 0, 1 or 2.
|
||||
* 0: show just the test arguments
|
||||
* 1: show just the command used in the test
|
||||
* 2: show everything
|
||||
|
||||
SAMPLES
|
||||
Specify or override the path to the FATE samples at make time, it has a
|
||||
meaning only while running the regression tests.
|
||||
|
||||
THREADS
|
||||
Specify how many threads to use while running regression tests, it is
|
||||
quite useful to detect thread-related regressions.
|
||||
|
||||
Example:
|
||||
make V=1 SAMPLES=/var/fate/samples THREADS=2 fate
|
@@ -1,45 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Bitstream Filters Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Bitstream Filters Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the bitstream filters provided by the
|
||||
libavcodec library.
|
||||
|
||||
A bitstream filter operates on the encoded stream data, and performs
|
||||
bitstream level modifications without performing decoding.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include bitstream_filters.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavcodec.html,libavcodec}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavcodec(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-bitstream-filters
|
||||
@settitle FFmpeg bitstream filters
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Codecs Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Codecs Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the codecs (decoders and encoders) provided by
|
||||
the libavcodec library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include codecs.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavcodec.html,libavcodec}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavcodec(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-codecs
|
||||
@settitle FFmpeg codecs
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Devices Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Devices Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the input and output devices provided by the
|
||||
libavdevice library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include devices.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavdevice.html,libavdevice}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavdevice(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-devices
|
||||
@settitle FFmpeg devices
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Filters Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Filters Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes filters, sources, and sinks provided by the
|
||||
libavfilter library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include filters.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavfilter.html,libavfilter}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavfilter(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-filters
|
||||
@settitle FFmpeg filters
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Formats Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Formats Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the supported formats (muxers and demuxers)
|
||||
provided by the libavformat library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include formats.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavformat.html,libavformat}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavformat(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-formats
|
||||
@settitle FFmpeg formats
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
4561
doc/ffmpeg-mt-authorship.txt
Normal file
File diff suppressed because it is too large
@@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Protocols Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Protocols Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the input and output protocols provided by the
|
||||
libavformat library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include protocols.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavformat.html,libavformat}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavformat(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-protocols
|
||||
@settitle FFmpeg protocols
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,44 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Resampler Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Resampler Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
The FFmpeg resampler provides a high-level interface to the
libswresample library audio resampling utilities. In particular it
allows performing audio resampling, audio channel layout rematrixing,
and audio format and packing layout conversion.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include resampler.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libswresample.html,libswresample}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswresample(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-resampler
|
||||
@settitle FFmpeg Resampler
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,43 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Scaler Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Scaler Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
The FFmpeg rescaler provides a high-level interface to the libswscale
library image conversion utilities. In particular it allows performing
image rescaling and pixel format conversion.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include scaler.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libswscale.html,libswscale}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswscale(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-scaler
|
||||
@settitle FFmpeg video scaling and pixel format converter
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Utilities Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Utilities Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes some generic features and utilities provided
|
||||
by the libavutil library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include utils.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavutil.html,libavutil}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavutil(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-utils
|
||||
@settitle FFmpeg utilities
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
1070
doc/ffmpeg.texi
File diff suppressed because it is too large
@@ -29,7 +29,7 @@
|
||||
\ / :
|
||||
+======\======================/======+ ^ :
|
||||
------> 0 | : source_index : st-:--- | : :
|
||||
OutputFile output_files[] / +------------------------------------+ : :
|
||||
OuputFile output_files[] / +------------------------------------+ : :
|
||||
/ 1 | : : : | : :
|
||||
^ +------+------------+-----+ / +------------------------------------+ : :
|
||||
: | : ost_index -:-----:------/ 2 | : : : | : :
|
||||
|
126
doc/ffplay.texi
@@ -11,7 +11,11 @@
|
||||
|
||||
@chapter Synopsis
|
||||
|
||||
ffplay [@var{options}] [@file{input_file}]
|
||||
@example
|
||||
@c man begin SYNOPSIS
|
||||
ffplay [options] [@file{input_file}]
|
||||
@c man end
|
||||
@end example
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
@@ -73,22 +77,11 @@ Default value is "video", if video is not present or cannot be played
|
||||
You can interactively cycle through the available show modes by
|
||||
pressing the key @key{w}.
|
||||
|
||||
@item -vf @var{filtergraph}
|
||||
Create the filtergraph specified by @var{filtergraph} and use it to
|
||||
filter the video stream.
|
||||
|
||||
@var{filtergraph} is a description of the filtergraph to apply to
|
||||
the stream, and must have a single video input and a single video
|
||||
output. In the filtergraph, the input is associated with the label
|
||||
@code{in}, and the output to the label @code{out}. See the
|
||||
ffmpeg-filters manual for more information about the filtergraph
|
||||
syntax.
|
||||
|
||||
@item -af @var{filtergraph}
|
||||
@var{filtergraph} is a description of the filtergraph to apply to
|
||||
the input audio.
|
||||
@item -vf @var{filter_graph}
|
||||
@var{filter_graph} is a description of the filter graph to apply to
|
||||
the input video.
|
||||
Use the option "-filters" to show all the available filters (including
|
||||
sources and sinks).
|
||||
also sources and sinks).
|
||||
|
||||
@item -i @var{input_file}
|
||||
Read @var{input_file}.
|
||||
@@ -99,13 +92,9 @@ Read @var{input_file}.
|
||||
@item -pix_fmt @var{format}
|
||||
Set pixel format.
|
||||
This option has been deprecated in favor of private options, try -pixel_format.
|
||||
|
||||
@item -stats
|
||||
Print several playback statistics, in particular show the stream
|
||||
duration, the codec parameters, the current position in the stream and
|
||||
the audio/video synchronisation drift. It is on by default; to
explicitly disable it you need to specify @code{-nostats}.
|
||||
|
||||
Show the stream duration, the codec parameters, the current position in
|
||||
the stream and the audio/video synchronisation drift.
|
||||
@item -bug
|
||||
Work around bugs.
|
||||
@item -fast
|
||||
@@ -145,20 +134,8 @@ Exit when video is done playing.
|
||||
Exit if any key is pressed.
|
||||
@item -exitonmousedown
|
||||
Exit if any mouse button is pressed.
|
||||
|
||||
@item -codec:@var{media_specifier} @var{codec_name}
|
||||
Force a specific decoder implementation for the stream identified by
|
||||
@var{media_specifier}, which can assume the values @code{a} (audio),
|
||||
@code{v} (video), and @code{s} (subtitle).
|
||||
|
||||
@item -acodec @var{codec_name}
|
||||
Force a specific audio decoder.
|
||||
|
||||
@item -vcodec @var{codec_name}
|
||||
Force a specific video decoder.
|
||||
|
||||
@item -scodec @var{codec_name}
|
||||
Force a specific subtitle decoder.
|
||||
@item -codec:@var{stream_type}
|
||||
Force a specific decoder implementation
|
||||
@end table
|
||||
|
||||
@section While playing
|
||||
@@ -191,9 +168,6 @@ Seek backward/forward 10 seconds.
|
||||
@item down/up
|
||||
Seek backward/forward 1 minute.
|
||||
|
||||
@item page down/page up
|
||||
Seek backward/forward 10 minutes.
|
||||
|
||||
@item mouse click
|
||||
Seek to percentage in file corresponding to fraction of width.
|
||||
|
||||
@@ -201,74 +175,28 @@ Seek to percentage in file corresponding to fraction of width.
|
||||
|
||||
@c man end
|
||||
|
||||
@include config.texi
|
||||
@ifset config-all
|
||||
@ifset config-avutil
|
||||
@include utils.texi
|
||||
@end ifset
|
||||
@ifset config-avcodec
|
||||
@include codecs.texi
|
||||
@include bitstream_filters.texi
|
||||
@end ifset
|
||||
@ifset config-avformat
|
||||
@include formats.texi
|
||||
@include eval.texi
|
||||
@include decoders.texi
|
||||
@include demuxers.texi
|
||||
@include muxers.texi
|
||||
@include indevs.texi
|
||||
@include outdevs.texi
|
||||
@include protocols.texi
|
||||
@end ifset
|
||||
@ifset config-avdevice
|
||||
@include devices.texi
|
||||
@end ifset
|
||||
@ifset config-swresample
|
||||
@include resampler.texi
|
||||
@end ifset
|
||||
@ifset config-swscale
|
||||
@include scaler.texi
|
||||
@end ifset
|
||||
@ifset config-avfilter
|
||||
@include filters.texi
|
||||
@end ifset
|
||||
@end ifset
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@ifset config-all
|
||||
@url{ffplay.html,ffplay},
|
||||
@end ifset
|
||||
@ifset config-not-all
|
||||
@url{ffplay-all.html,ffmpeg-all},
|
||||
@end ifset
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-utils.html,ffmpeg-utils},
|
||||
@url{ffmpeg-scaler.html,ffmpeg-scaler},
|
||||
@url{ffmpeg-resampler.html,ffmpeg-resampler},
|
||||
@url{ffmpeg-codecs.html,ffmpeg-codecs},
|
||||
@url{ffmpeg-bitstream-filters.html,ffmpeg-bitstream-filters},
|
||||
@url{ffmpeg-formats.html,ffmpeg-formats},
|
||||
@url{ffmpeg-devices.html,ffmpeg-devices},
|
||||
@url{ffmpeg-protocols.html,ffmpeg-protocols},
|
||||
@url{ffmpeg-filters.html,ffmpeg-filters}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
@ifset config-all
|
||||
ffplay(1),
|
||||
@end ifset
|
||||
@ifset config-not-all
|
||||
ffplay-all(1),
|
||||
@end ifset
|
||||
ffmpeg(1), ffprobe(1), ffserver(1),
|
||||
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
|
||||
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
|
||||
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffplay
|
||||
@settitle FFplay media player
|
||||
|
||||
@c man begin SEEALSO
|
||||
ffmpeg(1), ffprobe(1), ffserver(1) and the FFmpeg HTML documentation
|
||||
@c man end
|
||||
|
||||
@c man begin AUTHORS
|
||||
The FFmpeg developers
|
||||
@c man end
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
|
402
doc/ffprobe.texi
@@ -11,7 +11,13 @@
|
||||
|
||||
@chapter Synopsis
|
||||
|
||||
ffprobe [@var{options}] [@file{input_file}]
|
||||
The generic syntax is:
|
||||
|
||||
@example
|
||||
@c man begin SYNOPSIS
|
||||
ffprobe [options] [@file{input_file}]
|
||||
@c man end
|
||||
@end example
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
@@ -39,10 +45,6 @@ ffprobe output is designed to be easily parsable by a textual filter,
|
||||
and consists of one or more sections of a form defined by the selected
|
||||
writer, which is specified by the @option{print_format} option.
|
||||
|
||||
Sections may contain other nested sections, and are identified by a
|
||||
name (which may be shared by other sections), and a unique
|
||||
name. See the output of @option{sections}.
|
||||
|
||||
Metadata tags stored in the container or in the streams are recognized
|
||||
and printed in the corresponding "FORMAT" or "STREAM" section.
|
||||
|
||||
@@ -78,7 +80,7 @@ Use sexagesimal format HH:MM:SS.MICROSECONDS for time values.
|
||||
Prettify the format of the displayed values, it corresponds to the
|
||||
options "-unit -prefix -byte_binary_prefix -sexagesimal".
|
||||
|
||||
@item -of, -print_format @var{writer_name}[=@var{writer_options}]
|
||||
@item -print_format @var{writer_name}[=@var{writer_options}]
|
||||
Set the output printing format.
|
||||
|
||||
@var{writer_name} specifies the name of the writer, and
|
||||
@@ -92,37 +94,6 @@ For example for printing the output in JSON format, specify:
|
||||
For more details on the available output printing formats, see the
|
||||
Writers section below.
|
||||
|
||||
@item -sections
|
||||
Print sections structure and section information, and exit. The output
|
||||
is not meant to be parsed by a machine.
|
||||
|
||||
@item -select_streams @var{stream_specifier}
|
||||
Select only the streams specified by @var{stream_specifier}. This
|
||||
option affects only the options related to streams
|
||||
(e.g. @code{show_streams}, @code{show_packets}, etc.).
|
||||
|
||||
For example to show only audio streams, you can use the command:
|
||||
@example
|
||||
ffprobe -show_streams -select_streams a INPUT
|
||||
@end example
|
||||
|
||||
To show only video packets belonging to the video stream with index 1:
|
||||
@example
|
||||
ffprobe -show_packets -select_streams v:1 INPUT
|
||||
@end example
|
||||
|
||||
@item -show_data
|
||||
Show payload data, as a hexadecimal and ASCII dump. Coupled with
|
||||
@option{-show_packets}, it will dump the packets' data. Coupled with
|
||||
@option{-show_streams}, it will dump the codec extradata.
|
||||
|
||||
The dump is printed as the "data" field. It may contain newlines.
|
||||
|
||||
@item -show_error
|
||||
Show information about the error found when trying to probe the input.
|
||||
|
||||
The error information is printed within a section with name "ERROR".
|
||||
|
||||
@item -show_format
|
||||
Show information about the container format of the input multimedia
|
||||
stream.
|
||||
@@ -130,64 +101,6 @@ stream.
|
||||
All the container format information is printed within a section with
|
||||
name "FORMAT".
|
||||
|
||||
@item -show_format_entry @var{name}
|
||||
Like @option{-show_format}, but only prints the specified entry of the
|
||||
container format information, rather than all. This option may be given more
|
||||
than once, then all specified entries will be shown.
|
||||
|
||||
This option is deprecated, use @code{show_entries} instead.
|
||||
|
||||
@item -show_entries @var{section_entries}
|
||||
Set list of entries to show.
|
||||
|
||||
Entries are specified according to the following
|
||||
syntax. @var{section_entries} contains a list of section entries
|
||||
separated by @code{:}. Each section entry is composed of a section
|
||||
name (or unique name), optionally followed by a list of entries local
|
||||
to that section, separated by @code{,}.
|
||||
|
||||
If section name is specified but is followed by no @code{=}, all
|
||||
entries are printed to output, together with all the contained
|
||||
sections. Otherwise only the entries specified in the local section
|
||||
entries list are printed. In particular, if @code{=} is specified but
|
||||
the list of local entries is empty, then no entries will be shown for
|
||||
that section.
|
||||
|
||||
Note that the order of specification of the local section entries is
|
||||
not honored in the output, and the usual display order will be
|
||||
retained.
|
||||
|
||||
The formal syntax is given by:
|
||||
@example
|
||||
@var{LOCAL_SECTION_ENTRIES} ::= @var{SECTION_ENTRY_NAME}[,@var{LOCAL_SECTION_ENTRIES}]
|
||||
@var{SECTION_ENTRY} ::= @var{SECTION_NAME}[=[@var{LOCAL_SECTION_ENTRIES}]]
|
||||
@var{SECTION_ENTRIES} ::= @var{SECTION_ENTRY}[:@var{SECTION_ENTRIES}]
|
||||
@end example
|
||||
|
||||
For example, to show only the index and type of each stream, and the PTS
|
||||
time, duration time, and stream index of the packets, you can specify
|
||||
the argument:
|
||||
@example
|
||||
packet=pts_time,duration_time,stream_index : stream=index,codec_type
|
||||
@end example
|
||||
|
||||
To show all the entries in the section "format", but only the codec
|
||||
type in the section "stream", specify the argument:
|
||||
@example
|
||||
format : stream=codec_type
|
||||
@end example
|
||||
|
||||
To show all the tags in the stream and format sections:
|
||||
@example
|
||||
format_tags : format_tags
|
||||
@end example
|
||||
|
||||
To show only the @code{title} tag (if available) in the stream
|
||||
sections:
|
||||
@example
|
||||
stream_tags=title
|
||||
@end example
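For instance, a complete command line combining @option{-show_entries}
with the JSON writer might look like this (an illustrative sketch that
reuses the entry list from the first example above; @file{INPUT} is a
placeholder):
@example
ffprobe -v quiet -print_format json \
    -show_entries "packet=pts_time,duration_time,stream_index : stream=index,codec_type" INPUT
@end example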
|
||||
|
||||
@item -show_packets
|
||||
Show information about each packet contained in the input multimedia
|
||||
stream.
|
||||
@@ -195,13 +108,6 @@ stream.
|
||||
The information for each single packet is printed within a dedicated
|
||||
section with name "PACKET".
|
||||
|
||||
@item -show_frames
|
||||
Show information about each frame contained in the input multimedia
|
||||
stream.
|
||||
|
||||
The information for each single frame is printed within a dedicated
|
||||
section with name "FRAME".
|
||||
|
||||
@item -show_streams
|
||||
Show information about each media stream contained in the input
|
||||
multimedia stream.
|
||||
@@ -209,46 +115,6 @@ multimedia stream.
|
||||
Each media stream information is printed within a dedicated section
|
||||
with name "STREAM".
|
||||
|
||||
@item -show_chapters
|
||||
Show information about chapters stored in the format.
|
||||
|
||||
Each chapter is printed within a dedicated section with name "CHAPTER".
|
||||
|
||||
@item -count_frames
|
||||
Count the number of frames per stream and report it in the
|
||||
corresponding stream section.
|
||||
|
||||
@item -count_packets
|
||||
Count the number of packets per stream and report it in the
|
||||
corresponding stream section.
|
||||
|
||||
@item -show_private_data, -private
|
||||
Show private data, that is data depending on the format of the
|
||||
particular shown element.
|
||||
This option is enabled by default, but you may need to disable it
|
||||
for specific uses, for example when creating XSD-compliant XML output.
|
||||
|
||||
@item -show_program_version
|
||||
Show information related to program version.
|
||||
|
||||
Version information is printed within a section with name
|
||||
"PROGRAM_VERSION".
|
||||
|
||||
@item -show_library_versions
|
||||
Show information related to library versions.
|
||||
|
||||
Version information for each library is printed within a section with
|
||||
name "LIBRARY_VERSION".
|
||||
|
||||
@item -show_versions
|
||||
Show information related to program and library versions. This is the
|
||||
equivalent of setting both @option{-show_program_version} and
|
||||
@option{-show_library_versions} options.
|
||||
|
||||
@item -bitexact
|
||||
Force bitexact output, useful to produce output which is not dependent
|
||||
on the specific build.
|
||||
|
||||
@item -i @var{input_file}
|
||||
Read @var{input_file}.
|
||||
|
||||
@@ -258,12 +124,11 @@ Read @var{input_file}.
|
||||
@chapter Writers
|
||||
@c man begin WRITERS
|
||||
|
||||
A writer defines the output format adopted by @command{ffprobe}, and will be
|
||||
A writer defines the output format adopted by @file{ffprobe}, and will be
|
||||
used for printing all the parts of the output.
|
||||
|
||||
A writer may accept one or more arguments, which specify the options
|
||||
to adopt. The options are specified as a list of @var{key}=@var{value}
|
||||
pairs, separated by ":".
|
||||
A writer may accept one or more arguments, which specify the options to
|
||||
adopt.
|
||||
|
||||
A description of the currently available writers follows.
|
||||
|
||||
@@ -282,24 +147,8 @@ keyN=valN
|
||||
Metadata tags are printed as a line in the corresponding FORMAT or
|
||||
STREAM section, and are prefixed by the string "TAG:".
|
||||
|
||||
A description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
|
||||
@item nokey, nk
|
||||
If set to 1, do not print the key of each field. Default value
|
||||
is 0.
|
||||
|
||||
@item noprint_wrappers, nw
|
||||
If set to 1, do not print the section header and footer.
|
||||
Default value is 0.
|
||||
@end table
|
||||
|
||||
@section compact, csv
|
||||
Compact and CSV format.
|
||||
|
||||
The @code{csv} writer is equivalent to @code{compact}, but supports
|
||||
different defaults.
|
||||
@section compact
|
||||
Compact format.
|
||||
|
||||
Each section is printed on a single line.
|
||||
If no option is specified, the output has the form:
|
||||
@@ -311,29 +160,30 @@ Metadata tags are printed in the corresponding "format" or "stream"
|
||||
section. A metadata tag key, if printed, is prefixed by the string
|
||||
"tag:".
|
||||
|
||||
This writer accepts options as a list of @var{key}=@var{value} pairs,
|
||||
separated by ":".
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
|
||||
@item item_sep, s
|
||||
Specify the character to use for separating fields in the output line.
|
||||
It must be a single printable character, it is "|" by default ("," for
|
||||
the @code{csv} writer).
|
||||
It must be a single printable character, it is "|" by default.
|
||||
|
||||
@item nokey, nk
|
||||
If set to 1, do not print the key of each field. Its default
|
||||
value is 0 (1 for the @code{csv} writer).
|
||||
value is 0.
|
||||
|
||||
@item escape, e
|
||||
Set the escape mode to use, default to "c" ("csv" for the @code{csv}
|
||||
writer).
|
||||
Set the escape mode to use, default to "c".
|
||||
|
||||
It can assume one of the following values:
|
||||
@table @option
|
||||
@item c
|
||||
Perform C-like escaping. Strings containing a newline ('\n'), carriage
|
||||
return ('\r'), a tab ('\t'), a form feed ('\f'), the escaping
|
||||
character ('\') or the item separator character @var{SEP} are escaped using C-like fashioned
|
||||
Perform C-like escaping. Strings containing a newline ('\n') or
|
||||
carriage return ('\r'), the escaping character ('\') or the item
|
||||
separator character @var{SEP} are escaped using C-like fashioned
|
||||
escaping, so that a newline is converted to the sequence "\n", a
|
||||
carriage return to "\r", '\' to "\\" and the separator @var{SEP} is
|
||||
converted to "\@var{SEP}".
|
||||
@@ -347,219 +197,41 @@ containing a newline ('\n'), a carriage return ('\r'), a double quote
|
||||
Perform no escaping.
|
||||
@end table
|
||||
|
||||
@item print_section, p
|
||||
Print the section name at the begin of each line if the value is
|
||||
@code{1}, disable it with value set to @code{0}. Default value is
|
||||
@code{1}.
|
||||
|
||||
@end table
|
||||
|
||||
@section flat
|
||||
Flat format.
|
||||
@section csv
|
||||
CSV format.
|
||||
|
||||
A free-form output where each line contains an explicit key=value, such as
|
||||
"streams.stream.3.tags.foo=bar". The output is shell escaped, so it can be
|
||||
directly embedded in sh scripts as long as the separator character is an
|
||||
alphanumeric character or an underscore (see @var{sep_char} option).
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
@item sep_char, s
|
||||
Separator character used to separate the chapter, the section name, IDs and
|
||||
potential tags in the printed field key.
|
||||
|
||||
Default value is '.'.
|
||||
|
||||
@item hierarchical, h
|
||||
Specify if the section name specification should be hierarchical. If
|
||||
set to 1, and if there is more than one section in the current
|
||||
chapter, the section name will be prefixed by the name of the
|
||||
chapter. A value of 0 will disable this behavior.
|
||||
|
||||
Default value is 1.
|
||||
@end table
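As an illustrative use (not part of the original option list), the
shell-escaping property can be exploited to import the probing result
directly into a script; the separator is switched to an underscore so
the keys become valid variable names, and the exact keys produced
depend on the input file:
@example
eval "$(ffprobe -v quiet -print_format flat=sep_char=_ -show_format INPUT)"
echo $format_duration
@end example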
|
||||
|
||||
@section ini
|
||||
INI format output.
|
||||
|
||||
Print output in an INI based format.
|
||||
|
||||
The following conventions are adopted:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
all key and values are UTF-8
|
||||
@item
|
||||
'.' is the subgroup separator
|
||||
@item
|
||||
newline, '\t', '\f', '\b' and the following characters are escaped
|
||||
@item
|
||||
'\' is the escape character
|
||||
@item
|
||||
'#' is the comment indicator
|
||||
@item
|
||||
'=' is the key/value separator
|
||||
@item
|
||||
':' is not used but usually parsed as key/value separator
|
||||
@end itemize
|
||||
|
||||
This writer accepts options as a list of @var{key}=@var{value} pairs,
|
||||
separated by ":".
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
@item hierarchical, h
|
||||
Specify if the section name specification should be hierarchical. If
|
||||
set to 1, and if there is more than one section in the current
|
||||
chapter, the section name will be prefixed by the name of the
|
||||
chapter. A value of 0 will disable this behavior.
|
||||
|
||||
Default value is 1.
|
||||
@end table
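For example, an INI-style dump of a file could be requested with a
command along these lines (illustrative; @file{INPUT} is a placeholder):
@example
ffprobe -v quiet -print_format ini -show_format -show_streams INPUT
@end example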
|
||||
This writer is equivalent to
|
||||
@code{compact=item_sep=,:nokey=1:escape=csv}.
|
||||
|
||||
@section json
|
||||
JSON based format.
|
||||
|
||||
Each section is printed using JSON notation.
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
|
||||
@item compact, c
|
||||
If set to 1 enable compact output, that is each section will be
|
||||
printed on a single line. Default value is 0.
|
||||
@end table
|
||||
|
||||
For more information about JSON, see @url{http://www.json.org/}.
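For example, a compact JSON dump of the stream information could be
requested with the following illustrative command line:
@example
ffprobe -v quiet -print_format json=compact=1 -show_streams INPUT
@end example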
|
||||
|
||||
@section xml
|
||||
XML based format.
|
||||
|
||||
The XML output is described in the XML schema description file
|
||||
@file{ffprobe.xsd} installed in the FFmpeg datadir.
|
||||
|
||||
An updated version of the schema can be retrieved at the url
|
||||
@url{http://www.ffmpeg.org/schema/ffprobe.xsd}, which redirects to the
|
||||
latest schema committed into the FFmpeg development source code tree.
|
||||
|
||||
Note that the output will be compliant with the
|
||||
@file{ffprobe.xsd} schema only when no special global output options
|
||||
(@option{unit}, @option{prefix}, @option{byte_binary_prefix},
|
||||
@option{sexagesimal} etc.) are specified.
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
|
||||
@item fully_qualified, q
|
||||
If set to 1, the output will be fully qualified. Default
|
||||
value is 0.
|
||||
This is required for generating an XML file which can be validated
|
||||
through an XSD file.
|
||||
|
||||
@item xsd_compliant, x
|
||||
If set to 1, perform more checks to ensure that the output is XSD
|
||||
compliant. Default value is 0.
|
||||
This option automatically sets @option{fully_qualified} to 1.
|
||||
@end table
|
||||
|
||||
For more information about the XML format, see
|
||||
@url{http://www.w3.org/XML/}.
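As a sketch of how the schema can be used, assuming @command{xmllint}
from libxml2 is available and @file{ffprobe.xsd} has been copied from
the FFmpeg datadir:
@example
ffprobe -v quiet -print_format xml=xsd_compliant=1 -show_format -show_streams INPUT > out.xml
xmllint --noout --schema ffprobe.xsd out.xml
@end example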
|
||||
@c man end WRITERS
|
||||
|
||||
@chapter Timecode
|
||||
@c man begin TIMECODE
|
||||
|
||||
@command{ffprobe} supports Timecode extraction:
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
MPEG1/2 timecode is extracted from the GOP, and is available in the video
|
||||
stream details (@option{-show_streams}, see @var{timecode}).
|
||||
|
||||
@item
|
||||
MOV timecode is extracted from the tmcd track, so it is available in the tmcd
|
||||
stream metadata (@option{-show_streams}, see @var{TAG:timecode}).
|
||||
|
||||
@item
|
||||
DV, GXF and AVI timecodes are available in format metadata
|
||||
(@option{-show_format}, see @var{TAG:timecode}).
|
||||
|
||||
@end itemize
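For example, the MOV case above could be checked with a command along
these lines (illustrative; it relies on the @option{-show_entries}
tag-selection syntax described earlier):
@example
ffprobe -v quiet -show_entries stream_tags=timecode INPUT
@end example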
|
||||
@c man end TIMECODE
|
||||
|
||||
@include config.texi
|
||||
@ifset config-all
|
||||
@ifset config-avutil
|
||||
@include utils.texi
|
||||
@end ifset
|
||||
@ifset config-avcodec
|
||||
@include codecs.texi
|
||||
@include bitstream_filters.texi
|
||||
@end ifset
|
||||
@ifset config-avformat
|
||||
@include formats.texi
|
||||
@include decoders.texi
|
||||
@include demuxers.texi
|
||||
@include protocols.texi
|
||||
@end ifset
|
||||
@ifset config-avdevice
|
||||
@include devices.texi
|
||||
@end ifset
|
||||
@ifset config-swresample
|
||||
@include resampler.texi
|
||||
@end ifset
|
||||
@ifset config-swscale
|
||||
@include scaler.texi
|
||||
@end ifset
|
||||
@ifset config-avfilter
|
||||
@include filters.texi
|
||||
@end ifset
|
||||
@end ifset
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@ifset config-all
|
||||
@url{ffprobe.html,ffprobe},
|
||||
@end ifset
|
||||
@ifset config-not-all
|
||||
@url{ffprobe-all.html,ffprobe-all},
|
||||
@end ifset
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-utils.html,ffmpeg-utils},
|
||||
@url{ffmpeg-scaler.html,ffmpeg-scaler},
|
||||
@url{ffmpeg-resampler.html,ffmpeg-resampler},
|
||||
@url{ffmpeg-codecs.html,ffmpeg-codecs},
|
||||
@url{ffmpeg-bitstream-filters.html,ffmpeg-bitstream-filters},
|
||||
@url{ffmpeg-formats.html,ffmpeg-formats},
|
||||
@url{ffmpeg-devices.html,ffmpeg-devices},
|
||||
@url{ffmpeg-protocols.html,ffmpeg-protocols},
|
||||
@url{ffmpeg-filters.html,ffmpeg-filters}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
@ifset config-all
|
||||
ffprobe(1),
|
||||
@end ifset
|
||||
@ifset config-not-all
|
||||
ffprobe-all(1),
|
||||
@end ifset
|
||||
ffmpeg(1), ffplay(1), ffserver(1),
|
||||
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
|
||||
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
|
||||
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
@include indevs.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffprobe
|
||||
@settitle ffprobe media prober
|
||||
|
||||
@c man begin SEEALSO
|
||||
ffmpeg(1), ffplay(1), ffserver(1) and the FFmpeg HTML documentation
|
||||
@c man end
|
||||
|
||||
@c man begin AUTHORS
|
||||
The FFmpeg developers
|
||||
@c man end
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
|
218
doc/ffprobe.xsd
@@ -1,218 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"
|
||||
targetNamespace="http://www.ffmpeg.org/schema/ffprobe"
|
||||
xmlns:ffprobe="http://www.ffmpeg.org/schema/ffprobe">
|
||||
|
||||
<xsd:element name="ffprobe" type="ffprobe:ffprobeType"/>
|
||||
|
||||
<xsd:complexType name="ffprobeType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="chapters" type="ffprobe:chaptersType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="format" type="ffprobe:formatType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="error" type="ffprobe:errorType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="framesType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetType">
|
||||
<xsd:attribute name="codec_type" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="stream_index" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float" />
|
||||
<xsd:attribute name="dts" type="xsd:long" />
|
||||
<xsd:attribute name="dts_time" type="xsd:float" />
|
||||
<xsd:attribute name="duration" type="xsd:long" />
|
||||
<xsd:attribute name="duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="convergence_duration" type="xsd:long" />
|
||||
<xsd:attribute name="convergence_duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="size" type="xsd:long" use="required" />
|
||||
<xsd:attribute name="pos" type="xsd:long" />
|
||||
<xsd:attribute name="flags" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="data" type="xsd:string" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameType">
|
||||
<xsd:attribute name="media_type" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="key_frame" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_dts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_dts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_duration" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_duration_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pos" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_size" type="xsd:int" />
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="nb_samples" type="xsd:long" />
|
||||
<xsd:attribute name="channels" type="xsd:int" />
|
||||
<xsd:attribute name="channel_layout" type="xsd:string"/>
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:long" />
|
||||
<xsd:attribute name="height" type="xsd:long" />
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pict_type" type="xsd:string"/>
|
||||
<xsd:attribute name="coded_picture_number" type="xsd:long" />
|
||||
<xsd:attribute name="display_picture_number" type="xsd:long" />
|
||||
<xsd:attribute name="interlaced_frame" type="xsd:int" />
|
||||
<xsd:attribute name="top_field_first" type="xsd:int" />
|
||||
<xsd:attribute name="repeat_pict" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="stream" type="ffprobe:streamType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamDispositionType">
|
||||
<xsd:attribute name="default" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="dub" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="original" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="comment" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="lyrics" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="karaoke" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="forced" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="hearing_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="visual_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="clean_effects" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="attached_pic" type="xsd:int" use="required" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="disposition" type="ffprobe:streamDispositionType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="codec_name" type="xsd:string" />
|
||||
<xsd:attribute name="codec_long_name" type="xsd:string" />
|
||||
<xsd:attribute name="profile" type="xsd:string" />
|
||||
<xsd:attribute name="codec_type" type="xsd:string" />
|
||||
<xsd:attribute name="codec_time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="extradata" type="xsd:string" />
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:int"/>
|
||||
<xsd:attribute name="height" type="xsd:int"/>
|
||||
<xsd:attribute name="has_b_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="level" type="xsd:int"/>
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="channels" type="xsd:int"/>
|
||||
<xsd:attribute name="bits_per_sample" type="xsd:int"/>
|
||||
|
||||
<xsd:attribute name="id" type="xsd:string"/>
|
||||
<xsd:attribute name="r_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="avg_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration_ts" type="xsd:long"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_packets" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="formatType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="filename" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="format_name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="format_long_name" type="xsd:string"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="size" type="xsd:long"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:long"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="tagType">
|
||||
<xsd:attribute name="key" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="value" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="errorType">
|
||||
<xsd:attribute name="code" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="string" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programVersionType">
|
||||
<xsd:attribute name="version" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="copyright" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_date" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_time" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="compiler_type" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="compiler_version" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="configuration" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="chaptersType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="chapter" type="ffprobe:chapterType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="chapterType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="id" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="end" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="end_time" type="xsd:float" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="libraryVersionType">
|
||||
<xsd:attribute name="name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="major" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="minor" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="micro" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="version" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="ident" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="libraryVersionsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="library_version" type="ffprobe:libraryVersionType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
</xsd:schema>
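For reference, a minimal instance document consistent with the schema above might look like the following (attribute values are invented for illustration; only the attributes the schema marks as required are shown):

<ffprobe:ffprobe xmlns:ffprobe="http://www.ffmpeg.org/schema/ffprobe">
    <format filename="input.mp4" nb_streams="1" format_name="mov,mp4,m4a,3gp,3g2,mj2"/>
</ffprobe:ffprobe>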
|
@@ -25,6 +25,10 @@ MaxBandwidth 1000
|
||||
# '-' is the standard output.
|
||||
CustomLog -
|
||||
|
||||
# Suppress that if you want to launch ffserver as a daemon.
|
||||
NoDaemon
|
||||
|
||||
|
||||
##################################################################
|
||||
# Definition of the live feeds. Each live feed contains one video
|
||||
# and/or audio sequence coming from an ffmpeg encoder or another
|
||||
@@ -369,3 +373,5 @@ ACL allow 192.168.0.0 192.168.255.255
|
||||
<Redirect index.html>
|
||||
URL http://www.ffmpeg.org/
|
||||
</Redirect>
|
||||
|
||||
|
||||
|
@@ -9,35 +9,52 @@
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Synopsis
|
||||
@chapter Synopsys
|
||||
|
||||
ffserver [@var{options}]
|
||||
The generic syntax is:
|
||||
|
||||
@example
|
||||
@c man begin SYNOPSIS
|
||||
ffserver [options]
|
||||
@c man end
|
||||
@end example
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
@command{ffserver} is a streaming server for both audio and video. It
|
||||
supports several live feeds, streaming from files and time shifting on
|
||||
live feeds (you can seek to positions in the past on each live feed,
|
||||
provided you specify a big enough feed storage in
|
||||
@file{ffserver.conf}).
|
||||
ffserver is a streaming server for both audio and video. It supports
|
||||
|
||||
@command{ffserver} receives prerecorded files or FFM streams from some
|
||||
@command{ffmpeg} instance as input, then streams them over
|
||||
RTP/RTSP/HTTP.
|
||||
several live feeds, streaming from files and time shifting on live feeds
|
||||
(you can seek to positions in the past on each live feed, provided you
|
||||
specify a big enough feed storage in ffserver.conf).
|
||||
|
||||
An @command{ffserver} instance will listen on some port as specified
|
||||
in the configuration file. You can launch one or more instances of
|
||||
@command{ffmpeg} and send one or more FFM streams to the port where
|
||||
ffserver is expecting to receive them. Alternately, you can make
|
||||
@command{ffserver} launch such @command{ffmpeg} instances at startup.
|
||||
ffserver runs in daemon mode by default; that is, it puts itself in
|
||||
the background and detaches from its TTY, unless it is launched in
|
||||
debug mode or a NoDaemon option is specified in the configuration
|
||||
file.
|
||||
|
||||
Input streams are called feeds, and each one is specified by a
|
||||
@code{<Feed>} section in the configuration file.
|
||||
This documentation covers only the streaming aspects of ffserver /
|
||||
ffmpeg. Questions about ffmpeg parameters, codecs,
|
||||
etc. are not covered here. Read @file{ffmpeg.html} for more
|
||||
information.
|
||||
|
||||
@section How does it work?
|
||||
|
||||
ffserver receives prerecorded files or FFM streams from some ffmpeg
|
||||
instance as input, then streams them over RTP/RTSP/HTTP.
|
||||
|
||||
An ffserver instance will listen on some port as specified in the
|
||||
configuration file. You can launch one or more instances of ffmpeg and
|
||||
send one or more FFM streams to the port where ffserver is expecting
|
||||
to receive them. Alternately, you can make ffserver launch such ffmpeg
|
||||
instances at startup.
|
||||
|
||||
Input streams are called feeds, and each one is specified by a <Feed>
|
||||
section in the configuration file.
|
||||
|
||||
For each feed you can have different output streams in various
|
||||
formats, each one specified by a @code{<Stream>} section in the
|
||||
configuration file.
|
||||
formats, each one specified by a <Stream> section in the configuration
|
||||
file.
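A minimal configuration sketch tying one feed to one stream might look
like this (illustrative values only, not a complete @file{ffserver.conf}):
@example
Port 8090
<Feed feed1.ffm>
File /tmp/feed1.ffm
FileMaxSize 200K
</Feed>

<Stream test.mpg>
Feed feed1.ffm
Format mpeg
</Stream>
@end example
An encoder would then push into the feed with something like
@code{ffmpeg -i source.avi http://localhost:8090/feed1.ffm}.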
|
||||
|
||||
@section Status stream
|
||||
|
||||
@@ -73,6 +90,14 @@ web server can be used to serve up the files just as well.
|
||||
It can stream prerecorded video from .ffm files, though it is somewhat tricky
|
||||
to make it work correctly.
|
||||
|
||||
@section What do I need?
|
||||
|
||||
I use Linux on a 900 MHz Duron with a cheapo Bt848 based TV capture card. I'm
|
||||
using stock Linux 2.4.17 with the stock drivers. [Actually that isn't true,
|
||||
I needed some special drivers for my motherboard-based sound card.]
|
||||
|
||||
I understand that FreeBSD systems work just fine as well.
|
||||
|
||||
@section How do I make it work?
|
||||
|
||||
First, build the kit. It *really* helps to have installed LAME first. Then when
|
||||
@@ -122,7 +147,7 @@ that only captures in stereo and also requires that one channel be flipped.
|
||||
If you are one of these people, then export 'AUDIO_FLIP_LEFT=1' before
|
||||
starting ffmpeg.
|
||||
|
||||
@subsection The audio and video lose sync after a while.
|
||||
@subsection The audio and video loose sync after a while.
|
||||
|
||||
Yes, they do.
|
||||
|
||||
@@ -213,19 +238,6 @@ You use this by adding the ?date= to the end of the URL for the stream.
|
||||
For example: @samp{http://localhost:8080/test.asf?date=2002-07-26T23:05:00}.
|
||||
@c man end
|
||||
|
||||
@section What is FFM, FFM2
|
||||
|
||||
FFM and FFM2 are formats used by ffserver. They allow storing a wide variety of
|
||||
video and audio streams and encoding options, and can store a moving time segment
|
||||
of an infinite movie or a whole movie.
|
||||
|
||||
FFM is version specific, and there is limited compatibility of FFM files
|
||||
generated by one version of ffmpeg/ffserver and another version of
|
||||
ffmpeg/ffserver. It may work but it is not guaranteed to work.
|
||||
|
||||
FFM2 is extensible while maintaining compatibility and should work between
|
||||
differing versions of tools. FFM2 is the default.
|
||||
|
||||
@chapter Options
|
||||
@c man begin OPTIONS
|
||||
|
||||
@@ -242,79 +254,26 @@ within the various <Stream> sections. Since ffserver will not launch
|
||||
any ffmpeg instances, you will have to launch them manually.
|
||||
@item -d
|
||||
Enable debug mode. This option increases log verbosity, directs log
|
||||
messages to stdout.
|
||||
messages to stdout and causes ffserver to run in the foreground
|
||||
rather than as a daemon.
|
||||
@end table
|
||||
@c man end
|
||||
|
||||
@include config.texi
|
||||
@ifset config-all
|
||||
@ifset config-avutil
|
||||
@include utils.texi
|
||||
@end ifset
|
||||
@ifset config-avcodec
|
||||
@include codecs.texi
|
||||
@include bitstream_filters.texi
|
||||
@end ifset
|
||||
@ifset config-avformat
|
||||
@include formats.texi
|
||||
@include protocols.texi
|
||||
@end ifset
|
||||
@ifset config-avdevice
|
||||
@include devices.texi
|
||||
@end ifset
|
||||
@ifset config-swresample
|
||||
@include resampler.texi
|
||||
@end ifset
|
||||
@ifset config-swscale
|
||||
@include scaler.texi
|
||||
@end ifset
|
||||
@ifset config-avfilter
|
||||
@include filters.texi
|
||||
@end ifset
|
||||
@end ifset
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@ifset config-all
|
||||
@url{ffserver.html,ffserver},
|
||||
@end ifset
|
||||
@ifset config-not-all
|
||||
@url{ffserver-all.html,ffserver-all},
|
||||
@end ifset
|
||||
the @file{doc/ffserver.conf} example,
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe},
|
||||
@url{ffmpeg-utils.html,ffmpeg-utils},
|
||||
@url{ffmpeg-scaler.html,ffmpeg-scaler},
|
||||
@url{ffmpeg-resampler.html,ffmpeg-resampler},
|
||||
@url{ffmpeg-codecs.html,ffmpeg-codecs},
|
||||
@url{ffmpeg-bitstream-filters.html,ffmpeg-bitstream-filters},
|
||||
@url{ffmpeg-formats.html,ffmpeg-formats},
|
||||
@url{ffmpeg-devices.html,ffmpeg-devices},
|
||||
@url{ffmpeg-protocols.html,ffmpeg-protocols},
|
||||
@url{ffmpeg-filters.html,ffmpeg-filters}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
@ifset config-all
|
||||
ffserver(1),
|
||||
@end ifset
|
||||
@ifset config-not-all
|
||||
ffserver-all(1),
|
||||
@end ifset
|
||||
the @file{doc/ffserver.conf} example, ffmpeg(1), ffplay(1), ffprobe(1),
|
||||
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
|
||||
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
|
||||
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffserver
|
||||
@settitle ffserver video server
|
||||
|
||||
@c man begin SEEALSO
|
||||
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), the @file{ffserver.conf}
|
||||
example and the FFmpeg HTML documentation
|
||||
@c man end
|
||||
|
||||
@c man begin AUTHORS
|
||||
The FFmpeg developers
|
||||
@c man end
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
|
@@ -1,270 +0,0 @@
|
||||
Filter design
|
||||
=============
|
||||
|
||||
This document explains guidelines that should be observed (or ignored with
|
||||
good reason) when writing filters for libavfilter.
|
||||
|
||||
In this document, the word “frame” indicates either a video frame or a group
|
||||
of audio samples, as stored in an AVFilterBuffer structure.
|
||||
|
||||
|
||||
Format negotiation
|
||||
==================
|
||||
|
||||
The query_formats method should set, for each input and each output links,
|
||||
the list of supported formats.
|
||||
|
||||
For video links, that means pixel format. For audio links, that means
|
||||
channel layout, sample format (the sample packing is implied by the sample
|
||||
format) and sample rate.
|
||||
|
||||
The lists are not just lists, they are references to shared objects. When
|
||||
the negotiation mechanism computes the intersection of the formats
|
||||
supported at each end of a link, all references to both lists are replaced
|
||||
with a reference to the intersection. And when a single format is
|
||||
eventually chosen for a link amongst the remaining list, again, all
|
||||
references to the list are updated.
|
||||
|
||||
That means that if a filter requires that its input and output have the
|
||||
same format amongst a supported list, all it has to do is use a reference
|
||||
to the same list of formats.
|
||||
|
||||
query_formats can leave some formats unset and return AVERROR(EAGAIN) to
|
||||
cause the negotiation mechanism to try again later. That can be used by
|
||||
filters with complex requirements to use the format negotiated on one link
|
||||
to set the formats supported on another.
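As a sketch, a video filter that supports a fixed set of pixel formats
and forces input and output to agree typically builds one list and
installs it on all links. The helper names below (ff_make_format_list,
ff_set_common_formats) are the internal libavfilter helpers of that era;
treat the exact signatures as assumptions to be checked against the tree
in use:

    static int query_formats(AVFilterContext *ctx)
    {
        /* one shared list, so input and output end up with the same format */
        static const enum AVPixelFormat pix_fmts[] = {
            AV_PIX_FMT_YUV420P, AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE
        };
        ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
        return 0;
    }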
|
||||
|
||||
|
||||
Buffer references ownership and permissions
|
||||
===========================================
|
||||
|
||||
Principle
|
||||
---------
|
||||
|
||||
Audio and video data are voluminous; the buffer and buffer reference
|
||||
mechanism is intended to avoid, as much as possible, expensive copies of
|
||||
that data while still allowing the filters to produce correct results.
|
||||
|
||||
The data is stored in buffers represented by AVFilterBuffer structures.
|
||||
They must not be accessed directly, but through references stored in
|
||||
AVFilterBufferRef structures. Several references can point to the
|
||||
same buffer; the buffer is automatically deallocated once all
|
||||
corresponding references have been destroyed.
|
||||
|
||||
The characteristics of the data (resolution, sample rate, etc.) are
|
||||
stored in the reference; different references for the same buffer can
|
||||
show different characteristics. In particular, a video reference can
|
||||
point to only a part of a video buffer.
|
||||
|
||||
A reference is usually obtained as input to the start_frame or
|
||||
filter_frame method or requested using the ff_get_video_buffer or
|
||||
ff_get_audio_buffer functions. A new reference on an existing buffer can
|
||||
be created with the avfilter_ref_buffer. A reference is destroyed using
|
||||
the avfilter_unref_bufferp function.
|
||||
|
||||
Reference ownership
|
||||
-------------------
|
||||
|
||||
At any time, a reference “belongs” to a particular piece of code,
|
||||
usually a filter. With a few caveats that will be explained below, only
|
||||
that piece of code is allowed to access it. It is also responsible for
|
||||
destroying it, although this is sometimes done automatically (see the
|
||||
section on link reference fields).
|
||||
|
||||
Here are the (fairly obvious) rules for reference ownership:
|
||||
|
||||
* A reference received by the filter_frame method (or its start_frame
|
||||
deprecated version) belongs to the corresponding filter.
|
||||
|
||||
Special exception: for video references: the reference may be used
|
||||
internally for automatic copying and must not be destroyed before
|
||||
end_frame; it can be given away to ff_start_frame.
|
||||
|
||||
* A reference passed to ff_filter_frame (or the deprecated
|
||||
ff_start_frame) is given away and must no longer be used.
|
||||
|
||||
* A reference created with avfilter_ref_buffer belongs to the code that
|
||||
created it.
|
||||
|
||||
* A reference obtained with ff_get_video_buffer or ff_get_audio_buffer
|
||||
belongs to the code that requested it.
|
||||
|
||||
* A reference given as return value by the get_video_buffer or
|
||||
get_audio_buffer method is given away and must no longer be used.
|
||||
|
||||
Link reference fields
|
||||
---------------------
|
||||
|
||||
The AVFilterLink structure has a few AVFilterBufferRef fields. The
|
||||
cur_buf and out_buf were used with the deprecated
|
||||
start_frame/draw_slice/end_frame API and should no longer be used.
|
||||
src_buf, cur_buf_copy and partial_buf are used by libavfilter internally
|
||||
and must not be accessed by filters.
|
||||
|
||||
Reference permissions
|
||||
---------------------
|
||||
|
||||
The AVFilterBufferRef structure has a perms field that describes what
|
||||
the code that owns the reference is allowed to do to the buffer data.
|
||||
Different references for the same buffer can have different permissions.
|
||||
|
||||
For video filters that implement the deprecated
|
||||
start_frame/draw_slice/end_frame API, the permissions only apply to the
|
||||
parts of the buffer that have already been covered by the draw_slice
|
||||
method.
|
||||
|
||||
The value is a binary OR of the following constants:
|
||||
|
||||
* AV_PERM_READ: the owner can read the buffer data; this is essentially
|
||||
always true and is there for self-documentation.
|
||||
|
||||
* AV_PERM_WRITE: the owner can modify the buffer data.
|
||||
|
||||
* AV_PERM_PRESERVE: the owner can rely on the fact that the buffer data
|
||||
will not be modified by previous filters.
|
||||
|
||||
* AV_PERM_REUSE: the owner can output the buffer several times, without
|
||||
modifying the data in between.
|
||||
|
||||
* AV_PERM_REUSE2: the owner can output the buffer several times and
|
||||
modify the data in between (useless without the WRITE permissions).
|
||||
|
||||
* AV_PERM_ALIGN: the owner can access the data using fast operations
|
||||
that require data alignment.
|
||||
|
||||
The READ, WRITE and PRESERVE permissions are about sharing the same
|
||||
buffer between several filters to avoid expensive copies without them
|
||||
doing conflicting changes on the data.
|
||||
|
||||
The REUSE and REUSE2 permissions are about special memory for direct
|
||||
rendering. For example a buffer directly allocated in video memory must
|
||||
not be modified once it is displayed on screen, or it will cause tearing;
|
||||
it will therefore not have the REUSE2 permission.
|
||||
|
||||
The ALIGN permission is about extracting part of the buffer, for
|
||||
copy-less padding or cropping for example.
|
||||
|
||||
|
||||
References received on input pads are guaranteed to have all the
|
||||
permissions stated in the min_perms field and none of the permissions
|
||||
stated in the rej_perms.
|
||||
|
||||
References obtained by ff_get_video_buffer and ff_get_audio_buffer are
|
||||
guaranteed to have at least all the permissions requested as argument.
|
||||
|
||||
References created by avfilter_ref_buffer have the same permissions as
|
||||
the original reference minus the ones explicitly masked; the mask is
|
||||
usually ~0 to keep the same permissions.
|
||||
|
||||
Filters should remove permissions on reference they give to output
|
||||
whenever necessary. It can be automatically done by setting the
|
||||
rej_perms field on the output pad.
|
||||
|
||||
Here are a few guidelines corresponding to common situations:
|
||||
|
||||
* Filters that modify and forward their frame (like drawtext) need the
|
||||
WRITE permission.
|
||||
|
||||
* Filters that read their input to produce a new frame on output (like
|
||||
scale) need the READ permission on input and must request a buffer
|
||||
with the WRITE permission.
|
||||
|
||||
* Filters that intend to keep a reference after the filtering process
|
||||
is finished (after filter_frame returns) must have the PRESERVE
|
||||
permission on it and remove the WRITE permission if they create a new
|
||||
reference to give it away.
|
||||
|
||||
* Filters that intend to modify a reference they have kept after the end
|
||||
of the filtering process need the REUSE2 permission and must remove
|
||||
the PRESERVE permission if they create a new reference to give it
|
||||
away.
|
||||
|
||||
|
||||
Frame scheduling
|
||||
================
|
||||
|
||||
The purpose of these rules is to ensure that frames flow in the filter
|
||||
graph without getting stuck and accumulating somewhere.
|
||||
|
||||
Simple filters that output one frame for each input frame should not have
|
||||
to worry about it.
|
||||
|
||||
filter_frame
|
||||
------------
|
||||
|
||||
This method is called when a frame is pushed to the filter's input. It
|
||||
can be called at any time except in a reentrant way.
|
||||
|
||||
If the input frame is enough to produce output, then the filter should
|
||||
push the output frames on the output link immediately.
|
||||
|
||||
As an exception to the previous rule, if the input frame is enough to
|
||||
produce several output frames, then the filter needs output only at
|
||||
least one per link. The additional frames can be left buffered in the
|
||||
filter; these buffered frames must be flushed immediately if a new input
|
||||
produces new output.
|
||||
|
||||
(Example: frame rate-doubling filter: filter_frame must (1) flush the
|
||||
second copy of the previous frame, if it is still there, (2) push the
|
||||
first copy of the incoming frame, (3) keep the second copy for later.)
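A pseudo-code sketch of that rate-doubling case, in the same loose style
as the request_frame example later in this document (timestamp
adjustment and error handling omitted; "pending" and "outlink" are
placeholders):

    filter_frame(inlink, frame):
        if (pending) {
            ff_filter_frame(outlink, pending);       /* flush the stored second copy */
            pending = NULL;
        }
        pending = avfilter_ref_buffer(frame, ~0);    /* keep the second copy; must be
                                                        taken before frame is given away */
        return ff_filter_frame(outlink, frame);      /* push the first copy */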
|
||||
|
||||
If the input frame is not enough to produce output, the filter must not
|
||||
call request_frame to get more. It must just process the frame or queue
|
||||
it. The task of requesting more frames is left to the filter's
|
||||
request_frame method or the application.
|
||||
|
||||
If a filter has several inputs, the filter must be ready for frames
|
||||
arriving randomly on any input. Therefore, any filter with several inputs
|
||||
will most likely require some kind of queuing mechanism. It is perfectly
|
||||
acceptable to have a limited queue and to drop frames when the inputs
|
||||
are too unbalanced.
|
||||
|
||||
request_frame
|
||||
-------------
|
||||
|
||||
This method is called when a frame is wanted on an output.
|
||||
|
||||
For an input, it should directly call filter_frame on the corresponding
|
||||
output.
|
||||
|
||||
For a filter, if there are queued frames already ready, one of these
|
||||
frames should be pushed. If not, the filter should request a frame on
|
||||
one of its inputs, repeatedly until at least one frame has been pushed.
|
||||
|
||||
Return values:
|
||||
if request_frame could produce a frame, it should return 0;
|
||||
if it could not for temporary reasons, it should return AVERROR(EAGAIN);
|
||||
if it could not because there are no more frames, it should return
|
||||
AVERROR_EOF.
|
||||
|
||||
The typical implementation of request_frame for a filter with several
|
||||
inputs will look like this:
|
||||
|
||||
if (frames_queued) {
|
||||
push_one_frame();
|
||||
return 0;
|
||||
}
|
||||
while (!frame_pushed) {
|
||||
input = input_where_a_frame_is_most_needed();
|
||||
ret = ff_request_frame(input);
|
||||
if (ret == AVERROR_EOF) {
|
||||
process_eof_on_input();
|
||||
} else if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
||||
Note that, except for filters that can have queued frames, request_frame
|
||||
does not push frames: it requests them to its input, and as a reaction,
|
||||
the filter_frame method will be called and do the work.
|
||||
|
||||
Legacy API
|
||||
==========
|
||||
|
||||
Until libavfilter 3.23, the filter_frame method was split:
|
||||
|
||||
- for video filters, it was made of start_frame, draw_slice (that could be
|
||||
called several times on distinct parts of the frame) and end_frame;
|
||||
|
||||
- for audio filters, it was called filter_samples.
|
8456
doc/filters.texi
File diff suppressed because it is too large
155
doc/formats.texi
@@ -1,155 +0,0 @@
|
||||
@chapter Format Options
|
||||
@c man begin FORMAT OPTIONS
|
||||
|
||||
The libavformat library provides some generic global options, which
|
||||
can be set on all the muxers and demuxers. In addition each muxer or
|
||||
demuxer may support so-called private options, which are specific for
|
||||
that component.
|
||||
|
||||
Options may be set by specifying -@var{option} @var{value} in the
|
||||
FFmpeg tools, or by setting the value explicitly in the
|
||||
@code{AVFormatContext} options or using the @file{libavutil/opt.h} API
|
||||
for programmatic use.
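For instance (an illustrative sketch, not part of the option list
below), the probing options could be raised either on the command line:
@example
ffmpeg -probesize 10000000 -analyzeduration 10000000 -i input.ts output.mkv
@end example
or programmatically through a dictionary passed to
@code{avformat_open_input()}:
@example
AVFormatContext *ic = NULL;
AVDictionary *opts = NULL;
av_dict_set(&opts, "probesize", "10000000", 0);
av_dict_set(&opts, "analyzeduration", "10000000", 0);
avformat_open_input(&ic, "input.ts", NULL, &opts);
av_dict_free(&opts);
@end example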
|
||||
|
||||
The list of supported options follows:
|
||||
|
||||
@table @option
|
||||
@item avioflags @var{flags} (@emph{input/output})
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item direct
|
||||
Reduce buffering.
|
||||
@end table
|
||||
|
||||
@item probesize @var{integer} (@emph{input})
|
||||
Set probing size in bytes, i.e. the size of the data to analyze to get
|
||||
stream information. A higher value will allow more information to be
detected in case it is dispersed into the stream, but will increase
latency. Must be an integer not less than 32. It is 5000000 by default.
|
||||
|
||||
@item packetsize @var{integer} (@emph{output})
|
||||
Set packet size.
|
||||
|
||||
@item fflags @var{flags} (@emph{input/output})
|
||||
Set format flags.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item ignidx
|
||||
Ignore index.
|
||||
@item genpts
|
||||
Generate PTS.
|
||||
@item nofillin
|
||||
Do not fill in missing values that can be exactly calculated.
|
||||
@item noparse
|
||||
Disable AVParsers, this needs @code{+nofillin} too.
|
||||
@item igndts
|
||||
Ignore DTS.
|
||||
@item discardcorrupt
|
||||
Discard corrupted frames.
|
||||
@item sortdts
|
||||
Try to interleave output packets by DTS.
|
||||
@item keepside
|
||||
Do not merge side data.
|
||||
@item latm
|
||||
Enable RTP MP4A-LATM payload.
|
||||
@item nobuffer
|
||||
Reduce the latency introduced by optional buffering
|
||||
@end table
|
||||
|
||||
@item seek2any @var{integer} (@emph{input})
|
||||
Forces seeking to enable seek to any mode if set to 1. Default is 0.
|
||||
|
||||
@item analyzeduration @var{integer} (@emph{input})
|
||||
Specify how many microseconds are analyzed to probe the input. A
|
||||
higher value will allow more accurate information to be detected, but will
|
||||
increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
|
||||
|
||||
@item cryptokey @var{hexadecimal string} (@emph{input})
|
||||
Set decryption key.
|
||||
|
||||
@item indexmem @var{integer} (@emph{input})
|
||||
Set max memory used for timestamp index (per stream).
|
||||
|
||||
@item rtbufsize @var{integer} (@emph{input})
|
||||
Set max memory used for buffering real-time frames.
|
||||
|
||||
@item fdebug @var{flags} (@emph{input/output})
|
||||
Print specific debug info.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item ts
|
||||
@end table
|
||||
|
||||
@item max_delay @var{integer} (@emph{input/output})
|
||||
Set maximum muxing or demuxing delay in microseconds.
|
||||
|
||||
@item fpsprobesize @var{integer} (@emph{input})
|
||||
Set number of frames used to probe fps.
|
||||
|
||||
@item audio_preload @var{integer} (@emph{output})
|
||||
Set microseconds by which audio packets should be interleaved earlier.
|
||||
|
||||
@item chunk_duration @var{integer} (@emph{output})
|
||||
Set microseconds for each chunk.
|
||||
|
||||
@item chunk_size @var{integer} (@emph{output})
|
||||
Set size in bytes for each chunk.
|
||||
|
||||
@item err_detect, f_err_detect @var{flags} (@emph{input})
|
||||
Set error detection flags. @code{f_err_detect} is deprecated and
|
||||
should be used only via the @command{ffmpeg} tool.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item crccheck
|
||||
Verify embedded CRCs.
|
||||
@item bitstream
|
||||
Detect bitstream specification deviations.
|
||||
@item buffer
|
||||
Detect improper bitstream length.
|
||||
@item explode
|
||||
Abort decoding on minor error detection.
|
||||
@item careful
|
||||
Consider things that violate the spec and have not been seen in the
|
||||
wild as errors.
|
||||
@item compliant
|
||||
Consider all spec non-compliances as errors.
|
||||
@item aggressive
|
||||
Consider things that a sane encoder should not do as an error.
|
||||
@end table
|
||||
|
||||
@item use_wallclock_as_timestamps @var{integer} (@emph{input})
|
||||
Use wallclock as timestamps.
|
||||
|
||||
@item avoid_negative_ts @var{integer} (@emph{output})
|
||||
Shift timestamps to make them non-negative. A value of 1 enables shifting,
|
||||
a value of 0 disables it, the default value of -1 enables shifting
|
||||
when required by the target format.
|
||||
|
||||
When shifting is enabled, all output timestamps are shifted by the
|
||||
same amount. Audio, video, and subtitles desynching and relative
|
||||
timestamp differences are preserved compared to how they would have
|
||||
been without shifting.
|
||||
|
||||
Also note that this affects only leading negative timestamps, and not
|
||||
non-monotonic negative timestamps.
|
||||
|
||||
@item skip_initial_bytes @var{integer} (@emph{input})
|
||||
Set number initial bytes to skip. Default is 0.
|
||||
|
||||
@item correct_ts_overflow @var{integer} (@emph{input})
|
||||
Correct single timestamp overflows if set to 1. Default is 1.
|
||||
|
||||
@item flush_packets @var{integer} (@emph{output})
|
||||
Flush the underlying I/O stream after each packet. Default 1 enables it, and
|
||||
has the effect of reducing the latency; 0 disables it and may slightly
|
||||
increase performance in some cases.
|
||||
@end table
|
||||
|
||||
@c man end FORMAT OPTIONS
|
||||
|
||||
@include demuxers.texi
|
||||
@include muxers.texi
|
||||
@include metadata.texi
|
271
doc/general.texi
@@ -13,8 +13,7 @@
|
||||
|
||||
FFmpeg can be hooked up with a number of external libraries to add support
|
||||
for more formats. None of them are used by default, their use has to be
|
||||
explicitly requested by passing the appropriate flags to
|
||||
@command{./configure}.
|
||||
explicitly requested by passing the appropriate flags to @file{./configure}.
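For example, a build that pulls in a couple of the libraries discussed
below might be configured with (adjust the flag list to the libraries
actually installed):
@example
./configure --enable-gpl --enable-libx264 --enable-libmp3lame
@end example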
|
||||
|
||||
@section OpenJPEG
|
||||
|
||||
@@ -24,22 +23,17 @@ instructions. To enable using OpenJPEG in FFmpeg, pass @code{--enable-libopenjp
|
||||
@file{./configure}.
|
||||
|
||||
|
||||
@section OpenCORE, VisualOn, and Fraunhofer libraries
|
||||
@section OpenCORE and VisualOn libraries
|
||||
|
||||
Spun off from Google Android sources, the OpenCore, VisualOn and Fraunhofer
libraries provide encoders for a number of audio codecs.
Spun off from Google Android sources, the OpenCore and VisualOn libraries
provide encoders for a number of audio codecs.
|
||||
|
||||
@float NOTE
|
||||
OpenCORE and VisualOn libraries are under the Apache License 2.0
|
||||
(see @url{http://www.apache.org/licenses/LICENSE-2.0} for details), which is
|
||||
incompatible to the LGPL version 2.1 and GPL version 2. You have to
|
||||
incompatible with the LGPL version 2.1 and GPL version 2. You have to
|
||||
upgrade FFmpeg's license to LGPL version 3 (or if you have enabled
|
||||
GPL components, GPL version 3) by passing @code{--enable-version3} to configure in
|
||||
order to use it.
|
||||
|
||||
The Fraunhofer AAC library is licensed under a license incompatible to the GPL
|
||||
and is not known to be compatible to the LGPL. Therefore, you have to pass
|
||||
@code{--enable-nonfree} to configure to use it.
|
||||
GPL components, GPL version 3) to use it.
|
||||
@end float
|
||||
|
||||
@subsection OpenCORE AMR
|
||||
@@ -68,14 +62,6 @@ Go to @url{http://sourceforge.net/projects/opencore-amr/} and follow the
|
||||
instructions for installing the library.
|
||||
Then pass @code{--enable-libvo-amrwbenc} to configure to enable it.
|
||||
|
||||
@subsection Fraunhofer AAC library
|
||||
|
||||
FFmpeg can make use of the Fraunhofer AAC library for AAC encoding.
|
||||
|
||||
Go to @url{http://sourceforge.net/projects/opencore-amr/} and follow the
|
||||
instructions for installing the library.
|
||||
Then pass @code{--enable-libfdk-aac} to configure to enable it.
|
||||
|
||||
@section LAME
|
||||
|
||||
FFmpeg can make use of the LAME library for MP3 encoding.
|
||||
@@ -84,14 +70,6 @@ Go to @url{http://lame.sourceforge.net/} and follow the
|
||||
instructions for installing the library.
|
||||
Then pass @code{--enable-libmp3lame} to configure to enable it.
|
||||
|
||||
@section TwoLAME
|
||||
|
||||
FFmpeg can make use of the TwoLAME library for MP2 encoding.
|
||||
|
||||
Go to @url{http://www.twolame.org/} and follow the
|
||||
instructions for installing the library.
|
||||
Then pass @code{--enable-libtwolame} to configure to enable it.
|
||||
|
||||
@section libvpx
|
||||
|
||||
FFmpeg can make use of the libvpx library for VP8 encoding.
|
||||
@@ -100,14 +78,6 @@ Go to @url{http://www.webmproject.org/} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libvpx} to configure to
|
||||
enable it.
|
||||
|
||||
@section libwavpack
|
||||
|
||||
FFmpeg can make use of the libwavpack library for WavPack encoding.
|
||||
|
||||
Go to @url{http://www.wavpack.com/} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libwavpack} to configure to
|
||||
enable it.
|
||||
|
||||
@section x264
|
||||
|
||||
FFmpeg can make use of the x264 library for H.264 encoding.
|
||||
@@ -122,20 +92,9 @@ x264 is under the GNU Public License Version 2 or later
|
||||
details), you must upgrade FFmpeg's license to GPL in order to use it.
|
||||
@end float
|
||||
|
||||
@section libilbc
|
||||
|
||||
iLBC is a narrowband speech codec that has been made freely available
|
||||
by Google as part of the WebRTC project. libilbc is a packaging friendly
|
||||
copy of the iLBC codec. FFmpeg can make use of the libilbc library for
|
||||
iLBC encoding and decoding.
|
||||
|
||||
Go to @url{https://github.com/dekkers/libilbc} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libilbc} to configure to
|
||||
enable it.
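As a hedged illustration only (adjust the flags to the libraries actually installed on your system), several of the external libraries described above could be enabled in one @file{./configure} invocation:

@example
# Illustrative configure call; x264 is GPL, so --enable-gpl is required,
# and the OpenCORE/VisualOn libraries additionally need --enable-version3.
./configure --enable-gpl --enable-version3 --enable-libx264 \
            --enable-libmp3lame --enable-libvpx --enable-libopencore-amrnb
@end example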
|
||||
|
||||
|
||||
|
||||
@chapter Supported File Formats, Codecs or Features
|
||||
@chapter Supported File Formats and Codecs
|
||||
|
||||
You can use the @code{-formats} and @code{-codecs} options to get an exhaustive list.
|
||||
|
||||
@@ -156,21 +115,11 @@ library:
|
||||
@item American Laser Games MM @tab @tab X
|
||||
@tab Multimedia format used in games like Mad Dog McCree.
|
||||
@item 3GPP AMR @tab X @tab X
|
||||
@item Amazing Studio Packed Animation File @tab @tab X
|
||||
@tab Multimedia format used in game Heart Of Darkness.
|
||||
@item Apple HTTP Live Streaming @tab @tab X
|
||||
@item Artworx Data Format @tab @tab X
|
||||
@item ADP @tab @tab X
|
||||
@tab Audio format used on the Nintendo Gamecube.
|
||||
@item AFC @tab @tab X
|
||||
@tab Audio format used on the Nintendo Gamecube.
|
||||
@item ASF @tab X @tab X
|
||||
@item AST @tab X @tab X
|
||||
@tab Audio format used on the Nintendo Wii.
|
||||
@item AVI @tab X @tab X
|
||||
@item AVISynth @tab @tab X
|
||||
@item AVR @tab @tab X
|
||||
@tab Audio format used on Mac.
|
||||
@item AVS @tab @tab X
|
||||
@tab Multimedia format used by the Creature Shock game.
|
||||
@item Beam Software SIFF @tab @tab X
|
||||
@@ -184,10 +133,8 @@ library:
|
||||
@tab Used in Z and Z95 games.
|
||||
@item Brute Force & Ignorance @tab @tab X
|
||||
@tab Used in the game Flash Traffic: City of Angels.
|
||||
@item BRSTM @tab @tab X
|
||||
@tab Audio format used on the Nintendo Wii.
|
||||
@item BWF @tab X @tab X
|
||||
@item CRI ADX @tab X @tab X
|
||||
@item CRI ADX @tab @tab X
|
||||
@tab Audio-only format used in console video games.
|
||||
@item Discworld II BMV @tab @tab X
|
||||
@item Interplay C93 @tab @tab X
|
||||
@@ -196,8 +143,6 @@ library:
|
||||
@tab Multimedia format used by Delphine Software games.
|
||||
@item CD+G @tab @tab X
|
||||
@tab Video format used by CD+G karaoke disks
|
||||
@item Commodore CDXL @tab @tab X
|
||||
@tab Amiga CD video format
|
||||
@item Core Audio Format @tab X @tab X
|
||||
@tab Apple Core Audio Format
|
||||
@item CRC testing format @tab X @tab
|
||||
@@ -216,7 +161,6 @@ library:
|
||||
@item Electronic Arts cdata @tab @tab X
|
||||
@item Electronic Arts Multimedia @tab @tab X
|
||||
@tab Used in various EA games; files have extensions like WVE and UV2.
|
||||
@item Ensoniq Paris Audio File @tab @tab X
|
||||
@item FFM (FFserver live feed) @tab X @tab X
|
||||
@item Flash (SWF) @tab X @tab X
|
||||
@item Flash 9 (AVM2) @tab X @tab X
|
||||
@@ -231,12 +175,12 @@ library:
|
||||
@item G.723.1 @tab X @tab X
|
||||
@item G.729 BIT @tab X @tab X
|
||||
@item G.729 raw @tab @tab X
|
||||
@item GIF Animation @tab X @tab X
|
||||
@item GIF Animation @tab X @tab
|
||||
@item GXF @tab X @tab X
|
||||
@tab General eXchange Format SMPTE 360M, used by Thomson Grass Valley
|
||||
playout servers.
|
||||
@item iCEDraw File @tab @tab X
|
||||
@item ICO @tab X @tab X
|
||||
@item ICO @tab @tab X
|
||||
@tab Microsoft Windows ICO
|
||||
@item id Quake II CIN video @tab @tab X
|
||||
@item id RoQ @tab X @tab X
|
||||
@@ -244,20 +188,17 @@ library:
|
||||
@item IEC61937 encapsulation @tab X @tab X
|
||||
@item IFF @tab @tab X
|
||||
@tab Interchange File Format
|
||||
@item iLBC @tab X @tab X
|
||||
@item Interplay MVE @tab @tab X
|
||||
@tab Format used in various Interplay computer games.
|
||||
@item IV8 @tab @tab X
|
||||
@tab A format generated by IndigoVision 8000 video server.
|
||||
@item IVF (On2) @tab X @tab X
|
||||
@tab A format used by libvpx
|
||||
@item IRCAM @tab X @tab X
|
||||
@item LATM @tab X @tab X
|
||||
@item LMLM4 @tab @tab X
|
||||
@tab Used by Linux Media Labs MPEG-4 PCI boards
|
||||
@item LOAS @tab @tab X
|
||||
@tab contains LATM multiplexed AAC audio
|
||||
@item LVF @tab @tab X
|
||||
@item LXF @tab @tab X
|
||||
@tab VR native stream format, used by Leitch/Harris' video servers.
|
||||
@item Matroska @tab X @tab X
|
||||
@@ -267,9 +208,6 @@ library:
|
||||
@item MAXIS XA @tab @tab X
|
||||
@tab Used in Sim City 3000; file extension .xa.
|
||||
@item MD Studio @tab @tab X
|
||||
@item Metal Gear Solid: The Twin Snakes @tab @tab X
|
||||
@item Megalux Frame @tab @tab X
|
||||
@tab Used by Megalux Ultimate Paint
|
||||
@item Mobotix .mxg @tab @tab X
|
||||
@item Monkey's Audio @tab @tab X
|
||||
@item Motion Pixels MVI @tab @tab X
|
||||
@@ -297,7 +235,6 @@ library:
|
||||
@tab SMPTE 386M, D-10/IMX Mapping.
|
||||
@item NC camera feed @tab @tab X
|
||||
@tab NC (AVIP NC4600) camera streams
|
||||
@item NIST SPeech HEader REsources @tab @tab X
|
||||
@item NTT TwinVQ (VQF) @tab @tab X
|
||||
@tab Nippon Telegraph and Telephone Corporation TwinVQ.
|
||||
@item Nullsoft Streaming Video @tab @tab X
|
||||
@@ -306,7 +243,6 @@ library:
|
||||
@tab NUT Open Container Format
|
||||
@item Ogg @tab X @tab X
|
||||
@item Playstation Portable PMP @tab @tab X
|
||||
@item Portable Voice Format @tab @tab X
|
||||
@item TechnoTrend PVA @tab @tab X
|
||||
@tab Used by TechnoTrend DVB PCI boards.
|
||||
@item QCP @tab @tab X
|
||||
@@ -317,7 +253,6 @@ library:
|
||||
@item raw Dirac @tab X @tab X
|
||||
@item raw DNxHD @tab X @tab X
|
||||
@item raw DTS @tab X @tab X
|
||||
@item raw DTS-HD @tab @tab X
|
||||
@item raw E-AC-3 @tab X @tab X
|
||||
@item raw FLAC @tab X @tab X
|
||||
@item raw GSM @tab @tab X
|
||||
@@ -335,9 +270,8 @@ library:
|
||||
@item raw video @tab X @tab X
|
||||
@item raw id RoQ @tab X @tab
|
||||
@item raw Shorten @tab @tab X
|
||||
@item raw TAK @tab @tab X
|
||||
@item raw TrueHD @tab X @tab X
|
||||
@item raw VC-1 @tab X @tab X
|
||||
@item raw VC-1 @tab @tab X
|
||||
@item raw PCM A-law @tab X @tab X
|
||||
@item raw PCM mu-law @tab X @tab X
|
||||
@item raw PCM signed 8 bit @tab X @tab X
|
||||
@@ -363,37 +297,29 @@ library:
|
||||
@tab File format used by RED Digital cameras, contains JPEG 2000 frames and PCM audio.
|
||||
@item RealMedia @tab X @tab X
|
||||
@item Redirector @tab @tab X
|
||||
@item RedSpark @tab @tab X
|
||||
@item Renderware TeXture Dictionary @tab @tab X
|
||||
@item RL2 @tab @tab X
|
||||
@tab Audio and video format used in some games by Entertainment Software Partners.
|
||||
@item RPL/ARMovie @tab @tab X
|
||||
@item Lego Mindstorms RSO @tab X @tab X
|
||||
@item RSD @tab @tab X
|
||||
@item RTMP @tab X @tab X
|
||||
@tab Output is performed by publishing stream to RTMP server
|
||||
@item RTP @tab X @tab X
|
||||
@item RTSP @tab X @tab X
|
||||
@item SAP @tab X @tab X
|
||||
@item SBG @tab @tab X
|
||||
@item SDP @tab @tab X
|
||||
@item Sega FILM/CPK @tab @tab X
|
||||
@tab Used in many Sega Saturn console games.
|
||||
@item Silicon Graphics Movie @tab @tab X
|
||||
@item Sierra SOL @tab @tab X
|
||||
@tab .sol files used in Sierra Online games.
|
||||
@item Sierra VMD @tab @tab X
|
||||
@tab Used in Sierra CD-ROM games.
|
||||
@item Smacker @tab @tab X
|
||||
@tab Multimedia format used by many games.
|
||||
@item SMJPEG @tab X @tab X
|
||||
@tab Used in certain Loki game ports.
|
||||
@item Smush @tab @tab X
|
||||
@tab Multimedia format used in some LucasArts games.
|
||||
@item Sony OpenMG (OMA) @tab X @tab X
|
||||
@item Sony OpenMG (OMA) @tab @tab X
|
||||
@tab Audio format used in Sony Sonic Stage and Sony Vegas.
|
||||
@item Sony PlayStation STR @tab @tab X
|
||||
@item Sony Wave64 (W64) @tab X @tab X
|
||||
@item Sony Wave64 (W64) @tab @tab X
|
||||
@item SoX native format @tab X @tab X
|
||||
@item SUN AU format @tab X @tab X
|
||||
@item Text files @tab @tab X
|
||||
@@ -403,9 +329,8 @@ library:
|
||||
@tab Tiertex .seq files used in the DOS CD-ROM version of the game Flashback.
|
||||
@item True Audio @tab @tab X
|
||||
@item VC-1 test bitstream @tab X @tab X
|
||||
@item Vivo @tab @tab X
|
||||
@item WAV @tab X @tab X
|
||||
@item WavPack @tab X @tab X
|
||||
@item WavPack @tab @tab X
|
||||
@item WebM @tab X @tab X
|
||||
@item Windows Television (WTV) @tab X @tab X
|
||||
@item Wing Commander III movie @tab @tab X
|
||||
@@ -435,14 +360,11 @@ following image formats are supported:
|
||||
@item .Y.U.V @tab X @tab X
|
||||
@tab one raw file per component
|
||||
@item animated GIF @tab X @tab X
|
||||
@tab Only uncompressed GIFs are generated.
|
||||
@item BMP @tab X @tab X
|
||||
@tab Microsoft BMP image
|
||||
@item PIX @tab @tab X
|
||||
@tab PIX is an image format used in the Argonaut BRender engine.
|
||||
@item DPX @tab X @tab X
|
||||
@tab Digital Picture Exchange
|
||||
@item EXR @tab @tab X
|
||||
@tab OpenEXR
|
||||
@item JPEG @tab X @tab X
|
||||
@tab Progressive JPEG is not supported.
|
||||
@item JPEG 2000 @tab X @tab X
|
||||
@@ -468,18 +390,12 @@ following image formats are supported:
|
||||
@tab V.Flash PTX format
|
||||
@item SGI @tab X @tab X
|
||||
@tab SGI RGB image format
|
||||
@item Sun Rasterfile @tab X @tab X
|
||||
@item Sun Rasterfile @tab @tab X
|
||||
@tab Sun RAS image format
|
||||
@item TIFF @tab X @tab X
|
||||
@tab YUV, JPEG and some extensions are not supported yet.
|
||||
@item Truevision Targa @tab X @tab X
|
||||
@tab Targa (.TGA) image format
|
||||
@item XBM @tab X @tab X
|
||||
@tab X BitMap image format
|
||||
@item XFace @tab X @tab X
|
||||
@tab X-Face image format
|
||||
@item XWD @tab X @tab X
|
||||
@tab X Window Dump image format
|
||||
@end multitable
|
||||
|
||||
@code{X} means that encoding (resp. decoding) is supported.
|
||||
@@ -493,15 +409,15 @@ following image formats are supported:
|
||||
@item 4X Movie @tab @tab X
|
||||
@tab Used in certain computer games.
|
||||
@item 8088flex TMV @tab @tab X
|
||||
@item 8SVX exponential @tab @tab X
|
||||
@item 8SVX fibonacci @tab @tab X
|
||||
@item A64 multicolor @tab X @tab
|
||||
@tab Creates video suitable to be played on a commodore 64 (multicolor mode).
|
||||
@item Amazing Studio PAF Video @tab @tab X
|
||||
@item American Laser Games MM @tab @tab X
|
||||
@tab Used in games like Mad Dog McCree.
|
||||
@item AMV Video @tab X @tab X
|
||||
@tab Used in Chinese MP3 players.
|
||||
@item ANSI/ASCII art @tab @tab X
|
||||
@item Apple Intermediate Codec @tab @tab X
|
||||
@item Apple MJPEG-B @tab @tab X
|
||||
@item Apple ProRes @tab X @tab X
|
||||
@item Apple QuickDraw @tab @tab X
|
||||
@@ -519,18 +435,13 @@ following image formats are supported:
|
||||
@item Autodesk Animator Flic video @tab @tab X
|
||||
@item Autodesk RLE @tab @tab X
|
||||
@tab fourcc: AASC
|
||||
@item Avid 1:1 10-bit RGB Packer @tab X @tab X
|
||||
@tab fourcc: AVrp
|
||||
@item AVS (Audio Video Standard) video @tab @tab X
|
||||
@tab Video encoding used by the Creature Shock game.
|
||||
@item AYUV @tab X @tab X
|
||||
@tab Microsoft uncompressed packed 4:4:4:4
|
||||
@item Beam Software VB @tab @tab X
|
||||
@item Bethesda VID video @tab @tab X
|
||||
@tab Used in some games from Bethesda Softworks.
|
||||
@item Bink Video @tab @tab X
|
||||
@item Bitmap Brothers JV video @tab @tab X
|
||||
@item y41p Brooktree uncompressed 4:1:1 12-bit @tab X @tab X
|
||||
@item Brute Force & Ignorance @tab @tab X
|
||||
@tab Used in the game Flash Traffic: City of Angels.
|
||||
@item C93 video @tab @tab X
|
||||
@@ -539,23 +450,19 @@ following image formats are supported:
|
||||
@tab fourcc: CSCD
|
||||
@item CD+G @tab @tab X
|
||||
@tab Video codec for CD+G karaoke disks
|
||||
@item CDXL @tab @tab X
|
||||
@tab Amiga CD video codec
|
||||
@item Chinese AVS video @tab E @tab X
|
||||
@tab AVS1-P2, JiZhun profile, encoding through external library libxavs
|
||||
@item Delphine Software International CIN video @tab @tab X
|
||||
@tab Codec used in Delphine Software International games.
|
||||
@item Discworld II BMV Video @tab @tab X
|
||||
@item Canopus Lossless Codec @tab @tab X
|
||||
@item Cinepak @tab @tab X
|
||||
@item Cirrus Logic AccuPak @tab X @tab X
|
||||
@tab fourcc: CLJR
|
||||
@item CPiA Video Format @tab @tab X
|
||||
@item Creative YUV (CYUV) @tab @tab X
|
||||
@item DFA @tab @tab X
|
||||
@tab Codec used in Chronomaster game.
|
||||
@item Dirac @tab E @tab X
|
||||
@tab supported through external library libschroedinger
|
||||
@tab supported through external libdirac/libschroedinger libraries
|
||||
@item Deluxe Paint Animation @tab @tab X
|
||||
@item DNxHD @tab X @tab X
|
||||
@tab aka SMPTE VC3
|
||||
@@ -576,16 +483,13 @@ following image formats are supported:
|
||||
@item Escape 124 @tab @tab X
|
||||
@item Escape 130 @tab @tab X
|
||||
@item FFmpeg video codec #1 @tab X @tab X
|
||||
@tab lossless codec (fourcc: FFV1)
|
||||
@tab experimental lossless codec (fourcc: FFV1)
|
||||
@item Flash Screen Video v1 @tab X @tab X
|
||||
@tab fourcc: FSV1
|
||||
@item Flash Screen Video v2 @tab X @tab X
|
||||
@item Flash Video (FLV) @tab X @tab X
|
||||
@tab Sorenson H.263 used in Flash
|
||||
@item Forward Uncompressed @tab @tab X
|
||||
@item Fraps @tab @tab X
|
||||
@item Go2Webinar @tab @tab X
|
||||
@tab fourcc: G2M4
|
||||
@item H.261 @tab X @tab X
|
||||
@item H.263 / H.263-1996 @tab X @tab X
|
||||
@item H.263+ / H.263-1998 / H.263 version 2 @tab X @tab X
|
||||
@@ -607,7 +511,6 @@ following image formats are supported:
|
||||
@item Intel H.263 @tab @tab X
|
||||
@item Intel Indeo 2 @tab @tab X
|
||||
@item Intel Indeo 3 @tab @tab X
|
||||
@item Intel Indeo 4 @tab @tab X
|
||||
@item Intel Indeo 5 @tab @tab X
|
||||
@item Interplay C93 @tab @tab X
|
||||
@tab Used in the game Cyberia from Interplay.
|
||||
@@ -622,18 +525,8 @@ following image formats are supported:
|
||||
@item LCL (LossLess Codec Library) MSZH @tab @tab X
|
||||
@item LCL (LossLess Codec Library) ZLIB @tab E @tab E
|
||||
@item LOCO @tab @tab X
|
||||
@item LucasArts Smush @tab @tab X
|
||||
@tab Used in LucasArts games.
|
||||
@item lossless MJPEG @tab X @tab X
|
||||
@item Microsoft ATC Screen @tab @tab X
|
||||
@tab Also known as Microsoft Screen 3.
|
||||
@item Microsoft Expression Encoder Screen @tab @tab X
|
||||
@tab Also known as Microsoft Titanium Screen 2.
|
||||
@item Microsoft RLE @tab @tab X
|
||||
@item Microsoft Screen 1 @tab @tab X
|
||||
@tab Also known as Windows Media Video V7 Screen.
|
||||
@item Microsoft Screen 2 @tab @tab X
|
||||
@tab Also known as Windows Media Video V9 Screen.
|
||||
@item Microsoft Video 1 @tab @tab X
|
||||
@item Mimic @tab @tab X
|
||||
@tab Used in MSN Messenger Webcam streams.
|
||||
@@ -662,8 +555,8 @@ following image formats are supported:
|
||||
@tab fourcc: VP60,VP61,VP62
|
||||
@item VP8 @tab E @tab X
|
||||
@tab fourcc: VP80, encoding supported through external library libvpx
|
||||
@item Pinnacle TARGA CineWave YUV16 @tab @tab X
|
||||
@tab fourcc: Y216
|
||||
@item planar RGB @tab @tab X
|
||||
@tab fourcc: 8BPS
|
||||
@item Prores @tab @tab X
|
||||
@tab fourcc: apch,apcn,apcs,apco
|
||||
@item Q-team QPEG @tab @tab X
|
||||
@@ -675,8 +568,8 @@ following image formats are supported:
|
||||
@tab fourcc: 'smc '
|
||||
@item QuickTime video (RPZA) @tab @tab X
|
||||
@tab fourcc: rpza
|
||||
@item R10K AJA Kona 10-bit RGB Codec @tab X @tab X
|
||||
@item R210 Quicktime Uncompressed RGB 10-bit @tab X @tab X
|
||||
@item R10K AJA Kona 10-bit RGB Codec @tab @tab X
|
||||
@item R210 Quicktime Uncompressed RGB 10-bit @tab @tab X
|
||||
@item Raw Video @tab X @tab X
|
||||
@item RealVideo 1.0 @tab X @tab X
|
||||
@item RealVideo 2.0 @tab X @tab X
|
||||
@@ -687,11 +580,8 @@ following image formats are supported:
|
||||
@tab Texture dictionaries used by the Renderware Engine.
|
||||
@item RL2 video @tab @tab X
|
||||
@tab used in some games by Entertainment Software Partners
|
||||
@item SGI RLE 8-bit @tab @tab X
|
||||
@item Sierra VMD video @tab @tab X
|
||||
@tab Used in Sierra VMD files.
|
||||
@item Silicon Graphics Motion Video Compressor 1 (MVC1) @tab @tab X
|
||||
@item Silicon Graphics Motion Video Compressor 2 (MVC2) @tab @tab X
|
||||
@item Smacker video @tab @tab X
|
||||
@tab Video encoding used in Smacker.
|
||||
@item SMPTE VC-1 @tab @tab X
|
||||
@@ -706,17 +596,12 @@ following image formats are supported:
|
||||
@tab fourcc: SP5X
|
||||
@item TechSmith Screen Capture Codec @tab @tab X
|
||||
@tab fourcc: TSCC
|
||||
@item TechSmith Screen Capture Codec 2 @tab @tab X
|
||||
@tab fourcc: TSC2
|
||||
@item Theora @tab E @tab X
|
||||
@tab encoding supported through external library libtheora
|
||||
@item Tiertex Limited SEQ video @tab @tab X
|
||||
@tab Codec used in DOS CD-ROM FlashBack game.
|
||||
@item Ut Video @tab X @tab X
|
||||
@item v210 QuickTime uncompressed 4:2:2 10-bit @tab X @tab X
|
||||
@item v308 QuickTime uncompressed 4:4:4 @tab X @tab X
|
||||
@item v408 QuickTime uncompressed 4:4:4:4 @tab X @tab X
|
||||
@item v410 QuickTime uncompressed 4:4:4 10-bit @tab X @tab X
|
||||
@item Ut Video @tab @tab X
|
||||
@item V210 Quicktime Uncompressed 4:2:2 10-bit @tab X @tab X
|
||||
@item VBLE Lossless Codec @tab @tab X
|
||||
@item VMware Screen Codec / VMware Video @tab @tab X
|
||||
@tab Codec used in videos captured by VMware.
|
||||
@@ -734,9 +619,6 @@ following image formats are supported:
|
||||
@item WMV7 @tab X @tab X
|
||||
@item YAMAHA SMAF @tab X @tab X
|
||||
@item Psygnosis YOP Video @tab @tab X
|
||||
@item yuv4 @tab X @tab X
|
||||
@tab libquicktime uncompressed packed 4:2:0
|
||||
@item ZeroCodec Lossless Video @tab @tab X
|
||||
@item ZLIB @tab X @tab X
|
||||
@tab part of LCL, encoder experimental
|
||||
@item Zip Motion Blocks Video @tab X @tab X
|
||||
@@ -751,8 +633,7 @@ following image formats are supported:
|
||||
|
||||
@multitable @columnfractions .4 .1 .1 .4
|
||||
@item Name @tab Encoding @tab Decoding @tab Comments
|
||||
@item 8SVX exponential @tab @tab X
|
||||
@item 8SVX fibonacci @tab @tab X
|
||||
@item 8SVX audio @tab @tab X
|
||||
@item AAC+ @tab E @tab X
|
||||
@tab encoding supported through external library libaacplus
|
||||
@item AAC @tab E @tab X
|
||||
@@ -783,21 +664,19 @@ following image formats are supported:
|
||||
@item ADPCM IMA Westwood @tab @tab X
|
||||
@item ADPCM ISS IMA @tab @tab X
|
||||
@tab Used in FunCom games.
|
||||
@item ADPCM IMA Dialogic @tab @tab X
|
||||
@item ADPCM IMA Duck DK3 @tab @tab X
|
||||
@tab Used in some Sega Saturn console games.
|
||||
@item ADPCM IMA Duck DK4 @tab @tab X
|
||||
@tab Used in some Sega Saturn console games.
|
||||
@item ADPCM IMA Radical @tab @tab X
|
||||
@item ADPCM Microsoft @tab X @tab X
|
||||
@item ADPCM MS IMA @tab X @tab X
|
||||
@item ADPCM Nintendo Gamecube AFC @tab @tab X
|
||||
@item ADPCM Nintendo Gamecube DTK @tab @tab X
|
||||
@item ADPCM Nintendo Gamecube THP @tab @tab X
|
||||
@item ADPCM QT IMA @tab X @tab X
|
||||
@item ADPCM SEGA CRI ADX @tab X @tab X
|
||||
@tab Used in Sega Dreamcast games.
|
||||
@item ADPCM Shockwave Flash @tab X @tab X
|
||||
@item ADPCM SMJPEG IMA @tab @tab X
|
||||
@tab Used in certain Loki game ports.
|
||||
@item ADPCM Sound Blaster Pro 2-bit @tab @tab X
|
||||
@item ADPCM Sound Blaster Pro 2.6-bit @tab @tab X
|
||||
@item ADPCM Sound Blaster Pro 4-bit @tab @tab X
|
||||
@@ -808,7 +687,6 @@ following image formats are supported:
|
||||
@tab encoding supported through external library libopencore-amrnb
|
||||
@item AMR-WB @tab E @tab X
|
||||
@tab encoding supported through external library libvo-amrwbenc
|
||||
@item Amazing Studio PAF Audio @tab @tab X
|
||||
@item Apple lossless audio @tab X @tab X
|
||||
@tab QuickTime fourcc 'alac'
|
||||
@item Atrac 1 @tab @tab X
|
||||
@@ -835,7 +713,6 @@ following image formats are supported:
|
||||
@item DSP Group TrueSpeech @tab @tab X
|
||||
@item DV audio @tab @tab X
|
||||
@item Enhanced AC-3 @tab X @tab X
|
||||
@item EVRC (Enhanced Variable Rate Codec) @tab @tab X
|
||||
@item FLAC (Free Lossless Audio Codec) @tab X @tab IX
|
||||
@item G.723.1 @tab X @tab X
|
||||
@item G.729 @tab @tab X
|
||||
@@ -843,33 +720,24 @@ following image formats are supported:
|
||||
@tab encoding supported through external library libgsm
|
||||
@item GSM Microsoft variant @tab E @tab X
|
||||
@tab encoding supported through external library libgsm
|
||||
@item IAC (Indeo Audio Coder) @tab @tab X
|
||||
@item iLBC (Internet Low Bitrate Codec) @tab E @tab E
|
||||
@tab encoding and decoding supported through external library libilbc
|
||||
@item IMC (Intel Music Coder) @tab @tab X
|
||||
@item MACE (Macintosh Audio Compression/Expansion) 3:1 @tab @tab X
|
||||
@item MACE (Macintosh Audio Compression/Expansion) 6:1 @tab @tab X
|
||||
@item MLP (Meridian Lossless Packing) @tab @tab X
|
||||
@tab Used in DVD-Audio discs.
|
||||
@item Monkey's Audio @tab @tab X
|
||||
@tab Only versions 3.97-3.99 are supported.
|
||||
@item MP1 (MPEG audio layer 1) @tab @tab IX
|
||||
@item MP2 (MPEG audio layer 2) @tab IX @tab IX
|
||||
@tab libtwolame can be used alternatively for encoding.
|
||||
@item MP3 (MPEG audio layer 3) @tab E @tab IX
|
||||
@tab encoding supported through external library LAME, ADU MP3 and MP3onMP4 also supported
|
||||
@item MPEG-4 Audio Lossless Coding (ALS) @tab @tab X
|
||||
@item Musepack SV7 @tab @tab X
|
||||
@item Musepack SV8 @tab @tab X
|
||||
@item Nellymoser Asao @tab X @tab X
|
||||
@item Opus @tab E @tab E
|
||||
@tab supported through external library libopus
|
||||
@item PCM A-law @tab X @tab X
|
||||
@item PCM mu-law @tab X @tab X
|
||||
@item PCM signed 8-bit planar @tab X @tab X
|
||||
@item PCM signed 16-bit big-endian planar @tab X @tab X
|
||||
@item PCM signed 16-bit little-endian planar @tab X @tab X
|
||||
@item PCM signed 24-bit little-endian planar @tab X @tab X
|
||||
@item PCM signed 32-bit little-endian planar @tab X @tab X
|
||||
@item PCM 16-bit little-endian planar @tab @tab X
|
||||
@item PCM 32-bit floating point big-endian @tab X @tab X
|
||||
@item PCM 32-bit floating point little-endian @tab X @tab X
|
||||
@item PCM 64-bit floating point big-endian @tab X @tab X
|
||||
@@ -900,34 +768,28 @@ following image formats are supported:
|
||||
@tab Real 28800 bit/s codec
|
||||
@item RealAudio 3.0 (dnet) @tab IX @tab X
|
||||
@tab Real low bitrate AC-3 codec
|
||||
@item RealAudio Lossless @tab @tab X
|
||||
@item RealAudio SIPR / ACELP.NET @tab @tab X
|
||||
@item Shorten @tab @tab X
|
||||
@item Sierra VMD audio @tab @tab X
|
||||
@tab Used in Sierra VMD files.
|
||||
@item Smacker audio @tab @tab X
|
||||
@item SMPTE 302M AES3 audio @tab X @tab X
|
||||
@item SMPTE 302M AES3 audio @tab @tab X
|
||||
@item Sonic @tab X @tab X
|
||||
@tab experimental codec
|
||||
@item Sonic lossless @tab X @tab X
|
||||
@tab experimental codec
|
||||
@item Speex @tab E @tab E
|
||||
@tab supported through external library libspeex
|
||||
@item TAK (Tom's lossless Audio Kompressor) @tab @tab X
|
||||
@item True Audio (TTA) @tab X @tab X
|
||||
@item True Audio (TTA) @tab @tab X
|
||||
@item TrueHD @tab @tab X
|
||||
@tab Used in HD-DVD and Blu-Ray discs.
|
||||
@item TwinVQ (VQF flavor) @tab @tab X
|
||||
@item VIMA @tab @tab X
|
||||
@tab Used in LucasArts SMUSH animations.
|
||||
@item Vorbis @tab E @tab X
|
||||
@tab A native but very primitive encoder exists.
|
||||
@item WavPack @tab E @tab X
|
||||
@tab supported through external library libwavpack
|
||||
@item WavPack @tab @tab X
|
||||
@item Westwood Audio (SND1) @tab @tab X
|
||||
@item Windows Media Audio 1 @tab X @tab X
|
||||
@item Windows Media Audio 2 @tab X @tab X
|
||||
@item Windows Media Audio Lossless @tab @tab X
|
||||
@item Windows Media Audio Pro @tab @tab X
|
||||
@item Windows Media Audio Voice @tab @tab X
|
||||
@end multitable
|
||||
@@ -943,27 +805,13 @@ performance on systems without hardware floating point support).
|
||||
|
||||
@multitable @columnfractions .4 .1 .1 .1 .1
|
||||
@item Name @tab Muxing @tab Demuxing @tab Encoding @tab Decoding
|
||||
@item 3GPP Timed Text @tab @tab @tab X @tab X
|
||||
@item AQTitle @tab @tab X @tab @tab X
|
||||
@item DVB @tab X @tab X @tab X @tab X
|
||||
@item DVD @tab X @tab X @tab X @tab X
|
||||
@item JACOsub @tab X @tab X @tab @tab X
|
||||
@item MicroDVD @tab X @tab X @tab @tab X
|
||||
@item MPL2 @tab @tab X @tab @tab X
|
||||
@item MPsub (MPlayer) @tab @tab X @tab @tab X
|
||||
@item PGS @tab @tab @tab @tab X
|
||||
@item PJS (Phoenix) @tab @tab X @tab @tab X
|
||||
@item RealText @tab @tab X @tab @tab X
|
||||
@item SAMI @tab @tab X @tab @tab X
|
||||
@item SSA/ASS @tab X @tab X @tab X @tab X
|
||||
@item SubRip (SRT) @tab X @tab X @tab X @tab X
|
||||
@item SubViewer v1 @tab @tab X @tab @tab X
|
||||
@item SubViewer @tab @tab X @tab @tab X
|
||||
@item TED Talks captions @tab @tab X @tab @tab X
|
||||
@item VobSub (IDX+SUB) @tab @tab X @tab @tab X
|
||||
@item VPlayer @tab @tab X @tab @tab X
|
||||
@item WebVTT @tab X @tab X @tab @tab X
|
||||
@item XSUB @tab @tab @tab X @tab X
|
||||
@item SSA/ASS @tab X @tab X @tab X @tab X
|
||||
@item DVB @tab X @tab X @tab X @tab X
|
||||
@item DVD @tab X @tab X @tab X @tab X
|
||||
@item MicroDVD @tab X @tab X @tab @tab
|
||||
@item PGS @tab @tab @tab @tab X
|
||||
@item SubRip (SRT) @tab X @tab X @tab X @tab X
|
||||
@item XSUB @tab @tab @tab X @tab X
|
||||
@end multitable
|
||||
|
||||
@code{X} means that the feature is supported.
|
||||
@@ -972,31 +820,19 @@ performance on systems without hardware floating point support).
|
||||
|
||||
@multitable @columnfractions .4 .1
|
||||
@item Name @tab Support
|
||||
@item Apple HTTP Live Streaming @tab X
|
||||
@item file @tab X
|
||||
@item Gopher @tab X
|
||||
@item HLS @tab X
|
||||
@item HTTP @tab X
|
||||
@item HTTPS @tab X
|
||||
@item MMSH @tab X
|
||||
@item MMST @tab X
|
||||
@item MMS @tab X
|
||||
@item pipe @tab X
|
||||
@item RTMP @tab X
|
||||
@item RTMPE @tab X
|
||||
@item RTMPS @tab X
|
||||
@item RTMPT @tab X
|
||||
@item RTMPTE @tab X
|
||||
@item RTMPTS @tab X
|
||||
@item RTP @tab X
|
||||
@item SCTP @tab X
|
||||
@item TCP @tab X
|
||||
@item TLS @tab X
|
||||
@item UDP @tab X
|
||||
@end multitable
|
||||
|
||||
@code{X} means that the protocol is supported.
|
||||
|
||||
@code{E} means that support is provided through an external library.
|
||||
|
||||
|
||||
@section Input/Output Devices
|
||||
|
||||
@@ -1004,34 +840,17 @@ performance on systems without hardware floating point support).
|
||||
@item Name @tab Input @tab Output
|
||||
@item ALSA @tab X @tab X
|
||||
@item BKTR @tab X @tab
|
||||
@item caca @tab @tab X
|
||||
@item DV1394 @tab X @tab
|
||||
@item Lavfi virtual device @tab X @tab
|
||||
@item Linux framebuffer @tab X @tab
|
||||
@item JACK @tab X @tab
|
||||
@item LIBCDIO @tab X
|
||||
@item LIBDC1394 @tab X @tab
|
||||
@item OpenAL @tab X
|
||||
@item OSS @tab X @tab X
|
||||
@item Pulseaudio @tab X @tab
|
||||
@item SDL @tab @tab X
|
||||
@item Video4Linux2 @tab X @tab X
|
||||
@item Video4Linux @tab X @tab
|
||||
@item Video4Linux2 @tab X @tab
|
||||
@item VfW capture @tab X @tab
|
||||
@item X11 grabbing @tab X @tab
|
||||
@end multitable
|
||||
|
||||
@code{X} means that input/output is supported.
|
||||
|
||||
@section Timecode
|
||||
|
||||
@multitable @columnfractions .4 .1 .1
|
||||
@item Codec/format @tab Read @tab Write
|
||||
@item AVI @tab X @tab X
|
||||
@item DV @tab X @tab X
|
||||
@item GXF @tab X @tab X
|
||||
@item MOV @tab X @tab X
|
||||
@item MPEG1/2 @tab X @tab X
|
||||
@item MXF @tab X @tab X
|
||||
@end multitable
|
||||
|
||||
@bye
|
||||
|
@@ -65,14 +65,6 @@ git clone git@@source.ffmpeg.org:ffmpeg <target>
|
||||
This will put the FFmpeg sources into the directory @var{<target>} and let
|
||||
you push back your changes to the remote repository.
|
||||
|
||||
Make sure that you do not have Windows line endings in your checkouts,
|
||||
otherwise you may experience spurious compilation failures. One way to
|
||||
achieve this is to run
|
||||
|
||||
@example
|
||||
git config --global core.autocrlf false
|
||||
@end example
|
||||
|
||||
|
||||
@section Updating the source tree to the latest revision
|
||||
|
||||
@@ -258,32 +250,6 @@ git commit
|
||||
@end example
|
||||
|
||||
|
||||
@chapter Git configuration
|
||||
|
||||
In order to simplify a few workflows, it is advisable to configure both
|
||||
your personal Git installation and your local FFmpeg repository.
|
||||
|
||||
@section Personal Git installation
|
||||
|
||||
Add the following to your @file{~/.gitconfig} to help @command{git send-email}
|
||||
and @command{git format-patch} detect renames:
|
||||
|
||||
@example
|
||||
[diff]
|
||||
renames = copy
|
||||
@end example
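With that in place, patches prepared as in the following hedged sketch (the revision range and output directory are placeholders) will have rename/copy detection applied:

@example
git format-patch -o outgoing/ origin/master..HEAD
@end example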
|
||||
|
||||
@section Repository configuration
|
||||
|
||||
In order to have @command{git send-email} automatically send patches
|
||||
to the ffmpeg-devel mailing list, add the following stanza
|
||||
to @file{/path/to/ffmpeg/repository/.git/config}:
|
||||
|
||||
@example
|
||||
[sendemail]
|
||||
to = ffmpeg-devel@@ffmpeg.org
|
||||
@end example
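With this stanza, a hedged example of sending patches prepared as above (the @file{outgoing/} directory is a placeholder) is simply:

@example
git send-email outgoing/*.patch
@end example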
|
||||
|
||||
@chapter FFmpeg specific
|
||||
|
||||
@section Reverting broken commits
|
||||
@@ -372,43 +338,6 @@ git checkout -b svn_23456 $SHA1
|
||||
|
||||
where @var{$SHA1} is the commit hash from the @command{git log} output.
|
||||
|
||||
|
||||
@chapter pre-push checklist
|
||||
|
||||
Once you have a set of commits that you feel are ready for pushing,
|
||||
work through the following checklist to double-check that everything is in
proper order. This list tries to be exhaustive. In case you are just
|
||||
pushing a typo in a comment, some of the steps may be unnecessary.
|
||||
Apply your common sense, but if in doubt, err on the side of caution.
|
||||
|
||||
First, make sure that the commits and branches you are going to push
|
||||
match what you want pushed and that nothing is missing, extraneous or
|
||||
wrong. You can see what will be pushed by running the git push command
with --dry-run first, and then inspecting the commits listed with
@command{git log -p 1234567..987654}. The @command{git status} command
may help you find local changes that you have forgotten to add.
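A hedged sketch of that inspection, using the placeholder commit range from above:

@example
git push --dry-run
git log -p 1234567..987654
git status
@end example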
|
||||
|
||||
Next let the code pass through a full run of our testsuite.
|
||||
|
||||
@itemize
|
||||
@item @command{make distclean}
|
||||
@item @command{/path/to/ffmpeg/configure}
|
||||
@item @command{make check}
|
||||
@item if fate fails due to missing samples run @command{make fate-rsync} and retry
|
||||
@end itemize
|
||||
|
||||
Make sure all your changes have been checked before pushing them: the
testsuite only checks against regressions, and only to some extent. It
obviously does not check that newly added features/code work unless you have
added a test for that (which is recommended).
|
||||
|
||||
Also note that every single commit should pass the test suite, not just
|
||||
the result of a series of patches.
|
||||
|
||||
Once everything has passed, push the changes to your public ffmpeg clone and post a
merge request to ffmpeg-devel. You can also push them directly, but this is not
recommended.
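As a hedged sketch (the remote name @code{myclone} and the branch are placeholders for your own public clone):

@example
git push myclone master
@end example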
|
||||
|
||||
@chapter Server Issues
|
||||
|
||||
Contact the project admins @email{root@@ffmpeg.org} if you have technical
|
||||
|
288
doc/indevs.texi
288
doc/indevs.texi
@@ -59,7 +59,7 @@ BSD video input device.
|
||||
|
||||
Windows DirectShow input device.
|
||||
|
||||
DirectShow support is enabled when FFmpeg is built with the mingw-w64 project.
|
||||
DirectShow support is enabled when FFmpeg is built with mingw-w64.
|
||||
Currently only audio and video devices are supported.
|
||||
|
||||
Multiple devices may be opened as separate inputs, but they may also be
|
||||
@@ -86,7 +86,7 @@ fail to open.
|
||||
Set the video size in the captured video.
|
||||
|
||||
@item framerate
|
||||
Set the frame rate in the captured video.
|
||||
Set the framerate in the captured video.
|
||||
|
||||
@item sample_rate
|
||||
Set the sample rate (in Hz) of the captured audio.
|
||||
@@ -112,19 +112,6 @@ defaults to 0).
|
||||
Set audio device number for devices with same name (starts at 0,
|
||||
defaults to 0).
|
||||
|
||||
@item pixel_format
|
||||
Select pixel format to be used by DirectShow. This may only be set when
|
||||
the video codec is not set or set to rawvideo.
|
||||
|
||||
@item audio_buffer_size
|
||||
Set audio device buffer size in milliseconds (which can directly
|
||||
impact latency, depending on the device).
|
||||
Defaults to using the audio device's
|
||||
default buffer size (typically some multiple of 500ms).
|
||||
Setting this value too low can degrade performance.
|
||||
See also
|
||||
@url{http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx}
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -192,66 +179,6 @@ ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
|
||||
|
||||
See also @url{http://linux-fbdev.sourceforge.net/}, and fbset(1).
|
||||
|
||||
@section iec61883
|
||||
|
||||
FireWire DV/HDV input device using libiec61883.
|
||||
|
||||
To enable this input device, you need libiec61883, libraw1394 and
|
||||
libavc1394 installed on your system. Use the configure option
|
||||
@code{--enable-libiec61883} to compile with the device enabled.
|
||||
|
||||
The iec61883 capture device supports capturing from a video device
|
||||
connected via IEEE1394 (FireWire), using libiec61883 and the new Linux
|
||||
FireWire stack (juju). This is the default DV/HDV input method in Linux
|
||||
Kernel 2.6.37 and later, since the old FireWire stack was removed.
|
||||
|
||||
Specify the FireWire port to be used as input file, or "auto"
|
||||
to choose the first port connected.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item dvtype
|
||||
Override autodetection of DV/HDV. This should only be used if auto
|
||||
detection does not work, or if usage of a different device type
|
||||
should be prohibited. Treating a DV device as HDV (or vice versa) will
|
||||
not work and result in undefined behavior.
|
||||
The values @option{auto}, @option{dv} and @option{hdv} are supported.
|
||||
|
||||
@item dvbuffer
|
||||
Set the maximum size of the buffer for incoming data, in frames. For DV, this
is an exact value. For HDV, it is not frame exact, since HDV does
not have a fixed frame size.
|
||||
|
||||
@item dvguid
|
||||
Select the capture device by specifying its GUID. Capturing will only
|
||||
be performed from the specified device and fails if no device with the
|
||||
given GUID is found. This is useful to select the input if multiple
|
||||
devices are connected at the same time.
|
||||
Look at /sys/bus/firewire/devices to find out the GUIDs.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
Grab and show the input of a FireWire DV/HDV device.
|
||||
@example
|
||||
ffplay -f iec61883 -i auto
|
||||
@end example
|
||||
|
||||
@item
|
||||
Grab and record the input of a FireWire DV/HDV device,
|
||||
using a packet buffer of 100000 packets if the source is HDV.
|
||||
@example
|
||||
ffmpeg -f iec61883 -i auto -hdvbuffer 100000 out.mpg
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section jack
|
||||
|
||||
JACK input device.
|
||||
@@ -269,12 +196,12 @@ device.
|
||||
Once you have created one or more JACK readable clients, you need to
|
||||
connect them to one or more JACK writable clients.
|
||||
|
||||
To connect or disconnect JACK clients you can use the @command{jack_connect}
|
||||
and @command{jack_disconnect} programs, or do it through a graphical interface,
|
||||
for example with @command{qjackctl}.
|
||||
To connect or disconnect JACK clients you can use the
|
||||
@file{jack_connect} and @file{jack_disconnect} programs, or do it
|
||||
through a graphical interface, for example with @file{qjackctl}.
|
||||
|
||||
To list the JACK clients and their properties you can invoke the command
|
||||
@command{jack_lsp}.
|
||||
@file{jack_lsp}.
|
||||
|
||||
The following example shows how to capture a JACK readable client
with @command{ffmpeg}.
|
||||
@@ -327,28 +254,22 @@ label, but all the others need to be specified explicitly.
|
||||
|
||||
If not specified defaults to the filename specified for the input
|
||||
device.
|
||||
|
||||
@item graph_file
|
||||
Set the filename of the filtergraph to be read and sent to the other
|
||||
filters. Syntax of the filtergraph is the same as the one specified by
|
||||
the option @var{graph}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Create a color video stream and play it back with @command{ffplay}:
|
||||
Create a color video stream and play it back with @file{ffplay}:
|
||||
@example
|
||||
ffplay -f lavfi -graph "color=c=pink [out0]" dummy
|
||||
ffplay -f lavfi -graph "color=pink [out0]" dummy
|
||||
@end example
|
||||
|
||||
@item
|
||||
As the previous example, but use filename for specifying the graph
|
||||
description, and omit the "out0" label:
|
||||
@example
|
||||
ffplay -f lavfi color=c=pink
|
||||
ffplay -f lavfi color=pink
|
||||
@end example
|
||||
|
||||
@item
|
||||
@@ -359,14 +280,14 @@ ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [ou
|
||||
|
||||
@item
|
||||
Read an audio stream from a file using the amovie source and play it
|
||||
back with @command{ffplay}:
|
||||
back with @file{ffplay}:
|
||||
@example
|
||||
ffplay -f lavfi "amovie=test.wav"
|
||||
@end example
|
||||
|
||||
@item
|
||||
Read an audio stream and a video stream and play it back with
|
||||
@command{ffplay}:
|
||||
@file{ffplay}:
|
||||
@example
|
||||
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
|
||||
@end example
|
||||
@@ -459,7 +380,7 @@ $ ffmpeg -f openal -i '' out.ogg
|
||||
@end example
|
||||
|
||||
Capture from two devices simultaneously, writing to two different files,
|
||||
within the same @command{ffmpeg} command:
|
||||
within the same @file{ffmpeg} command:
|
||||
@example
|
||||
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
|
||||
@end example
|
||||
@@ -494,7 +415,7 @@ The filename to provide to the input device is a source device or the
|
||||
string "default"
|
||||
|
||||
To list the pulse source devices and their properties you can invoke
|
||||
the command @command{pactl list sources}.
|
||||
the command @file{pactl list sources}.
|
||||
|
||||
@example
|
||||
ffmpeg -f pulse -i default /tmp/pulse.wav
|
||||
@@ -583,15 +504,9 @@ command:
|
||||
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
|
||||
@end example
|
||||
|
||||
@section video4linux2, v4l2
|
||||
@section video4linux and video4linux2
|
||||
|
||||
Video4Linux2 input video device.
|
||||
|
||||
"v4l2" can be used as alias for "video4linux2".
|
||||
|
||||
If FFmpeg is built with v4l-utils support (by using the
|
||||
@code{--enable-libv4l2} configure option), it is possible to use it with the
|
||||
@code{-use_libv4l2} input device option.
|
||||
Video4Linux and Video4Linux2 input video devices.
|
||||
|
||||
The name of the device to grab is a file device node. Usually Linux
systems tend to automatically create such nodes when the device is
plugged into the system, and the node has a name of the
kind @file{/dev/video@var{N}}, where @var{N} is a number associated to
the device.
|
||||
|
||||
Video4Linux2 devices usually support a limited set of
|
||||
Video4Linux and Video4Linux2 devices only support a limited set of
|
||||
@var{width}x@var{height} sizes and frame rates. You can check which are
|
||||
supported using @command{-list_formats all} for Video4Linux2 devices.
|
||||
Some devices, like TV cards, support one or more standards. It is possible
|
||||
to list all the supported standards using @command{-list_standards all}.
|
||||
supported for example with the command @file{dov4l} for Video4Linux
|
||||
devices and the command @file{v4l-info} for Video4Linux2 devices.
|
||||
|
||||
The time base for the timestamps is 1 microsecond. Depending on the kernel
|
||||
version and configuration, the timestamps may be derived from the real time
|
||||
clock (origin at the Unix Epoch) or the monotonic clock (origin usually at
|
||||
boot time, unaffected by NTP or manual changes to the clock). The
|
||||
@option{-timestamps abs} or @option{-ts abs} option can be used to force
|
||||
conversion into the real time clock.
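For instance, a minimal hedged sketch (the device node and output file are placeholders):

@example
# Force wall clock (absolute) timestamps for the grabbed frames.
ffmpeg -f video4linux2 -ts abs -i /dev/video0 out.mkv
@end example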
|
||||
If the size for the device is set to 0x0, the input device will
|
||||
try to auto-detect the size to use.
|
||||
Only for the video4linux2 device, if the frame rate is set to 0/0 the
|
||||
input device will use the frame rate value already set in the driver.
|
||||
|
||||
Some usage examples of the video4linux2 device with @command{ffmpeg}
|
||||
and @command{ffplay}:
|
||||
@itemize
|
||||
@item
|
||||
Grab and show the input of a video4linux2 device:
|
||||
Video4Linux support is deprecated since Linux 2.6.30, and will be
|
||||
dropped in later versions.
|
||||
|
||||
Note that if FFmpeg is built with v4l-utils support (the "--enable-libv4l2"
option), it will always be used.

Here follow some usage examples of the video4linux devices with the ff*
tools.
|
||||
@example
|
||||
ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
|
||||
# Grab and show the input of a video4linux device, frame rate is set
|
||||
# to the default of 25/1.
|
||||
ffplay -s 320x240 -f video4linux /dev/video0
|
||||
|
||||
# Grab and show the input of a video4linux2 device, auto-adjust size.
|
||||
ffplay -f video4linux2 /dev/video0
|
||||
|
||||
# Grab and record the input of a video4linux2 device, auto-adjust size,
|
||||
# frame rate value defaults to 0/0 so it is read from the video4linux2
|
||||
# driver.
|
||||
ffmpeg -f video4linux2 -i /dev/video0 out.mpeg
|
||||
@end example
|
||||
|
||||
@item
|
||||
Grab and record the input of a video4linux2 device, leave the
|
||||
frame rate and size as previously set:
|
||||
@example
|
||||
ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
For more information about Video4Linux, check @url{http://linuxtv.org/}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item standard
|
||||
Set the standard. Must be the name of a supported standard. To get a
|
||||
list of the supported standards, use the @option{list_standards}
|
||||
option.
|
||||
|
||||
@item channel
|
||||
Set the input channel number. Default to -1, which means using the
|
||||
previously selected channel.
|
||||
|
||||
@item video_size
|
||||
Set the video frame size. The argument must be a string in the form
|
||||
@var{WIDTH}x@var{HEIGHT} or a valid size abbreviation.
|
||||
|
||||
@item pixel_format
|
||||
Select the pixel format (only valid for raw video input).
|
||||
|
||||
@item input_format
|
||||
Set the preferred pixel format (for raw video) or a codec name.
|
||||
This option allows you to select the input format when several are
available.
|
||||
|
||||
@item framerate
|
||||
Set the preferred video frame rate.
|
||||
|
||||
@item list_formats
|
||||
List available formats (supported pixel formats, codecs, and frame
|
||||
sizes) and exit.
|
||||
|
||||
Available values are:
|
||||
@table @samp
|
||||
@item all
|
||||
Show all available (compressed and non-compressed) formats.
|
||||
|
||||
@item raw
|
||||
Show only raw video (non-compressed) formats.
|
||||
|
||||
@item compressed
|
||||
Show only compressed formats.
|
||||
@end table
|
||||
|
||||
@item list_standards
|
||||
List supported standards and exit.
|
||||
|
||||
Available values are:
|
||||
@table @samp
|
||||
@item all
|
||||
Show all supported standards.
|
||||
@end table
|
||||
|
||||
@item timestamps, ts
|
||||
Set type of timestamps for grabbed frames.
|
||||
|
||||
Available values are:
|
||||
@table @samp
|
||||
@item default
|
||||
Use timestamps from the kernel.
|
||||
|
||||
@item abs
|
||||
Use absolute timestamps (wall clock).
|
||||
|
||||
@item mono2abs
|
||||
Force conversion from monotonic to absolute timestamps.
|
||||
@end table
|
||||
|
||||
Default value is @code{default}.
|
||||
@end table
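As a hedged illustration of these options (the device node and output file are placeholders), one might first list what the device supports and then grab with explicit settings:

@example
# List supported formats and standards of the device.
ffmpeg -f video4linux2 -list_formats all -i /dev/video0
ffmpeg -f video4linux2 -list_standards all -i /dev/video0

# Grab using an explicit input format, frame size and frame rate.
ffmpeg -f video4linux2 -input_format mjpeg -framerate 30 -video_size hd720 -i /dev/video0 out.mkv
@end example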
|
||||
"v4l" and "v4l2" can be used as aliases for the respective "video4linux" and
|
||||
"video4linux2".
|
||||
|
||||
@section vfwcap
|
||||
|
||||
@@ -731,29 +579,23 @@ default to 0.
|
||||
|
||||
Check the X11 documentation (e.g. man X) for more detailed information.
|
||||
|
||||
Use the @command{dpyinfo} program for getting basic information about the
|
||||
Use the @file{dpyinfo} program for getting basic information about the
|
||||
properties of your X11 display (e.g. grep for "name" or "dimensions").
|
||||
|
||||
For example to grab from @file{:0.0} using @command{ffmpeg}:
|
||||
@example
|
||||
ffmpeg -f x11grab -r 25 -s cif -i :0.0 out.mpg
|
||||
@end example
|
||||
|
||||
Grab at position @code{10,20}:
|
||||
@example
|
||||
# Grab at position 10,20.
|
||||
ffmpeg -f x11grab -r 25 -s cif -i :0.0+10,20 out.mpg
|
||||
@end example
|
||||
|
||||
@subsection Options
|
||||
@subsection @var{follow_mouse} AVOption
|
||||
|
||||
@table @option
|
||||
@item draw_mouse
|
||||
Specify whether to draw the mouse pointer. A value of @code{0} specify
|
||||
not to draw the pointer. Default value is @code{1}.
|
||||
|
||||
@item follow_mouse
|
||||
Make the grabbed area follow the mouse. The argument can be
|
||||
@code{centered} or a number of pixels @var{PIXELS}.
|
||||
The syntax is:
|
||||
@example
|
||||
-follow_mouse centered|@var{PIXELS}
|
||||
@end example
|
||||
|
||||
When it is specified with "centered", the grabbing region follows the mouse
|
||||
pointer and keeps the pointer at the center of region; otherwise, the region
|
||||
@@ -763,36 +605,28 @@ zero) to the edge of region.
|
||||
For example:
|
||||
@example
|
||||
ffmpeg -f x11grab -follow_mouse centered -r 25 -s cif -i :0.0 out.mpg
|
||||
@end example
|
||||
|
||||
To follow only when the mouse pointer reaches within 100 pixels to edge:
|
||||
@example
|
||||
# Follows only when the mouse pointer reaches within 100 pixels to edge
|
||||
ffmpeg -f x11grab -follow_mouse 100 -r 25 -s cif -i :0.0 out.mpg
|
||||
@end example
|
||||
|
||||
@item framerate
|
||||
Set the grabbing frame rate. Default value is @code{ntsc},
|
||||
corresponding to a frame rate of @code{30000/1001}.
|
||||
@subsection @var{show_region} AVOption
|
||||
|
||||
@item show_region
|
||||
Show grabbed region on screen.
|
||||
The syntax is:
|
||||
@example
|
||||
-show_region 1
|
||||
@end example
|
||||
|
||||
If @var{show_region} is specified with @code{1}, then the grabbing
|
||||
region will be indicated on screen. With this option, it is easy to
|
||||
know what is being grabbed if only a portion of the screen is grabbed.
|
||||
If @var{show_region} AVOption is specified with @var{1}, then the grabbing
|
||||
region will be indicated on screen. With this option, it's easy to know what is
|
||||
being grabbed if only a portion of the screen is grabbed.
|
||||
|
||||
For example:
|
||||
@example
|
||||
ffmpeg -f x11grab -show_region 1 -r 25 -s cif -i :0.0+10,20 out.mpg
|
||||
@end example
|
||||
|
||||
With @var{follow_mouse}:
|
||||
@example
|
||||
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -r 25 -s cif -i :0.0 out.mpg
|
||||
# With follow_mouse
|
||||
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -r 25 -s cif -i :0.0 out.mpg
|
||||
@end example
|
||||
|
||||
@item video_size
|
||||
Set the video frame size. Default value is @code{vga}.
|
||||
@end table
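A hedged sketch combining these options, using the named @option{framerate} and @option{video_size} options instead of the short @option{-r}/@option{-s} forms (display, region and output file are placeholders):

@example
ffmpeg -f x11grab -framerate 25 -video_size cif -show_region 1 -i :0.0+10,20 out.mpg
@end example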
|
||||
|
||||
@c man end INPUT DEVICES
|
||||
|
@@ -24,7 +24,7 @@ a mail for every change to every issue.
|
||||
The subscription URL for the ffmpeg-trac list is:
|
||||
http(s)://ffmpeg.org/mailman/listinfo/ffmpeg-trac
|
||||
The URL of the webinterface of the tracker is:
|
||||
http(s)://trac.ffmpeg.org
|
||||
http(s)://ffmpeg.org/trac/ffmpeg
|
||||
|
||||
Type:
|
||||
-----
|
||||
|
@@ -1,48 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle Libavcodec Documentation
|
||||
@titlepage
|
||||
@center @titlefont{Libavcodec Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
The libavcodec library provides a generic encoding/decoding framework
|
||||
and contains multiple decoders and encoders for audio, video and
|
||||
subtitle streams, and several bitstream filters.
|
||||
|
||||
The shared architecture provides various services ranging from bit
|
||||
stream I/O to DSP optimizations, and makes it suitable for
|
||||
implementing robust and fast codecs as well as for experimentation.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-codecs.html,ffmpeg-codecs}, @url{ffmpeg-bitstream-filters.html,bitstream-filters},
|
||||
@url{libavutil.html,libavutil}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
|
||||
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1),
|
||||
libavutil(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename libavcodec
|
||||
@settitle media streams decoding and encoding library
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,45 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle Libavdevice Documentation
|
||||
@titlepage
|
||||
@center @titlefont{Libavdevice Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
The libavdevice library provides a generic framework for grabbing from
|
||||
and rendering to many common multimedia input/output devices, and
|
||||
supports several input and output devices, including Video4Linux2,
|
||||
VfW, DShow, and ALSA.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-devices.html,ffmpeg-devices},
|
||||
@url{libavutil.html,libavutil}, @url{libavcodec.html,libavcodec}, @url{libavformat.html,libavformat}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
|
||||
ffmpeg-devices(1),
|
||||
libavutil(3), libavcodec(3), libavformat(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename libavdevice
|
||||
@settitle multimedia device handling library
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -9,36 +9,84 @@
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
@chapter Introduction
|
||||
|
||||
The libavfilter library provides a generic audio/video filtering
|
||||
framework containing several filters, sources and sinks.
|
||||
Libavfilter is the filtering API of FFmpeg. It is the substitute for the
now deprecated 'vhooks' and started as a Google Summer of Code project.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
Audio filtering integration into the main FFmpeg repository is a work in
|
||||
progress, so audio API and ABI should not be considered stable yet.
|
||||
|
||||
@chapter See Also
|
||||
@chapter Tutorial
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-filters.html,ffmpeg-filters},
|
||||
@url{libavutil.html,libavutil}, @url{libswscale.html,libswscale}, @url{libswresample.html,libswresample},
|
||||
@url{libavcodec.html,libavcodec}, @url{libavformat.html,libavformat}, @url{libavdevice.html,libavdevice}
|
||||
@end ifhtml
|
||||
In libavfilter, it is possible for filters to have multiple inputs and
|
||||
multiple outputs.
|
||||
To illustrate the sorts of things that are possible, we can
|
||||
use a complex filter graph. For example, the following one:
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
|
||||
ffmpeg-filters(1),
|
||||
libavutil(3), libswscale(3), libswresample(3), libavcodec(3), libavformat(3), libavdevice(3)
|
||||
@end ifnothtml
|
||||
@example
|
||||
input --> split --> fifo -----------------------> overlay --> output
|
||||
| ^
|
||||
| |
|
||||
+------> fifo --> crop --> vflip --------+
|
||||
@end example
|

@include authors.texi
splits the stream into two streams, sends one stream through the crop filter
and the vflip filter before merging it back with the other stream by
overlaying it on top. You can use the following command to achieve this:

@ignore
@example
ffmpeg -i input -vf "[in] split [T1], fifo, [T2] overlay=0:H/2 [out]; [T1] fifo, crop=iw:ih/2:0:ih/2, vflip [T2]" output
@end example

@setfilename libavfilter
@settitle multimedia filtering library
The result will be that the top half of the output video is mirrored
onto the bottom half.

@end ignore
Video filters are loaded using the @var{-vf} option passed to
ffmpeg or to ffplay. Filters in the same linear chain are separated by
commas. In our example, @var{split, fifo, overlay} are in one linear
chain, and @var{fifo, crop, vflip} are in another. The points where
the linear chains join are labeled by names enclosed in square
brackets. In our example, that is @var{[T1]} and @var{[T2]}. The magic
labels @var{[in]} and @var{[out]} are the points where video is input
and output.

Some filters take a list of parameters as input: they are specified
after the filter name and an equals sign, and are separated from each
other by a colon, as in the crop and overlay invocations above.
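
As a hypothetical illustration (the input and output names here are only
placeholders), the crop invocation from the example above can be chained
with a scale filter; each filter takes colon-separated parameters, and the
two filters are joined with a comma:

@example
ffmpeg -i input -vf "crop=iw:ih/2:0:ih/2, scale=640:360" output
@end example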

There are so-called @var{source filters} that do not have a video
input, and we expect that in the future there will be @var{sink filters}
that do not have a video output.

@chapter graph2dot

The @file{graph2dot} program included in the FFmpeg @file{tools}
directory can be used to parse a filter graph description and issue a
corresponding textual representation in the dot language.

Invoke the command:
@example
graph2dot -h
@end example

to see how to use @file{graph2dot}.

You can then pass the dot description to the @file{dot} program (from
the graphviz suite of programs) and obtain a graphical representation
of the filter graph.

For example, the sequence of commands:
@example
echo @var{GRAPH_DESCRIPTION} | \
tools/graph2dot -o graph.tmp && \
dot -Tpng graph.tmp -o graph.png && \
display graph.png
@end example

can be used to create and display an image representing the graph
described by the @var{GRAPH_DESCRIPTION} string.
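
As a concrete illustration, a simple graph description such as
@code{nullsrc, scale=640:360, nullsink} (chosen here purely as an example)
can be fed to @file{graph2dot} in the same way:

@example
echo "nullsrc, scale=640:360, nullsink" | \
tools/graph2dot -o graph.tmp && \
dot -Tpng graph.tmp -o graph.png && \
display graph.png
@end example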

@include filters.texi

@bye
@@ -1,48 +0,0 @@
\input texinfo @c -*- texinfo -*-

@settitle Libavformat Documentation
@titlepage
@center @titlefont{Libavformat Documentation}
@end titlepage

@top

@contents

@chapter Description
@c man begin DESCRIPTION

The libavformat library provides a generic framework for multiplexing
and demultiplexing (muxing and demuxing) audio, video and subtitle
streams. It encompasses multiple muxers and demuxers for multimedia
container formats.

It also supports several input and output protocols to access a media
resource.

@c man end DESCRIPTION
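
As a rough command-line illustration (the file names are placeholders),
remuxing a stream from one container format to another without re-encoding
leaves the codec data untouched, so the work done is essentially the
container-level demuxing and muxing provided by this library:

@example
ffmpeg -i input.mkv -codec copy output.mp4
@end example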

@chapter See Also

@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-formats.html,ffmpeg-formats}, @url{ffmpeg-protocols.html,ffmpeg-protocols},
@url{libavutil.html,libavutil}, @url{libavcodec.html,libavcodec}
@end ifhtml

@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-formats(1), ffmpeg-protocols(1),
libavutil(3), libavcodec(3)
@end ifnothtml

@include authors.texi

@ignore

@setfilename libavformat
@settitle multimedia muxing and demuxing library

@end ignore

@bye
@@ -1,44 +0,0 @@
\input texinfo @c -*- texinfo -*-

@settitle Libavutil Documentation
@titlepage
@center @titlefont{Libavutil Documentation}
@end titlepage

@top

@contents

@chapter Description
@c man begin DESCRIPTION

The libavutil library is a utility library to aid portable
multimedia programming. It contains safe portable string functions,
random number generators, data structures, additional mathematics
functions, cryptography and multimedia related functionality (like
enumerations for pixel and sample formats).

@c man end DESCRIPTION

@chapter See Also

@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-utils.html,ffmpeg-utils}
@end ifhtml

@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-utils(1)
@end ifnothtml

@include authors.texi

@ignore

@setfilename libavutil
@settitle multimedia-biased utility library

@end ignore

@bye
@@ -1,70 +0,0 @@
\input texinfo @c -*- texinfo -*-

@settitle Libswresample Documentation
@titlepage
@center @titlefont{Libswresample Documentation}
@end titlepage

@top

@contents

@chapter Description
@c man begin DESCRIPTION

The libswresample library performs highly optimized audio resampling,
rematrixing and sample format conversion operations.

Specifically, this library performs the following conversions:

@itemize
@item
@emph{Resampling}: is the process of changing the audio rate, for
example from a high sample rate of 44100Hz to 8000Hz. Audio
conversion from a high to a low sample rate is a lossy process. Several
resampling options and algorithms are available.

@item
@emph{Format conversion}: is the process of converting the type of
samples, for example from 16-bit signed samples to unsigned 8-bit or
float samples. It also handles packing conversion, when passing from
packed layout (all samples belonging to distinct channels interleaved
in the same buffer) to planar layout (all samples belonging to the
same channel stored in a dedicated buffer or "plane").

@item
@emph{Rematrixing}: is the process of changing the channel layout, for
example from stereo to mono. When the input channels cannot be mapped
to the output streams, the process is lossy, since it involves
different gain factors and mixing.
@end itemize

Various other audio conversions (e.g. stretching and padding) are
enabled through dedicated options (see the example below).
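
For instance, a single ffmpeg invocation along the following lines (the
file names are placeholders) exercises all three conversions, which ffmpeg
performs internally through its resampling layer: resampling to 8000Hz,
rematrixing stereo to mono, and converting the samples to unsigned 8-bit:

@example
ffmpeg -i input.wav -ar 8000 -ac 1 -acodec pcm_u8 output.wav
@end example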

@c man end DESCRIPTION

@chapter See Also

@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-resampler.html,ffmpeg-resampler},
@url{libavutil.html,libavutil}
@end ifhtml

@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-resampler(1),
libavutil(3)
@end ifnothtml

@include authors.texi

@ignore

@setfilename libswresample
@settitle audio resampling library

@end ignore

@bye
@@ -1,63 +0,0 @@
\input texinfo @c -*- texinfo -*-

@settitle Libswscale Documentation
@titlepage
@center @titlefont{Libswscale Documentation}
@end titlepage

@top

@contents

@chapter Description
@c man begin DESCRIPTION

The libswscale library performs highly optimized image scaling and
colorspace and pixel format conversion operations.

Specifically, this library performs the following conversions:

@itemize
@item
@emph{Rescaling}: is the process of changing the video size. Several
rescaling options and algorithms are available. This is usually a
lossy process.

@item
@emph{Pixel format conversion}: is the process of converting the image
format and colorspace of the image, for example from planar YUV420P to
packed RGB24. It also handles packing conversion, that is, converting
from packed layout (all pixels belonging to distinct planes
interleaved in the same buffer) to planar layout (all samples
belonging to the same plane stored in a dedicated buffer or "plane").

This is usually a lossy process when the source and destination
colorspaces differ. A command-line illustration follows the list.
@end itemize
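
For example, the following ffmpeg command (the file names are chosen only
for illustration) has libswscale rescale the video to 640x360 and convert
it to the yuv420p pixel format:

@example
ffmpeg -i input.mov -vf scale=640:360 -pix_fmt yuv420p output.mp4
@end example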

@c man end DESCRIPTION

@chapter See Also

@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-scaler.html,ffmpeg-scaler},
@url{libavutil.html,libavutil}
@end ifhtml

@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-scaler(1),
libavutil(3)
@end ifnothtml

@include authors.texi

@ignore

@setfilename libswscale
@settitle video scaling and pixel format conversion library

@end ignore

@bye
@@ -65,20 +65,4 @@ title=chapter \#1
title=multi\
line
@end example

By using the ffmetadata muxer and demuxer it is possible to extract
metadata from an input file to an ffmetadata file, edit that file, and
then transcode the input into an output file carrying the edited metadata.

Extracting an ffmetadata file with @file{ffmpeg} goes as follows:
@example
ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE
@end example
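
The extracted FFMETADATAFILE is plain text, starting with an
@code{;FFMETADATA1} header line followed by @code{key=value} pairs, so it
can be edited with any text editor. A hypothetical edited file (the values
are made up for illustration) might look like:

@example
;FFMETADATA1
title=My Edited Title
artist=Some Artist
@end example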

Reinserting edited metadata information from the FFMETADATAFILE file can
be done as:
@example
ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT
@end example

@c man end METADATA
doc/mips.txt
@@ -1,70 +0,0 @@
MIPS optimizations info
===============================================

MIPS optimizations of codecs target the MIPS 74k family of
CPUs. Some of these optimizations rely more on properties of
this architecture and some rely less (and can be used on most
MIPS architectures without degradation in performance).

Along with the FFmpeg copyright notice, there is a MIPS copyright notice
in all the files that are created by people from MIPS Technologies.

Example of copyright notice:
===============================================
/*
 * Copyright (c) 2012
 *      MIPS Technologies, Inc., California.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Author: Author Name (author_name@mips.com)
 */

Files that have MIPS copyright notice in them:
===============================================
* libavutil/mips/
      float_dsp_mips.c
      libm_mips.h
* libavcodec/mips/
      aaccoder_mips.c
      aacpsy_mips.h
      ac3dsp_mips.c
      acelp_filters_mips.c
      acelp_vectors_mips.c
      amrwbdec_mips.c
      amrwbdec_mips.h
      celp_filters_mips.c
      celp_math_mips.c
      compute_antialias_fixed.h
      compute_antialias_float.h
      lsp_mips.h
      dsputil_mips.c
      fft_mips.c
      fft_table.h
      fft_init_table.c
      fmtconvert_mips.c
      iirfilter_mips.c
      mpegaudiodsp_mips_fixed.c
      mpegaudiodsp_mips_float.c
@@ -57,11 +57,6 @@ which re-allocates them for other threads.
Add CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little
speed gain at this point but it should work.

If there are inter-frame dependencies, so the codec calls
ff_thread_report/await_progress(), set AVCodecInternal.allocate_progress. The
frames must then be freed with ff_thread_release_buffer().
Otherwise leave it at zero and decode directly into the user-supplied frames.

Call ff_thread_report_progress() after some part of the current picture has decoded.
A good place to put this is where draw_horiz_band() is called - add this if it isn't
called anywhere, as it's useful too and the implementation is trivial when you're
Some files were not shown because too many files have changed in this diff.