Compare commits
96 Commits
Commit SHA1s:

39fe8033bb, 964f8419dd, 5c2ffa2ce7, 6be8e44c00, 565581b0a4, dc85ca0945, a53ca16ae9, 92e5e62156,
e1c9434424, af3f7c88f2, b4294e2319, 7d97936495, f9fc08de65, 31d3b3b5d5, 706809adb2, 4e66ca5f37,
530ce76a05, 0e74b21427, 381a914024, d0d9182d3e, 320704f739, be1d65031f, bbc4d287c9, 3bb942e6f0,
e863d3306f, 433ec3afa3, d0cb4dc471, 95b1cbc4cb, 177fc2438a, 46b99bb70c, 03275ed219, ae5f69a98f,
dc72a59fe5, 70af4f209f, 0964e189da, 7883efbbb2, af67af5938, d3c564b784, bb5314daba, 33a3f1fe9d,
ac3fc94eb0, d838c40823, eea82203ba, 617d91b76a, f1150e0c7d, f46b57657b, 662ab44c2d, cc0a684497,
7f0f6602cb, d5d5f96068, 91ac6d9902, 0a311df6d9, 48094cb23a, d3d6849d17, ddb92c6df1, 55a97399bc,
eecbd9a78f, a441a96eb2, dff4dbdf60, 622e926e9c, 8ad71e276c, cdcd99eecc, 9dc14fd3f0, ba2d20b449,
b395bac383, 3768afd5a5, e0f0486c7f, 56e987d2cd, c64bfd7c3d, 0e2b69b4e6, 63040dcddd, 1e78d75d6a,
91fdba3d79, 68efb539e3, ee50ec19c7, 3a68e989ec, 61a72fd9c8, c254214ea3, 59417897be, 03f82b5668,
7f8059bdfe, 0f9098cb18, c4e3dd06e8, 88a145738b, 1ed0a61ea8, 9f2905d299, f6c3fe94da, 727749d30f,
c7c82acf96, f8f5db3b70, 51157dab37, 982caeac3e, d108d0804a, 072e7fad87, 484302d183, 734cfa8e8b
.gitignore (vendored, 104)
@@ -1,68 +1,64 @@
.config
.version
*.a
*.o
*.d
*.def
*.dll
*.dylib
*.exe
*.exp
*.h.c
*.ilk
*.ho
*.lib
*.pc
*.pdb
*.so
*.so.*
*.ver
*-example
*-test
*_g
/.config
/.version
/ffmpeg
/ffplay
/ffprobe
/ffserver
/config.*
/version.h
/doc/*.1
/doc/*.3
/doc/*.html
/doc/*.pod
/doc/avoptions_codec.texi
/doc/avoptions_format.texi
/doc/examples/decoding_encoding
/doc/examples/demuxing
/doc/examples/filtering_audio
/doc/examples/filtering_video
/doc/examples/metadata
/doc/examples/muxing
/doc/examples/resampling_audio
/doc/examples/scaling_video
/doc/fate.txt
/doc/doxy/html/
/doc/print_options
/libavcodec/*_tablegen
/libavcodec/*_tables.c
/libavcodec/*_tables.h
/libavutil/avconfig.h
/tests/audiogen
/tests/base64
/tests/data/
/tests/rotozoom
/tests/tiny_psnr
/tests/videogen
/tests/vsynth1/
/tools/aviocat
/tools/ffbisect
/tools/bisect.need
/tools/cws2fws
/tools/fourcc2pixfmt
/tools/ffescape
/tools/ffeval
/tools/graph2dot
/tools/ismindex
/tools/pktdumper
/tools/probetest
/tools/qt-faststart
/tools/trasher
*.def
*.dll
*.lib
*.exp
config.*
doc/*.1
doc/*.html
doc/*.pod
doc/fate.txt
doxy
ffmpeg
ffplay
ffprobe
ffserver
avconv
doc/avoptions_codec.texi
doc/avoptions_format.texi
doc/print_options
doc/examples/decoding_encoding
doc/examples/filtering_audio
doc/examples/filtering_video
doc/examples/metadata
doc/examples/muxing
libavcodec/*_tablegen
libavcodec/*_tables.c
libavcodec/*_tables.h
libavcodec/codec_names.h
libavutil/avconfig.h
tests/audiogen
tests/base64
tests/data
tests/rotozoom
tests/tiny_psnr
tests/videogen
tests/vsynth1
tests/vsynth2
tools/aviocat
tools/cws2fws
tools/ffeval
tools/graph2dot
tools/ismindex
tools/lavfi-showfiltfmts
tools/pktdumper
tools/probetest
tools/qt-faststart
tools/trasher
version.h
Changelog (211)
@@ -1,209 +1,15 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.

version <next>:
- h264: fix deadlocks with broken/fuzzed files
- flvdec: make decoder more robust
- vorbisdec: fix buffer overflow (CVE-2013-0894)
- ac3dec: validate channel output mode against channel count
- doc: minor improvements
- loco: check that there is data left after decoding a plane.
- mov: use the format context for logging.
- lagarith: avoid infinite loop in lag_rac_refill() with corrupted files
- flicvideo: avoid an infinite loop in byte run compression
- av_memcpy_backptr: avoid an infinite loop for back = 0
- mlpdec: do not try to allocate a zero-sized output buffer.
- qtrle: add more checks against pixel_ptr being negative.
- 4xm: check the return value of read_huffman_tables().
- cavs: initialize various context tables, avoids crashes with corrupted files
- x86/H.264: Don't use redzone in AVX h264_deblock on Win64
- VQA video: check chunk sizes before reading chunks
- RoQ video decoder: check dimensions validity
- QDM2: check array index before use, fix out of array accesses
- mpegvideo: Do REBASE_PICTURE with byte pointers
- SVQ3: unbreak decoding
- libopencore-amrwb: Make AMR-WB ifdeffery more precise
- libopencore-amr: Conditionally compile decoder and encoder bits
- arm: Fall back to runtime cpu feature detection via /proc/cpuinfo
- xxan: properly handle odd heights
- msrledec: check bounds before constructing a possibly invalid pointer,
- qtrle: fix the topmost line for 1bit
- aasc: fix output for msrle compression
- yop: check for input overreads
- yop: check that extradata is large enough
- fraps: fix off-by one bug for version 1
- vf_delogo: fix copying the input frame
- vf_delogo: fix an uninitialized read
- dnxhdenc: fix invalid reads in dnxhd_mb_var_thread()
- ATRAC3: use correct loop variable in add_tonal_components()
- MLP: store the channel layout for each substream
- MLP decoder: TrueHD: use Libav channel order
- x86: ac3: Fix HAVE_MMXEXT condition to only refer to external assembly
- arm: vp8: Fix the plain-armv6 version of vp8_luma_dc_wht
- lavr: call mix_function_init() in ff_audio_mix_set_matrix()
- rtpenc_chain: Use the original AVFormatContext for getting payload type
- rtp: Make sure the output format pointer is set
- rtp: Make sure priv_data is set before reading it
- videodsp_armv5te: remove #if HAVE_ARMV5TE_EXTERNAL
- get_bits: change the failure condition in init_get_bits
- mpegvideo: fix loop condition in draw_line()
- fate: update ref after rv30_loop_filter fix
- RV30: fix masking in rv30_loop_filter()
- libcdio: support recent cdio-paranoia
- Theora: Skip zero-sized headers
- H.264: add 3 pixels below for subpixel filter wait position
- H.264: fix ff_generate_sliding_window_mmcos() prototype
- H.264: don't clobber mmco opcode tables for non-first slice headers
- libx264: use the library specific default rc_initial_buffer_occupancy
- lavc: set the default rc_initial_buffer_occupancy
- lavc: introduce the convenience function init_get_bits8
- lavc: check for overflow in init_get_bits
- configure: enable pic for shared libs on AArch64
- zmbv: Reset the decoder on keyframe errors
- VC1 decoder: prevent a crash due missing pred_flag parameter
- matroska: Fix use after free
- VP3: Fix double free in vp3_decode_end()
- Fix a crash on windows platforms related to automatic stack alignment
  in libavresample
- Fix memleaks in the Ogg demuxer. Related to CVE-2012-2882


version 1.1:

- stream disposition information printing in ffprobe
- filter for loudness analysis following EBU R128
- Opus encoder using libopus
- ffprobe -select_streams option
- Pinnacle TARGA CineWave YUV16 decoder
- TAK demuxer, decoder and parser
- DTS-HD demuxer
- remove -same_quant, it hasn't worked for years
- FFM2 support
- X-Face image encoder and decoder
- 24-bit FLAC encoding
- multi-channel ALAC encoding up to 7.1
- metadata (INFO tag) support in WAV muxer
- subtitles raw text decoder
- support for building DLLs using MSVC
- LVF demuxer
- ffescape tool
- metadata (info chunk) support in CAF muxer
- field filter ported from libmpcodecs
- AVR demuxer
- geq filter ported from libmpcodecs
- remove ffserver daemon mode
- AST muxer/demuxer
- new expansion syntax for drawtext
- BRender PIX image decoder
- ffprobe -show_entries option
- ffprobe -sections option
- ADPCM IMA Dialogic decoder
- BRSTM demuxer
- animated GIF decoder and demuxer
- PVF demuxer
- subtitles filter
- IRCAM muxer/demuxer
- Paris Audio File demuxer
- Virtual concatenation demuxer
- VobSub demuxer
- JSON captions for TED talks decoding support
- SOX Resampler support in libswresample
- aselect filter
- SGI RLE 8-bit decoder
- Silicon Graphics Motion Video Compressor 1 & 2 decoder
- Silicon Graphics Movie demuxer
- apad filter
- Resolution & pixel format change support with multithreading for H.264
- documentation split into per-component manuals
- pp (postproc) filter ported from MPlayer
- NIST Sphere demuxer
- MPL2, VPlayer, MPlayer, AQTitle, PJS and SubViewer v1 subtitles demuxers and decoders
- Sony Wave64 muxer
- adobe and limelight publisher authentication in RTMP
- data: URI scheme
- support building on the Plan 9 operating system
- kerndeint filter ported from MPlayer
- histeq filter ported from VirtualDub
- Megalux Frame demuxer
- 012v decoder
- Improved AVC Intra decoding support


version 1.0:

- INI and flat output in ffprobe
- Scene detection in libavfilter
- Indeo Audio decoder
- channelsplit audio filter
- setnsamples audio filter
- atempo filter
- ffprobe -show_data option
- RTMPT protocol support
- iLBC encoding/decoding via libilbc
- Microsoft Screen 1 decoder
- join audio filter
- audio channel mapping filter
- Microsoft ATC Screen decoder
- RTSP listen mode
- TechSmith Screen Codec 2 decoder
- AAC encoding via libfdk-aac
- Microsoft Expression Encoder Screen decoder
- RTMPS protocol support
- RTMPTS protocol support
- RTMPE protocol support
- RTMPTE protocol support
- showwaves and showspectrum filter
- LucasArts SMUSH playback support
- SAMI, RealText and SubViewer demuxers and decoders
- Heart Of Darkness PAF playback support
- iec61883 device
- asettb filter
- new option: -progress
- 3GPP Timed Text encoder/decoder
- GeoTIFF decoder support
- ffmpeg -(no)stdin option
- Opus decoder using libopus
- caca output device using libcaca
- alphaextract and alphamerge filters
- concat filter
- flite filter
- Canopus Lossless Codec decoder
- bitmap subtitles in filters (experimental and temporary)
- MP2 encoding via TwoLAME
- bmp parser
- smptebars source
- asetpts filter
- hue filter
- ICO muxer
- SubRip encoder and decoder without embedded timing
- edge detection filter
- framestep filter
- ffmpeg -shortest option is now per-output file
-pass and -passlogfile are now per-output stream
- volume measurement filter
- Ut Video encoder
- Microsoft Screen 2 decoder
- smartblur filter ported from MPlayer
- CPiA decoder
- decimate filter ported from MPlayer
- RTP depacketization of JPEG
- Smooth Streaming live segmenter muxer
- F4V muxer
- sendcmd and asendcmd filters
- WebVTT demuxer and decoder (simple tags supported)
- RTP packetization of JPEG
- faststart option in the MOV/MP4 muxer
- support for building with MSVC


version next:

version 0.11:

- Fixes: CVE-2012-2772, CVE-2012-2774, CVE-2012-2775, CVE-2012-2776, CVE-2012-2777,
  CVE-2012-2779, CVE-2012-2782, CVE-2012-2783, CVE-2012-2784, CVE-2012-2785,
  CVE-2012-2786, CVE-2012-2787, CVE-2012-2788, CVE-2012-2789, CVE-2012-2790,
  CVE-2012-2791, CVE-2012-2792, CVE-2012-2793, CVE-2012-2794, CVE-2012-2795,
  CVE-2012-2796, CVE-2012-2797, CVE-2012-2798, CVE-2012-2799, CVE-2012-2800,
  CVE-2012-2801, CVE-2012-2802, CVE-2012-2803, CVE-2012-2804,
  Fixes:CVE-2012-2772, CVE-2012-2774, CVE-2012-2775, CVE-2012-2776, CVE-2012-2777,
  CVE-2012-2779, CVE-2012-2782, CVE-2012-2783, CVE-2012-2784, CVE-2012-2785,
  CVE-2012-2786, CVE-2012-2787, CVE-2012-2788, CVE-2012-2789, CVE-2012-2790,
  CVE-2012-2791, CVE-2012-2792, CVE-2012-2793, CVE-2012-2794, CVE-2012-2795,
  CVE-2012-2796, CVE-2012-2797, CVE-2012-2798, CVE-2012-2799, CVE-2012-2800,
  CVE-2012-2801, CVE-2012-2802, CVE-2012-2803, CVE-2012-2804,
- v408 Quicktime and Microsoft AYUV Uncompressed 4:4:4:4 encoder and decoder
- setfield filter
- CDXL demuxer and decoder
@@ -234,14 +40,13 @@ version 0.11:
- accept + prefix to -pix_fmt option to disable automatic conversions.
- complete audio filtering in libavfilter and ffmpeg
- add fps filter
- audio split filter
- vorbis parser
- png parser
- audio mix filter
- ffv1: support (draft) version 1.3


version 0.10:

- Fixes: CVE-2011-3929, CVE-2011-3934, CVE-2011-3935, CVE-2011-3936,
  CVE-2011-3937, CVE-2011-3940, CVE-2011-3941, CVE-2011-3944,
  CVE-2011-3945, CVE-2011-3946, CVE-2011-3947, CVE-2011-3949,

Doxyfile
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 1.1.4
PROJECT_NUMBER = 0.11.1

# With the PROJECT_LOGO tag one can specify an logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
@@ -44,7 +44,7 @@ PROJECT_LOGO =
# If a relative path is entered, it will be relative to the location
# where doxygen was started. If left blank the current directory will be used.

OUTPUT_DIRECTORY = doc/doxy
OUTPUT_DIRECTORY = doxy

# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
# 4096 sub-directories (in 2 levels) under the output directory of each output
@@ -288,7 +288,7 @@ TYPEDEF_HIDES_STRUCT = NO
# causing a significant performance penality.
# If the system has enough physical memory increasing the cache will improve the
# performance by keeping more symbols in memory. Note that the value works on
# a logarithmic scale so increasing the size by one will roughly double the
# a logarithmic scale so increasing the size by one will rougly double the
# memory usage. The cache size is given by this formula:
# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
# corresponding to a cache size of 2^16 = 65536 symbols
@@ -489,6 +489,12 @@ MAX_INITIALIZER_LINES = 30

SHOW_USED_FILES = YES

# If the sources in your project are distributed over multiple directories
# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
# in the documentation. The default is NO.

SHOW_DIRECTORIES = NO

# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
# This will remove the Files entry from the Quick Index and from the
# Folder Tree View (if specified). The default is YES.
@@ -639,14 +645,15 @@ EXCLUDE_SYMBOLS =
# directories that contain example code fragments that are included (see
# the \include command).

EXAMPLE_PATH = doc/examples/
EXAMPLE_PATH = libavcodec/ \
               libavformat/

# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
# and *.h) to filter out the source-files in the directories. If left
# blank all files are included.

EXAMPLE_PATTERNS = *.c
EXAMPLE_PATTERNS = *-example.c

# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude
@@ -793,13 +800,13 @@ HTML_FILE_EXTENSION = .html
# each generated HTML page. If it is left blank doxygen will generate a
# standard header.

#HTML_HEADER = doc/doxy/header.html
HTML_HEADER = doc/doxy/header.html

# The HTML_FOOTER tag can be used to specify a personal HTML footer for
# each generated HTML page. If it is left blank doxygen will generate a
# standard footer.

#HTML_FOOTER = doc/doxy/footer.html
HTML_FOOTER = doc/doxy/footer.html

# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
# style sheet that is used by each HTML page. It can be used to
@@ -808,7 +815,7 @@ HTML_FILE_EXTENSION = .html
# the style sheet file to the HTML output directory, so don't put your own
# stylesheet in the HTML output directory as well, or it will be erased!

#HTML_STYLESHEET = doc/doxy/doxy_stylesheet.css
HTML_STYLESHEET = doc/doxy/doxy_stylesheet.css

# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
# Doxygen will adjust the colors in the stylesheet and background images
@@ -818,7 +825,7 @@ HTML_FILE_EXTENSION = .html
# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
# The allowed range is 0 to 359.

#HTML_COLORSTYLE_HUE = 120
HTML_COLORSTYLE_HUE = 120

# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
# the colors in the HTML output. For a value of 0 the output will use
@@ -841,6 +848,12 @@ HTML_COLORSTYLE_GAMMA = 80

HTML_TIMESTAMP = YES

# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
# files or namespaces will be aligned in HTML using tables. If set to
# NO a bullet list will be used.

HTML_ALIGN_MEMBERS = YES

# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded. For this to work a browser that supports
@@ -1021,6 +1034,11 @@ ENUM_VALUES_PER_LINE = 4

GENERATE_TREEVIEW = NO

# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
# and Class Hierarchy pages using a tree view instead of an ordered list.

USE_INLINE_TREES = NO

# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
# used to set the initial width (in pixels) of the frame in which the tree
# is shown.
@@ -1356,9 +1374,14 @@ INCLUDE_FILE_PATTERNS =
# instead of the = operator.

PREDEFINED = "__attribute__(x)=" \
             "RENAME(x)=x ## _TMPL" \
             "DEF(x)=x ## _TMPL" \
             HAVE_AV_CONFIG_H \
             HAVE_MMX \
             HAVE_MMX2 \
             HAVE_AMD3DNOW \
             "DECLARE_ALIGNED(a,t,n)=t n" \
             "offsetof(x,y)=0x42" \
             av_alloc_size \
             "offsetof(x,y)=0x42"

# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
# this tag can be used to specify a list of macro names that should be expanded.
LICENSE (65)
@@ -1,4 +1,5 @@
FFmpeg:
-------

Most files in FFmpeg are under the GNU Lesser General Public License version 2.1
or later (LGPL v2.1+). Read the file COPYING.LGPLv2.1 for details. Some other
@@ -13,35 +14,9 @@ configure to activate them. In this case, FFmpeg's license changes to GPL v2+.
Specifically, the GPL parts of FFmpeg are

- libpostproc
- libmpcodecs
- optional x86 optimizations in the files
  libavcodec/x86/idct_mmx.c
- libutvideo encoding/decoding wrappers in
  libavcodec/libutvideo*.cpp
- the X11 grabber in libavdevice/x11grab.c
- the swresample test app in
  libswresample/swresample-test.c
- the texi2pod.pl tool
- the following filters in libavfilter:
  - f_ebur128.c
  - vf_blackframe.c
  - vf_boxblur.c
  - vf_colormatrix.c
  - vf_cropdetect.c
  - vf_decimate.c
  - vf_delogo.c
  - vf_geq.c
  - vf_histeq.c
  - vf_hqdn3d.c
  - vf_hue.c
  - vf_kerndeint.c
  - vf_mp.c
  - vf_pp.c
  - vf_smartblur.c
  - vf_super2xsai.c
  - vf_tinterlace.c
  - vf_yadif.c
  - vsrc_mptestsrc.c

There are a handful of files under other licensing terms, namely:

@@ -58,32 +33,18 @@ for you. Read the file COPYING.LGPLv3 or, if you have enabled GPL parts,
COPYING.GPLv3 to learn the exact legal terms that apply in this case.


external libraries
==================
external libraries:
-------------------

FFmpeg can be combined with a number of external libraries, which sometimes
affect the licensing of binaries resulting from the combination.
Some external libraries, e.g. libx264, are under GPL and can be used in
conjunction with FFmpeg. They require --enable-gpl to be passed to configure
as well.

compatible libraries
--------------------
The OpenCORE external libraries are under the Apache License 2.0. That license
is incompatible with the LGPL v2.1 and the GPL v2, but not with version 3 of
those licenses. So to combine the OpenCORE libraries with FFmpeg, the license
version needs to be upgraded by passing --enable-version3 to configure.

The libcdio, libx264, libxavs and libxvid libraries are under GPL. When
combining them with FFmpeg, FFmpeg needs to be licensed as GPL as well by
passing --enable-gpl to configure.

The OpenCORE and VisualOn libraries are under the Apache License 2.0. That
license is incompatible with the LGPL v2.1 and the GPL v2, but not with
version 3 of those licenses. So to combine these libraries with FFmpeg, the
license version needs to be upgraded by passing --enable-version3 to configure.

incompatible libraries
----------------------

The Fraunhofer AAC library, FAAC and aacplus are under licenses which
are incompatible with the GPLv2 and v3. We do not know for certain if their
licenses are compatible with the LGPL.
If you wish to enable these libraries, pass --enable-nonfree to configure.
But note that if you enable any of these libraries the resulting binary will
be under a complex license mix that is more restrictive than the LGPL and that
may result in additional obligations. It is possible that these
restrictions cause the resulting binary to be unredistributeable.
The nonfree external libraries libfaac and libaacplus can be hooked up in FFmpeg.
You need to pass --enable-nonfree to configure to enable it. Employ this option
with care as FFmpeg then becomes nonfree and unredistributable.
MAINTAINERS (67)
@@ -130,12 +130,9 @@ Codecs:
bmp.c Mans Rullgard, Kostya Shishkov
cavs* Stefan Gehrer
celp_filters.* Vitor Sessak
cdxl.c Paul B Mahol
cinepak.c Roberto Togni
cljr Alex Beregszaszi
cllc.c Derek Buitenhuis
cook.c, cookdata.h Benjamin Larsson
cpia.c Stephan Hilb
crystalhd.c Philip Langdale
cscd.c Reimar Doeffinger
dca.c Kostya Shishkov, Benjamin Larsson
@@ -183,7 +180,7 @@ Codecs:
lzo.h, lzo.c Reimar Doeffinger
mdec.c Michael Niedermayer
mimic.c Ramiro Polla
mjpeg*.c Michael Niedermayer
mjpeg.c Michael Niedermayer
mlp* Ramiro Polla
mmvideo.c Peter Ross
mpc* Kostya Shishkov
@@ -194,7 +191,6 @@ Codecs:
msvideo1.c Mike Melanson
nellymoserdec.c Benjamin Larsson
nuv.c Reimar Doeffinger
paf.* Paul B Mahol
pcx.c Ivo van Poorten
pgssubdec.c Reimar Doeffinger
ptx.c Ivo van Poorten
@@ -219,7 +215,6 @@ Codecs:
srt* Aurelien Jacobs
sunrast.c Ivo van Poorten
svq3.c Michael Niedermayer
tak* Paul B Mahol
targa.c Kostya Shishkov
tiff.c Kostya Shishkov
truemotion1* Mike Melanson
@@ -234,8 +229,6 @@ Codecs:
vble.c Derek Buitenhuis
vc1* Kostya Shishkov
vcr1.c Michael Niedermayer
vda_h264_dec.c Xidorn Quan
vima.c Paul B Mahol
vmnc.c Kostya Shishkov
vorbis_enc.c Oded Shimon
vorbis_dec.c Denes Balatoni, David Conrad
@@ -250,10 +243,8 @@ Codecs:
wmv2.c Michael Niedermayer
wnv1.c Kostya Shishkov
xan.c Mike Melanson
xbm* Paul B Mahol
xl.c Kostya Shishkov
xvmc.c Ivan Kalvachev
xwd* Paul B Mahol
zerocodec.c Derek Buitenhuis
zmbv* Kostya Shishkov

@@ -272,27 +263,22 @@ libavdevice
libavdevice/avdevice.h

iec61883.c Georg Lippitsch
libdc1394.c Roman Shaposhnik
v4l2.c Luca Abeni
vfwcap.c Ramiro Polla
dshow.c Roger Pack


libavfilter
===========

Generic parts:
Video filters:
graphdump.c Nicolas George

Filters:
af_amerge.c Nicolas George
af_astreamsync.c Nicolas George
af_atempo.c Pavel Koshevoy
af_pan.c Nicolas George
vsrc_mandelbrot.c Michael Niedermayer
vf_yadif.c Michael Niedermayer

Sources:
vsrc_mandelbrot.c Michael Niedermayer

libavformat
===========
@@ -307,25 +293,17 @@ Generic parts:
Muxers/Demuxers:
4xm.c Mike Melanson
adtsenc.c Robert Swain
afc.c Paul B Mahol
aiff.c Baptiste Coudurier
ape.c Kostya Shishkov
ass* Aurelien Jacobs
astdec.c Paul B Mahol
astenc.c James Almer
avi* Michael Niedermayer
avr.c Paul B Mahol
bink.c Peter Ross
brstm.c Paul B Mahol
caf* Peter Ross
cdxl.c Paul B Mahol
crc.c Michael Niedermayer
daud.c Reimar Doeffinger
dtshddec.c Paul B Mahol
dv.c Roman Shaposhnik
dxa.c Kostya Shishkov
electronicarts.c Peter Ross
epafdec.c Paul B Mahol
ffm* Baptiste Coudurier
flac* Justin Ruggles
flic.c Mike Melanson
@@ -336,22 +314,19 @@ Muxers/Demuxers:
idroqdec.c Mike Melanson
iff.c Jaikrishnan Menon
ipmovie.c Mike Melanson
img2*.c Michael Niedermayer
ircam* Paul B Mahol
img2.c Michael Niedermayer
iss.c Stefan Gehrer
jacosub* Clément Bœsch
jvdec.c Peter Ross
libmodplug.c Clément Bœsch
libnut.c Oded Shimon
lmlm4.c Ivo van Poorten
lvfdec.c Paul B Mahol
lxfdec.c Tomas Härdin
matroska.c Aurelien Jacobs
matroskadec.c Aurelien Jacobs
matroskaenc.c David Conrad
metadata* Aurelien Jacobs
microdvd* Aurelien Jacobs
mgsts.c Paul B Mahol
mm.c Peter Ross
mov.c Michael Niedermayer, Baptiste Coudurier
movenc.c Michael Niedermayer, Baptiste Coudurier
@@ -363,7 +338,6 @@ Muxers/Demuxers:
mtv.c Reynaldo H. Verdejo Pinochet
mxf* Baptiste Coudurier
mxfdec.c Tomas Härdin
nistspheredec.c Paul B Mahol
nsvdec.c Francois Revol
nut.c Michael Niedermayer
nuv.c Reimar Doeffinger
@@ -371,10 +345,8 @@ Muxers/Demuxers:
oggenc.c Baptiste Coudurier
oggparse*.c David Conrad
oma.c Maxim Poliakovski
paf.c Paul B Mahol
psxstr.c Mike Melanson
pva.c Ivo van Poorten
pvfdec.c Paul B Mahol
r3d.c Baptiste Coudurier
raw.c Michael Niedermayer
rdt.c Ronald S. Bultje
@@ -390,10 +362,8 @@ Muxers/Demuxers:
segafilm.c Mike Melanson
siff.c Kostya Shishkov
smacker.c Kostya Shishkov
smjpeg* Paul B Mahol
srtdec.c Aurelien Jacobs
swf.c Baptiste Coudurier
takdec.c Paul B Mahol
tta.c Alex Beregszaszi
txd.c Ivo van Poorten
voc.c Aurelien Jacobs
@@ -402,7 +372,6 @@ Muxers/Demuxers:
westwood.c Mike Melanson
wtv.c Peter Ross
wv.c Kostya Shishkov
wvenc.c Paul B Mahol

Protocols:
bluray.c Petri Hintukainen
@@ -411,20 +380,6 @@ Protocols:
udp.c Luca Abeni


libswresample
=============

Generic parts:
audioconvert.c Michael Niedermayer
dither.c Michael Niedermayer
rematrix*.c Michael Niedermayer
swresample*.c Michael Niedermayer

Resamplers:
resample*.c Michael Niedermayer
soxr_resample.c Rob Sykes


Operating systems / CPU architectures
=====================================

@@ -445,11 +400,9 @@ x86 Michael Niedermayer
Releases
========

1.2 Michael Niedermayer
1.1 Michael Niedermayer
1.0 Michael Niedermayer
0.11 Michael Niedermayer
0.10 Michael Niedermayer

If you want to maintain an older release, please contact us


GnuPG Fingerprints of maintainers and contributors

@@ -461,7 +414,6 @@ Attila Kinali 11F0 F9A6 A1D2 11F6 C745 D10C 6520 BCDD F2DF E765
Baptiste Coudurier 8D77 134D 20CC 9220 201F C5DB 0AC9 325C 5C1A BAAA
Ben Littler 3EE3 3723 E560 3214 A8CD 4DEB 2CDB FCE7 768C 8D2C
Benoit Fouet B22A 4F4F 43EF 636B BB66 FCDC 0023 AE1E 2985 49C8
Bœsch Clément 52D0 3A82 D445 F194 DB8B 2B16 87EE 2CB8 F4B8 FCF9
Daniel Verkamp 78A6 07ED 782C 653E C628 B8B9 F0EB 8DD8 2F0E 21C7
Diego Biurrun 8227 1E31 B6D9 4994 7427 E220 9CAE D6CC 4757 FCC5
Gwenole Beauchesne 2E63 B3A6 3E44 37E2 017D 2704 53C7 6266 B153 99C4
@@ -479,6 +431,5 @@ Reinhard Tartler 9300 5DC2 7E87 6C37 ED7B CA9A 9808 3544 9453 48A4
Reynaldo H. Verdejo Pinochet 6E27 CD34 170C C78E 4D4F 5F40 C18E 077F 3114 452A
Robert Swain EE7A 56EA 4A81 A7B5 2001 A521 67FA 362D A2FC 3E71
Sascha Sommer 38A0 F88B 868E 9D3A 97D4 D6A0 E823 706F 1E07 0D3C
Stefano Sabatini 0D0B AD6B 5330 BBAD D3D6 6A0C 719C 2839 FC43 2D5F
Stephan Hilb 4F38 0B3A 5F39 B99B F505 E562 8D5C 5554 4E17 8863
Tomas Härdin A79D 4E3D F38F 763F 91F5 8B33 A01E 8AE0 41BB 2551
Stefano Sabatini 9A43 10F8 D32C D33C 48E7 C52C 5DF2 8E4D B2EE 066B
Tomas Härdin D133 29CA 4EEC 9DB4 7076 F697 B04B 7403 3313 41FD
Makefile (52)
@@ -15,11 +15,9 @@ PROGS-$(CONFIG_FFPLAY) += ffplay
PROGS-$(CONFIG_FFPROBE) += ffprobe
PROGS-$(CONFIG_FFSERVER) += ffserver

PROGS := $(PROGS-yes:%=%$(PROGSSUF)$(EXESUF))
PROGS := $(PROGS-yes:%=%$(EXESUF))
INSTPROGS = $(PROGS-yes:%=%$(PROGSSUF)$(EXESUF))

OBJS = cmdutils.o $(EXEOBJS)
OBJS-ffmpeg = ffmpeg_opt.o ffmpeg_filter.o
OBJS = $(PROGS-yes:%=%.o) cmdutils.o
TESTTOOLS = audiogen videogen rotozoom tiny_psnr base64
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
TOOLS = qt-faststart trasher
@@ -42,7 +40,7 @@ FFLIBS-$(CONFIG_SWSCALE) += swscale
FFLIBS := avutil

DATA_FILES := $(wildcard $(SRC_PATH)/presets/*.ffpreset) $(SRC_PATH)/doc/ffprobe.xsd
EXAMPLES_FILES := $(wildcard $(SRC_PATH)/doc/examples/*.c) $(SRC_PATH)/doc/examples/Makefile $(SRC_PATH)/doc/examples/README
EXAMPLES_FILES := $(wildcard $(SRC_PATH)/doc/examples/*.c) $(SRC_PATH)/doc/examples/Makefile

SKIPHEADERS = cmdutils_common_opts.h

@@ -53,14 +51,14 @@ FF_DEP_LIBS := $(DEP_LIBS)

all: $(PROGS)

$(PROGS): %$(EXESUF): %_g$(EXESUF)
$(CP) $< $@
$(STRIP) $@
$(PROGS): %$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
$(CP) $< $@$(PROGSSUF)
$(STRIP) $@$(PROGSSUF)

$(TOOLS): %$(EXESUF): %.o $(EXEOBJS)
$(LD) $(LDFLAGS) $(LD_O) $^ $(ELIBS)
$(TOOLS): %$(EXESUF): %.o
$(LD) $(LDFLAGS) -o $@ $< $(ELIBS)

tools/cws2fws$(EXESUF): ELIBS = $(ZLIB)
tools/cws2fws$(EXESUF): ELIBS = -lz

config.h: .config
.config: $(wildcard $(FFLIBS:%=$(SRC_PATH)/lib%/all*.c))
@@ -69,12 +67,10 @@ config.h: .config
@-tput sgr0 2>/dev/null

SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS \
HEADERS ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
ARMV5TE-OBJS ARMV6-OBJS VFP-OBJS NEON-OBJS \
ALTIVEC-OBJS VIS-OBJS \
MMX-OBJS YASM-OBJS \
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS MIPS32R2-OBJS \
OBJS HOSTOBJS TESTOBJS
ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
ALTIVEC-OBJS ARMV5TE-OBJS ARMV6-OBJS ARMVFP-OBJS MMI-OBJS \
MMX-OBJS NEON-OBJS VIS-OBJS YASM-OBJS \
OBJS TESTOBJS

define RESET
$(1) :=
@@ -91,19 +87,12 @@ endef

$(foreach D,$(FFLIBS),$(eval $(call DOSUBDIR,lib$(D))))

define DOPROG
OBJS-$(1) += $(1).o cmdutils.o $(EXEOBJS)
$(1)$(PROGSSUF)_g$(EXESUF): $$(OBJS-$(1))
$$(OBJS-$(1)): CFLAGS += $(CFLAGS-$(1))
$(1)$(PROGSSUF)_g$(EXESUF): LDFLAGS += $(LDFLAGS-$(1))
$(1)$(PROGSSUF)_g$(EXESUF): FF_EXTRALIBS += $(LIBS-$(1))
-include $$(OBJS-$(1):.o=.d)
endef
ffplay.o: CFLAGS += $(SDL_CFLAGS)
ffplay_g$(EXESUF): FF_EXTRALIBS += $(SDL_LIBS)
ffserver_g$(EXESUF): LDFLAGS += $(FFSERVERLDFLAGS)

$(foreach P,$(PROGS-yes),$(eval $(call DOPROG,$(P))))

%$(PROGSSUF)_g$(EXESUF): %.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LD_O) $(OBJS-$*) $(FF_EXTRALIBS)
%$(PROGSSUF)_g$(EXESUF): %.o cmdutils.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) -o $@ $< cmdutils.o $(FF_EXTRALIBS)

OBJDIRS += tools

@@ -153,13 +142,14 @@ uninstall-data:
clean::
$(RM) $(ALLPROGS) $(ALLPROGS_G)
$(RM) $(CLEANSUFFIXES)
$(RM) $(TOOLS)
$(RM) $(CLEANSUFFIXES:%=tools/%)
$(RM) coverage.info
$(RM) -r coverage-html

distclean::
$(RM) $(DISTCLEANSUFFIXES)
$(RM) config.* .version version.h libavutil/avconfig.h libavcodec/codec_names.h
$(RM) config.* .version version.h libavutil/avconfig.h

config:
$(SRC_PATH)/configure $(value FFMPEG_CONFIGURATION)
@@ -173,7 +163,7 @@ coverage-html: coverage.info
$(Q)genhtml -o $@ $<
$(Q)touch $@

check: all alltools examples testprogs fate
check: all alltools checkheaders examples testprogs fate

include $(SRC_PATH)/doc/Makefile
include $(SRC_PATH)/tests/Makefile
arch.mak (7)
@@ -1,12 +1,9 @@
OBJS-$(HAVE_ARMV5TE) += $(ARMV5TE-OBJS) $(ARMV5TE-OBJS-yes)
OBJS-$(HAVE_ARMV6) += $(ARMV6-OBJS) $(ARMV6-OBJS-yes)
OBJS-$(HAVE_VFP) += $(VFP-OBJS) $(VFP-OBJS-yes)
OBJS-$(HAVE_ARMVFP) += $(ARMVFP-OBJS) $(ARMVFP-OBJS-yes)
OBJS-$(HAVE_NEON) += $(NEON-OBJS) $(NEON-OBJS-yes)

OBJS-$(HAVE_MIPSFPU) += $(MIPSFPU-OBJS) $(MIPSFPU-OBJS-yes)
OBJS-$(HAVE_MIPS32R2) += $(MIPS32R2-OBJS) $(MIPS32R2-OBJS-yes)
OBJS-$(HAVE_MIPSDSPR1) += $(MIPSDSPR1-OBJS) $(MIPSDSPR1-OBJS-yes)
OBJS-$(HAVE_MIPSDSPR2) += $(MIPSDSPR2-OBJS) $(MIPSDSPR2-OBJS-yes)
OBJS-$(HAVE_MMI) += $(MMI-OBJS) $(MMI-OBJS-yes)

OBJS-$(HAVE_ALTIVEC) += $(ALTIVEC-OBJS) $(ALTIVEC-OBJS-yes)
cmdutils.c (1230)
File diff suppressed because it is too large.
cmdutils.h (256)
@@ -75,25 +75,25 @@ void log_callback_help(void* ptr, int level, const char* fmt, va_list vl);
* Fallback for options that are not explicitly handled, these will be
* parsed through AVOptions.
*/
int opt_default(void *optctx, const char *opt, const char *arg);
int opt_default(const char *opt, const char *arg);

/**
* Set the libav* libraries log level.
*/
int opt_loglevel(void *optctx, const char *opt, const char *arg);
int opt_loglevel(const char *opt, const char *arg);

int opt_report(const char *opt);

int opt_max_alloc(void *optctx, const char *opt, const char *arg);
int opt_max_alloc(const char *opt, const char *arg);

int opt_cpuflags(void *optctx, const char *opt, const char *arg);
int opt_cpuflags(const char *opt, const char *arg);

int opt_codec_debug(void *optctx, const char *opt, const char *arg);
int opt_codec_debug(const char *opt, const char *arg);

/**
* Limit the execution time.
*/
int opt_timelimit(void *optctx, const char *opt, const char *arg);
int opt_timelimit(const char *opt, const char *arg);

/**
* Parse a string and return its corresponding value as a double.
@@ -139,7 +139,7 @@ typedef struct SpecifierOpt {
} u;
} SpecifierOpt;

typedef struct OptionDef {
typedef struct {
const char *name;
int flags;
#define HAS_ARG 0x0001
@@ -148,14 +148,14 @@ typedef struct OptionDef {
#define OPT_STRING 0x0008
#define OPT_VIDEO 0x0010
#define OPT_AUDIO 0x0020
#define OPT_GRAB 0x0040
#define OPT_INT 0x0080
#define OPT_FLOAT 0x0100
#define OPT_SUBTITLE 0x0200
#define OPT_INT64 0x0400
#define OPT_EXIT 0x0800
#define OPT_DATA 0x1000
#define OPT_PERFILE 0x2000 /* the option is per-file (currently ffmpeg-only).
implied by OPT_OFFSET or OPT_SPEC */
#define OPT_FUNC2 0x2000
#define OPT_OFFSET 0x4000 /* option is specified as an offset in a passed optctx */
#define OPT_SPEC 0x8000 /* option is to be stored in an array of SpecifierOpt.
Implies OPT_OFFSET. Next element after the offset is
@@ -164,24 +164,16 @@ typedef struct OptionDef {
#define OPT_DOUBLE 0x20000
union {
void *dst_ptr;
int (*func_arg)(void *, const char *, const char *);
int (*func_arg)(const char *, const char *);
int (*func2_arg)(void *, const char *, const char *);
size_t off;
} u;
const char *help;
const char *argname;
} OptionDef;

/**
* Print help for all options matching specified flags.
*
* @param options a list of options
* @param msg title of this group. Only printed if at least one option matches.
* @param req_flags print only options which have all those flags set.
* @param rej_flags don't print options which have any of those flags set.
* @param alt_flags print only options that have at least one of those flags set
*/
void show_help_options(const OptionDef *options, const char *msg, int req_flags,
int rej_flags, int alt_flags);
void show_help_options(const OptionDef *options, const char *msg, int mask,
int value);

/**
* Show help for all options with given flags in class and all its
@@ -189,23 +181,10 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
*/
void show_help_children(const AVClass *class, int flags);

/**
* Per-avtool specific help handler. Implemented in each
* avtool, called by show_help().
*/
void show_help_default(const char *opt, const char *arg);

/**
* Generic -h handler common to all avtools.
*/
int show_help(void *optctx, const char *opt, const char *arg);

/**
* Parse the command line arguments.
*
* @param optctx an opaque options context
* @param argc number of command line arguments
* @param argv values of command line arguments
* @param options Array with the definitions required to interpret every
* option of the form: -option_name [argument]
* @param parse_arg_function Name of the function called to process every
@@ -223,95 +202,6 @@ void parse_options(void *optctx, int argc, char **argv, const OptionDef *options
int parse_option(void *optctx, const char *opt, const char *arg,
const OptionDef *options);

/**
* An option extracted from the commandline.
* Cannot use AVDictionary because of options like -map which can be
* used multiple times.
*/
typedef struct Option {
const OptionDef *opt;
const char *key;
const char *val;
} Option;

typedef struct OptionGroupDef {
/**< group name */
const char *name;
/**
* Option to be used as group separator. Can be NULL for groups which
* are terminated by a non-option argument (e.g. ffmpeg output files)
*/
const char *sep;
} OptionGroupDef;

typedef struct OptionGroup {
const OptionGroupDef *group_def;
const char *arg;

Option *opts;
int nb_opts;

AVDictionary *codec_opts;
AVDictionary *format_opts;
struct SwsContext *sws_opts;
struct SwrContext *swr_opts;
} OptionGroup;

/**
* A list of option groups that all have the same group type
* (e.g. input files or output files)
*/
typedef struct OptionGroupList {
const OptionGroupDef *group_def;

OptionGroup *groups;
int nb_groups;
} OptionGroupList;

typedef struct OptionParseContext {
OptionGroup global_opts;

OptionGroupList *groups;
int nb_groups;

/* parsing state */
OptionGroup cur_group;
} OptionParseContext;

/**
* Parse an options group and write results into optctx.
*
* @param optctx an app-specific options context. NULL for global options group
*/
int parse_optgroup(void *optctx, OptionGroup *g);

/**
* Split the commandline into an intermediate form convenient for further
* processing.
*
* The commandline is assumed to be composed of options which either belong to a
* group (those with OPT_SPEC, OPT_OFFSET or OPT_PERFILE) or are global
* (everything else).
*
* A group (defined by an OptionGroupDef struct) is a sequence of options
* terminated by either a group separator option (e.g. -i) or a parameter that
* is not an option (doesn't start with -). A group without a separator option
* must always be first in the supplied groups list.
*
* All options within the same group are stored in one OptionGroup struct in an
* OptionGroupList, all groups with the same group definition are stored in one
* OptionGroupList in OptionParseContext.groups. The order of group lists is the
* same as the order of group definitions.
*/
int split_commandline(OptionParseContext *octx, int argc, char *argv[],
const OptionDef *options,
const OptionGroupDef *groups, int nb_groups);

/**
* Free all allocated memory in an OptionParseContext.
*/
void uninit_parse_context(OptionParseContext *octx);
/**
* Find the '-loglevel' option in the command line args and apply it.
*/
@@ -340,16 +230,12 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec);
* Create a new options dictionary containing only the options from
* opts which apply to the codec with ID codec_id.
*
* @param opts dictionary to place options in
* @param codec_id ID of the codec that should be filtered for
* @param s Corresponding format context.
* @param st A stream from s for which the options should be filtered.
* @param codec The particular codec for which the options should be filtered.
* If null, the default one is looked up according to the codec id.
* @return a pointer to the created dictionary
*/
AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
AVFormatContext *s, AVStream *st, AVCodec *codec);
AVDictionary *filter_codec_opts(AVDictionary *opts, AVCodec *codec,
AVFormatContext *s, AVStream *st);
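Both signatures of filter_codec_opts() appear above because this comparison spans an API change. A hedged sketch against the newer five-argument form; the wrapper function, its parameter names and the error handling are assumptions for illustration only.

```c
#include "libavformat/avformat.h"
#include "cmdutils.h"

/* Sketch: open the decoder of one stream with only the options that apply to
 * its codec. All parameters are assumed to be set up by the caller. */
static int open_stream_decoder(AVFormatContext *fmt_ctx, AVStream *st,
                               AVCodec *dec, AVDictionary *codec_opts)
{
    AVDictionary *opts = filter_codec_opts(codec_opts, st->codec->codec_id,
                                           fmt_ctx, st, dec);
    int ret = avcodec_open2(st->codec, dec, &opts);
    av_dict_free(&opts);   /* free whatever options were not consumed */
    return ret;
}
```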
/**
* Setup AVCodecContext options for avformat_find_stream_info().
@@ -389,81 +275,62 @@ void show_banner(int argc, char **argv, const OptionDef *options);
* libraries.
* This option processing function does not utilize the arguments.
*/
int show_version(void *optctx, const char *opt, const char *arg);
int opt_version(const char *opt, const char *arg);

/**
* Print the license of the program to stdout. The license depends on
* the license of the libraries compiled into the program.
* This option processing function does not utilize the arguments.
*/
int show_license(void *optctx, const char *opt, const char *arg);
int opt_license(const char *opt, const char *arg);

/**
* Print a listing containing all the formats supported by the
* program.
* This option processing function does not utilize the arguments.
*/
int show_formats(void *optctx, const char *opt, const char *arg);
int opt_formats(const char *opt, const char *arg);

/**
* Print a listing containing all the codecs supported by the
* program.
* This option processing function does not utilize the arguments.
*/
int show_codecs(void *optctx, const char *opt, const char *arg);

/**
* Print a listing containing all the decoders supported by the
* program.
*/
int show_decoders(void *optctx, const char *opt, const char *arg);

/**
* Print a listing containing all the encoders supported by the
* program.
*/
int show_encoders(void *optctx, const char *opt, const char *arg);
int opt_codecs(const char *opt, const char *arg);

/**
* Print a listing containing all the filters supported by the
* program.
* This option processing function does not utilize the arguments.
*/
int show_filters(void *optctx, const char *opt, const char *arg);
int opt_filters(const char *opt, const char *arg);

/**
* Print a listing containing all the bit stream filters supported by the
* program.
* This option processing function does not utilize the arguments.
*/
int show_bsfs(void *optctx, const char *opt, const char *arg);
int opt_bsfs(const char *opt, const char *arg);

/**
* Print a listing containing all the protocols supported by the
* program.
* This option processing function does not utilize the arguments.
*/
int show_protocols(void *optctx, const char *opt, const char *arg);
int opt_protocols(const char *opt, const char *arg);

/**
* Print a listing containing all the pixel formats supported by the
* program.
* This option processing function does not utilize the arguments.
*/
int show_pix_fmts(void *optctx, const char *opt, const char *arg);

/**
* Print a listing containing all the standard channel layouts supported by
* the program.
* This option processing function does not utilize the arguments.
*/
int show_layouts(void *optctx, const char *opt, const char *arg);
int opt_pix_fmts(const char *opt, const char *arg);

/**
* Print a listing containing all the sample formats supported by the
* program.
*/
int show_sample_fmts(void *optctx, const char *opt, const char *arg);
int show_sample_fmts(const char *opt, const char *arg);

/**
* Return a positive value if a line read from standard input
@@ -475,7 +342,6 @@ int read_yesno(void);
* Read the file with name filename, and put its content in a newly
* allocated 0-terminated buffer.
*
* @param filename file to read from
* @param bufptr location where pointer to buffer is returned
* @param size location where size of buffer is returned
* @return 0 in case of success, a negative value corresponding to an
@@ -504,80 +370,20 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size);
FILE *get_preset_file(char *filename, size_t filename_size,
const char *preset_name, int is_path, const char *codec_name);

/**
* Do all the necessary cleanup and abort.
* This function is implemented in the avtools, not cmdutils.
*/
void exit_program(int ret);

/**
* Realloc array to hold new_size elements of elem_size.
* Calls exit() on failure.
* Calls exit_program() on failure.
*
* @param array array to reallocate
* @param elem_size size in bytes of each element
* @param size new element count will be written here
* @param new_size number of elements to place in reallocated array
* @return reallocated array
*/
void *grow_array(void *array, int elem_size, int *size, int new_size);

#define GROW_ARRAY(array, nb_elems)\
array = grow_array(array, sizeof(*array), &nb_elems, nb_elems + 1)
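A small sketch of how the grow_array()/GROW_ARRAY() pair above is meant to be used to append one element to a dynamic array; the example type and both variables are made up for illustration.

```c
#include "cmdutils.h"

/* Hypothetical item type and array; only GROW_ARRAY() comes from cmdutils.h. */
typedef struct ExampleItem { const char *name; } ExampleItem;

static ExampleItem *items;
static int          nb_items;

static void add_item(const char *name)
{
    /* Reallocates the array for one more element and bumps nb_items;
     * grow_array() aborts the program if the allocation fails. */
    GROW_ARRAY(items, nb_items);
    items[nb_items - 1].name = name;
}
```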
typedef struct FrameBuffer {
uint8_t *base[4];
uint8_t *data[4];
int linesize[4];

int h, w;
enum AVPixelFormat pix_fmt;

int refcount;
struct FrameBuffer **pool; ///< head of the buffer pool
struct FrameBuffer *next;
} FrameBuffer;

/**
* Get a frame from the pool. This is intended to be used as a callback for
* AVCodecContext.get_buffer.
*
* @param s codec context. s->opaque must be a pointer to the head of the
* buffer pool.
* @param frame frame->opaque will be set to point to the FrameBuffer
* containing the frame data.
*/
int codec_get_buffer(AVCodecContext *s, AVFrame *frame);

/**
* A callback to be used for AVCodecContext.release_buffer along with
* codec_get_buffer().
*/
void codec_release_buffer(AVCodecContext *s, AVFrame *frame);

/**
* A callback to be used for AVFilterBuffer.free.
* @param fb buffer to free. fb->priv must be a pointer to the FrameBuffer
* containing the buffer data.
*/
void filter_release_buffer(AVFilterBuffer *fb);

/**
* Free all the buffers in the pool. This must be called after all the
* buffers have been released.
*/
void free_buffer_pool(FrameBuffer **pool);
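Per the comments above, the pool plugs into the pre-refcounting AVCodecContext buffer callbacks. A hedged sketch of the wiring; the two helper functions are illustrative, while the three cmdutils entry points and the AVCodecContext fields are taken from the declarations and doc comments shown here.

```c
#include "cmdutils.h"

/* Sketch: attach the cmdutils frame-buffer pool to one decoder context. */
static FrameBuffer *pool = NULL;

static void attach_buffer_pool(AVCodecContext *dec_ctx)
{
    dec_ctx->opaque         = &pool;              /* head of the buffer pool */
    dec_ctx->get_buffer     = codec_get_buffer;
    dec_ctx->release_buffer = codec_release_buffer;
}

static void drop_buffer_pool(void)
{
    /* Only valid once every frame has been released back to the pool. */
    free_buffer_pool(&pool);
}
```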
#define GET_PIX_FMT_NAME(pix_fmt)\
const char *name = av_get_pix_fmt_name(pix_fmt);

#define GET_SAMPLE_FMT_NAME(sample_fmt)\
const char *name = av_get_sample_fmt_name(sample_fmt)

#define GET_SAMPLE_RATE_NAME(rate)\
char name[16];\
snprintf(name, sizeof(name), "%d", rate);

#define GET_CH_LAYOUT_NAME(ch_layout)\
char name[16];\
snprintf(name, sizeof(name), "0x%"PRIx64, ch_layout);

#define GET_CH_LAYOUT_DESC(ch_layout)\
char name[128];\
av_get_channel_layout_string(name, sizeof(name), 0, ch_layout);

#endif /* CMDUTILS_H */
cmdutils_common_opts.h
@@ -1,21 +1,20 @@
{ "L" , OPT_EXIT, {.func_arg = show_license}, "show license" },
{ "h" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
{ "?" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
{ "help" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
{ "-help" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
{ "version" , OPT_EXIT, {.func_arg = show_version}, "show version" },
{ "formats" , OPT_EXIT, {.func_arg = show_formats }, "show available formats" },
{ "codecs" , OPT_EXIT, {.func_arg = show_codecs }, "show available codecs" },
{ "decoders" , OPT_EXIT, {.func_arg = show_decoders }, "show available decoders" },
{ "encoders" , OPT_EXIT, {.func_arg = show_encoders }, "show available encoders" },
{ "bsfs" , OPT_EXIT, {.func_arg = show_bsfs }, "show available bit stream filters" },
{ "protocols" , OPT_EXIT, {.func_arg = show_protocols}, "show available protocols" },
{ "filters" , OPT_EXIT, {.func_arg = show_filters }, "show available filters" },
{ "pix_fmts" , OPT_EXIT, {.func_arg = show_pix_fmts }, "show available pixel formats" },
{ "layouts" , OPT_EXIT, {.func_arg = show_layouts }, "show standard channel layouts" },
{ "L", OPT_EXIT, {(void*)opt_license}, "show license" },
{ "h", OPT_EXIT, {(void*)opt_help}, "show help" },
{ "?", OPT_EXIT, {(void*)opt_help}, "show help" },
{ "help", OPT_EXIT, {(void*)opt_help}, "show help" },
{ "-help", OPT_EXIT, {(void*)opt_help}, "show help" },
{ "version", OPT_EXIT, {(void*)opt_version}, "show version" },
{ "formats" , OPT_EXIT, {(void*)opt_formats }, "show available formats" },
{ "codecs" , OPT_EXIT, {(void*)opt_codecs }, "show available codecs" },
{ "bsfs" , OPT_EXIT, {(void*)opt_bsfs }, "show available bit stream filters" },
{ "protocols", OPT_EXIT, {(void*)opt_protocols}, "show available protocols" },
{ "filters", OPT_EXIT, {(void*)opt_filters }, "show available filters" },
{ "pix_fmts" , OPT_EXIT, {(void*)opt_pix_fmts }, "show available pixel formats" },
{ "sample_fmts", OPT_EXIT, {.func_arg = show_sample_fmts }, "show available audio sample formats" },
{ "loglevel" , HAS_ARG, {.func_arg = opt_loglevel}, "set libav* logging level", "loglevel" },
{ "v", HAS_ARG, {.func_arg = opt_loglevel}, "set libav* logging level", "loglevel" },
{ "report" , 0, {(void*)opt_report}, "generate a report" },
{ "max_alloc" , HAS_ARG, {.func_arg = opt_max_alloc}, "set maximum size of a single allocated block", "bytes" },
{ "cpuflags" , HAS_ARG | OPT_EXPERT, {.func_arg = opt_cpuflags}, "force specific cpu flags", "flags" },
{ "loglevel", HAS_ARG, {(void*)opt_loglevel}, "set libav* logging level", "loglevel" },
{ "v", HAS_ARG, {(void*)opt_loglevel}, "set libav* logging level", "loglevel" },
{ "debug", HAS_ARG, {(void*)opt_codec_debug}, "set debug flags", "flags" },
{ "fdebug", HAS_ARG, {(void*)opt_codec_debug}, "set debug flags", "flags" },
{ "report", 0, {(void*)opt_report}, "generate a report" },
{ "max_alloc", HAS_ARG, {(void*)opt_max_alloc}, "set maximum size of a single allocated block", "bytes" },
{ "cpuflags", HAS_ARG | OPT_EXPERT, {(void*)opt_cpuflags}, "force specific cpu flags", "flags" },
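Both halves of this table follow the same OptionDef pattern used by the ffmpeg tools: an option name, flag bits, a callback (or destination) union, help text and an optional argument name, terminated by a sentinel entry. A self-contained sketch of how such a table can be scanned to dispatch an option; the struct and flag values are simplified stand-ins, not the real cmdutils definitions:

#include <stdio.h>
#include <string.h>

#define OPT_EXIT 0x01   /* illustrative flag values only */
#define HAS_ARG  0x02

typedef struct OptionDefSketch {
    const char *name;
    int flags;
    int (*func_arg)(const char *opt, const char *arg);
    const char *help;
    const char *argname;
} OptionDefSketch;

static int show_version(const char *opt, const char *arg) { puts("version 0.0"); return 0; }

static const OptionDefSketch options[] = {
    { "version", OPT_EXIT, show_version, "show version", NULL },
    { NULL },
};

/* Look up "name" in the table and invoke its callback. */
static int dispatch(const char *name, const char *arg)
{
    for (const OptionDefSketch *o = options; o->name; o++)
        if (!strcmp(o->name, name))
            return o->func_arg(name, arg);
    fprintf(stderr, "unknown option -%s\n", name);
    return -1;
}

int main(void)
{
    return dispatch("version", NULL);
}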
56
common.mak
@@ -10,9 +10,8 @@ ifndef SUBDIR
|
||||
ifndef V
|
||||
Q = @
|
||||
ECHO = printf "$(1)\t%s\n" $(2)
|
||||
BRIEF = CC CXX HOSTCC HOSTLD AS YASM AR LD STRIP CP
|
||||
SILENT = DEPCC DEPHOSTCC DEPAS DEPYASM RANLIB RM
|
||||
|
||||
BRIEF = CC CXX AS YASM AR LD HOSTCC STRIP CP
|
||||
SILENT = DEPCC YASMDEP RM RANLIB
|
||||
MSG = $@
|
||||
M = @$(call ECHO,$(TAG),$@);
|
||||
$(foreach VAR,$(BRIEF), \
|
||||
@@ -27,17 +26,15 @@ ALLFFLIBS = avcodec avdevice avfilter avformat avresample avutil postproc swscal
|
||||
IFLAGS := -I. -I$(SRC_PATH)/
|
||||
CPPFLAGS := $(IFLAGS) $(CPPFLAGS)
|
||||
CFLAGS += $(ECFLAGS)
|
||||
CCFLAGS = $(CPPFLAGS) $(CFLAGS)
|
||||
ASFLAGS := $(CPPFLAGS) $(ASFLAGS)
|
||||
CXXFLAGS += $(CPPFLAGS) $(CFLAGS)
|
||||
YASMFLAGS += $(IFLAGS:%=%/) -Pconfig.asm
|
||||
|
||||
HOSTCCFLAGS = $(IFLAGS) $(HOSTCFLAGS)
|
||||
LDFLAGS := $(ALLFFLIBS:%=$(LD_PATH)lib%) $(LDFLAGS)
|
||||
CCFLAGS = $(CFLAGS)
|
||||
CXXFLAGS := $(CFLAGS) $(CXXFLAGS)
|
||||
YASMFLAGS += $(IFLAGS) -I$(SRC_PATH)/libavutil/x86/ -Pconfig.asm
|
||||
HOSTCFLAGS += $(IFLAGS)
|
||||
LDFLAGS := $(ALLFFLIBS:%=-Llib%) $(LDFLAGS)
|
||||
|
||||
define COMPILE
|
||||
$(call $(1)DEP,$(1))
|
||||
$($(1)) $($(1)FLAGS) $($(1)_DEPFLAGS) $($(1)_C) $($(1)_O) $<
|
||||
$($(1)DEP)
|
||||
$($(1)) $(CPPFLAGS) $($(1)FLAGS) $($(1)_DEPFLAGS) -c $($(1)_O) $<
|
||||
endef
|
||||
|
||||
COMPILE_C = $(call COMPILE,CC)
|
||||
@@ -56,11 +53,8 @@ COMPILE_S = $(call COMPILE,AS)
|
||||
%.o: %.S
|
||||
$(COMPILE_S)
|
||||
|
||||
%.i: %.c
|
||||
$(CC) $(CCFLAGS) $(CC_E) $<
|
||||
|
||||
%.h.c:
|
||||
$(Q)echo '#include "$*.h"' >$@
|
||||
%.ho: %.h
|
||||
$(CC) $(CPPFLAGS) $(CFLAGS) -Wno-unused -c -o $@ -x c $<
|
||||
|
||||
%.ver: %.v
|
||||
$(Q)sed 's/$$MAJOR/$($(basename $(@F))_VERSION_MAJOR)/' $^ > $@
|
||||
@@ -85,8 +79,7 @@ OBJS += $(OBJS-yes)
|
||||
FFLIBS := $(FFLIBS-yes) $(FFLIBS)
|
||||
TESTPROGS += $(TESTPROGS-yes)
|
||||
|
||||
LDLIBS = $(FFLIBS:%=%$(BUILDSUF))
|
||||
FFEXTRALIBS := $(LDLIBS:%=$(LD_LIB)) $(EXTRALIBS)
|
||||
FFEXTRALIBS := $(FFLIBS:%=-l%$(BUILDSUF)) $(EXTRALIBS)
|
||||
|
||||
EXAMPLES := $(EXAMPLES:%=$(SUBDIR)%-example$(EXESUF))
|
||||
OBJS := $(sort $(OBJS:%=$(SUBDIR)%))
|
||||
@@ -97,44 +90,31 @@ HOSTPROGS := $(HOSTPROGS:%=$(SUBDIR)%$(HOSTEXESUF))
|
||||
TOOLS += $(TOOLS-yes)
|
||||
TOOLOBJS := $(TOOLS:%=tools/%.o)
|
||||
TOOLS := $(TOOLS:%=tools/%$(EXESUF))
|
||||
HEADERS += $(HEADERS-yes)
|
||||
|
||||
DEP_LIBS := $(foreach NAME,$(FFLIBS),lib$(NAME)/$($(CONFIG_SHARED:yes=S)LIBNAME))
|
||||
|
||||
ALLHEADERS := $(subst $(SRC_DIR)/,$(SUBDIR),$(wildcard $(SRC_DIR)/*.h $(SRC_DIR)/$(ARCH)/*.h))
|
||||
SKIPHEADERS += $(ARCH_HEADERS:%=$(ARCH)/%) $(SKIPHEADERS-)
|
||||
SKIPHEADERS := $(SKIPHEADERS:%=$(SUBDIR)%)
|
||||
HOBJS = $(filter-out $(SKIPHEADERS:.h=.h.o),$(ALLHEADERS:.h=.h.o))
|
||||
checkheaders: $(HOBJS)
|
||||
.SECONDARY: $(HOBJS:.o=.c)
|
||||
checkheaders: $(filter-out $(SKIPHEADERS:.h=.ho),$(ALLHEADERS:.h=.ho))
|
||||
|
||||
alltools: $(TOOLS)
|
||||
|
||||
$(HOSTOBJS): %.o: %.c
|
||||
$(call COMPILE,HOSTCC)
|
||||
$(HOSTCC) $(HOSTCFLAGS) -c -o $@ $<
|
||||
|
||||
$(HOSTPROGS): %$(HOSTEXESUF): %.o
|
||||
$(HOSTLD) $(HOSTLDFLAGS) $(HOSTLD_O) $< $(HOSTLIBS)
|
||||
$(HOSTCC) $(HOSTLDFLAGS) -o $@ $< $(HOSTLIBS)
|
||||
|
||||
$(OBJS): | $(sort $(dir $(OBJS)))
|
||||
$(HOSTOBJS): | $(sort $(dir $(HOSTOBJS)))
|
||||
$(TESTOBJS): | $(sort $(dir $(TESTOBJS)))
|
||||
$(HOBJS): | $(sort $(dir $(HOBJS)))
|
||||
$(TOOLOBJS): | tools
|
||||
|
||||
OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOSTOBJS) $(TESTOBJS) $(HOBJS))
|
||||
OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOSTOBJS) $(TESTOBJS))
|
||||
|
||||
CLEANSUFFIXES = *.d *.o *~ *.h.c *.map *.ver *.ho *.gcno *.gcda
|
||||
CLEANSUFFIXES = *.d *.o *~ *.ho *.map *.ver *.gcno *.gcda
|
||||
DISTCLEANSUFFIXES = *.pc
|
||||
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a
|
||||
|
||||
define RULES
|
||||
clean::
|
||||
$(RM) $(OBJS) $(OBJS:.o=.d)
|
||||
$(RM) $(HOSTPROGS)
|
||||
$(RM) $(TOOLS)
|
||||
endef
|
||||
|
||||
$(eval $(RULES))
|
||||
|
||||
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d))
|
||||
-include $(wildcard $(OBJS:.o=.d) $(TESTOBJS:.o=.d))
|
||||
|
@@ -1,86 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file was copied from the following newsgroup posting:
|
||||
*
|
||||
* Newsgroups: mod.std.unix
|
||||
* Subject: public domain AT&T getopt source
|
||||
* Date: 3 Nov 85 19:34:15 GMT
|
||||
*
|
||||
* Here's something you've all been waiting for: the AT&T public domain
|
||||
* source for getopt(3). It is the code which was given out at the 1985
|
||||
* UNIFORUM conference in Dallas. I obtained it by electronic mail
|
||||
* directly from AT&T. The people there assure me that it is indeed
|
||||
* in the public domain.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
|
||||
static int opterr = 1;
|
||||
static int optind = 1;
|
||||
static int optopt;
|
||||
static char *optarg;
|
||||
|
||||
#undef fprintf
|
||||
|
||||
static int getopt(int argc, char *argv[], char *opts)
|
||||
{
|
||||
static int sp = 1;
|
||||
int c;
|
||||
char *cp;
|
||||
|
||||
if (sp == 1) {
|
||||
if (optind >= argc ||
|
||||
argv[optind][0] != '-' || argv[optind][1] == '\0')
|
||||
return EOF;
|
||||
else if (!strcmp(argv[optind], "--")) {
|
||||
optind++;
|
||||
return EOF;
|
||||
}
|
||||
}
|
||||
optopt = c = argv[optind][sp];
|
||||
if (c == ':' || (cp = strchr(opts, c)) == NULL) {
|
||||
fprintf(stderr, ": illegal option -- %c\n", c);
|
||||
if (argv[optind][++sp] == '\0') {
|
||||
optind++;
|
||||
sp = 1;
|
||||
}
|
||||
return '?';
|
||||
}
|
||||
if (*++cp == ':') {
|
||||
if (argv[optind][sp+1] != '\0')
|
||||
optarg = &argv[optind++][sp+1];
|
||||
else if(++optind >= argc) {
|
||||
fprintf(stderr, ": option requires an argument -- %c\n", c);
|
||||
sp = 1;
|
||||
return '?';
|
||||
} else
|
||||
optarg = argv[optind++];
|
||||
sp = 1;
|
||||
} else {
|
||||
if (argv[optind][++sp] == '\0') {
|
||||
sp = 1;
|
||||
optind++;
|
||||
}
|
||||
optarg = NULL;
|
||||
}
|
||||
|
||||
return c;
|
||||
}
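A typical option-parsing loop over getopt(), shown as an illustrative sketch of standard usage rather than code from the tree (on POSIX systems the declaration comes from <unistd.h>; the compat implementation above is the fallback carried for platforms without it):

#include <stdio.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
    int c, verbose = 0;
    const char *output = NULL;

    /* "vo:" means: -v takes no argument, -o requires one. */
    while ((c = getopt(argc, argv, "vo:")) != -1) {
        switch (c) {
        case 'v': verbose = 1;      break;
        case 'o': output  = optarg; break;
        default:  return 1;         /* '?' on unknown option or missing argument */
        }
    }
    printf("verbose=%d output=%s\n", verbose, output ? output : "(none)");
    return 0;
}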
|
@@ -1,71 +0,0 @@
/*
 * C99-compatible snprintf() and vsnprintf() implementations
 * Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <stdarg.h>
#include <limits.h>
#include <string.h>

#include "compat/va_copy.h"
#include "libavutil/error.h"

#if defined(__MINGW32__)
#define EOVERFLOW EFBIG
#endif

int avpriv_snprintf(char *s, size_t n, const char *fmt, ...)
{
    va_list ap;
    int ret;

    va_start(ap, fmt);
    ret = avpriv_vsnprintf(s, n, fmt, ap);
    va_end(ap);

    return ret;
}

int avpriv_vsnprintf(char *s, size_t n, const char *fmt,
                     va_list ap)
{
    int ret;
    va_list ap_copy;

    if (n == 0)
        return _vscprintf(fmt, ap);
    else if (n > INT_MAX)
        return AVERROR(EOVERFLOW);

    /* we use n - 1 here because if the buffer is not big enough, the MS
     * runtime libraries don't add a terminating zero at the end. MSDN
     * recommends to provide _snprintf/_vsnprintf() a buffer size that
     * is one less than the actual buffer, and zero it before calling
     * _snprintf/_vsnprintf() to workaround this problem.
     * See http://msdn.microsoft.com/en-us/library/1kt27hek(v=vs.80).aspx */
    memset(s, 0, n);
    va_copy(ap_copy, ap);
    ret = _vsnprintf(s, n - 1, fmt, ap_copy);
    va_end(ap_copy);
    if (ret == -1)
        ret = _vscprintf(fmt, ap);

    return ret;
}
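The wrapper exists because the MSVC runtime's _snprintf()/_vsnprintf() return -1 on truncation and do not guarantee NUL termination, while C99 requires termination (for n > 0) and the would-be length as the return value. A small, portable illustration of the C99 contract the wrapper emulates, using the standard snprintf() rather than the avpriv_* symbols:

#include <stdio.h>

int main(void)
{
    char buf[8];
    /* C99: the return value is the length that would have been written, so
     * ret >= sizeof(buf) signals truncation; buf is still NUL-terminated. */
    int ret = snprintf(buf, sizeof(buf), "%s", "hello, world");
    printf("ret=%d truncated=%d buf=\"%s\"\n", ret, ret >= (int)sizeof(buf), buf);
    return 0;
}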
@@ -1,38 +0,0 @@
/*
 * C99-compatible snprintf() and vsnprintf() implementations
 * Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef COMPAT_SNPRINTF_H
#define COMPAT_SNPRINTF_H

#include <stdarg.h>
#include <stdio.h>

int avpriv_snprintf(char *s, size_t n, const char *fmt, ...);
int avpriv_vsnprintf(char *s, size_t n, const char *fmt, va_list ap);

#undef snprintf
#undef _snprintf
#undef vsnprintf
#define snprintf avpriv_snprintf
#define _snprintf avpriv_snprintf
#define vsnprintf avpriv_vsnprintf

#endif /* COMPAT_SNPRINTF_H */
@@ -1,10 +0,0 @@
#!/bin/sh

n=10

case "$1" in
    -n)  n=$2; shift 2 ;;
    -n*) n=${1#-n}; shift ;;
esac

exec sed ${n}q "$@"
@@ -1,34 +0,0 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

int plan9_main(int argc, char **argv);

#undef main
int main(int argc, char **argv)
{
    /* The setfcr() function in lib9 is broken, must use asm. */
#ifdef __i386
    short fcr;
    __asm__ volatile ("fstcw %0 \n"
                      "or $63, %0 \n"
                      "fldcw %0 \n"
                      : "=m"(fcr));
#endif

    return plan9_main(argc, argv);
}
@@ -1,2 +0,0 @@
#!/bin/sh
exec awk "BEGIN { for (i = 2; i < ARGC; i++) printf \"$1\", ARGV[i] }" "$@"
@@ -1,94 +0,0 @@
|
||||
/*
|
||||
* C99-compatible strtod() implementation
|
||||
* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include <ctype.h>
|
||||
#include <limits.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/mathematics.h"
|
||||
|
||||
static char *check_nan_suffix(char *s)
|
||||
{
|
||||
char *start = s;
|
||||
|
||||
if (*s++ != '(')
|
||||
return start;
|
||||
|
||||
while ((*s >= 'a' && *s <= 'z') || (*s >= 'A' && *s <= 'Z') ||
|
||||
(*s >= '0' && *s <= '9') || *s == '_')
|
||||
s++;
|
||||
|
||||
return *s == ')' ? s + 1 : start;
|
||||
}
|
||||
|
||||
#undef strtod
|
||||
double strtod(const char *, char **);
|
||||
|
||||
double avpriv_strtod(const char *nptr, char **endptr)
|
||||
{
|
||||
char *end;
|
||||
double res;
|
||||
|
||||
/* Skip leading spaces */
|
||||
while (isspace(*nptr))
|
||||
nptr++;
|
||||
|
||||
if (!av_strncasecmp(nptr, "infinity", 8)) {
|
||||
end = nptr + 8;
|
||||
res = INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "inf", 3)) {
|
||||
end = nptr + 3;
|
||||
res = INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "+infinity", 9)) {
|
||||
end = nptr + 9;
|
||||
res = INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "+inf", 4)) {
|
||||
end = nptr + 4;
|
||||
res = INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "-infinity", 9)) {
|
||||
end = nptr + 9;
|
||||
res = -INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "-inf", 4)) {
|
||||
end = nptr + 4;
|
||||
res = -INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "nan", 3)) {
|
||||
end = check_nan_suffix(nptr + 3);
|
||||
res = NAN;
|
||||
} else if (!av_strncasecmp(nptr, "+nan", 4) ||
|
||||
!av_strncasecmp(nptr, "-nan", 4)) {
|
||||
end = check_nan_suffix(nptr + 4);
|
||||
res = NAN;
|
||||
} else if (!av_strncasecmp(nptr, "0x", 2) ||
|
||||
!av_strncasecmp(nptr, "-0x", 3) ||
|
||||
!av_strncasecmp(nptr, "+0x", 3)) {
|
||||
/* FIXME this doesn't handle exponents, non-integers (float/double)
|
||||
* and numbers too large for long long */
|
||||
res = strtoll(nptr, &end, 16);
|
||||
} else {
|
||||
res = strtod(nptr, &end);
|
||||
}
|
||||
|
||||
if (endptr)
|
||||
*endptr = end;
|
||||
|
||||
return res;
|
||||
}
|
@@ -1,7 +0,0 @@
#include_next <math.h>

#undef INFINITY
#undef NAN

#define INFINITY (*(const float*)((const unsigned []){ 0x7f800000 }))
#define NAN (*(const float*)((const unsigned []){ 0x7fc00000 }))
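The two bit patterns above are the standard IEEE-754 single-precision encodings of positive infinity and a quiet NaN. A quick self-contained check (assumes IEEE-754 floats and a 32-bit unsigned int, which holds on the platforms FFmpeg targets):

#include <math.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const unsigned inf_bits = 0x7f800000, nan_bits = 0x7fc00000;
    float f;

    memcpy(&f, &inf_bits, sizeof(f));
    printf("0x7f800000 -> isinf: %d\n", isinf(f) != 0);
    memcpy(&f, &nan_bits, sizeof(f));
    printf("0x7fc00000 -> isnan: %d\n", isnan(f) != 0);
    return 0;
}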
503
doc/APIchanges
@@ -1,276 +1,69 @@
Never assume the API of libav* to be stable unless at least 1 month has passed
since the last major version increase or the API was added.
since the last major version increase.

The last version increases were:
libavcodec:    2012-01-27
libavdevice:   2011-04-18
libavfilter:   2012-06-22
libavfilter:   2011-04-18
libavformat:   2012-01-27
libavresample: 2012-10-05
libavresample: 2012-04-24
libpostproc:   2011-04-18
libswresample: 2011-09-19
libswscale:    2011-06-20
libavutil:     2012-10-22
libavutil:     2011-04-18


API changes, most recent first:
|
||||
2012-12-20 - xxxxxxx - lavfi 3.28.100 - avfilter.h
|
||||
Add AVFilterLink.channels, avfilter_link_get_channels()
|
||||
and avfilter_ref_get_channels().
|
||||
|
||||
2012-12-15 - 2ada584d - lavc 54.80.100 - avcodec.h
|
||||
Add pkt_size field to AVFrame.
|
||||
|
||||
2012-11-25 - c70ec631 - lavu 52.9.100 - opt.h
|
||||
Add the following convenience functions to opt.h:
|
||||
av_opt_get_image_size
|
||||
av_opt_get_pixel_fmt
|
||||
av_opt_get_sample_fmt
|
||||
av_opt_set_image_size
|
||||
av_opt_set_pixel_fmt
|
||||
av_opt_set_sample_fmt
|
||||
|
||||
2012-11-17 - 4cd74c81 - lavu 52.8.100 - bprint.h
|
||||
Add av_bprint_strftime().
|
||||
|
||||
2012-11-15 - 92648107 - lavu 52.7.100 - opt.h
|
||||
Add av_opt_get_key_value().
|
||||
|
||||
2012-11-13 - 79456652 - lavfi 3.23.100 - avfilter.h
|
||||
Add channels field to AVFilterBufferRefAudioProps.
|
||||
|
||||
2012-11-03 - 481fdeee - lavu 52.3.100 - opt.h
|
||||
Add AV_OPT_TYPE_SAMPLE_FMT value to AVOptionType enum.
|
||||
|
||||
2012-10-21 - 6fb2fd8 - lavc 54.68.100 - avcodec.h
|
||||
lavfi 3.20.100 - avfilter.h
|
||||
Add AV_PKT_DATA_STRINGS_METADATA side data type, used to transmit key/value
|
||||
strings between AVPacket and AVFrame, and add metadata field to
|
||||
AVCodecContext (which shall not be accessed by users; see AVFrame metadata
|
||||
instead).
|
||||
|
||||
2012-09-27 - a70b493 - lavd 54.3.100 - version.h
|
||||
Add LIBAVDEVICE_IDENT symbol.
|
||||
|
||||
2012-09-27 - a70b493 - lavfi 3.18.100 - version.h
|
||||
Add LIBAVFILTER_IDENT symbol.
|
||||
|
||||
2012-09-27 - a70b493 - libswr 0.16.100 - version.h
|
||||
Add LIBSWRESAMPLE_VERSION, LIBSWRESAMPLE_BUILD
|
||||
and LIBSWRESAMPLE_IDENT symbols.
|
||||
|
||||
2012-09-06 - 29e972f - lavu 51.72.100 - parseutils.h
|
||||
Add av_small_strptime() time parsing function.
|
||||
|
||||
Can be used as a stripped-down replacement for strptime(), on
|
||||
systems which do not support it.
|
||||
|
||||
2012-08-25 - 2626cc4 - lavf 54.28.100
|
||||
Matroska demuxer now identifies SRT subtitles as AV_CODEC_ID_SUBRIP instead
|
||||
of AV_CODEC_ID_TEXT.
|
||||
|
||||
2012-08-13 - 5c0d8bc - lavfi 3.8.100 - avfilter.h
|
||||
Add avfilter_get_class() function, and priv_class field to AVFilter
|
||||
struct.
|
||||
|
||||
2012-08-12 - a25346e - lavu 51.69.100 - opt.h
|
||||
Add AV_OPT_FLAG_FILTERING_PARAM symbol in opt.h.
|
||||
|
||||
2012-07-31 - 23fc4dd - lavc 54.46.100
|
||||
Add channels field to AVFrame.
|
||||
|
||||
2012-07-30 - f893904 - lavu 51.66.100
|
||||
Add av_get_channel_description()
|
||||
and av_get_standard_channel_layout() functions.
|
||||
|
||||
2012-07-21 - 016a472 - lavc 54.43.100
|
||||
Add decode_error_flags field to AVFrame.
|
||||
|
||||
2012-07-20 - b062936 - lavf 54.18.100
|
||||
Add avformat_match_stream_specifier() function.
|
||||
|
||||
2012-07-14 - f49ec1b - lavc 54.38.100 - avcodec.h
|
||||
Add metadata to AVFrame, and the accessor functions
|
||||
av_frame_get_metadata() and av_frame_set_metadata().
|
||||
|
||||
2012-07-10 - 0e003d8 - lavc 54.33.100
|
||||
Add av_fast_padded_mallocz().
|
||||
|
||||
2012-07-10 - 21d5609 - lavfi 3.2.0 - avfilter.h
|
||||
Add init_opaque() callback to AVFilter struct.
|
||||
|
||||
2012-06-26 - e6674e4 - lavu 51.63.100 - imgutils.h
|
||||
Add functions to libavutil/imgutils.h:
|
||||
av_image_get_buffer_size()
|
||||
av_image_fill_arrays()
|
||||
av_image_copy_to_buffer()
|
||||
|
||||
2012-06-24 - c41899a - lavu 51.62.100 - version.h
|
||||
version moved from avutil.h to version.h
|
||||
|
||||
2012-04-11 - 359abb1 - lavu 51.58.100 - error.h
|
||||
Add av_make_error_string() and av_err2str() utilities to
|
||||
libavutil/error.h.
|
||||
|
||||
2012-06-05 - 62b39d4 - lavc 54.24.100
|
||||
Add pkt_duration field to AVFrame.
|
||||
|
||||
2012-05-24 - f2ee065 - lavu 51.54.100
|
||||
2012-05-24 - xxxxxxx - lavu 51.54.100
|
||||
Move AVPALETTE_SIZE and AVPALETTE_COUNT macros from
|
||||
libavcodec/avcodec.h to libavutil/pixfmt.h.
|
||||
|
||||
2012-05-14 - 94a9ac1 - lavf 54.5.100
|
||||
2012-05-07 - xxxxxxx - lavf 54.5.100
|
||||
Add av_guess_sample_aspect_ratio() function.
|
||||
|
||||
2012-04-20 - 65fa7bc - lavfi 2.70.100
|
||||
2012-04-20 - xxxxxxx - lavfi 2.70.100
|
||||
Add avfilter_unref_bufferp() to avfilter.h.
|
||||
|
||||
2012-04-13 - 162e400 - lavfi 2.68.100
|
||||
2012-04-12 - xxxxxxx - lavfi 2.68.100
|
||||
Install libavfilter/asrc_abuffer.h public header.
|
||||
|
||||
2012-03-26 - a67d9cf - lavfi 2.66.100
|
||||
Add avfilter_fill_frame_from_{audio_,}buffer_ref() functions.
|
||||
|
||||
2012-12-29 - 2ce43b3 / d8fd06c - lavu 52.13.100 / 52.3.0 - avstring.h
|
||||
Add av_basename() and av_dirname().
|
||||
|
||||
2012-11-11 - 03b0787 / 5980f5d - lavu 52.6.100 / 52.2.0 - audioconvert.h
|
||||
Rename audioconvert.h to channel_layout.h. audioconvert.h is now deprecated.
|
||||
|
||||
2012-11-05 - 7d26be6 / dfde8a3 - lavu 52.5.100 / 52.1.0 - intmath.h
|
||||
Add av_ctz() for trailing zero bit count
|
||||
|
||||
2012-10-21 - e3a91c5 / a893655 - lavu 51.77.100 / 51.45.0 - error.h
|
||||
Add AVERROR_EXPERIMENTAL
|
||||
|
||||
2012-10-12 - a33ed6b / d2fcb35 - lavu 51.76.100 / 51.44.0 - pixdesc.h
|
||||
Add functions for accessing pixel format descriptors.
|
||||
Accessing the av_pix_fmt_descriptors array directly is now
|
||||
deprecated.
|
||||
|
||||
2012-10-11 - f391e40 / 9a92aea - lavu 51.75.100 / 51.43.0 - aes.h, md5.h, sha.h, tree.h
|
||||
Add functions for allocating the opaque contexts for the algorithms,
|
||||
|
||||
2012-10-10 - de31814 / b522000 - lavf 54.32.100 / 54.18.0 - avio.h
|
||||
Add avio_closep to complement avio_close.
|
||||
|
||||
2012-10-08 - ae77266 / 78071a1 - lavu 51.74.100 / 51.42.0 - pixfmt.h
|
||||
Rename PixelFormat to AVPixelFormat and all PIX_FMT_* to AV_PIX_FMT_*.
|
||||
To provide backwards compatibility, PixelFormat is now #defined as
|
||||
AVPixelFormat.
|
||||
Note that this can break user code that includes pixfmt.h and uses the
|
||||
'PixelFormat' identifier. Such code should either #undef PixelFormat
|
||||
or stop using the PixelFormat name.
|
||||
|
||||
2012-10-05 - 55c49af / e7ba5b1 - lavr 1.0.0 - avresample.h
|
||||
Data planes parameters to avresample_convert() and
|
||||
avresample_read() are now uint8_t** instead of void**.
|
||||
Libavresample is now stable.
|
||||
|
||||
2012-09-24 - 46a3595 / a42aada - lavc 54.59.100 / 54.28.0 - avcodec.h
|
||||
Add avcodec_free_frame(). This function must now
|
||||
be used for freeing an AVFrame.
|
||||
|
||||
2012-09-12 - e3e09f2 / 8919fee - lavu 51.73.100 / 51.41.0 - audioconvert.h
|
||||
Added AV_CH_LOW_FREQUENCY_2 channel mask value.
|
||||
|
||||
2012-09-04 - b21b5b0 / 686a329 - lavu 51.71.100 / 51.40.0 - opt.h
|
||||
Reordered the fields in default_val in AVOption, changed which
|
||||
default_val field is used for which AVOptionType.
|
||||
|
||||
2012-08-30 - 98298eb / a231832 - lavc 54.54.101 / 54.26.1 - avcodec.h
|
||||
Add codec descriptor properties AV_CODEC_PROP_LOSSY and
|
||||
AV_CODEC_PROP_LOSSLESS.
|
||||
|
||||
2012-08-18 - lavc 54.26 - avcodec.h
|
||||
Add codec descriptors for accessing codec properties without having
|
||||
to refer to a specific decoder or encoder.
|
||||
|
||||
f5f3684 / c223d79 - Add an AVCodecDescriptor struct and functions
|
||||
avcodec_descriptor_get() and avcodec_descriptor_next().
|
||||
f5f3684 / 51efed1 - Add AVCodecDescriptor.props and AV_CODEC_PROP_INTRA_ONLY.
|
||||
6c180b3 / 91e59fe - Add avcodec_descriptor_get_by_name().
|
||||
|
||||
2012-08-08 - f5f3684 / 987170c - lavu 51.68.100 / 51.38.0 - dict.h
|
||||
Add av_dict_count().
|
||||
|
||||
2012-08-07 - 7a72695 / 104e10f - lavc 54.51.100 / 54.25.0 - avcodec.h
|
||||
Rename CodecID to AVCodecID and all CODEC_ID_* to AV_CODEC_ID_*.
|
||||
To provide backwards compatibility, CodecID is now #defined as AVCodecID.
|
||||
Note that this can break user code that includes avcodec.h and uses the
|
||||
'CodecID' identifier. Such code should either #undef CodecID or stop using the
|
||||
CodecID name.
|
||||
|
||||
2012-08-03 - e776ee8 / 239fdf1 - lavu 51.66.101 / 51.37.1 - cpu.h
|
||||
lsws 2.1.1 - swscale.h
|
||||
Rename AV_CPU_FLAG_MMX2 ---> AV_CPU_FLAG_MMXEXT.
|
||||
Rename SWS_CPU_CAPS_MMX2 ---> SWS_CPU_CAPS_MMXEXT.
|
||||
|
||||
2012-07-29 - 7c26761 / 681ed00 - lavf 54.22.100 / 54.13.0 - avformat.h
|
||||
Add AVFMT_FLAG_NOBUFFER for low latency use cases.
|
||||
|
||||
2012-07-10 - 5fade8a - lavu 51.37.0
|
||||
Add av_malloc_array() and av_mallocz_array()
|
||||
|
||||
2012-06-22 - e847f41 / d3d3a32 - lavu 51.61.100 / 51.34.0
|
||||
Add av_usleep()
|
||||
|
||||
2012-06-20 - 4da42eb / ae0a301 - lavu 51.60.100 / 51.33.0
|
||||
Move av_gettime() to libavutil, add libavutil/time.h
|
||||
|
||||
2012-06-09 - 82edf67 / 3971be0 - lavr 0.0.3
|
||||
Add a parameter to avresample_build_matrix() for Dolby/DPLII downmixing.
|
||||
|
||||
2012-06-12 - c7b9eab / 9baeff9 - lavfi 2.79.100 / 2.23.0 - avfilter.h
|
||||
Add AVFilterContext.nb_inputs/outputs. Deprecate
|
||||
AVFilterContext.input/output_count.
|
||||
|
||||
2012-06-12 - c7b9eab / 84b9fbe - lavfi 2.79.100 / 2.22.0 - avfilter.h
|
||||
Add avfilter_pad_get_type() and avfilter_pad_get_name(). Those
|
||||
should now be used instead of accessing AVFilterPad members
|
||||
directly.
|
||||
|
||||
2012-06-12 - 3630a07 / b0f0dfc - lavu 51.57.100 / 51.32.0 - audioconvert.h
|
||||
Add av_get_channel_layout_channel_index(), av_get_channel_name()
|
||||
and av_channel_layout_extract_channel().
|
||||
|
||||
2012-05-25 - 53ce990 / 154486f - lavu 51.55.100 / 51.31.0 - opt.h
|
||||
Add av_opt_set_bin()
|
||||
|
||||
2012-05-15 - lavfi 2.74.100 / 2.17.0
|
||||
2012-05-15 - lavfi 2.17.0
|
||||
Add support for audio filters
|
||||
61930bd / ac71230, 1cbf7fb / a2cd9be - add video/audio buffer sink in a new installed
|
||||
ac71230/a2cd9be - add video/audio buffer sink in a new installed
|
||||
header buffersink.h
|
||||
1cbf7fb / 720c6b7 - add av_buffersrc_write_frame(), deprecate
|
||||
720c6b7 - add av_buffersrc_write_frame(), deprecate
|
||||
av_vsrc_buffer_add_frame()
|
||||
61930bd / ab16504 - add avfilter_copy_buf_props()
|
||||
61930bd / 9453c9e - add extended_data to AVFilterBuffer
|
||||
61930bd / 1b8c927 - add avfilter_get_audio_buffer_ref_from_arrays()
|
||||
ab16504 - add avfilter_copy_buf_props()
|
||||
9453c9e - add extended_data to AVFilterBuffer
|
||||
1b8c927 - add avfilter_get_audio_buffer_ref_from_arrays()
|
||||
|
||||
2012-05-09 - lavu 51.53.100 / 51.30.0 - samplefmt.h
|
||||
61930bd / 142e740 - add av_samples_copy()
|
||||
61930bd / 6d7f617 - add av_samples_set_silence()
|
||||
2012-05-09 - lavu 51.30.0 - samplefmt.h
|
||||
142e740 - add av_samples_copy()
|
||||
6d7f617 - add av_samples_set_silence()
|
||||
|
||||
2012-05-09 - 61930bd / a5117a2 - lavc 54.21.101 / 54.13.1
|
||||
2012-05-09 - a5117a2 - lavc 54.13.1
|
||||
For audio formats with fixed frame size, the last frame
|
||||
no longer needs to be padded with silence, libavcodec
|
||||
will handle this internally (effectively all encoders
|
||||
behave as if they had CODEC_CAP_SMALL_LAST_FRAME set).
|
||||
|
||||
2012-05-07 - 653d117 / 828bd08 - lavc 54.20.100 / 54.13.0 - avcodec.h
|
||||
2012-05-07 - 828bd08 - lavc 54.13.0 - avcodec.h
|
||||
Add sample_rate and channel_layout fields to AVFrame.
|
||||
|
||||
2012-05-01 - 2330eb1 / 4010d72 - lavr 0.0.1
|
||||
2012-05-01 - 4010d72 - lavr 0.0.1
|
||||
Change AV_MIX_COEFF_TYPE_Q6 to AV_MIX_COEFF_TYPE_Q8.
|
||||
|
||||
2012-04-25 - e890b68 / 3527a73 - lavu 51.48.100 / 51.29.0 - cpu.h
|
||||
2012-04-25 - 3527a73 - lavu 51.29.0 - cpu.h
|
||||
Add av_parse_cpu_flags()
|
||||
|
||||
2012-04-24 - 3ead79e / c8af852 - lavr 0.0.0
|
||||
2012-04-24 - c8af852 - lavr 0.0.0
|
||||
Add libavresample audio conversion library
|
||||
|
||||
2012-04-20 - 3194ab7 / 0c0d1bc - lavu 51.47.100 / 51.28.0 - audio_fifo.h
|
||||
2012-04-20 - 0c0d1bc - lavu 51.28.0 - audio_fifo.h
|
||||
Add audio FIFO functions:
|
||||
av_audio_fifo_free()
|
||||
av_audio_fifo_alloc()
|
||||
@@ -282,10 +75,10 @@ API changes, most recent first:
|
||||
av_audio_fifo_size()
|
||||
av_audio_fifo_space()
|
||||
|
||||
2012-04-14 - lavfi 2.70.100 / 2.16.0 - avfiltergraph.h
|
||||
7432bcf / d7bcc71 Add avfilter_graph_parse2().
|
||||
2012-04-14 - lavfi 2.16.0 - avfiltergraph.h
|
||||
d7bcc71 Add avfilter_graph_parse2().
|
||||
|
||||
2012-04-08 - 6bfb304 / 4d693b0 - lavu 51.46.100 / 51.27.0 - samplefmt.h
|
||||
2012-04-08 - 4d693b0 - lavu 51.27.0 - samplefmt.h
|
||||
Add av_get_packed_sample_fmt() and av_get_planar_sample_fmt()
|
||||
|
||||
2012-03-21 - b75c67d - lavu 51.43.100
|
||||
@@ -313,73 +106,69 @@ API changes, most recent first:
|
||||
2012-01-24 - 0c3577b - lavfi 2.60.100
|
||||
Add avfilter_graph_dump.
|
||||
|
||||
2012-03-20 - 0ebd836 / 3c90cc2 - lavfo 54.2.0
|
||||
Deprecate av_read_packet(), use av_read_frame() with
|
||||
AVFMT_FLAG_NOPARSE | AVFMT_FLAG_NOFILLIN in AVFormatContext.flags
|
||||
2012-03-05 - lavc 54.8.0
|
||||
6699d07 Add av_get_exact_bits_per_sample()
|
||||
9524cf7 Add av_get_audio_frame_duration()
|
||||
|
||||
2012-03-05 - lavc 54.10.100 / 54.8.0
|
||||
f095391 / 6699d07 Add av_get_exact_bits_per_sample()
|
||||
f095391 / 9524cf7 Add av_get_audio_frame_duration()
|
||||
|
||||
2012-03-04 - 2af8f2c / 44fe77b - lavc 54.8.100 / 54.7.0 - avcodec.h
|
||||
2012-03-04 - 44fe77b - lavc 54.7.0 - avcodec.h
|
||||
Add av_codec_is_encoder/decoder().
|
||||
|
||||
2012-03-01 - 1eb7f39 / 442c132 - lavc 54.5.100 / 54.3.0 - avcodec.h
|
||||
2012-03-01 - 442c132 - lavc 54.3.0 - avcodec.h
|
||||
Add av_packet_shrink_side_data.
|
||||
|
||||
2012-02-29 - 79ae084 / dd2a4bc - lavf 54.2.100 / 54.2.0 - avformat.h
|
||||
2012-02-29 - dd2a4bc - lavf 54.2.0 - avformat.h
|
||||
Add AVStream.attached_pic and AV_DISPOSITION_ATTACHED_PIC,
|
||||
used for dealing with attached pictures/cover art.
|
||||
|
||||
2012-02-25 - 305e4b3 / c9bca80 - lavu 51.41.100 / 51.24.0 - error.h
|
||||
2012-02-25 - c9bca80 - lavu 51.24.0 - error.h
|
||||
Add AVERROR_UNKNOWN
|
||||
NOTE: this was backported to 0.8
|
||||
|
||||
2012-02-20 - eadd426 / e9cda85 - lavc 54.2.100 / 54.2.0
|
||||
2012-02-20 - e9cda85 - lavc 54.2.0
|
||||
Add duration field to AVCodecParserContext
|
||||
|
||||
2012-02-20 - eadd426 / 0b42a93 - lavu 51.40.100 / 51.23.1 - mathematics.h
|
||||
2012-02-20 - 0b42a93 - lavu 51.23.1 - mathematics.h
|
||||
Add av_rescale_q_rnd()
|
||||
|
||||
2012-02-08 - f2b20b7 / 38d5533 - lavu 51.38.101 / 51.22.1 - pixdesc.h
|
||||
2012-02-08 - 38d5533 - lavu 51.22.1 - pixdesc.h
|
||||
Add PIX_FMT_PSEUDOPAL flag.
|
||||
|
||||
2012-02-08 - f2b20b7 / 52f82a1 - lavc 54.2.100 / 54.1.0
|
||||
2012-02-08 - 52f82a1 - lavc 54.01.0
|
||||
Add avcodec_encode_video2() and deprecate avcodec_encode_video().
|
||||
|
||||
2012-02-01 - 4c677df / 316fc74 - lavc 54.1.0
|
||||
2012-02-01 - 316fc74 - lavc 54.01.0
|
||||
Add av_fast_padded_malloc() as alternative for av_realloc() when aligned
|
||||
memory is required. The buffer will always have FF_INPUT_BUFFER_PADDING_SIZE
|
||||
zero-padded bytes at the end.
|
||||
|
||||
2012-01-31 - a369a6b / dd6d3b0 - lavf 54.1.0
|
||||
2012-01-31 - dd6d3b0 - lavf 54.01.0
|
||||
Add avformat_get_riff_video_tags() and avformat_get_riff_audio_tags().
|
||||
NOTE: this was backported to 0.8
|
||||
|
||||
2012-01-31 - a369a6b / af08d9a - lavc 54.1.0
|
||||
2012-01-31 - af08d9a - lavc 54.01.0
|
||||
Add avcodec_is_open() function.
|
||||
NOTE: this was backported to 0.8
|
||||
|
||||
2012-01-30 - 151ecc2 / 8b93312 - lavu 51.36.100 / 51.22.0 - intfloat.h
|
||||
2012-01-30 - 8b93312 - lavu 51.22.0 - intfloat.h
|
||||
Add a new installed header libavutil/intfloat.h with int/float punning
|
||||
functions.
|
||||
NOTE: this was backported to 0.8
|
||||
|
||||
2012-01-25 - lavf 53.31.100 / 53.22.0
|
||||
3c5fe5b / f1caf01 Allow doing av_write_frame(ctx, NULL) for flushing possible
|
||||
2012-01-25 - lavf 53.22.0
|
||||
f1caf01 Allow doing av_write_frame(ctx, NULL) for flushing possible
|
||||
buffered data within a muxer. Added AVFMT_ALLOW_FLUSH for
|
||||
muxers supporting it (av_write_frame makes sure it is called
|
||||
only for muxers with this flag).
|
||||
|
||||
2012-01-15 - lavc 53.56.105 / 53.34.0
|
||||
2012-01-15 - lavc 53.34.0
|
||||
New audio encoding API:
|
||||
67f5650 / b2c75b6 Add CODEC_CAP_VARIABLE_FRAME_SIZE capability for use by audio
|
||||
b2c75b6 Add CODEC_CAP_VARIABLE_FRAME_SIZE capability for use by audio
|
||||
encoders.
|
||||
67f5650 / 5ee5fa0 Add avcodec_fill_audio_frame() as a convenience function.
|
||||
67f5650 / b2c75b6 Add avcodec_encode_audio2() and deprecate avcodec_encode_audio().
|
||||
5ee5fa0 Add avcodec_fill_audio_frame() as a convenience function.
|
||||
b2c75b6 Add avcodec_encode_audio2() and deprecate avcodec_encode_audio().
|
||||
Add AVCodec.encode2().
|
||||
|
||||
2012-01-12 - b18e17e / 3167dc9 - lavfi 2.59.100 / 2.15.0
|
||||
2012-01-12 - 3167dc9 - lavfi 2.15.0
|
||||
Add a new installed header -- libavfilter/version.h -- with version macros.
|
||||
|
||||
2011-12-08 - a502939 - lavfi 2.52.0
|
||||
@@ -400,37 +189,37 @@ API changes, most recent first:
|
||||
2011-10-20 - b35e9e1 - lavu 51.22.0
|
||||
Add av_strtok() to avstring.h.
|
||||
|
||||
2012-01-03 - ad1c8dd / b73ec05 - lavu 51.34.100 / 51.21.0
|
||||
2011-01-03 - b73ec05 - lavu 51.21.0
|
||||
Add av_popcount64
|
||||
|
||||
2011-12-18 - 7c29313 / 8400b12 - lavc 53.46.1 / 53.28.1
|
||||
2011-12-18 - 8400b12 - lavc 53.28.1
|
||||
Deprecate AVFrame.age. The field is unused.
|
||||
|
||||
2011-12-12 - 8bc7fe4 / 5266045 - lavf 53.25.0 / 53.17.0
|
||||
2011-12-12 - 5266045 - lavf 53.17.0
|
||||
Add avformat_close_input().
|
||||
Deprecate av_close_input_file() and av_close_input_stream().
|
||||
|
||||
2011-12-02 - e4de716 / 0eea212 - lavc 53.40.0 / 53.25.0
|
||||
2011-12-02 - 0eea212 - lavc 53.25.0
|
||||
Add nb_samples and extended_data fields to AVFrame.
|
||||
Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE.
|
||||
Deprecate avcodec_decode_audio3() in favor of avcodec_decode_audio4().
|
||||
avcodec_decode_audio4() writes output samples to an AVFrame, which allows
|
||||
audio decoders to use get_buffer().
|
||||
|
||||
2011-12-04 - e4de716 / 560f773 - lavc 53.40.0 / 53.24.0
|
||||
2011-12-04 - 560f773 - lavc 53.24.0
|
||||
Change AVFrame.data[4]/base[4]/linesize[4]/error[4] to [8] at next major bump.
|
||||
Change AVPicture.data[4]/linesize[4] to [8] at next major bump.
|
||||
Change AVCodecContext.error[4] to [8] at next major bump.
|
||||
Add AV_NUM_DATA_POINTERS to simplify the bump transition.
|
||||
|
||||
2011-11-23 - 8e576d5 / bbb46f3 - lavu 51.27.0 / 51.18.0
|
||||
2011-11-23 - bbb46f3 - lavu 51.18.0
|
||||
Add av_samples_get_buffer_size(), av_samples_fill_arrays(), and
|
||||
av_samples_alloc(), to samplefmt.h.
|
||||
|
||||
2011-11-23 - 8e576d5 / 8889cc4 - lavu 51.27.0 / 51.17.0
|
||||
2011-11-23 - 8889cc4 - lavu 51.17.0
|
||||
Add planar sample formats and av_sample_fmt_is_planar() to samplefmt.h.
|
||||
|
||||
2011-11-19 - dbb38bc / f3a29b7 - lavc 53.36.0 / 53.21.0
|
||||
2011-11-19 - f3a29b7 - lavc 53.21.0
|
||||
Move some AVCodecContext fields to a new private struct, AVCodecInternal,
|
||||
which is accessed from a new field, AVCodecContext.internal.
|
||||
- fields moved:
|
||||
@@ -438,55 +227,55 @@ API changes, most recent first:
|
||||
AVCodecContext.internal_buffer_count --> AVCodecInternal.buffer_count
|
||||
AVCodecContext.is_copy --> AVCodecInternal.is_copy
|
||||
|
||||
2011-11-16 - 8709ba9 / 6270671 - lavu 51.26.0 / 51.16.0
|
||||
2011-11-16 - 6270671 - lavu 51.16.0
|
||||
Add av_timegm()
|
||||
|
||||
2011-11-13 - lavf 53.21.0 / 53.15.0
|
||||
2011-11-13 - lavf 53.15.0
|
||||
New interrupt callback API, allowing per-AVFormatContext/AVIOContext
|
||||
interrupt callbacks.
|
||||
5f268ca / 6aa0b98 Add AVIOInterruptCB struct and the interrupt_callback field to
|
||||
6aa0b98 Add AVIOInterruptCB struct and the interrupt_callback field to
|
||||
AVFormatContext.
|
||||
5f268ca / 1dee0ac Add avio_open2() with additional parameters. Those are
|
||||
1dee0ac Add avio_open2() with additional parameters. Those are
|
||||
an interrupt callback and an options AVDictionary.
|
||||
This will allow passing AVOptions to protocols after lavf
|
||||
54.0.
|
||||
|
||||
2011-11-06 - 13b7781 / ba04ecf - lavu 51.24.0 / 51.14.0
|
||||
2011-11-06 - ba04ecf - lavu 51.14.0
|
||||
Add av_strcasecmp() and av_strncasecmp() to avstring.h.
|
||||
|
||||
2011-11-06 - 13b7781 / 07b172f - lavu 51.24.0 / 51.13.0
|
||||
2011-11-06 - 07b172f - lavu 51.13.0
|
||||
Add av_toupper()/av_tolower()
|
||||
|
||||
2011-11-05 - d8cab5c / b6d08f4 - lavf 53.19.0 / 53.13.0
|
||||
2011-11-05 - b6d08f4 - lavf 53.13.0
|
||||
Add avformat_network_init()/avformat_network_deinit()
|
||||
|
||||
2011-10-27 - 6faf0a2 / 512557b - lavc 53.24.0 / 53.15.0
|
||||
2011-10-27 - 512557b - lavc 53.15.0
|
||||
Remove avcodec_parse_frame.
|
||||
Deprecate AVCodecContext.parse_only and CODEC_CAP_PARSE_ONLY.
|
||||
|
||||
2011-10-19 - d049257 / 569129a - lavf 53.17.0 / 53.10.0
|
||||
2011-10-19 - 569129a - lavf 53.10.0
|
||||
Add avformat_new_stream(). Deprecate av_new_stream().
|
||||
|
||||
2011-10-13 - 91eb1b1 / b631fba - lavf 53.16.0 / 53.9.0
|
||||
2011-10-13 - b631fba - lavf 53.9.0
|
||||
Add AVFMT_NO_BYTE_SEEK AVInputFormat flag.
|
||||
|
||||
2011-10-12 - lavu 51.21.0 / 51.12.0
|
||||
2011-10-12 - lavu 51.12.0
|
||||
AVOptions API rewrite.
|
||||
|
||||
- f884ef0 / 145f741 FF_OPT_TYPE* renamed to AV_OPT_TYPE_*
|
||||
- 145f741 FF_OPT_TYPE* renamed to AV_OPT_TYPE_*
|
||||
- new setting/getting functions with slightly different semantics:
|
||||
f884ef0 / dac66da av_set_string3 -> av_opt_set
|
||||
dac66da av_set_string3 -> av_opt_set
|
||||
av_set_double -> av_opt_set_double
|
||||
av_set_q -> av_opt_set_q
|
||||
av_set_int -> av_opt_set_int
|
||||
|
||||
f884ef0 / 41d9d51 av_get_string -> av_opt_get
|
||||
41d9d51 av_get_string -> av_opt_get
|
||||
av_get_double -> av_opt_get_double
|
||||
av_get_q -> av_opt_get_q
|
||||
av_get_int -> av_opt_get_int
|
||||
|
||||
- f884ef0 / 8c5dcaa trivial rename av_next_option -> av_opt_next
|
||||
- f884ef0 / 641c7af new functions - av_opt_child_next, av_opt_child_class_next
|
||||
- 8c5dcaa trivial rename av_next_option -> av_opt_next
|
||||
- 641c7af new functions - av_opt_child_next, av_opt_child_class_next
|
||||
and av_opt_find2()
|
||||
|
||||
2011-09-22 - a70e787 - lavu 51.17.0
|
||||
@@ -532,31 +321,31 @@ API changes, most recent first:
|
||||
2011-08-20 - 69e2c1a - lavu 51.13.0
|
||||
Add av_get_media_type_string().
|
||||
|
||||
2011-09-03 - 1889c67 / fb4ca26 - lavc 53.13.0
|
||||
2011-09-03 - fb4ca26 - lavc 53.13.0
|
||||
lavf 53.11.0
|
||||
lsws 2.1.0
|
||||
Add {avcodec,avformat,sws}_get_class().
|
||||
|
||||
2011-08-03 - 1889c67 / c11fb82 - lavu 51.15.0
|
||||
2011-08-03 - c11fb82 - lavu 51.15.0
|
||||
Add AV_OPT_SEARCH_FAKE_OBJ flag for av_opt_find() function.
|
||||
|
||||
2011-08-14 - 323b930 - lavu 51.12.0
|
||||
Add av_fifo_peek2(), deprecate av_fifo_peek().
|
||||
|
||||
2011-08-26 - lavu 51.14.0 / 51.9.0
|
||||
- 976a8b2 / add41de..976a8b2 / abc78a5 Do not include intfloat_readwrite.h,
|
||||
2011-08-26 - lavu 51.9.0
|
||||
- add41de..abc78a5 Do not include intfloat_readwrite.h,
|
||||
mathematics.h, rational.h, pixfmt.h, or log.h from avutil.h.
|
||||
|
||||
2011-08-16 - 27fbe31 / 48f9e45 - lavf 53.11.0 / 53.8.0
|
||||
2011-08-16 - 48f9e45 - lavf 53.8.0
|
||||
Add avformat_query_codec().
|
||||
|
||||
2011-08-16 - 27fbe31 / bca06e7 - lavc 53.11.0
|
||||
2011-08-16 - bca06e7 - lavc 53.11.0
|
||||
Add avcodec_get_type().
|
||||
|
||||
2011-08-06 - 0cb233c / 2f63440 - lavf 53.7.0
|
||||
2011-08-06 - 2f63440 - lavf 53.7.0
|
||||
Add error_recognition to AVFormatContext.
|
||||
|
||||
2011-08-02 - 1d186e9 / 9d39cbf - lavc 53.9.1
|
||||
2011-08-02 - 9d39cbf - lavc 53.9.1
|
||||
Add AV_PKT_FLAG_CORRUPT AVPacket flag.
|
||||
|
||||
2011-07-16 - b57df29 - lavfi 2.27.0
|
||||
@@ -567,11 +356,11 @@ API changes, most recent first:
|
||||
avfilter_set_common_packing_formats()
|
||||
avfilter_all_packing_formats()
|
||||
|
||||
2011-07-10 - 3602ad7 / a67c061 - lavf 53.6.0
|
||||
2011-07-10 - a67c061 - lavf 53.6.0
|
||||
Add avformat_find_stream_info(), deprecate av_find_stream_info().
|
||||
NOTE: this was backported to 0.7
|
||||
|
||||
2011-07-10 - 3602ad7 / 0b950fe - lavc 53.8.0
|
||||
2011-07-10 - 0b950fe - lavc 53.8.0
|
||||
Add avcodec_open2(), deprecate avcodec_open().
|
||||
NOTE: this was backported to 0.7
|
||||
|
||||
@@ -614,35 +403,35 @@ API changes, most recent first:
|
||||
2011-06-12 - 6119b23 - lavfi 2.16.0 - avfilter_graph_parse()
|
||||
Change avfilter_graph_parse() signature.
|
||||
|
||||
2011-06-23 - 686959e / 67e9ae1 - lavu 51.10.0 / 51.8.0 - attributes.h
|
||||
2011-06-23 - 67e9ae1 - lavu 51.8.0 - attributes.h
|
||||
Add av_printf_format().
|
||||
|
||||
2011-06-16 - 2905e3f / 05e84c9, 2905e3f / 25de595 - lavf 53.4.0 / 53.2.0 - avformat.h
|
||||
2011-06-16 - 05e84c9, 25de595 - lavf 53.2.0 - avformat.h
|
||||
Add avformat_open_input and avformat_write_header().
|
||||
Deprecate av_open_input_stream, av_open_input_file,
|
||||
AVFormatParameters and av_write_header.
|
||||
|
||||
2011-06-16 - 2905e3f / 7e83e1c, 2905e3f / dc59ec5 - lavu 51.9.0 / 51.7.0 - opt.h
|
||||
2011-06-16 - 7e83e1c, dc59ec5 - lavu 51.7.0 - opt.h
|
||||
Add av_opt_set_dict() and av_opt_find().
|
||||
Deprecate av_find_opt().
|
||||
Add AV_DICT_APPEND flag.
|
||||
|
||||
2011-06-10 - 45fb647 / cb7c11c - lavu 51.6.0 - opt.h
|
||||
2011-06-10 - cb7c11c - lavu 51.6.0 - opt.h
|
||||
Add av_opt_flag_is_set().
|
||||
|
||||
2011-06-10 - c381960 - lavfi 2.15.0 - avfilter_get_audio_buffer_ref_from_arrays
|
||||
Add avfilter_get_audio_buffer_ref_from_arrays() to avfilter.h.
|
||||
|
||||
2011-06-09 - f9ecb84 / d9f80ea - lavu 51.8.0 - AVMetadata
|
||||
2011-06-09 - d9f80ea - lavu 51.8.0 - AVMetadata
|
||||
Move AVMetadata from lavf to lavu and rename it to
|
||||
AVDictionary -- new installed header dict.h.
|
||||
All av_metadata_* functions renamed to av_dict_*.
|
||||
|
||||
2011-06-07 - d552f61 / a6703fa - lavu 51.8.0 - av_get_bytes_per_sample()
|
||||
2011-06-07 - a6703fa - lavu 51.8.0 - av_get_bytes_per_sample()
|
||||
Add av_get_bytes_per_sample() in libavutil/samplefmt.h.
|
||||
Deprecate av_get_bits_per_sample_fmt().
|
||||
|
||||
2011-06-05 - f956924 / b39b062 - lavu 51.8.0 - opt.h
|
||||
2011-06-05 - b39b062 - lavu 51.8.0 - opt.h
|
||||
Add av_opt_free convenience function.
|
||||
|
||||
2011-06-06 - 95a0242 - lavfi 2.14.0 - AVFilterBufferRefAudioProps
|
||||
@@ -672,7 +461,7 @@ API changes, most recent first:
|
||||
Add av_get_pix_fmt_name() in libavutil/pixdesc.h, and deprecate
|
||||
avcodec_get_pix_fmt_name() in libavcodec/avcodec.h in its favor.
|
||||
|
||||
2011-05-25 - 39e4206 / 30315a8 - lavf 53.3.0 - avformat.h
|
||||
2011-05-25 - 30315a8 - lavf 53.3.0 - avformat.h
|
||||
Add fps_probe_size to AVFormatContext.
|
||||
|
||||
2011-05-22 - 5ecdfd0 - lavf 53.2.0 - avformat.h
|
||||
@@ -688,10 +477,10 @@ API changes, most recent first:
|
||||
2011-05-14 - 9fdf772 - lavfi 2.6.0 - avcodec.h
|
||||
Add avfilter_get_video_buffer_ref_from_frame() to libavfilter/avcodec.h.
|
||||
|
||||
2011-05-18 - 75a37b5 / 64150ff - lavc 53.7.0 - AVCodecContext.request_sample_fmt
|
||||
2011-05-18 - 64150ff - lavc 53.7.0 - AVCodecContext.request_sample_fmt
|
||||
Add request_sample_fmt field to AVCodecContext.
|
||||
|
||||
2011-05-10 - 59eb12f / 188dea1 - lavc 53.6.0 - avcodec.h
|
||||
2011-05-10 - 188dea1 - lavc 53.6.0 - avcodec.h
|
||||
Deprecate AVLPCType and the following fields in
|
||||
AVCodecContext: lpc_coeff_precision, prediction_order_method,
|
||||
min_partition_order, max_partition_order, lpc_type, lpc_passes.
|
||||
@@ -721,81 +510,81 @@ API changes, most recent first:
|
||||
Add av_dynarray_add function for adding
|
||||
an element to a dynamic array.
|
||||
|
||||
2011-04-26 - d7e5aeb / bebe72f - lavu 51.1.0 - avutil.h
|
||||
2011-04-26 - bebe72f - lavu 51.1.0 - avutil.h
|
||||
Add AVPictureType enum and av_get_picture_type_char(), deprecate
|
||||
FF_*_TYPE defines and av_get_pict_type_char() defined in
|
||||
libavcodec/avcodec.h.
|
||||
|
||||
2011-04-26 - d7e5aeb / 10d3940 - lavfi 2.3.0 - avfilter.h
|
||||
2011-04-26 - 10d3940 - lavfi 2.3.0 - avfilter.h
|
||||
Add pict_type and key_frame fields to AVFilterBufferRefVideo.
|
||||
|
||||
2011-04-26 - d7e5aeb / 7a11c82 - lavfi 2.2.0 - vsrc_buffer
|
||||
2011-04-26 - 7a11c82 - lavfi 2.2.0 - vsrc_buffer
|
||||
Add sample_aspect_ratio fields to vsrc_buffer arguments
|
||||
|
||||
2011-04-21 - 8772156 / 94f7451 - lavc 53.1.0 - avcodec.h
|
||||
2011-04-21 - 94f7451 - lavc 53.1.0 - avcodec.h
|
||||
Add CODEC_CAP_SLICE_THREADS for codecs supporting sliced threading.
|
||||
|
||||
2011-04-15 - lavc 52.120.0 - avcodec.h
|
||||
AVPacket structure got additional members for passing side information:
|
||||
c407984 / 4de339e introduce side information for AVPacket
|
||||
c407984 / 2d8591c make containers pass palette change in AVPacket
|
||||
4de339e introduce side information for AVPacket
|
||||
2d8591c make containers pass palette change in AVPacket
|
||||
|
||||
2011-04-12 - lavf 52.107.0 - avio.h
|
||||
Avio cleanup, part II - deprecate the entire URLContext API:
|
||||
c55780d / 175389c add avio_check as a replacement for url_exist
|
||||
9891004 / ff1ec0c add avio_pause and avio_seek_time as replacements
|
||||
175389c add avio_check as a replacement for url_exist
|
||||
ff1ec0c add avio_pause and avio_seek_time as replacements
|
||||
for _av_url_read_fseek/fpause
|
||||
d4d0932 / cdc6a87 deprecate av_protocol_next(), avio_enum_protocols
|
||||
cdc6a87 deprecate av_protocol_next(), avio_enum_protocols
|
||||
should be used instead.
|
||||
c88caa5 / 80c6e23 rename url_set_interrupt_cb->avio_set_interrupt_cb.
|
||||
c88caa5 / f87b1b3 rename open flags: URL_* -> AVIO_*
|
||||
d4d0932 / f8270bb add avio_enum_protocols.
|
||||
d4d0932 / 5593f03 deprecate URLProtocol.
|
||||
d4d0932 / c486dad deprecate URLContext.
|
||||
d4d0932 / 026e175 deprecate the typedef for URLInterruptCB
|
||||
c88caa5 / 8e76a19 deprecate av_register_protocol2.
|
||||
11d7841 / b840484 deprecate URL_PROTOCOL_FLAG_NESTED_SCHEME
|
||||
11d7841 / 1305d93 deprecate av_url_read_seek
|
||||
11d7841 / fa104e1 deprecate av_url_read_pause
|
||||
434f248 / 727c7aa deprecate url_get_filename().
|
||||
434f248 / 5958df3 deprecate url_max_packet_size().
|
||||
434f248 / 1869ea0 deprecate url_get_file_handle().
|
||||
434f248 / 32a97d4 deprecate url_filesize().
|
||||
434f248 / e52a914 deprecate url_close().
|
||||
434f248 / 58a48c6 deprecate url_seek().
|
||||
434f248 / 925e908 deprecate url_write().
|
||||
434f248 / dce3756 deprecate url_read_complete().
|
||||
434f248 / bc371ac deprecate url_read().
|
||||
434f248 / 0589da0 deprecate url_open().
|
||||
434f248 / 62eaaea deprecate url_connect.
|
||||
434f248 / 5652bb9 deprecate url_alloc.
|
||||
434f248 / 333e894 deprecate url_open_protocol
|
||||
434f248 / e230705 deprecate url_poll and URLPollEntry
|
||||
80c6e23 rename url_set_interrupt_cb->avio_set_interrupt_cb.
|
||||
f87b1b3 rename open flags: URL_* -> AVIO_*
|
||||
f8270bb add avio_enum_protocols.
|
||||
5593f03 deprecate URLProtocol.
|
||||
c486dad deprecate URLContext.
|
||||
026e175 deprecate the typedef for URLInterruptCB
|
||||
8e76a19 deprecate av_register_protocol2.
|
||||
b840484 deprecate URL_PROTOCOL_FLAG_NESTED_SCHEME
|
||||
1305d93 deprecate av_url_read_seek
|
||||
fa104e1 deprecate av_url_read_pause
|
||||
727c7aa deprecate url_get_filename().
|
||||
5958df3 deprecate url_max_packet_size().
|
||||
1869ea0 deprecate url_get_file_handle().
|
||||
32a97d4 deprecate url_filesize().
|
||||
e52a914 deprecate url_close().
|
||||
58a48c6 deprecate url_seek().
|
||||
925e908 deprecate url_write().
|
||||
dce3756 deprecate url_read_complete().
|
||||
bc371ac deprecate url_read().
|
||||
0589da0 deprecate url_open().
|
||||
62eaaea deprecate url_connect.
|
||||
5652bb9 deprecate url_alloc.
|
||||
333e894 deprecate url_open_protocol
|
||||
e230705 deprecate url_poll and URLPollEntry
|
||||
|
||||
2011-04-08 - lavf 52.106.0 - avformat.h
|
||||
Minor avformat.h cleanup:
|
||||
d4d0932 / a9bf9d8 deprecate av_guess_image2_codec
|
||||
d4d0932 / c3675df rename avf_sdp_create->av_sdp_create
|
||||
a9bf9d8 deprecate av_guess_image2_codec
|
||||
c3675df rename avf_sdp_create->av_sdp_create
|
||||
|
||||
2011-04-03 - lavf 52.105.0 - avio.h
|
||||
Large-scale renaming/deprecating of AVIOContext-related functions:
|
||||
2cae980 / 724f6a0 deprecate url_fdopen
|
||||
2cae980 / 403ee83 deprecate url_open_dyn_packet_buf
|
||||
2cae980 / 6dc7d80 rename url_close_dyn_buf -> avio_close_dyn_buf
|
||||
2cae980 / b92c545 rename url_open_dyn_buf -> avio_open_dyn_buf
|
||||
2cae980 / 8978fed introduce an AVIOContext.seekable field as a replacement for
|
||||
724f6a0 deprecate url_fdopen
|
||||
403ee83 deprecate url_open_dyn_packet_buf
|
||||
6dc7d80 rename url_close_dyn_buf -> avio_close_dyn_buf
|
||||
b92c545 rename url_open_dyn_buf -> avio_open_dyn_buf
|
||||
8978fed introduce an AVIOContext.seekable field as a replacement for
|
||||
AVIOContext.is_streamed and url_is_streamed()
|
||||
1caa412 / b64030f deprecate get_checksum()
|
||||
1caa412 / 4c4427a deprecate init_checksum()
|
||||
2fd41c9 / 4ec153b deprecate udp_set_remote_url/get_local_port
|
||||
4fa0e24 / 933e90a deprecate av_url_read_fseek/fpause
|
||||
4fa0e24 / 8d9769a deprecate url_fileno
|
||||
0fecf26 / b7f2fdd rename put_flush_packet -> avio_flush
|
||||
0fecf26 / 35f1023 deprecate url_close_buf
|
||||
0fecf26 / 83fddae deprecate url_open_buf
|
||||
0fecf26 / d9d86e0 rename url_fprintf -> avio_printf
|
||||
0fecf26 / 59f65d9 deprecate url_setbufsize
|
||||
6947b0c / 3e68b3b deprecate url_ferror
|
||||
b64030f deprecate get_checksum()
|
||||
4c4427a deprecate init_checksum()
|
||||
4ec153b deprecate udp_set_remote_url/get_local_port
|
||||
933e90a deprecate av_url_read_fseek/fpause
|
||||
8d9769a deprecate url_fileno
|
||||
b7f2fdd rename put_flush_packet -> avio_flush
|
||||
35f1023 deprecate url_close_buf
|
||||
83fddae deprecate url_open_buf
|
||||
d9d86e0 rename url_fprintf -> avio_printf
|
||||
59f65d9 deprecate url_setbufsize
|
||||
3e68b3b deprecate url_ferror
|
||||
e8bb2e2 deprecate url_fget_max_packet_size
|
||||
76aa876 rename url_fsize -> avio_size
|
||||
e519753 deprecate url_fgetc
|
||||
@@ -816,7 +605,7 @@ API changes, most recent first:
|
||||
b3db9ce deprecate get_partial_buffer
|
||||
8d9ac96 rename av_alloc_put_byte -> avio_alloc_context
|
||||
|
||||
2011-03-25 - 27ef7b1 / 34b47d7 - lavc 52.115.0 - AVCodecContext.audio_service_type
|
||||
2011-03-25 - 34b47d7 - lavc 52.115.0 - AVCodecContext.audio_service_type
|
||||
Add audio_service_type field to AVCodecContext.
|
||||
|
||||
2011-03-17 - e309fdc - lavu 50.40.0 - pixfmt.h
|
||||
@@ -854,11 +643,11 @@ API changes, most recent first:
|
||||
2011-02-10 - 12c14cd - lavf 52.99.0 - AVStream.disposition
|
||||
Add AV_DISPOSITION_HEARING_IMPAIRED and AV_DISPOSITION_VISUAL_IMPAIRED.
|
||||
|
||||
2011-02-09 - c0b102c - lavc 52.112.0 - avcodec_thread_init()
|
||||
2011-02-09 - 5592734 - lavc 52.112.0 - avcodec_thread_init()
|
||||
Deprecate avcodec_thread_init()/avcodec_thread_free() use; instead
|
||||
set thread_count before calling avcodec_open.
|
||||
|
||||
2011-02-09 - 37b00b4 - lavc 52.111.0 - threading API
|
||||
2011-02-09 - 778b08a - lavc 52.111.0 - threading API
|
||||
Add CODEC_CAP_FRAME_THREADS with new restrictions on get_buffer()/
|
||||
release_buffer()/draw_horiz_band() callbacks for appropriate codecs.
|
||||
Add thread_type and active_thread_type fields to AVCodecContext.
|
||||
|
68
doc/Makefile
@@ -1,45 +1,25 @@
|
||||
LIBRARIES-$(CONFIG_AVUTIL) += libavutil
|
||||
LIBRARIES-$(CONFIG_SWSCALE) += libswscale
|
||||
LIBRARIES-$(CONFIG_SWRESAMPLE) += libswresample
|
||||
LIBRARIES-$(CONFIG_AVCODEC) += libavcodec
|
||||
LIBRARIES-$(CONFIG_AVFORMAT) += libavformat
|
||||
LIBRARIES-$(CONFIG_AVDEVICE) += libavdevice
|
||||
LIBRARIES-$(CONFIG_AVFILTER) += libavfilter
|
||||
|
||||
COMPONENTS-yes = $(PROGS-yes)
|
||||
COMPONENTS-$(CONFIG_AVUTIL) += ffmpeg-utils
|
||||
COMPONENTS-$(CONFIG_SWSCALE) += ffmpeg-scaler
|
||||
COMPONENTS-$(CONFIG_SWRESAMPLE) += ffmpeg-resampler
|
||||
COMPONENTS-$(CONFIG_AVCODEC) += ffmpeg-codecs ffmpeg-bitstream-filters
|
||||
COMPONENTS-$(CONFIG_AVFORMAT) += ffmpeg-formats ffmpeg-protocols
|
||||
COMPONENTS-$(CONFIG_AVDEVICE) += ffmpeg-devices
|
||||
COMPONENTS-$(CONFIG_AVFILTER) += ffmpeg-filters
|
||||
|
||||
MANPAGES = $(COMPONENTS-yes:%=doc/%.1) $(LIBRARIES-yes:%=doc/%.3)
|
||||
PODPAGES = $(COMPONENTS-yes:%=doc/%.pod) $(LIBRARIES-yes:%=doc/%.pod)
|
||||
HTMLPAGES = $(COMPONENTS-yes:%=doc/%.html) $(LIBRARIES-yes:%=doc/%.html) \
|
||||
MANPAGES = $(PROGS-yes:%=doc/%.1)
|
||||
PODPAGES = $(PROGS-yes:%=doc/%.pod)
|
||||
HTMLPAGES = $(PROGS-yes:%=doc/%.html) \
|
||||
doc/developer.html \
|
||||
doc/faq.html \
|
||||
doc/fate.html \
|
||||
doc/general.html \
|
||||
doc/git-howto.html \
|
||||
doc/nut.html \
|
||||
doc/libavfilter.html \
|
||||
doc/platform.html \
|
||||
doc/syntax.html \
|
||||
|
||||
TXTPAGES = doc/fate.txt \
|
||||
|
||||
|
||||
DOCS-$(CONFIG_HTMLPAGES) += $(HTMLPAGES)
|
||||
DOCS-$(CONFIG_PODPAGES) += $(PODPAGES)
|
||||
DOCS-$(CONFIG_MANPAGES) += $(MANPAGES)
|
||||
DOCS-$(CONFIG_TXTPAGES) += $(TXTPAGES)
|
||||
DOCS = $(DOCS-yes)
|
||||
DOCS = $(HTMLPAGES) $(MANPAGES) $(PODPAGES)
|
||||
ifdef HAVE_MAKEINFO
|
||||
DOCS += $(TXTPAGES)
|
||||
endif
|
||||
|
||||
all-$(CONFIG_DOC): doc
|
||||
all-$(CONFIG_DOC): documentation
|
||||
|
||||
doc: documentation
|
||||
|
||||
apidoc: doc/doxy/html
|
||||
documentation: $(DOCS)
|
||||
|
||||
TEXIDEP = awk '/^@(verbatim)?include/ { printf "$@: $(@D)/%s\n", $$2 }' <$< >$(@:%=%.d)
|
||||
@@ -49,11 +29,13 @@ doc/%.txt: doc/%.texi
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)makeinfo --force --no-headers -o $@ $< 2>/dev/null
|
||||
|
||||
doc/print_options.o: libavformat/options_table.h libavcodec/options_table.h
|
||||
|
||||
GENTEXI = format codec
|
||||
GENTEXI := $(GENTEXI:%=doc/avoptions_%.texi)
|
||||
|
||||
$(GENTEXI): TAG = GENTEXI
|
||||
$(GENTEXI): doc/avoptions_%.texi: doc/print_options$(HOSTEXESUF)
|
||||
$(GENTEXI): doc/avoptions_%.texi: doc/print_options
|
||||
$(M)doc/print_options $* > $@
|
||||
|
||||
doc/%.html: TAG = HTML
|
||||
@@ -64,40 +46,28 @@ doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
|
||||
doc/%.pod: TAG = POD
|
||||
doc/%.pod: doc/%.texi $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)perl $(SRC_PATH)/doc/texi2pod.pl -Idoc $< $@
|
||||
$(M)$(SRC_PATH)/doc/texi2pod.pl -Idoc $< $@
|
||||
|
||||
doc/%.1 doc/%.3: TAG = MAN
|
||||
doc/%.1: TAG = MAN
|
||||
doc/%.1: doc/%.pod $(GENTEXI)
|
||||
$(M)pod2man --section=1 --center=" " --release=" " $< > $@
|
||||
doc/%.3: doc/%.pod $(GENTEXI)
|
||||
$(M)pod2man --section=3 --center=" " --release=" " $< > $@
|
||||
|
||||
$(DOCS) doc/doxy/html: | doc/
|
||||
$(DOCS): | doc/
|
||||
|
||||
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(INSTHEADERS)
|
||||
$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $(SRC_PATH) $^
|
||||
|
||||
install-man:
|
||||
|
||||
ifdef CONFIG_MANPAGES
|
||||
install-progs-$(CONFIG_DOC): install-man
|
||||
|
||||
install-man: $(MANPAGES)
|
||||
$(Q)mkdir -p "$(MANDIR)/man1"
|
||||
$(INSTALL) -m 644 $(MANPAGES) "$(MANDIR)/man1"
|
||||
endif
|
||||
|
||||
uninstall: uninstall-man
|
||||
|
||||
uninstall-man:
|
||||
$(RM) $(addprefix "$(MANDIR)/man1/",$(ALLMANPAGES))
|
||||
|
||||
clean:: docclean
|
||||
|
||||
docclean:
|
||||
$(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 doc/*.3 $(CLEANSUFFIXES:%=doc/%) doc/avoptions_*.texi
|
||||
$(RM) -r doc/doxy/html
|
||||
clean::
|
||||
$(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 $(CLEANSUFFIXES:%=doc/%) doc/avoptions_*.texi
|
||||
|
||||
-include $(wildcard $(DOCS:%=%.d))
|
||||
|
||||
.PHONY: apidoc doc documentation
|
||||
.PHONY: documentation
|
||||
|
@@ -1,7 +1,7 @@
|
||||
Release Notes
|
||||
=============
|
||||
|
||||
* 1.1 "Fire Flower" January, 2013
|
||||
* 0.11 "Happiness" May, 2012
|
||||
|
||||
|
||||
General notes
|
||||
@@ -14,12 +14,3 @@ accepted. If you are experiencing issues with any formally released version of
|
||||
FFmpeg, please try git master to check if the issue still exists. If it does,
|
||||
make your report against the development code following the usual bug reporting
|
||||
guidelines.
|
||||
|
||||
Of big interest to our Windows users, FFmpeg now supports building with the MSVC
compiler. Since MSVC does not support the C99 features used extensively by FFmpeg,
this has been accomplished with a converter that translates the C99 code to C89.
See the platform-specific documentation for more details on building FFmpeg with MSVC.
|
||||
|
||||
The output sample format used by several audio decoders has changed; make
sure you always check/use AVCodecContext.sample_fmt or AVFrame.format.
|
||||
|
@@ -1,11 +0,0 @@
|
||||
@chapter Authors
|
||||
|
||||
The FFmpeg developers.
|
||||
|
||||
For details about the authorship, see the Git history of the project
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
@command{git log} in the FFmpeg source directory, or browsing the
|
||||
online repository at @url{http://source.ffmpeg.org}.
|
||||
|
||||
Maintainers for the specific components are listed in the file
|
||||
@file{MAINTAINERS} in the source code tree.
|
@@ -1,11 +1,10 @@
|
||||
All the numerical options, if not specified otherwise, accept as input
|
||||
a string representing a number, which may contain one of the
|
||||
SI unit prefixes, for example 'K', 'M', 'G'.
|
||||
If 'i' is appended after the prefix, binary prefixes are used,
|
||||
which are based on powers of 1024 instead of powers of 1000.
|
||||
The 'B' postfix multiplies the value by 8, and can be
|
||||
appended after a unit prefix or used alone. This allows using for
|
||||
example 'KB', 'MiB', 'G' and 'B' as number postfix.
|
||||
International System number postfixes, for example 'K', 'M', 'G'.
|
||||
If 'i' is appended after the postfix, powers of 2 are used instead of
|
||||
powers of 10. The 'B' postfix multiplies the value for 8, and can be
|
||||
appended after another postfix or used alone. This allows using for
|
||||
example 'KB', 'MiB', 'G' and 'B' as postfix.
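The same postfix convention is implemented by @code{av_strtod()} from
@file{libavutil/eval.h}; a minimal sketch to check how a given string is
interpreted (assuming the libavutil headers and library are installed):

@example
#include <stdio.h>
#include <libavutil/eval.h>   /* av_strtod() */

int main(void)
{
    /* 'K' is a power-of-1000 prefix, 'Ki' a power-of-1024 prefix,
     * and a trailing 'B' multiplies the value by 8. */
    printf("1K   = %g\n", av_strtod("1K",   NULL));  /* 1000 */
    printf("1Ki  = %g\n", av_strtod("1Ki",  NULL));  /* 1024 */
    printf("1KB  = %g\n", av_strtod("1KB",  NULL));  /* 8000 */
    printf("1MiB = %g\n", av_strtod("1MiB", NULL));  /* 8388608 */
    return 0;
}
@end example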
|
||||
|
||||
Options which do not take arguments are boolean options, and set the
|
||||
corresponding value to true. They can be set to false by prefixing
|
||||
@@ -19,10 +18,10 @@ are used to precisely specify which stream(s) does a given option belong to.
|
||||
|
||||
A stream specifier is a string generally appended to the option name and
|
||||
separated from it by a colon. E.g. @code{-codec:a:1 ac3} option contains
|
||||
@code{a:1} stream specifier, which matches the second audio stream. Therefore it
|
||||
@code{a:1} stream specifer, which matches the second audio stream. Therefore it
|
||||
would select the ac3 codec for the second audio stream.
|
||||
|
||||
A stream specifier can match several streams, the option is then applied to all
|
||||
A stream specifier can match several stream, the option is then applied to all
|
||||
of them. E.g. the stream specifier in @code{-b:a 128k} matches all audio
|
||||
streams.
|
||||
|
||||
@@ -45,7 +44,6 @@ program with id @var{program_id}. Otherwise matches all streams in this program.
|
||||
@item #@var{stream_id}
|
||||
Matches the stream by format-specific ID.
|
||||
@end table
|
||||
|
||||
@section Generic options
|
||||
|
||||
These options are shared amongst the av* tools.
|
||||
@@ -55,29 +53,8 @@ These options are shared amongst the av* tools.
|
||||
@item -L
|
||||
Show license.
|
||||
|
||||
@item -h, -?, -help, --help [@var{arg}]
|
||||
Show help. An optional parameter may be specified to print help about a specific
|
||||
item.
|
||||
|
||||
Possible values of @var{arg} are:
|
||||
@table @option
|
||||
@item decoder=@var{decoder_name}
|
||||
Print detailed information about the decoder named @var{decoder_name}. Use the
|
||||
@option{-decoders} option to get a list of all decoders.
|
||||
|
||||
@item encoder=@var{encoder_name}
|
||||
Print detailed information about the encoder named @var{encoder_name}. Use the
|
||||
@option{-encoders} option to get a list of all encoders.
|
||||
|
||||
@item demuxer=@var{demuxer_name}
|
||||
Print detailed information about the demuxer named @var{demuxer_name}. Use the
|
||||
@option{-formats} option to get a list of all demuxers and muxers.
|
||||
|
||||
@item muxer=@var{muxer_name}
|
||||
Print detailed information about the muxer named @var{muxer_name}. Use the
|
||||
@option{-formats} option to get a list of all muxers and demuxers.
|
||||
|
||||
@end table
|
||||
@item -h, -?, -help, --help
|
||||
Show help.
|
||||
|
||||
@item -version
|
||||
Show version.
|
||||
@@ -94,16 +71,23 @@ Encoding available
|
||||
@end table
|
||||
|
||||
@item -codecs
|
||||
Show all codecs known to libavcodec.
|
||||
Show available codecs.
|
||||
|
||||
Note that the term 'codec' is used throughout this documentation as a shortcut
|
||||
for what is more correctly called a media bitstream format.
|
||||
|
||||
@item -decoders
|
||||
Show available decoders.
|
||||
|
||||
@item -encoders
|
||||
Show all available encoders.
|
||||
The fields preceding the codec names have the following meanings:
|
||||
@table @samp
|
||||
@item D
|
||||
Decoding available
|
||||
@item E
|
||||
Encoding available
|
||||
@item V/A/S
|
||||
Video/audio/subtitle codec
|
||||
@item S
|
||||
Codec supports slices
|
||||
@item D
|
||||
Codec supports direct rendering
|
||||
@item T
|
||||
Codec can handle input truncated at random locations instead of only at frame boundaries
|
||||
@end table
|
||||
|
||||
@item -bsfs
|
||||
Show available bitstream filters.
|
||||
@@ -120,9 +104,6 @@ Show available pixel formats.
|
||||
@item -sample_fmts
|
||||
Show available sample formats.
|
||||
|
||||
@item -layouts
|
||||
Show channel names and standard channel layouts.
|
||||
|
||||
@item -loglevel @var{loglevel} | -v @var{loglevel}
|
||||
Set the logging level used by the library.
|
||||
@var{loglevel} is a number or a string containing one of the following values:
|
||||
@@ -152,21 +133,8 @@ directory.
|
||||
This file can be useful for bug reports.
|
||||
It also implies @code{-loglevel verbose}.
|
||||
|
||||
Setting the environment variable @code{FFREPORT} to any value has the
|
||||
same effect. If the value is a ':'-separated key=value sequence, these
|
||||
options will affect the report; option values must be escaped if they
|
||||
contain special characters or the options delimiter ':' (see the
|
||||
``Quoting and escaping'' section in the ffmpeg-utils manual). The
|
||||
following option is recognized:
|
||||
@table @option
|
||||
@item file
|
||||
set the file name to use for the report; @code{%p} is expanded to the name
|
||||
of the program, @code{%t} is expanded to a timestamp, @code{%%} is expanded
|
||||
to a plain @code{%}
|
||||
@end table
|
||||
|
||||
Errors in parsing the environment variable are not fatal, and will not
|
||||
appear in the report.
|
||||
Note: setting the environment variable @code{FFREPORT} to any value has the
|
||||
same effect.
|
||||
|
||||
@item -cpuflags flags (@emph{global})
|
||||
Allows setting and clearing cpu flags. This option is intended
|
||||
@@ -209,3 +177,6 @@ use @option{-option 0}/@option{-option 1}.
|
||||
|
||||
Note2: the old undocumented way of specifying per-stream AVOptions by prepending
v/a/s to the option name is now obsolete and will be removed soon.
|
||||
|
||||
@include avoptions_codec.texi
|
||||
@include avoptions_format.texi
|
||||
|
@@ -61,29 +61,3 @@ use is purely internal and the format of the data it accepts is not publicly
|
||||
documented.
|
||||
|
||||
@c man end AUDIO DECODERS
|
||||
|
||||
@chapter Subtitles Decoders
|
||||
@c man begin SUBTITLES DECODERS
|
||||
|
||||
@section dvdsub
|
||||
|
||||
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
|
||||
also be found in VobSub file pairs and in some Matroska files.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item palette
|
||||
Specify the global palette used by the bitmaps. When stored in VobSub, the
|
||||
palette is normally specified in the index file; in Matroska, the palette is
|
||||
stored in the codec extra-data in the same format as in VobSub. In DVDs, the
|
||||
palette is stored in the IFO file, and therefore not available when reading
|
||||
from dumped VOB files.
|
||||
|
||||
The format for this option is a string containing 16 24-bit hexadecimal
numbers (without 0x prefix) separated by commas, for example @code{0d00ee,
|
||||
ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
|
||||
7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b}.
|
||||
@end table
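For API users, the @option{palette} string can be passed as a private option
when opening the decoder. A minimal, hypothetical sketch (the helper name is
illustrative and the palette argument is expected to be a string such as the
example value above):

@example
#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

static AVCodecContext *open_dvdsub(const char *palette)
{
    AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_DVD_SUBTITLE);
    AVCodecContext *ctx = codec ? avcodec_alloc_context3(codec) : NULL;
    AVDictionary *opts = NULL;

    if (!ctx)
        return NULL;

    /* 16 comma-separated 24-bit hex values, as described above */
    av_dict_set(&opts, "palette", palette, 0);

    if (avcodec_open2(ctx, codec, &opts) < 0) {
        av_free(ctx);
        ctx = NULL;
    }
    av_dict_free(&opts);
    return ctx;
}
@end example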
|
||||
|
||||
@c man end SUBTITLES DECODERS
|
||||
|
149
doc/default.css
@@ -1,149 +0,0 @@
|
||||
a {
|
||||
color: #2D6198;
|
||||
}
|
||||
|
||||
a:visited {
|
||||
color: #884488;
|
||||
}
|
||||
|
||||
#banner {
|
||||
background-color: white;
|
||||
position: relative;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
#banner img {
|
||||
padding-bottom: 1px;
|
||||
padding-top: 5px;
|
||||
}
|
||||
|
||||
#body {
|
||||
margin-left: 1em;
|
||||
margin-right: 1em;
|
||||
}
|
||||
|
||||
body {
|
||||
background-color: #313131;
|
||||
margin: 0;
|
||||
text-align: justify;
|
||||
}
|
||||
|
||||
.center {
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
#container {
|
||||
background-color: white;
|
||||
color: #202020;
|
||||
margin-left: 1em;
|
||||
margin-right: 1em;
|
||||
}
|
||||
|
||||
#footer {
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
h1, h2, h3 {
|
||||
padding-left: 0.4em;
|
||||
border-radius: 4px;
|
||||
padding-bottom: 0.2em;
|
||||
padding-top: 0.2em;
|
||||
border: 1px solid #6A996A;
|
||||
}
|
||||
|
||||
h1 {
|
||||
background-color: #7BB37B;
|
||||
color: #151515;
|
||||
font-size: 1.2em;
|
||||
padding-bottom: 0.3em;
|
||||
padding-top: 0.3em;
|
||||
}
|
||||
|
||||
h2 {
|
||||
color: #313131;
|
||||
font-size: 0.9em;
|
||||
background-color: #ABE3AB;
|
||||
}
|
||||
|
||||
h3 {
|
||||
color: #313131;
|
||||
font-size: 0.8em;
|
||||
margin-bottom: -8px;
|
||||
background-color: #BBF3BB;
|
||||
}
|
||||
|
||||
img {
|
||||
border: 0;
|
||||
}
|
||||
|
||||
#navbar {
|
||||
background-color: #738073;
|
||||
border-bottom: 1px solid #5C665C;
|
||||
border-top: 1px solid #5C665C;
|
||||
margin-top: 12px;
|
||||
padding: 0.3em;
|
||||
position: relative;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
#navbar a, #navbar_secondary a {
|
||||
color: white;
|
||||
padding: 0.3em;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
#navbar a:hover, #navbar_secondary a:hover {
|
||||
background-color: #313131;
|
||||
color: white;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
#navbar_secondary {
|
||||
background-color: #738073;
|
||||
border-bottom: 1px solid #5C665C;
|
||||
border-left: 1px solid #5C665C;
|
||||
border-right: 1px solid #5C665C;
|
||||
padding: 0.3em;
|
||||
position: relative;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
p {
|
||||
margin-left: 1em;
|
||||
margin-right: 1em;
|
||||
}
|
||||
|
||||
pre {
|
||||
margin-left: 3em;
|
||||
margin-right: 3em;
|
||||
padding: 0.3em;
|
||||
border: 1px solid #bbb;
|
||||
background-color: #f7f7f7;
|
||||
}
|
||||
|
||||
dl dt {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
#proj_desc {
|
||||
font-size: 1.2em;
|
||||
}
|
||||
|
||||
#repos {
|
||||
margin-left: 1em;
|
||||
margin-right: 1em;
|
||||
border-collapse: collapse;
|
||||
border: solid 1px #6A996A;
|
||||
}
|
||||
|
||||
#repos th {
|
||||
background-color: #7BB37B;
|
||||
border: solid 1px #6A996A;
|
||||
}
|
||||
|
||||
#repos td {
|
||||
padding: 0.2em;
|
||||
border: solid 1px #6A996A;
|
||||
}
|
@@ -23,31 +23,8 @@ The description of some of the currently available demuxers follows.
|
||||
Image file demuxer.
|
||||
|
||||
This demuxer reads from a list of image files specified by a pattern.
|
||||
The syntax and meaning of the pattern is specified by the
|
||||
option @var{pattern_type}.
|
||||
|
||||
The pattern may contain a suffix which is used to automatically
|
||||
determine the format of the images contained in the files.
|
||||
|
||||
The size, the pixel format, and the format of each image must be the
|
||||
same for all the files in the sequence.
|
||||
|
||||
This demuxer accepts the following options:
|
||||
@table @option
|
||||
@item framerate
|
||||
Set the framerate for the video stream. It defaults to 25.
|
||||
@item loop
|
||||
If set to 1, loop over the input. Default value is 0.
|
||||
@item pattern_type
|
||||
Select the pattern type used to interpret the provided filename.
|
||||
|
||||
@var{pattern_type} accepts one of the following values.
|
||||
@table @option
|
||||
@item sequence
|
||||
Select a sequence pattern type, used to specify a sequence of files
|
||||
indexed by sequential numbers.
|
||||
|
||||
A sequence pattern may contain the string "%d" or "%0@var{N}d", which
|
||||
The pattern may contain the string "%d" or "%0@var{N}d", which
|
||||
specifies the position of the characters representing a sequential
|
||||
number in each filename matched by the pattern. If the form
|
||||
"%d0@var{N}d" is used, the string representing the number in each
|
||||
@@ -55,11 +32,13 @@ filename is 0-padded and @var{N} is the total number of 0-padded
|
||||
digits representing the number. The literal character '%' can be
|
||||
specified in the pattern with the string "%%".
|
||||
|
||||
If the sequence pattern contains "%d" or "%0@var{N}d", the first filename of
|
||||
If the pattern contains "%d" or "%0@var{N}d", the first filename of
|
||||
the file list specified by the pattern must contain a number
|
||||
inclusively contained between @var{start_number} and
|
||||
@var{start_number}+@var{start_number_range}-1, and all the following
|
||||
numbers must be sequential.
|
||||
inclusively contained between 0 and 4, all the following numbers must
|
||||
be sequential. This limitation may be hopefully fixed.
|
||||
|
||||
The pattern may contain a suffix which is used to automatically
|
||||
determine the format of the images contained in the files.
|
||||
|
||||
For example the pattern "img-%03d.bmp" will match a sequence of
|
||||
filenames of the form @file{img-001.bmp}, @file{img-002.bmp}, ...,
|
||||
@@ -67,6 +46,17 @@ filenames of the form @file{img-001.bmp}, @file{img-002.bmp}, ...,
|
||||
sequence of filenames of the form @file{i%m%g-1.jpg},
|
||||
@file{i%m%g-2.jpg}, ..., @file{i%m%g-10.jpg}, etc.
|
||||
|
||||
The size, the pixel format, and the format of each image must be the
|
||||
same for all the files in the sequence.
|
||||
|
||||
The following example shows how to use @command{ffmpeg} for creating a
|
||||
video from the images in the file sequence @file{img-001.jpeg},
|
||||
@file{img-002.jpeg}, ..., assuming an input frame rate of 10 frames per
|
||||
second:
|
||||
@example
|
||||
ffmpeg -i 'img-%03d.jpeg' -r 10 out.mkv
|
||||
@end example
|
||||
|
||||
Note that the pattern does not necessarily have to contain "%d" or
"%0@var{N}d"; for example, to convert a single image file
|
||||
@file{img.jpeg} you can employ the command:
|
||||
@@ -74,75 +64,6 @@ Note that the pattern must not necessarily contain "%d" or
|
||||
ffmpeg -i img.jpeg img.png
|
||||
@end example
|
||||
|
||||
@item glob
|
||||
Select a glob wildcard pattern type.
|
||||
|
||||
The pattern is interpreted like a @code{glob()} pattern. This is only
|
||||
selectable if libavformat was compiled with globbing support.
|
||||
|
||||
@item glob_sequence @emph{(deprecated, will be removed)}
|
||||
Select a mixed glob wildcard/sequence pattern.
|
||||
|
||||
If your version of libavformat was compiled with globbing support, and
|
||||
the provided pattern contains at least one glob meta character among
|
||||
@code{%*?[]@{@}} that is preceded by an unescaped "%", the pattern is
|
||||
interpreted like a @code{glob()} pattern, otherwise it is interpreted
|
||||
like a sequence pattern.
|
||||
|
||||
All glob special characters @code{%*?[]@{@}} must be prefixed
|
||||
with "%". To escape a literal "%" you shall use "%%".
|
||||
|
||||
For example the pattern @code{foo-%*.jpeg} will match all the
|
||||
filenames prefixed by "foo-" and terminating with ".jpeg", and
|
||||
@code{foo-%?%?%?.jpeg} will match all the filenames prefixed with
|
||||
"foo-", followed by a sequence of three characters, and terminating
|
||||
with ".jpeg".
|
||||
|
||||
This pattern type is deprecated in favor of @var{glob} and
|
||||
@var{sequence}.
|
||||
@end table
|
||||
|
||||
Default value is @var{glob_sequence}.
|
||||
@item pixel_format
|
||||
Set the pixel format of the images to read. If not specified the pixel
|
||||
format is guessed from the first image file in the sequence.
|
||||
@item start_number
|
||||
Set the index of the file matched by the image file pattern to start
|
||||
to read from. Default value is 0.
|
||||
@item start_number_range
|
||||
Set the index interval range to check when looking for the first image
|
||||
file in the sequence, starting from @var{start_number}. Default value
|
||||
is 5.
|
||||
@item video_size
|
||||
Set the video size of the images to read. If not specified the video
|
||||
size is guessed from the first image file in the sequence.
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Use @command{ffmpeg} for creating a video from the images in the file
|
||||
sequence @file{img-001.jpeg}, @file{img-002.jpeg}, ..., assuming an
|
||||
input frame rate of 10 frames per second:
|
||||
@example
|
||||
ffmpeg -i 'img-%03d.jpeg' -r 10 out.mkv
|
||||
@end example
|
||||
|
||||
@item
|
||||
As above, but start by reading from a file with index 100 in the sequence:
|
||||
@example
|
||||
ffmpeg -start_number 100 -i 'img-%03d.jpeg' -r 10 out.mkv
|
||||
@end example
|
||||
|
||||
@item
|
||||
Read images matching the "*.png" glob pattern, that is, all the files
|
||||
terminating with the ".png" suffix:
|
||||
@example
|
||||
ffmpeg -pattern_type glob -i "*.png" -r 10 out.mkv
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@section applehttp
|
||||
|
||||
Apple HTTP Live Streaming demuxer.
|
||||
@@ -184,56 +105,4 @@ the script is directly played, the actual times will match the absolute
|
||||
timestamps up to the sound controller's clock accuracy, but if the user
|
||||
somehow pauses the playback or seeks, all times will be shifted accordingly.
|
||||
|
||||
@section concat
|
||||
|
||||
Virtual concatenation script demuxer.
|
||||
|
||||
This demuxer reads a list of files and other directives from a text file and
|
||||
demuxes them one after the other, as if all their packets had been muxed
|
||||
together.
|
||||
|
||||
The timestamps in the files are adjusted so that the first file starts at 0
|
||||
and each next file starts where the previous one finishes. Note that it is
|
||||
done globally and may cause gaps if all streams do not have exactly the same
|
||||
length.
|
||||
|
||||
All files must have the same streams (same codecs, same time base, etc.).
|
||||
|
||||
This script format cannot currently be probed; it must be specified explicitly.
|
||||
|
||||
@subsection Syntax
|
||||
|
||||
The script is a text file in extended-ASCII, with one directive per line.
|
||||
Empty lines, leading spaces and lines starting with '#' are ignored. The
|
||||
following directive is recognized:
|
||||
|
||||
@table @option
|
||||
|
||||
@item @code{file @var{path}}
|
||||
Path to a file to read; special characters and spaces must be escaped with
|
||||
backslash or single quotes.
|
||||
|
||||
@end table
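Since the format cannot be probed, API users have to request the concat
demuxer by name. A minimal sketch (the script name @file{list.txt} and its
contents are hypothetical):

@example
#include <stdio.h>
#include <libavformat/avformat.h>

int main(void)
{
    AVFormatContext *ctx = NULL;
    AVInputFormat *concat;
    int ret;

    av_register_all();

    /* The concat script cannot be probed, so the demuxer has to be
     * requested explicitly by name. */
    concat = av_find_input_format("concat");
    if (!concat) {
        fprintf(stderr, "concat demuxer not available\n");
        return 1;
    }

    /* "list.txt" is a hypothetical script containing lines such as:
     *   file part1.ts
     *   file part2.ts
     */
    ret = avformat_open_input(&ctx, "list.txt", concat, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open concat script\n");
        return 1;
    }

    printf("Opened %u stream(s)\n", ctx->nb_streams);
    avformat_close_input(&ctx);
    return 0;
}
@end example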
|
||||
|
||||
@section tedcaptions
|
||||
|
||||
JSON captions used for @url{http://www.ted.com/, TED Talks}.
|
||||
|
||||
TED does not provide links to the captions, but they can be guessed from the
|
||||
page. The file @file{tools/bookmarklets.html} from the FFmpeg source tree
|
||||
contains a bookmarklet to expose them.
|
||||
|
||||
This demuxer accepts the following option:
|
||||
@table @option
|
||||
@item start_time
|
||||
Set the start time of the TED talk, in milliseconds. The default is 15000
|
||||
(15s). It is used to sync the captions with the downloadable videos, because
|
||||
they include a 15s intro.
|
||||
@end table
|
||||
|
||||
Example: convert the captions to a format most players understand:
|
||||
@example
|
||||
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
|
||||
@end example
|
||||
|
||||
@c man end INPUT DEVICES
|
||||
|
@@ -170,7 +170,7 @@ For exported names, each library has its own prefixes. Just check the existing
|
||||
code and name accordingly.
|
||||
@end itemize
|
||||
|
||||
@subsection Miscellaneous conventions
|
||||
@subsection Miscellanous conventions
|
||||
@itemize @bullet
|
||||
@item
|
||||
fprintf and printf are forbidden in libavformat and libavcodec,
|
||||
@@ -190,8 +190,8 @@ set shiftwidth=4
|
||||
set softtabstop=4
|
||||
set cindent
|
||||
set cinoptions=(0
|
||||
" Allow tabs in Makefiles.
|
||||
autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
|
||||
" allow tabs in Makefiles
|
||||
autocmd FileType make set noexpandtab shiftwidth=8 softtabstop=8
|
||||
" Trailing whitespace and tabs are forbidden, so highlight them.
|
||||
highlight ForbiddenWhitespace ctermbg=red guibg=red
|
||||
match ForbiddenWhitespace /\s\+$\|\t/
|
||||
@@ -204,8 +204,8 @@ For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
(c-add-style "ffmpeg"
|
||||
'("k&r"
|
||||
(c-basic-offset . 4)
|
||||
(indent-tabs-mode . nil)
|
||||
(show-trailing-whitespace . t)
|
||||
(indent-tabs-mode nil)
|
||||
(show-trailing-whitespace t)
|
||||
(c-offsets-alist
|
||||
(statement-cont . (c-lineup-assignments +)))
|
||||
)
|
||||
@@ -317,8 +317,7 @@ For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
(e.g. addition of a function to the public API or extension of an
|
||||
existing data structure).
|
||||
Incrementing the third component means a noteworthy binary compatible
|
||||
change (e.g. encoder bug fix that matters for the decoder). The third
|
||||
component always starts at 100 to distinguish FFmpeg from Libav.
|
||||
change (e.g. encoder bug fix that matters for the decoder).
|
||||
@item
|
||||
Compiler warnings indicate potential bugs or code with bad style. If a type of
|
||||
warning always points to correct and clean code, that warning should
|
||||
@@ -394,9 +393,7 @@ send a reminder by email. Your patch should eventually be dealt with.
|
||||
@item
|
||||
Did you register it in @file{allcodecs.c} or @file{allformats.c}?
|
||||
@item
|
||||
Did you add the AVCodecID to @file{avcodec.h}?
|
||||
When adding new codec IDs, also add an entry to the codec descriptor
list in @file{libavcodec/codec_desc.c} (a sketch of such an entry is shown after this checklist).
|
||||
Did you add the CodecID to @file{avcodec.h}?
|
||||
@item
|
||||
If it has a fourCC, did you add it to @file{libavformat/riff.c},
|
||||
even if it is only a decoder?
|
||||
@@ -430,7 +427,7 @@ send a reminder by email. Your patch should eventually be dealt with.
|
||||
Was the patch generated with git format-patch or send-email?
|
||||
@item
|
||||
Did you sign off your patch? (git commit -s)
|
||||
See @url{http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob_plain;f=Documentation/SubmittingPatches} for the meaning
|
||||
See @url{http://kerneltrap.org/files/Jeremy/DCO.txt} for the meaning
|
||||
of sign off.
|
||||
@item
|
||||
Did you provide a clear git commit log message?
|
||||
@@ -492,10 +489,6 @@ send a reminder by email. Your patch should eventually be dealt with.
|
||||
Consider adding a regression test for your code.
|
||||
@item
|
||||
If you added YASM code please check that things still work with --disable-yasm
|
||||
@item
|
||||
Make sure you check the return values of function and return appropriate
|
||||
error codes. Especially memory allocation functions like @code{av_malloc()}
|
||||
are notoriously left unchecked, which is a serious problem.
|
||||
@end enumerate
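As mentioned in the checklist above, a new codec ID also needs a descriptor
entry in @file{libavcodec/codec_desc.c}. A hypothetical sketch of such an
entry (the codec ID, name and properties are placeholders, not a real codec):

@example
{
    .id        = AV_CODEC_ID_MYCODEC,  /* hypothetical ID added to avcodec.h */
    .type      = AVMEDIA_TYPE_VIDEO,
    .name      = "mycodec",
    .long_name = NULL_IF_CONFIG_SMALL("My example codec"),
    .props     = AV_CODEC_PROP_LOSSY,
},
@end example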
|
||||
|
||||
@section Patch review process
|
||||
@@ -536,16 +529,4 @@ Running 'make fate' accomplishes this, please see @url{fate.html} for details.
|
||||
this case, the reference results of the regression tests shall be modified
|
||||
accordingly].
|
||||
|
||||
@subsection Adding files to the fate-suite dataset
|
||||
|
||||
When there is no muxer or encoder available to generate test media for a
specific test then the media has to be included in the fate-suite.
First please make sure that the sample file is as small as possible to test the
respective decoder or demuxer sufficiently. Large files increase network
bandwidth and disk space requirements.
Once you have a working fate test and fate sample, provide a direct link to
download the sample media in the commit message or introductory message for
the patch series that you post to the ffmpeg-devel mailing list.
|
||||
|
||||
|
||||
@bye
|
||||
|
@@ -1,14 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
SRC_PATH="${1}"
|
||||
DOXYFILE="${2}"
|
||||
|
||||
shift 2
|
||||
|
||||
doxygen - <<EOF
|
||||
@INCLUDE = ${DOXYFILE}
|
||||
INPUT = $@
|
||||
HTML_HEADER = ${SRC_PATH}/doc/doxy/header.html
|
||||
HTML_FOOTER = ${SRC_PATH}/doc/doxy/footer.html
|
||||
HTML_STYLESHEET = ${SRC_PATH}/doc/doxy/doxy_stylesheet.css
|
||||
EOF
|
File diff suppressed because it is too large
@@ -1,9 +1,10 @@
|
||||
</div>
|
||||
|
||||
<div id="footer">
|
||||
Generated on $datetime for $projectname by <a href="http://www.doxygen.org/index.html">doxygen</a> $doxygenversion
|
||||
</div>
|
||||
|
||||
<footer class="footer pagination-right">
|
||||
<span class="label label-info">
|
||||
Generated on $datetime for $projectname by <a href="http://www.doxygen.org/index.html">doxygen</a> $doxygenversion
|
||||
</span>
|
||||
</footer>
|
||||
</div>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
|
@@ -1,16 +1,14 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
|
||||
<html xmlns="http://www.w3.org/1999/xhtml">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
|
||||
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
|
||||
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
|
||||
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
|
||||
<link href="$relpath$doxy_stylesheet.css" rel="stylesheet" type="text/css" />
|
||||
<!--Header replace -->
|
||||
|
||||
</head>
|
||||
|
||||
<div class="container">
|
||||
<div id="container">
|
||||
|
||||
<!--Header replace -->
|
||||
<div class="menu">
|
||||
<div id="body">
|
||||
<div>
|
||||
|
@@ -420,46 +420,6 @@ Selected by Encoder (default)
|
||||
A description of some of the currently available video encoders
|
||||
follows.
|
||||
|
||||
@section libtheora
|
||||
|
||||
Theora format supported through libtheora.
|
||||
|
||||
Requires the presence of the libtheora headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libtheora}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following global options are mapped to internal libtheora options
|
||||
which affect the quality and the bitrate of the encoded stream.
|
||||
|
||||
@table @option
|
||||
@item b
|
||||
Set the video bitrate, only works if the @code{qscale} flag in
|
||||
@option{flags} is not enabled.
|
||||
|
||||
@item flags
|
||||
Used to enable constant quality mode encoding through the
|
||||
@option{qscale} flag, and to enable the @code{pass1} and @code{pass2}
|
||||
modes.
|
||||
|
||||
@item g
|
||||
Set the GOP size.
|
||||
|
||||
@item global_quality
|
||||
Set the global quality in lambda units, only works if the
|
||||
@code{qscale} flag in @option{flags} is enabled. The value is clipped
|
||||
in the [0 - 10*@code{FF_QP2LAMBDA}] range, and then multiplied for 6.3
|
||||
to get a value in the native libtheora range [0-63]. A higher value
|
||||
corresponds to a higher quality.
|
||||
|
||||
For example, to set maximum constant quality encoding with
|
||||
@command{ffmpeg}:
|
||||
@example
|
||||
ffmpeg -i INPUT -flags:v qscale -global_quality:v "10*QP2LAMBDA" -codec:v libtheora OUTPUT.ogg
|
||||
@end example
|
||||
@end table
|
||||
|
||||
@section libvpx
|
||||
|
||||
VP8 format supported through libvpx.
|
||||
@@ -551,12 +511,6 @@ rc_2pass_vbr_minsection_pct
|
||||
@item slices
|
||||
@code{VP8E_SET_TOKEN_PARTITIONS}
|
||||
|
||||
@item max-intra-rate
|
||||
@code{VP8E_SET_MAX_INTRA_BITRATE_PCT}
|
||||
|
||||
@item force_key_frames
|
||||
@code{VPX_EFLAG_FORCE_KF}
|
||||
|
||||
@item Alternate reference frame related
|
||||
@table @option
|
||||
@item vp8flags altref
|
||||
@@ -617,12 +571,10 @@ Specify Weighted prediction for P-frames.
|
||||
Deprecated in favor of @var{x264opts} (see @var{weightp} libx264 option).
|
||||
|
||||
@item x264opts @var{options}
|
||||
Allow to set any x264 option, see @code{x264 --fullhelp} for a list.
|
||||
Allow to set any x264 option, see x264 --fullhelp for a list.
|
||||
|
||||
@var{options} is a list of @var{key}=@var{value} couples separated by
|
||||
":". In @var{filter} and @var{psy-rd} options that use ":" as a separator
|
||||
themselves, use "," instead. They accept it as well since long ago but this
|
||||
is kept undocumented for some reason.
|
||||
":".
|
||||
@end table
|
||||
|
||||
For example to specify libx264 encoding options with @command{ffmpeg}:
|
||||
|
@@ -21,78 +21,30 @@ The following unary operators are available: @code{+}, @code{-}.
|
||||
The following functions are available:
|
||||
@table @option
|
||||
@item sinh(x)
|
||||
Compute hyperbolic sine of @var{x}.
|
||||
|
||||
@item cosh(x)
|
||||
Compute hyperbolic cosine of @var{x}.
|
||||
|
||||
@item tanh(x)
|
||||
Compute hyperbolic tangent of @var{x}.
|
||||
|
||||
@item sin(x)
|
||||
Compute sine of @var{x}.
|
||||
|
||||
@item cos(x)
|
||||
Compute cosine of @var{x}.
|
||||
|
||||
@item tan(x)
|
||||
Compute tangent of @var{x}.
|
||||
|
||||
@item atan(x)
|
||||
Compute arctangent of @var{x}.
|
||||
|
||||
@item asin(x)
|
||||
Compute arcsine of @var{x}.
|
||||
|
||||
@item acos(x)
|
||||
Compute arccosine of @var{x}.
|
||||
|
||||
@item exp(x)
|
||||
Compute exponential of @var{x} (with base @code{e}, the Euler's number).
|
||||
|
||||
@item log(x)
|
||||
Compute natural logarithm of @var{x}.
|
||||
|
||||
@item abs(x)
|
||||
Compute absolute value of @var{x}.
|
||||
|
||||
@item squish(x)
|
||||
Compute expression @code{1/(1 + exp(4*x))}.
|
||||
|
||||
@item gauss(x)
|
||||
Compute Gauss function of @var{x}, corresponding to
|
||||
@code{exp(-x*x/2) / sqrt(2*PI)}.
|
||||
|
||||
@item isinf(x)
|
||||
Return 1.0 if @var{x} is +/-INFINITY, 0.0 otherwise.
|
||||
|
||||
@item isnan(x)
|
||||
Return 1.0 if @var{x} is NAN, 0.0 otherwise.
|
||||
|
||||
@item mod(x, y)
|
||||
Compute the remainder of division of @var{x} by @var{y}.
|
||||
|
||||
@item max(x, y)
|
||||
Return the maximum between @var{x} and @var{y}.
|
||||
|
||||
@item min(x, y)
|
||||
Return the minimum between @var{x} and @var{y}.
|
||||
|
||||
@item eq(x, y)
|
||||
Return 1 if @var{x} and @var{y} are equivalent, 0 otherwise.
|
||||
|
||||
@item gte(x, y)
|
||||
Return 1 if @var{x} is greater than or equal to @var{y}, 0 otherwise.
|
||||
|
||||
@item gt(x, y)
|
||||
Return 1 if @var{x} is greater than @var{y}, 0 otherwise.
|
||||
|
||||
@item lte(x, y)
|
||||
Return 1 if @var{x} is lesser than or equal to @var{y}, 0 otherwise.
|
||||
|
||||
@item lt(x, y)
|
||||
Return 1 if @var{x} is lesser than @var{y}, 0 otherwise.
|
||||
|
||||
@item st(var, expr)
|
||||
Allow to store the value of the expression @var{expr} in an internal
|
||||
variable. @var{var} specifies the number of the variable where to
|
||||
|
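The same expression evaluator is exposed to API users through
@file{libavutil/eval.h}; a minimal sketch evaluating a few of the functions
listed above (assuming the libavutil headers and library are installed):

@example
#include <stdio.h>
#include <libavutil/eval.h>

int main(void)
{
    double res;
    /* min(3, max(1, 2)) + gte(5, 4) evaluates to 3 */
    int ret = av_expr_parse_and_eval(&res, "min(3, max(1, 2)) + gte(5, 4)",
                                     NULL, NULL,  /* no custom constants   */
                                     NULL, NULL,  /* no custom 1-arg funcs  */
                                     NULL, NULL,  /* no custom 2-arg funcs  */
                                     NULL, 0, NULL);
    if (ret < 0)
        return 1;
    printf("result = %f\n", res);
    return 0;
}
@end example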
@@ -3,22 +3,20 @@ FFMPEG_LIBS= libavdevice \
|
||||
libavformat \
|
||||
libavfilter \
|
||||
libavcodec \
|
||||
libavresample \
|
||||
libswresample \
|
||||
libswscale \
|
||||
libavutil \
|
||||
|
||||
CFLAGS += -Wall -O2 -g
|
||||
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
|
||||
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
|
||||
CFLAGS += $(shell pkg-config --cflags $(FFMPEG_LIBS))
|
||||
LDLIBS += $(shell pkg-config --libs $(FFMPEG_LIBS))
|
||||
|
||||
EXAMPLES= decoding_encoding \
|
||||
demuxing \
|
||||
filtering_video \
|
||||
filtering_audio \
|
||||
metadata \
|
||||
muxing \
|
||||
resampling_audio \
|
||||
scaling_video \
|
||||
|
||||
OBJS=$(addsuffix .o,$(EXAMPLES))
|
||||
|
||||
@@ -26,12 +24,9 @@ OBJS=$(addsuffix .o,$(EXAMPLES))
|
||||
decoding_encoding: LDLIBS += -lm
|
||||
muxing: LDLIBS += -lm
|
||||
|
||||
.phony: all clean-test clean
|
||||
.phony: all clean
|
||||
|
||||
all: $(OBJS) $(EXAMPLES)
|
||||
|
||||
clean-test:
|
||||
$(RM) test*.pgm test.h264 test.mp2 test.sw test.mpg
|
||||
|
||||
clean: clean-test
|
||||
$(RM) $(EXAMPLES) $(OBJS)
|
||||
clean:
|
||||
rm -rf $(EXAMPLES) $(OBJS)
|
||||
|
@@ -1,18 +0,0 @@
|
||||
FFmpeg examples README
|
||||
----------------------
|
||||
|
||||
Both following use cases rely on pkg-config and make, thus make sure
|
||||
that you have them installed and working on your system.
|
||||
|
||||
|
||||
1) Build the installed examples in a generic read/write user directory
|
||||
|
||||
Copy to a read/write user directory and just use "make", it will link
|
||||
to the libraries on your system, assuming the PKG_CONFIG_PATH is
|
||||
correctly configured.
|
||||
|
||||
2) Build the examples in-tree
|
||||
|
||||
Assuming you are in the source FFmpeg checkout directory, you need to build
|
||||
FFmpeg (no need to make install in any prefix). Then you can go into the
|
||||
doc/examples and run a command such as PKG_CONFIG_PATH=pc-uninstalled make.
|
@@ -27,16 +27,13 @@
|
||||
* Note that libavcodec only handles codecs (mpeg, mpeg4, etc...),
|
||||
* not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...). See library 'libavformat' for the
|
||||
* format handling
|
||||
* @example doc/examples/decoding_encoding.c
|
||||
*/
|
||||
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/common.h>
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
|
||||
@@ -44,59 +41,6 @@
|
||||
#define AUDIO_INBUF_SIZE 20480
|
||||
#define AUDIO_REFILL_THRESH 4096
|
||||
|
||||
/* check that a given sample format is supported by the encoder */
|
||||
static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
const enum AVSampleFormat *p = codec->sample_fmts;
|
||||
|
||||
while (*p != AV_SAMPLE_FMT_NONE) {
|
||||
if (*p == sample_fmt)
|
||||
return 1;
|
||||
p++;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* just pick the highest supported samplerate */
|
||||
static int select_sample_rate(AVCodec *codec)
|
||||
{
|
||||
const int *p;
|
||||
int best_samplerate = 0;
|
||||
|
||||
if (!codec->supported_samplerates)
|
||||
return 44100;
|
||||
|
||||
p = codec->supported_samplerates;
|
||||
while (*p) {
|
||||
best_samplerate = FFMAX(*p, best_samplerate);
|
||||
p++;
|
||||
}
|
||||
return best_samplerate;
|
||||
}
|
||||
|
||||
/* select layout with the highest channel count */
|
||||
static int select_channel_layout(AVCodec *codec)
|
||||
{
|
||||
const uint64_t *p;
|
||||
uint64_t best_ch_layout = 0;
|
||||
int best_nb_channells = 0;
|
||||
|
||||
if (!codec->channel_layouts)
|
||||
return AV_CH_LAYOUT_STEREO;
|
||||
|
||||
p = codec->channel_layouts;
|
||||
while (*p) {
|
||||
int nb_channels = av_get_channel_layout_nb_channels(*p);
|
||||
|
||||
if (nb_channels > best_nb_channells) {
|
||||
best_ch_layout = *p;
|
||||
best_nb_channells = nb_channels;
|
||||
}
|
||||
p++;
|
||||
}
|
||||
return best_ch_layout;
|
||||
}
|
||||
|
||||
/*
|
||||
* Audio encoding example
|
||||
*/
|
||||
@@ -104,83 +48,44 @@ static void audio_encode_example(const char *filename)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
AVFrame *frame;
|
||||
AVPacket pkt;
|
||||
int i, j, k, ret, got_output;
|
||||
int buffer_size;
|
||||
int frame_size, i, j, out_size, outbuf_size;
|
||||
FILE *f;
|
||||
uint16_t *samples;
|
||||
short *samples;
|
||||
float t, tincr;
|
||||
uint8_t *outbuf;
|
||||
|
||||
printf("Encode audio file %s\n", filename);
|
||||
|
||||
/* find the MP2 encoder */
|
||||
codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
|
||||
codec = avcodec_find_encoder(CODEC_ID_MP2);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
fprintf(stderr, "codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate audio codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = 64000;
|
||||
|
||||
/* check that the encoder supports s16 pcm input */
|
||||
c->sample_rate = 44100;
|
||||
c->channels = 2;
|
||||
c->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
if (!check_sample_fmt(codec, c->sample_fmt)) {
|
||||
fprintf(stderr, "Encoder does not support sample format %s",
|
||||
av_get_sample_fmt_name(c->sample_fmt));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* select other audio parameters supported by the encoder */
|
||||
c->sample_rate = select_sample_rate(codec);
|
||||
c->channel_layout = select_channel_layout(codec);
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
fprintf(stderr, "could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* the codec gives us the frame size, in samples */
|
||||
frame_size = c->frame_size;
|
||||
samples = malloc(frame_size * 2 * c->channels);
|
||||
outbuf_size = 10000;
|
||||
outbuf = malloc(outbuf_size);
|
||||
|
||||
f = fopen(filename, "wb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* frame containing input raw audio */
|
||||
frame = avcodec_alloc_frame();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame->nb_samples = c->frame_size;
|
||||
frame->format = c->sample_fmt;
|
||||
frame->channel_layout = c->channel_layout;
|
||||
|
||||
/* the codec gives us the frame size, in samples,
|
||||
* we calculate the size of the samples buffer in bytes */
|
||||
buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
|
||||
c->sample_fmt, 0);
|
||||
samples = av_malloc(buffer_size);
|
||||
if (!samples) {
|
||||
fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
|
||||
buffer_size);
|
||||
exit(1);
|
||||
}
|
||||
/* setup the data pointers in the AVFrame */
|
||||
ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
|
||||
(const uint8_t*)samples, buffer_size, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not setup audio frame\n");
|
||||
fprintf(stderr, "could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
@@ -188,46 +93,19 @@ static void audio_encode_example(const char *filename)
|
||||
t = 0;
|
||||
tincr = 2 * M_PI * 440.0 / c->sample_rate;
|
||||
for(i=0;i<200;i++) {
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL; // packet data will be allocated by the encoder
|
||||
pkt.size = 0;
|
||||
|
||||
for (j = 0; j < c->frame_size; j++) {
|
||||
for(j=0;j<frame_size;j++) {
|
||||
samples[2*j] = (int)(sin(t) * 10000);
|
||||
|
||||
for (k = 1; k < c->channels; k++)
|
||||
samples[2*j + k] = samples[2*j];
|
||||
samples[2*j+1] = samples[2*j];
|
||||
t += tincr;
|
||||
}
|
||||
/* encode the samples */
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
if (got_output) {
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
|
||||
/* get the delayed frames */
|
||||
for (got_output = 1; got_output; i++) {
|
||||
ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
out_size = avcodec_encode_audio(c, outbuf, outbuf_size, samples);
|
||||
fwrite(outbuf, 1, out_size, f);
|
||||
}
|
||||
fclose(f);
|
||||
free(outbuf);
|
||||
free(samples);
|
||||
|
||||
av_freep(&samples);
|
||||
avcodec_free_frame(&frame);
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
}
|
||||
@@ -247,30 +125,26 @@ static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
|
||||
av_init_packet(&avpkt);
|
||||
|
||||
printf("Decode audio file %s to %s\n", filename, outfilename);
|
||||
printf("Decode audio file %s\n", filename);
|
||||
|
||||
/* find the mpeg audio decoder */
|
||||
codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
|
||||
codec = avcodec_find_decoder(CODEC_ID_MP2);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
fprintf(stderr, "codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate audio codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
fprintf(stderr, "could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "rb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
fprintf(stderr, "could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
outfile = fopen(outfilename, "wb");
|
||||
@@ -288,7 +162,7 @@ static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
|
||||
if (!decoded_frame) {
|
||||
if (!(decoded_frame = avcodec_alloc_frame())) {
|
||||
fprintf(stderr, "Could not allocate audio frame\n");
|
||||
fprintf(stderr, "out of memory\n");
|
||||
exit(1);
|
||||
}
|
||||
} else
|
||||
@@ -329,7 +203,7 @@ static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
avcodec_free_frame(&decoded_frame);
|
||||
av_free(decoded_frame);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -339,26 +213,23 @@ static void video_encode_example(const char *filename, int codec_id)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int i, ret, x, y, got_output;
|
||||
int i, out_size, x, y, outbuf_size;
|
||||
FILE *f;
|
||||
AVFrame *frame;
|
||||
AVPacket pkt;
|
||||
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
|
||||
AVFrame *picture;
|
||||
uint8_t *outbuf;
|
||||
int had_output=0;
|
||||
|
||||
printf("Encode video file %s\n", filename);
|
||||
|
||||
/* find the mpeg1 video encoder */
|
||||
codec = avcodec_find_encoder(codec_id);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
fprintf(stderr, "codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate video codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
picture= avcodec_alloc_frame();
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = 400000;
|
||||
@@ -369,105 +240,81 @@ static void video_encode_example(const char *filename, int codec_id)
|
||||
c->time_base= (AVRational){1,25};
|
||||
c->gop_size = 10; /* emit one intra frame every ten frames */
|
||||
c->max_b_frames=1;
|
||||
c->pix_fmt = AV_PIX_FMT_YUV420P;
|
||||
c->pix_fmt = PIX_FMT_YUV420P;
|
||||
|
||||
if(codec_id == AV_CODEC_ID_H264)
|
||||
if(codec_id == CODEC_ID_H264)
|
||||
av_opt_set(c->priv_data, "preset", "slow", 0);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
fprintf(stderr, "could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "wb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
fprintf(stderr, "could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame = avcodec_alloc_frame();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
frame->format = c->pix_fmt;
|
||||
frame->width = c->width;
|
||||
frame->height = c->height;
|
||||
/* alloc image and output buffer */
|
||||
outbuf_size = 100000 + 12*c->width*c->height;
|
||||
outbuf = malloc(outbuf_size);
|
||||
|
||||
/* the image can be allocated by any means and av_image_alloc() is
|
||||
* just the most convenient way if av_malloc() is to be used */
|
||||
ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
|
||||
c->pix_fmt, 32);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate raw picture buffer\n");
|
||||
exit(1);
|
||||
}
|
||||
av_image_alloc(picture->data, picture->linesize,
|
||||
c->width, c->height, c->pix_fmt, 1);
|
||||
|
||||
/* encode 1 second of video */
|
||||
for(i=0;i<25;i++) {
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL; // packet data will be allocated by the encoder
|
||||
pkt.size = 0;
|
||||
|
||||
fflush(stdout);
|
||||
/* prepare a dummy image */
|
||||
/* Y */
|
||||
for(y=0;y<c->height;y++) {
|
||||
for(x=0;x<c->width;x++) {
|
||||
frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
|
||||
picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
|
||||
}
|
||||
}
|
||||
|
||||
/* Cb and Cr */
|
||||
for(y=0;y<c->height/2;y++) {
|
||||
for(x=0;x<c->width/2;x++) {
|
||||
frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
|
||||
frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
|
||||
picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
|
||||
picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
|
||||
}
|
||||
}
|
||||
|
||||
frame->pts = i;
|
||||
|
||||
/* encode the image */
|
||||
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
|
||||
had_output |= out_size;
|
||||
printf("encoding frame %3d (size=%5d)\n", i, out_size);
|
||||
fwrite(outbuf, 1, out_size, f);
|
||||
}
|
||||
|
||||
/* get the delayed frames */
|
||||
for (got_output = 1; got_output; i++) {
|
||||
for(; out_size || !had_output; i++) {
|
||||
fflush(stdout);
|
||||
|
||||
ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
|
||||
had_output |= out_size;
|
||||
printf("write frame %3d (size=%5d)\n", i, out_size);
|
||||
fwrite(outbuf, 1, out_size, f);
|
||||
}
|
||||
|
||||
/* add sequence end code to have a real mpeg file */
|
||||
fwrite(endcode, 1, sizeof(endcode), f);
|
||||
outbuf[0] = 0x00;
|
||||
outbuf[1] = 0x00;
|
||||
outbuf[2] = 0x01;
|
||||
outbuf[3] = 0xb7;
|
||||
fwrite(outbuf, 1, 4, f);
|
||||
fclose(f);
|
||||
free(outbuf);
|
||||
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
av_freep(&frame->data[0]);
|
||||
avcodec_free_frame(&frame);
|
||||
av_free(picture->data[0]);
|
||||
av_free(picture);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
@@ -488,42 +335,15 @@ static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
|
||||
AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
|
||||
{
|
||||
int len, got_frame;
|
||||
char buf[1024];
|
||||
|
||||
len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
|
||||
if (len < 0) {
|
||||
fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
|
||||
return len;
|
||||
}
|
||||
if (got_frame) {
|
||||
printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
|
||||
fflush(stdout);
|
||||
|
||||
/* the picture is allocated by the decoder, no need to free it */
|
||||
snprintf(buf, sizeof(buf), outfilename, *frame_count);
|
||||
pgm_save(frame->data[0], frame->linesize[0],
|
||||
avctx->width, avctx->height, buf);
|
||||
(*frame_count)++;
|
||||
}
|
||||
if (pkt->data) {
|
||||
pkt->size -= len;
|
||||
pkt->data += len;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void video_decode_example(const char *outfilename, const char *filename)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int frame_count;
|
||||
int frame, got_picture, len;
|
||||
FILE *f;
|
||||
AVFrame *frame;
|
||||
AVFrame *picture;
|
||||
uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
|
||||
char buf[1024];
|
||||
AVPacket avpkt;
|
||||
|
||||
av_init_packet(&avpkt);
|
||||
@@ -531,20 +351,17 @@ static void video_decode_example(const char *outfilename, const char *filename)
|
||||
/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
|
||||
memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
|
||||
printf("Decode video file %s to %s\n", filename, outfilename);
|
||||
printf("Decode video file %s\n", filename);
|
||||
|
||||
/* find the mpeg1 video decoder */
|
||||
codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
|
||||
codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
fprintf(stderr, "codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate video codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
picture= avcodec_alloc_frame();
|
||||
|
||||
if(codec->capabilities&CODEC_CAP_TRUNCATED)
|
||||
c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
|
||||
@@ -555,23 +372,19 @@ static void video_decode_example(const char *outfilename, const char *filename)
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
fprintf(stderr, "could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* the codec gives us the frame size, in samples */
|
||||
|
||||
f = fopen(filename, "rb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
fprintf(stderr, "could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame = avcodec_alloc_frame();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame_count = 0;
|
||||
frame = 0;
|
||||
for(;;) {
|
||||
avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
|
||||
if (avpkt.size == 0)
|
||||
@@ -593,9 +406,26 @@ static void video_decode_example(const char *outfilename, const char *filename)
|
||||
/* here, we use a stream based decoder (mpeg1video), so we
|
||||
feed decoder and see if it could decode a frame */
|
||||
avpkt.data = inbuf;
|
||||
while (avpkt.size > 0)
|
||||
if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
|
||||
while (avpkt.size > 0) {
|
||||
len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
|
||||
if (len < 0) {
|
||||
fprintf(stderr, "Error while decoding frame %d\n", frame);
|
||||
exit(1);
|
||||
}
|
||||
if (got_picture) {
|
||||
printf("saving frame %3d\n", frame);
|
||||
fflush(stdout);
|
||||
|
||||
/* the picture is allocated by the decoder. no need to
|
||||
free it */
|
||||
snprintf(buf, sizeof(buf), outfilename, frame);
|
||||
pgm_save(picture->data[0], picture->linesize[0],
|
||||
c->width, c->height, buf);
|
||||
frame++;
|
||||
}
|
||||
avpkt.size -= len;
|
||||
avpkt.data += len;
|
||||
}
|
||||
}
|
||||
|
||||
/* some codecs, such as MPEG, transmit the I and P frame with a
|
@@ -603,48 +433,47 @@ static void video_decode_example(const char *outfilename, const char *filename)
       chance to get the last frame of the video */
    avpkt.data = NULL;
    avpkt.size = 0;
    decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);
    len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
    if (got_picture) {
        printf("saving last frame %3d\n", frame);
        fflush(stdout);

        /* the picture is allocated by the decoder. no need to
           free it */
        snprintf(buf, sizeof(buf), outfilename, frame);
        pgm_save(picture->data[0], picture->linesize[0],
                 c->width, c->height, buf);
        frame++;
    }

    fclose(f);

    avcodec_close(c);
    av_free(c);
    avcodec_free_frame(&frame);
    av_free(picture);
    printf("\n");
}

int main(int argc, char **argv)
{
    const char *output_type;
    const char *filename;

    /* register all the codecs */
    avcodec_register_all();

    if (argc < 2) {
        printf("usage: %s output_type\n"
               "API example program to decode/encode a media stream with libavcodec.\n"
               "This program generates a synthetic stream and encodes it to a file\n"
               "named test.h264, test.mp2 or test.mpg depending on output_type.\n"
               "The encoded stream is then decoded and written to a raw data output.\n"
               "output_type must be choosen between 'h264', 'mp2', 'mpg'.\n",
               argv[0]);
        return 1;
    }
    output_type = argv[1];
    if (argc <= 1) {
        audio_encode_example("/tmp/test.mp2");
        audio_decode_example("/tmp/test.sw", "/tmp/test.mp2");

    if (!strcmp(output_type, "h264")) {
        video_encode_example("test.h264", AV_CODEC_ID_H264);
    } else if (!strcmp(output_type, "mp2")) {
        audio_encode_example("test.mp2");
        audio_decode_example("test.sw", "test.mp2");
    } else if (!strcmp(output_type, "mpg")) {
        video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO);
        video_decode_example("test%02d.pgm", "test.mpg");
        video_encode_example("/tmp/test.h264", CODEC_ID_H264);
        video_encode_example("/tmp/test.mpg", CODEC_ID_MPEG1VIDEO);
        filename = "/tmp/test.mpg";
    } else {
        fprintf(stderr, "Invalid output type '%s', choose between 'h264', 'mp2', or 'mpg'\n",
                output_type);
        return 1;
        filename = argv[1];
    }

    // audio_decode_example("/tmp/test.sw", filename);
    video_decode_example("/tmp/test%d.pgm", filename);

    return 0;
}

@@ -1,340 +0,0 @@
/*
 * Copyright (c) 2012 Stefano Sabatini
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat demuxing API use example.
 *
 * Show how to use the libavformat and libavcodec API to demux and
 * decode audio and video data.
 * @example doc/examples/demuxing.c
 */

#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>

static AVFormatContext *fmt_ctx = NULL;
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
static AVStream *video_stream = NULL, *audio_stream = NULL;
static const char *src_filename = NULL;
static const char *video_dst_filename = NULL;
static const char *audio_dst_filename = NULL;
static FILE *video_dst_file = NULL;
static FILE *audio_dst_file = NULL;

static uint8_t *video_dst_data[4] = {NULL};
static int video_dst_linesize[4];
static int video_dst_bufsize;

static uint8_t **audio_dst_data = NULL;
static int audio_dst_linesize;
static int audio_dst_bufsize;

static int video_stream_idx = -1, audio_stream_idx = -1;
static AVFrame *frame = NULL;
static AVPacket pkt;
static int video_frame_count = 0;
static int audio_frame_count = 0;

static int decode_packet(int *got_frame, int cached)
{
    int ret = 0;

    if (pkt.stream_index == video_stream_idx) {
        /* decode video frame */
        ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding video frame\n");
            return ret;
        }

        if (*got_frame) {
            printf("video_frame%s n:%d coded_n:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   video_frame_count++, frame->coded_picture_number,
                   av_ts2timestr(frame->pts, &video_dec_ctx->time_base));

            /* copy decoded frame to destination buffer:
             * this is required since rawvideo expects non aligned data */
            av_image_copy(video_dst_data, video_dst_linesize,
                          (const uint8_t **)(frame->data), frame->linesize,
                          video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);

            /* write to rawvideo file */
            fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
        }
    } else if (pkt.stream_index == audio_stream_idx) {
        /* decode audio frame */
        ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding audio frame\n");
            return ret;
        }

        if (*got_frame) {
            printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   audio_frame_count++, frame->nb_samples,
                   av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));

            ret = av_samples_alloc(audio_dst_data, &audio_dst_linesize, frame->channels,
                                   frame->nb_samples, frame->format, 1);
            if (ret < 0) {
                fprintf(stderr, "Could not allocate audio buffer\n");
                return AVERROR(ENOMEM);
            }

            /* TODO: extend return code of the av_samples_* functions so that this call is not needed */
            audio_dst_bufsize =
                av_samples_get_buffer_size(NULL, frame->channels,
                                           frame->nb_samples, frame->format, 1);

            /* copy audio data to destination buffer:
             * this is required since rawaudio expects non aligned data */
            av_samples_copy(audio_dst_data, frame->data, 0, 0,
                            frame->nb_samples, frame->channels, frame->format);

            /* write to rawaudio file */
            fwrite(audio_dst_data[0], 1, audio_dst_bufsize, audio_dst_file);
            av_freep(&audio_dst_data[0]);
        }
    }

    return ret;
}

static int open_codec_context(int *stream_idx,
                              AVFormatContext *fmt_ctx, enum AVMediaType type)
{
    int ret;
    AVStream *st;
    AVCodecContext *dec_ctx = NULL;
    AVCodec *dec = NULL;

    ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not find %s stream in input file '%s'\n",
                av_get_media_type_string(type), src_filename);
        return ret;
    } else {
        *stream_idx = ret;
        st = fmt_ctx->streams[*stream_idx];

        /* find decoder for the stream */
        dec_ctx = st->codec;
        dec = avcodec_find_decoder(dec_ctx->codec_id);
        if (!dec) {
            fprintf(stderr, "Failed to find %s codec\n",
                    av_get_media_type_string(type));
            return ret;
        }

        if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
            fprintf(stderr, "Failed to open %s codec\n",
                    av_get_media_type_string(type));
            return ret;
        }
    }

    return 0;
}

static int get_format_from_sample_fmt(const char **fmt,
                                      enum AVSampleFormat sample_fmt)
{
    int i;
    struct sample_fmt_entry {
        enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
    } sample_fmt_entries[] = {
        { AV_SAMPLE_FMT_U8, "u8", "u8" },
        { AV_SAMPLE_FMT_S16, "s16be", "s16le" },
        { AV_SAMPLE_FMT_S32, "s32be", "s32le" },
        { AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
        { AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
    };
    *fmt = NULL;

    for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
        struct sample_fmt_entry *entry = &sample_fmt_entries[i];
        if (sample_fmt == entry->sample_fmt) {
            *fmt = AV_NE(entry->fmt_be, entry->fmt_le);
            return 0;
        }
    }

    fprintf(stderr,
            "sample format %s is not supported as output format\n",
            av_get_sample_fmt_name(sample_fmt));
    return -1;
}

int main (int argc, char **argv)
{
    int ret = 0, got_frame;

    if (argc != 4) {
        fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "video frames to a rawvideo file named video_output_file, and decoded\n"
                "audio frames to a rawaudio file named audio_output_file.\n"
                "\n", argv[0]);
        exit(1);
    }
    src_filename = argv[1];
    video_dst_filename = argv[2];
    audio_dst_filename = argv[3];

    /* register all formats and codecs */
    av_register_all();

    /* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }

    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }

    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;

        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        /* allocate image where the decoded image will be put */
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             video_dec_ctx->width, video_dec_ctx->height,
                             video_dec_ctx->pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        video_dst_bufsize = ret;
    }

    if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        int nb_planes;

        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dec_ctx = audio_stream->codec;
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        nb_planes = av_sample_fmt_is_planar(audio_dec_ctx->sample_fmt) ?
            audio_dec_ctx->channels : 1;
        audio_dst_data = av_mallocz(sizeof(uint8_t *) * nb_planes);
        if (!audio_dst_data) {
            fprintf(stderr, "Could not allocate audio data buffers\n");
            ret = AVERROR(ENOMEM);
            goto end;
        }
    }

    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);

    if (!audio_stream && !video_stream) {
        fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }

    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
    if (audio_stream)
        printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);

    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0)
        decode_packet(&got_frame, 0);

    /* flush cached frames */
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);

    printf("Demuxing succeeded.\n");

    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
               video_dst_filename);
    }

    if (audio_stream) {
        const char *fmt;

        if ((ret = get_format_from_sample_fmt(&fmt, audio_dec_ctx->sample_fmt)) < 0)
            goto end;
        printf("Play the output audio file with the command:\n"
               "ffplay -f %s -ac %d -ar %d %s\n",
               fmt, audio_dec_ctx->channels, audio_dec_ctx->sample_rate,
               audio_dst_filename);
    }

end:
    if (video_dec_ctx)
        avcodec_close(video_dec_ctx);
    if (audio_dec_ctx)
        avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    if (audio_dst_file)
        fclose(audio_dst_file);
    av_free(frame);
    av_free(video_dst_data[0]);
    av_free(audio_dst_data);

    return ret < 0;
}

@@ -25,7 +25,6 @@
/**
 * @file
 * API example for audio decoding and filtering
 * @example doc/examples/filtering_audio.c
 */

#include <unistd.h>

@@ -85,23 +84,21 @@ static int init_filters(const char *filters_descr)
    char args[512];
    int ret;
    AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
    AVFilter *abuffersink = avfilter_get_by_name("ffabuffersink");
    AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
    const int64_t *chlayouts = avfilter_all_channel_layouts;
    AVABufferSinkParams *abuffersink_params;
    const AVFilterLink *outlink;
    AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;

    filter_graph = avfilter_graph_alloc();

    /* buffer audio source: the decoded frames from the decoder will be inserted here. */
    if (!dec_ctx->channel_layout)
        dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
    snprintf(args, sizeof(args),
             "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
             time_base.num, time_base.den, dec_ctx->sample_rate,
             av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
    snprintf(args, sizeof(args), "%d:%d:0x%"PRIx64,
             dec_ctx->sample_rate, dec_ctx->sample_fmt, dec_ctx->channel_layout);
    ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
                                       args, NULL, filter_graph);
    if (ret < 0) {
@@ -112,6 +109,7 @@ static int init_filters(const char *filters_descr)
    /* buffer audio sink: to terminate the filter chain. */
    abuffersink_params = av_abuffersink_params_alloc();
    abuffersink_params->sample_fmts = sample_fmts;
    abuffersink_params->channel_layouts = chlayouts;
    ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
                                       NULL, abuffersink_params, filter_graph);
    av_free(abuffersink_params);
@@ -196,6 +194,7 @@ int main(int argc, char **argv)
            avcodec_get_frame_defaults(&frame);
            got_frame = 0;
            ret = avcodec_decode_audio4(dec_ctx, &frame, &got_frame, &packet);
            av_free_packet(&packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
                continue;
@@ -209,20 +208,15 @@ int main(int argc, char **argv)
                }

                /* pull filtered audio from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_buffer_ref(buffersink_ctx, &samplesref, 0);
                    if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if(ret < 0)
                        goto end;
                while (avfilter_poll_frame(buffersink_ctx->inputs[0])) {
                    av_buffersink_get_buffer_ref(buffersink_ctx, &samplesref, 0);
                    if (samplesref) {
                        print_samplesref(samplesref);
                        avfilter_unref_bufferp(&samplesref);
                        avfilter_unref_buffer(samplesref);
                    }
                }
            }
        }
        av_free_packet(&packet);
    }
end:
    avfilter_graph_free(&filter_graph);

@@ -24,7 +24,6 @@
/**
 * @file
 * API example for decoding and filtering
 * @example doc/examples/filtering_video.c
 */

#define _XOPEN_SOURCE 600 /* for usleep */
@@ -35,7 +34,6 @@
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>

const char *filter_descr = "scale=78:24";

@@ -85,21 +83,17 @@ static int init_filters(const char *filters_descr)
    char args[512];
    int ret;
    AVFilter *buffersrc = avfilter_get_by_name("buffer");
    AVFilter *buffersink = avfilter_get_by_name("ffbuffersink");
    AVFilter *buffersink = avfilter_get_by_name("buffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
    AVBufferSinkParams *buffersink_params;

    enum PixelFormat pix_fmts[] = { PIX_FMT_GRAY8, PIX_FMT_NONE };
    filter_graph = avfilter_graph_alloc();

    /* buffer video source: the decoded frames from the decoder will be inserted here. */
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
             dec_ctx->time_base.num, dec_ctx->time_base.den,
             dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);

    snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d",
             dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
             dec_ctx->time_base.num, dec_ctx->time_base.den,
             dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                       args, NULL, filter_graph);
    if (ret < 0) {
@@ -108,11 +102,8 @@ static int init_filters(const char *filters_descr)
    }

    /* buffer video sink: to terminate the filter chain. */
    buffersink_params = av_buffersink_params_alloc();
    buffersink_params->pixel_fmts = pix_fmts;
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                       NULL, buffersink_params, filter_graph);
    av_free(buffersink_params);
                                       NULL, pix_fmts, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
        return ret;
@@ -200,6 +191,7 @@ int main(int argc, char **argv)
            avcodec_get_frame_defaults(&frame);
            got_frame = 0;
            ret = avcodec_decode_video2(dec_ctx, &frame, &got_frame, &packet);
            av_free_packet(&packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
                break;
@@ -209,27 +201,18 @@ int main(int argc, char **argv)
                frame.pts = av_frame_get_best_effort_timestamp(&frame);

                /* push the decoded frame into the filtergraph */
                if (av_buffersrc_add_frame(buffersrc_ctx, &frame, 0) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                    break;
                }
                av_vsrc_buffer_add_frame(buffersrc_ctx, &frame, 0);

                /* pull filtered pictures from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_buffer_ref(buffersink_ctx, &picref, 0);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0)
                        goto end;

                while (avfilter_poll_frame(buffersink_ctx->inputs[0])) {
                    av_buffersink_get_buffer_ref(buffersink_ctx, &picref, 0);
                    if (picref) {
                        display_picref(picref, buffersink_ctx->inputs[0]->time_base);
                        avfilter_unref_bufferp(&picref);
                        avfilter_unref_buffer(picref);
                    }
                }
            }
        }
        av_free_packet(&packet);
    }
end:
    avfilter_graph_free(&filter_graph);

@@ -23,7 +23,6 @@
/**
 * @file
 * Shows how the metadata API can be used in application programs.
 * @example doc/examples/metadata.c
 */

#include <stdio.h>

@@ -26,7 +26,6 @@
 *
 * Output a media file in any supported libavformat format.
 * The default codecs are used.
 * @example doc/examples/muxing.c
 */

#include <stdlib.h>
@@ -38,11 +37,13 @@
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

#undef exit

/* 5 seconds stream duration */
#define STREAM_DURATION 200.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */

static int sws_flags = SWS_BICUBIC;

@@ -53,95 +54,53 @@ static float t, tincr, tincr2;
static int16_t *samples;
static int audio_input_frame_size;

/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
                            enum AVCodecID codec_id)
/*
 * add an audio output stream
 */
static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;
    AVCodec *codec;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
    /* find the audio encoder */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    st = avformat_new_stream(oc, *codec);
    st = avformat_new_stream(oc, codec);
    if (!st) {
        fprintf(stderr, "Could not allocate stream\n");
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }
    st->id = oc->nb_streams-1;
    st->id = 1;

    c = st->codec;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        st->id = 1;
        c->sample_fmt = AV_SAMPLE_FMT_S16;
        c->bit_rate = 64000;
        c->sample_rate = 44100;
        c->channels = 2;
        break;
    /* put sample parameters */
    c->sample_fmt = AV_SAMPLE_FMT_S16;
    c->bit_rate = 64000;
    c->sample_rate = 44100;
    c->channels = 2;

    case AVMEDIA_TYPE_VIDEO:
        avcodec_get_context_defaults3(c, *codec);
        c->codec_id = codec_id;

        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width = 352;
        c->height = 288;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        c->time_base.den = STREAM_FRAME_RATE;
        c->time_base.num = 1;
        c->gop_size = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    // some formats want stream headers to be separate
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

/**************************************************************/
/* audio output */

static float t, tincr, tincr2;
static int16_t *samples;
static int audio_input_frame_size;

static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
static void open_audio(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    int ret;

    c = st->codec;

    /* open it */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
    if (avcodec_open2(c, NULL, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

@@ -158,10 +117,6 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
    samples = av_malloc(audio_input_frame_size *
                        av_get_bytes_per_sample(c->sample_fmt) *
                        c->channels);
    if (!samples) {
        fprintf(stderr, "Could not allocate audio samples buffer\n");
        exit(1);
    }
}

/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
@@ -186,7 +141,7 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame = avcodec_alloc_frame();
    int got_packet, ret;
    int got_packet;

    av_init_packet(&pkt);
    c = st->codec;
@@ -199,25 +154,17 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
                                   av_get_bytes_per_sample(c->sample_fmt) *
                                   c->channels, 1);

    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }

    avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (!got_packet)
        return;

    pkt.stream_index = st->index;

    /* Write the compressed frame to the media file. */
    ret = av_interleaved_write_frame(oc, &pkt);
    if (ret != 0) {
        fprintf(stderr, "Error while writing audio frame: %s\n",
                av_err2str(ret));
    if (av_interleaved_write_frame(oc, &pkt) != 0) {
        fprintf(stderr, "Error while writing audio frame\n");
        exit(1);
    }
    avcodec_free_frame(&frame);
}

static void close_audio(AVFormatContext *oc, AVStream *st)
@@ -230,54 +177,138 @@ static void close_audio(AVFormatContext *oc, AVStream *st)
/**************************************************************/
/* video output */

static AVFrame *frame;
static AVPicture src_picture, dst_picture;
static int frame_count;
static AVFrame *picture, *tmp_picture;
static uint8_t *video_outbuf;
static int frame_count, video_outbuf_size;

static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
/* Add a video output stream. */
static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
{
    int ret;
    AVCodecContext *c = st->codec;
    AVCodecContext *c;
    AVStream *st;
    AVCodec *codec;

    /* open the codec */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
    /* find the video encoder */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* allocate and init a re-usable frame */
    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
    st = avformat_new_stream(oc, codec);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }
    avcodec_get_context_defaults3(c, codec);

    c->codec_id = codec_id;

    /* Put sample parameters. */
    c->bit_rate = 400000;
    /* Resolution must be a multiple of two. */
    c->width = 352;
    c->height = 288;
    /* timebase: This is the fundamental unit of time (in seconds) in terms
     * of which frame timestamps are represented. For fixed-fps content,
     * timebase should be 1/framerate and timestamp increments should be
     * identical to 1. */
    c->time_base.den = STREAM_FRAME_RATE;
    c->time_base.num = 1;
    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
    c->pix_fmt = STREAM_PIX_FMT;
    if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B frames */
        c->max_b_frames = 2;
    }
    if (c->codec_id == CODEC_ID_MPEG1VIDEO) {
        /* Needed to avoid using macroblocks in which some coeffs overflow.
         * This does not happen with normal video, it just happens here as
         * the motion of the chroma plane does not match the luma plane. */
        c->mb_decision = 2;
    }
    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    uint8_t *picture_buf;
    int size;

    picture = avcodec_alloc_frame();
    if (!picture)
        return NULL;
    size = avpicture_get_size(pix_fmt, width, height);
    picture_buf = av_malloc(size);
    if (!picture_buf) {
        av_free(picture);
        return NULL;
    }
    avpicture_fill((AVPicture *)picture, picture_buf,
                   pix_fmt, width, height);
    return picture;
}

static void open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;

    c = st->codec;

    /* open the codec */
    if (avcodec_open2(c, NULL, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* Allocate output buffer. */
        /* XXX: API change will be done. */
        /* Buffers passed into lav* can be allocated any way you prefer,
         * as long as they're aligned enough for the architecture, and
         * they're freed appropriately (such as using av_free for buffers
         * allocated with av_malloc). */
        video_outbuf_size = 200000;
        video_outbuf = av_malloc(video_outbuf_size);
    }

    /* Allocate the encoded raw picture. */
    ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
    picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate temporary picture: %s\n",
                    av_err2str(ret));
    tmp_picture = NULL;
    if (c->pix_fmt != PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* copy data and linesize picture pointers to frame */
    *((AVPicture *)frame) = dst_picture;
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVPicture *pict, int frame_index,
static void fill_yuv_image(AVFrame *pict, int frame_index,
                           int width, int height)
{
    int x, y, i;
@@ -300,69 +331,70 @@ static void fill_yuv_image(AVPicture *pict, int frame_index,

static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int ret;
    static struct SwsContext *sws_ctx;
    AVCodecContext *c = st->codec;
    int out_size, ret;
    AVCodecContext *c;
    static struct SwsContext *img_convert_ctx;

    c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* No more frames to compress. The codec has a latency of a few
         * frames if using B-frames, so we get the last frames by
         * passing the same picture again. */
    } else {
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        if (c->pix_fmt != PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (!sws_ctx) {
                sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                         c->width, c->height, c->pix_fmt,
                                         sws_flags, NULL, NULL, NULL);
                if (!sws_ctx) {
            if (img_convert_ctx == NULL) {
                img_convert_ctx = sws_getContext(c->width, c->height,
                                                 PIX_FMT_YUV420P,
                                                 c->width, c->height,
                                                 c->pix_fmt,
                                                 sws_flags, NULL, NULL, NULL);
                if (img_convert_ctx == NULL) {
                    fprintf(stderr,
                            "Could not initialize the conversion context\n");
                            "Cannot initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(&src_picture, frame_count, c->width, c->height);
            sws_scale(sws_ctx,
                      (const uint8_t * const *)src_picture.data, src_picture.linesize,
                      0, c->height, dst_picture.data, dst_picture.linesize);
            fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
            sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
                      0, c->height, picture->data, picture->linesize);
        } else {
            fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
            fill_yuv_image(picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* Raw video case - directly store the picture in the packet */
        /* Raw video case - the API will change slightly in the near
         * future for that. */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index = st->index;
        pkt.data = dst_picture.data[0];
        pkt.data = (uint8_t *)picture;
        pkt.size = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        AVPacket pkt;
        int got_output;

        av_init_packet(&pkt);
        pkt.data = NULL; // packet data will be allocated by the encoder
        pkt.size = 0;

        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
            exit(1);
        }

        out_size = avcodec_encode_video(c, video_outbuf,
                                        video_outbuf_size, picture);
        /* If size is zero, it means the image was buffered. */
        if (got_output) {
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts = av_rescale_q(c->coded_frame->pts,
                                       c->time_base, st->time_base);
            if (c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;

            pkt.stream_index = st->index;
            pkt.data = video_outbuf;
            pkt.size = out_size;

            /* Write the compressed frame to the media file. */
            ret = av_interleaved_write_frame(oc, &pkt);
@@ -371,7 +403,7 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
@@ -380,9 +412,13 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(src_picture.data[0]);
    av_free(dst_picture.data[0]);
    av_free(frame);
    av_free(picture->data[0]);
    av_free(picture);
    if (tmp_picture) {
        av_free(tmp_picture->data[0]);
        av_free(tmp_picture);
    }
    av_free(video_outbuf);
}

/**************************************************************/
@@ -394,9 +430,8 @@ int main(int argc, char **argv)
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    AVCodec *audio_codec, *video_codec;
    double audio_pts, video_pts;
    int ret, i;
    int i;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();
@@ -404,10 +439,8 @@ int main(int argc, char **argv)
    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }
@@ -429,43 +462,34 @@ int main(int argc, char **argv)
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;

    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        video_st = add_stream(oc, &video_codec, fmt->video_codec);
    if (fmt->video_codec != CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec);
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
    if (fmt->audio_codec != CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec);
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_codec, video_st);
        open_video(oc, video_st);
    if (audio_st)
        open_audio(oc, audio_codec, audio_st);
        open_audio(oc, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }
    avformat_write_header(oc, NULL);

    if (frame)
        frame->pts = 0;
    picture->pts = 0;
    for (;;) {
        /* Compute current audio and video time. */
        if (audio_st)
@@ -488,7 +512,7 @@ int main(int argc, char **argv)
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
            frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
            picture->pts++;
        }
    }

@@ -1,223 +0,0 @@
/*
 * Copyright (c) 2012 Stefano Sabatini
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @example doc/examples/resampling_audio.c
 * libswresample API use example.
 */

#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>

static int get_format_from_sample_fmt(const char **fmt,
                                      enum AVSampleFormat sample_fmt)
{
    int i;
    struct sample_fmt_entry {
        enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
    } sample_fmt_entries[] = {
        { AV_SAMPLE_FMT_U8, "u8", "u8" },
        { AV_SAMPLE_FMT_S16, "s16be", "s16le" },
        { AV_SAMPLE_FMT_S32, "s32be", "s32le" },
        { AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
        { AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
    };
    *fmt = NULL;

    for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
        struct sample_fmt_entry *entry = &sample_fmt_entries[i];
        if (sample_fmt == entry->sample_fmt) {
            *fmt = AV_NE(entry->fmt_be, entry->fmt_le);
            return 0;
        }
    }

    fprintf(stderr,
            "Sample format %s not supported as output format\n",
            av_get_sample_fmt_name(sample_fmt));
    return AVERROR(EINVAL);
}

/**
 * Fill dst buffer with nb_samples, generated starting from t.
 */
void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t)
{
    int i, j;
    double tincr = 1.0 / sample_rate, *dstp = dst;
    const double c = 2 * M_PI * 440.0;

    /* generate sin tone with 440Hz frequency and duplicated channels */
    for (i = 0; i < nb_samples; i++) {
        *dstp = sin(c * *t);
        for (j = 1; j < nb_channels; j++)
            dstp[j] = dstp[0];
        dstp += nb_channels;
        *t += tincr;
    }
}

int alloc_samples_array_and_data(uint8_t ***data, int *linesize, int nb_channels,
                                 int nb_samples, enum AVSampleFormat sample_fmt, int align)
{
    int nb_planes = av_sample_fmt_is_planar(sample_fmt) ? nb_channels : 1;

    *data = av_malloc(sizeof(*data) * nb_planes);
    if (!*data)
        return AVERROR(ENOMEM);
    return av_samples_alloc(*data, linesize, nb_channels,
                            nb_samples, sample_fmt, align);
}

int main(int argc, char **argv)
{
    int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
    int src_rate = 48000, dst_rate = 44100;
    uint8_t **src_data = NULL, **dst_data = NULL;
    int src_nb_channels = 0, dst_nb_channels = 0;
    int src_linesize, dst_linesize;
    int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples;
    enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16;
    const char *dst_filename = NULL;
    FILE *dst_file;
    int dst_bufsize;
    const char *fmt;
    struct SwrContext *swr_ctx;
    double t;
    int ret;

    if (argc != 2) {
        fprintf(stderr, "Usage: %s output_file\n"
                "API example program to show how to resample an audio stream with libswresample.\n"
                "This program generates a series of audio frames, resamples them to a specified "
                "output format and rate and saves them to an output file named output_file.\n",
                argv[0]);
        exit(1);
    }
    dst_filename = argv[1];

    dst_file = fopen(dst_filename, "wb");
    if (!dst_file) {
        fprintf(stderr, "Could not open destination file %s\n", dst_filename);
        exit(1);
    }

    /* create resampler context */
    swr_ctx = swr_alloc();
    if (!swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* set options */
    av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
    av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
    av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);

    av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
    av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
    av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);

    /* initialize the resampling context */
    if ((ret = swr_init(swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        goto end;
    }

    /* allocate source and destination samples buffers */

    src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
    ret = alloc_samples_array_and_data(&src_data, &src_linesize, src_nb_channels,
                                       src_nb_samples, src_sample_fmt, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate source samples\n");
        goto end;
    }

    /* compute the number of converted samples: buffering is avoided
     * ensuring that the output buffer will contain at least all the
     * converted input samples */
    max_dst_nb_samples = dst_nb_samples =
        av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);

    /* buffer is going to be directly written to a rawaudio file, no alignment */
    dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
    ret = alloc_samples_array_and_data(&dst_data, &dst_linesize, dst_nb_channels,
                                       dst_nb_samples, dst_sample_fmt, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate destination samples\n");
        goto end;
    }

    t = 0;
    do {
        /* generate synthetic audio */
        fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t);

        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
                                        src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
        if (dst_nb_samples > max_dst_nb_samples) {
            av_free(dst_data[0]);
            ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
                                   dst_nb_samples, dst_sample_fmt, 1);
            if (ret < 0)
                break;
            max_dst_nb_samples = dst_nb_samples;
        }

        /* convert to destination format */
        ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            goto end;
        }
        dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
                                                 ret, dst_sample_fmt, 1);
        printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
        fwrite(dst_data[0], 1, dst_bufsize, dst_file);
    } while (t < 10);

    if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
        goto end;
    fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
            "ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
            fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);

end:
    if (dst_file)
        fclose(dst_file);

    if (src_data)
        av_freep(&src_data[0]);
    av_freep(&src_data);

    if (dst_data)
        av_freep(&dst_data[0]);
    av_freep(&dst_data);

    swr_free(&swr_ctx);
    return ret < 0;
}
@@ -1,141 +0,0 @@
/*
 * Copyright (c) 2012 Stefano Sabatini
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libswscale API use example.
 * @example doc/examples/scaling_video.c
 */

#include <libavutil/imgutils.h>
#include <libavutil/parseutils.h>
#include <libswscale/swscale.h>

static void fill_yuv_image(uint8_t *data[4], int linesize[4],
                           int width, int height, int frame_index)
{
    int x, y;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            data[0][y * linesize[0] + x] = x + y + frame_index * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            data[1][y * linesize[1] + x] = 128 + y + frame_index * 2;
            data[2][y * linesize[2] + x] = 64 + x + frame_index * 5;
        }
    }
}

int main(int argc, char **argv)
{
    uint8_t *src_data[4], *dst_data[4];
    int src_linesize[4], dst_linesize[4];
    int src_w = 320, src_h = 240, dst_w, dst_h;
    enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_RGB24;
    const char *dst_size = NULL;
    const char *dst_filename = NULL;
    FILE *dst_file;
    int dst_bufsize;
    struct SwsContext *sws_ctx;
    int i, ret;

    if (argc != 3) {
        fprintf(stderr, "Usage: %s output_file output_size\n"
                "API example program to show how to scale an image with libswscale.\n"
                "This program generates a series of pictures, rescales them to the given "
                "output_size and saves them to an output file named output_file\n."
                "\n", argv[0]);
        exit(1);
    }
    dst_filename = argv[1];
    dst_size = argv[2];

    if (av_parse_video_size(&dst_w, &dst_h, dst_size) < 0) {
        fprintf(stderr,
                "Invalid size '%s', must be in the form WxH or a valid size abbreviation\n",
                dst_size);
        exit(1);
    }

    dst_file = fopen(dst_filename, "wb");
    if (!dst_file) {
        fprintf(stderr, "Could not open destination file %s\n", dst_filename);
        exit(1);
    }

    /* create scaling context */
    sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
                             dst_w, dst_h, dst_pix_fmt,
                             SWS_BILINEAR, NULL, NULL, NULL);
    if (!sws_ctx) {
        fprintf(stderr,
                "Impossible to create scale context for the conversion "
                "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
                av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
                av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
        ret = AVERROR(EINVAL);
        goto end;
    }

    /* allocate source and destination image buffers */
    if ((ret = av_image_alloc(src_data, src_linesize,
                              src_w, src_h, src_pix_fmt, 16)) < 0) {
        fprintf(stderr, "Could not allocate source image\n");
        goto end;
    }

    /* buffer is going to be written to rawvideo file, no alignmnet */
    if ((ret = av_image_alloc(dst_data, dst_linesize,
                              dst_w, dst_h, dst_pix_fmt, 1)) < 0) {
        fprintf(stderr, "Could not allocate destination image\n");
        goto end;
    }
    dst_bufsize = ret;

    for (i = 0; i < 100; i++) {
        /* generate synthetic video */
        fill_yuv_image(src_data, src_linesize, src_w, src_h, i);

        /* convert to destination format */
        sws_scale(sws_ctx, (const uint8_t * const*)src_data,
                  src_linesize, 0, src_h, dst_data, dst_linesize);

        /* write scaled image to file */
        fwrite(dst_data[0], 1, dst_bufsize, dst_file);
    }

    fprintf(stderr, "Scaling succeeded. Play the output file with the command:\n"
            "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
            av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);

end:
    if (dst_file)
        fclose(dst_file);
    av_freep(&src_data[0]);
    av_freep(&dst_data[0]);
    sws_freeContext(sws_ctx);
    return ret < 0;
}
212
doc/faq.texi
@@ -79,17 +79,6 @@ not a bug they should fix:
Then again, some of them do not know the difference between an undecidable
problem and an NP-hard problem...

@section I have installed this library with my distro's package manager. Why does @command{configure} not see it?

Distributions usually split libraries in several packages. The main package
contains the files necessary to run programs using the library. The
development package contains the files necessary to build programs using the
library. Sometimes, docs and/or data are in a separate package too.

To build FFmpeg, you need to install the development package. It is usually
called @file{libfoo-dev} or @file{libfoo-devel}. You can remove it after the
build is finished, but be sure to keep the main package.
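For example, on a Debian- or Ubuntu-style system the development package for
libx264 might be installed and picked up roughly like this (package names
vary between distributions, so treat these as placeholders):

@example
apt-get install libx264-dev
./configure --enable-gpl --enable-libx264
@end example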
@chapter Usage

@section ffmpeg does not work; what is wrong?
@@ -110,16 +99,7 @@ Then you may run:

Notice that @samp{%d} is replaced by the image number.

@file{img%03d.jpg} means the sequence @file{img001.jpg}, @file{img002.jpg}, etc.

Use the @option{-start_number} option to declare a starting number for
the sequence. This is useful if your sequence does not start with
@file{img001.jpg} but is still in a numerical order. The following
example will start with @file{img100.jpg}:

@example
ffmpeg -f image2 -start_number 100 -i img%d.jpg /tmp/a.mpg
@end example
@file{img%03d.jpg} means the sequence @file{img001.jpg}, @file{img002.jpg}, etc...

If you have large number of pictures to rename, you can use the
following command to ease the burden. The command, using the bourne
@@ -142,12 +122,6 @@ Then run:

The same logic is used for any image format that ffmpeg reads.

You can also use @command{cat} to pipe images to ffmpeg:

@example
cat *.jpg | ffmpeg -f image2pipe -c:v mjpeg -i - output.mpg
@end example

@section How do I encode movie to single pictures?

Use:
@@ -239,63 +213,8 @@ For ANY other help on Avisynth, please visit the

@section How can I join video files?

To "join" video files is quite ambiguous. The following list explains the
different kinds of "joining" and points out how those are addressed in
FFmpeg. To join video files may mean:

@itemize

@item
To put them one after the other: this is called to @emph{concatenate} them
(in short: concat) and is addressed
@ref{How can I concatenate video files, in this very faq}.

@item
To put them together in the same file, to let the user choose between the
different versions (example: different audio languages): this is called to
@emph{multiplex} them together (in short: mux), and is done by simply
invoking ffmpeg with several @option{-i} options.

@item
For audio, to put all channels together in a single stream (example: two
mono streams into one stereo stream): this is sometimes called to
@emph{merge} them, and can be done using the
@url{http://ffmpeg.org/ffmpeg-filters.html#amerge, @code{amerge}} filter.

@item
For audio, to play one on top of the other: this is called to @emph{mix}
them, and can be done by first merging them into a single stream and then
using the @url{http://ffmpeg.org/ffmpeg-filters.html#pan, @code{pan}} filter to mix
the channels at will.

@item
For video, to display both together, side by side or one on top of a part of
the other; it can be done using the
@url{http://ffmpeg.org/ffmpeg-filters.html#overlay, @code{overlay}} video filter.

@end itemize
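As a rough illustration of the multiplexing case mentioned in the list above,
adding a second audio track to an existing file might look like this (file
names and stream indices here are only placeholders for your own inputs):

@example
ffmpeg -i input.mkv -i audio_fr.mp3 -map 0 -map 1:a -c copy output.mkv
@end example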
|
||||
|
||||
@anchor{How can I concatenate video files}
|
||||
@section How can I concatenate video files?
|
||||
|
||||
There are several solutions, depending on the exact circumstances.
|
||||
|
||||
@subsection Concatenating using the concat @emph{filter}
|
||||
|
||||
FFmpeg has a @url{http://ffmpeg.org/ffmpeg-filters.html#concat,
|
||||
@code{concat}} filter designed specifically for that, with examples in the
|
||||
documentation. This operation is recommended if you need to re-encode.
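
As a minimal sketch (the input names and the one-video/one-audio stream layout
are assumptions), such a command could look like:

@example
ffmpeg -i part1.mp4 -i part2.mp4 \
       -filter_complex "[0:v] [0:a] [1:v] [1:a] concat=n=2:v=1:a=1 [v] [a]" \
       -map "[v]" -map "[a]" output.mp4
@end example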
|
||||
|
||||
@subsection Concatenating using the concat @emph{demuxer}
|
||||
|
||||
FFmpeg has a @url{http://www.ffmpeg.org/ffmpeg-formats.html#concat,
|
||||
@code{concat}} demuxer which you can use when you want to avoid a re-encode and
|
||||
your format doesn't support file level concatenation.
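
As a sketch (file names are hypothetical), list the segments in a text file
and feed it to the demuxer:

@example
# mylist.txt contains:
#   file 'part1.mp4'
#   file 'part2.mp4'
ffmpeg -f concat -i mylist.txt -c copy output.mp4
@end example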
|
||||
|
||||
@subsection Concatenating using the concat @emph{protocol} (file level)
|
||||
|
||||
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to concatenate
|
||||
video by merely concatenating the files containing them.
|
||||
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to join video files by
|
||||
merely concatenating them.
|
||||
|
||||
Hence you may concatenate your multimedia files by first transcoding them to
|
||||
these privileged formats, then using the humble @code{cat} command (or the
|
||||
@@ -303,38 +222,28 @@ equally humble @code{copy} under Windows), and finally transcoding back to your
|
||||
format of choice.
|
||||
|
||||
@example
|
||||
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
|
||||
ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
|
||||
ffmpeg -i input1.avi -same_quant intermediate1.mpg
|
||||
ffmpeg -i input2.avi -same_quant intermediate2.mpg
|
||||
cat intermediate1.mpg intermediate2.mpg > intermediate_all.mpg
|
||||
ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
|
||||
ffmpeg -i intermediate_all.mpg -same_quant output.avi
|
||||
@end example
|
||||
|
||||
Additionally, you can use the @code{concat} protocol instead of @code{cat} or
|
||||
@code{copy} which will avoid creation of a potentially huge intermediate file.
|
||||
Notice that you should either use @code{-same_quant} or set a reasonably high
|
||||
bitrate for your intermediate and output files, if you want to preserve
|
||||
video quality.
|
||||
|
||||
@example
|
||||
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
|
||||
ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
|
||||
ffmpeg -i concat:"intermediate1.mpg|intermediate2.mpg" -c copy intermediate_all.mpg
|
||||
ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
|
||||
@end example
|
||||
|
||||
Note that you may need to escape the character "|" which is special for many
|
||||
shells.
|
||||
|
||||
Another option is to use named pipes, should your platform support it:
|
||||
Also notice that you may avoid the huge intermediate files by taking advantage
|
||||
of named pipes, should your platform support it:
|
||||
|
||||
@example
|
||||
mkfifo intermediate1.mpg
|
||||
mkfifo intermediate2.mpg
|
||||
ffmpeg -i input1.avi -qscale:v 1 -y intermediate1.mpg < /dev/null &
|
||||
ffmpeg -i input2.avi -qscale:v 1 -y intermediate2.mpg < /dev/null &
|
||||
ffmpeg -i input1.avi -same_quant -y intermediate1.mpg < /dev/null &
|
||||
ffmpeg -i input2.avi -same_quant -y intermediate2.mpg < /dev/null &
|
||||
cat intermediate1.mpg intermediate2.mpg |\
|
||||
ffmpeg -f mpeg -i - -c:v mpeg4 -acodec libmp3lame output.avi
|
||||
ffmpeg -f mpeg -i - -same_quant -c:v mpeg4 -acodec libmp3lame output.avi
|
||||
@end example
|
||||
|
||||
@subsection Concatenating using raw audio and video
|
||||
|
||||
Similarly, the yuv4mpegpipe format and the raw video and raw audio codecs also
|
||||
allow concatenation, and the transcoding step is almost lossless.
|
||||
When using multiple yuv4mpegpipe(s), the first line needs to be discarded
|
||||
@@ -342,8 +251,7 @@ from all but the first stream. This can be accomplished by piping through
|
||||
@code{tail} as seen below. Note that when piping through @code{tail} you
|
||||
must use command grouping, @code{@{ ;@}}, to background properly.
|
||||
|
||||
For example, let's say we want to concatenate two FLV files into an
|
||||
output.flv file:
|
||||
For example, let's say we want to join two FLV files into an output.flv file:
|
||||
|
||||
@example
|
||||
mkfifo temp1.a
|
||||
@@ -360,7 +268,7 @@ cat temp1.a temp2.a > all.a &
|
||||
cat temp1.v temp2.v > all.v &
|
||||
ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
|
||||
-f yuv4mpegpipe -i all.v \
|
||||
-y output.flv
|
||||
-same_quant -y output.flv
|
||||
rm temp[12].[av] all.[av]
|
||||
@end example
|
||||
|
||||
@@ -402,48 +310,11 @@ specifying the exact format.
|
||||
aconvert=s16:stereo:packed
|
||||
@end example
|
||||
|
||||
@section Why does FFmpeg not see the subtitles in my VOB file?
|
||||
|
||||
VOB and a few other formats do not have a global header that describes
|
||||
everything present in the file. Instead, applications are supposed to scan
|
||||
the file to see what it contains. Since VOB files are frequently large, only
|
||||
the beginning is scanned. If the subtitles appear only later in the file,
|
||||
they will not be initially detected.
|
||||
|
||||
Some applications, including the @code{ffmpeg} command-line tool, can only
|
||||
work with streams that were detected during the initial scan; streams that
|
||||
are detected later are ignored.
|
||||
|
||||
The size of the initial scan is controlled by two options: @code{probesize}
|
||||
(default ~5 MB) and @code{analyzeduration} (default 5,000,000 µs = 5 s). For
|
||||
the subtitle stream to be detected, both values must be large enough.
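
For example, a sketch that raises both limits before reading a VOB (the file
names and the remux to Matroska are only illustrative):

@example
ffmpeg -probesize 100000000 -analyzeduration 100000000 -i input.vob \
       -map 0 -c copy output.mkv
@end example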
|
||||
|
||||
@section Why was the @command{ffmpeg} @option{-sameq} option removed? What to use instead?
|
||||
|
||||
The @option{-sameq} option meant "same quantizer", and made sense only in a
|
||||
very limited set of cases. Unfortunately, a lot of people mistook it for
|
||||
"same quality" and used it in places where it did not make sense: it had
|
||||
roughly the expected visible effect, but achieved it in a very inefficient
|
||||
way.
|
||||
|
||||
Each encoder has its own set of options to set the quality-vs-size balance;
|
||||
use the options for the encoder you are using to set the quality level to a
|
||||
point acceptable for your tastes. The most common options to do that are
|
||||
@option{-qscale} and @option{-qmax}, but you should peruse the documentation
|
||||
of the encoder you chose.
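
For instance, a sketch using per-stream quality scales (the values and codecs
are only illustrative and depend on the encoders you pick):

@example
ffmpeg -i input.avi -c:v mpeg4 -qscale:v 3 -c:a libmp3lame -qscale:a 4 output.avi
@end example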
|
||||
|
||||
@chapter Development
|
||||
|
||||
@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?
|
||||
|
||||
Yes. Check the @file{doc/examples} directory in the source
|
||||
repository, also available online at:
|
||||
@url{https://github.com/FFmpeg/FFmpeg/tree/master/doc/examples}.
|
||||
|
||||
Examples are also installed by default, usually in
|
||||
@code{$PREFIX/share/ffmpeg/examples}.
|
||||
|
||||
Also you may read the Developers Guide of the FFmpeg documentation. Alternatively,
|
||||
Yes. Read the Developers Guide of the FFmpeg documentation. Alternatively,
|
||||
examine the source code for one of the many open source projects that
|
||||
already incorporate FFmpeg (see @url{projects.html}).
|
||||
|
||||
@@ -455,8 +326,31 @@ with @code{#ifdef}s related to the compiler.
|
||||
|
||||
@section Is Microsoft Visual C++ supported?
|
||||
|
||||
Yes. Please see the @uref{platform.html, Microsoft Visual C++}
|
||||
section in the FFmpeg documentation.
|
||||
No. Microsoft Visual C++ is not compliant to the C99 standard and does
|
||||
not - among other things - support the inline assembly used in FFmpeg.
|
||||
If you wish to use MSVC++ for your
|
||||
project then you can link the MSVC++ code with libav* as long as
|
||||
you compile the latter with a working C compiler. For more information, see
|
||||
the @emph{Microsoft Visual C++ compatibility} section in the FFmpeg
|
||||
documentation.
|
||||
|
||||
There have been efforts to make FFmpeg compatible with MSVC++ in the
|
||||
past. However, they have all been rejected as too intrusive, especially
|
||||
since MinGW does the job adequately. None of the core developers
|
||||
work with MSVC++ and thus this item is low priority. Should you find
|
||||
the silver bullet that solves this problem, feel free to shoot it at us.
|
||||
|
||||
We strongly recommend you to move over from MSVC++ to MinGW tools.
|
||||
|
||||
@section Can I use FFmpeg or libavcodec under Windows?
|
||||
|
||||
Yes, but the Cygwin or MinGW tools @emph{must} be used to compile FFmpeg.
|
||||
Read the @emph{Windows} section in the FFmpeg documentation to find more
|
||||
information.
|
||||
|
||||
To get help and instructions for building FFmpeg under Windows, check out
|
||||
the FFmpeg Windows Help Forum at
|
||||
@url{http://ffmpeg.arrozcru.org/}.
|
||||
|
||||
@section Can you add automake, libtool or autoconf support?
|
||||
|
||||
@@ -481,24 +375,6 @@ Yes, as long as the code is optional and can easily and cleanly be placed
|
||||
under #if CONFIG_GPL without breaking anything. So, for example, a new codec
|
||||
or filter would be OK under GPL while a bug fix to LGPL code would not.
|
||||
|
||||
@section I'm using FFmpeg from within my C application but the linker complains about missing symbols from the libraries themselves.
|
||||
|
||||
FFmpeg builds static libraries by default. In static libraries, dependencies
|
||||
are not handled. That has two consequences. First, you must specify the
|
||||
libraries in dependency order: @code{-lavdevice} must come before
|
||||
@code{-lavformat}, @code{-lavutil} must come after everything else, etc.
|
||||
Second, external libraries that are used in FFmpeg have to be specified too.
|
||||
|
||||
An easy way to get the full list of required libraries in dependency order
|
||||
is to use @code{pkg-config}.
|
||||
|
||||
@example
|
||||
c99 -o program program.c $(pkg-config --cflags --libs libavformat libavcodec)
|
||||
@end example
|
||||
|
||||
See @file{doc/example/Makefile} and @file{doc/example/pc-uninstalled} for
|
||||
more details.
|
||||
|
||||
@section I'm using FFmpeg from within my C++ application but the linker complains about missing symbols which seem to be available.
|
||||
|
||||
FFmpeg is a pure C project, so to use the libraries within your C++ application
|
||||
@@ -514,8 +390,8 @@ to use them you have to append -D__STDC_CONSTANT_MACROS to your CXXFLAGS
|
||||
|
||||
@section I have a file in memory / a API different from *open/*read/ libc how do I use it with libavformat?
|
||||
|
||||
You have to create a custom AVIOContext using @code{avio_alloc_context},
|
||||
see @file{libavformat/aviobuf.c} in FFmpeg and @file{libmpdemux/demux_lavf.c} in MPlayer or MPlayer2 sources.
|
||||
You have to implement a URLProtocol, see @file{libavformat/file.c} in
|
||||
FFmpeg and @file{libmpdemux/demux_lavf.c} in MPlayer sources.
|
||||
|
||||
@section Where can I find libav* headers for Pascal/Delphi?
|
||||
|
||||
|
@@ -1,8 +1,8 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Automated Testing Environment
|
||||
@settitle FATE Automated Testing Environment
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Automated Testing Environment}
|
||||
@center @titlefont{FATE Automated Testing Environment}
|
||||
@end titlepage
|
||||
|
||||
@node Top
|
||||
@@ -78,14 +78,11 @@ Do not put a '~' character in the samples path to indicate a home
|
||||
directory. Because of shell nuances, this will cause FATE to fail.
|
||||
@end float
|
||||
|
||||
To use a custom wrapper to run the test, pass @option{--target-exec} to
|
||||
@command{configure} or set the @var{TARGET_EXEC} Make variable.
|
||||
|
||||
|
||||
@chapter Submitting the results to the FFmpeg result aggregation server
|
||||
|
||||
To submit your results to the server you should run fate through the
|
||||
shell script @file{tests/fate.sh} from the FFmpeg sources. This script needs
|
||||
shell script tests/fate.sh from the FFmpeg sources. This script needs
|
||||
to be invoked with a configuration file as its first argument.
|
||||
|
||||
@example
|
||||
@@ -93,11 +90,11 @@ tests/fate.sh /path/to/fate_config
|
||||
@end example
|
||||
|
||||
A configuration file template with comments describing the individual
|
||||
configuration variables can be found at @file{doc/fate_config.sh.template}.
|
||||
configuration variables can be found at @file{tests/fate_config.sh.template}.
|
||||
|
||||
@ifhtml
|
||||
The mentioned configuration template is also available here:
|
||||
@verbatiminclude fate_config.sh.template
|
||||
@verbatiminclude ../tests/fate_config.sh.template
|
||||
@end ifhtml
|
||||
|
||||
Create a configuration that suits your needs, based on the configuration
|
||||
@@ -121,9 +118,8 @@ present in $workdir as specified in the configuration file:
|
||||
@item version
|
||||
@end itemize
|
||||
|
||||
When you have everything working properly you can create an SSH key pair
|
||||
and send the public key to the FATE server administrator who can be contacted
|
||||
at the email address @email{fate-admin@@ffmpeg.org}.
|
||||
When you have everything working properly you can create an SSH key and
|
||||
send its public part to the FATE server administrator.
|
||||
|
||||
Configure your SSH client to use public key authentication with that key
|
||||
when connecting to the FATE server. Also do not forget to check the identity
|
||||
@@ -133,11 +129,6 @@ The FATE server's fingerprint is:
|
||||
|
||||
b1:31:c8:79:3f:04:1d:f8:f2:23:26:5a:fd:55:fa:92
|
||||
|
||||
If you have problems connecting to the FATE server, it may help to try out
|
||||
the @command{ssh} command with one or more @option{-v} options. You should
|
||||
get detailed output concerning your SSH configuration and the authentication
|
||||
process.
|
||||
|
||||
The only thing left is to automate the execution of the fate.sh script and
|
||||
the synchronisation of the samples directory.
|
||||
|
||||
@@ -175,20 +166,11 @@ the synchronisation of the samples directory.
|
||||
@item THREADS
|
||||
Specify how many threads to use while running regression tests; it is
|
||||
quite useful for detecting thread-related regressions.
|
||||
@item THREAD_TYPE
|
||||
Specify which threading strategy to test, either @var{slice} or @var{frame},
|
||||
by default @var{slice+frame}
|
||||
@item CPUFLAGS
|
||||
Specify CPU flags.
|
||||
@item TARGET_EXEC
|
||||
Specify or override the wrapper used to run the tests.
|
||||
The @var{TARGET_EXEC} option provides a way to run FATE wrapped in
|
||||
@command{valgrind}, @command{qemu-user} or @command{wine} or on remote targets
|
||||
through @command{ssh}.
|
||||
@end table
|
||||
|
||||
@section Examples
|
||||
|
||||
Example:
|
||||
@example
|
||||
make V=1 SAMPLES=/var/fate/samples THREADS=2 CPUFLAGS=mmx fate
|
||||
@end example
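
Another sketch, this time running the suite through a wrapper (the
@command{qemu-arm} invocation and the sysroot path are assumptions):

@example
make fate TARGET_EXEC="qemu-arm -L /usr/arm-linux-gnueabi" SAMPLES=/var/fate/samples
@end example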
|
||||
|
@@ -1,45 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Bitstream Filters Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Bitstream Filters Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the bitstream filters provided by the
|
||||
libavcodec library.
|
||||
|
||||
A bitstream filter operates on the encoded stream data, and performs
|
||||
bitstream level modifications without performing decoding.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include bitstream_filters.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavcodec.html,libavcodec}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavcodec(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-bitstream-filters
|
||||
@settitle FFmpeg bitstream filters
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
File diff suppressed because it is too large
@@ -1,62 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Devices Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Devices Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the input and output devices provided by the
|
||||
libavdevice library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@chapter Device Options
|
||||
@c man begin DEVICE OPTIONS
|
||||
|
||||
The libavdevice library provides the same interface as
|
||||
libavformat. Namely, an input device is treated like a demuxer, and
|
||||
an output device like a muxer, and the interface and generic device
|
||||
options are the same as those provided by libavformat (see the ffmpeg-formats
|
||||
manual).
|
||||
|
||||
In addition each input or output device may support so-called private
|
||||
options, which are specific to that component.
|
||||
|
||||
Options may be set by specifying -@var{option} @var{value} in the
|
||||
FFmpeg tools, or by setting the value explicitly in the device
|
||||
@code{AVFormatContext} options or using the @file{libavutil/opt.h} API
|
||||
for programmatic use.
|
||||
|
||||
@c man end DEVICE OPTIONS
|
||||
|
||||
@include indevs.texi
|
||||
@include outdevs.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavdevice.html,libavdevice}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavdevice(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-devices
|
||||
@settitle FFmpeg devices
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Filters Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Filters Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes filters, sources, and sinks provided by the
|
||||
libavfilter library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include filters.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavfilter.html,libavfilter}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavfilter(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-filters
|
||||
@settitle FFmpeg filters
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,171 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Formats Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Formats Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the supported formats (muxers and demuxers)
|
||||
provided by the libavformat library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@chapter Format Options
|
||||
@c man begin FORMAT OPTIONS
|
||||
|
||||
The libavformat library provides some generic global options, which
|
||||
can be set on all the muxers and demuxers. In addition each muxer or
|
||||
demuxer may support so-called private options, which are specific to
|
||||
that component.
|
||||
|
||||
Options may be set by specifying -@var{option} @var{value} in the
|
||||
FFmpeg tools, or by setting the value explicitly in the
|
||||
@code{AVFormatContext} options or using the @file{libavutil/opt.h} API
|
||||
for programmatic use.
|
||||
|
||||
The list of supported options follows:
|
||||
|
||||
@table @option
|
||||
@item avioflags @var{flags} (@emph{input/output})
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item direct
|
||||
Reduce buffering.
|
||||
@end table
|
||||
|
||||
@item probesize @var{integer} (@emph{input})
|
||||
Set probing size in bytes, i.e. the size of the data to analyze to get
|
||||
stream information. A higher value will allow detecting more
|
||||
information in case it is dispersed into the stream, but will increase
|
||||
latency. Must be an integer not less than 32. It is 5000000 by default.
|
||||
|
||||
@item packetsize @var{integer} (@emph{output})
|
||||
Set packet size.
|
||||
|
||||
@item fflags @var{flags} (@emph{input/output})
|
||||
Set format flags.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item ignidx
|
||||
Ignore index.
|
||||
@item genpts
|
||||
Generate PTS.
|
||||
@item nofillin
|
||||
Do not fill in missing values that can be exactly calculated.
|
||||
@item noparse
|
||||
Disable AVParsers; this needs @code{+nofillin} too.
|
||||
@item igndts
|
||||
Ignore DTS.
|
||||
@item discardcorrupt
|
||||
Discard corrupted frames.
|
||||
@item sortdts
|
||||
Try to interleave output packets by DTS.
|
||||
@item keepside
|
||||
Do not merge side data.
|
||||
@item latm
|
||||
Enable RTP MP4A-LATM payload.
|
||||
@item nobuffer
|
||||
Reduce the latency introduced by optional buffering.
|
||||
@end table
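
For example, a sketch that generates missing PTS while stream copying (the
file names are hypothetical):

@example
ffmpeg -fflags +genpts -i input.ts -c copy output.mkv
@end example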
|
||||
|
||||
@item analyzeduration @var{integer} (@emph{input})
|
||||
Specify how many microseconds are analyzed to estimate duration.
|
||||
|
||||
@item cryptokey @var{hexadecimal string} (@emph{input})
|
||||
Set decryption key.
|
||||
|
||||
@item indexmem @var{integer} (@emph{input})
|
||||
Set max memory used for timestamp index (per stream).
|
||||
|
||||
@item rtbufsize @var{integer} (@emph{input})
|
||||
Set max memory used for buffering real-time frames.
|
||||
|
||||
@item fdebug @var{flags} (@emph{input/output})
|
||||
Print specific debug info.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item ts
|
||||
@end table
|
||||
|
||||
@item max_delay @var{integer} (@emph{input/output})
|
||||
Set maximum muxing or demuxing delay in microseconds.
|
||||
|
||||
@item fpsprobesize @var{integer} (@emph{input})
|
||||
Set number of frames used to probe fps.
|
||||
|
||||
@item audio_preload @var{integer} (@emph{output})
|
||||
Set microseconds by which audio packets should be interleaved earlier.
|
||||
|
||||
@item chunk_duration @var{integer} (@emph{output})
|
||||
Set microseconds for each chunk.
|
||||
|
||||
@item chunk_size @var{integer} (@emph{output})
|
||||
Set size in bytes for each chunk.
|
||||
|
||||
@item err_detect, f_err_detect @var{flags} (@emph{input})
|
||||
Set error detection flags. @code{f_err_detect} is deprecated and
|
||||
should be used only via the @command{ffmpeg} tool.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item crccheck
|
||||
Verify embedded CRCs.
|
||||
@item bitstream
|
||||
Detect bitstream specification deviations.
|
||||
@item buffer
|
||||
Detect improper bitstream length.
|
||||
@item explode
|
||||
Abort decoding on minor error detection.
|
||||
@item careful
|
||||
Consider things that violate the spec and have not been seen in the
|
||||
wild as errors.
|
||||
@item compliant
|
||||
Consider all spec non-compliances as errors.
|
||||
@item aggressive
|
||||
Consider things that a sane encoder should not do as errors.
|
||||
@end table
|
||||
|
||||
@item use_wallclock_as_timestamps @var{integer} (@emph{input})
|
||||
Use wallclock as timestamps.
|
||||
|
||||
@item avoid_negative_ts @var{integer} (@emph{output})
|
||||
Shift timestamps to make them positive. 1 enables, 0 disables, default
|
||||
of -1 enables when required by target format.
|
||||
@end table
|
||||
|
||||
@c man end FORMAT OPTIONS
|
||||
|
||||
@include demuxers.texi
|
||||
@include muxers.texi
|
||||
@include metadata.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavformat.html,libavformat}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavformat(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-formats
|
||||
@settitle FFmpeg formats
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Protocols Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Protocols Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the input and output protocols provided by the
|
||||
libavformat library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include protocols.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavformat.html,libavformat}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavformat(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-protocols
|
||||
@settitle FFmpeg protocols
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,238 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Resampler Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Resampler Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
The FFmpeg resampler provides a high-level interface to the
|
||||
libswresample library audio resampling utilities. In particular it
|
||||
allows performing audio resampling, audio channel layout rematrixing,
|
||||
and audio format and packing layout conversion.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@chapter Resampler Options
|
||||
@c man begin RESAMPLER OPTIONS
|
||||
|
||||
The audio resampler supports the following named options.
|
||||
|
||||
Options may be set by specifying -@var{option} @var{value} in the
|
||||
FFmpeg tools, @var{option}=@var{value} for the aresample filter,
|
||||
by setting the value explicitly in the
|
||||
@code{SwrContext} options or using the @file{libavutil/opt.h} API for
|
||||
programmatic use.
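
As a sketch of the filter form (the output rate is only an example):

@example
ffmpeg -i input.wav -af aresample=osr=48000 output.wav
@end example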
|
||||
|
||||
@table @option
|
||||
|
||||
@item ich, in_channel_count
|
||||
Set the number of input channels. Default value is 0. Setting this
|
||||
value is not mandatory if the corresponding channel layout
|
||||
@option{in_channel_layout} is set.
|
||||
|
||||
@item och, out_channel_count
|
||||
Set the number of output channels. Default value is 0. Setting this
|
||||
value is not mandatory if the corresponding channel layout
|
||||
@option{out_channel_layout} is set.
|
||||
|
||||
@item uch, used_channel_count
|
||||
Set the number of used channels. Default value is 0. This option is
|
||||
only used for special remapping.
|
||||
|
||||
@item isr, in_sample_rate
|
||||
Set the input sample rate. Default value is 0.
|
||||
|
||||
@item osr, out_sample_rate
|
||||
Set the output sample rate. Default value is 0.
|
||||
|
||||
@item isf, in_sample_fmt
|
||||
Specify the input sample format. It is set by default to @code{none}.
|
||||
|
||||
@item osf, out_sample_fmt
|
||||
Specify the output sample format. It is set by default to @code{none}.
|
||||
|
||||
@item tsf, internal_sample_fmt
|
||||
Set the internal sample format. Default value is @code{none}.
|
||||
|
||||
@item icl, in_channel_layout
|
||||
Set the input channel layout.
|
||||
|
||||
@item ocl, out_channel_layout
|
||||
Set the output channel layout.
|
||||
|
||||
@item clev, center_mix_level
|
||||
Set center mix level. It is a value expressed in decibels, and must be
|
||||
in the inclusive range -32 to +32.
|
||||
|
||||
@item slev, surround_mix_level
|
||||
Set surround mix level. It is a value expressed in decibels, and must
|
||||
be in the inclusive range -32 to +32.
|
||||
|
||||
@item lfe_mix_level
|
||||
Set LFE mix level.
|
||||
|
||||
@item rmvol, rematrix_volume
|
||||
Set rematrix volume. Default value is 1.0.
|
||||
|
||||
@item flags, swr_flags
|
||||
Set flags used by the converter. Default value is 0.
|
||||
|
||||
It supports the following individual flags:
|
||||
@table @option
|
||||
@item res
|
||||
force resampling
|
||||
@end table
|
||||
|
||||
@item dither_scale
|
||||
Set the dither scale. Default value is 1.
|
||||
|
||||
@item dither_method
|
||||
Set dither method. Default value is 0.
|
||||
|
||||
Supported values:
|
||||
@table @samp
|
||||
@item rectangular
|
||||
select rectangular dither
|
||||
@item triangular
|
||||
select triangular dither
|
||||
@item triangular_hp
|
||||
select triangular dither with high pass
|
||||
@end table
|
||||
|
||||
@item resampler
|
||||
Set resampling engine. Default value is swr.
|
||||
|
||||
Supported values:
|
||||
@table @samp
|
||||
@item swr
|
||||
select the native SW Resampler; filter options precision and cheby are not
|
||||
applicable in this case.
|
||||
@item soxr
|
||||
select the SoX Resampler (where available); compensation, and filter options
|
||||
filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this
|
||||
case.
|
||||
@end table
|
||||
|
||||
@item filter_size
|
||||
For swr only, set resampling filter size, default value is 32.
|
||||
|
||||
@item phase_shift
|
||||
For swr only, set resampling phase shift; the default value is 10 and it must be
|
||||
between 0 and 30.
|
||||
|
||||
@item linear_interp
|
||||
Use Linear Interpolation if set to 1, default value is 0.
|
||||
|
||||
@item cutoff
|
||||
Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
|
||||
value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
|
||||
(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
|
||||
|
||||
@item precision
|
||||
For soxr only, the precision in bits to which the resampled signal will be
|
||||
calculated. The default value of 20 (which, with suitable dithering, is
|
||||
appropriate for a destination bit-depth of 16) gives SoX's 'High Quality'; a
|
||||
value of 28 gives SoX's 'Very High Quality'.
|
||||
|
||||
@item cheby
|
||||
For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
|
||||
approximation for 'irrational' ratios. Default value is 0.
|
||||
|
||||
@item async
|
||||
For swr only, simple 1 parameter audio sync to timestamps using stretching,
|
||||
squeezing, filling and trimming. Setting this to 1 will enable filling and
|
||||
trimming, larger values represent the maximum amount in samples that the data
|
||||
may be stretched or squeezed for each second.
|
||||
Default value is 0, thus no compensation is applied to make the samples match
|
||||
the audio timestamps.
|
||||
|
||||
@item min_comp
|
||||
For swr only, set the minimum difference between timestamps and audio data (in
|
||||
seconds) to trigger stretching/squeezing/filling or trimming of the
|
||||
data to make it match the timestamps. The default is that
|
||||
stretching/squeezing/filling and trimming is disabled
|
||||
(@option{min_comp} = @code{FLT_MAX}).
|
||||
|
||||
@item min_hard_comp
|
||||
For swr only, set the minimum difference between timestamps and audio data (in
|
||||
seconds) to trigger adding/dropping samples to make it match the
|
||||
timestamps. This option effectively is a threshold to select between
|
||||
hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
|
||||
all compensation is by default disabled through @option{min_comp}.
|
||||
The default is 0.1.
|
||||
|
||||
@item comp_duration
|
||||
For swr only, set duration (in seconds) over which data is stretched/squeezed
|
||||
to make it match the timestamps. Must be a non-negative double float value,
|
||||
default value is 1.0.
|
||||
|
||||
@item max_soft_comp
|
||||
For swr only, set maximum factor by which data is stretched/squeezed to make it
|
||||
match the timestamps. Must be a non-negative double float value, default value
|
||||
is 0.
|
||||
|
||||
@item matrix_encoding
|
||||
Select matrixed stereo encoding.
|
||||
|
||||
It accepts the following values:
|
||||
@table @samp
|
||||
@item none
|
||||
select none
|
||||
@item dolby
|
||||
select Dolby
|
||||
@item dplii
|
||||
select Dolby Pro Logic II
|
||||
@end table
|
||||
|
||||
Default value is @code{none}.
|
||||
|
||||
@item filter_type
|
||||
For swr only, select resampling filter type. This only affects resampling
|
||||
operations.
|
||||
|
||||
It accepts the following values:
|
||||
@table @samp
|
||||
@item cubic
|
||||
select cubic
|
||||
@item blackman_nuttall
|
||||
select Blackman Nuttall Windowed Sinc
|
||||
@item kaiser
|
||||
select Kaiser Windowed Sinc
|
||||
@end table
|
||||
|
||||
@item kaiser_beta
|
||||
For swr only, set Kaiser Window Beta value. Must be an integer between
|
||||
2 and 16, default value is 9.
|
||||
|
||||
@end table
|
||||
|
||||
@c man end RESAMPLER OPTIONS
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libswresample.html,libswresample}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswresample(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-resampler
|
||||
@settitle FFmpeg Resampler
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,141 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Scaler Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Scaler Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
The FFmpeg rescaler provides a high-level interface to the libswscale
|
||||
library image conversion utilities. In particular it allows performing
|
||||
image rescaling and pixel format conversion.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@chapter Scaler Options
|
||||
@c man begin SCALER OPTIONS
|
||||
|
||||
The video scaler supports the following named options.
|
||||
|
||||
Options may be set by specifying -@var{option} @var{value} in the
|
||||
FFmpeg tools. For programmatic use, they can be set explicitly in the
|
||||
@code{SwsContext} options or through the @file{libavutil/opt.h} API.
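
For instance, a sketch combining a scale filter with explicit scaler flags
(file names and the target size are hypothetical):

@example
ffmpeg -i input.mp4 -vf scale=1280:720 -sws_flags lanczos+accurate_rnd output.mp4
@end example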
|
||||
|
||||
@table @option
|
||||
|
||||
@item sws_flags
|
||||
Set the scaler flags. This is also used to set the scaling
|
||||
algorithm. Only a single algorithm should be selected.
|
||||
|
||||
It accepts the following values:
|
||||
@table @samp
|
||||
@item fast_bilinear
|
||||
Select fast bilinear scaling algorithm.
|
||||
|
||||
@item bilinear
|
||||
Select bilinear scaling algorithm.
|
||||
|
||||
@item bicubic
|
||||
Select bicubic scaling algorithm.
|
||||
|
||||
@item experimental
|
||||
Select experimental scaling algorithm.
|
||||
|
||||
@item neighbor
|
||||
Select nearest neighbor rescaling algorithm.
|
||||
|
||||
@item area
|
||||
Select averaging area rescaling algorithm.
|
||||
|
||||
@item bicubiclin
|
||||
Select bicubic scaling algorithm for the luma component, bilinear for
|
||||
chroma components.
|
||||
|
||||
@item gauss
|
||||
Select Gaussian rescaling algorithm.
|
||||
|
||||
@item sinc
|
||||
Select sinc rescaling algorithm.
|
||||
|
||||
@item lanczos
|
||||
Select lanczos rescaling algorithm.
|
||||
|
||||
@item spline
|
||||
Select natural bicubic spline rescaling algorithm.
|
||||
|
||||
@item print_info
|
||||
Enable printing/debug logging.
|
||||
|
||||
@item accurate_rnd
|
||||
Enable accurate rounding.
|
||||
|
||||
@item full_chroma_int
|
||||
Enable full chroma interpolation.
|
||||
|
||||
@item full_chroma_inp
|
||||
Select full chroma input.
|
||||
|
||||
@item bitexact
|
||||
Enable bitexact output.
|
||||
@end table
|
||||
|
||||
@item srcw
|
||||
Set source width.
|
||||
|
||||
@item srch
|
||||
Set source height.
|
||||
|
||||
@item dstw
|
||||
Set destination width.
|
||||
|
||||
@item dsth
|
||||
Set destination height.
|
||||
|
||||
@item src_format
|
||||
Set source pixel format (must be expressed as an integer).
|
||||
|
||||
@item dst_format
|
||||
Set destination pixel format (must be expressed as an integer).
|
||||
|
||||
@item src_range
|
||||
Select source range.
|
||||
|
||||
@item dst_range
|
||||
Select destination range.
|
||||
|
||||
@item param0, param1
|
||||
Set scaling algorithm parameters. The specified values are specific to
|
||||
some scaling algorithms and ignored by others. They are
|
||||
floating point values.
|
||||
|
||||
@end table
|
||||
|
||||
@c man end SCALER OPTIONS
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libswscale.html,libswscale}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswscale(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-scaler
|
||||
@settitle FFmpeg video scaling and pixel format converter
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,43 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Utilities Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Utilities Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes some generic features and utilities provided
|
||||
by the libavutil library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include syntax.texi
|
||||
@include eval.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavutil.html,libavutil}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavutil(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-utils
|
||||
@settitle FFmpeg utilities
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
359
doc/ffmpeg.texi
@@ -11,7 +11,13 @@
|
||||
|
||||
@chapter Synopsis
|
||||
|
||||
ffmpeg [@var{global_options}] @{[@var{input_file_options}] -i @file{input_file}@} ... @{[@var{output_file_options}] @file{output_file}@} ...
|
||||
The generic syntax is:
|
||||
|
||||
@example
|
||||
@c man begin SYNOPSIS
|
||||
ffmpeg [global options] [[infile options][@option{-i} @var{infile}]]... @{[outfile options] @var{outfile}@}...
|
||||
@c man end
|
||||
@end example
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
@@ -52,7 +58,7 @@ options apply ONLY to the next input or output file and are reset between files.
|
||||
@item
|
||||
To set the video bitrate of the output file to 64kbit/s:
|
||||
@example
|
||||
ffmpeg -i input.avi -b:v 64k -bufsize 64k output.avi
|
||||
ffmpeg -i input.avi -b:v 64k output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
@@ -73,126 +79,6 @@ The format option may be needed for raw input files.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@chapter Detailed description
|
||||
@c man begin DETAILED DESCRIPTION
|
||||
|
||||
The transcoding process in @command{ffmpeg} for each output can be described by
|
||||
the following diagram:
|
||||
|
||||
@example
|
||||
_______ ______________ _________ ______________ ________
|
||||
| | | | | | | | | |
|
||||
| input | demuxer | encoded data | decoder | decoded | encoder | encoded data | muxer | output |
|
||||
| file | ---------> | packets | ---------> | frames | ---------> | packets | -------> | file |
|
||||
|_______| |______________| |_________| |______________| |________|
|
||||
|
||||
@end example
|
||||
|
||||
@command{ffmpeg} calls the libavformat library (containing demuxers) to read
|
||||
input files and get packets containing encoded data from them. When there are
|
||||
multiple input files, @command{ffmpeg} tries to keep them synchronized by
|
||||
tracking the lowest timestamp on any active input stream.
|
||||
|
||||
Encoded packets are then passed to the decoder (unless streamcopy is selected
|
||||
for the stream, see further for a description). The decoder produces
|
||||
uncompressed frames (raw video/PCM audio/...) which can be processed further by
|
||||
filtering (see next section). After filtering the frames are passed to the
|
||||
encoder, which encodes them and outputs encoded packets again. Finally those are
|
||||
passed to the muxer, which writes the encoded packets to the output file.
|
||||
|
||||
@section Filtering
|
||||
Before encoding, @command{ffmpeg} can process raw audio and video frames using
|
||||
filters from the libavfilter library. Several chained filters form a filter
|
||||
graph. @command{ffmpeg} distinguishes between two types of filtergraphs -
|
||||
simple and complex.
|
||||
|
||||
@subsection Simple filtergraphs
|
||||
Simple filtergraphs are those that have exactly one input and output, both of
|
||||
the same type. In the above diagram they can be represented by simply inserting
|
||||
an additional step between decoding and encoding:
|
||||
|
||||
@example
|
||||
_________ __________ ______________
|
||||
| | | | | |
|
||||
| decoded | simple filtergraph | filtered | encoder | encoded data |
|
||||
| frames | -------------------> | frames | ---------> | packets |
|
||||
|_________| |__________| |______________|
|
||||
|
||||
@end example
|
||||
|
||||
Simple filtergraphs are configured with the per-stream @option{-filter} option
|
||||
(with @option{-vf} and @option{-af} aliases for video and audio respectively).
|
||||
A simple filtergraph for video can look for example like this:
|
||||
|
||||
@example
|
||||
_______ _____________ _______ _____ ________
|
||||
| | | | | | | | | |
|
||||
| input | ---> | deinterlace | ---> | scale | ---> | fps | ---> | output |
|
||||
|_______| |_____________| |_______| |_____| |________|
|
||||
|
||||
@end example
|
||||
|
||||
Note that some filters change frame properties but not frame contents. E.g. the
|
||||
@code{fps} filter in the example above changes number of frames, but does not
|
||||
touch the frame contents. Another example is the @code{setpts} filter, which
|
||||
only sets timestamps and otherwise passes the frames unchanged.
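
A command-line sketch of the chain drawn above, with @code{yadif} standing in
for the generic deinterlace step (file names and the target size/rate are
hypothetical):

@example
ffmpeg -i input.mkv -vf "yadif,scale=1280:720,fps=25" output.mkv
@end example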
|
||||
|
||||
@subsection Complex filtergraphs
|
||||
Complex filtergraphs are those which cannot be described as simply a linear
|
||||
processing chain applied to one stream. This is the case e.g. when the graph has
|
||||
more than one input and/or output, or when output stream type is different from
|
||||
input. They can be represented with the following diagram:
|
||||
|
||||
@example
|
||||
_________
|
||||
| |
|
||||
| input 0 |\ __________
|
||||
|_________| \ | |
|
||||
\ _________ /| output 0 |
|
||||
\ | | / |__________|
|
||||
_________ \| complex | /
|
||||
| | | |/
|
||||
| input 1 |---->| filter |\
|
||||
|_________| | | \ __________
|
||||
/| graph | \ | |
|
||||
/ | | \| output 1 |
|
||||
_________ / |_________| |__________|
|
||||
| | /
|
||||
| input 2 |/
|
||||
|_________|
|
||||
|
||||
@end example
|
||||
|
||||
Complex filtergraphs are configured with the @option{-filter_complex} option.
|
||||
Note that this option is global, since a complex filtergraph by its nature
|
||||
cannot be unambiguously associated with a single stream or file.
|
||||
|
||||
A trivial example of a complex filtergraph is the @code{overlay} filter, which
|
||||
has two video inputs and one video output, containing one video overlaid on top
|
||||
of the other. Its audio counterpart is the @code{amix} filter.
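
As a sketch, mixing the audio of two inputs while keeping the first input's
video untouched (file names are hypothetical):

@example
ffmpeg -i main.mkv -i commentary.wav -filter_complex "amix=inputs=2[a]" \
       -map 0:v -map "[a]" -c:v copy output.mkv
@end example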
|
||||
|
||||
@section Stream copy
|
||||
Stream copy is a mode selected by supplying the @code{copy} parameter to the
|
||||
@option{-codec} option. It makes @command{ffmpeg} omit the decoding and encoding
|
||||
step for the specified stream, so it does only demuxing and muxing. It is useful
|
||||
for changing the container format or modifying container-level metadata. The
|
||||
diagram above will in this case simplify to this:
|
||||
|
||||
@example
|
||||
_______ ______________ ________
|
||||
| | | | | |
|
||||
| input | demuxer | encoded data | muxer | output |
|
||||
| file | ---------> | packets | -------> | file |
|
||||
|_______| |______________| |________|
|
||||
|
||||
@end example
|
||||
|
||||
Since there is no decoding or encoding, it is very fast and there is no quality
|
||||
loss. However, it might not work in some cases, for example when the target container does not support the copied codec. Applying
|
||||
filters is obviously also impossible, since filters work on uncompressed data.
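
A typical sketch is changing only the container (file names are hypothetical):

@example
ffmpeg -i input.avi -codec copy output.mkv
@end example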
|
||||
|
||||
@c man end DETAILED DESCRIPTION
|
||||
|
||||
@chapter Stream selection
|
||||
@c man begin STREAM SELECTION
|
||||
|
||||
@@ -347,24 +233,6 @@ Specify the preset for matching stream(s).
|
||||
@item -stats (@emph{global})
|
||||
Print encoding progress/statistics. On by default.
|
||||
|
||||
@item -progress @var{url} (@emph{global})
|
||||
Send program-friendly progress information to @var{url}.
|
||||
|
||||
Progress information is written approximately every second and at the end of
|
||||
the encoding process. It is made of "@var{key}=@var{value}" lines. @var{key}
|
||||
consists of only alphanumeric characters. The last key of a sequence of
|
||||
progress information is always "progress".
|
||||
|
||||
@item -stdin
|
||||
Enable interaction on standard input. On by default unless standard input is
|
||||
used as an input. To explicitly disable interaction you need to specify
|
||||
@code{-nostdin}.
|
||||
|
||||
Disabling interaction on standard input is useful, for example, if
|
||||
ffmpeg is in the background process group. Roughly the same result can
|
||||
be achieved with @code{ffmpeg ... < /dev/null} but it requires a
|
||||
shell.
|
||||
|
||||
@item -debug_ts (@emph{global})
|
||||
Print timestamp information. It is off by default. This option is
|
||||
mostly useful for testing and debugging purposes, and the output
|
||||
@@ -414,26 +282,10 @@ attachments.
|
||||
@item -vframes @var{number} (@emph{output})
|
||||
Set the number of video frames to record. This is an alias for @code{-frames:v}.
|
||||
@item -r[:@var{stream_specifier}] @var{fps} (@emph{input/output,per-stream})
|
||||
Set frame rate (Hz value, fraction or abbreviation).
|
||||
|
||||
As an input option, ignore any timestamps stored in the file and instead
|
||||
generate timestamps assuming constant frame rate @var{fps}.
|
||||
|
||||
As an output option, duplicate or drop input frames to achieve constant output
|
||||
frame rate @var{fps}.
|
||||
|
||||
Set frame rate (Hz value, fraction or abbreviation), (default = 25). For output
|
||||
streams implies @code{-vsync cfr}.
|
||||
@item -s[:@var{stream_specifier}] @var{size} (@emph{input/output,per-stream})
|
||||
Set frame size.
|
||||
|
||||
As an input option, this is a shortcut for the @option{video_size} private
|
||||
option, recognized by some demuxers for which the frame size is either not
|
||||
stored in the file or is configurable -- e.g. raw video or video grabbers.
|
||||
|
||||
As an output option, this inserts the @code{scale} video filter to the
|
||||
@emph{end} of the corresponding filtergraph. Please use the @code{scale} filter
|
||||
directly to insert it at the beginning or some other place.
|
||||
|
||||
The format is @samp{wxh} (default - same as source).
|
||||
Set frame size. The format is @samp{wxh} (default - same as source).
|
||||
|
||||
@item -aspect[:@var{stream_specifier}] @var{aspect} (@emph{output,per-stream})
|
||||
Set the video display aspect ratio specified by @var{aspect}.
|
||||
@@ -463,8 +315,13 @@ Disable video recording.
|
||||
|
||||
@item -vcodec @var{codec} (@emph{output})
|
||||
Set the video codec. This is an alias for @code{-codec:v}.
|
||||
@item -same_quant
|
||||
Use same quantizer as source (implies VBR).
|
||||
|
||||
@item -pass[:@var{stream_specifier}] @var{n} (@emph{output,per-stream})
|
||||
Note that this is NOT SAME QUALITY. Do not use this option unless you know you
|
||||
need it.
|
||||
|
||||
@item -pass @var{n}
|
||||
Select the pass number (1 or 2). It is used to do two-pass
|
||||
video encoding. The statistics of the video are recorded in the first
|
||||
pass into a log file (see also the option -passlogfile),
|
||||
@@ -477,7 +334,7 @@ ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y NUL
|
||||
ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y /dev/null
|
||||
@end example
|
||||
|
||||
@item -passlogfile[:@var{stream_specifier}] @var{prefix} (@emph{output,per-stream})
|
||||
@item -passlogfile @var{prefix} (@emph{global})
|
||||
Set two-pass log file name prefix to @var{prefix}, the default file name
|
||||
prefix is ``ffmpeg2pass''. The complete file name will be
|
||||
@file{PREFIX-N.log}, where N is a number specific to the output
|
||||
@@ -610,23 +467,11 @@ Disable subtitle recording.
|
||||
Deprecated, see -bsf
|
||||
@end table
|
||||
|
||||
@section Advanced Subtitle options:
|
||||
@section Audio/Video grab options
|
||||
|
||||
@table @option
|
||||
|
||||
@item -fix_sub_duration
|
||||
Fix subtitles durations. For each subtitle, wait for the next packet in the
|
||||
same stream and adjust the duration of the first to avoid overlap. This is
|
||||
necessary with some subtitles codecs, especially DVB subtitles, because the
|
||||
duration in the original packet is only a rough estimate and the end is
|
||||
actually marked by an empty subtitle frame. Failing to use this option when
|
||||
necessary can result in exaggerated durations or muxing failures due to
|
||||
non-monotonic timestamps.
|
||||
|
||||
Note that this option will delay the output of all data until the next
|
||||
subtitle packet is decoded: it may increase memory consumption and latency a
|
||||
lot.
|
||||
|
||||
@item -isync (@emph{global})
|
||||
Synchronize read on input.
|
||||
@end table
|
||||
|
||||
@section Advanced options
|
||||
@@ -740,7 +585,10 @@ filter. For example, if you need to merge a media (here @file{input.mkv}) with 2
|
||||
mono audio streams into one single stereo channel audio stream (and keep the
|
||||
video stream), you can use the following command:
|
||||
@example
|
||||
ffmpeg -i input.mkv -filter_complex "[0:1] [0:2] amerge" -c:a pcm_s16le -c:v copy output.mkv
|
||||
ffmpeg -i input.mkv -f lavfi -i "
|
||||
amovie=input.mkv:si=1 [a1];
|
||||
amovie=input.mkv:si=2 [a2];
|
||||
[a1][a2] amerge" -c:a pcm_s16le -c:v copy output.mkv
|
||||
@end example
|
||||
|
||||
@item -map_metadata[:@var{metadata_spec_out}] @var{infile}[:@var{metadata_spec_in}] (@emph{output,per-metadata})
|
||||
@@ -789,7 +637,39 @@ Copy chapters from input file with index @var{input_file_index} to the next
|
||||
output file. If no chapter mapping is specified, then chapters are copied from
|
||||
the first input file with at least one chapter. Use a negative file index to
|
||||
disable any chapter copying.
|
||||
|
||||
@item -debug @var{category}
|
||||
Print specific debug info.
|
||||
@var{category} is a number or a string containing one of the following values:
|
||||
@table @samp
|
||||
@item bitstream
|
||||
@item buffers
|
||||
picture buffer allocations
|
||||
@item bugs
|
||||
@item dct_coeff
|
||||
@item er
|
||||
error recognition
|
||||
@item mb_type
|
||||
macroblock (MB) type
|
||||
@item mmco
|
||||
memory management control operations (H.264)
|
||||
@item mv
|
||||
motion vector
|
||||
@item pict
|
||||
picture info
|
||||
@item pts
|
||||
@item qp
|
||||
per-block quantization parameter (QP)
|
||||
@item rc
|
||||
rate control
|
||||
@item skip
|
||||
@item startcode
|
||||
@item thread_ops
|
||||
threading operations
|
||||
@item vis_mb_type
|
||||
visualize block types
|
||||
@item vis_qp
|
||||
visualize quantization parameter (QP), lower QP are tinted greener
|
||||
@end table
|
||||
@item -benchmark (@emph{global})
|
||||
Show benchmarking information at the end of an encode.
|
||||
Shows CPU time used and maximum memory consumption.
|
||||
@@ -806,13 +686,6 @@ Dump each input packet to stderr.
|
||||
When dumping packets, also dump the payload.
|
||||
@item -re (@emph{input})
|
||||
Read input at native frame rate. Mainly used to simulate a grab device.
|
||||
By default @command{ffmpeg} attempts to read the input(s) as fast as possible.
|
||||
This option will slow down the reading of the input(s) to the native frame rate
|
||||
of the input(s). It is useful for real-time output (e.g. live streaming). If
|
||||
your input(s) is coming from some other live streaming source (through HTTP or
|
||||
UDP for example) the server might already be in real-time, thus the option will
|
||||
likely not be required. On the other hand, this is meaningful if your input(s)
|
||||
is a file you are trying to push in real-time.
|
||||
@item -loop_input
|
||||
Loop over the input stream. Currently it works only for image
|
||||
streams. This option is used for automatic FFserver testing.
|
||||
@@ -852,17 +725,9 @@ Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps
|
||||
the parameter is the maximum samples per second by which the audio is changed.
|
||||
-async 1 is a special case where only the start of the audio stream is corrected
|
||||
without any later correction.
|
||||
This option has been deprecated. Use the @code{aresample} audio filter instead.
|
||||
|
||||
This option has been deprecated. Use the @code{asyncts} audio filter instead.
|
||||
@item -copyts
|
||||
Do not process input timestamps, but keep their values without trying
|
||||
to sanitize them. In particular, do not remove the initial start time
|
||||
offset value.
|
||||
|
||||
Note that, depending on the @option{vsync} option or on specific muxer
|
||||
processing, the output timestamps may mismatch with the input
|
||||
timestamps even when this option is selected.
|
||||
|
||||
Copy timestamps from input to output.
|
||||
@item -copytb @var{mode}
|
||||
Specify how to set the encoder timebase when stream copying. @var{mode} is an
|
||||
integer numeric value, and can assume one of the following values:
|
||||
@@ -887,7 +752,7 @@ Try to make the choice automatically, in order to generate a sane output.
|
||||
|
||||
Default value is -1.
|
||||
|
||||
@item -shortest (@emph{output})
|
||||
@item -shortest
|
||||
Finish encoding when the shortest input stream ends.
|
||||
@item -dts_delta_threshold
|
||||
Timestamp discontinuity delta threshold.
|
||||
@@ -908,7 +773,7 @@ ffmpeg -i infile -streamid 0:33 -streamid 1:36 out.ts
|
||||
@end example
|
||||
|
||||
@item -bsf[:@var{stream_specifier}] @var{bitstream_filters} (@emph{output,per-stream})
|
||||
Set bitstream filters for matching streams. @var{bitstream_filters} is
|
||||
Set bitstream filters for matching streams. @var{bistream_filters} is
|
||||
a comma-separated list of bitstream filters. Use the @code{-bsfs} option
|
||||
to get the list of bitstream filters.
|
||||
@example
|
||||
@@ -932,8 +797,7 @@ ffmpeg -i input.mpg -timecode 01:02:03.04 -r 30000/1001 -s ntsc output.mpg
|
||||
Define a complex filter graph, i.e. one with arbitrary number of inputs and/or
|
||||
outputs. For simple graphs -- those with one input and one output of the same
|
||||
type -- see the @option{-filter} options. @var{filtergraph} is a description of
|
||||
the filter graph, as described in the ``Filtergraph syntax'' section of the
|
||||
ffmpeg-filters manual.
|
||||
the filter graph, as described in @ref{Filtergraph syntax}.
|
||||
|
||||
Input link labels must refer to input streams using the
|
||||
@code{[file_index:stream_specifier]} syntax (i.e. the same as @option{-map}
|
||||
@@ -944,9 +808,6 @@ the matching type.
|
||||
Output link labels are referred to with @option{-map}. Unlabeled outputs are
|
||||
added to the first output file.
|
||||
|
||||
Note that with this option it is possible to use only lavfi sources without
|
||||
normal input files.
|
||||
|
||||
For example, to overlay an image over video
|
||||
@example
|
||||
ffmpeg -i video.mkv -i image.png -filter_complex '[0:v][1:v]overlay[out]' -map
|
||||
@@ -969,29 +830,8 @@ graph will be added to the output file automatically, so we can simply write
|
||||
@example
|
||||
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay' out.mkv
|
||||
@end example
|
||||
|
||||
To generate 5 seconds of pure red video using lavfi @code{color} source:
|
||||
@example
|
||||
ffmpeg -filter_complex 'color=red' -t 5 out.mkv
|
||||
@end example
|
||||
@end table
|
||||
|
||||
As a special exception, you can use a bitmap subtitle stream as input: it
|
||||
will be converted into a video with the same size as the largest video in
|
||||
the file, or 720x576 if no video is present. Note that this is an
|
||||
experimental and temporary solution. It will be removed once libavfilter has
|
||||
proper support for subtitles.
|
||||
|
||||
For example, to hardcode subtitles on top of a DVB-T recording stored in
|
||||
MPEG-TS format, delaying the subtitles by 1 second:
|
||||
@example
|
||||
ffmpeg -i input.ts -filter_complex \
|
||||
'[#0x2ef] setpts=PTS+1/TB [sub] ; [#0x2d0] [sub] overlay' \
|
||||
-sn -map '#0x2dc' output.mkv
|
||||
@end example
|
||||
(0x2d0, 0x2dc and 0x2ef are the MPEG-TS PIDs of respectively the video,
|
||||
audio and subtitles streams; 0:0, 0:3 and 0:7 would have worked too)
|
||||
|
||||
@section Preset files
|
||||
A preset file contains a sequence of @var{option}=@var{value} pairs,
|
||||
one for each line, specifying a sequence of options which would be
|
||||
@@ -1015,15 +855,15 @@ First ffmpeg searches for a file named @var{arg}.ffpreset in the
|
||||
directories @file{$FFMPEG_DATADIR} (if set), and @file{$HOME/.ffmpeg}, and in
|
||||
the datadir defined at configuration time (usually @file{PREFIX/share/ffmpeg})
|
||||
or in a @file{ffpresets} folder along the executable on win32,
|
||||
in that order. For example, if the argument is @code{libvpx-1080p}, it will
|
||||
search for the file @file{libvpx-1080p.ffpreset}.
|
||||
in that order. For example, if the argument is @code{libx264-max}, it will
|
||||
search for the file @file{libx264-max.ffpreset}.
|
||||
|
||||
If no such file is found, then ffmpeg will search for a file named
|
||||
@var{codec_name}-@var{arg}.ffpreset in the above-mentioned
|
||||
directories, where @var{codec_name} is the name of the codec to which
|
||||
the preset file options will be applied. For example, if you select
|
||||
the video codec with @code{-vcodec libvpx} and use @code{-vpre 1080p},
|
||||
then it will search for the file @file{libvpx-1080p.ffpreset}.
|
||||
the video codec with @code{-vcodec libx264} and use @code{-vpre max},
|
||||
then it will search for the file @file{libx264-max.ffpreset}.
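For example, assuming a preset file @file{libx264-max.ffpreset} exists in one of the directories listed above (all file names here are placeholders):
@example
ffmpeg -i input.avi -vcodec libx264 -vpre max output.mkv
@end example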
|
||||
@c man end OPTIONS
|
||||
|
||||
@chapter Tips
|
||||
@@ -1240,21 +1080,23 @@ composed of three digits padded with zeroes to express the sequence
|
||||
number. It is the same syntax supported by the C printf function, but
|
||||
only formats accepting a normal integer are suitable.
|
||||
|
||||
When importing an image sequence, -i also supports expanding
|
||||
shell-like wildcard patterns (globbing) internally, by selecting the
|
||||
image2-specific @code{-pattern_type glob} option.
|
||||
|
||||
For example, for creating a video from filenames matching the glob pattern
|
||||
@code{foo-*.jpeg}:
|
||||
@example
|
||||
ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
|
||||
@end example
|
||||
When importing an image sequence, -i also supports expanding shell-like
|
||||
wildcard patterns (globbing) internally. To lower the chance of interfering
|
||||
with your actual file names and the shell's glob expansion, you are required
|
||||
to activate glob meta characters by prefixing them with a single @code{%}
|
||||
character, like in @code{foo-%*.jpeg}, @code{foo-%?%?%?.jpeg} or
|
||||
@code{foo-00%[234%]%*.jpeg}.
|
||||
If your filename actually contains a character sequence of a @code{%} character
|
||||
followed by a glob character, you must double the @code{%} character to escape
|
||||
it. Imagine your files begin with @code{%?-foo-}, then you could use a glob
|
||||
pattern like @code{%%?-foo-%*.jpeg}. For input patterns that could be both a
|
||||
printf or a glob pattern, ffmpeg will assume it is a glob pattern.
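For example, a sketch of the earlier @file{foo-*.jpeg} command using the escaped pattern form described above (@code{WxH} is a placeholder size, as before):
@example
ffmpeg -f image2 -i 'foo-%*.jpeg' -r 12 -s WxH foo.avi
@end example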
|
||||
|
||||
@item
|
||||
You can put many streams of the same type in the output:
|
||||
|
||||
@example
|
||||
ffmpeg -i test1.avi -i test2.avi -map 0:3 -map 0:2 -map 0:1 -map 0:0 -c copy test12.nut
|
||||
ffmpeg -i test1.avi -i test2.avi -map 0.3 -map 0.2 -map 0.1 -map 0.0 -c copy test12.nut
|
||||
@end example
|
||||
|
||||
The resulting output file @file{test12.nut} will contain the first four streams from
|
||||
@@ -1276,35 +1118,32 @@ ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
|
||||
@end itemize
|
||||
@c man end EXAMPLES
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-utils.html,ffmpeg-utils},
|
||||
@url{ffmpeg-scaler.html,ffmpeg-scaler},
|
||||
@url{ffmpeg-resampler.html,ffmpeg-resampler},
|
||||
@url{ffmpeg-codecs.html,ffmpeg-codecs},
|
||||
@url{ffmpeg-bitstream-filters,ffmpeg-bitstream-filters},
|
||||
@url{ffmpeg-formats.html,ffmpeg-formats},
|
||||
@url{ffmpeg-devices.html,ffmpeg-devices},
|
||||
@url{ffmpeg-protocols.html,ffmpeg-protocols},
|
||||
@url{ffmpeg-filters.html,ffmpeg-filters}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffplay(1), ffprobe(1), ffserver(1),
|
||||
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
|
||||
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
|
||||
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
@include syntax.texi
|
||||
@include eval.texi
|
||||
@include decoders.texi
|
||||
@include encoders.texi
|
||||
@include demuxers.texi
|
||||
@include muxers.texi
|
||||
@include indevs.texi
|
||||
@include outdevs.texi
|
||||
@include protocols.texi
|
||||
@include bitstream_filters.texi
|
||||
@include filters.texi
|
||||
@include metadata.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg
|
||||
@settitle ffmpeg video converter
|
||||
|
||||
@c man begin SEEALSO
|
||||
ffplay(1), ffprobe(1), ffserver(1) and the FFmpeg HTML documentation
|
||||
@c man end
|
||||
|
||||
@c man begin AUTHORS
|
||||
See git history
|
||||
@c man end
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
|
@@ -11,7 +11,11 @@
|
||||
|
||||
@chapter Synopsis
|
||||
|
||||
ffplay [@var{options}] [@file{input_file}]
|
||||
@example
|
||||
@c man begin SYNOPSIS
|
||||
ffplay [options] [@file{input_file}]
|
||||
@c man end
|
||||
@end example
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
@@ -130,20 +134,8 @@ Exit when video is done playing.
|
||||
Exit if any key is pressed.
|
||||
@item -exitonmousedown
|
||||
Exit if any mouse button is pressed.
|
||||
|
||||
@item -codec:@var{media_specifier} @var{codec_name}
|
||||
Force a specific decoder implementation for the stream identified by
|
||||
@var{media_specifier}, which can assume the values @code{a} (audio),
|
||||
@code{v} (video), and @code{s} (subtitle).
|
||||
|
||||
@item -acodec @var{codec_name}
|
||||
Force a specific audio decoder.
|
||||
|
||||
@item -vcodec @var{codec_name}
|
||||
Force a specific video decoder.
|
||||
|
||||
@item -scodec @var{codec_name}
|
||||
Force a specific subtitle decoder.
|
||||
@item -codec:@var{stream_type}
|
||||
Force a specific decoder implementation
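For example, to force the @code{mpeg4} video decoder for a (hypothetical) input file:
@example
ffplay -vcodec mpeg4 input.avi
@end example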
|
||||
@end table
|
||||
|
||||
@section While playing
|
||||
@@ -186,35 +178,29 @@ Seek to percentage in file corresponding to fraction of width.
|
||||
|
||||
@c man end
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-utils.html,ffmpeg-utils},
|
||||
@url{ffmpeg-scaler.html,ffmpeg-scaler},
|
||||
@url{ffmpeg-resampler.html,ffmpeg-resampler},
|
||||
@url{ffmpeg-codecs.html,ffmpeg-codecs},
|
||||
@url{ffmpeg-bitstream-filters,ffmpeg-bitstream-filters},
|
||||
@url{ffmpeg-formats.html,ffmpeg-formats},
|
||||
@url{ffmpeg-devices.html,ffmpeg-devices},
|
||||
@url{ffmpeg-protocols.html,ffmpeg-protocols},
|
||||
@url{ffmpeg-filters.html,ffmpeg-filters}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffprobe(1), ffserver(1),
|
||||
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
|
||||
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
|
||||
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
@include syntax.texi
|
||||
@include eval.texi
|
||||
@include decoders.texi
|
||||
@include demuxers.texi
|
||||
@include muxers.texi
|
||||
@include indevs.texi
|
||||
@include outdevs.texi
|
||||
@include protocols.texi
|
||||
@include filters.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffplay
|
||||
@settitle FFplay media player
|
||||
|
||||
@c man begin SEEALSO
|
||||
ffmpeg(1), ffprobe(1), ffserver(1) and the FFmpeg HTML documentation
|
||||
@c man end
|
||||
|
||||
@c man begin AUTHORS
|
||||
The FFmpeg developers
|
||||
@c man end
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
|
245
doc/ffprobe.texi
@@ -11,7 +11,13 @@
|
||||
|
||||
@chapter Synopsis
|
||||
|
||||
ffprobe [@var{options}] [@file{input_file}]
|
||||
The generic syntax is:
|
||||
|
||||
@example
|
||||
@c man begin SYNOPSIS
|
||||
ffprobe [options] [@file{input_file}]
|
||||
@c man end
|
||||
@end example
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
@@ -39,10 +45,6 @@ ffprobe output is designed to be easily parsable by a textual filter,
|
||||
and consists of one or more sections of a form defined by the selected
|
||||
writer, which is specified by the @option{print_format} option.
|
||||
|
||||
Sections may contain other nested sections, and are identified by a
|
||||
name (which may be shared by other sections), and a unique
|
||||
name. See the output of @option{sections}.
|
||||
|
||||
Metadata tags stored in the container or in the streams are recognized
|
||||
and printed in the corresponding "FORMAT" or "STREAM" section.
|
||||
|
||||
@@ -78,7 +80,7 @@ Use sexagesimal format HH:MM:SS.MICROSECONDS for time values.
|
||||
Prettify the format of the displayed values; it corresponds to the
|
||||
options "-unit -prefix -byte_binary_prefix -sexagesimal".
|
||||
|
||||
@item -of, -print_format @var{writer_name}[=@var{writer_options}]
|
||||
@item -print_format @var{writer_name}[=@var{writer_options}]
|
||||
Set the output printing format.
|
||||
|
||||
@var{writer_name} specifies the name of the writer, and
|
||||
@@ -92,32 +94,6 @@ For example for printing the output in JSON format, specify:
|
||||
For more details on the available output printing formats, see the
|
||||
Writers section below.
|
||||
|
||||
@item -sections
|
||||
Print sections structure and section information, and exit. The output
|
||||
is not meant to be parsed by a machine.
|
||||
|
||||
@item -select_streams @var{stream_specifier}
|
||||
Select only the streams specified by @var{stream_specifier}. This
|
||||
option affects only the options related to streams
|
||||
(e.g. @code{show_streams}, @code{show_packets}, etc.).
|
||||
|
||||
For example to show only audio streams, you can use the command:
|
||||
@example
|
||||
ffprobe -show_streams -select_streams a INPUT
|
||||
@end example
|
||||
|
||||
To show only video packets belonging to the video stream with index 1:
|
||||
@example
|
||||
ffprobe -show_packets -select_streams v:1 INPUT
|
||||
@end example
|
||||
|
||||
@item -show_data
|
||||
Show payload data, as a hexadecimal and ASCII dump. Coupled with
|
||||
@option{-show_packets}, it will dump the packets' data. Coupled with
|
||||
@option{-show_streams}, it will dump the codec extradata.
|
||||
|
||||
The dump is printed as the "data" field. It may contain newlines.
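For example, to dump the payload of every packet of a (hypothetical) input file:
@example
ffprobe -show_packets -show_data input.mkv
@end example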
|
||||
|
||||
@item -show_error
|
||||
Show information about the error found when trying to probe the input.
|
||||
|
||||
@@ -135,59 +111,6 @@ Like @option{-show_format}, but only prints the specified entry of the
|
||||
container format information, rather than all. This option may be given more
|
||||
than once, then all specified entries will be shown.
|
||||
|
||||
This option is deprecated, use @code{show_entries} instead.
|
||||
|
||||
@item -show_entries @var{section_entries}
|
||||
Set list of entries to show.
|
||||
|
||||
Entries are specified according to the following
|
||||
syntax. @var{section_entries} contains a list of section entries
|
||||
separated by @code{:}. Each section entry is composed by a section
|
||||
name (or unique name), optionally followed by a list of entries local
|
||||
to that section, separated by @code{,}.
|
||||
|
||||
If a section name is specified but not followed by @code{=}, all
|
||||
entries are printed to output, together with all the contained
|
||||
sections. Otherwise only the entries specified in the local section
|
||||
entries list are printed. In particular, if @code{=} is specified but
|
||||
the list of local entries is empty, then no entries will be shown for
|
||||
that section.
|
||||
|
||||
Note that the order of specification of the local section entries is
|
||||
not honored in the output, and the usual display order will be
|
||||
retained.
|
||||
|
||||
The formal syntax is given by:
|
||||
@example
|
||||
@var{LOCAL_SECTION_ENTRIES} ::= @var{SECTION_ENTRY_NAME}[,@var{LOCAL_SECTION_ENTRIES}]
|
||||
@var{SECTION_ENTRY} ::= @var{SECTION_NAME}[=[@var{LOCAL_SECTION_ENTRIES}]]
|
||||
@var{SECTION_ENTRIES} ::= @var{SECTION_ENTRY}[:@var{SECTION_ENTRIES}]
|
||||
@end example
|
||||
|
||||
For example, to show only the index and type of each stream, and the PTS
|
||||
time, duration time, and stream index of the packets, you can specify
|
||||
the argument:
|
||||
@example
|
||||
packet=pts_time,duration_time,stream_index : stream=index,codec_type
|
||||
@end example
|
||||
|
||||
To show all the entries in the section "format", but only the codec
|
||||
type in the section "stream", specify the argument:
|
||||
@example
|
||||
format : stream=codec_type
|
||||
@end example
|
||||
|
||||
To show all the tags in the stream and format sections:
|
||||
@example
|
||||
format_tags : format_tags
|
||||
@end example
|
||||
|
||||
To show only the @code{title} tag (if available) in the stream
|
||||
sections:
|
||||
@example
|
||||
stream_tags=title
|
||||
@end example
|
||||
|
||||
@item -show_packets
|
||||
Show information about each packet contained in the input multimedia
|
||||
stream.
|
||||
@@ -240,10 +163,6 @@ Show information related to program and library versions. This is the
|
||||
equivalent of setting both @option{-show_program_version} and
|
||||
@option{-show_library_versions} options.
|
||||
|
||||
@item -bitexact
|
||||
Force bitexact output, useful to produce output which is not dependent
|
||||
on the specific build.
|
||||
|
||||
@item -i @var{input_file}
|
||||
Read @var{input_file}.
|
||||
|
||||
@@ -256,9 +175,8 @@ Read @var{input_file}.
|
||||
A writer defines the output format adopted by @command{ffprobe}, and will be
|
||||
used for printing all the parts of the output.
|
||||
|
||||
A writer may accept one or more arguments, which specify the options
|
||||
to adopt. The options are specified as a list of @var{key}=@var{value}
|
||||
pairs, separated by ":".
|
||||
A writer may accept one or more arguments, which specify the options to
|
||||
adopt.
|
||||
|
||||
A description of the currently available writers follows.
|
||||
|
||||
@@ -277,6 +195,9 @@ keyN=valN
|
||||
Metadata tags are printed as a line in the corresponding FORMAT or
|
||||
STREAM section, and are prefixed by the string "TAG:".
|
||||
|
||||
This writer accepts options as a list of @var{key}=@var{value} pairs,
|
||||
separated by ":".
|
||||
|
||||
A description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
@@ -290,11 +211,8 @@ If set to 1 specify not to print the section header and footer.
|
||||
Default value is 0.
|
||||
@end table
|
||||
|
||||
@section compact, csv
|
||||
Compact and CSV format.
|
||||
|
||||
The @code{csv} writer is equivalent to @code{compact}, but supports
|
||||
different defaults.
|
||||
@section compact
|
||||
Compact format.
|
||||
|
||||
Each section is printed on a single line.
|
||||
If no option is specified, the output has the form:
|
||||
@@ -306,29 +224,30 @@ Metadata tags are printed in the corresponding "format" or "stream"
|
||||
section. A metadata tag key, if printed, is prefixed by the string
|
||||
"tag:".
|
||||
|
||||
This writer accepts options as a list of @var{key}=@var{value} pairs,
|
||||
separated by ":".
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
|
||||
@item item_sep, s
|
||||
Specify the character to use for separating fields in the output line.
|
||||
It must be a single printable character, it is "|" by default ("," for
|
||||
the @code{csv} writer).
|
||||
It must be a single printable character, it is "|" by default.
|
||||
|
||||
@item nokey, nk
|
||||
If set to 1 specify not to print the key of each field. Its default
|
||||
value is 0 (1 for the @code{csv} writer).
|
||||
value is 0.
|
||||
|
||||
@item escape, e
|
||||
Set the escape mode to use, default to "c" ("csv" for the @code{csv}
|
||||
writer).
|
||||
Set the escape mode to use, default to "c".
|
||||
|
||||
It can assume one of the following values:
|
||||
@table @option
|
||||
@item c
|
||||
Perform C-like escaping. Strings containing a newline ('\n'), carriage
|
||||
return ('\r'), a tab ('\t'), a form feed ('\f'), the escaping
|
||||
character ('\') or the item separator character @var{SEP} are escaped using C-like fashioned
|
||||
Perform C-like escaping. Strings containing a newline ('\n') or
|
||||
carriage return ('\r'), the escaping character ('\') or the item
|
||||
separator character @var{SEP} are escaped using C-like fashioned
|
||||
escaping, so that a newline is converted to the sequence "\n", a
|
||||
carriage return to "\r", '\' to "\\" and the separator @var{SEP} is
|
||||
converted to "\@var{SEP}".
|
||||
@@ -342,83 +261,22 @@ containing a newline ('\n'), a carriage return ('\r'), a double quote
|
||||
Perform no escaping.
|
||||
@end table
|
||||
|
||||
@item print_section, p
|
||||
Print the section name at the beginning of each line if the value is
|
||||
@code{1}, disable it with value set to @code{0}. Default value is
|
||||
@code{1}.
|
||||
|
||||
@end table
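For example, a minimal invocation selecting the compact writer (the input name is a placeholder):
@example
ffprobe -show_format -print_format compact input.mkv
@end example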
|
||||
|
||||
@section flat
|
||||
Flat format.
|
||||
@section csv
|
||||
CSV format.
|
||||
|
||||
A free-form output where each line contains an explicit key=value, such as
|
||||
"streams.stream.3.tags.foo=bar". The output is shell escaped, so it can be
|
||||
directly embedded in sh scripts as long as the separator character is an
|
||||
alphanumeric character or an underscore (see @var{sep_char} option).
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
@item sep_char, s
|
||||
Separator character used to separate the chapter, the section name, IDs and
|
||||
potential tags in the printed field key.
|
||||
|
||||
Default value is '.'.
|
||||
|
||||
@item hierarchical, h
|
||||
Specify if the section name specification should be hierarchical. If
|
||||
set to 1, and if there is more than one section in the current
|
||||
chapter, the section name will be prefixed by the name of the
|
||||
chapter. A value of 0 will disable this behavior.
|
||||
|
||||
Default value is 1.
|
||||
@end table
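For example, to print the stream information of a (hypothetical) input in flat format:
@example
ffprobe -show_streams -print_format flat input.mkv
@end example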
|
||||
|
||||
@section ini
|
||||
INI format output.
|
||||
|
||||
Print output in an INI based format.
|
||||
|
||||
The following conventions are adopted:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
all key and values are UTF-8
|
||||
@item
|
||||
'.' is the subgroup separator
|
||||
@item
|
||||
newline, '\t', '\f', '\b' and the following characters are escaped
|
||||
@item
|
||||
'\' is the escape character
|
||||
@item
|
||||
'#' is the comment indicator
|
||||
@item
|
||||
'=' is the key/value separator
|
||||
@item
|
||||
':' is not used but usually parsed as key/value separator
|
||||
@end itemize
|
||||
|
||||
This writer accepts options as a list of @var{key}=@var{value} pairs,
|
||||
separated by ":".
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
@item hierarchical, h
|
||||
Specify if the section name specification should be hierarchical. If
|
||||
set to 1, and if there is more than one section in the current
|
||||
chapter, the section name will be prefixed by the name of the
|
||||
chapter. A value of 0 will disable this behavior.
|
||||
|
||||
Default value is 1.
|
||||
@end table
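For example, to print the container information of a (hypothetical) input in INI format:
@example
ffprobe -show_format -print_format ini input.mkv
@end example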
|
||||
This writer is equivalent to
|
||||
@code{compact=item_sep=,:nokey=1:escape=csv}.
|
||||
|
||||
@section json
|
||||
JSON based format.
|
||||
|
||||
Each section is printed using JSON notation.
|
||||
|
||||
This writer accepts options as a list of @var{key}=@var{value} pairs,
|
||||
separated by ":".
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
@@ -445,6 +303,9 @@ Note that the output issued will be compliant to the
|
||||
(@option{unit}, @option{prefix}, @option{byte_binary_prefix},
|
||||
@option{sexagesimal} etc.) are specified.
|
||||
|
||||
This writer accepts options as a list of @var{key}=@var{value} pairs,
|
||||
separated by ":".
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
MOV timecode is extracted from the tmcd track, so it is available in the tmcd
|
||||
stream metadata (@option{-show_streams}, see @var{TAG:timecode}).
|
||||
|
||||
@item
|
||||
DV, GXF and AVI timecodes are available in format metadata
|
||||
DV and GXF timecodes are available in format metadata
|
||||
(@option{-show_format}, see @var{TAG:timecode}).
|
||||
|
||||
@end itemize
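For example, a sketch for inspecting the MOV timecode through the stream tags mentioned above (the file name is a placeholder):
@example
ffprobe -show_entries stream_tags=timecode input.mov
@end example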
|
||||
@c man end TIMECODE
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffplay.html,ffmpeg}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-utils.html,ffmpeg-utils},
|
||||
@url{ffmpeg-scaler.html,ffmpeg-scaler},
|
||||
@url{ffmpeg-resampler.html,ffmpeg-resampler},
|
||||
@url{ffmpeg-codecs.html,ffmpeg-codecs},
|
||||
@url{ffmpeg-bitstream-filters,ffmpeg-bitstream-filters},
|
||||
@url{ffmpeg-formats.html,ffmpeg-formats},
|
||||
@url{ffmpeg-devices.html,ffmpeg-devices},
|
||||
@url{ffmpeg-protocols.html,ffmpeg-protocols},
|
||||
@url{ffmpeg-filters.html,ffmpeg-filters}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffserver(1),
|
||||
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
|
||||
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
|
||||
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
@include syntax.texi
|
||||
@include decoders.texi
|
||||
@include demuxers.texi
|
||||
@include protocols.texi
|
||||
@include indevs.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffprobe
|
||||
@settitle ffprobe media prober
|
||||
|
||||
@c man begin SEEALSO
|
||||
ffmpeg(1), ffplay(1), ffserver(1) and the FFmpeg HTML documentation
|
||||
@c man end
|
||||
|
||||
@c man begin AUTHORS
|
||||
The FFmpeg developers
|
||||
@c man end
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
|
@@ -39,12 +39,9 @@
|
||||
<xsd:attribute name="dts_time" type="xsd:float" />
|
||||
<xsd:attribute name="duration" type="xsd:long" />
|
||||
<xsd:attribute name="duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="convergence_duration" type="xsd:long" />
|
||||
<xsd:attribute name="convergence_duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="size" type="xsd:long" use="required" />
|
||||
<xsd:attribute name="pos" type="xsd:long" />
|
||||
<xsd:attribute name="flags" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="data" type="xsd:string" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameType">
|
||||
@@ -56,16 +53,11 @@
|
||||
<xsd:attribute name="pkt_pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_dts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_dts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_duration" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_duration_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pos" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_size" type="xsd:int" />
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="nb_samples" type="xsd:long" />
|
||||
<xsd:attribute name="channels" type="xsd:int" />
|
||||
<xsd:attribute name="channel_layout" type="xsd:string"/>
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:long" />
|
||||
@@ -87,35 +79,14 @@
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamDispositionType">
|
||||
<xsd:attribute name="default" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="dub" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="original" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="comment" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="lyrics" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="karaoke" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="forced" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="hearing_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="visual_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="clean_effects" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="attached_pic" type="xsd:int" use="required" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="disposition" type="ffprobe:streamDispositionType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="codec_name" type="xsd:string" />
|
||||
<xsd:attribute name="codec_long_name" type="xsd:string" />
|
||||
<xsd:attribute name="profile" type="xsd:string" />
|
||||
<xsd:attribute name="codec_type" type="xsd:string" />
|
||||
<xsd:attribute name="codec_time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="extradata" type="xsd:string" />
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:int"/>
|
||||
@@ -137,9 +108,7 @@
|
||||
<xsd:attribute name="r_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="avg_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration_ts" type="xsd:long"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_frames" type="xsd:int"/>
|
||||
@@ -155,7 +124,7 @@
|
||||
<xsd:attribute name="filename" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="format_name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="format_long_name" type="xsd:string"/>
|
||||
<xsd:attribute name="format_long_name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="size" type="xsd:long"/>
|
||||
@@ -188,7 +157,6 @@
|
||||
<xsd:attribute name="minor" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="micro" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="version" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="ident" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="libraryVersionsType">
|
||||
|
@@ -25,6 +25,10 @@ MaxBandwidth 1000
|
||||
# '-' is the standard output.
|
||||
CustomLog -
|
||||
|
||||
# Suppress that if you want to launch ffserver as a daemon.
|
||||
NoDaemon
|
||||
|
||||
|
||||
##################################################################
|
||||
# Definition of the live feeds. Each live feed contains one video
|
||||
# and/or audio sequence coming from an ffmpeg encoder or another
|
||||
|
@@ -9,9 +9,15 @@
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Synopsis
|
||||
@chapter Synopsys
|
||||
|
||||
ffserver [@var{options}]
|
||||
The generic syntax is:
|
||||
|
||||
@example
|
||||
@c man begin SYNOPSIS
|
||||
ffserver [options]
|
||||
@c man end
|
||||
@end example
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
@@ -22,6 +28,11 @@ several live feeds, streaming from files and time shifting on live feeds
|
||||
(you can seek to positions in the past on each live feed, provided you
|
||||
specify a big enough feed storage in ffserver.conf).
|
||||
|
||||
ffserver runs in daemon mode by default; that is, it puts itself in
|
||||
the background and detaches from its TTY, unless it is launched in
|
||||
debug mode or a NoDaemon option is specified in the configuration
|
||||
file.
|
||||
|
||||
This documentation covers only the streaming aspects of ffserver /
|
||||
ffmpeg. Questions about ffmpeg parameters, codec options,
|
||||
etc. are not covered here. Read @file{ffmpeg.html} for more
|
||||
@@ -81,7 +92,7 @@ to make it work correctly.
|
||||
|
||||
@section What do I need?
|
||||
|
||||
I use Linux on a 900 MHz Duron with a cheap Bt848 based TV capture card. I'm
|
||||
I use Linux on a 900 MHz Duron with a cheapo Bt848 based TV capture card. I'm
|
||||
using stock Linux 2.4.17 with the stock drivers. [Actually that isn't true,
|
||||
I needed some special drivers for my motherboard-based sound card.]
|
||||
|
||||
@@ -227,19 +238,6 @@ You use this by adding the ?date= to the end of the URL for the stream.
|
||||
For example: @samp{http://localhost:8080/test.asf?date=2002-07-26T23:05:00}.
|
||||
@c man end
|
||||
|
||||
@section What is FFM, FFM2
|
||||
|
||||
FFM and FFM2 are formats used by ffserver. They allow storing a wide variety of
|
||||
video and audio streams and encoding options, and can store a moving time segment
|
||||
of an infinite movie or a whole movie.
|
||||
|
||||
FFM is version specific, and there is limited compatibility of FFM files
|
||||
generated by one version of ffmpeg/ffserver and another version of
|
||||
ffmpeg/ffserver. It may work, but it is not guaranteed to.
|
||||
|
||||
FFM2 is extensible while maintaining compatibility and should work between
|
||||
differing versions of tools. FFM2 is the default.
|
||||
|
||||
@chapter Options
|
||||
@c man begin OPTIONS
|
||||
|
||||
@@ -256,40 +254,26 @@ within the various <Stream> sections. Since ffserver will not launch
|
||||
any ffmpeg instances, you will have to launch them manually.
|
||||
@item -d
|
||||
Enable debug mode. This option increases log verbosity, directs log
|
||||
messages to stdout.
|
||||
messages to stdout and causes ffserver to run in the foreground
|
||||
rather than as a daemon.
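For example, a sketch of launching ffserver in debug mode with an explicit configuration file (the path is a placeholder, and @option{-f} is assumed to select the configuration file):
@example
ffserver -d -f /etc/ffserver.conf
@end example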
|
||||
@end table
|
||||
@c man end
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
The @file{doc/ffserver.conf} example,
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe},
|
||||
@url{ffmpeg-utils.html,ffmpeg-utils},
|
||||
@url{ffmpeg-scaler.html,ffmpeg-scaler},
|
||||
@url{ffmpeg-resampler.html,ffmpeg-resampler},
|
||||
@url{ffmpeg-codecs.html,ffmpeg-codecs},
|
||||
@url{ffmpeg-bitstream-filters,ffmpeg-bitstream-filters},
|
||||
@url{ffmpeg-formats.html,ffmpeg-formats},
|
||||
@url{ffmpeg-devices.html,ffmpeg-devices},
|
||||
@url{ffmpeg-protocols.html,ffmpeg-protocols},
|
||||
@url{ffmpeg-filters.html,ffmpeg-filters}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
The @file{doc/ffserver.conf} example, ffmpeg(1), ffplay(1), ffprobe(1),
|
||||
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
|
||||
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
|
||||
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffserver
|
||||
@settitle ffserver video server
|
||||
|
||||
@c man begin SEEALSO
|
||||
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), the @file{ffserver.conf}
|
||||
example and the FFmpeg HTML documentation
|
||||
@c man end
|
||||
|
||||
@c man begin AUTHORS
|
||||
The FFmpeg developers
|
||||
@c man end
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
|
@@ -12,7 +12,7 @@ Format negotiation
|
||||
==================
|
||||
|
||||
The query_formats method should set, for each input and each output links,
|
||||
the list of supported formats.
|
||||
the list supported formats.
|
||||
|
||||
For video links, that means pixel format. For audio links, that means
|
||||
channel layout, and sample format (the sample packing is implied by the
|
||||
@@ -26,167 +26,14 @@ Format negotiation
|
||||
references to the list are updated.
|
||||
|
||||
That means that if a filter requires that its input and output have the
|
||||
same format amongst a supported list, all it has to do is use a reference
|
||||
same format amongst a supported list, all it have to do is use a reference
|
||||
to the same list of formats.
|
||||
|
||||
|
||||
Buffer references ownership and permissions
|
||||
===========================================
|
||||
|
||||
Principle
|
||||
---------
|
||||
|
||||
Audio and video data are voluminous; the buffer and buffer reference
|
||||
mechanism is intended to avoid, as much as possible, expensive copies of
|
||||
that data while still allowing the filters to produce correct results.
|
||||
|
||||
The data is stored in buffers represented by AVFilterBuffer structures.
|
||||
They must not be accessed directly, but through references stored in
|
||||
AVFilterBufferRef structures. Several references can point to the
|
||||
same buffer; the buffer is automatically deallocated once all
|
||||
corresponding references have been destroyed.
|
||||
|
||||
The characteristics of the data (resolution, sample rate, etc.) are
|
||||
stored in the reference; different references for the same buffer can
|
||||
show different characteristics. In particular, a video reference can
|
||||
point to only a part of a video buffer.
|
||||
|
||||
A reference is usually obtained as input to the start_frame or
|
||||
filter_frame method or requested using the ff_get_video_buffer or
|
||||
ff_get_audio_buffer functions. A new reference on an existing buffer can
|
||||
be created with the avfilter_ref_buffer. A reference is destroyed using
|
||||
the avfilter_unref_bufferp function.
|
||||
|
||||
Reference ownership
|
||||
-------------------
|
||||
|
||||
At any time, a reference “belongs” to a particular piece of code,
|
||||
usually a filter. With a few caveats that will be explained below, only
|
||||
that piece of code is allowed to access it. It is also responsible for
|
||||
destroying it, although this is sometimes done automatically (see the
|
||||
section on link reference fields).
|
||||
|
||||
Here are the (fairly obvious) rules for reference ownership:
|
||||
|
||||
* A reference received by the start_frame or filter_frame method
|
||||
belong to the corresponding filter.
|
||||
|
||||
Special exception: for video references: the reference may be used
|
||||
internally for automatic copying and must not be destroyed before
|
||||
end_frame; it can be given away to ff_start_frame.
|
||||
|
||||
* A reference passed to ff_start_frame or ff_filter_frame is given
|
||||
away and must no longer be used.
|
||||
|
||||
* A reference created with avfilter_ref_buffer belongs to the code that
|
||||
created it.
|
||||
|
||||
* A reference obtained with ff_get_video_buffer or ff_get_audio_buffer
|
||||
belongs to the code that requested it.
|
||||
|
||||
* A reference given as return value by the get_video_buffer or
|
||||
get_audio_buffer method is given away and must no longer be used.
|
||||
|
||||
Link reference fields
|
||||
---------------------
|
||||
|
||||
The AVFilterLink structure has a few AVFilterBufferRef fields. Here are
|
||||
the rules to handle them:
|
||||
|
||||
* cur_buf is set before the start_frame and filter_frame methods to
|
||||
the same reference given as argument to the methods and belongs to the
|
||||
destination filter of the link. If it has not been cleared after
|
||||
end_frame or filter_frame, libavfilter will automatically destroy
|
||||
the reference; therefore, any filter that needs to keep the reference
|
||||
for longer must set cur_buf to NULL.
|
||||
|
||||
* out_buf belongs to the source filter of the link and can be used to
|
||||
store a reference to the buffer that has been sent to the destination.
|
||||
If it is not NULL after end_frame or filter_frame, libavfilter will
|
||||
automatically destroy the reference.
|
||||
|
||||
If a video input pad does not have a start_frame method, the default
|
||||
method will request a buffer on the first output of the filter, store
|
||||
the reference in out_buf and push a second reference to the output.
|
||||
|
||||
* src_buf, cur_buf_copy and partial_buf are used by libavfilter
|
||||
internally and must not be accessed by filters.
|
||||
|
||||
Reference permissions
|
||||
---------------------
|
||||
|
||||
The AVFilterBufferRef structure has a perms field that describes what
|
||||
the code that owns the reference is allowed to do to the buffer data.
|
||||
Different references for the same buffer can have different permissions.
|
||||
|
||||
For video filters, the permissions only apply to the parts of the buffer
|
||||
that have already been covered by the draw_slice method.
|
||||
|
||||
The value is a binary OR of the following constants:
|
||||
|
||||
* AV_PERM_READ: the owner can read the buffer data; this is essentially
|
||||
always true and is there for self-documentation.
|
||||
|
||||
* AV_PERM_WRITE: the owner can modify the buffer data.
|
||||
|
||||
* AV_PERM_PRESERVE: the owner can rely on the fact that the buffer data
|
||||
will not be modified by previous filters.
|
||||
|
||||
* AV_PERM_REUSE: the owner can output the buffer several times, without
|
||||
modifying the data in between.
|
||||
|
||||
* AV_PERM_REUSE2: the owner can output the buffer several times and
|
||||
modify the data in between (useless without the WRITE permissions).
|
||||
|
||||
* AV_PERM_ALIGN: the owner can access the data using fast operations
|
||||
that require data alignment.
|
||||
|
||||
The READ, WRITE and PRESERVE permissions are about sharing the same
|
||||
buffer between several filters to avoid expensive copies without them
|
||||
doing conflicting changes on the data.
|
||||
|
||||
The REUSE and REUSE2 permissions are about special memory for direct
|
||||
rendering. For example a buffer directly allocated in video memory must
|
||||
not be modified once it is displayed on screen, or it will cause tearing;
|
||||
it will therefore not have the REUSE2 permission.
|
||||
|
||||
The ALIGN permission is about extracting part of the buffer, for
|
||||
copy-less padding or cropping for example.
|
||||
|
||||
|
||||
References received on input pads are guaranteed to have all the
|
||||
permissions stated in the min_perms field and none of the permissions
|
||||
stated in the rej_perms.
|
||||
|
||||
References obtained by ff_get_video_buffer and ff_get_audio_buffer are
|
||||
guaranteed to have at least all the permissions requested as argument.
|
||||
|
||||
References created by avfilter_ref_buffer have the same permissions as
|
||||
the original reference minus the ones explicitly masked; the mask is
|
||||
usually ~0 to keep the same permissions.
|
||||
|
||||
Filters should remove permissions on references they give to output
|
||||
whenever necessary. It can be automatically done by setting the
|
||||
rej_perms field on the output pad.
|
||||
|
||||
Here are a few guidelines corresponding to common situations:
|
||||
|
||||
* Filters that modify and forward their frame (like drawtext) need the
|
||||
WRITE permission.
|
||||
|
||||
* Filters that read their input to produce a new frame on output (like
|
||||
scale) need the READ permission on input and must request a buffer
|
||||
with the WRITE permission.
|
||||
|
||||
* Filters that intend to keep a reference after the filtering process
|
||||
is finished (after end_frame or filter_frame returns) must have the
|
||||
PRESERVE permission on it and remove the WRITE permission if they
|
||||
create a new reference to give it away.
|
||||
|
||||
* Filters that intend to modify a reference they have kept after the end
|
||||
of the filtering process need the REUSE2 permission and must remove
|
||||
the PRESERVE permission if they create a new reference to give it
|
||||
away.
|
||||
TODO
|
||||
|
||||
|
||||
Frame scheduling
|
||||
@@ -198,7 +45,7 @@ Frame scheduling
|
||||
Simple filters that output one frame for each input frame should not have
|
||||
to worry about it.
|
||||
|
||||
start_frame / filter_frame
|
||||
start_frame / filter_samples
|
||||
----------------------------
|
||||
|
||||
These methods are called when a frame is pushed to the filter's input.
|
||||
@@ -223,7 +70,7 @@ Frame scheduling
|
||||
request_frame method or the application.
|
||||
|
||||
If a filter has several inputs, the filter must be ready for frames
|
||||
arriving randomly on any input. Therefore, any filter with several inputs
|
||||
arriving randomly on any input. Therefore, any filter with several input
|
||||
will most likely require some kind of queuing mechanism. It is perfectly
|
||||
acceptable to have a limited queue and to drop frames when the inputs
|
||||
are too unbalanced.
|
||||
@@ -233,12 +80,12 @@ Frame scheduling
|
||||
|
||||
This method is called when a frame is wanted on an output.
|
||||
|
||||
For an input, it should directly call start_frame or filter_frame on
|
||||
For an input, it should directly call start_frame or filter_samples on
|
||||
the corresponding output.
|
||||
|
||||
For a filter, if there are queued frames already ready, one of these
|
||||
frames should be pushed. If not, the filter should request a frame on
|
||||
one of its inputs, repeatedly until at least one frame has been pushed.
|
||||
one of its input, repeatedly until at least one frame has been pushed.
|
||||
|
||||
Return values:
|
||||
if request_frame could produce a frame, it should return 0;
|
||||
@@ -266,4 +113,4 @@ Frame scheduling
|
||||
|
||||
Note that, except for filters that can have queued frames, request_frame
|
||||
does not push frames: it requests them to its input, and as a reaction,
|
||||
the start_frame / filter_frame method will be called and do the work.
|
||||
the start_frame / filter_samples method will be called and do the work.
|
||||
|
3371
doc/filters.texi
File diff suppressed because it is too large
170
doc/general.texi
@@ -26,8 +26,8 @@ instructions. To enable using OpenJPEG in FFmpeg, pass @code{--enable-libopenjp
|
||||
|
||||
@section OpenCORE and VisualOn libraries
|
||||
|
||||
Spun off Google Android sources, OpenCore, VisualOn and Fraunhofer
|
||||
libraries provide encoders for a number of audio codecs.
|
||||
Spun off Google Android sources, OpenCore and VisualOn libraries provide
|
||||
encoders for a number of audio codecs.
|
||||
|
||||
@float NOTE
|
||||
OpenCORE and VisualOn libraries are under the Apache License 2.0
|
||||
@@ -63,14 +63,6 @@ Go to @url{http://sourceforge.net/projects/opencore-amr/} and follow the
|
||||
instructions for installing the library.
|
||||
Then pass @code{--enable-libvo-amrwbenc} to configure to enable it.
|
||||
|
||||
@subsection Fraunhofer AAC library
|
||||
|
||||
FFmpeg can make use of the Fraunhofer AAC library for AAC encoding.
|
||||
|
||||
Go to @url{http://sourceforge.net/projects/opencore-amr/} and follow the
|
||||
instructions for installing the library.
|
||||
Then pass @code{--enable-libfdk-aac} to configure to enable it.
|
||||
|
||||
@section LAME
|
||||
|
||||
FFmpeg can make use of the LAME library for MP3 encoding.
|
||||
@@ -79,14 +71,6 @@ Go to @url{http://lame.sourceforge.net/} and follow the
|
||||
instructions for installing the library.
|
||||
Then pass @code{--enable-libmp3lame} to configure to enable it.
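For example, a sketch of a configure invocation enabling the library (the same pattern applies to the other external libraries described in this chapter):
@example
./configure --enable-libmp3lame
@end example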
|
||||
|
||||
@section TwoLAME
|
||||
|
||||
FFmpeg can make use of the TwoLAME library for MP2 encoding.
|
||||
|
||||
Go to @url{http://www.twolame.org/} and follow the
|
||||
instructions for installing the library.
|
||||
Then pass @code{--enable-libtwolame} to configure to enable it.
|
||||
|
||||
@section libvpx
|
||||
|
||||
FFmpeg can make use of the libvpx library for VP8 encoding.
|
||||
@@ -109,17 +93,6 @@ x264 is under the GNU Public License Version 2 or later
|
||||
details), you must upgrade FFmpeg's license to GPL in order to use it.
|
||||
@end float
|
||||
|
||||
@section libilbc
|
||||
|
||||
iLBC is a narrowband speech codec that has been made freely available
|
||||
by Google as part of the WebRTC project. libilbc is a packaging friendly
|
||||
copy of the iLBC codec. FFmpeg can make use of the libilbc library for
|
||||
iLBC encoding and decoding.
|
||||
|
||||
Go to @url{https://github.com/dekkers/libilbc} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libilbc} to configure to
|
||||
enable it.
|
||||
|
||||
|
||||
|
||||
@chapter Supported File Formats, Codecs or Features
|
||||
@@ -143,19 +116,11 @@ library:
|
||||
@item American Laser Games MM @tab @tab X
|
||||
@tab Multimedia format used in games like Mad Dog McCree.
|
||||
@item 3GPP AMR @tab X @tab X
|
||||
@item Amazing Studio Packed Animation File @tab @tab X
|
||||
@tab Multimedia format used in game Heart Of Darkness.
|
||||
@item Apple HTTP Live Streaming @tab @tab X
|
||||
@item Artworx Data Format @tab @tab X
|
||||
@item AFC @tab @tab X
|
||||
@tab Audio format used on the Nintendo Gamecube.
|
||||
@item ASF @tab X @tab X
|
||||
@item AST @tab X @tab X
|
||||
@tab Audio format used on the Nintendo Wii.
|
||||
@item AVI @tab X @tab X
|
||||
@item AVISynth @tab @tab X
|
||||
@item AVR @tab @tab X
|
||||
@tab Audio format used on Mac.
|
||||
@item AVS @tab @tab X
|
||||
@tab Multimedia format used by the Creature Shock game.
|
||||
@item Beam Software SIFF @tab @tab X
|
||||
@@ -169,8 +134,6 @@ library:
|
||||
@tab Used in Z and Z95 games.
|
||||
@item Brute Force & Ignorance @tab @tab X
|
||||
@tab Used in the game Flash Traffic: City of Angels.
|
||||
@item BRSTM @tab @tab X
|
||||
@tab Audio format used on the Nintendo Wii.
|
||||
@item BWF @tab X @tab X
|
||||
@item CRI ADX @tab X @tab X
|
||||
@tab Audio-only format used in console video games.
|
||||
@@ -201,7 +164,6 @@ library:
|
||||
@item Electronic Arts cdata @tab @tab X
|
||||
@item Electronic Arts Multimedia @tab @tab X
|
||||
@tab Used in various EA games; files have extensions like WVE and UV2.
|
||||
@item Ensoniq Paris Audio File @tab @tab X
|
||||
@item FFM (FFserver live feed) @tab X @tab X
|
||||
@item Flash (SWF) @tab X @tab X
|
||||
@item Flash 9 (AVM2) @tab X @tab X
|
||||
@@ -216,12 +178,12 @@ library:
|
||||
@item G.723.1 @tab X @tab X
|
||||
@item G.729 BIT @tab X @tab X
|
||||
@item G.729 raw @tab @tab X
|
||||
@item GIF Animation @tab X @tab X
|
||||
@item GIF Animation @tab X @tab
|
||||
@item GXF @tab X @tab X
|
||||
@tab General eXchange Format SMPTE 360M, used by Thomson Grass Valley
|
||||
playout servers.
|
||||
@item iCEDraw File @tab @tab X
|
||||
@item ICO @tab X @tab X
|
||||
@item ICO @tab @tab X
|
||||
@tab Microsoft Windows ICO
|
||||
@item id Quake II CIN video @tab @tab X
|
||||
@item id RoQ @tab X @tab X
|
||||
@@ -229,20 +191,17 @@ library:
|
||||
@item IEC61937 encapsulation @tab X @tab X
|
||||
@item IFF @tab @tab X
|
||||
@tab Interchange File Format
|
||||
@item iLBC @tab X @tab X
|
||||
@item Interplay MVE @tab @tab X
|
||||
@tab Format used in various Interplay computer games.
|
||||
@item IV8 @tab @tab X
|
||||
@tab A format generated by IndigoVision 8000 video server.
|
||||
@item IVF (On2) @tab X @tab X
|
||||
@tab A format used by libvpx
|
||||
@item IRCAM @tab X @tab X
|
||||
@item LATM @tab X @tab X
|
||||
@item LMLM4 @tab @tab X
|
||||
@tab Used by Linux Media Labs MPEG-4 PCI boards
|
||||
@item LOAS @tab @tab X
|
||||
@tab contains LATM multiplexed AAC audio
|
||||
@item LVF @tab @tab X
|
||||
@item LXF @tab @tab X
|
||||
@tab VR native stream format, used by Leitch/Harris' video servers.
|
||||
@item Matroska @tab X @tab X
|
||||
@@ -253,8 +212,6 @@ library:
|
||||
@tab Used in Sim City 3000; file extension .xa.
|
||||
@item MD Studio @tab @tab X
|
||||
@item Metal Gear Solid: The Twin Snakes @tab @tab X
|
||||
@item Megalux Frame @tab @tab X
|
||||
@tab Used by Megalux Ultimate Paint
|
||||
@item Mobotix .mxg @tab @tab X
|
||||
@item Monkey's Audio @tab @tab X
|
||||
@item Motion Pixels MVI @tab @tab X
|
||||
@@ -282,7 +239,6 @@ library:
|
||||
@tab SMPTE 386M, D-10/IMX Mapping.
|
||||
@item NC camera feed @tab @tab X
|
||||
@tab NC (AVIP NC4600) camera streams
|
||||
@item NIST SPeech HEader REsources @tab @tab X
|
||||
@item NTT TwinVQ (VQF) @tab @tab X
|
||||
@tab Nippon Telegraph and Telephone Corporation TwinVQ.
|
||||
@item Nullsoft Streaming Video @tab @tab X
|
||||
@@ -291,7 +247,6 @@ library:
|
||||
@tab NUT Open Container Format
|
||||
@item Ogg @tab X @tab X
|
||||
@item Playstation Portable PMP @tab @tab X
|
||||
@item Portable Voice Format @tab @tab X
|
||||
@item TechnoTrend PVA @tab @tab X
|
||||
@tab Used by TechnoTrend DVB PCI boards.
|
||||
@item QCP @tab @tab X
|
||||
@@ -302,7 +257,6 @@ library:
|
||||
@item raw Dirac @tab X @tab X
|
||||
@item raw DNxHD @tab X @tab X
|
||||
@item raw DTS @tab X @tab X
|
||||
@item raw DTS-HD @tab @tab X
|
||||
@item raw E-AC-3 @tab X @tab X
|
||||
@item raw FLAC @tab X @tab X
|
||||
@item raw GSM @tab @tab X
|
||||
@@ -320,7 +274,6 @@ library:
|
||||
@item raw video @tab X @tab X
|
||||
@item raw id RoQ @tab X @tab
|
||||
@item raw Shorten @tab @tab X
|
||||
@item raw TAK @tab @tab X
|
||||
@item raw TrueHD @tab X @tab X
|
||||
@item raw VC-1 @tab @tab X
|
||||
@item raw PCM A-law @tab X @tab X
|
||||
@@ -362,7 +315,6 @@ library:
|
||||
@item SDP @tab @tab X
|
||||
@item Sega FILM/CPK @tab @tab X
|
||||
@tab Used in many Sega Saturn console games.
|
||||
@item Silicon Graphics Movie @tab @tab X
|
||||
@item Sierra SOL @tab @tab X
|
||||
@tab .sol files used in Sierra Online games.
|
||||
@item Sierra VMD @tab @tab X
|
||||
@@ -371,12 +323,10 @@ library:
|
||||
@tab Multimedia format used by many games.
|
||||
@item SMJPEG @tab X @tab X
|
||||
@tab Used in certain Loki game ports.
|
||||
@item Smush @tab @tab X
|
||||
@tab Multimedia format used in some LucasArts games.
|
||||
@item Sony OpenMG (OMA) @tab X @tab X
|
||||
@tab Audio format used in Sony Sonic Stage and Sony Vegas.
|
||||
@item Sony PlayStation STR @tab @tab X
|
||||
@item Sony Wave64 (W64) @tab X @tab X
|
||||
@item Sony Wave64 (W64) @tab @tab X
|
||||
@item SoX native format @tab X @tab X
|
||||
@item SUN AU format @tab X @tab X
|
||||
@item Text files @tab @tab X
|
||||
@@ -386,9 +336,8 @@ library:
|
||||
@tab Tiertex .seq files used in the DOS CD-ROM version of the game Flashback.
|
||||
@item True Audio @tab @tab X
|
||||
@item VC-1 test bitstream @tab X @tab X
|
||||
@item Vivo @tab @tab X
|
||||
@item WAV @tab X @tab X
|
||||
@item WavPack @tab X @tab X
|
||||
@item WavPack @tab @tab X
|
||||
@item WebM @tab X @tab X
|
||||
@item Windows Televison (WTV) @tab X @tab X
|
||||
@item Wing Commander III movie @tab @tab X
|
||||
@@ -404,6 +353,7 @@ library:
|
||||
@item eXtended BINary text (XBIN) @tab @tab X
|
||||
@item YUV4MPEG pipe @tab X @tab X
|
||||
@item Psygnosis YOP @tab @tab X
|
||||
@item ZeroCodec Lossless Video @tab @tab X
|
||||
@end multitable
|
||||
|
||||
@code{X} means that encoding (resp. decoding) is supported.
|
||||
@@ -421,8 +371,6 @@ following image formats are supported:
|
||||
@tab Only uncompressed GIFs are generated.
|
||||
@item BMP @tab X @tab X
|
||||
@tab Microsoft BMP image
|
||||
@item PIX @tab @tab X
|
||||
@tab PIX is an image format used in the Argonaut BRender engine.
|
||||
@item DPX @tab X @tab X
|
||||
@tab Digital Picture Exchange
|
||||
@item EXR @tab @tab X
|
||||
@@ -460,8 +408,6 @@ following image formats are supported:
|
||||
@tab Targa (.TGA) image format
|
||||
@item XBM @tab X @tab X
|
||||
@tab X BitMap image format
|
||||
@item XFace @tab X @tab X
|
||||
@tab X-Face image format
|
||||
@item XWD @tab X @tab X
|
||||
@tab X Window Dump image format
|
||||
@end multitable
|
||||
@@ -477,9 +423,10 @@ following image formats are supported:
|
||||
@item 4X Movie @tab @tab X
|
||||
@tab Used in certain computer games.
|
||||
@item 8088flex TMV @tab @tab X
|
||||
@item 8SVX exponential @tab @tab X
|
||||
@item 8SVX fibonacci @tab @tab X
|
||||
@item A64 multicolor @tab X @tab
|
||||
@tab Creates video suitable to be played on a commodore 64 (multicolor mode).
|
||||
@item Amazing Studio PAF Video @tab @tab X
|
||||
@item American Laser Games MM @tab @tab X
|
||||
@tab Used in games like Mad Dog McCree.
|
||||
@item AMV Video @tab X @tab X
|
||||
@@ -529,11 +476,9 @@ following image formats are supported:
|
||||
@item Delphine Software International CIN video @tab @tab X
|
||||
@tab Codec used in Delphine Software International games.
|
||||
@item Discworld II BMV Video @tab @tab X
|
||||
@item Canopus Lossless Codec @tab @tab X
|
||||
@item Cinepak @tab @tab X
|
||||
@item Cirrus Logic AccuPak @tab X @tab X
|
||||
@tab fourcc: CLJR
|
||||
@item CPiA Video Format @tab @tab X
|
||||
@item Creative YUV (CYUV) @tab @tab X
|
||||
@item DFA @tab @tab X
|
||||
@tab Codec used in Chronomaster game.
|
||||
@@ -603,18 +548,8 @@ following image formats are supported:
|
||||
@item LCL (LossLess Codec Library) MSZH @tab @tab X
|
||||
@item LCL (LossLess Codec Library) ZLIB @tab E @tab E
|
||||
@item LOCO @tab @tab X
|
||||
@item LucasArts Smush @tab @tab X
|
||||
@tab Used in LucasArts games.
|
||||
@item lossless MJPEG @tab X @tab X
|
||||
@item Microsoft ATC Screen @tab @tab X
|
||||
@tab Also known as Microsoft Screen 3.
|
||||
@item Microsoft Expression Encoder Screen @tab @tab X
|
||||
@tab Also known as Microsoft Titanium Screen 2.
|
||||
@item Microsoft RLE @tab @tab X
|
||||
@item Microsoft Screen 1 @tab @tab X
|
||||
@tab Also known as Windows Media Video V7 Screen.
|
||||
@item Microsoft Screen 2 @tab @tab X
|
||||
@tab Also known as Windows Media Video V9 Screen.
|
||||
@item Microsoft Video 1 @tab @tab X
|
||||
@item Mimic @tab @tab X
|
||||
@tab Used in MSN Messenger Webcam streams.
|
||||
@@ -643,8 +578,8 @@ following image formats are supported:
|
||||
@tab fourcc: VP60,VP61,VP62
|
||||
@item VP8 @tab E @tab X
|
||||
@tab fourcc: VP80, encoding supported through external library libvpx
|
||||
@item Pinnacle TARGA CineWave YUV16 @tab @tab X
|
||||
@tab fourcc: Y216
|
||||
@item planar RGB @tab @tab X
|
||||
@tab fourcc: 8BPS
|
||||
@item Prores @tab @tab X
|
||||
@tab fourcc: apch,apcn,apcs,apco
|
||||
@item Q-team QPEG @tab @tab X
|
||||
@@ -668,11 +603,8 @@ following image formats are supported:
|
||||
@tab Texture dictionaries used by the Renderware Engine.
|
||||
@item RL2 video @tab @tab X
|
||||
@tab used in some games by Entertainment Software Partners
|
||||
@item SGI RLE 8-bit @tab @tab X
|
||||
@item Sierra VMD video @tab @tab X
|
||||
@tab Used in Sierra VMD files.
|
||||
@item Silicon Graphics Motion Video Compressor 1 (MVC1) @tab @tab X
|
||||
@item Silicon Graphics Motion Video Compressor 2 (MVC2) @tab @tab X
|
||||
@item Smacker video @tab @tab X
|
||||
@tab Video encoding used in Smacker.
|
||||
@item SMPTE VC-1 @tab @tab X
|
||||
@@ -687,13 +619,11 @@ following image formats are supported:
|
||||
@tab fourcc: SP5X
|
||||
@item TechSmith Screen Capture Codec @tab @tab X
|
||||
@tab fourcc: TSCC
|
||||
@item TechSmith Screen Capture Codec 2 @tab @tab X
|
||||
@tab fourcc: TSC2
|
||||
@item Theora @tab E @tab X
|
||||
@tab encoding supported through external library libtheora
|
||||
@item Tiertex Limited SEQ video @tab @tab X
|
||||
@tab Codec used in DOS CD-ROM FlashBack game.
|
||||
@item Ut Video @tab X @tab X
|
||||
@item Ut Video @tab @tab X
|
||||
@item v210 QuickTime uncompressed 4:2:2 10-bit @tab X @tab X
|
||||
@item v308 QuickTime uncompressed 4:4:4 @tab X @tab X
|
||||
@item v408 QuickTime uncompressed 4:4:4:4 @tab X @tab X
|
||||
@@ -717,7 +647,6 @@ following image formats are supported:
|
||||
@item Psygnosis YOP Video @tab @tab X
|
||||
@item yuv4 @tab X @tab X
|
||||
@tab libquicktime uncompressed packed 4:2:0
|
||||
@item ZeroCodec Lossless Video @tab @tab X
|
||||
@item ZLIB @tab X @tab X
|
||||
@tab part of LCL, encoder experimental
|
||||
@item Zip Motion Blocks Video @tab X @tab X
|
||||
@@ -732,8 +661,7 @@ following image formats are supported:
|
||||
|
||||
@multitable @columnfractions .4 .1 .1 .4
|
||||
@item Name @tab Encoding @tab Decoding @tab Comments
|
||||
@item 8SVX exponential @tab @tab X
|
||||
@item 8SVX fibonacci @tab @tab X
|
||||
@item 8SVX audio @tab @tab X
|
||||
@item AAC+ @tab E @tab X
|
||||
@tab encoding supported through external library libaacplus
|
||||
@item AAC @tab E @tab X
|
||||
@@ -764,19 +692,19 @@ following image formats are supported:
|
||||
@item ADPCM IMA Westwood @tab @tab X
|
||||
@item ADPCM ISS IMA @tab @tab X
|
||||
@tab Used in FunCom games.
|
||||
@item ADPCM IMA Dialogic @tab @tab X
|
||||
@item ADPCM IMA Duck DK3 @tab @tab X
|
||||
@tab Used in some Sega Saturn console games.
|
||||
@item ADPCM IMA Duck DK4 @tab @tab X
|
||||
@tab Used in some Sega Saturn console games.
|
||||
@item ADPCM Microsoft @tab X @tab X
|
||||
@item ADPCM MS IMA @tab X @tab X
|
||||
@item ADPCM Nintendo Gamecube AFC @tab @tab X
|
||||
@item ADPCM Nintendo Gamecube THP @tab @tab X
|
||||
@item ADPCM QT IMA @tab X @tab X
|
||||
@item ADPCM SEGA CRI ADX @tab X @tab X
|
||||
@tab Used in Sega Dreamcast games.
|
||||
@item ADPCM Shockwave Flash @tab X @tab X
|
||||
@item ADPCM SMJPEG IMA @tab @tab X
|
||||
@tab Used in certain Loki game ports.
|
||||
@item ADPCM Sound Blaster Pro 2-bit @tab @tab X
|
||||
@item ADPCM Sound Blaster Pro 2.6-bit @tab @tab X
|
||||
@item ADPCM Sound Blaster Pro 4-bit @tab @tab X
|
||||
@@ -787,7 +715,6 @@ following image formats are supported:
|
||||
@tab encoding supported through external library libopencore-amrnb
|
||||
@item AMR-WB @tab E @tab X
|
||||
@tab encoding supported through external library libvo-amrwbenc
|
||||
@item Amazing Studio PAF Audio @tab @tab X
|
||||
@item Apple lossless audio @tab X @tab X
|
||||
@tab QuickTime fourcc 'alac'
|
||||
@item Atrac 1 @tab @tab X
|
||||
@@ -821,9 +748,6 @@ following image formats are supported:
|
||||
@tab encoding supported through external library libgsm
|
||||
@item GSM Microsoft variant @tab E @tab X
|
||||
@tab encoding supported through external library libgsm
|
||||
@item IAC (Indeo Audio Coder) @tab @tab X
|
||||
@item iLBC (Internet Low Bitrate Codec) @tab E @tab E
|
||||
@tab encoding and decoding supported through external library libilbc
|
||||
@item IMC (Intel Music Coder) @tab @tab X
|
||||
@item MACE (Macintosh Audio Compression/Expansion) 3:1 @tab @tab X
|
||||
@item MACE (Macintosh Audio Compression/Expansion) 6:1 @tab @tab X
|
||||
@@ -833,22 +757,15 @@ following image formats are supported:
|
||||
@tab Only versions 3.97-3.99 are supported.
|
||||
@item MP1 (MPEG audio layer 1) @tab @tab IX
|
||||
@item MP2 (MPEG audio layer 2) @tab IX @tab IX
|
||||
@tab libtwolame can be used alternatively for encoding.
|
||||
@item MP3 (MPEG audio layer 3) @tab E @tab IX
|
||||
@tab encoding supported through external library LAME, ADU MP3 and MP3onMP4 also supported
|
||||
@item MPEG-4 Audio Lossless Coding (ALS) @tab @tab X
|
||||
@item Musepack SV7 @tab @tab X
|
||||
@item Musepack SV8 @tab @tab X
|
||||
@item Nellymoser Asao @tab X @tab X
|
||||
@item Opus @tab E @tab E
|
||||
@tab supported through external library libopus
|
||||
@item PCM A-law @tab X @tab X
|
||||
@item PCM mu-law @tab X @tab X
|
||||
@item PCM signed 8-bit planar @tab X @tab X
|
||||
@item PCM signed 16-bit big-endian planar @tab X @tab X
|
||||
@item PCM signed 16-bit little-endian planar @tab X @tab X
|
||||
@item PCM signed 24-bit little-endian planar @tab X @tab X
|
||||
@item PCM signed 32-bit little-endian planar @tab X @tab X
|
||||
@item PCM 16-bit little-endian planar @tab @tab X
|
||||
@item PCM 32-bit floating point big-endian @tab X @tab X
|
||||
@item PCM 32-bit floating point little-endian @tab X @tab X
|
||||
@item PCM 64-bit floating point big-endian @tab X @tab X
|
||||
@@ -892,13 +809,10 @@ following image formats are supported:
|
||||
@tab experimental codec
|
||||
@item Speex @tab E @tab E
|
||||
@tab supported through external library libspeex
|
||||
@item TAK (Tom's lossless Audio Kompressor) @tab @tab X
|
||||
@item True Audio (TTA) @tab @tab X
|
||||
@item TrueHD @tab @tab X
|
||||
@tab Used in HD-DVD and Blu-Ray discs.
|
||||
@item TwinVQ (VQF flavor) @tab @tab X
|
||||
@item VIMA @tab @tab X
|
||||
@tab Used in LucasArts SMUSH animations.
|
||||
@item Vorbis @tab E @tab X
|
||||
@tab A native but very primitive encoder exists.
|
||||
@item WavPack @tab @tab X
|
||||
@@ -921,27 +835,14 @@ performance on systems without hardware floating point support).
|
||||
|
||||
@multitable @columnfractions .4 .1 .1 .1 .1
|
||||
@item Name @tab Muxing @tab Demuxing @tab Encoding @tab Decoding
|
||||
@item 3GPP Timed Text @tab @tab @tab X @tab X
|
||||
@item AQTitle @tab @tab X @tab @tab X
|
||||
@item DVB @tab X @tab X @tab X @tab X
|
||||
@item DVD @tab X @tab X @tab X @tab X
|
||||
@item JACOsub @tab X @tab X @tab @tab X
|
||||
@item MicroDVD @tab X @tab X @tab @tab X
|
||||
@item MPL2 @tab @tab X @tab @tab X
|
||||
@item MPsub (MPlayer) @tab @tab X @tab @tab X
|
||||
@item PGS @tab @tab @tab @tab X
|
||||
@item PJS (Phoenix) @tab @tab X @tab @tab X
|
||||
@item RealText @tab @tab X @tab @tab X
|
||||
@item SAMI @tab @tab X @tab @tab X
|
||||
@item SSA/ASS @tab X @tab X @tab X @tab X
|
||||
@item SubRip (SRT) @tab X @tab X @tab X @tab X
|
||||
@item SubViewer v1 @tab @tab X @tab @tab X
|
||||
@item SubViewer @tab @tab X @tab @tab X
|
||||
@item TED Talks captions @tab @tab X @tab @tab X
|
||||
@item VobSub (IDX+SUB) @tab @tab X @tab @tab X
|
||||
@item VPlayer @tab @tab X @tab @tab X
|
||||
@item WebVTT @tab @tab X @tab @tab X
|
||||
@item XSUB @tab @tab @tab X @tab X
|
||||
@item SSA/ASS @tab X @tab X @tab X @tab X
|
||||
@item DVB @tab X @tab X @tab X @tab X
|
||||
@item DVD @tab X @tab X @tab X @tab X
|
||||
@item JACOsub @tab X @tab X @tab @tab X
|
||||
@item MicroDVD @tab X @tab X @tab @tab X
|
||||
@item PGS @tab @tab @tab @tab X
|
||||
@item SubRip (SRT) @tab X @tab X @tab X @tab X
|
||||
@item XSUB @tab @tab @tab X @tab X
|
||||
@end multitable
|
||||
|
||||
@code{X} means that the feature is supported.
|
||||
@@ -950,31 +851,19 @@ performance on systems without hardware floating point support).
|
||||
|
||||
@multitable @columnfractions .4 .1
|
||||
@item Name @tab Support
|
||||
@item Apple HTTP Live Streaming @tab X
|
||||
@item file @tab X
|
||||
@item Gopher @tab X
|
||||
@item HLS @tab X
|
||||
@item HTTP @tab X
|
||||
@item HTTPS @tab X
|
||||
@item MMSH @tab X
|
||||
@item MMST @tab X
|
||||
@item MMS @tab X
|
||||
@item pipe @tab X
|
||||
@item RTMP @tab X
|
||||
@item RTMPE @tab X
|
||||
@item RTMPS @tab X
|
||||
@item RTMPT @tab X
|
||||
@item RTMPTE @tab X
|
||||
@item RTMPTS @tab X
|
||||
@item RTP @tab X
|
||||
@item SCTP @tab X
|
||||
@item TCP @tab X
|
||||
@item TLS @tab X
|
||||
@item UDP @tab X
|
||||
@end multitable
|
||||
|
||||
@code{X} means that the protocol is supported.
|
||||
|
||||
@code{E} means that support is provided through an external library.
|
||||
|
||||
|
||||
@section Input/Output Devices
|
||||
|
||||
@@ -982,17 +871,13 @@ performance on systems without hardware floating point support).
|
||||
@item Name @tab Input @tab Output
|
||||
@item ALSA @tab X @tab X
|
||||
@item BKTR @tab X @tab
|
||||
@item caca @tab @tab X
|
||||
@item DV1394 @tab X @tab
|
||||
@item Lavfi virtual device @tab X @tab
|
||||
@item Linux framebuffer @tab X @tab
|
||||
@item JACK @tab X @tab
|
||||
@item LIBCDIO @tab X
|
||||
@item LIBDC1394 @tab X @tab
|
||||
@item OpenAL @tab X
|
||||
@item OSS @tab X @tab X
|
||||
@item Pulseaudio @tab X @tab
|
||||
@item SDL @tab @tab X
|
||||
@item Video4Linux2 @tab X @tab
|
||||
@item VfW capture @tab X @tab
|
||||
@item X11 grabbing @tab X @tab
|
||||
@@ -1004,10 +889,9 @@ performance on systems without hardware floating point support).
|
||||
|
||||
@multitable @columnfractions .4 .1 .1
|
||||
@item Codec/format @tab Read @tab Write
|
||||
@item AVI @tab X @tab X
|
||||
@item DV @tab X @tab X
|
||||
@item GXF @tab X @tab X
|
||||
@item MOV @tab X @tab X
|
||||
@item MOV @tab X @tab
|
||||
@item MPEG1/2 @tab X @tab X
|
||||
@item MXF @tab X @tab X
|
||||
@end multitable
|
||||
|
@@ -258,32 +258,6 @@ git commit
|
||||
@end example
|
||||
|
||||
|
||||
@chapter Git configuration
|
||||
|
||||
In order to simplify a few workflows, it is advisable to configure both
|
||||
your personal Git installation and your local FFmpeg repository.
|
||||
|
||||
@section Personal Git installation
|
||||
|
||||
Add the following to your @file{~/.gitconfig} to help @command{git send-email}
|
||||
and @command{git format-patch} detect renames:
|
||||
|
||||
@example
|
||||
[diff]
|
||||
renames = copy
|
||||
@end example
|
||||
|
||||
@section Repository configuration
|
||||
|
||||
In order to have @command{git send-email} automatically send patches
|
||||
to the ffmpeg-devel mailing list, add the following stanza
|
||||
to @file{/path/to/ffmpeg/repository/.git/config}:
|
||||
|
||||
@example
|
||||
[sendemail]
|
||||
to = ffmpeg-devel@@ffmpeg.org
|
||||
@end example
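With this stanza in place, a patch series can be prepared and sent in two
steps, for example (a sketch; the commit range and output directory are
arbitrary):

@example
git format-patch -o patches origin/master..HEAD
git send-email patches/*.patch
@end example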
|
||||
|
||||
@chapter FFmpeg specific
|
||||
|
||||
@section Reverting broken commits
|
||||
@@ -372,43 +346,6 @@ git checkout -b svn_23456 $SHA1
|
||||
|
||||
where @var{$SHA1} is the commit hash from the @command{git log} output.
|
||||
|
||||
|
||||
@chapter pre-push checklist
|
||||
|
||||
Once you have a set of commits that you feel are ready for pushing,
|
||||
work through the following checklist to double-check that everything is in
|
||||
proper order. This list tries to be exhaustive. In case you are just
|
||||
pushing a typo in a comment, some of the steps may be unnecessary.
|
||||
Apply your common sense, but if in doubt, err on the side of caution.
|
||||
|
||||
First, make sure that the commits and branches you are going to push
|
||||
match what you want pushed and that nothing is missing, extraneous or
|
||||
wrong. You can see what will be pushed by running the git push command
|
||||
with --dry-run first, and then inspecting the commits listed with
|
||||
@command{git log -p 1234567..987654}. The @command{git status} command
|
||||
may help you find local changes that you have forgotten to add.
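A minimal inspection sequence might look like this (a sketch; the remote,
branch and commit range are placeholders):

@example
git push --dry-run origin master
git log -p origin/master..master
git status
@end example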
|
||||
|
||||
Next let the code pass through a full run of our testsuite.
|
||||
|
||||
@itemize
|
||||
@item @command{make distclean}
|
||||
@item @command{/path/to/ffmpeg/configure}
|
||||
@item @command{make check}
|
||||
@item if fate fails due to missing samples run @command{make fate-rsync} and retry
|
||||
@end itemize
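Spelled out, such a run could look like the following (a sketch; the build
directory and FATE sample path are assumptions):

@example
make distclean
/path/to/ffmpeg/configure --samples=/path/to/fate-suite
make check
# only if FATE failed due to missing samples:
make fate-rsync
make check
@end example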
|
||||
|
||||
Make sure all your changes have been checked before pushing them; the
|
||||
testsuite only checks against regressions, and only to some extent. It
|
||||
obviously does not verify that newly added features/code work unless you have
|
||||
added a test for that (which is recommended).
|
||||
|
||||
Also note that every single commit should pass the test suite, not just
|
||||
the result of a series of patches.
|
||||
|
||||
Once everything has passed, push the changes to your public ffmpeg clone and post a
|
||||
merge request to ffmpeg-devel. You can also push them directly but this is not
|
||||
recommended.
|
||||
|
||||
@chapter Server Issues
|
||||
|
||||
Contact the project admins @email{root@@ffmpeg.org} if you have technical
|
||||
|
131
doc/indevs.texi
@@ -112,19 +112,6 @@ defaults to 0).
|
||||
Set audio device number for devices with same name (starts at 0,
|
||||
defaults to 0).
|
||||
|
||||
@item pixel_format
|
||||
Select pixel format to be used by DirectShow. This may only be set when
|
||||
the video codec is not set or set to rawvideo.
|
||||
|
||||
@item audio_buffer_size
|
||||
Set audio device buffer size in milliseconds (which can directly
|
||||
impact latency, depending on the device).
|
||||
Defaults to using the audio device's
|
||||
default buffer size (typically some multiple of 500ms).
|
||||
Setting this value too low can degrade performance.
|
||||
See also
|
||||
@url{http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx}
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -192,66 +179,6 @@ ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
|
||||
|
||||
See also @url{http://linux-fbdev.sourceforge.net/}, and fbset(1).
|
||||
|
||||
@section iec61883
|
||||
|
||||
FireWire DV/HDV input device using libiec61883.
|
||||
|
||||
To enable this input device, you need libiec61883, libraw1394 and
|
||||
libavc1394 installed on your system. Use the configure option
|
||||
@code{--enable-libiec61883} to compile with the device enabled.
|
||||
|
||||
The iec61883 capture device supports capturing from a video device
|
||||
connected via IEEE1394 (FireWire), using libiec61883 and the new Linux
|
||||
FireWire stack (juju). This is the default DV/HDV input method in Linux
|
||||
Kernel 2.6.37 and later, since the old FireWire stack was removed.
|
||||
|
||||
Specify the FireWire port to be used as input file, or "auto"
|
||||
to choose the first port connected.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item dvtype
|
||||
Override autodetection of DV/HDV. This should only be used if auto
|
||||
detection does not work, or if usage of a different device type
|
||||
should be prohibited. Treating a DV device as HDV (or vice versa) will
|
||||
not work and result in undefined behavior.
|
||||
The values @option{auto}, @option{dv} and @option{hdv} are supported.
|
||||
|
||||
@item dvbuffer
|
||||
Set maximum size of buffer for incoming data, in frames. For DV, this
|
||||
is an exact value. For HDV, it is not frame exact, since HDV does
|
||||
not have a fixed frame size.
|
||||
|
||||
@item dvguid
|
||||
Select the capture device by specifying its GUID. Capturing will only
|
||||
be performed from the specified device and fails if no device with the
|
||||
given GUID is found. This is useful to select the input if multiple
|
||||
devices are connected at the same time.
|
||||
Look at /sys/bus/firewire/devices to find out the GUIDs.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
Grab and show the input of a FireWire DV/HDV device.
|
||||
@example
|
||||
ffplay -f iec61883 -i auto
|
||||
@end example
|
||||
|
||||
@item
|
||||
Grab and record the input of a FireWire DV/HDV device,
|
||||
using a packet buffer of 100000 packets if the source is HDV.
|
||||
@example
|
||||
ffmpeg -f iec61883 -i auto -dvbuffer 100000 out.mpg
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section jack
|
||||
|
||||
JACK input device.
|
||||
@@ -327,12 +254,6 @@ label, but all the others need to be specified explicitly.
|
||||
|
||||
If not specified defaults to the filename specified for the input
|
||||
device.
|
||||
|
||||
@item graph_file
|
||||
Set the filename of the filtergraph to be read and sent to the other
|
||||
filters. Syntax of the filtergraph is the same as the one specified by
|
||||
the option @var{graph}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -341,14 +262,14 @@ the option @var{graph}.
|
||||
@item
|
||||
Create a color video stream and play it back with @command{ffplay}:
|
||||
@example
|
||||
ffplay -f lavfi -graph "color=c=pink [out0]" dummy
|
||||
ffplay -f lavfi -graph "color=pink [out0]" dummy
|
||||
@end example
|
||||
|
||||
@item
|
||||
As the previous example, but use filename for specifying the graph
|
||||
description, and omit the "out0" label:
|
||||
@example
|
||||
ffplay -f lavfi color=c=pink
|
||||
ffplay -f lavfi color=pink
|
||||
@end example
|
||||
|
||||
@item
|
||||
@@ -656,23 +577,17 @@ properties of your X11 display (e.g. grep for "name" or "dimensions").
|
||||
For example to grab from @file{:0.0} using @command{ffmpeg}:
|
||||
@example
|
||||
ffmpeg -f x11grab -r 25 -s cif -i :0.0 out.mpg
|
||||
@end example
|
||||
|
||||
Grab at position @code{10,20}:
|
||||
@example
|
||||
# Grab at position 10,20.
|
||||
ffmpeg -f x11grab -r 25 -s cif -i :0.0+10,20 out.mpg
|
||||
@end example
|
||||
|
||||
@subsection Options
|
||||
@subsection @var{follow_mouse} AVOption
|
||||
|
||||
@table @option
|
||||
@item draw_mouse
|
||||
Specify whether to draw the mouse pointer. A value of @code{0} specifies
|
||||
not to draw the pointer. Default value is @code{1}.
|
||||
|
||||
@item follow_mouse
|
||||
Make the grabbed area follow the mouse. The argument can be
|
||||
@code{centered} or a number of pixels @var{PIXELS}.
|
||||
The syntax is:
|
||||
@example
|
||||
-follow_mouse centered|@var{PIXELS}
|
||||
@end example
|
||||
|
||||
When it is specified with "centered", the grabbing region follows the mouse
|
||||
pointer and keeps the pointer at the center of the region; otherwise, the region
|
||||
@@ -682,36 +597,28 @@ zero) to the edge of region.
|
||||
For example:
|
||||
@example
|
||||
ffmpeg -f x11grab -follow_mouse centered -r 25 -s cif -i :0.0 out.mpg
|
||||
@end example
|
||||
|
||||
To follow only when the mouse pointer reaches within 100 pixels of the edge:
|
||||
@example
|
||||
# Follow only when the mouse pointer reaches within 100 pixels of the edge
|
||||
ffmpeg -f x11grab -follow_mouse 100 -r 25 -s cif -i :0.0 out.mpg
|
||||
@end example
|
||||
|
||||
@item framerate
|
||||
Set the grabbing frame rate. Default value is @code{ntsc},
|
||||
corresponding to a framerate of @code{30000/1001}.
|
||||
@subsection @var{show_region} AVOption
|
||||
|
||||
@item show_region
|
||||
Show grabbed region on screen.
|
||||
The syntax is:
|
||||
@example
|
||||
-show_region 1
|
||||
@end example
|
||||
|
||||
If @var{show_region} is specified with @code{1}, then the grabbing
|
||||
region will be indicated on screen. With this option, it is easy to
|
||||
know what is being grabbed if only a portion of the screen is grabbed.
|
||||
If @var{show_region} AVOption is specified with @var{1}, then the grabbing
|
||||
region will be indicated on screen. With this option, it's easy to know what is
|
||||
being grabbed if only a portion of the screen is grabbed.
|
||||
|
||||
For example:
|
||||
@example
|
||||
ffmpeg -f x11grab -show_region 1 -r 25 -s cif -i :0.0+10,20 out.mpg
|
||||
@end example
|
||||
|
||||
With @var{follow_mouse}:
|
||||
@example
|
||||
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -r 25 -s cif -i :0.0 out.mpg
|
||||
# With follow_mouse
|
||||
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -r 25 -s cif -i :0.0 out.mpg
|
||||
@end example
|
||||
|
||||
@item video_size
|
||||
Set the video frame size. Default value is @code{vga}.
|
||||
@end table
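Putting the private options together, a grab that disables the pointer and
uses explicit size and rate values might look like this (a sketch; display
name and offsets are arbitrary):

@example
ffmpeg -f x11grab -draw_mouse 0 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
@end example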
|
||||
|
||||
@c man end INPUT DEVICES
|
||||
|
@@ -1,48 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle Libavcodec Documentation
|
||||
@titlepage
|
||||
@center @titlefont{Libavcodec Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
The libavcodec library provides a generic encoding/decoding framework
|
||||
and contains multiple decoders and encoders for audio, video and
|
||||
subtitle streams, and several bitstream filters.
|
||||
|
||||
The shared architecture provides various services ranging from bit
|
||||
stream I/O to DSP optimizations, and makes it suitable for
|
||||
implementing robust and fast codecs as well as for experimentation.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-codecs.html,ffmpeg-codecs}, @url{ffmpeg-bitstream-filters.html,bitstream-filters},
|
||||
@url{libavutil.html,libavutil}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
|
||||
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1),
|
||||
libavutil(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename libavcodec
|
||||
@settitle media streams decoding and encoding library
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,45 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle Libavdevice Documentation
|
||||
@titlepage
|
||||
@center @titlefont{Libavdevice Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
The libavdevice library provides a generic framework for grabbing from
|
||||
and rendering to many common multimedia input/output devices, and
|
||||
supports several input and output devices, including Video4Linux2,
|
||||
VfW, DShow, and ALSA.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-devices.html,ffmpeg-devices},
|
||||
@url{libavutil.html,libavutil}, @url{libavcodec.html,libavcodec}, @url{libavformat.html,libavformat}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
|
||||
ffmpeg-devices(1),
|
||||
libavutil(3), libavcodec(3), libavformat(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename libavdevice
|
||||
@settitle multimedia device handling library
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -9,36 +9,84 @@
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
@chapter Introduction
|
||||
|
||||
The libavfilter library provides a generic audio/video filtering
|
||||
framework containing several filters, sources and sinks.
|
||||
Libavfilter is the filtering API of FFmpeg. It is the substitute of the
|
||||
now deprecated 'vhooks' and started as a Google Summer of Code project.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
Audio filtering integration into the main FFmpeg repository is a work in
|
||||
progress, so audio API and ABI should not be considered stable yet.
|
||||
|
||||
@chapter See Also
|
||||
@chapter Tutorial
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-filters.html,ffmpeg-filters},
|
||||
@url{libavutil.html,libavutil}, @url{libswscale.html,libswscale}, @url{libswresample.html,libswresample},
|
||||
@url{libavcodec.html,libavcodec}, @url{libavformat.html,libavformat}, @url{libavdevice.html,libavdevice}
|
||||
@end ifhtml
|
||||
In libavfilter, it is possible for filters to have multiple inputs and
|
||||
multiple outputs.
|
||||
To illustrate the sorts of things that are possible, we can
|
||||
use a complex filter graph. For example, the following one:
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
|
||||
ffmpeg-filters(1),
|
||||
libavutil(3), libswscale(3), libswresample(3), libavcodec(3), libavformat(3), libavdevice(3)
|
||||
@end ifnothtml
|
||||
@example
|
||||
input --> split --> fifo -----------------------> overlay --> output
|
||||
| ^
|
||||
| |
|
||||
+------> fifo --> crop --> vflip --------+
|
||||
@end example
|
||||
|
||||
@include authors.texi
|
||||
splits the stream in two streams, sends one stream through the crop filter
|
||||
and the vflip filter before merging it back with the other stream by
|
||||
overlaying it on top. You can use the following command to achieve this:
|
||||
|
||||
@ignore
|
||||
@example
|
||||
ffmpeg -i input -vf "[in] split [T1], fifo, [T2] overlay=0:H/2 [out]; [T1] fifo, crop=iw:ih/2:0:ih/2, vflip [T2]" output
|
||||
@end example
|
||||
|
||||
@setfilename libavfilter
|
||||
@settitle multimedia filtering library
|
||||
The result will be that in output the top half of the video is mirrored
|
||||
onto the bottom half.
|
||||
|
||||
@end ignore
|
||||
Video filters are loaded using the @var{-vf} option passed to
|
||||
@command{ffmpeg} or to @command{ffplay}. Filters in the same linear
|
||||
chain are separated by commas. In our example, @var{split, fifo,
|
||||
overlay} are in one linear chain, and @var{fifo, crop, vflip} are in
|
||||
another. The points where the linear chains join are labeled by names
|
||||
enclosed in square brackets. In our example, that is @var{[T1]} and
|
||||
@var{[T2]}. The magic labels @var{[in]} and @var{[out]} are the points
|
||||
where video is input and output.
|
||||
|
||||
Some filters take a list of parameters as input: they are specified
|
||||
after the filter name and an equal sign, and are separated from each other
|
||||
by a colon.
|
||||
|
||||
There exist so-called @var{source filters} that do not have a video
|
||||
input, and we expect in the future some @var{sink filters} that will
|
||||
not have video output.
|
||||
|
||||
@chapter graph2dot
|
||||
|
||||
The @file{graph2dot} program included in the FFmpeg @file{tools}
|
||||
directory can be used to parse a filter graph description and issue a
|
||||
corresponding textual representation in the dot language.
|
||||
|
||||
Invoke the command:
|
||||
@example
|
||||
graph2dot -h
|
||||
@end example
|
||||
|
||||
to see how to use @file{graph2dot}.
|
||||
|
||||
You can then pass the dot description to the @file{dot} program (from
|
||||
the graphviz suite of programs) and obtain a graphical representation
|
||||
of the filter graph.
|
||||
|
||||
For example the sequence of commands:
|
||||
@example
|
||||
echo @var{GRAPH_DESCRIPTION} | \
|
||||
tools/graph2dot -o graph.tmp && \
|
||||
dot -Tpng graph.tmp -o graph.png && \
|
||||
display graph.png
|
||||
@end example
|
||||
|
||||
can be used to create and display an image representing the graph
|
||||
described by the @var{GRAPH_DESCRIPTION} string.
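For instance, piping a small description straight in (assuming the tools
have been built in the source tree):

@example
echo "nullsrc, scale=640:360, nullsink" | \
        tools/graph2dot -o graph.tmp && \
        dot -Tpng graph.tmp -o graph.png && \
        display graph.png
@end example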
|
||||
|
||||
@include filters.texi
|
||||
|
||||
@bye
|
||||
|
@@ -1,48 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle Libavformat Documentation
|
||||
@titlepage
|
||||
@center @titlefont{Libavformat Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
The libavformat library provides a generic framework for multiplexing
|
||||
and demultiplexing (muxing and demuxing) audio, video and subtitle
|
||||
streams. It encompasses multiple muxers and demuxers for multimedia
|
||||
container formats.
|
||||
|
||||
It also supports several input and output protocols to access a media
|
||||
resource.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-formats.html,ffmpeg-formats}, @url{ffmpeg-protocols.html,ffmpeg-protocols},
|
||||
@url{libavutil.html,libavutil}, @url{libavcodec.html,libavcodec}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
|
||||
ffmpeg-formats(1), ffmpeg-protocols(1),
|
||||
libavutil(3), libavcodec(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename libavformat
|
||||
@settitle multimedia muxing and demuxing library
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,44 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle Libavutil Documentation
|
||||
@titlepage
|
||||
@center @titlefont{Libavutil Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
The libavutil library is a utility library to aid portable
|
||||
multimedia programming. It contains safe portable string functions,
|
||||
random number generators, data structures, additional mathematics
|
||||
functions, cryptography and multimedia related functionality (like
|
||||
enumerations for pixel and sample formats).
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-utils.html,ffmpeg-utils}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
|
||||
ffmpeg-utils(1)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename libavutil
|
||||
@settitle multimedia-biased utility library
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,70 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle Libswresample Documentation
|
||||
@titlepage
|
||||
@center @titlefont{Libswresample Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
The libswresample library performs highly optimized audio resampling,
|
||||
rematrixing and sample format conversion operations.
|
||||
|
||||
Specifically, this library performs the following conversions:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
@emph{Resampling}: is the process of changing the audio rate, for
|
||||
example from a high sample rate of 44100Hz to 8000Hz. Audio
|
||||
conversion from high to low sample rate is a lossy process. Several
|
||||
resampling options and algorithms are available.
|
||||
|
||||
@item
|
||||
@emph{Format conversion}: is the process of converting the type of
|
||||
samples, for example from 16-bit signed samples to unsigned 8-bit or
|
||||
float samples. It also handles packing conversion, when passing from
|
||||
packed layout (all samples belonging to distinct channels interleaved
|
||||
in the same buffer), to planar layout (all samples belonging to the
|
||||
same channel stored in a dedicated buffer or "plane").
|
||||
|
||||
@item
|
||||
@emph{Rematrixing}: is the process of changing the channel layout, for
|
||||
example from stereo to mono. When the input channels cannot be mapped
|
||||
to the output streams, the process is lossy, since it involves
|
||||
different gain factors and mixing.
|
||||
@end itemize
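All three operations can be exercised from the @command{ffmpeg} command
line, which uses this library internally (a sketch; file names and target
parameters are arbitrary):

@example
ffmpeg -i in.wav -ar 8000 -sample_fmt s16 -ac 1 out.wav
@end example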
|
||||
|
||||
Various other audio conversions (e.g. stretching and padding) are
|
||||
enabled through dedicated options.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-resampler.html,ffmpeg-resampler},
|
||||
@url{libavutil.html,libavutil}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
|
||||
ffmpeg-resampler(1),
|
||||
libavutil(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename libswresample
|
||||
@settitle audio resampling library
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@@ -1,63 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle Libswscale Documentation
|
||||
@titlepage
|
||||
@center @titlefont{Libswscale Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
The libswscale library performs highly optimized image scaling and
|
||||
colorspace and pixel format conversion operations.
|
||||
|
||||
Specifically, this library performs the following conversions:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
@emph{Rescaling}: is the process of changing the video size. Several
|
||||
rescaling options and algorithms are available. This is usually a
|
||||
lossy process.
|
||||
|
||||
@item
|
||||
@emph{Pixel format conversion}: is the process of converting the image
|
||||
format and colorspace of the image, for example from planar YUV420P to
|
||||
RGB24 packed. It also handles packing conversion, that is, converting
|
||||
from packed layout (all pixels belonging to distinct planes
|
||||
interleaved in the same buffer), to planar layout (all samples
|
||||
belonging to the same plane stored in a dedicated buffer or "plane").
|
||||
|
||||
This is usually a lossy process in case the source and destination
|
||||
colorspaces differ.
|
||||
@end itemize
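Both operations can be driven from the @command{ffmpeg} command line, which
uses this library for scaling and pixel format conversion (a sketch; size and
format are arbitrary):

@example
ffmpeg -i in.mp4 -vf scale=640:360 -pix_fmt yuv422p out.mp4
@end example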
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-scaler.html,ffmpeg-scaler},
|
||||
@url{libavutil.html,libavutil}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
|
||||
ffmpeg-scaler(1),
|
||||
libavutil(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename libswscale
|
||||
@settitle video scaling and pixel format conversion library
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
67
doc/mips.txt
@@ -1,67 +0,0 @@
|
||||
MIPS optimizations info
|
||||
===============================================
|
||||
|
||||
MIPS optimizations of codecs target the MIPS 74k family of
|
||||
CPUs. Some of these optimizations rely more on properties of
|
||||
this architecture and some rely less (and can be used on most
|
||||
MIPS architectures without degradation in performance).
|
||||
|
||||
Along with the FFmpeg copyright notice, there is a MIPS copyright notice in
|
||||
all the files that are created by people from MIPS Technologies.
|
||||
|
||||
Example of copyright notice:
|
||||
===============================================
|
||||
/*
|
||||
* Copyright (c) 2012
|
||||
* MIPS Technologies, Inc., California.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* Author: Author Name (author_name@@mips.com)
|
||||
*/
|
||||
|
||||
Files that have MIPS copyright notice in them:
|
||||
===============================================
|
||||
* libavutil/mips/
|
||||
float_dsp_mips.c
|
||||
libm_mips.h
|
||||
* libavcodec/mips/
|
||||
ac3dsp_mips.c
|
||||
acelp_filters_mips.c
|
||||
acelp_vectors_mips.c
|
||||
amrwbdec_mips.c
|
||||
amrwbdec_mips.h
|
||||
celp_filters_mips.c
|
||||
celp_math_mips.c
|
||||
compute_antialias_fixed.h
|
||||
compute_antialias_float.h
|
||||
lsp_mips.h
|
||||
dsputil_mips.c
|
||||
fft_mips.c
|
||||
fft_table.h
|
||||
fft_init_table.c
|
||||
fmtconvert_mips.c
|
||||
mpegaudiodsp_mips_fixed.c
|
||||
mpegaudiodsp_mips_float.c
|
266
doc/muxers.texi
@@ -129,65 +129,6 @@ ffmpeg -i INPUT -f framemd5 -
|
||||
|
||||
See also the @ref{md5} muxer.
|
||||
|
||||
@anchor{hls}
|
||||
@section hls
|
||||
|
||||
Apple HTTP Live Streaming muxer that segments MPEG-TS according to
|
||||
the HTTP Live Streaming specification.
|
||||
|
||||
It creates a playlist file and numbered segment files. The output
|
||||
filename specifies the playlist filename; the segment filenames
|
||||
receive the same basename as the playlist, a sequential number and
|
||||
a .ts extension.
|
||||
|
||||
@example
|
||||
ffmpeg -i in.nut out.m3u8
|
||||
@end example
|
||||
|
||||
@table @option
|
||||
@item -hls_time @var{seconds}
|
||||
Set the segment length in seconds.
|
||||
@item -hls_list_size @var{size}
|
||||
Set the maximum number of playlist entries.
|
||||
@item -hls_wrap @var{wrap}
|
||||
Set the number after which index wraps.
|
||||
@item -start_number @var{number}
|
||||
Start the sequence from @var{number}.
|
||||
@end table
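Combining the options above, a longer segmented remux might look like this
(a sketch; the input name and the option values are arbitrary):

@example
ffmpeg -i in.nut -c copy -hls_time 10 -hls_list_size 6 -start_number 1 out.m3u8
@end example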
|
||||
|
||||
@anchor{ico}
|
||||
@section ico
|
||||
|
||||
ICO file muxer.
|
||||
|
||||
Microsoft's icon file format (ICO) has some strict limitations that should be noted:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Size cannot exceed 256 pixels in any dimension
|
||||
|
||||
@item
|
||||
Only BMP and PNG images can be stored
|
||||
|
||||
@item
|
||||
If a BMP image is used, it must be one of the following pixel formats:
|
||||
@example
|
||||
BMP Bit Depth FFmpeg Pixel Format
|
||||
1bit pal8
|
||||
4bit pal8
|
||||
8bit pal8
|
||||
16bit rgb555le
|
||||
24bit bgr24
|
||||
32bit bgra
|
||||
@end example
|
||||
|
||||
@item
|
||||
If a BMP image is used, it must use the BITMAPINFOHEADER DIB header
|
||||
|
||||
@item
|
||||
If a PNG image is used, it must use the rgba pixel format
|
||||
@end itemize
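As an illustration of these constraints, a PNG-based icon could be produced
like this (a sketch; the filter chain forces the required rgba pixel format
and a legal size):

@example
ffmpeg -i logo.png -vf "scale=64:64,format=rgba" -c:v png favicon.ico
@end example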
|
||||
|
||||
@anchor{image2}
|
||||
@section image2
|
||||
|
||||
@@ -240,11 +181,6 @@ Note also that the pattern must not necessarily contain "%d" or
|
||||
ffmpeg -i in.avi -f image2 -frames:v 1 img.jpeg
|
||||
@end example
|
||||
|
||||
@table @option
|
||||
@item -start_number @var{number}
|
||||
Start the sequence from @var{number}.
|
||||
@end table
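For example, to start the numbering of the output images at 100 (the output
pattern is arbitrary):

@example
ffmpeg -i in.avi -f image2 -start_number 100 img-%03d.jpeg
@end example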
|
||||
|
||||
The image muxer supports the .Y.U.V image file format. This format is
|
||||
special in that each image frame consists of three files, one for
|
||||
each of the YUV420P components. To read or write this image file format,
|
||||
@@ -283,8 +219,7 @@ See also the @ref{framemd5} muxer.
|
||||
The mov/mp4/ismv muxer supports fragmentation. Normally, a MOV/MP4
|
||||
file has all the metadata about all packets stored in one location
|
||||
(written at the end of the file, it can be moved to the start for
|
||||
better playback by adding @var{faststart} to the @var{movflags}, or
|
||||
using the @command{qt-faststart} tool). A fragmented
|
||||
better playback using the @command{qt-faststart} tool). A fragmented
|
||||
file consists of a number of fragments, where packets and metadata
|
||||
about these packets are stored together. Writing a fragmented
|
||||
file has the advantage that the file is decodable even if the
|
||||
@@ -342,10 +277,6 @@ more efficient), but with this option set, the muxer writes one moof/mdat
|
||||
pair for each track, making it easier to separate tracks.
|
||||
|
||||
This option is implicitly set when writing ismv (Smooth Streaming) files.
|
||||
@item -movflags faststart
|
||||
Run a second pass moving the moov atom on top of the file. This
|
||||
operation can take a while, and will not work in various situations such
|
||||
as fragmented output, thus it is not enabled by default.
|
||||
@end table
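For example, remuxing a file while relocating the moov atom to the front
could look like this (a sketch; stream copy is assumed to be acceptable):

@example
ffmpeg -i in.mov -c copy -movflags faststart out.mp4
@end example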
|
||||
|
||||
Smooth Streaming content can be pushed in real time to a publishing
|
||||
@@ -480,7 +411,7 @@ For example a 3D WebM clip can be created using the following command line:
|
||||
ffmpeg -i sample_left_right_clip.mpg -an -c:v libvpx -metadata stereo_mode=left_right -y stereo_clip.webm
|
||||
@end example
|
||||
|
||||
@section segment, stream_segment, ssegment
|
||||
@section segment
|
||||
|
||||
Basic stream segmenter.
|
||||
|
||||
@@ -488,200 +419,30 @@ The segmenter muxer outputs streams to a number of separate files of nearly
|
||||
fixed duration. Output filename pattern can be set in a fashion similar to
|
||||
@ref{image2}.
|
||||
|
||||
@code{stream_segment} is a variant of the muxer used to write to
|
||||
streaming output formats, i.e. which do not require global headers,
|
||||
and is recommended for outputting e.g. to MPEG transport stream segments.
|
||||
@code{ssegment} is a shorter alias for @code{stream_segment}.
|
||||
|
||||
Every segment starts with a keyframe of the selected reference stream,
|
||||
which is set through the @option{reference_stream} option.
|
||||
|
||||
Note that if you want accurate splitting for a video file, you need to
|
||||
make the input key frames correspond to the exact splitting times
|
||||
expected by the segmenter, or the segment muxer will start the new
|
||||
segment with the key frame found next after the specified start
|
||||
time.
|
||||
|
||||
Every segment starts with a video keyframe, if a video stream is present.
|
||||
The segment muxer works best with a single constant frame rate video.
|
||||
|
||||
Optionally it can generate a list of the created segments, by setting
|
||||
the option @var{segment_list}. The list type is specified by the
|
||||
@var{segment_list_type} option.
|
||||
|
||||
The segment muxer supports the following options:
|
||||
Optionally it can generate a flat list of the created segments, one segment
|
||||
per line.
|
||||
|
||||
@table @option
|
||||
@item reference_stream @var{specifier}
|
||||
Set the reference stream, as specified by the string @var{specifier}.
|
||||
If @var{specifier} is set to @code{auto}, the reference is chosen
|
||||
automatically. Otherwise it must be a stream specifier (see the ``Stream
|
||||
specifiers'' chapter in the ffmpeg manual) which specifies the
|
||||
reference stream. The default value is ``auto''.
|
||||
|
||||
@item segment_format @var{format}
|
||||
Override the inner container format, by default it is guessed by the filename
|
||||
extension.
|
||||
@item segment_time @var{t}
|
||||
Set segment duration to @var{t} seconds.
|
||||
@item segment_list @var{name}
|
||||
Generate also a listfile named @var{name}. If not specified no
|
||||
listfile is generated.
|
||||
@item segment_list_flags @var{flags}
|
||||
Set flags affecting the segment list generation.
|
||||
|
||||
It currently supports the following flags:
|
||||
@table @var
|
||||
@item cache
|
||||
Allow caching (only affects M3U8 list files).
|
||||
|
||||
@item live
|
||||
Allow live-friendly file generation.
|
||||
|
||||
This currently only affects M3U8 lists. In particular, write a fake
|
||||
EXT-X-TARGETDURATION duration field at the top of the file, based on
|
||||
the specified @var{segment_time}.
|
||||
@end table
|
||||
|
||||
Default value is @code{cache}.
|
||||
|
||||
Generate also a listfile named @var{name}.
|
||||
@item segment_list_size @var{size}
|
||||
Overwrite the listfile once it reaches @var{size} entries. If 0
|
||||
the listfile is never overwritten. Default value is 0.
|
||||
@item segment_list_type @var{type}
|
||||
Specify the format for the segment list file.
|
||||
|
||||
The following values are recognized:
|
||||
@table @option
|
||||
@item flat
|
||||
Generate a flat list for the created segments, one segment per line.
|
||||
|
||||
@item csv, ext
|
||||
Generate a list for the created segments, one segment per line,
|
||||
each line matching the format (comma-separated values):
|
||||
@example
|
||||
@var{segment_filename},@var{segment_start_time},@var{segment_end_time}
|
||||
@end example
|
||||
|
||||
@var{segment_filename} is the name of the output file generated by the
|
||||
muxer according to the provided pattern. CSV escaping (according to
|
||||
RFC4180) is applied if required.
|
||||
|
||||
@var{segment_start_time} and @var{segment_end_time} specify
|
||||
the segment start and end time expressed in seconds.
|
||||
|
||||
A list file with the suffix @code{".csv"} or @code{".ext"} will
|
||||
auto-select this format.
|
||||
|
||||
@code{ext} is deprecated in favor of @code{csv}.
|
||||
|
||||
@item m3u8
|
||||
Generate an extended M3U8 file, version 4, compliant with
|
||||
@url{http://tools.ietf.org/id/draft-pantos-http-live-streaming-08.txt}.
|
||||
|
||||
A list file with the suffix @code{".m3u8"} will auto-select this format.
|
||||
@end table
|
||||
|
||||
If not specified the type is guessed from the list file name suffix.
|
||||
@item segment_time @var{time}
|
||||
Set segment duration to @var{time}. Default value is "2".
|
||||
@item segment_time_delta @var{delta}
|
||||
Specify the accuracy time when selecting the start time for a
|
||||
segment. Default value is "0".
|
||||
|
||||
When delta is specified a key-frame will start a new segment if its
|
||||
PTS satisfies the relation:
|
||||
@example
|
||||
PTS >= start_time - time_delta
|
||||
@end example
|
||||
|
||||
This option is useful when splitting video content, which is always
|
||||
split at GOP boundaries, in case a key frame is found just before the
|
||||
specified split time.
|
||||
|
||||
In particular it may be used in combination with the @file{ffmpeg} option
|
||||
@var{force_key_frames}. The key frame times specified by
|
||||
@var{force_key_frames} may not be set accurately because of rounding
|
||||
issues, with the consequence that a key frame time may end up just
|
||||
before the specified time. For constant frame rate videos a value of
|
||||
1/2*@var{frame_rate} should address the worst case mismatch between
|
||||
the specified time and the time set by @var{force_key_frames}.
|
||||
|
||||
@item segment_times @var{times}
|
||||
Specify a list of split points. @var{times} contains a list of comma
|
||||
separated duration specifications, in increasing order.
|
||||
|
||||
@item segment_frames @var{frames}
|
||||
Specify a list of split video frame numbers. @var{frames} contains a
|
||||
list of comma separated integer numbers, in increasing order.
|
||||
|
||||
This option specifies to start a new segment whenever a reference
|
||||
stream key frame is found and the sequential number (starting from 0)
|
||||
of the frame is greater than or equal to the next value in the list.
|
||||
|
||||
Overwrite the listfile once it reaches @var{size} entries.
|
||||
@item segment_wrap @var{limit}
|
||||
Wrap around segment index once it reaches @var{limit}.
|
||||
|
||||
@item segment_start_number @var{number}
|
||||
Set the sequence number of the first segment. Defaults to @code{0}.
|
||||
|
||||
@item reset_timestamps @var{1|0}
|
||||
Reset timestamps at the beginning of each segment, so that each segment
|
||||
will start with near-zero timestamps. It is meant to ease the playback
|
||||
of the generated segments. May not work with some combinations of
|
||||
muxers/codecs. It is set to @code{0} by default.
|
||||
@end table
|
||||
|
||||
@section Examples
|
||||
|
||||
@itemize
|
||||
@item
|
||||
To remux the content of file @file{in.mkv} to a list of segments
|
||||
@file{out-000.nut}, @file{out-001.nut}, etc., and write the list of
|
||||
generated segments to @file{out.list}:
|
||||
@example
|
||||
ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.list out%03d.nut
|
||||
ffmpeg -i in.mkv -c copy -map 0 -f segment -list out.list out%03d.nut
|
||||
@end example
|
||||
|
||||
@item
|
||||
As the example above, but segment the input file according to the split
|
||||
points specified by the @var{segment_times} option:
|
||||
@example
|
||||
ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_times 1,2,3,5,8,13,21 out%03d.nut
|
||||
@end example
|
||||
|
||||
@item
|
||||
As the example above, but use the @code{ffmpeg} @var{force_key_frames}
|
||||
option to force key frames in the input at the specified location, together
|
||||
with the segment option @var{segment_time_delta} to account for
|
||||
possible rounding applied when setting key frame times.
|
||||
@example
|
||||
ffmpeg -i in.mkv -force_key_frames 1,2,3,5,8,13,21 -codec:v mpeg4 -codec:a pcm_s16le -map 0 \
|
||||
-f segment -segment_list out.csv -segment_times 1,2,3,5,8,13,21 -segment_time_delta 0.05 out%03d.nut
|
||||
@end example
|
||||
In order to force key frames on the input file, transcoding is
|
||||
required.
|
||||
|
||||
@item
|
||||
Segment the input file by splitting the input file according to the
|
||||
frame numbers sequence specified with the @var{segment_frames} option:
|
||||
@example
|
||||
ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_frames 100,200,300,500,800 out%03d.nut
|
||||
@end example
|
||||
|
||||
@item
|
||||
To convert the @file{in.mkv} to TS segments using the @code{libx264}
|
||||
and @code{libfaac} encoders:
|
||||
@example
|
||||
ffmpeg -i in.mkv -map 0 -codec:v libx264 -codec:a libfaac -f ssegment -segment_list out.list out%03d.ts
|
||||
@end example
|
||||
|
||||
@item
|
||||
Segment the input file, and create an M3U8 live playlist (can be used
|
||||
as live HLS source):
|
||||
@example
|
||||
ffmpeg -re -i in.mkv -codec copy -map 0 -f segment -segment_list playlist.m3u8 \
|
||||
-segment_list_flags +live -segment_time 10 out%03d.mkv
|
||||
@end example
|
||||
@end itemize
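In addition, the @var{segment_wrap} and @var{reset_timestamps} options can be
combined to keep only a small ring of self-contained segments (a sketch):

@example
ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_time 10 \
       -segment_wrap 10 -reset_timestamps 1 out%03d.mkv
@end example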
|
||||
|
||||
@section mp3
|
||||
|
||||
The MP3 muxer writes a raw MP3 stream with an ID3v2 header at the beginning and
|
||||
@@ -711,11 +472,10 @@ Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
|
||||
ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3
|
||||
@end example
|
||||
|
||||
To attach a picture to an mp3 file select both the audio and the picture stream
|
||||
with @code{map}:
|
||||
Attach a picture to an mp3:
|
||||
@example
|
||||
ffmpeg -i input.mp3 -i cover.png -c copy -map 0 -map 1
|
||||
-metadata:s:v title="Album cover" -metadata:s:v comment="Cover (Front)" out.mp3
|
||||
ffmpeg -i input.mp3 -i cover.png -c copy -metadata:s:v title="Album cover"
|
||||
-metadata:s:v comment="Cover (Front)" out.mp3
|
||||
@end example
|
||||
|
||||
@c man end MUXERS
|
||||
|
138
doc/nut.texi
@@ -1,138 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle NUT
|
||||
|
||||
@titlepage
|
||||
@center @titlefont{NUT}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
NUT is a low overhead generic container format. It stores audio, video,
|
||||
subtitle and user-defined streams in a simple, yet efficient, way.
|
||||
|
||||
It was created by a group of FFmpeg and MPlayer developers in 2003
|
||||
and was finalized in 2008.
|
||||
|
||||
The official nut specification is at svn://svn.mplayerhq.hu/nut.
|
||||
In case of any differences between this text and the official specification,
|
||||
the official specification shall prevail.
|
||||
|
||||
@chapter Container-specific codec tags
|
||||
|
||||
@section Generic raw YUVA formats
|
||||
|
||||
Since many exotic planar YUVA pixel formats are not considered by
|
||||
the AVI/QuickTime FourCC lists, the following scheme is adopted for
|
||||
representing them.
|
||||
|
||||
The first two bytes can contain the values:
|
||||
Y1 = only Y
|
||||
Y2 = Y+A
|
||||
Y3 = YUV
|
||||
Y4 = YUVA
|
||||
|
||||
The third byte represents the width and height chroma subsampling
|
||||
values for the UV planes, that is the amount to shift the luma
|
||||
width/height right to find the chroma width/height.
|
||||
|
||||
The fourth byte is the number of bits used (8, 16, ...).
|
||||
|
||||
If the order of bytes is inverted, that means that each component has
|
||||
to be read big-endian.
|
||||
|
||||
@section Raw Audio
|
||||
|
||||
@multitable @columnfractions .4 .4
|
||||
@item ALAW @tab A-LAW
|
||||
@item ULAW @tab MU-LAW
|
||||
@item P<type><interleaving><bits> @tab little-endian PCM
|
||||
@item <bits><interleaving><type>P @tab big-endian PCM
|
||||
@end multitable
|
||||
|
||||
<type> is S for signed integer, U for unsigned integer, F for IEEE float
|
||||
<interleaving> is D for default, P is for planar.
|
||||
<bits> is 8/16/24/32
|
||||
|
||||
@example
|
||||
PFD[32] would for example be signed 32 bit little-endian IEEE float
|
||||
@end example
|
||||
|
||||
@section Subtitles
|
||||
|
||||
@multitable @columnfractions .4 .4
|
||||
@item UTF8 @tab Raw UTF-8
|
||||
@item SSA[0] @tab SubStation Alpha
|
||||
@item DVDS @tab DVD subtitles
|
||||
@item DVBS @tab DVB subtitles
|
||||
@end multitable
|
||||
|
||||
@section Raw Data
|
||||
|
||||
@multitable @columnfractions .4 .4
|
||||
@item UTF8 @tab Raw UTF-8
|
||||
@end multitable
|
||||
|
||||
@section Codecs
|
||||
|
||||
@multitable @columnfractions .4 .4
|
||||
@item 3IV1 @tab non-compliant MPEG-4 generated by old 3ivx
|
||||
@item ASV1 @tab Asus Video
|
||||
@item ASV2 @tab Asus Video 2
|
||||
@item CVID @tab Cinepak
|
||||
@item CYUV @tab Creative YUV
|
||||
@item DIVX @tab non-compliant MPEG-4 generated by old DivX
|
||||
@item DUCK @tab Truemotion 1
|
||||
@item FFV1 @tab FFmpeg video 1
|
||||
@item FFVH @tab FFmpeg Huffyuv
|
||||
@item H261 @tab ITU H.261
|
||||
@item H262 @tab ITU H.262
|
||||
@item H263 @tab ITU H.263
|
||||
@item H264 @tab ITU H.264
|
||||
@item HFYU @tab Huffyuv
|
||||
@item I263 @tab Intel H.263
|
||||
@item IV31 @tab Indeo 3.1
|
||||
@item IV32 @tab Indeo 3.2
|
||||
@item IV50 @tab Indeo 5.0
|
||||
@item LJPG @tab ITU JPEG (lossless)
|
||||
@item MJLS @tab ITU JPEG-LS
|
||||
@item MJPG @tab ITU JPEG
|
||||
@item MPG4 @tab MS MPEG-4v1 (not ISO MPEG-4)
|
||||
@item MP42 @tab MS MPEG-4v2
|
||||
@item MP43 @tab MS MPEG-4v3
|
||||
@item MP4V @tab ISO MPEG-4 Part 2 Video (from old encoders)
|
||||
@item mpg1 @tab ISO MPEG-1 Video
|
||||
@item mpg2 @tab ISO MPEG-2 Video
|
||||
@item MRLE @tab MS RLE
|
||||
@item MSVC @tab MS Video 1
|
||||
@item RT21 @tab Indeo 2.1
|
||||
@item RV10 @tab RealVideo 1.0
|
||||
@item RV20 @tab RealVideo 2.0
|
||||
@item RV30 @tab RealVideo 3.0
|
||||
@item RV40 @tab RealVideo 4.0
|
||||
@item SNOW @tab FFmpeg Snow
|
||||
@item SVQ1 @tab Sorenson Video 1
|
||||
@item SVQ3 @tab Sorenson Video 3
|
||||
@item theo @tab Xiph Theora
|
||||
@item TM20 @tab Truemotion 2.0
|
||||
@item UMP4 @tab non-compliant MPEG-4 generated by UB Video MPEG-4
|
||||
@item VCR1 @tab ATI VCR1
|
||||
@item VP30 @tab VP 3.0
|
||||
@item VP31 @tab VP 3.1
|
||||
@item VP50 @tab VP 5.0
|
||||
@item VP60 @tab VP 6.0
|
||||
@item VP61 @tab VP 6.1
|
||||
@item VP62 @tab VP 6.2
|
||||
@item VP70 @tab VP 7.0
|
||||
@item WMV1 @tab MS WMV7
|
||||
@item WMV2 @tab MS WMV8
|
||||
@item WMV3 @tab MS WMV9
|
||||
@item WV1F @tab non-compliant MPEG-4 generated by ?
|
||||
@item WVC1 @tab VC-1
|
||||
@item XVID @tab non-compliant MPEG-4 generated by old Xvid
|
||||
@item XVIX @tab non-compliant MPEG-4 generated by old Xvid with interlacing bug
|
||||
@end multitable
|
||||
|
@@ -253,7 +253,7 @@ Optimization guide for ARM11 (used in Nokia N800 Internet Tablet):
|
||||
http://infocenter.arm.com/help/topic/com.arm.doc.ddi0211j/DDI0211J_arm1136_r1p5_trm.pdf
|
||||
Optimization guide for Intel XScale (used in Sharp Zaurus PDA):
|
||||
http://download.intel.com/design/intelxscale/27347302.pdf
|
||||
Intel Wireless MMX 2 Coprocessor: Programmers Reference Manual
|
||||
Intel Wireless MMX2 Coprocessor: Programmers Reference Manual
|
||||
http://download.intel.com/design/intelxscale/31451001.pdf
|
||||
|
||||
PowerPC-specific:
|
||||
|
@@ -22,88 +22,6 @@ A description of the currently available output devices follows.
|
||||
|
||||
ALSA (Advanced Linux Sound Architecture) output device.
|
||||
|
||||
@section caca
|
||||
|
||||
CACA output device.
|
||||
|
||||
This output device allows showing a video stream in a CACA window.
|
||||
Only one CACA window is allowed per application, so you can
|
||||
have only one instance of this output device in an application.
|
||||
|
||||
To enable this output device you need to configure FFmpeg with
|
||||
@code{--enable-libcaca}.
|
||||
libcaca is a graphics library that outputs text instead of pixels.
|
||||
|
||||
For more information about libcaca, check:
|
||||
@url{http://caca.zoy.org/wiki/libcaca}
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item window_title
|
||||
Set the CACA window title, if not specified default to the filename
|
||||
specified for the output device.
|
||||
|
||||
@item window_size
|
||||
Set the CACA window size, can be a string of the form
|
||||
@var{width}x@var{height} or a video size abbreviation.
|
||||
If not specified it defaults to the size of the input video.
|
||||
|
||||
@item driver
|
||||
Set display driver.
|
||||
|
||||
@item algorithm
|
||||
Set the dithering algorithm. Dithering is necessary
because the picture being rendered usually has far more colours than
the available palette.
|
||||
The accepted values are listed with @code{-list_dither algorithms}.
|
||||
|
||||
@item antialias
|
||||
Set the antialiasing method. Antialiasing smooths the rendered
image and avoids the commonly seen staircase effect.
|
||||
The accepted values are listed with @code{-list_dither antialiases}.
|
||||
|
||||
@item charset
|
||||
Set which characters are going to be used when rendering text.
|
||||
The accepted values are listed with @code{-list_dither charsets}.
|
||||
|
||||
@item color
|
||||
Set color to be used when rendering text.
|
||||
The accepted values are listed with @code{-list_dither colors}.
|
||||
|
||||
@item list_drivers
|
||||
If set to @option{true}, print a list of available drivers and exit.
|
||||
|
||||
@item list_dither
|
||||
List available dither options related to the argument.
|
||||
The argument must be one of @code{algorithms}, @code{antialiases},
|
||||
@code{charsets}, @code{colors}.
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
@item
|
||||
The following command shows the @command{ffmpeg} output in a
CACA window, forcing its size to 80x25:
|
||||
@example
|
||||
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt rgb24 -window_size 80x25 -f caca -
|
||||
@end example
|
||||
|
||||
@item
|
||||
Show the list of available drivers and exit:
|
||||
@example
|
||||
ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_drivers true -
|
||||
@end example
|
||||
|
||||
@item
|
||||
Show the list of available dither colors and exit:
|
||||
@example
|
||||
ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_dither colors -
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@section oss
|
||||
|
||||
OSS (Open Sound System) output device.
|
||||
@@ -137,8 +55,7 @@ to the same value of @var{window_title}.
|
||||
@item window_size
|
||||
Set the SDL window size, can be a string of the form
|
||||
@var{width}x@var{height} or a video size abbreviation.
|
||||
If not specified it defaults to the size of the input video,
|
||||
downscaled according to the aspect ratio.
|
||||
If not specified it defaults to the size of the input video.
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
@@ -1,8 +1,8 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle Platform Specific Information
|
||||
@settitle Platform Specific information
|
||||
@titlepage
|
||||
@center @titlefont{Platform Specific Information}
|
||||
@center @titlefont{Platform Specific information}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
@@ -77,15 +77,30 @@ For information about compiling FFmpeg on OS/2 see
|
||||
@chapter Windows
|
||||
|
||||
To get help and instructions for building FFmpeg under Windows, check out
|
||||
the FFmpeg Windows Help Forum at @url{http://ffmpeg.zeranoe.com/forum/}.
|
||||
the FFmpeg Windows Help Forum at
|
||||
@url{http://ffmpeg.arrozcru.org/}.
|
||||
|
||||
@section Native Windows compilation using MinGW or MinGW-w64
|
||||
@section Native Windows compilation
|
||||
|
||||
FFmpeg can be built to run natively on Windows using the MinGW or MinGW-w64
|
||||
toolchains. Install the latest versions of MSYS and MinGW or MinGW-w64 from
|
||||
@url{http://www.mingw.org/} or @url{http://mingw-w64.sourceforge.net/}.
|
||||
You can find detailed installation instructions in the download section and
|
||||
the FAQ.
|
||||
FFmpeg can be built to run natively on Windows using the MinGW tools. Install
|
||||
the latest versions of MSYS and MinGW from @url{http://www.mingw.org/}.
|
||||
You can find detailed installation instructions in the download
|
||||
section and the FAQ.
|
||||
|
||||
FFmpeg does not build out-of-the-box with the packages the automated MinGW
|
||||
installer provides. It also requires coreutils to be installed and many other
|
||||
packages updated to the latest version. The minimum versions for some packages
|
||||
are listed below:
|
||||
|
||||
@itemize
|
||||
@item bash 3.1
|
||||
@item msys-make 3.81-2 (note: not mingw32-make)
|
||||
@item w32api 3.13
|
||||
@item mingw-runtime 3.15
|
||||
@end itemize
|
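With these packages in place, a typical native build is just the usual
sequence (a minimal sketch; add configure options as needed for your setup):
@example
./configure
make
make install
@end example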
||||
|
||||
FFmpeg automatically passes @code{-fno-common} to the compiler to work around
|
||||
a GCC bug (see @url{http://gcc.gnu.org/bugzilla/show_bug.cgi?id=37216}).
|
||||
|
||||
Notes:
|
||||
|
||||
@@ -106,105 +121,149 @@ libavformat) as DLLs.
|
||||
|
||||
@end itemize
|
||||
|
||||
@section Microsoft Visual C++
|
||||
@section Microsoft Visual C++ compatibility
|
||||
|
||||
FFmpeg can be built with MSVC using a C99-to-C89 conversion utility and
|
||||
wrapper.
|
||||
As stated in the FAQ, FFmpeg will not compile under MSVC++. However, if you
|
||||
want to use the libav* libraries in your own applications, you can still
|
||||
compile those applications using MSVC++. But the libav* libraries you link
|
||||
to @emph{must} be built with MinGW. However, you will not be able to debug
|
||||
inside the libav* libraries, since MSVC++ does not recognize the debug
|
||||
symbols generated by GCC.
|
||||
We strongly recommend that you move over from MSVC++ to the MinGW tools.
|
||||
|
||||
You will need the following prerequisites:
|
||||
This description of how to use the FFmpeg libraries with MSVC++ is based on
|
||||
Microsoft Visual C++ 2005 Express Edition. If you have a different version,
|
||||
you might have to modify the procedures slightly.
|
||||
|
||||
@itemize
|
||||
@item @uref{http://download.videolan.org/pub/contrib/c99-to-c89/, C99-to-C89 Converter & Wrapper}
|
||||
@item @uref{http://code.google.com/p/msinttypes/, msinttypes}
|
||||
@item @uref{http://www.mingw.org/, MSYS}
|
||||
@item @uref{http://yasm.tortall.net/, YASM}
|
||||
@item @uref{http://gnuwin32.sourceforge.net/packages/bc.htm, bc for Windows} if
|
||||
you want to run @uref{fate.html, FATE}.
|
||||
@end itemize
|
||||
@subsection Using static libraries
|
||||
|
||||
To set up a proper MSVC environment in MSYS, you simply need to run
|
||||
@code{msys.bat} from the Visual Studio command prompt.
|
||||
Assuming you have just built and installed FFmpeg in @file{/usr/local}:
|
||||
|
||||
Place @code{makedef}, @code{c99wrap.exe}, @code{c99conv.exe}, and @code{yasm.exe}
|
||||
somewhere in your @code{PATH}.
|
||||
@enumerate
|
||||
|
||||
Next, make sure @code{inttypes.h} and any other headers and libs you want to use
|
||||
are located in a spot that MSVC can see. Do so by modifying the @code{LIB} and
|
||||
@code{INCLUDE} environment variables to include the @strong{Windows} paths to
|
||||
these directories. Alternatively, you can try and use the
|
||||
@code{--extra-cflags}/@code{--extra-ldflags} configure options.
|
||||
@item Create a new console application ("File / New / Project") and then
|
||||
select "Win32 Console Application". On the appropriate page of the
|
||||
Application Wizard, uncheck the "Precompiled headers" option.
|
||||
|
||||
Finally, run:
|
||||
@item Write the source code for your application, or, for testing, just
|
||||
copy the code from an existing sample application into the source file
|
||||
that MSVC++ has already created for you. For example, you can copy
|
||||
@file{libavformat/output-example.c} from the FFmpeg distribution.
|
||||
|
||||
@item Open the "Project / Properties" dialog box. In the "Configuration"
|
||||
combo box, select "All Configurations" so that the changes you make will
|
||||
affect both debug and release builds. In the tree view on the left hand
|
||||
side, select "C/C++ / General", then edit the "Additional Include
|
||||
Directories" setting to contain the path where the FFmpeg includes were
|
||||
installed (i.e. @file{c:\msys\1.0\local\include}).
|
||||
Do not add MinGW's include directory here, or the include files will
|
||||
conflict with MSVC's.
|
||||
|
||||
@item Still in the "Project / Properties" dialog box, select
|
||||
"Linker / General" from the tree view and edit the
|
||||
"Additional Library Directories" setting to contain the @file{lib}
|
||||
directory where FFmpeg was installed (i.e. @file{c:\msys\1.0\local\lib}),
|
||||
the directory where MinGW libs are installed (i.e. @file{c:\mingw\lib}),
|
||||
and the directory where MinGW's GCC libs are installed
|
||||
(i.e. @file{C:\mingw\lib\gcc\mingw32\4.2.1-sjlj}). Then select
|
||||
"Linker / Input" from the tree view, and add the files @file{libavformat.a},
|
||||
@file{libavcodec.a}, @file{libavutil.a}, @file{libmingwex.a},
|
||||
@file{libgcc.a}, and any other libraries you used (i.e. @file{libz.a})
|
||||
to the end of "Additional Dependencies".
|
||||
|
||||
@item Now, select "C/C++ / Code Generation" from the tree view. Select
|
||||
"Debug" in the "Configuration" combo box. Make sure that "Runtime
|
||||
Library" is set to "Multi-threaded Debug DLL". Then, select "Release" in
|
||||
the "Configuration" combo box and make sure that "Runtime Library" is
|
||||
set to "Multi-threaded DLL".
|
||||
|
||||
@item Click "OK" to close the "Project / Properties" dialog box.
|
||||
|
||||
@item MSVC++ lacks some C99 header files that are fundamental for FFmpeg.
|
||||
Get msinttypes from @url{http://code.google.com/p/msinttypes/downloads/list}
|
||||
and install it in MSVC++'s include directory
|
||||
(i.e. @file{C:\Program Files\Microsoft Visual Studio 8\VC\include}).
|
||||
|
||||
@item MSVC++ also does not understand the @code{inline} keyword used by
|
||||
FFmpeg, so you must add this line before @code{#include}ing libav*:
|
||||
@example
|
||||
#define inline _inline
|
||||
@end example
|
||||
|
||||
@item Build your application, everything should work.
|
||||
|
||||
@end enumerate
|
||||
|
||||
@subsection Using shared libraries
|
||||
|
||||
This is how to create DLL and LIB files that are compatible with MSVC++:
|
||||
|
||||
@enumerate
|
||||
|
||||
@item Add a call to @file{vcvars32.bat} (which sets up the environment
|
||||
variables for the Visual C++ tools) as the first line of @file{msys.bat}.
|
||||
The standard location for @file{vcvars32.bat} is
|
||||
@file{C:\Program Files\Microsoft Visual Studio 8\VC\bin\vcvars32.bat},
|
||||
and the standard location for @file{msys.bat} is @file{C:\msys\1.0\msys.bat}.
|
||||
If this corresponds to your setup, add the following line as the first line
|
||||
of @file{msys.bat}:
|
||||
|
||||
@example
|
||||
./configure --toolchain=msvc
|
||||
call "C:\Program Files\Microsoft Visual Studio 8\VC\bin\vcvars32.bat"
|
||||
@end example
|
||||
|
||||
Alternatively, you may start the @file{Visual Studio 2005 Command Prompt},
|
||||
and run @file{c:\msys\1.0\msys.bat} from there.
|
||||
|
||||
@item Within the MSYS shell, run @code{lib.exe}. If you get a help message
|
||||
from @file{Microsoft (R) Library Manager}, this means your environment
|
||||
variables are set up correctly, the @file{Microsoft (R) Library Manager}
|
||||
is on the path and will be used by FFmpeg to create
|
||||
MSVC++-compatible import libraries.
|
||||
|
||||
@item Build FFmpeg with
|
||||
|
||||
@example
|
||||
./configure --enable-shared
|
||||
make
|
||||
make install
|
||||
@end example
|
||||
|
||||
If you wish to compile shared libraries, add @code{--enable-shared} to your
|
||||
configure options. Note that due to the way MSVC handles DLL imports and
|
||||
exports, you cannot compile static and shared libraries at the same time, and
|
||||
enabling shared libraries will automatically disable the static ones.
|
||||
Your install path (@file{/usr/local/} by default) should now have the
|
||||
necessary DLL and LIB files under the @file{bin} directory.
|
||||
|
||||
Notes:
|
||||
|
||||
@itemize
|
||||
|
||||
@item It is possible that coreutils' @code{link.exe} conflicts with MSVC's linker.
|
||||
You can find out by running @code{which link} to see which @code{link.exe} you
|
||||
are using. If it is located at @code{/bin/link.exe}, then you have the wrong one
|
||||
in your @code{PATH}. Either move or remove that copy, or make sure MSVC's
|
||||
@code{link.exe} takes precedence in your @code{PATH} over coreutils'.
|
||||
|
||||
@item If you wish to build with zlib support, you will have to grab a compatible
|
||||
zlib binary from somewhere, with an MSVC import lib, or if you wish to link
|
||||
statically, you can follow the instructions below to build a compatible
|
||||
@code{zlib.lib} with MSVC. Regardless of which method you use, you must still
|
||||
follow step 3, or compilation will fail.
|
||||
@enumerate
|
||||
@item Grab the @uref{http://zlib.net/, zlib sources}.
|
||||
@item Edit @code{win32/Makefile.msc} so that it uses -MT instead of -MD, since
|
||||
this is how FFmpeg is built as well.
|
||||
@item Edit @code{zconf.h} and remove its inclusion of @code{unistd.h}. This gets
|
||||
erroneously included when building FFmpeg.
|
||||
@item Run @code{nmake -f win32/Makefile.msc}.
|
||||
@item Move @code{zlib.lib}, @code{zconf.h}, and @code{zlib.h} to somewhere MSVC
|
||||
can see.
|
||||
@end enumerate
|
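Condensed into a shell transcript (a sketch only; the zlib version number and
the destination directory are hypothetical):
@example
cd zlib-1.2.7
# edit win32/Makefile.msc: use -MT instead of -MD
# edit zconf.h: remove the inclusion of unistd.h
nmake -f win32/Makefile.msc
cp zlib.lib zconf.h zlib.h /usr/local/   # somewhere MSVC can see
@end example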
||||
|
||||
@item FFmpeg has been tested with Visual Studio 2010 and 2012, Pro and Express.
|
||||
Anything else is not officially supported.
|
||||
Alternatively, build the libraries with a cross compiler, according to
|
||||
the instructions below in @ref{Cross compilation for Windows with Linux}.
|
||||
|
||||
@end itemize
|
||||
|
||||
@subsection Linking to FFmpeg with Microsoft Visual C++
|
||||
|
||||
If you plan to link with MSVC-built static libraries, you will need
|
||||
to make sure you have @code{Runtime Library} set to
|
||||
@code{Multi-threaded (/MT)} in your project's settings.
|
||||
To use those files with MSVC++, do the same as you would do with
|
||||
the static libraries, as described above. But in Step 4,
|
||||
you should only need to add the directory where the LIB files are installed
|
||||
(i.e. @file{c:\msys\usr\local\bin}). This is not a typo, the LIB files are
|
||||
installed in the @file{bin} directory. And instead of adding the static
|
||||
libraries (@file{libxxx.a} files) you should add the MSVC import libraries
|
||||
(@file{avcodec.lib}, @file{avformat.lib}, and
|
||||
@file{avutil.lib}). Note that you should not use the GCC import
|
||||
libraries (@file{libxxx.dll.a} files), as these will give you undefined
|
||||
reference errors. There should be no need for @file{libmingwex.a},
|
||||
@file{libgcc.a}, and @file{wsock32.lib}, nor any other external library
|
||||
statically linked into the DLLs.
|
||||
|
||||
FFmpeg headers do not declare global data for Windows DLLs through the usual
|
||||
dllexport/dllimport interface. Such data will be exported properly while
|
||||
building, but to use them in your MSVC code you will have to edit the
|
||||
building, but to use them in your MSVC++ code you will have to edit the
|
||||
appropriate headers and mark the data as dllimport. For example, in
|
||||
libavutil/pixdesc.h you should have:
|
||||
@example
|
||||
extern __declspec(dllimport) const AVPixFmtDescriptor av_pix_fmt_descriptors[];
|
||||
@end example
|
||||
|
||||
You will also need to define @code{inline} to something MSVC understands:
|
||||
@example
|
||||
#define inline __inline
|
||||
@end example
|
||||
|
||||
Also note that, as stated in @strong{Microsoft Visual C++}, you will need
|
||||
an MSVC-compatible @uref{http://code.google.com/p/msinttypes/, inttypes.h}.
|
||||
|
||||
If you plan on using import libraries created by dlltool, you must
|
||||
set @code{References} to @code{No (/OPT:NOREF)} under the linker optimization
|
||||
settings, otherwise the resulting binaries will fail during runtime.
|
||||
This is not required when using import libraries generated by @code{lib.exe}.
|
||||
Note that using import libraries created by dlltool requires
|
||||
the linker optimization option to be set to
|
||||
"References: Keep Unreferenced Data (@code{/OPT:NOREF})", otherwise
|
||||
the resulting binaries will fail during runtime. This isn't
|
||||
required when using import libraries generated by lib.exe.
|
||||
This issue is reported upstream at
|
||||
@url{http://sourceware.org/bugzilla/show_bug.cgi?id=12633}.
|
||||
|
||||
@@ -213,12 +272,12 @@ To create import libraries that work with the @code{/OPT:REF} option
|
||||
|
||||
@enumerate
|
||||
|
||||
@item Open the @emph{Visual Studio Command Prompt}.
|
||||
@item Open @emph{Visual Studio 2005 Command Prompt}.
|
||||
|
||||
Alternatively, in a normal command line prompt, call @file{vcvars32.bat}
|
||||
which sets up the environment variables for the Visual C++ tools
|
||||
(the standard location for this file is something like
|
||||
@file{C:\Program Files (x86_\Microsoft Visual Studio 10.0\VC\bin\vcvars32.bat}).
|
||||
(the standard location for this file is
|
||||
@file{C:\Program Files\Microsoft Visual Studio 8\VC\bin\vcvars32.bat}).
|
||||
|
||||
@item Enter the @file{bin} directory where the created LIB and DLL files
|
||||
are stored.
|
||||
@@ -259,9 +318,24 @@ following "Devel" ones:
|
||||
binutils, gcc4-core, make, git, mingw-runtime, texi2html
|
||||
@end example
|
||||
|
||||
In order to run FATE you will also need the following "Utils" packages:
|
||||
And the following "Utils" one:
|
||||
@example
|
||||
bc, diffutils
|
||||
diffutils
|
||||
@end example
|
||||
|
||||
Then run
|
||||
|
||||
@example
|
||||
./configure
|
||||
@end example
|
||||
|
||||
to make a static build.
|
||||
|
||||
To build shared libraries add a special compiler flag to work around current
|
||||
@code{gcc4-core} package bugs in addition to the normal configure flags:
|
||||
|
||||
@example
|
||||
./configure --enable-shared --disable-static --extra-cflags=-fno-reorder-functions
|
||||
@end example
|
||||
|
||||
If you want to build FFmpeg with additional libraries, download Cygwin
|
||||
@@ -303,67 +377,4 @@ and for a build with shared libraries
|
||||
./configure --target-os=mingw32 --enable-shared --disable-static --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin
|
||||
@end example
|
||||
|
||||
@chapter Plan 9
|
||||
|
||||
The native @uref{http://plan9.bell-labs.com/plan9/, Plan 9} compiler
|
||||
does not implement all the C99 features needed by FFmpeg so the gcc
|
||||
port must be used. Furthermore, a few items missing from the C
|
||||
library and shell environment need to be fixed.
|
||||
|
||||
@itemize
|
||||
|
||||
@item GNU awk, grep, make, and sed
|
||||
|
||||
Working packages of these tools can be found at
|
||||
@uref{http://code.google.com/p/ports2plan9/downloads/list, ports2plan9}.
|
||||
They can be installed with @uref{http://9front.org/, 9front's} @code{pkg}
|
||||
utility by setting @code{pkgpath} to
|
||||
@code{http://ports2plan9.googlecode.com/files/}.
|
||||
|
||||
@item Missing/broken @code{head} and @code{printf} commands
|
||||
|
||||
Replacements adequate for building FFmpeg can be found in the
|
||||
@code{compat/plan9} directory. Place these somewhere they will be
|
||||
found by the shell. These are not full implementations of the
|
||||
commands and are @emph{not} suitable for general use.
|
||||
|
||||
@item Missing C99 @code{stdint.h} and @code{inttypes.h}
|
||||
|
||||
Replacement headers are available from
|
||||
@url{http://code.google.com/p/plan9front/issues/detail?id=152}.
|
||||
|
||||
@item Missing or non-standard library functions
|
||||
|
||||
Some functions in the C library are missing or incomplete. The
|
||||
@code{@uref{http://ports2plan9.googlecode.com/files/gcc-apelibs-1207.tbz,
|
||||
gcc-apelibs-1207}} package from
|
||||
@uref{http://code.google.com/p/ports2plan9/downloads/list, ports2plan9}
|
||||
includes an updated C library, but installing the full package gives
|
||||
unusable executables. Instead, keep the files from @code{gccbin.tgz}
|
||||
under @code{/386/lib/gnu}. From the @code{libc.a} archive in the
|
||||
@code{gcc-apelibs-1207} package, extract the following object files and
|
||||
turn them into a library:
|
||||
|
||||
@itemize
|
||||
@item @code{strerror.o}
|
||||
@item @code{strtoll.o}
|
||||
@item @code{snprintf.o}
|
||||
@item @code{vsnprintf.o}
|
||||
@item @code{vfprintf.o}
|
||||
@item @code{_IO_getc.o}
|
||||
@item @code{_IO_putc.o}
|
||||
@end itemize
|
||||
|
||||
Use the @code{--extra-libs} option of @code{configure} to inform the
|
||||
build system of this library.
|
||||
|
||||
@item FPU exceptions enabled by default
|
||||
|
||||
Unlike most other systems, Plan 9 enables FPU exceptions by default.
|
||||
These must be disabled before calling any FFmpeg functions. While the
|
||||
included tools will do this automatically, other users of the
|
||||
libraries must do it themselves.
|
||||
|
||||
@end itemize
|
||||
|
||||
@bye
|
||||
|
@@ -75,15 +75,6 @@ ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
|
||||
Note that you may need to escape the character "|" which is special for
|
||||
many shells.
|
||||
|
||||
@section data
|
||||
|
||||
Data in-line in the URI. See @url{http://en.wikipedia.org/wiki/Data_URI_scheme}.
|
||||
|
||||
For example, to convert a GIF file given inline with @command{ffmpeg}:
|
||||
@example
|
||||
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
|
||||
@end example
|
||||
|
||||
@section file
|
||||
|
||||
File access protocol.
|
||||
@@ -203,7 +194,7 @@ content across a TCP/IP network.
|
||||
|
||||
The required syntax is:
|
||||
@example
|
||||
rtmp://@var{server}[:@var{port}][/@var{app}][/@var{instance}][/@var{playpath}]
|
||||
rtmp://@var{server}[:@var{port}][/@var{app}][/@var{playpath}]
|
||||
@end example
|
||||
|
||||
The accepted parameters are:
|
||||
@@ -218,88 +209,11 @@ The number of the TCP port to use (by default is 1935).
|
||||
@item app
|
||||
It is the name of the application to access. It usually corresponds to
|
||||
the path where the application is installed on the RTMP server
|
||||
(e.g. @file{/ondemand/}, @file{/flash/live/}, etc.). You can override
|
||||
the value parsed from the URI through the @code{rtmp_app} option, too.
|
||||
(e.g. @file{/ondemand/}, @file{/flash/live/}, etc.).
|
||||
|
||||
@item playpath
|
||||
It is the path or name of the resource to play with reference to the
application specified in @var{app}; it may be prefixed by "mp4:". You
can override the value parsed from the URI through the @code{rtmp_playpath}
option, too.
|
||||
|
||||
@item listen
|
||||
Act as a server, listening for an incoming connection.
|
||||
|
||||
@item timeout
|
||||
Maximum time to wait for the incoming connection. Implies listen.
|
||||
@end table
|
||||
|
||||
Additionally, the following parameters can be set via command line options
|
||||
(or in code via @code{AVOption}s):
|
||||
@table @option
|
||||
|
||||
@item rtmp_app
|
||||
Name of application to connect on the RTMP server. This option
|
||||
overrides the parameter specified in the URI.
|
||||
|
||||
@item rtmp_buffer
|
||||
Set the client buffer time in milliseconds. The default is 3000.
|
||||
|
||||
@item rtmp_conn
|
||||
Extra arbitrary AMF connection parameters, parsed from a string,
|
||||
e.g. like @code{B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0}.
|
||||
Each value is prefixed by a single character denoting the type,
|
||||
B for Boolean, N for number, S for string, O for object, or Z for null,
|
||||
followed by a colon. For Booleans the data must be either 0 or 1 for
|
||||
FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or
|
||||
1 to end or begin an object, respectively. Data items in subobjects may
|
||||
be named, by prefixing the type with 'N' and specifying the name before
|
||||
the value (i.e. @code{NB:myFlag:1}). This option may be used multiple
|
||||
times to construct arbitrary AMF sequences.
|
||||
|
||||
@item rtmp_flashver
|
||||
Version of the Flash plugin used to run the SWF player. The default
|
||||
is LNX 9,0,124,2.
|
||||
|
||||
@item rtmp_flush_interval
|
||||
Number of packets flushed in the same request (RTMPT only). The default
|
||||
is 10.
|
||||
|
||||
@item rtmp_live
|
||||
Specify that the media is a live stream. No resuming or seeking in
|
||||
live streams is possible. The default value is @code{any}, which means the
|
||||
subscriber first tries to play the live stream specified in the
|
||||
playpath. If a live stream of that name is not found, it plays the
|
||||
recorded stream. The other possible values are @code{live} and
|
||||
@code{recorded}.
|
||||
|
||||
@item rtmp_pageurl
|
||||
URL of the web page in which the media was embedded. By default no
|
||||
value will be sent.
|
||||
|
||||
@item rtmp_playpath
|
||||
Stream identifier to play or to publish. This option overrides the
|
||||
parameter specified in the URI.
|
||||
|
||||
@item rtmp_subscribe
|
||||
Name of live stream to subscribe to. By default no value will be sent.
|
||||
It is only sent if the option is specified or if rtmp_live
|
||||
is set to live.
|
||||
|
||||
@item rtmp_swfhash
|
||||
SHA256 hash of the decompressed SWF file (32 bytes).
|
||||
|
||||
@item rtmp_swfsize
|
||||
Size of the decompressed SWF file, required for SWFVerification.
|
||||
|
||||
@item rtmp_swfurl
|
||||
URL of the SWF player for the media. By default no value will be sent.
|
||||
|
||||
@item rtmp_swfverify
|
||||
URL to player swf file, compute hash/size automatically.
|
||||
|
||||
@item rtmp_tcurl
|
||||
URL of the target stream. Defaults to proto://host[:port]/app.
|
||||
application specified in @var{app}, may be prefixed by "mp4:".
|
||||
|
||||
@end table
|
||||
|
||||
@@ -309,46 +223,6 @@ For example to read with @command{ffplay} a multimedia resource named
|
||||
ffplay rtmp://myserver/vod/sample
|
||||
@end example
|
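The AMF connection parameters described under @option{rtmp_conn} can be
passed on the command line in the same way; a hypothetical example (the
server name is made up, the AMF values are taken from the description above):
@example
ffplay -rtmp_conn "B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0" rtmp://myserver/vod/sample
@end example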
||||
|
||||
@section rtmpe
|
||||
|
||||
Encrypted Real-Time Messaging Protocol.
|
||||
|
||||
The Encrypted Real-Time Messaging Protocol (RTMPE) is used for
|
||||
streaming multimedia content within standard cryptographic primitives,
|
||||
consisting of Diffie-Hellman key exchange and HMACSHA256, generating
|
||||
a pair of RC4 keys.
|
||||
|
||||
@section rtmps
|
||||
|
||||
Real-Time Messaging Protocol over a secure SSL connection.
|
||||
|
||||
The Real-Time Messaging Protocol (RTMPS) is used for streaming
|
||||
multimedia content across an encrypted connection.
|
||||
|
||||
@section rtmpt
|
||||
|
||||
Real-Time Messaging Protocol tunneled through HTTP.
|
||||
|
||||
The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used
|
||||
for streaming multimedia content within HTTP requests to traverse
|
||||
firewalls.
|
||||
|
||||
@section rtmpte
|
||||
|
||||
Encrypted Real-Time Messaging Protocol tunneled through HTTP.
|
||||
|
||||
The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE)
|
||||
is used for streaming multimedia content within HTTP requests to traverse
|
||||
firewalls.
|
||||
|
||||
@section rtmpts
|
||||
|
||||
Real-Time Messaging Protocol tunneled through HTTPS.
|
||||
|
||||
The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used
|
||||
for streaming multimedia content within HTTPS requests to traverse
|
||||
firewalls.
|
||||
|
||||
@section rtmp, rtmpe, rtmps, rtmpt, rtmpte
|
||||
|
||||
Real-Time Messaging Protocol and its variants supported through
|
||||
@@ -441,8 +315,6 @@ Flags for @code{rtsp_flags}:
|
||||
@table @option
|
||||
@item filter_src
|
||||
Accept packets only from negotiated peer address and port.
|
||||
@item listen
|
||||
Act as a server, listening for an incoming connection.
|
||||
@end table
|
||||
|
||||
When receiving data over UDP, the demuxer tries to reorder received packets
|
||||
@@ -475,12 +347,6 @@ To send a stream in realtime to a RTSP server, for others to watch:
|
||||
ffmpeg -re -i @var{input} -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
|
||||
@end example
|
||||
|
||||
To receive a stream in realtime:
|
||||
|
||||
@example
|
||||
ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp @var{output}
|
||||
@end example
|
||||
|
||||
@section sap
|
||||
|
||||
Session Announcement Protocol (RFC 2974). This is not technically a
|
||||
@@ -587,11 +453,6 @@ tcp://@var{hostname}:@var{port}[?@var{options}]
|
||||
@item listen
|
||||
Listen for an incoming connection
|
||||
|
||||
@item timeout=@var{microseconds}
|
||||
In read mode: if no data arrived in more than this time interval, raise error.
|
||||
In write mode: if socket cannot be written in more than this time interval, raise error.
|
||||
This also sets timeout on TCP connection establishing.
|
||||
|
||||
@example
|
||||
ffmpeg -i @var{input} -f @var{format} tcp://@var{hostname}:@var{port}?listen
|
||||
ffplay tcp://@var{hostname}:@var{port}
|
||||
@@ -599,48 +460,6 @@ ffplay tcp://@var{hostname}:@var{port}
|
||||
|
||||
@end table
|
||||
|
||||
@section tls
|
||||
|
||||
Transport Layer Security/Secure Sockets Layer
|
||||
|
||||
The required syntax for a TLS/SSL url is:
|
||||
@example
|
||||
tls://@var{hostname}:@var{port}[?@var{options}]
|
||||
@end example
|
||||
|
||||
@table @option
|
||||
|
||||
@item listen
|
||||
Act as a server, listening for an incoming connection.
|
||||
|
||||
@item cafile=@var{filename}
|
||||
Certificate authority file. The file must be in OpenSSL PEM format.
|
||||
|
||||
@item cert=@var{filename}
|
||||
Certificate file. The file must be in OpenSSL PEM format.
|
||||
|
||||
@item key=@var{filename}
|
||||
Private key file.
|
||||
|
||||
@item verify=@var{0|1}
|
||||
Verify the peer's certificate.
|
||||
|
||||
@end table
|
||||
|
||||
Example command lines:
|
||||
|
||||
To create a TLS/SSL server that serves an input stream.
|
||||
|
||||
@example
|
||||
ffmpeg -i @var{input} -f @var{format} tls://@var{hostname}:@var{port}?listen&cert=@var{server.crt}&key=@var{server.key}
|
||||
@end example
|
||||
|
||||
To play back a stream from the TLS/SSL server using @command{ffplay}:
|
||||
|
||||
@example
|
||||
ffplay tls://@var{hostname}:@var{port}
|
||||
@end example
|
||||
|
||||
@section udp
|
||||
|
||||
User Datagram Protocol.
|
||||
@@ -650,23 +469,16 @@ The required syntax for a UDP url is:
|
||||
udp://@var{hostname}:@var{port}[?@var{options}]
|
||||
@end example
|
||||
|
||||
@var{options} contains a list of &-separated options of the form @var{key}=@var{val}.
|
||||
|
||||
If threading is enabled on the system, a circular buffer is used
to store the incoming data, which helps reduce data loss due to
UDP socket buffer overruns. The @var{fifo_size} and
@var{overrun_nonfatal} options are related to this buffer.
|
||||
|
||||
The list of supported options follows.
|
||||
@var{options} contains a list of &-seperated options of the form @var{key}=@var{val}.
|
||||
Follow the list of supported options.
|
||||
|
||||
@table @option
|
||||
|
||||
@item buffer_size=@var{size}
|
||||
Set the UDP socket buffer size in bytes. This is used both for the
|
||||
receiving and the sending buffer size.
|
||||
set the UDP buffer size in bytes
|
||||
|
||||
@item localport=@var{port}
|
||||
Override the local UDP port to bind with.
|
||||
override the local UDP port to bind with
|
||||
|
||||
@item localaddr=@var{addr}
|
||||
Choose the local IP address. This is useful e.g. if sending multicast
|
||||
@@ -674,13 +486,13 @@ and the host has multiple interfaces, where the user can choose
|
||||
which interface to send on by specifying the IP address of that interface.
|
||||
|
||||
@item pkt_size=@var{size}
|
||||
Set the size in bytes of UDP packets.
|
||||
set the size in bytes of UDP packets
|
||||
|
||||
@item reuse=@var{1|0}
|
||||
Explicitly allow or disallow reusing UDP sockets.
|
||||
explicitly allow or disallow reusing UDP sockets
|
||||
|
||||
@item ttl=@var{ttl}
|
||||
Set the time to live value (for multicast only).
|
||||
set the time to live value (for multicast only)
|
||||
|
||||
@item connect=@var{1|0}
|
||||
Initialize the UDP socket with @code{connect()}. In this case, the
|
||||
@@ -692,28 +504,9 @@ and makes writes return with AVERROR(ECONNREFUSED) if "destination
|
||||
unreachable" is received.
|
||||
For receiving, this gives the benefit of only receiving packets from
|
||||
the specified peer address/port.
|
||||
|
||||
@item sources=@var{address}[,@var{address}]
|
||||
Only receive packets sent to the multicast group from one of the
|
||||
specified sender IP addresses.
|
||||
|
||||
@item block=@var{address}[,@var{address}]
|
||||
Ignore packets sent to the multicast group from the specified
|
||||
sender IP addresses.
|
||||
|
||||
@item fifo_size=@var{units}
|
||||
Set the UDP receiving circular buffer size, expressed as a number of
|
||||
packets with size of 188 bytes. If not specified defaults to 7*4096.
|
||||
|
||||
@item overrun_nonfatal=@var{1|0}
|
||||
Survive in case of UDP receiving circular buffer overrun. Default
|
||||
value is 0.
|
||||
|
||||
@item timeout=@var{microseconds}
|
||||
In read mode: if no data arrived in more than this time interval, raise error.
|
||||
@end table
|
||||
|
||||
Some usage examples of the UDP protocol with @command{ffmpeg} follow.
|
||||
Some usage examples of the udp protocol with @command{ffmpeg} follow.
|
||||
|
||||
To stream over UDP to a remote endpoint:
|
||||
@example
|
||||
|
@@ -23,7 +23,7 @@ Let's consider the problem of minimizing:
|
||||
|
||||
rate is the filesize
|
||||
distortion is the quality
|
||||
lambda is a fixed value chosen as a tradeoff between quality and filesize
|
||||
lambda is a fixed value choosen as a tradeoff between quality and filesize
|
||||
Is this equivalent to finding the best quality for a given max
|
||||
filesize? The answer is yes. For each filesize limit there is some lambda
|
||||
factor for which minimizing above will get you the best quality (using your
|
||||
|
@@ -31,7 +31,7 @@ Special Converter v
|
||||
v
|
||||
Output
|
||||
|
||||
Planar/Packed conversion is done when needed during sample format conversion.
|
||||
Planar/Packed convertion is done when needed during sample format convertion
|
||||
Every step can be skipped without a memcpy when it is not needed.
Either resampling or rematrixing can be performed first, depending on which
way is faster.
|
||||
@@ -39,8 +39,8 @@ The buffers are needed for resampling, since resampling is a process that
requires future and past data; it thus inevitably introduces a delay when
used.
|
||||
Internally, 32-bit float and 16-bit int are currently supported; other formats can
easily be added.
|
||||
easily be added
|
||||
Externally all sample formats in packed and planar configuration are supported
|
||||
It's also trivial to add special converters for common cases.
|
||||
If only sample format and/or packed/planar conversion is needed, it
|
||||
Its also trivial to add special converters for common cases
|
||||
If only sample format and or packed/planar convertion is needed it
|
||||
is performed from input to output directly in a single pass with no intermediates.
|
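A minimal sketch of driving this pipeline through the public libswresample
API (illustrative only; error handling is trimmed and the chosen layouts,
formats and rates are arbitrary):

    #include "libswresample/swresample.h"
    #include "libavutil/channel_layout.h"
    #include "libavutil/samplefmt.h"

    static int convert_example(void)
    {
        /* packed 16-bit stereo @ 44100 Hz -> planar float mono @ 48000 Hz */
        SwrContext *swr = swr_alloc_set_opts(NULL,
                AV_CH_LAYOUT_MONO,   AV_SAMPLE_FMT_FLTP, 48000,  /* output */
                AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16,  44100,  /* input  */
                0, NULL);
        if (!swr || swr_init(swr) < 0)
            return -1;
        /* ... swr_convert(swr, out_planes, out_space, in_planes, in_count); ... */
        swr_free(&swr);
        return 0;
    }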
||||
|
@@ -58,7 +58,7 @@ Input to YUV Converter
|
||||
|
||||
Horizontal scaler
|
||||
There are several horizontal scalers. A special case worth mentioning is
|
||||
the fast bilinear scaler that is made of runtime-generated MMXEXT code
|
||||
the fast bilinear scaler that is made of runtime-generated MMX2 code
|
||||
using specially tuned pshufw instructions.
|
||||
The remaining scalers are specially-tuned for various filter lengths.
|
||||
They scale 8-bit unsigned planar data to 16-bit signed planar data.
|
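A rough C sketch of what such a reference horizontal scaler does (an
illustration of the idea, not the actual libswscale code; names are made up):

    #include <stdint.h>

    /* For each output sample, sum filter_size 8-bit inputs weighted by the
     * per-position filter coefficients, producing a signed 16-bit result. */
    static void h_scale_ref(int16_t *dst, int dst_w, const uint8_t *src,
                            const int16_t *filter, const int *filter_pos,
                            int filter_size)
    {
        for (int i = 0; i < dst_w; i++) {
            int val = 0;
            for (int j = 0; j < filter_size; j++)
                val += src[filter_pos[i] + j] * filter[filter_size * i + j];
            /* keep the intermediate within the signed 16-bit planar range */
            dst[i] = val >> 7 > 32767 ? 32767 : val >> 7;
        }
    }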
||||
|
@@ -1,81 +1,9 @@
|
||||
@chapter Syntax
|
||||
@c man begin SYNTAX
|
||||
|
||||
This section documents the syntax and formats employed by the FFmpeg
|
||||
libraries and tools.
|
||||
|
||||
@anchor{quoting_and_escaping}
|
||||
@section Quoting and escaping
|
||||
|
||||
FFmpeg adopts the following quoting and escaping mechanism, unless
|
||||
explicitly specified. The following rules are applied:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
@code{'} and @code{\} are special characters (respectively used for
|
||||
quoting and escaping). In addition to them, there might be other
|
||||
special characters depending on the specific syntax where the escaping
|
||||
and quoting are employed.
|
||||
|
||||
@item
|
||||
A special character is escaped by prefixing it with a '\'.
|
||||
|
||||
@item
|
||||
All characters enclosed between '' are included literally in the
|
||||
parsed string. The quote character @code{'} itself cannot be quoted,
|
||||
so you may need to close the quote and escape it.
|
||||
|
||||
@item
|
||||
Leading and trailing whitespace, unless escaped or quoted, is
removed from the parsed string.
|
||||
@end itemize
|
||||
|
||||
Note that you may need to add a second level of escaping when using
|
||||
the command line or a script, which depends on the syntax of the
|
||||
adopted shell language.
|
||||
|
||||
The function @code{av_get_token} defined in
|
||||
@file{libavutil/avstring.h} can be used to parse a token quoted or
|
||||
escaped according to the rules defined above.
|
||||
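For instance, a small C program using it might look like this (a sketch,
assuming a program compiled against libavutil; error handling omitted):
@example
#include <stdio.h>
#include "libavutil/avstring.h"
#include "libavutil/mem.h"

int main(void)
@{
    const char *p = "'Crime d'\\''Amour' trailing text";
    char *tok = av_get_token(&p, " ");  /* stop at the first unquoted space */
    printf("%s\n", tok);                /* prints: Crime d'Amour */
    av_free(tok);
    return 0;
@}
@end example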
|
||||
The tool @file{tools/ffescape} in the FFmpeg source tree can be used
|
||||
to automatically quote or escape a string in a script.
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Escape the string @code{Crime d'Amour} containing the @code{'} special
|
||||
character:
|
||||
@example
|
||||
Crime d\'Amour
|
||||
@end example
|
||||
|
||||
@item
|
||||
The string above contains a quote, so the @code{'} needs to be escaped
|
||||
when quoting it:
|
||||
@example
|
||||
'Crime d'\''Amour'
|
||||
@end example
|
||||
|
||||
@item
|
||||
Include leading or trailing whitespaces using quoting:
|
||||
@example
|
||||
' this string starts and ends with whitespaces '
|
||||
@end example
|
||||
|
||||
@item
|
||||
Escaping and quoting can be mixed together:
|
||||
@example
|
||||
' The string '\'string\'' is a string '
|
||||
@end example
|
||||
|
||||
@item
|
||||
To include a literal @code{\} you can use either escaping or quoting:
|
||||
@example
|
||||
'c:\foo' can be written as c:\\foo
|
||||
@end example
|
||||
@end itemize
|
||||
When evaluating specific formats, FFmpeg uses internal library parsing
|
||||
functions, shared by the tools. This section documents the syntax of
|
||||
some of these formats.
|
||||
|
||||
@anchor{date syntax}
|
||||
@section Date
|
||||
@@ -216,7 +144,7 @@ The undefined value can be expressed using the "0:0" string.
|
||||
@section Color
|
||||
|
||||
It can be the name of a color (case insensitive match) or a
|
||||
[0x|#]RRGGBB[AA] sequence, possibly followed by "@@" and a string
|
||||
[0x|#]RRGGBB[AA] sequence, possibly followed by "@" and a string
|
||||
representing the alpha component.
|
||||
|
||||
The alpha component may be a string composed by "0x" followed by an
|
||||
|
doc/t2h.init
@@ -6,18 +6,73 @@ sub FFmpeg_end_section($$)
|
||||
|
||||
$EXTRA_HEAD =
|
||||
'<link rel="icon" href="favicon.png" type="image/png" />
|
||||
<link rel="stylesheet" type="text/css" href="default.css" />
|
||||
';
|
||||
|
||||
$CSS_LINES = $ENV{"FFMPEG_CSS"} || <<EOT;
|
||||
<link rel="stylesheet" type="text/css" href="default.css" />
|
||||
$CSS_LINES = <<EOT;
|
||||
<style type="text/css">
|
||||
<!--
|
||||
a.summary-letter { text-decoration: none }
|
||||
a { color: #2D6198; }
|
||||
a:visited { color: #884488; }
|
||||
h1 a, h2 a, h3 a { text-decoration: inherit; color: inherit; }
|
||||
p { margin-left: 1em; margin-right: 1em; }
|
||||
table { margin-left: 2em; }
|
||||
pre { margin-left: 2em; }
|
||||
#footer { text-align: center; }
|
||||
#body { margin-left: 1em; margin-right: 1em; }
|
||||
body { background-color: #313131; margin: 0; }
|
||||
|
||||
#container {
|
||||
background-color: white;
|
||||
color: #202020;
|
||||
margin-left: 1em;
|
||||
margin-right: 1em;
|
||||
}
|
||||
|
||||
h1 {
|
||||
background-color: #7BB37B;
|
||||
border: 1px solid #6A996A;
|
||||
color: #151515;
|
||||
font-size: 1.2em;
|
||||
padding-bottom: 0.2em;
|
||||
padding-left: 0.4em;
|
||||
padding-top: 0.2em;
|
||||
}
|
||||
|
||||
h2 {
|
||||
color: #313131;
|
||||
font-size: 1.2em;
|
||||
}
|
||||
|
||||
h3 {
|
||||
color: #313131;
|
||||
font-size: 0.8em;
|
||||
margin-bottom: -8px;
|
||||
}
|
||||
|
||||
.note {
|
||||
margin: 1em;
|
||||
border: 1px solid #bbc9d8;
|
||||
background-color: #dde1e1;
|
||||
}
|
||||
|
||||
.important {
|
||||
margin: 1em;
|
||||
border: 1px solid #d26767;
|
||||
background-color: #f8e1e1;
|
||||
}
|
||||
|
||||
-->
|
||||
</style>
|
||||
EOT
|
||||
|
||||
my $TEMPLATE_HEADER = $ENV{"FFMPEG_HEADER"} || <<EOT;
|
||||
<link rel="icon" href="favicon.png" type="image/png" />
|
||||
</head>
|
||||
<body>
|
||||
<div id="container">
|
||||
EOT
|
||||
my $FFMPEG_NAVBAR = $ENV{"FFMPEG_NAVBAR"} || '';
|
||||
|
||||
$AFTER_BODY_OPEN =
|
||||
'<div id="container">' .
|
||||
"\n$FFMPEG_NAVBAR\n" .
|
||||
'<div id="body">';
|
||||
|
||||
$PRE_BODY_CLOSE = '</div></div>';
|
||||
|
||||
@@ -28,11 +83,9 @@ $print_page_foot = \&FFmpeg_print_page_foot;
|
||||
sub FFmpeg_print_page_foot($$)
|
||||
{
|
||||
my $fh = shift;
|
||||
my $program_string = defined &T2H_DEFAULT_program_string ?
|
||||
T2H_DEFAULT_program_string() : program_string();
|
||||
print $fh '<footer class="footer pagination-right">' . "\n";
|
||||
print $fh '<span class="label label-info">' . $program_string;
|
||||
print $fh "</span></footer></div>\n";
|
||||
print $fh '<div id="footer">' . "\n";
|
||||
T2H_DEFAULT_print_page_foot($fh);
|
||||
print $fh "</div>\n";
|
||||
}
|
||||
|
||||
$float = \&FFmpeg_float;
|
||||
@@ -54,11 +107,11 @@ sub FFmpeg_float($$$$)
|
||||
|
||||
if ($caption =~ /NOTE/)
|
||||
{
|
||||
$class = "alert alert-info";
|
||||
$class = "note";
|
||||
}
|
||||
elsif ($caption =~ /IMPORTANT/)
|
||||
{
|
||||
$class = "alert alert-warning";
|
||||
$class = "important";
|
||||
}
|
||||
|
||||
return '<div class="float ' . $class . '">' . "$label\n" . $text . '</div>';
|
||||
@@ -81,7 +134,7 @@ sub FFmpeg_print_page_head($$)
|
||||
$longtitle = "FFmpeg documentation : " . $longtitle;
|
||||
|
||||
print $fh <<EOT;
|
||||
<!DOCTYPE html>
|
||||
$DOCTYPE
|
||||
<html>
|
||||
$Texi2HTML::THISDOC{'copying'}<!-- Created on $Texi2HTML::THISDOC{today} by $Texi2HTML::THISDOC{program} -->
|
||||
<!--
|
||||
@@ -97,13 +150,14 @@ $description
|
||||
<meta name="Generator" content="$Texi2HTML::THISDOC{program}">
|
||||
$encoding
|
||||
$CSS_LINES
|
||||
$TEMPLATE_HEADER
|
||||
$EXTRA_HEAD
|
||||
</head>
|
||||
|
||||
<body $BODYTEXT>
|
||||
$AFTER_BODY_OPEN
|
||||
EOT
|
||||
}
|
||||
|
||||
# declare encoding in header
|
||||
$IN_ENCODING = $ENCODING = "utf-8";
|
||||
|
||||
# no navigation elements
|
||||
$SECTION_NAVIGATION = 0;
|
||||
# the same for texi2html 5.0
|
||||
|
@@ -1,4 +1,4 @@
|
||||
#! /usr/bin/perl
|
||||
#! /usr/bin/perl -w
|
||||
|
||||
# Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
|
||||
|
||||
@@ -23,13 +23,11 @@
|
||||
# markup to Perl POD format. It's intended to be used to extract
|
||||
# something suitable for a manpage from a Texinfo document.
|
||||
|
||||
use warnings;
|
||||
|
||||
$output = 0;
|
||||
$skipping = 0;
|
||||
%chapters = ();
|
||||
@chapters_sequence = ();
|
||||
$chapter = "";
|
||||
%sects = ();
|
||||
@sects_sequence = ();
|
||||
$section = "";
|
||||
@icstack = ();
|
||||
@endwstack = ();
|
||||
@skstack = ();
|
||||
@@ -116,24 +114,18 @@ INF: while(<$inf>) {
|
||||
die "cannot open $1: $!\n";
|
||||
};
|
||||
|
||||
/^\@chapter\s+([A-Za-z ]+)/ and do {
|
||||
# close old chapter
|
||||
$chapters{$chapter_name} .= postprocess($chapter) if ($chapter_name);
|
||||
|
||||
# start new chapter
|
||||
$chapter_name = $1, push (@chapters_sequence, $chapter_name);
|
||||
$chapters{$chapter_name} = "" unless exists $chapters{$chapter_name};
|
||||
$chapter = "";
|
||||
$output = 1;
|
||||
# Look for blocks surrounded by @c man begin SECTION ... @c man end.
|
||||
# This really oughta be @ifman ... @end ifman and the like, but such
|
||||
# would require rev'ing all other Texinfo translators.
|
||||
/^\@c\s+man\s+begin\s+([A-Za-z ]+)/ and $sect = $1, push (@sects_sequence, $sect), $output = 1, next;
|
||||
/^\@c\s+man\s+end/ and do {
|
||||
$sects{$sect} = "" unless exists $sects{$sect};
|
||||
$sects{$sect} .= postprocess($section);
|
||||
$section = "";
|
||||
$output = 0;
|
||||
next;
|
||||
};
|
||||
|
||||
/^\@bye/ and do {
|
||||
# close old chapter
|
||||
$chapters{$chapter_name} .= postprocess($chapter) if ($chapter_name);
|
||||
last INF;
|
||||
};
|
||||
|
||||
# handle variables
|
||||
/^\@set\s+([a-zA-Z0-9_-]+)\s*(.*)$/ and do {
|
||||
$defs{$1} = $2;
|
||||
@@ -156,14 +148,14 @@ INF: while(<$inf>) {
|
||||
# Ignore @end foo, where foo is not an operation which may
|
||||
# cause us to skip, if we are presently skipping.
|
||||
my $ended = $1;
|
||||
next if $skipping && $ended !~ /^(?:ifset|ifclear|ignore|menu|iftex|ifhtml|ifnothtml)$/;
|
||||
next if $skipping && $ended !~ /^(?:ifset|ifclear|ignore|menu|iftex)$/;
|
||||
|
||||
die "\@end $ended without \@$ended at line $.\n" unless defined $endw;
|
||||
die "\@$endw ended by \@end $ended at line $.\n" unless $ended eq $endw;
|
||||
|
||||
$endw = pop @endwstack;
|
||||
|
||||
if ($ended =~ /^(?:ifset|ifclear|ignore|menu|iftex|ifhtml|ifnothtml)$/) {
|
||||
if ($ended =~ /^(?:ifset|ifclear|ignore|menu|iftex)$/) {
|
||||
$skipping = pop @skstack;
|
||||
next;
|
||||
} elsif ($ended =~ /^(?:example|smallexample|display)$/) {
|
||||
@@ -196,11 +188,11 @@ INF: while(<$inf>) {
|
||||
next;
|
||||
};
|
||||
|
||||
/^\@(ignore|menu|iftex|ifhtml|ifnothtml)\b/ and do {
|
||||
/^\@(ignore|menu|iftex)\b/ and do {
|
||||
push @endwstack, $endw;
|
||||
push @skstack, $skipping;
|
||||
$endw = $1;
|
||||
$skipping = $endw !~ /ifnothtml/;
|
||||
$skipping = 1;
|
||||
next;
|
||||
};
|
||||
|
||||
@@ -217,6 +209,7 @@ INF: while(<$inf>) {
|
||||
s/\@TeX\{\}/TeX/g;
|
||||
s/\@pounds\{\}/\#/g;
|
||||
s/\@minus(?:\{\})?/-/g;
|
||||
s/\\,/,/g;
|
||||
|
||||
# Now the ones that have to be replaced by special escapes
|
||||
# (which will be turned back into text by unmunge())
|
||||
@@ -274,7 +267,7 @@ INF: while(<$inf>) {
|
||||
push @icstack, $ic;
|
||||
$endw = $1;
|
||||
$ic = $2;
|
||||
$ic =~ s/\@(?:samp|strong|key|gcctabopt|option|env|command)/B/;
|
||||
$ic =~ s/\@(?:samp|strong|key|gcctabopt|option|env)/B/;
|
||||
$ic =~ s/\@(?:code|kbd)/C/;
|
||||
$ic =~ s/\@(?:dfn|var|emph|cite|i)/I/;
|
||||
$ic =~ s/\@(?:file)/F/;
|
||||
@@ -299,7 +292,7 @@ INF: while(<$inf>) {
|
||||
}
|
||||
};
|
||||
|
||||
$chapter .= $shift.$_."\n";
|
||||
$section .= $shift.$_."\n";
|
||||
}
|
||||
# End of current file.
|
||||
close($inf);
|
||||
@@ -308,15 +301,16 @@ $inf = pop @instack;
|
||||
|
||||
die "No filename or title\n" unless defined $fn && defined $tl;
|
||||
|
||||
$chapters{NAME} = "$fn \- $tl\n";
|
||||
$chapters{FOOTNOTES} .= "=back\n" if exists $chapters{FOOTNOTES};
|
||||
$sects{NAME} = "$fn \- $tl\n";
|
||||
$sects{FOOTNOTES} .= "=back\n" if exists $sects{FOOTNOTES};
|
||||
|
||||
unshift @chapters_sequence, "NAME";
|
||||
for $chapter (@chapters_sequence) {
|
||||
if (exists $chapters{$chapter}) {
|
||||
$head = uc($chapter);
|
||||
unshift @sects_sequence, "NAME";
|
||||
for $sect (@sects_sequence) {
|
||||
if(exists $sects{$sect}) {
|
||||
$head = $sect;
|
||||
$head =~ s/SEEALSO/SEE ALSO/;
|
||||
print "=head1 $head\n\n";
|
||||
print scalar unmunge ($chapters{$chapter});
|
||||
print scalar unmunge ($sects{$sect});
|
||||
print "\n";
|
||||
}
|
||||
}
|
||||
@@ -370,7 +364,7 @@ sub postprocess
|
||||
# @uref can take one, two, or three arguments, with different
|
||||
# semantics each time. @url and @email are just like @uref with
|
||||
# one argument, for our purposes.
|
||||
s/\@(?:uref|url|email)\{([^\},]*),?[^\}]*\}/<B<$1>>/g;
|
||||
s/\@(?:uref|url|email)\{([^\},]*)\}/<B<$1>>/g;
|
||||
s/\@uref\{([^\},]*),([^\},]*)\}/$2 (C<$1>)/g;
|
||||
s/\@uref\{([^\},]*),([^\},]*),([^\},]*)\}/$3/g;
|
||||
|
||||
@@ -414,13 +408,13 @@ sub unmunge
|
||||
|
||||
sub add_footnote
|
||||
{
|
||||
unless (exists $chapters{FOOTNOTES}) {
|
||||
$chapters{FOOTNOTES} = "\n=over 4\n\n";
|
||||
unless (exists $sects{FOOTNOTES}) {
|
||||
$sects{FOOTNOTES} = "\n=over 4\n\n";
|
||||
}
|
||||
|
||||
$chapters{FOOTNOTES} .= "=item $fnno.\n\n"; $fnno++;
|
||||
$chapters{FOOTNOTES} .= $_[0];
|
||||
$chapters{FOOTNOTES} .= "\n\n";
|
||||
$sects{FOOTNOTES} .= "=item $fnno.\n\n"; $fnno++;
|
||||
$sects{FOOTNOTES} .= $_[0];
|
||||
$sects{FOOTNOTES} .= "\n\n";
|
||||
}
|
||||
|
||||
# stolen from Symbol.pm
|
||||
|
@@ -85,8 +85,8 @@ here are some edges we could choose from:
|
||||
/ \
|
||||
O-----2--4--O
|
||||
|
||||
Finding the new best paths and scores for each point of our new column is
|
||||
trivial given we know the previous column best paths and scores:
|
||||
Finding the new best pathes and scores for each point of our new column is
|
||||
trivial given we know the previous column best pathes and scores:
|
||||
|
||||
O-----0-----8
|
||||
\
|
||||
|
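That column update can be sketched in C as follows (an illustration of the
idea only; the state count and names are made up):

    #define NSTATES   4
    #define SCORE_INF (1 << 29)   /* larger than any real score */

    static void viterbi_column_update(const int prev_score[NSTATES],
                                      const int edge_cost[NSTATES][NSTATES],
                                      int new_score[NSTATES],
                                      int best_prev[NSTATES])
    {
        /* For each point i of the new column, pick the predecessor j that
         * minimizes (previous best score + cost of the edge j -> i). */
        for (int i = 0; i < NSTATES; i++) {
            new_score[i] = SCORE_INF;
            for (int j = 0; j < NSTATES; j++) {
                int s = prev_score[j] + edge_cost[j][i];
                if (s < new_score[i]) {
                    new_score[i] = s;
                    best_prev[i] = j;   /* remembered for backtracking */
                }
            }
        }
    }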
ffmpeg.h
@@ -1,420 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef FFMPEG_H
|
||||
#define FFMPEG_H
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
#include <signal.h>
|
||||
|
||||
#if HAVE_PTHREADS
|
||||
#include <pthread.h>
|
||||
#endif
|
||||
|
||||
#include "cmdutils.h"
|
||||
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavformat/avio.h"
|
||||
|
||||
#include "libavcodec/avcodec.h"
|
||||
|
||||
#include "libavfilter/avfilter.h"
|
||||
#include "libavfilter/avfiltergraph.h"
|
||||
|
||||
#include "libavutil/avutil.h"
|
||||
#include "libavutil/dict.h"
|
||||
#include "libavutil/fifo.h"
|
||||
#include "libavutil/pixfmt.h"
|
||||
#include "libavutil/rational.h"
|
||||
|
||||
#include "libswresample/swresample.h"
|
||||
|
||||
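/* video sync methods, cf. the -vsync option: auto-select, pass timestamps
 * through unchanged, constant frame rate (duplicate/drop frames as needed),
 * variable frame rate, or drop timestamps entirely */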
#define VSYNC_AUTO -1
|
||||
#define VSYNC_PASSTHROUGH 0
|
||||
#define VSYNC_CFR 1
|
||||
#define VSYNC_VFR 2
|
||||
#define VSYNC_DROP 0xff
|
||||
|
||||
#define MAX_STREAMS 1024 /* arbitrary sanity check value */
|
||||
|
||||
/* select an input stream for an output stream */
|
||||
typedef struct StreamMap {
|
||||
int disabled; /* 1 if this mapping is disabled by a negative map */
|
||||
int file_index;
|
||||
int stream_index;
|
||||
int sync_file_index;
|
||||
int sync_stream_index;
|
||||
char *linklabel; /* name of an output link, for mapping lavfi outputs */
|
||||
} StreamMap;
|
||||
|
||||
typedef struct {
|
||||
int file_idx, stream_idx, channel_idx; // input
|
||||
int ofile_idx, ostream_idx; // output
|
||||
} AudioChannelMap;
|
||||
|
||||
typedef struct OptionsContext {
|
||||
OptionGroup *g;
|
||||
|
||||
/* input/output options */
|
||||
int64_t start_time;
|
||||
const char *format;
|
||||
|
||||
SpecifierOpt *codec_names;
|
||||
int nb_codec_names;
|
||||
SpecifierOpt *audio_channels;
|
||||
int nb_audio_channels;
|
||||
SpecifierOpt *audio_sample_rate;
|
||||
int nb_audio_sample_rate;
|
||||
SpecifierOpt *frame_rates;
|
||||
int nb_frame_rates;
|
||||
SpecifierOpt *frame_sizes;
|
||||
int nb_frame_sizes;
|
||||
SpecifierOpt *frame_pix_fmts;
|
||||
int nb_frame_pix_fmts;
|
||||
|
||||
/* input options */
|
||||
int64_t input_ts_offset;
|
||||
int rate_emu;
|
||||
|
||||
SpecifierOpt *ts_scale;
|
||||
int nb_ts_scale;
|
||||
SpecifierOpt *dump_attachment;
|
||||
int nb_dump_attachment;
|
||||
|
||||
/* output options */
|
||||
StreamMap *stream_maps;
|
||||
int nb_stream_maps;
|
||||
AudioChannelMap *audio_channel_maps; /* one info entry per -map_channel */
|
||||
int nb_audio_channel_maps; /* number of (valid) -map_channel settings */
|
||||
int metadata_global_manual;
|
||||
int metadata_streams_manual;
|
||||
int metadata_chapters_manual;
|
||||
const char **attachments;
|
||||
int nb_attachments;
|
||||
|
||||
int chapters_input_file;
|
||||
|
||||
int64_t recording_time;
|
||||
uint64_t limit_filesize;
|
||||
float mux_preload;
|
||||
float mux_max_delay;
|
||||
int shortest;
|
||||
|
||||
int video_disable;
|
||||
int audio_disable;
|
||||
int subtitle_disable;
|
||||
int data_disable;
|
||||
|
||||
/* indexed by output file stream index */
|
||||
int *streamid_map;
|
||||
int nb_streamid_map;
|
||||
|
||||
SpecifierOpt *metadata;
|
||||
int nb_metadata;
|
||||
SpecifierOpt *max_frames;
|
||||
int nb_max_frames;
|
||||
SpecifierOpt *bitstream_filters;
|
||||
int nb_bitstream_filters;
|
||||
SpecifierOpt *codec_tags;
|
||||
int nb_codec_tags;
|
||||
SpecifierOpt *sample_fmts;
|
||||
int nb_sample_fmts;
|
||||
SpecifierOpt *qscale;
|
||||
int nb_qscale;
|
||||
SpecifierOpt *forced_key_frames;
|
||||
int nb_forced_key_frames;
|
||||
SpecifierOpt *force_fps;
|
||||
int nb_force_fps;
|
||||
SpecifierOpt *frame_aspect_ratios;
|
||||
int nb_frame_aspect_ratios;
|
||||
SpecifierOpt *rc_overrides;
|
||||
int nb_rc_overrides;
|
||||
SpecifierOpt *intra_matrices;
|
||||
int nb_intra_matrices;
|
||||
SpecifierOpt *inter_matrices;
|
||||
int nb_inter_matrices;
|
||||
SpecifierOpt *top_field_first;
|
||||
int nb_top_field_first;
|
||||
SpecifierOpt *metadata_map;
|
||||
int nb_metadata_map;
|
||||
SpecifierOpt *presets;
|
||||
int nb_presets;
|
||||
SpecifierOpt *copy_initial_nonkeyframes;
|
||||
int nb_copy_initial_nonkeyframes;
|
||||
SpecifierOpt *copy_prior_start;
|
||||
int nb_copy_prior_start;
|
||||
SpecifierOpt *filters;
|
||||
int nb_filters;
|
||||
SpecifierOpt *reinit_filters;
|
||||
int nb_reinit_filters;
|
||||
SpecifierOpt *fix_sub_duration;
|
||||
int nb_fix_sub_duration;
|
||||
SpecifierOpt *pass;
|
||||
int nb_pass;
|
||||
SpecifierOpt *passlogfiles;
|
||||
int nb_passlogfiles;
|
||||
} OptionsContext;

typedef struct InputFilter {
    AVFilterContext *filter;
    struct InputStream *ist;
    struct FilterGraph *graph;
    uint8_t *name;
} InputFilter;

typedef struct OutputFilter {
    AVFilterContext *filter;
    struct OutputStream *ost;
    struct FilterGraph *graph;
    uint8_t *name;

    /* temporary storage until stream maps are processed */
    AVFilterInOut *out_tmp;
} OutputFilter;

typedef struct FilterGraph {
    int index;
    const char *graph_desc;

    AVFilterGraph *graph;

    InputFilter **inputs;
    int nb_inputs;
    OutputFilter **outputs;
    int nb_outputs;
} FilterGraph;

typedef struct InputStream {
    int file_index;
    AVStream *st;
    int discard;          /* true if stream data should be discarded */
    int decoding_needed;  /* true if the packets must be decoded in 'raw_fifo' */
    AVCodec *dec;
    AVFrame *decoded_frame;

    int64_t start;        /* time when read started */
    /* predicted dts of the next packet read for this stream or (when there are
     * several frames in a packet) of the next frame in current packet (in AV_TIME_BASE units) */
    int64_t next_dts;
    int64_t dts;          ///< dts of the last packet read for this stream (in AV_TIME_BASE units)

    int64_t next_pts;     ///< synthetic pts for the next decode frame (in AV_TIME_BASE units)
    int64_t pts;          ///< current pts of the decoded frame (in AV_TIME_BASE units)
    int wrap_correction_done;

    int64_t filter_in_rescale_delta_last;

    double ts_scale;
    int is_start;         /* is 1 at the start and after a discontinuity */
    int saw_first_ts;
    int showed_multi_packet_warning;
    AVDictionary *opts;
    AVRational framerate; /* framerate forced with -r */
    int top_field_first;

    int resample_height;
    int resample_width;
    int resample_pix_fmt;

    int resample_sample_fmt;
    int resample_sample_rate;
    int resample_channels;
    uint64_t resample_channel_layout;

    int fix_sub_duration;
    struct { /* previous decoded subtitle and related variables */
        int got_output;
        int ret;
        AVSubtitle subtitle;
    } prev_sub;

    struct sub2video {
        int64_t last_pts;
        int64_t end_pts;
        AVFilterBufferRef *ref;
        int w, h;
    } sub2video;

    /* a pool of free buffers for decoded data */
    FrameBuffer *buffer_pool;
    int dr1;

    /* decoded data from this stream goes into all those filters
     * currently video and audio only */
    InputFilter **filters;
    int nb_filters;

    int reinit_filters;
} InputStream;

typedef struct InputFile {
    AVFormatContext *ctx;
    int eof_reached;     /* true if eof reached */
    int eagain;          /* true if last read attempt returned EAGAIN */
    int ist_index;       /* index of first stream in input_streams */
    int64_t ts_offset;
    int nb_streams;      /* number of stream that ffmpeg is aware of; may be different
                            from ctx.nb_streams if new streams appear during av_read_frame() */
    int nb_streams_warn; /* number of streams that the user was warned of */
    int rate_emu;

#if HAVE_PTHREADS
    pthread_t thread;          /* thread reading from this file */
    int finished;              /* the thread has exited */
    int joined;                /* the thread has been joined */
    pthread_mutex_t fifo_lock; /* lock for access to fifo */
    pthread_cond_t fifo_cond;  /* the main thread will signal on this cond after reading from fifo */
    AVFifoBuffer *fifo;        /* demuxed packets are stored here; freed by the main thread */
#endif
} InputFile;

typedef struct OutputStream {
    int file_index;       /* file index */
    int index;            /* stream index in the output file */
    int source_index;     /* InputStream index */
    AVStream *st;         /* stream in the output file */
    int encoding_needed;  /* true if encoding needed for this stream */
    int frame_number;
    /* input pts and corresponding output pts
       for A/V sync */
    struct InputStream *sync_ist; /* input stream to sync against */
    int64_t sync_opts;    /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
    /* pts of the first frame encoded for this stream, used for limiting
     * recording time */
    int64_t first_pts;
    AVBitStreamFilterContext *bitstream_filters;
    AVCodec *enc;
    int64_t max_frames;
    AVFrame *filtered_frame;

    /* video only */
    AVRational frame_rate;
    int force_fps;
    int top_field_first;

    float frame_aspect_ratio;

    /* forced key frames */
    int64_t *forced_kf_pts;
    int forced_kf_count;
    int forced_kf_index;
    char *forced_keyframes;

    /* audio only */
    int audio_channels_map[SWR_CH_MAX]; /* list of the channels id to pick from the source stream */
    int audio_channels_mapped;          /* number of channels in audio_channels_map */

    char *logfile_prefix;
    FILE *logfile;

    OutputFilter *filter;
    char *avfilter;

    int64_t sws_flags;
    int64_t swr_filter_type;
    int64_t swr_dither_method;
    double swr_dither_scale;
    AVDictionary *opts;
    int finished;    /* no more packets should be written for this stream */
    int unavailable; /* true if the stream is unavailable (possibly temporarily) */
    int stream_copy;
    const char *attachment_filename;
    int copy_initial_nonkeyframes;
    int copy_prior_start;

    int keep_pix_fmt;
} OutputStream;

typedef struct OutputFile {
    AVFormatContext *ctx;
    AVDictionary *opts;
    int ost_index;           /* index of the first stream in output_streams */
    int64_t recording_time;  ///< desired length of the resulting file in microseconds == AV_TIME_BASE units
    int64_t start_time;      ///< start time in microseconds == AV_TIME_BASE units
    uint64_t limit_filesize; /* filesize limit expressed in bytes */

    int shortest;
} OutputFile;

extern InputStream **input_streams;
extern int nb_input_streams;
extern InputFile **input_files;
extern int nb_input_files;

extern OutputStream **output_streams;
extern int nb_output_streams;
extern OutputFile **output_files;
extern int nb_output_files;

extern FilterGraph **filtergraphs;
extern int nb_filtergraphs;

extern char *vstats_filename;

extern float audio_drift_threshold;
extern float dts_delta_threshold;
extern float dts_error_threshold;

extern int audio_volume;
extern int audio_sync_method;
extern int video_sync_method;
extern int do_benchmark;
extern int do_benchmark_all;
extern int do_deinterlace;
extern int do_hex_dump;
extern int do_pkt_dump;
extern int copy_ts;
extern int copy_tb;
extern int debug_ts;
extern int exit_on_error;
extern int print_stats;
extern int qp_hist;
extern int stdin_interaction;
extern int frame_bits_per_raw_sample;
extern AVIOContext *progress_avio;

extern const AVIOInterruptCB int_cb;

extern const OptionDef options[];

void term_init(void);
void term_exit(void);

void reset_options(OptionsContext *o, int is_input);
void show_usage(void);

void opt_output_file(void *optctx, const char *filename);

void assert_avoptions(AVDictionary *m);

int guess_input_channel_layout(InputStream *ist);

enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum AVPixelFormat target);
void choose_sample_fmt(AVStream *st, AVCodec *codec);

int configure_filtergraph(FilterGraph *fg);
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out);
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist);
FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost);

int ffmpeg_parse_options(int argc, char **argv);

#endif /* FFMPEG_H */
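Note on the OptionsContext arrays above: each SpecifierOpt pointer comes with an nb_ counter, and the values are resolved per output stream by matching their stream specifiers. The sketch below illustrates that pattern only; it reuses check_stream_specifier() (which appears later in this diff) and assumes a SpecifierOpt with specifier and u.str fields, so treat the field names and the helper itself as hypothetical, not as code from the tree.

/* Hypothetical helper: pick the last matching -metadata value for a stream. */
static const char *match_metadata_opt(const OptionsContext *o,
                                      AVFormatContext *s, AVStream *st)
{
    const char *value = NULL;
    for (int i = 0; i < o->nb_metadata; i++) {
        /* check_stream_specifier() returns 1 on match, 0 on mismatch, <0 on error */
        if (check_stream_specifier(s, st, o->metadata[i].specifier) > 0)
            value = (const char *)o->metadata[i].u.str;
    }
    return value;
}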
790 ffmpeg_filter.c
@@ -1,790 +0,0 @@
/*
 * ffmpeg filter configuration
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "ffmpeg.h"

#include "libavfilter/avfilter.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"

#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixfmt.h"
#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"

enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum AVPixelFormat target)
{
    if (codec && codec->pix_fmts) {
        const enum AVPixelFormat *p = codec->pix_fmts;
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
        int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
        enum AVPixelFormat best = AV_PIX_FMT_NONE;
        if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
            if (st->codec->codec_id == AV_CODEC_ID_MJPEG) {
                p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
            } else if (st->codec->codec_id == AV_CODEC_ID_LJPEG) {
                p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
                                                   AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
            }
        }
        for (; *p != AV_PIX_FMT_NONE; p++) {
            best = avcodec_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
            if (*p == target)
                break;
        }
        if (*p == AV_PIX_FMT_NONE) {
            if (target != AV_PIX_FMT_NONE)
                av_log(NULL, AV_LOG_WARNING,
                       "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
                       av_get_pix_fmt_name(target),
                       codec->name,
                       av_get_pix_fmt_name(best));
            return best;
        }
    }
    return target;
}

void choose_sample_fmt(AVStream *st, AVCodec *codec)
{
    if (codec && codec->sample_fmts) {
        const enum AVSampleFormat *p = codec->sample_fmts;
        for (; *p != -1; p++) {
            if (*p == st->codec->sample_fmt)
                break;
        }
        if (*p == -1) {
            if ((codec->capabilities & CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codec->sample_fmt) > av_get_sample_fmt_name(codec->sample_fmts[0]))
                av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
            if (av_get_sample_fmt_name(st->codec->sample_fmt))
                av_log(NULL, AV_LOG_WARNING,
                       "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
                       av_get_sample_fmt_name(st->codec->sample_fmt),
                       codec->name,
                       av_get_sample_fmt_name(codec->sample_fmts[0]));
            st->codec->sample_fmt = codec->sample_fmts[0];
        }
    }
}

static char *choose_pix_fmts(OutputStream *ost)
{
    if (ost->keep_pix_fmt) {
        if (ost->filter)
            avfilter_graph_set_auto_convert(ost->filter->graph->graph,
                                            AVFILTER_AUTO_CONVERT_NONE);
        if (ost->st->codec->pix_fmt == AV_PIX_FMT_NONE)
            return NULL;
        return av_strdup(av_get_pix_fmt_name(ost->st->codec->pix_fmt));
    }
    if (ost->st->codec->pix_fmt != AV_PIX_FMT_NONE) {
        return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc, ost->st->codec->pix_fmt)));
    } else if (ost->enc && ost->enc->pix_fmts) {
        const enum AVPixelFormat *p;
        AVIOContext *s = NULL;
        uint8_t *ret;
        int len;

        if (avio_open_dyn_buf(&s) < 0)
            exit(1);

        p = ost->enc->pix_fmts;
        if (ost->st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
            if (ost->st->codec->codec_id == AV_CODEC_ID_MJPEG) {
                p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
            } else if (ost->st->codec->codec_id == AV_CODEC_ID_LJPEG) {
                p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
                                                   AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
            }
        }

        for (; *p != AV_PIX_FMT_NONE; p++) {
            const char *name = av_get_pix_fmt_name(*p);
            avio_printf(s, "%s:", name);
        }
        len = avio_close_dyn_buf(s, &ret);
        ret[len - 1] = 0;
        return ret;
    } else
        return NULL;
}

/* Define a function for building a string containing a list of
 * allowed formats. */
#define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator)\
static char *choose_ ## var ## s(OutputStream *ost) \
{ \
    if (ost->st->codec->var != none) { \
        get_name(ost->st->codec->var); \
        return av_strdup(name); \
    } else if (ost->enc->supported_list) { \
        const type *p; \
        AVIOContext *s = NULL; \
        uint8_t *ret; \
        int len; \
 \
        if (avio_open_dyn_buf(&s) < 0) \
            exit(1); \
 \
        for (p = ost->enc->supported_list; *p != none; p++) { \
            get_name(*p); \
            avio_printf(s, "%s" separator, name); \
        } \
        len = avio_close_dyn_buf(s, &ret); \
        ret[len - 1] = 0; \
        return ret; \
    } else \
        return NULL; \
}

// DEF_CHOOSE_FORMAT(enum AVPixelFormat, pix_fmt, pix_fmts, AV_PIX_FMT_NONE,
//                   GET_PIX_FMT_NAME, ":")

DEF_CHOOSE_FORMAT(enum AVSampleFormat, sample_fmt, sample_fmts,
                  AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME, ",")

DEF_CHOOSE_FORMAT(int, sample_rate, supported_samplerates, 0,
                  GET_SAMPLE_RATE_NAME, ",")

DEF_CHOOSE_FORMAT(uint64_t, channel_layout, channel_layouts, 0,
                  GET_CH_LAYOUT_NAME, ",")
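For readability, here is roughly what the first instantiation above expands to. This is an illustrative hand-expansion, not code from the tree; it assumes GET_SAMPLE_FMT_NAME(f) declares a local "const char *name = av_get_sample_fmt_name(f)" as in cmdutils, and the cast on the return is added here only for clarity.

/* Approximate expansion of DEF_CHOOSE_FORMAT(enum AVSampleFormat, sample_fmt, ...) */
static char *choose_sample_fmts(OutputStream *ost)
{
    if (ost->st->codec->sample_fmt != AV_SAMPLE_FMT_NONE) {
        const char *name = av_get_sample_fmt_name(ost->st->codec->sample_fmt);
        return av_strdup(name);
    } else if (ost->enc->sample_fmts) {
        const enum AVSampleFormat *p;
        AVIOContext *s = NULL;
        uint8_t *ret;
        int len;

        if (avio_open_dyn_buf(&s) < 0)
            exit(1);

        /* build a ','-separated list of every sample format the encoder supports */
        for (p = ost->enc->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
            const char *name = av_get_sample_fmt_name(*p);
            avio_printf(s, "%s,", name);
        }
        len = avio_close_dyn_buf(s, &ret);
        ret[len - 1] = 0;        /* drop the trailing separator */
        return (char *)ret;
    } else
        return NULL;
}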
FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
{
    FilterGraph *fg = av_mallocz(sizeof(*fg));

    if (!fg)
        exit(1);
    fg->index = nb_filtergraphs;

    GROW_ARRAY(fg->outputs, fg->nb_outputs);
    if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
        exit(1);
    fg->outputs[0]->ost = ost;
    fg->outputs[0]->graph = fg;

    ost->filter = fg->outputs[0];

    GROW_ARRAY(fg->inputs, fg->nb_inputs);
    if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
        exit(1);
    fg->inputs[0]->ist = ist;
    fg->inputs[0]->graph = fg;

    GROW_ARRAY(ist->filters, ist->nb_filters);
    ist->filters[ist->nb_filters - 1] = fg->inputs[0];

    GROW_ARRAY(filtergraphs, nb_filtergraphs);
    filtergraphs[nb_filtergraphs - 1] = fg;

    return fg;
}

static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
{
    InputStream *ist = NULL;
    enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
    int i;

    // TODO: support other filter types
    if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
        av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
               "currently.\n");
        exit(1);
    }

    if (in->name) {
        AVFormatContext *s;
        AVStream *st = NULL;
        char *p;
        int file_idx = strtol(in->name, &p, 0);

        if (file_idx < 0 || file_idx >= nb_input_files) {
            av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
                   file_idx, fg->graph_desc);
            exit(1);
        }
        s = input_files[file_idx]->ctx;

        for (i = 0; i < s->nb_streams; i++) {
            enum AVMediaType stream_type = s->streams[i]->codec->codec_type;
            if (stream_type != type &&
                !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
                  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
                continue;
            if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
                st = s->streams[i];
                break;
            }
        }
        if (!st) {
            av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
                   "matches no streams.\n", p, fg->graph_desc);
            exit(1);
        }
        ist = input_streams[input_files[file_idx]->ist_index + st->index];
    } else {
        /* find the first unused stream of corresponding type */
        for (i = 0; i < nb_input_streams; i++) {
            ist = input_streams[i];
            if (ist->st->codec->codec_type == type && ist->discard)
                break;
        }
        if (i == nb_input_streams) {
            av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
                   "unlabeled input pad %d on filter %s\n", in->pad_idx,
                   in->filter_ctx->name);
            exit(1);
        }
    }
    av_assert0(ist);

    ist->discard = 0;
    ist->decoding_needed++;
    ist->st->discard = AVDISCARD_NONE;

    GROW_ARRAY(fg->inputs, fg->nb_inputs);
    if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
        exit(1);
    fg->inputs[fg->nb_inputs - 1]->ist = ist;
    fg->inputs[fg->nb_inputs - 1]->graph = fg;

    GROW_ARRAY(ist->filters, ist->nb_filters);
    ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
}

static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
    char *pix_fmts;
    OutputStream *ost = ofilter->ost;
    AVCodecContext *codec = ost->st->codec;
    AVFilterContext *last_filter = out->filter_ctx;
    int pad_idx = out->pad_idx;
    int ret;
    char name[255];
    AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();

    snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
    ret = avfilter_graph_create_filter(&ofilter->filter,
                                       avfilter_get_by_name("ffbuffersink"),
                                       name, NULL, NULL, fg->graph);
    av_freep(&buffersink_params);

    if (ret < 0)
        return ret;

    if (codec->width || codec->height) {
        char args[255];
        AVFilterContext *filter;

        snprintf(args, sizeof(args), "%d:%d:flags=0x%X",
                 codec->width,
                 codec->height,
                 (unsigned)ost->sws_flags);
        snprintf(name, sizeof(name), "scaler for output stream %d:%d",
                 ost->file_index, ost->index);
        if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
                                                name, args, NULL, fg->graph)) < 0)
            return ret;
        if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
            return ret;

        last_filter = filter;
        pad_idx = 0;
    }

    if ((pix_fmts = choose_pix_fmts(ost))) {
        AVFilterContext *filter;
        snprintf(name, sizeof(name), "pixel format for output stream %d:%d",
                 ost->file_index, ost->index);
        if ((ret = avfilter_graph_create_filter(&filter,
                                                avfilter_get_by_name("format"),
                                                "format", pix_fmts, NULL,
                                                fg->graph)) < 0)
            return ret;
        if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
            return ret;

        last_filter = filter;
        pad_idx = 0;
        av_freep(&pix_fmts);
    }

    if (ost->frame_rate.num && 0) {
        AVFilterContext *fps;
        char args[255];

        snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
                 ost->frame_rate.den);
        snprintf(name, sizeof(name), "fps for output stream %d:%d",
                 ost->file_index, ost->index);
        ret = avfilter_graph_create_filter(&fps, avfilter_get_by_name("fps"),
                                           name, args, NULL, fg->graph);
        if (ret < 0)
            return ret;

        ret = avfilter_link(last_filter, pad_idx, fps, 0);
        if (ret < 0)
            return ret;
        last_filter = fps;
        pad_idx = 0;
    }

    if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
        return ret;

    return 0;
}

static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
    OutputStream *ost = ofilter->ost;
    AVCodecContext *codec = ost->st->codec;
    AVFilterContext *last_filter = out->filter_ctx;
    int pad_idx = out->pad_idx;
    char *sample_fmts, *sample_rates, *channel_layouts;
    char name[255];
    int ret;


    snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
    ret = avfilter_graph_create_filter(&ofilter->filter,
                                       avfilter_get_by_name("ffabuffersink"),
                                       name, NULL, NULL, fg->graph);
    if (ret < 0)
        return ret;

#define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
    AVFilterContext *filt_ctx; \
 \
    av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
           "similarly to -af " filter_name "=%s.\n", arg); \
 \
    ret = avfilter_graph_create_filter(&filt_ctx, \
                                       avfilter_get_by_name(filter_name), \
                                       filter_name, arg, NULL, fg->graph); \
    if (ret < 0) \
        return ret; \
 \
    ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
    if (ret < 0) \
        return ret; \
 \
    last_filter = filt_ctx; \
    pad_idx = 0; \
} while (0)
    if (ost->audio_channels_mapped) {
        int i;
        AVBPrint pan_buf;
        av_bprint_init(&pan_buf, 256, 8192);
        av_bprintf(&pan_buf, "0x%"PRIx64,
                   av_get_default_channel_layout(ost->audio_channels_mapped));
        for (i = 0; i < ost->audio_channels_mapped; i++)
            if (ost->audio_channels_map[i] != -1)
                av_bprintf(&pan_buf, ":c%d=c%d", i, ost->audio_channels_map[i]);

        AUTO_INSERT_FILTER("-map_channel", "pan", pan_buf.str);
        av_bprint_finalize(&pan_buf, NULL);
    }

    if (codec->channels && !codec->channel_layout)
        codec->channel_layout = av_get_default_channel_layout(codec->channels);

    sample_fmts = choose_sample_fmts(ost);
    sample_rates = choose_sample_rates(ost);
    channel_layouts = choose_channel_layouts(ost);
    if (sample_fmts || sample_rates || channel_layouts) {
        AVFilterContext *format;
        char args[256];
        args[0] = 0;

        if (sample_fmts)
            av_strlcatf(args, sizeof(args), "sample_fmts=%s:",
                        sample_fmts);
        if (sample_rates)
            av_strlcatf(args, sizeof(args), "sample_rates=%s:",
                        sample_rates);
        if (channel_layouts)
            av_strlcatf(args, sizeof(args), "channel_layouts=%s:",
                        channel_layouts);

        av_freep(&sample_fmts);
        av_freep(&sample_rates);
        av_freep(&channel_layouts);

        snprintf(name, sizeof(name), "audio format for output stream %d:%d",
                 ost->file_index, ost->index);
        ret = avfilter_graph_create_filter(&format,
                                           avfilter_get_by_name("aformat"),
                                           name, args, NULL, fg->graph);
        if (ret < 0)
            return ret;

        ret = avfilter_link(last_filter, pad_idx, format, 0);
        if (ret < 0)
            return ret;

        last_filter = format;
        pad_idx = 0;
    }

    if (audio_volume != 256 && 0) {
        char args[256];

        snprintf(args, sizeof(args), "%f", audio_volume / 256.);
        AUTO_INSERT_FILTER("-vol", "volume", args);
    }

    if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
        return ret;

    return 0;
}

#define DESCRIBE_FILTER_LINK(f, inout, in) \
{ \
    AVFilterContext *ctx = inout->filter_ctx; \
    AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads; \
    int nb_pads = in ? ctx->input_count : ctx->output_count; \
    AVIOContext *pb; \
 \
    if (avio_open_dyn_buf(&pb) < 0) \
        exit(1); \
 \
    avio_printf(pb, "%s", ctx->filter->name); \
    if (nb_pads > 1) \
        avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));\
    avio_w8(pb, 0); \
    avio_close_dyn_buf(pb, &f->name); \
}

int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
    av_freep(&ofilter->name);
    DESCRIBE_FILTER_LINK(ofilter, out, 0);

    switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
    case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
    case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
    default: av_assert0(0);
    }
}

static int sub2video_prepare(InputStream *ist)
{
    AVFormatContext *avf = input_files[ist->file_index]->ctx;
    int i, ret, w, h;
    uint8_t *image[4];
    int linesize[4];

    /* Compute the size of the canvas for the subtitles stream.
       If the subtitles codec has set a size, use it. Otherwise use the
       maximum dimensions of the video streams in the same file. */
    w = ist->st->codec->width;
    h = ist->st->codec->height;
    if (!(w && h)) {
        for (i = 0; i < avf->nb_streams; i++) {
            if (avf->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                w = FFMAX(w, avf->streams[i]->codec->width);
                h = FFMAX(h, avf->streams[i]->codec->height);
            }
        }
        if (!(w && h)) {
            w = FFMAX(w, 720);
            h = FFMAX(h, 576);
        }
        av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
    }
    ist->sub2video.w = ist->st->codec->width = ist->resample_width = w;
    ist->sub2video.h = ist->st->codec->height = ist->resample_height = h;

    /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
       palettes for all rectangles are identical or compatible */
    ist->st->codec->pix_fmt = AV_PIX_FMT_RGB32;

    ret = av_image_alloc(image, linesize, w, h, AV_PIX_FMT_RGB32, 32);
    if (ret < 0)
        return ret;
    memset(image[0], 0, h * linesize[0]);
    ist->sub2video.ref = avfilter_get_video_buffer_ref_from_arrays(
            image, linesize, AV_PERM_READ | AV_PERM_PRESERVE,
            w, h, AV_PIX_FMT_RGB32);
    if (!ist->sub2video.ref) {
        av_free(image[0]);
        return AVERROR(ENOMEM);
    }
    return 0;
}

static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
                                        AVFilterInOut *in)
{
    AVFilterContext *first_filter = in->filter_ctx;
    AVFilter *filter = avfilter_get_by_name("buffer");
    InputStream *ist = ifilter->ist;
    AVRational tb = ist->framerate.num ? av_inv_q(ist->framerate) :
                                         ist->st->time_base;
    AVRational fr = ist->framerate.num ? ist->framerate :
                                         ist->st->r_frame_rate;
    AVRational sar;
    AVBPrint args;
    char name[255];
    int pad_idx = in->pad_idx;
    int ret;

    if (!ist->framerate.num && ist->st->codec->ticks_per_frame > 1) {
        AVRational codec_fr = av_inv_q(ist->st->codec->time_base);
        AVRational avg_fr = ist->st->avg_frame_rate;
        codec_fr.den *= ist->st->codec->ticks_per_frame;
        if (codec_fr.num > 0 && codec_fr.den > 0 && av_q2d(codec_fr) < av_q2d(fr) * 0.7
            && fabs(1.0 - av_q2d(av_div_q(avg_fr, fr))) > 0.1)
            fr = codec_fr;
    }

    if (ist->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
        ret = sub2video_prepare(ist);
        if (ret < 0)
            return ret;
    }

    sar = ist->st->sample_aspect_ratio.num ?
          ist->st->sample_aspect_ratio :
          ist->st->codec->sample_aspect_ratio;
    if (!sar.den)
        sar = (AVRational){0, 1};
    av_bprint_init(&args, 0, 1);
    av_bprintf(&args,
               "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
               "pixel_aspect=%d/%d:sws_param=flags=%d", ist->resample_width,
               ist->resample_height, ist->resample_pix_fmt,
               tb.num, tb.den, sar.num, sar.den,
               SWS_BILINEAR + ((ist->st->codec->flags & CODEC_FLAG_BITEXACT) ? SWS_BITEXACT : 0));
    if (fr.num && fr.den)
        av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
             ist->file_index, ist->st->index);

    if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter, name,
                                            args.str, NULL, fg->graph)) < 0)
        return ret;

    if (ist->framerate.num) {
        AVFilterContext *setpts;

        snprintf(name, sizeof(name), "force CFR for input from stream %d:%d",
                 ist->file_index, ist->st->index);
        if ((ret = avfilter_graph_create_filter(&setpts,
                                                avfilter_get_by_name("setpts"),
                                                name, "N", NULL,
                                                fg->graph)) < 0)
            return ret;

        if ((ret = avfilter_link(setpts, 0, first_filter, pad_idx)) < 0)
            return ret;

        first_filter = setpts;
        pad_idx = 0;
    }

    if ((ret = avfilter_link(ifilter->filter, 0, first_filter, pad_idx)) < 0)
        return ret;
    return 0;
}

static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
                                        AVFilterInOut *in)
{
    AVFilterContext *first_filter = in->filter_ctx;
    AVFilter *filter = avfilter_get_by_name("abuffer");
    InputStream *ist = ifilter->ist;
    int pad_idx = in->pad_idx;
    char args[255], name[255];
    int ret;

    snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s"
             ":channel_layout=0x%"PRIx64,
             1, ist->st->codec->sample_rate,
             ist->st->codec->sample_rate,
             av_get_sample_fmt_name(ist->st->codec->sample_fmt),
             ist->st->codec->channel_layout);
    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
             ist->file_index, ist->st->index);

    if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter,
                                            name, args, NULL,
                                            fg->graph)) < 0)
        return ret;

#define AUTO_INSERT_FILTER_INPUT(opt_name, filter_name, arg) do { \
    AVFilterContext *filt_ctx; \
 \
    av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
           "similarly to -af " filter_name "=%s.\n", arg); \
 \
    snprintf(name, sizeof(name), "graph %d %s for input stream %d:%d", \
             fg->index, filter_name, ist->file_index, ist->st->index); \
    ret = avfilter_graph_create_filter(&filt_ctx, \
                                       avfilter_get_by_name(filter_name), \
                                       name, arg, NULL, fg->graph); \
    if (ret < 0) \
        return ret; \
 \
    ret = avfilter_link(filt_ctx, 0, first_filter, pad_idx); \
    if (ret < 0) \
        return ret; \
 \
    first_filter = filt_ctx; \
} while (0)

    if (audio_sync_method > 0) {
        char args[256] = {0};

        av_strlcatf(args, sizeof(args), "async=%d", audio_sync_method);
        if (audio_drift_threshold != 0.1)
            av_strlcatf(args, sizeof(args), ":min_hard_comp=%f", audio_drift_threshold);
        AUTO_INSERT_FILTER_INPUT("-async", "aresample", args);
    }

//     if (ost->audio_channels_mapped) {
//         int i;
//         AVBPrint pan_buf;
//         av_bprint_init(&pan_buf, 256, 8192);
//         av_bprintf(&pan_buf, "0x%"PRIx64,
//                    av_get_default_channel_layout(ost->audio_channels_mapped));
//         for (i = 0; i < ost->audio_channels_mapped; i++)
//             if (ost->audio_channels_map[i] != -1)
//                 av_bprintf(&pan_buf, ":c%d=c%d", i, ost->audio_channels_map[i]);
//         AUTO_INSERT_FILTER_INPUT("-map_channel", "pan", pan_buf.str);
//         av_bprint_finalize(&pan_buf, NULL);
//     }

    if (audio_volume != 256) {
        char args[256];

        av_log(NULL, AV_LOG_WARNING, "-vol has been deprecated. Use the volume "
               "audio filter instead.\n");

        snprintf(args, sizeof(args), "%f", audio_volume / 256.);
        AUTO_INSERT_FILTER_INPUT("-vol", "volume", args);
    }
    if ((ret = avfilter_link(ifilter->filter, 0, first_filter, pad_idx)) < 0)
        return ret;

    return 0;
}

static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
                                  AVFilterInOut *in)
{
    av_freep(&ifilter->name);
    DESCRIBE_FILTER_LINK(ifilter, in, 1);

    switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
    case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
    case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
    default: av_assert0(0);
    }
}

int configure_filtergraph(FilterGraph *fg)
{
    AVFilterInOut *inputs, *outputs, *cur;
    int ret, i, init = !fg->graph, simple = !fg->graph_desc;
    const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
                                      fg->graph_desc;

    avfilter_graph_free(&fg->graph);
    if (!(fg->graph = avfilter_graph_alloc()))
        return AVERROR(ENOMEM);

    if (simple) {
        OutputStream *ost = fg->outputs[0]->ost;
        char args[255];
        snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
        fg->graph->scale_sws_opts = av_strdup(args);

        args[0] = 0;
        if (ost->swr_filter_type != SWR_FILTER_TYPE_KAISER)
            av_strlcatf(args, sizeof(args), "filter_type=%d:", (int)ost->swr_filter_type);
        if (ost->swr_dither_method)
            av_strlcatf(args, sizeof(args), "dither_method=%d:", (int)ost->swr_dither_method);
        if (ost->swr_dither_scale != 1.0)
            av_strlcatf(args, sizeof(args), "dither_scale=%f:", ost->swr_dither_scale);
        if (strlen(args))
            args[strlen(args)-1] = 0;
        av_opt_set(fg->graph, "aresample_swr_opts", args, 0);
    }

    if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
        return ret;

    if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
        av_log(NULL, AV_LOG_ERROR, "Simple filtergraph '%s' does not have "
               "exactly one input and output.\n", graph_desc);
        return AVERROR(EINVAL);
    }

    for (cur = inputs; !simple && init && cur; cur = cur->next)
        init_input_filter(fg, cur);

    for (cur = inputs, i = 0; cur; cur = cur->next, i++)
        if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0)
            return ret;
    avfilter_inout_free(&inputs);

    if (!init || simple) {
        /* we already know the mappings between lavfi outputs and output streams,
         * so we can finish the setup */
        for (cur = outputs, i = 0; cur; cur = cur->next, i++)
            configure_output_filter(fg, fg->outputs[i], cur);
        avfilter_inout_free(&outputs);

        if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
            return ret;
    } else {
        /* wait until output mappings are processed */
        for (cur = outputs; cur;) {
            GROW_ARRAY(fg->outputs, fg->nb_outputs);
            if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
                exit(1);
            fg->outputs[fg->nb_outputs - 1]->graph = fg;
            fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
            cur = cur->next;
            fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
        }
    }

    return 0;
}

int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
{
    int i;
    for (i = 0; i < fg->nb_inputs; i++)
        if (fg->inputs[i]->ist == ist)
            return 1;
    return 0;
}
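For orientation, the intended call sequence in the simple (one input, one output) case is roughly the following. This is an illustrative sketch of the flow only, not code from the tree; it assumes ist and ost have already been set up by the option-parsing code.

/* Illustrative only: wire one decoded input stream to one encoder through
 * the filtering code above (the -vf/-af case). */
static int setup_simple_graph(InputStream *ist, OutputStream *ost)
{
    /* allocates fg, fg->inputs[0] and fg->outputs[0], and registers them globally */
    FilterGraph *fg = init_simple_filtergraph(ist, ost);

    /* ost->avfilter holds the -vf/-af string; configure_filtergraph() parses it
     * and inserts the buffer/buffersink ends plus the scale/format/aformat glue. */
    return configure_filtergraph(fg);
}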
2630 ffmpeg_opt.c
File diff suppressed because it is too large
259 ffserver.c
@@ -29,7 +29,6 @@
#endif
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "libavformat/avformat.h"
// FIXME those are internal headers, ffserver _really_ shouldn't use them
#include "libavformat/ffm.h"
@@ -41,17 +40,13 @@
#include "libavformat/internal.h"
#include "libavformat/url.h"

#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/lfg.h"
#include "libavutil/dict.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/random_seed.h"
#include "libavutil/parseutils.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"

#include <stdarg.h>
#include <unistd.h>
#include <fcntl.h>
@@ -60,6 +55,7 @@
#include <poll.h>
#endif
#include <errno.h>
#include <sys/time.h>
#include <time.h>
#include <sys/wait.h>
#include <signal.h>
@@ -307,10 +303,12 @@ static int rtp_new_av_stream(HTTPContext *c,
HTTPContext *rtsp_c);

static const char *my_program_name;
static const char *my_program_dir;

static const char *config_filename = "/etc/ffserver.conf";

static int ffserver_debug;
static int ffserver_daemon;
static int no_launch;
static int need_to_start_children;

@@ -328,40 +326,12 @@ static AVLFG random_state;

static FILE *logfile = NULL;

static int64_t ffm_read_write_index(int fd)
{
    uint8_t buf[8];

    if (lseek(fd, 8, SEEK_SET) < 0)
        return AVERROR(EIO);
    if (read(fd, buf, 8) != 8)
        return AVERROR(EIO);
    return AV_RB64(buf);
}

static int ffm_write_write_index(int fd, int64_t pos)
{
    uint8_t buf[8];
    int i;

    for(i=0;i<8;i++)
        buf[i] = (pos >> (56 - i * 8)) & 0xff;
    if (lseek(fd, 8, SEEK_SET) < 0)
        return AVERROR(EIO);
    if (write(fd, buf, 8) != 8)
        return AVERROR(EIO);
    return 8;
}

static void ffm_set_write_index(AVFormatContext *s, int64_t pos,
                                int64_t file_size)
{
    FFMContext *ffm = s->priv_data;
    ffm->write_index = pos;
    ffm->file_size = file_size;
}
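ffm_write_write_index() serializes the 64-bit write index big-endian by hand. Since libavutil/intreadwrite.h is already included here, the same byte layout can be expressed with AV_WB64, the write-side counterpart of the AV_RB64 used in ffm_read_write_index(). A minimal sketch, not code from the tree:

/* Sketch only: equivalent big-endian serialization using AV_WB64
 * instead of the manual shift loop above. */
static int ffm_write_write_index_sketch(int fd, int64_t pos)
{
    uint8_t buf[8];

    AV_WB64(buf, pos);              /* store pos as 8 big-endian bytes */
    if (lseek(fd, 8, SEEK_SET) < 0)
        return AVERROR(EIO);
    if (write(fd, buf, 8) != 8)
        return AVERROR(EIO);
    return 8;
}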
/* FIXME: make ffserver work with IPv6 */
void av_noreturn exit_program(int ret)
{
    exit(ret);
}

/* resolve host with also IP address parsing */
static int resolve_host(struct in_addr *sin_addr, const char *hostname)
{
@@ -521,12 +491,19 @@ static void start_children(FFStream *feed)
close(i);

if (!ffserver_debug) {
    if (!freopen("/dev/null", "r", stdin))
        http_log("failed to redirect STDIN to /dev/null\n;");
    if (!freopen("/dev/null", "w", stdout))
        http_log("failed to redirect STDOUT to /dev/null\n;");
    if (!freopen("/dev/null", "w", stderr))
        http_log("failed to redirect STDERR to /dev/null\n;");
    i = open("/dev/null", O_RDWR);
    if (i != -1) {
        dup2(i, 0);
        dup2(i, 1);
        dup2(i, 2);
        close(i);
    }
}

/* This is needed to make relative pathnames work */
if (chdir(my_program_dir) < 0) {
    http_log("chdir failed\n");
    exit(1);
}

signal(SIGPIPE, SIG_DFL);
@@ -578,17 +555,15 @@ static void start_multicast(void)
FFStream *stream;
char session_id[32];
HTTPContext *rtp_c;
struct sockaddr_in dest_addr = {0};
struct sockaddr_in dest_addr;
int default_port, stream_index;

default_port = 6000;
for(stream = first_stream; stream != NULL; stream = stream->next) {
    if (stream->is_multicast) {
        unsigned random0 = av_lfg_get(&random_state);
        unsigned random1 = av_lfg_get(&random_state);
        /* open the RTP connection */
        snprintf(session_id, sizeof(session_id), "%08x%08x",
                 random0, random1);
                 av_lfg_get(&random_state), av_lfg_get(&random_state));

        /* choose a port if none given */
        if (stream->multicast_port == 0) {
@@ -786,17 +761,16 @@ static void start_wait_request(HTTPContext *c, int is_rtsp)

static void http_send_too_busy_reply(int fd)
{
    char buffer[400];
    char buffer[300];
    int len = snprintf(buffer, sizeof(buffer),
                       "HTTP/1.0 503 Server too busy\r\n"
                       "Content-type: text/html\r\n"
                       "\r\n"
                       "<html><head><title>Too busy</title></head><body>\r\n"
                       "<p>The server is too busy to serve your request at this time.</p>\r\n"
                       "<p>The number of current connections is %u, and this exceeds the limit of %u.</p>\r\n"
                       "<p>The number of current connections is %d, and this exceeds the limit of %d.</p>\r\n"
                       "</body></html>\r\n",
                       nb_connections, nb_max_connections);
    av_assert0(len < sizeof(buffer));
    send(fd, buffer, len, 0);
}

@@ -804,8 +778,7 @@ static void http_send_too_busy_reply(int fd)
static void new_connection(int server_fd, int is_rtsp)
{
    struct sockaddr_in from_addr;
    socklen_t len;
    int fd;
    int fd, len;
    HTTPContext *c = NULL;

    len = sizeof(from_addr);
@@ -921,8 +894,6 @@ static void close_connection(HTTPContext *c)

for(i=0; i<ctx->nb_streams; i++)
    av_free(ctx->streams[i]);
av_freep(&ctx->streams);
av_freep(&ctx->priv_data);

if (c->stream && !c->post && c->stream->stream_type == STREAM_TYPE_LIVE)
    current_bandwidth -= c->stream->bandwidth;
@@ -1490,8 +1461,7 @@ enum RedirType {
/* parse http request and prepare header */
static int http_parse_request(HTTPContext *c)
{
    const char *p;
    char *p1;
    char *p;
    enum RedirType redir_type;
    char cmd[32];
    char info[1024], filename[1024];
@@ -1502,10 +1472,10 @@ static int http_parse_request(HTTPContext *c)
    FFStream *stream;
    int i;
    char ratebuf[32];
    const char *useragent = 0;
    char *useragent = 0;

    p = c->buffer;
    get_word(cmd, sizeof(cmd), &p);
    get_word(cmd, sizeof(cmd), (const char **)&p);
    av_strlcpy(c->method, cmd, sizeof(c->method));

    if (!strcmp(cmd, "GET"))
@@ -1515,7 +1485,7 @@ static int http_parse_request(HTTPContext *c)
    else
        return -1;

    get_word(url, sizeof(url), &p);
    get_word(url, sizeof(url), (const char **)&p);
    av_strlcpy(c->url, url, sizeof(c->url));

    get_word(protocol, sizeof(protocol), (const char **)&p);
@@ -1528,10 +1498,10 @@ static int http_parse_request(HTTPContext *c)
    http_log("%s - - New connection: %s %s\n", inet_ntoa(c->from_addr.sin_addr), cmd, url);

    /* find the filename and the optional info string in the request */
    p1 = strchr(url, '?');
    if (p1) {
        av_strlcpy(info, p1, sizeof(info));
        *p1 = '\0';
    p = strchr(url, '?');
    if (p) {
        av_strlcpy(info, p, sizeof(info));
        *p = '\0';
    } else
        info[0] = '\0';

@@ -1593,7 +1563,7 @@ static int http_parse_request(HTTPContext *c)
    if (stream->stream_type == STREAM_TYPE_REDIRECT) {
        c->http_error = 301;
        q = c->buffer;
        snprintf(q, c->buffer_size,
        q += snprintf(q, c->buffer_size,
                      "HTTP/1.0 301 Moved\r\n"
                      "Location: %s\r\n"
                      "Content-type: text/html\r\n"
@@ -1601,7 +1571,6 @@ static int http_parse_request(HTTPContext *c)
                      "<html><head><title>Moved</title></head><body>\r\n"
                      "You should be <a href=\"%s\">redirected</a>.\r\n"
                      "</body></html>\r\n", stream->feed_filename, stream->feed_filename);
        q += strlen(q);
        /* prepare output buffer */
        c->buffer_ptr = c->buffer;
        c->buffer_end = q;
@@ -1632,7 +1601,7 @@ static int http_parse_request(HTTPContext *c)
    if (c->post == 0 && max_bandwidth < current_bandwidth) {
        c->http_error = 503;
        q = c->buffer;
        snprintf(q, c->buffer_size,
        q += snprintf(q, c->buffer_size,
                      "HTTP/1.0 503 Server too busy\r\n"
                      "Content-type: text/html\r\n"
                      "\r\n"
@@ -1641,7 +1610,6 @@ static int http_parse_request(HTTPContext *c)
                      "<p>The bandwidth being served (including your stream) is %"PRIu64"kbit/sec, "
                      "and this exceeds the limit of %"PRIu64"kbit/sec.</p>\r\n"
                      "</body></html>\r\n", current_bandwidth, max_bandwidth);
        q += strlen(q);
        /* prepare output buffer */
        c->buffer_ptr = c->buffer;
        c->buffer_end = q;
@@ -1650,7 +1618,7 @@ static int http_parse_request(HTTPContext *c)
    }

    if (redir_type != REDIR_NONE) {
        const char *hostinfo = 0;
        char *hostinfo = 0;

        for (p = c->buffer; *p && *p != '\r' && *p != '\n'; ) {
            if (av_strncasecmp(p, "Host:", 5) == 0) {
@@ -1684,7 +1652,7 @@ static int http_parse_request(HTTPContext *c)
        q = c->buffer;
        switch(redir_type) {
        case REDIR_ASX:
            snprintf(q, c->buffer_size,
            q += snprintf(q, c->buffer_size,
                          "HTTP/1.0 200 ASX Follows\r\n"
                          "Content-type: video/x-ms-asf\r\n"
                          "\r\n"
@@ -1692,25 +1660,22 @@ static int http_parse_request(HTTPContext *c)
                          //"<!-- Autogenerated by ffserver -->\r\n"
                          "<ENTRY><REF HREF=\"http://%s/%s%s\"/></ENTRY>\r\n"
                          "</ASX>\r\n", hostbuf, filename, info);
            q += strlen(q);
            break;
        case REDIR_RAM:
            snprintf(q, c->buffer_size,
            q += snprintf(q, c->buffer_size,
                          "HTTP/1.0 200 RAM Follows\r\n"
                          "Content-type: audio/x-pn-realaudio\r\n"
                          "\r\n"
                          "# Autogenerated by ffserver\r\n"
                          "http://%s/%s%s\r\n", hostbuf, filename, info);
            q += strlen(q);
            break;
        case REDIR_ASF:
            snprintf(q, c->buffer_size,
            q += snprintf(q, c->buffer_size,
                          "HTTP/1.0 200 ASF Redirect follows\r\n"
                          "Content-type: video/x-ms-asf\r\n"
                          "\r\n"
                          "[Reference]\r\n"
                          "Ref1=http://%s/%s%s\r\n", hostbuf, filename, info);
            q += strlen(q);
            break;
        case REDIR_RTSP:
            {
@@ -1720,27 +1685,24 @@ static int http_parse_request(HTTPContext *c)
                p = strrchr(hostname, ':');
                if (p)
                    *p = '\0';
                snprintf(q, c->buffer_size,
                q += snprintf(q, c->buffer_size,
                              "HTTP/1.0 200 RTSP Redirect follows\r\n"
                              /* XXX: incorrect mime type ? */
                              "Content-type: application/x-rtsp\r\n"
                              "\r\n"
                              "rtsp://%s:%d/%s\r\n", hostname, ntohs(my_rtsp_addr.sin_port), filename);
                q += strlen(q);
            }
            break;
        case REDIR_SDP:
            {
                uint8_t *sdp_data;
                int sdp_data_size;
                socklen_t len;
                int sdp_data_size, len;
                struct sockaddr_in my_addr;

                snprintf(q, c->buffer_size,
                q += snprintf(q, c->buffer_size,
                              "HTTP/1.0 200 OK\r\n"
                              "Content-type: application/sdp\r\n"
                              "\r\n");
                q += strlen(q);

                len = sizeof(my_addr);
                getsockname(c->fd, (struct sockaddr *)&my_addr, &len);
@@ -1784,7 +1746,7 @@ static int http_parse_request(HTTPContext *c)
    if (!stream->is_feed) {
        /* However it might be a status report from WMP! Let us log the
         * data as it might come in handy one day. */
        const char *logline = 0;
        char *logline = 0;
        int client_id = 0;

        for (p = c->buffer; *p && *p != '\r' && *p != '\n'; ) {
@@ -1859,12 +1821,12 @@ static int http_parse_request(HTTPContext *c)
    }

    /* prepare http header */
    c->buffer[0] = 0;
    av_strlcatf(c->buffer, c->buffer_size, "HTTP/1.0 200 OK\r\n");
    q = c->buffer;
    q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "HTTP/1.0 200 OK\r\n");
    mime_type = c->stream->fmt->mime_type;
    if (!mime_type)
        mime_type = "application/x-octet-stream";
    av_strlcatf(c->buffer, c->buffer_size, "Pragma: no-cache\r\n");
    q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Pragma: no-cache\r\n");

    /* for asf, we need extra headers */
    if (!strcmp(c->stream->fmt->name,"asf_stream")) {
@@ -1872,11 +1834,10 @@ static int http_parse_request(HTTPContext *c)

        c->wmp_client_id = av_lfg_get(&random_state);

        av_strlcatf(c->buffer, c->buffer_size, "Server: Cougar 4.1.0.3923\r\nCache-Control: no-cache\r\nPragma: client-id=%d\r\nPragma: features=\"broadcast\"\r\n", c->wmp_client_id);
        q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Server: Cougar 4.1.0.3923\r\nCache-Control: no-cache\r\nPragma: client-id=%d\r\nPragma: features=\"broadcast\"\r\n", c->wmp_client_id);
    }
    av_strlcatf(c->buffer, c->buffer_size, "Content-Type: %s\r\n", mime_type);
    av_strlcatf(c->buffer, c->buffer_size, "\r\n");
    q = c->buffer + strlen(c->buffer);
    q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Content-Type: %s\r\n", mime_type);
    q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "\r\n");

    /* prepare output buffer */
    c->http_error = 0;
@@ -1887,7 +1848,7 @@ static int http_parse_request(HTTPContext *c)
 send_error:
    c->http_error = 404;
    q = c->buffer;
    snprintf(q, c->buffer_size,
    q += snprintf(q, c->buffer_size,
                  "HTTP/1.0 404 Not Found\r\n"
                  "Content-type: text/html\r\n"
                  "\r\n"
@@ -1895,7 +1856,6 @@ static int http_parse_request(HTTPContext *c)
                  "<head><title>404 Not Found</title></head>\n"
                  "<body>%s</body>\n"
                  "</html>\n", msg);
    q += strlen(q);
    /* prepare output buffer */
    c->buffer_ptr = c->buffer;
    c->buffer_end = q;
@@ -2045,7 +2005,7 @@ static void compute_status(HTTPContext *c)
if (stream->pid) {
    avio_printf(pb, "Running as pid %d.\n", stream->pid);

#if defined(linux)
#if defined(linux) && !defined(CONFIG_NOCUTILS)
    {
        FILE *pid_stat;
        char ps_cmd[64];
@@ -2060,7 +2020,7 @@ static void compute_status(HTTPContext *c)
            char cpuperc[10];
            char cpuused[64];

            if (fscanf(pid_stat, "%9s %63s", cpuperc,
            if (fscanf(pid_stat, "%10s %64s", cpuperc,
                       cpuused) == 2) {
                avio_printf(pb, "Currently using %s%% of the cpu. Total time used %s.\n",
                            cpuperc, cpuused);
@@ -2715,6 +2675,8 @@ static int http_receive_data(HTTPContext *c)
/* a packet has been received : write it in the store, except
   if header */
if (c->data_count > FFM_PACKET_SIZE) {

    // printf("writing pos=0x%"PRIx64" size=0x%"PRIx64"\n", feed->feed_write_index, feed->feed_size);
    /* XXX: use llseek or url_seek */
    lseek(c->feed_fd, feed->feed_write_index, SEEK_SET);
    if (write(c->feed_fd, c->buffer, FFM_PACKET_SIZE) < 0) {
@@ -2961,14 +2923,12 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
{
    AVFormatContext *avc;
    AVStream *avs = NULL;
    AVOutputFormat *rtp_format = av_guess_format("rtp", NULL, NULL);
    int i;

    avc = avformat_alloc_context();
    if (avc == NULL || !rtp_format) {
    if (avc == NULL) {
        return -1;
    }
    avc->oformat = rtp_format;
    av_dict_set(&avc->metadata, "title",
                stream->title[0] ? stream->title : "No Title", 0);
    avc->nb_streams = stream->nb_streams;
@@ -3018,8 +2978,7 @@ static void rtsp_cmd_describe(HTTPContext *c, const char *url)
char path1[1024];
const char *path;
uint8_t *content;
int content_length;
socklen_t len;
int content_length, len;
struct sockaddr_in my_addr;

/* find which url is asked */
@@ -3134,12 +3093,9 @@ static void rtsp_cmd_setup(HTTPContext *c, const char *url,
found:

/* generate session id if needed */
if (h->session_id[0] == '\0') {
    unsigned random0 = av_lfg_get(&random_state);
    unsigned random1 = av_lfg_get(&random_state);
if (h->session_id[0] == '\0')
    snprintf(h->session_id, sizeof(h->session_id), "%08x%08x",
             random0, random1);
}
             av_lfg_get(&random_state), av_lfg_get(&random_state));

/* find rtp session, and create it if none found */
rtp_c = find_rtp_session(h->session_id);
@@ -3508,9 +3464,6 @@ static AVStream *add_av_stream1(FFStream *stream, AVCodecContext *codec, int cop
{
    AVStream *fst;

    if(stream->nb_streams >= FF_ARRAY_ELEMS(stream->streams))
        return NULL;

    fst = av_mallocz(sizeof(AVStream));
    if (!fst)
        return NULL;
@@ -3518,7 +3471,7 @@ static AVStream *add_av_stream1(FFStream *stream, AVCodecContext *codec, int cop
    fst->codec = avcodec_alloc_context3(NULL);
    memcpy(fst->codec, codec, sizeof(AVCodecContext));
    if (codec->extradata_size) {
        fst->codec->extradata = av_mallocz(codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        fst->codec->extradata = av_malloc(codec->extradata_size);
        memcpy(fst->codec->extradata, codec->extradata,
               codec->extradata_size);
    }
@@ -3597,12 +3550,10 @@ static void extract_mpeg4_header(AVFormatContext *infile)
AVStream *st;
const uint8_t *p;

infile->flags |= AVFMT_FLAG_NOFILLIN | AVFMT_FLAG_NOPARSE;

mpeg4_count = 0;
for(i=0;i<infile->nb_streams;i++) {
    st = infile->streams[i];
    if (st->codec->codec_id == AV_CODEC_ID_MPEG4 &&
    if (st->codec->codec_id == CODEC_ID_MPEG4 &&
        st->codec->extradata_size == 0) {
        mpeg4_count++;
    }
@@ -3612,10 +3563,10 @@ static void extract_mpeg4_header(AVFormatContext *infile)

printf("MPEG4 without extra data: trying to find header in %s\n", infile->filename);
while (mpeg4_count > 0) {
    if (av_read_frame(infile, &pkt) < 0)
    if (av_read_packet(infile, &pkt) < 0)
        break;
    st = infile->streams[pkt.stream_index];
    if (st->codec->codec_id == AV_CODEC_ID_MPEG4 &&
    if (st->codec->codec_id == CODEC_ID_MPEG4 &&
        st->codec->extradata_size == 0) {
        av_freep(&st->codec->extradata);
        /* fill extradata with the header */
@@ -3627,7 +3578,7 @@ static void extract_mpeg4_header(AVFormatContext *infile)
            p[2] == 0x01 && p[3] == 0xb6) {
            size = p - pkt.data;
            // av_hex_dump_log(infile, AV_LOG_DEBUG, pkt.data, size);
            st->codec->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
            st->codec->extradata = av_malloc(size);
            st->codec->extradata_size = size;
            memcpy(st->codec->extradata, pkt.data, size);
            break;
@@ -3806,7 +3757,7 @@ static void build_feed_streams(void)
s->nb_streams = feed->nb_streams;
s->streams = feed->streams;
if (avformat_write_header(s, NULL) < 0) {
    http_log("Container doesn't support the required parameters\n");
    http_log("Container doesn't supports the required parameters\n");
    exit(1);
}
/* XXX: need better api */
@@ -3860,9 +3811,6 @@ static void add_codec(FFStream *stream, AVCodecContext *av)
{
    AVStream *st;

    if(stream->nb_streams >= FF_ARRAY_ELEMS(stream->streams))
        return;

    /* compute default parameters */
    switch(av->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
@@ -3934,22 +3882,22 @@ static void add_codec(FFStream *stream, AVCodecContext *av)
    memcpy(st->codec, av, sizeof(AVCodecContext));
}

static enum AVCodecID opt_audio_codec(const char *arg)
static enum CodecID opt_audio_codec(const char *arg)
{
    AVCodec *p= avcodec_find_encoder_by_name(arg);

    if (p == NULL || p->type != AVMEDIA_TYPE_AUDIO)
        return AV_CODEC_ID_NONE;
        return CODEC_ID_NONE;

    return p->id;
}

static enum AVCodecID opt_video_codec(const char *arg)
static enum CodecID opt_video_codec(const char *arg)
{
    AVCodec *p= avcodec_find_encoder_by_name(arg);

    if (p == NULL || p->type != AVMEDIA_TYPE_VIDEO)
        return AV_CODEC_ID_NONE;
        return CODEC_ID_NONE;

    return p->id;
}
@@ -3992,7 +3940,7 @@ static int ffserver_opt_default(const char *opt, const char *arg,

static int ffserver_opt_preset(const char *arg,
                               AVCodecContext *avctx, int type,
                               enum AVCodecID *audio_id, enum AVCodecID *video_id)
                               enum CodecID *audio_id, enum CodecID *video_id)
{
    FILE *f=NULL;
    char filename[1000], tmp[1000], tmp2[1000], line[1000];
@@ -4074,7 +4022,7 @@ static int parse_ffconfig(const char *filename)
FFStream **last_stream, *stream, *redirect;
FFStream **last_feed, *feed, *s;
AVCodecContext audio_enc, video_enc;
enum AVCodecID audio_id, video_id;
enum CodecID audio_id, video_id;

f = fopen(filename, "r");
if (!f) {
@@ -4091,8 +4039,8 @@ static int parse_ffconfig(const char *filename)
stream = NULL;
feed = NULL;
redirect = NULL;
audio_id = AV_CODEC_ID_NONE;
video_id = AV_CODEC_ID_NONE;
audio_id = CODEC_ID_NONE;
video_id = CODEC_ID_NONE;

#define ERROR(...) report_config_error(filename, line_num, &errors, __VA_ARGS__)
for(;;) {
@@ -4120,7 +4068,7 @@ static int parse_ffconfig(const char *filename)
        ERROR("%s:%d: Invalid host/IP address: %s\n", arg);
    }
} else if (!av_strcasecmp(cmd, "NoDaemon")) {
    // do nothing here, its the default now
    ffserver_daemon = 0;
} else if (!av_strcasecmp(cmd, "RTSPPort")) {
    get_arg(arg, sizeof(arg), &p);
    val = atoi(arg);
@@ -4207,7 +4155,10 @@ static int parse_ffconfig(const char *filename)
        feed->child_argv[i] = av_strdup(arg);
    }

    feed->child_argv[i] = av_asprintf("http://%s:%d/%s",
    feed->child_argv[i] = av_malloc(30 + strlen(feed->filename));

    snprintf(feed->child_argv[i], 30+strlen(feed->filename),
             "http://%s:%d/%s",
             (my_http_addr.sin_addr.s_addr == INADDR_ANY) ? "127.0.0.1" :
             inet_ntoa(my_http_addr.sin_addr),
             ntohs(my_http_addr.sin_port), feed->filename);
@@ -4269,7 +4220,7 @@ static int parse_ffconfig(const char *filename)
    stream = av_mallocz(sizeof(FFStream));
    get_arg(stream->filename, sizeof(stream->filename), &p);
    q = strrchr(stream->filename, '>');
    if (q)
    if (*q)
        *q = '\0';

    for (s = first_stream; s; s = s->next) {
@@ -4282,8 +4233,8 @@ static int parse_ffconfig(const char *filename)
    avcodec_get_context_defaults3(&video_enc, NULL);
    avcodec_get_context_defaults3(&audio_enc, NULL);

    audio_id = AV_CODEC_ID_NONE;
    video_id = AV_CODEC_ID_NONE;
    audio_id = CODEC_ID_NONE;
    video_id = CODEC_ID_NONE;
    if (stream->fmt) {
        audio_id = stream->fmt->audio_codec;
        video_id = stream->fmt->video_codec;
@@ -4365,13 +4316,13 @@ static int parse_ffconfig(const char *filename)
} else if (!av_strcasecmp(cmd, "AudioCodec")) {
    get_arg(arg, sizeof(arg), &p);
    audio_id = opt_audio_codec(arg);
    if (audio_id == AV_CODEC_ID_NONE) {
    if (audio_id == CODEC_ID_NONE) {
        ERROR("Unknown AudioCodec: %s\n", arg);
    }
} else if (!av_strcasecmp(cmd, "VideoCodec")) {
    get_arg(arg, sizeof(arg), &p);
    video_id = opt_video_codec(arg);
    if (video_id == AV_CODEC_ID_NONE) {
    if (video_id == CODEC_ID_NONE) {
        ERROR("Unknown VideoCodec: %s\n", arg);
    }
} else if (!av_strcasecmp(cmd, "MaxTime")) {
@@ -4562,9 +4513,9 @@ static int parse_ffconfig(const char *filename)
    if (stream)
        video_enc.dark_masking = atof(arg);
} else if (!av_strcasecmp(cmd, "NoVideo")) {
    video_id = AV_CODEC_ID_NONE;
    video_id = CODEC_ID_NONE;
} else if (!av_strcasecmp(cmd, "NoAudio")) {
    audio_id = AV_CODEC_ID_NONE;
    audio_id = CODEC_ID_NONE;
} else if (!av_strcasecmp(cmd, "ACL")) {
    parse_acl_row(stream, feed, NULL, p, filename, line_num);
} else if (!av_strcasecmp(cmd, "DynamicACL")) {
@@ -4602,12 +4553,12 @@ static int parse_ffconfig(const char *filename)
    ERROR("No corresponding <Stream> for </Stream>\n");
} else {
    if (stream->feed && stream->fmt && strcmp(stream->fmt->name, "ffm") != 0) {
        if (audio_id != AV_CODEC_ID_NONE) {
        if (audio_id != CODEC_ID_NONE) {
            audio_enc.codec_type = AVMEDIA_TYPE_AUDIO;
|
||||
audio_enc.codec_id = audio_id;
|
||||
add_codec(stream, &audio_enc);
|
||||
}
|
||||
if (video_id != AV_CODEC_ID_NONE) {
|
||||
if (video_id != CODEC_ID_NONE) {
|
||||
video_enc.codec_type = AVMEDIA_TYPE_VIDEO;
|
||||
video_enc.codec_id = video_id;
|
||||
add_codec(stream, &video_enc);
|
||||
@@ -4691,15 +4642,17 @@ static void handle_child_exit(int sig)
|
||||
static void opt_debug(void)
|
||||
{
|
||||
ffserver_debug = 1;
|
||||
ffserver_daemon = 0;
|
||||
logfilename[0] = '-';
|
||||
}
|
||||
|
||||
void show_help_default(const char *opt, const char *arg)
|
||||
static int opt_help(const char *opt, const char *arg)
|
||||
{
|
||||
printf("usage: ffserver [options]\n"
|
||||
"Hyper fast multi format Audio/Video streaming server\n");
|
||||
printf("\n");
|
||||
show_help_options(options, "Main options:", 0, 0, 0);
|
||||
show_help_options(options, "Main options:\n", 0, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const OptionDef options[] = {
|
||||
@@ -4721,6 +4674,8 @@ int main(int argc, char **argv)
|
||||
show_banner(argc, argv, options);
|
||||
|
||||
my_program_name = argv[0];
|
||||
my_program_dir = getcwd(0, 0);
|
||||
ffserver_daemon = 1;
|
||||
|
||||
parse_options(NULL, argc, argv, options, NULL);
|
||||
|
||||
@@ -4752,9 +4707,37 @@ int main(int argc, char **argv)
|
||||
|
||||
compute_bandwidth();
|
||||
|
||||
/* put the process in background and detach it from its TTY */
|
||||
if (ffserver_daemon) {
|
||||
int pid;
|
||||
|
||||
pid = fork();
|
||||
if (pid < 0) {
|
||||
perror("fork");
|
||||
exit(1);
|
||||
} else if (pid > 0) {
|
||||
/* parent : exit */
|
||||
exit(0);
|
||||
} else {
|
||||
/* child */
|
||||
setsid();
|
||||
close(0);
|
||||
open("/dev/null", O_RDWR);
|
||||
if (strcmp(logfilename, "-") != 0) {
|
||||
close(1);
|
||||
dup(0);
|
||||
}
|
||||
close(2);
|
||||
dup(0);
|
||||
}
|
||||
}
|
||||
|
||||
/* signal init */
|
||||
signal(SIGPIPE, SIG_IGN);
|
||||
|
||||
if (ffserver_daemon)
|
||||
chdir("/");
|
||||
|
||||
if (http_server() < 0) {
|
||||
http_log("Could not start server\n");
|
||||
exit(1);
|
||||
|
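The recurring change across the ffserver.c hunks above is the libavcodec identifier rename: enum AVCodecID / AV_CODEC_ID_* on one side of the compare, enum CodecID / CODEC_ID_* on the other. Below is a minimal sketch of the renamed lookup pattern, written for illustration only; lookup_encoder_id is a hypothetical helper, not code from this diff, and it assumes a libavcodec new enough to define enum AVCodecID.

#include <libavcodec/avcodec.h>

/* Resolve an encoder name to a codec ID, mirroring opt_audio_codec() /
 * opt_video_codec() above, but with the post-rename identifiers. */
static enum AVCodecID lookup_encoder_id(const char *name, enum AVMediaType type)
{
    const AVCodec *c = avcodec_find_encoder_by_name(name);

    if (!c || c->type != type)
        return AV_CODEC_ID_NONE;   /* formerly CODEC_ID_NONE */
    return c->id;
}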
@@ -1,172 +0,0 @@
/*
* 012v decoder
*
* Copyright (C) 2012 Carl Eugen Hoyos
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

#include "avcodec.h"
#include "internal.h"
#include "libavutil/intreadwrite.h"

static av_cold int zero12v_decode_init(AVCodecContext *avctx)
{
avctx->pix_fmt = PIX_FMT_YUV422P16;
avctx->bits_per_raw_sample = 10;

avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);

if (avctx->codec_tag == MKTAG('a', '1', '2', 'v'))
av_log_ask_for_sample(avctx, "Samples with actual transparency needed\n");

avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
return 0;
}

static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
int line = 0, ret;
const int width = avctx->width;
AVFrame *pic = avctx->coded_frame;
uint16_t *y, *u, *v;
const uint8_t *line_end, *src = avpkt->data;
int stride = avctx->width * 8 / 3;

if (pic->data[0])
avctx->release_buffer(avctx, pic);

if (width == 1) {
av_log(avctx, AV_LOG_ERROR, "Width 1 not supported.\n");
return AVERROR_INVALIDDATA;
}
if (avpkt->size < avctx->height * stride) {
av_log(avctx, AV_LOG_ERROR, "Packet too small: %d instead of %d\n",
avpkt->size, avctx->height * stride);
return AVERROR_INVALIDDATA;
}

pic->reference = 0;
if ((ret = ff_get_buffer(avctx, pic)) < 0)
return ret;

y = (uint16_t *)pic->data[0];
u = (uint16_t *)pic->data[1];
v = (uint16_t *)pic->data[2];
line_end = avpkt->data + stride;

while (line++ < avctx->height) {
while (1) {
uint32_t t = AV_RL32(src);
src += 4;
*u++ = t << 6 & 0xFFC0;
*y++ = t >> 4 & 0xFFC0;
*v++ = t >> 14 & 0xFFC0;

if (src >= line_end - 1) {
*y = 0x80;
src++;
line_end += stride;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
break;
}

t = AV_RL32(src);
src += 4;
*y++ = t << 6 & 0xFFC0;
*u++ = t >> 4 & 0xFFC0;
*y++ = t >> 14 & 0xFFC0;
if (src >= line_end - 2) {
if (!(width & 1)) {
*y = 0x80;
src += 2;
}
line_end += stride;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
break;
}

t = AV_RL32(src);
src += 4;
*v++ = t << 6 & 0xFFC0;
*y++ = t >> 4 & 0xFFC0;
*u++ = t >> 14 & 0xFFC0;

if (src >= line_end - 1) {
*y = 0x80;
src++;
line_end += stride;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
break;
}

t = AV_RL32(src);
src += 4;
*y++ = t << 6 & 0xFFC0;
*v++ = t >> 4 & 0xFFC0;
*y++ = t >> 14 & 0xFFC0;

if (src >= line_end - 2) {
if (width & 1) {
*y = 0x80;
src += 2;
}
line_end += stride;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
break;
}
}
}

*got_frame = 1;
*(AVFrame*)data= *avctx->coded_frame;

return avpkt->size;
}

static av_cold int zero12v_decode_close(AVCodecContext *avctx)
{
AVFrame *pic = avctx->coded_frame;
if (pic->data[0])
avctx->release_buffer(avctx, pic);
av_freep(&avctx->coded_frame);

return 0;
}

AVCodec ff_zero12v_decoder = {
.name = "012v",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_012V,
.init = zero12v_decode_init,
.close = zero12v_decode_close,
.decode = zero12v_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
};
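The block above is an entire file removed by this compare: the 012v decoder (ff_zero12v_decoder). Its inner loop unpacks three 10-bit components from each little-endian 32-bit word into 16-bit planar samples; the repeated shifts of the form t << 6 & 0xFFC0, t >> 4 & 0xFFC0 and t >> 14 & 0xFFC0 left-align the 10 significant bits in a 16-bit value. A standalone sketch of that single unpacking step follows; it is illustrative only, and which component lands in U, Y or V rotates from word to word in the real decoder.

#include <stdint.h>

/* Illustrative only: split one 32-bit word holding three 10-bit components
 * (c0 in bits 0-9, c1 in bits 10-19, c2 in bits 20-29) into 16-bit samples
 * with the 10 significant bits left-aligned, as in the loop above. */
static void unpack_10bit_triple(uint32_t t, uint16_t *c0, uint16_t *c1, uint16_t *c2)
{
    *c0 = (t << 6)  & 0xFFC0;   /* bits  0-9  -> bits 6-15 */
    *c1 = (t >> 4)  & 0xFFC0;   /* bits 10-19 -> bits 6-15 */
    *c2 = (t >> 14) & 0xFFC0;   /* bits 20-29 -> bits 6-15 */
}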
@@ -29,9 +29,9 @@
#include "bytestream.h"
#include "dsputil.h"
#include "get_bits.h"
#include "internal.h"

#include "libavutil/avassert.h"
//#undef NDEBUG
//#include <assert.h>

#define BLOCK_TYPE_VLC_BITS 5
#define ACDC_VLC_BITS 9
@@ -328,7 +328,7 @@ static inline void mcdc(uint16_t *dst, const uint16_t *src, int log2w,
}
break;
default:
av_assert2(0);
assert(0);
}
}

@@ -343,14 +343,14 @@ static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
uint16_t *start = (uint16_t *)f->last_picture.data[0];
uint16_t *end = start + stride * (f->avctx->height - h + 1) - (1 << log2w);

av_assert2(code >= 0 && code <= 6);
assert(code >= 0 && code <= 6);

if (code == 0) {
if (bytestream2_get_bytes_left(&f->g) < 1) {
if (f->g.buffer_end - f->g.buffer < 1) {
av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n");
return;
}
src += f->mv[bytestream2_get_byteu(&f->g)];
src += f->mv[bytestream2_get_byte(&f->g)];
if (start > src || src > end) {
av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
return;
@@ -369,37 +369,37 @@ static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
} else if (code == 3 && f->version < 2) {
mcdc(dst, src, log2w, h, stride, 1, 0);
} else if (code == 4) {
if (bytestream2_get_bytes_left(&f->g) < 1) {
if (f->g.buffer_end - f->g.buffer < 1) {
av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n");
return;
}
src += f->mv[bytestream2_get_byteu(&f->g)];
src += f->mv[bytestream2_get_byte(&f->g)];
if (start > src || src > end) {
av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
return;
}
if (bytestream2_get_bytes_left(&f->g2) < 2){
if (f->g2.buffer_end - f->g2.buffer < 1){
av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
return;
}
mcdc(dst, src, log2w, h, stride, 1, bytestream2_get_le16u(&f->g2));
mcdc(dst, src, log2w, h, stride, 1, bytestream2_get_le16(&f->g2));
} else if (code == 5) {
if (bytestream2_get_bytes_left(&f->g2) < 2) {
if (f->g2.buffer_end - f->g2.buffer < 1) {
av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
return;
}
mcdc(dst, src, log2w, h, stride, 0, bytestream2_get_le16u(&f->g2));
mcdc(dst, src, log2w, h, stride, 0, bytestream2_get_le16(&f->g2));
} else if (code == 6) {
if (bytestream2_get_bytes_left(&f->g2) < 4) {
if (f->g2.buffer_end - f->g2.buffer < 2) {
av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
return;
}
if (log2w) {
dst[0] = bytestream2_get_le16u(&f->g2);
dst[1] = bytestream2_get_le16u(&f->g2);
dst[0] = bytestream2_get_le16(&f->g2);
dst[1] = bytestream2_get_le16(&f->g2);
} else {
dst[0] = bytestream2_get_le16u(&f->g2);
dst[stride] = bytestream2_get_le16u(&f->g2);
dst[0] = bytestream2_get_le16(&f->g2);
dst[stride] = bytestream2_get_le16(&f->g2);
}
}
}
@@ -429,7 +429,7 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
bytestream_size = FFMAX(length - bitstream_size - wordstream_size, 0);
}

if (bitstream_size > length || bitstream_size >= INT_MAX/8 ||
if (bitstream_size > length ||
bytestream_size > length - bitstream_size ||
wordstream_size > length - bytestream_size - bitstream_size ||
extra > length - bytestream_size - bitstream_size - wordstream_size) {
@@ -749,10 +749,8 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
}

prestream = read_huffman_tables(f, prestream, buf + length - prestream);
if (!prestream) {
av_log(f->avctx, AV_LOG_ERROR, "Error reading Huffman tables.\n");
return AVERROR_INVALIDDATA;
}
if (!prestream)
return -1;

init_get_bits(&f->gb, buf + 4, 8 * bitstream_size);

@@ -786,7 +784,7 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
}

static int decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
int *data_size, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
@@ -814,11 +812,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
return AVERROR_INVALIDDATA;
}

if (f->version <= 1) {
av_log(f->avctx, AV_LOG_ERROR, "cfrm in version %d\n", f->version);
return AVERROR_INVALIDDATA;
}

for (i = 0; i < CFRAME_BUFFER_COUNT; i++)
if (f->cfrm[i].id && f->cfrm[i].id < avctx->frame_number)
av_log(f->avctx, AV_LOG_ERROR, "lost c frame %d\n",
@@ -844,7 +837,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
cfrm->size + data_size + FF_INPUT_BUFFER_PADDING_SIZE);
// explicit check needed as memcpy below might not catch a NULL
if (!cfrm->data) {
av_log(f->avctx, AV_LOG_ERROR, "realloc failure\n");
av_log(f->avctx, AV_LOG_ERROR, "realloc falure");
return -1;
}

@@ -899,12 +892,10 @@ static int decode_frame(AVCodecContext *avctx, void *data,
} else if (frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")) {
if (!f->last_picture.data[0]) {
f->last_picture.reference = 3;
if (ff_get_buffer(avctx, &f->last_picture) < 0) {
if (avctx->get_buffer(avctx, &f->last_picture) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
for (i=0; i<avctx->height; i++)
memset(f->last_picture.data[0] + i*f->last_picture.linesize[0], 0, 2*avctx->width);
}

p->pict_type = AV_PICTURE_TYPE_P;
@@ -923,7 +914,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
p->key_frame = p->pict_type == AV_PICTURE_TYPE_I;

*picture = *p;
*got_frame = 1;
*data_size = sizeof(AVPicture);

emms_c();

@@ -946,7 +937,7 @@ static av_cold int decode_init(AVCodecContext *avctx)

if (avctx->extradata_size != 4 || !avctx->extradata) {
av_log(avctx, AV_LOG_ERROR, "extradata wrong or missing\n");
return AVERROR_INVALIDDATA;
return 1;
}
if((avctx->width % 16) || (avctx->height % 16)) {
av_log(avctx, AV_LOG_ERROR, "unsupported width/height\n");
@@ -960,9 +951,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
init_vlcs(f);

if (f->version > 2)
avctx->pix_fmt = AV_PIX_FMT_RGB565;
avctx->pix_fmt = PIX_FMT_RGB565;
else
avctx->pix_fmt = AV_PIX_FMT_BGR555;
avctx->pix_fmt = PIX_FMT_BGR555;

return 0;
}
@@ -991,7 +982,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
AVCodec ff_fourxm_decoder = {
.name = "4xm",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_4XM,
.id = CODEC_ID_4XM,
.priv_data_size = sizeof(FourXContext),
.init = decode_init,
.close = decode_end,
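The 4xm hunks above contrast two ways of guarding packet reads: one side of the compare checks the remaining input once with bytestream2_get_bytes_left() and then uses the unchecked getters (bytestream2_get_byteu(), bytestream2_get_le16u()), while the other compares raw buffer_end/buffer pointers and uses the bounds-checked getters. Below is a minimal sketch of the check-once-then-read-unchecked pattern; it assumes libavcodec's internal GetByteContext API from bytestream.h, and read_two_le16 is a hypothetical helper written for illustration.

#include <stdint.h>
#include "bytestream.h"   /* libavcodec-internal GetByteContext helpers */

static int read_two_le16(GetByteContext *g, uint16_t out[2])
{
    /* One explicit length check covers both unchecked reads below. */
    if (bytestream2_get_bytes_left(g) < 4)
        return -1;                        /* would overread the packet */
    out[0] = bytestream2_get_le16u(g);
    out[1] = bytestream2_get_le16u(g);
    return 0;
}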
@@ -33,17 +33,17 @@

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "internal.h"


static const enum AVPixelFormat pixfmt_rgb24[] = {
AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE };
static const enum PixelFormat pixfmt_rgb24[] = {
PIX_FMT_BGR24, PIX_FMT_RGB32, PIX_FMT_NONE };

/*
* Decoder context
*/
typedef struct EightBpsContext {
AVCodecContext *avctx;
AVFrame pic;
@@ -54,8 +54,14 @@ typedef struct EightBpsContext {
uint32_t pal[256];
} EightBpsContext;


/*
*
* Decode a frame
*
*/
static int decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
int *data_size, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
@@ -74,7 +80,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,

c->pic.reference = 0;
c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
if (ff_get_buffer(avctx, &c->pic) < 0){
if (avctx->get_buffer(avctx, &c->pic) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
@@ -90,8 +96,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
for (row = 0; row < height; row++) {
pixptr = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p];
pixptr_end = pixptr + c->pic.linesize[0];
if(lp - encoded + row*2 + 1 >= buf_size)
return -1;
dlen = av_be2ne16(*(const unsigned short *)(lp + row * 2));
/* Decode a row of this plane */
while (dlen > 0) {
@@ -135,13 +139,19 @@ static int decode_frame(AVCodecContext *avctx, void *data,
memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
}

*got_frame = 1;
*data_size = sizeof(AVFrame);
*(AVFrame*)data = c->pic;

/* always report that the buffer was completely consumed */
return buf_size;
}


/*
*
* Init 8BPS decoder
*
*/
static av_cold int decode_init(AVCodecContext *avctx)
{
EightBpsContext * const c = avctx->priv_data;
@@ -152,7 +162,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
avcodec_get_frame_defaults(&c->pic);
switch (avctx->bits_per_coded_sample) {
case 8:
avctx->pix_fmt = AV_PIX_FMT_PAL8;
avctx->pix_fmt = PIX_FMT_PAL8;
c->planes = 1;
c->planemap[0] = 0; // 1st plane is palette indexes
break;
@@ -164,7 +174,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
c->planemap[2] = 0; // 3rd plane is blue
break;
case 32:
avctx->pix_fmt = AV_PIX_FMT_RGB32;
avctx->pix_fmt = PIX_FMT_RGB32;
c->planes = 4;
#if HAVE_BIGENDIAN
c->planemap[0] = 1; // 1st plane is red
@@ -187,6 +197,14 @@ static av_cold int decode_init(AVCodecContext *avctx)
return 0;
}



/*
*
* Uninit 8BPS decoder
*
*/
static av_cold int decode_end(AVCodecContext *avctx)
{
EightBpsContext * const c = avctx->priv_data;
@@ -197,10 +215,12 @@ static av_cold int decode_end(AVCodecContext *avctx)
return 0;
}


AVCodec ff_eightbps_decoder = {
.name = "8bps",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_8BPS,
.id = CODEC_ID_8BPS,
.priv_data_size = sizeof(EightBpsContext),
.init = decode_init,
.close = decode_end,
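The 8bps hunks, like the 4xm ones, also show the decode-callback output convention differing between the two sides of this compare: one side obtains its frame buffer through the internal ff_get_buffer() helper and reports output via *got_frame, while the other calls avctx->get_buffer() directly and writes sizeof(AVFrame) into a data_size pointer. A rough skeleton of the got_frame convention as it appears here follows; it assumes the same transitional libavcodec internals as the diff (the two-argument ff_get_buffer() from internal.h), and sketch_decode_frame plus the file-static frame stand in for a real decoder's callback and context field.

#include "avcodec.h"
#include "internal.h"   /* ff_get_buffer() */

static AVFrame sketch_pic;   /* stands in for a per-decoder context frame */

static int sketch_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame, AVPacket *avpkt)
{
    int ret = ff_get_buffer(avctx, &sketch_pic);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    /* ... decode avpkt->data into sketch_pic here ... */
    *got_frame = 1;                    /* signal that a frame was produced */
    *(AVFrame *)data = sketch_pic;     /* hand the frame back to the caller */
    return avpkt->size;                /* whole packet consumed */
}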
Some files were not shown because too many files have changed in this diff.