Compare commits


4 Commits

Author SHA1 Message Date
Michael Niedermayer
a74f292d4a tests/Makefile: fix ffprobe test dependancy
This fixes the ffprobe tests under mingw/wine

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit a96e3a3e77)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2012-09-28 03:02:28 +02:00
Michael Niedermayer
38df088545 asfenc: avoid negative timestamps
Fixes Ticket1606

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit b4c753487c)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2012-09-28 02:47:37 +02:00
Michael Niedermayer
2efbff1db3 Revert "ffprobe: Add a few common disposition cases"
revert requested by ubitux for 1.0

This reverts commit 8e2a950b29.
2012-09-28 02:27:10 +02:00
Michael Niedermayer
f41c9f53de Update for 1.0
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2012-09-28 02:18:32 +02:00
2027 changed files with 50421 additions and 83659 deletions

10
.gitignore vendored
View File

@@ -3,14 +3,10 @@
*.d
*.def
*.dll
*.dylib
*.exe
*.exp
*.h.c
*.ilk
*.lib
*.pc
*.pdb
*.so
*.so.*
*.ver
@@ -26,7 +22,6 @@
/config.*
/version.h
/doc/*.1
/doc/*.3
/doc/*.html
/doc/*.pod
/doc/avoptions_codec.texi
@@ -37,11 +32,10 @@
/doc/examples/filtering_video
/doc/examples/metadata
/doc/examples/muxing
/doc/examples/resampling_audio
/doc/examples/scaling_video
/doc/fate.txt
/doc/doxy/html/
/doc/print_options
/doxy/
/libavcodec/*_tablegen
/libavcodec/*_tables.c
/libavcodec/*_tables.h
@@ -57,8 +51,6 @@
/tools/ffbisect
/tools/bisect.need
/tools/cws2fws
/tools/fourcc2pixfmt
/tools/ffescape
/tools/ffeval
/tools/graph2dot
/tools/ismindex

993
Changelog Normal file
View File

@@ -0,0 +1,993 @@
Entries are sorted chronologically from oldest to youngest within each release;
releases are sorted from youngest to oldest.
version next:
version 1.0:
- INI and flat output in ffprobe
- Scene detection in libavfilter
- Indeo Audio decoder
- channelsplit audio filter
- setnsamples audio filter
- atempo filter
- ffprobe -show_data option
- RTMPT protocol support
- iLBC encoding/decoding via libilbc
- Microsoft Screen 1 decoder
- join audio filter
- audio channel mapping filter
- Microsoft ATC Screen decoder
- RTSP listen mode
- TechSmith Screen Codec 2 decoder
- AAC encoding via libfdk-aac
- Microsoft Expression Encoder Screen decoder
- RTMPS protocol support
- RTMPTS protocol support
- RTMPE protocol support
- RTMPTE protocol support
- showwaves and showspectrum filter
- LucasArts SMUSH playback support
- SAMI, RealText and SubViewer demuxers and decoders
- Heart Of Darkness PAF playback support
- iec61883 device
- asettb filter
- new option: -progress
- 3GPP Timed Text encoder/decoder
- GeoTIFF decoder support
- ffmpeg -(no)stdin option
- Opus decoder using libopus
- caca output device using libcaca
- alphaextract and alphamerge filters
- concat filter
- flite filter
- Canopus Lossless Codec decoder
- bitmap subtitles in filters (experimental and temporary)
- MP2 encoding via TwoLAME
- bmp parser
- smptebars source
- asetpts filter
- hue filter
- ICO muxer
- SubRip encoder and decoder without embedded timing
- edge detection filter
- framestep filter
- ffmpeg -shortest option is now per-output file,
  -pass and -passlogfile are now per-output stream
- volume measurement filter
- Ut Video encoder
- Microsoft Screen 2 decoder
- Matroska demuxer now identifies SRT subtitles as AV_CODEC_ID_SUBRIP
instead of AV_CODEC_ID_TEXT
- smartblur filter ported from MPlayer
- CPiA decoder
- decimate filter ported from MPlayer
- RTP depacketization of JPEG
- Smooth Streaming live segmenter muxer
- F4V muxer
- sendcmd and asendcmd filters
- WebVTT demuxer and decoder (simple tags supported)
- RTP packetization of JPEG
- faststart option in the MOV/MP4 muxer
version 0.11:
- Fixes: CVE-2012-2772, CVE-2012-2774, CVE-2012-2775, CVE-2012-2776, CVE-2012-2777,
CVE-2012-2779, CVE-2012-2782, CVE-2012-2783, CVE-2012-2784, CVE-2012-2785,
CVE-2012-2786, CVE-2012-2787, CVE-2012-2788, CVE-2012-2789, CVE-2012-2790,
CVE-2012-2791, CVE-2012-2792, CVE-2012-2793, CVE-2012-2794, CVE-2012-2795,
CVE-2012-2796, CVE-2012-2797, CVE-2012-2798, CVE-2012-2799, CVE-2012-2800,
CVE-2012-2801, CVE-2012-2802, CVE-2012-2803, CVE-2012-2804,
- v408 Quicktime and Microsoft AYUV Uncompressed 4:4:4:4 encoder and decoder
- setfield filter
- CDXL demuxer and decoder
- Apple ProRes encoder
- ffprobe -count_packets and -count_frames options
- Sun Rasterfile Encoder
- ID3v2 attached pictures reading and writing
- WMA Lossless decoder
- bluray protocol
- blackdetect filter
- libutvideo encoder wrapper (--enable-libutvideo)
- swapuv filter
- bbox filter
- XBM encoder and decoder
- RealAudio Lossless decoder
- ZeroCodec decoder
- tile video filter
- Metal Gear Solid: The Twin Snakes demuxer
- OpenEXR image decoder
- removelogo filter
- drop support for ffmpeg without libavfilter
- drawtext video filter: fontconfig support
- ffmpeg -benchmark_all option
- super2xsai filter ported from libmpcodecs
- add libavresample audio conversion library for compatibility
- MicroDVD decoder
- Avid Meridien (AVUI) encoder and decoder
- accept + prefix to -pix_fmt option to disable automatic conversions.
- complete audio filtering in libavfilter and ffmpeg
- add fps filter
- vorbis parser
- png parser
- audio mix filter
version 0.10:
- Fixes: CVE-2011-3929, CVE-2011-3934, CVE-2011-3935, CVE-2011-3936,
CVE-2011-3937, CVE-2011-3940, CVE-2011-3941, CVE-2011-3944,
CVE-2011-3945, CVE-2011-3946, CVE-2011-3947, CVE-2011-3949,
CVE-2011-3950, CVE-2011-3951, CVE-2011-3952
- v410 Quicktime Uncompressed 4:4:4 10-bit encoder and decoder
- SBaGen (SBG) binaural beats script demuxer
- OpenMG Audio muxer
- Timecode extraction in DV and MOV
- thumbnail video filter
- XML output in ffprobe
- asplit audio filter
- tinterlace video filter
- astreamsync audio filter
- amerge audio filter
- ISMV (Smooth Streaming) muxer
- GSM audio parser
- SMJPEG muxer
- XWD encoder and decoder
- Automatic thread count based on detection of the number of (available) CPU cores
- y41p Brooktree Uncompressed 4:1:1 12-bit encoder and decoder
- ffprobe -show_error option
- Avid 1:1 10-bit RGB Packer codec
- v308 Quicktime Uncompressed 4:4:4 encoder and decoder
- yuv4 libquicktime packed 4:2:0 encoder and decoder
- ffprobe -show_frames option
- silencedetect audio filter
- ffprobe -show_program_version, -show_library_versions, -show_versions options
- rv34: frame-level multi-threading
- optimized iMDCT transform on x86 using SSE for mpegaudiodec
- Improved PGS subtitle decoder
- dumpgraph option to lavfi device
- r210 and r10k encoders
- ffwavesynth decoder
- aviocat tool
- ffeval tool
version 0.9:
- openal input device added
- boxblur filter added
- BWF muxer
- Flash Screen Video 2 decoder
- lavfi input device added
- added avconv, which is almost the same for now, except
for a few incompatible changes in the options, which will hopefully make them
easier to use. The changes are:
* The options placement is now strictly enforced! While in theory the
options for ffmpeg should be given in [input options] -i INPUT [output
options] OUTPUT order, in practice it was possible to give output options
before the -i and it mostly worked. Except when it didn't - the behavior was
a bit inconsistent. In avconv, it is not possible to mix input and output
options. All non-global options are reset after an input or output filename.
* All per-file options are now truly per-file - they apply only to the next
input or output file and specifying different values for different files
will now work properly (notably -ss and -t options).
* All per-stream options are now truly per-stream - it is possible to
specify which stream(s) a given option should apply to. See the Stream
specifiers section in the avconv manual for details.
* In ffmpeg some options (like -newvideo/-newaudio/...) are irregular in the
sense that they're specified after the output filename instead of before,
like all other options. In avconv this irregularity is removed; all options
apply to the next input or output file.
* -newvideo/-newaudio/-newsubtitle options were removed. Not only were they
irregular and highly confusing, they were also redundant. In avconv the -map
option will create new streams in the output file and map input streams to
them. E.g. avconv -i INPUT -map 0 OUTPUT will create an output stream for
each stream in the first input file.
* The -map option now has slightly different and more powerful syntax:
+ Colons (':') are used to separate file index/stream type/stream index
instead of dots. Comma (',') is used to separate the sync stream instead
of colon. This is done for consistency with other options.
+ It's possible to specify stream type. E.g. -map 0:a:2 creates an
output stream from the third input audio stream.
+ Omitting the stream index now maps all the streams of the given type,
not just the first. E.g. -map 0:s creates output streams for all the
subtitle streams in the first input file.
+ Since -map can now match multiple streams, negative mappings were
introduced. Negative mappings disable some streams from an already
defined map. E.g. '-map 0 -map -0:a:1' means 'create output streams for
all the streams in the first input file, except for the second audio
stream'.
* There is a new option -c (or -codec) for choosing the decoder/encoder to
use, which makes it possible to precisely specify target stream(s) consistently with
other options. E.g. -c:v libx264 sets the codec for all video streams, -c:a:0
libvorbis sets the codec for the first audio stream and -c copy copies all
the streams without reencoding. Old -vcodec/-acodec/-scodec options are now
aliases to -c:v/a/s
* It is now possible to precisely specify which stream an AVOption should
apply to. E.g. -b:v:0 2M sets the bitrate for the first video stream, while
-b:a 128k sets the bitrate for all audio streams. Note that the old -ab 128k
syntax is deprecated and will stop working soon.
* -map_chapters now takes only an input file index and applies to the next
output file. This is consistent with how all the other options work.
* -map_metadata now takes only an input metadata specifier and applies to
the next output file. Output metadata specifier is now part of the option
name, similarly to the AVOptions/map/codec feature above.
* -metadata can now be used to set metadata on streams and chapters, e.g.
-metadata:s:1 language=eng sets the language of the second stream to 'eng'.
This made -vlang/-alang/-slang options redundant, so they were removed.
* -qscale option now uses stream specifiers and applies to all streams, not
just video. I.e. plain -qscale number would now apply to all streams. To get
the old behavior, use -qscale:v. Also there is now a shortcut -q for -qscale
and -aq is now an alias for -q:a.
* -vbsf/-absf/-sbsf options were removed and replaced by a -bsf option which
uses stream specifiers. Use -bsf:v/a/s instead of the old options.
* -itsscale option now uses stream specifiers, so its argument is only the
scale parameter.
* -intra option was removed, use -g 0 for the same effect.
* -psnr option was removed, use -flags +psnr for the same effect.
* -vf option is now an alias to the new -filter option, which uses stream specifiers.
* -vframes/-aframes/-dframes options are now aliases to the new -frames option.
* -vtag/-atag/-stag options are now aliases to the new -tag option.
- XMV demuxer
- LOAS demuxer
- ashowinfo filter added
- Windows Media Image decoder
- amovie source added
- LATM muxer/demuxer
- Speex encoder via libspeex
- JSON output in ffprobe
- WTV muxer
- Optional C++ Support (needed for libstagefright)
- H.264 Decoding on Android via Stagefright
- Prores decoder
- BIN/XBIN/ADF/IDF text file decoder
- aconvert audio filter added
- audio support to lavfi input device added
- libcdio-paranoia input device for audio CD grabbing
- Apple ProRes decoder
- CELT in Ogg demuxing
- G.723.1 demuxer and decoder
- libmodplug support (--enable-libmodplug)
- VC-1 interlaced decoding
- libutvideo wrapper (--enable-libutvideo)
- aevalsrc audio source added
- Ut Video decoder
- Speex encoding via libspeex
- 4:2:2 H.264 decoding support
- 4:2:2 and 4:4:4 H.264 encoding with libx264
- Pulseaudio input device
- Prores encoder
- Video Decoder Acceleration (VDA) HWAccel module.
- replacement Indeo 3 decoder
- new ffmpeg option: -map_channel
- volume audio filter added
- earwax audio filter added
- libv4l2 support (--enable-libv4l2)
- TLS/SSL and HTTPS protocol support
- AVOptions API rewritten and documented
- most of CODEC_FLAG2_*, some CODEC_FLAG_* and many codec-specific fields in
AVCodecContext deprecated. Codec private options should be used instead.
- Properly working defaults in libx264 wrapper, support for native presets.
- Encrypted OMA files support
- Discworld II BMV decoding support
- VBLE Decoder
- OS X Video Decoder Acceleration (VDA) support
- compact and csv output in ffprobe
- pan audio filter
- IFF Amiga Continuous Bitmap (ACBM) decoder
- ass filter
- CRI ADX audio format muxer and demuxer
- Playstation Portable PMP format demuxer
- Microsoft Windows ICO demuxer
- life source
- PCM format support in OMA demuxer
- CLJR encoder
- new option: -report
- Dxtory capture format decoder
- cellauto source
- Simple segmenting muxer
- Indeo 4 decoder
- SMJPEG demuxer
version 0.8:
- many, many things we forgot because we'd rather write code than changelogs
- WebM support in Matroska de/muxer
- low overhead Ogg muxing
- MMS-TCP support
- VP8 de/encoding via libvpx
- Demuxer for On2's IVF format
- Pictor/PC Paint decoder
- HE-AAC v2 decoder
- HE-AAC v2 encoding with libaacplus
- libfaad2 wrapper removed
- DTS-ES extension (XCh) decoding support
- native VP8 decoder
- RTSP tunneling over HTTP
- RTP depacketization of SVQ3
- -strict inofficial replaced by -strict unofficial
- ffplay -exitonkeydown and -exitonmousedown options added
- native GSM / GSM MS decoder
- RTP depacketization of QDM2
- ANSI/ASCII art playback system
- Lego Mindstorms RSO de/muxer
- libavcore added (and subsequently removed)
- SubRip subtitle file muxer and demuxer
- Chinese AVS encoding via libxavs
- ffprobe -show_packets option added
- RTP packetization of Theora and Vorbis
- RTP depacketization of MP4A-LATM
- RTP packetization and depacketization of VP8
- hflip filter
- Apple HTTP Live Streaming demuxer
- a64 codec
- MMS-HTTP support
- G.722 ADPCM audio encoder/decoder
- R10k video decoder
- ocv_smooth filter
- frei0r wrapper filter
- change crop filter syntax to width:height:x:y
- make the crop filter accept parametric expressions
- make ffprobe accept AVFormatContext options
- yadif filter
- blackframe filter
- Demuxer for Leitch/Harris' VR native stream format (LXF)
- RTP depacketization of the X-QT QuickTime format
- SAP (Session Announcement Protocol, RFC 2974) muxer and demuxer
- cropdetect filter
- ffmpeg -crop* options removed
- transpose filter added
- ffmpeg -force_key_frames option added
- demuxer for receiving raw rtp:// URLs without an SDP description
- single stream LATM/LOAS decoder
- setpts filter added
- Win64 support for optimized x86 assembly functions
- MJPEG/AVI1 to JPEG/JFIF bitstream filter
- ASS subtitle encoder and decoder
- IEC 61937 encapsulation for E-AC-3, TrueHD, DTS-HD (for HDMI passthrough)
- overlay filter added
- rename aspect filter to setdar, and pixelaspect to setsar
- IEC 61937 demuxer
- Mobotix .mxg demuxer
- frei0r source added
- hqdn3d filter added
- RTP depacketization of QCELP
- FLAC parser added
- gradfun filter added
- AMR-WB decoder
- replace the ocv_smooth filter with a more generic ocv filter
- Windows Televison (WTV) demuxer
- FFmpeg metadata format muxer and demuxer
- SubRip (srt) subtitle encoder and decoder
- floating-point AC-3 encoder added
- Lagarith decoder
- ffmpeg -copytb option added
- IVF muxer added
- Wing Commander IV movies decoder added
- movie source added
- Bink version 'b' audio and video decoder
- Bitmap Brothers JV playback system
- Apple HTTP Live Streaming protocol handler
- sndio support for playback and record
- Linux framebuffer input device added
- Chronomaster DFA decoder
- DPX image encoder
- MicroDVD subtitle file muxer and demuxer
- Playstation Portable PMP format demuxer
- fieldorder video filter added
- AAC encoding via libvo-aacenc
- AMR-WB encoding via libvo-amrwbenc
- xWMA demuxer
- Mobotix MxPEG decoder
- VP8 frame-multithreading
- NEON optimizations for VP8
- Lots of deprecated API cruft removed
- fft and imdct optimizations for AVX (Sandy Bridge) processors
- showinfo filter added
- SMPTE 302M AES3 audio decoder
- Apple Core Audio Format muxer
- 9bit and 10bit per sample support in the H.264 decoder
- 9bit and 10bit FFV1 encoding / decoding
- split filter added
- select filter added
- sdl output device added
- libmpcodecs video filter support (3 times as many filters as before)
- mpeg2 aspect ratio detection fixed
- libxvid aspect pickiness fixed
- Frame multithreaded decoding
- E-AC-3 audio encoder
- ac3enc: add channel coupling support
- floating-point sample format support to the ac3, eac3, dca, aac, and vorbis decoders.
- H264/MPEG frame-level multi-threading
- All av_metadata_* functions renamed to av_dict_* and moved to libavutil
- 4:4:4 H.264 decoding support
- 10-bit H.264 optimizations for x86
- lut, lutrgb, and lutyuv filters added
- buffersink libavfilter sink added
- Bump libswscale for recently reported ABI break
- New J2K encoder (via OpenJPEG)
version 0.7:
- all the changes for 0.8, but keeping API/ABI compatibility with the 0.6 release
version 0.6:
- PB-frame decoding for H.263
- deprecated vhook subsystem removed
- deprecated old scaler removed
- VQF demuxer
- Alpha channel scaler
- PCX encoder
- RTP packetization of H.263
- RTP packetization of AMR
- RTP depacketization of Vorbis
- CorePNG decoding support
- Cook multichannel decoding support
- introduced avlanguage helpers in libavformat
- 8088flex TMV demuxer and decoder
- per-stream language-tags extraction in asfdec
- V210 decoder and encoder
- remaining GPL parts in AC-3 decoder converted to LGPL
- QCP demuxer
- SoX native format muxer and demuxer
- AMR-NB decoding/encoding, AMR-WB decoding via OpenCORE libraries
- DPX image decoder
- Electronic Arts Madcow decoder
- DivX (XSUB) subtitle encoder
- nonfree libamr support for AMR-NB/WB decoding/encoding removed
- experimental AAC encoder
- RTP depacketization of ASF and RTSP from WMS servers
- RTMP support in libavformat
- noX handling for OPT_BOOL X options
- Wave64 demuxer
- IEC-61937 compatible Muxer
- TwinVQ decoder
- Bluray (PGS) subtitle decoder
- LPCM support in MPEG-TS (HDMV RID as found on Blu-ray disks)
- WMA Pro decoder
- Core Audio Format demuxer
- Atrac1 decoder
- MD STUDIO audio demuxer
- RF64 support in WAV demuxer
- MPEG-4 Audio Lossless Coding (ALS) decoder
- -formats option split into -formats, -codecs, -bsfs, and -protocols
- IV8 demuxer
- CDG demuxer and decoder
- R210 decoder
- Auravision Aura 1 and 2 decoders
- Deluxe Paint Animation playback system
- SIPR decoder
- Adobe Filmstrip muxer and demuxer
- RTP depacketization of H.263
- Bink demuxer and audio/video decoders
- enable symbol versioning by default for linkers that support it
- IFF PBM/ILBM bitmap decoder
- concat protocol
- Indeo 5 decoder
- RTP depacketization of AMR
- WMA Voice decoder
- ffprobe tool
- AMR-NB decoder
- RTSP muxer
- HE-AAC v1 decoder
- Kega Game Video (KGV1) decoder
- VorbisComment writing for FLAC, Ogg FLAC and Ogg Speex files
- RTP depacketization of Theora
- HTTP Digest authentication
- RTMP/RTMPT/RTMPS/RTMPE/RTMPTE protocol support via librtmp
- Psygnosis YOP demuxer and video decoder
- spectral extension support in the E-AC-3 decoder
- unsharp video filter
- RTP hinting in the mov/3gp/mp4 muxer
- Dirac in Ogg demuxing
- seek to keyframes in Ogg
- 4:2:2 and 4:4:4 Theora decoding
- 35% faster VP3/Theora decoding
- faster AAC decoding
- faster H.264 decoding
- RealAudio 1.0 (14.4K) encoder
version 0.5:
- DV50 AKA DVCPRO50 encoder, decoder, muxer and demuxer
- TechSmith Camtasia (TSCC) video decoder
- IBM Ultimotion (ULTI) video decoder
- Sierra Online audio file demuxer and decoder
- Apple QuickDraw (qdrw) video decoder
- Creative ADPCM audio decoder (16 bits as well as 8 bits schemes)
- Electronic Arts Multimedia (WVE/UV2/etc.) file demuxer
- Miro VideoXL (VIXL) video decoder
- H.261 video encoder
- QPEG video decoder
- Nullsoft Video (NSV) file demuxer
- Shorten audio decoder
- LOCO video decoder
- Apple Lossless Audio Codec (ALAC) decoder
- Winnov WNV1 video decoder
- Autodesk Animator Studio Codec (AASC) decoder
- Indeo 2 video decoder
- Fraps FPS1 video decoder
- Snow video encoder/decoder
- Sonic audio encoder/decoder
- Vorbis audio decoder
- Macromedia ADPCM decoder
- Duck TrueMotion 2 video decoder
- support for decoding FLX and DTA extensions in FLIC files
- H.264 custom quantization matrices support
- ffserver fixed, it should now be usable again
- QDM2 audio decoder
- Real Cooker audio decoder
- TrueSpeech audio decoder
- WMA2 audio decoder fixed, now all files should play correctly
- RealAudio 14.4 and 28.8 decoders fixed
- JPEG-LS decoder
- build system improvements
- tabs and trailing whitespace removed from the codebase
- CamStudio video decoder
- AIFF/AIFF-C audio format, encoding and decoding
- ADTS AAC file reading and writing
- Creative VOC file reading and writing
- American Laser Games multimedia (*.mm) playback system
- Zip Motion Blocks Video decoder
- improved Theora/VP3 decoder
- True Audio (TTA) decoder
- AVS demuxer and video decoder
- JPEG-LS encoder
- Smacker demuxer and decoder
- NuppelVideo/MythTV demuxer and RTjpeg decoder
- KMVC decoder
- MPEG-2 intra VLC support
- MPEG-2 4:2:2 encoder
- Flash Screen Video decoder
- GXF demuxer
- Chinese AVS decoder
- GXF muxer
- MXF demuxer
- VC-1/WMV3/WMV9 video decoder
- MacIntel support
- AVISynth support
- VMware video decoder
- VP5 video decoder
- VP6 video decoder
- WavPack lossless audio decoder
- Targa (.TGA) picture decoder
- Vorbis audio encoder
- Delphine Software .cin demuxer/audio and video decoder
- Tiertex .seq demuxer/video decoder
- MTV demuxer
- TIFF picture encoder and decoder
- GIF picture decoder
- Intel Music Coder decoder
- Zip Motion Blocks Video encoder
- Musepack decoder
- Flash Screen Video encoder
- Theora encoding via libtheora
- BMP encoder
- WMA encoder
- GSM-MS encoder and decoder
- DCA decoder
- DXA demuxer and decoder
- DNxHD decoder
- Gamecube movie (.THP) playback system
- Blackfin optimizations
- Interplay C93 demuxer and video decoder
- Bethsoft VID demuxer and video decoder
- CRYO APC demuxer
- Atrac3 decoder
- V.Flash PTX decoder
- RoQ muxer, RoQ audio encoder
- Renderware TXD demuxer and decoder
- extern C declarations for C++ removed from headers
- sws_flags command line option
- codebook generator
- RoQ video encoder
- QTRLE encoder
- OS/2 support removed and restored again
- AC-3 decoder
- NUT muxer
- additional SPARC (VIS) optimizations
- Matroska muxer
- slice-based parallel H.264 decoding
- Monkey's Audio demuxer and decoder
- AMV audio and video decoder
- DNxHD encoder
- H.264 PAFF decoding
- Nellymoser ASAO decoder
- Beam Software SIFF demuxer and decoder
- libvorbis Vorbis decoding removed in favor of native decoder
- IntraX8 (J-Frame) subdecoder for WMV2 and VC-1
- Ogg (Theora, Vorbis and FLAC) muxer
- The "device" muxers and demuxers are now in a new libavdevice library
- PC Paintbrush PCX decoder
- Sun Rasterfile decoder
- TechnoTrend PVA demuxer
- Linux Media Labs MPEG-4 (LMLM4) demuxer
- AVM2 (Flash 9) SWF muxer
- QT variant of IMA ADPCM encoder
- VFW grabber
- iPod/iPhone compatible mp4 muxer
- Mimic decoder
- MSN TCP Webcam stream demuxer
- RL2 demuxer / decoder
- IFF demuxer
- 8SVX audio decoder
- non-recursive Makefiles
- BFI demuxer
- MAXIS EA XA (.xa) demuxer / decoder
- BFI video decoder
- OMA demuxer
- MLP/TrueHD decoder
- Electronic Arts CMV decoder
- Motion Pixels Video decoder
- Motion Pixels MVI demuxer
- removed animated GIF decoder/demuxer
- D-Cinema audio muxer
- Electronic Arts TGV decoder
- Apple Lossless Audio Codec (ALAC) encoder
- AAC decoder
- floating point PCM encoder/decoder
- MXF muxer
- DV100 AKA DVCPRO HD decoder and demuxer
- E-AC-3 support added to AC-3 decoder
- Nellymoser ASAO encoder
- ASS and SSA demuxer and muxer
- liba52 wrapper removed
- SVQ3 watermark decoding support
- Speex decoding via libspeex
- Electronic Arts TGQ decoder
- RV40 decoder
- QCELP / PureVoice decoder
- RV30 decoder
- hybrid WavPack support
- R3D REDCODE demuxer
- ALSA support for playback and record
- Electronic Arts TQI decoder
- OpenJPEG based JPEG 2000 decoder
- NC (NC4600) camera file demuxer
- Gopher client support
- MXF D-10 muxer
- generic metadata API
- flash ScreenVideo2 encoder
version 0.4.9-pre1:
- DV encoder, DV muxer
- Microsoft RLE video decoder
- Microsoft Video-1 decoder
- Apple Animation (RLE) decoder
- Apple Graphics (SMC) decoder
- Apple Video (RPZA) decoder
- Cinepak decoder
- Sega FILM (CPK) file demuxer
- Westwood multimedia support (VQA & AUD files)
- Id Quake II CIN playback support
- 8BPS video decoder
- FLIC playback support
- RealVideo 2.0 (RV20) decoder
- Duck TrueMotion v1 (DUCK) video decoder
- Sierra VMD demuxer and video decoder
- MSZH and ZLIB decoder support
- SVQ1 video encoder
- AMR-WB support
- PPC optimizations
- rate distortion optimal cbp support
- rate distorted optimal ac prediction for MPEG-4
- rate distorted optimal lambda->qp support
- AAC encoding with libfaac
- Sunplus JPEG codec (SP5X) support
- use Lagrange multiplier instead of QP for ratecontrol
- Theora/VP3 decoding support
- XA and ADX ADPCM codecs
- export MPEG-2 active display area / pan scan
- Add support for configuring with IBM XLC
- floating point AAN DCT
- initial support for zygo video (not complete)
- RGB ffv1 support
- new audio/video parser API
- av_log() system
- av_read_frame() and av_seek_frame() support
- missing last frame fixes
- seek by mouse in ffplay
- noise reduction of DCT coefficients
- H.263 OBMC & 4MV support
- H.263 alternative inter vlc support
- H.263 loop filter
- H.263 slice structured mode
- interlaced DCT support for MPEG-2 encoding
- stuffing to stay above min_bitrate
- MB type & QP visualization
- frame stepping for ffplay
- interlaced motion estimation
- alternate scantable support
- SVCD scan offset support
- closed GOP support
- SSE2 FDCT
- quantizer noise shaping
- G.726 ADPCM audio codec
- MS ADPCM encoding
- multithreaded/SMP motion estimation
- multithreaded/SMP encoding for MPEG-1/MPEG-2/MPEG-4/H.263
- multithreaded/SMP decoding for MPEG-2
- FLAC decoder
- Metrowerks CodeWarrior support
- H.263+ custom pcf support
- nicer output for 'ffmpeg -formats'
- Matroska demuxer
- SGI image format, encoding and decoding
- H.264 loop filter support
- H.264 CABAC support
- nicer looking arrows for the motion vector visualization
- improved VCD support
- audio timestamp drift compensation
- MPEG-2 YUV 422/444 support
- polyphase kaiser windowed sinc and blackman nuttall windowed sinc audio resample
- better image scaling
- H.261 support
- correctly interleave packets during encoding
- VIS optimized motion compensation
- intra_dc_precision>0 encoding support
- support reuse of motion vectors/MB types/field select values of the source video
- more accurate deblock filter
- padding support
- many optimizations and bugfixes
- FunCom ISS audio file demuxer and corresponding ADPCM decoding
version 0.4.8:
- MPEG-2 video encoding (Michael)
- Id RoQ playback subsystem (Mike Melanson and Tim Ferguson)
- Wing Commander III Movie (.mve) file playback subsystem (Mike Melanson
and Mario Brito)
- Xan DPCM audio decoder (Mario Brito)
- Interplay MVE playback subsystem (Mike Melanson)
- Duck DK3 and DK4 ADPCM audio decoders (Mike Melanson)
version 0.4.7:
- RealAudio 1.0 (14_4) and 2.0 (28_8) native decoders. Author unknown, code from mplayerhq
(originally from public domain player for Amiga at http://www.honeypot.net/audio)
- current version now also compiles with older GCC (Fabrice)
- 4X multimedia playback system including 4xm file demuxer (Mike
Melanson), and 4X video and audio codecs (Michael)
- Creative YUV (CYUV) decoder (Mike Melanson)
- FFV1 codec (our very simple lossless intra only codec, compresses much better
than HuffYUV) (Michael)
- ASV1 (Asus), H.264, Intel indeo3 codecs have been added (various)
- tiny PNG encoder and decoder, tiny GIF decoder, PAM decoder (PPM with
alpha support), JPEG YUV colorspace support. (Fabrice Bellard)
- ffplay has been replaced with a newer version which uses SDL (optionally)
for multiplatform support (Fabrice)
- Sorenson Version 3 codec (SVQ3) support has been added (decoding only) - donated
by anonymous
- AMR format has been added (Johannes Carlsson)
- 3GP support has been added (Johannes Carlsson)
- VP3 codec has been added (Mike Melanson)
- more MPEG-1/2 fixes
- better multiplatform support, MS Visual Studio fixes (various)
- AltiVec optimizations (Magnus Damn and others)
- SH4 processor support has been added (BERO)
- new public interfaces (avcodec_get_pix_fmt) (Roman Shaposhnick)
- VOB streaming support (Brian Foley)
- better MP3 autodetection (Andriy Rysin)
- qpel encoding (Michael)
- 4mv+b frames encoding finally fixed (Michael)
- chroma ME (Michael)
- 5 comparison functions for ME (Michael)
- B-frame encoding speedup (Michael)
- WMV2 codec (unfinished - Michael)
- user specified diamond size for EPZS (Michael)
- Playstation STR playback subsystem, still experimental (Mike and Michael)
- ASV2 codec (Michael)
- CLJR decoder (Alex)
.. And lots more new enhancements and fixes.
version 0.4.6:
- completely new integer only MPEG audio layer 1/2/3 decoder rewritten
from scratch
- Recoded DCT and motion vector search with gcc (no longer depends on nasm)
- fix quantization bug in AC3 encoder
- added PCM codecs and format. Corrected WAV/AVI/ASF PCM issues
- added prototype ffplay program
- added GOB header parsing on H.263/H.263+ decoder (Juanjo)
- bug fix on MCBPC tables of H.263 (Juanjo)
- bug fix on DC coefficients of H.263 (Juanjo)
- added Advanced Prediction Mode on H.263/H.263+ decoder (Juanjo)
- now we can decode H.263 streams found in QuickTime files (Juanjo)
- now we can decode H.263 streams found in VIVO v1 files(Juanjo)
- preliminary RTP "friendly" mode for H.263/H.263+ coding. (Juanjo)
- added GOB header for H.263/H.263+ coding on RTP mode (Juanjo)
- now H.263 picture size is returned on the first decoded frame (Juanjo)
- added first regression tests
- added MPEG-2 TS demuxer
- new demux API for libav
- more accurate and faster IDCT (Michael)
- faster and entropy-controlled motion search (Michael)
- two pass video encoding (Michael)
- new video rate control (Michael)
- added MSMPEG4V1, MSMPEGV2 and WMV1 support (Michael)
- great performance improvement of video encoders and decoders (Michael)
- new and faster bit readers and vlc parsers (Michael)
- high quality encoding mode: tries all macroblock/VLC types (Michael)
- added DV video decoder
- preliminary RTP/RTSP support in ffserver and libavformat
- H.263+ AIC decoding/encoding support (Juanjo)
- VCD MPEG-PS mode (Juanjo)
- PSNR stuff (Juanjo)
- simple stats output (Juanjo)
- 16-bit and 15-bit RGB/BGR/GBR support (Bisqwit)
version 0.4.5:
- some header fixes (Zdenek Kabelac <kabi at informatics.muni.cz>)
- many MMX optimizations (Nick Kurshev <nickols_k at mail.ru>)
- added configure system (actually a small shell script)
- added MPEG audio layer 1/2/3 decoding using LGPL'ed mpglib by
Michael Hipp (temporary solution - waiting for integer only
decoder)
- fixed VIDIOCSYNC interrupt
- added Intel H.263 decoding support ('I263' AVI fourCC)
- added Real Video 1.0 decoding (needs further testing)
- simplified image formats again. Added PGM format (=grey
pgm). Renamed old PGM to PGMYUV.
- fixed msmpeg4 slice issues (tell me if you still find problems)
- fixed OpenDivX bugs with newer versions (added VOL header decoding)
- added support for MPlayer interface
- added macroblock skip optimization
- added MJPEG decoder
- added mmx/mmxext IDCT from libmpeg2
- added pgmyuvpipe, ppm, and ppm_pipe formats (original patch by Celer
<celer at shell.scrypt.net>)
- added pixel format conversion layer (e.g. for MJPEG or PPM)
- added deinterlacing option
- MPEG-1/2 fixes
- MPEG-4 vol header fixes (Jonathan Marsden <snmjbm at pacbell.net>)
- ARM optimizations (Lionel Ulmer <lionel.ulmer at free.fr>).
- Windows porting of file converter
- added MJPEG raw format (input/output)
- added JPEG image format support (input/output)
version 0.4.4:
- fixed some std header definitions (Bjorn Lindgren
<bjorn.e.lindgren at telia.com>).
- added MPEG demuxer (MPEG-1 and 2 compatible).
- added ASF demuxer
- added prototype RM demuxer
- added AC3 decoding (done with libac3 by Aaron Holtzman)
- added decoding codec parameter guessing (e.g. for MPEG, because the
header does not include them)
- fixed header generation in MPEG-1, AVI and ASF muxer: wmplayer can now
play them (only tested video)
- fixed H.263 white bug
- fixed phase rounding in img resample filter
- add MMX code for polyphase img resample filter
- added CPU autodetection
- added generic title/author/copyright/comment string handling (ASF and RM
use them)
- added SWF demux to extract MP3 track (not usable yet because no MP3
decoder)
- added fractional frame rate support
- codecs are no longer searched by read_header() (should fix ffserver
segfault)
version 0.4.3:
- BGR24 patch (initial patch by Jeroen Vreeken <pe1rxq at amsat.org>)
- fixed raw yuv output
- added motion rounding support in MPEG-4
- fixed motion bug rounding in MSMPEG4
- added B-frame handling in video core
- added full MPEG-1 decoding support
- added partial (frame only) MPEG-2 support
- changed the FOURCC code for H.263 to "U263" to be able to see the
  AVI/H.263 file with the UB Video H.263+ decoder. MPlayer works with
  this codec ;) (JuanJo).
- Halfpel motion estimation after MB type selection (JuanJo)
- added pgm and .Y.U.V output format
- suppressed 'img:' protocol. Simply use: /tmp/test%d.[pgm|Y] as input or
output.
- added pgmpipe I/O format (original patch from Martin Aumueller
<lists at reserv.at>, but changed completely since we use a format
instead of a protocol)
version 0.4.2:
- added H.263/MPEG-4/MSMPEG4 decoding support. MPEG-4 decoding support
(for OpenDivX) is almost complete: 8x8 MVs and rounding are
missing. MSMPEG4 support is complete.
- added prototype MPEG-1 decoder. Only I- and P-frames handled yet (it
can decode ffmpeg MPEGs :-)).
- added libavcodec API documentation (see apiexample.c).
- fixed image polyphase bug (the bottom of some images could be
greenish)
- added support for non clipped motion vectors (decoding only)
and image sizes non-multiple of 16
- added support for AC prediction (decoding only)
- added file overwrite confirmation (can be disabled with -y)
- added custom size picture to H.263 using H.263+ (Juanjo)
version 0.4.1:
- added MSMPEG4 (aka DivX) compatible encoder. Changed default codec
of AVI and ASF to DIV3.
- added -me option to set motion estimation method
(default=log). suppressed redundant -hq option.
- added options -acodec and -vcodec to force a given codec (useful for
AVI for example)
- fixed -an option
- improved dct_quantize speed
- factorized some motion estimation code
version 0.4.0:
- removed grab code from ffserver and moved it to ffmpeg. Added
multistream support to ffmpeg.
- added timeshifting support for live feeds (option ?date=xxx in the
URL)
- added high quality image resize code with polyphase filter (needs
mmx/sse optimization). Enable multiple image size support in ffserver.
- added multi live feed support in ffserver
- suppressed master feature from ffserver (it should be done with an
external program which opens the .ffm url and writes it to another
ffserver)
- added preliminary support for video stream parsing (WAV and AVI half
done). Added proper support for audio/video file conversion in
ffmpeg.
- added preliminary support for video file sending from ffserver
- redesigning I/O subsystem: now using URL based input and output
(see avio.h)
- added WAV format support
- added "tty user interface" to ffmpeg to stop grabbing gracefully
- added MMX/SSE optimizations to SAD (Sum of Absolute Differences)
(Juan J. Sierralta P. a.k.a. "Juanjo" <juanjo at atmlab.utfsm.cl>)
- added MMX DCT from mpeg2_movie 1.5 (Juanjo)
- added new motion estimation algorithms, log and phods (Juanjo)
- changed directories: libav for format handling, libavcodec for
codecs
version 0.3.4:
- added stereo in MPEG audio encoder
version 0.3.3:
- added 'high quality' mode which uses motion vectors. It can be used in
real time at low resolution.
- fixed rounding problems which caused quality problems at high
bitrates and large GOP size
version 0.3.2: small fixes
- ASF fixes
- put_seek bug fix
version 0.3.1: added avi/divx support
- added AVI support
- added MPEG-4 codec compatible with OpenDivX. It is based on the H.263 codec
- added sound for flash format (not tested)
version 0.3: initial public release
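
The stream-specifier syntax summarised in the version 0.9 notes above (-map, -c, -b and -metadata taking per-stream specifiers) corresponds to command lines along the lines of the sketch below. This is illustrative only: INPUT and OUTPUT are placeholders, and the codec/bitrate values are taken from the prose above rather than from this changeset.

    # create an output stream for every stream in the first input file
    ffmpeg -i INPUT -map 0 OUTPUT

    # the same, minus the second audio stream (negative mapping)
    ffmpeg -i INPUT -map 0 -map -0:a:1 OUTPUT

    # per-stream bitrates: 2M for the first video stream, 128k for all audio streams
    ffmpeg -i INPUT -b:v:0 2M -b:a 128k OUTPUT

    # per-stream metadata: mark stream 1 (the second stream) as English
    ffmpeg -i INPUT -metadata:s:1 language=eng OUTPUT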

View File

@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.
PROJECT_NUMBER = 1.1.15
PROJECT_NUMBER = 1.0
# With the PROJECT_LOGO tag one can specify an logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
@@ -44,7 +44,7 @@ PROJECT_LOGO =
# If a relative path is entered, it will be relative to the location
# where doxygen was started. If left blank the current directory will be used.
OUTPUT_DIRECTORY = doc/doxy
OUTPUT_DIRECTORY = doxy
# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
# 4096 sub-directories (in 2 levels) under the output directory of each output
@@ -288,7 +288,7 @@ TYPEDEF_HIDES_STRUCT = NO
# causing a significant performance penality.
# If the system has enough physical memory increasing the cache will improve the
# performance by keeping more symbols in memory. Note that the value works on
# a logarithmic scale so increasing the size by one will roughly double the
# a logarithmic scale so increasing the size by one will rougly double the
# memory usage. The cache size is given by this formula:
# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
# corresponding to a cache size of 2^16 = 65536 symbols
@@ -489,6 +489,12 @@ MAX_INITIALIZER_LINES = 30
SHOW_USED_FILES = YES
# If the sources in your project are distributed over multiple directories
# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
# in the documentation. The default is NO.
SHOW_DIRECTORIES = NO
# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
# This will remove the Files entry from the Quick Index and from the
# Folder Tree View (if specified). The default is YES.
@@ -639,14 +645,15 @@ EXCLUDE_SYMBOLS =
# directories that contain example code fragments that are included (see
# the \include command).
EXAMPLE_PATH = doc/examples/
EXAMPLE_PATH = libavcodec/ \
libavformat/
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
# and *.h) to filter out the source-files in the directories. If left
# blank all files are included.
EXAMPLE_PATTERNS = *.c
EXAMPLE_PATTERNS = *-example.c
# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude
@@ -793,13 +800,13 @@ HTML_FILE_EXTENSION = .html
# each generated HTML page. If it is left blank doxygen will generate a
# standard header.
#HTML_HEADER = doc/doxy/header.html
HTML_HEADER = doc/doxy/header.html
# The HTML_FOOTER tag can be used to specify a personal HTML footer for
# each generated HTML page. If it is left blank doxygen will generate a
# standard footer.
#HTML_FOOTER = doc/doxy/footer.html
HTML_FOOTER = doc/doxy/footer.html
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
# style sheet that is used by each HTML page. It can be used to
@@ -808,7 +815,7 @@ HTML_FILE_EXTENSION = .html
# the style sheet file to the HTML output directory, so don't put your own
# stylesheet in the HTML output directory as well, or it will be erased!
#HTML_STYLESHEET = doc/doxy/doxy_stylesheet.css
HTML_STYLESHEET = doc/doxy/doxy_stylesheet.css
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
# Doxygen will adjust the colors in the stylesheet and background images
@@ -818,7 +825,7 @@ HTML_FILE_EXTENSION = .html
# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
# The allowed range is 0 to 359.
#HTML_COLORSTYLE_HUE = 120
HTML_COLORSTYLE_HUE = 120
# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
# the colors in the HTML output. For a value of 0 the output will use
@@ -841,6 +848,12 @@ HTML_COLORSTYLE_GAMMA = 80
HTML_TIMESTAMP = YES
# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
# files or namespaces will be aligned in HTML using tables. If set to
# NO a bullet list will be used.
HTML_ALIGN_MEMBERS = YES
# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded. For this to work a browser that supports
@@ -1021,6 +1034,11 @@ ENUM_VALUES_PER_LINE = 4
GENERATE_TREEVIEW = NO
# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
# and Class Hierarchy pages using a tree view instead of an ordered list.
USE_INLINE_TREES = NO
# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
# used to set the initial width (in pixels) of the frame in which the tree
# is shown.
@@ -1356,9 +1374,14 @@ INCLUDE_FILE_PATTERNS =
# instead of the = operator.
PREDEFINED = "__attribute__(x)=" \
"RENAME(x)=x ## _TMPL" \
"DEF(x)=x ## _TMPL" \
HAVE_AV_CONFIG_H \
HAVE_MMX \
HAVE_MMXEXT \
HAVE_AMD3DNOW \
"DECLARE_ALIGNED(a,t,n)=t n" \
"offsetof(x,y)=0x42" \
av_alloc_size \
"offsetof(x,y)=0x42"
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
# this tag can be used to specify a list of macro names that should be expanded.

View File

@@ -23,21 +23,13 @@ Specifically, the GPL parts of FFmpeg are
libswresample/swresample-test.c
- the texi2pod.pl tool
- the following filters in libavfilter:
- f_ebur128.c
- vf_blackframe.c
- vf_boxblur.c
- vf_colormatrix.c
- vf_cropdetect.c
- vf_decimate.c
- vf_delogo.c
- vf_geq.c
- vf_histeq.c
- vf_hqdn3d.c
- vf_hue.c
- vf_kerndeint.c
- vf_mp.c
- vf_pp.c
- vf_smartblur.c
- vf_super2xsai.c
- vf_tinterlace.c
- vf_yadif.c

View File

@@ -130,7 +130,6 @@ Codecs:
bmp.c Mans Rullgard, Kostya Shishkov
cavs* Stefan Gehrer
celp_filters.* Vitor Sessak
cdxl.c Paul B Mahol
cinepak.c Roberto Togni
cljr Alex Beregszaszi
cllc.c Derek Buitenhuis
@@ -183,7 +182,7 @@ Codecs:
lzo.h, lzo.c Reimar Doeffinger
mdec.c Michael Niedermayer
mimic.c Ramiro Polla
mjpeg*.c Michael Niedermayer
mjpeg.c Michael Niedermayer
mlp* Ramiro Polla
mmvideo.c Peter Ross
mpc* Kostya Shishkov
@@ -194,7 +193,6 @@ Codecs:
msvideo1.c Mike Melanson
nellymoserdec.c Benjamin Larsson
nuv.c Reimar Doeffinger
paf.* Paul B Mahol
pcx.c Ivo van Poorten
pgssubdec.c Reimar Doeffinger
ptx.c Ivo van Poorten
@@ -219,7 +217,6 @@ Codecs:
srt* Aurelien Jacobs
sunrast.c Ivo van Poorten
svq3.c Michael Niedermayer
tak* Paul B Mahol
targa.c Kostya Shishkov
tiff.c Kostya Shishkov
truemotion1* Mike Melanson
@@ -235,7 +232,6 @@ Codecs:
vc1* Kostya Shishkov
vcr1.c Michael Niedermayer
vda_h264_dec.c Xidorn Quan
vima.c Paul B Mahol
vmnc.c Kostya Shishkov
vorbis_enc.c Oded Shimon
vorbis_dec.c Denes Balatoni, David Conrad
@@ -250,10 +246,8 @@ Codecs:
wmv2.c Michael Niedermayer
wnv1.c Kostya Shishkov
xan.c Mike Melanson
xbm* Paul B Mahol
xl.c Kostya Shishkov
xvmc.c Ivan Kalvachev
xwd* Paul B Mahol
zerocodec.c Derek Buitenhuis
zmbv* Kostya Shishkov
@@ -276,7 +270,7 @@ libavdevice
libdc1394.c Roman Shaposhnik
v4l2.c Luca Abeni
vfwcap.c Ramiro Polla
dshow.c Roger Pack
libavfilter
===========
@@ -307,25 +301,17 @@ Generic parts:
Muxers/Demuxers:
4xm.c Mike Melanson
adtsenc.c Robert Swain
afc.c Paul B Mahol
aiff.c Baptiste Coudurier
ape.c Kostya Shishkov
ass* Aurelien Jacobs
astdec.c Paul B Mahol
astenc.c James Almer
avi* Michael Niedermayer
avr.c Paul B Mahol
bink.c Peter Ross
brstm.c Paul B Mahol
caf* Peter Ross
cdxl.c Paul B Mahol
crc.c Michael Niedermayer
daud.c Reimar Doeffinger
dtshddec.c Paul B Mahol
dv.c Roman Shaposhnik
dxa.c Kostya Shishkov
electronicarts.c Peter Ross
epafdec.c Paul B Mahol
ffm* Baptiste Coudurier
flac* Justin Ruggles
flic.c Mike Melanson
@@ -336,22 +322,19 @@ Muxers/Demuxers:
idroqdec.c Mike Melanson
iff.c Jaikrishnan Menon
ipmovie.c Mike Melanson
img2*.c Michael Niedermayer
ircam* Paul B Mahol
img2.c Michael Niedermayer
iss.c Stefan Gehrer
jacosub* Clément Bœsch
jvdec.c Peter Ross
libmodplug.c Clément Bœsch
libnut.c Oded Shimon
lmlm4.c Ivo van Poorten
lvfdec.c Paul B Mahol
lxfdec.c Tomas Härdin
matroska.c Aurelien Jacobs
matroskadec.c Aurelien Jacobs
matroskaenc.c David Conrad
metadata* Aurelien Jacobs
microdvd* Aurelien Jacobs
mgsts.c Paul B Mahol
mm.c Peter Ross
mov.c Michael Niedermayer, Baptiste Coudurier
movenc.c Michael Niedermayer, Baptiste Coudurier
@@ -363,7 +346,6 @@ Muxers/Demuxers:
mtv.c Reynaldo H. Verdejo Pinochet
mxf* Baptiste Coudurier
mxfdec.c Tomas Härdin
nistspheredec.c Paul B Mahol
nsvdec.c Francois Revol
nut.c Michael Niedermayer
nuv.c Reimar Doeffinger
@@ -371,10 +353,8 @@ Muxers/Demuxers:
oggenc.c Baptiste Coudurier
oggparse*.c David Conrad
oma.c Maxim Poliakovski
paf.c Paul B Mahol
psxstr.c Mike Melanson
pva.c Ivo van Poorten
pvfdec.c Paul B Mahol
r3d.c Baptiste Coudurier
raw.c Michael Niedermayer
rdt.c Ronald S. Bultje
@@ -390,10 +370,8 @@ Muxers/Demuxers:
segafilm.c Mike Melanson
siff.c Kostya Shishkov
smacker.c Kostya Shishkov
smjpeg* Paul B Mahol
srtdec.c Aurelien Jacobs
swf.c Baptiste Coudurier
takdec.c Paul B Mahol
tta.c Alex Beregszaszi
txd.c Ivo van Poorten
voc.c Aurelien Jacobs
@@ -402,7 +380,6 @@ Muxers/Demuxers:
westwood.c Mike Melanson
wtv.c Peter Ross
wv.c Kostya Shishkov
wvenc.c Paul B Mahol
Protocols:
bluray.c Petri Hintukainen
@@ -411,20 +388,6 @@ Protocols:
udp.c Luca Abeni
libswresample
=============
Generic parts:
audioconvert.c Michael Niedermayer
dither.c Michael Niedermayer
rematrix*.c Michael Niedermayer
swresample*.c Michael Niedermayer
Resamplers:
resample*.c Michael Niedermayer
soxr_resample.c Rob Sykes
Operating systems / CPU architectures
=====================================
@@ -445,11 +408,10 @@ x86 Michael Niedermayer
Releases
========
1.2 Michael Niedermayer
1.1 Michael Niedermayer
1.0 Michael Niedermayer
0.11 Michael Niedermayer
0.10 Michael Niedermayer
If you want to maintain an older release, please contact us
GnuPG Fingerprints of maintainers and contributors
@@ -479,6 +441,6 @@ Reinhard Tartler 9300 5DC2 7E87 6C37 ED7B CA9A 9808 3544 9453 48A4
Reynaldo H. Verdejo Pinochet 6E27 CD34 170C C78E 4D4F 5F40 C18E 077F 3114 452A
Robert Swain EE7A 56EA 4A81 A7B5 2001 A521 67FA 362D A2FC 3E71
Sascha Sommer 38A0 F88B 868E 9D3A 97D4 D6A0 E823 706F 1E07 0D3C
Stefano Sabatini 0D0B AD6B 5330 BBAD D3D6 6A0C 719C 2839 FC43 2D5F
Stefano Sabatini 9A43 10F8 D32C D33C 48E7 C52C 5DF2 8E4D B2EE 066B
Stephan Hilb 4F38 0B3A 5F39 B99B F505 E562 8D5C 5554 4E17 8863
Tomas Härdin A79D 4E3D F38F 763F 91F5 8B33 A01E 8AE0 41BB 2551
Tomas Härdin D133 29CA 4EEC 9DB4 7076 F697 B04B 7403 3313 41FD

View File

@@ -17,8 +17,7 @@ PROGS-$(CONFIG_FFSERVER) += ffserver
PROGS := $(PROGS-yes:%=%$(PROGSSUF)$(EXESUF))
INSTPROGS = $(PROGS-yes:%=%$(PROGSSUF)$(EXESUF))
OBJS = cmdutils.o $(EXEOBJS)
OBJS = cmdutils.o
OBJS-ffmpeg = ffmpeg_opt.o ffmpeg_filter.o
TESTTOOLS = audiogen videogen rotozoom tiny_psnr base64
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
@@ -42,7 +41,7 @@ FFLIBS-$(CONFIG_SWSCALE) += swscale
FFLIBS := avutil
DATA_FILES := $(wildcard $(SRC_PATH)/presets/*.ffpreset) $(SRC_PATH)/doc/ffprobe.xsd
EXAMPLES_FILES := $(wildcard $(SRC_PATH)/doc/examples/*.c) $(SRC_PATH)/doc/examples/Makefile $(SRC_PATH)/doc/examples/README
EXAMPLES_FILES := $(wildcard $(SRC_PATH)/doc/examples/*.c) $(SRC_PATH)/doc/examples/Makefile
SKIPHEADERS = cmdutils_common_opts.h
@@ -57,8 +56,8 @@ $(PROGS): %$(EXESUF): %_g$(EXESUF)
$(CP) $< $@
$(STRIP) $@
$(TOOLS): %$(EXESUF): %.o $(EXEOBJS)
$(LD) $(LDFLAGS) $(LD_O) $^ $(ELIBS)
$(TOOLS): %$(EXESUF): %.o
$(LD) $(LDFLAGS) $(LD_O) $< $(ELIBS)
tools/cws2fws$(EXESUF): ELIBS = $(ZLIB)
@@ -69,9 +68,9 @@ config.h: .config
@-tput sgr0 2>/dev/null
SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS \
HEADERS ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
ARMV5TE-OBJS ARMV6-OBJS VFP-OBJS NEON-OBJS \
ALTIVEC-OBJS VIS-OBJS \
ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
ARMV5TE-OBJS ARMV6-OBJS ARMVFP-OBJS NEON-OBJS \
MMI-OBJS ALTIVEC-OBJS VIS-OBJS \
MMX-OBJS YASM-OBJS \
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS MIPS32R2-OBJS \
OBJS HOSTOBJS TESTOBJS
@@ -92,8 +91,8 @@ endef
$(foreach D,$(FFLIBS),$(eval $(call DOSUBDIR,lib$(D))))
define DOPROG
OBJS-$(1) += $(1).o cmdutils.o $(EXEOBJS)
$(1)$(PROGSSUF)_g$(EXESUF): $$(OBJS-$(1))
OBJS-$(1) += $(1).o
$(1)$(PROGSSUF)_g$(EXESUF): $(OBJS-$(1))
$$(OBJS-$(1)): CFLAGS += $(CFLAGS-$(1))
$(1)$(PROGSSUF)_g$(EXESUF): LDFLAGS += $(LDFLAGS-$(1))
$(1)$(PROGSSUF)_g$(EXESUF): FF_EXTRALIBS += $(LIBS-$(1))
@@ -102,8 +101,8 @@ endef
$(foreach P,$(PROGS-yes),$(eval $(call DOPROG,$(P))))
%$(PROGSSUF)_g$(EXESUF): %.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LD_O) $(OBJS-$*) $(FF_EXTRALIBS)
%$(PROGSSUF)_g$(EXESUF): %.o cmdutils.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LD_O) $(OBJS-$*) cmdutils.o $(FF_EXTRALIBS)
OBJDIRS += tools
@@ -153,6 +152,7 @@ uninstall-data:
clean::
$(RM) $(ALLPROGS) $(ALLPROGS_G)
$(RM) $(CLEANSUFFIXES)
$(RM) $(TOOLS)
$(RM) $(CLEANSUFFIXES:%=tools/%)
$(RM) coverage.info
$(RM) -r coverage-html

View File

@@ -1 +1 @@
1.1.15
1.0.git

1
VERSION Normal file
View File

@@ -0,0 +1 @@
1.0

View File

@@ -1,8 +1,9 @@
OBJS-$(HAVE_ARMV5TE) += $(ARMV5TE-OBJS) $(ARMV5TE-OBJS-yes)
OBJS-$(HAVE_ARMV6) += $(ARMV6-OBJS) $(ARMV6-OBJS-yes)
OBJS-$(HAVE_VFP) += $(VFP-OBJS) $(VFP-OBJS-yes)
OBJS-$(HAVE_ARMVFP) += $(ARMVFP-OBJS) $(ARMVFP-OBJS-yes)
OBJS-$(HAVE_NEON) += $(NEON-OBJS) $(NEON-OBJS-yes)
OBJS-$(HAVE_MMI) += $(MMI-OBJS) $(MMI-OBJS-yes)
OBJS-$(HAVE_MIPSFPU) += $(MIPSFPU-OBJS) $(MIPSFPU-OBJS-yes)
OBJS-$(HAVE_MIPS32R2) += $(MIPS32R2-OBJS) $(MIPS32R2-OBJS-yes)
OBJS-$(HAVE_MIPSDSPR1) += $(MIPSDSPR1-OBJS) $(MIPSDSPR1-OBJS-yes)

View File

@@ -41,7 +41,6 @@
#endif
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/mathematics.h"
#include "libavutil/imgutils.h"
#include "libavutil/parseutils.h"
@@ -55,17 +54,14 @@
#include "libavformat/network.h"
#endif
#if HAVE_SYS_RESOURCE_H
#include <sys/time.h>
#include <sys/resource.h>
#endif
static int init_report(const char *env);
struct SwsContext *sws_opts;
SwrContext *swr_opts;
AVDictionary *format_opts, *codec_opts;
const int this_year = 2014;
const int this_year = 2012;
static FILE *report_file;
@@ -130,7 +126,7 @@ double parse_number_or_die(const char *context, const char *numstr, int type,
else
return d;
av_log(NULL, AV_LOG_FATAL, error, context, numstr, min, max);
exit(1);
exit_program(1);
return 0;
}
@@ -141,7 +137,7 @@ int64_t parse_time_or_die(const char *context, const char *timestr,
if (av_parse_time(&us, timestr, is_duration) < 0) {
av_log(NULL, AV_LOG_FATAL, "Invalid %s specification for %s: %s\n",
is_duration ? "duration" : "date", context, timestr);
exit(1);
exit_program(1);
}
return us;
}
@@ -200,7 +196,7 @@ static const OptionDef *find_option(const OptionDef *po, const char *name)
return po;
}
#if HAVE_COMMANDLINETOARGVW
#if defined(_WIN32) && !defined(__MINGW32CE__)
#include <windows.h>
#include <shellapi.h>
/* Will be leaked on exit */
@@ -260,16 +256,38 @@ static inline void prepare_app_arguments(int *argc_ptr, char ***argv_ptr)
{
/* nothing to do */
}
#endif /* HAVE_COMMANDLINETOARGVW */
#endif /* WIN32 && !__MINGW32CE__ */
static int write_option(void *optctx, const OptionDef *po, const char *opt,
const char *arg)
int parse_option(void *optctx, const char *opt, const char *arg,
const OptionDef *options)
{
const OptionDef *po;
int bool_val = 1;
int *dstcount;
void *dst;
po = find_option(options, opt);
if (!po->name && opt[0] == 'n' && opt[1] == 'o') {
/* handle 'no' bool option */
po = find_option(options, opt + 2);
if ((po->name && (po->flags & OPT_BOOL)))
bool_val = 0;
}
if (!po->name)
po = find_option(options, "default");
if (!po->name) {
av_log(NULL, AV_LOG_ERROR, "Unrecognized option '%s'\n", opt);
return AVERROR(EINVAL);
}
if (po->flags & HAS_ARG && !arg) {
av_log(NULL, AV_LOG_ERROR, "Missing argument for option '%s'\n", opt);
return AVERROR(EINVAL);
}
/* new-style options contain an offset into optctx, old-style address of
* a global var*/
void *dst = po->flags & (OPT_OFFSET | OPT_SPEC) ?
(uint8_t *)optctx + po->u.off : po->u.dst_ptr;
int *dstcount;
dst = po->flags & (OPT_OFFSET | OPT_SPEC) ? (uint8_t *)optctx + po->u.off
: po->u.dst_ptr;
if (po->flags & OPT_SPEC) {
SpecifierOpt **so = dst;
@@ -286,7 +304,9 @@ static int write_option(void *optctx, const OptionDef *po, const char *opt,
str = av_strdup(arg);
// av_freep(dst);
*(char **)dst = str;
} else if (po->flags & OPT_BOOL || po->flags & OPT_INT) {
} else if (po->flags & OPT_BOOL) {
*(int *)dst = bool_val;
} else if (po->flags & OPT_INT) {
*(int *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
} else if (po->flags & OPT_INT64) {
*(int64_t *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT64_MIN, INT64_MAX);
@@ -305,41 +325,7 @@ static int write_option(void *optctx, const OptionDef *po, const char *opt,
}
}
if (po->flags & OPT_EXIT)
exit(0);
return 0;
}
int parse_option(void *optctx, const char *opt, const char *arg,
const OptionDef *options)
{
const OptionDef *po;
int ret;
po = find_option(options, opt);
if (!po->name && opt[0] == 'n' && opt[1] == 'o') {
/* handle 'no' bool option */
po = find_option(options, opt + 2);
if ((po->name && (po->flags & OPT_BOOL)))
arg = "0";
} else if (po->flags & OPT_BOOL)
arg = "1";
if (!po->name)
po = find_option(options, "default");
if (!po->name) {
av_log(NULL, AV_LOG_ERROR, "Unrecognized option '%s'\n", opt);
return AVERROR(EINVAL);
}
if (po->flags & HAS_ARG && !arg) {
av_log(NULL, AV_LOG_ERROR, "Missing argument for option '%s'\n", opt);
return AVERROR(EINVAL);
}
ret = write_option(optctx, po, opt, arg);
if (ret < 0)
return ret;
exit_program(0);
return !!(po->flags & HAS_ARG);
}
@@ -365,7 +351,7 @@ void parse_options(void *optctx, int argc, char **argv, const OptionDef *options
opt++;
if ((ret = parse_option(optctx, opt, argv[optindex], options)) < 0)
exit(1);
exit_program(1);
optindex += ret;
} else {
if (parse_arg_function)
@@ -374,29 +360,6 @@ void parse_options(void *optctx, int argc, char **argv, const OptionDef *options
}
}
int parse_optgroup(void *optctx, OptionGroup *g)
{
int i, ret;
av_log(NULL, AV_LOG_DEBUG, "Parsing a group of options: %s %s.\n",
g->group_def->name, g->arg);
for (i = 0; i < g->nb_opts; i++) {
Option *o = &g->opts[i];
av_log(NULL, AV_LOG_DEBUG, "Applying option %s (%s) with argument %s.\n",
o->key, o->opt->help, o->val);
ret = write_option(optctx, o->opt, o->key, o->val);
if (ret < 0)
return ret;
}
av_log(NULL, AV_LOG_DEBUG, "Successfully parsed a group of options.\n");
return 0;
}
int locate_option(int argc, char **argv, const OptionDef *options,
const char *optname)
{
@@ -417,7 +380,7 @@ int locate_option(int argc, char **argv, const OptionDef *options,
(po->name && !strcmp(optname, po->name)))
return i;
if (po->flags & HAS_ARG)
if (!po || po->flags & HAS_ARG)
i++;
}
return 0;
@@ -450,14 +413,13 @@ static void dump_argument(const char *a)
void parse_loglevel(int argc, char **argv, const OptionDef *options)
{
int idx = locate_option(argc, argv, options, "loglevel");
const char *env;
if (!idx)
idx = locate_option(argc, argv, options, "v");
if (idx && argv[idx + 1])
opt_loglevel(NULL, "loglevel", argv[idx + 1]);
idx = locate_option(argc, argv, options, "report");
if ((env = getenv("FFREPORT")) || idx) {
init_report(env);
if (idx || getenv("FFREPORT")) {
opt_report("report");
if (report_file) {
int i;
fprintf(report_file, "Command line:\n");
@@ -474,14 +436,9 @@ void parse_loglevel(int argc, char **argv, const OptionDef *options)
int opt_default(void *optctx, const char *opt, const char *arg)
{
const AVOption *o;
int consumed = 0;
char opt_stripped[128];
const char *p;
const AVClass *cc = avcodec_get_class(), *fc = avformat_get_class();
const AVClass *sc, *swr_class;
if (!strcmp(opt, "debug") || !strcmp(opt, "fdebug"))
av_log_set_level(AV_LOG_DEBUG);
const AVClass *cc = avcodec_get_class(), *fc = avformat_get_class(), *sc, *swr_class;
if (!(p = strchr(opt, ':')))
p = opt + strlen(opt);
@@ -490,268 +447,41 @@ int opt_default(void *optctx, const char *opt, const char *arg)
if ((o = av_opt_find(&cc, opt_stripped, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) ||
((opt[0] == 'v' || opt[0] == 'a' || opt[0] == 's') &&
(o = av_opt_find(&cc, opt + 1, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ)))) {
(o = av_opt_find(&cc, opt + 1, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ))))
av_dict_set(&codec_opts, opt, arg, FLAGS);
consumed = 1;
}
if ((o = av_opt_find(&fc, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
else if ((o = av_opt_find(&fc, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)))
av_dict_set(&format_opts, opt, arg, FLAGS);
if(consumed)
av_log(NULL, AV_LOG_VERBOSE, "Routing %s to codec and muxer layer\n", opt);
consumed = 1;
}
#if CONFIG_SWSCALE
sc = sws_get_class();
if (!consumed && av_opt_find(&sc, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
if (!o && (o = av_opt_find(&sc, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
// XXX we only support sws_flags, not arbitrary sws options
int ret = av_opt_set(sws_opts, opt, arg, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error setting option %s.\n", opt);
return ret;
}
consumed = 1;
}
#endif
#if CONFIG_SWRESAMPLE
swr_class = swr_get_class();
if (!consumed && av_opt_find(&swr_class, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
if (!o && (o = av_opt_find(&swr_class, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
int ret = av_opt_set(swr_opts, opt, arg, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error setting option %s.\n", opt);
return ret;
}
consumed = 1;
}
#endif
if (consumed)
if (o)
return 0;
av_log(NULL, AV_LOG_ERROR, "Unrecognized option '%s'\n", opt);
return AVERROR_OPTION_NOT_FOUND;
}
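The fallback above routes options that have no OptionDef entry to the AVOption tables of libavcodec, libavformat, libswscale and libswresample. A rough sketch of the effect, assuming the calling tool has no explicit "-profile" entry of its own:

    if (opt_default(NULL, "profile", "high") >= 0) {
        /* "profile" matched an AVOption of the codec AVClass, so the pair is
         * stored in codec_opts and later filtered per stream with
         * filter_codec_opts(); a name unknown to all four classes would have
         * produced AVERROR_OPTION_NOT_FOUND instead. */
    }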
/*
* Check whether given option is a group separator.
*
* @return index of the group definition that matched or -1 if none
*/
static int match_group_separator(const OptionGroupDef *groups, int nb_groups,
const char *opt)
{
int i;
for (i = 0; i < nb_groups; i++) {
const OptionGroupDef *p = &groups[i];
if (p->sep && !strcmp(p->sep, opt))
return i;
}
return -1;
}
/*
* Finish parsing an option group.
*
* @param group_idx which group definition should this group belong to
* @param arg argument of the group delimiting option
*/
static void finish_group(OptionParseContext *octx, int group_idx,
const char *arg)
{
OptionGroupList *l = &octx->groups[group_idx];
OptionGroup *g;
GROW_ARRAY(l->groups, l->nb_groups);
g = &l->groups[l->nb_groups - 1];
*g = octx->cur_group;
g->arg = arg;
g->group_def = l->group_def;
#if CONFIG_SWSCALE
g->sws_opts = sws_opts;
#endif
g->swr_opts = swr_opts;
g->codec_opts = codec_opts;
g->format_opts = format_opts;
codec_opts = NULL;
format_opts = NULL;
#if CONFIG_SWSCALE
sws_opts = NULL;
#endif
swr_opts = NULL;
init_opts();
memset(&octx->cur_group, 0, sizeof(octx->cur_group));
}
/*
* Add an option instance to currently parsed group.
*/
static void add_opt(OptionParseContext *octx, const OptionDef *opt,
const char *key, const char *val)
{
int global = !(opt->flags & (OPT_PERFILE | OPT_SPEC | OPT_OFFSET));
OptionGroup *g = global ? &octx->global_opts : &octx->cur_group;
GROW_ARRAY(g->opts, g->nb_opts);
g->opts[g->nb_opts - 1].opt = opt;
g->opts[g->nb_opts - 1].key = key;
g->opts[g->nb_opts - 1].val = val;
}
static void init_parse_context(OptionParseContext *octx,
const OptionGroupDef *groups, int nb_groups)
{
static const OptionGroupDef global_group = { "global" };
int i;
memset(octx, 0, sizeof(*octx));
octx->nb_groups = nb_groups;
octx->groups = av_mallocz(sizeof(*octx->groups) * octx->nb_groups);
if (!octx->groups)
exit(1);
for (i = 0; i < octx->nb_groups; i++)
octx->groups[i].group_def = &groups[i];
octx->global_opts.group_def = &global_group;
octx->global_opts.arg = "";
init_opts();
}
void uninit_parse_context(OptionParseContext *octx)
{
int i, j;
for (i = 0; i < octx->nb_groups; i++) {
OptionGroupList *l = &octx->groups[i];
for (j = 0; j < l->nb_groups; j++) {
av_freep(&l->groups[j].opts);
av_dict_free(&l->groups[j].codec_opts);
av_dict_free(&l->groups[j].format_opts);
#if CONFIG_SWSCALE
sws_freeContext(l->groups[j].sws_opts);
#endif
if(CONFIG_SWRESAMPLE)
swr_free(&l->groups[j].swr_opts);
}
av_freep(&l->groups);
}
av_freep(&octx->groups);
av_freep(&octx->cur_group.opts);
av_freep(&octx->global_opts.opts);
uninit_opts();
}
int split_commandline(OptionParseContext *octx, int argc, char *argv[],
const OptionDef *options,
const OptionGroupDef *groups, int nb_groups)
{
int optindex = 1;
/* perform system-dependent conversions for arguments list */
prepare_app_arguments(&argc, &argv);
init_parse_context(octx, groups, nb_groups);
av_log(NULL, AV_LOG_DEBUG, "Splitting the commandline.\n");
while (optindex < argc) {
const char *opt = argv[optindex++], *arg;
const OptionDef *po;
int ret;
av_log(NULL, AV_LOG_DEBUG, "Reading option '%s' ...", opt);
/* unnamed group separators, e.g. output filename */
if (opt[0] != '-' || !opt[1]) {
finish_group(octx, 0, opt);
av_log(NULL, AV_LOG_DEBUG, " matched as %s.\n", groups[0].name);
continue;
}
opt++;
#define GET_ARG(arg) \
do { \
arg = argv[optindex++]; \
if (!arg) { \
av_log(NULL, AV_LOG_ERROR, "Missing argument for option '%s'.\n", opt);\
return AVERROR(EINVAL); \
} \
} while (0)
/* named group separators, e.g. -i */
if ((ret = match_group_separator(groups, nb_groups, opt)) >= 0) {
GET_ARG(arg);
finish_group(octx, ret, arg);
av_log(NULL, AV_LOG_DEBUG, " matched as %s with argument '%s'.\n",
groups[ret].name, arg);
continue;
}
/* normal options */
po = find_option(options, opt);
if (po->name) {
if (po->flags & OPT_EXIT) {
/* optional argument, e.g. -h */
arg = argv[optindex++];
} else if (po->flags & HAS_ARG) {
GET_ARG(arg);
} else {
arg = "1";
}
add_opt(octx, po, opt, arg);
av_log(NULL, AV_LOG_DEBUG, " matched as option '%s' (%s) with "
"argument '%s'.\n", po->name, po->help, arg);
continue;
}
/* AVOptions */
if (argv[optindex]) {
ret = opt_default(NULL, opt, argv[optindex]);
if (ret >= 0) {
av_log(NULL, AV_LOG_DEBUG, " matched as AVOption '%s' with "
"argument '%s'.\n", opt, argv[optindex]);
optindex++;
continue;
} else if (ret != AVERROR_OPTION_NOT_FOUND) {
av_log(NULL, AV_LOG_ERROR, "Error parsing option '%s' "
"with argument '%s'.\n", opt, argv[optindex]);
return ret;
}
}
/* boolean -nofoo options */
if (opt[0] == 'n' && opt[1] == 'o' &&
(po = find_option(options, opt + 2)) &&
po->name && po->flags & OPT_BOOL) {
add_opt(octx, po, opt, "0");
av_log(NULL, AV_LOG_DEBUG, " matched as option '%s' (%s) with "
"argument 0.\n", po->name, po->help);
continue;
}
av_log(NULL, AV_LOG_ERROR, "Unrecognized option '%s'.\n", opt);
return AVERROR_OPTION_NOT_FOUND;
}
if (octx->cur_group.nb_opts || codec_opts || format_opts)
av_log(NULL, AV_LOG_WARNING, "Trailing options were found on the "
"commandline.\n");
av_log(NULL, AV_LOG_DEBUG, "Finished splitting the commandline.\n");
return 0;
}
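A worked trace of the loop above, written as a comment; the command line is an arbitrary example and the group names follow the ffmpeg convention of a separator-less output-file group listed first and an input-file group closed by -i (the per-file "c" option is assumed to exist, as it does in ffmpeg):

    /*
     *   ffmpeg -v verbose -i in.mkv -c:v libx264 out.mp4
     *
     * "-v verbose"   has no OPT_PERFILE/OPT_SPEC/OPT_OFFSET flag, so add_opt()
     *                places it in octx->global_opts.
     * "-i in.mkv"    "-i" matches a group separator, so finish_group() closes
     *                the current group as an input-file group with argument
     *                "in.mkv".
     * "-c:v libx264" matches the per-file "c" OptionDef (stream specifier kept
     *                in the key) and accumulates in octx->cur_group.
     * "out.mp4"      does not start with '-', so it closes the separator-less
     *                group definition (the output-file group) with "out.mp4"
     *                as its argument.
     */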
int opt_loglevel(void *optctx, const char *opt, const char *arg)
{
const struct { const char *name; int level; } log_levels[] = {
@@ -781,89 +511,30 @@ int opt_loglevel(void *optctx, const char *opt, const char *arg)
"Possible levels are numbers or:\n", arg);
for (i = 0; i < FF_ARRAY_ELEMS(log_levels); i++)
av_log(NULL, AV_LOG_FATAL, "\"%s\"\n", log_levels[i].name);
exit(1);
exit_program(1);
}
av_log_set_level(level);
return 0;
}
static void expand_filename_template(AVBPrint *bp, const char *template,
struct tm *tm)
int opt_report(const char *opt)
{
int c;
while ((c = *(template++))) {
if (c == '%') {
if (!(c = *(template++)))
break;
switch (c) {
case 'p':
av_bprintf(bp, "%s", program_name);
break;
case 't':
av_bprintf(bp, "%04d%02d%02d-%02d%02d%02d",
tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
break;
case '%':
av_bprint_chars(bp, c, 1);
break;
}
} else {
av_bprint_chars(bp, c, 1);
}
}
}
static int init_report(const char *env)
{
char *filename_template = NULL;
char *key, *val;
int ret, count = 0;
char filename[64];
time_t now;
struct tm *tm;
AVBPrint filename;
if (report_file) /* already opened */
return 0;
time(&now);
tm = localtime(&now);
while (env && *env) {
if ((ret = av_opt_get_key_value(&env, "=", ":", 0, &key, &val)) < 0) {
if (count)
av_log(NULL, AV_LOG_ERROR,
"Failed to parse FFREPORT environment variable: %s\n",
av_err2str(ret));
break;
}
if (*env)
env++;
count++;
if (!strcmp(key, "file")) {
av_free(filename_template);
filename_template = val;
val = NULL;
} else {
av_log(NULL, AV_LOG_ERROR, "Unknown key '%s' in FFREPORT\n", key);
}
av_free(val);
av_free(key);
}
av_bprint_init(&filename, 0, 1);
expand_filename_template(&filename,
av_x_if_null(filename_template, "%p-%t.log"), tm);
av_free(filename_template);
if (!av_bprint_is_complete(&filename)) {
av_log(NULL, AV_LOG_ERROR, "Out of memory building report file name\n");
return AVERROR(ENOMEM);
}
report_file = fopen(filename.str, "w");
snprintf(filename, sizeof(filename), "%s-%04d%02d%02d-%02d%02d%02d.log",
program_name,
tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
report_file = fopen(filename, "w");
if (!report_file) {
av_log(NULL, AV_LOG_ERROR, "Failed to open report \"%s\": %s\n",
filename.str, strerror(errno));
filename, strerror(errno));
return AVERROR(errno);
}
av_log_set_callback(log_callback_report);
@@ -873,17 +544,11 @@ static int init_report(const char *env)
program_name,
tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec,
filename.str);
filename);
av_log_set_level(FFMAX(av_log_get_level(), AV_LOG_VERBOSE));
av_bprint_finalize(&filename, NULL);
return 0;
}
int opt_report(const char *opt)
{
return init_report(NULL);
}
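An example of the FFREPORT handling shown above; the run date is hypothetical:

    /* FFREPORT=file=ffreport-%p-%t.log:foo=bar
     *   - av_opt_get_key_value() yields the pairs ("file", "ffreport-%p-%t.log")
     *     and ("foo", "bar"); the unknown key "foo" is reported and ignored.
     *   - expand_filename_template() replaces %p with the program name and %t
     *     with a timestamp, giving e.g. "ffreport-ffmpeg-20120928-030228.log".
     * With FFREPORT unset but -report given, the default template "%p-%t.log"
     * is used instead. */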
int opt_max_alloc(void *optctx, const char *opt, const char *arg)
{
char *tail;
@@ -892,7 +557,7 @@ int opt_max_alloc(void *optctx, const char *opt, const char *arg)
max = strtol(arg, &tail, 10);
if (*tail) {
av_log(NULL, AV_LOG_FATAL, "Invalid max_alloc \"%s\".\n", arg);
exit(1);
exit_program(1);
}
av_max_alloc(max);
return 0;
@@ -910,6 +575,12 @@ int opt_cpuflags(void *optctx, const char *opt, const char *arg)
return 0;
}
int opt_codec_debug(void *optctx, const char *opt, const char *arg)
{
av_log_set_level(AV_LOG_DEBUG);
return opt_default(NULL, opt, arg);
}
int opt_timelimit(void *optctx, const char *opt, const char *arg)
{
#if HAVE_SETRLIMIT
@@ -1020,13 +691,12 @@ int show_version(void *optctx, const char *opt, const char *arg)
int show_license(void *optctx, const char *opt, const char *arg)
{
#if CONFIG_NONFREE
printf(
#if CONFIG_NONFREE
"This version of %s has nonfree parts compiled in.\n"
"Therefore it is not legally redistributable.\n",
program_name );
program_name
#elif CONFIG_GPLV3
printf(
"%s is free software; you can redistribute it and/or modify\n"
"it under the terms of the GNU General Public License as published by\n"
"the Free Software Foundation; either version 3 of the License, or\n"
@@ -1039,9 +709,8 @@ int show_license(void *optctx, const char *opt, const char *arg)
"\n"
"You should have received a copy of the GNU General Public License\n"
"along with %s. If not, see <http://www.gnu.org/licenses/>.\n",
program_name, program_name, program_name );
program_name, program_name, program_name
#elif CONFIG_GPL
printf(
"%s is free software; you can redistribute it and/or modify\n"
"it under the terms of the GNU General Public License as published by\n"
"the Free Software Foundation; either version 2 of the License, or\n"
@@ -1055,9 +724,8 @@ int show_license(void *optctx, const char *opt, const char *arg)
"You should have received a copy of the GNU General Public License\n"
"along with %s; if not, write to the Free Software\n"
"Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n",
program_name, program_name, program_name );
program_name, program_name, program_name
#elif CONFIG_LGPLV3
printf(
"%s is free software; you can redistribute it and/or modify\n"
"it under the terms of the GNU Lesser General Public License as published by\n"
"the Free Software Foundation; either version 3 of the License, or\n"
@@ -1070,9 +738,8 @@ int show_license(void *optctx, const char *opt, const char *arg)
"\n"
"You should have received a copy of the GNU Lesser General Public License\n"
"along with %s. If not, see <http://www.gnu.org/licenses/>.\n",
program_name, program_name, program_name );
program_name, program_name, program_name
#else
printf(
"%s is free software; you can redistribute it and/or\n"
"modify it under the terms of the GNU Lesser General Public\n"
"License as published by the Free Software Foundation; either\n"
@@ -1086,8 +753,9 @@ int show_license(void *optctx, const char *opt, const char *arg)
"You should have received a copy of the GNU Lesser General Public\n"
"License along with %s; if not, write to the Free Software\n"
"Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n",
program_name, program_name, program_name );
program_name, program_name, program_name
#endif
);
return 0;
}
@@ -1142,7 +810,7 @@ int show_formats(void *optctx, const char *opt, const char *arg)
#define PRINT_CODEC_SUPPORTED(codec, field, type, list_name, term, get_name) \
if (codec->field) { \
const type *p = codec->field; \
const type *p = c->field; \
\
printf(" Supported " list_name ":"); \
while (*p != term) { \
@@ -1183,8 +851,8 @@ static void print_codec(const AVCodec *c)
}
printf("\n");
}
PRINT_CODEC_SUPPORTED(c, pix_fmts, enum AVPixelFormat, "pixel formats",
AV_PIX_FMT_NONE, GET_PIX_FMT_NAME);
PRINT_CODEC_SUPPORTED(c, pix_fmts, enum PixelFormat, "pixel formats",
PIX_FMT_NONE, GET_PIX_FMT_NAME);
PRINT_CODEC_SUPPORTED(c, supported_samplerates, int, "sample rates", 0,
GET_SAMPLE_RATE_NAME);
PRINT_CODEC_SUPPORTED(c, sample_fmts, enum AVSampleFormat, "sample formats",
@@ -1240,8 +908,8 @@ static unsigned get_codecs_sorted(const AVCodecDescriptor ***rcodecs)
while ((desc = avcodec_descriptor_next(desc)))
nb_codecs++;
if (!(codecs = av_calloc(nb_codecs, sizeof(*codecs)))) {
av_log(NULL, AV_LOG_ERROR, "Out of memory\n");
exit(1);
av_log(0, AV_LOG_ERROR, "Out of memory\n");
exit_program(1);
}
desc = NULL;
while ((desc = avcodec_descriptor_next(desc)))
@@ -1426,7 +1094,7 @@ int show_filters(void *optctx, const char *opt, const char *arg)
int show_pix_fmts(void *optctx, const char *opt, const char *arg)
{
const AVPixFmtDescriptor *pix_desc = NULL;
enum PixelFormat pix_fmt;
printf("Pixel formats:\n"
"I.... = Supported Input format for conversion\n"
@@ -1442,8 +1110,10 @@ int show_pix_fmts(void *optctx, const char *opt, const char *arg)
# define sws_isSupportedOutput(x) 0
#endif
while ((pix_desc = av_pix_fmt_desc_next(pix_desc))) {
enum AVPixelFormat pix_fmt = av_pix_fmt_desc_get_id(pix_desc);
for (pix_fmt = 0; pix_fmt < PIX_FMT_NB; pix_fmt++) {
const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[pix_fmt];
if(!pix_desc->name)
continue;
printf("%c%c%c%c%c %-16s %d %2d\n",
sws_isSupportedInput (pix_fmt) ? 'I' : '.',
sws_isSupportedOutput(pix_fmt) ? 'O' : '.',
@@ -1633,11 +1303,6 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
fseek(f, 0, SEEK_END);
*size = ftell(f);
fseek(f, 0, SEEK_SET);
if (*size == (size_t)-1) {
av_log(NULL, AV_LOG_ERROR, "IO error: %s\n", strerror(errno));
fclose(f);
return AVERROR(errno);
}
*bufptr = av_malloc(*size + 1);
if (!*bufptr) {
av_log(NULL, AV_LOG_ERROR, "Could not allocate file buffer\n");
@@ -1655,7 +1320,7 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
ret = AVERROR_EOF;
} else {
ret = 0;
(*bufptr)[(*size)++] = '\0';
(*bufptr)[*size++] = '\0';
}
fclose(f);
@@ -1802,13 +1467,13 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
{
if (new_size >= INT_MAX / elem_size) {
av_log(NULL, AV_LOG_ERROR, "Array too big.\n");
exit(1);
exit_program(1);
}
if (*size < new_size) {
uint8_t *tmp = av_realloc(array, new_size*elem_size);
if (!tmp) {
av_log(NULL, AV_LOG_ERROR, "Could not alloc buffer.\n");
exit(1);
exit_program(1);
}
memset(tmp + *size*elem_size, 0, (new_size-*size) * elem_size);
*size = new_size;
@@ -1819,19 +1484,13 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
static int alloc_buffer(FrameBuffer **pool, AVCodecContext *s, FrameBuffer **pbuf)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->pix_fmt);
FrameBuffer *buf;
FrameBuffer *buf = av_mallocz(sizeof(*buf));
int i, ret;
int pixel_size;
const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
int h_chroma_shift, v_chroma_shift;
int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
int w = s->width, h = s->height;
if (!desc)
return AVERROR(EINVAL);
pixel_size = desc->comp[0].step_minus1 + 1;
buf = av_mallocz(sizeof(*buf));
if (!buf)
return AVERROR(ENOMEM);
@@ -1851,7 +1510,7 @@ static int alloc_buffer(FrameBuffer **pool, AVCodecContext *s, FrameBuffer **pbu
/* XXX this shouldn't be needed, but some tests break without this line
* those decoders are buggy and need to be fixed.
* the following tests fail:
* cdgraphics, ansi
* cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
*/
memset(buf->base[0], 128, ret);
@@ -1904,6 +1563,11 @@ int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
frame->opaque = buf;
frame->type = FF_BUFFER_TYPE_USER;
frame->extended_data = frame->data;
frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
frame->width = buf->w;
frame->height = buf->h;
frame->format = buf->pix_fmt;
frame->sample_aspect_ratio = s->sample_aspect_ratio;
for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't

View File

@@ -139,7 +139,7 @@ typedef struct SpecifierOpt {
} u;
} SpecifierOpt;
typedef struct OptionDef {
typedef struct {
const char *name;
int flags;
#define HAS_ARG 0x0001
@@ -190,13 +190,13 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
void show_help_children(const AVClass *class, int flags);
/**
* Per-fftool specific help handler. Implemented in each
* fftool, called by show_help().
* Per-avtool specific help handler. Implemented in each
* avtool, called by show_help().
*/
void show_help_default(const char *opt, const char *arg);
/**
* Generic -h handler common to all fftools.
* Generic -h handler common to all avtools.
*/
int show_help(void *optctx, const char *opt, const char *arg);
@@ -204,8 +204,6 @@ int show_help(void *optctx, const char *opt, const char *arg);
* Parse the command line arguments.
*
* @param optctx an opaque options context
* @param argc number of command line arguments
* @param argv values of command line arguments
* @param options Array with the definitions required to interpret every
* option of the form: -option_name [argument]
* @param parse_arg_function Name of the function called to process every
@@ -223,95 +221,6 @@ void parse_options(void *optctx, int argc, char **argv, const OptionDef *options
int parse_option(void *optctx, const char *opt, const char *arg,
const OptionDef *options);
/**
* An option extracted from the commandline.
* Cannot use AVDictionary because of options like -map which can be
* used multiple times.
*/
typedef struct Option {
const OptionDef *opt;
const char *key;
const char *val;
} Option;
typedef struct OptionGroupDef {
/**< group name */
const char *name;
/**
* Option to be used as group separator. Can be NULL for groups which
* are terminated by a non-option argument (e.g. ffmpeg output files)
*/
const char *sep;
} OptionGroupDef;
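For illustration, a group table in the style ffmpeg uses; the entries are assumed for the example, not taken from this diff. Note that the separator-less group must come first:

    static const OptionGroupDef groups[] = {
        [0] = { "output file" },     /* no separator: closed by a bare filename */
        [1] = { "input file", "i" }, /* closed by "-i <filename>" */
    };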
typedef struct OptionGroup {
const OptionGroupDef *group_def;
const char *arg;
Option *opts;
int nb_opts;
AVDictionary *codec_opts;
AVDictionary *format_opts;
struct SwsContext *sws_opts;
struct SwrContext *swr_opts;
} OptionGroup;
/**
* A list of option groups that all have the same group type
* (e.g. input files or output files)
*/
typedef struct OptionGroupList {
const OptionGroupDef *group_def;
OptionGroup *groups;
int nb_groups;
} OptionGroupList;
typedef struct OptionParseContext {
OptionGroup global_opts;
OptionGroupList *groups;
int nb_groups;
/* parsing state */
OptionGroup cur_group;
} OptionParseContext;
/**
* Parse an options group and write results into optctx.
*
* @param optctx an app-specific options context. NULL for global options group
*/
int parse_optgroup(void *optctx, OptionGroup *g);
/**
* Split the commandline into an intermediate form convenient for further
* processing.
*
* The commandline is assumed to be composed of options which either belong to a
* group (those with OPT_SPEC, OPT_OFFSET or OPT_PERFILE) or are global
* (everything else).
*
* A group (defined by an OptionGroupDef struct) is a sequence of options
* terminated by either a group separator option (e.g. -i) or a parameter that
* is not an option (doesn't start with -). A group without a separator option
* must always be first in the supplied groups list.
*
* All options within the same group are stored in one OptionGroup struct in an
* OptionGroupList, all groups with the same group definition are stored in one
* OptionGroupList in OptionParseContext.groups. The order of group lists is the
* same as the order of group definitions.
*/
int split_commandline(OptionParseContext *octx, int argc, char *argv[],
const OptionDef *options,
const OptionGroupDef *groups, int nb_groups);
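A minimal usage sketch tying these entry points together; error handling is trimmed, and `options` plus the `groups` table from the example above are assumed:

    OptionParseContext octx;
    int ret = split_commandline(&octx, argc, argv, options,
                                groups, FF_ARRAY_ELEMS(groups));
    if (ret >= 0)
        ret = parse_optgroup(NULL, &octx.global_opts);  /* apply global options */
    /* per-file options would then be applied by iterating
     * octx.groups[i].groups[j] and calling parse_optgroup() with an
     * app-specific context for each group. */
    uninit_parse_context(&octx);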
/**
* Free all allocated memory in an OptionParseContext.
*/
void uninit_parse_context(OptionParseContext *octx);
/**
* Find the '-loglevel' option in the command line args and apply it.
*/
@@ -340,8 +249,6 @@ int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec);
* Create a new options dictionary containing only the options from
* opts which apply to the codec with ID codec_id.
*
* @param opts dictionary to place options in
* @param codec_id ID of the codec that should be filtered for
* @param s Corresponding format context.
* @param st A stream from s for which the options should be filtered.
* @param codec The particular codec for which the options should be filtered.
@@ -475,7 +382,6 @@ int read_yesno(void);
* Read the file with name filename, and put its content in a newly
* allocated 0-terminated buffer.
*
* @param filename file to read from
* @param bufptr location where pointer to buffer is returned
* @param size location where size of buffer is returned
* @return 0 in case of success, a negative value corresponding to an
@@ -504,28 +410,29 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size);
FILE *get_preset_file(char *filename, size_t filename_size,
const char *preset_name, int is_path, const char *codec_name);
/**
* Do all the necessary cleanup and abort.
* This function is implemented in the avtools, not cmdutils.
*/
av_noreturn void exit_program(int ret);
/**
* Realloc array to hold new_size elements of elem_size.
* Calls exit() on failure.
* Calls exit_program() on failure.
*
* @param array array to reallocate
* @param elem_size size in bytes of each element
* @param size new element count will be written here
* @param new_size number of elements to place in reallocated array
* @return reallocated array
*/
void *grow_array(void *array, int elem_size, int *size, int new_size);
#define GROW_ARRAY(array, nb_elems)\
array = grow_array(array, sizeof(*array), &nb_elems, nb_elems + 1)
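Typical use of the macro, with assumed names; the newly appended slot comes back zero-initialised and allocation failure aborts the program:

    InputFile **input_files   = NULL;
    int         nb_input_files = 0;
    GROW_ARRAY(input_files, nb_input_files);
    input_files[nb_input_files - 1] = file;  /* fill the new, zeroed slot */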
typedef struct FrameBuffer {
uint8_t *base[4];
uint8_t *data[4];
int linesize[4];
int h, w;
enum AVPixelFormat pix_fmt;
enum PixelFormat pix_fmt;
int refcount;
struct FrameBuffer **pool; ///< head of the buffer pool

View File

@@ -16,6 +16,8 @@
{ "sample_fmts", OPT_EXIT, {.func_arg = show_sample_fmts }, "show available audio sample formats" },
{ "loglevel" , HAS_ARG, {.func_arg = opt_loglevel}, "set libav* logging level", "loglevel" },
{ "v", HAS_ARG, {.func_arg = opt_loglevel}, "set libav* logging level", "loglevel" },
{ "debug" , HAS_ARG, {.func_arg = opt_codec_debug}, "set debug flags", "flags" },
{ "fdebug" , HAS_ARG, {.func_arg = opt_codec_debug}, "set debug flags", "flags" },
{ "report" , 0, {(void*)opt_report}, "generate a report" },
{ "max_alloc" , HAS_ARG, {.func_arg = opt_max_alloc}, "set maximum size of a single allocated block", "bytes" },
{ "cpuflags" , HAS_ARG | OPT_EXPERT, {.func_arg = opt_cpuflags}, "force specific cpu flags", "flags" },

View File

@@ -30,7 +30,7 @@ CFLAGS += $(ECFLAGS)
CCFLAGS = $(CPPFLAGS) $(CFLAGS)
ASFLAGS := $(CPPFLAGS) $(ASFLAGS)
CXXFLAGS += $(CPPFLAGS) $(CFLAGS)
YASMFLAGS += $(IFLAGS:%=%/) -Pconfig.asm
YASMFLAGS += $(IFLAGS:%=%/) -I$(SRC_PATH)/libavutil/x86/ -Pconfig.asm
HOSTCCFLAGS = $(IFLAGS) $(HOSTCFLAGS)
LDFLAGS := $(ALLFFLIBS:%=$(LD_PATH)lib%) $(LDFLAGS)
@@ -56,9 +56,6 @@ COMPILE_S = $(call COMPILE,AS)
%.o: %.S
$(COMPILE_S)
%.i: %.c
$(CC) $(CCFLAGS) $(CC_E) $<
%.h.c:
$(Q)echo '#include "$*.h"' >$@
@@ -97,7 +94,6 @@ HOSTPROGS := $(HOSTPROGS:%=$(SUBDIR)%$(HOSTEXESUF))
TOOLS += $(TOOLS-yes)
TOOLOBJS := $(TOOLS:%=tools/%.o)
TOOLS := $(TOOLS:%=tools/%$(EXESUF))
HEADERS += $(HEADERS-yes)
DEP_LIBS := $(foreach NAME,$(FFLIBS),lib$(NAME)/$($(CONFIG_SHARED:yes=S)LIBNAME))
@@ -128,13 +124,7 @@ CLEANSUFFIXES = *.d *.o *~ *.h.c *.map *.ver *.ho *.gcno *.gcda
DISTCLEANSUFFIXES = *.pc
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a
define RULES
clean::
$(RM) $(OBJS) $(OBJS:.o=.d)
$(RM) $(HOSTPROGS)
$(RM) $(TOOLS)
endef
$(eval $(RULES))
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d))

View File

@@ -46,7 +46,7 @@ static int getopt(int argc, char *argv[], char *opts)
int c;
char *cp;
if (sp == 1) {
if (sp == 1)
if (optind >= argc ||
argv[optind][0] != '-' || argv[optind][1] == '\0')
return EOF;
@@ -54,7 +54,6 @@ static int getopt(int argc, char *argv[], char *opts)
optind++;
return EOF;
}
}
optopt = c = argv[optind][sp];
if (c == ':' || (cp = strchr(opts, c)) == NULL) {
fprintf(stderr, ": illegal option -- %c\n", c);

View File

@@ -1,10 +0,0 @@
#!/bin/sh
n=10
case "$1" in
-n) n=$2; shift 2 ;;
-n*) n=${1#-n}; shift ;;
esac
exec sed ${n}q "$@"

View File

@@ -1,34 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
int plan9_main(int argc, char **argv);
#undef main
int main(int argc, char **argv)
{
/* The setfcr() function in lib9 is broken, must use asm. */
#ifdef __i386
short fcr;
__asm__ volatile ("fstcw %0 \n"
"or $63, %0 \n"
"fldcw %0 \n"
: "=m"(fcr));
#endif
return plan9_main(argc, argv);
}

View File

@@ -1,2 +0,0 @@
#!/bin/sh
exec awk "BEGIN { for (i = 2; i < ARGC; i++) printf \"$1\", ARGV[i] }" "$@"

View File

@@ -1,7 +0,0 @@
#include_next <math.h>
#undef INFINITY
#undef NAN
#define INFINITY (*(const float*)((const unsigned []){ 0x7f800000 }))
#define NAN (*(const float*)((const unsigned []){ 0x7fc00000 }))

1333
configure vendored

File diff suppressed because it is too large

View File

@@ -1,185 +1,100 @@
Never assume the API of libav* to be stable unless at least 1 month has passed
since the last major version increase or the API was added.
since the last major version increase.
The last version increases were:
libavcodec: 2012-01-27
libavdevice: 2011-04-18
libavfilter: 2012-06-22
libavformat: 2012-01-27
libavresample: 2012-10-05
libavresample: 2012-04-24
libpostproc: 2011-04-18
libswresample: 2011-09-19
libswscale: 2011-06-20
libavutil: 2012-10-22
libavutil: 2011-04-18
API changes, most recent first:
2012-12-20 - xxxxxxx - lavfi 3.28.100 - avfilter.h
Add AVFilterLink.channels, avfilter_link_get_channels()
and avfilter_ref_get_channels().
2012-12-15 - 2ada584d - lavc 54.80.100 - avcodec.h
Add pkt_size field to AVFrame.
2012-11-25 - c70ec631 - lavu 52.9.100 - opt.h
Add the following convenience functions to opt.h:
av_opt_get_image_size
av_opt_get_pixel_fmt
av_opt_get_sample_fmt
av_opt_set_image_size
av_opt_set_pixel_fmt
av_opt_set_sample_fmt
2012-11-17 - 4cd74c81 - lavu 52.8.100 - bprint.h
Add av_bprint_strftime().
2012-11-15 - 92648107 - lavu 52.7.100 - opt.h
Add av_opt_get_key_value().
2012-11-13 - 79456652 - lavfi 3.23.100 - avfilter.h
Add channels field to AVFilterBufferRefAudioProps.
2012-11-03 - 481fdeee - lavu 52.3.100 - opt.h
Add AV_OPT_TYPE_SAMPLE_FMT value to AVOptionType enum.
2012-10-21 - 6fb2fd8 - lavc 54.68.100 - avcodec.h
lavfi 3.20.100 - avfilter.h
Add AV_PKT_DATA_STRINGS_METADATA side data type, used to transmit key/value
strings between AVPacket and AVFrame, and add metadata field to
AVCodecContext (which shall not be accessed by users; see AVFrame metadata
instead).
2012-09-27 - a70b493 - lavd 54.3.100 - version.h
Add LIBAVDEVICE_IDENT symbol.
2012-09-27 - a70b493 - lavfi 3.18.100 - version.h
Add LIBAVFILTER_IDENT symbol.
2012-09-27 - a70b493 - libswr 0.16.100 - version.h
Add LIBSWRESAMPLE_VERSION, LIBSWRESAMPLE_BUILD
and LIBSWRESAMPLE_IDENT symbols.
2012-09-06 - 29e972f - lavu 51.72.100 - parseutils.h
2012-09-01 - xxxxxxx - lavu 51.72.100 - parseutils.h
Add av_small_strptime() time parsing function.
Can be used as a stripped-down replacement for strptime(), on
systems which do not support it.
2012-08-25 - 2626cc4 - lavf 54.28.100
Matroska demuxer now identifies SRT subtitles as AV_CODEC_ID_SUBRIP instead
of AV_CODEC_ID_TEXT.
2012-08-13 - 5c0d8bc - lavfi 3.8.100 - avfilter.h
2012-08-13 - xxxxxxx - lavfi 3.8.100 - avfilter.h
Add avfilter_get_class() function, and priv_class field to AVFilter
struct.
2012-08-12 - a25346e - lavu 51.69.100 - opt.h
2012-08-13 - xxxxxxx - lavu 51.69.100 - opt.h
Add AV_OPT_FLAG_FILTERING_PARAM symbol in opt.h.
2012-07-31 - 23fc4dd - lavc 54.46.100
2012-07-31 - xxxxxxx - lavc 54.46.100
Add channels field to AVFrame.
2012-07-30 - f893904 - lavu 51.66.100
2012-07-30 - xxxxxxx - lavu 51.66.100
Add av_get_channel_description()
and av_get_standard_channel_layout() functions.
2012-07-21 - 016a472 - lavc 54.43.100
2012-07-20 - xxxxxxx - lavc 54.43.100
Add decode_error_flags field to AVFrame.
2012-07-20 - b062936 - lavf 54.18.100
2012-07-20 - xxxxxxx - lavf 54.18.100
Add avformat_match_stream_specifier() function.
2012-07-14 - f49ec1b - lavc 54.38.100 - avcodec.h
2012-07-14 - xxxxxxx - lavc 54.38.100 - avcodec.h
Add metadata to AVFrame, and the accessor functions
av_frame_get_metadata() and av_frame_set_metadata().
2012-07-10 - 0e003d8 - lavc 54.33.100
2012-07-10 - xxxxxxx - lavc 54.33.100
Add av_fast_padded_mallocz().
2012-07-10 - 21d5609 - lavfi 3.2.0 - avfilter.h
2012-07-10 - xxxxxxx - lavfi 3.2.0 - avfilter.h
Add init_opaque() callback to AVFilter struct.
2012-06-26 - e6674e4 - lavu 51.63.100 - imgutils.h
2012-06-26 - xxxxxxx - lavu 51.63.100 - imgutils.h
Add functions to libavutil/imgutils.h:
av_image_get_buffer_size()
av_image_fill_arrays()
av_image_copy_to_buffer()
2012-06-24 - c41899a - lavu 51.62.100 - version.h
2012-06-24 - xxxxxxx - lavu 51.62.100 - version.h
version moved from avutil.h to version.h
2012-04-11 - 359abb1 - lavu 51.58.100 - error.h
2012-04-11 - xxxxxxx - lavu 51.58.100 - error.h
Add av_make_error_string() and av_err2str() utilities to
libavutil/error.h.
2012-06-05 - 62b39d4 - lavc 54.24.100
2012-06-05 - xxxxxxx - lavc 54.24.100
Add pkt_duration field to AVFrame.
2012-05-24 - f2ee065 - lavu 51.54.100
2012-05-24 - xxxxxxx - lavu 51.54.100
Move AVPALETTE_SIZE and AVPALETTE_COUNT macros from
libavcodec/avcodec.h to libavutil/pixfmt.h.
2012-05-14 - 94a9ac1 - lavf 54.5.100
2012-05-07 - xxxxxxx - lavf 54.5.100
Add av_guess_sample_aspect_ratio() function.
2012-04-20 - 65fa7bc - lavfi 2.70.100
2012-04-20 - xxxxxxx - lavfi 2.70.100
Add avfilter_unref_bufferp() to avfilter.h.
2012-04-13 - 162e400 - lavfi 2.68.100
2012-04-12 - xxxxxxx - lavfi 2.68.100
Install libavfilter/asrc_abuffer.h public header.
2012-03-26 - a67d9cf - lavfi 2.66.100
Add avfilter_fill_frame_from_{audio_,}buffer_ref() functions.
2012-12-29 - 2ce43b3 / d8fd06c - lavu 52.13.100 / 52.3.0 - avstring.h
Add av_basename() and av_dirname().
2012-11-11 - 03b0787 / 5980f5d - lavu 52.6.100 / 52.2.0 - audioconvert.h
Rename audioconvert.h to channel_layout.h. audioconvert.h is now deprecated.
2012-11-05 - 7d26be6 / dfde8a3 - lavu 52.5.100 / 52.1.0 - intmath.h
Add av_ctz() for trailing zero bit count
2012-10-21 - e3a91c5 / a893655 - lavu 51.77.100 / 51.45.0 - error.h
Add AVERROR_EXPERIMENTAL
2012-10-12 - a33ed6b / d2fcb35 - lavu 51.76.100 / 51.44.0 - pixdesc.h
Add functions for accessing pixel format descriptors.
Accessing the av_pix_fmt_descriptors array directly is now
deprecated.
2012-10-11 - f391e40 / 9a92aea - lavu 51.75.100 / 51.43.0 - aes.h, md5.h, sha.h, tree.h
Add functions for allocating the opaque contexts for the algorithms,
2012-10-10 - de31814 / b522000 - lavf 54.32.100 / 54.18.0 - avio.h
Add avio_closep to complement avio_close.
2012-10-08 - ae77266 / 78071a1 - lavu 51.74.100 / 51.42.0 - pixfmt.h
Rename PixelFormat to AVPixelFormat and all PIX_FMT_* to AV_PIX_FMT_*.
To provide backwards compatibility, PixelFormat is now #defined as
AVPixelFormat.
Note that this can break user code that includes pixfmt.h and uses the
'PixelFormat' identifier. Such code should either #undef PixelFormat
or stop using the PixelFormat name.
2012-10-05 - 55c49af / e7ba5b1 - lavr 1.0.0 - avresample.h
Data planes parameters to avresample_convert() and
avresample_read() are now uint8_t** instead of void**.
Libavresample is now stable.
2012-09-24 - 46a3595 / a42aada - lavc 54.59.100 / 54.28.0 - avcodec.h
2012-09-23 - xxxxxxx - lavc 54.28.0 - avcodec.h
Add avcodec_free_frame(). This function must now
be used for freeing an AVFrame.
2012-09-12 - e3e09f2 / 8919fee - lavu 51.73.100 / 51.41.0 - audioconvert.h
2012-09-12 - xxxxxxx - lavu 51.41.0 - audioconvert.h
Added AV_CH_LOW_FREQUENCY_2 channel mask value.
2012-09-04 - b21b5b0 / 686a329 - lavu 51.71.100 / 51.40.0 - opt.h
2012-09-04 - xxxxxxx - lavu 51.40.0 - opt.h
Reordered the fields in default_val in AVOption, changed which
default_val field is used for which AVOptionType.
2012-08-30 - 98298eb / a231832 - lavc 54.54.101 / 54.26.1 - avcodec.h
2012-xx-xx - xxxxxxx - lavc 54.26.1 - avcodec.h
Add codec descriptor properties AV_CODEC_PROP_LOSSY and
AV_CODEC_PROP_LOSSLESS.
@@ -187,90 +102,90 @@ API changes, most recent first:
Add codec descriptors for accessing codec properties without having
to refer to a specific decoder or encoder.
f5f3684 / c223d79 - Add an AVCodecDescriptor struct and functions
c223d79 - Add an AVCodecDescriptor struct and functions
avcodec_descriptor_get() and avcodec_descriptor_next().
f5f3684 / 51efed1 - Add AVCodecDescriptor.props and AV_CODEC_PROP_INTRA_ONLY.
6c180b3 / 91e59fe - Add avcodec_descriptor_get_by_name().
51efed1 - Add AVCodecDescriptor.props and AV_CODEC_PROP_INTRA_ONLY.
91e59fe - Add avcodec_descriptor_get_by_name().
2012-08-08 - f5f3684 / 987170c - lavu 51.68.100 / 51.38.0 - dict.h
2012-08-08 - 987170c - lavu 51.38 - dict.h
Add av_dict_count().
2012-08-07 - 7a72695 / 104e10f - lavc 54.51.100 / 54.25.0 - avcodec.h
2012-08-07 - 104e10f - lavc 54.25 - avcodec.h
Rename CodecID to AVCodecID and all CODEC_ID_* to AV_CODEC_ID_*.
To provide backwards compatibility, CodecID is now #defined as AVCodecID.
Note that this can break user code that includes avcodec.h and uses the
'CodecID' identifier. Such code should either #undef CodecID or stop using the
CodecID name.
2012-08-03 - e776ee8 / 239fdf1 - lavu 51.66.101 / 51.37.1 - cpu.h
2012-08-03 - 239fdf1 - lavu 51.37.1 - cpu.h
lsws 2.1.1 - swscale.h
Rename AV_CPU_FLAG_MMX2 ---> AV_CPU_FLAG_MMXEXT.
Rename SWS_CPU_CAPS_MMX2 ---> SWS_CPU_CAPS_MMXEXT.
2012-07-29 - 7c26761 / 681ed00 - lavf 54.22.100 / 54.13.0 - avformat.h
2012-07-29 - 681ed00 - lavf 54.13.0 - avformat.h
Add AVFMT_FLAG_NOBUFFER for low latency use cases.
2012-07-10 - 5fade8a - lavu 51.37.0
Add av_malloc_array() and av_mallocz_array()
2012-06-22 - e847f41 / d3d3a32 - lavu 51.61.100 / 51.34.0
2012-06-22 - d3d3a32 - lavu 51.34.0
Add av_usleep()
2012-06-20 - 4da42eb / ae0a301 - lavu 51.60.100 / 51.33.0
2012-06-20 - ae0a301 - lavu 51.33.0
Move av_gettime() to libavutil, add libavutil/time.h
2012-06-09 - 82edf67 / 3971be0 - lavr 0.0.3
2012-06-09 - 3971be0 - lavr 0.0.3
Add a parameter to avresample_build_matrix() for Dolby/DPLII downmixing.
2012-06-12 - c7b9eab / 9baeff9 - lavfi 2.79.100 / 2.23.0 - avfilter.h
2012-06-12 - 9baeff9 - lavfi 2.23.0 - avfilter.h
Add AVFilterContext.nb_inputs/outputs. Deprecate
AVFilterContext.input/output_count.
2012-06-12 - c7b9eab / 84b9fbe - lavfi 2.79.100 / 2.22.0 - avfilter.h
2012-06-12 - 84b9fbe - lavfi 2.22.0 - avfilter.h
Add avfilter_pad_get_type() and avfilter_pad_get_name(). Those
should now be used instead of accessing AVFilterPad members
directly.
2012-06-12 - 3630a07 / b0f0dfc - lavu 51.57.100 / 51.32.0 - audioconvert.h
2012-06-12 - b0f0dfc - lavu 51.32.0 - audioconvert.h
Add av_get_channel_layout_channel_index(), av_get_channel_name()
and av_channel_layout_extract_channel().
2012-05-25 - 53ce990 / 154486f - lavu 51.55.100 / 51.31.0 - opt.h
2012-05-25 - 154486f - lavu 51.31.0 - opt.h
Add av_opt_set_bin()
2012-05-15 - lavfi 2.74.100 / 2.17.0
2012-05-15 - lavfi 2.17.0
Add support for audio filters
61930bd / ac71230, 1cbf7fb / a2cd9be - add video/audio buffer sink in a new installed
ac71230/a2cd9be - add video/audio buffer sink in a new installed
header buffersink.h
1cbf7fb / 720c6b7 - add av_buffersrc_write_frame(), deprecate
720c6b7 - add av_buffersrc_write_frame(), deprecate
av_vsrc_buffer_add_frame()
61930bd / ab16504 - add avfilter_copy_buf_props()
61930bd / 9453c9e - add extended_data to AVFilterBuffer
61930bd / 1b8c927 - add avfilter_get_audio_buffer_ref_from_arrays()
ab16504 - add avfilter_copy_buf_props()
9453c9e - add extended_data to AVFilterBuffer
1b8c927 - add avfilter_get_audio_buffer_ref_from_arrays()
2012-05-09 - lavu 51.53.100 / 51.30.0 - samplefmt.h
61930bd / 142e740 - add av_samples_copy()
61930bd / 6d7f617 - add av_samples_set_silence()
2012-05-09 - lavu 51.30.0 - samplefmt.h
142e740 - add av_samples_copy()
6d7f617 - add av_samples_set_silence()
2012-05-09 - 61930bd / a5117a2 - lavc 54.21.101 / 54.13.1
2012-05-09 - a5117a2 - lavc 54.13.1
For audio formats with fixed frame size, the last frame
no longer needs to be padded with silence, libavcodec
will handle this internally (effectively all encoders
behave as if they had CODEC_CAP_SMALL_LAST_FRAME set).
2012-05-07 - 653d117 / 828bd08 - lavc 54.20.100 / 54.13.0 - avcodec.h
2012-05-07 - 828bd08 - lavc 54.13.0 - avcodec.h
Add sample_rate and channel_layout fields to AVFrame.
2012-05-01 - 2330eb1 / 4010d72 - lavr 0.0.1
2012-05-01 - 4010d72 - lavr 0.0.1
Change AV_MIX_COEFF_TYPE_Q6 to AV_MIX_COEFF_TYPE_Q8.
2012-04-25 - e890b68 / 3527a73 - lavu 51.48.100 / 51.29.0 - cpu.h
2012-04-25 - 3527a73 - lavu 51.29.0 - cpu.h
Add av_parse_cpu_flags()
2012-04-24 - 3ead79e / c8af852 - lavr 0.0.0
2012-04-24 - c8af852 - lavr 0.0.0
Add libavresample audio conversion library
2012-04-20 - 3194ab7 / 0c0d1bc - lavu 51.47.100 / 51.28.0 - audio_fifo.h
2012-04-20 - 0c0d1bc - lavu 51.28.0 - audio_fifo.h
Add audio FIFO functions:
av_audio_fifo_free()
av_audio_fifo_alloc()
@@ -282,10 +197,10 @@ API changes, most recent first:
av_audio_fifo_size()
av_audio_fifo_space()
2012-04-14 - lavfi 2.70.100 / 2.16.0 - avfiltergraph.h
7432bcf / d7bcc71 Add avfilter_graph_parse2().
2012-04-14 - lavfi 2.16.0 - avfiltergraph.h
d7bcc71 Add avfilter_graph_parse2().
2012-04-08 - 6bfb304 / 4d693b0 - lavu 51.46.100 / 51.27.0 - samplefmt.h
2012-04-08 - 4d693b0 - lavu 51.27.0 - samplefmt.h
Add av_get_packed_sample_fmt() and av_get_planar_sample_fmt()
2012-03-21 - b75c67d - lavu 51.43.100
@@ -313,73 +228,69 @@ API changes, most recent first:
2012-01-24 - 0c3577b - lavfi 2.60.100
Add avfilter_graph_dump.
2012-03-20 - 0ebd836 / 3c90cc2 - lavfo 54.2.0
Deprecate av_read_packet(), use av_read_frame() with
AVFMT_FLAG_NOPARSE | AVFMT_FLAG_NOFILLIN in AVFormatContext.flags
2012-03-05 - lavc 54.8.0
6699d07 Add av_get_exact_bits_per_sample()
9524cf7 Add av_get_audio_frame_duration()
2012-03-05 - lavc 54.10.100 / 54.8.0
f095391 / 6699d07 Add av_get_exact_bits_per_sample()
f095391 / 9524cf7 Add av_get_audio_frame_duration()
2012-03-04 - 2af8f2c / 44fe77b - lavc 54.8.100 / 54.7.0 - avcodec.h
2012-03-04 - 44fe77b - lavc 54.7.0 - avcodec.h
Add av_codec_is_encoder/decoder().
2012-03-01 - 1eb7f39 / 442c132 - lavc 54.5.100 / 54.3.0 - avcodec.h
2012-03-01 - 442c132 - lavc 54.3.0 - avcodec.h
Add av_packet_shrink_side_data.
2012-02-29 - 79ae084 / dd2a4bc - lavf 54.2.100 / 54.2.0 - avformat.h
2012-02-29 - dd2a4bc - lavf 54.2.0 - avformat.h
Add AVStream.attached_pic and AV_DISPOSITION_ATTACHED_PIC,
used for dealing with attached pictures/cover art.
2012-02-25 - 305e4b3 / c9bca80 - lavu 51.41.100 / 51.24.0 - error.h
2012-02-25 - c9bca80 - lavu 51.24.0 - error.h
Add AVERROR_UNKNOWN
NOTE: this was backported to 0.8
2012-02-20 - eadd426 / e9cda85 - lavc 54.2.100 / 54.2.0
2012-02-20 - e9cda85 - lavc 54.2.0
Add duration field to AVCodecParserContext
2012-02-20 - eadd426 / 0b42a93 - lavu 51.40.100 / 51.23.1 - mathematics.h
2012-02-20 - 0b42a93 - lavu 51.23.1 - mathematics.h
Add av_rescale_q_rnd()
2012-02-08 - f2b20b7 / 38d5533 - lavu 51.38.101 / 51.22.1 - pixdesc.h
2012-02-08 - 38d5533 - lavu 51.22.1 - pixdesc.h
Add PIX_FMT_PSEUDOPAL flag.
2012-02-08 - f2b20b7 / 52f82a1 - lavc 54.2.100 / 54.1.0
2012-02-08 - 52f82a1 - lavc 54.01.0
Add avcodec_encode_video2() and deprecate avcodec_encode_video().
2012-02-01 - 4c677df / 316fc74 - lavc 54.1.0
2012-02-01 - 316fc74 - lavc 54.01.0
Add av_fast_padded_malloc() as alternative for av_realloc() when aligned
memory is required. The buffer will always have FF_INPUT_BUFFER_PADDING_SIZE
zero-padded bytes at the end.
2012-01-31 - a369a6b / dd6d3b0 - lavf 54.1.0
2012-01-31 - dd6d3b0 - lavf 54.01.0
Add avformat_get_riff_video_tags() and avformat_get_riff_audio_tags().
NOTE: this was backported to 0.8
2012-01-31 - a369a6b / af08d9a - lavc 54.1.0
2012-01-31 - af08d9a - lavc 54.01.0
Add avcodec_is_open() function.
NOTE: this was backported to 0.8
2012-01-30 - 151ecc2 / 8b93312 - lavu 51.36.100 / 51.22.0 - intfloat.h
2012-01-30 - 8b93312 - lavu 51.22.0 - intfloat.h
Add a new installed header libavutil/intfloat.h with int/float punning
functions.
NOTE: this was backported to 0.8
2012-01-25 - lavf 53.31.100 / 53.22.0
3c5fe5b / f1caf01 Allow doing av_write_frame(ctx, NULL) for flushing possible
2012-01-25 - lavf 53.22.0
f1caf01 Allow doing av_write_frame(ctx, NULL) for flushing possible
buffered data within a muxer. Added AVFMT_ALLOW_FLUSH for
muxers supporting it (av_write_frame makes sure it is called
only for muxers with this flag).
2012-01-15 - lavc 53.56.105 / 53.34.0
2012-01-15 - lavc 53.34.0
New audio encoding API:
67f5650 / b2c75b6 Add CODEC_CAP_VARIABLE_FRAME_SIZE capability for use by audio
b2c75b6 Add CODEC_CAP_VARIABLE_FRAME_SIZE capability for use by audio
encoders.
67f5650 / 5ee5fa0 Add avcodec_fill_audio_frame() as a convenience function.
67f5650 / b2c75b6 Add avcodec_encode_audio2() and deprecate avcodec_encode_audio().
5ee5fa0 Add avcodec_fill_audio_frame() as a convenience function.
b2c75b6 Add avcodec_encode_audio2() and deprecate avcodec_encode_audio().
Add AVCodec.encode2().
2012-01-12 - b18e17e / 3167dc9 - lavfi 2.59.100 / 2.15.0
2012-01-12 - 3167dc9 - lavfi 2.15.0
Add a new installed header -- libavfilter/version.h -- with version macros.
2011-12-08 - a502939 - lavfi 2.52.0
@@ -400,37 +311,37 @@ API changes, most recent first:
2011-10-20 - b35e9e1 - lavu 51.22.0
Add av_strtok() to avstring.h.
2012-01-03 - ad1c8dd / b73ec05 - lavu 51.34.100 / 51.21.0
2011-01-03 - b73ec05 - lavu 51.21.0
Add av_popcount64
2011-12-18 - 7c29313 / 8400b12 - lavc 53.46.1 / 53.28.1
2011-12-18 - 8400b12 - lavc 53.28.1
Deprecate AVFrame.age. The field is unused.
2011-12-12 - 8bc7fe4 / 5266045 - lavf 53.25.0 / 53.17.0
2011-12-12 - 5266045 - lavf 53.17.0
Add avformat_close_input().
Deprecate av_close_input_file() and av_close_input_stream().
2011-12-02 - e4de716 / 0eea212 - lavc 53.40.0 / 53.25.0
2011-12-02 - 0eea212 - lavc 53.25.0
Add nb_samples and extended_data fields to AVFrame.
Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE.
Deprecate avcodec_decode_audio3() in favor of avcodec_decode_audio4().
avcodec_decode_audio4() writes output samples to an AVFrame, which allows
audio decoders to use get_buffer().
2011-12-04 - e4de716 / 560f773 - lavc 53.40.0 / 53.24.0
2011-12-04 - 560f773 - lavc 53.24.0
Change AVFrame.data[4]/base[4]/linesize[4]/error[4] to [8] at next major bump.
Change AVPicture.data[4]/linesize[4] to [8] at next major bump.
Change AVCodecContext.error[4] to [8] at next major bump.
Add AV_NUM_DATA_POINTERS to simplify the bump transition.
2011-11-23 - 8e576d5 / bbb46f3 - lavu 51.27.0 / 51.18.0
2011-11-23 - bbb46f3 - lavu 51.18.0
Add av_samples_get_buffer_size(), av_samples_fill_arrays(), and
av_samples_alloc(), to samplefmt.h.
2011-11-23 - 8e576d5 / 8889cc4 - lavu 51.27.0 / 51.17.0
2011-11-23 - 8889cc4 - lavu 51.17.0
Add planar sample formats and av_sample_fmt_is_planar() to samplefmt.h.
2011-11-19 - dbb38bc / f3a29b7 - lavc 53.36.0 / 53.21.0
2011-11-19 - f3a29b7 - lavc 53.21.0
Move some AVCodecContext fields to a new private struct, AVCodecInternal,
which is accessed from a new field, AVCodecContext.internal.
- fields moved:
@@ -438,55 +349,55 @@ API changes, most recent first:
AVCodecContext.internal_buffer_count --> AVCodecInternal.buffer_count
AVCodecContext.is_copy --> AVCodecInternal.is_copy
2011-11-16 - 8709ba9 / 6270671 - lavu 51.26.0 / 51.16.0
2011-11-16 - 6270671 - lavu 51.16.0
Add av_timegm()
2011-11-13 - lavf 53.21.0 / 53.15.0
2011-11-13 - lavf 53.15.0
New interrupt callback API, allowing per-AVFormatContext/AVIOContext
interrupt callbacks.
5f268ca / 6aa0b98 Add AVIOInterruptCB struct and the interrupt_callback field to
6aa0b98 Add AVIOInterruptCB struct and the interrupt_callback field to
AVFormatContext.
5f268ca / 1dee0ac Add avio_open2() with additional parameters. Those are
1dee0ac Add avio_open2() with additional parameters. Those are
an interrupt callback and an options AVDictionary.
This will allow passing AVOptions to protocols after lavf
54.0.
2011-11-06 - 13b7781 / ba04ecf - lavu 51.24.0 / 51.14.0
2011-11-06 - ba04ecf - lavu 51.14.0
Add av_strcasecmp() and av_strncasecmp() to avstring.h.
2011-11-06 - 13b7781 / 07b172f - lavu 51.24.0 / 51.13.0
2011-11-06 - 07b172f - lavu 51.13.0
Add av_toupper()/av_tolower()
2011-11-05 - d8cab5c / b6d08f4 - lavf 53.19.0 / 53.13.0
2011-11-05 - b6d08f4 - lavf 53.13.0
Add avformat_network_init()/avformat_network_deinit()
2011-10-27 - 6faf0a2 / 512557b - lavc 53.24.0 / 53.15.0
2011-10-27 - 512557b - lavc 53.15.0
Remove avcodec_parse_frame.
Deprecate AVCodecContext.parse_only and CODEC_CAP_PARSE_ONLY.
2011-10-19 - d049257 / 569129a - lavf 53.17.0 / 53.10.0
2011-10-19 - 569129a - lavf 53.10.0
Add avformat_new_stream(). Deprecate av_new_stream().
2011-10-13 - 91eb1b1 / b631fba - lavf 53.16.0 / 53.9.0
2011-10-13 - b631fba - lavf 53.9.0
Add AVFMT_NO_BYTE_SEEK AVInputFormat flag.
2011-10-12 - lavu 51.21.0 / 51.12.0
2011-10-12 - lavu 51.12.0
AVOptions API rewrite.
- f884ef0 / 145f741 FF_OPT_TYPE* renamed to AV_OPT_TYPE_*
- 145f741 FF_OPT_TYPE* renamed to AV_OPT_TYPE_*
- new setting/getting functions with slightly different semantics:
f884ef0 / dac66da av_set_string3 -> av_opt_set
dac66da av_set_string3 -> av_opt_set
av_set_double -> av_opt_set_double
av_set_q -> av_opt_set_q
av_set_int -> av_opt_set_int
f884ef0 / 41d9d51 av_get_string -> av_opt_get
41d9d51 av_get_string -> av_opt_get
av_get_double -> av_opt_get_double
av_get_q -> av_opt_get_q
av_get_int -> av_opt_get_int
- f884ef0 / 8c5dcaa trivial rename av_next_option -> av_opt_next
- f884ef0 / 641c7af new functions - av_opt_child_next, av_opt_child_class_next
- 8c5dcaa trivial rename av_next_option -> av_opt_next
- 641c7af new functions - av_opt_child_next, av_opt_child_class_next
and av_opt_find2()
2011-09-22 - a70e787 - lavu 51.17.0
@@ -532,31 +443,31 @@ API changes, most recent first:
2011-08-20 - 69e2c1a - lavu 51.13.0
Add av_get_media_type_string().
2011-09-03 - 1889c67 / fb4ca26 - lavc 53.13.0
2011-09-03 - fb4ca26 - lavc 53.13.0
lavf 53.11.0
lsws 2.1.0
Add {avcodec,avformat,sws}_get_class().
2011-08-03 - 1889c67 / c11fb82 - lavu 51.15.0
2011-08-03 - c11fb82 - lavu 51.15.0
Add AV_OPT_SEARCH_FAKE_OBJ flag for av_opt_find() function.
2011-08-14 - 323b930 - lavu 51.12.0
Add av_fifo_peek2(), deprecate av_fifo_peek().
2011-08-26 - lavu 51.14.0 / 51.9.0
- 976a8b2 / add41de..976a8b2 / abc78a5 Do not include intfloat_readwrite.h,
2011-08-26 - lavu 51.9.0
- add41de..abc78a5 Do not include intfloat_readwrite.h,
mathematics.h, rational.h, pixfmt.h, or log.h from avutil.h.
2011-08-16 - 27fbe31 / 48f9e45 - lavf 53.11.0 / 53.8.0
2011-08-16 - 48f9e45 - lavf 53.8.0
Add avformat_query_codec().
2011-08-16 - 27fbe31 / bca06e7 - lavc 53.11.0
2011-08-16 - bca06e7 - lavc 53.11.0
Add avcodec_get_type().
2011-08-06 - 0cb233c / 2f63440 - lavf 53.7.0
2011-08-06 - 2f63440 - lavf 53.7.0
Add error_recognition to AVFormatContext.
2011-08-02 - 1d186e9 / 9d39cbf - lavc 53.9.1
2011-08-02 - 9d39cbf - lavc 53.9.1
Add AV_PKT_FLAG_CORRUPT AVPacket flag.
2011-07-16 - b57df29 - lavfi 2.27.0
@@ -567,11 +478,11 @@ API changes, most recent first:
avfilter_set_common_packing_formats()
avfilter_all_packing_formats()
2011-07-10 - 3602ad7 / a67c061 - lavf 53.6.0
2011-07-10 - a67c061 - lavf 53.6.0
Add avformat_find_stream_info(), deprecate av_find_stream_info().
NOTE: this was backported to 0.7
2011-07-10 - 3602ad7 / 0b950fe - lavc 53.8.0
2011-07-10 - 0b950fe - lavc 53.8.0
Add avcodec_open2(), deprecate avcodec_open().
NOTE: this was backported to 0.7
@@ -614,35 +525,35 @@ API changes, most recent first:
2011-06-12 - 6119b23 - lavfi 2.16.0 - avfilter_graph_parse()
Change avfilter_graph_parse() signature.
2011-06-23 - 686959e / 67e9ae1 - lavu 51.10.0 / 51.8.0 - attributes.h
2011-06-23 - 67e9ae1 - lavu 51.8.0 - attributes.h
Add av_printf_format().
2011-06-16 - 2905e3f / 05e84c9, 2905e3f / 25de595 - lavf 53.4.0 / 53.2.0 - avformat.h
2011-06-16 - 05e84c9, 25de595 - lavf 53.2.0 - avformat.h
Add avformat_open_input and avformat_write_header().
Deprecate av_open_input_stream, av_open_input_file,
AVFormatParameters and av_write_header.
2011-06-16 - 2905e3f / 7e83e1c, 2905e3f / dc59ec5 - lavu 51.9.0 / 51.7.0 - opt.h
2011-06-16 - 7e83e1c, dc59ec5 - lavu 51.7.0 - opt.h
Add av_opt_set_dict() and av_opt_find().
Deprecate av_find_opt().
Add AV_DICT_APPEND flag.
2011-06-10 - 45fb647 / cb7c11c - lavu 51.6.0 - opt.h
2011-06-10 - cb7c11c - lavu 51.6.0 - opt.h
Add av_opt_flag_is_set().
2011-06-10 - c381960 - lavfi 2.15.0 - avfilter_get_audio_buffer_ref_from_arrays
Add avfilter_get_audio_buffer_ref_from_arrays() to avfilter.h.
2011-06-09 - f9ecb84 / d9f80ea - lavu 51.8.0 - AVMetadata
2011-06-09 - d9f80ea - lavu 51.8.0 - AVMetadata
Move AVMetadata from lavf to lavu and rename it to
AVDictionary -- new installed header dict.h.
All av_metadata_* functions renamed to av_dict_*.
2011-06-07 - d552f61 / a6703fa - lavu 51.8.0 - av_get_bytes_per_sample()
2011-06-07 - a6703fa - lavu 51.8.0 - av_get_bytes_per_sample()
Add av_get_bytes_per_sample() in libavutil/samplefmt.h.
Deprecate av_get_bits_per_sample_fmt().
2011-06-05 - f956924 / b39b062 - lavu 51.8.0 - opt.h
2011-06-05 - b39b062 - lavu 51.8.0 - opt.h
Add av_opt_free convenience function.
2011-06-06 - 95a0242 - lavfi 2.14.0 - AVFilterBufferRefAudioProps
@@ -672,7 +583,7 @@ API changes, most recent first:
Add av_get_pix_fmt_name() in libavutil/pixdesc.h, and deprecate
avcodec_get_pix_fmt_name() in libavcodec/avcodec.h in its favor.
2011-05-25 - 39e4206 / 30315a8 - lavf 53.3.0 - avformat.h
2011-05-25 - 30315a8 - lavf 53.3.0 - avformat.h
Add fps_probe_size to AVFormatContext.
2011-05-22 - 5ecdfd0 - lavf 53.2.0 - avformat.h
@@ -688,10 +599,10 @@ API changes, most recent first:
2011-05-14 - 9fdf772 - lavfi 2.6.0 - avcodec.h
Add avfilter_get_video_buffer_ref_from_frame() to libavfilter/avcodec.h.
2011-05-18 - 75a37b5 / 64150ff - lavc 53.7.0 - AVCodecContext.request_sample_fmt
2011-05-18 - 64150ff - lavc 53.7.0 - AVCodecContext.request_sample_fmt
Add request_sample_fmt field to AVCodecContext.
2011-05-10 - 59eb12f / 188dea1 - lavc 53.6.0 - avcodec.h
2011-05-10 - 188dea1 - lavc 53.6.0 - avcodec.h
Deprecate AVLPCType and the following fields in
AVCodecContext: lpc_coeff_precision, prediction_order_method,
min_partition_order, max_partition_order, lpc_type, lpc_passes.
@@ -721,81 +632,81 @@ API changes, most recent first:
Add av_dynarray_add function for adding
an element to a dynamic array.
2011-04-26 - d7e5aeb / bebe72f - lavu 51.1.0 - avutil.h
2011-04-26 - bebe72f - lavu 51.1.0 - avutil.h
Add AVPictureType enum and av_get_picture_type_char(), deprecate
FF_*_TYPE defines and av_get_pict_type_char() defined in
libavcodec/avcodec.h.
2011-04-26 - d7e5aeb / 10d3940 - lavfi 2.3.0 - avfilter.h
2011-04-26 - 10d3940 - lavfi 2.3.0 - avfilter.h
Add pict_type and key_frame fields to AVFilterBufferRefVideo.
2011-04-26 - d7e5aeb / 7a11c82 - lavfi 2.2.0 - vsrc_buffer
2011-04-26 - 7a11c82 - lavfi 2.2.0 - vsrc_buffer
Add sample_aspect_ratio fields to vsrc_buffer arguments
2011-04-21 - 8772156 / 94f7451 - lavc 53.1.0 - avcodec.h
2011-04-21 - 94f7451 - lavc 53.1.0 - avcodec.h
Add CODEC_CAP_SLICE_THREADS for codecs supporting sliced threading.
2011-04-15 - lavc 52.120.0 - avcodec.h
AVPacket structure got additional members for passing side information:
c407984 / 4de339e introduce side information for AVPacket
c407984 / 2d8591c make containers pass palette change in AVPacket
4de339e introduce side information for AVPacket
2d8591c make containers pass palette change in AVPacket
2011-04-12 - lavf 52.107.0 - avio.h
Avio cleanup, part II - deprecate the entire URLContext API:
c55780d / 175389c add avio_check as a replacement for url_exist
9891004 / ff1ec0c add avio_pause and avio_seek_time as replacements
175389c add avio_check as a replacement for url_exist
ff1ec0c add avio_pause and avio_seek_time as replacements
for _av_url_read_fseek/fpause
d4d0932 / cdc6a87 deprecate av_protocol_next(), avio_enum_protocols
cdc6a87 deprecate av_protocol_next(), avio_enum_protocols
should be used instead.
c88caa5 / 80c6e23 rename url_set_interrupt_cb->avio_set_interrupt_cb.
c88caa5 / f87b1b3 rename open flags: URL_* -> AVIO_*
d4d0932 / f8270bb add avio_enum_protocols.
d4d0932 / 5593f03 deprecate URLProtocol.
d4d0932 / c486dad deprecate URLContext.
d4d0932 / 026e175 deprecate the typedef for URLInterruptCB
c88caa5 / 8e76a19 deprecate av_register_protocol2.
11d7841 / b840484 deprecate URL_PROTOCOL_FLAG_NESTED_SCHEME
11d7841 / 1305d93 deprecate av_url_read_seek
11d7841 / fa104e1 deprecate av_url_read_pause
434f248 / 727c7aa deprecate url_get_filename().
434f248 / 5958df3 deprecate url_max_packet_size().
434f248 / 1869ea0 deprecate url_get_file_handle().
434f248 / 32a97d4 deprecate url_filesize().
434f248 / e52a914 deprecate url_close().
434f248 / 58a48c6 deprecate url_seek().
434f248 / 925e908 deprecate url_write().
434f248 / dce3756 deprecate url_read_complete().
434f248 / bc371ac deprecate url_read().
434f248 / 0589da0 deprecate url_open().
434f248 / 62eaaea deprecate url_connect.
434f248 / 5652bb9 deprecate url_alloc.
434f248 / 333e894 deprecate url_open_protocol
434f248 / e230705 deprecate url_poll and URLPollEntry
80c6e23 rename url_set_interrupt_cb->avio_set_interrupt_cb.
f87b1b3 rename open flags: URL_* -> AVIO_*
f8270bb add avio_enum_protocols.
5593f03 deprecate URLProtocol.
c486dad deprecate URLContext.
026e175 deprecate the typedef for URLInterruptCB
8e76a19 deprecate av_register_protocol2.
b840484 deprecate URL_PROTOCOL_FLAG_NESTED_SCHEME
1305d93 deprecate av_url_read_seek
fa104e1 deprecate av_url_read_pause
727c7aa deprecate url_get_filename().
5958df3 deprecate url_max_packet_size().
1869ea0 deprecate url_get_file_handle().
32a97d4 deprecate url_filesize().
e52a914 deprecate url_close().
58a48c6 deprecate url_seek().
925e908 deprecate url_write().
dce3756 deprecate url_read_complete().
bc371ac deprecate url_read().
0589da0 deprecate url_open().
62eaaea deprecate url_connect.
5652bb9 deprecate url_alloc.
333e894 deprecate url_open_protocol
e230705 deprecate url_poll and URLPollEntry
2011-04-08 - lavf 52.106.0 - avformat.h
Minor avformat.h cleanup:
d4d0932 / a9bf9d8 deprecate av_guess_image2_codec
d4d0932 / c3675df rename avf_sdp_create->av_sdp_create
a9bf9d8 deprecate av_guess_image2_codec
c3675df rename avf_sdp_create->av_sdp_create
2011-04-03 - lavf 52.105.0 - avio.h
Large-scale renaming/deprecating of AVIOContext-related functions:
2cae980 / 724f6a0 deprecate url_fdopen
2cae980 / 403ee83 deprecate url_open_dyn_packet_buf
2cae980 / 6dc7d80 rename url_close_dyn_buf -> avio_close_dyn_buf
2cae980 / b92c545 rename url_open_dyn_buf -> avio_open_dyn_buf
2cae980 / 8978fed introduce an AVIOContext.seekable field as a replacement for
724f6a0 deprecate url_fdopen
403ee83 deprecate url_open_dyn_packet_buf
6dc7d80 rename url_close_dyn_buf -> avio_close_dyn_buf
b92c545 rename url_open_dyn_buf -> avio_open_dyn_buf
8978fed introduce an AVIOContext.seekable field as a replacement for
AVIOContext.is_streamed and url_is_streamed()
1caa412 / b64030f deprecate get_checksum()
1caa412 / 4c4427a deprecate init_checksum()
2fd41c9 / 4ec153b deprecate udp_set_remote_url/get_local_port
4fa0e24 / 933e90a deprecate av_url_read_fseek/fpause
4fa0e24 / 8d9769a deprecate url_fileno
0fecf26 / b7f2fdd rename put_flush_packet -> avio_flush
0fecf26 / 35f1023 deprecate url_close_buf
0fecf26 / 83fddae deprecate url_open_buf
0fecf26 / d9d86e0 rename url_fprintf -> avio_printf
0fecf26 / 59f65d9 deprecate url_setbufsize
6947b0c / 3e68b3b deprecate url_ferror
b64030f deprecate get_checksum()
4c4427a deprecate init_checksum()
4ec153b deprecate udp_set_remote_url/get_local_port
933e90a deprecate av_url_read_fseek/fpause
8d9769a deprecate url_fileno
b7f2fdd rename put_flush_packet -> avio_flush
35f1023 deprecate url_close_buf
83fddae deprecate url_open_buf
d9d86e0 rename url_fprintf -> avio_printf
59f65d9 deprecate url_setbufsize
3e68b3b deprecate url_ferror
e8bb2e2 deprecate url_fget_max_packet_size
76aa876 rename url_fsize -> avio_size
e519753 deprecate url_fgetc
@@ -816,7 +727,7 @@ API changes, most recent first:
b3db9ce deprecate get_partial_buffer
8d9ac96 rename av_alloc_put_byte -> avio_alloc_context
2011-03-25 - 27ef7b1 / 34b47d7 - lavc 52.115.0 - AVCodecContext.audio_service_type
2011-03-25 - 34b47d7 - lavc 52.115.0 - AVCodecContext.audio_service_type
Add audio_service_type field to AVCodecContext.
2011-03-17 - e309fdc - lavu 50.40.0 - pixfmt.h
@@ -854,11 +765,11 @@ API changes, most recent first:
2011-02-10 - 12c14cd - lavf 52.99.0 - AVStream.disposition
Add AV_DISPOSITION_HEARING_IMPAIRED and AV_DISPOSITION_VISUAL_IMPAIRED.
2011-02-09 - c0b102c - lavc 52.112.0 - avcodec_thread_init()
2011-02-09 - 5592734 - lavc 52.112.0 - avcodec_thread_init()
Deprecate avcodec_thread_init()/avcodec_thread_free() use; instead
set thread_count before calling avcodec_open.
2011-02-09 - 37b00b4 - lavc 52.111.0 - threading API
2011-02-09 - 778b08a - lavc 52.111.0 - threading API
Add CODEC_CAP_FRAME_THREADS with new restrictions on get_buffer()/
release_buffer()/draw_horiz_band() callbacks for appropriate codecs.
Add thread_type and active_thread_type fields to AVCodecContext.
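A hedged sketch of the replacement pattern (the thread count and type flags are illustrative; the codec is assumed to support the requested threading type):
    #include <libavcodec/avcodec.h>

    /* Instead of avcodec_thread_init(), set the threading fields on the
     * context before opening the codec. */
    static AVCodecContext *open_threaded_codec(AVCodec *codec)
    {
        AVCodecContext *c = avcodec_alloc_context3(codec);
        if (!c)
            return NULL;
        c->thread_count = 4;                                /* illustrative value */
        c->thread_type  = FF_THREAD_FRAME | FF_THREAD_SLICE;
        if (avcodec_open2(c, codec, NULL) < 0) {
            av_free(c);
            return NULL;
        }
        return c;
    }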

@@ -1,30 +1,13 @@
LIBRARIES-$(CONFIG_AVUTIL) += libavutil
LIBRARIES-$(CONFIG_SWSCALE) += libswscale
LIBRARIES-$(CONFIG_SWRESAMPLE) += libswresample
LIBRARIES-$(CONFIG_AVCODEC) += libavcodec
LIBRARIES-$(CONFIG_AVFORMAT) += libavformat
LIBRARIES-$(CONFIG_AVDEVICE) += libavdevice
LIBRARIES-$(CONFIG_AVFILTER) += libavfilter
COMPONENTS-yes = $(PROGS-yes)
COMPONENTS-$(CONFIG_AVUTIL) += ffmpeg-utils
COMPONENTS-$(CONFIG_SWSCALE) += ffmpeg-scaler
COMPONENTS-$(CONFIG_SWRESAMPLE) += ffmpeg-resampler
COMPONENTS-$(CONFIG_AVCODEC) += ffmpeg-codecs ffmpeg-bitstream-filters
COMPONENTS-$(CONFIG_AVFORMAT) += ffmpeg-formats ffmpeg-protocols
COMPONENTS-$(CONFIG_AVDEVICE) += ffmpeg-devices
COMPONENTS-$(CONFIG_AVFILTER) += ffmpeg-filters
MANPAGES = $(COMPONENTS-yes:%=doc/%.1) $(LIBRARIES-yes:%=doc/%.3)
PODPAGES = $(COMPONENTS-yes:%=doc/%.pod) $(LIBRARIES-yes:%=doc/%.pod)
HTMLPAGES = $(COMPONENTS-yes:%=doc/%.html) $(LIBRARIES-yes:%=doc/%.html) \
MANPAGES = $(PROGS-yes:%=doc/%.1)
PODPAGES = $(PROGS-yes:%=doc/%.pod)
HTMLPAGES = $(PROGS-yes:%=doc/%.html) \
doc/developer.html \
doc/faq.html \
doc/fate.html \
doc/general.html \
doc/git-howto.html \
doc/nut.html \
doc/platform.html \
doc/syntax.html \
TXTPAGES = doc/fate.txt \
@@ -39,7 +22,6 @@ all-$(CONFIG_DOC): doc
doc: documentation
apidoc: doc/doxy/html
documentation: $(DOCS)
TEXIDEP = awk '/^@(verbatim)?include/ { printf "$@: $(@D)/%s\n", $$2 }' <$< >$(@:%=%.d)
@@ -66,16 +48,11 @@ doc/%.pod: doc/%.texi $(GENTEXI)
$(Q)$(TEXIDEP)
$(M)perl $(SRC_PATH)/doc/texi2pod.pl -Idoc $< $@
doc/%.1 doc/%.3: TAG = MAN
doc/%.1: TAG = MAN
doc/%.1: doc/%.pod $(GENTEXI)
$(M)pod2man --section=1 --center=" " --release=" " $< > $@
doc/%.3: doc/%.pod $(GENTEXI)
$(M)pod2man --section=3 --center=" " --release=" " $< > $@
$(DOCS) doc/doxy/html: | doc/
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(INSTHEADERS)
$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $(SRC_PATH) $^
$(DOCS): | doc/
install-man:
@@ -92,12 +69,9 @@ uninstall: uninstall-man
uninstall-man:
$(RM) $(addprefix "$(MANDIR)/man1/",$(ALLMANPAGES))
clean:: docclean
docclean:
$(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 doc/*.3 $(CLEANSUFFIXES:%=doc/%) doc/avoptions_*.texi
$(RM) -r doc/doxy/html
clean::
$(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 $(CLEANSUFFIXES:%=doc/%) doc/avoptions_*.texi
-include $(wildcard $(DOCS:%=%.d))
.PHONY: apidoc doc documentation
.PHONY: doc documentation

@@ -1,7 +1,7 @@
Release Notes
=============
* 1.1 "Fire Flower" January, 2013
* 1.0 "Angel" September, 2012
General notes
@@ -14,12 +14,3 @@ accepted. If you are experiencing issues with any formally released version of
FFmpeg, please try git master to check if the issue still exists. If it does,
make your report against the development code following the usual bug reporting
guidelines.
Of particular interest to our Windows users, FFmpeg now supports building with
the MSVC compiler. Since MSVC does not support C99 features used extensively by
FFmpeg, this has been accomplished with a converter that turns C99 code into
C89. See the platform-specific documentation for more details on building
FFmpeg with MSVC.
The used output sample format for several audio decoders has changed, make
sure you always check/use AVCodecContext.sample_fmt or AVFrame.format.

@@ -1,11 +0,0 @@
@chapter Authors
The FFmpeg developers.
For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
@command{git log} in the FFmpeg source directory, or browsing the
online repository at @url{http://source.ffmpeg.org}.
Maintainers for the specific components are listed in the file
@file{MAINTAINERS} in the source code tree.

@@ -1,11 +1,10 @@
All the numerical options, if not specified otherwise, accept as input
a string representing a number, which may contain one of the
SI unit prefixes, for example 'K', 'M', 'G'.
If 'i' is appended after the prefix, binary prefixes are used,
which are based on powers of 1024 instead of powers of 1000.
The 'B' postfix multiplies the value by 8, and can be
appended after a unit prefix or used alone. This allows using for
example 'KB', 'MiB', 'G' and 'B' as number postfix.
International System number postfixes, for example 'K', 'M', 'G'.
If 'i' is appended after the postfix, powers of 2 are used instead of
powers of 10. The 'B' postfix multiplies the value for 8, and can be
appended after another postfix or used alone. This allows using for
example 'KB', 'MiB', 'G' and 'B' as postfix.
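For example, the following (illustrative) command sets a video bitrate of 1.5
megabits per second using such a postfix:
@example
ffmpeg -i input.avi -b:v 1.5M output.mp4
@end example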
Options which do not take arguments are boolean options, and set the
corresponding value to true. They can be set to false by prefixing
@@ -152,21 +151,8 @@ directory.
This file can be useful for bug reports.
It also implies @code{-loglevel verbose}.
Setting the environment variable @code{FFREPORT} to any value has the
same effect. If the value is a ':'-separated key=value sequence, these
options will affect the report; options values must be escaped if they
contain special characters or the options delimiter ':' (see the
``Quoting and escaping'' section in the ffmpeg-utils manual). The
following option is recognized:
@table @option
@item file
set the file name to use for the report; @code{%p} is expanded to the name
of the program, @code{%t} is expanded to a timestamp, @code{%%} is expanded
to a plain @code{%}
@end table
Errors in parsing the environment variable are not fatal, and will not
appear in the report.
Note: setting the environment variable @code{FFREPORT} to any value has the
same effect.
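For example, a hypothetical invocation using the key=value form described
above (the log file name is purely illustrative):
@example
FFREPORT=file=ffreport-%p-%t.log ffmpeg -i input.avi output.mkv
@end example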
@item -cpuflags flags (@emph{global})
Allows setting and clearing cpu flags. This option is intended
@@ -209,3 +195,6 @@ use @option{-option 0}/@option{-option 1}.
Note 2: the old undocumented way of specifying per-stream AVOptions by
prepending v/a/s to the option name is now obsolete and will be removed soon.
@include avoptions_codec.texi
@include avoptions_format.texi

@@ -60,102 +60,4 @@ This decoder generates wave patterns according to predefined sequences. Its
use is purely internal and the format of the data it accepts is not publicly
documented.
@section libcelt
libcelt decoder wrapper
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
Requires the presence of the libcelt headers and library during configuration.
You need to explicitly configure the build with @code{--enable-libcelt}.
@section libgsm
libgsm decoder wrapper
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
the presence of the libgsm headers and library during configuration. You need
to explicitly configure the build with @code{--enable-libgsm}.
This decoder supports both the ordinary GSM and the Microsoft variant.
@section libilbc
libilbc decoder wrapper
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
audio codec. Requires the presence of the libilbc headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libilbc}.
@subsection Options
The following option is supported by the libilbc wrapper.
@table @option
@item enhance
Enable the enhancement of the decoded audio when set to 1. The default
value is 0 (disabled).
@end table
@section libopencore-amrnb
libopencore-amrnb decoder wrapper
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
Narrowband audio codec. Using it requires the presence of the
libopencore-amrnb headers and library during configuration. You need to
explicitly configure the build with @code{--enable-libopencore-amrnb}.
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
without this library.
@section libopencore-amrwb
libopencore-amrwb decoder wrapper.
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
Wideband audio codec. Using it requires the presence of the
libopencore-amrwb headers and library during configuration. You need to
explicitly configure the build with @code{--enable-libopencore-amrwb}.
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
without this library.
@section libopus
libopus decoder wrapper.
libopus allows libavcodec to decode the Opus Interactive Audio Codec.
Requires the presence of the libopus headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libopus}.
@c man end AUDIO DECODERS
@chapter Subtitles Decoders
@c man begin SUBTITLES DECODERS
@section dvdsub
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
also be found in VobSub file pairs and in some Matroska files.
@subsection Options
@table @option
@item palette
Specify the global palette used by the bitmaps. When stored in VobSub, the
palette is normally specified in the index file; in Matroska, the palette is
stored in the codec extra-data in the same format as in VobSub. In DVDs, the
palette is stored in the IFO file, and therefore not available when reading
from dumped VOB files.
The format for this option is a string containing 16 24-bit hexadecimal
numbers (without 0x prefix) separated by commas, for example @code{0d00ee,
ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b}.
@end table
@c man end SUBTITLES DECODERS

@@ -1,149 +0,0 @@
a {
color: #2D6198;
}
a:visited {
color: #884488;
}
#banner {
background-color: white;
position: relative;
text-align: center;
}
#banner img {
padding-bottom: 1px;
padding-top: 5px;
}
#body {
margin-left: 1em;
margin-right: 1em;
}
body {
background-color: #313131;
margin: 0;
text-align: justify;
}
.center {
margin-left: auto;
margin-right: auto;
text-align: center;
}
#container {
background-color: white;
color: #202020;
margin-left: 1em;
margin-right: 1em;
}
#footer {
text-align: center;
}
h1, h2, h3 {
padding-left: 0.4em;
border-radius: 4px;
padding-bottom: 0.2em;
padding-top: 0.2em;
border: 1px solid #6A996A;
}
h1 {
background-color: #7BB37B;
color: #151515;
font-size: 1.2em;
padding-bottom: 0.3em;
padding-top: 0.3em;
}
h2 {
color: #313131;
font-size: 0.9em;
background-color: #ABE3AB;
}
h3 {
color: #313131;
font-size: 0.8em;
margin-bottom: -8px;
background-color: #BBF3BB;
}
img {
border: 0;
}
#navbar {
background-color: #738073;
border-bottom: 1px solid #5C665C;
border-top: 1px solid #5C665C;
margin-top: 12px;
padding: 0.3em;
position: relative;
text-align: center;
}
#navbar a, #navbar_secondary a {
color: white;
padding: 0.3em;
text-decoration: none;
}
#navbar a:hover, #navbar_secondary a:hover {
background-color: #313131;
color: white;
text-decoration: none;
}
#navbar_secondary {
background-color: #738073;
border-bottom: 1px solid #5C665C;
border-left: 1px solid #5C665C;
border-right: 1px solid #5C665C;
padding: 0.3em;
position: relative;
text-align: center;
}
p {
margin-left: 1em;
margin-right: 1em;
}
pre {
margin-left: 3em;
margin-right: 3em;
padding: 0.3em;
border: 1px solid #bbb;
background-color: #f7f7f7;
}
dl dt {
font-weight: bold;
}
#proj_desc {
font-size: 1.2em;
}
#repos {
margin-left: 1em;
margin-right: 1em;
border-collapse: collapse;
border: solid 1px #6A996A;
}
#repos th {
background-color: #7BB37B;
border: solid 1px #6A996A;
}
#repos td {
padding: 0.2em;
border: solid 1px #6A996A;
}

@@ -184,56 +184,4 @@ the script is directly played, the actual times will match the absolute
timestamps up to the sound controller's clock accuracy, but if the user
somehow pauses the playback or seeks, all times will be shifted accordingly.
@section concat
Virtual concatenation script demuxer.
This demuxer reads a list of files and other directives from a text file and
demuxes them one after the other, as if all their packets had been muxed
together.
The timestamps in the files are adjusted so that the first file starts at 0
and each next file starts where the previous one finishes. Note that it is
done globally and may cause gaps if all streams do not have exactly the same
length.
All files must have the same streams (same codecs, same time base, etc.).
This script format cannot currently be probed; it must be specified explicitly.
@subsection Syntax
The script is a text file in extended-ASCII, with one directive per line.
Empty lines, leading spaces and lines starting with '#' are ignored. The
following directive is recognized:
@table @option
@item @code{file @var{path}}
Path to a file to read; special characters and spaces must be escaped with
backslash or single quotes.
@end table
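For example, a hypothetical script and the matching @command{ffmpeg}
invocation (file names are illustrative; @option{-f concat} selects this
demuxer explicitly, as noted above):
@example
# mylist.txt
file 'part-1.mkv'
file 'part-2.mkv'
@end example
and then:
@example
ffmpeg -f concat -i mylist.txt -c copy output.mkv
@end example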
@section tedcaptions
JSON captions used for @url{http://www.ted.com/, TED Talks}.
TED does not provide links to the captions, but they can be guessed from the
page. The file @file{tools/bookmarklets.html} from the FFmpeg source tree
contains a bookmarklet to expose them.
This demuxer accepts the following option:
@table @option
@item start_time
Set the start time of the TED talk, in milliseconds. The default is 15000
(15s). It is used to sync the captions with the downloadable videos, because
they include a 15s intro.
@end table
Example: convert the captions to a format most players understand:
@example
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
@end example
@c man end INPUT DEVICES

@@ -170,7 +170,7 @@ For exported names, each library has its own prefixes. Just check the existing
code and name accordingly.
@end itemize
@subsection Miscellaneous conventions
@subsection Miscellanous conventions
@itemize @bullet
@item
fprintf and printf are forbidden in libavformat and libavcodec,
@@ -190,8 +190,8 @@ set shiftwidth=4
set softtabstop=4
set cindent
set cinoptions=(0
" Allow tabs in Makefiles.
autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
" allow tabs in Makefiles
autocmd FileType make set noexpandtab shiftwidth=8 softtabstop=8
" Trailing whitespace and tabs are forbidden, so highlight them.
highlight ForbiddenWhitespace ctermbg=red guibg=red
match ForbiddenWhitespace /\s\+$\|\t/
@@ -317,8 +317,7 @@ For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
(e.g. addition of a function to the public API or extension of an
existing data structure).
Incrementing the third component means a noteworthy binary compatible
change (e.g. encoder bug fix that matters for the decoder). The third
component always starts at 100 to distinguish FFmpeg from Libav.
change (e.g. encoder bug fix that matters for the decoder).
@item
Compiler warnings indicate potential bugs or code with bad style. If a type of
warning always points to correct and clean code, that warning should
@@ -430,7 +429,7 @@ send a reminder by email. Your patch should eventually be dealt with.
Was the patch generated with git format-patch or send-email?
@item
Did you sign off your patch? (git commit -s)
See @url{http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob_plain;f=Documentation/SubmittingPatches} for the meaning
See @url{http://kerneltrap.org/files/Jeremy/DCO.txt} for the meaning
of sign off.
@item
Did you provide a clear git commit log message?

@@ -1,14 +0,0 @@
#!/bin/sh
SRC_PATH="${1}"
DOXYFILE="${2}"
shift 2
doxygen - <<EOF
@INCLUDE = ${DOXYFILE}
INPUT = $@
HTML_HEADER = ${SRC_PATH}/doc/doxy/header.html
HTML_FOOTER = ${SRC_PATH}/doc/doxy/footer.html
HTML_STYLESHEET = ${SRC_PATH}/doc/doxy/doxy_stylesheet.css
EOF

File diff suppressed because it is too large
@@ -1,9 +1,10 @@
</div>
<div id="footer">
Generated on $datetime for $projectname by&#160;<a href="http://www.doxygen.org/index.html">doxygen</a> $doxygenversion
</div>
<footer class="footer pagination-right">
<span class="label label-info">
Generated on $datetime for $projectname by&#160;<a href="http://www.doxygen.org/index.html">doxygen</a> $doxygenversion
</span>
</footer>
</div>
</body>
</html>

@@ -1,16 +1,14 @@
<!DOCTYPE html>
<html>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<link href="$relpath$doxy_stylesheet.css" rel="stylesheet" type="text/css" />
<!--Header replace -->
</head>
<div class="container">
<div id="container">
<!--Header replace -->
<div class="menu">
<div id="body">
<div>

@@ -25,95 +25,6 @@ enabled encoders.
A description of some of the currently available audio encoders
follows.
@anchor{aacenc}
@section aac
Advanced Audio Coding (AAC) encoder.
This encoder is an experimental FFmpeg-native AAC encoder. Currently only the
low complexity (AAC-LC) profile is supported. To use this encoder, you must set
@option{strict} option to @samp{experimental} or lower.
As this encoder is experimental, unexpected behavior may exist from time to
time. For a more stable AAC encoder, see @ref{libvo-aacenc}. However, be warned
that some users report its quality to be worse.
@c Comment this out until somebody writes the respective documentation.
@c See also @ref{libfaac}, @ref{libaacplus}, and @ref{libfdk-aac-enc}.
@subsection Options
@table @option
@item b
Set bit rate in bits/s. Setting this automatically activates constant bit rate
(CBR) mode.
@item q
Set quality for variable bit rate (VBR) mode. This option is valid only using
the @command{ffmpeg} command-line tool. For library interface users, use
@option{global_quality}.
@item stereo_mode
Set stereo encoding mode. Possible values:
@table @samp
@item auto
Automatically selected by the encoder.
@item ms_off
Disable middle/side encoding. This is the default.
@item ms_force
Force middle/side encoding.
@end table
@item aac_coder
Set AAC encoder coding method. Possible values:
@table @samp
@item 0
FAAC-inspired method.
This method is a simplified reimplementation of the method used in FAAC, which
sets thresholds proportional to the band energies, and then decreases all the
thresholds with quantizer steps to find the appropriate quantization with
distortion below threshold band by band.
The quality of this method is comparable to the two loop searching method
described below, but it is somewhat better and slower.
@item 1
Average noise to mask ratio (ANMR) trellis-based solution.
This has the theoretically best quality of all the coding methods, but is
also the slowest.
@item 2
Two loop searching (TLS) method.
This method first sets quantizers depending on band thresholds and then tries
to find an optimal combination by adding or subtracting a specific value from
all quantizers and adjusting some individual quantizer a little.
This method produces similar quality with the FAAC method and is the default.
@item 3
Constant quantizer method.
This method sets a constant quantizer for all bands. This is the fastest of all
the methods, yet produces the worst quality.
@end table
@end table
@subsection Tips and Tricks
According to some reports
(e.g. @url{http://d.hatena.ne.jp/kamedo2/20120729/1343545890}), setting the
@option{cutoff} option to 15000 Hz greatly improves the quality of the output.
As a result, we encourage you to do the same.
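For example, a hypothetical encode following the tip above (the bitrate is
arbitrary):
@example
ffmpeg -i input.wav -strict experimental -c:a aac -b:a 128k -cutoff 15000 output.m4a
@end example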
@section ac3 and ac3_fixed
AC-3 audio encoders.
@@ -501,279 +412,6 @@ Selected by Encoder (default)
@end table
@section libmp3lame
LAME (Lame Ain't an MP3 Encoder) MP3 encoder wrapper
Requires the presence of the libmp3lame headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libmp3lame}.
@subsection Options
The following options are supported by the libmp3lame wrapper. The
@command{lame} equivalents of the options are listed in parentheses.
@table @option
@item b (@emph{-b})
Set bitrate expressed in bits/s for CBR. LAME @code{bitrate} is
expressed in kilobits/s.
@item q (@emph{-V})
Set constant quality setting for VBR. This option is valid only
using the @command{ffmpeg} command-line tool. For library interface
users, use @option{global_quality}.
@item compression_level (@emph{-q})
Set algorithm quality. Valid arguments are integers in the 0-9 range,
with 0 meaning highest quality but slowest, and 9 meaning fastest
while producing the worst quality.
@item reservoir
Enable use of bit reservoir when set to 1. Default value is 1. LAME
has this enabled by default, but it can be overridden by using the
@option{--nores} option.
@end table
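For example, a hypothetical VBR encode at LAME quality level 2:
@example
ffmpeg -i input.wav -c:a libmp3lame -q:a 2 output.mp3
@end example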
@section libopencore-amrnb
OpenCORE Adaptive Multi-Rate Narrowband encoder.
Requires the presence of the libopencore-amrnb headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libopencore-amrnb --enable-version3}.
This is a mono-only encoder. Officially it only supports 8000Hz sample rate,
but you can override it by setting @option{strict} to @samp{unofficial} or
lower.
@subsection Options
@table @option
@item b
Set bitrate in bits per second. Only the following bitrates are supported,
otherwise libavcodec will round to the nearest valid bitrate.
@table @option
@item 4750
@item 5150
@item 5900
@item 6700
@item 7400
@item 7950
@item 10200
@item 12200
@end table
@item dtx
Allow discontinuous transmission (generate comfort noise) when set to 1. The
default value is 0 (disabled).
@end table
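For example, a hypothetical encode at one of the supported bitrates (the input
is resampled to mono 8000 Hz as required by this encoder):
@example
ffmpeg -i input.wav -ar 8000 -ac 1 -c:a libopencore_amrnb -b:a 12200 output.amr
@end example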
@section libtwolame
TwoLAME MP2 encoder wrapper
Requires the presence of the libtwolame headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libtwolame}.
@subsection Options
The following options are supported by the libtwolame wrapper. The
@command{twolame}-equivalent options follow the FFmpeg ones and are in
parentheses.
@table @option
@item b (@emph{-b})
Set bitrate expressed in bits/s for CBR. @command{twolame} @option{b}
option is expressed in kilobits/s. Default value is 128k.
@item q (@emph{-V})
Set quality for experimental VBR support. Maximum value range is
from -50 to 50, useful range is from -10 to 10. The higher the
value, the better the quality. This option is valid only using the
@command{ffmpeg} command-line tool. For library interface users,
use @option{global_quality}.
@item mode (@emph{--mode})
Set the mode of the resulting audio. Possible values:
@table @samp
@item auto
Choose mode automatically based on the input. This is the default.
@item stereo
Stereo
@item joint_stereo
Joint stereo
@item dual_channel
Dual channel
@item mono
Mono
@end table
@item psymodel (@emph{--psyc-mode})
Set psychoacoustic model to use in encoding. The argument must be
an integer between -1 and 4, inclusive. The higher the value, the
better the quality. The default value is 3.
@item energy_levels (@emph{--energy})
Enable energy levels extensions when set to 1. The default value is
0 (disabled).
@item error_protection (@emph{--protect})
Enable CRC error protection when set to 1. The default value is 0
(disabled).
@item copyright (@emph{--copyright})
Set MPEG audio copyright flag when set to 1. The default value is 0
(disabled).
@item original (@emph{--original})
Set MPEG audio original flag when set to 1. The default value is 0
(disabled).
@end table
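For example, a hypothetical CBR encode at 192 kilobits per second:
@example
ffmpeg -i input.wav -c:a libtwolame -b:a 192k output.mp2
@end example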
@anchor{libvo-aacenc}
@section libvo-aacenc
VisualOn AAC encoder
Requires the presence of the libvo-aacenc headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libvo-aacenc --enable-version3}.
This encoder is considered to be worse than the
@ref{aacenc,,native experimental FFmpeg AAC encoder}, according to
multiple sources.
@subsection Options
The VisualOn AAC encoder only supports encoding AAC-LC and up to 2
channels. It is also CBR-only.
@table @option
@item b
Set bit rate in bits/s.
@end table
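For example, a hypothetical stereo CBR encode:
@example
ffmpeg -i input.wav -c:a libvo_aacenc -b:a 128k output.m4a
@end example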
@section libvo-amrwbenc
VisualOn Adaptive Multi-Rate Wideband encoder
Requires the presence of the libvo-amrwbenc headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libvo-amrwbenc --enable-version3}.
This is a mono-only encoder. Officially it only supports 16000Hz sample
rate, but you can override it by setting @option{strict} to
@samp{unofficial} or lower.
@subsection Options
@table @option
@item b
Set bitrate in bits/s. Only the following bitrates are supported, otherwise
libavcodec will round to the nearest valid bitrate.
@table @samp
@item 6600
@item 8850
@item 12650
@item 14250
@item 15850
@item 18250
@item 19850
@item 23050
@item 23850
@end table
@item dtx
Allow discontinuous transmission (generate comfort noise) when set to 1. The
default value is 0 (disabled).
@end table
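For example, a hypothetical encode at one of the supported bitrates (the input
is resampled to mono 16000 Hz as required by this encoder):
@example
ffmpeg -i input.wav -ar 16000 -ac 1 -c:a libvo_amrwbenc -b:a 23850 output.amr
@end example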
@section libopus
libopus Opus Interactive Audio Codec encoder wrapper.
Requires the presence of the libopus headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libopus}.
@subsection Option Mapping
Most libopus options are modeled after the @command{opusenc} utility from
opus-tools. The following is an option mapping chart describing options
supported by the libopus wrapper, and their @command{opusenc}-equivalent
in parentheses.
@table @option
@item b (@emph{bitrate})
Set the bit rate in bits/s. FFmpeg's @option{b} option is
expressed in bits/s, while @command{opusenc}'s @option{bitrate} in
kilobits/s.
@item vbr (@emph{vbr}, @emph{hard-cbr}, and @emph{cvbr})
Set VBR mode. The FFmpeg @option{vbr} option has the following
valid arguments, with their @command{opusenc} equivalent options
in parentheses:
@table @samp
@item off (@emph{hard-cbr})
Use constant bit rate encoding.
@item on (@emph{vbr})
Use variable bit rate encoding (the default).
@item constrained (@emph{cvbr})
Use constrained variable bit rate encoding.
@end table
@item compression_level (@emph{comp})
Set encoding algorithm complexity. Valid options are integers in
the 0-10 range. 0 gives the fastest encodes but lower quality, while 10
gives the highest quality but slowest encoding. The default is 10.
@item frame_duration (@emph{framesize})
Set maximum frame size, or duration of a frame in milliseconds. The
argument must be exactly one of the following: 2.5, 5, 10, 20, 40, 60. Smaller
frame sizes achieve lower latency but less quality at a given bitrate.
Sizes greater than 20ms are only interesting at fairly low bitrates.
The default is 20ms.
@item packet_loss (@emph{expect-loss})
Set expected packet loss percentage. The default is 0.
@item application (N.A.)
Set intended application type. Valid options are listed below:
@table @samp
@item voip
Favor improved speech intelligibility.
@item audio
Favor faithfulness to the input (the default).
@item lowdelay
Restrict to only the lowest delay modes.
@end table
@item cutoff (N.A.)
Set cutoff bandwidth in Hz. The argument must be exactly one of the
following: 4000, 6000, 8000, 12000, or 20000, corresponding to
narrowband, mediumband, wideband, super wideband, and fullband
respectively. The default is 0 (cutoff disabled).
@end table
@c man end AUDIO ENCODERS
@chapter Video Encoders
@@ -782,46 +420,6 @@ respectively. The default is 0 (cutoff disabled).
A description of some of the currently available video encoders
follows.
@section libtheora
Theora format supported through libtheora.
Requires the presence of the libtheora headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libtheora}.
@subsection Options
The following global options are mapped to internal libtheora options
which affect the quality and the bitrate of the encoded stream.
@table @option
@item b
Set the video bitrate, only works if the @code{qscale} flag in
@option{flags} is not enabled.
@item flags
Used to enable constant quality mode encoding through the
@option{qscale} flag, and to enable the @code{pass1} and @code{pass2}
modes.
@item g
Set the GOP size.
@item global_quality
Set the global quality in lambda units, only works if the
@code{qscale} flag in @option{flags} is enabled. The value is clipped
in the [0 - 10*@code{FF_QP2LAMBDA}] range, and then multiplied by 6.3
to get a value in the native libtheora range [0-63]. A higher value
corresponds to a higher quality.
For example, to set maximum constant quality encoding with
@command{ffmpeg}:
@example
ffmpeg -i INPUT -flags:v qscale -global_quality:v "10*QP2LAMBDA" -codec:v libtheora OUTPUT.ogg
@end example
@end table
@section libvpx
VP8 format supported through libvpx.
@@ -979,12 +577,10 @@ Specify Weighted prediction for P-frames.
Deprecated in favor of @var{x264opts} (see @var{weightp} libx264 option).
@item x264opts @var{options}
Allow to set any x264 option, see @code{x264 --fullhelp} for a list.
Allow to set any x264 option, see x264 --fullhelp for a list.
@var{options} is a list of @var{key}=@var{value} couples separated by
":". In @var{filter} and @var{psy-rd} options that use ":" as a separator
themselves, use "," instead. They accept it as well since long ago but this
is kept undocumented for some reason.
":".
@end table
For example to specify libx264 encoding options with @command{ffmpeg}:
@@ -995,117 +591,4 @@ ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
For more information about libx264 and the supported options see:
@url{http://www.videolan.org/developers/x264.html}
@section libxvid
Xvid MPEG-4 Part 2 encoder wrapper.
This encoder requires the presence of the libxvidcore headers and library
during configuration. You need to explicitly configure the build with
@code{--enable-libxvid --enable-gpl}.
The native @code{mpeg4} encoder supports the MPEG-4 Part 2 format, so
users can encode to this format without this library.
@subsection Options
The following options are supported by the libxvid wrapper. Some of
the following options are listed but are not documented, and
correspond to shared codec options. See @ref{codec-options,,the Codec
Options chapter} for their documentation. The other shared options
which are not listed have no effect for the libxvid encoder.
@table @option
@item b
@item g
@item qmin
@item qmax
@item mpeg_quant
@item threads
@item bf
@item b_qfactor
@item b_qoffset
@item flags
Set specific encoding flags. Possible values:
@table @samp
@item mv4
Use four motion vectors per macroblock.
@item aic
Enable high quality AC prediction.
@item gray
Only encode grayscale.
@item gmc
Enable the use of global motion compensation (GMC).
@item qpel
Enable quarter-pixel motion compensation.
@item cgop
Enable closed GOP.
@item global_header
Place global headers in extradata instead of every keyframe.
@end table
@item trellis
@item me_method
Set motion estimation method. Possible values in decreasing order of
speed and increasing order of quality:
@table @samp
@item zero
Use no motion estimation (default).
@item phods
@item x1
@item log
Enable advanced diamond zonal search for 16x16 blocks and half-pixel
refinement for 16x16 blocks. @samp{x1} and @samp{log} are aliases for
@samp{phods}.
@item epzs
Enable all of the things described above, plus advanced diamond zonal
search for 8x8 blocks, half-pixel refinement for 8x8 blocks, and motion
estimation on chroma planes.
@item full
Enable all of the things described above, plus extended 16x16 and 8x8
blocks search.
@end table
@item mbd
Set macroblock decision algorithm. Possible values in the increasing
order of quality:
@table @samp
@item simple
Use macroblock comparing function algorithm (default).
@item bits
Enable rate distortion-based half pixel and quarter pixel refinement for
16x16 blocks.
@item rd
Enable all of the things described above, plus rate distortion-based
half pixel and quarter pixel refinement for 8x8 blocks, and rate
distortion-based search using square pattern.
@end table
@end table
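For example, a hypothetical encode using the wrapper and a couple of the
shared options listed above:
@example
ffmpeg -i input.mp4 -c:v libxvid -b:v 1200k -g 240 output.avi
@end example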
@c man end VIDEO ENCODERS

@@ -17,7 +17,6 @@ EXAMPLES= decoding_encoding \
filtering_audio \
metadata \
muxing \
resampling_audio \
scaling_video \
OBJS=$(addsuffix .o,$(EXAMPLES))

@@ -1,18 +0,0 @@
FFmpeg examples README
----------------------
Both of the following use cases rely on pkg-config and make, so make sure
that you have them installed and working on your system.
1) Build the installed examples in a generic read/write user directory
Copy to a read/write user directory and just use "make", it will link
to the libraries on your system, assuming the PKG_CONFIG_PATH is
correctly configured.
2) Build the examples in-tree
Assuming you are in the source FFmpeg checkout directory, you need to build
FFmpeg (no need to make install in any prefix). Then you can go into the
doc/examples and run a command such as PKG_CONFIG_PATH=pc-uninstalled make.

@@ -27,14 +27,13 @@
* Note that libavcodec only handles codecs (mpeg, mpeg4, etc...),
* not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...). See library 'libavformat' for the
* format handling
* @example doc/examples/decoding_encoding.c
*/
#include <math.h>
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/audioconvert.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
@@ -122,10 +121,6 @@ static void audio_encode_example(const char *filename)
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate audio codec context\n");
exit(1);
}
/* put sample parameters */
c->bit_rate = 64000;
@@ -257,10 +252,6 @@ static void audio_decode_example(const char *outfilename, const char *filename)
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate audio codec context\n");
exit(1);
}
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
@@ -355,10 +346,6 @@ static void video_encode_example(const char *filename, int codec_id)
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
/* put sample parameters */
c->bit_rate = 400000;
@@ -369,7 +356,7 @@ static void video_encode_example(const char *filename, int codec_id)
c->time_base= (AVRational){1,25};
c->gop_size = 10; /* emit one intra frame every ten frames */
c->max_b_frames=1;
c->pix_fmt = AV_PIX_FMT_YUV420P;
c->pix_fmt = PIX_FMT_YUV420P;
if(codec_id == AV_CODEC_ID_H264)
av_opt_set(c->priv_data, "preset", "slow", 0);
@@ -488,42 +475,15 @@ static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
fclose(f);
}
static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
{
int len, got_frame;
char buf[1024];
len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
if (len < 0) {
fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
return len;
}
if (got_frame) {
printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
fflush(stdout);
/* the picture is allocated by the decoder, no need to free it */
snprintf(buf, sizeof(buf), outfilename, *frame_count);
pgm_save(frame->data[0], frame->linesize[0],
avctx->width, avctx->height, buf);
(*frame_count)++;
}
if (pkt->data) {
pkt->size -= len;
pkt->data += len;
}
return 0;
}
static void video_decode_example(const char *outfilename, const char *filename)
{
AVCodec *codec;
AVCodecContext *c= NULL;
int frame_count;
int frame, got_picture, len;
FILE *f;
AVFrame *frame;
AVFrame *picture;
uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
char buf[1024];
AVPacket avpkt;
av_init_packet(&avpkt);
@@ -541,11 +501,6 @@ static void video_decode_example(const char *outfilename, const char *filename)
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
if(codec->capabilities&CODEC_CAP_TRUNCATED)
c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
@@ -559,19 +514,21 @@ static void video_decode_example(const char *outfilename, const char *filename)
exit(1);
}
/* the codec gives us the frame size, in samples */
f = fopen(filename, "rb");
if (!f) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
frame = avcodec_alloc_frame();
if (!frame) {
picture = avcodec_alloc_frame();
if (!picture) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame_count = 0;
frame = 0;
for(;;) {
avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
if (avpkt.size == 0)
@@ -593,9 +550,26 @@ static void video_decode_example(const char *outfilename, const char *filename)
/* here, we use a stream based decoder (mpeg1video), so we
feed decoder and see if it could decode a frame */
avpkt.data = inbuf;
while (avpkt.size > 0)
if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
while (avpkt.size > 0) {
len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
if (len < 0) {
fprintf(stderr, "Error while decoding frame %d\n", frame);
exit(1);
}
if (got_picture) {
printf("Saving frame %3d\n", frame);
fflush(stdout);
/* the picture is allocated by the decoder. no need to
free it */
snprintf(buf, sizeof(buf), outfilename, frame);
pgm_save(picture->data[0], picture->linesize[0],
c->width, c->height, buf);
frame++;
}
avpkt.size -= len;
avpkt.data += len;
}
}
/* some codecs, such as MPEG, transmit the I and P frame with a
@@ -603,13 +577,24 @@ static void video_decode_example(const char *outfilename, const char *filename)
chance to get the last frame of the video */
avpkt.data = NULL;
avpkt.size = 0;
decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);
len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
if (got_picture) {
printf("Saving last frame %3d\n", frame);
fflush(stdout);
/* the picture is allocated by the decoder. no need to
free it */
snprintf(buf, sizeof(buf), outfilename, frame);
pgm_save(picture->data[0], picture->linesize[0],
c->width, c->height, buf);
frame++;
}
fclose(f);
avcodec_close(c);
av_free(c);
avcodec_free_frame(&frame);
avcodec_free_frame(&picture);
printf("\n");
}
@@ -625,8 +610,8 @@ int main(int argc, char **argv)
"API example program to decode/encode a media stream with libavcodec.\n"
"This program generates a synthetic stream and encodes it to a file\n"
"named test.h264, test.mp2 or test.mpg depending on output_type.\n"
"The encoded stream is then decoded and written to a raw data output.\n"
"output_type must be choosen between 'h264', 'mp2', 'mpg'.\n",
"The encoded stream is then decoded and written to a raw data output\n."
"output_type must be choosen between 'h264', 'mp2', 'mpg'\n",
argv[0]);
return 1;
}

@@ -26,7 +26,6 @@
*
* Show how to use the libavformat and libavcodec API to demux and
* decode audio and video data.
* @example doc/examples/demuxing.c
*/
#include <libavutil/imgutils.h>
@@ -209,7 +208,7 @@ int main (int argc, char **argv)
/* register all formats and codecs */
av_register_all();
/* open input file, and allocate format context */
/* open input file, and allocated format context */
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
fprintf(stderr, "Could not open source file %s\n", src_filename);
exit(1);
@@ -243,6 +242,9 @@ int main (int argc, char **argv)
video_dst_bufsize = ret;
}
/* dump input information to stderr */
av_dump_format(fmt_ctx, 0, src_filename, 0);
if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
int nb_planes;
@@ -265,9 +267,6 @@ int main (int argc, char **argv)
}
}
/* dump input information to stderr */
av_dump_format(fmt_ctx, 0, src_filename, 0);
if (!audio_stream && !video_stream) {
fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
ret = 1;
@@ -289,7 +288,7 @@ int main (int argc, char **argv)
if (video_stream)
printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
if (audio_stream)
printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
printf("Demuxing video from file '%s' into '%s'\n", src_filename, audio_dst_filename);
/* read frames from the file */
while (av_read_frame(fmt_ctx, &pkt) >= 0)
@@ -314,7 +313,7 @@ int main (int argc, char **argv)
if (audio_stream) {
const char *fmt;
if ((ret = get_format_from_sample_fmt(&fmt, audio_dec_ctx->sample_fmt)) < 0)
if ((ret = get_format_from_sample_fmt(&fmt, audio_dec_ctx->sample_fmt) < 0))
goto end;
printf("Play the output audio file with the command:\n"
"ffplay -f %s -ac %d -ar %d %s\n",

@@ -25,7 +25,6 @@
/**
* @file
* API example for audio decoding and filtering
* @example doc/examples/filtering_audio.c
*/
#include <unistd.h>

@@ -24,7 +24,6 @@
/**
* @file
* API example for decoding and filtering
* @example doc/examples/filtering_video.c
*/
#define _XOPEN_SOURCE 600 /* for usleep */
@@ -88,7 +87,7 @@ static int init_filters(const char *filters_descr)
AVFilter *buffersink = avfilter_get_by_name("ffbuffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
enum PixelFormat pix_fmts[] = { PIX_FMT_GRAY8, PIX_FMT_NONE };
AVBufferSinkParams *buffersink_params;
filter_graph = avfilter_graph_alloc();

@@ -23,7 +23,6 @@
/**
* @file
* Shows how the metadata API can be used in application programs.
* @example doc/examples/metadata.c
*/
#include <stdio.h>

@@ -26,7 +26,6 @@
*
* Output a media file in any supported libavformat format.
* The default codecs are used.
* @example doc/examples/muxing.c
*/
#include <stdlib.h>
@@ -42,7 +41,7 @@
#define STREAM_DURATION 200.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
static int sws_flags = SWS_BICUBIC;
@@ -53,18 +52,19 @@ static float t, tincr, tincr2;
static int16_t *samples;
static int audio_input_frame_size;
/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
enum AVCodecID codec_id)
/*
* add an audio output stream
*/
static AVStream *add_audio_stream(AVFormatContext *oc, AVCodec **codec,
enum AVCodecID codec_id)
{
AVCodecContext *c;
AVStream *st;
/* find the encoder */
/* find the audio encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
fprintf(stderr, "Could not find encoder for '%s'\n",
avcodec_get_name(codec_id));
fprintf(stderr, "Could not find codec\n");
exit(1);
}
@@ -73,75 +73,32 @@ static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
st->id = oc->nb_streams-1;
st->id = 1;
c = st->codec;
switch ((*codec)->type) {
case AVMEDIA_TYPE_AUDIO:
st->id = 1;
c->sample_fmt = AV_SAMPLE_FMT_S16;
c->bit_rate = 64000;
c->sample_rate = 44100;
c->channels = 2;
break;
/* put sample parameters */
c->sample_fmt = AV_SAMPLE_FMT_S16;
c->bit_rate = 64000;
c->sample_rate = 44100;
c->channels = 2;
case AVMEDIA_TYPE_VIDEO:
avcodec_get_context_defaults3(c, *codec);
c->codec_id = codec_id;
c->bit_rate = 400000;
/* Resolution must be a multiple of two. */
c->width = 352;
c->height = 288;
/* timebase: This is the fundamental unit of time (in seconds) in terms
* of which frame timestamps are represented. For fixed-fps content,
* timebase should be 1/framerate and timestamp increments should be
* identical to 1. */
c->time_base.den = STREAM_FRAME_RATE;
c->time_base.num = 1;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* just for testing, we also add B frames */
c->max_b_frames = 2;
}
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
/* Needed to avoid using macroblocks in which some coeffs overflow.
* This does not happen with normal video, it just happens here as
* the motion of the chroma plane does not match the luma plane. */
c->mb_decision = 2;
}
break;
default:
break;
}
/* Some formats want stream headers to be separate. */
// some formats want stream headers to be separate
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
return st;
}
/**************************************************************/
/* audio output */
static float t, tincr, tincr2;
static int16_t *samples;
static int audio_input_frame_size;
static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
AVCodecContext *c;
int ret;
c = st->codec;
/* open it */
ret = avcodec_open2(c, codec, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
@@ -158,10 +115,6 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
samples = av_malloc(audio_input_frame_size *
av_get_bytes_per_sample(c->sample_fmt) *
c->channels);
if (!samples) {
fprintf(stderr, "Could not allocate audio samples buffer\n");
exit(1);
}
}
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
@@ -186,7 +139,7 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
AVCodecContext *c;
AVPacket pkt = { 0 }; // data and size must be 0;
AVFrame *frame = avcodec_alloc_frame();
int got_packet, ret;
int got_packet;
av_init_packet(&pkt);
c = st->codec;
@@ -199,22 +152,15 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
av_get_bytes_per_sample(c->sample_fmt) *
c->channels, 1);
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
exit(1);
}
avcodec_encode_audio2(c, &pkt, frame, &got_packet);
if (!got_packet)
return;
pkt.stream_index = st->index;
/* Write the compressed frame to the media file. */
ret = av_interleaved_write_frame(oc, &pkt);
if (ret != 0) {
fprintf(stderr, "Error while writing audio frame: %s\n",
av_err2str(ret));
if (av_interleaved_write_frame(oc, &pkt) != 0) {
fprintf(stderr, "Error while writing audio frame\n");
exit(1);
}
avcodec_free_frame(&frame);
@@ -232,7 +178,64 @@ static void close_audio(AVFormatContext *oc, AVStream *st)
static AVFrame *frame;
static AVPicture src_picture, dst_picture;
static int frame_count;
static uint8_t *video_outbuf;
static int frame_count, video_outbuf_size;
/* Add a video output stream. */
static AVStream *add_video_stream(AVFormatContext *oc, AVCodec **codec,
enum AVCodecID codec_id)
{
AVCodecContext *c;
AVStream *st;
/* find the video encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
fprintf(stderr, "codec not found\n");
exit(1);
}
st = avformat_new_stream(oc, *codec);
if (!st) {
fprintf(stderr, "Could not alloc stream\n");
exit(1);
}
c = st->codec;
avcodec_get_context_defaults3(c, *codec);
c->codec_id = codec_id;
/* Put sample parameters. */
c->bit_rate = 400000;
/* Resolution must be a multiple of two. */
c->width = 352;
c->height = 288;
/* timebase: This is the fundamental unit of time (in seconds) in terms
* of which frame timestamps are represented. For fixed-fps content,
* timebase should be 1/framerate and timestamp increments should be
* identical to 1. */
c->time_base.den = STREAM_FRAME_RATE;
c->time_base.num = 1;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* just for testing, we also add B frames */
c->max_b_frames = 2;
}
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
/* Needed to avoid using macroblocks in which some coeffs overflow.
* This does not happen with normal video, it just happens here as
* the motion of the chroma plane does not match the luma plane. */
c->mb_decision = 2;
}
/* Some formats want stream headers to be separate. */
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
return st;
}
static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
@@ -240,12 +243,23 @@ static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
AVCodecContext *c = st->codec;
/* open the codec */
ret = avcodec_open2(c, codec, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
video_outbuf = NULL;
if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
/* Allocate output buffer. */
/* XXX: API change will be done. */
/* Buffers passed into lav* can be allocated any way you prefer,
* as long as they're aligned enough for the architecture, and
* they're freed appropriately (such as using av_free for buffers
* allocated with av_malloc). */
video_outbuf_size = 200000;
video_outbuf = av_malloc(video_outbuf_size);
}
/* allocate and init a re-usable frame */
frame = avcodec_alloc_frame();
if (!frame) {
@@ -256,18 +270,17 @@ static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
/* Allocate the encoded raw picture. */
ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
if (ret < 0) {
fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
fprintf(stderr, "Could not allocate picture\n");
exit(1);
}
/* If the output format is not YUV420P, then a temporary YUV420P
* picture is needed too. It is then converted to the required
* output format. */
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
if (c->pix_fmt != PIX_FMT_YUV420P) {
ret = avpicture_alloc(&src_picture, PIX_FMT_YUV420P, c->width, c->height);
if (ret < 0) {
fprintf(stderr, "Could not allocate temporary picture: %s\n",
av_err2str(ret));
fprintf(stderr, "Could not allocate temporary picture\n");
exit(1);
}
}
@@ -309,11 +322,11 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
* frames if using B-frames, so we get the last frames by
* passing the same picture again. */
} else {
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
if (c->pix_fmt != PIX_FMT_YUV420P) {
/* as we only generate a YUV420P picture, we must convert it
* to the codec pixel format if needed */
if (!sws_ctx) {
sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
sws_ctx = sws_getContext(c->width, c->height, PIX_FMT_YUV420P,
c->width, c->height, c->pix_fmt,
sws_flags, NULL, NULL, NULL);
if (!sws_ctx) {
@@ -332,7 +345,8 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
}
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
/* Raw video case - directly store the picture in the packet */
/* Raw video case - the API will change slightly in the near
* future for that. */
AVPacket pkt;
av_init_packet(&pkt);
@@ -353,12 +367,15 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
fprintf(stderr, "Error encoding video frame\n");
exit(1);
}
/* If size is zero, it means the image was buffered. */
if (got_output) {
if (c->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(c->coded_frame->pts,
c->time_base, st->time_base);
if (c->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY;
@@ -371,7 +388,7 @@ static void write_video_frame(AVFormatContext *oc, AVStream *st)
}
}
if (ret != 0) {
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
fprintf(stderr, "Error while writing video frame\n");
exit(1);
}
frame_count++;
@@ -383,6 +400,7 @@ static void close_video(AVFormatContext *oc, AVStream *st)
av_free(src_picture.data[0]);
av_free(dst_picture.data[0]);
av_free(frame);
av_free(video_outbuf);
}
/**************************************************************/
@@ -396,7 +414,7 @@ int main(int argc, char **argv)
AVStream *audio_st, *video_st;
AVCodec *audio_codec, *video_codec;
double audio_pts, video_pts;
int ret, i;
int i;
/* Initialize libavcodec, and register all codecs and formats. */
av_register_all();
@@ -404,10 +422,8 @@ int main(int argc, char **argv)
if (argc != 2) {
printf("usage: %s output_file\n"
"API example program to output a media file with libavformat.\n"
"This program generates a synthetic audio and video stream, encodes and\n"
"muxes them into a file named output_file.\n"
"The output format is automatically guessed according to the file extension.\n"
"Raw images can also be output by using '%%d' in the filename.\n"
"Raw images can also be output by using '%%d' in the filename\n"
"\n", argv[0]);
return 1;
}
@@ -429,12 +445,11 @@ int main(int argc, char **argv)
* and initialize the codecs. */
video_st = NULL;
audio_st = NULL;
if (fmt->video_codec != AV_CODEC_ID_NONE) {
video_st = add_stream(oc, &video_codec, fmt->video_codec);
video_st = add_video_stream(oc, &video_codec, fmt->video_codec);
}
if (fmt->audio_codec != AV_CODEC_ID_NONE) {
audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
audio_st = add_audio_stream(oc, &audio_codec, fmt->audio_codec);
}
/* Now that all the parameters are set, we can open the audio and
@@ -448,24 +463,19 @@ int main(int argc, char **argv)
/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open '%s': %s\n", filename,
av_err2str(ret));
if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
fprintf(stderr, "Could not open '%s'\n", filename);
return 1;
}
}
/* Write the stream header, if any. */
ret = avformat_write_header(oc, NULL);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file: %s\n",
av_err2str(ret));
if (avformat_write_header(oc, NULL) < 0) {
fprintf(stderr, "Error occurred when opening output file\n");
return 1;
}
if (frame)
frame->pts = 0;
frame->pts = 0;
for (;;) {
/* Compute current audio and video time. */
if (audio_st)
@@ -488,7 +498,7 @@ int main(int argc, char **argv)
write_audio_frame(oc, audio_st);
} else {
write_video_frame(oc, video_st);
frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
frame->pts++;
}
}

@@ -1,223 +0,0 @@
/*
* Copyright (c) 2012 Stefano Sabatini
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @example doc/examples/resampling_audio.c
* libswresample API use example.
*/
#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>
static int get_format_from_sample_fmt(const char **fmt,
enum AVSampleFormat sample_fmt)
{
int i;
struct sample_fmt_entry {
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
} sample_fmt_entries[] = {
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
};
*fmt = NULL;
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
if (sample_fmt == entry->sample_fmt) {
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
return 0;
}
}
fprintf(stderr,
"Sample format %s not supported as output format\n",
av_get_sample_fmt_name(sample_fmt));
return AVERROR(EINVAL);
}
/**
* Fill dst buffer with nb_samples, generated starting from t.
*/
void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t)
{
int i, j;
double tincr = 1.0 / sample_rate, *dstp = dst;
const double c = 2 * M_PI * 440.0;
/* generate sin tone with 440Hz frequency and duplicated channels */
for (i = 0; i < nb_samples; i++) {
*dstp = sin(c * *t);
for (j = 1; j < nb_channels; j++)
dstp[j] = dstp[0];
dstp += nb_channels;
*t += tincr;
}
}
int alloc_samples_array_and_data(uint8_t ***data, int *linesize, int nb_channels,
int nb_samples, enum AVSampleFormat sample_fmt, int align)
{
int nb_planes = av_sample_fmt_is_planar(sample_fmt) ? nb_channels : 1;
*data = av_malloc(sizeof(*data) * nb_planes);
if (!*data)
return AVERROR(ENOMEM);
return av_samples_alloc(*data, linesize, nb_channels,
nb_samples, sample_fmt, align);
}
int main(int argc, char **argv)
{
int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
int src_rate = 48000, dst_rate = 44100;
uint8_t **src_data = NULL, **dst_data = NULL;
int src_nb_channels = 0, dst_nb_channels = 0;
int src_linesize, dst_linesize;
int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples;
enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16;
const char *dst_filename = NULL;
FILE *dst_file;
int dst_bufsize;
const char *fmt;
struct SwrContext *swr_ctx;
double t;
int ret;
if (argc != 2) {
fprintf(stderr, "Usage: %s output_file\n"
"API example program to show how to resample an audio stream with libswresample.\n"
"This program generates a series of audio frames, resamples them to a specified "
"output format and rate and saves them to an output file named output_file.\n",
argv[0]);
exit(1);
}
dst_filename = argv[1];
dst_file = fopen(dst_filename, "wb");
if (!dst_file) {
fprintf(stderr, "Could not open destination file %s\n", dst_filename);
exit(1);
}
/* create resampler context */
swr_ctx = swr_alloc();
if (!swr_ctx) {
fprintf(stderr, "Could not allocate resampler context\n");
ret = AVERROR(ENOMEM);
goto end;
}
/* set options */
av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);
av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);
/* initialize the resampling context */
if ((ret = swr_init(swr_ctx)) < 0) {
fprintf(stderr, "Failed to initialize the resampling context\n");
goto end;
}
/* allocate source and destination samples buffers */
src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
ret = alloc_samples_array_and_data(&src_data, &src_linesize, src_nb_channels,
src_nb_samples, src_sample_fmt, 0);
if (ret < 0) {
fprintf(stderr, "Could not allocate source samples\n");
goto end;
}
/* compute the number of converted samples: buffering is avoided
* ensuring that the output buffer will contain at least all the
* converted input samples */
max_dst_nb_samples = dst_nb_samples =
av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
/* buffer is going to be directly written to a rawaudio file, no alignment */
dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
ret = alloc_samples_array_and_data(&dst_data, &dst_linesize, dst_nb_channels,
dst_nb_samples, dst_sample_fmt, 0);
if (ret < 0) {
fprintf(stderr, "Could not allocate destination samples\n");
goto end;
}
t = 0;
do {
/* generate synthetic audio */
fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t);
/* compute destination number of samples */
dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
if (dst_nb_samples > max_dst_nb_samples) {
av_free(dst_data[0]);
ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
dst_nb_samples, dst_sample_fmt, 1);
if (ret < 0)
break;
max_dst_nb_samples = dst_nb_samples;
}
/* convert to destination format */
ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples);
if (ret < 0) {
fprintf(stderr, "Error while converting\n");
goto end;
}
dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
ret, dst_sample_fmt, 1);
printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
} while (t < 10);
if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
goto end;
fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
"ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);
end:
if (dst_file)
fclose(dst_file);
if (src_data)
av_freep(&src_data[0]);
av_freep(&src_data);
if (dst_data)
av_freep(&dst_data[0]);
av_freep(&dst_data);
swr_free(&swr_ctx);
return ret < 0;
}

View File

@@ -23,7 +23,6 @@
/**
* @file
* libswscale API use example.
* @example doc/examples/scaling_video.c
*/
#include <libavutil/imgutils.h>
@@ -33,18 +32,20 @@
static void fill_yuv_image(uint8_t *data[4], int linesize[4],
int width, int height, int frame_index)
{
int x, y;
int x, y, i;
i = frame_index;
/* Y */
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
data[0][y * linesize[0] + x] = x + y + frame_index * 3;
data[0][y * linesize[0] + x] = x + y + i * 3;
/* Cb and Cr */
for (y = 0; y < height / 2; y++) {
for (x = 0; x < width / 2; x++) {
data[1][y * linesize[1] + x] = 128 + y + frame_index * 2;
data[2][y * linesize[2] + x] = 64 + x + frame_index * 5;
data[1][y * linesize[1] + x] = 128 + y + i * 2;
data[2][y * linesize[2] + x] = 64 + x + i * 5;
}
}
}
@@ -54,7 +55,7 @@ int main(int argc, char **argv)
uint8_t *src_data[4], *dst_data[4];
int src_linesize[4], dst_linesize[4];
int src_w = 320, src_h = 240, dst_w, dst_h;
enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_RGB24;
enum PixelFormat src_pix_fmt = PIX_FMT_YUV420P, dst_pix_fmt = PIX_FMT_RGB24;
const char *dst_size = NULL;
const char *dst_filename = NULL;
FILE *dst_file;

View File

@@ -79,17 +79,6 @@ not a bug they should fix:
Then again, some of them do not know the difference between an undecidable
problem and an NP-hard problem...
@section I have installed this library with my distro's package manager. Why does @command{configure} not see it?
Distributions usually split libraries in several packages. The main package
contains the files necessary to run programs using the library. The
development package contains the files necessary to build programs using the
library. Sometimes, docs and/or data are in a separate package too.
To build FFmpeg, you need to install the development package. It is usually
called @file{libfoo-dev} or @file{libfoo-devel}. You can remove it after the
build is finished, but be sure to keep the main package.
@chapter Usage
@section ffmpeg does not work; what is wrong?
@@ -110,16 +99,7 @@ Then you may run:
Notice that @samp{%d} is replaced by the image number.
@file{img%03d.jpg} means the sequence @file{img001.jpg}, @file{img002.jpg}, etc.
Use the @option{-start_number} option to declare a starting number for
the sequence. This is useful if your sequence does not start with
@file{img001.jpg} but is still in a numerical order. The following
example will start with @file{img100.jpg}:
@example
ffmpeg -f image2 -start_number 100 -i img%d.jpg /tmp/a.mpg
@end example
@file{img%03d.jpg} means the sequence @file{img001.jpg}, @file{img002.jpg}, etc...
If you have large number of pictures to rename, you can use the
following command to ease the burden. The command, using the bourne
@@ -142,12 +122,6 @@ Then run:
The same logic is used for any image format that ffmpeg reads.
You can also use @command{cat} to pipe images to ffmpeg:
@example
cat *.jpg | ffmpeg -f image2pipe -c:v mjpeg -i - output.mpg
@end example
@section How do I encode movie to single pictures?
Use:
@@ -260,18 +234,18 @@ invoking ffmpeg with several @option{-i} options.
For audio, to put all channels together in a single stream (example: two
mono streams into one stereo stream): this is sometimes called to
@emph{merge} them, and can be done using the
@url{http://ffmpeg.org/ffmpeg-filters.html#amerge, @code{amerge}} filter.
@url{http://ffmpeg.org/ffmpeg.html#amerge, @code{amerge}} filter.
@item
For audio, to play one on top of the other: this is called to @emph{mix}
them, and can be done by first merging them into a single stream and then
using the @url{http://ffmpeg.org/ffmpeg-filters.html#pan, @code{pan}} filter to mix
using the @url{http://ffmpeg.org/ffmpeg.html#pan, @code{pan}} filter to mix
the channels at will.
@item
For video, to display both together, side by side or one on top of a part of
the other; it can be done using the
@url{http://ffmpeg.org/ffmpeg-filters.html#overlay, @code{overlay}} video filter.
@url{http://ffmpeg.org/ffmpeg.html#overlay, @code{overlay}} video filter.
@end itemize
@@ -280,19 +254,12 @@ the other; it can be done using the
There are several solutions, depending on the exact circumstances.
@subsection Concatenating using the concat @emph{filter}
@subsection Concatenating using filters
FFmpeg has a @url{http://ffmpeg.org/ffmpeg-filters.html#concat,
@code{concat}} filter designed specifically for that, with examples in the
documentation. This operation is recommended if you need to re-encode.
FFmpeg has a @url{http://ffmpeg.org/ffmpeg.html#concat-1, @code{concat}}
filter designed specifically for that, with examples in the documentation.
@subsection Concatenating using the concat @emph{demuxer}
FFmpeg has a @url{http://www.ffmpeg.org/ffmpeg-formats.html#concat,
@code{concat}} demuxer which you can use when you want to avoid a re-encode and
your format doesn't support file level concatenation.
@subsection Concatenating using the concat @emph{protocol} (file level)
@subsection Concatenating at the file level
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to concatenate
video by merely concatenating the files containing them.
@@ -330,7 +297,7 @@ mkfifo intermediate2.mpg
ffmpeg -i input1.avi -qscale:v 1 -y intermediate1.mpg < /dev/null &
ffmpeg -i input2.avi -qscale:v 1 -y intermediate2.mpg < /dev/null &
cat intermediate1.mpg intermediate2.mpg |\
ffmpeg -f mpeg -i - -c:v mpeg4 -acodec libmp3lame output.avi
ffmpeg -f mpeg -i - -qscale:v 2 -c:v mpeg4 -acodec libmp3lame -q:a 4 output.avi
@end example
@subsection Concatenating using raw audio and video
@@ -360,7 +327,7 @@ cat temp1.a temp2.a > all.a &
cat temp1.v temp2.v > all.v &
ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
-f yuv4mpegpipe -i all.v \
-y output.flv
-qscale:v 2 -y output.flv
rm temp[12].[av] all.[av]
@end example
@@ -418,20 +385,6 @@ The size of the initial scan is controlled by two options: @code{probesize}
(default ~5 Mo) and @code{analyzeduration} (default 5,000,000 µs = 5 s). For
the subtitle stream to be detected, both values must be large enough.
@section Why was the @command{ffmpeg} @option{-sameq} option removed? What to use instead?
The @option{-sameq} option meant "same quantizer", and made sense only in a
very limited set of cases. Unfortunately, a lot of people mistook it for
"same quality" and used it in places where it did not make sense: it had
roughly the expected visible effect, but achieved it in a very inefficient
way.
Each encoder has its own set of options to set the quality-vs-size balance,
use the options for the encoder you are using to set the quality level to a
point acceptable for your tastes. The most common options to do that are
@option{-qscale} and @option{-qmax}, but you should peruse the documentation
of the encoder you chose.
@chapter Development
@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?
@@ -455,8 +408,31 @@ with @code{#ifdef}s related to the compiler.
@section Is Microsoft Visual C++ supported?
Yes. Please see the @uref{platform.html, Microsoft Visual C++}
section in the FFmpeg documentation.
No. Microsoft Visual C++ is not compliant to the C99 standard and does
not - among other things - support the inline assembly used in FFmpeg.
If you wish to use MSVC++ for your
project then you can link the MSVC++ code with libav* as long as
you compile the latter with a working C compiler. For more information, see
the @emph{Microsoft Visual C++ compatibility} section in the FFmpeg
documentation.
There have been efforts to make FFmpeg compatible with MSVC++ in the
past. However, they have all been rejected as too intrusive, especially
since MinGW does the job adequately. None of the core developers
work with MSVC++ and thus this item is low priority. Should you find
the silver bullet that solves this problem, feel free to shoot it at us.
We strongly recommend you to move over from MSVC++ to MinGW tools.
@section Can I use FFmpeg or libavcodec under Windows?
Yes, but the Cygwin or MinGW tools @emph{must} be used to compile FFmpeg.
Read the @emph{Windows} section in the FFmpeg documentation to find more
information.
To get help and instructions for building FFmpeg under Windows, check out
the FFmpeg Windows Help Forum at
@url{http://ffmpeg.arrozcru.org/}.
@section Can you add automake, libtool or autoconf support?
@@ -514,8 +490,8 @@ to use them you have to append -D__STDC_CONSTANT_MACROS to your CXXFLAGS
@section I have a file in memory / a API different from *open/*read/ libc how do I use it with libavformat?
You have to create a custom AVIOContext using @code{avio_alloc_context},
see @file{libavformat/aviobuf.c} in FFmpeg and @file{libmpdemux/demux_lavf.c} in MPlayer or MPlayer2 sources.
You have to implement a URLProtocol, see @file{libavformat/file.c} in
FFmpeg and @file{libmpdemux/demux_lavf.c} in MPlayer sources.
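
A minimal sketch of the custom-AVIOContext approach described in that answer, reading from an in-memory buffer; the struct, callback and function names are made up for illustration, error handling is omitted, and the caller is assumed to have called av_register_all() already:

#include <string.h>
#include <libavformat/avformat.h>
#include <libavutil/mem.h>

struct mem_reader { const uint8_t *data; size_t size, pos; };

/* read_packet callback: hand libavformat the next chunk of the blob */
static int mem_read(void *opaque, uint8_t *buf, int buf_size)
{
    struct mem_reader *r = opaque;
    size_t left = r->size - r->pos;
    if (left == 0)
        return AVERROR_EOF;
    if ((size_t)buf_size > left)
        buf_size = left;
    memcpy(buf, r->data + r->pos, buf_size);
    r->pos += buf_size;
    return buf_size;
}

static int open_from_memory(AVFormatContext **fmt_ctx, struct mem_reader *r)
{
    unsigned char *iobuf = av_malloc(4096);
    AVIOContext *avio = avio_alloc_context(iobuf, 4096, 0 /* read-only */,
                                           r, mem_read, NULL, NULL);
    *fmt_ctx = avformat_alloc_context();
    (*fmt_ctx)->pb = avio;   /* demuxing will go through mem_read() */
    return avformat_open_input(fmt_ctx, "memory", NULL, NULL);
}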
@section Where can I find libav* headers for Pascal/Delphi?

View File

@@ -93,11 +93,11 @@ tests/fate.sh /path/to/fate_config
@end example
A configuration file template with comments describing the individual
configuration variables can be found at @file{doc/fate_config.sh.template}.
configuration variables can be found at @file{tests/fate_config.sh.template}.
@ifhtml
The mentioned configuration template is also available here:
@verbatiminclude fate_config.sh.template
@verbatiminclude ../tests/fate_config.sh.template
@end ifhtml
Create a configuration that suits your needs, based on the configuration

View File

@@ -1,45 +0,0 @@
\input texinfo @c -*- texinfo -*-
@settitle FFmpeg Bitstream Filters Documentation
@titlepage
@center @titlefont{FFmpeg Bitstream Filters Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
This document describes the bitstream filters provided by the
libavcodec library.
A bitstream filter operates on the encoded stream data, and performs
bitstream level modifications without performing decoding.
@c man end DESCRIPTION
@include bitstream_filters.texi
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{libavcodec.html,libavcodec}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavcodec(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffmpeg-bitstream-filters
@settitle FFmpeg bitstream filters
@end ignore
@bye

File diff suppressed because it is too large

View File

@@ -1,62 +0,0 @@
\input texinfo @c -*- texinfo -*-
@settitle FFmpeg Devices Documentation
@titlepage
@center @titlefont{FFmpeg Devices Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
This document describes the input and output devices provided by the
libavdevice library.
@c man end DESCRIPTION
@chapter Device Options
@c man begin DEVICE OPTIONS
The libavdevice library provides the same interface as
libavformat. Namely, an input device is considered like a demuxer, and
an output device like a muxer, and the interface and generic device
options are the same provided by libavformat (see the ffmpeg-formats
manual).
In addition each input or output device may support so-called private
options, which are specific for that component.
Options may be set by specifying -@var{option} @var{value} in the
FFmpeg tools, or by setting the value explicitly in the device
@code{AVFormatContext} options or using the @file{libavutil/opt.h} API
for programmatic use.
@c man end DEVICE OPTIONS
@include indevs.texi
@include outdevs.texi
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{libavdevice.html,libavdevice}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavdevice(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffmpeg-devices
@settitle FFmpeg devices
@end ignore
@bye

View File

@@ -1,42 +0,0 @@
\input texinfo @c -*- texinfo -*-
@settitle FFmpeg Filters Documentation
@titlepage
@center @titlefont{FFmpeg Filters Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
This document describes filters, sources, and sinks provided by the
libavfilter library.
@c man end DESCRIPTION
@include filters.texi
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{libavfilter.html,libavfilter}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavfilter(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffmpeg-filters
@settitle FFmpeg filters
@end ignore
@bye

View File

@@ -1,177 +0,0 @@
\input texinfo @c -*- texinfo -*-
@settitle FFmpeg Formats Documentation
@titlepage
@center @titlefont{FFmpeg Formats Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
This document describes the supported formats (muxers and demuxers)
provided by the libavformat library.
@c man end DESCRIPTION
@chapter Format Options
@c man begin FORMAT OPTIONS
The libavformat library provides some generic global options, which
can be set on all the muxers and demuxers. In addition each muxer or
demuxer may support so-called private options, which are specific for
that component.
Options may be set by specifying -@var{option} @var{value} in the
FFmpeg tools, or by setting the value explicitly in the
@code{AVFormatContext} options or using the @file{libavutil/opt.h} API
for programmatic use.
The list of supported options follows:
@table @option
@item avioflags @var{flags} (@emph{input/output})
Possible values:
@table @samp
@item direct
Reduce buffering.
@end table
@item probesize @var{integer} (@emph{input})
Set probing size in bytes, i.e. the size of the data to analyze to get
stream information. A higher value will allow to detect more
information in case it is dispersed into the stream, but will increase
latency. Must be an integer not lesser than 32. It is 5000000 by default.
@item packetsize @var{integer} (@emph{output})
Set packet size.
@item fflags @var{flags} (@emph{input/output})
Set format flags.
Possible values:
@table @samp
@item ignidx
Ignore index.
@item genpts
Generate PTS.
@item nofillin
Do not fill in missing values that can be exactly calculated.
@item noparse
Disable AVParsers, this needs @code{+nofillin} too.
@item igndts
Ignore DTS.
@item discardcorrupt
Discard corrupted frames.
@item sortdts
Try to interleave output packets by DTS.
@item keepside
Do not merge side data.
@item latm
Enable RTP MP4A-LATM payload.
@item nobuffer
Reduce the latency introduced by optional buffering
@end table
@item analyzeduration @var{integer} (@emph{input})
Specify how many microseconds are analyzed to estimate duration.
@item cryptokey @var{hexadecimal string} (@emph{input})
Set decryption key.
@item indexmem @var{integer} (@emph{input})
Set max memory used for timestamp index (per stream).
@item rtbufsize @var{integer} (@emph{input})
Set max memory used for buffering real-time frames.
@item fdebug @var{flags} (@emph{input/output})
Print specific debug info.
Possible values:
@table @samp
@item ts
@end table
@item max_delay @var{integer} (@emph{input/output})
Set maximum muxing or demuxing delay in microseconds.
@item fpsprobesize @var{integer} (@emph{input})
Set number of frames used to probe fps.
@item audio_preload @var{integer} (@emph{output})
Set microseconds by which audio packets should be interleaved earlier.
@item chunk_duration @var{integer} (@emph{output})
Set microseconds for each chunk.
@item chunk_size @var{integer} (@emph{output})
Set size in bytes for each chunk.
@item err_detect, f_err_detect @var{flags} (@emph{input})
Set error detection flags. @code{f_err_detect} is deprecated and
should be used only via the @command{ffmpeg} tool.
Possible values:
@table @samp
@item crccheck
Verify embedded CRCs.
@item bitstream
Detect bitstream specification deviations.
@item buffer
Detect improper bitstream length.
@item explode
Abort decoding on minor error detection.
@item careful
Consider things that violate the spec and have not been seen in the
wild as errors.
@item compliant
Consider all spec non compliancies as errors.
@item aggressive
Consider things that a sane encoder should not do as an error.
@end table
@item use_wallclock_as_timestamps @var{integer} (@emph{input})
Use wallclock as timestamps.
@item avoid_negative_ts @var{integer} (@emph{output})
Shift timestamps to make them positive. 1 enables, 0 disables, default
of -1 enables when required by target format.
@item skip_initial_bytes @var{integer} (@emph{input})
Set number initial bytes to skip. Default is 0.
@item correct_ts_overflow @var{integer} (@emph{input})
Correct single timestamp overflows if set to 1. Default is 1.
@end table
@c man end FORMAT OPTIONS
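
From code, the generic format options listed above can be handed to avformat_open_input() through an AVDictionary; a minimal sketch (the function name and the particular option choices are only an illustration):

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

static int open_with_format_options(AVFormatContext **fmt_ctx, const char *filename)
{
    AVDictionary *opts = NULL;
    int ret;

    /* equivalent to "-fflags +genpts -analyzeduration 10000000" on the command line */
    av_dict_set(&opts, "fflags", "+genpts", 0);
    av_dict_set(&opts, "analyzeduration", "10000000", 0);

    ret = avformat_open_input(fmt_ctx, filename, NULL, &opts);
    av_dict_free(&opts);   /* options not recognized by the demuxer are left in opts */
    return ret;
}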
@include demuxers.texi
@include muxers.texi
@include metadata.texi
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{libavformat.html,libavformat}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavformat(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffmpeg-formats
@settitle FFmpeg formats
@end ignore
@bye

View File

@@ -1,42 +0,0 @@
\input texinfo @c -*- texinfo -*-
@settitle FFmpeg Protocols Documentation
@titlepage
@center @titlefont{FFmpeg Protocols Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
This document describes the input and output protocols provided by the
libavformat library.
@c man end DESCRIPTION
@include protocols.texi
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{libavformat.html,libavformat}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavformat(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffmpeg-protocols
@settitle FFmpeg protocols
@end ignore
@bye

View File

@@ -1,238 +0,0 @@
\input texinfo @c -*- texinfo -*-
@settitle FFmpeg Resampler Documentation
@titlepage
@center @titlefont{FFmpeg Resampler Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
The FFmpeg resampler provides an high-level interface to the
libswresample library audio resampling utilities. In particular it
allows to perform audio resampling, audio channel layout rematrixing,
and convert audio format and packing layout.
@c man end DESCRIPTION
@chapter Resampler Options
@c man begin RESAMPLER OPTIONS
The audio resampler supports the following named options.
Options may be set by specifying -@var{option} @var{value} in the
FFmpeg tools, @var{option}=@var{value} for the aresample filter,
by setting the value explicitly in the
@code{SwrContext} options or using the @file{libavutil/opt.h} API for
programmatic use.
@table @option
@item ich, in_channel_count
Set the number of input channels. Default value is 0. Setting this
value is not mandatory if the corresponding channel layout
@option{in_channel_layout} is set.
@item och, out_channel_count
Set the number of output channels. Default value is 0. Setting this
value is not mandatory if the corresponding channel layout
@option{out_channel_layout} is set.
@item uch, used_channel_count
Set the number of used channels. Default value is 0. This option is
only used for special remapping.
@item isr, in_sample_rate
Set the input sample rate. Default value is 0.
@item osr, out_sample_rate
Set the output sample rate. Default value is 0.
@item isf, in_sample_fmt
Specify the input sample format. It is set by default to @code{none}.
@item osf, out_sample_fmt
Specify the output sample format. It is set by default to @code{none}.
@item tsf, internal_sample_fmt
Set the internal sample format. Default value is @code{none}.
@item icl, in_channel_layout
Set the input channel layout.
@item ocl, out_channel_layout
Set the output channel layout.
@item clev, center_mix_level
Set center mix level. It is a value expressed in deciBel, and must be
inclusively included between -32 and +32.
@item slev, surround_mix_level
Set surround mix level. It is a value expressed in deciBel, and must
be inclusively included between -32 and +32.
@item lfe_mix_evel
Set LFE mix level.
@item rmvol, rematrix_volume
Set rematrix volume. Default value is 1.0.
@item flags, swr_flags
Set flags used by the converter. Default value is 0.
It supports the following individual flags:
@table @option
@item res
force resampling
@end table
@item dither_scale
Set the dither scale. Default value is 1.
@item dither_method
Set dither method. Default value is 0.
Supported values:
@table @samp
@item rectangular
select rectangular dither
@item triangular
select triangular dither
@item triangular_hp
select triangular dither with high pass
@end table
@item resampler
Set resampling engine. Default value is swr.
Supported values:
@table @samp
@item swr
select the native SW Resampler; filter options precision and cheby are not
applicable in this case.
@item soxr
select the SoX Resampler (where available); compensation, and filter options
filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this
case.
@end table
@item filter_size
For swr only, set resampling filter size, default value is 32.
@item phase_shift
For swr only, set resampling phase shift, default value is 10, must be included
between 0 and 30.
@item linear_interp
Use Linear Interpolation if set to 1, default value is 0.
@item cutoff
Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
@item precision
For soxr only, the precision in bits to which the resampled signal will be
calculated. The default value of 20 (which, with suitable dithering, is
appropriate for a destination bit-depth of 16) gives SoX's 'High Quality'; a
value of 28 gives SoX's 'Very High Quality'.
@item cheby
For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
approximation for 'irrational' ratios. Default value is 0.
@item async
For swr only, simple 1 parameter audio sync to timestamps using stretching,
squeezing, filling and trimming. Setting this to 1 will enable filling and
trimming, larger values represent the maximum amount in samples that the data
may be stretched or squeezed for each second.
Default value is 0, thus no compensation is applied to make the samples match
the audio timestamps.
@item min_comp
For swr only, set the minimum difference between timestamps and audio data (in
seconds) to trigger stretching/squeezing/filling or trimming of the
data to make it match the timestamps. The default is that
stretching/squeezing/filling and trimming is disabled
(@option{min_comp} = @code{FLT_MAX}).
@item min_hard_comp
For swr only, set the minimum difference between timestamps and audio data (in
seconds) to trigger adding/dropping samples to make it match the
timestamps. This option effectively is a threshold to select between
hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
all compensation is by default disabled through @option{min_comp}.
The default is 0.1.
@item comp_duration
For swr only, set duration (in seconds) over which data is stretched/squeezed
to make it match the timestamps. Must be a non-negative double float value,
default value is 1.0.
@item max_soft_comp
For swr only, set maximum factor by which data is stretched/squeezed to make it
match the timestamps. Must be a non-negative double float value, default value
is 0.
@item matrix_encoding
Select matrixed stereo encoding.
It accepts the following values:
@table @samp
@item none
select none
@item dolby
select Dolby
@item dplii
select Dolby Pro Logic II
@end table
Default value is @code{none}.
@item filter_type
For swr only, select resampling filter type. This only affects resampling
operations.
It accepts the following values:
@table @samp
@item cubic
select cubic
@item blackman_nuttall
select Blackman Nuttall Windowed Sinc
@item kaiser
select Kaiser Windowed Sinc
@end table
@item kaiser_beta
For swr only, set Kaiser Window Beta value. Must be an integer included between
2 and 16, default value is 9.
@end table
@c man end RESAMPLER OPTIONS
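
The same options can be set from code on a SwrContext, either with av_opt_set_*() as in the removed resampling example earlier in this diff, or in a single call with swr_alloc_set_opts(); a minimal sketch with hypothetical layouts and rates:

#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>

/* 5.1 planar float at 48 kHz -> stereo s16 at 44.1 kHz */
static struct SwrContext *make_converter(void)
{
    struct SwrContext *swr = swr_alloc_set_opts(NULL,
            AV_CH_LAYOUT_STEREO,  AV_SAMPLE_FMT_S16,  44100,   /* output */
            AV_CH_LAYOUT_5POINT1, AV_SAMPLE_FMT_FLTP, 48000,   /* input  */
            0, NULL);
    if (swr && swr_init(swr) < 0)
        swr_free(&swr);   /* on init failure this also resets swr to NULL */
    return swr;
}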
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{libswresample.html,libswresample}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswresample(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffmpeg-resampler
@settitle FFmpeg Resampler
@end ignore
@bye

View File

@@ -1,141 +0,0 @@
\input texinfo @c -*- texinfo -*-
@settitle FFmpeg Scaler Documentation
@titlepage
@center @titlefont{FFmpeg Scaler Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
The FFmpeg rescaler provides an high-level interface to the libswscale
library image conversion utilities. In particular it allows to perform
image rescaling and pixel format conversion.
@c man end DESCRIPTION
@chapter Scaler Options
@c man begin SCALER OPTIONS
The video scaler supports the following named options.
Options may be set by specifying -@var{option} @var{value} in the
FFmpeg tools. For programmatic use, they can be set explicitly in the
@code{SwsContext} options or through the @file{libavutil/opt.h} API.
@table @option
@item sws_flags
Set the scaler flags. This is also used to set the scaling
algorithm. Only a single algorithm should be selected.
It accepts the following values:
@table @samp
@item fast_bilinear
Select fast bilinear scaling algorithm.
@item bilinear
Select bilinear scaling algorithm.
@item bicubic
Select bicubic scaling algorithm.
@item experimental
Select experimental scaling algorithm.
@item neighbor
Select nearest neighbor rescaling algorithm.
@item area
Select averaging area rescaling algorithm.
@item bicubiclin
Select bicubic scaling algorithm for the luma component, bilinear for
chroma components.
@item gauss
Select Gaussian rescaling algorithm.
@item sinc
Select sinc rescaling algorithm.
@item lanczos
Select lanczos rescaling algorithm.
@item spline
Select natural bicubic spline rescaling algorithm.
@item print_info
Enable printing/debug logging.
@item accurate_rnd
Enable accurate rounding.
@item full_chroma_int
Enable full chroma interpolation.
@item full_chroma_inp
Select full chroma input.
@item bitexact
Enable bitexact output.
@end table
@item srcw
Set source width.
@item srch
Set source height.
@item dstw
Set destination width.
@item dsth
Set destination height.
@item src_format
Set source pixel format (must be expressed as an integer).
@item dst_format
Set destination pixel format (must be expressed as an integer).
@item src_range
Select source range.
@item dst_range
Select destination range.
@item param0, param1
Set scaling algorithm parameters. The specified values are specific of
some scaling algorithms and ignored by others. The specified values
are floating point number values.
@end table
@c man end SCALER OPTIONS
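
When driving libswscale directly, the srcw/srch/dstw/dsth, src_format/dst_format and sws_flags options above correspond to the parameters of sws_getContext(); a minimal sketch, using the AV_PIX_FMT_* spelling that the newer side of this diff adopts (older trees spell them PIX_FMT_*):

#include <libswscale/swscale.h>

/* YUV420P -> RGB24 scaler with bilinear filtering */
static struct SwsContext *make_scaler(int src_w, int src_h, int dst_w, int dst_h)
{
    return sws_getContext(src_w, src_h, AV_PIX_FMT_YUV420P,
                          dst_w, dst_h, AV_PIX_FMT_RGB24,
                          SWS_BILINEAR, NULL, NULL, NULL);
}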
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{libswscale.html,libswscale}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswscale(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffmpeg-scaler
@settitle FFmpeg video scaling and pixel format converter
@end ignore
@bye

View File

@@ -1,43 +0,0 @@
\input texinfo @c -*- texinfo -*-
@settitle FFmpeg Utilities Documentation
@titlepage
@center @titlefont{FFmpeg Utilities Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
This document describes some generic features and utilities provided
by the libavutil library.
@c man end DESCRIPTION
@include syntax.texi
@include eval.texi
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{libavutil.html,libavutil}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavutil(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffmpeg-utils
@settitle FFmpeg utilities
@end ignore
@bye

View File

@@ -11,7 +11,13 @@
@chapter Synopsis
ffmpeg [@var{global_options}] @{[@var{input_file_options}] -i @file{input_file}@} ... @{[@var{output_file_options}] @file{output_file}@} ...
The generic syntax is:
@example
@c man begin SYNOPSIS
ffmpeg [global options] [[infile options][@option{-i} @var{infile}]]... @{[outfile options] @var{outfile}@}...
@c man end
@end example
@chapter Description
@c man begin DESCRIPTION
@@ -52,7 +58,7 @@ options apply ONLY to the next input or output file and are reset between files.
@item
To set the video bitrate of the output file to 64kbit/s:
@example
ffmpeg -i input.avi -b:v 64k -bufsize 64k output.avi
ffmpeg -i input.avi -b:v 64k output.avi
@end example
@item
@@ -420,7 +426,8 @@ As an input option, ignore any timestamps stored in the file and instead
generate timestamps assuming constant frame rate @var{fps}.
As an output option, duplicate or drop input frames to achieve constant output
frame rate @var{fps}.
frame rate @var{fps} (note that this actually causes the @code{fps} filter to be
inserted to the end of the corresponding filtergraph).
@item -s[:@var{stream_specifier}] @var{size} (@emph{input/output,per-stream})
Set frame size.
@@ -463,6 +470,11 @@ Disable video recording.
@item -vcodec @var{codec} (@emph{output})
Set the video codec. This is an alias for @code{-codec:v}.
@item -same_quant
Use same quantizer as source (implies VBR).
Note that this is NOT SAME QUALITY. Do not use this option unless you know you
need it.
@item -pass[:@var{stream_specifier}] @var{n} (@emph{output,per-stream})
Select the pass number (1 or 2). It is used to do two-pass
@@ -789,7 +801,39 @@ Copy chapters from input file with index @var{input_file_index} to the next
output file. If no chapter mapping is specified, then chapters are copied from
the first input file with at least one chapter. Use a negative file index to
disable any chapter copying.
@item -debug @var{category}
Print specific debug info.
@var{category} is a number or a string containing one of the following values:
@table @samp
@item bitstream
@item buffers
picture buffer allocations
@item bugs
@item dct_coeff
@item er
error recognition
@item mb_type
macroblock (MB) type
@item mmco
memory management control operations (H.264)
@item mv
motion vector
@item pict
picture info
@item pts
@item qp
per-block quantization parameter (QP)
@item rc
rate control
@item skip
@item startcode
@item thread_ops
threading operations
@item vis_mb_type
visualize block types
@item vis_qp
visualize quantization parameter (QP), lower QP are tinted greener
@end table
@item -benchmark (@emph{global})
Show benchmarking information at the end of an encode.
Shows CPU time used and maximum memory consumption.
@@ -852,17 +896,9 @@ Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps
the parameter is the maximum samples per second by which the audio is changed.
-async 1 is a special case where only the start of the audio stream is corrected
without any later correction.
This option has been deprecated. Use the @code{aresample} audio filter instead.
This option has been deprecated. Use the @code{asyncts} audio filter instead.
@item -copyts
Do not process input timestamps, but keep their values without trying
to sanitize them. In particular, do not remove the initial start time
offset value.
Note that, depending on the @option{vsync} option or on specific muxer
processing, the output timestamps may mismatch with the input
timestamps even when this option is selected.
Copy timestamps from input to output.
@item -copytb @var{mode}
Specify how to set the encoder timebase when stream copying. @var{mode} is an
integer numeric value, and can assume one of the following values:
@@ -908,7 +944,7 @@ ffmpeg -i infile -streamid 0:33 -streamid 1:36 out.ts
@end example
@item -bsf[:@var{stream_specifier}] @var{bitstream_filters} (@emph{output,per-stream})
Set bitstream filters for matching streams. @var{bitstream_filters} is
Set bitstream filters for matching streams. @var{bistream_filters} is
a comma-separated list of bitstream filters. Use the @code{-bsfs} option
to get the list of bitstream filters.
@example
@@ -932,8 +968,7 @@ ffmpeg -i input.mpg -timecode 01:02:03.04 -r 30000/1001 -s ntsc output.mpg
Define a complex filter graph, i.e. one with arbitrary number of inputs and/or
outputs. For simple graphs -- those with one input and one output of the same
type -- see the @option{-filter} options. @var{filtergraph} is a description of
the filter graph, as described in the ``Filtergraph syntax'' section of the
ffmpeg-filters manual.
the filter graph, as described in @ref{Filtergraph syntax}.
Input link labels must refer to input streams using the
@code{[file_index:stream_specifier]} syntax (i.e. the same as @option{-map}
@@ -978,7 +1013,7 @@ ffmpeg -filter_complex 'color=red' -t 5 out.mkv
As a special exception, you can use a bitmap subtitle stream as input: it
will be converted into a video with the same size as the largest video in
the file, or 720x576 if no video is present. Note that this is an
the file, or 720×576 if no video is present. Note that this is an
experimental and temporary solution. It will be removed once libavfilter has
proper support for subtitles.
@@ -1254,11 +1289,11 @@ ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
You can put many streams of the same type in the output:
@example
ffmpeg -i test1.avi -i test2.avi -map 1:1 -map 1:0 -map 0:1 -map 0:0 -c copy -y test12.nut
ffmpeg -i test1.avi -i test2.avi -map 0.3 -map 0.2 -map 0.1 -map 0.0 -c copy test12.nut
@end example
The resulting output file @file{test12.nut} will contain the first four streams
from the input files in reverse order.
The resulting output file @file{test12.avi} will contain first four streams from
the input file in reverse order.
@item
To force CBR video output:
@@ -1276,35 +1311,32 @@ ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
@end itemize
@c man end EXAMPLES
@chapter See Also
@ifhtml
@url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-utils.html,ffmpeg-utils},
@url{ffmpeg-scaler.html,ffmpeg-scaler},
@url{ffmpeg-resampler.html,ffmpeg-resampler},
@url{ffmpeg-codecs.html,ffmpeg-codecs},
@url{ffmpeg-bitstream-filters,ffmpeg-bitstream-filters},
@url{ffmpeg-formats.html,ffmpeg-formats},
@url{ffmpeg-devices.html,ffmpeg-devices},
@url{ffmpeg-protocols.html,ffmpeg-protocols},
@url{ffmpeg-filters.html,ffmpeg-filters}
@end ifhtml
@ifnothtml
ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
@end ifnothtml
@include authors.texi
@include syntax.texi
@include eval.texi
@include decoders.texi
@include encoders.texi
@include demuxers.texi
@include muxers.texi
@include indevs.texi
@include outdevs.texi
@include protocols.texi
@include bitstream_filters.texi
@include filters.texi
@include metadata.texi
@ignore
@setfilename ffmpeg
@settitle ffmpeg video converter
@c man begin SEEALSO
ffplay(1), ffprobe(1), ffserver(1) and the FFmpeg HTML documentation
@c man end
@c man begin AUTHORS
See git history
@c man end
@end ignore
@bye

View File

@@ -11,7 +11,11 @@
@chapter Synopsis
ffplay [@var{options}] [@file{input_file}]
@example
@c man begin SYNOPSIS
ffplay [options] [@file{input_file}]
@c man end
@end example
@chapter Description
@c man begin DESCRIPTION
@@ -130,20 +134,8 @@ Exit when video is done playing.
Exit if any key is pressed.
@item -exitonmousedown
Exit if any mouse button is pressed.
@item -codec:@var{media_specifier} @var{codec_name}
Force a specific decoder implementation for the stream identified by
@var{media_specifier}, which can assume the values @code{a} (audio),
@code{v} (video), and @code{s} subtitle.
@item -acodec @var{codec_name}
Force a specific audio decoder.
@item -vcodec @var{codec_name}
Force a specific video decoder.
@item -scodec @var{codec_name}
Force a specific subtitle decoder.
@item -codec:@var{stream_type}
Force a specific decoder implementation
@end table
@section While playing
@@ -186,35 +178,29 @@ Seek to percentage in file corresponding to fraction of width.
@c man end
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-utils.html,ffmpeg-utils},
@url{ffmpeg-scaler.html,ffmpeg-scaler},
@url{ffmpeg-resampler.html,ffmpeg-resampler},
@url{ffmpeg-codecs.html,ffmpeg-codecs},
@url{ffmpeg-bitstream-filters,ffmpeg-bitstream-filters},
@url{ffmpeg-formats.html,ffmpeg-formats},
@url{ffmpeg-devices.html,ffmpeg-devices},
@url{ffmpeg-protocols.html,ffmpeg-protocols},
@url{ffmpeg-filters.html,ffmpeg-filters}
@end ifhtml
@ifnothtml
ffmpeg(1), ffprobe(1), ffserver(1),
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
@end ifnothtml
@include authors.texi
@include syntax.texi
@include eval.texi
@include decoders.texi
@include demuxers.texi
@include muxers.texi
@include indevs.texi
@include outdevs.texi
@include protocols.texi
@include filters.texi
@ignore
@setfilename ffplay
@settitle FFplay media player
@c man begin SEEALSO
ffmpeg(1), ffprobe(1), ffserver(1) and the FFmpeg HTML documentation
@c man end
@c man begin AUTHORS
The FFmpeg developers
@c man end
@end ignore
@bye

View File

@@ -11,7 +11,13 @@
@chapter Synopsis
ffprobe [@var{options}] [@file{input_file}]
The generic syntax is:
@example
@c man begin SYNOPSIS
ffprobe [options] [@file{input_file}]
@c man end
@end example
@chapter Description
@c man begin DESCRIPTION
@@ -39,10 +45,6 @@ ffprobe output is designed to be easily parsable by a textual filter,
and consists of one or more sections of a form defined by the selected
writer, which is specified by the @option{print_format} option.
Sections may contain other nested sections, and are identified by a
name (which may be shared by other sections), and an unique
name. See the output of @option{sections}.
Metadata tags stored in the container or in the streams are recognized
and printed in the corresponding "FORMAT" or "STREAM" section.
@@ -92,25 +94,6 @@ For example for printing the output in JSON format, specify:
For more details on the available output printing formats, see the
Writers section below.
@item -sections
Print sections structure and section information, and exit. The output
is not meant to be parsed by a machine.
@item -select_streams @var{stream_specifier}
Select only the streams specified by @var{stream_specifier}. This
option affects only the options related to streams
(e.g. @code{show_streams}, @code{show_packets}, etc.).
For example to show only audio streams, you can use the command:
@example
ffprobe -show_streams -select_streams a INPUT
@end example
To show only video packets belonging to the video stream with index 1:
@example
ffprobe -show_packets -select_streams v:1 INPUT
@end example
@item -show_data
Show payload data, as an hexadecimal and ASCII dump. Coupled with
@option{-show_packets}, it will dump the packets' data. Coupled with
@@ -135,59 +118,6 @@ Like @option{-show_format}, but only prints the specified entry of the
container format information, rather than all. This option may be given more
than once, then all specified entries will be shown.
This option is deprecated, use @code{show_entries} instead.
@item -show_entries @var{section_entries}
Set list of entries to show.
Entries are specified according to the following
syntax. @var{section_entries} contains a list of section entries
separated by @code{:}. Each section entry is composed by a section
name (or unique name), optionally followed by a list of entries local
to that section, separated by @code{,}.
If section name is specified but is followed by no @code{=}, all
entries are printed to output, together with all the contained
sections. Otherwise only the entries specified in the local section
entries list are printed. In particular, if @code{=} is specified but
the list of local entries is empty, then no entries will be shown for
that section.
Note that the order of specification of the local section entries is
not honored in the output, and the usual display order will be
retained.
The formal syntax is given by:
@example
@var{LOCAL_SECTION_ENTRIES} ::= @var{SECTION_ENTRY_NAME}[,@var{LOCAL_SECTION_ENTRIES}]
@var{SECTION_ENTRY} ::= @var{SECTION_NAME}[=[@var{LOCAL_SECTION_ENTRIES}]]
@var{SECTION_ENTRIES} ::= @var{SECTION_ENTRY}[:@var{SECTION_ENTRIES}]
@end example
For example, to show only the index and type of each stream, and the PTS
time, duration time, and stream index of the packets, you can specify
the argument:
@example
packet=pts_time,duration_time,stream_index : stream=index,codec_type
@end example
To show all the entries in the section "format", but only the codec
type in the section "stream", specify the argument:
@example
format : stream=codec_type
@end example
To show all the tags in the stream and format sections:
@example
format_tags : format_tags
@end example
To show only the @code{title} tag (if available) in the stream
sections:
@example
stream_tags=title
@end example
@item -show_packets
Show information about each packet contained in the input multimedia
stream.
@@ -256,9 +186,8 @@ Read @var{input_file}.
A writer defines the output format adopted by @command{ffprobe}, and will be
used for printing all the parts of the output.
A writer may accept one or more arguments, which specify the options
to adopt. The options are specified as a list of @var{key}=@var{value}
pairs, separated by ":".
A writer may accept one or more arguments, which specify the options to
adopt.
A description of the currently available writers follows.
@@ -277,6 +206,9 @@ keyN=valN
Metadata tags are printed as a line in the corresponding FORMAT or
STREAM section, and are prefixed by the string "TAG:".
This writer accepts options as a list of @var{key}=@var{value} pairs,
separated by ":".
A description of the accepted options follows.
@table @option
@@ -306,6 +238,9 @@ Metadata tags are printed in the corresponding "format" or "stream"
section. A metadata tag key, if printed, is prefixed by the string
"tag:".
This writer accepts options as a list of @var{key}=@var{value} pairs,
separated by ":".
The description of the accepted options follows.
@table @option
@@ -357,6 +292,9 @@ A free-form output where each line contains an explicit key=value, such as
directly embedded in sh scripts as long as the separator character is an
alphanumeric character or an underscore (see @var{sep_char} option).
This writer accepts options as a list of @var{key}=@var{value} pairs,
separated by ":".
The description of the accepted options follows.
@table @option
@@ -419,6 +357,9 @@ JSON based format.
Each section is printed using JSON notation.
This writer accepts options as a list of @var{key}=@var{value} pairs,
separated by ":".
The description of the accepted options follows.
@table @option
@@ -445,6 +386,9 @@ Note that the output issued will be compliant to the
(@option{unit}, @option{prefix}, @option{byte_binary_prefix},
@option{sexagesimal} etc.) are specified.
This writer accepts options as a list of @var{key}=@var{value} pairs,
separated by ":".
The description of the accepted options follows.
@table @option
@@ -487,35 +431,25 @@ DV, GXF and AVI timecodes are available in format metadata
@end itemize
@c man end TIMECODE
@chapter See Also
@ifhtml
@url{ffplay.html,ffmpeg}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-utils.html,ffmpeg-utils},
@url{ffmpeg-scaler.html,ffmpeg-scaler},
@url{ffmpeg-resampler.html,ffmpeg-resampler},
@url{ffmpeg-codecs.html,ffmpeg-codecs},
@url{ffmpeg-bitstream-filters,ffmpeg-bitstream-filters},
@url{ffmpeg-formats.html,ffmpeg-formats},
@url{ffmpeg-devices.html,ffmpeg-devices},
@url{ffmpeg-protocols.html,ffmpeg-protocols},
@url{ffmpeg-filters.html,ffmpeg-filters}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffserver(1),
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
@end ifnothtml
@include authors.texi
@include syntax.texi
@include decoders.texi
@include demuxers.texi
@include protocols.texi
@include indevs.texi
@ignore
@setfilename ffprobe
@settitle ffprobe media prober
@c man begin SEEALSO
ffmpeg(1), ffplay(1), ffserver(1) and the FFmpeg HTML documentation
@c man end
@c man begin AUTHORS
The FFmpeg developers
@c man end
@end ignore
@bye

View File

@@ -59,7 +59,6 @@
<xsd:attribute name="pkt_duration" type="xsd:long" />
<xsd:attribute name="pkt_duration_time" type="xsd:float"/>
<xsd:attribute name="pkt_pos" type="xsd:long" />
<xsd:attribute name="pkt_size" type="xsd:int" />
<!-- audio attributes -->
<xsd:attribute name="sample_fmt" type="xsd:string"/>
@@ -87,24 +86,9 @@
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="streamDispositionType">
<xsd:attribute name="default" type="xsd:int" use="required" />
<xsd:attribute name="dub" type="xsd:int" use="required" />
<xsd:attribute name="original" type="xsd:int" use="required" />
<xsd:attribute name="comment" type="xsd:int" use="required" />
<xsd:attribute name="lyrics" type="xsd:int" use="required" />
<xsd:attribute name="karaoke" type="xsd:int" use="required" />
<xsd:attribute name="forced" type="xsd:int" use="required" />
<xsd:attribute name="hearing_impaired" type="xsd:int" use="required" />
<xsd:attribute name="visual_impaired" type="xsd:int" use="required" />
<xsd:attribute name="clean_effects" type="xsd:int" use="required" />
<xsd:attribute name="attached_pic" type="xsd:int" use="required" />
</xsd:complexType>
<xsd:complexType name="streamType">
<xsd:sequence>
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
<xsd:element name="disposition" type="ffprobe:streamDispositionType" minOccurs="0" maxOccurs="1"/>
</xsd:sequence>
<xsd:attribute name="index" type="xsd:int" use="required"/>
@@ -188,7 +172,6 @@
<xsd:attribute name="minor" type="xsd:int" use="required"/>
<xsd:attribute name="micro" type="xsd:int" use="required"/>
<xsd:attribute name="version" type="xsd:int" use="required"/>
<xsd:attribute name="ident" type="xsd:string" use="required"/>
</xsd:complexType>
<xsd:complexType name="libraryVersionsType">

View File

@@ -25,6 +25,10 @@ MaxBandwidth 1000
# '-' is the standard output.
CustomLog -
# Suppress that if you want to launch ffserver as a daemon.
NoDaemon
##################################################################
# Definition of the live feeds. Each live feed contains one video
# and/or audio sequence coming from an ffmpeg encoder or another

View File

@@ -9,9 +9,15 @@
@contents
@chapter Synopsis
@chapter Synopsys
ffserver [@var{options}]
The generic syntax is:
@example
@c man begin SYNOPSIS
ffserver [options]
@c man end
@end example
@chapter Description
@c man begin DESCRIPTION
@@ -22,6 +28,11 @@ several live feeds, streaming from files and time shifting on live feeds
(you can seek to positions in the past on each live feed, provided you
specify a big enough feed storage in ffserver.conf).
ffserver runs in daemon mode by default; that is, it puts itself in
the background and detaches from its TTY, unless it is launched in
debug mode or a NoDaemon option is specified in the configuration
file.
This documentation covers only the streaming aspects of ffserver /
ffmpeg. All questions about parameters for ffmpeg, codec questions,
etc. are not covered here. Read @file{ffmpeg.html} for more
@@ -81,7 +92,7 @@ to make it work correctly.
@section What do I need?
I use Linux on a 900 MHz Duron with a cheap Bt848 based TV capture card. I'm
I use Linux on a 900 MHz Duron with a cheapo Bt848 based TV capture card. I'm
using stock Linux 2.4.17 with the stock drivers. [Actually that isn't true,
I needed some special drivers for my motherboard-based sound card.]
@@ -227,19 +238,6 @@ You use this by adding the ?date= to the end of the URL for the stream.
For example: @samp{http://localhost:8080/test.asf?date=2002-07-26T23:05:00}.
@c man end
@section What is FFM, FFM2
FFM and FFM2 are formats used by ffserver. They allow storing a wide variety of
video and audio streams and encoding options, and can store a moving time segment
of an infinite movie or a whole movie.
FFM is version specific, and there is limited compatibility of FFM files
generated by one version of ffmpeg/ffserver and another version of
ffmpeg/ffserver. It may work but its not guaranteed to work.
FFM2 is extensible while maintaining compatibility and should work between
differing versions of tools. FFM2 is the default.
@chapter Options
@c man begin OPTIONS
@@ -256,40 +254,26 @@ within the various <Stream> sections. Since ffserver will not launch
any ffmpeg instances, you will have to launch them manually.
@item -d
Enable debug mode. This option increases log verbosity, directs log
messages to stdout.
messages to stdout and causes ffserver to run in the foreground
rather than as a daemon.
@end table
@c man end
@chapter See Also
@ifhtml
The @file{doc/ffserver.conf} example,
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe},
@url{ffmpeg-utils.html,ffmpeg-utils},
@url{ffmpeg-scaler.html,ffmpeg-scaler},
@url{ffmpeg-resampler.html,ffmpeg-resampler},
@url{ffmpeg-codecs.html,ffmpeg-codecs},
@url{ffmpeg-bitstream-filters,ffmpeg-bitstream-filters},
@url{ffmpeg-formats.html,ffmpeg-formats},
@url{ffmpeg-devices.html,ffmpeg-devices},
@url{ffmpeg-protocols.html,ffmpeg-protocols},
@url{ffmpeg-filters.html,ffmpeg-filters}
@end ifhtml
@ifnothtml
The @file{doc/ffserver.conf} example, ffmpeg(1), ffplay(1), ffprobe(1),
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffserver
@settitle ffserver video server
@c man begin SEEALSO
ffmpeg(1), ffplay(1), ffprobe(1), the @file{ffserver.conf}
example and the FFmpeg HTML documentation
@c man end
@c man begin AUTHORS
The FFmpeg developers
@c man end
@end ignore
@bye

View File

@@ -52,7 +52,7 @@ Buffer references ownership and permissions
point to only a part of a video buffer.
A reference is usually obtained as input to the start_frame or
filter_frame method or requested using the ff_get_video_buffer or
filter_samples method or requested using the ff_get_video_buffer or
ff_get_audio_buffer functions. A new reference on an existing buffer can
be created with the avfilter_ref_buffer. A reference is destroyed using
the avfilter_unref_bufferp function.
@@ -68,14 +68,14 @@ Buffer references ownership and permissions
Here are the (fairly obvious) rules for reference ownership:
* A reference received by the start_frame or filter_frame method
* A reference received by the start_frame or filter_samples method
belongs to the corresponding filter.
Special exception: for video references: the reference may be used
internally for automatic copying and must not be destroyed before
end_frame; it can be given away to ff_start_frame.
* A reference passed to ff_start_frame or ff_filter_frame is given
* A reference passed to ff_start_frame or ff_filter_samples is given
away and must no longer be used.
* A reference created with avfilter_ref_buffer belongs to the code that
@@ -93,16 +93,16 @@ Buffer references ownership and permissions
The AVFilterLink structure has a few AVFilterBufferRef fields. Here are
the rules to handle them:
* cur_buf is set before the start_frame and filter_frame methods to
* cur_buf is set before the start_frame and filter_samples methods to
the same reference given as argument to the methods and belongs to the
destination filter of the link. If it has not been cleared after
end_frame or filter_frame, libavfilter will automatically destroy
end_frame or filter_samples, libavfilter will automatically destroy
the reference; therefore, any filter that needs to keep the reference
for longer must set cur_buf to NULL.
* out_buf belongs to the source filter of the link and can be used to
store a reference to the buffer that has been sent to the destination.
If it is not NULL after end_frame or filter_frame, libavfilter will
If it is not NULL after end_frame or filter_samples, libavfilter will
automatically destroy the reference.
If a video input pad does not have a start_frame method, the default
@@ -179,7 +179,7 @@ Buffer references ownership and permissions
with the WRITE permission.
* Filters that intend to keep a reference after the filtering process
is finished (after end_frame or filter_frame returns) must have the
is finished (after end_frame or filter_samples returns) must have the
PRESERVE permission on it and remove the WRITE permission if they
create a new reference to give it away.
@@ -198,7 +198,7 @@ Frame scheduling
Simple filters that output one frame for each input frame should not have
to worry about it.
start_frame / filter_frame
start_frame / filter_samples
----------------------------
These methods are called when a frame is pushed to the filter's input.
@@ -233,7 +233,7 @@ Frame scheduling
This method is called when a frame is wanted on an output.
For an input, it should directly call start_frame or filter_frame on
For an input, it should directly call start_frame or filter_samples on
the corresponding output.
For a filter, if there are queued frames already ready, one of these
@@ -266,4 +266,4 @@ Frame scheduling
Note that, except for filters that can have queued frames, request_frame
does not push frames: it requests them from its input, and as a reaction,
the start_frame / filter_frame method will be called and do the work.
the start_frame / filter_samples method will be called and do the work.

File diff suppressed because it is too large Load Diff

View File

@@ -24,7 +24,7 @@ instructions. To enable using OpenJPEG in FFmpeg, pass @code{--enable-libopenjp
@file{./configure}.
@section OpenCORE, VisualOn, and Fraunhofer libraries
@section OpenCORE and VisualOn libraries
Spun off Google Android sources, OpenCore, VisualOn and Fraunhofer
libraries provide encoders for a number of audio codecs.
@@ -32,14 +32,9 @@ libraries provide encoders for a number of audio codecs.
@float NOTE
OpenCORE and VisualOn libraries are under the Apache License 2.0
(see @url{http://www.apache.org/licenses/LICENSE-2.0} for details), which is
incompatible to the LGPL version 2.1 and GPL version 2. You have to
incompatible with the LGPL version 2.1 and GPL version 2. You have to
upgrade FFmpeg's license to LGPL version 3 (or if you have enabled
GPL components, GPL version 3) by passing @code{--enable-version3} to configure in
order to use it.
The Fraunhofer AAC library is licensed under a license incompatible with the GPL
and is not known to be compatible with the LGPL. Therefore, you have to pass
@code{--enable-nonfree} to configure to use it.
GPL components, GPL version 3) to use it.
@end float
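For example, a configure invocation that upgrades the license and enables the
OpenCORE AMR libraries might look like the following sketch (the exact set of
@code{--enable-*} switches depends on which libraries you actually have
installed):
@example
./configure --enable-version3 --enable-libopencore-amrnb --enable-libopencore-amrwb
@end example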
@subsection OpenCORE AMR
@@ -152,15 +147,9 @@ library:
@tab Multimedia format used in game Heart Of Darkness.
@item Apple HTTP Live Streaming @tab @tab X
@item Artworx Data Format @tab @tab X
@item AFC @tab @tab X
@tab Audio format used on the Nintendo Gamecube.
@item ASF @tab X @tab X
@item AST @tab X @tab X
@tab Audio format used on the Nintendo Wii.
@item AVI @tab X @tab X
@item AVISynth @tab @tab X
@item AVR @tab @tab X
@tab Audio format used on Mac.
@item AVS @tab @tab X
@tab Multimedia format used by the Creature Shock game.
@item Beam Software SIFF @tab @tab X
@@ -174,8 +163,6 @@ library:
@tab Used in Z and Z95 games.
@item Brute Force & Ignorance @tab @tab X
@tab Used in the game Flash Traffic: City of Angels.
@item BRSTM @tab @tab X
@tab Audio format used on the Nintendo Wii.
@item BWF @tab X @tab X
@item CRI ADX @tab X @tab X
@tab Audio-only format used in console video games.
@@ -206,7 +193,6 @@ library:
@item Electronic Arts cdata @tab @tab X
@item Electronic Arts Multimedia @tab @tab X
@tab Used in various EA games; files have extensions like WVE and UV2.
@item Ensoniq Paris Audio File @tab @tab X
@item FFM (FFserver live feed) @tab X @tab X
@item Flash (SWF) @tab X @tab X
@item Flash 9 (AVM2) @tab X @tab X
@@ -221,7 +207,7 @@ library:
@item G.723.1 @tab X @tab X
@item G.729 BIT @tab X @tab X
@item G.729 raw @tab @tab X
@item GIF Animation @tab X @tab X
@item GIF Animation @tab X @tab
@item GXF @tab X @tab X
@tab General eXchange Format SMPTE 360M, used by Thomson Grass Valley
playout servers.
@@ -241,13 +227,11 @@ library:
@tab A format generated by IndigoVision 8000 video server.
@item IVF (On2) @tab X @tab X
@tab A format used by libvpx
@item IRCAM @tab X @tab X
@item LATM @tab X @tab X
@item LMLM4 @tab @tab X
@tab Used by Linux Media Labs MPEG-4 PCI boards
@item LOAS @tab @tab X
@tab contains LATM multiplexed AAC audio
@item LVF @tab @tab X
@item LXF @tab @tab X
@tab VR native stream format, used by Leitch/Harris' video servers.
@item Matroska @tab X @tab X
@@ -258,8 +242,6 @@ library:
@tab Used in Sim City 3000; file extension .xa.
@item MD Studio @tab @tab X
@item Metal Gear Solid: The Twin Snakes @tab @tab X
@item Megalux Frame @tab @tab X
@tab Used by Megalux Ultimate Paint
@item Mobotix .mxg @tab @tab X
@item Monkey's Audio @tab @tab X
@item Motion Pixels MVI @tab @tab X
@@ -287,7 +269,6 @@ library:
@tab SMPTE 386M, D-10/IMX Mapping.
@item NC camera feed @tab @tab X
@tab NC (AVIP NC4600) camera streams
@item NIST SPeech HEader REsources @tab @tab X
@item NTT TwinVQ (VQF) @tab @tab X
@tab Nippon Telegraph and Telephone Corporation TwinVQ.
@item Nullsoft Streaming Video @tab @tab X
@@ -296,7 +277,6 @@ library:
@tab NUT Open Container Format
@item Ogg @tab X @tab X
@item Playstation Portable PMP @tab @tab X
@item Portable Voice Format @tab @tab X
@item TechnoTrend PVA @tab @tab X
@tab Used by TechnoTrend DVB PCI boards.
@item QCP @tab @tab X
@@ -307,7 +287,6 @@ library:
@item raw Dirac @tab X @tab X
@item raw DNxHD @tab X @tab X
@item raw DTS @tab X @tab X
@item raw DTS-HD @tab @tab X
@item raw E-AC-3 @tab X @tab X
@item raw FLAC @tab X @tab X
@item raw GSM @tab @tab X
@@ -325,7 +304,6 @@ library:
@item raw video @tab X @tab X
@item raw id RoQ @tab X @tab
@item raw Shorten @tab @tab X
@item raw TAK @tab @tab X
@item raw TrueHD @tab X @tab X
@item raw VC-1 @tab @tab X
@item raw PCM A-law @tab X @tab X
@@ -367,7 +345,6 @@ library:
@item SDP @tab @tab X
@item Sega FILM/CPK @tab @tab X
@tab Used in many Sega Saturn console games.
@item Silicon Graphics Movie @tab @tab X
@item Sierra SOL @tab @tab X
@tab .sol files used in Sierra Online games.
@item Sierra VMD @tab @tab X
@@ -381,7 +358,7 @@ library:
@item Sony OpenMG (OMA) @tab X @tab X
@tab Audio format used in Sony Sonic Stage and Sony Vegas.
@item Sony PlayStation STR @tab @tab X
@item Sony Wave64 (W64) @tab X @tab X
@item Sony Wave64 (W64) @tab @tab X
@item SoX native format @tab X @tab X
@item SUN AU format @tab X @tab X
@item Text files @tab @tab X
@@ -391,7 +368,6 @@ library:
@tab Tiertex .seq files used in the DOS CD-ROM version of the game Flashback.
@item True Audio @tab @tab X
@item VC-1 test bitstream @tab X @tab X
@item Vivo @tab @tab X
@item WAV @tab X @tab X
@item WavPack @tab X @tab X
@item WebM @tab X @tab X
@@ -426,8 +402,6 @@ following image formats are supported:
@tab Only uncompressed GIFs are generated.
@item BMP @tab X @tab X
@tab Microsoft BMP image
@item PIX @tab @tab X
@tab PIX is an image format used in the Argonaut BRender engine.
@item DPX @tab X @tab X
@tab Digital Picture Exchange
@item EXR @tab @tab X
@@ -465,8 +439,6 @@ following image formats are supported:
@tab Targa (.TGA) image format
@item XBM @tab X @tab X
@tab X BitMap image format
@item XFace @tab X @tab X
@tab X-Face image format
@item XWD @tab X @tab X
@tab X Window Dump image format
@end multitable
@@ -482,6 +454,8 @@ following image formats are supported:
@item 4X Movie @tab @tab X
@tab Used in certain computer games.
@item 8088flex TMV @tab @tab X
@item 8SVX exponential @tab @tab X
@item 8SVX fibonacci @tab @tab X
@item A64 multicolor @tab X @tab
@tab Creates video suitable to be played on a commodore 64 (multicolor mode).
@item Amazing Studio PAF Video @tab @tab X
@@ -648,8 +622,6 @@ following image formats are supported:
@tab fourcc: VP60,VP61,VP62
@item VP8 @tab E @tab X
@tab fourcc: VP80, encoding supported through external library libvpx
@item Pinnacle TARGA CineWave YUV16 @tab @tab X
@tab fourcc: Y216
@item Prores @tab @tab X
@tab fourcc: apch,apcn,apcs,apco
@item Q-team QPEG @tab @tab X
@@ -673,11 +645,8 @@ following image formats are supported:
@tab Texture dictionaries used by the Renderware Engine.
@item RL2 video @tab @tab X
@tab used in some games by Entertainment Software Partners
@item SGI RLE 8-bit @tab @tab X
@item Sierra VMD video @tab @tab X
@tab Used in Sierra VMD files.
@item Silicon Graphics Motion Video Compressor 1 (MVC1) @tab @tab X
@item Silicon Graphics Motion Video Compressor 2 (MVC2) @tab @tab X
@item Smacker video @tab @tab X
@tab Video encoding used in Smacker.
@item SMPTE VC-1 @tab @tab X
@@ -737,8 +706,7 @@ following image formats are supported:
@multitable @columnfractions .4 .1 .1 .4
@item Name @tab Encoding @tab Decoding @tab Comments
@item 8SVX exponential @tab @tab X
@item 8SVX fibonacci @tab @tab X
@item 8SVX audio @tab @tab X
@item AAC+ @tab E @tab X
@tab encoding supported through external library libaacplus
@item AAC @tab E @tab X
@@ -769,19 +737,19 @@ following image formats are supported:
@item ADPCM IMA Westwood @tab @tab X
@item ADPCM ISS IMA @tab @tab X
@tab Used in FunCom games.
@item ADPCM IMA Dialogic @tab @tab X
@item ADPCM IMA Duck DK3 @tab @tab X
@tab Used in some Sega Saturn console games.
@item ADPCM IMA Duck DK4 @tab @tab X
@tab Used in some Sega Saturn console games.
@item ADPCM Microsoft @tab X @tab X
@item ADPCM MS IMA @tab X @tab X
@item ADPCM Nintendo Gamecube AFC @tab @tab X
@item ADPCM Nintendo Gamecube THP @tab @tab X
@item ADPCM QT IMA @tab X @tab X
@item ADPCM SEGA CRI ADX @tab X @tab X
@tab Used in Sega Dreamcast games.
@item ADPCM Shockwave Flash @tab X @tab X
@item ADPCM SMJPEG IMA @tab @tab X
@tab Used in certain Loki game ports.
@item ADPCM Sound Blaster Pro 2-bit @tab @tab X
@item ADPCM Sound Blaster Pro 2.6-bit @tab @tab X
@item ADPCM Sound Blaster Pro 4-bit @tab @tab X
@@ -845,15 +813,9 @@ following image formats are supported:
@item Musepack SV7 @tab @tab X
@item Musepack SV8 @tab @tab X
@item Nellymoser Asao @tab X @tab X
@item Opus @tab E @tab E
@tab supported through external library libopus
@item PCM A-law @tab X @tab X
@item PCM mu-law @tab X @tab X
@item PCM signed 8-bit planar @tab X @tab X
@item PCM signed 16-bit big-endian planar @tab X @tab X
@item PCM signed 16-bit little-endian planar @tab X @tab X
@item PCM signed 24-bit little-endian planar @tab X @tab X
@item PCM signed 32-bit little-endian planar @tab X @tab X
@item PCM 16-bit little-endian planar @tab @tab X
@item PCM 32-bit floating point big-endian @tab X @tab X
@item PCM 32-bit floating point little-endian @tab X @tab X
@item PCM 64-bit floating point big-endian @tab X @tab X
@@ -897,7 +859,6 @@ following image formats are supported:
@tab experimental codec
@item Speex @tab E @tab E
@tab supported through external library libspeex
@item TAK (Tom's lossless Audio Kompressor) @tab @tab X
@item True Audio (TTA) @tab @tab X
@item TrueHD @tab @tab X
@tab Used in HD-DVD and Blu-Ray discs.
@@ -926,25 +887,17 @@ performance on systems without hardware floating point support).
@multitable @columnfractions .4 .1 .1 .1 .1
@item Name @tab Muxing @tab Demuxing @tab Encoding @tab Decoding
@item 3GPP Timed Text @tab @tab @tab X @tab X
@item AQTitle @tab @tab X @tab @tab X
@item SSA/ASS @tab X @tab X @tab X @tab X
@item DVB @tab X @tab X @tab X @tab X
@item DVD @tab X @tab X @tab X @tab X
@item JACOsub @tab X @tab X @tab @tab X
@item MicroDVD @tab X @tab X @tab @tab X
@item MPL2 @tab @tab X @tab @tab X
@item MPsub (MPlayer) @tab @tab X @tab @tab X
@item PGS @tab @tab @tab @tab X
@item PJS (Phoenix) @tab @tab X @tab @tab X
@item RealText @tab @tab X @tab @tab X
@item SAMI @tab @tab X @tab @tab X
@item SSA/ASS @tab X @tab X @tab X @tab X
@item SubRip (SRT) @tab X @tab X @tab X @tab X
@item SubViewer v1 @tab @tab X @tab @tab X
@item SubViewer @tab @tab X @tab @tab X
@item TED Talks captions @tab @tab X @tab @tab X
@item VobSub (IDX+SUB) @tab @tab X @tab @tab X
@item VPlayer @tab @tab X @tab @tab X
@item 3GPP Timed Text @tab @tab @tab X @tab X
@item WebVTT @tab @tab X @tab @tab X
@item XSUB @tab @tab @tab X @tab X
@end multitable

View File

@@ -224,13 +224,6 @@ Set maximum size of buffer for incoming data, in frames. For DV, this
is an exact value. For HDV, it is not frame exact, since HDV does
not have a fixed frame size.
@item dvguid
Select the capture device by specifying its GUID. Capturing will only
be performed from the specified device and fails if no device with the
given GUID is found. This is useful for selecting the input if multiple
devices are connected at the same time.
Look at /sys/bus/firewire/devices to find out the GUIDs.
@end table
@subsection Examples
@@ -327,12 +320,6 @@ label, but all the others need to be specified explicitly.
If not specified defaults to the filename specified for the input
device.
@item graph_file
Set the filename of the filtergraph to be read and sent to the other
filters. Syntax of the filtergraph is the same as the one specified by
the option @var{graph}.
@end table
@subsection Examples
@@ -341,14 +328,14 @@ the option @var{graph}.
@item
Create a color video stream and play it back with @command{ffplay}:
@example
ffplay -f lavfi -graph "color=c=pink [out0]" dummy
ffplay -f lavfi -graph "color=pink [out0]" dummy
@end example
@item
As the previous example, but use filename for specifying the graph
description, and omit the "out0" label:
@example
ffplay -f lavfi color=c=pink
ffplay -f lavfi color=pink
@end example
@item

View File

@@ -24,7 +24,7 @@ a mail for every change to every issue.
The subscription URL for the ffmpeg-trac list is:
http(s)://ffmpeg.org/mailman/listinfo/ffmpeg-trac
The URL of the webinterface of the tracker is:
http(s)://trac.ffmpeg.org
http(s)://ffmpeg.org/trac/ffmpeg
Type:
-----

View File

@@ -1,48 +0,0 @@
\input texinfo @c -*- texinfo -*-
@settitle Libavcodec Documentation
@titlepage
@center @titlefont{Libavcodec Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
The libavcodec library provides a generic encoding/decoding framework
and contains multiple decoders and encoders for audio, video and
subtitle streams, and several bitstream filters.
The shared architecture provides various services ranging from bit
stream I/O to DSP optimizations, and makes it suitable for
implementing robust and fast codecs as well as for experimentation.
@c man end DESCRIPTION
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-codecs.html,ffmpeg-codecs}, @url{ffmpeg-bitstream-filters.html,bitstream-filters},
@url{libavutil.html,libavutil}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1),
libavutil(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename libavcodec
@settitle media streams decoding and encoding library
@end ignore
@bye

View File

@@ -1,45 +0,0 @@
\input texinfo @c -*- texinfo -*-
@settitle Libavdevice Documentation
@titlepage
@center @titlefont{Libavdevice Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
The libavdevice library provides a generic framework for grabbing from
and rendering to many common multimedia input/output devices, and
supports several input and output devices, including Video4Linux2,
VfW, DShow, and ALSA.
@c man end DESCRIPTION
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-devices.html,ffmpeg-devices},
@url{libavutil.html,libavutil}, @url{libavcodec.html,libavcodec}, @url{libavformat.html,libavformat}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-devices(1),
libavutil(3), libavcodec(3), libavformat(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename libavdevice
@settitle multimedia device handling library
@end ignore
@bye

View File

@@ -1,44 +0,0 @@
\input texinfo @c -*- texinfo -*-
@settitle Libavfilter Documentation
@titlepage
@center @titlefont{Libavfilter Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
The libavfilter library provides a generic audio/video filtering
framework containing several filters, sources and sinks.
@c man end DESCRIPTION
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-filters.html,ffmpeg-filters},
@url{libavutil.html,libavutil}, @url{libswscale.html,libswscale}, @url{libswresample.html,libswresample},
@url{libavcodec.html,libavcodec}, @url{libavformat.html,libavformat}, @url{libavdevice.html,libavdevice}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-filters(1),
libavutil(3), libswscale(3), libswresample(3), libavcodec(3), libavformat(3), libavdevice(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename libavfilter
@settitle multimedia filtering library
@end ignore
@bye

View File

@@ -1,48 +0,0 @@
\input texinfo @c -*- texinfo -*-
@settitle Libavformat Documentation
@titlepage
@center @titlefont{Libavformat Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
The libavformat library provides a generic framework for multiplexing
and demultiplexing (muxing and demuxing) audio, video and subtitle
streams. It encompasses multiple muxers and demuxers for multimedia
container formats.
It also supports several input and output protocols to access a media
resource.
@c man end DESCRIPTION
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-formats.html,ffmpeg-formats}, @url{ffmpeg-protocols.html,ffmpeg-protocols},
@url{libavutil.html,libavutil}, @url{libavcodec.html,libavcodec}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-formats(1), ffmpeg-protocols(1),
libavutil(3), libavcodec(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename libavformat
@settitle multimedia muxing and demuxing library
@end ignore
@bye

View File

@@ -1,44 +0,0 @@
\input texinfo @c -*- texinfo -*-
@settitle Libavutil Documentation
@titlepage
@center @titlefont{Libavutil Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
The libavutil library is a utility library to aid portable
multimedia programming. It contains safe portable string functions,
random number generators, data structures, additional mathematics
functions, cryptography and multimedia related functionality (like
enumerations for pixel and sample formats).
@c man end DESCRIPTION
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-utils.html,ffmpeg-utils}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-utils(1)
@end ifnothtml
@include authors.texi
@ignore
@setfilename libavutil
@settitle multimedia-biased utility library
@end ignore
@bye

View File

@@ -1,70 +0,0 @@
\input texinfo @c -*- texinfo -*-
@settitle Libswresample Documentation
@titlepage
@center @titlefont{Libswresample Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
The libswresample library performs highly optimized audio resampling,
rematrixing and sample format conversion operations.
Specifically, this library performs the following conversions:
@itemize
@item
@emph{Resampling}: is the process of changing the audio rate, for
example from a high sample rate of 44100Hz to 8000Hz. Audio
conversion from high to low sample rate is a lossy process. Several
resampling options and algorithms are available.
@item
@emph{Format conversion}: is the process of converting the type of
samples, for example from 16-bit signed samples to unsigned 8-bit or
float samples. It also handles packing conversion, when passing from
packed layout (all samples belonging to distinct channels interleaved
in the same buffer), to planar layout (all samples belonging to the
same channel stored in a dedicated buffer or "plane").
@item
@emph{Rematrixing}: is the process of changing the channel layout, for
example from stereo to mono. When the input channels cannot be mapped
to the output streams, the process is lossy, since it involves
different gain factors and mixing.
@end itemize
Various other audio conversions (e.g. stretching and padding) are
enabled through dedicated options.
@c man end DESCRIPTION
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-resampler.html,ffmpeg-resampler},
@url{libavutil.html,libavutil}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-resampler(1),
libavutil(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename libswresample
@settitle audio resampling library
@end ignore
@bye

View File

@@ -1,63 +0,0 @@
\input texinfo @c -*- texinfo -*-
@settitle Libswscale Documentation
@titlepage
@center @titlefont{Libswscale Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
The libswscale library performs highly optimized image scaling and
colorspace and pixel format conversion operations.
Specifically, this library performs the following conversions:
@itemize
@item
@emph{Rescaling}: is the process of changing the video size. Several
rescaling options and algorithms are available. This is usually a
lossy process.
@item
@emph{Pixel format conversion}: is the process of converting the image
format and colorspace of the image, for example from planar YUV420P to
RGB24 packed. It also handles packing conversion, that is converts
from packed layout (all pixels belonging to distinct planes
interleaved in the same buffer), to planar layout (all samples
belonging to the same plane stored in a dedicated buffer or "plane").
This is usually a lossy process in case the source and destination
colorspaces differ.
@end itemize
@c man end DESCRIPTION
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-scaler.html,ffmpeg-scaler},
@url{libavutil.html,libavutil}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-scaler(1),
libavutil(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename libswscale
@settitle video scaling and pixel format conversion library
@end ignore
@bye

View File

@@ -45,10 +45,8 @@ Example of copyright notice:
Files that have MIPS copyright notice in them:
===============================================
* libavutil/mips/
float_dsp_mips.c
libm_mips.h
* libavcodec/mips/
ac3dsp_mips.c
acelp_filters_mips.c
acelp_vectors_mips.c
amrwbdec_mips.c

View File

@@ -18,23 +18,6 @@ enabled muxers.
A description of some of the currently available muxers follows.
@anchor{aiff}
@section aiff
Audio Interchange File Format muxer.
It accepts the following options:
@table @option
@item write_id3v2
Enable ID3v2 tag writing when set to 1. Default is 0 (disabled).
@item id3v2_version
Select the ID3v2 version to write. Currently only versions 3 and 4 (aka
ID3v2.3 and ID3v2.4) are supported. The default is version 4.
@end table
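For example, a sketch of a command writing an AIFF file with an ID3v2.3 tag
block (filenames are illustrative):
@example
ffmpeg -i INPUT -write_id3v2 1 -id3v2_version 3 out.aif
@end example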
@anchor{crc}
@section crc
@@ -146,32 +129,6 @@ ffmpeg -i INPUT -f framemd5 -
See also the @ref{md5} muxer.
@anchor{hls}
@section hls
Apple HTTP Live Streaming muxer that segments MPEG-TS according to
the HTTP Live Streaming specification.
It creates a playlist file and numbered segment files. The output
filename specifies the playlist filename; the segment filenames
receive the same basename as the playlist, a sequential number and
a .ts extension.
@example
ffmpeg -i in.nut out.m3u8
@end example
@table @option
@item -hls_time @var{seconds}
Set the segment length in seconds.
@item -hls_list_size @var{size}
Set the maximum number of playlist entries.
@item -hls_wrap @var{wrap}
Set the number after which index wraps.
@item -start_number @var{number}
Start the sequence from @var{number}.
@end table
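Putting these options together, a sketch of a command that produces roughly
10-second segments and keeps at most 6 entries in the playlist (filenames are
illustrative):
@example
ffmpeg -i in.nut -hls_time 10 -hls_list_size 6 out.m3u8
@end example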
@anchor{ico}
@section ico
@@ -257,11 +214,6 @@ Note also that the pattern must not necessarily contain "%d" or
ffmpeg -i in.avi -f image2 -frames:v 1 img.jpeg
@end example
@table @option
@item -start_number @var{number}
Start the sequence from @var{number}.
@end table
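For example, to start numbering the exported images at 100 instead of the
default (filenames are illustrative):
@example
ffmpeg -i in.avi -f image2 -start_number 100 img-%03d.jpeg
@end example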
The image muxer supports the .Y.U.V image file format. This format is
special in that each image frame consists of three files, one for
each of the YUV420P components. To read or write this image file format,
@@ -300,8 +252,7 @@ See also the @ref{framemd5} muxer.
The mov/mp4/ismv muxer supports fragmentation. Normally, a MOV/MP4
file has all the metadata about all packets stored in one location
(written at the end of the file, it can be moved to the start for
better playback by adding @var{faststart} to the @var{movflags}, or
using the @command{qt-faststart} tool). A fragmented
better playback using the @command{qt-faststart} tool). A fragmented
file consists of a number of fragments, where packets and metadata
about these packets are stored together. Writing a fragmented
file has the advantage that the file is decodable even if the
@@ -359,10 +310,6 @@ more efficient), but with this option set, the muxer writes one moof/mdat
pair for each track, making it easier to separate tracks.
This option is implicitly set when writing ismv (Smooth Streaming) files.
@item -movflags faststart
Run a second pass moving the moov atom on top of the file. This
operation can take a while, and will not work in various situations such
as fragmented output, thus it is not enabled by default.
@end table
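For example, a sketch of a second-pass remux that moves the moov atom to the
beginning of the file without re-encoding (filenames are illustrative):
@example
ffmpeg -i in.mp4 -c copy -movflags faststart out.mp4
@end example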
Smooth Streaming content can be pushed in real time to a publishing
@@ -510,9 +457,7 @@ streaming output formats, i.e. which do not require global headers,
and is recommended for outputting e.g. to MPEG transport stream segments.
@code{ssegment} is a shorter alias for @code{stream_segment}.
Every segment starts with a keyframe of the selected reference stream,
which is set through the @option{reference_stream} option.
Every segment starts with a video keyframe, if a video stream is present.
Note that if you want accurate splitting for a video file, you need to
make the input key frames correspond to the exact splitting times
expected by the segmenter, or the segment muxer will start the new
@@ -528,13 +473,6 @@ the option @var{segment_list}. The list type is specified by the
The segment muxer supports the following options:
@table @option
@item reference_stream @var{specifier}
Set the reference stream, as specified by the string @var{specifier}.
If @var{specifier} is set to @code{auto}, the reference is chosen
automatically. Otherwise it must be a stream specifier (see the ``Stream
specifiers'' chapter in the ffmpeg manual) which specifies the
reference stream. The default value is ``auto''.
@item segment_format @var{format}
Override the inner container format; by default it is guessed from the filename
extension.
@@ -624,29 +562,11 @@ the specified time and the time set by @var{force_key_frames}.
@item segment_times @var{times}
Specify a list of split points. @var{times} contains a list of comma
separated duration specifications, in increasing order.
@item segment_frames @var{frames}
Specify a list of split video frame numbers. @var{frames} contains a
list of comma separated integer numbers, in increasing order.
This option makes the muxer start a new segment whenever a reference
stream key frame is found and the sequential number (starting from 0)
of the frame is greater than or equal to the next value in the list.
@item segment_wrap @var{limit}
Wrap around segment index once it reaches @var{limit}.
@item segment_start_number @var{number}
Set the sequence number of the first segment. Defaults to @code{0}.
@item reset_timestamps @var{1|0}
Reset timestamps at the beginning of each segment, so that each segment
will start with near-zero timestamps. It is meant to ease the playback
of the generated segments. May not work with some combinations of
muxers/codecs. It is set to @code{0} by default.
@end table
@section Examples
Some examples follow.
@itemize
@item
@@ -670,19 +590,12 @@ option to force key frames in the input at the specified location, together
with the segment option @var{segment_time_delta} to account for
possible rounding performed when setting key frame times.
@example
ffmpeg -i in.mkv -force_key_frames 1,2,3,5,8,13,21 -codec:v mpeg4 -codec:a pcm_s16le -map 0 \
ffmpeg -i in.mkv -force_key_frames 1,2,3,5,8,13,21 -vcodec mpeg4 -acodec pcm_s16le -map 0 \
-f segment -segment_list out.csv -segment_times 1,2,3,5,8,13,21 -segment_time_delta 0.05 out%03d.nut
@end example
In order to force key frames on the input file, transcoding is
required.
@item
Segment the input file by splitting the input file according to the
frame numbers sequence specified with the @var{segment_frames} option:
@example
ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_frames 100,200,300,500,800 out%03d.nut
@end example
@item
To convert the @file{in.mkv} to TS segments using the @code{libx264}
and @code{libfaac} encoders:
@@ -728,11 +641,10 @@ Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3
@end example
To attach a picture to an mp3 file select both the audio and the picture stream
with @code{map}:
Attach a picture to an mp3:
@example
ffmpeg -i input.mp3 -i cover.png -c copy -map 0 -map 1
-metadata:s:v title="Album cover" -metadata:s:v comment="Cover (Front)" out.mp3
ffmpeg -i input.mp3 -i cover.png -c copy -metadata:s:v title="Album cover"
-metadata:s:v comment="Cover (Front)" out.mp3
@end example
@c man end MUXERS

View File

@@ -1,138 +0,0 @@
\input texinfo @c -*- texinfo -*-
@settitle NUT
@titlepage
@center @titlefont{NUT}
@end titlepage
@top
@contents
@chapter Description
NUT is a low overhead generic container format. It stores audio, video,
subtitle and user-defined streams in a simple, yet efficient, way.
It was created by a group of FFmpeg and MPlayer developers in 2003
and was finalized in 2008.
The official nut specification is at svn://svn.mplayerhq.hu/nut
In case of any differences between this text and the official specification,
the official specification shall prevail.
@chapter Container-specific codec tags
@section Generic raw YUVA formats
Since many exotic planar YUVA pixel formats are not considered by
the AVI/QuickTime FourCC lists, the following scheme is adopted for
representing them.
The first two bytes can contain the values:
Y1 = only Y
Y2 = Y+A
Y3 = YUV
Y4 = YUVA
The third byte represents the width and height chroma subsampling
values for the UV planes, that is the amount to shift the luma
width/height right to find the chroma width/height.
The fourth byte is the number of bits used (8, 16, ...).
If the order of bytes is inverted, that means that each component has
to be read big-endian.
@section Raw Audio
@multitable @columnfractions .4 .4
@item ALAW @tab A-LAW
@item ULAW @tab MU-LAW
@item P<type><interleaving><bits> @tab little-endian PCM
@item <bits><interleaving><type>P @tab big-endian PCM
@end multitable
<type> is S for signed integer, U for unsigned integer, F for IEEE float
<interleaving> is D for default, P is for planar.
<bits> is 8/16/24/32
@example
PFD[32] would for example be signed 32 bit little-endian IEEE float
@end example
@section Subtitles
@multitable @columnfractions .4 .4
@item UTF8 @tab Raw UTF-8
@item SSA[0] @tab SubStation Alpha
@item DVDS @tab DVD subtitles
@item DVBS @tab DVB subtitles
@end multitable
@section Raw Data
@multitable @columnfractions .4 .4
@item UTF8 @tab Raw UTF-8
@end multitable
@section Codecs
@multitable @columnfractions .4 .4
@item 3IV1 @tab non-compliant MPEG-4 generated by old 3ivx
@item ASV1 @tab Asus Video
@item ASV2 @tab Asus Video 2
@item CVID @tab Cinepak
@item CYUV @tab Creative YUV
@item DIVX @tab non-compliant MPEG-4 generated by old DivX
@item DUCK @tab Truemotion 1
@item FFV1 @tab FFmpeg video 1
@item FFVH @tab FFmpeg Huffyuv
@item H261 @tab ITU H.261
@item H262 @tab ITU H.262
@item H263 @tab ITU H.263
@item H264 @tab ITU H.264
@item HFYU @tab Huffyuv
@item I263 @tab Intel H.263
@item IV31 @tab Indeo 3.1
@item IV32 @tab Indeo 3.2
@item IV50 @tab Indeo 5.0
@item LJPG @tab ITU JPEG (lossless)
@item MJLS @tab ITU JPEG-LS
@item MJPG @tab ITU JPEG
@item MPG4 @tab MS MPEG-4v1 (not ISO MPEG-4)
@item MP42 @tab MS MPEG-4v2
@item MP43 @tab MS MPEG-4v3
@item MP4V @tab ISO MPEG-4 Part 2 Video (from old encoders)
@item mpg1 @tab ISO MPEG-1 Video
@item mpg2 @tab ISO MPEG-2 Video
@item MRLE @tab MS RLE
@item MSVC @tab MS Video 1
@item RT21 @tab Indeo 2.1
@item RV10 @tab RealVideo 1.0
@item RV20 @tab RealVideo 2.0
@item RV30 @tab RealVideo 3.0
@item RV40 @tab RealVideo 4.0
@item SNOW @tab FFmpeg Snow
@item SVQ1 @tab Sorenson Video 1
@item SVQ3 @tab Sorenson Video 3
@item theo @tab Xiph Theora
@item TM20 @tab Truemotion 2.0
@item UMP4 @tab non-compliant MPEG-4 generated by UB Video MPEG-4
@item VCR1 @tab ATI VCR1
@item VP30 @tab VP 3.0
@item VP31 @tab VP 3.1
@item VP50 @tab VP 5.0
@item VP60 @tab VP 6.0
@item VP61 @tab VP 6.1
@item VP62 @tab VP 6.2
@item VP70 @tab VP 7.0
@item WMV1 @tab MS WMV7
@item WMV2 @tab MS WMV8
@item WMV3 @tab MS WMV9
@item WV1F @tab non-compliant MPEG-4 generated by ?
@item WVC1 @tab VC-1
@item XVID @tab non-compliant MPEG-4 generated by old Xvid
@item XVIX @tab non-compliant MPEG-4 generated by old Xvid with interlacing bug
@end multitable

View File

@@ -253,7 +253,7 @@ Optimization guide for ARM11 (used in Nokia N800 Internet Tablet):
http://infocenter.arm.com/help/topic/com.arm.doc.ddi0211j/DDI0211J_arm1136_r1p5_trm.pdf
Optimization guide for Intel XScale (used in Sharp Zaurus PDA):
http://download.intel.com/design/intelxscale/27347302.pdf
Intel Wireless MMX 2 Coprocessor: Programmers Reference Manual
Intel Wireless MMX2 Coprocessor: Programmers Reference Manual
http://download.intel.com/design/intelxscale/31451001.pdf
PowerPC-specific:

View File

@@ -1,8 +1,8 @@
\input texinfo @c -*- texinfo -*-
@settitle Platform Specific Information
@settitle Platform Specific information
@titlepage
@center @titlefont{Platform Specific Information}
@center @titlefont{Platform Specific information}
@end titlepage
@top
@@ -51,9 +51,8 @@ The toolchain provided with Xcode is sufficient to build the basic
unaccelerated code.
Mac OS X on PowerPC or ARM (iPhone) requires a preprocessor from
@url{https://github.com/FFmpeg/gas-preprocessor} or
@url{http://github.com/yuvi/gas-preprocessor} to build the optimized
assembler functions. Put the Perl script somewhere
assembler functions. Just download the Perl script and put it somewhere
in your PATH, FFmpeg's configure will pick it up automatically.
Mac OS X on amd64 and x86 requires @command{yasm} to build most of the
@@ -78,15 +77,30 @@ For information about compiling FFmpeg on OS/2 see
@chapter Windows
To get help and instructions for building FFmpeg under Windows, check out
the FFmpeg Windows Help Forum at @url{http://ffmpeg.zeranoe.com/forum/}.
the FFmpeg Windows Help Forum at
@url{http://ffmpeg.arrozcru.org/}.
@section Native Windows compilation using MinGW or MinGW-w64
@section Native Windows compilation
FFmpeg can be built to run natively on Windows using the MinGW or MinGW-w64
toolchains. Install the latest versions of MSYS and MinGW or MinGW-w64 from
@url{http://www.mingw.org/} or @url{http://mingw-w64.sourceforge.net/}.
You can find detailed installation instructions in the download section and
the FAQ.
FFmpeg can be built to run natively on Windows using the MinGW tools. Install
the latest versions of MSYS and MinGW from @url{http://www.mingw.org/}.
You can find detailed installation instructions in the download
section and the FAQ.
FFmpeg does not build out-of-the-box with the packages the automated MinGW
installer provides. It also requires coreutils to be installed and many other
packages updated to the latest version. The minimum versions for some packages
are listed below:
@itemize
@item bash 3.1
@item msys-make 3.81-2 (note: not mingw32-make)
@item w32api 3.13
@item mingw-runtime 3.15
@end itemize
FFmpeg automatically passes @code{-fno-common} to the compiler to work around
a GCC bug (see @url{http://gcc.gnu.org/bugzilla/show_bug.cgi?id=37216}).
Notes:
@@ -107,105 +121,149 @@ libavformat) as DLLs.
@end itemize
@section Microsoft Visual C++
@section Microsoft Visual C++ compatibility
FFmpeg can be built with MSVC using a C99-to-C89 conversion utility and
wrapper.
As stated in the FAQ, FFmpeg will not compile under MSVC++. However, if you
want to use the libav* libraries in your own applications, you can still
compile those applications using MSVC++. But the libav* libraries you link
to @emph{must} be built with MinGW. However, you will not be able to debug
inside the libav* libraries, since MSVC++ does not recognize the debug
symbols generated by GCC.
We strongly recommend you to move over from MSVC++ to MinGW tools.
You will need the following prerequisites:
This description of how to use the FFmpeg libraries with MSVC++ is based on
Microsoft Visual C++ 2005 Express Edition. If you have a different version,
you might have to modify the procedures slightly.
@itemize
@item @uref{http://download.videolan.org/pub/contrib/c99-to-c89/, C99-to-C89 Converter & Wrapper}
@item @uref{http://code.google.com/p/msinttypes/, msinttypes}
@item @uref{http://www.mingw.org/, MSYS}
@item @uref{http://yasm.tortall.net/, YASM}
@item @uref{http://gnuwin32.sourceforge.net/packages/bc.htm, bc for Windows} if
you want to run @uref{fate.html, FATE}.
@end itemize
@subsection Using static libraries
To set up a proper MSVC environment in MSYS, you simply need to run
@code{msys.bat} from the Visual Studio command prompt.
Assuming you have just built and installed FFmpeg in @file{/usr/local}:
Place @code{makedef}, @code{c99wrap.exe}, @code{c99conv.exe}, and @code{yasm.exe}
somewhere in your @code{PATH}.
@enumerate
Next, make sure @code{inttypes.h} and any other headers and libs you want to use
are located in a spot that MSVC can see. Do so by modifying the @code{LIB} and
@code{INCLUDE} environment variables to include the @strong{Windows} paths to
these directories. Alternatively, you can try and use the
@code{--extra-cflags}/@code{--extra-ldflags} configure options.
@item Create a new console application ("File / New / Project") and then
select "Win32 Console Application". On the appropriate page of the
Application Wizard, uncheck the "Precompiled headers" option.
Finally, run:
@item Write the source code for your application, or, for testing, just
copy the code from an existing sample application into the source file
that MSVC++ has already created for you. For example, you can copy
@file{libavformat/output-example.c} from the FFmpeg distribution.
@item Open the "Project / Properties" dialog box. In the "Configuration"
combo box, select "All Configurations" so that the changes you make will
affect both debug and release builds. In the tree view on the left hand
side, select "C/C++ / General", then edit the "Additional Include
Directories" setting to contain the path where the FFmpeg includes were
installed (i.e. @file{c:\msys\1.0\local\include}).
Do not add MinGW's include directory here, or the include files will
conflict with MSVC's.
@item Still in the "Project / Properties" dialog box, select
"Linker / General" from the tree view and edit the
"Additional Library Directories" setting to contain the @file{lib}
directory where FFmpeg was installed (i.e. @file{c:\msys\1.0\local\lib}),
the directory where MinGW libs are installed (i.e. @file{c:\mingw\lib}),
and the directory where MinGW's GCC libs are installed
(i.e. @file{C:\mingw\lib\gcc\mingw32\4.2.1-sjlj}). Then select
"Linker / Input" from the tree view, and add the files @file{libavformat.a},
@file{libavcodec.a}, @file{libavutil.a}, @file{libmingwex.a},
@file{libgcc.a}, and any other libraries you used (i.e. @file{libz.a})
to the end of "Additional Dependencies".
@item Now, select "C/C++ / Code Generation" from the tree view. Select
"Debug" in the "Configuration" combo box. Make sure that "Runtime
Library" is set to "Multi-threaded Debug DLL". Then, select "Release" in
the "Configuration" combo box and make sure that "Runtime Library" is
set to "Multi-threaded DLL".
@item Click "OK" to close the "Project / Properties" dialog box.
@item MSVC++ lacks some C99 header files that are fundamental for FFmpeg.
Get msinttypes from @url{http://code.google.com/p/msinttypes/downloads/list}
and install it in MSVC++'s include directory
(i.e. @file{C:\Program Files\Microsoft Visual Studio 8\VC\include}).
@item MSVC++ also does not understand the @code{inline} keyword used by
FFmpeg, so you must add this line before @code{#include}ing libav*:
@example
#define inline _inline
@end example
@item Build your application, everything should work.
@end enumerate
@subsection Using shared libraries
This is how to create DLL and LIB files that are compatible with MSVC++:
@enumerate
@item Add a call to @file{vcvars32.bat} (which sets up the environment
variables for the Visual C++ tools) as the first line of @file{msys.bat}.
The standard location for @file{vcvars32.bat} is
@file{C:\Program Files\Microsoft Visual Studio 8\VC\bin\vcvars32.bat},
and the standard location for @file{msys.bat} is @file{C:\msys\1.0\msys.bat}.
If this corresponds to your setup, add the following line as the first line
of @file{msys.bat}:
@example
./configure --toolchain=msvc
call "C:\Program Files\Microsoft Visual Studio 8\VC\bin\vcvars32.bat"
@end example
Alternatively, you may start the @file{Visual Studio 2005 Command Prompt},
and run @file{c:\msys\1.0\msys.bat} from there.
@item Within the MSYS shell, run @code{lib.exe}. If you get a help message
from @file{Microsoft (R) Library Manager}, this means your environment
variables are set up correctly, the @file{Microsoft (R) Library Manager}
is on the path and will be used by FFmpeg to create
MSVC++-compatible import libraries.
@item Build FFmpeg with
@example
./configure --enable-shared
make
make install
@end example
If you wish to compile shared libraries, add @code{--enable-shared} to your
configure options. Note that due to the way MSVC handles DLL imports and
exports, you cannot compile static and shared libraries at the same time, and
enabling shared libraries will automatically disable the static ones.
Your install path (@file{/usr/local/} by default) should now have the
necessary DLL and LIB files under the @file{bin} directory.
Notes:
@itemize
@item It is possible that coreutils' @code{link.exe} conflicts with MSVC's linker.
You can find out by running @code{which link} to see which @code{link.exe} you
are using. If it is located at @code{/bin/link.exe}, then you have the wrong one
in your @code{PATH}. Either move or remove that copy, or make sure MSVC's
@code{link.exe} takes precedence in your @code{PATH} over coreutils'.
@item If you wish to build with zlib support, you will have to grab a compatible
zlib binary from somewhere, with an MSVC import lib, or if you wish to link
statically, you can follow the instructions below to build a compatible
@code{zlib.lib} with MSVC. Regardless of which method you use, you must still
follow step 3, or compilation will fail.
@enumerate
@item Grab the @uref{http://zlib.net/, zlib sources}.
@item Edit @code{win32/Makefile.msc} so that it uses -MT instead of -MD, since
this is how FFmpeg is built as well.
@item Edit @code{zconf.h} and remove its inclusion of @code{unistd.h}. This gets
erroneously included when building FFmpeg.
@item Run @code{nmake -f win32/Makefile.msc}.
@item Move @code{zlib.lib}, @code{zconf.h}, and @code{zlib.h} to somewhere MSVC
can see.
@end enumerate
@item FFmpeg has been tested with Visual Studio 2010 and 2012, Pro and Express.
Anything else is not officially supported.
Alternatively, build the libraries with a cross compiler, according to
the instructions below in @ref{Cross compilation for Windows with Linux}.
@end itemize
@subsection Linking to FFmpeg with Microsoft Visual C++
If you plan to link with MSVC-built static libraries, you will need
to make sure you have @code{Runtime Library} set to
@code{Multi-threaded (/MT)} in your project's settings.
To use those files with MSVC++, do the same as you would do with
the static libraries, as described above. But in Step 4,
you should only need to add the directory where the LIB files are installed
(i.e. @file{c:\msys\usr\local\bin}). This is not a typo, the LIB files are
installed in the @file{bin} directory. And instead of adding the static
libraries (@file{libxxx.a} files) you should add the MSVC import libraries
(@file{avcodec.lib}, @file{avformat.lib}, and
@file{avutil.lib}). Note that you should not use the GCC import
libraries (@file{libxxx.dll.a} files), as these will give you undefined
reference errors. There should be no need for @file{libmingwex.a},
@file{libgcc.a}, and @file{wsock32.lib}, nor any other external library
statically linked into the DLLs.
FFmpeg headers do not declare global data for Windows DLLs through the usual
dllexport/dllimport interface. Such data will be exported properly while
building, but to use them in your MSVC code you will have to edit the
building, but to use them in your MSVC++ code you will have to edit the
appropriate headers and mark the data as dllimport. For example, in
libavutil/pixdesc.h you should have:
@example
extern __declspec(dllimport) const AVPixFmtDescriptor av_pix_fmt_descriptors[];
@end example
You will also need to define @code{inline} to something MSVC understands:
@example
#define inline __inline
@end example
Also note that, as stated in @strong{Microsoft Visual C++}, you will need
an MSVC-compatible @uref{http://code.google.com/p/msinttypes/, inttypes.h}.
If you plan on using import libraries created by dlltool, you must
set @code{References} to @code{No (/OPT:NOREF)} under the linker optimization
settings, otherwise the resulting binaries will fail during runtime.
This is not required when using import libraries generated by @code{lib.exe}.
Note that using import libraries created by dlltool requires
the linker optimization option to be set to
"References: Keep Unreferenced Data (@code{/OPT:NOREF})", otherwise
the resulting binaries will fail during runtime. This isn't
required when using import libraries generated by lib.exe.
This issue is reported upstream at
@url{http://sourceware.org/bugzilla/show_bug.cgi?id=12633}.
@@ -214,12 +272,12 @@ To create import libraries that work with the @code{/OPT:REF} option
@enumerate
@item Open the @emph{Visual Studio Command Prompt}.
@item Open @emph{Visual Studio 2005 Command Prompt}.
Alternatively, in a normal command line prompt, call @file{vcvars32.bat}
which sets up the environment variables for the Visual C++ tools
(the standard location for this file is something like
@file{C:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\bin\vcvars32.bat}).
(the standard location for this file is
@file{C:\Program Files\Microsoft Visual Studio 8\VC\bin\vcvars32.bat}).
@item Enter the @file{bin} directory where the created LIB and DLL files
are stored.
@@ -304,67 +362,4 @@ and for a build with shared libraries
./configure --target-os=mingw32 --enable-shared --disable-static --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin
@end example
@chapter Plan 9
The native @uref{http://plan9.bell-labs.com/plan9/, Plan 9} compiler
does not implement all the C99 features needed by FFmpeg so the gcc
port must be used. Furthermore, a few items missing from the C
library and shell environment need to be fixed.
@itemize
@item GNU awk, grep, make, and sed
Working packages of these tools can be found at
@uref{http://code.google.com/p/ports2plan9/downloads/list, ports2plan9}.
They can be installed with @uref{http://9front.org/, 9front's} @code{pkg}
utility by setting @code{pkgpath} to
@code{http://ports2plan9.googlecode.com/files/}.
@item Missing/broken @code{head} and @code{printf} commands
Replacements adequate for building FFmpeg can be found in the
@code{compat/plan9} directory. Place these somewhere they will be
found by the shell. These are not full implementations of the
commands and are @emph{not} suitable for general use.
@item Missing C99 @code{stdint.h} and @code{inttypes.h}
Replacement headers are available from
@url{http://code.google.com/p/plan9front/issues/detail?id=152}.
@item Missing or non-standard library functions
Some functions in the C library are missing or incomplete. The
@code{@uref{http://ports2plan9.googlecode.com/files/gcc-apelibs-1207.tbz,
gcc-apelibs-1207}} package from
@uref{http://code.google.com/p/ports2plan9/downloads/list, ports2plan9}
includes an updated C library, but installing the full package gives
unusable executables. Instead, keep the files from @code{gccbin.tgz}
under @code{/386/lib/gnu}. From the @code{libc.a} archive in the
@code{gcc-apelibs-1207} package, extract the following object files and
turn them into a library:
@itemize
@item @code{strerror.o}
@item @code{strtoll.o}
@item @code{snprintf.o}
@item @code{vsnprintf.o}
@item @code{vfprintf.o}
@item @code{_IO_getc.o}
@item @code{_IO_putc.o}
@end itemize
Use the @code{--extra-libs} option of @code{configure} to inform the
build system of this library (a command sketch is given after this list).
@item FPU exceptions enabled by default
Unlike most other systems, Plan 9 enables FPU exceptions by default.
These must be disabled before calling any FFmpeg functions. While the
included tools will do this automatically, other users of the
libraries must do it themselves.
@end itemize
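As a rough sketch of the library-extraction step described above (the archive
name @file{libapefix.a} and the paths are only illustrative):
@example
ar x libc.a strerror.o strtoll.o snprintf.o vsnprintf.o vfprintf.o _IO_getc.o _IO_putc.o
ar rcs libapefix.a strerror.o strtoll.o snprintf.o vsnprintf.o vfprintf.o _IO_getc.o _IO_putc.o
./configure --extra-libs="$(pwd)/libapefix.a"
@end example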
@bye

View File

@@ -75,15 +75,6 @@ ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
Note that you may need to escape the character "|" which is special for
many shells.
@section data
Data in-line in the URI. See @url{http://en.wikipedia.org/wiki/Data_URI_scheme}.
For example, to convert a GIF file given inline with @command{ffmpeg}:
@example
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
@end example
@section file
File access protocol.

View File

@@ -23,7 +23,7 @@ Let's consider the problem of minimizing:
rate is the filesize
distortion is the quality
lambda is a fixed value chosen as a tradeoff between quality and filesize
lambda is a fixed value choosen as a tradeoff between quality and filesize
Is this equivalent to finding the best quality for a given max
filesize? The answer is yes. For each filesize limit there is some lambda
factor for which minimizing above will get you the best quality (using your

View File

@@ -31,7 +31,7 @@ Special Converter v
v
Output
Planar/Packed conversion is done when needed during sample format conversion.
Planar/Packed convertion is done when needed during sample format convertion
Every step can be skipped without memcpy when it is not needed.
Either Resampling or Rematrixing can be performed first, depending on which
way is faster.
@@ -39,8 +39,8 @@ The Buffers are needed for resampling due to resamplng being a process that
requires future and past data; it thus inevitably introduces a delay when
used.
Internally 32bit float and 16bit int are currently supported; other formats can
easily be added.
easily be added
Externally all sample formats in packed and planar configuration are supported
It's also trivial to add special converters for common cases.
If only sample format and/or packed/planar conversion is needed, it
Its also trivial to add special converters for common cases
If only sample format and or packed/planar convertion is needed it
is performed from input to output directly in a single pass with no intermediates.

View File

@@ -58,7 +58,7 @@ Input to YUV Converter
Horizontal scaler
There are several horizontal scalers. A special case worth mentioning is
the fast bilinear scaler that is made of runtime-generated MMXEXT code
the fast bilinear scaler that is made of runtime-generated MMX2 code
using specially tuned pshufw instructions.
The remaining scalers are specially-tuned for various filter lengths.
They scale 8-bit unsigned planar data to 16-bit signed planar data.
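From the ffmpeg command line this path can be exercised through the scale
filter; a sketch that requests the fast bilinear algorithm (filenames are
illustrative):

  ffmpeg -i input.mp4 -vf scale=640:360 -sws_flags fast_bilinear output.mp4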

View File

@@ -1,81 +1,9 @@
@chapter Syntax
@c man begin SYNTAX
This section documents the syntax and formats employed by the FFmpeg
libraries and tools.
@anchor{quoting_and_escaping}
@section Quoting and escaping
FFmpeg adopts the following quoting and escaping mechanism, unless
explicitly specified. The following rules are applied:
@itemize
@item
@code{'} and @code{\} are special characters (respectively used for
quoting and escaping). In addition to them, there might be other
special characters depending on the specific syntax where the escaping
and quoting are employed.
@item
A special character is escaped by prefixing it with a '\'.
@item
All characters enclosed between '' are included literally in the
parsed string. The quote character @code{'} itself cannot be quoted,
so you may need to close the quote and escape it.
@item
Leading and trailing whitespaces, unless escaped or quoted, are
removed from the parsed string.
@end itemize
Note that you may need to add a second level of escaping when using
the command line or a script, which depends on the syntax of the
adopted shell language.
The function @code{av_get_token} defined in
@file{libavutil/avstring.h} can be used to parse a token quoted or
escaped according to the rules defined above.
The tool @file{tools/ffescape} in the FFmpeg source tree can be used
to automatically quote or escape a string in a script.
@subsection Examples
@itemize
@item
Escape the string @code{Crime d'Amour} containing the @code{'} special
character:
@example
Crime d\'Amour
@end example
@item
The string above contains a quote, so the @code{'} needs to be escaped
when quoting it:
@example
'Crime d'\''Amour'
@end example
@item
Include leading or trailing whitespaces using quoting:
@example
' this string starts and ends with whitespaces '
@end example
@item
Escaping and quoting can be mixed together:
@example
' The string '\'string\'' is a string '
@end example
@item
To include a literal @code{\} you can use either escaping or quoting:
@example
'c:\foo' can be written as c:\\foo
@end example
@end itemize
When evaluating specific formats, FFmpeg uses internal library parsing
functions, shared by the tools. This section documents the syntax of
some of these formats.
@anchor{date syntax}
@section Date

View File

@@ -6,18 +6,73 @@ sub FFmpeg_end_section($$)
$EXTRA_HEAD =
'<link rel="icon" href="favicon.png" type="image/png" />
<link rel="stylesheet" type="text/css" href="default.css" />
';
$CSS_LINES = $ENV{"FFMPEG_CSS"} || <<EOT;
<link rel="stylesheet" type="text/css" href="default.css" />
$CSS_LINES = <<EOT;
<style type="text/css">
<!--
a.summary-letter { text-decoration: none }
a { color: #2D6198; }
a:visited { color: #884488; }
h1 a, h2 a, h3 a { text-decoration: inherit; color: inherit; }
p { margin-left: 1em; margin-right: 1em; }
table { margin-left: 2em; }
pre { margin-left: 2em; }
#footer { text-align: center; }
#body { margin-left: 1em; margin-right: 1em; }
body { background-color: #313131; margin: 0; }
#container {
background-color: white;
color: #202020;
margin-left: 1em;
margin-right: 1em;
}
h1 {
background-color: #7BB37B;
border: 1px solid #6A996A;
color: #151515;
font-size: 1.2em;
padding-bottom: 0.2em;
padding-left: 0.4em;
padding-top: 0.2em;
}
h2 {
color: #313131;
font-size: 1.2em;
}
h3 {
color: #313131;
font-size: 0.8em;
margin-bottom: -8px;
}
.note {
margin: 1em;
border: 1px solid #bbc9d8;
background-color: #dde1e1;
}
.important {
margin: 1em;
border: 1px solid #d26767;
background-color: #f8e1e1;
}
-->
</style>
EOT
my $TEMPLATE_HEADER = $ENV{"FFMPEG_HEADER"} || <<EOT;
<link rel="icon" href="favicon.png" type="image/png" />
</head>
<body>
<div id="container">
EOT
my $FFMPEG_NAVBAR = $ENV{"FFMPEG_NAVBAR"} || '';
$AFTER_BODY_OPEN =
'<div id="container">' .
"\n$FFMPEG_NAVBAR\n" .
'<div id="body">';
$PRE_BODY_CLOSE = '</div></div>';
@@ -28,11 +83,9 @@ $print_page_foot = \&FFmpeg_print_page_foot;
sub FFmpeg_print_page_foot($$)
{
my $fh = shift;
my $program_string = defined &T2H_DEFAULT_program_string ?
T2H_DEFAULT_program_string() : program_string();
print $fh '<footer class="footer pagination-right">' . "\n";
print $fh '<span class="label label-info">' . $program_string;
print $fh "</span></footer></div>\n";
print $fh '<div id="footer">' . "\n";
T2H_DEFAULT_print_page_foot($fh);
print $fh "</div>\n";
}
$float = \&FFmpeg_float;
@@ -54,11 +107,11 @@ sub FFmpeg_float($$$$)
if ($caption =~ /NOTE/)
{
$class = "alert alert-info";
$class = "note";
}
elsif ($caption =~ /IMPORTANT/)
{
$class = "alert alert-warning";
$class = "important";
}
return '<div class="float ' . $class . '">' . "$label\n" . $text . '</div>';
@@ -81,7 +134,7 @@ sub FFmpeg_print_page_head($$)
$longtitle = "FFmpeg documentation : " . $longtitle;
print $fh <<EOT;
<!DOCTYPE html>
$DOCTYPE
<html>
$Texi2HTML::THISDOC{'copying'}<!-- Created on $Texi2HTML::THISDOC{today} by $Texi2HTML::THISDOC{program} -->
<!--
@@ -97,7 +150,11 @@ $description
<meta name="Generator" content="$Texi2HTML::THISDOC{program}">
$encoding
$CSS_LINES
$TEMPLATE_HEADER
$EXTRA_HEAD
</head>
<body $BODYTEXT>
$AFTER_BODY_OPEN
EOT
}

$output = 0;
$skipping = 0;
%chapters = ();
@chapters_sequence = ();
$chapter = "";
%sects = ();
@sects_sequence = ();
$section = "";
@icstack = ();
@endwstack = ();
@skstack = ();
@@ -116,24 +116,18 @@ INF: while(<$inf>) {
die "cannot open $1: $!\n";
};
/^\@chapter\s+([A-Za-z ]+)/ and do {
# close old chapter
$chapters{$chapter_name} .= postprocess($chapter) if ($chapter_name);
# start new chapter
$chapter_name = $1, push (@chapters_sequence, $chapter_name) unless $skipping;
$chapters{$chapter_name} = "" unless exists $chapters{$chapter_name};
$chapter = "";
$output = 1;
# Look for blocks surrounded by @c man begin SECTION ... @c man end.
# This really oughta be @ifman ... @end ifman and the like, but such
# would require rev'ing all other Texinfo translators.
/^\@c\s+man\s+begin\s+([A-Za-z ]+)/ and $sect = $1, push (@sects_sequence, $sect), $output = 1, next;
/^\@c\s+man\s+end/ and do {
$sects{$sect} = "" unless exists $sects{$sect};
$sects{$sect} .= postprocess($section);
$section = "";
$output = 0;
next;
};
/^\@bye/ and do {
# close old chapter
$chapters{$chapter_name} .= postprocess($chapter) if ($chapter_name);
last INF;
};
# handle variables
/^\@set\s+([a-zA-Z0-9_-]+)\s*(.*)$/ and do {
$defs{$1} = $2;
@@ -156,20 +150,20 @@ INF: while(<$inf>) {
# Ignore @end foo, where foo is not an operation which may
# cause us to skip, if we are presently skipping.
my $ended = $1;
next if $skipping && $ended !~ /^(?:ifset|ifclear|ignore|menu|iftex|ifhtml|ifnothtml)$/;
next if $skipping && $ended !~ /^(?:ifset|ifclear|ignore|menu|iftex)$/;
die "\@end $ended without \@$ended at line $.\n" unless defined $endw;
die "\@$endw ended by \@end $ended at line $.\n" unless $ended eq $endw;
$endw = pop @endwstack;
if ($ended =~ /^(?:ifset|ifclear|ignore|menu|iftex|ifhtml|ifnothtml)$/) {
if ($ended =~ /^(?:ifset|ifclear|ignore|menu|iftex)$/) {
$skipping = pop @skstack;
next;
} elsif ($ended =~ /^(?:example|smallexample|display)$/) {
$shift = "";
$_ = ""; # need a paragraph break
} elsif ($ended =~ /^(?:itemize|enumerate|(?:multi|[fv])?table)$/) {
} elsif ($ended =~ /^(?:itemize|enumerate|[fv]?table)$/) {
$_ = "\n=back\n";
$ic = pop @icstack;
} else {
@@ -196,11 +190,11 @@ INF: while(<$inf>) {
next;
};
/^\@(ignore|menu|iftex|ifhtml|ifnothtml)\b/ and do {
/^\@(ignore|menu|iftex)\b/ and do {
push @endwstack, $endw;
push @skstack, $skipping;
$endw = $1;
$skipping = $endw !~ /ifnothtml/;
$skipping = 1;
next;
};
@@ -217,6 +211,7 @@ INF: while(<$inf>) {
s/\@TeX\{\}/TeX/g;
s/\@pounds\{\}/\#/g;
s/\@minus(?:\{\})?/-/g;
s/\\,/,/g;
# Now the ones that have to be replaced by special escapes
# (which will be turned back into text by unmunge())
@@ -269,16 +264,15 @@ INF: while(<$inf>) {
$endw = "enumerate";
};
/^\@((?:multi|[fv])?table)\s+(\@[a-z]+)/ and do {
/^\@([fv]?table)\s+(\@[a-z]+)/ and do {
push @endwstack, $endw;
push @icstack, $ic;
$endw = $1;
$ic = $2;
$ic =~ s/\@(?:samp|strong|key|gcctabopt|option|env|command)/B/;
$ic =~ s/\@(?:samp|strong|key|gcctabopt|option|env)/B/;
$ic =~ s/\@(?:code|kbd)/C/;
$ic =~ s/\@(?:dfn|var|emph|cite|i)/I/;
$ic =~ s/\@(?:file)/F/;
$ic =~ s/\@(?:columnfractions)//;
$_ = "\n=over 4\n";
};
@@ -289,21 +283,6 @@ INF: while(<$inf>) {
$_ = ""; # need a paragraph break
};
/^\@item\s+(.*\S)\s*$/ and $endw eq "multitable" and do {
my $columns = $1;
$columns =~ s/\@tab/ : /;
$_ = "\n=item B&LT;". $columns ."&GT;\n";
};
/^\@tab\s+(.*\S)\s*$/ and $endw eq "multitable" and do {
my $columns = $1;
$columns =~ s/\@tab/ : /;
$_ = " : ". $columns;
$chapter =~ s/\n+\s+$//;
};
/^\@itemx?\s*(.+)?$/ and do {
if (defined $1) {
# Entity escapes prevent munging by the <> processing below.
@@ -315,7 +294,7 @@ INF: while(<$inf>) {
}
};
$chapter .= $shift.$_."\n";
$section .= $shift.$_."\n";
}
# End of current file.
close($inf);
@@ -324,15 +303,16 @@ $inf = pop @instack;
die "No filename or title\n" unless defined $fn && defined $tl;
$chapters{NAME} = "$fn \- $tl\n";
$chapters{FOOTNOTES} .= "=back\n" if exists $chapters{FOOTNOTES};
$sects{NAME} = "$fn \- $tl\n";
$sects{FOOTNOTES} .= "=back\n" if exists $sects{FOOTNOTES};
unshift @chapters_sequence, "NAME";
for $chapter (@chapters_sequence) {
if (exists $chapters{$chapter}) {
$head = uc($chapter);
unshift @sects_sequence, "NAME";
for $sect (@sects_sequence) {
if(exists $sects{$sect}) {
$head = $sect;
$head =~ s/SEEALSO/SEE ALSO/;
print "=head1 $head\n\n";
print scalar unmunge ($chapters{$chapter});
print scalar unmunge ($sects{$sect});
print "\n";
}
}
@@ -377,7 +357,6 @@ sub postprocess
s/\(?\@xref\{(?:[^\}]*)\}(?:[^.<]|(?:<[^<>]*>))*\.\)?//g;
s/\s+\(\@pxref\{(?:[^\}]*)\}\)//g;
s/;\s+\@pxref\{(?:[^\}]*)\}//g;
s/\@ref\{(?:[^,\}]*,)(?:[^,\}]*,)([^,\}]*).*\}/$1/g;
s/\@ref\{([^\}]*)\}/$1/g;
s/\@noindent\s*//g;
s/\@refill//g;
@@ -387,7 +366,7 @@ sub postprocess
# @uref can take one, two, or three arguments, with different
# semantics each time. @url and @email are just like @uref with
# one argument, for our purposes.
s/\@(?:uref|url|email)\{([^\},]*),?[^\}]*\}/&lt;B<$1>&gt;/g;
s/\@(?:uref|url|email)\{([^\},]*)\}/&lt;B<$1>&gt;/g;
s/\@uref\{([^\},]*),([^\},]*)\}/$2 (C<$1>)/g;
s/\@uref\{([^\},]*),([^\},]*),([^\},]*)\}/$3/g;
@@ -431,13 +410,13 @@ sub unmunge
sub add_footnote
{
unless (exists $chapters{FOOTNOTES}) {
$chapters{FOOTNOTES} = "\n=over 4\n\n";
unless (exists $sects{FOOTNOTES}) {
$sects{FOOTNOTES} = "\n=over 4\n\n";
}
$chapters{FOOTNOTES} .= "=item $fnno.\n\n"; $fnno++;
$chapters{FOOTNOTES} .= $_[0];
$chapters{FOOTNOTES} .= "\n\n";
$sects{FOOTNOTES} .= "=item $fnno.\n\n"; $fnno++;
$sects{FOOTNOTES} .= $_[0];
$sects{FOOTNOTES} .= "\n\n";
}
# stolen from Symbol.pm
@@ -85,8 +85,8 @@ here are some edges we could choose from:
/ \
O-----2--4--O
Finding the new best paths and scores for each point of our new column is
trivial given we know the previous column best paths and scores:
Finding the new best pathes and scores for each point of our new column is
trivial given we know the previous column best pathes and scores:
O-----0-----8
\
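The column update this passage describes is the usual dynamic-programming step; a compact sketch in C follows (the names, the int scoring type and the edge-cost callback are illustrative assumptions, not code from the file being diffed, and "best" is taken here to mean the minimum total cost).

#include <limits.h>

/* For every point j of the new column, the best score is the minimum over
 * all points i of the previous column of (best score of i + cost of the
 * edge i->j); remembering the minimizing i records the best path. Scores
 * are assumed to stay well below INT_MAX. */
static void viterbi_column_update(const int prev_score[], int nb_prev,
                                  int new_score[], int best_prev[], int nb_new,
                                  int (*edge_cost)(int from, int to))
{
    int i, j;

    for (j = 0; j < nb_new; j++) {
        new_score[j] = INT_MAX;
        best_prev[j] = -1;
        for (i = 0; i < nb_prev; i++) {
            int score = prev_score[i] + edge_cost(i, j);
            if (score < new_score[j]) {
                new_score[j] = score;
                best_prev[j] = i;
            }
        }
    }
}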
357
ffmpeg.c
@@ -43,7 +43,7 @@
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavutil/opt.h"
#include "libavutil/channel_layout.h"
#include "libavutil/audioconvert.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/colorspace.h"
@@ -69,7 +69,6 @@
# include "libavfilter/buffersink.h"
#if HAVE_SYS_RESOURCE_H
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#elif HAVE_GETPROCESSTIMES
@@ -109,7 +108,7 @@ const int program_birth_year = 2000;
static FILE *vstats_file;
static void do_video_stats(OutputStream *ost, int frame_size);
static void do_video_stats(AVFormatContext *os, OutputStream *ost, int frame_size);
static int64_t getutime(void);
static int run_as_daemon = 0;
@@ -152,8 +151,6 @@ static struct termios oldtty;
static int restore_tty;
#endif
static void free_input_threads(void);
/* sub2video hack:
Convert subtitles to video with alpha to insert them in filter graphs.
@@ -211,29 +208,17 @@ static void sub2video_update(InputStream *ist, AVSubtitle *sub)
AVFilterBufferRef *ref = ist->sub2video.ref;
int8_t *dst;
int dst_linesize;
int num_rects, i;
int64_t pts, end_pts;
int i;
int64_t pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ist->st->time_base);
if (!ref)
return;
if (sub) {
pts = av_rescale_q(sub->pts + sub->start_display_time * 1000,
AV_TIME_BASE_Q, ist->st->time_base);
end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000,
AV_TIME_BASE_Q, ist->st->time_base);
num_rects = sub->num_rects;
} else {
pts = ist->sub2video.end_pts;
end_pts = INT64_MAX;
num_rects = 0;
}
dst = ref->data [0];
dst_linesize = ref->linesize[0];
memset(dst, 0, h * dst_linesize);
for (i = 0; i < num_rects; i++)
for (i = 0; i < sub->num_rects; i++)
sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
sub2video_push_ref(ist, pts);
ist->sub2video.end_pts = end_pts;
}
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
@@ -256,8 +241,6 @@ static void sub2video_heartbeat(InputStream *ist, int64_t pts)
/* do not send the heartbeat frame if the subtitle is already ahead */
if (pts2 <= ist2->sub2video.last_pts)
continue;
if (pts2 >= ist2->sub2video.end_pts)
sub2video_update(ist2, NULL);
for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
if (nb_reqs)
@@ -371,10 +354,8 @@ static int read_key(void)
}
if (is_pipe) {
/* When running under a GUI, you will end here. */
if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
// input pipe may have been closed by the program that ran ffmpeg
if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL))
return -1;
}
//Read it
if(nchars != 0) {
read(0, &ch, 1);
@@ -397,7 +378,7 @@ static int decode_interrupt_cb(void *ctx)
const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
static void exit_program(void)
void av_noreturn exit_program(int ret)
{
int i, j;
@@ -443,9 +424,6 @@ static void exit_program(void)
av_freep(&output_streams[i]->logfile_prefix);
av_freep(&output_streams[i]);
}
#if HAVE_PTHREADS
free_input_threads();
#endif
for (i = 0; i < nb_input_files; i++) {
avformat_close_input(&input_files[i]->ctx);
av_freep(&input_files[i]);
@@ -476,7 +454,10 @@ static void exit_program(void)
if (received_sigterm) {
av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
(int) received_sigterm);
exit (255);
}
exit(ret);
}
void assert_avoptions(AVDictionary *m)
@@ -484,13 +465,25 @@ void assert_avoptions(AVDictionary *m)
AVDictionaryEntry *t;
if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
exit(1);
exit_program(1);
}
}
static void abort_codec_experimental(AVCodec *c, int encoder)
static void assert_codec_experimental(AVCodecContext *c, int encoder)
{
exit(1);
const char *codec_string = encoder ? "encoder" : "decoder";
AVCodec *codec;
if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
"results.\nAdd '-strict experimental' if you want to use it.\n",
codec_string, c->codec->name);
codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id);
if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
codec_string, codec->name);
exit_program(1);
}
}
static void update_benchmark(const char *fmt, ...)
@@ -571,48 +564,28 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
avctx->codec ? avctx->codec->name : "copy");
print_error("", a);
if (exit_on_error)
exit(1);
exit_program(1);
}
*pkt = new_pkt;
bsfc = bsfc->next;
}
if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
ost->last_mux_dts != AV_NOPTS_VALUE &&
pkt->dts < ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT)) {
av_log(NULL, AV_LOG_WARNING, "Non-monotonous DTS in output stream "
"%d:%d; previous: %"PRId64", current: %"PRId64"; ",
ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
if (exit_on_error) {
av_log(NULL, AV_LOG_FATAL, "aborting.\n");
exit(1);
}
av_log(NULL, AV_LOG_WARNING, "changing to %"PRId64". This may result "
"in incorrect timestamps in the output file.\n",
ost->last_mux_dts + 1);
pkt->dts = ost->last_mux_dts + 1;
if (pkt->pts != AV_NOPTS_VALUE)
pkt->pts = FFMAX(pkt->pts, pkt->dts);
}
ost->last_mux_dts = pkt->dts;
pkt->stream_index = ost->index;
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
av_get_media_type_string(ost->st->codec->codec_type),
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
pkt->size
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base)
);
}
ret = av_interleaved_write_frame(s, pkt);
if (ret < 0) {
print_error("av_interleaved_write_frame()", ret);
exit(1);
exit_program(1);
}
}
@@ -622,8 +595,9 @@ static void close_output_stream(OutputStream *ost)
ost->finished = 1;
if (of->shortest) {
int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, AV_TIME_BASE_Q);
of->recording_time = FFMIN(of->recording_time, end);
int i;
for (i = 0; i < of->ctx->nb_streams; i++)
output_streams[of->ost_index + i]->finished = 1;
}
}
@@ -662,7 +636,7 @@ static void do_audio_out(AVFormatContext *s, OutputStream *ost,
update_benchmark(NULL);
if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
exit(1);
exit_program(1);
}
update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
@@ -703,8 +677,6 @@ static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void *
/* create temporary picture */
size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
if (size < 0)
return;
buf = av_malloc(size);
if (!buf)
return;
@@ -743,7 +715,7 @@ static void do_subtitle_out(AVFormatContext *s,
if (sub->pts == AV_NOPTS_VALUE) {
av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
if (exit_on_error)
exit(1);
exit_program(1);
return;
}
@@ -779,7 +751,7 @@ static void do_subtitle_out(AVFormatContext *s,
subtitle_out_max_size, sub);
if (subtitle_out_size < 0) {
av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
exit(1);
exit_program(1);
}
av_init_packet(&pkt);
@@ -802,7 +774,8 @@ static void do_subtitle_out(AVFormatContext *s,
static void do_video_out(AVFormatContext *s,
OutputStream *ost,
AVFrame *in_picture)
AVFrame *in_picture,
float quality)
{
int ret, format_video_sync;
AVPacket pkt;
@@ -827,7 +800,7 @@ static void do_video_out(AVFormatContext *s,
format_video_sync = video_sync_method;
if (format_video_sync == VSYNC_AUTO)
format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : 1;
switch (format_video_sync) {
case VSYNC_CFR:
@@ -884,10 +857,6 @@ static void do_video_out(AVFormatContext *s,
method. */
enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
enc->coded_frame->top_field_first = in_picture->top_field_first;
if (enc->coded_frame->interlaced_frame)
enc->field_order = enc->coded_frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
else
enc->field_order = AV_FIELD_PROGRESSIVE;
pkt.data = (uint8_t *)in_picture;
pkt.size = sizeof(AVPicture);
pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
@@ -910,15 +879,9 @@ static void do_video_out(AVFormatContext *s,
big_picture.top_field_first = !!ost->top_field_first;
}
if (big_picture.interlaced_frame) {
if (enc->codec->id == AV_CODEC_ID_MJPEG)
enc->field_order = big_picture.top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
else
enc->field_order = big_picture.top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
} else
enc->field_order = AV_FIELD_PROGRESSIVE;
big_picture.quality = ost->st->codec->global_quality;
/* handles same_quant here. This is not correct because it may
not be a global option */
big_picture.quality = quality;
if (!enc->me_threshold)
big_picture.pict_type = 0;
if (ost->forced_kf_index < ost->forced_kf_count &&
@@ -931,7 +894,7 @@ static void do_video_out(AVFormatContext *s,
update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
exit(1);
exit_program(1);
}
if (got_packet) {
@@ -971,7 +934,7 @@ static void do_video_out(AVFormatContext *s,
}
if (vstats_filename && frame_size)
do_video_stats(ost, frame_size);
do_video_stats(output_files[ost->file_index]->ctx, ost, frame_size);
}
static double psnr(double d)
@@ -979,7 +942,8 @@ static double psnr(double d)
return -10.0 * log(d) / log(10.0);
}
static void do_video_stats(OutputStream *ost, int frame_size)
static void do_video_stats(AVFormatContext *os, OutputStream *ost,
int frame_size)
{
AVCodecContext *enc;
int frame_number;
@@ -990,20 +954,20 @@ static void do_video_stats(OutputStream *ost, int frame_size)
vstats_file = fopen(vstats_filename, "w");
if (!vstats_file) {
perror("fopen");
exit(1);
exit_program(1);
}
}
enc = ost->st->codec;
if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
frame_number = ost->st->nb_frames;
frame_number = ost->frame_number;
fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
if (enc->flags&CODEC_FLAG_PSNR)
fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
fprintf(vstats_file,"f_size= %6d ", frame_size);
/* compute pts value */
ti1 = ost->st->pts.val * av_q2d(enc->time_base);
ti1 = ost->sync_opts * av_q2d(enc->time_base);
if (ti1 < 0.01)
ti1 = 0.01;
@@ -1015,19 +979,6 @@ static void do_video_stats(OutputStream *ost, int frame_size)
}
}
static void finish_output_stream(OutputStream *ost)
{
OutputFile *of = output_files[ost->file_index];
int i;
ost->finished = 1;
if (of->shortest) {
for (i = 0; i < of->ctx->nb_streams; i++)
output_streams[of->ost_index + i]->finished = 1;
}
}
/**
* Get and encode new output from any of the filtergraphs, without causing
* activity.
@@ -1093,7 +1044,9 @@ static int reap_filters(void)
if (!ost->frame_aspect_ratio)
ost->st->codec->sample_aspect_ratio = picref->video->sample_aspect_ratio;
do_video_out(of->ctx, ost, filtered_frame);
do_video_out(of->ctx, ost, filtered_frame,
same_quant ? ost->last_quality :
ost->st->codec->global_quality);
break;
case AVMEDIA_TYPE_AUDIO:
avfilter_copy_buf_props(filtered_frame, picref);
@@ -1144,8 +1097,11 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
oc = output_files[0]->ctx;
total_size = avio_size(oc->pb);
if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
if (total_size < 0) { // FIXME improve avio_size() so it works with non seekable output too
total_size = avio_tell(oc->pb);
if (total_size < 0)
total_size = 0;
}
buf[0] = '\0';
vid = 0;
@@ -1182,7 +1138,7 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
for (j = 0; j < 32; j++)
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
}
if ((enc->flags&CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
if (enc->flags&CODEC_FLAG_PSNR) {
int j;
double error, error_sum = 0;
double scale, scale_sum = 0;
@@ -1226,21 +1182,16 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
hours = mins / 60;
mins %= 60;
bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
bitrate = pts ? total_size * 8 / (pts / 1000.0) : 0;
if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
"size=N/A time=");
else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
"size=%8.0fkB time=", total_size / 1024.0);
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
"size=%8.0fkB time=", total_size / 1024.0);
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
"%02d:%02d:%02d.%02d ", hours, mins, secs,
(100 * us) / AV_TIME_BASE);
if (bitrate < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
"bitrate=N/A");
else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
"bitrate=%6.1fkbits/s", bitrate);
if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
"bitrate=%6.1fkbits/s", bitrate);
av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
hours, mins, secs, us);
@@ -1336,7 +1287,7 @@ static void flush_encoders(void)
update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
exit(1);
exit_program(1);
}
*size += pkt.size;
if (ost->logfile && enc->stats_out) {
@@ -1350,12 +1301,7 @@ static void flush_encoders(void)
pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
if (pkt.duration > 0)
pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
write_frame(os, &pkt, ost);
if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
do_video_stats(ost, pkt.size);
}
}
if (stop_encoding)
@@ -1425,15 +1371,6 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
opkt.dts -= ost_tb_start_time;
if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
int duration = av_get_audio_frame_duration(ist->st->codec, pkt->size);
if(!duration)
duration = ist->st->codec->frame_size;
opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
(AVRational){1, ist->st->codec->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
ost->st->time_base) - ost_tb_start_time;
}
opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
opkt.flags = pkt->flags;
@@ -1460,6 +1397,7 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
write_frame(of->ctx, &opkt, ost);
ost->st->codec->frame_number++;
av_free_packet(&opkt);
}
static void rate_emu_sleep(InputStream *ist)
@@ -1499,6 +1437,8 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
return AVERROR(ENOMEM);
else
avcodec_get_frame_defaults(ist->decoded_frame);
decoded_frame = ist->decoded_frame;
update_benchmark(NULL);
@@ -1540,7 +1480,7 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
"layout for Input Stream #%d.%d\n", ist->file_index,
ist->st->index);
exit(1);
exit_program(1);
}
decoded_frame->channel_layout = avctx->channel_layout;
@@ -1568,7 +1508,7 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
int j;
if (configure_filtergraph(fg) < 0) {
av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
exit(1);
exit_program(1);
}
for (j = 0; j < fg->nb_outputs; j++) {
OutputStream *ost = fg->outputs[j]->ost;
@@ -1598,9 +1538,9 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
decoded_frame_tb = AV_TIME_BASE_Q;
}
if (decoded_frame->pts != AV_NOPTS_VALUE)
decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
(AVRational){1, ist->st->codec->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
(AVRational){1, ist->st->codec->sample_rate});
decoded_frame->pts = av_rescale_q(decoded_frame->pts,
decoded_frame_tb,
(AVRational){1, ist->st->codec->sample_rate});
for (i = 0; i < ist->nb_filters; i++)
av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame,
AV_BUFFERSRC_FLAG_PUSH);
@@ -1617,9 +1557,12 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
int i, ret = 0, resample_changed;
int64_t best_effort_timestamp;
AVRational *frame_sample_aspect;
float quality;
if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
return AVERROR(ENOMEM);
else
avcodec_get_frame_defaults(ist->decoded_frame);
decoded_frame = ist->decoded_frame;
pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
@@ -1635,6 +1578,8 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
return ret;
}
quality = same_quant ? decoded_frame->quality : 0;
if(ist->top_field_first>=0)
decoded_frame->top_field_first = ist->top_field_first;
@@ -1674,13 +1619,12 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
ist->resample_height = decoded_frame->height;
ist->resample_pix_fmt = decoded_frame->format;
for (i = 0; i < nb_filtergraphs; i++) {
if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
for (i = 0; i < nb_filtergraphs; i++)
if (ist_in_filtergraph(filtergraphs[i], ist) &&
configure_filtergraph(filtergraphs[i]) < 0) {
av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
exit(1);
exit_program(1);
}
}
}
frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
@@ -1688,10 +1632,13 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
int changed = ist->st->codec->width != ist->filters[i]->filter->outputs[0]->w
|| ist->st->codec->height != ist->filters[i]->filter->outputs[0]->h
|| ist->st->codec->pix_fmt != ist->filters[i]->filter->outputs[0]->format;
// XXX what an ugly hack
if (ist->filters[i]->graph->nb_outputs == 1)
ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;
if (!frame_sample_aspect->num)
*frame_sample_aspect = ist->st->sample_aspect_ratio;
if (ist->dr1 && decoded_frame->type==FF_BUFFER_TYPE_USER && !changed && !do_deinterlace) {
if (ist->dr1 && decoded_frame->type==FF_BUFFER_TYPE_USER && !changed) {
FrameBuffer *buf = decoded_frame->opaque;
AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
decoded_frame->data, decoded_frame->linesize,
@@ -1712,7 +1659,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
} else
if(av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, AV_BUFFERSRC_FLAG_PUSH)<0) {
av_log(NULL, AV_LOG_FATAL, "Failed to inject frame into filter network\n");
exit(1);
exit_program(1);
}
}
@@ -1803,7 +1750,7 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
if (pkt->dts != AV_NOPTS_VALUE) {
ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
if (ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
ist->next_pts = ist->pts = ist->dts;
ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
}
// while we have more to decode or while the decoder did output something on EOF
@@ -1911,7 +1858,7 @@ static void print_sdp(void)
AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
if (!avc)
exit(1);
exit_program(1);
for (i = 0; i < nb_output_files; i++)
avc[i] = output_files[i]->ctx;
@@ -1923,7 +1870,6 @@ static void print_sdp(void)
static int init_input_stream(int ist_index, char *error, int error_len)
{
int ret;
InputStream *ist = input_streams[ist_index];
if (ist->decoding_needed) {
@@ -1943,13 +1889,12 @@ static int init_input_stream(int ist_index, char *error, int error_len)
if (!av_dict_get(ist->opts, "threads", NULL, 0))
av_dict_set(&ist->opts, "threads", "auto", 0);
if ((ret = avcodec_open2(ist->st->codec, codec, &ist->opts)) < 0) {
if (ret == AVERROR_EXPERIMENTAL)
abort_codec_experimental(codec, 0);
if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
ist->file_index, ist->st->index);
return ret;
return AVERROR(EINVAL);
}
assert_codec_experimental(ist->st->codec, 0);
assert_avoptions(ist->opts);
}
@@ -1981,7 +1926,7 @@ static void parse_forced_key_frames(char *kf, OutputStream *ost,
ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
if (!ost->forced_kf_pts) {
av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
exit(1);
exit_program(1);
}
p = kf;
@@ -2136,12 +2081,6 @@ static int transcode_init(void)
codec->time_base.num *= icodec->ticks_per_frame;
}
}
if ( codec->codec_tag == AV_RL32("tmcd")
&& icodec->time_base.num < icodec->time_base.den
&& icodec->time_base.num > 0
&& 121LL*icodec->time_base.num > icodec->time_base.den) {
codec->time_base = icodec->time_base;
}
if(ost->frame_rate.num)
codec->time_base = av_inv_q(ost->frame_rate);
@@ -2153,7 +2092,7 @@ static int transcode_init(void)
case AVMEDIA_TYPE_AUDIO:
if (audio_volume != 256) {
av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
exit(1);
exit_program(1);
}
codec->channel_layout = icodec->channel_layout;
codec->sample_rate = icodec->sample_rate;
@@ -2300,7 +2239,7 @@ static int transcode_init(void)
if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
logfilename);
exit(1);
exit_program(1);
}
codec->stats_in = logbuffer;
}
@@ -2309,7 +2248,7 @@ static int transcode_init(void)
if (!f) {
av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
logfilename, strerror(errno));
exit(1);
exit_program(1);
}
ost->logfile = f;
}
@@ -2339,17 +2278,17 @@ static int transcode_init(void)
}
if (!av_dict_get(ost->opts, "threads", NULL, 0))
av_dict_set(&ost->opts, "threads", "auto", 0);
if ((ret = avcodec_open2(ost->st->codec, codec, &ost->opts)) < 0) {
if (ret == AVERROR_EXPERIMENTAL)
abort_codec_experimental(codec, 1);
if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
ost->file_index, ost->index);
ret = AVERROR(EINVAL);
goto dump_format;
}
if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
!(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
av_buffersink_set_frame_size(ost->filter->filter,
ost->st->codec->frame_size);
assert_codec_experimental(ost->st->codec, 1);
assert_avoptions(ost->opts);
if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
@@ -2476,7 +2415,10 @@ static int transcode_init(void)
return 0;
}
/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
/**
* @return 1 if there are still streams where more output is wanted,
* 0 otherwise
*/
static int need_output(void)
{
int i;
@@ -2755,8 +2697,8 @@ static void reset_eagain(void)
output_streams[i]->unavailable = 0;
}
/*
* Return
/**
* @return
* - 0 -- one packet was read and processed
* - AVERROR(EAGAIN) -- no packets were available for selected file,
* this function should be called again
@@ -2781,7 +2723,7 @@ static int process_input(int file_index)
if (ret != AVERROR_EOF) {
print_error(is->filename, ret);
if (exit_on_error)
exit(1);
exit_program(1);
}
ifile->eof_reached = 1;
@@ -2796,7 +2738,7 @@ static int process_input(int file_index)
if (ost->source_index == ifile->ist_index + i &&
(ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
finish_output_stream(ost);
close_output_stream(ost);
}
}
@@ -2820,41 +2762,9 @@ static int process_input(int file_index)
if (ist->discard)
goto discard_packet;
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
"next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->st->codec->codec_type),
av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
av_ts2str(input_files[ist->file_index]->ts_offset),
av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
}
if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
int64_t stime, stime2;
// Correcting starttime based on the enabled streams
// FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
// so we instead do it here as part of discontinuity handling
if ( ist->next_dts == AV_NOPTS_VALUE
&& ifile->ts_offset == -is->start_time
&& (is->iformat->flags & AVFMT_TS_DISCONT)) {
int64_t new_start_time = INT64_MAX;
for (i=0; i<is->nb_streams; i++) {
AVStream *st = is->streams[i];
if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
continue;
new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
}
if (new_start_time > is->start_time) {
av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
ifile->ts_offset = -new_start_time;
}
}
stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
if(!ist->wrap_correction_done && input_files[file_index]->ctx->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
int64_t stime = av_rescale_q(input_files[file_index]->ctx->start_time, AV_TIME_BASE_Q, ist->st->time_base);
int64_t stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
ist->wrap_correction_done = 1;
if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
@@ -2877,6 +2787,17 @@ static int process_input(int file_index)
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts *= ist->ts_scale;
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
"next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%"PRId64"\n",
ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->st->codec->codec_type),
av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
input_files[ist->file_index]->ts_offset);
}
if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
!copy_ts) {
int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
@@ -2914,15 +2835,6 @@ static int process_input(int file_index)
}
}
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->st->codec->codec_type),
av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
av_ts2str(input_files[ist->file_index]->ts_offset),
av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
}
sub2video_heartbeat(ist, pkt.pts);
ret = output_packet(ist, &pkt);
@@ -2932,7 +2844,7 @@ static int process_input(int file_index)
av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
ist->file_index, ist->st->index, buf);
if (exit_on_error)
exit(1);
exit_program(1);
}
discard_packet:
@@ -3194,14 +3106,19 @@ static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
static void parse_cpuflags(int argc, char **argv, const OptionDef *options)
{
int idx = locate_option(argc, argv, options, "cpuflags");
if (idx && argv[idx + 1])
opt_cpuflags(NULL, "cpuflags", argv[idx + 1]);
}
int main(int argc, char **argv)
{
int ret;
OptionsContext o = { 0 };
int64_t ti;
atexit(exit_program);
setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
reset_options(&o, 0);
av_log_set_flags(AV_LOG_SKIP_REPEATED);
parse_loglevel(argc, argv, options);
@@ -3225,37 +3142,37 @@ int main(int argc, char **argv)
term_init();
/* parse options and open all input/output files */
ret = ffmpeg_parse_options(argc, argv);
if (ret < 0)
exit(1);
parse_cpuflags(argc, argv, options);
/* parse options */
parse_options(&o, argc, argv, options, opt_output_file);
if (nb_output_files <= 0 && nb_input_files == 0) {
show_usage();
av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
exit(1);
exit_program(1);
}
/* file converter / grab */
if (nb_output_files <= 0) {
av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
exit(1);
exit_program(1);
}
// if (nb_input_files == 0) {
// av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
// exit(1);
// exit_program(1);
// }
current_time = ti = getutime();
if (transcode() < 0)
exit(1);
exit_program(1);
ti = getutime() - ti;
if (do_benchmark) {
int maxrss = getmaxrss() / 1024;
printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);
}
exit(received_nb_signals ? 255 : 0);
exit_program(0);
return 0;
}
@@ -57,12 +57,12 @@
/* select an input stream for an output stream */
typedef struct StreamMap {
int disabled; /* 1 is this mapping is disabled by a negative map */
int disabled; /** 1 is this mapping is disabled by a negative map */
int file_index;
int stream_index;
int sync_file_index;
int sync_stream_index;
char *linklabel; /* name of an output link, for mapping lavfi outputs */
char *linklabel; /** name of an output link, for mapping lavfi outputs */
} StreamMap;
typedef struct {
@@ -71,8 +71,6 @@ typedef struct {
} AudioChannelMap;
typedef struct OptionsContext {
OptionGroup *g;
/* input/output options */
int64_t start_time;
const char *format;
@@ -163,8 +161,6 @@ typedef struct OptionsContext {
int nb_copy_prior_start;
SpecifierOpt *filters;
int nb_filters;
SpecifierOpt *reinit_filters;
int nb_reinit_filters;
SpecifierOpt *fix_sub_duration;
int nb_fix_sub_duration;
SpecifierOpt *pass;
@@ -219,9 +215,6 @@ typedef struct InputStream {
int64_t next_pts; ///< synthetic pts for the next decode frame (in AV_TIME_BASE units)
int64_t pts; ///< current pts of the decoded frame (in AV_TIME_BASE units)
int wrap_correction_done;
int64_t filter_in_rescale_delta_last;
double ts_scale;
int is_start; /* is 1 at the start and after a discontinuity */
int saw_first_ts;
@@ -248,7 +241,6 @@ typedef struct InputStream {
struct sub2video {
int64_t last_pts;
int64_t end_pts;
AVFilterBufferRef *ref;
int w, h;
} sub2video;
@@ -261,8 +253,6 @@ typedef struct InputStream {
* currently video and audio only */
InputFilter **filters;
int nb_filters;
int reinit_filters;
} InputStream;
typedef struct InputFile {
@@ -300,8 +290,6 @@ typedef struct OutputStream {
/* pts of the first frame encoded for this stream, used for limiting
* recording time */
int64_t first_pts;
/* dts of the last packet sent to the muxer */
int64_t last_mux_dts;
AVBitStreamFilterContext *bitstream_filters;
AVCodec *enc;
int64_t max_frames;
@@ -313,6 +301,7 @@ typedef struct OutputStream {
int top_field_first;
float frame_aspect_ratio;
float last_quality;
/* forced key frames */
int64_t *forced_kf_pts;
@@ -331,7 +320,6 @@ typedef struct OutputStream {
char *avfilter;
int64_t sws_flags;
int64_t swr_filter_type;
int64_t swr_dither_method;
double swr_dither_scale;
AVDictionary *opts;
@@ -389,6 +377,7 @@ extern int debug_ts;
extern int exit_on_error;
extern int print_stats;
extern int qp_hist;
extern int same_quant;
extern int stdin_interaction;
extern int frame_bits_per_raw_sample;
extern AVIOContext *progress_avio;
@@ -409,7 +398,7 @@ void assert_avoptions(AVDictionary *m);
int guess_input_channel_layout(InputStream *ist);
enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum AVPixelFormat target);
enum PixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum PixelFormat target);
void choose_sample_fmt(AVStream *st, AVCodec *codec);
int configure_filtergraph(FilterGraph *fg);
@@ -417,6 +406,4 @@ int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOu
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist);
FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost);
int ffmpeg_parse_options(int argc, char **argv);
#endif /* FFMPEG_H */
@@ -24,46 +24,41 @@
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavutil/audioconvert.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixfmt.h"
#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"
enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum AVPixelFormat target)
enum PixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum PixelFormat target)
{
if (codec && codec->pix_fmts) {
const enum AVPixelFormat *p = codec->pix_fmts;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
enum AVPixelFormat best= AV_PIX_FMT_NONE;
const enum AVPixelFormat mjpeg_formats[] = { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
const enum AVPixelFormat ljpeg_formats[] = { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
const enum PixelFormat *p = codec->pix_fmts;
int has_alpha= av_pix_fmt_descriptors[target].nb_components % 2 == 0;
enum PixelFormat best= PIX_FMT_NONE;
if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
if (st->codec->codec_id == AV_CODEC_ID_MJPEG) {
p = mjpeg_formats;
p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE };
} else if (st->codec->codec_id == AV_CODEC_ID_LJPEG) {
p =ljpeg_formats;
p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P,
PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE };
}
}
for (; *p != AV_PIX_FMT_NONE; p++) {
for (; *p != PIX_FMT_NONE; p++) {
best= avcodec_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
if (*p == target)
break;
}
if (*p == AV_PIX_FMT_NONE) {
if (target != AV_PIX_FMT_NONE)
if (*p == PIX_FMT_NONE) {
if (target != PIX_FMT_NONE)
av_log(NULL, AV_LOG_WARNING,
"Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
av_get_pix_fmt_name(target),
av_pix_fmt_descriptors[target].name,
codec->name,
av_get_pix_fmt_name(best));
av_pix_fmt_descriptors[best].name);
return best;
}
}
@@ -98,32 +93,32 @@ static char *choose_pix_fmts(OutputStream *ost)
if (ost->filter)
avfilter_graph_set_auto_convert(ost->filter->graph->graph,
AVFILTER_AUTO_CONVERT_NONE);
if (ost->st->codec->pix_fmt == AV_PIX_FMT_NONE)
if (ost->st->codec->pix_fmt == PIX_FMT_NONE)
return NULL;
return av_strdup(av_get_pix_fmt_name(ost->st->codec->pix_fmt));
}
if (ost->st->codec->pix_fmt != AV_PIX_FMT_NONE) {
if (ost->st->codec->pix_fmt != PIX_FMT_NONE) {
return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc, ost->st->codec->pix_fmt)));
} else if (ost->enc && ost->enc->pix_fmts) {
const enum AVPixelFormat *p;
const enum PixelFormat *p;
AVIOContext *s = NULL;
uint8_t *ret;
int len;
if (avio_open_dyn_buf(&s) < 0)
exit(1);
exit_program(1);
p = ost->enc->pix_fmts;
if (ost->st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
if (ost->st->codec->codec_id == AV_CODEC_ID_MJPEG) {
p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE };
} else if (ost->st->codec->codec_id == AV_CODEC_ID_LJPEG) {
p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P,
PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE };
}
}
for (; *p != AV_PIX_FMT_NONE; p++) {
for (; *p != PIX_FMT_NONE; p++) {
const char *name = av_get_pix_fmt_name(*p);
avio_printf(s, "%s:", name);
}
@@ -134,8 +129,10 @@ static char *choose_pix_fmts(OutputStream *ost)
return NULL;
}
/* Define a function for building a string containing a list of
* allowed formats. */
/**
* Define a function for building a string containing a list of
* allowed formats,
*/
#define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator)\
static char *choose_ ## var ## s(OutputStream *ost) \
{ \
@@ -149,7 +146,7 @@ static char *choose_ ## var ## s(OutputStream *ost) \
int len; \
\
if (avio_open_dyn_buf(&s) < 0) \
exit(1); \
exit_program(1); \
\
for (p = ost->enc->supported_list; *p != none; p++) { \
get_name(*p); \
@@ -162,7 +159,7 @@ static char *choose_ ## var ## s(OutputStream *ost) \
return NULL; \
}
// DEF_CHOOSE_FORMAT(enum AVPixelFormat, pix_fmt, pix_fmts, AV_PIX_FMT_NONE,
// DEF_CHOOSE_FORMAT(enum PixelFormat, pix_fmt, pix_fmts, PIX_FMT_NONE,
// GET_PIX_FMT_NAME, ":")
DEF_CHOOSE_FORMAT(enum AVSampleFormat, sample_fmt, sample_fmts,
@@ -179,27 +176,31 @@ FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
FilterGraph *fg = av_mallocz(sizeof(*fg));
if (!fg)
exit(1);
exit_program(1);
fg->index = nb_filtergraphs;
GROW_ARRAY(fg->outputs, fg->nb_outputs);
fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs), &fg->nb_outputs,
fg->nb_outputs + 1);
if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
exit(1);
exit_program(1);
fg->outputs[0]->ost = ost;
fg->outputs[0]->graph = fg;
ost->filter = fg->outputs[0];
GROW_ARRAY(fg->inputs, fg->nb_inputs);
fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs), &fg->nb_inputs,
fg->nb_inputs + 1);
if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
exit(1);
exit_program(1);
fg->inputs[0]->ist = ist;
fg->inputs[0]->graph = fg;
GROW_ARRAY(ist->filters, ist->nb_filters);
ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
&ist->nb_filters, ist->nb_filters + 1);
ist->filters[ist->nb_filters - 1] = fg->inputs[0];
GROW_ARRAY(filtergraphs, nb_filtergraphs);
filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
&nb_filtergraphs, nb_filtergraphs + 1);
filtergraphs[nb_filtergraphs - 1] = fg;
return fg;
@@ -215,7 +216,7 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
"currently.\n");
exit(1);
exit_program(1);
}
if (in->name) {
@@ -227,7 +228,7 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
if (file_idx < 0 || file_idx >= nb_input_files) {
av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
file_idx, fg->graph_desc);
exit(1);
exit_program(1);
}
s = input_files[file_idx]->ctx;
@@ -245,7 +246,7 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
if (!st) {
av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
"matches no streams.\n", p, fg->graph_desc);
exit(1);
exit_program(1);
}
ist = input_streams[input_files[file_idx]->ist_index + st->index];
} else {
@@ -259,7 +260,7 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
"unlabeled input pad %d on filter %s\n", in->pad_idx,
in->filter_ctx->name);
exit(1);
exit_program(1);
}
}
av_assert0(ist);
@@ -268,13 +269,15 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
ist->decoding_needed++;
ist->st->discard = AVDISCARD_NONE;
GROW_ARRAY(fg->inputs, fg->nb_inputs);
fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs),
&fg->nb_inputs, fg->nb_inputs + 1);
if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
exit(1);
exit_program(1);
fg->inputs[fg->nb_inputs - 1]->ist = ist;
fg->inputs[fg->nb_inputs - 1]->graph = fg;
GROW_ARRAY(ist->filters, ist->nb_filters);
ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
&ist->nb_filters, ist->nb_filters + 1);
ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
}
@@ -474,7 +477,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
AVIOContext *pb; \
\
if (avio_open_dyn_buf(&pb) < 0) \
exit(1); \
exit_program(1); \
\
avio_printf(pb, "%s", ctx->filter->name); \
if (nb_pads > 1) \
@@ -520,20 +523,20 @@ static int sub2video_prepare(InputStream *ist)
}
av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
}
ist->sub2video.w = ist->st->codec->width = ist->resample_width = w;
ist->sub2video.h = ist->st->codec->height = ist->resample_height = h;
ist->sub2video.w = ist->st->codec->width = w;
ist->sub2video.h = ist->st->codec->height = h;
/* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
/* rectangles are PIX_FMT_PAL8, but we have no guarantee that the
palettes for all rectangles are identical or compatible */
ist->st->codec->pix_fmt = AV_PIX_FMT_RGB32;
ist->st->codec->pix_fmt = PIX_FMT_RGB32;
ret = av_image_alloc(image, linesize, w, h, AV_PIX_FMT_RGB32, 32);
ret = av_image_alloc(image, linesize, w, h, PIX_FMT_RGB32, 32);
if (ret < 0)
return ret;
memset(image[0], 0, h * linesize[0]);
ist->sub2video.ref = avfilter_get_video_buffer_ref_from_arrays(
image, linesize, AV_PERM_READ | AV_PERM_PRESERVE,
w, h, AV_PIX_FMT_RGB32);
w, h, PIX_FMT_RGB32);
if (!ist->sub2video.ref) {
av_free(image[0]);
return AVERROR(ENOMEM);
@@ -557,15 +560,6 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
int pad_idx = in->pad_idx;
int ret;
if (!ist->framerate.num && ist->st->codec->ticks_per_frame>1) {
AVRational codec_fr = av_inv_q(ist->st->codec->time_base);
AVRational avg_fr = ist->st->avg_frame_rate;
codec_fr.den *= ist->st->codec->ticks_per_frame;
if ( codec_fr.num>0 && codec_fr.den>0 && av_q2d(codec_fr) < av_q2d(fr)*0.7
&& fabs(1.0 - av_q2d(av_div_q(avg_fr, fr)))>0.1)
fr = codec_fr;
}
if (ist->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
ret = sub2video_prepare(ist);
if (ret < 0)
@@ -580,8 +574,8 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
av_bprint_init(&args, 0, 1);
av_bprintf(&args,
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
"pixel_aspect=%d/%d:sws_param=flags=%d", ist->resample_width,
ist->resample_height, ist->resample_pix_fmt,
"pixel_aspect=%d/%d:sws_param=flags=%d", ist->st->codec->width,
ist->st->codec->height, ist->st->codec->pix_fmt,
tb.num, tb.den, sar.num, sar.den,
SWS_BILINEAR + ((ist->st->codec->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
if (fr.num && fr.den)
@@ -664,9 +658,9 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
if (audio_sync_method > 0) {
char args[256] = {0};
av_strlcatf(args, sizeof(args), "async=%d", audio_sync_method);
if (audio_drift_threshold != 0.1)
av_strlcatf(args, sizeof(args), ":min_hard_comp=%f", audio_drift_threshold);
av_strlcatf(args, sizeof(args), "min_comp=0.001:min_hard_comp=%f", audio_drift_threshold);
if (audio_sync_method > 1)
av_strlcatf(args, sizeof(args), ":max_soft_comp=%f", audio_sync_method/(double)ist->st->codec->sample_rate);
AUTO_INSERT_FILTER_INPUT("-async", "aresample", args);
}
@@ -686,9 +680,6 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
if (audio_volume != 256) {
char args[256];
av_log(NULL, AV_LOG_WARNING, "-vol has been deprecated. Use the volume "
"audio filter instead.\n");
snprintf(args, sizeof(args), "%f", audio_volume / 256.);
AUTO_INSERT_FILTER_INPUT("-vol", "volume", args);
}
@@ -727,17 +718,6 @@ int configure_filtergraph(FilterGraph *fg)
char args[255];
snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
fg->graph->scale_sws_opts = av_strdup(args);
args[0] = 0;
if (ost->swr_filter_type != SWR_FILTER_TYPE_KAISER)
av_strlcatf(args, sizeof(args), "filter_type=%d:", (int)ost->swr_filter_type);
if (ost->swr_dither_method)
av_strlcatf(args, sizeof(args), "dither_method=%d:", (int)ost->swr_dither_method);
if (ost->swr_dither_scale != 1.0)
av_strlcatf(args, sizeof(args), "dither_scale=%f:", ost->swr_dither_scale);
if (strlen(args))
args[strlen(args)-1] = 0;
av_opt_set(fg->graph, "aresample_swr_opts", args, 0);
}
if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
@@ -769,9 +749,10 @@ int configure_filtergraph(FilterGraph *fg)
} else {
/* wait until output mappings are processed */
for (cur = outputs; cur;) {
GROW_ARRAY(fg->outputs, fg->nb_outputs);
fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs),
&fg->nb_outputs, fg->nb_outputs + 1);
if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
exit(1);
exit_program(1);
fg->outputs[fg->nb_outputs - 1]->graph = fg;
fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
cur = cur->next;
File diff suppressed because it is too large
492
ffplay.c
File diff suppressed because it is too large
532
ffprobe.c
@@ -55,9 +55,9 @@ static int do_read_packets = 0;
static int do_show_error = 0;
static int do_show_format = 0;
static int do_show_frames = 0;
static AVDictionary *fmt_entries_to_show = NULL;
static int do_show_packets = 0;
static int do_show_streams = 0;
static int do_show_stream_disposition = 0;
static int do_show_data = 0;
static int do_show_program_version = 0;
static int do_show_library_versions = 0;
@@ -69,26 +69,16 @@ static int use_value_sexagesimal_format = 0;
static int show_private_data = 1;
static char *print_format;
static char *stream_specifier;
/* section structure definition */
#define SECTION_MAX_NB_CHILDREN 10
struct section {
int id; ///< unique id identifying a section
int id; ///< unique id indentifying a section
const char *name;
#define SECTION_FLAG_IS_WRAPPER 1 ///< the section only contains other sections, but has no data at its own level
#define SECTION_FLAG_IS_ARRAY 2 ///< the section contains an array of elements of the same type
#define SECTION_FLAG_HAS_VARIABLE_FIELDS 4 ///< the section may contain a variable number of fields with variable keys.
/// For these sections the element_name field is mandatory.
int flags;
int children_ids[SECTION_MAX_NB_CHILDREN+1]; ///< list of children section IDS, terminated by -1
const char *element_name; ///< name of the contained element, if provided
const char *unique_name; ///< unique section name, in case the name is ambiguous
AVDictionary *entries_to_show;
int show_all_entries;
};
typedef enum {
@@ -107,31 +97,30 @@ typedef enum {
SECTION_ID_PROGRAM_VERSION,
SECTION_ID_ROOT,
SECTION_ID_STREAM,
SECTION_ID_STREAM_DISPOSITION,
SECTION_ID_STREAMS,
SECTION_ID_STREAM_TAGS,
SECTION_ID_STREAM_TAGS
} SectionID;
static struct section sections[] = {
[SECTION_ID_ERROR] = { SECTION_ID_ERROR, "error", 0, { -1 } },
[SECTION_ID_FORMAT] = { SECTION_ID_FORMAT, "format", 0, { SECTION_ID_FORMAT_TAGS, -1 } },
[SECTION_ID_FORMAT_TAGS] = { SECTION_ID_FORMAT_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "format_tags" },
[SECTION_ID_FRAMES] = { SECTION_ID_FRAMES, "frames", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME, -1 } },
[SECTION_ID_FRAME] = { SECTION_ID_FRAME, "frame", 0, { SECTION_ID_FRAME_TAGS, -1 } },
[SECTION_ID_FRAME_TAGS] = { SECTION_ID_FRAME_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "frame_tags" },
[SECTION_ID_LIBRARY_VERSIONS] = { SECTION_ID_LIBRARY_VERSIONS, "library_versions", SECTION_FLAG_IS_ARRAY, { SECTION_ID_LIBRARY_VERSION, -1 } },
[SECTION_ID_LIBRARY_VERSION] = { SECTION_ID_LIBRARY_VERSION, "library_version", 0, { -1 } },
[SECTION_ID_PACKETS] = { SECTION_ID_PACKETS, "packets", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET, -1} },
[SECTION_ID_PACKETS_AND_FRAMES] = { SECTION_ID_PACKETS_AND_FRAMES, "packets_and_frames", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET, -1} },
[SECTION_ID_PACKET] = { SECTION_ID_PACKET, "packet", 0, { -1 } },
[SECTION_ID_PROGRAM_VERSION] = { SECTION_ID_PROGRAM_VERSION, "program_version", 0, { -1 } },
[SECTION_ID_ROOT] = { SECTION_ID_ROOT, "root", SECTION_FLAG_IS_WRAPPER,
{ SECTION_ID_FORMAT, SECTION_ID_FRAMES, SECTION_ID_STREAMS, SECTION_ID_PACKETS,
SECTION_ID_ERROR, SECTION_ID_PROGRAM_VERSION, SECTION_ID_LIBRARY_VERSIONS, -1} },
[SECTION_ID_STREAMS] = { SECTION_ID_STREAMS, "streams", SECTION_FLAG_IS_ARRAY, { SECTION_ID_STREAM, -1 } },
[SECTION_ID_STREAM] = { SECTION_ID_STREAM, "stream", 0, { SECTION_ID_STREAM_DISPOSITION, SECTION_ID_STREAM_TAGS, -1 } },
[SECTION_ID_STREAM_DISPOSITION] = { SECTION_ID_STREAM_DISPOSITION, "disposition", 0, { -1 }, .unique_name = "stream_disposition" },
[SECTION_ID_STREAM_TAGS] = { SECTION_ID_STREAM_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "stream_tags" },
#define SECTION_ENTRY(id, name, flags) \
[SECTION_ID_##id] = { SECTION_ID_##id, name, flags }
static const struct section sections[] = {
SECTION_ENTRY(ERROR, "error", 0),
SECTION_ENTRY(FORMAT, "format", 0),
SECTION_ENTRY(FORMAT_TAGS, "tags", 0),
SECTION_ENTRY(FRAME, "frame", 0),
SECTION_ENTRY(FRAMES, "frames", SECTION_FLAG_IS_ARRAY),
SECTION_ENTRY(FRAME_TAGS, "tags", 0),
SECTION_ENTRY(LIBRARY_VERSION, "library_version", 0),
SECTION_ENTRY(LIBRARY_VERSIONS, "library_versions", SECTION_FLAG_IS_ARRAY),
SECTION_ENTRY(PACKET, "packet", 0),
SECTION_ENTRY(PACKETS, "packets", SECTION_FLAG_IS_ARRAY),
SECTION_ENTRY(PACKETS_AND_FRAMES, "packets_and_frames", SECTION_FLAG_IS_ARRAY),
SECTION_ENTRY(PROGRAM_VERSION, "program_version", 0),
SECTION_ENTRY(ROOT, "root", SECTION_FLAG_IS_WRAPPER),
SECTION_ENTRY(STREAM, "stream", 0),
SECTION_ENTRY(STREAMS, "streams", SECTION_FLAG_IS_ARRAY),
SECTION_ENTRY(STREAM_TAGS, "tags", 0),
};
static const OptionDef *options;
@@ -147,16 +136,13 @@ static const char unit_second_str[] = "s" ;
static const char unit_hertz_str[] = "Hz" ;
static const char unit_byte_str[] = "byte" ;
static const char unit_bit_per_second_str[] = "bit/s";
static uint64_t *nb_streams_packets;
static uint64_t *nb_streams_frames;
static int *selected_streams;
static void exit_program(void)
void av_noreturn exit_program(int ret)
{
int i;
for (i = 0; i < FF_ARRAY_ELEMS(sections); i++)
av_dict_free(&(sections[i].entries_to_show));
av_dict_free(&fmt_entries_to_show);
exit(ret);
}
struct unit_value {
@@ -228,7 +214,7 @@ typedef struct Writer {
int priv_size; ///< private size for the writer context
const char *name;
int (*init) (WriterContext *wctx);
int (*init) (WriterContext *wctx, const char *args);
void (*uninit)(WriterContext *wctx);
void (*print_section_header)(WriterContext *wctx);
@@ -257,8 +243,6 @@ struct WriterContext {
/** section per each level */
const struct section *section[SECTION_MAX_NB_LEVELS];
AVBPrint section_pbuf[SECTION_MAX_NB_LEVELS]; ///< generic print buffer dedicated to each section,
/// used by various writers
unsigned int nb_section_packet; ///< number of the packet section in case we are in "packets_and_frames" section
unsigned int nb_section_frame; ///< number of the frame section in case we are in "packets_and_frames" section
@@ -280,15 +264,11 @@ static const AVClass writer_class = {
static void writer_close(WriterContext **wctx)
{
int i;
if (!*wctx)
return;
if ((*wctx)->writer->uninit)
(*wctx)->writer->uninit(*wctx);
for (i = 0; i < SECTION_MAX_NB_LEVELS; i++)
av_bprint_finalize(&(*wctx)->section_pbuf[i], NULL);
if ((*wctx)->writer->priv_class)
av_opt_free((*wctx)->priv);
av_freep(&((*wctx)->priv));
@@ -298,7 +278,7 @@ static void writer_close(WriterContext **wctx)
static int writer_open(WriterContext **wctx, const Writer *writer, const char *args,
const struct section *sections, int nb_sections)
{
int i, ret = 0;
int ret = 0;
if (!(*wctx = av_malloc(sizeof(WriterContext)))) {
ret = AVERROR(ENOMEM);
@@ -325,12 +305,8 @@ static int writer_open(WriterContext **wctx, const Writer *writer, const char *a
(ret = av_set_options_string(priv_ctx, args, "=", ":")) < 0)
goto fail;
}
for (i = 0; i < SECTION_MAX_NB_LEVELS; i++)
av_bprint_init(&(*wctx)->section_pbuf[i], 1, AV_BPRINT_SIZE_UNLIMITED);
if ((*wctx)->writer->init)
ret = (*wctx)->writer->init(*wctx);
ret = (*wctx)->writer->init(*wctx, args);
if (ret < 0)
goto fail;
@@ -385,9 +361,9 @@ static inline void writer_print_section_footer(WriterContext *wctx)
static inline void writer_print_integer(WriterContext *wctx,
const char *key, long long int val)
{
const struct section *section = wctx->section[wctx->level];
if (section->show_all_entries || av_dict_get(section->entries_to_show, key, NULL, 0)) {
if ((wctx->section[wctx->level]->id != SECTION_ID_FORMAT
&& wctx->section[wctx->level]->id != SECTION_ID_FORMAT_TAGS) ||
!fmt_entries_to_show || av_dict_get(fmt_entries_to_show, key, NULL, 0)) {
wctx->writer->print_integer(wctx, key, val);
wctx->nb_item[wctx->level]++;
}
@@ -396,12 +372,11 @@ static inline void writer_print_integer(WriterContext *wctx,
static inline void writer_print_string(WriterContext *wctx,
const char *key, const char *val, int opt)
{
const struct section *section = wctx->section[wctx->level];
if (opt && !(wctx->writer->flags & WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS))
return;
if (section->show_all_entries || av_dict_get(section->entries_to_show, key, NULL, 0)) {
if ((wctx->section[wctx->level]->id != SECTION_ID_FORMAT
&& wctx->section[wctx->level]->id != SECTION_ID_FORMAT_TAGS) ||
!fmt_entries_to_show || av_dict_get(fmt_entries_to_show, key, NULL, 0)) {
wctx->writer->print_string(wctx, key, val);
wctx->nb_item[wctx->level]++;
}
@@ -516,7 +491,6 @@ typedef struct DefaultContext {
const AVClass *class;
int nokey;
int noprint_wrappers;
int nested_section[SECTION_MAX_NB_LEVELS];
} DefaultContext;
#define OFFSET(x) offsetof(DefaultContext, x)
@@ -549,17 +523,9 @@ static void default_print_section_header(WriterContext *wctx)
const struct section *parent_section = wctx->level ?
wctx->section[wctx->level-1] : NULL;
av_bprint_clear(&wctx->section_pbuf[wctx->level]);
if (parent_section &&
!(parent_section->flags & (SECTION_FLAG_IS_WRAPPER|SECTION_FLAG_IS_ARRAY))) {
def->nested_section[wctx->level] = 1;
av_bprintf(&wctx->section_pbuf[wctx->level], "%s%s:",
wctx->section_pbuf[wctx->level-1].str,
upcase_string(buf, sizeof(buf),
av_x_if_null(section->element_name, section->name)));
}
if (def->noprint_wrappers || def->nested_section[wctx->level])
if (def->noprint_wrappers ||
(parent_section &&
!(parent_section->flags & (SECTION_FLAG_IS_WRAPPER|SECTION_FLAG_IS_ARRAY))))
return;
if (!(section->flags & (SECTION_FLAG_IS_WRAPPER|SECTION_FLAG_IS_ARRAY)))
@@ -570,9 +536,13 @@ static void default_print_section_footer(WriterContext *wctx)
{
DefaultContext *def = wctx->priv;
const struct section *section = wctx->section[wctx->level];
const struct section *parent_section = wctx->level ?
wctx->section[wctx->level-1] : NULL;
char buf[32];
if (def->noprint_wrappers || def->nested_section[wctx->level])
if (def->noprint_wrappers ||
(parent_section &&
!(parent_section->flags & (SECTION_FLAG_IS_WRAPPER|SECTION_FLAG_IS_ARRAY))))
return;
if (!(section->flags & (SECTION_FLAG_IS_WRAPPER|SECTION_FLAG_IS_ARRAY)))
@@ -582,9 +552,11 @@ static void default_print_section_footer(WriterContext *wctx)
static void default_print_str(WriterContext *wctx, const char *key, const char *value)
{
DefaultContext *def = wctx->priv;
const struct section *section = wctx->section[wctx->level];
const char *key_prefix = !strcmp(section->name, "tags") ? "TAG:" : "";
if (!def->nokey)
printf("%s%s=", wctx->section_pbuf[wctx->level].str, key);
printf("%s%s=", key_prefix, key);
printf("%s\n", value);
}
@@ -593,7 +565,7 @@ static void default_print_int(WriterContext *wctx, const char *key, long long in
DefaultContext *def = wctx->priv;
if (!def->nokey)
printf("%s%s=", wctx->section_pbuf[wctx->level].str, key);
printf("%s=", key);
printf("%lld\n", value);
}
@@ -642,15 +614,15 @@ static const char *csv_escape_str(AVBPrint *dst, const char *src, const char sep
int needs_quoting = !!src[strcspn(src, meta_chars)];
if (needs_quoting)
av_bprint_chars(dst, '"', 1);
av_bprint_chars(dst, '\"', 1);
for (; *src; src++) {
if (*src == '"')
av_bprint_chars(dst, '"', 1);
av_bprint_chars(dst, '\"', 1);
av_bprint_chars(dst, *src, 1);
}
if (needs_quoting)
av_bprint_chars(dst, '"', 1);
av_bprint_chars(dst, '\"', 1);
return dst->str;
}
@@ -667,7 +639,6 @@ typedef struct CompactContext {
int print_section;
char *escape_mode_str;
const char * (*escape_str)(AVBPrint *dst, const char *src, const char sep, void *log_ctx);
int nested_section[SECTION_MAX_NB_LEVELS];
} CompactContext;
#undef OFFSET
@@ -687,7 +658,7 @@ static const AVOption compact_options[]= {
DEFINE_WRITER_CLASS(compact);
static av_cold int compact_init(WriterContext *wctx)
static av_cold int compact_init(WriterContext *wctx, const char *args)
{
CompactContext *compact = wctx->priv;
@@ -713,42 +684,37 @@ static void compact_print_section_header(WriterContext *wctx)
{
CompactContext *compact = wctx->priv;
const struct section *section = wctx->section[wctx->level];
const struct section *parent_section = wctx->level ?
wctx->section[wctx->level-1] : NULL;
av_bprint_clear(&wctx->section_pbuf[wctx->level]);
if (parent_section &&
!(parent_section->flags & (SECTION_FLAG_IS_WRAPPER|SECTION_FLAG_IS_ARRAY))) {
compact->nested_section[wctx->level] = 1;
av_bprintf(&wctx->section_pbuf[wctx->level], "%s%s:",
wctx->section_pbuf[wctx->level-1].str,
(char *)av_x_if_null(section->element_name, section->name));
if (!strcmp(section->name, "tags"))
wctx->nb_item[wctx->level] = wctx->nb_item[wctx->level-1];
} else if (compact->print_section &&
else if (compact->print_section &&
!(section->flags & (SECTION_FLAG_IS_WRAPPER|SECTION_FLAG_IS_ARRAY)))
printf("%s%c", section->name, compact->item_sep);
printf("%s%c", section->name, compact->item_sep);
}
static void compact_print_section_footer(WriterContext *wctx)
{
CompactContext *compact = wctx->priv;
const struct section *section = wctx->section[wctx->level];
if (!compact->nested_section[wctx->level] &&
!(wctx->section[wctx->level]->flags & (SECTION_FLAG_IS_WRAPPER|SECTION_FLAG_IS_ARRAY)))
if (strcmp(section->name, "tags") &&
!(section->flags & (SECTION_FLAG_IS_WRAPPER|SECTION_FLAG_IS_ARRAY)))
printf("\n");
}
static void compact_print_str(WriterContext *wctx, const char *key, const char *value)
{
CompactContext *compact = wctx->priv;
const struct section *section = wctx->section[wctx->level];
const char *key_prefix = !strcmp(section->name, "tags") ? "tag:" : "";
AVBPrint buf;
if (wctx->nb_item[wctx->level]) printf("%c", compact->item_sep);
if (!compact->nokey)
printf("%s%s=", wctx->section_pbuf[wctx->level].str, key);
printf("%s%s=", key_prefix, key);
av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
printf("%s", compact->escape_str(&buf, value, compact->item_sep, wctx));
av_bprint_finalize(&buf, NULL);
}
static void compact_print_int(WriterContext *wctx, const char *key, long long int value)
@@ -757,7 +723,7 @@ static void compact_print_int(WriterContext *wctx, const char *key, long long in
if (wctx->nb_item[wctx->level]) printf("%c", compact->item_sep);
if (!compact->nokey)
printf("%s%s=", wctx->section_pbuf[wctx->level].str, key);
printf("%s=", key);
printf("%lld", value);
}
@@ -808,6 +774,7 @@ static const Writer csv_writer = {
typedef struct FlatContext {
const AVClass *class;
AVBPrint section_header[SECTION_MAX_NB_LEVELS];
const char *sep_str;
char sep;
int hierarchical;
@@ -826,9 +793,10 @@ static const AVOption flat_options[]= {
DEFINE_WRITER_CLASS(flat);
static av_cold int flat_init(WriterContext *wctx)
static av_cold int flat_init(WriterContext *wctx, const char *args)
{
FlatContext *flat = wctx->priv;
int i;
if (strlen(flat->sep_str) != 1) {
av_log(wctx, AV_LOG_ERROR, "Item separator '%s' specified, but must contain a single character\n",
@@ -837,9 +805,20 @@ static av_cold int flat_init(WriterContext *wctx)
}
flat->sep = flat->sep_str[0];
for (i = 0; i < SECTION_MAX_NB_LEVELS; i++)
av_bprint_init(&flat->section_header[i], 1, AV_BPRINT_SIZE_UNLIMITED);
return 0;
}
static void flat_uninit(WriterContext *wctx)
{
FlatContext *flat = wctx->priv;
int i;
for (i = 0; i < SECTION_MAX_NB_LEVELS; i++)
av_bprint_finalize(&flat->section_header[i], NULL);
}
static const char *flat_escape_key_str(AVBPrint *dst, const char *src, const char sep)
{
const char *p;
@@ -876,32 +855,36 @@ static const char *flat_escape_value_str(AVBPrint *dst, const char *src)
static void flat_print_section_header(WriterContext *wctx)
{
FlatContext *flat = wctx->priv;
AVBPrint *buf = &wctx->section_pbuf[wctx->level];
const struct section *section = wctx->section[wctx->level];
const struct section *parent_section = wctx->level ?
wctx->section[wctx->level-1] : NULL;
AVBPrint *buf = &flat->section_header[wctx->level];
int i;
/* build section header */
av_bprint_clear(buf);
if (!parent_section)
return;
av_bprintf(buf, "%s", wctx->section_pbuf[wctx->level-1].str);
for (i = 1; i <= wctx->level; i++) {
if (flat->hierarchical ||
!(wctx->section[i]->flags & (SECTION_FLAG_IS_ARRAY|SECTION_FLAG_IS_WRAPPER)))
av_bprintf(buf, "%s%s", wctx->section[i]->name, flat->sep_str);
}
}
if (flat->hierarchical ||
!(section->flags & (SECTION_FLAG_IS_ARRAY|SECTION_FLAG_IS_WRAPPER))) {
av_bprintf(buf, "%s%s", wctx->section[wctx->level]->name, flat->sep_str);
static void flat_print_key_prefix(WriterContext *wctx)
{
FlatContext *flat = wctx->priv;
const struct section *parent_section = wctx->section[wctx->level-1];
if (parent_section->flags & SECTION_FLAG_IS_ARRAY) {
int n = parent_section->id == SECTION_ID_PACKETS_AND_FRAMES ?
wctx->nb_section_packet_frame : wctx->nb_item[wctx->level-1];
av_bprintf(buf, "%d%s", n, flat->sep_str);
}
printf("%s", flat->section_header[wctx->level].str);
if (parent_section->flags & SECTION_FLAG_IS_ARRAY) {
int n = parent_section->id == SECTION_ID_PACKETS_AND_FRAMES ?
wctx->nb_section_packet_frame : wctx->nb_item[wctx->level-1];
printf("%d%s", n, flat->sep_str);
}
}
static void flat_print_int(WriterContext *wctx, const char *key, long long int value)
{
printf("%s%s=%lld\n", wctx->section_pbuf[wctx->level].str, key, value);
flat_print_key_prefix(wctx);
printf("%s=%lld\n", key, value);
}
static void flat_print_str(WriterContext *wctx, const char *key, const char *value)
@@ -909,7 +892,7 @@ static void flat_print_str(WriterContext *wctx, const char *key, const char *val
FlatContext *flat = wctx->priv;
AVBPrint buf;
printf("%s", wctx->section_pbuf[wctx->level].str);
flat_print_key_prefix(wctx);
av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
printf("%s=", flat_escape_key_str(&buf, key, flat->sep));
av_bprint_clear(&buf);
@@ -921,6 +904,7 @@ static const Writer flat_writer = {
.name = "flat",
.priv_size = sizeof(FlatContext),
.init = flat_init,
.uninit = flat_uninit,
.print_section_header = flat_print_section_header,
.print_integer = flat_print_int,
.print_string = flat_print_str,
@@ -976,13 +960,14 @@ static char *ini_escape_str(AVBPrint *dst, const char *src)
static void ini_print_section_header(WriterContext *wctx)
{
INIContext *ini = wctx->priv;
AVBPrint *buf = &wctx->section_pbuf[wctx->level];
AVBPrint buf;
int i;
const struct section *section = wctx->section[wctx->level];
const struct section *parent_section = wctx->level ?
wctx->section[wctx->level-1] : NULL;
av_bprint_clear(buf);
if (!parent_section) {
av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
if (wctx->level == 0) {
printf("# ffprobe output\n\n");
return;
}
@@ -990,20 +975,21 @@ static void ini_print_section_header(WriterContext *wctx)
if (wctx->nb_item[wctx->level-1])
printf("\n");
av_bprintf(buf, "%s", wctx->section_pbuf[wctx->level-1].str);
if (ini->hierarchical ||
!(section->flags & (SECTION_FLAG_IS_ARRAY|SECTION_FLAG_IS_WRAPPER))) {
av_bprintf(buf, "%s%s", buf->str[0] ? "." : "", wctx->section[wctx->level]->name);
for (i = 1; i <= wctx->level; i++) {
if (ini->hierarchical ||
!(section->flags & (SECTION_FLAG_IS_ARRAY|SECTION_FLAG_IS_WRAPPER)))
av_bprintf(&buf, "%s%s", i>1 ? "." : "", wctx->section[i]->name);
}
if (parent_section->flags & SECTION_FLAG_IS_ARRAY) {
int n = parent_section->id == SECTION_ID_PACKETS_AND_FRAMES ?
wctx->nb_section_packet_frame : wctx->nb_item[wctx->level-1];
av_bprintf(buf, ".%d", n);
}
if (parent_section->flags & SECTION_FLAG_IS_ARRAY) {
int n = parent_section->id == SECTION_ID_PACKETS_AND_FRAMES ?
wctx->nb_section_packet_frame : wctx->nb_item[wctx->level-1];
av_bprintf(&buf, ".%d", n);
}
if (!(section->flags & (SECTION_FLAG_IS_ARRAY|SECTION_FLAG_IS_WRAPPER)))
printf("[%s]\n", buf->str);
printf("[%s]\n", buf.str);
av_bprint_finalize(&buf, NULL);
}
static void ini_print_str(WriterContext *wctx, const char *key, const char *value)
@@ -1052,7 +1038,7 @@ static const AVOption json_options[]= {
DEFINE_WRITER_CLASS(json);
static av_cold int json_init(WriterContext *wctx)
static av_cold int json_init(WriterContext *wctx, const char *args)
{
JSONContext *json = wctx->priv;
@@ -1106,13 +1092,13 @@ static void json_print_section_header(WriterContext *wctx)
json->indent_level++;
if (section->flags & SECTION_FLAG_IS_ARRAY) {
printf("\"%s\": [\n", buf.str);
} else if (parent_section && !(parent_section->flags & SECTION_FLAG_IS_ARRAY)) {
} else if (!(parent_section->flags & SECTION_FLAG_IS_ARRAY)) {
printf("\"%s\": {%s", buf.str, json->item_start_end);
} else {
printf("{%s", json->item_start_end);
/* this is required so the parser can distinguish between packets and frames */
if (parent_section && parent_section->id == SECTION_ID_PACKETS_AND_FRAMES) {
if (parent_section->id == SECTION_ID_PACKETS_AND_FRAMES) {
if (!json->compact)
JSON_INDENT();
printf("\"type\": \"%s\"%s", section->name, json->item_sep);
@@ -1217,7 +1203,7 @@ static const AVOption xml_options[] = {
DEFINE_WRITER_CLASS(xml);
static av_cold int xml_init(WriterContext *wctx)
static av_cold int xml_init(WriterContext *wctx, const char *args)
{
XMLContext *xml = wctx->priv;
@@ -1254,7 +1240,7 @@ static const char *xml_escape_str(AVBPrint *dst, const char *src, void *log_ctx)
case '&' : av_bprintf(dst, "%s", "&amp;"); break;
case '<' : av_bprintf(dst, "%s", "&lt;"); break;
case '>' : av_bprintf(dst, "%s", "&gt;"); break;
case '"' : av_bprintf(dst, "%s", "&quot;"); break;
case '\"': av_bprintf(dst, "%s", "&quot;"); break;
case '\'': av_bprintf(dst, "%s", "&apos;"); break;
default: av_bprint_chars(dst, *p, 1);
}
@@ -1288,12 +1274,13 @@ static void xml_print_section_header(WriterContext *wctx)
xml->within_tag = 0;
printf(">\n");
}
if (section->flags & SECTION_FLAG_HAS_VARIABLE_FIELDS) {
if (!strcmp(section->name, "tags")) {
xml->indent_level++;
} else {
if (parent_section && (parent_section->flags & SECTION_FLAG_IS_WRAPPER) &&
wctx->level && wctx->nb_item[wctx->level-1])
printf("\n");
if (!(parent_section->flags & SECTION_FLAG_IS_ARRAY) &&
(wctx->level && wctx->nb_item[wctx->level-1]))
printf("\n");
xml->indent_level++;
if (section->flags & SECTION_FLAG_IS_ARRAY) {
@@ -1316,7 +1303,7 @@ static void xml_print_section_footer(WriterContext *wctx)
xml->within_tag = 0;
printf("/>\n");
xml->indent_level--;
} else if (section->flags & SECTION_FLAG_HAS_VARIABLE_FIELDS) {
} else if (!strcmp(section->name, "tags")) {
xml->indent_level--;
} else {
XML_INDENT(); printf("</%s>\n", section->name);
@@ -1332,10 +1319,9 @@ static void xml_print_str(WriterContext *wctx, const char *key, const char *valu
av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
if (section->flags & SECTION_FLAG_HAS_VARIABLE_FIELDS) {
if (!strcmp(section->name, "tags")) {
XML_INDENT();
printf("<%s key=\"%s\"",
section->element_name, xml_escape_str(&buf, key, wctx));
printf("<tag key=\"%s\"", xml_escape_str(&buf, key, wctx));
av_bprint_clear(&buf);
printf(" value=\"%s\"/>\n", xml_escape_str(&buf, value, wctx));
} else {
@@ -1476,8 +1462,6 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
print_duration_time("pkt_duration_time", frame->pkt_duration, &stream->time_base);
if (frame->pkt_pos != -1) print_fmt ("pkt_pos", "%"PRId64, frame->pkt_pos);
else print_str_opt("pkt_pos", "N/A");
if (frame->pkt_size != -1) print_fmt ("pkt_size", "%d", av_frame_get_pkt_size(frame));
else print_str_opt("pkt_size", "N/A");
switch (stream->codec->codec_type) {
AVRational sar;
@@ -1568,16 +1552,14 @@ static void read_packets(WriterContext *w, AVFormatContext *fmt_ctx)
av_init_packet(&pkt);
while (!av_read_frame(fmt_ctx, &pkt)) {
if (selected_streams[pkt.stream_index]) {
if (do_read_packets) {
if (do_show_packets)
show_packet(w, fmt_ctx, &pkt, i++);
nb_streams_packets[pkt.stream_index]++;
}
if (do_read_frames) {
pkt1 = pkt;
while (pkt1.size && process_frame(w, fmt_ctx, &frame, &pkt1) > 0);
}
if (do_read_packets) {
if (do_show_packets)
show_packet(w, fmt_ctx, &pkt, i++);
nb_streams_packets[pkt.stream_index]++;
}
if (do_read_frames) {
pkt1 = pkt;
while (pkt1.size && process_frame(w, fmt_ctx, &frame, &pkt1) > 0);
}
av_free_packet(&pkt);
}
@@ -1713,28 +1695,6 @@ static void show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_i
if (do_show_data)
writer_print_data(w, "extradata", dec_ctx->extradata,
dec_ctx->extradata_size);
/* Print disposition information */
#define PRINT_DISPOSITION(flagname, name) do { \
print_int(name, !!(stream->disposition & AV_DISPOSITION_##flagname)); \
} while (0)
if (do_show_stream_disposition) {
writer_print_section_header(w, SECTION_ID_STREAM_DISPOSITION);
PRINT_DISPOSITION(DEFAULT, "default");
PRINT_DISPOSITION(DUB, "dub");
PRINT_DISPOSITION(ORIGINAL, "original");
PRINT_DISPOSITION(COMMENT, "comment");
PRINT_DISPOSITION(LYRICS, "lyrics");
PRINT_DISPOSITION(KARAOKE, "karaoke");
PRINT_DISPOSITION(FORCED, "forced");
PRINT_DISPOSITION(HEARING_IMPAIRED, "hearing_impaired");
PRINT_DISPOSITION(VISUAL_IMPAIRED, "visual_impaired");
PRINT_DISPOSITION(CLEAN_EFFECTS, "clean_effects");
PRINT_DISPOSITION(ATTACHED_PIC, "attached_pic");
writer_print_section_footer(w);
}
show_tags(w, stream->metadata, SECTION_ID_STREAM_TAGS);
writer_print_section_footer(w);
@@ -1747,8 +1707,7 @@ static void show_streams(WriterContext *w, AVFormatContext *fmt_ctx)
int i;
writer_print_section_header(w, SECTION_ID_STREAMS);
for (i = 0; i < fmt_ctx->nb_streams; i++)
if (selected_streams[i])
show_stream(w, fmt_ctx, i);
show_stream(w, fmt_ctx, i);
writer_print_section_footer(w);
}
@@ -1855,7 +1814,7 @@ static void close_input_file(AVFormatContext **ctx_ptr)
static int probe_file(WriterContext *wctx, const char *filename)
{
AVFormatContext *fmt_ctx;
int ret, i;
int ret;
int section_id;
do_read_frames = do_show_frames || do_count_frames;
@@ -1865,22 +1824,6 @@ static int probe_file(WriterContext *wctx, const char *filename)
if (ret >= 0) {
nb_streams_frames = av_calloc(fmt_ctx->nb_streams, sizeof(*nb_streams_frames));
nb_streams_packets = av_calloc(fmt_ctx->nb_streams, sizeof(*nb_streams_packets));
selected_streams = av_calloc(fmt_ctx->nb_streams, sizeof(*selected_streams));
for (i = 0; i < fmt_ctx->nb_streams; i++) {
if (stream_specifier) {
ret = avformat_match_stream_specifier(fmt_ctx,
fmt_ctx->streams[i],
stream_specifier);
if (ret < 0)
goto end;
else
selected_streams[i] = ret;
} else {
selected_streams[i] = 1;
}
}
if (do_read_frames || do_read_packets) {
if (do_show_frames && do_show_packets &&
wctx->writer->flags & WRITER_FLAG_PUT_PACKETS_AND_FRAMES_IN_SAME_CHAPTER)
@@ -1900,11 +1843,9 @@ static int probe_file(WriterContext *wctx, const char *filename)
if (do_show_format)
show_format(wctx, fmt_ctx);
end:
close_input_file(&fmt_ctx);
av_freep(&nb_streams_frames);
av_freep(&nb_streams_packets);
av_freep(&selected_streams);
}
return ret;
}
@@ -1944,7 +1885,6 @@ static void ffprobe_show_program_version(WriterContext *w)
print_int("minor", LIB##LIBNAME##_VERSION_MINOR); \
print_int("micro", LIB##LIBNAME##_VERSION_MICRO); \
print_int("version", version); \
print_str("ident", LIB##LIBNAME##_IDENT); \
writer_print_section_footer(w); \
} \
} while (0)
@@ -1973,101 +1913,11 @@ static int opt_format(void *optctx, const char *opt, const char *arg)
return 0;
}
static inline void mark_section_show_entries(SectionID section_id,
int show_all_entries, AVDictionary *entries)
{
struct section *section = &sections[section_id];
section->show_all_entries = show_all_entries;
if (show_all_entries) {
SectionID *id;
for (id = section->children_ids; *id != -1; id++)
mark_section_show_entries(*id, show_all_entries, entries);
} else {
av_dict_copy(&section->entries_to_show, entries, 0);
}
}
static int match_section(const char *section_name,
int show_all_entries, AVDictionary *entries)
{
int i, ret = 0;
for (i = 0; i < FF_ARRAY_ELEMS(sections); i++) {
const struct section *section = &sections[i];
if (!strcmp(section_name, section->name) ||
(section->unique_name && !strcmp(section_name, section->unique_name))) {
av_log(NULL, AV_LOG_DEBUG,
"'%s' matches section with unique name '%s'\n", section_name,
(char *)av_x_if_null(section->unique_name, section->name));
ret++;
mark_section_show_entries(section->id, show_all_entries, entries);
}
}
return ret;
}
static int opt_show_entries(void *optctx, const char *opt, const char *arg)
{
const char *p = arg;
int ret = 0;
while (*p) {
AVDictionary *entries = NULL;
char *section_name = av_get_token(&p, "=:");
int show_all_entries = 0;
if (!section_name) {
av_log(NULL, AV_LOG_ERROR,
"Missing section name for option '%s'\n", opt);
return AVERROR(EINVAL);
}
if (*p == '=') {
p++;
while (*p && *p != ':') {
char *entry = av_get_token(&p, ",:");
if (!entry)
break;
av_log(NULL, AV_LOG_VERBOSE,
"Adding '%s' to the entries to show in section '%s'\n",
entry, section_name);
av_dict_set(&entries, entry, "", AV_DICT_DONT_STRDUP_KEY);
if (*p == ',')
p++;
}
} else {
show_all_entries = 1;
}
ret = match_section(section_name, show_all_entries, entries);
if (ret == 0) {
av_log(NULL, AV_LOG_ERROR, "No match for section '%s'\n", section_name);
ret = AVERROR(EINVAL);
}
av_dict_free(&entries);
av_free(section_name);
if (ret <= 0)
break;
if (*p)
p++;
}
return ret;
}
static int opt_show_format_entry(void *optctx, const char *opt, const char *arg)
{
char *buf = av_asprintf("format=%s", arg);
int ret;
av_log(NULL, AV_LOG_WARNING,
"Option '%s' is deprecated, use '-show_entries format=%s' instead\n",
opt, arg);
ret = opt_show_entries(optctx, opt, buf);
av_free(buf);
return ret;
do_show_format = 1;
av_dict_set(&fmt_entries_to_show, arg, "", 0);
return 0;
}
static void opt_input_file(void *optctx, const char *arg)
@@ -2108,57 +1958,13 @@ static int opt_pretty(void *optctx, const char *opt, const char *arg)
return 0;
}
static void print_section(SectionID id, int level)
{
const SectionID *pid;
const struct section *section = &sections[id];
printf("%c%c%c",
section->flags & SECTION_FLAG_IS_WRAPPER ? 'W' : '.',
section->flags & SECTION_FLAG_IS_ARRAY ? 'A' : '.',
section->flags & SECTION_FLAG_HAS_VARIABLE_FIELDS ? 'V' : '.');
printf("%*c %s", level * 4, ' ', section->name);
if (section->unique_name)
printf("/%s", section->unique_name);
printf("\n");
for (pid = section->children_ids; *pid != -1; pid++)
print_section(*pid, level+1);
}
static int opt_sections(void *optctx, const char *opt, const char *arg)
{
printf("Sections:\n"
"W.. = Section is a wrapper (contains other sections, no local entries)\n"
".A. = Section contains an array of elements of the same type\n"
"..V = Section may contain a variable number of fields with variable keys\n"
"FLAGS NAME/UNIQUE_NAME\n"
"---\n");
print_section(SECTION_ID_ROOT, 0);
return 0;
}
static int opt_show_versions(const char *opt, const char *arg)
{
mark_section_show_entries(SECTION_ID_PROGRAM_VERSION, 1, NULL);
mark_section_show_entries(SECTION_ID_LIBRARY_VERSION, 1, NULL);
do_show_program_version = 1;
do_show_library_versions = 1;
return 0;
}
#define DEFINE_OPT_SHOW_SECTION(section, target_section_id) \
static int opt_show_##section(const char *opt, const char *arg) \
{ \
mark_section_show_entries(SECTION_ID_##target_section_id, 1, NULL); \
return 0; \
}
DEFINE_OPT_SHOW_SECTION(error, ERROR);
DEFINE_OPT_SHOW_SECTION(format, FORMAT);
DEFINE_OPT_SHOW_SECTION(frames, FRAMES);
DEFINE_OPT_SHOW_SECTION(library_versions, LIBRARY_VERSIONS);
DEFINE_OPT_SHOW_SECTION(packets, PACKETS);
DEFINE_OPT_SHOW_SECTION(program_version, PROGRAM_VERSION);
DEFINE_OPT_SHOW_SECTION(streams, STREAMS);
static const OptionDef real_options[] = {
#include "cmdutils_common_opts.h"
{ "f", HAS_ARG, {.func_arg = opt_format}, "force format", "format" },
@@ -2173,22 +1979,18 @@ static const OptionDef real_options[] = {
{ "print_format", OPT_STRING | HAS_ARG, {(void*)&print_format},
"set the output printing format (available formats are: default, compact, csv, flat, ini, json, xml)", "format" },
{ "of", OPT_STRING | HAS_ARG, {(void*)&print_format}, "alias for -print_format", "format" },
{ "select_streams", OPT_STRING | HAS_ARG, {(void*)&stream_specifier}, "select the specified streams", "stream_specifier" },
{ "sections", OPT_EXIT, {.func_arg = opt_sections}, "print sections structure and section information, and exit" },
{ "show_data", OPT_BOOL, {(void*)&do_show_data}, "show packets data" },
{ "show_error", 0, {(void*)&opt_show_error}, "show probing error" },
{ "show_format", 0, {(void*)&opt_show_format}, "show format/container info" },
{ "show_frames", 0, {(void*)&opt_show_frames}, "show frames info" },
{ "show_error", OPT_BOOL, {(void*)&do_show_error} , "show probing error" },
{ "show_format", OPT_BOOL, {&do_show_format} , "show format/container info" },
{ "show_frames", OPT_BOOL, {(void*)&do_show_frames} , "show frames info" },
{ "show_format_entry", HAS_ARG, {.func_arg = opt_show_format_entry},
"show a particular entry from the format/container info", "entry" },
{ "show_entries", HAS_ARG, {.func_arg = opt_show_entries},
"show a set of specified entries", "entry_list" },
{ "show_packets", 0, {(void*)&opt_show_packets}, "show packets info" },
{ "show_streams", 0, {(void*)&opt_show_streams}, "show streams info" },
{ "show_packets", OPT_BOOL, {&do_show_packets}, "show packets info" },
{ "show_streams", OPT_BOOL, {&do_show_streams}, "show streams info" },
{ "count_frames", OPT_BOOL, {(void*)&do_count_frames}, "count the number of frames per stream" },
{ "count_packets", OPT_BOOL, {(void*)&do_count_packets}, "count the number of packets per stream" },
{ "show_program_version", 0, {(void*)&opt_show_program_version}, "show ffprobe version" },
{ "show_library_versions", 0, {(void*)&opt_show_library_versions}, "show library versions" },
{ "show_program_version", OPT_BOOL, {(void*)&do_show_program_version}, "show ffprobe version" },
{ "show_library_versions", OPT_BOOL, {(void*)&do_show_library_versions}, "show library versions" },
{ "show_versions", 0, {(void*)&opt_show_versions}, "show program and library versions" },
{ "show_private_data", OPT_BOOL, {(void*)&show_private_data}, "show private data" },
{ "private", OPT_BOOL, {(void*)&show_private_data}, "same as show_private_data" },
@@ -2198,34 +2000,15 @@ static const OptionDef real_options[] = {
{ NULL, },
};
static inline int check_section_show_entries(int section_id)
{
int *id;
struct section *section = &sections[section_id];
if (sections[section_id].show_all_entries || sections[section_id].entries_to_show)
return 1;
for (id = section->children_ids; *id != -1; id++)
if (check_section_show_entries(*id))
return 1;
return 0;
}
#define SET_DO_SHOW(id, varname) do { \
if (check_section_show_entries(SECTION_ID_##id)) \
do_show_##varname = 1; \
} while (0)
int main(int argc, char **argv)
{
const Writer *w;
WriterContext *wctx;
char *buf;
char *w_name = NULL, *w_args = NULL;
int ret, i;
int ret;
av_log_set_flags(AV_LOG_SKIP_REPEATED);
atexit(exit_program);
options = real_options;
parse_loglevel(argc, argv, options);
av_register_all();
@@ -2238,16 +2021,6 @@ int main(int argc, char **argv)
show_banner(argc, argv, options);
parse_options(NULL, argc, argv, options, opt_input_file);
/* mark things to show, based on -show_entries */
SET_DO_SHOW(ERROR, error);
SET_DO_SHOW(FORMAT, format);
SET_DO_SHOW(FRAMES, frames);
SET_DO_SHOW(LIBRARY_VERSIONS, library_versions);
SET_DO_SHOW(PACKETS, packets);
SET_DO_SHOW(PROGRAM_VERSION, program_version);
SET_DO_SHOW(STREAMS, streams);
SET_DO_SHOW(STREAM_DISPOSITION, stream_disposition);
if (do_bitexact && (do_show_program_version || do_show_library_versions)) {
av_log(NULL, AV_LOG_ERROR,
"-bitexact and -show_program_version or -show_library_versions "
@@ -2260,10 +2033,6 @@ int main(int argc, char **argv)
if (!print_format)
print_format = av_strdup("default");
if (!print_format) {
ret = AVERROR(ENOMEM);
goto end;
}
w_name = av_strtok(print_format, "=", &buf);
w_args = buf;
@@ -2304,8 +2073,7 @@ end:
av_freep(&print_format);
uninit_opts();
for (i = 0; i < FF_ARRAY_ELEMS(sections); i++)
av_dict_free(&(sections[i].entries_to_show));
av_dict_free(&fmt_entries_to_show);
avformat_network_deinit();

ffserver.c

@@ -29,7 +29,6 @@
#endif
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "libavformat/avformat.h"
// FIXME those are internal headers, ffserver _really_ shouldn't use them
#include "libavformat/ffm.h"
@@ -45,7 +44,6 @@
#include "libavutil/avstring.h"
#include "libavutil/lfg.h"
#include "libavutil/dict.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/random_seed.h"
#include "libavutil/parseutils.h"
@@ -307,10 +305,12 @@ static int rtp_new_av_stream(HTTPContext *c,
HTTPContext *rtsp_c);
static const char *my_program_name;
static const char *my_program_dir;
static const char *config_filename = "/etc/ffserver.conf";
static int ffserver_debug;
static int ffserver_daemon;
static int no_launch;
static int need_to_start_children;
@@ -328,48 +328,12 @@ static AVLFG random_state;
static FILE *logfile = NULL;
static void htmlstrip(char *s) {
while (s && *s) {
s += strspn(s, "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ,. ");
if (*s)
*s++ = '?';
}
}
static int64_t ffm_read_write_index(int fd)
{
uint8_t buf[8];
if (lseek(fd, 8, SEEK_SET) < 0)
return AVERROR(EIO);
if (read(fd, buf, 8) != 8)
return AVERROR(EIO);
return AV_RB64(buf);
}
static int ffm_write_write_index(int fd, int64_t pos)
{
uint8_t buf[8];
int i;
for(i=0;i<8;i++)
buf[i] = (pos >> (56 - i * 8)) & 0xff;
if (lseek(fd, 8, SEEK_SET) < 0)
return AVERROR(EIO);
if (write(fd, buf, 8) != 8)
return AVERROR(EIO);
return 8;
}
static void ffm_set_write_index(AVFormatContext *s, int64_t pos,
int64_t file_size)
{
FFMContext *ffm = s->priv_data;
ffm->write_index = pos;
ffm->file_size = file_size;
}
/* FIXME: make ffserver work with IPv6 */
void av_noreturn exit_program(int ret)
{
exit(ret);
}
/* resolve host with also IP address parsing */
static int resolve_host(struct in_addr *sin_addr, const char *hostname)
{
@@ -529,12 +493,19 @@ static void start_children(FFStream *feed)
close(i);
if (!ffserver_debug) {
if (!freopen("/dev/null", "r", stdin))
http_log("failed to redirect STDIN to /dev/null\n;");
if (!freopen("/dev/null", "w", stdout))
http_log("failed to redirect STDOUT to /dev/null\n;");
if (!freopen("/dev/null", "w", stderr))
http_log("failed to redirect STDERR to /dev/null\n;");
i = open("/dev/null", O_RDWR);
if (i != -1) {
dup2(i, 0);
dup2(i, 1);
dup2(i, 2);
close(i);
}
}
/* This is needed to make relative pathnames work */
if (chdir(my_program_dir) < 0) {
http_log("chdir failed\n");
exit(1);
}
signal(SIGPIPE, SIG_DFL);
@@ -586,7 +557,7 @@ static void start_multicast(void)
FFStream *stream;
char session_id[32];
HTTPContext *rtp_c;
struct sockaddr_in dest_addr = {0};
struct sockaddr_in dest_addr;
int default_port, stream_index;
default_port = 6000;
@@ -801,7 +772,7 @@ static void http_send_too_busy_reply(int fd)
"\r\n"
"<html><head><title>Too busy</title></head><body>\r\n"
"<p>The server is too busy to serve your request at this time.</p>\r\n"
"<p>The number of current connections is %u, and this exceeds the limit of %u.</p>\r\n"
"<p>The number of current connections is %d, and this exceeds the limit of %d.</p>\r\n"
"</body></html>\r\n",
nb_connections, nb_max_connections);
av_assert0(len < sizeof(buffer));
@@ -812,8 +783,7 @@ static void http_send_too_busy_reply(int fd)
static void new_connection(int server_fd, int is_rtsp)
{
struct sockaddr_in from_addr;
socklen_t len;
int fd;
int fd, len;
HTTPContext *c = NULL;
len = sizeof(from_addr);
@@ -929,8 +899,6 @@ static void close_connection(HTTPContext *c)
for(i=0; i<ctx->nb_streams; i++)
av_free(ctx->streams[i]);
av_freep(&ctx->streams);
av_freep(&ctx->priv_data);
if (c->stream && !c->post && c->stream->stream_type == STREAM_TYPE_LIVE)
current_bandwidth -= c->stream->bandwidth;
@@ -1498,8 +1466,7 @@ enum RedirType {
/* parse http request and prepare header */
static int http_parse_request(HTTPContext *c)
{
const char *p;
char *p1;
char *p;
enum RedirType redir_type;
char cmd[32];
char info[1024], filename[1024];
@@ -1510,10 +1477,10 @@ static int http_parse_request(HTTPContext *c)
FFStream *stream;
int i;
char ratebuf[32];
const char *useragent = 0;
char *useragent = 0;
p = c->buffer;
get_word(cmd, sizeof(cmd), &p);
get_word(cmd, sizeof(cmd), (const char **)&p);
av_strlcpy(c->method, cmd, sizeof(c->method));
if (!strcmp(cmd, "GET"))
@@ -1523,7 +1490,7 @@ static int http_parse_request(HTTPContext *c)
else
return -1;
get_word(url, sizeof(url), &p);
get_word(url, sizeof(url), (const char **)&p);
av_strlcpy(c->url, url, sizeof(c->url));
get_word(protocol, sizeof(protocol), (const char **)&p);
@@ -1536,10 +1503,10 @@ static int http_parse_request(HTTPContext *c)
http_log("%s - - New connection: %s %s\n", inet_ntoa(c->from_addr.sin_addr), cmd, url);
/* find the filename and the optional info string in the request */
p1 = strchr(url, '?');
if (p1) {
av_strlcpy(info, p1, sizeof(info));
*p1 = '\0';
p = strchr(url, '?');
if (p) {
av_strlcpy(info, p, sizeof(info));
*p = '\0';
} else
info[0] = '\0';
@@ -1658,7 +1625,7 @@ static int http_parse_request(HTTPContext *c)
}
if (redir_type != REDIR_NONE) {
const char *hostinfo = 0;
char *hostinfo = 0;
for (p = c->buffer; *p && *p != '\r' && *p != '\n'; ) {
if (av_strncasecmp(p, "Host:", 5) == 0) {
@@ -1740,8 +1707,7 @@ static int http_parse_request(HTTPContext *c)
case REDIR_SDP:
{
uint8_t *sdp_data;
int sdp_data_size;
socklen_t len;
int sdp_data_size, len;
struct sockaddr_in my_addr;
snprintf(q, c->buffer_size,
@@ -1792,7 +1758,7 @@ static int http_parse_request(HTTPContext *c)
if (!stream->is_feed) {
/* However it might be a status report from WMP! Let us log the
* data as it might come in handy one day. */
const char *logline = 0;
char *logline = 0;
int client_id = 0;
for (p = c->buffer; *p && *p != '\r' && *p != '\n'; ) {
@@ -1895,7 +1861,6 @@ static int http_parse_request(HTTPContext *c)
send_error:
c->http_error = 404;
q = c->buffer;
htmlstrip(msg);
snprintf(q, c->buffer_size,
"HTTP/1.0 404 Not Found\r\n"
"Content-type: text/html\r\n"
@@ -2054,7 +2019,7 @@ static void compute_status(HTTPContext *c)
if (stream->pid) {
avio_printf(pb, "Running as pid %d.\n", stream->pid);
#if defined(linux)
#if defined(linux) && !defined(CONFIG_NOCUTILS)
{
FILE *pid_stat;
char ps_cmd[64];
@@ -2069,7 +2034,7 @@ static void compute_status(HTTPContext *c)
char cpuperc[10];
char cpuused[64];
if (fscanf(pid_stat, "%9s %63s", cpuperc,
if (fscanf(pid_stat, "%10s %64s", cpuperc,
cpuused) == 2) {
avio_printf(pb, "Currently using %s%% of the cpu. Total time used %s.\n",
cpuperc, cpuused);
@@ -2724,6 +2689,8 @@ static int http_receive_data(HTTPContext *c)
/* a packet has been received : write it in the store, except
if header */
if (c->data_count > FFM_PACKET_SIZE) {
// printf("writing pos=0x%"PRIx64" size=0x%"PRIx64"\n", feed->feed_write_index, feed->feed_size);
/* XXX: use llseek or url_seek */
lseek(c->feed_fd, feed->feed_write_index, SEEK_SET);
if (write(c->feed_fd, c->buffer, FFM_PACKET_SIZE) < 0) {
@@ -2970,14 +2937,12 @@ static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
{
AVFormatContext *avc;
AVStream *avs = NULL;
AVOutputFormat *rtp_format = av_guess_format("rtp", NULL, NULL);
int i;
avc = avformat_alloc_context();
if (avc == NULL || !rtp_format) {
if (avc == NULL) {
return -1;
}
avc->oformat = rtp_format;
av_dict_set(&avc->metadata, "title",
stream->title[0] ? stream->title : "No Title", 0);
avc->nb_streams = stream->nb_streams;
@@ -3027,8 +2992,7 @@ static void rtsp_cmd_describe(HTTPContext *c, const char *url)
char path1[1024];
const char *path;
uint8_t *content;
int content_length;
socklen_t len;
int content_length, len;
struct sockaddr_in my_addr;
/* find which url is asked */
@@ -3517,9 +3481,6 @@ static AVStream *add_av_stream1(FFStream *stream, AVCodecContext *codec, int cop
{
AVStream *fst;
if(stream->nb_streams >= FF_ARRAY_ELEMS(stream->streams))
return NULL;
fst = av_mallocz(sizeof(AVStream));
if (!fst)
return NULL;
@@ -3527,7 +3488,7 @@ static AVStream *add_av_stream1(FFStream *stream, AVCodecContext *codec, int cop
fst->codec = avcodec_alloc_context3(NULL);
memcpy(fst->codec, codec, sizeof(AVCodecContext));
if (codec->extradata_size) {
fst->codec->extradata = av_mallocz(codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
fst->codec->extradata = av_malloc(codec->extradata_size);
memcpy(fst->codec->extradata, codec->extradata,
codec->extradata_size);
}
@@ -3606,8 +3567,6 @@ static void extract_mpeg4_header(AVFormatContext *infile)
AVStream *st;
const uint8_t *p;
infile->flags |= AVFMT_FLAG_NOFILLIN | AVFMT_FLAG_NOPARSE;
mpeg4_count = 0;
for(i=0;i<infile->nb_streams;i++) {
st = infile->streams[i];
@@ -3621,7 +3580,7 @@ static void extract_mpeg4_header(AVFormatContext *infile)
printf("MPEG4 without extra data: trying to find header in %s\n", infile->filename);
while (mpeg4_count > 0) {
if (av_read_frame(infile, &pkt) < 0)
if (av_read_packet(infile, &pkt) < 0)
break;
st = infile->streams[pkt.stream_index];
if (st->codec->codec_id == AV_CODEC_ID_MPEG4 &&
@@ -3636,7 +3595,7 @@ static void extract_mpeg4_header(AVFormatContext *infile)
p[2] == 0x01 && p[3] == 0xb6) {
size = p - pkt.data;
// av_hex_dump_log(infile, AV_LOG_DEBUG, pkt.data, size);
st->codec->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
st->codec->extradata = av_malloc(size);
st->codec->extradata_size = size;
memcpy(st->codec->extradata, pkt.data, size);
break;
@@ -3869,9 +3828,6 @@ static void add_codec(FFStream *stream, AVCodecContext *av)
{
AVStream *st;
if(stream->nb_streams >= FF_ARRAY_ELEMS(stream->streams))
return;
/* compute default parameters */
switch(av->codec_type) {
case AVMEDIA_TYPE_AUDIO:
@@ -4129,7 +4085,7 @@ static int parse_ffconfig(const char *filename)
ERROR("%s:%d: Invalid host/IP address: %s\n", arg);
}
} else if (!av_strcasecmp(cmd, "NoDaemon")) {
// do nothing here, its the default now
ffserver_daemon = 0;
} else if (!av_strcasecmp(cmd, "RTSPPort")) {
get_arg(arg, sizeof(arg), &p);
val = atoi(arg);
@@ -4216,7 +4172,10 @@ static int parse_ffconfig(const char *filename)
feed->child_argv[i] = av_strdup(arg);
}
feed->child_argv[i] = av_asprintf("http://%s:%d/%s",
feed->child_argv[i] = av_malloc(30 + strlen(feed->filename));
snprintf(feed->child_argv[i], 30+strlen(feed->filename),
"http://%s:%d/%s",
(my_http_addr.sin_addr.s_addr == INADDR_ANY) ? "127.0.0.1" :
inet_ntoa(my_http_addr.sin_addr),
ntohs(my_http_addr.sin_port), feed->filename);
@@ -4278,7 +4237,7 @@ static int parse_ffconfig(const char *filename)
stream = av_mallocz(sizeof(FFStream));
get_arg(stream->filename, sizeof(stream->filename), &p);
q = strrchr(stream->filename, '>');
if (q)
if (*q)
*q = '\0';
for (s = first_stream; s; s = s->next) {
@@ -4700,6 +4659,7 @@ static void handle_child_exit(int sig)
static void opt_debug(void)
{
ffserver_debug = 1;
ffserver_daemon = 0;
logfilename[0] = '-';
}
@@ -4730,6 +4690,8 @@ int main(int argc, char **argv)
show_banner(argc, argv, options);
my_program_name = argv[0];
my_program_dir = getcwd(0, 0);
ffserver_daemon = 1;
parse_options(NULL, argc, argv, options, NULL);
@@ -4761,9 +4723,37 @@ int main(int argc, char **argv)
compute_bandwidth();
/* put the process in background and detach it from its TTY */
if (ffserver_daemon) {
int pid;
pid = fork();
if (pid < 0) {
perror("fork");
exit(1);
} else if (pid > 0) {
/* parent : exit */
exit(0);
} else {
/* child */
setsid();
close(0);
open("/dev/null", O_RDWR);
if (strcmp(logfilename, "-") != 0) {
close(1);
dup(0);
}
close(2);
dup(0);
}
}
/* signal init */
signal(SIGPIPE, SIG_IGN);
if (ffserver_daemon)
chdir("/");
if (http_server() < 0) {
http_log("Could not start server\n");
exit(1);

libavcodec/012v.c

@@ -1,172 +0,0 @@
/*
* 012v decoder
*
* Copyright (C) 2012 Carl Eugen Hoyos
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avcodec.h"
#include "internal.h"
#include "libavutil/intreadwrite.h"
static av_cold int zero12v_decode_init(AVCodecContext *avctx)
{
avctx->pix_fmt = PIX_FMT_YUV422P16;
avctx->bits_per_raw_sample = 10;
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
if (avctx->codec_tag == MKTAG('a', '1', '2', 'v'))
av_log_ask_for_sample(avctx, "Samples with actual transparency needed\n");
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
return 0;
}
static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
int line = 0, ret;
const int width = avctx->width;
AVFrame *pic = avctx->coded_frame;
uint16_t *y, *u, *v;
const uint8_t *line_end, *src = avpkt->data;
int stride = avctx->width * 8 / 3;
if (pic->data[0])
avctx->release_buffer(avctx, pic);
if (width == 1) {
av_log(avctx, AV_LOG_ERROR, "Width 1 not supported.\n");
return AVERROR_INVALIDDATA;
}
if (avpkt->size < avctx->height * stride) {
av_log(avctx, AV_LOG_ERROR, "Packet too small: %d instead of %d\n",
avpkt->size, avctx->height * stride);
return AVERROR_INVALIDDATA;
}
pic->reference = 0;
if ((ret = ff_get_buffer(avctx, pic)) < 0)
return ret;
y = (uint16_t *)pic->data[0];
u = (uint16_t *)pic->data[1];
v = (uint16_t *)pic->data[2];
line_end = avpkt->data + stride;
while (line++ < avctx->height) {
while (1) {
uint32_t t = AV_RL32(src);
src += 4;
*u++ = t << 6 & 0xFFC0;
*y++ = t >> 4 & 0xFFC0;
*v++ = t >> 14 & 0xFFC0;
if (src >= line_end - 1) {
*y = 0x80;
src++;
line_end += stride;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
break;
}
t = AV_RL32(src);
src += 4;
*y++ = t << 6 & 0xFFC0;
*u++ = t >> 4 & 0xFFC0;
*y++ = t >> 14 & 0xFFC0;
if (src >= line_end - 2) {
if (!(width & 1)) {
*y = 0x80;
src += 2;
}
line_end += stride;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
break;
}
t = AV_RL32(src);
src += 4;
*v++ = t << 6 & 0xFFC0;
*y++ = t >> 4 & 0xFFC0;
*u++ = t >> 14 & 0xFFC0;
if (src >= line_end - 1) {
*y = 0x80;
src++;
line_end += stride;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
break;
}
t = AV_RL32(src);
src += 4;
*y++ = t << 6 & 0xFFC0;
*v++ = t >> 4 & 0xFFC0;
*y++ = t >> 14 & 0xFFC0;
if (src >= line_end - 2) {
if (width & 1) {
*y = 0x80;
src += 2;
}
line_end += stride;
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
break;
}
}
}
*got_frame = 1;
*(AVFrame*)data= *avctx->coded_frame;
return avpkt->size;
}
static av_cold int zero12v_decode_close(AVCodecContext *avctx)
{
AVFrame *pic = avctx->coded_frame;
if (pic->data[0])
avctx->release_buffer(avctx, pic);
av_freep(&avctx->coded_frame);
return 0;
}
AVCodec ff_zero12v_decoder = {
.name = "012v",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_012V,
.init = zero12v_decode_init,
.close = zero12v_decode_close,
.decode = zero12v_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
};

libavcodec/4xm.c

@@ -29,7 +29,6 @@
#include "bytestream.h"
#include "dsputil.h"
#include "get_bits.h"
#include "internal.h"
#include "libavutil/avassert.h"
@@ -328,12 +327,12 @@ static inline void mcdc(uint16_t *dst, const uint16_t *src, int log2w,
}
break;
default:
av_assert0(0);
av_assert2(0);
}
}
static int decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
int log2w, int log2h, int stride)
static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
int log2w, int log2h, int stride)
{
const int index = size2index[log2h][log2w];
const int h = 1 << log2h;
@@ -342,82 +341,66 @@ static int decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
BLOCK_TYPE_VLC_BITS, 1);
uint16_t *start = (uint16_t *)f->last_picture.data[0];
uint16_t *end = start + stride * (f->avctx->height - h + 1) - (1 << log2w);
int ret;
av_assert0(code >= 0 && code <= 6 && log2w >= 0);
av_assert2(code >= 0 && code <= 6);
if (code == 0) {
if (bytestream2_get_bytes_left(&f->g) < 1) {
if (f->g.buffer_end - f->g.buffer < 1) {
av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n");
return AVERROR_INVALIDDATA;
return;
}
src += f->mv[bytestream2_get_byteu(&f->g)];
src += f->mv[bytestream2_get_byte(&f->g)];
if (start > src || src > end) {
av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
return AVERROR_INVALIDDATA;
return;
}
mcdc(dst, src, log2w, h, stride, 1, 0);
} else if (code == 1) {
log2h--;
if ((ret = decode_p_block(f, dst, src, log2w, log2h, stride)) < 0)
return ret;
if ((ret = decode_p_block(f, dst + (stride << log2h),
src + (stride << log2h),
log2w, log2h, stride)) < 0)
return ret;
decode_p_block(f, dst, src, log2w, log2h, stride);
decode_p_block(f, dst + (stride << log2h),
src + (stride << log2h), log2w, log2h, stride);
} else if (code == 2) {
log2w--;
if ((ret = decode_p_block(f, dst , src, log2w, log2h, stride)) < 0)
return ret;
if ((ret = decode_p_block(f, dst + (1 << log2w),
src + (1 << log2w),
log2w, log2h, stride)) < 0)
return ret;
decode_p_block(f, dst , src, log2w, log2h, stride);
decode_p_block(f, dst + (1 << log2w),
src + (1 << log2w), log2w, log2h, stride);
} else if (code == 3 && f->version < 2) {
if (start > src || src > end) {
av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
return AVERROR_INVALIDDATA;
}
mcdc(dst, src, log2w, h, stride, 1, 0);
} else if (code == 4) {
if (bytestream2_get_bytes_left(&f->g) < 1) {
if (f->g.buffer_end - f->g.buffer < 1) {
av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n");
return AVERROR_INVALIDDATA;
return;
}
src += f->mv[bytestream2_get_byteu(&f->g)];
src += f->mv[bytestream2_get_byte(&f->g)];
if (start > src || src > end) {
av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
return AVERROR_INVALIDDATA;
return;
}
if (bytestream2_get_bytes_left(&f->g2) < 2){
if (f->g2.buffer_end - f->g2.buffer < 1){
av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
return AVERROR_INVALIDDATA;
return;
}
mcdc(dst, src, log2w, h, stride, 1, bytestream2_get_le16u(&f->g2));
mcdc(dst, src, log2w, h, stride, 1, bytestream2_get_le16(&f->g2));
} else if (code == 5) {
if (bytestream2_get_bytes_left(&f->g2) < 2) {
if (f->g2.buffer_end - f->g2.buffer < 1) {
av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
return AVERROR_INVALIDDATA;
return;
}
if (start > src || src > end) {
av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
return AVERROR_INVALIDDATA;
}
mcdc(dst, src, log2w, h, stride, 0, bytestream2_get_le16u(&f->g2));
mcdc(dst, src, log2w, h, stride, 0, bytestream2_get_le16(&f->g2));
} else if (code == 6) {
if (bytestream2_get_bytes_left(&f->g2) < 4) {
if (f->g2.buffer_end - f->g2.buffer < 2) {
av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n");
return AVERROR_INVALIDDATA;
return;
}
if (log2w) {
dst[0] = bytestream2_get_le16u(&f->g2);
dst[1] = bytestream2_get_le16u(&f->g2);
dst[0] = bytestream2_get_le16(&f->g2);
dst[1] = bytestream2_get_le16(&f->g2);
} else {
dst[0] = bytestream2_get_le16u(&f->g2);
dst[stride] = bytestream2_get_le16u(&f->g2);
dst[0] = bytestream2_get_le16(&f->g2);
dst[stride] = bytestream2_get_le16(&f->g2);
}
}
return 0;
}
static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
@@ -430,20 +413,8 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
const int stride = f->current_picture.linesize[0] >> 1;
unsigned int bitstream_size, bytestream_size, wordstream_size, extra,
bytestream_offset, wordstream_offset;
int ret;
if (!f->last_picture.data[0]) {
if ((ret = ff_get_buffer(f->avctx, &f->last_picture)) < 0) {
av_log(f->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
for (y=0; y<f->avctx->height; y++)
memset(f->last_picture.data[0] + y*f->last_picture.linesize[0], 0, 2*f->avctx->width);
}
if (f->version > 1) {
if (length < 20)
return AVERROR_INVALIDDATA;
extra = 20;
if (length < extra)
return -1;
@@ -457,7 +428,7 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
bytestream_size = FFMAX(length - bitstream_size - wordstream_size, 0);
}
if (bitstream_size > length || bitstream_size >= INT_MAX/8 ||
if (bitstream_size > length ||
bytestream_size > length - bitstream_size ||
wordstream_size > length - bytestream_size - bitstream_size ||
extra > length - bytestream_size - bitstream_size - wordstream_size) {
@@ -487,8 +458,7 @@ static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length)
for (y = 0; y < height; y += 8) {
for (x = 0; x < width; x += 8)
if ((ret = decode_p_block(f, dst + x, src + x, 3, 3, stride)) < 0)
return ret;
decode_p_block(f, dst + x, src + x, 3, 3, stride);
src += 8 * stride;
dst += 8 * stride;
}
@@ -608,8 +578,7 @@ static int decode_i_mb(FourXContext *f)
}
static const uint8_t *read_huffman_tables(FourXContext *f,
const uint8_t * const buf,
int buf_size)
const uint8_t * const buf, int buf_size)
{
int frequency[512] = { 0 };
uint8_t flag[512];
@@ -628,11 +597,8 @@ static const uint8_t *read_huffman_tables(FourXContext *f,
for (;;) {
int i;
if (ptr_end - ptr < FFMAX(end - start + 1, 0) + 1) {
av_log(f->avctx, AV_LOG_ERROR, "invalid data in read_huffman_tables\n");
if (start <= end && ptr_end - ptr < end - start + 1 + 1)
return NULL;
}
for (i = start; i <= end; i++)
frequency[i] = *ptr++;
start = *ptr++;
@@ -734,9 +700,9 @@ static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length)
color[1] = bytestream2_get_le16u(&g3);
if (color[0] & 0x8000)
av_log(f->avctx, AV_LOG_ERROR, "unk bit 1\n");
av_log(NULL, AV_LOG_ERROR, "unk bit 1\n");
if (color[1] & 0x8000)
av_log(f->avctx, AV_LOG_ERROR, "unk bit 2\n");
av_log(NULL, AV_LOG_ERROR, "unk bit 2\n");
color[2] = mix(color[0], color[1]);
color[3] = mix(color[1], color[0]);
@@ -765,10 +731,7 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
unsigned int prestream_size;
const uint8_t *prestream;
if (bitstream_size > (1 << 26))
return AVERROR_INVALIDDATA;
if (length < bitstream_size + 12) {
if (bitstream_size > (1<<26) || length < bitstream_size + 12) {
av_log(f->avctx, AV_LOG_ERROR, "packet size too small\n");
return AVERROR_INVALIDDATA;
}
@@ -777,17 +740,16 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
prestream = buf + bitstream_size + 12;
if (prestream_size + bitstream_size + 12 != length
|| bitstream_size > (1 << 26)
|| prestream_size > (1 << 26)) {
av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d %d\n",
prestream_size, bitstream_size, length);
return -1;
}
prestream = read_huffman_tables(f, prestream, prestream_size);
if (!prestream) {
av_log(f->avctx, AV_LOG_ERROR, "Error reading Huffman tables.\n");
return AVERROR_INVALIDDATA;
}
prestream = read_huffman_tables(f, prestream, buf + length - prestream);
if (!prestream)
return -1;
init_get_bits(&f->gb, buf + 4, 8 * bitstream_size);
@@ -821,7 +783,7 @@ static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length)
}
static int decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
int *data_size, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
@@ -830,33 +792,20 @@ static int decode_frame(AVCodecContext *avctx, void *data,
AVFrame *p, temp;
int i, frame_4cc, frame_size;
if (buf_size < 20)
if (buf_size < 12)
return AVERROR_INVALIDDATA;
av_assert0(avctx->width % 16 == 0 && avctx->height % 16 == 0);
if (buf_size < AV_RL32(buf + 4) + 8) {
frame_4cc = AV_RL32(buf);
if (buf_size != AV_RL32(buf + 4) + 8 || buf_size < 20)
av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d\n",
buf_size, AV_RL32(buf + 4));
return AVERROR_INVALIDDATA;
}
frame_4cc = AV_RL32(buf);
if (frame_4cc == AV_RL32("cfrm")) {
int free_index = -1;
int id, whole_size;
const int data_size = buf_size - 20;
const int id = AV_RL32(buf + 12);
const int whole_size = AV_RL32(buf + 16);
CFrameBuffer *cfrm;
if (f->version <= 1) {
av_log(f->avctx, AV_LOG_ERROR, "cfrm in version %d\n", f->version);
return AVERROR_INVALIDDATA;
}
id = AV_RL32(buf + 12);
whole_size = AV_RL32(buf + 16);
if (data_size < 0 || whole_size < 0) {
av_log(f->avctx, AV_LOG_ERROR, "sizes invalid\n");
return AVERROR_INVALIDDATA;
@@ -887,7 +836,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
cfrm->size + data_size + FF_INPUT_BUFFER_PADDING_SIZE);
// explicit check needed as memcpy below might not catch a NULL
if (!cfrm->data) {
av_log(f->avctx, AV_LOG_ERROR, "realloc failure\n");
av_log(f->avctx, AV_LOG_ERROR, "realloc falure\n");
return -1;
}
@@ -902,9 +851,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
av_log(f->avctx, AV_LOG_ERROR, "cframe id mismatch %d %d\n",
id, avctx->frame_number);
if (f->version <= 1)
return AVERROR_INVALIDDATA;
cfrm->size = cfrm->id = 0;
frame_4cc = AV_RL32("pfrm");
} else
@@ -943,6 +889,14 @@ static int decode_frame(AVCodecContext *avctx, void *data,
return -1;
}
} else if (frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")) {
if (!f->last_picture.data[0]) {
f->last_picture.reference = 3;
if (avctx->get_buffer(avctx, &f->last_picture) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
}
p->pict_type = AV_PICTURE_TYPE_P;
if (decode_p_frame(f, buf, frame_size) < 0) {
av_log(f->avctx, AV_LOG_ERROR, "decode p frame failed\n");
@@ -959,7 +913,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
p->key_frame = p->pict_type == AV_PICTURE_TYPE_I;
*picture = *p;
*got_frame = 1;
*data_size = sizeof(AVPicture);
emms_c();
@@ -982,7 +936,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
if (avctx->extradata_size != 4 || !avctx->extradata) {
av_log(avctx, AV_LOG_ERROR, "extradata wrong or missing\n");
return AVERROR_INVALIDDATA;
return 1;
}
if((avctx->width % 16) || (avctx->height % 16)) {
av_log(avctx, AV_LOG_ERROR, "unsupported width/height\n");
@@ -996,9 +950,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
init_vlcs(f);
if (f->version > 2)
avctx->pix_fmt = AV_PIX_FMT_RGB565;
avctx->pix_fmt = PIX_FMT_RGB565;
else
avctx->pix_fmt = AV_PIX_FMT_BGR555;
avctx->pix_fmt = PIX_FMT_BGR555;
return 0;
}


@@ -38,12 +38,14 @@
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "internal.h"
static const enum AVPixelFormat pixfmt_rgb24[] = {
AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE };
static const enum PixelFormat pixfmt_rgb24[] = {
PIX_FMT_BGR24, PIX_FMT_RGB32, PIX_FMT_NONE };
/*
* Decoder context
*/
typedef struct EightBpsContext {
AVCodecContext *avctx;
AVFrame pic;
@@ -54,8 +56,14 @@ typedef struct EightBpsContext {
uint32_t pal[256];
} EightBpsContext;
/*
*
* Decode a frame
*
*/
static int decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
int *data_size, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
@@ -64,7 +72,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
unsigned char *pixptr, *pixptr_end;
unsigned int height = avctx->height; // Real image height
unsigned int dlen, p, row;
const unsigned char *lp, *dp, *ep;
const unsigned char *lp, *dp;
unsigned char count;
unsigned int planes = c->planes;
unsigned char *planemap = c->planemap;
@@ -74,13 +82,11 @@ static int decode_frame(AVCodecContext *avctx, void *data,
c->pic.reference = 0;
c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
if (ff_get_buffer(avctx, &c->pic) < 0){
if (avctx->get_buffer(avctx, &c->pic) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
ep = encoded + buf_size;
/* Set data pointer after line lengths */
dp = encoded + planes * (height << 1);
@@ -92,19 +98,17 @@ static int decode_frame(AVCodecContext *avctx, void *data,
for (row = 0; row < height; row++) {
pixptr = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p];
pixptr_end = pixptr + c->pic.linesize[0];
if (ep - lp < row * 2 + 2)
return AVERROR_INVALIDDATA;
dlen = av_be2ne16(*(const unsigned short *)(lp + row * 2));
/* Decode a row of this plane */
while (dlen > 0) {
if (ep - dp <= 1)
if (dp + 1 >= buf + buf_size)
return -1;
if ((count = *dp++) <= 127) {
count++;
dlen -= count + 1;
if (pixptr + count * planes > pixptr_end)
break;
if (ep - dp < count)
if (dp + count > buf + buf_size)
return -1;
while (count--) {
*pixptr = *dp++;
@@ -137,13 +141,19 @@ static int decode_frame(AVCodecContext *avctx, void *data,
memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
}
*got_frame = 1;
*data_size = sizeof(AVFrame);
*(AVFrame*)data = c->pic;
/* always report that the buffer was completely consumed */
return buf_size;
}
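The row decoder above is PackBits-style RLE: a count byte of 0..127 introduces a literal run of count+1 bytes, copied into every planes-th output byte. Below is a minimal single-plane sketch of that scheme with contiguous output; the repeat branch (257-count copies of the next byte) is the usual PackBits convention and is assumed here, since that part of the loop is not visible in this hunk.

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative PackBits-style RLE decoder for one row of one plane.
     * src/src_len: compressed row.  dst/dst_len: output pixels.
     * Returns bytes written, or -1 on malformed input. */
    static int rle_decode_row(const uint8_t *src, size_t src_len,
                              uint8_t *dst, size_t dst_len)
    {
        size_t si = 0, di = 0;

        while (si < src_len) {
            unsigned count = src[si++];
            if (count <= 127) {              /* literal run of count+1 bytes */
                count++;
                if (si + count > src_len || di + count > dst_len)
                    return -1;
                while (count--)
                    dst[di++] = src[si++];
            } else {                         /* repeat next byte 257-count times (assumed) */
                count = 257 - count;
                if (si >= src_len || di + count > dst_len)
                    return -1;
                while (count--)
                    dst[di++] = src[si];
                si++;
            }
        }
        return (int)di;
    }

The real decoder additionally steps the destination by planes and validates against pixptr_end, as the hunk shows.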
/*
*
* Init 8BPS decoder
*
*/
static av_cold int decode_init(AVCodecContext *avctx)
{
EightBpsContext * const c = avctx->priv_data;
@@ -154,7 +164,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
avcodec_get_frame_defaults(&c->pic);
switch (avctx->bits_per_coded_sample) {
case 8:
avctx->pix_fmt = AV_PIX_FMT_PAL8;
avctx->pix_fmt = PIX_FMT_PAL8;
c->planes = 1;
c->planemap[0] = 0; // 1st plane is palette indexes
break;
@@ -166,7 +176,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
c->planemap[2] = 0; // 3rd plane is blue
break;
case 32:
avctx->pix_fmt = AV_PIX_FMT_RGB32;
avctx->pix_fmt = PIX_FMT_RGB32;
c->planes = 4;
#if HAVE_BIGENDIAN
c->planemap[0] = 1; // 1st plane is red
@@ -189,6 +199,14 @@ static av_cold int decode_init(AVCodecContext *avctx)
return 0;
}
/*
*
* Uninit 8BPS decoder
*
*/
static av_cold int decode_end(AVCodecContext *avctx)
{
EightBpsContext * const c = avctx->priv_data;
@@ -199,6 +217,8 @@ static av_cold int decode_end(AVCodecContext *avctx)
return 0;
}
AVCodec ff_eightbps_decoder = {
.name = "8bps",
.type = AVMEDIA_TYPE_VIDEO,


@@ -39,20 +39,18 @@
#include "libavutil/avassert.h"
#include "avcodec.h"
#include "internal.h"
#include "libavutil/common.h"
/** decoder context */
typedef struct EightSvxContext {
AVFrame frame;
uint8_t fib_acc[2];
const int8_t *table;
/* buffer used to store the whole first packet.
data is only sent as one large packet */
uint8_t *data[2];
int data_size;
int data_idx;
/* buffer used to store the whole audio decoded/interleaved chunk,
* which is sent with the first packet */
uint8_t *samples;
int64_t samples_size;
int samples_idx;
} EightSvxContext;
static const int8_t fibonacci[16] = { -34, -21, -13, -8, -5, -3, -2, -1, 0, 1, 2, 3, 5, 8, 13, 21 };
@@ -61,26 +59,47 @@ static const int8_t exponential[16] = { -128, -64, -32, -16, -8, -4, -2, -1, 0,
#define MAX_FRAME_SIZE 2048
/**
* Delta decode the compressed values in src, and put the resulting
* decoded samples in dst.
* Interleave samples in buffer containing all left channel samples
* at the beginning, and right channel samples at the end.
* Each sample is assumed to be in signed 8-bit format.
*
* @param[in,out] state starting value. it is saved for use in the next call.
* @param table delta sequence table
* @param size the size in bytes of the dst and src buffer
*/
static void delta_decode(uint8_t *dst, const uint8_t *src, int src_size,
uint8_t *state, const int8_t *table)
static void interleave_stereo(uint8_t *dst, const uint8_t *src, int size)
{
uint8_t val = *state;
uint8_t *dst_end = dst + size;
size = size>>1;
while (src_size--) {
while (dst < dst_end) {
*dst++ = *src;
*dst++ = *(src+size);
src++;
}
}
/**
* Delta decode the compressed values in src, and put the resulting
* decoded n samples in dst.
*
* @param val starting value assumed by the delta sequence
* @param table delta sequence table
* @return size in bytes of the decoded data, must be src_size*2
*/
static int delta_decode(int8_t *dst, const uint8_t *src, int src_size,
int8_t val, const int8_t *table)
{
int n = src_size;
int8_t *dst0 = dst;
while (n--) {
uint8_t d = *src++;
val = av_clip_uint8(val + table[d & 0xF]);
val = av_clip(val + table[d & 0x0f], -127, 128);
*dst++ = val;
val = av_clip_uint8(val + table[d >> 4]);
val = av_clip(val + table[d >> 4] , -127, 128);
*dst++ = val;
}
*state = val;
return dst-dst0;
}
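delta_decode() above expands two 4-bit table indices per input byte, accumulating each delta into the previous sample, low nibble first. A self-contained sketch of the Fibonacci variant follows; the clamp range used here is illustrative, since the two revisions in this hunk clamp slightly differently (unsigned vs. signed state).

    #include <stddef.h>
    #include <stdint.h>

    static const int8_t fib_table[16] = {
        -34, -21, -13, -8, -5, -3, -2, -1, 0, 1, 2, 3, 5, 8, 13, 21
    };

    static int8_t clamp_s8(int v)
    {
        return v < -128 ? -128 : v > 127 ? 127 : v;
    }

    /* Decode src_size compressed bytes into 2*src_size signed 8-bit samples.
     * 'acc' is the running accumulator (the stream's starting sample value). */
    static void fib_delta_decode(int8_t *dst, const uint8_t *src,
                                 size_t src_size, int8_t acc)
    {
        for (size_t i = 0; i < src_size; i++) {
            uint8_t d = src[i];
            acc = clamp_s8(acc + fib_table[d & 0x0F]);  /* low nibble first */
            *dst++ = acc;
            acc = clamp_s8(acc + fib_table[d >> 4]);    /* then high nibble */
            *dst++ = acc;
        }
    }

With the exponential table substituted, the same loop covers the 8SVX_EXP case handled elsewhere in this file.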
/** decode a frame */
@@ -88,70 +107,79 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
EightSvxContext *esc = avctx->priv_data;
int buf_size;
int ch, ret;
int hdr_size = 2;
int n, out_data_size, ret;
uint8_t *src, *dst;
/* decode and interleave the first packet */
if (!esc->data[0] && avpkt) {
int chan_size = avpkt->size / avctx->channels - hdr_size;
if (!esc->samples && avpkt) {
uint8_t *deinterleaved_samples, *p = NULL;
int packet_size = avpkt->size;
if (avpkt->size % avctx->channels) {
if (packet_size % avctx->channels) {
av_log(avctx, AV_LOG_WARNING, "Packet with odd size, ignoring last byte\n");
if (packet_size < avctx->channels)
return packet_size;
packet_size -= packet_size % avctx->channels;
}
if (avpkt->size < (hdr_size + 1) * avctx->channels) {
av_log(avctx, AV_LOG_ERROR, "packet size is too small\n");
return AVERROR(EINVAL);
}
esc->fib_acc[0] = avpkt->data[1] + 128;
if (avctx->channels == 2)
esc->fib_acc[1] = avpkt->data[2+chan_size+1] + 128;
esc->data_idx = 0;
esc->data_size = chan_size;
if (!(esc->data[0] = av_malloc(chan_size)))
esc->samples_size = !esc->table ?
packet_size : avctx->channels + (packet_size-avctx->channels) * 2;
if (!(esc->samples = av_malloc(esc->samples_size)))
return AVERROR(ENOMEM);
if (avctx->channels == 2) {
if (!(esc->data[1] = av_malloc(chan_size))) {
av_freep(&esc->data[0]);
return AVERROR(ENOMEM);
}
}
memcpy(esc->data[0], &avpkt->data[hdr_size], chan_size);
if (avctx->channels == 2)
memcpy(esc->data[1], &avpkt->data[2*hdr_size+chan_size], chan_size);
}
if (!esc->data[0]) {
av_log(avctx, AV_LOG_ERROR, "unexpected empty packet\n");
return AVERROR(EINVAL);
}
/* decode next piece of data from the buffer */
buf_size = FFMIN(MAX_FRAME_SIZE, esc->data_size - esc->data_idx);
if (buf_size <= 0) {
*got_frame_ptr = 0;
return avpkt->size;
/* decompress */
if (esc->table) {
const uint8_t *buf = avpkt->data;
uint8_t *dst;
int buf_size = avpkt->size;
int i, n = esc->samples_size;
if (buf_size < 2) {
av_log(avctx, AV_LOG_ERROR, "packet size is too small\n");
return AVERROR(EINVAL);
}
if (!(deinterleaved_samples = av_mallocz(n)))
return AVERROR(ENOMEM);
dst = p = deinterleaved_samples;
/* the uncompressed starting value is contained in the first byte */
dst = deinterleaved_samples;
for (i = 0; i < avctx->channels; i++) {
delta_decode(dst, buf + 1, buf_size / avctx->channels - 1, buf[0], esc->table);
buf += buf_size / avctx->channels;
dst += n / avctx->channels - 1;
}
} else {
deinterleaved_samples = avpkt->data;
}
if (avctx->channels == 2)
interleave_stereo(esc->samples, deinterleaved_samples, esc->samples_size);
else
memcpy(esc->samples, deinterleaved_samples, esc->samples_size);
av_freep(&p);
}
/* get output buffer */
esc->frame.nb_samples = buf_size * 2;
if ((ret = ff_get_buffer(avctx, &esc->frame)) < 0) {
av_assert1(!(esc->samples_size % avctx->channels || esc->samples_idx % avctx->channels));
esc->frame.nb_samples = FFMIN(MAX_FRAME_SIZE, esc->samples_size - esc->samples_idx) / avctx->channels;
if ((ret = avctx->get_buffer(avctx, &esc->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
for (ch = 0; ch < avctx->channels; ch++) {
delta_decode(esc->frame.data[ch], &esc->data[ch][esc->data_idx],
buf_size, &esc->fib_acc[ch], esc->table);
}
esc->data_idx += buf_size;
*got_frame_ptr = 1;
*(AVFrame *)data = esc->frame;
return ((avctx->frame_number == 0)*hdr_size + buf_size)*avctx->channels;
dst = esc->frame.data[0];
src = esc->samples + esc->samples_idx;
out_data_size = esc->frame.nb_samples * avctx->channels;
for (n = out_data_size; n > 0; n--)
*dst++ = *src++ + 128;
esc->samples_idx += out_data_size;
return esc->table ?
(avctx->frame_number == 0)*2 + out_data_size / 2 :
out_data_size;
}
static av_cold int eightsvx_decode_init(AVCodecContext *avctx)
@@ -166,11 +194,13 @@ static av_cold int eightsvx_decode_init(AVCodecContext *avctx)
switch (avctx->codec->id) {
case AV_CODEC_ID_8SVX_FIB: esc->table = fibonacci; break;
case AV_CODEC_ID_8SVX_EXP: esc->table = exponential; break;
case AV_CODEC_ID_PCM_S8_PLANAR:
case AV_CODEC_ID_8SVX_RAW: esc->table = NULL; break;
default:
av_log(avctx, AV_LOG_ERROR, "Invalid codec id %d.\n", avctx->codec->id);
return AVERROR_INVALIDDATA;
}
avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
avctx->sample_fmt = AV_SAMPLE_FMT_U8;
avcodec_get_frame_defaults(&esc->frame);
avctx->coded_frame = &esc->frame;
@@ -182,10 +212,9 @@ static av_cold int eightsvx_decode_close(AVCodecContext *avctx)
{
EightSvxContext *esc = avctx->priv_data;
av_freep(&esc->data[0]);
av_freep(&esc->data[1]);
esc->data_size = 0;
esc->data_idx = 0;
av_freep(&esc->samples);
esc->samples_size = 0;
esc->samples_idx = 0;
return 0;
}
@@ -201,8 +230,6 @@ AVCodec ff_eightsvx_fib_decoder = {
.close = eightsvx_decode_close,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("8SVX fibonacci"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_NONE },
};
#endif
#if CONFIG_EIGHTSVX_EXP_DECODER
@@ -216,7 +243,18 @@ AVCodec ff_eightsvx_exp_decoder = {
.close = eightsvx_decode_close,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("8SVX exponential"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_NONE },
};
#endif
#if CONFIG_PCM_S8_PLANAR_DECODER
AVCodec ff_pcm_s8_planar_decoder = {
.name = "pcm_s8_planar",
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_PCM_S8_PLANAR,
.priv_data_size = sizeof(EightSvxContext),
.init = eightsvx_decode_init,
.close = eightsvx_decode_close,
.decode = eightsvx_decode_frame,
.capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 8-bit planar"),
};
#endif


@@ -16,7 +16,6 @@ HEADERS = avcodec.h \
OBJS = allcodecs.o \
audioconvert.o \
avpacket.o \
avpicture.o \
bitstream.o \
bitstream_filter.o \
codec_desc.o \
@@ -25,7 +24,6 @@ OBJS = allcodecs.o \
fmtconvert.o \
imgconvert.o \
jrevdct.o \
mathtables.o \
options.o \
parser.o \
raw.o \
@@ -38,7 +36,6 @@ OBJS = allcodecs.o \
# parts needed for many different codecs
OBJS-$(CONFIG_AANDCTTABLES) += aandcttab.o
OBJS-$(CONFIG_AC3DSP) += ac3dsp.o
OBJS-$(CONFIG_AUDIO_FRAME_QUEUE) += audio_frame_queue.o
OBJS-$(CONFIG_CRYSTALHD) += crystalhd.o
OBJS-$(CONFIG_DCT) += dct.o dct32_fixed.o dct32_float.o
OBJS-$(CONFIG_DWT) += dwt.o snow.o
@@ -56,8 +53,6 @@ OBJS-$(CONFIG_LIBXVID) += libxvid_rc.o
OBJS-$(CONFIG_LPC) += lpc.o
OBJS-$(CONFIG_LSP) += lsp.o
OBJS-$(CONFIG_MDCT) += mdct_fixed.o mdct_float.o
OBJS-$(CONFIG_MPEGAUDIO) += mpegaudio.o mpegaudiodata.o \
mpegaudiodecheader.o
OBJS-$(CONFIG_MPEGAUDIODSP) += mpegaudiodsp.o \
mpegaudiodsp_data.o \
mpegaudiodsp_fixed.o \
@@ -68,15 +63,12 @@ OBJS-$(CONFIG_MPEGVIDEOENC) += mpegvideo_enc.o mpeg12data.o \
OBJS-$(CONFIG_RANGECODER) += rangecoder.o
RDFT-OBJS-$(CONFIG_HARDCODED_TABLES) += sin_tables.o
OBJS-$(CONFIG_RDFT) += rdft.o $(RDFT-OBJS-yes)
OBJS-$(CONFIG_SHARED) += log2_tab.o
OBJS-$(CONFIG_SINEWIN) += sinewin.o
OBJS-$(CONFIG_VAAPI) += vaapi.o
OBJS-$(CONFIG_VDPAU) += vdpau.o
OBJS-$(CONFIG_VIDEODSP) += videodsp.o
OBJS-$(CONFIG_VP3DSP) += vp3dsp.o
# decoders/encoders/hardware accelerators
OBJS-$(CONFIG_ZERO12V_DECODER) += 012v.o
OBJS-$(CONFIG_A64MULTI_ENCODER) += a64multienc.o elbg.o
OBJS-$(CONFIG_A64MULTI5_ENCODER) += a64multienc.o elbg.o
OBJS-$(CONFIG_AAC_DECODER) += aacdec.o aactab.o aacsbr.o aacps.o \
@@ -85,14 +77,15 @@ OBJS-$(CONFIG_AAC_DECODER) += aacdec.o aactab.o aacsbr.o aacps.o \
OBJS-$(CONFIG_AAC_ENCODER) += aacenc.o aaccoder.o \
aacpsy.o aactab.o \
psymodel.o iirfilter.o \
mpeg4audio.o kbdwin.o
mpeg4audio.o kbdwin.o \
audio_frame_queue.o
OBJS-$(CONFIG_AASC_DECODER) += aasc.o msrledec.o
OBJS-$(CONFIG_AC3_DECODER) += ac3dec.o ac3dec_data.o ac3.o kbdwin.o
OBJS-$(CONFIG_AC3_ENCODER) += ac3enc_float.o ac3enc.o ac3tab.o \
ac3.o kbdwin.o
OBJS-$(CONFIG_AC3_FIXED_ENCODER) += ac3enc_fixed.o ac3enc.o ac3tab.o ac3.o
OBJS-$(CONFIG_ALAC_DECODER) += alac.o alac_data.o
OBJS-$(CONFIG_ALAC_ENCODER) += alacenc.o alac_data.o
OBJS-$(CONFIG_ALAC_DECODER) += alac.o
OBJS-$(CONFIG_ALAC_ENCODER) += alacenc.o
OBJS-$(CONFIG_ALS_DECODER) += alsdec.o bgmc.o mpeg4audio.o
OBJS-$(CONFIG_AMRNB_DECODER) += amrnbdec.o celp_filters.o \
celp_math.o acelp_filters.o \
@@ -112,15 +105,15 @@ OBJS-$(CONFIG_ANSI_DECODER) += ansi.o cga_data.o
OBJS-$(CONFIG_APE_DECODER) += apedec.o
OBJS-$(CONFIG_ASS_DECODER) += assdec.o ass.o ass_split.o
OBJS-$(CONFIG_ASS_ENCODER) += assenc.o ass.o
OBJS-$(CONFIG_ASV1_DECODER) += asvdec.o asv.o mpeg12data.o
OBJS-$(CONFIG_ASV1_ENCODER) += asvenc.o asv.o mpeg12data.o
OBJS-$(CONFIG_ASV2_DECODER) += asvdec.o asv.o mpeg12data.o
OBJS-$(CONFIG_ASV2_ENCODER) += asvenc.o asv.o mpeg12data.o
OBJS-$(CONFIG_ASV1_DECODER) += asv1.o mpeg12data.o
OBJS-$(CONFIG_ASV1_ENCODER) += asv1.o mpeg12data.o
OBJS-$(CONFIG_ASV2_DECODER) += asv1.o mpeg12data.o
OBJS-$(CONFIG_ASV2_ENCODER) += asv1.o mpeg12data.o
OBJS-$(CONFIG_ATRAC1_DECODER) += atrac1.o atrac.o
OBJS-$(CONFIG_ATRAC3_DECODER) += atrac3.o atrac.o
OBJS-$(CONFIG_AURA_DECODER) += cyuv.o
OBJS-$(CONFIG_AURA2_DECODER) += aura.o
OBJS-$(CONFIG_AVRN_DECODER) += avrndec.o mjpegdec.o mjpeg.o
OBJS-$(CONFIG_AVRP_DECODER) += avrndec.o
OBJS-$(CONFIG_AVRP_DECODER) += r210dec.o
OBJS-$(CONFIG_AVRP_ENCODER) += r210enc.o
OBJS-$(CONFIG_AVS_DECODER) += avs.o
@@ -138,7 +131,6 @@ OBJS-$(CONFIG_BMP_DECODER) += bmp.o msrledec.o
OBJS-$(CONFIG_BMP_ENCODER) += bmpenc.o
OBJS-$(CONFIG_BMV_VIDEO_DECODER) += bmv.o
OBJS-$(CONFIG_BMV_AUDIO_DECODER) += bmv.o
OBJS-$(CONFIG_BRENDER_PIX_DECODER) += brender_pix.o
OBJS-$(CONFIG_C93_DECODER) += c93.o
OBJS-$(CONFIG_CAVS_DECODER) += cavs.o cavsdec.o cavsdsp.o \
cavsdata.o mpeg12data.o
@@ -149,14 +141,12 @@ OBJS-$(CONFIG_CLJR_DECODER) += cljr.o
OBJS-$(CONFIG_CLJR_ENCODER) += cljr.o
OBJS-$(CONFIG_CLLC_DECODER) += cllc.o
OBJS-$(CONFIG_COOK_DECODER) += cook.o
OBJS-$(CONFIG_COMFORTNOISE_DECODER) += cngdec.o celp_filters.o
OBJS-$(CONFIG_COMFORTNOISE_ENCODER) += cngenc.o
OBJS-$(CONFIG_CPIA_DECODER) += cpia.o
OBJS-$(CONFIG_CSCD_DECODER) += cscd.o
OBJS-$(CONFIG_CYUV_DECODER) += cyuv.o
OBJS-$(CONFIG_DCA_DECODER) += dcadec.o dca.o dcadsp.o \
dca_parser.o synth_filter.o
OBJS-$(CONFIG_DCA_ENCODER) += dcaenc.o dca.o
OBJS-$(CONFIG_DCA_ENCODER) += dcaenc.o
OBJS-$(CONFIG_DIRAC_DECODER) += diracdec.o dirac.o diracdsp.o \
dirac_arith.o mpeg12data.o dwt.o
OBJS-$(CONFIG_DFA_DECODER) += dfa.o
@@ -189,13 +179,13 @@ OBJS-$(CONFIG_EIGHTSVX_FIB_DECODER) += 8svx.o
OBJS-$(CONFIG_ESCAPE124_DECODER) += escape124.o
OBJS-$(CONFIG_ESCAPE130_DECODER) += escape130.o
OBJS-$(CONFIG_EXR_DECODER) += exr.o
OBJS-$(CONFIG_FFV1_DECODER) += ffv1dec.o ffv1.o
OBJS-$(CONFIG_FFV1_ENCODER) += ffv1enc.o ffv1.o
OBJS-$(CONFIG_FFVHUFF_DECODER) += huffyuv.o huffyuvdec.o
OBJS-$(CONFIG_FFVHUFF_ENCODER) += huffyuv.o huffyuvenc.o
OBJS-$(CONFIG_FFV1_DECODER) += ffv1.o
OBJS-$(CONFIG_FFV1_ENCODER) += ffv1.o
OBJS-$(CONFIG_FFVHUFF_DECODER) += huffyuv.o
OBJS-$(CONFIG_FFVHUFF_ENCODER) += huffyuv.o
OBJS-$(CONFIG_FFWAVESYNTH_DECODER) += ffwavesynth.o
OBJS-$(CONFIG_FLAC_DECODER) += flacdec.o flacdata.o flac.o flacdsp.o
OBJS-$(CONFIG_FLAC_ENCODER) += flacenc.o flacdata.o flac.o flacdsp.o vorbis_data.o
OBJS-$(CONFIG_FLAC_ENCODER) += flacenc.o flacdata.o flac.o vorbis_data.o
OBJS-$(CONFIG_FLASHSV_DECODER) += flashsv.o
OBJS-$(CONFIG_FLASHSV_ENCODER) += flashsvenc.o
OBJS-$(CONFIG_FLASHSV2_ENCODER) += flashsv2enc.o
@@ -205,7 +195,7 @@ OBJS-$(CONFIG_FOURXM_DECODER) += 4xm.o
OBJS-$(CONFIG_FRAPS_DECODER) += fraps.o
OBJS-$(CONFIG_FRWU_DECODER) += frwu.o
OBJS-$(CONFIG_G723_1_DECODER) += g723_1.o acelp_vectors.o \
celp_filters.o celp_math.o
celp_filters.o
OBJS-$(CONFIG_G723_1_ENCODER) += g723_1.o acelp_vectors.o celp_math.o
OBJS-$(CONFIG_G729_DECODER) += g729dec.o lsp.o celp_math.o acelp_filters.o acelp_pitch_delay.o acelp_vectors.o g729postfilter.o
OBJS-$(CONFIG_GIF_DECODER) += gifdec.o lzw.o
@@ -228,8 +218,8 @@ OBJS-$(CONFIG_H264_DXVA2_HWACCEL) += dxva2_h264.o
OBJS-$(CONFIG_H264_VAAPI_HWACCEL) += vaapi_h264.o
OBJS-$(CONFIG_H264_VDA_HWACCEL) += vda_h264.o
OBJS-$(CONFIG_H264_VDA_DECODER) += vda_h264_dec.o
OBJS-$(CONFIG_HUFFYUV_DECODER) += huffyuv.o huffyuvdec.o
OBJS-$(CONFIG_HUFFYUV_ENCODER) += huffyuv.o huffyuvenc.o
OBJS-$(CONFIG_HUFFYUV_DECODER) += huffyuv.o
OBJS-$(CONFIG_HUFFYUV_ENCODER) += huffyuv.o
OBJS-$(CONFIG_IAC_DECODER) += imc.o
OBJS-$(CONFIG_IDCIN_DECODER) += idcinvideo.o
OBJS-$(CONFIG_IDF_DECODER) += bintext.o cga_data.o
@@ -267,18 +257,30 @@ OBJS-$(CONFIG_MMVIDEO_DECODER) += mmvideo.o
OBJS-$(CONFIG_MOTIONPIXELS_DECODER) += motionpixels.o
OBJS-$(CONFIG_MOVTEXT_DECODER) += movtextdec.o ass.o
OBJS-$(CONFIG_MOVTEXT_ENCODER) += movtextenc.o ass_split.o
OBJS-$(CONFIG_MP1_DECODER) += mpegaudiodec.o
OBJS-$(CONFIG_MP1FLOAT_DECODER) += mpegaudiodec_float.o
OBJS-$(CONFIG_MP2_DECODER) += mpegaudiodec.o
OBJS-$(CONFIG_MP1_DECODER) += mpegaudiodec.o mpegaudiodecheader.o \
mpegaudio.o mpegaudiodata.o
OBJS-$(CONFIG_MP1FLOAT_DECODER) += mpegaudiodec_float.o mpegaudiodecheader.o \
mpegaudio.o mpegaudiodata.o
OBJS-$(CONFIG_MP2_DECODER) += mpegaudiodec.o mpegaudiodecheader.o \
mpegaudio.o mpegaudiodata.o
OBJS-$(CONFIG_MP2_ENCODER) += mpegaudioenc.o mpegaudio.o \
mpegaudiodata.o mpegaudiodsp_data.o
OBJS-$(CONFIG_MP2FLOAT_DECODER) += mpegaudiodec_float.o
OBJS-$(CONFIG_MP3_DECODER) += mpegaudiodec.o
OBJS-$(CONFIG_MP3ADU_DECODER) += mpegaudiodec.o
OBJS-$(CONFIG_MP3ADUFLOAT_DECODER) += mpegaudiodec_float.o
OBJS-$(CONFIG_MP3FLOAT_DECODER) += mpegaudiodec_float.o
OBJS-$(CONFIG_MP3ON4_DECODER) += mpegaudiodec.o mpeg4audio.o
OBJS-$(CONFIG_MP3ON4FLOAT_DECODER) += mpegaudiodec_float.o mpeg4audio.o
OBJS-$(CONFIG_MP2FLOAT_DECODER) += mpegaudiodec_float.o mpegaudiodecheader.o \
mpegaudio.o mpegaudiodata.o
OBJS-$(CONFIG_MP3ADU_DECODER) += mpegaudiodec.o mpegaudiodecheader.o \
mpegaudio.o mpegaudiodata.o
OBJS-$(CONFIG_MP3ADUFLOAT_DECODER) += mpegaudiodec_float.o mpegaudiodecheader.o \
mpegaudio.o mpegaudiodata.o
OBJS-$(CONFIG_MP3ON4_DECODER) += mpegaudiodec.o mpegaudiodecheader.o \
mpegaudio.o mpegaudiodata.o \
mpeg4audio.o
OBJS-$(CONFIG_MP3ON4FLOAT_DECODER) += mpegaudiodec_float.o mpegaudiodecheader.o \
mpegaudio.o mpegaudiodata.o \
mpeg4audio.o
OBJS-$(CONFIG_MP3_DECODER) += mpegaudiodec.o mpegaudiodecheader.o \
mpegaudio.o mpegaudiodata.o
OBJS-$(CONFIG_MP3FLOAT_DECODER) += mpegaudiodec_float.o mpegaudiodecheader.o \
mpegaudio.o mpegaudiodata.o
OBJS-$(CONFIG_MPC7_DECODER) += mpc7.o mpc.o
OBJS-$(CONFIG_MPC8_DECODER) += mpc8.o mpc.o
OBJS-$(CONFIG_MPEGVIDEO_DECODER) += mpeg12.o mpeg12data.o \
@@ -293,7 +295,6 @@ OBJS-$(CONFIG_MPEG2VIDEO_DECODER) += mpeg12.o mpeg12data.o
OBJS-$(CONFIG_MPEG2VIDEO_ENCODER) += mpeg12enc.o mpeg12.o \
timecode.o
OBJS-$(CONFIG_MPEG4_VAAPI_HWACCEL) += vaapi_mpeg4.o
OBJS-$(CONFIG_MPL2_DECODER) += mpl2dec.o ass.o
OBJS-$(CONFIG_MSMPEG4V1_DECODER) += msmpeg4.o msmpeg4data.o
OBJS-$(CONFIG_MSMPEG4V2_DECODER) += msmpeg4.o msmpeg4data.o h263dec.o \
h263.o ituh263dec.o mpeg4videodec.o
@@ -313,11 +314,10 @@ OBJS-$(CONFIG_MSVIDEO1_DECODER) += msvideo1.o
OBJS-$(CONFIG_MSVIDEO1_ENCODER) += msvideo1enc.o elbg.o
OBJS-$(CONFIG_MSZH_DECODER) += lcldec.o
OBJS-$(CONFIG_MTS2_DECODER) += mss4.o mss34dsp.o
OBJS-$(CONFIG_MVC1_DECODER) += mvcdec.o
OBJS-$(CONFIG_MVC2_DECODER) += mvcdec.o
OBJS-$(CONFIG_MXPEG_DECODER) += mxpegdec.o mjpegdec.o mjpeg.o
OBJS-$(CONFIG_NELLYMOSER_DECODER) += nellymoserdec.o nellymoser.o
OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o
OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o \
audio_frame_queue.o
OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
OBJS-$(CONFIG_PAF_VIDEO_DECODER) += paf.o
OBJS-$(CONFIG_PAF_AUDIO_DECODER) += paf.o
@@ -333,7 +333,6 @@ OBJS-$(CONFIG_PGMYUV_DECODER) += pnmdec.o pnm.o
OBJS-$(CONFIG_PGMYUV_ENCODER) += pnmenc.o pnm.o
OBJS-$(CONFIG_PGSSUB_DECODER) += pgssubdec.o
OBJS-$(CONFIG_PICTOR_DECODER) += pictordec.o cga_data.o
OBJS-$(CONFIG_PJS_DECODER) += textdec.o ass.o
OBJS-$(CONFIG_PNG_DECODER) += png.o pngdec.o pngdsp.o
OBJS-$(CONFIG_PNG_ENCODER) += png.o pngenc.o
OBJS-$(CONFIG_PPM_DECODER) += pnmdec.o pnm.o
@@ -357,7 +356,8 @@ OBJS-$(CONFIG_R10K_ENCODER) += r210enc.o
OBJS-$(CONFIG_R210_DECODER) += r210dec.o
OBJS-$(CONFIG_R210_ENCODER) += r210enc.o
OBJS-$(CONFIG_RA_144_DECODER) += ra144dec.o ra144.o celp_filters.o
OBJS-$(CONFIG_RA_144_ENCODER) += ra144enc.o ra144.o celp_filters.o
OBJS-$(CONFIG_RA_144_ENCODER) += ra144enc.o ra144.o celp_filters.o \
audio_frame_queue.o
OBJS-$(CONFIG_RA_288_DECODER) += ra288.o celp_filters.o
OBJS-$(CONFIG_RALF_DECODER) += ralf.o
OBJS-$(CONFIG_RAWVIDEO_DECODER) += rawdec.o
@@ -380,7 +380,6 @@ OBJS-$(CONFIG_S302M_DECODER) += s302m.o
OBJS-$(CONFIG_SANM_DECODER) += sanm.o
OBJS-$(CONFIG_SGI_DECODER) += sgidec.o
OBJS-$(CONFIG_SGI_ENCODER) += sgienc.o rle.o
OBJS-$(CONFIG_SGIRLE_DECODER) += sgirledec.o
OBJS-$(CONFIG_SHORTEN_DECODER) += shorten.o
OBJS-$(CONFIG_SIPR_DECODER) += sipr.o acelp_pitch_delay.o \
celp_math.o acelp_vectors.o \
@@ -401,7 +400,6 @@ OBJS-$(CONFIG_SRT_DECODER) += srtdec.o ass.o
OBJS-$(CONFIG_SRT_ENCODER) += srtenc.o ass_split.o
OBJS-$(CONFIG_SUBRIP_DECODER) += srtdec.o ass.o
OBJS-$(CONFIG_SUBRIP_ENCODER) += srtenc.o ass_split.o
OBJS-$(CONFIG_SUBVIEWER1_DECODER) += textdec.o ass.o
OBJS-$(CONFIG_SUBVIEWER_DECODER) += subviewerdec.o ass.o
OBJS-$(CONFIG_SUNRAST_DECODER) += sunrast.o
OBJS-$(CONFIG_SUNRAST_ENCODER) += sunrastenc.o
@@ -412,11 +410,8 @@ OBJS-$(CONFIG_SVQ3_DECODER) += svq3.o svq13.o h263.o h264.o \
h264_loopfilter.o h264_direct.o \
h264_sei.o h264_ps.o h264_refs.o \
h264_cavlc.o h264_cabac.o cabac.o
OBJS-$(CONFIG_TEXT_DECODER) += textdec.o ass.o
OBJS-$(CONFIG_TAK_DECODER) += takdec.o tak.o
OBJS-$(CONFIG_TARGA_DECODER) += targa.o
OBJS-$(CONFIG_TARGA_ENCODER) += targaenc.o rle.o
OBJS-$(CONFIG_TARGA_Y216_DECODER) += targa_y216dec.o
OBJS-$(CONFIG_THEORA_DECODER) += xiph.o
OBJS-$(CONFIG_THP_DECODER) += mjpegdec.o mjpeg.o
OBJS-$(CONFIG_TIERTEXSEQVIDEO_DECODER) += tiertexseqv.o
@@ -466,7 +461,6 @@ OBJS-$(CONFIG_VP5_DECODER) += vp5.o vp56.o vp56data.o vp56dsp.o \
OBJS-$(CONFIG_VP6_DECODER) += vp6.o vp56.o vp56data.o vp56dsp.o \
vp6dsp.o vp56rac.o
OBJS-$(CONFIG_VP8_DECODER) += vp8.o vp8dsp.o vp56rac.o
OBJS-$(CONFIG_VPLAYER_DECODER) += textdec.o ass.o
OBJS-$(CONFIG_VQA_DECODER) += vqavideo.o
OBJS-$(CONFIG_WAVPACK_DECODER) += wavpack.o
OBJS-$(CONFIG_WEBVTT_DECODER) += webvttdec.o
@@ -494,8 +488,6 @@ OBJS-$(CONFIG_XAN_WC4_DECODER) += xxan.o
OBJS-$(CONFIG_XBIN_DECODER) += bintext.o cga_data.o
OBJS-$(CONFIG_XBM_DECODER) += xbmdec.o
OBJS-$(CONFIG_XBM_ENCODER) += xbmenc.o
OBJS-$(CONFIG_XFACE_DECODER) += xfacedec.o xface.o
OBJS-$(CONFIG_XFACE_ENCODER) += xfaceenc.o xface.o
OBJS-$(CONFIG_XL_DECODER) += xl.o
OBJS-$(CONFIG_XSUB_DECODER) += xsubdec.o
OBJS-$(CONFIG_XSUB_ENCODER) += xsubenc.o
@@ -517,6 +509,7 @@ OBJS-$(CONFIG_PCM_ALAW_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_ALAW_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_BLURAY_DECODER) += pcm-mpeg.o
OBJS-$(CONFIG_PCM_DVD_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_DVD_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_F32BE_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_F32BE_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_F32LE_DECODER) += pcm.o
@@ -530,30 +523,22 @@ OBJS-$(CONFIG_PCM_MULAW_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_MULAW_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_S8_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_S8_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_S8_PLANAR_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_S8_PLANAR_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_S8_PLANAR_DECODER) += 8svx.o
OBJS-$(CONFIG_PCM_S16BE_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_S16BE_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_S16BE_PLANAR_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_S16BE_PLANAR_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_S16LE_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_S16LE_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_S16LE_PLANAR_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_S16LE_PLANAR_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_S24BE_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_S24BE_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_S24DAUD_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_S24DAUD_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_S24LE_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_S24LE_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_S24LE_PLANAR_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_S24LE_PLANAR_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_S32BE_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_S32BE_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_S32LE_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_S32LE_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_S32LE_PLANAR_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_S32LE_PLANAR_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_U8_DECODER) += pcm.o
OBJS-$(CONFIG_PCM_U8_ENCODER) += pcm.o
OBJS-$(CONFIG_PCM_U16BE_DECODER) += pcm.o
@@ -573,7 +558,6 @@ OBJS-$(CONFIG_PCM_ZORK_DECODER) += pcm.o
OBJS-$(CONFIG_ADPCM_4XM_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_ADX_DECODER) += adxdec.o adx.o
OBJS-$(CONFIG_ADPCM_ADX_ENCODER) += adxenc.o adx.o
OBJS-$(CONFIG_ADPCM_AFC_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_CT_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_EA_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_EA_MAXIS_XA_DECODER) += adpcm.o adpcm_data.o
@@ -592,7 +576,6 @@ OBJS-$(CONFIG_ADPCM_IMA_DK4_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_IMA_EA_EACS_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_IMA_EA_SEAD_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_IMA_ISS_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_IMA_OKI_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_IMA_QT_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_IMA_QT_ENCODER) += adpcmenc.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_IMA_SMJPEG_DECODER) += adpcm.o adpcm_data.o
@@ -638,7 +621,7 @@ OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o ac3tab.o
OBJS-$(CONFIG_MOV_MUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MPEGTS_MUXER) += mpeg4audio.o
OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MXF_MUXER) += timecode.o dnxhddata.o
OBJS-$(CONFIG_MXF_MUXER) += timecode.o
OBJS-$(CONFIG_NUT_MUXER) += mpegaudiodata.o
OBJS-$(CONFIG_OGG_DEMUXER) += xiph.o flac.o flacdata.o \
mpeg12data.o vorbis_parser.o \
@@ -649,7 +632,6 @@ OBJS-$(CONFIG_RTP_MUXER) += mpeg4audio.o xiph.o
OBJS-$(CONFIG_RTPDEC) += mjpeg.o
OBJS-$(CONFIG_SPDIF_DEMUXER) += aacadtsdec.o mpeg4audio.o
OBJS-$(CONFIG_SPDIF_MUXER) += dca.o
OBJS-$(CONFIG_TAK_DEMUXER) += tak.o
OBJS-$(CONFIG_WEBM_MUXER) += mpeg4audio.o mpegaudiodata.o \
xiph.o flac.o flacdata.o \
vorbis_data.o
@@ -658,39 +640,40 @@ OBJS-$(CONFIG_WTV_DEMUXER) += mpeg4audio.o mpegaudiodata.o
# external codec libraries
OBJS-$(CONFIG_LIBAACPLUS_ENCODER) += libaacplus.o
OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o
OBJS-$(CONFIG_LIBFDK_AAC_ENCODER) += libfdk-aacenc.o
OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o audio_frame_queue.o
OBJS-$(CONFIG_LIBFDK_AAC_ENCODER) += libfdk-aacenc.o audio_frame_queue.o
OBJS-$(CONFIG_LIBGSM_DECODER) += libgsm.o
OBJS-$(CONFIG_LIBGSM_ENCODER) += libgsm.o
OBJS-$(CONFIG_LIBGSM_MS_DECODER) += libgsm.o
OBJS-$(CONFIG_LIBGSM_MS_ENCODER) += libgsm.o
OBJS-$(CONFIG_LIBILBC_DECODER) += libilbc.o
OBJS-$(CONFIG_LIBILBC_ENCODER) += libilbc.o
OBJS-$(CONFIG_LIBMP3LAME_ENCODER) += libmp3lame.o mpegaudiodecheader.o
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_DECODER) += libopencore-amr.o
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER) += libopencore-amr.o
OBJS-$(CONFIG_LIBMP3LAME_ENCODER) += libmp3lame.o mpegaudiodecheader.o \
audio_frame_queue.o
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_DECODER) += libopencore-amr.o \
audio_frame_queue.o
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER) += libopencore-amr.o \
audio_frame_queue.o
OBJS-$(CONFIG_LIBOPENCORE_AMRWB_DECODER) += libopencore-amr.o
OBJS-$(CONFIG_LIBOPENJPEG_DECODER) += libopenjpegdec.o
OBJS-$(CONFIG_LIBOPENJPEG_ENCODER) += libopenjpegenc.o
OBJS-$(CONFIG_LIBOPUS_DECODER) += libopusdec.o libopus.o \
vorbis_data.o
OBJS-$(CONFIG_LIBOPUS_ENCODER) += libopusenc.o libopus.o \
vorbis_data.o
OBJS-$(CONFIG_LIBOPUS_DECODER) += libopus_dec.o vorbis_data.o
OBJS-$(CONFIG_LIBSCHROEDINGER_DECODER) += libschroedingerdec.o \
libschroedinger.o
OBJS-$(CONFIG_LIBSCHROEDINGER_ENCODER) += libschroedingerenc.o \
libschroedinger.o
OBJS-$(CONFIG_LIBSPEEX_DECODER) += libspeexdec.o
OBJS-$(CONFIG_LIBSPEEX_ENCODER) += libspeexenc.o
OBJS-$(CONFIG_LIBSPEEX_ENCODER) += libspeexenc.o audio_frame_queue.o
OBJS-$(CONFIG_LIBSTAGEFRIGHT_H264_DECODER)+= libstagefright.o
OBJS-$(CONFIG_LIBTHEORA_ENCODER) += libtheoraenc.o
OBJS-$(CONFIG_LIBTWOLAME_ENCODER) += libtwolame.o
OBJS-$(CONFIG_LIBUTVIDEO_DECODER) += libutvideodec.o
OBJS-$(CONFIG_LIBUTVIDEO_ENCODER) += libutvideoenc.o
OBJS-$(CONFIG_LIBVO_AACENC_ENCODER) += libvo-aacenc.o mpeg4audio.o
OBJS-$(CONFIG_LIBVO_AACENC_ENCODER) += libvo-aacenc.o mpeg4audio.o \
audio_frame_queue.o
OBJS-$(CONFIG_LIBVO_AMRWBENC_ENCODER) += libvo-amrwbenc.o
OBJS-$(CONFIG_LIBVORBIS_DECODER) += libvorbisdec.o
OBJS-$(CONFIG_LIBVORBIS_ENCODER) += libvorbisenc.o \
OBJS-$(CONFIG_LIBVORBIS_ENCODER) += libvorbisenc.o audio_frame_queue.o \
vorbis_data.o vorbis_parser.o xiph.o
OBJS-$(CONFIG_LIBVPX_DECODER) += libvpxdec.o
OBJS-$(CONFIG_LIBVPX_ENCODER) += libvpxenc.o
@@ -736,7 +719,6 @@ OBJS-$(CONFIG_MPEGVIDEO_PARSER) += mpegvideo_parser.o \
OBJS-$(CONFIG_PNM_PARSER) += pnm_parser.o pnm.o
OBJS-$(CONFIG_RV30_PARSER) += rv34_parser.o
OBJS-$(CONFIG_RV40_PARSER) += rv34_parser.o
OBJS-$(CONFIG_TAK_PARSER) += tak_parser.o tak.o
OBJS-$(CONFIG_VC1_PARSER) += vc1_parser.o vc1.o vc1data.o \
msmpeg4.o msmpeg4data.o mpeg4video.o \
h263.o
@@ -766,6 +748,12 @@ OBJS-$(HAVE_PTHREADS) += pthread.o frame_thread_encoder.o
OBJS-$(HAVE_W32THREADS) += pthread.o frame_thread_encoder.o
OBJS-$(HAVE_OS2THREADS) += pthread.o frame_thread_encoder.o
# inverse.o contains the ff_inverse table definition, which is used by
# the FASTDIV macro (from libavutil); since referencing the external
# table has a negative effect on performance, copy it in libavcodec as
# well.
OBJS-$(!CONFIG_SMALL) += inverse.o
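For context, the FASTDIV trick referred to in this comment turns an integer division a/b into a multiply by a precomputed reciprocal plus a shift, which is why the table of inverses must be reachable from libavcodec as well. The following is only a rough standalone illustration of the idea; the real ff_inverse table and FASTDIV macro may differ in detail (handling of b == 1, exact operand ranges).

    #include <assert.h>
    #include <stdint.h>

    /* Precomputed ceil(2^32 / b) for b = 2..256; division then becomes a
     * 32x32->64 multiply and a shift.  (b == 1 would need an extra bit of
     * precision or a special case, so it is excluded from this sketch.) */
    static uint32_t inv_tab[257];

    static void init_inverse_table(void)
    {
        for (uint32_t b = 2; b <= 256; b++)
            inv_tab[b] = (uint32_t)((((uint64_t)1 << 32) + b - 1) / b);
    }

    /* Exact for 0 <= a < 2^16 and 2 <= b <= 256: the rounding error
     * a*e/2^32 (with e < b) stays below 1, so the floor is unchanged. */
    static uint32_t fast_div(uint32_t a, uint32_t b)
    {
        return (uint32_t)(((uint64_t)a * inv_tab[b]) >> 32);
    }

    int main(void)
    {
        init_inverse_table();
        for (uint32_t a = 0; a < (1u << 16); a++)
            for (uint32_t b = 2; b <= 256; b++)
                assert(fast_div(a, b) == a / b);
        return 0;
    }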
SKIPHEADERS += %_tablegen.h \
%_tables.h \
aac_tablegen_decl.h \
@@ -790,7 +778,6 @@ TESTPROGS = cabac \
fft-fixed \
golomb \
iirfilter \
imgconvert \
rangecoder \
snowenc \


@@ -41,7 +41,7 @@ typedef struct A64Context {
AVLFG randctx;
int mc_lifetime;
int mc_use_5col;
unsigned mc_frame_counter;
int mc_frame_counter;
int *mc_meta_charset;
int *mc_charmap;
int *mc_best_cb;


@@ -378,7 +378,7 @@ AVCodec ff_a64multi_encoder = {
.init = a64multi_init_encoder,
.encode2 = a64multi_encode_frame,
.close = a64multi_close_encoder,
.pix_fmts = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
.pix_fmts = (const enum PixelFormat[]) {PIX_FMT_GRAY8, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64"),
.capabilities = CODEC_CAP_DELAY,
};
@@ -392,7 +392,7 @@ AVCodec ff_a64multi5_encoder = {
.init = a64multi_init_encoder,
.encode2 = a64multi_encode_frame,
.close = a64multi_close_encoder,
.pix_fmts = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
.pix_fmts = (const enum PixelFormat[]) {PIX_FMT_GRAY8, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64, extended with 5th color (colram)"),
.capabilities = CODEC_CAP_DELAY,
};


@@ -113,7 +113,7 @@ enum OCStatus {
OC_LOCKED, ///< Output configuration locked in place
};
typedef struct OutputConfiguration {
typedef struct {
MPEG4AudioConfig m4ac;
uint8_t layout_map[MAX_ELEM_ID*4][3];
int layout_map_tags;
@@ -125,7 +125,7 @@ typedef struct OutputConfiguration {
/**
* Predictor State
*/
typedef struct PredictorState {
typedef struct {
float cor0;
float cor1;
float var0;
@@ -141,11 +141,12 @@ typedef struct PredictorState {
#define SCALE_MAX_POS 255 ///< scalefactor index maximum value
#define SCALE_MAX_DIFF 60 ///< maximum scalefactor difference allowed by standard
#define SCALE_DIFF_ZERO 60 ///< codebook index corresponding to zero scalefactor indices difference
#define POW_SF2_ZERO 200 ///< ff_aac_pow2sf_tab index corresponding to pow(2, 0);
/**
* Long Term Prediction
*/
typedef struct LongTermPrediction {
typedef struct {
int8_t present;
int16_t lag;
float coef;
@@ -155,7 +156,7 @@ typedef struct LongTermPrediction {
/**
* Individual Channel Stream
*/
typedef struct IndividualChannelStream {
typedef struct {
uint8_t max_sfb; ///< number of scalefactor bands per group
enum WindowSequence window_sequence[2];
uint8_t use_kb_window[2]; ///< If set, use Kaiser-Bessel window, otherwise use a sinus window.
@@ -176,7 +177,7 @@ typedef struct IndividualChannelStream {
/**
* Temporal Noise Shaping
*/
typedef struct TemporalNoiseShaping {
typedef struct {
int present;
int n_filt[8];
int length[8][4];
@@ -188,7 +189,7 @@ typedef struct TemporalNoiseShaping {
/**
* Dynamic Range Control - decoded from the bitstream but not processed further.
*/
typedef struct DynamicRangeControl {
typedef struct {
int pce_instance_tag; ///< Indicates with which program the DRC info is associated.
int dyn_rng_sgn[17]; ///< DRC sign information; 0 - positive, 1 - negative
int dyn_rng_ctl[17]; ///< DRC magnitude information
@@ -201,7 +202,7 @@ typedef struct DynamicRangeControl {
*/
} DynamicRangeControl;
typedef struct Pulse {
typedef struct {
int num_pulse;
int start;
int pos[4];
@@ -211,7 +212,7 @@ typedef struct Pulse {
/**
* coupling parameters
*/
typedef struct ChannelCoupling {
typedef struct {
enum CouplingPoint coupling_point; ///< The point during decoding at which coupling is applied.
int num_coupled; ///< number of target elements
enum RawDataBlockType type[8]; ///< Type of channel element to be coupled - SCE or CPE.
@@ -225,7 +226,7 @@ typedef struct ChannelCoupling {
/**
* Single Channel Element - used for both SCE and LFE elements.
*/
typedef struct SingleChannelElement {
typedef struct {
IndividualChannelStream ics;
TemporalNoiseShaping tns;
Pulse pulse;
@@ -236,16 +237,15 @@ typedef struct SingleChannelElement {
uint8_t zeroes[128]; ///< band is not coded (used by encoder)
DECLARE_ALIGNED(32, float, coeffs)[1024]; ///< coefficients for IMDCT
DECLARE_ALIGNED(32, float, saved)[1024]; ///< overlap
DECLARE_ALIGNED(32, float, ret_buf)[2048]; ///< PCM output buffer
DECLARE_ALIGNED(32, float, ret)[2048]; ///< PCM output
DECLARE_ALIGNED(16, float, ltp_state)[3072]; ///< time signal for LTP
PredictorState predictor_state[MAX_PREDICTORS];
float *ret; ///< PCM output
} SingleChannelElement;
/**
* channel element - generic struct for SCE/CPE/CCE/LFE
*/
typedef struct ChannelElement {
typedef struct {
// CPE specific
int common_window; ///< Set if channels share a common 'IndividualChannelStream' in bitstream.
int ms_mode; ///< Signals mid/side stereo flags coding mode (used by encoder)
@@ -260,8 +260,7 @@ typedef struct ChannelElement {
/**
* main AAC context
*/
typedef struct AACContext {
AVClass *class;
typedef struct {
AVCodecContext *avctx;
AVFrame frame;
@@ -299,10 +298,10 @@ typedef struct AACContext {
/** @} */
/**
* @name Members used for output
* @name Members used for output interleaving
* @{
*/
SingleChannelElement *output_element[MAX_CHANNELS]; ///< Points to each SingleChannelElement
float *output_data[MAX_CHANNELS]; ///< Points to each element's 'ret' buffer (PCM output).
/** @} */
@@ -310,8 +309,8 @@ typedef struct AACContext {
* @name Japanese DTV specific extension
* @{
*/
int force_dmono_mode;///< 0->not dmono, 1->use first channel, 2->use second channel
int dmono_mode; ///< 0->not dmono, 1->use first channel, 2->use second channel
int enable_jp_dmono; ///< enable japanese DTV specific 'dual mono'
int dmono_mode; ///< select the channel to decode in dual mono.
/** @} */
DECLARE_ALIGNED(32, float, temp)[128];

Some files were not shown because too many files have changed in this diff