Compare commits
128 Commits
The comparison spans the following 128 commits, listed by abbreviated SHA-1 (the author, avatar and date columns were empty in the source and are omitted). In the per-file diffs reconstructed below, lines marked `-` reproduce the left-hand column of the original side-by-side view (an FFmpeg 2.6-era tree) and lines marked `+` the right-hand column (a 2.4-era tree).

043f326060, 9a641b909c, 5405ba7b63, 70f6d553d9, 25fc3deed8, a8a6cdfcd7, 39518589e7, 857e391697,
04aa2ffbcf, 63523485f4, 35bc67503e, 1f636a697f, 24d725f455, 66fcf1fa40, c7b7e0790c, 3f3e5f8f60,
8e95ddbe82, 8cba067fe5, 73c6520c09, ca47574e16, 045670a6f7, 30a0622a5d, e4d921dc71, 2185103bcd,
bf7ee2524b, 5a1efc7b85, 71af22097d, f3d34cff76, 81b38caf21, 20071ff1a4, 3b7db9c4f5, 0ddcee172e,
13ecdb06f8, ca8c62d187, e443165c32, 0b41eeac45, de31f85707, d61454e7c1, b02e4faa3e, 09256527be,
84d26ab6eb, 1d99adc953, 67991f3a3e, 32e8922faf, 32dbd1f342, eefc3ca7be, a16558e122, 506368f563,
c0c24bc9b3, b89f279cd6, 7f90eef87a, 71f0a3c4ad, d9bef14e41, 4b4ed88e89, f6476944e1, 03d30d4c2c,
853a27e345, f7c0f8355e, 6f5c505109, b29f9897e3, 1d987a34d8, 6099d1ca0e, a9b9751bc8, da5e52010c,
e1fd837888, 1d10997488, 76a5cf1f80, 16c3d6d392, 9d79848f84, b8d34604ff, 7a02b9cb2d, ff6d440d10,
a88a57cd24, 0cda7baa8b, 31baa6f199, 3b364ac18a, 63660277fd, 6101187c55, 91ff803a74, 4f4f78ea48,
ff24824a72, 570cefb02b, ce0972ecdd, 09c848855a, e1ce4f805f, bb5c0ac922, d389438296, 3b57d7769a,
b7f2719951, dc4e34a2f0, d694ab846c, ab02548c8a, 9eb442cca2, b45ab61b24, cc73b4f574, 8ac3b2cdb7,
ace90ee265, 703bd31647, c16e80ee3d, 66ac5b96e8, 547cad8c81, 55b1a1e9c1, f851477889, 4f2d4b98fc,
7d8ebb8774, 07b0ccf511, 9d3e69ae30, 480633c6c2, b5d4f49e3c, 4cde8bae49, 5b740d1eaa, 152e09fde7,
110841c3ab, 5694831e06, 1f52f82a55, e62f08ca8d, ee099059e7, e2a89f7f0f, 7c46855074, c2d6cc2971,
8c91414803, 0263750a0d, 63795fe5b9, d2bad216f7, d04fb11868, e8f2823f06, 7dfccac20c, 04361427e6
.gitattributes (vendored): 1 line changed

    @@ -1 +0,0 @@
    -*.pnm -diff -text
.gitignore (vendored): 2 lines changed

    @@ -64,7 +64,6 @@
     /tests/data/
     /tests/pixfmts.mak
     /tests/rotozoom
    -/tests/test_copy.ffmeta
     /tests/tiny_psnr
     /tests/tiny_ssim
     /tests/videogen
    @@ -83,7 +82,6 @@
     /tools/pktdumper
     /tools/probetest
     /tools/qt-faststart
    -/tools/sidxindex
     /tools/trasher
     /tools/seek_print
     /tools/uncoded_frame
Changelog: 147 lines changed

    @@ -1,85 +1,78 @@
     Entries are sorted chronologically from oldest to youngest within each release,
     releases are sorted from youngest to oldest.
     
    -version 2.6.1:
    -- avformat/mov: Disallow ".." in dref unless use_absolute_path is set
    -- avfilter/palettegen: make sure at least one frame was sent to the filter
    -- avformat/mov: Check for string truncation in mov_open_dref()
    -- ac3_fixed: fix out-of-bound read
    -- mips/asmdefs: use _ABI64 as defined by gcc
    -- hevc: delay ff_thread_finish_setup for hwaccel
    -- avcodec/012v: Check dimensions more completely
    -- asfenc: fix leaking asf->index_ptr on error
    -- roqvideoenc: set enc->avctx in roq_encode_init
    -- avcodec/options_table: remove extradata_size from the AVOptions table
    -- ffmdec: limit the backward seek to the last resync position
    -- Add dependencies to configure file for vf_fftfilt
    -- ffmdec: make sure the time base is valid
    -- ffmdec: fix infinite loop at EOF
    -- ffmdec: initialize f_cprv, f_stvi and f_stau
    -- arm: Suppress tags about used cpu arch and extensions
    -- mxfdec: Fix the error handling for when strftime fails
    -- avcodec/opusdec: Fix delayed sample value
    -- avcodec/opusdec: Clear out pointers per packet
    -- avcodec/utils: Align YUV411 by as much as the other YUV variants
    -- lavc/hevcdsp: Fix compilation for arm with --disable-neon.
    -- vp9: fix segmentation map retention with threading enabled.
    -- Revert "avutil/opencl: is_compiled flag not being cleared in av_opencl_uninit"
    +version <next>:
     
    -version 2.6:
    -- nvenc encoder
    -- 10bit spp filter
    -- colorlevels filter
    -- RIFX format for *.wav files
    -- RTP/mpegts muxer
    -- non continuous cache protocol support
    -- tblend filter
    -- cropdetect support for non 8bpp, absolute (if limit >= 1) and relative (if limit < 1.0) threshold
    -- Camellia symmetric block cipher
    -- OpenH264 encoder wrapper
    -- VOC seeking support
    -- Closed caption Decoder
    -- fspp, uspp, pp7 MPlayer postprocessing filters ported to native filters
    -- showpalette filter
    -- Twofish symmetric block cipher
    -- Support DNx100 (960x720@8)
    -- eq2 filter ported from libmpcodecs as eq filter
    -- removed libmpcodecs
    -- Changed default DNxHD colour range in QuickTime .mov derivatives to mpeg range
    -- ported softpulldown filter from libmpcodecs as repeatfields filter
    -- dcshift filter
    -- RTP depacketizer for loss tolerant payload format for MP3 audio (RFC 5219)
    -- RTP depacketizer for AC3 payload format (RFC 4184)
    -- palettegen and paletteuse filters
    -- VP9 RTP payload format (draft 0) experimental depacketizer
    -- RTP depacketizer for DV (RFC 6469)
    -- DXVA2-accelerated HEVC decoding
    -- AAC ELD 480 decoding
    -- Intel QSV-accelerated H.264 decoding
    -- DSS SP decoder and DSS demuxer
    -- Fix stsd atom corruption in DNxHD QuickTimes
    -- Canopus HQX decoder
    -- RTP depacketization of T.140 text (RFC 4103)
    -- Port MIPS optimizations to 64-bit
    +version 2.4.3:
    +- avcodec/svq1dec: zero terminate embedded message before printing
    +- avcodec/cook: check that the subpacket sizes fit in block_align
    +- avcodec/g2meet: check tile dimensions to avoid integer overflow
    +- avcodec/utils: Align dimensions by at least their chroma sub-sampling factors.
    +- avcodec/dnxhddec: treat pix_fmt like width/height
    +- avcodec/dxa: check dimensions
    +- avcodec/dirac_arith: fix integer overflow
    +- avcodec/diracdec: Tighter checks on CODEBLOCKS_X/Y
    +- avcodec/diracdec: Use 64bit in calculation of codeblock coordinates
    +- avcodec/sgidec: fix count check
    +- avcodec/sgidec: fix linesize for 16bit
    +- avcodec/hevc_ps: Check default display window bitstream and skip if invalid
    +- avcodec/tiffenc: properly compute packet size
    +- lavd: export all symbols with av_ prefix
    +- avformat/mxfdec: Fix termination of mxf_data_essence_container_uls
    +- postproc: fix qp count
    +- postproc/postprocess: fix quant store for fq mode
    +- vf_drawtext: add missing clear of pointers after av_expr_free()
    +- utvideoenc: properly set slice height/last line
    +- swresample: fix sample drop loop end condition
    +- resample: Avoid off-by-1 errors in PTS calcs.
    +- imc: fix order of operations in coefficients read
    +- hevc_mvs: make sure to always initialize the temporal MV fully
    +- hevc_mvs: initialize the temporal MV in case of missing reference
    +
    +version 2.4.2:
    +- avcodec/on2avc: Check number of channels
    +- avcodec/hevc: fix chroma transform_add size
    +- avcodec/h264: Check mode before considering mixed mode intra prediction
    +- avformat/mpegts: use a padded buffer in read_sl_header()
    +- avformat/mpegts: Check desc_len / get8() return code
    +- avcodec/vorbisdec: Fix off by 1 error in ptns_to_read
    +- sdp: add support for H.261
    +- avcodec/svq3: Do not memcpy AVFrame
    +- avcodec/smc: fix off by 1 error
    +- avcodec/qpeg: fix off by 1 error in MV bounds check
    +- avcodec/gifdec: factorize interleave end handling out
    +- avcodec/cinepak: fix integer underflow
    +- avcodec/pngdec: Check bits per pixel before setting monoblack pixel format
    +- avcodec/pngdec: Calculate MPNG bytewidth more defensively
    +- avcodec/tiff: more completely check bpp/bppcount
    +- avcodec/mmvideo: Bounds check 2nd line of HHV Intra blocks
    +- avcodec/h263dec: Fix decoding messenger.h263
    +- avcodec/utils: Add case for jv to avcodec_align_dimensions2()
    +- avcodec/mjpegdec: check bits per pixel for changes similar to dimensions
    +- avcodec/jpeglsdec: Check run value more completely in ls_decode_line()
    +- avformat/hlsenc: export inner muxer timebase
    +- configure: add noexecstack to linker options if supported.
    +- avcodec/ac3enc_template: fix out of array read
    +- avutil/x86/cpu: fix cpuid sub-leaf selection
    +- avformat/img2dec: enable generic seeking for image pipes
    +- avformat/img2dec: initialize pkt->pos for image pipes
    +- avformat/img2dec: pass error code and signal EOF
    +- avformat/img2dec: fix error code at EOF for pipes
    +- libavutil/opt: fix av_opt_set_channel_layout() to access correct memory address
    +- tests/fate-run.sh: Cat .err file in case of error with V>0
    +- avformat/riffenc: Filter out "BottomUp" in ff_put_bmp_header()
    +- avcodec/webp: fix default palette color 0xff000000 -> 0x00000000
    +- avcodec/asvenc: fix AAN scaling
    +- Fix compile error on arm4/arm5 platform
     
     
    -version 2.5:
    -- HEVC/H.265 RTP payload format (draft v6) packetizer
    -- SUP/PGS subtitle demuxer
    -- ffprobe -show_pixel_formats option
    -- CAST128 symmetric block cipher, ECB mode
    -- STL subtitle demuxer and decoder
    -- libutvideo YUV 4:2:2 10bit support
    -- XCB-based screen-grabber
    -- UDP-Lite support (RFC 3828)
    -- xBR scaling filter
    -- AVFoundation screen capturing support
    -- ffserver supports codec private options
    -- creating DASH compatible fragmented MP4, MPEG-DASH segmenting muxer
    -- WebP muxer with animated WebP support
    -- zygoaudio decoding support
    -- APNG demuxer
    -- postproc visualization support
    +version 2.4.1:
    +- swscale: Allow chroma samples to be above and to the left of luma samples
    +- avcodec/libilbc: support for latest git of libilbc
    +- avcodec/webp: treat out-of-bound palette index as translucent black
    +- vf_deshake: rename Transform.vector to Transform.vec to avoid compiler confusion
    +- apetag: Fix APE tag size check
    +- tools/crypto_bench: fix build when AV_READ_TIME is unavailable
     
     
     version 2.4:
    @@ -89,7 +82,7 @@ version 2.4:
     - ICY metadata are now requested by default with the HTTP protocol
     - support for using metadata in stream specifiers in fftools
     - LZMA compression support in TIFF decoder
    -- H.261 RTP payload format (RFC 4587) depacketizer and experimental packetizer
    +- support for H.261 RTP payload format (RFC 4587)
     - HEVC/H.265 RTP payload format (draft v6) depacketizer
     - added codecview filter to visualize information exported by some codecs
     - Matroska 3D support thorugh side data
LICENSE.md: 109 lines changed. The file differs in three hunks; unchanged context lines are omitted.

    @@ -1,73 +1,69 @@

- Throughout the introductory section, the `-` side wraps literals in backticks while the `+` side leaves them bare: `COPYING.LGPLv2.1`, `COPYING.GPLv2`, `--enable-gpl`, `--enable-version3`, `COPYING.LGPLv3`, `COPYING.GPLv3`, `libavcodec/x86/flac_dsp_gpl.asm`, `libavcodec/x86/idct_mmx.c`, `libavcodec/libutvideo*.cpp`, `libavdevice/x11grab.c`, `libswresample/swresample-test.c`, `texi2pod.pl`, `libavcodec/jfdctfst.c`, `libavcodec/jfdctint_template.c`, `libavcodec/jrevdct.c`, and every entry in the libavfilter list.
- Only the `+` side lists "- libmpcodecs" among the GPL parts of FFmpeg.
- The libavfilter GPL list also differs in membership: only the `-` side contains `vf_eq.c`, `vf_fspp.c`, `vf_mpdecimate.c`, `vf_pp7.c`, `vf_repeatfields.c` and `vf_uspp.c`; only the `+` side contains vf_decimate.c and vf_mp.c; both sides contain f_ebur128.c, vf_blackframe.c, vf_boxblur.c, vf_colormatrix.c, vf_cropdetect.c, vf_delogo.c, vf_geq.c, vf_histeq.c, vf_hqdn3d.c, vf_interlace.c, vf_kerndeint.c, vf_mcdeint.c, vf_owdenoise.c, vf_perspective.c, vf_phase.c, vf_pp.c, vf_pullup.c, vf_sab.c, vf_smartblur.c, vf_spp.c, vf_stereo3d.c, vf_super2xsai.c, vf_tinterlace.c and vsrc_mptestsrc.c.
- Only the `-` side has the line "* `tests/reference.pnm` is under the expat license."

    @@ -80,22 +76,21 @@ compatible libraries
    -passing `--enable-gpl` to configure.
    +passing --enable-gpl to configure.
    -license version needs to be upgraded by passing `--enable-version3` to configure.
    +license version needs to be upgraded by passing --enable-version3 to configure.

    @@ -103,7 +98,7 @@ incompatible libraries
    -If you wish to enable these libraries, pass `--enable-nonfree` to configure.
    +If you wish to enable these libraries, pass --enable-nonfree to configure.
MAINTAINERS: 16 lines changed (unchanged context lines are omitted)

    @@ -156,7 +156,6 @@ Codecs:
    -  ccaption_dec.c                        Anshul Maheshwari
    @@ -166,7 +165,6 @@ Codecs:
    -  dss_sp.c                              Oleksij Rempel, Michael Niedermayer
    @@ -228,7 +226,6 @@ Codecs:
    -  nvenc.c                               Timo Rothenpieler
    @@ -312,7 +309,6 @@ libavdevice
    -  decklink*                             Deti Fliegl
    @@ -389,7 +385,6 @@ Muxers/Demuxers:
    -  apngdec.c                             Benoit Fouet
    @@ -402,7 +397,6 @@ Muxers/Demuxers:
    -  dss.c                                 Oleksij Rempel, Michael Niedermayer
    @@ -467,13 +461,9 @@ Muxers/Demuxers:
    -  rtpdec_ac3.*                          Gilles Chanteperdrix
    -  rtpdec_dv.*                           Thomas Volkert
    -  rtpdec_hevc.*, rtpenc_hevc.*          Thomas Volkert
    +  rtpdec_hevc.*                         Thomas Volkert
    -  rtpdec_mpa_robust.*                   Gilles Chanteperdrix
    -  rtpdec_vp9.c                          Thomas Volkert
    @@ -506,7 +496,6 @@ Protocols:
    -  icecast.c                             Marvin Scholz
    @@ -545,10 +534,9 @@ x86 Michael Niedermayer
    -2.6                                     Michael Niedermayer
    -2.5                                     Michael Niedermayer
    +1.2                                     Michael Niedermayer
Makefile: 5 lines changed

    @@ -32,7 +32,6 @@ OBJS-ffmpeg += ffmpeg_opt.o ffmpeg_filter.o
     OBJS-ffmpeg-$(HAVE_VDPAU_X11) += ffmpeg_vdpau.o
     OBJS-ffmpeg-$(HAVE_DXVA2_LIB) += ffmpeg_dxva2.o
     OBJS-ffmpeg-$(CONFIG_VDA)     += ffmpeg_vda.o
    -OBJS-ffserver                 += ffserver_config.o
     
     TESTTOOLS   = audiogen videogen rotozoom tiny_psnr tiny_ssim base64
     HOSTPROGS  := $(TESTTOOLS:%=tests/%) doc/print_options
    @@ -80,7 +79,7 @@ SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS \
                    HEADERS ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
                    ARMV5TE-OBJS ARMV6-OBJS ARMV8-OBJS VFP-OBJS NEON-OBJS \
                    ALTIVEC-OBJS MMX-OBJS YASM-OBJS \
    -               MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS \
    +               MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS MIPS32R2-OBJS \
                    OBJS SLIBOBJS HOSTOBJS TESTOBJS
     
     define RESET
    @@ -112,7 +111,7 @@ endef
     
     $(foreach P,$(PROGS),$(eval $(call DOPROG,$(P:$(PROGSSUF)$(EXESUF)=))))
     
    -ffprobe.o cmdutils.o libavcodec/utils.o libavformat/utils.o libavdevice/avdevice.o libavfilter/avfilter.o libavutil/utils.o libpostproc/postprocess.o libswresample/swresample.o libswscale/utils.o : libavutil/ffversion.h
    +ffprobe.o cmdutils.o : libavutil/ffversion.h
     
     $(PROGS): %$(PROGSSUF)$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
         $(CP) $< $@
README.md (the file header is missing in the source; the name is inferred from the content): 2 hunks changed

    @@ -19,10 +19,8 @@ such as audio, video, subtitles and related metadata.
     * [ffmpeg](http://ffmpeg.org/ffmpeg.html) is a command line toolbox to
       manipulate, convert and stream multimedia content.
     * [ffplay](http://ffmpeg.org/ffplay.html) is a minimalistic multimedia player.
    -* [ffprobe](http://ffmpeg.org/ffprobe.html) is a simple analysis tool to inspect
    +* [ffprobe](http://ffmpeg.org/ffprobe.html) is a simple analisys tool to inspect
       multimedia content.
    -* [ffserver](http://ffmpeg.org/ffserver.html) is a multimedia streaming server
    -  for live broadcasts.
     * Additional small tools such as `aviocat`, `ismindex` and `qt-faststart`.
     
     ## Documentation
    @@ -34,7 +32,7 @@ and in the [wiki](http://trac.ffmpeg.org).
     
     ### Examples
     
    -Coding examples are available in the **doc/examples** directory.
    +Conding examples are available in the **doc/example** directory.
     
     ## License
RELEASE_NOTES: 120 lines changed. The file is replaced wholesale in a single hunk (@@ -1,65 +1,83 @@): the `-` side carries the FFmpeg 2.6 "Grothendieck" notes, the `+` side the FFmpeg 2.4 "Fresnel" notes.

Removed (`-` side, FFmpeg 2.6 "Grothendieck"):

    ┌─────────────────────────────────────────────┐
    │ RELEASE NOTES for FFmpeg 2.6 "Grothendieck" │
    └─────────────────────────────────────────────┘

    The FFmpeg Project proudly presents FFmpeg 2.6 "Grothendieck", about 3
    months after the release of FFmpeg 2.5.

    A lot of important work got in this time, so let's start talking about what
    we like to brag the most about: features.

    A lot of people will probably be happy to hear that we now have support for
    NVENC — the Nvidia Video Encoder interface for H.264 encoding — thanks to
    Timo Rothenpieler, with some little help from NVIDIA and Philip Langdale.

    People in the broadcasting industry might also be interested in the first
    steps of closed captions support with the introduction of a decoder by
    Anshul Maheswhwari.

    Regarding filters love, we improved and added many. We could talk about the
    10-bit support in spp, but maybe it's more important to mention the addition
    of colorlevels (yet another color handling filter), tblend (allowing you
    to for example run a diff between successive frames of a video stream), or
    the dcshift audio filter.

    There are also two other important filters landing in libavfilter: palettegen
    and paletteuse. Both submitted by the Stupeflix company. These filters will
    be very useful in case you are looking for creating high quality GIFs, a
    format that still bravely fights annihilation in 2015.

    There are many other new features, but let's follow-up on one big cleanup
    achievement: the libmpcodecs (MPlayer filters) wrapper is finally dead. The
    last remaining filters (softpulldown/repeatfields, eq*, and various
    postprocessing filters) were ported by Arwa Arif (OPW student) and Paul B
    Mahol.

    Concerning API changes, there are not many things to mention. Though, the
    introduction of device inputs and outputs listing by Lukasz Marek is a
    notable addition (try ffmpeg -sources or ffmpeg -sinks for an example of
    the usage). As usual, see doc/APIchanges for more information.

    Now let's talk about optimizations. Ronald S. Bultje made the VP9 decoder
    usable on x86 32-bit systems and pre-ssse3 CPUs like Phenom (even dual core
    Athlons can play 1080p 30fps VP9 content now), so we now secretly hope for
    Google and Mozilla to use ffvp9 instead of libvpx. But VP9 is not the
    center of attention anymore, and HEVC/H.265 is also getting many
    improvements, which include C and x86 ASM optimizations, mainly from James
    Almer, Christophe Gisquet and Pierre-Edouard Lepere.

    Even though we had many x86 contributions, it is not the only architecture
    getting some love, with Seppo Tomperi adding ARM NEON optimizations to the
    HEVC stack, and James Cowgill adding MIPS64 assembly for all kind of audio
    processing code in libavcodec.

    And finally, Michael Niedermayer is still fixing many bugs, dealing with
    most of the boring work such as making releases, applying tons of
    contributors patches, and daily merging the changes from the Libav project.

    A more complete Changelog is available at the root of the project, and the
    complete Git history on http://source.ffmpeg.org.

    We hope you will like this release as much as we enjoyed working on it, and
    as usual, if you have any questions about it, or any FFmpeg related topic,
    feel free to join us on the #ffmpeg IRC channel (on irc.freenode.net) or ask
    on the mailing-lists.

Added (`+` side, FFmpeg 2.4 "Fresnel"):

    ┌────────────────────────────────────────┐
    │ RELEASE NOTES for FFmpeg 2.4 "Fresnel" │
    └────────────────────────────────────────┘

    The FFmpeg Project proudly presents FFmpeg 2.4 "Fresnel", just 2 months
    after the release of 2.3. Since this wasn't a long time ago, the Changelog
    is a bit short this time.

    The most important thing in this release is the major version bump of the
    libraries. This means that this release is neither ABI-compatible nor
    fully API-compatible. But on the other hand it is aligned with the Libav
    11 release series, and will as a result probably end up being maintained for
    a long time.

    As usual, if you have any question on this release or any FFmpeg related
    topic, feel free to join us on the #ffmpeg IRC channel (on
    irc.freenode.net).

    ┌────────────────────────────┐
    │ 🔨 API Information         │
    └────────────────────────────┘

    FFmpeg 2.4 includes the following library versions:

    • libavutil      54.7.100
    • libavcodec     56.1.100
    • libavformat    56.4.101
    • libavdevice    56.0.100
    • libavfilter    5.1.100
    • libswscale     3.0.100
    • libswresample  1.1.100
    • libpostproc    53.0.100

    Important API changes since 2.3:

    • The new field mime_type was added to AVProbeData, which can
      cause crashes, if it is not initialized.
    • Some deprecated functions were removed.
    • The avfilter_graph_parse function was made compatible with Libav.
    • The Matroska demuxer now outputs verbatim ASS packets.

    Please refer to the doc/APIchanges file for more information.

    ┌────────────────────────────┐
    │ ★ List of New Features     │
    └────────────────────────────┘

    ┌────────────────────────────┐
    │ libavformat                │
    └────────────────────────────┘

    • Icecast protocol.
    • API for live metadata updates through event flags.
    • UTF-16 support in text subtitles formats.
    • The ASS muxer now reorders the Dialogue events properly.
    • support for H.261 RTP payload format (RFC 4587)
    • HEVC/H.265 RTP payload format (draft v6) depacketizer

    ┌────────────────────────────┐
    │ libavfilter                │
    └────────────────────────────┘

    • Ported lenscorrection filter from frei0r filter.
    • Large optimizations in dctdnoiz to make it usable.
    • Added codecview filter to visualize information exported by some codecs.
    • Added silenceremove filter.

    ┌────────────────────────────┐
    │ libavutil                  │
    └────────────────────────────┘

    • Added clip() function in eval.

    ┌────────────────────────────┐
    │ ⚠ Behaviour changes        │
    └────────────────────────────┘

    • dctdnoiz filter now uses a block size of 8x8 instead of 16x16 by default
    • -vismv option is deprecated in favor of the codecview filter
    • libmodplug is now detected through pkg-config
    • HTML documentation generation through texi2html is deprecated in
      favor of makeinfo/texi2any
    • ICY metadata are now requested by default with the HTTP protocol
arch.mak: 1 line changed

    @@ -5,6 +5,7 @@ OBJS-$(HAVE_VFP) += $(VFP-OBJS) $(VFP-OBJS-yes)
     OBJS-$(HAVE_NEON)      += $(NEON-OBJS)       $(NEON-OBJS-yes)
     
     OBJS-$(HAVE_MIPSFPU)   += $(MIPSFPU-OBJS)    $(MIPSFPU-OBJS-yes)
    +OBJS-$(HAVE_MIPS32R2)  += $(MIPS32R2-OBJS)   $(MIPS32R2-OBJS-yes)
     OBJS-$(HAVE_MIPSDSPR1) += $(MIPSDSPR1-OBJS)  $(MIPSDSPR1-OBJS-yes)
     OBJS-$(HAVE_MIPSDSPR2) += $(MIPSDSPR2-OBJS)  $(MIPSDSPR2-OBJS-yes)
cmdutils.c: 233 lines changed

    @@ -290,14 +290,10 @@ static int write_option(void *optctx, const OptionDef *po, const char *opt,
         if (po->flags & OPT_SPEC) {
             SpecifierOpt **so = dst;
             char *p = strchr(opt, ':');
    -        char *str;
     
             dstcount = (int *)(so + 1);
             *so = grow_array(*so, sizeof(**so), dstcount, *dstcount + 1);
    -        str = av_strdup(p ? p + 1 : "");
    -        if (!str)
    -            return AVERROR(ENOMEM);
    -        (*so)[*dstcount - 1].specifier = str;
    +        (*so)[*dstcount - 1].specifier = av_strdup(p ? p + 1 : "");
             dst = &(*so)[*dstcount - 1].u;
         }
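This first hunk is an error-handling difference: the `-` side checks what av_strdup() returns and propagates AVERROR(ENOMEM), while the `+` side stores the result unchecked. A minimal sketch of the checked pattern, assuming only the libavutil development headers; the helper name set_specifier_string() is made up for illustration:

```c
#include <errno.h>
#include <libavutil/mem.h>    /* av_strdup() */
#include <libavutil/error.h>  /* AVERROR()   */

/* Hypothetical helper: duplicate a specifier string and fail cleanly
 * instead of silently storing NULL when allocation fails. */
static int set_specifier_string(char **dst, const char *opt_colon)
{
    char *str = av_strdup(opt_colon ? opt_colon + 1 : "");
    if (!str)
        return AVERROR(ENOMEM);  /* caller can report the failure and abort */
    *dst = str;
    return 0;
}
```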
    @@ -305,8 +301,6 @@ static int write_option(void *optctx, const OptionDef *po, const char *opt,
             char *str;
             str = av_strdup(arg);
             av_freep(dst);
    -        if (!str)
    -            return AVERROR(ENOMEM);
             *(char **)dst = str;
         } else if (po->flags & OPT_BOOL || po->flags & OPT_INT) {
             *(int *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    @@ -450,7 +444,7 @@ int locate_option(int argc, char **argv, const OptionDef *options,
                 (po->name && !strcmp(optname, po->name)))
                 return i;
     
    -        if (!po->name || po->flags & HAS_ARG)
    +        if (po->flags & HAS_ARG)
                 i++;
         }
         return 0;
    @@ -965,10 +959,9 @@ static int init_report(const char *env)
     
         report_file = fopen(filename.str, "w");
         if (!report_file) {
    -        int ret = AVERROR(errno);
             av_log(NULL, AV_LOG_ERROR, "Failed to open report \"%s\": %s\n",
                    filename.str, strerror(errno));
    -        return ret;
    +        return AVERROR(errno);
         }
         av_log_set_callback(log_callback_report);
         av_log(NULL, AV_LOG_INFO,
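The init_report() hunk follows a related theme: the `-` side snapshots errno into `ret = AVERROR(errno)` immediately after fopen() fails, before av_log()/strerror() get a chance to overwrite errno, and only then logs and returns. A small self-contained illustration of that ordering (plain stdio, not FFmpeg's actual helper):

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Returns 0 on success, a negative errno value on failure. */
static int open_report(const char *path, FILE **out)
{
    FILE *f = fopen(path, "w");
    if (!f) {
        int err = -errno;                 /* capture errno first ...              */
        fprintf(stderr, "Failed to open \"%s\": %s\n",
                path, strerror(errno));   /* ... since library calls may clobber it */
        return err;
    }
    *out = f;
    return 0;
}
```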
    @@ -1081,7 +1074,8 @@ static void print_program_info(int flags, int level)
         av_log(NULL, level, " Copyright (c) %d-%d the FFmpeg developers",
                program_birth_year, CONFIG_THIS_YEAR);
         av_log(NULL, level, "\n");
    -    av_log(NULL, level, "%sbuilt with %s\n", indent, CC_IDENT);
    +    av_log(NULL, level, "%sbuilt on %s %s with %s\n",
    +           indent, __DATE__, __TIME__, CC_IDENT);
     
         av_log(NULL, level, "%sconfiguration: " FFMPEG_CONFIGURATION "\n", indent);
     }
    @@ -1218,7 +1212,12 @@ static int is_device(const AVClass *avclass)
     {
         if (!avclass)
             return 0;
    -    return AV_IS_INPUT_DEVICE(avclass->category) || AV_IS_OUTPUT_DEVICE(avclass->category);
    +    return avclass->category == AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT ||
    +           avclass->category == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT ||
    +           avclass->category == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT ||
    +           avclass->category == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT ||
    +           avclass->category == AV_CLASS_CATEGORY_DEVICE_OUTPUT ||
    +           avclass->category == AV_CLASS_CATEGORY_DEVICE_INPUT;
     }
     
     static int show_formats_devices(void *optctx, const char *opt, const char *arg, int device_only)
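In the is_device() hunk the `-` side collapses the category test into the AV_IS_INPUT_DEVICE()/AV_IS_OUTPUT_DEVICE() helpers, while the `+` side enumerates the six device categories by hand. A hedged sketch of the compact form, assuming a libavutil new enough to ship those macros in libavutil/log.h:

```c
#include <libavutil/log.h>

/* Returns non-zero if the AVClass describes an input or output device. */
static int class_is_device(const AVClass *avclass)
{
    if (!avclass)
        return 0;
    return AV_IS_INPUT_DEVICE(avclass->category) ||
           AV_IS_OUTPUT_DEVICE(avclass->category);
}
```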
    @@ -1544,8 +1543,7 @@ int show_protocols(void *optctx, const char *opt, const char *arg)
     
     int show_filters(void *optctx, const char *opt, const char *arg)
     {
    -#if CONFIG_AVFILTER
    -    const AVFilter *filter = NULL;
    +    const AVFilter av_unused(*filter) = NULL;
         char descr[64], *descr_cur;
         int i, j;
         const AVFilterPad *pad;
    @@ -1558,6 +1556,7 @@ int show_filters(void *optctx, const char *opt, const char *arg)
                "  V = Video input/output\n"
                "  N = Dynamic number and/or type of input/output\n"
                "  | = Source or sink filter\n");
    +#if CONFIG_AVFILTER
         while ((filter = avfilter_next(filter))) {
             descr_cur = descr;
             for (i = 0; i < 2; i++) {
    @@ -1582,8 +1581,6 @@ int show_filters(void *optctx, const char *opt, const char *arg)
                    filter->process_command ? 'C' : '.',
                    filter->name, descr, filter->description);
         }
    -#else
    -    printf("No filters available: libavfilter disabled\n");
     #endif
         return 0;
     }
    @@ -1821,8 +1818,6 @@ int show_help(void *optctx, const char *opt, const char *arg)
         av_log_set_callback(log_callback_help);
     
         topic = av_strdup(arg ? arg : "");
    -    if (!topic)
    -        return AVERROR(ENOMEM);
         par = strchr(topic, '=');
         if (par)
             *par++ = 0;
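The show_filters() hunks above all revolve around the same enumeration loop: walking every registered filter with avfilter_next() (the `-` side wraps the whole body in `#if CONFIG_AVFILTER` and prints a fallback message otherwise, the `+` side only guards the loop and marks the variable av_unused). A standalone sketch of that loop against the libavfilter API of this era; avfilter_register_all() and avfilter_next() exist in these trees, though later FFmpeg replaces them with av_filter_iterate():

```c
#include <stdio.h>
#include <libavfilter/avfilter.h>

int main(void)
{
    const AVFilter *f = NULL;

    avfilter_register_all();               /* required on 2.x-era libavfilter */
    while ((f = avfilter_next(f)))         /* walk the registered filters */
        printf("%-20s %s\n", f->name, f->description ? f->description : "");
    return 0;
}
```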
    @@ -1862,48 +1857,35 @@ int read_yesno(void)
     
     int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
     {
    -    int64_t ret;
    +    int ret;
         FILE *f = av_fopen_utf8(filename, "rb");
     
         if (!f) {
    -        ret = AVERROR(errno);
             av_log(NULL, AV_LOG_ERROR, "Cannot read file '%s': %s\n", filename,
                    strerror(errno));
    -        return ret;
    +        return AVERROR(errno);
         }
    -
    -    ret = fseek(f, 0, SEEK_END);
    -    if (ret == -1) {
    -        ret = AVERROR(errno);
    -        goto out;
    +    fseek(f, 0, SEEK_END);
    +    *size = ftell(f);
    +    fseek(f, 0, SEEK_SET);
    +    if (*size == (size_t)-1) {
    +        av_log(NULL, AV_LOG_ERROR, "IO error: %s\n", strerror(errno));
    +        fclose(f);
    +        return AVERROR(errno);
         }
     
    -    ret = ftell(f);
    -    if (ret < 0) {
    -        ret = AVERROR(errno);
    -        goto out;
    -    }
    -    *size = ret;
    -
    -    ret = fseek(f, 0, SEEK_SET);
    -    if (ret == -1) {
    -        ret = AVERROR(errno);
    -        goto out;
    -    }
    -
         *bufptr = av_malloc(*size + 1);
         if (!*bufptr) {
             av_log(NULL, AV_LOG_ERROR, "Could not allocate file buffer\n");
    -        ret = AVERROR(ENOMEM);
    -        goto out;
    +        fclose(f);
    +        return AVERROR(ENOMEM);
         }
         ret = fread(*bufptr, 1, *size, f);
         if (ret < *size) {
             av_free(*bufptr);
             if (ferror(f)) {
    -            ret = AVERROR(errno);
                 av_log(NULL, AV_LOG_ERROR, "Error while reading file '%s': %s\n",
                        filename, strerror(errno));
    +            ret = AVERROR(errno);
             } else
                 ret = AVERROR_EOF;
         } else {
    @@ -1911,9 +1893,6 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
             (*bufptr)[(*size)++] = '\0';
         }
     
    -out:
    -    if (ret < 0)
    -        av_log(NULL, AV_LOG_ERROR, "IO error: %s\n", av_err2str(ret));
         fclose(f);
         return ret;
     }
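The large cmdutils_read_file() hunk is again about error paths: the `-` side checks every fseek()/ftell() return value and funnels failures through a single `out:` label, while the `+` side assumes the calls succeed. A compact standalone version of the checked pattern (plain stdio, not FFmpeg's helper):

```c
#include <errno.h>
#include <stdio.h>

/* Determine a file's size, reporting failures from fseek()/ftell(). */
static long file_size(FILE *f)
{
    long size;

    if (fseek(f, 0, SEEK_END) == -1)
        return -errno;
    size = ftell(f);
    if (size < 0)
        return -errno;
    if (fseek(f, 0, SEEK_SET) == -1)   /* rewind for the subsequent read */
        return -errno;
    return size;
}
```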
    @@ -2013,7 +1992,7 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
             switch (check_stream_specifier(s, st, p + 1)) {
             case  1: *p = 0; break;
             case  0:         continue;
    -        default:         exit_program(1);
    +        default:         return NULL;
             }
     
             if (av_opt_find(&cc, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ) ||
    @@ -2060,7 +2039,7 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
             exit_program(1);
         }
         if (*size < new_size) {
    -        uint8_t *tmp = av_realloc_array(array, new_size, elem_size);
    +        uint8_t *tmp = av_realloc(array, new_size*elem_size);
             if (!tmp) {
                 av_log(NULL, AV_LOG_ERROR, "Could not alloc buffer.\n");
                 exit_program(1);
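The grow_array() hunk swaps `av_realloc(array, new_size*elem_size)` for `av_realloc_array(array, new_size, elem_size)`, which rejects products that would overflow instead of silently allocating a short buffer. A sketch of the same guard written against plain realloc(), for illustration; it roughly mirrors what av_realloc_array() does inside libavutil:

```c
#include <stdint.h>
#include <stdlib.h>

/* realloc() nmemb*size bytes, failing cleanly if the product would overflow. */
static void *realloc_array(void *ptr, size_t nmemb, size_t size)
{
    if (size && nmemb > SIZE_MAX / size)
        return NULL;                  /* nmemb * size does not fit in size_t */
    return realloc(ptr, nmemb * size);
}
```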
    @@ -2071,161 +2050,3 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
         }
         return array;
     }

Everything after grow_array() in this hunk exists only on the `-` side: an `#if CONFIG_AVDEVICE` block of roughly 155 lines implementing the device-listing commands. It contains:

- print_device_sources(AVInputFormat *fmt, AVDictionary *opts) and print_device_sinks(AVOutputFormat *fmt, AVDictionary *opts), which validate the format's class with AV_IS_INPUT_DEVICE()/AV_IS_OUTPUT_DEVICE(), print "Audo-detected sources for %s" or "Audo-detected sinks for %s" (the typo is in the source), call avdevice_list_input_sources() or avdevice_list_output_sinks(), print each device as "name [description]" with a "*" marking the default, and free the list with avdevice_free_list_devices();
- show_sinks_sources_parse_arg(const char *arg, char **dev, AVDictionary **opts), which splits an optional "devicename[,opt1=val1[,opt2=val2...]]" argument into a device name and an option dictionary via av_dict_parse_string(), printing a usage hint when no argument is given;
- show_sources() and show_sinks(), which lower the log level to AV_LOG_ERROR, parse the argument, then walk all audio and video devices with av_input_audio_device_next()/av_input_video_device_next() (or the av_output_* counterparts), skip "lavfi", call the printing helpers, and restore the previous log level before returning.
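The removed block summarized above is the machinery behind `ffmpeg -sources` / `ffmpeg -sinks`. A hedged sketch of its core loop, using only documented libavdevice calls (avdevice_register_all(), av_input_audio_device_next(), av_input_video_device_next(), avdevice_list_input_sources(), avdevice_free_list_devices()); this is an illustration of the approach, not the removed code verbatim:

```c
#include <stdio.h>
#include <libavdevice/avdevice.h>

static void list_sources_for(AVInputFormat *fmt)
{
    AVDeviceInfoList *list = NULL;
    int i, ret = avdevice_list_input_sources(fmt, NULL, NULL, &list);

    if (ret < 0) {                         /* e.g. AVERROR(ENOSYS): not implemented */
        printf("%s: cannot list sources\n", fmt->name);
        return;
    }
    for (i = 0; i < list->nb_devices; i++)
        printf("%s %s [%s]\n", list->default_device == i ? "*" : " ",
               list->devices[i]->device_name,
               list->devices[i]->device_description);
    avdevice_free_list_devices(&list);
}

int main(void)
{
    AVInputFormat *fmt = NULL;

    avdevice_register_all();
    while ((fmt = av_input_audio_device_next(fmt)))   /* audio capture devices */
        list_sources_for(fmt);
    fmt = NULL;
    while ((fmt = av_input_video_device_next(fmt)))   /* video capture devices */
        list_sources_for(fmt);
    return 0;
}
```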
cmdutils.h: 14 lines changed

    @@ -443,20 +443,6 @@ int show_formats(void *optctx, const char *opt, const char *arg);
      */
     int show_devices(void *optctx, const char *opt, const char *arg);
     
    -#if CONFIG_AVDEVICE
    -/**
    - * Print a listing containing audodetected sinks of the output device.
    - * Device name with options may be passed as an argument to limit results.
    - */
    -int show_sinks(void *optctx, const char *opt, const char *arg);
    -
    -/**
    - * Print a listing containing audodetected sources of the input device.
    - * Device name with options may be passed as an argument to limit results.
    - */
    -int show_sources(void *optctx, const char *opt, const char *arg);
    -#endif
    -
     /**
      * Print a listing containing all the codecs supported by the
      * program.
Common option table (the file header is missing in the source; the entries match cmdutils_common_opts.h): 6 lines changed

    @@ -27,9 +27,3 @@
     { "opencl_bench", OPT_EXIT, {.func_arg = opt_opencl_bench}, "run benchmark on all OpenCL devices and show results" },
     { "opencl_options", HAS_ARG, {.func_arg = opt_opencl}, "set OpenCL environment options" },
     #endif
    -#if CONFIG_AVDEVICE
    -{ "sources"  , OPT_EXIT | HAS_ARG, { .func_arg = show_sources },
    -  "list sources of the input device", "device" },
    -{ "sinks"    , OPT_EXIT | HAS_ARG, { .func_arg = show_sinks },
    -  "list sinks of the output device", "device" },
    -#endif
common.mak: 14 lines changed

    @@ -5,14 +5,6 @@
     # first so "all" becomes default target
     all: all-yes
     
    -DEFAULT_YASMD=.dbg
    -
    -ifeq (1, DBG)
    -YASMD=$(DEFAULT_YASMD)
    -else
    -YASMD=
    -endif
    -
     ifndef SUBDIR
     
     ifndef V
    @@ -146,17 +138,17 @@ $(TOOLOBJS): | tools
     
     OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(TESTOBJS))
     
    -CLEANSUFFIXES     = *.d *.o *~ *.h.c *.map *.ver *.ho *.gcno *.gcda *$(DEFAULT_YASMD).asm
    +CLEANSUFFIXES     = *.d *.o *~ *.h.c *.map *.ver *.ho *.gcno *.gcda
     DISTCLEANSUFFIXES = *.pc
     LIBSUFFIXES       = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a
     
     define RULES
     clean::
    -    $(RM) $(OBJS) $(OBJS:.o=.d) $(OBJS:.o=$(DEFAULT_YASMD).d)
    +    $(RM) $(OBJS) $(OBJS:.o=.d)
         $(RM) $(HOSTPROGS)
         $(RM) $(TOOLS)
     endef
     
     $(eval $(RULES))
     
    --include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SLIBOBJS:.o=.d)) $(OBJS:.o=$(DEFAULT_YASMD).d)
    +-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SLIBOBJS:.o=.d))
@@ -513,21 +513,21 @@ AVSC_INLINE AVS_Value avs_array_elt(AVS_Value v, int index)
 // only use these functions on am AVS_Value that does not already have
 // an active value. Remember, treat AVS_Value as a fat pointer.
 AVSC_INLINE AVS_Value avs_new_value_bool(int v0)
-{ AVS_Value v = {0}; v.type = 'b'; v.d.boolean = v0 == 0 ? 0 : 1; return v; }
+{ AVS_Value v; v.type = 'b'; v.d.boolean = v0 == 0 ? 0 : 1; return v; }
 AVSC_INLINE AVS_Value avs_new_value_int(int v0)
-{ AVS_Value v = {0}; v.type = 'i'; v.d.integer = v0; return v; }
+{ AVS_Value v; v.type = 'i'; v.d.integer = v0; return v; }
 AVSC_INLINE AVS_Value avs_new_value_string(const char * v0)
-{ AVS_Value v = {0}; v.type = 's'; v.d.string = v0; return v; }
+{ AVS_Value v; v.type = 's'; v.d.string = v0; return v; }
 AVSC_INLINE AVS_Value avs_new_value_float(float v0)
-{ AVS_Value v = {0}; v.type = 'f'; v.d.floating_pt = v0; return v;}
+{ AVS_Value v; v.type = 'f'; v.d.floating_pt = v0; return v;}
 AVSC_INLINE AVS_Value avs_new_value_error(const char * v0)
-{ AVS_Value v = {0}; v.type = 'e'; v.d.string = v0; return v; }
+{ AVS_Value v; v.type = 'e'; v.d.string = v0; return v; }
 #ifndef AVSC_NO_DECLSPEC
 AVSC_INLINE AVS_Value avs_new_value_clip(AVS_Clip * v0)
-{ AVS_Value v = {0}; avs_set_to_clip(&v, v0); return v; }
+{ AVS_Value v; avs_set_to_clip(&v, v0); return v; }
 #endif
 AVSC_INLINE AVS_Value avs_new_value_array(AVS_Value * v0, int size)
-{ AVS_Value v = {0}; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
+{ AVS_Value v; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
 
 /////////////////////////////////////////////////////////////////////
 //
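The left-hand avs_new_value_* constructors zero-initialize the AVS_Value before setting individual members, so fields a given constructor never touches do not carry whatever happened to be on the stack. A minimal stand-alone sketch of the same pattern; the TaggedValue type below is a made-up stand-in, not the real AVS_Value layout:

    #include <stdio.h>

    /* hypothetical tagged value, shaped loosely like AVS_Value */
    typedef struct TaggedValue {
        short type;        /* 'b', 'i', 'f', ... */
        short array_size;  /* only meaningful for array values */
        union {
            int   integer;
            float floating_pt;
        } d;
    } TaggedValue;

    static TaggedValue new_value_int_uninit(int v0)
    {
        TaggedValue v;       /* array_size and unused union bytes are indeterminate */
        v.type = 'i';
        v.d.integer = v0;
        return v;
    }

    static TaggedValue new_value_int_zeroed(int v0)
    {
        TaggedValue v = {0}; /* every member starts from a known, zeroed state */
        v.type = 'i';
        v.d.integer = v0;
        return v;
    }

    int main(void)
    {
        TaggedValue a = new_value_int_uninit(42);
        TaggedValue b = new_value_int_zeroed(42);
        /* a.array_size is unspecified here; b.array_size is reliably 0 */
        printf("%d %d\n", a.d.integer, b.array_size + b.d.integer);
        return 0;
    }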
@@ -52,8 +52,8 @@ namespace avxsynth {
 //
 // Functions
 //
-#define MAKEDWORD(a,b,c,d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
-#define MAKEWORD(a,b) (((a) << 8) | (b))
+#define MAKEDWORD(a,b,c,d) ((a << 24) | (b << 16) | (c << 8) | (d))
+#define MAKEWORD(a,b) ((a << 8) | (b))
 
 #define lstrlen strlen
 #define lstrcpy strcpy
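The left-hand macros wrap every parameter in parentheses; without that, an argument that is itself an expression can bind to the shift operators in the expansion. A small illustrative program (the operands are arbitrary values chosen to show the difference):

    #include <stdio.h>

    #define MAKEWORD_BAD(a,b)  ((a << 8) | (b))
    #define MAKEWORD_GOOD(a,b) (((a) << 8) | (b))

    int main(void)
    {
        int hi = 1, lo = 2;
        /* expands to ((hi & 3 << 8) | (lo)); '<<' binds tighter than '&',
         * so this evaluates 1 & 0x300 = 0 and prints 0x2 */
        printf("bad:  0x%x\n", MAKEWORD_BAD(hi & 3, lo));
        /* expands to (((hi & 3) << 8) | (lo)) and prints the intended 0x102 */
        printf("good: 0x%x\n", MAKEWORD_GOOD(hi & 3, lo));
        return 0;
    }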
@@ -55,17 +55,35 @@ typedef struct pthread_t {
  * not mutexes */
 typedef CRITICAL_SECTION pthread_mutex_t;
 
-/* This is the CONDITION_VARIABLE typedef for using Windows' native
- * conditional variables on kernels 6.0+. */
-#if HAVE_CONDITION_VARIABLE_PTR
-typedef CONDITION_VARIABLE pthread_cond_t;
-#else
+/* This is the CONDITIONAL_VARIABLE typedef for using Window's native
+ * conditional variables on kernels 6.0+.
+ * MinGW does not currently have this typedef. */
 typedef struct pthread_cond_t {
-    void *Ptr;
+    void *ptr;
 } pthread_cond_t;
-#endif
 
-#if _WIN32_WINNT >= 0x0600
+/* function pointers to conditional variable API on windows 6.0+ kernels */
+#if _WIN32_WINNT < 0x0600
+static void (WINAPI *cond_broadcast)(pthread_cond_t *cond);
+static void (WINAPI *cond_init)(pthread_cond_t *cond);
+static void (WINAPI *cond_signal)(pthread_cond_t *cond);
+static BOOL (WINAPI *cond_wait)(pthread_cond_t *cond, pthread_mutex_t *mutex,
+                                DWORD milliseconds);
+#else
+#define cond_init InitializeConditionVariable
+#define cond_broadcast WakeAllConditionVariable
+#define cond_signal WakeConditionVariable
+#define cond_wait SleepConditionVariableCS
+
+#define CreateEvent(a, reset, init, name) \
+    CreateEventEx(a, name, \
+                  (reset ? CREATE_EVENT_MANUAL_RESET : 0) | \
+                  (init ? CREATE_EVENT_INITIAL_SET : 0), \
+                  EVENT_ALL_ACCESS)
+// CreateSemaphoreExA seems to be desktop-only, but as long as we don't
+// use named semaphores, it doesn't matter if we use the W version.
+#define CreateSemaphore(a, b, c, d) \
+    CreateSemaphoreExW(a, b, c, d, 0, SEMAPHORE_ALL_ACCESS)
 #define InitializeCriticalSection(x) InitializeCriticalSectionEx(x, 0, 0)
 #define WaitForSingleObject(a, b) WaitForSingleObjectEx(a, b, FALSE)
 #endif
@@ -118,36 +136,6 @@ static inline int pthread_mutex_unlock(pthread_mutex_t *m)
     return 0;
 }
 
-#if _WIN32_WINNT >= 0x0600
-static inline int pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
-{
-    InitializeConditionVariable(cond);
-    return 0;
-}
-
-/* native condition variables do not destroy */
-static inline void pthread_cond_destroy(pthread_cond_t *cond)
-{
-    return;
-}
-
-static inline void pthread_cond_broadcast(pthread_cond_t *cond)
-{
-    WakeAllConditionVariable(cond);
-}
-
-static inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
-{
-    SleepConditionVariableCS(cond, mutex, INFINITE);
-    return 0;
-}
-
-static inline void pthread_cond_signal(pthread_cond_t *cond)
-{
-    WakeConditionVariable(cond);
-}
-
-#else // _WIN32_WINNT < 0x0600
 /* for pre-Windows 6.0 platforms we need to define and use our own condition
  * variable and api */
 typedef struct win32_cond_t {
@@ -159,13 +147,6 @@ typedef struct win32_cond_t {
     volatile int is_broadcast;
 } win32_cond_t;
 
-/* function pointers to conditional variable API on windows 6.0+ kernels */
-static void (WINAPI *cond_broadcast)(pthread_cond_t *cond);
-static void (WINAPI *cond_init)(pthread_cond_t *cond);
-static void (WINAPI *cond_signal)(pthread_cond_t *cond);
-static BOOL (WINAPI *cond_wait)(pthread_cond_t *cond, pthread_mutex_t *mutex,
-                                DWORD milliseconds);
-
 static av_unused int pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
 {
     win32_cond_t *win32_cond = NULL;
@@ -178,7 +159,7 @@ static av_unused int pthread_cond_init(pthread_cond_t *cond, const void *unused_
     win32_cond = av_mallocz(sizeof(win32_cond_t));
     if (!win32_cond)
         return ENOMEM;
-    cond->Ptr = win32_cond;
+    cond->ptr = win32_cond;
     win32_cond->semaphore = CreateSemaphore(NULL, 0, 0x7fffffff, NULL);
     if (!win32_cond->semaphore)
         return ENOMEM;
@@ -193,7 +174,7 @@ static av_unused int pthread_cond_init(pthread_cond_t *cond, const void *unused_
 
 static av_unused void pthread_cond_destroy(pthread_cond_t *cond)
 {
-    win32_cond_t *win32_cond = cond->Ptr;
+    win32_cond_t *win32_cond = cond->ptr;
     /* native condition variables do not destroy */
     if (cond_init)
         return;
@@ -204,12 +185,12 @@ static av_unused void pthread_cond_destroy(pthread_cond_t *cond)
     pthread_mutex_destroy(&win32_cond->mtx_waiter_count);
     pthread_mutex_destroy(&win32_cond->mtx_broadcast);
     av_freep(&win32_cond);
-    cond->Ptr = NULL;
+    cond->ptr = NULL;
 }
 
 static av_unused void pthread_cond_broadcast(pthread_cond_t *cond)
 {
-    win32_cond_t *win32_cond = cond->Ptr;
+    win32_cond_t *win32_cond = cond->ptr;
     int have_waiter;
 
     if (cond_broadcast) {
@@ -240,7 +221,7 @@ static av_unused void pthread_cond_broadcast(pthread_cond_t *cond)
 
 static av_unused int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
 {
-    win32_cond_t *win32_cond = cond->Ptr;
+    win32_cond_t *win32_cond = cond->ptr;
     int last_waiter;
     if (cond_wait) {
         cond_wait(cond, mutex, INFINITE);
@@ -272,7 +253,7 @@ static av_unused int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mu
 
 static av_unused void pthread_cond_signal(pthread_cond_t *cond)
 {
-    win32_cond_t *win32_cond = cond->Ptr;
+    win32_cond_t *win32_cond = cond->ptr;
     int have_waiter;
     if (cond_signal) {
         cond_signal(cond);
@@ -294,7 +275,6 @@ static av_unused void pthread_cond_signal(pthread_cond_t *cond)
 
     pthread_mutex_unlock(&win32_cond->mtx_broadcast);
 }
-#endif
 
 static av_unused void w32thread_init(void)
 {
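The right-hand (older) code relies on w32thread_init() filling the cond_* function pointers at run time, so one binary can still start on pre-Vista kernels that lack native condition variables. A hedged sketch of how that kind of run-time binding is typically done; the kernel32 export names are real, but the variable and function names below are illustrative and this is not the exact body of FFmpeg's w32thread_init():

    #include <windows.h>

    /* illustrative stand-ins for the cond_* pointers declared in the header */
    static void (WINAPI *cond_init_fn)(void *cond);
    static void (WINAPI *cond_signal_fn)(void *cond);

    static void load_condvar_api(void)
    {
        HMODULE kernel_dll = GetModuleHandle(TEXT("kernel32.dll"));
        if (!kernel_dll)
            return;
        /* These exports exist on Vista and newer; on XP both lookups fail,
         * the pointers stay NULL, and callers fall back to the
         * semaphore-based emulation shown above. */
        cond_init_fn   = (void (WINAPI *)(void *))
            GetProcAddress(kernel_dll, "InitializeConditionVariable");
        cond_signal_fn = (void (WINAPI *)(void *))
            GetProcAddress(kernel_dll, "WakeConditionVariable");
    }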
128  doc/APIchanges
@@ -15,110 +15,15 @@ libavutil: 2014-08-09
 
 API changes, most recent first:
 
--------- 8< --------- FFmpeg 2.6 was cut here -------- 8< ---------
-
-2015-03-04 - cca4476 - lavf 56.25.100
-  Add avformat_flush()
-
-2015-03-03 - 81a9126 - lavf 56.24.100
-  Add avio_put_str16be()
-
-2015-02-19 - 560eb71 / 31d2039 - lavc 56.23.100 / 56.13.0
-  Add width, height, coded_width, coded_height and format to
-  AVCodecParserContext.
-
-2015-02-19 - e375511 / 5b1d9ce - lavu 54.19.100 / 54.9.0
-  Add AV_PIX_FMT_QSV for QSV hardware acceleration.
-
-2015-02-14 - ba22295 - lavc 56.21.102
-  Deprecate VIMA decoder.
-
-2015-01-27 - 62a82c6 / 728685f - lavc 56.21.100 / 56.12.0, lavu 54.18.100 / 54.8.0 - avcodec.h, frame.h
-  Add AV_PKT_DATA_AUDIO_SERVICE_TYPE and AV_FRAME_DATA_AUDIO_SERVICE_TYPE for
-  storing the audio service type as side data.
-
-2015-01-16 - a47c933 - lavf 56.19.100 - avformat.h
-  Add data_codec and data_codec_id for storing codec of data stream
-
-2015-01-11 - 007c33d - lavd 56.4.100 - avdevice.h
-  Add avdevice_list_input_sources().
-  Add avdevice_list_output_sinks().
-
-2014-12-25 - d7aaeea / c220a60 - lavc 56.19.100 / 56.10.0 - vdpau.h
-  Add av_vdpau_get_surface_parameters().
-
-2014-12-25 - ddb9a24 / 6c99c92 - lavc 56.18.100 / 56.9.0 - avcodec.h
-  Add AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH flag to av_vdpau_bind_context().
-
-2014-12-25 - d16079a / 57b6704 - lavc 56.17.100 / 56.8.0 - avcodec.h
-  Add AVCodecContext.sw_pix_fmt.
-
-2014-12-04 - 6e9ac02 - lavc 56.14.100 - dv_profile.h
-  Add av_dv_codec_profile2().
-
--------- 8< --------- FFmpeg 2.5 was cut here -------- 8< ---------
-
-2014-11-21 - ab922f9 - lavu 54.15.100 - dict.h
-  Add av_dict_get_string().
-
-2014-11-18 - a54a51c - lavu 54.14.100 - float_dsp.h
-  Add avpriv_float_dsp_alloc().
-
-2014-11-16 - 6690d4c3 - lavf 56.13.100 - avformat.h
-  Add AVStream.recommended_encoder_configuration with accessors.
-
-2014-11-16 - bee5844d - lavu 54.13.100 - opt.h
-  Add av_opt_serialize().
-
-2014-11-16 - eec69332 - lavu 54.12.100 - opt.h
-  Add av_opt_is_set_to_default().
-
-2014-11-06 - 44fa267 / 5e80fb7 - lavc 56.11.100 / 56.6.0 - vorbis_parser.h
-  Add a public API for parsing vorbis packets.
-
-2014-10-15 - 17085a0 / 7ea1b34 - lavc 56.7.100 / 56.5.0 - avcodec.h
-  Replace AVCodecContext.time_base used for decoding
-  with AVCodecContext.framerate.
-
-2014-10-15 - 51c810e / d565fef1 - lavc 56.6.100 / 56.4.0 - avcodec.h
-  Add AV_HWACCEL_FLAG_IGNORE_LEVEL flag to av_vdpau_bind_context().
-
-2014-10-13 - da21895 / 2df0c32e - lavc 56.5.100 / 56.3.0 - avcodec.h
-  Add AVCodecContext.initial_padding. Deprecate the use of AVCodecContext.delay
-  for audio encoding.
-
-2014-10-08 - bb44f7d / 5a419b2 - lavu 54.10.100 / 54.4.0 - pixdesc.h
-  Add API to return the name of frame and context color properties.
-
-2014-10-06 - a61899a / e3e158e - lavc 56.3.100 / 56.2.0 - vdpau.h
-  Add av_vdpau_bind_context(). This function should now be used for creating
-  (or resetting) a AVVDPAUContext instead of av_vdpau_alloc_context().
-
-2014-10-02 - cdd6f05 - lavc 56.2.100 - avcodec.h
-2014-10-02 - cdd6f05 - lavu 54.9.100 - frame.h
-  Add AV_FRAME_DATA_SKIP_SAMPLES. Add lavc CODEC_FLAG2_SKIP_MANUAL and
-  AVOption "skip_manual", which makes lavc export skip information via
-  AV_FRAME_DATA_SKIP_SAMPLES AVFrame side data, instead of skipping and
-  discarding samples automatically.
-
-2014-10-02 - 0d92b0d - lavu 54.8.100 - avstring.h
-  Add av_match_list()
-
-2014-09-24 - ac68295 - libpostproc 53.1.100
-  Add visualization support
-
-2014-09-19 - 6edd6a4 - lavc 56.1.101 - dv_profile.h
-  deprecate avpriv_dv_frame_profile2(), which was made public by accident.
-
 
 -------- 8< --------- FFmpeg 2.4 was cut here -------- 8< ---------
 
-2014-08-25 - 215db29 / b263f8f - lavf 56.3.100 / 56.3.0 - avformat.h
-  Add AVFormatContext.max_ts_probe.
-
 2014-08-28 - f30a815 / 9301486 - lavc 56.1.100 / 56.1.0 - avcodec.h
   Add AV_PKT_DATA_STEREO3D to export container-level stereo3d information.
 
+2014-08-25 - 215db29 / b263f8f - lavf 56.3.100 / 56.3.0 - avformat.h
+  Add AVFormatContext.max_ts_probe.
+
 2014-08-23 - 8fc9bd0 - lavu 54.7.100 - dict.h
   AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL arguments are now
   freed even on error. This is consistent with the behaviour all users
@@ -243,7 +148,7 @@ API changes, most recent first:
   Increase FF_INPUT_BUFFER_PADDING_SIZE to 32 due to some corner cases needing
   it
 
-2014-06-10 - 5482780 - lavf 55.43.100 - avformat.h
+2014-06-10 - xxxxxxx - lavf 55.43.100 - avformat.h
   New field int64_t max_analyze_duration2 instead of deprecated
   int max_analyze_duration.
 
@@ -267,7 +172,7 @@ API changes, most recent first:
   Add strict_std_compliance and related AVOptions to support experimental
   muxing.
 
-2014-05-26 - 55cc60c - lavu 52.87.100 - threadmessage.h
+2014-05-26 - xxxxxxx - lavu 52.87.100 - threadmessage.h
   Add thread message queue API.
 
 2014-05-26 - c37d179 - lavf 55.41.100 - avformat.h
@@ -277,7 +182,7 @@ API changes, most recent first:
   Add av_stream_get_side_data() to access stream-level side data
   in the same way as av_packet_get_side_data().
 
-2014-05-20 - 7336e39 - lavu 52.86.100 - fifo.h
+2014-05-xx - xxxxxxx - lavu 52.86.100 - fifo.h
   Add av_fifo_alloc_array() function.
 
 2014-05-19 - ef1d4ee / bddd8cb - lavu 52.85.100 / 53.15.0 - frame.h, display.h
@@ -296,7 +201,7 @@ API changes, most recent first:
   Add avcodec_free_context(). From now on it should be used for freeing
   AVCodecContext.
 
-2014-05-17 - 0eec06e / 1bd0bdc - lavu 52.84.100 / 54.5.0 - time.h
+2014-05-17 - 0eec06e - lavu 52.84.100 - time.h
   Add av_gettime_relative() av_gettime_relative_is_monotonic()
 
 2014-05-15 - eacf7d6 / 0c1959b - lavf 55.38.100 / 55.17.0 - avformat.h
@@ -309,10 +214,10 @@ API changes, most recent first:
 2014-05-11 - 14aef38 / 66e6c8a - lavu 52.83.100 / 53.14.0 - pixfmt.h
   Add AV_PIX_FMT_VDA for new-style VDA acceleration.
 
-2014-05-07 - 351f611 - lavu 52.82.100 - fifo.h
+2014-05-xx - xxxxxxx - lavu 52.82.0 - fifo.h
   Add av_fifo_freep() function.
 
-2014-05-02 - ba52fb11 - lavu 52.81.100 - opt.h
+2014-05-02 - ba52fb11 - lavu 52.81.0 - opt.h
   Add av_opt_set_dict2() function.
 
 2014-05-01 - e77b985 / a2941c8 - lavc 55.60.103 / 55.50.3 - avcodec.h
@@ -331,14 +236,10 @@ API changes, most recent first:
   Deprecate CODEC_FLAG_INPUT_PRESERVED. Its functionality is replaced by passing
   reference-counted frames to encoders.
 
-2014-04-30 - 617e866 - lavu 52.81.100 - pixdesc.h
-  Add av_find_best_pix_fmt_of_2(), av_get_pix_fmt_loss()
-  Deprecate avcodec_get_pix_fmt_loss(), avcodec_find_best_pix_fmt_of_2()
-
 2014-04-29 - 1bf6396 - lavc 55.60.100 - avcodec.h
   Add AVCodecDescriptor.mime_types field.
 
-2014-04-29 - b804eb4 - lavu 52.80.100 - hash.h
+2014-04-29 - xxxxxxx - lavu 52.80.0 - hash.h
   Add av_hash_final_bin(), av_hash_final_hex() and av_hash_final_b64().
 
 2014-03-07 - 8b2a130 - lavc 55.50.0 / 55.53.100 - dxva2.h
@@ -350,7 +251,7 @@ API changes, most recent first:
 2014-04-17 - a8d01a7 / 0983d48 - lavu 53.12.0 / 52.77.100 - crc.h
   Add AV_CRC_16_ANSI_LE crc variant.
 
-2014-04-15 - ef818d8 - lavf 55.37.101 - avformat.h
+2014-04-XX - xxxxxxx - lavf xx.xx.1xx - avformat.h
   Add av_format_inject_global_side_data()
 
 2014-04-12 - 4f698be - lavu 52.76.100 - log.h
@@ -430,7 +331,7 @@ API changes, most recent first:
 2014-02-19 - f4c8d00 / 6bb8720 - lavu 52.64.101 / 53.3.1 - opt.h
   Deprecate unused AV_OPT_FLAG_METADATA.
 
-2014-02-16 - 81c3f81 - lavd 55.10.100 - avdevice.h
+2014-02-xx - xxxxxxx - lavd 55.10.100 - avdevice.h
   Add avdevice_list_devices() and avdevice_free_list_devices()
 
 2014-02-16 - db3c970 - lavf 55.33.100 - avio.h
@@ -471,7 +372,7 @@ API changes, most recent first:
 2014-01-19 - 1a193c4 - lavf 55.25.100 - avformat.h
   Add avformat_get_mov_video_tags() and avformat_get_mov_audio_tags().
 
-2014-01-19 - 3532dd5 - lavu 52.63.100 - rational.h
+2014-01-19 - xxxxxxx - lavu 52.63.100 - rational.h
   Add av_make_q() function.
 
 2014-01-05 - 4cf4da9 / 5b4797a - lavu 52.62.100 / 53.2.0 - frame.h
@@ -961,9 +862,6 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
   avresample_read() are now uint8_t** instead of void**.
   Libavresample is now stable.
 
-2012-09-26 - 3ba0dab7 / 1384df64 - lavf 54.29.101 / 56.06.3 - avformat.h
-  Add AVFormatContext.avoid_negative_ts.
-
 2012-09-24 - 46a3595 / a42aada - lavc 54.59.100 / 54.28.0 - avcodec.h
   Add avcodec_free_frame(). This function must now
   be used for freeing an AVFrame.
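Most entries above are plain symbol additions. As a quick orientation for one of them, av_make_q() from the 2014-01-19 rational.h entry just builds an AVRational in place; a minimal sketch (the 30000/1001 frame rate is an arbitrary example value):

    #include <stdio.h>
    #include <libavutil/rational.h>

    int main(void)
    {
        AVRational fps = av_make_q(30000, 1001);   /* ~29.97 fps, NTSC-style */
        /* invert the rate to get the duration of one frame in seconds */
        printf("frame duration: %f s\n", av_q2d(av_inv_q(fps)));
        return 0;
    }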
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
 # This could be handy for archiving the generated documentation or
 # if some version control system is used.
 
-PROJECT_NUMBER = 2.6.1
+PROJECT_NUMBER = 2.4.3
 
 # With the PROJECT_LOGO tag one can specify a logo or icon that is included
 # in the documentation. The maximum height of the logo should not exceed 55
@@ -46,7 +46,6 @@ DOC_EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE) += filtering_audio
 DOC_EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE) += filtering_video
 DOC_EXAMPLES-$(CONFIG_METADATA_EXAMPLE) += metadata
 DOC_EXAMPLES-$(CONFIG_MUXING_EXAMPLE) += muxing
-DOC_EXAMPLES-$(CONFIG_QSVDEC_EXAMPLE) += qsvdec
 DOC_EXAMPLES-$(CONFIG_REMUXING_EXAMPLE) += remuxing
 DOC_EXAMPLES-$(CONFIG_RESAMPLING_AUDIO_EXAMPLE) += resampling_audio
 DOC_EXAMPLES-$(CONFIG_SCALING_VIDEO_EXAMPLE) += scaling_video
@@ -115,9 +114,9 @@ doc/%-all.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI)
 
 doc/%.1 doc/%.3: TAG = MAN
 doc/%.1: doc/%.pod $(GENTEXI)
-	$(M)pod2man --section=1 --center=" " --release=" " --date=" " $< > $@
+	$(M)pod2man --section=1 --center=" " --release=" " $< > $@
 doc/%.3: doc/%.pod $(GENTEXI)
-	$(M)pod2man --section=3 --center=" " --release=" " --date=" " $< > $@
+	$(M)pod2man --section=3 --center=" " --release=" " $< > $@
 
 $(DOCS) doc/doxy/html: | doc/
 $(DOC_EXAMPLES:%$(EXESUF)=%.o): | doc/examples
@@ -13,16 +13,7 @@ bitstream filter using the option @code{--disable-bsf=BSF}.
 The option @code{-bsfs} of the ff* tools will display the list of
 all the supported bitstream filters included in your build.
 
-The ff* tools have a -bsf option applied per stream, taking a
-comma-separated list of filters, whose parameters follow the filter
-name after a '='.
-
-@example
-ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
-@end example
-
-Below is a description of the currently available bitstream filters,
-with their parameters, if any.
+Below is a description of the currently available bitstream filters.
 
 @section aac_adtstoasc
 
@@ -144,16 +135,9 @@ ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
 Damages the contents of packets without damaging the container. Can be
 used for fuzzing or testing error resilience/concealment.
 
-Parameters:
-A numeral string, whose value is related to how often output bytes will
-be modified. Therefore, values below or equal to 0 are forbidden, and
-the lower the more frequent bytes will be modified, with 1 meaning
-every byte is modified.
-
 @example
-ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
+ffmpeg -i INPUT -c copy -bsf noise output.mkv
 @end example
-applies the modification to every byte.
 
 @section remove_extra
 
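The text above documents the command-line -bsf syntax. At this point in the tree the same filters were also reachable from C through the old av_bitstream_filter_* API (since superseded by AVBSFContext). A hedged sketch of pushing one packet through the noise filter under that old API; the helper name and the "1" argument are illustrative, and error handling is reduced to the essentials:

    #include <libavcodec/avcodec.h>

    /* 'avctx' is an opened AVCodecContext, 'pkt' a packet from av_read_frame() */
    static int apply_noise_bsf(AVCodecContext *avctx, AVPacket *pkt)
    {
        AVBitStreamFilterContext *bsf = av_bitstream_filter_init("noise");
        uint8_t *out_data = NULL;
        int out_size = 0, ret;

        if (!bsf)
            return AVERROR(ENOMEM);

        /* the "1" argument corresponds to "-bsf noise=1" on the command line */
        ret = av_bitstream_filter_filter(bsf, avctx, "1",
                                         &out_data, &out_size,
                                         pkt->data, pkt->size,
                                         pkt->flags & AV_PKT_FLAG_KEY);
        if (ret > 0) {
            /* the filter allocated a new buffer; the caller owns out_data
             * and must av_free() it after use */
        }
        av_bitstream_filter_close(bsf);
        return ret;
    }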
@@ -7,11 +7,6 @@ V
 Disable the default terse mode, the full command issued by make and its
 output will be shown on the screen.
 
-DBG
-    Preprocess x86 external assembler files to a .dbg.asm file in the object
-    directory, which then gets compiled. Helps developping those assembler
-    files.
-
 DESTDIR
 Destination directory for the install targets, useful to prepare packages
 or install FFmpeg in cross-environments.
@@ -7,7 +7,7 @@ all the encoders and decoders. In addition each codec may support
 so-called private options, which are specific for a given codec.
 
 Sometimes, a global option may only affect a specific kind of codec,
-and may be nonsensical or ignored by another, so you need to be aware
+and may be unsensical or ignored by another, so you need to be aware
 of the meaning of the specified options. Also some options are
 meant only for decoding or encoding.
 
@@ -71,9 +71,7 @@ Force low delay.
 @item global_header
 Place global headers in extradata instead of every keyframe.
 @item bitexact
-Only write platform-, build- and time-independent data. (except (I)DCT).
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
+Use only bitexact stuff (except (I)DCT).
 @item aic
 Apply H263 advanced intra coding / mpeg4 ac prediction.
 @item cbp
@@ -495,8 +493,6 @@ visualize block types
 picture buffer allocations
 @item thread_ops
 threading operations
-@item nomc
-skip motion compensation
 @end table
 
 @item vismv @var{integer} (@emph{decoding,video})
@@ -865,14 +861,6 @@ Possible values:
 
 @item mpeg2_aac_he
 
-@item mpeg4_sp
-
-@item mpeg4_core
-
-@item mpeg4_main
-
-@item mpeg4_asp
-
 @item dts
 
 @item dts_es
@@ -1126,19 +1114,6 @@ Interlaced video, bottom coded first, top displayed first
 Set to 1 to disable processing alpha (transparency). This works like the
 @samp{gray} flag in the @option{flags} option which skips chroma information
 instead of alpha. Default is 0.
-
-@item codec_whitelist @var{list} (@emph{input})
-"," separated List of allowed decoders. By default all are allowed.
-
-@item dump_separator @var{string} (@emph{input})
-Separator used to separate the fields printed on the command line about the
-Stream parameters.
-For example to separate the fields with newlines and indention:
-@example
-ffprobe -dump_separator "
-" -i ~/videos/matrixbench_mpeg2.mpg
-@end example
 
 @end table
 
 @c man end CODEC OPTIONS
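All of the options documented in this file are ordinary AVOptions, so a program can also pass them through a dictionary when the codec is opened. A minimal sketch using the bitexact flag discussed above; the helper name is made up, and ctx/codec are assumed to be set up by the caller:

    #include <libavcodec/avcodec.h>
    #include <libavutil/dict.h>

    /* 'ctx' is an allocated AVCodecContext, 'codec' the matching AVCodec */
    static int open_with_options(AVCodecContext *ctx, const AVCodec *codec)
    {
        AVDictionary *opts = NULL;
        int ret;

        av_dict_set(&opts, "flags", "+bitexact", 0);   /* reproducible output */
        av_dict_set(&opts, "threads", "auto", 0);

        ret = avcodec_open2(ctx, codec, &opts);
        av_dict_free(&opts);   /* entries left in 'opts' were not recognized */
        return ret;
    }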
@@ -190,15 +190,6 @@ The format for this option is a string containing 16 24-bits hexadecimal
 numbers (without 0x prefix) separated by comas, for example @code{0d00ee,
 ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
 7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b}.
-
-@item ifo_palette
-Specify the IFO file from which the global palette is obtained.
-(experimental)
-
-@item forced_subs_only
-Only decode subtitle entries marked as forced. Some titles have forced
-and non-forced subtitles in the same track. Setting this flag to @code{1}
-will only keep the forced subtitles. Default value is @code{0}.
 @end table
 
 @section libzvbi-teletext
@@ -29,26 +29,6 @@ the caller can decide which variant streams to actually receive.
 The total bitrate of the variant that the stream belongs to is
 available in a metadata key named "variant_bitrate".
 
-@section apng
-
-Animated Portable Network Graphics demuxer.
-
-This demuxer is used to demux APNG files.
-All headers, but the PNG signature, up to (but not including) the first
-fcTL chunk are transmitted as extradata.
-Frames are then split as being all the chunks between two fcTL ones, or
-between the last fcTL and IEND chunks.
-
-@table @option
-@item -ignore_loop @var{bool}
-Ignore the loop variable in the file if set.
-@item -max_fps @var{int}
-Maximum framerate in frames per second (0 for no limit).
-@item -default_fps @var{int}
-Default framerate in frames per second when none is specified in the file
-(0 meaning as fast as possible).
-@end table
-
 @section asf
 
 Advanced Systems Format demuxer.
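Demuxer options such as the apng ones in the removed section are normally handed to libavformat through an AVDictionary at open time. A short sketch; the file name is a placeholder and av_register_all() is assumed to have been called already:

    #include <libavformat/avformat.h>
    #include <libavutil/dict.h>

    static int open_apng(AVFormatContext **fmt_ctx)
    {
        AVDictionary *opts = NULL;
        int ret;

        av_dict_set(&opts, "ignore_loop", "1", 0);
        av_dict_set(&opts, "max_fps", "30", 0);

        /* "animation.apng" is just an example path */
        ret = avformat_open_input(fmt_ctx, "animation.apng", NULL, &opts);
        av_dict_free(&opts);   /* anything still in 'opts' was not consumed */
        return ret;
    }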
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle Developer Documentation
 @titlepage
@@ -648,12 +647,12 @@ accordingly].
 @subsection Adding files to the fate-suite dataset
 
 When there is no muxer or encoder available to generate test media for a
-specific test then the media has to be included in the fate-suite.
+specific test then the media has to be inlcuded in the fate-suite.
 First please make sure that the sample file is as small as possible to test the
 respective decoder or demuxer sufficiently. Large files increase network
 bandwidth and disk space requirements.
 Once you have a working fate test and fate sample, provide in the commit
-message or introductory message for the patch series that you post to
+message or introductionary message for the patch series that you post to
 the ffmpeg-devel mailing list, a direct link to download the sample media.
 
 
@@ -6,16 +6,8 @@ DOXYGEN="${3}"
 
 shift 3
 
-if [ -e "$SRC_PATH/VERSION" ]; then
-    VERSION=`cat "$SRC_PATH/VERSION"`
-else
-    VERSION=`cd "$SRC_PATH"; git describe`
-fi
-
 $DOXYGEN - <<EOF
 @INCLUDE        = ${DOXYFILE}
 INPUT           = $@
 EXAMPLE_PATH    = ${SRC_PATH}/doc/examples
-HTML_TIMESTAMP  = NO
-PROJECT_NUMBER  = $VERSION
 EOF
@@ -1745,10 +1745,6 @@ Enable calculation and printing SSIM stats after the encoding.
 Enable the use of Periodic Intra Refresh instead of IDR frames when set
 to 1.
 
-@item avcintra-class (@emph{class})
-Configure the encoder to generate AVC-Intra.
-Valid values are 50,100 and 200
-
 @item bluray-compat (@emph{bluray-compat})
 Configure the encoder to be compatible with the bluray standard.
 It is a shorthand for setting "bluray-compat=1 force-cfr=1".
@@ -1886,34 +1882,6 @@ no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT
 Encoding ffpresets for common usages are provided so they can be used with the
 general presets system (e.g. passing the @option{pre} option).
 
-@section libx265
-
-x265 H.265/HEVC encoder wrapper.
-
-This encoder requires the presence of the libx265 headers and library
-during configuration. You need to explicitly configure the build with
-@option{--enable-libx265}.
-
-@subsection Options
-
-@table @option
-@item preset
-Set the x265 preset.
-
-@item tune
-Set the x265 tune parameter.
-
-@item x265-params
-Set x265 options using a list of @var{key}=@var{value} couples separated
-by ":". See @command{x265 --help} for a list of options.
-
-For example to specify libx265 encoding options with @option{-x265-params}:
-
-@example
-ffmpeg -i input -c:v libx265 -x265-params crf=26:psy-rd=1 output.mp4
-@end example
-@end table
-
 @section libxvid
 
 Xvid MPEG-4 Part 2 encoder wrapper.
@@ -2171,7 +2139,7 @@ Use @var{0} to disable alpha plane coding.
 @subsection Speed considerations
 
 In the default mode of operation the encoder has to honor frame constraints
-(i.e. not produce frames with size bigger than requested) while still making
+(i.e. not produc frames with size bigger than requested) while still making
 output picture as good as possible.
 A frame containing a lot of small details is harder to compress and the encoder
 would spend more time searching for appropriate quantizers for each slice.
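The @option{x265-params} example above uses the ffmpeg command line; from C the same private option is usually set on the encoder context's priv_data before avcodec_open2(). A hedged sketch with the setup trimmed to the essentials (the resolution, pixel format and time base are arbitrary example values):

    #include <libavcodec/avcodec.h>
    #include <libavutil/opt.h>
    #include <libavutil/rational.h>

    static AVCodecContext *alloc_x265_encoder(int width, int height)
    {
        AVCodec *codec = avcodec_find_encoder_by_name("libx265");
        AVCodecContext *ctx = codec ? avcodec_alloc_context3(codec) : NULL;
        if (!ctx)
            return NULL;

        ctx->width     = width;
        ctx->height    = height;
        ctx->pix_fmt   = AV_PIX_FMT_YUV420P;
        ctx->time_base = av_make_q(1, 25);

        /* same string as the -x265-params example in the text */
        av_opt_set(ctx->priv_data, "x265-params", "crf=26:psy-rd=1", 0);

        if (avcodec_open2(ctx, codec, NULL) < 0) {
            avcodec_free_context(&ctx);
            return NULL;
        }
        return ctx;
    }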
@@ -29,7 +29,6 @@ OBJS=$(addsuffix .o,$(EXAMPLES))
 
 # the following examples make explicit use of the math library
 avcodec: LDLIBS += -lm
-decoding_encoding: LDLIBS += -lm
 muxing: LDLIBS += -lm
 resampling_audio: LDLIBS += -lm
 
@@ -288,7 +288,6 @@ static void audio_decode_example(const char *outfilename, const char *filename)
     avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);
 
     while (avpkt.size > 0) {
-        int i, ch;
         int got_frame = 0;
 
         if (!decoded_frame) {
@@ -305,15 +304,15 @@ static void audio_decode_example(const char *outfilename, const char *filename)
         }
         if (got_frame) {
             /* if a frame has been decoded, output it */
-            int data_size = av_get_bytes_per_sample(c->sample_fmt);
+            int data_size = av_samples_get_buffer_size(NULL, c->channels,
+                                                       decoded_frame->nb_samples,
+                                                       c->sample_fmt, 1);
             if (data_size < 0) {
                 /* This should not occur, checking just for paranoia */
                 fprintf(stderr, "Failed to calculate data size\n");
                 exit(1);
             }
-            for (i=0; i<decoded_frame->nb_samples; i++)
-                for (ch=0; ch<c->channels; ch++)
-                    fwrite(decoded_frame->data[ch] + data_size*i, 1, data_size, outfile);
+            fwrite(decoded_frame->data[0], 1, data_size, outfile);
         }
         avpkt.size -= len;
         avpkt.data += len;
@@ -651,7 +650,7 @@ int main(int argc, char **argv)
         video_encode_example("test.h264", AV_CODEC_ID_H264);
     } else if (!strcmp(output_type, "mp2")) {
         audio_encode_example("test.mp2");
-        audio_decode_example("test.pcm", "test.mp2");
+        audio_decode_example("test.sw", "test.mp2");
     } else if (!strcmp(output_type, "mpg")) {
         video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO);
         video_decode_example("test%02d.pgm", "test.mpg");
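The hunk at -305 is the usual planar-versus-packed pitfall: with planar sample formats every channel lives in its own data[ch] plane, so writing data[0] with the whole buffer size drops all channels but the first. A stand-alone sketch of the per-sample/per-channel loop used on the left-hand side; the function name is made up, and the frame is assumed to come from a decoder as in the example:

    #include <stdio.h>
    #include <libavcodec/avcodec.h>
    #include <libavutil/samplefmt.h>

    /* Write one decoded AVFrame as interleaved raw PCM, handling both
     * packed and planar sample formats. */
    static void write_frame_samples(const AVCodecContext *c,
                                    const AVFrame *frame, FILE *outfile)
    {
        int bytes_per_sample = av_get_bytes_per_sample(c->sample_fmt);

        if (av_sample_fmt_is_planar(c->sample_fmt)) {
            /* one plane per channel: interleave sample by sample */
            for (int i = 0; i < frame->nb_samples; i++)
                for (int ch = 0; ch < c->channels; ch++)
                    fwrite(frame->data[ch] + bytes_per_sample * i,
                           1, bytes_per_sample, outfile);
        } else {
            /* packed: all channels already interleaved in data[0] */
            fwrite(frame->data[0], 1,
                   (size_t)bytes_per_sample * frame->nb_samples * c->channels,
                   outfile);
        }
    }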
@@ -36,8 +36,6 @@
 
 static AVFormatContext *fmt_ctx = NULL;
 static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
-static int width, height;
-static enum AVPixelFormat pix_fmt;
 static AVStream *video_stream = NULL, *audio_stream = NULL;
 static const char *src_filename = NULL;
 static const char *video_dst_filename = NULL;
@@ -81,20 +79,6 @@ static int decode_packet(int *got_frame, int cached)
             fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
             return ret;
         }
-        if (video_dec_ctx->width != width || video_dec_ctx->height != height ||
-            video_dec_ctx->pix_fmt != pix_fmt) {
-            /* To handle this change, one could call av_image_alloc again and
-             * decode the following frames into another rawvideo file. */
-            fprintf(stderr, "Error: Width, height and pixel format have to be "
-                    "constant in a rawvideo file, but the width, height or "
-                    "pixel format of the input video changed:\n"
-                    "old: width = %d, height = %d, format = %s\n"
-                    "new: width = %d, height = %d, format = %s\n",
-                    width, height, av_get_pix_fmt_name(pix_fmt),
-                    video_dec_ctx->width, video_dec_ctx->height,
-                    av_get_pix_fmt_name(video_dec_ctx->pix_fmt));
-            return -1;
-        }
 
         if (*got_frame) {
             printf("video_frame%s n:%d coded_n:%d pts:%s\n",
@@ -106,7 +90,7 @@ static int decode_packet(int *got_frame, int cached)
              * this is required since rawvideo expects non aligned data */
             av_image_copy(video_dst_data, video_dst_linesize,
                           (const uint8_t **)(frame->data), frame->linesize,
-                          pix_fmt, width, height);
+                          video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);
 
             /* write to rawvideo file */
             fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
@@ -154,7 +138,7 @@ static int decode_packet(int *got_frame, int cached)
 static int open_codec_context(int *stream_idx,
                               AVFormatContext *fmt_ctx, enum AVMediaType type)
 {
-    int ret, stream_index;
+    int ret;
     AVStream *st;
     AVCodecContext *dec_ctx = NULL;
     AVCodec *dec = NULL;
@@ -166,8 +150,8 @@ static int open_codec_context(int *stream_idx,
                av_get_media_type_string(type), src_filename);
         return ret;
     } else {
-        stream_index = ret;
-        st = fmt_ctx->streams[stream_index];
+        *stream_idx = ret;
+        st = fmt_ctx->streams[*stream_idx];
 
         /* find decoder for the stream */
         dec_ctx = st->codec;
@@ -186,7 +170,6 @@ static int open_codec_context(int *stream_idx,
                    av_get_media_type_string(type));
             return ret;
         }
-        *stream_idx = stream_index;
     }
 
     return 0;
@@ -281,11 +264,9 @@ int main (int argc, char **argv)
     }
 
     /* allocate image where the decoded image will be put */
-    width = video_dec_ctx->width;
-    height = video_dec_ctx->height;
-    pix_fmt = video_dec_ctx->pix_fmt;
     ret = av_image_alloc(video_dst_data, video_dst_linesize,
-                         width, height, pix_fmt, 1);
+                         video_dec_ctx->width, video_dec_ctx->height,
+                         video_dec_ctx->pix_fmt, 1);
     if (ret < 0) {
         fprintf(stderr, "Could not allocate raw video buffer\n");
         goto end;
@@ -360,7 +341,7 @@ int main (int argc, char **argv)
     if (video_stream) {
         printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
-               av_get_pix_fmt_name(pix_fmt), width, height,
+               av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
               video_dst_filename);
     }
 
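The open_codec_context() hunks revolve around av_find_best_stream(), which returns the chosen stream index on success and a negative AVERROR code on failure; the newer code only writes it into *stream_idx after the decoder has been opened successfully. A reduced sketch of that call pattern (the wrapper name is made up):

    #include <stdio.h>
    #include <libavformat/avformat.h>

    /* Returns the index of the best video stream in fmt_ctx, or a negative
     * AVERROR code. 'dec' receives the decoder suggested by libavformat. */
    static int find_video_stream(AVFormatContext *fmt_ctx, AVCodec **dec)
    {
        int ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, dec, 0);
        if (ret < 0) {
            fprintf(stderr, "Could not find %s stream\n",
                    av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
            return ret;
        }
        return ret;  /* the stream index */
    }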
@@ -145,28 +145,12 @@ static int init_filters(const char *filters_descr)
         goto end;
     }
 
-    /*
-     * Set the endpoints for the filter graph. The filter_graph will
-     * be linked to the graph described by filters_descr.
-     */
-
-    /*
-     * The buffer source output must be connected to the input pad of
-     * the first filter described by filters_descr; since the first
-     * filter input label is not specified, it is set to "in" by
-     * default.
-     */
+    /* Endpoints for the filter graph. */
     outputs->name       = av_strdup("in");
     outputs->filter_ctx = buffersrc_ctx;
     outputs->pad_idx    = 0;
     outputs->next       = NULL;
 
-    /*
-     * The buffer sink input must be connected to the output pad of
-     * the last filter described by filters_descr; since the last
-     * filter output label is not specified, it is set to "out" by
-     * default.
-     */
     inputs->name       = av_strdup("out");
     inputs->filter_ctx = buffersink_ctx;
     inputs->pad_idx    = 0;
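The "in"/"out" AVFilterInOut entries built above are consumed by the graph parser in the very next step of this example. A hedged sketch of the whole wiring step as one helper; the function name is made up, and the buffer source/sink contexts are assumed to have been created as in the surrounding code:

    #include <libavfilter/avfilter.h>
    #include <libavutil/error.h>
    #include <libavutil/mem.h>

    /* Wire a textual filter description between an already-created buffer
     * source and buffer sink, then validate the graph. */
    static int link_graph(AVFilterGraph *graph, const char *filters_descr,
                          AVFilterContext *buffersrc_ctx,
                          AVFilterContext *buffersink_ctx)
    {
        AVFilterInOut *outputs = avfilter_inout_alloc();
        AVFilterInOut *inputs  = avfilter_inout_alloc();
        int ret = (outputs && inputs) ? 0 : AVERROR(ENOMEM);

        if (ret >= 0) {
            outputs->name       = av_strdup("in");   /* feeds the first filter */
            outputs->filter_ctx = buffersrc_ctx;
            outputs->pad_idx    = 0;
            outputs->next       = NULL;

            inputs->name       = av_strdup("out");   /* drains the last filter */
            inputs->filter_ctx = buffersink_ctx;
            inputs->pad_idx    = 0;
            inputs->next       = NULL;

            ret = avfilter_graph_parse_ptr(graph, filters_descr,
                                           &inputs, &outputs, NULL);
        }
        if (ret >= 0)
            ret = avfilter_graph_config(graph, NULL);

        /* the parser takes what it needs; free whatever is left */
        avfilter_inout_free(&inputs);
        avfilter_inout_free(&outputs);
        return ret;
    }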
@@ -90,7 +90,6 @@ static int init_filters(const char *filters_descr)
     AVFilter *buffersink = avfilter_get_by_name("buffersink");
     AVFilterInOut *outputs = avfilter_inout_alloc();
     AVFilterInOut *inputs  = avfilter_inout_alloc();
-    AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
     enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
 
     filter_graph = avfilter_graph_alloc();
@@ -103,7 +102,7 @@ static int init_filters(const char *filters_descr)
     snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
-            time_base.num, time_base.den,
+            dec_ctx->time_base.num, dec_ctx->time_base.den,
             dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
 
     ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
@@ -128,28 +127,12 @@ static int init_filters(const char *filters_descr)
         goto end;
     }
 
-    /*
-     * Set the endpoints for the filter graph. The filter_graph will
-     * be linked to the graph described by filters_descr.
-     */
-
-    /*
-     * The buffer source output must be connected to the input pad of
-     * the first filter described by filters_descr; since the first
-     * filter input label is not specified, it is set to "in" by
-     * default.
-     */
+    /* Endpoints for the filter graph. */
     outputs->name       = av_strdup("in");
     outputs->filter_ctx = buffersrc_ctx;
     outputs->pad_idx    = 0;
     outputs->next       = NULL;
 
-    /*
-     * The buffer sink input must be connected to the output pad of
-     * the last filter described by filters_descr; since the last
-     * filter output label is not specified, it is set to "out" by
-     * default.
-     */
     inputs->name       = av_strdup("out");
     inputs->filter_ctx = buffersink_ctx;
     inputs->pad_idx    = 0;
@@ -661,7 +661,7 @@ int main(int argc, char **argv)
 
     if (!(fmt->flags & AVFMT_NOFILE))
         /* Close the output file. */
-        avio_closep(&oc->pb);
+        avio_close(oc->pb);
 
     /* free the stream */
     avformat_free_context(oc);
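avio_closep() does the same work as avio_close() but also resets the caller's pointer, which is why the newer code on the left prefers it: later cleanup paths cannot close or dereference a stale AVIOContext by accident. A tiny sketch of the difference (the wrapper is illustrative only):

    #include <libavformat/avio.h>

    static void close_output(AVIOContext **pb)
    {
        /* avio_close(*pb) would leave *pb dangling; avio_closep() closes the
         * resource and sets *pb to NULL in one step, so a repeated call or a
         * later NULL check behaves safely. */
        avio_closep(pb);
    }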
@@ -1,484 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2015 Anton Khirnov
|
|
||||||
*
|
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
* of this software and associated documentation files (the "Software"), to deal
|
|
||||||
* in the Software without restriction, including without limitation the rights
|
|
||||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
* copies of the Software, and to permit persons to whom the Software is
|
|
||||||
* furnished to do so, subject to the following conditions:
|
|
||||||
*
|
|
||||||
* The above copyright notice and this permission notice shall be included in
|
|
||||||
* all copies or substantial portions of the Software.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
||||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
||||||
* THE SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @file
|
|
||||||
* Intel QSV-accelerated H.264 decoding example.
|
|
||||||
*
|
|
||||||
* @example qsvdec.c
|
|
||||||
* This example shows how to do QSV-accelerated H.264 decoding with output
|
|
||||||
* frames in the VA-API video surfaces.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include "config.h"
|
|
||||||
|
|
||||||
#include <stdio.h>
|
|
||||||
|
|
||||||
#include <mfx/mfxvideo.h>
|
|
||||||
|
|
||||||
#include <va/va.h>
|
|
||||||
#include <va/va_x11.h>
|
|
||||||
#include <X11/Xlib.h>
|
|
||||||
|
|
||||||
#include "libavformat/avformat.h"
|
|
||||||
#include "libavformat/avio.h"
|
|
||||||
|
|
||||||
#include "libavcodec/avcodec.h"
|
|
||||||
#include "libavcodec/qsv.h"
|
|
||||||
|
|
||||||
#include "libavutil/error.h"
|
|
||||||
#include "libavutil/mem.h"
|
|
||||||
|
|
||||||
typedef struct DecodeContext {
|
|
||||||
mfxSession mfx_session;
|
|
||||||
VADisplay va_dpy;
|
|
||||||
|
|
||||||
VASurfaceID *surfaces;
|
|
||||||
mfxMemId *surface_ids;
|
|
||||||
int *surface_used;
|
|
||||||
int nb_surfaces;
|
|
||||||
|
|
||||||
mfxFrameInfo frame_info;
|
|
||||||
} DecodeContext;
|
|
||||||
|
|
||||||
static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
                             mfxFrameAllocResponse *resp)
{
    DecodeContext *decode = pthis;
    int err, i;

    if (decode->surfaces) {
        fprintf(stderr, "Multiple allocation requests.\n");
        return MFX_ERR_MEMORY_ALLOC;
    }
    if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)) {
        fprintf(stderr, "Unsupported surface type: %d\n", req->Type);
        return MFX_ERR_UNSUPPORTED;
    }
    if (req->Info.BitDepthLuma != 8 || req->Info.BitDepthChroma != 8 ||
        req->Info.Shift || req->Info.FourCC != MFX_FOURCC_NV12 ||
        req->Info.ChromaFormat != MFX_CHROMAFORMAT_YUV420) {
        fprintf(stderr, "Unsupported surface properties.\n");
        return MFX_ERR_UNSUPPORTED;
    }

    decode->surfaces     = av_malloc_array (req->NumFrameSuggested, sizeof(*decode->surfaces));
    decode->surface_ids  = av_malloc_array (req->NumFrameSuggested, sizeof(*decode->surface_ids));
    decode->surface_used = av_mallocz_array(req->NumFrameSuggested, sizeof(*decode->surface_used));
    if (!decode->surfaces || !decode->surface_ids || !decode->surface_used)
        goto fail;

    err = vaCreateSurfaces(decode->va_dpy, VA_RT_FORMAT_YUV420,
                           req->Info.Width, req->Info.Height,
                           decode->surfaces, req->NumFrameSuggested,
                           NULL, 0);
    if (err != VA_STATUS_SUCCESS) {
        fprintf(stderr, "Error allocating VA surfaces\n");
        goto fail;
    }
    decode->nb_surfaces = req->NumFrameSuggested;

    for (i = 0; i < decode->nb_surfaces; i++)
        decode->surface_ids[i] = &decode->surfaces[i];

    resp->mids           = decode->surface_ids;
    resp->NumFrameActual = decode->nb_surfaces;

    decode->frame_info = req->Info;

    return MFX_ERR_NONE;
fail:
    av_freep(&decode->surfaces);
    av_freep(&decode->surface_ids);
    av_freep(&decode->surface_used);

    return MFX_ERR_MEMORY_ALLOC;
}

static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
{
    DecodeContext *decode = pthis;

    if (decode->surfaces)
        vaDestroySurfaces(decode->va_dpy, decode->surfaces, decode->nb_surfaces);
    av_freep(&decode->surfaces);
    av_freep(&decode->surface_ids);
    av_freep(&decode->surface_used);
    decode->nb_surfaces = 0;

    return MFX_ERR_NONE;
}

static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
    return MFX_ERR_UNSUPPORTED;
}

static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
    return MFX_ERR_UNSUPPORTED;
}

static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
{
    *hdl = mid;
    return MFX_ERR_NONE;
}

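/*
 * Custom AVCodecContext.get_buffer2() callback: instead of having libavcodec
 * allocate data planes, it picks an unused VA surface from the DecodeContext,
 * wraps a matching mfxFrameSurface1 into frame->buf[0] (released through
 * free_buffer(), which also marks the surface as free again) and stores the
 * surface pointer in frame->data[3], as expected for AV_PIX_FMT_QSV frames.
 */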
static void free_buffer(void *opaque, uint8_t *data)
{
    int *used = opaque;
    *used = 0;
    av_freep(&data);
}

static int get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    DecodeContext *decode = avctx->opaque;

    mfxFrameSurface1 *surf;
    AVBufferRef *surf_buf;
    int idx;

    for (idx = 0; idx < decode->nb_surfaces; idx++) {
        if (!decode->surface_used[idx])
            break;
    }
    if (idx == decode->nb_surfaces) {
        fprintf(stderr, "No free surfaces\n");
        return AVERROR(ENOMEM);
    }

    surf = av_mallocz(sizeof(*surf));
    if (!surf)
        return AVERROR(ENOMEM);
    surf_buf = av_buffer_create((uint8_t*)surf, sizeof(*surf), free_buffer,
                                &decode->surface_used[idx], AV_BUFFER_FLAG_READONLY);
    if (!surf_buf) {
        av_freep(&surf);
        return AVERROR(ENOMEM);
    }

    surf->Info       = decode->frame_info;
    surf->Data.MemId = &decode->surfaces[idx];

    frame->buf[0]  = surf_buf;
    frame->data[3] = (uint8_t*)surf;

    decode->surface_used[idx] = 1;

    return 0;
}

static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
{
    while (*pix_fmts != AV_PIX_FMT_NONE) {
        if (*pix_fmts == AV_PIX_FMT_QSV) {
            if (!avctx->hwaccel_context) {
                DecodeContext *decode = avctx->opaque;
                AVQSVContext *qsv = av_qsv_alloc_context();
                if (!qsv)
                    return AV_PIX_FMT_NONE;

                qsv->session   = decode->mfx_session;
                qsv->iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;

                avctx->hwaccel_context = qsv;
            }

            return AV_PIX_FMT_QSV;
        }

        pix_fmts++;
    }

    fprintf(stderr, "The QSV pixel format not offered in get_format()\n");

    return AV_PIX_FMT_NONE;
}

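/*
 * Decode one packet and, for every frame produced, read the NV12 image back
 * from the VA surface with vaCreateImage()/vaGetImage()/vaMapBuffer() and
 * write its planes to the output AVIOContext. Calling this with an empty
 * packet (pkt->data == NULL) drains the decoder.
 */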
static int decode_packet(DecodeContext *decode, AVCodecContext *decoder_ctx,
                         AVFrame *frame, AVPacket *pkt,
                         AVIOContext *output_ctx)
{
    int ret = 0;
    int got_frame = 1;

    while (pkt->size > 0 || (!pkt->data && got_frame)) {
        ret = avcodec_decode_video2(decoder_ctx, frame, &got_frame, pkt);
        if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            return ret;
        }

        pkt->data += ret;
        pkt->size -= ret;

        /* A real program would do something useful with the decoded frame here.
         * We just retrieve the raw data and write it to a file, which is rather
         * useless but pedagogic. */
        if (got_frame) {
            mfxFrameSurface1 *surf = (mfxFrameSurface1*)frame->data[3];
            VASurfaceID surface = *(VASurfaceID*)surf->Data.MemId;

            VAImageFormat img_fmt = {
                .fourcc         = VA_FOURCC_NV12,
                .byte_order     = VA_LSB_FIRST,
                .bits_per_pixel = 8,
                .depth          = 8,
            };

            VAImage img;

            VAStatus err;
            uint8_t *data;
            int i, j;

            img.buf      = VA_INVALID_ID;
            img.image_id = VA_INVALID_ID;

            err = vaCreateImage(decode->va_dpy, &img_fmt,
                                frame->width, frame->height, &img);
            if (err != VA_STATUS_SUCCESS) {
                fprintf(stderr, "Error creating an image: %s\n",
                        vaErrorStr(err));
                ret = AVERROR_UNKNOWN;
                goto fail;
            }

            err = vaGetImage(decode->va_dpy, surface, 0, 0,
                             frame->width, frame->height,
                             img.image_id);
            if (err != VA_STATUS_SUCCESS) {
                fprintf(stderr, "Error getting an image: %s\n",
                        vaErrorStr(err));
                ret = AVERROR_UNKNOWN;
                goto fail;
            }

            err = vaMapBuffer(decode->va_dpy, img.buf, (void**)&data);
            if (err != VA_STATUS_SUCCESS) {
                fprintf(stderr, "Error mapping the image buffer: %s\n",
                        vaErrorStr(err));
                ret = AVERROR_UNKNOWN;
                goto fail;
            }

            for (i = 0; i < img.num_planes; i++)
                for (j = 0; j < (img.height >> (i > 0)); j++)
                    avio_write(output_ctx, data + img.offsets[i] + j * img.pitches[i], img.width);

fail:
            if (img.buf != VA_INVALID_ID)
                vaUnmapBuffer(decode->va_dpy, img.buf);
            if (img.image_id != VA_INVALID_ID)
                vaDestroyImage(decode->va_dpy, img.image_id);
            av_frame_unref(frame);

            if (ret < 0)
                return ret;
        }
    }

    return 0;
}

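/*
 * main(): open the input with libavformat, keep only the first H.264 stream,
 * bring up X11 + VA-API and an MFX session, plug in the allocator above, then
 * open the "h264_qsv" decoder with the custom get_format()/get_buffer2()
 * callbacks and feed it packets through decode_packet(), flushing at the end.
 */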
int main(int argc, char **argv)
{
    AVFormatContext *input_ctx = NULL;
    AVStream *video_st = NULL;
    AVCodecContext *decoder_ctx = NULL;
    const AVCodec *decoder;

    AVPacket pkt = { 0 };
    AVFrame *frame = NULL;

    DecodeContext decode = { NULL };

    Display *dpy = NULL;
    int va_ver_major, va_ver_minor;

    mfxIMPL mfx_impl = MFX_IMPL_AUTO_ANY;
    mfxVersion mfx_ver = { { 1, 1 } };

    mfxFrameAllocator frame_allocator = {
        .pthis  = &decode,
        .Alloc  = frame_alloc,
        .Lock   = frame_lock,
        .Unlock = frame_unlock,
        .GetHDL = frame_get_hdl,
        .Free   = frame_free,
    };

    AVIOContext *output_ctx = NULL;

    int ret, i, err;

    av_register_all();

    if (argc < 3) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }

    /* open the input file */
    ret = avformat_open_input(&input_ctx, argv[1], NULL, NULL);
    if (ret < 0) {
        fprintf(stderr, "Cannot open input file '%s': ", argv[1]);
        goto finish;
    }

    /* find the first H.264 video stream */
    for (i = 0; i < input_ctx->nb_streams; i++) {
        AVStream *st = input_ctx->streams[i];

        if (st->codec->codec_id == AV_CODEC_ID_H264 && !video_st)
            video_st = st;
        else
            st->discard = AVDISCARD_ALL;
    }
    if (!video_st) {
        fprintf(stderr, "No H.264 video stream in the input file\n");
        goto finish;
    }

    /* initialize VA-API */
    dpy = XOpenDisplay(NULL);
    if (!dpy) {
        fprintf(stderr, "Cannot open the X display\n");
        goto finish;
    }
    decode.va_dpy = vaGetDisplay(dpy);
    if (!decode.va_dpy) {
        fprintf(stderr, "Cannot open the VA display\n");
        goto finish;
    }

    err = vaInitialize(decode.va_dpy, &va_ver_major, &va_ver_minor);
    if (err != VA_STATUS_SUCCESS) {
        fprintf(stderr, "Cannot initialize VA: %s\n", vaErrorStr(err));
        goto finish;
    }
    fprintf(stderr, "Initialized VA v%d.%d\n", va_ver_major, va_ver_minor);

    /* initialize an MFX session */
    err = MFXInit(mfx_impl, &mfx_ver, &decode.mfx_session);
    if (err != MFX_ERR_NONE) {
        fprintf(stderr, "Error initializing an MFX session\n");
        goto finish;
    }

    MFXVideoCORE_SetHandle(decode.mfx_session, MFX_HANDLE_VA_DISPLAY, decode.va_dpy);
    MFXVideoCORE_SetFrameAllocator(decode.mfx_session, &frame_allocator);

    /* initialize the decoder */
    decoder = avcodec_find_decoder_by_name("h264_qsv");
    if (!decoder) {
        fprintf(stderr, "The QSV decoder is not present in libavcodec\n");
        goto finish;
    }

    decoder_ctx = avcodec_alloc_context3(decoder);
    if (!decoder_ctx) {
        ret = AVERROR(ENOMEM);
        goto finish;
    }
    decoder_ctx->codec_id = AV_CODEC_ID_H264;
    if (video_st->codec->extradata_size) {
        decoder_ctx->extradata = av_mallocz(video_st->codec->extradata_size +
                                            FF_INPUT_BUFFER_PADDING_SIZE);
        if (!decoder_ctx->extradata) {
            ret = AVERROR(ENOMEM);
            goto finish;
        }
        memcpy(decoder_ctx->extradata, video_st->codec->extradata,
               video_st->codec->extradata_size);
        decoder_ctx->extradata_size = video_st->codec->extradata_size;
    }
    decoder_ctx->refcounted_frames = 1;

    decoder_ctx->opaque      = &decode;
    decoder_ctx->get_buffer2 = get_buffer;
    decoder_ctx->get_format  = get_format;

    ret = avcodec_open2(decoder_ctx, NULL, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error opening the decoder: ");
        goto finish;
    }

    /* open the output stream */
    ret = avio_open(&output_ctx, argv[2], AVIO_FLAG_WRITE);
    if (ret < 0) {
        fprintf(stderr, "Error opening the output context: ");
        goto finish;
    }

    frame = av_frame_alloc();
    if (!frame) {
        ret = AVERROR(ENOMEM);
        goto finish;
    }

    /* actual decoding */
    while (ret >= 0) {
        ret = av_read_frame(input_ctx, &pkt);
        if (ret < 0)
            break;

        if (pkt.stream_index == video_st->index)
            ret = decode_packet(&decode, decoder_ctx, frame, &pkt, output_ctx);

        av_packet_unref(&pkt);
    }

    /* flush the decoder */
    pkt.data = NULL;
    pkt.size = 0;
    ret = decode_packet(&decode, decoder_ctx, frame, &pkt, output_ctx);

finish:
    if (ret < 0) {
        char buf[1024];
        av_strerror(ret, buf, sizeof(buf));
        fprintf(stderr, "%s\n", buf);
    }

    avformat_close_input(&input_ctx);

    av_frame_free(&frame);

    if (decode.mfx_session)
        MFXClose(decode.mfx_session);
    if (decode.va_dpy)
        vaTerminate(decode.va_dpy);
    if (dpy)
        XCloseDisplay(dpy);

    if (decoder_ctx)
        av_freep(&decoder_ctx->hwaccel_context);
    avcodec_free_context(&decoder_ctx);

    avio_close(output_ctx);

    return ret;
}

@@ -153,7 +153,7 @@ end:
 
     /* close output */
     if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
-        avio_closep(&ofmt_ctx->pb);
+        avio_close(ofmt_ctx->pb);
     avformat_free_context(ofmt_ctx);
 
     if (ret < 0 && ret != AVERROR_EOF) {
@@ -41,9 +41,11 @@
 #include "libswresample/swresample.h"
 
 /** The output bit rate in kbit/s */
-#define OUTPUT_BIT_RATE 96000
+#define OUTPUT_BIT_RATE 48000
 /** The number of output channels */
 #define OUTPUT_CHANNELS 2
+/** The audio sample output format */
+#define OUTPUT_SAMPLE_FORMAT AV_SAMPLE_FMT_S16
 
 /**
  * Convert an error code into a text message.
@@ -167,7 +169,7 @@ static int open_output_file(const char *filename,
         goto cleanup;
     }
 
-    /** Save the encoder context for easier access later. */
+    /** Save the encoder context for easiert access later. */
     *output_codec_context = stream->codec;
 
     /**
@@ -177,16 +179,9 @@ static int open_output_file(const char *filename,
     (*output_codec_context)->channels = OUTPUT_CHANNELS;
     (*output_codec_context)->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
     (*output_codec_context)->sample_rate = input_codec_context->sample_rate;
-    (*output_codec_context)->sample_fmt = output_codec->sample_fmts[0];
+    (*output_codec_context)->sample_fmt = AV_SAMPLE_FMT_S16;
     (*output_codec_context)->bit_rate = OUTPUT_BIT_RATE;
 
-    /** Allow the use of the experimental AAC encoder */
-    (*output_codec_context)->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
-
-    /** Set the sample rate for the container. */
-    stream->time_base.den = input_codec_context->sample_rate;
-    stream->time_base.num = 1;
-
     /**
      * Some container formats (like MP4) require global headers to be present
      * Mark the encoder so that it behaves accordingly.
@@ -204,7 +199,7 @@ static int open_output_file(const char *filename,
     return 0;
 
 cleanup:
-    avio_closep(&(*output_format_context)->pb);
+    avio_close((*output_format_context)->pb);
     avformat_free_context(*output_format_context);
     *output_format_context = NULL;
     return error < 0 ? error : AVERROR_EXIT;
@@ -276,11 +271,10 @@ static int init_resampler(AVCodecContext *input_codec_context,
 }
 
 /** Initialize a FIFO buffer for the audio samples to be encoded. */
-static int init_fifo(AVAudioFifo **fifo, AVCodecContext *output_codec_context)
+static int init_fifo(AVAudioFifo **fifo)
 {
     /** Create the FIFO buffer based on the specified output sample format. */
-    if (!(*fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt,
-                                      output_codec_context->channels, 1))) {
+    if (!(*fifo = av_audio_fifo_alloc(OUTPUT_SAMPLE_FORMAT, OUTPUT_CHANNELS, 1))) {
         fprintf(stderr, "Could not allocate FIFO\n");
         return AVERROR(ENOMEM);
     }
@@ -312,7 +306,7 @@ static int decode_audio_frame(AVFrame *frame,
 
     /** Read one audio frame from the input file into a temporary packet. */
     if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
-        /** If we are at the end of the file, flush the decoder below. */
+        /** If we are the the end of the file, flush the decoder below. */
         if (error == AVERROR_EOF)
             *finished = 1;
         else {
@@ -543,9 +537,6 @@ static int init_output_frame(AVFrame **frame,
     return 0;
 }
 
-/** Global timestamp for the audio frames */
-static int64_t pts = 0;
-
 /** Encode one frame worth of audio to the output file. */
 static int encode_audio_frame(AVFrame *frame,
                               AVFormatContext *output_format_context,
@@ -557,12 +548,6 @@ static int encode_audio_frame(AVFrame *frame,
     int error;
     init_packet(&output_packet);
 
-    /** Set a timestamp based on the sample rate for the container. */
-    if (frame) {
-        frame->pts = pts;
-        pts += frame->nb_samples;
-    }
-
     /**
      * Encode the audio frame and store it in the temporary packet.
     * The output audio stream encoder is used to do this.
@@ -674,7 +659,7 @@ int main(int argc, char **argv)
                              &resample_context))
         goto cleanup;
     /** Initialize the FIFO buffer to store audio samples to be encoded. */
-    if (init_fifo(&fifo, output_codec_context))
+    if (init_fifo(&fifo))
         goto cleanup;
     /** Write the header of the output file container. */
     if (write_output_file_header(output_format_context))
@@ -758,7 +743,7 @@ cleanup:
     if (output_codec_context)
         avcodec_close(output_codec_context);
     if (output_format_context) {
-        avio_closep(&output_format_context->pb);
+        avio_close(output_format_context->pb);
         avformat_free_context(output_format_context);
     }
     if (input_codec_context)
@@ -116,10 +116,6 @@ static int open_output_file(const char *filename)
                 || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
             /* in this example, we choose transcoding to same codec */
             encoder = avcodec_find_encoder(dec_ctx->codec_id);
-            if (!encoder) {
-                av_log(NULL, AV_LOG_FATAL, "Neccessary encoder not found\n");
-                return AVERROR_INVALIDDATA;
-            }
 
             /* In this example, we transcode to same properties (picture size,
              * sample rate etc.). These properties can be changed for output
@@ -389,9 +385,17 @@ static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, in
 
     /* prepare packet for muxing */
     enc_pkt.stream_index = stream_index;
-    av_packet_rescale_ts(&enc_pkt,
-                         ofmt_ctx->streams[stream_index]->codec->time_base,
-                         ofmt_ctx->streams[stream_index]->time_base);
+    enc_pkt.dts = av_rescale_q_rnd(enc_pkt.dts,
+            ofmt_ctx->streams[stream_index]->codec->time_base,
+            ofmt_ctx->streams[stream_index]->time_base,
+            AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+    enc_pkt.pts = av_rescale_q_rnd(enc_pkt.pts,
+            ofmt_ctx->streams[stream_index]->codec->time_base,
+            ofmt_ctx->streams[stream_index]->time_base,
+            AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+    enc_pkt.duration = av_rescale_q(enc_pkt.duration,
+            ofmt_ctx->streams[stream_index]->codec->time_base,
+            ofmt_ctx->streams[stream_index]->time_base);
 
     av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
     /* mux encoded frame */
@@ -505,9 +509,14 @@ int main(int argc, char **argv)
             ret = AVERROR(ENOMEM);
             break;
         }
-        av_packet_rescale_ts(&packet,
-                             ifmt_ctx->streams[stream_index]->time_base,
-                             ifmt_ctx->streams[stream_index]->codec->time_base);
+        packet.dts = av_rescale_q_rnd(packet.dts,
+                ifmt_ctx->streams[stream_index]->time_base,
+                ifmt_ctx->streams[stream_index]->codec->time_base,
+                AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+        packet.pts = av_rescale_q_rnd(packet.pts,
+                ifmt_ctx->streams[stream_index]->time_base,
+                ifmt_ctx->streams[stream_index]->codec->time_base,
+                AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
         dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
             avcodec_decode_audio4;
         ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
@@ -529,9 +538,14 @@ int main(int argc, char **argv)
             }
         } else {
             /* remux this frame without reencoding */
-            av_packet_rescale_ts(&packet,
-                                 ifmt_ctx->streams[stream_index]->time_base,
-                                 ofmt_ctx->streams[stream_index]->time_base);
+            packet.dts = av_rescale_q_rnd(packet.dts,
+                    ifmt_ctx->streams[stream_index]->time_base,
+                    ofmt_ctx->streams[stream_index]->time_base,
+                    AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+            packet.pts = av_rescale_q_rnd(packet.pts,
+                    ifmt_ctx->streams[stream_index]->time_base,
+                    ofmt_ctx->streams[stream_index]->time_base,
+                    AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
 
             ret = av_interleaved_write_frame(ofmt_ctx, &packet);
             if (ret < 0)
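For reference, the av_packet_rescale_ts() call seen on one side of the rescaling hunks above is roughly shorthand for the expanded per-field form on the other side (a sketch; the real helper also skips fields set to AV_NOPTS_VALUE):

    /* rescale a packet's timing fields from src_tb to dst_tb */
    pkt->pts      = av_rescale_q_rnd(pkt->pts, src_tb, dst_tb,
                                     AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
    pkt->dts      = av_rescale_q_rnd(pkt->dts, src_tb, dst_tb,
                                     AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
    pkt->duration = av_rescale_q(pkt->duration, src_tb, dst_tb);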
@@ -573,7 +587,7 @@ end:
     av_free(filter_ctx);
     avformat_close_input(&ifmt_ctx);
     if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
-        avio_closep(&ofmt_ctx->pb);
+        avio_close(ofmt_ctx->pb);
     avformat_free_context(ofmt_ctx);
 
     if (ret < 0)

87  doc/faq.texi

@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle FFmpeg FAQ
 @titlepage
@@ -91,56 +90,6 @@ To build FFmpeg, you need to install the development package. It is usually
 called @file{libfoo-dev} or @file{libfoo-devel}. You can remove it after the
 build is finished, but be sure to keep the main package.
 
-@section How do I make @command{pkg-config} find my libraries?
-
-Somewhere along with your libraries, there is a @file{.pc} file (or several)
-in a @file{pkgconfig} directory. You need to set environment variables to
-point @command{pkg-config} to these files.
-
-If you need to @emph{add} directories to @command{pkg-config}'s search list
-(typical use case: library installed separately), add it to
-@code{$PKG_CONFIG_PATH}:
-
-@example
-export PKG_CONFIG_PATH=/opt/x264/lib/pkgconfig:/opt/opus/lib/pkgconfig
-@end example
-
-If you need to @emph{replace} @command{pkg-config}'s search list
-(typical use case: cross-compiling), set it in
-@code{$PKG_CONFIG_LIBDIR}:
-
-@example
-export PKG_CONFIG_LIBDIR=/home/me/cross/usr/lib/pkgconfig:/home/me/cross/usr/local/lib/pkgconfig
-@end example
-
-If you need to know the library's internal dependencies (typical use: static
-linking), add the @code{--static} option to @command{pkg-config}:
-
-@example
-./configure --pkg-config-flags=--static
-@end example
-
-@section How do I use @command{pkg-config} when cross-compiling?
-
-The best way is to install @command{pkg-config} in your cross-compilation
-environment. It will automatically use the cross-compilation libraries.
-
-You can also use @command{pkg-config} from the host environment by
-specifying explicitly @code{--pkg-config=pkg-config} to @command{configure}.
-In that case, you must point @command{pkg-config} to the correct directories
-using the @code{PKG_CONFIG_LIBDIR}, as explained in the previous entry.
-
-As an intermediate solution, you can place in your cross-compilation
-environment a script that calls the host @command{pkg-config} with
-@code{PKG_CONFIG_LIBDIR} set. That script can look like that:
-
-@example
-#!/bin/sh
-PKG_CONFIG_LIBDIR=/path/to/cross/lib/pkgconfig
-export PKG_CONFIG_LIBDIR
-exec /usr/bin/pkg-config "$@@"
-@end example
-
 @chapter Usage
 
 @section ffmpeg does not work; what is wrong?
@@ -349,7 +298,7 @@ FFmpeg has a @url{http://ffmpeg.org/ffmpeg-protocols.html#concat,
 @code{concat}} protocol designed specifically for that, with examples in the
 documentation.
 
-A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow one to concatenate
+A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to concatenate
 video by merely concatenating the files containing them.
 
 Hence you may concatenate your multimedia files by first transcoding them to
@@ -467,40 +416,6 @@ point acceptable for your tastes. The most common options to do that are
 @option{-qscale} and @option{-qmax}, but you should peruse the documentation
 of the encoder you chose.
 
-@section I have a stretched video, why does scaling does not fix it?
-
-A lot of video codecs and formats can store the @emph{aspect ratio} of the
-video: this is the ratio between the width and the height of either the full
-image (DAR, display aspect ratio) or individual pixels (SAR, sample aspect
-ratio). For example, EGA screens at resolution 640×350 had 4:3 DAR and 35:48
-SAR.
-
-Most still image processing work with square pixels, i.e. 1:1 SAR, but a lot
-of video standards, especially from the analogic-numeric transition era, use
-non-square pixels.
-
-Most processing filters in FFmpeg handle the aspect ratio to avoid
-stretching the image: cropping adjusts the DAR to keep the SAR constant,
-scaling adjusts the SAR to keep the DAR constant.
-
-If you want to stretch, or “unstretch”, the image, you need to override the
-information with the
-@url{http://ffmpeg.org/ffmpeg-filters.html#setdar_002c-setsar, @code{setdar or setsar filters}}.
-
-Do not forget to examine carefully the original video to check whether the
-stretching comes from the image or from the aspect ratio information.
-
-For example, to fix a badly encoded EGA capture, use the following commands,
-either the first one to upscale to square pixels or the second one to set
-the correct aspect ratio or the third one to avoid transcoding (may not work
-depending on the format / codec / player / phase of the moon):
-
-@example
-ffmpeg -i ega_screen.nut -vf scale=640:480,setsar=1 ega_screen_scaled.nut
-ffmpeg -i ega_screen.nut -vf setdar=4/3 ega_screen_anamorphic.nut
-ffmpeg -i ega_screen.nut -aspect 4/3 -c copy ega_screen_overridden.nut
-@end example
-
 @chapter Development
 
 @section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle FFmpeg Automated Testing Environment
 @titlepage
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle FFmpeg Bitstream Filters Documentation
 @titlepage
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle FFmpeg Codecs Documentation
 @titlepage
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle FFmpeg Devices Documentation
 @titlepage
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle FFmpeg Filters Documentation
 @titlepage
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle FFmpeg Formats Documentation
 @titlepage
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle FFmpeg Protocols Documentation
 @titlepage
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle FFmpeg Resampler Documentation
 @titlepage
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle FFmpeg Scaler Documentation
 @titlepage
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle FFmpeg Utilities Documentation
 @titlepage
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle ffmpeg Documentation
 @titlepage
@@ -361,7 +360,7 @@ ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg
 @end example
 
 @item -dframes @var{number} (@emph{output})
-Set the number of data frames to output. This is an alias for @code{-frames:d}.
+Set the number of data frames to record. This is an alias for @code{-frames:d}.
 
 @item -frames[:@var{stream_specifier}] @var{framecount} (@emph{output,per-stream})
 Stop writing to the stream after @var{framecount} frames.
@@ -468,7 +467,7 @@ attachments.
 
 @table @option
 @item -vframes @var{number} (@emph{output})
-Set the number of video frames to output. This is an alias for @code{-frames:v}.
+Set the number of video frames to record. This is an alias for @code{-frames:v}.
 @item -r[:@var{stream_specifier}] @var{fps} (@emph{input/output,per-stream})
 Set frame rate (Hz value, fraction or abbreviation).
 
@@ -693,7 +692,7 @@ If this option is not specified, the default adapter is used.
 
 @table @option
 @item -aframes @var{number} (@emph{output})
-Set the number of audio frames to output. This is an alias for @code{-frames:a}.
+Set the number of audio frames to record. This is an alias for @code{-frames:a}.
 @item -ar[:@var{stream_specifier}] @var{freq} (@emph{input/output,per-stream})
 Set the audio sampling frequency. For output streams it is set by
 default to the frequency of the corresponding input stream. For input
@@ -995,13 +994,6 @@ With -map you can select from which stream the timestamps should be
 taken. You can leave either video or audio unchanged and sync the
 remaining stream(s) to the unchanged one.
 
-@item -frame_drop_threshold @var{parameter}
-Frame drop threshold, which specifies how much behind video frames can
-be before they are dropped. In frame rate units, so 1.0 is one frame.
-The default is -1.1. One possible usecase is to avoid framedrops in case
-of noisy timestamps or to increase frame drop precision in case of exact
-timestamps.
-
 @item -async @var{samples_per_second}
 Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps,
 the parameter is the maximum samples per second by which the audio is changed.
@@ -1024,12 +1016,6 @@ processing (e.g. in case the format option @option{avoid_negative_ts}
 is enabled) the output timestamps may mismatch with the input
 timestamps even when this option is selected.
 
-@item -start_at_zero
-When used with @option{copyts}, shift input timestamps so they start at zero.
-
-This means that using e.g. @code{-ss 50} will make output timestamps start at
-50 seconds, regardless of what timestamp the input file started at.
-
 @item -copytb @var{mode}
 Specify how to set the encoder timebase when stream copying. @var{mode} is an
 integer numeric value, and can assume one of the following values:
@@ -1158,12 +1144,6 @@ This option enables or disables accurate seeking in input files with the
 transcoding. Use @option{-noaccurate_seek} to disable it, which may be useful
 e.g. when copying some streams and transcoding the others.
 
-@item -thread_message_queue @var{size} (@emph{input})
-This option sets the maximum number of queued packets when reading from the
-file or device. With low latency / high rate live streams, packets may be
-discarded if they are not read in a timely manner; raising this value can
-avoid it.
-
 @item -override_ffserver (@emph{global})
 Overrides the input specifications from @command{ffserver}. Using this
 option you can map any input stream to @command{ffserver} and control
@@ -1174,11 +1154,6 @@ requested by @command{ffserver}.
 The option is intended for cases where features are needed that cannot be
 specified to @command{ffserver} but can be to @command{ffmpeg}.
 
-@item -sdp_file @var{file} (@emph{global})
-Print sdp information to @var{file}.
-This allows dumping sdp information when at least one output isn't an
-rtp stream.
-
 @item -discard (@emph{input})
 Allows discarding specific streams or frames of streams at the demuxer.
 Not all demuxers support this.
@@ -1228,10 +1203,7 @@ awkward to specify on the command line. Lines starting with the hash
 ('#') character are ignored and are used to provide comments. Check
 the @file{presets} directory in the FFmpeg source tree for examples.
 
-There are two types of preset files: ffpreset and avpreset files.
-
-@subsection ffpreset files
-ffpreset files are specified with the @code{vpre}, @code{apre},
+Preset files are specified with the @code{vpre}, @code{apre},
 @code{spre}, and @code{fpre} options. The @code{fpre} option takes the
 filename of the preset instead of a preset name as input and can be
 used for any kind of codec. For the @code{vpre}, @code{apre}, and
@@ -1256,26 +1228,6 @@ directories, where @var{codec_name} is the name of the codec to which
 the preset file options will be applied. For example, if you select
 the video codec with @code{-vcodec libvpx} and use @code{-vpre 1080p},
 then it will search for the file @file{libvpx-1080p.ffpreset}.
-
-@subsection avpreset files
-avpreset files are specified with the @code{pre} option. They work similar to
-ffpreset files, but they only allow encoder- specific options. Therefore, an
-@var{option}=@var{value} pair specifying an encoder cannot be used.
-
-When the @code{pre} option is specified, ffmpeg will look for files with the
-suffix .avpreset in the directories @file{$AVCONV_DATADIR} (if set), and
-@file{$HOME/.avconv}, and in the datadir defined at configuration time (usually
-@file{PREFIX/share/ffmpeg}), in that order.
-
-First ffmpeg searches for a file named @var{codec_name}-@var{arg}.avpreset in
-the above-mentioned directories, where @var{codec_name} is the name of the codec
-to which the preset file options will be applied. For example, if you select the
-video codec with @code{-vcodec libvpx} and use @code{-pre 1080p}, then it will
-search for the file @file{libvpx-1080p.avpreset}.
-
-If no such file is found, then ffmpeg will search for a file named
-@var{arg}.avpreset in the same directories.
-
 @c man end OPTIONS
 
 @chapter Tips
@@ -1322,6 +1274,21 @@ quality).
 @chapter Examples
 @c man begin EXAMPLES
 
+@section Preset files
+
+A preset file contains a sequence of @var{option=value} pairs, one for
+each line, specifying a sequence of options which can be specified also on
+the command line. Lines starting with the hash ('#') character are ignored and
+are used to provide comments. Empty lines are also ignored. Check the
+@file{presets} directory in the FFmpeg source tree for examples.
+
+Preset files are specified with the @code{pre} option, this option takes a
+preset name as input. FFmpeg searches for a file named @var{preset_name}.avpreset in
+the directories @file{$AVCONV_DATADIR} (if set), and @file{$HOME/.ffmpeg}, and in
+the data directory defined at configuration time (usually @file{$PREFIX/share/ffmpeg})
+in that order. For example, if the argument is @code{libx264-max}, it will
+search for the file @file{libx264-max.avpreset}.
+
 @section Video and Audio grabbing
 
 If you specify the input format and device then ffmpeg can grab video
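A preset file of this kind is just one option=value pair per line, with '#' starting a comment. A purely illustrative (hypothetical) @file{libx264-max.avpreset} could read:

@example
# hypothetical example values for the libx264 encoder
crf=18
bf=3
g=250
@end example
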
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle ffplay Documentation
 @titlepage
@@ -38,14 +37,10 @@ Force displayed height.
 Set frame size (WxH or abbreviation), needed for videos which do
 not contain a header with the frame size like raw YUV. This option
 has been deprecated in favor of private options, try -video_size.
-@item -fs
-Start in fullscreen mode.
 @item -an
 Disable audio.
 @item -vn
 Disable video.
-@item -sn
-Disable subtitles.
 @item -ss @var{pos}
 Seek to a given position in seconds.
 @item -t @var{duration}
@@ -114,10 +109,15 @@ duration, the codec parameters, the current position in the stream and
 the audio/video synchronisation drift. It is on by default, to
 explicitly disable it you need to specify @code{-nostats}.
 
+@item -bug
+Work around bugs.
 @item -fast
 Non-spec-compliant optimizations.
 @item -genpts
 Generate pts.
+@item -rtp_tcp
+Force RTP/TCP protocol usage instead of RTP/UDP. It is only meaningful
+if you are streaming with the RTSP protocol.
 @item -sync @var{type}
 Set the master clock to audio (@code{type=audio}), video
 (@code{type=video}) or external (@code{type=ext}). Default is audio. The
@@ -125,20 +125,23 @@ master clock is used to control audio-video synchronization. Most media
 players use audio as master clock, but in some cases (streaming or high
 quality broadcast) it is necessary to change that. This option is mainly
 used for debugging purposes.
-@item -ast @var{audio_stream_specifier}
-Select the desired audio stream using the given stream specifier. The stream
-specifiers are described in the @ref{Stream specifiers} chapter. If this option
-is not specified, the "best" audio stream is selected in the program of the
-already selected video stream.
-@item -vst @var{video_stream_specifier}
-Select the desired video stream using the given stream specifier. The stream
-specifiers are described in the @ref{Stream specifiers} chapter. If this option
-is not specified, the "best" video stream is selected.
-@item -sst @var{subtitle_stream_specifier}
-Select the desired subtitle stream using the given stream specifier. The stream
-specifiers are described in the @ref{Stream specifiers} chapter. If this option
-is not specified, the "best" subtitle stream is selected in the program of the
-already selected video or audio stream.
+@item -threads @var{count}
+Set the thread count.
+@item -ast @var{audio_stream_number}
+Select the desired audio stream number, counting from 0. The number
+refers to the list of all the input audio streams. If it is greater
+than the number of audio streams minus one, then the last one is
+selected, if it is negative the audio playback is disabled.
+@item -vst @var{video_stream_number}
+Select the desired video stream number, counting from 0. The number
+refers to the list of all the input video streams. If it is greater
+than the number of video streams minus one, then the last one is
+selected, if it is negative the video playback is disabled.
+@item -sst @var{subtitle_stream_number}
+Select the desired subtitle stream number, counting from 0. The number
+refers to the list of all the input subtitle streams. If it is greater
+than the number of subtitle streams minus one, then the last one is
+selected, if it is negative the subtitle rendering is disabled.
 @item -autoexit
 Exit when video is done playing.
 @item -exitonkeydown
@@ -161,20 +164,8 @@ Force a specific video decoder.
 Force a specific subtitle decoder.
 
 @item -autorotate
-Automatically rotate the video according to presentation metadata. Enabled by
-default, use @option{-noautorotate} to disable it.
-
-@item -framedrop
-Drop video frames if video is out of sync. Enabled by default if the master
-clock is not set to video. Use this option to enable frame dropping for all
-master clock sources, use @option{-noframedrop} to disable it.
-
-@item -infbuf
-Do not limit the input buffer size, read as much data as possible from the
-input as soon as possible. Enabled by default for realtime streams, where data
-may be dropped if not read in time. Use this option to enable infinite buffers
-for all inputs, use @option{-noinfbuf} to disable it.
-
+Automatically rotate the video according to presentation metadata. Set by
+default, use -noautorotate to disable.
 @end table
 
 @section While playing
@@ -1,5 +1,4 @@
|
|||||||
\input texinfo @c -*- texinfo -*-
|
\input texinfo @c -*- texinfo -*-
|
||||||
@documentencoding UTF-8
|
|
||||||
|
|
||||||
@settitle ffprobe Documentation
|
@settitle ffprobe Documentation
|
||||||
@titlepage
|
@titlepage
|
||||||
@@ -322,12 +321,6 @@ Show information related to program and library versions. This is the
|
|||||||
equivalent of setting both @option{-show_program_version} and
|
equivalent of setting both @option{-show_program_version} and
|
||||||
@option{-show_library_versions} options.
|
@option{-show_library_versions} options.
|
||||||
|
|
||||||
@item -show_pixel_formats
|
|
||||||
Show information about all pixel formats supported by FFmpeg.
|
|
||||||
|
|
||||||
Pixel format information for each format is printed within a section
|
|
||||||
with name "PIXEL_FORMAT".
|
|
||||||
|
|
||||||
@item -bitexact
|
@item -bitexact
|
||||||
Force bitexact output, useful to produce output which is not dependent
|
Force bitexact output, useful to produce output which is not dependent
|
||||||
on the specific build.
|
on the specific build.
|
||||||
|
@@ -10,10 +10,8 @@
|
|||||||
<xsd:sequence>
|
<xsd:sequence>
|
||||||
<xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
|
<xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
|
||||||
<xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
|
<xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
|
||||||
<xsd:element name="pixel_formats" type="ffprobe:pixelFormatsType" minOccurs="0" maxOccurs="1" />
|
|
||||||
<xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
|
<xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
|
||||||
<xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" />
|
<xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" />
|
||||||
<xsd:element name="packets_and_frames" type="ffprobe:packetsAndFramesType" minOccurs="0" maxOccurs="1" />
|
|
||||||
<xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" />
|
<xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" />
|
||||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
|
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
|
||||||
<xsd:element name="chapters" type="ffprobe:chaptersType" minOccurs="0" maxOccurs="1" />
|
<xsd:element name="chapters" type="ffprobe:chaptersType" minOccurs="0" maxOccurs="1" />
|
||||||
@@ -37,16 +35,6 @@
|
|||||||
</xsd:sequence>
|
</xsd:sequence>
|
||||||
</xsd:complexType>
|
</xsd:complexType>
|
||||||
|
|
||||||
<xsd:complexType name="packetsAndFramesType">
|
|
||||||
<xsd:sequence>
|
|
||||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
|
||||||
<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
|
|
||||||
<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
|
|
||||||
<xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/>
|
|
||||||
</xsd:choice>
|
|
||||||
</xsd:sequence>
|
|
||||||
</xsd:complexType>
|
|
||||||
|
|
||||||
<xsd:complexType name="packetType">
|
<xsd:complexType name="packetType">
|
||||||
<xsd:attribute name="codec_type" type="xsd:string" use="required" />
|
<xsd:attribute name="codec_type" type="xsd:string" use="required" />
|
||||||
<xsd:attribute name="stream_index" type="xsd:int" use="required" />
|
<xsd:attribute name="stream_index" type="xsd:int" use="required" />
|
||||||
@@ -178,11 +166,7 @@
|
|||||||
<xsd:attribute name="level" type="xsd:int"/>
|
<xsd:attribute name="level" type="xsd:int"/>
|
||||||
<xsd:attribute name="color_range" type="xsd:string"/>
|
<xsd:attribute name="color_range" type="xsd:string"/>
|
||||||
<xsd:attribute name="color_space" type="xsd:string"/>
|
<xsd:attribute name="color_space" type="xsd:string"/>
|
||||||
<xsd:attribute name="color_transfer" type="xsd:string"/>
|
|
||||||
<xsd:attribute name="color_primaries" type="xsd:string"/>
|
|
||||||
<xsd:attribute name="chroma_location" type="xsd:string"/>
|
|
||||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||||
<xsd:attribute name="refs" type="xsd:int"/>
|
|
||||||
|
|
||||||
<!-- audio attributes -->
|
<!-- audio attributes -->
|
||||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||||
@@ -254,6 +238,8 @@
|
|||||||
<xsd:complexType name="programVersionType">
|
<xsd:complexType name="programVersionType">
|
||||||
<xsd:attribute name="version" type="xsd:string" use="required"/>
|
<xsd:attribute name="version" type="xsd:string" use="required"/>
|
||||||
<xsd:attribute name="copyright" type="xsd:string" use="required"/>
|
<xsd:attribute name="copyright" type="xsd:string" use="required"/>
|
||||||
|
<xsd:attribute name="build_date" type="xsd:string" use="required"/>
|
||||||
|
<xsd:attribute name="build_time" type="xsd:string" use="required"/>
|
||||||
<xsd:attribute name="compiler_ident" type="xsd:string" use="required"/>
|
<xsd:attribute name="compiler_ident" type="xsd:string" use="required"/>
|
||||||
<xsd:attribute name="configuration" type="xsd:string" use="required"/>
|
<xsd:attribute name="configuration" type="xsd:string" use="required"/>
|
||||||
</xsd:complexType>
|
</xsd:complexType>
|
||||||
@@ -291,45 +277,4 @@
|
|||||||
<xsd:element name="library_version" type="ffprobe:libraryVersionType" minOccurs="0" maxOccurs="unbounded"/>
|
<xsd:element name="library_version" type="ffprobe:libraryVersionType" minOccurs="0" maxOccurs="unbounded"/>
|
||||||
</xsd:sequence>
|
</xsd:sequence>
|
||||||
</xsd:complexType>
|
</xsd:complexType>
|
||||||
|
|
||||||
<xsd:complexType name="pixelFormatFlagsType">
|
|
||||||
<xsd:attribute name="big_endian" type="xsd:int" use="required"/>
|
|
||||||
<xsd:attribute name="palette" type="xsd:int" use="required"/>
|
|
||||||
<xsd:attribute name="bitstream" type="xsd:int" use="required"/>
|
|
||||||
<xsd:attribute name="hwaccel" type="xsd:int" use="required"/>
|
|
||||||
<xsd:attribute name="planar" type="xsd:int" use="required"/>
|
|
||||||
<xsd:attribute name="rgb" type="xsd:int" use="required"/>
|
|
||||||
<xsd:attribute name="pseudopal" type="xsd:int" use="required"/>
|
|
||||||
<xsd:attribute name="alpha" type="xsd:int" use="required"/>
|
|
||||||
</xsd:complexType>
|
|
||||||
|
|
||||||
<xsd:complexType name="pixelFormatComponentType">
|
|
||||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
|
||||||
<xsd:attribute name="bit_depth" type="xsd:int" use="required"/>
|
|
||||||
</xsd:complexType>
|
|
||||||
|
|
||||||
<xsd:complexType name="pixelFormatComponentsType">
|
|
||||||
<xsd:sequence>
|
|
||||||
<xsd:element name="component" type="ffprobe:pixelFormatComponentType" minOccurs="0" maxOccurs="unbounded"/>
|
|
||||||
</xsd:sequence>
|
|
||||||
</xsd:complexType>
|
|
||||||
|
|
||||||
<xsd:complexType name="pixelFormatType">
|
|
||||||
<xsd:sequence>
|
|
||||||
<xsd:element name="flags" type="ffprobe:pixelFormatFlagsType" minOccurs="0" maxOccurs="1"/>
|
|
||||||
<xsd:element name="components" type="ffprobe:pixelFormatComponentsType" minOccurs="0" maxOccurs="1"/>
|
|
||||||
</xsd:sequence>
|
|
||||||
|
|
||||||
<xsd:attribute name="name" type="xsd:string" use="required"/>
|
|
||||||
<xsd:attribute name="nb_components" type="xsd:int" use="required"/>
|
|
||||||
<xsd:attribute name="log2_chroma_w" type="xsd:int"/>
|
|
||||||
<xsd:attribute name="log2_chroma_h" type="xsd:int"/>
|
|
||||||
<xsd:attribute name="bits_per_pixel" type="xsd:int"/>
|
|
||||||
</xsd:complexType>
|
|
||||||
|
|
||||||
<xsd:complexType name="pixelFormatsType">
|
|
||||||
<xsd:sequence>
|
|
||||||
<xsd:element name="pixel_format" type="ffprobe:pixelFormatType" minOccurs="0" maxOccurs="unbounded"/>
|
|
||||||
</xsd:sequence>
|
|
||||||
</xsd:complexType>
|
|
||||||
</xsd:schema>
|
</xsd:schema>
|
||||||
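A usage sketch, not part of the diff: output meant to validate against the schema above can be requested from ffprobe through its XML writer; the @code{fully_qualified} and @code{xsd_strict} writer options are assumed to be available in the build being used:

@example
ffprobe -of xml=fully_qualified=1:xsd_strict=1 -show_streams -show_packets input.mp4
@end example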
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle ffserver Documentation
 @titlepage
@@ -72,7 +71,7 @@ the HTTP server (configured through the @option{HTTPPort} option), and
 configuration file.
 
 Each feed is associated to a file which is stored on disk. This stored
-file is used to send pre-recorded data to a player as fast as
+file is used to allow to send pre-recorded data to a player as fast as
 possible when new content is added in real-time to the stream.
 
 A "live-stream" or "stream" is a resource published by
@@ -409,12 +408,6 @@ ignored, and the log is written to standard output.
 Set no-daemon mode. This option is currently ignored since now
 @command{ffserver} will always work in no-daemon mode, and is
 deprecated.
 
-@item UseDefaults
-@item NoDefaults
-Control whether default codec options are used for the all streams or not.
-Each stream may overwrite this setting for its own. Default is @var{UseDefaults}.
-The lastest occurrence overrides previous if multiple definitions.
 @end table
 
 @section Feed section
@@ -578,11 +571,6 @@ deprecated in favor of @option{Metadata}.
 @item Metadata @var{key} @var{value}
 Set metadata value on the output stream.
 
-@item UseDefaults
-@item NoDefaults
-Control whether default codec options are used for the stream or not.
-Default is @var{UseDefaults} unless disabled globally.
-
 @item NoAudio
 @item NoVideo
 Suppress audio/video.
@@ -601,9 +589,8 @@ Set sampling frequency for audio. When using low bitrates, you should
 lower this frequency to 22050 or 11025. The supported frequencies
 depend on the selected audio codec.
 
-@item AVOptionAudio [@var{codec}:]@var{option} @var{value} (@emph{encoding,audio})
-Set generic or private option for audio stream.
-Private option must be prefixed with codec name or codec must be defined before.
+@item AVOptionAudio @var{option} @var{value} (@emph{encoding,audio})
+Set generic option for audio stream.
 
 @item AVPresetAudio @var{preset} (@emph{encoding,audio})
 Set preset for audio stream.
@@ -680,9 +667,8 @@ Set video @option{qdiff} encoding option.
 @item DarkMask @var{float} (@emph{encoding,video})
 Set @option{lumi_mask}/@option{dark_mask} encoding options.
 
-@item AVOptionVideo [@var{codec}:]@var{option} @var{value} (@emph{encoding,video})
-Set generic or private option for video stream.
-Private option must be prefixed with codec name or codec must be defined before.
+@item AVOptionVideo @var{option} @var{value} (@emph{encoding,video})
+Set generic option for video stream.
 
 @item AVPresetVideo @var{preset} (@emph{encoding,video})
 Set preset for video stream.
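A configuration sketch, not part of the diff: with the codec-prefixed form that only the left-hand side of the hunk above documents, private encoder options could be set inside an @file{ffserver.conf} stream section roughly like this. The stream and feed names are made up and the fragment is not a complete working configuration:

@example
<Stream test1.mpg>
Feed feed1.ffm
VideoCodec libx264
AVOptionVideo flags +global_header
AVOptionVideo libx264:preset medium
NoAudio
</Stream>
@end example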
@@ -103,10 +103,7 @@ Print detailed information about the filter name @var{filter_name}. Use the
 Show version.
 
 @item -formats
-Show available formats (including devices).
+Show available formats.
 
-@item -devices
-Show available devices.
-
 @item -codecs
 Show all codecs known to libavcodec.
@@ -141,22 +138,6 @@ Show channel names and standard channel layouts.
 @item -colors
 Show recognized color names.
 
-@item -sources @var{device}[,@var{opt1}=@var{val1}[,@var{opt2}=@var{val2}]...]
-Show autodetected sources of the intput device.
-Some devices may provide system-dependent source names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-@example
-ffmpeg -sources pulse,server=192.168.0.4
-@end example
-
-@item -sinks @var{device}[,@var{opt1}=@var{val1}[,@var{opt2}=@var{val2}]...]
-Show autodetected sinks of the output device.
-Some devices may provide system-dependent sink names that cannot be autodetected.
-The returned list cannot be assumed to be always complete.
-@example
-ffmpeg -sinks pulse,server=192.168.0.4
-@end example
-
 @item -loglevel [repeat+]@var{loglevel} | -v [repeat+]@var{loglevel}
 Set the logging level used by the library.
 Adding "repeat+" indicates that repeated log output should not be compressed
@@ -165,27 +146,27 @@ omitted. "repeat" can also be used alone.
 If "repeat" is used alone, and with no prior loglevel set, the default
 loglevel will be used. If multiple loglevel parameters are given, using
 'repeat' will not change the loglevel.
-@var{loglevel} is a string or a number containing one of the following values:
+@var{loglevel} is a number or a string containing one of the following values:
 @table @samp
-@item quiet, -8
+@item quiet
 Show nothing at all; be silent.
-@item panic, 0
+@item panic
 Only show fatal errors which could lead the process to crash, such as
 and assert failure. This is not currently used for anything.
-@item fatal, 8
+@item fatal
 Only show fatal errors. These are errors after which the process absolutely
 cannot continue after.
-@item error, 16
+@item error
 Show all errors, including ones which can be recovered from.
-@item warning, 24
+@item warning
 Show all warnings and errors. Any message related to possibly
 incorrect or unexpected events will be shown.
-@item info, 32
+@item info
 Show informative messages during processing. This is in addition to
 warnings and errors. This is the default value.
-@item verbose, 40
+@item verbose
 Same as @code{info}, except more verbose.
-@item debug, 48
+@item debug
 Show everything, including debugging information.
 @end table
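A usage sketch, not part of the diff: the @samp{repeat+} prefix and the log levels tabulated above (named on both sides, numbered only on the left-hand side) are combined like this:

@example
ffmpeg -loglevel repeat+verbose -i input.avi output.mp4
@end example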
 
@@ -204,29 +185,21 @@ directory.
 This file can be useful for bug reports.
 It also implies @code{-loglevel verbose}.
 
-Setting the environment variable @env{FFREPORT} to any value has the
+Setting the environment variable @code{FFREPORT} to any value has the
 same effect. If the value is a ':'-separated key=value sequence, these
-options will affect the report; option values must be escaped if they
+options will affect the report; options values must be escaped if they
 contain special characters or the options delimiter ':' (see the
-``Quoting and escaping'' section in the ffmpeg-utils manual).
-
-The following options are recognized:
+``Quoting and escaping'' section in the ffmpeg-utils manual). The
+following option is recognized:
 @table @option
 @item file
 set the file name to use for the report; @code{%p} is expanded to the name
 of the program, @code{%t} is expanded to a timestamp, @code{%%} is expanded
 to a plain @code{%}
 @item level
-set the log verbosity level using a numerical value (see @code{-loglevel}).
+set the log level
 @end table
 
-For example, to output a report to a file named @file{ffreport.log}
-using a log level of @code{32} (alias for log level @code{info}):
-
-@example
-FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
-@end example
-
 Errors in parsing the environment variable are not fatal, and will not
 appear in the report.
 
@@ -294,41 +267,8 @@ Possible flags for this option are:
 @end table
 
 @item -opencl_bench
-This option is used to benchmark all available OpenCL devices and print the
-results. This option is only available when FFmpeg has been compiled with
-@code{--enable-opencl}.
-
-When FFmpeg is configured with @code{--enable-opencl}, the options for the
-global OpenCL context are set via @option{-opencl_options}. See the
-"OpenCL Options" section in the ffmpeg-utils manual for the complete list of
-supported options. Amongst others, these options include the ability to select
-a specific platform and device to run the OpenCL code on. By default, FFmpeg
-will run on the first device of the first platform. While the options for the
-global OpenCL context provide flexibility to the user in selecting the OpenCL
-device of their choice, most users would probably want to select the fastest
-OpenCL device for their system.
-
-This option assists the selection of the most efficient configuration by
-identifying the appropriate device for the user's system. The built-in
-benchmark is run on all the OpenCL devices and the performance is measured for
-each device. The devices in the results list are sorted based on their
-performance with the fastest device listed first. The user can subsequently
-invoke @command{ffmpeg} using the device deemed most appropriate via
-@option{-opencl_options} to obtain the best performance for the OpenCL
-accelerated code.
-
-Typical usage to use the fastest OpenCL device involve the following steps.
-
-Run the command:
-@example
-ffmpeg -opencl_bench
-@end example
-Note down the platform ID (@var{pidx}) and device ID (@var{didx}) of the first
-i.e. fastest device in the list.
-Select the platform and device using the command:
-@example
-ffmpeg -opencl_options platform_idx=@var{pidx}:device_idx=@var{didx} ...
-@end example
+Benchmark all available OpenCL devices and show the results. This option
+is only available when FFmpeg has been compiled with @code{--enable-opencl}.
 
 @item -opencl_options options (@emph{global})
 Set OpenCL environment options. This option is only available when
965 doc/filters.texi
File diff suppressed because it is too large
@@ -23,7 +23,7 @@ Reduce buffering.
 
 @item probesize @var{integer} (@emph{input})
 Set probing size in bytes, i.e. the size of the data to analyze to get
-stream information. A higher value will enable detecting more
+stream information. A higher value will allow to detect more
 information in case it is dispersed into the stream, but will increase
 latency. Must be an integer not lesser than 32. It is 5000000 by default.
 
@@ -55,10 +55,6 @@ Do not merge side data.
 Enable RTP MP4A-LATM payload.
 @item nobuffer
 Reduce the latency introduced by optional buffering
-@item bitexact
-Only write platform-, build- and time-independent data.
-This ensures that file and data checksums are reproducible and match between
-platforms. Its primary use is for regression testing.
 @end table
 
 @item seek2any @var{integer} (@emph{input})
@@ -67,7 +63,7 @@ Default is 0.
 
 @item analyzeduration @var{integer} (@emph{input})
 Specify how many microseconds are analyzed to probe the input. A
-higher value will enable detecting more accurate information, but will
+higher value will allow to detect more accurate information, but will
 increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
 
 @item cryptokey @var{hexadecimal string} (@emph{input})
@@ -172,18 +168,6 @@ The offset is added by the muxer to the output timestamps.
 Specifying a positive offset means that the corresponding streams are
 delayed bt the time duration specified in @var{offset}. Default value
 is @code{0} (meaning that no offset is applied).
-
-@item format_whitelist @var{list} (@emph{input})
-"," separated List of allowed demuxers. By default all are allowed.
-
-@item dump_separator @var{string} (@emph{input})
-Separator used to separate the fields printed on the command line about the
-Stream parameters.
-For example to separate the fields with newlines and indention:
-@example
-ffprobe -dump_separator "
-" -i ~/videos/matrixbench_mpeg2.mpg
-@end example
 @end table
 
 @c man end FORMAT OPTIONS
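A usage sketch, not part of the diff: the probing options shared by both sides can be combined with the @samp{bitexact} flag that only the left-hand side documents; here @option{-fflags +bitexact} is assumed to be how that format flag is selected on the command line:

@example
ffmpeg -probesize 10000000 -analyzeduration 10000000 -i input.ts -c copy -fflags +bitexact output.mkv
@end example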
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle General Documentation
 @titlepage
@@ -109,14 +108,6 @@ Go to @url{http://www.wavpack.com/} and follow the instructions for
 installing the library. Then pass @code{--enable-libwavpack} to configure to
 enable it.
 
-@section OpenH264
-
-FFmpeg can make use of the OpenH264 library for H.264 encoding.
-
-Go to @url{http://www.openh264.org/} and follow the instructions for
-installing the library. Then pass @code{--enable-libopenh264} to configure to
-enable it.
-
 @section x264
 
 FFmpeg can make use of the x264 library for H.264 encoding.
@@ -152,7 +143,7 @@ by Google as part of the WebRTC project. libilbc is a packaging friendly
 copy of the iLBC codec. FFmpeg can make use of the libilbc library for
 iLBC encoding and decoding.
 
-Go to @url{https://github.com/TimothyGu/libilbc} and follow the instructions for
+Go to @url{https://github.com/dekkers/libilbc} and follow the instructions for
 installing the library. Then pass @code{--enable-libilbc} to configure to
 enable it.
 
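A usage sketch, not part of the diff: the configure switches named in the hunks above are passed together when building, assuming the corresponding libraries are installed on the build host (@code{--enable-gpl} is additionally required for libx264):

@example
./configure --enable-gpl --enable-libx264 --enable-libopenh264 --enable-libilbc --enable-libwavpack
@end example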
@@ -252,8 +243,6 @@ library:
 @tab Used in the game Cyberia from Interplay.
 @item Delphine Software International CIN @tab @tab X
 @tab Multimedia format used by Delphine Software games.
-@item Digital Speech Standard (DSS) @tab @tab X
-@item Canopus HQX @tab @tab X
 @item CD+G @tab @tab X
 @tab Video format used by CD+G karaoke disks
 @item Phantom Cine @tab @tab X
@@ -465,7 +454,6 @@ library:
 @item Sony Wave64 (W64) @tab X @tab X
 @item SoX native format @tab X @tab X
 @item SUN AU format @tab X @tab X
-@item SUP raw PGS subtitles @tab @tab X
 @item Text files @tab @tab X
 @item THP @tab @tab X
 @tab Used on the Nintendo GameCube.
@@ -664,7 +652,7 @@ following image formats are supported:
 @item H.263 / H.263-1996 @tab X @tab X
 @item H.263+ / H.263-1998 / H.263 version 2 @tab X @tab X
 @item H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 @tab E @tab X
-@tab encoding supported through external library libx264 and OpenH264
+@tab encoding supported through external library libx264
 @item HEVC @tab X @tab X
 @tab encoding supported through the external library libx265
 @item HNM version 4 @tab @tab X
@@ -900,7 +888,6 @@ following image formats are supported:
 @tab decoding supported through external library libcelt
 @item Delphine Software International CIN audio @tab @tab X
 @tab Codec used in Delphine Software International games.
-@item Digital Speech Standard - Standard Play mode (DSS SP) @tab @tab X
 @item Discworld II BMV Audio @tab @tab X
 @item COOK @tab @tab X
 @tab All versions except 5.1 are supported.
@@ -1043,7 +1030,6 @@ performance on systems without hardware floating point support).
 @item PJS (Phoenix) @tab @tab X @tab @tab X
 @item RealText @tab @tab X @tab @tab X
 @item SAMI @tab @tab X @tab @tab X
-@item Spruce format (STL) @tab @tab X @tab @tab X
 @item SSA/ASS @tab X @tab X @tab X @tab X
 @item SubRip (SRT) @tab X @tab X @tab X @tab X
 @item SubViewer v1 @tab @tab X @tab @tab X
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle Using git to develop FFmpeg
 
324 doc/indevs.texi
@@ -1,7 +1,7 @@
 @chapter Input Devices
 @c man begin INPUT DEVICES
 
-Input devices are configured elements in FFmpeg which enable accessing
+Input devices are configured elements in FFmpeg which allow to access
 the data coming from a multimedia device attached to your system.
 
 When you configure your FFmpeg build, all the supported input devices
@@ -58,94 +58,34 @@ AVFoundation input device.
 AVFoundation is the currently recommended framework by Apple for streamgrabbing on OSX >= 10.7 as well as on iOS.
 The older QTKit framework has been marked deprecated since OSX version 10.7.
 
-The input filename has to be given in the following syntax:
-@example
--i "[[VIDEO]:[AUDIO]]"
-@end example
-The first entry selects the video input while the latter selects the audio input.
-The stream has to be specified by the device name or the device index as shown by the device list.
-Alternatively, the video and/or audio input device can be chosen by index using the
-@option{
--video_device_index <INDEX>
-}
-and/or
-@option{
--audio_device_index <INDEX>
-}
-, overriding any
-device name or index given in the input filename.
-
-All available devices can be enumerated by using @option{-list_devices true}, listing
-all device names and corresponding indices.
-
-There are two device name aliases:
-@table @code
-
-@item default
-Select the AVFoundation default device of the corresponding type.
-
-@item none
-Do not record the corresponding media type.
-This is equivalent to specifying an empty device name or index.
-
-@end table
-
-@subsection Options
-
-AVFoundation supports the following options:
-
-@table @option
-
-@item -list_devices <TRUE|FALSE>
-If set to true, a list of all available input devices is given showing all
-device names and indices.
-
-@item -video_device_index <INDEX>
-Specify the video device by its index. Overrides anything given in the input filename.
-
-@item -audio_device_index <INDEX>
-Specify the audio device by its index. Overrides anything given in the input filename.
-
-@item -pixel_format <FORMAT>
-Request the video device to use a specific pixel format.
-If the specified format is not supported, a list of available formats is given
-und the first one in this list is used instead. Available pixel formats are:
-@code{monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
+The filename passed as input is parsed to contain either a device name or index.
+The device index can also be given by using -video_device_index.
+A given device index will override any given device name.
+If the desired device consists of numbers only, use -video_device_index to identify it.
+The default device will be chosen if an empty string or the device name "default" is given.
+The available devices can be enumerated by using -list_devices.
+The pixel format can be set using -pixel_format.
+Available formats:
+monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
 bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
-yuv420p, nv12, yuyv422, gray}
+yuv420p, nv12, yuyv422, gray
 
-@end table
-
-@subsection Examples
-
-@itemize
-
-@item
-Print the list of AVFoundation supported devices and exit:
 @example
-$ ffmpeg -f avfoundation -list_devices true -i ""
+ffmpeg -f avfoundation -i "0" out.mpg
 @end example
 
-@item
-Record video from video device 0 and audio from audio device 0 into out.avi:
 @example
-$ ffmpeg -f avfoundation -i "0:0" out.avi
+ffmpeg -f avfoundation -video_device_index 0 -i "" out.mpg
 @end example
 
-@item
-Record video from video device 2 and audio from audio device 1 into out.avi:
 @example
-$ ffmpeg -f avfoundation -video_device_index 2 -i ":1" out.avi
+ffmpeg -f avfoundation -pixel_format bgr0 -i "default" out.mpg
 @end example
 
-@item
-Record video from the system default video device using the pixel format bgr0 and do not record any audio into out.avi:
 @example
-$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
+ffmpeg -f avfoundation -list_devices true -i ""
 @end example
 
-@end itemize
-
 @section bktr
 
 BSD video input device.
@@ -167,7 +107,7 @@ The input name should be in the format:
 @end example
 
 where @var{TYPE} can be either @var{audio} or @var{video},
-and @var{NAME} is the device's name or alternative name..
+and @var{NAME} is the device's name.
 
 @subsection Options
 
@@ -220,61 +160,6 @@ Setting this value too low can degrade performance.
 See also
 @url{http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx}
 
-@item video_pin_name
-Select video capture pin to use by name or alternative name.
-
-@item audio_pin_name
-Select audio capture pin to use by name or alternative name.
-
-@item crossbar_video_input_pin_number
-Select video input pin number for crossbar device. This will be
-routed to the crossbar device's Video Decoder output pin.
-Note that changing this value can affect future invocations
-(sets a new default) until system reboot occurs.
-
-@item crossbar_audio_input_pin_number
-Select audio input pin number for crossbar device. This will be
-routed to the crossbar device's Audio Decoder output pin.
-Note that changing this value can affect future invocations
-(sets a new default) until system reboot occurs.
-
-@item show_video_device_dialog
-If set to @option{true}, before capture starts, popup a display dialog
-to the end user, allowing them to change video filter properties
-and configurations manually.
-Note that for crossbar devices, adjusting values in this dialog
-may be needed at times to toggle between PAL (25 fps) and NTSC (29.97)
-input frame rates, sizes, interlacing, etc. Changing these values can
-enable different scan rates/frame rates and avoiding green bars at
-the bottom, flickering scan lines, etc.
-Note that with some devices, changing these properties can also affect future
-invocations (sets new defaults) until system reboot occurs.
-
-@item show_audio_device_dialog
-If set to @option{true}, before capture starts, popup a display dialog
-to the end user, allowing them to change audio filter properties
-and configurations manually.
-
-@item show_video_crossbar_connection_dialog
-If set to @option{true}, before capture starts, popup a display
-dialog to the end user, allowing them to manually
-modify crossbar pin routings, when it opens a video device.
-
-@item show_audio_crossbar_connection_dialog
-If set to @option{true}, before capture starts, popup a display
-dialog to the end user, allowing them to manually
-modify crossbar pin routings, when it opens an audio device.
-
-@item show_analog_tv_tuner_dialog
-If set to @option{true}, before capture starts, popup a display
-dialog to the end user, allowing them to manually
-modify TV channels and frequencies.
-
-@item show_analog_tv_tuner_audio_dialog
-If set to @option{true}, before capture starts, popup a display
-dialog to the end user, allowing them to manually
-modify TV audio (like mono vs. stereo, Language A,B or C).
-
 @end table
 
 @subsection Examples
@@ -311,19 +196,6 @@ Print the list of supported options in selected device and exit:
 $ ffmpeg -list_options true -f dshow -i video="Camera"
 @end example
 
-@item
-Specify pin names to capture by name or alternative name, specify alternative device name:
-@example
-$ ffmpeg -f dshow -audio_pin_name "Audio Out" -video_pin_name 2 -i video=video="@@device_pnp_\\?\pci#ven_1a0a&dev_6200&subsys_62021461&rev_01#4&e2c7dd6&0&00e1#@{65e8773d-8f56-11d0-a3b9-00a0c9223196@}\@{ca465100-deb0-4d59-818f-8c477184adf6@}":audio="Microphone"
-@end example
-
-@item
-Configure a crossbar device, specifying crossbar pins, allow user to adjust video capture properties at startup:
-@example
-$ ffmpeg -f dshow -show_video_device_dialog true -crossbar_video_input_pin_number 0
--crossbar_audio_input_pin_number 3 -i video="AVerMedia BDA Analog Capture":audio="AVerMedia BDA Analog Capture"
-@end example
-
 @end itemize
 
 @section dv1394
@@ -458,7 +330,7 @@ not work and result in undefined behavior.
 The values @option{auto}, @option{dv} and @option{hdv} are supported.
 
 @item dvbuffer
-Set maximum size of buffer for incoming data, in frames. For DV, this
+Set maxiumum size of buffer for incoming data, in frames. For DV, this
 is an exact value. For HDV, it is not frame exact, since HDV does
 not have a fixed frame size.
 
@@ -563,14 +435,6 @@ generated by the device.
 The first unlabelled output is automatically assigned to the "out0"
 label, but all the others need to be specified explicitly.
 
-The suffix "+subcc" can be appended to the output label to create an extra
-stream with the closed captions packets attached to that output
-(experimental; only for EIA-608 / CEA-708 for now).
-The subcc streams are created after all the normal streams, in the order of
-the corresponding stream.
-For example, if there is "out19+subcc", "out7+subcc" and up to "out42", the
-stream #43 is subcc for stream #7 and stream #44 is subcc for stream #19.
-
 If not specified defaults to the filename specified for the input
 device.
 
@@ -617,57 +481,24 @@ Read an audio stream and a video stream and play it back with
 ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
 @end example
 
-@item
-Dump decoded frames to images and closed captions to a file (experimental):
-@example
-ffmpeg -f lavfi -i "movie=test.ts[out0+subcc]" -map v frame%08d.png -map s -c copy -f rawvideo subcc.bin
-@end example
-
 @end itemize
 
 @section libcdio
 
-Audio-CD input device based on libcdio.
+Audio-CD input device based on cdio.
 
 To enable this input device during configuration you need libcdio
-installed on your system. It requires the configure option
+installed on your system. Requires the configure option
 @code{--enable-libcdio}.
 
 This device allows playing and grabbing from an Audio-CD.
 
-For example to copy with @command{ffmpeg} the entire Audio-CD in @file{/dev/sr0},
+For example to copy with @command{ffmpeg} the entire Audio-CD in /dev/sr0,
 you may run the command:
 @example
 ffmpeg -f libcdio -i /dev/sr0 cd.wav
 @end example
 
-@subsection Options
-@table @option
-@item speed
-Set drive reading speed. Default value is 0.
-
-The speed is specified CD-ROM speed units. The speed is set through
-the libcdio @code{cdio_cddap_speed_set} function. On many CD-ROM
-drives, specifying a value too large will result in using the fastest
-speed.
-
-@item paranoia_mode
-Set paranoia recovery mode flags. It accepts one of the following values:
-
-@table @samp
-@item disable
-@item verify
-@item overlap
-@item neverskip
-@item full
-@end table
-
-Default value is @samp{disable}.
-
-For more information about the available recovery modes, consult the
-paranoia project documentation.
-@end table
-
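A usage sketch, not part of the diff: the @option{speed} and @option{paranoia_mode} options that only the left-hand side documents are input options, so they are placed before @option{-i}:

@example
ffmpeg -f libcdio -speed 4 -paranoia_mode verify -i /dev/sr0 cd.wav
@end example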
 @section libdc1394
 
 IIDC1394 input device, based on libdc1394 and libraw1394.
@@ -1002,12 +833,8 @@ other filename will be interpreted as device number 0.
 
 X11 video input device.
 
-To enable this input device during configuration you need libxcb
-installed on your system. It will be automatically detected during
-configuration.
-
-Alternatively, the configure option @option{--enable-x11grab} exists
-for legacy Xlib users.
+Depends on X11, Xext, and Xfixes. Requires the configure option
+@code{--enable-x11grab}.
 
 This device allows one to capture a region of an X11 display.
 
@@ -1025,12 +852,10 @@ omitted, and defaults to "localhost". The environment variable
 area with respect to the top-left border of the X11 screen. They
 default to 0.
 
-Check the X11 documentation (e.g. @command{man X}) for more detailed
-information.
+Check the X11 documentation (e.g. man X) for more detailed information.
 
-Use the @command{xdpyinfo} program for getting basic information about
-the properties of your X11 display (e.g. grep for "name" or
-"dimensions").
+Use the @command{dpyinfo} program for getting basic information about the
+properties of your X11 display (e.g. grep for "name" or "dimensions").
 
 For example to grab from @file{:0.0} using @command{ffmpeg}:
 @example
@@ -1079,10 +904,6 @@ If @var{show_region} is specified with @code{1}, then the grabbing
 region will be indicated on screen. With this option, it is easy to
 know what is being grabbed if only a portion of the screen is grabbed.
 
-@item region_border
-Set the region border thickness if @option{-show_region 1} is used.
-Range is 1 to 128 and default is 3 (XCB-based x11grab only).
-
 For example:
 @example
 ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
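A usage sketch, not part of the diff: the XCB-only @option{region_border} option that only the left-hand side documents extends the same command, e.g.:

@example
ffmpeg -f x11grab -show_region 1 -region_border 8 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
@end example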
@@ -1098,100 +919,7 @@ Set the video frame size. Default value is @code{vga}.
 
 @item use_shm
 Use the MIT-SHM extension for shared memory. Default value is @code{1}.
-It may be necessary to disable it for remote displays (legacy x11grab
-only).
+It may be necessary to disable it for remote displays.
 @end table
 
-@subsection @var{grab_x} @var{grab_y} AVOption
-
-The syntax is:
-@example
--grab_x @var{x_offset} -grab_y @var{y_offset}
-@end example
-
-Set the grabbing region coordinates. They are expressed as offset from the top left
-corner of the X11 window. The default value is 0.
-
-@section decklink
-
-The decklink input device provides capture capabilities for Blackmagic
-DeckLink devices.
-
-To enable this input device, you need the Blackmagic DeckLink SDK and you
-need to configure with the appropriate @code{--extra-cflags}
-and @code{--extra-ldflags}.
-On Windows, you need to run the IDL files through @command{widl}.
-
-DeckLink is very picky about the formats it supports. Pixel format is
-uyvy422 or v210, framerate and video size must be determined for your device with
-@command{-list_formats 1}. Audio sample rate is always 48 kHz and the number
-of channels can be 2, 8 or 16.
-
-@subsection Options
-
-@table @option
-
-@item list_devices
-If set to @option{true}, print a list of devices and exit.
-Defaults to @option{false}.
-
-@item list_formats
-If set to @option{true}, print a list of supported formats and exit.
-Defaults to @option{false}.
-
-@item bm_v210
-If set to @samp{1}, video is captured in 10 bit v210 instead
-of uyvy422. Not all Blackmagic devices support this option.
-
-@item bm_channels <CHANNELS>
-Number of audio channels, can be 2, 8 or 16
-
-@item bm_audiodepth <BITDEPTH>
-Audio bit depth, can be 16 or 32.
-
-@end table
-
-@subsection Examples
-
-@itemize
-
-@item
-List input devices:
-@example
-ffmpeg -f decklink -list_devices 1 -i dummy
-@end example
-
-@item
-List supported formats:
-@example
-ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
-@end example
-
-@item
-Capture video clip at 1080i50 (format 11):
-@example
-ffmpeg -f decklink -i 'Intensity Pro@@11' -acodec copy -vcodec copy output.avi
-@end example
-
-@item
-Capture video clip at 1080i50 10 bit:
-@example
-ffmpeg -bm_v210 1 -f decklink -i 'UltraStudio Mini Recorder@@11' -acodec copy -vcodec copy output.avi
-@end example
-
-@item
-Capture video clip at 720p50 with 32bit audio:
-@example
-ffmpeg -bm_audiodepth 32 -f decklink -i 'UltraStudio Mini Recorder@@14' -acodec copy -vcodec copy output.avi
-@end example
-
-@item
-Capture video clip at 576i50 with 8 audio channels:
-@example
-ffmpeg -bm_channels 8 -f decklink -i 'UltraStudio Mini Recorder@@3' -acodec copy -vcodec copy output.avi
-@end example
-
-@end itemize
-
 
 @c man end INPUT DEVICES
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle Libavcodec Documentation
 @titlepage
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle Libavdevice Documentation
 @titlepage
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle Libavfilter Documentation
 @titlepage
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle Libavformat Documentation
 @titlepage
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle Libavutil Documentation
 @titlepage
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle Libswresample Documentation
 @titlepage
@@ -1,5 +1,4 @@
 \input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8
 
 @settitle Libswscale Documentation
 @titlepage
141 doc/muxers.texi
@@ -194,19 +194,15 @@ can not be smaller than one centi second.
 Apple HTTP Live Streaming muxer that segments MPEG-TS according to
 the HTTP Live Streaming (HLS) specification.
 
-It creates a playlist file, and one or more segment files. The output filename
-specifies the playlist filename.
-
-By default, the muxer creates a file for each segment produced. These files
-have the same name as the playlist, followed by a sequential number and a
-.ts extension.
+It creates a playlist file and numbered segment files. The output
+filename specifies the playlist filename; the segment filenames
+receive the same basename as the playlist, a sequential number and
+a .ts extension.
 
 For example, to convert an input file with @command{ffmpeg}:
 @example
 ffmpeg -i in.nut out.m3u8
 @end example
-This example will produce the playlist, @file{out.m3u8}, and segment files:
-@file{out0.ts}, @file{out1.ts}, @file{out2.ts}, etc.
 
 See also the @ref{segment} muxer, which provides a more generic and
 flexible implementation of a segmenter, and can be used to perform HLS
@@ -224,11 +220,6 @@ Set the segment length in seconds. Default value is 2.
 Set the maximum number of playlist entries. If set to 0 the list file
 will contain all the segments. Default value is 5.
 
-@item hls_ts_options @var{options_list}
-Set output format options using a :-separated list of key=value
-parameters. Values containing @code{:} special characters must be
-escaped.
-
 @item hls_wrap @var{wrap}
 Set the number after which the segment filename number (the number
 specified in each segment file) wraps. If set to 0 the number will be
@@ -242,9 +233,6 @@ to @var{wrap}.
 Start the playlist sequence number from @var{number}. Default value is
 0.
 
-@item hls_allow_cache @var{allowcache}
-Explicitly set whether the client MAY (1) or MUST NOT (0) cache media segments.
-
 @item hls_base_url @var{baseurl}
 Append @var{baseurl} to every entry in the playlist.
 Useful to generate playlists with absolute paths.
@@ -253,30 +241,6 @@ Note that the playlist sequence number must be unique for each segment
 and it is not to be confused with the segment filename sequence number
 which can be cyclic, for example if the @option{wrap} option is
 specified.
-
-@item hls_segment_filename @var{filename}
-Set the segment filename. Unless hls_flags single_file is set @var{filename}
-is used as a string format with the segment number:
-@example
-ffmpeg in.nut -hls_segment_filename 'file%03d.ts' out.m3u8
-@end example
-This example will produce the playlist, @file{out.m3u8}, and segment files:
-@file{file000.ts}, @file{file001.ts}, @file{file002.ts}, etc.
-
-@item hls_flags single_file
-If this flag is set, the muxer will store all segments in a single MPEG-TS
-file, and will use byte ranges in the playlist. HLS playlists generated with
-this way will have the version number 4.
-For example:
-@example
-ffmpeg -i in.nut -hls_flags single_file out.m3u8
-@end example
-Will produce the playlist, @file{out.m3u8}, and a single segment file,
-@file{out.ts}.
-
-@item hls_flags delete_segments
-Segment files removed from the playlist are deleted after a period of time
-equal to the duration of the segment plus the duration of the playlist.
 @end table
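A usage sketch, not part of the diff: the options that both sides agree on can be combined as follows; @option{hls_time} and @option{hls_list_size} are assumed to be the option names behind the "segment length" and "maximum number of playlist entries" descriptions above:

@example
ffmpeg -i in.nut -hls_time 4 -hls_list_size 10 -hls_base_url http://example.com/live/ out.m3u8
@end example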
 
 @anchor{ico}
@@ -381,7 +345,8 @@ ffmpeg -f v4l2 -r 1 -i /dev/video0 -f image2 -strftime 1 "%Y-%m-%d_%H-%M-%S.jpg"
 
 @table @option
 @item start_number
-Start the sequence from the specified number. Default value is 0.
+Start the sequence from the specified number. Default value is 1. Must
+be a non-negative number.
 
 @item update
 If set to 1, the filename will always be interpreted as just a
@@ -571,6 +536,7 @@ a short portion of the file. With this option set, there is no initial
 mdat atom, and the moov atom only describes the tracks but has
 a zero duration.
 
+Files written with this option set do not work in QuickTime.
 This option is implicitly set when writing ismv (Smooth Streaming) files.
 @item -movflags separate_moof
 Write a separate moof (movie fragment) atom for each track. Normally,
@@ -591,16 +557,6 @@ and a QuickTime chapter track are written to the file. With this option
 set, only the QuickTime chapter track will be written. Nero chapters can
 cause failures when the file is reprocessed with certain tagging programs, like
 mp3Tag 2.61a and iTunes 11.3, most likely other versions are affected as well.
-@item -movflags omit_tfhd_offset
-Do not write any absolute base_data_offset in tfhd atoms. This avoids
-tying fragments to absolute byte positions in the file/streams.
-@item -movflags default_base_moof
-Similarly to the omit_tfhd_offset, this flag avoids writing the
-absolute base_data_offset field in tfhd atoms, but does so by using
-the new default-base-is-moof flag instead. This flag is new from
-14496-12:2012. This may make the fragments easier to parse in certain
-circumstances (avoiding basing track fragment location calculations
-on the implicit end of the previous track fragment).
 @end table
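A usage sketch, not part of the diff: a fragmented MP4 using the @option{default_base_moof} flag that only the left-hand side documents might be produced like this; @code{frag_keyframe} is assumed from the wider mov/mp4 muxer section, and stream copy assumes compatible input:

@example
ffmpeg -i input.mp4 -c copy -movflags frag_keyframe+default_base_moof fragmented.mp4
@end example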
 
 @subsection Example
@@ -613,38 +569,29 @@ ffmpeg -re @var{<normal input/transcoding options>} -movflags isml+frag_keyframe
|
|||||||
|
|
||||||
@section mp3
|
@section mp3
|
||||||
|
|
||||||
The MP3 muxer writes a raw MP3 stream with the following optional features:
|
The MP3 muxer writes a raw MP3 stream with an ID3v2 header at the beginning and
|
||||||
@itemize @bullet
|
optionally an ID3v1 tag at the end. ID3v2.3 and ID3v2.4 are supported, the
|
||||||
@item
|
@code{id3v2_version} option controls which one is used. Setting
|
||||||
An ID3v2 metadata header at the beginning (enabled by default). Versions 2.3 and
|
@code{id3v2_version} to 0 will disable the ID3v2 header completely. The legacy
|
||||||
2.4 are supported, the @code{id3v2_version} private option controls which one is
|
ID3v1 tag is not written by default, but may be enabled with the
|
||||||
used (3 or 4). Setting @code{id3v2_version} to 0 disables the ID3v2 header
|
@code{write_id3v1} option.
|
||||||
completely.
|
|
||||||
|
|
||||||
The muxer supports writing attached pictures (APIC frames) to the ID3v2 header.
|
The muxer may also write a Xing frame at the beginning, which contains the
|
||||||
The pictures are supplied to the muxer in form of a video stream with a single
|
number of frames in the file. It is useful for computing duration of VBR files.
|
||||||
packet. There can be any number of those streams, each will correspond to a
|
The Xing frame is written if the output stream is seekable and if the
|
||||||
single APIC frame. The stream metadata tags @var{title} and @var{comment} map
|
@code{write_xing} option is set to 1 (the default).
|
||||||
to APIC @var{description} and @var{picture type} respectively. See
|
|
||||||
|
The muxer supports writing ID3v2 attached pictures (APIC frames). The pictures
|
||||||
|
are supplied to the muxer in form of a video stream with a single packet. There
|
||||||
|
can be any number of those streams, each will correspond to a single APIC frame.
|
||||||
|
The stream metadata tags @var{title} and @var{comment} map to APIC
|
||||||
|
@var{description} and @var{picture type} respectively. See
|
||||||
@url{http://id3.org/id3v2.4.0-frames} for allowed picture types.
|
@url{http://id3.org/id3v2.4.0-frames} for allowed picture types.
|
||||||
|
|
||||||
Note that the APIC frames must be written at the beginning, so the muxer will
|
Note that the APIC frames must be written at the beginning, so the muxer will
|
||||||
buffer the audio frames until it gets all the pictures. It is therefore advised
|
buffer the audio frames until it gets all the pictures. It is therefore advised
|
||||||
to provide the pictures as soon as possible to avoid excessive buffering.
|
to provide the pictures as soon as possible to avoid excessive buffering.
|
||||||
|
|
||||||
@item
|
|
||||||
A Xing/LAME frame right after the ID3v2 header (if present). It is enabled by
|
|
||||||
default, but will be written only if the output is seekable. The
|
|
||||||
@code{write_xing} private option can be used to disable it. The frame contains
|
|
||||||
various information that may be useful to the decoder, like the audio duration
|
|
||||||
or encoder delay.
|
|
||||||
|
|
||||||
@item
|
|
||||||
A legacy ID3v1 tag at the end of the file (disabled by default). It may be
|
|
||||||
enabled with the @code{write_id3v1} private option, but as its capabilities are
|
|
||||||
very limited, its usage is not recommended.
|
|
||||||
@end itemize
|
|
||||||
|
|
||||||
Examples:
|
Examples:
|
||||||
|
|
||||||
Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
|
Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
|
||||||
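The example announced on the last line above is cut off by the hunk boundary; it would plausibly read as follows (assumed reconstruction):
@example
ffmpeg -i input.flac -id3v2_version 3 -write_id3v1 1 out.mp3
@end example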
@@ -689,9 +636,6 @@ Set the transport_stream_id (default 0x0001). This identifies a
transponder in DVB.
@item -mpegts_service_id @var{number}
Set the service_id (default 0x0001) also known as program in DVB.
-@item -mpegts_service_type @var{number}
-Set the program service_type (default @var{digital_tv}), see below
-a list of pre defined values.
@item -mpegts_pmt_start_pid @var{number}
Set the first PID for PMT (default 0x1000, max 0x1f00).
@item -mpegts_start_pid @var{number}
@@ -726,27 +670,6 @@ ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
@end example
@end table

-Option mpegts_service_type accepts the following values:
-
-@table @option
-@item hex_value
-Any hexdecimal value between 0x01 to 0xff as defined in ETSI 300 468.
-@item digital_tv
-Digital TV service.
-@item digital_radio
-Digital Radio service.
-@item teletext
-Teletext service.
-@item advanced_codec_digital_radio
-Advanced Codec Digital Radio service.
-@item mpeg2_digital_hdtv
-MPEG2 Digital HDTV service.
-@item advanced_codec_digital_sdtv
-Advanced Codec Digital SDTV service.
-@item advanced_codec_digital_hdtv
-Advanced Codec Digital HDTV service.
-@end table
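In builds that still carry the option removed above, declaring a radio service would look roughly like this (assumed example, not from the diff):
@example
ffmpeg -i in.mp3 -c:a mp2 -f mpegts -mpegts_service_type digital_radio out.ts
@end example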

Option mpegts_flags may take a set of such flags:

@table @option
@@ -804,7 +727,7 @@ Change the syncpoint usage in nut:
sensitive and seeking is not possible. Also in general the overhead from
syncpoints is negligible. Note, -@code{write_index} 0 can be used to disable
all growing data tables, allowing to mux endless streams with limited memory
-and without these disadvantages.
+and wihout these disadvantages.
@item @var{timestamped} extend the syncpoint with a wallclock field.
@end table
The @var{none} and @var{timestamped} flags are experimental.
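Putting the paragraph above into a command line, muxing an endless stream with bounded memory would look roughly like this (assumed example):
@example
ffmpeg -i in.avi -f nut -syncpoints none -write_index 0 out.nut
@end example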
@@ -829,11 +752,6 @@ is 1 second. A value of 0 will fill all segments, making pages as large as
possible. A value of 1 will effectively use 1 packet-per-page in most
situations, giving a small seek granularity at the cost of additional container
overhead.
-@item -serial_offset @var{value}
-Serial value from which to set the streams serial number.
-Setting it to different and sufficiently large values ensures that the produced
-ogg files can be safely chained.
-
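A sketch of the chaining use case mentioned above, with assumed offset values (not part of the diff):
@example
ffmpeg -i part1.wav -serial_offset 1000 part1.ogg
ffmpeg -i part2.wav -serial_offset 2000 part2.ogg
cat part1.ogg part2.ogg > chained.ogg
@end example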
@end table

@anchor{segment}
@@ -842,9 +760,8 @@ ogg files can be safely chained.
Basic stream segmenter.

This muxer outputs streams to a number of separate files of nearly
-fixed duration. Output filename pattern can be set in a fashion
-similar to @ref{image2}, or by using a @code{strftime} template if
-the @option{strftime} option is enabled.
+fixed duration. Output filename pattern can be set in a fashion similar to
+@ref{image2}.

@code{stream_segment} is a variant of the muxer used to write to
streaming output formats, i.e. which do not require global headers,
@@ -1024,12 +941,6 @@ Wrap around segment index once it reaches @var{limit}.
@item segment_start_number @var{number}
Set the sequence number of the first segment. Defaults to @code{0}.

-@item strftime @var{1|0}
-Use the @code{strftime} function to define the name of the new
-segments to write. If this is selected, the output segment name must
-contain a @code{strftime} function template. Default value is
-@code{0}.
-
@item reset_timestamps @var{1|0}
Reset timestamps at the begin of each segment, so that each segment
will start with near-zero timestamps. It is meant to ease the playback
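As an illustration of the strftime-based naming removed above, combined with @option{segment_time} (assumed example, not part of the diff):
@example
ffmpeg -i in.nut -f segment -segment_time 10 -strftime 1 out-%Y-%m-%d_%H-%M-%S.mkv
@end example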
@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8

@settitle NUT

@@ -191,11 +191,6 @@ __asm__() block.
Use external asm (nasm/yasm) or inline asm (__asm__()), do not use intrinsics.
The latter requires a good optimizing compiler which gcc is not.

-When debugging a x86 external asm compilation issue, if lost in the macro
-expansions, add DBG=1 to your make command-line: the input file will be
-preprocessed, stripped of the debug/empty lines, then compiled, showing the
-actual lines causing issues.
-
Inline asm vs. external asm
---------------------------
Both inline asm (__asm__("..") in a .c file, handled by a compiler such as gcc)
@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
-@documentencoding UTF-8

@settitle Platform Specific Information
@titlepage
@@ -97,9 +96,9 @@ the FFmpeg Windows Help Forum at @url{http://ffmpeg.zeranoe.com/forum/}.

@section Native Windows compilation using MinGW or MinGW-w64

-FFmpeg can be built to run natively on Windows using the MinGW-w64
-toolchain. Install the latest versions of MSYS2 and MinGW-w64 from
-@url{http://msys2.github.io/} and/or @url{http://mingw-w64.sourceforge.net/}.
+FFmpeg can be built to run natively on Windows using the MinGW or MinGW-w64
+toolchains. Install the latest versions of MSYS and MinGW or MinGW-w64 from
+@url{http://www.mingw.org/} or @url{http://mingw-w64.sourceforge.net/}.
You can find detailed installation instructions in the download section and
the FAQ.

@@ -107,7 +106,7 @@ Notes:

@itemize

-@item Building natively using MSYS2 can be sped up by disabling implicit rules
+@item Building natively using MSYS can be sped up by disabling implicit rules
in the Makefile by calling @code{make -r} instead of plain @code{make}. This
speed up is close to non-existent for normal one-off builds and is only
noticeable when running make for a second time (for example during
@@ -134,12 +133,13 @@ You will need the following prerequisites:
(if using MSVC 2012 or earlier)
@item @uref{http://code.google.com/p/msinttypes/, msinttypes}
(if using MSVC 2012 or earlier)
-@item @uref{http://msys2.github.io/, MSYS2}
+@item @uref{http://www.mingw.org/, MSYS}
@item @uref{http://yasm.tortall.net/, YASM}
-(Also available via MSYS2's package manager.)
+@item @uref{http://gnuwin32.sourceforge.net/packages/bc.htm, bc for Windows} if
+you want to run @uref{fate.html, FATE}.
@end itemize

-To set up a proper environment in MSYS2, you need to run @code{msys_shell.bat} from
+To set up a proper environment in MSYS, you need to run @code{msys.bat} from
the Visual Studio or Intel Compiler command prompt.

Place @code{yasm.exe} somewhere in your @code{PATH}. If using MSVC 2012 or
@@ -283,7 +283,7 @@ binutils, gcc4-core, make, git, mingw-runtime, texinfo

In order to run FATE you will also need the following "Utils" packages:
@example
-diffutils
+bc, diffutils
@end example

If you want to build FFmpeg with additional libraries, download Cygwin
@@ -26,10 +26,6 @@
#include <string.h>
#include <float.h>

-// print_options is build for the host, os_support.h isn't needed and is setup
-// for the target. without this build breaks on mingw
-#define AVFORMAT_OS_SUPPORT_H
-
#include "libavformat/avformat.h"
#include "libavformat/options_table.h"
#include "libavcodec/avcodec.h"
@@ -63,7 +63,7 @@ cache:@var{URL}

Physical concatenation protocol.

-Read and seek from many resources in sequence as if they were
+Allow to read and seek from many resource in sequence as if they were
a unique resource.
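A typical use, as an illustrative sketch (shell escaping of the @code{|} separator assumed):
@example
ffmpeg -i concat:split1.mpeg\|split2.mpeg\|split3.mpeg -c copy output.mpeg
@end example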

A URL accepted by this protocol has the syntax:
@@ -117,7 +117,7 @@ ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP/////////////

File access protocol.

-Read from or write to a file.
+Allow to read from or write to a file.

A file URL can have the form:
@example
@@ -155,7 +155,7 @@ time, which is valuable for files on slow medium.

FTP (File Transfer Protocol).

-Read from or write to remote resources using FTP protocol.
+Allow to read from or write to remote resources using FTP protocol.

Following syntax is required.
@example
@@ -374,7 +374,7 @@ be seekable, so they will fail with the MD5 output protocol.

UNIX pipe access protocol.

-Read and write from UNIX pipes.
+Allow to read and write from UNIX pipes.

The accepted syntax is:
@example
@@ -614,7 +614,7 @@ For more information see: @url{http://www.samba.org/}.

Secure File Transfer Protocol via libssh

-Read from or write to remote resources using SFTP protocol.
+Allow to read from or write to remote resources using SFTP protocol.

Following syntax is required.

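For instance, writing an output file to a remote host over SFTP would look roughly like this (assumed example; credentials and path are placeholders):
@example
ffmpeg -i input.avi -f avi sftp://user:password@@server/dir/output.avi
@end example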
@@ -750,7 +750,7 @@ port will be used for the local RTP and RTCP ports.

@item
If @option{localrtcpport} (the local RTCP port) is not set it will be
-set to the local RTP port value plus 1.
+set to the the local RTP port value plus 1.
@end enumerate

@section rtsp
@@ -1081,8 +1081,8 @@ Set raise error timeout, expressed in microseconds.
This option is only relevant in read mode: if no data arrived in more
than this time interval, raise error.

-@item listen_timeout=@var{milliseconds}
-Set listen timeout, expressed in milliseconds.
+@item listen_timeout=@var{microseconds}
+Set listen timeout, expressed in microseconds.
@end table

The following example shows how to setup a listening TCP connection
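The example announced above falls outside the hunk; the documented pattern is roughly the following pair of commands (assumed reconstruction):
@example
ffmpeg -i @var{input} -f @var{format} tcp://@var{hostname}:@var{port}?listen
ffplay tcp://@var{hostname}:@var{port}
@end example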
126 doc/t2h.pm

@@ -14,117 +14,17 @@
# FFmpeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
+# Lesser General Public License for more details.
#
-# You should have received a copy of the GNU General Public
+# You should have received a copy of the GNU Lesser General Public
# License along with FFmpeg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

# no navigation elements
set_from_init_file('HEADERS', 0);

-sub ffmpeg_heading_command($$$$$)
-{
-    my $self = shift;
-    my $cmdname = shift;
-    my $command = shift;
-    my $args = shift;
-    my $content = shift;
-
-    my $result = '';
-
-    # not clear that it may really happen
-    if ($self->in_string) {
-        $result .= $self->command_string($command) ."\n" if ($cmdname ne 'node');
-        $result .= $content if (defined($content));
-        return $result;
-    }
-
-    my $element_id = $self->command_id($command);
-    $result .= "<a name=\"$element_id\"></a>\n"
-        if (defined($element_id) and $element_id ne '');
-
-    print STDERR "Process $command "
-        .Texinfo::Structuring::_print_root_command_texi($command)."\n"
-            if ($self->get_conf('DEBUG'));
-    my $element;
-    if ($Texinfo::Common::root_commands{$command->{'cmdname'}}
-        and $command->{'parent'}
-        and $command->{'parent'}->{'type'}
-        and $command->{'parent'}->{'type'} eq 'element') {
-        $element = $command->{'parent'};
-    }
-    if ($element) {
-        $result .= &{$self->{'format_element_header'}}($self, $cmdname,
-                                                       $command, $element);
-    }
-
-    my $heading_level;
-    # node is used as heading if there is nothing else.
-    if ($cmdname eq 'node') {
-        if (!$element or (!$element->{'extra'}->{'section'}
-            and $element->{'extra'}->{'node'}
-            and $element->{'extra'}->{'node'} eq $command
-            # bogus node may not have been normalized
-            and defined($command->{'extra'}->{'normalized'}))) {
-            if ($command->{'extra'}->{'normalized'} eq 'Top') {
-                $heading_level = 0;
-            } else {
-                $heading_level = 3;
-            }
-        }
-    } else {
-        $heading_level = $command->{'level'};
-    }
-
-    my $heading = $self->command_text($command);
-    # $heading not defined may happen if the command is a @node, for example
-    # if there is an error in the node.
-    if (defined($heading) and $heading ne '' and defined($heading_level)) {
-
-        if ($Texinfo::Common::root_commands{$cmdname}
-            and $Texinfo::Common::sectioning_commands{$cmdname}) {
-            my $content_href = $self->command_contents_href($command, 'contents',
-                                                            $self->{'current_filename'});
-            if ($content_href) {
-                my $this_href = $content_href =~ s/^\#toc-/\#/r;
-                $heading .= '<span class="pull-right">'.
-                            '<a class="anchor hidden-xs" '.
-                            "href=\"$this_href\" aria-hidden=\"true\">".
-                            ($ENV{"FA_ICONS"} ? '<i class="fa fa-link"></i>'
-                                              : '#').
-                            '</a> '.
-                            '<a class="anchor hidden-xs"'.
-                            "href=\"$content_href\" aria-hidden=\"true\">".
-                            ($ENV{"FA_ICONS"} ? '<i class="fa fa-navicon"></i>'
-                                              : 'TOC').
-                            '</a>'.
-                            '</span>';
-            }
-        }
-
-        if ($self->in_preformatted()) {
-            $result .= $heading."\n";
-        } else {
-            # if the level was changed, set the command name right
-            if ($cmdname ne 'node'
-                and $heading_level ne $Texinfo::Common::command_structuring_level{$cmdname}) {
-                $cmdname
-                    = $Texinfo::Common::level_to_structuring_command{$cmdname}->[$heading_level];
-            }
-            $result .= &{$self->{'format_heading_text'}}(
-                $self, $cmdname, $heading,
-                $heading_level +
-                $self->get_conf('CHAPTER_HEADER_LEVEL') - 1, $command);
-        }
-    }
-    $result .= $content if (defined($content));
-    return $result;
-}
-
-foreach my $command (keys(%Texinfo::Common::sectioning_commands), 'node') {
-    texinfo_register_command_formatting($command, \&ffmpeg_heading_command);
-}
+# TOC and Chapter headings link
+set_from_init_file('TOC_LINKS', 1);

# print the TOC where @contents is used
set_from_init_file('INLINE_CONTENTS', 1);
@@ -169,7 +69,6 @@ EOT

my $head2 = $ENV{"FFMPEG_HEADER2"} || <<EOT;
</title>
-<meta name="viewport" content="width=device-width,initial-scale=1.0">
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
<link rel="stylesheet" type="text/css" href="style.min.css">
</head>
@@ -186,23 +85,6 @@ EOT
}
texinfo_register_formatting_function('begin_file', \&ffmpeg_begin_file);

-sub ffmpeg_program_string($)
-{
-    my $self = shift;
-    if (defined($self->get_conf('PROGRAM'))
-        and $self->get_conf('PROGRAM') ne ''
-        and defined($self->get_conf('PACKAGE_URL'))) {
-        return $self->convert_tree(
-            $self->gdt('This document was generated using @uref{{program_homepage}, @emph{{program}}}.',
-                       { 'program_homepage' => $self->get_conf('PACKAGE_URL'),
-                         'program' => $self->get_conf('PROGRAM') }));
-    } else {
-        return $self->convert_tree(
-            $self->gdt('This document was generated automatically.'));
-    }
-}
-texinfo_register_formatting_function('program_string', \&ffmpeg_program_string);
-
# Customized file ending
sub ffmpeg_end_file($)
{
@@ -844,7 +844,7 @@ Return 1.0 if @var{x} is +/-INFINITY, 0.0 otherwise.
Return 1.0 if @var{x} is NAN, 0.0 otherwise.

@item ld(var)
-Load the value of the internal variable with number
+Allow to load the value of the internal variable with number
@var{var}, which was previously stored with st(@var{var}, @var{expr}).
The function returns the loaded value.

@@ -912,7 +912,7 @@ Compute the square root of @var{expr}. This is equivalent to
Compute expression @code{1/(1 + exp(4*x))}.

@item st(var, expr)
-Store the value of the expression @var{expr} in an internal
+Allow to store the value of the expression @var{expr} in an internal
variable. @var{var} specifies the number of the variable where to
store the value, and it is a value ranging from 0 to 9. The function
returns the value stored in the internal variable.
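A tiny worked example of the two functions above: the expression below stores 3 in variable 0 (st() itself evaluates to the stored value) and then multiplies it by the value loaded back, so the whole expression evaluates to 9.
@example
st(0, 3) * ld(0)
@end example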
|
@@ -16,15 +16,16 @@ outputs the modified frame. The most simple way of doing this is to take a
|
|||||||
similar filter. We'll pick edgedetect, but any other should do. You can look
|
similar filter. We'll pick edgedetect, but any other should do. You can look
|
||||||
for others using the `./ffmpeg -v 0 -filters|grep ' V->V '` command.
|
for others using the `./ffmpeg -v 0 -filters|grep ' V->V '` command.
|
||||||
|
|
||||||
- sed 's/edgedetect/foobar/g;s/EdgeDetect/Foobar/g' libavfilter/vf_edgedetect.c > libavfilter/vf_foobar.c
|
- cp libavfilter/vf_{edgedetect,foobar}.c
|
||||||
|
- sed -i s/edgedetect/foobar/g -i libavfilter/vf_foobar.c
|
||||||
|
- sed -i s/EdgeDetect/Foobar/g -i libavfilter/vf_foobar.c
|
||||||
- edit libavfilter/Makefile, and add an entry for "foobar" following the
|
- edit libavfilter/Makefile, and add an entry for "foobar" following the
|
||||||
pattern of the other filters.
|
pattern of the other filters.
|
||||||
- edit libavfilter/allfilters.c, and add an entry for "foobar" following the
|
- edit libavfilter/allfilters.c, and add an entry for "foobar" following the
|
||||||
pattern of the other filters.
|
pattern of the other filters.
|
||||||
- ./configure ...
|
- ./configure ...
|
||||||
- make -j<whatever> ffmpeg
|
- make -j<whatever> ffmpeg
|
||||||
- ./ffmpeg -i http://samples.ffmpeg.org/image-samples/lena.pnm -vf foobar foobar.png
|
- ./ffmpeg -i tests/lena.pnm -vf foobar foobar.png
|
||||||
Note here: you can obviously use a random local image instead of a remote URL.
|
|
||||||
|
|
||||||
If everything went right, you should get a foobar.png with Lena edge-detected.
|
If everything went right, you should get a foobar.png with Lena edge-detected.
|
||||||
|
|
||||||
|
346 ffmpeg.c

@@ -62,6 +62,8 @@
#include "libavutil/threadmessage.h"
#include "libavformat/os_support.h"

+#include "libavformat/ffm.h" // not public API
+
# include "libavfilter/avcodec.h"
# include "libavfilter/avfilter.h"
# include "libavfilter/buffersrc.h"
@@ -153,9 +155,8 @@ static struct termios oldtty;
static int restore_tty;
#endif

-#if HAVE_PTHREADS
static void free_input_threads(void);
-#endif

/* sub2video hack:
   Convert subtitles to video with alpha to insert them in filter graphs.
@@ -457,8 +458,8 @@ static void ffmpeg_cleanup(int ret)
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        AVFormatContext *s = of->ctx;
-        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
-            avio_closep(&s->pb);
+        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE) && s->pb)
+            avio_close(s->pb);
        avformat_free_context(s);
        av_dict_free(&of->opts);

@@ -474,7 +475,6 @@ static void ffmpeg_cleanup(int ret)
        }
        ost->bitstream_filters = NULL;
        av_frame_free(&ost->filtered_frame);
-        av_frame_free(&ost->last_frame);

        av_parser_close(ost->parser);

@@ -515,7 +515,7 @@ static void ffmpeg_cleanup(int ret)

    if (vstats_file)
        fclose(vstats_file);
-    av_freep(&vstats_filename);
+    av_free(vstats_filename);

    av_freep(&input_streams);
    av_freep(&input_files);
@@ -622,11 +622,7 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)

    while (bsfc) {
        AVPacket new_pkt = *pkt;
-        AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
-                                                 bsfc->filter->name,
-                                                 NULL, 0);
-        int a = av_bitstream_filter_filter(bsfc, avctx,
-                                           bsf_arg ? bsf_arg->value : NULL,
+        int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
                                           &new_pkt.data, &new_pkt.size,
                                           pkt->data, pkt->size,
                                           pkt->flags & AV_PKT_FLAG_KEY);
@@ -663,17 +659,6 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
    }

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
-        if (pkt->dts != AV_NOPTS_VALUE &&
-            pkt->pts != AV_NOPTS_VALUE &&
-            pkt->dts > pkt->pts) {
-            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
-                   pkt->dts, pkt->pts,
-                   ost->file_index, ost->st->index);
-            pkt->pts =
-            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
-                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
-                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
-        }
        if(
            (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
            pkt->dts != AV_NOPTS_VALUE &&
@@ -696,6 +681,15 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
            pkt->dts = max;
        }
    }
+        if (pkt->dts != AV_NOPTS_VALUE &&
+            pkt->pts != AV_NOPTS_VALUE &&
+            pkt->dts > pkt->pts) {
+            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d\n",
+                   pkt->dts, pkt->pts,
+                   ost->file_index, ost->st->index);
+            pkt->pts = AV_NOPTS_VALUE;
+            pkt->dts = AV_NOPTS_VALUE;
+        }
    }
    ost->last_mux_dts = pkt->dts;

@@ -818,10 +812,6 @@ static void do_subtitle_out(AVFormatContext *s,

    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
-        if (!subtitle_out) {
-            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
-            exit_program(1);
-        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
@@ -882,43 +872,28 @@ static void do_subtitle_out(AVFormatContext *s,

static void do_video_out(AVFormatContext *s,
                         OutputStream *ost,
-                        AVFrame *next_picture,
-                        double sync_ipts)
+                        AVFrame *in_picture)
{
    int ret, format_video_sync;
    AVPacket pkt;
    AVCodecContext *enc = ost->enc_ctx;
    AVCodecContext *mux_enc = ost->st->codec;
-    int nb_frames, nb0_frames, i;
-    double delta, delta0;
+    int nb_frames, i;
+    double sync_ipts, delta;
    double duration = 0;
    int frame_size = 0;
    InputStream *ist = NULL;
-    AVFilterContext *filter = ost->filter->filter;

    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];

-    if (filter->inputs[0]->frame_rate.num > 0 &&
-        filter->inputs[0]->frame_rate.den > 0)
-        duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
-        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
+        duration = 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base));

-    if (!ost->filters_script &&
-        !ost->filters &&
-        next_picture &&
-        ist &&
-        lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
-        duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
-    }
-
-    delta0 = sync_ipts - ost->sync_opts;
-    delta = delta0 + duration;
+    sync_ipts = in_picture->pts;
+    delta = sync_ipts - ost->sync_opts + duration;

    /* by default, we output a single frame */
-    nb0_frames = 0;
    nb_frames = 1;

    format_video_sync = video_sync_method;
@@ -938,39 +913,19 @@ static void do_video_out(AVFormatContext *s,
        }
    }

-    if (delta0 < 0 &&
-        delta > 0 &&
-        format_video_sync != VSYNC_PASSTHROUGH &&
-        format_video_sync != VSYNC_DROP) {
-        double cor = FFMIN(-delta0, duration);
-        if (delta0 < -0.6) {
-            av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
-        } else
-            av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
-        sync_ipts += cor;
-        duration -= cor;
-        delta0 += cor;
-    }
-
    switch (format_video_sync) {
    case VSYNC_VSCFR:
        if (ost->frame_number == 0 && delta - duration >= 0.5) {
            av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
            delta = duration;
-            delta0 = 0;
            ost->sync_opts = lrint(sync_ipts);
        }
    case VSYNC_CFR:
        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
-        if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
+        if (delta < -1.1)
            nb_frames = 0;
-        } else if (delta < -1.1)
-            nb_frames = 0;
-        else if (delta > 1.1) {
+        else if (delta > 1.1)
            nb_frames = lrintf(delta);
-            if (delta0 > 1.1)
-                nb0_frames = lrintf(delta0 - 0.6);
-        }
        break;
    case VSYNC_VFR:
        if (delta <= -0.6)
@@ -987,36 +942,28 @@ static void do_video_out(AVFormatContext *s,
    }

    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
-    nb0_frames = FFMIN(nb0_frames, nb_frames);
-    if (nb0_frames == 0 && ost->last_droped) {
+    if (nb_frames == 0) {
        nb_frames_drop++;
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
-               ost->frame_number, ost->st->index, ost->last_frame->pts);
-    }
-    if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
+               ost->frame_number, ost->st->index, in_picture->pts);
+        return;
+    } else if (nb_frames > 1) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
            nb_frames_drop++;
            return;
        }
-        nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
+        nb_frames_dup += nb_frames - 1;
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
    }
-    ost->last_droped = nb_frames == nb0_frames;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
-        AVFrame *in_picture;
        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;

-        if (i < nb0_frames && ost->last_frame) {
-            in_picture = ost->last_frame;
-        } else
-            in_picture = next_picture;
-
        in_picture->pts = ost->sync_opts;

#if 1
@@ -1031,8 +978,10 @@ static void do_video_out(AVFormatContext *s,
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older
               method. */
-            if (in_picture->interlaced_frame)
-                mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
+            mux_enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
+            mux_enc->coded_frame->top_field_first = in_picture->top_field_first;
+            if (mux_enc->coded_frame->interlaced_frame)
+                mux_enc->field_order = mux_enc->coded_frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
            else
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;
            pkt.data = (uint8_t *)in_picture;
@@ -1058,7 +1007,8 @@ static void do_video_out(AVFormatContext *s,
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;

            in_picture->quality = enc->global_quality;
-            in_picture->pict_type = 0;
+            if (!enc->me_threshold)
+                in_picture->pict_type = 0;

            pts_time = in_picture->pts != AV_NOPTS_VALUE ?
                in_picture->pts * av_q2d(enc->time_base) : NAN;
@@ -1152,11 +1102,6 @@ static void do_video_out(AVFormatContext *s,
        if (vstats_filename && frame_size)
            do_video_stats(ost, frame_size);
    }
-
-    if (!ost->last_frame)
-        ost->last_frame = av_frame_alloc();
-    av_frame_unref(ost->last_frame);
-    av_frame_ref(ost->last_frame, next_picture);
}

static double psnr(double d)
@@ -1182,8 +1127,8 @@ static void do_video_stats(OutputStream *ost, int frame_size)
    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
-        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame ? enc->coded_frame->quality / (float)FF_QP2LAMBDA : 0);
-        if (enc->coded_frame && (enc->flags&CODEC_FLAG_PSNR))
+        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
+        if (enc->flags&CODEC_FLAG_PSNR)
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
@@ -1196,7 +1141,7 @@ static void do_video_stats(OutputStream *ost, int frame_size)
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
-        fprintf(vstats_file, "type= %c\n", enc->coded_frame ? av_get_picture_type_char(enc->coded_frame->pict_type) : 'I');
+        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
    }
}

@@ -1223,6 +1168,7 @@ static int reap_filters(void)
{
    AVFrame *filtered_frame = NULL;
    int i;
+    int64_t frame_pts;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
@@ -1242,7 +1188,6 @@ static int reap_filters(void)
        filtered_frame = ost->filtered_frame;

        while (1) {
-            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
            if (ret < 0) {
@@ -1256,20 +1201,10 @@ static int reap_filters(void)
                av_frame_unref(filtered_frame);
                continue;
            }
+            frame_pts = AV_NOPTS_VALUE;
            if (filtered_frame->pts != AV_NOPTS_VALUE) {
                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
-                AVRational tb = enc->time_base;
-                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
-
-                tb.den <<= extra_bits;
-                float_pts =
-                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
-                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
-                float_pts /= 1 << extra_bits;
-                // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
-                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
-
-                filtered_frame->pts =
+                filtered_frame->pts = frame_pts =
                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
            }
@@ -1278,19 +1213,20 @@ static int reap_filters(void)

            switch (filter->inputs[0]->type) {
            case AVMEDIA_TYPE_VIDEO:
+                filtered_frame->pts = frame_pts;
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                if (debug_ts) {
-                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
+                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s time_base:%d/%d\n",
                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
-                            float_pts,
                            enc->time_base.num, enc->time_base.den);
                }

-                do_video_out(of->ctx, ost, filtered_frame, float_pts);
+                do_video_out(of->ctx, ost, filtered_frame);
                break;
            case AVMEDIA_TYPE_AUDIO:
+                filtered_frame->pts = frame_pts;
                if (!(enc->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != av_frame_get_channels(filtered_frame)) {
                    av_log(NULL, AV_LOG_ERROR,
@@ -1334,6 +1270,7 @@ static void print_final_stats(int64_t total_size)
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

+    av_log(NULL, AV_LOG_INFO, "\n");
    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
@@ -1524,12 +1461,10 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
        if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
-        if (is_last_report)
-            nb_frames_drop += ost->last_droped;
    }

-    secs = FFABS(pts) / AV_TIME_BASE;
-    us = FFABS(pts) % AV_TIME_BASE;
+    secs = pts / AV_TIME_BASE;
+    us = pts % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
@@ -1541,20 +1476,13 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
                                 "size=N/A time=");
    else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=%8.0fkB time=", total_size / 1024.0);
-    if (pts < 0)
-        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
             "%02d:%02d:%02d.%02d ", hours, mins, secs,
             (100 * us) / AV_TIME_BASE);
-    if (bitrate < 0) {
-        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
-        av_bprintf(&buf_script, "bitrate=N/A\n");
-    }else{
-        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
-        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
-    }
+    if (bitrate < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
+                              "bitrate=N/A");
+    else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
+                              "bitrate=%6.1fkbits/s", bitrate);

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
@@ -1568,11 +1496,10 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (print_stats || is_last_report) {
-        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
-            fprintf(stderr, "%s %c", buf, end);
+            fprintf(stderr, "%s \r", buf);
        } else
-            av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
+            av_log(NULL, AV_LOG_INFO, "%s \r", buf);

        fflush(stderr);
    }
@@ -1585,7 +1512,8 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
-            avio_closep(&progress_avio);
+            avio_close(progress_avio);
+            progress_avio = NULL;
        }
    }

@@ -1978,20 +1906,6 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
    if (*got_output || ret<0 || pkt->size)
        decode_error_stat[ret<0] ++;

-    if (*got_output && ret >= 0) {
-        if (ist->dec_ctx->width != decoded_frame->width ||
-            ist->dec_ctx->height != decoded_frame->height ||
-            ist->dec_ctx->pix_fmt != decoded_frame->format) {
-            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
-                   decoded_frame->width,
-                   decoded_frame->height,
-                   decoded_frame->format,
-                   ist->dec_ctx->width,
-                   ist->dec_ctx->height,
-                   ist->dec_ctx->pix_fmt);
-        }
-    }
-
    if (!*got_output || ret < 0) {
        if (!pkt->size) {
            for (i = 0; i < ist->nb_filters; i++)
@@ -2208,11 +2122,11 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt)
            ret = decode_video (ist, &avpkt, &got_output);
            if (avpkt.duration) {
                duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
-            } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
+            } else if(ist->dec_ctx->time_base.num != 0 && ist->dec_ctx->time_base.den != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                duration = ((int64_t)AV_TIME_BASE *
-                            ist->dec_ctx->framerate.den * ticks) /
-                            ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
+                            ist->dec_ctx->time_base.num * ticks) /
+                            ist->dec_ctx->time_base.den;
            } else
                duration = 0;

@@ -2267,11 +2181,11 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt)
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
-            } else if(ist->dec_ctx->framerate.num != 0) {
+            } else if(ist->dec_ctx->time_base.num != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
-                                  ist->dec_ctx->framerate.den * ticks) /
-                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
+                                  ist->dec_ctx->time_base.num * ticks) /
+                                  ist->dec_ctx->time_base.den;
            }
            break;
        }
@@ -2294,34 +2208,16 @@ static void print_sdp(void)
|
|||||||
{
|
{
|
||||||
char sdp[16384];
|
char sdp[16384];
|
||||||
int i;
|
int i;
|
||||||
int j;
|
|
||||||
AVIOContext *sdp_pb;
|
|
||||||
AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
|
AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
|
||||||
|
|
||||||
if (!avc)
|
if (!avc)
|
||||||
exit_program(1);
|
exit_program(1);
|
||||||
for (i = 0, j = 0; i < nb_output_files; i++) {
|
for (i = 0; i < nb_output_files; i++)
|
||||||
if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
|
avc[i] = output_files[i]->ctx;
|
||||||
avc[j] = output_files[i]->ctx;
|
|
||||||
j++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
av_sdp_create(avc, j, sdp, sizeof(sdp));
|
|
||||||
|
|
||||||
if (!sdp_filename) {
|
|
||||||
printf("SDP:\n%s\n", sdp);
|
|
||||||
fflush(stdout);
|
|
||||||
} else {
|
|
||||||
if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
|
|
||||||
av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
|
|
||||||
} else {
|
|
||||||
avio_printf(sdp_pb, "SDP:\n%s", sdp);
|
|
||||||
avio_closep(&sdp_pb);
|
|
||||||
av_freep(&sdp_filename);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
|
||||||
|
printf("SDP:\n%s\n", sdp);
|
||||||
|
fflush(stdout);
|
||||||
av_freep(&avc);
|
av_freep(&avc);
|
||||||
}
|
}
|
||||||
|
|
||||||
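The print_sdp() hunk above removes, among other things, the base revision's habit of passing only RTP muxers to av_sdp_create() and of optionally writing the result to a file. A condensed sketch of just the selection step, with the output contexts passed in explicitly instead of read from ffmpeg.c's globals:

```c
#include <stdio.h>
#include <string.h>
#include <libavformat/avformat.h>
#include <libavutil/mem.h>

/* Build the SDP from the subset of outputs that actually use the "rtp"
 * muxer; non-RTP outputs have nothing meaningful to contribute to an SDP. */
static void print_sdp_for_rtp(AVFormatContext **outputs, int nb_outputs)
{
    char sdp[16384];
    int i, j = 0;
    AVFormatContext **avc = av_malloc_array(nb_outputs, sizeof(*avc));

    if (!avc)
        return;
    for (i = 0; i < nb_outputs; i++)
        if (!strcmp(outputs[i]->oformat->name, "rtp"))
            avc[j++] = outputs[i];

    av_sdp_create(avc, j, sdp, sizeof(sdp));  /* only the RTP outputs */
    printf("SDP:\n%s\n", sdp);
    fflush(stdout);
    av_freep(&avc);
}
```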
@@ -2567,7 +2463,7 @@ static int transcode_init(void)
 AVFormatContext *oc;
 OutputStream *ost;
 InputStream *ist;
-char error[1024] = {0};
+char error[1024];
 int want_sdp = 1;

 for (i = 0; i < nb_filtergraphs; i++) {
@@ -2725,26 +2621,6 @@ static int transcode_init(void)
 av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
 enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);

-if (ist->st->nb_side_data) {
-ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
-sizeof(*ist->st->side_data));
-if (!ost->st->side_data)
-return AVERROR(ENOMEM);
-
-for (j = 0; j < ist->st->nb_side_data; j++) {
-const AVPacketSideData *sd_src = &ist->st->side_data[j];
-AVPacketSideData *sd_dst = &ost->st->side_data[j];
-
-sd_dst->data = av_malloc(sd_src->size);
-if (!sd_dst->data)
-return AVERROR(ENOMEM);
-memcpy(sd_dst->data, sd_src->data, sd_src->size);
-sd_dst->size = sd_src->size;
-sd_dst->type = sd_src->type;
-ost->st->nb_side_data++;
-}
-}
-
 ost->parser = av_parser_init(enc_ctx->codec_id);

 switch (enc_ctx->codec_type) {
@@ -2759,10 +2635,7 @@ static int transcode_init(void)
 enc_ctx->frame_size = dec_ctx->frame_size;
 enc_ctx->audio_service_type = dec_ctx->audio_service_type;
 enc_ctx->block_align = dec_ctx->block_align;
-enc_ctx->initial_padding = dec_ctx->delay;
-#if FF_API_AUDIOENC_DELAY
 enc_ctx->delay = dec_ctx->delay;
-#endif
 if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
 enc_ctx->block_align= 0;
 if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
@@ -2786,7 +2659,6 @@ static int transcode_init(void)
 sar = dec_ctx->sample_aspect_ratio;
 ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
-ost->st->r_frame_rate = ist->st->r_frame_rate;
 break;
 case AVMEDIA_TYPE_SUBTITLE:
 enc_ctx->width = dec_ctx->width;
@@ -2847,7 +2719,6 @@ static int transcode_init(void)
 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
 ost->frame_rate = ost->enc->supported_framerates[idx];
 }
-// reduce frame rate for mpeg4 to be within the spec limits
 if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
 av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
 ost->frame_rate.num, ost->frame_rate.den, 65535);
@@ -2932,8 +2803,6 @@ static int transcode_init(void)
 enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
 }
 break;
-case AVMEDIA_TYPE_DATA:
-break;
 default:
 abort();
 break;
@@ -2972,37 +2841,6 @@ static int transcode_init(void)
 }
 }
 }

-if (ost->disposition) {
-static const AVOption opts[] = {
-{ "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
-{ "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
-{ "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
-{ "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
-{ "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
-{ "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
-{ "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
-{ "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
-{ "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
-{ "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
-{ "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
-{ "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
-{ "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
-{ "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
-{ NULL },
-};
-static const AVClass class = {
-.class_name = "",
-.item_name = av_default_item_name,
-.option = opts,
-.version = LIBAVUTIL_VERSION_INT,
-};
-const AVClass *pclass = &class;
-
-ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
-if (ret < 0)
-goto dump_format;
-}
 }

 /* open each encoder */
@@ -3044,11 +2882,10 @@ static int transcode_init(void)
 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
 " It takes bits/s as argument, not kbits/s\n");
 } else {
-ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
-if (ret < 0) {
+if (av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts) < 0) {
 av_log(NULL, AV_LOG_FATAL,
 "Error setting up codec context options.\n");
-return ret;
+exit_program(1);
 }
 }

@@ -3198,7 +3035,7 @@ static int transcode_init(void)
 return ret;
 }

-if (sdp_filename || want_sdp) {
+if (want_sdp) {
 print_sdp();
 }

@@ -3248,9 +3085,9 @@ static OutputStream *choose_output(void)
 OutputStream *ost = output_streams[i];
 int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
 AV_TIME_BASE_Q);
-if (!ost->finished && opts < opts_min) {
+if (!ost->unavailable && !ost->finished && opts < opts_min) {
 opts_min = opts;
-ost_min = ost->unavailable ? NULL : ost;
+ost_min = ost;
 }
 }
 return ost_min;
@@ -3357,7 +3194,6 @@ static int check_keyboard_interaction(int64_t cur_time)
 static void *input_thread(void *arg)
 {
 InputFile *f = arg;
-unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
 int ret = 0;

 while (1) {
@@ -3373,15 +3209,7 @@ static void *input_thread(void *arg)
 break;
 }
 av_dup_packet(&pkt);
-ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
-if (flags && ret == AVERROR(EAGAIN)) {
-flags = 0;
-ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
-av_log(f->ctx, AV_LOG_WARNING,
-"Thread message queue blocking; consider raising the "
-"thread_queue_size option (current value: %d)\n",
-f->thread_queue_size);
-}
+ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, 0);
 if (ret < 0) {
 if (ret != AVERROR_EOF)
 av_log(f->ctx, AV_LOG_ERROR,
@@ -3430,7 +3258,7 @@ static int init_input_threads(void)
 strcmp(f->ctx->iformat->name, "lavfi"))
 f->non_blocking = 1;
 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
-f->thread_queue_size, sizeof(AVPacket));
+8, sizeof(AVPacket));
 if (ret < 0)
 return ret;

@@ -3637,14 +3465,13 @@ static int process_input(int file_index)
 if (pkt.dts != AV_NOPTS_VALUE)
 pkt.dts *= ist->ts_scale;

-if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
-ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
-pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
+if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
 int64_t delta = pkt_dts - ifile->last_ts;
-if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
-delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
+if(delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
+(delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
+ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)){
 ifile->ts_offset -= delta;
 av_log(NULL, AV_LOG_DEBUG,
 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
@@ -3655,15 +3482,14 @@ static int process_input(int file_index)
 }
 }

-if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
-ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
-pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
+if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
 !copy_ts) {
 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
 int64_t delta = pkt_dts - ist->next_dts;
 if (is->iformat->flags & AVFMT_TS_DISCONT) {
 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
-delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
+(delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
+ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
 ifile->ts_offset -= delta;
 av_log(NULL, AV_LOG_DEBUG,
@@ -3675,7 +3501,7 @@ static int process_input(int file_index)
 }
 } else {
 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
-delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
+(delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
 pkt.dts = AV_NOPTS_VALUE;
 }
@@ -3683,7 +3509,7 @@ static int process_input(int file_index)
 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
 delta = pkt_pts - ist->next_dts;
 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
-delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
+(delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
 pkt.pts = AV_NOPTS_VALUE;
 }
@@ -3923,11 +3749,9 @@ static int transcode(void)
 }
 av_freep(&ost->forced_kf_pts);
 av_freep(&ost->apad);
-av_freep(&ost->disposition);
 av_dict_free(&ost->encoder_opts);
 av_dict_free(&ost->swr_opts);
 av_dict_free(&ost->resample_opts);
-av_dict_free(&ost->bsf_args);
 }
 }
 }
@@ -3949,7 +3773,7 @@ static int64_t getutime(void)
 GetProcessTimes(proc, &c, &e, &k, &u);
 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
 #else
-return av_gettime_relative();
+return av_gettime();
 #endif
 }

12 ffmpeg.h
@@ -111,7 +111,6 @@ typedef struct OptionsContext {
 int64_t input_ts_offset;
 int rate_emu;
 int accurate_seek;
-int thread_queue_size;

 SpecifierOpt *ts_scale;
 int nb_ts_scale;
@@ -207,8 +206,6 @@ typedef struct OptionsContext {
 int nb_apad;
 SpecifierOpt *discard;
 int nb_discard;
-SpecifierOpt *disposition;
-int nb_disposition;
 } OptionsContext;

 typedef struct InputFilter {
@@ -351,7 +348,6 @@ typedef struct InputFile {
 pthread_t thread; /* thread reading from this file */
 int non_blocking; /* reading packets from the thread should not block */
 int joined; /* the thread has been joined */
-int thread_queue_size; /* maximum number of queued packets */
 #endif
 } InputFile;

@@ -392,8 +388,6 @@ typedef struct OutputStream {
 AVCodec *enc;
 int64_t max_frames;
 AVFrame *filtered_frame;
-AVFrame *last_frame;
-int last_droped;

 /* video only */
 AVRational frame_rate;
@@ -426,7 +420,6 @@ typedef struct OutputStream {
 AVDictionary *encoder_opts;
 AVDictionary *swr_opts;
 AVDictionary *resample_opts;
-AVDictionary *bsf_args;
 char *apad;
 OSTFinished finished; /* no more packets should be written for this stream */
 int unavailable; /* true if the steram is unavailable (possibly temporarily) */
@@ -434,7 +427,6 @@ typedef struct OutputStream {
 const char *attachment_filename;
 int copy_initial_nonkeyframes;
 int copy_prior_start;
-char *disposition;

 int keep_pix_fmt;

@@ -475,7 +467,6 @@ extern FilterGraph **filtergraphs;
 extern int nb_filtergraphs;

 extern char *vstats_filename;
-extern char *sdp_filename;

 extern float audio_drift_threshold;
 extern float dts_delta_threshold;
@@ -484,14 +475,12 @@ extern float dts_error_threshold;
 extern int audio_volume;
 extern int audio_sync_method;
 extern int video_sync_method;
-extern float frame_drop_threshold;
 extern int do_benchmark;
 extern int do_benchmark_all;
 extern int do_deinterlace;
 extern int do_hex_dump;
 extern int do_pkt_dump;
 extern int copy_ts;
-extern int start_at_zero;
 extern int copy_tb;
 extern int debug_ts;
 extern int exit_on_error;
@@ -501,7 +490,6 @@ extern int stdin_interaction;
 extern int frame_bits_per_raw_sample;
 extern AVIOContext *progress_avio;
 extern float max_error_rate;
-extern int vdpau_api_ver;

 extern const AVIOInterruptCB int_cb;

@@ -52,7 +52,6 @@ DEFINE_GUID(DXVA2_ModeH264_F, 0x1b81be69, 0xa0c7,0x11d3,0xb9,0x84,0x00,0
 DEFINE_GUID(DXVADDI_Intel_ModeH264_E, 0x604F8E68, 0x4951,0x4C54,0x88,0xFE,0xAB,0xD2,0x5C,0x15,0xB3,0xD6);
 DEFINE_GUID(DXVA2_ModeVC1_D, 0x1b81beA3, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
 DEFINE_GUID(DXVA2_ModeVC1_D2010, 0x1b81beA4, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
-DEFINE_GUID(DXVA2_ModeHEVC_VLD_Main, 0x5b11d51b, 0x2f4c,0x4452,0xbc,0xc3,0x09,0xf2,0xa1,0x16,0x0c,0xc0);
 DEFINE_GUID(DXVA2_NoEncrypt, 0x1b81beD0, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
 DEFINE_GUID(GUID_NULL, 0x00000000, 0x0000,0x0000,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00);

@@ -81,9 +80,6 @@ static const dxva2_mode dxva2_modes[] = {
 { &DXVA2_ModeVC1_D, AV_CODEC_ID_VC1 },
 { &DXVA2_ModeVC1_D, AV_CODEC_ID_WMV3 },

-/* HEVC/H.265 */
-{ &DXVA2_ModeHEVC_VLD_Main, AV_CODEC_ID_HEVC },
-
 { NULL, 0 },
 };

@@ -530,10 +526,6 @@ static int dxva2_create_decoder(AVCodecContext *s)
 but it causes issues for H.264 on certain AMD GPUs..... */
 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO)
 surface_alignment = 32;
-/* the HEVC DXVA2 spec asks for 128 pixel aligned surfaces to ensure
-all coding features have enough room to work with */
-else if (s->codec_id == AV_CODEC_ID_HEVC)
-surface_alignment = 128;
 else
 surface_alignment = 16;

@@ -541,7 +533,7 @@ static int dxva2_create_decoder(AVCodecContext *s)
 ctx->num_surfaces = 4;

 /* add surfaces based on number of possible refs */
-if (s->codec_id == AV_CODEC_ID_H264 || s->codec_id == AV_CODEC_ID_HEVC)
+if (s->codec_id == AV_CODEC_ID_H264)
 ctx->num_surfaces += 16;
 else
 ctx->num_surfaces += 2;
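The last two DXVA2 hunks drop the HEVC cases from the surface sizing. A sketch of the per-codec choices as they stand on the '-' side, folded into one hypothetical helper (the output parameters are illustrative, not the dxva2 context's real layout):

```c
#include <libavcodec/avcodec.h>

/* Surface alignment and pool size per codec, as on the '-' side of the
 * hunks above: MPEG-2 wants 32-pixel alignment, HEVC 128, everything else
 * 16; codecs with long reference lists get a much larger surface pool. */
static void pick_surface_params(enum AVCodecID codec_id,
                                int *surface_alignment, int *num_surfaces)
{
    if (codec_id == AV_CODEC_ID_MPEG2VIDEO)
        *surface_alignment = 32;
    else if (codec_id == AV_CODEC_ID_HEVC)
        *surface_alignment = 128;
    else
        *surface_alignment = 16;

    *num_surfaces = 4;                    /* base pool */
    if (codec_id == AV_CODEC_ID_H264 || codec_id == AV_CODEC_ID_HEVC)
        *num_surfaces += 16;              /* room for many reference frames */
    else
        *num_surfaces += 2;
}
```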
@@ -383,8 +383,9 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
 snprintf(name, sizeof(name), "pixel format for output stream %d:%d",
 ost->file_index, ost->index);
 ret = avfilter_graph_create_filter(&filter,
 avfilter_get_by_name("format"),
-"format", pix_fmts, NULL, fg->graph);
+"format", pix_fmts, NULL,
+fg->graph);
 av_freep(&pix_fmts);
 if (ret < 0)
 return ret;
@@ -619,7 +620,6 @@ static int sub2video_prepare(InputStream *ist)
 ist->sub2video.frame = av_frame_alloc();
 if (!ist->sub2video.frame)
 return AVERROR(ENOMEM);
-ist->sub2video.last_pts = INT64_MIN;
 return 0;
 }

@@ -637,7 +637,6 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
 AVBPrint args;
 char name[255];
 int ret, pad_idx = 0;
-int64_t tsoffset = 0;

 if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
 av_log(NULL, AV_LOG_ERROR, "Cannot connect video filter to audio input\n");
@@ -712,14 +711,8 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,

 snprintf(name, sizeof(name), "trim for input stream %d:%d",
 ist->file_index, ist->st->index);
-if (copy_ts) {
-tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
-if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
-tsoffset += f->ctx->start_time;
-}
 ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
-AV_NOPTS_VALUE : tsoffset, f->recording_time,
-&last_filter, &pad_idx, name);
+AV_NOPTS_VALUE : 0, f->recording_time, &last_filter, &pad_idx, name);
 if (ret < 0)
 return ret;

@@ -738,7 +731,6 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
 AVBPrint args;
 char name[255];
 int ret, pad_idx = 0;
-int64_t tsoffset = 0;

 if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO) {
 av_log(NULL, AV_LOG_ERROR, "Cannot connect audio filter to non audio input\n");
@@ -821,14 +813,8 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,

 snprintf(name, sizeof(name), "trim for input stream %d:%d",
 ist->file_index, ist->st->index);
-if (copy_ts) {
-tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
-if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
-tsoffset += f->ctx->start_time;
-}
 ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
-AV_NOPTS_VALUE : tsoffset, f->recording_time,
-&last_filter, &pad_idx, name);
+AV_NOPTS_VALUE : 0, f->recording_time, &last_filter, &pad_idx, name);
 if (ret < 0)
 return ret;

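Both trim hunks in this file drop the same -copyts handling on the '+' side. A sketch of what that handling computes, with a hypothetical FileTimes struct standing in for the InputFile fields the real code reads:

```c
#include <stdint.h>
#include <libavutil/avutil.h>   /* AV_NOPTS_VALUE */

/* Hypothetical stand-in for the InputFile fields used by the trim setup. */
typedef struct FileTimes {
    int64_t start_time;      /* user-requested -ss, in AV_TIME_BASE units */
    int64_t demuxer_start;   /* what f->ctx->start_time reports */
} FileTimes;

/* With -copyts the trim point is expressed in the input's own timestamps,
 * shifted by the demuxer start unless -start_at_zero is in effect. */
static int64_t trim_start_offset(const FileTimes *f, int copy_ts, int start_at_zero)
{
    int64_t tsoffset = 0;
    if (copy_ts) {
        tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
        if (!start_at_zero && f->demuxer_start != AV_NOPTS_VALUE)
            tsoffset += f->demuxer_start;
    }
    return tsoffset;
}
```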
123 ffmpeg_opt.c
@@ -77,7 +77,6 @@ const HWAccel hwaccels[] = {
 };

 char *vstats_filename;
-char *sdp_filename;

 float audio_drift_threshold = 0.1;
 float dts_delta_threshold = 10;
@@ -86,14 +85,12 @@ float dts_error_threshold = 3600*30;
 int audio_volume = 256;
 int audio_sync_method = 0;
 int video_sync_method = VSYNC_AUTO;
-float frame_drop_threshold = 0;
 int do_deinterlace = 0;
 int do_benchmark = 0;
 int do_benchmark_all = 0;
 int do_hex_dump = 0;
 int do_pkt_dump = 0;
 int copy_ts = 0;
-int start_at_zero = 0;
 int copy_tb = -1;
 int debug_ts = 0;
 int exit_on_error = 0;
@@ -233,8 +230,6 @@ static int opt_map(void *optctx, const char *opt, const char *arg)
 arg++;
 }
 map = av_strdup(arg);
-if (!map)
-return AVERROR(ENOMEM);

 /* parse sync stream first, just pick first matching stream */
 if (sync = strchr(map, ',')) {
@@ -383,13 +378,6 @@ static int opt_map_channel(void *optctx, const char *opt, const char *arg)
 return 0;
 }

-static int opt_sdp_file(void *optctx, const char *opt, const char *arg)
-{
-av_free(sdp_filename);
-sdp_filename = av_strdup(arg);
-return 0;
-}
-
 /**
 * Parse a metadata specifier passed as 'arg' parameter.
 * @param arg metadata string to parse
@@ -520,8 +508,7 @@ static int opt_recording_timestamp(void *optctx, const char *opt, const char *ar
 char buf[128];
 int64_t recording_timestamp = parse_time_or_die(opt, arg, 0) / 1E6;
 struct tm time = *gmtime((time_t*)&recording_timestamp);
-if (!strftime(buf, sizeof(buf), "creation_time=%Y-%m-%dT%H:%M:%S%z", &time))
-return -1;
+strftime(buf, sizeof(buf), "creation_time=%FT%T%z", &time);
 parse_option(o, "metadata", buf, options);

 av_log(NULL, AV_LOG_WARNING, "%s is deprecated, set the 'creation_time' metadata "
@@ -715,7 +702,7 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
 MATCH_PER_STREAM_OPT(fix_sub_duration, i, ist->fix_sub_duration, ic, st);
 MATCH_PER_STREAM_OPT(canvas_sizes, str, canvas_size, ic, st);
 if (canvas_size &&
-av_parse_video_size(&ist->dec_ctx->width, &ist->dec_ctx->height, canvas_size) < 0) {
+av_parse_video_size(&dec->width, &dec->height, canvas_size) < 0) {
 av_log(NULL, AV_LOG_FATAL, "Invalid canvas size: %s.\n", canvas_size);
 exit_program(1);
 }
@@ -805,8 +792,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
 char * video_codec_name = NULL;
 char * audio_codec_name = NULL;
 char *subtitle_codec_name = NULL;
-char * data_codec_name = NULL;
-int scan_all_pmts_set = 0;

 if (o->format) {
 if (!(file_iformat = av_find_input_format(o->format))) {
@@ -859,7 +844,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
 MATCH_PER_TYPE_OPT(codec_names, str, video_codec_name, ic, "v");
 MATCH_PER_TYPE_OPT(codec_names, str, audio_codec_name, ic, "a");
 MATCH_PER_TYPE_OPT(codec_names, str, subtitle_codec_name, ic, "s");
-MATCH_PER_TYPE_OPT(codec_names, str, data_codec_name, ic, "d");

 ic->video_codec_id = video_codec_name ?
 find_codec_or_die(video_codec_name , AVMEDIA_TYPE_VIDEO , 0)->id : AV_CODEC_ID_NONE;
@@ -867,8 +851,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
 find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0)->id : AV_CODEC_ID_NONE;
 ic->subtitle_codec_id= subtitle_codec_name ?
 find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0)->id : AV_CODEC_ID_NONE;
-ic->data_codec_id = data_codec_name ?
-find_codec_or_die(data_codec_name, AVMEDIA_TYPE_DATA, 0)->id : AV_CODEC_ID_NONE;

 if (video_codec_name)
 av_format_set_video_codec (ic, find_codec_or_die(video_codec_name , AVMEDIA_TYPE_VIDEO , 0));
@@ -876,24 +858,16 @@ static int open_input_file(OptionsContext *o, const char *filename)
 av_format_set_audio_codec (ic, find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0));
 if (subtitle_codec_name)
 av_format_set_subtitle_codec(ic, find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0));
-if (data_codec_name)
-av_format_set_data_codec(ic, find_codec_or_die(data_codec_name, AVMEDIA_TYPE_DATA, 0));

 ic->flags |= AVFMT_FLAG_NONBLOCK;
 ic->interrupt_callback = int_cb;

-if (!av_dict_get(o->g->format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
-av_dict_set(&o->g->format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
-scan_all_pmts_set = 1;
-}
 /* open the input file with generic avformat function */
 err = avformat_open_input(&ic, filename, file_iformat, &o->g->format_opts);
 if (err < 0) {
 print_error(filename, err);
 exit_program(1);
 }
-if (scan_all_pmts_set)
-av_dict_set(&o->g->format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
 remove_avoptions(&o->g->format_opts, o->g->codec_opts);
 assert_avoptions(o->g->format_opts);

@@ -947,13 +921,10 @@ static int open_input_file(OptionsContext *o, const char *filename)
 f->start_time = o->start_time;
 f->recording_time = o->recording_time;
 f->input_ts_offset = o->input_ts_offset;
-f->ts_offset = o->input_ts_offset - (copy_ts ? (start_at_zero && ic->start_time != AV_NOPTS_VALUE ? ic->start_time : 0) : timestamp);
+f->ts_offset = o->input_ts_offset - (copy_ts ? 0 : timestamp);
 f->nb_streams = ic->nb_streams;
 f->rate_emu = o->rate_emu;
 f->accurate_seek = o->accurate_seek;
-#if HAVE_PTHREADS
-f->thread_queue_size = o->thread_queue_size > 0 ? o->thread_queue_size : 8;
-#endif

 /* check if all codec options have been used */
 unused_opts = strip_specifiers(o->g->codec_opts);
@@ -1131,7 +1102,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
 av_dict_set(&ost->encoder_opts, buf, arg, AV_DICT_DONT_OVERWRITE);
 av_free(buf);
 } while (!s->eof_reached);
-avio_closep(&s);
+avio_close(s);
 }
 if (ret) {
 av_log(NULL, AV_LOG_FATAL,
@@ -1158,11 +1129,8 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e

 MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
 while (bsf) {
-char *arg = NULL;
 if (next = strchr(bsf, ','))
 *next++ = 0;
-if (arg = strchr(bsf, '='))
-*arg++ = 0;
 if (!(bsfc = av_bitstream_filter_init(bsf))) {
 av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
 exit_program(1);
@@ -1171,7 +1139,6 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
 bsfc_prev->next = bsfc;
 else
 ost->bitstream_filters = bsfc;
-av_dict_set(&ost->bsf_args, bsfc->filter->name, arg, 0);

 bsfc_prev = bsfc;
 bsf = next;
@@ -1191,9 +1158,6 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
 ost->enc_ctx->global_quality = FF_QP2LAMBDA * qscale;
 }

-MATCH_PER_STREAM_OPT(disposition, str, ost->disposition, oc, st);
-ost->disposition = av_strdup(ost->disposition);
-
 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
 ost->enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

@@ -1312,8 +1276,6 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
 av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
 exit_program(1);
 }
-if (frame_rate && video_sync_method == VSYNC_PASSTHROUGH)
-av_log(NULL, AV_LOG_ERROR, "Using -vsync 0 and -r can produce invalid output files\n");

 MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
 if (frame_aspect_ratio) {
@@ -1394,13 +1356,10 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
 av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
 exit_program(1);
 }
+/* FIXME realloc failure */
 video_enc->rc_override =
-av_realloc_array(video_enc->rc_override,
-i + 1, sizeof(RcOverride));
-if (!video_enc->rc_override) {
-av_log(NULL, AV_LOG_FATAL, "Could not (re)allocate memory for rc_override.\n");
-exit_program(1);
-}
+av_realloc(video_enc->rc_override,
+sizeof(RcOverride) * (i + 1));
 video_enc->rc_override[i].start_frame = start;
 video_enc->rc_override[i].end_frame = end;
 if (q > 0) {
@@ -1661,36 +1620,27 @@ static int read_ffserver_streams(OptionsContext *o, AVFormatContext *s, const ch
 AVStream *st;
 OutputStream *ost;
 AVCodec *codec;
-const char *enc_config;
+AVCodecContext *avctx;

 codec = avcodec_find_encoder(ic->streams[i]->codec->codec_id);
-if (!codec) {
-av_log(s, AV_LOG_ERROR, "no encoder found for codec id %i\n", ic->streams[i]->codec->codec_id);
-return AVERROR(EINVAL);
-}
-if (codec->type == AVMEDIA_TYPE_AUDIO)
-opt_audio_codec(o, "c:a", codec->name);
-else if (codec->type == AVMEDIA_TYPE_VIDEO)
-opt_video_codec(o, "c:v", codec->name);
 ost = new_output_stream(o, s, codec->type, -1);
 st = ost->st;
+avctx = st->codec;
+ost->enc = codec;

-avcodec_get_context_defaults3(st->codec, codec);
-enc_config = av_stream_get_recommended_encoder_configuration(ic->streams[i]);
-if (enc_config) {
-AVDictionary *opts = NULL;
-av_dict_parse_string(&opts, enc_config, "=", ",", 0);
-av_opt_set_dict2(st->codec, &opts, AV_OPT_SEARCH_CHILDREN);
-av_dict_free(&opts);
-}
+// FIXME: a more elegant solution is needed
+memcpy(st, ic->streams[i], sizeof(AVStream));
+st->cur_dts = 0;
+st->info = av_malloc(sizeof(*st->info));
+memcpy(st->info, ic->streams[i]->info, sizeof(*st->info));
+st->codec= avctx;
+avcodec_copy_context(st->codec, ic->streams[i]->codec);

 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && !ost->stream_copy)
 choose_sample_fmt(st, codec);
 else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && !ost->stream_copy)
 choose_pixel_fmt(st, st->codec, codec, st->codec->pix_fmt);
 avcodec_copy_context(ost->enc_ctx, st->codec);
-if (enc_config)
-av_dict_parse_string(&ost->encoder_opts, enc_config, "=", ",", 0);
 }

 avformat_close_input(&ic);
@@ -1778,8 +1728,8 @@ static int open_output_file(OptionsContext *o, const char *filename)
 if (o->stop_time != INT64_MAX && o->recording_time == INT64_MAX) {
 int64_t start_time = o->start_time == AV_NOPTS_VALUE ? 0 : o->start_time;
 if (o->stop_time <= start_time) {
-av_log(NULL, AV_LOG_ERROR, "-to value smaller than -ss; aborting.\n");
-exit_program(1);
+av_log(NULL, AV_LOG_WARNING, "-to value smaller than -ss; ignoring -to.\n");
+o->stop_time = INT64_MAX;
 } else {
 o->recording_time = o->stop_time - start_time;
 }
@@ -1941,15 +1891,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
 }
 }
 }
-/* Data only if codec id match */
-if (!o->data_disable ) {
-enum AVCodecID codec_id = av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_DATA);
-for (i = 0; codec_id != AV_CODEC_ID_NONE && i < nb_input_streams; i++) {
-if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_DATA
-&& input_streams[i]->st->codec->codec_id == codec_id )
-new_data_stream(o, oc, i);
-}
-}
+/* do something with data? */
 } else {
 for (i = 0; i < o->nb_stream_maps; i++) {
 StreamMap *map = &o->stream_maps[i];
@@ -2040,7 +1982,7 @@ loop_end:

 p = strrchr(o->attachments[i], '/');
 av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
-avio_closep(&pb);
+avio_close(pb);
 }

 for (i = nb_output_streams - oc->nb_streams; i < nb_output_streams; i++) { //for all streams of this output file
@@ -2865,7 +2807,7 @@ const OptionDef options[] = {
 "add metadata", "string=string" },
 { "dframes", HAS_ARG | OPT_PERFILE | OPT_EXPERT |
 OPT_OUTPUT, { .func_arg = opt_data_frames },
-"set the number of data frames to output", "number" },
+"set the number of data frames to record", "number" },
 { "benchmark", OPT_BOOL | OPT_EXPERT, { &do_benchmark },
 "add timings for benchmarking" },
 { "benchmark_all", OPT_BOOL | OPT_EXPERT, { &do_benchmark_all },
@@ -2888,16 +2830,12 @@ const OptionDef options[] = {
 " \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
 { "vsync", HAS_ARG | OPT_EXPERT, { opt_vsync },
 "video sync method", "" },
-{ "frame_drop_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &frame_drop_threshold },
-"frame drop threshold", "" },
 { "async", HAS_ARG | OPT_INT | OPT_EXPERT, { &audio_sync_method },
 "audio sync method", "" },
 { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &audio_drift_threshold },
 "audio drift threshold", "threshold" },
 { "copyts", OPT_BOOL | OPT_EXPERT, { &copy_ts },
 "copy timestamps" },
-{ "start_at_zero", OPT_BOOL | OPT_EXPERT, { &start_at_zero },
-"shift input timestamps to start at 0 when using copyts" },
 { "copytb", HAS_ARG | OPT_INT | OPT_EXPERT, { &copy_tb },
 "copy input stream time base when stream copying", "mode" },
 { "shortest", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
@@ -2918,7 +2856,7 @@ const OptionDef options[] = {
 { "copypriorss", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(copy_prior_start) },
 "copy or discard frames before start time" },
 { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(max_frames) },
-"set the number of frames to output", "number" },
+"set the number of frames to record", "number" },
 { "tag", OPT_STRING | HAS_ARG | OPT_SPEC |
 OPT_EXPERT | OPT_OUTPUT | OPT_INPUT, { .off = OFFSET(codec_tags) },
 "force codec tag/fourcc", "fourcc/tag" },
@@ -2957,16 +2895,10 @@ const OptionDef options[] = {
 { "discard", OPT_STRING | HAS_ARG | OPT_SPEC |
 OPT_INPUT, { .off = OFFSET(discard) },
 "discard", "" },
-{ "disposition", OPT_STRING | HAS_ARG | OPT_SPEC |
-OPT_OUTPUT, { .off = OFFSET(disposition) },
-"disposition", "" },
-{ "thread_queue_size", HAS_ARG | OPT_INT | OPT_OFFSET | OPT_EXPERT | OPT_INPUT,
-{ .off = OFFSET(thread_queue_size) },
-"set the maximum number of queued packets from the demuxer" },

 /* video options */
 { "vframes", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_frames },
-"set the number of video frames to output", "number" },
+"set the number of video frames to record", "number" },
 { "r", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_SPEC |
 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_rates) },
 "set frame rate (Hz value, fraction or abbreviation)", "rate" },
@@ -3048,13 +2980,10 @@ const OptionDef options[] = {
 { "hwaccel_device", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
 OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_devices) },
 "select a device for HW acceleration" "devicename" },
-#if HAVE_VDPAU_X11
-{ "vdpau_api_ver", HAS_ARG | OPT_INT | OPT_EXPERT, { &vdpau_api_ver }, "" },
-#endif

 /* audio options */
 { "aframes", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_frames },
-"set the number of audio frames to output", "number" },
+"set the number of audio frames to record", "number" },
 { "aq", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_qscale },
 "set audio quality (codec-specific)", "quality", },
 { "ar", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC |
@@ -3110,8 +3039,6 @@ const OptionDef options[] = {
 "set the initial demux-decode delay", "seconds" },
 { "override_ffserver", OPT_BOOL | OPT_EXPERT | OPT_OUTPUT, { &override_ffserver },
 "override the options from ffserver", "" },
-{ "sdp_file", HAS_ARG | OPT_EXPERT | OPT_OUTPUT, { opt_sdp_file },
-"specify a file in which to print sdp information", "file" },

 { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(bitstream_filters) },
 "A comma-separated list of bitstream filters", "bitstream_filters" },
@@ -42,11 +42,9 @@ typedef struct VDPAUContext {
     VdpGetErrorString *get_error_string;
     VdpGetInformationString *get_information_string;
     VdpDeviceDestroy *device_destroy;
-#if 1 // for ffmpegs older vdpau API, not the oldest though
     VdpDecoderCreate *decoder_create;
     VdpDecoderDestroy *decoder_destroy;
     VdpDecoderRender *decoder_render;
-#endif
     VdpVideoSurfaceCreate *video_surface_create;
     VdpVideoSurfaceDestroy *video_surface_destroy;
     VdpVideoSurfaceGetBitsYCbCr *video_surface_get_bits;
@@ -59,8 +57,6 @@ typedef struct VDPAUContext {
     VdpYCbCrFormat vdpau_format;
 } VDPAUContext;

-int vdpau_api_ver = 2;
-
 static void vdpau_uninit(AVCodecContext *s)
 {
     InputStream *ist = s->opaque;
@@ -100,14 +96,9 @@ static int vdpau_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
     VDPAUContext *ctx = ist->hwaccel_ctx;
     VdpVideoSurface *surface;
     VdpStatus err;
-    VdpChromaType chroma;
-    uint32_t width, height;

     av_assert0(frame->format == AV_PIX_FMT_VDPAU);

-    if (av_vdpau_get_surface_parameters(s, &chroma, &width, &height))
-        return AVERROR(ENOSYS);
-
     surface = av_malloc(sizeof(*surface));
     if (!surface)
         return AVERROR(ENOMEM);
@@ -123,8 +114,8 @@ static int vdpau_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
     // properly we should keep a pool of surfaces instead of creating
     // them anew for each frame, but since we don't care about speed
     // much in this code, we don't bother
-    err = ctx->video_surface_create(ctx->device, chroma, width, height,
-                                    surface);
+    err = ctx->video_surface_create(ctx->device, VDP_CHROMA_TYPE_420,
+                                    frame->width, frame->height, surface);
     if (err != VDP_STATUS_OK) {
         av_log(NULL, AV_LOG_ERROR, "Error allocating a VDPAU video surface: %s\n",
                ctx->get_error_string(err));
@@ -248,11 +239,9 @@ do {
     GET_CALLBACK(VDP_FUNC_ID_GET_ERROR_STRING, get_error_string);
     GET_CALLBACK(VDP_FUNC_ID_GET_INFORMATION_STRING, get_information_string);
     GET_CALLBACK(VDP_FUNC_ID_DEVICE_DESTROY, device_destroy);
-    if (vdpau_api_ver == 1) {
-        GET_CALLBACK(VDP_FUNC_ID_DECODER_CREATE, decoder_create);
-        GET_CALLBACK(VDP_FUNC_ID_DECODER_DESTROY, decoder_destroy);
-        GET_CALLBACK(VDP_FUNC_ID_DECODER_RENDER, decoder_render);
-    }
+    GET_CALLBACK(VDP_FUNC_ID_DECODER_CREATE, decoder_create);
+    GET_CALLBACK(VDP_FUNC_ID_DECODER_DESTROY, decoder_destroy);
+    GET_CALLBACK(VDP_FUNC_ID_DECODER_RENDER, decoder_render);
     GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_CREATE, video_surface_create);
     GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_DESTROY, video_surface_destroy);
     GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR, video_surface_get_bits);
@@ -281,16 +270,12 @@ do {
     ctx->vdpau_format = vdpau_formats[i][0];
     ctx->pix_fmt = vdpau_formats[i][1];

-    if (vdpau_api_ver == 1) {
-        vdpau_ctx = av_vdpau_alloc_context();
-        if (!vdpau_ctx)
-            goto fail;
-        vdpau_ctx->render = ctx->decoder_render;
-
-        s->hwaccel_context = vdpau_ctx;
-    } else
-    if (av_vdpau_bind_context(s, ctx->device, ctx->get_proc_address, 0))
+    vdpau_ctx = av_vdpau_alloc_context();
+    if (!vdpau_ctx)
         goto fail;
+    vdpau_ctx->render = ctx->decoder_render;
+
+    s->hwaccel_context = vdpau_ctx;

     ctx->get_information_string(&vendor);
     av_log(NULL, AV_LOG_VERBOSE, "Using VDPAU -- %s -- on X11 display %s, "
@@ -306,7 +291,7 @@ fail:
     return AVERROR(EINVAL);
 }

-static int vdpau_old_init(AVCodecContext *s)
+int vdpau_init(AVCodecContext *s)
 {
     InputStream *ist = s->opaque;
     int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
@@ -348,22 +333,3 @@ static int vdpau_old_init(AVCodecContext *s)

     return 0;
 }
-
-int vdpau_init(AVCodecContext *s)
-{
-    InputStream *ist = s->opaque;
-
-    if (vdpau_api_ver == 1)
-        return vdpau_old_init(s);
-
-    if (!ist->hwaccel_ctx) {
-        int ret = vdpau_alloc(s);
-        if (ret < 0)
-            return ret;
-    }
-
-    ist->hwaccel_get_buffer = vdpau_get_buffer;
-    ist->hwaccel_retrieve_data = vdpau_retrieve_data;
-
-    return 0;
-}
134 ffprobe.c
@@ -66,9 +66,6 @@ static int do_show_stream_disposition = 0;
 static int do_show_data = 0;
 static int do_show_program_version = 0;
 static int do_show_library_versions = 0;
-static int do_show_pixel_formats = 0;
-static int do_show_pixel_format_flags = 0;
-static int do_show_pixel_format_components = 0;

 static int do_show_chapter_tags = 0;
 static int do_show_format_tags = 0;
@@ -86,7 +83,7 @@ static char *print_format;
 static char *stream_specifier;
 static char *show_data_hash;

-typedef struct ReadInterval {
+typedef struct {
     int id; ///< identifier
     int64_t start, end; ///< start, end in second/AV_TIME_BASE units
     int has_start, has_end;
@@ -135,11 +132,6 @@ typedef enum {
     SECTION_ID_PACKET,
     SECTION_ID_PACKETS,
     SECTION_ID_PACKETS_AND_FRAMES,
-    SECTION_ID_PIXEL_FORMAT,
-    SECTION_ID_PIXEL_FORMAT_FLAGS,
-    SECTION_ID_PIXEL_FORMAT_COMPONENT,
-    SECTION_ID_PIXEL_FORMAT_COMPONENTS,
-    SECTION_ID_PIXEL_FORMATS,
     SECTION_ID_PROGRAM_STREAM_DISPOSITION,
     SECTION_ID_PROGRAM_STREAM_TAGS,
     SECTION_ID_PROGRAM,
@@ -173,11 +165,6 @@ static struct section sections[] = {
     [SECTION_ID_PACKETS] = { SECTION_ID_PACKETS, "packets", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET, -1} },
     [SECTION_ID_PACKETS_AND_FRAMES] = { SECTION_ID_PACKETS_AND_FRAMES, "packets_and_frames", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET, -1} },
     [SECTION_ID_PACKET] = { SECTION_ID_PACKET, "packet", 0, { -1 } },
-    [SECTION_ID_PIXEL_FORMATS] = { SECTION_ID_PIXEL_FORMATS, "pixel_formats", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PIXEL_FORMAT, -1 } },
-    [SECTION_ID_PIXEL_FORMAT] = { SECTION_ID_PIXEL_FORMAT, "pixel_format", 0, { SECTION_ID_PIXEL_FORMAT_FLAGS, SECTION_ID_PIXEL_FORMAT_COMPONENTS, -1 } },
-    [SECTION_ID_PIXEL_FORMAT_FLAGS] = { SECTION_ID_PIXEL_FORMAT_FLAGS, "flags", 0, { -1 }, .unique_name = "pixel_format_flags" },
-    [SECTION_ID_PIXEL_FORMAT_COMPONENTS] = { SECTION_ID_PIXEL_FORMAT_COMPONENTS, "components", SECTION_FLAG_IS_ARRAY, {SECTION_ID_PIXEL_FORMAT_COMPONENT, -1 }, .unique_name = "pixel_format_components" },
-    [SECTION_ID_PIXEL_FORMAT_COMPONENT] = { SECTION_ID_PIXEL_FORMAT_COMPONENT, "component", 0, { -1 } },
     [SECTION_ID_PROGRAM_STREAM_DISPOSITION] = { SECTION_ID_PROGRAM_STREAM_DISPOSITION, "disposition", 0, { -1 }, .unique_name = "program_stream_disposition" },
     [SECTION_ID_PROGRAM_STREAM_TAGS] = { SECTION_ID_PROGRAM_STREAM_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "program_stream_tags" },
     [SECTION_ID_PROGRAM] = { SECTION_ID_PROGRAM, "program", 0, { SECTION_ID_PROGRAM_TAGS, SECTION_ID_PROGRAM_STREAMS, -1 } },
@@ -188,8 +175,7 @@ static struct section sections[] = {
     [SECTION_ID_PROGRAMS] = { SECTION_ID_PROGRAMS, "programs", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PROGRAM, -1 } },
     [SECTION_ID_ROOT] = { SECTION_ID_ROOT, "root", SECTION_FLAG_IS_WRAPPER,
                           { SECTION_ID_CHAPTERS, SECTION_ID_FORMAT, SECTION_ID_FRAMES, SECTION_ID_PROGRAMS, SECTION_ID_STREAMS,
-                            SECTION_ID_PACKETS, SECTION_ID_ERROR, SECTION_ID_PROGRAM_VERSION, SECTION_ID_LIBRARY_VERSIONS,
-                            SECTION_ID_PIXEL_FORMATS, -1} },
+                            SECTION_ID_PACKETS, SECTION_ID_ERROR, SECTION_ID_PROGRAM_VERSION, SECTION_ID_LIBRARY_VERSIONS, -1} },
     [SECTION_ID_STREAMS] = { SECTION_ID_STREAMS, "streams", SECTION_FLAG_IS_ARRAY, { SECTION_ID_STREAM, -1 } },
     [SECTION_ID_STREAM] = { SECTION_ID_STREAM, "stream", 0, { SECTION_ID_STREAM_DISPOSITION, SECTION_ID_STREAM_TAGS, -1 } },
     [SECTION_ID_STREAM_DISPOSITION] = { SECTION_ID_STREAM_DISPOSITION, "disposition", 0, { -1 }, .unique_name = "stream_disposition" },
@@ -338,7 +324,7 @@ struct WriterContext {
     unsigned int nb_section_frame; ///< number of the frame section in case we are in "packets_and_frames" section
     unsigned int nb_section_packet_frame; ///< nb_section_packet or nb_section_frame according if is_packets_and_frames

-    int string_validation;
+    StringValidation string_validation;
     char *string_validation_replacement;
     unsigned int string_validation_utf8_flags;
 };
@@ -1192,7 +1178,7 @@ static const Writer flat_writer = {

 /* INI format output */

-typedef struct INIContext {
+typedef struct {
     const AVClass *class;
     int hierarchical;
 } INIContext;
@@ -1296,7 +1282,7 @@ static const Writer ini_writer = {

 /* JSON output */

-typedef struct JSONContext {
+typedef struct {
     const AVClass *class;
     int indent_level;
     int compact;
@@ -1458,7 +1444,7 @@ static const Writer json_writer = {

 /* XML output */

-typedef struct XMLContext {
+typedef struct {
     const AVClass *class;
     int within_tag;
     int indent_level;
@@ -2112,28 +2098,12 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
         else print_str_opt("pix_fmt", "unknown");
         print_int("level", dec_ctx->level);
         if (dec_ctx->color_range != AVCOL_RANGE_UNSPECIFIED)
-            print_str ("color_range", av_color_range_name(dec_ctx->color_range));
+            print_str ("color_range", dec_ctx->color_range == AVCOL_RANGE_MPEG ? "tv": "pc");
         else
             print_str_opt("color_range", "N/A");
         s = av_get_colorspace_name(dec_ctx->colorspace);
         if (s) print_str ("color_space", s);
         else print_str_opt("color_space", "unknown");
-
-        if (dec_ctx->color_trc != AVCOL_TRC_UNSPECIFIED)
-            print_str("color_transfer", av_color_transfer_name(dec_ctx->color_trc));
-        else
-            print_str_opt("color_transfer", av_color_transfer_name(dec_ctx->color_trc));
-
-        if (dec_ctx->color_primaries != AVCOL_PRI_UNSPECIFIED)
-            print_str("color_primaries", av_color_primaries_name(dec_ctx->color_primaries));
-        else
-            print_str_opt("color_primaries", av_color_primaries_name(dec_ctx->color_primaries));
-
-        if (dec_ctx->chroma_sample_location != AVCHROMA_LOC_UNSPECIFIED)
-            print_str("chroma_location", av_chroma_location_name(dec_ctx->chroma_sample_location));
-        else
-            print_str_opt("chroma_location", av_chroma_location_name(dec_ctx->chroma_sample_location));
-
         if (dec_ctx->timecode_frame_start >= 0) {
             char tcbuf[AV_TIMECODE_STR_SIZE];
             av_timecode_make_mpeg_tc_string(tcbuf, dec_ctx->timecode_frame_start);
@@ -2141,7 +2111,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
         } else {
             print_str_opt("timecode", "N/A");
         }
-        print_int("refs", dec_ctx->refs);
         break;

     case AVMEDIA_TYPE_AUDIO:
@@ -2387,20 +2356,12 @@ static int open_input_file(AVFormatContext **fmt_ctx_ptr, const char *filename)
     AVFormatContext *fmt_ctx = NULL;
     AVDictionaryEntry *t;
     AVDictionary **opts;
-    int scan_all_pmts_set = 0;

-    if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
-        av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
-        scan_all_pmts_set = 1;
-    }
     if ((err = avformat_open_input(&fmt_ctx, filename,
                                    iformat, &format_opts)) < 0) {
         print_error(filename, err);
         return err;
     }
-    *fmt_ctx_ptr = fmt_ctx;
-    if (scan_all_pmts_set)
-        av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
         return AVERROR_OPTION_NOT_FOUND;
@@ -2410,16 +2371,13 @@ static int open_input_file(AVFormatContext **fmt_ctx_ptr, const char *filename)
     opts = setup_find_stream_info_opts(fmt_ctx, codec_opts);
     orig_nb_streams = fmt_ctx->nb_streams;

-    err = avformat_find_stream_info(fmt_ctx, opts);
-
-    for (i = 0; i < orig_nb_streams; i++)
-        av_dict_free(&opts[i]);
-    av_freep(&opts);
-
-    if (err < 0) {
+    if ((err = avformat_find_stream_info(fmt_ctx, opts)) < 0) {
         print_error(filename, err);
         return err;
     }
-
+    for (i = 0; i < orig_nb_streams; i++)
+        av_dict_free(&opts[i]);
+    av_freep(&opts);
+
     av_dump_format(fmt_ctx, 0, filename, 0);

@@ -2470,7 +2428,7 @@ static void close_input_file(AVFormatContext **ctx_ptr)

 static int probe_file(WriterContext *wctx, const char *filename)
 {
-    AVFormatContext *fmt_ctx = NULL;
+    AVFormatContext *fmt_ctx;
     int ret, i;
     int section_id;

@@ -2479,7 +2437,7 @@ static int probe_file(WriterContext *wctx, const char *filename)

     ret = open_input_file(&fmt_ctx, filename);
     if (ret < 0)
-        goto end;
+        return ret;

 #define CHECK_END if (ret < 0) goto end

@@ -2537,8 +2495,7 @@ static int probe_file(WriterContext *wctx, const char *filename)
     }

 end:
-    if (fmt_ctx)
-        close_input_file(&fmt_ctx);
+    close_input_file(&fmt_ctx);
     av_freep(&nb_streams_frames);
     av_freep(&nb_streams_packets);
     av_freep(&selected_streams);
@@ -2562,6 +2519,8 @@ static void ffprobe_show_program_version(WriterContext *w)
     print_str("version", FFMPEG_VERSION);
     print_fmt("copyright", "Copyright (c) %d-%d the FFmpeg developers",
               program_birth_year, CONFIG_THIS_YEAR);
+    print_str("build_date", __DATE__);
+    print_str("build_time", __TIME__);
     print_str("compiler_ident", CC_IDENT);
     print_str("configuration", FFMPEG_CONFIGURATION);
     writer_print_section_footer(w);
@@ -2598,58 +2557,6 @@ static void ffprobe_show_library_versions(WriterContext *w)
     writer_print_section_footer(w);
 }

-#define PRINT_PIX_FMT_FLAG(flagname, name) \
-    do { \
-        print_int(name, !!(pixdesc->flags & AV_PIX_FMT_FLAG_##flagname)); \
-    } while (0)
-
-static void ffprobe_show_pixel_formats(WriterContext *w)
-{
-    const AVPixFmtDescriptor *pixdesc = NULL;
-    int i, n;
-
-    writer_print_section_header(w, SECTION_ID_PIXEL_FORMATS);
-    while (pixdesc = av_pix_fmt_desc_next(pixdesc)) {
-        writer_print_section_header(w, SECTION_ID_PIXEL_FORMAT);
-        print_str("name", pixdesc->name);
-        print_int("nb_components", pixdesc->nb_components);
-        if ((pixdesc->nb_components >= 3) && !(pixdesc->flags & AV_PIX_FMT_FLAG_RGB)) {
-            print_int ("log2_chroma_w", pixdesc->log2_chroma_w);
-            print_int ("log2_chroma_h", pixdesc->log2_chroma_h);
-        } else {
-            print_str_opt("log2_chroma_w", "N/A");
-            print_str_opt("log2_chroma_h", "N/A");
-        }
-        n = av_get_bits_per_pixel(pixdesc);
-        if (n) print_int ("bits_per_pixel", n);
-        else print_str_opt("bits_per_pixel", "N/A");
-        if (do_show_pixel_format_flags) {
-            writer_print_section_header(w, SECTION_ID_PIXEL_FORMAT_FLAGS);
-            PRINT_PIX_FMT_FLAG(BE, "big_endian");
-            PRINT_PIX_FMT_FLAG(PAL, "palette");
-            PRINT_PIX_FMT_FLAG(BITSTREAM, "bitstream");
-            PRINT_PIX_FMT_FLAG(HWACCEL, "hwaccel");
-            PRINT_PIX_FMT_FLAG(PLANAR, "planar");
-            PRINT_PIX_FMT_FLAG(RGB, "rgb");
-            PRINT_PIX_FMT_FLAG(PSEUDOPAL, "pseudopal");
-            PRINT_PIX_FMT_FLAG(ALPHA, "alpha");
-            writer_print_section_footer(w);
-        }
-        if (do_show_pixel_format_components && (pixdesc->nb_components > 0)) {
-            writer_print_section_header(w, SECTION_ID_PIXEL_FORMAT_COMPONENTS);
-            for (i = 0; i < pixdesc->nb_components; i++) {
-                writer_print_section_header(w, SECTION_ID_PIXEL_FORMAT_COMPONENT);
-                print_int("index", i + 1);
-                print_int("bit_depth", pixdesc->comp[i].depth_minus1 + 1);
-                writer_print_section_footer(w);
-            }
-            writer_print_section_footer(w);
-        }
-        writer_print_section_footer(w);
-    }
-    writer_print_section_footer(w);
-}
-
 static int opt_format(void *optctx, const char *opt, const char *arg)
 {
     iformat = av_find_input_format(arg);
@@ -2983,7 +2890,6 @@ DEFINE_OPT_SHOW_SECTION(format, FORMAT);
 DEFINE_OPT_SHOW_SECTION(frames, FRAMES);
 DEFINE_OPT_SHOW_SECTION(library_versions, LIBRARY_VERSIONS);
 DEFINE_OPT_SHOW_SECTION(packets, PACKETS);
-DEFINE_OPT_SHOW_SECTION(pixel_formats, PIXEL_FORMATS);
 DEFINE_OPT_SHOW_SECTION(program_version, PROGRAM_VERSION);
 DEFINE_OPT_SHOW_SECTION(streams, STREAMS);
 DEFINE_OPT_SHOW_SECTION(programs, PROGRAMS);
@@ -3022,7 +2928,6 @@ static const OptionDef real_options[] = {
     { "show_program_version", 0, {(void*)&opt_show_program_version}, "show ffprobe version" },
     { "show_library_versions", 0, {(void*)&opt_show_library_versions}, "show library versions" },
     { "show_versions", 0, {(void*)&opt_show_versions}, "show program and library versions" },
-    { "show_pixel_formats", 0, {(void*)&opt_show_pixel_formats}, "show pixel format descriptions" },
     { "show_private_data", OPT_BOOL, {(void*)&show_private_data}, "show private data" },
     { "private", OPT_BOOL, {(void*)&show_private_data}, "same as show_private_data" },
     { "bitexact", OPT_BOOL, {&do_bitexact}, "force bitexact output" },
@@ -3079,9 +2984,6 @@ int main(int argc, char **argv)
     SET_DO_SHOW(FRAMES, frames);
     SET_DO_SHOW(LIBRARY_VERSIONS, library_versions);
     SET_DO_SHOW(PACKETS, packets);
-    SET_DO_SHOW(PIXEL_FORMATS, pixel_formats);
-    SET_DO_SHOW(PIXEL_FORMAT_FLAGS, pixel_format_flags);
-    SET_DO_SHOW(PIXEL_FORMAT_COMPONENTS, pixel_format_components);
     SET_DO_SHOW(PROGRAM_VERSION, program_version);
     SET_DO_SHOW(PROGRAMS, programs);
     SET_DO_SHOW(STREAMS, streams);
@@ -3146,12 +3048,10 @@ int main(int argc, char **argv)
         ffprobe_show_program_version(wctx);
     if (do_show_library_versions)
         ffprobe_show_library_versions(wctx);
-    if (do_show_pixel_formats)
-        ffprobe_show_pixel_formats(wctx);

     if (!input_filename &&
         ((do_show_format || do_show_programs || do_show_streams || do_show_chapters || do_show_packets || do_show_error) ||
-         (!do_show_program_version && !do_show_library_versions && !do_show_pixel_formats))) {
+         (!do_show_program_version && !do_show_library_versions))) {
         show_usage();
         av_log(NULL, AV_LOG_ERROR, "You have to specify one input file.\n");
         av_log(NULL, AV_LOG_ERROR, "Use -h to get full help or, even better, run 'man %s'.\n", program_name);
2005 ffserver.c
File diff suppressed because it is too large

1313 ffserver_config.c
File diff suppressed because it is too large
@@ -1,133 +0,0 @@
-/*
- * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef FFSERVER_CONFIG_H
-#define FFSERVER_CONFIG_H
-
-#include "libavutil/dict.h"
-#include "libavformat/avformat.h"
-#include "libavformat/network.h"
-
-#define FFSERVER_MAX_STREAMS 20
-
-/* each generated stream is described here */
-enum FFServerStreamType {
-    STREAM_TYPE_LIVE,
-    STREAM_TYPE_STATUS,
-    STREAM_TYPE_REDIRECT,
-};
-
-enum FFServerIPAddressAction {
-    IP_ALLOW = 1,
-    IP_DENY,
-};
-
-typedef struct FFServerIPAddressACL {
-    struct FFServerIPAddressACL *next;
-    enum FFServerIPAddressAction action;
-    /* These are in host order */
-    struct in_addr first;
-    struct in_addr last;
-} FFServerIPAddressACL;
-
-/* description of each stream of the ffserver.conf file */
-typedef struct FFServerStream {
-    enum FFServerStreamType stream_type;
-    char filename[1024]; /* stream filename */
-    struct FFServerStream *feed; /* feed we are using (can be null if coming from file) */
-    AVDictionary *in_opts; /* input parameters */
-    AVDictionary *metadata; /* metadata to set on the stream */
-    AVInputFormat *ifmt; /* if non NULL, force input format */
-    AVOutputFormat *fmt;
-    FFServerIPAddressACL *acl;
-    char dynamic_acl[1024];
-    int nb_streams;
-    int prebuffer; /* Number of milliseconds early to start */
-    int64_t max_time; /* Number of milliseconds to run */
-    int send_on_key;
-    AVStream *streams[FFSERVER_MAX_STREAMS];
-    int feed_streams[FFSERVER_MAX_STREAMS]; /* index of streams in the feed */
-    char feed_filename[1024]; /* file name of the feed storage, or
-                                 input file name for a stream */
-    pid_t pid; /* Of ffmpeg process */
-    time_t pid_start; /* Of ffmpeg process */
-    char **child_argv;
-    struct FFServerStream *next;
-    unsigned bandwidth; /* bandwidth, in kbits/s */
-    /* RTSP options */
-    char *rtsp_option;
-    /* multicast specific */
-    int is_multicast;
-    struct in_addr multicast_ip;
-    int multicast_port; /* first port used for multicast */
-    int multicast_ttl;
-    int loop; /* if true, send the stream in loops (only meaningful if file) */
-
-    /* feed specific */
-    int feed_opened; /* true if someone is writing to the feed */
-    int is_feed; /* true if it is a feed */
-    int readonly; /* True if writing is prohibited to the file */
-    int truncate; /* True if feeder connection truncate the feed file */
-    int conns_served;
-    int64_t bytes_served;
-    int64_t feed_max_size; /* maximum storage size, zero means unlimited */
-    int64_t feed_write_index; /* current write position in feed (it wraps around) */
-    int64_t feed_size; /* current size of feed */
-    struct FFServerStream *next_feed;
-} FFServerStream;
-
-typedef struct FFServerConfig {
-    char *filename;
-    FFServerStream *first_feed; /* contains only feeds */
-    FFServerStream *first_stream; /* contains all streams, including feeds */
-    unsigned int nb_max_http_connections;
-    unsigned int nb_max_connections;
-    uint64_t max_bandwidth;
-    int debug;
-    char logfilename[1024];
-    struct sockaddr_in http_addr;
-    struct sockaddr_in rtsp_addr;
-    int errors;
-    int warnings;
-    int use_defaults;
-    // Following variables MUST NOT be used outside configuration parsing code.
-    enum AVCodecID guessed_audio_codec_id;
-    enum AVCodecID guessed_video_codec_id;
-    AVDictionary *video_opts; /* AVOptions for video encoder */
-    AVDictionary *audio_opts; /* AVOptions for audio encoder */
-    AVCodecContext *dummy_actx; /* Used internally to test audio AVOptions. */
-    AVCodecContext *dummy_vctx; /* Used internally to test video AVOptions. */
-    int no_audio;
-    int no_video;
-    int line_num;
-    int stream_use_defaults;
-} FFServerConfig;
-
-void ffserver_get_arg(char *buf, int buf_size, const char **pp);
-
-void ffserver_parse_acl_row(FFServerStream *stream, FFServerStream* feed,
-                            FFServerIPAddressACL *ext_acl,
-                            const char *p, const char *filename, int line_num);
-
-int ffserver_parse_ffconfig(const char *filename, FFServerConfig *config);
-
-void ffserver_free_child_args(void *argsp);
-
-#endif /* FFSERVER_CONFIG_H */
@@ -38,15 +38,15 @@ static av_cold int zero12v_decode_init(AVCodecContext *avctx)
 static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
                                 int *got_frame, AVPacket *avpkt)
 {
-    int line, ret;
+    int line = 0, ret;
     const int width = avctx->width;
     AVFrame *pic = data;
     uint16_t *y, *u, *v;
     const uint8_t *line_end, *src = avpkt->data;
     int stride = avctx->width * 8 / 3;

-    if (width <= 1 || avctx->height <= 0) {
-        av_log(avctx, AV_LOG_ERROR, "Dimensions %dx%d not supported.\n", width, avctx->height);
+    if (width == 1) {
+        av_log(avctx, AV_LOG_ERROR, "Width 1 not supported.\n");
         return AVERROR_INVALIDDATA;
     }

@@ -67,45 +67,45 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
     pic->pict_type = AV_PICTURE_TYPE_I;
     pic->key_frame = 1;

+    y = (uint16_t *)pic->data[0];
+    u = (uint16_t *)pic->data[1];
+    v = (uint16_t *)pic->data[2];
     line_end = avpkt->data + stride;
-    for (line = 0; line < avctx->height; line++) {
-        uint16_t y_temp[6] = {0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000};
-        uint16_t u_temp[3] = {0x8000, 0x8000, 0x8000};
-        uint16_t v_temp[3] = {0x8000, 0x8000, 0x8000};
-        int x;
-        y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
-        u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
-        v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);

-        for (x = 0; x < width; x += 6) {
-            uint32_t t;
-
-            if (width - x < 6 || line_end - src < 16) {
-                y = y_temp;
-                u = u_temp;
-                v = v_temp;
-            }
-
-            if (line_end - src < 4)
-                break;
-
-            t = AV_RL32(src);
+    while (line++ < avctx->height) {
+        while (1) {
+            uint32_t t = AV_RL32(src);
             src += 4;
             *u++ = t << 6 & 0xFFC0;
             *y++ = t >> 4 & 0xFFC0;
             *v++ = t >> 14 & 0xFFC0;

-            if (line_end - src < 4)
+            if (src >= line_end - 1) {
+                *y = 0x80;
+                src++;
+                line_end += stride;
+                y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
+                u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
+                v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
                 break;
+            }

             t = AV_RL32(src);
             src += 4;
             *y++ = t << 6 & 0xFFC0;
             *u++ = t >> 4 & 0xFFC0;
             *y++ = t >> 14 & 0xFFC0;
-
-            if (line_end - src < 4)
+            if (src >= line_end - 2) {
+                if (!(width & 1)) {
+                    *y = 0x80;
+                    src += 2;
+                }
+                line_end += stride;
+                y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
+                u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
+                v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
                 break;
+            }

             t = AV_RL32(src);
             src += 4;
@@ -113,8 +113,15 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
             *y++ = t >> 4 & 0xFFC0;
             *u++ = t >> 14 & 0xFFC0;

-            if (line_end - src < 4)
+            if (src >= line_end - 1) {
+                *y = 0x80;
+                src++;
+                line_end += stride;
+                y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
+                u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
+                v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
                 break;
+            }

             t = AV_RL32(src);
             src += 4;
@@ -122,21 +129,18 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
             *v++ = t >> 4 & 0xFFC0;
             *y++ = t >> 14 & 0xFFC0;

-            if (width - x < 6)
+            if (src >= line_end - 2) {
+                if (width & 1) {
+                    *y = 0x80;
+                    src += 2;
+                }
+                line_end += stride;
+                y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
+                u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
+                v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
                 break;
+            }
         }
-
-        if (x < width) {
-            y = x + (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
-            u = x/2 + (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
-            v = x/2 + (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
-            memcpy(y, y_temp, sizeof(*y) * (width - x));
-            memcpy(u, u_temp, sizeof(*u) * (width - x + 1) / 2);
-            memcpy(v, v_temp, sizeof(*v) * (width - x + 1) / 2);
-        }
-
-        line_end += stride;
-        src = line_end - stride;
     }

     *got_frame = 1;
@@ -339,25 +339,21 @@ static inline void mcdc(uint16_t *dst, const uint16_t *src, int log2w,
     }
 }

-static int decode_p_block(FourXContext *f, uint16_t *dst, const uint16_t *src,
+static int decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
                           int log2w, int log2h, int stride)
 {
-    int index, h, code, ret, scale = 1;
-    uint16_t *start, *end;
+    const int index = size2index[log2h][log2w];
+    const int h = 1 << log2h;
+    int code = get_vlc2(&f->gb,
+                        block_type_vlc[1 - (f->version > 1)][index].table,
+                        BLOCK_TYPE_VLC_BITS, 1);
+    uint16_t *start = f->last_frame_buffer;
+    uint16_t *end = start + stride * (f->avctx->height - h + 1) - (1 << log2w);
+    int ret;
+    int scale = 1;
     unsigned dc = 0;

-    av_assert0(log2w >= 0 && log2h >= 0);
-
-    index = size2index[log2h][log2w];
-    av_assert0(index >= 0);
-
-    h = 1 << log2h;
-    code = get_vlc2(&f->gb, block_type_vlc[1 - (f->version > 1)][index].table,
-                    BLOCK_TYPE_VLC_BITS, 1);
-    av_assert0(code >= 0 && code <= 6);
-
-    start = f->last_frame_buffer;
-    end = start + stride * (f->avctx->height - h + 1) - (1 << log2w);
+    av_assert0(code >= 0 && code <= 6 && log2w >= 0);

     if (code == 1) {
         log2h--;
@@ -7,12 +7,10 @@ HEADERS = avcodec.h \
 dv_profile.h \
 dxva2.h \
 old_codec_ids.h \
-qsv.h \
 vaapi.h \
 vda.h \
 vdpau.h \
 version.h \
-vorbis_parser.h \
 xvmc.h \

 OBJS = allcodecs.o \
@@ -24,17 +22,15 @@ OBJS = allcodecs.o \
 bitstream_filter.o \
 codec_desc.o \
 dv_profile.o \
+fmtconvert.o \
 imgconvert.o \
 mathtables.o \
 options.o \
 parser.o \
-qsv_api.o \
 raw.o \
 resample.o \
 resample2.o \
 utils.o \
-vorbis_parser.o \
-xiph.o \

 # subsystems
 OBJS-$(CONFIG_AANDCTTABLES) += aandcttab.o
@@ -55,7 +51,6 @@ FFT-OBJS-$(CONFIG_HARDCODED_TABLES) += cos_tables.o cos_fixed_tables.o
 OBJS-$(CONFIG_FFT) += avfft.o fft_fixed.o fft_float.o \
 fft_fixed_32.o fft_init_table.o \
 $(FFT-OBJS-yes)
-OBJS-$(CONFIG_FMTCONVERT) += fmtconvert.o
 OBJS-$(CONFIG_GOLOMB) += golomb.o
 OBJS-$(CONFIG_H263DSP) += h263dsp.o
 OBJS-$(CONFIG_H264CHROMA) += h264chroma.o
@@ -68,7 +63,6 @@ OBJS-$(CONFIG_HUFFYUVDSP) += huffyuvdsp.o
 OBJS-$(CONFIG_HUFFYUVENCDSP) += huffyuvencdsp.o
 OBJS-$(CONFIG_IDCTDSP) += idctdsp.o simple_idct.o jrevdct.o
 OBJS-$(CONFIG_IIRFILTER) += iirfilter.o
-OBJS-$(CONFIG_IMDCT15) += imdct15.o
 OBJS-$(CONFIG_INTRAX8) += intrax8.o intrax8dsp.o
 OBJS-$(CONFIG_LIBXVID) += libxvid_rc.o
 OBJS-$(CONFIG_LLAUDDSP) += lossless_audiodsp.o
@@ -76,7 +70,7 @@ OBJS-$(CONFIG_LLVIDDSP) += lossless_videodsp.o
 OBJS-$(CONFIG_LPC) += lpc.o
 OBJS-$(CONFIG_LSP) += lsp.o
 OBJS-$(CONFIG_MDCT) += mdct_fixed.o mdct_float.o mdct_fixed_32.o
-OBJS-$(CONFIG_ME_CMP) += me_cmp.o
+OBJS-$(CONFIG_ME_CMP) += me_cmp.o dsputil_compat.o
 OBJS-$(CONFIG_MPEG_ER) += mpeg_er.o
 OBJS-$(CONFIG_MPEGAUDIO) += mpegaudio.o mpegaudiodata.o \
 mpegaudiodecheader.o
@@ -91,7 +85,6 @@ OBJS-$(CONFIG_MPEGVIDEOENC) += mpegvideo_enc.o mpeg12data.o \
 mpegvideoencdsp.o
 OBJS-$(CONFIG_PIXBLOCKDSP) += pixblockdsp.o
 OBJS-$(CONFIG_QPELDSP) += qpeldsp.o
-OBJS-$(CONFIG_QSV) += qsv.o
 OBJS-$(CONFIG_RANGECODER) += rangecoder.o
 RDFT-OBJS-$(CONFIG_HARDCODED_TABLES) += sin_tables.o
 OBJS-$(CONFIG_RDFT) += rdft.o $(RDFT-OBJS-yes)
@@ -140,7 +133,6 @@ OBJS-$(CONFIG_AMV_ENCODER) += mjpegenc.o mjpeg.o mjpegenc_common.o \
 OBJS-$(CONFIG_ANM_DECODER) += anm.o
 OBJS-$(CONFIG_ANSI_DECODER) += ansi.o cga_data.o
 OBJS-$(CONFIG_APE_DECODER) += apedec.o
-OBJS-$(CONFIG_APNG_DECODER) += png.o pngdec.o pngdsp.o
 OBJS-$(CONFIG_SSA_DECODER) += assdec.o ass.o ass_split.o
 OBJS-$(CONFIG_SSA_ENCODER) += assenc.o ass.o
 OBJS-$(CONFIG_ASS_DECODER) += assdec.o ass.o ass_split.o
@@ -177,7 +169,6 @@ OBJS-$(CONFIG_BRENDER_PIX_DECODER) += brenderpix.o
 OBJS-$(CONFIG_C93_DECODER) += c93.o
 OBJS-$(CONFIG_CAVS_DECODER) += cavs.o cavsdec.o cavsdsp.o \
 cavsdata.o mpeg12data.o
-OBJS-$(CONFIG_CCAPTION_DECODER) += ccaption_dec.o
 OBJS-$(CONFIG_CDGRAPHICS_DECODER) += cdgraphics.o
 OBJS-$(CONFIG_CDXL_DECODER) += cdxl.o
 OBJS-$(CONFIG_CINEPAK_DECODER) += cinepak.o
@@ -192,9 +183,8 @@ OBJS-$(CONFIG_CPIA_DECODER) += cpia.o
 OBJS-$(CONFIG_CSCD_DECODER) += cscd.o
 OBJS-$(CONFIG_CYUV_DECODER) += cyuv.o
 OBJS-$(CONFIG_DCA_DECODER) += dcadec.o dca.o dcadsp.o \
-dcadata.o dca_exss.o \
 synth_filter.o
-OBJS-$(CONFIG_DCA_ENCODER) += dcaenc.o dca.o dcadata.o
+OBJS-$(CONFIG_DCA_ENCODER) += dcaenc.o dca.o
 OBJS-$(CONFIG_DIRAC_DECODER) += diracdec.o dirac.o diracdsp.o \
 dirac_arith.o mpeg12data.o dirac_dwt.o
 OBJS-$(CONFIG_DFA_DECODER) += dfa.o
@@ -208,7 +198,6 @@ OBJS-$(CONFIG_DSD_LSBF_PLANAR_DECODER) += dsddec.o
 OBJS-$(CONFIG_DSD_MSBF_PLANAR_DECODER) += dsddec.o
 OBJS-$(CONFIG_DSICINAUDIO_DECODER) += dsicinaudio.o
 OBJS-$(CONFIG_DSICINVIDEO_DECODER) += dsicinvideo.o
-OBJS-$(CONFIG_DSS_SP_DECODER) += dss_sp.o
 OBJS-$(CONFIG_DVBSUB_DECODER) += dvbsubdec.o
 OBJS-$(CONFIG_DVBSUB_ENCODER) += dvbsub.o
 OBJS-$(CONFIG_DVDSUB_DECODER) += dvdsubdec.o
@@ -217,7 +206,7 @@ OBJS-$(CONFIG_DVVIDEO_DECODER) += dvdec.o dv.o dvdata.o
 OBJS-$(CONFIG_DVVIDEO_ENCODER) += dvenc.o dv.o dvdata.o
 OBJS-$(CONFIG_DXA_DECODER) += dxa.o
 OBJS-$(CONFIG_DXTORY_DECODER) += dxtory.o
-OBJS-$(CONFIG_EAC3_DECODER) += eac3_data.o
+OBJS-$(CONFIG_EAC3_DECODER) += eac3dec.o eac3_data.o
 OBJS-$(CONFIG_EAC3_ENCODER) += eac3enc.o eac3_data.o
 OBJS-$(CONFIG_EACMV_DECODER) += eacmv.o
 OBJS-$(CONFIG_EAMAD_DECODER) += eamad.o eaidct.o mpeg12.o \
@@ -267,12 +256,10 @@ OBJS-$(CONFIG_H264_DECODER) += h264.o h264_cabac.o h264_cavlc.o \
 h264_mb.o h264_picture.o h264_ps.o \
 h264_refs.o h264_sei.o h264_slice.o
 OBJS-$(CONFIG_H264_VDA_DECODER) += vda_h264_dec.o
-OBJS-$(CONFIG_H264_QSV_DECODER) += qsv_h264.o
 OBJS-$(CONFIG_HEVC_DECODER) += hevc.o hevc_mvs.o hevc_ps.o hevc_sei.o \
 hevc_cabac.o hevc_refs.o hevcpred.o \
 hevcdsp.o hevc_filter.o
 OBJS-$(CONFIG_HNM4_VIDEO_DECODER) += hnm4video.o
-OBJS-$(CONFIG_HQX_DECODER) += hqx.o hqxvlc.o
 OBJS-$(CONFIG_HUFFYUV_DECODER) += huffyuv.o huffyuvdec.o
 OBJS-$(CONFIG_HUFFYUV_ENCODER) += huffyuv.o huffyuvenc.o
 OBJS-$(CONFIG_IDCIN_DECODER) += idcinvideo.o
@@ -289,7 +276,7 @@ OBJS-$(CONFIG_INTERPLAY_VIDEO_DECODER) += interplayvideo.o
 OBJS-$(CONFIG_JACOSUB_DECODER) += jacosubdec.o ass.o
 OBJS-$(CONFIG_JPEG2000_ENCODER) += j2kenc.o mqcenc.o mqc.o jpeg2000.o \
 jpeg2000dwt.o
-OBJS-$(CONFIG_JPEG2000_DECODER) += jpeg2000dec.o jpeg2000.o jpeg2000dsp.o \
+OBJS-$(CONFIG_JPEG2000_DECODER) += jpeg2000dec.o jpeg2000.o \
 jpeg2000dwt.o mqcdec.o mqc.o
 OBJS-$(CONFIG_JPEGLS_DECODER) += jpeglsdec.o jpegls.o
 OBJS-$(CONFIG_JPEGLS_ENCODER) += jpeglsenc.o jpegls.o
@@ -356,10 +343,10 @@ OBJS-$(CONFIG_MXPEG_DECODER) += mxpegdec.o
 OBJS-$(CONFIG_NELLYMOSER_DECODER) += nellymoserdec.o nellymoser.o
 OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o
 OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
-OBJS-$(CONFIG_NVENC_ENCODER) += nvenc.o
 OBJS-$(CONFIG_ON2AVC_DECODER) += on2avc.o on2avcdata.o
 OBJS-$(CONFIG_OPUS_DECODER) += opusdec.o opus.o opus_celt.o \
-opus_silk.o vorbis_data.o
+opus_imdct.o opus_silk.o \
+vorbis_data.o
 OBJS-$(CONFIG_PAF_AUDIO_DECODER) += pafaudio.o
 OBJS-$(CONFIG_PAF_VIDEO_DECODER) += pafvideo.o
 OBJS-$(CONFIG_PAM_DECODER) += pnmdec.o pnm.o
@@ -442,7 +429,6 @@ OBJS-$(CONFIG_SONIC_LS_ENCODER) += sonic.o
 OBJS-$(CONFIG_SP5X_DECODER) += sp5xdec.o
 OBJS-$(CONFIG_SRT_DECODER) += srtdec.o ass.o
 OBJS-$(CONFIG_SRT_ENCODER) += srtenc.o ass_split.o
-OBJS-$(CONFIG_STL_DECODER) += textdec.o ass.o
 OBJS-$(CONFIG_SUBRIP_DECODER) += srtdec.o ass.o
 OBJS-$(CONFIG_SUBRIP_ENCODER) += srtenc.o ass_split.o
 OBJS-$(CONFIG_SUBVIEWER1_DECODER) += textdec.o ass.o
@@ -458,6 +444,7 @@ OBJS-$(CONFIG_TAK_DECODER) += takdec.o tak.o
 OBJS-$(CONFIG_TARGA_DECODER) += targa.o
 OBJS-$(CONFIG_TARGA_ENCODER) += targaenc.o rle.o
 OBJS-$(CONFIG_TARGA_Y216_DECODER) += targa_y216dec.o
+OBJS-$(CONFIG_THEORA_DECODER) += xiph.o
 OBJS-$(CONFIG_TIERTEXSEQVIDEO_DECODER) += tiertexseqv.o
 OBJS-$(CONFIG_TIFF_DECODER) += tiff.o lzw.o faxcompr.o tiff_data.o tiff_common.o
 OBJS-$(CONFIG_TIFF_ENCODER) += tiffenc.o rle.o lzwenc.o tiff_data.o
@@ -486,9 +473,7 @@ OBJS-$(CONFIG_V410_ENCODER) += v410enc.o
 OBJS-$(CONFIG_V210X_DECODER) += v210x.o
 OBJS-$(CONFIG_VB_DECODER) += vb.o
 OBJS-$(CONFIG_VBLE_DECODER) += vble.o
-OBJS-$(CONFIG_VC1_DECODER) += vc1dec.o vc1_block.o vc1_loopfilter.o \
-vc1_mc.o vc1_pred.o vc1.o vc1data.o \
-vc1dsp.o \
+OBJS-$(CONFIG_VC1_DECODER) += vc1dec.o vc1.o vc1data.o vc1dsp.o \
 msmpeg4dec.o msmpeg4.o msmpeg4data.o \
 wmv2dsp.o
 OBJS-$(CONFIG_VCR1_DECODER) += vcr1.o
@@ -496,7 +481,7 @@ OBJS-$(CONFIG_VMDAUDIO_DECODER) += vmdaudio.o
 OBJS-$(CONFIG_VMDVIDEO_DECODER) += vmdvideo.o
 OBJS-$(CONFIG_VMNC_DECODER) += vmnc.o
 OBJS-$(CONFIG_VORBIS_DECODER) += vorbisdec.o vorbisdsp.o vorbis.o \
-vorbis_data.o
+vorbis_data.o xiph.o
 OBJS-$(CONFIG_VORBIS_ENCODER) += vorbisenc.o vorbis.o \
 vorbis_data.o
 OBJS-$(CONFIG_VP3_DECODER) += vp3.o
@@ -626,8 +611,8 @@ OBJS-$(CONFIG_ADPCM_EA_R1_DECODER) += adpcm.o adpcm_data.o
 OBJS-$(CONFIG_ADPCM_EA_R2_DECODER) += adpcm.o adpcm_data.o
 OBJS-$(CONFIG_ADPCM_EA_R3_DECODER) += adpcm.o adpcm_data.o
 OBJS-$(CONFIG_ADPCM_EA_XAS_DECODER) += adpcm.o adpcm_data.o
-OBJS-$(CONFIG_ADPCM_G722_DECODER) += g722.o g722dsp.o g722dec.o
-OBJS-$(CONFIG_ADPCM_G722_ENCODER) += g722.o g722dsp.o g722enc.o
+OBJS-$(CONFIG_ADPCM_G722_DECODER) += g722.o g722dec.o
+OBJS-$(CONFIG_ADPCM_G722_ENCODER) += g722.o g722enc.o
 OBJS-$(CONFIG_ADPCM_G726_DECODER) += g726.o
 OBJS-$(CONFIG_ADPCM_G726_ENCODER) += g726.o
 OBJS-$(CONFIG_ADPCM_G726LE_DECODER) += g726.o
@@ -666,41 +651,42 @@ OBJS-$(CONFIG_VAAPI) += vaapi.o
 OBJS-$(CONFIG_VDA) += vda.o
 OBJS-$(CONFIG_VDPAU) += vdpau.o

-OBJS-$(CONFIG_H263_VAAPI_HWACCEL) += vaapi_mpeg4.o
+OBJS-$(CONFIG_H263_VAAPI_HWACCEL) += vaapi_mpeg4.o vaapi_mpeg.o
 OBJS-$(CONFIG_H263_VDPAU_HWACCEL) += vdpau_mpeg4.o
 OBJS-$(CONFIG_H264_DXVA2_HWACCEL) += dxva2_h264.o
 OBJS-$(CONFIG_H264_VAAPI_HWACCEL) += vaapi_h264.o
 OBJS-$(CONFIG_H264_VDA_HWACCEL) += vda_h264.o
 OBJS-$(CONFIG_H264_VDPAU_HWACCEL) += vdpau_h264.o
-OBJS-$(CONFIG_HEVC_DXVA2_HWACCEL) += dxva2_hevc.o
 OBJS-$(CONFIG_MPEG1_VDPAU_HWACCEL) += vdpau_mpeg12.o
 OBJS-$(CONFIG_MPEG1_XVMC_HWACCEL) += mpegvideo_xvmc.o
 OBJS-$(CONFIG_MPEG2_DXVA2_HWACCEL) += dxva2_mpeg2.o
-OBJS-$(CONFIG_MPEG2_VAAPI_HWACCEL) += vaapi_mpeg2.o
+OBJS-$(CONFIG_MPEG2_VAAPI_HWACCEL) += vaapi_mpeg2.o vaapi_mpeg.o
 OBJS-$(CONFIG_MPEG2_VDPAU_HWACCEL) += vdpau_mpeg12.o
 OBJS-$(CONFIG_MPEG2_XVMC_HWACCEL) += mpegvideo_xvmc.o
-OBJS-$(CONFIG_MPEG4_VAAPI_HWACCEL) += vaapi_mpeg4.o
+OBJS-$(CONFIG_MPEG4_VAAPI_HWACCEL) += vaapi_mpeg4.o vaapi_mpeg.o
 OBJS-$(CONFIG_MPEG4_VDPAU_HWACCEL) += vdpau_mpeg4.o
 OBJS-$(CONFIG_VC1_DXVA2_HWACCEL) += dxva2_vc1.o
-OBJS-$(CONFIG_VC1_VAAPI_HWACCEL) += vaapi_vc1.o
+OBJS-$(CONFIG_VC1_VAAPI_HWACCEL) += vaapi_vc1.o vaapi_mpeg.o
 OBJS-$(CONFIG_VC1_VDPAU_HWACCEL) += vdpau_vc1.o

 # libavformat dependencies
 OBJS-$(CONFIG_ADTS_MUXER) += mpeg4audio.o
 OBJS-$(CONFIG_CAF_DEMUXER) += mpeg4audio.o mpegaudiodata.o \
 ac3tab.o
-OBJS-$(CONFIG_FLAC_DEMUXER) += flac.o flacdata.o vorbis_data.o
+OBJS-$(CONFIG_FLAC_DEMUXER) += flac.o flacdata.o vorbis_data.o \
+vorbis_parser.o xiph.o
 OBJS-$(CONFIG_FLAC_MUXER) += flac.o flacdata.o vorbis_data.o
 OBJS-$(CONFIG_FLV_DEMUXER) += mpeg4audio.o
 OBJS-$(CONFIG_GXF_DEMUXER) += mpeg12data.o
 OBJS-$(CONFIG_IFF_DEMUXER) += iff.o
 OBJS-$(CONFIG_ISMV_MUXER) += mpeg4audio.o mpegaudiodata.o
 OBJS-$(CONFIG_LATM_MUXER) += mpeg4audio.o
-OBJS-$(CONFIG_MATROSKA_AUDIO_MUXER) += mpeg4audio.o vorbis_data.o \
+OBJS-$(CONFIG_MATROSKA_AUDIO_MUXER) += xiph.o mpeg4audio.o vorbis_data.o \
 flac.o flacdata.o
-OBJS-$(CONFIG_MATROSKA_DEMUXER) += mpeg4audio.o mpegaudiodata.o
+OBJS-$(CONFIG_MATROSKA_DEMUXER) += mpeg4audio.o mpegaudiodata.o \
+vorbis_parser.o xiph.o
 OBJS-$(CONFIG_MATROSKA_MUXER) += mpeg4audio.o mpegaudiodata.o \
-flac.o flacdata.o vorbis_data.o
+flac.o flacdata.o vorbis_data.o xiph.o
 OBJS-$(CONFIG_MP2_MUXER) += mpegaudiodata.o mpegaudiodecheader.o
 OBJS-$(CONFIG_MP3_MUXER) += mpegaudiodata.o mpegaudiodecheader.o
 OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o ac3tab.o
@@ -709,20 +695,21 @@ OBJS-$(CONFIG_MPEGTS_MUXER) += mpeg4audio.o
 OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpeg4audio.o mpegaudiodata.o
 OBJS-$(CONFIG_MXF_MUXER) += dnxhddata.o
 OBJS-$(CONFIG_NUT_MUXER) += mpegaudiodata.o
-OBJS-$(CONFIG_NUT_DEMUXER) += mpegaudiodata.o mpeg4audio.o
-OBJS-$(CONFIG_OGA_MUXER) += flac.o flacdata.o
-OBJS-$(CONFIG_OGG_DEMUXER) += mpeg12data.o \
+OBJS-$(CONFIG_OGA_MUXER) += xiph.o flac.o flacdata.o
+OBJS-$(CONFIG_OGG_DEMUXER) += xiph.o flac.o flacdata.o \
+mpeg12data.o vorbis_parser.o \
 dirac.o vorbis_data.o
-OBJS-$(CONFIG_OGG_MUXER) += flac.o flacdata.o \
+OBJS-$(CONFIG_OGG_MUXER) += xiph.o flac.o flacdata.o \
 vorbis_data.o
-OBJS-$(CONFIG_RTP_MUXER) += mpeg4audio.o
+OBJS-$(CONFIG_RTP_MUXER) += mpeg4audio.o xiph.o
 OBJS-$(CONFIG_RTPDEC) += mjpeg.o
 OBJS-$(CONFIG_SPDIF_DEMUXER) += aacadtsdec.o mpeg4audio.o
 OBJS-$(CONFIG_SPDIF_MUXER) += dca.o
 OBJS-$(CONFIG_TAK_DEMUXER) += tak.o
 OBJS-$(CONFIG_WEBM_MUXER) += mpeg4audio.o mpegaudiodata.o \
|
OBJS-$(CONFIG_WEBM_MUXER) += mpeg4audio.o mpegaudiodata.o \
|
||||||
flac.o flacdata.o \
|
xiph.o flac.o flacdata.o \
|
||||||
vorbis_data.o
|
vorbis_data.o
|
||||||
|
OBJS-$(CONFIG_WEBM_DASH_MANIFEST_DEMUXER) += vorbis_parser.o xiph.o
|
||||||
OBJS-$(CONFIG_WTV_DEMUXER) += mpeg4audio.o mpegaudiodata.o
|
OBJS-$(CONFIG_WTV_DEMUXER) += mpeg4audio.o mpegaudiodata.o
|
||||||
|
|
||||||
# libavfilter dependencies
|
# libavfilter dependencies
|
||||||
@@ -744,7 +731,6 @@ OBJS-$(CONFIG_LIBMP3LAME_ENCODER) += libmp3lame.o mpegaudiodecheader.o
|
|||||||
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_DECODER) += libopencore-amr.o
|
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_DECODER) += libopencore-amr.o
|
||||||
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER) += libopencore-amr.o
|
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER) += libopencore-amr.o
|
||||||
OBJS-$(CONFIG_LIBOPENCORE_AMRWB_DECODER) += libopencore-amr.o
|
OBJS-$(CONFIG_LIBOPENCORE_AMRWB_DECODER) += libopencore-amr.o
|
||||||
OBJS-$(CONFIG_LIBOPENH264_ENCODER) += libopenh264enc.o
|
|
||||||
OBJS-$(CONFIG_LIBOPENJPEG_DECODER) += libopenjpegdec.o
|
OBJS-$(CONFIG_LIBOPENJPEG_DECODER) += libopenjpegdec.o
|
||||||
OBJS-$(CONFIG_LIBOPENJPEG_ENCODER) += libopenjpegenc.o
|
OBJS-$(CONFIG_LIBOPENJPEG_ENCODER) += libopenjpegenc.o
|
||||||
OBJS-$(CONFIG_LIBOPUS_DECODER) += libopusdec.o libopus.o \
|
OBJS-$(CONFIG_LIBOPUS_DECODER) += libopusdec.o libopus.o \
|
||||||
@@ -767,7 +753,7 @@ OBJS-$(CONFIG_LIBVO_AACENC_ENCODER) += libvo-aacenc.o mpeg4audio.o
|
|||||||
OBJS-$(CONFIG_LIBVO_AMRWBENC_ENCODER) += libvo-amrwbenc.o
|
OBJS-$(CONFIG_LIBVO_AMRWBENC_ENCODER) += libvo-amrwbenc.o
|
||||||
OBJS-$(CONFIG_LIBVORBIS_DECODER) += libvorbisdec.o
|
OBJS-$(CONFIG_LIBVORBIS_DECODER) += libvorbisdec.o
|
||||||
OBJS-$(CONFIG_LIBVORBIS_ENCODER) += libvorbisenc.o \
|
OBJS-$(CONFIG_LIBVORBIS_ENCODER) += libvorbisenc.o \
|
||||||
vorbis_data.o
|
vorbis_data.o vorbis_parser.o xiph.o
|
||||||
OBJS-$(CONFIG_LIBVPX_VP8_DECODER) += libvpxdec.o
|
OBJS-$(CONFIG_LIBVPX_VP8_DECODER) += libvpxdec.o
|
||||||
OBJS-$(CONFIG_LIBVPX_VP8_ENCODER) += libvpxenc.o
|
OBJS-$(CONFIG_LIBVPX_VP8_ENCODER) += libvpxenc.o
|
||||||
OBJS-$(CONFIG_LIBVPX_VP9_DECODER) += libvpxdec.o libvpx.o
|
OBJS-$(CONFIG_LIBVPX_VP9_DECODER) += libvpxdec.o libvpx.o
|
||||||
@@ -820,7 +806,10 @@ OBJS-$(CONFIG_PNM_PARSER) += pnm_parser.o pnm.o
|
|||||||
OBJS-$(CONFIG_RV30_PARSER) += rv34_parser.o
|
OBJS-$(CONFIG_RV30_PARSER) += rv34_parser.o
|
||||||
OBJS-$(CONFIG_RV40_PARSER) += rv34_parser.o
|
OBJS-$(CONFIG_RV40_PARSER) += rv34_parser.o
|
||||||
OBJS-$(CONFIG_TAK_PARSER) += tak_parser.o tak.o
|
OBJS-$(CONFIG_TAK_PARSER) += tak_parser.o tak.o
|
||||||
OBJS-$(CONFIG_VC1_PARSER) += vc1_parser.o
|
OBJS-$(CONFIG_VC1_PARSER) += vc1_parser.o vc1.o vc1data.o vc1dsp.o \
|
||||||
|
msmpeg4.o msmpeg4data.o mpeg4video.o \
|
||||||
|
h263.o
|
||||||
|
OBJS-$(CONFIG_VORBIS_PARSER) += vorbis_parser.o xiph.o
|
||||||
OBJS-$(CONFIG_VP3_PARSER) += vp3_parser.o
|
OBJS-$(CONFIG_VP3_PARSER) += vp3_parser.o
|
||||||
OBJS-$(CONFIG_VP8_PARSER) += vp8_parser.o
|
OBJS-$(CONFIG_VP8_PARSER) += vp8_parser.o
|
||||||
OBJS-$(CONFIG_VP9_PARSER) += vp9_parser.o
|
OBJS-$(CONFIG_VP9_PARSER) += vp9_parser.o
|
||||||
@@ -857,22 +846,17 @@ SKIPHEADERS += %_tablegen.h \
|
|||||||
libutvideo.h \
|
libutvideo.h \
|
||||||
old_codec_ids.h \
|
old_codec_ids.h \
|
||||||
tableprint.h \
|
tableprint.h \
|
||||||
tableprint_vlc.h \
|
|
||||||
$(ARCH)/vp56_arith.h \
|
$(ARCH)/vp56_arith.h \
|
||||||
|
|
||||||
SKIPHEADERS-$(CONFIG_DXVA2) += dxva2.h dxva2_internal.h
|
SKIPHEADERS-$(CONFIG_DXVA2) += dxva2.h dxva2_internal.h
|
||||||
SKIPHEADERS-$(CONFIG_LIBSCHROEDINGER) += libschroedinger.h
|
SKIPHEADERS-$(CONFIG_LIBSCHROEDINGER) += libschroedinger.h
|
||||||
SKIPHEADERS-$(CONFIG_LIBUTVIDEO) += libutvideo.h
|
SKIPHEADERS-$(CONFIG_LIBUTVIDEO) += libutvideo.h
|
||||||
SKIPHEADERS-$(CONFIG_QSV) += qsv.h qsv_internal.h
|
|
||||||
SKIPHEADERS-$(CONFIG_XVMC) += xvmc.h
|
SKIPHEADERS-$(CONFIG_XVMC) += xvmc.h
|
||||||
SKIPHEADERS-$(CONFIG_VAAPI) += vaapi_internal.h
|
SKIPHEADERS-$(CONFIG_VAAPI) += vaapi_internal.h
|
||||||
SKIPHEADERS-$(CONFIG_VDA) += vda.h vda_internal.h
|
SKIPHEADERS-$(CONFIG_VDA) += vda.h vda_internal.h
|
||||||
SKIPHEADERS-$(CONFIG_VDPAU) += vdpau.h vdpau_internal.h
|
SKIPHEADERS-$(CONFIG_VDPAU) += vdpau.h vdpau_internal.h
|
||||||
|
|
||||||
TESTPROGS = imgconvert \
|
TESTPROGS = imgconvert \
|
||||||
options \
|
|
||||||
avfft \
|
|
||||||
|
|
||||||
|
|
||||||
TESTPROGS-$(CONFIG_CABAC) += cabac
|
TESTPROGS-$(CONFIG_CABAC) += cabac
|
||||||
TESTPROGS-$(CONFIG_FFT) += fft fft-fixed fft-fixed32
|
TESTPROGS-$(CONFIG_FFT) += fft fft-fixed fft-fixed32
|
||||||
@@ -28,7 +28,6 @@
 #include "a64tables.h"
 #include "elbg.h"
 #include "internal.h"
-#include "libavutil/avassert.h"
 #include "libavutil/common.h"
 #include "libavutil/intreadwrite.h"

@@ -66,7 +65,7 @@ static const int mc_colors[5]={0x0,0xb,0xc,0xf,0x1};
 //static const int mc_colors[5]={0x0,0x8,0xa,0xf,0x7};
 //static const int mc_colors[5]={0x0,0x9,0x8,0xa,0x3};

-static void to_meta_with_crop(AVCodecContext *avctx, const AVFrame *p, int *dest)
+static void to_meta_with_crop(AVCodecContext *avctx, AVFrame *p, int *dest)
 {
 int blockx, blocky, x, y;
 int luma = 0;
@@ -79,13 +78,9 @@ static void to_meta_with_crop(AVCodecContext *avctx, const AVFrame *p, int *dest
 for (y = blocky; y < blocky + 8 && y < C64YRES; y++) {
 for (x = blockx; x < blockx + 8 && x < C64XRES; x += 2) {
 if(x < width && y < height) {
-if (x + 1 < width) {
+/* build average over 2 pixels */
-/* build average over 2 pixels */
+luma = (src[(x + 0 + y * p->linesize[0])] +
-luma = (src[(x + 0 + y * p->linesize[0])] +
+src[(x + 1 + y * p->linesize[0])]) / 2;
-src[(x + 1 + y * p->linesize[0])]) / 2;
-} else {
-luma = src[(x + y * p->linesize[0])];
-}
 /* write blocks as linear data now so they are suitable for elbg */
 dest[0] = luma;
 }
@@ -191,6 +186,7 @@ static void render_charset(AVCodecContext *avctx, uint8_t *charset,
 static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
 {
 A64Context *c = avctx->priv_data;
+av_frame_free(&avctx->coded_frame);
 av_freep(&c->mc_meta_charset);
 av_freep(&c->mc_best_cb);
 av_freep(&c->mc_charset);
@@ -224,7 +220,7 @@ static av_cold int a64multi_encode_init(AVCodecContext *avctx)
 a64_palette[mc_colors[a]][2] * 0.11;
 }

-if (!(c->mc_meta_charset = av_mallocz_array(c->mc_lifetime, 32000 * sizeof(int))) ||
+if (!(c->mc_meta_charset = av_malloc_array(c->mc_lifetime, 32000 * sizeof(int))) ||
 !(c->mc_best_cb = av_malloc(CHARSET_CHARS * 32 * sizeof(int))) ||
 !(c->mc_charmap = av_mallocz_array(c->mc_lifetime, 1000 * sizeof(int))) ||
 !(c->mc_colram = av_mallocz(CHARSET_CHARS * sizeof(uint8_t))) ||
@@ -242,6 +238,14 @@ static av_cold int a64multi_encode_init(AVCodecContext *avctx)
 AV_WB32(avctx->extradata, c->mc_lifetime);
 AV_WB32(avctx->extradata + 16, INTERLACED);

+avctx->coded_frame = av_frame_alloc();
+if (!avctx->coded_frame) {
+a64multi_close_encoder(avctx);
+return AVERROR(ENOMEM);
+}
+
+avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+avctx->coded_frame->key_frame = 1;
 if (!avctx->codec_tag)
 avctx->codec_tag = AV_RL32("a64m");

@@ -266,9 +270,10 @@ static void a64_compress_colram(unsigned char *buf, int *charmap, uint8_t *colra
 }

 static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
-const AVFrame *p, int *got_packet)
+const AVFrame *pict, int *got_packet)
 {
 A64Context *c = avctx->priv_data;
+AVFrame *const p = avctx->coded_frame;

 int frame;
 int x, y;
@@ -299,7 +304,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 }

 /* no data, means end encoding asap */
-if (!p) {
+if (!pict) {
 /* all done, end encoding */
 if (!c->mc_lifetime) return 0;
 /* no more frames in queue, prepare to flush remaining frames */
@@ -312,10 +317,13 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 } else {
 /* fill up mc_meta_charset with data until lifetime exceeds */
 if (c->mc_frame_counter < c->mc_lifetime) {
+*p = *pict;
+p->pict_type = AV_PICTURE_TYPE_I;
+p->key_frame = 1;
 to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
 c->mc_frame_counter++;
 if (c->next_pts == AV_NOPTS_VALUE)
-c->next_pts = p->pts;
+c->next_pts = pict->pts;
 /* lifetime is not reached so wait for next frame first */
 return 0;
 }
@@ -326,20 +334,14 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 req_size = 0;
 /* any frames to encode? */
 if (c->mc_lifetime) {
-int alloc_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
+req_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
-if ((ret = ff_alloc_packet2(avctx, pkt, alloc_size)) < 0)
+if ((ret = ff_alloc_packet2(avctx, pkt, req_size)) < 0)
 return ret;
 buf = pkt->data;

 /* calc optimal new charset + charmaps */
-ret = avpriv_init_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb,
+avpriv_init_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);
-CHARSET_CHARS, 50, charmap, &c->randctx);
+avpriv_do_elbg (meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);
-if (ret < 0)
-return ret;
-ret = avpriv_do_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb,
-CHARSET_CHARS, 50, charmap, &c->randctx);
-if (ret < 0)
-return ret;

 /* create colorram map and a c64 readable charset */
 render_charset(avctx, charset, colram);
@@ -349,7 +351,6 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,

 /* advance pointers */
 buf += charset_size;
-req_size += charset_size;
 }

 /* write x frames to buf */
@@ -386,7 +387,6 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 pkt->pts = pkt->dts = c->next_pts;
 c->next_pts = AV_NOPTS_VALUE;

-av_assert0(pkt->size >= req_size);
 pkt->size = req_size;
 pkt->flags |= AV_PKT_FLAG_KEY;
 *got_packet = !!req_size;
@@ -32,10 +32,10 @@

 #include "libavutil/float_dsp.h"
 #include "avcodec.h"
-#include "imdct15.h"
 #include "fft.h"
 #include "mpeg4audio.h"
 #include "sbr.h"
+#include "fmtconvert.h"

 #include <stdint.h>

@@ -233,8 +233,7 @@ typedef struct SingleChannelElement {
 float sf[120]; ///< scalefactors
 int sf_idx[128]; ///< scalefactor indices (used by encoder)
 uint8_t zeroes[128]; ///< band is not coded (used by encoder)
-DECLARE_ALIGNED(32, float, pcoeffs)[1024]; ///< coefficients for IMDCT, pristine
+DECLARE_ALIGNED(32, float, coeffs)[1024]; ///< coefficients for IMDCT
-DECLARE_ALIGNED(32, float, coeffs)[1024]; ///< coefficients for IMDCT, maybe processed
 DECLARE_ALIGNED(32, float, saved)[1536]; ///< overlap
 DECLARE_ALIGNED(32, float, ret_buf)[2048]; ///< PCM output buffer
 DECLARE_ALIGNED(16, float, ltp_state)[3072]; ///< time signal for LTP
@@ -246,7 +245,6 @@ typedef struct SingleChannelElement {
 * channel element - generic struct for SCE/CPE/CCE/LFE
 */
 typedef struct ChannelElement {
-int present;
 // CPE specific
 int common_window; ///< Set if channels share a common 'IndividualChannelStream' in bitstream.
 int ms_mode; ///< Signals mid/side stereo flags coding mode (used by encoder)
@@ -276,7 +274,6 @@ struct AACContext {
 ChannelElement *che[4][MAX_ELEM_ID];
 ChannelElement *tag_che_map[4][MAX_ELEM_ID];
 int tags_mapped;
-int warned_remapping_once;
 /** @} */

 /**
@@ -295,8 +292,8 @@ struct AACContext {
 FFTContext mdct_small;
 FFTContext mdct_ld;
 FFTContext mdct_ltp;
-IMDCT15Context *mdct480;
+FmtConvertContext fmt_conv;
-AVFloatDSPContext *fdsp;
+AVFloatDSPContext fdsp;
 int random_state;
 /** @} */

@@ -90,10 +90,6 @@ static int aac_adtstoasc_filter(AVBitStreamFilterContext *bsfc,
 av_free(avctx->extradata);
 avctx->extradata_size = 2 + pce_size;
 avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
-if (!avctx->extradata) {
-avctx->extradata_size = 0;
-return AVERROR(ENOMEM);
-}

 init_put_bits(&pb, avctx->extradata, avctx->extradata_size);
 put_bits(&pb, 5, hdr.object_type);
@@ -161,7 +161,7 @@ static av_always_inline float quantize_and_encode_band_cost_template(
 di = t - CLIPPED_ESCAPE;
 curbits += 21;
 } else {
-int c = av_clip_uintp2(quant(t, Q), 13);
+int c = av_clip(quant(t, Q), 0, 8191);
 di = t - c*cbrtf(c)*IQ;
 curbits += av_log2(c)*2 - 4 + 1;
 }
@@ -191,7 +191,7 @@ static av_always_inline float quantize_and_encode_band_cost_template(
 if (BT_ESC) {
 for (j = 0; j < 2; j++) {
 if (ff_aac_codebook_vectors[cb-1][curidx*2+j] == 64.0f) {
-int coef = av_clip_uintp2(quant(fabsf(in[i+j]), Q), 13);
+int coef = av_clip(quant(fabsf(in[i+j]), Q), 0, 8191);
 int len = av_log2(coef);

 put_bits(pb, len - 4 + 1, (1 << (len - 4 + 1)) - 2);
@@ -1069,10 +1069,10 @@ static void search_for_ms(AACEncContext *s, ChannelElement *cpe,
 float minthr = FFMIN(band0->threshold, band1->threshold);
 float maxthr = FFMAX(band0->threshold, band1->threshold);
 for (i = 0; i < sce0->ics.swb_sizes[g]; i++) {
-M[i] = (sce0->pcoeffs[start+w2*128+i]
+M[i] = (sce0->coeffs[start+w2*128+i]
-+ sce1->pcoeffs[start+w2*128+i]) * 0.5;
++ sce1->coeffs[start+w2*128+i]) * 0.5;
 S[i] = M[i]
-- sce1->pcoeffs[start+w2*128+i];
+- sce1->coeffs[start+w2*128+i];
 }
 abs_pow34_v(L34, sce0->coeffs+start+w2*128, sce0->ics.swb_sizes[g]);
 abs_pow34_v(R34, sce1->coeffs+start+w2*128, sce0->ics.swb_sizes[g]);
@@ -87,7 +87,7 @@
 #include "internal.h"
 #include "get_bits.h"
 #include "fft.h"
-#include "imdct15.h"
+#include "fmtconvert.h"
 #include "lpc.h"
 #include "kbdwin.h"
 #include "sinewin.h"
@@ -102,6 +102,7 @@
 #include "aacadtsdec.h"
 #include "libavutil/intfloat.h"

+#include <assert.h>
 #include <errno.h>
 #include <math.h>
 #include <stdint.h>
@@ -620,12 +621,6 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
 * If we seem to have encountered such a stream, transfer
 * the LFE[0] element to the SCE[1]'s mapping */
 if (ac->tags_mapped == tags_per_config[ac->oc[1].m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
-if (!ac->warned_remapping_once && (type != TYPE_LFE || elem_id != 0)) {
-av_log(ac->avctx, AV_LOG_WARNING,
-"This stream seems to incorrectly report its last channel as %s[%d], mapping to LFE[0]\n",
-type == TYPE_SCE ? "SCE" : "LFE", elem_id);
-ac->warned_remapping_once++;
-}
 ac->tags_mapped++;
 return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
 }
@@ -635,22 +630,6 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
 return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
 }
 case 4:
-/* Some streams incorrectly code 4.0 audio as
-* SCE[0] CPE[0] LFE[0]
-* instead of
-* SCE[0] CPE[0] SCE[1].
-* If we seem to have encountered such a stream, transfer
-* the SCE[1] element to the LFE[0]'s mapping */
-if (ac->tags_mapped == tags_per_config[ac->oc[1].m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
-if (!ac->warned_remapping_once && (type != TYPE_SCE || elem_id != 1)) {
-av_log(ac->avctx, AV_LOG_WARNING,
-"This stream seems to incorrectly report its last channel as %s[%d], mapping to SCE[1]\n",
-type == TYPE_SCE ? "SCE" : "LFE", elem_id);
-ac->warned_remapping_once++;
-}
-ac->tags_mapped++;
-return ac->tag_che_map[type][elem_id] = ac->che[TYPE_SCE][1];
-}
 if (ac->tags_mapped == 2 &&
 ac->oc[1].m4ac.chan_config == 4 &&
 type == TYPE_SCE) {
@@ -702,7 +681,6 @@ static void decode_channel_map(uint8_t layout_map[][3],
 syn_ele = TYPE_LFE;
 break;
 default:
-// AAC_CHANNEL_OFF has no channel map
 av_assert0(0);
 }
 layout_map[0][0] = syn_ele;
@@ -800,7 +778,6 @@ static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
 avpriv_request_sample(avctx, "960/120 MDCT window");
 return AVERROR_PATCHWELCOME;
 }
-m4ac->frame_length_short = 0;

 if (get_bits1(gb)) // dependsOnCoreCoder
 skip_bits(gb, 14); // coreCoderDelay
@@ -878,7 +855,11 @@ static int decode_eld_specific_config(AACContext *ac, AVCodecContext *avctx,
 m4ac->ps = 0;
 m4ac->sbr = 0;

-m4ac->frame_length_short = get_bits1(gb);
+if (get_bits1(gb)) { // frameLengthFlag
+avpriv_request_sample(avctx, "960/120 MDCT window");
+return AVERROR_PATCHWELCOME;
+}
+
 res_flags = get_bits(gb, 3);
 if (res_flags) {
 avpriv_report_missing_feature(avctx,
@@ -1132,10 +1113,8 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)

 ff_aac_sbr_init();

-ac->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
+ff_fmt_convert_init(&ac->fmt_conv, avctx);
-if (!ac->fdsp) {
+avpriv_float_dsp_init(&ac->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
-return AVERROR(ENOMEM);
-}

 ac->random_state = 0x1f2e3d4c;

@@ -1155,10 +1134,6 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
 ff_mdct_init(&ac->mdct_ld, 10, 1, 1.0 / (32768.0 * 512.0));
 ff_mdct_init(&ac->mdct_small, 8, 1, 1.0 / (32768.0 * 128.0));
 ff_mdct_init(&ac->mdct_ltp, 11, 0, -2.0 * 32768.0);
-ret = ff_imdct15_init(&ac->mdct480, 5);
-if (ret < 0)
-return ret;
-
 // window initialization
 ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
 ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
@@ -1230,14 +1205,11 @@ static void decode_ltp(LongTermPrediction *ltp,
 static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
 GetBitContext *gb)
 {
-const MPEG4AudioConfig *const m4ac = &ac->oc[1].m4ac;
+int aot = ac->oc[1].m4ac.object_type;
-const int aot = m4ac->object_type;
-const int sampling_index = m4ac->sampling_index;
 if (aot != AOT_ER_AAC_ELD) {
 if (get_bits1(gb)) {
 av_log(ac->avctx, AV_LOG_ERROR, "Reserved bit set.\n");
-if (ac->avctx->err_recognition & AV_EF_BITSTREAM)
+return AVERROR_INVALIDDATA;
-return AVERROR_INVALIDDATA;
 }
 ics->window_sequence[1] = ics->window_sequence[0];
 ics->window_sequence[0] = get_bits(gb, 2);
@@ -1266,29 +1238,23 @@ static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
 }
 }
 ics->num_windows = 8;
-ics->swb_offset = ff_swb_offset_128[sampling_index];
+ics->swb_offset = ff_swb_offset_128[ac->oc[1].m4ac.sampling_index];
-ics->num_swb = ff_aac_num_swb_128[sampling_index];
+ics->num_swb = ff_aac_num_swb_128[ac->oc[1].m4ac.sampling_index];
-ics->tns_max_bands = ff_tns_max_bands_128[sampling_index];
+ics->tns_max_bands = ff_tns_max_bands_128[ac->oc[1].m4ac.sampling_index];
 ics->predictor_present = 0;
 } else {
 ics->max_sfb = get_bits(gb, 6);
 ics->num_windows = 1;
 if (aot == AOT_ER_AAC_LD || aot == AOT_ER_AAC_ELD) {
-if (m4ac->frame_length_short) {
+ics->swb_offset = ff_swb_offset_512[ac->oc[1].m4ac.sampling_index];
-ics->swb_offset = ff_swb_offset_480[sampling_index];
+ics->num_swb = ff_aac_num_swb_512[ac->oc[1].m4ac.sampling_index];
-ics->num_swb = ff_aac_num_swb_480[sampling_index];
+ics->tns_max_bands = ff_tns_max_bands_512[ac->oc[1].m4ac.sampling_index];
-ics->tns_max_bands = ff_tns_max_bands_480[sampling_index];
-} else {
-ics->swb_offset = ff_swb_offset_512[sampling_index];
-ics->num_swb = ff_aac_num_swb_512[sampling_index];
-ics->tns_max_bands = ff_tns_max_bands_512[sampling_index];
-}
 if (!ics->num_swb || !ics->swb_offset)
 return AVERROR_BUG;
 } else {
-ics->swb_offset = ff_swb_offset_1024[sampling_index];
+ics->swb_offset = ff_swb_offset_1024[ac->oc[1].m4ac.sampling_index];
-ics->num_swb = ff_aac_num_swb_1024[sampling_index];
+ics->num_swb = ff_aac_num_swb_1024[ac->oc[1].m4ac.sampling_index];
-ics->tns_max_bands = ff_tns_max_bands_1024[sampling_index];
+ics->tns_max_bands = ff_tns_max_bands_1024[ac->oc[1].m4ac.sampling_index];
 }
 if (aot != AOT_ER_AAC_ELD) {
 ics->predictor_present = get_bits1(gb);
@@ -1524,12 +1490,13 @@ static void decode_mid_side_stereo(ChannelElement *cpe, GetBitContext *gb,
 int ms_present)
 {
 int idx;
-int max_idx = cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb;
 if (ms_present == 1) {
-for (idx = 0; idx < max_idx; idx++)
+for (idx = 0;
+idx < cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb;
+idx++)
 cpe->ms_mask[idx] = get_bits1(gb);
 } else if (ms_present == 2) {
-memset(cpe->ms_mask, 1, max_idx * sizeof(cpe->ms_mask[0]));
+memset(cpe->ms_mask, 1, sizeof(cpe->ms_mask[0]) * cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb);
 }
 }

@@ -1651,9 +1618,9 @@ static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
 cfo[k] = ac->random_state;
 }

-band_energy = ac->fdsp->scalarproduct_float(cfo, cfo, off_len);
+band_energy = ac->fdsp.scalarproduct_float(cfo, cfo, off_len);
 scale = sf[idx] / sqrtf(band_energy);
-ac->fdsp->vector_fmul_scalar(cfo, cfo, scale, off_len);
+ac->fdsp.vector_fmul_scalar(cfo, cfo, scale, off_len);
 }
 } else {
 const float *vq = ff_aac_codebook_vector_vals[cbt_m1];
@@ -1799,7 +1766,7 @@
 }
 } while (len -= 2);

-ac->fdsp->vector_fmul_scalar(cfo, cfo, sf[idx], off_len);
+ac->fdsp.vector_fmul_scalar(cfo, cfo, sf[idx], off_len);
 }
 }

@@ -2012,7 +1979,7 @@ static void apply_mid_side_stereo(AACContext *ac, ChannelElement *cpe)
 cpe->ch[0].band_type[idx] < NOISE_BT &&
 cpe->ch[1].band_type[idx] < NOISE_BT) {
 for (group = 0; group < ics->group_len[g]; group++) {
-ac->fdsp->butterflies_float(ch0 + group * 128 + offsets[i],
+ac->fdsp.butterflies_float(ch0 + group * 128 + offsets[i],
 ch1 + group * 128 + offsets[i],
 offsets[i+1] - offsets[i]);
 }
@@ -2051,7 +2018,7 @@ static void apply_intensity_stereo(AACContext *ac,
 c *= 1 - 2 * cpe->ms_mask[idx];
 scale = c * sce1->sf[idx];
 for (group = 0; group < ics->group_len[g]; group++)
-ac->fdsp->vector_fmul_scalar(coef1 + group * 128 + offsets[i],
+ac->fdsp.vector_fmul_scalar(coef1 + group * 128 + offsets[i],
 coef0 + group * 128 + offsets[i],
 scale,
 offsets[i + 1] - offsets[i]);
@@ -2301,12 +2268,7 @@ static int decode_extension_payload(AACContext *ac, GetBitContext *gb, int cnt,
 {
 int crc_flag = 0;
 int res = cnt;
-int type = get_bits(gb, 4);
+switch (get_bits(gb, 4)) { // extension type
-
-if (ac->avctx->debug & FF_DEBUG_STARTCODE)
-av_log(ac->avctx, AV_LOG_DEBUG, "extension type: %d len:%d\n", type, cnt);
-
-switch (type) { // extension type
 case EXT_SBR_DATA_CRC:
 crc_flag++;
 case EXT_SBR_DATA:
@@ -2419,15 +2381,15 @@ static void windowing_and_mdct_ltp(AACContext *ac, float *out,
 const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;

 if (ics->window_sequence[0] != LONG_STOP_SEQUENCE) {
-ac->fdsp->vector_fmul(in, in, lwindow_prev, 1024);
+ac->fdsp.vector_fmul(in, in, lwindow_prev, 1024);
 } else {
 memset(in, 0, 448 * sizeof(float));
-ac->fdsp->vector_fmul(in + 448, in + 448, swindow_prev, 128);
+ac->fdsp.vector_fmul(in + 448, in + 448, swindow_prev, 128);
 }
 if (ics->window_sequence[0] != LONG_START_SEQUENCE) {
-ac->fdsp->vector_fmul_reverse(in + 1024, in + 1024, lwindow, 1024);
+ac->fdsp.vector_fmul_reverse(in + 1024, in + 1024, lwindow, 1024);
 } else {
-ac->fdsp->vector_fmul_reverse(in + 1024 + 448, in + 1024 + 448, swindow, 128);
+ac->fdsp.vector_fmul_reverse(in + 1024 + 448, in + 1024 + 448, swindow, 128);
 memset(in + 1024 + 576, 0, 448 * sizeof(float));
 }
 ac->mdct_ltp.mdct_calc(&ac->mdct_ltp, out, in);
@@ -2480,17 +2442,17 @@ static void update_ltp(AACContext *ac, SingleChannelElement *sce)
 if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
 memcpy(saved_ltp, saved, 512 * sizeof(float));
 memset(saved_ltp + 576, 0, 448 * sizeof(float));
-ac->fdsp->vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
+ac->fdsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
 for (i = 0; i < 64; i++)
 saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * swindow[63 - i];
 } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
 memcpy(saved_ltp, ac->buf_mdct + 512, 448 * sizeof(float));
 memset(saved_ltp + 576, 0, 448 * sizeof(float));
-ac->fdsp->vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
+ac->fdsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
 for (i = 0; i < 64; i++)
 saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * swindow[63 - i];
 } else { // LONG_STOP or ONLY_LONG
-ac->fdsp->vector_fmul_reverse(saved_ltp, ac->buf_mdct + 512, &lwindow[512], 512);
+ac->fdsp.vector_fmul_reverse(saved_ltp, ac->buf_mdct + 512, &lwindow[512], 512);
 for (i = 0; i < 512; i++)
 saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * lwindow[511 - i];
 }
@@ -2531,19 +2493,19 @@ static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce)
 */
 if ((ics->window_sequence[1] == ONLY_LONG_SEQUENCE || ics->window_sequence[1] == LONG_STOP_SEQUENCE) &&
 (ics->window_sequence[0] == ONLY_LONG_SEQUENCE || ics->window_sequence[0] == LONG_START_SEQUENCE)) {
-ac->fdsp->vector_fmul_window( out, saved, buf, lwindow_prev, 512);
+ac->fdsp.vector_fmul_window( out, saved, buf, lwindow_prev, 512);
 } else {
 memcpy( out, saved, 448 * sizeof(float));

 if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
-ac->fdsp->vector_fmul_window(out + 448 + 0*128, saved + 448, buf + 0*128, swindow_prev, 64);
+ac->fdsp.vector_fmul_window(out + 448 + 0*128, saved + 448, buf + 0*128, swindow_prev, 64);
-ac->fdsp->vector_fmul_window(out + 448 + 1*128, buf + 0*128 + 64, buf + 1*128, swindow, 64);
+ac->fdsp.vector_fmul_window(out + 448 + 1*128, buf + 0*128 + 64, buf + 1*128, swindow, 64);
-ac->fdsp->vector_fmul_window(out + 448 + 2*128, buf + 1*128 + 64, buf + 2*128, swindow, 64);
+ac->fdsp.vector_fmul_window(out + 448 + 2*128, buf + 1*128 + 64, buf + 2*128, swindow, 64);
-ac->fdsp->vector_fmul_window(out + 448 + 3*128, buf + 2*128 + 64, buf + 3*128, swindow, 64);
+ac->fdsp.vector_fmul_window(out + 448 + 3*128, buf + 2*128 + 64, buf + 3*128, swindow, 64);
-ac->fdsp->vector_fmul_window(temp, buf + 3*128 + 64, buf + 4*128, swindow, 64);
+ac->fdsp.vector_fmul_window(temp, buf + 3*128 + 64, buf + 4*128, swindow, 64);
 memcpy( out + 448 + 4*128, temp, 64 * sizeof(float));
 } else {
-ac->fdsp->vector_fmul_window(out + 448, saved + 448, buf, swindow_prev, 64);
+ac->fdsp.vector_fmul_window(out + 448, saved + 448, buf, swindow_prev, 64);
 memcpy( out + 576, buf + 64, 448 * sizeof(float));
 }
 }
@@ -2551,9 +2513,9 @@ static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce)
 // buffer update
 if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
 memcpy( saved, temp + 64, 64 * sizeof(float));
-ac->fdsp->vector_fmul_window(saved + 64, buf + 4*128 + 64, buf + 5*128, swindow, 64);
+ac->fdsp.vector_fmul_window(saved + 64, buf + 4*128 + 64, buf + 5*128, swindow, 64);
-ac->fdsp->vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 64);
+ac->fdsp.vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 64);
-ac->fdsp->vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 64);
+ac->fdsp.vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 64);
 memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(float));
 } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
 memcpy( saved, buf + 512, 448 * sizeof(float));
@@ -2578,10 +2540,10 @@ static void imdct_and_windowing_ld(AACContext *ac, SingleChannelElement *sce)
 if (ics->use_kb_window[1]) {
 // AAC LD uses a low overlap sine window instead of a KBD window
 memcpy(out, saved, 192 * sizeof(float));
-ac->fdsp->vector_fmul_window(out + 192, saved + 192, buf, ff_sine_128, 64);
+ac->fdsp.vector_fmul_window(out + 192, saved + 192, buf, ff_sine_128, 64);
 memcpy( out + 320, buf + 64, 192 * sizeof(float));
 } else {
-ac->fdsp->vector_fmul_window(out, saved, buf, ff_sine_512, 256);
+ac->fdsp.vector_fmul_window(out, saved, buf, ff_sine_512, 256);
 }

 // buffer update
@@ -2593,13 +2555,12 @@ static void imdct_and_windowing_eld(AACContext *ac, SingleChannelElement *sce)
 float *in = sce->coeffs;
 float *out = sce->ret;
 float *saved = sce->saved;
+const float *const window = ff_aac_eld_window;
 float *buf = ac->buf_mdct;
 int i;
-const int n = ac->oc[1].m4ac.frame_length_short ? 480 : 512;
+const int n = 512;
 const int n2 = n >> 1;
 const int n4 = n >> 2;
-const float *const window = n == 480 ? ff_aac_eld_window_480 :
-ff_aac_eld_window_512;

 // Inverse transform, mapped to the conventional IMDCT by
 // Chivukula, R.K.; Reznik, Y.A.; Devarajan, V.,
@@ -2611,10 +2572,7 @@ static void imdct_and_windowing_eld(AACContext *ac, SingleChannelElement *sce)
 temp = in[i ]; in[i ] = -in[n - 1 - i]; in[n - 1 - i] = temp;
 temp = -in[i + 1]; in[i + 1] = in[n - 2 - i]; in[n - 2 - i] = temp;
 }
-if (n == 480)
+ac->mdct.imdct_half(&ac->mdct_ld, buf, in);
-ac->mdct480->imdct_half(ac->mdct480, buf, in, 1, -1.f/(16*1024*960));
-else
-ac->mdct.imdct_half(&ac->mdct_ld, buf, in);
 for (i = 0; i < n; i+=2) {
 buf[i] = -buf[i];
 }
@@ -2758,7 +2716,7 @@ static void spectral_to_sample(AACContext *ac)
 for (type = 3; type >= 0; type--) {
 for (i = 0; i < MAX_ELEM_ID; i++) {
 ChannelElement *che = ac->che[type][i];
-if (che && che->present) {
+if (che) {
 if (type <= TYPE_CPE)
 apply_channel_coupling(ac, che, type, i, BEFORE_TNS, apply_dependent_coupling);
 if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) {
@@ -2790,9 +2748,6 @@ static void spectral_to_sample(AACContext *ac)
 }
 if (type <= TYPE_CCE)
 apply_channel_coupling(ac, che, type, i, AFTER_IMDCT, apply_independent_coupling);
-che->present = 0;
-} else if (che) {
-av_log(ac->avctx, AV_LOG_VERBOSE, "ChannelElement %d.%d missing \n", type, i);
 }
 }
 }
@@ -2847,7 +2802,6 @@ static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
 ac->oc[1].m4ac.sample_rate = hdr_info.sample_rate;
 ac->oc[1].m4ac.sampling_index = hdr_info.sampling_index;
 ac->oc[1].m4ac.object_type = hdr_info.object_type;
-ac->oc[1].m4ac.frame_length_short = 0;
 if (ac->oc[0].status != OC_LOCKED ||
 ac->oc[0].m4ac.chan_config != hdr_info.chan_config ||
 ac->oc[0].m4ac.sample_rate != hdr_info.sample_rate) {
@@ -2864,12 +2818,11 @@ static int aac_decode_er_frame(AVCodecContext *avctx, void *data,
 int *got_frame_ptr, GetBitContext *gb)
 {
 AACContext *ac = avctx->priv_data;
-const MPEG4AudioConfig *const m4ac = &ac->oc[1].m4ac;
 ChannelElement *che;
 int err, i;
-int samples = m4ac->frame_length_short ? 960 : 1024;
+int samples = 1024;
-int chan_config = m4ac->chan_config;
+int chan_config = ac->oc[1].m4ac.chan_config;
-int aot = m4ac->object_type;
+int aot = ac->oc[1].m4ac.object_type;

 if (aot == AOT_ER_AAC_LD || aot == AOT_ER_AAC_ELD)
 samples >>= 1;
@@ -2881,13 +2834,13 @@ static int aac_decode_er_frame(AVCodecContext *avctx, void *data,

 // The FF_PROFILE_AAC_* defines are all object_type - 1
 // This may lead to an undefined profile being signaled
-ac->avctx->profile = aot - 1;
+ac->avctx->profile = ac->oc[1].m4ac.object_type - 1;

 ac->tags_mapped = 0;

 if (chan_config < 0 || chan_config >= 8) {
 avpriv_request_sample(avctx, "Unknown ER channel configuration %d",
-chan_config);
+ac->oc[1].m4ac.chan_config);
 return AVERROR_INVALIDDATA;
 }
 for (i = 0; i < tags_per_config[chan_config]; i++) {
@@ -2899,7 +2852,6 @@ static int aac_decode_er_frame(AVCodecContext *avctx, void *data,
 elem_type, elem_id);
 return AVERROR_INVALIDDATA;
 }
-che->present = 1;
 if (aot != AOT_ER_AAC_ELD)
 skip_bits(gb, 4);
 switch (elem_type) {
@@ -2963,9 +2915,6 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
 while ((elem_type = get_bits(gb, 3)) != TYPE_END) {
 elem_id = get_bits(gb, 4);

-if (avctx->debug & FF_DEBUG_STARTCODE)
-av_log(avctx, AV_LOG_DEBUG, "Elem type:%x id:%x\n", elem_type, elem_id);
-
 if (elem_type < TYPE_DSE) {
 if (!(che=get_che(ac, elem_type, elem_id))) {
 av_log(ac->avctx, AV_LOG_ERROR, "channel element %d.%d is not allocated\n",
@@ -2974,7 +2923,6 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
 goto fail;
 }
 samples = 1024;
-che->present = 1;
 }

 switch (elem_type) {
@@ -3183,8 +3131,6 @@ static av_cold int aac_decode_close(AVCodecContext *avctx)
 ff_mdct_end(&ac->mdct_small);
 ff_mdct_end(&ac->mdct_ld);
 ff_mdct_end(&ac->mdct_ltp);
-ff_imdct15_uninit(&ac->mdct480);
-av_freep(&ac->fdsp);
 return 0;
 }

@@ -3442,17 +3388,7 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out,
 return AVERROR_INVALIDDATA;
 }

-switch (latmctx->aac_ctx.oc[1].m4ac.object_type) {
+if ((err = aac_decode_frame_int(avctx, out, got_frame_ptr, &gb, avpkt)) < 0)
-case AOT_ER_AAC_LC:
-case AOT_ER_AAC_LTP:
-case AOT_ER_AAC_LD:
-case AOT_ER_AAC_ELD:
-err = aac_decode_er_frame(avctx, out, got_frame_ptr, &gb);
-break;
-default:
-err = aac_decode_frame_int(avctx, out, got_frame_ptr, &gb, avpkt);
-}
-if (err < 0)
 return err;

 return muxlength;
@@ -3504,18 +3440,6 @@ static const AVClass aac_decoder_class = {
 .version = LIBAVUTIL_VERSION_INT,
 };

-static const AVProfile profiles[] = {
-{ FF_PROFILE_AAC_MAIN, "Main" },
-{ FF_PROFILE_AAC_LOW, "LC" },
-{ FF_PROFILE_AAC_SSR, "SSR" },
-{ FF_PROFILE_AAC_LTP, "LTP" },
-{ FF_PROFILE_AAC_HE, "HE-AAC" },
-{ FF_PROFILE_AAC_HE_V2, "HE-AACv2" },
-{ FF_PROFILE_AAC_LD, "LD" },
-{ FF_PROFILE_AAC_ELD, "ELD" },
-{ FF_PROFILE_UNKNOWN },
-};
-
 AVCodec ff_aac_decoder = {
 .name = "aac",
 .long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"),
@@ -3532,7 +3456,6 @@ AVCodec ff_aac_decoder = {
 .channel_layouts = aac_channel_layout,
 .flush = flush,
 .priv_class = &aac_decoder_class,
-.profiles = profiles,
 };

 /*
@@ -3555,5 +3478,4 @@ AVCodec ff_aac_latm_decoder = {
 .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
 .channel_layouts = aac_channel_layout,
 .flush = flush,
-.profiles = profiles,
 };
@@ -53,11 +53,6 @@
|
|||||||
return AVERROR(EINVAL); \
|
return AVERROR(EINVAL); \
|
||||||
}
|
}
|
||||||
|
|
||||||
#define WARN_IF(cond, ...) \
|
|
||||||
if (cond) { \
|
|
||||||
av_log(avctx, AV_LOG_WARNING, __VA_ARGS__); \
|
|
||||||
}
|
|
||||||
|
|
||||||
float ff_aac_pow34sf_tab[428];
|
float ff_aac_pow34sf_tab[428];
|
||||||
|
|
||||||
static const uint8_t swb_size_1024_96[] = {
|
static const uint8_t swb_size_1024_96[] = {
|
||||||
@@ -107,8 +102,7 @@ static const uint8_t *swb_size_1024[] = {
|
|||||||
swb_size_1024_96, swb_size_1024_96, swb_size_1024_64,
|
swb_size_1024_96, swb_size_1024_96, swb_size_1024_64,
|
||||||
swb_size_1024_48, swb_size_1024_48, swb_size_1024_32,
|
swb_size_1024_48, swb_size_1024_48, swb_size_1024_32,
|
||||||
swb_size_1024_24, swb_size_1024_24, swb_size_1024_16,
|
swb_size_1024_24, swb_size_1024_24, swb_size_1024_16,
|
||||||
swb_size_1024_16, swb_size_1024_16, swb_size_1024_8,
|
swb_size_1024_16, swb_size_1024_16, swb_size_1024_8
|
||||||
swb_size_1024_8
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static const uint8_t swb_size_128_96[] = {
|
static const uint8_t swb_size_128_96[] = {
|
||||||
@@ -137,8 +131,7 @@ static const uint8_t *swb_size_128[] = {
|
|||||||
swb_size_128_96, swb_size_128_96, swb_size_128_96,
|
swb_size_128_96, swb_size_128_96, swb_size_128_96,
|
||||||
swb_size_128_48, swb_size_128_48, swb_size_128_48,
|
swb_size_128_48, swb_size_128_48, swb_size_128_48,
|
||||||
swb_size_128_24, swb_size_128_24, swb_size_128_16,
|
swb_size_128_24, swb_size_128_24, swb_size_128_16,
|
||||||
swb_size_128_16, swb_size_128_16, swb_size_128_8,
|
swb_size_128_16, swb_size_128_16, swb_size_128_8
|
||||||
swb_size_128_8
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/** default channel configurations */
|
/** default channel configurations */
|
||||||
@@ -172,7 +165,7 @@ static void put_audio_specific_config(AVCodecContext *avctx)
     PutBitContext pb;
     AACEncContext *s = avctx->priv_data;

-    init_put_bits(&pb, avctx->extradata, avctx->extradata_size);
+    init_put_bits(&pb, avctx->extradata, avctx->extradata_size*8);
     put_bits(&pb, 5, 2); //object type - AAC-LC
     put_bits(&pb, 4, s->samplerate_index); //sample rate index
     put_bits(&pb, 4, s->channels);
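The hunk above writes the leading AudioSpecificConfig fields (5-bit object type, 4-bit sampling-frequency index, 4-bit channel configuration) into the 5-byte extradata buffer; the two sides disagree only on whether init_put_bits() is handed the buffer size as a byte count or as that count times eight. A standalone sketch of the same MSB-first packing with a hand-rolled bit writer; put_bits_demo and the index/channel values are illustrative, not FFmpeg API.

/* Sketch of the bit packing done by put_audio_specific_config(): a 5-bit
 * audio object type, a 4-bit sampling-frequency index and a 4-bit channel
 * configuration written MSB-first. Hand-rolled bit writer for illustration;
 * the example inputs are object type 2 (AAC-LC), index 3, 2 channels. */
#include <stdio.h>
#include <stdint.h>

typedef struct { uint8_t *buf; size_t bitpos; } BitWriter;

static void put_bits_demo(BitWriter *bw, int nbits, unsigned value)
{
    for (int i = nbits - 1; i >= 0; i--) {
        size_t byte = bw->bitpos >> 3;
        int    bit  = 7 - (int)(bw->bitpos & 7);
        bw->buf[byte] |= ((value >> i) & 1u) << bit;
        bw->bitpos++;
    }
}

int main(void)
{
    uint8_t extradata[5] = { 0 };        /* avctx->extradata_size = 5 in this diff */
    BitWriter bw = { extradata, 0 };

    put_bits_demo(&bw, 5, 2);            /* audio object type: AAC-LC */
    put_bits_demo(&bw, 4, 3);            /* sampling frequency index (example) */
    put_bits_demo(&bw, 4, 2);            /* channel configuration (example) */

    printf("%02x %02x\n", (unsigned)extradata[0], (unsigned)extradata[1]);  /* 11 90 */
    return 0;
}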
@@ -259,7 +252,7 @@ static void apply_window_and_mdct(AACEncContext *s, SingleChannelElement *sce,
     int i;
     float *output = sce->ret_buf;

-    apply_window[sce->ics.window_sequence[0]](s->fdsp, sce, audio);
+    apply_window[sce->ics.window_sequence[0]](&s->fdsp, sce, audio);

     if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE)
         s->mdct1024.mdct_calc(&s->mdct1024, sce->coeffs, output);
@@ -267,7 +260,6 @@ static void apply_window_and_mdct(AACEncContext *s, SingleChannelElement *sce,
         for (i = 0; i < 1024; i += 128)
             s->mdct128.mdct_calc(&s->mdct128, sce->coeffs + i, output + i*2);
     memcpy(audio, audio + 1024, sizeof(audio[0]) * 1024);
-    memcpy(sce->pcoeffs, sce->coeffs, sizeof(sce->pcoeffs));
 }

 /**
@@ -319,23 +311,20 @@ static void adjust_frame_information(ChannelElement *cpe, int chans)
         start = 0;
         maxsfb = 0;
         cpe->ch[ch].pulse.num_pulse = 0;
-        for (w = 0; w < ics->num_windows; w += ics->group_len[w]) {
-            for (w2 = 0; w2 < ics->group_len[w]; w2++) {
-                start = (w+w2) * 128;
-                for (g = 0; g < ics->num_swb; g++) {
-                    //apply M/S
-                    if (cpe->common_window && !ch && cpe->ms_mask[w*16 + g]) {
-                        for (i = 0; i < ics->swb_sizes[g]; i++) {
-                            cpe->ch[0].coeffs[start+i] = (cpe->ch[0].pcoeffs[start+i] + cpe->ch[1].pcoeffs[start+i]) * 0.5f;
-                            cpe->ch[1].coeffs[start+i] = cpe->ch[0].coeffs[start+i] - cpe->ch[1].pcoeffs[start+i];
-                        }
-                    }
-                    start += ics->swb_sizes[g];
-                }
-                for (cmaxsfb = ics->num_swb; cmaxsfb > 0 && cpe->ch[ch].zeroes[w*16+cmaxsfb-1]; cmaxsfb--)
-                    ;
-                maxsfb = FFMAX(maxsfb, cmaxsfb);
-            }
+        for (w = 0; w < ics->num_windows*16; w += 16) {
+            for (g = 0; g < ics->num_swb; g++) {
+                //apply M/S
+                if (cpe->common_window && !ch && cpe->ms_mask[w + g]) {
+                    for (i = 0; i < ics->swb_sizes[g]; i++) {
+                        cpe->ch[0].coeffs[start+i] = (cpe->ch[0].coeffs[start+i] + cpe->ch[1].coeffs[start+i]) / 2.0;
+                        cpe->ch[1].coeffs[start+i] = cpe->ch[0].coeffs[start+i] - cpe->ch[1].coeffs[start+i];
+                    }
+                }
+                start += ics->swb_sizes[g];
+            }
+            for (cmaxsfb = ics->num_swb; cmaxsfb > 0 && cpe->ch[ch].zeroes[w+cmaxsfb-1]; cmaxsfb--)
+                ;
+            maxsfb = FFMAX(maxsfb, cmaxsfb);
         }
         ics->max_sfb = maxsfb;

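Both sides of the hunk above apply mid/side coding in place: the mid channel becomes (L + R) / 2 and the side channel mid - R, which equals (L - R) / 2. The left-hand version reads from the saved pcoeffs copies so the originals can be restored later, while the right-hand one overwrites coeffs directly. A standalone round-trip check of that arithmetic (sample values are arbitrary):

/* Check of the M/S transform used above:
 *   mid  = (l + r) / 2
 *   side = mid - r        (== (l - r) / 2)
 * and its inverse l = mid + side, r = mid - side. Example values only. */
#include <stdio.h>

int main(void)
{
    const float l = 0.75f, r = -0.25f;

    float mid  = (l + r) / 2.0f;   /* matches cpe->ch[0].coeffs[start+i] */
    float side = mid - r;          /* matches cpe->ch[1].coeffs[start+i] */

    float l2 = mid + side;
    float r2 = mid - side;

    printf("mid=%g side=%g  reconstructed l=%g r=%g\n", mid, side, l2, r2);
    return 0;
}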
@@ -518,7 +507,7 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
     AACEncContext *s = avctx->priv_data;
     float **samples = s->planar_samples, *samples2, *la, *overlap;
     ChannelElement *cpe;
-    int i, ch, w, g, chans, tag, start_ch, ret, ms_mode = 0;
+    int i, ch, w, g, chans, tag, start_ch, ret;
     int chan_el_counter[4];
     FFPsyWindowInfo windows[AAC_MAX_CHANNELS];

@@ -578,10 +567,6 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                 ics->group_len[w] = wi[ch].grouping[w];

             apply_window_and_mdct(s, &cpe->ch[ch], overlap);
-            if (isnan(cpe->ch->coeffs[0])) {
-                av_log(avctx, AV_LOG_ERROR, "Input contains NaN\n");
-                return AVERROR(EINVAL);
-            }
         }
         start_ch += chans;
     }
@@ -641,7 +626,6 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                 if (cpe->common_window) {
                     put_ics_info(s, &cpe->ch[0].ics);
                     encode_ms_info(&s->pb, cpe);
-                    if (cpe->ms_mode) ms_mode = 1;
                 }
             }
             for (ch = 0; ch < chans; ch++) {
@@ -656,15 +640,6 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
             s->psy.bitres.bits = frame_bits / s->channels;
             break;
         }
-        if (ms_mode) {
-            for (i = 0; i < s->chan_map[0]; i++) {
-                // Must restore coeffs
-                chans = tag == TYPE_CPE ? 2 : 1;
-                cpe = &s->cpe[i];
-                for (ch = 0; ch < chans; ch++)
-                    memcpy(cpe->ch[ch].coeffs, cpe->ch[ch].pcoeffs, sizeof(cpe->ch[ch].coeffs));
-            }
-        }

         s->lambda *= avctx->bit_rate * 1024.0f / avctx->sample_rate / frame_bits;

@@ -703,7 +678,6 @@ static av_cold int aac_encode_end(AVCodecContext *avctx)
     ff_psy_preprocess_end(s->psypp);
     av_freep(&s->buffer.samples);
     av_freep(&s->cpe);
-    av_freep(&s->fdsp);
     ff_af_queue_close(&s->afq);
     return 0;
 }
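The removals in the aac_encode_frame() hunks above all belong to one left-side feature: ms_mode records whether M/S was actually signalled, and after the rate-control loop the spectra are restored from the pcoeffs copies saved in apply_window_and_mdct() (the "Must restore coeffs" block). A standalone sketch of that save, transform, conditionally restore pattern; the buffer size, values and the transform itself are illustrative only.

/* Sketch: save a copy of the data, transform it in place for encoding,
 * then restore the copy afterwards, mirroring the removed pcoeffs /
 * "Must restore coeffs" logic. Sizes, values and the transform are
 * illustrative only. */
#include <stdio.h>
#include <string.h>

#define N 8

static void inplace_ms(float *x)            /* stand-in for the in-place M/S rewrite */
{
    for (int i = 0; i < N; i += 2) {
        float mid  = (x[i] + x[i + 1]) / 2.0f;
        float side = mid - x[i + 1];
        x[i]     = mid;
        x[i + 1] = side;
    }
}

int main(void)
{
    float coeffs[N] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    float pcoeffs[N];                        /* saved copy, like sce->pcoeffs */
    int   ms_used = 1;                       /* like ms_mode after encode_ms_info() */

    memcpy(pcoeffs, coeffs, sizeof(coeffs)); /* save before transforming */
    inplace_ms(coeffs);
    /* ...the transformed data is what gets written to the bitstream... */

    if (ms_used)                             /* mirrors "if (ms_mode)" */
        memcpy(coeffs, pcoeffs, sizeof(coeffs));  /* restore the originals */

    printf("coeffs[0]=%g coeffs[1]=%g\n", coeffs[0], coeffs[1]);
    return 0;
}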
@@ -712,9 +686,7 @@ static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
 {
     int ret = 0;

-    s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
-    if (!s->fdsp)
-        return AVERROR(ENOMEM);
+    avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);

     // window init
     ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
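The dsp_init() hunk above, together with the av_freep(&s->fdsp) removal and the AVFloatDSPContext member change elsewhere in this diff, switches between a heap-allocated DSP context and one embedded in AACEncContext. A generic sketch of the heap-allocated variant's lifecycle, with hypothetical names (DemoDSP, demo_dsp_alloc, DemoEncContext) standing in for the real API:

/* Sketch: the heap-allocated variant's lifecycle. DemoDSP, demo_dsp_alloc
 * and DemoEncContext are hypothetical stand-ins, not FFmpeg API. */
#include <stdio.h>
#include <stdlib.h>

typedef struct DemoDSP {
    void (*vector_fmul)(float *dst, const float *src, int len);
} DemoDSP;

typedef struct DemoEncContext {
    DemoDSP *dsp;                 /* pointer member, like "AVFloatDSPContext *fdsp" */
} DemoEncContext;

static DemoDSP *demo_dsp_alloc(void)
{
    return calloc(1, sizeof(DemoDSP));
}

static int demo_init(DemoEncContext *s)
{
    s->dsp = demo_dsp_alloc();    /* like s->fdsp = avpriv_float_dsp_alloc(...) */
    if (!s->dsp)
        return -1;                /* like return AVERROR(ENOMEM) */
    return 0;
}

static void demo_close(DemoEncContext *s)
{
    free(s->dsp);                 /* like av_freep(&s->fdsp) in aac_encode_end() */
    s->dsp = NULL;
}

int main(void)
{
    DemoEncContext s = { 0 };
    if (demo_init(&s) < 0)
        return 1;
    printf("dsp allocated at %p\n", (void *)s.dsp);
    demo_close(&s);
    return 0;
}

The embedded variant on the right needs neither the allocation-failure check nor the matching free in the close function.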
@@ -761,29 +733,23 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)

     s->channels = avctx->channels;

-    ERROR_IF(i == 16
-             || i >= (sizeof(swb_size_1024) / sizeof(*swb_size_1024))
-             || i >= (sizeof(swb_size_128) / sizeof(*swb_size_128)),
+    ERROR_IF(i == 16,
              "Unsupported sample rate %d\n", avctx->sample_rate);
     ERROR_IF(s->channels > AAC_MAX_CHANNELS,
              "Unsupported number of channels: %d\n", s->channels);
     ERROR_IF(avctx->profile != FF_PROFILE_UNKNOWN && avctx->profile != FF_PROFILE_AAC_LOW,
              "Unsupported profile %d\n", avctx->profile);
-    WARN_IF(1024.0 * avctx->bit_rate / avctx->sample_rate > 6144 * s->channels,
-             "Too many bits per frame requested, clamping to max\n");
+    ERROR_IF(1024.0 * avctx->bit_rate / avctx->sample_rate > 6144 * s->channels,
+             "Too many bits per frame requested\n");

-    avctx->bit_rate = (int)FFMIN(
-        6144 * s->channels / 1024.0 * avctx->sample_rate,
-        avctx->bit_rate);
-
     s->samplerate_index = i;

     s->chan_map = aac_chan_configs[s->channels-1];

-    if ((ret = dsp_init(avctx, s)) < 0)
+    if (ret = dsp_init(avctx, s))
         goto fail;

-    if ((ret = alloc_buffers(avctx, s)) < 0)
+    if (ret = alloc_buffers(avctx, s))
         goto fail;

     avctx->extradata_size = 5;
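Both sides of the hunk above enforce the same limit of 6144 bits per channel per 1024-sample frame; the left clamps the requested bit rate with FFMIN, the right treats an excessive request as an error. A standalone check of that arithmetic with example inputs:

/* Sketch: the 6144-bits-per-channel frame limit checked above.
 * max_bit_rate = 6144 * channels / 1024 * sample_rate. Inputs are examples. */
#include <stdio.h>

int main(void)
{
    const int    sample_rate = 48000;
    const int    channels    = 2;
    const double requested   = 800000;     /* requested bit rate, bits/s */

    double bits_per_frame = 1024.0 * requested / sample_rate;
    double max_bit_rate   = 6144.0 * channels / 1024.0 * sample_rate;

    printf("bits per 1024-sample frame: %.1f (limit %d)\n",
           bits_per_frame, 6144 * channels);
    printf("clamped bit rate: %.0f bits/s\n",
           requested > max_bit_rate ? max_bit_rate : requested);
    return 0;
}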
@@ -795,8 +761,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
     lengths[1] = ff_aac_num_swb_128[i];
     for (i = 0; i < s->chan_map[0]; i++)
         grouping[i] = s->chan_map[i + 1] == TYPE_CPE;
-    if ((ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths,
-                           s->chan_map[0], grouping)) < 0)
+    if (ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths, s->chan_map[0], grouping))
         goto fail;
     s->psypp = ff_psy_preprocess_init(avctx);
     s->coder = &ff_aac_coders[s->options.aac_coder];
@@ -811,7 +776,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
     for (i = 0; i < 428; i++)
         ff_aac_pow34sf_tab[i] = sqrt(ff_aac_pow2sf_tab[i] * sqrt(ff_aac_pow2sf_tab[i]));

-    avctx->initial_padding = 1024;
+    avctx->delay = 1024;
     ff_af_queue_init(avctx, &s->afq);

     return 0;
@@ -67,7 +67,7 @@ typedef struct AACEncContext {
     PutBitContext pb;
     FFTContext mdct1024; ///< long (1024 samples) frame transform context
     FFTContext mdct128; ///< short (128 samples) frame transform context
-    AVFloatDSPContext *fdsp;
+    AVFloatDSPContext fdsp;
     float *planar_samples[6]; ///< saved preprocessed input

     int samplerate_index; ///< MPEG-4 samplerate index
@@ -908,8 +908,8 @@ static void stereo_processing(PSContext *ps, float (*l)[32][2], float (*r)[32][2

 int ff_ps_apply(AVCodecContext *avctx, PSContext *ps, float L[2][38][64], float R[2][38][64], int top)
 {
-    float (*Lbuf)[32][2] = ps->Lbuf;
-    float (*Rbuf)[32][2] = ps->Rbuf;
+    LOCAL_ALIGNED_16(float, Lbuf, [91], [32][2]);
+    LOCAL_ALIGNED_16(float, Rbuf, [91], [32][2]);
     const int len = 32;
     int is34 = ps->is34bands;

@@ -71,8 +71,6 @@ typedef struct PSContext {
     DECLARE_ALIGNED(16, float, H12)[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
     DECLARE_ALIGNED(16, float, H21)[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
     DECLARE_ALIGNED(16, float, H22)[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
-    DECLARE_ALIGNED(16, float, Lbuf)[91][32][2];
-    DECLARE_ALIGNED(16, float, Rbuf)[91][32][2];
     int8_t opd_hist[PS_MAX_NR_IIDICC];
     int8_t ipd_hist[PS_MAX_NR_IIDICC];
     PSDSPContext dsp;
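The last two hunks move the two 91x32x2 float working buffers out of PSContext (persistent DECLARE_ALIGNED members) and onto the stack of ff_ps_apply() via LOCAL_ALIGNED_16. A standalone sketch of the same trade-off using C11 alignas; DemoPSContext and process() are hypothetical stand-ins, not the real parametric-stereo code.

/* Sketch: a large aligned scratch buffer kept in a long-lived context vs.
 * declared per call on the stack. DemoPSContext and process() are
 * hypothetical; the real code uses DECLARE_ALIGNED / LOCAL_ALIGNED_16. */
#include <stdalign.h>
#include <stdio.h>

typedef struct DemoPSContext {
    /* context variant: persistent, adds roughly 2 * 91*32*2*4 bytes (~46 KiB) */
    alignas(16) float Lbuf[91][32][2];
    alignas(16) float Rbuf[91][32][2];
} DemoPSContext;

static void process(float (*lbuf)[32][2], float (*rbuf)[32][2])
{
    lbuf[0][0][0] =  1.0f;        /* placeholder for the actual PS filtering */
    rbuf[0][0][0] = -1.0f;
}

static void apply_stack_variant(void)
{
    /* stack variant: same shape and alignment, but it only lives for this call
     * and puts the ~46 KiB into the call frame instead of the context */
    alignas(16) float Lbuf[91][32][2];
    alignas(16) float Rbuf[91][32][2];
    process(Lbuf, Rbuf);
    printf("stack variant:   %g %g\n", Lbuf[0][0][0], Rbuf[0][0][0]);
}

int main(void)
{
    static DemoPSContext ctx;     /* static so the demo itself is easy on the stack */
    process(ctx.Lbuf, ctx.Rbuf);
    printf("context variant: %g %g\n", ctx.Lbuf[0][0][0], ctx.Rbuf[0][0][0]);
    apply_stack_variant();
    return 0;
}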
Some files were not shown because too many files have changed in this diff.