Compare commits
214 Commits
| SHA1 |
|---|
| d0599a3516 |
| 742d7e9a6e |
| eb6f2a183a |
| 1e86b7108e |
| 4be1cc7b1d |
| 61dbd3f3d0 |
| 38d6ff31b7 |
| b0cd6fb590 |
| 50fd06ea32 |
| 808d5444c4 |
| 6915dd49c7 |
| c657b08fd7 |
| 6d14bea8b5 |
| 749cd89ca9 |
| 2b408d257f |
| 8d853dc341 |
| 86960b1101 |
| 94354e368d |
| 93a0682b1d |
| 0e16c3843a |
| 819955f0c6 |
| b36bda3c82 |
| 25b8d52fdd |
| 1a2aaa7497 |
| f18fc45d18 |
| 2684ff3573 |
| ea0f616a57 |
| 51b911e948 |
| bb508ddb8b |
| 9246eb1ec5 |
| c5b2ef3bdf |
| 07df052d8d |
| 9bb7e2bd90 |
| 90fa2460c0 |
| 50f5037947 |
| 21533730fc |
| 032476f830 |
| 57c7922331 |
| 73dd8f0a24 |
| 55637b2e5e |
| e6bc1fe10c |
| e6b18f5700 |
| 3791436eb5 |
| 61147f58ab |
| 29e435ca33 |
| 0540d5c5fc |
| 7f97231d97 |
| 4005a71def |
| 429347afa7 |
| a81b6a662a |
| 6168fe32f1 |
| 711374b626 |
| 9a63a36dc6 |
| db1a99a209 |
| fe8c81a0f3 |
| 728051d9b1 |
| 99d58a0da4 |
| 9783f9fb98 |
| 2ed0a77b7b |
| 804e1e1610 |
| 99d2d1404c |
| cb1c9294f3 |
| 84341627d7 |
| 4f694182e0 |
| a2cfb784fb |
| 6faf18acbd |
| 96807933d8 |
| 4ef32aa2a6 |
| 727730e279 |
| 5a829ee69e |
| 7fe22c3fe6 |
| c7565b143c |
| 303ecfc373 |
| 1456ed2dd5 |
| 79c9d9b134 |
| d0aa3d13fa |
| e5cc73e0a5 |
| 3429714f3d |
| ee902d3d2d |
| 9bc62da980 |
| 2a6d16ba5f |
| c7e967a7cb |
| 5262c88bb0 |
| bd78b9416d |
| 48ae72e501 |
| 0f671dfeac |
| 345962121d |
| 0cbf53bdf5 |
| b20409c690 |
| 7997ec54c9 |
| e805826903 |
| 137a000377 |
| 78c314e39e |
| 756d85dc14 |
| eeab3e1b20 |
| a75787a71a |
| 09425294c9 |
| 3572eaaf02 |
| 25da8d84a4 |
| 0f642909d8 |
| 6252e9141b |
| c65a731b6f |
| 58096b70fa |
| 0ae93844d0 |
| 657dc91b44 |
| 352d17086f |
| 193440f566 |
| 563e542b31 |
| b0b6d8de7e |
| 2f5c5767d1 |
| 979a54ed18 |
| d11bca8043 |
| a3ef410b9c |
| 260e5c6dbe |
| bac6554c74 |
| 1e366c15ed |
| 675fb3a8af |
| 25fc0faccb |
| 164083434e |
| 8026606497 |
| 6ac8ac0109 |
| f07e2ff697 |
| b62b3e1a25 |
| a45b8af839 |
| a443b48ccf |
| a45402d4c0 |
| 0bdc64e8b9 |
| 5fbf63ea39 |
| 4d74bb24e3 |
| 7f8d0cf93a |
| 28fba55306 |
| 7caee17204 |
| edec2a4da3 |
| 65074a5dae |
| e3b6144e0c |
| 8acbba0ec3 |
| 9e9cde6afa |
| 4f8814964c |
| 8a6770a214 |
| 763e6ecf83 |
| 07d508e4f5 |
| b089b58250 |
| a7dd37169c |
| 3032291b3a |
| f9bbc26e69 |
| b895e29941 |
| 3d71024f8a |
| b834dc14da |
| d0041dc8c4 |
| b3c082412c |
| 918ed73b70 |
| 2791eba1d7 |
| 0d3a07852c |
| c7a2ac6b6b |
| 0ce35b8ce8 |
| 25312a427b |
| 9143ab0e5a |
| 022bfd3dd4 |
| e0a12b3dc3 |
| 252ba4a925 |
| 5bb31e856d |
| eac0451e47 |
| 68c6347089 |
| bcd7f35717 |
| 93cbdcd4d3 |
| 959ab06c68 |
| 46db3121c6 |
| c96c755320 |
| 9f8cdd520b |
| f5631d23e0 |
| 50f4543c6b |
| 1344e91f33 |
| f13e6ec7a6 |
| bf2c9e1ad4 |
| 0663aab1d9 |
| e911f125fc |
| 5aead5ee05 |
| 3a5b749d7c |
| 4b4d0b0290 |
| c9b25252cb |
| 4400385d5f |
| d85e25fe0b |
| 3d1972d182 |
| 71b1abe638 |
| 991ef3a67e |
| b850b01533 |
| 23a17b4a3d |
| 3a8ad4b878 |
| b85a939633 |
| 7e130ca5b4 |
| f295f9488a |
| 2c01dd2ea5 |
| 5e26152ee4 |
| 135c733125 |
| 3b3a3c3d44 |
| 13244abcd3 |
| 9783d5bfda |
| 0e4efad93c |
| 0103bc67fd |
| d5af3fb1c5 |
| 6ec5a199ea |
| f7b5366657 |
| 542332e523 |
| aa24dd487f |
| 3bcd1daad7 |
| 91f88eab32 |
| aac467ae17 |
| 968e733b2e |
| 192d46e6d1 |
| bb8614cb7b |
| d9bdf7d9ae |
| a588e1e560 |
| 36e7385d0e |
| 5108323aa9 |
.gitignore (vendored): 4 changed lines

@@ -28,7 +28,6 @@
/ffserver
/config.*
/coverage.info
/avversion.h
/doc/*.1
/doc/*.3
/doc/*.html
@@ -37,7 +36,6 @@
/doc/avoptions_codec.texi
/doc/avoptions_format.texi
/doc/doxy/html/
/doc/examples/avio_dir_cmd
/doc/examples/avio_reading
/doc/examples/decoding_encoding
/doc/examples/demuxing_decoding
@@ -63,7 +61,6 @@
/libavutil/ffversion.h
/tests/audiogen
/tests/base64
/tests/checkasm/checkasm
/tests/data/
/tests/pixfmts.mak
/tests/rotozoom
@@ -86,7 +83,6 @@
/tools/pktdumper
/tools/probetest
/tools/qt-faststart
/tools/sidxindex
/tools/trasher
/tools/seek_print
/tools/uncoded_frame
Changelog: 295 changed lines

@@ -1,154 +1,157 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.

version 2.8.1:
- swscale: fix ticket #4881
- doc: fix spelling errors
- hls: only seek if there is an offset
- asfdec: add more checks for size left in asf packet buffer
- asfdec: alloc enough space for storing name in asf_read_metadata_obj
- avcodec/pngdec: Check blend_op.
- h264_mp4toannexb: fix pps offfset fault when there are more than one sps in avcc
- avcodec/h264_mp4toannexb_bsf: Use av_freep() to free spspps_buf
- avformat/avidec: Workaround broken initial frame
- avformat/hls: fix some cases of HLS streams which require cookies
- avcodec/pngdec: reset has_trns after every decode_frame_png()
- lavf/img2dec: Fix memory leak
- avcodec/mp3: fix skipping zeros
- avformat/srtdec: make sure we probe a number
- configure: check for ID3D11VideoContext
- avformat/vobsub: compare correct packet stream IDs
- avformat/srtdec: more lenient first line probing
- avformat/srtdec: fix number check for the first character
- avcodec/mips: build fix for MSA 64bit
- avcodec/mips: build fix for MSA
- avformat/httpauth: Add space after commas in HTTP/RTSP auth header
- libavformat/hlsenc: Use of uninitialized memory unlinking old files
- avcodec/x86/sbrdsp: Fix using uninitialized upper 32bit of noise
- avcodec/ffv1dec: Fix off by 1 error in quant_table_count check
- avcodec/ffv1dec: Explicitly check read_quant_table() return value
- dnxhddata: correct weight tables
- dnxhddec: decode and use interlace mb flag
- swscale: fix ticket #4877
- avcodec/rangecoder: Check e
- avcodec/ffv1: seperate slice_count from max_slice_count
- swscale: fix ticket 4850
- cmdutils: Filter dst/srcw/h
- avutil/log: fix zero length gnu_printf format string warning
- lavf/webvttenc: Require webvtt file to contain exactly one WebVTT stream.
- swscale/swscale: Fix "unused variable" warning
- avcodec/mjpegdec: Fix decoding RGBA RCT LJPEG
- MAINTAINERS: add 2.8, drop 2.2
- doc: mention libavcodec can decode Opus natively
- hevc: properly handle no_rasl_output_flag when removing pictures from the DPB
- avfilter/af_ladspa: process all channels for nb_handles > 1
- configure: add libsoxr to swresample's pkgconfig
- lavc: Fix compilation with --disable-everything --enable-parser=mpeg4video.

version 2.8:
- colorkey video filter
- BFSTM/BCSTM demuxer
- little-endian ADPCM_THP decoder
- Hap decoder and encoder
- DirectDraw Surface image/texture decoder
- ssim filter
- optional new ASF demuxer
- showvolume filter
- Many improvements to the JPEG 2000 decoder
- Go2Meeting decoding support
- adrawgraph audio and drawgraph video filter
- removegrain video filter
- Intel QSV-accelerated MPEG-2 video and HEVC encoding
- Intel QSV-accelerated MPEG-2 video and HEVC decoding
- Intel QSV-accelerated VC-1 video decoding
- libkvazaar HEVC encoder
- erosion, dilation, deflate and inflate video filters
- Dynamic Audio Normalizer as dynaudnorm filter
- Reverse video and areverse audio filter
- Random filter
- deband filter
- AAC fixed-point decoding
- sidechaincompress audio filter
- bitstream filter for converting HEVC from MP4 to Annex B
- acrossfade audio filter
- allyuv and allrgb video sources
- atadenoise video filter
- OS X VideoToolbox support
- aphasemeter filter
- showfreqs filter
- vectorscope filter
- waveform filter
- hstack and vstack filter
- Support DNx100 (1440x1080@8)
- VAAPI hevc hwaccel
- VDPAU hevc hwaccel
- framerate filter
- Switched default encoders for webm to VP9 and Opus
- Removed experimental flag from the JPEG 2000 encoder
version 2.5.5:
- vp9: make above buffer pointer 32-byte aligned.
- avcodec/dnxhddec: Check that the frame is interlaced before using cur_field
- avformat/mov: Disallow ".." in dref unless use_absolute_path is set
- avformat/mov: Check for string truncation in mov_open_dref()
- avformat/mov: Use sizeof(filename) instead of a literal number
- eac3dec: fix scaling
- ac3_fixed: fix computation of spx_noise_blend
- ac3_fixed: fix out-of-bound read
- ac3dec_fixed: always use the USE_FIXED=1 variant of the AC3DecodeContext
- avcodec/012v: redesign main loop
- avcodec/012v: Check dimensions more completely
- asfenc: fix leaking asf->index_ptr on error
- avcodec/options_table: remove extradata_size from the AVOptions table
- ffmdec: limit the backward seek to the last resync position
- ffmdec: make sure the time base is valid
- ffmdec: fix infinite loop at EOF
- ffmdec: initialize f_cprv, f_stvi and f_stau
- avformat/rm: limit packet size
- avcodec/webp: validate the distance prefix code
- avcodec/rv10: check size of s->mb_width * s->mb_height
- eamad: check for out of bounds read
- mdec: check for out of bounds read
- arm: Suppress tags about used cpu arch and extensions
- aic: Fix decoding files with odd dimensions
- avcodec/tiff: move bpp check to after "end:"
- mxfdec: Fix the error handling for when strftime fails
- avcodec/opusdec: Fix delayed sample value
- avcodec/opusdec: Clear out pointers per packet
- avcodec/utils: Align YUV411 by as much as the other YUV variants
- vp9: fix segmentation map retention with threading enabled.
- webp: ensure that each transform is only used once
- doc/protocols/tcp: fix units of listen_timeout option value, from microseconds to milliseconds
- fix VP9 packet decoder returning 0 instead of the used data size
- avformat/flvenc: check that the codec_tag fits in the available bits
- avcodec/utils: use correct printf specifier in ff_set_sar
- avutil/imgutils: correctly check for negative SAR components
- swscale/utils: clear formatConvBuffer on allocation
- avformat/bit: only accept the g729 codec and 1 channel
- avformat/bit: check that pkt->size is 10 in write_packet
- avformat/adxdec: check avctx->channels for invalid values
- avformat/adxdec: set avctx->channels in adx_read_header
- Fix buffer_size argument to init_put_bits() in multiple encoders.
- mips/acelp_filters: fix incorrect register constraint
- avcodec/hevc_ps: Sanity checks for some log2_* values
- avcodec/zmbv: Check len before reading in decode_frame()
- avcodec/h264: Only reinit quant tables if a new PPS is allowed
- avcodec/snowdec: Fix ref value check
- swscale/utils: More carefully merge and clear coefficients outside the input
- avcodec/a64multienc: Assert that the Packet size does not grow
- avcodec/a64multienc: simplify frame handling code
- avcodec/a64multienc: fix use of uninitialized values in to_meta_with_crop
- avcodec/a64multienc: initialize mc_meta_charset to zero
- avcodec/a64multienc: don't set incorrect packet size
- avcodec/a64multienc: use av_frame_ref instead of copying the frame
- avcodec/x86/mlpdsp_init: Simplify mlp_filter_channel_x86()
- h264: initialize H264Context.avctx in init_thread_copy
- wtvdec: fix integer overflow resulting in errors with large files
- avcodec/gif: fix off by one in column offsetting finding

version 2.7:
- FFT video filter
- TDSC decoder
- DTS lossless extension (XLL) decoding (not lossless, disabled by default)
- showwavespic filter
- DTS decoding through libdcadec
- Drop support for nvenc API before 5.0
- nvenc HEVC encoder
- Detelecine filter
- Intel QSV-accelerated H.264 encoding
- MMAL-accelerated H.264 decoding
- basic APNG encoder and muxer with default extension "apng"
- unpack DivX-style packed B-frames in MPEG-4 bitstream filter
- WebM Live Chunk Muxer
- nvenc level and tier options
- chorus filter
- Canopus HQ/HQA decoder
- Automatically rotate videos based on metadata in ffmpeg
- improved Quickdraw compatibility
- VP9 high bit-depth and extended colorspaces decoding support
- WebPAnimEncoder API when available for encoding and muxing WebP
- Direct3D11-accelerated decoding
- Support Secure Transport
- Multipart JPEG demuxer
version 2.5.4:
- avcodec/arm/videodsp_armv5te: Fix linking failure with shared libs
- avcodec/mjpegdec: Skip blocks which are outside the visible area
- avcodec/h264_slice: ignore SAR changes in slices after the first
- avcodec/h264_slice: Check picture structure before setting the related fields
- avcodec/h264_slice: Do not change frame_num after the first slice
- avutil/opt: Fix type used to access AV_OPT_TYPE_SAMPLE_FMT
- avutil/opt: Fix types used to access AV_OPT_TYPE_PIXEL_FMT
- avcodec/h264: Be more strict on rejecting pps/sps changes
- avcodec/h264: Be more strict on rejecting pps_id changes
- avcodec/h264_ps: More completely check the bit depths
- avformat/thp: Check av_get_packet() for failure not only for partial output
- swscale/utils: Limit filter shifting so as not to read from prior the array
- avcodec/mpegvideo_motion: Fix gmc chroma dimensions
- avcodec/mjpegdec: Check number of components for JPEG-LS
- avcodec/mjpegdec: Check escape sequence validity
- avformat/mpc8: Use uint64_t in *_get_v() to avoid undefined behavior
- avformat/mpc8: fix broken pointer math
- avformat/mpc8: fix hang with fuzzed file
- avformat/tta: fix crash with corrupted files
- avcodec/ppc/idctdsp.c: POWER LE support in idct_add_altivec()
- swscale/input: fix rgba64 alpha non native
- swscale/input: Fix alpha of YA16 input
- libavcodec/ppc/mpegvideoencdsp.c: fix stack smashing in pix_norm1_altivec() and pix_sum_altivec()
- avformat/rmdec: Check for overflow in ff_rm_read_mdpr_codecdata()
- avformat/mpeg: do not count PES packets inside PES packets during probing
- hevc: always clip luma_log2_weight_denom
- rtpdec_h263_rfc2190: Clear the stored bits if discarding buffered data
- aacenc: correctly check returned value
- swscale: check memory allocations
- opt: check memory allocation
- avformat/utils: check for malloc failure
- avcodec/flac_parser: fix handling EOF if no headers are found
- avfilter/vf_framepack: Check and update frame_rate
- vp8: improve memory allocation checks
- configure: enable vsx together with altivec for ppc64el
- avcodec/hevc: Fix handling of skipped_bytes() reallocation failures
- qpeg: avoid pointless invalid memcpy()

version 2.6:
- nvenc encoder
- 10bit spp filter
- colorlevels filter
- RIFX format for *.wav files
- RTP/mpegts muxer
- non continuous cache protocol support
- tblend filter
- cropdetect support for non 8bpp, absolute (if limit >= 1) and relative (if limit < 1.0) threshold
- Camellia symmetric block cipher
- OpenH264 encoder wrapper
- VOC seeking support
- Closed caption Decoder
- fspp, uspp, pp7 MPlayer postprocessing filters ported to native filters
- showpalette filter
- Twofish symmetric block cipher
- Support DNx100 (960x720@8)
- eq2 filter ported from libmpcodecs as eq filter
- removed libmpcodecs
- Changed default DNxHD colour range in QuickTime .mov derivatives to mpeg range
- ported softpulldown filter from libmpcodecs as repeatfields filter
- dcshift filter
- RTP depacketizer for loss tolerant payload format for MP3 audio (RFC 5219)
- RTP depacketizer for AC3 payload format (RFC 4184)
- palettegen and paletteuse filters
- VP9 RTP payload format (draft 0) experimental depacketizer
- RTP depacketizer for DV (RFC 6469)
- DXVA2-accelerated HEVC decoding
- AAC ELD 480 decoding
- Intel QSV-accelerated H.264 decoding
- DSS SP decoder and DSS demuxer
- Fix stsd atom corruption in DNxHD QuickTimes
- Canopus HQX decoder
- RTP depacketization of T.140 text (RFC 4103)
- Port MIPS optimizations to 64-bit
version 2.5.3:
- vp9: fix parser return values in error case
- ffmpeg: Clear error message array at init.
- avcodec/dvdsubdec: fix accessing dangling pointers
- avcodec/dvdsubdec: error on bitmaps with size 0
- cmdutils: Use 64bit for file size/offset related variable in cmdutils_read_file()
- mov: Fix negative size calculation in mov_read_default().
- avformat/mov: fix integer overflow in mov_read_udta_string()
- mov: Fix overflow and error handling in read_tfra().
- mov: Avoid overflow with mov_metadata_raw()
- avcodec/dvdsubdec: fix out of bounds accesses
- avfilter/vf_sab: fix filtering tiny images
- avformat/flvdec: Increase string array size
- avformat/flvdec: do not inject dts=0 metadata packets which failed to be parsed into a new data stream
- avformat/cdxl: Fix integer overflow of image_size
- libavformat: Build hevc.o when building the RTP muxer

version 2.5.2:
- avcodec/indeo3: ensure offsets are non negative
- avcodec/h264: Check *log2_weight_denom
- avcodec/hevc_ps: Check diff_cu_qp_delta_depth
- avcodec/h264: Clear delayed_pic on deallocation
- avcodec/hevc: clear filter_slice_edges() on allocation
- avcodec/dcadec: Check that the added xch channel isnt already there
- avcodec/indeo3: use signed variables to avoid underflow
- swscale: increase yuv2rgb table headroom
- avformat/mov: fix integer overflow of size
- avformat/mov: check atom nesting depth
- avcodec/utvideodec: Fix handling of slice_height=0
- avcodec/xface: correct the XFACE_MAX_* values
- avcodec/vmdvideo: Check len before using it in method 3
- configure: create the tests directory like the doc directory
- mmvideo: check frame dimensions
- jvdec: check frame dimensions

version 2.5.1:
- lavu/frame: fix malloc error path in av_frame_copy_props()
- avformat/aviobuf: Check that avio_seek() target is non negative
- swresample/soxr_resample: fix error handling
- avformat/flvdec: fix potential use of uninitialized variables
- avformat/crypto: fix key vs iv typo
- configure: use use_pkg_config() instead of check_pkg_config() for libsmbclient
- avcodec/ppc/vp3dsp_altivec: POWER LE support to vp3_idct_add_altivec()
- avformat/matroskadec: fix handling of recursive SeekHead elements
- doc/examples/filtering_video: fix frame rate
- avcodec/mpegaudiodec_template: only allocate fdsp when its used
- doc/examples/transcoding: check encoder before using it
- update MAINTAINERS file
- POWER LE support in put_vp8_epel_h_altivec_core() put_vp8_epel_v_altivec_core() put_vp8_pixels16_altivec()
- POWER LE support in vc1_inv_trans_8x4_altivec()

version 2.5:
- HEVC/H.265 RTP payload format (draft v6) packetizer
@@ -165,7 +168,7 @@ version 2.5:
- creating DASH compatible fragmented MP4, MPEG-DASH segmenting muxer
- WebP muxer with animated WebP support
- zygoaudio decoding support
- APNG demuxer
- APNG decoder and demuxer
- postproc visualization support

@@ -176,7 +179,7 @@ version 2.4:
- ICY metadata are now requested by default with the HTTP protocol
- support for using metadata in stream specifiers in fftools
- LZMA compression support in TIFF decoder
- H.261 RTP payload format (RFC 4587) depacketizer and experimental packetizer
- support for H.261 RTP payload format (RFC 4587)
- HEVC/H.265 RTP payload format (draft v6) depacketizer
- added codecview filter to visualize information exported by some codecs
- Matroska 3D support thorugh side data
@@ -656,7 +659,7 @@ easier to use. The changes are:
all the stream in the first input file, except for the second audio
stream'.
* There is a new option -c (or -codec) for choosing the decoder/encoder to
use, which makes it possible to precisely specify target stream(s) consistently with
use, which allows to precisely specify target stream(s) consistently with
other options. E.g. -c:v lib264 sets the codec for all video streams, -c:a:0
libvorbis sets the codec for the first audio stream and -c copy copies all
the streams without reencoding. Old -vcodec/-acodec/-scodec options are now
LICENSE.md: 113 changed lines

@@ -1,76 +1,70 @@
#FFmpeg:

Most files in FFmpeg are under the GNU Lesser General Public License version 2.1
or later (LGPL v2.1+). Read the file `COPYING.LGPLv2.1` for details. Some other
or later (LGPL v2.1+). Read the file COPYING.LGPLv2.1 for details. Some other
files have MIT/X11/BSD-style licenses. In combination the LGPL v2.1+ applies to
FFmpeg.

Some optional parts of FFmpeg are licensed under the GNU General Public License
version 2 or later (GPL v2+). See the file `COPYING.GPLv2` for details. None of
these parts are used by default, you have to explicitly pass `--enable-gpl` to
version 2 or later (GPL v2+). See the file COPYING.GPLv2 for details. None of
these parts are used by default, you have to explicitly pass --enable-gpl to
configure to activate them. In this case, FFmpeg's license changes to GPL v2+.

Specifically, the GPL parts of FFmpeg are:

- libpostproc
- libmpcodecs
- optional x86 optimizations in the files
- `libavcodec/x86/flac_dsp_gpl.asm`
- `libavcodec/x86/idct_mmx.c`
- `libavfilter/x86/vf_removegrain.asm`
libavcodec/x86/flac_dsp_gpl.asm
libavcodec/x86/idct_mmx.c
- libutvideo encoding/decoding wrappers in
`libavcodec/libutvideo*.cpp`
- the X11 grabber in `libavdevice/x11grab.c`
libavcodec/libutvideo*.cpp
- the X11 grabber in libavdevice/x11grab.c
- the swresample test app in
`libswresample/swresample-test.c`
- the `texi2pod.pl` tool
libswresample/swresample-test.c
- the texi2pod.pl tool
- the following filters in libavfilter:
- `f_ebur128.c`
- `vf_blackframe.c`
- `vf_boxblur.c`
- `vf_colormatrix.c`
- `vf_cover_rect.c`
- `vf_cropdetect.c`
- `vf_delogo.c`
- `vf_eq.c`
- `vf_find_rect.c`
- `vf_fspp.c`
- `vf_geq.c`
- `vf_histeq.c`
- `vf_hqdn3d.c`
- `vf_interlace.c`
- `vf_kerndeint.c`
- `vf_mcdeint.c`
- `vf_mpdecimate.c`
- `vf_owdenoise.c`
- `vf_perspective.c`
- `vf_phase.c`
- `vf_pp.c`
- `vf_pp7.c`
- `vf_pullup.c`
- `vf_sab.c`
- `vf_smartblur.c`
- `vf_repeatfields.c`
- `vf_spp.c`
- `vf_stereo3d.c`
- `vf_super2xsai.c`
- `vf_tinterlace.c`
- `vf_uspp.c`
- `vsrc_mptestsrc.c`
- f_ebur128.c
- vf_blackframe.c
- vf_boxblur.c
- vf_colormatrix.c
- vf_cropdetect.c
- vf_decimate.c
- vf_delogo.c
- vf_geq.c
- vf_histeq.c
- vf_hqdn3d.c
- vf_interlace.c
- vf_kerndeint.c
- vf_mcdeint.c
- vf_mp.c
- vf_owdenoise.c
- vf_perspective.c
- vf_phase.c
- vf_pp.c
- vf_pullup.c
- vf_sab.c
- vf_smartblur.c
- vf_spp.c
- vf_stereo3d.c
- vf_super2xsai.c
- vf_tinterlace.c
- vsrc_mptestsrc.c

Should you, for whatever reason, prefer to use version 3 of the (L)GPL, then
the configure parameter `--enable-version3` will activate this licensing option
for you. Read the file `COPYING.LGPLv3` or, if you have enabled GPL parts,
`COPYING.GPLv3` to learn the exact legal terms that apply in this case.
the configure parameter --enable-version3 will activate this licensing option
for you. Read the file COPYING.LGPLv3 or, if you have enabled GPL parts,
COPYING.GPLv3 to learn the exact legal terms that apply in this case.

There are a handful of files under other licensing terms, namely:

* The files `libavcodec/jfdctfst.c`, `libavcodec/jfdctint_template.c` and
`libavcodec/jrevdct.c` are taken from libjpeg, see the top of the files for
* The files libavcodec/jfdctfst.c, libavcodec/jfdctint_template.c and
libavcodec/jrevdct.c are taken from libjpeg, see the top of the files for
licensing details. Specifically note that you must credit the IJG in the
documentation accompanying your program if you only distribute executables.
You must also indicate any changes including additions and deletions to
those three files in the documentation.
* `tests/reference.pnm` is under the expat license.
tests/reference.pnm is under the expat license

external libraries
@@ -83,22 +77,21 @@ compatible libraries
--------------------

The following libraries are under GPL:
- frei0r
- libcdio
- libutvideo
- libvidstab
- libx264
- libx265
- libxavs
- libxvid

- frei0r
- libcdio
- libutvideo
- libvidstab
- libx264
- libx265
- libxavs
- libxvid
When combining them with FFmpeg, FFmpeg needs to be licensed as GPL as well by
passing `--enable-gpl` to configure.
passing --enable-gpl to configure.

The OpenCORE and VisualOn libraries are under the Apache License 2.0. That
license is incompatible with the LGPL v2.1 and the GPL v2, but not with
version 3 of those licenses. So to combine these libraries with FFmpeg, the
license version needs to be upgraded by passing `--enable-version3` to configure.
license version needs to be upgraded by passing --enable-version3 to configure.

incompatible libraries
----------------------
@@ -106,7 +99,7 @@ incompatible libraries
The Fraunhofer AAC library, FAAC and aacplus are under licenses which
are incompatible with the GPLv2 and v3. We do not know for certain if their
licenses are compatible with the LGPL.
If you wish to enable these libraries, pass `--enable-nonfree` to configure.
If you wish to enable these libraries, pass --enable-nonfree to configure.
But note that if you enable any of these libraries the resulting binary will
be under a complex license mix that is more restrictive than the LGPL and that
may result in additional obligations. It is possible that these
MAINTAINERS: 47 changed lines

@@ -14,6 +14,7 @@ patches and related discussions.
Project Leader
==============

Michael Niedermayer
final design decisions

@@ -42,9 +43,9 @@ QuickTime faststart:
Miscellaneous Areas
===================

documentation Stefano Sabatini, Mike Melanson, Timothy Gu, Lou Logan
documentation Stefano Sabatini, Mike Melanson, Timothy Gu
build system (configure, makefiles) Diego Biurrun, Mans Rullgard
project server Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Lou Logan
project server Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser
presets Robert Swain
metadata subsystem Aurelien Jacobs
release management Michael Niedermayer
@@ -58,7 +59,7 @@ fate.ffmpeg.org Timothy Gu
Trac bug tracker Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos, Lou Logan
mailing lists Michael Niedermayer, Baptiste Coudurier, Lou Logan
Google+ Paul B Mahol, Michael Niedermayer, Alexander Strasser
Twitter Lou Logan, Reynaldo H. Verdejo Pinochet
Twitter Lou Logan
Launchpad Timothy Gu

@@ -137,7 +138,6 @@ Codecs:
4xm.c Michael Niedermayer
8bps.c Roberto Togni
8svx.c Jaikrishnan Menon
aacenc*, aaccoder.c Rostislav Pehlivanov
aasc.c Kostya Shishkov
ac3* Justin Ruggles
alacenc.c Jaikrishnan Menon
@@ -156,7 +156,6 @@ Codecs:
celp_filters.* Vitor Sessak
cinepak.c Roberto Togni
cinepakenc.c Rl / Aetey G.T. AB
ccaption_dec.c Anshul Maheshwari
cljr Alex Beregszaszi
cllc.c Derek Buitenhuis
cook.c, cookdata.h Benjamin Larsson
@@ -166,12 +165,10 @@ Codecs:
dca.c Kostya Shishkov, Benjamin Larsson
dnxhd* Baptiste Coudurier
dpcm.c Mike Melanson
dss_sp.c Oleksij Rempel, Michael Niedermayer
dv.c Roman Shaposhnik
dvbsubdec.c Anshul Maheshwari
dxa.c Kostya Shishkov
eacmv*, eaidct*, eat* Peter Ross
evrc* Paul B Mahol
exif.c, exif.h Thilo Borgmann
ffv1* Michael Niedermayer
ffwavesynth.c Nicolas George
@@ -201,7 +198,6 @@ Codecs:
libcelt_dec.c Nicolas George
libdirac* David Conrad
libgsm.c Michel Bardiaux
libkvazaar.c Arttu Ylä-Outinen
libopenjpeg.c Jaikrishnan Menon
libopenjpegenc.c Michael Bradshaw
libschroedinger* David Conrad
@@ -230,7 +226,6 @@ Codecs:
msvideo1.c Mike Melanson
nellymoserdec.c Benjamin Larsson
nuv.c Reimar Doeffinger
nvenc.c Timo Rothenpieler
paf.* Paul B Mahol
pcx.c Ivo van Poorten
pgssubdec.c Reimar Doeffinger
@@ -239,7 +234,6 @@ Codecs:
qdm2.c, qdm2data.h Roberto Togni, Benjamin Larsson
qdrw.c Kostya Shishkov
qpeg.c Kostya Shishkov
qsv* Ivan Uskov
qtrle.c Mike Melanson
ra144.c, ra144.h, ra288.c, ra288.h Roberto Togni
resample2.c Michael Niedermayer
@@ -301,12 +295,11 @@ Codecs:

Hardware acceleration:
crystalhd.c Philip Langdale
dxva2* Hendrik Leppkes, Laurent Aimar
dxva2* Laurent Aimar
libstagefright.cpp Mohamed Naufal
vaapi* Gwenole Beauchesne
vda* Sebastien Zwickert
vdpau* Philip Langdale, Carl Eugen Hoyos
videotoolbox* Sebastien Zwickert
vdpau* Carl Eugen Hoyos

libavdevice
@@ -338,7 +331,6 @@ Generic parts:
graphdump.c Nicolas George

Filters:
f_drawgraph.c Paul B Mahol
af_adelay.c Paul B Mahol
af_aecho.c Paul B Mahol
af_afade.c Paul B Mahol
@@ -349,21 +341,14 @@ Filters:
af_astreamsync.c Nicolas George
af_atempo.c Pavel Koshevoy
af_biquads.c Paul B Mahol
af_chorus.c Paul B Mahol
af_compand.c Paul B Mahol
af_ladspa.c Paul B Mahol
af_pan.c Nicolas George
af_sidechaincompress.c Paul B Mahol
af_silenceremove.c Paul B Mahol
avf_aphasemeter.c Paul B Mahol
avf_avectorscope.c Paul B Mahol
avf_showcqt.c Muhammad Faiz
vf_blend.c Paul B Mahol
vf_colorchannelmixer.c Paul B Mahol
vf_colorbalance.c Paul B Mahol
vf_colorkey.c Timo Rothenpieler
vf_colorlevels.c Paul B Mahol
vf_deband.c Paul B Mahol
vf_dejudder.c Nicholas Robbins
vf_delogo.c Jean Delvare (CC <khali@linux-fr.org>)
vf_drawbox.c/drawgrid Andrey Utkin
@@ -374,16 +359,12 @@ Filters:
vf_il.c Paul B Mahol
vf_lenscorrection.c Daniel Oberhoff
vf_mergeplanes.c Paul B Mahol
vf_neighbor.c Paul B Mahol
vf_psnr.c Paul B Mahol
vf_random.c Paul B Mahol
vf_scale.c Michael Niedermayer
vf_separatefields.c Paul B Mahol
vf_ssim.c Paul B Mahol
vf_stereo3d.c Paul B Mahol
vf_telecine.c Paul B Mahol
vf_yadif.c Michael Niedermayer
vf_zoompan.c Paul B Mahol

Sources:
vsrc_mandelbrot.c Michael Niedermayer
@@ -400,7 +381,6 @@ Generic parts:

Muxers/Demuxers:
4xm.c Mike Melanson
aadec.c Vesselin Bontchev (vesselin.bontchev at yandex dot com)
adtsenc.c Robert Swain
afc.c Paul B Mahol
aiffdec.c Baptiste Coudurier, Matthieu Bouron
@@ -419,7 +399,6 @@ Muxers/Demuxers:
cdxl.c Paul B Mahol
crc.c Michael Niedermayer
daud.c Reimar Doeffinger
dss.c Oleksij Rempel, Michael Niedermayer
dtshddec.c Paul B Mahol
dv.c Roman Shaposhnik
dxa.c Kostya Shishkov
@@ -432,7 +411,6 @@ Muxers/Demuxers:
gxf.c Reimar Doeffinger
gxfenc.c Baptiste Coudurier
hls.c Anssi Hannula
hls encryption (hlsenc.c) Christian Suloway
idcin.c Mike Melanson
idroqdec.c Mike Melanson
iff.c Jaikrishnan Menon
@@ -485,13 +463,9 @@ Muxers/Demuxers:
rmdec.c, rmenc.c Ronald S. Bultje, Kostya Shishkov
rtmp* Kostya Shishkov
rtp.c, rtpenc.c Martin Storsjo
rtpdec_ac3.* Gilles Chanteperdrix
rtpdec_dv.* Thomas Volkert
rtpdec_h261.*, rtpenc_h261.* Thomas Volkert
rtpdec_hevc.*, rtpenc_hevc.* Thomas Volkert
rtpdec_mpa_robust.* Gilles Chanteperdrix
rtpdec_asf.* Ronald S. Bultje
rtpdec_vp9.c Thomas Volkert
rtpenc_mpv.*, rtpenc_aac.* Martin Storsjo
rtsp.c Luca Barbato
sbgdec.c Nicolas George
@@ -518,7 +492,6 @@ Muxers/Demuxers:
wvenc.c Paul B Mahol

Protocols:
async.c Zhang Rui
bluray.c Petri Hintukainen
ftp.c Lukasz Marek
http.c Ronald S. Bultje
@@ -554,7 +527,7 @@ Amiga / PowerPC Colin Ward
Linux / PowerPC Luca Barbato
Windows MinGW Alex Beregszaszi, Ramiro Polla
Windows Cygwin Victor Paesa
Windows MSVC Matthew Oliver, Hendrik Leppkes
Windows MSVC Matthew Oliver
Windows ICL Matthew Oliver
ADI/Blackfin DSP Marc Hoffman
Sparc Roman Shaposhnik
@@ -564,11 +537,10 @@ x86 Michael Niedermayer
Releases
========

2.8 Michael Niedermayer
2.7 Michael Niedermayer
2.6 Michael Niedermayer
2.5 Michael Niedermayer
2.4 Michael Niedermayer
2.2 Michael Niedermayer
1.2 Michael Niedermayer

If you want to maintain an older release, please contact us

@@ -599,7 +571,6 @@ Michael Niedermayer 9FF2 128B 147E F673 0BAD F133 611E C787 040B 0FAB
Nicolas George 24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93
Panagiotis Issaris 6571 13A3 33D9 3726 F728 AA98 F643 B12E ECF3 E029
Peter Ross A907 E02F A6E5 0CD2 34CD 20D2 6760 79C5 AC40 DD6B
Philip Langdale 5DC5 8D66 5FBA 3A43 18EC 045E F8D6 B194 6A75 682E
Reimar Doeffinger C61D 16E5 9E2C D10C 8958 38A4 0899 A2B9 06D4 D9C7
Reinhard Tartler 9300 5DC2 7E87 6C37 ED7B CA9A 9808 3544 9453 48A4
Reynaldo H. Verdejo Pinochet 6E27 CD34 170C C78E 4D4F 5F40 C18E 077F 3114 452A
Makefile: 12 changed lines

@@ -31,10 +31,7 @@ $(foreach prog,$(AVBASENAMES),$(eval OBJS-$(prog)-$(CONFIG_OPENCL) += cmdutils_o
OBJS-ffmpeg += ffmpeg_opt.o ffmpeg_filter.o
OBJS-ffmpeg-$(HAVE_VDPAU_X11) += ffmpeg_vdpau.o
OBJS-ffmpeg-$(HAVE_DXVA2_LIB) += ffmpeg_dxva2.o
ifndef CONFIG_VIDEOTOOLBOX
OBJS-ffmpeg-$(CONFIG_VDA) += ffmpeg_videotoolbox.o
endif
OBJS-ffmpeg-$(CONFIG_VIDEOTOOLBOX) += ffmpeg_videotoolbox.o
OBJS-ffmpeg-$(CONFIG_VDA) += ffmpeg_vda.o
OBJS-ffserver += ffserver_config.o

TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64
@@ -63,7 +60,6 @@ include $(SRC_PATH)/common.mak

FF_EXTRALIBS := $(FFEXTRALIBS)
FF_DEP_LIBS := $(DEP_LIBS)
FF_STATIC_DEP_LIBS := $(STATIC_DEP_LIBS)

all: $(AVPROGS)

@@ -84,8 +80,8 @@ SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS \
HEADERS ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
ARMV5TE-OBJS ARMV6-OBJS ARMV8-OBJS VFP-OBJS NEON-OBJS \
ALTIVEC-OBJS MMX-OBJS YASM-OBJS \
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS MSA-OBJS \
MMI-OBJS OBJS SLIBOBJS HOSTOBJS TESTOBJS
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS MIPS32R2-OBJS \
OBJS SLIBOBJS HOSTOBJS TESTOBJS

define RESET
$(1) :=
@@ -179,7 +175,7 @@ clean::

distclean::
$(RM) $(DISTCLEANSUFFIXES)
$(RM) config.* .config libavutil/avconfig.h .version avversion.h version.h libavutil/ffversion.h libavcodec/codec_names.h
$(RM) config.* .config libavutil/avconfig.h .version version.h libavutil/ffversion.h libavcodec/codec_names.h

config:
$(SRC_PATH)/configure $(value FFMPEG_CONFIGURATION)
README.md

@@ -19,10 +19,8 @@ such as audio, video, subtitles and related metadata.
* [ffmpeg](http://ffmpeg.org/ffmpeg.html) is a command line toolbox to
manipulate, convert and stream multimedia content.
* [ffplay](http://ffmpeg.org/ffplay.html) is a minimalistic multimedia player.
* [ffprobe](http://ffmpeg.org/ffprobe.html) is a simple analysis tool to inspect
* [ffprobe](http://ffmpeg.org/ffprobe.html) is a simple analisys tool to inspect
multimedia content.
* [ffserver](http://ffmpeg.org/ffserver.html) is a multimedia streaming server
for live broadcasts.
* Additional small tools such as `aviocat`, `ismindex` and `qt-faststart`.

## Documentation
RELEASE_NOTES: 108 changed lines

@@ -1,15 +1,101 @@
┌────────────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 2.5 "Bohr"    │
└────────────────────────────────────────┘

┌────────────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 2.8 "Feynman" │
└────────────────────────────────────────┘
The FFmpeg Project proudly presents FFmpeg 2.5 "Bohr", 2.5 months after the
release of 2.4.

The FFmpeg Project proudly presents FFmpeg 2.8 "Feynman", about 3
months after the release of FFmpeg 2.7.
The most important new features are AVFoundation screen-grabbing support,
animated WebP decoding support, and Animated PNG support. In addition, many
exciting features for video streaming are also implemented, including MPEG-
DASH fragmenting muxer, HEVC RTP payload muxer, and UDP Lite support.

A complete Changelog is available at the root of the project, and the
complete Git history on http://source.ffmpeg.org.
As usual, if you have any question on this release or any FFmpeg related
topic, feel free to join us on the #ffmpeg IRC channel (on
irc.freenode.net).

We hope you will like this release as much as we enjoyed working on it, and
as usual, if you have any questions about it, or any FFmpeg related topic,
feel free to join us on the #ffmpeg IRC channel (on irc.freenode.net) or ask
on the mailing-lists.
┌────────────────────────────┐
│ 🔨 API Information         │
└────────────────────────────┘

FFmpeg 2.5 includes the following library versions:

• libavutil 54.15.100
• libavcodec 56.13.100
• libavformat 56.15.102
• libavdevice 56. 3.100
• libavfilter 5. 2.103
• libswscale 3. 1.101
• libswresample 1. 1.100
• libpostproc 53. 3.100

Important API changes since 2.4:

• avpriv_dv_frame_profile2() has been deprecated

Please refer to the doc/APIchanges file for more information.

┌────────────────────────────┐
│ ★ List of New Features     │
└────────────────────────────┘

┌────────────────────────────┐
│ ffprobe                    │
└────────────────────────────┘

• -show_pixel_formats option

┌────────────────────────────┐
│ ffserver                   │
└────────────────────────────┘

• codec private options support

┌────────────────────────────┐
│ libavcodec                 │
└────────────────────────────┘

• STL subtitle decoder
• libutvideo YUV 4:2:2 10bit support
• animated WebP decoding support
• zygoaudio decoding support
• APNG decoder

┌────────────────────────────┐
│ libavdevice                │
└────────────────────────────┘

• XCB-based screen-grabber
• AVFoundation screen capturing support

┌────────────────────────────┐
│ libavformat                │
└────────────────────────────┘

• HEVC/H.265 RTP payload format (draft v6) packetizer
• SUP/PGS subtitle demuxer
• STL subtitle demuxer
• UDP-Lite support (RFC 3828)
• MPEG-DASH segmenting muxer, which allows creating DASH compatible
fragmented MP4
• WebP muxer
• APNG demuxer

┌────────────────────────────┐
│ libavfilter                │
└────────────────────────────┘

• xBR scaling filter

┌────────────────────────────┐
│ libavutil                  │
└────────────────────────────┘

• CAST128 symmetric block cipher, ECB mode

┌────────────────────────────┐
│ libpostproc                │
└────────────────────────────┘

• visualization support
arch.mak: 4 changed lines

@@ -5,13 +5,11 @@ OBJS-$(HAVE_VFP) += $(VFP-OBJS) $(VFP-OBJS-yes)
OBJS-$(HAVE_NEON) += $(NEON-OBJS) $(NEON-OBJS-yes)

OBJS-$(HAVE_MIPSFPU) += $(MIPSFPU-OBJS) $(MIPSFPU-OBJS-yes)
OBJS-$(HAVE_MIPS32R2) += $(MIPS32R2-OBJS) $(MIPS32R2-OBJS-yes)
OBJS-$(HAVE_MIPSDSPR1) += $(MIPSDSPR1-OBJS) $(MIPSDSPR1-OBJS-yes)
OBJS-$(HAVE_MIPSDSPR2) += $(MIPSDSPR2-OBJS) $(MIPSDSPR2-OBJS-yes)
OBJS-$(HAVE_MSA) += $(MSA-OBJS) $(MSA-OBJS-yes)
OBJS-$(HAVE_MMI) += $(MMI-OBJS) $(MMI-OBJS-yes)

OBJS-$(HAVE_ALTIVEC) += $(ALTIVEC-OBJS) $(ALTIVEC-OBJS-yes)
OBJS-$(HAVE_VSX) += $(VSX-OBJS) $(VSX-OBJS-yes)

OBJS-$(HAVE_MMX) += $(MMX-OBJS) $(MMX-OBJS-yes)
OBJS-$(HAVE_YASM) += $(YASM-OBJS) $(YASM-OBJS-yes)
cmdutils.c: 222 changed lines

@@ -41,10 +41,8 @@
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/display.h"
#include "libavutil/mathematics.h"
#include "libavutil/imgutils.h"
#include "libavutil/libm.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/eval.h"
@@ -63,7 +61,7 @@

static int init_report(const char *env);

AVDictionary *sws_dict;
struct SwsContext *sws_opts;
AVDictionary *swr_opts;
AVDictionary *format_opts, *codec_opts, *resample_opts;

@@ -73,13 +71,20 @@ int hide_banner = 0;

void init_opts(void)
{
av_dict_set(&sws_dict, "flags", "bicubic", 0);

if(CONFIG_SWSCALE)
sws_opts = sws_getContext(16, 16, 0, 16, 16, 0, SWS_BICUBIC,
NULL, NULL, NULL);
}

void uninit_opts(void)
{
#if CONFIG_SWSCALE
sws_freeContext(sws_opts);
sws_opts = NULL;
#endif

av_dict_free(&swr_opts);
av_dict_free(&sws_dict);
av_dict_free(&format_opts);
av_dict_free(&codec_opts);
av_dict_free(&resample_opts);
@@ -285,14 +290,10 @@ static int write_option(void *optctx, const OptionDef *po, const char *opt,
if (po->flags & OPT_SPEC) {
SpecifierOpt **so = dst;
char *p = strchr(opt, ':');
char *str;

dstcount = (int *)(so + 1);
*so = grow_array(*so, sizeof(**so), dstcount, *dstcount + 1);
str = av_strdup(p ? p + 1 : "");
if (!str)
return AVERROR(ENOMEM);
(*so)[*dstcount - 1].specifier = str;
(*so)[*dstcount - 1].specifier = av_strdup(p ? p + 1 : "");
dst = &(*so)[*dstcount - 1].u;
}

@@ -300,8 +301,6 @@ static int write_option(void *optctx, const OptionDef *po, const char *opt,
char *str;
str = av_strdup(arg);
av_freep(dst);
if (!str)
return AVERROR(ENOMEM);
*(char **)dst = str;
} else if (po->flags & OPT_BOOL || po->flags & OPT_INT) {
*(int *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
@@ -475,22 +474,10 @@ static void dump_argument(const char *a)
fputc('"', report_file);
}

static void check_options(const OptionDef *po)
{
while (po->name) {
if (po->flags & OPT_PERFILE)
av_assert0(po->flags & (OPT_INPUT | OPT_OUTPUT));
po++;
}
}

void parse_loglevel(int argc, char **argv, const OptionDef *options)
{
int idx = locate_option(argc, argv, options, "loglevel");
const char *env;

check_options(options);

if (!idx)
idx = locate_option(argc, argv, options, "v");
if (idx && argv[idx + 1])
@@ -522,7 +509,7 @@ static const AVOption *opt_find(void *obj, const char *name, const char *unit,
return o;
}

#define FLAGS (o->type == AV_OPT_TYPE_FLAGS && (arg[0]=='-' || arg[0]=='+')) ? AV_DICT_APPEND : 0
#define FLAGS (o->type == AV_OPT_TYPE_FLAGS) ? AV_DICT_APPEND : 0
int opt_default(void *optctx, const char *opt, const char *arg)
{
const AVOption *o;
@@ -558,24 +545,14 @@ int opt_default(void *optctx, const char *opt, const char *arg)
}
#if CONFIG_SWSCALE
sc = sws_get_class();
if (!consumed && (o = opt_find(&sc, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
struct SwsContext *sws = sws_alloc_context();
int ret = av_opt_set(sws, opt, arg, 0);
sws_freeContext(sws);
if (!strcmp(opt, "srcw") || !strcmp(opt, "srch") ||
!strcmp(opt, "dstw") || !strcmp(opt, "dsth") ||
!strcmp(opt, "src_format") || !strcmp(opt, "dst_format")) {
av_log(NULL, AV_LOG_ERROR, "Directly using swscale dimensions/format options is not supported, please use the -s or -pix_fmt options\n");
return AVERROR(EINVAL);
}
if (!consumed && opt_find(&sc, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
// XXX we only support sws_flags, not arbitrary sws options
int ret = av_opt_set(sws_opts, opt, arg, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error setting option %s.\n", opt);
return ret;
}

av_dict_set(&sws_dict, opt, arg, FLAGS);

consumed = 1;
}
#else
@@ -649,7 +626,9 @@ static void finish_group(OptionParseContext *octx, int group_idx,
*g = octx->cur_group;
g->arg = arg;
g->group_def = l->group_def;
g->sws_dict = sws_dict;
#if CONFIG_SWSCALE
g->sws_opts = sws_opts;
#endif
g->swr_opts = swr_opts;
g->codec_opts = codec_opts;
g->format_opts = format_opts;
@@ -658,7 +637,9 @@ static void finish_group(OptionParseContext *octx, int group_idx,
codec_opts = NULL;
format_opts = NULL;
resample_opts = NULL;
sws_dict = NULL;
#if CONFIG_SWSCALE
sws_opts = NULL;
#endif
swr_opts = NULL;
init_opts();

@@ -714,8 +695,9 @@ void uninit_parse_context(OptionParseContext *octx)
av_dict_free(&l->groups[j].codec_opts);
av_dict_free(&l->groups[j].format_opts);
av_dict_free(&l->groups[j].resample_opts);

av_dict_free(&l->groups[j].sws_dict);
#if CONFIG_SWSCALE
sws_freeContext(l->groups[j].sws_opts);
#endif
av_dict_free(&l->groups[j].swr_opts);
}
av_freep(&l->groups);
@@ -857,7 +839,6 @@ int opt_loglevel(void *optctx, const char *opt, const char *arg)
{ "info" , AV_LOG_INFO },
{ "verbose", AV_LOG_VERBOSE },
{ "debug" , AV_LOG_DEBUG },
{ "trace" , AV_LOG_TRACE },
};
char *tail;
int level;
@@ -1094,7 +1075,8 @@ static void print_program_info(int flags, int level)
av_log(NULL, level, " Copyright (c) %d-%d the FFmpeg developers",
program_birth_year, CONFIG_THIS_YEAR);
av_log(NULL, level, "\n");
av_log(NULL, level, "%sbuilt with %s\n", indent, CC_IDENT);
av_log(NULL, level, "%sbuilt on %s %s with %s\n",
indent, __DATE__, __TIME__, CC_IDENT);

av_log(NULL, level, "%sconfiguration: " FFMPEG_CONFIGURATION "\n", indent);
}
@@ -1231,7 +1213,12 @@ static int is_device(const AVClass *avclass)
{
if (!avclass)
return 0;
return AV_IS_INPUT_DEVICE(avclass->category) || AV_IS_OUTPUT_DEVICE(avclass->category);
return avclass->category == AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT ||
avclass->category == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT ||
avclass->category == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT ||
avclass->category == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT ||
avclass->category == AV_CLASS_CATEGORY_DEVICE_OUTPUT ||
avclass->category == AV_CLASS_CATEGORY_DEVICE_INPUT;
}

static int show_formats_devices(void *optctx, const char *opt, const char *arg, int device_only)
@@ -1322,12 +1309,12 @@ static void print_codec(const AVCodec *c)
if (c->type == AVMEDIA_TYPE_VIDEO ||
c->type == AVMEDIA_TYPE_AUDIO) {
printf(" Threading capabilities: ");
switch (c->capabilities & (AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS)) {
case AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS: printf("frame and slice"); break;
case AV_CODEC_CAP_FRAME_THREADS: printf("frame"); break;
case AV_CODEC_CAP_SLICE_THREADS: printf("slice"); break;
switch (c->capabilities & (CODEC_CAP_FRAME_THREADS |
CODEC_CAP_SLICE_THREADS)) {
case CODEC_CAP_FRAME_THREADS |
CODEC_CAP_SLICE_THREADS: printf("frame and slice"); break;
case CODEC_CAP_FRAME_THREADS: printf("frame"); break;
case CODEC_CAP_SLICE_THREADS: printf("slice"); break;
default: printf("no"); break;
}
printf("\n");
@@ -1501,11 +1488,11 @@ static void print_codecs(int encoder)

while ((codec = next_codec_for_id(desc->id, codec, encoder))) {
printf(" %c", get_media_type_char(desc->type));
printf((codec->capabilities & AV_CODEC_CAP_FRAME_THREADS) ? "F" : ".");
printf((codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) ? "S" : ".");
printf((codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) ? "X" : ".");
printf((codec->capabilities & AV_CODEC_CAP_DRAW_HORIZ_BAND)?"B" : ".");
printf((codec->capabilities & AV_CODEC_CAP_DR1) ? "D" : ".");
printf((codec->capabilities & CODEC_CAP_FRAME_THREADS) ? "F" : ".");
printf((codec->capabilities & CODEC_CAP_SLICE_THREADS) ? "S" : ".");
printf((codec->capabilities & CODEC_CAP_EXPERIMENTAL) ? "X" : ".");
printf((codec->capabilities & CODEC_CAP_DRAW_HORIZ_BAND)?"B" : ".");
printf((codec->capabilities & CODEC_CAP_DR1) ? "D" : ".");

printf(" %-20s %s", codec->name, codec->long_name ? codec->long_name : "");
if (strcmp(codec->name, desc->name))
@@ -1548,10 +1535,10 @@ int show_protocols(void *optctx, const char *opt, const char *arg)
printf("Supported file protocols:\n"
"Input:\n");
while ((name = avio_enum_protocols(&opaque, 0)))
printf(" %s\n", name);
printf("%s\n", name);
printf("Output:\n");
while ((name = avio_enum_protocols(&opaque, 1)))
printf(" %s\n", name);
printf("%s\n", name);
return 0;
}

@@ -1566,7 +1553,7 @@ int show_filters(void *optctx, const char *opt, const char *arg)
printf("Filters:\n"
" T.. = Timeline support\n"
" .S. = Slice threading\n"
" ..C = Command support\n"
" ..C = Commmand support\n"
" A = Audio input/output\n"
" V = Video input/output\n"
" N = Dynamic number and/or type of input/output\n"
@@ -1579,10 +1566,10 @@ int show_filters(void *optctx, const char *opt, const char *arg)
*(descr_cur++) = '>';
}
pad = i ? filter->outputs : filter->inputs;
for (j = 0; pad && avfilter_pad_get_name(pad, j); j++) {
for (j = 0; pad && pad[j].name; j++) {
if (descr_cur >= descr + sizeof(descr) - 4)
break;
*(descr_cur++) = get_media_type_char(avfilter_pad_get_type(pad, j));
*(descr_cur++) = get_media_type_char(pad[j].type);
}
if (!j)
*(descr_cur++) = ((!i && (filter->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) ||
@@ -1834,8 +1821,6 @@ int show_help(void *optctx, const char *opt, const char *arg)
av_log_set_callback(log_callback_help);

topic = av_strdup(arg ? arg : "");
if (!topic)
return AVERROR(ENOMEM);
par = strchr(topic, '=');
if (par)
*par++ = 0;
@@ -1873,6 +1858,50 @@ int read_yesno(void)
return yesno;
}

int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
{
int64_t ret;
FILE *f = av_fopen_utf8(filename, "rb");

if (!f) {
ret = AVERROR(errno);
av_log(NULL, AV_LOG_ERROR, "Cannot read file '%s': %s\n", filename,
strerror(errno));
return ret;
}
fseek(f, 0, SEEK_END);
*size = ftell(f);
fseek(f, 0, SEEK_SET);
if (*size == (size_t)-1) {
ret = AVERROR(errno);
av_log(NULL, AV_LOG_ERROR, "IO error: %s\n", strerror(errno));
fclose(f);
return ret;
}
*bufptr = av_malloc(*size + 1);
if (!*bufptr) {
av_log(NULL, AV_LOG_ERROR, "Could not allocate file buffer\n");
fclose(f);
return AVERROR(ENOMEM);
}
ret = fread(*bufptr, 1, *size, f);
if (ret < *size) {
av_free(*bufptr);
if (ferror(f)) {
ret = AVERROR(errno);
av_log(NULL, AV_LOG_ERROR, "Error while reading file '%s': %s\n",
filename, strerror(errno));
} else
ret = AVERROR_EOF;
} else {
ret = 0;
(*bufptr)[(*size)++] = '\0';
}

fclose(f);
return ret;
}
FILE *get_preset_file(char *filename, size_t filename_size,
|
||||
const char *preset_name, int is_path,
|
||||
const char *codec_name)
|
||||
@@ -2015,7 +2044,7 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
exit_program(1);
}
if (*size < new_size) {
uint8_t *tmp = av_realloc_array(array, new_size, elem_size);
uint8_t *tmp = av_realloc(array, new_size*elem_size);
if (!tmp) {
av_log(NULL, AV_LOG_ERROR, "Could not alloc buffer.\n");
exit_program(1);
@@ -2027,38 +2056,13 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
return array;
}
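This hunk pairs av_realloc_array() against a hand-written new_size*elem_size multiplication. The point of the *_array variant, shown as a minimal sketch (grow_checked is a made-up helper name, not project code):

    #include "libavutil/mem.h"

    /* av_realloc_array() verifies that new_size * elem_size does not overflow
     * before reallocating; av_realloc(ptr, new_size * elem_size) would silently
     * wrap around and hand back a buffer that is too small. */
    static void *grow_checked(void *array, size_t elem_size, size_t new_size)
    {
        return av_realloc_array(array, new_size, elem_size);
    }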
double get_rotation(AVStream *st)
{
AVDictionaryEntry *rotate_tag = av_dict_get(st->metadata, "rotate", NULL, 0);
uint8_t* displaymatrix = av_stream_get_side_data(st,
AV_PKT_DATA_DISPLAYMATRIX, NULL);
double theta = 0;

if (rotate_tag && *rotate_tag->value && strcmp(rotate_tag->value, "0")) {
char *tail;
theta = av_strtod(rotate_tag->value, &tail);
if (*tail)
theta = 0;
}
if (displaymatrix && !theta)
theta = -av_display_rotation_get((int32_t*) displaymatrix);

theta -= 360*floor(theta/360 + 0.9/360);

if (fabs(theta - 90*round(theta/90)) > 2)
av_log(NULL, AV_LOG_WARNING, "Odd rotation angle.\n"
"If you want to help, upload a sample "
"of this file to ftp://upload.ffmpeg.org/incoming/ "
"and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)");

return theta;
}
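The normalization line `theta -= 360*floor(theta/360 + 0.9/360);` maps any angle into roughly [0, 360) while snapping values within about 0.9 degrees of a full turn back towards 0. A standalone sketch of just that expression, not part of the patch:

    #include <math.h>
    #include <stdio.h>

    static double normalize_rotation(double theta)
    {
        return theta - 360 * floor(theta / 360 + 0.9 / 360);
    }

    int main(void)
    {
        printf("%g\n", normalize_rotation(-90.0));  /* 270 */
        printf("%g\n", normalize_rotation(450.0));  /* 90 */
        printf("%g\n", normalize_rotation(359.95)); /* -0.05, i.e. treated as ~0 */
        return 0;
    }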
|
||||
|
||||
#if CONFIG_AVDEVICE
|
||||
static int print_device_sources(AVInputFormat *fmt, AVDictionary *opts)
|
||||
{
|
||||
int ret, i;
|
||||
AVFormatContext *dev = NULL;
|
||||
AVDeviceInfoList *device_list = NULL;
|
||||
AVDictionary *tmp_opts = NULL;
|
||||
|
||||
if (!fmt || !fmt->priv_class || !AV_IS_INPUT_DEVICE(fmt->priv_class->category))
|
||||
return AVERROR(EINVAL);
|
||||
@@ -2070,7 +2074,15 @@ static int print_device_sources(AVInputFormat *fmt, AVDictionary *opts)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if ((ret = avdevice_list_input_sources(fmt, NULL, opts, &device_list)) < 0) {
|
||||
/* TODO: avformat_open_input calls read_header callback which is not necessary.
|
||||
Function like avformat_alloc_output_context2 for input could be helpful here. */
|
||||
av_dict_copy(&tmp_opts, opts, 0);
|
||||
if ((ret = avformat_open_input(&dev, NULL, fmt, &tmp_opts)) < 0) {
|
||||
printf("Cannot open device: %s.\n", fmt->name);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if ((ret = avdevice_list_devices(dev, &device_list)) < 0) {
|
||||
printf("Cannot list sources.\n");
|
||||
goto fail;
|
||||
}
|
||||
@@ -2081,14 +2093,18 @@ static int print_device_sources(AVInputFormat *fmt, AVDictionary *opts)
|
||||
}
|
||||
|
||||
fail:
|
||||
av_dict_free(&tmp_opts);
|
||||
avdevice_free_list_devices(&device_list);
|
||||
avformat_close_input(&dev);
|
||||
return ret;
|
||||
}
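Of the two variants in this hunk, the avdevice_list_input_sources() one enumerates capture devices in a single call instead of opening the device via avformat_open_input() first. A reduced sketch of that path with error handling trimmed; the function name list_sources_example is made up:

    #include <stdio.h>
    #include "libavdevice/avdevice.h"

    static void list_sources_example(AVInputFormat *fmt)
    {
        AVDeviceInfoList *devs = NULL;
        int i;

        if (avdevice_list_input_sources(fmt, NULL, NULL, &devs) < 0)
            return;
        for (i = 0; i < devs->nb_devices; i++)
            printf("%s: %s\n", devs->devices[i]->device_name,
                   devs->devices[i]->device_description);
        avdevice_free_list_devices(&devs);
    }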
|
||||
|
||||
static int print_device_sinks(AVOutputFormat *fmt, AVDictionary *opts)
|
||||
{
|
||||
int ret, i;
|
||||
AVFormatContext *dev = NULL;
|
||||
AVDeviceInfoList *device_list = NULL;
|
||||
AVDictionary *tmp_opts = NULL;
|
||||
|
||||
if (!fmt || !fmt->priv_class || !AV_IS_OUTPUT_DEVICE(fmt->priv_class->category))
|
||||
return AVERROR(EINVAL);
|
||||
@@ -2100,7 +2116,14 @@ static int print_device_sinks(AVOutputFormat *fmt, AVDictionary *opts)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if ((ret = avdevice_list_output_sinks(fmt, NULL, opts, &device_list)) < 0) {
|
||||
if ((ret = avformat_alloc_output_context2(&dev, fmt, NULL, NULL)) < 0) {
|
||||
printf("Cannot open device: %s.\n", fmt->name);
|
||||
goto fail;
|
||||
}
|
||||
av_dict_copy(&tmp_opts, opts, 0);
|
||||
av_opt_set_dict2(dev, &tmp_opts, AV_OPT_SEARCH_CHILDREN);
|
||||
|
||||
if ((ret = avdevice_list_devices(dev, &device_list)) < 0) {
|
||||
printf("Cannot list sinks.\n");
|
||||
goto fail;
|
||||
}
|
||||
@@ -2111,7 +2134,9 @@ static int print_device_sinks(AVOutputFormat *fmt, AVDictionary *opts)
|
||||
}
|
||||
|
||||
fail:
|
||||
av_dict_free(&tmp_opts);
|
||||
avdevice_free_list_devices(&device_list);
|
||||
avformat_free_context(dev);
|
||||
return ret;
|
||||
}
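The sink side mirrors the source side: avdevice_list_output_sinks() does away with the avformat_alloc_output_context2() detour. A matching sketch (list_sinks_example is a made-up name):

    #include <stdio.h>
    #include "libavdevice/avdevice.h"

    static void list_sinks_example(AVOutputFormat *fmt)
    {
        AVDeviceInfoList *devs = NULL;
        int i;

        if (avdevice_list_output_sinks(fmt, NULL, NULL, &devs) < 0)
            return;
        for (i = 0; i < devs->nb_devices; i++)
            printf("%s: %s\n", devs->devices[i]->device_name,
                   devs->devices[i]->device_description);
        avdevice_free_list_devices(&devs);
    }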
|
||||
|
||||
@@ -2155,7 +2180,7 @@ int show_sources(void *optctx, const char *opt, const char *arg)
|
||||
if (fmt) {
|
||||
if (!strcmp(fmt->name, "lavfi"))
|
||||
continue; //it's pointless to probe lavfi
|
||||
if (dev && !av_match_name(dev, fmt->name))
|
||||
if (dev && strcmp(fmt->name, dev))
|
||||
continue;
|
||||
print_device_sources(fmt, opts);
|
||||
}
|
||||
@@ -2163,7 +2188,7 @@ int show_sources(void *optctx, const char *opt, const char *arg)
|
||||
do {
|
||||
fmt = av_input_video_device_next(fmt);
|
||||
if (fmt) {
|
||||
if (dev && !av_match_name(dev, fmt->name))
|
||||
if (dev && strcmp(fmt->name, dev))
|
||||
continue;
|
||||
print_device_sources(fmt, opts);
|
||||
}
|
||||
@@ -2191,7 +2216,7 @@ int show_sinks(void *optctx, const char *opt, const char *arg)
|
||||
do {
|
||||
fmt = av_output_audio_device_next(fmt);
|
||||
if (fmt) {
|
||||
if (dev && !av_match_name(dev, fmt->name))
|
||||
if (dev && strcmp(fmt->name, dev))
|
||||
continue;
|
||||
print_device_sinks(fmt, opts);
|
||||
}
|
||||
@@ -2199,7 +2224,7 @@ int show_sinks(void *optctx, const char *opt, const char *arg)
|
||||
do {
|
||||
fmt = av_output_video_device_next(fmt);
|
||||
if (fmt) {
|
||||
if (dev && !av_match_name(dev, fmt->name))
|
||||
if (dev && strcmp(fmt->name, dev))
|
||||
continue;
|
||||
print_device_sinks(fmt, opts);
|
||||
}
|
||||
@@ -2210,5 +2235,4 @@ int show_sinks(void *optctx, const char *opt, const char *arg)
|
||||
av_log_set_level(error_level);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
cmdutils.h (18 changed lines)
@@ -46,7 +46,7 @@ extern const int program_birth_year;
|
||||
|
||||
extern AVCodecContext *avcodec_opts[AVMEDIA_TYPE_NB];
|
||||
extern AVFormatContext *avformat_opts;
|
||||
extern AVDictionary *sws_dict;
|
||||
extern struct SwsContext *sws_opts;
|
||||
extern AVDictionary *swr_opts;
|
||||
extern AVDictionary *format_opts, *codec_opts, *resample_opts;
|
||||
extern int hide_banner;
|
||||
@@ -277,7 +277,7 @@ typedef struct OptionGroup {
|
||||
AVDictionary *codec_opts;
|
||||
AVDictionary *format_opts;
|
||||
AVDictionary *resample_opts;
|
||||
AVDictionary *sws_dict;
|
||||
struct SwsContext *sws_opts;
|
||||
AVDictionary *swr_opts;
|
||||
} OptionGroup;
|
||||
|
||||
@@ -529,6 +529,18 @@ int show_colors(void *optctx, const char *opt, const char *arg);
|
||||
*/
|
||||
int read_yesno(void);
|
||||
|
||||
/**
* Read the file with name filename, and put its content in a newly
* allocated 0-terminated buffer.
*
* @param filename file to read from
* @param bufptr location where pointer to buffer is returned
* @param size location where size of buffer is returned
* @return >= 0 in case of success, a negative value corresponding to an
* AVERROR error code in case of failure.
*/
int cmdutils_read_file(const char *filename, char **bufptr, size_t *size);
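A short usage sketch for the prototype above, assuming the cmdutils_read_file() implementation shown earlier in cmdutils.c; the file name "preset.txt" and the wrapper name are only examples:

    #include "cmdutils.h"

    static int read_whole_file_example(void)
    {
        char  *buf  = NULL;
        size_t size = 0;
        int err = cmdutils_read_file("preset.txt", &buf, &size);
        if (err < 0)
            return err;
        /* buf is 0-terminated; in the implementation shown earlier, size
         * includes the trailing '\0'. */
        av_log(NULL, AV_LOG_INFO, "read %zu bytes\n", size);
        av_free(buf);
        return 0;
    }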
|
||||
|
||||
/**
|
||||
* Get a file corresponding to a preset file.
|
||||
*
|
||||
@@ -585,6 +597,4 @@ void *grow_array(void *array, int elem_size, int *size, int new_size);
|
||||
char name[128];\
|
||||
av_get_channel_layout_string(name, sizeof(name), 0, ch_layout);
|
||||
|
||||
double get_rotation(AVStream *st);
|
||||
|
||||
#endif /* CMDUTILS_H */
|
||||
|
@@ -22,7 +22,6 @@
|
||||
#include "libavutil/time.h"
|
||||
#include "libavutil/log.h"
|
||||
#include "libavutil/opencl.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "cmdutils.h"
|
||||
|
||||
typedef struct {
|
||||
@@ -239,8 +238,7 @@ int opt_opencl_bench(void *optctx, const char *opt, const char *arg)
|
||||
devices[count].platform_idx = i;
|
||||
devices[count].device_idx = j;
|
||||
devices[count].runtime = score;
|
||||
av_strlcpy(devices[count].device_name, device_node->device_name,
|
||||
sizeof(devices[count].device_name));
|
||||
strcpy(devices[count].device_name, device_node->device_name);
|
||||
count++;
|
||||
}
|
||||
}
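This hunk pairs av_strlcpy() against strcpy(): the bounded copy cannot overflow devices[count].device_name and always NUL-terminates, truncating overlong OpenCL device names instead. A tiny sketch of the difference (the buffer and string are made up):

    #include <stdio.h>
    #include "libavutil/avstring.h"

    static void copy_name_example(void)
    {
        char dst[8];
        /* Copies at most sizeof(dst)-1 characters and always NUL-terminates. */
        av_strlcpy(dst, "a-very-long-device-name", sizeof(dst));
        printf("%s\n", dst);
        /* strcpy(dst, "a-very-long-device-name") would write past the end of dst. */
    }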
|
||||
|
common.mak (19 changed lines)
@@ -5,14 +5,6 @@
|
||||
# first so "all" becomes default target
|
||||
all: all-yes
|
||||
|
||||
DEFAULT_YASMD=.dbg
|
||||
|
||||
ifeq ($(DBG),1)
|
||||
YASMD=$(DEFAULT_YASMD)
|
||||
else
|
||||
YASMD=
|
||||
endif
|
||||
|
||||
ifndef SUBDIR
|
||||
|
||||
ifndef V
|
||||
@@ -118,9 +110,8 @@ TOOLOBJS := $(TOOLS:%=tools/%.o)
|
||||
TOOLS := $(TOOLS:%=tools/%$(EXESUF))
|
||||
HEADERS += $(HEADERS-yes)
|
||||
|
||||
PATH_LIBNAME = $(foreach NAME,$(1),lib$(NAME)/$($(2)LIBNAME))
|
||||
DEP_LIBS := $(foreach lib,$(FFLIBS),$(call PATH_LIBNAME,$(lib),$(CONFIG_SHARED:yes=S)))
|
||||
STATIC_DEP_LIBS := $(foreach lib,$(FFLIBS),$(call PATH_LIBNAME,$(lib)))
|
||||
PATH_LIBNAME = $(foreach NAME,$(1),lib$(NAME)/$($(CONFIG_SHARED:yes=S)LIBNAME))
|
||||
DEP_LIBS := $(foreach lib,$(FFLIBS),$(call PATH_LIBNAME,$(lib)))
|
||||
|
||||
SRC_DIR := $(SRC_PATH)/lib$(NAME)
|
||||
ALLHEADERS := $(subst $(SRC_DIR)/,$(SUBDIR),$(wildcard $(SRC_DIR)/*.h $(SRC_DIR)/$(ARCH)/*.h))
|
||||
@@ -147,17 +138,17 @@ $(TOOLOBJS): | tools
|
||||
|
||||
OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(TESTOBJS))
|
||||
|
||||
CLEANSUFFIXES = *.d *.o *~ *.h.c *.map *.ver *.ho *.gcno *.gcda *$(DEFAULT_YASMD).asm
|
||||
CLEANSUFFIXES = *.d *.o *~ *.h.c *.map *.ver *.ho *.gcno *.gcda
|
||||
DISTCLEANSUFFIXES = *.pc
|
||||
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a
|
||||
|
||||
define RULES
|
||||
clean::
|
||||
$(RM) $(OBJS) $(OBJS:.o=.d) $(OBJS:.o=$(DEFAULT_YASMD).d)
|
||||
$(RM) $(OBJS) $(OBJS:.o=.d)
|
||||
$(RM) $(HOSTPROGS)
|
||||
$(RM) $(TOOLS)
|
||||
endef
|
||||
|
||||
$(eval $(RULES))
|
||||
|
||||
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SLIBOBJS:.o=.d)) $(OBJS:.o=$(DEFAULT_YASMD).d)
|
||||
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SLIBOBJS:.o=.d))
|
||||
|
@@ -38,9 +38,40 @@
|
||||
#ifndef __AVISYNTH_C__
|
||||
#define __AVISYNTH_C__
|
||||
|
||||
#include "avs/config.h"
|
||||
#include "avs/capi.h"
|
||||
#include "avs/types.h"
|
||||
#ifdef __cplusplus
|
||||
# define EXTERN_C extern "C"
|
||||
#else
|
||||
# define EXTERN_C
|
||||
#endif
|
||||
|
||||
#define AVSC_USE_STDCALL 1
|
||||
|
||||
#ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
#else
|
||||
# define AVSC_CC __stdcall
|
||||
#endif
|
||||
|
||||
#define AVSC_INLINE static __inline
|
||||
|
||||
#ifdef AVISYNTH_C_EXPORTS
|
||||
# define AVSC_EXPORT EXTERN_C
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllexport) ret AVSC_CC name
|
||||
#else
|
||||
# define AVSC_EXPORT EXTERN_C __declspec(dllexport)
|
||||
# ifndef AVSC_NO_DECLSPEC
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name
|
||||
# else
|
||||
# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
typedef unsigned char BYTE;
|
||||
#ifdef __GNUC__
|
||||
typedef long long int INT64;
|
||||
#else
|
||||
typedef __int64 INT64;
|
||||
#endif
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
@@ -48,8 +79,8 @@
|
||||
// Constants
|
||||
//
|
||||
|
||||
#ifndef __AVISYNTH_6_H__
|
||||
enum { AVISYNTH_INTERFACE_VERSION = 6 };
|
||||
#ifndef __AVISYNTH_H__
|
||||
enum { AVISYNTH_INTERFACE_VERSION = 4 };
|
||||
#endif
|
||||
|
||||
enum {AVS_SAMPLE_INT8 = 1<<0,
|
||||
@@ -81,8 +112,8 @@ enum {AVS_CS_BGR = 1<<28,
|
||||
AVS_CS_PLANAR = 1<<31,
|
||||
|
||||
AVS_CS_SHIFT_SUB_WIDTH = 0,
|
||||
AVS_CS_SHIFT_SUB_HEIGHT = 8,
|
||||
AVS_CS_SHIFT_SAMPLE_BITS = 16,
|
||||
AVS_CS_SHIFT_SUB_HEIGHT = 1 << 3,
|
||||
AVS_CS_SHIFT_SAMPLE_BITS = 1 << 4,
|
||||
|
||||
AVS_CS_SUB_WIDTH_MASK = 7 << AVS_CS_SHIFT_SUB_WIDTH,
|
||||
AVS_CS_SUB_WIDTH_1 = 3 << AVS_CS_SHIFT_SUB_WIDTH, // YV24
|
||||
@@ -149,66 +180,15 @@ enum { //SUBTYPES
|
||||
AVS_FILTER_OUTPUT_TYPE_DIFFERENT=4};
|
||||
|
||||
enum {
|
||||
// New 2.6 explicitly defined cache hints.
|
||||
AVS_CACHE_NOTHING=10, // Do not cache video.
|
||||
AVS_CACHE_WINDOW=11, // Hard protect upto X frames within a range of X from the current frame N.
|
||||
AVS_CACHE_GENERIC=12, // LRU cache upto X frames.
|
||||
AVS_CACHE_FORCE_GENERIC=13, // LRU cache upto X frames, override any previous CACHE_WINDOW.
|
||||
|
||||
AVS_CACHE_GET_POLICY=30, // Get the current policy.
|
||||
AVS_CACHE_GET_WINDOW=31, // Get the current window h_span.
|
||||
AVS_CACHE_GET_RANGE=32, // Get the current generic frame range.
|
||||
|
||||
AVS_CACHE_AUDIO=50, // Explicitly do cache audio, X byte cache.
|
||||
AVS_CACHE_AUDIO_NOTHING=51, // Explicitly do not cache audio.
|
||||
AVS_CACHE_AUDIO_NONE=52, // Audio cache off (auto mode), X byte intial cache.
|
||||
AVS_CACHE_AUDIO_AUTO=53, // Audio cache on (auto mode), X byte intial cache.
|
||||
|
||||
AVS_CACHE_GET_AUDIO_POLICY=70, // Get the current audio policy.
|
||||
AVS_CACHE_GET_AUDIO_SIZE=71, // Get the current audio cache size.
|
||||
|
||||
AVS_CACHE_PREFETCH_FRAME=100, // Queue request to prefetch frame N.
|
||||
AVS_CACHE_PREFETCH_GO=101, // Action video prefetches.
|
||||
|
||||
AVS_CACHE_PREFETCH_AUDIO_BEGIN=120, // Begin queue request transaction to prefetch audio (take critical section).
|
||||
AVS_CACHE_PREFETCH_AUDIO_STARTLO=121, // Set low 32 bits of start.
|
||||
AVS_CACHE_PREFETCH_AUDIO_STARTHI=122, // Set high 32 bits of start.
|
||||
AVS_CACHE_PREFETCH_AUDIO_COUNT=123, // Set low 32 bits of length.
|
||||
AVS_CACHE_PREFETCH_AUDIO_COMMIT=124, // Enqueue request transaction to prefetch audio (release critical section).
|
||||
AVS_CACHE_PREFETCH_AUDIO_GO=125, // Action audio prefetches.
|
||||
|
||||
AVS_CACHE_GETCHILD_CACHE_MODE=200, // Cache ask Child for desired video cache mode.
|
||||
AVS_CACHE_GETCHILD_CACHE_SIZE=201, // Cache ask Child for desired video cache size.
|
||||
AVS_CACHE_GETCHILD_AUDIO_MODE=202, // Cache ask Child for desired audio cache mode.
|
||||
AVS_CACHE_GETCHILD_AUDIO_SIZE=203, // Cache ask Child for desired audio cache size.
|
||||
|
||||
AVS_CACHE_GETCHILD_COST=220, // Cache ask Child for estimated processing cost.
|
||||
AVS_CACHE_COST_ZERO=221, // Child response of zero cost (ptr arithmetic only).
|
||||
AVS_CACHE_COST_UNIT=222, // Child response of unit cost (less than or equal 1 full frame blit).
|
||||
AVS_CACHE_COST_LOW=223, // Child response of light cost. (Fast)
|
||||
AVS_CACHE_COST_MED=224, // Child response of medium cost. (Real time)
|
||||
AVS_CACHE_COST_HI=225, // Child response of heavy cost. (Slow)
|
||||
|
||||
AVS_CACHE_GETCHILD_THREAD_MODE=240, // Cache ask Child for thread safetyness.
|
||||
AVS_CACHE_THREAD_UNSAFE=241, // Only 1 thread allowed for all instances. 2.5 filters default!
|
||||
AVS_CACHE_THREAD_CLASS=242, // Only 1 thread allowed for each instance. 2.6 filters default!
|
||||
AVS_CACHE_THREAD_SAFE=243, // Allow all threads in any instance.
|
||||
AVS_CACHE_THREAD_OWN=244, // Safe but limit to 1 thread, internally threaded.
|
||||
|
||||
AVS_CACHE_GETCHILD_ACCESS_COST=260, // Cache ask Child for preferred access pattern.
|
||||
AVS_CACHE_ACCESS_RAND=261, // Filter is access order agnostic.
|
||||
AVS_CACHE_ACCESS_SEQ0=262, // Filter prefers sequential access (low cost)
|
||||
AVS_CACHE_ACCESS_SEQ1=263, // Filter needs sequential access (high cost)
|
||||
AVS_CACHE_NOTHING=0,
|
||||
AVS_CACHE_RANGE=1,
|
||||
AVS_CACHE_ALL=2,
|
||||
AVS_CACHE_AUDIO=3,
|
||||
AVS_CACHE_AUDIO_NONE=4,
|
||||
AVS_CACHE_AUDIO_AUTO=5
|
||||
};
|
||||
|
||||
#ifdef BUILDING_AVSCORE
|
||||
struct AVS_ScriptEnvironment {
|
||||
IScriptEnvironment * env;
|
||||
const char * error;
|
||||
AVS_ScriptEnvironment(IScriptEnvironment * e = 0)
|
||||
: env(e), error(0) {}
|
||||
};
|
||||
#endif
|
||||
#define AVS_FRAME_ALIGN 16
|
||||
|
||||
typedef struct AVS_Clip AVS_Clip;
|
||||
typedef struct AVS_ScriptEnvironment AVS_ScriptEnvironment;
|
||||
@@ -258,23 +238,29 @@ AVSC_INLINE int avs_is_yuv(const AVS_VideoInfo * p)
|
||||
AVSC_INLINE int avs_is_yuy2(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_YUY2) == AVS_CS_YUY2; }
|
||||
|
||||
AVSC_API(int, avs_is_yv24)(const AVS_VideoInfo * p);
|
||||
AVSC_INLINE int avs_is_yv24(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_YV24 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_API(int, avs_is_yv16)(const AVS_VideoInfo * p);
|
||||
AVSC_INLINE int avs_is_yv16(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_YV16 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_API(int, avs_is_yv12)(const AVS_VideoInfo * p) ;
|
||||
AVSC_INLINE int avs_is_yv12(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_YV12 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_API(int, avs_is_yv411)(const AVS_VideoInfo * p);
|
||||
AVSC_INLINE int avs_is_yv411(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_YV411 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_API(int, avs_is_y8)(const AVS_VideoInfo * p);
|
||||
AVSC_INLINE int avs_is_y8(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_Y8 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_INLINE int avs_is_property(const AVS_VideoInfo * p, int property)
|
||||
{ return ((p->image_type & property)==property ); }
|
||||
{ return ((p->pixel_type & property)==property ); }
|
||||
|
||||
AVSC_INLINE int avs_is_planar(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type & AVS_CS_PLANAR); }
|
||||
|
||||
AVSC_API(int, avs_is_color_space)(const AVS_VideoInfo * p, int c_space);
|
||||
AVSC_INLINE int avs_is_color_space(const AVS_VideoInfo * p, int c_space)
|
||||
{ return avs_is_planar(p) ? ((p->pixel_type & AVS_CS_PLANAR_MASK) == (c_space & AVS_CS_PLANAR_FILTER)) : ((p->pixel_type & c_space) == c_space); }
|
||||
|
||||
AVSC_INLINE int avs_is_field_based(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_FIELDBASED); }
|
||||
@@ -288,18 +274,25 @@ AVSC_INLINE int avs_is_bff(const AVS_VideoInfo * p)
|
||||
AVSC_INLINE int avs_is_tff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_TFF); }
|
||||
|
||||
AVSC_API(int, avs_get_plane_width_subsampling)(const AVS_VideoInfo * p, int plane);
|
||||
AVSC_INLINE int avs_bits_per_pixel(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->pixel_type) {
|
||||
case AVS_CS_BGR24: return 24;
|
||||
case AVS_CS_BGR32: return 32;
|
||||
case AVS_CS_YUY2: return 16;
|
||||
case AVS_CS_YV12:
|
||||
case AVS_CS_I420: return 12;
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_from_pixels(const AVS_VideoInfo * p, int pixels)
|
||||
{ return pixels * (avs_bits_per_pixel(p)>>3); } // Will work on planar images, but will return only luma planes
|
||||
|
||||
AVSC_API(int, avs_get_plane_height_subsampling)(const AVS_VideoInfo * p, int plane);
|
||||
AVSC_INLINE int avs_row_size(const AVS_VideoInfo * p)
|
||||
{ return avs_bytes_from_pixels(p,p->width); } // Also only returns first plane on planar images
|
||||
|
||||
|
||||
AVSC_API(int, avs_bits_per_pixel)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_bytes_from_pixels)(const AVS_VideoInfo * p, int pixels);
|
||||
|
||||
AVSC_API(int, avs_row_size)(const AVS_VideoInfo * p, int plane);
|
||||
|
||||
AVSC_API(int, avs_bmp_size)(const AVS_VideoInfo * vi);
|
||||
AVSC_INLINE int avs_bmp_size(const AVS_VideoInfo * vi)
|
||||
{ if (avs_is_planar(vi)) {int p = vi->height * ((avs_row_size(vi)+3) & ~3); p+=p>>1; return p; } return vi->height * ((avs_row_size(vi)+3) & ~3); }
|
||||
|
||||
AVSC_INLINE int avs_samples_per_second(const AVS_VideoInfo * p)
|
||||
{ return p->audio_samples_per_second; }
|
||||
@@ -357,13 +350,11 @@ AVSC_INLINE void avs_set_fps(AVS_VideoInfo * p, unsigned numerator, unsigned den
|
||||
p->fps_denominator = denominator/x;
|
||||
}
|
||||
|
||||
#ifdef AVS_IMPLICIT_FUNCTION_DECLARATION_ERROR
|
||||
AVSC_INLINE int avs_is_same_colorspace(AVS_VideoInfo * x, AVS_VideoInfo * y)
|
||||
{
|
||||
return (x->pixel_type == y->pixel_type)
|
||||
|| (avs_is_yv12(x) && avs_is_yv12(y));
|
||||
}
|
||||
#endif
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
@@ -400,38 +391,89 @@ typedef struct AVS_VideoFrame {
|
||||
} AVS_VideoFrame;
|
||||
|
||||
// Access functions for AVS_VideoFrame
|
||||
AVSC_API(int, avs_get_pitch_p)(const AVS_VideoFrame * p, int plane);
|
||||
|
||||
#ifdef AVS_IMPLICIT_FUNCTION_DECLARATION_ERROR
|
||||
AVSC_INLINE int avs_get_pitch(const AVS_VideoFrame * p) {
|
||||
return avs_get_pitch_p(p, 0);}
|
||||
#endif
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_API(int, avs_get_row_size_p)(const AVS_VideoFrame * p, int plane);
|
||||
AVSC_INLINE int avs_get_pitch_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V: return p->pitchUV;}
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_row_size(const AVS_VideoFrame * p) {
|
||||
return p->row_size; }
|
||||
|
||||
AVSC_API(int, avs_get_height_p)(const AVS_VideoFrame * p, int plane);
|
||||
AVSC_INLINE int avs_get_row_size_p(const AVS_VideoFrame * p, int plane) {
|
||||
int r;
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->row_sizeUV;
|
||||
else return 0;
|
||||
case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED:
|
||||
if (p->pitchUV) {
|
||||
r = (p->row_sizeUV+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r < p->pitchUV)
|
||||
return r;
|
||||
return p->row_sizeUV;
|
||||
} else return 0;
|
||||
case AVS_PLANAR_Y_ALIGNED:
|
||||
r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r <= p->pitch)
|
||||
return r;
|
||||
return p->row_size;
|
||||
}
|
||||
return p->row_size;
|
||||
}
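The `(x + AVS_FRAME_ALIGN - 1) & ~(AVS_FRAME_ALIGN - 1)` expression used in the aligned cases above is the usual round-up-to-a-power-of-two-multiple idiom. A worked sketch, with ALIGN standing in for AVS_FRAME_ALIGN:

    #include <stdio.h>

    #define ALIGN 16 /* stands in for AVS_FRAME_ALIGN in this sketch */

    /* Round n up to the next multiple of ALIGN (ALIGN must be a power of two). */
    static int align_up(int n) { return (n + ALIGN - 1) & ~(ALIGN - 1); }

    int main(void)
    {
        printf("%d %d %d\n", align_up(1), align_up(16), align_up(17)); /* 16 16 32 */
        return 0;
    }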
|
||||
|
||||
AVSC_INLINE int avs_get_height(const AVS_VideoFrame * p) {
|
||||
return p->height;}
|
||||
|
||||
AVSC_API(const BYTE *, avs_get_read_ptr_p)(const AVS_VideoFrame * p, int plane);
|
||||
AVSC_INLINE int avs_get_height_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->heightUV;
|
||||
return 0;
|
||||
}
|
||||
return p->height;}
|
||||
|
||||
#ifdef AVS_IMPLICIT_FUNCTION_DECLARATION_ERROR
|
||||
AVSC_INLINE const BYTE* avs_get_read_ptr(const AVS_VideoFrame * p) {
|
||||
return avs_get_read_ptr_p(p, 0);}
|
||||
#endif
|
||||
return p->vfb->data + p->offset;}
|
||||
|
||||
AVSC_API(int, avs_is_writable)(const AVS_VideoFrame * p);
|
||||
AVSC_INLINE const BYTE* avs_get_read_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;}
|
||||
}
|
||||
|
||||
AVSC_API(BYTE *, avs_get_write_ptr_p)(const AVS_VideoFrame * p, int plane);
|
||||
AVSC_INLINE int avs_is_writable(const AVS_VideoFrame * p) {
|
||||
return (p->refcount == 1 && p->vfb->refcount == 1);}
|
||||
|
||||
AVSC_INLINE BYTE* avs_get_write_ptr(const AVS_VideoFrame * p)
|
||||
{
|
||||
if (avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else
|
||||
return 0;
|
||||
}
|
||||
|
||||
AVSC_INLINE BYTE* avs_get_write_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
if (plane==AVS_PLANAR_Y && avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else if (plane==AVS_PLANAR_Y) {
|
||||
return 0;
|
||||
} else {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef AVS_IMPLICIT_FUNCTION_DECLARATION_ERROR
|
||||
AVSC_INLINE BYTE* avs_get_write_ptr(const AVS_VideoFrame * p) {
|
||||
return avs_get_write_ptr_p(p, 0);}
|
||||
#endif
|
||||
|
||||
AVSC_API(void, avs_release_video_frame)(AVS_VideoFrame *);
|
||||
// makes a shallow copy of a video frame
|
||||
@@ -616,16 +658,12 @@ enum {
|
||||
AVS_CPUF_SSSE3 = 0x200, // Core 2
|
||||
AVS_CPUF_SSE4 = 0x400, // Penryn, Wolfdale, Yorkfield
|
||||
AVS_CPUF_SSE4_1 = 0x400,
|
||||
//AVS_CPUF_AVX = 0x800, // Sandy Bridge, Bulldozer
|
||||
AVS_CPUF_SSE4_2 = 0x1000, // Nehalem
|
||||
//AVS_CPUF_AVX2 = 0x2000, // Haswell
|
||||
//AVS_CPUF_AVX512 = 0x4000, // Knights Landing
|
||||
AVS_CPUF_SSE4_2 = 0x800, // Nehalem
|
||||
};
|
||||
|
||||
|
||||
AVSC_API(const char *, avs_get_error)(AVS_ScriptEnvironment *); // return 0 if no error
|
||||
|
||||
AVSC_API(int, avs_get_cpu_flags)(AVS_ScriptEnvironment *);
|
||||
AVSC_API(long, avs_get_cpu_flags)(AVS_ScriptEnvironment *);
|
||||
AVSC_API(int, avs_check_version)(AVS_ScriptEnvironment *, int version);
|
||||
|
||||
AVSC_API(char *, avs_save_string)(AVS_ScriptEnvironment *, const char* s, int length);
|
||||
@@ -662,12 +700,12 @@ AVSC_API(AVS_VideoFrame *, avs_new_video_frame_a)(AVS_ScriptEnvironment *,
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_video_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,FRAME_ALIGN);}
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,FRAME_ALIGN);}
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
#endif
|
||||
|
||||
|
||||
@@ -735,6 +773,7 @@ struct AVS_Library {
|
||||
AVSC_DECLARE_FUNC(avs_function_exists);
|
||||
AVSC_DECLARE_FUNC(avs_get_audio);
|
||||
AVSC_DECLARE_FUNC(avs_get_cpu_flags);
|
||||
AVSC_DECLARE_FUNC(avs_get_error);
|
||||
AVSC_DECLARE_FUNC(avs_get_frame);
|
||||
AVSC_DECLARE_FUNC(avs_get_parity);
|
||||
AVSC_DECLARE_FUNC(avs_get_var);
|
||||
@@ -759,27 +798,6 @@ struct AVS_Library {
|
||||
AVSC_DECLARE_FUNC(avs_subframe_planar);
|
||||
AVSC_DECLARE_FUNC(avs_take_clip);
|
||||
AVSC_DECLARE_FUNC(avs_vsprintf);
|
||||
|
||||
AVSC_DECLARE_FUNC(avs_get_error);
|
||||
AVSC_DECLARE_FUNC(avs_is_yv24);
|
||||
AVSC_DECLARE_FUNC(avs_is_yv16);
|
||||
AVSC_DECLARE_FUNC(avs_is_yv12);
|
||||
AVSC_DECLARE_FUNC(avs_is_yv411);
|
||||
AVSC_DECLARE_FUNC(avs_is_y8);
|
||||
AVSC_DECLARE_FUNC(avs_is_color_space);
|
||||
|
||||
AVSC_DECLARE_FUNC(avs_get_plane_width_subsampling);
|
||||
AVSC_DECLARE_FUNC(avs_get_plane_height_subsampling);
|
||||
AVSC_DECLARE_FUNC(avs_bits_per_pixel);
|
||||
AVSC_DECLARE_FUNC(avs_bytes_from_pixels);
|
||||
AVSC_DECLARE_FUNC(avs_row_size);
|
||||
AVSC_DECLARE_FUNC(avs_bmp_size);
|
||||
AVSC_DECLARE_FUNC(avs_get_pitch_p);
|
||||
AVSC_DECLARE_FUNC(avs_get_row_size_p);
|
||||
AVSC_DECLARE_FUNC(avs_get_height_p);
|
||||
AVSC_DECLARE_FUNC(avs_get_read_ptr_p);
|
||||
AVSC_DECLARE_FUNC(avs_is_writable);
|
||||
AVSC_DECLARE_FUNC(avs_get_write_ptr_p);
|
||||
};
|
||||
|
||||
#undef AVSC_DECLARE_FUNC
|
||||
@@ -787,7 +805,7 @@ struct AVS_Library {
|
||||
|
||||
AVSC_INLINE AVS_Library * avs_load_library() {
|
||||
AVS_Library *library = (AVS_Library *)malloc(sizeof(AVS_Library));
|
||||
if (library == NULL)
|
||||
if (!library)
|
||||
return NULL;
|
||||
library->handle = LoadLibrary("avisynth");
|
||||
if (library->handle == NULL)
|
||||
@@ -814,6 +832,7 @@ AVSC_INLINE AVS_Library * avs_load_library() {
|
||||
AVSC_LOAD_FUNC(avs_function_exists);
|
||||
AVSC_LOAD_FUNC(avs_get_audio);
|
||||
AVSC_LOAD_FUNC(avs_get_cpu_flags);
|
||||
AVSC_LOAD_FUNC(avs_get_error);
|
||||
AVSC_LOAD_FUNC(avs_get_frame);
|
||||
AVSC_LOAD_FUNC(avs_get_parity);
|
||||
AVSC_LOAD_FUNC(avs_get_var);
|
||||
@@ -839,27 +858,6 @@ AVSC_INLINE AVS_Library * avs_load_library() {
|
||||
AVSC_LOAD_FUNC(avs_take_clip);
|
||||
AVSC_LOAD_FUNC(avs_vsprintf);
|
||||
|
||||
AVSC_LOAD_FUNC(avs_get_error);
|
||||
AVSC_LOAD_FUNC(avs_is_yv24);
|
||||
AVSC_LOAD_FUNC(avs_is_yv16);
|
||||
AVSC_LOAD_FUNC(avs_is_yv12);
|
||||
AVSC_LOAD_FUNC(avs_is_yv411);
|
||||
AVSC_LOAD_FUNC(avs_is_y8);
|
||||
AVSC_LOAD_FUNC(avs_is_color_space);
|
||||
|
||||
AVSC_LOAD_FUNC(avs_get_plane_width_subsampling);
|
||||
AVSC_LOAD_FUNC(avs_get_plane_height_subsampling);
|
||||
AVSC_LOAD_FUNC(avs_bits_per_pixel);
|
||||
AVSC_LOAD_FUNC(avs_bytes_from_pixels);
|
||||
AVSC_LOAD_FUNC(avs_row_size);
|
||||
AVSC_LOAD_FUNC(avs_bmp_size);
|
||||
AVSC_LOAD_FUNC(avs_get_pitch_p);
|
||||
AVSC_LOAD_FUNC(avs_get_row_size_p);
|
||||
AVSC_LOAD_FUNC(avs_get_height_p);
|
||||
AVSC_LOAD_FUNC(avs_get_read_ptr_p);
|
||||
AVSC_LOAD_FUNC(avs_is_writable);
|
||||
AVSC_LOAD_FUNC(avs_get_write_ptr_p);
|
||||
|
||||
#undef __AVSC_STRINGIFY
|
||||
#undef AVSC_STRINGIFY
|
||||
#undef AVSC_LOAD_FUNC
|
||||
@@ -872,7 +870,7 @@ fail:
|
||||
}
|
||||
|
||||
AVSC_INLINE void avs_free_library(AVS_Library *library) {
|
||||
if (library == NULL)
|
||||
if (!library)
|
||||
return;
|
||||
FreeLibrary(library->handle);
|
||||
free(library);
|
||||
|
68
compat/avisynth/avisynth_c_25.h
Normal file
68
compat/avisynth/avisynth_c_25.h
Normal file
@@ -0,0 +1,68 @@
|
||||
// Copyright (c) 2011 FFmpegSource Project
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
|
||||
/* these are defines/functions that are used and were changed in the switch to 2.6
|
||||
* and are needed to maintain full compatility with 2.5 */
|
||||
|
||||
enum {
|
||||
AVS_CS_YV12_25 = 1<<3 | AVS_CS_YUV | AVS_CS_PLANAR, // y-v-u, planar
|
||||
AVS_CS_I420_25 = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR, // y-u-v, planar
|
||||
};
|
||||
|
||||
AVSC_INLINE int avs_get_height_p_25(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane)
|
||||
{
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV)
|
||||
return p->height>>1;
|
||||
return 0;
|
||||
}
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE int avs_get_row_size_p_25(const AVS_VideoFrame * p, int plane) {
|
||||
int r;
|
||||
switch (plane)
|
||||
{
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV)
|
||||
return p->row_size>>1;
|
||||
else
|
||||
return 0;
|
||||
case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED:
|
||||
if (p->pitchUV)
|
||||
{
|
||||
r = ((p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)) )>>1; // Aligned rowsize
|
||||
if (r < p->pitchUV)
|
||||
return r;
|
||||
return p->row_size>>1;
|
||||
}
|
||||
else
|
||||
return 0;
|
||||
case AVS_PLANAR_Y_ALIGNED:
|
||||
r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r <= p->pitch)
|
||||
return r;
|
||||
return p->row_size;
|
||||
}
|
||||
return p->row_size;
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_yv12_25(const AVS_VideoInfo * p)
|
||||
{ return ((p->pixel_type & AVS_CS_YV12_25) == AVS_CS_YV12_25)||((p->pixel_type & AVS_CS_I420_25) == AVS_CS_I420_25); }
|
@@ -1,62 +0,0 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_CAPI_H
|
||||
#define AVS_CAPI_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
# define EXTERN_C extern "C"
|
||||
#else
|
||||
# define EXTERN_C
|
||||
#endif
|
||||
|
||||
#ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
#else
|
||||
# define AVSC_CC __stdcall
|
||||
#endif
|
||||
|
||||
#define AVSC_INLINE static __inline
|
||||
|
||||
#ifdef BUILDING_AVSCORE
|
||||
# define AVSC_EXPORT EXTERN_C
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllexport) ret AVSC_CC name
|
||||
#else
|
||||
# define AVSC_EXPORT EXTERN_C __declspec(dllexport)
|
||||
# ifndef AVSC_NO_DECLSPEC
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name
|
||||
# else
|
||||
# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#endif //AVS_CAPI_H
|
@@ -1,55 +0,0 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_CONFIG_H
|
||||
#define AVS_CONFIG_H
|
||||
|
||||
// Undefine this to get cdecl calling convention
|
||||
#define AVSC_USE_STDCALL 1
|
||||
|
||||
// NOTE TO PLUGIN AUTHORS:
|
||||
// Because FRAME_ALIGN can be substantially higher than the alignment
|
||||
// a plugin actually needs, plugins should not use FRAME_ALIGN to check for
|
||||
// alignment. They should always request the exact alignment value they need.
|
||||
// This is to make sure that plugins work over the widest range of AviSynth
|
||||
// builds possible.
|
||||
#define FRAME_ALIGN 32
|
||||
|
||||
#if defined(_M_AMD64) || defined(__x86_64)
|
||||
# define X86_64
|
||||
#elif defined(_M_IX86) || defined(__i386__)
|
||||
# define X86_32
|
||||
#else
|
||||
# error Unsupported CPU architecture.
|
||||
#endif
|
||||
|
||||
#endif //AVS_CONFIG_H
|
@@ -1,51 +0,0 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_TYPES_H
|
||||
#define AVS_TYPES_H
|
||||
|
||||
// Define all types necessary for interfacing with avisynth.dll
|
||||
|
||||
// Raster types used by VirtualDub & Avisynth
|
||||
typedef unsigned int Pixel32;
|
||||
typedef unsigned char BYTE;
|
||||
|
||||
// Audio Sample information
|
||||
typedef float SFLOAT;
|
||||
|
||||
#ifdef __GNUC__
|
||||
typedef long long int INT64;
|
||||
#else
|
||||
typedef __int64 INT64;
|
||||
#endif
|
||||
|
||||
#endif //AVS_TYPES_H
|
@@ -513,21 +513,21 @@ AVSC_INLINE AVS_Value avs_array_elt(AVS_Value v, int index)
|
||||
// only use these functions on am AVS_Value that does not already have
|
||||
// an active value. Remember, treat AVS_Value as a fat pointer.
|
||||
AVSC_INLINE AVS_Value avs_new_value_bool(int v0)
|
||||
{ AVS_Value v = {0}; v.type = 'b'; v.d.boolean = v0 == 0 ? 0 : 1; return v; }
|
||||
{ AVS_Value v; v.type = 'b'; v.d.boolean = v0 == 0 ? 0 : 1; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_int(int v0)
|
||||
{ AVS_Value v = {0}; v.type = 'i'; v.d.integer = v0; return v; }
|
||||
{ AVS_Value v; v.type = 'i'; v.d.integer = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_string(const char * v0)
|
||||
{ AVS_Value v = {0}; v.type = 's'; v.d.string = v0; return v; }
|
||||
{ AVS_Value v; v.type = 's'; v.d.string = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_float(float v0)
|
||||
{ AVS_Value v = {0}; v.type = 'f'; v.d.floating_pt = v0; return v;}
|
||||
{ AVS_Value v; v.type = 'f'; v.d.floating_pt = v0; return v;}
|
||||
AVSC_INLINE AVS_Value avs_new_value_error(const char * v0)
|
||||
{ AVS_Value v = {0}; v.type = 'e'; v.d.string = v0; return v; }
|
||||
{ AVS_Value v; v.type = 'e'; v.d.string = v0; return v; }
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE AVS_Value avs_new_value_clip(AVS_Clip * v0)
|
||||
{ AVS_Value v = {0}; avs_set_to_clip(&v, v0); return v; }
|
||||
{ AVS_Value v; avs_set_to_clip(&v, v0); return v; }
|
||||
#endif
|
||||
AVSC_INLINE AVS_Value avs_new_value_array(AVS_Value * v0, int size)
|
||||
{ AVS_Value v = {0}; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
|
||||
{ AVS_Value v; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
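The only difference between the paired constructor lines above is the `= {0}` initializer. AVS_Value is copied around by value, so without it any member not set explicitly (for instance array_size in the non-array constructors) travels uninitialized; the zero initializer makes every constructor start from a fully defined struct. Illustrative comparison only, the _example suffix is made up:

    AVSC_INLINE AVS_Value avs_new_value_int_example(int v0)
    { AVS_Value v = {0}; v.type = 'i'; v.d.integer = v0; return v; } /* all other fields zeroed */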
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
//

@@ -52,8 +52,8 @@ namespace avxsynth {
//
// Functions
//
#define MAKEDWORD(a,b,c,d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
#define MAKEWORD(a,b) (((a) << 8) | (b))
#define MAKEDWORD(a,b,c,d) ((a << 24) | (b << 16) | (c << 8) | (d))
#define MAKEWORD(a,b) ((a << 8) | (b))
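The two MAKEDWORD/MAKEWORD variants in this hunk differ only in whether the macro parameters are parenthesized. That matters as soon as an argument is an expression whose operators bind more loosely than `<<`; a small sketch (x is a made-up variable):

    #define MAKEWORD_OLD(a,b) ((a << 8) | (b))
    #define MAKEWORD_NEW(a,b) (((a) << 8) | (b))

    /* With x = 0x0f:
     *   MAKEWORD_OLD(x | 1, 0) expands to ((x | 1 << 8) | (0))   == x | 0x100 == 0x10f
     *   MAKEWORD_NEW(x | 1, 0) expands to (((x | 1) << 8) | (0)) == 0x0f00
     * Because | binds more loosely than <<, only the parenthesized form shifts
     * the whole argument. */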
|
||||
|
||||
#define lstrlen strlen
|
||||
#define lstrcpy strcpy
|
||||
|
@@ -66,8 +66,6 @@ static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr
|
||||
struct thread_arg *thread_arg;
|
||||
|
||||
thread_arg = av_mallocz(sizeof(struct thread_arg));
|
||||
if (!thread_arg)
|
||||
return ENOMEM;
|
||||
|
||||
thread_arg->start_routine = start_routine;
|
||||
thread_arg->arg = arg;
|
||||
|
@@ -1,9 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
LINK_EXE_PATH=$(dirname "$(command -v cl)")/link
|
||||
if [ -x "$LINK_EXE_PATH" ]; then
|
||||
"$LINK_EXE_PATH" $@
|
||||
else
|
||||
link $@
|
||||
fi
|
||||
exit $?
|
doc/APIchanges (146 changed lines)
@@ -15,141 +15,6 @@ libavutil: 2014-08-09
|
||||
|
||||
API changes, most recent first:
|
||||
|
||||
-------- 8< --------- FFmpeg 2.8 was cut here -------- 8< ---------
|
||||
|
||||
2015-08-27 - 1dd854e1 - lavc 56.58.100 - vaapi.h
|
||||
Deprecate old VA-API context (vaapi_context) fields that were only
|
||||
set and used by libavcodec. They are all managed internally now.
|
||||
|
||||
2015-08-19 - 9f8e57ef - lavu 54.31.100 - pixfmt.h
|
||||
Add a unique pixel format for VA-API (AV_PIX_FMT_VAAPI) that
|
||||
indicates the nature of the underlying storage: a VA surface. This
|
||||
yields the same value as AV_PIX_FMT_VAAPI_VLD.
|
||||
Deprecate old VA-API related pixel formats: AV_PIX_FMT_VAAPI_MOCO,
|
||||
AV_PIX_FMT_VAAPI_IDCT, AV_PIX_FMT_VAAPI_VLD.
|
||||
|
||||
2015-08-02 - lavu 54.30.100 / 54.17.0
|
||||
9ed59f1 / 7a7df34c - Add av_blowfish_alloc().
|
||||
a130ec9 / ae365453 - Add av_rc4_alloc().
|
||||
9ca1997 / 5d8bea3b - Add av_xtea_alloc().
|
||||
3cf08e9 / d9e8b47e - Add av_des_alloc().
|
||||
|
||||
2015-07-27 - lavc 56.56.100 / 56.35.0 - avcodec.h
|
||||
94d68a4 / 7c6eb0a1 - Rename CODEC_FLAG* defines to AV_CODEC_FLAG*.
|
||||
444e987 / def97856 - Rename CODEC_CAP_* defines to AV_CODEC_CAP_*.
|
||||
29d147c / 059a9348 - Rename FF_INPUT_BUFFER_PADDING_SIZE and FF_MIN_BUFFER_SIZE
|
||||
to AV_INPUT_BUFFER_PADDING_SIZE and AV_INPUT_BUFFER_MIN_SIZE.
|
||||
|
||||
2015-07-22 - c40ecff - lavc 56.51.100 - avcodec.h
|
||||
Add AV_PKT_DATA_QUALITY_STATS to export the quality value, PSNR, and pict_type
|
||||
of an AVPacket.
|
||||
|
||||
2015-07-16 - 8dad213 - lavc 56.49.100
|
||||
Add av_codec_get_codec_properties(), FF_CODEC_PROPERTY_LOSSLESS
|
||||
and FF_CODEC_PROPERTY_CLOSED_CAPTIONS
|
||||
|
||||
2015-07-03 - d563e13 / 83212943 - lavu 54.28.100 / 56.15.0
|
||||
Add av_version_info().
|
||||
|
||||
-------- 8< --------- FFmpeg 2.7 was cut here -------- 8< ---------
|
||||
|
||||
2015-06-04 - cc17b43 - lswr 1.2.100
|
||||
Add swr_get_out_samples()
|
||||
|
||||
2015-05-27 - c312bfa - lavu 54.26.100 - cpu.h
|
||||
Add AV_CPU_FLAG_AVXSLOW.
|
||||
|
||||
2015-05-26 - 1fb9b2a - lavu 54.25.100 - rational.h
|
||||
Add av_q2intfloat().
|
||||
|
||||
2015-05-13 - cc48409 / e7c5e17 - lavc 56.39.100 / 56.23.0
|
||||
Add av_vda_default_init2.
|
||||
|
||||
2015-05-11 - 541d75f - lavf 56.33.100 - avformat.h
|
||||
Add AVOpenCallback AVFormatContext.open_cb
|
||||
|
||||
2015-05-07 - a7dd933 - 56.38.100 - avcodec.h
|
||||
Add av_packet_side_data_name().
|
||||
|
||||
2015-05-07 - 01e59d4 - 56.37.102 - avcodec.h
|
||||
Add FF_PROFILE_VP9_2 and FF_PROFILE_VP9_3.
|
||||
|
||||
2015-05-04 - 079b7f6 - 56.37.100 - avcodec.h
|
||||
Add FF_PROFILE_VP9_0 and FF_PROFILE_VP9_1.
|
||||
|
||||
2015-04-22 - 748d481 - lavf 56.31.100 - avformat.h
|
||||
Add AVFMT_FLAG_FAST_SEEK flag. Some formats (initially mp3) use it to enable
|
||||
fast, but inaccurate seeking.
|
||||
|
||||
2015-04-20 - 8e8219e / c253340 - lavu 54.23.100 / 54.12.0 - log.h
|
||||
Add AV_LOG_TRACE for extremely verbose debugging.
|
||||
|
||||
2015-04-02 - 26e0e393 - lavf 56.29.100 - avio.h
|
||||
Add AVIODirEntryType.AVIO_ENTRY_SERVER.
|
||||
Add AVIODirEntryType.AVIO_ENTRY_SHARE.
|
||||
Add AVIODirEntryType.AVIO_ENTRY_WORKGROUP.
|
||||
|
||||
2015-03-31 - 3188696 - lavu 54.22.100 - avstring.h
|
||||
Add av_append_path_component()
|
||||
|
||||
2015-03-27 - 184084c - lavf 56.27.100 - avio.h url.h
|
||||
New directory listing API.
|
||||
|
||||
Add AVIODirEntryType enum.
|
||||
Add AVIODirEntry, AVIODirContext structures.
|
||||
Add avio_open_dir(), avio_read_dir(), avio_close_dir(), avio_free_directory_entry().
|
||||
Add ff_alloc_dir_entry().
|
||||
Extend URLProtocol with url_open_dir(), url_read_dir(), url_close_dir().
|
||||
|
||||
2015-03-29 - 268ff17 / c484561 - lavu 54.21.100 / 54.10.0 - pixfmt.h
|
||||
Add AV_PIX_FMT_MMAL for MMAL hardware acceleration.
|
||||
|
||||
2015-03-19 - 11fe56c - 56.29.100 / lavc 56.22.0
|
||||
Add FF_PROFILE_DTS_EXPRESS.
|
||||
|
||||
-------- 8< --------- FFmpeg 2.6 was cut here -------- 8< ---------
|
||||
|
||||
2015-03-04 - cca4476 - lavf 56.25.100
|
||||
Add avformat_flush()
|
||||
|
||||
2015-03-03 - 81a9126 - lavf 56.24.100
|
||||
Add avio_put_str16be()
|
||||
|
||||
2015-02-19 - 560eb71 / 31d2039 - lavc 56.23.100 / 56.13.0
|
||||
Add width, height, coded_width, coded_height and format to
|
||||
AVCodecParserContext.
|
||||
|
||||
2015-02-19 - e375511 / 5b1d9ce - lavu 54.19.100 / 54.9.0
|
||||
Add AV_PIX_FMT_QSV for QSV hardware acceleration.
|
||||
|
||||
2015-02-14 - ba22295 - lavc 56.21.102
|
||||
Deprecate VIMA decoder.
|
||||
|
||||
2015-01-27 - 62a82c6 / 728685f - lavc 56.21.100 / 56.12.0, lavu 54.18.100 / 54.8.0 - avcodec.h, frame.h
|
||||
Add AV_PKT_DATA_AUDIO_SERVICE_TYPE and AV_FRAME_DATA_AUDIO_SERVICE_TYPE for
|
||||
storing the audio service type as side data.
|
||||
|
||||
2015-01-16 - a47c933 - lavf 56.19.100 - avformat.h
|
||||
Add data_codec and data_codec_id for storing codec of data stream
|
||||
|
||||
2015-01-11 - 007c33d - lavd 56.4.100 - avdevice.h
|
||||
Add avdevice_list_input_sources().
|
||||
Add avdevice_list_output_sinks().
|
||||
|
||||
2014-12-25 - d7aaeea / c220a60 - lavc 56.19.100 / 56.10.0 - vdpau.h
|
||||
Add av_vdpau_get_surface_parameters().
|
||||
|
||||
2014-12-25 - ddb9a24 / 6c99c92 - lavc 56.18.100 / 56.9.0 - avcodec.h
|
||||
Add AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH flag to av_vdpau_bind_context().
|
||||
|
||||
2014-12-25 - d16079a / 57b6704 - lavc 56.17.100 / 56.8.0 - avcodec.h
|
||||
Add AVCodecContext.sw_pix_fmt.
|
||||
|
||||
2014-12-04 - 6e9ac02 - lavc 56.14.100 - dv_profile.h
|
||||
Add av_dv_codec_profile2().
|
||||
|
||||
-------- 8< --------- FFmpeg 2.5 was cut here -------- 8< ---------
|
||||
|
||||
2014-11-21 - ab922f9 - lavu 54.15.100 - dict.h
|
||||
Add av_dict_get_string().
|
||||
|
||||
@@ -205,12 +70,12 @@ API changes, most recent first:
|
||||
|
||||
-------- 8< --------- FFmpeg 2.4 was cut here -------- 8< ---------
|
||||
|
||||
2014-08-25 - 215db29 / b263f8f - lavf 56.3.100 / 56.3.0 - avformat.h
|
||||
Add AVFormatContext.max_ts_probe.
|
||||
|
||||
2014-08-28 - f30a815 / 9301486 - lavc 56.1.100 / 56.1.0 - avcodec.h
|
||||
Add AV_PKT_DATA_STEREO3D to export container-level stereo3d information.
|
||||
|
||||
2014-08-25 - 215db29 / b263f8f - lavf 56.3.100 / 56.3.0 - avformat.h
|
||||
Add AVFormatContext.max_ts_probe.
|
||||
|
||||
2014-08-23 - 8fc9bd0 - lavu 54.7.100 - dict.h
|
||||
AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL arguments are now
|
||||
freed even on error. This is consistent with the behaviour all users
|
||||
@@ -388,7 +253,7 @@ API changes, most recent first:
|
||||
Add avcodec_free_context(). From now on it should be used for freeing
|
||||
AVCodecContext.
|
||||
|
||||
2014-05-17 - 0eec06e / 1bd0bdc - lavu 52.84.100 / 54.5.0 - time.h
|
||||
2014-05-17 - 0eec06e - lavu 52.84.100 - time.h
|
||||
Add av_gettime_relative() av_gettime_relative_is_monotonic()
|
||||
|
||||
2014-05-15 - eacf7d6 / 0c1959b - lavf 55.38.100 / 55.17.0 - avformat.h
|
||||
@@ -728,9 +593,6 @@ API changes, most recent first:
|
||||
av_ripemd_update()
|
||||
av_ripemd_final()
|
||||
|
||||
2013-06-10 - 82ef670 - lavu 52.35.101 - hmac.h
|
||||
Add AV_HMAC_SHA224, AV_HMAC_SHA256, AV_HMAC_SHA384, AV_HMAC_SHA512
|
||||
|
||||
2013-06-04 - 30b491f / fc962d4 - lavu 52.35.100 / 52.13.0 - mem.h
|
||||
Add av_realloc_array and av_reallocp_array
|
||||
|
||||
|
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
|
||||
# This could be handy for archiving the generated documentation or
|
||||
# if some version control system is used.
|
||||
|
||||
PROJECT_NUMBER = 2.8.1
|
||||
PROJECT_NUMBER = 2.5.5
|
||||
|
||||
# With the PROJECT_LOGO tag one can specify a logo or icon that is included
|
||||
# in the documentation. The maximum height of the logo should not exceed 55
|
||||
|
@@ -36,7 +36,6 @@ DOCS-$(CONFIG_MANPAGES) += $(MANPAGES)
|
||||
DOCS-$(CONFIG_TXTPAGES) += $(TXTPAGES)
|
||||
DOCS = $(DOCS-yes)
|
||||
|
||||
DOC_EXAMPLES-$(CONFIG_AVIO_DIR_CMD_EXAMPLE) += avio_dir_cmd
|
||||
DOC_EXAMPLES-$(CONFIG_AVIO_READING_EXAMPLE) += avio_reading
|
||||
DOC_EXAMPLES-$(CONFIG_AVCODEC_EXAMPLE) += avcodec
|
||||
DOC_EXAMPLES-$(CONFIG_DECODING_ENCODING_EXAMPLE) += decoding_encoding
|
||||
@@ -47,7 +46,6 @@ DOC_EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE) += filtering_audio
|
||||
DOC_EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE) += filtering_video
|
||||
DOC_EXAMPLES-$(CONFIG_METADATA_EXAMPLE) += metadata
|
||||
DOC_EXAMPLES-$(CONFIG_MUXING_EXAMPLE) += muxing
|
||||
DOC_EXAMPLES-$(CONFIG_QSVDEC_EXAMPLE) += qsvdec
|
||||
DOC_EXAMPLES-$(CONFIG_REMUXING_EXAMPLE) += remuxing
|
||||
DOC_EXAMPLES-$(CONFIG_RESAMPLING_AUDIO_EXAMPLE) += resampling_audio
|
||||
DOC_EXAMPLES-$(CONFIG_SCALING_VIDEO_EXAMPLE) += scaling_video
|
||||
@@ -116,9 +114,9 @@ doc/%-all.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI)
|
||||
|
||||
doc/%.1 doc/%.3: TAG = MAN
|
||||
doc/%.1: doc/%.pod $(GENTEXI)
|
||||
$(M)pod2man --section=1 --center=" " --release=" " --date=" " $< > $@
|
||||
$(M)pod2man --section=1 --center=" " --release=" " $< > $@
|
||||
doc/%.3: doc/%.pod $(GENTEXI)
|
||||
$(M)pod2man --section=3 --center=" " --release=" " --date=" " $< > $@
|
||||
$(M)pod2man --section=3 --center=" " --release=" " $< > $@
|
||||
|
||||
$(DOCS) doc/doxy/html: | doc/
|
||||
$(DOC_EXAMPLES:%$(EXESUF)=%.o): | doc/examples
|
||||
|
@@ -139,26 +139,6 @@ ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
|
||||
|
||||
@section mp3_header_decompress
|
||||
|
||||
@section mpeg4_unpack_bframes
|
||||
|
||||
Unpack DivX-style packed B-frames.
|
||||
|
||||
DivX-style packed B-frames are not valid MPEG-4 and were only a
|
||||
workaround for the broken Video for Windows subsystem.
|
||||
They use more space, can cause minor AV sync issues, require more
|
||||
CPU power to decode (unless the player has some decoded picture queue
|
||||
to compensate the 2,0,2,0 frame per packet style) and cause
|
||||
trouble if copied into a standard container like mp4 or mpeg-ps/ts,
|
||||
because MPEG-4 decoders may not be able to decode them, since they are
|
||||
not valid MPEG-4.
|
||||
|
||||
For example to fix an AVI file containing an MPEG-4 stream with
|
||||
DivX-style packed B-frames using @command{ffmpeg}, you can use the command:
|
||||
|
||||
@example
|
||||
ffmpeg -i INPUT.avi -codec copy -bsf:v mpeg4_unpack_bframes OUTPUT.avi
|
||||
@end example
|
||||
|
||||
@section noise
|
||||
|
||||
Damages the contents of packets without damaging the container. Can be
|
||||
|
@@ -7,18 +7,10 @@ V
|
||||
Disable the default terse mode, the full command issued by make and its
|
||||
output will be shown on the screen.
|
||||
|
||||
DBG
|
||||
Preprocess x86 external assembler files to a .dbg.asm file in the object
|
||||
directory, which then gets compiled. Helps developping those assembler
|
||||
files.
|
||||
|
||||
DESTDIR
|
||||
Destination directory for the install targets, useful to prepare packages
|
||||
or install FFmpeg in cross-environments.
|
||||
|
||||
GEN
|
||||
Set to ‘1’ to generate the missing or mismatched references.
|
||||
|
||||
Makefile targets:
|
||||
|
||||
all
|
||||
|
@@ -7,7 +7,7 @@ all the encoders and decoders. In addition each codec may support
|
||||
so-called private options, which are specific for a given codec.
|
||||
|
||||
Sometimes, a global option may only affect a specific kind of codec,
|
||||
and may be nonsensical or ignored by another, so you need to be aware
|
||||
and may be unsensical or ignored by another, so you need to be aware
|
||||
of the meaning of the specified options. Also some options are
|
||||
meant only for decoding or encoding.
|
||||
|
||||
@@ -475,9 +475,6 @@ per-block quantization parameter (QP)
|
||||
motion vector
|
||||
@item dct_coeff
|
||||
|
||||
@item green_metadata
|
||||
display complexity metadata for the upcoming frame, GoP or for a given duration.
|
||||
|
||||
@item skip
|
||||
|
||||
@item startcode
|
||||
@@ -498,8 +495,6 @@ visualize block types
|
||||
picture buffer allocations
|
||||
@item thread_ops
|
||||
threading operations
|
||||
@item nomc
|
||||
skip motion compensation
|
||||
@end table
|
||||
|
||||
@item vismv @var{integer} (@emph{decoding,video})
|
||||
@@ -868,14 +863,6 @@ Possible values:
|
||||
|
||||
@item mpeg2_aac_he
|
||||
|
||||
@item mpeg4_sp
|
||||
|
||||
@item mpeg4_core
|
||||
|
||||
@item mpeg4_main
|
||||
|
||||
@item mpeg4_asp
|
||||
|
||||
@item dts
|
||||
|
||||
@item dts_es
|
||||
@@ -1045,11 +1032,7 @@ Possible values:
|
||||
@item color_primaries @var{integer} (@emph{decoding/encoding,video})
|
||||
@item color_trc @var{integer} (@emph{decoding/encoding,video})
|
||||
@item colorspace @var{integer} (@emph{decoding/encoding,video})
|
||||
|
||||
@item color_range @var{integer} (@emph{decoding/encoding,video})
|
||||
If used as input parameter, it serves as a hint to the decoder, which
|
||||
color_range the input has.
|
||||
|
||||
@item chroma_sample_location @var{integer} (@emph{decoding/encoding,video})
|
||||
|
||||
@item log_level_offset @var{integer}
|
||||
|
@@ -25,13 +25,6 @@ enabled decoders.
|
||||
A description of some of the currently available video decoders
|
||||
follows.
|
||||
|
||||
@section hevc

HEVC / H.265 decoder.

Note: the @option{skip_loop_filter} option has effect only at level
@code{all}.
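
For instance, a rough sketch (input name is a placeholder) that trades some
quality for decoding speed by skipping the in-loop filter on every frame:
@example
ffmpeg -skip_loop_filter all -i INPUT.mkv -f null -
@end example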
|
||||
|
||||
@section rawvideo
|
||||
|
||||
Raw video decoder.
|
||||
@@ -90,23 +83,6 @@ Loud sounds are fully compressed. Soft sounds are enhanced.
|
||||
|
||||
@end table
|
||||
|
||||
@section flac

FLAC audio decoder.

This decoder aims to implement the complete FLAC specification from Xiph.

@subsection FLAC Decoder options

@table @option

@item -use_buggy_lpc
The lavc FLAC encoder used to produce buggy streams with high lpc values
(like the default value). This option makes it possible to decode such streams
correctly by using lavc's old buggy lpc logic for decoding.

@end table
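
For example (file names are placeholders), such a stream can be decoded with
the option enabled on the input:
@example
ffmpeg -use_buggy_lpc 1 -i INPUT.flac OUTPUT.wav
@end example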
|
||||
|
||||
@section ffwavesynth

Internal wave synthesizer.
|
||||
@@ -195,25 +171,6 @@ without this library.
|
||||
@chapter Subtitles Decoders
@c man begin SUBTILES DECODERS

@section dvbsub

@subsection Options

@table @option
@item compute_clut
@table @option
@item -1
Compute the CLUT if no matching CLUT is in the stream.
@item 0
Never compute the CLUT.
@item 1
Always compute the CLUT and override the one provided in the stream.
@end table
@item dvb_substream
Selects the DVB substream, or all substreams if -1, which is the default.

@end table
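
As an illustrative sketch (file names are placeholders), CLUT computation can
be forced while converting DVB subtitles to DVD subtitles:
@example
ffmpeg -compute_clut 1 -i INPUT.ts -c copy -c:s dvdsub OUTPUT.mkv
@end example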
|
||||
|
||||
@section dvdsub
|
||||
|
||||
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
|
||||
|
@@ -18,12 +18,6 @@ enabled demuxers.
|
||||
|
||||
The description of some of the currently available demuxers follows.
|
||||
|
||||
@section aa
|
||||
|
||||
Audible Format 2, 3, and 4 demuxer.
|
||||
|
||||
This demuxer is used to demux Audible Format 2, 3, and 4 (.aa) files.
|
||||
|
||||
@section applehttp
|
||||
|
||||
Apple HTTP Live Streaming demuxer.
|
||||
@@ -118,47 +112,6 @@ file is not available or accurate.
|
||||
If the duration is set for all files, then it is possible to seek in the
|
||||
whole concatenated video.
|
||||
|
||||
@item @code{inpoint @var{timestamp}}
|
||||
In point of the file. When the demuxer opens the file it instantly seeks to the
|
||||
specified timestamp. Seeking is done so that all streams can be presented
|
||||
successfully at In point.
|
||||
|
||||
This directive works best with intra frame codecs, because for non-intra frame
|
||||
ones you will usually get extra packets before the actual In point and the
|
||||
decoded content will most likely contain frames before In point too.
|
||||
|
||||
For each file, packets before the file In point will have timestamps less than
|
||||
the calculated start timestamp of the file (negative in case of the first
|
||||
file), and the duration of the files (if not specified by the @code{duration}
|
||||
directive) will be reduced based on their specified In point.
|
||||
|
||||
Because of potential packets before the specified In point, packet timestamps
|
||||
may overlap between two concatenated files.
|
||||
|
||||
@item @code{outpoint @var{timestamp}}
|
||||
Out point of the file. When the demuxer reaches the specified decoding
|
||||
timestamp in any of the streams, it handles it as an end of file condition and
|
||||
skips the current and all the remaining packets from all streams.
|
||||
|
||||
Out point is exclusive, which means that the demuxer will not output packets
|
||||
with a decoding timestamp greater or equal to Out point.
|
||||
|
||||
This directive works best with intra frame codecs and formats where all streams
are tightly interleaved. For non-intra frame codecs you will usually get
additional packets with presentation timestamp after Out point, therefore the
decoded content will most likely contain frames after Out point too. If your
streams are not tightly interleaved you may not get all the packets from all
streams before Out point and you may only be able to decode the earliest
stream until Out point.
|
||||
|
||||
The duration of the files (if not specified by the @code{duration}
|
||||
directive) will be reduced based on their specified Out point.
|
||||
|
||||
@item @code{file_packet_metadata @var{key=value}}
|
||||
Metadata of the packets of the file. The specified metadata will be set for
|
||||
each file packet. You can specify this directive multiple times to add multiple
|
||||
metadata entries.
|
||||
|
||||
@item @code{stream}
|
||||
Introduce a stream in the virtual file.
|
||||
All subsequent stream-related directives apply to the last introduced
|
||||
@@ -198,7 +151,6 @@ probed and 0 otherwise.
|
||||
@item auto_convert
|
||||
If set to 1, try to perform automatic conversions on packet data to make the
|
||||
streams concatenable.
|
||||
The default is 1.
|
||||
|
||||
Currently, the only conversion is adding the h264_mp4toannexb bitstream
|
||||
filter to H.264 streams in MP4 format. This is necessary in particular if
|
||||
@@ -253,11 +205,6 @@ It accepts the following options:
|
||||
Set the minimum valid delay between frames in hundredths of seconds.
|
||||
Range is 0 to 6000. Default value is 2.
|
||||
|
||||
@item max_gif_delay
|
||||
Set the maximum valid delay between frames in hundredth of seconds.
|
||||
Range is 0 to 65535. Default value is 65535 (nearly eleven minutes),
|
||||
the maximum value allowed by the specification.
|
||||
|
||||
@item default_delay
|
||||
Set the default delay between frames in hundredths of seconds.
|
||||
Range is 0 to 6000. Default value is 10.
|
||||
@@ -306,10 +253,6 @@ Select the pattern type used to interpret the provided filename.
|
||||
|
||||
@var{pattern_type} accepts one of the following values.
|
||||
@table @option
|
||||
@item none
|
||||
Disable pattern matching, therefore the video will only contain the specified
|
||||
image. You should use this option if you do not want to create sequences from
|
||||
multiple images and your filenames may contain special pattern characters.
|
||||
@item sequence
|
||||
Select a sequence pattern type, used to specify a sequence of files
|
||||
indexed by sequential numbers.
|
||||
@@ -420,26 +363,13 @@ ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
|
||||
|
||||
MPEG-2 transport stream demuxer.

This demuxer accepts the following options:
@table @option
@item resync_size
Set size limit for looking up a new synchronization. Default value is
65536.

@item fix_teletext_pts
Override teletext packet PTS and DTS values with the timestamps calculated
from the PCR of the first program which the teletext stream is part of and is
not discarded. Default value is 1; set this option to 0 if you want your
teletext packet PTS and DTS values untouched.

@item ts_packetsize
Output option carrying the raw packet size in bytes.
Show the detected raw packet size, cannot be set by the user.

@item scan_all_pmts
Scan and combine all PMTs. The value is an integer with value from -1
to 1 (-1 means automatic setting, 1 means enabled, 0 means
disabled). Default value is -1.
@end table
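
For example (file names are placeholders), to keep the original teletext
timestamps while remuxing:
@example
ffmpeg -fix_teletext_pts 0 -i INPUT.ts -c copy OUTPUT.ts
@end example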
|
||||
|
||||
@section rawvideo
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Developer Documentation
|
||||
@titlepage
|
||||
@@ -228,7 +227,7 @@ autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@@<!$/
|
||||
@end example
|
||||
|
||||
For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
@lisp
(c-add-style "ffmpeg"
             '("k&r"
               (c-basic-offset . 4)
|
||||
@@ -239,7 +238,7 @@ For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
               )
             )
(setq c-default-style "ffmpeg")
@end lisp
|
||||
|
||||
@section Development Policy
|
||||
|
||||
@@ -543,10 +542,6 @@ tools/trasher, the noise bitstream filter, and
|
||||
should not crash, end in a (near) infinite loop, or allocate ridiculous
|
||||
amounts of memory when fed damaged data.
|
||||
|
||||
@item
|
||||
Did you test your decoder or demuxer against sample files?
|
||||
Samples may be obtained at @url{http://samples.ffmpeg.org}.
|
||||
|
||||
@item
|
||||
Does the patch not mix functional and cosmetic changes?
|
||||
|
||||
@@ -637,10 +632,6 @@ not related to the comments received during review. Such patches will
|
||||
be rejected. Instead, submit significant changes or new features as
|
||||
separate patches.
|
||||
|
||||
Everyone is welcome to review patches. Also if you are waiting for your patch
|
||||
to be reviewed, please consider helping to review other patches, that is a great
|
||||
way to get everyone's patches reviewed sooner.
|
||||
|
||||
@anchor{Regression tests}
|
||||
@section Regression tests
|
||||
|
||||
@@ -656,12 +647,12 @@ accordingly].
|
||||
@subsection Adding files to the fate-suite dataset

When there is no muxer or encoder available to generate test media for a
specific test then the media has to be included in the fate-suite.
First please make sure that the sample file is as small as possible to test the
respective decoder or demuxer sufficiently. Large files increase network
bandwidth and disk space requirements.
Once you have a working fate test and fate sample, provide in the commit
message or introductory message for the patch series that you post to
the ffmpeg-devel mailing list, a direct link to download the sample media.
|
||||
|
||||
|
||||
|
@@ -6,16 +6,8 @@ DOXYGEN="${3}"
|
||||
|
||||
shift 3
|
||||
|
||||
if [ -e "$SRC_PATH/VERSION" ]; then
|
||||
VERSION=`cat "$SRC_PATH/VERSION"`
|
||||
else
|
||||
VERSION=`cd "$SRC_PATH"; git describe`
|
||||
fi
|
||||
|
||||
$DOXYGEN - <<EOF
|
||||
@INCLUDE = ${DOXYFILE}
|
||||
INPUT = $@
|
||||
EXAMPLE_PATH = ${SRC_PATH}/doc/examples
|
||||
HTML_TIMESTAMP = NO
|
||||
PROJECT_NUMBER = $VERSION
|
||||
EOF
|
||||
|
@@ -494,85 +494,6 @@ Selected by Encoder (default)
|
||||
|
||||
@end table
|
||||
|
||||
@anchor{flac}
|
||||
@section flac
|
||||
|
||||
FLAC (Free Lossless Audio Codec) Encoder
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following options are supported by FFmpeg's flac encoder.
|
||||
|
||||
@table @option
|
||||
@item compression_level
|
||||
Sets the compression level, which chooses defaults for many other options
|
||||
if they are not set explicitly.
|
||||
|
||||
@item frame_size
|
||||
Sets the size of the frames in samples per channel.
|
||||
|
||||
@item lpc_coeff_precision
|
||||
Sets the LPC coefficient precision, valid values are from 1 to 15, 15 is the
|
||||
default.
|
||||
|
||||
@item lpc_type
|
||||
Sets the first stage LPC algorithm
|
||||
@table @samp
|
||||
@item none
|
||||
LPC is not used
|
||||
|
||||
@item fixed
|
||||
fixed LPC coefficients
|
||||
|
||||
@item levinson
|
||||
|
||||
@item cholesky
|
||||
@end table
|
||||
|
||||
@item lpc_passes
|
||||
Number of passes to use for Cholesky factorization during LPC analysis
|
||||
|
||||
@item min_partition_order
|
||||
The minimum partition order
|
||||
|
||||
@item max_partition_order
|
||||
The maximum partition order
|
||||
|
||||
@item prediction_order_method
|
||||
@table @samp
|
||||
@item estimation
|
||||
@item 2level
|
||||
@item 4level
|
||||
@item 8level
|
||||
@item search
|
||||
Bruteforce search
|
||||
@item log
|
||||
@end table
|
||||
|
||||
@item ch_mode
|
||||
Channel mode
|
||||
@table @samp
|
||||
@item auto
|
||||
The mode is chosen automatically for each frame
|
||||
@item indep
|
||||
Channels are independently coded
|
||||
@item left_side
|
||||
@item right_side
|
||||
@item mid_side
|
||||
@end table
|
||||
|
||||
@item exact_rice_parameters
|
||||
Chooses if Rice parameters are calculated exactly or approximately.
If set to 1 then they are chosen exactly, which slows the code down slightly and
improves compression slightly.
|
||||
|
||||
@item multi_dim_quant
|
||||
Multi Dimensional Quantization. If set to 1 then a 2nd stage LPC algorithm is
applied after the first stage to fine-tune the coefficients. This is quite slow
and slightly improves compression.
|
||||
|
||||
@end table
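
As a usage sketch (file names are placeholders), a high-effort encode combining
some of the options above:
@example
ffmpeg -i INPUT.wav -c:a flac -compression_level 12 -exact_rice_parameters 1 OUTPUT.flac
@end example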
|
||||
|
||||
@anchor{libfaac}
|
||||
@section libfaac
|
||||
|
||||
@@ -1342,30 +1263,6 @@ disabled
|
||||
A description of some of the currently available video encoders
|
||||
follows.
|
||||
|
||||
@section jpeg2000

The native jpeg 2000 encoder is lossy by default, the @code{-q:v}
option can be used to set the encoding quality. Lossless encoding
can be selected with @code{-pred 1}.

@subsection Options

@table @option
@item format
Can be set to either @code{j2k} or @code{jp2} (the default), which
makes it possible to store non-rgb pix_fmts.

@end table
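
For example (file names are placeholders), a lossless encode using the default
@code{jp2} format:
@example
ffmpeg -i INPUT.png -c:v jpeg2000 -pred 1 OUTPUT.jp2
@end example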
|
||||
|
||||
@section snow
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item iterative_dia_size
|
||||
dia size for the iterative motion estimation
|
||||
@end table
|
||||
|
||||
@section libtheora
|
||||
|
||||
libtheora Theora encoder wrapper.
|
||||
@@ -1440,153 +1337,113 @@ You need to explicitly configure the build with @code{--enable-libvpx}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following options are supported by the libvpx wrapper. The
|
||||
@command{vpxenc}-equivalent options or values are listed in parentheses
|
||||
for easy migration.
|
||||
|
||||
To reduce the duplication of documentation, only the private options
|
||||
and some others requiring special attention are documented here. For
|
||||
the documentation of the undocumented generic options, see
|
||||
@ref{codec-options,,the Codec Options chapter}.
|
||||
|
||||
To get more documentation of the libvpx options, invoke the command
|
||||
@command{ffmpeg -h encoder=libvpx}, @command{ffmpeg -h encoder=libvpx-vp9} or
|
||||
@command{vpxenc --help}. Further information is available in the libvpx API
|
||||
documentation.
|
||||
Mapping from FFmpeg to libvpx options with conversion notes in parentheses.
|
||||
|
||||
@table @option
|
||||
|
||||
@item b (@emph{target-bitrate})
|
||||
Set bitrate in bits/s. Note that FFmpeg's @option{b} option is
|
||||
expressed in bits/s, while @command{vpxenc}'s @option{target-bitrate} is in
|
||||
kilobits/s.
|
||||
@item threads
|
||||
g_threads
|
||||
|
||||
@item g (@emph{kf-max-dist})
|
||||
@item profile
|
||||
g_profile
|
||||
|
||||
@item keyint_min (@emph{kf-min-dist})
|
||||
@item vb
|
||||
rc_target_bitrate
|
||||
|
||||
@item qmin (@emph{min-q})
|
||||
@item g
|
||||
kf_max_dist
|
||||
|
||||
@item qmax (@emph{max-q})
|
||||
@item keyint_min
|
||||
kf_min_dist
|
||||
|
||||
@item bufsize (@emph{buf-sz}, @emph{buf-optimal-sz})
|
||||
Set ratecontrol buffer size (in bits). Note @command{vpxenc}'s options are
|
||||
specified in milliseconds, the libvpx wrapper converts this value as follows:
|
||||
@code{buf-sz = bufsize * 1000 / bitrate},
|
||||
@code{buf-optimal-sz = bufsize * 1000 / bitrate * 5 / 6}.
|
||||
@item qmin
|
||||
rc_min_quantizer
|
||||
|
||||
@item rc_init_occupancy (@emph{buf-initial-sz})
|
||||
Set number of bits which should be loaded into the rc buffer before decoding
|
||||
starts. Note @command{vpxenc}'s option is specified in milliseconds, the libvpx
|
||||
wrapper converts this value as follows:
|
||||
@code{rc_init_occupancy * 1000 / bitrate}.
|
||||
@item qmax
|
||||
rc_max_quantizer
|
||||
|
||||
@item undershoot-pct
|
||||
Set datarate undershoot (min) percentage of the target bitrate.
|
||||
@item bufsize, vb
|
||||
rc_buf_sz
|
||||
@code{(bufsize * 1000 / vb)}
|
||||
|
||||
@item overshoot-pct
|
||||
Set datarate overshoot (max) percentage of the target bitrate.
|
||||
rc_buf_optimal_sz
|
||||
@code{(bufsize * 1000 / vb * 5 / 6)}
|
||||
|
||||
@item skip_threshold (@emph{drop-frame})
|
||||
@item rc_init_occupancy, vb
|
||||
rc_buf_initial_sz
|
||||
@code{(rc_init_occupancy * 1000 / vb)}
|
||||
|
||||
@item qcomp (@emph{bias-pct})
|
||||
@item rc_buffer_aggressivity
|
||||
rc_undershoot_pct
|
||||
|
||||
@item maxrate (@emph{maxsection-pct})
|
||||
Set GOP max bitrate in bits/s. Note @command{vpxenc}'s option is specified as a
|
||||
percentage of the target bitrate, the libvpx wrapper converts this value as
|
||||
follows: @code{(maxrate * 100 / bitrate)}.
|
||||
@item skip_threshold
|
||||
rc_dropframe_thresh
|
||||
|
||||
@item minrate (@emph{minsection-pct})
|
||||
Set GOP min bitrate in bits/s. Note @command{vpxenc}'s option is specified as a
|
||||
percentage of the target bitrate, the libvpx wrapper converts this value as
|
||||
follows: @code{(minrate * 100 / bitrate)}.
|
||||
@item qcomp
|
||||
rc_2pass_vbr_bias_pct
|
||||
|
||||
@item minrate, maxrate, b @emph{end-usage=cbr}
|
||||
@code{(minrate == maxrate == bitrate)}.
|
||||
@item maxrate, vb
|
||||
rc_2pass_vbr_maxsection_pct
|
||||
@code{(maxrate * 100 / vb)}
|
||||
|
||||
@item crf (@emph{end-usage=cq}, @emph{cq-level})
|
||||
@item minrate, vb
|
||||
rc_2pass_vbr_minsection_pct
|
||||
@code{(minrate * 100 / vb)}
|
||||
|
||||
@item quality, deadline (@emph{deadline})
|
||||
@table @samp
|
||||
@item best
|
||||
Use best quality deadline. Poorly named and quite slow, this option should be
|
||||
avoided as it may give worse quality output than good.
|
||||
@item good
|
||||
Use good quality deadline. This is a good trade-off between speed and quality
|
||||
when used with the @option{cpu-used} option.
|
||||
@item realtime
|
||||
Use realtime quality deadline.
|
||||
@item minrate, maxrate, vb
|
||||
@code{VPX_CBR}
|
||||
@code{(minrate == maxrate == vb)}
|
||||
|
||||
@item crf
|
||||
@code{VPX_CQ}, @code{VP8E_SET_CQ_LEVEL}
|
||||
|
||||
@item quality
|
||||
@table @option
|
||||
@item @var{best}
|
||||
@code{VPX_DL_BEST_QUALITY}
|
||||
@item @var{good}
|
||||
@code{VPX_DL_GOOD_QUALITY}
|
||||
@item @var{realtime}
|
||||
@code{VPX_DL_REALTIME}
|
||||
@end table
|
||||
|
||||
@item speed, cpu-used (@emph{cpu-used})
|
||||
Set quality/speed ratio modifier. Higher values speed up the encode at the cost
|
||||
of quality.
|
||||
@item speed
|
||||
@code{VP8E_SET_CPUUSED}
|
||||
|
||||
@item nr (@emph{noise-sensitivity})
|
||||
@item nr
|
||||
@code{VP8E_SET_NOISE_SENSITIVITY}
|
||||
|
||||
@item static-thresh
|
||||
Set a change threshold on blocks below which they will be skipped by the
|
||||
encoder.
|
||||
@item mb_threshold
|
||||
@code{VP8E_SET_STATIC_THRESHOLD}
|
||||
|
||||
@item slices (@emph{token-parts})
|
||||
Note that FFmpeg's @option{slices} option gives the total number of partitions,
|
||||
while @command{vpxenc}'s @option{token-parts} is given as
|
||||
@code{log2(partitions)}.
|
||||
@item slices
|
||||
@code{VP8E_SET_TOKEN_PARTITIONS}
|
||||
|
||||
@item max-intra-rate
|
||||
Set maximum I-frame bitrate as a percentage of the target bitrate. A value of 0
|
||||
means unlimited.
|
||||
@code{VP8E_SET_MAX_INTRA_BITRATE_PCT}
|
||||
|
||||
@item force_key_frames
|
||||
@code{VPX_EFLAG_FORCE_KF}
|
||||
|
||||
@item Alternate reference frame related
|
||||
@table @option
|
||||
@item auto-alt-ref
|
||||
Enable use of alternate reference frames (2-pass only).
|
||||
@item arnr-max-frames
|
||||
Set altref noise reduction max frame count.
|
||||
@item arnr-type
|
||||
Set altref noise reduction filter type: backward, forward, centered.
|
||||
@item arnr-strength
|
||||
Set altref noise reduction filter strength.
|
||||
@item rc-lookahead, lag-in-frames (@emph{lag-in-frames})
|
||||
Set number of frames to look ahead for frametype and ratecontrol.
|
||||
@item vp8flags altref
|
||||
@code{VP8E_SET_ENABLEAUTOALTREF}
|
||||
@item @var{arnr_max_frames}
|
||||
@code{VP8E_SET_ARNR_MAXFRAMES}
|
||||
@item @var{arnr_type}
|
||||
@code{VP8E_SET_ARNR_TYPE}
|
||||
@item @var{arnr_strength}
|
||||
@code{VP8E_SET_ARNR_STRENGTH}
|
||||
@item @var{rc_lookahead}
|
||||
g_lag_in_frames
|
||||
@end table
|
||||
|
||||
@item error-resilient
|
||||
Enable error resiliency features.
|
||||
@item vp8flags error_resilient
|
||||
g_error_resilient
|
||||
|
||||
@item VP9-specific options
|
||||
@table @option
|
||||
@item lossless
|
||||
Enable lossless mode.
|
||||
@item tile-columns
|
||||
Set number of tile columns to use. Note this is given as
|
||||
@code{log2(tile_columns)}. For example, 8 tile columns would be requested by
|
||||
setting the @option{tile-columns} option to 3.
|
||||
@item tile-rows
|
||||
Set number of tile rows to use. Note this is given as @code{log2(tile_rows)}.
|
||||
For example, 4 tile rows would be requested by setting the @option{tile-rows}
|
||||
option to 2.
|
||||
@item frame-parallel
|
||||
Enable frame parallel decodability features.
|
||||
@item aq-mode
|
||||
Set adaptive quantization mode (0: off (default), 1: variance 2: complexity, 3:
|
||||
cyclic refresh).
|
||||
@item colorspace @emph{color-space}
|
||||
Set input color space. The VP9 bitstream supports signaling the following
|
||||
colorspaces:
|
||||
@table @option
|
||||
@item @samp{rgb} @emph{sRGB}
|
||||
@item @samp{bt709} @emph{bt709}
|
||||
@item @samp{unspecified} @emph{unknown}
|
||||
@item @samp{bt470bg} @emph{bt601}
|
||||
@item @samp{smpte170m} @emph{smpte170}
|
||||
@item @samp{smpte240m} @emph{smpte240}
|
||||
@item @samp{bt2020_ncl} @emph{bt2020}
|
||||
@end table
|
||||
@end table
|
||||
@item aq_mode
|
||||
@code{VP9E_SET_AQ_MODE}
|
||||
|
||||
@end table
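
As a usage sketch (file names are placeholders), a constant-quality VP9 encode
combining several of the options above:
@example
ffmpeg -i INPUT.mkv -c:v libvpx-vp9 -crf 30 -b:v 0 -tile-columns 2 -threads 4 OUTPUT.webm
@end example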
|
||||
|
||||
@@ -2314,7 +2171,7 @@ Use @var{0} to disable alpha plane coding.
|
||||
@subsection Speed considerations
|
||||
|
||||
In the default mode of operation the encoder has to honor frame constraints
(i.e. not produce frames with size bigger than requested) while still making
output picture as good as possible.
|
||||
A frame containing a lot of small details is harder to compress and the encoder
|
||||
would spend more time searching for appropriate quantizers for each slice.
|
||||
@@ -2324,30 +2181,6 @@ Setting a higher @option{bits_per_mb} limit will improve the speed.
|
||||
For the fastest encoding speed set the @option{qscale} parameter (4 is the
|
||||
recommended value) and do not set a size constraint.
|
||||
|
||||
@section libkvazaar
|
||||
|
||||
Kvazaar H.265/HEVC encoder.
|
||||
|
||||
Requires the presence of the libkvazaar headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@option{--enable-libkvazaar}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item b
|
||||
Set target video bitrate in bit/s and enable rate control.
|
||||
|
||||
@item threads
|
||||
Set number of encoding threads.
|
||||
|
||||
@item kvazaar-params
|
||||
Set kvazaar parameters as a list of @var{name}=@var{value} pairs separated
|
||||
by commas (,). See kvazaar documentation for a list of options.
|
||||
|
||||
@end table
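
For example (file names and the kvazaar parameter are illustrative):
@example
ffmpeg -i INPUT.mkv -c:v libkvazaar -b:v 3000k -kvazaar-params period=64 OUTPUT.mkv
@end example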
|
||||
|
||||
@c man end VIDEO ENCODERS
|
||||
|
||||
@chapter Subtitles Encoders
|
||||
|
@@ -11,14 +11,12 @@ CFLAGS += -Wall -g
|
||||
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
|
||||
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
|
||||
|
||||
EXAMPLES= avio_dir_cmd \
|
||||
avio_reading \
|
||||
EXAMPLES= avio_reading \
|
||||
decoding_encoding \
|
||||
demuxing_decoding \
|
||||
extract_mvs \
|
||||
filtering_video \
|
||||
filtering_audio \
|
||||
http_multiclient \
|
||||
metadata \
|
||||
muxing \
|
||||
remuxing \
|
||||
|
@@ -1,180 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2014 Lukasz Marek
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavformat/avio.h>
|
||||
|
||||
static const char *type_string(int type)
|
||||
{
|
||||
switch (type) {
|
||||
case AVIO_ENTRY_DIRECTORY:
|
||||
return "<DIR>";
|
||||
case AVIO_ENTRY_FILE:
|
||||
return "<FILE>";
|
||||
case AVIO_ENTRY_BLOCK_DEVICE:
|
||||
return "<BLOCK DEVICE>";
|
||||
case AVIO_ENTRY_CHARACTER_DEVICE:
|
||||
return "<CHARACTER DEVICE>";
|
||||
case AVIO_ENTRY_NAMED_PIPE:
|
||||
return "<PIPE>";
|
||||
case AVIO_ENTRY_SYMBOLIC_LINK:
|
||||
return "<LINK>";
|
||||
case AVIO_ENTRY_SOCKET:
|
||||
return "<SOCKET>";
|
||||
case AVIO_ENTRY_SERVER:
|
||||
return "<SERVER>";
|
||||
case AVIO_ENTRY_SHARE:
|
||||
return "<SHARE>";
|
||||
case AVIO_ENTRY_WORKGROUP:
|
||||
return "<WORKGROUP>";
|
||||
case AVIO_ENTRY_UNKNOWN:
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return "<UNKNOWN>";
|
||||
}
|
||||
|
||||
static int list_op(const char *input_dir)
|
||||
{
|
||||
AVIODirEntry *entry = NULL;
|
||||
AVIODirContext *ctx = NULL;
|
||||
int cnt, ret;
|
||||
char filemode[4], uid_and_gid[20];
|
||||
|
||||
if ((ret = avio_open_dir(&ctx, input_dir, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open directory: %s.\n", av_err2str(ret));
|
||||
goto fail;
|
||||
}
|
||||
|
||||
cnt = 0;
|
||||
for (;;) {
|
||||
if ((ret = avio_read_dir(ctx, &entry)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot list directory: %s.\n", av_err2str(ret));
|
||||
goto fail;
|
||||
}
|
||||
if (!entry)
|
||||
break;
|
||||
if (entry->filemode == -1) {
|
||||
snprintf(filemode, 4, "???");
|
||||
} else {
|
||||
snprintf(filemode, 4, "%3"PRIo64, entry->filemode);
|
||||
}
|
||||
snprintf(uid_and_gid, 20, "%"PRId64"(%"PRId64")", entry->user_id, entry->group_id);
|
||||
if (cnt == 0)
|
||||
av_log(NULL, AV_LOG_INFO, "%-9s %12s %30s %10s %s %16s %16s %16s\n",
|
||||
"TYPE", "SIZE", "NAME", "UID(GID)", "UGO", "MODIFIED",
|
||||
"ACCESSED", "STATUS_CHANGED");
|
||||
av_log(NULL, AV_LOG_INFO, "%-9s %12"PRId64" %30s %10s %s %16"PRId64" %16"PRId64" %16"PRId64"\n",
|
||||
type_string(entry->type),
|
||||
entry->size,
|
||||
entry->name,
|
||||
uid_and_gid,
|
||||
filemode,
|
||||
entry->modification_timestamp,
|
||||
entry->access_timestamp,
|
||||
entry->status_change_timestamp);
|
||||
avio_free_directory_entry(&entry);
|
||||
cnt++;
|
||||
};
|
||||
|
||||
fail:
|
||||
avio_close_dir(&ctx);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int del_op(const char *url)
|
||||
{
|
||||
int ret = avpriv_io_delete(url);
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot delete '%s': %s.\n", url, av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int move_op(const char *src, const char *dst)
|
||||
{
|
||||
int ret = avpriv_io_move(src, dst);
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot move '%s' into '%s': %s.\n", src, dst, av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static void usage(const char *program_name)
|
||||
{
|
||||
fprintf(stderr, "usage: %s OPERATION entry1 [entry2]\n"
|
||||
"API example program to show how to manipulate resources "
|
||||
"accessed through AVIOContext.\n"
|
||||
"OPERATIONS:\n"
|
||||
"list list content of the directory\n"
|
||||
"move rename content in directory\n"
|
||||
"del delete content in directory\n",
|
||||
program_name);
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
const char *op = NULL;
|
||||
int ret;
|
||||
|
||||
av_log_set_level(AV_LOG_DEBUG);
|
||||
|
||||
if (argc < 2) {
|
||||
usage(argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* register codecs and formats and other lavf/lavc components*/
|
||||
av_register_all();
|
||||
avformat_network_init();
|
||||
|
||||
op = argv[1];
|
||||
if (strcmp(op, "list") == 0) {
|
||||
if (argc < 3) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for list operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = list_op(argv[2]);
|
||||
}
|
||||
} else if (strcmp(op, "del") == 0) {
|
||||
if (argc < 3) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for del operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = del_op(argv[2]);
|
||||
}
|
||||
} else if (strcmp(op, "move") == 0) {
|
||||
if (argc < 4) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for move operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = move_op(argv[2], argv[3]);
|
||||
}
|
||||
} else {
|
||||
av_log(NULL, AV_LOG_INFO, "Invalid operation %s\n", op);
|
||||
ret = AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
avformat_network_deinit();
|
||||
|
||||
return ret < 0 ? 1 : 0;
|
||||
}
|
@@ -245,7 +245,7 @@ static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
AVCodecContext *c= NULL;
|
||||
int len;
|
||||
FILE *f, *outfile;
|
||||
uint8_t inbuf[AUDIO_INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
|
||||
uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
|
||||
AVPacket avpkt;
|
||||
AVFrame *decoded_frame = NULL;
|
||||
|
||||
@@ -521,7 +521,7 @@ static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
|
||||
/* the picture is allocated by the decoder, no need to free it */
|
||||
snprintf(buf, sizeof(buf), outfilename, *frame_count);
|
||||
pgm_save(frame->data[0], frame->linesize[0],
|
||||
frame->width, frame->height, buf);
|
||||
avctx->width, avctx->height, buf);
|
||||
(*frame_count)++;
|
||||
}
|
||||
if (pkt->data) {
|
||||
@@ -538,13 +538,13 @@ static void video_decode_example(const char *outfilename, const char *filename)
|
||||
int frame_count;
|
||||
FILE *f;
|
||||
AVFrame *frame;
|
||||
uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
|
||||
uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
|
||||
AVPacket avpkt;
|
||||
|
||||
av_init_packet(&avpkt);
|
||||
|
||||
/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
|
||||
memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);
|
||||
memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
|
||||
printf("Decode video file %s to %s\n", filename, outfilename);
|
||||
|
||||
@@ -561,8 +561,8 @@ static void video_decode_example(const char *outfilename, const char *filename)
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (codec->capabilities & AV_CODEC_CAP_TRUNCATED)
|
||||
c->flags |= AV_CODEC_FLAG_TRUNCATED; // we do not send complete frames
|
||||
if(codec->capabilities&CODEC_CAP_TRUNCATED)
|
||||
c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
|
||||
|
||||
/* For some codecs, such as msmpeg4 and mpeg4, width and height
|
||||
MUST be initialized there because this information is not
|
||||
|
@@ -36,8 +36,6 @@
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
|
||||
static int width, height;
|
||||
static enum AVPixelFormat pix_fmt;
|
||||
static AVStream *video_stream = NULL, *audio_stream = NULL;
|
||||
static const char *src_filename = NULL;
|
||||
static const char *video_dst_filename = NULL;
|
||||
@@ -83,22 +81,6 @@ static int decode_packet(int *got_frame, int cached)
|
||||
}
|
||||
|
||||
if (*got_frame) {
|
||||
|
||||
if (frame->width != width || frame->height != height ||
|
||||
frame->format != pix_fmt) {
|
||||
/* To handle this change, one could call av_image_alloc again and
|
||||
* decode the following frames into another rawvideo file. */
|
||||
fprintf(stderr, "Error: Width, height and pixel format have to be "
|
||||
"constant in a rawvideo file, but the width, height or "
|
||||
"pixel format of the input video changed:\n"
|
||||
"old: width = %d, height = %d, format = %s\n"
|
||||
"new: width = %d, height = %d, format = %s\n",
|
||||
width, height, av_get_pix_fmt_name(pix_fmt),
|
||||
frame->width, frame->height,
|
||||
av_get_pix_fmt_name(frame->format));
|
||||
return -1;
|
||||
}
|
||||
|
||||
printf("video_frame%s n:%d coded_n:%d pts:%s\n",
|
||||
cached ? "(cached)" : "",
|
||||
video_frame_count++, frame->coded_picture_number,
|
||||
@@ -108,7 +90,7 @@ static int decode_packet(int *got_frame, int cached)
|
||||
* this is required since rawvideo expects non aligned data */
|
||||
av_image_copy(video_dst_data, video_dst_linesize,
|
||||
(const uint8_t **)(frame->data), frame->linesize,
|
||||
pix_fmt, width, height);
|
||||
video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);
|
||||
|
||||
/* write to rawvideo file */
|
||||
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
|
||||
@@ -156,7 +138,7 @@ static int decode_packet(int *got_frame, int cached)
|
||||
static int open_codec_context(int *stream_idx,
|
||||
AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
{
|
||||
int ret, stream_index;
|
||||
int ret;
|
||||
AVStream *st;
|
||||
AVCodecContext *dec_ctx = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
@@ -168,8 +150,8 @@ static int open_codec_context(int *stream_idx,
|
||||
av_get_media_type_string(type), src_filename);
|
||||
return ret;
|
||||
} else {
|
||||
stream_index = ret;
|
||||
st = fmt_ctx->streams[stream_index];
|
||||
*stream_idx = ret;
|
||||
st = fmt_ctx->streams[*stream_idx];
|
||||
|
||||
/* find decoder for the stream */
|
||||
dec_ctx = st->codec;
|
||||
@@ -188,7 +170,6 @@ static int open_codec_context(int *stream_idx,
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
*stream_idx = stream_index;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -283,11 +264,9 @@ int main (int argc, char **argv)
|
||||
}
|
||||
|
||||
/* allocate image where the decoded image will be put */
|
||||
width = video_dec_ctx->width;
|
||||
height = video_dec_ctx->height;
|
||||
pix_fmt = video_dec_ctx->pix_fmt;
|
||||
ret = av_image_alloc(video_dst_data, video_dst_linesize,
|
||||
width, height, pix_fmt, 1);
|
||||
video_dec_ctx->width, video_dec_ctx->height,
|
||||
video_dec_ctx->pix_fmt, 1);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate raw video buffer\n");
|
||||
goto end;
|
||||
@@ -362,7 +341,7 @@ int main (int argc, char **argv)
|
||||
if (video_stream) {
|
||||
printf("Play the output video file with the command:\n"
|
||||
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
|
||||
av_get_pix_fmt_name(pix_fmt), width, height,
|
||||
av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
|
||||
video_dst_filename);
|
||||
}
|
||||
|
||||
|
@@ -145,28 +145,12 @@ static int init_filters(const char *filters_descr)
|
||||
goto end;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the endpoints for the filter graph. The filter_graph will
|
||||
* be linked to the graph described by filters_descr.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The buffer source output must be connected to the input pad of
|
||||
* the first filter described by filters_descr; since the first
|
||||
* filter input label is not specified, it is set to "in" by
|
||||
* default.
|
||||
*/
|
||||
/* Endpoints for the filter graph. */
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
/*
|
||||
* The buffer sink input must be connected to the output pad of
|
||||
* the last filter described by filters_descr; since the last
|
||||
* filter output label is not specified, it is set to "out" by
|
||||
* default.
|
||||
*/
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
|
@@ -38,10 +38,7 @@
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
const char *filter_descr = "scale=78:24,transpose=cclock";
|
||||
/* other way:
|
||||
scale=78:24 [scl]; [scl] transpose=cclock // assumes "[in]" and "[out]" to be input output pads respectively
|
||||
*/
|
||||
const char *filter_descr = "scale=78:24";
|
||||
|
||||
static AVFormatContext *fmt_ctx;
|
||||
static AVCodecContext *dec_ctx;
|
||||
@@ -131,28 +128,12 @@ static int init_filters(const char *filters_descr)
|
||||
goto end;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the endpoints for the filter graph. The filter_graph will
|
||||
* be linked to the graph described by filters_descr.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The buffer source output must be connected to the input pad of
|
||||
* the first filter described by filters_descr; since the first
|
||||
* filter input label is not specified, it is set to "in" by
|
||||
* default.
|
||||
*/
|
||||
/* Endpoints for the filter graph. */
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
/*
|
||||
* The buffer sink input must be connected to the output pad of
|
||||
* the last filter described by filters_descr; since the last
|
||||
* filter output label is not specified, it is set to "out" by
|
||||
* default.
|
||||
*/
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
|
@@ -1,155 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015 Stephan Holljes
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavformat multi-client network API usage example.
|
||||
*
|
||||
* @example http_multiclient.c
|
||||
* This example will serve a file without decoding or demuxing it over http.
|
||||
* Multiple clients can connect and will receive the same file.
|
||||
*/
|
||||
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <unistd.h>
|
||||
|
||||
void process_client(AVIOContext *client, const char *in_uri)
|
||||
{
|
||||
AVIOContext *input = NULL;
|
||||
uint8_t buf[1024];
|
||||
int ret, n, reply_code;
|
||||
char *resource = NULL;
|
||||
while ((ret = avio_handshake(client)) > 0) {
|
||||
av_opt_get(client, "resource", AV_OPT_SEARCH_CHILDREN, &resource);
|
||||
// check for strlen(resource) is necessary, because av_opt_get()
|
||||
// may return empty string.
|
||||
if (resource && strlen(resource))
|
||||
break;
|
||||
}
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
av_log(client, AV_LOG_TRACE, "resource=%p\n", resource);
|
||||
if (resource && resource[0] == '/' && !strcmp((resource + 1), in_uri)) {
|
||||
reply_code = 200;
|
||||
} else {
|
||||
reply_code = AVERROR_HTTP_NOT_FOUND;
|
||||
}
|
||||
if ((ret = av_opt_set_int(client, "reply_code", reply_code, AV_OPT_SEARCH_CHILDREN)) < 0) {
|
||||
av_log(client, AV_LOG_ERROR, "Failed to set reply_code: %s.\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
av_log(client, AV_LOG_TRACE, "Set reply code to %d\n", reply_code);
|
||||
|
||||
while ((ret = avio_handshake(client)) > 0);
|
||||
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
|
||||
fprintf(stderr, "Handshake performed.\n");
|
||||
if (reply_code != 200)
|
||||
goto end;
|
||||
fprintf(stderr, "Opening input file.\n");
|
||||
if ((ret = avio_open2(&input, in_uri, AVIO_FLAG_READ, NULL, NULL)) < 0) {
|
||||
av_log(input, AV_LOG_ERROR, "Failed to open input: %s: %s.\n", in_uri,
|
||||
av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
for(;;) {
|
||||
n = avio_read(input, buf, sizeof(buf));
|
||||
if (n < 0) {
|
||||
if (n == AVERROR_EOF)
|
||||
break;
|
||||
av_log(input, AV_LOG_ERROR, "Error reading from input: %s.\n",
|
||||
av_err2str(n));
|
||||
break;
|
||||
}
|
||||
avio_write(client, buf, n);
|
||||
avio_flush(client);
|
||||
}
|
||||
end:
|
||||
fprintf(stderr, "Flushing client\n");
|
||||
avio_flush(client);
|
||||
fprintf(stderr, "Closing client\n");
|
||||
avio_close(client);
|
||||
fprintf(stderr, "Closing input\n");
|
||||
avio_close(input);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
av_log_set_level(AV_LOG_TRACE);
|
||||
AVDictionary *options = NULL;
|
||||
AVIOContext *client = NULL, *server = NULL;
|
||||
const char *in_uri, *out_uri;
|
||||
int ret, pid;
|
||||
if (argc < 3) {
|
||||
printf("usage: %s input http://hostname[:port]\n"
|
||||
"API example program to serve http to multiple clients.\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
in_uri = argv[1];
|
||||
out_uri = argv[2];
|
||||
|
||||
av_register_all();
|
||||
avformat_network_init();
|
||||
|
||||
if ((ret = av_dict_set(&options, "listen", "2", 0)) < 0) {
|
||||
fprintf(stderr, "Failed to set listen mode for server: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
if ((ret = avio_open2(&server, out_uri, AVIO_FLAG_WRITE, NULL, &options)) < 0) {
|
||||
fprintf(stderr, "Failed to open server: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
fprintf(stderr, "Entering main loop.\n");
|
||||
for(;;) {
|
||||
if ((ret = avio_accept(server, &client)) < 0)
|
||||
goto end;
|
||||
fprintf(stderr, "Accepted client, forking process.\n");
|
||||
// XXX: Since we don't reap our children and don't ignore signals
|
||||
// this produces zombie processes.
|
||||
pid = fork();
|
||||
if (pid < 0) {
|
||||
perror("Fork failed");
|
||||
ret = AVERROR(errno);
|
||||
goto end;
|
||||
}
|
||||
if (pid == 0) {
|
||||
fprintf(stderr, "In child.\n");
|
||||
process_client(client, in_uri);
|
||||
avio_close(server);
|
||||
exit(0);
|
||||
}
|
||||
if (pid > 0)
|
||||
avio_close(client);
|
||||
}
|
||||
end:
|
||||
avio_close(server);
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Some errors occurred: %s\n", av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
@@ -172,7 +172,7 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
|
||||
/* Some formats want stream headers to be separate. */
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
@@ -230,7 +230,7 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, A
|
||||
/* increment frequency by 110 Hz per second */
|
||||
ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
|
||||
|
||||
if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
|
||||
if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
|
||||
nb_samples = 10000;
|
||||
else
|
||||
nb_samples = c->frame_size;
|
||||
@@ -661,7 +661,7 @@ int main(int argc, char **argv)
|
||||
|
||||
if (!(fmt->flags & AVFMT_NOFILE))
|
||||
/* Close the output file. */
|
||||
avio_closep(&oc->pb);
|
||||
avio_close(oc->pb);
|
||||
|
||||
/* free the stream */
|
||||
avformat_free_context(oc);
|
||||
|
@@ -1,484 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015 Anton Khirnov
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* Intel QSV-accelerated H.264 decoding example.
|
||||
*
|
||||
* @example qsvdec.c
|
||||
* This example shows how to do QSV-accelerated H.264 decoding with output
|
||||
* frames in the VA-API video surfaces.
|
||||
*/
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include <mfx/mfxvideo.h>
|
||||
|
||||
#include <va/va.h>
|
||||
#include <va/va_x11.h>
|
||||
#include <X11/Xlib.h>
|
||||
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavformat/avio.h"
|
||||
|
||||
#include "libavcodec/avcodec.h"
|
||||
#include "libavcodec/qsv.h"
|
||||
|
||||
#include "libavutil/error.h"
|
||||
#include "libavutil/mem.h"
|
||||
|
||||
typedef struct DecodeContext {
|
||||
mfxSession mfx_session;
|
||||
VADisplay va_dpy;
|
||||
|
||||
VASurfaceID *surfaces;
|
||||
mfxMemId *surface_ids;
|
||||
int *surface_used;
|
||||
int nb_surfaces;
|
||||
|
||||
mfxFrameInfo frame_info;
|
||||
} DecodeContext;
|
||||
|
||||
static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
|
||||
mfxFrameAllocResponse *resp)
|
||||
{
|
||||
DecodeContext *decode = pthis;
|
||||
int err, i;
|
||||
|
||||
if (decode->surfaces) {
|
||||
fprintf(stderr, "Multiple allocation requests.\n");
|
||||
return MFX_ERR_MEMORY_ALLOC;
|
||||
}
|
||||
if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)) {
|
||||
fprintf(stderr, "Unsupported surface type: %d\n", req->Type);
|
||||
return MFX_ERR_UNSUPPORTED;
|
||||
}
|
||||
if (req->Info.BitDepthLuma != 8 || req->Info.BitDepthChroma != 8 ||
|
||||
req->Info.Shift || req->Info.FourCC != MFX_FOURCC_NV12 ||
|
||||
req->Info.ChromaFormat != MFX_CHROMAFORMAT_YUV420) {
|
||||
fprintf(stderr, "Unsupported surface properties.\n");
|
||||
return MFX_ERR_UNSUPPORTED;
|
||||
}
|
||||
|
||||
decode->surfaces = av_malloc_array (req->NumFrameSuggested, sizeof(*decode->surfaces));
|
||||
decode->surface_ids = av_malloc_array (req->NumFrameSuggested, sizeof(*decode->surface_ids));
|
||||
decode->surface_used = av_mallocz_array(req->NumFrameSuggested, sizeof(*decode->surface_used));
|
||||
if (!decode->surfaces || !decode->surface_ids || !decode->surface_used)
|
||||
goto fail;
|
||||
|
||||
err = vaCreateSurfaces(decode->va_dpy, VA_RT_FORMAT_YUV420,
|
||||
req->Info.Width, req->Info.Height,
|
||||
decode->surfaces, req->NumFrameSuggested,
|
||||
NULL, 0);
|
||||
if (err != VA_STATUS_SUCCESS) {
|
||||
fprintf(stderr, "Error allocating VA surfaces\n");
|
||||
goto fail;
|
||||
}
|
||||
decode->nb_surfaces = req->NumFrameSuggested;
|
||||
|
||||
for (i = 0; i < decode->nb_surfaces; i++)
|
||||
decode->surface_ids[i] = &decode->surfaces[i];
|
||||
|
||||
resp->mids = decode->surface_ids;
|
||||
resp->NumFrameActual = decode->nb_surfaces;
|
||||
|
||||
decode->frame_info = req->Info;
|
||||
|
||||
return MFX_ERR_NONE;
|
||||
fail:
|
||||
av_freep(&decode->surfaces);
|
||||
av_freep(&decode->surface_ids);
|
||||
av_freep(&decode->surface_used);
|
||||
|
||||
return MFX_ERR_MEMORY_ALLOC;
|
||||
}
|
||||
|
||||
static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
|
||||
{
|
||||
DecodeContext *decode = pthis;
|
||||
|
||||
if (decode->surfaces)
|
||||
vaDestroySurfaces(decode->va_dpy, decode->surfaces, decode->nb_surfaces);
|
||||
av_freep(&decode->surfaces);
|
||||
av_freep(&decode->surface_ids);
|
||||
av_freep(&decode->surface_used);
|
||||
decode->nb_surfaces = 0;
|
||||
|
||||
return MFX_ERR_NONE;
|
||||
}
|
||||
|
||||
static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
|
||||
{
|
||||
return MFX_ERR_UNSUPPORTED;
|
||||
}
|
||||
|
||||
static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
|
||||
{
|
||||
return MFX_ERR_UNSUPPORTED;
|
||||
}
|
||||
|
||||
static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
|
||||
{
|
||||
*hdl = mid;
|
||||
return MFX_ERR_NONE;
|
||||
}
|
||||
|
||||
static void free_buffer(void *opaque, uint8_t *data)
|
||||
{
|
||||
int *used = opaque;
|
||||
*used = 0;
|
||||
av_freep(&data);
|
||||
}
|
||||
|
||||
static int get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
|
||||
{
|
||||
DecodeContext *decode = avctx->opaque;
|
||||
|
||||
mfxFrameSurface1 *surf;
|
||||
AVBufferRef *surf_buf;
|
||||
int idx;
|
||||
|
||||
for (idx = 0; idx < decode->nb_surfaces; idx++) {
|
||||
if (!decode->surface_used[idx])
|
||||
break;
|
||||
}
|
||||
if (idx == decode->nb_surfaces) {
|
||||
fprintf(stderr, "No free surfaces\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
surf = av_mallocz(sizeof(*surf));
|
||||
if (!surf)
|
||||
return AVERROR(ENOMEM);
|
||||
surf_buf = av_buffer_create((uint8_t*)surf, sizeof(*surf), free_buffer,
|
||||
&decode->surface_used[idx], AV_BUFFER_FLAG_READONLY);
|
||||
if (!surf_buf) {
|
||||
av_freep(&surf);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
surf->Info = decode->frame_info;
|
||||
surf->Data.MemId = &decode->surfaces[idx];
|
||||
|
||||
frame->buf[0] = surf_buf;
|
||||
frame->data[3] = (uint8_t*)surf;
|
||||
|
||||
decode->surface_used[idx] = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
|
||||
{
|
||||
while (*pix_fmts != AV_PIX_FMT_NONE) {
|
||||
if (*pix_fmts == AV_PIX_FMT_QSV) {
|
||||
if (!avctx->hwaccel_context) {
|
||||
DecodeContext *decode = avctx->opaque;
|
||||
AVQSVContext *qsv = av_qsv_alloc_context();
|
||||
if (!qsv)
|
||||
return AV_PIX_FMT_NONE;
|
||||
|
||||
qsv->session = decode->mfx_session;
|
||||
qsv->iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
|
||||
|
||||
avctx->hwaccel_context = qsv;
|
||||
}
|
||||
|
||||
return AV_PIX_FMT_QSV;
|
||||
}
|
||||
|
||||
pix_fmts++;
|
||||
}
|
||||
|
||||
fprintf(stderr, "The QSV pixel format not offered in get_format()\n");
|
||||
|
||||
return AV_PIX_FMT_NONE;
|
||||
}
|
||||
|
||||
static int decode_packet(DecodeContext *decode, AVCodecContext *decoder_ctx,
|
||||
AVFrame *frame, AVPacket *pkt,
|
||||
AVIOContext *output_ctx)
|
||||
{
|
||||
int ret = 0;
|
||||
int got_frame = 1;
|
||||
|
||||
while (pkt->size > 0 || (!pkt->data && got_frame)) {
|
||||
ret = avcodec_decode_video2(decoder_ctx, frame, &got_frame, pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error during decoding\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
pkt->data += ret;
|
||||
pkt->size -= ret;
|
||||
|
||||
/* A real program would do something useful with the decoded frame here.
|
||||
* We just retrieve the raw data and write it to a file, which is rather
|
||||
* useless but pedagogic. */
|
||||
if (got_frame) {
|
||||
mfxFrameSurface1 *surf = (mfxFrameSurface1*)frame->data[3];
|
||||
VASurfaceID surface = *(VASurfaceID*)surf->Data.MemId;
|
||||
|
||||
VAImageFormat img_fmt = {
|
||||
.fourcc = VA_FOURCC_NV12,
|
||||
.byte_order = VA_LSB_FIRST,
|
||||
.bits_per_pixel = 8,
|
||||
.depth = 8,
|
||||
};
|
||||
|
||||
VAImage img;
|
||||
|
||||
VAStatus err;
|
||||
uint8_t *data;
|
||||
int i, j;
|
||||
|
||||
img.buf = VA_INVALID_ID;
|
||||
img.image_id = VA_INVALID_ID;
|
||||
|
||||
err = vaCreateImage(decode->va_dpy, &img_fmt,
|
||||
frame->width, frame->height, &img);
|
||||
if (err != VA_STATUS_SUCCESS) {
|
||||
fprintf(stderr, "Error creating an image: %s\n",
|
||||
vaErrorStr(err));
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
err = vaGetImage(decode->va_dpy, surface, 0, 0,
|
||||
frame->width, frame->height,
|
||||
img.image_id);
|
||||
if (err != VA_STATUS_SUCCESS) {
|
||||
fprintf(stderr, "Error getting an image: %s\n",
|
||||
vaErrorStr(err));
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
err = vaMapBuffer(decode->va_dpy, img.buf, (void**)&data);
|
||||
if (err != VA_STATUS_SUCCESS) {
|
||||
fprintf(stderr, "Error mapping the image buffer: %s\n",
|
||||
vaErrorStr(err));
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
for (i = 0; i < img.num_planes; i++)
|
||||
for (j = 0; j < (img.height >> (i > 0)); j++)
|
||||
avio_write(output_ctx, data + img.offsets[i] + j * img.pitches[i], img.width);
|
||||
|
||||
fail:
|
||||
if (img.buf != VA_INVALID_ID)
|
||||
vaUnmapBuffer(decode->va_dpy, img.buf);
|
||||
if (img.image_id != VA_INVALID_ID)
|
||||
vaDestroyImage(decode->va_dpy, img.image_id);
|
||||
av_frame_unref(frame);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}

int main(int argc, char **argv)
{
    AVFormatContext *input_ctx = NULL;
    AVStream *video_st = NULL;
    AVCodecContext *decoder_ctx = NULL;
    const AVCodec *decoder;

    AVPacket pkt = { 0 };
    AVFrame *frame = NULL;

    DecodeContext decode = { NULL };

    Display *dpy = NULL;
    int va_ver_major, va_ver_minor;

    mfxIMPL mfx_impl = MFX_IMPL_AUTO_ANY;
    mfxVersion mfx_ver = { { 1, 1 } };

    mfxFrameAllocator frame_allocator = {
        .pthis  = &decode,
        .Alloc  = frame_alloc,
        .Lock   = frame_lock,
        .Unlock = frame_unlock,
        .GetHDL = frame_get_hdl,
        .Free   = frame_free,
    };

    AVIOContext *output_ctx = NULL;

    int ret, i, err;

    av_register_all();

    if (argc < 3) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }

    /* open the input file */
    ret = avformat_open_input(&input_ctx, argv[1], NULL, NULL);
    if (ret < 0) {
        fprintf(stderr, "Cannot open input file '%s': ", argv[1]);
        goto finish;
    }

    /* find the first H.264 video stream */
    for (i = 0; i < input_ctx->nb_streams; i++) {
        AVStream *st = input_ctx->streams[i];

        if (st->codec->codec_id == AV_CODEC_ID_H264 && !video_st)
            video_st = st;
        else
            st->discard = AVDISCARD_ALL;
    }
    if (!video_st) {
        fprintf(stderr, "No H.264 video stream in the input file\n");
        goto finish;
    }

    /* initialize VA-API */
    dpy = XOpenDisplay(NULL);
    if (!dpy) {
        fprintf(stderr, "Cannot open the X display\n");
        goto finish;
    }
    decode.va_dpy = vaGetDisplay(dpy);
    if (!decode.va_dpy) {
        fprintf(stderr, "Cannot open the VA display\n");
        goto finish;
    }

    err = vaInitialize(decode.va_dpy, &va_ver_major, &va_ver_minor);
    if (err != VA_STATUS_SUCCESS) {
        fprintf(stderr, "Cannot initialize VA: %s\n", vaErrorStr(err));
        goto finish;
    }
    fprintf(stderr, "Initialized VA v%d.%d\n", va_ver_major, va_ver_minor);

    /* initialize an MFX session */
    err = MFXInit(mfx_impl, &mfx_ver, &decode.mfx_session);
    if (err != MFX_ERR_NONE) {
        fprintf(stderr, "Error initializing an MFX session\n");
        goto finish;
    }

    MFXVideoCORE_SetHandle(decode.mfx_session, MFX_HANDLE_VA_DISPLAY, decode.va_dpy);
    MFXVideoCORE_SetFrameAllocator(decode.mfx_session, &frame_allocator);

    /* initialize the decoder */
    decoder = avcodec_find_decoder_by_name("h264_qsv");
    if (!decoder) {
        fprintf(stderr, "The QSV decoder is not present in libavcodec\n");
        goto finish;
    }

    decoder_ctx = avcodec_alloc_context3(decoder);
    if (!decoder_ctx) {
        ret = AVERROR(ENOMEM);
        goto finish;
    }
    decoder_ctx->codec_id = AV_CODEC_ID_H264;
    if (video_st->codec->extradata_size) {
        decoder_ctx->extradata = av_mallocz(video_st->codec->extradata_size +
                                            AV_INPUT_BUFFER_PADDING_SIZE);
        if (!decoder_ctx->extradata) {
            ret = AVERROR(ENOMEM);
            goto finish;
        }
        memcpy(decoder_ctx->extradata, video_st->codec->extradata,
               video_st->codec->extradata_size);
        decoder_ctx->extradata_size = video_st->codec->extradata_size;
    }
    decoder_ctx->refcounted_frames = 1;

    decoder_ctx->opaque      = &decode;
    decoder_ctx->get_buffer2 = get_buffer;
    decoder_ctx->get_format  = get_format;

    ret = avcodec_open2(decoder_ctx, NULL, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error opening the decoder: ");
        goto finish;
    }

    /* open the output stream */
    ret = avio_open(&output_ctx, argv[2], AVIO_FLAG_WRITE);
    if (ret < 0) {
        fprintf(stderr, "Error opening the output context: ");
        goto finish;
    }

    frame = av_frame_alloc();
    if (!frame) {
        ret = AVERROR(ENOMEM);
        goto finish;
    }

    /* actual decoding */
    while (ret >= 0) {
        ret = av_read_frame(input_ctx, &pkt);
        if (ret < 0)
            break;

        if (pkt.stream_index == video_st->index)
            ret = decode_packet(&decode, decoder_ctx, frame, &pkt, output_ctx);

        av_packet_unref(&pkt);
    }

    /* flush the decoder */
    pkt.data = NULL;
    pkt.size = 0;
    ret = decode_packet(&decode, decoder_ctx, frame, &pkt, output_ctx);

finish:
    if (ret < 0) {
        char buf[1024];
        av_strerror(ret, buf, sizeof(buf));
        fprintf(stderr, "%s\n", buf);
    }

    avformat_close_input(&input_ctx);

    av_frame_free(&frame);

    if (decode.mfx_session)
        MFXClose(decode.mfx_session);
    if (decode.va_dpy)
        vaTerminate(decode.va_dpy);
    if (dpy)
        XCloseDisplay(dpy);

    if (decoder_ctx)
        av_freep(&decoder_ctx->hwaccel_context);
    avcodec_free_context(&decoder_ctx);

    avio_close(output_ctx);

    return ret;
}
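For reference, this example links against libavformat, libavcodec and libavutil as well as libva, its X11 backend, Xlib and libmfx. A build line along the following lines should work where the corresponding pkg-config files are installed; the file name and the exact package names are assumptions and vary between distributions:

gcc -o qsvdec qsvdec.c $(pkg-config --cflags --libs libavformat libavcodec libavutil libva libva-x11 x11 libmfx)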

@@ -101,7 +101,7 @@ int main(int argc, char **argv)
}
out_stream->codec->codec_tag = 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
av_dump_format(ofmt_ctx, 0, out_filename, 1);

@@ -153,7 +153,7 @@ end:

/* close output */
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
avio_closep(&ofmt_ctx->pb);
avio_close(ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);

if (ret < 0 && ret != AVERROR_EOF) {

@@ -41,9 +41,11 @@
#include "libswresample/swresample.h"

/** The output bit rate in kbit/s */
#define OUTPUT_BIT_RATE 96000
#define OUTPUT_BIT_RATE 48000
/** The number of output channels */
#define OUTPUT_CHANNELS 2
/** The audio sample output format */
#define OUTPUT_SAMPLE_FORMAT AV_SAMPLE_FMT_S16

/**
 * Convert an error code into a text message.
@@ -167,7 +169,7 @@ static int open_output_file(const char *filename,
goto cleanup;
}

/** Save the encoder context for easier access later. */
/** Save the encoder context for easiert access later. */
*output_codec_context = stream->codec;

/**
@@ -177,22 +179,15 @@ static int open_output_file(const char *filename,
(*output_codec_context)->channels = OUTPUT_CHANNELS;
(*output_codec_context)->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
(*output_codec_context)->sample_rate = input_codec_context->sample_rate;
(*output_codec_context)->sample_fmt = output_codec->sample_fmts[0];
(*output_codec_context)->sample_fmt = AV_SAMPLE_FMT_S16;
(*output_codec_context)->bit_rate = OUTPUT_BIT_RATE;

/** Allow the use of the experimental AAC encoder */
(*output_codec_context)->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

/** Set the sample rate for the container. */
stream->time_base.den = input_codec_context->sample_rate;
stream->time_base.num = 1;

/**
 * Some container formats (like MP4) require global headers to be present
 * Mark the encoder so that it behaves accordingly.
 */
if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER)
(*output_codec_context)->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
(*output_codec_context)->flags |= CODEC_FLAG_GLOBAL_HEADER;

/** Open the encoder for the audio stream to use it later. */
if ((error = avcodec_open2(*output_codec_context, output_codec, NULL)) < 0) {
@@ -204,7 +199,7 @@ static int open_output_file(const char *filename,
return 0;

cleanup:
avio_closep(&(*output_format_context)->pb);
avio_close((*output_format_context)->pb);
avformat_free_context(*output_format_context);
*output_format_context = NULL;
return error < 0 ? error : AVERROR_EXIT;
@@ -276,11 +271,10 @@ static int init_resampler(AVCodecContext *input_codec_context,
}

/** Initialize a FIFO buffer for the audio samples to be encoded. */
static int init_fifo(AVAudioFifo **fifo, AVCodecContext *output_codec_context)
static int init_fifo(AVAudioFifo **fifo)
{
/** Create the FIFO buffer based on the specified output sample format. */
if (!(*fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt,
output_codec_context->channels, 1))) {
if (!(*fifo = av_audio_fifo_alloc(OUTPUT_SAMPLE_FORMAT, OUTPUT_CHANNELS, 1))) {
fprintf(stderr, "Could not allocate FIFO\n");
return AVERROR(ENOMEM);
}
@@ -543,9 +537,6 @@ static int init_output_frame(AVFrame **frame,
return 0;
}

/** Global timestamp for the audio frames */
static int64_t pts = 0;

/** Encode one frame worth of audio to the output file. */
static int encode_audio_frame(AVFrame *frame,
AVFormatContext *output_format_context,
@@ -557,12 +548,6 @@ static int encode_audio_frame(AVFrame *frame,
int error;
init_packet(&output_packet);

/** Set a timestamp based on the sample rate for the container. */
if (frame) {
frame->pts = pts;
pts += frame->nb_samples;
}

/**
 * Encode the audio frame and store it in the temporary packet.
 * The output audio stream encoder is used to do this.
@@ -674,7 +659,7 @@ int main(int argc, char **argv)
&resample_context))
goto cleanup;
/** Initialize the FIFO buffer to store audio samples to be encoded. */
if (init_fifo(&fifo, output_codec_context))
if (init_fifo(&fifo))
goto cleanup;
/** Write the header of the output file container. */
if (write_output_file_header(output_format_context))
@@ -758,7 +743,7 @@ cleanup:
if (output_codec_context)
avcodec_close(output_codec_context);
if (output_format_context) {
avio_closep(&output_format_context->pb);
avio_close(output_format_context->pb);
avformat_free_context(output_format_context);
}
if (input_codec_context)

@@ -117,7 +117,7 @@ static int open_output_file(const char *filename)
/* in this example, we choose transcoding to same codec */
encoder = avcodec_find_encoder(dec_ctx->codec_id);
if (!encoder) {
av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
av_log(NULL, AV_LOG_FATAL, "Neccessary encoder not found\n");
return AVERROR_INVALIDDATA;
}

@@ -161,7 +161,7 @@ static int open_output_file(const char *filename)
}

if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

}
av_dump_format(ofmt_ctx, 0, filename, 1);
@@ -449,7 +449,7 @@ static int flush_encoder(unsigned int stream_index)
int got_frame;

if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
AV_CODEC_CAP_DELAY))
CODEC_CAP_DELAY))
return 0;

while (1) {
@@ -573,7 +573,7 @@ end:
av_free(filter_ctx);
avformat_close_input(&ifmt_ctx);
if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
avio_closep(&ofmt_ctx->pb);
avio_close(ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);

if (ret < 0)

85 doc/faq.texi
@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg FAQ
@titlepage
@@ -91,56 +90,6 @@ To build FFmpeg, you need to install the development package. It is usually
called @file{libfoo-dev} or @file{libfoo-devel}. You can remove it after the
build is finished, but be sure to keep the main package.

@section How do I make @command{pkg-config} find my libraries?

Somewhere along with your libraries, there is a @file{.pc} file (or several)
in a @file{pkgconfig} directory. You need to set environment variables to
point @command{pkg-config} to these files.

If you need to @emph{add} directories to @command{pkg-config}'s search list
(typical use case: library installed separately), add it to
@code{$PKG_CONFIG_PATH}:

@example
export PKG_CONFIG_PATH=/opt/x264/lib/pkgconfig:/opt/opus/lib/pkgconfig
@end example

If you need to @emph{replace} @command{pkg-config}'s search list
(typical use case: cross-compiling), set it in
@code{$PKG_CONFIG_LIBDIR}:

@example
export PKG_CONFIG_LIBDIR=/home/me/cross/usr/lib/pkgconfig:/home/me/cross/usr/local/lib/pkgconfig
@end example

If you need to know the library's internal dependencies (typical use: static
linking), add the @code{--static} option to @command{pkg-config}:

@example
./configure --pkg-config-flags=--static
@end example

@section How do I use @command{pkg-config} when cross-compiling?

The best way is to install @command{pkg-config} in your cross-compilation
environment. It will automatically use the cross-compilation libraries.

You can also use @command{pkg-config} from the host environment by
specifying explicitly @code{--pkg-config=pkg-config} to @command{configure}.
In that case, you must point @command{pkg-config} to the correct directories
using the @code{PKG_CONFIG_LIBDIR}, as explained in the previous entry.

As an intermediate solution, you can place in your cross-compilation
environment a script that calls the host @command{pkg-config} with
@code{PKG_CONFIG_LIBDIR} set. That script can look like that:

@example
#!/bin/sh
PKG_CONFIG_LIBDIR=/path/to/cross/lib/pkgconfig
export PKG_CONFIG_LIBDIR
exec /usr/bin/pkg-config "$@@"
@end example

@chapter Usage

@section ffmpeg does not work; what is wrong?
@@ -467,40 +416,6 @@ point acceptable for your tastes. The most common options to do that are
@option{-qscale} and @option{-qmax}, but you should peruse the documentation
of the encoder you chose.

@section I have a stretched video, why does scaling does not fix it?

A lot of video codecs and formats can store the @emph{aspect ratio} of the
video: this is the ratio between the width and the height of either the full
image (DAR, display aspect ratio) or individual pixels (SAR, sample aspect
ratio). For example, EGA screens at resolution 640×350 had 4:3 DAR and 35:48
SAR.

Most still image processing work with square pixels, i.e. 1:1 SAR, but a lot
of video standards, especially from the analogic-numeric transition era, use
non-square pixels.

Most processing filters in FFmpeg handle the aspect ratio to avoid
stretching the image: cropping adjusts the DAR to keep the SAR constant,
scaling adjusts the SAR to keep the DAR constant.

If you want to stretch, or “unstretch”, the image, you need to override the
information with the
@url{http://ffmpeg.org/ffmpeg-filters.html#setdar_002c-setsar, @code{setdar or setsar filters}}.

Do not forget to examine carefully the original video to check whether the
stretching comes from the image or from the aspect ratio information.

For example, to fix a badly encoded EGA capture, use the following commands,
either the first one to upscale to square pixels or the second one to set
the correct aspect ratio or the third one to avoid transcoding (may not work
depending on the format / codec / player / phase of the moon):

@example
ffmpeg -i ega_screen.nut -vf scale=640:480,setsar=1 ega_screen_scaled.nut
ffmpeg -i ega_screen.nut -vf setdar=4/3 ega_screen_anamorphic.nut
ffmpeg -i ega_screen.nut -aspect 4/3 -c copy ega_screen_overridden.nut
@end example

@chapter Development

@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Automated Testing Environment
@titlepage
@@ -13,36 +12,36 @@

@chapter Introduction

FATE is an extended regression suite on the client-side and a means
FATE is an extended regression suite on the client-side and a means
for results aggregation and presentation on the server-side.

The first part of this document explains how you can use FATE from
The first part of this document explains how you can use FATE from
your FFmpeg source directory to test your ffmpeg binary. The second
part describes how you can run FATE to submit the results to FFmpeg's
FATE server.

In any way you can have a look at the publicly viewable FATE results
In any way you can have a look at the publicly viewable FATE results
by visiting this website:

@url{http://fate.ffmpeg.org/}
@url{http://fate.ffmpeg.org/}

This is especially recommended for all people contributing source
This is especially recommended for all people contributing source
code to FFmpeg, as it can be seen if some test on some platform broke
with their recent contribution. This usually happens on the platforms
the developers could not test on.

The second part of this document describes how you can run FATE to
The second part of this document describes how you can run FATE to
submit your results to FFmpeg's FATE server. If you want to submit your
results be sure to check that your combination of CPU, OS and compiler
is not already listed on the above mentioned website.

In the third part you can find a comprehensive listing of FATE makefile
In the third part you can find a comprehensive listing of FATE makefile
targets and variables.


@chapter Using FATE from your FFmpeg source directory

If you want to run FATE on your machine you need to have the samples
If you want to run FATE on your machine you need to have the samples
in place. You can get the samples via the build target fate-rsync.
Use this command from the top-level source directory:

@@ -51,11 +50,11 @@ make fate-rsync SAMPLES=fate-suite/
make fate SAMPLES=fate-suite/
@end example

The above commands set the samples location by passing a makefile
The above commands set the samples location by passing a makefile
variable via command line. It is also possible to set the samples
location at source configuration time by invoking configure with
@option{--samples=<path to the samples directory>}. Afterwards you can
invoke the makefile targets without setting the @var{SAMPLES} makefile
`--samples=<path to the samples directory>'. Afterwards you can
invoke the makefile targets without setting the SAMPLES makefile
variable. This is illustrated by the following commands:

@example
@@ -64,7 +63,7 @@ make fate-rsync
make fate
@end example

Yet another way to tell FATE about the location of the sample
Yet another way to tell FATE about the location of the sample
directory is by making sure the environment variable FATE_SAMPLES
contains the path to your samples directory. This can be achieved
by e.g. putting that variable in your shell profile or by setting
@@ -85,7 +84,7 @@ To use a custom wrapper to run the test, pass @option{--target-exec} to

@chapter Submitting the results to the FFmpeg result aggregation server

To submit your results to the server you should run fate through the
To submit your results to the server you should run fate through the
shell script @file{tests/fate.sh} from the FFmpeg sources. This script needs
to be invoked with a configuration file as its first argument.

@@ -93,23 +92,23 @@ to be invoked with a configuration file as its first argument.
tests/fate.sh /path/to/fate_config
@end example

A configuration file template with comments describing the individual
A configuration file template with comments describing the individual
configuration variables can be found at @file{doc/fate_config.sh.template}.

@ifhtml
The mentioned configuration template is also available here:
The mentioned configuration template is also available here:
@verbatiminclude fate_config.sh.template
@end ifhtml

Create a configuration that suits your needs, based on the configuration
template. The @env{slot} configuration variable can be any string that is not
Create a configuration that suits your needs, based on the configuration
template. The `slot' configuration variable can be any string that is not
yet used, but it is suggested that you name it adhering to the following
pattern @samp{@var{arch}-@var{os}-@var{compiler}-@var{compiler version}}. The
configuration file itself will be sourced in a shell script, therefore all
shell features may be used. This enables you to setup the environment as you
need it for your build.
pattern <arch>-<os>-<compiler>-<compiler version>. The configuration file
itself will be sourced in a shell script, therefore all shell features may
be used. This enables you to setup the environment as you need it for your
build.

For your first test runs the @env{fate_recv} variable should be empty or
For your first test runs the `fate_recv' variable should be empty or
commented out. This will run everything as normal except that it will omit
the submission of the results to the server. The following files should be
present in $workdir as specified in the configuration file:
@@ -122,29 +121,29 @@ present in $workdir as specified in the configuration file:
@item version
@end itemize

When you have everything working properly you can create an SSH key pair
When you have everything working properly you can create an SSH key pair
and send the public key to the FATE server administrator who can be contacted
at the email address @email{fate-admin@@ffmpeg.org}.

Configure your SSH client to use public key authentication with that key
Configure your SSH client to use public key authentication with that key
when connecting to the FATE server. Also do not forget to check the identity
of the server and to accept its host key. This can usually be achieved by
running your SSH client manually and killing it after you accepted the key.
The FATE server's fingerprint is:

@table @samp
@table @option
@item RSA
d3:f1:83:97:a4:75:2b:a6:fb:d6:e8:aa:81:93:97:51
@item ECDSA
76:9f:68:32:04:1e:d5:d4:ec:47:3f:dc:fc:18:17:86
@end table
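For instance, a first manual connection to generate a key and accept the server's host key could look like the following; the key path is only an illustration, and the @code{fate} user name matches the submission command shown in the configuration template below:

@example
ssh-keygen -t rsa -f ~/.ssh/fate_key
ssh -T -i ~/.ssh/fate_key fate@@fate.ffmpeg.org
@end example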

If you have problems connecting to the FATE server, it may help to try out
If you have problems connecting to the FATE server, it may help to try out
the @command{ssh} command with one or more @option{-v} options. You should
get detailed output concerning your SSH configuration and the authentication
process.

The only thing left is to automate the execution of the fate.sh script and
The only thing left is to automate the execution of the fate.sh script and
the synchronisation of the samples directory.


@@ -165,7 +164,7 @@ Run the FATE test suite (requires the fate-suite dataset).

@section Makefile variables

@table @env
@table @option
@item V
Verbosity level, can be set to 0, 1 or 2.
@itemize
@@ -183,20 +182,20 @@ Specify how many threads to use while running regression tests, it is
quite useful to detect thread-related regressions.

@item THREAD_TYPE
Specify which threading strategy test, either @samp{slice} or @samp{frame},
by default @samp{slice+frame}
Specify which threading strategy test, either @var{slice} or @var{frame},
by default @var{slice+frame}

@item CPUFLAGS
Specify CPU flags.

@item TARGET_EXEC
Specify or override the wrapper used to run the tests.
The @env{TARGET_EXEC} option provides a way to run FATE wrapped in
The @var{TARGET_EXEC} option provides a way to run FATE wrapped in
@command{valgrind}, @command{qemu-user} or @command{wine} or on remote targets
through @command{ssh}.

@item GEN
Set to @samp{1} to generate the missing or mismatched references.
Set to @var{1} to generate the missing or mismatched references.
@end table
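The variables above can be combined freely on a single invocation, for example (samples path as used earlier in this document):

@example
make fate SAMPLES=fate-suite/ THREADS=4 GEN=1
@end example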

@section Examples

@@ -1,6 +1,5 @@
slot= # some unique identifier
repo=git://source.ffmpeg.org/ffmpeg.git # the source repository
#branch=release/2.6 # the branch to test
samples= # path to samples directory
workdir= # directory in which to do all the work
#fate_recv="ssh -T fate@fate.ffmpeg.org" # command to submit report
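A filled-in configuration might look like this; the slot follows the naming pattern suggested above, and the paths are placeholders:

slot=x86_64-linux-gcc-5.2
repo=git://source.ffmpeg.org/ffmpeg.git
samples=/home/fate/fate-suite
workdir=/home/fate/work
#fate_recv="ssh -T fate@fate.ffmpeg.org"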

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Bitstream Filters Documentation
@titlepage

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Codecs Documentation
@titlepage

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Devices Documentation
@titlepage

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Filters Documentation
@titlepage

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Formats Documentation
@titlepage

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Protocols Documentation
@titlepage

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Resampler Documentation
@titlepage

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Scaler Documentation
@titlepage

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Utilities Documentation
@titlepage

169 doc/ffmpeg.texi
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle ffmpeg Documentation
|
||||
@titlepage
|
||||
@@ -80,7 +79,7 @@ The format option may be needed for raw input files.
|
||||
The transcoding process in @command{ffmpeg} for each output can be described by
|
||||
the following diagram:
|
||||
|
||||
@verbatim
|
||||
@example
|
||||
_______ ______________
|
||||
| | | |
|
||||
| input | demuxer | encoded data | decoder
|
||||
@@ -99,7 +98,7 @@ the following diagram:
|
||||
|________| |______________|
|
||||
|
||||
|
||||
@end verbatim
|
||||
@end example
|
||||
|
||||
@command{ffmpeg} calls the libavformat library (containing demuxers) to read
|
||||
input files and get packets containing encoded data from them. When there are
|
||||
@@ -124,7 +123,7 @@ Simple filtergraphs are those that have exactly one input and output, both of
|
||||
the same type. In the above diagram they can be represented by simply inserting
|
||||
an additional step between decoding and encoding:
|
||||
|
||||
@verbatim
|
||||
@example
|
||||
_________ ______________
|
||||
| | | |
|
||||
| decoded | | encoded data |
|
||||
@@ -136,19 +135,19 @@ an additional step between decoding and encoding:
|
||||
| frames |
|
||||
|__________|
|
||||
|
||||
@end verbatim
|
||||
@end example
|
||||
|
||||
Simple filtergraphs are configured with the per-stream @option{-filter} option
|
||||
(with @option{-vf} and @option{-af} aliases for video and audio respectively).
|
||||
A simple filtergraph for video can look for example like this:
|
||||
|
||||
@verbatim
|
||||
@example
|
||||
_______ _____________ _______ ________
|
||||
| | | | | | | |
|
||||
| input | ---> | deinterlace | ---> | scale | ---> | output |
|
||||
|_______| |_____________| |_______| |________|
|
||||
|
||||
@end verbatim
|
||||
@end example
|
||||
|
||||
Note that some filters change frame properties but not frame contents. E.g. the
|
||||
@code{fps} filter in the example above changes number of frames, but does not
|
||||
@@ -161,7 +160,7 @@ processing chain applied to one stream. This is the case, for example, when the
|
||||
more than one input and/or output, or when output stream type is different from
|
||||
input. They can be represented with the following diagram:
|
||||
|
||||
@verbatim
|
||||
@example
|
||||
_________
|
||||
| |
|
||||
| input 0 |\ __________
|
||||
@@ -179,7 +178,7 @@ input. They can be represented with the following diagram:
|
||||
| input 2 |/
|
||||
|_________|
|
||||
|
||||
@end verbatim
|
||||
@end example
|
||||
|
||||
Complex filtergraphs are configured with the @option{-filter_complex} option.
|
||||
Note that this option is global, since a complex filtergraph, by its nature,
|
||||
@@ -198,14 +197,14 @@ step for the specified stream, so it does only demuxing and muxing. It is useful
|
||||
for changing the container format or modifying container-level metadata. The
|
||||
diagram above will, in this case, simplify to this:
|
||||
|
||||
@verbatim
|
||||
@example
|
||||
_______ ______________ ________
|
||||
| | | | | |
|
||||
| input | demuxer | encoded data | muxer | output |
|
||||
| file | ---------> | packets | -------> | file |
|
||||
|_______| |______________| |________|
|
||||
|
||||
@end verbatim
|
||||
@end example
|
||||
|
||||
Since there is no decoding or encoding, it is very fast and there is no quality
|
||||
loss. However, it might not work in some cases because of many factors. Applying
|
||||
@@ -280,15 +279,13 @@ data read from the input file.
When used as an output option (before an output filename), stop writing the
output after its duration reaches @var{duration}.

@var{duration} must be a time duration specification,
see @ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
@var{duration} may be a number in seconds, or in @code{hh:mm:ss[.xxx]} form.

-to and -t are mutually exclusive and -t has priority.

@item -to @var{position} (@emph{output})
Stop writing the output at @var{position}.
@var{position} must be a time duration specification,
see @ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
@var{position} may be a number in seconds, or in @code{hh:mm:ss[.xxx]} form.

-to and -t are mutually exclusive and -t has priority.
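As a quick illustration of the two options above (file names are placeholders), both of the following commands stop writing after the first minute of the input:

@example
ffmpeg -i input.mkv -t 60 head.mkv
ffmpeg -i input.mkv -to 00:01:00 head.mkv
@end example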
|
||||
|
||||
@@ -297,8 +294,8 @@ Set the file size limit, expressed in bytes.
|
||||
|
||||
@item -ss @var{position} (@emph{input/output})
|
||||
When used as an input option (before @code{-i}), seeks in this input file to
|
||||
@var{position}. Note that in most formats it is not possible to seek exactly,
|
||||
so @command{ffmpeg} will seek to the closest seek point before @var{position}.
|
||||
@var{position}. Note the in most formats it is not possible to seek exactly, so
|
||||
@command{ffmpeg} will seek to the closest seek point before @var{position}.
|
||||
When transcoding and @option{-accurate_seek} is enabled (the default), this
|
||||
extra segment between the seek point and @var{position} will be decoded and
|
||||
discarded. When doing stream copy or when @option{-noaccurate_seek} is used, it
|
||||
@@ -307,13 +304,7 @@ will be preserved.
|
||||
When used as an output option (before an output filename), decodes but discards
|
||||
input until the timestamps reach @var{position}.
|
||||
|
||||
@var{position} must be a time duration specification,
|
||||
see @ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
|
||||
|
||||
@item -sseof @var{position} (@emph{input/output})
|
||||
|
||||
Like the @code{-ss} option but relative to the "end of file". That is negative
|
||||
values are earlier in the file, 0 is at EOF.
|
||||
@var{position} may be either in seconds or in @code{hh:mm:ss[.xxx]} form.
|
||||
|
||||
@item -itsoffset @var{offset} (@emph{input})
|
||||
Set the input time offset.
|
||||
@@ -328,7 +319,7 @@ the time duration specified in @var{offset}.
|
||||
@item -timestamp @var{date} (@emph{output})
|
||||
Set the recording timestamp in the container.
|
||||
|
||||
@var{date} must be a date specification,
|
||||
@var{date} must be a time duration specification,
|
||||
see @ref{date syntax,,the Date section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
|
||||
|
||||
@item -metadata[:metadata_specifier] @var{key}=@var{value} (@emph{output,per-metadata})
|
||||
@@ -470,9 +461,6 @@ Technical note -- attachments are implemented as codec extradata, so this
|
||||
option can actually be used to extract extradata from any stream, not just
|
||||
attachments.
|
||||
|
||||
@item -noautorotate
|
||||
Disable automatically rotating video based on file metadata.
|
||||
|
||||
@end table
|
||||
|
||||
@section Video Options
|
||||
@@ -698,10 +686,6 @@ is not specified, the value of the @var{DISPLAY} environment variable is used
|
||||
For DXVA2, this option should contain the number of the display adapter to use.
|
||||
If this option is not specified, the default adapter is used.
|
||||
@end table
|
||||
|
||||
@item -hwaccels
|
||||
List all hardware acceleration methods supported in this build of ffmpeg.
|
||||
|
||||
@end table
|
||||
|
||||
@section Audio Options
|
||||
@@ -847,14 +831,6 @@ ffmpeg -i INPUT -map 0:m:language:eng OUTPUT
|
||||
|
||||
Note that using this option disables the default mappings for this output file.
|
||||
|
||||
@item -ignore_unknown
|
||||
Ignore input streams with unknown type instead of failing if copying
|
||||
such streams is attempted.
|
||||
|
||||
@item -copy_unknown
|
||||
Allow input streams with unknown type to be copied instead of failing if copying
|
||||
such streams is attempted.
|
||||
|
||||
@item -map_channel [@var{input_file_id}.@var{stream_specifier}.@var{channel_id}|-1][:@var{output_file_id}.@var{stream_specifier}]
|
||||
Map an audio channel from a given input to an output. If
|
||||
@var{output_file_id}.@var{stream_specifier} is not set, the audio channel will
|
||||
@@ -1018,13 +994,6 @@ With -map you can select from which stream the timestamps should be
|
||||
taken. You can leave either video or audio unchanged and sync the
|
||||
remaining stream(s) to the unchanged one.
|
||||
|
||||
@item -frame_drop_threshold @var{parameter}
|
||||
Frame drop threshold, which specifies how much behind video frames can
|
||||
be before they are dropped. In frame rate units, so 1.0 is one frame.
|
||||
The default is -1.1. One possible usecase is to avoid framedrops in case
|
||||
of noisy timestamps or to increase frame drop precision in case of exact
|
||||
timestamps.
|
||||
|
||||
@item -async @var{samples_per_second}
|
||||
Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps,
|
||||
the parameter is the maximum samples per second by which the audio is changed.
|
||||
@@ -1181,19 +1150,6 @@ This option enables or disables accurate seeking in input files with the
|
||||
transcoding. Use @option{-noaccurate_seek} to disable it, which may be useful
|
||||
e.g. when copying some streams and transcoding the others.
|
||||
|
||||
@item -seek_timestamp (@emph{input})
|
||||
This option enables or disables seeking by timestamp in input files with the
|
||||
@option{-ss} option. It is disabled by default. If enabled, the argument
|
||||
to the @option{-ss} option is considered an actual timestamp, and is not
|
||||
offset by the start time of the file. This matters only for files which do
|
||||
not start from timestamp 0, such as transport streams.
|
||||
|
||||
@item -thread_queue_size @var{size} (@emph{input})
|
||||
This option sets the maximum number of queued packets when reading from the
|
||||
file or device. With low latency / high rate live streams, packets may be
|
||||
discarded if they are not read in a timely manner; raising this value can
|
||||
avoid it.
|
||||
|
||||
@item -override_ffserver (@emph{global})
|
||||
Overrides the input specifications from @command{ffserver}. Using this
|
||||
option you can map any input stream to @command{ffserver} and control
|
||||
@@ -1204,11 +1160,6 @@ requested by @command{ffserver}.
|
||||
The option is intended for cases where features are needed that cannot be
|
||||
specified to @command{ffserver} but can be to @command{ffmpeg}.
|
||||
|
||||
@item -sdp_file @var{file} (@emph{global})
|
||||
Print sdp information to @var{file}.
|
||||
This allows dumping sdp information when at least one output isn't an
|
||||
rtp stream.
|
||||
|
||||
@item -discard (@emph{input})
|
||||
Allows discarding specific streams or frames of streams at the demuxer.
|
||||
Not all demuxers support this.
|
||||
@@ -1233,9 +1184,6 @@ Discard all frames excepts keyframes.
|
||||
Discard all frames.
|
||||
@end table
|
||||
|
||||
@item -xerror (@emph{global})
|
||||
Stop and exit on error
|
||||
|
||||
@end table
|
||||
|
||||
As a special exception, you can use a bitmap subtitle stream as input: it
|
||||
@@ -1261,10 +1209,7 @@ awkward to specify on the command line. Lines starting with the hash
|
||||
('#') character are ignored and are used to provide comments. Check
|
||||
the @file{presets} directory in the FFmpeg source tree for examples.
|
||||
|
||||
There are two types of preset files: ffpreset and avpreset files.
|
||||
|
||||
@subsection ffpreset files
|
||||
ffpreset files are specified with the @code{vpre}, @code{apre},
|
||||
Preset files are specified with the @code{vpre}, @code{apre},
|
||||
@code{spre}, and @code{fpre} options. The @code{fpre} option takes the
|
||||
filename of the preset instead of a preset name as input and can be
|
||||
used for any kind of codec. For the @code{vpre}, @code{apre}, and
|
||||
@@ -1289,31 +1234,67 @@ directories, where @var{codec_name} is the name of the codec to which
|
||||
the preset file options will be applied. For example, if you select
|
||||
the video codec with @code{-vcodec libvpx} and use @code{-vpre 1080p},
|
||||
then it will search for the file @file{libvpx-1080p.ffpreset}.
|
||||
|
||||
@subsection avpreset files
|
||||
avpreset files are specified with the @code{pre} option. They work similar to
|
||||
ffpreset files, but they only allow encoder- specific options. Therefore, an
|
||||
@var{option}=@var{value} pair specifying an encoder cannot be used.
|
||||
|
||||
When the @code{pre} option is specified, ffmpeg will look for files with the
|
||||
suffix .avpreset in the directories @file{$AVCONV_DATADIR} (if set), and
|
||||
@file{$HOME/.avconv}, and in the datadir defined at configuration time (usually
|
||||
@file{PREFIX/share/ffmpeg}), in that order.
|
||||
|
||||
First ffmpeg searches for a file named @var{codec_name}-@var{arg}.avpreset in
|
||||
the above-mentioned directories, where @var{codec_name} is the name of the codec
|
||||
to which the preset file options will be applied. For example, if you select the
|
||||
video codec with @code{-vcodec libvpx} and use @code{-pre 1080p}, then it will
|
||||
search for the file @file{libvpx-1080p.avpreset}.
|
||||
|
||||
If no such file is found, then ffmpeg will search for a file named
|
||||
@var{arg}.avpreset in the same directories.
|
||||
|
||||
@c man end OPTIONS
|
||||
|
||||
@chapter Tips
|
||||
@c man begin TIPS
|
||||
|
||||
@itemize
|
||||
@item
|
||||
For streaming at very low bitrates, use a low frame rate
|
||||
and a small GOP size. This is especially true for RealVideo where
|
||||
the Linux player does not seem to be very fast, so it can miss
|
||||
frames. An example is:
|
||||
|
||||
@example
|
||||
ffmpeg -g 3 -r 3 -t 10 -b:v 50k -s qcif -f rv10 /tmp/b.rm
|
||||
@end example
|
||||
|
||||
@item
|
||||
The parameter 'q' which is displayed while encoding is the current
|
||||
quantizer. The value 1 indicates that a very good quality could
|
||||
be achieved. The value 31 indicates the worst quality. If q=31 appears
|
||||
too often, it means that the encoder cannot compress enough to meet
|
||||
your bitrate. You must either increase the bitrate, decrease the
|
||||
frame rate or decrease the frame size.
|
||||
|
||||
@item
|
||||
If your computer is not fast enough, you can speed up the
|
||||
compression at the expense of the compression ratio. You can use
|
||||
'-me zero' to speed up motion estimation, and '-g 0' to disable
|
||||
motion estimation completely (you have only I-frames, which means it
|
||||
is about as good as JPEG compression).
|
||||
|
||||
@item
|
||||
To have very low audio bitrates, reduce the sampling frequency
|
||||
(down to 22050 Hz for MPEG audio, 22050 or 11025 for AC-3).
|
||||
|
||||
@item
|
||||
To have a constant quality (but a variable bitrate), use the option
|
||||
'-qscale n' when 'n' is between 1 (excellent quality) and 31 (worst
|
||||
quality).
|
||||
|
||||
@end itemize
|
||||
@c man end TIPS
|
||||
|
||||
@chapter Examples
|
||||
@c man begin EXAMPLES
|
||||
|
||||
@section Preset files
|
||||
|
||||
A preset file contains a sequence of @var{option=value} pairs, one for
|
||||
each line, specifying a sequence of options which can be specified also on
|
||||
the command line. Lines starting with the hash ('#') character are ignored and
|
||||
are used to provide comments. Empty lines are also ignored. Check the
|
||||
@file{presets} directory in the FFmpeg source tree for examples.
|
||||
|
||||
Preset files are specified with the @code{pre} option, this option takes a
|
||||
preset name as input. FFmpeg searches for a file named @var{preset_name}.avpreset in
|
||||
the directories @file{$AVCONV_DATADIR} (if set), and @file{$HOME/.ffmpeg}, and in
|
||||
the data directory defined at configuration time (usually @file{$PREFIX/share/ffmpeg})
|
||||
in that order. For example, if the argument is @code{libx264-max}, it will
|
||||
search for the file @file{libx264-max.avpreset}.
|
||||
|
||||
@section Video and Audio grabbing
|
||||
|
||||
If you specify the input format and device then ffmpeg can grab video
|
||||
@@ -1461,7 +1442,7 @@ combination with -ss to start extracting from a certain point in time.
|
||||
|
||||
For creating a video from many images:
|
||||
@example
|
||||
ffmpeg -f image2 -framerate 12 -i foo-%03d.jpeg -s WxH foo.avi
|
||||
ffmpeg -f image2 -i foo-%03d.jpeg -r 12 -s WxH foo.avi
|
||||
@end example
|
||||
|
||||
The syntax @code{foo-%03d.jpeg} specifies to use a decimal number
|
||||
@@ -1476,7 +1457,7 @@ image2-specific @code{-pattern_type glob} option.
|
||||
For example, for creating a video from filenames matching the glob pattern
|
||||
@code{foo-*.jpeg}:
|
||||
@example
|
||||
ffmpeg -f image2 -pattern_type glob -framerate 12 -i 'foo-*.jpeg' -s WxH foo.avi
|
||||
ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle ffplay Documentation
|
||||
@titlepage
|
||||
@@ -47,17 +46,9 @@ Disable video.
|
||||
@item -sn
|
||||
Disable subtitles.
|
||||
@item -ss @var{pos}
|
||||
Seek to @var{pos}. Note that in most formats it is not possible to seek
|
||||
exactly, so @command{ffplay} will seek to the nearest seek point to
|
||||
@var{pos}.
|
||||
|
||||
@var{pos} must be a time duration specification,
|
||||
see @ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
|
||||
Seek to a given position in seconds.
|
||||
@item -t @var{duration}
|
||||
Play @var{duration} seconds of audio/video.
|
||||
|
||||
@var{duration} must be a time duration specification,
|
||||
see @ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
|
||||
play <duration> seconds of audio/video
|
||||
@item -bytes
|
||||
Seek by bytes.
|
||||
@item -nodisp
|
||||
@@ -133,20 +124,24 @@ master clock is used to control audio-video synchronization. Most media
|
||||
players use audio as master clock, but in some cases (streaming or high
|
||||
quality broadcast) it is necessary to change that. This option is mainly
|
||||
used for debugging purposes.
|
||||
@item -ast @var{audio_stream_specifier}
|
||||
Select the desired audio stream using the given stream specifier. The stream
|
||||
specifiers are described in the @ref{Stream specifiers} chapter. If this option
|
||||
is not specified, the "best" audio stream is selected in the program of the
|
||||
already selected video stream.
|
||||
@item -vst @var{video_stream_specifier}
|
||||
Select the desired video stream using the given stream specifier. The stream
|
||||
specifiers are described in the @ref{Stream specifiers} chapter. If this option
|
||||
is not specified, the "best" video stream is selected.
|
||||
@item -sst @var{subtitle_stream_specifier}
|
||||
Select the desired subtitle stream using the given stream specifier. The stream
|
||||
specifiers are described in the @ref{Stream specifiers} chapter. If this option
|
||||
is not specified, the "best" subtitle stream is selected in the program of the
|
||||
already selected video or audio stream.
|
||||
@item -threads @var{count}
|
||||
Set the thread count. By default, @command{ffplay} automatically detects a
|
||||
suitable number of threads to use.
|
||||
@item -ast @var{audio_stream_number}
|
||||
Select the desired audio stream number, counting from 0. The number
|
||||
refers to the list of all the input audio streams. If it is greater
|
||||
than the number of audio streams minus one, then the last one is
|
||||
selected, if it is negative the audio playback is disabled.
|
||||
@item -vst @var{video_stream_number}
|
||||
Select the desired video stream number, counting from 0. The number
|
||||
refers to the list of all the input video streams. If it is greater
|
||||
than the number of video streams minus one, then the last one is
|
||||
selected, if it is negative the video playback is disabled.
|
||||
@item -sst @var{subtitle_stream_number}
|
||||
Select the desired subtitle stream number, counting from 0. The number
|
||||
refers to the list of all the input subtitle streams. If it is greater
|
||||
than the number of subtitle streams minus one, then the last one is
|
||||
selected, if it is negative the subtitle rendering is disabled.
|
||||
@item -autoexit
|
||||
Exit when video is done playing.
|
||||
@item -exitonkeydown
|
||||
@@ -169,7 +164,7 @@ Force a specific video decoder.
|
||||
Force a specific subtitle decoder.
|
||||
|
||||
@item -autorotate
|
||||
Automatically rotate the video according to file metadata. Enabled by
|
||||
Automatically rotate the video according to presentation metadata. Enabled by
|
||||
default, use @option{-noautorotate} to disable it.
|
||||
|
||||
@item -framedrop
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle ffprobe Documentation
|
||||
@titlepage
|
||||
@@ -447,17 +446,17 @@ writer).
|
||||
It can assume one of the following values:
|
||||
@table @option
|
||||
@item c
|
||||
Perform C-like escaping. Strings containing a newline (@samp{\n}), carriage
|
||||
return (@samp{\r}), a tab (@samp{\t}), a form feed (@samp{\f}), the escaping
|
||||
character (@samp{\}) or the item separator character @var{SEP} are escaped
|
||||
using C-like fashioned escaping, so that a newline is converted to the
|
||||
sequence @samp{\n}, a carriage return to @samp{\r}, @samp{\} to @samp{\\} and
|
||||
the separator @var{SEP} is converted to @samp{\@var{SEP}}.
|
||||
Perform C-like escaping. Strings containing a newline ('\n'), carriage
|
||||
return ('\r'), a tab ('\t'), a form feed ('\f'), the escaping
|
||||
character ('\') or the item separator character @var{SEP} are escaped using C-like fashioned
|
||||
escaping, so that a newline is converted to the sequence "\n", a
|
||||
carriage return to "\r", '\' to "\\" and the separator @var{SEP} is
|
||||
converted to "\@var{SEP}".
|
||||
|
||||
@item csv
|
||||
Perform CSV-like escaping, as described in RFC4180. Strings
|
||||
containing a newline (@samp{\n}), a carriage return (@samp{\r}), a double quote
|
||||
(@samp{"}), or @var{SEP} are enclosed in double-quotes.
|
||||
containing a newline ('\n'), a carriage return ('\r'), a double quote
|
||||
('"'), or @var{SEP} are enclosed in double-quotes.
|
||||
|
||||
@item none
|
||||
Perform no escaping.
|
||||
@@ -485,7 +484,7 @@ The description of the accepted options follows.
|
||||
Separator character used to separate the chapter, the section name, IDs and
|
||||
potential tags in the printed field key.
|
||||
|
||||
Default value is @samp{.}.
|
||||
Default value is '.'.
|
||||
|
||||
@item hierarchical, h
|
||||
Specify if the section name specification should be hierarchical. If
|
||||
@@ -507,22 +506,21 @@ The following conventions are adopted:
|
||||
@item
|
||||
all key and values are UTF-8
|
||||
@item
|
||||
@samp{.} is the subgroup separator
|
||||
'.' is the subgroup separator
|
||||
@item
|
||||
newline, @samp{\t}, @samp{\f}, @samp{\b} and the following characters are
|
||||
escaped
|
||||
newline, '\t', '\f', '\b' and the following characters are escaped
|
||||
@item
|
||||
@samp{\} is the escape character
|
||||
'\' is the escape character
|
||||
@item
|
||||
@samp{#} is the comment indicator
|
||||
'#' is the comment indicator
|
||||
@item
|
||||
@samp{=} is the key/value separator
|
||||
'=' is the key/value separator
|
||||
@item
|
||||
@samp{:} is not used but usually parsed as key/value separator
|
||||
':' is not used but usually parsed as key/value separator
|
||||
@end itemize
|
||||
|
||||
This writer accepts options as a list of @var{key}=@var{value} pairs,
|
||||
separated by @samp{:}.
|
||||
separated by ":".
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
|
@@ -48,11 +48,6 @@
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="codec_type" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="stream_index" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
@@ -70,16 +65,6 @@
|
||||
<xsd:attribute name="data_hash" type="xsd:string" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetSideDataListType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_data" type="ffprobe:packetSideDataType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="packetSideDataType">
|
||||
<xsd:attribute name="side_data_type" type="xsd:string"/>
|
||||
<xsd:attribute name="side_data_size" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
@@ -87,7 +72,6 @@
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="media_type" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="stream_index" type="xsd:int" />
|
||||
<xsd:attribute name="key_frame" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
@@ -171,7 +155,6 @@
|
||||
<xsd:sequence>
|
||||
<xsd:element name="disposition" type="ffprobe:streamDispositionType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
@@ -188,8 +171,6 @@
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:int"/>
|
||||
<xsd:attribute name="height" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_width" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_height" type="xsd:int"/>
|
||||
<xsd:attribute name="has_b_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
|
||||
@@ -201,7 +182,6 @@
|
||||
<xsd:attribute name="color_primaries" type="xsd:string"/>
|
||||
<xsd:attribute name="chroma_location" type="xsd:string"/>
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
<xsd:attribute name="refs" type="xsd:int"/>
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
@@ -273,8 +253,8 @@
|
||||
<xsd:complexType name="programVersionType">
|
||||
<xsd:attribute name="version" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="copyright" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_date" type="xsd:string"/>
|
||||
<xsd:attribute name="build_time" type="xsd:string"/>
|
||||
<xsd:attribute name="build_date" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_time" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="compiler_ident" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="configuration" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
@@ -82,7 +82,6 @@ Feed feed1.ffm
|
||||
# ra : RealNetworks-compatible stream. Audio only.
|
||||
# mpjpeg : Multipart JPEG (works with Netscape without any plugin)
|
||||
# jpeg : Generate a single JPEG image.
|
||||
# mjpeg : Generate a M-JPEG stream.
|
||||
# asf : ASF compatible streaming (Windows Media Player format).
|
||||
# swf : Macromedia Flash compatible stream
|
||||
# avi : AVI format (MPEG-4 video, MPEG audio sound)
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle ffserver Documentation
|
||||
@titlepage
|
||||
@@ -118,8 +117,7 @@ Multiple streams can be connected to the same feed.
|
||||
|
||||
For example, you can have a situation described by the following
|
||||
graph:
|
||||
|
||||
@verbatim
|
||||
@example
|
||||
_________ __________
|
||||
| | | |
|
||||
ffmpeg 1 -----| feed 1 |-----| stream 1 |
|
||||
@@ -144,8 +142,7 @@ ffmpeg 2 -----| feed 3 |-----| stream 4 |
|
||||
| | | |
|
||||
| file 1 |-----| stream 5 |
|
||||
|_________| |__________|
|
||||
|
||||
@end verbatim
|
||||
@end example
|
||||
|
||||
@anchor{FFM}
|
||||
@section FFM, FFM2 formats
|
||||
|
@@ -36,10 +36,8 @@ Possible forms of stream specifiers are:
Matches the stream with this index. E.g. @code{-threads:1 4} would set the
thread count for the second stream to 4.
@item @var{stream_type}[:@var{stream_index}]
@var{stream_type} is one of following: 'v' or 'V' for video, 'a' for audio, 's'
for subtitle, 'd' for data, and 't' for attachments. 'v' matches all video
streams, 'V' only matches video streams which are not attached pictures, video
thumbnails or cover arts. If @var{stream_index} is given, then it matches
@var{stream_type} is one of following: 'v' for video, 'a' for audio, 's' for subtitle,
'd' for data, and 't' for attachments. If @var{stream_index} is given, then it matches
stream number @var{stream_index} of this type. Otherwise, it matches all
streams of this type.
@item p:@var{program_id}[:@var{stream_index}]
@@ -52,9 +50,6 @@ Match the stream by stream id (e.g. PID in MPEG-TS container).
Matches streams with the metadata tag @var{key} having the specified value. If
@var{value} is not given, matches streams that contain the given tag with any
value.
@item u
Matches streams with usable configuration, the codec must be defined and the
essential information such as video dimension or audio sample rate must be present.

Note that in @command{ffmpeg}, matching by metadata will only work properly for
input files.
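For example (file names are placeholders), the following command keeps the first video stream plus any stream tagged with an English language metadata entry, without re-encoding:

@example
ffmpeg -i input.mkv -map 0:v:0 -map 0:m:language:eng -c copy output.mkv
@end example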
|
||||
@@ -170,29 +165,28 @@ omitted. "repeat" can also be used alone.
If "repeat" is used alone, and with no prior loglevel set, the default
loglevel will be used. If multiple loglevel parameters are given, using
'repeat' will not change the loglevel.
@var{loglevel} is a string or a number containing one of the following values:
@var{loglevel} is a number or a string containing one of the following values:
@table @samp
@item quiet, -8
@item quiet
Show nothing at all; be silent.
@item panic, 0
@item panic
Only show fatal errors which could lead the process to crash, such as
and assert failure. This is not currently used for anything.
@item fatal, 8
@item fatal
Only show fatal errors. These are errors after which the process absolutely
cannot continue after.
@item error, 16
@item error
Show all errors, including ones which can be recovered from.
@item warning, 24
@item warning
Show all warnings and errors. Any message related to possibly
incorrect or unexpected events will be shown.
@item info, 32
@item info
Show informative messages during processing. This is in addition to
warnings and errors. This is the default value.
@item verbose, 40
@item verbose
Same as @code{info}, except more verbose.
@item debug, 48
@item debug
Show everything, including debugging information.
@item trace, 56
@end table
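For example (an illustration added here, not part of the original), the symbolic and numeric forms are interchangeable; the following two commands request the same verbosity:

@example
ffmpeg -loglevel warning -i input.avi output.mp4
ffmpeg -loglevel 24 -i input.avi output.mp4
@end example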
By default the program logs to stderr, if coloring is supported by the
@@ -210,29 +204,21 @@ directory.
This file can be useful for bug reports.
It also implies @code{-loglevel verbose}.

Setting the environment variable @env{FFREPORT} to any value has the
Setting the environment variable @code{FFREPORT} to any value has the
same effect. If the value is a ':'-separated key=value sequence, these
options will affect the report; option values must be escaped if they
options will affect the report; options values must be escaped if they
contain special characters or the options delimiter ':' (see the
``Quoting and escaping'' section in the ffmpeg-utils manual).

The following options are recognized:
``Quoting and escaping'' section in the ffmpeg-utils manual). The
following option is recognized:
@table @option
@item file
set the file name to use for the report; @code{%p} is expanded to the name
of the program, @code{%t} is expanded to a timestamp, @code{%%} is expanded
to a plain @code{%}
@item level
set the log verbosity level using a numerical value (see @code{-loglevel}).
set the log level
@end table

For example, to output a report to a file named @file{ffreport.log}
using a log level of @code{32} (alias for log level @code{info}):

@example
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
@end example

Errors in parsing the environment variable are not fatal, and will not
appear in the report.
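To also illustrate the @code{%p} and @code{%t} expansions described above (an example added here, not present in the original text), the report name can embed the program name and a timestamp:

@example
FFREPORT=file=ffreport-%p-%t.log:level=32 ffmpeg -i input output
@end example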
@@ -267,14 +253,10 @@ Possible flags for this option are:
@item sse4.1
@item sse4.2
@item avx
@item avx2
@item xop
@item fma3
@item fma4
@item 3dnow
@item 3dnowext
@item bmi1
@item bmi2
@item cmov
@end table
@item ARM
@@ -285,13 +267,6 @@ Possible flags for this option are:
@item vfp
@item vfpv3
@item neon
@item setend
@end table
@item AArch64
@table @samp
@item armv8
@item vfp
@item neon
@end table
@item PowerPC
@table @samp
@@ -311,41 +286,8 @@ Possible flags for this option are:
@end table
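For instance (an illustrative example added here, not from the original hunk), the flag list accepts @samp{+} and @samp{-} prefixes to set or clear individual capabilities from the detected set:

@example
ffmpeg -cpuflags -sse+mmx -i input output
@end example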
@item -opencl_bench
This option is used to benchmark all available OpenCL devices and print the
results. This option is only available when FFmpeg has been compiled with
@code{--enable-opencl}.

When FFmpeg is configured with @code{--enable-opencl}, the options for the
global OpenCL context are set via @option{-opencl_options}. See the
"OpenCL Options" section in the ffmpeg-utils manual for the complete list of
supported options. Amongst others, these options include the ability to select
a specific platform and device to run the OpenCL code on. By default, FFmpeg
will run on the first device of the first platform. While the options for the
global OpenCL context provide flexibility to the user in selecting the OpenCL
device of their choice, most users would probably want to select the fastest
OpenCL device for their system.

This option assists the selection of the most efficient configuration by
identifying the appropriate device for the user's system. The built-in
benchmark is run on all the OpenCL devices and the performance is measured for
each device. The devices in the results list are sorted based on their
performance with the fastest device listed first. The user can subsequently
invoke @command{ffmpeg} using the device deemed most appropriate via
@option{-opencl_options} to obtain the best performance for the OpenCL
accelerated code.

Typical usage to use the fastest OpenCL device involve the following steps.

Run the command:
@example
ffmpeg -opencl_bench
@end example
Note down the platform ID (@var{pidx}) and device ID (@var{didx}) of the first
i.e. fastest device in the list.
Select the platform and device using the command:
@example
ffmpeg -opencl_options platform_idx=@var{pidx}:device_idx=@var{didx} ...
@end example
Benchmark all available OpenCL devices and show the results. This option
is only available when FFmpeg has been compiled with @code{--enable-opencl}.

@item -opencl_options options (@emph{global})
Set OpenCL environment options. This option is only available when
@@ -98,7 +98,7 @@ Buffer references ownership and permissions
The AVFilterLink structure has a few AVFilterBufferRef fields. The
cur_buf and out_buf were used with the deprecated
start_frame/draw_slice/end_frame API and should no longer be used.
src_buf and partial_buf are used by libavfilter internally
src_buf, cur_buf_copy and partial_buf are used by libavfilter internally
and must not be accessed by filters.

Reference permissions
2426 doc/filters.texi (file diff suppressed because it is too large)
@@ -37,8 +37,6 @@ Possible values:
@table @samp
@item ignidx
Ignore index.
@item fastseek
Enable fast, but inaccurate seeks for some formats.
@item genpts
Generate PTS.
@item nofillin
@@ -127,25 +125,6 @@ Consider all spec non compliancies as errors.
Consider things that a sane encoder should not do as an error.
@end table

@item max_interleave_delta @var{integer} (@emph{output})
Set maximum buffering duration for interleaving. The duration is
expressed in microseconds, and defaults to 1000000 (1 second).

To ensure all the streams are interleaved correctly, libavformat will
wait until it has at least one packet for each stream before actually
writing any packets to the output file. When some streams are
"sparse" (i.e. there are large gaps between successive packets), this
can result in excessive buffering.

This field specifies the maximum difference between the timestamps of the
first and the last packet in the muxing queue, above which libavformat
will output a packet regardless of whether it has queued a packet for all
the streams.

If set to 0, libavformat will continue buffering packets until it has
a packet for each stream, regardless of the maximum timestamp
difference between the buffered packets.

@item use_wallclock_as_timestamps @var{integer} (@emph{input})
Use wallclock as timestamps.
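As an illustration of the @option{max_interleave_delta} option described above (example added here, not from the original), disabling the limit while remuxing a file with sparse streams could look like:

@example
ffmpeg -i input.mkv -c copy -max_interleave_delta 0 output.mkv
@end example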
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle General Documentation
|
||||
@titlepage
|
||||
@@ -109,14 +108,6 @@ Go to @url{http://www.wavpack.com/} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libwavpack} to configure to
|
||||
enable it.
|
||||
|
||||
@section OpenH264
|
||||
|
||||
FFmpeg can make use of the OpenH264 library for H.264 encoding.
|
||||
|
||||
Go to @url{http://www.openh264.org/} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libopenh264} to configure to
|
||||
enable it.
|
||||
|
||||
@section x264
|
||||
|
||||
FFmpeg can make use of the x264 library for H.264 encoding.
|
||||
@@ -145,14 +136,6 @@ x265 is under the GNU Public License Version 2 or later
|
||||
details), you must upgrade FFmpeg's license to GPL in order to use it.
|
||||
@end float
|
||||
|
||||
@section kvazaar
|
||||
|
||||
FFmpeg can make use of the kvazaar library for HEVC encoding.
|
||||
|
||||
Go to @url{https://github.com/ultravideo/kvazaar} and follow the
|
||||
instructions for installing the library. Then pass
|
||||
@code{--enable-libkvazaar} to configure to enable it.
|
||||
|
||||
@section libilbc
|
||||
|
||||
iLBC is a narrowband speech codec that has been made freely available
|
||||
@@ -160,7 +143,7 @@ by Google as part of the WebRTC project. libilbc is a packaging friendly
|
||||
copy of the iLBC codec. FFmpeg can make use of the libilbc library for
|
||||
iLBC encoding and decoding.
|
||||
|
||||
Go to @url{https://github.com/TimothyGu/libilbc} and follow the instructions for
|
||||
Go to @url{https://github.com/dekkers/libilbc} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libilbc} to configure to
|
||||
enable it.
|
||||
|
||||
@@ -187,8 +170,8 @@ included in compat/avisynth/, which allows the user to enable support
|
||||
without needing to search for these headers themselves.
|
||||
|
||||
For Windows, supported AviSynth variants are
|
||||
@url{http://avisynth.nl, AviSynth 2.6 RC1 or higher} for 32-bit builds and
|
||||
@url{http://avs-plus.net, AviSynth+ r1718 or higher} for 32-bit and 64-bit builds.
|
||||
@url{http://avisynth.nl, AviSynth 2.5 or 2.6} for 32-bit builds and
|
||||
@url{http://avs-plus.net, AviSynth+ 0.1} for 32-bit and 64-bit builds.
|
||||
|
||||
For Linux and OS X, the supported AviSynth variant is
|
||||
@url{https://github.com/avxsynth/avxsynth, AvxSynth}.
|
||||
@@ -200,17 +183,6 @@ end user having AviSynth or AvxSynth installed - they'll only need to be
|
||||
installed to use AviSynth scripts (obviously).
|
||||
@end float
|
||||
|
||||
@section Intel QuickSync Video
|
||||
|
||||
FFmpeg can use Intel QuickSync Video (QSV) for accelerated encoding and decoding
|
||||
of multiple codecs. To use QSV, FFmpeg must be linked against the @code{libmfx}
|
||||
dispatcher, which loads the actual decoding libraries.
|
||||
|
||||
The dispatcher is open source and can be downloaded from
|
||||
@url{https://github.com/lu-zero/mfx_dispatch.git}. FFmpeg needs to be configured
|
||||
with the @code{--enable-libmfx} option and @code{pkg-config} needs to be able to
|
||||
locate the dispatcher's @code{.pc} files.
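A rough sketch of the resulting workflow (added here for illustration; the @code{h264_qsv} encoder name is an assumption about the QSV integration and is not stated in this section):

@example
./configure --enable-libmfx
ffmpeg -i input.mp4 -c:v h264_qsv -b:v 2M output_qsv.mp4
@end example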
|
||||
|
||||
|
||||
@chapter Supported File Formats, Codecs or Features
|
||||
|
||||
@@ -226,10 +198,6 @@ library:
|
||||
@item 4xm @tab @tab X
|
||||
@tab 4X Technologies format, used in some games.
|
||||
@item 8088flex TMV @tab @tab X
|
||||
@item AAX @tab @tab X
|
||||
@tab Audible Enhanced Audio format, used in audiobooks.
|
||||
@item AA @tab @tab X
|
||||
@tab Audible Format 2, 3, and 4, used in audiobooks.
|
||||
@item ACT Voice @tab @tab X
|
||||
@tab contains G.729 audio
|
||||
@item Adobe Filmstrip @tab X @tab X
|
||||
@@ -245,7 +213,6 @@ library:
|
||||
@tab Audio format used on the Nintendo Gamecube.
|
||||
@item AFC @tab @tab X
|
||||
@tab Audio format used on the Nintendo Gamecube.
|
||||
@item APNG @tab X @tab X
|
||||
@item ASF @tab X @tab X
|
||||
@item AST @tab X @tab X
|
||||
@tab Audio format used on the Nintendo Wii.
|
||||
@@ -266,8 +233,6 @@ library:
|
||||
@tab Used in Z and Z95 games.
|
||||
@item Brute Force & Ignorance @tab @tab X
|
||||
@tab Used in the game Flash Traffic: City of Angels.
|
||||
@item BFSTM @tab @tab X
|
||||
@tab Audio format used on the Nintendo WiiU (based on BRSTM).
|
||||
@item BRSTM @tab @tab X
|
||||
@tab Audio format used on the Nintendo Wii.
|
||||
@item BWF @tab X @tab X
|
||||
@@ -278,10 +243,6 @@ library:
|
||||
@tab Used in the game Cyberia from Interplay.
|
||||
@item Delphine Software International CIN @tab @tab X
|
||||
@tab Multimedia format used by Delphine Software games.
|
||||
@item Digital Speech Standard (DSS) @tab @tab X
|
||||
@item Canopus HQ @tab @tab X
|
||||
@item Canopus HQA @tab @tab X
|
||||
@item Canopus HQX @tab @tab X
|
||||
@item CD+G @tab @tab X
|
||||
@tab Video format used by CD+G karaoke disks
|
||||
@item Phantom Cine @tab @tab X
|
||||
@@ -298,7 +259,6 @@ library:
|
||||
@item Deluxe Paint Animation @tab @tab X
|
||||
@item DFA @tab @tab X
|
||||
@tab This format is used in Chronomaster game
|
||||
@item DirectDraw Surface @tab @tab X
|
||||
@item DSD Stream File (DSF) @tab @tab X
|
||||
@item DV video @tab X @tab X
|
||||
@item DXA @tab @tab X
|
||||
@@ -495,7 +455,6 @@ library:
|
||||
@item SoX native format @tab X @tab X
|
||||
@item SUN AU format @tab X @tab X
|
||||
@item SUP raw PGS subtitles @tab @tab X
|
||||
@item TDSC @tab @tab X
|
||||
@item Text files @tab @tab X
|
||||
@item THP @tab @tab X
|
||||
@tab Used on the Nintendo GameCube.
|
||||
@@ -503,7 +462,6 @@ library:
|
||||
@tab Tiertex .seq files used in the DOS CD-ROM version of the game Flashback.
|
||||
@item True Audio @tab @tab X
|
||||
@item VC-1 test bitstream @tab X @tab X
|
||||
@item Vidvox Hap @tab X @tab X
|
||||
@item Vivo @tab @tab X
|
||||
@item WAV @tab X @tab X
|
||||
@item WavPack @tab X @tab X
|
||||
@@ -538,7 +496,6 @@ following image formats are supported:
|
||||
@item Alias PIX @tab X @tab X
|
||||
@tab Alias/Wavefront PIX image format
|
||||
@item animated GIF @tab X @tab X
|
||||
@item APNG @tab X @tab X
|
||||
@item BMP @tab X @tab X
|
||||
@tab Microsoft BMP image
|
||||
@item BRender PIX @tab @tab X
|
||||
@@ -690,17 +647,15 @@ following image formats are supported:
|
||||
@tab Sorenson H.263 used in Flash
|
||||
@item Forward Uncompressed @tab @tab X
|
||||
@item Fraps @tab @tab X
|
||||
@item Go2Meeting @tab @tab X
|
||||
@tab fourcc: G2M2, G2M3
|
||||
@item Go2Webinar @tab @tab X
|
||||
@tab fourcc: G2M4
|
||||
@item H.261 @tab X @tab X
|
||||
@item H.263 / H.263-1996 @tab X @tab X
|
||||
@item H.263+ / H.263-1998 / H.263 version 2 @tab X @tab X
|
||||
@item H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 @tab E @tab X
|
||||
@tab encoding supported through external library libx264 and OpenH264
|
||||
@tab encoding supported through external library libx264
|
||||
@item HEVC @tab X @tab X
|
||||
@tab encoding supported through external library libx265 and libkvazaar
|
||||
@tab encoding supported through the external library libx265
|
||||
@item HNM version 4 @tab @tab X
|
||||
@item HuffYUV @tab X @tab X
|
||||
@item HuffYUV FFmpeg variant @tab X @tab X
|
||||
@@ -865,7 +820,7 @@ following image formats are supported:
|
||||
@item Name @tab Encoding @tab Decoding @tab Comments
|
||||
@item 8SVX exponential @tab @tab X
|
||||
@item 8SVX fibonacci @tab @tab X
|
||||
@item AAC+ @tab E @tab IX
|
||||
@item AAC+ @tab E @tab X
|
||||
@tab encoding supported through external library libaacplus
|
||||
@item AAC @tab E @tab X
|
||||
@tab encoding supported through external library libfaac and libvo-aacenc
|
||||
@@ -905,7 +860,7 @@ following image formats are supported:
|
||||
@item ADPCM MS IMA @tab X @tab X
|
||||
@item ADPCM Nintendo Gamecube AFC @tab @tab X
|
||||
@item ADPCM Nintendo Gamecube DTK @tab @tab X
|
||||
@item ADPCM Nintendo THP @tab @tab X
|
||||
@item ADPCM Nintendo Gamecube THP @tab @tab X
|
||||
@item ADPCM QT IMA @tab X @tab X
|
||||
@item ADPCM SEGA CRI ADX @tab X @tab X
|
||||
@tab Used in Sega Dreamcast games.
|
||||
@@ -934,12 +889,10 @@ following image formats are supported:
|
||||
@tab decoding supported through external library libcelt
|
||||
@item Delphine Software International CIN audio @tab @tab X
|
||||
@tab Codec used in Delphine Software International games.
|
||||
@item Digital Speech Standard - Standard Play mode (DSS SP) @tab @tab X
|
||||
@item Discworld II BMV Audio @tab @tab X
|
||||
@item COOK @tab @tab X
|
||||
@tab All versions except 5.1 are supported.
|
||||
@item DCA (DTS Coherent Acoustics) @tab X @tab X
|
||||
@tab supported extensions: XCh, XLL (partially)
|
||||
@item DPCM id RoQ @tab X @tab X
|
||||
@tab Used in Quake III, Jedi Knight 2 and other computer games.
|
||||
@item DPCM Interplay @tab @tab X
|
||||
@@ -983,8 +936,8 @@ following image formats are supported:
|
||||
@item Musepack SV8 @tab @tab X
|
||||
@item Nellymoser Asao @tab X @tab X
|
||||
@item On2 AVC (Audio for Video Codec) @tab @tab X
|
||||
@item Opus @tab E @tab X
|
||||
@tab encoding supported through external library libopus
|
||||
@item Opus @tab E @tab E
|
||||
@tab supported through external library libopus
|
||||
@item PCM A-law @tab X @tab X
|
||||
@item PCM mu-law @tab X @tab X
|
||||
@item PCM signed 8-bit planar @tab X @tab X
|
||||
|
@@ -1,10 +1,9 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Using Git to develop FFmpeg
|
||||
@settitle Using git to develop FFmpeg
|
||||
|
||||
@titlepage
|
||||
@center @titlefont{Using Git to develop FFmpeg}
|
||||
@center @titlefont{Using git to develop FFmpeg}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
@@ -13,9 +12,9 @@
|
||||
|
||||
@chapter Introduction
|
||||
|
||||
This document aims in giving some quick references on a set of useful Git
|
||||
This document aims in giving some quick references on a set of useful git
|
||||
commands. You should always use the extensive and detailed documentation
|
||||
provided directly by Git:
|
||||
provided directly by git:
|
||||
|
||||
@example
|
||||
git --help
|
||||
@@ -32,21 +31,22 @@ man git-<command>
|
||||
shows information about the subcommand <command>.
|
||||
|
||||
Additional information could be found on the
|
||||
@url{http://gitref.org, Git Reference} website.
|
||||
@url{http://gitref.org, Git Reference} website
|
||||
|
||||
For more information about the Git project, visit the
|
||||
@url{http://git-scm.com/, Git website}.
|
||||
|
||||
@url{http://git-scm.com/, Git website}
|
||||
|
||||
Consult these resources whenever you have problems, they are quite exhaustive.
|
||||
|
||||
What follows now is a basic introduction to Git and some FFmpeg-specific
|
||||
guidelines to ease the contribution to the project.
|
||||
guidelines to ease the contribution to the project
|
||||
|
||||
@chapter Basics Usage
|
||||
|
||||
@section Get Git
|
||||
@section Get GIT
|
||||
|
||||
You can get Git from @url{http://git-scm.com/}
|
||||
You can get git from @url{http://git-scm.com/}
|
||||
Most distribution and operating system provide a package for it.
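For completeness (an illustration added here, not part of the original hunk), a first checkout of the FFmpeg tree typically looks like:

@example
git clone git://source.ffmpeg.org/ffmpeg.git ffmpeg
cd ffmpeg
@end example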
|
||||
|
||||
|
||||
@@ -74,7 +74,6 @@ git config --global core.autocrlf false
|
||||
@end example
|
||||
|
||||
|
||||
@anchor{Updating the source tree to the latest revision}
|
||||
@section Updating the source tree to the latest revision
|
||||
|
||||
@example
|
||||
@@ -107,7 +106,7 @@ git add [-A] <filename/dirname>
|
||||
git rm [-r] <filename/dirname>
|
||||
@end example
|
||||
|
||||
Git needs to get notified of all changes you make to your working
|
||||
GIT needs to get notified of all changes you make to your working
|
||||
directory that makes files appear or disappear.
|
||||
Line moves across files are automatically tracked.
|
||||
|
||||
@@ -127,8 +126,8 @@ will show all local modifications in your working directory as unified diff.
|
||||
git log <filename(s)>
|
||||
@end example
|
||||
|
||||
You may also use the graphical tools like @command{gitview} or @command{gitk}
|
||||
or the web interface available at @url{http://source.ffmpeg.org/}.
|
||||
You may also use the graphical tools like gitview or gitk or the web
|
||||
interface available at http://source.ffmpeg.org/
|
||||
|
||||
@section Checking source tree status
|
||||
|
||||
@@ -149,7 +148,6 @@ git diff --check
|
||||
to double check your changes before committing them to avoid trouble later
|
||||
on. All experienced developers do this on each and every commit, no matter
|
||||
how small.
|
||||
|
||||
Every one of them has been saved from looking like a fool by this many times.
|
||||
It's very easy for stray debug output or cosmetic modifications to slip in,
|
||||
please avoid problems through this extra level of scrutiny.
|
||||
@@ -172,14 +170,14 @@ to make sure you don't have untracked files or deletions.
|
||||
git add [-i|-p|-A] <filenames/dirnames>
|
||||
@end example
|
||||
|
||||
Make sure you have told Git your name and email address
|
||||
Make sure you have told git your name and email address
|
||||
|
||||
@example
|
||||
git config --global user.name "My Name"
|
||||
git config --global user.email my@@email.invalid
|
||||
@end example
|
||||
|
||||
Use @option{--global} to set the global configuration for all your Git checkouts.
|
||||
Use @var{--global} to set the global configuration for all your git checkouts.
|
||||
|
||||
Git will select the changes to the files for commit. Optionally you can use
|
||||
the interactive or the patch mode to select hunk by hunk what should be
|
||||
@@ -210,7 +208,7 @@ include filenames in log messages, Git provides that information.
|
||||
|
||||
Possibly make the commit message have a terse, descriptive first line, an
|
||||
empty line and then a full description. The first line will be used to name
|
||||
the patch by @command{git format-patch}.
|
||||
the patch by git format-patch.
|
||||
|
||||
@section Preparing a patchset
|
||||
|
||||
@@ -326,14 +324,12 @@ faulty commit disappear from the history.
|
||||
@section Pushing changes to remote trees
|
||||
|
||||
@example
|
||||
git push origin master --dry-run
|
||||
git push
|
||||
@end example
|
||||
|
||||
Will simulate a push of the local master branch to the default remote
|
||||
(@var{origin}). And list which branches and ranges or commits would have been
|
||||
pushed.
|
||||
Will push the changes to the default remote (@var{origin}).
|
||||
Git will prevent you from pushing changes if the local and remote trees are
|
||||
out of sync. Refer to @ref{Updating the source tree to the latest revision}.
|
||||
out of sync. Refer to and to sync the local tree.
|
||||
|
||||
@example
|
||||
git remote add <name> <url>
|
||||
@@ -352,24 +348,23 @@ branches matching the local ones.
|
||||
|
||||
@section Finding a specific svn revision
|
||||
|
||||
Since version 1.7.1 Git supports @samp{:/foo} syntax for specifying commits
|
||||
Since version 1.7.1 git supports @var{:/foo} syntax for specifying commits
|
||||
based on a regular expression. see man gitrevisions
|
||||
|
||||
@example
|
||||
git show :/'as revision 23456'
|
||||
@end example
|
||||
|
||||
will show the svn changeset @samp{r23456}. With older Git versions searching in
|
||||
will show the svn changeset @var{r23456}. With older git versions searching in
|
||||
the @command{git log} output is the easiest option (especially if a pager with
|
||||
search capabilities is used).
|
||||
|
||||
This commit can be checked out with
|
||||
|
||||
@example
|
||||
git checkout -b svn_23456 :/'as revision 23456'
|
||||
@end example
|
||||
|
||||
or for Git < 1.7.1 with
|
||||
or for git < 1.7.1 with
|
||||
|
||||
@example
|
||||
git checkout -b svn_23456 $SHA1
|
||||
@@ -378,7 +373,7 @@ git checkout -b svn_23456 $SHA1
|
||||
where @var{$SHA1} is the commit hash from the @command{git log} output.
|
||||
|
||||
|
||||
@chapter Pre-push checklist
|
||||
@chapter pre-push checklist
|
||||
|
||||
Once you have a set of commits that you feel are ready for pushing,
|
||||
work through the following checklist to doublecheck everything is in
|
||||
@@ -389,7 +384,7 @@ Apply your common sense, but if in doubt, err on the side of caution.
|
||||
First, make sure that the commits and branches you are going to push
|
||||
match what you want pushed and that nothing is missing, extraneous or
|
||||
wrong. You can see what will be pushed by running the git push command
|
||||
with @option{--dry-run} first. And then inspecting the commits listed with
|
||||
with --dry-run first. And then inspecting the commits listed with
|
||||
@command{git log -p 1234567..987654}. The @command{git status} command
|
||||
may help in finding local changes that have been forgotten to be added.
|
||||
|
||||
@@ -398,7 +393,7 @@ Next let the code pass through a full run of our testsuite.
|
||||
@itemize
|
||||
@item @command{make distclean}
|
||||
@item @command{/path/to/ffmpeg/configure}
|
||||
@item @command{make fate}
|
||||
@item @command{make check}
|
||||
@item if fate fails due to missing samples run @command{make fate-rsync} and retry
|
||||
@end itemize
|
||||
|
||||
@@ -416,5 +411,5 @@ recommended.
|
||||
|
||||
@chapter Server Issues
|
||||
|
||||
Contact the project admins at @email{root@@ffmpeg.org} if you have technical
|
||||
problems with the Git server.
|
||||
Contact the project admins @email{root@@ffmpeg.org} if you have technical
|
||||
problems with the GIT server.
|
||||
|
457 doc/indevs.texi
@@ -51,18 +51,6 @@ ffmpeg -f alsa -i hw:0 alsaout.wav
|
||||
For more information see:
|
||||
@url{http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html}
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item sample_rate
|
||||
Set the sample rate in Hz. Default is 48000.
|
||||
|
||||
@item channels
|
||||
Set the number of channels. Default is 2.
|
||||
|
||||
@end table
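A capture command combining these options (a hypothetical example added here, not quoted from the original) could be:

@example
ffmpeg -f alsa -sample_rate 44100 -channels 1 -i hw:0 mono.wav
@end example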
|
||||
|
||||
@section avfoundation
|
||||
|
||||
AVFoundation input device.
|
||||
@@ -126,19 +114,6 @@ und the first one in this list is used instead. Available pixel formats are:
|
||||
bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
|
||||
yuv420p, nv12, yuyv422, gray}
|
||||
|
||||
@item -framerate
|
||||
Set the grabbing frame rate. Default is @code{ntsc}, corresponding to a
|
||||
frame rate of @code{30000/1001}.
|
||||
|
||||
@item -video_size
|
||||
Set the video frame size.
|
||||
|
||||
@item -capture_cursor
|
||||
Capture the mouse pointer. Default is 0.
|
||||
|
||||
@item -capture_mouse_clicks
|
||||
Capture the screen mouse clicks. Default is 0.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -175,99 +150,6 @@ $ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
|
||||
|
||||
BSD video input device.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item framerate
|
||||
Set the frame rate.
|
||||
|
||||
@item video_size
|
||||
Set the video frame size. Default is @code{vga}.
|
||||
|
||||
@item standard
|
||||
|
||||
Available values are:
|
||||
@table @samp
|
||||
@item pal
|
||||
|
||||
@item ntsc
|
||||
|
||||
@item secam
|
||||
|
||||
@item paln
|
||||
|
||||
@item palm
|
||||
|
||||
@item ntscj
|
||||
|
||||
@end table
|
||||
|
||||
@end table
|
||||
|
||||
@section decklink
|
||||
|
||||
The decklink input device provides capture capabilities for Blackmagic
|
||||
DeckLink devices.
|
||||
|
||||
To enable this input device, you need the Blackmagic DeckLink SDK and you
|
||||
need to configure with the appropriate @code{--extra-cflags}
|
||||
and @code{--extra-ldflags}.
|
||||
On Windows, you need to run the IDL files through @command{widl}.
|
||||
|
||||
DeckLink is very picky about the formats it supports. Pixel format is
|
||||
uyvy422 or v210, framerate and video size must be determined for your device with
|
||||
@command{-list_formats 1}. Audio sample rate is always 48 kHz and the number
|
||||
of channels can be 2, 8 or 16.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item list_devices
|
||||
If set to @option{true}, print a list of devices and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item list_formats
|
||||
If set to @option{true}, print a list of supported formats and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item bm_v210
|
||||
If set to @samp{1}, video is captured in 10 bit v210 instead
|
||||
of uyvy422. Not all Blackmagic devices support this option.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
List input devices:
|
||||
@example
|
||||
ffmpeg -f decklink -list_devices 1 -i dummy
|
||||
@end example
|
||||
|
||||
@item
|
||||
List supported formats:
|
||||
@example
|
||||
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 1080i50 (format 11):
|
||||
@example
|
||||
ffmpeg -f decklink -i 'Intensity Pro@@11' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 1080i50 10 bit:
|
||||
@example
|
||||
ffmpeg -bm_v210 1 -f decklink -i 'UltraStudio Mini Recorder@@11' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section dshow
|
||||
|
||||
Windows DirectShow input device.
|
||||
@@ -285,7 +167,7 @@ The input name should be in the format:
|
||||
@end example
|
||||
|
||||
where @var{TYPE} can be either @var{audio} or @var{video},
|
||||
and @var{NAME} is the device's name or alternative name..
|
||||
and @var{NAME} is the device's name.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@@ -318,11 +200,11 @@ If set to @option{true}, print a list of selected device's options
|
||||
and exit.
|
||||
|
||||
@item video_device_number
|
||||
Set video device number for devices with the same name (starts at 0,
|
||||
Set video device number for devices with same name (starts at 0,
|
||||
defaults to 0).
|
||||
|
||||
@item audio_device_number
|
||||
Set audio device number for devices with the same name (starts at 0,
|
||||
Set audio device number for devices with same name (starts at 0,
|
||||
defaults to 0).
|
||||
|
||||
@item pixel_format
|
||||
@@ -338,85 +220,6 @@ Setting this value too low can degrade performance.
|
||||
See also
|
||||
@url{http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx}
|
||||
|
||||
@item video_pin_name
|
||||
Select video capture pin to use by name or alternative name.
|
||||
|
||||
@item audio_pin_name
|
||||
Select audio capture pin to use by name or alternative name.
|
||||
|
||||
@item crossbar_video_input_pin_number
|
||||
Select video input pin number for crossbar device. This will be
|
||||
routed to the crossbar device's Video Decoder output pin.
|
||||
Note that changing this value can affect future invocations
|
||||
(sets a new default) until system reboot occurs.
|
||||
|
||||
@item crossbar_audio_input_pin_number
|
||||
Select audio input pin number for crossbar device. This will be
|
||||
routed to the crossbar device's Audio Decoder output pin.
|
||||
Note that changing this value can affect future invocations
|
||||
(sets a new default) until system reboot occurs.
|
||||
|
||||
@item show_video_device_dialog
|
||||
If set to @option{true}, before capture starts, popup a display dialog
|
||||
to the end user, allowing them to change video filter properties
|
||||
and configurations manually.
|
||||
Note that for crossbar devices, adjusting values in this dialog
|
||||
may be needed at times to toggle between PAL (25 fps) and NTSC (29.97)
|
||||
input frame rates, sizes, interlacing, etc. Changing these values can
|
||||
enable different scan rates/frame rates and avoiding green bars at
|
||||
the bottom, flickering scan lines, etc.
|
||||
Note that with some devices, changing these properties can also affect future
|
||||
invocations (sets new defaults) until system reboot occurs.
|
||||
|
||||
@item show_audio_device_dialog
|
||||
If set to @option{true}, before capture starts, popup a display dialog
|
||||
to the end user, allowing them to change audio filter properties
|
||||
and configurations manually.
|
||||
|
||||
@item show_video_crossbar_connection_dialog
|
||||
If set to @option{true}, before capture starts, popup a display
|
||||
dialog to the end user, allowing them to manually
|
||||
modify crossbar pin routings, when it opens a video device.
|
||||
|
||||
@item show_audio_crossbar_connection_dialog
|
||||
If set to @option{true}, before capture starts, popup a display
|
||||
dialog to the end user, allowing them to manually
|
||||
modify crossbar pin routings, when it opens an audio device.
|
||||
|
||||
@item show_analog_tv_tuner_dialog
|
||||
If set to @option{true}, before capture starts, popup a display
|
||||
dialog to the end user, allowing them to manually
|
||||
modify TV channels and frequencies.
|
||||
|
||||
@item show_analog_tv_tuner_audio_dialog
|
||||
If set to @option{true}, before capture starts, popup a display
|
||||
dialog to the end user, allowing them to manually
|
||||
modify TV audio (like mono vs. stereo, Language A,B or C).
|
||||
|
||||
@item audio_device_load
|
||||
Load an audio capture filter device from file instead of searching
|
||||
it by name. It may load additional parameters too, if the filter
|
||||
supports the serialization of its properties to.
|
||||
To use this an audio capture source has to be specified, but it can
|
||||
be anything even fake one.
|
||||
|
||||
@item audio_device_save
|
||||
Save the currently used audio capture filter device and its
|
||||
parameters (if the filter supports it) to a file.
|
||||
If a file with the same name exists it will be overwritten.
|
||||
|
||||
@item video_device_load
|
||||
Load a video capture filter device from file instead of searching
|
||||
it by name. It may load additional parameters too, if the filter
|
||||
supports the serialization of its properties to.
|
||||
To use this a video capture source has to be specified, but it can
|
||||
be anything even fake one.
|
||||
|
||||
@item video_device_save
|
||||
Save the currently used video capture filter device and its
|
||||
parameters (if the filter supports it) to a file.
|
||||
If a file with the same name exists it will be overwritten.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -453,46 +256,12 @@ Print the list of supported options in selected device and exit:
|
||||
$ ffmpeg -list_options true -f dshow -i video="Camera"
|
||||
@end example
|
||||
|
||||
@item
|
||||
Specify pin names to capture by name or alternative name, specify alternative device name:
|
||||
@example
|
||||
$ ffmpeg -f dshow -audio_pin_name "Audio Out" -video_pin_name 2 -i video=video="@@device_pnp_\\?\pci#ven_1a0a&dev_6200&subsys_62021461&rev_01#4&e2c7dd6&0&00e1#@{65e8773d-8f56-11d0-a3b9-00a0c9223196@}\@{ca465100-deb0-4d59-818f-8c477184adf6@}":audio="Microphone"
|
||||
@end example
|
||||
|
||||
@item
|
||||
Configure a crossbar device, specifying crossbar pins, allow user to adjust video capture properties at startup:
|
||||
@example
|
||||
$ ffmpeg -f dshow -show_video_device_dialog true -crossbar_video_input_pin_number 0
|
||||
-crossbar_audio_input_pin_number 3 -i video="AVerMedia BDA Analog Capture":audio="AVerMedia BDA Analog Capture"
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section dv1394
|
||||
|
||||
Linux DV 1394 input device.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item framerate
|
||||
Set the frame rate. Default is 25.
|
||||
|
||||
@item standard
|
||||
|
||||
Available values are:
|
||||
@table @samp
|
||||
@item pal
|
||||
|
||||
@item ntsc
|
||||
|
||||
@end table
|
||||
|
||||
Default value is @code{ntsc}.
|
||||
|
||||
@end table
|
||||
|
||||
@section fbdev
|
||||
|
||||
Linux framebuffer input device.
|
||||
@@ -505,27 +274,18 @@ console. It is accessed through a file device node, usually
|
||||
For more detailed information read the file
|
||||
Documentation/fb/framebuffer.txt included in the Linux source tree.
|
||||
|
||||
See also @url{http://linux-fbdev.sourceforge.net/}, and fbset(1).
|
||||
|
||||
To record from the framebuffer device @file{/dev/fb0} with
|
||||
@command{ffmpeg}:
|
||||
@example
|
||||
ffmpeg -f fbdev -framerate 10 -i /dev/fb0 out.avi
|
||||
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
|
||||
@end example
|
||||
|
||||
You can take a single screenshot image with the command:
|
||||
@example
|
||||
ffmpeg -f fbdev -framerate 1 -i /dev/fb0 -frames:v 1 screenshot.jpeg
|
||||
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
|
||||
@end example
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item framerate
|
||||
Set the frame rate. Default is 25.
|
||||
|
||||
@end table
|
||||
See also @url{http://linux-fbdev.sourceforge.net/}, and fbset(1).
|
||||
|
||||
@section gdigrab
|
||||
|
||||
@@ -630,7 +390,7 @@ not work and result in undefined behavior.
|
||||
The values @option{auto}, @option{dv} and @option{hdv} are supported.
|
||||
|
||||
@item dvbuffer
|
||||
Set maximum size of buffer for incoming data, in frames. For DV, this
|
||||
Set maxiumum size of buffer for incoming data, in frames. For DV, this
|
||||
is an exact value. For HDV, it is not frame exact, since HDV does
|
||||
not have a fixed frame size.
|
||||
|
||||
@@ -711,15 +471,6 @@ $ jack_connect metro:120_bpm ffmpeg:input_1
|
||||
For more information read:
|
||||
@url{http://jackaudio.org/}
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item channels
|
||||
Set the number of channels. Default is 2.
|
||||
|
||||
@end table
|
||||
|
||||
@section lavfi
|
||||
|
||||
Libavfilter input virtual device.
|
||||
@@ -744,14 +495,6 @@ generated by the device.
|
||||
The first unlabelled output is automatically assigned to the "out0"
|
||||
label, but all the others need to be specified explicitly.
|
||||
|
||||
The suffix "+subcc" can be appended to the output label to create an extra
|
||||
stream with the closed captions packets attached to that output
|
||||
(experimental; only for EIA-608 / CEA-708 for now).
|
||||
The subcc streams are created after all the normal streams, in the order of
|
||||
the corresponding stream.
|
||||
For example, if there is "out19+subcc", "out7+subcc" and up to "out42", the
|
||||
stream #43 is subcc for stream #7 and stream #44 is subcc for stream #19.
|
||||
|
||||
If not specified defaults to the filename specified for the input
|
||||
device.
|
||||
|
||||
@@ -760,9 +503,6 @@ Set the filename of the filtergraph to be read and sent to the other
|
||||
filters. Syntax of the filtergraph is the same as the one specified by
|
||||
the option @var{graph}.
|
||||
|
||||
@item dumpgraph
|
||||
Dump graph to stderr.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -801,57 +541,24 @@ Read an audio stream and a video stream and play it back with
|
||||
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
|
||||
@end example
|
||||
|
||||
@item
|
||||
Dump decoded frames to images and closed captions to a file (experimental):
|
||||
@example
|
||||
ffmpeg -f lavfi -i "movie=test.ts[out0+subcc]" -map v frame%08d.png -map s -c copy -f rawvideo subcc.bin
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section libcdio
|
||||
|
||||
Audio-CD input device based on libcdio.
|
||||
Audio-CD input device based on cdio.
|
||||
|
||||
To enable this input device during configuration you need libcdio
|
||||
installed on your system. It requires the configure option
|
||||
installed on your system. Requires the configure option
|
||||
@code{--enable-libcdio}.
|
||||
|
||||
This device allows playing and grabbing from an Audio-CD.
|
||||
|
||||
For example to copy with @command{ffmpeg} the entire Audio-CD in @file{/dev/sr0},
|
||||
For example to copy with @command{ffmpeg} the entire Audio-CD in /dev/sr0,
|
||||
you may run the command:
|
||||
@example
|
||||
ffmpeg -f libcdio -i /dev/sr0 cd.wav
|
||||
@end example
|
||||
|
||||
@subsection Options
|
||||
@table @option
|
||||
@item speed
|
||||
Set drive reading speed. Default value is 0.
|
||||
|
||||
The speed is specified CD-ROM speed units. The speed is set through
|
||||
the libcdio @code{cdio_cddap_speed_set} function. On many CD-ROM
|
||||
drives, specifying a value too large will result in using the fastest
|
||||
speed.
|
||||
|
||||
@item paranoia_mode
|
||||
Set paranoia recovery mode flags. It accepts one of the following values:
|
||||
|
||||
@table @samp
|
||||
@item disable
|
||||
@item verify
|
||||
@item overlap
|
||||
@item neverskip
|
||||
@item full
|
||||
@end table
|
||||
|
||||
Default value is @samp{disable}.
|
||||
|
||||
For more information about the available recovery modes, consult the
|
||||
paranoia project documentation.
|
||||
@end table
|
||||
|
||||
@section libdc1394
|
||||
|
||||
IIDC1394 input device, based on libdc1394 and libraw1394.
|
||||
@@ -964,19 +671,6 @@ ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
|
||||
For more information about OSS see:
|
||||
@url{http://manuals.opensound.com/usersguide/dsp.html}
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item sample_rate
|
||||
Set the sample rate in Hz. Default is 48000.
|
||||
|
||||
@item channels
|
||||
Set the number of channels. Default is 2.
|
||||
|
||||
@end table
|
||||
|
||||
|
||||
@section pulse
|
||||
|
||||
PulseAudio input device.
|
||||
@@ -1017,10 +711,6 @@ Specify the number of bytes per frame, by default it is set to 1024.
|
||||
@item fragment_size
|
||||
Specify the minimal buffering fragment in PulseAudio, it will affect the
|
||||
audio latency. By default it is unset.
|
||||
|
||||
@item wallclock
|
||||
Set the initial PTS using the current time. Default is 1.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -1056,22 +746,6 @@ ffmpeg -f qtkit -i "default" out.mpg
|
||||
ffmpeg -f qtkit -list_devices true -i ""
|
||||
@end example
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item frame_rate
|
||||
Set frame rate. Default is 30.
|
||||
|
||||
@item list_devices
|
||||
If set to @code{true}, print a list of devices and exit. Default is
|
||||
@code{false}.
|
||||
|
||||
@item video_device_index
|
||||
Select the video device by index for devices with the same name (starts at 0).
|
||||
|
||||
@end table
|
||||
|
||||
@section sndio
|
||||
|
||||
sndio input device.
|
||||
@@ -1089,18 +763,6 @@ command:
|
||||
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
|
||||
@end example
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item sample_rate
|
||||
Set the sample rate in Hz. Default is 48000.
|
||||
|
||||
@item channels
|
||||
Set the number of channels. Default is 2.
|
||||
|
||||
@end table
|
||||
|
||||
@section video4linux2, v4l2
|
||||
|
||||
Video4Linux2 input video device.
|
||||
@@ -1133,12 +795,6 @@ conversion into the real time clock.
|
||||
Some usage examples of the video4linux2 device with @command{ffmpeg}
|
||||
and @command{ffplay}:
|
||||
@itemize
|
||||
@item
|
||||
List supported formats for a video4linux2 device:
|
||||
@example
|
||||
ffplay -f video4linux2 -list_formats all /dev/video0
|
||||
@end example
|
||||
|
||||
@item
|
||||
Grab and show the input of a video4linux2 device:
|
||||
@example
|
||||
@@ -1223,10 +879,6 @@ Force conversion from monotonic to absolute timestamps.
|
||||
@end table
|
||||
|
||||
Default value is @code{default}.
|
||||
|
||||
@item use_libv4l2
|
||||
Use libv4l2 (v4l-utils) conversion functions. Default is 0.
|
||||
|
||||
@end table
|
||||
|
||||
@section vfwcap
|
||||
@@ -1237,29 +889,12 @@ The filename passed as input is the capture driver number, ranging from
|
||||
0 to 9. You may use "list" as filename to print a list of drivers. Any
|
||||
other filename will be interpreted as device number 0.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item video_size
|
||||
Set the video frame size.
|
||||
|
||||
@item framerate
|
||||
Set the grabbing frame rate. Default value is @code{ntsc},
|
||||
corresponding to a frame rate of @code{30000/1001}.
|
||||
|
||||
@end table
|
||||
|
||||
@section x11grab
|
||||
|
||||
X11 video input device.
|
||||
|
||||
To enable this input device during configuration you need libxcb
|
||||
installed on your system. It will be automatically detected during
|
||||
configuration.
|
||||
|
||||
Alternatively, the configure option @option{--enable-x11grab} exists
|
||||
for legacy Xlib users.
|
||||
Depends on X11, Xext, and Xfixes. Requires the configure option
|
||||
@code{--enable-x11grab}.
|
||||
|
||||
This device allows one to capture a region of an X11 display.
|
||||
|
||||
@@ -1277,12 +912,10 @@ omitted, and defaults to "localhost". The environment variable
|
||||
area with respect to the top-left border of the X11 screen. They
|
||||
default to 0.
|
||||
|
||||
Check the X11 documentation (e.g. @command{man X}) for more detailed
|
||||
information.
|
||||
Check the X11 documentation (e.g. man X) for more detailed information.
|
||||
|
||||
Use the @command{xdpyinfo} program for getting basic information about
|
||||
the properties of your X11 display (e.g. grep for "name" or
|
||||
"dimensions").
|
||||
Use the @command{dpyinfo} program for getting basic information about the
|
||||
properties of your X11 display (e.g. grep for "name" or "dimensions").
|
||||
|
||||
For example to grab from @file{:0.0} using @command{ffmpeg}:
|
||||
@example
|
||||
@@ -1331,10 +964,6 @@ If @var{show_region} is specified with @code{1}, then the grabbing
|
||||
region will be indicated on screen. With this option, it is easy to
|
||||
know what is being grabbed if only a portion of the screen is grabbed.
|
||||
|
||||
@item region_border
|
||||
Set the region border thickness if @option{-show_region 1} is used.
|
||||
Range is 1 to 128 and default is 3 (XCB-based x11grab only).
|
||||
|
||||
For example:
|
||||
@example
|
||||
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
|
||||
@@ -1350,19 +979,61 @@ Set the video frame size. Default value is @code{vga}.
|
||||
|
||||
@item use_shm
|
||||
Use the MIT-SHM extension for shared memory. Default value is @code{1}.
|
||||
It may be necessary to disable it for remote displays (legacy x11grab
|
||||
only).
|
||||
It may be necessary to disable it for remote displays.
|
||||
@end table
|
||||
|
||||
@subsection @var{grab_x} @var{grab_y} AVOption
|
||||
@section decklink
|
||||
|
||||
The syntax is:
|
||||
The decklink input device provides capture capabilities for Blackmagic
|
||||
DeckLink devices.
|
||||
|
||||
To enable this input device, you need the Blackmagic DeckLink SDK and you
|
||||
need to configure with the appropriate @code{--extra-cflags}
|
||||
and @code{--extra-ldflags}.
|
||||
On Windows, you need to run the IDL files through @command{widl}.
|
||||
|
||||
DeckLink is very picky about the formats it supports. Pixel format is always
|
||||
uyvy422, framerate and video size must be determined for your device with
|
||||
@command{-list_formats 1}. Audio sample rate is always 48 kHz and the number
|
||||
of channels currently is limited to 2 (stereo).
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item list_devices
|
||||
If set to @option{true}, print a list of devices and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item list_formats
|
||||
If set to @option{true}, print a list of supported formats and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
List input devices:
|
||||
@example
|
||||
-grab_x @var{x_offset} -grab_y @var{y_offset}
|
||||
ffmpeg -f decklink -list_devices 1 -i dummy
|
||||
@end example
|
||||
|
||||
Set the grabbing region coordinates. They are expressed as offset from the top left
|
||||
corner of the X11 window. The default value is 0.
|
||||
@item
|
||||
List supported formats:
|
||||
@example
|
||||
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 1080i50 (format 11):
|
||||
@example
|
||||
ffmpeg -f decklink -i 'Intensity Pro@@11' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
|
||||
@c man end INPUT DEVICES
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libavcodec Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libavdevice Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libavfilter Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libavformat Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libavutil Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libswresample Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libswscale Documentation
|
||||
@titlepage
|
||||
|
@@ -12,10 +12,10 @@ A file consists of a header and a number of metadata tags divided into sections,
|
||||
each on its own line.
|
||||
|
||||
@item
|
||||
The header is a @samp{;FFMETADATA} string, followed by a version number (now 1).
|
||||
The header is a ';FFMETADATA' string, followed by a version number (now 1).
|
||||
|
||||
@item
|
||||
Metadata tags are of the form @samp{key=value}
|
||||
Metadata tags are of the form 'key=value'
|
||||
|
||||
@item
|
||||
Immediately after header follows global metadata
|
||||
@@ -26,30 +26,26 @@ metadata.
|
||||
|
||||
@item
|
||||
A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in
|
||||
brackets (@samp{[}, @samp{]}) and ends with next section or end of file.
|
||||
brackets ('[', ']') and ends with next section or end of file.
|
||||
|
||||
@item
|
||||
At the beginning of a chapter section there may be an optional timebase to be
|
||||
used for start/end values. It must be in form
|
||||
@samp{TIMEBASE=@var{num}/@var{den}}, where @var{num} and @var{den} are
|
||||
integers. If the timebase is missing then start/end times are assumed to
|
||||
used for start/end values. It must be in form 'TIMEBASE=num/den', where num and
|
||||
den are integers. If the timebase is missing then start/end times are assumed to
|
||||
be in milliseconds.
|
||||
|
||||
Next a chapter section must contain chapter start and end times in form
|
||||
@samp{START=@var{num}}, @samp{END=@var{num}}, where @var{num} is a positive
|
||||
integer.
|
||||
'START=num', 'END=num', where num is a positive integer.
|
||||
|
||||
@item
|
||||
Empty lines and lines starting with @samp{;} or @samp{#} are ignored.
|
||||
Empty lines and lines starting with ';' or '#' are ignored.
|
||||
|
||||
@item
|
||||
Metadata keys or values containing special characters (@samp{=}, @samp{;},
|
||||
@samp{#}, @samp{\} and a newline) must be escaped with a backslash @samp{\}.
|
||||
Metadata keys or values containing special characters ('=', ';', '#', '\' and a
|
||||
newline) must be escaped with a backslash '\'.
|
||||
|
||||
@item
|
||||
Note that whitespace in metadata (e.g. @samp{foo = bar}) is considered to be
|
||||
a part of the tag (in the example above key is @samp{foo }, value is
|
||||
@samp{ bar}).
|
||||
Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of
|
||||
the tag (in the example above key is 'foo ', value is ' bar').
|
||||
@end enumerate
|
||||
|
||||
A ffmetadata file might look like this:
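The example file itself is not shown in this hunk; an illustrative file consistent with the rules above (added here, not quoted from the original) could be:

@example
;FFMETADATA1
title=Example Title
artist=Example Artist

[CHAPTER]
TIMEBASE=1/1000
START=0
END=60000
title=Chapter \#1
@end example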
|
||||
|
@@ -47,16 +47,12 @@ Files that have MIPS copyright notice in them:
* libavutil/mips/
      float_dsp_mips.c
      libm_mips.h
      softfloat_tables.h
* libavcodec/
      fft_fixed_32.c
      fft_init_table.c
      fft_table.h
      mdct_fixed_32.c
* libavcodec/mips/
      aacdec_fixed.c
      aacsbr_fixed.c
      aacsbr_template.c
      aaccoder_mips.c
      aacpsy_mips.h
      ac3dsp_mips.c
@@ -54,7 +54,7 @@ thread.
If the codec allocates writable tables in its init(), add an init_thread_copy()
which re-allocates them for other threads.

Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little
Add CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little
speed gain at this point but it should work.

If there are inter-frame dependencies, so the codec calls
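A minimal sketch of this step in a decoder definition (illustrative only: the "foo" codec, its context and callbacks are placeholders, and the field names follow the libavcodec API of this period):

    static int foo_init_thread_copy(AVCodecContext *avctx)
    {
        FooContext *s = avctx->priv_data;
        /* Re-allocate the writable tables for this worker thread. */
        s->tables = av_malloc(TABLE_SIZE * sizeof(*s->tables));
        return s->tables ? 0 : AVERROR(ENOMEM);
    }

    AVCodec ff_foo_decoder = {
        .name             = "foo",
        .type             = AVMEDIA_TYPE_VIDEO,
        .id               = AV_CODEC_ID_FOO,
        .priv_data_size   = sizeof(FooContext),
        .init             = foo_decode_init,
        .close            = foo_decode_end,
        .decode           = foo_decode_frame,
        .init_thread_copy = ONLY_IF_THREADS_ENABLED(foo_init_thread_copy),
        /* Declare support for frame-level multithreading. */
        .capabilities     = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
    };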
224 doc/muxers.texi
@@ -254,71 +254,6 @@ and it is not to be confused with the segment filename sequence number
|
||||
which can be cyclic, for example if the @option{wrap} option is
|
||||
specified.
|
||||
|
||||
@item hls_segment_filename @var{filename}
|
||||
Set the segment filename. Unless hls_flags single_file is set @var{filename}
|
||||
is used as a string format with the segment number:
|
||||
@example
|
||||
ffmpeg in.nut -hls_segment_filename 'file%03d.ts' out.m3u8
|
||||
@end example
|
||||
This example will produce the playlist, @file{out.m3u8}, and segment files:
|
||||
@file{file000.ts}, @file{file001.ts}, @file{file002.ts}, etc.
|
||||
|
||||
@item hls_key_info_file @var{key_info_file}
|
||||
Use the information in @var{key_info_file} for segment encryption. The first
|
||||
line of @var{key_info_file} specifies the key URI written to the playlist. The
|
||||
key URL is used to access the encryption key during playback. The second line
|
||||
specifies the path to the key file used to obtain the key during the encryption
|
||||
process. The key file is read as a single packed array of 16 octets in binary
|
||||
format. The optional third line specifies the initialization vector (IV) as a
|
||||
hexadecimal string to be used instead of the segment sequence number (default)
|
||||
for encryption. Changes to @var{key_info_file} will result in segment
|
||||
encryption with the new key/IV and an entry in the playlist for the new key
|
||||
URI/IV.
|
||||
|
||||
Key info file format:
|
||||
@example
|
||||
@var{key URI}
|
||||
@var{key file path}
|
||||
@var{IV} (optional)
|
||||
@end example
|
||||
|
||||
Example key URIs:
|
||||
@example
|
||||
http://server/file.key
|
||||
/path/to/file.key
|
||||
file.key
|
||||
@end example
|
||||
|
||||
Example key file paths:
|
||||
@example
|
||||
file.key
|
||||
/path/to/file.key
|
||||
@end example
|
||||
|
||||
Example IV:
|
||||
@example
|
||||
0123456789ABCDEF0123456789ABCDEF
|
||||
@end example
|
||||
|
||||
Key info file example:
|
||||
@example
|
||||
http://server/file.key
|
||||
/path/to/file.key
|
||||
0123456789ABCDEF0123456789ABCDEF
|
||||
@end example
|
||||
|
||||
Example shell script:
|
||||
@example
|
||||
#!/bin/sh
|
||||
BASE_URL=$@{1:-'.'@}
|
||||
openssl rand 16 > file.key
|
||||
echo $BASE_URL/file.key > file.keyinfo
|
||||
echo file.key >> file.keyinfo
|
||||
echo $(openssl rand -hex 16) >> file.keyinfo
|
||||
ffmpeg -f lavfi -re -i testsrc -c:v h264 -hls_flags delete_segments \
|
||||
-hls_key_info_file file.keyinfo out.m3u8
|
||||
@end example
|
||||
|
||||
@item hls_flags single_file
|
||||
If this flag is set, the muxer will store all segments in a single MPEG-TS
|
||||
file, and will use byte ranges in the playlist. HLS playlists generated with
|
||||
@@ -329,10 +264,6 @@ ffmpeg -i in.nut -hls_flags single_file out.m3u8
|
||||
@end example
|
||||
Will produce the playlist, @file{out.m3u8}, and a single segment file,
|
||||
@file{out.ts}.
|
||||
|
||||
@item hls_flags delete_segments
|
||||
Segment files removed from the playlist are deleted after a period of time
|
||||
equal to the duration of the segment plus the duration of the playlist.
|
||||
@end table

@anchor{ico}
@@ -437,7 +368,8 @@ ffmpeg -f v4l2 -r 1 -i /dev/video0 -f image2 -strftime 1 "%Y-%m-%d_%H-%M-%S.jpg"

@table @option
@item start_number
Start the sequence from the specified number. Default value is 0.
Start the sequence from the specified number. Default value is 1. Must
be a non-negative number.

@item update
If set to 1, the filename will always be interpreted as just a
@@ -667,13 +599,6 @@ point on IIS with this muxer. Example:
ffmpeg -re @var{<normal input/transcoding options>} -movflags isml+frag_keyframe -f ismv http://server/publishingpoint.isml/Streams(Encoder1)
@end example

@subsection Audible AAX

Audible AAX files are encrypted M4B files, and they can be decrypted by specifying a 4 byte activation secret.
@example
ffmpeg -activation_bytes 1CEB00DA -i test.aax -vn -c:a copy output.mp4
@end example

@section mp3

The MP3 muxer writes a raw MP3 stream with the following optional features:
@@ -752,9 +677,6 @@ Set the transport_stream_id (default 0x0001). This identifies a
transponder in DVB.
@item -mpegts_service_id @var{number}
Set the service_id (default 0x0001) also known as program in DVB.
@item -mpegts_service_type @var{number}
Set the program service_type (default @var{digital_tv}), see below
a list of pre defined values.
@item -mpegts_pmt_start_pid @var{number}
Set the first PID for PMT (default 0x1000, max 0x1f00).
@item -mpegts_start_pid @var{number}
@@ -766,10 +688,6 @@ Set a constant muxrate (default VBR).
@item -pcr_period @var{numer}
Override the default PCR retransmission time (default 20ms), ignored
if variable muxrate is selected.
@item pat_period @var{number}
Maximal time in seconds between PAT/PMT tables.
@item sdt_period @var{number}
Maximal time in seconds between SDT tables.
@item -pes_payload_size @var{number}
Set minimum PES packet payload in bytes.
@item -mpegts_flags @var{flags}
@@ -793,27 +711,6 @@ ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
@end example
@end table

Option mpegts_service_type accepts the following values:

@table @option
@item hex_value
Any hexdecimal value between 0x01 to 0xff as defined in ETSI 300 468.
@item digital_tv
Digital TV service.
@item digital_radio
Digital Radio service.
@item teletext
Teletext service.
@item advanced_codec_digital_radio
Advanced Codec Digital Radio service.
@item mpeg2_digital_hdtv
MPEG2 Digital HDTV service.
@item advanced_codec_digital_sdtv
Advanced Codec Digital SDTV service.
@item advanced_codec_digital_hdtv
Advanced Codec Digital HDTV service.
@end table

Option mpegts_flags may take a set of such flags:

@table @option
@@ -821,8 +718,6 @@ Option mpegts_flags may take a set of such flags:
Reemit PAT/PMT before writing the next packet.
@item latm
Use LATM packetization for AAC.
@item pat_pmt_at_frames
Reemit PAT and PMT at each video frame.
@end table

@subsection Example
@@ -873,7 +768,7 @@ Change the syncpoint usage in nut:
sensitive and seeking is not possible. Also in general the overhead from
syncpoints is negligible. Note, -@code{write_index} 0 can be used to disable
all growing data tables, allowing to mux endless streams with limited memory
and without these disadvantages.
and wihout these disadvantages.
@item @var{timestamped} extend the syncpoint with a wallclock field.
@end table
The @var{none} and @var{timestamped} flags are experimental.
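For instance, an endless stream could be muxed with the growing tables
disabled roughly as follows (file names are illustrative):
@example
ffmpeg -i INPUT -f nut -write_index 0 -syncpoints none OUTPUT.nut
@end example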
@@ -898,11 +793,6 @@ is 1 second. A value of 0 will fill all segments, making pages as large as
possible. A value of 1 will effectively use 1 packet-per-page in most
situations, giving a small seek granularity at the cost of additional container
overhead.
@item -serial_offset @var{value}
Serial value from which to set the streams serial number.
Setting it to different and sufficiently large values ensures that the produced
ogg files can be safely chained.

@end table
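As a sketch of the chaining case mentioned above (file names and offsets are
illustrative), two outputs written with distinct serial offsets can simply be
concatenated:
@example
ffmpeg -i part1.wav -serial_offset 1000 part1.ogg
ffmpeg -i part2.wav -serial_offset 2000 part2.ogg
cat part1.ogg part2.ogg > chained.ogg
@end example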

@anchor{segment}
@@ -911,9 +801,8 @@ ogg files can be safely chained.
Basic stream segmenter.

This muxer outputs streams to a number of separate files of nearly
fixed duration. Output filename pattern can be set in a fashion
similar to @ref{image2}, or by using a @code{strftime} template if
the @option{strftime} option is enabled.
fixed duration. Output filename pattern can be set in a fashion similar to
@ref{image2}.

@code{stream_segment} is a variant of the muxer used to write to
streaming output formats, i.e. which do not require global headers,
@@ -977,6 +866,13 @@ Allow caching (only affects M3U8 list files).
Allow live-friendly file generation.
@end table

@item segment_list_type @var{type}
Select the listing format.
@table @option
@item @var{flat} use a simple flat list of entries.
@item @var{hls} use a m3u8-like structure.
@end table

@item segment_list_size @var{size}
Update the list file so that it contains at most @var{size}
segments. If 0 the list file will contain all the segments. Default
@@ -986,9 +882,6 @@ value is 0.
Prepend @var{prefix} to each entry. Useful to generate absolute paths.
By default no prefix is applied.

@item segment_list_type @var{type}
Select the listing format.

The following values are recognized:
@table @samp
@item flat
@@ -1089,18 +982,6 @@ Wrap around segment index once it reaches @var{limit}.
@item segment_start_number @var{number}
Set the sequence number of the first segment. Defaults to @code{0}.

@item strftime @var{1|0}
Use the @code{strftime} function to define the name of the new
segments to write. If this is selected, the output segment name must
contain a @code{strftime} function template. Default value is
@code{0}.

@item break_non_keyframes @var{1|0}
If enabled, allow segments to start on frames other than keyframes. This
improves behavior on some players when the time between keyframes is
inconsistent, but may make things worse on others, and can cause some oddities
during seeking. Defaults to @code{0}.

@item reset_timestamps @var{1|0}
Reset timestamps at the begin of each segment, so that each segment
will start with near-zero timestamps. It is meant to ease the playback
@@ -1281,17 +1162,7 @@ is the @option{global_header} flag.

WebM DASH Manifest muxer.

This muxer implements the WebM DASH Manifest specification to generate the DASH
manifest XML. It also supports manifest generation for DASH live streams.

For more information see:

@itemize @bullet
@item
WebM DASH Specification: @url{https://sites.google.com/a/webmproject.org/wiki/adaptive-streaming/webm-dash-specification}
@item
ISO DASH Specification: @url{http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip}
@end itemize
This muxer implements the WebM DASH Manifest specification to generate the DASH manifest XML.

@subsection Options

@@ -1302,32 +1173,6 @@ This muxer supports the following options:
This option has the following syntax: "id=x,streams=a,b,c id=y,streams=d,e" where x and y are the
unique identifiers of the adaptation sets and a,b,c,d and e are the indices of the corresponding
audio and video streams. Any number of adaptation sets can be added using this option.

@item live
Set this to 1 to create a live stream DASH Manifest. Default: 0.

@item chunk_start_index
Start index of the first chunk. This will go in the @samp{startNumber} attribute
of the @samp{SegmentTemplate} element in the manifest. Default: 0.

@item chunk_duration_ms
Duration of each chunk in milliseconds. This will go in the @samp{duration}
attribute of the @samp{SegmentTemplate} element in the manifest. Default: 1000.

@item utc_timing_url
URL of the page that will return the UTC timestamp in ISO format. This will go
in the @samp{value} attribute of the @samp{UTCTiming} element in the manifest.
Default: None.

@item time_shift_buffer_depth
Smallest time (in seconds) shifting buffer for which any Representation is
guaranteed to be available. This will go in the @samp{timeShiftBufferDepth}
attribute of the @samp{MPD} element. Default: 60.

@item minimum_update_period
Minimum update period (in seconds) of the manifest. This will go in the
@samp{minimumUpdatePeriod} attribute of the @samp{MPD} element. Default: 0.

@end table

@subsection Example
@@ -1343,47 +1188,4 @@ ffmpeg -f webm_dash_manifest -i video1.webm \
manifest.xml
@end example

@section webm_chunk

WebM Live Chunk Muxer.

This muxer writes out WebM headers and chunks as separate files which can be
consumed by clients that support WebM Live streams via DASH.

@subsection Options

This muxer supports the following options:

@table @option
@item chunk_start_index
Index of the first chunk (defaults to 0).

@item header
Filename of the header where the initialization data will be written.

@item audio_chunk_duration
Duration of each audio chunk in milliseconds (defaults to 5000).
@end table

@subsection Example
@example
ffmpeg -f v4l2 -i /dev/video0 \
-f alsa -i hw:0 \
-map 0:0 \
-c:v libvpx-vp9 \
-s 640x360 -keyint_min 30 -g 30 \
-f webm_chunk \
-header webm_live_video_360.hdr \
-chunk_start_index 1 \
webm_live_video_360_%d.chk \
-map 1:0 \
-c:a libvorbis \
-b:a 128k \
-f webm_chunk \
-header webm_live_audio_128.hdr \
-chunk_start_index 1 \
-audio_chunk_duration 1000 \
webm_live_audio_128_%d.chk
@end example

@c man end MUXERS
@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle NUT

@@ -191,11 +191,6 @@ __asm__() block.
Use external asm (nasm/yasm) or inline asm (__asm__()), do not use intrinsics.
The latter requires a good optimizing compiler which gcc is not.

When debugging a x86 external asm compilation issue, if lost in the macro
expansions, add DBG=1 to your make command-line: the input file will be
preprocessed, stripped of the debug/empty lines, then compiled, showing the
actual lines causing issues.

Inline asm vs. external asm
---------------------------
Both inline asm (__asm__("..") in a .c file, handled by a compiler such as gcc)
@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle Platform Specific Information
@titlepage
@@ -97,9 +96,9 @@ the FFmpeg Windows Help Forum at @url{http://ffmpeg.zeranoe.com/forum/}.

@section Native Windows compilation using MinGW or MinGW-w64

FFmpeg can be built to run natively on Windows using the MinGW-w64
toolchain. Install the latest versions of MSYS2 and MinGW-w64 from
@url{http://msys2.github.io/} and/or @url{http://mingw-w64.sourceforge.net/}.
FFmpeg can be built to run natively on Windows using the MinGW or MinGW-w64
toolchains. Install the latest versions of MSYS and MinGW or MinGW-w64 from
@url{http://www.mingw.org/} or @url{http://mingw-w64.sourceforge.net/}.
You can find detailed installation instructions in the download section and
the FAQ.
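Once that environment is in place, a native build typically boils down to the
usual configure/make sequence, roughly as follows (options are illustrative):
@example
./configure --enable-gpl
make -r -j4
make install
@end example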

@@ -107,7 +106,7 @@ Notes:

@itemize

@item Building natively using MSYS2 can be sped up by disabling implicit rules
@item Building natively using MSYS can be sped up by disabling implicit rules
in the Makefile by calling @code{make -r} instead of plain @code{make}. This
speed up is close to non-existent for normal one-off builds and is only
noticeable when running make for a second time (for example during
@@ -134,12 +133,13 @@ You will need the following prerequisites:
(if using MSVC 2012 or earlier)
@item @uref{http://code.google.com/p/msinttypes/, msinttypes}
(if using MSVC 2012 or earlier)
@item @uref{http://msys2.github.io/, MSYS2}
@item @uref{http://www.mingw.org/, MSYS}
@item @uref{http://yasm.tortall.net/, YASM}
(Also available via MSYS2's package manager.)
@item @uref{http://gnuwin32.sourceforge.net/packages/bc.htm, bc for Windows} if
you want to run @uref{fate.html, FATE}.
@end itemize

To set up a proper environment in MSYS2, you need to run @code{msys_shell.bat} from
To set up a proper environment in MSYS, you need to run @code{msys.bat} from
the Visual Studio or Intel Compiler command prompt.

Place @code{yasm.exe} somewhere in your @code{PATH}. If using MSVC 2012 or
@@ -175,6 +175,12 @@ Notes:

@itemize

@item It is possible that coreutils' @code{link.exe} conflicts with MSVC's linker.
You can find out by running @code{which link} to see which @code{link.exe} you
are using. If it is located at @code{/bin/link.exe}, then you have the wrong one
in your @code{PATH}. Either move or remove that copy, or make sure MSVC's
@code{link.exe} takes precedence in your @code{PATH} over coreutils'.

@item If you wish to build with zlib support, you will have to grab a compatible
zlib binary from somewhere, with an MSVC import lib, or if you wish to link
statically, you can follow the instructions below to build a compatible
@@ -277,7 +283,7 @@ binutils, gcc4-core, make, git, mingw-runtime, texinfo

In order to run FATE you will also need the following "Utils" packages:
@example
diffutils
bc, diffutils
@end example

If you want to build FFmpeg with additional libraries, download Cygwin
@@ -19,18 +19,6 @@ supported protocols.

A description of the currently available protocols follows.

@section async

Asynchronous data filling wrapper for input stream.

Fill data in a background thread, to decouple I/O operation from demux thread.

@example
async:@var{URL}
async:http://host/resource
async:cache:http://host/resource
@end example

@section bluray

Read BluRay playlist.
@@ -75,7 +63,7 @@ cache:@var{URL}

Physical concatenation protocol.

Read and seek from many resources in sequence as if they were
Allow to read and seek from many resource in sequence as if they were
a unique resource.

A URL accepted by this protocol has the syntax:
@@ -129,7 +117,7 @@ ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP/////////////

File access protocol.

Read from or write to a file.
Allow to read from or write to a file.

A file URL can have the form:
@example
@@ -167,7 +155,7 @@ time, which is valuable for files on slow medium.

FTP (File Transfer Protocol).

Read from or write to remote resources using FTP protocol.
Allow to read from or write to remote resources using FTP protocol.

Following syntax is required.
@example
@@ -289,43 +277,6 @@ Set initial byte offset.

@item end_offset
Try to limit the request to bytes preceding this offset.

@item method
When used as a client option it sets the HTTP method for the request.

When used as a server option it sets the HTTP method that is going to be
expected from the client(s).
If the expected and the received HTTP method do not match the client will
be given a Bad Request response.
When unset the HTTP method is not checked for now. This will be replaced by
autodetection in the future.

@item listen
If set to 1 enables experimental HTTP server. This can be used to send data when
used as an output option, or read data from a client with HTTP POST when used as
an input option.
If set to 2 enables experimental mutli-client HTTP server. This is not yet implemented
in ffmpeg.c or ffserver.c and thus must not be used as a command line option.
@example
# Server side (sending):
ffmpeg -i somefile.ogg -c copy -listen 1 -f ogg http://@var{server}:@var{port}

# Client side (receiving):
ffmpeg -i http://@var{server}:@var{port} -c copy somefile.ogg

# Client can also be done with wget:
wget http://@var{server}:@var{port} -O somefile.ogg

# Server side (receiving):
ffmpeg -listen 1 -i http://@var{server}:@var{port} -c copy somefile.ogg

# Client side (sending):
ffmpeg -i somefile.ogg -chunked_post 0 -c copy -f ogg http://@var{server}:@var{port}

# Client can also be done with wget:
wget --post-file=somefile.ogg http://@var{server}:@var{port}
@end example

@end table
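As an illustration of the @option{method} option described above, an output can
be pushed to an HTTP endpoint with an explicit request method, for example
(the URL is purely illustrative):
@example
ffmpeg -i in.mp4 -c copy -f mpegts -method PUT http://@var{server}:@var{port}/out.ts
@end example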

@subsection HTTP Cookies
@@ -423,7 +374,7 @@ be seekable, so they will fail with the MD5 output protocol.

UNIX pipe access protocol.

Read and write from UNIX pipes.
Allow to read and write from UNIX pipes.

The accepted syntax is:
@example
@@ -663,7 +614,7 @@ For more information see: @url{http://www.samba.org/}.

Secure File Transfer Protocol via libssh

Read from or write to remote resources using SFTP protocol.
Allow to read from or write to remote resources using SFTP protocol.

Following syntax is required.

@@ -1104,9 +1055,7 @@ subfile,,start,153391104,end,268142592,,:/media/dvd/VIDEO_TS/VTS_08_1.VOB
@end example

Play an AVI file directly from a TAR archive:
@example
subfile,,start,183241728,end,366490624,,:archive.tar
@end example

@section tcp
|
@@ -122,22 +122,6 @@ a_dither).
|
||||
|
||||
@end table
|
||||
|
||||
@item alphablend
|
||||
Set the alpha blending to use when the input has alpha but the output does not.
|
||||
Default value is @samp{none}.
|
||||
|
||||
@table @samp
|
||||
@item uniform_color
|
||||
Blend onto a uniform background color
|
||||
|
||||
@item checkerboard
|
||||
Blend onto a checkerboard
|
||||
|
||||
@item none
|
||||
No blending
|
||||
|
||||
@end table
|
||||
|
||||
@end table
|
||||
|
||||
@c man end SCALER OPTIONS
|
||||
|
122  doc/t2h.pm
@@ -23,108 +23,8 @@
|
||||
# no navigation elements
|
||||
set_from_init_file('HEADERS', 0);
|
||||
|
||||
sub ffmpeg_heading_command($$$$$)
|
||||
{
|
||||
my $self = shift;
|
||||
my $cmdname = shift;
|
||||
my $command = shift;
|
||||
my $args = shift;
|
||||
my $content = shift;
|
||||
|
||||
my $result = '';
|
||||
|
||||
# not clear that it may really happen
|
||||
if ($self->in_string) {
|
||||
$result .= $self->command_string($command) ."\n" if ($cmdname ne 'node');
|
||||
$result .= $content if (defined($content));
|
||||
return $result;
|
||||
}
|
||||
|
||||
my $element_id = $self->command_id($command);
|
||||
$result .= "<a name=\"$element_id\"></a>\n"
|
||||
if (defined($element_id) and $element_id ne '');
|
||||
|
||||
print STDERR "Process $command "
|
||||
.Texinfo::Structuring::_print_root_command_texi($command)."\n"
|
||||
if ($self->get_conf('DEBUG'));
|
||||
my $element;
|
||||
if ($Texinfo::Common::root_commands{$command->{'cmdname'}}
|
||||
and $command->{'parent'}
|
||||
and $command->{'parent'}->{'type'}
|
||||
and $command->{'parent'}->{'type'} eq 'element') {
|
||||
$element = $command->{'parent'};
|
||||
}
|
||||
if ($element) {
|
||||
$result .= &{$self->{'format_element_header'}}($self, $cmdname,
|
||||
$command, $element);
|
||||
}
|
||||
|
||||
my $heading_level;
|
||||
# node is used as heading if there is nothing else.
|
||||
if ($cmdname eq 'node') {
|
||||
if (!$element or (!$element->{'extra'}->{'section'}
|
||||
and $element->{'extra'}->{'node'}
|
||||
and $element->{'extra'}->{'node'} eq $command
|
||||
# bogus node may not have been normalized
|
||||
and defined($command->{'extra'}->{'normalized'}))) {
|
||||
if ($command->{'extra'}->{'normalized'} eq 'Top') {
|
||||
$heading_level = 0;
|
||||
} else {
|
||||
$heading_level = 3;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
$heading_level = $command->{'level'};
|
||||
}
|
||||
|
||||
my $heading = $self->command_text($command);
|
||||
# $heading not defined may happen if the command is a @node, for example
|
||||
# if there is an error in the node.
|
||||
if (defined($heading) and $heading ne '' and defined($heading_level)) {
|
||||
|
||||
if ($Texinfo::Common::root_commands{$cmdname}
|
||||
and $Texinfo::Common::sectioning_commands{$cmdname}) {
|
||||
my $content_href = $self->command_contents_href($command, 'contents',
|
||||
$self->{'current_filename'});
|
||||
if ($content_href) {
|
||||
my $this_href = $content_href =~ s/^\#toc-/\#/r;
|
||||
$heading .= '<span class="pull-right">'.
|
||||
'<a class="anchor hidden-xs" '.
|
||||
"href=\"$this_href\" aria-hidden=\"true\">".
|
||||
($ENV{"FA_ICONS"} ? '<i class="fa fa-link"></i>'
|
||||
: '#').
|
||||
'</a> '.
|
||||
'<a class="anchor hidden-xs"'.
|
||||
"href=\"$content_href\" aria-hidden=\"true\">".
|
||||
($ENV{"FA_ICONS"} ? '<i class="fa fa-navicon"></i>'
|
||||
: 'TOC').
|
||||
'</a>'.
|
||||
'</span>';
|
||||
}
|
||||
}
|
||||
|
||||
if ($self->in_preformatted()) {
|
||||
$result .= $heading."\n";
|
||||
} else {
|
||||
# if the level was changed, set the command name right
|
||||
if ($cmdname ne 'node'
|
||||
and $heading_level ne $Texinfo::Common::command_structuring_level{$cmdname}) {
|
||||
$cmdname
|
||||
= $Texinfo::Common::level_to_structuring_command{$cmdname}->[$heading_level];
|
||||
}
|
||||
$result .= &{$self->{'format_heading_text'}}(
|
||||
$self, $cmdname, $heading,
|
||||
$heading_level +
|
||||
$self->get_conf('CHAPTER_HEADER_LEVEL') - 1, $command);
|
||||
}
|
||||
}
|
||||
$result .= $content if (defined($content));
|
||||
return $result;
|
||||
}
|
||||
|
||||
foreach my $command (keys(%Texinfo::Common::sectioning_commands), 'node') {
|
||||
texinfo_register_command_formatting($command, \&ffmpeg_heading_command);
|
||||
}
|
||||
# TOC and Chapter headings link
|
||||
set_from_init_file('TOC_LINKS', 1);
|
||||
|
||||
# print the TOC where @contents is used
|
||||
set_from_init_file('INLINE_CONTENTS', 1);
|
||||
@@ -169,7 +69,6 @@ EOT
|
||||
|
||||
my $head2 = $ENV{"FFMPEG_HEADER2"} || <<EOT;
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
@@ -186,23 +85,6 @@ EOT
|
||||
}
|
||||
texinfo_register_formatting_function('begin_file', \&ffmpeg_begin_file);
|
||||
|
||||
sub ffmpeg_program_string($)
|
||||
{
|
||||
my $self = shift;
|
||||
if (defined($self->get_conf('PROGRAM'))
|
||||
and $self->get_conf('PROGRAM') ne ''
|
||||
and defined($self->get_conf('PACKAGE_URL'))) {
|
||||
return $self->convert_tree(
|
||||
$self->gdt('This document was generated using @uref{{program_homepage}, @emph{{program}}}.',
|
||||
{ 'program_homepage' => $self->get_conf('PACKAGE_URL'),
|
||||
'program' => $self->get_conf('PROGRAM') }));
|
||||
} else {
|
||||
return $self->convert_tree(
|
||||
$self->gdt('This document was generated automatically.'));
|
||||
}
|
||||
}
|
||||
texinfo_register_formatting_function('program_string', \&ffmpeg_program_string);
|
||||
|
||||
# Customized file ending
|
||||
sub ffmpeg_end_file($)
|
||||
{
|
||||
|
@@ -166,7 +166,7 @@ INF: while(<$inf>) {
|
||||
if ($ended =~ /^(?:ifset|ifclear|ignore|menu|iftex|ifhtml|ifnothtml)$/) {
|
||||
$skipping = pop @skstack;
|
||||
next;
|
||||
} elsif ($ended =~ /^(?:example|smallexample|verbatim|display)$/) {
|
||||
} elsif ($ended =~ /^(?:example|smallexample|display)$/) {
|
||||
$shift = "";
|
||||
$_ = ""; # need a paragraph break
|
||||
} elsif ($ended =~ /^(?:itemize|enumerate|(?:multi|[fv])?table)$/) {
|
||||
@@ -290,7 +290,7 @@ INF: while(<$inf>) {
|
||||
$_ = "\n=over 4\n";
|
||||
};
|
||||
|
||||
/^\@((?:small)?example|verbatim|display)/ and do {
|
||||
/^\@((?:small)?example|display)/ and do {
|
||||
push @endwstack, $endw;
|
||||
$endw = $1;
|
||||
$shift = "\t";
|
||||
@@ -384,7 +384,7 @@ sub postprocess
|
||||
# @* is also impossible in .pod; we discard it and any newline that
|
||||
# follows it. Similarly, our macro @gol must be discarded.
|
||||
|
||||
s/\@anchor\{(?:[^\}]*)\}//g;
|
||||
s/\@anchor{(?:[^\}]*)\}//g;
|
||||
s/\(?\@xref\{(?:[^\}]*)\}(?:[^.<]|(?:<[^<>]*>))*\.\)?//g;
|
||||
s/\s+\(\@pxref\{(?:[^\}]*)\}\)//g;
|
||||
s/;\s+\@pxref\{(?:[^\}]*)\}//g;
|
||||
|
@@ -12,17 +12,17 @@ explicitly specified. The following rules are applied:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
@samp{'} and @samp{\} are special characters (respectively used for
|
||||
@code{'} and @code{\} are special characters (respectively used for
|
||||
quoting and escaping). In addition to them, there might be other
|
||||
special characters depending on the specific syntax where the escaping
|
||||
and quoting are employed.
|
||||
|
||||
@item
|
||||
A special character is escaped by prefixing it with a @samp{\}.
|
||||
A special character is escaped by prefixing it with a '\'.
|
||||
|
||||
@item
|
||||
All characters enclosed between @samp{''} are included literally in the
|
||||
parsed string. The quote character @samp{'} itself cannot be quoted,
|
||||
All characters enclosed between '' are included literally in the
|
||||
parsed string. The quote character @code{'} itself cannot be quoted,
|
||||
so you may need to close the quote and escape it.
|
||||
|
||||
@item
|
||||
@@ -71,7 +71,7 @@ Escaping and quoting can be mixed together:
|
||||
@end example
|
||||
|
||||
@item
|
||||
To include a literal @samp{\} you can use either escaping or quoting:
|
||||
To include a literal @code{\} you can use either escaping or quoting:
|
||||
@example
|
||||
'c:\foo' can be written as c:\\foo
|
||||
@end example
|
||||
@@ -238,14 +238,6 @@ The following abbreviations are recognized:
|
||||
480x320
|
||||
@item qhd
|
||||
960x540
|
||||
@item 2kdci
|
||||
2048x1080
|
||||
@item 4kdci
|
||||
4096x2160
|
||||
@item uhd2160
|
||||
3840x2160
|
||||
@item uhd4320
|
||||
7680x4320
|
||||
@end table
|
||||
|
||||
@anchor{video rate syntax}
|
||||
|
31  ffmpeg.h
@@ -63,7 +63,6 @@ enum HWAccelID {
|
||||
HWACCEL_VDPAU,
|
||||
HWACCEL_DXVA2,
|
||||
HWACCEL_VDA,
|
||||
HWACCEL_VIDEOTOOLBOX,
|
||||
};
|
||||
|
||||
typedef struct HWAccel {
|
||||
@@ -93,8 +92,6 @@ typedef struct OptionsContext {
|
||||
|
||||
/* input/output options */
|
||||
int64_t start_time;
|
||||
int64_t start_time_eof;
|
||||
int seek_timestamp;
|
||||
const char *format;
|
||||
|
||||
SpecifierOpt *codec_names;
|
||||
@@ -114,7 +111,6 @@ typedef struct OptionsContext {
|
||||
int64_t input_ts_offset;
|
||||
int rate_emu;
|
||||
int accurate_seek;
|
||||
int thread_queue_size;
|
||||
|
||||
SpecifierOpt *ts_scale;
|
||||
int nb_ts_scale;
|
||||
@@ -124,8 +120,6 @@ typedef struct OptionsContext {
|
||||
int nb_hwaccels;
|
||||
SpecifierOpt *hwaccel_devices;
|
||||
int nb_hwaccel_devices;
|
||||
SpecifierOpt *autorotate;
|
||||
int nb_autorotate;
|
||||
|
||||
/* output options */
|
||||
StreamMap *stream_maps;
|
||||
@@ -212,8 +206,6 @@ typedef struct OptionsContext {
|
||||
int nb_apad;
|
||||
SpecifierOpt *discard;
|
||||
int nb_discard;
|
||||
SpecifierOpt *disposition;
|
||||
int nb_disposition;
|
||||
} OptionsContext;
|
||||
|
||||
typedef struct InputFilter {
|
||||
@@ -231,7 +223,6 @@ typedef struct OutputFilter {
|
||||
|
||||
/* temporary storage until stream maps are processed */
|
||||
AVFilterInOut *out_tmp;
|
||||
enum AVMediaType type;
|
||||
} OutputFilter;
|
||||
|
||||
typedef struct FilterGraph {
|
||||
@@ -281,7 +272,6 @@ typedef struct InputStream {
|
||||
int top_field_first;
|
||||
int guess_layout_max;
|
||||
|
||||
int autorotate;
|
||||
int resample_height;
|
||||
int resample_width;
|
||||
int resample_pix_fmt;
|
||||
@@ -346,7 +336,6 @@ typedef struct InputFile {
|
||||
int64_t ts_offset;
|
||||
int64_t last_ts;
|
||||
int64_t start_time; /* user-specified start time in AV_TIME_BASE or AV_NOPTS_VALUE */
|
||||
int seek_timestamp;
|
||||
int64_t recording_time;
|
||||
int nb_streams; /* number of stream that ffmpeg is aware of; may be different
|
||||
from ctx.nb_streams if new streams appear during av_read_frame() */
|
||||
@@ -359,7 +348,6 @@ typedef struct InputFile {
|
||||
pthread_t thread; /* thread reading from this file */
|
||||
int non_blocking; /* reading packets from the thread should not block */
|
||||
int joined; /* the thread has been joined */
|
||||
int thread_queue_size; /* maximum number of queued packets */
|
||||
#endif
|
||||
} InputFile;
|
||||
|
||||
@@ -402,13 +390,11 @@ typedef struct OutputStream {
|
||||
AVFrame *filtered_frame;
|
||||
AVFrame *last_frame;
|
||||
int last_droped;
|
||||
int last_nb0_frames[3];
|
||||
|
||||
/* video only */
|
||||
AVRational frame_rate;
|
||||
int force_fps;
|
||||
int top_field_first;
|
||||
int rotate_overridden;
|
||||
|
||||
AVRational frame_aspect_ratio;
|
||||
|
||||
@@ -432,8 +418,8 @@ typedef struct OutputStream {
|
||||
char *filters; ///< filtergraph associated to the -filter option
|
||||
char *filters_script; ///< filtergraph script associated to the -filter_script option
|
||||
|
||||
int64_t sws_flags;
|
||||
AVDictionary *encoder_opts;
|
||||
AVDictionary *sws_dict;
|
||||
AVDictionary *swr_opts;
|
||||
AVDictionary *resample_opts;
|
||||
AVDictionary *bsf_args;
|
||||
@@ -444,7 +430,6 @@ typedef struct OutputStream {
|
||||
const char *attachment_filename;
|
||||
int copy_initial_nonkeyframes;
|
||||
int copy_prior_start;
|
||||
char *disposition;
|
||||
|
||||
int keep_pix_fmt;
|
||||
|
||||
@@ -458,15 +443,6 @@ typedef struct OutputStream {
|
||||
// number of frames/samples sent to the encoder
|
||||
uint64_t frames_encoded;
|
||||
uint64_t samples_encoded;
|
||||
|
||||
/* packet quality factor */
|
||||
int quality;
|
||||
|
||||
/* packet picture type */
|
||||
int pict_type;
|
||||
|
||||
/* frame encode sum of squared error values */
|
||||
int64_t error[4];
|
||||
} OutputStream;
|
||||
|
||||
typedef struct OutputFile {
|
||||
@@ -494,7 +470,6 @@ extern FilterGraph **filtergraphs;
|
||||
extern int nb_filtergraphs;
|
||||
|
||||
extern char *vstats_filename;
|
||||
extern char *sdp_filename;
|
||||
|
||||
extern float audio_drift_threshold;
|
||||
extern float dts_delta_threshold;
|
||||
@@ -503,7 +478,6 @@ extern float dts_error_threshold;
|
||||
extern int audio_volume;
|
||||
extern int audio_sync_method;
|
||||
extern int video_sync_method;
|
||||
extern float frame_drop_threshold;
|
||||
extern int do_benchmark;
|
||||
extern int do_benchmark_all;
|
||||
extern int do_deinterlace;
|
||||
@@ -521,7 +495,6 @@ extern int frame_bits_per_raw_sample;
|
||||
extern AVIOContext *progress_avio;
|
||||
extern float max_error_rate;
|
||||
extern int vdpau_api_ver;
|
||||
extern char *videotoolbox_pixfmt;
|
||||
|
||||
extern const AVIOInterruptCB int_cb;
|
||||
|
||||
@@ -549,13 +522,11 @@ int configure_filtergraph(FilterGraph *fg);
|
||||
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out);
|
||||
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist);
|
||||
FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost);
|
||||
int init_complex_filtergraph(FilterGraph *fg);
|
||||
|
||||
int ffmpeg_parse_options(int argc, char **argv);
|
||||
|
||||
int vdpau_init(AVCodecContext *s);
|
||||
int dxva2_init(AVCodecContext *s);
|
||||
int vda_init(AVCodecContext *s);
|
||||
int videotoolbox_init(AVCodecContext *s);
|
||||
|
||||
#endif /* FFMPEG_H */
|
||||
|
@@ -52,7 +52,6 @@ DEFINE_GUID(DXVA2_ModeH264_F, 0x1b81be69, 0xa0c7,0x11d3,0xb9,0x84,0x00,0
|
||||
DEFINE_GUID(DXVADDI_Intel_ModeH264_E, 0x604F8E68, 0x4951,0x4C54,0x88,0xFE,0xAB,0xD2,0x5C,0x15,0xB3,0xD6);
|
||||
DEFINE_GUID(DXVA2_ModeVC1_D, 0x1b81beA3, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
|
||||
DEFINE_GUID(DXVA2_ModeVC1_D2010, 0x1b81beA4, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
|
||||
DEFINE_GUID(DXVA2_ModeHEVC_VLD_Main, 0x5b11d51b, 0x2f4c,0x4452,0xbc,0xc3,0x09,0xf2,0xa1,0x16,0x0c,0xc0);
|
||||
DEFINE_GUID(DXVA2_NoEncrypt, 0x1b81beD0, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
|
||||
DEFINE_GUID(GUID_NULL, 0x00000000, 0x0000,0x0000,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00);
|
||||
|
||||
@@ -81,9 +80,6 @@ static const dxva2_mode dxva2_modes[] = {
|
||||
{ &DXVA2_ModeVC1_D, AV_CODEC_ID_VC1 },
|
||||
{ &DXVA2_ModeVC1_D, AV_CODEC_ID_WMV3 },
|
||||
|
||||
/* HEVC/H.265 */
|
||||
{ &DXVA2_ModeHEVC_VLD_Main, AV_CODEC_ID_HEVC },
|
||||
|
||||
{ NULL, 0 },
|
||||
};
|
||||
|
||||
@@ -364,7 +360,7 @@ static int dxva2_alloc(AVCodecContext *s)
|
||||
d3dpp.SwapEffect = D3DSWAPEFFECT_DISCARD;
|
||||
d3dpp.Flags = D3DPRESENTFLAG_VIDEO;
|
||||
|
||||
hr = IDirect3D9_CreateDevice(ctx->d3d9, adapter, D3DDEVTYPE_HAL, GetDesktopWindow(),
|
||||
hr = IDirect3D9_CreateDevice(ctx->d3d9, adapter, D3DDEVTYPE_HAL, GetShellWindow(),
|
||||
D3DCREATE_SOFTWARE_VERTEXPROCESSING | D3DCREATE_MULTITHREADED | D3DCREATE_FPU_PRESERVE,
|
||||
&d3dpp, &ctx->d3d9device);
|
||||
if (FAILED(hr)) {
|
||||
@@ -530,10 +526,6 @@ static int dxva2_create_decoder(AVCodecContext *s)
|
||||
but it causes issues for H.264 on certain AMD GPUs..... */
|
||||
if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO)
|
||||
surface_alignment = 32;
|
||||
/* the HEVC DXVA2 spec asks for 128 pixel aligned surfaces to ensure
|
||||
all coding features have enough room to work with */
|
||||
else if (s->codec_id == AV_CODEC_ID_HEVC)
|
||||
surface_alignment = 128;
|
||||
else
|
||||
surface_alignment = 16;
|
||||
|
||||
@@ -541,7 +533,7 @@ static int dxva2_create_decoder(AVCodecContext *s)
|
||||
ctx->num_surfaces = 4;
|
||||
|
||||
/* add surfaces based on number of possible refs */
|
||||
if (s->codec_id == AV_CODEC_ID_H264 || s->codec_id == AV_CODEC_ID_HEVC)
|
||||
if (s->codec_id == AV_CODEC_ID_H264)
|
||||
ctx->num_surfaces += 16;
|
||||
else
|
||||
ctx->num_surfaces += 2;
|
||||
|
173  ffmpeg_filter.c
@@ -31,7 +31,6 @@
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/bprint.h"
|
||||
#include "libavutil/channel_layout.h"
|
||||
#include "libavutil/display.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "libavutil/pixfmt.h"
|
||||
@@ -85,7 +84,7 @@ void choose_sample_fmt(AVStream *st, AVCodec *codec)
|
||||
break;
|
||||
}
|
||||
if (*p == -1) {
|
||||
if((codec->capabilities & AV_CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codec->sample_fmt) > av_get_sample_fmt_name(codec->sample_fmts[0]))
|
||||
if((codec->capabilities & CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codec->sample_fmt) > av_get_sample_fmt_name(codec->sample_fmts[0]))
|
||||
av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
|
||||
if(av_get_sample_fmt_name(st->codec->sample_fmt))
|
||||
av_log(NULL, AV_LOG_WARNING,
|
||||
@@ -289,45 +288,6 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
|
||||
ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
|
||||
}
|
||||
|
||||
int init_complex_filtergraph(FilterGraph *fg)
|
||||
{
|
||||
AVFilterInOut *inputs, *outputs, *cur;
|
||||
AVFilterGraph *graph;
|
||||
int ret = 0;
|
||||
|
||||
/* this graph is only used for determining the kinds of inputs
|
||||
* and outputs we have, and is discarded on exit from this function */
|
||||
graph = avfilter_graph_alloc();
|
||||
if (!graph)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
ret = avfilter_graph_parse2(graph, fg->graph_desc, &inputs, &outputs);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
|
||||
for (cur = inputs; cur; cur = cur->next)
|
||||
init_input_filter(fg, cur);
|
||||
|
||||
for (cur = outputs; cur;) {
|
||||
GROW_ARRAY(fg->outputs, fg->nb_outputs);
|
||||
fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]));
|
||||
if (!fg->outputs[fg->nb_outputs - 1])
|
||||
exit_program(1);
|
||||
|
||||
fg->outputs[fg->nb_outputs - 1]->graph = fg;
|
||||
fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
|
||||
fg->outputs[fg->nb_outputs - 1]->type = avfilter_pad_get_type(cur->filter_ctx->output_pads,
|
||||
cur->pad_idx);
|
||||
cur = cur->next;
|
||||
fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
|
||||
}
|
||||
|
||||
fail:
|
||||
avfilter_inout_free(&inputs);
|
||||
avfilter_graph_free(&graph);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int insert_trim(int64_t start_time, int64_t duration,
|
||||
AVFilterContext **last_filter, int *pad_idx,
|
||||
const char *filter_name)
|
||||
@@ -379,28 +339,6 @@ static int insert_trim(int64_t start_time, int64_t duration,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
|
||||
const char *filter_name, const char *args)
|
||||
{
|
||||
AVFilterGraph *graph = (*last_filter)->graph;
|
||||
AVFilterContext *ctx;
|
||||
int ret;
|
||||
|
||||
ret = avfilter_graph_create_filter(&ctx,
|
||||
avfilter_get_by_name(filter_name),
|
||||
filter_name, args, NULL, graph);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
*last_filter = ctx;
|
||||
*pad_idx = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
|
||||
{
|
||||
char *pix_fmts;
|
||||
@@ -423,17 +361,11 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
|
||||
if (codec->width || codec->height) {
|
||||
char args[255];
|
||||
AVFilterContext *filter;
|
||||
AVDictionaryEntry *e = NULL;
|
||||
|
||||
snprintf(args, sizeof(args), "%d:%d",
|
||||
snprintf(args, sizeof(args), "%d:%d:0x%X",
|
||||
codec->width,
|
||||
codec->height);
|
||||
|
||||
while ((e = av_dict_get(ost->sws_dict, "", e,
|
||||
AV_DICT_IGNORE_SUFFIX))) {
|
||||
av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
|
||||
}
|
||||
|
||||
codec->height,
|
||||
(unsigned)ost->sws_flags);
|
||||
snprintf(name, sizeof(name), "scaler for output stream %d:%d",
|
||||
ost->file_index, ost->index);
|
||||
if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
|
||||
@@ -544,7 +476,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
|
||||
av_get_default_channel_layout(ost->audio_channels_mapped));
|
||||
for (i = 0; i < ost->audio_channels_mapped; i++)
|
||||
if (ost->audio_channels_map[i] != -1)
|
||||
av_bprintf(&pan_buf, "|c%d=c%d", i, ost->audio_channels_map[i]);
|
||||
av_bprintf(&pan_buf, ":c%d=c%d", i, ost->audio_channels_map[i]);
|
||||
|
||||
AUTO_INSERT_FILTER("-map_channel", "pan", pan_buf.str);
|
||||
av_bprint_finalize(&pan_buf, NULL);
|
||||
@@ -647,11 +579,6 @@ int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOu
|
||||
av_freep(&ofilter->name);
|
||||
DESCRIBE_FILTER_LINK(ofilter, out, 0);
|
||||
|
||||
if (!ofilter->ost) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Filter %s has a unconnected output\n", ofilter->name);
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
|
||||
case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
|
||||
case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
|
||||
@@ -682,8 +609,8 @@ static int sub2video_prepare(InputStream *ist)
|
||||
}
|
||||
av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
|
||||
}
|
||||
ist->sub2video.w = ist->resample_width = w;
|
||||
ist->sub2video.h = ist->resample_height = h;
|
||||
ist->sub2video.w = ist->dec_ctx->width = ist->resample_width = w;
|
||||
ist->sub2video.h = ist->dec_ctx->height = ist->resample_height = h;
|
||||
|
||||
/* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
|
||||
palettes for all rectangles are identical or compatible */
|
||||
@@ -738,7 +665,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
ist->resample_height,
|
||||
ist->hwaccel_retrieve_data ? ist->hwaccel_retrieved_pix_fmt : ist->resample_pix_fmt,
|
||||
tb.num, tb.den, sar.num, sar.den,
|
||||
SWS_BILINEAR + ((ist->dec_ctx->flags&AV_CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
|
||||
SWS_BILINEAR + ((ist->dec_ctx->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
|
||||
if (fr.num && fr.den)
|
||||
av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
|
||||
snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
|
||||
@@ -749,27 +676,6 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
return ret;
|
||||
last_filter = ifilter->filter;
|
||||
|
||||
if (ist->autorotate) {
|
||||
double theta = get_rotation(ist->st);
|
||||
|
||||
if (fabs(theta - 90) < 1.0) {
|
||||
ret = insert_filter(&last_filter, &pad_idx, "transpose", "clock");
|
||||
} else if (fabs(theta - 180) < 1.0) {
|
||||
ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
|
||||
} else if (fabs(theta - 270) < 1.0) {
|
||||
ret = insert_filter(&last_filter, &pad_idx, "transpose", "cclock");
|
||||
} else if (fabs(theta) > 1.0) {
|
||||
char rotate_buf[64];
|
||||
snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
|
||||
ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
|
||||
}
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (ist->framerate.num) {
|
||||
AVFilterContext *setpts;
|
||||
|
||||
@@ -954,7 +860,7 @@ static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
int configure_filtergraph(FilterGraph *fg)
|
||||
{
|
||||
AVFilterInOut *inputs, *outputs, *cur;
|
||||
int ret, i, simple = !fg->graph_desc;
|
||||
int ret, i, init = !fg->graph, simple = !fg->graph_desc;
|
||||
const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
|
||||
fg->graph_desc;
|
||||
|
||||
@@ -967,13 +873,7 @@ int configure_filtergraph(FilterGraph *fg)
|
||||
char args[512];
|
||||
AVDictionaryEntry *e = NULL;
|
||||
|
||||
args[0] = 0;
|
||||
while ((e = av_dict_get(ost->sws_dict, "", e,
|
||||
AV_DICT_IGNORE_SUFFIX))) {
|
||||
av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
|
||||
}
|
||||
if (strlen(args))
|
||||
args[strlen(args)-1] = 0;
|
||||
snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
|
||||
fg->graph->scale_sws_opts = av_strdup(args);
|
||||
|
||||
args[0] = 0;
|
||||
@@ -1003,30 +903,14 @@ int configure_filtergraph(FilterGraph *fg)
|
||||
return ret;
|
||||
|
||||
if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
|
||||
const char *num_inputs;
|
||||
const char *num_outputs;
|
||||
if (!outputs) {
|
||||
num_outputs = "0";
|
||||
} else if (outputs->next) {
|
||||
num_outputs = ">1";
|
||||
} else {
|
||||
num_outputs = "1";
|
||||
}
|
||||
if (!inputs) {
|
||||
num_inputs = "0";
|
||||
} else if (inputs->next) {
|
||||
num_inputs = ">1";
|
||||
} else {
|
||||
num_inputs = "1";
|
||||
}
|
||||
av_log(NULL, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
|
||||
"to have exactly 1 input and 1 output."
|
||||
" However, it had %s input(s) and %s output(s)."
|
||||
" Please adjust, or use a complex filtergraph (-filter_complex) instead.\n",
|
||||
graph_desc, num_inputs, num_outputs);
|
||||
av_log(NULL, AV_LOG_ERROR, "Simple filtergraph '%s' does not have "
|
||||
"exactly one input and output.\n", graph_desc);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
for (cur = inputs; !simple && init && cur; cur = cur->next)
|
||||
init_input_filter(fg, cur);
|
||||
|
||||
for (cur = inputs, i = 0; cur; cur = cur->next, i++)
|
||||
if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0) {
|
||||
avfilter_inout_free(&inputs);
|
||||
@@ -1035,12 +919,27 @@ int configure_filtergraph(FilterGraph *fg)
|
||||
}
|
||||
avfilter_inout_free(&inputs);
|
||||
|
||||
for (cur = outputs, i = 0; cur; cur = cur->next, i++)
|
||||
configure_output_filter(fg, fg->outputs[i], cur);
|
||||
avfilter_inout_free(&outputs);
|
||||
if (!init || simple) {
|
||||
/* we already know the mappings between lavfi outputs and output streams,
|
||||
* so we can finish the setup */
|
||||
for (cur = outputs, i = 0; cur; cur = cur->next, i++)
|
||||
configure_output_filter(fg, fg->outputs[i], cur);
|
||||
avfilter_inout_free(&outputs);
|
||||
|
||||
if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
|
||||
return ret;
|
||||
if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
|
||||
return ret;
|
||||
} else {
|
||||
/* wait until output mappings are processed */
|
||||
for (cur = outputs; cur;) {
|
||||
GROW_ARRAY(fg->outputs, fg->nb_outputs);
|
||||
if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
|
||||
exit_program(1);
|
||||
fg->outputs[fg->nb_outputs - 1]->graph = fg;
|
||||
fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
|
||||
cur = cur->next;
|
||||
fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
fg->reconfiguration = 1;
|
||||
|
||||
@@ -1048,7 +947,7 @@ int configure_filtergraph(FilterGraph *fg)
|
||||
OutputStream *ost = fg->outputs[i]->ost;
|
||||
if (ost &&
|
||||
ost->enc->type == AVMEDIA_TYPE_AUDIO &&
|
||||
!(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
|
||||
!(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
|
||||
av_buffersink_set_frame_size(ost->filter->filter,
|
||||
ost->enc_ctx->frame_size);
|
||||
}
|
||||
|
361  ffmpeg_opt.c
@@ -40,9 +40,6 @@
|
||||
#include "libavutil/parseutils.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "libavutil/pixfmt.h"
|
||||
#include "libavutil/time_internal.h"
|
||||
|
||||
#define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
|
||||
|
||||
#define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
|
||||
{\
|
||||
@@ -74,16 +71,12 @@ const HWAccel hwaccels[] = {
|
||||
{ "dxva2", dxva2_init, HWACCEL_DXVA2, AV_PIX_FMT_DXVA2_VLD },
|
||||
#endif
|
||||
#if CONFIG_VDA
|
||||
{ "vda", videotoolbox_init, HWACCEL_VDA, AV_PIX_FMT_VDA },
|
||||
#endif
|
||||
#if CONFIG_VIDEOTOOLBOX
|
||||
{ "videotoolbox", videotoolbox_init, HWACCEL_VIDEOTOOLBOX, AV_PIX_FMT_VIDEOTOOLBOX },
|
||||
{ "vda", vda_init, HWACCEL_VDA, AV_PIX_FMT_VDA },
|
||||
#endif
|
||||
{ 0 },
|
||||
};
|
||||
|
||||
char *vstats_filename;
|
||||
char *sdp_filename;
|
||||
|
||||
float audio_drift_threshold = 0.1;
|
||||
float dts_delta_threshold = 10;
|
||||
@@ -92,7 +85,6 @@ float dts_error_threshold = 3600*30;
|
||||
int audio_volume = 256;
|
||||
int audio_sync_method = 0;
|
||||
int video_sync_method = VSYNC_AUTO;
|
||||
float frame_drop_threshold = 0;
|
||||
int do_deinterlace = 0;
|
||||
int do_benchmark = 0;
|
||||
int do_benchmark_all = 0;
|
||||
@@ -116,9 +108,6 @@ static int no_file_overwrite = 0;
|
||||
static int do_psnr = 0;
|
||||
static int input_sync;
|
||||
static int override_ffserver = 0;
|
||||
static int input_stream_potentially_available = 0;
|
||||
static int ignore_unknown_streams = 0;
|
||||
static int copy_unknown_streams = 0;
|
||||
|
||||
static void uninit_options(OptionsContext *o)
|
||||
{
|
||||
@@ -159,25 +148,12 @@ static void init_options(OptionsContext *o)
|
||||
o->stop_time = INT64_MAX;
|
||||
o->mux_max_delay = 0.7;
|
||||
o->start_time = AV_NOPTS_VALUE;
|
||||
o->start_time_eof = AV_NOPTS_VALUE;
|
||||
o->recording_time = INT64_MAX;
|
||||
o->limit_filesize = UINT64_MAX;
|
||||
o->chapters_input_file = INT_MAX;
|
||||
o->accurate_seek = 1;
|
||||
}
|
||||
|
||||
static int show_hwaccels(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
int i;
|
||||
|
||||
printf("Hardware acceleration methods:\n");
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(hwaccels) - 1; i++) {
|
||||
printf("%s\n", hwaccels[i].name);
|
||||
}
|
||||
printf("\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* return a copy of the input with the stream specifiers removed from the keys */
|
||||
static AVDictionary *strip_specifiers(AVDictionary *dict)
|
||||
{
|
||||
@@ -249,15 +225,12 @@ static int opt_map(void *optctx, const char *opt, const char *arg)
|
||||
int sync_file_idx = -1, sync_stream_idx = 0;
|
||||
char *p, *sync;
|
||||
char *map;
|
||||
char *allow_unused;
|
||||
|
||||
if (*arg == '-') {
|
||||
negative = 1;
|
||||
arg++;
|
||||
}
|
||||
map = av_strdup(arg);
|
||||
if (!map)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
/* parse sync stream first, just pick first matching stream */
|
||||
if (sync = strchr(map, ',')) {
|
||||
@@ -294,8 +267,6 @@ static int opt_map(void *optctx, const char *opt, const char *arg)
|
||||
exit_program(1);
|
||||
}
|
||||
} else {
|
||||
if (allow_unused = strchr(map, '?'))
|
||||
*allow_unused = 0;
|
||||
file_idx = strtol(map, &p, 0);
|
||||
if (file_idx >= nb_input_files || file_idx < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Invalid input file index: %d.\n", file_idx);
|
||||
@@ -333,13 +304,8 @@ static int opt_map(void *optctx, const char *opt, const char *arg)
|
||||
}
|
||||
|
||||
if (!m) {
|
||||
if (allow_unused) {
|
||||
av_log(NULL, AV_LOG_VERBOSE, "Stream map '%s' matches no streams; ignoring.\n", arg);
|
||||
} else {
|
||||
av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n"
|
||||
"To ignore this, add a trailing '?' to the map.\n", arg);
|
||||
exit_program(1);
|
||||
}
|
||||
av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n", arg);
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
av_freep(&map);
|
||||
@@ -413,13 +379,6 @@ static int opt_map_channel(void *optctx, const char *opt, const char *arg)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int opt_sdp_file(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
av_free(sdp_filename);
|
||||
sdp_filename = av_strdup(arg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse a metadata specifier passed as 'arg' parameter.
|
||||
* @param arg metadata string to parse
|
||||
@@ -550,7 +509,7 @@ static int opt_recording_timestamp(void *optctx, const char *opt, const char *ar
|
||||
char buf[128];
|
||||
int64_t recording_timestamp = parse_time_or_die(opt, arg, 0) / 1E6;
|
||||
struct tm time = *gmtime((time_t*)&recording_timestamp);
|
||||
if (!strftime(buf, sizeof(buf), "creation_time=%Y-%m-%dT%H:%M:%S%z", &time))
|
||||
if (!strftime(buf, sizeof(buf), "creation_time=%FT%T%z", &time))
|
||||
return -1;
|
||||
parse_option(o, "metadata", buf, options);
|
||||
|
||||
@@ -631,9 +590,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
|
||||
ist->ts_scale = 1.0;
|
||||
MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);
|
||||
|
||||
ist->autorotate = 1;
|
||||
MATCH_PER_STREAM_OPT(autorotate, i, ist->autorotate, ic, st);
|
||||
|
||||
MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, ic, st);
|
||||
if (codec_tag) {
|
||||
uint32_t tag = strtol(codec_tag, &next, 0);
|
||||
@@ -674,11 +630,9 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
if(!ist->dec)
|
||||
ist->dec = avcodec_find_decoder(dec->codec_id);
|
||||
#if FF_API_EMU_EDGE
|
||||
if (av_codec_get_lowres(dec)) {
|
||||
dec->flags |= CODEC_FLAG_EMU_EDGE;
|
||||
}
|
||||
#endif
|
||||
|
||||
ist->resample_height = ist->dec_ctx->height;
|
||||
ist->resample_width = ist->dec_ctx->width;
|
||||
@@ -840,7 +794,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
char * video_codec_name = NULL;
|
||||
char * audio_codec_name = NULL;
|
||||
char *subtitle_codec_name = NULL;
|
||||
char * data_codec_name = NULL;
|
||||
int scan_all_pmts_set = 0;
|
||||
|
||||
if (o->format) {
|
||||
@@ -894,7 +847,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
MATCH_PER_TYPE_OPT(codec_names, str, video_codec_name, ic, "v");
|
||||
MATCH_PER_TYPE_OPT(codec_names, str, audio_codec_name, ic, "a");
|
||||
MATCH_PER_TYPE_OPT(codec_names, str, subtitle_codec_name, ic, "s");
|
||||
MATCH_PER_TYPE_OPT(codec_names, str, data_codec_name, ic, "d");
|
||||
|
||||
ic->video_codec_id = video_codec_name ?
|
||||
find_codec_or_die(video_codec_name , AVMEDIA_TYPE_VIDEO , 0)->id : AV_CODEC_ID_NONE;
|
||||
@@ -902,8 +854,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0)->id : AV_CODEC_ID_NONE;
|
||||
ic->subtitle_codec_id= subtitle_codec_name ?
|
||||
find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0)->id : AV_CODEC_ID_NONE;
|
||||
ic->data_codec_id = data_codec_name ?
|
||||
find_codec_or_die(data_codec_name, AVMEDIA_TYPE_DATA, 0)->id : AV_CODEC_ID_NONE;
|
||||
|
||||
if (video_codec_name)
|
||||
av_format_set_video_codec (ic, find_codec_or_die(video_codec_name , AVMEDIA_TYPE_VIDEO , 0));
|
||||
@@ -911,8 +861,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
av_format_set_audio_codec (ic, find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0));
|
||||
if (subtitle_codec_name)
|
||||
av_format_set_subtitle_codec(ic, find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0));
|
||||
if (data_codec_name)
|
||||
av_format_set_data_codec(ic, find_codec_or_die(data_codec_name, AVMEDIA_TYPE_DATA, 0));
|
||||
|
||||
ic->flags |= AVFMT_FLAG_NONBLOCK;
|
||||
ic->interrupt_callback = int_cb;
|
||||
@@ -951,33 +899,14 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
}
|
||||
}
|
||||
|
||||
if (o->start_time_eof != AV_NOPTS_VALUE) {
|
||||
if (ic->duration>0) {
|
||||
o->start_time = o->start_time_eof + ic->duration;
|
||||
} else
|
||||
av_log(NULL, AV_LOG_WARNING, "Cannot use -sseof, duration of %s not known\n", filename);
|
||||
}
|
||||
timestamp = (o->start_time == AV_NOPTS_VALUE) ? 0 : o->start_time;
|
||||
/* add the stream start time */
|
||||
if (!o->seek_timestamp && ic->start_time != AV_NOPTS_VALUE)
|
||||
if (ic->start_time != AV_NOPTS_VALUE)
|
||||
timestamp += ic->start_time;
|
||||
|
||||
/* if seeking requested, we execute it */
|
||||
if (o->start_time != AV_NOPTS_VALUE) {
|
||||
int64_t seek_timestamp = timestamp;
|
||||
|
||||
if (!(ic->iformat->flags & AVFMT_SEEK_TO_PTS)) {
|
||||
int dts_heuristic = 0;
|
||||
for (i=0; i<ic->nb_streams; i++) {
|
||||
AVCodecContext *avctx = ic->streams[i]->codec;
|
||||
if (avctx->has_b_frames)
|
||||
dts_heuristic = 1;
|
||||
}
|
||||
if (dts_heuristic) {
|
||||
seek_timestamp -= 3*AV_TIME_BASE / 23;
|
||||
}
|
||||
}
|
||||
ret = avformat_seek_file(ic, -1, INT64_MIN, seek_timestamp, seek_timestamp, 0);
|
||||
ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, timestamp, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
|
||||
filename, (double)timestamp / AV_TIME_BASE);
|
||||
@@ -1005,9 +934,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
f->nb_streams = ic->nb_streams;
|
||||
f->rate_emu = o->rate_emu;
|
||||
f->accurate_seek = o->accurate_seek;
|
||||
#if HAVE_PTHREADS
|
||||
f->thread_queue_size = o->thread_queue_size > 0 ? o->thread_queue_size : 8;
|
||||
#endif
|
||||
|
||||
/* check if all codec options have been used */
|
||||
unused_opts = strip_specifiers(o->g->codec_opts);
|
||||
@@ -1062,8 +988,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
av_dict_free(&opts[i]);
|
||||
av_freep(&opts);
|
||||
|
||||
input_stream_potentially_available = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1187,7 +1111,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
av_dict_set(&ost->encoder_opts, buf, arg, AV_DICT_DONT_OVERWRITE);
|
||||
av_free(buf);
|
||||
} while (!s->eof_reached);
|
||||
avio_closep(&s);
|
||||
avio_close(s);
|
||||
}
|
||||
if (ret) {
|
||||
av_log(NULL, AV_LOG_FATAL,
|
||||
@@ -1238,23 +1162,19 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
uint32_t tag = strtol(codec_tag, &next, 0);
|
||||
if (*next)
|
||||
tag = AV_RL32(codec_tag);
|
||||
ost->st->codec->codec_tag =
|
||||
ost->enc_ctx->codec_tag = tag;
|
||||
}
|
||||
|
||||
MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
|
||||
if (qscale >= 0) {
|
||||
ost->enc_ctx->flags |= AV_CODEC_FLAG_QSCALE;
|
||||
ost->enc_ctx->flags |= CODEC_FLAG_QSCALE;
|
||||
ost->enc_ctx->global_quality = FF_QP2LAMBDA * qscale;
|
||||
}
|
||||
|
||||
MATCH_PER_STREAM_OPT(disposition, str, ost->disposition, oc, st);
|
||||
ost->disposition = av_strdup(ost->disposition);
|
||||
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
ost->enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
ost->enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
av_dict_copy(&ost->sws_dict, o->g->sws_dict, 0);
|
||||
av_opt_get_int(o->g->sws_opts, "sws_flags", 0, &ost->sws_flags);
|
||||
|
||||
av_dict_copy(&ost->swr_opts, o->g->swr_opts, 0);
|
||||
if (ost->enc && av_get_exact_bits_per_sample(ost->enc->id) == 24)
|
||||
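Several hunks above and below differ only in whether encoder flags are spelled with the old unprefixed CODEC_FLAG_* names or their AV_CODEC_FLAG_* replacements. For code that has to build against both libavcodec generations, a compatibility shim along these lines is a common workaround; it is a sketch under that assumption, not part of this changeset:

#include <libavcodec/avcodec.h>

#ifndef AV_CODEC_FLAG_QSCALE /* old libavcodec: map the new names onto the old ones */
#define AV_CODEC_FLAG_QSCALE        CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_GLOBAL_HEADER CODEC_FLAG_GLOBAL_HEADER
#define AV_CODEC_FLAG_PSNR          CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PASS1         CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS2         CODEC_FLAG_PASS2
#endif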
@@ -1451,13 +1371,10 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
|
||||
exit_program(1);
|
||||
}
|
||||
/* FIXME realloc failure */
|
||||
video_enc->rc_override =
|
||||
av_realloc_array(video_enc->rc_override,
|
||||
i + 1, sizeof(RcOverride));
|
||||
if (!video_enc->rc_override) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Could not (re)allocate memory for rc_override.\n");
|
||||
exit_program(1);
|
||||
}
|
||||
av_realloc(video_enc->rc_override,
|
||||
sizeof(RcOverride) * (i + 1));
|
||||
video_enc->rc_override[i].start_frame = start;
|
||||
video_enc->rc_override[i].end_frame = end;
|
||||
if (q > 0) {
|
||||
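One side of the rc_override hunk above uses av_realloc_array() with an explicit failure check where the other uses a bare av_realloc(). A slightly more defensive variant of the checked pattern, shown with a hypothetical helper name rather than code from ffmpeg_opt.c, keeps the old array intact until the new allocation has succeeded:

#include <libavcodec/avcodec.h> /* RcOverride */
#include <libavutil/error.h>
#include <libavutil/mem.h>

static int rc_override_grow(RcOverride **arr, int nb) /* hypothetical helper */
{
    RcOverride *tmp = av_realloc_array(*arr, nb + 1, sizeof(*tmp));
    if (!tmp)
        return AVERROR(ENOMEM); /* *arr is untouched and can still be freed */
    *arr = tmp;
    return 0;
}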
@@ -1474,17 +1391,17 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
video_enc->rc_override_count = i;
|
||||
|
||||
if (do_psnr)
|
||||
video_enc->flags|= AV_CODEC_FLAG_PSNR;
|
||||
video_enc->flags|= CODEC_FLAG_PSNR;
|
||||
|
||||
/* two pass mode */
|
||||
MATCH_PER_STREAM_OPT(pass, i, do_pass, oc, st);
|
||||
if (do_pass) {
|
||||
if (do_pass & 1) {
|
||||
video_enc->flags |= AV_CODEC_FLAG_PASS1;
|
||||
video_enc->flags |= CODEC_FLAG_PASS1;
|
||||
av_dict_set(&ost->encoder_opts, "flags", "+pass1", AV_DICT_APPEND);
|
||||
}
|
||||
if (do_pass & 2) {
|
||||
video_enc->flags |= AV_CODEC_FLAG_PASS2;
|
||||
video_enc->flags |= CODEC_FLAG_PASS2;
|
||||
av_dict_set(&ost->encoder_opts, "flags", "+pass2", AV_DICT_APPEND);
|
||||
}
|
||||
}
|
||||
@@ -1494,40 +1411,6 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
!(ost->logfile_prefix = av_strdup(ost->logfile_prefix)))
|
||||
exit_program(1);
|
||||
|
||||
if (do_pass) {
|
||||
char logfilename[1024];
|
||||
FILE *f;
|
||||
|
||||
snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
|
||||
ost->logfile_prefix ? ost->logfile_prefix :
|
||||
DEFAULT_PASS_LOGFILENAME_PREFIX,
|
||||
i);
|
||||
if (!strcmp(ost->enc->name, "libx264")) {
|
||||
av_dict_set(&ost->encoder_opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
|
||||
} else {
|
||||
if (video_enc->flags & AV_CODEC_FLAG_PASS2) {
|
||||
char *logbuffer = read_file(logfilename);
|
||||
|
||||
if (!logbuffer) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
|
||||
logfilename);
|
||||
exit_program(1);
|
||||
}
|
||||
video_enc->stats_in = logbuffer;
|
||||
}
|
||||
if (video_enc->flags & AV_CODEC_FLAG_PASS1) {
|
||||
f = av_fopen_utf8(logfilename, "wb");
|
||||
if (!f) {
|
||||
av_log(NULL, AV_LOG_FATAL,
|
||||
"Cannot write log file '%s' for pass-1 encoding: %s\n",
|
||||
logfilename, strerror(errno));
|
||||
exit_program(1);
|
||||
}
|
||||
ost->logfile = f;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
MATCH_PER_STREAM_OPT(forced_key_frames, str, ost->forced_keyframes, oc, st);
|
||||
if (ost->forced_keyframes)
|
||||
ost->forced_keyframes = av_strdup(ost->forced_keyframes);
|
||||
@@ -1637,19 +1520,6 @@ static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc, int
|
||||
return ost;
|
||||
}
|
||||
|
||||
static OutputStream *new_unknown_stream(OptionsContext *o, AVFormatContext *oc, int source_index)
|
||||
{
|
||||
OutputStream *ost;
|
||||
|
||||
ost = new_output_stream(o, oc, AVMEDIA_TYPE_UNKNOWN, source_index);
|
||||
if (!ost->stream_copy) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Unknown stream encoding not supported yet (only streamcopy)\n");
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
return ost;
|
||||
}
|
||||
|
||||
static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc, int source_index)
|
||||
{
|
||||
OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT, source_index);
|
||||
@@ -1806,7 +1676,8 @@ static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
|
||||
{
|
||||
OutputStream *ost;
|
||||
|
||||
switch (ofilter->type) {
|
||||
switch (avfilter_pad_get_type(ofilter->out_tmp->filter_ctx->output_pads,
|
||||
ofilter->out_tmp->pad_idx)) {
|
||||
case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc, -1); break;
|
||||
case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc, -1); break;
|
||||
default:
|
||||
@@ -1839,19 +1710,11 @@ static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
avfilter_inout_free(&ofilter->out_tmp);
|
||||
}
|
||||
|
||||
static int init_complex_filters(void)
|
||||
{
|
||||
int i, ret = 0;
|
||||
|
||||
for (i = 0; i < nb_filtergraphs; i++) {
|
||||
ret = init_complex_filtergraph(filtergraphs[i]);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (configure_output_filter(ofilter->graph, ofilter, ofilter->out_tmp) < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Error configuring filter.\n");
|
||||
exit_program(1);
|
||||
}
|
||||
return 0;
|
||||
avfilter_inout_free(&ofilter->out_tmp);
|
||||
}
|
||||
|
||||
static int configure_complex_filters(void)
|
||||
@@ -1876,6 +1739,10 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
AVDictionary *unused_opts = NULL;
|
||||
AVDictionaryEntry *e = NULL;
|
||||
|
||||
if (configure_complex_filters() < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Error configuring filters.\n");
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
if (o->stop_time != INT64_MAX && o->recording_time != INT64_MAX) {
|
||||
o->stop_time = INT64_MAX;
|
||||
@@ -1930,7 +1797,8 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
if (!ofilter->out_tmp || ofilter->out_tmp->name)
|
||||
continue;
|
||||
|
||||
switch (ofilter->type) {
|
||||
switch (avfilter_pad_get_type(ofilter->out_tmp->filter_ctx->output_pads,
|
||||
ofilter->out_tmp->pad_idx)) {
|
||||
case AVMEDIA_TYPE_VIDEO: o->video_disable = 1; break;
|
||||
case AVMEDIA_TYPE_AUDIO: o->audio_disable = 1; break;
|
||||
case AVMEDIA_TYPE_SUBTITLE: o->subtitle_disable = 1; break;
|
||||
@@ -1989,7 +1857,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
for (i = 0; i < nb_input_streams; i++) {
|
||||
int new_area;
|
||||
ist = input_streams[i];
|
||||
new_area = ist->st->codec->width * ist->st->codec->height + 100000000*!!ist->st->codec_info_nb_frames;
|
||||
new_area = ist->st->codec->width * ist->st->codec->height;
|
||||
if((qcr!=MKTAG('A', 'P', 'I', 'C')) && (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
|
||||
new_area = 1;
|
||||
if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
|
||||
@@ -2006,14 +1874,12 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
|
||||
/* audio: most channels */
|
||||
if (!o->audio_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_AUDIO) != AV_CODEC_ID_NONE) {
|
||||
int best_score = 0, idx = -1;
|
||||
int channels = 0, idx = -1;
|
||||
for (i = 0; i < nb_input_streams; i++) {
|
||||
int score;
|
||||
ist = input_streams[i];
|
||||
score = ist->st->codec->channels + 100000000*!!ist->st->codec_info_nb_frames;
|
||||
if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
|
||||
score > best_score) {
|
||||
best_score = score;
|
||||
ist->st->codec->channels > channels) {
|
||||
channels = ist->st->codec->channels;
|
||||
idx = i;
|
||||
}
|
||||
}
|
||||
@@ -2049,15 +1915,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
}
|
||||
}
|
||||
}
|
||||
/* Data only if codec id match */
|
||||
if (!o->data_disable ) {
|
||||
enum AVCodecID codec_id = av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_DATA);
|
||||
for (i = 0; codec_id != AV_CODEC_ID_NONE && i < nb_input_streams; i++) {
|
||||
if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_DATA
|
||||
&& input_streams[i]->st->codec->codec_id == codec_id )
|
||||
new_data_stream(o, oc, i);
|
||||
}
|
||||
}
|
||||
/* do something with data? */
|
||||
} else {
|
||||
for (i = 0; i < o->nb_stream_maps; i++) {
|
||||
StreamMap *map = &o->stream_maps[i];
|
||||
@@ -2100,33 +1958,17 @@ loop_end:
|
||||
if(o-> data_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_DATA)
|
||||
continue;
|
||||
|
||||
ost = NULL;
|
||||
switch (ist->st->codec->codec_type) {
|
||||
case AVMEDIA_TYPE_VIDEO: ost = new_video_stream (o, oc, src_idx); break;
|
||||
case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream (o, oc, src_idx); break;
|
||||
case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream (o, oc, src_idx); break;
|
||||
case AVMEDIA_TYPE_DATA: ost = new_data_stream (o, oc, src_idx); break;
|
||||
case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc, src_idx); break;
|
||||
case AVMEDIA_TYPE_UNKNOWN:
|
||||
if (copy_unknown_streams) {
|
||||
ost = new_unknown_stream (o, oc, src_idx);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
av_log(NULL, ignore_unknown_streams ? AV_LOG_WARNING : AV_LOG_FATAL,
|
||||
"Cannot map stream #%d:%d - unsupported type.\n",
|
||||
av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d:%d - unsupported type.\n",
|
||||
map->file_index, map->stream_index);
|
||||
if (!ignore_unknown_streams) {
|
||||
av_log(NULL, AV_LOG_FATAL,
|
||||
"If you want unsupported types ignored instead "
|
||||
"of failing, please use the -ignore_unknown option\n"
|
||||
"If you want them copied, please use -copy_unknown\n");
|
||||
exit_program(1);
|
||||
}
|
||||
exit_program(1);
|
||||
}
|
||||
if (ost)
|
||||
ost->sync_ist = input_streams[ input_files[map->sync_file_index]->ist_index
|
||||
+ map->sync_stream_index];
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2156,15 +1998,15 @@ loop_end:
|
||||
avio_read(pb, attachment, len);
|
||||
|
||||
ost = new_attachment_stream(o, oc, -1);
|
||||
ost->stream_copy = 1;
|
||||
ost->stream_copy = 0;
|
||||
ost->attachment_filename = o->attachments[i];
|
||||
ost->finished = 1;
|
||||
ost->st->codec->extradata = attachment;
|
||||
ost->st->codec->extradata_size = len;
|
||||
ost->enc_ctx->extradata = attachment;
|
||||
ost->enc_ctx->extradata_size = len;
|
||||
|
||||
p = strrchr(o->attachments[i], '/');
|
||||
av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
|
||||
avio_closep(&pb);
|
||||
avio_close(pb);
|
||||
}
|
||||
|
||||
for (i = nb_output_streams - oc->nb_streams; i < nb_output_streams; i++) { //for all streams of this output file
|
||||
@@ -2178,12 +2020,6 @@ loop_end:
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
|
||||
av_dump_format(oc, nb_output_files - 1, oc->filename, 1);
|
||||
av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", nb_output_files - 1);
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
/* check if all codec options have been used */
|
||||
unused_opts = strip_specifiers(o->g->codec_opts);
|
||||
for (i = of->ost_index; i < nb_output_streams; i++) {
|
||||
@@ -2226,17 +2062,6 @@ loop_end:
|
||||
}
|
||||
av_dict_free(&unused_opts);
|
||||
|
||||
/* set the encoding/decoding_needed flags */
|
||||
for (i = of->ost_index; i < nb_output_streams; i++) {
|
||||
OutputStream *ost = output_streams[i];
|
||||
|
||||
ost->encoding_needed = !ost->stream_copy;
|
||||
if (ost->encoding_needed && ost->source_index >= 0) {
|
||||
InputStream *ist = input_streams[ost->source_index];
|
||||
ist->decoding_needed |= DECODING_FOR_OST;
|
||||
}
|
||||
}
|
||||
|
||||
/* check filename in case of an image number is expected */
|
||||
if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
|
||||
if (!av_filename_number_test(oc->filename)) {
|
||||
@@ -2245,12 +2070,6 @@ loop_end:
|
||||
}
|
||||
}
|
||||
|
||||
if (!(oc->oformat->flags & AVFMT_NOSTREAMS) && !input_stream_potentially_available) {
|
||||
av_log(NULL, AV_LOG_ERROR,
|
||||
"No input streams but output needs an input stream\n");
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
if (!(oc->oformat->flags & AVFMT_NOFILE)) {
|
||||
/* test if it already exists to avoid losing precious files */
|
||||
assert_file_overwrite(filename);
|
||||
@@ -2319,11 +2138,8 @@ loop_end:
|
||||
continue;
|
||||
ist = input_streams[output_streams[i]->source_index];
|
||||
av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
|
||||
if (!output_streams[i]->stream_copy) {
|
||||
if (!output_streams[i]->stream_copy)
|
||||
av_dict_set(&output_streams[i]->st->metadata, "encoder", NULL, 0);
|
||||
if (ist->autorotate)
|
||||
av_dict_set(&output_streams[i]->st->metadata, "rotate", NULL, 0);
|
||||
}
|
||||
}
|
||||
|
||||
/* process manually set metadata */
|
||||
@@ -2332,7 +2148,6 @@ loop_end:
|
||||
char type, *val;
|
||||
const char *stream_spec;
|
||||
int index = 0, j, ret = 0;
|
||||
char now_time[256];
|
||||
|
||||
val = strchr(o->metadata[i].u.str, '=');
|
||||
if (!val) {
|
||||
@@ -2342,26 +2157,11 @@ loop_end:
|
||||
}
|
||||
*val++ = 0;
|
||||
|
||||
if (!strcmp(o->metadata[i].u.str, "creation_time") &&
|
||||
!strcmp(val, "now")) {
|
||||
time_t now = time(0);
|
||||
struct tm *ptm, tmbuf;
|
||||
ptm = localtime_r(&now, &tmbuf);
|
||||
if (ptm) {
|
||||
if (strftime(now_time, sizeof(now_time), "%Y-%m-%d %H:%M:%S", ptm))
|
||||
val = now_time;
|
||||
}
|
||||
}
|
||||
|
||||
parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec);
|
||||
if (type == 's') {
|
||||
for (j = 0; j < oc->nb_streams; j++) {
|
||||
ost = output_streams[nb_output_streams - oc->nb_streams + j];
|
||||
if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) {
|
||||
av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0);
|
||||
if (!strcmp(o->metadata[i].u.str, "rotate")) {
|
||||
ost->rotate_overridden = 1;
|
||||
}
|
||||
} else if (ret < 0)
|
||||
exit_program(1);
|
||||
}
|
||||
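One side of the metadata hunk above expands the literal value "now" for creation_time into a formatted local timestamp via localtime_r()/strftime(); the same pattern in isolation, as a standalone sketch rather than ffmpeg code:

#include <stdio.h>
#include <time.h>

int main(void)
{
    char now_time[256];
    time_t now = time(0);
    struct tm tmbuf, *ptm = localtime_r(&now, &tmbuf);

    if (ptm && strftime(now_time, sizeof(now_time), "%Y-%m-%d %H:%M:%S", ptm))
        printf("creation_time=%s\n", now_time);
    return 0;
}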
@@ -2448,9 +2248,9 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
opt_default(NULL, "g", norm == PAL ? "15" : "18");
|
||||
|
||||
opt_default(NULL, "b:v", "1150000");
|
||||
opt_default(NULL, "maxrate:v", "1150000");
|
||||
opt_default(NULL, "minrate:v", "1150000");
|
||||
opt_default(NULL, "bufsize:v", "327680"); // 40*1024*8;
|
||||
opt_default(NULL, "maxrate", "1150000");
|
||||
opt_default(NULL, "minrate", "1150000");
|
||||
opt_default(NULL, "bufsize", "327680"); // 40*1024*8;
|
||||
|
||||
opt_default(NULL, "b:a", "224000");
|
||||
parse_option(o, "ar", "44100", options);
|
||||
@@ -2477,9 +2277,9 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
opt_default(NULL, "g", norm == PAL ? "15" : "18");
|
||||
|
||||
opt_default(NULL, "b:v", "2040000");
|
||||
opt_default(NULL, "maxrate:v", "2516000");
|
||||
opt_default(NULL, "minrate:v", "0"); // 1145000;
|
||||
opt_default(NULL, "bufsize:v", "1835008"); // 224*1024*8;
|
||||
opt_default(NULL, "maxrate", "2516000");
|
||||
opt_default(NULL, "minrate", "0"); // 1145000;
|
||||
opt_default(NULL, "bufsize", "1835008"); // 224*1024*8;
|
||||
opt_default(NULL, "scan_offset", "1");
|
||||
|
||||
opt_default(NULL, "b:a", "224000");
|
||||
@@ -2499,9 +2299,9 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
opt_default(NULL, "g", norm == PAL ? "15" : "18");
|
||||
|
||||
opt_default(NULL, "b:v", "6000000");
|
||||
opt_default(NULL, "maxrate:v", "9000000");
|
||||
opt_default(NULL, "minrate:v", "0"); // 1500000;
|
||||
opt_default(NULL, "bufsize:v", "1835008"); // 224*1024*8;
|
||||
opt_default(NULL, "maxrate", "9000000");
|
||||
opt_default(NULL, "minrate", "0"); // 1500000;
|
||||
opt_default(NULL, "bufsize", "1835008"); // 224*1024*8;
|
||||
|
||||
opt_default(NULL, "packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
|
||||
opt_default(NULL, "muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
|
||||
@@ -2545,11 +2345,6 @@ static int opt_vstats(void *optctx, const char *opt, const char *arg)
|
||||
time_t today2 = time(NULL);
|
||||
struct tm *today = localtime(&today2);
|
||||
|
||||
if (!today) { // maybe tomorrow
|
||||
av_log(NULL, AV_LOG_FATAL, "Unable to get current time: %s\n", strerror(errno));
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
|
||||
today->tm_sec);
|
||||
return opt_vstats_file(NULL, opt, filename);
|
||||
@@ -2779,9 +2574,6 @@ static int opt_filter_complex(void *optctx, const char *opt, const char *arg)
|
||||
filtergraphs[nb_filtergraphs - 1]->graph_desc = av_strdup(arg);
|
||||
if (!filtergraphs[nb_filtergraphs - 1]->graph_desc)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
input_stream_potentially_available = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -2796,9 +2588,6 @@ static int opt_filter_complex_script(void *optctx, const char *opt, const char *
|
||||
return AVERROR(ENOMEM);
|
||||
filtergraphs[nb_filtergraphs - 1]->index = nb_filtergraphs - 1;
|
||||
filtergraphs[nb_filtergraphs - 1]->graph_desc = graph_desc;
|
||||
|
||||
input_stream_potentially_available = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -2950,13 +2739,6 @@ int ffmpeg_parse_options(int argc, char **argv)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* create the complex filtergraphs */
|
||||
ret = init_complex_filters();
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Error initializing complex filters.\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* open output files */
|
||||
ret = open_files(&octx.groups[GROUP_OUTFILE], "output", open_output_file);
|
||||
if (ret < 0) {
|
||||
@@ -2964,13 +2746,6 @@ int ffmpeg_parse_options(int argc, char **argv)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* configure the complex filtergraphs */
|
||||
ret = configure_complex_filters();
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Error configuring complex filters.\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
fail:
|
||||
uninit_parse_context(&octx);
|
||||
if (ret < 0) {
|
||||
@@ -3008,10 +2783,6 @@ const OptionDef options[] = {
|
||||
"overwrite output files" },
|
||||
{ "n", OPT_BOOL, { &no_file_overwrite },
|
||||
"never overwrite output files" },
|
||||
{ "ignore_unknown", OPT_BOOL, { &ignore_unknown_streams },
|
||||
"Ignore unknown stream types" },
|
||||
{ "copy_unknown", OPT_BOOL | OPT_EXPERT, { ©_unknown_streams },
|
||||
"Copy unknown stream types" },
|
||||
{ "c", HAS_ARG | OPT_STRING | OPT_SPEC |
|
||||
OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(codec_names) },
|
||||
"codec name", "codec" },
|
||||
@@ -3045,12 +2816,6 @@ const OptionDef options[] = {
|
||||
{ "ss", HAS_ARG | OPT_TIME | OPT_OFFSET |
|
||||
OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(start_time) },
|
||||
"set the start time offset", "time_off" },
|
||||
{ "sseof", HAS_ARG | OPT_TIME | OPT_OFFSET |
|
||||
OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(start_time_eof) },
|
||||
"set the start time offset relative to EOF", "time_off" },
|
||||
{ "seek_timestamp", HAS_ARG | OPT_INT | OPT_OFFSET |
|
||||
OPT_INPUT, { .off = OFFSET(seek_timestamp) },
|
||||
"enable/disable seeking by timestamp with -ss" },
|
||||
{ "accurate_seek", OPT_BOOL | OPT_OFFSET | OPT_EXPERT |
|
||||
OPT_INPUT, { .off = OFFSET(accurate_seek) },
|
||||
"enable/disable accurate seeking with -ss" },
|
||||
@@ -3060,7 +2825,7 @@ const OptionDef options[] = {
|
||||
{ "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC |
|
||||
OPT_EXPERT | OPT_INPUT, { .off = OFFSET(ts_scale) },
|
||||
"set the input ts scale", "scale" },
|
||||
{ "timestamp", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_recording_timestamp },
|
||||
{ "timestamp", HAS_ARG | OPT_PERFILE, { .func_arg = opt_recording_timestamp },
|
||||
"set the recording timestamp ('now' to set the current time)", "time" },
|
||||
{ "metadata", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(metadata) },
|
||||
"add metadata", "string=string" },
|
||||
@@ -3085,12 +2850,10 @@ const OptionDef options[] = {
|
||||
OPT_INPUT, { .off = OFFSET(rate_emu) },
|
||||
"read input at native frame rate", "" },
|
||||
{ "target", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_target },
|
||||
"specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\" or \"dv50\" "
|
||||
"with optional prefixes \"pal-\", \"ntsc-\" or \"film-\")", "type" },
|
||||
"specify target file type (\"vcd\", \"svcd\", \"dvd\","
|
||||
" \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
|
||||
{ "vsync", HAS_ARG | OPT_EXPERT, { opt_vsync },
|
||||
"video sync method", "" },
|
||||
{ "frame_drop_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &frame_drop_threshold },
|
||||
"frame drop threshold", "" },
|
||||
{ "async", HAS_ARG | OPT_INT | OPT_EXPERT, { &audio_sync_method },
|
||||
"audio sync method", "" },
|
||||
{ "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &audio_drift_threshold },
|
||||
@@ -3158,12 +2921,6 @@ const OptionDef options[] = {
|
||||
{ "discard", OPT_STRING | HAS_ARG | OPT_SPEC |
|
||||
OPT_INPUT, { .off = OFFSET(discard) },
|
||||
"discard", "" },
|
||||
{ "disposition", OPT_STRING | HAS_ARG | OPT_SPEC |
|
||||
OPT_OUTPUT, { .off = OFFSET(disposition) },
|
||||
"disposition", "" },
|
||||
{ "thread_queue_size", HAS_ARG | OPT_INT | OPT_OFFSET | OPT_EXPERT | OPT_INPUT,
|
||||
{ .off = OFFSET(thread_queue_size) },
|
||||
"set the maximum number of queued packets from the demuxer" },
|
||||
|
||||
/* video options */
|
||||
{ "vframes", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_frames },
|
||||
@@ -3226,7 +2983,7 @@ const OptionDef options[] = {
|
||||
OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(top_field_first) },
|
||||
"top=1/bottom=0/auto=-1 field first", "" },
|
||||
{ "vtag", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
|
||||
OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_old2new },
|
||||
OPT_OUTPUT, { .func_arg = opt_old2new },
|
||||
"force video tag/fourcc", "fourcc/tag" },
|
||||
{ "qphist", OPT_VIDEO | OPT_BOOL | OPT_EXPERT , { &qp_hist },
|
||||
"show QP histogram" },
|
||||
@@ -3248,18 +3005,10 @@ const OptionDef options[] = {
|
||||
"use HW accelerated decoding", "hwaccel name" },
|
||||
{ "hwaccel_device", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
|
||||
OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_devices) },
|
||||
"select a device for HW acceleration", "devicename" },
|
||||
"select a device for HW acceleration" "devicename" },
|
||||
#if HAVE_VDPAU_X11
|
||||
{ "vdpau_api_ver", HAS_ARG | OPT_INT | OPT_EXPERT, { &vdpau_api_ver }, "" },
|
||||
#endif
|
||||
#if CONFIG_VDA || CONFIG_VIDEOTOOLBOX
|
||||
{ "videotoolbox_pixfmt", HAS_ARG | OPT_STRING | OPT_EXPERT, { &videotoolbox_pixfmt}, "" },
|
||||
#endif
|
||||
{ "hwaccels", OPT_EXIT, { .func_arg = show_hwaccels },
|
||||
"show available HW acceleration methods" },
|
||||
{ "autorotate", HAS_ARG | OPT_BOOL | OPT_SPEC |
|
||||
OPT_EXPERT | OPT_INPUT, { .off = OFFSET(autorotate) },
|
||||
"automatically insert correct rotate filters" },
|
||||
|
||||
/* audio options */
|
||||
{ "aframes", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_frames },
|
||||
@@ -3319,8 +3068,6 @@ const OptionDef options[] = {
|
||||
"set the initial demux-decode delay", "seconds" },
|
||||
{ "override_ffserver", OPT_BOOL | OPT_EXPERT | OPT_OUTPUT, { &override_ffserver },
|
||||
"override the options from ffserver", "" },
|
||||
{ "sdp_file", HAS_ARG | OPT_EXPERT | OPT_OUTPUT, { opt_sdp_file },
|
||||
"specify a file in which to print sdp information", "file" },
|
||||
|
||||
{ "bsf", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(bitstream_filters) },
|
||||
"A comma-separated list of bitstream filters", "bitstream_filters" },
|
||||
|
ffmpeg_vda.c (new file, 134 lines)
@@ -0,0 +1,134 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "libavcodec/avcodec.h"
|
||||
#include "libavcodec/vda.h"
|
||||
#include "libavutil/imgutils.h"
|
||||
|
||||
#include "ffmpeg.h"
|
||||
|
||||
typedef struct VDAContext {
|
||||
AVFrame *tmp_frame;
|
||||
} VDAContext;
|
||||
|
||||
static int vda_retrieve_data(AVCodecContext *s, AVFrame *frame)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
VDAContext *vda = ist->hwaccel_ctx;
|
||||
CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
|
||||
OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
|
||||
CVReturn err;
|
||||
uint8_t *data[4] = { 0 };
|
||||
int linesize[4] = { 0 };
|
||||
int planes, ret, i;
|
||||
|
||||
av_frame_unref(vda->tmp_frame);
|
||||
|
||||
switch (pixel_format) {
|
||||
case kCVPixelFormatType_420YpCbCr8Planar: vda->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
|
||||
case kCVPixelFormatType_422YpCbCr8: vda->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
|
||||
default:
|
||||
av_log(NULL, AV_LOG_ERROR,
|
||||
"Unsupported pixel format: %u\n", pixel_format);
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
|
||||
vda->tmp_frame->width = frame->width;
|
||||
vda->tmp_frame->height = frame->height;
|
||||
ret = av_frame_get_buffer(vda->tmp_frame, 32);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
err = CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
|
||||
if (err != kCVReturnSuccess) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
|
||||
return AVERROR_UNKNOWN;
|
||||
}
|
||||
|
||||
if (CVPixelBufferIsPlanar(pixbuf)) {
|
||||
|
||||
planes = CVPixelBufferGetPlaneCount(pixbuf);
|
||||
for (i = 0; i < planes; i++) {
|
||||
data[i] = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
|
||||
linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
|
||||
}
|
||||
} else {
|
||||
data[0] = CVPixelBufferGetBaseAddress(pixbuf);
|
||||
linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
|
||||
}
|
||||
|
||||
av_image_copy(vda->tmp_frame->data, vda->tmp_frame->linesize,
|
||||
data, linesize, vda->tmp_frame->format,
|
||||
frame->width, frame->height);
|
||||
|
||||
ret = av_frame_copy_props(vda->tmp_frame, frame);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
av_frame_unref(frame);
|
||||
av_frame_move_ref(frame, vda->tmp_frame);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vda_uninit(AVCodecContext *s)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
VDAContext *vda = ist->hwaccel_ctx;
|
||||
|
||||
ist->hwaccel_uninit = NULL;
|
||||
ist->hwaccel_retrieve_data = NULL;
|
||||
|
||||
av_frame_free(&vda->tmp_frame);
|
||||
|
||||
av_vda_default_free(s);
|
||||
av_freep(&ist->hwaccel_ctx);
|
||||
}
|
||||
|
||||
int vda_init(AVCodecContext *s)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
|
||||
VDAContext *vda;
|
||||
int ret;
|
||||
|
||||
vda = av_mallocz(sizeof(*vda));
|
||||
if (!vda)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
ist->hwaccel_ctx = vda;
|
||||
ist->hwaccel_uninit = vda_uninit;
|
||||
ist->hwaccel_retrieve_data = vda_retrieve_data;
|
||||
|
||||
vda->tmp_frame = av_frame_alloc();
|
||||
if (!vda->tmp_frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ret = av_vda_default_init(s);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, loglevel, "Error creating VDA decoder.\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
vda_uninit(s);
|
||||
return ret;
|
||||
}
|
@@ -100,14 +100,9 @@ static int vdpau_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
|
||||
VDPAUContext *ctx = ist->hwaccel_ctx;
|
||||
VdpVideoSurface *surface;
|
||||
VdpStatus err;
|
||||
VdpChromaType chroma;
|
||||
uint32_t width, height;
|
||||
|
||||
av_assert0(frame->format == AV_PIX_FMT_VDPAU);
|
||||
|
||||
if (av_vdpau_get_surface_parameters(s, &chroma, &width, &height))
|
||||
return AVERROR(ENOSYS);
|
||||
|
||||
surface = av_malloc(sizeof(*surface));
|
||||
if (!surface)
|
||||
return AVERROR(ENOMEM);
|
||||
@@ -123,8 +118,8 @@ static int vdpau_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
|
||||
// properly we should keep a pool of surfaces instead of creating
|
||||
// them anew for each frame, but since we don't care about speed
|
||||
// much in this code, we don't bother
|
||||
err = ctx->video_surface_create(ctx->device, chroma, width, height,
|
||||
surface);
|
||||
err = ctx->video_surface_create(ctx->device, VDP_CHROMA_TYPE_420,
|
||||
frame->width, frame->height, surface);
|
||||
if (err != VDP_STATUS_OK) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error allocating a VDPAU video surface: %s\n",
|
||||
ctx->get_error_string(err));
|
||||
@@ -289,8 +284,7 @@ do {
|
||||
|
||||
s->hwaccel_context = vdpau_ctx;
|
||||
} else
|
||||
if (av_vdpau_bind_context(s, ctx->device, ctx->get_proc_address,
|
||||
AV_HWACCEL_FLAG_IGNORE_LEVEL))
|
||||
if (av_vdpau_bind_context(s, ctx->device, ctx->get_proc_address, 0))
|
||||
goto fail;
|
||||
|
||||
ctx->get_information_string(&vendor);
|
||||
|
@@ -1,187 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include <CoreServices/CoreServices.h>
|
||||
|
||||
#include "config.h"
|
||||
#include "libavcodec/avcodec.h"
|
||||
#if CONFIG_VDA
|
||||
# include "libavcodec/vda.h"
|
||||
#endif
|
||||
#if CONFIG_VIDEOTOOLBOX
|
||||
# include "libavcodec/videotoolbox.h"
|
||||
#endif
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "ffmpeg.h"
|
||||
|
||||
typedef struct VTContext {
|
||||
AVFrame *tmp_frame;
|
||||
} VTContext;
|
||||
|
||||
char *videotoolbox_pixfmt;
|
||||
|
||||
static int videotoolbox_retrieve_data(AVCodecContext *s, AVFrame *frame)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
VTContext *vt = ist->hwaccel_ctx;
|
||||
CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
|
||||
OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
|
||||
CVReturn err;
|
||||
uint8_t *data[4] = { 0 };
|
||||
int linesize[4] = { 0 };
|
||||
int planes, ret, i;
|
||||
char codec_str[32];
|
||||
|
||||
av_frame_unref(vt->tmp_frame);
|
||||
|
||||
switch (pixel_format) {
|
||||
case kCVPixelFormatType_420YpCbCr8Planar: vt->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
|
||||
case kCVPixelFormatType_422YpCbCr8: vt->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
|
||||
case kCVPixelFormatType_32BGRA: vt->tmp_frame->format = AV_PIX_FMT_BGRA; break;
|
||||
#ifdef kCFCoreFoundationVersionNumber10_7
|
||||
case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange: vt->tmp_frame->format = AV_PIX_FMT_NV12; break;
|
||||
#endif
|
||||
default:
|
||||
av_get_codec_tag_string(codec_str, sizeof(codec_str), s->codec_tag);
|
||||
av_log(NULL, AV_LOG_ERROR,
|
||||
"%s: Unsupported pixel format: %s\n", codec_str, videotoolbox_pixfmt);
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
|
||||
vt->tmp_frame->width = frame->width;
|
||||
vt->tmp_frame->height = frame->height;
|
||||
ret = av_frame_get_buffer(vt->tmp_frame, 32);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
err = CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
|
||||
if (err != kCVReturnSuccess) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
|
||||
return AVERROR_UNKNOWN;
|
||||
}
|
||||
|
||||
if (CVPixelBufferIsPlanar(pixbuf)) {
|
||||
|
||||
planes = CVPixelBufferGetPlaneCount(pixbuf);
|
||||
for (i = 0; i < planes; i++) {
|
||||
data[i] = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
|
||||
linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
|
||||
}
|
||||
} else {
|
||||
data[0] = CVPixelBufferGetBaseAddress(pixbuf);
|
||||
linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
|
||||
}
|
||||
|
||||
av_image_copy(vt->tmp_frame->data, vt->tmp_frame->linesize,
|
||||
(const uint8_t **)data, linesize, vt->tmp_frame->format,
|
||||
frame->width, frame->height);
|
||||
|
||||
ret = av_frame_copy_props(vt->tmp_frame, frame);
|
||||
CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
av_frame_unref(frame);
|
||||
av_frame_move_ref(frame, vt->tmp_frame);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void videotoolbox_uninit(AVCodecContext *s)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
VTContext *vt = ist->hwaccel_ctx;
|
||||
|
||||
ist->hwaccel_uninit = NULL;
|
||||
ist->hwaccel_retrieve_data = NULL;
|
||||
|
||||
av_frame_free(&vt->tmp_frame);
|
||||
|
||||
if (ist->hwaccel_id == HWACCEL_VIDEOTOOLBOX) {
|
||||
#if CONFIG_VIDEOTOOLBOX
|
||||
av_videotoolbox_default_free(s);
|
||||
#endif
|
||||
} else {
|
||||
#if CONFIG_VDA
|
||||
av_vda_default_free(s);
|
||||
#endif
|
||||
}
|
||||
av_freep(&ist->hwaccel_ctx);
|
||||
}
|
||||
|
||||
int videotoolbox_init(AVCodecContext *s)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
|
||||
int ret = 0;
|
||||
VTContext *vt;
|
||||
|
||||
vt = av_mallocz(sizeof(*vt));
|
||||
if (!vt)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
ist->hwaccel_ctx = vt;
|
||||
ist->hwaccel_uninit = videotoolbox_uninit;
|
||||
ist->hwaccel_retrieve_data = videotoolbox_retrieve_data;
|
||||
|
||||
vt->tmp_frame = av_frame_alloc();
|
||||
if (!vt->tmp_frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (ist->hwaccel_id == HWACCEL_VIDEOTOOLBOX) {
|
||||
#if CONFIG_VIDEOTOOLBOX
|
||||
if (!videotoolbox_pixfmt) {
|
||||
ret = av_videotoolbox_default_init(s);
|
||||
} else {
|
||||
AVVideotoolboxContext *vtctx = av_videotoolbox_alloc_context();
|
||||
CFStringRef pixfmt_str = CFStringCreateWithCString(kCFAllocatorDefault,
|
||||
videotoolbox_pixfmt,
|
||||
kCFStringEncodingUTF8);
|
||||
vtctx->cv_pix_fmt_type = UTGetOSTypeFromString(pixfmt_str);
|
||||
ret = av_videotoolbox_default_init2(s, vtctx);
|
||||
CFRelease(pixfmt_str);
|
||||
}
|
||||
#endif
|
||||
} else {
|
||||
#if CONFIG_VDA
|
||||
if (!videotoolbox_pixfmt) {
|
||||
ret = av_vda_default_init(s);
|
||||
} else {
|
||||
AVVDAContext *vdactx = av_vda_alloc_context();
|
||||
CFStringRef pixfmt_str = CFStringCreateWithCString(kCFAllocatorDefault,
|
||||
videotoolbox_pixfmt,
|
||||
kCFStringEncodingUTF8);
|
||||
vdactx->cv_pix_fmt_type = UTGetOSTypeFromString(pixfmt_str);
|
||||
ret = av_vda_default_init2(s, vdactx);
|
||||
CFRelease(pixfmt_str);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
if (ret < 0) {
|
||||
av_log(NULL, loglevel,
|
||||
"Error creating %s decoder.\n", ist->hwaccel_id == HWACCEL_VIDEOTOOLBOX ? "Videotoolbox" : "VDA");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
videotoolbox_uninit(s);
|
||||
return ret;
|
||||
}
|
ffplay.c
@@ -31,7 +31,7 @@
|
||||
#include <stdint.h>
|
||||
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/eval.h"
|
||||
#include "libavutil/colorspace.h"
|
||||
#include "libavutil/mathematics.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "libavutil/imgutils.h"
|
||||
@@ -65,9 +65,7 @@ const char program_name[] = "ffplay";
|
||||
const int program_birth_year = 2003;
|
||||
|
||||
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
|
||||
#define MIN_FRAMES 25
|
||||
#define EXTERNAL_CLOCK_MIN_FRAMES 2
|
||||
#define EXTERNAL_CLOCK_MAX_FRAMES 10
|
||||
#define MIN_FRAMES 5
|
||||
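/* Note: with MIN_FRAMES = 5, MIN_FRAMES/2 and MIN_FRAMES*2 are 2 and 10,
 * the same thresholds that EXTERNAL_CLOCK_MIN_FRAMES and
 * EXTERNAL_CLOCK_MAX_FRAMES spell out explicitly in
 * check_external_clock_speed() further down. */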
|
||||
/* Minimum SDL audio buffer size, in samples. */
|
||||
#define SDL_AUDIO_MIN_BUFFER_SIZE 512
|
||||
@@ -103,7 +101,7 @@ const int program_birth_year = 2003;
|
||||
|
||||
#define CURSOR_HIDE_DELAY 1000000
|
||||
|
||||
static unsigned sws_flags = SWS_BICUBIC;
|
||||
static int64_t sws_flags = SWS_BICUBIC;
|
||||
|
||||
typedef struct MyAVPacketList {
|
||||
AVPacket pkt;
|
||||
@@ -187,18 +185,21 @@ typedef struct Decoder {
|
||||
AVCodecContext *avctx;
|
||||
int pkt_serial;
|
||||
int finished;
|
||||
int flushed;
|
||||
int packet_pending;
|
||||
SDL_cond *empty_queue_cond;
|
||||
int64_t start_pts;
|
||||
AVRational start_pts_tb;
|
||||
int64_t next_pts;
|
||||
AVRational next_pts_tb;
|
||||
SDL_Thread *decoder_tid;
|
||||
} Decoder;
|
||||
|
||||
typedef struct VideoState {
|
||||
SDL_Thread *read_tid;
|
||||
SDL_Thread *video_tid;
|
||||
SDL_Thread *audio_tid;
|
||||
AVInputFormat *iformat;
|
||||
int no_background;
|
||||
int abort_request;
|
||||
int force_refresh;
|
||||
int paused;
|
||||
@@ -224,9 +225,6 @@ typedef struct VideoState {
|
||||
Decoder viddec;
|
||||
Decoder subdec;
|
||||
|
||||
int viddec_width;
|
||||
int viddec_height;
|
||||
|
||||
int audio_stream;
|
||||
|
||||
int av_sync_type;
|
||||
@@ -268,6 +266,7 @@ typedef struct VideoState {
|
||||
int xpos;
|
||||
double last_vis_time;
|
||||
|
||||
SDL_Thread *subtitle_tid;
|
||||
int subtitle_stream;
|
||||
AVStream *subtitle_st;
|
||||
PacketQueue subtitleq;
|
||||
@@ -282,9 +281,7 @@ typedef struct VideoState {
|
||||
#if !CONFIG_AVFILTER
|
||||
struct SwsContext *img_convert_ctx;
|
||||
#endif
|
||||
struct SwsContext *sub_convert_ctx;
|
||||
SDL_Rect last_display_rect;
|
||||
int eof;
|
||||
|
||||
char filename[1024];
|
||||
int width, height, xleft, ytop;
|
||||
@@ -317,7 +314,11 @@ static int screen_height = 0;
|
||||
static int audio_disable;
|
||||
static int video_disable;
|
||||
static int subtitle_disable;
|
||||
static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
|
||||
static int wanted_stream[AVMEDIA_TYPE_NB] = {
|
||||
[AVMEDIA_TYPE_AUDIO] = -1,
|
||||
[AVMEDIA_TYPE_VIDEO] = -1,
|
||||
[AVMEDIA_TYPE_SUBTITLE] = -1,
|
||||
};
|
||||
static int seek_by_bytes = -1;
|
||||
static int display_disable;
|
||||
static int show_status = 1;
|
||||
@@ -547,6 +548,8 @@ static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue,
|
||||
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
|
||||
int got_frame = 0;
|
||||
|
||||
d->flushed = 0;
|
||||
|
||||
do {
|
||||
int ret = -1;
|
||||
|
||||
@@ -563,6 +566,7 @@ static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
|
||||
if (pkt.data == flush_pkt.data) {
|
||||
avcodec_flush_buffers(d->avctx);
|
||||
d->finished = 0;
|
||||
d->flushed = 1;
|
||||
d->next_pts = d->start_pts;
|
||||
d->next_pts_tb = d->start_pts_tb;
|
||||
}
|
||||
@@ -773,15 +777,6 @@ static int64_t frame_queue_last_pos(FrameQueue *f)
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void decoder_abort(Decoder *d, FrameQueue *fq)
|
||||
{
|
||||
packet_queue_abort(d->queue);
|
||||
frame_queue_signal(fq);
|
||||
SDL_WaitThread(d->decoder_tid, NULL);
|
||||
d->decoder_tid = NULL;
|
||||
packet_queue_flush(d->queue);
|
||||
}
|
||||
|
||||
static inline void fill_rectangle(SDL_Surface *screen,
|
||||
int x, int y, int w, int h, int color, int update)
|
||||
{
|
||||
@@ -834,50 +829,229 @@ static void fill_border(int xleft, int ytop, int width, int height, int x, int y
|
||||
#define ALPHA_BLEND(a, oldp, newp, s)\
|
||||
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
|
||||
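ALPHA_BLEND above is the core of the subtitle blending code that follows; a quick standalone check of what it computes (a sketch, not part of either ffplay.c revision):

#include <stdio.h>

/* same macro as above, repeated so this sketch compiles on its own */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

int main(void)
{
    /* with s = 0 this is a plain 8-bit alpha blend:
     * a = 0 keeps the old pixel, a = 255 takes the new one */
    printf("%d %d %d\n",
           ALPHA_BLEND(0,   100, 200, 0),  /* 100                */
           ALPHA_BLEND(255, 100, 200, 0),  /* 200                */
           ALPHA_BLEND(128, 100, 200, 0)); /* 150 (rounded down) */
    return 0;
}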
|
||||
#define RGBA_IN(r, g, b, a, s)\
|
||||
{\
|
||||
unsigned int v = ((const uint32_t *)(s))[0];\
|
||||
a = (v >> 24) & 0xff;\
|
||||
r = (v >> 16) & 0xff;\
|
||||
g = (v >> 8) & 0xff;\
|
||||
b = v & 0xff;\
|
||||
}
|
||||
|
||||
#define YUVA_IN(y, u, v, a, s, pal)\
|
||||
{\
|
||||
unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
|
||||
a = (val >> 24) & 0xff;\
|
||||
y = (val >> 16) & 0xff;\
|
||||
u = (val >> 8) & 0xff;\
|
||||
v = val & 0xff;\
|
||||
}
|
||||
|
||||
#define YUVA_OUT(d, y, u, v, a)\
|
||||
{\
|
||||
((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
|
||||
}
|
||||
|
||||
|
||||
#define BPP 1
|
||||
|
||||
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
|
||||
{
|
||||
int x, y, Y, U, V, A;
|
||||
int wrap, wrap3, width2, skip2;
|
||||
int y, u, v, a, u1, v1, a1, w, h;
|
||||
uint8_t *lum, *cb, *cr;
|
||||
const uint8_t *p;
|
||||
const uint32_t *pal;
|
||||
int dstx, dsty, dstw, dsth;
|
||||
const AVPicture *src = &rect->pict;
|
||||
|
||||
dstw = av_clip(rect->w, 0, imgw);
|
||||
dsth = av_clip(rect->h, 0, imgh);
|
||||
dstx = av_clip(rect->x, 0, imgw - dstw);
|
||||
dsty = av_clip(rect->y, 0, imgh - dsth);
|
||||
lum = dst->data[0] + dstx + dsty * dst->linesize[0];
|
||||
cb = dst->data[1] + dstx/2 + (dsty >> 1) * dst->linesize[1];
|
||||
cr = dst->data[2] + dstx/2 + (dsty >> 1) * dst->linesize[2];
|
||||
lum = dst->data[0] + dsty * dst->linesize[0];
|
||||
cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
|
||||
cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
|
||||
|
||||
for (y = 0; y<dsth; y++) {
|
||||
for (x = 0; x<dstw; x++) {
|
||||
Y = src->data[0][x + y*src->linesize[0]];
|
||||
A = src->data[3][x + y*src->linesize[3]];
|
||||
lum[0] = ALPHA_BLEND(A, lum[0], Y, 0);
|
||||
lum++;
|
||||
}
|
||||
lum += dst->linesize[0] - dstw;
|
||||
}
|
||||
width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
|
||||
skip2 = dstx >> 1;
|
||||
wrap = dst->linesize[0];
|
||||
wrap3 = rect->pict.linesize[0];
|
||||
p = rect->pict.data[0];
|
||||
pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
|
||||
|
||||
for (y = 0; y<dsth/2; y++) {
|
||||
for (x = 0; x<dstw/2; x++) {
|
||||
U = src->data[1][x + y*src->linesize[1]];
|
||||
V = src->data[2][x + y*src->linesize[2]];
|
||||
A = src->data[3][2*x + 2*y *src->linesize[3]]
|
||||
+ src->data[3][2*x + 1 + 2*y *src->linesize[3]]
|
||||
+ src->data[3][2*x + 1 + (2*y+1)*src->linesize[3]]
|
||||
+ src->data[3][2*x + (2*y+1)*src->linesize[3]];
|
||||
cb[0] = ALPHA_BLEND(A>>2, cb[0], U, 0);
|
||||
cr[0] = ALPHA_BLEND(A>>2, cr[0], V, 0);
|
||||
if (dsty & 1) {
|
||||
lum += dstx;
|
||||
cb += skip2;
|
||||
cr += skip2;
|
||||
|
||||
if (dstx & 1) {
|
||||
YUVA_IN(y, u, v, a, p, pal);
|
||||
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
|
||||
cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
|
||||
cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
|
||||
cb++;
|
||||
cr++;
|
||||
lum++;
|
||||
p += BPP;
|
||||
}
|
||||
for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
|
||||
YUVA_IN(y, u, v, a, p, pal);
|
||||
u1 = u;
|
||||
v1 = v;
|
||||
a1 = a;
|
||||
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
|
||||
|
||||
YUVA_IN(y, u, v, a, p + BPP, pal);
|
||||
u1 += u;
|
||||
v1 += v;
|
||||
a1 += a;
|
||||
lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
|
||||
cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
|
||||
cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
|
||||
cb++;
|
||||
cr++;
|
||||
p += 2 * BPP;
|
||||
lum += 2;
|
||||
}
|
||||
if (w) {
|
||||
YUVA_IN(y, u, v, a, p, pal);
|
||||
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
|
||||
cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
|
||||
cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
|
||||
p++;
|
||||
lum++;
|
||||
}
|
||||
p += wrap3 - dstw * BPP;
|
||||
lum += wrap - dstw - dstx;
|
||||
cb += dst->linesize[1] - width2 - skip2;
|
||||
cr += dst->linesize[2] - width2 - skip2;
|
||||
}
|
||||
for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
|
||||
lum += dstx;
|
||||
cb += skip2;
|
||||
cr += skip2;
|
||||
|
||||
if (dstx & 1) {
|
||||
YUVA_IN(y, u, v, a, p, pal);
|
||||
u1 = u;
|
||||
v1 = v;
|
||||
a1 = a;
|
||||
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
|
||||
p += wrap3;
|
||||
lum += wrap;
|
||||
YUVA_IN(y, u, v, a, p, pal);
|
||||
u1 += u;
|
||||
v1 += v;
|
||||
a1 += a;
|
||||
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
|
||||
cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
|
||||
cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
|
||||
cb++;
|
||||
cr++;
|
||||
p += -wrap3 + BPP;
|
||||
lum += -wrap + 1;
|
||||
}
|
||||
for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
|
||||
YUVA_IN(y, u, v, a, p, pal);
|
||||
u1 = u;
|
||||
v1 = v;
|
||||
a1 = a;
|
||||
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
|
||||
|
||||
YUVA_IN(y, u, v, a, p + BPP, pal);
|
||||
u1 += u;
|
||||
v1 += v;
|
||||
a1 += a;
|
||||
lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
|
||||
p += wrap3;
|
||||
lum += wrap;
|
||||
|
||||
YUVA_IN(y, u, v, a, p, pal);
|
||||
u1 += u;
|
||||
v1 += v;
|
||||
a1 += a;
|
||||
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
|
||||
|
||||
YUVA_IN(y, u, v, a, p + BPP, pal);
|
||||
u1 += u;
|
||||
v1 += v;
|
||||
a1 += a;
|
||||
lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
|
||||
|
||||
cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
|
||||
cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
|
||||
|
||||
cb++;
|
||||
cr++;
|
||||
p += -wrap3 + 2 * BPP;
|
||||
lum += -wrap + 2;
|
||||
}
|
||||
if (w) {
|
||||
YUVA_IN(y, u, v, a, p, pal);
|
||||
u1 = u;
|
||||
v1 = v;
|
||||
a1 = a;
|
||||
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
|
||||
p += wrap3;
|
||||
lum += wrap;
|
||||
YUVA_IN(y, u, v, a, p, pal);
|
||||
u1 += u;
|
||||
v1 += v;
|
||||
a1 += a;
|
||||
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
|
||||
cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
|
||||
cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
|
||||
cb++;
|
||||
cr++;
|
||||
p += -wrap3 + BPP;
|
||||
lum += -wrap + 1;
|
||||
}
|
||||
p += wrap3 + (wrap3 - dstw * BPP);
|
||||
lum += wrap + (wrap - dstw - dstx);
|
||||
cb += dst->linesize[1] - width2 - skip2;
|
||||
cr += dst->linesize[2] - width2 - skip2;
|
||||
}
|
||||
/* handle odd height */
|
||||
if (h) {
|
||||
lum += dstx;
|
||||
cb += skip2;
|
||||
cr += skip2;
|
||||
|
||||
if (dstx & 1) {
|
||||
YUVA_IN(y, u, v, a, p, pal);
|
||||
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
|
||||
cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
|
||||
cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
|
||||
cb++;
|
||||
cr++;
|
||||
lum++;
|
||||
p += BPP;
|
||||
}
|
||||
for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
|
||||
YUVA_IN(y, u, v, a, p, pal);
|
||||
u1 = u;
|
||||
v1 = v;
|
||||
a1 = a;
|
||||
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
|
||||
|
||||
YUVA_IN(y, u, v, a, p + BPP, pal);
|
||||
u1 += u;
|
||||
v1 += v;
|
||||
a1 += a;
|
||||
lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
|
||||
cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
|
||||
cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
|
||||
cb++;
|
||||
cr++;
|
||||
p += 2 * BPP;
|
||||
lum += 2;
|
||||
}
|
||||
if (w) {
|
||||
YUVA_IN(y, u, v, a, p, pal);
|
||||
lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
|
||||
cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
|
||||
cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
|
||||
}
|
||||
cb += dst->linesize[1] - dstw/2;
|
||||
cr += dst->linesize[2] - dstw/2;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1074,10 +1248,7 @@ static void video_audio_display(VideoState *s)
|
||||
s->rdft_bits = rdft_bits;
|
||||
s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
|
||||
}
|
||||
if (!s->rdft || !s->rdft_data){
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
|
||||
s->show_mode = SHOW_MODE_WAVES;
|
||||
} else {
|
||||
{
|
||||
FFTSample *data[2];
|
||||
for (ch = 0; ch < nb_display_channels; ch++) {
|
||||
data[ch] = s->rdft_data + 2 * nb_freq * ch;
|
||||
@@ -1132,7 +1303,6 @@ static void stream_close(VideoState *is)
|
||||
#if !CONFIG_AVFILTER
|
||||
sws_freeContext(is->img_convert_ctx);
|
||||
#endif
|
||||
sws_freeContext(is->sub_convert_ctx);
|
||||
av_free(is);
|
||||
}
|
||||
|
||||
@@ -1302,11 +1472,11 @@ static double get_master_clock(VideoState *is)
|
||||
}
|
||||
|
||||
static void check_external_clock_speed(VideoState *is) {
|
||||
if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
|
||||
is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
|
||||
if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
|
||||
is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
|
||||
set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
|
||||
} else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
|
||||
(is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
|
||||
} else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
|
||||
(is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
|
||||
set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
|
||||
} else {
|
||||
double speed = is->extclk.speed;
|
||||
@@ -1333,7 +1503,7 @@ static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_by
|
||||
static void stream_toggle_pause(VideoState *is)
|
||||
{
|
||||
if (is->paused) {
|
||||
is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
|
||||
is->frame_timer += av_gettime_relative() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
|
||||
if (is->read_pause_return != AVERROR(ENOSYS)) {
|
||||
is->vidclk.paused = 0;
|
||||
}
|
||||
@@ -1359,7 +1529,7 @@ static void step_to_next_frame(VideoState *is)
|
||||
|
||||
static double compute_target_delay(double delay, VideoState *is)
|
||||
{
|
||||
double sync_threshold, diff = 0;
|
||||
double sync_threshold, diff;
|
||||
|
||||
/* update delay to follow master synchronisation source */
|
||||
if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
|
||||
@@ -1381,7 +1551,7 @@ static double compute_target_delay(double delay, VideoState *is)
|
||||
}
|
||||
}
|
||||
|
||||
av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
|
||||
av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
|
||||
delay, -diff);
|
||||
|
||||
return delay;
|
||||
@@ -1678,18 +1848,7 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double
|
||||
av_picture_copy(&pict, (AVPicture *)src_frame,
|
||||
src_frame->format, vp->width, vp->height);
|
||||
#else
|
||||
{
|
||||
AVDictionaryEntry *e = av_dict_get(sws_dict, "sws_flags", NULL, 0);
|
||||
if (e) {
|
||||
const AVClass *class = sws_get_class();
|
||||
const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0,
|
||||
AV_OPT_SEARCH_FAKE_OBJ);
|
||||
int ret = av_opt_eval_flags(&class, o, e->value, &sws_flags);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
|
||||
is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
|
||||
vp->width, vp->height, src_frame->format, vp->width, vp->height,
|
||||
AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
|
||||
@@ -1731,9 +1890,6 @@ static int get_video_frame(VideoState *is, AVFrame *frame)
|
||||
|
||||
frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
|
||||
|
||||
is->viddec_width = frame->width;
|
||||
is->viddec_height = frame->height;
|
||||
|
||||
if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
|
||||
if (frame->pts != AV_NOPTS_VALUE) {
|
||||
double diff = dpts - get_master_clock(is);
|
||||
@@ -1799,23 +1955,15 @@ fail:
|
||||
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
|
||||
{
|
||||
static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
|
||||
char sws_flags_str[512] = "";
|
||||
char sws_flags_str[128];
|
||||
char buffersrc_args[256];
|
||||
int ret;
|
||||
AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
|
||||
AVCodecContext *codec = is->video_st->codec;
|
||||
AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
|
||||
AVDictionaryEntry *e = NULL;
|
||||
|
||||
while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
|
||||
if (!strcmp(e->key, "sws_flags")) {
|
||||
av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
|
||||
} else
|
||||
av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
|
||||
}
|
||||
if (strlen(sws_flags_str))
|
||||
sws_flags_str[strlen(sws_flags_str)-1] = '\0';
|
||||
|
||||
av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
|
||||
snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
|
||||
graph->scale_sws_opts = av_strdup(sws_flags_str);
|
||||
|
||||
snprintf(buffersrc_args, sizeof(buffersrc_args),
|
||||
@@ -1845,20 +1993,20 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
|
||||
|
||||
/* Note: this macro adds a filter before the lastly added filter, so the
|
||||
* processing order of the filters is in reverse */
|
||||
#define INSERT_FILT(name, arg) do { \
|
||||
AVFilterContext *filt_ctx; \
|
||||
\
|
||||
ret = avfilter_graph_create_filter(&filt_ctx, \
|
||||
avfilter_get_by_name(name), \
|
||||
"ffplay_" name, arg, NULL, graph); \
|
||||
if (ret < 0) \
|
||||
goto fail; \
|
||||
\
|
||||
ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
|
||||
if (ret < 0) \
|
||||
goto fail; \
|
||||
\
|
||||
last_filter = filt_ctx; \
|
||||
#define INSERT_FILT(name, arg) do { \
|
||||
AVFilterContext *filt_ctx; \
|
||||
\
|
||||
ret = avfilter_graph_create_filter(&filt_ctx, \
|
||||
avfilter_get_by_name(name), \
|
||||
"ffplay_" name, arg, NULL, graph); \
|
||||
if (ret < 0) \
|
||||
goto fail; \
|
||||
\
|
||||
ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
|
||||
if (ret < 0) \
|
||||
goto fail; \
|
||||
\
|
||||
last_filter = filt_ctx; \
|
||||
} while (0)
|
||||
|
||||
/* SDL YUV code is not handling odd width/height for some driver
|
||||
@@ -1866,19 +2014,20 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
|
||||
INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
|
||||
|
||||
if (autorotate) {
|
||||
double theta = get_rotation(is->video_st);
|
||||
|
||||
if (fabs(theta - 90) < 1.0) {
|
||||
INSERT_FILT("transpose", "clock");
|
||||
} else if (fabs(theta - 180) < 1.0) {
|
||||
INSERT_FILT("hflip", NULL);
|
||||
INSERT_FILT("vflip", NULL);
|
||||
} else if (fabs(theta - 270) < 1.0) {
|
||||
INSERT_FILT("transpose", "cclock");
|
||||
} else if (fabs(theta) > 1.0) {
|
||||
char rotate_buf[64];
|
||||
snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
|
||||
INSERT_FILT("rotate", rotate_buf);
|
||||
AVDictionaryEntry *rotate_tag = av_dict_get(is->video_st->metadata, "rotate", NULL, 0);
|
||||
if (rotate_tag && *rotate_tag->value && strcmp(rotate_tag->value, "0")) {
|
||||
if (!strcmp(rotate_tag->value, "90")) {
|
||||
INSERT_FILT("transpose", "clock");
|
||||
} else if (!strcmp(rotate_tag->value, "180")) {
|
||||
INSERT_FILT("hflip", NULL);
|
||||
INSERT_FILT("vflip", NULL);
|
||||
} else if (!strcmp(rotate_tag->value, "270")) {
|
||||
INSERT_FILT("transpose", "cclock");
|
||||
} else {
|
||||
char rotate_buf[64];
|
||||
snprintf(rotate_buf, sizeof(rotate_buf), "%s*PI/180", rotate_tag->value);
|
||||
INSERT_FILT("rotate", rotate_buf);
|
||||
}
|
||||
}
|
||||
}

@@ -2056,12 +2205,6 @@ static int audio_thread(void *arg)
return ret;
}

static void decoder_start(Decoder *d, int (*fn)(void *), void *arg)
{
packet_queue_start(d->queue);
d->decoder_tid = SDL_CreateThread(fn, arg);
}

static int video_thread(void *arg)
{
VideoState *is = arg;
@@ -2080,20 +2223,8 @@ static int video_thread(void *arg)
enum AVPixelFormat last_format = -2;
int last_serial = -1;
int last_vfilter_idx = 0;
if (!graph) {
av_frame_free(&frame);
return AVERROR(ENOMEM);
}

#endif

if (!frame) {
#if CONFIG_AVFILTER
avfilter_graph_free(&graph);
#endif
return AVERROR(ENOMEM);
}

for (;;) {
ret = get_video_frame(is, frame);
if (ret < 0)
@@ -2177,7 +2308,8 @@ static int subtitle_thread(void *arg)
Frame *sp;
int got_subtitle;
double pts;
int i;
int i, j;
int r, g, b, y, u, v, a;

for (;;) {
if (!(sp = frame_queue_peek_writable(&is->subpq)))
@@ -2196,41 +2328,14 @@ static int subtitle_thread(void *arg)

for (i = 0; i < sp->sub.num_rects; i++)
{
int in_w = sp->sub.rects[i]->w;
int in_h = sp->sub.rects[i]->h;
int subw = is->subdec.avctx->width ? is->subdec.avctx->width : is->viddec_width;
int subh = is->subdec.avctx->height ? is->subdec.avctx->height : is->viddec_height;
int out_w = is->viddec_width ? in_w * is->viddec_width / subw : in_w;
int out_h = is->viddec_height ? in_h * is->viddec_height / subh : in_h;
AVPicture newpic;

//can not use avpicture_alloc as it is not compatible with avsubtitle_free()
av_image_fill_linesizes(newpic.linesize, AV_PIX_FMT_YUVA420P, out_w);
newpic.data[0] = av_malloc(newpic.linesize[0] * out_h);
newpic.data[3] = av_malloc(newpic.linesize[3] * out_h);
newpic.data[1] = av_malloc(newpic.linesize[1] * ((out_h+1)/2));
newpic.data[2] = av_malloc(newpic.linesize[2] * ((out_h+1)/2));

is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
in_w, in_h, AV_PIX_FMT_PAL8, out_w, out_h,
AV_PIX_FMT_YUVA420P, sws_flags, NULL, NULL, NULL);
if (!is->sub_convert_ctx || !newpic.data[0] || !newpic.data[3] ||
!newpic.data[1] || !newpic.data[2]
) {
av_log(NULL, AV_LOG_FATAL, "Cannot initialize the sub conversion context\n");
exit(1);
for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
{
RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
y = RGB_TO_Y_CCIR(r, g, b);
u = RGB_TO_U_CCIR(r, g, b, 0);
v = RGB_TO_V_CCIR(r, g, b, 0);
YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
}
sws_scale(is->sub_convert_ctx,
(void*)sp->sub.rects[i]->pict.data, sp->sub.rects[i]->pict.linesize,
0, in_h, newpic.data, newpic.linesize);

av_free(sp->sub.rects[i]->pict.data[0]);
av_free(sp->sub.rects[i]->pict.data[1]);
sp->sub.rects[i]->pict = newpic;
sp->sub.rects[i]->w = out_w;
sp->sub.rects[i]->h = out_h;
sp->sub.rects[i]->x = sp->sub.rects[i]->x * out_w / in_w;
sp->sub.rects[i]->y = sp->sub.rects[i]->y * out_h / in_h;
}
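The loop above rescales every PAL8 subtitle rectangle to the video size and converts it to YUVA420P through libswscale. A compact standalone sketch of the same conversion (reader's illustration, not part of the diff; convert_pal8_to_yuva420p() is a hypothetical helper and error handling is trimmed):

    #include <libavutil/imgutils.h>
    #include <libswscale/swscale.h>

    static int convert_pal8_to_yuva420p(const uint8_t *idx, int idx_linesize,
                                        const uint8_t *palette /* 256 RGBA entries */,
                                        int in_w, int in_h, int out_w, int out_h,
                                        uint8_t *dst_data[4], int dst_linesize[4])
    {
        /* PAL8 input: plane 0 holds the indices, plane 1 the 256-entry palette. */
        const uint8_t *src_data[4] = { idx, palette, NULL, NULL };
        int src_linesize[4] = { idx_linesize, 0, 0, 0 };
        struct SwsContext *ctx;
        int ret = av_image_alloc(dst_data, dst_linesize, out_w, out_h,
                                 AV_PIX_FMT_YUVA420P, 1);
        if (ret < 0)
            return ret;

        ctx = sws_getContext(in_w, in_h, AV_PIX_FMT_PAL8,
                             out_w, out_h, AV_PIX_FMT_YUVA420P,
                             SWS_BICUBIC, NULL, NULL, NULL);
        if (!ctx)
            return -1;                        /* leak handling trimmed for brevity */
        sws_scale(ctx, src_data, src_linesize, 0, in_h, dst_data, dst_linesize);
        sws_freeContext(ctx);
        return 0;
    }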

/* now we can update the picture count */
@@ -2287,9 +2392,9 @@ static int synchronize_audio(VideoState *is, int nb_samples)
wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
}
av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
diff, avg_diff, wanted_nb_samples - nb_samples,
is->audio_clock, is->audio_diff_threshold);
}
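The replaced line above is a behaviour-preserving cleanup: av_clip(x, lo, hi) computes the same value as FFMIN(FFMAX(x, lo), hi). A standalone sketch with made-up numbers (reader's illustration, not part of the diff; it assumes SAMPLE_CORRECTION_PERCENT_MAX is 10, as defined in ffplay.c):

    #include <stdio.h>

    #define SAMPLE_CORRECTION_PERCENT_MAX 10   /* assumed, as in ffplay.c */

    static int clip(int x, int lo, int hi)     /* same result as av_clip() */
    {
        return x < lo ? lo : x > hi ? hi : x;
    }

    int main(void)
    {
        int nb_samples = 1024;
        int wanted = 1300;   /* pretend diff * freq asked for this many samples */
        int min_nb = nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;  /* 921 */
        int max_nb = nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;  /* 1126 */
        printf("%d -> %d\n", wanted, clip(wanted, min_nb, max_nb));             /* 1300 -> 1126 */
        return 0;
    }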
@@ -2323,13 +2428,6 @@ static int audio_decode_frame(VideoState *is)
return -1;

do {
#if defined(_WIN32)
while (frame_queue_nb_remaining(&is->sampq) == 0) {
if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
return -1;
av_usleep (1000);
}
#endif
if (!(af = frame_queue_peek_readable(&is->sampq)))
return -1;
frame_queue_next(&is->sampq);
@@ -2577,15 +2675,10 @@ static int stream_component_open(VideoState *is, int stream_index)
}
av_codec_set_lowres(avctx, stream_lowres);

#if FF_API_EMU_EDGE
if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
#endif
if (fast)
avctx->flags2 |= AV_CODEC_FLAG2_FAST;
#if FF_API_EMU_EDGE
if(codec->capabilities & AV_CODEC_CAP_DR1)
if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
if(codec->capabilities & CODEC_CAP_DR1)
avctx->flags |= CODEC_FLAG_EMU_EDGE;
#endif

opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
if (!av_dict_get(opts, "threads", NULL, 0))
@@ -2603,7 +2696,6 @@ static int stream_component_open(VideoState *is, int stream_index)
goto fail;
}

is->eof = 0;
ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
switch (avctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
@@ -2646,31 +2738,31 @@ static int stream_component_open(VideoState *is, int stream_index)
is->audio_stream = stream_index;
is->audio_st = ic->streams[stream_index];

packet_queue_start(&is->audioq);
decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
is->auddec.start_pts = is->audio_st->start_time;
is->auddec.start_pts_tb = is->audio_st->time_base;
}
decoder_start(&is->auddec, audio_thread, is);
is->audio_tid = SDL_CreateThread(audio_thread, is);
SDL_PauseAudio(0);
break;
case AVMEDIA_TYPE_VIDEO:
is->video_stream = stream_index;
is->video_st = ic->streams[stream_index];

is->viddec_width = avctx->width;
is->viddec_height = avctx->height;

packet_queue_start(&is->videoq);
decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
decoder_start(&is->viddec, video_thread, is);
is->video_tid = SDL_CreateThread(video_thread, is);
is->queue_attachments_req = 1;
break;
case AVMEDIA_TYPE_SUBTITLE:
is->subtitle_stream = stream_index;
is->subtitle_st = ic->streams[stream_index];

packet_queue_start(&is->subtitleq);
decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
decoder_start(&is->subdec, subtitle_thread, is);
is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
break;
default:
break;
@@ -2693,9 +2785,13 @@ static void stream_component_close(VideoState *is, int stream_index)

switch (avctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
decoder_abort(&is->auddec, &is->sampq);
packet_queue_abort(&is->audioq);
frame_queue_signal(&is->sampq);
SDL_CloseAudio();
SDL_WaitThread(is->audio_tid, NULL);

decoder_destroy(&is->auddec);
packet_queue_flush(&is->audioq);
swr_free(&is->swr_ctx);
av_freep(&is->audio_buf1);
is->audio_buf1_size = 0;
@@ -2709,12 +2805,28 @@ static void stream_component_close(VideoState *is, int stream_index)
}
break;
case AVMEDIA_TYPE_VIDEO:
decoder_abort(&is->viddec, &is->pictq);
packet_queue_abort(&is->videoq);

/* note: we also signal this mutex to make sure we deblock the
video thread in all cases */
frame_queue_signal(&is->pictq);

SDL_WaitThread(is->video_tid, NULL);

decoder_destroy(&is->viddec);
packet_queue_flush(&is->videoq);
break;
case AVMEDIA_TYPE_SUBTITLE:
decoder_abort(&is->subdec, &is->subpq);
packet_queue_abort(&is->subtitleq);

/* note: we also signal this mutex to make sure we deblock the
video thread in all cases */
frame_queue_signal(&is->subpq);

SDL_WaitThread(is->subtitle_tid, NULL);

decoder_destroy(&is->subdec);
packet_queue_flush(&is->subtitleq);
break;
default:
break;
@@ -2770,6 +2882,7 @@ static int read_thread(void *arg)
int err, i, ret;
int st_index[AVMEDIA_TYPE_NB];
AVPacket pkt1, *pkt = &pkt1;
int eof = 0;
int64_t stream_start_time;
int pkt_in_play_range = 0;
AVDictionaryEntry *t;
@@ -2777,20 +2890,13 @@ static int read_thread(void *arg)
int orig_nb_streams;
SDL_mutex *wait_mutex = SDL_CreateMutex();
int scan_all_pmts_set = 0;
int64_t pkt_ts;

memset(st_index, -1, sizeof(st_index));
is->last_video_stream = is->video_stream = -1;
is->last_audio_stream = is->audio_stream = -1;
is->last_subtitle_stream = is->subtitle_stream = -1;
is->eof = 0;

ic = avformat_alloc_context();
if (!ic) {
av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
ret = AVERROR(ENOMEM);
goto fail;
}
ic->interrupt_callback.callback = decode_interrupt_cb;
ic->interrupt_callback.opaque = is;
if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
@@ -2862,42 +2968,29 @@ static int read_thread(void *arg)

is->realtime = is_realtime(ic);

if (show_status)
av_dump_format(ic, 0, is->filename, 0);

for (i = 0; i < ic->nb_streams; i++) {
AVStream *st = ic->streams[i];
enum AVMediaType type = st->codec->codec_type;
st->discard = AVDISCARD_ALL;
if (wanted_stream_spec[type] && st_index[type] == -1)
if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
st_index[type] = i;
}
for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
if (wanted_stream_spec[i] && st_index[i] == -1) {
av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
st_index[i] = INT_MAX;
}
}

for (i = 0; i < ic->nb_streams; i++)
ic->streams[i]->discard = AVDISCARD_ALL;
if (!video_disable)
st_index[AVMEDIA_TYPE_VIDEO] =
av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
if (!audio_disable)
st_index[AVMEDIA_TYPE_AUDIO] =
av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
st_index[AVMEDIA_TYPE_AUDIO],
wanted_stream[AVMEDIA_TYPE_AUDIO],
st_index[AVMEDIA_TYPE_VIDEO],
NULL, 0);
if (!video_disable && !subtitle_disable)
st_index[AVMEDIA_TYPE_SUBTITLE] =
av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
st_index[AVMEDIA_TYPE_SUBTITLE],
wanted_stream[AVMEDIA_TYPE_SUBTITLE],
(st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
st_index[AVMEDIA_TYPE_AUDIO] :
st_index[AVMEDIA_TYPE_VIDEO]),
NULL, 0);
if (show_status) {
av_dump_format(ic, 0, is->filename, 0);
}
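The stream-selection loop above is what turns the new -ast/-vst/-sst stream specifiers into candidate indices that are then refined by av_find_best_stream(). A standalone sketch of the matching step (reader's illustration, not part of the diff; find_wanted_stream() is a hypothetical helper):

    #include <libavformat/avformat.h>

    /* Return the index of the first stream of the given type that matches the
     * user's specifier (e.g. "0" or "m:language:eng"), or -1 if none matches. */
    static int find_wanted_stream(AVFormatContext *ic, enum AVMediaType type,
                                  const char *spec)
    {
        int i;
        for (i = 0; i < ic->nb_streams; i++) {
            AVStream *st = ic->streams[i];
            if (st->codec->codec_type != type)
                continue;
            if (!spec || avformat_match_stream_specifier(ic, st, spec) > 0)
                return i;
        }
        return -1;
    }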

is->show_mode = show_mode;
if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
@@ -2986,7 +3079,7 @@ static int read_thread(void *arg)
}
is->seek_req = 0;
is->queue_attachments_req = 1;
is->eof = 0;
eof = 0;
if (is->paused)
step_to_next_frame(is);
}
@@ -3026,14 +3119,14 @@ static int read_thread(void *arg)
}
ret = av_read_frame(ic, pkt);
if (ret < 0) {
if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !eof) {
if (is->video_stream >= 0)
packet_queue_put_nullpacket(&is->videoq, is->video_stream);
if (is->audio_stream >= 0)
packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
if (is->subtitle_stream >= 0)
packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
is->eof = 1;
eof = 1;
}
if (ic->pb && ic->pb->error)
break;
@@ -3042,13 +3135,12 @@ static int read_thread(void *arg)
SDL_UnlockMutex(wait_mutex);
continue;
} else {
is->eof = 0;
eof = 0;
}
/* check if packet is in play range specified by user, then queue, otherwise discard */
stream_start_time = ic->streams[pkt->stream_index]->start_time;
pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
pkt_in_play_range = duration == AV_NOPTS_VALUE ||
(pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
(pkt->pts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
av_q2d(ic->streams[pkt->stream_index]->time_base) -
(double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
<= ((double)duration / 1000000);
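Spelled out, the expression above converts the packet timestamp to seconds relative to its stream's start and checks it against the -ss/-t values, which are kept in AV_TIME_BASE (microsecond) units. A standalone restatement with made-up numbers (reader's sketch, not part of the diff; in_play_range() is a hypothetical helper):

    #include <stdio.h>
    #include <libavutil/avutil.h>   /* AV_NOPTS_VALUE */

    /* pkt_ts and stream_start are in stream time_base units, start_time and
     * duration are in microseconds (AV_TIME_BASE), as in read_thread(). */
    static int in_play_range(int64_t pkt_ts, int64_t stream_start, double time_base,
                             int64_t start_time, int64_t duration)
    {
        double pos;
        if (duration == AV_NOPTS_VALUE)
            return 1;
        pos = (pkt_ts - (stream_start != AV_NOPTS_VALUE ? stream_start : 0)) * time_base
              - (start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000.0;
        return pos <= duration / 1000000.0;
    }

    int main(void)
    {
        /* 90 kHz stream, packet at pts 990000, stream starts at 90000:
         * (990000 - 90000) / 90000 = 10 s into the stream; with -ss 2 -t 5 the
         * packet is 8 s past the requested start, so it is out of range. */
        printf("%d\n", in_play_range(990000, 90000, 1.0 / 90000, 2000000, 5000000));
        return 0;
    }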
@@ -3582,9 +3674,9 @@ static const OptionDef options[] = {
{ "an", OPT_BOOL, { &audio_disable }, "disable audio" },
{ "vn", OPT_BOOL, { &video_disable }, "disable video" },
{ "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
{ "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
{ "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
{ "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
{ "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
{ "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
{ "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
{ "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
{ "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
{ "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },

ffprobe.c (121 changes)
@@ -33,12 +33,10 @@
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/display.h"
#include "libavutil/hash.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/dict.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/libm.h"
#include "libavutil/parseutils.h"
#include "libavutil/timecode.h"
@@ -88,7 +86,7 @@ static char *print_format;
static char *stream_specifier;
static char *show_data_hash;

typedef struct ReadInterval {
typedef struct {
int id; ///< identifier
int64_t start, end; ///< start, end in second/AV_TIME_BASE units
int has_start, has_end;
@@ -137,8 +135,6 @@ typedef enum {
SECTION_ID_PACKET,
SECTION_ID_PACKETS,
SECTION_ID_PACKETS_AND_FRAMES,
SECTION_ID_PACKET_SIDE_DATA_LIST,
SECTION_ID_PACKET_SIDE_DATA,
SECTION_ID_PIXEL_FORMAT,
SECTION_ID_PIXEL_FORMAT_FLAGS,
SECTION_ID_PIXEL_FORMAT_COMPONENT,
@@ -157,8 +153,6 @@ typedef enum {
SECTION_ID_STREAM_DISPOSITION,
SECTION_ID_STREAMS,
SECTION_ID_STREAM_TAGS,
SECTION_ID_STREAM_SIDE_DATA_LIST,
SECTION_ID_STREAM_SIDE_DATA,
SECTION_ID_SUBTITLE,
} SectionID;

@@ -178,9 +172,7 @@ static struct section sections[] = {
[SECTION_ID_LIBRARY_VERSION] = { SECTION_ID_LIBRARY_VERSION, "library_version", 0, { -1 } },
[SECTION_ID_PACKETS] = { SECTION_ID_PACKETS, "packets", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET, -1} },
[SECTION_ID_PACKETS_AND_FRAMES] = { SECTION_ID_PACKETS_AND_FRAMES, "packets_and_frames", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET, -1} },
[SECTION_ID_PACKET] = { SECTION_ID_PACKET, "packet", 0, { SECTION_ID_PACKET_SIDE_DATA_LIST, -1 } },
[SECTION_ID_PACKET_SIDE_DATA_LIST] ={ SECTION_ID_PACKET_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET_SIDE_DATA, -1 } },
[SECTION_ID_PACKET_SIDE_DATA] = { SECTION_ID_PACKET_SIDE_DATA, "side_data", 0, { -1 } },
[SECTION_ID_PACKET] = { SECTION_ID_PACKET, "packet", 0, { -1 } },
[SECTION_ID_PIXEL_FORMATS] = { SECTION_ID_PIXEL_FORMATS, "pixel_formats", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PIXEL_FORMAT, -1 } },
[SECTION_ID_PIXEL_FORMAT] = { SECTION_ID_PIXEL_FORMAT, "pixel_format", 0, { SECTION_ID_PIXEL_FORMAT_FLAGS, SECTION_ID_PIXEL_FORMAT_COMPONENTS, -1 } },
[SECTION_ID_PIXEL_FORMAT_FLAGS] = { SECTION_ID_PIXEL_FORMAT_FLAGS, "flags", 0, { -1 }, .unique_name = "pixel_format_flags" },
@@ -199,11 +191,9 @@ static struct section sections[] = {
SECTION_ID_PACKETS, SECTION_ID_ERROR, SECTION_ID_PROGRAM_VERSION, SECTION_ID_LIBRARY_VERSIONS,
SECTION_ID_PIXEL_FORMATS, -1} },
[SECTION_ID_STREAMS] = { SECTION_ID_STREAMS, "streams", SECTION_FLAG_IS_ARRAY, { SECTION_ID_STREAM, -1 } },
[SECTION_ID_STREAM] = { SECTION_ID_STREAM, "stream", 0, { SECTION_ID_STREAM_DISPOSITION, SECTION_ID_STREAM_TAGS, SECTION_ID_STREAM_SIDE_DATA_LIST, -1 } },
[SECTION_ID_STREAM] = { SECTION_ID_STREAM, "stream", 0, { SECTION_ID_STREAM_DISPOSITION, SECTION_ID_STREAM_TAGS, -1 } },
[SECTION_ID_STREAM_DISPOSITION] = { SECTION_ID_STREAM_DISPOSITION, "disposition", 0, { -1 }, .unique_name = "stream_disposition" },
[SECTION_ID_STREAM_TAGS] = { SECTION_ID_STREAM_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "stream_tags" },
[SECTION_ID_STREAM_SIDE_DATA_LIST] ={ SECTION_ID_STREAM_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_STREAM_SIDE_DATA, -1 } },
[SECTION_ID_STREAM_SIDE_DATA] = { SECTION_ID_STREAM_SIDE_DATA, "side_data", 0, { -1 } },
[SECTION_ID_SUBTITLE] = { SECTION_ID_SUBTITLE, "subtitle", 0, { -1 } },
};

@@ -348,7 +338,7 @@ struct WriterContext {
unsigned int nb_section_frame; ///< number of the frame section in case we are in "packets_and_frames" section
unsigned int nb_section_packet_frame; ///< nb_section_packet or nb_section_frame according if is_packets_and_frames

int string_validation;
StringValidation string_validation;
char *string_validation_replacement;
unsigned int string_validation_utf8_flags;
};
@@ -728,32 +718,6 @@ static void writer_print_data_hash(WriterContext *wctx, const char *name,
writer_print_string(wctx, name, buf, 0);
}

static void writer_print_integers(WriterContext *wctx, const char *name,
uint8_t *data, int size, const char *format,
int columns, int bytes, int offset_add)
{
AVBPrint bp;
int offset = 0, l, i;

av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);
av_bprintf(&bp, "\n");
while (size) {
av_bprintf(&bp, "%08x: ", offset);
l = FFMIN(size, columns);
for (i = 0; i < l; i++) {
if (bytes == 1) av_bprintf(&bp, format, *data);
else if (bytes == 2) av_bprintf(&bp, format, AV_RN16(data));
else if (bytes == 4) av_bprintf(&bp, format, AV_RN32(data));
data += bytes;
size --;
}
av_bprintf(&bp, "\n");
offset += offset_add;
}
writer_print_string(wctx, name, bp.str, 0);
av_bprint_finalize(&bp, NULL);
}
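writer_print_integers() above is what later renders the 9-entry display matrix as rows of fixed-point integers. A standalone sketch of the layout produced by the "displaymatrix" call (columns=3, bytes=4, offset_add=1); the matrix values are made-up 16.16 fixed-point numbers and this is a reader's illustration, not part of the diff:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        /* made-up 3x3 matrix in 16.16 fixed point (65536 == 1.0) */
        int32_t mat[9] = { 0, 65536, 0, -65536, 0, 0, 0, 0, 1 << 30 };
        const uint8_t *data = (const uint8_t *)mat;
        int size = 9, offset = 0;

        while (size) {
            int i, l = size < 3 ? size : 3;      /* columns = 3 */
            printf("%08x: ", offset);
            for (i = 0; i < l; i++) {
                int32_t v;
                memcpy(&v, data, 4);             /* bytes = 4, like AV_RN32() */
                printf(" %11d", v);
                data += 4;
                size--;
            }
            printf("\n");
            offset += 1;                         /* offset_add = 1 */
        }
        return 0;
    }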

#define MAX_REGISTERED_WRITERS_NB 64

static const Writer *registered_writers[MAX_REGISTERED_WRITERS_NB + 1];
@@ -1228,7 +1192,7 @@ static const Writer flat_writer = {

/* INI format output */

typedef struct INIContext {
typedef struct {
const AVClass *class;
int hierarchical;
} INIContext;
@@ -1332,7 +1296,7 @@ static const Writer ini_writer = {

/* JSON output */

typedef struct JSONContext {
typedef struct {
const AVClass *class;
int indent_level;
int compact;
@@ -1494,7 +1458,7 @@ static const Writer json_writer = {

/* XML output */

typedef struct XMLContext {
typedef struct {
const AVClass *class;
int within_tag;
int indent_level;
@@ -1759,25 +1723,6 @@ static void show_packet(WriterContext *w, AVFormatContext *fmt_ctx, AVPacket *pk
if (pkt->pos != -1) print_fmt ("pos", "%"PRId64, pkt->pos);
else print_str_opt("pos", "N/A");
print_fmt("flags", "%c", pkt->flags & AV_PKT_FLAG_KEY ? 'K' : '_');

if (pkt->side_data_elems) {
int i;
writer_print_section_header(w, SECTION_ID_PACKET_SIDE_DATA_LIST);
for (i = 0; i < pkt->side_data_elems; i++) {
AVPacketSideData *sd = &pkt->side_data[i];
const char *name = av_packet_side_data_name(sd->type);
writer_print_section_header(w, SECTION_ID_PACKET_SIDE_DATA);
print_str("side_data_type", name ? name : "unknown");
print_int("side_data_size", sd->size);
if (sd->type == AV_PKT_DATA_DISPLAYMATRIX && sd->size >= 9*4) {
writer_print_integers(w, "displaymatrix", sd->data, 9, " %11d", 3, 4, 1);
print_int("rotation", av_display_rotation_get((int32_t *)sd->data));
}
writer_print_section_footer(w);
}
writer_print_section_footer(w);
}

if (do_show_data)
writer_print_data(w, "data", pkt->data, pkt->size);
writer_print_data_hash(w, "data_hash", pkt->data, pkt->size);
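The rotation value printed in the side data block above comes from av_display_rotation_get(), which extracts the angle from the 3x3 matrix stored as packet side data. A minimal standalone round trip (reader's sketch, not part of the diff; link against libavutil):

    #include <stdio.h>
    #include <libavutil/display.h>

    int main(void)
    {
        int32_t matrix[9];
        av_display_rotation_set(matrix, 90);   /* encode a 90 degree rotation */
        printf("rotation=%f\n", av_display_rotation_get(matrix));
        return 0;
    }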
@@ -1824,7 +1769,6 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
s = av_get_media_type_string(stream->codec->codec_type);
if (s) print_str ("media_type", s);
else print_str_opt("media_type", "unknown");
print_int("stream_index", stream->index);
print_int("key_frame", frame->key_frame);
print_ts ("pkt_pts", frame->pkt_pts);
print_time("pkt_pts_time", frame->pkt_pts, &stream->time_base);
@@ -1889,11 +1833,6 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
name = av_frame_side_data_name(sd->type);
print_str("side_data_type", name ? name : "unknown");
print_int("side_data_size", sd->size);
if (sd->type == AV_FRAME_DATA_DISPLAYMATRIX && sd->size >= 9*4) {
abort();
writer_print_integers(w, "displaymatrix", sd->data, 9, " %11d", 3, 4, 1);
print_int("rotation", av_display_rotation_get((int32_t *)sd->data));
}
writer_print_section_footer(w);
}
writer_print_section_footer(w);
@@ -2155,8 +2094,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
case AVMEDIA_TYPE_VIDEO:
print_int("width", dec_ctx->width);
print_int("height", dec_ctx->height);
print_int("coded_width", dec_ctx->coded_width);
print_int("coded_height", dec_ctx->coded_height);
print_int("has_b_frames", dec_ctx->has_b_frames);
sar = av_guess_sample_aspect_ratio(fmt_ctx, stream, NULL);
if (sar.den) {
@@ -2204,7 +2141,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
} else {
print_str_opt("timecode", "N/A");
}
print_int("refs", dec_ctx->refs);
break;

case AVMEDIA_TYPE_AUDIO:
@@ -2302,25 +2238,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
if (do_show_stream_tags)
ret = show_tags(w, stream->metadata, in_program ? SECTION_ID_PROGRAM_STREAM_TAGS : SECTION_ID_STREAM_TAGS);

if (stream->nb_side_data) {
int i;
writer_print_section_header(w, SECTION_ID_STREAM_SIDE_DATA_LIST);
for (i = 0; i < stream->nb_side_data; i++) {
AVPacketSideData *sd = &stream->side_data[i];
const char *name = av_packet_side_data_name(sd->type);

writer_print_section_header(w, SECTION_ID_STREAM_SIDE_DATA);
print_str("side_data_type", name ? name : "unknown");
print_int("side_data_size", sd->size);
if (sd->type == AV_PKT_DATA_DISPLAYMATRIX && sd->size >= 9*4) {
writer_print_integers(w, "displaymatrix", sd->data, 9, " %11d", 3, 4, 1);
print_int("rotation", av_display_rotation_get((int32_t *)sd->data));
}
writer_print_section_footer(w);
}
writer_print_section_footer(w);
}

writer_print_section_footer(w);
av_bprint_finalize(&pbuf, NULL);
fflush(stdout);
@@ -2480,7 +2397,6 @@ static int open_input_file(AVFormatContext **fmt_ctx_ptr, const char *filename)
print_error(filename, err);
return err;
}
*fmt_ctx_ptr = fmt_ctx;
if (scan_all_pmts_set)
av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
@@ -2492,16 +2408,13 @@ static int open_input_file(AVFormatContext **fmt_ctx_ptr, const char *filename)
opts = setup_find_stream_info_opts(fmt_ctx, codec_opts);
orig_nb_streams = fmt_ctx->nb_streams;

err = avformat_find_stream_info(fmt_ctx, opts);

for (i = 0; i < orig_nb_streams; i++)
av_dict_free(&opts[i]);
av_freep(&opts);

if (err < 0) {
if ((err = avformat_find_stream_info(fmt_ctx, opts)) < 0) {
print_error(filename, err);
return err;
}
for (i = 0; i < orig_nb_streams; i++)
av_dict_free(&opts[i]);
av_freep(&opts);

av_dump_format(fmt_ctx, 0, filename, 0);

@@ -2552,7 +2465,7 @@ static void close_input_file(AVFormatContext **ctx_ptr)

static int probe_file(WriterContext *wctx, const char *filename)
{
AVFormatContext *fmt_ctx = NULL;
AVFormatContext *fmt_ctx;
int ret, i;
int section_id;

@@ -2561,7 +2474,7 @@ static int probe_file(WriterContext *wctx, const char *filename)

ret = open_input_file(&fmt_ctx, filename);
if (ret < 0)
goto end;
return ret;

#define CHECK_END if (ret < 0) goto end

@@ -2619,8 +2532,7 @@ static int probe_file(WriterContext *wctx, const char *filename)
}

end:
if (fmt_ctx)
close_input_file(&fmt_ctx);
close_input_file(&fmt_ctx);
av_freep(&nb_streams_frames);
av_freep(&nb_streams_packets);
av_freep(&selected_streams);
@@ -2644,6 +2556,8 @@ static void ffprobe_show_program_version(WriterContext *w)
print_str("version", FFMPEG_VERSION);
print_fmt("copyright", "Copyright (c) %d-%d the FFmpeg developers",
program_birth_year, CONFIG_THIS_YEAR);
print_str("build_date", __DATE__);
print_str("build_time", __TIME__);
print_str("compiler_ident", CC_IDENT);
print_str("configuration", FFMPEG_CONFIGURATION);
writer_print_section_footer(w);
@@ -2831,9 +2745,6 @@ static int opt_show_format_entry(void *optctx, const char *opt, const char *arg)
char *buf = av_asprintf("format=%s", arg);
int ret;

if (!buf)
return AVERROR(ENOMEM);

av_log(NULL, AV_LOG_WARNING,
"Option '%s' is deprecated, use '-show_entries format=%s' instead\n",
opt, arg);

Some files were not shown because too many files have changed in this diff.