Compare commits

229 Commits

n2.6.4 ... release/2.

SHA1:
- 49fb1f66f1
- f8b25be7f7
- 633bf6246c
- 0fde898f16
- 1fff994709
- eb0a9cd68a
- f450caf057
- f6006295c0
- 5279f5d068
- d40ab18c07
- 80a467b452
- 9e1b493916
- 44184226ac
- e2f0443af0
- f57a595024
- d88a85c493
- bf4fa00162
- 17d9157c82
- e1afd6ed60
- 7d97587b1a
- 86c85b6f1a
- ac4126decd
- db27f50e06
- 8327bef1c9
- a620c463f0
- 63b5cb1fb0
- 7959b9a0f3
- bbfe0f7b08
- 27a910a857
- ffe915b6f5
- 22558d6f6e
- f03888b449
- 11c0531099
- 0b033cd3a1
- c9a25ff5a0
- 3ee4a610c2
- d071c1f0e1
- 15601df419
- 103cf56c62
- d4c45f9249
- 841ce9a838
- 8a01fb3729
- b5dbe93c8b
- 185e55279c
- 730826275f
- afd7fac3f1
- 3353a00d58
- b052ea0f5b
- c8fb53357d
- 30e7dae22c
- a74a0a5c0c
- 0f77303c57
- dbe690b572
- 6eca20aaec
- 7ef11e8221
- 942806cbe4
- 557e3790ef
- 2b15ceec62
- c3cd7b8a29
- 0621421ee2
- 0e9fe8510e
- 1f59cfe65b
- a6f808b36a
- 4a495766d1
- cedb96db37
- de75b4063d
- bb70b6673f
- a07dfcdd6d
- 5630d5cdc2
- 87d0339d67
- fac6ae0814
- 0c51b26729
- e0822b147f
- 1bfd23d2c9
- 419bd6e303
- 494d3d14db
- 6c63eb5909
- 142896f2d0
- dca70c5931
- 17ff5d3f88
- 693d0d3ac5
- bb2c09310c
- 66261cfa77
- b89b136c00
- 6ed35a6674
- d4c70c8b50
- 05bc6f8ba6
- c2517fb363
- 0cc15f7c83
- b6ff3acafc
- 8047380514
- b152305bb3
- a0605792c2
- 4b8cb3fe51
- 48bf926bad
- ab43652c67
- 48b586ca4e
- 4e2e997faf
- 19ccc06d8b
- 193b949f71
- 63ed7e09dd
- 10464ca0eb
- 4f515913a2
- b44506c393
- 9395a3a96b
- 25d2a4dde7
- bfee1e9072
- 0db579445f
- e3275571c7
- 7b7d12ea04
- e26fd791ef
- ad98b2891c
- 1ec6a3c768
- 5c791b1c9c
- 4a03c31728
- c3c8857263
- 4fbdac00e9
- 0f4c149730
- d887a12145
- f2fba07494
- 1ebd7d2ccf
- 5f7e48a113
- e2e0c5b7f6
- 279003eb9e
- 5926bea980
- cbfd6b1fa4
- fdc8f4e5b4
- 603f4ecd14
- 1ac4ae2a32
- 3998071039
- 6fb9bfb1a3
- 693faadd30
- 9806028fbb
- 433e15bb87
- c3af801c63
- 01817d508b
- b5ef1eee45
- e2c5f88237
- d005e2ecce
- 57bdb3f3dd
- 5e34dded10
- 45ca270ec9
- ca2ccd85d7
- 437f6fb488
- f913da3e15
- ed9c6529f0
- aa40bbb492
- 8e276fc96a
- 4a4e30a6d8
- 1c733a440a
- 5c3bc127ca
- be94d15a03
- 9c57328b81
- 6952f6f39b
- 6359be6751
- beb55b3981
- 80aec733ad
- 77bb6b5bcc
- f68395f7fc
- eefb6b654d
- d18d48def6
- 8df77c3758
- 08f56b846c
- f903147f2d
- 9a840d5e17
- 9e43d92d6a
- e13e928baa
- d3bfb66a66
- 17a6ca7d31
- 8a20224059
- 29ee8b72c4
- 25864cf562
- f74206cb40
- 148d9cd122
- bc259185cb
- 3b6bde3b3d
- 4f187f0af1
- 10c2d22ba1
- 35738e5898
- 59d98fc050
- 60bfa9154d
- 9794727ccd
- b88de7b31a
- 11420649d0
- dbf5d7e5cd
- 6badd558ce
- a1fe3b4150
- c5129da726
- fc57959fd5
- 83956309cc
- ed15be7519
- 6928193493
- a72b7286e6
- 901e275697
- 7a6b5d7a86
- fdfe94f4b1
- b63ec0cb0f
- 2eb72d5bdc
- 638c3aca64
- 76d59f1b34
- aa0a8ef50e
- d6173ae341
- 3ed27832e7
- 594b1fa961
- 7a1262fca3
- 6d2219e9f9
- 7c2d152f56
- 1f58590e1e
- 64bbbcd7b0
- de9d3f22f0
- ea5bb5613f
- c61ac696e5
- 6a250c858e
- 5411040802
- ab1ea597bd
- ee606fd031
- 2f71aeb301
- 65259b4d68
- 8f53d32dfb
- fcc6568a10
- 489d066d49
- 9cb45f6ad2
- 0f04e2741e
- 84642ec879
- bef4d9bf87
- bc4f6ae88e
- 2678b25099
- e322496054
- 7fa72ff19c
1  .gitattributes (vendored)

@@ -1 +0,0 @@
*.pnm -diff -text
4  .gitignore (vendored)

@@ -39,7 +39,6 @@
/doc/examples/avio_reading
/doc/examples/decoding_encoding
/doc/examples/demuxing_decoding
/doc/examples/extract_mvs
/doc/examples/filter_audio
/doc/examples/filtering_audio
/doc/examples/filtering_video
@@ -62,9 +61,7 @@
/tests/audiogen
/tests/base64
/tests/data/
/tests/pixfmts.mak
/tests/rotozoom
/tests/test_copy.ffmeta
/tests/tiny_psnr
/tests/tiny_ssim
/tests/videogen
@@ -83,7 +80,6 @@
/tools/pktdumper
/tools/probetest
/tools/qt-faststart
/tools/sidxindex
/tools/trasher
/tools/seek_print
/tools/uncoded_frame
333  Changelog

@@ -1,317 +1,29 @@
Entries are sorted chronologically from oldest to youngest within each release,
|
||||
releases are sorted from youngest to oldest.
|
||||
|
||||
version 2.6.4:
|
||||
- imc: use correct position for flcoeffs2 calculation
|
||||
- hevc: check slice address length
|
||||
- snow: remove an obsolete av_assert2
|
||||
- webp: fix infinite loop in webp_decode_frame
|
||||
- wavpack: limit extra_bits to 32 and use get_bits_long
|
||||
- ffmpeg: only count got_output/errors in decode_error_stat
|
||||
- ffmpeg: exit_on_error if decoding a packet failed
|
||||
- pthread_frame: forward error codes when flushing
|
||||
- huffyuvdec: validate image size
|
||||
- wavpack: use get_bits_long to read up to 32 bits
|
||||
- nutdec: check maxpos in read_sm_data before returning success
|
||||
- vc1dec: use get_bits_long and limit the read bits to 32
|
||||
- mpegaudiodec: copy AVFloatDSPContext from first context to all contexts
|
||||
- avcodec/vp8: Check buffer size in vp8_decode_frame_header()
|
||||
- avcodec/vp8: Fix null pointer dereference in ff_vp8_decode_free()
|
||||
- avcodec/diracdec: Check for hpel_base allocation failure
|
||||
- avcodec/rv34: Clear pointers in ff_rv34_decode_init_thread_copy()
|
||||
- avfilter/af_aresample: Check ff_all_* for allocation failures
|
||||
- avcodec/pthread_frame: clear priv_data, avoid stale pointer in error case
|
||||
- swscale/utils: Clear pix buffers
|
||||
- avutil/fifo: Fix the case where func() returns less bytes than requested in av_fifo_generic_write()
|
||||
- ffmpeg: Fix cleanup after failed allocation of output_files
|
||||
- avformat/mov: Fix deallocation when MOVStreamContext failed to allocate
|
||||
- ffmpeg: Fix crash with ost->last_frame allocation failure
|
||||
- ffmpeg: Fix cleanup with ost = NULL
|
||||
- avcodec/pthread_frame: check avctx on deallocation
|
||||
- avcodec/sanm: Reset sizes in destroy_buffers()
|
||||
- avcodec/alac: Clear pointers in allocate_buffers()
|
||||
- bytestream2: set the reader to the end when reading more than available
|
||||
- avcodec/utils: use a minimum 32pixel width in avcodec_align_dimensions2() for H.264
|
||||
- avcodec/mpegvideo: Clear pointers in ff_mpv_common_init()
|
||||
- oggparsedirac: check return value of init_get_bits
|
||||
- wmalosslessdec: reset frame->nb_samples on packet loss
|
||||
- wmalosslessdec: avoid reading 0 bits with get_bits
|
||||
- avcodec/rawenc: Use ff_alloc_packet() instead of ff_alloc_packet2()
|
||||
- avcodec/aacsbr: Assert that bs_num_env is positive
|
||||
- avcodec/aacsbr: check that the element type matches before applying SBR
|
||||
- avcodec/h264_slice: Use w/h from the AVFrame instead of mb_w/h
|
||||
- vp9/update_prob: prevent out of bounds table read
|
||||
- avfilter/vf_transpose: Fix rounding error
|
||||
- avcodec/pngdec: Check values before updating context in decode_fctl_chunk()
|
||||
- avcodec/pngdec: Require a IHDR chunk before fctl
|
||||
- avcodec/pngdec: Only allow one IHDR chunk
|
||||
- wmavoice: limit wmavoice_decode_packet return value to packet size
|
||||
- swscale/swscale_unscaled: Fix rounding difference with RGBA output between little and big endian
|
||||
- ffmpeg: Do not use the data/size of a bitstream filter after failure
|
||||
- swscale/x86/rgb2rgb_template: fix signedness of v in shuffle_bytes_2103_{mmx,mmxext}
|
||||
- swscale/x86/rgb2rgb_template: add missing xmm clobbers
|
||||
- vda: unlock the pixel buffer base address.
|
||||
- swscale/rgb2rgb_template: Fix signedness of v in shuffle_bytes_2103_c()
|
||||
- swscale/rgb2rgb_template: Implement shuffle_bytes_0321_c and fix shuffle_bytes_2103_c on BE
|
||||
- swscale/rgb2rgb_template: Disable shuffle_bytes_2103_c on big endian
|
||||
- swr: Remember previously set int_sample_format from user
|
||||
- matroskadec: check audio sample rate
|
||||
- matroskadec: validate audio channels and bitdepth
|
||||
- avcodec/dpxenc: implement write16/32 as functions
|
||||
- postproc: fix unaligned access
|
||||
- ffmpeg: Free last_frame instead of just unref
|
||||
- avio: fix potential crashes when combining ffio_ensure_seekback + crc
|
||||
- examples/demuxing_decoding: use properties from frame instead of video_dec_ctx
|
||||
- h264: er: Copy from the previous reference only if compatible
|
||||
- sonic: set avctx->channels in sonic_decode_init
|
||||
- vp8: change mv_{min,max}.{x,y} type to int
|
||||
- vp9: change type of tile_size from unsigned to int64_t
|
||||
- arm: only enable setend on ARMv6
|
||||
- libopenjpegdec: check existence of image component data
|
||||
- mov: abort on EOF in ff_mov_read_chan
|
||||
- ffmpeg_opt: Check for localtime() failure
|
||||
- avformat: Fix bug in parse_rps for HEVC.
|
||||
- takdec: ensure chan2 is a valid channel index
|
||||
- avcodec/h264_slice: Use AVFrame dimensions for grayscale handling
|
||||
- avdevice/lavfi: do not rescale AV_NOPTS_VALUE in lavfi_read_packet()
|
||||
- libavutil/channel_layout: Correctly return layout when channel specification ends with a trailing 'c'.
|
||||
- avcodec/jpeg2000dec: Check that coords match before applying ICT
|
||||
- avformat/ffmdec: Check ffio_set_buf_size() return value
|
||||
- avcodec/adpcm: Check for overreads
|
||||
- avcodec/alsdec: Check for overread
|
||||
- avcodec/atrac3plusdec: consume only as many bytes as available
|
||||
- libavutil/softfloat: Fix av_normalize1_sf bias.
|
||||
- swresample/swresample: Cleanup on init failure.
|
||||
- Revert "avformat/rtpenc: check av_packet_get_side_data() return, fix null ptr dereference"
|
||||
- avformat/mxfenc: Accept MXF D-10 with 49.999840 Mbit/sec
|
||||
- swresample/dither: check memory allocation
|
||||
- libopenjpegenc: add NULL check for img before accessing it
|
||||
- swresample: Check the return value of resampler->init()
|
||||
- h264: Make sure reinit failures mark the context as not initialized
|
||||
- avfilter/x86/vf_fspp: Fix invalid combination of opcode and operands
|
||||
- ffmpeg_opt: Set the video VBV parameters only for the video stream from -target
|
||||
- avcodec/bitstream: Assert that there is enough space left in avpriv_copy_bits()
|
||||
- avcodec/put_bits: Assert that there is enough space left in skip_put_bytes()
|
||||
- avcodec/mpegvideo_enc: Update the buffer size as more slices are merged
|
||||
- avcodec/put_bits: Update size_in_bits in set_put_bits_buffer_size()
|
||||
- avformat/wavdec: Increase dts packet threshold to fix more misdetections
|
||||
- avformat/wavdec: Increase probe_packets limit
|
||||
- nutdec: abort if EOF is reached in decode_info_header/read_sm_data
|
||||
- nutdec: stop skipping bytes at EOF
|
||||
- nutdec: fix infinite resync loops
|
||||
- avformat/nutdec: Check X in 2nd branch of index reading
|
||||
- avformat/nutdec: Fix recovery when immediately after seeking a failure happens
|
||||
- avformat/nutdec: Return error on EOF from get_str()
|
||||
- rtsp: Make sure we don't write too many transport entries into a fixed-size array
|
||||
- rtpenc_jpeg: handle case of picture dimensions not dividing by 8
|
||||
- avcodec/golomb: get_ur_golomb_jpegls: Fix reading huge k values
|
||||
- avformat/swfdec: Do not error out on pixel format changes
|
||||
- avformat/mov: Mark avio context of decompressed atoms as seekable
|
||||
- avcodec/mjpegenc_common: Use ff_mpv_reallocate_putbitbuffer()
|
||||
- avcodec/mpegvideo: Factor ff_mpv_reallocate_putbitbuffer() out
|
||||
- avfilter/x86/vf_hqdn3d: Fix register types
|
||||
- avcodec/exr: fix crash caused by merge
|
||||
- avcodec/x86/h264_weight: handle weight1=128
|
||||
- avcodec/dvbsubdec: Fix buf_size check in dvbsub_parse_display_definition_segment()
|
||||
- avcodec/hevc_ps: Only discard overread VPS if a previous is available
|
||||
- avcodec/flacenc: Fix Invalid Rice order
|
||||
- lavd/xcbgrab: fix comparison with screen size.
|
||||
version 2.3.3:
|
||||
- h264: fix grayscale only decoding with weighted prediction
|
||||
- mjpegdec: support AV_PIX_FMT_YUV420P16 with upscale_h
|
||||
- proresenc_ks: fix buffer overflow
|
||||
- matroskadec: fix crash
|
||||
|
||||
version 2.6.3:
|
||||
- avcodec/libtheoraenc: Check for av_malloc failure
|
||||
- ffmpeg_opt: Fix -timestamp parsing
|
||||
- hevc: make avcodec_decode_video2() fail if get_format() fails
|
||||
- avcodec/cavsdec: Use ff_set_dimensions()
|
||||
- swr: fix alignment issue caused by 8ch sse functions
|
||||
- avcodec/mjpegdec: fix len computation in ff_mjpeg_decode_dqt()
|
||||
- avcodec/jpeg2000dec: fix boolean operator
|
||||
- avcodec/hevc_ps: Explicitly check num_tile_* for negative values
|
||||
- avformat/matroskadec: Cleanup error handling for bz2 & zlib
|
||||
- avformat/nutdec: Fix use of uninitialized value
|
||||
- tools/graph2dot: use larger data types than int for array/string sizes
|
||||
- avformat/matroskaenc: Check ff_vorbiscomment_length in put_flac_codecpriv()
|
||||
- avcodec/mpeg12dec: use the correct dimensions for checking SAR
|
||||
- xcbgrab: Validate the capture area
|
||||
- xcbgrab: Do not assume the non shm image data is always available
|
||||
- avfilter/lavfutils: disable frame threads when decoding a single image
|
||||
- avformat/mov: Do not read ACLR into extradata for H.264
|
||||
- ffmpeg: remove incorrect network deinit
|
||||
- OpenCL: Avoid potential buffer overflow in cmdutils_opencl.c
|
||||
- libvpxenc: only set noise reduction w/vp8
|
||||
- vp9: remove another optimization branch in iadst16 which causes overflows.
|
||||
- lavf: Reset global flag on deinit
|
||||
- network: Do not leave context locked on error
|
||||
- vp9: remove one optimization branch in iadst16 which causes overflows.
|
||||
- fate: Include branch information in the payload header
|
||||
- avformat/utils: Ensure that AVFMT_FLAG_CUSTOM_IO is set before use
|
||||
- avformat/img2dec: do not rewind custom io buffers
|
||||
- avcodec/alsdec: Use av_mallocz_array() for chan_data to ensure the arrays never contain random data
|
||||
- avcodec/atrac3plusdsp: fix on stack alignment
|
||||
- swresample/swresample-test: Randomly wipe out channel counts
|
||||
- swresample: Check channel layouts and channels against each other and print human readable error messages
|
||||
- swresample: Allow reinitialization without ever setting channel layouts (cherry picked from commit 80a28c7509a11114e1aea5b208d56c6646d69c07)
|
||||
- swresample: Allow reinitialization without ever setting channel counts
|
||||
- dashenc: replace attribute id with contentType for the AdaptationSet element
|
||||
- avformat/matroskaenc: Use avoid_negative_ts_use_pts if no stream writes dts
|
||||
- avformat/mux: Add avoid_negative_ts_use_pts
|
||||
- tests/fate-run: do not attempt to parse tiny_psnrs output if it failed
|
||||
- cafdec: free extradata before allocating it
|
||||
- imgutils: initialize palette padding bytes in av_image_alloc
|
||||
- aacdec: don't return frames without data
|
||||
- id3v2: catch avio_read errors in check_tag
|
||||
- avi: Validate sample_size
|
||||
- aacsbr: break infinite loop in sbr_hf_calc_npatches
|
||||
- diracdec: avoid overflow of bytes*8 in decode_lowdelay
|
||||
- diracdec: prevent overflow in data_unit_size check
|
||||
- avformat/matroskadec: Use tracks[k]->stream instead of s->streams[k]
|
||||
- matroskadec: use uint64_t instead of int for index_scale
|
||||
- pngdec: don't use AV_PIX_FMT_MONOBLACK for apng
|
||||
- pngdec: return correct error code from decode_frame_common
|
||||
- nutdec: fix illegal count check in decode_main_header
|
||||
- nutdec: fix memleaks on error in nut_read_header
|
||||
- apedec: prevent out of array writes in decode_array_0000
|
||||
- apedec: set s->samples only when init_frame_decoder succeeded
|
||||
- swscale/ppc/swscale_altivec.c: POWER LE support in yuv2planeX_8() delete macro GET_VF() it was wrong
|
||||
- alac: reject rice_limit 0 if compression is used
|
||||
- alsdec: only adapt order for positive max_order
|
||||
- bink: check vst->index_entries before using it
|
||||
- mpeg4videodec: only allow a positive length
|
||||
- aacpsy: correct calculation of minath in psy_3gpp_init
|
||||
- alsdec: validate time diff index
|
||||
- alsdec: ensure channel reordering is reversible
|
||||
- ac3: validate end in ff_ac3_bit_alloc_calc_mask
|
||||
- aacpsy: avoid psy_band->threshold becoming NaN
|
||||
- aasc: return correct buffer size from aasc_decode_frame
|
||||
- matroskadec: export cover art correctly
|
||||
- mxfenc: don't try to write footer without header
|
||||
- mxfenc: fix memleaks in mxf_write_footer
|
||||
- rtpenc_mpegts: Set chain->rtp_ctx only after avformat_write_header succeeded
|
||||
- rtpenc_mpegts: Free the right ->pb in the error path in the init function
|
||||
|
||||
version 2.6.2:
|
||||
- avcodec/h264: Do not fail with randomly truncated VUIs
|
||||
- avcodec/h264_ps: Move truncation check from VUI to SPS
|
||||
- avcodec/h264: Be more tolerant to changing pps id between slices
|
||||
- avcodec/aacdec: Fix storing state before PCE decode
|
||||
- avcodec/h264: reset the counts in the correct context
|
||||
- avcodec/h264_slice: Do not reset mb_aff_frame per slice
|
||||
- avcodec/h264: finish previous slices before switching to single thread mode
|
||||
- avcodec/h264: Fix race between slices where one overwrites data from the next
|
||||
- avformat/utils: avoid discarded streams in av_find_default_stream_index()
|
||||
- ffmpeg: Fix extradata allocation
|
||||
- avcodec/h264_refs: Do not set reference to things which do not exist
|
||||
- avcodec/h264: Fail for invalid mixed IDR / non IDR frames in slice threading mode
|
||||
- Revert "avcodec/exr: fix memset first arg in reverse_lut()"
|
||||
- h264: avoid unnecessary calls to get_format
|
||||
- avutil/pca: Check for av_malloc* failures
|
||||
- avutil/cpu: add missing check for mmxext to av_force_cpu_flags
|
||||
- lavc/dnxhd: Fix pix_fmt change.
|
||||
- avformat/http: replace cookies with updated values instead of appending forever
|
||||
- avformat/hls: store cookies returned in HLS key response
|
||||
- avformat/rmdec: fix support for 0 sized mdpr
|
||||
- avcodec/msrledec: restructure msrle_decode_pal4() based on the line number instead of the pixel pointer
|
||||
- avcodec/hevc_ps: Check cropping parameters more correctly
|
||||
- hevc: make the crop sizes unsigned
|
||||
- avcodec/dnxhddec: Reset is_444 if format is not 444
|
||||
- avcodec/dnxhddec: Check that the frame is interlaced before using cur_field
|
||||
- mips/float_dsp: fix vector_fmul_window_mips on mips64
|
||||
- doc: Remove non-existing decklink options.
|
||||
|
||||
version 2.6.1:
|
||||
- avformat/mov: Disallow ".." in dref unless use_absolute_path is set
|
||||
- avfilter/palettegen: make sure at least one frame was sent to the filter
|
||||
- avformat/mov: Check for string truncation in mov_open_dref()
|
||||
- ac3_fixed: fix out-of-bound read
|
||||
- mips/asmdefs: use _ABI64 as defined by gcc
|
||||
- hevc: delay ff_thread_finish_setup for hwaccel
|
||||
- avcodec/012v: Check dimensions more completely
|
||||
- asfenc: fix leaking asf->index_ptr on error
|
||||
- roqvideoenc: set enc->avctx in roq_encode_init
|
||||
- avcodec/options_table: remove extradata_size from the AVOptions table
|
||||
- ffmdec: limit the backward seek to the last resync position
|
||||
- Add dependencies to configure file for vf_fftfilt
|
||||
- ffmdec: make sure the time base is valid
|
||||
- ffmdec: fix infinite loop at EOF
|
||||
- ffmdec: initialize f_cprv, f_stvi and f_stau
|
||||
- arm: Suppress tags about used cpu arch and extensions
|
||||
- mxfdec: Fix the error handling for when strftime fails
|
||||
- avcodec/opusdec: Fix delayed sample value
|
||||
- avcodec/opusdec: Clear out pointers per packet
|
||||
- avcodec/utils: Align YUV411 by as much as the other YUV variants
|
||||
- lavc/hevcdsp: Fix compilation for arm with --disable-neon.
|
||||
- vp9: fix segmentation map retention with threading enabled.
|
||||
- Revert "avutil/opencl: is_compiled flag not being cleared in av_opencl_uninit"
|
||||
|
||||
version 2.6:
|
||||
- nvenc encoder
|
||||
- 10bit spp filter
|
||||
- colorlevels filter
|
||||
- RIFX format for *.wav files
|
||||
- RTP/mpegts muxer
|
||||
- non continuous cache protocol support
|
||||
- tblend filter
|
||||
- cropdetect support for non 8bpp, absolute (if limit >= 1) and relative (if limit < 1.0) threshold
|
||||
- Camellia symmetric block cipher
|
||||
- OpenH264 encoder wrapper
|
||||
- VOC seeking support
|
||||
- Closed caption Decoder
|
||||
- fspp, uspp, pp7 MPlayer postprocessing filters ported to native filters
|
||||
- showpalette filter
|
||||
- Twofish symmetric block cipher
|
||||
- Support DNx100 (960x720@8)
|
||||
- eq2 filter ported from libmpcodecs as eq filter
|
||||
- removed libmpcodecs
|
||||
- Changed default DNxHD colour range in QuickTime .mov derivatives to mpeg range
|
||||
- ported softpulldown filter from libmpcodecs as repeatfields filter
|
||||
- dcshift filter
|
||||
- RTP depacketizer for loss tolerant payload format for MP3 audio (RFC 5219)
|
||||
- RTP depacketizer for AC3 payload format (RFC 4184)
|
||||
- palettegen and paletteuse filters
|
||||
- VP9 RTP payload format (draft 0) experimental depacketizer
|
||||
- RTP depacketizer for DV (RFC 6469)
|
||||
- DXVA2-accelerated HEVC decoding
|
||||
- AAC ELD 480 decoding
|
||||
- Intel QSV-accelerated H.264 decoding
|
||||
- DSS SP decoder and DSS demuxer
|
||||
- Fix stsd atom corruption in DNxHD QuickTimes
|
||||
- Canopus HQX decoder
|
||||
- RTP depacketization of T.140 text (RFC 4103)
|
||||
- Port MIPS optimizations to 64-bit
|
||||
|
||||
|
||||
version 2.5:
|
||||
- HEVC/H.265 RTP payload format (draft v6) packetizer
|
||||
- SUP/PGS subtitle demuxer
|
||||
- ffprobe -show_pixel_formats option
|
||||
- CAST128 symmetric block cipher, ECB mode
|
||||
- STL subtitle demuxer and decoder
|
||||
- libutvideo YUV 4:2:2 10bit support
|
||||
- XCB-based screen-grabber
|
||||
- UDP-Lite support (RFC 3828)
|
||||
- xBR scaling filter
|
||||
- AVFoundation screen capturing support
|
||||
- ffserver supports codec private options
|
||||
- creating DASH compatible fragmented MP4, MPEG-DASH segmenting muxer
|
||||
- WebP muxer with animated WebP support
|
||||
- zygoaudio decoding support
|
||||
- APNG demuxer
|
||||
- postproc visualization support
|
||||
|
||||
|
||||
version 2.4:
|
||||
- Icecast protocol
|
||||
- ported lenscorrection filter from frei0r filter
|
||||
- large optimizations in dctdnoiz to make it usable
|
||||
- ICY metadata are now requested by default with the HTTP protocol
|
||||
- support for using metadata in stream specifiers in fftools
|
||||
- LZMA compression support in TIFF decoder
|
||||
- H.261 RTP payload format (RFC 4587) depacketizer and experimental packetizer
|
||||
- HEVC/H.265 RTP payload format (draft v6) depacketizer
|
||||
- added codecview filter to visualize information exported by some codecs
|
||||
- Matroska 3D support through side data
|
||||
- HTML generation using texi2html is deprecated in favor of makeinfo/texi2any
|
||||
- silenceremove filter
|
||||
version 2.3.2:
|
||||
- snow: fix null pointer dereference
|
||||
- huffyuvdec: fix overread
|
||||
- vc1dec: fix crash
|
||||
- iff: fix out of array access
|
||||
- matroskaenc: fix assertion failure
|
||||
- cdgraphics: fix infinite loop
|
||||
- dvdsub_parser: fix infinite loop
|
||||
- mpeg12dec: support decoding some broken files
|
||||
- v4l2enc: fix crash
|
||||
- h264_parser: fix handling huge resolutions
|
||||
- h264_mp4toannexb_bsf: multiple bugfixes
|
||||
|
||||
version 2.3.1:
|
||||
- public AVDCT API/ABI for DCT functions
|
||||
- g2meet: allow size changes within original sizes
|
||||
- dv: improved error resilience, fixing Ticket2340 and Ticket2341
|
||||
|
||||
version 2.3:
|
||||
- AC3 fixed-point decoding
|
||||
@@ -336,7 +48,7 @@ version 2.3:
|
||||
- libbs2b-based stereo-to-binaural audio filter
|
||||
- libx264 reference frames count limiting depending on level
|
||||
- native Opus decoder
|
||||
- display matrix export and rotation API
|
||||
- display matrix export and rotation api
|
||||
- WebVTT encoder
|
||||
- showcqt multimedia filter
|
||||
- zoompan filter
|
||||
@@ -378,7 +90,6 @@ version 2.2:
|
||||
- libx265 encoder
|
||||
- dejudder filter
|
||||
- Autodetect VDA like all other hardware accelerations
|
||||
- aliases and defaults for Ogg subtypes (opus, spx)
|
||||
|
||||
|
||||
version 2.1:
|
||||
|
107  LICENSE.md

@@ -1,73 +1,67 @@
#FFmpeg:
|
||||
|
||||
Most files in FFmpeg are under the GNU Lesser General Public License version 2.1
|
||||
or later (LGPL v2.1+). Read the file `COPYING.LGPLv2.1` for details. Some other
|
||||
or later (LGPL v2.1+). Read the file COPYING.LGPLv2.1 for details. Some other
|
||||
files have MIT/X11/BSD-style licenses. In combination the LGPL v2.1+ applies to
|
||||
FFmpeg.
|
||||
|
||||
Some optional parts of FFmpeg are licensed under the GNU General Public License
|
||||
version 2 or later (GPL v2+). See the file `COPYING.GPLv2` for details. None of
|
||||
these parts are used by default, you have to explicitly pass `--enable-gpl` to
|
||||
version 2 or later (GPL v2+). See the file COPYING.GPLv2 for details. None of
|
||||
these parts are used by default, you have to explicitly pass --enable-gpl to
|
||||
configure to activate them. In this case, FFmpeg's license changes to GPL v2+.
|
||||
|
||||
Specifically, the GPL parts of FFmpeg are:
|
||||
|
||||
- libpostproc
|
||||
- libmpcodecs
|
||||
- optional x86 optimizations in the files
|
||||
- `libavcodec/x86/flac_dsp_gpl.asm`
|
||||
- `libavcodec/x86/idct_mmx.c`
|
||||
libavcodec/x86/idct_mmx.c
|
||||
- libutvideo encoding/decoding wrappers in
|
||||
`libavcodec/libutvideo*.cpp`
|
||||
- the X11 grabber in `libavdevice/x11grab.c`
|
||||
libavcodec/libutvideo*.cpp
|
||||
- the X11 grabber in libavdevice/x11grab.c
|
||||
- the swresample test app in
|
||||
`libswresample/swresample-test.c`
|
||||
- the `texi2pod.pl` tool
|
||||
libswresample/swresample-test.c
|
||||
- the texi2pod.pl tool
|
||||
- the following filters in libavfilter:
|
||||
- `f_ebur128.c`
|
||||
- `vf_blackframe.c`
|
||||
- `vf_boxblur.c`
|
||||
- `vf_colormatrix.c`
|
||||
- `vf_cropdetect.c`
|
||||
- `vf_delogo.c`
|
||||
- `vf_eq.c`
|
||||
- `vf_fspp.c`
|
||||
- `vf_geq.c`
|
||||
- `vf_histeq.c`
|
||||
- `vf_hqdn3d.c`
|
||||
- `vf_interlace.c`
|
||||
- `vf_kerndeint.c`
|
||||
- `vf_mcdeint.c`
|
||||
- `vf_mpdecimate.c`
|
||||
- `vf_owdenoise.c`
|
||||
- `vf_perspective.c`
|
||||
- `vf_phase.c`
|
||||
- `vf_pp.c`
|
||||
- `vf_pp7.c`
|
||||
- `vf_pullup.c`
|
||||
- `vf_sab.c`
|
||||
- `vf_smartblur.c`
|
||||
- `vf_repeatfields.c`
|
||||
- `vf_spp.c`
|
||||
- `vf_stereo3d.c`
|
||||
- `vf_super2xsai.c`
|
||||
- `vf_tinterlace.c`
|
||||
- `vf_uspp.c`
|
||||
- `vsrc_mptestsrc.c`
|
||||
- f_ebur128.c
|
||||
- vf_blackframe.c
|
||||
- vf_boxblur.c
|
||||
- vf_colormatrix.c
|
||||
- vf_cropdetect.c
|
||||
- vf_decimate.c
|
||||
- vf_delogo.c
|
||||
- vf_geq.c
|
||||
- vf_histeq.c
|
||||
- vf_hqdn3d.c
|
||||
- vf_kerndeint.c
|
||||
- vf_mcdeint.c
|
||||
- vf_mp.c
|
||||
- vf_owdenoise.c
|
||||
- vf_perspective.c
|
||||
- vf_phase.c
|
||||
- vf_pp.c
|
||||
- vf_pullup.c
|
||||
- vf_sab.c
|
||||
- vf_smartblur.c
|
||||
- vf_spp.c
|
||||
- vf_stereo3d.c
|
||||
- vf_super2xsai.c
|
||||
- vf_tinterlace.c
|
||||
- vsrc_mptestsrc.c
|
||||
|
||||
Should you, for whatever reason, prefer to use version 3 of the (L)GPL, then
|
||||
the configure parameter `--enable-version3` will activate this licensing option
|
||||
for you. Read the file `COPYING.LGPLv3` or, if you have enabled GPL parts,
|
||||
`COPYING.GPLv3` to learn the exact legal terms that apply in this case.
|
||||
the configure parameter --enable-version3 will activate this licensing option
|
||||
for you. Read the file COPYING.LGPLv3 or, if you have enabled GPL parts,
|
||||
COPYING.GPLv3 to learn the exact legal terms that apply in this case.
|
||||
|
||||
There are a handful of files under other licensing terms, namely:
|
||||
|
||||
* The files `libavcodec/jfdctfst.c`, `libavcodec/jfdctint_template.c` and
|
||||
`libavcodec/jrevdct.c` are taken from libjpeg, see the top of the files for
|
||||
* The files libavcodec/jfdctfst.c, libavcodec/jfdctint_template.c and
|
||||
libavcodec/jrevdct.c are taken from libjpeg, see the top of the files for
|
||||
licensing details. Specifically note that you must credit the IJG in the
|
||||
documentation accompanying your program if you only distribute executables.
|
||||
You must also indicate any changes including additions and deletions to
|
||||
those three files in the documentation.
|
||||
* `tests/reference.pnm` is under the expat license.
|
||||
|
||||
|
||||
external libraries
|
||||
@@ -80,22 +74,21 @@ compatible libraries
|
||||
--------------------
|
||||
|
||||
The following libraries are under GPL:
|
||||
- frei0r
|
||||
- libcdio
|
||||
- libutvideo
|
||||
- libvidstab
|
||||
- libx264
|
||||
- libx265
|
||||
- libxavs
|
||||
- libxvid
|
||||
|
||||
- frei0r
|
||||
- libcdio
|
||||
- libutvideo
|
||||
- libvidstab
|
||||
- libx264
|
||||
- libx265
|
||||
- libxavs
|
||||
- libxvid
|
||||
When combining them with FFmpeg, FFmpeg needs to be licensed as GPL as well by
|
||||
passing `--enable-gpl` to configure.
|
||||
passing --enable-gpl to configure.
|
||||
|
||||
The OpenCORE and VisualOn libraries are under the Apache License 2.0. That
|
||||
license is incompatible with the LGPL v2.1 and the GPL v2, but not with
|
||||
version 3 of those licenses. So to combine these libraries with FFmpeg, the
|
||||
license version needs to be upgraded by passing `--enable-version3` to configure.
|
||||
license version needs to be upgraded by passing --enable-version3 to configure.
|
||||
|
||||
incompatible libraries
|
||||
----------------------
|
||||
@@ -103,7 +96,7 @@ incompatible libraries
|
||||
The Fraunhofer AAC library, FAAC and aacplus are under licenses which
|
||||
are incompatible with the GPLv2 and v3. We do not know for certain if their
|
||||
licenses are compatible with the LGPL.
|
||||
If you wish to enable these libraries, pass `--enable-nonfree` to configure.
|
||||
If you wish to enable these libraries, pass --enable-nonfree to configure.
|
||||
But note that if you enable any of these libraries the resulting binary will
|
||||
be under a complex license mix that is more restrictive than the LGPL and that
|
||||
may result in additional obligations. It is possible that these
|
||||
|
27  MAINTAINERS

@@ -14,7 +14,6 @@ patches and related discussions.
Project Leader
|
||||
==============
|
||||
|
||||
Michael Niedermayer
|
||||
final design decisions
|
||||
|
||||
|
||||
@@ -54,7 +53,7 @@ release management Michael Niedermayer
|
||||
Communication
|
||||
=============
|
||||
|
||||
website Deby Barbara Lepage
|
||||
website Robert Swain
|
||||
fate.ffmpeg.org Timothy Gu
|
||||
Trac bug tracker Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos, Lou Logan
|
||||
mailing lists Michael Niedermayer, Baptiste Coudurier, Lou Logan
|
||||
@@ -156,7 +155,6 @@ Codecs:
|
||||
celp_filters.* Vitor Sessak
|
||||
cinepak.c Roberto Togni
|
||||
cinepakenc.c Rl / Aetey G.T. AB
|
||||
ccaption_dec.c Anshul Maheshwari
|
||||
cljr Alex Beregszaszi
|
||||
cllc.c Derek Buitenhuis
|
||||
cook.c, cookdata.h Benjamin Larsson
|
||||
@@ -166,7 +164,6 @@ Codecs:
|
||||
dca.c Kostya Shishkov, Benjamin Larsson
|
||||
dnxhd* Baptiste Coudurier
|
||||
dpcm.c Mike Melanson
|
||||
dss_sp.c Oleksij Rempel, Michael Niedermayer
|
||||
dv.c Roman Shaposhnik
|
||||
dvbsubdec.c Anshul Maheshwari
|
||||
dxa.c Kostya Shishkov
|
||||
@@ -228,7 +225,6 @@ Codecs:
|
||||
msvideo1.c Mike Melanson
|
||||
nellymoserdec.c Benjamin Larsson
|
||||
nuv.c Reimar Doeffinger
|
||||
nvenc.c Timo Rothenpieler
|
||||
paf.* Paul B Mahol
|
||||
pcx.c Ivo van Poorten
|
||||
pgssubdec.c Reimar Doeffinger
|
||||
@@ -312,7 +308,6 @@ libavdevice
|
||||
|
||||
|
||||
avfoundation.m Thilo Borgmann
|
||||
decklink* Deti Fliegl
|
||||
dshow.c Roger Pack (CC rogerdpack@gmail.com)
|
||||
fbdev_enc.c Lukasz Marek
|
||||
gdigrab.c Roger Pack (CC rogerdpack@gmail.com)
|
||||
@@ -323,7 +318,7 @@ libavdevice
|
||||
pulse_audio_enc.c Lukasz Marek
|
||||
qtkit.m Thilo Borgmann
|
||||
sdl Stefano Sabatini
|
||||
v4l2.c Giorgio Vazzana
|
||||
v4l2.c Luca Abeni
|
||||
vfwcap.c Ramiro Polla
|
||||
xv.c Lukasz Marek
|
||||
|
||||
@@ -347,7 +342,6 @@ Filters:
|
||||
af_compand.c Paul B Mahol
|
||||
af_ladspa.c Paul B Mahol
|
||||
af_pan.c Nicolas George
|
||||
af_silenceremove.c Paul B Mahol
|
||||
avf_avectorscope.c Paul B Mahol
|
||||
avf_showcqt.c Muhammad Faiz
|
||||
vf_blend.c Paul B Mahol
|
||||
@@ -358,9 +352,7 @@ Filters:
|
||||
vf_extractplanes.c Paul B Mahol
|
||||
vf_histogram.c Paul B Mahol
|
||||
vf_hqx.c Clément Bœsch
|
||||
vf_idet.c Pascal Massimino
|
||||
vf_il.c Paul B Mahol
|
||||
vf_lenscorrection.c Daniel Oberhoff
|
||||
vf_mergeplanes.c Paul B Mahol
|
||||
vf_psnr.c Paul B Mahol
|
||||
vf_scale.c Michael Niedermayer
|
||||
@@ -389,7 +381,6 @@ Muxers/Demuxers:
|
||||
aiffdec.c Baptiste Coudurier, Matthieu Bouron
|
||||
aiffenc.c Baptiste Coudurier, Matthieu Bouron
|
||||
ape.c Kostya Shishkov
|
||||
apngdec.c Benoit Fouet
|
||||
ass* Aurelien Jacobs
|
||||
astdec.c Paul B Mahol
|
||||
astenc.c James Almer
|
||||
@@ -402,7 +393,6 @@ Muxers/Demuxers:
|
||||
cdxl.c Paul B Mahol
|
||||
crc.c Michael Niedermayer
|
||||
daud.c Reimar Doeffinger
|
||||
dss.c Oleksij Rempel, Michael Niedermayer
|
||||
dtshddec.c Paul B Mahol
|
||||
dv.c Roman Shaposhnik
|
||||
dxa.c Kostya Shishkov
|
||||
@@ -467,19 +457,12 @@ Muxers/Demuxers:
|
||||
rmdec.c, rmenc.c Ronald S. Bultje, Kostya Shishkov
|
||||
rtmp* Kostya Shishkov
|
||||
rtp.c, rtpenc.c Martin Storsjo
|
||||
rtpdec_ac3.* Gilles Chanteperdrix
|
||||
rtpdec_dv.* Thomas Volkert
|
||||
rtpdec_h261.*, rtpenc_h261.* Thomas Volkert
|
||||
rtpdec_hevc.*, rtpenc_hevc.* Thomas Volkert
|
||||
rtpdec_mpa_robust.* Gilles Chanteperdrix
|
||||
rtpdec_asf.* Ronald S. Bultje
|
||||
rtpdec_vp9.c Thomas Volkert
|
||||
rtpenc_mpv.*, rtpenc_aac.* Martin Storsjo
|
||||
rtsp.c Luca Barbato
|
||||
sbgdec.c Nicolas George
|
||||
sdp.c Martin Storsjo
|
||||
segafilm.c Mike Melanson
|
||||
segment.c Stefano Sabatini
|
||||
siff.c Kostya Shishkov
|
||||
smacker.c Kostya Shishkov
|
||||
smjpeg* Paul B Mahol
|
||||
@@ -506,7 +489,6 @@ Protocols:
|
||||
libssh.c Lukasz Marek
|
||||
mms*.c Ronald S. Bultje
|
||||
udp.c Luca Abeni
|
||||
icecast.c Marvin Scholz
|
||||
|
||||
|
||||
libswresample
|
||||
@@ -545,10 +527,9 @@ x86 Michael Niedermayer
|
||||
Releases
|
||||
========
|
||||
|
||||
2.6 Michael Niedermayer
|
||||
2.5 Michael Niedermayer
|
||||
2.4 Michael Niedermayer
|
||||
2.3 Michael Niedermayer
|
||||
2.2 Michael Niedermayer
|
||||
1.2 Michael Niedermayer
|
||||
|
||||
If you want to maintain an older release, please contact us
|
||||
|
||||
|
8  Makefile

@@ -32,7 +32,6 @@ OBJS-ffmpeg += ffmpeg_opt.o ffmpeg_filter.o
OBJS-ffmpeg-$(HAVE_VDPAU_X11) += ffmpeg_vdpau.o
OBJS-ffmpeg-$(HAVE_DXVA2_LIB) += ffmpeg_dxva2.o
OBJS-ffmpeg-$(CONFIG_VDA) += ffmpeg_vda.o
OBJS-ffserver += ffserver_config.o

TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
@@ -64,7 +63,7 @@ FF_DEP_LIBS := $(DEP_LIBS)
all: $(AVPROGS)

$(TOOLS): %$(EXESUF): %.o $(EXEOBJS)
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS)
$(LD) $(LDFLAGS) $(LD_O) $^ $(ELIBS)

tools/cws2fws$(EXESUF): ELIBS = $(ZLIB)
tools/uncoded_frame$(EXESUF): $(FF_DEP_LIBS)
@@ -80,7 +79,7 @@ SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS \
HEADERS ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
ARMV5TE-OBJS ARMV6-OBJS ARMV8-OBJS VFP-OBJS NEON-OBJS \
ALTIVEC-OBJS MMX-OBJS YASM-OBJS \
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS \
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS MIPS32R2-OBJS \
OBJS SLIBOBJS HOSTOBJS TESTOBJS

define RESET
@@ -93,7 +92,6 @@ $(foreach V,$(SUBDIR_VARS),$(eval $(call RESET,$(V))))
SUBDIR := $(1)/
include $(SRC_PATH)/$(1)/Makefile
-include $(SRC_PATH)/$(1)/$(ARCH)/Makefile
-include $(SRC_PATH)/$(1)/$(INTRINSICS)/Makefile
include $(SRC_PATH)/library.mak
endef

@@ -119,7 +117,7 @@ $(PROGS): %$(PROGSSUF)$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
$(STRIP) $@

%$(PROGSSUF)_g$(EXESUF): %.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $(OBJS-$*) $(FF_EXTRALIBS)
$(LD) $(LDFLAGS) $(LD_O) $(OBJS-$*) $(FF_EXTRALIBS)

OBJDIRS += tools
43  README.md

@@ -1,42 +1,19 @@
FFmpeg README
=============

FFmpeg is a collection of libraries and tools to process multimedia content
such as audio, video, subtitles and related metadata.
1) Documentation
----------------

## Libraries
* Read the documentation in the doc/ directory in git.

* `libavcodec` provides implementation of a wider range of codecs.
* `libavformat` implements streaming protocols, container formats and basic I/O access.
* `libavutil` includes hashers, decompressors and miscellaneous utility functions.
* `libavfilter` provides a mean to alter decoded Audio and Video through chain of filters.
* `libavdevice` provides an abstraction to access capture and playback devices.
* `libswresample` implements audio mixing and resampling routines.
* `libswscale` implements color conversion and scaling routines.
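To give a feel for how these libraries fit together, here is a minimal sketch (not part of the README being diffed; the input path is a placeholder and the calls are the long-standing 2.x-era libavformat API) that opens a file and walks its packets:

```c
#include <libavformat/avformat.h>

int main(void)
{
    AVFormatContext *fmt = NULL;
    AVPacket pkt;

    av_register_all();                       /* register demuxers/decoders (required in the 2.x API) */
    if (avformat_open_input(&fmt, "input.mkv", NULL, NULL) < 0)  /* placeholder filename */
        return 1;
    if (avformat_find_stream_info(fmt, NULL) < 0) {
        avformat_close_input(&fmt);
        return 1;
    }
    while (av_read_frame(fmt, &pkt) >= 0) {  /* demux one packet at a time */
        /* a real program would hand pkt to a libavcodec decoder here */
        av_free_packet(&pkt);                /* av_packet_unref() in newer releases */
    }
    avformat_close_input(&fmt);
    return 0;
}
```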
You can also view it online at http://ffmpeg.org/documentation.html

## Tools
2) Licensing
------------

* [ffmpeg](http://ffmpeg.org/ffmpeg.html) is a command line toolbox to
  manipulate, convert and stream multimedia content.
* [ffplay](http://ffmpeg.org/ffplay.html) is a minimalistic multimedia player.
* [ffprobe](http://ffmpeg.org/ffprobe.html) is a simple analysis tool to inspect
  multimedia content.
* [ffserver](http://ffmpeg.org/ffserver.html) is a multimedia streaming server
  for live broadcasts.
* Additional small tools such as `aviocat`, `ismindex` and `qt-faststart`.
* See the LICENSE file.

## Documentation
3) Build and Install
--------------------

The offline documentation is available in the **doc/** directory.

The online documentation is available in the main [website](http://ffmpeg.org)
and in the [wiki](http://trac.ffmpeg.org).

### Examples

Coding examples are available in the **doc/examples** directory.

## License

FFmpeg codebase is mainly LGPL-licensed with optional components licensed under
GPL. Please refer to the LICENSE file for detailed information.
* See the INSTALL file.
214  RELEASE_NOTES

@@ -1,65 +1,177 @@
┌───────────────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 2.3 "Mandelbrot" │
└───────────────────────────────────────────┘

┌─────────────────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 2.6 "Grothendieck" │
└─────────────────────────────────────────────┘
The FFmpeg Project proudly presents FFmpeg 2.3 "Mandelbrot", a major
release with all the great features committed during the three-month period
since the release of FFmpeg 2.2.

The FFmpeg Project proudly presents FFmpeg 2.6 "Grothendieck", about 3
months after the release of FFmpeg 2.5.
In this release, there are lots of internal overhauls that make FFmpeg a
more accessible project for new developers. Many important new
optimizations and features like QTKit and AVFoundation input devices are
committed. Contributions done by Libav such as a new native Opus decoder
are also merged.

A lot of important work got in this time, so let's start talking about what
we like to brag the most about: features.
Because of the increasing difficulty to maintain and lack of maintainers,
we are very sorry to say that we have removed all Blackfin and SPARC
architecture assembly optimizations with the cleanups done. If you are
interested in maintaining optimizations for these two architectures, feel
free to contact us and we will restore the code!

A lot of people will probably be happy to hear that we now have support for
NVENC — the Nvidia Video Encoder interface for H.264 encoding — thanks to
Timo Rothenpieler, with some little help from NVIDIA and Philip Langdale.
Oh, and since this release, this modern-looking release note is provided in
addition to the old-style Changelog file, to make it easier for you to
focus on the most important features in this release.

People in the broadcasting industry might also be interested in the first
steps of closed captions support with the introduction of a decoder by
Anshul Maheshwari.
Enjoy!

Regarding filters love, we improved and added many. We could talk about the
10-bit support in spp, but maybe it's more important to mention the addition
of colorlevels (yet another color handling filter), tblend (allowing you
to for example run a diff between successive frames of a video stream), or
the dcshift audio filter.
┌────────────────────────────┐
│ * API Information │
└────────────────────────────┘

There are also two other important filters landing in libavfilter: palettegen
and paletteuse. Both submitted by the Stupeflix company. These filters will
be very useful in case you are looking for creating high quality GIFs, a
format that still bravely fights annihilation in 2015.
FFmpeg 2.3 is completely source-compatible to the FFmpeg 2.2 series. There
are however some API deprecations that you need to take care of. Use `git
diff n2.2 n2.3 doc/APIchanges` to show the list of added and deprecated
APIs. FFmpeg 2.3 includes the following library versions:

There are many other new features, but let's follow-up on one big cleanup
achievement: the libmpcodecs (MPlayer filters) wrapper is finally dead. The
last remaining filters (softpulldown/repeatfields, eq*, and various
postprocessing filters) were ported by Arwa Arif (OPW student) and Paul B
Mahol.
• libavutil 52.92.100
• libavcodec 55.69.100
• libavformat 55.48.100
• libavdevice 55.13.102
• libavfilter 4.11.100
• libswscale 2. 6.100
• libswresample 0.19.100
• libpostproc 52. 3.100

Concerning API changes, there are not many things to mention. Though, the
introduction of device inputs and outputs listing by Lukasz Marek is a
notable addition (try ffmpeg -sources or ffmpeg -sinks for an example of
the usage). As usual, see doc/APIchanges for more information.
Please refer to the doc/APIchanges file for more information.

Now let's talk about optimizations. Ronald S. Bultje made the VP9 decoder
usable on x86 32-bit systems and pre-ssse3 CPUs like Phenom (even dual core
Athlons can play 1080p 30fps VP9 content now), so we now secretly hope for
Google and Mozilla to use ffvp9 instead of libvpx. But VP9 is not the
center of attention anymore, and HEVC/H.265 is also getting many
improvements, which include C and x86 ASM optimizations, mainly from James
Almer, Christophe Gisquet and Pierre-Edouard Lepere.
┌────────────────────────────┐
│ New Optimization │
└────────────────────────────┘

Even though we had many x86 contributions, it is not the only architecture
getting some love, with Seppo Tomperi adding ARM NEON optimizations to the
HEVC stack, and James Cowgill adding MIPS64 assembly for all kind of audio
processing code in libavcodec.
We are excited to announce that we have committed new x86 assembly
optimization for HEVC, and FFmpeg's audio resampler libswresample. ARM
users will get a boost in MLP/TrueHD decoding thanks to new optimization.
Decoding Huffyuv also got a major boost from optimization on the C code.

And finally, Michael Niedermayer is still fixing many bugs, dealing with
most of the boring work such as making releases, applying tons of
contributors patches, and daily merging the changes from the Libav project.
Of special interest for Microsoft Visual Studio users, we have also
converted some preexisting x86 assembly to NASM/Yasm format compatible
with MSVC setup, especially in the area of audio resampling.

A more complete Changelog is available at the root of the project, and the
complete Git history on http://source.ffmpeg.org.
Another major feature in this release is the introduction of AArch64
(ARMv8) assembly optimization. AArch64 is another name for the first
64-bit ARM architecture, used by Apple A7 SoC inside iPhone 5S. Some
32-bit ARM assembly has already been ported to AArch64, but more work is
underway.

We hope you will like this release as much as we enjoyed working on it, and
as usual, if you have any questions about it, or any FFmpeg related topic,
feel free to join us on the #ffmpeg IRC channel (on irc.freenode.net) or ask
on the mailing-lists.
┌────────────────────────────┐
│ Native Opus decoder │
└────────────────────────────┘

Opus is an open audio format jointly developed by Xiph.Org, Mozilla,
Skype/Microsoft, and Broadcom. It combines the features of the Skype SILK
speech codec and the Xiph.Org CELT music codec into one low-latency
codec. Decoding Opus is already possible since FFmpeg 1.0 using the
libopus library, but the new Opus native decoder brings a higher level of
stability and speed.
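As a purely illustrative aside (not part of the release notes text), preferring the native decoder over the libopus wrapper might look like the sketch below; "opus" and "libopus" are the names these decoders normally register under.

```c
#include <libavcodec/avcodec.h>

/* Pick the native Opus decoder, falling back to the libopus wrapper
 * if this build lacks it. avcodec_register_all() must have run first. */
AVCodec *pick_opus_decoder(void)
{
    AVCodec *c = avcodec_find_decoder_by_name("opus");   /* native decoder */
    if (!c)
        c = avcodec_find_decoder_by_name("libopus");      /* external wrapper */
    return c;
}
```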

┌────────────────────────────┐
│ QTKit and AVFoundation │
└────────────────────────────┘

For OS X users, the new QTKit and AVFoundation devices allow you to use
the integrated camera on Macs. AVFoundation is a newer API only available
on OS X 10.7 "Lion" or newer. For users with older OS X systems, the
QTKit device using the older OS X API is for you.

┌────────────────────────────┐
│ API Additions │
└────────────────────────────┘

In this release, stream side data are introduced as AVStream.side_data as
a way to store miscellaneous stream-wide information. The format is
similar to the previously anonymous structure AVPacket.side_data (now
named as AVPacketSideData). With this change, audio ReplayGain
information and video rotation matrix are now exported through this API,
if available in the demuxer.
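As an illustrative sketch of the API described above (assuming a stream taken from an already-opened AVFormatContext; error handling omitted), the exported display matrix can be queried roughly like this:

```c
#include <libavformat/avformat.h>
#include <libavutil/display.h>

/* Return the rotation (in degrees) stored as stream side data, or 0.0
 * if the demuxer did not export a display matrix. */
double stream_rotation(const AVStream *st)
{
    int i;
    for (i = 0; i < st->nb_side_data; i++) {
        const AVPacketSideData *sd = &st->side_data[i];
        if (sd->type == AV_PKT_DATA_DISPLAYMATRIX && sd->size >= 9 * sizeof(int32_t))
            return av_display_rotation_get((const int32_t *)sd->data);
    }
    return 0.0;
}
```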

We also have improved libswresample's Doxygen API documentation, so new
developers wishing to use FFmpeg's excellent libraries can get started
more easily and faster.

┌────────────────────────────┐
│ Last But Not Least │
└────────────────────────────┘

Other interesting new features include the hqx video filter, a pixel art
scaling filter; a fixed-point AC-3 decoder contributed by Imagination
Technologies; an On2 TrueMotion VP7 video decoder; an HTML5 WebVTT
subtitle encoder that allows creation of WebVTT from any text-based
subtitles; and a 1-bit Direct Stream Digital audio decoder.

┌────────────────────────────┐
│ ★ List of New Features │
└────────────────────────────┘

┌────────────────────────────┐
│ Command line tools │
└────────────────────────────┘

• Support for decoding through DXVA2 in ffmpeg

┌────────────────────────────┐
│ libavcodec │
└────────────────────────────┘

• AC3 fixed-point decoding
• VP7 video decoder
• Alias PIX image encoder and decoder
• Improvements to the BRender PIX image decoder
• Improvements to the XBM decoder
• Improvements to OpenEXR image decoder
• Support decoding 16-bit RLE SGI images
• Direct Stream Digital (DSD) decoder
• On2 AVC (Audio for Video) decoder
• Native Opus decoder
• WebVTT encoder

┌────────────────────────────┐
│ libavdevice │
└────────────────────────────┘

• QTKit input device
• GDI screen grabbing for Windows
• AVFoundation input device

┌────────────────────────────┐
│ libavformat │
└────────────────────────────┘

• subfile protocol
• Phantom Cine demuxer
• Alternative rendition support for HTTP Live Streaming
• Magic Lantern Video (MLV) demuxer
• Image format auto-detection
• LRC lyric file demuxer and muxer
• Samba protocol (via libsmbclient)
• WebM DASH Manifest muxer

┌────────────────────────────┐
│ libavfilter │
└────────────────────────────┘

• shuffleplanes filter
• libbs2b-based stereo-to-binaural audio filter
• showcqt multimedia filter
• zoompan filter
• signalstats filter
• hqx filter (hq2x, hq3x, hq4x)
• flanger filter
• libfribidi support in drawtext

┌────────────────────────────┐
│ ⚠ Behaviour changes │
└────────────────────────────┘

• libx264 reference frames count is now limited depending on level chosen
• Because of the new image format auto-detection feature, you don't need to
specify image format when decoding an image with no extension.
1  arch.mak

@@ -5,6 +5,7 @@ OBJS-$(HAVE_VFP) += $(VFP-OBJS) $(VFP-OBJS-yes)
OBJS-$(HAVE_NEON) += $(NEON-OBJS) $(NEON-OBJS-yes)

OBJS-$(HAVE_MIPSFPU) += $(MIPSFPU-OBJS) $(MIPSFPU-OBJS-yes)
OBJS-$(HAVE_MIPS32R2) += $(MIPS32R2-OBJS) $(MIPS32R2-OBJS-yes)
OBJS-$(HAVE_MIPSDSPR1) += $(MIPSDSPR1-OBJS) $(MIPSDSPR1-OBJS-yes)
OBJS-$(HAVE_MIPSDSPR2) += $(MIPSDSPR2-OBJS) $(MIPSDSPR2-OBJS-yes)
251  cmdutils.c

@@ -166,7 +166,7 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
int first;

first = 1;
for (po = options; po->name; po++) {
for (po = options; po->name != NULL; po++) {
char buf[64];

if (((po->flags & req_flags) != req_flags) ||
@@ -205,7 +205,7 @@ static const OptionDef *find_option(const OptionDef *po, const char *name)
const char *p = strchr(name, ':');
int len = p ? p - name : strlen(name);

while (po->name) {
while (po->name != NULL) {
if (!strncmp(name, po->name, len) && strlen(po->name) == len)
break;
po++;
@@ -254,7 +254,7 @@ static void prepare_app_arguments(int *argc_ptr, char ***argv_ptr)

win32_argv_utf8 = av_mallocz(sizeof(char *) * (win32_argc + 1) + buffsize);
argstr_flat = (char *)win32_argv_utf8 + sizeof(char *) * (win32_argc + 1);
if (!win32_argv_utf8) {
if (win32_argv_utf8 == NULL) {
LocalFree(argv_w);
return;
}
@@ -290,14 +290,10 @@ static int write_option(void *optctx, const OptionDef *po, const char *opt,
if (po->flags & OPT_SPEC) {
SpecifierOpt **so = dst;
char *p = strchr(opt, ':');
char *str;

dstcount = (int *)(so + 1);
*so = grow_array(*so, sizeof(**so), dstcount, *dstcount + 1);
str = av_strdup(p ? p + 1 : "");
if (!str)
return AVERROR(ENOMEM);
(*so)[*dstcount - 1].specifier = str;
(*so)[*dstcount - 1].specifier = av_strdup(p ? p + 1 : "");
dst = &(*so)[*dstcount - 1].u;
}

@@ -305,8 +301,6 @@ static int write_option(void *optctx, const OptionDef *po, const char *opt,
char *str;
str = av_strdup(arg);
av_freep(dst);
if (!str)
return AVERROR(ENOMEM);
*(char **)dst = str;
} else if (po->flags & OPT_BOOL || po->flags & OPT_INT) {
*(int *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
@@ -450,7 +444,7 @@ int locate_option(int argc, char **argv, const OptionDef *options,
(po->name && !strcmp(optname, po->name)))
return i;

if (!po->name || po->flags & HAS_ARG)
if (po->flags & HAS_ARG)
i++;
}
return 0;
@@ -965,10 +959,9 @@ static int init_report(const char *env)

report_file = fopen(filename.str, "w");
if (!report_file) {
int ret = AVERROR(errno);
av_log(NULL, AV_LOG_ERROR, "Failed to open report \"%s\": %s\n",
filename.str, strerror(errno));
return ret;
return AVERROR(errno);
}
av_log_set_callback(log_callback_report);
av_log(NULL, AV_LOG_INFO,
@@ -1081,7 +1074,8 @@ static void print_program_info(int flags, int level)
av_log(NULL, level, " Copyright (c) %d-%d the FFmpeg developers",
program_birth_year, CONFIG_THIS_YEAR);
av_log(NULL, level, "\n");
av_log(NULL, level, "%sbuilt with %s\n", indent, CC_IDENT);
av_log(NULL, level, "%sbuilt on %s %s with %s\n",
indent, __DATE__, __TIME__, CC_IDENT);

av_log(NULL, level, "%sconfiguration: " FFMPEG_CONFIGURATION "\n", indent);
}
@@ -1218,7 +1212,12 @@ static int is_device(const AVClass *avclass)
{
if (!avclass)
return 0;
return AV_IS_INPUT_DEVICE(avclass->category) || AV_IS_OUTPUT_DEVICE(avclass->category);
return avclass->category == AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT ||
avclass->category == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT ||
avclass->category == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT ||
avclass->category == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT ||
avclass->category == AV_CLASS_CATEGORY_DEVICE_OUTPUT ||
avclass->category == AV_CLASS_CATEGORY_DEVICE_INPUT;
}

static int show_formats_devices(void *optctx, const char *opt, const char *arg, int device_only)
@@ -1243,7 +1242,7 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
is_dev = is_device(ofmt->priv_class);
if (!is_dev && device_only)
continue;
if ((!name || strcmp(ofmt->name, name) < 0) &&
if ((name == NULL || strcmp(ofmt->name, name) < 0) &&
strcmp(ofmt->name, last_name) > 0) {
name = ofmt->name;
long_name = ofmt->long_name;
@@ -1254,7 +1253,7 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
is_dev = is_device(ifmt->priv_class);
if (!is_dev && device_only)
continue;
if ((!name || strcmp(ifmt->name, name) < 0) &&
if ((name == NULL || strcmp(ifmt->name, name) < 0) &&
strcmp(ifmt->name, last_name) > 0) {
name = ifmt->name;
long_name = ifmt->long_name;
@@ -1263,7 +1262,7 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
if (name && strcmp(ifmt->name, name) == 0)
decode = 1;
}
if (!name)
if (name == NULL)
break;
last_name = name;

@@ -1544,8 +1543,7 @@ int show_protocols(void *optctx, const char *opt, const char *arg)

int show_filters(void *optctx, const char *opt, const char *arg)
{
#if CONFIG_AVFILTER
const AVFilter *filter = NULL;
const AVFilter av_unused(*filter) = NULL;
char descr[64], *descr_cur;
int i, j;
const AVFilterPad *pad;
@@ -1558,6 +1556,7 @@ int show_filters(void *optctx, const char *opt, const char *arg)
" V = Video input/output\n"
" N = Dynamic number and/or type of input/output\n"
" | = Source or sink filter\n");
#if CONFIG_AVFILTER
while ((filter = avfilter_next(filter))) {
descr_cur = descr;
for (i = 0; i < 2; i++) {
@@ -1582,8 +1581,6 @@ int show_filters(void *optctx, const char *opt, const char *arg)
filter->process_command ? 'C' : '.',
filter->name, descr, filter->description);
}
#else
printf("No filters available: libavfilter disabled\n");
#endif
return 0;
}
@@ -1642,19 +1639,19 @@ int show_layouts(void *optctx, const char *opt, const char *arg)
const char *name, *descr;

printf("Individual channels:\n"
"NAME DESCRIPTION\n");
"NAME DESCRIPTION\n");
for (i = 0; i < 63; i++) {
name = av_get_channel_name((uint64_t)1 << i);
if (!name)
continue;
descr = av_get_channel_description((uint64_t)1 << i);
printf("%-14s %s\n", name, descr);
printf("%-12s%s\n", name, descr);
}
printf("\nStandard channel layouts:\n"
"NAME DECOMPOSITION\n");
"NAME DECOMPOSITION\n");
for (i = 0; !av_get_standard_channel_layout(i, &layout, &name); i++) {
if (name) {
printf("%-14s ", name);
printf("%-12s", name);
for (j = 1; j; j <<= 1)
if ((layout & j))
printf("%s%s", (layout & (j - 1)) ? "+" : "", av_get_channel_name(j));
@@ -1821,8 +1818,6 @@ int show_help(void *optctx, const char *opt, const char *arg)
|
||||
av_log_set_callback(log_callback_help);
|
||||
|
||||
topic = av_strdup(arg ? arg : "");
|
||||
if (!topic)
|
||||
return AVERROR(ENOMEM);
|
||||
par = strchr(topic, '=');
|
||||
if (par)
|
||||
*par++ = 0;
|
||||
@@ -1866,44 +1861,31 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
|
||||
FILE *f = av_fopen_utf8(filename, "rb");
|
||||
|
||||
if (!f) {
|
||||
ret = AVERROR(errno);
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot read file '%s': %s\n", filename,
|
||||
strerror(errno));
|
||||
return ret;
|
||||
return AVERROR(errno);
|
||||
}
|
||||
|
||||
ret = fseek(f, 0, SEEK_END);
|
||||
if (ret == -1) {
|
||||
ret = AVERROR(errno);
|
||||
goto out;
|
||||
fseek(f, 0, SEEK_END);
|
||||
*size = ftell(f);
|
||||
fseek(f, 0, SEEK_SET);
|
||||
if (*size == (size_t)-1) {
|
||||
av_log(NULL, AV_LOG_ERROR, "IO error: %s\n", strerror(errno));
|
||||
fclose(f);
|
||||
return AVERROR(errno);
|
||||
}
|
||||
|
||||
ret = ftell(f);
|
||||
if (ret < 0) {
|
||||
ret = AVERROR(errno);
|
||||
goto out;
|
||||
}
|
||||
*size = ret;
|
||||
|
||||
ret = fseek(f, 0, SEEK_SET);
|
||||
if (ret == -1) {
|
||||
ret = AVERROR(errno);
|
||||
goto out;
|
||||
}
|
||||
|
||||
*bufptr = av_malloc(*size + 1);
|
||||
if (!*bufptr) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not allocate file buffer\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto out;
|
||||
fclose(f);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
ret = fread(*bufptr, 1, *size, f);
|
||||
if (ret < *size) {
|
||||
av_free(*bufptr);
|
||||
if (ferror(f)) {
|
||||
ret = AVERROR(errno);
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while reading file '%s': %s\n",
|
||||
filename, strerror(errno));
|
||||
ret = AVERROR(errno);
|
||||
} else
|
||||
ret = AVERROR_EOF;
|
||||
} else {
|
||||
@@ -1911,9 +1893,6 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
|
||||
(*bufptr)[(*size)++] = '\0';
|
||||
}
|
||||
|
||||
out:
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "IO error: %s\n", av_err2str(ret));
|
||||
fclose(f);
|
||||
return ret;
|
||||
}
|
||||
@@ -2013,7 +1992,7 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
|
||||
switch (check_stream_specifier(s, st, p + 1)) {
|
||||
case 1: *p = 0; break;
|
||||
case 0: continue;
|
||||
default: exit_program(1);
|
||||
default: return NULL;
|
||||
}
|
||||
|
||||
if (av_opt_find(&cc, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ) ||
|
||||
@@ -2060,7 +2039,7 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
|
||||
exit_program(1);
|
||||
}
|
||||
if (*size < new_size) {
|
||||
uint8_t *tmp = av_realloc_array(array, new_size, elem_size);
|
||||
uint8_t *tmp = av_realloc(array, new_size*elem_size);
|
||||
if (!tmp) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not alloc buffer.\n");
|
||||
exit_program(1);
|
||||
@@ -2071,161 +2050,3 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
|
||||
}
|
||||
return array;
|
||||
}
|
||||
|
||||
#if CONFIG_AVDEVICE
|
||||
static int print_device_sources(AVInputFormat *fmt, AVDictionary *opts)
|
||||
{
|
||||
int ret, i;
|
||||
AVDeviceInfoList *device_list = NULL;
|
||||
|
||||
if (!fmt || !fmt->priv_class || !AV_IS_INPUT_DEVICE(fmt->priv_class->category))
|
||||
return AVERROR(EINVAL);
|
||||
|
||||
printf("Audo-detected sources for %s:\n", fmt->name);
|
||||
if (!fmt->get_device_list) {
|
||||
ret = AVERROR(ENOSYS);
|
||||
printf("Cannot list sources. Not implemented.\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if ((ret = avdevice_list_input_sources(fmt, NULL, opts, &device_list)) < 0) {
|
||||
printf("Cannot list sources.\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
for (i = 0; i < device_list->nb_devices; i++) {
|
||||
printf("%s %s [%s]\n", device_list->default_device == i ? "*" : " ",
|
||||
device_list->devices[i]->device_name, device_list->devices[i]->device_description);
|
||||
}
|
||||
|
||||
fail:
|
||||
avdevice_free_list_devices(&device_list);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int print_device_sinks(AVOutputFormat *fmt, AVDictionary *opts)
|
||||
{
|
||||
int ret, i;
|
||||
AVDeviceInfoList *device_list = NULL;
|
||||
|
||||
if (!fmt || !fmt->priv_class || !AV_IS_OUTPUT_DEVICE(fmt->priv_class->category))
|
||||
return AVERROR(EINVAL);
|
||||
|
||||
printf("Audo-detected sinks for %s:\n", fmt->name);
|
||||
if (!fmt->get_device_list) {
|
||||
ret = AVERROR(ENOSYS);
|
||||
printf("Cannot list sinks. Not implemented.\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if ((ret = avdevice_list_output_sinks(fmt, NULL, opts, &device_list)) < 0) {
|
||||
printf("Cannot list sinks.\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
for (i = 0; i < device_list->nb_devices; i++) {
|
||||
printf("%s %s [%s]\n", device_list->default_device == i ? "*" : " ",
|
||||
device_list->devices[i]->device_name, device_list->devices[i]->device_description);
|
||||
}
|
||||
|
||||
fail:
|
||||
avdevice_free_list_devices(&device_list);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int show_sinks_sources_parse_arg(const char *arg, char **dev, AVDictionary **opts)
|
||||
{
|
||||
int ret;
|
||||
if (arg) {
|
||||
char *opts_str = NULL;
|
||||
av_assert0(dev && opts);
|
||||
*dev = av_strdup(arg);
|
||||
if (!*dev)
|
||||
return AVERROR(ENOMEM);
|
||||
if ((opts_str = strchr(*dev, ','))) {
|
||||
*(opts_str++) = '\0';
|
||||
if (opts_str[0] && ((ret = av_dict_parse_string(opts, opts_str, "=", ":", 0)) < 0)) {
|
||||
av_freep(dev);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
} else
|
||||
printf("\nDevice name is not provided.\n"
|
||||
"You can pass devicename[,opt1=val1[,opt2=val2...]] as an argument.\n\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
int show_sources(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
AVInputFormat *fmt = NULL;
|
||||
char *dev = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
int ret = 0;
|
||||
int error_level = av_log_get_level();
|
||||
|
||||
av_log_set_level(AV_LOG_ERROR);
|
||||
|
||||
if ((ret = show_sinks_sources_parse_arg(arg, &dev, &opts)) < 0)
|
||||
goto fail;
|
||||
|
||||
do {
|
||||
fmt = av_input_audio_device_next(fmt);
|
||||
if (fmt) {
|
||||
if (!strcmp(fmt->name, "lavfi"))
|
||||
continue; //it's pointless to probe lavfi
|
||||
if (dev && !av_match_name(dev, fmt->name))
|
||||
continue;
|
||||
print_device_sources(fmt, opts);
|
||||
}
|
||||
} while (fmt);
|
||||
do {
|
||||
fmt = av_input_video_device_next(fmt);
|
||||
if (fmt) {
|
||||
if (dev && !av_match_name(dev, fmt->name))
|
||||
continue;
|
||||
print_device_sources(fmt, opts);
|
||||
}
|
||||
} while (fmt);
|
||||
fail:
|
||||
av_dict_free(&opts);
|
||||
av_free(dev);
|
||||
av_log_set_level(error_level);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int show_sinks(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
AVOutputFormat *fmt = NULL;
|
||||
char *dev = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
int ret = 0;
|
||||
int error_level = av_log_get_level();
|
||||
|
||||
av_log_set_level(AV_LOG_ERROR);
|
||||
|
||||
if ((ret = show_sinks_sources_parse_arg(arg, &dev, &opts)) < 0)
|
||||
goto fail;
|
||||
|
||||
do {
|
||||
fmt = av_output_audio_device_next(fmt);
|
||||
if (fmt) {
|
||||
if (dev && !av_match_name(dev, fmt->name))
|
||||
continue;
|
||||
print_device_sinks(fmt, opts);
|
||||
}
|
||||
} while (fmt);
|
||||
do {
|
||||
fmt = av_output_video_device_next(fmt);
|
||||
if (fmt) {
|
||||
if (dev && !av_match_name(dev, fmt->name))
|
||||
continue;
|
||||
print_device_sinks(fmt, opts);
|
||||
}
|
||||
} while (fmt);
|
||||
fail:
|
||||
av_dict_free(&opts);
|
||||
av_free(dev);
|
||||
av_log_set_level(error_level);
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
cmdutils.h

@@ -443,20 +443,6 @@ int show_formats(void *optctx, const char *opt, const char *arg);
 */
int show_devices(void *optctx, const char *opt, const char *arg);

#if CONFIG_AVDEVICE
/**
 * Print a listing containing autodetected sinks of the output device.
 * Device name with options may be passed as an argument to limit results.
 */
int show_sinks(void *optctx, const char *opt, const char *arg);

/**
 * Print a listing containing autodetected sources of the input device.
 * Device name with options may be passed as an argument to limit results.
 */
int show_sources(void *optctx, const char *opt, const char *arg);
#endif

/**
 * Print a listing containing all the codecs supported by the
 * program.
@@ -27,9 +27,3 @@
{ "opencl_bench", OPT_EXIT, {.func_arg = opt_opencl_bench}, "run benchmark on all OpenCL devices and show results" },
{ "opencl_options", HAS_ARG, {.func_arg = opt_opencl}, "set OpenCL environment options" },
#endif
#if CONFIG_AVDEVICE
{ "sources" , OPT_EXIT | HAS_ARG, { .func_arg = show_sources },
  "list sources of the input device", "device" },
{ "sinks" , OPT_EXIT | HAS_ARG, { .func_arg = show_sinks },
  "list sinks of the output device", "device" },
#endif
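For reference, the two entries above wire show_sources/show_sinks to command-line options; a rough usage sketch (device names and options here are purely illustrative) looks like:

  ffmpeg -sources pulse,server=192.168.0.4
  ffmpeg -sinks alsa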
|
@@ -22,7 +22,6 @@
|
||||
#include "libavutil/time.h"
|
||||
#include "libavutil/log.h"
|
||||
#include "libavutil/opencl.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "cmdutils.h"
|
||||
|
||||
typedef struct {
|
||||
@@ -239,8 +238,7 @@ int opt_opencl_bench(void *optctx, const char *opt, const char *arg)
|
||||
devices[count].platform_idx = i;
|
||||
devices[count].device_idx = j;
|
||||
devices[count].runtime = score;
|
||||
av_strlcpy(devices[count].device_name, device_node->device_name,
|
||||
sizeof(devices[count].device_name));
|
||||
strcpy(devices[count].device_name, device_node->device_name);
|
||||
count++;
|
||||
}
|
||||
}
|
||||
|
common.mak
@@ -5,14 +5,6 @@
|
||||
# first so "all" becomes default target
|
||||
all: all-yes
|
||||
|
||||
DEFAULT_YASMD=.dbg
|
||||
|
||||
ifeq (1, $(DBG))
|
||||
YASMD=$(DEFAULT_YASMD)
|
||||
else
|
||||
YASMD=
|
||||
endif
|
||||
|
||||
ifndef SUBDIR
|
||||
|
||||
ifndef V
|
||||
@@ -146,17 +138,17 @@ $(TOOLOBJS): | tools
|
||||
|
||||
OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(TESTOBJS))
|
||||
|
||||
CLEANSUFFIXES = *.d *.o *~ *.h.c *.map *.ver *.ho *.gcno *.gcda *$(DEFAULT_YASMD).asm
|
||||
CLEANSUFFIXES = *.d *.o *~ *.h.c *.map *.ver *.ho *.gcno *.gcda
|
||||
DISTCLEANSUFFIXES = *.pc
|
||||
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a
|
||||
|
||||
define RULES
|
||||
clean::
|
||||
$(RM) $(OBJS) $(OBJS:.o=.d) $(OBJS:.o=$(DEFAULT_YASMD).d)
|
||||
$(RM) $(OBJS) $(OBJS:.o=.d)
|
||||
$(RM) $(HOSTPROGS)
|
||||
$(RM) $(TOOLS)
|
||||
endef
|
||||
|
||||
$(eval $(RULES))
|
||||
|
||||
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SLIBOBJS:.o=.d)) $(OBJS:.o=$(DEFAULT_YASMD).d)
|
||||
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SLIBOBJS:.o=.d))
|
||||
|
@@ -805,7 +805,7 @@ struct AVS_Library {
|
||||
|
||||
AVSC_INLINE AVS_Library * avs_load_library() {
|
||||
AVS_Library *library = (AVS_Library *)malloc(sizeof(AVS_Library));
|
||||
if (!library)
|
||||
if (library == NULL)
|
||||
return NULL;
|
||||
library->handle = LoadLibrary("avisynth");
|
||||
if (library->handle == NULL)
|
||||
@@ -870,7 +870,7 @@ fail:
|
||||
}
|
||||
|
||||
AVSC_INLINE void avs_free_library(AVS_Library *library) {
|
||||
if (!library)
|
||||
if (library == NULL)
|
||||
return;
|
||||
FreeLibrary(library->handle);
|
||||
free(library);
|
||||
|
@@ -513,21 +513,21 @@ AVSC_INLINE AVS_Value avs_array_elt(AVS_Value v, int index)
|
||||
// only use these functions on an AVS_Value that does not already have
|
||||
// an active value. Remember, treat AVS_Value as a fat pointer.
|
||||
AVSC_INLINE AVS_Value avs_new_value_bool(int v0)
|
||||
{ AVS_Value v = {0}; v.type = 'b'; v.d.boolean = v0 == 0 ? 0 : 1; return v; }
|
||||
{ AVS_Value v; v.type = 'b'; v.d.boolean = v0 == 0 ? 0 : 1; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_int(int v0)
|
||||
{ AVS_Value v = {0}; v.type = 'i'; v.d.integer = v0; return v; }
|
||||
{ AVS_Value v; v.type = 'i'; v.d.integer = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_string(const char * v0)
|
||||
{ AVS_Value v = {0}; v.type = 's'; v.d.string = v0; return v; }
|
||||
{ AVS_Value v; v.type = 's'; v.d.string = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_float(float v0)
|
||||
{ AVS_Value v = {0}; v.type = 'f'; v.d.floating_pt = v0; return v;}
|
||||
{ AVS_Value v; v.type = 'f'; v.d.floating_pt = v0; return v;}
|
||||
AVSC_INLINE AVS_Value avs_new_value_error(const char * v0)
|
||||
{ AVS_Value v = {0}; v.type = 'e'; v.d.string = v0; return v; }
|
||||
{ AVS_Value v; v.type = 'e'; v.d.string = v0; return v; }
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE AVS_Value avs_new_value_clip(AVS_Clip * v0)
|
||||
{ AVS_Value v = {0}; avs_set_to_clip(&v, v0); return v; }
|
||||
{ AVS_Value v; avs_set_to_clip(&v, v0); return v; }
|
||||
#endif
|
||||
AVSC_INLINE AVS_Value avs_new_value_array(AVS_Value * v0, int size)
|
||||
{ AVS_Value v = {0}; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
|
||||
{ AVS_Value v; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
|
@@ -52,8 +52,8 @@ namespace avxsynth {
//
// Functions
//
#define MAKEDWORD(a,b,c,d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
#define MAKEWORD(a,b) (((a) << 8) | (b))
#define MAKEDWORD(a,b,c,d) ((a << 24) | (b << 16) | (c << 8) | (d))
#define MAKEWORD(a,b) ((a << 8) | (b))

#define lstrlen strlen
#define lstrcpy strcpy
@@ -54,7 +54,7 @@ static int getopt(int argc, char *argv[], char *opts)
|
||||
}
|
||||
}
|
||||
optopt = c = argv[optind][sp];
|
||||
if (c == ':' || !(cp = strchr(opts, c))) {
|
||||
if (c == ':' || (cp = strchr(opts, c)) == NULL) {
|
||||
fprintf(stderr, ": illegal option -- %c\n", c);
|
||||
if (argv[optind][++sp] == '\0') {
|
||||
optind++;
|
||||
|
@@ -39,7 +39,6 @@
|
||||
#include <windows.h>
|
||||
#include <process.h>
|
||||
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/internal.h"
|
||||
#include "libavutil/mem.h"
|
||||
@@ -55,30 +54,36 @@ typedef struct pthread_t {
|
||||
* not mutexes */
|
||||
typedef CRITICAL_SECTION pthread_mutex_t;
|
||||
|
||||
/* This is the CONDITION_VARIABLE typedef for using Windows' native
|
||||
* conditional variables on kernels 6.0+. */
|
||||
#if HAVE_CONDITION_VARIABLE_PTR
|
||||
typedef CONDITION_VARIABLE pthread_cond_t;
|
||||
#else
|
||||
/* This is the CONDITIONAL_VARIABLE typedef for using Window's native
|
||||
* conditional variables on kernels 6.0+.
|
||||
* MinGW does not currently have this typedef. */
|
||||
typedef struct pthread_cond_t {
|
||||
void *Ptr;
|
||||
void *ptr;
|
||||
} pthread_cond_t;
|
||||
|
||||
/* function pointers to conditional variable API on windows 6.0+ kernels */
|
||||
#if _WIN32_WINNT < 0x0600
|
||||
static void (WINAPI *cond_broadcast)(pthread_cond_t *cond);
|
||||
static void (WINAPI *cond_init)(pthread_cond_t *cond);
|
||||
static void (WINAPI *cond_signal)(pthread_cond_t *cond);
|
||||
static BOOL (WINAPI *cond_wait)(pthread_cond_t *cond, pthread_mutex_t *mutex,
|
||||
DWORD milliseconds);
|
||||
#else
|
||||
#define cond_init InitializeConditionVariable
|
||||
#define cond_broadcast WakeAllConditionVariable
|
||||
#define cond_signal WakeConditionVariable
|
||||
#define cond_wait SleepConditionVariableCS
|
||||
#endif
|
||||
|
||||
#if _WIN32_WINNT >= 0x0600
|
||||
#define InitializeCriticalSection(x) InitializeCriticalSectionEx(x, 0, 0)
|
||||
#define WaitForSingleObject(a, b) WaitForSingleObjectEx(a, b, FALSE)
|
||||
#endif
|
||||
|
||||
static av_unused unsigned __stdcall attribute_align_arg win32thread_worker(void *arg)
|
||||
static unsigned __stdcall attribute_align_arg win32thread_worker(void *arg)
|
||||
{
|
||||
pthread_t *h = arg;
|
||||
h->ret = h->func(h->arg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_unused int pthread_create(pthread_t *thread, const void *unused_attr,
|
||||
void *(*start_routine)(void*), void *arg)
|
||||
static int pthread_create(pthread_t *thread, const void *unused_attr,
|
||||
void *(*start_routine)(void*), void *arg)
|
||||
{
|
||||
thread->func = start_routine;
|
||||
thread->arg = arg;
|
||||
@@ -87,7 +92,7 @@ static av_unused int pthread_create(pthread_t *thread, const void *unused_attr,
|
||||
return !thread->handle;
|
||||
}
|
||||
|
||||
static av_unused void pthread_join(pthread_t thread, void **value_ptr)
|
||||
static void pthread_join(pthread_t thread, void **value_ptr)
|
||||
{
|
||||
DWORD ret = WaitForSingleObject(thread.handle, INFINITE);
|
||||
if (ret != WAIT_OBJECT_0)
|
||||
@@ -118,36 +123,6 @@ static inline int pthread_mutex_unlock(pthread_mutex_t *m)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if _WIN32_WINNT >= 0x0600
|
||||
static inline int pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
|
||||
{
|
||||
InitializeConditionVariable(cond);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* native condition variables do not destroy */
|
||||
static inline void pthread_cond_destroy(pthread_cond_t *cond)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
static inline void pthread_cond_broadcast(pthread_cond_t *cond)
|
||||
{
|
||||
WakeAllConditionVariable(cond);
|
||||
}
|
||||
|
||||
static inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
{
|
||||
SleepConditionVariableCS(cond, mutex, INFINITE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void pthread_cond_signal(pthread_cond_t *cond)
|
||||
{
|
||||
WakeConditionVariable(cond);
|
||||
}
|
||||
|
||||
#else // _WIN32_WINNT < 0x0600
|
||||
/* for pre-Windows 6.0 platforms we need to define and use our own condition
|
||||
* variable and api */
|
||||
typedef struct win32_cond_t {
|
||||
@@ -159,14 +134,7 @@ typedef struct win32_cond_t {
|
||||
volatile int is_broadcast;
|
||||
} win32_cond_t;
|
||||
|
||||
/* function pointers to conditional variable API on windows 6.0+ kernels */
|
||||
static void (WINAPI *cond_broadcast)(pthread_cond_t *cond);
|
||||
static void (WINAPI *cond_init)(pthread_cond_t *cond);
|
||||
static void (WINAPI *cond_signal)(pthread_cond_t *cond);
|
||||
static BOOL (WINAPI *cond_wait)(pthread_cond_t *cond, pthread_mutex_t *mutex,
|
||||
DWORD milliseconds);
|
||||
|
||||
static av_unused int pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
|
||||
static int pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
|
||||
{
|
||||
win32_cond_t *win32_cond = NULL;
|
||||
if (cond_init) {
|
||||
@@ -178,7 +146,7 @@ static av_unused int pthread_cond_init(pthread_cond_t *cond, const void *unused_
|
||||
win32_cond = av_mallocz(sizeof(win32_cond_t));
|
||||
if (!win32_cond)
|
||||
return ENOMEM;
|
||||
cond->Ptr = win32_cond;
|
||||
cond->ptr = win32_cond;
|
||||
win32_cond->semaphore = CreateSemaphore(NULL, 0, 0x7fffffff, NULL);
|
||||
if (!win32_cond->semaphore)
|
||||
return ENOMEM;
|
||||
@@ -191,9 +159,9 @@ static av_unused int pthread_cond_init(pthread_cond_t *cond, const void *unused_
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_unused void pthread_cond_destroy(pthread_cond_t *cond)
|
||||
static void pthread_cond_destroy(pthread_cond_t *cond)
|
||||
{
|
||||
win32_cond_t *win32_cond = cond->Ptr;
|
||||
win32_cond_t *win32_cond = cond->ptr;
|
||||
/* native condition variables do not destroy */
|
||||
if (cond_init)
|
||||
return;
|
||||
@@ -204,12 +172,12 @@ static av_unused void pthread_cond_destroy(pthread_cond_t *cond)
|
||||
pthread_mutex_destroy(&win32_cond->mtx_waiter_count);
|
||||
pthread_mutex_destroy(&win32_cond->mtx_broadcast);
|
||||
av_freep(&win32_cond);
|
||||
cond->Ptr = NULL;
|
||||
cond->ptr = NULL;
|
||||
}
|
||||
|
||||
static av_unused void pthread_cond_broadcast(pthread_cond_t *cond)
|
||||
static void pthread_cond_broadcast(pthread_cond_t *cond)
|
||||
{
|
||||
win32_cond_t *win32_cond = cond->Ptr;
|
||||
win32_cond_t *win32_cond = cond->ptr;
|
||||
int have_waiter;
|
||||
|
||||
if (cond_broadcast) {
|
||||
@@ -238,9 +206,9 @@ static av_unused void pthread_cond_broadcast(pthread_cond_t *cond)
|
||||
pthread_mutex_unlock(&win32_cond->mtx_broadcast);
|
||||
}
|
||||
|
||||
static av_unused int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
static int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
{
|
||||
win32_cond_t *win32_cond = cond->Ptr;
|
||||
win32_cond_t *win32_cond = cond->ptr;
|
||||
int last_waiter;
|
||||
if (cond_wait) {
|
||||
cond_wait(cond, mutex, INFINITE);
|
||||
@@ -270,9 +238,9 @@ static av_unused int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mu
|
||||
return pthread_mutex_lock(mutex);
|
||||
}
|
||||
|
||||
static av_unused void pthread_cond_signal(pthread_cond_t *cond)
|
||||
static void pthread_cond_signal(pthread_cond_t *cond)
|
||||
{
|
||||
win32_cond_t *win32_cond = cond->Ptr;
|
||||
win32_cond_t *win32_cond = cond->ptr;
|
||||
int have_waiter;
|
||||
if (cond_signal) {
|
||||
cond_signal(cond);
|
||||
@@ -294,9 +262,8 @@ static av_unused void pthread_cond_signal(pthread_cond_t *cond)
|
||||
|
||||
pthread_mutex_unlock(&win32_cond->mtx_broadcast);
|
||||
}
|
||||
#endif
|
||||
|
||||
static av_unused void w32thread_init(void)
|
||||
static void w32thread_init(void)
|
||||
{
|
||||
#if _WIN32_WINNT < 0x0600
|
||||
HANDLE kernel_dll = GetModuleHandle(TEXT("kernel32.dll"));
|
||||
|
doc/APIchanges
@@ -2,223 +2,19 @@ Never assume the API of libav* to be stable unless at least 1 month has passed
|
||||
since the last major version increase or the API was added.
|
||||
|
||||
The last version increases were:
|
||||
libavcodec: 2014-08-09
|
||||
libavdevice: 2014-08-09
|
||||
libavfilter: 2014-08-09
|
||||
libavformat: 2014-08-09
|
||||
libavresample: 2014-08-09
|
||||
libpostproc: 2014-08-09
|
||||
libswresample: 2014-08-09
|
||||
libswscale: 2014-08-09
|
||||
libavutil: 2014-08-09
|
||||
libavcodec: 2013-03-xx
|
||||
libavdevice: 2013-03-xx
|
||||
libavfilter: 2013-12-xx
|
||||
libavformat: 2013-03-xx
|
||||
libavresample: 2012-10-05
|
||||
libpostproc: 2011-04-18
|
||||
libswresample: 2011-09-19
|
||||
libswscale: 2011-06-20
|
||||
libavutil: 2012-10-22
|
||||
|
||||
|
||||
API changes, most recent first:
|
||||
|
||||
-------- 8< --------- FFmpeg 2.6 was cut here -------- 8< ---------
|
||||
|
||||
2015-03-04 - cca4476 - lavf 56.25.100
|
||||
Add avformat_flush()
|
||||
|
||||
2015-03-03 - 81a9126 - lavf 56.24.100
|
||||
Add avio_put_str16be()
|
||||
|
||||
2015-02-19 - 560eb71 / 31d2039 - lavc 56.23.100 / 56.13.0
|
||||
Add width, height, coded_width, coded_height and format to
|
||||
AVCodecParserContext.
|
||||
|
||||
2015-02-19 - e375511 / 5b1d9ce - lavu 54.19.100 / 54.9.0
|
||||
Add AV_PIX_FMT_QSV for QSV hardware acceleration.
|
||||
|
||||
2015-02-14 - ba22295 - lavc 56.21.102
|
||||
Deprecate VIMA decoder.
|
||||
|
||||
2015-01-27 - 62a82c6 / 728685f - lavc 56.21.100 / 56.12.0, lavu 54.18.100 / 54.8.0 - avcodec.h, frame.h
|
||||
Add AV_PKT_DATA_AUDIO_SERVICE_TYPE and AV_FRAME_DATA_AUDIO_SERVICE_TYPE for
|
||||
storing the audio service type as side data.
|
||||
|
||||
2015-01-16 - a47c933 - lavf 56.19.100 - avformat.h
|
||||
Add data_codec and data_codec_id for storing codec of data stream
|
||||
|
||||
2015-01-11 - 007c33d - lavd 56.4.100 - avdevice.h
|
||||
Add avdevice_list_input_sources().
|
||||
Add avdevice_list_output_sinks().
|
||||
|
||||
2014-12-25 - d7aaeea / c220a60 - lavc 56.19.100 / 56.10.0 - vdpau.h
|
||||
Add av_vdpau_get_surface_parameters().
|
||||
|
||||
2014-12-25 - ddb9a24 / 6c99c92 - lavc 56.18.100 / 56.9.0 - avcodec.h
|
||||
Add AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH flag to av_vdpau_bind_context().
|
||||
|
||||
2014-12-25 - d16079a / 57b6704 - lavc 56.17.100 / 56.8.0 - avcodec.h
|
||||
Add AVCodecContext.sw_pix_fmt.
|
||||
|
||||
2014-12-04 - 6e9ac02 - lavc 56.14.100 - dv_profile.h
|
||||
Add av_dv_codec_profile2().
|
||||
|
||||
-------- 8< --------- FFmpeg 2.5 was cut here -------- 8< ---------
|
||||
|
||||
2014-11-21 - ab922f9 - lavu 54.15.100 - dict.h
|
||||
Add av_dict_get_string().
|
||||
|
||||
2014-11-18 - a54a51c - lavu 54.14.100 - float_dsp.h
|
||||
Add avpriv_float_dsp_alloc().
|
||||
|
||||
2014-11-16 - 6690d4c3 - lavf 56.13.100 - avformat.h
|
||||
Add AVStream.recommended_encoder_configuration with accessors.
|
||||
|
||||
2014-11-16 - bee5844d - lavu 54.13.100 - opt.h
|
||||
Add av_opt_serialize().
|
||||
|
||||
2014-11-16 - eec69332 - lavu 54.12.100 - opt.h
|
||||
Add av_opt_is_set_to_default().
|
||||
|
||||
2014-11-06 - 44fa267 / 5e80fb7 - lavc 56.11.100 / 56.6.0 - vorbis_parser.h
|
||||
Add a public API for parsing vorbis packets.
|
||||
|
||||
2014-10-15 - 17085a0 / 7ea1b34 - lavc 56.7.100 / 56.5.0 - avcodec.h
|
||||
Replace AVCodecContext.time_base used for decoding
|
||||
with AVCodecContext.framerate.
|
||||
|
||||
2014-10-15 - 51c810e / d565fef1 - lavc 56.6.100 / 56.4.0 - avcodec.h
|
||||
Add AV_HWACCEL_FLAG_IGNORE_LEVEL flag to av_vdpau_bind_context().
|
||||
|
||||
2014-10-13 - da21895 / 2df0c32e - lavc 56.5.100 / 56.3.0 - avcodec.h
|
||||
Add AVCodecContext.initial_padding. Deprecate the use of AVCodecContext.delay
|
||||
for audio encoding.
|
||||
|
||||
2014-10-08 - bb44f7d / 5a419b2 - lavu 54.10.100 / 54.4.0 - pixdesc.h
|
||||
Add API to return the name of frame and context color properties.
|
||||
|
||||
2014-10-06 - a61899a / e3e158e - lavc 56.3.100 / 56.2.0 - vdpau.h
|
||||
Add av_vdpau_bind_context(). This function should now be used for creating
|
||||
(or resetting) a AVVDPAUContext instead of av_vdpau_alloc_context().
|
||||
|
||||
2014-10-02 - cdd6f05 - lavc 56.2.100 - avcodec.h
|
||||
2014-10-02 - cdd6f05 - lavu 54.9.100 - frame.h
|
||||
Add AV_FRAME_DATA_SKIP_SAMPLES. Add lavc CODEC_FLAG2_SKIP_MANUAL and
|
||||
AVOption "skip_manual", which makes lavc export skip information via
|
||||
AV_FRAME_DATA_SKIP_SAMPLES AVFrame side data, instead of skipping and
|
||||
discarding samples automatically.
|
||||
|
||||
2014-10-02 - 0d92b0d - lavu 54.8.100 - avstring.h
|
||||
Add av_match_list()
|
||||
|
||||
2014-09-24 - ac68295 - libpostproc 53.1.100
|
||||
Add visualization support
|
||||
|
||||
2014-09-19 - 6edd6a4 - lavc 56.1.101 - dv_profile.h
|
||||
deprecate avpriv_dv_frame_profile2(), which was made public by accident.
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 2.4 was cut here -------- 8< ---------
|
||||
|
||||
2014-08-25 - 215db29 / b263f8f - lavf 56.3.100 / 56.3.0 - avformat.h
|
||||
Add AVFormatContext.max_ts_probe.
|
||||
|
||||
2014-08-28 - f30a815 / 9301486 - lavc 56.1.100 / 56.1.0 - avcodec.h
|
||||
Add AV_PKT_DATA_STEREO3D to export container-level stereo3d information.
|
||||
|
||||
2014-08-23 - 8fc9bd0 - lavu 54.7.100 - dict.h
|
||||
AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL arguments are now
|
||||
freed even on error. This is consistent with the behaviour all users
|
||||
of it we could find expect.
|
||||
|
||||
2014-08-21 - 980a5b0 - lavu 54.6.100 - frame.h motion_vector.h
|
||||
Add AV_FRAME_DATA_MOTION_VECTORS side data and AVMotionVector structure
|
||||
|
||||
2014-08-16 - b7d5e01 - lswr 1.1.100 - swresample.h
|
||||
Add AVFrame based API
|
||||
|
||||
2014-08-16 - c2829dc - lavu 54.4.100 - dict.h
|
||||
Add av_dict_set_int helper function.
|
||||
|
||||
2014-08-13 - c8571c6 / 8ddc326 - lavu 54.3.100 / 54.3.0 - mem.h
|
||||
Add av_strndup().
|
||||
|
||||
2014-08-13 - 2ba4577 / a8c104a - lavu 54.2.100 / 54.2.0 - opt.h
|
||||
Add av_opt_get_dict_val/set_dict_val with AV_OPT_TYPE_DICT to support
|
||||
dictionary types being set as options.
|
||||
|
||||
2014-08-13 - afbd4b8 - lavf 56.01.0 - avformat.h
|
||||
Add AVFormatContext.event_flags and AVStream.event_flags for signaling to
|
||||
the user when events happen in the file/stream.
|
||||
|
||||
2014-08-10 - 78eaaa8 / fb1ddcd - lavr 2.1.0 - avresample.h
|
||||
Add avresample_convert_frame() and avresample_config().
|
||||
|
||||
2014-08-10 - 78eaaa8 / fb1ddcd - lavu 54.1.100 / 54.1.0 - error.h
|
||||
Add AVERROR_INPUT_CHANGED and AVERROR_OUTPUT_CHANGED.
|
||||
|
||||
2014-08-08 - 3841f2a / d35b94f - lavc 55.73.102 / 55.57.4 - avcodec.h
|
||||
Deprecate FF_IDCT_XVIDMMX define and xvidmmx idct option.
|
||||
Replaced by FF_IDCT_XVID and xvid respectively.
|
||||
|
||||
2014-08-08 - 5c3c671 - lavf 55.53.100 - avio.h
|
||||
Add avio_feof() and deprecate url_feof().
|
||||
|
||||
2014-08-07 - bb78903 - lsws 2.1.3 - swscale.h
|
||||
sws_getContext is not going to be removed in the future.
|
||||
|
||||
2014-08-07 - a561662 / ad1ee5f - lavc 55.73.101 / 55.57.3 - avcodec.h
|
||||
reordered_opaque is not going to be removed in the future.
|
||||
|
||||
2014-08-02 - 28a2107 - lavu 52.98.100 - pixelutils.h
|
||||
Add pixelutils API with SAD functions
|
||||
|
||||
2014-08-04 - 6017c98 / e9abafc - lavu 52.97.100 / 53.22.0 - pixfmt.h
|
||||
Add AV_PIX_FMT_YA16 pixel format for 16 bit packed gray with alpha.
|
||||
|
||||
2014-08-04 - 4c8bc6f / e96c3b8 - lavu 52.96.101 / 53.21.1 - avstring.h
|
||||
Rename AV_PIX_FMT_Y400A to AV_PIX_FMT_YA8 to better identify the format.
|
||||
An alias pixel format and color space name are provided for compatibility.
|
||||
|
||||
2014-08-04 - 073c074 / d2962e9 - lavu 52.96.100 / 53.21.0 - pixdesc.h
|
||||
Support name aliases for pixel formats.
|
||||
|
||||
2014-08-03 - 71d008e / 1ef9e83 - lavc 55.72.101 / 55.57.2 - avcodec.h
|
||||
2014-08-03 - 71d008e / 1ef9e83 - lavu 52.95.100 / 53.20.0 - frame.h
|
||||
Deprecate AVCodecContext.dtg_active_format and use side-data instead.
|
||||
|
||||
2014-08-03 - e680c73 - lavc 55.72.100 - avcodec.h
|
||||
Add get_pixels() to AVDCT
|
||||
|
||||
2014-08-03 - 9400603 / 9f17685 - lavc 55.71.101 / 55.57.1 - avcodec.h
|
||||
Deprecate unused FF_IDCT_IPP define and ipp avcodec option.
|
||||
Deprecate unused FF_DEBUG_PTS define and pts avcodec option.
|
||||
Deprecate unused FF_CODER_TYPE_DEFLATE define and deflate avcodec option.
|
||||
Deprecate unused FF_DCT_INT define and int avcodec option.
|
||||
Deprecate unused avcodec option scenechange_factor.
|
||||
|
||||
2014-07-30 - ba3e331 - lavu 52.94.100 - frame.h
|
||||
Add av_frame_side_data_name()
|
||||
|
||||
2014-07-29 - 80a3a66 / 3a19405 - lavf 56.01.100 / 56.01.0 - avformat.h
|
||||
Add mime_type field to AVProbeData, which now MUST be initialized in
|
||||
order to avoid uninitialized reads of the mime_type pointer, likely
|
||||
leading to crashes.
|
||||
Typically, this means you will do 'AVProbeData pd = { 0 };' instead of
|
||||
'AVProbeData pd;'.
|
||||
|
||||
2014-07-29 - 31e0b5d / 69e7336 - lavu 52.92.100 / 53.19.0 - avstring.h
|
||||
Make name matching function from lavf public as av_match_name().
|
||||
|
||||
2014-07-28 - 2e5c8b0 / c5fca01 - lavc 55.71.100 / 55.57.0 - avcodec.h
|
||||
Add AV_CODEC_PROP_REORDER to mark codecs supporting frame reordering.
|
||||
|
||||
2014-07-27 - ff9a154 - lavf 55.50.100 - avformat.h
|
||||
New field int64_t probesize2 instead of deprecated
|
||||
field int probesize.
|
||||
|
||||
2014-07-27 - 932ff70 - lavc 55.70.100 - avdct.h
|
||||
Add AVDCT / avcodec_dct_alloc() / avcodec_dct_init().
|
||||
|
||||
2014-07-23 - 8a4c086 - lavf 55.49.100 - avio.h
|
||||
Add avio_read_to_bprint()
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 2.3 was cut here -------- 8< ---------
|
||||
|
||||
2014-07-14 - 62227a7 - lavf 55.47.100 - avformat.h
|
||||
Add av_stream_get_parser()
|
||||
|
||||
@@ -243,7 +39,7 @@ API changes, most recent first:
|
||||
Increase FF_INPUT_BUFFER_PADDING_SIZE to 32 due to some corner cases needing
|
||||
it
|
||||
|
||||
2014-06-10 - 5482780 - lavf 55.43.100 - avformat.h
|
||||
2014-06-10 - xxxxxxx - lavf 55.43.100 - avformat.h
|
||||
New field int64_t max_analyze_duration2 instead of deprecated
|
||||
int max_analyze_duration.
|
||||
|
||||
@@ -263,28 +59,28 @@ API changes, most recent first:
|
||||
2014-05-29 - bdb2e80 / b2d4565 - lavr 1.3.0 - avresample.h
|
||||
Add avresample_max_output_samples
|
||||
|
||||
2014-05-28 - d858ee7 / 6d21259 - lavf 55.42.100 / 55.19.0 - avformat.h
|
||||
2014-05-24 - d858ee7 / 6d21259 - lavf 55.42.100 / 55.19.0 - avformat.h
|
||||
Add strict_std_compliance and related AVOptions to support experimental
|
||||
muxing.
|
||||
|
||||
2014-05-26 - 55cc60c - lavu 52.87.100 - threadmessage.h
|
||||
2014-05-26 - xxxxxxx - lavu 52.87.100 - threadmessage.h
|
||||
Add thread message queue API.
|
||||
|
||||
2014-05-26 - c37d179 - lavf 55.41.100 - avformat.h
|
||||
Add format_probesize to AVFormatContext.
|
||||
|
||||
2014-05-20 - 7d25af1 / c23c96b - lavf 55.39.100 / 55.18.0 - avformat.h
|
||||
2014-05-19 - 7d25af1 / c23c96b - lavf 55.39.100 / 55.18.0 - avformat.h
|
||||
Add av_stream_get_side_data() to access stream-level side data
|
||||
in the same way as av_packet_get_side_data().
|
||||
|
||||
2014-05-20 - 7336e39 - lavu 52.86.100 - fifo.h
|
||||
2014-05-xx - xxxxxxx - lavu 52.86.100 - fifo.h
|
||||
Add av_fifo_alloc_array() function.
|
||||
|
||||
2014-05-19 - ef1d4ee / bddd8cb - lavu 52.85.100 / 53.15.0 - frame.h, display.h
|
||||
Add AV_FRAME_DATA_DISPLAYMATRIX for exporting frame-level
|
||||
spatial rendering on video frames for proper display.
|
||||
|
||||
2014-05-19 - ef1d4ee / bddd8cb - lavc 55.64.100 / 55.53.0 - avcodec.h
|
||||
2014-05-xx - xxxxxxx - lavc 55.64.100 / 55.53.0 - avcodec.h
|
||||
Add AV_PKT_DATA_DISPLAYMATRIX for exporting packet-level
|
||||
spatial rendering on video frames for proper display.
|
||||
|
||||
@@ -296,11 +92,11 @@ API changes, most recent first:
|
||||
Add avcodec_free_context(). From now on it should be used for freeing
|
||||
AVCodecContext.
|
||||
|
||||
2014-05-17 - 0eec06e / 1bd0bdc - lavu 52.84.100 / 54.5.0 - time.h
|
||||
2014-05-17 - 0eec06e - lavu 52.84.100 - time.h
|
||||
Add av_gettime_relative() av_gettime_relative_is_monotonic()
|
||||
|
||||
2014-05-15 - eacf7d6 / 0c1959b - lavf 55.38.100 / 55.17.0 - avformat.h
|
||||
Add AVFMT_FLAG_BITEXACT flag. Muxers now use it instead of checking
|
||||
Add AVMFT_FLAG_BITEXACT flag. Muxers now use it instead of checking
|
||||
CODEC_FLAG_BITEXACT on the first stream.
|
||||
|
||||
2014-05-15 - 96cb4c8 - lswr 0.19.100 - swresample.h
|
||||
@@ -309,7 +105,7 @@ API changes, most recent first:
|
||||
2014-05-11 - 14aef38 / 66e6c8a - lavu 52.83.100 / 53.14.0 - pixfmt.h
|
||||
Add AV_PIX_FMT_VDA for new-style VDA acceleration.
|
||||
|
||||
2014-05-07 - 351f611 - lavu 52.82.100 - fifo.h
|
||||
2014-05-xx - xxxxxxx - lavu 52.82.100 - fifo.h
|
||||
Add av_fifo_freep() function.
|
||||
|
||||
2014-05-02 - ba52fb11 - lavu 52.81.100 - opt.h
|
||||
@@ -331,14 +127,10 @@ API changes, most recent first:
|
||||
Deprecate CODEC_FLAG_INPUT_PRESERVED. Its functionality is replaced by passing
|
||||
reference-counted frames to encoders.
|
||||
|
||||
2014-04-30 - 617e866 - lavu 52.81.100 - pixdesc.h
|
||||
Add av_find_best_pix_fmt_of_2(), av_get_pix_fmt_loss()
|
||||
Deprecate avcodec_get_pix_fmt_loss(), avcodec_find_best_pix_fmt_of_2()
|
||||
|
||||
2014-04-29 - 1bf6396 - lavc 55.60.100 - avcodec.h
|
||||
Add AVCodecDescriptor.mime_types field.
|
||||
|
||||
2014-04-29 - b804eb4 - lavu 52.80.100 - hash.h
|
||||
2014-04-29 - xxxxxxx - lavu 52.80.0 - hash.h
|
||||
Add av_hash_final_bin(), av_hash_final_hex() and av_hash_final_b64().
|
||||
|
||||
2014-03-07 - 8b2a130 - lavc 55.50.0 / 55.53.100 - dxva2.h
|
||||
@@ -350,7 +142,7 @@ API changes, most recent first:
|
||||
2014-04-17 - a8d01a7 / 0983d48 - lavu 53.12.0 / 52.77.100 - crc.h
|
||||
Add AV_CRC_16_ANSI_LE crc variant.
|
||||
|
||||
2014-04-15 - ef818d8 - lavf 55.37.101 - avformat.h
|
||||
2014-04-XX - xxxxxxx - lavf xx.xx.1xx - avformat.h
|
||||
Add av_format_inject_global_side_data()
|
||||
|
||||
2014-04-12 - 4f698be - lavu 52.76.100 - log.h
|
||||
@@ -396,9 +188,6 @@ API changes, most recent first:
|
||||
Give the name AVPacketSideData to the previously anonymous struct used for
|
||||
AVPacket.side_data.
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 2.2 was cut here -------- 8< ---------
|
||||
|
||||
2014-03-18 - 37c07d4 - lsws 2.5.102
|
||||
Make gray16 full-scale.
|
||||
|
||||
@@ -430,7 +219,7 @@ API changes, most recent first:
|
||||
2014-02-19 - f4c8d00 / 6bb8720 - lavu 52.64.101 / 53.3.1 - opt.h
|
||||
Deprecate unused AV_OPT_FLAG_METADATA.
|
||||
|
||||
2014-02-16 - 81c3f81 - lavd 55.10.100 - avdevice.h
|
||||
2014-02-xx - xxxxxxx - lavd 55.10.100 - avdevice.h
|
||||
Add avdevice_list_devices() and avdevice_free_list_devices()
|
||||
|
||||
2014-02-16 - db3c970 - lavf 55.33.100 - avio.h
|
||||
@@ -471,7 +260,7 @@ API changes, most recent first:
|
||||
2014-01-19 - 1a193c4 - lavf 55.25.100 - avformat.h
|
||||
Add avformat_get_mov_video_tags() and avformat_get_mov_audio_tags().
|
||||
|
||||
2014-01-19 - 3532dd5 - lavu 52.63.100 - rational.h
|
||||
2014-01-19 - xxxxxxx - lavu 52.63.100 - rational.h
|
||||
Add av_make_q() function.
|
||||
|
||||
2014-01-05 - 4cf4da9 / 5b4797a - lavu 52.62.100 / 53.2.0 - frame.h
|
||||
@@ -541,9 +330,6 @@ API changes, most recent first:
|
||||
2013-10-31 - 78265fc / 28096e0 - lavu 52.49.100 / 52.17.0 - frame.h
|
||||
Add AVFrame.flags and AV_FRAME_FLAG_CORRUPT.
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 2.1 was cut here -------- 8< ---------
|
||||
|
||||
2013-10-27 - dbe6f9f - lavc 55.39.100 - avcodec.h
|
||||
Add CODEC_CAP_DELAY support to avcodec_decode_subtitle2.
|
||||
|
||||
@@ -616,9 +402,6 @@ API changes, most recent first:
|
||||
Add avcodec_chroma_pos_to_enum()
|
||||
Add avcodec_enum_to_chroma_pos()
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 2.0 was cut here -------- 8< ---------
|
||||
|
||||
2013-07-03 - 838bd73 - lavfi 3.78.100 - avfilter.h
|
||||
Deprecate avfilter_graph_parse() in favor of the equivalent
|
||||
avfilter_graph_parse_ptr().
|
||||
@@ -691,9 +474,6 @@ API changes, most recent first:
|
||||
2013-03-17 - 7aa9af5 - lavu 52.20.100 - opt.h
|
||||
Add AV_OPT_TYPE_VIDEO_RATE value to AVOptionType enum.
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 1.2 was cut here -------- 8< ---------
|
||||
|
||||
2013-03-07 - 9767ec6 - lavu 52.18.100 - avstring.h,bprint.h
|
||||
Add av_escape() and av_bprint_escape() API.
|
||||
|
||||
@@ -706,9 +486,6 @@ API changes, most recent first:
|
||||
2013-01-01 - 2eb2e17 - lavfi 3.34.100
|
||||
Add avfilter_get_audio_buffer_ref_from_arrays_channels.
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 1.1 was cut here -------- 8< ---------
|
||||
|
||||
2012-12-20 - 34de47aa - lavfi 3.29.100 - avfilter.h
|
||||
Add AVFilterLink.channels, avfilter_link_get_channels()
|
||||
and avfilter_ref_get_channels().
|
||||
@@ -754,9 +531,6 @@ API changes, most recent first:
|
||||
Add LIBSWRESAMPLE_VERSION, LIBSWRESAMPLE_BUILD
|
||||
and LIBSWRESAMPLE_IDENT symbols.
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 1.0 was cut here -------- 8< ---------
|
||||
|
||||
2012-09-06 - 29e972f - lavu 51.72.100 - parseutils.h
|
||||
Add av_small_strptime() time parsing function.
|
||||
|
||||
@@ -961,9 +735,6 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
|
||||
avresample_read() are now uint8_t** instead of void**.
|
||||
Libavresample is now stable.
|
||||
|
||||
2012-09-26 - 3ba0dab7 / 1384df64 - lavf 54.29.101 / 56.06.3 - avformat.h
|
||||
Add AVFormatContext.avoid_negative_ts.
|
||||
|
||||
2012-09-24 - 46a3595 / a42aada - lavc 54.59.100 / 54.28.0 - avcodec.h
|
||||
Add avcodec_free_frame(). This function must now
|
||||
be used for freeing an AVFrame.
|
||||
@@ -1178,9 +949,6 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
|
||||
2012-01-12 - b18e17e / 3167dc9 - lavfi 2.59.100 / 2.15.0
|
||||
Add a new installed header -- libavfilter/version.h -- with version macros.
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 0.9 was cut here -------- 8< ---------
|
||||
|
||||
2011-12-08 - a502939 - lavfi 2.52.0
|
||||
Add av_buffersink_poll_frame() to buffersink.h.
|
||||
|
||||
@@ -1390,13 +1158,6 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
|
||||
2011-06-28 - 5129336 - lavu 51.11.0 - avutil.h
|
||||
Define the AV_PICTURE_TYPE_NONE value in AVPictureType enum.
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 0.7 was cut here -------- 8< ---------
|
||||
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 0.8 was cut here -------- 8< ---------
|
||||
|
||||
2011-06-19 - fd2c0a5 - lavfi 2.23.0 - avfilter.h
|
||||
Add layout negotiation fields and helper functions.
|
||||
|
||||
@@ -2074,9 +1835,6 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
|
||||
2010-06-02 - 7e566bb - lavc 52.73.0 - av_get_codec_tag_string()
|
||||
Add av_get_codec_tag_string().
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 0.6 was cut here -------- 8< ---------
|
||||
|
||||
2010-06-01 - 2b99142 - lsws 0.11.0 - convertPalette API
|
||||
Add sws_convertPalette8ToPacked32() and sws_convertPalette8ToPacked24().
|
||||
|
||||
@@ -2094,6 +1852,10 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
|
||||
2010-05-09 - b6bc205 - lavfi 1.20.0 - AVFilterPicRef
|
||||
Add interlaced and top_field_first fields to AVFilterPicRef.
|
||||
|
||||
------------------------------8<-------------------------------------
|
||||
0.6 branch was cut here
|
||||
----------------------------->8--------------------------------------
|
||||
|
||||
2010-05-01 - 8e2ee18 - lavf 52.62.0 - probe function
|
||||
Add av_probe_input_format2 to API, it allows ignoring probe
|
||||
results below given score and returns the actual probe score.
|
||||
|
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 2.6.4
PROJECT_NUMBER = 2.3.6

# With the PROJECT_LOGO tag one can specify a logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
doc/Makefile
@@ -38,15 +38,12 @@ DOCS = $(DOCS-yes)
|
||||
|
||||
DOC_EXAMPLES-$(CONFIG_AVIO_READING_EXAMPLE) += avio_reading
|
||||
DOC_EXAMPLES-$(CONFIG_AVCODEC_EXAMPLE) += avcodec
|
||||
DOC_EXAMPLES-$(CONFIG_DECODING_ENCODING_EXAMPLE) += decoding_encoding
|
||||
DOC_EXAMPLES-$(CONFIG_DEMUXING_DECODING_EXAMPLE) += demuxing_decoding
|
||||
DOC_EXAMPLES-$(CONFIG_EXTRACT_MVS_EXAMPLE) += extract_mvs
|
||||
DOC_EXAMPLES-$(CONFIG_FILTER_AUDIO_EXAMPLE) += filter_audio
|
||||
DOC_EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE) += filtering_audio
|
||||
DOC_EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE) += filtering_video
|
||||
DOC_EXAMPLES-$(CONFIG_METADATA_EXAMPLE) += metadata
|
||||
DOC_EXAMPLES-$(CONFIG_MUXING_EXAMPLE) += muxing
|
||||
DOC_EXAMPLES-$(CONFIG_QSVDEC_EXAMPLE) += qsvdec
|
||||
DOC_EXAMPLES-$(CONFIG_REMUXING_EXAMPLE) += remuxing
|
||||
DOC_EXAMPLES-$(CONFIG_RESAMPLING_AUDIO_EXAMPLE) += resampling_audio
|
||||
DOC_EXAMPLES-$(CONFIG_SCALING_VIDEO_EXAMPLE) += scaling_video
|
||||
@@ -83,25 +80,14 @@ $(GENTEXI): doc/avoptions_%.texi: doc/print_options$(HOSTEXESUF)
|
||||
$(M)doc/print_options $* > $@
|
||||
|
||||
doc/%.html: TAG = HTML
|
||||
doc/%-all.html: TAG = HTML
|
||||
|
||||
ifdef HAVE_MAKEINFO_HTML
|
||||
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.pm $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)makeinfo --html -I doc --no-split -D config-not-all --init-file=$(SRC_PATH)/doc/t2h.pm --output $@ $<
|
||||
|
||||
doc/%-all.html: doc/%.texi $(SRC_PATH)/doc/t2h.pm $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)makeinfo --html -I doc --no-split -D config-all --init-file=$(SRC_PATH)/doc/t2h.pm --output $@ $<
|
||||
else
|
||||
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)texi2html -I doc -monolithic --D=config-not-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
|
||||
|
||||
doc/%-all.html: TAG = HTML
|
||||
doc/%-all.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)texi2html -I doc -monolithic --D=config-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
|
||||
endif
|
||||
|
||||
doc/%.pod: TAG = POD
|
||||
doc/%.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI)
|
||||
@@ -115,9 +101,9 @@ doc/%-all.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI)
|
||||
|
||||
doc/%.1 doc/%.3: TAG = MAN
|
||||
doc/%.1: doc/%.pod $(GENTEXI)
|
||||
$(M)pod2man --section=1 --center=" " --release=" " --date=" " $< > $@
|
||||
$(M)pod2man --section=1 --center=" " --release=" " $< > $@
|
||||
doc/%.3: doc/%.pod $(GENTEXI)
|
||||
$(M)pod2man --section=3 --center=" " --release=" " --date=" " $< > $@
|
||||
$(M)pod2man --section=3 --center=" " --release=" " $< > $@
|
||||
|
||||
$(DOCS) doc/doxy/html: | doc/
|
||||
$(DOC_EXAMPLES:%$(EXESUF)=%.o): | doc/examples
|
||||
|
@@ -13,16 +13,7 @@ bitstream filter using the option @code{--disable-bsf=BSF}.
|
||||
The option @code{-bsfs} of the ff* tools will display the list of
|
||||
all the supported bitstream filters included in your build.
|
||||
|
||||
The ff* tools have a -bsf option applied per stream, taking a
|
||||
comma-separated list of filters, whose parameters follow the filter
|
||||
name after a '='.
|
||||
|
||||
@example
|
||||
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
|
||||
@end example
|
||||
|
||||
Below is a description of the currently available bitstream filters,
|
||||
with their parameters, if any.
|
||||
Below is a description of the currently available bitstream filters.
|
||||
|
||||
@section aac_adtstoasc
|
||||
|
||||
@@ -144,16 +135,9 @@ ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
|
||||
Damages the contents of packets without damaging the container. Can be
|
||||
used for fuzzing or testing error resilience/concealment.
|
||||
|
||||
Parameters:
|
||||
A numeral string, whose value is related to how often output bytes will
|
||||
be modified. Therefore, values below or equal to 0 are forbidden, and
|
||||
the lower the more frequent bytes will be modified, with 1 meaning
|
||||
every byte is modified.
|
||||
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
|
||||
ffmpeg -i INPUT -c copy -bsf noise output.mkv
|
||||
@end example
|
||||
applies the modification to every byte.
|
||||
|
||||
@section remove_extra
|
||||
|
||||
|
doc/bootstrap.min.css (vendored)
File diff suppressed because one or more lines are too long
@@ -7,11 +7,6 @@ V
|
||||
Disable the default terse mode, the full command issued by make and its
|
||||
output will be shown on the screen.
|
||||
|
||||
DBG
|
||||
Preprocess x86 external assembler files to a .dbg.asm file in the object
|
||||
directory, which then gets compiled. Helps developing those assembler
|
||||
files.
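A typical invocation (assuming GNU make drives the build) would be: make DBG=1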
|
||||
|
||||
DESTDIR
|
||||
Destination directory for the install targets, useful to prepare packages
|
||||
or install FFmpeg in cross-environments.
|
||||
|
@@ -7,7 +7,7 @@ all the encoders and decoders. In addition each codec may support
|
||||
so-called private options, which are specific for a given codec.
|
||||
|
||||
Sometimes, a global option may only affect a specific kind of codec,
|
||||
and may be nonsensical or ignored by another, so you need to be aware
|
||||
and may be unsensical or ignored by another, so you need to be aware
|
||||
of the meaning of the specified options. Also some options are
|
||||
meant only for decoding or encoding.
|
||||
|
||||
@@ -71,9 +71,7 @@ Force low delay.
|
||||
@item global_header
|
||||
Place global headers in extradata instead of every keyframe.
|
||||
@item bitexact
|
||||
Only write platform-, build- and time-independent data. (except (I)DCT).
|
||||
This ensures that file and data checksums are reproducible and match between
|
||||
platforms. Its primary use is for regression testing.
|
||||
Use only bitexact stuff (except (I)DCT).
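A minimal usage sketch (input and output names are placeholders, not taken from this document):
@example
ffmpeg -i INPUT -c:v mpeg4 -flags +bitexact OUTPUT.avi
@end example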
|
||||
@item aic
|
||||
Apply H263 advanced intra coding / mpeg4 ac prediction.
|
||||
@item cbp
|
||||
@@ -495,15 +493,11 @@ visualize block types
|
||||
picture buffer allocations
|
||||
@item thread_ops
|
||||
threading operations
|
||||
@item nomc
|
||||
skip motion compensation
|
||||
@end table
|
||||
|
||||
@item vismv @var{integer} (@emph{decoding,video})
|
||||
Visualize motion vectors (MVs).
|
||||
|
||||
This option is deprecated, see the codecview filter instead.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item pf
|
||||
@@ -803,9 +797,6 @@ Frame data might be split into multiple chunks.
|
||||
Show all frames before the first keyframe.
|
||||
@item skiprd
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
@item export_mvs
|
||||
Export motion vectors into frame side-data (see @code{AV_FRAME_DATA_MOTION_VECTORS})
|
||||
for codecs that support it. See also @file{doc/examples/export_mvs.c}.
|
||||
@end table
|
||||
|
||||
@item error @var{integer} (@emph{encoding,video})
|
||||
@@ -865,14 +856,6 @@ Possible values:
|
||||
|
||||
@item mpeg2_aac_he
|
||||
|
||||
@item mpeg4_sp
|
||||
|
||||
@item mpeg4_core
|
||||
|
||||
@item mpeg4_main
|
||||
|
||||
@item mpeg4_asp
|
||||
|
||||
@item dts
|
||||
|
||||
@item dts_es
|
||||
@@ -1126,19 +1109,6 @@ Interlaced video, bottom coded first, top displayed first
|
||||
Set to 1 to disable processing alpha (transparency). This works like the
|
||||
@samp{gray} flag in the @option{flags} option which skips chroma information
|
||||
instead of alpha. Default is 0.
|
||||
|
||||
@item codec_whitelist @var{list} (@emph{input})
|
||||
"," separated List of allowed decoders. By default all are allowed.
|
||||
|
||||
@item dump_separator @var{string} (@emph{input})
|
||||
Separator used to separate the fields printed on the command line about the
|
||||
Stream parameters.
|
||||
For example to separate the fields with newlines and indention:
|
||||
@example
|
||||
ffprobe -dump_separator "
|
||||
" -i ~/videos/matrixbench_mpeg2.mpg
|
||||
@end example
|
||||
|
||||
@end table
|
||||
|
||||
@c man end CODEC OPTIONS
|
||||
|
@@ -163,9 +163,6 @@ Requires the presence of the libopus headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libopus}.
|
||||
|
||||
An FFmpeg native decoder for Opus exists, so users can decode Opus
|
||||
without this library.
|
||||
|
||||
@c man end AUDIO DECODERS
|
||||
|
||||
@chapter Subtitles Decoders
|
||||
@@ -190,15 +187,6 @@ The format for this option is a string containing 16 24-bits hexadecimal
|
||||
numbers (without 0x prefix) separated by comas, for example @code{0d00ee,
|
||||
ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
|
||||
7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b}.
|
||||
|
||||
@item ifo_palette
|
||||
Specify the IFO file from which the global palette is obtained.
|
||||
(experimental)
|
||||
|
||||
@item forced_subs_only
|
||||
Only decode subtitle entries marked as forced. Some titles have forced
|
||||
and non-forced subtitles in the same track. Setting this flag to @code{1}
|
||||
will only keep the forced subtitles. Default value is @code{0}.
|
||||
@end table
|
||||
|
||||
@section libzvbi-teletext
|
||||
|
@@ -29,26 +29,6 @@ the caller can decide which variant streams to actually receive.
|
||||
The total bitrate of the variant that the stream belongs to is
|
||||
available in a metadata key named "variant_bitrate".
|
||||
|
||||
@section apng
|
||||
|
||||
Animated Portable Network Graphics demuxer.
|
||||
|
||||
This demuxer is used to demux APNG files.
|
||||
All headers, but the PNG signature, up to (but not including) the first
|
||||
fcTL chunk are transmitted as extradata.
|
||||
Frames are then split as being all the chunks between two fcTL ones, or
|
||||
between the last fcTL and IEND chunks.
|
||||
|
||||
@table @option
|
||||
@item -ignore_loop @var{bool}
|
||||
Ignore the loop variable in the file if set.
|
||||
@item -max_fps @var{int}
|
||||
Maximum framerate in frames per second (0 for no limit).
|
||||
@item -default_fps @var{int}
|
||||
Default framerate in frames per second when none is specified in the file
|
||||
(0 meaning as fast as possible).
|
||||
@end table
|
||||
|
||||
@section asf
|
||||
|
||||
Advanced Systems Format demuxer.
|
||||
@@ -194,40 +174,6 @@ See @url{http://quvi.sourceforge.net/} for more information.
FFmpeg needs to be built with @code{--enable-libquvi} for this demuxer to be
enabled.

@section gif

Animated GIF demuxer.

It accepts the following options:

@table @option
@item min_delay
Set the minimum valid delay between frames in hundredths of seconds.
Range is 0 to 6000. Default value is 2.

@item default_delay
Set the default delay between frames in hundredths of seconds.
Range is 0 to 6000. Default value is 10.

@item ignore_loop
GIF files can contain information to loop a certain number of times (or
infinitely). If @option{ignore_loop} is set to 1, then the loop setting
from the input will be ignored and looping will not occur. If set to 0,
then looping will occur and will cycle the number of times according to
the GIF. Default value is 1.
@end table

For example, with the overlay filter, place an infinitely looping GIF
over another video:
@example
ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv
@end example

Note that in the above example the shortest option for overlay filter is
used to end the output video at the length of the shortest input file,
which in this case is @file{input.mp4} as the GIF in this example loops
infinitely.

@section image2

Image file demuxer.
@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle Developer Documentation
@titlepage
@@ -324,12 +323,9 @@ Always fill out the commit log message. Describe in a few lines what you
changed and why. You can refer to mailing list postings if you fix a
particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
Recommended format:

@example
area changed: Short 1 line description

details describing what and why and giving references.
@end example

@item
Make sure the author of the commit is set correctly. (see git commit --author)
@@ -648,12 +644,12 @@ accordingly].
@subsection Adding files to the fate-suite dataset

When there is no muxer or encoder available to generate test media for a
specific test then the media has to be included in the fate-suite.
specific test then the media has to be inlcuded in the fate-suite.
First please make sure that the sample file is as small as possible to test the
respective decoder or demuxer sufficiently. Large files increase network
bandwidth and disk space requirements.
Once you have a working fate test and fate sample, provide in the commit
message or introductory message for the patch series that you post to
message or introductionary message for the patch series that you post to
the ffmpeg-devel mailing list, a direct link to download the sample media.

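To illustrate the point above, the commit message for such a patch can carry the download link directly; the test name and URL below are purely hypothetical:

@example
fate: add test for the foo decoder

The sample used by the new test can be downloaded from
https://example.com/samples/foo-sample.mkv
@end example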
@@ -6,16 +6,8 @@ DOXYGEN="${3}"

shift 3

if [ -e "$SRC_PATH/VERSION" ]; then
VERSION=`cat "$SRC_PATH/VERSION"`
else
VERSION=`cd "$SRC_PATH"; git describe`
fi

$DOXYGEN - <<EOF
@INCLUDE = ${DOXYFILE}
INPUT = $@
EXAMPLE_PATH = ${SRC_PATH}/doc/examples
HTML_TIMESTAMP = NO
PROJECT_NUMBER = $VERSION
EOF
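# Hypothetical invocation sketch (not part of the script itself): the three
# positional arguments are SRC_PATH, DOXYFILE and DOXYGEN; any remaining
# arguments become the Doxygen INPUT list.
#   ./doc/doxy-wrapper.sh . doc/Doxyfile doxygen libavcodec/avcodec.h libavformat/avformat.h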
@@ -1032,7 +1032,7 @@ configuration. You need to explicitly configure the build with

@subsection Option Mapping

Most libopus options are modelled after the @command{opusenc} utility from
Most libopus options are modeled after the @command{opusenc} utility from
opus-tools. The following is an option mapping chart describing options
supported by the libopus wrapper, and their @command{opusenc}-equivalent
in parentheses.
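For instance (an added sketch, not from the original chart; file names are illustrative), encoding with the wrapper and a target bitrate could look like:

@example
ffmpeg -i input.wav -c:a libopus -b:a 96k output.ogg
@end example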
@@ -1330,7 +1330,7 @@ ffmpeg -i INPUT -codec:v libtheora -b:v 1000k OUTPUT.ogg

@section libvpx

VP8/VP9 format supported through libvpx.
VP8 format supported through libvpx.

Requires the presence of the libvpx headers and library during configuration.
You need to explicitly configure the build with @code{--enable-libvpx}.
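As an added example (not part of the original section; file names are illustrative), a basic VP8 WebM encode with this wrapper might be:

@example
ffmpeg -i input.mp4 -c:v libvpx -b:v 1M -c:a libvorbis output.webm
@end example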
@@ -1442,9 +1442,6 @@ g_lag_in_frames
@item vp8flags error_resilient
g_error_resilient

@item aq_mode
@code{VP9E_SET_AQ_MODE}

@end table

For more information about libvpx see:
@@ -1745,10 +1742,6 @@ Enable calculation and printing SSIM stats after the encoding.
Enable the use of Periodic Intra Refresh instead of IDR frames when set
to 1.

@item avcintra-class (@emph{class})
Configure the encoder to generate AVC-Intra.
Valid values are 50,100 and 200

@item bluray-compat (@emph{bluray-compat})
Configure the encoder to be compatible with the bluray standard.
It is a shorthand for setting "bluray-compat=1 force-cfr=1".
@@ -1886,34 +1879,6 @@ no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT
Encoding ffpresets for common usages are provided so they can be used with the
general presets system (e.g. passing the @option{pre} option).

@section libx265

x265 H.265/HEVC encoder wrapper.

This encoder requires the presence of the libx265 headers and library
during configuration. You need to explicitly configure the build with
@option{--enable-libx265}.

@subsection Options

@table @option
@item preset
Set the x265 preset.

@item tune
Set the x265 tune parameter.

@item x265-params
Set x265 options using a list of @var{key}=@var{value} couples separated
by ":". See @command{x265 --help} for a list of options.

For example to specify libx265 encoding options with @option{-x265-params}:

@example
ffmpeg -i input -c:v libx265 -x265-params crf=26:psy-rd=1 output.mp4
@end example
@end table
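Similarly, as an added sketch (not from the original text), the @option{preset} and @option{tune} options described above can be combined on the command line:

@example
ffmpeg -i input -c:v libx265 -preset slow -tune ssim output.mp4
@end example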

@section libxvid

Xvid MPEG-4 Part 2 encoder wrapper.
@@ -2171,7 +2136,7 @@ Use @var{0} to disable alpha plane coding.
@subsection Speed considerations

In the default mode of operation the encoder has to honor frame constraints
(i.e. not produce frames with size bigger than requested) while still making
(i.e. not produc frames with size bigger than requested) while still making
output picture as good as possible.
A frame containing a lot of small details is harder to compress and the encoder
would spend more time searching for appropriate quantizers for each slice.
@@ -14,7 +14,6 @@ LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
EXAMPLES= avio_reading \
decoding_encoding \
demuxing_decoding \
extract_mvs \
filtering_video \
filtering_audio \
metadata \
@@ -29,7 +28,6 @@ OBJS=$(addsuffix .o,$(EXAMPLES))

# the following examples make explicit use of the math library
avcodec: LDLIBS += -lm
decoding_encoding: LDLIBS += -lm
muxing: LDLIBS += -lm
resampling_audio: LDLIBS += -lm

@@ -288,7 +288,6 @@ static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);
|
||||
|
||||
while (avpkt.size > 0) {
|
||||
int i, ch;
|
||||
int got_frame = 0;
|
||||
|
||||
if (!decoded_frame) {
|
||||
@@ -305,15 +304,15 @@ static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
}
|
||||
if (got_frame) {
|
||||
/* if a frame has been decoded, output it */
|
||||
int data_size = av_get_bytes_per_sample(c->sample_fmt);
|
||||
int data_size = av_samples_get_buffer_size(NULL, c->channels,
|
||||
decoded_frame->nb_samples,
|
||||
c->sample_fmt, 1);
|
||||
if (data_size < 0) {
|
||||
/* This should not occur, checking just for paranoia */
|
||||
fprintf(stderr, "Failed to calculate data size\n");
|
||||
exit(1);
|
||||
}
|
||||
for (i=0; i<decoded_frame->nb_samples; i++)
|
||||
for (ch=0; ch<c->channels; ch++)
|
||||
fwrite(decoded_frame->data[ch] + data_size*i, 1, data_size, outfile);
|
||||
fwrite(decoded_frame->data[0], 1, data_size, outfile);
|
||||
}
|
||||
avpkt.size -= len;
|
||||
avpkt.data += len;
|
||||
@@ -651,7 +650,7 @@ int main(int argc, char **argv)
|
||||
video_encode_example("test.h264", AV_CODEC_ID_H264);
|
||||
} else if (!strcmp(output_type, "mp2")) {
|
||||
audio_encode_example("test.mp2");
|
||||
audio_decode_example("test.pcm", "test.mp2");
|
||||
audio_decode_example("test.sw", "test.mp2");
|
||||
} else if (!strcmp(output_type, "mpg")) {
|
||||
video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO);
|
||||
video_decode_example("test%02d.pgm", "test.mpg");
|
||||
|
@@ -36,8 +36,6 @@
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
|
||||
static int width, height;
|
||||
static enum AVPixelFormat pix_fmt;
|
||||
static AVStream *video_stream = NULL, *audio_stream = NULL;
|
||||
static const char *src_filename = NULL;
|
||||
static const char *video_dst_filename = NULL;
|
||||
@@ -83,22 +81,6 @@ static int decode_packet(int *got_frame, int cached)
|
||||
}
|
||||
|
||||
if (*got_frame) {
|
||||
|
||||
if (frame->width != width || frame->height != height ||
|
||||
frame->format != pix_fmt) {
|
||||
/* To handle this change, one could call av_image_alloc again and
|
||||
* decode the following frames into another rawvideo file. */
|
||||
fprintf(stderr, "Error: Width, height and pixel format have to be "
|
||||
"constant in a rawvideo file, but the width, height or "
|
||||
"pixel format of the input video changed:\n"
|
||||
"old: width = %d, height = %d, format = %s\n"
|
||||
"new: width = %d, height = %d, format = %s\n",
|
||||
width, height, av_get_pix_fmt_name(pix_fmt),
|
||||
frame->width, frame->height,
|
||||
av_get_pix_fmt_name(frame->format));
|
||||
return -1;
|
||||
}
|
||||
|
||||
printf("video_frame%s n:%d coded_n:%d pts:%s\n",
|
||||
cached ? "(cached)" : "",
|
||||
video_frame_count++, frame->coded_picture_number,
|
||||
@@ -108,7 +90,7 @@ static int decode_packet(int *got_frame, int cached)
|
||||
* this is required since rawvideo expects non aligned data */
|
||||
av_image_copy(video_dst_data, video_dst_linesize,
|
||||
(const uint8_t **)(frame->data), frame->linesize,
|
||||
pix_fmt, width, height);
|
||||
video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);
|
||||
|
||||
/* write to rawvideo file */
|
||||
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
|
||||
@@ -156,7 +138,7 @@ static int decode_packet(int *got_frame, int cached)
|
||||
static int open_codec_context(int *stream_idx,
|
||||
AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
{
|
||||
int ret, stream_index;
|
||||
int ret;
|
||||
AVStream *st;
|
||||
AVCodecContext *dec_ctx = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
@@ -168,8 +150,8 @@ static int open_codec_context(int *stream_idx,
|
||||
av_get_media_type_string(type), src_filename);
|
||||
return ret;
|
||||
} else {
|
||||
stream_index = ret;
|
||||
st = fmt_ctx->streams[stream_index];
|
||||
*stream_idx = ret;
|
||||
st = fmt_ctx->streams[*stream_idx];
|
||||
|
||||
/* find decoder for the stream */
|
||||
dec_ctx = st->codec;
|
||||
@@ -188,7 +170,6 @@ static int open_codec_context(int *stream_idx,
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
*stream_idx = stream_index;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -283,11 +264,9 @@ int main (int argc, char **argv)
|
||||
}
|
||||
|
||||
/* allocate image where the decoded image will be put */
|
||||
width = video_dec_ctx->width;
|
||||
height = video_dec_ctx->height;
|
||||
pix_fmt = video_dec_ctx->pix_fmt;
|
||||
ret = av_image_alloc(video_dst_data, video_dst_linesize,
|
||||
width, height, pix_fmt, 1);
|
||||
video_dec_ctx->width, video_dec_ctx->height,
|
||||
video_dec_ctx->pix_fmt, 1);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate raw video buffer\n");
|
||||
goto end;
|
||||
@@ -362,7 +341,7 @@ int main (int argc, char **argv)
|
||||
if (video_stream) {
|
||||
printf("Play the output video file with the command:\n"
|
||||
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
|
||||
av_get_pix_fmt_name(pix_fmt), width, height,
|
||||
av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
|
||||
video_dst_filename);
|
||||
}
|
||||
|
||||
|
@@ -1,185 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
* Copyright (c) 2014 Clément Bœsch
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <libavutil/motion_vector.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
static AVCodecContext *video_dec_ctx = NULL;
|
||||
static AVStream *video_stream = NULL;
|
||||
static const char *src_filename = NULL;
|
||||
|
||||
static int video_stream_idx = -1;
|
||||
static AVFrame *frame = NULL;
|
||||
static AVPacket pkt;
|
||||
static int video_frame_count = 0;
|
||||
|
||||
static int decode_packet(int *got_frame, int cached)
|
||||
{
|
||||
int decoded = pkt.size;
|
||||
|
||||
*got_frame = 0;
|
||||
|
||||
if (pkt.stream_index == video_stream_idx) {
|
||||
int ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (*got_frame) {
|
||||
int i;
|
||||
AVFrameSideData *sd;
|
||||
|
||||
video_frame_count++;
|
||||
sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
|
||||
if (sd) {
|
||||
const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
|
||||
for (i = 0; i < sd->size / sizeof(*mvs); i++) {
|
||||
const AVMotionVector *mv = &mvs[i];
|
||||
printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64"\n",
|
||||
video_frame_count, mv->source,
|
||||
mv->w, mv->h, mv->src_x, mv->src_y,
|
||||
mv->dst_x, mv->dst_y, mv->flags);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return decoded;
|
||||
}
|
||||
|
||||
static int open_codec_context(int *stream_idx,
|
||||
AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
{
|
||||
int ret;
|
||||
AVStream *st;
|
||||
AVCodecContext *dec_ctx = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
|
||||
av_get_media_type_string(type), src_filename);
|
||||
return ret;
|
||||
} else {
|
||||
*stream_idx = ret;
|
||||
st = fmt_ctx->streams[*stream_idx];
|
||||
|
||||
/* find decoder for the stream */
|
||||
dec_ctx = st->codec;
|
||||
dec = avcodec_find_decoder(dec_ctx->codec_id);
|
||||
if (!dec) {
|
||||
fprintf(stderr, "Failed to find %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/* Init the video decoder */
|
||||
av_dict_set(&opts, "flags2", "+export_mvs", 0);
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret = 0, got_frame;
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s <video>\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
src_filename = argv[1];
|
||||
|
||||
av_register_all();
|
||||
|
||||
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open source file %s\n", src_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
|
||||
fprintf(stderr, "Could not find stream information\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
|
||||
video_stream = fmt_ctx->streams[video_stream_idx];
|
||||
video_dec_ctx = video_stream->codec;
|
||||
}
|
||||
|
||||
av_dump_format(fmt_ctx, 0, src_filename, 0);
|
||||
|
||||
if (!video_stream) {
|
||||
fprintf(stderr, "Could not find video stream in the input, aborting\n");
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate frame\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");
|
||||
|
||||
/* initialize packet, set data to NULL, let the demuxer fill it */
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
AVPacket orig_pkt = pkt;
|
||||
do {
|
||||
ret = decode_packet(&got_frame, 0);
|
||||
if (ret < 0)
|
||||
break;
|
||||
pkt.data += ret;
|
||||
pkt.size -= ret;
|
||||
} while (pkt.size > 0);
|
||||
av_free_packet(&orig_pkt);
|
||||
}
|
||||
|
||||
/* flush cached frames */
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
do {
|
||||
decode_packet(&got_frame, 1);
|
||||
} while (got_frame);
|
||||
|
||||
end:
|
||||
avcodec_close(video_dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
return ret < 0;
|
||||
}
|
@@ -45,7 +45,6 @@
|
||||
|
||||
#include "libavutil/channel_layout.h"
|
||||
#include "libavutil/md5.h"
|
||||
#include "libavutil/mem.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/samplefmt.h"
|
||||
|
||||
|
@@ -145,28 +145,12 @@ static int init_filters(const char *filters_descr)
|
||||
goto end;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the endpoints for the filter graph. The filter_graph will
|
||||
* be linked to the graph described by filters_descr.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The buffer source output must be connected to the input pad of
|
||||
* the first filter described by filters_descr; since the first
|
||||
* filter input label is not specified, it is set to "in" by
|
||||
* default.
|
||||
*/
|
||||
/* Endpoints for the filter graph. */
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
/*
|
||||
* The buffer sink input must be connected to the output pad of
|
||||
* the last filter described by filters_descr; since the last
|
||||
* filter output label is not specified, it is set to "out" by
|
||||
* default.
|
||||
*/
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
|
@@ -90,7 +90,6 @@ static int init_filters(const char *filters_descr)
|
||||
AVFilter *buffersink = avfilter_get_by_name("buffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
|
||||
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
|
||||
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
@@ -103,7 +102,7 @@ static int init_filters(const char *filters_descr)
|
||||
snprintf(args, sizeof(args),
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
|
||||
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
|
||||
time_base.num, time_base.den,
|
||||
dec_ctx->time_base.num, dec_ctx->time_base.den,
|
||||
dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
|
||||
@@ -128,28 +127,12 @@ static int init_filters(const char *filters_descr)
|
||||
goto end;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the endpoints for the filter graph. The filter_graph will
|
||||
* be linked to the graph described by filters_descr.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The buffer source output must be connected to the input pad of
|
||||
* the first filter described by filters_descr; since the first
|
||||
* filter input label is not specified, it is set to "in" by
|
||||
* default.
|
||||
*/
|
||||
/* Endpoints for the filter graph. */
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
/*
|
||||
* The buffer sink input must be connected to the output pad of
|
||||
* the last filter described by filters_descr; since the last
|
||||
* filter output label is not specified, it is set to "out" by
|
||||
* default.
|
||||
*/
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
|
@@ -55,7 +55,6 @@ typedef struct OutputStream {
|
||||
|
||||
/* pts of the next frame that will be generated */
|
||||
int64_t next_pts;
|
||||
int samples_count;
|
||||
|
||||
AVFrame *frame;
|
||||
AVFrame *tmp_frame;
|
||||
@@ -63,7 +62,6 @@ typedef struct OutputStream {
|
||||
float t, tincr, tincr2;
|
||||
|
||||
struct SwsContext *sws_ctx;
|
||||
struct SwrContext *swr_ctx;
|
||||
} OutputStream;
|
||||
|
||||
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
|
||||
@@ -94,7 +92,6 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
enum AVCodecID codec_id)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int i;
|
||||
|
||||
/* find the encoder */
|
||||
*codec = avcodec_find_encoder(codec_id);
|
||||
@@ -118,24 +115,8 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
(*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
|
||||
c->bit_rate = 64000;
|
||||
c->sample_rate = 44100;
|
||||
if ((*codec)->supported_samplerates) {
|
||||
c->sample_rate = (*codec)->supported_samplerates[0];
|
||||
for (i = 0; (*codec)->supported_samplerates[i]; i++) {
|
||||
if ((*codec)->supported_samplerates[i] == 44100)
|
||||
c->sample_rate = 44100;
|
||||
}
|
||||
}
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
c->channels = 2;
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
if ((*codec)->channel_layouts) {
|
||||
c->channel_layout = (*codec)->channel_layouts[0];
|
||||
for (i = 0; (*codec)->channel_layouts[i]; i++) {
|
||||
if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
}
|
||||
}
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
ost->st->time_base = (AVRational){ 1, c->sample_rate };
|
||||
break;
|
||||
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
@@ -149,9 +130,8 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
* of which frame timestamps are represented. For fixed-fps content,
|
||||
* timebase should be 1/framerate and timestamp increments should be
|
||||
* identical to 1. */
|
||||
ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
|
||||
c->time_base = ost->st->time_base;
|
||||
|
||||
c->time_base.den = STREAM_FRAME_RATE;
|
||||
c->time_base.num = 1;
|
||||
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
|
||||
c->pix_fmt = STREAM_PIX_FMT;
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
|
||||
@@ -178,47 +158,19 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
/**************************************************************/
|
||||
/* audio output */
|
||||
|
||||
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
|
||||
uint64_t channel_layout,
|
||||
int sample_rate, int nb_samples)
|
||||
{
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
int ret;
|
||||
int samples_count;
|
||||
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Error allocating an audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
struct SwrContext *swr_ctx = NULL;
|
||||
|
||||
frame->format = sample_fmt;
|
||||
frame->channel_layout = channel_layout;
|
||||
frame->sample_rate = sample_rate;
|
||||
frame->nb_samples = nb_samples;
|
||||
|
||||
if (nb_samples) {
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error allocating an audio buffer\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int nb_samples;
|
||||
int ret;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
c = ost->st->codec;
|
||||
|
||||
/* open it */
|
||||
av_dict_copy(&opt, opt_arg, 0);
|
||||
ret = avcodec_open2(c, codec, &opt);
|
||||
av_dict_free(&opt);
|
||||
ret = avcodec_open2(c, codec, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
@@ -230,52 +182,84 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, A
|
||||
/* increment frequency by 110 Hz per second */
|
||||
ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
|
||||
|
||||
if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
|
||||
nb_samples = 10000;
|
||||
else
|
||||
nb_samples = c->frame_size;
|
||||
ost->frame = av_frame_alloc();
|
||||
if (!ost->frame)
|
||||
exit(1);
|
||||
|
||||
ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
ost->frame->sample_rate = c->sample_rate;
|
||||
ost->frame->format = AV_SAMPLE_FMT_S16;
|
||||
ost->frame->channel_layout = c->channel_layout;
|
||||
|
||||
if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
|
||||
ost->frame->nb_samples = 10000;
|
||||
else
|
||||
ost->frame->nb_samples = c->frame_size;
|
||||
|
||||
ost->tmp_frame = av_frame_alloc();
|
||||
if (!ost->frame)
|
||||
exit(1);
|
||||
|
||||
ost->tmp_frame->sample_rate = c->sample_rate;
|
||||
ost->tmp_frame->format = c->sample_fmt;
|
||||
ost->tmp_frame->channel_layout = c->channel_layout;
|
||||
ost->tmp_frame->nb_samples = ost->frame->nb_samples;
|
||||
|
||||
/* create resampler context */
|
||||
ost->swr_ctx = swr_alloc();
|
||||
if (!ost->swr_ctx) {
|
||||
if (c->sample_fmt != AV_SAMPLE_FMT_S16) {
|
||||
swr_ctx = swr_alloc();
|
||||
if (!swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
av_opt_set_int (swr_ctx, "in_channel_count", c->channels, 0);
|
||||
av_opt_set_int (swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_int (swr_ctx, "out_channel_count", c->channels, 0);
|
||||
av_opt_set_int (swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(ost->swr_ctx)) < 0) {
|
||||
if ((ret = swr_init(swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
ret = av_frame_get_buffer(ost->frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate an audio frame.\n");
|
||||
exit(1);
|
||||
}
|
||||
ret = av_frame_get_buffer(ost->tmp_frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate an audio frame.\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
|
||||
* 'nb_channels' channels. */
|
||||
static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
{
|
||||
AVFrame *frame = ost->tmp_frame;
|
||||
int j, i, v;
|
||||
int16_t *q = (int16_t*)frame->data[0];
|
||||
int j, i, v, ret;
|
||||
int16_t *q = (int16_t*)ost->frame->data[0];
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
|
||||
for (j = 0; j <frame->nb_samples; j++) {
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
* make sure we do not overwrite it here
|
||||
*/
|
||||
ret = av_frame_make_writable(ost->frame);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
for (j = 0; j < ost->frame->nb_samples; j++) {
|
||||
v = (int)(sin(ost->t) * 10000);
|
||||
for (i = 0; i < ost->st->codec->channels; i++)
|
||||
*q++ = v;
|
||||
@@ -283,10 +267,10 @@ static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
ost->tincr += ost->tincr2;
|
||||
}
|
||||
|
||||
frame->pts = ost->next_pts;
|
||||
ost->next_pts += frame->nb_samples;
|
||||
ost->frame->pts = ost->next_pts;
|
||||
ost->next_pts += ost->frame->nb_samples;
|
||||
|
||||
return frame;
|
||||
return ost->frame;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -309,31 +293,27 @@ static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
|
||||
if (frame) {
|
||||
/* convert samples from native format to destination codec format, using the resampler */
|
||||
if (swr_ctx) {
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + frame->nb_samples,
|
||||
c->sample_rate, c->sample_rate, AV_ROUND_UP);
|
||||
av_assert0(dst_nb_samples == frame->nb_samples);
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
* make sure we do not overwrite it here
|
||||
*/
|
||||
ret = av_frame_make_writable(ost->frame);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
/* convert to destination format */
|
||||
ret = swr_convert(ost->swr_ctx,
|
||||
ost->frame->data, dst_nb_samples,
|
||||
ret = swr_convert(swr_ctx,
|
||||
ost->tmp_frame->data, dst_nb_samples,
|
||||
(const uint8_t **)frame->data, frame->nb_samples);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while converting\n");
|
||||
exit(1);
|
||||
}
|
||||
frame = ost->frame;
|
||||
frame = ost->tmp_frame;
|
||||
} else {
|
||||
dst_nb_samples = frame->nb_samples;
|
||||
}
|
||||
|
||||
frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
|
||||
ost->samples_count += dst_nb_samples;
|
||||
frame->pts = av_rescale_q(samples_count, (AVRational){1, c->sample_rate}, c->time_base);
|
||||
samples_count += dst_nb_samples;
|
||||
}
|
||||
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
|
||||
@@ -380,17 +360,13 @@ static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
|
||||
return picture;
|
||||
}
|
||||
|
||||
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c = ost->st->codec;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
av_dict_copy(&opt, opt_arg, 0);
|
||||
|
||||
/* open the codec */
|
||||
ret = avcodec_open2(c, codec, &opt);
|
||||
av_dict_free(&opt);
|
||||
ret = avcodec_open2(c, codec, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
@@ -547,7 +523,6 @@ static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
av_frame_free(&ost->frame);
|
||||
av_frame_free(&ost->tmp_frame);
|
||||
sws_freeContext(ost->sws_ctx);
|
||||
swr_free(&ost->swr_ctx);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
@@ -563,12 +538,11 @@ int main(int argc, char **argv)
|
||||
int ret;
|
||||
int have_video = 0, have_audio = 0;
|
||||
int encode_video = 0, encode_audio = 0;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
/* Initialize libavcodec, and register all codecs and formats. */
|
||||
av_register_all();
|
||||
|
||||
if (argc < 2) {
|
||||
if (argc != 2) {
|
||||
printf("usage: %s output_file\n"
|
||||
"API example program to output a media file with libavformat.\n"
|
||||
"This program generates a synthetic audio and video stream, encodes and\n"
|
||||
@@ -580,9 +554,6 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
filename = argv[1];
|
||||
if (argc > 3 && !strcmp(argv[2], "-flags")) {
|
||||
av_dict_set(&opt, argv[2]+1, argv[3], 0);
|
||||
}
|
||||
|
||||
/* allocate the output media context */
|
||||
avformat_alloc_output_context2(&oc, NULL, NULL, filename);
|
||||
@@ -611,10 +582,10 @@ int main(int argc, char **argv)
|
||||
/* Now that all the parameters are set, we can open the audio and
|
||||
* video codecs and allocate the necessary encode buffers. */
|
||||
if (have_video)
|
||||
open_video(oc, video_codec, &video_st, opt);
|
||||
open_video(oc, video_codec, &video_st);
|
||||
|
||||
if (have_audio)
|
||||
open_audio(oc, audio_codec, &audio_st, opt);
|
||||
open_audio(oc, audio_codec, &audio_st);
|
||||
|
||||
av_dump_format(oc, 0, filename, 1);
|
||||
|
||||
@@ -629,7 +600,7 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
/* Write the stream header, if any. */
|
||||
ret = avformat_write_header(oc, &opt);
|
||||
ret = avformat_write_header(oc, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred when opening output file: %s\n",
|
||||
av_err2str(ret));
|
||||
@@ -661,7 +632,7 @@ int main(int argc, char **argv)
|
||||
|
||||
if (!(fmt->flags & AVFMT_NOFILE))
|
||||
/* Close the output file. */
|
||||
avio_closep(&oc->pb);
|
||||
avio_close(oc->pb);
|
||||
|
||||
/* free the stream */
|
||||
avformat_free_context(oc);
|
||||
|
@@ -1,484 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015 Anton Khirnov
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* Intel QSV-accelerated H.264 decoding example.
|
||||
*
|
||||
* @example qsvdec.c
|
||||
* This example shows how to do QSV-accelerated H.264 decoding with output
|
||||
* frames in the VA-API video surfaces.
|
||||
*/
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include <mfx/mfxvideo.h>
|
||||
|
||||
#include <va/va.h>
|
||||
#include <va/va_x11.h>
|
||||
#include <X11/Xlib.h>
|
||||
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavformat/avio.h"
|
||||
|
||||
#include "libavcodec/avcodec.h"
|
||||
#include "libavcodec/qsv.h"
|
||||
|
||||
#include "libavutil/error.h"
|
||||
#include "libavutil/mem.h"
|
||||
|
||||
typedef struct DecodeContext {
|
||||
mfxSession mfx_session;
|
||||
VADisplay va_dpy;
|
||||
|
||||
VASurfaceID *surfaces;
|
||||
mfxMemId *surface_ids;
|
||||
int *surface_used;
|
||||
int nb_surfaces;
|
||||
|
||||
mfxFrameInfo frame_info;
|
||||
} DecodeContext;
|
||||
|
||||
static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
|
||||
mfxFrameAllocResponse *resp)
|
||||
{
|
||||
DecodeContext *decode = pthis;
|
||||
int err, i;
|
||||
|
||||
if (decode->surfaces) {
|
||||
fprintf(stderr, "Multiple allocation requests.\n");
|
||||
return MFX_ERR_MEMORY_ALLOC;
|
||||
}
|
||||
if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)) {
|
||||
fprintf(stderr, "Unsupported surface type: %d\n", req->Type);
|
||||
return MFX_ERR_UNSUPPORTED;
|
||||
}
|
||||
if (req->Info.BitDepthLuma != 8 || req->Info.BitDepthChroma != 8 ||
|
||||
req->Info.Shift || req->Info.FourCC != MFX_FOURCC_NV12 ||
|
||||
req->Info.ChromaFormat != MFX_CHROMAFORMAT_YUV420) {
|
||||
fprintf(stderr, "Unsupported surface properties.\n");
|
||||
return MFX_ERR_UNSUPPORTED;
|
||||
}
|
||||
|
||||
decode->surfaces = av_malloc_array (req->NumFrameSuggested, sizeof(*decode->surfaces));
|
||||
decode->surface_ids = av_malloc_array (req->NumFrameSuggested, sizeof(*decode->surface_ids));
|
||||
decode->surface_used = av_mallocz_array(req->NumFrameSuggested, sizeof(*decode->surface_used));
|
||||
if (!decode->surfaces || !decode->surface_ids || !decode->surface_used)
|
||||
goto fail;
|
||||
|
||||
err = vaCreateSurfaces(decode->va_dpy, VA_RT_FORMAT_YUV420,
|
||||
req->Info.Width, req->Info.Height,
|
||||
decode->surfaces, req->NumFrameSuggested,
|
||||
NULL, 0);
|
||||
if (err != VA_STATUS_SUCCESS) {
|
||||
fprintf(stderr, "Error allocating VA surfaces\n");
|
||||
goto fail;
|
||||
}
|
||||
decode->nb_surfaces = req->NumFrameSuggested;
|
||||
|
||||
for (i = 0; i < decode->nb_surfaces; i++)
|
||||
decode->surface_ids[i] = &decode->surfaces[i];
|
||||
|
||||
resp->mids = decode->surface_ids;
|
||||
resp->NumFrameActual = decode->nb_surfaces;
|
||||
|
||||
decode->frame_info = req->Info;
|
||||
|
||||
return MFX_ERR_NONE;
|
||||
fail:
|
||||
av_freep(&decode->surfaces);
|
||||
av_freep(&decode->surface_ids);
|
||||
av_freep(&decode->surface_used);
|
||||
|
||||
return MFX_ERR_MEMORY_ALLOC;
|
||||
}
|
||||
|
||||
static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
|
||||
{
|
||||
DecodeContext *decode = pthis;
|
||||
|
||||
if (decode->surfaces)
|
||||
vaDestroySurfaces(decode->va_dpy, decode->surfaces, decode->nb_surfaces);
|
||||
av_freep(&decode->surfaces);
|
||||
av_freep(&decode->surface_ids);
|
||||
av_freep(&decode->surface_used);
|
||||
decode->nb_surfaces = 0;
|
||||
|
||||
return MFX_ERR_NONE;
|
||||
}
|
||||
|
||||
static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
|
||||
{
|
||||
return MFX_ERR_UNSUPPORTED;
|
||||
}
|
||||
|
||||
static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
|
||||
{
|
||||
return MFX_ERR_UNSUPPORTED;
|
||||
}
|
||||
|
||||
static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
|
||||
{
|
||||
*hdl = mid;
|
||||
return MFX_ERR_NONE;
|
||||
}
|
||||
|
||||
static void free_buffer(void *opaque, uint8_t *data)
|
||||
{
|
||||
int *used = opaque;
|
||||
*used = 0;
|
||||
av_freep(&data);
|
||||
}
|
||||
|
||||
static int get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
|
||||
{
|
||||
DecodeContext *decode = avctx->opaque;
|
||||
|
||||
mfxFrameSurface1 *surf;
|
||||
AVBufferRef *surf_buf;
|
||||
int idx;
|
||||
|
||||
for (idx = 0; idx < decode->nb_surfaces; idx++) {
|
||||
if (!decode->surface_used[idx])
|
||||
break;
|
||||
}
|
||||
if (idx == decode->nb_surfaces) {
|
||||
fprintf(stderr, "No free surfaces\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
surf = av_mallocz(sizeof(*surf));
|
||||
if (!surf)
|
||||
return AVERROR(ENOMEM);
|
||||
surf_buf = av_buffer_create((uint8_t*)surf, sizeof(*surf), free_buffer,
|
||||
&decode->surface_used[idx], AV_BUFFER_FLAG_READONLY);
|
||||
if (!surf_buf) {
|
||||
av_freep(&surf);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
surf->Info = decode->frame_info;
|
||||
surf->Data.MemId = &decode->surfaces[idx];
|
||||
|
||||
frame->buf[0] = surf_buf;
|
||||
frame->data[3] = (uint8_t*)surf;
|
||||
|
||||
decode->surface_used[idx] = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
|
||||
{
|
||||
while (*pix_fmts != AV_PIX_FMT_NONE) {
|
||||
if (*pix_fmts == AV_PIX_FMT_QSV) {
|
||||
if (!avctx->hwaccel_context) {
|
||||
DecodeContext *decode = avctx->opaque;
|
||||
AVQSVContext *qsv = av_qsv_alloc_context();
|
||||
if (!qsv)
|
||||
return AV_PIX_FMT_NONE;
|
||||
|
||||
qsv->session = decode->mfx_session;
|
||||
qsv->iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
|
||||
|
||||
avctx->hwaccel_context = qsv;
|
||||
}
|
||||
|
||||
return AV_PIX_FMT_QSV;
|
||||
}
|
||||
|
||||
pix_fmts++;
|
||||
}
|
||||
|
||||
fprintf(stderr, "The QSV pixel format not offered in get_format()\n");
|
||||
|
||||
return AV_PIX_FMT_NONE;
|
||||
}
|
||||
|
||||
static int decode_packet(DecodeContext *decode, AVCodecContext *decoder_ctx,
|
||||
AVFrame *frame, AVPacket *pkt,
|
||||
AVIOContext *output_ctx)
|
||||
{
|
||||
int ret = 0;
|
||||
int got_frame = 1;
|
||||
|
||||
while (pkt->size > 0 || (!pkt->data && got_frame)) {
|
||||
ret = avcodec_decode_video2(decoder_ctx, frame, &got_frame, pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error during decoding\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
pkt->data += ret;
|
||||
pkt->size -= ret;
|
||||
|
||||
/* A real program would do something useful with the decoded frame here.
|
||||
* We just retrieve the raw data and write it to a file, which is rather
|
||||
* useless but pedagogic. */
|
||||
if (got_frame) {
|
||||
mfxFrameSurface1 *surf = (mfxFrameSurface1*)frame->data[3];
|
||||
VASurfaceID surface = *(VASurfaceID*)surf->Data.MemId;
|
||||
|
||||
VAImageFormat img_fmt = {
|
||||
.fourcc = VA_FOURCC_NV12,
|
||||
.byte_order = VA_LSB_FIRST,
|
||||
.bits_per_pixel = 8,
|
||||
.depth = 8,
|
||||
};
|
||||
|
||||
VAImage img;
|
||||
|
||||
VAStatus err;
|
||||
uint8_t *data;
|
||||
int i, j;
|
||||
|
||||
img.buf = VA_INVALID_ID;
|
||||
img.image_id = VA_INVALID_ID;
|
||||
|
||||
err = vaCreateImage(decode->va_dpy, &img_fmt,
|
||||
frame->width, frame->height, &img);
|
||||
if (err != VA_STATUS_SUCCESS) {
|
||||
fprintf(stderr, "Error creating an image: %s\n",
|
||||
vaErrorStr(err));
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
err = vaGetImage(decode->va_dpy, surface, 0, 0,
|
||||
frame->width, frame->height,
|
||||
img.image_id);
|
||||
if (err != VA_STATUS_SUCCESS) {
|
||||
fprintf(stderr, "Error getting an image: %s\n",
|
||||
vaErrorStr(err));
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
err = vaMapBuffer(decode->va_dpy, img.buf, (void**)&data);
|
||||
if (err != VA_STATUS_SUCCESS) {
|
||||
fprintf(stderr, "Error mapping the image buffer: %s\n",
|
||||
vaErrorStr(err));
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
for (i = 0; i < img.num_planes; i++)
|
||||
for (j = 0; j < (img.height >> (i > 0)); j++)
|
||||
avio_write(output_ctx, data + img.offsets[i] + j * img.pitches[i], img.width);
|
||||
|
||||
fail:
|
||||
if (img.buf != VA_INVALID_ID)
|
||||
vaUnmapBuffer(decode->va_dpy, img.buf);
|
||||
if (img.image_id != VA_INVALID_ID)
|
||||
vaDestroyImage(decode->va_dpy, img.image_id);
|
||||
av_frame_unref(frame);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
AVFormatContext *input_ctx = NULL;
|
||||
AVStream *video_st = NULL;
|
||||
AVCodecContext *decoder_ctx = NULL;
|
||||
const AVCodec *decoder;
|
||||
|
||||
AVPacket pkt = { 0 };
|
||||
AVFrame *frame = NULL;
|
||||
|
||||
DecodeContext decode = { NULL };
|
||||
|
||||
Display *dpy = NULL;
|
||||
int va_ver_major, va_ver_minor;
|
||||
|
||||
mfxIMPL mfx_impl = MFX_IMPL_AUTO_ANY;
|
||||
mfxVersion mfx_ver = { { 1, 1 } };
|
||||
|
||||
mfxFrameAllocator frame_allocator = {
|
||||
.pthis = &decode,
|
||||
.Alloc = frame_alloc,
|
||||
.Lock = frame_lock,
|
||||
.Unlock = frame_unlock,
|
||||
.GetHDL = frame_get_hdl,
|
||||
.Free = frame_free,
|
||||
};
|
||||
|
||||
AVIOContext *output_ctx = NULL;
|
||||
|
||||
int ret, i, err;
|
||||
|
||||
av_register_all();
|
||||
|
||||
if (argc < 3) {
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* open the input file */
|
||||
ret = avformat_open_input(&input_ctx, argv[1], NULL, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Cannot open input file '%s': ", argv[1]);
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* find the first H.264 video stream */
|
||||
for (i = 0; i < input_ctx->nb_streams; i++) {
|
||||
AVStream *st = input_ctx->streams[i];
|
||||
|
||||
if (st->codec->codec_id == AV_CODEC_ID_H264 && !video_st)
|
||||
video_st = st;
|
||||
else
|
||||
st->discard = AVDISCARD_ALL;
|
||||
}
|
||||
if (!video_st) {
|
||||
fprintf(stderr, "No H.264 video stream in the input file\n");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* initialize VA-API */
|
||||
dpy = XOpenDisplay(NULL);
|
||||
if (!dpy) {
|
||||
fprintf(stderr, "Cannot open the X display\n");
|
||||
goto finish;
|
||||
}
|
||||
decode.va_dpy = vaGetDisplay(dpy);
|
||||
if (!decode.va_dpy) {
|
||||
fprintf(stderr, "Cannot open the VA display\n");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
err = vaInitialize(decode.va_dpy, &va_ver_major, &va_ver_minor);
|
||||
if (err != VA_STATUS_SUCCESS) {
|
||||
fprintf(stderr, "Cannot initialize VA: %s\n", vaErrorStr(err));
|
||||
goto finish;
|
||||
}
|
||||
fprintf(stderr, "Initialized VA v%d.%d\n", va_ver_major, va_ver_minor);
|
||||
|
||||
/* initialize an MFX session */
|
||||
err = MFXInit(mfx_impl, &mfx_ver, &decode.mfx_session);
|
||||
if (err != MFX_ERR_NONE) {
|
||||
fprintf(stderr, "Error initializing an MFX session\n");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
MFXVideoCORE_SetHandle(decode.mfx_session, MFX_HANDLE_VA_DISPLAY, decode.va_dpy);
|
||||
MFXVideoCORE_SetFrameAllocator(decode.mfx_session, &frame_allocator);
|
||||
|
||||
/* initialize the decoder */
|
||||
decoder = avcodec_find_decoder_by_name("h264_qsv");
|
||||
if (!decoder) {
|
||||
fprintf(stderr, "The QSV decoder is not present in libavcodec\n");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
decoder_ctx = avcodec_alloc_context3(decoder);
|
||||
if (!decoder_ctx) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto finish;
|
||||
}
|
||||
decoder_ctx->codec_id = AV_CODEC_ID_H264;
|
||||
if (video_st->codec->extradata_size) {
|
||||
decoder_ctx->extradata = av_mallocz(video_st->codec->extradata_size +
|
||||
FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
if (!decoder_ctx->extradata) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto finish;
|
||||
}
|
||||
memcpy(decoder_ctx->extradata, video_st->codec->extradata,
|
||||
video_st->codec->extradata_size);
|
||||
decoder_ctx->extradata_size = video_st->codec->extradata_size;
|
||||
}
|
||||
decoder_ctx->refcounted_frames = 1;
|
||||
|
||||
decoder_ctx->opaque = &decode;
|
||||
decoder_ctx->get_buffer2 = get_buffer;
|
||||
decoder_ctx->get_format = get_format;
|
||||
|
||||
ret = avcodec_open2(decoder_ctx, NULL, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error opening the decoder: ");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* open the output stream */
|
||||
ret = avio_open(&output_ctx, argv[2], AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error opening the output context: ");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* actual decoding */
|
||||
while (ret >= 0) {
|
||||
ret = av_read_frame(input_ctx, &pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
if (pkt.stream_index == video_st->index)
|
||||
ret = decode_packet(&decode, decoder_ctx, frame, &pkt, output_ctx);
|
||||
|
||||
av_packet_unref(&pkt);
|
||||
}
|
||||
|
||||
/* flush the decoder */
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
ret = decode_packet(&decode, decoder_ctx, frame, &pkt, output_ctx);
|
||||
|
||||
finish:
|
||||
if (ret < 0) {
|
||||
char buf[1024];
|
||||
av_strerror(ret, buf, sizeof(buf));
|
||||
fprintf(stderr, "%s\n", buf);
|
||||
}
|
||||
|
||||
avformat_close_input(&input_ctx);
|
||||
|
||||
av_frame_free(&frame);
|
||||
|
||||
if (decode.mfx_session)
|
||||
MFXClose(decode.mfx_session);
|
||||
if (decode.va_dpy)
|
||||
vaTerminate(decode.va_dpy);
|
||||
if (dpy)
|
||||
XCloseDisplay(dpy);
|
||||
|
||||
if (decoder_ctx)
|
||||
av_freep(&decoder_ctx->hwaccel_context);
|
||||
avcodec_free_context(&decoder_ctx);
|
||||
|
||||
avio_close(output_ctx);
|
||||
|
||||
return ret;
|
||||
}
|
@@ -153,7 +153,7 @@ end:

/* close output */
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
avio_closep(&ofmt_ctx->pb);
avio_close(ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);

if (ret < 0 && ret != AVERROR_EOF) {
@@ -199,7 +199,8 @@ int main(int argc, char **argv)
fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);

end:
fclose(dst_file);
if (dst_file)
fclose(dst_file);

if (src_data)
av_freep(&src_data[0]);
@@ -132,7 +132,8 @@ int main(int argc, char **argv)
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);

end:
fclose(dst_file);
if (dst_file)
fclose(dst_file);
av_freep(&src_data[0]);
av_freep(&dst_data[0]);
sws_freeContext(sws_ctx);
@@ -41,16 +41,18 @@
|
||||
#include "libswresample/swresample.h"
|
||||
|
||||
/** The output bit rate in kbit/s */
|
||||
#define OUTPUT_BIT_RATE 96000
|
||||
#define OUTPUT_BIT_RATE 48000
|
||||
/** The number of output channels */
|
||||
#define OUTPUT_CHANNELS 2
|
||||
/** The audio sample output format */
|
||||
#define OUTPUT_SAMPLE_FORMAT AV_SAMPLE_FMT_S16
|
||||
|
||||
/**
|
||||
* Convert an error code into a text message.
|
||||
* @param error Error code to be converted
|
||||
* @return Corresponding error text (not thread-safe)
|
||||
*/
|
||||
static const char *get_error_text(const int error)
|
||||
static char *const get_error_text(const int error)
|
||||
{
|
||||
static char error_buffer[255];
|
||||
av_strerror(error, error_buffer, sizeof(error_buffer));
|
||||
@@ -167,7 +169,7 @@ static int open_output_file(const char *filename,
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/** Save the encoder context for easier access later. */
|
||||
/** Save the encoder context for easiert access later. */
|
||||
*output_codec_context = stream->codec;
|
||||
|
||||
/**
|
||||
@@ -177,16 +179,9 @@ static int open_output_file(const char *filename,
|
||||
(*output_codec_context)->channels = OUTPUT_CHANNELS;
|
||||
(*output_codec_context)->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
|
||||
(*output_codec_context)->sample_rate = input_codec_context->sample_rate;
|
||||
(*output_codec_context)->sample_fmt = output_codec->sample_fmts[0];
|
||||
(*output_codec_context)->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
(*output_codec_context)->bit_rate = OUTPUT_BIT_RATE;
|
||||
|
||||
/** Allow the use of the experimental AAC encoder */
|
||||
(*output_codec_context)->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
|
||||
|
||||
/** Set the sample rate for the container. */
|
||||
stream->time_base.den = input_codec_context->sample_rate;
|
||||
stream->time_base.num = 1;
|
||||
|
||||
/**
|
||||
* Some container formats (like MP4) require global headers to be present
|
||||
* Mark the encoder so that it behaves accordingly.
|
||||
@@ -204,7 +199,7 @@ static int open_output_file(const char *filename,
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
avio_closep(&(*output_format_context)->pb);
|
||||
avio_close((*output_format_context)->pb);
|
||||
avformat_free_context(*output_format_context);
|
||||
*output_format_context = NULL;
|
||||
return error < 0 ? error : AVERROR_EXIT;
|
||||
@@ -276,11 +271,10 @@ static int init_resampler(AVCodecContext *input_codec_context,
|
||||
}
|
||||
|
||||
/** Initialize a FIFO buffer for the audio samples to be encoded. */
|
||||
static int init_fifo(AVAudioFifo **fifo, AVCodecContext *output_codec_context)
|
||||
static int init_fifo(AVAudioFifo **fifo)
|
||||
{
|
||||
/** Create the FIFO buffer based on the specified output sample format. */
|
||||
if (!(*fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt,
|
||||
output_codec_context->channels, 1))) {
|
||||
if (!(*fifo = av_audio_fifo_alloc(OUTPUT_SAMPLE_FORMAT, OUTPUT_CHANNELS, 1))) {
|
||||
fprintf(stderr, "Could not allocate FIFO\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
@@ -312,7 +306,7 @@ static int decode_audio_frame(AVFrame *frame,
|
||||
|
||||
/** Read one audio frame from the input file into a temporary packet. */
|
||||
if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
|
||||
/** If we are at the end of the file, flush the decoder below. */
|
||||
/** If we are the the end of the file, flush the decoder below. */
|
||||
if (error == AVERROR_EOF)
|
||||
*finished = 1;
|
||||
else {
|
||||
@@ -543,9 +537,6 @@ static int init_output_frame(AVFrame **frame,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Global timestamp for the audio frames */
|
||||
static int64_t pts = 0;
|
||||
|
||||
/** Encode one frame worth of audio to the output file. */
|
||||
static int encode_audio_frame(AVFrame *frame,
|
||||
AVFormatContext *output_format_context,
|
||||
@@ -557,12 +548,6 @@ static int encode_audio_frame(AVFrame *frame,
|
||||
int error;
|
||||
init_packet(&output_packet);
|
||||
|
||||
/** Set a timestamp based on the sample rate for the container. */
|
||||
if (frame) {
|
||||
frame->pts = pts;
|
||||
pts += frame->nb_samples;
|
||||
}
|
||||
|
||||
/**
|
||||
* Encode the audio frame and store it in the temporary packet.
|
||||
* The output audio stream encoder is used to do this.
|
||||
@@ -674,7 +659,7 @@ int main(int argc, char **argv)
|
||||
&resample_context))
|
||||
goto cleanup;
|
||||
/** Initialize the FIFO buffer to store audio samples to be encoded. */
|
||||
if (init_fifo(&fifo, output_codec_context))
|
||||
if (init_fifo(&fifo))
|
||||
goto cleanup;
|
||||
/** Write the header of the output file container. */
|
||||
if (write_output_file_header(output_format_context))
|
||||
@@ -758,7 +743,7 @@ cleanup:
|
||||
if (output_codec_context)
|
||||
avcodec_close(output_codec_context);
|
||||
if (output_format_context) {
|
||||
avio_closep(&output_format_context->pb);
|
||||
avio_close(output_format_context->pb);
|
||||
avformat_free_context(output_format_context);
|
||||
}
|
||||
if (input_codec_context)
|
||||
|
@@ -389,9 +389,17 @@ static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, in
|
||||
|
||||
/* prepare packet for muxing */
|
||||
enc_pkt.stream_index = stream_index;
|
||||
av_packet_rescale_ts(&enc_pkt,
|
||||
ofmt_ctx->streams[stream_index]->codec->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base);
|
||||
enc_pkt.dts = av_rescale_q_rnd(enc_pkt.dts,
|
||||
ofmt_ctx->streams[stream_index]->codec->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base,
|
||||
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
enc_pkt.pts = av_rescale_q_rnd(enc_pkt.pts,
|
||||
ofmt_ctx->streams[stream_index]->codec->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base,
|
||||
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
enc_pkt.duration = av_rescale_q(enc_pkt.duration,
|
||||
ofmt_ctx->streams[stream_index]->codec->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base);
|
||||
|
||||
av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
|
||||
/* mux encoded frame */
|
||||
@@ -505,9 +513,14 @@ int main(int argc, char **argv)
ret = AVERROR(ENOMEM);
break;
}
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
ifmt_ctx->streams[stream_index]->codec->time_base);
packet.dts = av_rescale_q_rnd(packet.dts,
ifmt_ctx->streams[stream_index]->time_base,
ifmt_ctx->streams[stream_index]->codec->time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
packet.pts = av_rescale_q_rnd(packet.pts,
ifmt_ctx->streams[stream_index]->time_base,
ifmt_ctx->streams[stream_index]->codec->time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
avcodec_decode_audio4;
ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
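
As a hedged aside, the dec_func selection above works because both legacy decoders share one signature; a minimal sketch of that shared type (decode_func is an illustrative name):

#include <libavcodec/avcodec.h>

/* Illustrative sketch: the common legacy signature behind dec_func. */
typedef int (*decode_func)(AVCodecContext *avctx, AVFrame *frame,
                           int *got_frame, const AVPacket *pkt);

static const decode_func video_dec = avcodec_decode_video2;
static const decode_func audio_dec = avcodec_decode_audio4;

The call cut off at the hunk boundary is then completed with a got_frame flag and the packet being decoded.
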
@@ -529,9 +542,14 @@ int main(int argc, char **argv)
}
} else {
/* remux this frame without reencoding */
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
ofmt_ctx->streams[stream_index]->time_base);
packet.dts = av_rescale_q_rnd(packet.dts,
ifmt_ctx->streams[stream_index]->time_base,
ofmt_ctx->streams[stream_index]->time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
packet.pts = av_rescale_q_rnd(packet.pts,
ifmt_ctx->streams[stream_index]->time_base,
ofmt_ctx->streams[stream_index]->time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);

ret = av_interleaved_write_frame(ofmt_ctx, &packet);
if (ret < 0)
@@ -573,7 +591,7 @@ end:
av_free(filter_ctx);
avformat_close_input(&ifmt_ctx);
if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
avio_closep(&ofmt_ctx->pb);
avio_close(ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);

if (ret < 0)
87 doc/faq.texi
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle FFmpeg FAQ
|
||||
@titlepage
|
||||
@@ -91,56 +90,6 @@ To build FFmpeg, you need to install the development package. It is usually
|
||||
called @file{libfoo-dev} or @file{libfoo-devel}. You can remove it after the
|
||||
build is finished, but be sure to keep the main package.
|
||||
|
||||
@section How do I make @command{pkg-config} find my libraries?
|
||||
|
||||
Somewhere along with your libraries, there is a @file{.pc} file (or several)
|
||||
in a @file{pkgconfig} directory. You need to set environment variables to
|
||||
point @command{pkg-config} to these files.
|
||||
|
||||
If you need to @emph{add} directories to @command{pkg-config}'s search list
|
||||
(typical use case: library installed separately), add it to
|
||||
@code{$PKG_CONFIG_PATH}:
|
||||
|
||||
@example
|
||||
export PKG_CONFIG_PATH=/opt/x264/lib/pkgconfig:/opt/opus/lib/pkgconfig
|
||||
@end example
|
||||
|
||||
If you need to @emph{replace} @command{pkg-config}'s search list
|
||||
(typical use case: cross-compiling), set it in
|
||||
@code{$PKG_CONFIG_LIBDIR}:
|
||||
|
||||
@example
|
||||
export PKG_CONFIG_LIBDIR=/home/me/cross/usr/lib/pkgconfig:/home/me/cross/usr/local/lib/pkgconfig
|
||||
@end example
|
||||
|
||||
If you need to know the library's internal dependencies (typical use: static
|
||||
linking), add the @code{--static} option to @command{pkg-config}:
|
||||
|
||||
@example
|
||||
./configure --pkg-config-flags=--static
|
||||
@end example
|
||||
|
||||
@section How do I use @command{pkg-config} when cross-compiling?
|
||||
|
||||
The best way is to install @command{pkg-config} in your cross-compilation
|
||||
environment. It will automatically use the cross-compilation libraries.
|
||||
|
||||
You can also use @command{pkg-config} from the host environment by
|
||||
specifying explicitly @code{--pkg-config=pkg-config} to @command{configure}.
|
||||
In that case, you must point @command{pkg-config} to the correct directories
|
||||
using the @code{PKG_CONFIG_LIBDIR}, as explained in the previous entry.
|
||||
|
||||
As an intermediate solution, you can place in your cross-compilation
|
||||
environment a script that calls the host @command{pkg-config} with
|
||||
@code{PKG_CONFIG_LIBDIR} set. That script can look like that:
|
||||
|
||||
@example
|
||||
#!/bin/sh
|
||||
PKG_CONFIG_LIBDIR=/path/to/cross/lib/pkgconfig
|
||||
export PKG_CONFIG_LIBDIR
|
||||
exec /usr/bin/pkg-config "$@@"
|
||||
@end example
|
||||
|
||||
@chapter Usage
|
||||
|
||||
@section ffmpeg does not work; what is wrong?
|
||||
@@ -349,7 +298,7 @@ FFmpeg has a @url{http://ffmpeg.org/ffmpeg-protocols.html#concat,
|
||||
@code{concat}} protocol designed specifically for that, with examples in the
|
||||
documentation.
|
||||
|
||||
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow one to concatenate
|
||||
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to concatenate
|
||||
video by merely concatenating the files containing them.
|
||||
|
||||
Hence you may concatenate your multimedia files by first transcoding them to
|
||||
@@ -467,40 +416,6 @@ point acceptable for your tastes. The most common options to do that are
|
||||
@option{-qscale} and @option{-qmax}, but you should peruse the documentation
|
||||
of the encoder you chose.
|
||||
|
||||
@section I have a stretched video, why does scaling does not fix it?
|
||||
|
||||
A lot of video codecs and formats can store the @emph{aspect ratio} of the
|
||||
video: this is the ratio between the width and the height of either the full
|
||||
image (DAR, display aspect ratio) or individual pixels (SAR, sample aspect
|
||||
ratio). For example, EGA screens at resolution 640×350 had 4:3 DAR and 35:48
|
||||
SAR.
|
||||
|
||||
Most still image processing work with square pixels, i.e. 1:1 SAR, but a lot
|
||||
of video standards, especially from the analogic-numeric transition era, use
|
||||
non-square pixels.
|
||||
|
||||
Most processing filters in FFmpeg handle the aspect ratio to avoid
|
||||
stretching the image: cropping adjusts the DAR to keep the SAR constant,
|
||||
scaling adjusts the SAR to keep the DAR constant.
|
||||
|
||||
If you want to stretch, or “unstretch”, the image, you need to override the
|
||||
information with the
|
||||
@url{http://ffmpeg.org/ffmpeg-filters.html#setdar_002c-setsar, @code{setdar or setsar filters}}.
|
||||
|
||||
Do not forget to examine carefully the original video to check whether the
|
||||
stretching comes from the image or from the aspect ratio information.
|
||||
|
||||
For example, to fix a badly encoded EGA capture, use the following commands,
|
||||
either the first one to upscale to square pixels or the second one to set
|
||||
the correct aspect ratio or the third one to avoid transcoding (may not work
|
||||
depending on the format / codec / player / phase of the moon):
|
||||
|
||||
@example
|
||||
ffmpeg -i ega_screen.nut -vf scale=640:480,setsar=1 ega_screen_scaled.nut
|
||||
ffmpeg -i ega_screen.nut -vf setdar=4/3 ega_screen_anamorphic.nut
|
||||
ffmpeg -i ega_screen.nut -aspect 4/3 -c copy ega_screen_overridden.nut
|
||||
@end example
|
||||
|
||||
@chapter Development
|
||||
|
||||
@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle FFmpeg Automated Testing Environment
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle FFmpeg Bitstream Filters Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle FFmpeg Codecs Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle FFmpeg Devices Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle FFmpeg Filters Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle FFmpeg Formats Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle FFmpeg Protocols Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle FFmpeg Resampler Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle FFmpeg Scaler Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle FFmpeg Utilities Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle ffmpeg Documentation
|
||||
@titlepage
|
||||
@@ -340,7 +339,7 @@ ffmpeg -i in.avi -metadata title="my title" out.flv
|
||||
|
||||
To set the language of the first audio stream:
|
||||
@example
|
||||
ffmpeg -i INPUT -metadata:s:a:0 language=eng OUTPUT
|
||||
ffmpeg -i INPUT -metadata:s:a:1 language=eng OUTPUT
|
||||
@end example
|
||||
|
||||
@item -target @var{type} (@emph{output})
|
||||
@@ -361,7 +360,7 @@ ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg
|
||||
@end example
|
||||
|
||||
@item -dframes @var{number} (@emph{output})
|
||||
Set the number of data frames to output. This is an alias for @code{-frames:d}.
|
||||
Set the number of data frames to record. This is an alias for @code{-frames:d}.
|
||||
|
||||
@item -frames[:@var{stream_specifier}] @var{framecount} (@emph{output,per-stream})
|
||||
Stop writing to the stream after @var{framecount} frames.
|
||||
@@ -468,15 +467,12 @@ attachments.
|
||||
|
||||
@table @option
|
||||
@item -vframes @var{number} (@emph{output})
|
||||
Set the number of video frames to output. This is an alias for @code{-frames:v}.
|
||||
Set the number of video frames to record. This is an alias for @code{-frames:v}.
|
||||
@item -r[:@var{stream_specifier}] @var{fps} (@emph{input/output,per-stream})
|
||||
Set frame rate (Hz value, fraction or abbreviation).
|
||||
|
||||
As an input option, ignore any timestamps stored in the file and instead
|
||||
generate timestamps assuming constant frame rate @var{fps}.
|
||||
This is not the same as the @option{-framerate} option used for some input formats
|
||||
like image2 or v4l2 (it used to be the same in older versions of FFmpeg).
|
||||
If in doubt use @option{-framerate} instead of the input option @option{-r}.
|
||||
|
||||
As an output option, duplicate or drop input frames to achieve constant output
|
||||
frame rate @var{fps}.
|
||||
@@ -693,7 +689,7 @@ If this option is not specified, the default adapter is used.
|
||||
|
||||
@table @option
|
||||
@item -aframes @var{number} (@emph{output})
|
||||
Set the number of audio frames to output. This is an alias for @code{-frames:a}.
|
||||
Set the number of audio frames to record. This is an alias for @code{-frames:a}.
|
||||
@item -ar[:@var{stream_specifier}] @var{freq} (@emph{input/output,per-stream})
|
||||
Set the audio sampling frequency. For output streams it is set by
|
||||
default to the frequency of the corresponding input stream. For input
|
||||
@@ -825,11 +821,6 @@ To map all the streams except the second audio, use negative mappings
|
||||
ffmpeg -i INPUT -map 0 -map -0:a:1 OUTPUT
|
||||
@end example
|
||||
|
||||
To pick the English audio stream:
|
||||
@example
|
||||
ffmpeg -i INPUT -map 0:m:language:eng OUTPUT
|
||||
@end example
|
||||
|
||||
Note that using this option disables the default mappings for this output file.
|
||||
|
||||
@item -map_channel [@var{input_file_id}.@var{stream_specifier}.@var{channel_id}|-1][:@var{output_file_id}.@var{stream_specifier}]
|
||||
@@ -995,13 +986,6 @@ With -map you can select from which stream the timestamps should be
|
||||
taken. You can leave either video or audio unchanged and sync the
|
||||
remaining stream(s) to the unchanged one.
|
||||
|
||||
@item -frame_drop_threshold @var{parameter}
|
||||
Frame drop threshold, which specifies how much behind video frames can
|
||||
be before they are dropped. In frame rate units, so 1.0 is one frame.
|
||||
The default is -1.1. One possible usecase is to avoid framedrops in case
|
||||
of noisy timestamps or to increase frame drop precision in case of exact
|
||||
timestamps.
|
||||
|
||||
@item -async @var{samples_per_second}
|
||||
Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps,
|
||||
the parameter is the maximum samples per second by which the audio is changed.
|
||||
@@ -1024,12 +1008,6 @@ processing (e.g. in case the format option @option{avoid_negative_ts}
|
||||
is enabled) the output timestamps may mismatch with the input
|
||||
timestamps even when this option is selected.
|
||||
|
||||
@item -start_at_zero
|
||||
When used with @option{copyts}, shift input timestamps so they start at zero.
|
||||
|
||||
This means that using e.g. @code{-ss 50} will make output timestamps start at
|
||||
50 seconds, regardless of what timestamp the input file started at.
|
||||
|
||||
@item -copytb @var{mode}
|
||||
Specify how to set the encoder timebase when stream copying. @var{mode} is an
|
||||
integer numeric value, and can assume one of the following values:
|
||||
@@ -1158,12 +1136,6 @@ This option enables or disables accurate seeking in input files with the
|
||||
transcoding. Use @option{-noaccurate_seek} to disable it, which may be useful
|
||||
e.g. when copying some streams and transcoding the others.
|
||||
|
||||
@item -thread_message_queue @var{size} (@emph{input})
|
||||
This option sets the maximum number of queued packets when reading from the
|
||||
file or device. With low latency / high rate live streams, packets may be
|
||||
discarded if they are not read in a timely manner; raising this value can
|
||||
avoid it.
|
||||
|
||||
@item -override_ffserver (@emph{global})
|
||||
Overrides the input specifications from @command{ffserver}. Using this
|
||||
option you can map any input stream to @command{ffserver} and control
|
||||
@@ -1174,11 +1146,6 @@ requested by @command{ffserver}.
|
||||
The option is intended for cases where features are needed that cannot be
|
||||
specified to @command{ffserver} but can be to @command{ffmpeg}.
|
||||
|
||||
@item -sdp_file @var{file} (@emph{global})
|
||||
Print sdp information to @var{file}.
|
||||
This allows dumping sdp information when at least one output isn't an
|
||||
rtp stream.
|
||||
|
||||
@item -discard (@emph{input})
|
||||
Allows discarding specific streams or frames of streams at the demuxer.
|
||||
Not all demuxers support this.
|
||||
@@ -1228,10 +1195,7 @@ awkward to specify on the command line. Lines starting with the hash
|
||||
('#') character are ignored and are used to provide comments. Check
|
||||
the @file{presets} directory in the FFmpeg source tree for examples.
|
||||
|
||||
There are two types of preset files: ffpreset and avpreset files.
|
||||
|
||||
@subsection ffpreset files
|
||||
ffpreset files are specified with the @code{vpre}, @code{apre},
|
||||
Preset files are specified with the @code{vpre}, @code{apre},
|
||||
@code{spre}, and @code{fpre} options. The @code{fpre} option takes the
|
||||
filename of the preset instead of a preset name as input and can be
|
||||
used for any kind of codec. For the @code{vpre}, @code{apre}, and
|
||||
@@ -1256,26 +1220,6 @@ directories, where @var{codec_name} is the name of the codec to which
|
||||
the preset file options will be applied. For example, if you select
|
||||
the video codec with @code{-vcodec libvpx} and use @code{-vpre 1080p},
|
||||
then it will search for the file @file{libvpx-1080p.ffpreset}.
|
||||
|
||||
@subsection avpreset files
|
||||
avpreset files are specified with the @code{pre} option. They work similar to
|
||||
ffpreset files, but they only allow encoder- specific options. Therefore, an
|
||||
@var{option}=@var{value} pair specifying an encoder cannot be used.
|
||||
|
||||
When the @code{pre} option is specified, ffmpeg will look for files with the
|
||||
suffix .avpreset in the directories @file{$AVCONV_DATADIR} (if set), and
|
||||
@file{$HOME/.avconv}, and in the datadir defined at configuration time (usually
|
||||
@file{PREFIX/share/ffmpeg}), in that order.
|
||||
|
||||
First ffmpeg searches for a file named @var{codec_name}-@var{arg}.avpreset in
|
||||
the above-mentioned directories, where @var{codec_name} is the name of the codec
|
||||
to which the preset file options will be applied. For example, if you select the
|
||||
video codec with @code{-vcodec libvpx} and use @code{-pre 1080p}, then it will
|
||||
search for the file @file{libvpx-1080p.avpreset}.
|
||||
|
||||
If no such file is found, then ffmpeg will search for a file named
|
||||
@var{arg}.avpreset in the same directories.
|
||||
|
||||
@c man end OPTIONS
|
||||
|
||||
@chapter Tips
|
||||
@@ -1322,6 +1266,21 @@ quality).
|
||||
@chapter Examples
|
||||
@c man begin EXAMPLES
|
||||
|
||||
@section Preset files
|
||||
|
||||
A preset file contains a sequence of @var{option=value} pairs, one for
|
||||
each line, specifying a sequence of options which can be specified also on
|
||||
the command line. Lines starting with the hash ('#') character are ignored and
|
||||
are used to provide comments. Empty lines are also ignored. Check the
|
||||
@file{presets} directory in the FFmpeg source tree for examples.
|
||||
|
||||
Preset files are specified with the @code{pre} option, this option takes a
|
||||
preset name as input. FFmpeg searches for a file named @var{preset_name}.avpreset in
|
||||
the directories @file{$AVCONV_DATADIR} (if set), and @file{$HOME/.ffmpeg}, and in
|
||||
the data directory defined at configuration time (usually @file{$PREFIX/share/ffmpeg})
|
||||
in that order. For example, if the argument is @code{libx264-max}, it will
|
||||
search for the file @file{libx264-max.avpreset}.
|
||||
|
||||
@section Video and Audio grabbing
|
||||
|
||||
If you specify the input format and device then ffmpeg can grab video
|
||||
@@ -1491,11 +1450,11 @@ ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
|
||||
You can put many streams of the same type in the output:
|
||||
|
||||
@example
|
||||
ffmpeg -i test1.avi -i test2.avi -map 1:1 -map 1:0 -map 0:1 -map 0:0 -c copy -y test12.nut
|
||||
ffmpeg -i test1.avi -i test2.avi -map 0:3 -map 0:2 -map 0:1 -map 0:0 -c copy test12.nut
|
||||
@end example
|
||||
|
||||
The resulting output file @file{test12.nut} will contain the first four streams
|
||||
from the input files in reverse order.
|
||||
The resulting output file @file{test12.avi} will contain first four streams from
|
||||
the input file in reverse order.
|
||||
|
||||
@item
|
||||
To force CBR video output:
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle ffplay Documentation
|
||||
@titlepage
|
||||
@@ -38,14 +37,10 @@ Force displayed height.
|
||||
Set frame size (WxH or abbreviation), needed for videos which do
|
||||
not contain a header with the frame size like raw YUV. This option
|
||||
has been deprecated in favor of private options, try -video_size.
|
||||
@item -fs
|
||||
Start in fullscreen mode.
|
||||
@item -an
|
||||
Disable audio.
|
||||
@item -vn
|
||||
Disable video.
|
||||
@item -sn
|
||||
Disable subtitles.
|
||||
@item -ss @var{pos}
|
||||
Seek to a given position in seconds.
|
||||
@item -t @var{duration}
|
||||
@@ -114,10 +109,15 @@ duration, the codec parameters, the current position in the stream and
|
||||
the audio/video synchronisation drift. It is on by default, to
|
||||
explicitly disable it you need to specify @code{-nostats}.
|
||||
|
||||
@item -bug
|
||||
Work around bugs.
|
||||
@item -fast
|
||||
Non-spec-compliant optimizations.
|
||||
@item -genpts
|
||||
Generate pts.
|
||||
@item -rtp_tcp
|
||||
Force RTP/TCP protocol usage instead of RTP/UDP. It is only meaningful
|
||||
if you are streaming with the RTSP protocol.
|
||||
@item -sync @var{type}
|
||||
Set the master clock to audio (@code{type=audio}), video
|
||||
(@code{type=video}) or external (@code{type=ext}). Default is audio. The
|
||||
@@ -125,20 +125,23 @@ master clock is used to control audio-video synchronization. Most media
|
||||
players use audio as master clock, but in some cases (streaming or high
|
||||
quality broadcast) it is necessary to change that. This option is mainly
|
||||
used for debugging purposes.
|
||||
@item -ast @var{audio_stream_specifier}
|
||||
Select the desired audio stream using the given stream specifier. The stream
|
||||
specifiers are described in the @ref{Stream specifiers} chapter. If this option
|
||||
is not specified, the "best" audio stream is selected in the program of the
|
||||
already selected video stream.
|
||||
@item -vst @var{video_stream_specifier}
|
||||
Select the desired video stream using the given stream specifier. The stream
|
||||
specifiers are described in the @ref{Stream specifiers} chapter. If this option
|
||||
is not specified, the "best" video stream is selected.
|
||||
@item -sst @var{subtitle_stream_specifier}
|
||||
Select the desired subtitle stream using the given stream specifier. The stream
|
||||
specifiers are described in the @ref{Stream specifiers} chapter. If this option
|
||||
is not specified, the "best" subtitle stream is selected in the program of the
|
||||
already selected video or audio stream.
|
||||
@item -threads @var{count}
|
||||
Set the thread count.
|
||||
@item -ast @var{audio_stream_number}
|
||||
Select the desired audio stream number, counting from 0. The number
|
||||
refers to the list of all the input audio streams. If it is greater
|
||||
than the number of audio streams minus one, then the last one is
|
||||
selected, if it is negative the audio playback is disabled.
|
||||
@item -vst @var{video_stream_number}
|
||||
Select the desired video stream number, counting from 0. The number
|
||||
refers to the list of all the input video streams. If it is greater
|
||||
than the number of video streams minus one, then the last one is
|
||||
selected, if it is negative the video playback is disabled.
|
||||
@item -sst @var{subtitle_stream_number}
|
||||
Select the desired subtitle stream number, counting from 0. The number
|
||||
refers to the list of all the input subtitle streams. If it is greater
|
||||
than the number of subtitle streams minus one, then the last one is
|
||||
selected, if it is negative the subtitle rendering is disabled.
|
||||
@item -autoexit
|
||||
Exit when video is done playing.
|
||||
@item -exitonkeydown
|
||||
@@ -161,20 +164,8 @@ Force a specific video decoder.
|
||||
Force a specific subtitle decoder.
|
||||
|
||||
@item -autorotate
|
||||
Automatically rotate the video according to presentation metadata. Enabled by
|
||||
default, use @option{-noautorotate} to disable it.
|
||||
|
||||
@item -framedrop
|
||||
Drop video frames if video is out of sync. Enabled by default if the master
|
||||
clock is not set to video. Use this option to enable frame dropping for all
|
||||
master clock sources, use @option{-noframedrop} to disable it.
|
||||
|
||||
@item -infbuf
|
||||
Do not limit the input buffer size, read as much data as possible from the
|
||||
input as soon as possible. Enabled by default for realtime streams, where data
|
||||
may be dropped if not read in time. Use this option to enable infinite buffers
|
||||
for all inputs, use @option{-noinfbuf} to disable it.
|
||||
|
||||
Automatically rotate the video according to presentation metadata. Set by
|
||||
default, use -noautorotate to disable.
|
||||
@end table
|
||||
|
||||
@section While playing
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle ffprobe Documentation
|
||||
@titlepage
|
||||
@@ -120,10 +119,6 @@ Show payload data, as a hexadecimal and ASCII dump. Coupled with
|
||||
|
||||
The dump is printed as the "data" field. It may contain newlines.
|
||||
|
||||
@item -show_data_hash @var{algorithm}
|
||||
Show a hash of payload data, for packets with @option{-show_packets} and for
|
||||
codec extradata with @option{-show_streams}.
|
||||
|
||||
@item -show_error
|
||||
Show information about the error found when trying to probe the input.
|
||||
|
||||
@@ -185,7 +180,7 @@ format : stream=codec_type
|
||||
|
||||
To show all the tags in the stream and format sections:
|
||||
@example
|
||||
stream_tags : format_tags
|
||||
format_tags : format_tags
|
||||
@end example
|
||||
|
||||
To show only the @code{title} tag (if available) in the stream
|
||||
@@ -322,12 +317,6 @@ Show information related to program and library versions. This is the
|
||||
equivalent of setting both @option{-show_program_version} and
|
||||
@option{-show_library_versions} options.
|
||||
|
||||
@item -show_pixel_formats
|
||||
Show information about all pixel formats supported by FFmpeg.
|
||||
|
||||
Pixel format information for each format is printed within a section
|
||||
with name "PIXEL_FORMAT".
|
||||
|
||||
@item -bitexact
|
||||
Force bitexact output, useful to produce output which is not dependent
|
||||
on the specific build.
|
||||
|
@@ -10,10 +10,8 @@
|
||||
<xsd:sequence>
|
||||
<xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="pixel_formats" type="ffprobe:pixelFormatsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="packets_and_frames" type="ffprobe:packetsAndFramesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="chapters" type="ffprobe:chaptersType" minOccurs="0" maxOccurs="1" />
|
||||
@@ -37,16 +35,6 @@
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetsAndFramesType">
|
||||
<xsd:sequence>
|
||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
||||
<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:choice>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetType">
|
||||
<xsd:attribute name="codec_type" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="stream_index" type="xsd:int" use="required" />
|
||||
@@ -62,13 +50,11 @@
|
||||
<xsd:attribute name="pos" type="xsd:long" />
|
||||
<xsd:attribute name="flags" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="data" type="xsd:string" />
|
||||
<xsd:attribute name="data_hash" type="xsd:string" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:frameSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="media_type" type="xsd:string" use="required"/>
|
||||
@@ -105,16 +91,6 @@
|
||||
<xsd:attribute name="repeat_pict" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataListType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_data" type="ffprobe:frameSideDataType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="frameSideDataType">
|
||||
<xsd:attribute name="side_data_type" type="xsd:string"/>
|
||||
<xsd:attribute name="side_data_size" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="subtitleType">
|
||||
<xsd:attribute name="media_type" type="xsd:string" fixed="subtitle" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
@@ -166,7 +142,6 @@
|
||||
<xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="extradata" type="xsd:string" />
|
||||
<xsd:attribute name="extradata_hash" type="xsd:string" />
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:int"/>
|
||||
@@ -178,11 +153,7 @@
|
||||
<xsd:attribute name="level" type="xsd:int"/>
|
||||
<xsd:attribute name="color_range" type="xsd:string"/>
|
||||
<xsd:attribute name="color_space" type="xsd:string"/>
|
||||
<xsd:attribute name="color_transfer" type="xsd:string"/>
|
||||
<xsd:attribute name="color_primaries" type="xsd:string"/>
|
||||
<xsd:attribute name="chroma_location" type="xsd:string"/>
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
<xsd:attribute name="refs" type="xsd:int"/>
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
@@ -201,7 +172,6 @@
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="max_bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="bits_per_raw_sample" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_packets" type="xsd:int"/>
|
||||
@@ -254,6 +224,8 @@
|
||||
<xsd:complexType name="programVersionType">
|
||||
<xsd:attribute name="version" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="copyright" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_date" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_time" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="compiler_ident" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="configuration" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
@@ -291,45 +263,4 @@
|
||||
<xsd:element name="library_version" type="ffprobe:libraryVersionType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatFlagsType">
|
||||
<xsd:attribute name="big_endian" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="palette" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="bitstream" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="hwaccel" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="planar" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="rgb" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pseudopal" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="alpha" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatComponentType">
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="bit_depth" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatComponentsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="component" type="ffprobe:pixelFormatComponentType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="flags" type="ffprobe:pixelFormatFlagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="components" type="ffprobe:pixelFormatComponentsType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="nb_components" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="log2_chroma_w" type="xsd:int"/>
|
||||
<xsd:attribute name="log2_chroma_h" type="xsd:int"/>
|
||||
<xsd:attribute name="bits_per_pixel" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="pixel_format" type="ffprobe:pixelFormatType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
</xsd:schema>
|
||||
|
@@ -1,11 +1,11 @@
|
||||
# Port on which the server is listening. You must select a different
|
||||
# port from your standard HTTP web server if it is running on the same
|
||||
# computer.
|
||||
HTTPPort 8090
|
||||
Port 8090
|
||||
|
||||
# Address on which the server is bound. Only useful if you have
|
||||
# several network interfaces.
|
||||
HTTPBindAddress 0.0.0.0
|
||||
BindAddress 0.0.0.0
|
||||
|
||||
# Number of simultaneous HTTP connections that can be handled. It has
|
||||
# to be defined *before* the MaxClients parameter, since it defines the
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle ffserver Documentation
|
||||
@titlepage
|
||||
@@ -67,12 +66,12 @@ http://@var{ffserver_ip_address}:@var{http_port}/@var{feed_name}
|
||||
|
||||
where @var{ffserver_ip_address} is the IP address of the machine where
|
||||
@command{ffserver} is installed, @var{http_port} is the port number of
|
||||
the HTTP server (configured through the @option{HTTPPort} option), and
|
||||
the HTTP server (configured through the @option{Port} option), and
|
||||
@var{feed_name} is the name of the corresponding feed defined in the
|
||||
configuration file.
|
||||
|
||||
Each feed is associated to a file which is stored on disk. This stored
|
||||
file is used to send pre-recorded data to a player as fast as
|
||||
file is used to allow to send pre-recorded data to a player as fast as
|
||||
possible when new content is added in real-time to the stream.
|
||||
|
||||
A "live-stream" or "stream" is a resource published by
|
||||
@@ -102,7 +101,7 @@ http://@var{ffserver_ip_address}:@var{rtsp_port}/@var{stream_name}[@var{options}
|
||||
the configuration file. @var{options} is a list of options specified
|
||||
after the URL which affects how the stream is served by
|
||||
@command{ffserver}. @var{http_port} and @var{rtsp_port} are the HTTP
|
||||
and RTSP ports configured with the options @var{HTTPPort} and
|
||||
and RTSP ports configured with the options @var{Port} and
|
||||
@var{RTSPPort} respectively.
|
||||
|
||||
In case the stream is associated to a feed, the encoding parameters
|
||||
@@ -204,9 +203,11 @@ WARNING: trying to stream test1.mpg doesn't work with WMP as it tries to
|
||||
transfer the entire file before starting to play.
|
||||
The same is true of AVI files.
|
||||
|
||||
You should edit the @file{ffserver.conf} file to suit your needs (in
|
||||
terms of frame rates etc). Then install @command{ffserver} and
|
||||
@command{ffmpeg}, write a script to start them up, and off you go.
|
||||
@section What happens next?
|
||||
|
||||
You should edit the ffserver.conf file to suit your needs (in terms of
|
||||
frame rates etc). Then install ffserver and ffmpeg, write a script to start
|
||||
them up, and off you go.
|
||||
|
||||
@section What else can it do?
|
||||
|
||||
@@ -353,29 +354,20 @@ allow everybody else.
|
||||
|
||||
@section Global options
|
||||
@table @option
|
||||
@item HTTPPort @var{port_number}
|
||||
@item Port @var{port_number}
|
||||
@item RTSPPort @var{port_number}
|
||||
|
||||
@var{HTTPPort} sets the HTTP server listening TCP port number,
|
||||
@var{RTSPPort} sets the RTSP server listening TCP port number.
|
||||
|
||||
@var{Port} is the equivalent of @var{HTTPPort} and is deprecated.
|
||||
|
||||
You must select a different port from your standard HTTP web server if
|
||||
it is running on the same computer.
|
||||
Set TCP port number on which the HTTP/RTSP server is listening. You
|
||||
must select a different port from your standard HTTP web server if it
|
||||
is running on the same computer.
|
||||
|
||||
If not specified, no corresponding server will be created.
|
||||
|
||||
@item HTTPBindAddress @var{ip_address}
|
||||
@item BindAddress @var{ip_address}
|
||||
@item RTSPBindAddress @var{ip_address}
|
||||
Set address on which the HTTP/RTSP server is bound. Only useful if you
|
||||
have several network interfaces.
|
||||
|
||||
@var{BindAddress} is the equivalent of @var{HTTPBindAddress} and is
|
||||
deprecated.
|
||||
|
||||
@item MaxHTTPConnections @var{n}
|
||||
Set number of simultaneous HTTP connections that can be handled. It
|
||||
has to be defined @emph{before} the @option{MaxClients} parameter,
|
||||
@@ -409,12 +401,6 @@ ignored, and the log is written to standard output.
|
||||
Set no-daemon mode. This option is currently ignored since now
|
||||
@command{ffserver} will always work in no-daemon mode, and is
|
||||
deprecated.
|
||||
|
||||
@item UseDefaults
|
||||
@item NoDefaults
|
||||
Control whether default codec options are used for the all streams or not.
|
||||
Each stream may overwrite this setting for its own. Default is @var{UseDefaults}.
|
||||
The lastest occurrence overrides previous if multiple definitions.
|
||||
@end table
|
||||
|
||||
@section Feed section
|
||||
@@ -578,11 +564,6 @@ deprecated in favor of @option{Metadata}.
|
||||
@item Metadata @var{key} @var{value}
|
||||
Set metadata value on the output stream.
|
||||
|
||||
@item UseDefaults
|
||||
@item NoDefaults
|
||||
Control whether default codec options are used for the stream or not.
|
||||
Default is @var{UseDefaults} unless disabled globally.
|
||||
|
||||
@item NoAudio
|
||||
@item NoVideo
|
||||
Suppress audio/video.
|
||||
@@ -601,9 +582,8 @@ Set sampling frequency for audio. When using low bitrates, you should
|
||||
lower this frequency to 22050 or 11025. The supported frequencies
|
||||
depend on the selected audio codec.
|
||||
|
||||
@item AVOptionAudio [@var{codec}:]@var{option} @var{value} (@emph{encoding,audio})
|
||||
Set generic or private option for audio stream.
|
||||
Private option must be prefixed with codec name or codec must be defined before.
|
||||
@item AVOptionAudio @var{option} @var{value} (@emph{encoding,audio})
|
||||
Set generic option for audio stream.
|
||||
|
||||
@item AVPresetAudio @var{preset} (@emph{encoding,audio})
|
||||
Set preset for audio stream.
|
||||
@@ -680,9 +660,8 @@ Set video @option{qdiff} encoding option.
|
||||
@item DarkMask @var{float} (@emph{encoding,video})
|
||||
Set @option{lumi_mask}/@option{dark_mask} encoding options.
|
||||
|
||||
@item AVOptionVideo [@var{codec}:]@var{option} @var{value} (@emph{encoding,video})
|
||||
Set generic or private option for video stream.
|
||||
Private option must be prefixed with codec name or codec must be defined before.
|
||||
@item AVOptionVideo @var{option} @var{value} (@emph{encoding,video})
|
||||
Set generic option for video stream.
|
||||
|
||||
@item AVPresetVideo @var{preset} (@emph{encoding,video})
|
||||
Set preset for video stream.
|
||||
|
@@ -3,7 +3,7 @@ representing a number as input, which may be followed by one of the SI
|
||||
unit prefixes, for example: 'K', 'M', or 'G'.
|
||||
|
||||
If 'i' is appended to the SI unit prefix, the complete prefix will be
|
||||
interpreted as a unit prefix for binary multiples, which are based on
|
||||
interpreted as a unit prefix for binary multiplies, which are based on
|
||||
powers of 1024 instead of powers of 1000. Appending 'B' to the SI unit
|
||||
prefix multiplies the value by 8. This allows using, for example:
|
||||
'KB', 'MiB', 'G' and 'B' as number suffixes.
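
As an illustrative aside (the numbers below are worked examples, not taken from the text): under these rules 500K is parsed as 500 000, 500Ki as 512 000 (500 × 1024), and 64KB as 512 000 as well (64 × 1000 × 8).
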
@@ -46,13 +46,6 @@ in the program with the id @var{program_id}. Otherwise, it matches all streams i
|
||||
program.
|
||||
@item #@var{stream_id} or i:@var{stream_id}
|
||||
Match the stream by stream id (e.g. PID in MPEG-TS container).
|
||||
@item m:@var{key}[:@var{value}]
|
||||
Matches streams with the metadata tag @var{key} having the specified value. If
|
||||
@var{value} is not given, matches streams that contain the given tag with any
|
||||
value.
|
||||
|
||||
Note that in @command{ffmpeg}, matching by metadata will only work properly for
|
||||
input files.
|
||||
@end table
|
||||
|
||||
@section Generic options
|
||||
@@ -103,10 +96,7 @@ Print detailed information about the filter name @var{filter_name}. Use the
|
||||
Show version.
|
||||
|
||||
@item -formats
|
||||
Show available formats (including devices).
|
||||
|
||||
@item -devices
|
||||
Show available devices.
|
||||
Show available formats.
|
||||
|
||||
@item -codecs
|
||||
Show all codecs known to libavcodec.
|
||||
@@ -141,22 +131,6 @@ Show channel names and standard channel layouts.
|
||||
@item -colors
|
||||
Show recognized color names.
|
||||
|
||||
@item -sources @var{device}[,@var{opt1}=@var{val1}[,@var{opt2}=@var{val2}]...]
|
||||
Show autodetected sources of the intput device.
|
||||
Some devices may provide system-dependent source names that cannot be autodetected.
|
||||
The returned list cannot be assumed to be always complete.
|
||||
@example
|
||||
ffmpeg -sources pulse,server=192.168.0.4
|
||||
@end example
|
||||
|
||||
@item -sinks @var{device}[,@var{opt1}=@var{val1}[,@var{opt2}=@var{val2}]...]
|
||||
Show autodetected sinks of the output device.
|
||||
Some devices may provide system-dependent sink names that cannot be autodetected.
|
||||
The returned list cannot be assumed to be always complete.
|
||||
@example
|
||||
ffmpeg -sinks pulse,server=192.168.0.4
|
||||
@end example
|
||||
|
||||
@item -loglevel [repeat+]@var{loglevel} | -v [repeat+]@var{loglevel}
|
||||
Set the logging level used by the library.
|
||||
Adding "repeat+" indicates that repeated log output should not be compressed
|
||||
@@ -165,27 +139,27 @@ omitted. "repeat" can also be used alone.
|
||||
If "repeat" is used alone, and with no prior loglevel set, the default
|
||||
loglevel will be used. If multiple loglevel parameters are given, using
|
||||
'repeat' will not change the loglevel.
|
||||
@var{loglevel} is a string or a number containing one of the following values:
|
||||
@var{loglevel} is a number or a string containing one of the following values:
|
||||
@table @samp
|
||||
@item quiet, -8
|
||||
@item quiet
|
||||
Show nothing at all; be silent.
|
||||
@item panic, 0
|
||||
@item panic
|
||||
Only show fatal errors which could lead the process to crash, such as
|
||||
and assert failure. This is not currently used for anything.
|
||||
@item fatal, 8
|
||||
@item fatal
|
||||
Only show fatal errors. These are errors after which the process absolutely
|
||||
cannot continue after.
|
||||
@item error, 16
|
||||
@item error
|
||||
Show all errors, including ones which can be recovered from.
|
||||
@item warning, 24
|
||||
@item warning
|
||||
Show all warnings and errors. Any message related to possibly
|
||||
incorrect or unexpected events will be shown.
|
||||
@item info, 32
|
||||
@item info
|
||||
Show informative messages during processing. This is in addition to
|
||||
warnings and errors. This is the default value.
|
||||
@item verbose, 40
|
||||
@item verbose
|
||||
Same as @code{info}, except more verbose.
|
||||
@item debug, 48
|
||||
@item debug
|
||||
Show everything, including debugging information.
|
||||
@end table
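
As a hedged aside, the numeric values being added to this table match the AV_LOG_* constants from libavutil/log.h, so the same levels can be selected from code:

#include <libavutil/log.h>

/* Illustrative sketch: programmatic equivalents of -loglevel values. */
int main(void)
{
    av_log_set_level(AV_LOG_INFO);    /* 32, the default, same as -loglevel info */
    av_log_set_level(AV_LOG_DEBUG);   /* 48, same as -loglevel debug             */
    return 0;
}
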
|
||||
@@ -204,29 +178,21 @@ directory.
|
||||
This file can be useful for bug reports.
|
||||
It also implies @code{-loglevel verbose}.
|
||||
|
||||
Setting the environment variable @env{FFREPORT} to any value has the
|
||||
Setting the environment variable @code{FFREPORT} to any value has the
|
||||
same effect. If the value is a ':'-separated key=value sequence, these
|
||||
options will affect the report; option values must be escaped if they
|
||||
options will affect the report; options values must be escaped if they
|
||||
contain special characters or the options delimiter ':' (see the
|
||||
``Quoting and escaping'' section in the ffmpeg-utils manual).
|
||||
|
||||
The following options are recognized:
|
||||
``Quoting and escaping'' section in the ffmpeg-utils manual). The
|
||||
following option is recognized:
|
||||
@table @option
|
||||
@item file
|
||||
set the file name to use for the report; @code{%p} is expanded to the name
|
||||
of the program, @code{%t} is expanded to a timestamp, @code{%%} is expanded
|
||||
to a plain @code{%}
|
||||
@item level
|
||||
set the log verbosity level using a numerical value (see @code{-loglevel}).
|
||||
set the log level
|
||||
@end table
|
||||
|
||||
For example, to output a report to a file named @file{ffreport.log}
|
||||
using a log level of @code{32} (alias for log level @code{info}):
|
||||
|
||||
@example
|
||||
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
|
||||
@end example
|
||||
|
||||
Errors in parsing the environment variable are not fatal, and will not
|
||||
appear in the report.
|
||||
|
||||
@@ -261,14 +227,10 @@ Possible flags for this option are:
|
||||
@item sse4.1
|
||||
@item sse4.2
|
||||
@item avx
|
||||
@item avx2
|
||||
@item xop
|
||||
@item fma3
|
||||
@item fma4
|
||||
@item 3dnow
|
||||
@item 3dnowext
|
||||
@item bmi1
|
||||
@item bmi2
|
||||
@item cmov
|
||||
@end table
|
||||
@item ARM
|
||||
@@ -279,13 +241,6 @@ Possible flags for this option are:
|
||||
@item vfp
|
||||
@item vfpv3
|
||||
@item neon
|
||||
@item setend
|
||||
@end table
|
||||
@item AArch64
|
||||
@table @samp
|
||||
@item armv8
|
||||
@item vfp
|
||||
@item neon
|
||||
@end table
|
||||
@item PowerPC
|
||||
@table @samp
|
||||
@@ -305,41 +260,8 @@ Possible flags for this option are:
|
||||
@end table
|
||||
|
||||
@item -opencl_bench
|
||||
This option is used to benchmark all available OpenCL devices and print the
|
||||
results. This option is only available when FFmpeg has been compiled with
|
||||
@code{--enable-opencl}.
|
||||
|
||||
When FFmpeg is configured with @code{--enable-opencl}, the options for the
|
||||
global OpenCL context are set via @option{-opencl_options}. See the
|
||||
"OpenCL Options" section in the ffmpeg-utils manual for the complete list of
|
||||
supported options. Amongst others, these options include the ability to select
|
||||
a specific platform and device to run the OpenCL code on. By default, FFmpeg
|
||||
will run on the first device of the first platform. While the options for the
|
||||
global OpenCL context provide flexibility to the user in selecting the OpenCL
|
||||
device of their choice, most users would probably want to select the fastest
|
||||
OpenCL device for their system.
|
||||
|
||||
This option assists the selection of the most efficient configuration by
|
||||
identifying the appropriate device for the user's system. The built-in
|
||||
benchmark is run on all the OpenCL devices and the performance is measured for
|
||||
each device. The devices in the results list are sorted based on their
|
||||
performance with the fastest device listed first. The user can subsequently
|
||||
invoke @command{ffmpeg} using the device deemed most appropriate via
|
||||
@option{-opencl_options} to obtain the best performance for the OpenCL
|
||||
accelerated code.
|
||||
|
||||
Typical usage to use the fastest OpenCL device involve the following steps.
|
||||
|
||||
Run the command:
|
||||
@example
|
||||
ffmpeg -opencl_bench
|
||||
@end example
|
||||
Note down the platform ID (@var{pidx}) and device ID (@var{didx}) of the first
|
||||
i.e. fastest device in the list.
|
||||
Select the platform and device using the command:
|
||||
@example
|
||||
ffmpeg -opencl_options platform_idx=@var{pidx}:device_idx=@var{didx} ...
|
||||
@end example
|
||||
Benchmark all available OpenCL devices and show the results. This option
|
||||
is only available when FFmpeg has been compiled with @code{--enable-opencl}.
|
||||
|
||||
@item -opencl_options options (@emph{global})
|
||||
Set OpenCL environment options. This option is only available when
|
||||
|
1386 doc/filters.texi (file diff suppressed because it is too large)
@@ -23,7 +23,7 @@ Reduce buffering.
|
||||
|
||||
@item probesize @var{integer} (@emph{input})
|
||||
Set probing size in bytes, i.e. the size of the data to analyze to get
|
||||
stream information. A higher value will enable detecting more
|
||||
stream information. A higher value will allow to detect more
|
||||
information in case it is dispersed into the stream, but will increase
|
||||
latency. Must be an integer not lesser than 32. It is 5000000 by default.
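
As a hedged aside, probesize (and the analyzeduration option documented further down) can also be passed as AVOptions when opening an input; the helper below is an illustrative sketch with placeholder names, not part of this documentation.

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

/* Illustrative sketch: set probing options on an input before opening it;
 * the values mirror the documented defaults. */
static int open_with_probe_options(AVFormatContext **fmt_ctx, const char *filename)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "probesize", "5000000", 0);        /* bytes        */
    av_dict_set(&opts, "analyzeduration", "5000000", 0);  /* microseconds */

    ret = avformat_open_input(fmt_ctx, filename, NULL, &opts);
    av_dict_free(&opts);
    return ret;
}
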
@@ -55,10 +55,6 @@ Do not merge side data.
|
||||
Enable RTP MP4A-LATM payload.
|
||||
@item nobuffer
|
||||
Reduce the latency introduced by optional buffering
|
||||
@item bitexact
|
||||
Only write platform-, build- and time-independent data.
|
||||
This ensures that file and data checksums are reproducible and match between
|
||||
platforms. Its primary use is for regression testing.
|
||||
@end table
|
||||
|
||||
@item seek2any @var{integer} (@emph{input})
|
||||
@@ -67,7 +63,7 @@ Default is 0.
|
||||
|
||||
@item analyzeduration @var{integer} (@emph{input})
|
||||
Specify how many microseconds are analyzed to probe the input. A
|
||||
higher value will enable detecting more accurate information, but will
|
||||
higher value will allow to detect more accurate information, but will
|
||||
increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
|
||||
|
||||
@item cryptokey @var{hexadecimal string} (@emph{input})
|
||||
@@ -172,18 +168,6 @@ The offset is added by the muxer to the output timestamps.
|
||||
Specifying a positive offset means that the corresponding streams are
|
||||
delayed bt the time duration specified in @var{offset}. Default value
|
||||
is @code{0} (meaning that no offset is applied).
|
||||
|
||||
@item format_whitelist @var{list} (@emph{input})
|
||||
"," separated List of allowed demuxers. By default all are allowed.
|
||||
|
||||
@item dump_separator @var{string} (@emph{input})
|
||||
Separator used to separate the fields printed on the command line about the
|
||||
Stream parameters.
|
||||
For example to separate the fields with newlines and indention:
|
||||
@example
|
||||
ffprobe -dump_separator "
|
||||
" -i ~/videos/matrixbench_mpeg2.mpg
|
||||
@end example
|
||||
@end table
|
||||
|
||||
@c man end FORMAT OPTIONS
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle General Documentation
|
||||
@titlepage
|
||||
@@ -109,14 +108,6 @@ Go to @url{http://www.wavpack.com/} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libwavpack} to configure to
|
||||
enable it.
|
||||
|
||||
@section OpenH264
|
||||
|
||||
FFmpeg can make use of the OpenH264 library for H.264 encoding.
|
||||
|
||||
Go to @url{http://www.openh264.org/} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libopenh264} to configure to
|
||||
enable it.
|
||||
|
||||
@section x264
|
||||
|
||||
FFmpeg can make use of the x264 library for H.264 encoding.
|
||||
@@ -139,7 +130,7 @@ Go to @url{http://x265.org/developers.html} and follow the instructions
|
||||
for installing the library. Then pass @code{--enable-libx265} to configure
|
||||
to enable it.
|
||||
|
||||
@float NOTE
|
||||
@float note
|
||||
x265 is under the GNU Public License Version 2 or later
|
||||
(see @url{http://www.gnu.org/licenses/old-licenses/gpl-2.0.html} for
|
||||
details), you must upgrade FFmpeg's license to GPL in order to use it.
|
||||
@@ -152,7 +143,7 @@ by Google as part of the WebRTC project. libilbc is a packaging friendly
|
||||
copy of the iLBC codec. FFmpeg can make use of the libilbc library for
|
||||
iLBC encoding and decoding.
|
||||
|
||||
Go to @url{https://github.com/TimothyGu/libilbc} and follow the instructions for
|
||||
Go to @url{https://github.com/dekkers/libilbc} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libilbc} to configure to
|
||||
enable it.
|
||||
|
||||
@@ -252,8 +243,6 @@ library:
|
||||
@tab Used in the game Cyberia from Interplay.
|
||||
@item Delphine Software International CIN @tab @tab X
|
||||
@tab Multimedia format used by Delphine Software games.
|
||||
@item Digital Speech Standard (DSS) @tab @tab X
|
||||
@item Canopus HQX @tab @tab X
|
||||
@item CD+G @tab @tab X
|
||||
@tab Video format used by CD+G karaoke disks
|
||||
@item Phantom Cine @tab @tab X
|
||||
@@ -465,7 +454,6 @@ library:
|
||||
@item Sony Wave64 (W64) @tab X @tab X
|
||||
@item SoX native format @tab X @tab X
|
||||
@item SUN AU format @tab X @tab X
|
||||
@item SUP raw PGS subtitles @tab @tab X
|
||||
@item Text files @tab @tab X
|
||||
@item THP @tab @tab X
|
||||
@tab Used on the Nintendo GameCube.
|
||||
@@ -664,7 +652,7 @@ following image formats are supported:
|
||||
@item H.263 / H.263-1996 @tab X @tab X
|
||||
@item H.263+ / H.263-1998 / H.263 version 2 @tab X @tab X
|
||||
@item H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 @tab E @tab X
|
||||
@tab encoding supported through external library libx264 and OpenH264
|
||||
@tab encoding supported through external library libx264
|
||||
@item HEVC @tab X @tab X
|
||||
@tab encoding supported through the external library libx265
|
||||
@item HNM version 4 @tab @tab X
|
||||
@@ -900,7 +888,6 @@ following image formats are supported:
|
||||
@tab decoding supported through external library libcelt
|
||||
@item Delphine Software International CIN audio @tab @tab X
|
||||
@tab Codec used in Delphine Software International games.
|
||||
@item Digital Speech Standard - Standard Play mode (DSS SP) @tab @tab X
|
||||
@item Discworld II BMV Audio @tab @tab X
|
||||
@item COOK @tab @tab X
|
||||
@tab All versions except 5.1 are supported.
|
||||
@@ -1043,7 +1030,6 @@ performance on systems without hardware floating point support).
|
||||
@item PJS (Phoenix) @tab @tab X @tab @tab X
|
||||
@item RealText @tab @tab X @tab @tab X
|
||||
@item SAMI @tab @tab X @tab @tab X
|
||||
@item Spruce format (STL) @tab @tab X @tab @tab X
|
||||
@item SSA/ASS @tab X @tab X @tab X @tab X
|
||||
@item SubRip (SRT) @tab X @tab X @tab X @tab X
|
||||
@item SubViewer v1 @tab @tab X @tab @tab X
|
||||
@@ -1069,7 +1055,6 @@ performance on systems without hardware floating point support).
|
||||
@item HLS @tab X
|
||||
@item HTTP @tab X
|
||||
@item HTTPS @tab X
|
||||
@item Icecast @tab X
|
||||
@item MMSH @tab X
|
||||
@item MMST @tab X
|
||||
@item pipe @tab X
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Using git to develop FFmpeg
|
||||
|
||||
|
322 doc/indevs.texi
@@ -1,7 +1,7 @@
|
||||
@chapter Input Devices
|
||||
@c man begin INPUT DEVICES
|
||||
|
||||
Input devices are configured elements in FFmpeg which enable accessing
|
||||
Input devices are configured elements in FFmpeg which allow to access
|
||||
the data coming from a multimedia device attached to your system.
|
||||
|
||||
When you configure your FFmpeg build, all the supported input devices
|
||||
@@ -58,173 +58,38 @@ AVFoundation input device.
|
||||
AVFoundation is the currently recommended framework by Apple for streamgrabbing on OSX >= 10.7 as well as on iOS.
|
||||
The older QTKit framework has been marked deprecated since OSX version 10.7.
|
||||
|
||||
The input filename has to be given in the following syntax:
|
||||
@example
|
||||
-i "[[VIDEO]:[AUDIO]]"
|
||||
@end example
|
||||
The first entry selects the video input while the latter selects the audio input.
|
||||
The stream has to be specified by the device name or the device index as shown by the device list.
|
||||
Alternatively, the video and/or audio input device can be chosen by index using the
|
||||
@option{
|
||||
-video_device_index <INDEX>
|
||||
}
|
||||
and/or
|
||||
@option{
|
||||
-audio_device_index <INDEX>
|
||||
}
|
||||
, overriding any
|
||||
device name or index given in the input filename.
|
||||
|
||||
All available devices can be enumerated by using @option{-list_devices true}, listing
|
||||
all device names and corresponding indices.
|
||||
|
||||
There are two device name aliases:
|
||||
@table @code
|
||||
|
||||
@item default
|
||||
Select the AVFoundation default device of the corresponding type.
|
||||
|
||||
@item none
|
||||
Do not record the corresponding media type.
|
||||
This is equivalent to specifying an empty device name or index.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Options
|
||||
|
||||
AVFoundation supports the following options:
|
||||
|
||||
@table @option
|
||||
|
||||
@item -list_devices <TRUE|FALSE>
|
||||
If set to true, a list of all available input devices is given showing all
|
||||
device names and indices.
|
||||
|
||||
@item -video_device_index <INDEX>
|
||||
Specify the video device by its index. Overrides anything given in the input filename.
|
||||
|
||||
@item -audio_device_index <INDEX>
|
||||
Specify the audio device by its index. Overrides anything given in the input filename.
|
||||
|
||||
@item -pixel_format <FORMAT>
|
||||
Request the video device to use a specific pixel format.
|
||||
If the specified format is not supported, a list of available formats is given
|
||||
and the first one in this list is used instead. Available pixel formats are:
|
||||
@code{monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
|
||||
The filename passed as input is parsed to contain either a device name or index.
|
||||
The device index can also be given by using -video_device_index.
|
||||
A given device index will override any given device name.
|
||||
If the desired device consists of numbers only, use -video_device_index to identify it.
|
||||
The default device will be chosen if an empty string or the device name "default" is given.
|
||||
The available devices can be enumerated by using -list_devices.
|
||||
The pixel format can be set using -pixel_format.
|
||||
Available formats:
|
||||
monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
|
||||
bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
|
||||
yuv420p, nv12, yuyv422, gray}
|
||||
yuv420p, nv12, yuyv422, gray
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
Print the list of AVFoundation supported devices and exit:
|
||||
@example
|
||||
$ ffmpeg -f avfoundation -list_devices true -i ""
|
||||
ffmpeg -f avfoundation -i "0" out.mpg
|
||||
@end example
|
||||
|
||||
@item
|
||||
Record video from video device 0 and audio from audio device 0 into out.avi:
|
||||
@example
|
||||
$ ffmpeg -f avfoundation -i "0:0" out.avi
|
||||
ffmpeg -f avfoundation -video_device_index 0 -i "" out.mpg
|
||||
@end example
|
||||
|
||||
@item
|
||||
Record video from video device 2 and audio from audio device 1 into out.avi:
|
||||
@example
|
||||
$ ffmpeg -f avfoundation -video_device_index 2 -i ":1" out.avi
|
||||
ffmpeg -f avfoundation -pixel_format bgr0 -i "default" out.mpg
|
||||
@end example
|
||||
|
||||
@item
|
||||
Record video from the system default video device using the pixel format bgr0 and do not record any audio into out.avi:
|
||||
@example
|
||||
$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
|
||||
ffmpeg -f avfoundation -list_devices true -i ""
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section bktr
|
||||
|
||||
BSD video input device.
|
||||
|
||||
@section decklink
|
||||
|
||||
The decklink input device provides capture capabilities for Blackmagic
|
||||
DeckLink devices.
|
||||
|
||||
To enable this input device, you need the Blackmagic DeckLink SDK and you
|
||||
need to configure with the appropriate @code{--extra-cflags}
|
||||
and @code{--extra-ldflags}.
|
||||
On Windows, you need to run the IDL files through @command{widl}.
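
A minimal sketch of such a configure invocation (the SDK install paths are placeholders, and @code{--enable-decklink} is assumed to be the relevant switch for your version):
@example
./configure --enable-decklink \
    --extra-cflags=-I/path/to/decklink-sdk/include \
    --extra-ldflags=-L/path/to/decklink-sdk/lib
@end example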
|
||||
|
||||
DeckLink is very picky about the formats it supports. Pixel format is
|
||||
uyvy422 or v210, framerate and video size must be determined for your device with
|
||||
@command{-list_formats 1}. Audio sample rate is always 48 kHz and the number
|
||||
of channels can be 2, 8 or 16.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item list_devices
|
||||
If set to @option{true}, print a list of devices and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item list_formats
|
||||
If set to @option{true}, print a list of supported formats and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item bm_v210
|
||||
If set to @samp{1}, video is captured in 10 bit v210 instead
|
||||
of uyvy422. Not all Blackmagic devices support this option.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
List input devices:
|
||||
@example
|
||||
ffmpeg -f decklink -list_devices 1 -i dummy
|
||||
@end example
|
||||
|
||||
@item
|
||||
List supported formats:
|
||||
@example
|
||||
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 1080i50 (format 11):
|
||||
@example
|
||||
ffmpeg -f decklink -i 'Intensity Pro@@11' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 1080i50 10 bit:
|
||||
@example
|
||||
ffmpeg -bm_v210 1 -f decklink -i 'UltraStudio Mini Recorder@@11' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 720p50 with 32bit audio:
|
||||
@example
|
||||
ffmpeg -bm_audiodepth 32 -f decklink -i 'UltraStudio Mini Recorder@@14' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 576i50 with 8 audio channels:
|
||||
@example
|
||||
ffmpeg -bm_channels 8 -f decklink -i 'UltraStudio Mini Recorder@@3' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section dshow
|
||||
|
||||
Windows DirectShow input device.
|
||||
@@ -242,7 +107,7 @@ The input name should be in the format:
|
||||
@end example
|
||||
|
||||
where @var{TYPE} can be either @var{audio} or @var{video},
|
||||
and @var{NAME} is the device's name or alternative name.
|
||||
and @var{NAME} is the device's name.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@@ -295,61 +160,6 @@ Setting this value too low can degrade performance.
|
||||
See also
|
||||
@url{http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx}
|
||||
|
||||
@item video_pin_name
|
||||
Select video capture pin to use by name or alternative name.
|
||||
|
||||
@item audio_pin_name
|
||||
Select audio capture pin to use by name or alternative name.
|
||||
|
||||
@item crossbar_video_input_pin_number
|
||||
Select video input pin number for crossbar device. This will be
|
||||
routed to the crossbar device's Video Decoder output pin.
|
||||
Note that changing this value can affect future invocations
|
||||
(sets a new default) until system reboot occurs.
|
||||
|
||||
@item crossbar_audio_input_pin_number
|
||||
Select audio input pin number for crossbar device. This will be
|
||||
routed to the crossbar device's Audio Decoder output pin.
|
||||
Note that changing this value can affect future invocations
|
||||
(sets a new default) until system reboot occurs.
|
||||
|
||||
@item show_video_device_dialog
|
||||
If set to @option{true}, before capture starts, popup a display dialog
|
||||
to the end user, allowing them to change video filter properties
|
||||
and configurations manually.
|
||||
Note that for crossbar devices, adjusting values in this dialog
|
||||
may be needed at times to toggle between PAL (25 fps) and NTSC (29.97)
|
||||
input frame rates, sizes, interlacing, etc. Changing these values can
|
||||
enable different scan rates/frame rates and avoiding green bars at
|
||||
the bottom, flickering scan lines, etc.
|
||||
Note that with some devices, changing these properties can also affect future
|
||||
invocations (sets new defaults) until system reboot occurs.
|
||||
|
||||
@item show_audio_device_dialog
|
||||
If set to @option{true}, before capture starts, popup a display dialog
|
||||
to the end user, allowing them to change audio filter properties
|
||||
and configurations manually.
|
||||
|
||||
@item show_video_crossbar_connection_dialog
|
||||
If set to @option{true}, before capture starts, popup a display
|
||||
dialog to the end user, allowing them to manually
|
||||
modify crossbar pin routings, when it opens a video device.
|
||||
|
||||
@item show_audio_crossbar_connection_dialog
|
||||
If set to @option{true}, before capture starts, popup a display
|
||||
dialog to the end user, allowing them to manually
|
||||
modify crossbar pin routings, when it opens an audio device.
|
||||
|
||||
@item show_analog_tv_tuner_dialog
|
||||
If set to @option{true}, before capture starts, popup a display
|
||||
dialog to the end user, allowing them to manually
|
||||
modify TV channels and frequencies.
|
||||
|
||||
@item show_analog_tv_tuner_audio_dialog
|
||||
If set to @option{true}, before capture starts, popup a display
|
||||
dialog to the end user, allowing them to manually
|
||||
modify TV audio (like mono vs. stereo, Language A,B or C).
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -386,19 +196,6 @@ Print the list of supported options in selected device and exit:
|
||||
$ ffmpeg -list_options true -f dshow -i video="Camera"
|
||||
@end example
|
||||
|
||||
@item
|
||||
Specify pin names to capture by name or alternative name, specify alternative device name:
|
||||
@example
|
||||
$ ffmpeg -f dshow -audio_pin_name "Audio Out" -video_pin_name 2 -i video=video="@@device_pnp_\\?\pci#ven_1a0a&dev_6200&subsys_62021461&rev_01#4&e2c7dd6&0&00e1#@{65e8773d-8f56-11d0-a3b9-00a0c9223196@}\@{ca465100-deb0-4d59-818f-8c477184adf6@}":audio="Microphone"
|
||||
@end example
|
||||
|
||||
@item
|
||||
Configure a crossbar device, specifying crossbar pins, allow user to adjust video capture properties at startup:
|
||||
@example
|
||||
$ ffmpeg -f dshow -show_video_device_dialog true -crossbar_video_input_pin_number 0
|
||||
-crossbar_audio_input_pin_number 3 -i video="AVerMedia BDA Analog Capture":audio="AVerMedia BDA Analog Capture"
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section dv1394
|
||||
@@ -533,7 +330,7 @@ not work and result in undefined behavior.
|
||||
The values @option{auto}, @option{dv} and @option{hdv} are supported.
|
||||
|
||||
@item dvbuffer
|
||||
Set maximum size of buffer for incoming data, in frames. For DV, this
|
||||
Set maxiumum size of buffer for incoming data, in frames. For DV, this
|
||||
is an exact value. For HDV, it is not frame exact, since HDV does
|
||||
not have a fixed frame size.
|
||||
|
||||
@@ -638,14 +435,6 @@ generated by the device.
|
||||
The first unlabelled output is automatically assigned to the "out0"
|
||||
label, but all the others need to be specified explicitly.
|
||||
|
||||
The suffix "+subcc" can be appended to the output label to create an extra
|
||||
stream with the closed captions packets attached to that output
|
||||
(experimental; only for EIA-608 / CEA-708 for now).
|
||||
The subcc streams are created after all the normal streams, in the order of
|
||||
the corresponding stream.
|
||||
For example, if there is "out19+subcc", "out7+subcc" and up to "out42", the
|
||||
stream #43 is subcc for stream #7 and stream #44 is subcc for stream #19.
|
||||
|
||||
If not specified defaults to the filename specified for the input
|
||||
device.
|
||||
|
||||
@@ -692,63 +481,27 @@ Read an audio stream and a video stream and play it back with
|
||||
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
|
||||
@end example
|
||||
|
||||
@item
|
||||
Dump decoded frames to images and closed captions to a file (experimental):
|
||||
@example
|
||||
ffmpeg -f lavfi -i "movie=test.ts[out0+subcc]" -map v frame%08d.png -map s -c copy -f rawvideo subcc.bin
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section libcdio
|
||||
|
||||
Audio-CD input device based on libcdio.
|
||||
Audio-CD input device based on cdio.
|
||||
|
||||
To enable this input device during configuration you need libcdio
|
||||
installed on your system. It requires the configure option
|
||||
@code{--enable-libcdio}.
|
||||
installed on your system.
|
||||
|
||||
This device allows playing and grabbing from an Audio-CD.
|
||||
|
||||
For example to copy with @command{ffmpeg} the entire Audio-CD in @file{/dev/sr0},
|
||||
For example to copy with @command{ffmpeg} the entire Audio-CD in /dev/sr0,
|
||||
you may run the command:
|
||||
@example
|
||||
ffmpeg -f libcdio -i /dev/sr0 cd.wav
|
||||
@end example
|
||||
|
||||
@subsection Options
|
||||
@table @option
|
||||
@item speed
|
||||
Set drive reading speed. Default value is 0.
|
||||
|
||||
The speed is specified CD-ROM speed units. The speed is set through
|
||||
the libcdio @code{cdio_cddap_speed_set} function. On many CD-ROM
|
||||
drives, specifying a value too large will result in using the fastest
|
||||
speed.
|
||||
|
||||
@item paranoia_mode
|
||||
Set paranoia recovery mode flags. It accepts one of the following values:
|
||||
|
||||
@table @samp
|
||||
@item disable
|
||||
@item verify
|
||||
@item overlap
|
||||
@item neverskip
|
||||
@item full
|
||||
@end table
|
||||
|
||||
Default value is @samp{disable}.
|
||||
|
||||
For more information about the available recovery modes, consult the
|
||||
paranoia project documentation.
|
||||
@end table
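
As a sketch combining the options above (the drive path and option values are illustrative):
@example
ffmpeg -f libcdio -speed 4 -paranoia_mode verify -i /dev/sr0 cd.wav
@end example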
|
||||
|
||||
@section libdc1394
|
||||
|
||||
IIDC1394 input device, based on libdc1394 and libraw1394.
|
||||
|
||||
Requires the configure option @code{--enable-libdc1394}.
|
||||
|
||||
@section openal
|
||||
|
||||
The OpenAL input device provides audio capture on all systems with a
|
||||
@@ -1077,13 +830,6 @@ other filename will be interpreted as device number 0.
|
||||
|
||||
X11 video input device.
|
||||
|
||||
To enable this input device during configuration you need libxcb
|
||||
installed on your system. It will be automatically detected during
|
||||
configuration.
|
||||
|
||||
Alternatively, the configure option @option{--enable-x11grab} exists
|
||||
for legacy Xlib users.
|
||||
|
||||
This device allows one to capture a region of an X11 display.
|
||||
|
||||
The filename passed as input has the syntax:
|
||||
@@ -1100,12 +846,10 @@ omitted, and defaults to "localhost". The environment variable
|
||||
area with respect to the top-left border of the X11 screen. They
|
||||
default to 0.
|
||||
|
||||
Check the X11 documentation (e.g. @command{man X}) for more detailed
|
||||
information.
|
||||
Check the X11 documentation (e.g. man X) for more detailed information.
|
||||
|
||||
Use the @command{xdpyinfo} program for getting basic information about
|
||||
the properties of your X11 display (e.g. grep for "name" or
|
||||
"dimensions").
|
||||
Use the @command{dpyinfo} program for getting basic information about the
|
||||
properties of your X11 display (e.g. grep for "name" or "dimensions").
|
||||
|
||||
For example to grab from @file{:0.0} using @command{ffmpeg}:
|
||||
@example
|
||||
@@ -1154,10 +898,6 @@ If @var{show_region} is specified with @code{1}, then the grabbing
|
||||
region will be indicated on screen. With this option, it is easy to
|
||||
know what is being grabbed if only a portion of the screen is grabbed.
|
||||
|
||||
@item region_border
|
||||
Set the region border thickness if @option{-show_region 1} is used.
|
||||
Range is 1 to 128 and default is 3 (XCB-based x11grab only).
|
||||
|
||||
For example:
|
||||
@example
|
||||
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
|
||||
@@ -1170,22 +910,6 @@ ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_siz
|
||||
|
||||
@item video_size
|
||||
Set the video frame size. Default value is @code{vga}.
|
||||
|
||||
@item use_shm
|
||||
Use the MIT-SHM extension for shared memory. Default value is @code{1}.
|
||||
It may be necessary to disable it for remote displays (legacy x11grab
|
||||
only).
|
||||
@end table
|
||||
|
||||
@subsection @var{grab_x} @var{grab_y} AVOption
|
||||
|
||||
The syntax is:
|
||||
@example
|
||||
-grab_x @var{x_offset} -grab_y @var{y_offset}
|
||||
@end example
|
||||
|
||||
Set the grabbing region coordinates. They are expressed as offset from the top left
|
||||
corner of the X11 window. The default value is 0.
|
||||
|
||||
|
||||
@c man end INPUT DEVICES
|
||||
|
@@ -22,7 +22,7 @@ a mail for every change to every issue.
|
||||
(the above does all work already after light testing)
|
||||
|
||||
The subscription URL for the ffmpeg-trac list is:
|
||||
http(s)://lists.ffmpeg.org/mailman/listinfo/ffmpeg-trac
|
||||
http(s)://ffmpeg.org/mailman/listinfo/ffmpeg-trac
|
||||
The URL of the webinterface of the tracker is:
|
||||
http(s)://trac.ffmpeg.org
|
||||
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libavcodec Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libavdevice Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libavfilter Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libavformat Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libavutil Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libswresample Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libswscale Documentation
|
||||
@titlepage
|
||||
|
170
doc/muxers.texi
@@ -194,19 +194,15 @@ can not be smaller than one centi second.
|
||||
Apple HTTP Live Streaming muxer that segments MPEG-TS according to
|
||||
the HTTP Live Streaming (HLS) specification.
|
||||
|
||||
It creates a playlist file, and one or more segment files. The output filename
|
||||
specifies the playlist filename.
|
||||
|
||||
By default, the muxer creates a file for each segment produced. These files
|
||||
have the same name as the playlist, followed by a sequential number and a
|
||||
.ts extension.
|
||||
It creates a playlist file and numbered segment files. The output
|
||||
filename specifies the playlist filename; the segment filenames
|
||||
receive the same basename as the playlist, a sequential number and
|
||||
a .ts extension.
|
||||
|
||||
For example, to convert an input file with @command{ffmpeg}:
|
||||
@example
|
||||
ffmpeg -i in.nut out.m3u8
|
||||
@end example
|
||||
This example will produce the playlist, @file{out.m3u8}, and segment files:
|
||||
@file{out0.ts}, @file{out1.ts}, @file{out2.ts}, etc.
|
||||
|
||||
See also the @ref{segment} muxer, which provides a more generic and
|
||||
flexible implementation of a segmenter, and can be used to perform HLS
|
||||
@@ -224,11 +220,6 @@ Set the segment length in seconds. Default value is 2.
|
||||
Set the maximum number of playlist entries. If set to 0 the list file
|
||||
will contain all the segments. Default value is 5.
|
||||
|
||||
@item hls_ts_options @var{options_list}
|
||||
Set output format options using a :-separated list of key=value
|
||||
parameters. Values containing @code{:} special characters must be
|
||||
escaped.
|
||||
|
||||
@item hls_wrap @var{wrap}
|
||||
Set the number after which the segment filename number (the number
|
||||
specified in each segment file) wraps. If set to 0 the number will be
|
||||
@@ -242,9 +233,6 @@ to @var{wrap}.
|
||||
Start the playlist sequence number from @var{number}. Default value is
|
||||
0.
|
||||
|
||||
@item hls_allow_cache @var{allowcache}
|
||||
Explicitly set whether the client MAY (1) or MUST NOT (0) cache media segments.
|
||||
|
||||
@item hls_base_url @var{baseurl}
|
||||
Append @var{baseurl} to every entry in the playlist.
|
||||
Useful to generate playlists with absolute paths.
|
||||
@@ -253,30 +241,6 @@ Note that the playlist sequence number must be unique for each segment
|
||||
and it is not to be confused with the segment filename sequence number
|
||||
which can be cyclic, for example if the @option{wrap} option is
|
||||
specified.
|
||||
|
||||
@item hls_segment_filename @var{filename}
|
||||
Set the segment filename. Unless hls_flags single_file is set @var{filename}
|
||||
is used as a string format with the segment number:
|
||||
@example
|
||||
ffmpeg in.nut -hls_segment_filename 'file%03d.ts' out.m3u8
|
||||
@end example
|
||||
This example will produce the playlist, @file{out.m3u8}, and segment files:
|
||||
@file{file000.ts}, @file{file001.ts}, @file{file002.ts}, etc.
|
||||
|
||||
@item hls_flags single_file
|
||||
If this flag is set, the muxer will store all segments in a single MPEG-TS
|
||||
file, and will use byte ranges in the playlist. HLS playlists generated with
|
||||
this way will have the version number 4.
|
||||
For example:
|
||||
@example
|
||||
ffmpeg -i in.nut -hls_flags single_file out.m3u8
|
||||
@end example
|
||||
Will produce the playlist, @file{out.m3u8}, and a single segment file,
|
||||
@file{out.ts}.
|
||||
|
||||
@item hls_flags delete_segments
|
||||
Segment files removed from the playlist are deleted after a period of time
|
||||
equal to the duration of the segment plus the duration of the playlist.
|
||||
@end table
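
As a combined sketch of the options above (@file{in.nut} and the base URL are illustrative):
@example
ffmpeg -i in.nut -hls_time 4 -hls_list_size 10 -hls_base_url http://example.com/live/ out.m3u8
@end example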
|
||||
|
||||
@anchor{ico}
|
||||
@@ -381,7 +345,8 @@ ffmpeg -f v4l2 -r 1 -i /dev/video0 -f image2 -strftime 1 "%Y-%m-%d_%H-%M-%S.jpg"
|
||||
|
||||
@table @option
|
||||
@item start_number
|
||||
Start the sequence from the specified number. Default value is 0.
|
||||
Start the sequence from the specified number. Default value is 1. Must
|
||||
be a non-negative number.
|
||||
|
||||
@item update
|
||||
If set to 1, the filename will always be interpreted as just a
|
||||
@@ -571,6 +536,7 @@ a short portion of the file. With this option set, there is no initial
|
||||
mdat atom, and the moov atom only describes the tracks but has
|
||||
a zero duration.
|
||||
|
||||
Files written with this option set do not work in QuickTime.
|
||||
This option is implicitly set when writing ismv (Smooth Streaming) files.
|
||||
@item -movflags separate_moof
|
||||
Write a separate moof (movie fragment) atom for each track. Normally,
|
||||
@@ -585,22 +551,6 @@ This operation can take a while, and will not work in various situations such
|
||||
as fragmented output, thus it is not enabled by default.
|
||||
@item -movflags rtphint
|
||||
Add RTP hinting tracks to the output file.
|
||||
@item -movflags disable_chpl
|
||||
Disable Nero chapter markers (chpl atom). Normally, both Nero chapters
|
||||
and a QuickTime chapter track are written to the file. With this option
|
||||
set, only the QuickTime chapter track will be written. Nero chapters can
|
||||
cause failures when the file is reprocessed with certain tagging programs, like
|
||||
mp3Tag 2.61a and iTunes 11.3, most likely other versions are affected as well.
|
||||
@item -movflags omit_tfhd_offset
|
||||
Do not write any absolute base_data_offset in tfhd atoms. This avoids
|
||||
tying fragments to absolute byte positions in the file/streams.
|
||||
@item -movflags default_base_moof
|
||||
Similarly to the omit_tfhd_offset, this flag avoids writing the
|
||||
absolute base_data_offset field in tfhd atoms, but does so by using
|
||||
the new default-base-is-moof flag instead. This flag is new from
|
||||
14496-12:2012. This may make the fragments easier to parse in certain
|
||||
circumstances (avoiding basing track fragment location calculations
|
||||
on the implicit end of the previous track fragment).
|
||||
@end table
|
||||
|
||||
@subsection Example
|
||||
@@ -613,38 +563,29 @@ ffmpeg -re @var{<normal input/transcoding options>} -movflags isml+frag_keyframe
|
||||
|
||||
@section mp3
|
||||
|
||||
The MP3 muxer writes a raw MP3 stream with the following optional features:
|
||||
@itemize @bullet
|
||||
@item
|
||||
An ID3v2 metadata header at the beginning (enabled by default). Versions 2.3 and
|
||||
2.4 are supported, the @code{id3v2_version} private option controls which one is
|
||||
used (3 or 4). Setting @code{id3v2_version} to 0 disables the ID3v2 header
|
||||
completely.
|
||||
The MP3 muxer writes a raw MP3 stream with an ID3v2 header at the beginning and
|
||||
optionally an ID3v1 tag at the end. ID3v2.3 and ID3v2.4 are supported, the
|
||||
@code{id3v2_version} option controls which one is used. Setting
|
||||
@code{id3v2_version} to 0 will disable the ID3v2 header completely. The legacy
|
||||
ID3v1 tag is not written by default, but may be enabled with the
|
||||
@code{write_id3v1} option.
|
||||
|
||||
The muxer supports writing attached pictures (APIC frames) to the ID3v2 header.
|
||||
The pictures are supplied to the muxer in form of a video stream with a single
|
||||
packet. There can be any number of those streams, each will correspond to a
|
||||
single APIC frame. The stream metadata tags @var{title} and @var{comment} map
|
||||
to APIC @var{description} and @var{picture type} respectively. See
|
||||
The muxer may also write a Xing frame at the beginning, which contains the
|
||||
number of frames in the file. It is useful for computing duration of VBR files.
|
||||
The Xing frame is written if the output stream is seekable and if the
|
||||
@code{write_xing} option is set to 1 (the default).
|
||||
|
||||
The muxer supports writing ID3v2 attached pictures (APIC frames). The pictures
|
||||
are supplied to the muxer in form of a video stream with a single packet. There
|
||||
can be any number of those streams, each will correspond to a single APIC frame.
|
||||
The stream metadata tags @var{title} and @var{comment} map to APIC
|
||||
@var{description} and @var{picture type} respectively. See
|
||||
@url{http://id3.org/id3v2.4.0-frames} for allowed picture types.
|
||||
|
||||
Note that the APIC frames must be written at the beginning, so the muxer will
|
||||
buffer the audio frames until it gets all the pictures. It is therefore advised
|
||||
to provide the pictures as soon as possible to avoid excessive buffering.
|
||||
|
||||
@item
|
||||
A Xing/LAME frame right after the ID3v2 header (if present). It is enabled by
|
||||
default, but will be written only if the output is seekable. The
|
||||
@code{write_xing} private option can be used to disable it. The frame contains
|
||||
various information that may be useful to the decoder, like the audio duration
|
||||
or encoder delay.
|
||||
|
||||
@item
|
||||
A legacy ID3v1 tag at the end of the file (disabled by default). It may be
|
||||
enabled with the @code{write_id3v1} private option, but as its capabilities are
|
||||
very limited, its usage is not recommended.
|
||||
@end itemize
|
||||
|
||||
Examples:
|
||||
|
||||
Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
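(A sketch using the @code{id3v2_version} and @code{write_id3v1} options described above; @file{input.flac} is only an illustrative input file.)
@example
ffmpeg -i input.flac -id3v2_version 3 -write_id3v1 1 out.mp3
@end example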
|
||||
@@ -689,9 +630,6 @@ Set the transport_stream_id (default 0x0001). This identifies a
|
||||
transponder in DVB.
|
||||
@item -mpegts_service_id @var{number}
|
||||
Set the service_id (default 0x0001) also known as program in DVB.
|
||||
@item -mpegts_service_type @var{number}
|
||||
Set the program service_type (default @var{digital_tv}), see below
|
||||
a list of pre defined values.
|
||||
@item -mpegts_pmt_start_pid @var{number}
|
||||
Set the first PID for PMT (default 0x1000, max 0x1f00).
|
||||
@item -mpegts_start_pid @var{number}
|
||||
@@ -726,27 +664,6 @@ ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
|
||||
@end example
|
||||
@end table
|
||||
|
||||
Option mpegts_service_type accepts the following values:
|
||||
|
||||
@table @option
|
||||
@item hex_value
|
||||
Any hexadecimal value between 0x01 and 0xff as defined in ETSI 300 468.
|
||||
@item digital_tv
|
||||
Digital TV service.
|
||||
@item digital_radio
|
||||
Digital Radio service.
|
||||
@item teletext
|
||||
Teletext service.
|
||||
@item advanced_codec_digital_radio
|
||||
Advanced Codec Digital Radio service.
|
||||
@item mpeg2_digital_hdtv
|
||||
MPEG2 Digital HDTV service.
|
||||
@item advanced_codec_digital_sdtv
|
||||
Advanced Codec Digital SDTV service.
|
||||
@item advanced_codec_digital_hdtv
|
||||
Advanced Codec Digital HDTV service.
|
||||
@end table
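
A sketch of selecting one of the values above on the command line, in builds where @option{mpegts_service_type} is available:
@example
ffmpeg -i input.ts -c copy -f mpegts -mpegts_service_type advanced_codec_digital_sdtv out.ts
@end example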
|
||||
|
||||
Option mpegts_flags may take a set of such flags:
|
||||
|
||||
@table @option
|
||||
@@ -804,7 +721,7 @@ Change the syncpoint usage in nut:
|
||||
sensitive and seeking is not possible. Also in general the overhead from
|
||||
syncpoints is negligible. Note, -@code{write_index} 0 can be used to disable
|
||||
all growing data tables, allowing to mux endless streams with limited memory
|
||||
and without these disadvantages.
|
||||
and wihout these disadvantages.
|
||||
@item @var{timestamped} extend the syncpoint with a wallclock field.
|
||||
@end table
|
||||
The @var{none} and @var{timestamped} flags are experimental.
|
||||
@@ -829,11 +746,6 @@ is 1 second. A value of 0 will fill all segments, making pages as large as
|
||||
possible. A value of 1 will effectively use 1 packet-per-page in most
|
||||
situations, giving a small seek granularity at the cost of additional container
|
||||
overhead.
|
||||
@item -serial_offset @var{value}
|
||||
Serial value from which to set the streams serial number.
|
||||
Setting it to different and sufficiently large values ensures that the produced
|
||||
ogg files can be safely chained.
|
||||
|
||||
@end table
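
As a sketch of producing two chainable Ogg files (the offset values are arbitrary but far apart; concatenating with @command{cat} is only illustrative):
@example
ffmpeg -i part1.wav -serial_offset 1000 part1.ogg
ffmpeg -i part2.wav -serial_offset 2000 part2.ogg
cat part1.ogg part2.ogg > chained.ogg
@end example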
|
||||
|
||||
@anchor{segment}
|
||||
@@ -842,9 +754,8 @@ ogg files can be safely chained.
|
||||
Basic stream segmenter.
|
||||
|
||||
This muxer outputs streams to a number of separate files of nearly
|
||||
fixed duration. Output filename pattern can be set in a fashion
|
||||
similar to @ref{image2}, or by using a @code{strftime} template if
|
||||
the @option{strftime} option is enabled.
|
||||
fixed duration. Output filename pattern can be set in a fashion similar to
|
||||
@ref{image2}.
|
||||
|
||||
@code{stream_segment} is a variant of the muxer used to write to
|
||||
streaming output formats, i.e. which do not require global headers,
|
||||
@@ -887,11 +798,6 @@ reference stream. The default value is @code{auto}.
|
||||
Override the inner container format, by default it is guessed by the filename
|
||||
extension.
|
||||
|
||||
@item segment_format_options @var{options_list}
|
||||
Set output format options using a :-separated list of key=value
|
||||
parameters. Values containing the @code{:} special character must be
|
||||
escaped.
|
||||
|
||||
@item segment_list @var{name}
|
||||
Generate also a listfile named @var{name}. If not specified no
|
||||
listfile is generated.
|
||||
@@ -916,7 +822,7 @@ Select the listing format.
|
||||
@end table
|
||||
|
||||
@item segment_list_size @var{size}
|
||||
Update the list file so that it contains at most @var{size}
|
||||
Update the list file so that it contains at most the last @var{size}
|
||||
segments. If 0 the list file will contain all the segments. Default
|
||||
value is 0.
|
||||
|
||||
@@ -1024,12 +930,6 @@ Wrap around segment index once it reaches @var{limit}.
|
||||
@item segment_start_number @var{number}
|
||||
Set the sequence number of the first segment. Defaults to @code{0}.
|
||||
|
||||
@item strftime @var{1|0}
|
||||
Use the @code{strftime} function to define the name of the new
|
||||
segments to write. If this is selected, the output segment name must
|
||||
contain a @code{strftime} function template. Default value is
|
||||
@code{0}.
|
||||
|
||||
@item reset_timestamps @var{1|0}
|
||||
Reset timestamps at the begin of each segment, so that each segment
|
||||
will start with near-zero timestamps. It is meant to ease the playback
|
||||
@@ -1045,7 +945,7 @@ argument must be a time duration specification, and defaults to 0.
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Remux the content of file @file{in.mkv} to a list of segments
|
||||
To remux the content of file @file{in.mkv} to a list of segments
|
||||
@file{out-000.nut}, @file{out-001.nut}, etc., and write the list of
|
||||
generated segments to @file{out.list}:
|
||||
@example
|
||||
@@ -1053,20 +953,14 @@ ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.list out%03d.nu
|
||||
@end example
|
||||
|
||||
@item
|
||||
Segment input and set output format options for the output segments:
|
||||
@example
|
||||
ffmpeg -i in.mkv -f segment -segment_time 10 -segment_format_options movflags=+faststart out%03d.mp4
|
||||
@end example
|
||||
|
||||
@item
|
||||
Segment the input file according to the split points specified by the
|
||||
@var{segment_times} option:
|
||||
As the example above, but segment the input file according to the split
|
||||
points specified by the @var{segment_times} option:
|
||||
@example
|
||||
ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_times 1,2,3,5,8,13,21 out%03d.nut
|
||||
@end example
|
||||
|
||||
@item
|
||||
Use the @command{ffmpeg} @option{force_key_frames}
|
||||
As the example above, but use the @command{ffmpeg} @option{force_key_frames}
|
||||
option to force key frames in the input at the specified location, together
|
||||
with the segment option @option{segment_time_delta} to account for
|
||||
possible roundings operated when setting key frame times.
|
||||
@@ -1085,7 +979,7 @@ ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_fr
|
||||
@end example
|
||||
|
||||
@item
|
||||
Convert the @file{in.mkv} to TS segments using the @code{libx264}
|
||||
To convert the @file{in.mkv} to TS segments using the @code{libx264}
|
||||
and @code{libfaac} encoders:
|
||||
@example
|
||||
ffmpeg -i in.mkv -map 0 -codec:v libx264 -codec:a libfaac -f ssegment -segment_list out.list out%03d.ts
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle NUT
|
||||
|
||||
|
@@ -191,11 +191,6 @@ __asm__() block.
|
||||
Use external asm (nasm/yasm) or inline asm (__asm__()), do not use intrinsics.
|
||||
The latter requires a good optimizing compiler which gcc is not.
|
||||
|
||||
When debugging a x86 external asm compilation issue, if lost in the macro
|
||||
expansions, add DBG=1 to your make command-line: the input file will be
|
||||
preprocessed, stripped of the debug/empty lines, then compiled, showing the
|
||||
actual lines causing issues.
|
||||
|
||||
Inline asm vs. external asm
|
||||
---------------------------
|
||||
Both inline asm (__asm__("..") in a .c file, handled by a compiler such as gcc)
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Platform Specific Information
|
||||
@titlepage
|
||||
@@ -36,9 +35,6 @@ to your project LDFLAGS:
|
||||
-Wl,-Bsymbolic
|
||||
@end example
|
||||
|
||||
If your target platform requires position independent binaries, you should
|
||||
pass the correct linking flag (e.g. @code{-pie}) to @code{--extra-ldexeflags}.
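
A minimal sketch of such a configure call (whether @code{-pie} is the right flag depends on your toolchain):
@example
./configure --extra-ldexeflags="-pie"
@end example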
|
||||
|
||||
@section BSD
|
||||
|
||||
BSD make will not build FFmpeg, you need to install and use GNU Make
|
||||
@@ -97,9 +93,9 @@ the FFmpeg Windows Help Forum at @url{http://ffmpeg.zeranoe.com/forum/}.
|
||||
|
||||
@section Native Windows compilation using MinGW or MinGW-w64
|
||||
|
||||
FFmpeg can be built to run natively on Windows using the MinGW-w64
|
||||
toolchain. Install the latest versions of MSYS2 and MinGW-w64 from
|
||||
@url{http://msys2.github.io/} and/or @url{http://mingw-w64.sourceforge.net/}.
|
||||
FFmpeg can be built to run natively on Windows using the MinGW or MinGW-w64
|
||||
toolchains. Install the latest versions of MSYS and MinGW or MinGW-w64 from
|
||||
@url{http://www.mingw.org/} or @url{http://mingw-w64.sourceforge.net/}.
|
||||
You can find detailed installation instructions in the download section and
|
||||
the FAQ.
|
||||
|
||||
@@ -107,7 +103,7 @@ Notes:
|
||||
|
||||
@itemize
|
||||
|
||||
@item Building natively using MSYS2 can be sped up by disabling implicit rules
|
||||
@item Building natively using MSYS can be sped up by disabling implicit rules
|
||||
in the Makefile by calling @code{make -r} instead of plain @code{make}. This
|
||||
speed up is close to non-existent for normal one-off builds and is only
|
||||
noticeable when running make for a second time (for example during
|
||||
@@ -134,12 +130,13 @@ You will need the following prerequisites:
|
||||
(if using MSVC 2012 or earlier)
|
||||
@item @uref{http://code.google.com/p/msinttypes/, msinttypes}
|
||||
(if using MSVC 2012 or earlier)
|
||||
@item @uref{http://msys2.github.io/, MSYS2}
|
||||
@item @uref{http://www.mingw.org/, MSYS}
|
||||
@item @uref{http://yasm.tortall.net/, YASM}
|
||||
(Also available via MSYS2's package manager.)
|
||||
@item @uref{http://gnuwin32.sourceforge.net/packages/bc.htm, bc for Windows} if
|
||||
you want to run @uref{fate.html, FATE}.
|
||||
@end itemize
|
||||
|
||||
To set up a proper environment in MSYS2, you need to run @code{msys_shell.bat} from
|
||||
To set up a proper environment in MSYS, you need to run @code{msys.bat} from
|
||||
the Visual Studio or Intel Compiler command prompt.
|
||||
|
||||
Place @code{yasm.exe} somewhere in your @code{PATH}. If using MSVC 2012 or
|
||||
@@ -278,12 +275,12 @@ llrint() in its C library.
|
||||
Install your Cygwin with all the "Base" packages, plus the
|
||||
following "Devel" ones:
|
||||
@example
|
||||
binutils, gcc4-core, make, git, mingw-runtime, texinfo
|
||||
binutils, gcc4-core, make, git, mingw-runtime, texi2html
|
||||
@end example
|
||||
|
||||
In order to run FATE you will also need the following "Utils" packages:
|
||||
@example
|
||||
diffutils
|
||||
bc, diffutils
|
||||
@end example
|
||||
|
||||
If you want to build FFmpeg with additional libraries, download Cygwin
|
||||
|
@@ -26,10 +26,6 @@
|
||||
#include <string.h>
|
||||
#include <float.h>
|
||||
|
||||
// print_options is build for the host, os_support.h isn't needed and is setup
|
||||
// for the target. without this build breaks on mingw
|
||||
#define AVFORMAT_OS_SUPPORT_H
|
||||
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavformat/options_table.h"
|
||||
#include "libavcodec/avcodec.h"
|
||||
|
@@ -63,7 +63,7 @@ cache:@var{URL}
|
||||
|
||||
Physical concatenation protocol.
|
||||
|
||||
Read and seek from many resources in sequence as if they were
|
||||
Allow to read and seek from many resource in sequence as if they were
|
||||
a unique resource.
|
||||
|
||||
A URL accepted by this protocol has the syntax:
|
||||
@@ -117,7 +117,7 @@ ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP/////////////
|
||||
|
||||
File access protocol.
|
||||
|
||||
Read from or write to a file.
|
||||
Allow to read from or write to a file.
|
||||
|
||||
A file URL can have the form:
|
||||
@example
|
||||
@@ -155,7 +155,7 @@ time, which is valuable for files on slow medium.
|
||||
|
||||
FTP (File Transfer Protocol).
|
||||
|
||||
Read from or write to remote resources using FTP protocol.
|
||||
Allow to read from or write to remote resources using FTP protocol.
|
||||
|
||||
Following syntax is required.
|
||||
@example
|
||||
@@ -255,7 +255,7 @@ Export the MIME type.
|
||||
If set to 1 request ICY (SHOUTcast) metadata from the server. If the server
|
||||
supports this, the metadata has to be retrieved by the application by reading
|
||||
the @option{icy_metadata_headers} and @option{icy_metadata_packet} options.
|
||||
The default is 1.
|
||||
The default is 0.
|
||||
|
||||
@item icy_metadata_headers
|
||||
If the server supports ICY metadata, this contains the ICY-specific HTTP reply
|
||||
@@ -293,50 +293,6 @@ The required syntax to play a stream specifying a cookie is:
|
||||
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
|
||||
@end example
|
||||
|
||||
@section Icecast
|
||||
|
||||
Icecast protocol (stream to Icecast servers)
|
||||
|
||||
This protocol accepts the following options:
|
||||
|
||||
@table @option
|
||||
@item ice_genre
|
||||
Set the stream genre.
|
||||
|
||||
@item ice_name
|
||||
Set the stream name.
|
||||
|
||||
@item ice_description
|
||||
Set the stream description.
|
||||
|
||||
@item ice_url
|
||||
Set the stream website URL.
|
||||
|
||||
@item ice_public
|
||||
Set if the stream should be public.
|
||||
The default is 0 (not public).
|
||||
|
||||
@item user_agent
|
||||
Override the User-Agent header. If not specified a string of the form
|
||||
"Lavf/<version>" will be used.
|
||||
|
||||
@item password
|
||||
Set the Icecast mountpoint password.
|
||||
|
||||
@item content_type
|
||||
Set the stream content type. This must be set if it is different from
|
||||
audio/mpeg.
|
||||
|
||||
@item legacy_icecast
|
||||
This enables support for Icecast versions < 2.4.0, that do not support the
|
||||
HTTP PUT method but the SOURCE method.
|
||||
|
||||
@end table
|
||||
|
||||
@example
|
||||
icecast://[@var{username}[:@var{password}]@@]@var{server}:@var{port}/@var{mountpoint}
|
||||
@end example
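
For instance, to send an MP3 stream to a mountpoint (server, credentials and mountpoint are placeholders; @option{content_type} only needs to be set when the stream is not @code{audio/mpeg}):
@example
ffmpeg -re -i in.mp3 -c:a copy -f mp3 icecast://source:hackme@@example.com:8000/stream.mp3
@end example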
|
||||
|
||||
@section mmst
|
||||
|
||||
MMS (Microsoft Media Server) protocol over TCP.
|
||||
@@ -374,7 +330,7 @@ be seekable, so they will fail with the MD5 output protocol.
|
||||
|
||||
UNIX pipe access protocol.
|
||||
|
||||
Read and write from UNIX pipes.
|
||||
Allow to read and write from UNIX pipes.
|
||||
|
||||
The accepted syntax is:
|
||||
@example
|
||||
@@ -583,7 +539,7 @@ firewalls.
|
||||
|
||||
@section libsmbclient
|
||||
|
||||
libsmbclient permits one to manipulate CIFS/SMB network resources.
|
||||
libsmbclient permits to manipulate CIFS/SMB network resources.
|
||||
|
||||
Following syntax is required.
|
||||
|
||||
@@ -614,7 +570,7 @@ For more information see: @url{http://www.samba.org/}.
|
||||
|
||||
Secure File Transfer Protocol via libssh
|
||||
|
||||
Read from or write to remote resources using SFTP protocol.
|
||||
Allow to read from or write to remote resources using SFTP protocol.
|
||||
|
||||
Following syntax is required.
|
||||
|
||||
@@ -750,7 +706,7 @@ port will be used for the local RTP and RTCP ports.
|
||||
|
||||
@item
|
||||
If @option{localrtcpport} (the local RTCP port) is not set it will be
|
||||
set to the local RTP port value plus 1.
|
||||
set to the the local RTP port value plus 1.
|
||||
@end enumerate
|
||||
|
||||
@section rtsp
|
||||
@@ -1081,8 +1037,8 @@ Set raise error timeout, expressed in microseconds.
|
||||
This option is only relevant in read mode: if no data arrived in more
|
||||
than this time interval, raise error.
|
||||
|
||||
@item listen_timeout=@var{milliseconds}
|
||||
Set listen timeout, expressed in milliseconds.
|
||||
@item listen_timeout=@var{microseconds}
|
||||
Set listen timeout, expressed in microseconds.
|
||||
@end table
|
||||
|
||||
The following example shows how to setup a listening TCP connection
|
||||
|
@@ -618,6 +618,7 @@ flip wavelet?
|
||||
try to use the wavelet transformed predicted image (motion compensated image) as context for coding the residual coefficients
|
||||
try the MV length as context for coding the residual coefficients
|
||||
use extradata for stuff which is in the keyframes now?
|
||||
the MV median predictor is patented IIRC
|
||||
implement per picture halfpel interpolation
|
||||
try different range coder state transition tables for different contexts
|
||||
|
||||
|
23
doc/style.min.css (vendored)
File diff suppressed because one or more lines are too long
59
doc/t2h.init
@@ -1,35 +1,26 @@
|
||||
# Init file for texi2html.
|
||||
|
||||
# This is deprecated, and the makeinfo/texi2any version is doc/t2h.pm
|
||||
|
||||
# no horiz rules between sections
|
||||
$end_section = \&FFmpeg_end_section;
|
||||
sub FFmpeg_end_section($$)
|
||||
{
|
||||
}
|
||||
|
||||
my $TEMPLATE_HEADER1 = $ENV{"FFMPEG_HEADER1"} || <<EOT;
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
|
||||
<title>FFmpeg documentation</title>
|
||||
<link rel="stylesheet" href="bootstrap.min.css" />
|
||||
<link rel="stylesheet" href="style.min.css" />
|
||||
$EXTRA_HEAD =
|
||||
'<link rel="icon" href="favicon.png" type="image/png" />
|
||||
';
|
||||
|
||||
$CSS_LINES = $ENV{"FFMPEG_CSS"} || <<EOT;
|
||||
<link rel="stylesheet" type="text/css" href="default.css" />
|
||||
EOT
|
||||
|
||||
my $TEMPLATE_HEADER2 = $ENV{"FFMPEG_HEADER2"} || <<EOT;
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
my $TEMPLATE_HEADER = $ENV{"FFMPEG_HEADER"} || <<EOT;
|
||||
<link rel="icon" href="favicon.png" type="image/png" />
|
||||
</head>
|
||||
<body>
|
||||
<div id="container">
|
||||
<div id="body">
|
||||
EOT
|
||||
|
||||
my $TEMPLATE_FOOTER = $ENV{"FFMPEG_FOOTER"} || <<EOT;
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
EOT
|
||||
$PRE_BODY_CLOSE = '</div></div>';
|
||||
|
||||
$SMALL_RULE = '';
|
||||
$BODYTEXT = '';
|
||||
@@ -91,25 +82,21 @@ sub FFmpeg_print_page_head($$)
|
||||
$longtitle = "FFmpeg documentation : " . $longtitle;
|
||||
|
||||
print $fh <<EOT;
|
||||
$TEMPLATE_HEADER1
|
||||
$description
|
||||
<meta name="keywords" content="$longtitle">
|
||||
<meta name="Generator" content="$Texi2HTML::THISDOC{program}">
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
$Texi2HTML::THISDOC{'copying'}<!-- Created on $Texi2HTML::THISDOC{today} by $Texi2HTML::THISDOC{program} -->
|
||||
<!--
|
||||
$Texi2HTML::THISDOC{program_authors}
|
||||
-->
|
||||
$encoding
|
||||
$TEMPLATE_HEADER2
|
||||
EOT
|
||||
}
|
||||
<head>
|
||||
<title>$longtitle</title>
|
||||
|
||||
$print_page_foot = \&FFmpeg_print_page_foot;
|
||||
sub FFmpeg_print_page_foot($$)
|
||||
{
|
||||
my $fh = shift;
|
||||
print $fh <<EOT;
|
||||
$TEMPLATE_FOOTER
|
||||
$description
|
||||
<meta name="keywords" content="$longtitle">
|
||||
<meta name="Generator" content="$Texi2HTML::THISDOC{program}">
|
||||
$encoding
|
||||
$CSS_LINES
|
||||
$TEMPLATE_HEADER
|
||||
EOT
|
||||
}
|
||||
|
||||
|
339
doc/t2h.pm
@@ -1,339 +0,0 @@
|
||||
# makeinfo HTML output init file
|
||||
#
|
||||
# Copyright (c) 2011, 2012 Free Software Foundation, Inc.
|
||||
# Copyright (c) 2014 Andreas Cadhalpun
|
||||
# Copyright (c) 2014 Tiancheng "Timothy" Gu
|
||||
#
|
||||
# This file is part of FFmpeg.
|
||||
#
|
||||
# FFmpeg is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# FFmpeg is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public
|
||||
# License along with FFmpeg; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
# no navigation elements
|
||||
set_from_init_file('HEADERS', 0);
|
||||
|
||||
sub ffmpeg_heading_command($$$$$)
|
||||
{
|
||||
my $self = shift;
|
||||
my $cmdname = shift;
|
||||
my $command = shift;
|
||||
my $args = shift;
|
||||
my $content = shift;
|
||||
|
||||
my $result = '';
|
||||
|
||||
# not clear that it may really happen
|
||||
if ($self->in_string) {
|
||||
$result .= $self->command_string($command) ."\n" if ($cmdname ne 'node');
|
||||
$result .= $content if (defined($content));
|
||||
return $result;
|
||||
}
|
||||
|
||||
my $element_id = $self->command_id($command);
|
||||
$result .= "<a name=\"$element_id\"></a>\n"
|
||||
if (defined($element_id) and $element_id ne '');
|
||||
|
||||
print STDERR "Process $command "
|
||||
.Texinfo::Structuring::_print_root_command_texi($command)."\n"
|
||||
if ($self->get_conf('DEBUG'));
|
||||
my $element;
|
||||
if ($Texinfo::Common::root_commands{$command->{'cmdname'}}
|
||||
and $command->{'parent'}
|
||||
and $command->{'parent'}->{'type'}
|
||||
and $command->{'parent'}->{'type'} eq 'element') {
|
||||
$element = $command->{'parent'};
|
||||
}
|
||||
if ($element) {
|
||||
$result .= &{$self->{'format_element_header'}}($self, $cmdname,
|
||||
$command, $element);
|
||||
}
|
||||
|
||||
my $heading_level;
|
||||
# node is used as heading if there is nothing else.
|
||||
if ($cmdname eq 'node') {
|
||||
if (!$element or (!$element->{'extra'}->{'section'}
|
||||
and $element->{'extra'}->{'node'}
|
||||
and $element->{'extra'}->{'node'} eq $command
|
||||
# bogus node may not have been normalized
|
||||
and defined($command->{'extra'}->{'normalized'}))) {
|
||||
if ($command->{'extra'}->{'normalized'} eq 'Top') {
|
||||
$heading_level = 0;
|
||||
} else {
|
||||
$heading_level = 3;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
$heading_level = $command->{'level'};
|
||||
}
|
||||
|
||||
my $heading = $self->command_text($command);
|
||||
# $heading not defined may happen if the command is a @node, for example
|
||||
# if there is an error in the node.
|
||||
if (defined($heading) and $heading ne '' and defined($heading_level)) {
|
||||
|
||||
if ($Texinfo::Common::root_commands{$cmdname}
|
||||
and $Texinfo::Common::sectioning_commands{$cmdname}) {
|
||||
my $content_href = $self->command_contents_href($command, 'contents',
|
||||
$self->{'current_filename'});
|
||||
if ($content_href) {
|
||||
my $this_href = $content_href =~ s/^\#toc-/\#/r;
|
||||
$heading .= '<span class="pull-right">'.
|
||||
'<a class="anchor hidden-xs" '.
|
||||
"href=\"$this_href\" aria-hidden=\"true\">".
|
||||
($ENV{"FA_ICONS"} ? '<i class="fa fa-link"></i>'
|
||||
: '#').
|
||||
'</a> '.
|
||||
'<a class="anchor hidden-xs"'.
|
||||
"href=\"$content_href\" aria-hidden=\"true\">".
|
||||
($ENV{"FA_ICONS"} ? '<i class="fa fa-navicon"></i>'
|
||||
: 'TOC').
|
||||
'</a>'.
|
||||
'</span>';
|
||||
}
|
||||
}
|
||||
|
||||
if ($self->in_preformatted()) {
|
||||
$result .= $heading."\n";
|
||||
} else {
|
||||
# if the level was changed, set the command name right
|
||||
if ($cmdname ne 'node'
|
||||
and $heading_level ne $Texinfo::Common::command_structuring_level{$cmdname}) {
|
||||
$cmdname
|
||||
= $Texinfo::Common::level_to_structuring_command{$cmdname}->[$heading_level];
|
||||
}
|
||||
$result .= &{$self->{'format_heading_text'}}(
|
||||
$self, $cmdname, $heading,
|
||||
$heading_level +
|
||||
$self->get_conf('CHAPTER_HEADER_LEVEL') - 1, $command);
|
||||
}
|
||||
}
|
||||
$result .= $content if (defined($content));
|
||||
return $result;
|
||||
}
|
||||
|
||||
foreach my $command (keys(%Texinfo::Common::sectioning_commands), 'node') {
|
||||
texinfo_register_command_formatting($command, \&ffmpeg_heading_command);
|
||||
}
|
||||
|
||||
# print the TOC where @contents is used
|
||||
set_from_init_file('INLINE_CONTENTS', 1);
|
||||
|
||||
# make chapters <h2>
|
||||
set_from_init_file('CHAPTER_HEADER_LEVEL', 2);
|
||||
|
||||
# Do not add <hr>
|
||||
set_from_init_file('DEFAULT_RULE', '');
|
||||
set_from_init_file('BIG_RULE', '');
|
||||
|
||||
# Customized file beginning
|
||||
sub ffmpeg_begin_file($$$)
|
||||
{
|
||||
my $self = shift;
|
||||
my $filename = shift;
|
||||
my $element = shift;
|
||||
|
||||
my $command;
|
||||
if ($element and $self->get_conf('SPLIT')) {
|
||||
$command = $self->element_command($element);
|
||||
}
|
||||
|
||||
my ($title, $description, $encoding, $date, $css_lines,
|
||||
$doctype, $bodytext, $copying_comment, $after_body_open,
|
||||
$extra_head, $program_and_version, $program_homepage,
|
||||
$program, $generator) = $self->_file_header_informations($command);
|
||||
|
||||
my $links = $self->_get_links ($filename, $element);
|
||||
|
||||
my $head1 = $ENV{"FFMPEG_HEADER1"} || <<EOT;
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by $program_and_version, $program_homepage -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
EOT
|
||||
my $head_title = <<EOT;
|
||||
$title
|
||||
EOT
|
||||
|
||||
my $head2 = $ENV{"FFMPEG_HEADER2"} || <<EOT;
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
EOT
|
||||
|
||||
my $head3 = $ENV{"FFMPEG_HEADER3"} || <<EOT;
|
||||
</h1>
|
||||
EOT
|
||||
|
||||
return $head1 . $head_title . $head2 . $head_title . $head3;
|
||||
}
|
||||
texinfo_register_formatting_function('begin_file', \&ffmpeg_begin_file);
|
||||
|
||||
sub ffmpeg_program_string($)
|
||||
{
|
||||
my $self = shift;
|
||||
if (defined($self->get_conf('PROGRAM'))
|
||||
and $self->get_conf('PROGRAM') ne ''
|
||||
and defined($self->get_conf('PACKAGE_URL'))) {
|
||||
return $self->convert_tree(
|
||||
        $self->gdt('This document was generated using @uref{{program_homepage}, @emph{{program}}}.',
                   { 'program_homepage' => $self->get_conf('PACKAGE_URL'),
                     'program' => $self->get_conf('PROGRAM') }));
    } else {
        return $self->convert_tree(
            $self->gdt('This document was generated automatically.'));
    }
}
texinfo_register_formatting_function('program_string', \&ffmpeg_program_string);

# Customized file ending
sub ffmpeg_end_file($)
{
    my $self = shift;
    my $program_string = &{$self->{'format_program_string'}}($self);
    my $program_text = <<EOT;
      <p style="font-size: small;">
        $program_string
      </p>
EOT
    my $footer = $ENV{FFMPEG_FOOTER} || <<EOT;
    </div>
  </body>
</html>
EOT
    return $program_text . $footer;
}
texinfo_register_formatting_function('end_file', \&ffmpeg_end_file);

# Dummy title command
# Ignore title. Title is handled through ffmpeg_begin_file().
set_from_init_file('USE_TITLEPAGE_FOR_TITLE', 1);
sub ffmpeg_title($$$$)
{
    return '';
}

texinfo_register_command_formatting('titlefont',
                                    \&ffmpeg_title);

# Customized float command. Part of code borrowed from GNU Texinfo.
sub ffmpeg_float($$$$$)
{
    my $self = shift;
    my $cmdname = shift;
    my $command = shift;
    my $args = shift;
    my $content = shift;

    my ($caption, $prepended) = Texinfo::Common::float_name_caption($self,
                                                                    $command);
    my $caption_text = '';
    my $prepended_text;
    my $prepended_save = '';

    if ($self->in_string()) {
        if ($prepended) {
            $prepended_text = $self->convert_tree_new_formatting_context(
                $prepended, 'float prepended');
        } else {
            $prepended_text = '';
        }
        if ($caption) {
            $caption_text = $self->convert_tree_new_formatting_context(
                {'contents' => $caption->{'args'}->[0]->{'contents'}},
                'float caption');
        }
        return $prepended.$content.$caption_text;
    }

    my $id = $self->command_id($command);
    my $label;
    if (defined($id) and $id ne '') {
        $label = "<a name=\"$id\"></a>";
    } else {
        $label = '';
    }

    if ($prepended) {
        if ($caption) {
            # prepend the prepended tree to the first paragraph
            my @caption_original_contents = @{$caption->{'args'}->[0]->{'contents'}};
            my @caption_contents;
            my $new_paragraph;
            while (@caption_original_contents) {
                my $content = shift @caption_original_contents;
                if ($content->{'type'} and $content->{'type'} eq 'paragraph') {
                    %{$new_paragraph} = %{$content};
                    $new_paragraph->{'contents'} = [@{$content->{'contents'}}];
                    unshift (@{$new_paragraph->{'contents'}}, {'cmdname' => 'strong',
                             'args' => [{'type' => 'brace_command_arg',
                                         'contents' => [$prepended]}]});
                    push @caption_contents, $new_paragraph;
                    last;
                } else {
                    push @caption_contents, $content;
                }
            }
            push @caption_contents, @caption_original_contents;
            if ($new_paragraph) {
                $caption_text = $self->convert_tree_new_formatting_context(
                    {'contents' => \@caption_contents}, 'float caption');
                $prepended_text = '';
            }
        }
        if ($caption_text eq '') {
            $prepended_text = $self->convert_tree_new_formatting_context(
                $prepended, 'float prepended');
            if ($prepended_text ne '') {
                $prepended_save = $prepended_text;
                $prepended_text = '<p><strong>'.$prepended_text.'</strong></p>';
            }
        }
    } else {
        $prepended_text = '';
    }

    if ($caption and $caption_text eq '') {
        $caption_text = $self->convert_tree_new_formatting_context(
            $caption->{'args'}->[0], 'float caption');
    }
    if ($prepended_text.$caption_text ne '') {
        $prepended_text = $self->_attribute_class('div','float-caption'). '>'
                          . $prepended_text;
        $caption_text .= '</div>';
    }
    my $html_class = '';
    if ($prepended_save =~ /NOTE/) {
        $html_class = 'info';
        $prepended_text = '';
        $caption_text = '';
    } elsif ($prepended_save =~ /IMPORTANT/) {
        $html_class = 'warning';
        $prepended_text = '';
        $caption_text = '';
    }
    return $self->_attribute_class('div', $html_class). '>' . "\n" .
           $prepended_text . $caption_text . $content . '</div>';
}

texinfo_register_command_formatting('float',
                                    \&ffmpeg_float);

1;

@@ -332,12 +332,12 @@ $inf = pop @instack;

die "No filename or title\n" unless defined $fn && defined $tl;

# always use utf8
print "=encoding utf8\n\n";

$chapters{NAME} = "$fn \- $tl\n";
$chapters{FOOTNOTES} .= "=back\n" if exists $chapters{FOOTNOTES};

# always use utf8
print "=encoding utf8\n\n";

unshift @chapters_sequence, "NAME";
for $chapter (@chapters_sequence) {
    if (exists $chapters{$chapter}) {

@@ -782,9 +782,6 @@ large numbers (usually 2^53 and larger).
Round the value of expression @var{expr} upwards to the nearest
integer. For example, "ceil(1.5)" is "2.0".

@item clip(x, min, max)
Return the value of @var{x} clipped between @var{min} and @var{max}.

@item cos(x)
Compute cosine of @var{x}.

@@ -844,7 +841,7 @@ Return 1.0 if @var{x} is +/-INFINITY, 0.0 otherwise.
Return 1.0 if @var{x} is NAN, 0.0 otherwise.

@item ld(var)
Load the value of the internal variable with number
Allow to load the value of the internal variable with number
@var{var}, which was previously stored with st(@var{var}, @var{expr}).
The function returns the loaded value.

@@ -861,7 +858,7 @@ Return 1 if @var{x} is lesser than or equal to @var{y}, 0 otherwise.
Return the maximum between @var{x} and @var{y}.

@item min(x, y)
Return the maximum between @var{x} and @var{y}.
Return the minimum between @var{x} and @var{y}.

@item mod(x, y)
Compute the remainder of division of @var{x} by @var{y}.
@@ -912,7 +909,7 @@ Compute the square root of @var{expr}. This is equivalent to
Compute expression @code{1/(1 + exp(4*x))}.

@item st(var, expr)
Store the value of the expression @var{expr} in an internal
Allow to store the value of the expression @var{expr} in an internal
variable. @var{var} specifies the number of the variable where to
store the value, and it is a value ranging from 0 to 9. The function
returns the value stored in the internal variable.

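The st(), ld() and clip() entries above can be exercised directly through libavutil's public expression evaluator, outside of any filter graph. A minimal sketch, assuming an installed FFmpeg whose headers are reachable as <libavutil/eval.h>; the expression string and the numbers in it are made up purely for illustration:

/* Evaluate an expression that uses st(), ld() and clip(). */
#include <stdio.h>
#include <libavutil/eval.h>

int main(void)
{
    double result;
    /* store 42 in internal variable 0, load it back, clip it to [0, 10] */
    int ret = av_expr_parse_and_eval(&result,
                                     "st(0,42); clip(ld(0),0,10)",
                                     NULL, NULL,  /* no named constants */
                                     NULL, NULL,  /* no custom 1-arg functions */
                                     NULL, NULL,  /* no custom 2-arg functions */
                                     NULL, 0, NULL);
    if (ret < 0)
        return 1;
    printf("%f\n", result); /* expected output: 10.000000 */
    return 0;
}
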
@@ -16,15 +16,16 @@ outputs the modified frame. The most simple way of doing this is to take a
similar filter. We'll pick edgedetect, but any other should do. You can look
for others using the `./ffmpeg -v 0 -filters|grep ' V->V '` command.

  - sed 's/edgedetect/foobar/g;s/EdgeDetect/Foobar/g' libavfilter/vf_edgedetect.c > libavfilter/vf_foobar.c
  - cp libavfilter/vf_{edgedetect,foobar}.c
  - sed -i s/edgedetect/foobar/g -i libavfilter/vf_foobar.c
  - sed -i s/EdgeDetect/Foobar/g -i libavfilter/vf_foobar.c
  - edit libavfilter/Makefile, and add an entry for "foobar" following the
    pattern of the other filters.
  - edit libavfilter/allfilters.c, and add an entry for "foobar" following the
    pattern of the other filters.
  - ./configure ...
  - make -j<whatever> ffmpeg
  - ./ffmpeg -i http://samples.ffmpeg.org/image-samples/lena.pnm -vf foobar foobar.png
    Note here: you can obviously use a random local image instead of a remote URL.
  - ./ffmpeg -i tests/lena.pnm -vf foobar foobar.png

If everything went right, you should get a foobar.png with Lena edge-detected.

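To make the Makefile and allfilters.c steps above more concrete, here is a heavily stripped-down sketch of what the renamed vf_foobar.c boils down to. This is not code from the diff: the pass-through filter_frame() body is a placeholder, and structure layout and field names can differ between libavfilter releases.

#include "libavutil/internal.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    /* pass the frame through unchanged; a real filter would modify it here */
    return ff_filter_frame(inlink->dst->outputs[0], frame);
}

static const AVFilterPad foobar_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad foobar_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_foobar = {
    .name        = "foobar",
    .description = NULL_IF_CONFIG_SMALL("Pass the input through unchanged."),
    .inputs      = foobar_inputs,
    .outputs     = foobar_outputs,
};
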
424
ffmpeg.c
@@ -62,6 +62,8 @@
|
||||
#include "libavutil/threadmessage.h"
|
||||
#include "libavformat/os_support.h"
|
||||
|
||||
#include "libavformat/ffm.h" // not public API
|
||||
|
||||
# include "libavfilter/avcodec.h"
|
||||
# include "libavfilter/avfilter.h"
|
||||
# include "libavfilter/buffersrc.h"
|
||||
@@ -153,9 +155,8 @@ static struct termios oldtty;
|
||||
static int restore_tty;
|
||||
#endif
|
||||
|
||||
#if HAVE_PTHREADS
|
||||
static void free_input_threads(void);
|
||||
#endif
|
||||
|
||||
|
||||
/* sub2video hack:
|
||||
Convert subtitles to video with alpha to insert them in filter graphs.
|
||||
@@ -351,6 +352,7 @@ void term_init(void)
|
||||
signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
|
||||
}
|
||||
#endif
|
||||
avformat_network_deinit();
|
||||
|
||||
signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
|
||||
signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
|
||||
@@ -455,12 +457,9 @@ static void ffmpeg_cleanup(int ret)
|
||||
/* close files */
|
||||
for (i = 0; i < nb_output_files; i++) {
|
||||
OutputFile *of = output_files[i];
|
||||
AVFormatContext *s;
|
||||
if (!of)
|
||||
continue;
|
||||
s = of->ctx;
|
||||
if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
|
||||
avio_closep(&s->pb);
|
||||
AVFormatContext *s = of->ctx;
|
||||
if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE) && s->pb)
|
||||
avio_close(s->pb);
|
||||
avformat_free_context(s);
|
||||
av_dict_free(&of->opts);
|
||||
|
||||
@@ -468,12 +467,7 @@ static void ffmpeg_cleanup(int ret)
|
||||
}
|
||||
for (i = 0; i < nb_output_streams; i++) {
|
||||
OutputStream *ost = output_streams[i];
|
||||
AVBitStreamFilterContext *bsfc;
|
||||
|
||||
if (!ost)
|
||||
continue;
|
||||
|
||||
bsfc = ost->bitstream_filters;
|
||||
AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
|
||||
while (bsfc) {
|
||||
AVBitStreamFilterContext *next = bsfc->next;
|
||||
av_bitstream_filter_close(bsfc);
|
||||
@@ -481,7 +475,6 @@ static void ffmpeg_cleanup(int ret)
|
||||
}
|
||||
ost->bitstream_filters = NULL;
|
||||
av_frame_free(&ost->filtered_frame);
|
||||
av_frame_free(&ost->last_frame);
|
||||
|
||||
av_parser_close(ost->parser);
|
||||
|
||||
@@ -490,9 +483,6 @@ static void ffmpeg_cleanup(int ret)
|
||||
av_freep(&ost->avfilter);
|
||||
av_freep(&ost->logfile_prefix);
|
||||
|
||||
av_freep(&ost->audio_channels_map);
|
||||
ost->audio_channels_mapped = 0;
|
||||
|
||||
avcodec_free_context(&ost->enc_ctx);
|
||||
|
||||
av_freep(&output_streams[i]);
|
||||
@@ -522,7 +512,7 @@ static void ffmpeg_cleanup(int ret)
|
||||
|
||||
if (vstats_file)
|
||||
fclose(vstats_file);
|
||||
av_freep(&vstats_filename);
|
||||
av_free(vstats_filename);
|
||||
|
||||
av_freep(&input_streams);
|
||||
av_freep(&input_files);
|
||||
@@ -542,15 +532,6 @@ static void ffmpeg_cleanup(int ret)
|
||||
term_exit();
|
||||
}
|
||||
|
||||
void remove_avoptions(AVDictionary **a, AVDictionary *b)
|
||||
{
|
||||
AVDictionaryEntry *t = NULL;
|
||||
|
||||
while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
|
||||
av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
|
||||
}
|
||||
}
|
||||
|
||||
void assert_avoptions(AVDictionary *m)
|
||||
{
|
||||
AVDictionaryEntry *t;
|
||||
@@ -629,11 +610,7 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
|
||||
|
||||
while (bsfc) {
|
||||
AVPacket new_pkt = *pkt;
|
||||
AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
|
||||
bsfc->filter->name,
|
||||
NULL, 0);
|
||||
int a = av_bitstream_filter_filter(bsfc, avctx,
|
||||
bsf_arg ? bsf_arg->value : NULL,
|
||||
int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
|
||||
&new_pkt.data, &new_pkt.size,
|
||||
pkt->data, pkt->size,
|
||||
pkt->flags & AV_PKT_FLAG_KEY);
|
||||
@@ -649,15 +626,12 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
|
||||
a = AVERROR(ENOMEM);
|
||||
}
|
||||
if (a > 0) {
|
||||
pkt->side_data = NULL;
|
||||
pkt->side_data_elems = 0;
|
||||
av_free_packet(pkt);
|
||||
new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
|
||||
av_buffer_default_free, NULL, 0);
|
||||
if (!new_pkt.buf)
|
||||
exit_program(1);
|
||||
} else if (a < 0) {
|
||||
new_pkt = *pkt;
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
|
||||
bsfc->filter->name, pkt->stream_index,
|
||||
avctx->codec ? avctx->codec->name : "copy");
|
||||
@@ -671,17 +645,6 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
|
||||
}
|
||||
|
||||
if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
|
||||
if (pkt->dts != AV_NOPTS_VALUE &&
|
||||
pkt->pts != AV_NOPTS_VALUE &&
|
||||
pkt->dts > pkt->pts) {
|
||||
av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
|
||||
pkt->dts, pkt->pts,
|
||||
ost->file_index, ost->st->index);
|
||||
pkt->pts =
|
||||
pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
|
||||
- FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
|
||||
- FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
|
||||
}
|
||||
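/* Illustration, not part of the change itself: the expression above selects the
 * median of pkt->pts, pkt->dts and ost->last_mux_dts + 1, because the sum of the
 * three values minus their minimum and maximum leaves the middle one.
 * Example: pts = 10, dts = 20, last_mux_dts + 1 = 15 gives 10 + 20 + 15 - 10 - 20 = 15. */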
if(
|
||||
(avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
|
||||
pkt->dts != AV_NOPTS_VALUE &&
|
||||
@@ -704,6 +667,15 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
|
||||
pkt->dts = max;
|
||||
}
|
||||
}
|
||||
if (pkt->dts != AV_NOPTS_VALUE &&
|
||||
pkt->pts != AV_NOPTS_VALUE &&
|
||||
pkt->dts > pkt->pts) {
|
||||
av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d\n",
|
||||
pkt->dts, pkt->pts,
|
||||
ost->file_index, ost->st->index);
|
||||
pkt->pts = AV_NOPTS_VALUE;
|
||||
pkt->dts = AV_NOPTS_VALUE;
|
||||
}
|
||||
}
|
||||
ost->last_mux_dts = pkt->dts;
|
||||
|
||||
@@ -826,10 +798,6 @@ static void do_subtitle_out(AVFormatContext *s,
|
||||
|
||||
if (!subtitle_out) {
|
||||
subtitle_out = av_malloc(subtitle_out_max_size);
|
||||
if (!subtitle_out) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
|
||||
exit_program(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* Note: DVB subtitle need one packet to draw them and one other
|
||||
@@ -890,43 +858,28 @@ static void do_subtitle_out(AVFormatContext *s,
|
||||
|
||||
static void do_video_out(AVFormatContext *s,
|
||||
OutputStream *ost,
|
||||
AVFrame *next_picture,
|
||||
double sync_ipts)
|
||||
AVFrame *in_picture)
|
||||
{
|
||||
int ret, format_video_sync;
|
||||
AVPacket pkt;
|
||||
AVCodecContext *enc = ost->enc_ctx;
|
||||
AVCodecContext *mux_enc = ost->st->codec;
|
||||
int nb_frames, nb0_frames, i;
|
||||
double delta, delta0;
|
||||
int nb_frames, i;
|
||||
double sync_ipts, delta;
|
||||
double duration = 0;
|
||||
int frame_size = 0;
|
||||
InputStream *ist = NULL;
|
||||
AVFilterContext *filter = ost->filter->filter;
|
||||
|
||||
if (ost->source_index >= 0)
|
||||
ist = input_streams[ost->source_index];
|
||||
|
||||
if (filter->inputs[0]->frame_rate.num > 0 &&
|
||||
filter->inputs[0]->frame_rate.den > 0)
|
||||
duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
|
||||
|
||||
if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
|
||||
duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
|
||||
duration = 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base));
|
||||
|
||||
if (!ost->filters_script &&
|
||||
!ost->filters &&
|
||||
next_picture &&
|
||||
ist &&
|
||||
lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
|
||||
duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
|
||||
}
|
||||
|
||||
delta0 = sync_ipts - ost->sync_opts;
|
||||
delta = delta0 + duration;
|
||||
sync_ipts = in_picture->pts;
|
||||
delta = sync_ipts - ost->sync_opts + duration;
|
||||
|
||||
/* by default, we output a single frame */
|
||||
nb0_frames = 0;
|
||||
nb_frames = 1;
|
||||
|
||||
format_video_sync = video_sync_method;
|
||||
@@ -946,39 +899,19 @@ static void do_video_out(AVFormatContext *s,
|
||||
}
|
||||
}
|
||||
|
||||
if (delta0 < 0 &&
|
||||
delta > 0 &&
|
||||
format_video_sync != VSYNC_PASSTHROUGH &&
|
||||
format_video_sync != VSYNC_DROP) {
|
||||
double cor = FFMIN(-delta0, duration);
|
||||
if (delta0 < -0.6) {
|
||||
av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
|
||||
} else
|
||||
av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
|
||||
sync_ipts += cor;
|
||||
duration -= cor;
|
||||
delta0 += cor;
|
||||
}
|
||||
|
||||
switch (format_video_sync) {
|
||||
case VSYNC_VSCFR:
|
||||
if (ost->frame_number == 0 && delta - duration >= 0.5) {
|
||||
av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
|
||||
delta = duration;
|
||||
delta0 = 0;
|
||||
ost->sync_opts = lrint(sync_ipts);
|
||||
}
|
||||
case VSYNC_CFR:
|
||||
// FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
|
||||
if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
|
||||
if (delta < -1.1)
|
||||
nb_frames = 0;
|
||||
} else if (delta < -1.1)
|
||||
nb_frames = 0;
|
||||
else if (delta > 1.1) {
|
||||
else if (delta > 1.1)
|
||||
nb_frames = lrintf(delta);
|
||||
if (delta0 > 1.1)
|
||||
nb0_frames = lrintf(delta0 - 0.6);
|
||||
}
|
||||
break;
|
||||
case VSYNC_VFR:
|
||||
if (delta <= -0.6)
|
||||
@@ -995,36 +928,28 @@ static void do_video_out(AVFormatContext *s,
|
||||
}
|
||||
|
||||
nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
|
||||
nb0_frames = FFMIN(nb0_frames, nb_frames);
|
||||
if (nb0_frames == 0 && ost->last_droped) {
|
||||
if (nb_frames == 0) {
|
||||
nb_frames_drop++;
|
||||
av_log(NULL, AV_LOG_VERBOSE,
|
||||
"*** dropping frame %d from stream %d at ts %"PRId64"\n",
|
||||
ost->frame_number, ost->st->index, ost->last_frame->pts);
|
||||
}
|
||||
if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
|
||||
ost->frame_number, ost->st->index, in_picture->pts);
|
||||
return;
|
||||
} else if (nb_frames > 1) {
|
||||
if (nb_frames > dts_error_threshold * 30) {
|
||||
av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
|
||||
nb_frames_drop++;
|
||||
return;
|
||||
}
|
||||
nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
|
||||
nb_frames_dup += nb_frames - 1;
|
||||
av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
|
||||
}
|
||||
ost->last_droped = nb_frames == nb0_frames;
|
||||
|
||||
/* duplicates frame if needed */
|
||||
for (i = 0; i < nb_frames; i++) {
|
||||
AVFrame *in_picture;
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
if (i < nb0_frames && ost->last_frame) {
|
||||
in_picture = ost->last_frame;
|
||||
} else
|
||||
in_picture = next_picture;
|
||||
|
||||
in_picture->pts = ost->sync_opts;
|
||||
|
||||
#if 1
|
||||
@@ -1039,8 +964,10 @@ static void do_video_out(AVFormatContext *s,
|
||||
/* raw pictures are written as AVPicture structure to
|
||||
avoid any copies. We support temporarily the older
|
||||
method. */
|
||||
if (in_picture->interlaced_frame)
|
||||
mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
|
||||
mux_enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
|
||||
mux_enc->coded_frame->top_field_first = in_picture->top_field_first;
|
||||
if (mux_enc->coded_frame->interlaced_frame)
|
||||
mux_enc->field_order = mux_enc->coded_frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
|
||||
else
|
||||
mux_enc->field_order = AV_FIELD_PROGRESSIVE;
|
||||
pkt.data = (uint8_t *)in_picture;
|
||||
@@ -1066,7 +993,8 @@ static void do_video_out(AVFormatContext *s,
|
||||
mux_enc->field_order = AV_FIELD_PROGRESSIVE;
|
||||
|
||||
in_picture->quality = enc->global_quality;
|
||||
in_picture->pict_type = 0;
|
||||
if (!enc->me_threshold)
|
||||
in_picture->pict_type = 0;
|
||||
|
||||
pts_time = in_picture->pts != AV_NOPTS_VALUE ?
|
||||
in_picture->pts * av_q2d(enc->time_base) : NAN;
|
||||
@@ -1160,14 +1088,6 @@ static void do_video_out(AVFormatContext *s,
|
||||
if (vstats_filename && frame_size)
|
||||
do_video_stats(ost, frame_size);
|
||||
}
|
||||
|
||||
if (!ost->last_frame)
|
||||
ost->last_frame = av_frame_alloc();
|
||||
av_frame_unref(ost->last_frame);
|
||||
if (next_picture && ost->last_frame)
|
||||
av_frame_ref(ost->last_frame, next_picture);
|
||||
else
|
||||
av_frame_free(&ost->last_frame);
|
||||
}
|
||||
|
||||
static double psnr(double d)
|
||||
@@ -1193,8 +1113,8 @@ static void do_video_stats(OutputStream *ost, int frame_size)
|
||||
enc = ost->enc_ctx;
|
||||
if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
frame_number = ost->st->nb_frames;
|
||||
fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame ? enc->coded_frame->quality / (float)FF_QP2LAMBDA : 0);
|
||||
if (enc->coded_frame && (enc->flags&CODEC_FLAG_PSNR))
|
||||
fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
|
||||
if (enc->flags&CODEC_FLAG_PSNR)
|
||||
fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
|
||||
|
||||
fprintf(vstats_file,"f_size= %6d ", frame_size);
|
||||
@@ -1207,7 +1127,7 @@ static void do_video_stats(OutputStream *ost, int frame_size)
|
||||
avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
|
||||
fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
|
||||
(double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
|
||||
fprintf(vstats_file, "type= %c\n", enc->coded_frame ? av_get_picture_type_char(enc->coded_frame->pict_type) : 'I');
|
||||
fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1234,6 +1154,7 @@ static int reap_filters(void)
|
||||
{
|
||||
AVFrame *filtered_frame = NULL;
|
||||
int i;
|
||||
int64_t frame_pts;
|
||||
|
||||
/* Reap all buffers present in the buffer sinks */
|
||||
for (i = 0; i < nb_output_streams; i++) {
|
||||
@@ -1253,7 +1174,6 @@ static int reap_filters(void)
|
||||
filtered_frame = ost->filtered_frame;
|
||||
|
||||
while (1) {
|
||||
double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
|
||||
ret = av_buffersink_get_frame_flags(filter, filtered_frame,
|
||||
AV_BUFFERSINK_FLAG_NO_REQUEST);
|
||||
if (ret < 0) {
|
||||
@@ -1267,20 +1187,10 @@ static int reap_filters(void)
|
||||
av_frame_unref(filtered_frame);
|
||||
continue;
|
||||
}
|
||||
frame_pts = AV_NOPTS_VALUE;
|
||||
if (filtered_frame->pts != AV_NOPTS_VALUE) {
|
||||
int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
|
||||
AVRational tb = enc->time_base;
|
||||
int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
|
||||
|
||||
tb.den <<= extra_bits;
|
||||
float_pts =
|
||||
av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
|
||||
av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
|
||||
float_pts /= 1 << extra_bits;
|
||||
// avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
|
||||
float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
|
||||
|
||||
filtered_frame->pts =
|
||||
filtered_frame->pts = frame_pts =
|
||||
av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
|
||||
av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
|
||||
}
|
||||
@@ -1289,19 +1199,20 @@ static int reap_filters(void)
|
||||
|
||||
switch (filter->inputs[0]->type) {
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
filtered_frame->pts = frame_pts;
|
||||
if (!ost->frame_aspect_ratio.num)
|
||||
enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
|
||||
|
||||
if (debug_ts) {
|
||||
av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
|
||||
av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s time_base:%d/%d\n",
|
||||
av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
|
||||
float_pts,
|
||||
enc->time_base.num, enc->time_base.den);
|
||||
}
|
||||
|
||||
do_video_out(of->ctx, ost, filtered_frame, float_pts);
|
||||
do_video_out(of->ctx, ost, filtered_frame);
|
||||
break;
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
filtered_frame->pts = frame_pts;
|
||||
if (!(enc->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
|
||||
enc->channels != av_frame_get_channels(filtered_frame)) {
|
||||
av_log(NULL, AV_LOG_ERROR,
|
||||
@@ -1345,6 +1256,7 @@ static void print_final_stats(int64_t total_size)
|
||||
if (data_size && total_size>0 && total_size >= data_size)
|
||||
percent = 100.0 * (total_size - data_size) / data_size;
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "\n");
|
||||
av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
|
||||
video_size / 1024.0,
|
||||
audio_size / 1024.0,
|
||||
@@ -1535,12 +1447,10 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
||||
if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
|
||||
pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
|
||||
ost->st->time_base, AV_TIME_BASE_Q));
|
||||
if (is_last_report)
|
||||
nb_frames_drop += ost->last_droped;
|
||||
}
|
||||
|
||||
secs = FFABS(pts) / AV_TIME_BASE;
|
||||
us = FFABS(pts) % AV_TIME_BASE;
|
||||
secs = pts / AV_TIME_BASE;
|
||||
us = pts % AV_TIME_BASE;
|
||||
mins = secs / 60;
|
||||
secs %= 60;
|
||||
hours = mins / 60;
|
||||
@@ -1552,20 +1462,13 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
||||
"size=N/A time=");
|
||||
else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
|
||||
"size=%8.0fkB time=", total_size / 1024.0);
|
||||
if (pts < 0)
|
||||
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
|
||||
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
|
||||
"%02d:%02d:%02d.%02d ", hours, mins, secs,
|
||||
(100 * us) / AV_TIME_BASE);
|
||||
|
||||
if (bitrate < 0) {
|
||||
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
|
||||
av_bprintf(&buf_script, "bitrate=N/A\n");
|
||||
}else{
|
||||
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
|
||||
av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
|
||||
}
|
||||
|
||||
if (bitrate < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
|
||||
"bitrate=N/A");
|
||||
else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
|
||||
"bitrate=%6.1fkbits/s", bitrate);
|
||||
if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
|
||||
else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
|
||||
av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
|
||||
@@ -1579,11 +1482,10 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
||||
av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
|
||||
|
||||
if (print_stats || is_last_report) {
|
||||
const char end = is_last_report ? '\n' : '\r';
|
||||
if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
|
||||
fprintf(stderr, "%s %c", buf, end);
|
||||
fprintf(stderr, "%s \r", buf);
|
||||
} else
|
||||
av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
|
||||
av_log(NULL, AV_LOG_INFO, "%s \r", buf);
|
||||
|
||||
fflush(stderr);
|
||||
}
|
||||
@@ -1596,7 +1498,8 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
||||
avio_flush(progress_avio);
|
||||
av_bprint_finalize(&buf_script, NULL);
|
||||
if (is_last_report) {
|
||||
avio_closep(&progress_avio);
|
||||
avio_close(progress_avio);
|
||||
progress_avio = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1841,12 +1744,9 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (*got_output || ret<0)
|
||||
if (*got_output || ret<0 || pkt->size)
|
||||
decode_error_stat[ret<0] ++;
|
||||
|
||||
if (ret < 0 && exit_on_error)
|
||||
exit_program(1);
|
||||
|
||||
if (!*got_output || ret < 0) {
|
||||
if (!pkt->size) {
|
||||
for (i = 0; i < ist->nb_filters; i++)
|
||||
@@ -1989,26 +1889,9 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
|
||||
);
|
||||
}
|
||||
|
||||
if (*got_output || ret<0)
|
||||
if (*got_output || ret<0 || pkt->size)
|
||||
decode_error_stat[ret<0] ++;
|
||||
|
||||
if (ret < 0 && exit_on_error)
|
||||
exit_program(1);
|
||||
|
||||
if (*got_output && ret >= 0) {
|
||||
if (ist->dec_ctx->width != decoded_frame->width ||
|
||||
ist->dec_ctx->height != decoded_frame->height ||
|
||||
ist->dec_ctx->pix_fmt != decoded_frame->format) {
|
||||
av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
|
||||
decoded_frame->width,
|
||||
decoded_frame->height,
|
||||
decoded_frame->format,
|
||||
ist->dec_ctx->width,
|
||||
ist->dec_ctx->height,
|
||||
ist->dec_ctx->pix_fmt);
|
||||
}
|
||||
}
|
||||
|
||||
if (!*got_output || ret < 0) {
|
||||
if (!pkt->size) {
|
||||
for (i = 0; i < ist->nb_filters; i++)
|
||||
@@ -2110,12 +1993,9 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
|
||||
int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
|
||||
&subtitle, got_output, pkt);
|
||||
|
||||
if (*got_output || ret<0)
|
||||
if (*got_output || ret<0 || pkt->size)
|
||||
decode_error_stat[ret<0] ++;
|
||||
|
||||
if (ret < 0 && exit_on_error)
|
||||
exit_program(1);
|
||||
|
||||
if (ret < 0 || !*got_output) {
|
||||
if (!pkt->size)
|
||||
sub2video_flush(ist);
|
||||
@@ -2168,7 +2048,7 @@ out:
|
||||
}
|
||||
|
||||
/* pkt = NULL means EOF (needed to flush decoder buffers) */
|
||||
static int process_input_packet(InputStream *ist, const AVPacket *pkt)
|
||||
static int output_packet(InputStream *ist, const AVPacket *pkt)
|
||||
{
|
||||
int ret = 0, i;
|
||||
int got_output = 0;
|
||||
@@ -2177,7 +2057,7 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt)
|
||||
if (!ist->saw_first_ts) {
|
||||
ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
|
||||
ist->pts = 0;
|
||||
if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
|
||||
if (pkt != NULL && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
|
||||
ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
|
||||
ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
|
||||
}
|
||||
@@ -2189,7 +2069,7 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt)
|
||||
if (ist->next_pts == AV_NOPTS_VALUE)
|
||||
ist->next_pts = ist->pts;
|
||||
|
||||
if (!pkt) {
|
||||
if (pkt == NULL) {
|
||||
/* EOF handling */
|
||||
av_init_packet(&avpkt);
|
||||
avpkt.data = NULL;
|
||||
@@ -2228,11 +2108,11 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt)
|
||||
ret = decode_video (ist, &avpkt, &got_output);
|
||||
if (avpkt.duration) {
|
||||
duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
|
||||
} else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
|
||||
} else if(ist->dec_ctx->time_base.num != 0 && ist->dec_ctx->time_base.den != 0) {
|
||||
int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
|
||||
duration = ((int64_t)AV_TIME_BASE *
|
||||
ist->dec_ctx->framerate.den * ticks) /
|
||||
ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
|
||||
ist->dec_ctx->time_base.num * ticks) /
|
||||
ist->dec_ctx->time_base.den;
|
||||
} else
|
||||
duration = 0;
|
||||
|
||||
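/* Worked example for the framerate-based branch above (illustration only):
 * with a 25/1 frame rate, ticks_per_frame = 2 and a parser reporting
 * repeat_pict = 1 (so ticks = 2), the duration becomes
 *   1000000 * 1 * 2 / 25 / 2 = 40000 microseconds,
 * i.e. exactly one frame at 25 fps. */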
@@ -2287,11 +2167,11 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt)
|
||||
ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
|
||||
} else if (pkt->duration) {
|
||||
ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
|
||||
} else if(ist->dec_ctx->framerate.num != 0) {
|
||||
} else if(ist->dec_ctx->time_base.num != 0) {
|
||||
int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
|
||||
ist->next_dts += ((int64_t)AV_TIME_BASE *
|
||||
ist->dec_ctx->framerate.den * ticks) /
|
||||
ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
|
||||
ist->dec_ctx->time_base.num * ticks) /
|
||||
ist->dec_ctx->time_base.den;
|
||||
}
|
||||
break;
|
||||
}
|
||||
@@ -2314,34 +2194,16 @@ static void print_sdp(void)
|
||||
{
|
||||
char sdp[16384];
|
||||
int i;
|
||||
int j;
|
||||
AVIOContext *sdp_pb;
|
||||
AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
|
||||
|
||||
if (!avc)
|
||||
exit_program(1);
|
||||
for (i = 0, j = 0; i < nb_output_files; i++) {
|
||||
if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
|
||||
avc[j] = output_files[i]->ctx;
|
||||
j++;
|
||||
}
|
||||
}
|
||||
|
||||
av_sdp_create(avc, j, sdp, sizeof(sdp));
|
||||
|
||||
if (!sdp_filename) {
|
||||
printf("SDP:\n%s\n", sdp);
|
||||
fflush(stdout);
|
||||
} else {
|
||||
if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
|
||||
} else {
|
||||
avio_printf(sdp_pb, "SDP:\n%s", sdp);
|
||||
avio_closep(&sdp_pb);
|
||||
av_freep(&sdp_filename);
|
||||
}
|
||||
}
|
||||
for (i = 0; i < nb_output_files; i++)
|
||||
avc[i] = output_files[i]->ctx;
|
||||
|
||||
av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
|
||||
printf("SDP:\n%s\n", sdp);
|
||||
fflush(stdout);
|
||||
av_freep(&avc);
|
||||
}
|
||||
|
||||
@@ -2421,12 +2283,8 @@ static int init_input_stream(int ist_index, char *error, int error_len)
|
||||
ist->dec_ctx->thread_safe_callbacks = 1;
|
||||
|
||||
av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
|
||||
if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
|
||||
(ist->decoding_needed & DECODING_FOR_OST)) {
|
||||
av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
|
||||
if (ist->decoding_needed & DECODING_FOR_FILTER)
|
||||
av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
|
||||
}
|
||||
if(ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
|
||||
av_dict_set(&ist->decoder_opts, "compute_edt", "1", 0);
|
||||
|
||||
if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
|
||||
av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
|
||||
@@ -2587,7 +2445,7 @@ static int transcode_init(void)
|
||||
AVFormatContext *oc;
|
||||
OutputStream *ost;
|
||||
InputStream *ist;
|
||||
char error[1024] = {0};
|
||||
char error[1024];
|
||||
int want_sdp = 1;
|
||||
|
||||
for (i = 0; i < nb_filtergraphs; i++) {
|
||||
@@ -2686,13 +2544,11 @@ static int transcode_init(void)
|
||||
enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
|
||||
enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
|
||||
enc_ctx->field_order = dec_ctx->field_order;
|
||||
if (dec_ctx->extradata_size) {
|
||||
enc_ctx->extradata = av_mallocz(extra_size);
|
||||
if (!enc_ctx->extradata) {
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
|
||||
enc_ctx->extradata = av_mallocz(extra_size);
|
||||
if (!enc_ctx->extradata) {
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
|
||||
enc_ctx->extradata_size= dec_ctx->extradata_size;
|
||||
enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
|
||||
|
||||
@@ -2747,26 +2603,6 @@ static int transcode_init(void)
|
||||
av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
|
||||
enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
|
||||
|
||||
if (ist->st->nb_side_data) {
|
||||
ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
|
||||
sizeof(*ist->st->side_data));
|
||||
if (!ost->st->side_data)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
for (j = 0; j < ist->st->nb_side_data; j++) {
|
||||
const AVPacketSideData *sd_src = &ist->st->side_data[j];
|
||||
AVPacketSideData *sd_dst = &ost->st->side_data[j];
|
||||
|
||||
sd_dst->data = av_malloc(sd_src->size);
|
||||
if (!sd_dst->data)
|
||||
return AVERROR(ENOMEM);
|
||||
memcpy(sd_dst->data, sd_src->data, sd_src->size);
|
||||
sd_dst->size = sd_src->size;
|
||||
sd_dst->type = sd_src->type;
|
||||
ost->st->nb_side_data++;
|
||||
}
|
||||
}
|
||||
|
||||
ost->parser = av_parser_init(enc_ctx->codec_id);
|
||||
|
||||
switch (enc_ctx->codec_type) {
|
||||
@@ -2781,10 +2617,7 @@ static int transcode_init(void)
|
||||
enc_ctx->frame_size = dec_ctx->frame_size;
|
||||
enc_ctx->audio_service_type = dec_ctx->audio_service_type;
|
||||
enc_ctx->block_align = dec_ctx->block_align;
|
||||
enc_ctx->initial_padding = dec_ctx->delay;
|
||||
#if FF_API_AUDIOENC_DELAY
|
||||
enc_ctx->delay = dec_ctx->delay;
|
||||
#endif
|
||||
if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
|
||||
enc_ctx->block_align= 0;
|
||||
if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
|
||||
@@ -2808,7 +2641,6 @@ static int transcode_init(void)
|
||||
sar = dec_ctx->sample_aspect_ratio;
|
||||
ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
|
||||
ost->st->avg_frame_rate = ist->st->avg_frame_rate;
|
||||
ost->st->r_frame_rate = ist->st->r_frame_rate;
|
||||
break;
|
||||
case AVMEDIA_TYPE_SUBTITLE:
|
||||
enc_ctx->width = dec_ctx->width;
|
||||
@@ -2832,7 +2664,7 @@ static int transcode_init(void)
|
||||
}
|
||||
|
||||
if (ist)
|
||||
ist->decoding_needed |= DECODING_FOR_OST;
|
||||
ist->decoding_needed++;
|
||||
ost->encoding_needed = 1;
|
||||
|
||||
set_encoder_id(output_files[ost->file_index], ost);
|
||||
@@ -2869,7 +2701,6 @@ static int transcode_init(void)
|
||||
int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
|
||||
ost->frame_rate = ost->enc->supported_framerates[idx];
|
||||
}
|
||||
// reduce frame rate for mpeg4 to be within the spec limits
|
||||
if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
|
||||
av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
|
||||
ost->frame_rate.num, ost->frame_rate.den, 65535);
|
||||
@@ -2954,8 +2785,6 @@ static int transcode_init(void)
|
||||
enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
|
||||
}
|
||||
break;
|
||||
case AVMEDIA_TYPE_DATA:
|
||||
break;
|
||||
default:
|
||||
abort();
|
||||
break;
|
||||
@@ -2994,37 +2823,6 @@ static int transcode_init(void)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (ost->disposition) {
|
||||
static const AVOption opts[] = {
|
||||
{ "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
|
||||
{ "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
|
||||
{ "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
|
||||
{ "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
|
||||
{ "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
|
||||
{ "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
|
||||
{ "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
|
||||
{ "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
|
||||
{ "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
|
||||
{ "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
|
||||
{ "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
|
||||
{ "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
|
||||
{ "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
|
||||
{ "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
|
||||
{ NULL },
|
||||
};
|
||||
static const AVClass class = {
|
||||
.class_name = "",
|
||||
.item_name = av_default_item_name,
|
||||
.option = opts,
|
||||
.version = LIBAVUTIL_VERSION_INT,
|
||||
};
|
||||
const AVClass *pclass = &class;
|
||||
|
||||
ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
|
||||
if (ret < 0)
|
||||
goto dump_format;
|
||||
}
|
||||
}
|
||||
|
||||
/* open each encoder */
|
||||
@@ -3066,11 +2864,10 @@ static int transcode_init(void)
|
||||
av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
|
||||
" It takes bits/s as argument, not kbits/s\n");
|
||||
} else {
|
||||
ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
|
||||
if (ret < 0) {
|
||||
if (av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts) < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL,
|
||||
"Error setting up codec context options.\n");
|
||||
return ret;
|
||||
exit_program(1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3220,7 +3017,7 @@ static int transcode_init(void)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (sdp_filename || want_sdp) {
|
||||
if (want_sdp) {
|
||||
print_sdp();
|
||||
}
|
||||
|
||||
@@ -3270,9 +3067,9 @@ static OutputStream *choose_output(void)
|
||||
OutputStream *ost = output_streams[i];
|
||||
int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
|
||||
AV_TIME_BASE_Q);
|
||||
if (!ost->finished && opts < opts_min) {
|
||||
if (!ost->unavailable && !ost->finished && opts < opts_min) {
|
||||
opts_min = opts;
|
||||
ost_min = ost->unavailable ? NULL : ost;
|
||||
ost_min = ost;
|
||||
}
|
||||
}
|
||||
return ost_min;
|
||||
@@ -3379,7 +3176,6 @@ static int check_keyboard_interaction(int64_t cur_time)
|
||||
static void *input_thread(void *arg)
|
||||
{
|
||||
InputFile *f = arg;
|
||||
unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
|
||||
int ret = 0;
|
||||
|
||||
while (1) {
|
||||
@@ -3395,15 +3191,7 @@ static void *input_thread(void *arg)
|
||||
break;
|
||||
}
|
||||
av_dup_packet(&pkt);
|
||||
ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
|
||||
if (flags && ret == AVERROR(EAGAIN)) {
|
||||
flags = 0;
|
||||
ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
|
||||
av_log(f->ctx, AV_LOG_WARNING,
|
||||
"Thread message queue blocking; consider raising the "
|
||||
"thread_queue_size option (current value: %d)\n",
|
||||
f->thread_queue_size);
|
||||
}
|
||||
ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, 0);
|
||||
if (ret < 0) {
|
||||
if (ret != AVERROR_EOF)
|
||||
av_log(f->ctx, AV_LOG_ERROR,
|
||||
@@ -3452,7 +3240,7 @@ static int init_input_threads(void)
|
||||
strcmp(f->ctx->iformat->name, "lavfi"))
|
||||
f->non_blocking = 1;
|
||||
ret = av_thread_message_queue_alloc(&f->in_thread_queue,
|
||||
f->thread_queue_size, sizeof(AVPacket));
|
||||
8, sizeof(AVPacket));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@@ -3543,7 +3331,7 @@ static int process_input(int file_index)
|
||||
for (i = 0; i < ifile->nb_streams; i++) {
|
||||
ist = input_streams[ifile->ist_index + i];
|
||||
if (ist->decoding_needed) {
|
||||
ret = process_input_packet(ist, NULL);
|
||||
ret = output_packet(ist, NULL);
|
||||
if (ret>0)
|
||||
return 0;
|
||||
}
|
||||
@@ -3659,14 +3447,13 @@ static int process_input(int file_index)
|
||||
if (pkt.dts != AV_NOPTS_VALUE)
|
||||
pkt.dts *= ist->ts_scale;
|
||||
|
||||
if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
|
||||
ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
|
||||
pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
|
||||
if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
|
||||
&& (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
|
||||
int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
|
||||
int64_t delta = pkt_dts - ifile->last_ts;
|
||||
if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
|
||||
delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
|
||||
if(delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
|
||||
(delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
|
||||
ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)){
|
||||
ifile->ts_offset -= delta;
|
||||
av_log(NULL, AV_LOG_DEBUG,
|
||||
"Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
|
||||
@@ -3677,15 +3464,14 @@ static int process_input(int file_index)
|
||||
}
|
||||
}
|
||||
|
||||
if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
|
||||
ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
|
||||
pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
|
||||
if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
|
||||
!copy_ts) {
|
||||
int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
|
||||
int64_t delta = pkt_dts - ist->next_dts;
|
||||
if (is->iformat->flags & AVFMT_TS_DISCONT) {
|
||||
if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
|
||||
delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
|
||||
(delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
|
||||
ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
|
||||
pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
|
||||
ifile->ts_offset -= delta;
|
||||
av_log(NULL, AV_LOG_DEBUG,
|
||||
@@ -3697,7 +3483,7 @@ static int process_input(int file_index)
|
||||
}
|
||||
} else {
|
||||
if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
|
||||
delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
|
||||
(delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
|
||||
av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
|
||||
pkt.dts = AV_NOPTS_VALUE;
|
||||
}
|
||||
@@ -3705,7 +3491,7 @@ static int process_input(int file_index)
|
||||
int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
|
||||
delta = pkt_pts - ist->next_dts;
|
||||
if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
|
||||
delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
|
||||
(delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
|
||||
av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
|
||||
pkt.pts = AV_NOPTS_VALUE;
|
||||
}
|
||||
@@ -3727,7 +3513,7 @@ static int process_input(int file_index)
|
||||
|
||||
sub2video_heartbeat(ist, pkt.pts);
|
||||
|
||||
ret = process_input_packet(ist, &pkt);
|
||||
ret = output_packet(ist, &pkt);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
|
||||
ist->file_index, ist->st->index, av_err2str(ret));
|
||||
@@ -3893,7 +3679,7 @@ static int transcode(void)
|
||||
for (i = 0; i < nb_input_streams; i++) {
|
||||
ist = input_streams[i];
|
||||
if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
|
||||
process_input_packet(ist, NULL);
|
||||
output_packet(ist, NULL);
|
||||
}
|
||||
}
|
||||
flush_encoders();
|
||||
@@ -3945,11 +3731,9 @@ static int transcode(void)
|
||||
}
|
||||
av_freep(&ost->forced_kf_pts);
|
||||
av_freep(&ost->apad);
|
||||
av_freep(&ost->disposition);
|
||||
av_dict_free(&ost->encoder_opts);
|
||||
av_dict_free(&ost->swr_opts);
|
||||
av_dict_free(&ost->resample_opts);
|
||||
av_dict_free(&ost->bsf_args);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3971,7 +3755,7 @@ static int64_t getutime(void)
|
||||
GetProcessTimes(proc, &c, &e, &k, &u);
|
||||
return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
|
||||
#else
|
||||
return av_gettime_relative();
|
||||
return av_gettime();
|
||||
#endif
|
||||
}
|
||||
|
||||
|
20
ffmpeg.h
@@ -111,7 +111,6 @@ typedef struct OptionsContext {
|
||||
int64_t input_ts_offset;
|
||||
int rate_emu;
|
||||
int accurate_seek;
|
||||
int thread_queue_size;
|
||||
|
||||
SpecifierOpt *ts_scale;
|
||||
int nb_ts_scale;
|
||||
@@ -207,8 +206,6 @@ typedef struct OptionsContext {
|
||||
int nb_apad;
|
||||
SpecifierOpt *discard;
|
||||
int nb_discard;
|
||||
SpecifierOpt *disposition;
|
||||
int nb_disposition;
|
||||
} OptionsContext;
|
||||
|
||||
typedef struct InputFilter {
|
||||
@@ -246,10 +243,7 @@ typedef struct InputStream {
|
||||
AVStream *st;
|
||||
int discard; /* true if stream data should be discarded */
|
||||
int user_set_discard;
|
||||
int decoding_needed; /* non zero if the packets must be decoded in 'raw_fifo', see DECODING_FOR_* */
|
||||
#define DECODING_FOR_OST 1
|
||||
#define DECODING_FOR_FILTER 2
|
||||
|
||||
int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
|
||||
AVCodecContext *dec_ctx;
|
||||
AVCodec *dec;
|
||||
AVFrame *decoded_frame;
|
||||
@@ -351,7 +345,6 @@ typedef struct InputFile {
|
||||
pthread_t thread; /* thread reading from this file */
|
||||
int non_blocking; /* reading packets from the thread should not block */
|
||||
int joined; /* the thread has been joined */
|
||||
int thread_queue_size; /* maximum number of queued packets */
|
||||
#endif
|
||||
} InputFile;
|
||||
|
||||
@@ -392,8 +385,6 @@ typedef struct OutputStream {
|
||||
AVCodec *enc;
|
||||
int64_t max_frames;
|
||||
AVFrame *filtered_frame;
|
||||
AVFrame *last_frame;
|
||||
int last_droped;
|
||||
|
||||
/* video only */
|
||||
AVRational frame_rate;
|
||||
@@ -411,7 +402,7 @@ typedef struct OutputStream {
|
||||
double forced_keyframes_expr_const_values[FKF_NB];
|
||||
|
||||
/* audio only */
|
||||
int *audio_channels_map; /* list of the channels id to pick from the source stream */
|
||||
int audio_channels_map[SWR_CH_MAX]; /* list of the channels id to pick from the source stream */
|
||||
int audio_channels_mapped; /* number of channels in audio_channels_map */
|
||||
|
||||
char *logfile_prefix;
|
||||
@@ -426,7 +417,6 @@ typedef struct OutputStream {
|
||||
AVDictionary *encoder_opts;
|
||||
AVDictionary *swr_opts;
|
||||
AVDictionary *resample_opts;
|
||||
AVDictionary *bsf_args;
|
||||
char *apad;
|
||||
OSTFinished finished; /* no more packets should be written for this stream */
|
||||
int unavailable; /* true if the steram is unavailable (possibly temporarily) */
|
||||
@@ -434,7 +424,6 @@ typedef struct OutputStream {
|
||||
const char *attachment_filename;
|
||||
int copy_initial_nonkeyframes;
|
||||
int copy_prior_start;
|
||||
char *disposition;
|
||||
|
||||
int keep_pix_fmt;
|
||||
|
||||
@@ -475,7 +464,6 @@ extern FilterGraph **filtergraphs;
|
||||
extern int nb_filtergraphs;
|
||||
|
||||
extern char *vstats_filename;
|
||||
extern char *sdp_filename;
|
||||
|
||||
extern float audio_drift_threshold;
|
||||
extern float dts_delta_threshold;
|
||||
@@ -484,14 +472,12 @@ extern float dts_error_threshold;
|
||||
extern int audio_volume;
|
||||
extern int audio_sync_method;
|
||||
extern int video_sync_method;
|
||||
extern float frame_drop_threshold;
|
||||
extern int do_benchmark;
|
||||
extern int do_benchmark_all;
|
||||
extern int do_deinterlace;
|
||||
extern int do_hex_dump;
|
||||
extern int do_pkt_dump;
|
||||
extern int copy_ts;
|
||||
extern int start_at_zero;
|
||||
extern int copy_tb;
|
||||
extern int debug_ts;
|
||||
extern int exit_on_error;
|
||||
@@ -501,7 +487,6 @@ extern int stdin_interaction;
|
||||
extern int frame_bits_per_raw_sample;
|
||||
extern AVIOContext *progress_avio;
|
||||
extern float max_error_rate;
|
||||
extern int vdpau_api_ver;
|
||||
|
||||
extern const AVIOInterruptCB int_cb;
|
||||
|
||||
@@ -517,7 +502,6 @@ void show_usage(void);
|
||||
|
||||
void opt_output_file(void *optctx, const char *filename);
|
||||
|
||||
void remove_avoptions(AVDictionary **a, AVDictionary *b);
|
||||
void assert_avoptions(AVDictionary *m);
|
||||
|
||||
int guess_input_channel_layout(InputStream *ist);
|
||||
|
@@ -52,7 +52,6 @@ DEFINE_GUID(DXVA2_ModeH264_F, 0x1b81be69, 0xa0c7,0x11d3,0xb9,0x84,0x00,0
|
||||
DEFINE_GUID(DXVADDI_Intel_ModeH264_E, 0x604F8E68, 0x4951,0x4C54,0x88,0xFE,0xAB,0xD2,0x5C,0x15,0xB3,0xD6);
|
||||
DEFINE_GUID(DXVA2_ModeVC1_D, 0x1b81beA3, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
|
||||
DEFINE_GUID(DXVA2_ModeVC1_D2010, 0x1b81beA4, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
|
||||
DEFINE_GUID(DXVA2_ModeHEVC_VLD_Main, 0x5b11d51b, 0x2f4c,0x4452,0xbc,0xc3,0x09,0xf2,0xa1,0x16,0x0c,0xc0);
|
||||
DEFINE_GUID(DXVA2_NoEncrypt, 0x1b81beD0, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
|
||||
DEFINE_GUID(GUID_NULL, 0x00000000, 0x0000,0x0000,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00);
|
||||
|
||||
@@ -81,9 +80,6 @@ static const dxva2_mode dxva2_modes[] = {
|
||||
{ &DXVA2_ModeVC1_D, AV_CODEC_ID_VC1 },
|
||||
{ &DXVA2_ModeVC1_D, AV_CODEC_ID_WMV3 },
|
||||
|
||||
/* HEVC/H.265 */
|
||||
{ &DXVA2_ModeHEVC_VLD_Main, AV_CODEC_ID_HEVC },
|
||||
|
||||
{ NULL, 0 },
|
||||
};
|
||||
|
||||
@@ -530,10 +526,6 @@ static int dxva2_create_decoder(AVCodecContext *s)
|
||||
but it causes issues for H.264 on certain AMD GPUs..... */
|
||||
if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO)
|
||||
surface_alignment = 32;
|
||||
/* the HEVC DXVA2 spec asks for 128 pixel aligned surfaces to ensure
|
||||
all coding features have enough room to work with */
|
||||
else if (s->codec_id == AV_CODEC_ID_HEVC)
|
||||
surface_alignment = 128;
|
||||
else
|
||||
surface_alignment = 16;
|
||||
|
||||
@@ -541,7 +533,7 @@ static int dxva2_create_decoder(AVCodecContext *s)
|
||||
ctx->num_surfaces = 4;
|
||||
|
||||
/* add surfaces based on number of possible refs */
|
||||
if (s->codec_id == AV_CODEC_ID_H264 || s->codec_id == AV_CODEC_ID_HEVC)
|
||||
if (s->codec_id == AV_CODEC_ID_H264)
|
||||
ctx->num_surfaces += 16;
|
||||
else
|
||||
ctx->num_surfaces += 2;
|
||||
|
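For reference, the surface-pool policy visible in the dxva2_create_decoder() hunk above, condensed into two standalone helpers. This is a sketch only: the helper names are invented here, and it follows the side of the diff that knows about HEVC.

#include <libavcodec/avcodec.h>

static int dxva2_surface_alignment(enum AVCodecID id)
{
    if (id == AV_CODEC_ID_MPEG2VIDEO)
        return 32;   /* MPEG-2 wants extra alignment, see the comment in the hunk */
    else if (id == AV_CODEC_ID_HEVC)
        return 128;  /* the HEVC DXVA2 spec asks for 128-pixel aligned surfaces */
    else
        return 16;
}

static int dxva2_num_surfaces(enum AVCodecID id)
{
    int n = 4;       /* base number of work surfaces */
    if (id == AV_CODEC_ID_H264 || id == AV_CODEC_ID_HEVC)
        n += 16;     /* room for the possible reference frames */
    else
        n += 2;
    return n;
}
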
@@ -275,7 +275,7 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
|
||||
av_assert0(ist);
|
||||
|
||||
ist->discard = 0;
|
||||
ist->decoding_needed |= DECODING_FOR_FILTER;
|
||||
ist->decoding_needed++;
|
||||
ist->st->discard = AVDISCARD_NONE;
|
||||
|
||||
GROW_ARRAY(fg->inputs, fg->nb_inputs);
|
||||
@@ -383,8 +383,9 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
|
||||
snprintf(name, sizeof(name), "pixel format for output stream %d:%d",
|
||||
ost->file_index, ost->index);
|
||||
ret = avfilter_graph_create_filter(&filter,
|
||||
avfilter_get_by_name("format"),
|
||||
"format", pix_fmts, NULL, fg->graph);
|
||||
avfilter_get_by_name("format"),
|
||||
"format", pix_fmts, NULL,
|
||||
fg->graph);
|
||||
av_freep(&pix_fmts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
@@ -619,7 +620,6 @@ static int sub2video_prepare(InputStream *ist)
|
||||
ist->sub2video.frame = av_frame_alloc();
|
||||
if (!ist->sub2video.frame)
|
||||
return AVERROR(ENOMEM);
|
||||
ist->sub2video.last_pts = INT64_MIN;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -637,7 +637,6 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
AVBPrint args;
|
||||
char name[255];
|
||||
int ret, pad_idx = 0;
|
||||
int64_t tsoffset = 0;
|
||||
|
||||
if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot connect video filter to audio input\n");
|
||||
@@ -712,14 +711,8 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
|
||||
snprintf(name, sizeof(name), "trim for input stream %d:%d",
|
||||
ist->file_index, ist->st->index);
|
||||
if (copy_ts) {
|
||||
tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
|
||||
if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
|
||||
tsoffset += f->ctx->start_time;
|
||||
}
|
||||
ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
|
||||
AV_NOPTS_VALUE : tsoffset, f->recording_time,
|
||||
&last_filter, &pad_idx, name);
|
||||
AV_NOPTS_VALUE : 0, f->recording_time, &last_filter, &pad_idx, name);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@@ -738,7 +731,6 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
AVBPrint args;
|
||||
char name[255];
|
||||
int ret, pad_idx = 0;
|
||||
int64_t tsoffset = 0;
|
||||
|
||||
if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot connect audio filter to non audio input\n");
|
||||
@@ -821,14 +813,8 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
|
||||
snprintf(name, sizeof(name), "trim for input stream %d:%d",
|
||||
ist->file_index, ist->st->index);
|
||||
if (copy_ts) {
|
||||
tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
|
||||
if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
|
||||
tsoffset += f->ctx->start_time;
|
||||
}
|
||||
ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
|
||||
AV_NOPTS_VALUE : tsoffset, f->recording_time,
|
||||
&last_filter, &pad_idx, name);
|
||||
AV_NOPTS_VALUE : 0, f->recording_time, &last_filter, &pad_idx, name);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@@ -844,12 +830,6 @@ static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
av_freep(&ifilter->name);
|
||||
DESCRIBE_FILTER_LINK(ifilter, in, 1);
|
||||
|
||||
if (!ifilter->ist->dec) {
|
||||
av_log(NULL, AV_LOG_ERROR,
|
||||
"No decoder for stream #%d:%d, filtering impossible\n",
|
||||
ifilter->ist->file_index, ifilter->ist->st->index);
|
||||
return AVERROR_DECODER_NOT_FOUND;
|
||||
}
|
||||
switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
|
||||
case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
|
||||
case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
|
||||
@@ -912,11 +892,8 @@ int configure_filtergraph(FilterGraph *fg)
|
||||
init_input_filter(fg, cur);
|
||||
|
||||
for (cur = inputs, i = 0; cur; cur = cur->next, i++)
|
||||
if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0) {
|
||||
avfilter_inout_free(&inputs);
|
||||
avfilter_inout_free(&outputs);
|
||||
if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0)
|
||||
return ret;
|
||||
}
|
||||
avfilter_inout_free(&inputs);
|
||||
|
||||
if (!init || simple) {
|
||||
|
238
ffmpeg_opt.c
@@ -77,7 +77,6 @@ const HWAccel hwaccels[] = {
|
||||
};
|
||||
|
||||
char *vstats_filename;
|
||||
char *sdp_filename;
|
||||
|
||||
float audio_drift_threshold = 0.1;
|
||||
float dts_delta_threshold = 10;
|
||||
@@ -86,14 +85,12 @@ float dts_error_threshold = 3600*30;
|
||||
int audio_volume = 256;
|
||||
int audio_sync_method = 0;
|
||||
int video_sync_method = VSYNC_AUTO;
|
||||
float frame_drop_threshold = 0;
|
||||
int do_deinterlace = 0;
|
||||
int do_benchmark = 0;
|
||||
int do_benchmark_all = 0;
|
||||
int do_hex_dump = 0;
|
||||
int do_pkt_dump = 0;
|
||||
int copy_ts = 0;
|
||||
int start_at_zero = 0;
|
||||
int copy_tb = -1;
|
||||
int debug_ts = 0;
|
||||
int exit_on_error = 0;
|
||||
@@ -233,8 +230,6 @@ static int opt_map(void *optctx, const char *opt, const char *arg)
|
||||
arg++;
|
||||
}
|
||||
map = av_strdup(arg);
|
||||
if (!map)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
/* parse sync stream first, just pick first matching stream */
|
||||
if (sync = strchr(map, ',')) {
|
||||
@@ -383,13 +378,6 @@ static int opt_map_channel(void *optctx, const char *opt, const char *arg)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int opt_sdp_file(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
av_free(sdp_filename);
|
||||
sdp_filename = av_strdup(arg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse a metadata specifier passed as 'arg' parameter.
|
||||
* @param arg metadata string to parse
|
||||
@@ -520,8 +508,7 @@ static int opt_recording_timestamp(void *optctx, const char *opt, const char *ar
|
||||
char buf[128];
|
||||
int64_t recording_timestamp = parse_time_or_die(opt, arg, 0) / 1E6;
|
||||
struct tm time = *gmtime((time_t*)&recording_timestamp);
|
||||
if (!strftime(buf, sizeof(buf), "creation_time=%Y-%m-%dT%H:%M:%S%z", &time))
|
||||
return -1;
|
||||
strftime(buf, sizeof(buf), "creation_time=%FT%T%z", &time);
|
||||
parse_option(o, "metadata", buf, options);
|
||||
|
||||
av_log(NULL, AV_LOG_WARNING, "%s is deprecated, set the 'creation_time' metadata "
|
||||
@@ -715,7 +702,7 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
|
||||
MATCH_PER_STREAM_OPT(fix_sub_duration, i, ist->fix_sub_duration, ic, st);
|
||||
MATCH_PER_STREAM_OPT(canvas_sizes, str, canvas_size, ic, st);
|
||||
if (canvas_size &&
|
||||
av_parse_video_size(&ist->dec_ctx->width, &ist->dec_ctx->height, canvas_size) < 0) {
|
||||
av_parse_video_size(&dec->width, &dec->height, canvas_size) < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Invalid canvas size: %s.\n", canvas_size);
|
||||
exit_program(1);
|
||||
}
|
||||
@@ -798,6 +785,7 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
AVInputFormat *file_iformat = NULL;
|
||||
int err, i, ret;
|
||||
int64_t timestamp;
|
||||
uint8_t buf[128];
|
||||
AVDictionary **opts;
|
||||
AVDictionary *unused_opts = NULL;
|
||||
AVDictionaryEntry *e = NULL;
|
||||
@@ -805,8 +793,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
char * video_codec_name = NULL;
|
||||
char * audio_codec_name = NULL;
|
||||
char *subtitle_codec_name = NULL;
|
||||
char * data_codec_name = NULL;
|
||||
int scan_all_pmts_set = 0;
|
||||
|
||||
if (o->format) {
|
||||
if (!(file_iformat = av_find_input_format(o->format))) {
|
||||
@@ -828,7 +814,8 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
exit_program(1);
|
||||
}
|
||||
if (o->nb_audio_sample_rate) {
|
||||
av_dict_set_int(&o->g->format_opts, "sample_rate", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i, 0);
|
||||
snprintf(buf, sizeof(buf), "%d", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i);
|
||||
av_dict_set(&o->g->format_opts, "sample_rate", buf, 0);
|
||||
}
|
||||
if (o->nb_audio_channels) {
|
||||
/* because we set audio_channels based on both the "ac" and
|
||||
@@ -837,7 +824,9 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
if (file_iformat && file_iformat->priv_class &&
|
||||
av_opt_find(&file_iformat->priv_class, "channels", NULL, 0,
|
||||
AV_OPT_SEARCH_FAKE_OBJ)) {
|
||||
av_dict_set_int(&o->g->format_opts, "channels", o->audio_channels[o->nb_audio_channels - 1].u.i, 0);
|
||||
snprintf(buf, sizeof(buf), "%d",
|
||||
o->audio_channels[o->nb_audio_channels - 1].u.i);
|
||||
av_dict_set(&o->g->format_opts, "channels", buf, 0);
|
||||
}
|
||||
}
|
||||
if (o->nb_frame_rates) {
|
||||
@@ -859,7 +848,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
MATCH_PER_TYPE_OPT(codec_names, str, video_codec_name, ic, "v");
|
||||
MATCH_PER_TYPE_OPT(codec_names, str, audio_codec_name, ic, "a");
|
||||
MATCH_PER_TYPE_OPT(codec_names, str, subtitle_codec_name, ic, "s");
|
||||
MATCH_PER_TYPE_OPT(codec_names, str, data_codec_name, ic, "d");
|
||||
|
||||
ic->video_codec_id = video_codec_name ?
|
||||
find_codec_or_die(video_codec_name , AVMEDIA_TYPE_VIDEO , 0)->id : AV_CODEC_ID_NONE;
|
||||
@@ -867,8 +855,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0)->id : AV_CODEC_ID_NONE;
|
||||
ic->subtitle_codec_id= subtitle_codec_name ?
|
||||
find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0)->id : AV_CODEC_ID_NONE;
|
||||
ic->data_codec_id = data_codec_name ?
|
||||
find_codec_or_die(data_codec_name, AVMEDIA_TYPE_DATA, 0)->id : AV_CODEC_ID_NONE;
|
||||
|
||||
if (video_codec_name)
|
||||
av_format_set_video_codec (ic, find_codec_or_die(video_codec_name , AVMEDIA_TYPE_VIDEO , 0));
|
||||
@@ -876,25 +862,16 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
av_format_set_audio_codec (ic, find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0));
|
||||
if (subtitle_codec_name)
|
||||
av_format_set_subtitle_codec(ic, find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0));
|
||||
if (data_codec_name)
|
||||
av_format_set_data_codec(ic, find_codec_or_die(data_codec_name, AVMEDIA_TYPE_DATA, 0));
|
||||
|
||||
ic->flags |= AVFMT_FLAG_NONBLOCK;
|
||||
ic->interrupt_callback = int_cb;
|
||||
|
||||
if (!av_dict_get(o->g->format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
|
||||
av_dict_set(&o->g->format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
|
||||
scan_all_pmts_set = 1;
|
||||
}
|
||||
/* open the input file with generic avformat function */
|
||||
err = avformat_open_input(&ic, filename, file_iformat, &o->g->format_opts);
|
||||
if (err < 0) {
|
||||
print_error(filename, err);
|
||||
exit_program(1);
|
||||
}
|
||||
if (scan_all_pmts_set)
|
||||
av_dict_set(&o->g->format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
|
||||
remove_avoptions(&o->g->format_opts, o->g->codec_opts);
|
||||
assert_avoptions(o->g->format_opts);
|
||||
|
||||
/* apply forced codec ids */
|
||||
@@ -947,13 +924,10 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
f->start_time = o->start_time;
|
||||
f->recording_time = o->recording_time;
|
||||
f->input_ts_offset = o->input_ts_offset;
|
||||
f->ts_offset = o->input_ts_offset - (copy_ts ? (start_at_zero && ic->start_time != AV_NOPTS_VALUE ? ic->start_time : 0) : timestamp);
|
||||
f->ts_offset = o->input_ts_offset - (copy_ts ? 0 : timestamp);
|
||||
f->nb_streams = ic->nb_streams;
|
||||
f->rate_emu = o->rate_emu;
|
||||
f->accurate_seek = o->accurate_seek;
|
||||
#if HAVE_PTHREADS
|
||||
f->thread_queue_size = o->thread_queue_size > 0 ? o->thread_queue_size : 8;
|
||||
#endif
|
||||
|
||||
/* check if all codec options have been used */
|
||||
unused_opts = strip_specifiers(o->g->codec_opts);
|
||||
@@ -1131,7 +1105,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
av_dict_set(&ost->encoder_opts, buf, arg, AV_DICT_DONT_OVERWRITE);
|
||||
av_free(buf);
|
||||
} while (!s->eof_reached);
|
||||
avio_closep(&s);
|
||||
avio_close(s);
|
||||
}
|
||||
if (ret) {
|
||||
av_log(NULL, AV_LOG_FATAL,
|
||||
@@ -1158,11 +1132,8 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
|
||||
MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
|
||||
while (bsf) {
|
||||
char *arg = NULL;
|
||||
if (next = strchr(bsf, ','))
|
||||
*next++ = 0;
|
||||
if (arg = strchr(bsf, '='))
|
||||
*arg++ = 0;
|
||||
if (!(bsfc = av_bitstream_filter_init(bsf))) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
|
||||
exit_program(1);
|
||||
@@ -1171,7 +1142,6 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
bsfc_prev->next = bsfc;
|
||||
else
|
||||
ost->bitstream_filters = bsfc;
|
||||
av_dict_set(&ost->bsf_args, bsfc->filter->name, arg, 0);
|
||||
|
||||
bsfc_prev = bsfc;
|
||||
bsf = next;
|
||||
@@ -1191,9 +1161,6 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
ost->enc_ctx->global_quality = FF_QP2LAMBDA * qscale;
|
||||
}
|
||||
|
||||
MATCH_PER_STREAM_OPT(disposition, str, ost->disposition, oc, st);
|
||||
ost->disposition = av_strdup(ost->disposition);
|
||||
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
ost->enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
@@ -1312,8 +1279,6 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
|
||||
exit_program(1);
|
||||
}
|
||||
if (frame_rate && video_sync_method == VSYNC_PASSTHROUGH)
|
||||
av_log(NULL, AV_LOG_ERROR, "Using -vsync 0 and -r can produce invalid output files\n");
|
||||
|
||||
MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
|
||||
if (frame_aspect_ratio) {
|
||||
@@ -1394,13 +1359,10 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
|
||||
exit_program(1);
|
||||
}
|
||||
/* FIXME realloc failure */
|
||||
video_enc->rc_override =
|
||||
av_realloc_array(video_enc->rc_override,
|
||||
i + 1, sizeof(RcOverride));
|
||||
if (!video_enc->rc_override) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Could not (re)allocate memory for rc_override.\n");
|
||||
exit_program(1);
|
||||
}
|
||||
av_realloc(video_enc->rc_override,
|
||||
sizeof(RcOverride) * (i + 1));
|
||||
video_enc->rc_override[i].start_frame = start;
|
||||
video_enc->rc_override[i].end_frame = end;
|
||||
if (q > 0) {
|
||||
@@ -1515,13 +1477,11 @@ static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc, in
}

if (!ist || (ist->file_index == map->file_idx && ist->st->index == map->stream_idx)) {
if (av_reallocp_array(&ost->audio_channels_map,
ost->audio_channels_mapped + 1,
sizeof(*ost->audio_channels_map)
) < 0 )
exit_program(1);

ost->audio_channels_map[ost->audio_channels_mapped++] = map->channel_idx;
if (ost->audio_channels_mapped < FF_ARRAY_ELEMS(ost->audio_channels_map))
ost->audio_channels_map[ost->audio_channels_mapped++] = map->channel_idx;
else
av_log(NULL, AV_LOG_FATAL, "Max channel mapping for output %d.%d reached\n",
ost->file_index, ost->st->index);
}
}
}
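The channel-map hunk above switches between a fixed-size map guarded by FF_ARRAY_ELEMS() and a heap-grown map resized with av_reallocp_array(), which reallocates through a pointer-to-pointer and returns a negative AVERROR code on failure. A small, self-contained sketch of that growth pattern (the names are illustrative, not the real OutputStream fields):

    #include "libavutil/mem.h"

    /* Append one channel index to a dynamically grown map. */
    static int append_channel(int **map, int *nb_mapped, int channel_idx)
    {
        int ret = av_reallocp_array(map, *nb_mapped + 1, sizeof(**map));
        if (ret < 0)
            return ret;               /* negative AVERROR code, e.g. on ENOMEM */
        (*map)[(*nb_mapped)++] = channel_idx;
        return 0;
    }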
@@ -1661,36 +1621,27 @@ static int read_ffserver_streams(OptionsContext *o, AVFormatContext *s, const ch
|
||||
AVStream *st;
|
||||
OutputStream *ost;
|
||||
AVCodec *codec;
|
||||
const char *enc_config;
|
||||
AVCodecContext *avctx;
|
||||
|
||||
codec = avcodec_find_encoder(ic->streams[i]->codec->codec_id);
|
||||
if (!codec) {
|
||||
av_log(s, AV_LOG_ERROR, "no encoder found for codec id %i\n", ic->streams[i]->codec->codec_id);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
if (codec->type == AVMEDIA_TYPE_AUDIO)
|
||||
opt_audio_codec(o, "c:a", codec->name);
|
||||
else if (codec->type == AVMEDIA_TYPE_VIDEO)
|
||||
opt_video_codec(o, "c:v", codec->name);
|
||||
ost = new_output_stream(o, s, codec->type, -1);
|
||||
st = ost->st;
|
||||
avctx = st->codec;
|
||||
ost->enc = codec;
|
||||
|
||||
avcodec_get_context_defaults3(st->codec, codec);
|
||||
enc_config = av_stream_get_recommended_encoder_configuration(ic->streams[i]);
|
||||
if (enc_config) {
|
||||
AVDictionary *opts = NULL;
|
||||
av_dict_parse_string(&opts, enc_config, "=", ",", 0);
|
||||
av_opt_set_dict2(st->codec, &opts, AV_OPT_SEARCH_CHILDREN);
|
||||
av_dict_free(&opts);
|
||||
}
|
||||
// FIXME: a more elegant solution is needed
|
||||
memcpy(st, ic->streams[i], sizeof(AVStream));
|
||||
st->cur_dts = 0;
|
||||
st->info = av_malloc(sizeof(*st->info));
|
||||
memcpy(st->info, ic->streams[i]->info, sizeof(*st->info));
|
||||
st->codec= avctx;
|
||||
avcodec_copy_context(st->codec, ic->streams[i]->codec);
|
||||
|
||||
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && !ost->stream_copy)
|
||||
choose_sample_fmt(st, codec);
|
||||
else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && !ost->stream_copy)
|
||||
choose_pixel_fmt(st, st->codec, codec, st->codec->pix_fmt);
|
||||
avcodec_copy_context(ost->enc_ctx, st->codec);
|
||||
if (enc_config)
|
||||
av_dict_parse_string(&ost->encoder_opts, enc_config, "=", ",", 0);
|
||||
}
|
||||
|
||||
avformat_close_input(&ic);
|
||||
@@ -1778,8 +1729,8 @@ static int open_output_file(OptionsContext *o, const char *filename)
if (o->stop_time != INT64_MAX && o->recording_time == INT64_MAX) {
int64_t start_time = o->start_time == AV_NOPTS_VALUE ? 0 : o->start_time;
if (o->stop_time <= start_time) {
av_log(NULL, AV_LOG_ERROR, "-to value smaller than -ss; aborting.\n");
exit_program(1);
av_log(NULL, AV_LOG_WARNING, "-to value smaller than -ss; ignoring -to.\n");
o->stop_time = INT64_MAX;
} else {
o->recording_time = o->stop_time - start_time;
}
@@ -1918,38 +1869,11 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
if (!o->subtitle_disable && (avcodec_find_encoder(oc->oformat->subtitle_codec) || subtitle_codec_name)) {
|
||||
for (i = 0; i < nb_input_streams; i++)
|
||||
if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
|
||||
AVCodecDescriptor const *input_descriptor =
|
||||
avcodec_descriptor_get(input_streams[i]->st->codec->codec_id);
|
||||
AVCodecDescriptor const *output_descriptor = NULL;
|
||||
AVCodec const *output_codec =
|
||||
avcodec_find_encoder(oc->oformat->subtitle_codec);
|
||||
int input_props = 0, output_props = 0;
|
||||
if (output_codec)
|
||||
output_descriptor = avcodec_descriptor_get(output_codec->id);
|
||||
if (input_descriptor)
|
||||
input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
|
||||
if (output_descriptor)
|
||||
output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
|
||||
if (subtitle_codec_name ||
|
||||
input_props & output_props ||
|
||||
// Map dvb teletext which has neither property to any output subtitle encoder
|
||||
input_descriptor && output_descriptor &&
|
||||
(!input_descriptor->props ||
|
||||
!output_descriptor->props)) {
|
||||
new_subtitle_stream(o, oc, i);
|
||||
break;
|
||||
}
|
||||
new_subtitle_stream(o, oc, i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
/* Data only if codec id match */
|
||||
if (!o->data_disable ) {
|
||||
enum AVCodecID codec_id = av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_DATA);
|
||||
for (i = 0; codec_id != AV_CODEC_ID_NONE && i < nb_input_streams; i++) {
|
||||
if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_DATA
|
||||
&& input_streams[i]->st->codec->codec_id == codec_id )
|
||||
new_data_stream(o, oc, i);
|
||||
}
|
||||
}
|
||||
/* do something with data? */
|
||||
} else {
|
||||
for (i = 0; i < o->nb_stream_maps; i++) {
|
||||
StreamMap *map = &o->stream_maps[i];
|
||||
@@ -2040,7 +1964,7 @@ loop_end:

p = strrchr(o->attachments[i], '/');
av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
avio_closep(&pb);
avio_close(pb);
}
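This hunk (like the preset-loading one further up) only changes how the AVIOContext is closed: avio_close() takes the context itself, while avio_closep() takes its address and also resets the caller's pointer to NULL, so a stale handle cannot be closed or used twice. A minimal sketch of the difference; the function and URL are just placeholders:

    #include "libavformat/avio.h"

    static int slurp_and_close(const char *url)
    {
        AVIOContext *pb = NULL;
        int ret = avio_open(&pb, url, AVIO_FLAG_READ);
        if (ret < 0)
            return ret;

        /* ... read the attachment or preset data from pb ... */

        avio_closep(&pb);   /* closes the context and sets pb back to NULL */
        /* avio_close(pb) would also close it, but leave pb dangling */
        return 0;
    }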
|
||||
for (i = nb_output_streams - oc->nb_streams; i < nb_output_streams; i++) { //for all streams of this output file
|
||||
@@ -2068,13 +1992,8 @@ loop_end:
|
||||
const AVClass *class = avcodec_get_class();
|
||||
const AVOption *option = av_opt_find(&class, e->key, NULL, 0,
|
||||
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
|
||||
const AVClass *fclass = avformat_get_class();
|
||||
const AVOption *foption = av_opt_find(&fclass, e->key, NULL, 0,
|
||||
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
|
||||
if (!option || foption)
|
||||
if (!option)
|
||||
continue;
|
||||
|
||||
|
||||
if (!(option->flags & AV_OPT_FLAG_ENCODING_PARAM)) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Codec AVOption %s (%s) specified for "
|
||||
"output file #%d (%s) is not an encoding option.\n", e->key,
|
||||
@@ -2119,7 +2038,9 @@ loop_end:
|
||||
assert_file_overwrite(filename);
|
||||
|
||||
if (o->mux_preload) {
|
||||
av_dict_set_int(&of->opts, "preload", o->mux_preload*AV_TIME_BASE, 0);
|
||||
uint8_t buf[64];
|
||||
snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
|
||||
av_dict_set(&of->opts, "preload", buf, 0);
|
||||
}
|
||||
oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
|
||||
|
||||
@@ -2245,8 +2166,7 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
for (j = 0; j < nb_input_files; j++) {
|
||||
for (i = 0; i < input_files[j]->nb_streams; i++) {
|
||||
AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
|
||||
if (c->codec_type != AVMEDIA_TYPE_VIDEO ||
|
||||
!c->time_base.num)
|
||||
if (c->codec_type != AVMEDIA_TYPE_VIDEO)
|
||||
continue;
|
||||
fr = c->time_base.den * 1000 / c->time_base.num;
|
||||
if (fr == 25000) {
|
||||
@@ -2279,19 +2199,19 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
|
||||
parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
|
||||
parse_option(o, "r", frame_rates[norm], options);
|
||||
opt_default(NULL, "g", norm == PAL ? "15" : "18");
|
||||
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
opt_default(NULL, "b:v", "1150000");
|
||||
opt_default(NULL, "maxrate:v", "1150000");
|
||||
opt_default(NULL, "minrate:v", "1150000");
|
||||
opt_default(NULL, "bufsize:v", "327680"); // 40*1024*8;
|
||||
av_dict_set(&o->g->codec_opts, "b:v", "1150000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "maxrate", "1150000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "minrate", "1150000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "bufsize", "327680", AV_DICT_DONT_OVERWRITE); // 40*1024*8;
|
||||
|
||||
opt_default(NULL, "b:a", "224000");
|
||||
av_dict_set(&o->g->codec_opts, "b:a", "224000", AV_DICT_DONT_OVERWRITE);
|
||||
parse_option(o, "ar", "44100", options);
|
||||
parse_option(o, "ac", "2", options);
|
||||
|
||||
opt_default(NULL, "packetsize", "2324");
|
||||
opt_default(NULL, "muxrate", "1411200"); // 2352 * 75 * 8;
|
||||
av_dict_set(&o->g->format_opts, "packetsize", "2324", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->format_opts, "muxrate", "1411200", AV_DICT_DONT_OVERWRITE); // 2352 * 75 * 8;
|
||||
|
||||
/* We have to offset the PTS, so that it is consistent with the SCR.
|
||||
SCR starts at 36000, but the first two packs contain only padding
|
||||
@@ -2308,18 +2228,18 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
|
||||
parse_option(o, "r", frame_rates[norm], options);
|
||||
parse_option(o, "pix_fmt", "yuv420p", options);
|
||||
opt_default(NULL, "g", norm == PAL ? "15" : "18");
|
||||
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
opt_default(NULL, "b:v", "2040000");
|
||||
opt_default(NULL, "maxrate:v", "2516000");
|
||||
opt_default(NULL, "minrate:v", "0"); // 1145000;
|
||||
opt_default(NULL, "bufsize:v", "1835008"); // 224*1024*8;
|
||||
opt_default(NULL, "scan_offset", "1");
|
||||
av_dict_set(&o->g->codec_opts, "b:v", "2040000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "maxrate", "2516000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "minrate", "0", AV_DICT_DONT_OVERWRITE); // 1145000;
|
||||
av_dict_set(&o->g->codec_opts, "bufsize", "1835008", AV_DICT_DONT_OVERWRITE); // 224*1024*8;
|
||||
av_dict_set(&o->g->codec_opts, "scan_offset", "1", AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
opt_default(NULL, "b:a", "224000");
|
||||
av_dict_set(&o->g->codec_opts, "b:a", "224000", AV_DICT_DONT_OVERWRITE);
|
||||
parse_option(o, "ar", "44100", options);
|
||||
|
||||
opt_default(NULL, "packetsize", "2324");
|
||||
av_dict_set(&o->g->format_opts, "packetsize", "2324", AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
} else if (!strcmp(arg, "dvd")) {
|
||||
|
||||
@@ -2330,17 +2250,17 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
|
||||
parse_option(o, "r", frame_rates[norm], options);
|
||||
parse_option(o, "pix_fmt", "yuv420p", options);
|
||||
opt_default(NULL, "g", norm == PAL ? "15" : "18");
|
||||
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
opt_default(NULL, "b:v", "6000000");
|
||||
opt_default(NULL, "maxrate:v", "9000000");
|
||||
opt_default(NULL, "minrate:v", "0"); // 1500000;
|
||||
opt_default(NULL, "bufsize:v", "1835008"); // 224*1024*8;
|
||||
av_dict_set(&o->g->codec_opts, "b:v", "6000000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "maxrate", "9000000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "minrate", "0", AV_DICT_DONT_OVERWRITE); // 1500000;
|
||||
av_dict_set(&o->g->codec_opts, "bufsize", "1835008", AV_DICT_DONT_OVERWRITE); // 224*1024*8;
|
||||
|
||||
opt_default(NULL, "packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
|
||||
opt_default(NULL, "muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
|
||||
av_dict_set(&o->g->format_opts, "packetsize", "2048", AV_DICT_DONT_OVERWRITE); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
|
||||
av_dict_set(&o->g->format_opts, "muxrate", "10080000", AV_DICT_DONT_OVERWRITE); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
|
||||
|
||||
opt_default(NULL, "b:a", "448000");
|
||||
av_dict_set(&o->g->codec_opts, "b:a", "448000", AV_DICT_DONT_OVERWRITE);
|
||||
parse_option(o, "ar", "48000", options);
|
||||
|
||||
} else if (!strncmp(arg, "dv", 2)) {
|
||||
@@ -2359,10 +2279,6 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
av_dict_copy(&o->g->codec_opts, codec_opts, AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_copy(&o->g->format_opts, format_opts, AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -2379,9 +2295,6 @@ static int opt_vstats(void *optctx, const char *opt, const char *arg)
time_t today2 = time(NULL);
struct tm *today = localtime(&today2);

if (!today)
return AVERROR(errno);

snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
today->tm_sec);
return opt_vstats_file(NULL, opt, filename);
@@ -2862,13 +2775,13 @@ const OptionDef options[] = {
|
||||
{ "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC |
|
||||
OPT_EXPERT | OPT_INPUT, { .off = OFFSET(ts_scale) },
|
||||
"set the input ts scale", "scale" },
|
||||
{ "timestamp", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_recording_timestamp },
|
||||
{ "timestamp", HAS_ARG | OPT_PERFILE, { .func_arg = opt_recording_timestamp },
|
||||
"set the recording timestamp ('now' to set the current time)", "time" },
|
||||
{ "metadata", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(metadata) },
|
||||
"add metadata", "string=string" },
|
||||
{ "dframes", HAS_ARG | OPT_PERFILE | OPT_EXPERT |
|
||||
OPT_OUTPUT, { .func_arg = opt_data_frames },
|
||||
"set the number of data frames to output", "number" },
|
||||
"set the number of data frames to record", "number" },
|
||||
{ "benchmark", OPT_BOOL | OPT_EXPERT, { &do_benchmark },
|
||||
"add timings for benchmarking" },
|
||||
{ "benchmark_all", OPT_BOOL | OPT_EXPERT, { &do_benchmark_all },
|
||||
@@ -2891,16 +2804,12 @@ const OptionDef options[] = {
|
||||
" \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
|
||||
{ "vsync", HAS_ARG | OPT_EXPERT, { opt_vsync },
|
||||
"video sync method", "" },
|
||||
{ "frame_drop_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &frame_drop_threshold },
|
||||
"frame drop threshold", "" },
|
||||
{ "async", HAS_ARG | OPT_INT | OPT_EXPERT, { &audio_sync_method },
|
||||
"audio sync method", "" },
|
||||
{ "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &audio_drift_threshold },
|
||||
"audio drift threshold", "threshold" },
|
||||
{ "copyts", OPT_BOOL | OPT_EXPERT, { ©_ts },
|
||||
"copy timestamps" },
|
||||
{ "start_at_zero", OPT_BOOL | OPT_EXPERT, { &start_at_zero },
|
||||
"shift input timestamps to start at 0 when using copyts" },
|
||||
{ "copytb", HAS_ARG | OPT_INT | OPT_EXPERT, { ©_tb },
|
||||
"copy input stream time base when stream copying", "mode" },
|
||||
{ "shortest", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
|
||||
@@ -2921,7 +2830,7 @@ const OptionDef options[] = {
|
||||
{ "copypriorss", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(copy_prior_start) },
|
||||
"copy or discard frames before start time" },
|
||||
{ "frames", OPT_INT64 | HAS_ARG | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(max_frames) },
|
||||
"set the number of frames to output", "number" },
|
||||
"set the number of frames to record", "number" },
|
||||
{ "tag", OPT_STRING | HAS_ARG | OPT_SPEC |
|
||||
OPT_EXPERT | OPT_OUTPUT | OPT_INPUT, { .off = OFFSET(codec_tags) },
|
||||
"force codec tag/fourcc", "fourcc/tag" },
|
||||
@@ -2960,16 +2869,10 @@ const OptionDef options[] = {
|
||||
{ "discard", OPT_STRING | HAS_ARG | OPT_SPEC |
|
||||
OPT_INPUT, { .off = OFFSET(discard) },
|
||||
"discard", "" },
|
||||
{ "disposition", OPT_STRING | HAS_ARG | OPT_SPEC |
|
||||
OPT_OUTPUT, { .off = OFFSET(disposition) },
|
||||
"disposition", "" },
|
||||
{ "thread_queue_size", HAS_ARG | OPT_INT | OPT_OFFSET | OPT_EXPERT | OPT_INPUT,
|
||||
{ .off = OFFSET(thread_queue_size) },
|
||||
"set the maximum number of queued packets from the demuxer" },
|
||||
|
||||
/* video options */
|
||||
{ "vframes", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_frames },
|
||||
"set the number of video frames to output", "number" },
|
||||
"set the number of video frames to record", "number" },
|
||||
{ "r", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_SPEC |
|
||||
OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_rates) },
|
||||
"set frame rate (Hz value, fraction or abbreviation)", "rate" },
|
||||
@@ -3051,13 +2954,10 @@ const OptionDef options[] = {
|
||||
{ "hwaccel_device", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
|
||||
OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_devices) },
|
||||
"select a device for HW acceleration" "devicename" },
|
||||
#if HAVE_VDPAU_X11
|
||||
{ "vdpau_api_ver", HAS_ARG | OPT_INT | OPT_EXPERT, { &vdpau_api_ver }, "" },
|
||||
#endif
|
||||
|
||||
/* audio options */
|
||||
{ "aframes", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_frames },
|
||||
"set the number of audio frames to output", "number" },
|
||||
"set the number of audio frames to record", "number" },
|
||||
{ "aq", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_qscale },
|
||||
"set audio quality (codec-specific)", "quality", },
|
||||
{ "ar", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC |
|
||||
@@ -3113,8 +3013,6 @@ const OptionDef options[] = {
|
||||
"set the initial demux-decode delay", "seconds" },
|
||||
{ "override_ffserver", OPT_BOOL | OPT_EXPERT | OPT_OUTPUT, { &override_ffserver },
|
||||
"override the options from ffserver", "" },
|
||||
{ "sdp_file", HAS_ARG | OPT_EXPERT | OPT_OUTPUT, { opt_sdp_file },
|
||||
"specify a file in which to print sdp information", "file" },
|
||||
|
||||
{ "bsf", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(bitstream_filters) },
|
||||
"A comma-separated list of bitstream filters", "bitstream_filters" },
|
||||
|
@@ -77,8 +77,6 @@ static int vda_retrieve_data(AVCodecContext *s, AVFrame *frame)
frame->width, frame->height);

ret = av_frame_copy_props(vda->tmp_frame, frame);
CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);

if (ret < 0)
return ret;
@@ -42,11 +42,9 @@ typedef struct VDPAUContext {
|
||||
VdpGetErrorString *get_error_string;
|
||||
VdpGetInformationString *get_information_string;
|
||||
VdpDeviceDestroy *device_destroy;
|
||||
#if 1 // for ffmpegs older vdpau API, not the oldest though
|
||||
VdpDecoderCreate *decoder_create;
|
||||
VdpDecoderDestroy *decoder_destroy;
|
||||
VdpDecoderRender *decoder_render;
|
||||
#endif
|
||||
VdpVideoSurfaceCreate *video_surface_create;
|
||||
VdpVideoSurfaceDestroy *video_surface_destroy;
|
||||
VdpVideoSurfaceGetBitsYCbCr *video_surface_get_bits;
|
||||
@@ -59,8 +57,6 @@ typedef struct VDPAUContext {
|
||||
VdpYCbCrFormat vdpau_format;
|
||||
} VDPAUContext;
|
||||
|
||||
int vdpau_api_ver = 2;
|
||||
|
||||
static void vdpau_uninit(AVCodecContext *s)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
@@ -100,14 +96,9 @@ static int vdpau_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
|
||||
VDPAUContext *ctx = ist->hwaccel_ctx;
|
||||
VdpVideoSurface *surface;
|
||||
VdpStatus err;
|
||||
VdpChromaType chroma;
|
||||
uint32_t width, height;
|
||||
|
||||
av_assert0(frame->format == AV_PIX_FMT_VDPAU);
|
||||
|
||||
if (av_vdpau_get_surface_parameters(s, &chroma, &width, &height))
|
||||
return AVERROR(ENOSYS);
|
||||
|
||||
surface = av_malloc(sizeof(*surface));
|
||||
if (!surface)
|
||||
return AVERROR(ENOMEM);
|
||||
@@ -123,8 +114,8 @@ static int vdpau_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
|
||||
// properly we should keep a pool of surfaces instead of creating
|
||||
// them anew for each frame, but since we don't care about speed
|
||||
// much in this code, we don't bother
|
||||
err = ctx->video_surface_create(ctx->device, chroma, width, height,
|
||||
surface);
|
||||
err = ctx->video_surface_create(ctx->device, VDP_CHROMA_TYPE_420,
|
||||
frame->width, frame->height, surface);
|
||||
if (err != VDP_STATUS_OK) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error allocating a VDPAU video surface: %s\n",
|
||||
ctx->get_error_string(err));
|
||||
@@ -248,11 +239,9 @@ do {
|
||||
GET_CALLBACK(VDP_FUNC_ID_GET_ERROR_STRING, get_error_string);
|
||||
GET_CALLBACK(VDP_FUNC_ID_GET_INFORMATION_STRING, get_information_string);
|
||||
GET_CALLBACK(VDP_FUNC_ID_DEVICE_DESTROY, device_destroy);
|
||||
if (vdpau_api_ver == 1) {
|
||||
GET_CALLBACK(VDP_FUNC_ID_DECODER_CREATE, decoder_create);
|
||||
GET_CALLBACK(VDP_FUNC_ID_DECODER_DESTROY, decoder_destroy);
|
||||
GET_CALLBACK(VDP_FUNC_ID_DECODER_RENDER, decoder_render);
|
||||
}
|
||||
GET_CALLBACK(VDP_FUNC_ID_DECODER_CREATE, decoder_create);
|
||||
GET_CALLBACK(VDP_FUNC_ID_DECODER_DESTROY, decoder_destroy);
|
||||
GET_CALLBACK(VDP_FUNC_ID_DECODER_RENDER, decoder_render);
|
||||
GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_CREATE, video_surface_create);
|
||||
GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_DESTROY, video_surface_destroy);
|
||||
GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR, video_surface_get_bits);
|
||||
@@ -281,16 +270,12 @@ do {
|
||||
ctx->vdpau_format = vdpau_formats[i][0];
|
||||
ctx->pix_fmt = vdpau_formats[i][1];
|
||||
|
||||
if (vdpau_api_ver == 1) {
|
||||
vdpau_ctx = av_vdpau_alloc_context();
|
||||
if (!vdpau_ctx)
|
||||
goto fail;
|
||||
vdpau_ctx->render = ctx->decoder_render;
|
||||
|
||||
s->hwaccel_context = vdpau_ctx;
|
||||
} else
|
||||
if (av_vdpau_bind_context(s, ctx->device, ctx->get_proc_address, 0))
|
||||
vdpau_ctx = av_vdpau_alloc_context();
|
||||
if (!vdpau_ctx)
|
||||
goto fail;
|
||||
vdpau_ctx->render = ctx->decoder_render;
|
||||
|
||||
s->hwaccel_context = vdpau_ctx;
|
||||
|
||||
ctx->get_information_string(&vendor);
|
||||
av_log(NULL, AV_LOG_VERBOSE, "Using VDPAU -- %s -- on X11 display %s, "
|
||||
@@ -306,7 +291,7 @@ fail:
return AVERROR(EINVAL);
}

static int vdpau_old_init(AVCodecContext *s)
int vdpau_init(AVCodecContext *s)
{
InputStream *ist = s->opaque;
int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
@@ -348,22 +333,3 @@ static int vdpau_old_init(AVCodecContext *s)

return 0;
}

int vdpau_init(AVCodecContext *s)
{
InputStream *ist = s->opaque;

if (vdpau_api_ver == 1)
return vdpau_old_init(s);

if (!ist->hwaccel_ctx) {
int ret = vdpau_alloc(s);
if (ret < 0)
return ret;
}

ist->hwaccel_get_buffer = vdpau_get_buffer;
ist->hwaccel_retrieve_data = vdpau_retrieve_data;

return 0;
}
ffprobe.c (196 changed lines)
@@ -33,7 +33,6 @@
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/bprint.h"
|
||||
#include "libavutil/hash.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "libavutil/dict.h"
|
||||
@@ -66,9 +65,6 @@ static int do_show_stream_disposition = 0;
|
||||
static int do_show_data = 0;
|
||||
static int do_show_program_version = 0;
|
||||
static int do_show_library_versions = 0;
|
||||
static int do_show_pixel_formats = 0;
|
||||
static int do_show_pixel_format_flags = 0;
|
||||
static int do_show_pixel_format_components = 0;
|
||||
|
||||
static int do_show_chapter_tags = 0;
|
||||
static int do_show_format_tags = 0;
|
||||
@@ -84,9 +80,8 @@ static int show_private_data = 1;
|
||||
|
||||
static char *print_format;
|
||||
static char *stream_specifier;
|
||||
static char *show_data_hash;
|
||||
|
||||
typedef struct ReadInterval {
|
||||
typedef struct {
|
||||
int id; ///< identifier
|
||||
int64_t start, end; ///< start, end in second/AV_TIME_BASE units
|
||||
int has_start, has_end;
|
||||
@@ -128,18 +123,11 @@ typedef enum {
|
||||
SECTION_ID_FRAME,
|
||||
SECTION_ID_FRAMES,
|
||||
SECTION_ID_FRAME_TAGS,
|
||||
SECTION_ID_FRAME_SIDE_DATA_LIST,
|
||||
SECTION_ID_FRAME_SIDE_DATA,
|
||||
SECTION_ID_LIBRARY_VERSION,
|
||||
SECTION_ID_LIBRARY_VERSIONS,
|
||||
SECTION_ID_PACKET,
|
||||
SECTION_ID_PACKETS,
|
||||
SECTION_ID_PACKETS_AND_FRAMES,
|
||||
SECTION_ID_PIXEL_FORMAT,
|
||||
SECTION_ID_PIXEL_FORMAT_FLAGS,
|
||||
SECTION_ID_PIXEL_FORMAT_COMPONENT,
|
||||
SECTION_ID_PIXEL_FORMAT_COMPONENTS,
|
||||
SECTION_ID_PIXEL_FORMATS,
|
||||
SECTION_ID_PROGRAM_STREAM_DISPOSITION,
|
||||
SECTION_ID_PROGRAM_STREAM_TAGS,
|
||||
SECTION_ID_PROGRAM,
|
||||
@@ -164,20 +152,13 @@ static struct section sections[] = {
|
||||
[SECTION_ID_FORMAT] = { SECTION_ID_FORMAT, "format", 0, { SECTION_ID_FORMAT_TAGS, -1 } },
|
||||
[SECTION_ID_FORMAT_TAGS] = { SECTION_ID_FORMAT_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "format_tags" },
|
||||
[SECTION_ID_FRAMES] = { SECTION_ID_FRAMES, "frames", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME, SECTION_ID_SUBTITLE, -1 } },
|
||||
[SECTION_ID_FRAME] = { SECTION_ID_FRAME, "frame", 0, { SECTION_ID_FRAME_TAGS, SECTION_ID_FRAME_SIDE_DATA_LIST, -1 } },
|
||||
[SECTION_ID_FRAME] = { SECTION_ID_FRAME, "frame", 0, { SECTION_ID_FRAME_TAGS, -1 } },
|
||||
[SECTION_ID_FRAME_TAGS] = { SECTION_ID_FRAME_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "frame_tags" },
|
||||
[SECTION_ID_FRAME_SIDE_DATA_LIST] ={ SECTION_ID_FRAME_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA, -1 } },
|
||||
[SECTION_ID_FRAME_SIDE_DATA] = { SECTION_ID_FRAME_SIDE_DATA, "side_data", 0, { -1 } },
|
||||
[SECTION_ID_LIBRARY_VERSIONS] = { SECTION_ID_LIBRARY_VERSIONS, "library_versions", SECTION_FLAG_IS_ARRAY, { SECTION_ID_LIBRARY_VERSION, -1 } },
|
||||
[SECTION_ID_LIBRARY_VERSION] = { SECTION_ID_LIBRARY_VERSION, "library_version", 0, { -1 } },
|
||||
[SECTION_ID_PACKETS] = { SECTION_ID_PACKETS, "packets", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET, -1} },
|
||||
[SECTION_ID_PACKETS_AND_FRAMES] = { SECTION_ID_PACKETS_AND_FRAMES, "packets_and_frames", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET, -1} },
|
||||
[SECTION_ID_PACKET] = { SECTION_ID_PACKET, "packet", 0, { -1 } },
|
||||
[SECTION_ID_PIXEL_FORMATS] = { SECTION_ID_PIXEL_FORMATS, "pixel_formats", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PIXEL_FORMAT, -1 } },
|
||||
[SECTION_ID_PIXEL_FORMAT] = { SECTION_ID_PIXEL_FORMAT, "pixel_format", 0, { SECTION_ID_PIXEL_FORMAT_FLAGS, SECTION_ID_PIXEL_FORMAT_COMPONENTS, -1 } },
|
||||
[SECTION_ID_PIXEL_FORMAT_FLAGS] = { SECTION_ID_PIXEL_FORMAT_FLAGS, "flags", 0, { -1 }, .unique_name = "pixel_format_flags" },
|
||||
[SECTION_ID_PIXEL_FORMAT_COMPONENTS] = { SECTION_ID_PIXEL_FORMAT_COMPONENTS, "components", SECTION_FLAG_IS_ARRAY, {SECTION_ID_PIXEL_FORMAT_COMPONENT, -1 }, .unique_name = "pixel_format_components" },
|
||||
[SECTION_ID_PIXEL_FORMAT_COMPONENT] = { SECTION_ID_PIXEL_FORMAT_COMPONENT, "component", 0, { -1 } },
|
||||
[SECTION_ID_PROGRAM_STREAM_DISPOSITION] = { SECTION_ID_PROGRAM_STREAM_DISPOSITION, "disposition", 0, { -1 }, .unique_name = "program_stream_disposition" },
|
||||
[SECTION_ID_PROGRAM_STREAM_TAGS] = { SECTION_ID_PROGRAM_STREAM_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "program_stream_tags" },
|
||||
[SECTION_ID_PROGRAM] = { SECTION_ID_PROGRAM, "program", 0, { SECTION_ID_PROGRAM_TAGS, SECTION_ID_PROGRAM_STREAMS, -1 } },
|
||||
@@ -188,8 +169,7 @@ static struct section sections[] = {
|
||||
[SECTION_ID_PROGRAMS] = { SECTION_ID_PROGRAMS, "programs", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PROGRAM, -1 } },
|
||||
[SECTION_ID_ROOT] = { SECTION_ID_ROOT, "root", SECTION_FLAG_IS_WRAPPER,
|
||||
{ SECTION_ID_CHAPTERS, SECTION_ID_FORMAT, SECTION_ID_FRAMES, SECTION_ID_PROGRAMS, SECTION_ID_STREAMS,
|
||||
SECTION_ID_PACKETS, SECTION_ID_ERROR, SECTION_ID_PROGRAM_VERSION, SECTION_ID_LIBRARY_VERSIONS,
|
||||
SECTION_ID_PIXEL_FORMATS, -1} },
|
||||
SECTION_ID_PACKETS, SECTION_ID_ERROR, SECTION_ID_PROGRAM_VERSION, SECTION_ID_LIBRARY_VERSIONS, -1} },
|
||||
[SECTION_ID_STREAMS] = { SECTION_ID_STREAMS, "streams", SECTION_FLAG_IS_ARRAY, { SECTION_ID_STREAM, -1 } },
|
||||
[SECTION_ID_STREAM] = { SECTION_ID_STREAM, "stream", 0, { SECTION_ID_STREAM_DISPOSITION, SECTION_ID_STREAM_TAGS, -1 } },
|
||||
[SECTION_ID_STREAM_DISPOSITION] = { SECTION_ID_STREAM_DISPOSITION, "disposition", 0, { -1 }, .unique_name = "stream_disposition" },
|
||||
@@ -203,8 +183,6 @@ static const OptionDef *options;
|
||||
static const char *input_filename;
|
||||
static AVInputFormat *iformat = NULL;
|
||||
|
||||
static struct AVHashContext *hash;
|
||||
|
||||
static const char *const binary_unit_prefixes [] = { "", "Ki", "Mi", "Gi", "Ti", "Pi" };
|
||||
static const char *const decimal_unit_prefixes[] = { "", "K" , "M" , "G" , "T" , "P" };
|
||||
|
||||
@@ -338,7 +316,7 @@ struct WriterContext {
|
||||
unsigned int nb_section_frame; ///< number of the frame section in case we are in "packets_and_frames" section
|
||||
unsigned int nb_section_packet_frame; ///< nb_section_packet or nb_section_frame according if is_packets_and_frames
|
||||
|
||||
int string_validation;
|
||||
StringValidation string_validation;
|
||||
char *string_validation_replacement;
|
||||
unsigned int string_validation_utf8_flags;
|
||||
};
|
||||
@@ -703,21 +681,6 @@ static void writer_print_data(WriterContext *wctx, const char *name,
av_bprint_finalize(&bp, NULL);
}

static void writer_print_data_hash(WriterContext *wctx, const char *name,
uint8_t *data, int size)
{
char *p, buf[AV_HASH_MAX_SIZE * 2 + 64] = { 0 };

if (!hash)
return;
av_hash_init(hash);
av_hash_update(hash, data, size);
snprintf(buf, sizeof(buf), "%s:", av_hash_get_name(hash));
p = buf + strlen(buf);
av_hash_final_hex(hash, p, buf + sizeof(buf) - p);
writer_print_string(wctx, name, buf, 0);
}

#define MAX_REGISTERED_WRITERS_NB 64

static const Writer *registered_writers[MAX_REGISTERED_WRITERS_NB + 1];
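writer_print_data_hash() above feeds ffprobe's -show_data_hash output through the generic libavutil hash API (the global hash context is allocated from the option value later in main()). A stand-alone sketch of the same alloc/init/update/final sequence; the "SHA256" algorithm name is only an example of what the option may carry:

    #include <stdint.h>
    #include <stdio.h>
    #include "libavutil/hash.h"

    static int print_data_hash(const uint8_t *data, int size)
    {
        struct AVHashContext *hash = NULL;
        char hex[AV_HASH_MAX_SIZE * 2 + 1];
        int ret = av_hash_alloc(&hash, "SHA256");  /* fails for unknown names */
        if (ret < 0)
            return ret;

        av_hash_init(hash);
        av_hash_update(hash, data, size);
        av_hash_final_hex(hash, (uint8_t *)hex, sizeof(hex));
        printf("%s:%s\n", av_hash_get_name(hash), hex);

        av_hash_freep(&hash);
        return 0;
    }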
@@ -1192,7 +1155,7 @@ static const Writer flat_writer = {
|
||||
|
||||
/* INI format output */
|
||||
|
||||
typedef struct INIContext {
|
||||
typedef struct {
|
||||
const AVClass *class;
|
||||
int hierarchical;
|
||||
} INIContext;
|
||||
@@ -1296,7 +1259,7 @@ static const Writer ini_writer = {
|
||||
|
||||
/* JSON output */
|
||||
|
||||
typedef struct JSONContext {
|
||||
typedef struct {
|
||||
const AVClass *class;
|
||||
int indent_level;
|
||||
int compact;
|
||||
@@ -1458,7 +1421,7 @@ static const Writer json_writer = {
|
||||
|
||||
/* XML output */
|
||||
|
||||
typedef struct XMLContext {
|
||||
typedef struct {
|
||||
const AVClass *class;
|
||||
int within_tag;
|
||||
int indent_level;
|
||||
@@ -1725,7 +1688,6 @@ static void show_packet(WriterContext *w, AVFormatContext *fmt_ctx, AVPacket *pk
|
||||
print_fmt("flags", "%c", pkt->flags & AV_PKT_FLAG_KEY ? 'K' : '_');
|
||||
if (do_show_data)
|
||||
writer_print_data(w, "data", pkt->data, pkt->size);
|
||||
writer_print_data_hash(w, "data_hash", pkt->data, pkt->size);
|
||||
writer_print_section_footer(w);
|
||||
|
||||
av_bprint_finalize(&pbuf, NULL);
|
||||
@@ -1760,7 +1722,6 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
|
||||
{
|
||||
AVBPrint pbuf;
|
||||
const char *s;
|
||||
int i;
|
||||
|
||||
av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
|
||||
|
||||
@@ -1823,20 +1784,6 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
|
||||
}
|
||||
if (do_show_frame_tags)
|
||||
show_tags(w, av_frame_get_metadata(frame), SECTION_ID_FRAME_TAGS);
|
||||
if (frame->nb_side_data) {
|
||||
writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_LIST);
|
||||
for (i = 0; i < frame->nb_side_data; i++) {
|
||||
AVFrameSideData *sd = frame->side_data[i];
|
||||
const char *name;
|
||||
|
||||
writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA);
|
||||
name = av_frame_side_data_name(sd->type);
|
||||
print_str("side_data_type", name ? name : "unknown");
|
||||
print_int("side_data_size", sd->size);
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
|
||||
writer_print_section_footer(w);
|
||||
|
||||
@@ -2112,28 +2059,12 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
|
||||
else print_str_opt("pix_fmt", "unknown");
|
||||
print_int("level", dec_ctx->level);
|
||||
if (dec_ctx->color_range != AVCOL_RANGE_UNSPECIFIED)
|
||||
print_str ("color_range", av_color_range_name(dec_ctx->color_range));
|
||||
print_str ("color_range", dec_ctx->color_range == AVCOL_RANGE_MPEG ? "tv": "pc");
|
||||
else
|
||||
print_str_opt("color_range", "N/A");
|
||||
s = av_get_colorspace_name(dec_ctx->colorspace);
|
||||
if (s) print_str ("color_space", s);
|
||||
else print_str_opt("color_space", "unknown");
|
||||
|
||||
if (dec_ctx->color_trc != AVCOL_TRC_UNSPECIFIED)
|
||||
print_str("color_transfer", av_color_transfer_name(dec_ctx->color_trc));
|
||||
else
|
||||
print_str_opt("color_transfer", av_color_transfer_name(dec_ctx->color_trc));
|
||||
|
||||
if (dec_ctx->color_primaries != AVCOL_PRI_UNSPECIFIED)
|
||||
print_str("color_primaries", av_color_primaries_name(dec_ctx->color_primaries));
|
||||
else
|
||||
print_str_opt("color_primaries", av_color_primaries_name(dec_ctx->color_primaries));
|
||||
|
||||
if (dec_ctx->chroma_sample_location != AVCHROMA_LOC_UNSPECIFIED)
|
||||
print_str("chroma_location", av_chroma_location_name(dec_ctx->chroma_sample_location));
|
||||
else
|
||||
print_str_opt("chroma_location", av_chroma_location_name(dec_ctx->chroma_sample_location));
|
||||
|
||||
if (dec_ctx->timecode_frame_start >= 0) {
|
||||
char tcbuf[AV_TIMECODE_STR_SIZE];
|
||||
av_timecode_make_mpeg_tc_string(tcbuf, dec_ctx->timecode_frame_start);
|
||||
@@ -2141,7 +2072,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
|
||||
} else {
|
||||
print_str_opt("timecode", "N/A");
|
||||
}
|
||||
print_int("refs", dec_ctx->refs);
|
||||
break;
|
||||
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
@@ -2201,8 +2131,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
|
||||
else print_str_opt("bit_rate", "N/A");
|
||||
if (dec_ctx->rc_max_rate > 0) print_val ("max_bit_rate", dec_ctx->rc_max_rate, unit_bit_per_second_str);
|
||||
else print_str_opt("max_bit_rate", "N/A");
|
||||
if (dec_ctx->bits_per_raw_sample > 0) print_fmt("bits_per_raw_sample", "%d", dec_ctx->bits_per_raw_sample);
|
||||
else print_str_opt("bits_per_raw_sample", "N/A");
|
||||
if (stream->nb_frames) print_fmt ("nb_frames", "%"PRId64, stream->nb_frames);
|
||||
else print_str_opt("nb_frames", "N/A");
|
||||
if (nb_streams_frames[stream_idx]) print_fmt ("nb_read_frames", "%"PRIu64, nb_streams_frames[stream_idx]);
|
||||
@@ -2212,8 +2140,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
|
||||
if (do_show_data)
|
||||
writer_print_data(w, "extradata", dec_ctx->extradata,
|
||||
dec_ctx->extradata_size);
|
||||
writer_print_data_hash(w, "extradata_hash", dec_ctx->extradata,
|
||||
dec_ctx->extradata_size);
|
||||
|
||||
/* Print disposition information */
|
||||
#define PRINT_DISPOSITION(flagname, name) do { \
|
||||
@@ -2387,20 +2313,12 @@ static int open_input_file(AVFormatContext **fmt_ctx_ptr, const char *filename)
|
||||
AVFormatContext *fmt_ctx = NULL;
|
||||
AVDictionaryEntry *t;
|
||||
AVDictionary **opts;
|
||||
int scan_all_pmts_set = 0;
|
||||
|
||||
if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
|
||||
av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
|
||||
scan_all_pmts_set = 1;
|
||||
}
|
||||
if ((err = avformat_open_input(&fmt_ctx, filename,
|
||||
iformat, &format_opts)) < 0) {
|
||||
print_error(filename, err);
|
||||
return err;
|
||||
}
|
||||
*fmt_ctx_ptr = fmt_ctx;
|
||||
if (scan_all_pmts_set)
|
||||
av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
|
||||
if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
|
||||
return AVERROR_OPTION_NOT_FOUND;
|
||||
@@ -2410,16 +2328,13 @@ static int open_input_file(AVFormatContext **fmt_ctx_ptr, const char *filename)
|
||||
opts = setup_find_stream_info_opts(fmt_ctx, codec_opts);
|
||||
orig_nb_streams = fmt_ctx->nb_streams;
|
||||
|
||||
err = avformat_find_stream_info(fmt_ctx, opts);
|
||||
|
||||
for (i = 0; i < orig_nb_streams; i++)
|
||||
av_dict_free(&opts[i]);
|
||||
av_freep(&opts);
|
||||
|
||||
if (err < 0) {
|
||||
if ((err = avformat_find_stream_info(fmt_ctx, opts)) < 0) {
|
||||
print_error(filename, err);
|
||||
return err;
|
||||
}
|
||||
for (i = 0; i < orig_nb_streams; i++)
|
||||
av_dict_free(&opts[i]);
|
||||
av_freep(&opts);
|
||||
|
||||
av_dump_format(fmt_ctx, 0, filename, 0);
|
||||
|
||||
@@ -2470,7 +2385,7 @@ static void close_input_file(AVFormatContext **ctx_ptr)
|
||||
|
||||
static int probe_file(WriterContext *wctx, const char *filename)
|
||||
{
|
||||
AVFormatContext *fmt_ctx = NULL;
|
||||
AVFormatContext *fmt_ctx;
|
||||
int ret, i;
|
||||
int section_id;
|
||||
|
||||
@@ -2479,7 +2394,7 @@ static int probe_file(WriterContext *wctx, const char *filename)
|
||||
|
||||
ret = open_input_file(&fmt_ctx, filename);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
return ret;
|
||||
|
||||
#define CHECK_END if (ret < 0) goto end
|
||||
|
||||
@@ -2537,8 +2452,7 @@ static int probe_file(WriterContext *wctx, const char *filename)
|
||||
}
|
||||
|
||||
end:
|
||||
if (fmt_ctx)
|
||||
close_input_file(&fmt_ctx);
|
||||
close_input_file(&fmt_ctx);
|
||||
av_freep(&nb_streams_frames);
|
||||
av_freep(&nb_streams_packets);
|
||||
av_freep(&selected_streams);
|
||||
@@ -2562,6 +2476,8 @@ static void ffprobe_show_program_version(WriterContext *w)
print_str("version", FFMPEG_VERSION);
print_fmt("copyright", "Copyright (c) %d-%d the FFmpeg developers",
program_birth_year, CONFIG_THIS_YEAR);
print_str("build_date", __DATE__);
print_str("build_time", __TIME__);
print_str("compiler_ident", CC_IDENT);
print_str("configuration", FFMPEG_CONFIGURATION);
writer_print_section_footer(w);
@@ -2598,58 +2514,6 @@ static void ffprobe_show_library_versions(WriterContext *w)
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
|
||||
#define PRINT_PIX_FMT_FLAG(flagname, name) \
|
||||
do { \
|
||||
print_int(name, !!(pixdesc->flags & AV_PIX_FMT_FLAG_##flagname)); \
|
||||
} while (0)
|
||||
|
||||
static void ffprobe_show_pixel_formats(WriterContext *w)
|
||||
{
|
||||
const AVPixFmtDescriptor *pixdesc = NULL;
|
||||
int i, n;
|
||||
|
||||
writer_print_section_header(w, SECTION_ID_PIXEL_FORMATS);
|
||||
while (pixdesc = av_pix_fmt_desc_next(pixdesc)) {
|
||||
writer_print_section_header(w, SECTION_ID_PIXEL_FORMAT);
|
||||
print_str("name", pixdesc->name);
|
||||
print_int("nb_components", pixdesc->nb_components);
|
||||
if ((pixdesc->nb_components >= 3) && !(pixdesc->flags & AV_PIX_FMT_FLAG_RGB)) {
|
||||
print_int ("log2_chroma_w", pixdesc->log2_chroma_w);
|
||||
print_int ("log2_chroma_h", pixdesc->log2_chroma_h);
|
||||
} else {
|
||||
print_str_opt("log2_chroma_w", "N/A");
|
||||
print_str_opt("log2_chroma_h", "N/A");
|
||||
}
|
||||
n = av_get_bits_per_pixel(pixdesc);
|
||||
if (n) print_int ("bits_per_pixel", n);
|
||||
else print_str_opt("bits_per_pixel", "N/A");
|
||||
if (do_show_pixel_format_flags) {
|
||||
writer_print_section_header(w, SECTION_ID_PIXEL_FORMAT_FLAGS);
|
||||
PRINT_PIX_FMT_FLAG(BE, "big_endian");
|
||||
PRINT_PIX_FMT_FLAG(PAL, "palette");
|
||||
PRINT_PIX_FMT_FLAG(BITSTREAM, "bitstream");
|
||||
PRINT_PIX_FMT_FLAG(HWACCEL, "hwaccel");
|
||||
PRINT_PIX_FMT_FLAG(PLANAR, "planar");
|
||||
PRINT_PIX_FMT_FLAG(RGB, "rgb");
|
||||
PRINT_PIX_FMT_FLAG(PSEUDOPAL, "pseudopal");
|
||||
PRINT_PIX_FMT_FLAG(ALPHA, "alpha");
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
if (do_show_pixel_format_components && (pixdesc->nb_components > 0)) {
|
||||
writer_print_section_header(w, SECTION_ID_PIXEL_FORMAT_COMPONENTS);
|
||||
for (i = 0; i < pixdesc->nb_components; i++) {
|
||||
writer_print_section_header(w, SECTION_ID_PIXEL_FORMAT_COMPONENT);
|
||||
print_int("index", i + 1);
|
||||
print_int("bit_depth", pixdesc->comp[i].depth_minus1 + 1);
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
|
||||
static int opt_format(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
iformat = av_find_input_format(arg);
|
||||
@@ -2983,7 +2847,6 @@ DEFINE_OPT_SHOW_SECTION(format, FORMAT);
|
||||
DEFINE_OPT_SHOW_SECTION(frames, FRAMES);
|
||||
DEFINE_OPT_SHOW_SECTION(library_versions, LIBRARY_VERSIONS);
|
||||
DEFINE_OPT_SHOW_SECTION(packets, PACKETS);
|
||||
DEFINE_OPT_SHOW_SECTION(pixel_formats, PIXEL_FORMATS);
|
||||
DEFINE_OPT_SHOW_SECTION(program_version, PROGRAM_VERSION);
|
||||
DEFINE_OPT_SHOW_SECTION(streams, STREAMS);
|
||||
DEFINE_OPT_SHOW_SECTION(programs, PROGRAMS);
|
||||
@@ -3005,7 +2868,6 @@ static const OptionDef real_options[] = {
|
||||
{ "select_streams", OPT_STRING | HAS_ARG, {(void*)&stream_specifier}, "select the specified streams", "stream_specifier" },
|
||||
{ "sections", OPT_EXIT, {.func_arg = opt_sections}, "print sections structure and section information, and exit" },
|
||||
{ "show_data", OPT_BOOL, {(void*)&do_show_data}, "show packets data" },
|
||||
{ "show_data_hash", OPT_STRING | HAS_ARG, {(void*)&show_data_hash}, "show packets data hash" },
|
||||
{ "show_error", 0, {(void*)&opt_show_error}, "show probing error" },
|
||||
{ "show_format", 0, {(void*)&opt_show_format}, "show format/container info" },
|
||||
{ "show_frames", 0, {(void*)&opt_show_frames}, "show frames info" },
|
||||
@@ -3022,7 +2884,6 @@ static const OptionDef real_options[] = {
|
||||
{ "show_program_version", 0, {(void*)&opt_show_program_version}, "show ffprobe version" },
|
||||
{ "show_library_versions", 0, {(void*)&opt_show_library_versions}, "show library versions" },
|
||||
{ "show_versions", 0, {(void*)&opt_show_versions}, "show program and library versions" },
|
||||
{ "show_pixel_formats", 0, {(void*)&opt_show_pixel_formats}, "show pixel format descriptions" },
|
||||
{ "show_private_data", OPT_BOOL, {(void*)&show_private_data}, "show private data" },
|
||||
{ "private", OPT_BOOL, {(void*)&show_private_data}, "same as show_private_data" },
|
||||
{ "bitexact", OPT_BOOL, {&do_bitexact}, "force bitexact output" },
|
||||
@@ -3079,9 +2940,6 @@ int main(int argc, char **argv)
|
||||
SET_DO_SHOW(FRAMES, frames);
|
||||
SET_DO_SHOW(LIBRARY_VERSIONS, library_versions);
|
||||
SET_DO_SHOW(PACKETS, packets);
|
||||
SET_DO_SHOW(PIXEL_FORMATS, pixel_formats);
|
||||
SET_DO_SHOW(PIXEL_FORMAT_FLAGS, pixel_format_flags);
|
||||
SET_DO_SHOW(PIXEL_FORMAT_COMPONENTS, pixel_format_components);
|
||||
SET_DO_SHOW(PROGRAM_VERSION, program_version);
|
||||
SET_DO_SHOW(PROGRAMS, programs);
|
||||
SET_DO_SHOW(STREAMS, streams);
|
||||
@@ -3113,21 +2971,6 @@ int main(int argc, char **argv)
|
||||
w_name = av_strtok(print_format, "=", &buf);
|
||||
w_args = buf;
|
||||
|
||||
if (show_data_hash) {
|
||||
if ((ret = av_hash_alloc(&hash, show_data_hash)) < 0) {
|
||||
if (ret == AVERROR(EINVAL)) {
|
||||
const char *n;
|
||||
av_log(NULL, AV_LOG_ERROR,
|
||||
"Unknown hash algorithm '%s'\nKnown algorithms:",
|
||||
show_data_hash);
|
||||
for (i = 0; (n = av_hash_names(i)); i++)
|
||||
av_log(NULL, AV_LOG_ERROR, " %s", n);
|
||||
av_log(NULL, AV_LOG_ERROR, "\n");
|
||||
}
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
w = writer_get_by_name(w_name);
|
||||
if (!w) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Unknown output format with name '%s'\n", w_name);
|
||||
@@ -3146,12 +2989,10 @@ int main(int argc, char **argv)
|
||||
ffprobe_show_program_version(wctx);
|
||||
if (do_show_library_versions)
|
||||
ffprobe_show_library_versions(wctx);
|
||||
if (do_show_pixel_formats)
|
||||
ffprobe_show_pixel_formats(wctx);
|
||||
|
||||
if (!input_filename &&
|
||||
((do_show_format || do_show_programs || do_show_streams || do_show_chapters || do_show_packets || do_show_error) ||
|
||||
(!do_show_program_version && !do_show_library_versions && !do_show_pixel_formats))) {
|
||||
(!do_show_program_version && !do_show_library_versions))) {
|
||||
show_usage();
|
||||
av_log(NULL, AV_LOG_ERROR, "You have to specify one input file.\n");
|
||||
av_log(NULL, AV_LOG_ERROR, "Use -h to get full help or, even better, run 'man %s'.\n", program_name);
|
||||
@@ -3169,7 +3010,6 @@ int main(int argc, char **argv)
|
||||
end:
|
||||
av_freep(&print_format);
|
||||
av_freep(&read_intervals);
|
||||
av_hash_freep(&hash);
|
||||
|
||||
uninit_opts();
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sections); i++)
|
||||
Some files were not shown because too many files have changed in this diff.