Compare commits

23 Commits

- 7c2d152f56
- 1f58590e1e
- 64bbbcd7b0
- de9d3f22f0
- ea5bb5613f
- c61ac696e5
- 6a250c858e
- 5411040802
- ab1ea597bd
- ee606fd031
- 2f71aeb301
- 65259b4d68
- 8f53d32dfb
- fcc6568a10
- 489d066d49
- 9cb45f6ad2
- 0f04e2741e
- 84642ec879
- bef4d9bf87
- bc4f6ae88e
- 2678b25099
- e322496054
- 7fa72ff19c
1 .gitattributes (vendored)

@@ -1 +0,0 @@
*.pnm -diff -text
3 .gitignore (vendored)

@@ -39,7 +39,6 @@
/doc/examples/avio_reading
/doc/examples/decoding_encoding
/doc/examples/demuxing_decoding
/doc/examples/extract_mvs
/doc/examples/filter_audio
/doc/examples/filtering_audio
/doc/examples/filtering_video
@@ -62,9 +61,7 @@
/tests/audiogen
/tests/base64
/tests/data/
/tests/pixfmts.mak
/tests/rotozoom
/tests/test_copy.ffmeta
/tests/tiny_psnr
/tests/tiny_ssim
/tests/videogen
254 Changelog

@@ -1,253 +1,10 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.

version 2.5.7
- avformat/nutdec: Fix recovery when a failure happens immediately after seeking
- nutdec: fix memleaks on error in nut_read_header
- rtpenc_jpeg: handle case of picture dimensions not dividing by 8
- avformat/mov: Fix parsing short loci
- avcodec/shorten: Fix code depending on signed overflow behavior
- avcodec/proresdec2: Reset slice_count on deallocation
- ffmpeg_opt: Fix -timestamp parsing
- hevc: make avcodec_decode_video2() fail if get_format() fails
- avcodec/mpeg4audio: add some padding/alignment to MAX_PCE_SIZE
- swr: fix alignment issue caused by 8ch sse functions
- libswscale/x86/hscale_fast_bilinear_simd.c: Include BX in the clobber list on x86_64, because it isn't implicitly included when PIC is on.
- aacdec: don't return frames without data
- avformat/matroskadec: Cleanup error handling for bz2 & zlib
- avformat/nutdec: Fix use of uninitialized value
- tools/graph2dot: use larger data types than int for array/string sizes
- id3v2: catch avio_read errors in check_tag
- aacsbr: break infinite loop in sbr_hf_calc_npatches
- diracdec: avoid overflow of bytes*8 in decode_lowdelay
- diracdec: prevent overflow in data_unit_size check
- avidec: avoid infinite loop due to negative ast->sample_size
- pngdec: don't use AV_PIX_FMT_MONOBLACK for apng
- avcodec/wavpack: Check L/R values before use to avoid harmless integer overflow and undefined behavior in fate
- xcbgrab: Validate the capture area
- xcbgrab: Do not assume the non shm image data is always available
- avfilter/lavfutils: disable frame threads when decoding a single image
- nutdec: fix illegal count check in decode_main_header
- ffmpeg: remove incorrect network deinit
- OpenCL: Avoid potential buffer overflow in cmdutils_opencl.c
- apedec: set s->samples only when init_frame_decoder succeeded
- swscale/ppc/swscale_altivec.c: POWER LE support in yuv2planeX_8() delete macro GET_VF()
- libvpxenc: only set noise reduction w/vp8
- tests/fate-run: do not attempt to parse tiny_psnr's output if it failed
- alac: reject rice_limit 0 if compression is used
- alsdec: only adapt order for positive max_order
- alsdec: check sample pointer range in revert_channel_correlation
- tests: drop bc dependency
- fate: Include branch information in the payload header

version 2.5.6
- avcodec/atrac3plusdsp: fix on stack alignment
- ac3: validate end in ff_ac3_bit_alloc_calc_mask
- aacpsy: avoid psy_band->threshold becoming NaN
- aasc: return correct buffer size from aasc_decode_frame
- msrledec: use signed pixel_ptr in msrle_decode_pal4
- swresample: Allow reinitialization without ever setting channel layouts (cherry picked from commit 80a28c7509a11114e1aea5b208d56c6646d69c07)
- swresample: Allow reinitialization without ever setting channel counts
- avcodec/h264: Do not fail with randomly truncated VUIs
- avcodec/h264_ps: Move truncation check from VUI to SPS
- avcodec/h264: Be more tolerant to changing pps id between slices
- avcodec/aacdec: Fix storing state before PCE decode
- avcodec/h264: reset the counts in the correct context
- avcodec/h264_slice: Do not reset mb_aff_frame per slice
- avcodec/h264: finish previous slices before switching to single thread mode
- avcodec/h264: Fix race between slices where one overwrites data from the next
- avcodec/h264_refs: Do not set reference to things which do not exist
- avcodec/h264: Fail for invalid mixed IDR / non IDR frames in slice threading mode
- h264: avoid unnecessary calls to get_format
- avcodec/msrledec: restructure msrle_decode_pal4() based on the line number instead of the pixel pointer

version 2.5.5:
|
||||
- vp9: make above buffer pointer 32-byte aligned.
|
||||
- avcodec/dnxhddec: Check that the frame is interlaced before using cur_field
|
||||
- avformat/mov: Disallow ".." in dref unless use_absolute_path is set
|
||||
- avformat/mov: Check for string truncation in mov_open_dref()
|
||||
- avformat/mov: Use sizeof(filename) instead of a literal number
|
||||
- eac3dec: fix scaling
|
||||
- ac3_fixed: fix computation of spx_noise_blend
|
||||
- ac3_fixed: fix out-of-bound read
|
||||
- ac3dec_fixed: always use the USE_FIXED=1 variant of the AC3DecodeContext
|
||||
- avcodec/012v: redesign main loop
|
||||
- avcodec/012v: Check dimensions more completely
|
||||
- asfenc: fix leaking asf->index_ptr on error
|
||||
- avcodec/options_table: remove extradata_size from the AVOptions table
|
||||
- ffmdec: limit the backward seek to the last resync position
|
||||
- ffmdec: make sure the time base is valid
|
||||
- ffmdec: fix infinite loop at EOF
|
||||
- ffmdec: initialize f_cprv, f_stvi and f_stau
|
||||
- avformat/rm: limit packet size
|
||||
- avcodec/webp: validate the distance prefix code
|
||||
- avcodec/rv10: check size of s->mb_width * s->mb_height
|
||||
- eamad: check for out of bounds read
|
||||
- mdec: check for out of bounds read
|
||||
- arm: Suppress tags about used cpu arch and extensions
|
||||
- aic: Fix decoding files with odd dimensions
|
||||
- avcodec/tiff: move bpp check to after "end:"
|
||||
- mxfdec: Fix the error handling for when strftime fails
|
||||
- avcodec/opusdec: Fix delayed sample value
|
||||
- avcodec/opusdec: Clear out pointers per packet
|
||||
- avcodec/utils: Align YUV411 by as much as the other YUV variants
|
||||
- vp9: fix segmentation map retention with threading enabled.
|
||||
- webp: ensure that each transform is only used once
|
||||
- doc/protocols/tcp: fix units of listen_timeout option value, from microseconds to milliseconds
|
||||
- fix VP9 packet decoder returning 0 instead of the used data size
|
||||
- avformat/flvenc: check that the codec_tag fits in the available bits
|
||||
- avcodec/utils: use correct printf specifier in ff_set_sar
|
||||
- avutil/imgutils: correctly check for negative SAR components
|
||||
- swscale/utils: clear formatConvBuffer on allocation
|
||||
- avformat/bit: only accept the g729 codec and 1 channel
|
||||
- avformat/bit: check that pkt->size is 10 in write_packet
|
||||
- avformat/adxdec: check avctx->channels for invalid values
|
||||
- avformat/adxdec: set avctx->channels in adx_read_header
|
||||
- Fix buffer_size argument to init_put_bits() in multiple encoders.
|
||||
- mips/acelp_filters: fix incorrect register constraint
|
||||
- avcodec/hevc_ps: Sanity checks for some log2_* values
|
||||
- avcodec/zmbv: Check len before reading in decode_frame()
|
||||
- avcodec/h264: Only reinit quant tables if a new PPS is allowed
|
||||
- avcodec/snowdec: Fix ref value check
|
||||
- swscale/utils: More carefully merge and clear coefficients outside the input
|
||||
- avcodec/a64multienc: Assert that the Packet size does not grow
|
||||
- avcodec/a64multienc: simplify frame handling code
|
||||
- avcodec/a64multienc: fix use of uninitialized values in to_meta_with_crop
|
||||
- avcodec/a64multienc: initialize mc_meta_charset to zero
|
||||
- avcodec/a64multienc: don't set incorrect packet size
|
||||
- avcodec/a64multienc: use av_frame_ref instead of copying the frame
|
||||
- avcodec/x86/mlpdsp_init: Simplify mlp_filter_channel_x86()
|
||||
- h264: initialize H264Context.avctx in init_thread_copy
|
||||
- wtvdec: fix integer overflow resulting in errors with large files
|
||||
- avcodec/gif: fix off by one in column offsetting finding
|
||||
|
||||
|
||||
version 2.5.4:
|
||||
- avcodec/arm/videodsp_armv5te: Fix linking failure with shared libs
|
||||
- avcodec/mjpegdec: Skip blocks which are outside the visible area
|
||||
- avcodec/h264_slice: ignore SAR changes in slices after the first
|
||||
- avcodec/h264_slice: Check picture structure before setting the related fields
|
||||
- avcodec/h264_slice: Do not change frame_num after the first slice
|
||||
- avutil/opt: Fix type used to access AV_OPT_TYPE_SAMPLE_FMT
|
||||
- avutil/opt: Fix types used to access AV_OPT_TYPE_PIXEL_FMT
|
||||
- avcodec/h264: Be more strict on rejecting pps/sps changes
|
||||
- avcodec/h264: Be more strict on rejecting pps_id changes
|
||||
- avcodec/h264_ps: More completely check the bit depths
|
||||
- avformat/thp: Check av_get_packet() for failure not only for partial output
|
||||
- swscale/utils: Limit filter shifting so as not to read from prior the array
|
||||
- avcodec/mpegvideo_motion: Fix gmc chroma dimensions
|
||||
- avcodec/mjpegdec: Check number of components for JPEG-LS
|
||||
- avcodec/mjpegdec: Check escape sequence validity
|
||||
- avformat/mpc8: Use uint64_t in *_get_v() to avoid undefined behavior
|
||||
- avformat/mpc8: fix broken pointer math
|
||||
- avformat/mpc8: fix hang with fuzzed file
|
||||
- avformat/tta: fix crash with corrupted files
|
||||
- avcodec/ppc/idctdsp.c: POWER LE support in idct_add_altivec()
|
||||
- swscale/input: fix rgba64 alpha non native
|
||||
- swscale/input: Fix alpha of YA16 input
|
||||
- libavcodec/ppc/mpegvideoencdsp.c: fix stack smashing in pix_norm1_altivec() and pix_sum_altivec()
|
||||
- avformat/rmdec: Check for overflow in ff_rm_read_mdpr_codecdata()
|
||||
- avformat/mpeg: do not count PES packets inside PES packets during probing
|
||||
- hevc: always clip luma_log2_weight_denom
|
||||
- rtpdec_h263_rfc2190: Clear the stored bits if discarding buffered data
|
||||
- aacenc: correctly check returned value
|
||||
- swscale: check memory allocations
|
||||
- opt: check memory allocation
|
||||
- avformat/utils: check for malloc failure
|
||||
- avcodec/flac_parser: fix handling EOF if no headers are found
|
||||
- avfilter/vf_framepack: Check and update frame_rate
|
||||
- vp8: improve memory allocation checks
|
||||
- configure: enable vsx together with altivec for ppc64el
|
||||
- avcodec/hevc: Fix handling of skipped_bytes() reallocation failures
|
||||
- qpeg: avoid pointless invalid memcpy()
|
||||
|
||||
|
||||
version 2.5.3:
|
||||
- vp9: fix parser return values in error case
|
||||
- ffmpeg: Clear error message array at init.
|
||||
- avcodec/dvdsubdec: fix accessing dangling pointers
|
||||
- avcodec/dvdsubdec: error on bitmaps with size 0
|
||||
- cmdutils: Use 64bit for file size/offset related variable in cmdutils_read_file()
|
||||
- mov: Fix negative size calculation in mov_read_default().
|
||||
- avformat/mov: fix integer overflow in mov_read_udta_string()
|
||||
- mov: Fix overflow and error handling in read_tfra().
|
||||
- mov: Avoid overflow with mov_metadata_raw()
|
||||
- avcodec/dvdsubdec: fix out of bounds accesses
|
||||
- avfilter/vf_sab: fix filtering tiny images
|
||||
- avformat/flvdec: Increase string array size
|
||||
- avformat/flvdec: do not inject dts=0 metadata packets which failed to be parsed into a new data stream
|
||||
- avformat/cdxl: Fix integer overflow of image_size
|
||||
- libavformat: Build hevc.o when building the RTP muxer
|
||||
|
||||
version 2.5.2:
|
||||
- avcodec/indeo3: ensure offsets are non negative
|
||||
- avcodec/h264: Check *log2_weight_denom
|
||||
- avcodec/hevc_ps: Check diff_cu_qp_delta_depth
|
||||
- avcodec/h264: Clear delayed_pic on deallocation
|
||||
- avcodec/hevc: clear filter_slice_edges() on allocation
|
||||
- avcodec/dcadec: Check that the added xch channel isn't already there
|
||||
- avcodec/indeo3: use signed variables to avoid underflow
|
||||
- swscale: increase yuv2rgb table headroom
|
||||
- avformat/mov: fix integer overflow of size
|
||||
- avformat/mov: check atom nesting depth
|
||||
- avcodec/utvideodec: Fix handling of slice_height=0
|
||||
- avcodec/xface: correct the XFACE_MAX_* values
|
||||
- avcodec/vmdvideo: Check len before using it in method 3
|
||||
- configure: create the tests directory like the doc directory
|
||||
- mmvideo: check frame dimensions
|
||||
- jvdec: check frame dimensions
|
||||
|
||||
version 2.5.1:
|
||||
- lavu/frame: fix malloc error path in av_frame_copy_props()
|
||||
- avformat/aviobuf: Check that avio_seek() target is non negative
|
||||
- swresample/soxr_resample: fix error handling
|
||||
- avformat/flvdec: fix potential use of uninitialized variables
|
||||
- avformat/crypto: fix key vs iv typo
|
||||
- configure: use use_pkg_config() instead of check_pkg_config() for libsmbclient
|
||||
- avcodec/ppc/vp3dsp_altivec: POWER LE support to vp3_idct_add_altivec()
|
||||
- avformat/matroskadec: fix handling of recursive SeekHead elements
|
||||
- doc/examples/filtering_video: fix frame rate
|
||||
- avcodec/mpegaudiodec_template: only allocate fdsp when its used
|
||||
- doc/examples/transcoding: check encoder before using it
|
||||
- update MAINTAINERS file
|
||||
- POWER LE support in put_vp8_epel_h_altivec_core() put_vp8_epel_v_altivec_core() put_vp8_pixels16_altivec()
|
||||
- POWER LE support in vc1_inv_trans_8x4_altivec()
|
||||
|
||||
version 2.5:
|
||||
- HEVC/H.265 RTP payload format (draft v6) packetizer
|
||||
- SUP/PGS subtitle demuxer
|
||||
- ffprobe -show_pixel_formats option
|
||||
- CAST128 symmetric block cipher, ECB mode
|
||||
- STL subtitle demuxer and decoder
|
||||
- libutvideo YUV 4:2:2 10bit support
|
||||
- XCB-based screen-grabber
|
||||
- UDP-Lite support (RFC 3828)
|
||||
- xBR scaling filter
|
||||
- AVFoundation screen capturing support
|
||||
- ffserver supports codec private options
|
||||
- creating DASH compatible fragmented MP4, MPEG-DASH segmenting muxer
|
||||
- WebP muxer with animated WebP support
|
||||
- zygoaudio decoding support
|
||||
- APNG decoder and demuxer
|
||||
- postproc visualization support
|
||||
|
||||
|
||||
version 2.4:
|
||||
- Icecast protocol
|
||||
- ported lenscorrection filter from frei0r filter
|
||||
- large optimizations in dctdnoiz to make it usable
|
||||
- ICY metadata are now requested by default with the HTTP protocol
|
||||
- support for using metadata in stream specifiers in fftools
|
||||
- LZMA compression support in TIFF decoder
|
||||
- support for H.261 RTP payload format (RFC 4587)
|
||||
- HEVC/H.265 RTP payload format (draft v6) depacketizer
|
||||
- added codecview filter to visualize information exported by some codecs
|
||||
- Matroska 3D support through side data
|
||||
- HTML generation using texi2html is deprecated in favor of makeinfo/texi2any
|
||||
- silenceremove filter
|
||||
|
||||
version 2.3.1:
|
||||
- public AVDCT API/ABI for DCT functions
|
||||
- g2meet: allow size changes within original sizes
|
||||
- dv: improved error resilience, fixing Ticket2340 and Ticket2341
|
||||
|
||||
version 2.3:
|
||||
- AC3 fixed-point decoding
|
||||
@@ -272,7 +29,7 @@ version 2.3:
|
||||
- libbs2b-based stereo-to-binaural audio filter
|
||||
- libx264 reference frames count limiting depending on level
|
||||
- native Opus decoder
|
||||
- display matrix export and rotation API
|
||||
- display matrix export and rotation api
|
||||
- WebVTT encoder
|
||||
- showcqt multimedia filter
|
||||
- zoompan filter
|
||||
@@ -314,7 +71,6 @@ version 2.2:
|
||||
- libx265 encoder
|
||||
- dejudder filter
|
||||
- Autodetect VDA like all other hardware accelerations
|
||||
- aliases and defaults for Ogg subtypes (opus, spx)
|
||||
|
||||
|
||||
version 2.1:
|
||||
|
@@ -15,7 +15,6 @@ Specifically, the GPL parts of FFmpeg are:
|
||||
- libpostproc
|
||||
- libmpcodecs
|
||||
- optional x86 optimizations in the files
|
||||
libavcodec/x86/flac_dsp_gpl.asm
|
||||
libavcodec/x86/idct_mmx.c
|
||||
- libutvideo encoding/decoding wrappers in
|
||||
libavcodec/libutvideo*.cpp
|
||||
@@ -34,7 +33,6 @@ Specifically, the GPL parts of FFmpeg are:
|
||||
- vf_geq.c
|
||||
- vf_histeq.c
|
||||
- vf_hqdn3d.c
|
||||
- vf_interlace.c
|
||||
- vf_kerndeint.c
|
||||
- vf_mcdeint.c
|
||||
- vf_mp.c
|
||||
@@ -64,7 +62,6 @@ There are a handful of files under other licensing terms, namely:
|
||||
documentation accompanying your program if you only distribute executables.
|
||||
You must also indicate any changes including additions and deletions to
|
||||
those three files in the documentation.
|
||||
tests/reference.pnm is under the expat license
|
||||
|
||||
|
||||
external libraries
|
||||
|
16 MAINTAINERS
@@ -54,7 +54,7 @@ release management Michael Niedermayer
|
||||
Communication
|
||||
=============
|
||||
|
||||
website Deby Barbara Lepage
|
||||
website Robert Swain
|
||||
fate.ffmpeg.org Timothy Gu
|
||||
Trac bug tracker Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos, Lou Logan
|
||||
mailing lists Michael Niedermayer, Baptiste Coudurier, Lou Logan
|
||||
@@ -309,7 +309,6 @@ libavdevice
|
||||
|
||||
|
||||
avfoundation.m Thilo Borgmann
|
||||
decklink* Deti Fliegl
|
||||
dshow.c Roger Pack (CC rogerdpack@gmail.com)
|
||||
fbdev_enc.c Lukasz Marek
|
||||
gdigrab.c Roger Pack (CC rogerdpack@gmail.com)
|
||||
@@ -320,7 +319,7 @@ libavdevice
|
||||
pulse_audio_enc.c Lukasz Marek
|
||||
qtkit.m Thilo Borgmann
|
||||
sdl Stefano Sabatini
|
||||
v4l2.c Giorgio Vazzana
|
||||
v4l2.c Luca Abeni
|
||||
vfwcap.c Ramiro Polla
|
||||
xv.c Lukasz Marek
|
||||
|
||||
@@ -344,7 +343,6 @@ Filters:
|
||||
af_compand.c Paul B Mahol
|
||||
af_ladspa.c Paul B Mahol
|
||||
af_pan.c Nicolas George
|
||||
af_silenceremove.c Paul B Mahol
|
||||
avf_avectorscope.c Paul B Mahol
|
||||
avf_showcqt.c Muhammad Faiz
|
||||
vf_blend.c Paul B Mahol
|
||||
@@ -355,9 +353,7 @@ Filters:
|
||||
vf_extractplanes.c Paul B Mahol
|
||||
vf_histogram.c Paul B Mahol
|
||||
vf_hqx.c Clément Bœsch
|
||||
vf_idet.c Pascal Massimino
|
||||
vf_il.c Paul B Mahol
|
||||
vf_lenscorrection.c Daniel Oberhoff
|
||||
vf_mergeplanes.c Paul B Mahol
|
||||
vf_psnr.c Paul B Mahol
|
||||
vf_scale.c Michael Niedermayer
|
||||
@@ -386,7 +382,6 @@ Muxers/Demuxers:
|
||||
aiffdec.c Baptiste Coudurier, Matthieu Bouron
|
||||
aiffenc.c Baptiste Coudurier, Matthieu Bouron
|
||||
ape.c Kostya Shishkov
|
||||
apngdec.c Benoit Fouet
|
||||
ass* Aurelien Jacobs
|
||||
astdec.c Paul B Mahol
|
||||
astenc.c James Almer
|
||||
@@ -463,15 +458,12 @@ Muxers/Demuxers:
|
||||
rmdec.c, rmenc.c Ronald S. Bultje, Kostya Shishkov
|
||||
rtmp* Kostya Shishkov
|
||||
rtp.c, rtpenc.c Martin Storsjo
|
||||
rtpdec_h261.*, rtpenc_h261.* Thomas Volkert
|
||||
rtpdec_hevc.*, rtpenc_hevc.* Thomas Volkert
|
||||
rtpdec_asf.* Ronald S. Bultje
|
||||
rtpenc_mpv.*, rtpenc_aac.* Martin Storsjo
|
||||
rtsp.c Luca Barbato
|
||||
sbgdec.c Nicolas George
|
||||
sdp.c Martin Storsjo
|
||||
segafilm.c Mike Melanson
|
||||
segment.c Stefano Sabatini
|
||||
siff.c Kostya Shishkov
|
||||
smacker.c Kostya Shishkov
|
||||
smjpeg* Paul B Mahol
|
||||
@@ -498,7 +490,6 @@ Protocols:
|
||||
libssh.c Lukasz Marek
|
||||
mms*.c Ronald S. Bultje
|
||||
udp.c Luca Abeni
|
||||
icecast.c Marvin Scholz
|
||||
|
||||
|
||||
libswresample
|
||||
@@ -537,8 +528,7 @@ x86 Michael Niedermayer
|
||||
Releases
|
||||
========
|
||||
|
||||
2.5 Michael Niedermayer
|
||||
2.4 Michael Niedermayer
|
||||
2.3 Michael Niedermayer
|
||||
2.2 Michael Niedermayer
|
||||
1.2 Michael Niedermayer
|
||||
|
||||
|
8 Makefile
@@ -32,7 +32,6 @@ OBJS-ffmpeg += ffmpeg_opt.o ffmpeg_filter.o
|
||||
OBJS-ffmpeg-$(HAVE_VDPAU_X11) += ffmpeg_vdpau.o
|
||||
OBJS-ffmpeg-$(HAVE_DXVA2_LIB) += ffmpeg_dxva2.o
|
||||
OBJS-ffmpeg-$(CONFIG_VDA) += ffmpeg_vda.o
|
||||
OBJS-ffserver += ffserver_config.o
|
||||
|
||||
TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64
|
||||
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
|
||||
@@ -64,7 +63,7 @@ FF_DEP_LIBS := $(DEP_LIBS)
|
||||
all: $(AVPROGS)
|
||||
|
||||
$(TOOLS): %$(EXESUF): %.o $(EXEOBJS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS)
|
||||
$(LD) $(LDFLAGS) $(LD_O) $^ $(ELIBS)
|
||||
|
||||
tools/cws2fws$(EXESUF): ELIBS = $(ZLIB)
|
||||
tools/uncoded_frame$(EXESUF): $(FF_DEP_LIBS)
|
||||
@@ -93,7 +92,6 @@ $(foreach V,$(SUBDIR_VARS),$(eval $(call RESET,$(V))))
|
||||
SUBDIR := $(1)/
|
||||
include $(SRC_PATH)/$(1)/Makefile
|
||||
-include $(SRC_PATH)/$(1)/$(ARCH)/Makefile
|
||||
-include $(SRC_PATH)/$(1)/$(INTRINSICS)/Makefile
|
||||
include $(SRC_PATH)/library.mak
|
||||
endef
|
||||
|
||||
@@ -112,14 +110,14 @@ endef
|
||||
|
||||
$(foreach P,$(PROGS),$(eval $(call DOPROG,$(P:$(PROGSSUF)$(EXESUF)=))))
|
||||
|
||||
ffprobe.o cmdutils.o libavcodec/utils.o libavformat/utils.o libavdevice/avdevice.o libavfilter/avfilter.o libavutil/utils.o libpostproc/postprocess.o libswresample/swresample.o libswscale/utils.o : libavutil/ffversion.h
|
||||
ffprobe.o cmdutils.o : libavutil/ffversion.h
|
||||
|
||||
$(PROGS): %$(PROGSSUF)$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
|
||||
$(CP) $< $@
|
||||
$(STRIP) $@
|
||||
|
||||
%$(PROGSSUF)_g$(EXESUF): %.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $(OBJS-$*) $(FF_EXTRALIBS)
|
||||
$(LD) $(LDFLAGS) $(LD_O) $(OBJS-$*) $(FF_EXTRALIBS)
|
||||
|
||||
OBJDIRS += tools
|
||||
|
||||
|
41 README.md

@@ -1,40 +1,19 @@
FFmpeg README
=============

FFmpeg is a collection of libraries and tools to process multimedia content
such as audio, video, subtitles and related metadata.
1) Documentation
----------------

## Libraries
* Read the documentation in the doc/ directory in git.

* `libavcodec` provides implementations of a wide range of codecs.
* `libavformat` implements streaming protocols, container formats and basic I/O access.
* `libavutil` includes hashers, decompressors and miscellaneous utility functions.
* `libavfilter` provides means to alter decoded audio and video through a chain of filters.
* `libavdevice` provides an abstraction to access capture and playback devices.
* `libswresample` implements audio mixing and resampling routines.
* `libswscale` implements color conversion and scaling routines.
You can also view it online at http://ffmpeg.org/documentation.html

## Tools
2) Licensing
------------

* [ffmpeg](http://ffmpeg.org/ffmpeg.html) is a command line toolbox to
  manipulate, convert and stream multimedia content.
* [ffplay](http://ffmpeg.org/ffplay.html) is a minimalistic multimedia player.
* [ffprobe](http://ffmpeg.org/ffprobe.html) is a simple analysis tool to inspect
  multimedia content.
* Additional small tools such as `aviocat`, `ismindex` and `qt-faststart`.
* See the LICENSE file.

## Documentation
3) Build and Install
--------------------

The offline documentation is available in the **doc/** directory.

The online documentation is available in the main [website](http://ffmpeg.org)
and in the [wiki](http://trac.ffmpeg.org).

### Examples

Coding examples are available in the **doc/examples** directory.

## License

The FFmpeg codebase is mainly LGPL-licensed with optional components licensed under
GPL. Please refer to the LICENSE file for detailed information.
* See the INSTALL file.
194 RELEASE_NOTES

@@ -1,101 +1,177 @@
|
||||
┌────────────────────────────────────────┐
|
||||
│ RELEASE NOTES for FFmpeg 2.5 "Bohr" │
|
||||
└────────────────────────────────────────┘
|
||||
┌───────────────────────────────────────────┐
|
||||
│ RELEASE NOTES for FFmpeg 2.3 "Mandelbrot" │
|
||||
└───────────────────────────────────────────┘
|
||||
|
||||
The FFmpeg Project proudly presents FFmpeg 2.5 "Bohr", 2.5 months after the
|
||||
release of 2.4.
|
||||
The FFmpeg Project proudly presents FFmpeg 2.3 "Mandelbrot", a major
|
||||
release with all the great features committed during the three-month period
|
||||
since the release of FFmpeg 2.2.
|
||||
|
||||
The most important new features are AVFoundation screen-grabbing support,
|
||||
animated WebP decoding support, and Animated PNG support. In addition, many
|
||||
exciting features for video streaming are also implemented, including MPEG-
|
||||
DASH fragmenting muxer, HEVC RTP payload muxer, and UDP Lite support.
|
||||
In this release, there are lots of internal overhauls that make FFmpeg a
|
||||
more accessible project for new developers. Many important new
|
||||
optimizations and features like QTKit and AVFoundation input devices are
|
||||
committed. Contributions done by Libav such as a new native Opus decoder
|
||||
are also merged.
|
||||
|
||||
As usual, if you have any questions about this release or any FFmpeg related
topic, feel free to join us on the #ffmpeg IRC channel (on
irc.freenode.net).
Because of the increasing difficulty of maintenance and the lack of maintainers,
we are very sorry to say that we have removed all Blackfin and SPARC
architecture assembly optimizations with the cleanups done. If you are
interested in maintaining optimizations for these two architectures, feel
free to contact us and we will restore the code!
|
||||
|
||||
Oh, and since this release, this modern-looking release note is provided in
|
||||
addition to the old-style Changelog file, to make it easier for you to
|
||||
focus on the most important features in this release.
|
||||
|
||||
Enjoy!
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ 🔨 API Information │
|
||||
│ * API Information │
|
||||
└────────────────────────────┘
|
||||
|
||||
FFmpeg 2.5 includes the following library versions:
|
||||
FFmpeg 2.3 is completely source-compatible to the FFmpeg 2.2 series. There
|
||||
are however some API deprecations that you need to take care of. Use `git
|
||||
diff n2.2 n2.3 doc/APIchanges` to show the list of added and deprecated
|
||||
APIs. FFmpeg 2.3 includes the following library versions:
|
||||
|
||||
• libavutil 54.15.100
|
||||
• libavcodec 56.13.100
|
||||
• libavformat 56.15.102
|
||||
• libavdevice 56. 3.100
|
||||
• libavfilter 5. 2.103
|
||||
• libswscale 3. 1.101
|
||||
• libswresample 1. 1.100
|
||||
• libpostproc 53. 3.100
|
||||
• libavutil 52.92.100
|
||||
• libavcodec 55.69.100
|
||||
• libavformat 55.48.100
|
||||
• libavdevice 55.13.102
|
||||
• libavfilter 4.11.100
|
||||
• libswscale 2. 6.100
|
||||
• libswresample 0.19.100
|
||||
• libpostproc 52. 3.100
|
||||
|
||||
Important API changes since 2.4:
|
||||
Please refer to the doc/APIChanges file for more information.
|
||||
|
||||
• avpriv_dv_frame_profile2() has been deprecated
|
||||
┌────────────────────────────┐
|
||||
│ New Optimization │
|
||||
└────────────────────────────┘
|
||||
|
||||
We are excited to announce that we have committed new x86 assembly
|
||||
optimization for HEVC, and FFmpeg's audio resampler libswresample. ARM
|
||||
users will get a boost in MLP/TrueHD decoding thanks to new optimization.
|
||||
Decoding Huffyuv also got a major boost from optimization on the C code.
|
||||
|
||||
Please refer to the doc/APIchanges file for more information.
|
||||
Of special interest for Microsoft Visual Studio users, we have also
|
||||
converted some preexisting x86 assembly to NASM/Yasm format compatible
|
||||
with MSVC setup, especially in the area of audio resampling.
|
||||
|
||||
Another major feature in this release is the introduction of AArch64
|
||||
(ARMv8) assembly optimization. AArch64 is another name for the first
|
||||
64-bit ARM architecture, used by Apple A7 SoC inside iPhone 5S. Some
|
||||
32-bit ARM assembly has already been ported to AArch64, but more work is
|
||||
underway.
|
||||
|
||||
┌────────────────────────────┐
│    Native Opus decoder     │
└────────────────────────────┘

Opus is an open audio format jointly developed by Xiph.Org, Mozilla,
Skype/Microsoft, and Broadcom. It combines the features of the Skype SILK
speech codec and the Xiph.Org CELT music codec into one low-latency
codec. Decoding Opus has been possible since FFmpeg 1.0 using the
libopus library, but the new native Opus decoder brings a higher level of
stability and speed.
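As an illustration only (this sketch is not part of the changeset), an application could check which of the two Opus decoders its FFmpeg build provides; it only assumes the standard libavcodec lookup calls of the 2.x era:

```c
#include <stdio.h>
#include <libavcodec/avcodec.h>

int main(void)
{
    /* In the 2.x API all codecs must be registered before lookup. */
    avcodec_register_all();

    /* "opus" is the native decoder, "libopus" the external-library wrapper. */
    AVCodec *native  = avcodec_find_decoder_by_name("opus");
    AVCodec *wrapper = avcodec_find_decoder_by_name("libopus");

    printf("native Opus decoder: %s\n", native  ? "available" : "not built");
    printf("libopus wrapper:     %s\n", wrapper ? "available" : "not built");
    return 0;
}
```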
┌────────────────────────────┐
│   QTKit and AVFoundation   │
└────────────────────────────┘

For OS X users, the new QTKit and AVFoundation devices allow you to use
the integrated camera on Macs. AVFoundation is a newer API, only available
on OS X 10.7 "Lion" or newer. For users with older OS X systems, the
QTKit device, which uses the older OS X API, is the one to use.
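A minimal, hedged sketch (not taken from this changeset) of opening the AVFoundation capture device through libavdevice; the device name "default" and the error handling are assumptions for illustration:

```c
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>

/* Try to open the default AVFoundation video device (OS X 10.7+). */
static int open_default_camera(AVFormatContext **ctx)
{
    av_register_all();
    avdevice_register_all();

    AVInputFormat *fmt = av_find_input_format("avfoundation");
    if (!fmt)
        return AVERROR_DEMUXER_NOT_FOUND;   /* FFmpeg built without the device */

    /* "default" selects the default capture device; a numeric index works too. */
    return avformat_open_input(ctx, "default", fmt, NULL);
}
```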
┌────────────────────────────┐
│       API Additions        │
└────────────────────────────┘

In this release, stream side data is introduced as AVStream.side_data as
a way to store miscellaneous stream-wide information. The format is
similar to the previously anonymous structure AVPacket.side_data (now
named AVPacketSideData). With this change, audio ReplayGain information
and the video rotation matrix are now exported through this API, if
available in the demuxer.
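A hedged sketch of what consuming this API can look like, assuming only the AVStream.side_data/nb_side_data fields named above and the libavutil display-matrix helper; it is not code from this changeset:

```c
#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavutil/display.h>

/* Walk a stream's side data and report the rotation if a display matrix is present. */
static void report_rotation(const AVStream *st)
{
    for (int i = 0; i < st->nb_side_data; i++) {
        const AVPacketSideData *sd = &st->side_data[i];
        if (sd->type == AV_PKT_DATA_DISPLAYMATRIX && sd->size >= 9 * sizeof(int32_t)) {
            double angle = av_display_rotation_get((const int32_t *)sd->data);
            printf("stream %d: rotation of %.1f degrees\n", st->index, angle);
        }
    }
}
```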
We also have improved libswresample's Doxygen API documentation, so new
|
||||
developers wishing to use FFmpeg's excellent libraries can get started
|
||||
more easily and faster.
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ Last But Not Least │
|
||||
└────────────────────────────┘
|
||||
|
||||
Other interesting new features including hqx video filter, a pixel art
|
||||
scaling filter; a fixed-point AC-3 decoder contributed by Imagination
|
||||
Technologies; an On2 TrueMotion VP7 video decoder; an HTML5 WebVTT
|
||||
subtitle encoder that allows creation of WebVTT from any text-based
|
||||
subtitles; and an 1-bit Direct Stream Digital audio decoder.
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ ★ List of New Features │
|
||||
└────────────────────────────┘
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ ffprobe │
|
||||
│ Command line tools │
|
||||
└────────────────────────────┘
|
||||
|
||||
• -show_pixel_formats option
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ ffserver │
|
||||
└────────────────────────────┘
|
||||
|
||||
• codec private options support
|
||||
• Support for decoding through DXVA2 in ffmpeg
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ libavcodec │
|
||||
└────────────────────────────┘
|
||||
|
||||
• STL subtitle decoder
|
||||
• libutvideo YUV 4:2:2 10bit support
|
||||
• animated WebP decoding support
|
||||
• zygoaudio decoding support
|
||||
• APNG decoder
|
||||
• AC3 fixed-point decoding
|
||||
• VP7 video decoder
|
||||
• Alias PIX image encoder and decoder
|
||||
• Improvements to the BRender PIX image decoder
|
||||
• Improvements to the XBM decoder
|
||||
• Improvements to OpenEXR image decoder
|
||||
• Support decoding 16-bit RLE SGI images
|
||||
• Direct Stream Digital (DSD) decoder
|
||||
• On2 AVC (Audio for Video) decoder
|
||||
• Native Opus decoder
|
||||
• WebVTT encoder
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ libavdevice │
|
||||
└────────────────────────────┘
|
||||
|
||||
• XCB-based screen-grabber
|
||||
• AVFoundation screen capturing support
|
||||
• QTKit input device
|
||||
• GDI screen grabbing for Windows
|
||||
• AVFoundation input device
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ libavformat │
|
||||
└────────────────────────────┘
|
||||
|
||||
• HEVC/H.265 RTP payload format (draft v6) packetizer
|
||||
• SUP/PGS subtitle demuxer
|
||||
• STL subtitle demuxer
|
||||
• UDP-Lite support (RFC 3828)
|
||||
• MPEG-DASH segmenting muxer, which allows creating DASH compatible
|
||||
fragmented MP4
|
||||
• WebP muxer
|
||||
• APNG demuxer
|
||||
• subfile protocol
|
||||
• Phantom Cine demuxer
|
||||
• Alternative rendition support for HTTP Live Streaming
|
||||
• Magic Lantern Video (MLV) demuxer
|
||||
• Image format auto-detection
|
||||
• LRC lyric file demuxer and muxer
|
||||
• Samba protocol (via libsmbclient)
|
||||
• WebM DASH Manifest muxer
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ libavfilter │
|
||||
└────────────────────────────┘
|
||||
|
||||
• xBR scaling filter
|
||||
• shuffleplanes filter
|
||||
• libbs2b-based stereo-to-binaural audio filter
|
||||
• showcqt multimedia filter
|
||||
• zoompan filter
|
||||
• signalstats filter
|
||||
• hqx filter (hq2x, hq3x, hq4x)
|
||||
• flanger filter
|
||||
• libfribidi support in drawtext
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ libavutil │
|
||||
└────────────────────────────┘
|
||||
┌────────────────────────────┐
|
||||
│ ⚠ Behaviour changes │
|
||||
└────────────────────────────┘
|
||||
|
||||
• CAST128 symmetric block cipher, ECB mode
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ libpostproc │
|
||||
└────────────────────────────┘
|
||||
|
||||
• visualization support
|
||||
• libx264 reference frames count is now limited depending on level chosen
|
||||
• Because of the new image format auto-detection feature, you don't need to
|
||||
specify image format when decoding an image with no extension.
|
||||
|
224 cmdutils.c
@@ -166,7 +166,7 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
|
||||
int first;
|
||||
|
||||
first = 1;
|
||||
for (po = options; po->name; po++) {
|
||||
for (po = options; po->name != NULL; po++) {
|
||||
char buf[64];
|
||||
|
||||
if (((po->flags & req_flags) != req_flags) ||
|
||||
@@ -205,7 +205,7 @@ static const OptionDef *find_option(const OptionDef *po, const char *name)
|
||||
const char *p = strchr(name, ':');
|
||||
int len = p ? p - name : strlen(name);
|
||||
|
||||
while (po->name) {
|
||||
while (po->name != NULL) {
|
||||
if (!strncmp(name, po->name, len) && strlen(po->name) == len)
|
||||
break;
|
||||
po++;
|
||||
@@ -254,7 +254,7 @@ static void prepare_app_arguments(int *argc_ptr, char ***argv_ptr)
|
||||
|
||||
win32_argv_utf8 = av_mallocz(sizeof(char *) * (win32_argc + 1) + buffsize);
|
||||
argstr_flat = (char *)win32_argv_utf8 + sizeof(char *) * (win32_argc + 1);
|
||||
if (!win32_argv_utf8) {
|
||||
if (win32_argv_utf8 == NULL) {
|
||||
LocalFree(argv_w);
|
||||
return;
|
||||
}
|
||||
@@ -444,7 +444,7 @@ int locate_option(int argc, char **argv, const OptionDef *options,
|
||||
(po->name && !strcmp(optname, po->name)))
|
||||
return i;
|
||||
|
||||
if (!po->name || po->flags & HAS_ARG)
|
||||
if (po->flags & HAS_ARG)
|
||||
i++;
|
||||
}
|
||||
return 0;
|
||||
@@ -959,10 +959,9 @@ static int init_report(const char *env)
|
||||
|
||||
report_file = fopen(filename.str, "w");
|
||||
if (!report_file) {
|
||||
int ret = AVERROR(errno);
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to open report \"%s\": %s\n",
|
||||
filename.str, strerror(errno));
|
||||
return ret;
|
||||
return AVERROR(errno);
|
||||
}
|
||||
av_log_set_callback(log_callback_report);
|
||||
av_log(NULL, AV_LOG_INFO,
|
||||
@@ -1243,7 +1242,7 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
|
||||
is_dev = is_device(ofmt->priv_class);
|
||||
if (!is_dev && device_only)
|
||||
continue;
|
||||
if ((!name || strcmp(ofmt->name, name) < 0) &&
|
||||
if ((name == NULL || strcmp(ofmt->name, name) < 0) &&
|
||||
strcmp(ofmt->name, last_name) > 0) {
|
||||
name = ofmt->name;
|
||||
long_name = ofmt->long_name;
|
||||
@@ -1254,7 +1253,7 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
|
||||
is_dev = is_device(ifmt->priv_class);
|
||||
if (!is_dev && device_only)
|
||||
continue;
|
||||
if ((!name || strcmp(ifmt->name, name) < 0) &&
|
||||
if ((name == NULL || strcmp(ifmt->name, name) < 0) &&
|
||||
strcmp(ifmt->name, last_name) > 0) {
|
||||
name = ifmt->name;
|
||||
long_name = ifmt->long_name;
|
||||
@@ -1263,7 +1262,7 @@ static int show_formats_devices(void *optctx, const char *opt, const char *arg,
|
||||
if (name && strcmp(ifmt->name, name) == 0)
|
||||
decode = 1;
|
||||
}
|
||||
if (!name)
|
||||
if (name == NULL)
|
||||
break;
|
||||
last_name = name;
|
||||
|
||||
@@ -1544,8 +1543,7 @@ int show_protocols(void *optctx, const char *opt, const char *arg)
|
||||
|
||||
int show_filters(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
#if CONFIG_AVFILTER
|
||||
const AVFilter *filter = NULL;
|
||||
const AVFilter av_unused(*filter) = NULL;
|
||||
char descr[64], *descr_cur;
|
||||
int i, j;
|
||||
const AVFilterPad *pad;
|
||||
@@ -1558,6 +1556,7 @@ int show_filters(void *optctx, const char *opt, const char *arg)
|
||||
" V = Video input/output\n"
|
||||
" N = Dynamic number and/or type of input/output\n"
|
||||
" | = Source or sink filter\n");
|
||||
#if CONFIG_AVFILTER
|
||||
while ((filter = avfilter_next(filter))) {
|
||||
descr_cur = descr;
|
||||
for (i = 0; i < 2; i++) {
|
||||
@@ -1582,8 +1581,6 @@ int show_filters(void *optctx, const char *opt, const char *arg)
|
||||
filter->process_command ? 'C' : '.',
|
||||
filter->name, descr, filter->description);
|
||||
}
|
||||
#else
|
||||
printf("No filters available: libavfilter disabled\n");
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
@@ -1642,19 +1639,19 @@ int show_layouts(void *optctx, const char *opt, const char *arg)
|
||||
const char *name, *descr;
|
||||
|
||||
printf("Individual channels:\n"
|
||||
"NAME DESCRIPTION\n");
|
||||
"NAME DESCRIPTION\n");
|
||||
for (i = 0; i < 63; i++) {
|
||||
name = av_get_channel_name((uint64_t)1 << i);
|
||||
if (!name)
|
||||
continue;
|
||||
descr = av_get_channel_description((uint64_t)1 << i);
|
||||
printf("%-14s %s\n", name, descr);
|
||||
printf("%-12s%s\n", name, descr);
|
||||
}
|
||||
printf("\nStandard channel layouts:\n"
|
||||
"NAME DECOMPOSITION\n");
|
||||
"NAME DECOMPOSITION\n");
|
||||
for (i = 0; !av_get_standard_channel_layout(i, &layout, &name); i++) {
|
||||
if (name) {
|
||||
printf("%-14s ", name);
|
||||
printf("%-12s", name);
|
||||
for (j = 1; j; j <<= 1)
|
||||
if ((layout & j))
|
||||
printf("%s%s", (layout & (j - 1)) ? "+" : "", av_get_channel_name(j));
|
||||
@@ -1860,23 +1857,21 @@ int read_yesno(void)
|
||||
|
||||
int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
|
||||
{
|
||||
int64_t ret;
|
||||
int ret;
|
||||
FILE *f = av_fopen_utf8(filename, "rb");
|
||||
|
||||
if (!f) {
|
||||
ret = AVERROR(errno);
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot read file '%s': %s\n", filename,
|
||||
strerror(errno));
|
||||
return ret;
|
||||
return AVERROR(errno);
|
||||
}
|
||||
fseek(f, 0, SEEK_END);
|
||||
*size = ftell(f);
|
||||
fseek(f, 0, SEEK_SET);
|
||||
if (*size == (size_t)-1) {
|
||||
ret = AVERROR(errno);
|
||||
av_log(NULL, AV_LOG_ERROR, "IO error: %s\n", strerror(errno));
|
||||
fclose(f);
|
||||
return ret;
|
||||
return AVERROR(errno);
|
||||
}
|
||||
*bufptr = av_malloc(*size + 1);
|
||||
if (!*bufptr) {
|
||||
@@ -1888,9 +1883,9 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
|
||||
if (ret < *size) {
|
||||
av_free(*bufptr);
|
||||
if (ferror(f)) {
|
||||
ret = AVERROR(errno);
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while reading file '%s': %s\n",
|
||||
filename, strerror(errno));
|
||||
ret = AVERROR(errno);
|
||||
} else
|
||||
ret = AVERROR_EOF;
|
||||
} else {
|
||||
@@ -1997,7 +1992,7 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
|
||||
switch (check_stream_specifier(s, st, p + 1)) {
|
||||
case 1: *p = 0; break;
|
||||
case 0: continue;
|
||||
default: exit_program(1);
|
||||
default: return NULL;
|
||||
}
|
||||
|
||||
if (av_opt_find(&cc, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ) ||
|
||||
@@ -2055,184 +2050,3 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
|
||||
}
|
||||
return array;
|
||||
}
|
||||
|
||||
#if CONFIG_AVDEVICE
|
||||
static int print_device_sources(AVInputFormat *fmt, AVDictionary *opts)
|
||||
{
|
||||
int ret, i;
|
||||
AVFormatContext *dev = NULL;
|
||||
AVDeviceInfoList *device_list = NULL;
|
||||
AVDictionary *tmp_opts = NULL;
|
||||
|
||||
if (!fmt || !fmt->priv_class || !AV_IS_INPUT_DEVICE(fmt->priv_class->category))
|
||||
return AVERROR(EINVAL);
|
||||
|
||||
printf("Audo-detected sources for %s:\n", fmt->name);
|
||||
if (!fmt->get_device_list) {
|
||||
ret = AVERROR(ENOSYS);
|
||||
printf("Cannot list sources. Not implemented.\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* TODO: avformat_open_input calls read_header callback which is not necessary.
|
||||
Function like avformat_alloc_output_context2 for input could be helpful here. */
|
||||
av_dict_copy(&tmp_opts, opts, 0);
|
||||
if ((ret = avformat_open_input(&dev, NULL, fmt, &tmp_opts)) < 0) {
|
||||
printf("Cannot open device: %s.\n", fmt->name);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if ((ret = avdevice_list_devices(dev, &device_list)) < 0) {
|
||||
printf("Cannot list sources.\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
for (i = 0; i < device_list->nb_devices; i++) {
|
||||
printf("%s %s [%s]\n", device_list->default_device == i ? "*" : " ",
|
||||
device_list->devices[i]->device_name, device_list->devices[i]->device_description);
|
||||
}
|
||||
|
||||
fail:
|
||||
av_dict_free(&tmp_opts);
|
||||
avdevice_free_list_devices(&device_list);
|
||||
avformat_close_input(&dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int print_device_sinks(AVOutputFormat *fmt, AVDictionary *opts)
|
||||
{
|
||||
int ret, i;
|
||||
AVFormatContext *dev = NULL;
|
||||
AVDeviceInfoList *device_list = NULL;
|
||||
AVDictionary *tmp_opts = NULL;
|
||||
|
||||
if (!fmt || !fmt->priv_class || !AV_IS_OUTPUT_DEVICE(fmt->priv_class->category))
|
||||
return AVERROR(EINVAL);
|
||||
|
||||
printf("Audo-detected sinks for %s:\n", fmt->name);
|
||||
if (!fmt->get_device_list) {
|
||||
ret = AVERROR(ENOSYS);
|
||||
printf("Cannot list sinks. Not implemented.\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if ((ret = avformat_alloc_output_context2(&dev, fmt, NULL, NULL)) < 0) {
|
||||
printf("Cannot open device: %s.\n", fmt->name);
|
||||
goto fail;
|
||||
}
|
||||
av_dict_copy(&tmp_opts, opts, 0);
|
||||
av_opt_set_dict2(dev, &tmp_opts, AV_OPT_SEARCH_CHILDREN);
|
||||
|
||||
if ((ret = avdevice_list_devices(dev, &device_list)) < 0) {
|
||||
printf("Cannot list sinks.\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
for (i = 0; i < device_list->nb_devices; i++) {
|
||||
printf("%s %s [%s]\n", device_list->default_device == i ? "*" : " ",
|
||||
device_list->devices[i]->device_name, device_list->devices[i]->device_description);
|
||||
}
|
||||
|
||||
fail:
|
||||
av_dict_free(&tmp_opts);
|
||||
avdevice_free_list_devices(&device_list);
|
||||
avformat_free_context(dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int show_sinks_sources_parse_arg(const char *arg, char **dev, AVDictionary **opts)
|
||||
{
|
||||
int ret;
|
||||
if (arg) {
|
||||
char *opts_str = NULL;
|
||||
av_assert0(dev && opts);
|
||||
*dev = av_strdup(arg);
|
||||
if (!*dev)
|
||||
return AVERROR(ENOMEM);
|
||||
if ((opts_str = strchr(*dev, ','))) {
|
||||
*(opts_str++) = '\0';
|
||||
if (opts_str[0] && ((ret = av_dict_parse_string(opts, opts_str, "=", ":", 0)) < 0)) {
|
||||
av_freep(dev);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
} else
|
||||
printf("\nDevice name is not provided.\n"
|
||||
"You can pass devicename[,opt1=val1[,opt2=val2...]] as an argument.\n\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
int show_sources(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
AVInputFormat *fmt = NULL;
|
||||
char *dev = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
int ret = 0;
|
||||
int error_level = av_log_get_level();
|
||||
|
||||
av_log_set_level(AV_LOG_ERROR);
|
||||
|
||||
if ((ret = show_sinks_sources_parse_arg(arg, &dev, &opts)) < 0)
|
||||
goto fail;
|
||||
|
||||
do {
|
||||
fmt = av_input_audio_device_next(fmt);
|
||||
if (fmt) {
|
||||
if (!strcmp(fmt->name, "lavfi"))
|
||||
continue; //it's pointless to probe lavfi
|
||||
if (dev && strcmp(fmt->name, dev))
|
||||
continue;
|
||||
print_device_sources(fmt, opts);
|
||||
}
|
||||
} while (fmt);
|
||||
do {
|
||||
fmt = av_input_video_device_next(fmt);
|
||||
if (fmt) {
|
||||
if (dev && strcmp(fmt->name, dev))
|
||||
continue;
|
||||
print_device_sources(fmt, opts);
|
||||
}
|
||||
} while (fmt);
|
||||
fail:
|
||||
av_dict_free(&opts);
|
||||
av_free(dev);
|
||||
av_log_set_level(error_level);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int show_sinks(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
AVOutputFormat *fmt = NULL;
|
||||
char *dev = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
int ret = 0;
|
||||
int error_level = av_log_get_level();
|
||||
|
||||
av_log_set_level(AV_LOG_ERROR);
|
||||
|
||||
if ((ret = show_sinks_sources_parse_arg(arg, &dev, &opts)) < 0)
|
||||
goto fail;
|
||||
|
||||
do {
|
||||
fmt = av_output_audio_device_next(fmt);
|
||||
if (fmt) {
|
||||
if (dev && strcmp(fmt->name, dev))
|
||||
continue;
|
||||
print_device_sinks(fmt, opts);
|
||||
}
|
||||
} while (fmt);
|
||||
do {
|
||||
fmt = av_output_video_device_next(fmt);
|
||||
if (fmt) {
|
||||
if (dev && strcmp(fmt->name, dev))
|
||||
continue;
|
||||
print_device_sinks(fmt, opts);
|
||||
}
|
||||
} while (fmt);
|
||||
fail:
|
||||
av_dict_free(&opts);
|
||||
av_free(dev);
|
||||
av_log_set_level(error_level);
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
14 cmdutils.h
@@ -443,20 +443,6 @@ int show_formats(void *optctx, const char *opt, const char *arg);
|
||||
*/
|
||||
int show_devices(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
#if CONFIG_AVDEVICE
|
||||
/**
|
||||
* Print a listing containing autodetected sinks of the output device.
|
||||
* Device name with options may be passed as an argument to limit results.
|
||||
*/
|
||||
int show_sinks(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing autodetected sources of the input device.
|
||||
* Device name with options may be passed as an argument to limit results.
|
||||
*/
|
||||
int show_sources(void *optctx, const char *opt, const char *arg);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Print a listing containing all the codecs supported by the
|
||||
* program.
|
||||
|
@@ -27,9 +27,3 @@
|
||||
{ "opencl_bench", OPT_EXIT, {.func_arg = opt_opencl_bench}, "run benchmark on all OpenCL devices and show results" },
|
||||
{ "opencl_options", HAS_ARG, {.func_arg = opt_opencl}, "set OpenCL environment options" },
|
||||
#endif
|
||||
#if CONFIG_AVDEVICE
|
||||
{ "sources" , OPT_EXIT | HAS_ARG, { .func_arg = show_sources },
|
||||
"list sources of the input device", "device" },
|
||||
{ "sinks" , OPT_EXIT | HAS_ARG, { .func_arg = show_sinks },
|
||||
"list sinks of the output device", "device" },
|
||||
#endif
|
||||
|
@@ -22,7 +22,6 @@
|
||||
#include "libavutil/time.h"
|
||||
#include "libavutil/log.h"
|
||||
#include "libavutil/opencl.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "cmdutils.h"
|
||||
|
||||
typedef struct {
|
||||
@@ -239,8 +238,7 @@ int opt_opencl_bench(void *optctx, const char *opt, const char *arg)
|
||||
devices[count].platform_idx = i;
|
||||
devices[count].device_idx = j;
|
||||
devices[count].runtime = score;
|
||||
av_strlcpy(devices[count].device_name, device_node->device_name,
|
||||
sizeof(devices[count].device_name));
|
||||
strcpy(devices[count].device_name, device_node->device_name);
|
||||
count++;
|
||||
}
|
||||
}
|
||||
|
@@ -805,7 +805,7 @@ struct AVS_Library {
|
||||
|
||||
AVSC_INLINE AVS_Library * avs_load_library() {
|
||||
AVS_Library *library = (AVS_Library *)malloc(sizeof(AVS_Library));
|
||||
if (!library)
|
||||
if (library == NULL)
|
||||
return NULL;
|
||||
library->handle = LoadLibrary("avisynth");
|
||||
if (library->handle == NULL)
|
||||
@@ -870,7 +870,7 @@ fail:
|
||||
}
|
||||
|
||||
AVSC_INLINE void avs_free_library(AVS_Library *library) {
|
||||
if (!library)
|
||||
if (library == NULL)
|
||||
return;
|
||||
FreeLibrary(library->handle);
|
||||
free(library);
|
||||
|
@@ -54,7 +54,7 @@ static int getopt(int argc, char *argv[], char *opts)
|
||||
}
|
||||
}
|
||||
optopt = c = argv[optind][sp];
|
||||
if (c == ':' || !(cp = strchr(opts, c))) {
|
||||
if (c == ':' || (cp = strchr(opts, c)) == NULL) {
|
||||
fprintf(stderr, ": illegal option -- %c\n", c);
|
||||
if (argv[optind][++sp] == '\0') {
|
||||
optind++;
|
||||
|
@@ -39,7 +39,6 @@
|
||||
#include <windows.h>
|
||||
#include <process.h>
|
||||
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/internal.h"
|
||||
#include "libavutil/mem.h"
|
||||
@@ -55,30 +54,36 @@ typedef struct pthread_t {
|
||||
* not mutexes */
|
||||
typedef CRITICAL_SECTION pthread_mutex_t;
|
||||
|
||||
/* This is the CONDITION_VARIABLE typedef for using Windows' native
|
||||
* conditional variables on kernels 6.0+. */
|
||||
#if HAVE_CONDITION_VARIABLE_PTR
|
||||
typedef CONDITION_VARIABLE pthread_cond_t;
|
||||
#else
|
||||
/* This is the CONDITIONAL_VARIABLE typedef for using Window's native
|
||||
* conditional variables on kernels 6.0+.
|
||||
* MinGW does not currently have this typedef. */
|
||||
typedef struct pthread_cond_t {
|
||||
void *Ptr;
|
||||
void *ptr;
|
||||
} pthread_cond_t;
|
||||
|
||||
/* function pointers to conditional variable API on windows 6.0+ kernels */
|
||||
#if _WIN32_WINNT < 0x0600
|
||||
static void (WINAPI *cond_broadcast)(pthread_cond_t *cond);
|
||||
static void (WINAPI *cond_init)(pthread_cond_t *cond);
|
||||
static void (WINAPI *cond_signal)(pthread_cond_t *cond);
|
||||
static BOOL (WINAPI *cond_wait)(pthread_cond_t *cond, pthread_mutex_t *mutex,
|
||||
DWORD milliseconds);
|
||||
#else
|
||||
#define cond_init InitializeConditionVariable
|
||||
#define cond_broadcast WakeAllConditionVariable
|
||||
#define cond_signal WakeConditionVariable
|
||||
#define cond_wait SleepConditionVariableCS
|
||||
#endif
|
||||
|
||||
#if _WIN32_WINNT >= 0x0600
|
||||
#define InitializeCriticalSection(x) InitializeCriticalSectionEx(x, 0, 0)
|
||||
#define WaitForSingleObject(a, b) WaitForSingleObjectEx(a, b, FALSE)
|
||||
#endif
|
||||
|
||||
static av_unused unsigned __stdcall attribute_align_arg win32thread_worker(void *arg)
|
||||
static unsigned __stdcall attribute_align_arg win32thread_worker(void *arg)
|
||||
{
|
||||
pthread_t *h = arg;
|
||||
h->ret = h->func(h->arg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_unused int pthread_create(pthread_t *thread, const void *unused_attr,
|
||||
void *(*start_routine)(void*), void *arg)
|
||||
static int pthread_create(pthread_t *thread, const void *unused_attr,
|
||||
void *(*start_routine)(void*), void *arg)
|
||||
{
|
||||
thread->func = start_routine;
|
||||
thread->arg = arg;
|
||||
@@ -87,7 +92,7 @@ static av_unused int pthread_create(pthread_t *thread, const void *unused_attr,
|
||||
return !thread->handle;
|
||||
}
|
||||
|
||||
static av_unused void pthread_join(pthread_t thread, void **value_ptr)
|
||||
static void pthread_join(pthread_t thread, void **value_ptr)
|
||||
{
|
||||
DWORD ret = WaitForSingleObject(thread.handle, INFINITE);
|
||||
if (ret != WAIT_OBJECT_0)
|
||||
@@ -118,36 +123,6 @@ static inline int pthread_mutex_unlock(pthread_mutex_t *m)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if _WIN32_WINNT >= 0x0600
|
||||
static inline int pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
|
||||
{
|
||||
InitializeConditionVariable(cond);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* native condition variables do not destroy */
|
||||
static inline void pthread_cond_destroy(pthread_cond_t *cond)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
static inline void pthread_cond_broadcast(pthread_cond_t *cond)
|
||||
{
|
||||
WakeAllConditionVariable(cond);
|
||||
}
|
||||
|
||||
static inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
{
|
||||
SleepConditionVariableCS(cond, mutex, INFINITE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void pthread_cond_signal(pthread_cond_t *cond)
|
||||
{
|
||||
WakeConditionVariable(cond);
|
||||
}
|
||||
|
||||
#else // _WIN32_WINNT < 0x0600
|
||||
/* for pre-Windows 6.0 platforms we need to define and use our own condition
|
||||
* variable and api */
|
||||
typedef struct win32_cond_t {
|
||||
@@ -159,14 +134,7 @@ typedef struct win32_cond_t {
|
||||
volatile int is_broadcast;
|
||||
} win32_cond_t;
|
||||
|
||||
/* function pointers to conditional variable API on windows 6.0+ kernels */
|
||||
static void (WINAPI *cond_broadcast)(pthread_cond_t *cond);
|
||||
static void (WINAPI *cond_init)(pthread_cond_t *cond);
|
||||
static void (WINAPI *cond_signal)(pthread_cond_t *cond);
|
||||
static BOOL (WINAPI *cond_wait)(pthread_cond_t *cond, pthread_mutex_t *mutex,
|
||||
DWORD milliseconds);
|
||||
|
||||
static av_unused int pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
|
||||
static int pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
|
||||
{
|
||||
win32_cond_t *win32_cond = NULL;
|
||||
if (cond_init) {
|
||||
@@ -178,7 +146,7 @@ static av_unused int pthread_cond_init(pthread_cond_t *cond, const void *unused_
|
||||
win32_cond = av_mallocz(sizeof(win32_cond_t));
|
||||
if (!win32_cond)
|
||||
return ENOMEM;
|
||||
cond->Ptr = win32_cond;
|
||||
cond->ptr = win32_cond;
|
||||
win32_cond->semaphore = CreateSemaphore(NULL, 0, 0x7fffffff, NULL);
|
||||
if (!win32_cond->semaphore)
|
||||
return ENOMEM;
|
||||
@@ -191,9 +159,9 @@ static av_unused int pthread_cond_init(pthread_cond_t *cond, const void *unused_
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_unused void pthread_cond_destroy(pthread_cond_t *cond)
|
||||
static void pthread_cond_destroy(pthread_cond_t *cond)
|
||||
{
|
||||
win32_cond_t *win32_cond = cond->Ptr;
|
||||
win32_cond_t *win32_cond = cond->ptr;
|
||||
/* native condition variables do not destroy */
|
||||
if (cond_init)
|
||||
return;
|
||||
@@ -204,12 +172,12 @@ static av_unused void pthread_cond_destroy(pthread_cond_t *cond)
|
||||
pthread_mutex_destroy(&win32_cond->mtx_waiter_count);
|
||||
pthread_mutex_destroy(&win32_cond->mtx_broadcast);
|
||||
av_freep(&win32_cond);
|
||||
cond->Ptr = NULL;
|
||||
cond->ptr = NULL;
|
||||
}
|
||||
|
||||
static av_unused void pthread_cond_broadcast(pthread_cond_t *cond)
|
||||
static void pthread_cond_broadcast(pthread_cond_t *cond)
|
||||
{
|
||||
win32_cond_t *win32_cond = cond->Ptr;
|
||||
win32_cond_t *win32_cond = cond->ptr;
|
||||
int have_waiter;
|
||||
|
||||
if (cond_broadcast) {
|
||||
@@ -238,9 +206,9 @@ static av_unused void pthread_cond_broadcast(pthread_cond_t *cond)
|
||||
pthread_mutex_unlock(&win32_cond->mtx_broadcast);
|
||||
}
|
||||
|
||||
static av_unused int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
static int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
{
|
||||
win32_cond_t *win32_cond = cond->Ptr;
|
||||
win32_cond_t *win32_cond = cond->ptr;
|
||||
int last_waiter;
|
||||
if (cond_wait) {
|
||||
cond_wait(cond, mutex, INFINITE);
|
||||
@@ -270,9 +238,9 @@ static av_unused int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mu
|
||||
return pthread_mutex_lock(mutex);
|
||||
}
|
||||
|
||||
static av_unused void pthread_cond_signal(pthread_cond_t *cond)
|
||||
static void pthread_cond_signal(pthread_cond_t *cond)
|
||||
{
|
||||
win32_cond_t *win32_cond = cond->Ptr;
|
||||
win32_cond_t *win32_cond = cond->ptr;
|
||||
int have_waiter;
|
||||
if (cond_signal) {
|
||||
cond_signal(cond);
|
||||
@@ -294,9 +262,8 @@ static av_unused void pthread_cond_signal(pthread_cond_t *cond)
|
||||
|
||||
pthread_mutex_unlock(&win32_cond->mtx_broadcast);
|
||||
}
|
||||
#endif
|
||||
|
||||
static av_unused void w32thread_init(void)
|
||||
static void w32thread_init(void)
|
||||
{
|
||||
#if _WIN32_WINNT < 0x0600
|
||||
HANDLE kernel_dll = GetModuleHandle(TEXT("kernel32.dll"));
|
||||
|
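The hunk above is cut off inside w32thread_init(). As a rough, hypothetical sketch (not the actual FFmpeg source), the runtime lookup it performs so a single binary can use native condition variables on Vista+ kernels and fall back to the semaphore emulation elsewhere could look like this; GetModuleHandle/GetProcAddress are real Win32 calls, the pointer names are illustrative:

    #include <windows.h>

    /* illustrative function-pointer slots, mirroring the cond_* declarations above */
    static void (WINAPI *cond_init_fn)(void *cond);
    static void (WINAPI *cond_signal_fn)(void *cond);

    static void load_native_cond_api(void)
    {
        HMODULE kernel_dll = GetModuleHandle(TEXT("kernel32.dll"));

        /* on pre-Vista kernels these lookups return NULL and the
         * semaphore-based fallback defined above is used instead */
        cond_init_fn   = (void (WINAPI *)(void *))
                         GetProcAddress(kernel_dll, "InitializeConditionVariable");
        cond_signal_fn = (void (WINAPI *)(void *))
                         GetProcAddress(kernel_dll, "WakeConditionVariable");
    }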
247
doc/APIchanges
@@ -2,180 +2,19 @@ Never assume the API of libav* to be stable unless at least 1 month has passed
|
||||
since the last major version increase or the API was added.
|
||||
|
||||
The last version increases were:
|
||||
libavcodec: 2014-08-09
|
||||
libavdevice: 2014-08-09
|
||||
libavfilter: 2014-08-09
|
||||
libavformat: 2014-08-09
|
||||
libavresample: 2014-08-09
|
||||
libpostproc: 2014-08-09
|
||||
libswresample: 2014-08-09
|
||||
libswscale: 2014-08-09
|
||||
libavutil: 2014-08-09
|
||||
libavcodec: 2013-03-xx
|
||||
libavdevice: 2013-03-xx
|
||||
libavfilter: 2013-12-xx
|
||||
libavformat: 2013-03-xx
|
||||
libavresample: 2012-10-05
|
||||
libpostproc: 2011-04-18
|
||||
libswresample: 2011-09-19
|
||||
libswscale: 2011-06-20
|
||||
libavutil: 2012-10-22
|
||||
|
||||
|
||||
API changes, most recent first:
|
||||
|
||||
2014-11-21 - ab922f9 - lavu 54.15.100 - dict.h
|
||||
Add av_dict_get_string().
|
||||
|
||||
2014-11-18 - a54a51c - lavu 54.14.100 - float_dsp.h
|
||||
Add avpriv_float_dsp_alloc().
|
||||
|
||||
2014-11-16 - 6690d4c3 - lavf 56.13.100 - avformat.h
|
||||
Add AVStream.recommended_encoder_configuration with accessors.
|
||||
|
||||
2014-11-16 - bee5844d - lavu 54.13.100 - opt.h
|
||||
Add av_opt_serialize().
|
||||
|
||||
2014-11-16 - eec69332 - lavu 54.12.100 - opt.h
|
||||
Add av_opt_is_set_to_default().
|
||||
|
||||
2014-11-06 - 44fa267 / 5e80fb7 - lavc 56.11.100 / 56.6.0 - vorbis_parser.h
|
||||
Add a public API for parsing vorbis packets.
|
||||
|
||||
2014-10-15 - 17085a0 / 7ea1b34 - lavc 56.7.100 / 56.5.0 - avcodec.h
|
||||
Replace AVCodecContext.time_base used for decoding
|
||||
with AVCodecContext.framerate.
|
||||
|
||||
2014-10-15 - 51c810e / d565fef1 - lavc 56.6.100 / 56.4.0 - avcodec.h
|
||||
Add AV_HWACCEL_FLAG_IGNORE_LEVEL flag to av_vdpau_bind_context().
|
||||
|
||||
2014-10-13 - da21895 / 2df0c32e - lavc 56.5.100 / 56.3.0 - avcodec.h
|
||||
Add AVCodecContext.initial_padding. Deprecate the use of AVCodecContext.delay
|
||||
for audio encoding.
|
||||
|
||||
2014-10-08 - bb44f7d / 5a419b2 - lavu 54.10.100 / 54.4.0 - pixdesc.h
|
||||
Add API to return the name of frame and context color properties.
|
||||
|
||||
2014-10-06 - a61899a / e3e158e - lavc 56.3.100 / 56.2.0 - vdpau.h
|
||||
Add av_vdpau_bind_context(). This function should now be used for creating
|
||||
(or resetting) a AVVDPAUContext instead of av_vdpau_alloc_context().
|
||||
|
||||
2014-10-02 - cdd6f05 - lavc 56.2.100 - avcodec.h
|
||||
2014-10-02 - cdd6f05 - lavu 54.9.100 - frame.h
|
||||
Add AV_FRAME_DATA_SKIP_SAMPLES. Add lavc CODEC_FLAG2_SKIP_MANUAL and
|
||||
AVOption "skip_manual", which makes lavc export skip information via
|
||||
AV_FRAME_DATA_SKIP_SAMPLES AVFrame side data, instead of skipping and
|
||||
discarding samples automatically.
|
||||
|
||||
2014-10-02 - 0d92b0d - lavu 54.8.100 - avstring.h
|
||||
Add av_match_list()
|
||||
|
||||
2014-09-24 - ac68295 - libpostproc 53.1.100
|
||||
Add visualization support
|
||||
|
||||
2014-09-19 - 6edd6a4 - lavc 56.1.101 - dv_profile.h
|
||||
deprecate avpriv_dv_frame_profile2(), which was made public by accident.
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 2.4 was cut here -------- 8< ---------
|
||||
|
||||
2014-08-28 - f30a815 / 9301486 - lavc 56.1.100 / 56.1.0 - avcodec.h
|
||||
Add AV_PKT_DATA_STEREO3D to export container-level stereo3d information.
|
||||
|
||||
2014-08-25 - 215db29 / b263f8f - lavf 56.3.100 / 56.3.0 - avformat.h
|
||||
Add AVFormatContext.max_ts_probe.
|
||||
|
||||
2014-08-23 - 8fc9bd0 - lavu 54.7.100 - dict.h
|
||||
AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL arguments are now
|
||||
freed even on error. This is consistent with the behaviour all users
|
||||
of it we could find expect.
|
||||
|
||||
2014-08-21 - 980a5b0 - lavu 54.6.100 - frame.h motion_vector.h
|
||||
Add AV_FRAME_DATA_MOTION_VECTORS side data and AVMotionVector structure
|
||||
|
||||
2014-08-16 - b7d5e01 - lswr 1.1.100 - swresample.h
|
||||
Add AVFrame based API
|
||||
|
||||
2014-08-16 - c2829dc - lavu 54.4.100 - dict.h
|
||||
Add av_dict_set_int helper function.
|
||||
|
||||
2014-08-13 - c8571c6 / 8ddc326 - lavu 54.3.100 / 54.3.0 - mem.h
|
||||
Add av_strndup().
|
||||
|
||||
2014-08-13 - 2ba4577 / a8c104a - lavu 54.2.100 / 54.2.0 - opt.h
|
||||
Add av_opt_get_dict_val/set_dict_val with AV_OPT_TYPE_DICT to support
|
||||
dictionary types being set as options.
|
||||
|
||||
2014-08-13 - afbd4b8 - lavf 56.01.0 - avformat.h
|
||||
Add AVFormatContext.event_flags and AVStream.event_flags for signaling to
|
||||
the user when events happen in the file/stream.
|
||||
|
||||
2014-08-10 - 78eaaa8 / fb1ddcd - lavr 2.1.0 - avresample.h
|
||||
Add avresample_convert_frame() and avresample_config().
|
||||
|
||||
2014-08-10 - 78eaaa8 / fb1ddcd - lavu 54.1.100 / 54.1.0 - error.h
|
||||
Add AVERROR_INPUT_CHANGED and AVERROR_OUTPUT_CHANGED.
|
||||
|
||||
2014-08-08 - 3841f2a / d35b94f - lavc 55.73.102 / 55.57.4 - avcodec.h
|
||||
Deprecate FF_IDCT_XVIDMMX define and xvidmmx idct option.
|
||||
Replaced by FF_IDCT_XVID and xvid respectively.
|
||||
|
||||
2014-08-08 - 5c3c671 - lavf 55.53.100 - avio.h
|
||||
Add avio_feof() and deprecate url_feof().
|
||||
|
||||
2014-08-07 - bb78903 - lsws 2.1.3 - swscale.h
|
||||
sws_getContext is not going to be removed in the future.
|
||||
|
||||
2014-08-07 - a561662 / ad1ee5f - lavc 55.73.101 / 55.57.3 - avcodec.h
|
||||
reordered_opaque is not going to be removed in the future.
|
||||
|
||||
2014-08-02 - 28a2107 - lavu 52.98.100 - pixelutils.h
|
||||
Add pixelutils API with SAD functions
|
||||
|
||||
2014-08-04 - 6017c98 / e9abafc - lavu 52.97.100 / 53.22.0 - pixfmt.h
|
||||
Add AV_PIX_FMT_YA16 pixel format for 16 bit packed gray with alpha.
|
||||
|
||||
2014-08-04 - 4c8bc6f / e96c3b8 - lavu 52.96.101 / 53.21.1 - avstring.h
|
||||
Rename AV_PIX_FMT_Y400A to AV_PIX_FMT_YA8 to better identify the format.
|
||||
An alias pixel format and color space name are provided for compatibility.
|
||||
|
||||
2014-08-04 - 073c074 / d2962e9 - lavu 52.96.100 / 53.21.0 - pixdesc.h
|
||||
Support name aliases for pixel formats.
|
||||
|
||||
2014-08-03 - 71d008e / 1ef9e83 - lavc 55.72.101 / 55.57.2 - avcodec.h
|
||||
2014-08-03 - 71d008e / 1ef9e83 - lavu 52.95.100 / 53.20.0 - frame.h
|
||||
Deprecate AVCodecContext.dtg_active_format and use side-data instead.
|
||||
|
||||
2014-08-03 - e680c73 - lavc 55.72.100 - avcodec.h
|
||||
Add get_pixels() to AVDCT
|
||||
|
||||
2014-08-03 - 9400603 / 9f17685 - lavc 55.71.101 / 55.57.1 - avcodec.h
|
||||
Deprecate unused FF_IDCT_IPP define and ipp avcodec option.
|
||||
Deprecate unused FF_DEBUG_PTS define and pts avcodec option.
|
||||
Deprecate unused FF_CODER_TYPE_DEFLATE define and deflate avcodec option.
|
||||
Deprecate unused FF_DCT_INT define and int avcodec option.
|
||||
Deprecate unused avcodec option scenechange_factor.
|
||||
|
||||
2014-07-30 - ba3e331 - lavu 52.94.100 - frame.h
|
||||
Add av_frame_side_data_name()
|
||||
|
||||
2014-07-29 - 80a3a66 / 3a19405 - lavf 56.01.100 / 56.01.0 - avformat.h
|
||||
Add mime_type field to AVProbeData, which now MUST be initialized in
|
||||
order to avoid uninitialized reads of the mime_type pointer, likely
|
||||
leading to crashes.
|
||||
Typically, this means you will do 'AVProbeData pd = { 0 };' instead of
|
||||
'AVProbeData pd;'.
|
||||
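A minimal illustration of the initialization this entry asks for (the probe buffer and its size are hypothetical placeholders):

    AVProbeData pd = { 0 };       /* zero-init so mime_type starts out NULL */
    pd.filename = "clip.mkv";     /* hypothetical input name */
    pd.buf      = probe_buf;      /* hypothetical, padded probe buffer */
    pd.buf_size = probe_buf_size;
    AVInputFormat *fmt = av_probe_input_format(&pd, 1);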
|
||||
2014-07-29 - 31e0b5d / 69e7336 - lavu 52.92.100 / 53.19.0 - avstring.h
|
||||
Make name matching function from lavf public as av_match_name().
|
||||
|
||||
2014-07-28 - 2e5c8b0 / c5fca01 - lavc 55.71.100 / 55.57.0 - avcodec.h
|
||||
Add AV_CODEC_PROP_REORDER to mark codecs supporting frame reordering.
|
||||
|
||||
2014-07-27 - ff9a154 - lavf 55.50.100 - avformat.h
|
||||
New field int64_t probesize2 instead of deprecated
|
||||
field int probesize.
|
||||
|
||||
2014-07-27 - 932ff70 - lavc 55.70.100 - avdct.h
|
||||
Add AVDCT / avcodec_dct_alloc() / avcodec_dct_init().
|
||||
|
||||
2014-07-23 - 8a4c086 - lavf 55.49.100 - avio.h
|
||||
Add avio_read_to_bprint()
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 2.3 was cut here -------- 8< ---------
|
||||
|
||||
2014-07-14 - 62227a7 - lavf 55.47.100 - avformat.h
|
||||
Add av_stream_get_parser()
|
||||
|
||||
@@ -200,7 +39,7 @@ API changes, most recent first:
|
||||
Increase FF_INPUT_BUFFER_PADDING_SIZE to 32 due to some corner cases needing
|
||||
it
|
||||
|
||||
2014-06-10 - 5482780 - lavf 55.43.100 - avformat.h
|
||||
2014-06-10 - xxxxxxx - lavf 55.43.100 - avformat.h
|
||||
New field int64_t max_analyze_duration2 instead of deprecated
|
||||
int max_analyze_duration.
|
||||
|
||||
@@ -220,28 +59,28 @@ API changes, most recent first:
|
||||
2014-05-29 - bdb2e80 / b2d4565 - lavr 1.3.0 - avresample.h
|
||||
Add avresample_max_output_samples
|
||||
|
||||
2014-05-28 - d858ee7 / 6d21259 - lavf 55.42.100 / 55.19.0 - avformat.h
|
||||
2014-05-24 - d858ee7 / 6d21259 - lavf 55.42.100 / 55.19.0 - avformat.h
|
||||
Add strict_std_compliance and related AVOptions to support experimental
|
||||
muxing.
|
||||
|
||||
2014-05-26 - 55cc60c - lavu 52.87.100 - threadmessage.h
|
||||
2014-05-26 - xxxxxxx - lavu 52.87.100 - threadmessage.h
|
||||
Add thread message queue API.
|
||||
|
||||
2014-05-26 - c37d179 - lavf 55.41.100 - avformat.h
|
||||
Add format_probesize to AVFormatContext.
|
||||
|
||||
2014-05-20 - 7d25af1 / c23c96b - lavf 55.39.100 / 55.18.0 - avformat.h
|
||||
2014-05-19 - 7d25af1 / c23c96b - lavf 55.39.100 / 55.18.0 - avformat.h
|
||||
Add av_stream_get_side_data() to access stream-level side data
|
||||
in the same way as av_packet_get_side_data().
|
||||
|
||||
2014-05-20 - 7336e39 - lavu 52.86.100 - fifo.h
|
||||
2014-05-xx - xxxxxxx - lavu 52.86.100 - fifo.h
|
||||
Add av_fifo_alloc_array() function.
|
||||
|
||||
2014-05-19 - ef1d4ee / bddd8cb - lavu 52.85.100 / 53.15.0 - frame.h, display.h
|
||||
Add AV_FRAME_DATA_DISPLAYMATRIX for exporting frame-level
|
||||
spatial rendering on video frames for proper display.
|
||||
|
||||
2014-05-19 - ef1d4ee / bddd8cb - lavc 55.64.100 / 55.53.0 - avcodec.h
|
||||
2014-05-xx - xxxxxxx - lavc 55.64.100 / 55.53.0 - avcodec.h
|
||||
Add AV_PKT_DATA_DISPLAYMATRIX for exporting packet-level
|
||||
spatial rendering on video frames for proper display.
|
||||
|
||||
@@ -257,7 +96,7 @@ API changes, most recent first:
|
||||
Add av_gettime_relative() av_gettime_relative_is_monotonic()
|
||||
|
||||
2014-05-15 - eacf7d6 / 0c1959b - lavf 55.38.100 / 55.17.0 - avformat.h
|
||||
Add AVFMT_FLAG_BITEXACT flag. Muxers now use it instead of checking
|
||||
Add AVMFT_FLAG_BITEXACT flag. Muxers now use it instead of checking
|
||||
CODEC_FLAG_BITEXACT on the first stream.
|
||||
|
||||
2014-05-15 - 96cb4c8 - lswr 0.19.100 - swresample.h
|
||||
@@ -266,10 +105,10 @@ API changes, most recent first:
|
||||
2014-05-11 - 14aef38 / 66e6c8a - lavu 52.83.100 / 53.14.0 - pixfmt.h
|
||||
Add AV_PIX_FMT_VDA for new-style VDA acceleration.
|
||||
|
||||
2014-05-07 - 351f611 - lavu 52.82.100 - fifo.h
|
||||
2014-05-xx - xxxxxxx - lavu 52.82.0 - fifo.h
|
||||
Add av_fifo_freep() function.
|
||||
|
||||
2014-05-02 - ba52fb11 - lavu 52.81.100 - opt.h
|
||||
2014-05-02 - ba52fb11 - lavu 52.81.0 - opt.h
|
||||
Add av_opt_set_dict2() function.
|
||||
|
||||
2014-05-01 - e77b985 / a2941c8 - lavc 55.60.103 / 55.50.3 - avcodec.h
|
||||
@@ -288,14 +127,10 @@ API changes, most recent first:
|
||||
Deprecate CODEC_FLAG_INPUT_PRESERVED. Its functionality is replaced by passing
|
||||
reference-counted frames to encoders.
|
||||
|
||||
2014-04-30 - 617e866 - lavu 52.81.100 - pixdesc.h
|
||||
Add av_find_best_pix_fmt_of_2(), av_get_pix_fmt_loss()
|
||||
Deprecate avcodec_get_pix_fmt_loss(), avcodec_find_best_pix_fmt_of_2()
|
||||
|
||||
2014-04-29 - 1bf6396 - lavc 55.60.100 - avcodec.h
|
||||
Add AVCodecDescriptor.mime_types field.
|
||||
|
||||
2014-04-29 - b804eb4 - lavu 52.80.100 - hash.h
|
||||
2014-04-29 - xxxxxxx - lavu 52.80.0 - hash.h
|
||||
Add av_hash_final_bin(), av_hash_final_hex() and av_hash_final_b64().
|
||||
|
||||
2014-03-07 - 8b2a130 - lavc 55.50.0 / 55.53.100 - dxva2.h
|
||||
@@ -307,7 +142,7 @@ API changes, most recent first:
|
||||
2014-04-17 - a8d01a7 / 0983d48 - lavu 53.12.0 / 52.77.100 - crc.h
|
||||
Add AV_CRC_16_ANSI_LE crc variant.
|
||||
|
||||
2014-04-15 - ef818d8 - lavf 55.37.101 - avformat.h
|
||||
2014-04-XX - xxxxxxx - lavf xx.xx.1xx - avformat.h
|
||||
Add av_format_inject_global_side_data()
|
||||
|
||||
2014-04-12 - 4f698be - lavu 52.76.100 - log.h
|
||||
@@ -353,9 +188,6 @@ API changes, most recent first:
|
||||
Give the name AVPacketSideData to the previously anonymous struct used for
|
||||
AVPacket.side_data.
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 2.2 was cut here -------- 8< ---------
|
||||
|
||||
2014-03-18 - 37c07d4 - lsws 2.5.102
|
||||
Make gray16 full-scale.
|
||||
|
||||
@@ -387,7 +219,7 @@ API changes, most recent first:
|
||||
2014-02-19 - f4c8d00 / 6bb8720 - lavu 52.64.101 / 53.3.1 - opt.h
|
||||
Deprecate unused AV_OPT_FLAG_METADATA.
|
||||
|
||||
2014-02-16 - 81c3f81 - lavd 55.10.100 - avdevice.h
|
||||
2014-02-xx - xxxxxxx - lavd 55.10.100 - avdevice.h
|
||||
Add avdevice_list_devices() and avdevice_free_list_devices()
|
||||
|
||||
2014-02-16 - db3c970 - lavf 55.33.100 - avio.h
|
||||
@@ -428,7 +260,7 @@ API changes, most recent first:
|
||||
2014-01-19 - 1a193c4 - lavf 55.25.100 - avformat.h
|
||||
Add avformat_get_mov_video_tags() and avformat_get_mov_audio_tags().
|
||||
|
||||
2014-01-19 - 3532dd5 - lavu 52.63.100 - rational.h
|
||||
2014-01-19 - xxxxxxx - lavu 52.63.100 - rational.h
|
||||
Add av_make_q() function.
|
||||
|
||||
2014-01-05 - 4cf4da9 / 5b4797a - lavu 52.62.100 / 53.2.0 - frame.h
|
||||
@@ -498,9 +330,6 @@ API changes, most recent first:
|
||||
2013-10-31 - 78265fc / 28096e0 - lavu 52.49.100 / 52.17.0 - frame.h
|
||||
Add AVFrame.flags and AV_FRAME_FLAG_CORRUPT.
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 2.1 was cut here -------- 8< ---------
|
||||
|
||||
2013-10-27 - dbe6f9f - lavc 55.39.100 - avcodec.h
|
||||
Add CODEC_CAP_DELAY support to avcodec_decode_subtitle2.
|
||||
|
||||
@@ -573,9 +402,6 @@ API changes, most recent first:
|
||||
Add avcodec_chroma_pos_to_enum()
|
||||
Add avcodec_enum_to_chroma_pos()
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 2.0 was cut here -------- 8< ---------
|
||||
|
||||
2013-07-03 - 838bd73 - lavfi 3.78.100 - avfilter.h
|
||||
Deprecate avfilter_graph_parse() in favor of the equivalent
|
||||
avfilter_graph_parse_ptr().
|
||||
@@ -648,9 +474,6 @@ API changes, most recent first:
|
||||
2013-03-17 - 7aa9af5 - lavu 52.20.100 - opt.h
|
||||
Add AV_OPT_TYPE_VIDEO_RATE value to AVOptionType enum.
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 1.2 was cut here -------- 8< ---------
|
||||
|
||||
2013-03-07 - 9767ec6 - lavu 52.18.100 - avstring.h,bprint.h
|
||||
Add av_escape() and av_bprint_escape() API.
|
||||
|
||||
@@ -663,9 +486,6 @@ API changes, most recent first:
|
||||
2013-01-01 - 2eb2e17 - lavfi 3.34.100
|
||||
Add avfilter_get_audio_buffer_ref_from_arrays_channels.
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 1.1 was cut here -------- 8< ---------
|
||||
|
||||
2012-12-20 - 34de47aa - lavfi 3.29.100 - avfilter.h
|
||||
Add AVFilterLink.channels, avfilter_link_get_channels()
|
||||
and avfilter_ref_get_channels().
|
||||
@@ -711,9 +531,6 @@ API changes, most recent first:
|
||||
Add LIBSWRESAMPLE_VERSION, LIBSWRESAMPLE_BUILD
|
||||
and LIBSWRESAMPLE_IDENT symbols.
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 1.0 was cut here -------- 8< ---------
|
||||
|
||||
2012-09-06 - 29e972f - lavu 51.72.100 - parseutils.h
|
||||
Add av_small_strptime() time parsing function.
|
||||
|
||||
@@ -918,9 +735,6 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
|
||||
avresample_read() are now uint8_t** instead of void**.
|
||||
Libavresample is now stable.
|
||||
|
||||
2012-09-26 - 3ba0dab7 / 1384df64 - lavf 54.29.101 / 56.06.3 - avformat.h
|
||||
Add AVFormatContext.avoid_negative_ts.
|
||||
|
||||
2012-09-24 - 46a3595 / a42aada - lavc 54.59.100 / 54.28.0 - avcodec.h
|
||||
Add avcodec_free_frame(). This function must now
|
||||
be used for freeing an AVFrame.
|
||||
@@ -1135,9 +949,6 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
|
||||
2012-01-12 - b18e17e / 3167dc9 - lavfi 2.59.100 / 2.15.0
|
||||
Add a new installed header -- libavfilter/version.h -- with version macros.
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 0.9 was cut here -------- 8< ---------
|
||||
|
||||
2011-12-08 - a502939 - lavfi 2.52.0
|
||||
Add av_buffersink_poll_frame() to buffersink.h.
|
||||
|
||||
@@ -1347,13 +1158,6 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
|
||||
2011-06-28 - 5129336 - lavu 51.11.0 - avutil.h
|
||||
Define the AV_PICTURE_TYPE_NONE value in AVPictureType enum.
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 0.7 was cut here -------- 8< ---------
|
||||
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 0.8 was cut here -------- 8< ---------
|
||||
|
||||
2011-06-19 - fd2c0a5 - lavfi 2.23.0 - avfilter.h
|
||||
Add layout negotiation fields and helper functions.
|
||||
|
||||
@@ -2031,9 +1835,6 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
|
||||
2010-06-02 - 7e566bb - lavc 52.73.0 - av_get_codec_tag_string()
|
||||
Add av_get_codec_tag_string().
|
||||
|
||||
|
||||
-------- 8< --------- FFmpeg 0.6 was cut here -------- 8< ---------
|
||||
|
||||
2010-06-01 - 2b99142 - lsws 0.11.0 - convertPalette API
|
||||
Add sws_convertPalette8ToPacked32() and sws_convertPalette8ToPacked24().
|
||||
|
||||
@@ -2051,6 +1852,10 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
|
||||
2010-05-09 - b6bc205 - lavfi 1.20.0 - AVFilterPicRef
|
||||
Add interlaced and top_field_first fields to AVFilterPicRef.
|
||||
|
||||
------------------------------8<-------------------------------------
|
||||
0.6 branch was cut here
|
||||
----------------------------->8--------------------------------------
|
||||
|
||||
2010-05-01 - 8e2ee18 - lavf 52.62.0 - probe function
|
||||
Add av_probe_input_format2 to API, it allows ignoring probe
|
||||
results below given score and returns the actual probe score.
|
||||
|
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 2.5.7
PROJECT_NUMBER = 2.3.1

# With the PROJECT_LOGO tag one can specify a logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55

15
doc/Makefile
@@ -38,9 +38,7 @@ DOCS = $(DOCS-yes)
|
||||
|
||||
DOC_EXAMPLES-$(CONFIG_AVIO_READING_EXAMPLE) += avio_reading
|
||||
DOC_EXAMPLES-$(CONFIG_AVCODEC_EXAMPLE) += avcodec
|
||||
DOC_EXAMPLES-$(CONFIG_DECODING_ENCODING_EXAMPLE) += decoding_encoding
|
||||
DOC_EXAMPLES-$(CONFIG_DEMUXING_DECODING_EXAMPLE) += demuxing_decoding
|
||||
DOC_EXAMPLES-$(CONFIG_EXTRACT_MVS_EXAMPLE) += extract_mvs
|
||||
DOC_EXAMPLES-$(CONFIG_FILTER_AUDIO_EXAMPLE) += filter_audio
|
||||
DOC_EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE) += filtering_audio
|
||||
DOC_EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE) += filtering_video
|
||||
@@ -82,25 +80,14 @@ $(GENTEXI): doc/avoptions_%.texi: doc/print_options$(HOSTEXESUF)
|
||||
$(M)doc/print_options $* > $@
|
||||
|
||||
doc/%.html: TAG = HTML
|
||||
doc/%-all.html: TAG = HTML
|
||||
|
||||
ifdef HAVE_MAKEINFO_HTML
|
||||
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.pm $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)makeinfo --html -I doc --no-split -D config-not-all --init-file=$(SRC_PATH)/doc/t2h.pm --output $@ $<
|
||||
|
||||
doc/%-all.html: doc/%.texi $(SRC_PATH)/doc/t2h.pm $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)makeinfo --html -I doc --no-split -D config-all --init-file=$(SRC_PATH)/doc/t2h.pm --output $@ $<
|
||||
else
|
||||
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)texi2html -I doc -monolithic --D=config-not-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
|
||||
|
||||
doc/%-all.html: TAG = HTML
|
||||
doc/%-all.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)texi2html -I doc -monolithic --D=config-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
|
||||
endif
|
||||
|
||||
doc/%.pod: TAG = POD
|
||||
doc/%.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI)
|
||||
|
@@ -13,16 +13,7 @@ bitstream filter using the option @code{--disable-bsf=BSF}.
The option @code{-bsfs} of the ff* tools will display the list of
all the supported bitstream filters included in your build.

The ff* tools have a -bsf option applied per stream, taking a
comma-separated list of filters, whose parameters follow the filter
name after a '='.

@example
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
@end example
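The same filters are reachable from C through libavcodec's av_bitstream_filter_* API; a hedged sketch (assuming a packet pkt read from stream st, error handling omitted):

    AVBitStreamFilterContext *bsfc = av_bitstream_filter_init("aac_adtstoasc");
    if (bsfc) {
        uint8_t *out_data = NULL;
        int      out_size = 0;
        /* a return value > 0 means out_data was newly allocated
         * and must be freed by the caller */
        int ret = av_bitstream_filter_filter(bsfc, st->codec, NULL,
                                             &out_data, &out_size,
                                             pkt.data, pkt.size,
                                             pkt.flags & AV_PKT_FLAG_KEY);
        if (ret > 0)
            av_free(out_data);
        av_bitstream_filter_close(bsfc);
    }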

Below is a description of the currently available bitstream filters,
with their parameters, if any.
Below is a description of the currently available bitstream filters.

@section aac_adtstoasc
|
||||
@@ -144,16 +135,9 @@ ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
|
||||
Damages the contents of packets without damaging the container. Can be
|
||||
used for fuzzing or testing error resilience/concealment.
|
||||
|
||||
Parameters:
|
||||
A numeral string, whose value is related to how often output bytes will
|
||||
be modified. Therefore, values below or equal to 0 are forbidden, and
|
||||
the lower the more frequent bytes will be modified, with 1 meaning
|
||||
every byte is modified.
|
||||
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
|
||||
ffmpeg -i INPUT -c copy -bsf noise output.mkv
|
||||
@end example
|
||||
applies the modification to every byte.
|
||||
|
||||
@section remove_extra
|
||||
|
||||
|
5
doc/bootstrap.min.css
vendored
File diff suppressed because one or more lines are too long
@@ -71,9 +71,7 @@ Force low delay.
|
||||
@item global_header
|
||||
Place global headers in extradata instead of every keyframe.
|
||||
@item bitexact
|
||||
Only write platform-, build- and time-independent data. (except (I)DCT).
|
||||
This ensures that file and data checksums are reproducible and match between
|
||||
platforms. Its primary use is for regression testing.
|
||||
Use only bitexact stuff (except (I)DCT).
|
||||
@item aic
|
||||
Apply H263 advanced intra coding / mpeg4 ac prediction.
|
||||
@item cbp
|
||||
@@ -500,8 +498,6 @@ threading operations
|
||||
@item vismv @var{integer} (@emph{decoding,video})
|
||||
Visualize motion vectors (MVs).
|
||||
|
||||
This option is deprecated, see the codecview filter instead.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item pf
|
||||
@@ -801,9 +797,6 @@ Frame data might be split into multiple chunks.
|
||||
Show all frames before the first keyframe.
|
||||
@item skiprd
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
@item export_mvs
|
||||
Export motion vectors into frame side-data (see @code{AV_FRAME_DATA_MOTION_VECTORS})
|
||||
for codecs that support it. See also @file{doc/examples/export_mvs.c}.
|
||||
@end table
|
||||
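The export_mvs entry removed above corresponds to the extract_mvs example that is also deleted later in this diff. A hypothetical sketch of how a caller enables the flag and reads the exported vectors:

    AVDictionary *opts = NULL;
    av_dict_set(&opts, "flags2", "+export_mvs", 0);   /* request MV side data */
    avcodec_open2(dec_ctx, dec, &opts);
    av_dict_free(&opts);

    /* after a frame has been decoded */
    AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
    if (sd) {
        const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
        int nb_mvs = sd->size / sizeof(*mvs);
        /* each entry carries source, block size, src/dst coordinates and flags */
    }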
|
||||
@item error @var{integer} (@emph{encoding,video})
|
||||
@@ -1116,19 +1109,6 @@ Interlaced video, bottom coded first, top displayed first
|
||||
Set to 1 to disable processing alpha (transparency). This works like the
|
||||
@samp{gray} flag in the @option{flags} option which skips chroma information
|
||||
instead of alpha. Default is 0.
|
||||
|
||||
@item codec_whitelist @var{list} (@emph{input})
|
||||
"," separated List of allowed decoders. By default all are allowed.
|
||||
|
||||
@item dump_separator @var{string} (@emph{input})
|
||||
Separator used to separate the fields printed on the command line about the
|
||||
Stream parameters.
|
||||
For example to separate the fields with newlines and indention:
|
||||
@example
|
||||
ffprobe -dump_separator "
|
||||
" -i ~/videos/matrixbench_mpeg2.mpg
|
||||
@end example
|
||||
|
||||
@end table
|
||||
|
||||
@c man end CODEC OPTIONS
|
||||
|
@@ -163,9 +163,6 @@ Requires the presence of the libopus headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libopus}.
|
||||
|
||||
An FFmpeg native decoder for Opus exists, so users can decode Opus
|
||||
without this library.
|
||||
|
||||
@c man end AUDIO DECODERS
|
||||
|
||||
@chapter Subtitles Decoders
|
||||
@@ -190,15 +187,6 @@ The format for this option is a string containing 16 24-bits hexadecimal
|
||||
numbers (without 0x prefix) separated by comas, for example @code{0d00ee,
|
||||
ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
|
||||
7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b}.
|
||||
|
||||
@item ifo_palette
|
||||
Specify the IFO file from which the global palette is obtained.
|
||||
(experimental)
|
||||
|
||||
@item forced_subs_only
|
||||
Only decode subtitle entries marked as forced. Some titles have forced
|
||||
and non-forced subtitles in the same track. Setting this flag to @code{1}
|
||||
will only keep the forced subtitles. Default value is @code{0}.
|
||||
@end table
|
||||
|
||||
@section libzvbi-teletext
|
||||
|
@@ -29,26 +29,6 @@ the caller can decide which variant streams to actually receive.
|
||||
The total bitrate of the variant that the stream belongs to is
|
||||
available in a metadata key named "variant_bitrate".
|
||||
|
||||
@section apng
|
||||
|
||||
Animated Portable Network Graphics demuxer.
|
||||
|
||||
This demuxer is used to demux APNG files.
|
||||
All headers, but the PNG signature, up to (but not including) the first
|
||||
fcTL chunk are transmitted as extradata.
|
||||
Frames are then split as being all the chunks between two fcTL ones, or
|
||||
between the last fcTL and IEND chunks.
|
||||
|
||||
@table @option
|
||||
@item -ignore_loop @var{bool}
|
||||
Ignore the loop variable in the file if set.
|
||||
@item -max_fps @var{int}
|
||||
Maximum framerate in frames per second (0 for no limit).
|
||||
@item -default_fps @var{int}
|
||||
Default framerate in frames per second when none is specified in the file
|
||||
(0 meaning as fast as possible).
|
||||
@end table
|
||||
|
||||
@section asf
|
||||
|
||||
Advanced Systems Format demuxer.
|
||||
@@ -194,40 +174,6 @@ See @url{http://quvi.sourceforge.net/} for more information.
|
||||
FFmpeg needs to be built with @code{--enable-libquvi} for this demuxer to be
|
||||
enabled.
|
||||
|
||||
@section gif
|
||||
|
||||
Animated GIF demuxer.
|
||||
|
||||
It accepts the following options:
|
||||
|
||||
@table @option
|
||||
@item min_delay
|
||||
Set the minimum valid delay between frames in hundredths of seconds.
|
||||
Range is 0 to 6000. Default value is 2.
|
||||
|
||||
@item default_delay
|
||||
Set the default delay between frames in hundredths of seconds.
|
||||
Range is 0 to 6000. Default value is 10.
|
||||
|
||||
@item ignore_loop
|
||||
GIF files can contain information to loop a certain number of times (or
|
||||
infinitely). If @option{ignore_loop} is set to 1, then the loop setting
|
||||
from the input will be ignored and looping will not occur. If set to 0,
|
||||
then looping will occur and will cycle the number of times according to
|
||||
the GIF. Default value is 1.
|
||||
@end table
|
||||
|
||||
For example, with the overlay filter, place an infinitely looping GIF
|
||||
over another video:
|
||||
@example
|
||||
ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv
|
||||
@end example
|
||||
|
||||
Note that in the above example the shortest option for overlay filter is
|
||||
used to end the output video at the length of the shortest input file,
|
||||
which in this case is @file{input.mp4} as the GIF in this example loops
|
||||
infinitely.
|
||||
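These GIF demuxer options can also be set from C when opening the file; a small, hypothetical helper (the option name is taken from the table above):

    #include <libavformat/avformat.h>

    static int open_gif_respecting_loop(const char *name, AVFormatContext **ic)
    {
        AVDictionary *opts = NULL;
        int ret;

        av_dict_set(&opts, "ignore_loop", "0", 0);  /* honour the file's own loop count */
        ret = avformat_open_input(ic, name, NULL, &opts);
        av_dict_free(&opts);                        /* any entries left unconsumed are freed here */
        return ret;
    }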
|
||||
@section image2
|
||||
|
||||
Image file demuxer.
|
||||
|
@@ -323,12 +323,9 @@ Always fill out the commit log message. Describe in a few lines what you
|
||||
changed and why. You can refer to mailing list postings if you fix a
|
||||
particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
|
||||
Recommended format:
|
||||
|
||||
@example
|
||||
area changed: Short 1 line description
|
||||
|
||||
details describing what and why and giving references.
|
||||
@end example
|
||||
|
||||
@item
|
||||
Make sure the author of the commit is set correctly. (see git commit --author)
|
||||
|
@@ -1032,7 +1032,7 @@ configuration. You need to explicitly configure the build with
|
||||
|
||||
@subsection Option Mapping
|
||||
|
||||
Most libopus options are modelled after the @command{opusenc} utility from
|
||||
Most libopus options are modeled after the @command{opusenc} utility from
|
||||
opus-tools. The following is an option mapping chart describing options
|
||||
supported by the libopus wrapper, and their @command{opusenc}-equivalent
|
||||
in parentheses.
|
||||
@@ -1330,7 +1330,7 @@ ffmpeg -i INPUT -codec:v libtheora -b:v 1000k OUTPUT.ogg
|
||||
|
||||
@section libvpx
|
||||
|
||||
VP8/VP9 format supported through libvpx.
|
||||
VP8 format supported through libvpx.
|
||||
|
||||
Requires the presence of the libvpx headers and library during configuration.
|
||||
You need to explicitly configure the build with @code{--enable-libvpx}.
|
||||
@@ -1442,9 +1442,6 @@ g_lag_in_frames
|
||||
@item vp8flags error_resilient
|
||||
g_error_resilient
|
||||
|
||||
@item aq_mode
|
||||
@code{VP9E_SET_AQ_MODE}
|
||||
|
||||
@end table
|
||||
|
||||
For more information about libvpx see:
|
||||
@@ -1745,10 +1742,6 @@ Enable calculation and printing SSIM stats after the encoding.
|
||||
Enable the use of Periodic Intra Refresh instead of IDR frames when set
|
||||
to 1.
|
||||
|
||||
@item avcintra-class (@emph{class})
|
||||
Configure the encoder to generate AVC-Intra.
|
||||
Valid values are 50,100 and 200
|
||||
|
||||
@item bluray-compat (@emph{bluray-compat})
|
||||
Configure the encoder to be compatible with the bluray standard.
|
||||
It is a shorthand for setting "bluray-compat=1 force-cfr=1".
|
||||
@@ -1886,34 +1879,6 @@ no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT
|
||||
Encoding ffpresets for common usages are provided so they can be used with the
|
||||
general presets system (e.g. passing the @option{pre} option).
|
||||
|
||||
@section libx265
|
||||
|
||||
x265 H.265/HEVC encoder wrapper.
|
||||
|
||||
This encoder requires the presence of the libx265 headers and library
|
||||
during configuration. You need to explicitly configure the build with
|
||||
@option{--enable-libx265}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item preset
|
||||
Set the x265 preset.
|
||||
|
||||
@item tune
|
||||
Set the x265 tune parameter.
|
||||
|
||||
@item x265-params
|
||||
Set x265 options using a list of @var{key}=@var{value} couples separated
|
||||
by ":". See @command{x265 --help} for a list of options.
|
||||
|
||||
For example to specify libx265 encoding options with @option{-x265-params}:
|
||||
|
||||
@example
|
||||
ffmpeg -i input -c:v libx265 -x265-params crf=26:psy-rd=1 output.mp4
|
||||
@end example
|
||||
@end table
|
||||
|
||||
@section libxvid
|
||||
|
||||
Xvid MPEG-4 Part 2 encoder wrapper.
|
||||
|
@@ -14,7 +14,6 @@ LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
|
||||
EXAMPLES= avio_reading \
|
||||
decoding_encoding \
|
||||
demuxing_decoding \
|
||||
extract_mvs \
|
||||
filtering_video \
|
||||
filtering_audio \
|
||||
metadata \
|
||||
@@ -29,7 +28,6 @@ OBJS=$(addsuffix .o,$(EXAMPLES))
|
||||
|
||||
# the following examples make explicit use of the math library
|
||||
avcodec: LDLIBS += -lm
|
||||
decoding_encoding: LDLIBS += -lm
|
||||
muxing: LDLIBS += -lm
|
||||
resampling_audio: LDLIBS += -lm
|
||||
|
||||
|
@@ -288,7 +288,6 @@ static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);
|
||||
|
||||
while (avpkt.size > 0) {
|
||||
int i, ch;
|
||||
int got_frame = 0;
|
||||
|
||||
if (!decoded_frame) {
|
||||
@@ -305,15 +304,15 @@ static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
}
|
||||
if (got_frame) {
|
||||
/* if a frame has been decoded, output it */
|
||||
int data_size = av_get_bytes_per_sample(c->sample_fmt);
|
||||
int data_size = av_samples_get_buffer_size(NULL, c->channels,
|
||||
decoded_frame->nb_samples,
|
||||
c->sample_fmt, 1);
|
||||
if (data_size < 0) {
|
||||
/* This should not occur, checking just for paranoia */
|
||||
fprintf(stderr, "Failed to calculate data size\n");
|
||||
exit(1);
|
||||
}
|
||||
for (i=0; i<decoded_frame->nb_samples; i++)
|
||||
for (ch=0; ch<c->channels; ch++)
|
||||
fwrite(decoded_frame->data[ch] + data_size*i, 1, data_size, outfile);
|
||||
fwrite(decoded_frame->data[0], 1, data_size, outfile);
|
||||
}
|
||||
avpkt.size -= len;
|
||||
avpkt.data += len;
|
||||
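A short aside on the two write paths shown in this hunk (a sketch reusing the surrounding example's variables, planar output assumed): with planar sample formats each channel lives in its own data[] plane, so a raw dump has to interleave samples by hand instead of writing data[0] in one block:

    int bps = av_get_bytes_per_sample(c->sample_fmt);   /* bytes per sample of one channel */
    for (i = 0; i < decoded_frame->nb_samples; i++)
        for (ch = 0; ch < c->channels; ch++)
            fwrite(decoded_frame->data[ch] + bps * i, 1, bps, outfile);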
@@ -651,7 +650,7 @@ int main(int argc, char **argv)
|
||||
video_encode_example("test.h264", AV_CODEC_ID_H264);
|
||||
} else if (!strcmp(output_type, "mp2")) {
|
||||
audio_encode_example("test.mp2");
|
||||
audio_decode_example("test.pcm", "test.mp2");
|
||||
audio_decode_example("test.sw", "test.mp2");
|
||||
} else if (!strcmp(output_type, "mpg")) {
|
||||
video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO);
|
||||
video_decode_example("test%02d.pgm", "test.mpg");
|
||||
|
@@ -1,185 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
* Copyright (c) 2014 Clément Bœsch
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <libavutil/motion_vector.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
static AVCodecContext *video_dec_ctx = NULL;
|
||||
static AVStream *video_stream = NULL;
|
||||
static const char *src_filename = NULL;
|
||||
|
||||
static int video_stream_idx = -1;
|
||||
static AVFrame *frame = NULL;
|
||||
static AVPacket pkt;
|
||||
static int video_frame_count = 0;
|
||||
|
||||
static int decode_packet(int *got_frame, int cached)
|
||||
{
|
||||
int decoded = pkt.size;
|
||||
|
||||
*got_frame = 0;
|
||||
|
||||
if (pkt.stream_index == video_stream_idx) {
|
||||
int ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (*got_frame) {
|
||||
int i;
|
||||
AVFrameSideData *sd;
|
||||
|
||||
video_frame_count++;
|
||||
sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
|
||||
if (sd) {
|
||||
const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
|
||||
for (i = 0; i < sd->size / sizeof(*mvs); i++) {
|
||||
const AVMotionVector *mv = &mvs[i];
|
||||
printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64"\n",
|
||||
video_frame_count, mv->source,
|
||||
mv->w, mv->h, mv->src_x, mv->src_y,
|
||||
mv->dst_x, mv->dst_y, mv->flags);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return decoded;
|
||||
}
|
||||
|
||||
static int open_codec_context(int *stream_idx,
|
||||
AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
{
|
||||
int ret;
|
||||
AVStream *st;
|
||||
AVCodecContext *dec_ctx = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
|
||||
av_get_media_type_string(type), src_filename);
|
||||
return ret;
|
||||
} else {
|
||||
*stream_idx = ret;
|
||||
st = fmt_ctx->streams[*stream_idx];
|
||||
|
||||
/* find decoder for the stream */
|
||||
dec_ctx = st->codec;
|
||||
dec = avcodec_find_decoder(dec_ctx->codec_id);
|
||||
if (!dec) {
|
||||
fprintf(stderr, "Failed to find %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/* Init the video decoder */
|
||||
av_dict_set(&opts, "flags2", "+export_mvs", 0);
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret = 0, got_frame;
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s <video>\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
src_filename = argv[1];
|
||||
|
||||
av_register_all();
|
||||
|
||||
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open source file %s\n", src_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
|
||||
fprintf(stderr, "Could not find stream information\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
|
||||
video_stream = fmt_ctx->streams[video_stream_idx];
|
||||
video_dec_ctx = video_stream->codec;
|
||||
}
|
||||
|
||||
av_dump_format(fmt_ctx, 0, src_filename, 0);
|
||||
|
||||
if (!video_stream) {
|
||||
fprintf(stderr, "Could not find video stream in the input, aborting\n");
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate frame\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");
|
||||
|
||||
/* initialize packet, set data to NULL, let the demuxer fill it */
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
AVPacket orig_pkt = pkt;
|
||||
do {
|
||||
ret = decode_packet(&got_frame, 0);
|
||||
if (ret < 0)
|
||||
break;
|
||||
pkt.data += ret;
|
||||
pkt.size -= ret;
|
||||
} while (pkt.size > 0);
|
||||
av_free_packet(&orig_pkt);
|
||||
}
|
||||
|
||||
/* flush cached frames */
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
do {
|
||||
decode_packet(&got_frame, 1);
|
||||
} while (got_frame);
|
||||
|
||||
end:
|
||||
avcodec_close(video_dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
return ret < 0;
|
||||
}
|
@@ -45,7 +45,6 @@

#include "libavutil/channel_layout.h"
#include "libavutil/md5.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

@@ -90,7 +90,6 @@ static int init_filters(const char *filters_descr)
|
||||
AVFilter *buffersink = avfilter_get_by_name("buffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
|
||||
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
|
||||
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
@@ -103,7 +102,7 @@ static int init_filters(const char *filters_descr)
|
||||
snprintf(args, sizeof(args),
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
|
||||
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
|
||||
time_base.num, time_base.den,
|
||||
dec_ctx->time_base.num, dec_ctx->time_base.den,
|
||||
dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
|
||||
|
@@ -55,7 +55,6 @@ typedef struct OutputStream {
|
||||
|
||||
/* pts of the next frame that will be generated */
|
||||
int64_t next_pts;
|
||||
int samples_count;
|
||||
|
||||
AVFrame *frame;
|
||||
AVFrame *tmp_frame;
|
||||
@@ -63,7 +62,6 @@ typedef struct OutputStream {
|
||||
float t, tincr, tincr2;
|
||||
|
||||
struct SwsContext *sws_ctx;
|
||||
struct SwrContext *swr_ctx;
|
||||
} OutputStream;
|
||||
|
||||
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
|
||||
@@ -94,7 +92,6 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
enum AVCodecID codec_id)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int i;
|
||||
|
||||
/* find the encoder */
|
||||
*codec = avcodec_find_encoder(codec_id);
|
||||
@@ -118,24 +115,8 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
(*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
|
||||
c->bit_rate = 64000;
|
||||
c->sample_rate = 44100;
|
||||
if ((*codec)->supported_samplerates) {
|
||||
c->sample_rate = (*codec)->supported_samplerates[0];
|
||||
for (i = 0; (*codec)->supported_samplerates[i]; i++) {
|
||||
if ((*codec)->supported_samplerates[i] == 44100)
|
||||
c->sample_rate = 44100;
|
||||
}
|
||||
}
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
c->channels = 2;
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
if ((*codec)->channel_layouts) {
|
||||
c->channel_layout = (*codec)->channel_layouts[0];
|
||||
for (i = 0; (*codec)->channel_layouts[i]; i++) {
|
||||
if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
}
|
||||
}
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
ost->st->time_base = (AVRational){ 1, c->sample_rate };
|
||||
break;
|
||||
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
@@ -149,9 +130,8 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
* of which frame timestamps are represented. For fixed-fps content,
|
||||
* timebase should be 1/framerate and timestamp increments should be
|
||||
* identical to 1. */
|
||||
ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
|
||||
c->time_base = ost->st->time_base;
|
||||
|
||||
c->time_base.den = STREAM_FRAME_RATE;
|
||||
c->time_base.num = 1;
|
||||
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
|
||||
c->pix_fmt = STREAM_PIX_FMT;
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
|
||||
@@ -178,47 +158,19 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
/**************************************************************/
|
||||
/* audio output */
|
||||
|
||||
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
|
||||
uint64_t channel_layout,
|
||||
int sample_rate, int nb_samples)
|
||||
{
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
int ret;
|
||||
int samples_count;
|
||||
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Error allocating an audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
struct SwrContext *swr_ctx = NULL;
|
||||
|
||||
frame->format = sample_fmt;
|
||||
frame->channel_layout = channel_layout;
|
||||
frame->sample_rate = sample_rate;
|
||||
frame->nb_samples = nb_samples;
|
||||
|
||||
if (nb_samples) {
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error allocating an audio buffer\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int nb_samples;
|
||||
int ret;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
c = ost->st->codec;
|
||||
|
||||
/* open it */
|
||||
av_dict_copy(&opt, opt_arg, 0);
|
||||
ret = avcodec_open2(c, codec, &opt);
|
||||
av_dict_free(&opt);
|
||||
ret = avcodec_open2(c, codec, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
@@ -230,52 +182,84 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, A
|
||||
/* increment frequency by 110 Hz per second */
|
||||
ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
|
||||
|
||||
if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
|
||||
nb_samples = 10000;
|
||||
else
|
||||
nb_samples = c->frame_size;
|
||||
ost->frame = av_frame_alloc();
|
||||
if (!ost->frame)
|
||||
exit(1);
|
||||
|
||||
ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
ost->frame->sample_rate = c->sample_rate;
|
||||
ost->frame->format = AV_SAMPLE_FMT_S16;
|
||||
ost->frame->channel_layout = c->channel_layout;
|
||||
|
||||
if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
|
||||
ost->frame->nb_samples = 10000;
|
||||
else
|
||||
ost->frame->nb_samples = c->frame_size;
|
||||
|
||||
ost->tmp_frame = av_frame_alloc();
|
||||
if (!ost->frame)
|
||||
exit(1);
|
||||
|
||||
ost->tmp_frame->sample_rate = c->sample_rate;
|
||||
ost->tmp_frame->format = c->sample_fmt;
|
||||
ost->tmp_frame->channel_layout = c->channel_layout;
|
||||
ost->tmp_frame->nb_samples = ost->frame->nb_samples;
|
||||
|
||||
/* create resampler context */
|
||||
ost->swr_ctx = swr_alloc();
|
||||
if (!ost->swr_ctx) {
|
||||
if (c->sample_fmt != AV_SAMPLE_FMT_S16) {
|
||||
swr_ctx = swr_alloc();
|
||||
if (!swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
av_opt_set_int (swr_ctx, "in_channel_count", c->channels, 0);
|
||||
av_opt_set_int (swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_int (swr_ctx, "out_channel_count", c->channels, 0);
|
||||
av_opt_set_int (swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(ost->swr_ctx)) < 0) {
|
||||
if ((ret = swr_init(swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
ret = av_frame_get_buffer(ost->frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate an audio frame.\n");
|
||||
exit(1);
|
||||
}
|
||||
ret = av_frame_get_buffer(ost->tmp_frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate an audio frame.\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
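As an aside, and purely as a hypothetical alternative to the av_opt_set_* sequence above, the same resampler could be configured in a single call with swr_alloc_set_opts():

    struct SwrContext *swr = swr_alloc_set_opts(NULL,
            c->channel_layout, c->sample_fmt,     c->sample_rate,   /* output */
            c->channel_layout, AV_SAMPLE_FMT_S16, c->sample_rate,   /* input  */
            0, NULL);
    if (!swr || swr_init(swr) < 0) {
        fprintf(stderr, "Could not set up the resampling context\n");
        exit(1);
    }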
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
|
||||
* 'nb_channels' channels. */
|
||||
static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
{
|
||||
AVFrame *frame = ost->tmp_frame;
|
||||
int j, i, v;
|
||||
int16_t *q = (int16_t*)frame->data[0];
|
||||
int j, i, v, ret;
|
||||
int16_t *q = (int16_t*)ost->frame->data[0];
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
|
||||
for (j = 0; j <frame->nb_samples; j++) {
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
* make sure we do not overwrite it here
|
||||
*/
|
||||
ret = av_frame_make_writable(ost->frame);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
for (j = 0; j < ost->frame->nb_samples; j++) {
|
||||
v = (int)(sin(ost->t) * 10000);
|
||||
for (i = 0; i < ost->st->codec->channels; i++)
|
||||
*q++ = v;
|
||||
@@ -283,10 +267,10 @@ static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
ost->tincr += ost->tincr2;
|
||||
}
|
||||
|
||||
frame->pts = ost->next_pts;
|
||||
ost->next_pts += frame->nb_samples;
|
||||
ost->frame->pts = ost->next_pts;
|
||||
ost->next_pts += ost->frame->nb_samples;
|
||||
|
||||
return frame;
|
||||
return ost->frame;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -309,31 +293,27 @@ static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
|
||||
if (frame) {
|
||||
/* convert samples from native format to destination codec format, using the resampler */
|
||||
if (swr_ctx) {
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + frame->nb_samples,
|
||||
c->sample_rate, c->sample_rate, AV_ROUND_UP);
|
||||
av_assert0(dst_nb_samples == frame->nb_samples);
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
* make sure we do not overwrite it here
|
||||
*/
|
||||
ret = av_frame_make_writable(ost->frame);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
/* convert to destination format */
|
||||
ret = swr_convert(ost->swr_ctx,
|
||||
ost->frame->data, dst_nb_samples,
|
||||
ret = swr_convert(swr_ctx,
|
||||
ost->tmp_frame->data, dst_nb_samples,
|
||||
(const uint8_t **)frame->data, frame->nb_samples);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while converting\n");
|
||||
exit(1);
|
||||
}
|
||||
frame = ost->frame;
|
||||
frame = ost->tmp_frame;
|
||||
} else {
|
||||
dst_nb_samples = frame->nb_samples;
|
||||
}
|
||||
|
||||
frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
|
||||
ost->samples_count += dst_nb_samples;
|
||||
frame->pts = av_rescale_q(samples_count, (AVRational){1, c->sample_rate}, c->time_base);
|
||||
samples_count += dst_nb_samples;
|
||||
}
|
||||
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
|
||||
@@ -380,17 +360,13 @@ static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
|
||||
return picture;
|
||||
}
|
||||
|
||||
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c = ost->st->codec;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
av_dict_copy(&opt, opt_arg, 0);
|
||||
|
||||
/* open the codec */
|
||||
ret = avcodec_open2(c, codec, &opt);
|
||||
av_dict_free(&opt);
|
||||
ret = avcodec_open2(c, codec, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
@@ -547,7 +523,6 @@ static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
av_frame_free(&ost->frame);
|
||||
av_frame_free(&ost->tmp_frame);
|
||||
sws_freeContext(ost->sws_ctx);
|
||||
swr_free(&ost->swr_ctx);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
@@ -563,12 +538,11 @@ int main(int argc, char **argv)
|
||||
int ret;
|
||||
int have_video = 0, have_audio = 0;
|
||||
int encode_video = 0, encode_audio = 0;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
/* Initialize libavcodec, and register all codecs and formats. */
|
||||
av_register_all();
|
||||
|
||||
if (argc < 2) {
|
||||
if (argc != 2) {
|
||||
printf("usage: %s output_file\n"
|
||||
"API example program to output a media file with libavformat.\n"
|
||||
"This program generates a synthetic audio and video stream, encodes and\n"
|
||||
@@ -580,9 +554,6 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
filename = argv[1];
|
||||
if (argc > 3 && !strcmp(argv[2], "-flags")) {
|
||||
av_dict_set(&opt, argv[2]+1, argv[3], 0);
|
||||
}
|
||||
|
||||
/* allocate the output media context */
|
||||
avformat_alloc_output_context2(&oc, NULL, NULL, filename);
|
||||
@@ -611,10 +582,10 @@ int main(int argc, char **argv)
|
||||
/* Now that all the parameters are set, we can open the audio and
|
||||
* video codecs and allocate the necessary encode buffers. */
|
||||
if (have_video)
|
||||
open_video(oc, video_codec, &video_st, opt);
|
||||
open_video(oc, video_codec, &video_st);
|
||||
|
||||
if (have_audio)
|
||||
open_audio(oc, audio_codec, &audio_st, opt);
|
||||
open_audio(oc, audio_codec, &audio_st);
|
||||
|
||||
av_dump_format(oc, 0, filename, 1);
|
||||
|
||||
@@ -629,7 +600,7 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
/* Write the stream header, if any. */
|
||||
ret = avformat_write_header(oc, &opt);
|
||||
ret = avformat_write_header(oc, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred when opening output file: %s\n",
|
||||
av_err2str(ret));
|
||||
|
@@ -199,7 +199,8 @@ int main(int argc, char **argv)
fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);

end:
fclose(dst_file);
if (dst_file)
fclose(dst_file);

if (src_data)
av_freep(&src_data[0]);

@@ -132,7 +132,8 @@ int main(int argc, char **argv)
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);

end:
fclose(dst_file);
if (dst_file)
fclose(dst_file);
av_freep(&src_data[0]);
av_freep(&dst_data[0]);
sws_freeContext(sws_ctx);

@@ -52,7 +52,7 @@
* @param error Error code to be converted
* @return Corresponding error text (not thread-safe)
*/
static const char *get_error_text(const int error)
static char *const get_error_text(const int error)
{
static char error_buffer[255];
av_strerror(error, error_buffer, sizeof(error_buffer));
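The two signatures above differ only in where const binds; a quick illustration of the distinction (buffer is a hypothetical writable char array):

    const char *p = "abc";    /* pointer to const char: *p cannot be written, p can be reseated */
    char *const q = buffer;   /* const pointer to char: q cannot be reseated, *q can be written */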
@@ -306,7 +306,7 @@ static int decode_audio_frame(AVFrame *frame,

/** Read one audio frame from the input file into a temporary packet. */
if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
/** If we are at the end of the file, flush the decoder below. */
/** If we are the the end of the file, flush the decoder below. */
if (error == AVERROR_EOF)
*finished = 1;
else {

@@ -116,10 +116,6 @@ static int open_output_file(const char *filename)
|
||||
|| dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
/* in this example, we choose transcoding to same codec */
|
||||
encoder = avcodec_find_encoder(dec_ctx->codec_id);
|
||||
if (!encoder) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Neccessary encoder not found\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* In this example, we transcode to same properties (picture size,
|
||||
* sample rate etc.). These properties can be changed for output
|
||||
@@ -389,9 +385,17 @@ static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, in

/* prepare packet for muxing */
enc_pkt.stream_index = stream_index;
av_packet_rescale_ts(&enc_pkt,
ofmt_ctx->streams[stream_index]->codec->time_base,
ofmt_ctx->streams[stream_index]->time_base);
enc_pkt.dts = av_rescale_q_rnd(enc_pkt.dts,
ofmt_ctx->streams[stream_index]->codec->time_base,
ofmt_ctx->streams[stream_index]->time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
enc_pkt.pts = av_rescale_q_rnd(enc_pkt.pts,
ofmt_ctx->streams[stream_index]->codec->time_base,
ofmt_ctx->streams[stream_index]->time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
enc_pkt.duration = av_rescale_q(enc_pkt.duration,
ofmt_ctx->streams[stream_index]->codec->time_base,
ofmt_ctx->streams[stream_index]->time_base);

av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
/* mux encoded frame */
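A hedged illustration (not from the diff): av_packet_rescale_ts() performs the pts/dts/duration conversion that the explicit av_rescale_q_rnd() and av_rescale_q() calls above spell out by hand. The wrapper name below is made up for this note.

#include <libavcodec/avcodec.h>
#include <libavutil/mathematics.h>

static void rescale_packet(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
{
    av_packet_rescale_ts(pkt, src_tb, dst_tb);
    /* Equivalent long form, for reference:
     *   pkt->pts      = av_rescale_q_rnd(pkt->pts, src_tb, dst_tb,
     *                       AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
     *   pkt->dts      = av_rescale_q_rnd(pkt->dts, src_tb, dst_tb,
     *                       AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
     *   pkt->duration = av_rescale_q(pkt->duration, src_tb, dst_tb);
     */
}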
@@ -505,9 +509,14 @@ int main(int argc, char **argv)
ret = AVERROR(ENOMEM);
break;
}
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
ifmt_ctx->streams[stream_index]->codec->time_base);
packet.dts = av_rescale_q_rnd(packet.dts,
ifmt_ctx->streams[stream_index]->time_base,
ifmt_ctx->streams[stream_index]->codec->time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
packet.pts = av_rescale_q_rnd(packet.pts,
ifmt_ctx->streams[stream_index]->time_base,
ifmt_ctx->streams[stream_index]->codec->time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
avcodec_decode_audio4;
ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
@@ -529,9 +538,14 @@ int main(int argc, char **argv)
}
} else {
/* remux this frame without reencoding */
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
ofmt_ctx->streams[stream_index]->time_base);
packet.dts = av_rescale_q_rnd(packet.dts,
ifmt_ctx->streams[stream_index]->time_base,
ofmt_ctx->streams[stream_index]->time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
packet.pts = av_rescale_q_rnd(packet.pts,
ifmt_ctx->streams[stream_index]->time_base,
ofmt_ctx->streams[stream_index]->time_base,
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);

ret = av_interleaved_write_frame(ofmt_ctx, &packet);
if (ret < 0)
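A hedged sketch (written for this note): the minimal stream-copy loop the hunk above corresponds to. It assumes output stream indices mirror the input ones, as in the transcoding example.

#include <libavformat/avformat.h>

static int copy_packets(AVFormatContext *ifmt_ctx, AVFormatContext *ofmt_ctx)
{
    AVPacket packet;
    int ret;

    while ((ret = av_read_frame(ifmt_ctx, &packet)) >= 0) {
        int i = packet.stream_index;
        av_packet_rescale_ts(&packet,
                             ifmt_ctx->streams[i]->time_base,
                             ofmt_ctx->streams[i]->time_base);
        /* av_interleaved_write_frame() takes ownership of the packet */
        ret = av_interleaved_write_frame(ofmt_ctx, &packet);
        if (ret < 0)
            break;
    }
    return ret == AVERROR_EOF ? 0 : ret;
}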
@@ -298,7 +298,7 @@ FFmpeg has a @url{http://ffmpeg.org/ffmpeg-protocols.html#concat,
|
||||
@code{concat}} protocol designed specifically for that, with examples in the
|
||||
documentation.
|
||||
|
||||
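For instance, two intermediate MPEG-PS files can be joined losslessly with the @code{concat} protocol (illustration added here; the file names are placeholders, not from this document):
@example
ffmpeg -i "concat:intermediate1.mpg|intermediate2.mpg" -c copy output.mpg
@end example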
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow one to concatenate
|
||||
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to concatenate
|
||||
video by merely concatenating the files containing them.
|
||||
|
||||
Hence you may concatenate your multimedia files by first transcoding them to
|
||||
|
@@ -339,7 +339,7 @@ ffmpeg -i in.avi -metadata title="my title" out.flv
|
||||
|
||||
To set the language of the first audio stream:
|
||||
@example
|
||||
ffmpeg -i INPUT -metadata:s:a:0 language=eng OUTPUT
|
||||
ffmpeg -i INPUT -metadata:s:a:1 language=eng OUTPUT
|
||||
@end example
|
||||
|
||||
@item -target @var{type} (@emph{output})
|
||||
@@ -360,7 +360,7 @@ ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg
|
||||
@end example
|
||||
|
||||
@item -dframes @var{number} (@emph{output})
|
||||
Set the number of data frames to output. This is an alias for @code{-frames:d}.
|
||||
Set the number of data frames to record. This is an alias for @code{-frames:d}.
|
||||
|
||||
@item -frames[:@var{stream_specifier}] @var{framecount} (@emph{output,per-stream})
|
||||
Stop writing to the stream after @var{framecount} frames.
|
||||
@@ -467,15 +467,12 @@ attachments.
|
||||
|
||||
@table @option
|
||||
@item -vframes @var{number} (@emph{output})
|
||||
Set the number of video frames to output. This is an alias for @code{-frames:v}.
|
||||
Set the number of video frames to record. This is an alias for @code{-frames:v}.
|
||||
@item -r[:@var{stream_specifier}] @var{fps} (@emph{input/output,per-stream})
|
||||
Set frame rate (Hz value, fraction or abbreviation).
|
||||
|
||||
As an input option, ignore any timestamps stored in the file and instead
|
||||
generate timestamps assuming constant frame rate @var{fps}.
|
||||
This is not the same as the @option{-framerate} option used for some input formats
|
||||
like image2 or v4l2 (it used to be the same in older versions of FFmpeg).
|
||||
If in doubt use @option{-framerate} instead of the input option @option{-r}.
|
||||
|
||||
As an output option, duplicate or drop input frames to achieve constant output
|
||||
frame rate @var{fps}.
|
||||
@@ -692,7 +689,7 @@ If this option is not specified, the default adapter is used.
|
||||
|
||||
@table @option
|
||||
@item -aframes @var{number} (@emph{output})
|
||||
Set the number of audio frames to output. This is an alias for @code{-frames:a}.
|
||||
Set the number of audio frames to record. This is an alias for @code{-frames:a}.
|
||||
@item -ar[:@var{stream_specifier}] @var{freq} (@emph{input/output,per-stream})
|
||||
Set the audio sampling frequency. For output streams it is set by
|
||||
default to the frequency of the corresponding input stream. For input
|
||||
@@ -824,11 +821,6 @@ To map all the streams except the second audio, use negative mappings
|
||||
ffmpeg -i INPUT -map 0 -map -0:a:1 OUTPUT
|
||||
@end example
|
||||
|
||||
To pick the English audio stream:
|
||||
@example
|
||||
ffmpeg -i INPUT -map 0:m:language:eng OUTPUT
|
||||
@end example
|
||||
|
||||
Note that using this option disables the default mappings for this output file.
|
||||
|
||||
@item -map_channel [@var{input_file_id}.@var{stream_specifier}.@var{channel_id}|-1][:@var{output_file_id}.@var{stream_specifier}]
|
||||
@@ -1016,12 +1008,6 @@ processing (e.g. in case the format option @option{avoid_negative_ts}
|
||||
is enabled) the output timestamps may mismatch with the input
|
||||
timestamps even when this option is selected.
|
||||
|
||||
@item -start_at_zero
|
||||
When used with @option{copyts}, shift input timestamps so they start at zero.
|
||||
|
||||
This means that using e.g. @code{-ss 50} will make output timestamps start at
|
||||
50 seconds, regardless of what timestamp the input file started at.
|
||||
|
||||
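A hedged illustration of the combination (option placement assumed, not taken from this document):
@example
ffmpeg -ss 50 -i INPUT -copyts -start_at_zero -c copy OUTPUT
@end example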
@item -copytb @var{mode}
|
||||
Specify how to set the encoder timebase when stream copying. @var{mode} is an
|
||||
integer numeric value, and can assume one of the following values:
|
||||
@@ -1464,11 +1450,11 @@ ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
|
||||
You can put many streams of the same type in the output:
|
||||
|
||||
@example
|
||||
ffmpeg -i test1.avi -i test2.avi -map 1:1 -map 1:0 -map 0:1 -map 0:0 -c copy -y test12.nut
|
||||
ffmpeg -i test1.avi -i test2.avi -map 0:3 -map 0:2 -map 0:1 -map 0:0 -c copy test12.nut
|
||||
@end example
|
||||
|
||||
The resulting output file @file{test12.nut} will contain the first four streams
|
||||
from the input files in reverse order.
|
||||
The resulting output file @file{test12.avi} will contain first four streams from
|
||||
the input file in reverse order.
|
||||
|
||||
@item
|
||||
To force CBR video output:
|
||||
|
@@ -37,14 +37,10 @@ Force displayed height.
|
||||
Set frame size (WxH or abbreviation), needed for videos which do
|
||||
not contain a header with the frame size like raw YUV. This option
|
||||
has been deprecated in favor of private options, try -video_size.
|
||||
@item -fs
|
||||
Start in fullscreen mode.
|
||||
@item -an
|
||||
Disable audio.
|
||||
@item -vn
|
||||
Disable video.
|
||||
@item -sn
|
||||
Disable subtitles.
|
||||
@item -ss @var{pos}
|
||||
Seek to a given position in seconds.
|
||||
@item -t @var{duration}
|
||||
@@ -113,10 +109,15 @@ duration, the codec parameters, the current position in the stream and
|
||||
the audio/video synchronisation drift. It is on by default, to
|
||||
explicitly disable it you need to specify @code{-nostats}.
|
||||
|
||||
@item -bug
|
||||
Work around bugs.
|
||||
@item -fast
|
||||
Non-spec-compliant optimizations.
|
||||
@item -genpts
|
||||
Generate pts.
|
||||
@item -rtp_tcp
|
||||
Force RTP/TCP protocol usage instead of RTP/UDP. It is only meaningful
|
||||
if you are streaming with the RTSP protocol.
|
||||
@item -sync @var{type}
|
||||
Set the master clock to audio (@code{type=audio}), video
|
||||
(@code{type=video}) or external (@code{type=ext}). Default is audio. The
|
||||
@@ -125,8 +126,7 @@ players use audio as master clock, but in some cases (streaming or high
|
||||
quality broadcast) it is necessary to change that. This option is mainly
|
||||
used for debugging purposes.
|
||||
@item -threads @var{count}
|
||||
Set the thread count. By default, @command{ffplay} automatically detects a
|
||||
suitable number of threads to use.
|
||||
Set the thread count.
|
||||
@item -ast @var{audio_stream_number}
|
||||
Select the desired audio stream number, counting from 0. The number
|
||||
refers to the list of all the input audio streams. If it is greater
|
||||
@@ -164,20 +164,8 @@ Force a specific video decoder.
|
||||
Force a specific subtitle decoder.
|
||||
|
||||
@item -autorotate
|
||||
Automatically rotate the video according to presentation metadata. Enabled by
|
||||
default, use @option{-noautorotate} to disable it.
|
||||
|
||||
@item -framedrop
|
||||
Drop video frames if video is out of sync. Enabled by default if the master
|
||||
clock is not set to video. Use this option to enable frame dropping for all
|
||||
master clock sources, use @option{-noframedrop} to disable it.
|
||||
|
||||
@item -infbuf
|
||||
Do not limit the input buffer size, read as much data as possible from the
|
||||
input as soon as possible. Enabled by default for realtime streams, where data
|
||||
may be dropped if not read in time. Use this option to enable infinite buffers
|
||||
for all inputs, use @option{-noinfbuf} to disable it.
|
||||
|
||||
Automatically rotate the video according to presentation metadata. Set by
|
||||
default, use -noautorotate to disable.
|
||||
@end table
|
||||
|
||||
@section While playing
|
||||
|
@@ -119,10 +119,6 @@ Show payload data, as a hexadecimal and ASCII dump. Coupled with
|
||||
|
||||
The dump is printed as the "data" field. It may contain newlines.
|
||||
|
||||
@item -show_data_hash @var{algorithm}
|
||||
Show a hash of payload data, for packets with @option{-show_packets} and for
|
||||
codec extradata with @option{-show_streams}.
|
||||
|
||||
@item -show_error
|
||||
Show information about the error found when trying to probe the input.
|
||||
|
||||
@@ -184,7 +180,7 @@ format : stream=codec_type
|
||||
|
||||
To show all the tags in the stream and format sections:
|
||||
@example
|
||||
stream_tags : format_tags
|
||||
format_tags : format_tags
|
||||
@end example
|
||||
|
||||
To show only the @code{title} tag (if available) in the stream
|
||||
@@ -321,12 +317,6 @@ Show information related to program and library versions. This is the
|
||||
equivalent of setting both @option{-show_program_version} and
|
||||
@option{-show_library_versions} options.
|
||||
|
||||
@item -show_pixel_formats
|
||||
Show information about all pixel formats supported by FFmpeg.
|
||||
|
||||
Pixel format information for each format is printed within a section
|
||||
with name "PIXEL_FORMAT".
|
||||
|
||||
@item -bitexact
|
||||
Force bitexact output, useful to produce output which is not dependent
|
||||
on the specific build.
|
||||
|
@@ -10,10 +10,8 @@
|
||||
<xsd:sequence>
|
||||
<xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="pixel_formats" type="ffprobe:pixelFormatsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="packets_and_frames" type="ffprobe:packetsAndFramesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="chapters" type="ffprobe:chaptersType" minOccurs="0" maxOccurs="1" />
|
||||
@@ -37,16 +35,6 @@
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetsAndFramesType">
|
||||
<xsd:sequence>
|
||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
||||
<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:choice>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetType">
|
||||
<xsd:attribute name="codec_type" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="stream_index" type="xsd:int" use="required" />
|
||||
@@ -62,13 +50,11 @@
|
||||
<xsd:attribute name="pos" type="xsd:long" />
|
||||
<xsd:attribute name="flags" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="data" type="xsd:string" />
|
||||
<xsd:attribute name="data_hash" type="xsd:string" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:frameSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="media_type" type="xsd:string" use="required"/>
|
||||
@@ -105,16 +91,6 @@
|
||||
<xsd:attribute name="repeat_pict" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataListType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_data" type="ffprobe:frameSideDataType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="frameSideDataType">
|
||||
<xsd:attribute name="side_data_type" type="xsd:string"/>
|
||||
<xsd:attribute name="side_data_size" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="subtitleType">
|
||||
<xsd:attribute name="media_type" type="xsd:string" fixed="subtitle" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
@@ -166,7 +142,6 @@
|
||||
<xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="extradata" type="xsd:string" />
|
||||
<xsd:attribute name="extradata_hash" type="xsd:string" />
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:int"/>
|
||||
@@ -178,9 +153,6 @@
|
||||
<xsd:attribute name="level" type="xsd:int"/>
|
||||
<xsd:attribute name="color_range" type="xsd:string"/>
|
||||
<xsd:attribute name="color_space" type="xsd:string"/>
|
||||
<xsd:attribute name="color_transfer" type="xsd:string"/>
|
||||
<xsd:attribute name="color_primaries" type="xsd:string"/>
|
||||
<xsd:attribute name="chroma_location" type="xsd:string"/>
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
|
||||
<!-- audio attributes -->
|
||||
@@ -200,7 +172,6 @@
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="max_bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="bits_per_raw_sample" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_packets" type="xsd:int"/>
|
||||
@@ -292,45 +263,4 @@
|
||||
<xsd:element name="library_version" type="ffprobe:libraryVersionType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatFlagsType">
|
||||
<xsd:attribute name="big_endian" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="palette" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="bitstream" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="hwaccel" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="planar" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="rgb" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pseudopal" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="alpha" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatComponentType">
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="bit_depth" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatComponentsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="component" type="ffprobe:pixelFormatComponentType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="flags" type="ffprobe:pixelFormatFlagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="components" type="ffprobe:pixelFormatComponentsType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="nb_components" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="log2_chroma_w" type="xsd:int"/>
|
||||
<xsd:attribute name="log2_chroma_h" type="xsd:int"/>
|
||||
<xsd:attribute name="bits_per_pixel" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="pixel_format" type="ffprobe:pixelFormatType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
</xsd:schema>
|
||||
|
@@ -1,11 +1,11 @@
|
||||
# Port on which the server is listening. You must select a different
|
||||
# port from your standard HTTP web server if it is running on the same
|
||||
# computer.
|
||||
HTTPPort 8090
|
||||
Port 8090
|
||||
|
||||
# Address on which the server is bound. Only useful if you have
|
||||
# several network interfaces.
|
||||
HTTPBindAddress 0.0.0.0
|
||||
BindAddress 0.0.0.0
|
||||
|
||||
# Number of simultaneous HTTP connections that can be handled. It has
|
||||
# to be defined *before* the MaxClients parameter, since it defines the
|
||||
|
@@ -66,12 +66,12 @@ http://@var{ffserver_ip_address}:@var{http_port}/@var{feed_name}
|
||||
|
||||
where @var{ffserver_ip_address} is the IP address of the machine where
|
||||
@command{ffserver} is installed, @var{http_port} is the port number of
|
||||
the HTTP server (configured through the @option{HTTPPort} option), and
|
||||
the HTTP server (configured through the @option{Port} option), and
|
||||
@var{feed_name} is the name of the corresponding feed defined in the
|
||||
configuration file.
|
||||
|
||||
Each feed is associated to a file which is stored on disk. This stored
|
||||
file is used to send pre-recorded data to a player as fast as
|
||||
file is used to allow to send pre-recorded data to a player as fast as
|
||||
possible when new content is added in real-time to the stream.
|
||||
|
||||
A "live-stream" or "stream" is a resource published by
|
||||
@@ -101,7 +101,7 @@ http://@var{ffserver_ip_address}:@var{rtsp_port}/@var{stream_name}[@var{options}
|
||||
the configuration file. @var{options} is a list of options specified
|
||||
after the URL which affects how the stream is served by
|
||||
@command{ffserver}. @var{http_port} and @var{rtsp_port} are the HTTP
|
||||
and RTSP ports configured with the options @var{HTTPPort} and
|
||||
and RTSP ports configured with the options @var{Port} and
|
||||
@var{RTSPPort} respectively.
|
||||
|
||||
In case the stream is associated to a feed, the encoding parameters
|
||||
@@ -203,9 +203,11 @@ WARNING: trying to stream test1.mpg doesn't work with WMP as it tries to
|
||||
transfer the entire file before starting to play.
|
||||
The same is true of AVI files.
|
||||
|
||||
You should edit the @file{ffserver.conf} file to suit your needs (in
|
||||
terms of frame rates etc). Then install @command{ffserver} and
|
||||
@command{ffmpeg}, write a script to start them up, and off you go.
|
||||
@section What happens next?
|
||||
|
||||
You should edit the ffserver.conf file to suit your needs (in terms of
|
||||
frame rates etc). Then install ffserver and ffmpeg, write a script to start
|
||||
them up, and off you go.
|
||||
|
||||
@section What else can it do?
|
||||
|
||||
@@ -352,29 +354,20 @@ allow everybody else.
|
||||
|
||||
@section Global options
|
||||
@table @option
|
||||
@item HTTPPort @var{port_number}
|
||||
@item Port @var{port_number}
|
||||
@item RTSPPort @var{port_number}
|
||||
|
||||
@var{HTTPPort} sets the HTTP server listening TCP port number,
|
||||
@var{RTSPPort} sets the RTSP server listening TCP port number.
|
||||
|
||||
@var{Port} is the equivalent of @var{HTTPPort} and is deprecated.
|
||||
|
||||
You must select a different port from your standard HTTP web server if
|
||||
it is running on the same computer.
|
||||
Set TCP port number on which the HTTP/RTSP server is listening. You
|
||||
must select a different port from your standard HTTP web server if it
|
||||
is running on the same computer.
|
||||
|
||||
If not specified, no corresponding server will be created.
|
||||
|
||||
@item HTTPBindAddress @var{ip_address}
|
||||
@item BindAddress @var{ip_address}
|
||||
@item RTSPBindAddress @var{ip_address}
|
||||
Set address on which the HTTP/RTSP server is bound. Only useful if you
|
||||
have several network interfaces.
|
||||
|
||||
@var{BindAddress} is the equivalent of @var{HTTPBindAddress} and is
|
||||
deprecated.
|
||||
|
||||
@item MaxHTTPConnections @var{n}
|
||||
Set number of simultaneous HTTP connections that can be handled. It
|
||||
has to be defined @emph{before} the @option{MaxClients} parameter,
|
||||
@@ -408,12 +401,6 @@ ignored, and the log is written to standard output.
|
||||
Set no-daemon mode. This option is currently ignored since now
|
||||
@command{ffserver} will always work in no-daemon mode, and is
|
||||
deprecated.
|
||||
|
||||
@item UseDefaults
|
||||
@item NoDefaults
|
||||
Control whether default codec options are used for the all streams or not.
|
||||
Each stream may overwrite this setting for its own. Default is @var{UseDefaults}.
|
||||
The latest occurrence overrides previous ones if there are multiple definitions.
|
||||
@end table
|
||||
|
||||
@section Feed section
|
||||
@@ -577,11 +564,6 @@ deprecated in favor of @option{Metadata}.
|
||||
@item Metadata @var{key} @var{value}
|
||||
Set metadata value on the output stream.
|
||||
|
||||
@item UseDefaults
|
||||
@item NoDefaults
|
||||
Control whether default codec options are used for the stream or not.
|
||||
Default is @var{UseDefaults} unless disabled globally.
|
||||
|
||||
@item NoAudio
|
||||
@item NoVideo
|
||||
Suppress audio/video.
|
||||
@@ -600,9 +582,8 @@ Set sampling frequency for audio. When using low bitrates, you should
|
||||
lower this frequency to 22050 or 11025. The supported frequencies
|
||||
depend on the selected audio codec.
|
||||
|
||||
@item AVOptionAudio [@var{codec}:]@var{option} @var{value} (@emph{encoding,audio})
|
||||
Set generic or private option for audio stream.
|
||||
Private option must be prefixed with codec name or codec must be defined before.
|
||||
@item AVOptionAudio @var{option} @var{value} (@emph{encoding,audio})
|
||||
Set generic option for audio stream.
|
||||
|
||||
@item AVPresetAudio @var{preset} (@emph{encoding,audio})
|
||||
Set preset for audio stream.
|
||||
@@ -679,9 +660,8 @@ Set video @option{qdiff} encoding option.
|
||||
@item DarkMask @var{float} (@emph{encoding,video})
|
||||
Set @option{lumi_mask}/@option{dark_mask} encoding options.
|
||||
|
||||
@item AVOptionVideo [@var{codec}:]@var{option} @var{value} (@emph{encoding,video})
|
||||
Set generic or private option for video stream.
|
||||
Private option must be prefixed with codec name or codec must be defined before.
|
||||
@item AVOptionVideo @var{option} @var{value} (@emph{encoding,video})
|
||||
Set generic option for video stream.
|
||||
|
||||
@item AVPresetVideo @var{preset} (@emph{encoding,video})
|
||||
Set preset for video stream.
|
||||
|
@@ -3,7 +3,7 @@ representing a number as input, which may be followed by one of the SI
|
||||
unit prefixes, for example: 'K', 'M', or 'G'.
|
||||
|
||||
If 'i' is appended to the SI unit prefix, the complete prefix will be
|
||||
interpreted as a unit prefix for binary multiples, which are based on
|
||||
interpreted as a unit prefix for binary multiplies, which are based on
|
||||
powers of 1024 instead of powers of 1000. Appending 'B' to the SI unit
|
||||
prefix multiplies the value by 8. This allows using, for example:
|
||||
'KB', 'MiB', 'G' and 'B' as number suffixes.
|
||||
@@ -46,13 +46,6 @@ in the program with the id @var{program_id}. Otherwise, it matches all streams i
|
||||
program.
|
||||
@item #@var{stream_id} or i:@var{stream_id}
|
||||
Match the stream by stream id (e.g. PID in MPEG-TS container).
|
||||
@item m:@var{key}[:@var{value}]
|
||||
Matches streams with the metadata tag @var{key} having the specified value. If
|
||||
@var{value} is not given, matches streams that contain the given tag with any
|
||||
value.
|
||||
|
||||
Note that in @command{ffmpeg}, matching by metadata will only work properly for
|
||||
input files.
|
||||
@end table
|
||||
|
||||
@section Generic options
|
||||
@@ -103,10 +96,7 @@ Print detailed information about the filter name @var{filter_name}. Use the
|
||||
Show version.
|
||||
|
||||
@item -formats
|
||||
Show available formats (including devices).
|
||||
|
||||
@item -devices
|
||||
Show available devices.
|
||||
Show available formats.
|
||||
|
||||
@item -codecs
|
||||
Show all codecs known to libavcodec.
|
||||
@@ -141,22 +131,6 @@ Show channel names and standard channel layouts.
|
||||
@item -colors
|
||||
Show recognized color names.
|
||||
|
||||
@item -sources @var{device}[,@var{opt1}=@var{val1}[,@var{opt2}=@var{val2}]...]
|
||||
Show autodetected sources of the input device.
|
||||
Some devices may provide system-dependent source names that cannot be autodetected.
|
||||
The returned list cannot be assumed to be always complete.
|
||||
@example
|
||||
ffmpeg -sources pulse,server=192.168.0.4
|
||||
@end example
|
||||
|
||||
@item -sinks @var{device}[,@var{opt1}=@var{val1}[,@var{opt2}=@var{val2}]...]
|
||||
Show autodetected sinks of the output device.
|
||||
Some devices may provide system-dependent sink names that cannot be autodetected.
|
||||
The returned list cannot be assumed to be always complete.
|
||||
@example
|
||||
ffmpeg -sinks pulse,server=192.168.0.4
|
||||
@end example
|
||||
|
||||
@item -loglevel [repeat+]@var{loglevel} | -v [repeat+]@var{loglevel}
|
||||
Set the logging level used by the library.
|
||||
Adding "repeat+" indicates that repeated log output should not be compressed
|
||||
@@ -253,14 +227,10 @@ Possible flags for this option are:
|
||||
@item sse4.1
|
||||
@item sse4.2
|
||||
@item avx
|
||||
@item avx2
|
||||
@item xop
|
||||
@item fma3
|
||||
@item fma4
|
||||
@item 3dnow
|
||||
@item 3dnowext
|
||||
@item bmi1
|
||||
@item bmi2
|
||||
@item cmov
|
||||
@end table
|
||||
@item ARM
|
||||
@@ -271,13 +241,6 @@ Possible flags for this option are:
|
||||
@item vfp
|
||||
@item vfpv3
|
||||
@item neon
|
||||
@item setend
|
||||
@end table
|
||||
@item AArch64
|
||||
@table @samp
|
||||
@item armv8
|
||||
@item vfp
|
||||
@item neon
|
||||
@end table
|
||||
@item PowerPC
|
||||
@table @samp
|
||||
|
645
doc/filters.texi
@@ -282,10 +282,6 @@ sequential number of the input frame, starting from 0
|
||||
|
||||
@item pos
|
||||
the position in the file of the input frame, NAN if unknown
|
||||
|
||||
@item w
|
||||
@item h
|
||||
width and height of the input frame if video
|
||||
@end table
|
||||
|
||||
Additionally, these filters support an @option{enable} command that can be used
|
||||
@@ -313,6 +309,41 @@ build.
|
||||
|
||||
Below is a description of the currently available audio filters.
|
||||
|
||||
@section aconvert
|
||||
|
||||
Convert the input audio format to the specified formats.
|
||||
|
||||
@emph{This filter is deprecated. Use @ref{aformat} instead.}
|
||||
|
||||
The filter accepts a string of the form:
|
||||
"@var{sample_format}:@var{channel_layout}".
|
||||
|
||||
@var{sample_format} specifies the sample format, and can be a string or the
|
||||
corresponding numeric value defined in @file{libavutil/samplefmt.h}. Use 'p'
|
||||
suffix for a planar sample format.
|
||||
|
||||
@var{channel_layout} specifies the channel layout, and can be a string
|
||||
or the corresponding number value defined in @file{libavutil/channel_layout.h}.
|
||||
|
||||
The special parameter "auto" signifies that the filter will
|
||||
automatically select the output format depending on the output filter.
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Convert input to float, planar, stereo:
|
||||
@example
|
||||
aconvert=fltp:stereo
|
||||
@end example
|
||||
|
||||
@item
|
||||
Convert input to unsigned 8-bit, automatically select out channel layout:
|
||||
@example
|
||||
aconvert=u8:auto
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@section adelay
|
||||
|
||||
Delay one or more audio channels.
|
||||
@@ -460,7 +491,7 @@ aeval=val(ch)/2:c=same
|
||||
@item
|
||||
Invert phase of the second channel:
|
||||
@example
|
||||
aeval=val(0)|-val(1)
|
||||
eval=val(0)|-val(1)
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@@ -486,21 +517,27 @@ volume as the input audio, at the end of the fade-out transition
|
||||
the output audio will be silence. Default is 44100.
|
||||
|
||||
@item start_time, st
|
||||
Specify the start time of the fade effect. Default is 0.
|
||||
The value must be specified as a time duration; see
|
||||
@ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}
|
||||
for the accepted syntax.
|
||||
If set this option is used instead of @var{start_sample}.
|
||||
Specify time for starting to apply the fade effect. Default is 0.
|
||||
The accepted syntax is:
|
||||
@example
|
||||
[-]HH[:MM[:SS[.m...]]]
|
||||
[-]S+[.m...]
|
||||
@end example
|
||||
See also the function @code{av_parse_time()}.
|
||||
If set this option is used instead of @var{start_sample} one.
|
||||
|
||||
@item duration, d
|
||||
Specify the duration of the fade effect. See
|
||||
@ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}
|
||||
for the accepted syntax.
|
||||
Specify the duration for which the fade effect has to last. Default is 0.
|
||||
The accepted syntax is:
|
||||
@example
|
||||
[-]HH[:MM[:SS[.m...]]]
|
||||
[-]S+[.m...]
|
||||
@end example
|
||||
See also the function @code{av_parse_time()}.
|
||||
At the end of the fade-in effect the output audio will have the same
|
||||
volume as the input audio, at the end of the fade-out transition
|
||||
the output audio will be silence.
|
||||
By default the duration is determined by @var{nb_samples}.
|
||||
If set this option is used instead of @var{nb_samples}.
|
||||
If set this option is used instead of @var{nb_samples} one.
|
||||
|
||||
@item curve
|
||||
Set curve for fade transition.
|
||||
@@ -705,58 +742,8 @@ Pass the audio source unchanged to the output.
|
||||
|
||||
@section apad
|
||||
|
||||
Pad the end of an audio stream with silence.
|
||||
|
||||
This can be used together with @command{ffmpeg} @option{-shortest} to
|
||||
extend audio streams to the same length as the video stream.
|
||||
|
||||
A description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
@item packet_size
|
||||
Set silence packet size. Default value is 4096.
|
||||
|
||||
@item pad_len
|
||||
Set the number of samples of silence to add to the end. After the
|
||||
value is reached, the stream is terminated. This option is mutually
|
||||
exclusive with @option{whole_len}.
|
||||
|
||||
@item whole_len
|
||||
Set the minimum total number of samples in the output audio stream. If
|
||||
the value is longer than the input audio length, silence is added to
|
||||
the end, until the value is reached. This option is mutually exclusive
|
||||
with @option{pad_len}.
|
||||
@end table
|
||||
|
||||
If neither the @option{pad_len} nor the @option{whole_len} option is
|
||||
set, the filter will add silence to the end of the input stream
|
||||
indefinitely.
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Add 1024 samples of silence to the end of the input:
|
||||
@example
|
||||
apad=pad_len=1024
|
||||
@end example
|
||||
|
||||
@item
|
||||
Make sure the audio output will contain at least 10000 samples, pad
|
||||
the input with silence if required:
|
||||
@example
|
||||
apad=whole_len=10000
|
||||
@end example
|
||||
|
||||
@item
|
||||
Use @command{ffmpeg} to pad the audio input with silence, so that the
video stream will always be the shortest and will be converted
until the end in the output file when using the @option{shortest}
option:
|
||||
@example
|
||||
ffmpeg -i VIDEO -i AUDIO -filter_complex "[1:0]apad" -shortest OUTPUT
|
||||
@end example
|
||||
@end itemize
|
||||
Pad the end of a audio stream with silence, this can be used together with
|
||||
-shortest to extend audio streams to the same length as the video stream.
|
||||
|
||||
@section aphaser
|
||||
Add a phasing effect to the input audio.
|
||||
@@ -875,7 +862,7 @@ The input audio is not modified.
|
||||
The shown line contains a sequence of key/value pairs of the form
|
||||
@var{key}:@var{value}.
|
||||
|
||||
The following values are shown in the output:
|
||||
It accepts the following parameters:
|
||||
|
||||
@table @option
|
||||
@item n
|
||||
@@ -1083,9 +1070,9 @@ The number of the first sample that should be output.
|
||||
The number of the first sample that should be dropped.
|
||||
@end table
|
||||
|
||||
@option{start}, @option{end}, and @option{duration} are expressed as time
|
||||
duration specifications; see
|
||||
@ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
|
||||
@option{start}, @option{end}, @option{duration} are expressed as time
|
||||
duration specifications, check the "Time duration" section in the
|
||||
ffmpeg-utils manual.
|
||||
|
||||
Note that the first two sets of the start/end options and the @option{duration}
|
||||
option look at the frame timestamp, while the _sample options simply count the
|
||||
@@ -1600,9 +1587,9 @@ Set the number of samples per channel per each output frame, default
|
||||
is 1024. Only used if the plugin has zero inputs.
|
||||
|
||||
@item duration, d
|
||||
Set the minimum duration of the sourced audio. See
|
||||
@ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}
|
||||
for the accepted syntax.
|
||||
Set the minimum duration of the sourced audio. See the function
|
||||
@code{av_parse_time()} for the accepted format, also check the "Time duration"
|
||||
section in the ffmpeg-utils manual.
|
||||
Note that the resulting duration may be greater than the specified duration,
|
||||
as the generated audio is always cut at the end of a complete frame.
|
||||
If not specified, or the expressed duration is negative, the audio is
|
||||
@@ -1710,11 +1697,11 @@ The default is 0.707q and gives a Butterworth response.
|
||||
Mix channels with specific gain levels. The filter accepts the output
|
||||
channel layout followed by a set of channels definitions.
|
||||
|
||||
This filter is also designed to efficiently remap the channels of an audio
|
||||
This filter is also designed to remap efficiently the channels of an audio
|
||||
stream.
|
||||
|
||||
The filter accepts parameters of the form:
|
||||
"@var{l}|@var{outdef}|@var{outdef}|..."
|
||||
"@var{l}:@var{outdef}:@var{outdef}:..."
|
||||
|
||||
@table @option
|
||||
@item l
|
||||
@@ -1745,13 +1732,13 @@ avoiding clipping noise.
|
||||
For example, if you want to down-mix from stereo to mono, but with a bigger
|
||||
factor for the left channel:
|
||||
@example
|
||||
pan=1c|c0=0.9*c0+0.1*c1
|
||||
pan=1:c0=0.9*c0+0.1*c1
|
||||
@end example
|
||||
|
||||
A customized down-mix to stereo that works automatically for 3-, 4-, 5- and
|
||||
7-channels surround:
|
||||
@example
|
||||
pan=stereo| FL < FL + 0.5*FC + 0.6*BL + 0.6*SL | FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
|
||||
pan=stereo: FL < FL + 0.5*FC + 0.6*BL + 0.6*SL : FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
|
||||
@end example
|
||||
|
||||
Note that @command{ffmpeg} integrates a default down-mix (and up-mix) system
|
||||
@@ -1774,25 +1761,25 @@ remapping.
|
||||
For example, if you have a 5.1 source and want a stereo audio stream by
|
||||
dropping the extra channels:
|
||||
@example
|
||||
pan="stereo| c0=FL | c1=FR"
|
||||
pan="stereo: c0=FL : c1=FR"
|
||||
@end example
|
||||
|
||||
Given the same source, you can also switch front left and front right channels
|
||||
and keep the input channel layout:
|
||||
@example
|
||||
pan="5.1| c0=c1 | c1=c0 | c2=c2 | c3=c3 | c4=c4 | c5=c5"
|
||||
pan="5.1: c0=c1 : c1=c0 : c2=c2 : c3=c3 : c4=c4 : c5=c5"
|
||||
@end example
|
||||
|
||||
If the input is a stereo audio stream, you can mute the front left channel (and
|
||||
still keep the stereo channel layout) with:
|
||||
@example
|
||||
pan="stereo|c1=c1"
|
||||
pan="stereo:c1=c1"
|
||||
@end example
|
||||
|
||||
Still with a stereo audio stream input, you can copy the right channel in both
|
||||
front left and right:
|
||||
@example
|
||||
pan="stereo| c0=FR | c1=FR"
|
||||
pan="stereo: c0=FR : c1=FR"
|
||||
@end example
|
||||
|
||||
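As a usage note (added here as an illustration, not part of the original text), any of the above specifications can be passed to @command{ffmpeg} through @option{-af}, quoting the @code{|} separators for the shell:
@example
ffmpeg -i INPUT -af 'pan=stereo|c0=FR|c1=FR' OUTPUT
@end example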
@section replaygain
|
||||
@@ -1844,75 +1831,6 @@ ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null -
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@section silenceremove
|
||||
|
||||
Remove silence from the beginning, middle or end of the audio.
|
||||
|
||||
The filter accepts the following options:
|
||||
|
||||
@table @option
|
||||
@item start_periods
|
||||
This value is used to indicate if audio should be trimmed at the beginning of
|
||||
the audio. A value of zero indicates no silence should be trimmed from the
|
||||
beginning. When specifying a non-zero value, it trims audio up until it
|
||||
finds non-silence. Normally, when trimming silence from beginning of audio
|
||||
the @var{start_periods} will be @code{1} but it can be increased to higher
|
||||
values to trim all audio up to a specific count of non-silence periods.
|
||||
Default value is @code{0}.
|
||||
|
||||
@item start_duration
|
||||
Specify the amount of time that non-silence must be detected before it stops
|
||||
trimming audio. By increasing the duration, bursts of noises can be treated
|
||||
as silence and trimmed off. Default value is @code{0}.
|
||||
|
||||
@item start_threshold
|
||||
This indicates what sample value should be treated as silence. For digital
|
||||
audio, a value of @code{0} may be fine but for audio recorded from analog,
|
||||
you may wish to increase the value to account for background noise.
|
||||
Can be specified in dB (in case "dB" is appended to the specified value)
|
||||
or amplitude ratio. Default value is @code{0}.
|
||||
|
||||
@item stop_periods
|
||||
Set the count for trimming silence from the end of audio.
|
||||
To remove silence from the middle of a file, specify a @var{stop_periods}
|
||||
that is negative. This value is then treated as a positive value and is
|
||||
used to indicate the effect should restart processing as specified by
|
||||
@var{start_periods}, making it suitable for removing periods of silence
|
||||
in the middle of the audio.
|
||||
Default value is @code{0}.
|
||||
|
||||
@item stop_duration
|
||||
Specify a duration of silence that must exist before audio is not copied any
|
||||
more. By specifying a higher duration, silence that is wanted can be left in
|
||||
the audio.
|
||||
Default value is @code{0}.
|
||||
|
||||
@item stop_threshold
|
||||
This is the same as @option{start_threshold} but for trimming silence from
|
||||
the end of audio.
|
||||
Can be specified in dB (in case "dB" is appended to the specified value)
|
||||
or amplitude ratio. Default value is @code{0}.
|
||||
|
||||
@item leave_silence
|
||||
This indicates that @var{stop_duration} length of audio should be left intact
|
||||
at the beginning of each period of silence.
|
||||
For example, if you want to remove long pauses between words but do not want
|
||||
to remove the pauses completely. Default value is @code{0}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
@item
|
||||
The following example shows how this filter can be used to start a recording
|
||||
that does not contain the delay at the start which usually occurs between
|
||||
pressing the record button and the start of the performance:
|
||||
@example
|
||||
silenceremove=1:5:0.02
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
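The same filter string can be applied from the command line (illustrative invocation, not from the original section):
@example
ffmpeg -i INPUT -af silenceremove=1:5:0.02 OUTPUT
@end example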
@section treble
|
||||
|
||||
Boost or cut treble (upper) frequencies of the audio using a two-pole
|
||||
@@ -2221,9 +2139,8 @@ Set the channel layout. The number of channels in the specified layout
|
||||
must be equal to the number of specified expressions.
|
||||
|
||||
@item duration, d
|
||||
Set the minimum duration of the sourced audio. See
|
||||
@ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}
|
||||
for the accepted syntax.
|
||||
Set the minimum duration of the sourced audio. See the function
|
||||
@code{av_parse_time()} for the accepted format.
|
||||
Note that the resulting duration may be greater than the specified
|
||||
duration, as the generated audio is always cut at the end of a
|
||||
complete frame.
|
||||
@@ -2521,26 +2438,6 @@ Same as the @ref{subtitles} filter, except that it doesn't require libavcodec
|
||||
and libavformat to work. On the other hand, it is limited to ASS (Advanced
|
||||
Substation Alpha) subtitles files.
|
||||
|
||||
This filter accepts the following option in addition to the common options from
|
||||
the @ref{subtitles} filter:
|
||||
|
||||
@table @option
|
||||
@item shaping
|
||||
Set the shaping engine
|
||||
|
||||
Available values are:
|
||||
@table @samp
|
||||
@item auto
|
||||
The default libass shaping engine, which is the best available.
|
||||
@item simple
|
||||
Fast, font-agnostic shaper that can do only substitutions
|
||||
@item complex
|
||||
Slower shaper using OpenType for substitutions and positioning
|
||||
@end table
|
||||
|
||||
The default is @code{auto}.
|
||||
@end table
|
||||
|
||||
@section bbox
|
||||
|
||||
Compute the bounding box for the non-black pixels in the input frame
|
||||
@@ -2858,42 +2755,6 @@ boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chrom
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@section codecview
|
||||
|
||||
Visualize information exported by some codecs.
|
||||
|
||||
Some codecs can export information through frames using side-data or other
|
||||
means. For example, some MPEG based codecs export motion vectors through the
|
||||
@var{export_mvs} flag in the codec @option{flags2} option.
|
||||
|
||||
The filter accepts the following option:
|
||||
|
||||
@table @option
|
||||
@item mv
|
||||
Set motion vectors to visualize.
|
||||
|
||||
Available flags for @var{mv} are:
|
||||
|
||||
@table @samp
|
||||
@item pf
|
||||
forward predicted MVs of P-frames
|
||||
@item bf
|
||||
forward predicted MVs of B-frames
|
||||
@item bb
|
||||
backward predicted MVs of B-frames
|
||||
@end table
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Visualize multi-directional MVs from P and B-frames using @command{ffplay}:
|
||||
@example
|
||||
ffplay -flags2 +export_mvs input.mpg -vf codecview=mv=pf+bf+bb
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@section colorbalance
|
||||
Modify intensity of primary colors (red, green and blue) of input frames.
|
||||
|
||||
@@ -3358,7 +3219,7 @@ curves=psfile='MyCurvesPresets/purple.asv':green='0.45/0.53'
|
||||
|
||||
Denoise frames using 2D DCT (frequency domain filtering).
|
||||
|
||||
This filter is not designed for real time.
|
||||
This filter is not designed for real time and can be extremely slow.
|
||||
|
||||
The filter accepts the following options:
|
||||
|
||||
@@ -3374,14 +3235,14 @@ If you need a more advanced filtering, see @option{expr}.
|
||||
Default is @code{0}.
|
||||
|
||||
@item overlap
|
||||
Set number overlapping pixels for each block. Since the filter can be slow, you
|
||||
may want to reduce this value, at the cost of a less effective filter and the
|
||||
risk of various artefacts.
|
||||
Set number overlapping pixels for each block. Each block is of size
|
||||
@code{16x16}. Since the filter can be slow, you may want to reduce this value,
|
||||
at the cost of a less effective filter and the risk of various artefacts.
|
||||
|
||||
If the overlapping value doesn't permit processing the whole input width or
|
||||
If the overlapping value doesn't allow to process the whole input width or
|
||||
height, a warning will be displayed and according borders won't be denoised.
|
||||
|
||||
Default value is @var{blocksize}-1, which is the best possible setting.
|
||||
Default value is @code{15}.
|
||||
|
||||
@item expr, e
|
||||
Set the coefficient factor expression.
|
||||
@@ -3393,15 +3254,6 @@ If this is option is set, the @option{sigma} option will be ignored.
|
||||
|
||||
The absolute value of the coefficient can be accessed through the @var{c}
|
||||
variable.
|
||||
|
||||
@item n
|
||||
Set the @var{blocksize} using the number of bits. @code{1<<@var{n}} defines the
|
||||
@var{blocksize}, which is the width and height of the processed blocks.
|
||||
|
||||
The default value is @var{3} (8x8) and can be raised to @var{4} for a
|
||||
@var{blocksize} of 16x16. Note that changing this setting has huge consequences
|
||||
on the speed processing. Also, a larger block size does not necessarily means a
|
||||
better de-noising.
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -3416,11 +3268,6 @@ The same operation can be achieved using the expression system:
|
||||
dctdnoiz=e='gte(c, 4.5*3)'
|
||||
@end example
|
||||
|
||||
Violent denoise using a block size of @code{16x16}:
|
||||
@example
|
||||
dctdnoiz=15:n=4
|
||||
@end example
|
||||
|
||||
@anchor{decimate}
|
||||
@section decimate
|
||||
|
||||
@@ -3851,11 +3698,6 @@ the "Color" section in the ffmpeg-utils manual.
|
||||
|
||||
The default value of @var{fontcolor} is "black".
|
||||
|
||||
@item fontcolor_expr
|
||||
String which is expanded the same way as @var{text} to obtain dynamic
|
||||
@var{fontcolor} value. By default this option has empty value and is not
|
||||
processed. When this option is set, it overrides @var{fontcolor} option.
|
||||
|
||||
@item font
|
||||
The font family to be used for drawing text. By default Sans.
|
||||
|
||||
@@ -4069,15 +3911,6 @@ example the text size is not known when evaluating the expression, so
|
||||
the constants @var{text_w} and @var{text_h} will have an undefined
|
||||
value.
|
||||
|
||||
@item expr_int_format, eif
|
||||
Evaluate the expression's value and output as formatted integer.
|
||||
|
||||
The first argument is the expression to be evaluated, just as for the @var{expr} function.
|
||||
The second argument specifies the output format. Allowed values are 'x', 'X', 'd' and
|
||||
'u'. They are treated exactly as in the printf function.
|
||||
The third parameter is optional and sets the number of positions taken by the output.
|
||||
It can be used to add padding with zeros from the left.
|
||||
|
||||
@item gmtime
|
||||
The time at which the filter is running, expressed in UTC.
|
||||
It can accept an argument: a strftime() format string.
|
||||
@@ -4174,18 +4007,7 @@ drawtext='fontfile=Linux Libertine O-40\:style=Semibold:text=FFmpeg'
|
||||
@item
|
||||
Print the date of a real-time encoding (see strftime(3)):
|
||||
@example
|
||||
drawtext='fontfile=FreeSans.ttf:text=%@{localtime\:%a %b %d %Y@}'
|
||||
@end example
|
||||
|
||||
@item
|
||||
Show text fading in and out (appearing/disappearing):
|
||||
@example
|
||||
#!/bin/sh
|
||||
DS=1.0 # display start
|
||||
DE=10.0 # display end
|
||||
FID=1.5 # fade in duration
|
||||
FOD=5 # fade out duration
|
||||
ffplay -f lavfi "color,drawtext=text=TEST:fontsize=50:fontfile=FreeSerif.ttf:fontcolor_expr=ff0000%@{eif\\\\: clip(255*(1*between(t\\, $DS + $FID\\, $DE - $FOD) + ((t - $DS)/$FID)*between(t\\, $DS\\, $DS + $FID) + (-(t - $DE)/$FOD)*between(t\\, $DE - $FOD\\, $DE) )\\, 0\\, 255) \\\\: x\\\\: 2 @}"
|
||||
drawtext='fontfile=FreeSans.ttf:text=%@{localtime:%a %b %d %Y@}'
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
@@ -4447,10 +4269,6 @@ and VIVTC/VFM (VapourSynth project). The later is a light clone of TFM from
|
||||
which @code{fieldmatch} is based on. While the semantic and usage are very
|
||||
close, some behaviour and options names can differ.
|
||||
|
||||
The @ref{decimate} filter currently only works for constant frame rate input.
|
||||
Do not use @code{fieldmatch} and @ref{decimate} if your input has mixed
|
||||
telecined and progressive content with changing framerate.
|
||||
|
||||
The filter accepts the following options:
|
||||
|
||||
@table @option
|
||||
@@ -5117,22 +4935,6 @@ Modify RGB components depending on pixel position:
|
||||
@example
|
||||
geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'
|
||||
@end example
|
||||
|
||||
@item
|
||||
Create a radial gradient that is the same size as the input (also see
|
||||
the @ref{vignette} filter):
|
||||
@example
|
||||
geq=lum=255*gauss((X/W-0.5)*3)*gauss((Y/H-0.5)*3)/gauss(0)/gauss(0),format=gray
|
||||
@end example
|
||||
|
||||
@item
|
||||
Create a linear gradient to use as a mask for another filter, then
|
||||
compose with @ref{overlay}. In this example the video will gradually
|
||||
become more blurry from the top to the bottom of the y-axis as defined
|
||||
by the linear gradient:
|
||||
@example
|
||||
ffmpeg -i input.mp4 -filter_complex "geq=lum=255*(Y/H),format=gray[grad];[0:v]boxblur=4[blur];[blur][grad]alphamerge[alpha];[0:v][alpha]overlay" output.mp4
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@section gradfun
|
||||
@@ -5574,62 +5376,8 @@ value.
|
||||
|
||||
Detect video interlacing type.
|
||||
|
||||
This filter tries to detect if the input frames are interlaced, progressive,
or top or bottom field first. It will also try to detect fields that are
repeated between adjacent frames (a sign of telecine).
|
||||
|
||||
Single frame detection considers only immediately adjacent frames when classifying each frame.
|
||||
Multiple frame detection incorporates the classification history of previous frames.
|
||||
|
||||
The filter will log these metadata values:
|
||||
|
||||
@table @option
|
||||
@item single.current_frame
|
||||
Detected type of current frame using single-frame detection. One of:
|
||||
``tff'' (top field first), ``bff'' (bottom field first),
|
||||
``progressive'', or ``undetermined''
|
||||
|
||||
@item single.tff
|
||||
Cumulative number of frames detected as top field first using single-frame detection.
|
||||
|
||||
@item multiple.tff
|
||||
Cumulative number of frames detected as top field first using multiple-frame detection.
|
||||
|
||||
@item single.bff
|
||||
Cumulative number of frames detected as bottom field first using single-frame detection.
|
||||
|
||||
@item multiple.current_frame
|
||||
Detected type of current frame using multiple-frame detection. One of:
|
||||
``tff'' (top field first), ``bff'' (bottom field first),
|
||||
``progressive'', or ``undetermined''
|
||||
|
||||
@item multiple.bff
|
||||
Cumulative number of frames detected as bottom field first using multiple-frame detection.
|
||||
|
||||
@item single.progressive
|
||||
Cumulative number of frames detected as progressive using single-frame detection.
|
||||
|
||||
@item multiple.progressive
|
||||
Cumulative number of frames detected as progressive using multiple-frame detection.
|
||||
|
||||
@item single.undetermined
|
||||
Cumulative number of frames that could not be classified using single-frame detection.
|
||||
|
||||
@item multiple.undetermined
|
||||
Cumulative number of frames that could not be classified using multiple-frame detection.
|
||||
|
||||
@item repeated.current_frame
|
||||
Which field in the current frame is repeated from the last. One of ``neither'', ``top'', or ``bottom''.
|
||||
|
||||
@item repeated.neither
|
||||
Cumulative number of frames with no repeated field.
|
||||
|
||||
@item repeated.top
|
||||
Cumulative number of frames with the top field repeated from the previous frame's top field.
|
||||
|
||||
@item repeated.bottom
|
||||
Cumulative number of frames with the bottom field repeated from the previous frame's bottom field.
|
||||
@end table
|
||||
This filter tries to detect if the input is interlaced or progressive,
|
||||
top or bottom field first.
|
||||
|
||||
The filter accepts the following options:
|
||||
|
||||
@@ -5638,13 +5386,6 @@ The filter accepts the following options:
|
||||
Set interlacing threshold.
|
||||
@item prog_thres
|
||||
Set progressive threshold.
|
||||
@item repeat_thres
|
||||
Threshold for repeated field detection.
|
||||
@item half_life
|
||||
Number of frames after which a given frame's contribution to the
|
||||
statistics is halved (i.e., it contributes only 0.5 to its
|
||||
classification). The default of 0 means that all frames seen are given
|
||||
full weight of 1.0 forever.
|
||||
@end table
|
||||
|
||||
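An illustrative way to run the detector over part of a file and discard the decoded output (command assumed, not from the original section):
@example
ffmpeg -i INPUT -vf idet -frames:v 500 -an -f null -
@end example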
@section il
|
||||
@@ -5688,7 +5429,8 @@ Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is @code
|
||||
|
||||
Simple interlacing filter from progressive contents. This interleaves upper (or
|
||||
lower) lines from odd frames with lower (or upper) lines from even frames,
|
||||
halving the frame rate and preserving image height.
|
||||
halving the frame rate and preserving image height. A vertical lowpass filter
|
||||
is always applied in order to avoid twitter effects and reduce moiré patterns.
|
||||
|
||||
@example
|
||||
Original Original New Frame
|
||||
@@ -5708,10 +5450,6 @@ It accepts the following optional parameters:
|
||||
@item scan
|
||||
This determines whether the interlaced frame is taken from the even
|
||||
(tff - default) or odd (bff) lines of the progressive frame.
|
||||
|
||||
@item lowpass
|
||||
Enable (default) or disable the vertical lowpass filter to avoid twitter
|
||||
interlacing and reduce moire patterns.
|
||||
@end table
|
||||
|
||||
@section kerndeint
|
||||
@@ -5766,51 +5504,6 @@ kerndeint=map=1
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@section lenscorrection
|
||||
|
||||
Correct radial lens distortion
|
||||
|
||||
This filter can be used to correct for radial distortion as can result from the use
|
||||
of wide angle lenses, and thereby re-rectify the image. To find the right parameters
|
||||
one can use tools available for example as part of opencv or simply trial-and-error.
|
||||
To use opencv use the calibration sample (under samples/cpp) from the opencv sources
|
||||
and extract the k1 and k2 coefficients from the resulting matrix.
|
||||
|
||||
Note that effectively the same filter is available in the open-source tools Krita and
|
||||
Digikam from the KDE project.
|
||||
|
||||
In contrast to the @ref{vignette} filter, which can also be used to compensate lens errors,
|
||||
this filter corrects the distortion of the image, whereas @ref{vignette} corrects the
|
||||
brightness distribution, so you may want to use both filters together in certain
|
||||
cases, though you will have to take care of ordering, i.e. whether vignetting should
|
||||
be applied before or after lens correction.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The filter accepts the following options:
|
||||
|
||||
@table @option
|
||||
@item cx
|
||||
Relative x-coordinate of the focal point of the image, and thereby the center of the
|
||||
distortion. This value has a range [0,1] and is expressed as fractions of the image
|
||||
width.
|
||||
@item cy
|
||||
Relative y-coordinate of the focal point of the image, and thereby the center of the
|
||||
distortion. This value has a range [0,1] and is expressed as fractions of the image
|
||||
height.
|
||||
@item k1
|
||||
Coefficient of the quadratic correction term. 0.5 means no correction.
|
||||
@item k2
|
||||
Coefficient of the double quadratic correction term. 0.5 means no correction.
|
||||
@end table
|
||||
|
||||
The formula that generates the correction is:
|
||||
|
||||
@var{r_src} = @var{r_tgt} * (1 + @var{k1} * (@var{r_tgt} / @var{r_0})^2 + @var{k2} * (@var{r_tgt} / @var{r_0})^4)
|
||||
|
||||
where @var{r_0} is half of the image diagonal and @var{r_src} and @var{r_tgt} are the
|
||||
distances from the focal point in the source and target images, respectively.
|
||||
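A usage sketch follows; the coefficient values are purely illustrative and would normally come from a calibration step such as the opencv sample mentioned above:
@example
ffmpeg -i in.mp4 -vf "lenscorrection=cx=0.5:cy=0.5:k1=-0.227:k2=-0.022" out.mp4
@end example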
|
||||
@anchor{lut3d}
|
||||
@section lut3d
|
||||
|
||||
@@ -6291,7 +5984,7 @@ values are assumed.
|
||||
|
||||
Refer to the official libopencv documentation for more precise
|
||||
information:
|
||||
@url{http://docs.opencv.org/master/modules/imgproc/doc/filtering.html}
|
||||
@url{http://opencv.willowgarage.com/documentation/c/image_filtering.html}
|
||||
|
||||
Several libopencv filters are supported; see the following subsections.
|
||||
|
||||
@@ -6783,9 +6476,6 @@ A description of the accepted parameters follows.
|
||||
@item y3
|
||||
Set coordinates expression for top left, top right, bottom left and bottom right corners.
|
||||
Default values are @code{0:0:W:0:0:H:W:H} with which perspective will remain unchanged.
|
||||
If the @code{sense} option is set to @code{source}, then the specified points will be sent
|
||||
to the corners of the destination. If the @code{sense} option is set to @code{destination},
|
||||
then the corners of the source will be sent to the specified coordinates.
|
||||
|
||||
The expressions can use the following variables:
|
||||
|
||||
@@ -6805,24 +6495,6 @@ It accepts the following values:
|
||||
@end table
|
||||
|
||||
Default value is @samp{linear}.
|
||||
|
||||
@item sense
|
||||
Set interpretation of coordinate options.
|
||||
|
||||
It accepts the following values:
|
||||
@table @samp
|
||||
@item 0, source
|
||||
|
||||
Send point in the source specified by the given coordinates to
|
||||
the corners of the destination.
|
||||
|
||||
@item 1, destination
|
||||
|
||||
Send the corners of the source to the point in the destination specified
|
||||
by the given coordinates.
|
||||
|
||||
Default value is @samp{source}.
|
||||
@end table
|
||||
@end table
|
||||
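As an illustrative sketch (the coordinate values are arbitrary), mapping points slightly inside the top edge of the source to the top corners of the output, while keeping the bottom corners fixed, could be written as:
@example
ffmpeg -i in.mp4 -vf "perspective=x0=W/8:y0=0:x1=7*W/8:y1=0:x2=0:y2=H:x3=W:y3=H:sense=source" out.mp4
@end example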
|
||||
@section phase
|
||||
@@ -7841,7 +7513,7 @@ The input video is not modified.
|
||||
The shown line contains a sequence of key/value pairs of the form
|
||||
@var{key}:@var{value}.
|
||||
|
||||
The following values are shown in the output:
|
||||
It accepts the following parameters:
|
||||
|
||||
@table @option
|
||||
@item n
|
||||
@@ -8701,10 +8373,9 @@ The number of the first frame that should be passed to the output.
|
||||
The number of the first frame that should be dropped.
|
||||
@end table
|
||||
|
||||
@option{start}, @option{end}, and @option{duration} are expressed as time
|
||||
duration specifications; see
|
||||
@ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}
|
||||
for the accepted syntax.
|
||||
@option{start}, @option{end}, @option{duration} are expressed as time
|
||||
duration specifications, check the "Time duration" section in the
|
||||
ffmpeg-utils manual.
|
||||
|
||||
Note that the first two sets of the start/end options and the @option{duration}
|
||||
option look at the frame timestamp, while the _frame variants simply count the
|
||||
@@ -8737,7 +8408,6 @@ ffmpeg -i INPUT -vf trim=duration=1
|
||||
@end itemize
|
||||
|
||||
|
||||
@anchor{unsharp}
|
||||
@section unsharp
|
||||
|
||||
Sharpen or blur the input video.
|
||||
@@ -8899,7 +8569,7 @@ Read a file with transform information for each frame and
|
||||
apply/compensate them. Together with the @ref{vidstabdetect}
|
||||
filter this can be used to deshake videos. See also
|
||||
@url{http://public.hronopik.de/vid.stab}. It is important to also use
|
||||
the @ref{unsharp} filter, see below.
|
||||
the unsharp filter, see below.
|
||||
|
||||
To enable compilation of this filter you need to configure FFmpeg with
|
||||
@code{--enable-libvidstab}.
|
||||
@@ -8909,7 +8579,7 @@ To enable compilation of this filter you need to configure FFmpeg with
|
||||
@table @option
|
||||
@item input
|
||||
Set path to the file used to read the transforms. Default value is
|
||||
@file{transforms.trf}.
|
||||
@file{transforms.trf}).
|
||||
|
||||
@item smoothing
|
||||
Set the number of frames (value*2 + 1) used for lowpass filtering the
|
||||
@@ -8917,9 +8587,9 @@ camera movements. Default value is 10.
|
||||
|
||||
For example a number of 10 means that 21 frames are used (10 in the
|
||||
past and 10 in the future) to smoothen the motion in the video. A
|
||||
larger value leads to a smoother video, but limits the acceleration of
|
||||
the camera (pan/tilt movements). 0 is a special case where a static
|
||||
camera is simulated.
|
||||
larger values leads to a smoother video, but limits the acceleration
|
||||
of the camera (pan/tilt movements). 0 is a special case where a
|
||||
static camera is simulated.
|
||||
|
||||
@item optalgo
|
||||
Set the camera path optimization algorithm.
|
||||
@@ -8956,7 +8626,7 @@ fill the border black
|
||||
Invert transforms if set to 1. Default value is 0.
|
||||
|
||||
@item relative
|
||||
Consider transforms as relative to previous frame if set to 1,
|
||||
Consider transforms as relative to previsou frame if set to 1,
|
||||
absolute if set to 0. Default value is 0.
|
||||
|
||||
@item zoom
|
||||
@@ -9022,7 +8692,7 @@ Use @command{ffmpeg} for a typical stabilization with default values:
|
||||
ffmpeg -i inp.mpeg -vf vidstabtransform,unsharp=5:5:0.8:3:3:0.4 inp_stabilized.mpeg
|
||||
@end example
|
||||
|
||||
Note the use of the @ref{unsharp} filter which is always recommended.
|
||||
Note the use of the unsharp filter which is always recommended.
|
||||
|
||||
@item
|
||||
Zoom in a bit more and load transform data from a given file:
|
||||
@@ -9046,7 +8716,6 @@ For example, to vertically flip a video with @command{ffmpeg}:
|
||||
ffmpeg -i in.avi -vf "vflip" out.avi
|
||||
@end example
|
||||
|
||||
@anchor{vignette}
|
||||
@section vignette
|
||||
|
||||
Make or reverse a natural vignetting effect.
|
||||
@@ -9196,20 +8865,6 @@ Only deinterlace frames marked as interlaced.
|
||||
Default value is @samp{all}.
|
||||
@end table
|
||||
|
||||
@section xbr
|
||||
Apply the xBR high-quality magnification filter which is designed for pixel
|
||||
art. It follows a set of edge-detection rules, see
|
||||
@url{http://www.libretro.com/forums/viewtopic.php?f=6&t=134}.
|
||||
|
||||
It accepts the following option:
|
||||
|
||||
@table @option
|
||||
@item n
|
||||
Set the scaling dimension: @code{2} for @code{2xBR}, @code{3} for
|
||||
@code{3xBR} and @code{4} for @code{4xBR}.
|
||||
Default is @code{3}.
|
||||
@end table
|
||||
|
||||
@anchor{yadif}
|
||||
@section yadif
|
||||
|
||||
@@ -9630,9 +9285,12 @@ number or a valid video frame rate abbreviation. The default value is
|
||||
"25".
|
||||
|
||||
@item duration, d
|
||||
Set the duration of the sourced video. See
|
||||
@ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}
|
||||
for the accepted syntax.
|
||||
Set the video duration of the sourced video. The accepted syntax is:
|
||||
@example
|
||||
[-]HH:MM:SS[.m...]
|
||||
[-]S+[.m...]
|
||||
@end example
|
||||
See also the function @code{av_parse_time()}.
|
||||
|
||||
If not specified, or the expressed duration is negative, the video is
|
||||
supposed to be generated forever.
|
||||
@@ -9660,7 +9318,7 @@ Default value is "all", which will cycle through the list of all tests.
|
||||
|
||||
Some examples:
|
||||
@example
|
||||
mptestsrc=t=dc_luma
|
||||
testsrc=t=dc_luma
|
||||
@end example
|
||||
|
||||
will generate a "dc_luma" test pattern.
|
||||
@@ -9894,9 +9552,12 @@ number or a valid video frame rate abbreviation. The default value is
|
||||
Set the sample aspect ratio of the sourced video.
|
||||
|
||||
@item duration, d
|
||||
Set the duration of the sourced video. See
|
||||
@ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}
|
||||
for the accepted syntax.
|
||||
Set the video duration of the sourced video. The accepted syntax is:
|
||||
@example
|
||||
[-]HH[:MM[:SS[.m...]]]
|
||||
[-]S+[.m...]
|
||||
@end example
|
||||
Also see the the @code{av_parse_time()} function.
|
||||
|
||||
If not specified, or the expressed duration is negative, the video is
|
||||
supposed to be generated forever.
|
||||
@@ -10812,34 +10473,8 @@ The filter accepts the following options:
|
||||
|
||||
@table @option
|
||||
@item volume
|
||||
Specify transform volume (multiplier) expression. The expression can contain
|
||||
variables:
|
||||
@table @option
|
||||
@item frequency, freq, f
|
||||
the frequency where transform is evaluated
|
||||
@item timeclamp, tc
|
||||
value of timeclamp option
|
||||
@end table
|
||||
and functions:
|
||||
@table @option
|
||||
@item a_weighting(f)
|
||||
A-weighting of equal loudness
|
||||
@item b_weighting(f)
|
||||
B-weighting of equal loudness
|
||||
@item c_weighting(f)
|
||||
C-weighting of equal loudness
|
||||
@end table
|
||||
Default value is @code{16}.
|
||||
|
||||
@item tlength
|
||||
Specify transform length expression. The expression can contain variables:
|
||||
@table @option
|
||||
@item frequency, freq, f
|
||||
the frequency where transform is evaluated
|
||||
@item timeclamp, tc
|
||||
value of timeclamp option
|
||||
@end table
|
||||
Default value is @code{384/f*tc/(384/f+tc)}.
|
||||
Specify the transform volume (multiplier). Acceptable value is [1.0, 100.0].
|
||||
Default value is @code{16.0}.
|
||||
|
||||
@item timeclamp
|
||||
Specify the transform timeclamp. At low frequency, there is a trade-off between
|
||||
@@ -10861,26 +10496,6 @@ Default value is @code{3.0}.
|
||||
@item fontfile
|
||||
Specify font file for use with freetype. If not specified, use embedded font.
|
||||
|
||||
@item fontcolor
|
||||
Specify the font color expression. This is an arithmetic expression that should return
|
||||
an integer value 0xRRGGBB. The expression can contain variables:
|
||||
@table @option
|
||||
@item frequency, freq, f
|
||||
the frequency where transform is evaluated
|
||||
@item timeclamp, tc
|
||||
value of timeclamp option
|
||||
@end table
|
||||
and functions:
|
||||
@table @option
|
||||
@item midi(f)
|
||||
midi number of frequency f, some midi numbers: E0(16), C1(24), C2(36), A4(69)
|
||||
@item r(x), g(x), b(x)
|
||||
red, green, and blue value of intensity x
|
||||
@end table
|
||||
Default value is @code{st(0, (midi(f)-59.5)/12);
|
||||
st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));
|
||||
r(1-ld(1)) + b(ld(1))}
|
||||
|
||||
@item fullhd
|
||||
If set to 1 (the default), the video size is 1920x1080 (full HD),
|
||||
if set to 0, the video size is 960x540. Use this option to make CPU usage lower.
|
||||
@@ -10930,24 +10545,6 @@ ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*
|
||||
asplit[a][out1]; [a] showcqt=timeclamp=0.5 [out0]'
|
||||
@end example
|
||||
|
||||
@item
|
||||
B-weighting of equal loudness
|
||||
@example
|
||||
volume=16*b_weighting(f)
|
||||
@end example
|
||||
|
||||
@item
|
||||
Lower Q factor
|
||||
@example
|
||||
tlength=100/f*tc/(100/f+tc)
|
||||
@end example
|
||||
|
||||
@item
|
||||
Custom fontcolor, C-note is colored green, others are colored blue
|
||||
@example
|
||||
fontcolor='if(mod(floor(midi(f)+0.5),12), 0x0000FF, g(1))'
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section showspectrum
|
||||
@@ -10964,19 +10561,8 @@ the "Video size" section in the ffmpeg-utils manual. Default value is
|
||||
@code{640x512}.
|
||||
|
||||
@item slide
|
||||
Specify how the spectrum should slide along the window.
|
||||
|
||||
It accepts the following values:
|
||||
@table @samp
|
||||
@item replace
|
||||
the samples start again on the left when they reach the right
|
||||
@item scroll
|
||||
the samples scroll from right to left
|
||||
@item fullframe
|
||||
frames are only produced when the samples reach the right
|
||||
@end table
|
||||
|
||||
Default value is @code{replace}.
|
||||
Specify if the spectrum should slide along the window. Default value is
|
||||
@code{0}.
|
||||
|
||||
@item mode
|
||||
Specify display mode.
|
||||
@@ -11087,12 +10673,6 @@ Draw a point for each sample.
|
||||
|
||||
@item line
|
||||
Draw a vertical line for each sample.
|
||||
|
||||
@item p2p
|
||||
Draw a point for each sample and a line between them.
|
||||
|
||||
@item cline
|
||||
Draw a centered vertical line for each sample.
|
||||
@end table
|
||||
|
||||
Default value is @code{point}.
|
||||
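A brief sketch of how the mode option might be used (the input file name and size are placeholders), piping an audio file through the lavfi device so the waveform is displayed directly:
@example
ffplay -f lavfi "amovie=input.wav,showwaves=mode=cline:s=640x240"
@end example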
@@ -11107,9 +10687,6 @@ is not explicitly specified.
|
||||
Set the (approximate) output frame rate. This is done by setting the
|
||||
option @var{n}. Default value is "25".
|
||||
|
||||
@item split_channels
|
||||
Set if channels should be drawn separately or overlap. Default value is 0.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
@@ -23,7 +23,7 @@ Reduce buffering.
|
||||
|
||||
@item probesize @var{integer} (@emph{input})
|
||||
Set probing size in bytes, i.e. the size of the data to analyze to get
|
||||
stream information. A higher value will enable detecting more
|
||||
stream information. A higher value will allow to detect more
|
||||
information in case it is dispersed into the stream, but will increase
|
||||
latency. Must be an integer not lesser than 32. It is 5000000 by default.
|
||||
|
||||
@@ -55,10 +55,6 @@ Do not merge side data.
|
||||
Enable RTP MP4A-LATM payload.
|
||||
@item nobuffer
|
||||
Reduce the latency introduced by optional buffering.
|
||||
@item bitexact
|
||||
Only write platform-, build- and time-independent data.
|
||||
This ensures that file and data checksums are reproducible and match between
|
||||
platforms. Its primary use is for regression testing.
|
||||
@end table
|
||||
|
||||
@item seek2any @var{integer} (@emph{input})
|
||||
@@ -67,7 +63,7 @@ Default is 0.
|
||||
|
||||
@item analyzeduration @var{integer} (@emph{input})
|
||||
Specify how many microseconds are analyzed to probe the input. A
|
||||
higher value will enable detecting more accurate information, but will
|
||||
higher value will allow to detect more accurate information, but will
|
||||
increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
|
||||
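For inputs whose stream parameters are detected late (for example some MPEG-TS captures), both @option{probesize} and @option{analyzeduration} are commonly raised together; the file name and values below are illustrative only:
@example
ffmpeg -probesize 10000000 -analyzeduration 10000000 -i input.ts out.mp4
@end example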
|
||||
@item cryptokey @var{hexadecimal string} (@emph{input})
|
||||
@@ -172,18 +168,6 @@ The offset is added by the muxer to the output timestamps.
|
||||
Specifying a positive offset means that the corresponding streams are
|
||||
delayed by the time duration specified in @var{offset}. Default value
|
||||
is @code{0} (meaning that no offset is applied).
|
||||
|
||||
@item format_whitelist @var{list} (@emph{input})
|
||||
"," separated List of allowed demuxers. By default all are allowed.
|
||||
|
||||
@item dump_separator @var{string} (@emph{input})
|
||||
Separator used to separate the fields printed on the command line about the
|
||||
stream parameters.
|
||||
For example, to separate the fields with newlines and indentation:
|
||||
@example
|
||||
ffprobe -dump_separator "
|
||||
" -i ~/videos/matrixbench_mpeg2.mpg
|
||||
@end example
|
||||
@end table
|
||||
|
||||
@c man end FORMAT OPTIONS
|
||||
|
@@ -130,7 +130,7 @@ Go to @url{http://x265.org/developers.html} and follow the instructions
|
||||
for installing the library. Then pass @code{--enable-libx265} to configure
|
||||
to enable it.
|
||||
|
||||
@float NOTE
|
||||
@float note
|
||||
x265 is under the GNU Public License Version 2 or later
|
||||
(see @url{http://www.gnu.org/licenses/old-licenses/gpl-2.0.html} for
|
||||
details), you must upgrade FFmpeg's license to GPL in order to use it.
|
||||
@@ -454,7 +454,6 @@ library:
|
||||
@item Sony Wave64 (W64) @tab X @tab X
|
||||
@item SoX native format @tab X @tab X
|
||||
@item SUN AU format @tab X @tab X
|
||||
@item SUP raw PGS subtitles @tab @tab X
|
||||
@item Text files @tab @tab X
|
||||
@item THP @tab @tab X
|
||||
@tab Used on the Nintendo GameCube.
|
||||
@@ -1031,7 +1030,6 @@ performance on systems without hardware floating point support).
|
||||
@item PJS (Phoenix) @tab @tab X @tab @tab X
|
||||
@item RealText @tab @tab X @tab @tab X
|
||||
@item SAMI @tab @tab X @tab @tab X
|
||||
@item Spruce format (STL) @tab @tab X @tab @tab X
|
||||
@item SSA/ASS @tab X @tab X @tab X @tab X
|
||||
@item SubRip (SRT) @tab X @tab X @tab X @tab X
|
||||
@item SubViewer v1 @tab @tab X @tab @tab X
|
||||
@@ -1057,7 +1055,6 @@ performance on systems without hardware floating point support).
|
||||
@item HLS @tab X
|
||||
@item HTTP @tab X
|
||||
@item HTTPS @tab X
|
||||
@item Icecast @tab X
|
||||
@item MMSH @tab X
|
||||
@item MMST @tab X
|
||||
@item pipe @tab X
|
||||
|
doc/indevs.texi
@@ -1,7 +1,7 @@
|
||||
@chapter Input Devices
|
||||
@c man begin INPUT DEVICES
|
||||
|
||||
Input devices are configured elements in FFmpeg which enable accessing
|
||||
Input devices are configured elements in FFmpeg which allow to access
|
||||
the data coming from a multimedia device attached to your system.
|
||||
|
||||
When you configure your FFmpeg build, all the supported input devices
|
||||
@@ -58,94 +58,34 @@ AVFoundation input device.
|
||||
AVFoundation is the framework currently recommended by Apple for stream grabbing on OSX >= 10.7 as well as on iOS.
|
||||
The older QTKit framework has been marked deprecated since OSX version 10.7.
|
||||
|
||||
The input filename has to be given in the following syntax:
|
||||
@example
|
||||
-i "[[VIDEO]:[AUDIO]]"
|
||||
@end example
|
||||
The first entry selects the video input while the latter selects the audio input.
|
||||
The stream has to be specified by the device name or the device index as shown by the device list.
|
||||
Alternatively, the video and/or audio input device can be chosen by index using the
|
||||
@option{
|
||||
-video_device_index <INDEX>
|
||||
}
|
||||
and/or
|
||||
@option{
|
||||
-audio_device_index <INDEX>
|
||||
}
|
||||
, overriding any
|
||||
device name or index given in the input filename.
|
||||
|
||||
All available devices can be enumerated by using @option{-list_devices true}, listing
|
||||
all device names and corresponding indices.
|
||||
|
||||
There are two device name aliases:
|
||||
@table @code
|
||||
|
||||
@item default
|
||||
Select the AVFoundation default device of the corresponding type.
|
||||
|
||||
@item none
|
||||
Do not record the corresponding media type.
|
||||
This is equivalent to specifying an empty device name or index.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Options
|
||||
|
||||
AVFoundation supports the following options:
|
||||
|
||||
@table @option
|
||||
|
||||
@item -list_devices <TRUE|FALSE>
|
||||
If set to true, a list of all available input devices is given showing all
|
||||
device names and indices.
|
||||
|
||||
@item -video_device_index <INDEX>
|
||||
Specify the video device by its index. Overrides anything given in the input filename.
|
||||
|
||||
@item -audio_device_index <INDEX>
|
||||
Specify the audio device by its index. Overrides anything given in the input filename.
|
||||
|
||||
@item -pixel_format <FORMAT>
|
||||
Request the video device to use a specific pixel format.
|
||||
If the specified format is not supported, a list of available formats is given
|
||||
and the first one in this list is used instead. Available pixel formats are:
|
||||
@code{monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
|
||||
The filename passed as input is parsed to contain either a device name or index.
|
||||
The device index can also be given by using -video_device_index.
|
||||
A given device index will override any given device name.
|
||||
If the desired device consists of numbers only, use -video_device_index to identify it.
|
||||
The default device will be chosen if an empty string or the device name "default" is given.
|
||||
The available devices can be enumerated by using -list_devices.
|
||||
The pixel format can be set using -pixel_format.
|
||||
Available formats:
|
||||
monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
|
||||
bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
|
||||
yuv420p, nv12, yuyv422, gray}
|
||||
yuv420p, nv12, yuyv422, gray
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
Print the list of AVFoundation supported devices and exit:
|
||||
@example
|
||||
$ ffmpeg -f avfoundation -list_devices true -i ""
|
||||
ffmpeg -f avfoundation -i "0" out.mpg
|
||||
@end example
|
||||
|
||||
@item
|
||||
Record video from video device 0 and audio from audio device 0 into out.avi:
|
||||
@example
|
||||
$ ffmpeg -f avfoundation -i "0:0" out.avi
|
||||
ffmpeg -f avfoundation -video_device_index 0 -i "" out.mpg
|
||||
@end example
|
||||
|
||||
@item
|
||||
Record video from video device 2 and audio from audio device 1 into out.avi:
|
||||
@example
|
||||
$ ffmpeg -f avfoundation -video_device_index 2 -i ":1" out.avi
|
||||
ffmpeg -f avfoundation -pixel_format bgr0 -i "default" out.mpg
|
||||
@end example
|
||||
|
||||
@item
|
||||
Record video from the system default video device using the pixel format bgr0 and do not record any audio into out.avi:
|
||||
@example
|
||||
$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
|
||||
ffmpeg -f avfoundation -list_devices true -i ""
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section bktr
|
||||
|
||||
BSD video input device.
|
||||
@@ -548,8 +488,7 @@ ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
|
||||
Audio-CD input device based on cdio.
|
||||
|
||||
To enable this input device during configuration you need libcdio
|
||||
installed on your system. Requires the configure option
|
||||
@code{--enable-libcdio}.
|
||||
installed on your system.
|
||||
|
||||
This device allows playing and grabbing from an Audio-CD.
|
||||
|
||||
@@ -563,8 +502,6 @@ ffmpeg -f libcdio -i /dev/sr0 cd.wav
|
||||
|
||||
IIDC1394 input device, based on libdc1394 and libraw1394.
|
||||
|
||||
Requires the configure option @code{--enable-libdc1394}.
|
||||
|
||||
@section openal
|
||||
|
||||
The OpenAL input device provides audio capture on all systems with a
|
||||
@@ -893,9 +830,6 @@ other filename will be interpreted as device number 0.
|
||||
|
||||
X11 video input device.
|
||||
|
||||
Depends on X11, Xext, and Xfixes. Requires the configure option
|
||||
@code{--enable-x11grab}.
|
||||
|
||||
This device allows one to capture a region of an X11 display.
|
||||
|
||||
The filename passed as input has the syntax:
|
||||
@@ -976,64 +910,6 @@ ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_siz
|
||||
|
||||
@item video_size
|
||||
Set the video frame size. Default value is @code{vga}.
|
||||
|
||||
@item use_shm
|
||||
Use the MIT-SHM extension for shared memory. Default value is @code{1}.
|
||||
It may be necessary to disable it for remote displays.
|
||||
@end table
|
||||
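A sketch of grabbing from a remote display with the shared-memory extension disabled (host name, display and sizes are placeholders):
@example
ffmpeg -f x11grab -use_shm 0 -framerate 25 -video_size cif -i remotehost:0.0+10,20 out.mpg
@end example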
|
||||
@section decklink
|
||||
|
||||
The decklink input device provides capture capabilities for Blackmagic
|
||||
DeckLink devices.
|
||||
|
||||
To enable this input device, you need the Blackmagic DeckLink SDK and you
|
||||
need to configure with the appropriate @code{--extra-cflags}
|
||||
and @code{--extra-ldflags}.
|
||||
On Windows, you need to run the IDL files through @command{widl}.
|
||||
|
||||
DeckLink is very picky about the formats it supports. Pixel format is always
|
||||
uyvy422, framerate and video size must be determined for your device with
|
||||
@command{-list_formats 1}. Audio sample rate is always 48 kHz and the number
|
||||
of channels is currently limited to 2 (stereo).
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item list_devices
|
||||
If set to @option{true}, print a list of devices and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item list_formats
|
||||
If set to @option{true}, print a list of supported formats and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
List input devices:
|
||||
@example
|
||||
ffmpeg -f decklink -list_devices 1 -i dummy
|
||||
@end example
|
||||
|
||||
@item
|
||||
List supported formats:
|
||||
@example
|
||||
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 1080i50 (format 11):
|
||||
@example
|
||||
ffmpeg -f decklink -i 'Intensity Pro@@11' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
|
||||
@c man end INPUT DEVICES
|
||||
|
@@ -22,7 +22,7 @@ a mail for every change to every issue.
|
||||
(the above does all work already after light testing)
|
||||
|
||||
The subscription URL for the ffmpeg-trac list is:
|
||||
http(s)://lists.ffmpeg.org/mailman/listinfo/ffmpeg-trac
|
||||
http(s)://ffmpeg.org/mailman/listinfo/ffmpeg-trac
|
||||
The URL of the webinterface of the tracker is:
|
||||
http(s)://trac.ffmpeg.org
|
||||
|
||||
|
doc/muxers.texi
@@ -194,19 +194,15 @@ can not be smaller than one centi second.
|
||||
Apple HTTP Live Streaming muxer that segments MPEG-TS according to
|
||||
the HTTP Live Streaming (HLS) specification.
|
||||
|
||||
It creates a playlist file, and one or more segment files. The output filename
|
||||
specifies the playlist filename.
|
||||
|
||||
By default, the muxer creates a file for each segment produced. These files
|
||||
have the same name as the playlist, followed by a sequential number and a
|
||||
.ts extension.
|
||||
It creates a playlist file and numbered segment files. The output
|
||||
filename specifies the playlist filename; the segment filenames
|
||||
receive the same basename as the playlist, a sequential number and
|
||||
a .ts extension.
|
||||
|
||||
For example, to convert an input file with @command{ffmpeg}:
|
||||
@example
|
||||
ffmpeg -i in.nut out.m3u8
|
||||
@end example
|
||||
This example will produce the playlist, @file{out.m3u8}, and segment files:
|
||||
@file{out0.ts}, @file{out1.ts}, @file{out2.ts}, etc.
|
||||
|
||||
See also the @ref{segment} muxer, which provides a more generic and
|
||||
flexible implementation of a segmenter, and can be used to perform HLS
|
||||
@@ -224,11 +220,6 @@ Set the segment length in seconds. Default value is 2.
|
||||
Set the maximum number of playlist entries. If set to 0 the list file
|
||||
will contain all the segments. Default value is 5.
|
||||
|
||||
@item hls_ts_options @var{options_list}
|
||||
Set output format options using a :-separated list of key=value
|
||||
parameters. Values containing @code{:} special characters must be
|
||||
escaped.
|
||||
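A sketch of passing an option through to the MPEG-TS segments; the mpegts option name used here is an assumption, shown only to illustrate the key=value syntax:
@example
ffmpeg -i in.nut -hls_ts_options mpegts_transport_stream_id=42 out.m3u8
@end example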
|
||||
@item hls_wrap @var{wrap}
|
||||
Set the number after which the segment filename number (the number
|
||||
specified in each segment file) wraps. If set to 0 the number will be
|
||||
@@ -242,9 +233,6 @@ to @var{wrap}.
|
||||
Start the playlist sequence number from @var{number}. Default value is
|
||||
0.
|
||||
|
||||
@item hls_allow_cache @var{allowcache}
|
||||
Explicitly set whether the client MAY (1) or MUST NOT (0) cache media segments.
|
||||
|
||||
@item hls_base_url @var{baseurl}
|
||||
Append @var{baseurl} to every entry in the playlist.
|
||||
Useful to generate playlists with absolute paths.
|
||||
@@ -253,17 +241,6 @@ Note that the playlist sequence number must be unique for each segment
|
||||
and it is not to be confused with the segment filename sequence number
|
||||
which can be cyclic, for example if the @option{wrap} option is
|
||||
specified.
|
||||
|
||||
@item hls_flags single_file
|
||||
If this flag is set, the muxer will store all segments in a single MPEG-TS
|
||||
file, and will use byte ranges in the playlist. HLS playlists generated in
|
||||
this way will have the version number 4.
|
||||
For example:
|
||||
@example
|
||||
ffmpeg -i in.nut -hls_flags single_file out.m3u8
|
||||
@end example
|
||||
Will produce the playlist, @file{out.m3u8}, and a single segment file,
|
||||
@file{out.ts}.
|
||||
@end table
|
||||
|
||||
@anchor{ico}
|
||||
@@ -559,6 +536,7 @@ a short portion of the file. With this option set, there is no initial
|
||||
mdat atom, and the moov atom only describes the tracks but has
|
||||
a zero duration.
|
||||
|
||||
Files written with this option set do not work in QuickTime.
|
||||
This option is implicitly set when writing ismv (Smooth Streaming) files.
|
||||
@item -movflags separate_moof
|
||||
Write a separate moof (movie fragment) atom for each track. Normally,
|
||||
@@ -573,22 +551,6 @@ This operation can take a while, and will not work in various situations such
|
||||
as fragmented output, thus it is not enabled by default.
|
||||
@item -movflags rtphint
|
||||
Add RTP hinting tracks to the output file.
|
||||
@item -movflags disable_chpl
|
||||
Disable Nero chapter markers (chpl atom). Normally, both Nero chapters
|
||||
and a QuickTime chapter track are written to the file. With this option
|
||||
set, only the QuickTime chapter track will be written. Nero chapters can
|
||||
cause failures when the file is reprocessed with certain tagging programs, like
|
||||
mp3Tag 2.61a and iTunes 11.3; most likely other versions are affected as well.
|
||||
@item -movflags omit_tfhd_offset
|
||||
Do not write any absolute base_data_offset in tfhd atoms. This avoids
|
||||
tying fragments to absolute byte positions in the file/streams.
|
||||
@item -movflags default_base_moof
|
||||
Similarly to the omit_tfhd_offset, this flag avoids writing the
|
||||
absolute base_data_offset field in tfhd atoms, but does so by using
|
||||
the new default-base-is-moof flag instead. This flag is new from
|
||||
14496-12:2012. This may make the fragments easier to parse in certain
|
||||
circumstances (avoiding basing track fragment location calculations
|
||||
on the implicit end of the previous track fragment).
|
||||
@end table
|
||||
|
||||
@subsection Example
|
||||
@@ -601,38 +563,29 @@ ffmpeg -re @var{<normal input/transcoding options>} -movflags isml+frag_keyframe
|
||||
|
||||
@section mp3
|
||||
|
||||
The MP3 muxer writes a raw MP3 stream with the following optional features:
|
||||
@itemize @bullet
|
||||
@item
|
||||
An ID3v2 metadata header at the beginning (enabled by default). Versions 2.3 and
|
||||
2.4 are supported, the @code{id3v2_version} private option controls which one is
|
||||
used (3 or 4). Setting @code{id3v2_version} to 0 disables the ID3v2 header
|
||||
completely.
|
||||
The MP3 muxer writes a raw MP3 stream with an ID3v2 header at the beginning and
|
||||
optionally an ID3v1 tag at the end. ID3v2.3 and ID3v2.4 are supported, the
|
||||
@code{id3v2_version} option controls which one is used. Setting
|
||||
@code{id3v2_version} to 0 will disable the ID3v2 header completely. The legacy
|
||||
ID3v1 tag is not written by default, but may be enabled with the
|
||||
@code{write_id3v1} option.
|
||||
|
||||
The muxer supports writing attached pictures (APIC frames) to the ID3v2 header.
|
||||
The pictures are supplied to the muxer in form of a video stream with a single
|
||||
packet. There can be any number of those streams, each will correspond to a
|
||||
single APIC frame. The stream metadata tags @var{title} and @var{comment} map
|
||||
to APIC @var{description} and @var{picture type} respectively. See
|
||||
The muxer may also write a Xing frame at the beginning, which contains the
|
||||
number of frames in the file. It is useful for computing duration of VBR files.
|
||||
The Xing frame is written if the output stream is seekable and if the
|
||||
@code{write_xing} option is set to 1 (the default).
|
||||
|
||||
The muxer supports writing ID3v2 attached pictures (APIC frames). The pictures
|
||||
are supplied to the muxer in form of a video stream with a single packet. There
|
||||
can be any number of those streams, each will correspond to a single APIC frame.
|
||||
The stream metadata tags @var{title} and @var{comment} map to APIC
|
||||
@var{description} and @var{picture type} respectively. See
|
||||
@url{http://id3.org/id3v2.4.0-frames} for allowed picture types.
|
||||
|
||||
Note that the APIC frames must be written at the beginning, so the muxer will
|
||||
buffer the audio frames until it gets all the pictures. It is therefore advised
|
||||
to provide the pictures as soon as possible to avoid excessive buffering.
|
||||
|
||||
@item
|
||||
A Xing/LAME frame right after the ID3v2 header (if present). It is enabled by
|
||||
default, but will be written only if the output is seekable. The
|
||||
@code{write_xing} private option can be used to disable it. The frame contains
|
||||
various information that may be useful to the decoder, like the audio duration
|
||||
or encoder delay.
|
||||
|
||||
@item
|
||||
A legacy ID3v1 tag at the end of the file (disabled by default). It may be
|
||||
enabled with the @code{write_id3v1} private option, but as its capabilities are
|
||||
very limited, its usage is not recommended.
|
||||
@end itemize
|
||||
|
||||
Examples:
|
||||
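Attach a cover picture while copying the audio (a sketch; file names and tag values are placeholders):
@example
ffmpeg -i in.mp3 -i cover.png -c copy -map 0 -map 1 -metadata:s:v title="Album cover" -metadata:s:v comment="Cover (front)" out.mp3
@end example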
|
||||
Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
|
||||
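A sketch of such a command (the input file is a placeholder):
@example
ffmpeg -i in.wav -id3v2_version 3 -write_id3v1 1 out.mp3
@end example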
@@ -845,11 +798,6 @@ reference stream. The default value is @code{auto}.
|
||||
Override the inner container format, by default it is guessed by the filename
|
||||
extension.
|
||||
|
||||
@item segment_format_options @var{options_list}
|
||||
Set output format options using a :-separated list of key=value
|
||||
parameters. Values containing the @code{:} special character must be
|
||||
escaped.
|
||||
|
||||
@item segment_list @var{name}
|
||||
Generate also a listfile named @var{name}. If not specified no
|
||||
listfile is generated.
|
||||
@@ -874,7 +822,7 @@ Select the listing format.
|
||||
@end table
|
||||
|
||||
@item segment_list_size @var{size}
|
||||
Update the list file so that it contains at most @var{size}
|
||||
Update the list file so that it contains at most the last @var{size}
|
||||
segments. If 0 the list file will contain all the segments. Default
|
||||
value is 0.
|
||||
|
||||
@@ -997,7 +945,7 @@ argument must be a time duration specification, and defaults to 0.
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Remux the content of file @file{in.mkv} to a list of segments
|
||||
To remux the content of file @file{in.mkv} to a list of segments
|
||||
@file{out-000.nut}, @file{out-001.nut}, etc., and write the list of
|
||||
generated segments to @file{out.list}:
|
||||
@example
|
||||
@@ -1005,20 +953,14 @@ ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.list out%03d.nu
|
||||
@end example
|
||||
|
||||
@item
|
||||
Segment input and set output format options for the output segments:
|
||||
@example
|
||||
ffmpeg -i in.mkv -f segment -segment_time 10 -segment_format_options movflags=+faststart out%03d.mp4
|
||||
@end example
|
||||
|
||||
@item
|
||||
Segment the input file according to the split points specified by the
|
||||
@var{segment_times} option:
|
||||
As the example above, but segment the input file according to the split
|
||||
points specified by the @var{segment_times} option:
|
||||
@example
|
||||
ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_times 1,2,3,5,8,13,21 out%03d.nut
|
||||
@end example
|
||||
|
||||
@item
|
||||
Use the @command{ffmpeg} @option{force_key_frames}
|
||||
As the example above, but use the @command{ffmpeg} @option{force_key_frames}
|
||||
option to force key frames in the input at the specified location, together
|
||||
with the segment option @option{segment_time_delta} to account for
|
||||
possible rounding performed when setting key frame times.
|
||||
@@ -1037,7 +979,7 @@ ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_fr
|
||||
@end example
|
||||
|
||||
@item
|
||||
Convert the @file{in.mkv} to TS segments using the @code{libx264}
|
||||
To convert the @file{in.mkv} to TS segments using the @code{libx264}
|
||||
and @code{libfaac} encoders:
|
||||
@example
|
||||
ffmpeg -i in.mkv -map 0 -codec:v libx264 -codec:a libfaac -f ssegment -segment_list out.list out%03d.ts
|
||||
|
@@ -35,9 +35,6 @@ to your project LDFLAGS:
|
||||
-Wl,-Bsymbolic
|
||||
@end example
|
||||
|
||||
If your target platform requires position independent binaries, you should
|
||||
pass the correct linking flag (e.g. @code{-pie}) to @code{--extra-ldexeflags}.
|
||||
|
||||
@section BSD
|
||||
|
||||
BSD make will not build FFmpeg; you need to install and use GNU Make
|
||||
@@ -135,6 +132,8 @@ You will need the following prerequisites:
|
||||
(if using MSVC 2012 or earlier)
|
||||
@item @uref{http://www.mingw.org/, MSYS}
|
||||
@item @uref{http://yasm.tortall.net/, YASM}
|
||||
@item @uref{http://gnuwin32.sourceforge.net/packages/bc.htm, bc for Windows} if
|
||||
you want to run @uref{fate.html, FATE}.
|
||||
@end itemize
|
||||
|
||||
To set up a proper environment in MSYS, you need to run @code{msys.bat} from
|
||||
@@ -276,12 +275,12 @@ llrint() in its C library.
|
||||
Install your Cygwin with all the "Base" packages, plus the
|
||||
following "Devel" ones:
|
||||
@example
|
||||
binutils, gcc4-core, make, git, mingw-runtime, texinfo
|
||||
binutils, gcc4-core, make, git, mingw-runtime, texi2html
|
||||
@end example
|
||||
|
||||
In order to run FATE you will also need the following "Utils" packages:
|
||||
@example
|
||||
diffutils
|
||||
bc, diffutils
|
||||
@end example
|
||||
|
||||
If you want to build FFmpeg with additional libraries, download Cygwin
|
||||
|
@@ -26,10 +26,6 @@
|
||||
#include <string.h>
|
||||
#include <float.h>
|
||||
|
||||
// print_options is built for the host; os_support.h isn't needed and is set up
|
||||
// for the target. Without this, the build breaks on MinGW.
|
||||
#define AVFORMAT_OS_SUPPORT_H
|
||||
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavformat/options_table.h"
|
||||
#include "libavcodec/avcodec.h"
|
||||
|
@@ -255,7 +255,7 @@ Export the MIME type.
|
||||
If set to 1 request ICY (SHOUTcast) metadata from the server. If the server
|
||||
supports this, the metadata has to be retrieved by the application by reading
|
||||
the @option{icy_metadata_headers} and @option{icy_metadata_packet} options.
|
||||
The default is 1.
|
||||
The default is 0.
|
||||
|
||||
@item icy_metadata_headers
|
||||
If the server supports ICY metadata, this contains the ICY-specific HTTP reply
|
||||
@@ -293,50 +293,6 @@ The required syntax to play a stream specifying a cookie is:
|
||||
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
|
||||
@end example
|
||||
|
||||
@section Icecast
|
||||
|
||||
Icecast protocol (stream to Icecast servers)
|
||||
|
||||
This protocol accepts the following options:
|
||||
|
||||
@table @option
|
||||
@item ice_genre
|
||||
Set the stream genre.
|
||||
|
||||
@item ice_name
|
||||
Set the stream name.
|
||||
|
||||
@item ice_description
|
||||
Set the stream description.
|
||||
|
||||
@item ice_url
|
||||
Set the stream website URL.
|
||||
|
||||
@item ice_public
|
||||
Set if the stream should be public.
|
||||
The default is 0 (not public).
|
||||
|
||||
@item user_agent
|
||||
Override the User-Agent header. If not specified a string of the form
|
||||
"Lavf/<version>" will be used.
|
||||
|
||||
@item password
|
||||
Set the Icecast mountpoint password.
|
||||
|
||||
@item content_type
|
||||
Set the stream content type. This must be set if it is different from
|
||||
audio/mpeg.
|
||||
|
||||
@item legacy_icecast
|
||||
This enables support for Icecast versions < 2.4.0, which do not support the
|
||||
HTTP PUT method but only the SOURCE method.
|
||||
|
||||
@end table
|
||||
|
||||
@example
|
||||
icecast://[@var{username}[:@var{password}]@@]@var{server}:@var{port}/@var{mountpoint}
|
||||
@end example
|
||||
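As an illustrative sketch (server, credentials and mount point are placeholders), streaming an MP3 file to a mount point might look like:
@example
ffmpeg -re -i in.mp3 -c copy -content_type audio/mpeg -f mp3 icecast://source:hackme@@myserver:8000/live.mp3
@end example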
|
||||
@section mmst
|
||||
|
||||
MMS (Microsoft Media Server) protocol over TCP.
|
||||
@@ -583,7 +539,7 @@ firewalls.
|
||||
|
||||
@section libsmbclient
|
||||
|
||||
libsmbclient permits one to manipulate CIFS/SMB network resources.
|
||||
libsmbclient permits to manipulate CIFS/SMB network resources.
|
||||
|
||||
The following syntax is required.
|
||||
|
||||
@@ -750,7 +706,7 @@ port will be used for the local RTP and RTCP ports.
|
||||
|
||||
@item
|
||||
If @option{localrtcpport} (the local RTCP port) is not set it will be
|
||||
set to the local RTP port value plus 1.
|
||||
set to the the local RTP port value plus 1.
|
||||
@end enumerate
|
||||
|
||||
@section rtsp
|
||||
@@ -1081,8 +1037,8 @@ Set raise error timeout, expressed in microseconds.
|
||||
This option is only relevant in read mode: if no data arrives in more
|
||||
than this time interval, an error is raised.
|
||||
|
||||
@item listen_timeout=@var{milliseconds}
|
||||
Set listen timeout, expressed in milliseconds.
|
||||
@item listen_timeout=@var{microseconds}
|
||||
Set listen timeout, expressed in microseconds.
|
||||
@end table
|
||||
|
||||
The following example shows how to setup a listening TCP connection
|
||||
|
@@ -618,6 +618,7 @@ flip wavelet?
|
||||
try to use the wavelet transformed predicted image (motion compensated image) as context for coding the residual coefficients
|
||||
try the MV length as context for coding the residual coefficients
|
||||
use extradata for stuff which is in the keyframes now?
|
||||
the MV median predictor is patented IIRC
|
||||
implement per picture halfpel interpolation
|
||||
try different range coder state transition tables for different contexts
|
||||
|
||||
|
doc/style.min.css (vendored): file diff suppressed because one or more lines are too long
doc/t2h.init
@@ -1,35 +1,26 @@
|
||||
# Init file for texi2html.
|
||||
|
||||
# This is deprecated, and the makeinfo/texi2any version is doc/t2h.pm
|
||||
|
||||
# no horiz rules between sections
|
||||
$end_section = \&FFmpeg_end_section;
|
||||
sub FFmpeg_end_section($$)
|
||||
{
|
||||
}
|
||||
|
||||
my $TEMPLATE_HEADER1 = $ENV{"FFMPEG_HEADER1"} || <<EOT;
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
|
||||
<title>FFmpeg documentation</title>
|
||||
<link rel="stylesheet" href="bootstrap.min.css" />
|
||||
<link rel="stylesheet" href="style.min.css" />
|
||||
$EXTRA_HEAD =
|
||||
'<link rel="icon" href="favicon.png" type="image/png" />
|
||||
';
|
||||
|
||||
$CSS_LINES = $ENV{"FFMPEG_CSS"} || <<EOT;
|
||||
<link rel="stylesheet" type="text/css" href="default.css" />
|
||||
EOT
|
||||
|
||||
my $TEMPLATE_HEADER2 = $ENV{"FFMPEG_HEADER2"} || <<EOT;
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
my $TEMPLATE_HEADER = $ENV{"FFMPEG_HEADER"} || <<EOT;
|
||||
<link rel="icon" href="favicon.png" type="image/png" />
|
||||
</head>
|
||||
<body>
|
||||
<div id="container">
|
||||
<div id="body">
|
||||
EOT
|
||||
|
||||
my $TEMPLATE_FOOTER = $ENV{"FFMPEG_FOOTER"} || <<EOT;
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
EOT
|
||||
$PRE_BODY_CLOSE = '</div></div>';
|
||||
|
||||
$SMALL_RULE = '';
|
||||
$BODYTEXT = '';
|
||||
@@ -91,25 +82,21 @@ sub FFmpeg_print_page_head($$)
|
||||
$longtitle = "FFmpeg documentation : " . $longtitle;
|
||||
|
||||
print $fh <<EOT;
|
||||
$TEMPLATE_HEADER1
|
||||
$description
|
||||
<meta name="keywords" content="$longtitle">
|
||||
<meta name="Generator" content="$Texi2HTML::THISDOC{program}">
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
$Texi2HTML::THISDOC{'copying'}<!-- Created on $Texi2HTML::THISDOC{today} by $Texi2HTML::THISDOC{program} -->
|
||||
<!--
|
||||
$Texi2HTML::THISDOC{program_authors}
|
||||
-->
|
||||
$encoding
|
||||
$TEMPLATE_HEADER2
|
||||
EOT
|
||||
}
|
||||
<head>
|
||||
<title>$longtitle</title>
|
||||
|
||||
$print_page_foot = \&FFmpeg_print_page_foot;
|
||||
sub FFmpeg_print_page_foot($$)
|
||||
{
|
||||
my $fh = shift;
|
||||
print $fh <<EOT;
|
||||
$TEMPLATE_FOOTER
|
||||
$description
|
||||
<meta name="keywords" content="$longtitle">
|
||||
<meta name="Generator" content="$Texi2HTML::THISDOC{program}">
|
||||
$encoding
|
||||
$CSS_LINES
|
||||
$TEMPLATE_HEADER
|
||||
EOT
|
||||
}
|
||||
|
||||
|
doc/t2h.pm
@@ -1,221 +0,0 @@
|
||||
# makeinfo HTML output init file
|
||||
#
|
||||
# Copyright (c) 2011, 2012 Free Software Foundation, Inc.
|
||||
# Copyright (c) 2014 Andreas Cadhalpun
|
||||
# Copyright (c) 2014 Tiancheng "Timothy" Gu
|
||||
#
|
||||
# This file is part of FFmpeg.
|
||||
#
|
||||
# FFmpeg is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation; either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# FFmpeg is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
# General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public
|
||||
# License along with FFmpeg; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
# no navigation elements
|
||||
set_from_init_file('HEADERS', 0);
|
||||
|
||||
# TOC and Chapter headings link
|
||||
set_from_init_file('TOC_LINKS', 1);
|
||||
|
||||
# print the TOC where @contents is used
|
||||
set_from_init_file('INLINE_CONTENTS', 1);
|
||||
|
||||
# make chapters <h2>
|
||||
set_from_init_file('CHAPTER_HEADER_LEVEL', 2);
|
||||
|
||||
# Do not add <hr>
|
||||
set_from_init_file('DEFAULT_RULE', '');
|
||||
set_from_init_file('BIG_RULE', '');
|
||||
|
||||
# Customized file beginning
|
||||
sub ffmpeg_begin_file($$$)
|
||||
{
|
||||
my $self = shift;
|
||||
my $filename = shift;
|
||||
my $element = shift;
|
||||
|
||||
my $command;
|
||||
if ($element and $self->get_conf('SPLIT')) {
|
||||
$command = $self->element_command($element);
|
||||
}
|
||||
|
||||
my ($title, $description, $encoding, $date, $css_lines,
|
||||
$doctype, $bodytext, $copying_comment, $after_body_open,
|
||||
$extra_head, $program_and_version, $program_homepage,
|
||||
$program, $generator) = $self->_file_header_informations($command);
|
||||
|
||||
my $links = $self->_get_links ($filename, $element);
|
||||
|
||||
my $head1 = $ENV{"FFMPEG_HEADER1"} || <<EOT;
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<!-- Created by $program_and_version, $program_homepage -->
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>
|
||||
EOT
|
||||
my $head_title = <<EOT;
|
||||
$title
|
||||
EOT
|
||||
|
||||
my $head2 = $ENV{"FFMPEG_HEADER2"} || <<EOT;
|
||||
</title>
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
<body>
|
||||
<div style="width: 95%; margin: auto">
|
||||
<h1>
|
||||
EOT
|
||||
|
||||
my $head3 = $ENV{"FFMPEG_HEADER3"} || <<EOT;
|
||||
</h1>
|
||||
EOT
|
||||
|
||||
return $head1 . $head_title . $head2 . $head_title . $head3;
|
||||
}
|
||||
texinfo_register_formatting_function('begin_file', \&ffmpeg_begin_file);
|
||||
|
||||
# Customized file ending
|
||||
sub ffmpeg_end_file($)
|
||||
{
|
||||
my $self = shift;
|
||||
my $program_string = &{$self->{'format_program_string'}}($self);
|
||||
my $program_text = <<EOT;
|
||||
<p style="font-size: small;">
|
||||
$program_string
|
||||
</p>
|
||||
EOT
|
||||
my $footer = $ENV{FFMPEG_FOOTER} || <<EOT;
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
EOT
|
||||
return $program_text . $footer;
|
||||
}
|
||||
texinfo_register_formatting_function('end_file', \&ffmpeg_end_file);
|
||||
|
||||
# Dummy title command
|
||||
# Ignore title. Title is handled through ffmpeg_begin_file().
|
||||
set_from_init_file('USE_TITLEPAGE_FOR_TITLE', 1);
|
||||
sub ffmpeg_title($$$$)
|
||||
{
|
||||
return '';
|
||||
}
|
||||
|
||||
texinfo_register_command_formatting('titlefont',
|
||||
\&ffmpeg_title);
|
||||
|
||||
# Customized float command. Part of code borrowed from GNU Texinfo.
|
||||
sub ffmpeg_float($$$$$)
|
||||
{
|
||||
my $self = shift;
|
||||
my $cmdname = shift;
|
||||
my $command = shift;
|
||||
my $args = shift;
|
||||
my $content = shift;
|
||||
|
||||
my ($caption, $prepended) = Texinfo::Common::float_name_caption($self,
|
||||
$command);
|
||||
my $caption_text = '';
|
||||
my $prepended_text;
|
||||
my $prepended_save = '';
|
||||
|
||||
if ($self->in_string()) {
|
||||
if ($prepended) {
|
||||
$prepended_text = $self->convert_tree_new_formatting_context(
|
||||
$prepended, 'float prepended');
|
||||
} else {
|
||||
$prepended_text = '';
|
||||
}
|
||||
if ($caption) {
|
||||
$caption_text = $self->convert_tree_new_formatting_context(
|
||||
{'contents' => $caption->{'args'}->[0]->{'contents'}},
|
||||
'float caption');
|
||||
}
|
||||
return $prepended.$content.$caption_text;
|
||||
}
|
||||
|
||||
my $id = $self->command_id($command);
|
||||
my $label;
|
||||
if (defined($id) and $id ne '') {
|
||||
$label = "<a name=\"$id\"></a>";
|
||||
} else {
|
||||
$label = '';
|
||||
}
|
||||
|
||||
if ($prepended) {
|
||||
if ($caption) {
|
||||
# prepend the prepended tree to the first paragraph
|
||||
my @caption_original_contents = @{$caption->{'args'}->[0]->{'contents'}};
|
||||
my @caption_contents;
|
||||
my $new_paragraph;
|
||||
while (@caption_original_contents) {
|
||||
my $content = shift @caption_original_contents;
|
||||
if ($content->{'type'} and $content->{'type'} eq 'paragraph') {
|
||||
%{$new_paragraph} = %{$content};
|
||||
$new_paragraph->{'contents'} = [@{$content->{'contents'}}];
|
||||
unshift (@{$new_paragraph->{'contents'}}, {'cmdname' => 'strong',
|
||||
'args' => [{'type' => 'brace_command_arg',
|
||||
'contents' => [$prepended]}]});
|
||||
push @caption_contents, $new_paragraph;
|
||||
last;
|
||||
} else {
|
||||
push @caption_contents, $content;
|
||||
}
|
||||
}
|
||||
push @caption_contents, @caption_original_contents;
|
||||
if ($new_paragraph) {
|
||||
$caption_text = $self->convert_tree_new_formatting_context(
|
||||
{'contents' => \@caption_contents}, 'float caption');
|
||||
$prepended_text = '';
|
||||
}
|
||||
}
|
||||
if ($caption_text eq '') {
|
||||
$prepended_text = $self->convert_tree_new_formatting_context(
|
||||
$prepended, 'float prepended');
|
||||
if ($prepended_text ne '') {
|
||||
$prepended_save = $prepended_text;
|
||||
$prepended_text = '<p><strong>'.$prepended_text.'</strong></p>';
|
||||
}
|
||||
}
|
||||
} else {
|
||||
$prepended_text = '';
|
||||
}
|
||||
|
||||
if ($caption and $caption_text eq '') {
|
||||
$caption_text = $self->convert_tree_new_formatting_context(
|
||||
$caption->{'args'}->[0], 'float caption');
|
||||
}
|
||||
if ($prepended_text.$caption_text ne '') {
|
||||
$prepended_text = $self->_attribute_class('div','float-caption'). '>'
|
||||
. $prepended_text;
|
||||
$caption_text .= '</div>';
|
||||
}
|
||||
my $html_class = '';
|
||||
if ($prepended_save =~ /NOTE/) {
|
||||
$html_class = 'info';
|
||||
$prepended_text = '';
|
||||
$caption_text = '';
|
||||
} elsif ($prepended_save =~ /IMPORTANT/) {
|
||||
$html_class = 'warning';
|
||||
$prepended_text = '';
|
||||
$caption_text = '';
|
||||
}
|
||||
return $self->_attribute_class('div', $html_class). '>' . "\n" .
|
||||
$prepended_text . $caption_text . $content . '</div>';
|
||||
}
|
||||
|
||||
texinfo_register_command_formatting('float',
|
||||
\&ffmpeg_float);
|
||||
|
||||
1;
|
@@ -332,12 +332,12 @@ $inf = pop @instack;
|
||||
|
||||
die "No filename or title\n" unless defined $fn && defined $tl;
|
||||
|
||||
# always use utf8
|
||||
print "=encoding utf8\n\n";
|
||||
|
||||
$chapters{NAME} = "$fn \- $tl\n";
|
||||
$chapters{FOOTNOTES} .= "=back\n" if exists $chapters{FOOTNOTES};
|
||||
|
||||
# always use utf8
|
||||
print "=encoding utf8\n\n";
|
||||
|
||||
unshift @chapters_sequence, "NAME";
|
||||
for $chapter (@chapters_sequence) {
|
||||
if (exists $chapters{$chapter}) {
|
||||
|
@@ -782,9 +782,6 @@ large numbers (usually 2^53 and larger).
|
||||
Round the value of expression @var{expr} upwards to the nearest
|
||||
integer. For example, "ceil(1.5)" is "2.0".
|
||||
|
||||
@item clip(x, min, max)
|
||||
Return the value of @var{x} clipped between @var{min} and @var{max}.
|
||||
|
||||
@item cos(x)
|
||||
Compute cosine of @var{x}.
|
||||
|
||||
@@ -844,7 +841,7 @@ Return 1.0 if @var{x} is +/-INFINITY, 0.0 otherwise.
|
||||
Return 1.0 if @var{x} is NAN, 0.0 otherwise.
|
||||
|
||||
@item ld(var)
|
||||
Load the value of the internal variable with number
|
||||
Allow to load the value of the internal variable with number
|
||||
@var{var}, which was previously stored with st(@var{var}, @var{expr}).
|
||||
The function returns the loaded value.
|
||||
|
||||
@@ -912,7 +909,7 @@ Compute the square root of @var{expr}. This is equivalent to
|
||||
Compute expression @code{1/(1 + exp(4*x))}.
|
||||
|
||||
@item st(var, expr)
|
||||
Store the value of the expression @var{expr} in an internal
|
||||
Allow to store the value of the expression @var{expr} in an internal
|
||||
variable. @var{var} specifies the number of the variable where to
|
||||
store the value, and it is a value ranging from 0 to 9. The function
|
||||
returns the value stored in the internal variable.
|
||||
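As a small illustrative sketch using only functions documented above, an expression can store an intermediate result and reuse it:
@example
st(0, cos(PI/3)); clip(ld(0)*3, 0, 1)
@end example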
|
@@ -16,15 +16,16 @@ outputs the modified frame. The most simple way of doing this is to take a
|
||||
similar filter. We'll pick edgedetect, but any other should do. You can look
|
||||
for others using the `./ffmpeg -v 0 -filters|grep ' V->V '` command.
|
||||
|
||||
- sed 's/edgedetect/foobar/g;s/EdgeDetect/Foobar/g' libavfilter/vf_edgedetect.c > libavfilter/vf_foobar.c
|
||||
- cp libavfilter/vf_{edgedetect,foobar}.c
|
||||
- sed -i s/edgedetect/foobar/g -i libavfilter/vf_foobar.c
|
||||
- sed -i s/EdgeDetect/Foobar/g -i libavfilter/vf_foobar.c
|
||||
- edit libavfilter/Makefile, and add an entry for "foobar" following the
|
||||
pattern of the other filters.
|
||||
- edit libavfilter/allfilters.c, and add an entry for "foobar" following the
|
||||
pattern of the other filters.
|
||||
- ./configure ...
|
||||
- make -j<whatever> ffmpeg
|
||||
- ./ffmpeg -i http://samples.ffmpeg.org/image-samples/lena.pnm -vf foobar foobar.png
|
||||
Note here: you can obviously use a random local image instead of a remote URL.
|
||||
- ./ffmpeg -i tests/lena.pnm -vf foobar foobar.png
|
||||
|
||||
If everything went right, you should get a foobar.png with Lena edge-detected.
|
||||
|
||||
|
ffmpeg.c
@@ -352,6 +352,7 @@ void term_init(void)
|
||||
signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
|
||||
}
|
||||
#endif
|
||||
avformat_network_deinit();
|
||||
|
||||
signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
|
||||
signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
|
||||
@@ -474,7 +475,6 @@ static void ffmpeg_cleanup(int ret)
|
||||
}
|
||||
ost->bitstream_filters = NULL;
|
||||
av_frame_free(&ost->filtered_frame);
|
||||
av_frame_free(&ost->last_frame);
|
||||
|
||||
av_parser_close(ost->parser);
|
||||
|
||||
@@ -483,9 +483,6 @@ static void ffmpeg_cleanup(int ret)
|
||||
av_freep(&ost->avfilter);
|
||||
av_freep(&ost->logfile_prefix);
|
||||
|
||||
av_freep(&ost->audio_channels_map);
|
||||
ost->audio_channels_mapped = 0;
|
||||
|
||||
avcodec_free_context(&ost->enc_ctx);
|
||||
|
||||
av_freep(&output_streams[i]);
|
||||
@@ -535,15 +532,6 @@ static void ffmpeg_cleanup(int ret)
|
||||
term_exit();
|
||||
}
|
||||
|
||||
void remove_avoptions(AVDictionary **a, AVDictionary *b)
|
||||
{
|
||||
AVDictionaryEntry *t = NULL;
|
||||
|
||||
while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
|
||||
av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
|
||||
}
|
||||
}
|
||||
|
||||
void assert_avoptions(AVDictionary *m)
|
||||
{
|
||||
AVDictionaryEntry *t;
|
||||
@@ -590,14 +578,6 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
|
||||
AVCodecContext *avctx = ost->st->codec;
|
||||
int ret;
|
||||
|
||||
if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
|
||||
ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
if (ost->st->codec->extradata) {
|
||||
memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
|
||||
ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
|
||||
}
|
||||
}
|
||||
|
||||
if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
|
||||
(avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
|
||||
pkt->pts = pkt->dts = AV_NOPTS_VALUE;
|
||||
@@ -622,11 +602,7 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
|
||||
|
||||
while (bsfc) {
|
||||
AVPacket new_pkt = *pkt;
|
||||
AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
|
||||
bsfc->filter->name,
|
||||
NULL, 0);
|
||||
int a = av_bitstream_filter_filter(bsfc, avctx,
|
||||
bsf_arg ? bsf_arg->value : NULL,
|
||||
int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
|
||||
&new_pkt.data, &new_pkt.size,
|
||||
pkt->data, pkt->size,
|
||||
pkt->flags & AV_PKT_FLAG_KEY);
|
||||
@@ -642,8 +618,6 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
|
||||
a = AVERROR(ENOMEM);
|
||||
}
|
||||
if (a > 0) {
|
||||
pkt->side_data = NULL;
|
||||
pkt->side_data_elems = 0;
|
||||
av_free_packet(pkt);
|
||||
new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
|
||||
av_buffer_default_free, NULL, 0);
|
||||
@@ -663,17 +637,6 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
|
||||
}
|
||||
|
||||
if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
|
||||
if (pkt->dts != AV_NOPTS_VALUE &&
|
||||
pkt->pts != AV_NOPTS_VALUE &&
|
||||
pkt->dts > pkt->pts) {
|
||||
av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
|
||||
pkt->dts, pkt->pts,
|
||||
ost->file_index, ost->st->index);
|
||||
pkt->pts =
|
||||
pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
|
||||
- FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
|
||||
- FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
|
||||
}
|
||||
if(
|
||||
(avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
|
||||
pkt->dts != AV_NOPTS_VALUE &&
|
||||
@@ -696,6 +659,15 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
|
||||
pkt->dts = max;
|
||||
}
|
||||
}
|
||||
if (pkt->dts != AV_NOPTS_VALUE &&
|
||||
pkt->pts != AV_NOPTS_VALUE &&
|
||||
pkt->dts > pkt->pts) {
|
||||
av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d\n",
|
||||
pkt->dts, pkt->pts,
|
||||
ost->file_index, ost->st->index);
|
||||
pkt->pts = AV_NOPTS_VALUE;
|
||||
pkt->dts = AV_NOPTS_VALUE;
|
||||
}
|
||||
}
|
||||
ost->last_mux_dts = pkt->dts;
|
||||
|
||||
@@ -878,14 +850,14 @@ static void do_subtitle_out(AVFormatContext *s,
|
||||
|
||||
static void do_video_out(AVFormatContext *s,
|
||||
OutputStream *ost,
|
||||
AVFrame *next_picture)
|
||||
AVFrame *in_picture)
|
||||
{
|
||||
int ret, format_video_sync;
|
||||
AVPacket pkt;
|
||||
AVCodecContext *enc = ost->enc_ctx;
|
||||
AVCodecContext *mux_enc = ost->st->codec;
|
||||
int nb_frames, nb0_frames, i;
|
||||
double sync_ipts, delta, delta0;
|
||||
int nb_frames, i;
|
||||
double sync_ipts, delta;
|
||||
double duration = 0;
|
||||
int frame_size = 0;
|
||||
InputStream *ist = NULL;
|
||||
@@ -896,20 +868,10 @@ static void do_video_out(AVFormatContext *s,
|
||||
if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
|
||||
duration = 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base));
|
||||
|
||||
if (!ost->filters_script &&
|
||||
!ost->filters &&
|
||||
next_picture &&
|
||||
ist &&
|
||||
lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
|
||||
duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
|
||||
}
|
||||
|
||||
sync_ipts = next_picture->pts;
|
||||
delta0 = sync_ipts - ost->sync_opts;
|
||||
delta = delta0 + duration;
|
||||
sync_ipts = in_picture->pts;
|
||||
delta = sync_ipts - ost->sync_opts + duration;
|
||||
|
||||
/* by default, we output a single frame */
|
||||
nb0_frames = 0;
|
||||
nb_frames = 1;
|
||||
|
||||
format_video_sync = video_sync_method;
|
||||
@@ -929,34 +891,19 @@ static void do_video_out(AVFormatContext *s,
|
||||
}
|
||||
}
|
||||
|
||||
if (delta0 < 0 &&
|
||||
delta > 0 &&
|
||||
format_video_sync != VSYNC_PASSTHROUGH &&
|
||||
format_video_sync != VSYNC_DROP) {
|
||||
double cor = FFMIN(-delta0, duration);
|
||||
av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
|
||||
sync_ipts += cor;
|
||||
duration -= cor;
|
||||
delta0 += cor;
|
||||
}
|
||||
|
||||
switch (format_video_sync) {
|
||||
case VSYNC_VSCFR:
|
||||
if (ost->frame_number == 0 && delta - duration >= 0.5) {
|
||||
av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
|
||||
delta = duration;
|
||||
delta0 = 0;
|
||||
ost->sync_opts = lrint(sync_ipts);
|
||||
}
|
||||
case VSYNC_CFR:
|
||||
// FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
|
||||
if (delta < -1.1)
|
||||
nb_frames = 0;
|
||||
else if (delta > 1.1) {
|
||||
else if (delta > 1.1)
|
||||
nb_frames = lrintf(delta);
|
||||
if (delta0 > 1.1)
|
||||
nb0_frames = lrintf(delta0 - 0.6);
|
||||
}
|
||||
break;
|
||||
case VSYNC_VFR:
|
||||
if (delta <= -0.6)
|
||||
@@ -973,36 +920,28 @@ static void do_video_out(AVFormatContext *s,
|
||||
}
|
||||
|
||||
nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
|
||||
nb0_frames = FFMIN(nb0_frames, nb_frames);
|
||||
if (nb0_frames == 0 && ost->last_droped) {
|
||||
if (nb_frames == 0) {
|
||||
nb_frames_drop++;
|
||||
av_log(NULL, AV_LOG_VERBOSE,
|
||||
"*** dropping frame %d from stream %d at ts %"PRId64"\n",
|
||||
ost->frame_number, ost->st->index, ost->last_frame->pts);
|
||||
}
|
||||
if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
|
||||
ost->frame_number, ost->st->index, in_picture->pts);
|
||||
return;
|
||||
} else if (nb_frames > 1) {
|
||||
if (nb_frames > dts_error_threshold * 30) {
|
||||
av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
|
||||
nb_frames_drop++;
|
||||
return;
|
||||
}
|
||||
nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
|
||||
nb_frames_dup += nb_frames - 1;
|
||||
av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
|
||||
}
|
||||
ost->last_droped = nb_frames == nb0_frames;
|
||||
|
||||
/* duplicates frame if needed */
|
||||
for (i = 0; i < nb_frames; i++) {
|
||||
AVFrame *in_picture;
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
if (i < nb0_frames && ost->last_frame) {
|
||||
in_picture = ost->last_frame;
|
||||
} else
|
||||
in_picture = next_picture;
|
||||
|
||||
in_picture->pts = ost->sync_opts;
|
||||
|
||||
#if 1
|
||||
@@ -1017,8 +956,10 @@ static void do_video_out(AVFormatContext *s,
|
||||
/* raw pictures are written as AVPicture structure to
|
||||
avoid any copies. We support temporarily the older
|
||||
method. */
|
||||
if (in_picture->interlaced_frame)
|
||||
mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
|
||||
mux_enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
|
||||
mux_enc->coded_frame->top_field_first = in_picture->top_field_first;
|
||||
if (mux_enc->coded_frame->interlaced_frame)
|
||||
mux_enc->field_order = mux_enc->coded_frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
|
||||
else
|
||||
mux_enc->field_order = AV_FIELD_PROGRESSIVE;
|
||||
pkt.data = (uint8_t *)in_picture;
|
||||
@@ -1044,7 +985,8 @@ static void do_video_out(AVFormatContext *s,
|
||||
mux_enc->field_order = AV_FIELD_PROGRESSIVE;
|
||||
|
||||
in_picture->quality = enc->global_quality;
|
||||
in_picture->pict_type = 0;
|
||||
if (!enc->me_threshold)
|
||||
in_picture->pict_type = 0;
|
||||
|
||||
pts_time = in_picture->pts != AV_NOPTS_VALUE ?
|
||||
in_picture->pts * av_q2d(enc->time_base) : NAN;
|
||||
@@ -1138,11 +1080,6 @@ static void do_video_out(AVFormatContext *s,
|
||||
if (vstats_filename && frame_size)
|
||||
do_video_stats(ost, frame_size);
|
||||
}
|
||||
|
||||
if (!ost->last_frame)
|
||||
ost->last_frame = av_frame_alloc();
|
||||
av_frame_unref(ost->last_frame);
|
||||
av_frame_ref(ost->last_frame, next_picture);
|
||||
}
|
||||
|
||||
static double psnr(double d)
|
||||
@@ -1311,6 +1248,7 @@ static void print_final_stats(int64_t total_size)
|
||||
if (data_size && total_size>0 && total_size >= data_size)
|
||||
percent = 100.0 * (total_size - data_size) / data_size;
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "\n");
|
||||
av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
|
||||
video_size / 1024.0,
|
||||
audio_size / 1024.0,
|
||||
@@ -1501,8 +1439,6 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
||||
if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
|
||||
pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
|
||||
ost->st->time_base, AV_TIME_BASE_Q));
|
||||
if (is_last_report)
|
||||
nb_frames_drop += ost->last_droped;
|
||||
}
|
||||
|
||||
secs = pts / AV_TIME_BASE;
|
||||
@@ -1538,11 +1474,10 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
||||
av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
|
||||
|
||||
if (print_stats || is_last_report) {
|
||||
const char end = is_last_report ? '\n' : '\r';
|
||||
if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
|
||||
fprintf(stderr, "%s %c", buf, end);
|
||||
fprintf(stderr, "%s \r", buf);
|
||||
} else
|
||||
av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
|
||||
av_log(NULL, AV_LOG_INFO, "%s \r", buf);
|
||||
|
||||
fflush(stderr);
|
||||
}
|
||||
@@ -1864,10 +1799,18 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
|
||||
for (i = 0; i < nb_filtergraphs; i++)
|
||||
if (ist_in_filtergraph(filtergraphs[i], ist)) {
|
||||
FilterGraph *fg = filtergraphs[i];
|
||||
int j;
|
||||
if (configure_filtergraph(fg) < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
|
||||
exit_program(1);
|
||||
}
|
||||
for (j = 0; j < fg->nb_outputs; j++) {
|
||||
OutputStream *ost = fg->outputs[j]->ost;
|
||||
if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
|
||||
!(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
|
||||
av_buffersink_set_frame_size(ost->filter->filter,
|
||||
ost->enc_ctx->frame_size);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1949,20 +1892,6 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
|
||||
if (*got_output || ret<0 || pkt->size)
|
||||
decode_error_stat[ret<0] ++;
|
||||
|
||||
if (*got_output && ret >= 0) {
|
||||
if (ist->dec_ctx->width != decoded_frame->width ||
|
||||
ist->dec_ctx->height != decoded_frame->height ||
|
||||
ist->dec_ctx->pix_fmt != decoded_frame->format) {
|
||||
av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
|
||||
decoded_frame->width,
|
||||
decoded_frame->height,
|
||||
decoded_frame->format,
|
||||
ist->dec_ctx->width,
|
||||
ist->dec_ctx->height,
|
||||
ist->dec_ctx->pix_fmt);
|
||||
}
|
||||
}
|
||||
|
||||
if (!*got_output || ret < 0) {
|
||||
if (!pkt->size) {
|
||||
for (i = 0; i < ist->nb_filters; i++)
|
||||
@@ -2119,7 +2048,7 @@ out:
|
||||
}
|
||||
|
||||
/* pkt = NULL means EOF (needed to flush decoder buffers) */
|
||||
static int process_input_packet(InputStream *ist, const AVPacket *pkt)
|
||||
static int output_packet(InputStream *ist, const AVPacket *pkt)
|
||||
{
|
||||
int ret = 0, i;
|
||||
int got_output = 0;
|
||||
@@ -2128,7 +2057,7 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt)
|
||||
if (!ist->saw_first_ts) {
|
||||
ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
|
||||
ist->pts = 0;
|
||||
if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
|
||||
if (pkt != NULL && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
|
||||
ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
|
||||
ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
|
||||
}
|
||||
@@ -2140,7 +2069,7 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt)
|
||||
if (ist->next_pts == AV_NOPTS_VALUE)
|
||||
ist->next_pts = ist->pts;
|
||||
|
||||
if (!pkt) {
|
||||
if (pkt == NULL) {
|
||||
/* EOF handling */
|
||||
av_init_packet(&avpkt);
|
||||
avpkt.data = NULL;
|
||||
@@ -2179,11 +2108,11 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt)
|
||||
ret = decode_video (ist, &avpkt, &got_output);
|
||||
if (avpkt.duration) {
|
||||
duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
|
||||
} else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
|
||||
} else if(ist->dec_ctx->time_base.num != 0 && ist->dec_ctx->time_base.den != 0) {
|
||||
int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
|
||||
duration = ((int64_t)AV_TIME_BASE *
|
||||
ist->dec_ctx->framerate.den * ticks) /
|
||||
ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
|
||||
ist->dec_ctx->time_base.num * ticks) /
|
||||
ist->dec_ctx->time_base.den;
|
||||
} else
|
||||
duration = 0;
|
||||
|
||||
@@ -2238,11 +2167,11 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt)
|
||||
ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
|
||||
} else if (pkt->duration) {
|
||||
ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
|
||||
} else if(ist->dec_ctx->framerate.num != 0) {
|
||||
} else if(ist->dec_ctx->time_base.num != 0) {
|
||||
int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
|
||||
ist->next_dts += ((int64_t)AV_TIME_BASE *
|
||||
ist->dec_ctx->framerate.den * ticks) /
|
||||
ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
|
||||
ist->dec_ctx->time_base.num * ticks) /
|
||||
ist->dec_ctx->time_base.den;
|
||||
}
|
||||
break;
|
||||
}
|
||||
@@ -2354,12 +2283,8 @@ static int init_input_stream(int ist_index, char *error, int error_len)
|
||||
ist->dec_ctx->thread_safe_callbacks = 1;
|
||||
|
||||
av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
|
||||
if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
|
||||
(ist->decoding_needed & DECODING_FOR_OST)) {
|
||||
av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
|
||||
if (ist->decoding_needed & DECODING_FOR_FILTER)
|
||||
av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
|
||||
}
|
||||
if(ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
|
||||
av_dict_set(&ist->decoder_opts, "compute_edt", "1", 0);
|
||||
|
||||
if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
|
||||
av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
|
||||
@@ -2520,7 +2445,7 @@ static int transcode_init(void)
|
||||
AVFormatContext *oc;
|
||||
OutputStream *ost;
|
||||
InputStream *ist;
|
||||
char error[1024] = {0};
|
||||
char error[1024];
|
||||
int want_sdp = 1;
|
||||
|
||||
for (i = 0; i < nb_filtergraphs; i++) {
|
||||
@@ -2619,13 +2544,11 @@ static int transcode_init(void)
|
||||
enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
|
||||
enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
|
||||
enc_ctx->field_order = dec_ctx->field_order;
|
||||
if (dec_ctx->extradata_size) {
|
||||
enc_ctx->extradata = av_mallocz(extra_size);
|
||||
if (!enc_ctx->extradata) {
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
|
||||
enc_ctx->extradata = av_mallocz(extra_size);
|
||||
if (!enc_ctx->extradata) {
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
|
||||
enc_ctx->extradata_size= dec_ctx->extradata_size;
|
||||
enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
|
||||
|
||||
@@ -2680,26 +2603,6 @@ static int transcode_init(void)
|
||||
av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
|
||||
enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
|
||||
|
||||
if (ist->st->nb_side_data) {
|
||||
ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
|
||||
sizeof(*ist->st->side_data));
|
||||
if (!ost->st->side_data)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
for (j = 0; j < ist->st->nb_side_data; j++) {
|
||||
const AVPacketSideData *sd_src = &ist->st->side_data[j];
|
||||
AVPacketSideData *sd_dst = &ost->st->side_data[j];
|
||||
|
||||
sd_dst->data = av_malloc(sd_src->size);
|
||||
if (!sd_dst->data)
|
||||
return AVERROR(ENOMEM);
|
||||
memcpy(sd_dst->data, sd_src->data, sd_src->size);
|
||||
sd_dst->size = sd_src->size;
|
||||
sd_dst->type = sd_src->type;
|
||||
ost->st->nb_side_data++;
|
||||
}
|
||||
}
|
||||
|
||||
ost->parser = av_parser_init(enc_ctx->codec_id);
|
||||
|
||||
switch (enc_ctx->codec_type) {
|
||||
@@ -2714,10 +2617,7 @@ static int transcode_init(void)
|
||||
enc_ctx->frame_size = dec_ctx->frame_size;
|
||||
enc_ctx->audio_service_type = dec_ctx->audio_service_type;
|
||||
enc_ctx->block_align = dec_ctx->block_align;
|
||||
enc_ctx->initial_padding = dec_ctx->delay;
|
||||
#if FF_API_AUDIOENC_DELAY
|
||||
enc_ctx->delay = dec_ctx->delay;
|
||||
#endif
|
||||
if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
|
||||
enc_ctx->block_align= 0;
|
||||
if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
|
||||
@@ -2764,7 +2664,7 @@ static int transcode_init(void)
|
||||
}
|
||||
|
||||
if (ist)
|
||||
ist->decoding_needed |= DECODING_FOR_OST;
|
||||
ist->decoding_needed++;
|
||||
ost->encoding_needed = 1;
|
||||
|
||||
set_encoder_id(output_files[ost->file_index], ost);
|
||||
@@ -2964,11 +2864,10 @@ static int transcode_init(void)
|
||||
av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
|
||||
" It takes bits/s as argument, not kbits/s\n");
|
||||
} else {
|
||||
ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
|
||||
if (ret < 0) {
|
||||
if (av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts) < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL,
|
||||
"Error setting up codec context options.\n");
|
||||
return ret;
|
||||
exit_program(1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3168,9 +3067,9 @@ static OutputStream *choose_output(void)
|
||||
OutputStream *ost = output_streams[i];
|
||||
int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
|
||||
AV_TIME_BASE_Q);
|
||||
if (!ost->finished && opts < opts_min) {
|
||||
if (!ost->unavailable && !ost->finished && opts < opts_min) {
|
||||
opts_min = opts;
|
||||
ost_min = ost->unavailable ? NULL : ost;
|
||||
ost_min = ost;
|
||||
}
|
||||
}
|
||||
return ost_min;
|
||||
@@ -3432,7 +3331,7 @@ static int process_input(int file_index)
|
||||
for (i = 0; i < ifile->nb_streams; i++) {
|
||||
ist = input_streams[ifile->ist_index + i];
|
||||
if (ist->decoding_needed) {
|
||||
ret = process_input_packet(ist, NULL);
|
||||
ret = output_packet(ist, NULL);
|
||||
if (ret>0)
|
||||
return 0;
|
||||
}
|
||||
@@ -3520,7 +3419,7 @@ static int process_input(int file_index)
|
||||
}
|
||||
|
||||
/* add the stream-global side data to the first packet */
|
||||
if (ist->nb_packets == 1) {
|
||||
if (ist->nb_packets == 1)
|
||||
if (ist->st->nb_side_data)
|
||||
av_packet_split_side_data(&pkt);
|
||||
for (i = 0; i < ist->st->nb_side_data; i++) {
|
||||
@@ -3536,7 +3435,6 @@ static int process_input(int file_index)
|
||||
|
||||
memcpy(dst_data, src_sd->data, src_sd->size);
|
||||
}
|
||||
}
|
||||
|
||||
if (pkt.dts != AV_NOPTS_VALUE)
|
||||
pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
|
||||
@@ -3548,14 +3446,13 @@ static int process_input(int file_index)
|
||||
if (pkt.dts != AV_NOPTS_VALUE)
|
||||
pkt.dts *= ist->ts_scale;
|
||||
|
||||
if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
|
||||
ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
|
||||
pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
|
||||
if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
|
||||
&& (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
|
||||
int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
|
||||
int64_t delta = pkt_dts - ifile->last_ts;
|
||||
if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
|
||||
delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
|
||||
if(delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
|
||||
(delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
|
||||
ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)){
|
||||
ifile->ts_offset -= delta;
|
||||
av_log(NULL, AV_LOG_DEBUG,
|
||||
"Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
|
||||
@@ -3566,15 +3463,14 @@ static int process_input(int file_index)
|
||||
}
|
||||
}
|
||||
|
||||
if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
|
||||
ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
|
||||
pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
|
||||
if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
|
||||
!copy_ts) {
|
||||
int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
|
||||
int64_t delta = pkt_dts - ist->next_dts;
|
||||
if (is->iformat->flags & AVFMT_TS_DISCONT) {
|
||||
if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
|
||||
delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
|
||||
(delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
|
||||
ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
|
||||
pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
|
||||
ifile->ts_offset -= delta;
|
||||
av_log(NULL, AV_LOG_DEBUG,
|
||||
@@ -3586,7 +3482,7 @@ static int process_input(int file_index)
|
||||
}
|
||||
} else {
|
||||
if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
|
||||
delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
|
||||
(delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
|
||||
av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
|
||||
pkt.dts = AV_NOPTS_VALUE;
|
||||
}
|
||||
@@ -3594,7 +3490,7 @@ static int process_input(int file_index)
|
||||
int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
|
||||
delta = pkt_pts - ist->next_dts;
|
||||
if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
|
||||
delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
|
||||
(delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
|
||||
av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
|
||||
pkt.pts = AV_NOPTS_VALUE;
|
||||
}
|
||||
@@ -3616,7 +3512,7 @@ static int process_input(int file_index)
|
||||
|
||||
sub2video_heartbeat(ist, pkt.pts);
|
||||
|
||||
ret = process_input_packet(ist, &pkt);
|
||||
ret = output_packet(ist, &pkt);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
|
||||
ist->file_index, ist->st->index, av_err2str(ret));
|
||||
@@ -3782,7 +3678,7 @@ static int transcode(void)
|
||||
for (i = 0; i < nb_input_streams; i++) {
|
||||
ist = input_streams[i];
|
||||
if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
|
||||
process_input_packet(ist, NULL);
|
||||
output_packet(ist, NULL);
|
||||
}
|
||||
}
|
||||
flush_encoders();
|
||||
@@ -3837,7 +3733,6 @@ static int transcode(void)
|
||||
av_dict_free(&ost->encoder_opts);
|
||||
av_dict_free(&ost->swr_opts);
|
||||
av_dict_free(&ost->resample_opts);
|
||||
av_dict_free(&ost->bsf_args);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3859,7 +3754,7 @@ static int64_t getutime(void)
|
||||
GetProcessTimes(proc, &c, &e, &k, &u);
|
||||
return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
|
||||
#else
|
||||
return av_gettime_relative();
|
||||
return av_gettime();
|
||||
#endif
|
||||
}
|
||||
|
||||
|
13
ffmpeg.h
@@ -243,10 +243,7 @@ typedef struct InputStream {
|
||||
AVStream *st;
|
||||
int discard; /* true if stream data should be discarded */
|
||||
int user_set_discard;
|
||||
int decoding_needed; /* non zero if the packets must be decoded in 'raw_fifo', see DECODING_FOR_* */
|
||||
#define DECODING_FOR_OST 1
|
||||
#define DECODING_FOR_FILTER 2
|
||||
|
||||
int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
|
||||
AVCodecContext *dec_ctx;
|
||||
AVCodec *dec;
|
||||
AVFrame *decoded_frame;
|
||||
@@ -388,8 +385,6 @@ typedef struct OutputStream {
|
||||
AVCodec *enc;
|
||||
int64_t max_frames;
|
||||
AVFrame *filtered_frame;
|
||||
AVFrame *last_frame;
|
||||
int last_droped;
|
||||
|
||||
/* video only */
|
||||
AVRational frame_rate;
|
||||
@@ -407,7 +402,7 @@ typedef struct OutputStream {
|
||||
double forced_keyframes_expr_const_values[FKF_NB];
|
||||
|
||||
/* audio only */
|
||||
int *audio_channels_map; /* list of the channels id to pick from the source stream */
|
||||
int audio_channels_map[SWR_CH_MAX]; /* list of the channels id to pick from the source stream */
|
||||
int audio_channels_mapped; /* number of channels in audio_channels_map */
|
||||
|
||||
char *logfile_prefix;
|
||||
@@ -422,7 +417,6 @@ typedef struct OutputStream {
|
||||
AVDictionary *encoder_opts;
|
||||
AVDictionary *swr_opts;
|
||||
AVDictionary *resample_opts;
|
||||
AVDictionary *bsf_args;
|
||||
char *apad;
|
||||
OSTFinished finished; /* no more packets should be written for this stream */
|
||||
int unavailable; /* true if the steram is unavailable (possibly temporarily) */
|
||||
@@ -484,7 +478,6 @@ extern int do_deinterlace;
|
||||
extern int do_hex_dump;
|
||||
extern int do_pkt_dump;
|
||||
extern int copy_ts;
|
||||
extern int start_at_zero;
|
||||
extern int copy_tb;
|
||||
extern int debug_ts;
|
||||
extern int exit_on_error;
|
||||
@@ -494,7 +487,6 @@ extern int stdin_interaction;
|
||||
extern int frame_bits_per_raw_sample;
|
||||
extern AVIOContext *progress_avio;
|
||||
extern float max_error_rate;
|
||||
extern int vdpau_api_ver;
|
||||
|
||||
extern const AVIOInterruptCB int_cb;
|
||||
|
||||
@@ -510,7 +502,6 @@ void show_usage(void);
|
||||
|
||||
void opt_output_file(void *optctx, const char *filename);
|
||||
|
||||
void remove_avoptions(AVDictionary **a, AVDictionary *b);
|
||||
void assert_avoptions(AVDictionary *m);
|
||||
|
||||
int guess_input_channel_layout(InputStream *ist);
|
||||
|
@@ -275,7 +275,7 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
|
||||
av_assert0(ist);
|
||||
|
||||
ist->discard = 0;
|
||||
ist->decoding_needed |= DECODING_FOR_FILTER;
|
||||
ist->decoding_needed++;
|
||||
ist->st->discard = AVDISCARD_NONE;
|
||||
|
||||
GROW_ARRAY(fg->inputs, fg->nb_inputs);
|
||||
@@ -383,8 +383,9 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
|
||||
snprintf(name, sizeof(name), "pixel format for output stream %d:%d",
|
||||
ost->file_index, ost->index);
|
||||
ret = avfilter_graph_create_filter(&filter,
|
||||
avfilter_get_by_name("format"),
|
||||
"format", pix_fmts, NULL, fg->graph);
|
||||
avfilter_get_by_name("format"),
|
||||
"format", pix_fmts, NULL,
|
||||
fg->graph);
|
||||
av_freep(&pix_fmts);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
@@ -619,7 +620,6 @@ static int sub2video_prepare(InputStream *ist)
|
||||
ist->sub2video.frame = av_frame_alloc();
|
||||
if (!ist->sub2video.frame)
|
||||
return AVERROR(ENOMEM);
|
||||
ist->sub2video.last_pts = INT64_MIN;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -637,7 +637,6 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
AVBPrint args;
|
||||
char name[255];
|
||||
int ret, pad_idx = 0;
|
||||
int64_t tsoffset = 0;
|
||||
|
||||
if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot connect video filter to audio input\n");
|
||||
@@ -712,14 +711,8 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
|
||||
snprintf(name, sizeof(name), "trim for input stream %d:%d",
|
||||
ist->file_index, ist->st->index);
|
||||
if (copy_ts) {
|
||||
tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
|
||||
if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
|
||||
tsoffset += f->ctx->start_time;
|
||||
}
|
||||
ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
|
||||
AV_NOPTS_VALUE : tsoffset, f->recording_time,
|
||||
&last_filter, &pad_idx, name);
|
||||
AV_NOPTS_VALUE : 0, f->recording_time, &last_filter, &pad_idx, name);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@@ -738,7 +731,6 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
AVBPrint args;
|
||||
char name[255];
|
||||
int ret, pad_idx = 0;
|
||||
int64_t tsoffset = 0;
|
||||
|
||||
if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot connect audio filter to non audio input\n");
|
||||
@@ -821,14 +813,8 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
|
||||
snprintf(name, sizeof(name), "trim for input stream %d:%d",
|
||||
ist->file_index, ist->st->index);
|
||||
if (copy_ts) {
|
||||
tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
|
||||
if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
|
||||
tsoffset += f->ctx->start_time;
|
||||
}
|
||||
ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
|
||||
AV_NOPTS_VALUE : tsoffset, f->recording_time,
|
||||
&last_filter, &pad_idx, name);
|
||||
AV_NOPTS_VALUE : 0, f->recording_time, &last_filter, &pad_idx, name);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@@ -844,12 +830,6 @@ static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||
av_freep(&ifilter->name);
|
||||
DESCRIBE_FILTER_LINK(ifilter, in, 1);
|
||||
|
||||
if (!ifilter->ist->dec) {
|
||||
av_log(NULL, AV_LOG_ERROR,
|
||||
"No decoder for stream #%d:%d, filtering impossible\n",
|
||||
ifilter->ist->file_index, ifilter->ist->st->index);
|
||||
return AVERROR_DECODER_NOT_FOUND;
|
||||
}
|
||||
switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
|
||||
case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
|
||||
case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
|
||||
@@ -912,11 +892,8 @@ int configure_filtergraph(FilterGraph *fg)
|
||||
init_input_filter(fg, cur);
|
||||
|
||||
for (cur = inputs, i = 0; cur; cur = cur->next, i++)
|
||||
if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0) {
|
||||
avfilter_inout_free(&inputs);
|
||||
avfilter_inout_free(&outputs);
|
||||
if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0)
|
||||
return ret;
|
||||
}
|
||||
avfilter_inout_free(&inputs);
|
||||
|
||||
if (!init || simple) {
|
||||
@@ -942,16 +919,6 @@ int configure_filtergraph(FilterGraph *fg)
|
||||
}
|
||||
|
||||
fg->reconfiguration = 1;
|
||||
|
||||
for (i = 0; i < fg->nb_outputs; i++) {
|
||||
OutputStream *ost = fg->outputs[i]->ost;
|
||||
if (ost &&
|
||||
ost->enc->type == AVMEDIA_TYPE_AUDIO &&
|
||||
!(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
|
||||
av_buffersink_set_frame_size(ost->filter->filter,
|
||||
ost->enc_ctx->frame_size);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
183
ffmpeg_opt.c
@@ -91,7 +91,6 @@ int do_benchmark_all = 0;
|
||||
int do_hex_dump = 0;
|
||||
int do_pkt_dump = 0;
|
||||
int copy_ts = 0;
|
||||
int start_at_zero = 0;
|
||||
int copy_tb = -1;
|
||||
int debug_ts = 0;
|
||||
int exit_on_error = 0;
|
||||
@@ -509,8 +508,7 @@ static int opt_recording_timestamp(void *optctx, const char *opt, const char *ar
|
||||
char buf[128];
|
||||
int64_t recording_timestamp = parse_time_or_die(opt, arg, 0) / 1E6;
|
||||
struct tm time = *gmtime((time_t*)&recording_timestamp);
|
||||
if (!strftime(buf, sizeof(buf), "creation_time=%FT%T%z", &time))
|
||||
return -1;
|
||||
strftime(buf, sizeof(buf), "creation_time=%FT%T%z", &time);
|
||||
parse_option(o, "metadata", buf, options);
|
||||
|
||||
av_log(NULL, AV_LOG_WARNING, "%s is deprecated, set the 'creation_time' metadata "
|
||||
@@ -704,7 +702,7 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
|
||||
MATCH_PER_STREAM_OPT(fix_sub_duration, i, ist->fix_sub_duration, ic, st);
|
||||
MATCH_PER_STREAM_OPT(canvas_sizes, str, canvas_size, ic, st);
|
||||
if (canvas_size &&
|
||||
av_parse_video_size(&ist->dec_ctx->width, &ist->dec_ctx->height, canvas_size) < 0) {
|
||||
av_parse_video_size(&dec->width, &dec->height, canvas_size) < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Invalid canvas size: %s.\n", canvas_size);
|
||||
exit_program(1);
|
||||
}
|
||||
@@ -787,6 +785,7 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
AVInputFormat *file_iformat = NULL;
|
||||
int err, i, ret;
|
||||
int64_t timestamp;
|
||||
uint8_t buf[128];
|
||||
AVDictionary **opts;
|
||||
AVDictionary *unused_opts = NULL;
|
||||
AVDictionaryEntry *e = NULL;
|
||||
@@ -794,7 +793,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
char * video_codec_name = NULL;
|
||||
char * audio_codec_name = NULL;
|
||||
char *subtitle_codec_name = NULL;
|
||||
int scan_all_pmts_set = 0;
|
||||
|
||||
if (o->format) {
|
||||
if (!(file_iformat = av_find_input_format(o->format))) {
|
||||
@@ -816,7 +814,8 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
exit_program(1);
|
||||
}
|
||||
if (o->nb_audio_sample_rate) {
|
||||
av_dict_set_int(&o->g->format_opts, "sample_rate", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i, 0);
|
||||
snprintf(buf, sizeof(buf), "%d", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i);
|
||||
av_dict_set(&o->g->format_opts, "sample_rate", buf, 0);
|
||||
}
|
||||
if (o->nb_audio_channels) {
|
||||
/* because we set audio_channels based on both the "ac" and
|
||||
@@ -825,7 +824,9 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
if (file_iformat && file_iformat->priv_class &&
|
||||
av_opt_find(&file_iformat->priv_class, "channels", NULL, 0,
|
||||
AV_OPT_SEARCH_FAKE_OBJ)) {
|
||||
av_dict_set_int(&o->g->format_opts, "channels", o->audio_channels[o->nb_audio_channels - 1].u.i, 0);
|
||||
snprintf(buf, sizeof(buf), "%d",
|
||||
o->audio_channels[o->nb_audio_channels - 1].u.i);
|
||||
av_dict_set(&o->g->format_opts, "channels", buf, 0);
|
||||
}
|
||||
}
|
||||
if (o->nb_frame_rates) {
|
||||
@@ -865,19 +866,12 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
ic->flags |= AVFMT_FLAG_NONBLOCK;
|
||||
ic->interrupt_callback = int_cb;
|
||||
|
||||
if (!av_dict_get(o->g->format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
|
||||
av_dict_set(&o->g->format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
|
||||
scan_all_pmts_set = 1;
|
||||
}
|
||||
/* open the input file with generic avformat function */
|
||||
err = avformat_open_input(&ic, filename, file_iformat, &o->g->format_opts);
|
||||
if (err < 0) {
|
||||
print_error(filename, err);
|
||||
exit_program(1);
|
||||
}
|
||||
if (scan_all_pmts_set)
|
||||
av_dict_set(&o->g->format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
|
||||
remove_avoptions(&o->g->format_opts, o->g->codec_opts);
|
||||
assert_avoptions(o->g->format_opts);
|
||||
|
||||
/* apply forced codec ids */
|
||||
@@ -930,7 +924,7 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
f->start_time = o->start_time;
|
||||
f->recording_time = o->recording_time;
|
||||
f->input_ts_offset = o->input_ts_offset;
|
||||
f->ts_offset = o->input_ts_offset - (copy_ts ? (start_at_zero && ic->start_time != AV_NOPTS_VALUE ? ic->start_time : 0) : timestamp);
|
||||
f->ts_offset = o->input_ts_offset - (copy_ts ? 0 : timestamp);
|
||||
f->nb_streams = ic->nb_streams;
|
||||
f->rate_emu = o->rate_emu;
|
||||
f->accurate_seek = o->accurate_seek;
|
||||
@@ -1138,11 +1132,8 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
|
||||
MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
|
||||
while (bsf) {
|
||||
char *arg = NULL;
|
||||
if (next = strchr(bsf, ','))
|
||||
*next++ = 0;
|
||||
if (arg = strchr(bsf, '='))
|
||||
*arg++ = 0;
|
||||
if (!(bsfc = av_bitstream_filter_init(bsf))) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
|
||||
exit_program(1);
|
||||
@@ -1151,7 +1142,6 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
bsfc_prev->next = bsfc;
|
||||
else
|
||||
ost->bitstream_filters = bsfc;
|
||||
av_dict_set(&ost->bsf_args, bsfc->filter->name, arg, 0);
|
||||
|
||||
bsfc_prev = bsfc;
|
||||
bsf = next;
|
||||
@@ -1289,8 +1279,6 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
|
||||
exit_program(1);
|
||||
}
|
||||
if (frame_rate && video_sync_method == VSYNC_PASSTHROUGH)
|
||||
av_log(NULL, AV_LOG_ERROR, "Using -vsync 0 and -r can produce invalid output files\n");
|
||||
|
||||
MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
|
||||
if (frame_aspect_ratio) {
|
||||
@@ -1489,13 +1477,11 @@ static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc, in
|
||||
}
|
||||
|
||||
if (!ist || (ist->file_index == map->file_idx && ist->st->index == map->stream_idx)) {
|
||||
if (av_reallocp_array(&ost->audio_channels_map,
|
||||
ost->audio_channels_mapped + 1,
|
||||
sizeof(*ost->audio_channels_map)
|
||||
) < 0 )
|
||||
exit_program(1);
|
||||
|
||||
ost->audio_channels_map[ost->audio_channels_mapped++] = map->channel_idx;
|
||||
if (ost->audio_channels_mapped < FF_ARRAY_ELEMS(ost->audio_channels_map))
|
||||
ost->audio_channels_map[ost->audio_channels_mapped++] = map->channel_idx;
|
||||
else
|
||||
av_log(NULL, AV_LOG_FATAL, "Max channel mapping for output %d.%d reached\n",
|
||||
ost->file_index, ost->st->index);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1635,36 +1621,27 @@ static int read_ffserver_streams(OptionsContext *o, AVFormatContext *s, const ch
|
||||
AVStream *st;
|
||||
OutputStream *ost;
|
||||
AVCodec *codec;
|
||||
const char *enc_config;
|
||||
AVCodecContext *avctx;
|
||||
|
||||
codec = avcodec_find_encoder(ic->streams[i]->codec->codec_id);
|
||||
if (!codec) {
|
||||
av_log(s, AV_LOG_ERROR, "no encoder found for codec id %i\n", ic->streams[i]->codec->codec_id);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
if (codec->type == AVMEDIA_TYPE_AUDIO)
|
||||
opt_audio_codec(o, "c:a", codec->name);
|
||||
else if (codec->type == AVMEDIA_TYPE_VIDEO)
|
||||
opt_video_codec(o, "c:v", codec->name);
|
||||
ost = new_output_stream(o, s, codec->type, -1);
|
||||
st = ost->st;
|
||||
avctx = st->codec;
|
||||
ost->enc = codec;
|
||||
|
||||
avcodec_get_context_defaults3(st->codec, codec);
|
||||
enc_config = av_stream_get_recommended_encoder_configuration(ic->streams[i]);
|
||||
if (enc_config) {
|
||||
AVDictionary *opts = NULL;
|
||||
av_dict_parse_string(&opts, enc_config, "=", ",", 0);
|
||||
av_opt_set_dict2(st->codec, &opts, AV_OPT_SEARCH_CHILDREN);
|
||||
av_dict_free(&opts);
|
||||
}
|
||||
// FIXME: a more elegant solution is needed
|
||||
memcpy(st, ic->streams[i], sizeof(AVStream));
|
||||
st->cur_dts = 0;
|
||||
st->info = av_malloc(sizeof(*st->info));
|
||||
memcpy(st->info, ic->streams[i]->info, sizeof(*st->info));
|
||||
st->codec= avctx;
|
||||
avcodec_copy_context(st->codec, ic->streams[i]->codec);
|
||||
|
||||
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && !ost->stream_copy)
|
||||
choose_sample_fmt(st, codec);
|
||||
else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && !ost->stream_copy)
|
||||
choose_pixel_fmt(st, st->codec, codec, st->codec->pix_fmt);
|
||||
avcodec_copy_context(ost->enc_ctx, st->codec);
|
||||
if (enc_config)
|
||||
av_dict_parse_string(&ost->encoder_opts, enc_config, "=", ",", 0);
|
||||
}
|
||||
|
||||
avformat_close_input(&ic);
|
||||
@@ -1752,8 +1729,8 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
if (o->stop_time != INT64_MAX && o->recording_time == INT64_MAX) {
|
||||
int64_t start_time = o->start_time == AV_NOPTS_VALUE ? 0 : o->start_time;
|
||||
if (o->stop_time <= start_time) {
|
||||
av_log(NULL, AV_LOG_ERROR, "-to value smaller than -ss; aborting.\n");
|
||||
exit_program(1);
|
||||
av_log(NULL, AV_LOG_WARNING, "-to value smaller than -ss; ignoring -to.\n");
|
||||
o->stop_time = INT64_MAX;
|
||||
} else {
|
||||
o->recording_time = o->stop_time - start_time;
|
||||
}
|
||||
@@ -1851,7 +1828,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
/* pick the "best" stream of each type */
|
||||
|
||||
/* video: highest resolution */
|
||||
if (!o->video_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_VIDEO) != AV_CODEC_ID_NONE) {
|
||||
if (!o->video_disable && oc->oformat->video_codec != AV_CODEC_ID_NONE) {
|
||||
int area = 0, idx = -1;
|
||||
int qcr = avformat_query_codec(oc->oformat, oc->oformat->video_codec, 0);
|
||||
for (i = 0; i < nb_input_streams; i++) {
|
||||
@@ -1873,7 +1850,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
}
|
||||
|
||||
/* audio: most channels */
|
||||
if (!o->audio_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_AUDIO) != AV_CODEC_ID_NONE) {
|
||||
if (!o->audio_disable && oc->oformat->audio_codec != AV_CODEC_ID_NONE) {
|
||||
int channels = 0, idx = -1;
|
||||
for (i = 0; i < nb_input_streams; i++) {
|
||||
ist = input_streams[i];
|
||||
@@ -1892,27 +1869,8 @@ static int open_output_file(OptionsContext *o, const char *filename)
|
||||
if (!o->subtitle_disable && (avcodec_find_encoder(oc->oformat->subtitle_codec) || subtitle_codec_name)) {
|
||||
for (i = 0; i < nb_input_streams; i++)
|
||||
if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
|
||||
AVCodecDescriptor const *input_descriptor =
|
||||
avcodec_descriptor_get(input_streams[i]->st->codec->codec_id);
|
||||
AVCodecDescriptor const *output_descriptor = NULL;
|
||||
AVCodec const *output_codec =
|
||||
avcodec_find_encoder(oc->oformat->subtitle_codec);
|
||||
int input_props = 0, output_props = 0;
|
||||
if (output_codec)
|
||||
output_descriptor = avcodec_descriptor_get(output_codec->id);
|
||||
if (input_descriptor)
|
||||
input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
|
||||
if (output_descriptor)
|
||||
output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
|
||||
if (subtitle_codec_name ||
|
||||
input_props & output_props ||
|
||||
// Map dvb teletext which has neither property to any output subtitle encoder
|
||||
input_descriptor && output_descriptor &&
|
||||
(!input_descriptor->props ||
|
||||
!output_descriptor->props)) {
|
||||
new_subtitle_stream(o, oc, i);
|
||||
break;
|
||||
}
|
||||
new_subtitle_stream(o, oc, i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
/* do something with data? */
|
||||
@@ -2034,13 +1992,8 @@ loop_end:
|
||||
const AVClass *class = avcodec_get_class();
|
||||
const AVOption *option = av_opt_find(&class, e->key, NULL, 0,
|
||||
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
|
||||
const AVClass *fclass = avformat_get_class();
|
||||
const AVOption *foption = av_opt_find(&fclass, e->key, NULL, 0,
|
||||
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
|
||||
if (!option || foption)
|
||||
if (!option)
|
||||
continue;
|
||||
|
||||
|
||||
if (!(option->flags & AV_OPT_FLAG_ENCODING_PARAM)) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Codec AVOption %s (%s) specified for "
|
||||
"output file #%d (%s) is not an encoding option.\n", e->key,
|
||||
@@ -2085,7 +2038,9 @@ loop_end:
|
||||
assert_file_overwrite(filename);
|
||||
|
||||
if (o->mux_preload) {
|
||||
av_dict_set_int(&of->opts, "preload", o->mux_preload*AV_TIME_BASE, 0);
|
||||
uint8_t buf[64];
|
||||
snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
|
||||
av_dict_set(&of->opts, "preload", buf, 0);
|
||||
}
|
||||
oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
|
||||
|
||||
@@ -2211,8 +2166,7 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
for (j = 0; j < nb_input_files; j++) {
|
||||
for (i = 0; i < input_files[j]->nb_streams; i++) {
|
||||
AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
|
||||
if (c->codec_type != AVMEDIA_TYPE_VIDEO ||
|
||||
!c->time_base.num)
|
||||
if (c->codec_type != AVMEDIA_TYPE_VIDEO)
|
||||
continue;
|
||||
fr = c->time_base.den * 1000 / c->time_base.num;
|
||||
if (fr == 25000) {
|
||||
@@ -2245,19 +2199,19 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
|
||||
parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
|
||||
parse_option(o, "r", frame_rates[norm], options);
|
||||
opt_default(NULL, "g", norm == PAL ? "15" : "18");
|
||||
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
opt_default(NULL, "b:v", "1150000");
|
||||
opt_default(NULL, "maxrate", "1150000");
|
||||
opt_default(NULL, "minrate", "1150000");
|
||||
opt_default(NULL, "bufsize", "327680"); // 40*1024*8;
|
||||
av_dict_set(&o->g->codec_opts, "b:v", "1150000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "maxrate", "1150000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "minrate", "1150000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "bufsize", "327680", AV_DICT_DONT_OVERWRITE); // 40*1024*8;
|
||||
|
||||
opt_default(NULL, "b:a", "224000");
|
||||
av_dict_set(&o->g->codec_opts, "b:a", "224000", AV_DICT_DONT_OVERWRITE);
|
||||
parse_option(o, "ar", "44100", options);
|
||||
parse_option(o, "ac", "2", options);
|
||||
|
||||
opt_default(NULL, "packetsize", "2324");
|
||||
opt_default(NULL, "muxrate", "1411200"); // 2352 * 75 * 8;
|
||||
av_dict_set(&o->g->format_opts, "packetsize", "2324", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->format_opts, "muxrate", "1411200", AV_DICT_DONT_OVERWRITE); // 2352 * 75 * 8;
|
||||
|
||||
/* We have to offset the PTS, so that it is consistent with the SCR.
|
||||
SCR starts at 36000, but the first two packs contain only padding
|
||||
@@ -2274,18 +2228,18 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
|
||||
parse_option(o, "r", frame_rates[norm], options);
|
||||
parse_option(o, "pix_fmt", "yuv420p", options);
|
||||
opt_default(NULL, "g", norm == PAL ? "15" : "18");
|
||||
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
opt_default(NULL, "b:v", "2040000");
|
||||
opt_default(NULL, "maxrate", "2516000");
|
||||
opt_default(NULL, "minrate", "0"); // 1145000;
|
||||
opt_default(NULL, "bufsize", "1835008"); // 224*1024*8;
|
||||
opt_default(NULL, "scan_offset", "1");
|
||||
av_dict_set(&o->g->codec_opts, "b:v", "2040000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "maxrate", "2516000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "minrate", "0", AV_DICT_DONT_OVERWRITE); // 1145000;
|
||||
av_dict_set(&o->g->codec_opts, "bufsize", "1835008", AV_DICT_DONT_OVERWRITE); // 224*1024*8;
|
||||
av_dict_set(&o->g->codec_opts, "scan_offset", "1", AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
opt_default(NULL, "b:a", "224000");
|
||||
av_dict_set(&o->g->codec_opts, "b:a", "224000", AV_DICT_DONT_OVERWRITE);
|
||||
parse_option(o, "ar", "44100", options);
|
||||
|
||||
opt_default(NULL, "packetsize", "2324");
|
||||
av_dict_set(&o->g->format_opts, "packetsize", "2324", AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
} else if (!strcmp(arg, "dvd")) {
|
||||
|
||||
@@ -2296,17 +2250,17 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
|
||||
parse_option(o, "r", frame_rates[norm], options);
|
||||
parse_option(o, "pix_fmt", "yuv420p", options);
|
||||
opt_default(NULL, "g", norm == PAL ? "15" : "18");
|
||||
av_dict_set(&o->g->codec_opts, "g", norm == PAL ? "15" : "18", AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
opt_default(NULL, "b:v", "6000000");
|
||||
opt_default(NULL, "maxrate", "9000000");
|
||||
opt_default(NULL, "minrate", "0"); // 1500000;
|
||||
opt_default(NULL, "bufsize", "1835008"); // 224*1024*8;
|
||||
av_dict_set(&o->g->codec_opts, "b:v", "6000000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "maxrate", "9000000", AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_set(&o->g->codec_opts, "minrate", "0", AV_DICT_DONT_OVERWRITE); // 1500000;
|
||||
av_dict_set(&o->g->codec_opts, "bufsize", "1835008", AV_DICT_DONT_OVERWRITE); // 224*1024*8;
|
||||
|
||||
opt_default(NULL, "packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
|
||||
opt_default(NULL, "muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
|
||||
av_dict_set(&o->g->format_opts, "packetsize", "2048", AV_DICT_DONT_OVERWRITE); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
|
||||
av_dict_set(&o->g->format_opts, "muxrate", "10080000", AV_DICT_DONT_OVERWRITE); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
|
||||
|
||||
opt_default(NULL, "b:a", "448000");
|
||||
av_dict_set(&o->g->codec_opts, "b:a", "448000", AV_DICT_DONT_OVERWRITE);
|
||||
parse_option(o, "ar", "48000", options);
|
||||
|
||||
} else if (!strncmp(arg, "dv", 2)) {
|
||||
@@ -2325,10 +2279,6 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
|
||||
av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
av_dict_copy(&o->g->codec_opts, codec_opts, AV_DICT_DONT_OVERWRITE);
|
||||
av_dict_copy(&o->g->format_opts, format_opts, AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -2825,13 +2775,13 @@ const OptionDef options[] = {
|
||||
{ "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC |
|
||||
OPT_EXPERT | OPT_INPUT, { .off = OFFSET(ts_scale) },
|
||||
"set the input ts scale", "scale" },
|
||||
{ "timestamp", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_recording_timestamp },
|
||||
{ "timestamp", HAS_ARG | OPT_PERFILE, { .func_arg = opt_recording_timestamp },
|
||||
"set the recording timestamp ('now' to set the current time)", "time" },
|
||||
{ "metadata", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(metadata) },
|
||||
"add metadata", "string=string" },
|
||||
{ "dframes", HAS_ARG | OPT_PERFILE | OPT_EXPERT |
|
||||
OPT_OUTPUT, { .func_arg = opt_data_frames },
|
||||
"set the number of data frames to output", "number" },
|
||||
"set the number of data frames to record", "number" },
|
||||
{ "benchmark", OPT_BOOL | OPT_EXPERT, { &do_benchmark },
|
||||
"add timings for benchmarking" },
|
||||
{ "benchmark_all", OPT_BOOL | OPT_EXPERT, { &do_benchmark_all },
|
||||
@@ -2860,8 +2810,6 @@ const OptionDef options[] = {
|
||||
"audio drift threshold", "threshold" },
|
||||
{ "copyts", OPT_BOOL | OPT_EXPERT, { ©_ts },
|
||||
"copy timestamps" },
|
||||
{ "start_at_zero", OPT_BOOL | OPT_EXPERT, { &start_at_zero },
|
||||
"shift input timestamps to start at 0 when using copyts" },
|
||||
{ "copytb", HAS_ARG | OPT_INT | OPT_EXPERT, { ©_tb },
|
||||
"copy input stream time base when stream copying", "mode" },
|
||||
{ "shortest", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
|
||||
@@ -2882,7 +2830,7 @@ const OptionDef options[] = {
|
||||
{ "copypriorss", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(copy_prior_start) },
|
||||
"copy or discard frames before start time" },
|
||||
{ "frames", OPT_INT64 | HAS_ARG | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(max_frames) },
|
||||
"set the number of frames to output", "number" },
|
||||
"set the number of frames to record", "number" },
|
||||
{ "tag", OPT_STRING | HAS_ARG | OPT_SPEC |
|
||||
OPT_EXPERT | OPT_OUTPUT | OPT_INPUT, { .off = OFFSET(codec_tags) },
|
||||
"force codec tag/fourcc", "fourcc/tag" },
|
||||
@@ -2924,7 +2872,7 @@ const OptionDef options[] = {
|
||||
|
||||
/* video options */
|
||||
{ "vframes", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_frames },
|
||||
"set the number of video frames to output", "number" },
|
||||
"set the number of video frames to record", "number" },
|
||||
{ "r", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_SPEC |
|
||||
OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_rates) },
|
||||
"set frame rate (Hz value, fraction or abbreviation)", "rate" },
|
||||
@@ -3006,13 +2954,10 @@ const OptionDef options[] = {
|
||||
{ "hwaccel_device", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
|
||||
OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_devices) },
|
||||
"select a device for HW acceleration" "devicename" },
|
||||
#if HAVE_VDPAU_X11
|
||||
{ "vdpau_api_ver", HAS_ARG | OPT_INT | OPT_EXPERT, { &vdpau_api_ver }, "" },
|
||||
#endif
|
||||
|
||||
/* audio options */
|
||||
{ "aframes", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_frames },
|
||||
"set the number of audio frames to output", "number" },
|
||||
"set the number of audio frames to record", "number" },
|
||||
{ "aq", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_qscale },
|
||||
"set audio quality (codec-specific)", "quality", },
|
||||
{ "ar", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC |
|
||||
|
@@ -42,11 +42,9 @@ typedef struct VDPAUContext {
|
||||
VdpGetErrorString *get_error_string;
|
||||
VdpGetInformationString *get_information_string;
|
||||
VdpDeviceDestroy *device_destroy;
|
||||
#if 1 // for ffmpegs older vdpau API, not the oldest though
|
||||
VdpDecoderCreate *decoder_create;
|
||||
VdpDecoderDestroy *decoder_destroy;
|
||||
VdpDecoderRender *decoder_render;
|
||||
#endif
|
||||
VdpVideoSurfaceCreate *video_surface_create;
|
||||
VdpVideoSurfaceDestroy *video_surface_destroy;
|
||||
VdpVideoSurfaceGetBitsYCbCr *video_surface_get_bits;
|
||||
@@ -59,8 +57,6 @@ typedef struct VDPAUContext {
|
||||
VdpYCbCrFormat vdpau_format;
|
||||
} VDPAUContext;
|
||||
|
||||
int vdpau_api_ver = 2;
|
||||
|
||||
static void vdpau_uninit(AVCodecContext *s)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
@@ -243,11 +239,9 @@ do {
|
||||
GET_CALLBACK(VDP_FUNC_ID_GET_ERROR_STRING, get_error_string);
|
||||
GET_CALLBACK(VDP_FUNC_ID_GET_INFORMATION_STRING, get_information_string);
|
||||
GET_CALLBACK(VDP_FUNC_ID_DEVICE_DESTROY, device_destroy);
|
||||
if (vdpau_api_ver == 1) {
|
||||
GET_CALLBACK(VDP_FUNC_ID_DECODER_CREATE, decoder_create);
|
||||
GET_CALLBACK(VDP_FUNC_ID_DECODER_DESTROY, decoder_destroy);
|
||||
GET_CALLBACK(VDP_FUNC_ID_DECODER_RENDER, decoder_render);
|
||||
}
|
||||
GET_CALLBACK(VDP_FUNC_ID_DECODER_CREATE, decoder_create);
|
||||
GET_CALLBACK(VDP_FUNC_ID_DECODER_DESTROY, decoder_destroy);
|
||||
GET_CALLBACK(VDP_FUNC_ID_DECODER_RENDER, decoder_render);
|
||||
GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_CREATE, video_surface_create);
|
||||
GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_DESTROY, video_surface_destroy);
|
||||
GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR, video_surface_get_bits);
|
||||
@@ -276,16 +270,12 @@ do {
|
||||
ctx->vdpau_format = vdpau_formats[i][0];
|
||||
ctx->pix_fmt = vdpau_formats[i][1];
|
||||
|
||||
if (vdpau_api_ver == 1) {
|
||||
vdpau_ctx = av_vdpau_alloc_context();
|
||||
if (!vdpau_ctx)
|
||||
goto fail;
|
||||
vdpau_ctx->render = ctx->decoder_render;
|
||||
|
||||
s->hwaccel_context = vdpau_ctx;
|
||||
} else
|
||||
if (av_vdpau_bind_context(s, ctx->device, ctx->get_proc_address, 0))
|
||||
vdpau_ctx = av_vdpau_alloc_context();
|
||||
if (!vdpau_ctx)
|
||||
goto fail;
|
||||
vdpau_ctx->render = ctx->decoder_render;
|
||||
|
||||
s->hwaccel_context = vdpau_ctx;
|
||||
|
||||
ctx->get_information_string(&vendor);
|
||||
av_log(NULL, AV_LOG_VERBOSE, "Using VDPAU -- %s -- on X11 display %s, "
|
||||
@@ -301,7 +291,7 @@ fail:
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
static int vdpau_old_init(AVCodecContext *s)
|
||||
int vdpau_init(AVCodecContext *s)
|
||||
{
|
||||
InputStream *ist = s->opaque;
|
||||
int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
|
||||
@@ -343,22 +333,3 @@ static int vdpau_old_init(AVCodecContext *s)

    return 0;
}

int vdpau_init(AVCodecContext *s)
{
    InputStream *ist = s->opaque;

    if (vdpau_api_ver == 1)
        return vdpau_old_init(s);

    if (!ist->hwaccel_ctx) {
        int ret = vdpau_alloc(s);
        if (ret < 0)
            return ret;
    }

    ist->hwaccel_get_buffer    = vdpau_get_buffer;
    ist->hwaccel_retrieve_data = vdpau_retrieve_data;

    return 0;
}
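The hunks above retire ffmpeg's hand-rolled AVVDPAUContext setup whenever vdpau_api_ver is left at its default of 2 and rely on the libavcodec helper instead. A minimal sketch of that newer path, assuming a VdpDevice and VdpGetProcAddress pointer have already been obtained (e.g. from vdp_device_create_x11()); the wrapper name attach_vdpau is illustrative and not part of the patch:

```c
#include <libavcodec/avcodec.h>
#include <libavcodec/vdpau.h>

/* Attach an existing VDPAU device to a decoder context.  With the
 * API-version-2 path libavcodec allocates and owns the AVVDPAUContext
 * internally, so no av_vdpau_alloc_context()/hwaccel_context juggling
 * is needed anymore. */
static int attach_vdpau(AVCodecContext *avctx, VdpDevice device,
                        VdpGetProcAddress *get_proc_address)
{
    return av_vdpau_bind_context(avctx, device, get_proc_address, 0);
}
```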
|
||||
|
164
ffprobe.c
@@ -33,7 +33,6 @@
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/bprint.h"
|
||||
#include "libavutil/hash.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "libavutil/dict.h"
|
||||
@@ -66,9 +65,6 @@ static int do_show_stream_disposition = 0;
|
||||
static int do_show_data = 0;
|
||||
static int do_show_program_version = 0;
|
||||
static int do_show_library_versions = 0;
|
||||
static int do_show_pixel_formats = 0;
|
||||
static int do_show_pixel_format_flags = 0;
|
||||
static int do_show_pixel_format_components = 0;
|
||||
|
||||
static int do_show_chapter_tags = 0;
|
||||
static int do_show_format_tags = 0;
|
||||
@@ -84,7 +80,6 @@ static int show_private_data = 1;
|
||||
|
||||
static char *print_format;
|
||||
static char *stream_specifier;
|
||||
static char *show_data_hash;
|
||||
|
||||
typedef struct {
|
||||
int id; ///< identifier
|
||||
@@ -128,18 +123,11 @@ typedef enum {
|
||||
SECTION_ID_FRAME,
|
||||
SECTION_ID_FRAMES,
|
||||
SECTION_ID_FRAME_TAGS,
|
||||
SECTION_ID_FRAME_SIDE_DATA_LIST,
|
||||
SECTION_ID_FRAME_SIDE_DATA,
|
||||
SECTION_ID_LIBRARY_VERSION,
|
||||
SECTION_ID_LIBRARY_VERSIONS,
|
||||
SECTION_ID_PACKET,
|
||||
SECTION_ID_PACKETS,
|
||||
SECTION_ID_PACKETS_AND_FRAMES,
|
||||
SECTION_ID_PIXEL_FORMAT,
|
||||
SECTION_ID_PIXEL_FORMAT_FLAGS,
|
||||
SECTION_ID_PIXEL_FORMAT_COMPONENT,
|
||||
SECTION_ID_PIXEL_FORMAT_COMPONENTS,
|
||||
SECTION_ID_PIXEL_FORMATS,
|
||||
SECTION_ID_PROGRAM_STREAM_DISPOSITION,
|
||||
SECTION_ID_PROGRAM_STREAM_TAGS,
|
||||
SECTION_ID_PROGRAM,
|
||||
@@ -164,20 +152,13 @@ static struct section sections[] = {
|
||||
[SECTION_ID_FORMAT] = { SECTION_ID_FORMAT, "format", 0, { SECTION_ID_FORMAT_TAGS, -1 } },
|
||||
[SECTION_ID_FORMAT_TAGS] = { SECTION_ID_FORMAT_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "format_tags" },
|
||||
[SECTION_ID_FRAMES] = { SECTION_ID_FRAMES, "frames", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME, SECTION_ID_SUBTITLE, -1 } },
|
||||
[SECTION_ID_FRAME] = { SECTION_ID_FRAME, "frame", 0, { SECTION_ID_FRAME_TAGS, SECTION_ID_FRAME_SIDE_DATA_LIST, -1 } },
|
||||
[SECTION_ID_FRAME] = { SECTION_ID_FRAME, "frame", 0, { SECTION_ID_FRAME_TAGS, -1 } },
|
||||
[SECTION_ID_FRAME_TAGS] = { SECTION_ID_FRAME_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "frame_tags" },
|
||||
[SECTION_ID_FRAME_SIDE_DATA_LIST] ={ SECTION_ID_FRAME_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_FRAME_SIDE_DATA, -1 } },
|
||||
[SECTION_ID_FRAME_SIDE_DATA] = { SECTION_ID_FRAME_SIDE_DATA, "side_data", 0, { -1 } },
|
||||
[SECTION_ID_LIBRARY_VERSIONS] = { SECTION_ID_LIBRARY_VERSIONS, "library_versions", SECTION_FLAG_IS_ARRAY, { SECTION_ID_LIBRARY_VERSION, -1 } },
|
||||
[SECTION_ID_LIBRARY_VERSION] = { SECTION_ID_LIBRARY_VERSION, "library_version", 0, { -1 } },
|
||||
[SECTION_ID_PACKETS] = { SECTION_ID_PACKETS, "packets", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET, -1} },
|
||||
[SECTION_ID_PACKETS_AND_FRAMES] = { SECTION_ID_PACKETS_AND_FRAMES, "packets_and_frames", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET, -1} },
|
||||
[SECTION_ID_PACKET] = { SECTION_ID_PACKET, "packet", 0, { -1 } },
|
||||
[SECTION_ID_PIXEL_FORMATS] = { SECTION_ID_PIXEL_FORMATS, "pixel_formats", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PIXEL_FORMAT, -1 } },
|
||||
[SECTION_ID_PIXEL_FORMAT] = { SECTION_ID_PIXEL_FORMAT, "pixel_format", 0, { SECTION_ID_PIXEL_FORMAT_FLAGS, SECTION_ID_PIXEL_FORMAT_COMPONENTS, -1 } },
|
||||
[SECTION_ID_PIXEL_FORMAT_FLAGS] = { SECTION_ID_PIXEL_FORMAT_FLAGS, "flags", 0, { -1 }, .unique_name = "pixel_format_flags" },
|
||||
[SECTION_ID_PIXEL_FORMAT_COMPONENTS] = { SECTION_ID_PIXEL_FORMAT_COMPONENTS, "components", SECTION_FLAG_IS_ARRAY, {SECTION_ID_PIXEL_FORMAT_COMPONENT, -1 }, .unique_name = "pixel_format_components" },
|
||||
[SECTION_ID_PIXEL_FORMAT_COMPONENT] = { SECTION_ID_PIXEL_FORMAT_COMPONENT, "component", 0, { -1 } },
|
||||
[SECTION_ID_PROGRAM_STREAM_DISPOSITION] = { SECTION_ID_PROGRAM_STREAM_DISPOSITION, "disposition", 0, { -1 }, .unique_name = "program_stream_disposition" },
|
||||
[SECTION_ID_PROGRAM_STREAM_TAGS] = { SECTION_ID_PROGRAM_STREAM_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "program_stream_tags" },
|
||||
[SECTION_ID_PROGRAM] = { SECTION_ID_PROGRAM, "program", 0, { SECTION_ID_PROGRAM_TAGS, SECTION_ID_PROGRAM_STREAMS, -1 } },
|
||||
@@ -188,8 +169,7 @@ static struct section sections[] = {
|
||||
[SECTION_ID_PROGRAMS] = { SECTION_ID_PROGRAMS, "programs", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PROGRAM, -1 } },
|
||||
[SECTION_ID_ROOT] = { SECTION_ID_ROOT, "root", SECTION_FLAG_IS_WRAPPER,
|
||||
{ SECTION_ID_CHAPTERS, SECTION_ID_FORMAT, SECTION_ID_FRAMES, SECTION_ID_PROGRAMS, SECTION_ID_STREAMS,
|
||||
SECTION_ID_PACKETS, SECTION_ID_ERROR, SECTION_ID_PROGRAM_VERSION, SECTION_ID_LIBRARY_VERSIONS,
|
||||
SECTION_ID_PIXEL_FORMATS, -1} },
|
||||
SECTION_ID_PACKETS, SECTION_ID_ERROR, SECTION_ID_PROGRAM_VERSION, SECTION_ID_LIBRARY_VERSIONS, -1} },
|
||||
[SECTION_ID_STREAMS] = { SECTION_ID_STREAMS, "streams", SECTION_FLAG_IS_ARRAY, { SECTION_ID_STREAM, -1 } },
|
||||
[SECTION_ID_STREAM] = { SECTION_ID_STREAM, "stream", 0, { SECTION_ID_STREAM_DISPOSITION, SECTION_ID_STREAM_TAGS, -1 } },
|
||||
[SECTION_ID_STREAM_DISPOSITION] = { SECTION_ID_STREAM_DISPOSITION, "disposition", 0, { -1 }, .unique_name = "stream_disposition" },
|
||||
@@ -203,8 +183,6 @@ static const OptionDef *options;
|
||||
static const char *input_filename;
|
||||
static AVInputFormat *iformat = NULL;
|
||||
|
||||
static struct AVHashContext *hash;
|
||||
|
||||
static const char *const binary_unit_prefixes [] = { "", "Ki", "Mi", "Gi", "Ti", "Pi" };
|
||||
static const char *const decimal_unit_prefixes[] = { "", "K" , "M" , "G" , "T" , "P" };
|
||||
|
||||
@@ -703,21 +681,6 @@ static void writer_print_data(WriterContext *wctx, const char *name,
|
||||
av_bprint_finalize(&bp, NULL);
|
||||
}
|
||||
|
||||
static void writer_print_data_hash(WriterContext *wctx, const char *name,
|
||||
uint8_t *data, int size)
|
||||
{
|
||||
char *p, buf[AV_HASH_MAX_SIZE * 2 + 64] = { 0 };
|
||||
|
||||
if (!hash)
|
||||
return;
|
||||
av_hash_init(hash);
|
||||
av_hash_update(hash, data, size);
|
||||
snprintf(buf, sizeof(buf), "%s:", av_hash_get_name(hash));
|
||||
p = buf + strlen(buf);
|
||||
av_hash_final_hex(hash, p, buf + sizeof(buf) - p);
|
||||
writer_print_string(wctx, name, buf, 0);
|
||||
}
|
||||
|
||||
#define MAX_REGISTERED_WRITERS_NB 64
|
||||
|
||||
static const Writer *registered_writers[MAX_REGISTERED_WRITERS_NB + 1];
|
||||
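The new writer_print_data_hash() above is a thin wrapper around libavutil's generic hash API. A stand-alone sketch of the same call sequence (the function name print_buffer_hash and the choice of "SHA256" are illustrative; any name returned by av_hash_names() works):

```c
#include <stdio.h>
#include <string.h>
#include <libavutil/hash.h>

/* Hash an arbitrary buffer and print "NAME:hexdigest", mirroring what
 * writer_print_data_hash() emits for packet data and extradata. */
static int print_buffer_hash(const uint8_t *data, int size)
{
    struct AVHashContext *hash = NULL;
    char *p, buf[AV_HASH_MAX_SIZE * 2 + 64] = { 0 };
    int ret = av_hash_alloc(&hash, "SHA256");
    if (ret < 0)
        return ret;

    av_hash_init(hash);
    av_hash_update(hash, data, size);
    snprintf(buf, sizeof(buf), "%s:", av_hash_get_name(hash));
    p = buf + strlen(buf);
    av_hash_final_hex(hash, (uint8_t *)p, buf + sizeof(buf) - p);
    printf("%s\n", buf);

    av_hash_freep(&hash);
    return 0;
}
```

In the tool itself this is exposed through the new -show_data_hash option added further down, e.g. `ffprobe -show_packets -show_data_hash SHA256 input.mkv`.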
@@ -1725,7 +1688,6 @@ static void show_packet(WriterContext *w, AVFormatContext *fmt_ctx, AVPacket *pk
|
||||
print_fmt("flags", "%c", pkt->flags & AV_PKT_FLAG_KEY ? 'K' : '_');
|
||||
if (do_show_data)
|
||||
writer_print_data(w, "data", pkt->data, pkt->size);
|
||||
writer_print_data_hash(w, "data_hash", pkt->data, pkt->size);
|
||||
writer_print_section_footer(w);
|
||||
|
||||
av_bprint_finalize(&pbuf, NULL);
|
||||
@@ -1760,7 +1722,6 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
|
||||
{
|
||||
AVBPrint pbuf;
|
||||
const char *s;
|
||||
int i;
|
||||
|
||||
av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
|
||||
|
||||
@@ -1823,20 +1784,6 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
|
||||
}
|
||||
if (do_show_frame_tags)
|
||||
show_tags(w, av_frame_get_metadata(frame), SECTION_ID_FRAME_TAGS);
|
||||
if (frame->nb_side_data) {
|
||||
writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_LIST);
|
||||
for (i = 0; i < frame->nb_side_data; i++) {
|
||||
AVFrameSideData *sd = frame->side_data[i];
|
||||
const char *name;
|
||||
|
||||
writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA);
|
||||
name = av_frame_side_data_name(sd->type);
|
||||
print_str("side_data_type", name ? name : "unknown");
|
||||
print_int("side_data_size", sd->size);
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
|
||||
writer_print_section_footer(w);
|
||||
|
||||
@@ -2112,28 +2059,12 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
|
||||
else print_str_opt("pix_fmt", "unknown");
|
||||
print_int("level", dec_ctx->level);
|
||||
if (dec_ctx->color_range != AVCOL_RANGE_UNSPECIFIED)
|
||||
print_str ("color_range", av_color_range_name(dec_ctx->color_range));
|
||||
print_str ("color_range", dec_ctx->color_range == AVCOL_RANGE_MPEG ? "tv": "pc");
|
||||
else
|
||||
print_str_opt("color_range", "N/A");
|
||||
s = av_get_colorspace_name(dec_ctx->colorspace);
|
||||
if (s) print_str ("color_space", s);
|
||||
else print_str_opt("color_space", "unknown");
|
||||
|
||||
if (dec_ctx->color_trc != AVCOL_TRC_UNSPECIFIED)
|
||||
print_str("color_transfer", av_color_transfer_name(dec_ctx->color_trc));
|
||||
else
|
||||
print_str_opt("color_transfer", av_color_transfer_name(dec_ctx->color_trc));
|
||||
|
||||
if (dec_ctx->color_primaries != AVCOL_PRI_UNSPECIFIED)
|
||||
print_str("color_primaries", av_color_primaries_name(dec_ctx->color_primaries));
|
||||
else
|
||||
print_str_opt("color_primaries", av_color_primaries_name(dec_ctx->color_primaries));
|
||||
|
||||
if (dec_ctx->chroma_sample_location != AVCHROMA_LOC_UNSPECIFIED)
|
||||
print_str("chroma_location", av_chroma_location_name(dec_ctx->chroma_sample_location));
|
||||
else
|
||||
print_str_opt("chroma_location", av_chroma_location_name(dec_ctx->chroma_sample_location));
|
||||
|
||||
if (dec_ctx->timecode_frame_start >= 0) {
|
||||
char tcbuf[AV_TIMECODE_STR_SIZE];
|
||||
av_timecode_make_mpeg_tc_string(tcbuf, dec_ctx->timecode_frame_start);
|
||||
@@ -2200,8 +2131,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
|
||||
else print_str_opt("bit_rate", "N/A");
|
||||
if (dec_ctx->rc_max_rate > 0) print_val ("max_bit_rate", dec_ctx->rc_max_rate, unit_bit_per_second_str);
|
||||
else print_str_opt("max_bit_rate", "N/A");
|
||||
if (dec_ctx->bits_per_raw_sample > 0) print_fmt("bits_per_raw_sample", "%d", dec_ctx->bits_per_raw_sample);
|
||||
else print_str_opt("bits_per_raw_sample", "N/A");
|
||||
if (stream->nb_frames) print_fmt ("nb_frames", "%"PRId64, stream->nb_frames);
|
||||
else print_str_opt("nb_frames", "N/A");
|
||||
if (nb_streams_frames[stream_idx]) print_fmt ("nb_read_frames", "%"PRIu64, nb_streams_frames[stream_idx]);
|
||||
@@ -2211,8 +2140,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
|
||||
if (do_show_data)
|
||||
writer_print_data(w, "extradata", dec_ctx->extradata,
|
||||
dec_ctx->extradata_size);
|
||||
writer_print_data_hash(w, "extradata_hash", dec_ctx->extradata,
|
||||
dec_ctx->extradata_size);
|
||||
|
||||
/* Print disposition information */
|
||||
#define PRINT_DISPOSITION(flagname, name) do { \
|
||||
@@ -2386,19 +2313,12 @@ static int open_input_file(AVFormatContext **fmt_ctx_ptr, const char *filename)
|
||||
AVFormatContext *fmt_ctx = NULL;
|
||||
AVDictionaryEntry *t;
|
||||
AVDictionary **opts;
|
||||
int scan_all_pmts_set = 0;
|
||||
|
||||
if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
|
||||
av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
|
||||
scan_all_pmts_set = 1;
|
||||
}
|
||||
if ((err = avformat_open_input(&fmt_ctx, filename,
|
||||
iformat, &format_opts)) < 0) {
|
||||
print_error(filename, err);
|
||||
return err;
|
||||
}
|
||||
if (scan_all_pmts_set)
|
||||
av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
|
||||
if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
|
||||
return AVERROR_OPTION_NOT_FOUND;
|
||||
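The open_input_file() change above is the usual probing pattern for MPEG-TS: temporarily force the demuxer's scan_all_pmts option unless the caller already set it, then clear it again so it is not later reported as an unrecognised option. A reduced sketch of the same logic outside ffprobe (the wrapper name open_probe_input is illustrative):

```c
#include <libavformat/avformat.h>

/* Open an input while transparently enabling scan_all_pmts, as
 * ffprobe/ffmpeg do; 'format_opts' may carry user-supplied demuxer options. */
static int open_probe_input(AVFormatContext **fmt_ctx, const char *filename,
                            AVInputFormat *iformat, AVDictionary **format_opts)
{
    int scan_all_pmts_set = 0, err;

    if (!av_dict_get(*format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
        av_dict_set(format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
        scan_all_pmts_set = 1;
    }
    err = avformat_open_input(fmt_ctx, filename, iformat, format_opts);
    if (scan_all_pmts_set)  /* restore the dictionary to what the user asked for */
        av_dict_set(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
    return err;
}
```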
@@ -2594,58 +2514,6 @@ static void ffprobe_show_library_versions(WriterContext *w)
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
|
||||
#define PRINT_PIX_FMT_FLAG(flagname, name) \
|
||||
do { \
|
||||
print_int(name, !!(pixdesc->flags & AV_PIX_FMT_FLAG_##flagname)); \
|
||||
} while (0)
|
||||
|
||||
static void ffprobe_show_pixel_formats(WriterContext *w)
|
||||
{
|
||||
const AVPixFmtDescriptor *pixdesc = NULL;
|
||||
int i, n;
|
||||
|
||||
writer_print_section_header(w, SECTION_ID_PIXEL_FORMATS);
|
||||
while (pixdesc = av_pix_fmt_desc_next(pixdesc)) {
|
||||
writer_print_section_header(w, SECTION_ID_PIXEL_FORMAT);
|
||||
print_str("name", pixdesc->name);
|
||||
print_int("nb_components", pixdesc->nb_components);
|
||||
if ((pixdesc->nb_components >= 3) && !(pixdesc->flags & AV_PIX_FMT_FLAG_RGB)) {
|
||||
print_int ("log2_chroma_w", pixdesc->log2_chroma_w);
|
||||
print_int ("log2_chroma_h", pixdesc->log2_chroma_h);
|
||||
} else {
|
||||
print_str_opt("log2_chroma_w", "N/A");
|
||||
print_str_opt("log2_chroma_h", "N/A");
|
||||
}
|
||||
n = av_get_bits_per_pixel(pixdesc);
|
||||
if (n) print_int ("bits_per_pixel", n);
|
||||
else print_str_opt("bits_per_pixel", "N/A");
|
||||
if (do_show_pixel_format_flags) {
|
||||
writer_print_section_header(w, SECTION_ID_PIXEL_FORMAT_FLAGS);
|
||||
PRINT_PIX_FMT_FLAG(BE, "big_endian");
|
||||
PRINT_PIX_FMT_FLAG(PAL, "palette");
|
||||
PRINT_PIX_FMT_FLAG(BITSTREAM, "bitstream");
|
||||
PRINT_PIX_FMT_FLAG(HWACCEL, "hwaccel");
|
||||
PRINT_PIX_FMT_FLAG(PLANAR, "planar");
|
||||
PRINT_PIX_FMT_FLAG(RGB, "rgb");
|
||||
PRINT_PIX_FMT_FLAG(PSEUDOPAL, "pseudopal");
|
||||
PRINT_PIX_FMT_FLAG(ALPHA, "alpha");
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
if (do_show_pixel_format_components && (pixdesc->nb_components > 0)) {
|
||||
writer_print_section_header(w, SECTION_ID_PIXEL_FORMAT_COMPONENTS);
|
||||
for (i = 0; i < pixdesc->nb_components; i++) {
|
||||
writer_print_section_header(w, SECTION_ID_PIXEL_FORMAT_COMPONENT);
|
||||
print_int("index", i + 1);
|
||||
print_int("bit_depth", pixdesc->comp[i].depth_minus1 + 1);
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
|
||||
static int opt_format(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
iformat = av_find_input_format(arg);
|
||||
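ffprobe_show_pixel_formats() above is built on libavutil's pixel-format descriptor iterator. A small stand-alone loop over the same data, producing roughly what `ffprobe -show_pixel_formats` reports (the output format here is illustrative):

```c
#include <stdio.h>
#include <libavutil/pixdesc.h>

/* List every pixel format known to libavutil with its component count
 * and bits per pixel. */
int main(void)
{
    const AVPixFmtDescriptor *pixdesc = NULL;
    while ((pixdesc = av_pix_fmt_desc_next(pixdesc))) {
        printf("%-16s components=%d bits_per_pixel=%d\n",
               pixdesc->name, pixdesc->nb_components,
               av_get_bits_per_pixel(pixdesc));
    }
    return 0;
}
```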
@@ -2979,7 +2847,6 @@ DEFINE_OPT_SHOW_SECTION(format, FORMAT);
|
||||
DEFINE_OPT_SHOW_SECTION(frames, FRAMES);
|
||||
DEFINE_OPT_SHOW_SECTION(library_versions, LIBRARY_VERSIONS);
|
||||
DEFINE_OPT_SHOW_SECTION(packets, PACKETS);
|
||||
DEFINE_OPT_SHOW_SECTION(pixel_formats, PIXEL_FORMATS);
|
||||
DEFINE_OPT_SHOW_SECTION(program_version, PROGRAM_VERSION);
|
||||
DEFINE_OPT_SHOW_SECTION(streams, STREAMS);
|
||||
DEFINE_OPT_SHOW_SECTION(programs, PROGRAMS);
|
||||
@@ -3001,7 +2868,6 @@ static const OptionDef real_options[] = {
|
||||
{ "select_streams", OPT_STRING | HAS_ARG, {(void*)&stream_specifier}, "select the specified streams", "stream_specifier" },
|
||||
{ "sections", OPT_EXIT, {.func_arg = opt_sections}, "print sections structure and section information, and exit" },
|
||||
{ "show_data", OPT_BOOL, {(void*)&do_show_data}, "show packets data" },
|
||||
{ "show_data_hash", OPT_STRING | HAS_ARG, {(void*)&show_data_hash}, "show packets data hash" },
|
||||
{ "show_error", 0, {(void*)&opt_show_error}, "show probing error" },
|
||||
{ "show_format", 0, {(void*)&opt_show_format}, "show format/container info" },
|
||||
{ "show_frames", 0, {(void*)&opt_show_frames}, "show frames info" },
|
||||
@@ -3018,7 +2884,6 @@ static const OptionDef real_options[] = {
|
||||
{ "show_program_version", 0, {(void*)&opt_show_program_version}, "show ffprobe version" },
|
||||
{ "show_library_versions", 0, {(void*)&opt_show_library_versions}, "show library versions" },
|
||||
{ "show_versions", 0, {(void*)&opt_show_versions}, "show program and library versions" },
|
||||
{ "show_pixel_formats", 0, {(void*)&opt_show_pixel_formats}, "show pixel format descriptions" },
|
||||
{ "show_private_data", OPT_BOOL, {(void*)&show_private_data}, "show private data" },
|
||||
{ "private", OPT_BOOL, {(void*)&show_private_data}, "same as show_private_data" },
|
||||
{ "bitexact", OPT_BOOL, {&do_bitexact}, "force bitexact output" },
|
||||
@@ -3075,9 +2940,6 @@ int main(int argc, char **argv)
|
||||
SET_DO_SHOW(FRAMES, frames);
|
||||
SET_DO_SHOW(LIBRARY_VERSIONS, library_versions);
|
||||
SET_DO_SHOW(PACKETS, packets);
|
||||
SET_DO_SHOW(PIXEL_FORMATS, pixel_formats);
|
||||
SET_DO_SHOW(PIXEL_FORMAT_FLAGS, pixel_format_flags);
|
||||
SET_DO_SHOW(PIXEL_FORMAT_COMPONENTS, pixel_format_components);
|
||||
SET_DO_SHOW(PROGRAM_VERSION, program_version);
|
||||
SET_DO_SHOW(PROGRAMS, programs);
|
||||
SET_DO_SHOW(STREAMS, streams);
|
||||
@@ -3109,21 +2971,6 @@ int main(int argc, char **argv)
|
||||
w_name = av_strtok(print_format, "=", &buf);
|
||||
w_args = buf;
|
||||
|
||||
if (show_data_hash) {
|
||||
if ((ret = av_hash_alloc(&hash, show_data_hash)) < 0) {
|
||||
if (ret == AVERROR(EINVAL)) {
|
||||
const char *n;
|
||||
av_log(NULL, AV_LOG_ERROR,
|
||||
"Unknown hash algorithm '%s'\nKnown algorithms:",
|
||||
show_data_hash);
|
||||
for (i = 0; (n = av_hash_names(i)); i++)
|
||||
av_log(NULL, AV_LOG_ERROR, " %s", n);
|
||||
av_log(NULL, AV_LOG_ERROR, "\n");
|
||||
}
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
w = writer_get_by_name(w_name);
|
||||
if (!w) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Unknown output format with name '%s'\n", w_name);
|
||||
@@ -3142,12 +2989,10 @@ int main(int argc, char **argv)
|
||||
ffprobe_show_program_version(wctx);
|
||||
if (do_show_library_versions)
|
||||
ffprobe_show_library_versions(wctx);
|
||||
if (do_show_pixel_formats)
|
||||
ffprobe_show_pixel_formats(wctx);
|
||||
|
||||
if (!input_filename &&
|
||||
((do_show_format || do_show_programs || do_show_streams || do_show_chapters || do_show_packets || do_show_error) ||
|
||||
(!do_show_program_version && !do_show_library_versions && !do_show_pixel_formats))) {
|
||||
(!do_show_program_version && !do_show_library_versions))) {
|
||||
show_usage();
|
||||
av_log(NULL, AV_LOG_ERROR, "You have to specify one input file.\n");
|
||||
av_log(NULL, AV_LOG_ERROR, "Use -h to get full help or, even better, run 'man %s'.\n", program_name);
|
||||
@@ -3165,7 +3010,6 @@ int main(int argc, char **argv)
|
||||
end:
|
||||
av_freep(&print_format);
|
||||
av_freep(&read_intervals);
|
||||
av_hash_freep(&hash);
|
||||
|
||||
uninit_opts();
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sections); i++)
|
||||
|
1299
ffserver.c
File diff suppressed because it is too large
1260
ffserver_config.c
File diff suppressed because it is too large
@@ -1,133 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef FFSERVER_CONFIG_H
|
||||
#define FFSERVER_CONFIG_H
|
||||
|
||||
#include "libavutil/dict.h"
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavformat/network.h"
|
||||
|
||||
#define FFSERVER_MAX_STREAMS 20
|
||||
|
||||
/* each generated stream is described here */
|
||||
enum FFServerStreamType {
|
||||
STREAM_TYPE_LIVE,
|
||||
STREAM_TYPE_STATUS,
|
||||
STREAM_TYPE_REDIRECT,
|
||||
};
|
||||
|
||||
enum FFServerIPAddressAction {
|
||||
IP_ALLOW = 1,
|
||||
IP_DENY,
|
||||
};
|
||||
|
||||
typedef struct FFServerIPAddressACL {
|
||||
struct FFServerIPAddressACL *next;
|
||||
enum FFServerIPAddressAction action;
|
||||
/* These are in host order */
|
||||
struct in_addr first;
|
||||
struct in_addr last;
|
||||
} FFServerIPAddressACL;
|
||||
|
||||
/* description of each stream of the ffserver.conf file */
|
||||
typedef struct FFServerStream {
|
||||
enum FFServerStreamType stream_type;
|
||||
char filename[1024]; /* stream filename */
|
||||
struct FFServerStream *feed; /* feed we are using (can be null if coming from file) */
|
||||
AVDictionary *in_opts; /* input parameters */
|
||||
AVDictionary *metadata; /* metadata to set on the stream */
|
||||
AVInputFormat *ifmt; /* if non NULL, force input format */
|
||||
AVOutputFormat *fmt;
|
||||
FFServerIPAddressACL *acl;
|
||||
char dynamic_acl[1024];
|
||||
int nb_streams;
|
||||
int prebuffer; /* Number of milliseconds early to start */
|
||||
int64_t max_time; /* Number of milliseconds to run */
|
||||
int send_on_key;
|
||||
AVStream *streams[FFSERVER_MAX_STREAMS];
|
||||
int feed_streams[FFSERVER_MAX_STREAMS]; /* index of streams in the feed */
|
||||
char feed_filename[1024]; /* file name of the feed storage, or
|
||||
input file name for a stream */
|
||||
pid_t pid; /* Of ffmpeg process */
|
||||
time_t pid_start; /* Of ffmpeg process */
|
||||
char **child_argv;
|
||||
struct FFServerStream *next;
|
||||
unsigned bandwidth; /* bandwidth, in kbits/s */
|
||||
/* RTSP options */
|
||||
char *rtsp_option;
|
||||
/* multicast specific */
|
||||
int is_multicast;
|
||||
struct in_addr multicast_ip;
|
||||
int multicast_port; /* first port used for multicast */
|
||||
int multicast_ttl;
|
||||
int loop; /* if true, send the stream in loops (only meaningful if file) */
|
||||
|
||||
/* feed specific */
|
||||
int feed_opened; /* true if someone is writing to the feed */
|
||||
int is_feed; /* true if it is a feed */
|
||||
int readonly; /* True if writing is prohibited to the file */
|
||||
int truncate; /* True if feeder connection truncate the feed file */
|
||||
int conns_served;
|
||||
int64_t bytes_served;
|
||||
int64_t feed_max_size; /* maximum storage size, zero means unlimited */
|
||||
int64_t feed_write_index; /* current write position in feed (it wraps around) */
|
||||
int64_t feed_size; /* current size of feed */
|
||||
struct FFServerStream *next_feed;
|
||||
} FFServerStream;
|
||||
|
||||
typedef struct FFServerConfig {
|
||||
char *filename;
|
||||
FFServerStream *first_feed; /* contains only feeds */
|
||||
FFServerStream *first_stream; /* contains all streams, including feeds */
|
||||
unsigned int nb_max_http_connections;
|
||||
unsigned int nb_max_connections;
|
||||
uint64_t max_bandwidth;
|
||||
int debug;
|
||||
char logfilename[1024];
|
||||
struct sockaddr_in http_addr;
|
||||
struct sockaddr_in rtsp_addr;
|
||||
int errors;
|
||||
int warnings;
|
||||
int use_defaults;
|
||||
// Following variables MUST NOT be used outside configuration parsing code.
|
||||
enum AVCodecID guessed_audio_codec_id;
|
||||
enum AVCodecID guessed_video_codec_id;
|
||||
AVDictionary *video_opts; /* AVOptions for video encoder */
|
||||
AVDictionary *audio_opts; /* AVOptions for audio encoder */
|
||||
AVCodecContext *dummy_actx; /* Used internally to test audio AVOptions. */
|
||||
AVCodecContext *dummy_vctx; /* Used internally to test video AVOptions. */
|
||||
int no_audio;
|
||||
int no_video;
|
||||
int line_num;
|
||||
int stream_use_defaults;
|
||||
} FFServerConfig;
|
||||
|
||||
void ffserver_get_arg(char *buf, int buf_size, const char **pp);
|
||||
|
||||
void ffserver_parse_acl_row(FFServerStream *stream, FFServerStream* feed,
|
||||
FFServerIPAddressACL *ext_acl,
|
||||
const char *p, const char *filename, int line_num);
|
||||
|
||||
int ffserver_parse_ffconfig(const char *filename, FFServerConfig *config);
|
||||
|
||||
void ffserver_free_child_args(void *argsp);
|
||||
|
||||
#endif /* FFSERVER_CONFIG_H */
|
@@ -38,15 +38,15 @@ static av_cold int zero12v_decode_init(AVCodecContext *avctx)
|
||||
static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
||||
int *got_frame, AVPacket *avpkt)
|
||||
{
|
||||
int line, ret;
|
||||
int line = 0, ret;
|
||||
const int width = avctx->width;
|
||||
AVFrame *pic = data;
|
||||
uint16_t *y, *u, *v;
|
||||
const uint8_t *line_end, *src = avpkt->data;
|
||||
int stride = avctx->width * 8 / 3;
|
||||
|
||||
if (width <= 1 || avctx->height <= 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Dimensions %dx%d not supported.\n", width, avctx->height);
|
||||
if (width == 1) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Width 1 not supported.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
@@ -67,45 +67,45 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
||||
pic->pict_type = AV_PICTURE_TYPE_I;
|
||||
pic->key_frame = 1;
|
||||
|
||||
y = (uint16_t *)pic->data[0];
|
||||
u = (uint16_t *)pic->data[1];
|
||||
v = (uint16_t *)pic->data[2];
|
||||
line_end = avpkt->data + stride;
|
||||
for (line = 0; line < avctx->height; line++) {
|
||||
uint16_t y_temp[6] = {0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000};
|
||||
uint16_t u_temp[3] = {0x8000, 0x8000, 0x8000};
|
||||
uint16_t v_temp[3] = {0x8000, 0x8000, 0x8000};
|
||||
int x;
|
||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
|
||||
for (x = 0; x < width; x += 6) {
|
||||
uint32_t t;
|
||||
|
||||
if (width - x < 6 || line_end - src < 16) {
|
||||
y = y_temp;
|
||||
u = u_temp;
|
||||
v = v_temp;
|
||||
}
|
||||
|
||||
if (line_end - src < 4)
|
||||
break;
|
||||
|
||||
t = AV_RL32(src);
|
||||
while (line++ < avctx->height) {
|
||||
while (1) {
|
||||
uint32_t t = AV_RL32(src);
|
||||
src += 4;
|
||||
*u++ = t << 6 & 0xFFC0;
|
||||
*y++ = t >> 4 & 0xFFC0;
|
||||
*v++ = t >> 14 & 0xFFC0;
|
||||
|
||||
if (line_end - src < 4)
|
||||
if (src >= line_end - 1) {
|
||||
*y = 0x80;
|
||||
src++;
|
||||
line_end += stride;
|
||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
break;
|
||||
}
|
||||
|
||||
t = AV_RL32(src);
|
||||
src += 4;
|
||||
*y++ = t << 6 & 0xFFC0;
|
||||
*u++ = t >> 4 & 0xFFC0;
|
||||
*y++ = t >> 14 & 0xFFC0;
|
||||
|
||||
if (line_end - src < 4)
|
||||
if (src >= line_end - 2) {
|
||||
if (!(width & 1)) {
|
||||
*y = 0x80;
|
||||
src += 2;
|
||||
}
|
||||
line_end += stride;
|
||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
break;
|
||||
}
|
||||
|
||||
t = AV_RL32(src);
|
||||
src += 4;
|
||||
@@ -113,8 +113,15 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
||||
*y++ = t >> 4 & 0xFFC0;
|
||||
*u++ = t >> 14 & 0xFFC0;
|
||||
|
||||
if (line_end - src < 4)
|
||||
if (src >= line_end - 1) {
|
||||
*y = 0x80;
|
||||
src++;
|
||||
line_end += stride;
|
||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
break;
|
||||
}
|
||||
|
||||
t = AV_RL32(src);
|
||||
src += 4;
|
||||
@@ -122,21 +129,18 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
||||
*v++ = t >> 4 & 0xFFC0;
|
||||
*y++ = t >> 14 & 0xFFC0;
|
||||
|
||||
if (width - x < 6)
|
||||
if (src >= line_end - 2) {
|
||||
if (width & 1) {
|
||||
*y = 0x80;
|
||||
src += 2;
|
||||
}
|
||||
line_end += stride;
|
||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (x < width) {
|
||||
y = x + (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = x/2 + (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = x/2 + (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
memcpy(y, y_temp, sizeof(*y) * (width - x));
|
||||
memcpy(u, u_temp, sizeof(*u) * (width - x + 1) / 2);
|
||||
memcpy(v, v_temp, sizeof(*v) * (width - x + 1) / 2);
|
||||
}
|
||||
|
||||
line_end += stride;
|
||||
src = line_end - stride;
|
||||
}
|
||||
|
||||
*got_frame = 1;
|
||||
|
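The rewritten 012v loop above unpacks V210-style words: each little-endian 32-bit word carries three 10-bit components that are left-justified into 16-bit samples, which is where the 0xFFC0 masks come from. A sketch of a single word, using the same shift/mask pattern as the loop (the helper name unpack_word is illustrative):

```c
#include <stdint.h>
#include <libavutil/intreadwrite.h>

/* Expand one 32-bit little-endian word holding three 10-bit components
 * (bits 0-9, 10-19, 20-29) into left-justified 16-bit samples. */
static void unpack_word(const uint8_t *src, uint16_t out[3])
{
    uint32_t t = AV_RL32(src);
    out[0] = t <<  6 & 0xFFC0;   /* bits  0- 9 */
    out[1] = t >>  4 & 0xFFC0;   /* bits 10-19 */
    out[2] = t >> 14 & 0xFFC0;   /* bits 20-29 */
}
```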
@@ -339,25 +339,21 @@ static inline void mcdc(uint16_t *dst, const uint16_t *src, int log2w,
|
||||
}
|
||||
}
|
||||
|
||||
static int decode_p_block(FourXContext *f, uint16_t *dst, const uint16_t *src,
|
||||
static int decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src,
|
||||
int log2w, int log2h, int stride)
|
||||
{
|
||||
int index, h, code, ret, scale = 1;
|
||||
uint16_t *start, *end;
|
||||
const int index = size2index[log2h][log2w];
|
||||
const int h = 1 << log2h;
|
||||
int code = get_vlc2(&f->gb,
|
||||
block_type_vlc[1 - (f->version > 1)][index].table,
|
||||
BLOCK_TYPE_VLC_BITS, 1);
|
||||
uint16_t *start = f->last_frame_buffer;
|
||||
uint16_t *end = start + stride * (f->avctx->height - h + 1) - (1 << log2w);
|
||||
int ret;
|
||||
int scale = 1;
|
||||
unsigned dc = 0;
|
||||
|
||||
av_assert0(log2w >= 0 && log2h >= 0);
|
||||
|
||||
index = size2index[log2h][log2w];
|
||||
av_assert0(index >= 0);
|
||||
|
||||
h = 1 << log2h;
|
||||
code = get_vlc2(&f->gb, block_type_vlc[1 - (f->version > 1)][index].table,
|
||||
BLOCK_TYPE_VLC_BITS, 1);
|
||||
av_assert0(code >= 0 && code <= 6);
|
||||
|
||||
start = f->last_frame_buffer;
|
||||
end = start + stride * (f->avctx->height - h + 1) - (1 << log2w);
|
||||
av_assert0(code >= 0 && code <= 6 && log2w >= 0);
|
||||
|
||||
if (code == 1) {
|
||||
log2h--;
|
||||
|
@@ -101,7 +101,7 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
    }
    if (avpkt->size < (hdr_size + 1) * avctx->channels) {
        av_log(avctx, AV_LOG_ERROR, "packet size is too small\n");
        return AVERROR_INVALIDDATA;
        return AVERROR(EINVAL);
    }

    esc->fib_acc[0] = avpkt->data[1] + 128;
@@ -124,7 +124,7 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
    }
    if (!esc->data[0]) {
        av_log(avctx, AV_LOG_ERROR, "unexpected empty packet\n");
        return AVERROR_INVALIDDATA;
        return AVERROR(EINVAL);
    }

    /* decode next piece of data from the buffer */
|
||||
|
@@ -11,7 +11,6 @@ HEADERS = avcodec.h \
|
||||
vda.h \
|
||||
vdpau.h \
|
||||
version.h \
|
||||
vorbis_parser.h \
|
||||
xvmc.h \
|
||||
|
||||
OBJS = allcodecs.o \
|
||||
@@ -32,10 +31,8 @@ OBJS = allcodecs.o \
|
||||
resample.o \
|
||||
resample2.o \
|
||||
utils.o \
|
||||
vorbis_parser.o \
|
||||
xiph.o \
|
||||
|
||||
# subsystems
|
||||
# parts needed for many different codecs
|
||||
OBJS-$(CONFIG_AANDCTTABLES) += aandcttab.o
|
||||
OBJS-$(CONFIG_AC3DSP) += ac3dsp.o
|
||||
OBJS-$(CONFIG_AUDIO_FRAME_QUEUE) += audio_frame_queue.o
|
||||
@@ -45,11 +42,12 @@ OBJS-$(CONFIG_BSWAPDSP) += bswapdsp.o
|
||||
OBJS-$(CONFIG_CABAC) += cabac.o
|
||||
OBJS-$(CONFIG_CRYSTALHD) += crystalhd.o
|
||||
OBJS-$(CONFIG_DCT) += dct.o dct32_fixed.o dct32_float.o
|
||||
OBJS-$(CONFIG_DSPUTIL) += dsputil.o
|
||||
OBJS-$(CONFIG_DXVA2) += dxva2.o
|
||||
OBJS-$(CONFIG_ERROR_RESILIENCE) += error_resilience.o
|
||||
OBJS-$(CONFIG_EXIF) += exif.o tiff_common.o
|
||||
OBJS-$(CONFIG_FAANDCT) += faandct.o
|
||||
OBJS-$(CONFIG_FAANIDCT) += faanidct.o
|
||||
OBJS-$(CONFIG_FDCTDSP) += fdctdsp.o jfdctfst.o jfdctint.o
|
||||
OBJS-$(CONFIG_FDCTDSP) += fdctdsp.o faandct.o \
|
||||
jfdctfst.o jfdctint.o
|
||||
FFT-OBJS-$(CONFIG_HARDCODED_TABLES) += cos_tables.o cos_fixed_tables.o
|
||||
OBJS-$(CONFIG_FFT) += avfft.o fft_fixed.o fft_float.o \
|
||||
fft_fixed_32.o fft_init_table.o \
|
||||
@@ -57,15 +55,15 @@ OBJS-$(CONFIG_FFT) += avfft.o fft_fixed.o fft_float.o \
|
||||
OBJS-$(CONFIG_GOLOMB) += golomb.o
|
||||
OBJS-$(CONFIG_H263DSP) += h263dsp.o
|
||||
OBJS-$(CONFIG_H264CHROMA) += h264chroma.o
|
||||
OBJS-$(CONFIG_H264DSP) += h264dsp.o h264idct.o
|
||||
OBJS-$(CONFIG_H264DSP) += h264dsp.o h264idct.o startcode.o
|
||||
OBJS-$(CONFIG_H264PRED) += h264pred.o
|
||||
OBJS-$(CONFIG_H264QPEL) += h264qpel.o
|
||||
OBJS-$(CONFIG_HPELDSP) += hpeldsp.o
|
||||
OBJS-$(CONFIG_HUFFMAN) += huffman.o
|
||||
OBJS-$(CONFIG_HUFFYUVDSP) += huffyuvdsp.o
|
||||
OBJS-$(CONFIG_HUFFYUVENCDSP) += huffyuvencdsp.o
|
||||
OBJS-$(CONFIG_IDCTDSP) += idctdsp.o simple_idct.o jrevdct.o
|
||||
OBJS-$(CONFIG_IIRFILTER) += iirfilter.o
|
||||
OBJS-$(CONFIG_IDCTDSP) += idctdsp.o faanidct.o \
|
||||
simple_idct.o jrevdct.o
|
||||
OBJS-$(CONFIG_INTRAX8) += intrax8.o intrax8dsp.o
|
||||
OBJS-$(CONFIG_LIBXVID) += libxvid_rc.o
|
||||
OBJS-$(CONFIG_LLAUDDSP) += lossless_audiodsp.o
|
||||
@@ -73,7 +71,6 @@ OBJS-$(CONFIG_LLVIDDSP) += lossless_videodsp.o
|
||||
OBJS-$(CONFIG_LPC) += lpc.o
|
||||
OBJS-$(CONFIG_LSP) += lsp.o
|
||||
OBJS-$(CONFIG_MDCT) += mdct_fixed.o mdct_float.o mdct_fixed_32.o
|
||||
OBJS-$(CONFIG_ME_CMP) += me_cmp.o
|
||||
OBJS-$(CONFIG_MPEG_ER) += mpeg_er.o
|
||||
OBJS-$(CONFIG_MPEGAUDIO) += mpegaudio.o mpegaudiodata.o \
|
||||
mpegaudiodecheader.o
|
||||
@@ -93,11 +90,12 @@ RDFT-OBJS-$(CONFIG_HARDCODED_TABLES) += sin_tables.o
|
||||
OBJS-$(CONFIG_RDFT) += rdft.o $(RDFT-OBJS-yes)
|
||||
OBJS-$(CONFIG_SHARED) += log2_tab.o
|
||||
OBJS-$(CONFIG_SINEWIN) += sinewin.o
|
||||
OBJS-$(CONFIG_STARTCODE) += startcode.o
|
||||
OBJS-$(CONFIG_TPELDSP) += tpeldsp.o
|
||||
OBJS-$(CONFIG_VAAPI) += vaapi.o
|
||||
OBJS-$(CONFIG_VDA) += vda.o
|
||||
OBJS-$(CONFIG_VDPAU) += vdpau.o
|
||||
OBJS-$(CONFIG_VIDEODSP) += videodsp.o
|
||||
OBJS-$(CONFIG_VP3DSP) += vp3dsp.o
|
||||
OBJS-$(CONFIG_WMA_FREQS) += wma_freqs.o
|
||||
|
||||
# decoders/encoders
|
||||
OBJS-$(CONFIG_ZERO12V_DECODER) += 012v.o
|
||||
@@ -108,7 +106,8 @@ OBJS-$(CONFIG_AAC_DECODER) += aacdec.o aactab.o aacsbr.o aacps.o \
|
||||
sbrdsp.o aacpsdsp.o
|
||||
OBJS-$(CONFIG_AAC_ENCODER) += aacenc.o aaccoder.o \
|
||||
aacpsy.o aactab.o \
|
||||
psymodel.o mpeg4audio.o kbdwin.o
|
||||
psymodel.o iirfilter.o \
|
||||
mpeg4audio.o kbdwin.o
|
||||
OBJS-$(CONFIG_AASC_DECODER) += aasc.o msrledec.o
|
||||
OBJS-$(CONFIG_AC3_DECODER) += ac3dec_float.o ac3dec_data.o ac3.o kbdwin.o
|
||||
OBJS-$(CONFIG_AC3_FIXED_DECODER) += ac3dec_fixed.o ac3dec_data.o ac3.o kbdwin.o
|
||||
@@ -136,7 +135,6 @@ OBJS-$(CONFIG_AMV_ENCODER) += mjpegenc.o mjpeg.o mjpegenc_common.o \
|
||||
OBJS-$(CONFIG_ANM_DECODER) += anm.o
|
||||
OBJS-$(CONFIG_ANSI_DECODER) += ansi.o cga_data.o
|
||||
OBJS-$(CONFIG_APE_DECODER) += apedec.o
|
||||
OBJS-$(CONFIG_APNG_DECODER) += png.o pngdec.o pngdsp.o
|
||||
OBJS-$(CONFIG_SSA_DECODER) += assdec.o ass.o ass_split.o
|
||||
OBJS-$(CONFIG_SSA_ENCODER) += assenc.o ass.o
|
||||
OBJS-$(CONFIG_ASS_DECODER) += assdec.o ass.o ass_split.o
|
||||
@@ -162,8 +160,8 @@ OBJS-$(CONFIG_AYUV_ENCODER) += v408enc.o
|
||||
OBJS-$(CONFIG_BETHSOFTVID_DECODER) += bethsoftvideo.o
|
||||
OBJS-$(CONFIG_BFI_DECODER) += bfi.o
|
||||
OBJS-$(CONFIG_BINK_DECODER) += bink.o binkdsp.o
|
||||
OBJS-$(CONFIG_BINKAUDIO_DCT_DECODER) += binkaudio.o
|
||||
OBJS-$(CONFIG_BINKAUDIO_RDFT_DECODER) += binkaudio.o
|
||||
OBJS-$(CONFIG_BINKAUDIO_DCT_DECODER) += binkaudio.o wma.o wma_common.o
|
||||
OBJS-$(CONFIG_BINKAUDIO_RDFT_DECODER) += binkaudio.o wma.o wma_common.o
|
||||
OBJS-$(CONFIG_BINTEXT_DECODER) += bintext.o cga_data.o
|
||||
OBJS-$(CONFIG_BMP_DECODER) += bmp.o msrledec.o
|
||||
OBJS-$(CONFIG_BMP_ENCODER) += bmpenc.o
|
||||
@@ -210,7 +208,7 @@ OBJS-$(CONFIG_DVVIDEO_DECODER) += dvdec.o dv.o dvdata.o
|
||||
OBJS-$(CONFIG_DVVIDEO_ENCODER) += dvenc.o dv.o dvdata.o
|
||||
OBJS-$(CONFIG_DXA_DECODER) += dxa.o
|
||||
OBJS-$(CONFIG_DXTORY_DECODER) += dxtory.o
|
||||
OBJS-$(CONFIG_EAC3_DECODER) += eac3_data.o
|
||||
OBJS-$(CONFIG_EAC3_DECODER) += eac3dec.o eac3_data.o
|
||||
OBJS-$(CONFIG_EAC3_ENCODER) += eac3enc.o eac3_data.o
|
||||
OBJS-$(CONFIG_EACMV_DECODER) += eacmv.o
|
||||
OBJS-$(CONFIG_EAMAD_DECODER) += eamad.o eaidct.o mpeg12.o \
|
||||
@@ -280,7 +278,7 @@ OBJS-$(CONFIG_INTERPLAY_VIDEO_DECODER) += interplayvideo.o
|
||||
OBJS-$(CONFIG_JACOSUB_DECODER) += jacosubdec.o ass.o
|
||||
OBJS-$(CONFIG_JPEG2000_ENCODER) += j2kenc.o mqcenc.o mqc.o jpeg2000.o \
|
||||
jpeg2000dwt.o
|
||||
OBJS-$(CONFIG_JPEG2000_DECODER) += jpeg2000dec.o jpeg2000.o jpeg2000dsp.o \
|
||||
OBJS-$(CONFIG_JPEG2000_DECODER) += jpeg2000dec.o jpeg2000.o \
|
||||
jpeg2000dwt.o mqcdec.o mqc.o
|
||||
OBJS-$(CONFIG_JPEGLS_DECODER) += jpeglsdec.o jpegls.o
|
||||
OBJS-$(CONFIG_JPEGLS_ENCODER) += jpeglsenc.o jpegls.o
|
||||
@@ -321,12 +319,12 @@ OBJS-$(CONFIG_MP3ON4_DECODER) += mpegaudiodec_fixed.o mpeg4audio.o
|
||||
OBJS-$(CONFIG_MP3ON4FLOAT_DECODER) += mpegaudiodec_float.o mpeg4audio.o
|
||||
OBJS-$(CONFIG_MPC7_DECODER) += mpc7.o mpc.o
|
||||
OBJS-$(CONFIG_MPC8_DECODER) += mpc8.o mpc.o
|
||||
OBJS-$(CONFIG_MPEGVIDEO_DECODER) += mpeg12dec.o mpeg12.o mpeg12data.o
|
||||
OBJS-$(CONFIG_MPEGVIDEO_DECODER) += mpeg12.o mpeg12data.o \
|
||||
mpegvideo.o error_resilience.o
|
||||
OBJS-$(CONFIG_MPEG1VIDEO_DECODER) += mpeg12dec.o mpeg12.o mpeg12data.o
|
||||
OBJS-$(CONFIG_MPEG1VIDEO_ENCODER) += mpeg12enc.o mpeg12.o
|
||||
OBJS-$(CONFIG_MPEG2VIDEO_DECODER) += mpeg12dec.o mpeg12.o mpeg12data.o
|
||||
OBJS-$(CONFIG_MPEG2VIDEO_ENCODER) += mpeg12enc.o mpeg12.o
|
||||
OBJS-$(CONFIG_MPEG4_DECODER) += xvididct.o
|
||||
OBJS-$(CONFIG_MPL2_DECODER) += mpl2dec.o ass.o
|
||||
OBJS-$(CONFIG_MSMPEG4V1_DECODER) += msmpeg4dec.o msmpeg4.o msmpeg4data.o
|
||||
OBJS-$(CONFIG_MSMPEG4V2_DECODER) += msmpeg4dec.o msmpeg4.o msmpeg4data.o
|
||||
@@ -433,7 +431,6 @@ OBJS-$(CONFIG_SONIC_LS_ENCODER) += sonic.o
|
||||
OBJS-$(CONFIG_SP5X_DECODER) += sp5xdec.o
|
||||
OBJS-$(CONFIG_SRT_DECODER) += srtdec.o ass.o
|
||||
OBJS-$(CONFIG_SRT_ENCODER) += srtenc.o ass_split.o
|
||||
OBJS-$(CONFIG_STL_DECODER) += textdec.o ass.o
|
||||
OBJS-$(CONFIG_SUBRIP_DECODER) += srtdec.o ass.o
|
||||
OBJS-$(CONFIG_SUBRIP_ENCODER) += srtenc.o ass_split.o
|
||||
OBJS-$(CONFIG_SUBVIEWER1_DECODER) += textdec.o ass.o
|
||||
@@ -449,6 +446,7 @@ OBJS-$(CONFIG_TAK_DECODER) += takdec.o tak.o
|
||||
OBJS-$(CONFIG_TARGA_DECODER) += targa.o
|
||||
OBJS-$(CONFIG_TARGA_ENCODER) += targaenc.o rle.o
|
||||
OBJS-$(CONFIG_TARGA_Y216_DECODER) += targa_y216dec.o
|
||||
OBJS-$(CONFIG_THEORA_DECODER) += xiph.o
|
||||
OBJS-$(CONFIG_TIERTEXSEQVIDEO_DECODER) += tiertexseqv.o
|
||||
OBJS-$(CONFIG_TIFF_DECODER) += tiff.o lzw.o faxcompr.o tiff_data.o tiff_common.o
|
||||
OBJS-$(CONFIG_TIFF_ENCODER) += tiffenc.o rle.o lzwenc.o tiff_data.o
|
||||
@@ -477,17 +475,15 @@ OBJS-$(CONFIG_V410_ENCODER) += v410enc.o
|
||||
OBJS-$(CONFIG_V210X_DECODER) += v210x.o
|
||||
OBJS-$(CONFIG_VB_DECODER) += vb.o
|
||||
OBJS-$(CONFIG_VBLE_DECODER) += vble.o
|
||||
OBJS-$(CONFIG_VC1_DECODER) += vc1dec.o vc1_block.o vc1_loopfilter.o \
|
||||
vc1_mc.o vc1_pred.o vc1.o vc1data.o \
|
||||
vc1dsp.o \
|
||||
OBJS-$(CONFIG_VC1_DECODER) += vc1dec.o vc1.o vc1data.o vc1dsp.o \
|
||||
msmpeg4dec.o msmpeg4.o msmpeg4data.o \
|
||||
wmv2dsp.o
|
||||
wmv2dsp.o startcode.o
|
||||
OBJS-$(CONFIG_VCR1_DECODER) += vcr1.o
|
||||
OBJS-$(CONFIG_VMDAUDIO_DECODER) += vmdaudio.o
|
||||
OBJS-$(CONFIG_VMDVIDEO_DECODER) += vmdvideo.o
|
||||
OBJS-$(CONFIG_VMNC_DECODER) += vmnc.o
|
||||
OBJS-$(CONFIG_VORBIS_DECODER) += vorbisdec.o vorbisdsp.o vorbis.o \
|
||||
vorbis_data.o
|
||||
vorbis_data.o xiph.o
|
||||
OBJS-$(CONFIG_VORBIS_ENCODER) += vorbisenc.o vorbis.o \
|
||||
vorbis_data.o
|
||||
OBJS-$(CONFIG_VP3_DECODER) += vp3.o
|
||||
@@ -504,7 +500,7 @@ OBJS-$(CONFIG_WAVPACK_DECODER) += wavpack.o
|
||||
OBJS-$(CONFIG_WAVPACK_ENCODER) += wavpackenc.o
|
||||
OBJS-$(CONFIG_WEBP_DECODER) += vp8.o vp8dsp.o vp56rac.o
|
||||
OBJS-$(CONFIG_WEBP_DECODER) += webp.o exif.o tiff_common.o
|
||||
OBJS-$(CONFIG_WEBVTT_DECODER) += webvttdec.o ass.o
|
||||
OBJS-$(CONFIG_WEBVTT_DECODER) += webvttdec.o
|
||||
OBJS-$(CONFIG_WEBVTT_ENCODER) += webvttenc.o ass_split.o
|
||||
OBJS-$(CONFIG_WMALOSSLESS_DECODER) += wmalosslessdec.o wma_common.o
|
||||
OBJS-$(CONFIG_WMAPRO_DECODER) += wmaprodec.o wma.o wma_common.o
|
||||
@@ -652,11 +648,6 @@ OBJS-$(CONFIG_ADPCM_YAMAHA_ENCODER) += adpcmenc.o adpcm_data.o
|
||||
OBJS-$(CONFIG_VIMA_DECODER) += vima.o adpcm_data.o
|
||||
|
||||
# hardware accelerators
|
||||
OBJS-$(CONFIG_DXVA2) += dxva2.o
|
||||
OBJS-$(CONFIG_VAAPI) += vaapi.o
|
||||
OBJS-$(CONFIG_VDA) += vda.o
|
||||
OBJS-$(CONFIG_VDPAU) += vdpau.o
|
||||
|
||||
OBJS-$(CONFIG_H263_VAAPI_HWACCEL) += vaapi_mpeg4.o vaapi_mpeg.o
|
||||
OBJS-$(CONFIG_H263_VDPAU_HWACCEL) += vdpau_mpeg4.o
|
||||
OBJS-$(CONFIG_H264_DXVA2_HWACCEL) += dxva2_h264.o
|
||||
@@ -679,18 +670,20 @@ OBJS-$(CONFIG_VC1_VDPAU_HWACCEL) += vdpau_vc1.o
|
||||
OBJS-$(CONFIG_ADTS_MUXER) += mpeg4audio.o
|
||||
OBJS-$(CONFIG_CAF_DEMUXER) += mpeg4audio.o mpegaudiodata.o \
|
||||
ac3tab.o
|
||||
OBJS-$(CONFIG_FLAC_DEMUXER) += flac.o flacdata.o vorbis_data.o
|
||||
OBJS-$(CONFIG_FLAC_DEMUXER) += flac.o flacdata.o vorbis_data.o \
|
||||
vorbis_parser.o xiph.o
|
||||
OBJS-$(CONFIG_FLAC_MUXER) += flac.o flacdata.o vorbis_data.o
|
||||
OBJS-$(CONFIG_FLV_DEMUXER) += mpeg4audio.o
|
||||
OBJS-$(CONFIG_GXF_DEMUXER) += mpeg12data.o
|
||||
OBJS-$(CONFIG_IFF_DEMUXER) += iff.o
|
||||
OBJS-$(CONFIG_ISMV_MUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
OBJS-$(CONFIG_LATM_MUXER) += mpeg4audio.o
|
||||
OBJS-$(CONFIG_MATROSKA_AUDIO_MUXER) += mpeg4audio.o vorbis_data.o \
|
||||
OBJS-$(CONFIG_MATROSKA_AUDIO_MUXER) += xiph.o mpeg4audio.o vorbis_data.o \
|
||||
flac.o flacdata.o
|
||||
OBJS-$(CONFIG_MATROSKA_DEMUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
OBJS-$(CONFIG_MATROSKA_DEMUXER) += mpeg4audio.o mpegaudiodata.o \
|
||||
vorbis_parser.o xiph.o
|
||||
OBJS-$(CONFIG_MATROSKA_MUXER) += mpeg4audio.o mpegaudiodata.o \
|
||||
flac.o flacdata.o vorbis_data.o
|
||||
flac.o flacdata.o vorbis_data.o xiph.o
|
||||
OBJS-$(CONFIG_MP2_MUXER) += mpegaudiodata.o mpegaudiodecheader.o
|
||||
OBJS-$(CONFIG_MP3_MUXER) += mpegaudiodata.o mpegaudiodecheader.o
|
||||
OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o ac3tab.o
|
||||
@@ -699,18 +692,19 @@ OBJS-$(CONFIG_MPEGTS_MUXER) += mpeg4audio.o
|
||||
OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
OBJS-$(CONFIG_MXF_MUXER) += dnxhddata.o
|
||||
OBJS-$(CONFIG_NUT_MUXER) += mpegaudiodata.o
|
||||
OBJS-$(CONFIG_OGA_MUXER) += flac.o flacdata.o
|
||||
OBJS-$(CONFIG_OGG_DEMUXER) += mpeg12data.o \
|
||||
OBJS-$(CONFIG_OGA_MUXER) += xiph.o flac.o flacdata.o
|
||||
OBJS-$(CONFIG_OGG_DEMUXER) += xiph.o flac.o flacdata.o \
|
||||
mpeg12data.o vorbis_parser.o \
|
||||
dirac.o vorbis_data.o
|
||||
OBJS-$(CONFIG_OGG_MUXER) += flac.o flacdata.o \
|
||||
OBJS-$(CONFIG_OGG_MUXER) += xiph.o flac.o flacdata.o \
|
||||
vorbis_data.o
|
||||
OBJS-$(CONFIG_RTP_MUXER) += mpeg4audio.o
|
||||
OBJS-$(CONFIG_RTP_MUXER) += mpeg4audio.o xiph.o
|
||||
OBJS-$(CONFIG_RTPDEC) += mjpeg.o
|
||||
OBJS-$(CONFIG_SPDIF_DEMUXER) += aacadtsdec.o mpeg4audio.o
|
||||
OBJS-$(CONFIG_SPDIF_MUXER) += dca.o
|
||||
OBJS-$(CONFIG_TAK_DEMUXER) += tak.o
|
||||
OBJS-$(CONFIG_WEBM_MUXER) += mpeg4audio.o mpegaudiodata.o \
|
||||
flac.o flacdata.o \
|
||||
xiph.o flac.o flacdata.o \
|
||||
vorbis_data.o
|
||||
OBJS-$(CONFIG_WTV_DEMUXER) += mpeg4audio.o mpegaudiodata.o
|
||||
|
||||
@@ -723,10 +717,10 @@ OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
|
||||
OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o
|
||||
OBJS-$(CONFIG_LIBFDK_AAC_DECODER) += libfdk-aacdec.o
|
||||
OBJS-$(CONFIG_LIBFDK_AAC_ENCODER) += libfdk-aacenc.o
|
||||
OBJS-$(CONFIG_LIBGSM_DECODER) += libgsmdec.o
|
||||
OBJS-$(CONFIG_LIBGSM_ENCODER) += libgsmenc.o
|
||||
OBJS-$(CONFIG_LIBGSM_MS_DECODER) += libgsmdec.o
|
||||
OBJS-$(CONFIG_LIBGSM_MS_ENCODER) += libgsmenc.o
|
||||
OBJS-$(CONFIG_LIBGSM_DECODER) += libgsm.o
|
||||
OBJS-$(CONFIG_LIBGSM_ENCODER) += libgsm.o
|
||||
OBJS-$(CONFIG_LIBGSM_MS_DECODER) += libgsm.o
|
||||
OBJS-$(CONFIG_LIBGSM_MS_ENCODER) += libgsm.o
|
||||
OBJS-$(CONFIG_LIBILBC_DECODER) += libilbc.o
|
||||
OBJS-$(CONFIG_LIBILBC_ENCODER) += libilbc.o
|
||||
OBJS-$(CONFIG_LIBMP3LAME_ENCODER) += libmp3lame.o mpegaudiodecheader.o
|
||||
@@ -755,7 +749,7 @@ OBJS-$(CONFIG_LIBVO_AACENC_ENCODER) += libvo-aacenc.o mpeg4audio.o
|
||||
OBJS-$(CONFIG_LIBVO_AMRWBENC_ENCODER) += libvo-amrwbenc.o
|
||||
OBJS-$(CONFIG_LIBVORBIS_DECODER) += libvorbisdec.o
|
||||
OBJS-$(CONFIG_LIBVORBIS_ENCODER) += libvorbisenc.o \
|
||||
vorbis_data.o
|
||||
vorbis_data.o vorbis_parser.o xiph.o
|
||||
OBJS-$(CONFIG_LIBVPX_VP8_DECODER) += libvpxdec.o
|
||||
OBJS-$(CONFIG_LIBVPX_VP8_ENCODER) += libvpxenc.o
|
||||
OBJS-$(CONFIG_LIBVPX_VP9_DECODER) += libvpxdec.o libvpx.o
|
||||
@@ -810,7 +804,8 @@ OBJS-$(CONFIG_RV40_PARSER) += rv34_parser.o
|
||||
OBJS-$(CONFIG_TAK_PARSER) += tak_parser.o tak.o
|
||||
OBJS-$(CONFIG_VC1_PARSER) += vc1_parser.o vc1.o vc1data.o vc1dsp.o \
|
||||
msmpeg4.o msmpeg4data.o mpeg4video.o \
|
||||
h263.o
|
||||
h263.o startcode.o
|
||||
OBJS-$(CONFIG_VORBIS_PARSER) += vorbis_parser.o xiph.o
|
||||
OBJS-$(CONFIG_VP3_PARSER) += vp3_parser.o
|
||||
OBJS-$(CONFIG_VP8_PARSER) += vp8_parser.o
|
||||
OBJS-$(CONFIG_VP9_PARSER) += vp9_parser.o
|
||||
@@ -857,25 +852,24 @@ SKIPHEADERS-$(CONFIG_VAAPI) += vaapi_internal.h
|
||||
SKIPHEADERS-$(CONFIG_VDA) += vda.h vda_internal.h
|
||||
SKIPHEADERS-$(CONFIG_VDPAU) += vdpau.h vdpau_internal.h
|
||||
|
||||
TESTPROGS = imgconvert \
|
||||
|
||||
TESTPROGS-$(CONFIG_CABAC) += cabac
|
||||
TESTPROGS-$(CONFIG_FFT) += fft fft-fixed fft-fixed32
|
||||
TESTPROGS-$(CONFIG_IDCTDSP) += dct
|
||||
TESTPROGS-$(CONFIG_IIRFILTER) += iirfilter
|
||||
TESTPROGS-$(HAVE_MMX) += motion
|
||||
TESTPROGS-$(CONFIG_GOLOMB) += golomb
|
||||
TESTPROGS-$(CONFIG_RANGECODER) += rangecoder
|
||||
TESTPROGS-$(CONFIG_SNOW_ENCODER) += snowenc
|
||||
TESTPROGS = cabac \
|
||||
fft \
|
||||
fft-fixed \
|
||||
fft-fixed32 \
|
||||
golomb \
|
||||
iirfilter \
|
||||
imgconvert \
|
||||
rangecoder \
|
||||
snowenc \
|
||||
|
||||
TESTPROGS-$(CONFIG_DCT) += dct
|
||||
TESTPROGS-$(HAVE_MMX) += motion
|
||||
TESTOBJS = dctref.o
|
||||
|
||||
TOOLS = fourcc2pixfmt
|
||||
|
||||
HOSTPROGS = aac_tablegen \
|
||||
aacps_tablegen \
|
||||
aacsbr_tablegen \
|
||||
cabac_tablegen \
|
||||
cbrt_tablegen \
|
||||
cos_tablegen \
|
||||
dsd_tablegen \
|
||||
@@ -903,7 +897,7 @@ else
|
||||
$(SUBDIR)%_tablegen$(HOSTEXESUF): HOSTCFLAGS += -DCONFIG_SMALL=0
|
||||
endif
|
||||
|
||||
GEN_HEADERS = cabac_tables.h cbrt_tables.h aacps_tables.h aacsbr_tables.h aac_tables.h dsd_tables.h dv_tables.h \
|
||||
GEN_HEADERS = cbrt_tables.h aacps_tables.h aac_tables.h dsd_tables.h dv_tables.h \
|
||||
sinewin_tables.h mpegaudio_tables.h motionpixels_tables.h \
|
||||
pcm_tables.h qdm2_tables.h
|
||||
GEN_HEADERS := $(addprefix $(SUBDIR), $(GEN_HEADERS))
|
||||
@@ -914,9 +908,7 @@ $(GEN_HEADERS): $(SUBDIR)%_tables.h: $(SUBDIR)%_tablegen$(HOSTEXESUF)
|
||||
ifdef CONFIG_HARDCODED_TABLES
|
||||
$(SUBDIR)aacdec.o: $(SUBDIR)cbrt_tables.h
|
||||
$(SUBDIR)aacps.o: $(SUBDIR)aacps_tables.h
|
||||
$(SUBDIR)aacsbr.o: $(SUBDIR)aacsbr_tables.h
|
||||
$(SUBDIR)aactab.o: $(SUBDIR)aac_tables.h
|
||||
$(SUBDIR)cabac.o: $(SUBDIR)cabac_tables.h
|
||||
$(SUBDIR)dsddec.o: $(SUBDIR)dsd_tables.h
|
||||
$(SUBDIR)dvenc.o: $(SUBDIR)dv_tables.h
|
||||
$(SUBDIR)sinewin.o: $(SUBDIR)sinewin_tables.h
|
||||
|
@@ -28,7 +28,6 @@
|
||||
#include "a64tables.h"
|
||||
#include "elbg.h"
|
||||
#include "internal.h"
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/intreadwrite.h"
|
||||
|
||||
@@ -66,7 +65,7 @@ static const int mc_colors[5]={0x0,0xb,0xc,0xf,0x1};
|
||||
//static const int mc_colors[5]={0x0,0x8,0xa,0xf,0x7};
|
||||
//static const int mc_colors[5]={0x0,0x9,0x8,0xa,0x3};
|
||||
|
||||
static void to_meta_with_crop(AVCodecContext *avctx, const AVFrame *p, int *dest)
|
||||
static void to_meta_with_crop(AVCodecContext *avctx, AVFrame *p, int *dest)
|
||||
{
|
||||
int blockx, blocky, x, y;
|
||||
int luma = 0;
|
||||
@@ -79,13 +78,9 @@ static void to_meta_with_crop(AVCodecContext *avctx, const AVFrame *p, int *dest
|
||||
for (y = blocky; y < blocky + 8 && y < C64YRES; y++) {
|
||||
for (x = blockx; x < blockx + 8 && x < C64XRES; x += 2) {
|
||||
if(x < width && y < height) {
|
||||
if (x + 1 < width) {
|
||||
/* build average over 2 pixels */
|
||||
luma = (src[(x + 0 + y * p->linesize[0])] +
|
||||
src[(x + 1 + y * p->linesize[0])]) / 2;
|
||||
} else {
|
||||
luma = src[(x + y * p->linesize[0])];
|
||||
}
|
||||
/* build average over 2 pixels */
|
||||
luma = (src[(x + 0 + y * p->linesize[0])] +
|
||||
src[(x + 1 + y * p->linesize[0])]) / 2;
|
||||
/* write blocks as linear data now so they are suitable for elbg */
|
||||
dest[0] = luma;
|
||||
}
|
||||
@@ -191,11 +186,12 @@ static void render_charset(AVCodecContext *avctx, uint8_t *charset,
|
||||
static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
|
||||
{
|
||||
A64Context *c = avctx->priv_data;
|
||||
av_freep(&c->mc_meta_charset);
|
||||
av_freep(&c->mc_best_cb);
|
||||
av_freep(&c->mc_charset);
|
||||
av_freep(&c->mc_charmap);
|
||||
av_freep(&c->mc_colram);
|
||||
av_frame_free(&avctx->coded_frame);
|
||||
av_free(c->mc_meta_charset);
|
||||
av_free(c->mc_best_cb);
|
||||
av_free(c->mc_charset);
|
||||
av_free(c->mc_charmap);
|
||||
av_free(c->mc_colram);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -224,7 +220,7 @@ static av_cold int a64multi_encode_init(AVCodecContext *avctx)
|
||||
a64_palette[mc_colors[a]][2] * 0.11;
|
||||
}
|
||||
|
||||
if (!(c->mc_meta_charset = av_mallocz_array(c->mc_lifetime, 32000 * sizeof(int))) ||
|
||||
if (!(c->mc_meta_charset = av_malloc_array(c->mc_lifetime, 32000 * sizeof(int))) ||
|
||||
!(c->mc_best_cb = av_malloc(CHARSET_CHARS * 32 * sizeof(int))) ||
|
||||
!(c->mc_charmap = av_mallocz_array(c->mc_lifetime, 1000 * sizeof(int))) ||
|
||||
!(c->mc_colram = av_mallocz(CHARSET_CHARS * sizeof(uint8_t))) ||
|
||||
@@ -242,6 +238,14 @@ static av_cold int a64multi_encode_init(AVCodecContext *avctx)
|
||||
AV_WB32(avctx->extradata, c->mc_lifetime);
|
||||
AV_WB32(avctx->extradata + 16, INTERLACED);
|
||||
|
||||
avctx->coded_frame = av_frame_alloc();
|
||||
if (!avctx->coded_frame) {
|
||||
a64multi_close_encoder(avctx);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
|
||||
avctx->coded_frame->key_frame = 1;
|
||||
if (!avctx->codec_tag)
|
||||
avctx->codec_tag = AV_RL32("a64m");
|
||||
|
||||
@@ -266,9 +270,10 @@ static void a64_compress_colram(unsigned char *buf, int *charmap, uint8_t *colra
|
||||
}
|
||||
|
||||
static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
const AVFrame *p, int *got_packet)
|
||||
const AVFrame *pict, int *got_packet)
|
||||
{
|
||||
A64Context *c = avctx->priv_data;
|
||||
AVFrame *const p = avctx->coded_frame;
|
||||
|
||||
int frame;
|
||||
int x, y;
|
||||
@@ -299,7 +304,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
}
|
||||
|
||||
/* no data, means end encoding asap */
|
||||
if (!p) {
|
||||
if (!pict) {
|
||||
/* all done, end encoding */
|
||||
if (!c->mc_lifetime) return 0;
|
||||
/* no more frames in queue, prepare to flush remaining frames */
|
||||
@@ -312,10 +317,13 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
} else {
|
||||
/* fill up mc_meta_charset with data until lifetime exceeds */
|
||||
if (c->mc_frame_counter < c->mc_lifetime) {
|
||||
*p = *pict;
|
||||
p->pict_type = AV_PICTURE_TYPE_I;
|
||||
p->key_frame = 1;
|
||||
to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
|
||||
c->mc_frame_counter++;
|
||||
if (c->next_pts == AV_NOPTS_VALUE)
|
||||
c->next_pts = p->pts;
|
||||
c->next_pts = pict->pts;
|
||||
/* lifetime is not reached so wait for next frame first */
|
||||
return 0;
|
||||
}
|
||||
@@ -326,8 +334,8 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
req_size = 0;
|
||||
/* any frames to encode? */
|
||||
if (c->mc_lifetime) {
|
||||
int alloc_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
|
||||
if ((ret = ff_alloc_packet2(avctx, pkt, alloc_size)) < 0)
|
||||
req_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
|
||||
if ((ret = ff_alloc_packet2(avctx, pkt, req_size)) < 0)
|
||||
return ret;
|
||||
buf = pkt->data;
|
||||
|
||||
@@ -343,7 +351,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
|
||||
/* advance pointers */
|
||||
buf += charset_size;
|
||||
req_size += charset_size;
|
||||
charset += charset_size;
|
||||
}
|
||||
|
||||
/* write x frames to buf */
|
||||
@@ -380,7 +388,6 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||
pkt->pts = pkt->dts = c->next_pts;
|
||||
c->next_pts = AV_NOPTS_VALUE;
|
||||
|
||||
av_assert0(pkt->size >= req_size);
|
||||
pkt->size = req_size;
|
||||
pkt->flags |= AV_PKT_FLAG_KEY;
|
||||
*got_packet = !!req_size;
|
||||
|
@@ -245,7 +245,6 @@ typedef struct SingleChannelElement {
|
||||
* channel element - generic struct for SCE/CPE/CCE/LFE
|
||||
*/
|
||||
typedef struct ChannelElement {
|
||||
int present;
|
||||
// CPE specific
|
||||
int common_window; ///< Set if channels share a common 'IndividualChannelStream' in bitstream.
|
||||
int ms_mode; ///< Signals mid/side stereo flags coding mode (used by encoder)
|
||||
@@ -275,7 +274,6 @@ struct AACContext {
|
||||
ChannelElement *che[4][MAX_ELEM_ID];
|
||||
ChannelElement *tag_che_map[4][MAX_ELEM_ID];
|
||||
int tags_mapped;
|
||||
int warned_remapping_once;
|
||||
/** @} */
|
||||
|
||||
/**
|
||||
@@ -295,7 +293,7 @@ struct AACContext {
|
||||
FFTContext mdct_ld;
|
||||
FFTContext mdct_ltp;
|
||||
FmtConvertContext fmt_conv;
|
||||
AVFloatDSPContext *fdsp;
|
||||
AVFloatDSPContext fdsp;
|
||||
int random_state;
|
||||
/** @} */
|
||||
|
||||
|
@@ -57,7 +57,7 @@ static int aac_adtstoasc_filter(AVBitStreamFilterContext *bsfc,

    if (avpriv_aac_parse_header(&gb, &hdr) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error parsing ADTS frame header!\n");
        return AVERROR_INVALIDDATA;
        return -1;
    }

    if (!hdr.crc_absent && hdr.num_aac_frames > 1) {
@@ -34,7 +34,7 @@ static int aac_sync(uint64_t state, AACAC3ParseContext *hdr_info,
    int size;
    union {
        uint64_t u64;
        uint8_t  u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
        uint8_t  u8[8];
    } tmp;

    tmp.u64 = av_be2ne64(state);
|
@@ -31,7 +31,7 @@
#include "libavutil/mathematics.h"
float ff_aac_pow2sf_tab[428];

av_cold void ff_aac_tableinit(void)
void ff_aac_tableinit(void)
{
    int i;
    for (i = 0; i < 428; i++)
|
@@ -53,7 +53,7 @@ static const uint8_t run_value_bits_short[16] = {
|
||||
3, 3, 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 9
|
||||
};
|
||||
|
||||
static const uint8_t * const run_value_bits[2] = {
|
||||
static const uint8_t *run_value_bits[2] = {
|
||||
run_value_bits_long, run_value_bits_short
|
||||
};
|
||||
|
||||
@@ -776,6 +776,7 @@ static void search_for_quantizers_twoloop(AVCodecContext *avctx,
|
||||
do {
|
||||
int prev = -1;
|
||||
tbits = 0;
|
||||
fflag = 0;
|
||||
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
|
||||
start = w*128;
|
||||
for (g = 0; g < sce->ics.num_swb; g++) {
|
||||
@@ -952,6 +953,7 @@ static void search_for_quantizers_faac(AVCodecContext *avctx, AACEncContext *s,
|
||||
}
|
||||
sce->zeroes[w*16+g] = 0;
|
||||
scf = prev_scf = av_clip(SCALE_ONE_POS - SCALE_DIV_512 - log2f(1/maxq[w*16+g])*16/3, 60, 218);
|
||||
step = 16;
|
||||
for (;;) {
|
||||
float dist = 0.0f;
|
||||
int quant_max;
|
||||
|
@@ -425,7 +425,7 @@ static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
* Save current output configuration if and only if it has been locked.
*/
static void push_output_configuration(AACContext *ac) {
if (ac->oc[1].status == OC_LOCKED || ac->oc[0].status == OC_NONE) {
if (ac->oc[1].status == OC_LOCKED) {
ac->oc[0] = ac->oc[1];
}
ac->oc[1].status = OC_NONE;

@@ -621,12 +621,6 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
* If we seem to have encountered such a stream, transfer
* the LFE[0] element to the SCE[1]'s mapping */
if (ac->tags_mapped == tags_per_config[ac->oc[1].m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
if (!ac->warned_remapping_once && (type != TYPE_LFE || elem_id != 0)) {
av_log(ac->avctx, AV_LOG_WARNING,
"This stream seems to incorrectly report its last channel as %s[%d], mapping to LFE[0]\n",
type == TYPE_SCE ? "SCE" : "LFE", elem_id);
ac->warned_remapping_once++;
}
ac->tags_mapped++;
return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
}

@@ -636,22 +630,6 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
}
case 4:
/* Some streams incorrectly code 4.0 audio as
* SCE[0] CPE[0] LFE[0]
* instead of
* SCE[0] CPE[0] SCE[1].
* If we seem to have encountered such a stream, transfer
* the SCE[1] element to the LFE[0]'s mapping */
if (ac->tags_mapped == tags_per_config[ac->oc[1].m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
if (!ac->warned_remapping_once && (type != TYPE_SCE || elem_id != 1)) {
av_log(ac->avctx, AV_LOG_WARNING,
"This stream seems to incorrectly report its last channel as %s[%d], mapping to SCE[1]\n",
type == TYPE_SCE ? "SCE" : "LFE", elem_id);
ac->warned_remapping_once++;
}
ac->tags_mapped++;
return ac->tag_che_map[type][elem_id] = ac->che[TYPE_SCE][1];
}
if (ac->tags_mapped == 2 &&
ac->oc[1].m4ac.chan_config == 4 &&
type == TYPE_SCE) {

@@ -703,7 +681,6 @@ static void decode_channel_map(uint8_t layout_map[][3],
syn_ele = TYPE_LFE;
break;
default:
// AAC_CHANNEL_OFF has no channel map
av_assert0(0);
}
layout_map[0][0] = syn_ele;

@@ -904,7 +881,7 @@ static int decode_eld_specific_config(AACContext *ac, AVCodecContext *avctx,
if (len == 15 + 255)
len += get_bits(gb, 16);
if (get_bits_left(gb) < len * 8 + 4) {
av_log(avctx, AV_LOG_ERROR, overread_err);
av_log(ac->avctx, AV_LOG_ERROR, overread_err);
return AVERROR_INVALIDDATA;
}
skip_bits_long(gb, 8 * len);

@@ -1137,10 +1114,7 @@ static av_cold int aac_decode_init(AVCodecContext *avctx)
ff_aac_sbr_init();

ff_fmt_convert_init(&ac->fmt_conv, avctx);
ac->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
if (!ac->fdsp) {
return AVERROR(ENOMEM);
}
avpriv_float_dsp_init(&ac->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);

ac->random_state = 0x1f2e3d4c;
@@ -1516,12 +1490,13 @@ static void decode_mid_side_stereo(ChannelElement *cpe, GetBitContext *gb,
int ms_present)
{
int idx;
int max_idx = cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb;
if (ms_present == 1) {
for (idx = 0; idx < max_idx; idx++)
for (idx = 0;
idx < cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb;
idx++)
cpe->ms_mask[idx] = get_bits1(gb);
} else if (ms_present == 2) {
memset(cpe->ms_mask, 1, max_idx * sizeof(cpe->ms_mask[0]));
memset(cpe->ms_mask, 1, sizeof(cpe->ms_mask[0]) * cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb);
}
}

@@ -1643,9 +1618,9 @@ static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
cfo[k] = ac->random_state;
}

band_energy = ac->fdsp->scalarproduct_float(cfo, cfo, off_len);
band_energy = ac->fdsp.scalarproduct_float(cfo, cfo, off_len);
scale = sf[idx] / sqrtf(band_energy);
ac->fdsp->vector_fmul_scalar(cfo, cfo, scale, off_len);
ac->fdsp.vector_fmul_scalar(cfo, cfo, scale, off_len);
}
} else {
const float *vq = ff_aac_codebook_vector_vals[cbt_m1];

@@ -1791,7 +1766,7 @@ static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
}
} while (len -= 2);

ac->fdsp->vector_fmul_scalar(cfo, cfo, sf[idx], off_len);
ac->fdsp.vector_fmul_scalar(cfo, cfo, sf[idx], off_len);
}
}

@@ -2004,7 +1979,7 @@ static void apply_mid_side_stereo(AACContext *ac, ChannelElement *cpe)
cpe->ch[0].band_type[idx] < NOISE_BT &&
cpe->ch[1].band_type[idx] < NOISE_BT) {
for (group = 0; group < ics->group_len[g]; group++) {
ac->fdsp->butterflies_float(ch0 + group * 128 + offsets[i],
ac->fdsp.butterflies_float(ch0 + group * 128 + offsets[i],
ch1 + group * 128 + offsets[i],
offsets[i+1] - offsets[i]);
}

@@ -2043,7 +2018,7 @@ static void apply_intensity_stereo(AACContext *ac,
c *= 1 - 2 * cpe->ms_mask[idx];
scale = c * sce1->sf[idx];
for (group = 0; group < ics->group_len[g]; group++)
ac->fdsp->vector_fmul_scalar(coef1 + group * 128 + offsets[i],
ac->fdsp.vector_fmul_scalar(coef1 + group * 128 + offsets[i],
coef0 + group * 128 + offsets[i],
scale,
offsets[i + 1] - offsets[i]);

@@ -2293,12 +2268,7 @@ static int decode_extension_payload(AACContext *ac, GetBitContext *gb, int cnt,
{
int crc_flag = 0;
int res = cnt;
int type = get_bits(gb, 4);

if (ac->avctx->debug & FF_DEBUG_STARTCODE)
av_log(ac->avctx, AV_LOG_DEBUG, "extension type: %d len:%d\n", type, cnt);

switch (type) { // extension type
switch (get_bits(gb, 4)) { // extension type
case EXT_SBR_DATA_CRC:
crc_flag++;
case EXT_SBR_DATA:

@@ -2411,15 +2381,15 @@ static void windowing_and_mdct_ltp(AACContext *ac, float *out,
const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;

if (ics->window_sequence[0] != LONG_STOP_SEQUENCE) {
ac->fdsp->vector_fmul(in, in, lwindow_prev, 1024);
ac->fdsp.vector_fmul(in, in, lwindow_prev, 1024);
} else {
memset(in, 0, 448 * sizeof(float));
ac->fdsp->vector_fmul(in + 448, in + 448, swindow_prev, 128);
ac->fdsp.vector_fmul(in + 448, in + 448, swindow_prev, 128);
}
if (ics->window_sequence[0] != LONG_START_SEQUENCE) {
ac->fdsp->vector_fmul_reverse(in + 1024, in + 1024, lwindow, 1024);
ac->fdsp.vector_fmul_reverse(in + 1024, in + 1024, lwindow, 1024);
} else {
ac->fdsp->vector_fmul_reverse(in + 1024 + 448, in + 1024 + 448, swindow, 128);
ac->fdsp.vector_fmul_reverse(in + 1024 + 448, in + 1024 + 448, swindow, 128);
memset(in + 1024 + 576, 0, 448 * sizeof(float));
}
ac->mdct_ltp.mdct_calc(&ac->mdct_ltp, out, in);
@@ -2472,17 +2442,17 @@ static void update_ltp(AACContext *ac, SingleChannelElement *sce)
if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
memcpy(saved_ltp, saved, 512 * sizeof(float));
memset(saved_ltp + 576, 0, 448 * sizeof(float));
ac->fdsp->vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
ac->fdsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
for (i = 0; i < 64; i++)
saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * swindow[63 - i];
} else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
memcpy(saved_ltp, ac->buf_mdct + 512, 448 * sizeof(float));
memset(saved_ltp + 576, 0, 448 * sizeof(float));
ac->fdsp->vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
ac->fdsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
for (i = 0; i < 64; i++)
saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * swindow[63 - i];
} else { // LONG_STOP or ONLY_LONG
ac->fdsp->vector_fmul_reverse(saved_ltp, ac->buf_mdct + 512, &lwindow[512], 512);
ac->fdsp.vector_fmul_reverse(saved_ltp, ac->buf_mdct + 512, &lwindow[512], 512);
for (i = 0; i < 512; i++)
saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * lwindow[511 - i];
}

@@ -2523,19 +2493,19 @@ static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce)
*/
if ((ics->window_sequence[1] == ONLY_LONG_SEQUENCE || ics->window_sequence[1] == LONG_STOP_SEQUENCE) &&
(ics->window_sequence[0] == ONLY_LONG_SEQUENCE || ics->window_sequence[0] == LONG_START_SEQUENCE)) {
ac->fdsp->vector_fmul_window( out, saved, buf, lwindow_prev, 512);
ac->fdsp.vector_fmul_window( out, saved, buf, lwindow_prev, 512);
} else {
memcpy( out, saved, 448 * sizeof(float));

if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
ac->fdsp->vector_fmul_window(out + 448 + 0*128, saved + 448, buf + 0*128, swindow_prev, 64);
ac->fdsp->vector_fmul_window(out + 448 + 1*128, buf + 0*128 + 64, buf + 1*128, swindow, 64);
ac->fdsp->vector_fmul_window(out + 448 + 2*128, buf + 1*128 + 64, buf + 2*128, swindow, 64);
ac->fdsp->vector_fmul_window(out + 448 + 3*128, buf + 2*128 + 64, buf + 3*128, swindow, 64);
ac->fdsp->vector_fmul_window(temp, buf + 3*128 + 64, buf + 4*128, swindow, 64);
ac->fdsp.vector_fmul_window(out + 448 + 0*128, saved + 448, buf + 0*128, swindow_prev, 64);
ac->fdsp.vector_fmul_window(out + 448 + 1*128, buf + 0*128 + 64, buf + 1*128, swindow, 64);
ac->fdsp.vector_fmul_window(out + 448 + 2*128, buf + 1*128 + 64, buf + 2*128, swindow, 64);
ac->fdsp.vector_fmul_window(out + 448 + 3*128, buf + 2*128 + 64, buf + 3*128, swindow, 64);
ac->fdsp.vector_fmul_window(temp, buf + 3*128 + 64, buf + 4*128, swindow, 64);
memcpy( out + 448 + 4*128, temp, 64 * sizeof(float));
} else {
ac->fdsp->vector_fmul_window(out + 448, saved + 448, buf, swindow_prev, 64);
ac->fdsp.vector_fmul_window(out + 448, saved + 448, buf, swindow_prev, 64);
memcpy( out + 576, buf + 64, 448 * sizeof(float));
}
}

@@ -2543,9 +2513,9 @@ static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce)
// buffer update
if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
memcpy( saved, temp + 64, 64 * sizeof(float));
ac->fdsp->vector_fmul_window(saved + 64, buf + 4*128 + 64, buf + 5*128, swindow, 64);
ac->fdsp->vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 64);
ac->fdsp->vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 64);
ac->fdsp.vector_fmul_window(saved + 64, buf + 4*128 + 64, buf + 5*128, swindow, 64);
ac->fdsp.vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 64);
ac->fdsp.vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 64);
memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(float));
} else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
memcpy( saved, buf + 512, 448 * sizeof(float));

@@ -2570,10 +2540,10 @@ static void imdct_and_windowing_ld(AACContext *ac, SingleChannelElement *sce)
if (ics->use_kb_window[1]) {
// AAC LD uses a low overlap sine window instead of a KBD window
memcpy(out, saved, 192 * sizeof(float));
ac->fdsp->vector_fmul_window(out + 192, saved + 192, buf, ff_sine_128, 64);
ac->fdsp.vector_fmul_window(out + 192, saved + 192, buf, ff_sine_128, 64);
memcpy( out + 320, buf + 64, 192 * sizeof(float));
} else {
ac->fdsp->vector_fmul_window(out, saved, buf, ff_sine_512, 256);
ac->fdsp.vector_fmul_window(out, saved, buf, ff_sine_512, 256);
}

// buffer update
@@ -2746,7 +2716,7 @@ static void spectral_to_sample(AACContext *ac)
for (type = 3; type >= 0; type--) {
for (i = 0; i < MAX_ELEM_ID; i++) {
ChannelElement *che = ac->che[type][i];
if (che && che->present) {
if (che) {
if (type <= TYPE_CPE)
apply_channel_coupling(ac, che, type, i, BEFORE_TNS, apply_dependent_coupling);
if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) {

@@ -2778,9 +2748,6 @@ static void spectral_to_sample(AACContext *ac)
}
if (type <= TYPE_CCE)
apply_channel_coupling(ac, che, type, i, AFTER_IMDCT, apply_independent_coupling);
che->present = 0;
} else if (che) {
av_log(ac->avctx, AV_LOG_VERBOSE, "ChannelElement %d.%d missing \n", type, i);
}
}
}

@@ -2885,7 +2852,6 @@ static int aac_decode_er_frame(AVCodecContext *avctx, void *data,
elem_type, elem_id);
return AVERROR_INVALIDDATA;
}
che->present = 1;
if (aot != AOT_ER_AAC_ELD)
skip_bits(gb, 4);
switch (elem_type) {

@@ -2949,9 +2915,6 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
while ((elem_type = get_bits(gb, 3)) != TYPE_END) {
elem_id = get_bits(gb, 4);

if (avctx->debug & FF_DEBUG_STARTCODE)
av_log(avctx, AV_LOG_DEBUG, "Elem type:%x id:%x\n", elem_type, elem_id);

if (elem_type < TYPE_DSE) {
if (!(che=get_che(ac, elem_type, elem_id))) {
av_log(ac->avctx, AV_LOG_ERROR, "channel element %d.%d is not allocated\n",

@@ -2960,7 +2923,6 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
goto fail;
}
samples = 1024;
che->present = 1;
}

switch (elem_type) {

@@ -3059,12 +3021,6 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
AV_WL32(side, 2*AV_RL32(side));
}

if (!ac->frame->data[0] && samples) {
av_log(avctx, AV_LOG_ERROR, "no frame data found\n");
err = AVERROR_INVALIDDATA;
goto fail;
}

*got_frame_ptr = !!samples;
if (samples) {
ac->frame->nb_samples = samples;

@@ -3175,7 +3131,6 @@ static av_cold int aac_decode_close(AVCodecContext *avctx)
ff_mdct_end(&ac->mdct_small);
ff_mdct_end(&ac->mdct_ld);
ff_mdct_end(&ac->mdct_ltp);
av_freep(&ac->fdsp);
return 0;
}

@@ -3485,18 +3440,6 @@ static const AVClass aac_decoder_class = {
.version = LIBAVUTIL_VERSION_INT,
};

static const AVProfile profiles[] = {
{ FF_PROFILE_AAC_MAIN, "Main" },
{ FF_PROFILE_AAC_LOW, "LC" },
{ FF_PROFILE_AAC_SSR, "SSR" },
{ FF_PROFILE_AAC_LTP, "LTP" },
{ FF_PROFILE_AAC_HE, "HE-AAC" },
{ FF_PROFILE_AAC_HE_V2, "HE-AACv2" },
{ FF_PROFILE_AAC_LD, "LD" },
{ FF_PROFILE_AAC_ELD, "ELD" },
{ FF_PROFILE_UNKNOWN },
};

AVCodec ff_aac_decoder = {
.name = "aac",
.long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"),

@@ -3513,7 +3456,6 @@ AVCodec ff_aac_decoder = {
.channel_layouts = aac_channel_layout,
.flush = flush,
.priv_class = &aac_decoder_class,
.profiles = profiles,
};

/*

@@ -3536,5 +3478,4 @@ AVCodec ff_aac_latm_decoder = {
.capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
.channel_layouts = aac_channel_layout,
.flush = flush,
.profiles = profiles,
};
@@ -165,7 +165,7 @@ static void put_audio_specific_config(AVCodecContext *avctx)
PutBitContext pb;
AACEncContext *s = avctx->priv_data;

init_put_bits(&pb, avctx->extradata, avctx->extradata_size);
init_put_bits(&pb, avctx->extradata, avctx->extradata_size*8);
put_bits(&pb, 5, 2); //object type - AAC-LC
put_bits(&pb, 4, s->samplerate_index); //sample rate index
put_bits(&pb, 4, s->channels);

@@ -252,7 +252,7 @@ static void apply_window_and_mdct(AACEncContext *s, SingleChannelElement *sce,
int i;
float *output = sce->ret_buf;

apply_window[sce->ics.window_sequence[0]](s->fdsp, sce, audio);
apply_window[sce->ics.window_sequence[0]](&s->fdsp, sce, audio);

if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE)
s->mdct1024.mdct_calc(&s->mdct1024, sce->coeffs, output);

@@ -567,10 +567,6 @@ static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
ics->group_len[w] = wi[ch].grouping[w];

apply_window_and_mdct(s, &cpe->ch[ch], overlap);
if (isnan(cpe->ch->coeffs[0])) {
av_log(avctx, AV_LOG_ERROR, "Input contains NaN\n");
return AVERROR(EINVAL);
}
}
start_ch += chans;
}

@@ -682,7 +678,6 @@ static av_cold int aac_encode_end(AVCodecContext *avctx)
ff_psy_preprocess_end(s->psypp);
av_freep(&s->buffer.samples);
av_freep(&s->cpe);
av_freep(&s->fdsp);
ff_af_queue_close(&s->afq);
return 0;
}

@@ -691,9 +686,7 @@ static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
{
int ret = 0;

s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
if (!s->fdsp)
return AVERROR(ENOMEM);
avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);

// window init
ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);

@@ -712,8 +705,8 @@ static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
static av_cold int alloc_buffers(AVCodecContext *avctx, AACEncContext *s)
{
int ch;
FF_ALLOCZ_ARRAY_OR_GOTO(avctx, s->buffer.samples, s->channels, 3 * 1024 * sizeof(s->buffer.samples[0]), alloc_fail);
FF_ALLOCZ_ARRAY_OR_GOTO(avctx, s->cpe, s->chan_map[0], sizeof(ChannelElement), alloc_fail);
FF_ALLOCZ_OR_GOTO(avctx, s->buffer.samples, 3 * 1024 * s->channels * sizeof(s->buffer.samples[0]), alloc_fail);
FF_ALLOCZ_OR_GOTO(avctx, s->cpe, sizeof(ChannelElement) * s->chan_map[0], alloc_fail);
FF_ALLOCZ_OR_GOTO(avctx, avctx->extradata, 5 + FF_INPUT_BUFFER_PADDING_SIZE, alloc_fail);

for(ch = 0; ch < s->channels; ch++)

@@ -753,10 +746,10 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)

s->chan_map = aac_chan_configs[s->channels-1];

if ((ret = dsp_init(avctx, s)) < 0)
if (ret = dsp_init(avctx, s))
goto fail;

if ((ret = alloc_buffers(avctx, s)) < 0)
if (ret = alloc_buffers(avctx, s))
goto fail;

avctx->extradata_size = 5;

@@ -768,8 +761,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
lengths[1] = ff_aac_num_swb_128[i];
for (i = 0; i < s->chan_map[0]; i++)
grouping[i] = s->chan_map[i + 1] == TYPE_CPE;
if ((ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths,
s->chan_map[0], grouping)) < 0)
if (ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths, s->chan_map[0], grouping))
goto fail;
s->psypp = ff_psy_preprocess_init(avctx);
s->coder = &ff_aac_coders[s->options.aac_coder];

@@ -784,7 +776,7 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
for (i = 0; i < 428; i++)
ff_aac_pow34sf_tab[i] = sqrt(ff_aac_pow2sf_tab[i] * sqrt(ff_aac_pow2sf_tab[i]));

avctx->initial_padding = 1024;
avctx->delay = 1024;
ff_af_queue_init(avctx, &s->afq);

return 0;
@@ -67,7 +67,7 @@ typedef struct AACEncContext {
PutBitContext pb;
FFTContext mdct1024; ///< long (1024 samples) frame transform context
FFTContext mdct128; ///< short (128 samples) frame transform context
AVFloatDSPContext *fdsp;
AVFloatDSPContext fdsp;
float *planar_samples[6]; ///< saved preprocessed input

int samplerate_index; ///< MPEG-4 samplerate index

@@ -82,7 +82,7 @@ int main(void)
write_float_3d_array(f34_2_4, 4, 8, 2);
printf("};\n");

printf("static const DECLARE_ALIGNED(16, float, Q_fract_allpass)[2][50][3][2] = {\n");
printf("static TABLE_CONST DECLARE_ALIGNED(16, float, Q_fract_allpass)[2][50][3][2] = {\n");
write_float_4d_array(Q_fract_allpass, 2, 50, 3, 2);
printf("};\n");
printf("static const DECLARE_ALIGNED(16, float, phi_fract)[2][50][2] = {\n");

@@ -70,7 +70,7 @@ static const float g2_Q4[] = {
0.16486303567403f, 0.23279856662996f, 0.25f
};

static av_cold void make_filters_from_proto(float (*filter)[8][2], const float *proto, int bands)
static void make_filters_from_proto(float (*filter)[8][2], const float *proto, int bands)
{
int q, n;
for (q = 0; q < bands; q++) {

@@ -82,7 +82,7 @@ static av_cold void make_filters_from_proto(float (*filter)[8][2], const float *
}
}

static av_cold void ps_tableinit(void)
static void ps_tableinit(void)
{
static const float ipdopd_sin[] = { 0, M_SQRT1_2, 1, M_SQRT1_2, 0, -M_SQRT1_2, -1, -M_SQRT1_2 };
static const float ipdopd_cos[] = { 1, M_SQRT1_2, 0, -M_SQRT1_2, -1, -M_SQRT1_2, 0, M_SQRT1_2 };

@@ -727,10 +727,7 @@ static void psy_3gpp_analyze_channel(FFPsyContext *ctx, int channel,
if (active_lines > 0.0f)
band->thr = calc_reduced_thr_3gpp(band, coeffs[g].min_snr, reduction);
pe += calc_pe_3gpp(band);
if (band->thr > 0.0f)
band->norm_fac = band->active_lines / band->thr;
else
band->norm_fac = 0.0f;
band->norm_fac = band->active_lines / band->thr;
norm_fac += band->norm_fac;
}
}
@@ -30,7 +30,6 @@
#include "sbr.h"
#include "aacsbr.h"
#include "aacsbrdata.h"
#include "aacsbr_tablegen.h"
#include "fft.h"
#include "aacps.h"
#include "sbrdsp.h"

@@ -96,6 +95,7 @@ static void aacsbr_func_ptr_init(AACSBRContext *c);

av_cold void ff_aac_sbr_init(void)
{
int n;
static const struct {
const void *sbr_codes, *sbr_bits;
const unsigned int table_size, elem_size;

@@ -124,7 +124,13 @@ av_cold void ff_aac_sbr_init(void)
SBR_INIT_VLC_STATIC(8, 592);
SBR_INIT_VLC_STATIC(9, 512);

aacsbr_tableinit();
for (n = 1; n < 320; n++)
sbr_qmf_window_us[320 + n] = sbr_qmf_window_us[320 - n];
sbr_qmf_window_us[384] = -sbr_qmf_window_us[384];
sbr_qmf_window_us[512] = -sbr_qmf_window_us[512];

for (n = 0; n < 320; n++)
sbr_qmf_window_ds[n] = sbr_qmf_window_us[2*n];

ff_ps_init();
}

@@ -326,7 +332,7 @@ static int check_n_master(AVCodecContext *avctx, int n_master, int bs_xover_band
static int sbr_make_f_master(AACContext *ac, SpectralBandReplication *sbr,
SpectrumParameters *spectrum)
{
unsigned int temp, max_qmf_subbands = 0;
unsigned int temp, max_qmf_subbands;
unsigned int start_min, stop_min;
int k;
const int8_t *sbr_offset_ptr;

@@ -514,7 +520,7 @@ static int sbr_make_f_master(AACContext *ac, SpectralBandReplication *sbr,
/// High Frequency Generation - Patch Construction (14496-3 sp04 p216 fig. 4.46)
static int sbr_hf_calc_npatches(AACContext *ac, SpectralBandReplication *sbr)
{
int i, k, last_k = -1, last_msb = -1, sb = 0;
int i, k, sb = 0;
int msb = sbr->k[0];
int usb = sbr->kx[1];
int goal_sb = ((1000 << 11) + (sbr->sample_rate >> 1)) / sbr->sample_rate;

@@ -528,12 +534,6 @@ static int sbr_hf_calc_npatches(AACContext *ac, SpectralBandReplication *sbr)

do {
int odd = 0;
if (k == last_k && msb == last_msb) {
av_log(ac->avctx, AV_LOG_ERROR, "patch construction failed\n");
return AVERROR_INVALIDDATA;
}
last_k = k;
last_msb = msb;
for (i = k; i == k || sb > (sbr->k[0] - 1 + msb - odd); i--) {
sb = sbr->f_master[i];
odd = (sb + sbr->k[0]) & 1;

@@ -562,8 +562,7 @@ static int sbr_hf_calc_npatches(AACContext *ac, SpectralBandReplication *sbr)
k = sbr->n_master;
} while (sb != sbr->kx[1] + sbr->m[1]);

if (sbr->num_patches > 1 &&
sbr->patch_num_subbands[sbr->num_patches - 1] < 3)
if (sbr->num_patches > 1 && sbr->patch_num_subbands[sbr->num_patches-1] < 3)
sbr->num_patches--;

return 0;

@@ -1618,14 +1617,8 @@ static void sbr_hf_assemble(float Y1[38][64][2],
memcpy(q_temp[i + 2*ch_data->t_env[0]], sbr->q_m[0], m_max * sizeof(sbr->q_m[0][0]));
}
} else if (h_SL) {
for (i = 0; i < 4; i++) {
memcpy(g_temp[i + 2 * ch_data->t_env[0]],
g_temp[i + 2 * ch_data->t_env_num_env_old],
sizeof(g_temp[0]));
memcpy(q_temp[i + 2 * ch_data->t_env[0]],
q_temp[i + 2 * ch_data->t_env_num_env_old],
sizeof(q_temp[0]));
}
memcpy(g_temp[2*ch_data->t_env[0]], g_temp[2*ch_data->t_env_num_env_old], 4*sizeof(g_temp[0]));
memcpy(q_temp[2*ch_data->t_env[0]], q_temp[2*ch_data->t_env_num_env_old], 4*sizeof(q_temp[0]));
}

for (e = 0; e < ch_data->bs_num_env; e++) {

@@ -1706,7 +1699,7 @@ void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
}
for (ch = 0; ch < nch; ch++) {
/* decode channel */
sbr_qmf_analysis(ac->fdsp, &sbr->mdct_ana, &sbr->dsp, ch ? R : L, sbr->data[ch].analysis_filterbank_samples,
sbr_qmf_analysis(&ac->fdsp, &sbr->mdct_ana, &sbr->dsp, ch ? R : L, sbr->data[ch].analysis_filterbank_samples,
(float*)sbr->qmf_filter_scratch,
sbr->data[ch].W, sbr->data[ch].Ypos);
sbr->c.sbr_lf_gen(ac, sbr, sbr->X_low,

@@ -1752,13 +1745,13 @@ void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
nch = 2;
}

sbr_qmf_synthesis(&sbr->mdct, &sbr->dsp, ac->fdsp,
sbr_qmf_synthesis(&sbr->mdct, &sbr->dsp, &ac->fdsp,
L, sbr->X[0], sbr->qmf_filter_scratch,
sbr->data[0].synthesis_filterbank_samples,
&sbr->data[0].synthesis_filterbank_samples_offset,
downsampled);
if (nch == 2)
sbr_qmf_synthesis(&sbr->mdct, &sbr->dsp, ac->fdsp,
sbr_qmf_synthesis(&sbr->mdct, &sbr->dsp, &ac->fdsp,
R, sbr->X[1], sbr->qmf_filter_scratch,
sbr->data[1].synthesis_filterbank_samples,
&sbr->data[1].synthesis_filterbank_samples_offset,
@@ -1,39 +0,0 @@
/*
* Header file for hardcoded AAC SBR windows
*
* Copyright (c) 2014 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

#include <stdlib.h>
#define CONFIG_HARDCODED_TABLES 0
#include "libavutil/common.h"
#include "aacsbr_tablegen.h"
#include "tableprint.h"

int main(void)
{
aacsbr_tableinit();

write_fileheader();

WRITE_ARRAY_ALIGNED("static const", 32, float, sbr_qmf_window_ds);
WRITE_ARRAY_ALIGNED("static const", 32, float, sbr_qmf_window_us);

return 0;
}

@@ -1,129 +0,0 @@
/*
* Header file for hardcoded AAC SBR windows
*
* Copyright (c) 2014 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

#ifndef AVCODEC_AACSBR_TABLEGEN_H
#define AVCODEC_AACSBR_TABLEGEN_H

#if CONFIG_HARDCODED_TABLES
#define aacsbr_tableinit()
#include "libavcodec/aacsbr_tables.h"
#else
///< window coefficients for analysis/synthesis QMF banks
static DECLARE_ALIGNED(32, float, sbr_qmf_window_ds)[320];
static DECLARE_ALIGNED(32, float, sbr_qmf_window_us)[640] = {
0.0000000000, -0.0005525286, -0.0005617692, -0.0004947518,
-0.0004875227, -0.0004893791, -0.0005040714, -0.0005226564,
-0.0005466565, -0.0005677802, -0.0005870930, -0.0006132747,
-0.0006312493, -0.0006540333, -0.0006777690, -0.0006941614,
-0.0007157736, -0.0007255043, -0.0007440941, -0.0007490598,
-0.0007681371, -0.0007724848, -0.0007834332, -0.0007779869,
-0.0007803664, -0.0007801449, -0.0007757977, -0.0007630793,
-0.0007530001, -0.0007319357, -0.0007215391, -0.0006917937,
-0.0006650415, -0.0006341594, -0.0005946118, -0.0005564576,
-0.0005145572, -0.0004606325, -0.0004095121, -0.0003501175,
-0.0002896981, -0.0002098337, -0.0001446380, -0.0000617334,
0.0000134949, 0.0001094383, 0.0002043017, 0.0002949531,
0.0004026540, 0.0005107388, 0.0006239376, 0.0007458025,
0.0008608443, 0.0009885988, 0.0011250155, 0.0012577884,
0.0013902494, 0.0015443219, 0.0016868083, 0.0018348265,
0.0019841140, 0.0021461583, 0.0023017254, 0.0024625616,
0.0026201758, 0.0027870464, 0.0029469447, 0.0031125420,
0.0032739613, 0.0034418874, 0.0036008268, 0.0037603922,
0.0039207432, 0.0040819753, 0.0042264269, 0.0043730719,
0.0045209852, 0.0046606460, 0.0047932560, 0.0049137603,
0.0050393022, 0.0051407353, 0.0052461166, 0.0053471681,
0.0054196775, 0.0054876040, 0.0055475714, 0.0055938023,
0.0056220643, 0.0056455196, 0.0056389199, 0.0056266114,
0.0055917128, 0.0055404363, 0.0054753783, 0.0053838975,
0.0052715758, 0.0051382275, 0.0049839687, 0.0048109469,
0.0046039530, 0.0043801861, 0.0041251642, 0.0038456408,
0.0035401246, 0.0032091885, 0.0028446757, 0.0024508540,
0.0020274176, 0.0015784682, 0.0010902329, 0.0005832264,
0.0000276045, -0.0005464280, -0.0011568135, -0.0018039472,
-0.0024826723, -0.0031933778, -0.0039401124, -0.0047222596,
-0.0055337211, -0.0063792293, -0.0072615816, -0.0081798233,
-0.0091325329, -0.0101150215, -0.0111315548, -0.0121849995,
0.0132718220, 0.0143904666, 0.0155405553, 0.0167324712,
0.0179433381, 0.0191872431, 0.0204531793, 0.0217467550,
0.0230680169, 0.0244160992, 0.0257875847, 0.0271859429,
0.0286072173, 0.0300502657, 0.0315017608, 0.0329754081,
0.0344620948, 0.0359697560, 0.0374812850, 0.0390053679,
0.0405349170, 0.0420649094, 0.0436097542, 0.0451488405,
0.0466843027, 0.0482165720, 0.0497385755, 0.0512556155,
0.0527630746, 0.0542452768, 0.0557173648, 0.0571616450,
0.0585915683, 0.0599837480, 0.0613455171, 0.0626857808,
0.0639715898, 0.0652247106, 0.0664367512, 0.0676075985,
0.0687043828, 0.0697630244, 0.0707628710, 0.0717002673,
0.0725682583, 0.0733620255, 0.0741003642, 0.0747452558,
0.0753137336, 0.0758008358, 0.0761992479, 0.0764992170,
0.0767093490, 0.0768173975, 0.0768230011, 0.0767204924,
0.0765050718, 0.0761748321, 0.0757305756, 0.0751576255,
0.0744664394, 0.0736406005, 0.0726774642, 0.0715826364,
0.0703533073, 0.0689664013, 0.0674525021, 0.0657690668,
0.0639444805, 0.0619602779, 0.0598166570, 0.0575152691,
0.0550460034, 0.0524093821, 0.0495978676, 0.0466303305,
0.0434768782, 0.0401458278, 0.0366418116, 0.0329583930,
0.0290824006, 0.0250307561, 0.0207997072, 0.0163701258,
0.0117623832, 0.0069636862, 0.0019765601, -0.0032086896,
-0.0085711749, -0.0141288827, -0.0198834129, -0.0258227288,
-0.0319531274, -0.0382776572, -0.0447806821, -0.0514804176,
-0.0583705326, -0.0654409853, -0.0726943300, -0.0801372934,
-0.0877547536, -0.0955533352, -0.1035329531, -0.1116826931,
-0.1200077984, -0.1285002850, -0.1371551761, -0.1459766491,
-0.1549607071, -0.1640958855, -0.1733808172, -0.1828172548,
-0.1923966745, -0.2021250176, -0.2119735853, -0.2219652696,
-0.2320690870, -0.2423016884, -0.2526480309, -0.2631053299,
-0.2736634040, -0.2843214189, -0.2950716717, -0.3059098575,
-0.3168278913, -0.3278113727, -0.3388722693, -0.3499914122,
0.3611589903, 0.3723795546, 0.3836350013, 0.3949211761,
0.4062317676, 0.4175696896, 0.4289119920, 0.4402553754,
0.4515996535, 0.4629308085, 0.4742453214, 0.4855253091,
0.4967708254, 0.5079817500, 0.5191234970, 0.5302240895,
0.5412553448, 0.5522051258, 0.5630789140, 0.5738524131,
0.5845403235, 0.5951123086, 0.6055783538, 0.6159109932,
0.6261242695, 0.6361980107, 0.6461269695, 0.6559016302,
0.6655139880, 0.6749663190, 0.6842353293, 0.6933282376,
0.7022388719, 0.7109410426, 0.7194462634, 0.7277448900,
0.7358211758, 0.7436827863, 0.7513137456, 0.7587080760,
0.7658674865, 0.7727780881, 0.7794287519, 0.7858353120,
0.7919735841, 0.7978466413, 0.8034485751, 0.8087695004,
0.8138191270, 0.8185776004, 0.8230419890, 0.8272275347,
0.8311038457, 0.8346937361, 0.8379717337, 0.8409541392,
0.8436238281, 0.8459818469, 0.8480315777, 0.8497805198,
0.8511971524, 0.8523047035, 0.8531020949, 0.8535720573,
0.8537385600,
};

static av_cold void aacsbr_tableinit(void)
{
int n;
for (n = 1; n < 320; n++)
sbr_qmf_window_us[320 + n] = sbr_qmf_window_us[320 - n];
sbr_qmf_window_us[384] = -sbr_qmf_window_us[384];
sbr_qmf_window_us[512] = -sbr_qmf_window_us[512];

for (n = 0; n < 320; n++)
sbr_qmf_window_ds[n] = sbr_qmf_window_us[2*n];
}
#endif /* CONFIG_HARDCODED_TABLES */

#endif /* AVCODEC_AACSBR_TABLEGEN_H */
@@ -266,6 +266,92 @@ static const int8_t sbr_offset[6][16] = {
{-2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 16, 20, 24}, // 64000 Hz < fs_sbr
};

///< window coefficients for analysis/synthesis QMF banks
static DECLARE_ALIGNED(32, float, sbr_qmf_window_ds)[320];
static DECLARE_ALIGNED(32, float, sbr_qmf_window_us)[640] = {
0.0000000000, -0.0005525286, -0.0005617692, -0.0004947518,
-0.0004875227, -0.0004893791, -0.0005040714, -0.0005226564,
-0.0005466565, -0.0005677802, -0.0005870930, -0.0006132747,
-0.0006312493, -0.0006540333, -0.0006777690, -0.0006941614,
-0.0007157736, -0.0007255043, -0.0007440941, -0.0007490598,
-0.0007681371, -0.0007724848, -0.0007834332, -0.0007779869,
-0.0007803664, -0.0007801449, -0.0007757977, -0.0007630793,
-0.0007530001, -0.0007319357, -0.0007215391, -0.0006917937,
-0.0006650415, -0.0006341594, -0.0005946118, -0.0005564576,
-0.0005145572, -0.0004606325, -0.0004095121, -0.0003501175,
-0.0002896981, -0.0002098337, -0.0001446380, -0.0000617334,
0.0000134949, 0.0001094383, 0.0002043017, 0.0002949531,
0.0004026540, 0.0005107388, 0.0006239376, 0.0007458025,
0.0008608443, 0.0009885988, 0.0011250155, 0.0012577884,
0.0013902494, 0.0015443219, 0.0016868083, 0.0018348265,
0.0019841140, 0.0021461583, 0.0023017254, 0.0024625616,
0.0026201758, 0.0027870464, 0.0029469447, 0.0031125420,
0.0032739613, 0.0034418874, 0.0036008268, 0.0037603922,
0.0039207432, 0.0040819753, 0.0042264269, 0.0043730719,
0.0045209852, 0.0046606460, 0.0047932560, 0.0049137603,
0.0050393022, 0.0051407353, 0.0052461166, 0.0053471681,
0.0054196775, 0.0054876040, 0.0055475714, 0.0055938023,
0.0056220643, 0.0056455196, 0.0056389199, 0.0056266114,
0.0055917128, 0.0055404363, 0.0054753783, 0.0053838975,
0.0052715758, 0.0051382275, 0.0049839687, 0.0048109469,
0.0046039530, 0.0043801861, 0.0041251642, 0.0038456408,
0.0035401246, 0.0032091885, 0.0028446757, 0.0024508540,
0.0020274176, 0.0015784682, 0.0010902329, 0.0005832264,
0.0000276045, -0.0005464280, -0.0011568135, -0.0018039472,
-0.0024826723, -0.0031933778, -0.0039401124, -0.0047222596,
-0.0055337211, -0.0063792293, -0.0072615816, -0.0081798233,
-0.0091325329, -0.0101150215, -0.0111315548, -0.0121849995,
0.0132718220, 0.0143904666, 0.0155405553, 0.0167324712,
0.0179433381, 0.0191872431, 0.0204531793, 0.0217467550,
0.0230680169, 0.0244160992, 0.0257875847, 0.0271859429,
0.0286072173, 0.0300502657, 0.0315017608, 0.0329754081,
0.0344620948, 0.0359697560, 0.0374812850, 0.0390053679,
0.0405349170, 0.0420649094, 0.0436097542, 0.0451488405,
0.0466843027, 0.0482165720, 0.0497385755, 0.0512556155,
0.0527630746, 0.0542452768, 0.0557173648, 0.0571616450,
0.0585915683, 0.0599837480, 0.0613455171, 0.0626857808,
0.0639715898, 0.0652247106, 0.0664367512, 0.0676075985,
0.0687043828, 0.0697630244, 0.0707628710, 0.0717002673,
0.0725682583, 0.0733620255, 0.0741003642, 0.0747452558,
0.0753137336, 0.0758008358, 0.0761992479, 0.0764992170,
0.0767093490, 0.0768173975, 0.0768230011, 0.0767204924,
0.0765050718, 0.0761748321, 0.0757305756, 0.0751576255,
0.0744664394, 0.0736406005, 0.0726774642, 0.0715826364,
0.0703533073, 0.0689664013, 0.0674525021, 0.0657690668,
0.0639444805, 0.0619602779, 0.0598166570, 0.0575152691,
0.0550460034, 0.0524093821, 0.0495978676, 0.0466303305,
0.0434768782, 0.0401458278, 0.0366418116, 0.0329583930,
0.0290824006, 0.0250307561, 0.0207997072, 0.0163701258,
0.0117623832, 0.0069636862, 0.0019765601, -0.0032086896,
-0.0085711749, -0.0141288827, -0.0198834129, -0.0258227288,
-0.0319531274, -0.0382776572, -0.0447806821, -0.0514804176,
-0.0583705326, -0.0654409853, -0.0726943300, -0.0801372934,
-0.0877547536, -0.0955533352, -0.1035329531, -0.1116826931,
-0.1200077984, -0.1285002850, -0.1371551761, -0.1459766491,
-0.1549607071, -0.1640958855, -0.1733808172, -0.1828172548,
-0.1923966745, -0.2021250176, -0.2119735853, -0.2219652696,
-0.2320690870, -0.2423016884, -0.2526480309, -0.2631053299,
-0.2736634040, -0.2843214189, -0.2950716717, -0.3059098575,
-0.3168278913, -0.3278113727, -0.3388722693, -0.3499914122,
0.3611589903, 0.3723795546, 0.3836350013, 0.3949211761,
0.4062317676, 0.4175696896, 0.4289119920, 0.4402553754,
0.4515996535, 0.4629308085, 0.4742453214, 0.4855253091,
0.4967708254, 0.5079817500, 0.5191234970, 0.5302240895,
0.5412553448, 0.5522051258, 0.5630789140, 0.5738524131,
0.5845403235, 0.5951123086, 0.6055783538, 0.6159109932,
0.6261242695, 0.6361980107, 0.6461269695, 0.6559016302,
0.6655139880, 0.6749663190, 0.6842353293, 0.6933282376,
0.7022388719, 0.7109410426, 0.7194462634, 0.7277448900,
0.7358211758, 0.7436827863, 0.7513137456, 0.7587080760,
0.7658674865, 0.7727780881, 0.7794287519, 0.7858353120,
0.7919735841, 0.7978466413, 0.8034485751, 0.8087695004,
0.8138191270, 0.8185776004, 0.8230419890, 0.8272275347,
0.8311038457, 0.8346937361, 0.8379717337, 0.8409541392,
0.8436238281, 0.8459818469, 0.8480315777, 0.8497805198,
0.8511971524, 0.8523047035, 0.8531020949, 0.8535720573,
0.8537385600,
};

/* First eight entries repeated at end to simplify SIMD implementations. */
const DECLARE_ALIGNED(16, float, ff_sbr_noise_table)[][2] = {
{-0.99948153278296, -0.59483417516607}, { 0.97113454393991, -0.67528515225647},
@@ -336,7 +336,7 @@ function fft_pass_neon
endfunc

.macro def_fft n, n2, n4
function fft\n\()_neon, align=6
function fft\n\()_neon align=6
sub sp, sp, #16
stp x28, x30, [sp]
add x28, x0, #\n4*2*8

@@ -376,8 +376,7 @@ function ff_fft_calc_neon, export=1
ld1 {v30.16b}, [x10]
mov x7, #-8
movrel x12, pmmp
ldr x4, [x3, x2, lsl #3]
add x3, x3, x4
ldr x3, [x3, x2, lsl #3]
movrel x13, mppm
movrel x14, X(ff_cos_16)
ld1 {v31.16b}, [x11]

@@ -417,21 +416,21 @@ function ff_fft_permute_neon, export=1
endfunc

const fft_tab_neon
.quad fft4_neon - fft_tab_neon
.quad fft8_neon - fft_tab_neon
.quad fft16_neon - fft_tab_neon
.quad fft32_neon - fft_tab_neon
.quad fft64_neon - fft_tab_neon
.quad fft128_neon - fft_tab_neon
.quad fft256_neon - fft_tab_neon
.quad fft512_neon - fft_tab_neon
.quad fft1024_neon - fft_tab_neon
.quad fft2048_neon - fft_tab_neon
.quad fft4096_neon - fft_tab_neon
.quad fft8192_neon - fft_tab_neon
.quad fft16384_neon - fft_tab_neon
.quad fft32768_neon - fft_tab_neon
.quad fft65536_neon - fft_tab_neon
.quad fft4_neon
.quad fft8_neon
.quad fft16_neon
.quad fft32_neon
.quad fft64_neon
.quad fft128_neon
.quad fft256_neon
.quad fft512_neon
.quad fft1024_neon
.quad fft2048_neon
.quad fft4096_neon
.quad fft8192_neon
.quad fft16384_neon
.quad fft32768_neon
.quad fft65536_neon
endconst

const pmmp, align=4

@@ -78,7 +78,6 @@ av_cold void ff_h264dsp_init_aarch64(H264DSPContext *c, const int bit_depth,
c->h264_v_loop_filter_luma = ff_h264_v_loop_filter_luma_neon;
c->h264_h_loop_filter_luma = ff_h264_h_loop_filter_luma_neon;
c->h264_v_loop_filter_chroma = ff_h264_v_loop_filter_chroma_neon;
if (chroma_format_idc <= 1)
c->h264_h_loop_filter_chroma = ff_h264_h_loop_filter_chroma_neon;

c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels_16_neon;
@@ -27,73 +27,73 @@
#include "libavutil/aarch64/cpu.h"
#include "libavcodec/h264qpel.h"

void ff_put_h264_qpel16_mc00_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc10_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc20_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc30_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc01_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc11_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc21_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc31_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc02_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc12_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc22_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc32_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc03_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc13_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc23_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc33_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc00_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc10_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc20_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc30_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc01_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc11_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc21_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc31_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc02_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc12_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc22_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc32_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc03_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc13_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc23_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel16_mc33_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);

void ff_put_h264_qpel8_mc00_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc10_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc20_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc30_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc01_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc11_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc21_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc31_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc02_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc12_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc22_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc32_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc03_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc13_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc23_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc33_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc00_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc10_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc20_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc30_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc01_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc11_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc21_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc31_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc02_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc12_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc22_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc32_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc03_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc13_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc23_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_put_h264_qpel8_mc33_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);

void ff_avg_h264_qpel16_mc00_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc10_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc20_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc30_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc01_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc11_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc21_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc31_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc02_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc12_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc22_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc32_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc03_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc13_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc23_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc33_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc00_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc10_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc20_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc30_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc01_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc11_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc21_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc31_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc02_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc12_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc22_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc32_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc03_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc13_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc23_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel16_mc33_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);

void ff_avg_h264_qpel8_mc00_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc10_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc20_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc30_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc01_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc11_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc21_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc31_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc02_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc12_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc22_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc32_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc03_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc13_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc23_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc33_neon(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc00_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc10_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc20_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc30_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc01_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc11_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc21_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc31_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc02_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc12_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc22_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc32_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc03_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc13_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc23_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);
void ff_avg_h264_qpel8_mc33_neon(uint8_t *dst, uint8_t *src, ptrdiff_t stride);

av_cold void ff_h264qpel_init_aarch64(H264QpelContext *c, int bit_depth)
{
@@ -24,7 +24,7 @@
#define WFRAC_BITS 16 // fractional bits for window
#define OUT_SHIFT (WFRAC_BITS + FRAC_BITS - 15)

const tbl_rev128.s, align=4
const tbl_rev128.s align=4
.byte 12, 13, 14, 15
.byte 8, 9, 10, 11
.byte 4, 5, 6, 7

@@ -23,7 +23,7 @@
#include "asm-offsets.h"

.macro shuffle a, b, c, d
const shuffle_\a\b\c\d, align=4
const shuffle_\a\b\c\d align=4
.byte (\a * 4), (\a * 4 + 1), (\a * 4 + 2), (\a * 4 + 3)
.byte (\b * 4), (\b * 4 + 1), (\b * 4 + 2), (\b * 4 + 3)
.byte (\c * 4), (\c * 4 + 1), (\c * 4 + 2), (\c * 4 + 3)

@@ -344,7 +344,7 @@ function fft15_pass
ret
endfunc

function fft30_neon, align=6
function fft30_neon align=6
sub sp, sp, #0x20
stp x20, x21, [sp]
stp x22, x30, [sp, #0x10]

@@ -372,7 +372,7 @@ function fft30_neon, align=6
endfunc

.macro def_fft n, n2
function fft\n\()_neon, align=6
function fft\n\()_neon align=6
sub sp, sp, #0x30
stp x20, x21, [sp]
stp x22, x30, [sp, #0x10]

@@ -438,8 +438,8 @@ function fft_b15_calc_neon
uzp1 v12.4s, v4.4s, v5.4s // exp[11 - 14].re
uzp2 v13.4s, v4.4s, v5.4s // exp[11 - 14].im
zip1 v14.4s, v6.4s, v7.4s // exp[5,10].re/exp[5,10].im
ldr x6, [x5, x3, lsl #3]
add x5, x5, x6
add x5, x5, x3, lsl #3
ldr x5, [x5]
mov x10, x0
blr x5
ldp x20, x30, [sp]

@@ -452,13 +452,13 @@ function fft_b15_calc_neon
endfunc

const fft_tab_neon
.quad fft15_neon - fft_tab_neon
.quad fft30_neon - fft_tab_neon
.quad fft60_neon - fft_tab_neon
.quad fft120_neon - fft_tab_neon
.quad fft240_neon - fft_tab_neon
.quad fft480_neon - fft_tab_neon
.quad fft960_neon - fft_tab_neon
.quad fft15_neon
.quad fft30_neon
.quad fft60_neon
.quad fft120_neon
.quad fft240_neon
.quad fft480_neon
.quad fft960_neon
endconst

function ff_celt_imdct_half_neon, export=1

@@ -641,7 +641,7 @@ function ff_celt_imdct_half_neon, export=1
endfunc

// [0] = exp(2 * i * pi / 5), [1] = exp(2 * i * pi * 2 / 5)
const fact5, align=4
const fact5 align=4
.float 0.30901699437494745, 0.95105651629515353
.float -0.80901699437494734, 0.58778525229247325
endconst
@@ -1,6 +1,6 @@
/*
* Autodesk RLE Decoder
* Copyright (c) 2005 The FFmpeg Project
* Copyright (C) 2005 the ffmpeg project
*
* This file is part of FFmpeg.
*

@@ -137,7 +137,7 @@ static int aasc_decode_frame(AVCodecContext *avctx,
return ret;

/* report that the buffer was completely consumed */
return avpkt->size;
return buf_size;
}

static av_cold int aasc_decode_end(AVCodecContext *avctx)

@@ -131,9 +131,6 @@ int ff_ac3_bit_alloc_calc_mask(AC3BitAllocParameters *s, int16_t *band_psd,
int band_start, band_end, begin, end1;
int lowcomp, fastleak, slowleak;

if (end <= 0)
return AVERROR_INVALIDDATA;

/* excitation function */
band_start = ff_ac3_bin_to_band_tab[start];
band_end = ff_ac3_bin_to_band_tab[end-1] + 1;

@@ -67,8 +67,7 @@
#define AC3_RENAME(x) x ## _fixed
#define AC3_NORM(norm) (1<<24)/(norm)
#define AC3_MUL(a,b) ((((int64_t) (a)) * (b))>>12)
#define AC3_RANGE(x) (x|((x&128)<<1))
#define AC3_HEAVY_RANGE(x) (x<<1)
#define AC3_RANGE(x) (x)
#define AC3_DYNAMIC_RANGE(x) (x)
#define AC3_SPX_BLEND(x) (x)
#define AC3_DYNAMIC_RANGE1 0

@@ -87,7 +86,6 @@
#define AC3_NORM(norm) (1.0f/(norm))
#define AC3_MUL(a,b) ((a) * (b))
#define AC3_RANGE(x) (dynamic_range_tab[(x)])
#define AC3_HEAVY_RANGE(x) (heavy_dynamic_range_tab[(x)])
#define AC3_DYNAMIC_RANGE(x) (powf(x, s->drc_scale))
#define AC3_SPX_BLEND(x) (x)* (1.0f/32)
#define AC3_DYNAMIC_RANGE1 1.0f

@@ -166,7 +166,7 @@ static int ac3_sync(uint64_t state, AACAC3ParseContext *hdr_info,
int err;
union {
uint64_t u64;
uint8_t u8[8 + FF_INPUT_BUFFER_PADDING_SIZE];
uint8_t u8[8];
} tmp = { av_be2ne64(state) };
AC3HeaderInfo hdr, *phdr = &hdr;
GetBitContext gbc;
Some files were not shown because too many files have changed in this diff.