Compare commits
4 Commits
Author | SHA1 | Date | |
---|---|---|---|
![]() |
d9bdf7d9ae | ||
![]() |
a588e1e560 | ||
![]() |
36e7385d0e | ||
![]() |
5108323aa9 |
2
.gitignore
vendored
2
.gitignore
vendored
@@ -36,7 +36,6 @@
|
||||
/doc/avoptions_codec.texi
|
||||
/doc/avoptions_format.texi
|
||||
/doc/doxy/html/
|
||||
/doc/examples/avio_list_dir
|
||||
/doc/examples/avio_reading
|
||||
/doc/examples/decoding_encoding
|
||||
/doc/examples/demuxing_decoding
|
||||
@@ -84,7 +83,6 @@
|
||||
/tools/pktdumper
|
||||
/tools/probetest
|
||||
/tools/qt-faststart
|
||||
/tools/sidxindex
|
||||
/tools/trasher
|
||||
/tools/seek_print
|
||||
/tools/uncoded_frame
|
||||
|
440
Changelog
440
Changelog
@@ -1,442 +1,6 @@
|
||||
Entries are sorted chronologically from oldest to youngest within each release,
|
||||
releases are sorted from youngest to oldest.
|
||||
|
||||
version 2.7.7
|
||||
- avformat/ffmdec: Check pix_fmt
|
||||
- avcodec/ttaenc: Reallocate packet if its too small
|
||||
- pgssubdec: fix subpicture output colorspace and range
|
||||
- avcodec/ac3dec: Reset SPX when switching from EAC3 to AC3
|
||||
- avfilter/vf_drawtext: Check return code of load_glyph()
|
||||
- avcodec/takdec: add code that got somehow lost in process of REing
|
||||
- avcodec/apedec: fix decoding of stereo files with one channel full of silence
|
||||
- avcodec/avpacket: Fix off by 5 error
|
||||
- avcodec/h264: Fix for H.264 configuration parsing
|
||||
- avcodec/bmp_parser: Ensure remaining_size is not too small in startcode packet crossing corner case
|
||||
- avfilter/src_movie: fix how we check for overflows with seek_point
|
||||
- avcodec/j2kenc: Add attribution to OpenJPEG project:
|
||||
- avcodec/libutvideodec: copy frame so it has reference counters when refcounted_frames is set
|
||||
- avformat/rtpdec_jpeg: fix low contrast image on low quality setting
|
||||
- avcodec/mjpegenc_common: Store approximate aspect if exact cannot be stored
|
||||
- avcodec/resample: Remove disabled and faulty code
|
||||
- indeo2: Fix banding artefacts
|
||||
- indeo2data: K&R formatting cosmetics
|
||||
- avcodec/imgconvert: Support non-planar colorspaces while padding
|
||||
- avutil/random_seed: Add the runtime in cycles of the main loop to the entropy pool
|
||||
- avutil/channel_layout: AV_CH_LAYOUT_6POINT1_BACK not reachable in parsing
|
||||
- avformat/concatdec: set safe mode to enabled instead of auto
|
||||
- avformat/utils: fix dts from pts code in compute_pkt_fields() during ascending delay
|
||||
- avformat/rtpenc: Fix integer overflow in NTP_TO_RTP_FORMAT
|
||||
- avformat/cache: Fix memleak of tree entries
|
||||
- lavf/mov: fix sidx with edit lists (cherry picked from commit 3617e69d50dd9dd07b5011dfb9477a9d1a630354)
|
||||
- avcodec/mjpegdec: Fix decoding slightly odd progressive jpeg
|
||||
- avcodec/avpacket: clear priv in av_init_packet()
|
||||
- swscale/utils: Fix chrSrcHSubSample for GBRAP16
|
||||
- swscale/input: Fix GBRAP16 input
|
||||
- postproc: fix unaligned access
|
||||
- avutil/pixdesc: Make get_color_type() aware of CIE XYZ formats
|
||||
- avcodec/h264: Execute error concealment before marking the frame as done.
|
||||
- swscale/x86/output: Fix yuv2planeX_16* with unaligned destination
|
||||
- swscale/x86/output: Move code into yuv2planeX_mainloop
|
||||
- avutil/frame: Free destination qp_table_buf in frame_copy_props()
|
||||
- libwebpenc_animencoder: print library messages in verbose log levels
|
||||
- libwebpenc_animencoder: zero initialize the WebPAnimEncoderOptions struct
|
||||
- doc/utils: fix typo for min() description
|
||||
|
||||
|
||||
version 2.7.6
|
||||
- avcodec/jpeg2000dec: More completely check cdef
|
||||
- avutil/opt: check for and handle errors in av_opt_set_dict2()
|
||||
- avcodec/flacenc: fix calculation of bits required in case of custom sample rate
|
||||
- avformat: Document urls a bit
|
||||
- avformat/libquvi: Set default demuxer and protocol limitations
|
||||
- avformat/concat: Check protocol prefix
|
||||
- doc/demuxers: Document enable_drefs and use_absolute_path
|
||||
- avcodec/mjpegdec: Check for end for both bytes in unescaping
|
||||
- avcodec/mpegvideo_enc: Check for integer overflow in ff_mpv_reallocate_putbitbuffer()
|
||||
- avformat/avformat: Replace some references to filenames by urls
|
||||
- avcodec/wmaenc: Check ff_wma_init() for failure
|
||||
- avcodec/mpeg12enc: Move high resolution thread check to before initializing threads
|
||||
- avformat/img2dec: Use AVOpenCallback
|
||||
- avformat/avio: Limit url option parsing to the documented cases
|
||||
- avformat/img2dec: do not interpret the filename by default if a IO context has been opened
|
||||
- avcodec/ass_split: Fix null pointer dereference in ff_ass_style_get()
|
||||
- mov: Add an option to toggle dref opening
|
||||
- avcodec/gif: Fix lzw buffer size
|
||||
- avcodec/put_bits: Assert buf_ptr in flush_put_bits()
|
||||
- avcodec/tiff: Check subsample & rps values more completely
|
||||
- swscale/swscale: Add some sanity checks for srcSlice* parameters
|
||||
- swscale/x86/rgb2rgb_template: Fix planar2x() for short width
|
||||
- swscale/swscale_unscaled: Fix odd height inputs for bayer_to_yv12_wrapper()
|
||||
- swscale/swscale_unscaled: Fix odd height inputs for bayer_to_rgb24_wrapper()
|
||||
- avcodec/aacenc: Check both channels for finiteness
|
||||
- swscale/swscale-test: Fix slice height in random reference data creation.
|
||||
- dca: fix misaligned access in avpriv_dca_convert_bitstream
|
||||
- brstm: fix missing closing brace
|
||||
- brstm: also allocate b->table in read_packet
|
||||
- brstm: make sure an ADPC chunk was read for adpcm_thp
|
||||
- vorbisdec: reject rangebits 0 with non-0 partitions
|
||||
- vorbisdec: reject channel mapping with less than two channels
|
||||
- ffmdec: reset packet_end in case of failure
|
||||
- avformat/ipmovie: put video decoding_map_size into packet and use it in decoder
|
||||
|
||||
version 2.7.5
|
||||
- configure: bump copyright year to 2016
|
||||
- avformat/hls: Even stricter URL checks
|
||||
- avformat/hls: More strict url checks
|
||||
- swscale/utils: Detect and skip unneeded sws_setColorspaceDetails() calls
|
||||
- swscale/yuv2rgb: Increase YUV2RGB table headroom
|
||||
- swscale/yuv2rgb: Factor YUVRGB_TABLE_LUMA_HEADROOM out
|
||||
- avformat/hls: forbid all protocols except http(s) & file
|
||||
- avformat/aviobuf: Fix end check in put_str16()
|
||||
- avformat/asfenc: Check pts
|
||||
- avcodec/mpeg4video: Check time_incr
|
||||
- avcodec/wavpackenc: Check the number of channels
|
||||
- avcodec/wavpackenc: Headers are per channel
|
||||
- avcodec/aacdec_template: Check id_map
|
||||
- avcodec/dvdec: Fix "left shift of negative value -254"
|
||||
- avcodec/mjpegdec: Fix negative shift
|
||||
- avcodec/mss2: Check for repeat overflow
|
||||
- avformat: Add integer fps from 31 to 60 to get_std_framerate()
|
||||
- avcodec/mpegvideo_enc: Clip bits_per_raw_sample within valid range
|
||||
- avfilter/vf_scale: set proper out frame color range
|
||||
- avcodec/motion_est: Fix mv_penalty table size
|
||||
- avcodec/h264_slice: Fix integer overflow in implicit weight computation
|
||||
- swscale/utils: Use normal bilinear scaler if fast cannot be used due to tiny dimensions
|
||||
- avcodec/put_bits: Always check buffer end before writing
|
||||
- mjpegdec: extend check for incompatible values of s->rgb and s->ls
|
||||
- swscale/utils: Fix intermediate format for cascaded alpha downscaling
|
||||
- x86/float_dsp: zero extend offset from ff_scalarproduct_float_sse
|
||||
- avfilter/vf_zoompan: do not free frame we pushed to lavfi
|
||||
|
||||
|
||||
version 2.7.4
|
||||
- nuv: sanitize negative fps rate
|
||||
- rawdec: only exempt BIT0 with need_copy from buffer sanity check
|
||||
- mlvdec: check that index_entries exist
|
||||
- nutdec: reject negative value_len in read_sm_data
|
||||
- xwddec: prevent overflow of lsize * avctx->height
|
||||
- nutdec: only copy the header if it exists
|
||||
- exr: fix out of bounds read in get_code
|
||||
- on2avc: limit number of bits to 30 in get_egolomb
|
||||
- avcodec/mpeg4videodec: also for empty partitioned slices
|
||||
- avcodec/h264_refs: Fix long_idx check
|
||||
- avcodec/h264_mc_template: prefetch list1 only if it is used in the MB
|
||||
- avcodec/h264_slice: Simplify ref2frm indexing
|
||||
- Revert "avcodec/aarch64/neon.S: Update neon.s for transpose_4x4H"
|
||||
- avfilter/vf_mpdecimate: Add missing emms_c()
|
||||
- sonic: make sure num_taps * channels is not larger than frame_size
|
||||
- opus_silk: fix typo causing overflow in silk_stabilize_lsf
|
||||
- ffm: reject invalid codec_id and codec_type
|
||||
- golomb: always check for invalid UE golomb codes in get_ue_golomb
|
||||
- aaccoder: prevent crash of anmr coder
|
||||
- ffmdec: reject zero-sized chunks
|
||||
- swscale/x86/rgb2rgb_template: Fallback to mmx in interleaveBytes() if the alignment is insufficient for SSE*
|
||||
- swscale/x86/rgb2rgb_template: Do not crash on misaligend stride
|
||||
- avformat/mxfenc: Do not crash if there is no packet in the first stream
|
||||
- avcodec/aarch64/neon.S: Update neon.s for transpose_4x4H
|
||||
- avformat/utils: estimate_timings_from_pts - increase retry counter, fixes invalid duration for ts files with hevc codec
|
||||
- avformat/matroskaenc: Check codecdelay before use
|
||||
- avutil/mathematics: Fix division by 0
|
||||
- mjpegdec: consider chroma subsampling in size check
|
||||
- avcodec/hevc: Check max ctb addresses for WPP
|
||||
- avcodec/vp3: ensure header is parsed successfully before tables
|
||||
- avcodec/jpeg2000dec: Check bpno in decode_cblk()
|
||||
- avcodec/pgssubdec: Fix left shift of 255 by 24 places cannot be represented in type int
|
||||
- swscale/utils: Fix for runtime error: left shift of negative value -1
|
||||
- avcodec/hevc: Fix integer overflow of entry_point_offset
|
||||
- avcodec/dirac_parser: Check that there is a previous PU before accessing it
|
||||
- avcodec/dirac_parser: Add basic validity checks for next_pu_offset and prev_pu_offset
|
||||
- avcodec/dirac_parser: Fix potential overflows in pointer checks
|
||||
- avcodec/wmaprodec: Check bits per sample to be within the range not causing integer overflows
|
||||
- avcodec/wmaprodec: Fix overflow of cutoff
|
||||
- avformat/smacker: fix integer overflow with pts_inc
|
||||
- avcodec/vp3: Fix "runtime error: left shift of negative value"
|
||||
- mpegencts: Fix overflow in cbr mode period calculations
|
||||
- avutil/timecode: Fix fps check
|
||||
- avutil/mathematics: return INT64_MIN (=AV_NOPTS_VALUE) from av_rescale_rnd() for overflows
|
||||
- avcodec/apedec: Check length in long_filter_high_3800()
|
||||
- avcodec/vp3: always set pix_fmt in theora_decode_header()
|
||||
- avcodec/mpeg4videodec: Check available data before reading custom matrix
|
||||
- avutil/mathematics: Do not treat INT64_MIN as positive in av_rescale_rnd
|
||||
- avutil/integer: Fix av_mod_i() with negative dividend
|
||||
- avformat/dump: Fix integer overflow in av_dump_format()
|
||||
- avcodec/h264_refs: Check that long references match before use
|
||||
- avcodec/utils: Clear dimensions in ff_get_buffer() on failure
|
||||
- avcodec/utils: Use 64bit for aspect ratio calculation in avcodec_string()
|
||||
- avcodec/vp3: Clear context on reinitialization failure
|
||||
- avcodec/hevc: allocate entries unconditionally
|
||||
- avcodec/hevc_cabac: Fix multiple integer overflows
|
||||
- avcodec/jpeg2000dwt: Check ndeclevels before calling dwt_encode*()
|
||||
- avcodec/jpeg2000dwt: Check ndeclevels before calling dwt_decode*()
|
||||
- avcodec/hevc: Check entry_point_offsets
|
||||
- avcodec/cabac: Check initial cabac decoder state
|
||||
- avcodec/cabac_functions: Fix "left shift of negative value -31767"
|
||||
- avcodec/h264_slice: Limit max_contexts when slice_context_count is initialized
|
||||
- avcodec/vp8: Do not use num_coeff_partitions in thread/buffer setup
|
||||
- avcodec/ffv1dec: Clear quant_table_count if its invalid
|
||||
- avcodec/ffv1dec: Print an error if the quant table count is invalid
|
||||
- doc/filters/drawtext: fix centering example
|
||||
- hqx: correct type and size check of info_offset
|
||||
- mxfdec: check edit_rate also for physical_track
|
||||
- mpegvideo: clear overread in clear_context
|
||||
- dvdsubdec: validate offset2 similar to offset1
|
||||
- aacdec: don't return frames without data from aac_decode_er_frame
|
||||
- avcodec/takdec: Use memove, avoid undefined memcpy() use
|
||||
- riffdec: prevent negative bit rate
|
||||
|
||||
|
||||
version 2.7.3:
|
||||
- rtmpcrypt: Do the xtea decryption in little endian mode
|
||||
- Update versions for 2.7.3
|
||||
- avformat/matroskadec: Check subtitle stream before dereferencing
|
||||
- avformat/utils: Do not init parser if probing is unfinished
|
||||
- avcodec/jpeg2000dec: Fix potential integer overflow with tile dimensions
|
||||
- avcodec/jpeg2000dec: Check SIZ dimensions to be within the supported range
|
||||
- avcodec/jpeg2000: Check comp coords to be within the supported size
|
||||
- avcodec/jpeg2000: Use av_image_check_size() in ff_jpeg2000_init_component()
|
||||
- avcodec/wmaprodec: Check for overread in decode_packet()
|
||||
- avcodec/smacker: Check that the data size is a multiple of a sample vector
|
||||
- avcodec/takdec: Skip last p2 sample (which is unused)
|
||||
- avcodec/dxtory: Fix input size check in dxtory_decode_v1_410()
|
||||
- avcodec/dxtory: Fix input size check in dxtory_decode_v1_420()
|
||||
- avcodec/error_resilience: avoid accessing previous or next frames tables beyond height
|
||||
- avcodec/dpx: Move need_align to act per line
|
||||
- avcodec/flashsv: Check size before updating it
|
||||
- avcodec/ivi: Check image dimensions
|
||||
- avcodec/utils: Better check for channels in av_get_audio_frame_duration()
|
||||
- avcodec/jpeg2000dec: Check for duplicate SIZ marker
|
||||
- tests/fate/avformat: Fix fate-lavf
|
||||
- doc/ffmpeg: Clarify that the sdp_file option requires an rtp output.
|
||||
- ffmpeg: Don't try and write sdp info if none of the outputs had an rtp format.
|
||||
- apng: use correct size for output buffer
|
||||
- jvdec: avoid unsigned overflow in comparison
|
||||
- avcodec/hevc_ps: Check chroma_format_idc
|
||||
- avcodec/jpeg2000dec: Clip all tile coordinates
|
||||
- avcodec/microdvddec: Check for string end in 'P' case
|
||||
- avcodec/dirac_parser: Fix undefined memcpy() use
|
||||
- avformat/xmv: Discard remainder of packet on error
|
||||
- avformat/xmv: factor return check out of if/else
|
||||
- avcodec/mpeg12dec: Do not call show_bits() with invalid bits
|
||||
- libavutil/channel_layout: Check strtol*() for failure
|
||||
- avcodec/ffv1dec: Check for 0 quant tables
|
||||
- avcodec/mjpegdec: Reinitialize IDCT on BPP changes
|
||||
- avcodec/mjpegdec: Check index in ljpeg_decode_yuv_scan() before using it
|
||||
- avutil/file_open: avoid file handle inheritance on Windows
|
||||
- avcodec/h264_slice: Disable slice threads if there are multiple access units in a packet
|
||||
- opusdec: Don't run vector_fmul_scalar on zero length arrays
|
||||
- avcodec/ffv1: Initialize vlc_state on allocation
|
||||
- avcodec/ffv1dec: update progress in case of broken pointer chains
|
||||
- avcodec/ffv1dec: Clear slice coordinates if they are invalid or slice header decoding fails for other reasons
|
||||
- avformat/httpauth: Add space after commas in HTTP/RTSP auth header
|
||||
- avcodec/x86/sbrdsp: Fix using uninitialized upper 32bit of noise
|
||||
- avcodec/ffv1dec: Fix off by 1 error in quant_table_count check
|
||||
- avcodec/ffv1dec: Explicitly check read_quant_table() return value
|
||||
- avcodec/rangecoder: Check e
|
||||
- avutil/log: fix zero length gnu_printf format string warning
|
||||
- lavf/webvttenc: Require webvtt file to contain exactly one WebVTT stream.
|
||||
- avcodec/mjpegdec: Fix decoding RGBA RCT LJPEG
|
||||
- avfilter/af_asyncts: use llabs for int64_t
|
||||
- avcodec/g2meet: Also clear tile dimensions on header_fail
|
||||
- avcodec/g2meet: Fix potential overflow in tile dimensions check
|
||||
- avcodec/svq1dec: Check init_get_bits8() for failure
|
||||
- avcodec/tta: Check init_get_bits8() for failure
|
||||
- avcodec/vp3: Check init_get_bits8() for failure
|
||||
- swresample/swresample: Fix integer overflow in seed calculation
|
||||
- avformat/mov: Fix integer overflow in FFABS
|
||||
- avutil/common: Add FFNABS()
|
||||
- avutil/common: Document FFABS() corner case
|
||||
- avformat/dump: Fix integer overflow in aspect ratio calculation
|
||||
- avformat/mxg: Use memmove()
|
||||
- avcodec/truemotion1: Check for even width
|
||||
- avcodec/mpeg12dec: Set dimensions in mpeg1_decode_sequence() only in absence of errors
|
||||
- avcodec/libopusenc: Fix infinite loop on flushing after 0 input
|
||||
- avformat/hevc: Check num_long_term_ref_pics_sps to avoid potentially long loops
|
||||
- avformat/hevc: Fix parsing errors
|
||||
- ffmpeg: Use correct codec_id for av_parser_change() check
|
||||
- ffmpeg: Check av_parser_change() for failure
|
||||
- ffmpeg: Check for RAWVIDEO and do not relay only on AVFMT_RAWPICTURE
|
||||
- ffmpeg: check avpicture_fill() return value
|
||||
- avformat/mux: Update sidedata in ff_write_chained()
|
||||
- avcodec/flashsvenc: Correct max dimension in error message
|
||||
- avcodec/svq1enc: Check dimensions
|
||||
- avcodec/dcaenc: clear bitstream end
|
||||
- libavcodec/aacdec_template: Use init_get_bits8() in aac_decode_frame()
|
||||
- rawdec: fix mjpeg probing buffer size check
|
||||
- rawdec: fix mjpeg probing
|
||||
- configure: loongson disable expensive optimizations in gcc O3 optimization
|
||||
- videodsp: don't overread edges in vfix3 emu_edge.
|
||||
- avformat/mp3dec: improve junk skipping heuristic
|
||||
- avformat/hls: add support for EXT-X-MAP
|
||||
- avformat/hls: fix segment selection regression on track changes of live streams
|
||||
- lavf/matroskadec: Fully parse and repack MP3 packets
|
||||
- avcodec/h264_mp4toannexb_bsf: Reorder operations in nal_size check
|
||||
- avformat/oggenc: Check segments_count for headers too
|
||||
- avformat/segment: atomically update list if possible
|
||||
- avformat/avidec: Workaround broken initial frame
|
||||
- hevc: properly handle no_rasl_output_flag when removing pictures from the DPB
|
||||
- hevc: fix wpp threading deadlock.
|
||||
- avcodec/ffv1: separate slice_count from max_slice_count
|
||||
- lavf/img2dec: Fix memory leak
|
||||
- avcodec/mp3: fix skipping zeros
|
||||
- avformat/srtdec: make sure we probe a number
|
||||
- avformat/srtdec: more lenient first line probing
|
||||
- doc: mention libavcodec can decode Opus natively
|
||||
- avcodec/ffv1enc: fix assertion failure with unset bits per raw sample
|
||||
- MAINTAINERS: Remove myself as leader
|
||||
- mips/hevcdsp: fix string concatenation on macros
|
||||
|
||||
|
||||
version 2.7.2:
|
||||
- imc: use correct position for flcoeffs2 calculation
|
||||
- hevc: check slice address length
|
||||
- snow: remove an obsolete av_assert2
|
||||
- webp: fix infinite loop in webp_decode_frame
|
||||
- wavpack: limit extra_bits to 32 and use get_bits_long
|
||||
- ffmpeg: only count got_output/errors in decode_error_stat
|
||||
- ffmpeg: exit_on_error if decoding a packet failed
|
||||
- pthread_frame: forward error codes when flushing
|
||||
- huffyuvdec: validate image size
|
||||
- wavpack: use get_bits_long to read up to 32 bits
|
||||
- nutdec: check maxpos in read_sm_data before returning success
|
||||
- s302m: fix arithmetic exception
|
||||
- vc1dec: use get_bits_long and limit the read bits to 32
|
||||
- mpegaudiodec: copy AVFloatDSPContext from first context to all contexts
|
||||
- avcodec/vp8: Check buffer size in vp8_decode_frame_header()
|
||||
- avcodec/vp8: Fix null pointer dereference in ff_vp8_decode_free()
|
||||
- avcodec/diracdec: Check for hpel_base allocation failure
|
||||
- avcodec/rv34: Clear pointers in ff_rv34_decode_init_thread_copy()
|
||||
- avfilter/af_aresample: Check ff_all_* for allocation failures
|
||||
- avcodec/pthread_frame: clear priv_data, avoid stale pointer in error case
|
||||
- swscale/utils: Clear pix buffers
|
||||
- avutil/fifo: Fix the case where func() returns less bytes than requested in av_fifo_generic_write()
|
||||
- ffmpeg: Fix cleanup after failed allocation of output_files
|
||||
- avformat/mov: Fix deallocation when MOVStreamContext failed to allocate
|
||||
- ffmpeg: Fix crash with ost->last_frame allocation failure
|
||||
- ffmpeg: Fix cleanup with ost = NULL
|
||||
- avcodec/pthread_frame: check avctx on deallocation
|
||||
- avcodec/sanm: Reset sizes in destroy_buffers()
|
||||
- avcodec/alac: Clear pointers in allocate_buffers()
|
||||
- bytestream2: set the reader to the end when reading more than available
|
||||
- avcodec/utils: use a minimum 32pixel width in avcodec_align_dimensions2() for H.264
|
||||
- avcodec/mpegvideo: Clear pointers in ff_mpv_common_init()
|
||||
- oggparsedirac: check return value of init_get_bits
|
||||
- wmalosslessdec: reset frame->nb_samples on packet loss
|
||||
- wmalosslessdec: avoid reading 0 bits with get_bits
|
||||
- Put a space between string literals and macros.
|
||||
- avcodec/rawenc: Use ff_alloc_packet() instead of ff_alloc_packet2()
|
||||
- avcodec/aacsbr: check that the element type matches before applying SBR
|
||||
- avcodec/h264_slice: Use w/h from the AVFrame instead of mb_w/h
|
||||
- vp9/update_prob: prevent out of bounds table read
|
||||
- avfilter/vf_transpose: Fix rounding error
|
||||
- avcodec/h264_refs: discard mismatching references
|
||||
- avcodec/mjpegdec: Fix small picture upscale
|
||||
- avcodec/pngdec: Check values before updating context in decode_fctl_chunk()
|
||||
- avcodec/pngdec: Copy IHDR & plte state from last thread
|
||||
- avcodec/pngdec: Require a IHDR chunk before fctl
|
||||
- avcodec/pngdec: Only allow one IHDR chunk
|
||||
- wmavoice: limit wmavoice_decode_packet return value to packet size
|
||||
- swscale/swscale_unscaled: Fix rounding difference with RGBA output between little and big endian
|
||||
- ffmpeg: Do not use the data/size of a bitstream filter after failure
|
||||
- swscale/x86/rgb2rgb_template: fix signedness of v in shuffle_bytes_2103_{mmx,mmxext}
|
||||
- vda: unlock the pixel buffer base address.
|
||||
- swscale/rgb2rgb_template: Fix signedness of v in shuffle_bytes_2103_c()
|
||||
- swscale/rgb2rgb_template: Implement shuffle_bytes_0321_c and fix shuffle_bytes_2103_c on BE
|
||||
- swscale/rgb2rgb_template: Disable shuffle_bytes_2103_c on big endian
|
||||
- swr: Remember previously set int_sample_format from user
|
||||
- swresample: soxr implementation for swr_get_out_samples()
|
||||
- avformat/swfdec: Do not error out on pixel format changes
|
||||
- ffmpeg_opt: Fix forcing fourccs
|
||||
- configure: Check for x265_api_get
|
||||
- swscale/x86/rgb2rgb_template: don't call emms on sse2/avx functions
|
||||
- swscale/x86/rgb2rgb_template: add missing xmm clobbers
|
||||
- library.mak: Workaround SDL redefining main and breaking fate tests on mingw
|
||||
- vaapi_h264: fix RefPicList[] field flags.
|
||||
|
||||
version 2.7.1:
|
||||
- postproc: fix unaligned access
|
||||
- avformat: clarify what package needs to be compiled with SSL support
|
||||
- avcodec/libx264: Avoid reconfig on equivalent aspect ratios
|
||||
- avcodec/flacenc: Fix Invalid Rice order
|
||||
- tls_gnutls: fix hang on disconnection
|
||||
- avcodec/hevc_ps: Only discard overread VPS if a previous is available
|
||||
- ffmpeg: Free last_frame instead of just unref
|
||||
- avcodec/ffv1enc: fix bps for >8bit yuv when not explicitly set
|
||||
- avio: fix potential crashes when combining ffio_ensure_seekback + crc
|
||||
- examples/demuxing_decoding: use properties from frame instead of video_dec_ctx
|
||||
- h264: er: Copy from the previous reference only if compatible
|
||||
- doc: fix spelling errors
|
||||
- configure: only disable VSX for !ppc64el
|
||||
- ffmpeg_opt: Check for localtime() failure
|
||||
- avformat/singlejpeg: fix standalone compilation
|
||||
- configure: Disable VSX on unspecified / generic CPUs
|
||||
- avformat: Fix bug in parse_rps for HEVC.
|
||||
- takdec: ensure chan2 is a valid channel index
|
||||
- avcodec/h264_slice: Use AVFrame dimensions for grayscale handling
|
||||
|
||||
|
||||
version 2.7:
|
||||
- FFT video filter
|
||||
- TDSC decoder
|
||||
- DTS lossless extension (XLL) decoding (not lossless, disabled by default)
|
||||
- showwavespic filter
|
||||
- DTS decoding through libdcadec
|
||||
- Drop support for nvenc API before 5.0
|
||||
- nvenc HEVC encoder
|
||||
- Detelecine filter
|
||||
- Intel QSV-accelerated H.264 encoding
|
||||
- MMAL-accelerated H.264 decoding
|
||||
- basic APNG encoder and muxer with default extension "apng"
|
||||
- unpack DivX-style packed B-frames in MPEG-4 bitstream filter
|
||||
- WebM Live Chunk Muxer
|
||||
- nvenc level and tier options
|
||||
- chorus filter
|
||||
- Canopus HQ/HQA decoder
|
||||
- Automatically rotate videos based on metadata in ffmpeg
|
||||
- improved Quickdraw compatibility
|
||||
- VP9 high bit-depth and extended colorspaces decoding support
|
||||
- WebPAnimEncoder API when available for encoding and muxing WebP
|
||||
- Direct3D11-accelerated decoding
|
||||
- Support Secure Transport
|
||||
- Multipart JPEG demuxer
|
||||
|
||||
|
||||
version 2.6:
|
||||
- nvenc encoder
|
||||
- 10bit spp filter
|
||||
- colorlevels filter
|
||||
- RIFX format for *.wav files
|
||||
- RTP/mpegts muxer
|
||||
- non continuous cache protocol support
|
||||
- tblend filter
|
||||
- cropdetect support for non 8bpp, absolute (if limit >= 1) and relative (if limit < 1.0) threshold
|
||||
- Camellia symmetric block cipher
|
||||
- OpenH264 encoder wrapper
|
||||
- VOC seeking support
|
||||
- Closed caption Decoder
|
||||
- fspp, uspp, pp7 MPlayer postprocessing filters ported to native filters
|
||||
- showpalette filter
|
||||
- Twofish symmetric block cipher
|
||||
- Support DNx100 (960x720@8)
|
||||
- eq2 filter ported from libmpcodecs as eq filter
|
||||
- removed libmpcodecs
|
||||
- Changed default DNxHD colour range in QuickTime .mov derivatives to mpeg range
|
||||
- ported softpulldown filter from libmpcodecs as repeatfields filter
|
||||
- dcshift filter
|
||||
- RTP depacketizer for loss tolerant payload format for MP3 audio (RFC 5219)
|
||||
- RTP depacketizer for AC3 payload format (RFC 4184)
|
||||
- palettegen and paletteuse filters
|
||||
- VP9 RTP payload format (draft 0) experimental depacketizer
|
||||
- RTP depacketizer for DV (RFC 6469)
|
||||
- DXVA2-accelerated HEVC decoding
|
||||
- AAC ELD 480 decoding
|
||||
- Intel QSV-accelerated H.264 decoding
|
||||
- DSS SP decoder and DSS demuxer
|
||||
- Fix stsd atom corruption in DNxHD QuickTimes
|
||||
- Canopus HQX decoder
|
||||
- RTP depacketization of T.140 text (RFC 4103)
|
||||
- Port MIPS optimizations to 64-bit
|
||||
|
||||
|
||||
version 2.5:
|
||||
- HEVC/H.265 RTP payload format (draft v6) packetizer
|
||||
- SUP/PGS subtitle demuxer
|
||||
@@ -463,7 +27,7 @@ version 2.4:
|
||||
- ICY metadata are now requested by default with the HTTP protocol
|
||||
- support for using metadata in stream specifiers in fftools
|
||||
- LZMA compression support in TIFF decoder
|
||||
- H.261 RTP payload format (RFC 4587) depacketizer and experimental packetizer
|
||||
- support for H.261 RTP payload format (RFC 4587)
|
||||
- HEVC/H.265 RTP payload format (draft v6) depacketizer
|
||||
- added codecview filter to visualize information exported by some codecs
|
||||
- Matroska 3D support thorugh side data
|
||||
@@ -943,7 +507,7 @@ easier to use. The changes are:
|
||||
all the stream in the first input file, except for the second audio
|
||||
stream'.
|
||||
* There is a new option -c (or -codec) for choosing the decoder/encoder to
|
||||
use, which makes it possible to precisely specify target stream(s) consistently with
|
||||
use, which allows to precisely specify target stream(s) consistently with
|
||||
other options. E.g. -c:v lib264 sets the codec for all video streams, -c:a:0
|
||||
libvorbis sets the codec for the first audio stream and -c copy copies all
|
||||
the streams without reencoding. Old -vcodec/-acodec/-scodec options are now
|
||||
|
112
LICENSE.md
112
LICENSE.md
@@ -1,75 +1,70 @@
|
||||
#FFmpeg:
|
||||
|
||||
Most files in FFmpeg are under the GNU Lesser General Public License version 2.1
|
||||
or later (LGPL v2.1+). Read the file `COPYING.LGPLv2.1` for details. Some other
|
||||
or later (LGPL v2.1+). Read the file COPYING.LGPLv2.1 for details. Some other
|
||||
files have MIT/X11/BSD-style licenses. In combination the LGPL v2.1+ applies to
|
||||
FFmpeg.
|
||||
|
||||
Some optional parts of FFmpeg are licensed under the GNU General Public License
|
||||
version 2 or later (GPL v2+). See the file `COPYING.GPLv2` for details. None of
|
||||
these parts are used by default, you have to explicitly pass `--enable-gpl` to
|
||||
version 2 or later (GPL v2+). See the file COPYING.GPLv2 for details. None of
|
||||
these parts are used by default, you have to explicitly pass --enable-gpl to
|
||||
configure to activate them. In this case, FFmpeg's license changes to GPL v2+.
|
||||
|
||||
Specifically, the GPL parts of FFmpeg are:
|
||||
|
||||
- libpostproc
|
||||
- libmpcodecs
|
||||
- optional x86 optimizations in the files
|
||||
- `libavcodec/x86/flac_dsp_gpl.asm`
|
||||
- `libavcodec/x86/idct_mmx.c`
|
||||
libavcodec/x86/flac_dsp_gpl.asm
|
||||
libavcodec/x86/idct_mmx.c
|
||||
- libutvideo encoding/decoding wrappers in
|
||||
`libavcodec/libutvideo*.cpp`
|
||||
- the X11 grabber in `libavdevice/x11grab.c`
|
||||
libavcodec/libutvideo*.cpp
|
||||
- the X11 grabber in libavdevice/x11grab.c
|
||||
- the swresample test app in
|
||||
`libswresample/swresample-test.c`
|
||||
- the `texi2pod.pl` tool
|
||||
libswresample/swresample-test.c
|
||||
- the texi2pod.pl tool
|
||||
- the following filters in libavfilter:
|
||||
- `f_ebur128.c`
|
||||
- `vf_blackframe.c`
|
||||
- `vf_boxblur.c`
|
||||
- `vf_colormatrix.c`
|
||||
- `vf_cover_rect.c`
|
||||
- `vf_cropdetect.c`
|
||||
- `vf_delogo.c`
|
||||
- `vf_eq.c`
|
||||
- `vf_find_rect.c`
|
||||
- `vf_fspp.c`
|
||||
- `vf_geq.c`
|
||||
- `vf_histeq.c`
|
||||
- `vf_hqdn3d.c`
|
||||
- `vf_interlace.c`
|
||||
- `vf_kerndeint.c`
|
||||
- `vf_mcdeint.c`
|
||||
- `vf_mpdecimate.c`
|
||||
- `vf_owdenoise.c`
|
||||
- `vf_perspective.c`
|
||||
- `vf_phase.c`
|
||||
- `vf_pp.c`
|
||||
- `vf_pp7.c`
|
||||
- `vf_pullup.c`
|
||||
- `vf_sab.c`
|
||||
- `vf_smartblur.c`
|
||||
- `vf_repeatfields.c`
|
||||
- `vf_spp.c`
|
||||
- `vf_stereo3d.c`
|
||||
- `vf_super2xsai.c`
|
||||
- `vf_tinterlace.c`
|
||||
- `vf_uspp.c`
|
||||
- `vsrc_mptestsrc.c`
|
||||
- f_ebur128.c
|
||||
- vf_blackframe.c
|
||||
- vf_boxblur.c
|
||||
- vf_colormatrix.c
|
||||
- vf_cropdetect.c
|
||||
- vf_decimate.c
|
||||
- vf_delogo.c
|
||||
- vf_geq.c
|
||||
- vf_histeq.c
|
||||
- vf_hqdn3d.c
|
||||
- vf_interlace.c
|
||||
- vf_kerndeint.c
|
||||
- vf_mcdeint.c
|
||||
- vf_mp.c
|
||||
- vf_owdenoise.c
|
||||
- vf_perspective.c
|
||||
- vf_phase.c
|
||||
- vf_pp.c
|
||||
- vf_pullup.c
|
||||
- vf_sab.c
|
||||
- vf_smartblur.c
|
||||
- vf_spp.c
|
||||
- vf_stereo3d.c
|
||||
- vf_super2xsai.c
|
||||
- vf_tinterlace.c
|
||||
- vsrc_mptestsrc.c
|
||||
|
||||
Should you, for whatever reason, prefer to use version 3 of the (L)GPL, then
|
||||
the configure parameter `--enable-version3` will activate this licensing option
|
||||
for you. Read the file `COPYING.LGPLv3` or, if you have enabled GPL parts,
|
||||
`COPYING.GPLv3` to learn the exact legal terms that apply in this case.
|
||||
the configure parameter --enable-version3 will activate this licensing option
|
||||
for you. Read the file COPYING.LGPLv3 or, if you have enabled GPL parts,
|
||||
COPYING.GPLv3 to learn the exact legal terms that apply in this case.
|
||||
|
||||
There are a handful of files under other licensing terms, namely:
|
||||
|
||||
* The files `libavcodec/jfdctfst.c`, `libavcodec/jfdctint_template.c` and
|
||||
`libavcodec/jrevdct.c` are taken from libjpeg, see the top of the files for
|
||||
* The files libavcodec/jfdctfst.c, libavcodec/jfdctint_template.c and
|
||||
libavcodec/jrevdct.c are taken from libjpeg, see the top of the files for
|
||||
licensing details. Specifically note that you must credit the IJG in the
|
||||
documentation accompanying your program if you only distribute executables.
|
||||
You must also indicate any changes including additions and deletions to
|
||||
those three files in the documentation.
|
||||
* `tests/reference.pnm` is under the expat license.
|
||||
tests/reference.pnm is under the expat license
|
||||
|
||||
|
||||
external libraries
|
||||
@@ -82,22 +77,21 @@ compatible libraries
|
||||
--------------------
|
||||
|
||||
The following libraries are under GPL:
|
||||
- frei0r
|
||||
- libcdio
|
||||
- libutvideo
|
||||
- libvidstab
|
||||
- libx264
|
||||
- libx265
|
||||
- libxavs
|
||||
- libxvid
|
||||
|
||||
- frei0r
|
||||
- libcdio
|
||||
- libutvideo
|
||||
- libvidstab
|
||||
- libx264
|
||||
- libx265
|
||||
- libxavs
|
||||
- libxvid
|
||||
When combining them with FFmpeg, FFmpeg needs to be licensed as GPL as well by
|
||||
passing `--enable-gpl` to configure.
|
||||
passing --enable-gpl to configure.
|
||||
|
||||
The OpenCORE and VisualOn libraries are under the Apache License 2.0. That
|
||||
license is incompatible with the LGPL v2.1 and the GPL v2, but not with
|
||||
version 3 of those licenses. So to combine these libraries with FFmpeg, the
|
||||
license version needs to be upgraded by passing `--enable-version3` to configure.
|
||||
license version needs to be upgraded by passing --enable-version3 to configure.
|
||||
|
||||
incompatible libraries
|
||||
----------------------
|
||||
@@ -105,7 +99,7 @@ incompatible libraries
|
||||
The Fraunhofer AAC library, FAAC and aacplus are under licenses which
|
||||
are incompatible with the GPLv2 and v3. We do not know for certain if their
|
||||
licenses are compatible with the LGPL.
|
||||
If you wish to enable these libraries, pass `--enable-nonfree` to configure.
|
||||
If you wish to enable these libraries, pass --enable-nonfree to configure.
|
||||
But note that if you enable any of these libraries the resulting binary will
|
||||
be under a complex license mix that is more restrictive than the LGPL and that
|
||||
may result in additional obligations. It is possible that these
|
||||
|
20
MAINTAINERS
20
MAINTAINERS
@@ -14,6 +14,7 @@ patches and related discussions.
|
||||
Project Leader
|
||||
==============
|
||||
|
||||
Michael Niedermayer
|
||||
final design decisions
|
||||
|
||||
|
||||
@@ -44,7 +45,7 @@ Miscellaneous Areas
|
||||
|
||||
documentation Stefano Sabatini, Mike Melanson, Timothy Gu
|
||||
build system (configure, makefiles) Diego Biurrun, Mans Rullgard
|
||||
project server Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Lou Logan
|
||||
project server Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser
|
||||
presets Robert Swain
|
||||
metadata subsystem Aurelien Jacobs
|
||||
release management Michael Niedermayer
|
||||
@@ -58,7 +59,7 @@ fate.ffmpeg.org Timothy Gu
|
||||
Trac bug tracker Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos, Lou Logan
|
||||
mailing lists Michael Niedermayer, Baptiste Coudurier, Lou Logan
|
||||
Google+ Paul B Mahol, Michael Niedermayer, Alexander Strasser
|
||||
Twitter Lou Logan, Reynaldo H. Verdejo Pinochet
|
||||
Twitter Lou Logan
|
||||
Launchpad Timothy Gu
|
||||
|
||||
|
||||
@@ -155,7 +156,6 @@ Codecs:
|
||||
celp_filters.* Vitor Sessak
|
||||
cinepak.c Roberto Togni
|
||||
cinepakenc.c Rl / Aetey G.T. AB
|
||||
ccaption_dec.c Anshul Maheshwari
|
||||
cljr Alex Beregszaszi
|
||||
cllc.c Derek Buitenhuis
|
||||
cook.c, cookdata.h Benjamin Larsson
|
||||
@@ -165,7 +165,6 @@ Codecs:
|
||||
dca.c Kostya Shishkov, Benjamin Larsson
|
||||
dnxhd* Baptiste Coudurier
|
||||
dpcm.c Mike Melanson
|
||||
dss_sp.c Oleksij Rempel, Michael Niedermayer
|
||||
dv.c Roman Shaposhnik
|
||||
dvbsubdec.c Anshul Maheshwari
|
||||
dxa.c Kostya Shishkov
|
||||
@@ -227,7 +226,6 @@ Codecs:
|
||||
msvideo1.c Mike Melanson
|
||||
nellymoserdec.c Benjamin Larsson
|
||||
nuv.c Reimar Doeffinger
|
||||
nvenc.c Timo Rothenpieler
|
||||
paf.* Paul B Mahol
|
||||
pcx.c Ivo van Poorten
|
||||
pgssubdec.c Reimar Doeffinger
|
||||
@@ -401,7 +399,6 @@ Muxers/Demuxers:
|
||||
cdxl.c Paul B Mahol
|
||||
crc.c Michael Niedermayer
|
||||
daud.c Reimar Doeffinger
|
||||
dss.c Oleksij Rempel, Michael Niedermayer
|
||||
dtshddec.c Paul B Mahol
|
||||
dv.c Roman Shaposhnik
|
||||
dxa.c Kostya Shishkov
|
||||
@@ -466,13 +463,9 @@ Muxers/Demuxers:
|
||||
rmdec.c, rmenc.c Ronald S. Bultje, Kostya Shishkov
|
||||
rtmp* Kostya Shishkov
|
||||
rtp.c, rtpenc.c Martin Storsjo
|
||||
rtpdec_ac3.* Gilles Chanteperdrix
|
||||
rtpdec_dv.* Thomas Volkert
|
||||
rtpdec_h261.*, rtpenc_h261.* Thomas Volkert
|
||||
rtpdec_hevc.*, rtpenc_hevc.* Thomas Volkert
|
||||
rtpdec_mpa_robust.* Gilles Chanteperdrix
|
||||
rtpdec_asf.* Ronald S. Bultje
|
||||
rtpdec_vp9.c Thomas Volkert
|
||||
rtpenc_mpv.*, rtpenc_aac.* Martin Storsjo
|
||||
rtsp.c Luca Barbato
|
||||
sbgdec.c Nicolas George
|
||||
@@ -539,15 +532,14 @@ Windows ICL Matthew Oliver
|
||||
ADI/Blackfin DSP Marc Hoffman
|
||||
Sparc Roman Shaposhnik
|
||||
x86 Michael Niedermayer
|
||||
OS/2 KO Myung-Hun
|
||||
|
||||
|
||||
Releases
|
||||
========
|
||||
|
||||
2.7 Michael Niedermayer
|
||||
2.6 Michael Niedermayer
|
||||
2.5 Michael Niedermayer
|
||||
2.4 Michael Niedermayer
|
||||
2.2 Michael Niedermayer
|
||||
1.2 Michael Niedermayer
|
||||
|
||||
If you want to maintain an older release, please contact us
|
||||
|
||||
|
6
Makefile
6
Makefile
@@ -80,8 +80,8 @@ SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS \
|
||||
HEADERS ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
|
||||
ARMV5TE-OBJS ARMV6-OBJS ARMV8-OBJS VFP-OBJS NEON-OBJS \
|
||||
ALTIVEC-OBJS MMX-OBJS YASM-OBJS \
|
||||
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS MSA-OBJS \
|
||||
LOONGSON3-OBJS OBJS SLIBOBJS HOSTOBJS TESTOBJS
|
||||
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS MIPS32R2-OBJS \
|
||||
OBJS SLIBOBJS HOSTOBJS TESTOBJS
|
||||
|
||||
define RESET
|
||||
$(1) :=
|
||||
@@ -112,7 +112,7 @@ endef
|
||||
|
||||
$(foreach P,$(PROGS),$(eval $(call DOPROG,$(P:$(PROGSSUF)$(EXESUF)=))))
|
||||
|
||||
ffprobe.o cmdutils.o libavcodec/utils.o libavformat/utils.o libavdevice/avdevice.o libavfilter/avfilter.o libavutil/utils.o libpostproc/postprocess.o libswresample/swresample.o libswscale/utils.o : libavutil/ffversion.h
|
||||
ffprobe.o cmdutils.o : libavutil/ffversion.h
|
||||
|
||||
$(PROGS): %$(PROGSSUF)$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
|
||||
$(CP) $< $@
|
||||
|
@@ -19,10 +19,8 @@ such as audio, video, subtitles and related metadata.
|
||||
* [ffmpeg](http://ffmpeg.org/ffmpeg.html) is a command line toolbox to
|
||||
manipulate, convert and stream multimedia content.
|
||||
* [ffplay](http://ffmpeg.org/ffplay.html) is a minimalistic multimedia player.
|
||||
* [ffprobe](http://ffmpeg.org/ffprobe.html) is a simple analysis tool to inspect
|
||||
* [ffprobe](http://ffmpeg.org/ffprobe.html) is a simple analisys tool to inspect
|
||||
multimedia content.
|
||||
* [ffserver](http://ffmpeg.org/ffserver.html) is a multimedia streaming server
|
||||
for live broadcasts.
|
||||
* Additional small tools such as `aviocat`, `ismindex` and `qt-faststart`.
|
||||
|
||||
## Documentation
|
||||
|
106
RELEASE_NOTES
106
RELEASE_NOTES
@@ -1,15 +1,99 @@
|
||||
┌────────────────────────────────────────┐
|
||||
│ RELEASE NOTES for FFmpeg 2.5 "Bohr" │
|
||||
└────────────────────────────────────────┘
|
||||
|
||||
┌─────────────────────────────────────┐
|
||||
│ RELEASE NOTES for FFmpeg 2.7 "Nash" │
|
||||
└─────────────────────────────────────┘
|
||||
The FFmpeg Project proudly presents FFmpeg 2.5 "Bohr", just 2.5 months
|
||||
after the release of 2.4. Since this wasn't a long time ago, the Changelog
|
||||
is a bit short this time.
|
||||
|
||||
The FFmpeg Project proudly presents FFmpeg 2.7 "Nash", about 3
|
||||
months after the release of FFmpeg 2.6.
|
||||
As usual, if you have any question on this release or any FFmpeg related
|
||||
topic, feel free to join us on the #ffmpeg IRC channel (on
|
||||
irc.freenode.net).
|
||||
|
||||
A complete Changelog is available at the root of the project, and the
|
||||
complete Git history on http://source.ffmpeg.org.
|
||||
┌────────────────────────────┐
|
||||
│ 🔨 API Information │
|
||||
└────────────────────────────┘
|
||||
|
||||
We hope you will like this release as much as we enjoyed working on it, and
|
||||
as usual, if you have any questions about it, or any FFmpeg related topic,
|
||||
feel free to join us on the #ffmpeg IRC channel (on irc.freenode.net) or ask
|
||||
on the mailing-lists.
|
||||
FFmpeg 2.5 includes the following library versions:
|
||||
|
||||
• libavutil 54.15.100
|
||||
• libavcodec 56.13.100
|
||||
• libavformat 56.15.102
|
||||
• libavdevice 56. 3.100
|
||||
• libavfilter 5. 2.103
|
||||
• libswscale 3. 1.101
|
||||
• libswresample 1. 1.100
|
||||
• libpostproc 53. 3.100
|
||||
|
||||
Important API changes since 2.4:
|
||||
|
||||
• avpriv_dv_frame_profile2() has been deprecated
|
||||
|
||||
|
||||
Please refer to the doc/APIchanges file for more information.
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ ★ List of New Features │
|
||||
└────────────────────────────┘
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ ffprobe │
|
||||
└────────────────────────────┘
|
||||
|
||||
• -show_pixel_formats option
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ ffserver │
|
||||
└────────────────────────────┘
|
||||
|
||||
• codec private options support
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ libavcodec │
|
||||
└────────────────────────────┘
|
||||
|
||||
• STL subtitle decoder
|
||||
• libutvideo YUV 4:2:2 10bit support
|
||||
• animated WebP decoding support
|
||||
• zygoaudio decoding support
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ libavdevice │
|
||||
└────────────────────────────┘
|
||||
|
||||
• XCB-based screen-grabber
|
||||
• AVFoundation screen capturing support
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ libavformat │
|
||||
└────────────────────────────┘
|
||||
|
||||
• HEVC/H.265 RTP payload format (draft v6) packetizer
|
||||
• SUP/PGS subtitle demuxer
|
||||
• STL subtitle demuxer
|
||||
• UDP-Lite support (RFC 3828)
|
||||
• creating DASH compatible fragmented MP4, MPEG-DASH segmenting muxer
|
||||
• WebP muxer
|
||||
• APNG demuxer
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ libavfilter │
|
||||
└────────────────────────────┘
|
||||
|
||||
• xBR scaling filter
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ libavutil │
|
||||
└────────────────────────────┘
|
||||
|
||||
• CAST128 symmetric block cipher, ECB mode
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ libpostproc │
|
||||
└────────────────────────────┘
|
||||
|
||||
• visualization support
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ ⚠ Behaviour changes │
|
||||
└────────────────────────────┘
|
||||
|
4
arch.mak
4
arch.mak
@@ -5,13 +5,11 @@ OBJS-$(HAVE_VFP) += $(VFP-OBJS) $(VFP-OBJS-yes)
|
||||
OBJS-$(HAVE_NEON) += $(NEON-OBJS) $(NEON-OBJS-yes)
|
||||
|
||||
OBJS-$(HAVE_MIPSFPU) += $(MIPSFPU-OBJS) $(MIPSFPU-OBJS-yes)
|
||||
OBJS-$(HAVE_MIPS32R2) += $(MIPS32R2-OBJS) $(MIPS32R2-OBJS-yes)
|
||||
OBJS-$(HAVE_MIPSDSPR1) += $(MIPSDSPR1-OBJS) $(MIPSDSPR1-OBJS-yes)
|
||||
OBJS-$(HAVE_MIPSDSPR2) += $(MIPSDSPR2-OBJS) $(MIPSDSPR2-OBJS-yes)
|
||||
OBJS-$(HAVE_MSA) += $(MSA-OBJS) $(MSA-OBJS-yes)
|
||||
OBJS-$(HAVE_LOONGSON3) += $(LOONGSON3-OBJS) $(LOONGSON3-OBJS-yes)
|
||||
|
||||
OBJS-$(HAVE_ALTIVEC) += $(ALTIVEC-OBJS) $(ALTIVEC-OBJS-yes)
|
||||
OBJS-$(HAVE_VSX) += $(VSX-OBJS) $(VSX-OBJS-yes)
|
||||
|
||||
OBJS-$(HAVE_MMX) += $(MMX-OBJS) $(MMX-OBJS-yes)
|
||||
OBJS-$(HAVE_YASM) += $(YASM-OBJS) $(YASM-OBJS-yes)
|
||||
|
137
cmdutils.c
137
cmdutils.c
@@ -41,10 +41,8 @@
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/bprint.h"
|
||||
#include "libavutil/display.h"
|
||||
#include "libavutil/mathematics.h"
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "libavutil/libm.h"
|
||||
#include "libavutil/parseutils.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "libavutil/eval.h"
|
||||
@@ -292,14 +290,10 @@ static int write_option(void *optctx, const OptionDef *po, const char *opt,
|
||||
if (po->flags & OPT_SPEC) {
|
||||
SpecifierOpt **so = dst;
|
||||
char *p = strchr(opt, ':');
|
||||
char *str;
|
||||
|
||||
dstcount = (int *)(so + 1);
|
||||
*so = grow_array(*so, sizeof(**so), dstcount, *dstcount + 1);
|
||||
str = av_strdup(p ? p + 1 : "");
|
||||
if (!str)
|
||||
return AVERROR(ENOMEM);
|
||||
(*so)[*dstcount - 1].specifier = str;
|
||||
(*so)[*dstcount - 1].specifier = av_strdup(p ? p + 1 : "");
|
||||
dst = &(*so)[*dstcount - 1].u;
|
||||
}
|
||||
|
||||
@@ -307,8 +301,6 @@ static int write_option(void *optctx, const OptionDef *po, const char *opt,
|
||||
char *str;
|
||||
str = av_strdup(arg);
|
||||
av_freep(dst);
|
||||
if (!str)
|
||||
return AVERROR(ENOMEM);
|
||||
*(char **)dst = str;
|
||||
} else if (po->flags & OPT_BOOL || po->flags & OPT_INT) {
|
||||
*(int *)dst = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
|
||||
@@ -482,22 +474,10 @@ static void dump_argument(const char *a)
|
||||
fputc('"', report_file);
|
||||
}
|
||||
|
||||
static void check_options(const OptionDef *po)
|
||||
{
|
||||
while (po->name) {
|
||||
if (po->flags & OPT_PERFILE)
|
||||
av_assert0(po->flags & (OPT_INPUT | OPT_OUTPUT));
|
||||
po++;
|
||||
}
|
||||
}
|
||||
|
||||
void parse_loglevel(int argc, char **argv, const OptionDef *options)
|
||||
{
|
||||
int idx = locate_option(argc, argv, options, "loglevel");
|
||||
const char *env;
|
||||
|
||||
check_options(options);
|
||||
|
||||
if (!idx)
|
||||
idx = locate_option(argc, argv, options, "v");
|
||||
if (idx && argv[idx + 1])
|
||||
@@ -859,7 +839,6 @@ int opt_loglevel(void *optctx, const char *opt, const char *arg)
|
||||
{ "info" , AV_LOG_INFO },
|
||||
{ "verbose", AV_LOG_VERBOSE },
|
||||
{ "debug" , AV_LOG_DEBUG },
|
||||
{ "trace" , AV_LOG_TRACE },
|
||||
};
|
||||
char *tail;
|
||||
int level;
|
||||
@@ -1096,7 +1075,8 @@ static void print_program_info(int flags, int level)
|
||||
av_log(NULL, level, " Copyright (c) %d-%d the FFmpeg developers",
|
||||
program_birth_year, CONFIG_THIS_YEAR);
|
||||
av_log(NULL, level, "\n");
|
||||
av_log(NULL, level, "%sbuilt with %s\n", indent, CC_IDENT);
|
||||
av_log(NULL, level, "%sbuilt on %s %s with %s\n",
|
||||
indent, __DATE__, __TIME__, CC_IDENT);
|
||||
|
||||
av_log(NULL, level, "%sconfiguration: " FFMPEG_CONFIGURATION "\n", indent);
|
||||
}
|
||||
@@ -1233,7 +1213,12 @@ static int is_device(const AVClass *avclass)
|
||||
{
|
||||
if (!avclass)
|
||||
return 0;
|
||||
return AV_IS_INPUT_DEVICE(avclass->category) || AV_IS_OUTPUT_DEVICE(avclass->category);
|
||||
return avclass->category == AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT ||
|
||||
avclass->category == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT ||
|
||||
avclass->category == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT ||
|
||||
avclass->category == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT ||
|
||||
avclass->category == AV_CLASS_CATEGORY_DEVICE_OUTPUT ||
|
||||
avclass->category == AV_CLASS_CATEGORY_DEVICE_INPUT;
|
||||
}
|
||||
|
||||
static int show_formats_devices(void *optctx, const char *opt, const char *arg, int device_only)
|
||||
@@ -1550,10 +1535,10 @@ int show_protocols(void *optctx, const char *opt, const char *arg)
|
||||
printf("Supported file protocols:\n"
|
||||
"Input:\n");
|
||||
while ((name = avio_enum_protocols(&opaque, 0)))
|
||||
printf(" %s\n", name);
|
||||
printf("%s\n", name);
|
||||
printf("Output:\n");
|
||||
while ((name = avio_enum_protocols(&opaque, 1)))
|
||||
printf(" %s\n", name);
|
||||
printf("%s\n", name);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1568,7 +1553,7 @@ int show_filters(void *optctx, const char *opt, const char *arg)
|
||||
printf("Filters:\n"
|
||||
" T.. = Timeline support\n"
|
||||
" .S. = Slice threading\n"
|
||||
" ..C = Command support\n"
|
||||
" ..C = Commmand support\n"
|
||||
" A = Audio input/output\n"
|
||||
" V = Video input/output\n"
|
||||
" N = Dynamic number and/or type of input/output\n"
|
||||
@@ -1836,8 +1821,6 @@ int show_help(void *optctx, const char *opt, const char *arg)
|
||||
av_log_set_callback(log_callback_help);
|
||||
|
||||
topic = av_strdup(arg ? arg : "");
|
||||
if (!topic)
|
||||
return AVERROR(ENOMEM);
|
||||
par = strchr(topic, '=');
|
||||
if (par)
|
||||
*par++ = 0;
|
||||
@@ -1877,7 +1860,7 @@ int read_yesno(void)
|
||||
|
||||
int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
|
||||
{
|
||||
int64_t ret;
|
||||
int ret;
|
||||
FILE *f = av_fopen_utf8(filename, "rb");
|
||||
|
||||
if (!f) {
|
||||
@@ -1886,31 +1869,20 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
|
||||
strerror(errno));
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = fseek(f, 0, SEEK_END);
|
||||
if (ret == -1) {
|
||||
fseek(f, 0, SEEK_END);
|
||||
*size = ftell(f);
|
||||
fseek(f, 0, SEEK_SET);
|
||||
if (*size == (size_t)-1) {
|
||||
ret = AVERROR(errno);
|
||||
goto out;
|
||||
av_log(NULL, AV_LOG_ERROR, "IO error: %s\n", strerror(errno));
|
||||
fclose(f);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ftell(f);
|
||||
if (ret < 0) {
|
||||
ret = AVERROR(errno);
|
||||
goto out;
|
||||
}
|
||||
*size = ret;
|
||||
|
||||
ret = fseek(f, 0, SEEK_SET);
|
||||
if (ret == -1) {
|
||||
ret = AVERROR(errno);
|
||||
goto out;
|
||||
}
|
||||
|
||||
*bufptr = av_malloc(*size + 1);
|
||||
if (!*bufptr) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not allocate file buffer\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto out;
|
||||
fclose(f);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
ret = fread(*bufptr, 1, *size, f);
|
||||
if (ret < *size) {
|
||||
@@ -1926,9 +1898,6 @@ int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
|
||||
(*bufptr)[(*size)++] = '\0';
|
||||
}
|
||||
|
||||
out:
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "IO error: %s\n", av_err2str(ret));
|
||||
fclose(f);
|
||||
return ret;
|
||||
}
|
||||
@@ -2075,7 +2044,7 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
|
||||
exit_program(1);
|
||||
}
|
||||
if (*size < new_size) {
|
||||
uint8_t *tmp = av_realloc_array(array, new_size, elem_size);
|
||||
uint8_t *tmp = av_realloc(array, new_size*elem_size);
|
||||
if (!tmp) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not alloc buffer.\n");
|
||||
exit_program(1);
|
||||
@@ -2087,35 +2056,13 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
|
||||
return array;
|
||||
}
|
||||
|
||||
double get_rotation(AVStream *st)
|
||||
{
|
||||
AVDictionaryEntry *rotate_tag = av_dict_get(st->metadata, "rotate", NULL, 0);
|
||||
uint8_t* displaymatrix = av_stream_get_side_data(st,
|
||||
AV_PKT_DATA_DISPLAYMATRIX, NULL);
|
||||
double theta = 0;
|
||||
|
||||
if (rotate_tag && *rotate_tag->value && strcmp(rotate_tag->value, "0")) {
|
||||
char *tail;
|
||||
theta = av_strtod(rotate_tag->value, &tail);
|
||||
if (*tail)
|
||||
theta = 0;
|
||||
}
|
||||
if (displaymatrix && !theta)
|
||||
theta = -av_display_rotation_get((int32_t*) displaymatrix);
|
||||
|
||||
theta -= 360*floor(theta/360 + 0.9/360);
|
||||
|
||||
if (fabs(theta - 90*round(theta/90)) > 2)
|
||||
av_log_ask_for_sample(NULL, "Odd rotation angle\n");
|
||||
|
||||
return theta;
|
||||
}
|
||||
|
||||
#if CONFIG_AVDEVICE
|
||||
static int print_device_sources(AVInputFormat *fmt, AVDictionary *opts)
|
||||
{
|
||||
int ret, i;
|
||||
AVFormatContext *dev = NULL;
|
||||
AVDeviceInfoList *device_list = NULL;
|
||||
AVDictionary *tmp_opts = NULL;
|
||||
|
||||
if (!fmt || !fmt->priv_class || !AV_IS_INPUT_DEVICE(fmt->priv_class->category))
|
||||
return AVERROR(EINVAL);
|
||||
@@ -2127,7 +2074,15 @@ static int print_device_sources(AVInputFormat *fmt, AVDictionary *opts)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if ((ret = avdevice_list_input_sources(fmt, NULL, opts, &device_list)) < 0) {
|
||||
/* TODO: avformat_open_input calls read_header callback which is not necessary.
|
||||
Function like avformat_alloc_output_context2 for input could be helpful here. */
|
||||
av_dict_copy(&tmp_opts, opts, 0);
|
||||
if ((ret = avformat_open_input(&dev, NULL, fmt, &tmp_opts)) < 0) {
|
||||
printf("Cannot open device: %s.\n", fmt->name);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if ((ret = avdevice_list_devices(dev, &device_list)) < 0) {
|
||||
printf("Cannot list sources.\n");
|
||||
goto fail;
|
||||
}
|
||||
@@ -2138,14 +2093,18 @@ static int print_device_sources(AVInputFormat *fmt, AVDictionary *opts)
|
||||
}
|
||||
|
||||
fail:
|
||||
av_dict_free(&tmp_opts);
|
||||
avdevice_free_list_devices(&device_list);
|
||||
avformat_close_input(&dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int print_device_sinks(AVOutputFormat *fmt, AVDictionary *opts)
|
||||
{
|
||||
int ret, i;
|
||||
AVFormatContext *dev = NULL;
|
||||
AVDeviceInfoList *device_list = NULL;
|
||||
AVDictionary *tmp_opts = NULL;
|
||||
|
||||
if (!fmt || !fmt->priv_class || !AV_IS_OUTPUT_DEVICE(fmt->priv_class->category))
|
||||
return AVERROR(EINVAL);
|
||||
@@ -2157,7 +2116,14 @@ static int print_device_sinks(AVOutputFormat *fmt, AVDictionary *opts)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if ((ret = avdevice_list_output_sinks(fmt, NULL, opts, &device_list)) < 0) {
|
||||
if ((ret = avformat_alloc_output_context2(&dev, fmt, NULL, NULL)) < 0) {
|
||||
printf("Cannot open device: %s.\n", fmt->name);
|
||||
goto fail;
|
||||
}
|
||||
av_dict_copy(&tmp_opts, opts, 0);
|
||||
av_opt_set_dict2(dev, &tmp_opts, AV_OPT_SEARCH_CHILDREN);
|
||||
|
||||
if ((ret = avdevice_list_devices(dev, &device_list)) < 0) {
|
||||
printf("Cannot list sinks.\n");
|
||||
goto fail;
|
||||
}
|
||||
@@ -2168,7 +2134,9 @@ static int print_device_sinks(AVOutputFormat *fmt, AVDictionary *opts)
|
||||
}
|
||||
|
||||
fail:
|
||||
av_dict_free(&tmp_opts);
|
||||
avdevice_free_list_devices(&device_list);
|
||||
avformat_free_context(dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -2212,7 +2180,7 @@ int show_sources(void *optctx, const char *opt, const char *arg)
|
||||
if (fmt) {
|
||||
if (!strcmp(fmt->name, "lavfi"))
|
||||
continue; //it's pointless to probe lavfi
|
||||
if (dev && !av_match_name(dev, fmt->name))
|
||||
if (dev && strcmp(fmt->name, dev))
|
||||
continue;
|
||||
print_device_sources(fmt, opts);
|
||||
}
|
||||
@@ -2220,7 +2188,7 @@ int show_sources(void *optctx, const char *opt, const char *arg)
|
||||
do {
|
||||
fmt = av_input_video_device_next(fmt);
|
||||
if (fmt) {
|
||||
if (dev && !av_match_name(dev, fmt->name))
|
||||
if (dev && strcmp(fmt->name, dev))
|
||||
continue;
|
||||
print_device_sources(fmt, opts);
|
||||
}
|
||||
@@ -2248,7 +2216,7 @@ int show_sinks(void *optctx, const char *opt, const char *arg)
|
||||
do {
|
||||
fmt = av_output_audio_device_next(fmt);
|
||||
if (fmt) {
|
||||
if (dev && !av_match_name(dev, fmt->name))
|
||||
if (dev && strcmp(fmt->name, dev))
|
||||
continue;
|
||||
print_device_sinks(fmt, opts);
|
||||
}
|
||||
@@ -2256,7 +2224,7 @@ int show_sinks(void *optctx, const char *opt, const char *arg)
|
||||
do {
|
||||
fmt = av_output_video_device_next(fmt);
|
||||
if (fmt) {
|
||||
if (dev && !av_match_name(dev, fmt->name))
|
||||
if (dev && strcmp(fmt->name, dev))
|
||||
continue;
|
||||
print_device_sinks(fmt, opts);
|
||||
}
|
||||
@@ -2267,5 +2235,4 @@ int show_sinks(void *optctx, const char *opt, const char *arg)
|
||||
av_log_set_level(error_level);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@@ -597,6 +597,4 @@ void *grow_array(void *array, int elem_size, int *size, int new_size);
|
||||
char name[128];\
|
||||
av_get_channel_layout_string(name, sizeof(name), 0, ch_layout);
|
||||
|
||||
double get_rotation(AVStream *st);
|
||||
|
||||
#endif /* CMDUTILS_H */
|
||||
|
@@ -22,7 +22,6 @@
#include "libavutil/time.h"
#include "libavutil/log.h"
#include "libavutil/opencl.h"
#include "libavutil/avstring.h"
#include "cmdutils.h"

typedef struct {
@@ -239,8 +238,7 @@ int opt_opencl_bench(void *optctx, const char *opt, const char *arg)
            devices[count].platform_idx = i;
            devices[count].device_idx = j;
            devices[count].runtime = score;
            av_strlcpy(devices[count].device_name, device_node->device_name,
                       sizeof(devices[count].device_name));
            strcpy(devices[count].device_name, device_node->device_name);
            count++;
        }
    }
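(Illustration, not part of the compare.) The second hunk above replaces a bounded av_strlcpy() with a plain strcpy(). As a reminder of what the bounded libavutil call guarantees, a small hedged sketch follows; the helper name copy_name is made up.

/* Example (not part of the diff): av_strlcpy() always NUL-terminates and
 * never writes more than dst_size bytes; it returns strlen(src), so a
 * truncated copy can be detected. */
#include <libavutil/avstring.h>

static void copy_name(char *dst, size_t dst_size, const char *src)
{
    if (av_strlcpy(dst, src, dst_size) >= dst_size) {
        /* src did not fit and was truncated */
    }
}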
14
common.mak
@@ -5,14 +5,6 @@
# first so "all" becomes default target
all: all-yes

DEFAULT_YASMD=.dbg

ifeq ($(DBG),1)
YASMD=$(DEFAULT_YASMD)
else
YASMD=
endif

ifndef SUBDIR

ifndef V
@@ -146,17 +138,17 @@ $(TOOLOBJS): | tools

OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(TESTOBJS))

CLEANSUFFIXES = *.d *.o *~ *.h.c *.map *.ver *.ho *.gcno *.gcda *$(DEFAULT_YASMD).asm
CLEANSUFFIXES = *.d *.o *~ *.h.c *.map *.ver *.ho *.gcno *.gcda
DISTCLEANSUFFIXES = *.pc
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a

define RULES
clean::
	$(RM) $(OBJS) $(OBJS:.o=.d) $(OBJS:.o=$(DEFAULT_YASMD).d)
	$(RM) $(OBJS) $(OBJS:.o=.d)
	$(RM) $(HOSTPROGS)
	$(RM) $(TOOLS)
endef

$(eval $(RULES))

-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SLIBOBJS:.o=.d)) $(OBJS:.o=$(DEFAULT_YASMD).d)
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SLIBOBJS:.o=.d))
@@ -38,9 +38,40 @@
|
||||
#ifndef __AVISYNTH_C__
|
||||
#define __AVISYNTH_C__
|
||||
|
||||
#include "avs/config.h"
|
||||
#include "avs/capi.h"
|
||||
#include "avs/types.h"
|
||||
#ifdef __cplusplus
|
||||
# define EXTERN_C extern "C"
|
||||
#else
|
||||
# define EXTERN_C
|
||||
#endif
|
||||
|
||||
#define AVSC_USE_STDCALL 1
|
||||
|
||||
#ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
#else
|
||||
# define AVSC_CC __stdcall
|
||||
#endif
|
||||
|
||||
#define AVSC_INLINE static __inline
|
||||
|
||||
#ifdef AVISYNTH_C_EXPORTS
|
||||
# define AVSC_EXPORT EXTERN_C
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllexport) ret AVSC_CC name
|
||||
#else
|
||||
# define AVSC_EXPORT EXTERN_C __declspec(dllexport)
|
||||
# ifndef AVSC_NO_DECLSPEC
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name
|
||||
# else
|
||||
# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
typedef unsigned char BYTE;
|
||||
#ifdef __GNUC__
|
||||
typedef long long int INT64;
|
||||
#else
|
||||
typedef __int64 INT64;
|
||||
#endif
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
@@ -48,8 +79,8 @@
|
||||
// Constants
|
||||
//
|
||||
|
||||
#ifndef __AVISYNTH_6_H__
|
||||
enum { AVISYNTH_INTERFACE_VERSION = 6 };
|
||||
#ifndef __AVISYNTH_H__
|
||||
enum { AVISYNTH_INTERFACE_VERSION = 4 };
|
||||
#endif
|
||||
|
||||
enum {AVS_SAMPLE_INT8 = 1<<0,
|
||||
@@ -81,8 +112,8 @@ enum {AVS_CS_BGR = 1<<28,
|
||||
AVS_CS_PLANAR = 1<<31,
|
||||
|
||||
AVS_CS_SHIFT_SUB_WIDTH = 0,
|
||||
AVS_CS_SHIFT_SUB_HEIGHT = 8,
|
||||
AVS_CS_SHIFT_SAMPLE_BITS = 16,
|
||||
AVS_CS_SHIFT_SUB_HEIGHT = 1 << 3,
|
||||
AVS_CS_SHIFT_SAMPLE_BITS = 1 << 4,
|
||||
|
||||
AVS_CS_SUB_WIDTH_MASK = 7 << AVS_CS_SHIFT_SUB_WIDTH,
|
||||
AVS_CS_SUB_WIDTH_1 = 3 << AVS_CS_SHIFT_SUB_WIDTH, // YV24
|
||||
@@ -149,66 +180,15 @@ enum { //SUBTYPES
|
||||
AVS_FILTER_OUTPUT_TYPE_DIFFERENT=4};
|
||||
|
||||
enum {
|
||||
// New 2.6 explicitly defined cache hints.
|
||||
AVS_CACHE_NOTHING=10, // Do not cache video.
|
||||
AVS_CACHE_WINDOW=11, // Hard protect upto X frames within a range of X from the current frame N.
|
||||
AVS_CACHE_GENERIC=12, // LRU cache upto X frames.
|
||||
AVS_CACHE_FORCE_GENERIC=13, // LRU cache upto X frames, override any previous CACHE_WINDOW.
|
||||
|
||||
AVS_CACHE_GET_POLICY=30, // Get the current policy.
|
||||
AVS_CACHE_GET_WINDOW=31, // Get the current window h_span.
|
||||
AVS_CACHE_GET_RANGE=32, // Get the current generic frame range.
|
||||
|
||||
AVS_CACHE_AUDIO=50, // Explicitly do cache audio, X byte cache.
|
||||
AVS_CACHE_AUDIO_NOTHING=51, // Explicitly do not cache audio.
|
||||
AVS_CACHE_AUDIO_NONE=52, // Audio cache off (auto mode), X byte intial cache.
|
||||
AVS_CACHE_AUDIO_AUTO=53, // Audio cache on (auto mode), X byte intial cache.
|
||||
|
||||
AVS_CACHE_GET_AUDIO_POLICY=70, // Get the current audio policy.
|
||||
AVS_CACHE_GET_AUDIO_SIZE=71, // Get the current audio cache size.
|
||||
|
||||
AVS_CACHE_PREFETCH_FRAME=100, // Queue request to prefetch frame N.
|
||||
AVS_CACHE_PREFETCH_GO=101, // Action video prefetches.
|
||||
|
||||
AVS_CACHE_PREFETCH_AUDIO_BEGIN=120, // Begin queue request transaction to prefetch audio (take critical section).
|
||||
AVS_CACHE_PREFETCH_AUDIO_STARTLO=121, // Set low 32 bits of start.
|
||||
AVS_CACHE_PREFETCH_AUDIO_STARTHI=122, // Set high 32 bits of start.
|
||||
AVS_CACHE_PREFETCH_AUDIO_COUNT=123, // Set low 32 bits of length.
|
||||
AVS_CACHE_PREFETCH_AUDIO_COMMIT=124, // Enqueue request transaction to prefetch audio (release critical section).
|
||||
AVS_CACHE_PREFETCH_AUDIO_GO=125, // Action audio prefetches.
|
||||
|
||||
AVS_CACHE_GETCHILD_CACHE_MODE=200, // Cache ask Child for desired video cache mode.
|
||||
AVS_CACHE_GETCHILD_CACHE_SIZE=201, // Cache ask Child for desired video cache size.
|
||||
AVS_CACHE_GETCHILD_AUDIO_MODE=202, // Cache ask Child for desired audio cache mode.
|
||||
AVS_CACHE_GETCHILD_AUDIO_SIZE=203, // Cache ask Child for desired audio cache size.
|
||||
|
||||
AVS_CACHE_GETCHILD_COST=220, // Cache ask Child for estimated processing cost.
|
||||
AVS_CACHE_COST_ZERO=221, // Child response of zero cost (ptr arithmetic only).
|
||||
AVS_CACHE_COST_UNIT=222, // Child response of unit cost (less than or equal 1 full frame blit).
|
||||
AVS_CACHE_COST_LOW=223, // Child response of light cost. (Fast)
|
||||
AVS_CACHE_COST_MED=224, // Child response of medium cost. (Real time)
|
||||
AVS_CACHE_COST_HI=225, // Child response of heavy cost. (Slow)
|
||||
|
||||
AVS_CACHE_GETCHILD_THREAD_MODE=240, // Cache ask Child for thread safetyness.
|
||||
AVS_CACHE_THREAD_UNSAFE=241, // Only 1 thread allowed for all instances. 2.5 filters default!
|
||||
AVS_CACHE_THREAD_CLASS=242, // Only 1 thread allowed for each instance. 2.6 filters default!
|
||||
AVS_CACHE_THREAD_SAFE=243, // Allow all threads in any instance.
|
||||
AVS_CACHE_THREAD_OWN=244, // Safe but limit to 1 thread, internally threaded.
|
||||
|
||||
AVS_CACHE_GETCHILD_ACCESS_COST=260, // Cache ask Child for preferred access pattern.
|
||||
AVS_CACHE_ACCESS_RAND=261, // Filter is access order agnostic.
|
||||
AVS_CACHE_ACCESS_SEQ0=262, // Filter prefers sequential access (low cost)
|
||||
AVS_CACHE_ACCESS_SEQ1=263, // Filter needs sequential access (high cost)
|
||||
AVS_CACHE_NOTHING=0,
|
||||
AVS_CACHE_RANGE=1,
|
||||
AVS_CACHE_ALL=2,
|
||||
AVS_CACHE_AUDIO=3,
|
||||
AVS_CACHE_AUDIO_NONE=4,
|
||||
AVS_CACHE_AUDIO_AUTO=5
|
||||
};
|
||||
|
||||
#ifdef BUILDING_AVSCORE
|
||||
struct AVS_ScriptEnvironment {
|
||||
IScriptEnvironment * env;
|
||||
const char * error;
|
||||
AVS_ScriptEnvironment(IScriptEnvironment * e = 0)
|
||||
: env(e), error(0) {}
|
||||
};
|
||||
#endif
|
||||
#define AVS_FRAME_ALIGN 16
|
||||
|
||||
typedef struct AVS_Clip AVS_Clip;
|
||||
typedef struct AVS_ScriptEnvironment AVS_ScriptEnvironment;
|
||||
@@ -258,23 +238,29 @@ AVSC_INLINE int avs_is_yuv(const AVS_VideoInfo * p)
|
||||
AVSC_INLINE int avs_is_yuy2(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_YUY2) == AVS_CS_YUY2; }
|
||||
|
||||
AVSC_API(int, avs_is_yv24)(const AVS_VideoInfo * p);
|
||||
AVSC_INLINE int avs_is_yv24(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_YV24 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_API(int, avs_is_yv16)(const AVS_VideoInfo * p);
|
||||
AVSC_INLINE int avs_is_yv16(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_YV16 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_API(int, avs_is_yv12)(const AVS_VideoInfo * p) ;
|
||||
AVSC_INLINE int avs_is_yv12(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_YV12 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_API(int, avs_is_yv411)(const AVS_VideoInfo * p);
|
||||
AVSC_INLINE int avs_is_yv411(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_YV411 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_API(int, avs_is_y8)(const AVS_VideoInfo * p);
|
||||
AVSC_INLINE int avs_is_y8(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_Y8 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_INLINE int avs_is_property(const AVS_VideoInfo * p, int property)
|
||||
{ return ((p->image_type & property)==property ); }
|
||||
{ return ((p->pixel_type & property)==property ); }
|
||||
|
||||
AVSC_INLINE int avs_is_planar(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type & AVS_CS_PLANAR); }
|
||||
|
||||
AVSC_API(int, avs_is_color_space)(const AVS_VideoInfo * p, int c_space);
|
||||
AVSC_INLINE int avs_is_color_space(const AVS_VideoInfo * p, int c_space)
|
||||
{ return avs_is_planar(p) ? ((p->pixel_type & AVS_CS_PLANAR_MASK) == (c_space & AVS_CS_PLANAR_FILTER)) : ((p->pixel_type & c_space) == c_space); }
|
||||
|
||||
AVSC_INLINE int avs_is_field_based(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_FIELDBASED); }
|
||||
@@ -288,18 +274,25 @@ AVSC_INLINE int avs_is_bff(const AVS_VideoInfo * p)
|
||||
AVSC_INLINE int avs_is_tff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_TFF); }
|
||||
|
||||
AVSC_API(int, avs_get_plane_width_subsampling)(const AVS_VideoInfo * p, int plane);
|
||||
AVSC_INLINE int avs_bits_per_pixel(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->pixel_type) {
|
||||
case AVS_CS_BGR24: return 24;
|
||||
case AVS_CS_BGR32: return 32;
|
||||
case AVS_CS_YUY2: return 16;
|
||||
case AVS_CS_YV12:
|
||||
case AVS_CS_I420: return 12;
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_from_pixels(const AVS_VideoInfo * p, int pixels)
|
||||
{ return pixels * (avs_bits_per_pixel(p)>>3); } // Will work on planar images, but will return only luma planes
|
||||
|
||||
AVSC_API(int, avs_get_plane_height_subsampling)(const AVS_VideoInfo * p, int plane);
|
||||
AVSC_INLINE int avs_row_size(const AVS_VideoInfo * p)
|
||||
{ return avs_bytes_from_pixels(p,p->width); } // Also only returns first plane on planar images
|
||||
|
||||
|
||||
AVSC_API(int, avs_bits_per_pixel)(const AVS_VideoInfo * p);
|
||||
|
||||
AVSC_API(int, avs_bytes_from_pixels)(const AVS_VideoInfo * p, int pixels);
|
||||
|
||||
AVSC_API(int, avs_row_size)(const AVS_VideoInfo * p, int plane);
|
||||
|
||||
AVSC_API(int, avs_bmp_size)(const AVS_VideoInfo * vi);
|
||||
AVSC_INLINE int avs_bmp_size(const AVS_VideoInfo * vi)
|
||||
{ if (avs_is_planar(vi)) {int p = vi->height * ((avs_row_size(vi)+3) & ~3); p+=p>>1; return p; } return vi->height * ((avs_row_size(vi)+3) & ~3); }
|
||||
|
||||
AVSC_INLINE int avs_samples_per_second(const AVS_VideoInfo * p)
|
||||
{ return p->audio_samples_per_second; }
|
||||
@@ -357,13 +350,11 @@ AVSC_INLINE void avs_set_fps(AVS_VideoInfo * p, unsigned numerator, unsigned den
|
||||
p->fps_denominator = denominator/x;
|
||||
}
|
||||
|
||||
#ifdef AVS_IMPLICIT_FUNCTION_DECLARATION_ERROR
|
||||
AVSC_INLINE int avs_is_same_colorspace(AVS_VideoInfo * x, AVS_VideoInfo * y)
|
||||
{
|
||||
return (x->pixel_type == y->pixel_type)
|
||||
|| (avs_is_yv12(x) && avs_is_yv12(y));
|
||||
}
|
||||
#endif
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
@@ -400,38 +391,89 @@ typedef struct AVS_VideoFrame {
|
||||
} AVS_VideoFrame;
|
||||
|
||||
// Access functions for AVS_VideoFrame
|
||||
AVSC_API(int, avs_get_pitch_p)(const AVS_VideoFrame * p, int plane);
|
||||
|
||||
#ifdef AVS_IMPLICIT_FUNCTION_DECLARATION_ERROR
|
||||
AVSC_INLINE int avs_get_pitch(const AVS_VideoFrame * p) {
|
||||
return avs_get_pitch_p(p, 0);}
|
||||
#endif
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_API(int, avs_get_row_size_p)(const AVS_VideoFrame * p, int plane);
|
||||
AVSC_INLINE int avs_get_pitch_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V: return p->pitchUV;}
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_row_size(const AVS_VideoFrame * p) {
|
||||
return p->row_size; }
|
||||
|
||||
AVSC_API(int, avs_get_height_p)(const AVS_VideoFrame * p, int plane);
|
||||
AVSC_INLINE int avs_get_row_size_p(const AVS_VideoFrame * p, int plane) {
|
||||
int r;
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->row_sizeUV;
|
||||
else return 0;
|
||||
case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED:
|
||||
if (p->pitchUV) {
|
||||
r = (p->row_sizeUV+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r < p->pitchUV)
|
||||
return r;
|
||||
return p->row_sizeUV;
|
||||
} else return 0;
|
||||
case AVS_PLANAR_Y_ALIGNED:
|
||||
r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r <= p->pitch)
|
||||
return r;
|
||||
return p->row_size;
|
||||
}
|
||||
return p->row_size;
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_get_height(const AVS_VideoFrame * p) {
|
||||
return p->height;}
|
||||
|
||||
AVSC_API(const BYTE *, avs_get_read_ptr_p)(const AVS_VideoFrame * p, int plane);
|
||||
AVSC_INLINE int avs_get_height_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->heightUV;
|
||||
return 0;
|
||||
}
|
||||
return p->height;}
|
||||
|
||||
#ifdef AVS_IMPLICIT_FUNCTION_DECLARATION_ERROR
|
||||
AVSC_INLINE const BYTE* avs_get_read_ptr(const AVS_VideoFrame * p) {
|
||||
return avs_get_read_ptr_p(p, 0);}
|
||||
#endif
|
||||
return p->vfb->data + p->offset;}
|
||||
|
||||
AVSC_API(int, avs_is_writable)(const AVS_VideoFrame * p);
|
||||
AVSC_INLINE const BYTE* avs_get_read_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;}
|
||||
}
|
||||
|
||||
AVSC_API(BYTE *, avs_get_write_ptr_p)(const AVS_VideoFrame * p, int plane);
|
||||
AVSC_INLINE int avs_is_writable(const AVS_VideoFrame * p) {
|
||||
return (p->refcount == 1 && p->vfb->refcount == 1);}
|
||||
|
||||
AVSC_INLINE BYTE* avs_get_write_ptr(const AVS_VideoFrame * p)
|
||||
{
|
||||
if (avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else
|
||||
return 0;
|
||||
}
|
||||
|
||||
AVSC_INLINE BYTE* avs_get_write_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
if (plane==AVS_PLANAR_Y && avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else if (plane==AVS_PLANAR_Y) {
|
||||
return 0;
|
||||
} else {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef AVS_IMPLICIT_FUNCTION_DECLARATION_ERROR
|
||||
AVSC_INLINE BYTE* avs_get_write_ptr(const AVS_VideoFrame * p) {
|
||||
return avs_get_write_ptr_p(p, 0);}
|
||||
#endif
|
||||
|
||||
AVSC_API(void, avs_release_video_frame)(AVS_VideoFrame *);
|
||||
// makes a shallow copy of a video frame
|
||||
@@ -616,16 +658,12 @@ enum {
|
||||
AVS_CPUF_SSSE3 = 0x200, // Core 2
|
||||
AVS_CPUF_SSE4 = 0x400, // Penryn, Wolfdale, Yorkfield
|
||||
AVS_CPUF_SSE4_1 = 0x400,
|
||||
//AVS_CPUF_AVX = 0x800, // Sandy Bridge, Bulldozer
|
||||
AVS_CPUF_SSE4_2 = 0x1000, // Nehalem
|
||||
//AVS_CPUF_AVX2 = 0x2000, // Haswell
|
||||
//AVS_CPUF_AVX512 = 0x4000, // Knights Landing
|
||||
AVS_CPUF_SSE4_2 = 0x800, // Nehalem
|
||||
};
|
||||
|
||||
|
||||
AVSC_API(const char *, avs_get_error)(AVS_ScriptEnvironment *); // return 0 if no error
|
||||
|
||||
AVSC_API(int, avs_get_cpu_flags)(AVS_ScriptEnvironment *);
|
||||
AVSC_API(long, avs_get_cpu_flags)(AVS_ScriptEnvironment *);
|
||||
AVSC_API(int, avs_check_version)(AVS_ScriptEnvironment *, int version);
|
||||
|
||||
AVSC_API(char *, avs_save_string)(AVS_ScriptEnvironment *, const char* s, int length);
|
||||
@@ -662,12 +700,12 @@ AVSC_API(AVS_VideoFrame *, avs_new_video_frame_a)(AVS_ScriptEnvironment *,
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_video_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,FRAME_ALIGN);}
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,FRAME_ALIGN);}
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
#endif
|
||||
|
||||
|
||||
@@ -735,6 +773,7 @@ struct AVS_Library {
|
||||
AVSC_DECLARE_FUNC(avs_function_exists);
|
||||
AVSC_DECLARE_FUNC(avs_get_audio);
|
||||
AVSC_DECLARE_FUNC(avs_get_cpu_flags);
|
||||
AVSC_DECLARE_FUNC(avs_get_error);
|
||||
AVSC_DECLARE_FUNC(avs_get_frame);
|
||||
AVSC_DECLARE_FUNC(avs_get_parity);
|
||||
AVSC_DECLARE_FUNC(avs_get_var);
|
||||
@@ -759,27 +798,6 @@ struct AVS_Library {
|
||||
AVSC_DECLARE_FUNC(avs_subframe_planar);
|
||||
AVSC_DECLARE_FUNC(avs_take_clip);
|
||||
AVSC_DECLARE_FUNC(avs_vsprintf);
|
||||
|
||||
AVSC_DECLARE_FUNC(avs_get_error);
|
||||
AVSC_DECLARE_FUNC(avs_is_yv24);
|
||||
AVSC_DECLARE_FUNC(avs_is_yv16);
|
||||
AVSC_DECLARE_FUNC(avs_is_yv12);
|
||||
AVSC_DECLARE_FUNC(avs_is_yv411);
|
||||
AVSC_DECLARE_FUNC(avs_is_y8);
|
||||
AVSC_DECLARE_FUNC(avs_is_color_space);
|
||||
|
||||
AVSC_DECLARE_FUNC(avs_get_plane_width_subsampling);
|
||||
AVSC_DECLARE_FUNC(avs_get_plane_height_subsampling);
|
||||
AVSC_DECLARE_FUNC(avs_bits_per_pixel);
|
||||
AVSC_DECLARE_FUNC(avs_bytes_from_pixels);
|
||||
AVSC_DECLARE_FUNC(avs_row_size);
|
||||
AVSC_DECLARE_FUNC(avs_bmp_size);
|
||||
AVSC_DECLARE_FUNC(avs_get_pitch_p);
|
||||
AVSC_DECLARE_FUNC(avs_get_row_size_p);
|
||||
AVSC_DECLARE_FUNC(avs_get_height_p);
|
||||
AVSC_DECLARE_FUNC(avs_get_read_ptr_p);
|
||||
AVSC_DECLARE_FUNC(avs_is_writable);
|
||||
AVSC_DECLARE_FUNC(avs_get_write_ptr_p);
|
||||
};
|
||||
|
||||
#undef AVSC_DECLARE_FUNC
|
||||
@@ -787,7 +805,7 @@ struct AVS_Library {
|
||||
|
||||
AVSC_INLINE AVS_Library * avs_load_library() {
|
||||
AVS_Library *library = (AVS_Library *)malloc(sizeof(AVS_Library));
|
||||
if (library == NULL)
|
||||
if (!library)
|
||||
return NULL;
|
||||
library->handle = LoadLibrary("avisynth");
|
||||
if (library->handle == NULL)
|
||||
@@ -814,6 +832,7 @@ AVSC_INLINE AVS_Library * avs_load_library() {
|
||||
AVSC_LOAD_FUNC(avs_function_exists);
|
||||
AVSC_LOAD_FUNC(avs_get_audio);
|
||||
AVSC_LOAD_FUNC(avs_get_cpu_flags);
|
||||
AVSC_LOAD_FUNC(avs_get_error);
|
||||
AVSC_LOAD_FUNC(avs_get_frame);
|
||||
AVSC_LOAD_FUNC(avs_get_parity);
|
||||
AVSC_LOAD_FUNC(avs_get_var);
|
||||
@@ -839,27 +858,6 @@ AVSC_INLINE AVS_Library * avs_load_library() {
|
||||
AVSC_LOAD_FUNC(avs_take_clip);
|
||||
AVSC_LOAD_FUNC(avs_vsprintf);
|
||||
|
||||
AVSC_LOAD_FUNC(avs_get_error);
|
||||
AVSC_LOAD_FUNC(avs_is_yv24);
|
||||
AVSC_LOAD_FUNC(avs_is_yv16);
|
||||
AVSC_LOAD_FUNC(avs_is_yv12);
|
||||
AVSC_LOAD_FUNC(avs_is_yv411);
|
||||
AVSC_LOAD_FUNC(avs_is_y8);
|
||||
AVSC_LOAD_FUNC(avs_is_color_space);
|
||||
|
||||
AVSC_LOAD_FUNC(avs_get_plane_width_subsampling);
|
||||
AVSC_LOAD_FUNC(avs_get_plane_height_subsampling);
|
||||
AVSC_LOAD_FUNC(avs_bits_per_pixel);
|
||||
AVSC_LOAD_FUNC(avs_bytes_from_pixels);
|
||||
AVSC_LOAD_FUNC(avs_row_size);
|
||||
AVSC_LOAD_FUNC(avs_bmp_size);
|
||||
AVSC_LOAD_FUNC(avs_get_pitch_p);
|
||||
AVSC_LOAD_FUNC(avs_get_row_size_p);
|
||||
AVSC_LOAD_FUNC(avs_get_height_p);
|
||||
AVSC_LOAD_FUNC(avs_get_read_ptr_p);
|
||||
AVSC_LOAD_FUNC(avs_is_writable);
|
||||
AVSC_LOAD_FUNC(avs_get_write_ptr_p);
|
||||
|
||||
#undef __AVSC_STRINGIFY
|
||||
#undef AVSC_STRINGIFY
|
||||
#undef AVSC_LOAD_FUNC
|
||||
@@ -872,7 +870,7 @@ fail:
|
||||
}
|
||||
|
||||
AVSC_INLINE void avs_free_library(AVS_Library *library) {
|
||||
if (library == NULL)
|
||||
if (!library)
|
||||
return;
|
||||
FreeLibrary(library->handle);
|
||||
free(library);
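(Illustration, not part of the compare.) A hedged sketch of how a client typically drives the avs_load_library()/avs_free_library() pair defined above when the header is used with AVSC_NO_DECLSPEC; the helper name, the script path and the reduced error handling are assumptions, not code from this header.

/* Example (not part of the diff): load avisynth.dll, run a script through
 * the function table, then release everything. The script path is a
 * placeholder. */
#include <stdio.h>
#include "compat/avisynth/avisynth_c.h"

static int run_script(const char *path)
{
    AVS_Library *lib = avs_load_library();
    AVS_ScriptEnvironment *env;
    AVS_Value arg, res;

    if (!lib)
        return -1;                                    /* avisynth.dll not found */
    env = lib->avs_create_script_environment(AVISYNTH_INTERFACE_VERSION);
    arg = avs_new_value_string(path);
    res = lib->avs_invoke(env, "Import", arg, NULL);  /* evaluate the .avs file */
    if (avs_is_error(res))
        fprintf(stderr, "AviSynth error: %s\n", avs_as_string(res));
    lib->avs_release_value(res);
    lib->avs_delete_script_environment(env);
    avs_free_library(lib);
    return 0;
}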
|
||||
|
68
compat/avisynth/avisynth_c_25.h
Normal file
@@ -0,0 +1,68 @@
|
||||
// Copyright (c) 2011 FFmpegSource Project
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
|
||||
/* these are defines/functions that are used and were changed in the switch to 2.6
|
||||
* and are needed to maintain full compatility with 2.5 */
|
||||
|
||||
enum {
|
||||
AVS_CS_YV12_25 = 1<<3 | AVS_CS_YUV | AVS_CS_PLANAR, // y-v-u, planar
|
||||
AVS_CS_I420_25 = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR, // y-u-v, planar
|
||||
};
|
||||
|
||||
AVSC_INLINE int avs_get_height_p_25(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane)
|
||||
{
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV)
|
||||
return p->height>>1;
|
||||
return 0;
|
||||
}
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE int avs_get_row_size_p_25(const AVS_VideoFrame * p, int plane) {
|
||||
int r;
|
||||
switch (plane)
|
||||
{
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV)
|
||||
return p->row_size>>1;
|
||||
else
|
||||
return 0;
|
||||
case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED:
|
||||
if (p->pitchUV)
|
||||
{
|
||||
r = ((p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)) )>>1; // Aligned rowsize
|
||||
if (r < p->pitchUV)
|
||||
return r;
|
||||
return p->row_size>>1;
|
||||
}
|
||||
else
|
||||
return 0;
|
||||
case AVS_PLANAR_Y_ALIGNED:
|
||||
r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r <= p->pitch)
|
||||
return r;
|
||||
return p->row_size;
|
||||
}
|
||||
return p->row_size;
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_yv12_25(const AVS_VideoInfo * p)
|
||||
{ return ((p->pixel_type & AVS_CS_YV12_25) == AVS_CS_YV12_25)||((p->pixel_type & AVS_CS_I420_25) == AVS_CS_I420_25); }
|
@@ -1,62 +0,0 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_CAPI_H
|
||||
#define AVS_CAPI_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
# define EXTERN_C extern "C"
|
||||
#else
|
||||
# define EXTERN_C
|
||||
#endif
|
||||
|
||||
#ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
#else
|
||||
# define AVSC_CC __stdcall
|
||||
#endif
|
||||
|
||||
#define AVSC_INLINE static __inline
|
||||
|
||||
#ifdef BUILDING_AVSCORE
|
||||
# define AVSC_EXPORT EXTERN_C
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllexport) ret AVSC_CC name
|
||||
#else
|
||||
# define AVSC_EXPORT EXTERN_C __declspec(dllexport)
|
||||
# ifndef AVSC_NO_DECLSPEC
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name
|
||||
# else
|
||||
# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#endif //AVS_CAPI_H
|
@@ -1,55 +0,0 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_CONFIG_H
|
||||
#define AVS_CONFIG_H
|
||||
|
||||
// Undefine this to get cdecl calling convention
|
||||
#define AVSC_USE_STDCALL 1
|
||||
|
||||
// NOTE TO PLUGIN AUTHORS:
|
||||
// Because FRAME_ALIGN can be substantially higher than the alignment
|
||||
// a plugin actually needs, plugins should not use FRAME_ALIGN to check for
|
||||
// alignment. They should always request the exact alignment value they need.
|
||||
// This is to make sure that plugins work over the widest range of AviSynth
|
||||
// builds possible.
|
||||
#define FRAME_ALIGN 32
|
||||
|
||||
#if defined(_M_AMD64) || defined(__x86_64)
|
||||
# define X86_64
|
||||
#elif defined(_M_IX86) || defined(__i386__)
|
||||
# define X86_32
|
||||
#else
|
||||
# error Unsupported CPU architecture.
|
||||
#endif
|
||||
|
||||
#endif //AVS_CONFIG_H
|
@@ -1,51 +0,0 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_TYPES_H
|
||||
#define AVS_TYPES_H
|
||||
|
||||
// Define all types necessary for interfacing with avisynth.dll
|
||||
|
||||
// Raster types used by VirtualDub & Avisynth
|
||||
typedef unsigned int Pixel32;
|
||||
typedef unsigned char BYTE;
|
||||
|
||||
// Audio Sample information
|
||||
typedef float SFLOAT;
|
||||
|
||||
#ifdef __GNUC__
|
||||
typedef long long int INT64;
|
||||
#else
|
||||
typedef __int64 INT64;
|
||||
#endif
|
||||
|
||||
#endif //AVS_TYPES_H
|
@@ -513,21 +513,21 @@ AVSC_INLINE AVS_Value avs_array_elt(AVS_Value v, int index)
|
||||
// only use these functions on am AVS_Value that does not already have
|
||||
// an active value. Remember, treat AVS_Value as a fat pointer.
|
||||
AVSC_INLINE AVS_Value avs_new_value_bool(int v0)
|
||||
{ AVS_Value v = {0}; v.type = 'b'; v.d.boolean = v0 == 0 ? 0 : 1; return v; }
|
||||
{ AVS_Value v; v.type = 'b'; v.d.boolean = v0 == 0 ? 0 : 1; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_int(int v0)
|
||||
{ AVS_Value v = {0}; v.type = 'i'; v.d.integer = v0; return v; }
|
||||
{ AVS_Value v; v.type = 'i'; v.d.integer = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_string(const char * v0)
|
||||
{ AVS_Value v = {0}; v.type = 's'; v.d.string = v0; return v; }
|
||||
{ AVS_Value v; v.type = 's'; v.d.string = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_float(float v0)
|
||||
{ AVS_Value v = {0}; v.type = 'f'; v.d.floating_pt = v0; return v;}
|
||||
{ AVS_Value v; v.type = 'f'; v.d.floating_pt = v0; return v;}
|
||||
AVSC_INLINE AVS_Value avs_new_value_error(const char * v0)
|
||||
{ AVS_Value v = {0}; v.type = 'e'; v.d.string = v0; return v; }
|
||||
{ AVS_Value v; v.type = 'e'; v.d.string = v0; return v; }
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE AVS_Value avs_new_value_clip(AVS_Clip * v0)
|
||||
{ AVS_Value v = {0}; avs_set_to_clip(&v, v0); return v; }
|
||||
{ AVS_Value v; avs_set_to_clip(&v, v0); return v; }
|
||||
#endif
|
||||
AVSC_INLINE AVS_Value avs_new_value_array(AVS_Value * v0, int size)
|
||||
{ AVS_Value v = {0}; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
|
||||
{ AVS_Value v; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
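(Illustration, not part of the compare.) This hunk replaces the zero-initialized `AVS_Value v = {0};` declarations with plain `AVS_Value v;`, so any field a constructor does not set explicitly is left indeterminate afterwards. A short sketch of how these constructors are combined in practice follows; the Trim filter call and the helper name trim_clip are only examples.

/* Example (not part of the diff): build an argument array with the value
 * constructors above and invoke the AviSynth "Trim" filter. The caller is
 * expected to check avs_is_error() on the result. */
#include "compat/avisynth/avisynth_c.h"

static AVS_Value trim_clip(AVS_Library *lib, AVS_ScriptEnvironment *env,
                           AVS_Value clip, int frames)
{
    AVS_Value args[3];

    args[0] = clip;                           /* value wrapping an AVS_Clip */
    args[1] = avs_new_value_int(0);           /* first frame to keep        */
    args[2] = avs_new_value_int(frames - 1);  /* last frame to keep         */
    return lib->avs_invoke(env, "Trim", avs_new_value_array(args, 3), NULL);
}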
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
|
@@ -52,8 +52,8 @@ namespace avxsynth {
|
||||
//
|
||||
// Functions
|
||||
//
|
||||
#define MAKEDWORD(a,b,c,d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
|
||||
#define MAKEWORD(a,b) (((a) << 8) | (b))
|
||||
#define MAKEDWORD(a,b,c,d) ((a << 24) | (b << 16) | (c << 8) | (d))
|
||||
#define MAKEWORD(a,b) ((a << 8) | (b))
|
||||
|
||||
#define lstrlen strlen
|
||||
#define lstrcpy strcpy
|
||||
|
@@ -66,8 +66,6 @@ static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr
|
||||
struct thread_arg *thread_arg;
|
||||
|
||||
thread_arg = av_mallocz(sizeof(struct thread_arg));
|
||||
if (!thread_arg)
|
||||
return ENOMEM;
|
||||
|
||||
thread_arg->start_routine = start_routine;
|
||||
thread_arg->arg = arg;
|
||||
|
127
doc/APIchanges
@@ -15,105 +15,6 @@ libavutil: 2014-08-09
|
||||
|
||||
API changes, most recent first:
|
||||
|
||||
-------- 8< --------- FFmpeg 2.7 was cut here -------- 8< ---------
|
||||
|
||||
2015-06-04 - cc17b43 - lswr 1.2.100
|
||||
Add swr_get_out_samples()
|
||||
|
||||
2015-05-27 - c312bfa - lavu 54.26.100 - cpu.h
|
||||
Add AV_CPU_FLAG_AVXSLOW.
|
||||
|
||||
2015-05-26 - 1fb9b2a - lavu 54.25.100 - rational.h
|
||||
Add av_q2intfloat().
|
||||
|
||||
2015-05-13 - cc48409 / e7c5e17 - lavc 56.39.100 / 56.23.0
|
||||
Add av_vda_default_init2.
|
||||
|
||||
2015-05-11 - 541d75f - lavf 56.33.100 - avformat.h
|
||||
Add AVOpenCallback AVFormatContext.open_cb
|
||||
|
||||
2015-05-07 - a7dd933 - 56.38.100 - avcodec.h
|
||||
Add av_packet_side_data_name().
|
||||
|
||||
2015-05-07 - 01e59d4 - 56.37.102 - avcodec.h
|
||||
Add FF_PROFILE_VP9_2 and FF_PROFILE_VP9_3.
|
||||
|
||||
2015-05-04 - 079b7f6 - 56.37.100 - avcodec.h
|
||||
Add FF_PROFILE_VP9_0 and FF_PROFILE_VP9_1.
|
||||
|
||||
2015-04-22 - 748d481 - lavf 56.31.100 - avformat.h
|
||||
Add AVFMT_FLAG_FAST_SEEK flag. Some formats (initially mp3) use it to enable
|
||||
fast, but inaccurate seeking.
|
||||
|
||||
2015-04-20 - 8e8219e / c253340 - lavu 54.23.100 / 54.12.0 - log.h
|
||||
Add AV_LOG_TRACE for extremely verbose debugging.
|
||||
|
||||
2015-04-02 - 26e0e393 - lavf 56.29.100 - avio.h
|
||||
Add AVIODirEntryType.AVIO_ENTRY_SERVER.
|
||||
Add AVIODirEntryType.AVIO_ENTRY_SHARE.
|
||||
Add AVIODirEntryType.AVIO_ENTRY_WORKGROUP.
|
||||
|
||||
2015-03-31 - 3188696 - lavu 54.22.100 - avstring.h
|
||||
Add av_append_path_component()
|
||||
|
||||
2015-03-27 - 184084c - lavf 56.27.100 - avio.h url.h
|
||||
New directory listing API.
|
||||
|
||||
Add AVIODirEntryType enum.
|
||||
Add AVIODirEntry, AVIODirContext structures.
|
||||
Add avio_open_dir(), avio_read_dir(), avio_close_dir(), avio_free_directory_entry().
|
||||
Add ff_alloc_dir_entry().
|
||||
Extend URLProtocol with url_open_dir(), url_read_dir(), url_close_dir().
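(Illustration, not part of the compare.) A minimal sketch of the directory listing API summarized in the entry above, condensed from the kind of usage the avio_list_dir example elsewhere in this compare demonstrates; the helper name dump_dir and the early-return error handling are assumptions.

/* Example (not part of the diff): walk a directory through the new API. */
#include <stdio.h>
#include <inttypes.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>

static int dump_dir(const char *url)
{
    AVIODirContext *ctx = NULL;
    AVIODirEntry *entry = NULL;
    int ret;

    av_register_all();
    avformat_network_init();

    if ((ret = avio_open_dir(&ctx, url, NULL)) < 0)
        return ret;
    while ((ret = avio_read_dir(ctx, &entry)) >= 0 && entry) {
        printf("%12"PRId64" %s\n", entry->size, entry->name);
        avio_free_directory_entry(&entry);
    }
    avio_close_dir(&ctx);
    return ret < 0 ? ret : 0;
}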
|
||||
|
||||
2015-03-29 - 268ff17 / c484561 - lavu 54.21.100 / 54.10.0 - pixfmt.h
|
||||
Add AV_PIX_FMT_MMAL for MMAL hardware acceleration.
|
||||
|
||||
2015-03-19 - 11fe56c - 56.29.100 / lavc 56.22.0
|
||||
Add FF_PROFILE_DTS_EXPRESS.
|
||||
|
||||
-------- 8< --------- FFmpeg 2.6 was cut here -------- 8< ---------
|
||||
|
||||
2015-03-04 - cca4476 - lavf 56.25.100
|
||||
Add avformat_flush()
|
||||
|
||||
2015-03-03 - 81a9126 - lavf 56.24.100
|
||||
Add avio_put_str16be()
|
||||
|
||||
2015-02-19 - 560eb71 / 31d2039 - lavc 56.23.100 / 56.13.0
|
||||
Add width, height, coded_width, coded_height and format to
|
||||
AVCodecParserContext.
|
||||
|
||||
2015-02-19 - e375511 / 5b1d9ce - lavu 54.19.100 / 54.9.0
|
||||
Add AV_PIX_FMT_QSV for QSV hardware acceleration.
|
||||
|
||||
2015-02-14 - ba22295 - lavc 56.21.102
|
||||
Deprecate VIMA decoder.
|
||||
|
||||
2015-01-27 - 62a82c6 / 728685f - lavc 56.21.100 / 56.12.0, lavu 54.18.100 / 54.8.0 - avcodec.h, frame.h
|
||||
Add AV_PKT_DATA_AUDIO_SERVICE_TYPE and AV_FRAME_DATA_AUDIO_SERVICE_TYPE for
|
||||
storing the audio service type as side data.
|
||||
|
||||
2015-01-16 - a47c933 - lavf 56.19.100 - avformat.h
|
||||
Add data_codec and data_codec_id for storing codec of data stream
|
||||
|
||||
2015-01-11 - 007c33d - lavd 56.4.100 - avdevice.h
|
||||
Add avdevice_list_input_sources().
|
||||
Add avdevice_list_output_sinks().
|
||||
|
||||
2014-12-25 - d7aaeea / c220a60 - lavc 56.19.100 / 56.10.0 - vdpau.h
|
||||
Add av_vdpau_get_surface_parameters().
|
||||
|
||||
2014-12-25 - ddb9a24 / 6c99c92 - lavc 56.18.100 / 56.9.0 - avcodec.h
|
||||
Add AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH flag to av_vdpau_bind_context().
|
||||
|
||||
2014-12-25 - d16079a / 57b6704 - lavc 56.17.100 / 56.8.0 - avcodec.h
|
||||
Add AVCodecContext.sw_pix_fmt.
|
||||
|
||||
2014-12-04 - 6e9ac02 - lavc 56.14.100 - dv_profile.h
|
||||
Add av_dv_codec_profile2().
|
||||
|
||||
-------- 8< --------- FFmpeg 2.5 was cut here -------- 8< ---------
|
||||
|
||||
2014-11-21 - ab922f9 - lavu 54.15.100 - dict.h
|
||||
Add av_dict_get_string().
|
||||
|
||||
@@ -169,12 +70,12 @@ API changes, most recent first:
|
||||
|
||||
-------- 8< --------- FFmpeg 2.4 was cut here -------- 8< ---------
|
||||
|
||||
2014-08-25 - 215db29 / b263f8f - lavf 56.3.100 / 56.3.0 - avformat.h
|
||||
Add AVFormatContext.max_ts_probe.
|
||||
|
||||
2014-08-28 - f30a815 / 9301486 - lavc 56.1.100 / 56.1.0 - avcodec.h
|
||||
Add AV_PKT_DATA_STEREO3D to export container-level stereo3d information.
|
||||
|
||||
2014-08-25 - 215db29 / b263f8f - lavf 56.3.100 / 56.3.0 - avformat.h
|
||||
Add AVFormatContext.max_ts_probe.
|
||||
|
||||
2014-08-23 - 8fc9bd0 - lavu 54.7.100 - dict.h
|
||||
AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL arguments are now
|
||||
freed even on error. This is consistent with the behaviour all users
|
||||
@@ -299,7 +200,7 @@ API changes, most recent first:
|
||||
Increase FF_INPUT_BUFFER_PADDING_SIZE to 32 due to some corner cases needing
|
||||
it
|
||||
|
||||
2014-06-10 - 5482780 - lavf 55.43.100 - avformat.h
|
||||
2014-06-10 - xxxxxxx - lavf 55.43.100 - avformat.h
|
||||
New field int64_t max_analyze_duration2 instead of deprecated
|
||||
int max_analyze_duration.
|
||||
|
||||
@@ -323,7 +224,7 @@ API changes, most recent first:
|
||||
Add strict_std_compliance and related AVOptions to support experimental
|
||||
muxing.
|
||||
|
||||
2014-05-26 - 55cc60c - lavu 52.87.100 - threadmessage.h
|
||||
2014-05-26 - xxxxxxx - lavu 52.87.100 - threadmessage.h
|
||||
Add thread message queue API.
|
||||
|
||||
2014-05-26 - c37d179 - lavf 55.41.100 - avformat.h
|
||||
@@ -333,7 +234,7 @@ API changes, most recent first:
|
||||
Add av_stream_get_side_data() to access stream-level side data
|
||||
in the same way as av_packet_get_side_data().
|
||||
|
||||
2014-05-20 - 7336e39 - lavu 52.86.100 - fifo.h
|
||||
2014-05-xx - xxxxxxx - lavu 52.86.100 - fifo.h
|
||||
Add av_fifo_alloc_array() function.
|
||||
|
||||
2014-05-19 - ef1d4ee / bddd8cb - lavu 52.85.100 / 53.15.0 - frame.h, display.h
|
||||
@@ -352,7 +253,7 @@ API changes, most recent first:
|
||||
Add avcodec_free_context(). From now on it should be used for freeing
|
||||
AVCodecContext.
|
||||
|
||||
2014-05-17 - 0eec06e / 1bd0bdc - lavu 52.84.100 / 54.5.0 - time.h
|
||||
2014-05-17 - 0eec06e - lavu 52.84.100 - time.h
|
||||
Add av_gettime_relative() av_gettime_relative_is_monotonic()
|
||||
|
||||
2014-05-15 - eacf7d6 / 0c1959b - lavf 55.38.100 / 55.17.0 - avformat.h
|
||||
@@ -365,7 +266,7 @@ API changes, most recent first:
|
||||
2014-05-11 - 14aef38 / 66e6c8a - lavu 52.83.100 / 53.14.0 - pixfmt.h
|
||||
Add AV_PIX_FMT_VDA for new-style VDA acceleration.
|
||||
|
||||
2014-05-07 - 351f611 - lavu 52.82.100 - fifo.h
|
||||
2014-05-xx - xxxxxxx - lavu 52.82.100 - fifo.h
|
||||
Add av_fifo_freep() function.
|
||||
|
||||
2014-05-02 - ba52fb11 - lavu 52.81.100 - opt.h
|
||||
@@ -387,14 +288,10 @@ API changes, most recent first:
|
||||
Deprecate CODEC_FLAG_INPUT_PRESERVED. Its functionality is replaced by passing
|
||||
reference-counted frames to encoders.
|
||||
|
||||
2014-04-30 - 617e866 - lavu 52.81.100 - pixdesc.h
|
||||
Add av_find_best_pix_fmt_of_2(), av_get_pix_fmt_loss()
|
||||
Deprecate avcodec_get_pix_fmt_loss(), avcodec_find_best_pix_fmt_of_2()
|
||||
|
||||
2014-04-29 - 1bf6396 - lavc 55.60.100 - avcodec.h
|
||||
Add AVCodecDescriptor.mime_types field.
|
||||
|
||||
2014-04-29 - b804eb4 - lavu 52.80.100 - hash.h
|
||||
2014-04-29 - xxxxxxx - lavu 52.80.0 - hash.h
|
||||
Add av_hash_final_bin(), av_hash_final_hex() and av_hash_final_b64().
|
||||
|
||||
2014-03-07 - 8b2a130 - lavc 55.50.0 / 55.53.100 - dxva2.h
|
||||
@@ -406,7 +303,7 @@ API changes, most recent first:
|
||||
2014-04-17 - a8d01a7 / 0983d48 - lavu 53.12.0 / 52.77.100 - crc.h
|
||||
Add AV_CRC_16_ANSI_LE crc variant.
|
||||
|
||||
2014-04-15 - ef818d8 - lavf 55.37.101 - avformat.h
|
||||
2014-04-XX - xxxxxxx - lavf xx.xx.1xx - avformat.h
|
||||
Add av_format_inject_global_side_data()
|
||||
|
||||
2014-04-12 - 4f698be - lavu 52.76.100 - log.h
|
||||
@@ -486,7 +383,7 @@ API changes, most recent first:
|
||||
2014-02-19 - f4c8d00 / 6bb8720 - lavu 52.64.101 / 53.3.1 - opt.h
|
||||
Deprecate unused AV_OPT_FLAG_METADATA.
|
||||
|
||||
2014-02-16 - 81c3f81 - lavd 55.10.100 - avdevice.h
|
||||
2014-02-xx - xxxxxxx - lavd 55.10.100 - avdevice.h
|
||||
Add avdevice_list_devices() and avdevice_free_list_devices()
|
||||
|
||||
2014-02-16 - db3c970 - lavf 55.33.100 - avio.h
|
||||
@@ -527,7 +424,7 @@ API changes, most recent first:
|
||||
2014-01-19 - 1a193c4 - lavf 55.25.100 - avformat.h
|
||||
Add avformat_get_mov_video_tags() and avformat_get_mov_audio_tags().
|
||||
|
||||
2014-01-19 - 3532dd5 - lavu 52.63.100 - rational.h
|
||||
2014-01-19 - xxxxxxx - lavu 52.63.100 - rational.h
|
||||
Add av_make_q() function.
|
||||
|
||||
2014-01-05 - 4cf4da9 / 5b4797a - lavu 52.62.100 / 53.2.0 - frame.h
|
||||
|
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
|
||||
# This could be handy for archiving the generated documentation or
|
||||
# if some version control system is used.
|
||||
|
||||
PROJECT_NUMBER = 2.7.7
|
||||
PROJECT_NUMBER = 2.5
|
||||
|
||||
# With the PROJECT_LOGO tag one can specify a logo or icon that is included
|
||||
# in the documentation. The maximum height of the logo should not exceed 55
|
||||
|
@@ -36,7 +36,6 @@ DOCS-$(CONFIG_MANPAGES) += $(MANPAGES)
|
||||
DOCS-$(CONFIG_TXTPAGES) += $(TXTPAGES)
|
||||
DOCS = $(DOCS-yes)
|
||||
|
||||
DOC_EXAMPLES-$(CONFIG_AVIO_LIST_DIR_EXAMPLE) += avio_list_dir
|
||||
DOC_EXAMPLES-$(CONFIG_AVIO_READING_EXAMPLE) += avio_reading
|
||||
DOC_EXAMPLES-$(CONFIG_AVCODEC_EXAMPLE) += avcodec
|
||||
DOC_EXAMPLES-$(CONFIG_DECODING_ENCODING_EXAMPLE) += decoding_encoding
|
||||
@@ -47,7 +46,6 @@ DOC_EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE) += filtering_audio
|
||||
DOC_EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE) += filtering_video
|
||||
DOC_EXAMPLES-$(CONFIG_METADATA_EXAMPLE) += metadata
|
||||
DOC_EXAMPLES-$(CONFIG_MUXING_EXAMPLE) += muxing
|
||||
DOC_EXAMPLES-$(CONFIG_QSVDEC_EXAMPLE) += qsvdec
|
||||
DOC_EXAMPLES-$(CONFIG_REMUXING_EXAMPLE) += remuxing
|
||||
DOC_EXAMPLES-$(CONFIG_RESAMPLING_AUDIO_EXAMPLE) += resampling_audio
|
||||
DOC_EXAMPLES-$(CONFIG_SCALING_VIDEO_EXAMPLE) += scaling_video
|
||||
@@ -116,9 +114,9 @@ doc/%-all.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI)
|
||||
|
||||
doc/%.1 doc/%.3: TAG = MAN
|
||||
doc/%.1: doc/%.pod $(GENTEXI)
|
||||
$(M)pod2man --section=1 --center=" " --release=" " --date=" " $< > $@
|
||||
$(M)pod2man --section=1 --center=" " --release=" " $< > $@
|
||||
doc/%.3: doc/%.pod $(GENTEXI)
|
||||
$(M)pod2man --section=3 --center=" " --release=" " --date=" " $< > $@
|
||||
$(M)pod2man --section=3 --center=" " --release=" " $< > $@
|
||||
|
||||
$(DOCS) doc/doxy/html: | doc/
|
||||
$(DOC_EXAMPLES:%$(EXESUF)=%.o): | doc/examples
|
||||
|
@@ -139,26 +139,6 @@ ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
|
||||
|
||||
@section mp3_header_decompress
|
||||
|
||||
@section mpeg4_unpack_bframes
|
||||
|
||||
Unpack DivX-style packed B-frames.
|
||||
|
||||
DivX-style packed B-frames are not valid MPEG-4 and were only a
|
||||
workaround for the broken Video for Windows subsystem.
|
||||
They use more space, can cause minor AV sync issues, require more
|
||||
CPU power to decode (unless the player has some decoded picture queue
|
||||
to compensate the 2,0,2,0 frame per packet style) and cause
|
||||
trouble if copied into a standard container like mp4 or mpeg-ps/ts,
|
||||
because MPEG-4 decoders may not be able to decode them, since they are
|
||||
not valid MPEG-4.
|
||||
|
||||
For example to fix an AVI file containing an MPEG-4 stream with
|
||||
DivX-style packed B-frames using @command{ffmpeg}, you can use the command:
|
||||
|
||||
@example
|
||||
ffmpeg -i INPUT.avi -codec copy -bsf:v mpeg4_unpack_bframes OUTPUT.avi
|
||||
@end example
|
||||
|
||||
@section noise
|
||||
|
||||
Damages the contents of packets without damaging the container. Can be
|
||||
|
@@ -7,18 +7,10 @@ V
|
||||
Disable the default terse mode, the full command issued by make and its
|
||||
output will be shown on the screen.
|
||||
|
||||
DBG
|
||||
Preprocess x86 external assembler files to a .dbg.asm file in the object
|
||||
directory, which then gets compiled. Helps developping those assembler
|
||||
files.
|
||||
|
||||
DESTDIR
|
||||
Destination directory for the install targets, useful to prepare packages
|
||||
or install FFmpeg in cross-environments.
|
||||
|
||||
GEN
|
||||
Set to ‘1’ to generate the missing or mismatched references.
|
||||
|
||||
Makefile targets:
|
||||
|
||||
all
|
||||
|
@@ -7,7 +7,7 @@ all the encoders and decoders. In addition each codec may support
|
||||
so-called private options, which are specific for a given codec.
|
||||
|
||||
Sometimes, a global option may only affect a specific kind of codec,
|
||||
and may be nonsensical or ignored by another, so you need to be aware
|
||||
and may be unsensical or ignored by another, so you need to be aware
|
||||
of the meaning of the specified options. Also some options are
|
||||
meant only for decoding or encoding.
|
||||
|
||||
@@ -495,8 +495,6 @@ visualize block types
|
||||
picture buffer allocations
|
||||
@item thread_ops
|
||||
threading operations
|
||||
@item nomc
|
||||
skip motion compensation
|
||||
@end table
|
||||
|
||||
@item vismv @var{integer} (@emph{decoding,video})
|
||||
@@ -865,14 +863,6 @@ Possible values:
|
||||
|
||||
@item mpeg2_aac_he
|
||||
|
||||
@item mpeg4_sp
|
||||
|
||||
@item mpeg4_core
|
||||
|
||||
@item mpeg4_main
|
||||
|
||||
@item mpeg4_asp
|
||||
|
||||
@item dts
|
||||
|
||||
@item dts_es
|
||||
|
@@ -83,23 +83,6 @@ Loud sounds are fully compressed. Soft sounds are enhanced.
|
||||
|
||||
@end table
|
||||
|
||||
@section flac
|
||||
|
||||
FLAC audio decoder.
|
||||
|
||||
This decoder aims to implement the complete FLAC specification from Xiph.
|
||||
|
||||
@subsection FLAC Decoder options
|
||||
|
||||
@table @option
|
||||
|
||||
@item -use_buggy_lpc
|
||||
The lavc FLAC encoder used to produce buggy streams with high lpc values
|
||||
(like the default value). This option makes it possible to decode such streams
|
||||
correctly by using lavc's old buggy lpc logic for decoding.
|
||||
|
||||
@end table
|
||||
|
||||
@section ffwavesynth
|
||||
|
||||
Internal wave synthetizer.
|
||||
|
@@ -98,7 +98,7 @@ All subsequent file-related directives apply to that file.
|
||||
|
||||
@item @code{ffconcat version 1.0}
|
||||
Identify the script type and version. It also sets the @option{safe} option
|
||||
to 1 if it was -1.
|
||||
to 1 if it was to its default -1.
|
||||
|
||||
To make FFmpeg recognize the format automatically, this directive must
|
||||
appears exactly as is (no extra space or byte-order-mark) on the very first
|
||||
@@ -145,15 +145,12 @@ component.
|
||||
|
||||
If set to 0, any file name is accepted.
|
||||
|
||||
The default is 1.
|
||||
|
||||
-1 is equivalent to 1 if the format was automatically
|
||||
The default is -1, it is equivalent to 1 if the format was automatically
|
||||
probed and 0 otherwise.
|
||||
|
||||
@item auto_convert
|
||||
If set to 1, try to perform automatic conversions on packet data to make the
|
||||
streams concatenable.
|
||||
The default is 1.
|
||||
|
||||
Currently, the only conversion is adding the h264_mp4toannexb bitstream
|
||||
filter to H.264 streams in MP4 format. This is necessary in particular if
|
||||
@@ -208,11 +205,6 @@ It accepts the following options:
|
||||
Set the minimum valid delay between frames in hundredths of seconds.
|
||||
Range is 0 to 6000. Default value is 2.
|
||||
|
||||
@item max_gif_delay
|
||||
Set the maximum valid delay between frames in hundredth of seconds.
|
||||
Range is 0 to 65535. Default value is 65535 (nearly eleven minutes),
|
||||
the maximum value allowed by the specification.
|
||||
|
||||
@item default_delay
|
||||
Set the default delay between frames in hundredths of seconds.
|
||||
Range is 0 to 6000. Default value is 10.
|
||||
@@ -261,10 +253,6 @@ Select the pattern type used to interpret the provided filename.
|
||||
|
||||
@var{pattern_type} accepts one of the following values.
|
||||
@table @option
|
||||
@item none
|
||||
Disable pattern matching, therefore the video will only contain the specified
|
||||
image. You should use this option if you do not want to create sequences from
|
||||
multiple images and your filenames may contain special pattern characters.
|
||||
@item sequence
|
||||
Select a sequence pattern type, used to specify a sequence of files
|
||||
indexed by sequential numbers.
|
||||
@@ -371,23 +359,6 @@ ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@section mov/mp4/3gp/Quicktme
|
||||
|
||||
Quicktime / MP4 demuxer.
|
||||
|
||||
This demuxer accepts the following options:
|
||||
@table @option
|
||||
@item enable_drefs
|
||||
Enable loading of external tracks, disabled by default.
|
||||
Enabling this can theoretically leak information in some use cases.
|
||||
|
||||
@item use_absolute_path
|
||||
Allows loading of external tracks via absolute paths, disabled by default.
|
||||
Enabling this poses a security risk. It should only be enabled if the source
|
||||
is known to be non malicious.
|
||||
|
||||
@end table
|
||||
|
||||
@section mpegts
|
||||
|
||||
MPEG-2 transport stream demuxer.
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Developer Documentation
|
||||
@titlepage
|
||||
@@ -228,7 +227,7 @@ autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@@<!$/
|
||||
@end example
|
||||
|
||||
For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
@lisp
|
||||
@example
|
||||
(c-add-style "ffmpeg"
|
||||
'("k&r"
|
||||
(c-basic-offset . 4)
|
||||
@@ -239,7 +238,7 @@ For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
)
|
||||
)
|
||||
(setq c-default-style "ffmpeg")
|
||||
@end lisp
|
||||
@end example
|
||||
|
||||
@section Development Policy
|
||||
|
||||
@@ -648,12 +647,12 @@ accordingly].
|
||||
@subsection Adding files to the fate-suite dataset
|
||||
|
||||
When there is no muxer or encoder available to generate test media for a
|
||||
specific test then the media has to be included in the fate-suite.
|
||||
specific test then the media has to be inlcuded in the fate-suite.
|
||||
First please make sure that the sample file is as small as possible to test the
|
||||
respective decoder or demuxer sufficiently. Large files increase network
|
||||
bandwidth and disk space requirements.
|
||||
Once you have a working fate test and fate sample, provide in the commit
|
||||
message or introductory message for the patch series that you post to
|
||||
message or introductionary message for the patch series that you post to
|
||||
the ffmpeg-devel mailing list, a direct link to download the sample media.
|
||||
|
||||
|
||||
|
@@ -6,16 +6,8 @@ DOXYGEN="${3}"
|
||||
|
||||
shift 3
|
||||
|
||||
if [ -e "$SRC_PATH/VERSION" ]; then
|
||||
VERSION=`cat "$SRC_PATH/VERSION"`
|
||||
else
|
||||
VERSION=`cd "$SRC_PATH"; git describe`
|
||||
fi
|
||||
|
||||
$DOXYGEN - <<EOF
|
||||
@INCLUDE = ${DOXYFILE}
|
||||
INPUT = $@
|
||||
EXAMPLE_PATH = ${SRC_PATH}/doc/examples
|
||||
HTML_TIMESTAMP = NO
|
||||
PROJECT_NUMBER = $VERSION
|
||||
EOF
|
||||
|
@@ -494,85 +494,6 @@ Selected by Encoder (default)
|
||||
|
||||
@end table
|
||||
|
||||
@anchor{flac}
|
||||
@section flac
|
||||
|
||||
FLAC (Free Lossless Audio Codec) Encoder
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following options are supported by FFmpeg's flac encoder.
|
||||
|
||||
@table @option
|
||||
@item compression_level
|
||||
Sets the compression level, which chooses defaults for many other options
|
||||
if they are not set explicitly.
|
||||
|
||||
@item frame_size
|
||||
Sets the size of the frames in samples per channel.
|
||||
|
||||
@item lpc_coeff_precision
|
||||
Sets the LPC coefficient precision, valid values are from 1 to 15, 15 is the
|
||||
default.
|
||||
|
||||
@item lpc_type
|
||||
Sets the first stage LPC algorithm
|
||||
@table @samp
|
||||
@item none
|
||||
LPC is not used
|
||||
|
||||
@item fixed
|
||||
fixed LPC coefficients
|
||||
|
||||
@item levinson
|
||||
|
||||
@item cholesky
|
||||
@end table
|
||||
|
||||
@item lpc_passes
|
||||
Number of passes to use for Cholesky factorization during LPC analysis
|
||||
|
||||
@item min_partition_order
|
||||
The minimum partition order
|
||||
|
||||
@item max_partition_order
|
||||
The maximum partition order
|
||||
|
||||
@item prediction_order_method
|
||||
@table @samp
|
||||
@item estimation
|
||||
@item 2level
|
||||
@item 4level
|
||||
@item 8level
|
||||
@item search
|
||||
Bruteforce search
|
||||
@item log
|
||||
@end table
|
||||
|
||||
@item ch_mode
|
||||
Channel mode
|
||||
@table @samp
|
||||
@item auto
|
||||
The mode is chosen automatically for each frame
|
||||
@item indep
|
||||
Chanels are independently coded
|
||||
@item left_side
|
||||
@item right_side
|
||||
@item mid_side
|
||||
@end table
|
||||
|
||||
@item exact_rice_parameters
|
||||
Chooses if rice parameters are calculated exactly or approximately.
|
||||
if set to 1 then they are chosen exactly, which slows the code down slightly and
|
||||
improves compression slightly.
|
||||
|
||||
@item multi_dim_quant
|
||||
Multi Dimensional Quantization. If set to 1 then a 2nd stage LPC algorithm is
|
||||
applied after the first stage to finetune the coefficients. This is quite slow
|
||||
and slightly improves compression.
|
||||
|
||||
@end table
|
||||
|
||||
@anchor{libfaac}
|
||||
@section libfaac
|
||||
|
||||
@@ -2250,7 +2171,7 @@ Use @var{0} to disable alpha plane coding.
|
||||
@subsection Speed considerations
|
||||
|
||||
In the default mode of operation the encoder has to honor frame constraints
|
||||
(i.e. not produce frames with size bigger than requested) while still making
|
||||
(i.e. not produc frames with size bigger than requested) while still making
|
||||
output picture as good as possible.
|
||||
A frame containing a lot of small details is harder to compress and the encoder
|
||||
would spend more time searching for appropriate quantizers for each slice.
|
||||
|
@@ -11,8 +11,7 @@ CFLAGS += -Wall -g
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)

EXAMPLES= avio_list_dir \
          avio_reading \
EXAMPLES= avio_reading \
          decoding_encoding \
          demuxing_decoding \
          extract_mvs \
@@ -30,7 +29,6 @@ OBJS=$(addsuffix .o,$(EXAMPLES))

# the following examples make explicit use of the math library
avcodec: LDLIBS += -lm
decoding_encoding: LDLIBS += -lm
muxing: LDLIBS += -lm
resampling_audio: LDLIBS += -lm
@@ -1,120 +0,0 @@
/*
 * Copyright (c) 2014 Lukasz Marek
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>

static const char *type_string(int type)
{
    switch (type) {
    case AVIO_ENTRY_DIRECTORY:
        return "<DIR>";
    case AVIO_ENTRY_FILE:
        return "<FILE>";
    case AVIO_ENTRY_BLOCK_DEVICE:
        return "<BLOCK DEVICE>";
    case AVIO_ENTRY_CHARACTER_DEVICE:
        return "<CHARACTER DEVICE>";
    case AVIO_ENTRY_NAMED_PIPE:
        return "<PIPE>";
    case AVIO_ENTRY_SYMBOLIC_LINK:
        return "<LINK>";
    case AVIO_ENTRY_SOCKET:
        return "<SOCKET>";
    case AVIO_ENTRY_SERVER:
        return "<SERVER>";
    case AVIO_ENTRY_SHARE:
        return "<SHARE>";
    case AVIO_ENTRY_WORKGROUP:
        return "<WORKGROUP>";
    case AVIO_ENTRY_UNKNOWN:
    default:
        break;
    }
    return "<UNKNOWN>";
}

int main(int argc, char *argv[])
{
    const char *input_dir = NULL;
    AVIODirEntry *entry = NULL;
    AVIODirContext *ctx = NULL;
    int cnt, ret;
    char filemode[4], uid_and_gid[20];

    av_log_set_level(AV_LOG_DEBUG);

    if (argc != 2) {
        fprintf(stderr, "usage: %s input_dir\n"
                "API example program to show how to list files in directory "
                "accessed through AVIOContext.\n", argv[0]);
        return 1;
    }
    input_dir = argv[1];

    /* register codecs and formats and other lavf/lavc components*/
    av_register_all();
    avformat_network_init();

    if ((ret = avio_open_dir(&ctx, input_dir, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open directory: %s.\n", av_err2str(ret));
        goto fail;
    }

    cnt = 0;
    for (;;) {
        if ((ret = avio_read_dir(ctx, &entry)) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot list directory: %s.\n", av_err2str(ret));
            goto fail;
        }
        if (!entry)
            break;
        if (entry->filemode == -1) {
            snprintf(filemode, 4, "???");
        } else {
            snprintf(filemode, 4, "%3"PRIo64, entry->filemode);
        }
        snprintf(uid_and_gid, 20, "%"PRId64"(%"PRId64")", entry->user_id, entry->group_id);
        if (cnt == 0)
            av_log(NULL, AV_LOG_INFO, "%-9s %12s %30s %10s %s %16s %16s %16s\n",
                   "TYPE", "SIZE", "NAME", "UID(GID)", "UGO", "MODIFIED",
                   "ACCESSED", "STATUS_CHANGED");
        av_log(NULL, AV_LOG_INFO, "%-9s %12"PRId64" %30s %10s %s %16"PRId64" %16"PRId64" %16"PRId64"\n",
               type_string(entry->type),
               entry->size,
               entry->name,
               uid_and_gid,
               filemode,
               entry->modification_timestamp,
               entry->access_timestamp,
               entry->status_change_timestamp);
        avio_free_directory_entry(&entry);
        cnt++;
    };

  fail:
    avio_close_dir(&ctx);
    avformat_network_deinit();

    return ret < 0 ? 1 : 0;
}
@@ -36,8 +36,6 @@

static AVFormatContext *fmt_ctx = NULL;
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
static int width, height;
static enum AVPixelFormat pix_fmt;
static AVStream *video_stream = NULL, *audio_stream = NULL;
static const char *src_filename = NULL;
static const char *video_dst_filename = NULL;
@@ -83,22 +81,6 @@ static int decode_packet(int *got_frame, int cached)
    }

    if (*got_frame) {

        if (frame->width != width || frame->height != height ||
            frame->format != pix_fmt) {
            /* To handle this change, one could call av_image_alloc again and
             * decode the following frames into another rawvideo file. */
            fprintf(stderr, "Error: Width, height and pixel format have to be "
                    "constant in a rawvideo file, but the width, height or "
                    "pixel format of the input video changed:\n"
                    "old: width = %d, height = %d, format = %s\n"
                    "new: width = %d, height = %d, format = %s\n",
                    width, height, av_get_pix_fmt_name(pix_fmt),
                    frame->width, frame->height,
                    av_get_pix_fmt_name(frame->format));
            return -1;
        }

        printf("video_frame%s n:%d coded_n:%d pts:%s\n",
               cached ? "(cached)" : "",
               video_frame_count++, frame->coded_picture_number,
@@ -108,7 +90,7 @@ static int decode_packet(int *got_frame, int cached)
         * this is required since rawvideo expects non aligned data */
        av_image_copy(video_dst_data, video_dst_linesize,
                      (const uint8_t **)(frame->data), frame->linesize,
                      pix_fmt, width, height);
                      video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);

        /* write to rawvideo file */
        fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
@@ -156,7 +138,7 @@ static int decode_packet(int *got_frame, int cached)
static int open_codec_context(int *stream_idx,
                              AVFormatContext *fmt_ctx, enum AVMediaType type)
{
    int ret, stream_index;
    int ret;
    AVStream *st;
    AVCodecContext *dec_ctx = NULL;
    AVCodec *dec = NULL;
@@ -168,8 +150,8 @@ static int open_codec_context(int *stream_idx,
                av_get_media_type_string(type), src_filename);
        return ret;
    } else {
        stream_index = ret;
        st = fmt_ctx->streams[stream_index];
        *stream_idx = ret;
        st = fmt_ctx->streams[*stream_idx];

        /* find decoder for the stream */
        dec_ctx = st->codec;
@@ -188,7 +170,6 @@ static int open_codec_context(int *stream_idx,
                    av_get_media_type_string(type));
            return ret;
        }
        *stream_idx = stream_index;
    }

    return 0;
@@ -283,11 +264,9 @@ int main (int argc, char **argv)
        }

        /* allocate image where the decoded image will be put */
        width = video_dec_ctx->width;
        height = video_dec_ctx->height;
        pix_fmt = video_dec_ctx->pix_fmt;
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             width, height, pix_fmt, 1);
                             video_dec_ctx->width, video_dec_ctx->height,
                             video_dec_ctx->pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
@@ -362,7 +341,7 @@ int main (int argc, char **argv)
    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(pix_fmt), width, height,
               av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
               video_dst_filename);
    }
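The hunks above move between caching the decoder's width, height and pixel format in globals and reading them from video_dec_ctx directly. Either way, the rawvideo destination buffer is sized once by av_image_alloc() and then reused for every frame, which is why those parameters must stay constant. A minimal sketch of that pattern follows; the helper and its arguments are illustrative and are not code from this commit.

    #include <libavutil/error.h>
    #include <libavutil/frame.h>
    #include <libavutil/imgutils.h>

    /* Allocate a destination image once, then copy each decoded frame into it. */
    static int copy_frame_to_buffer(const AVFrame *frame,
                                    uint8_t *dst_data[4], int dst_linesize[4],
                                    int *alloc_w, int *alloc_h, int *alloc_fmt)
    {
        if (!dst_data[0]) {
            /* First frame: remember the parameters the buffer was sized for. */
            int ret = av_image_alloc(dst_data, dst_linesize,
                                     frame->width, frame->height,
                                     frame->format, 1);
            if (ret < 0)
                return ret;
            *alloc_w   = frame->width;
            *alloc_h   = frame->height;
            *alloc_fmt = frame->format;
        } else if (frame->width != *alloc_w || frame->height != *alloc_h ||
                   frame->format != *alloc_fmt) {
            /* The buffer no longer matches the stream; a real program would
             * reallocate here instead of failing. */
            return AVERROR_INVALIDDATA;
        }

        av_image_copy(dst_data, dst_linesize,
                      (const uint8_t **)frame->data, frame->linesize,
                      *alloc_fmt, *alloc_w, *alloc_h);
        return 0;
    }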
@@ -145,28 +145,12 @@ static int init_filters(const char *filters_descr)
        goto end;
    }

    /*
     * Set the endpoints for the filter graph. The filter_graph will
     * be linked to the graph described by filters_descr.
     */

    /*
     * The buffer source output must be connected to the input pad of
     * the first filter described by filters_descr; since the first
     * filter input label is not specified, it is set to "in" by
     * default.
     */
    /* Endpoints for the filter graph. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;

    /*
     * The buffer sink input must be connected to the output pad of
     * the last filter described by filters_descr; since the last
     * filter output label is not specified, it is set to "out" by
     * default.
     */
    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
@@ -90,7 +90,6 @@ static int init_filters(const char *filters_descr)
    AVFilter *buffersink = avfilter_get_by_name("buffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
    enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };

    filter_graph = avfilter_graph_alloc();
@@ -103,7 +102,7 @@ static int init_filters(const char *filters_descr)
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
             time_base.num, time_base.den,
             dec_ctx->time_base.num, dec_ctx->time_base.den,
             dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);

    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
@@ -128,28 +127,12 @@ static int init_filters(const char *filters_descr)
        goto end;
    }

    /*
     * Set the endpoints for the filter graph. The filter_graph will
     * be linked to the graph described by filters_descr.
     */

    /*
     * The buffer source output must be connected to the input pad of
     * the first filter described by filters_descr; since the first
     * filter input label is not specified, it is set to "in" by
     * default.
     */
    /* Endpoints for the filter graph. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;

    /*
     * The buffer sink input must be connected to the output pad of
     * the last filter described by filters_descr; since the last
     * filter output label is not specified, it is set to "out" by
     * default.
     */
    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
@@ -661,7 +661,7 @@ int main(int argc, char **argv)

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&oc->pb);
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);
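Several hunks in these examples switch between avio_close() and avio_closep(). Both close the AVIOContext, but avio_closep() also resets the caller's pointer to NULL, so a later cleanup path cannot close the same context a second time. A minimal sketch of the safer form, using a hypothetical close_output() helper rather than code taken from this commit:

    #include <libavformat/avformat.h>
    #include <libavformat/avio.h>

    /* Close the muxer output safely: avio_closep() NULLs oc->pb, so running
     * this cleanup twice cannot touch an already-closed AVIOContext. */
    static void close_output(AVFormatContext *oc)
    {
        if (!oc)
            return;
        if (!(oc->oformat->flags & AVFMT_NOFILE))
            avio_closep(&oc->pb); /* like avio_close(oc->pb); oc->pb = NULL; */
        avformat_free_context(oc);
    }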
@@ -1,484 +0,0 @@
/*
 * Copyright (c) 2015 Anton Khirnov
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * Intel QSV-accelerated H.264 decoding example.
 *
 * @example qsvdec.c
 * This example shows how to do QSV-accelerated H.264 decoding with output
 * frames in the VA-API video surfaces.
 */

#include "config.h"

#include <stdio.h>

#include <mfx/mfxvideo.h>

#include <va/va.h>
#include <va/va_x11.h>
#include <X11/Xlib.h>

#include "libavformat/avformat.h"
#include "libavformat/avio.h"

#include "libavcodec/avcodec.h"
#include "libavcodec/qsv.h"

#include "libavutil/error.h"
#include "libavutil/mem.h"

typedef struct DecodeContext {
    mfxSession mfx_session;
    VADisplay va_dpy;

    VASurfaceID *surfaces;
    mfxMemId *surface_ids;
    int *surface_used;
    int nb_surfaces;

    mfxFrameInfo frame_info;
} DecodeContext;

static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
                             mfxFrameAllocResponse *resp)
{
    DecodeContext *decode = pthis;
    int err, i;

    if (decode->surfaces) {
        fprintf(stderr, "Multiple allocation requests.\n");
        return MFX_ERR_MEMORY_ALLOC;
    }
    if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)) {
        fprintf(stderr, "Unsupported surface type: %d\n", req->Type);
        return MFX_ERR_UNSUPPORTED;
    }
    if (req->Info.BitDepthLuma != 8 || req->Info.BitDepthChroma != 8 ||
        req->Info.Shift || req->Info.FourCC != MFX_FOURCC_NV12 ||
        req->Info.ChromaFormat != MFX_CHROMAFORMAT_YUV420) {
        fprintf(stderr, "Unsupported surface properties.\n");
        return MFX_ERR_UNSUPPORTED;
    }

    decode->surfaces = av_malloc_array (req->NumFrameSuggested, sizeof(*decode->surfaces));
    decode->surface_ids = av_malloc_array (req->NumFrameSuggested, sizeof(*decode->surface_ids));
    decode->surface_used = av_mallocz_array(req->NumFrameSuggested, sizeof(*decode->surface_used));
    if (!decode->surfaces || !decode->surface_ids || !decode->surface_used)
        goto fail;

    err = vaCreateSurfaces(decode->va_dpy, VA_RT_FORMAT_YUV420,
                           req->Info.Width, req->Info.Height,
                           decode->surfaces, req->NumFrameSuggested,
                           NULL, 0);
    if (err != VA_STATUS_SUCCESS) {
        fprintf(stderr, "Error allocating VA surfaces\n");
        goto fail;
    }
    decode->nb_surfaces = req->NumFrameSuggested;

    for (i = 0; i < decode->nb_surfaces; i++)
        decode->surface_ids[i] = &decode->surfaces[i];

    resp->mids = decode->surface_ids;
    resp->NumFrameActual = decode->nb_surfaces;

    decode->frame_info = req->Info;

    return MFX_ERR_NONE;
fail:
    av_freep(&decode->surfaces);
    av_freep(&decode->surface_ids);
    av_freep(&decode->surface_used);

    return MFX_ERR_MEMORY_ALLOC;
}

static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
{
    DecodeContext *decode = pthis;

    if (decode->surfaces)
        vaDestroySurfaces(decode->va_dpy, decode->surfaces, decode->nb_surfaces);
    av_freep(&decode->surfaces);
    av_freep(&decode->surface_ids);
    av_freep(&decode->surface_used);
    decode->nb_surfaces = 0;

    return MFX_ERR_NONE;
}

static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
    return MFX_ERR_UNSUPPORTED;
}

static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
    return MFX_ERR_UNSUPPORTED;
}

static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
{
    *hdl = mid;
    return MFX_ERR_NONE;
}

static void free_buffer(void *opaque, uint8_t *data)
{
    int *used = opaque;
    *used = 0;
    av_freep(&data);
}

static int get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    DecodeContext *decode = avctx->opaque;

    mfxFrameSurface1 *surf;
    AVBufferRef *surf_buf;
    int idx;

    for (idx = 0; idx < decode->nb_surfaces; idx++) {
        if (!decode->surface_used[idx])
            break;
    }
    if (idx == decode->nb_surfaces) {
        fprintf(stderr, "No free surfaces\n");
        return AVERROR(ENOMEM);
    }

    surf = av_mallocz(sizeof(*surf));
    if (!surf)
        return AVERROR(ENOMEM);
    surf_buf = av_buffer_create((uint8_t*)surf, sizeof(*surf), free_buffer,
                                &decode->surface_used[idx], AV_BUFFER_FLAG_READONLY);
    if (!surf_buf) {
        av_freep(&surf);
        return AVERROR(ENOMEM);
    }

    surf->Info = decode->frame_info;
    surf->Data.MemId = &decode->surfaces[idx];

    frame->buf[0] = surf_buf;
    frame->data[3] = (uint8_t*)surf;

    decode->surface_used[idx] = 1;

    return 0;
}

static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
{
    while (*pix_fmts != AV_PIX_FMT_NONE) {
        if (*pix_fmts == AV_PIX_FMT_QSV) {
            if (!avctx->hwaccel_context) {
                DecodeContext *decode = avctx->opaque;
                AVQSVContext *qsv = av_qsv_alloc_context();
                if (!qsv)
                    return AV_PIX_FMT_NONE;

                qsv->session = decode->mfx_session;
                qsv->iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;

                avctx->hwaccel_context = qsv;
            }

            return AV_PIX_FMT_QSV;
        }

        pix_fmts++;
    }

    fprintf(stderr, "The QSV pixel format not offered in get_format()\n");

    return AV_PIX_FMT_NONE;
}

static int decode_packet(DecodeContext *decode, AVCodecContext *decoder_ctx,
                         AVFrame *frame, AVPacket *pkt,
                         AVIOContext *output_ctx)
{
    int ret = 0;
    int got_frame = 1;

    while (pkt->size > 0 || (!pkt->data && got_frame)) {
        ret = avcodec_decode_video2(decoder_ctx, frame, &got_frame, pkt);
        if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            return ret;
        }

        pkt->data += ret;
        pkt->size -= ret;

        /* A real program would do something useful with the decoded frame here.
         * We just retrieve the raw data and write it to a file, which is rather
         * useless but pedagogic. */
        if (got_frame) {
            mfxFrameSurface1 *surf = (mfxFrameSurface1*)frame->data[3];
            VASurfaceID surface = *(VASurfaceID*)surf->Data.MemId;

            VAImageFormat img_fmt = {
                .fourcc = VA_FOURCC_NV12,
                .byte_order = VA_LSB_FIRST,
                .bits_per_pixel = 8,
                .depth = 8,
            };

            VAImage img;

            VAStatus err;
            uint8_t *data;
            int i, j;

            img.buf = VA_INVALID_ID;
            img.image_id = VA_INVALID_ID;

            err = vaCreateImage(decode->va_dpy, &img_fmt,
                                frame->width, frame->height, &img);
            if (err != VA_STATUS_SUCCESS) {
                fprintf(stderr, "Error creating an image: %s\n",
                        vaErrorStr(err));
                ret = AVERROR_UNKNOWN;
                goto fail;
            }

            err = vaGetImage(decode->va_dpy, surface, 0, 0,
                             frame->width, frame->height,
                             img.image_id);
            if (err != VA_STATUS_SUCCESS) {
                fprintf(stderr, "Error getting an image: %s\n",
                        vaErrorStr(err));
                ret = AVERROR_UNKNOWN;
                goto fail;
            }

            err = vaMapBuffer(decode->va_dpy, img.buf, (void**)&data);
            if (err != VA_STATUS_SUCCESS) {
                fprintf(stderr, "Error mapping the image buffer: %s\n",
                        vaErrorStr(err));
                ret = AVERROR_UNKNOWN;
                goto fail;
            }

            for (i = 0; i < img.num_planes; i++)
                for (j = 0; j < (img.height >> (i > 0)); j++)
                    avio_write(output_ctx, data + img.offsets[i] + j * img.pitches[i], img.width);

fail:
            if (img.buf != VA_INVALID_ID)
                vaUnmapBuffer(decode->va_dpy, img.buf);
            if (img.image_id != VA_INVALID_ID)
                vaDestroyImage(decode->va_dpy, img.image_id);
            av_frame_unref(frame);

            if (ret < 0)
                return ret;
        }
    }

    return 0;
}

int main(int argc, char **argv)
{
    AVFormatContext *input_ctx = NULL;
    AVStream *video_st = NULL;
    AVCodecContext *decoder_ctx = NULL;
    const AVCodec *decoder;

    AVPacket pkt = { 0 };
    AVFrame *frame = NULL;

    DecodeContext decode = { NULL };

    Display *dpy = NULL;
    int va_ver_major, va_ver_minor;

    mfxIMPL mfx_impl = MFX_IMPL_AUTO_ANY;
    mfxVersion mfx_ver = { { 1, 1 } };

    mfxFrameAllocator frame_allocator = {
        .pthis = &decode,
        .Alloc = frame_alloc,
        .Lock = frame_lock,
        .Unlock = frame_unlock,
        .GetHDL = frame_get_hdl,
        .Free = frame_free,
    };

    AVIOContext *output_ctx = NULL;

    int ret, i, err;

    av_register_all();

    if (argc < 3) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }

    /* open the input file */
    ret = avformat_open_input(&input_ctx, argv[1], NULL, NULL);
    if (ret < 0) {
        fprintf(stderr, "Cannot open input file '%s': ", argv[1]);
        goto finish;
    }

    /* find the first H.264 video stream */
    for (i = 0; i < input_ctx->nb_streams; i++) {
        AVStream *st = input_ctx->streams[i];

        if (st->codec->codec_id == AV_CODEC_ID_H264 && !video_st)
            video_st = st;
        else
            st->discard = AVDISCARD_ALL;
    }
    if (!video_st) {
        fprintf(stderr, "No H.264 video stream in the input file\n");
        goto finish;
    }

    /* initialize VA-API */
    dpy = XOpenDisplay(NULL);
    if (!dpy) {
        fprintf(stderr, "Cannot open the X display\n");
        goto finish;
    }
    decode.va_dpy = vaGetDisplay(dpy);
    if (!decode.va_dpy) {
        fprintf(stderr, "Cannot open the VA display\n");
        goto finish;
    }

    err = vaInitialize(decode.va_dpy, &va_ver_major, &va_ver_minor);
    if (err != VA_STATUS_SUCCESS) {
        fprintf(stderr, "Cannot initialize VA: %s\n", vaErrorStr(err));
        goto finish;
    }
    fprintf(stderr, "Initialized VA v%d.%d\n", va_ver_major, va_ver_minor);

    /* initialize an MFX session */
    err = MFXInit(mfx_impl, &mfx_ver, &decode.mfx_session);
    if (err != MFX_ERR_NONE) {
        fprintf(stderr, "Error initializing an MFX session\n");
        goto finish;
    }

    MFXVideoCORE_SetHandle(decode.mfx_session, MFX_HANDLE_VA_DISPLAY, decode.va_dpy);
    MFXVideoCORE_SetFrameAllocator(decode.mfx_session, &frame_allocator);

    /* initialize the decoder */
    decoder = avcodec_find_decoder_by_name("h264_qsv");
    if (!decoder) {
        fprintf(stderr, "The QSV decoder is not present in libavcodec\n");
        goto finish;
    }

    decoder_ctx = avcodec_alloc_context3(decoder);
    if (!decoder_ctx) {
        ret = AVERROR(ENOMEM);
        goto finish;
    }
    decoder_ctx->codec_id = AV_CODEC_ID_H264;
    if (video_st->codec->extradata_size) {
        decoder_ctx->extradata = av_mallocz(video_st->codec->extradata_size +
                                            FF_INPUT_BUFFER_PADDING_SIZE);
        if (!decoder_ctx->extradata) {
            ret = AVERROR(ENOMEM);
            goto finish;
        }
        memcpy(decoder_ctx->extradata, video_st->codec->extradata,
               video_st->codec->extradata_size);
        decoder_ctx->extradata_size = video_st->codec->extradata_size;
    }
    decoder_ctx->refcounted_frames = 1;

    decoder_ctx->opaque = &decode;
    decoder_ctx->get_buffer2 = get_buffer;
    decoder_ctx->get_format = get_format;

    ret = avcodec_open2(decoder_ctx, NULL, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error opening the decoder: ");
        goto finish;
    }

    /* open the output stream */
    ret = avio_open(&output_ctx, argv[2], AVIO_FLAG_WRITE);
    if (ret < 0) {
        fprintf(stderr, "Error opening the output context: ");
        goto finish;
    }

    frame = av_frame_alloc();
    if (!frame) {
        ret = AVERROR(ENOMEM);
        goto finish;
    }

    /* actual decoding */
    while (ret >= 0) {
        ret = av_read_frame(input_ctx, &pkt);
        if (ret < 0)
            break;

        if (pkt.stream_index == video_st->index)
            ret = decode_packet(&decode, decoder_ctx, frame, &pkt, output_ctx);

        av_packet_unref(&pkt);
    }

    /* flush the decoder */
    pkt.data = NULL;
    pkt.size = 0;
    ret = decode_packet(&decode, decoder_ctx, frame, &pkt, output_ctx);

finish:
    if (ret < 0) {
        char buf[1024];
        av_strerror(ret, buf, sizeof(buf));
        fprintf(stderr, "%s\n", buf);
    }

    avformat_close_input(&input_ctx);

    av_frame_free(&frame);

    if (decode.mfx_session)
        MFXClose(decode.mfx_session);
    if (decode.va_dpy)
        vaTerminate(decode.va_dpy);
    if (dpy)
        XCloseDisplay(dpy);

    if (decoder_ctx)
        av_freep(&decoder_ctx->hwaccel_context);
    avcodec_free_context(&decoder_ctx);

    avio_close(output_ctx);

    return ret;
}
@@ -153,7 +153,7 @@ end:

    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0 && ret != AVERROR_EOF) {
@@ -41,9 +41,11 @@
#include "libswresample/swresample.h"

/** The output bit rate in kbit/s */
#define OUTPUT_BIT_RATE 96000
#define OUTPUT_BIT_RATE 48000
/** The number of output channels */
#define OUTPUT_CHANNELS 2
/** The audio sample output format */
#define OUTPUT_SAMPLE_FORMAT AV_SAMPLE_FMT_S16

/**
 * Convert an error code into a text message.
@@ -167,7 +169,7 @@ static int open_output_file(const char *filename,
        goto cleanup;
    }

    /** Save the encoder context for easier access later. */
    /** Save the encoder context for easiert access later. */
    *output_codec_context = stream->codec;

    /**
@@ -177,16 +179,9 @@ static int open_output_file(const char *filename,
    (*output_codec_context)->channels = OUTPUT_CHANNELS;
    (*output_codec_context)->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
    (*output_codec_context)->sample_rate = input_codec_context->sample_rate;
    (*output_codec_context)->sample_fmt = output_codec->sample_fmts[0];
    (*output_codec_context)->sample_fmt = AV_SAMPLE_FMT_S16;
    (*output_codec_context)->bit_rate = OUTPUT_BIT_RATE;

    /** Allow the use of the experimental AAC encoder */
    (*output_codec_context)->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

    /** Set the sample rate for the container. */
    stream->time_base.den = input_codec_context->sample_rate;
    stream->time_base.num = 1;

    /**
     * Some container formats (like MP4) require global headers to be present
     * Mark the encoder so that it behaves accordingly.
@@ -204,7 +199,7 @@ static int open_output_file(const char *filename,
    return 0;

cleanup:
    avio_closep(&(*output_format_context)->pb);
    avio_close((*output_format_context)->pb);
    avformat_free_context(*output_format_context);
    *output_format_context = NULL;
    return error < 0 ? error : AVERROR_EXIT;
@@ -276,11 +271,10 @@ static int init_resampler(AVCodecContext *input_codec_context,
}

/** Initialize a FIFO buffer for the audio samples to be encoded. */
static int init_fifo(AVAudioFifo **fifo, AVCodecContext *output_codec_context)
static int init_fifo(AVAudioFifo **fifo)
{
    /** Create the FIFO buffer based on the specified output sample format. */
    if (!(*fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt,
                                      output_codec_context->channels, 1))) {
    if (!(*fifo = av_audio_fifo_alloc(OUTPUT_SAMPLE_FORMAT, OUTPUT_CHANNELS, 1))) {
        fprintf(stderr, "Could not allocate FIFO\n");
        return AVERROR(ENOMEM);
    }
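The two versions of init_fifo() above differ only in where the FIFO parameters come from: the OUTPUT_SAMPLE_FORMAT and OUTPUT_CHANNELS constants, or the already configured encoder context. A small sketch of the context-driven variant, assuming an opened encoder context; the helper name is illustrative and not taken verbatim from either side of the diff.

    #include <stdio.h>
    #include <libavcodec/avcodec.h>
    #include <libavutil/audio_fifo.h>
    #include <libavutil/error.h>

    /* Create a FIFO matching whatever the encoder was actually opened with,
     * so the queued samples never need an extra conversion step. */
    static int init_fifo_from_encoder(AVAudioFifo **fifo, AVCodecContext *enc)
    {
        *fifo = av_audio_fifo_alloc(enc->sample_fmt, enc->channels, 1);
        if (!*fifo) {
            fprintf(stderr, "Could not allocate FIFO\n");
            return AVERROR(ENOMEM);
        }
        return 0;
    }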
@@ -543,9 +537,6 @@ static int init_output_frame(AVFrame **frame,
    return 0;
}

/** Global timestamp for the audio frames */
static int64_t pts = 0;

/** Encode one frame worth of audio to the output file. */
static int encode_audio_frame(AVFrame *frame,
                              AVFormatContext *output_format_context,
@@ -557,12 +548,6 @@ static int encode_audio_frame(AVFrame *frame,
    int error;
    init_packet(&output_packet);

    /** Set a timestamp based on the sample rate for the container. */
    if (frame) {
        frame->pts = pts;
        pts += frame->nb_samples;
    }

    /**
     * Encode the audio frame and store it in the temporary packet.
     * The output audio stream encoder is used to do this.
@@ -674,7 +659,7 @@ int main(int argc, char **argv)
                       &resample_context))
        goto cleanup;
    /** Initialize the FIFO buffer to store audio samples to be encoded. */
    if (init_fifo(&fifo, output_codec_context))
    if (init_fifo(&fifo))
        goto cleanup;
    /** Write the header of the output file container. */
    if (write_output_file_header(output_format_context))
@@ -758,7 +743,7 @@ cleanup:
    if (output_codec_context)
        avcodec_close(output_codec_context);
    if (output_format_context) {
        avio_closep(&output_format_context->pb);
        avio_close(output_format_context->pb);
        avformat_free_context(output_format_context);
    }
    if (input_codec_context)
@@ -116,10 +116,6 @@ static int open_output_file(const char *filename)
                || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* in this example, we choose transcoding to same codec */
            encoder = avcodec_find_encoder(dec_ctx->codec_id);
            if (!encoder) {
                av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                return AVERROR_INVALIDDATA;
            }

            /* In this example, we transcode to same properties (picture size,
             * sample rate etc.). These properties can be changed for output
@@ -573,7 +569,7 @@ end:
    av_free(filter_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0)
87 doc/faq.texi
@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg FAQ
@titlepage
@@ -91,56 +90,6 @@ To build FFmpeg, you need to install the development package. It is usually
called @file{libfoo-dev} or @file{libfoo-devel}. You can remove it after the
build is finished, but be sure to keep the main package.

@section How do I make @command{pkg-config} find my libraries?

Somewhere along with your libraries, there is a @file{.pc} file (or several)
in a @file{pkgconfig} directory. You need to set environment variables to
point @command{pkg-config} to these files.

If you need to @emph{add} directories to @command{pkg-config}'s search list
(typical use case: library installed separately), add it to
@code{$PKG_CONFIG_PATH}:

@example
export PKG_CONFIG_PATH=/opt/x264/lib/pkgconfig:/opt/opus/lib/pkgconfig
@end example

If you need to @emph{replace} @command{pkg-config}'s search list
(typical use case: cross-compiling), set it in
@code{$PKG_CONFIG_LIBDIR}:

@example
export PKG_CONFIG_LIBDIR=/home/me/cross/usr/lib/pkgconfig:/home/me/cross/usr/local/lib/pkgconfig
@end example

If you need to know the library's internal dependencies (typical use: static
linking), add the @code{--static} option to @command{pkg-config}:

@example
./configure --pkg-config-flags=--static
@end example

@section How do I use @command{pkg-config} when cross-compiling?

The best way is to install @command{pkg-config} in your cross-compilation
environment. It will automatically use the cross-compilation libraries.

You can also use @command{pkg-config} from the host environment by
specifying explicitly @code{--pkg-config=pkg-config} to @command{configure}.
In that case, you must point @command{pkg-config} to the correct directories
using the @code{PKG_CONFIG_LIBDIR}, as explained in the previous entry.

As an intermediate solution, you can place in your cross-compilation
environment a script that calls the host @command{pkg-config} with
@code{PKG_CONFIG_LIBDIR} set. That script can look like that:

@example
#!/bin/sh
PKG_CONFIG_LIBDIR=/path/to/cross/lib/pkgconfig
export PKG_CONFIG_LIBDIR
exec /usr/bin/pkg-config "$@@"
@end example

@chapter Usage

@section ffmpeg does not work; what is wrong?
@@ -349,7 +298,7 @@ FFmpeg has a @url{http://ffmpeg.org/ffmpeg-protocols.html#concat,
@code{concat}} protocol designed specifically for that, with examples in the
documentation.

A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow one to concatenate
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to concatenate
video by merely concatenating the files containing them.

Hence you may concatenate your multimedia files by first transcoding them to
@@ -467,40 +416,6 @@ point acceptable for your tastes. The most common options to do that are
@option{-qscale} and @option{-qmax}, but you should peruse the documentation
of the encoder you chose.

@section I have a stretched video, why does scaling does not fix it?

A lot of video codecs and formats can store the @emph{aspect ratio} of the
video: this is the ratio between the width and the height of either the full
image (DAR, display aspect ratio) or individual pixels (SAR, sample aspect
ratio). For example, EGA screens at resolution 640×350 had 4:3 DAR and 35:48
SAR.

Most still image processing work with square pixels, i.e. 1:1 SAR, but a lot
of video standards, especially from the analogic-numeric transition era, use
non-square pixels.

Most processing filters in FFmpeg handle the aspect ratio to avoid
stretching the image: cropping adjusts the DAR to keep the SAR constant,
scaling adjusts the SAR to keep the DAR constant.

If you want to stretch, or “unstretch”, the image, you need to override the
information with the
@url{http://ffmpeg.org/ffmpeg-filters.html#setdar_002c-setsar, @code{setdar or setsar filters}}.

Do not forget to examine carefully the original video to check whether the
stretching comes from the image or from the aspect ratio information.

For example, to fix a badly encoded EGA capture, use the following commands,
either the first one to upscale to square pixels or the second one to set
the correct aspect ratio or the third one to avoid transcoding (may not work
depending on the format / codec / player / phase of the moon):

@example
ffmpeg -i ega_screen.nut -vf scale=640:480,setsar=1 ega_screen_scaled.nut
ffmpeg -i ega_screen.nut -vf setdar=4/3 ega_screen_anamorphic.nut
ffmpeg -i ega_screen.nut -aspect 4/3 -c copy ega_screen_overridden.nut
@end example

@chapter Development

@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?
@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Automated Testing Environment
@titlepage
@@ -13,36 +12,36 @@

@chapter Introduction

FATE is an extended regression suite on the client-side and a means
FATE is an extended regression suite on the client-side and a means
for results aggregation and presentation on the server-side.

The first part of this document explains how you can use FATE from
The first part of this document explains how you can use FATE from
your FFmpeg source directory to test your ffmpeg binary. The second
part describes how you can run FATE to submit the results to FFmpeg's
FATE server.

In any way you can have a look at the publicly viewable FATE results
In any way you can have a look at the publicly viewable FATE results
by visiting this website:

@url{http://fate.ffmpeg.org/}
@url{http://fate.ffmpeg.org/}

This is especially recommended for all people contributing source
This is especially recommended for all people contributing source
code to FFmpeg, as it can be seen if some test on some platform broke
with their recent contribution. This usually happens on the platforms
the developers could not test on.

The second part of this document describes how you can run FATE to
The second part of this document describes how you can run FATE to
submit your results to FFmpeg's FATE server. If you want to submit your
results be sure to check that your combination of CPU, OS and compiler
is not already listed on the above mentioned website.

In the third part you can find a comprehensive listing of FATE makefile
In the third part you can find a comprehensive listing of FATE makefile
targets and variables.


@chapter Using FATE from your FFmpeg source directory

If you want to run FATE on your machine you need to have the samples
If you want to run FATE on your machine you need to have the samples
in place. You can get the samples via the build target fate-rsync.
Use this command from the top-level source directory:

@@ -51,11 +50,11 @@ make fate-rsync SAMPLES=fate-suite/
make fate       SAMPLES=fate-suite/
@end example

The above commands set the samples location by passing a makefile
The above commands set the samples location by passing a makefile
variable via command line. It is also possible to set the samples
location at source configuration time by invoking configure with
@option{--samples=<path to the samples directory>}. Afterwards you can
invoke the makefile targets without setting the @var{SAMPLES} makefile
`--samples=<path to the samples directory>'. Afterwards you can
invoke the makefile targets without setting the SAMPLES makefile
variable. This is illustrated by the following commands:

@example
@@ -64,7 +63,7 @@ make fate-rsync
make fate
@end example

Yet another way to tell FATE about the location of the sample
Yet another way to tell FATE about the location of the sample
directory is by making sure the environment variable FATE_SAMPLES
contains the path to your samples directory. This can be achieved
by e.g. putting that variable in your shell profile or by setting
@@ -85,7 +84,7 @@ To use a custom wrapper to run the test, pass @option{--target-exec} to

@chapter Submitting the results to the FFmpeg result aggregation server

To submit your results to the server you should run fate through the
To submit your results to the server you should run fate through the
shell script @file{tests/fate.sh} from the FFmpeg sources. This script needs
to be invoked with a configuration file as its first argument.

@@ -93,23 +92,23 @@ to be invoked with a configuration file as its first argument.
tests/fate.sh /path/to/fate_config
@end example

A configuration file template with comments describing the individual
A configuration file template with comments describing the individual
configuration variables can be found at @file{doc/fate_config.sh.template}.

@ifhtml
The mentioned configuration template is also available here:
The mentioned configuration template is also available here:
@verbatiminclude fate_config.sh.template
@end ifhtml

Create a configuration that suits your needs, based on the configuration
template. The @env{slot} configuration variable can be any string that is not
Create a configuration that suits your needs, based on the configuration
template. The `slot' configuration variable can be any string that is not
yet used, but it is suggested that you name it adhering to the following
pattern @samp{@var{arch}-@var{os}-@var{compiler}-@var{compiler version}}. The
configuration file itself will be sourced in a shell script, therefore all
shell features may be used. This enables you to setup the environment as you
need it for your build.
pattern <arch>-<os>-<compiler>-<compiler version>. The configuration file
itself will be sourced in a shell script, therefore all shell features may
be used. This enables you to setup the environment as you need it for your
build.

For your first test runs the @env{fate_recv} variable should be empty or
For your first test runs the `fate_recv' variable should be empty or
commented out. This will run everything as normal except that it will omit
the submission of the results to the server. The following files should be
present in $workdir as specified in the configuration file:
@@ -122,29 +121,29 @@ present in $workdir as specified in the configuration file:
@item version
@end itemize

When you have everything working properly you can create an SSH key pair
When you have everything working properly you can create an SSH key pair
and send the public key to the FATE server administrator who can be contacted
at the email address @email{fate-admin@@ffmpeg.org}.

Configure your SSH client to use public key authentication with that key
Configure your SSH client to use public key authentication with that key
when connecting to the FATE server. Also do not forget to check the identity
of the server and to accept its host key. This can usually be achieved by
running your SSH client manually and killing it after you accepted the key.
The FATE server's fingerprint is:

@table @samp
@table @option
@item RSA
   d3:f1:83:97:a4:75:2b:a6:fb:d6:e8:aa:81:93:97:51
@item ECDSA
   76:9f:68:32:04:1e:d5:d4:ec:47:3f:dc:fc:18:17:86
@end table

If you have problems connecting to the FATE server, it may help to try out
If you have problems connecting to the FATE server, it may help to try out
the @command{ssh} command with one or more @option{-v} options. You should
get detailed output concerning your SSH configuration and the authentication
process.

The only thing left is to automate the execution of the fate.sh script and
The only thing left is to automate the execution of the fate.sh script and
the synchronisation of the samples directory.


@@ -165,7 +164,7 @@ Run the FATE test suite (requires the fate-suite dataset).

@section Makefile variables

@table @env
@table @option
@item V
Verbosity level, can be set to 0, 1 or 2.
@itemize
@@ -183,20 +182,20 @@ Specify how many threads to use while running regression tests, it is
quite useful to detect thread-related regressions.

@item THREAD_TYPE
Specify which threading strategy test, either @samp{slice} or @samp{frame},
by default @samp{slice+frame}
Specify which threading strategy test, either @var{slice} or @var{frame},
by default @var{slice+frame}

@item CPUFLAGS
Specify CPU flags.

@item TARGET_EXEC
Specify or override the wrapper used to run the tests.
The @env{TARGET_EXEC} option provides a way to run FATE wrapped in
The @var{TARGET_EXEC} option provides a way to run FATE wrapped in
@command{valgrind}, @command{qemu-user} or @command{wine} or on remote targets
through @command{ssh}.

@item GEN
Set to @samp{1} to generate the missing or mismatched references.
Set to @var{1} to generate the missing or mismatched references.
@end table

@section Examples
@@ -1,6 +1,5 @@
slot=                                    # some unique identifier
repo=git://source.ffmpeg.org/ffmpeg.git  # the source repository
#branch=release/2.6                      # the branch to test
samples=                                 # path to samples directory
workdir=                                 # directory in which to do all the work
#fate_recv="ssh -T fate@fate.ffmpeg.org" # command to submit report
@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Bitstream Filters Documentation
@titlepage

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Codecs Documentation
@titlepage

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Devices Documentation
@titlepage

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Filters Documentation
@titlepage

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Formats Documentation
@titlepage

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Protocols Documentation
@titlepage

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Resampler Documentation
@titlepage

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Scaler Documentation
@titlepage

@@ -1,5 +1,4 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Utilities Documentation
@titlepage
104 doc/ffmpeg.texi
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle ffmpeg Documentation
|
||||
@titlepage
|
||||
@@ -80,7 +79,7 @@ The format option may be needed for raw input files.
|
||||
The transcoding process in @command{ffmpeg} for each output can be described by
|
||||
the following diagram:
|
||||
|
||||
@verbatim
|
||||
@example
|
||||
_______ ______________
|
||||
| | | |
|
||||
| input | demuxer | encoded data | decoder
|
||||
@@ -99,7 +98,7 @@ the following diagram:
|
||||
|________| |______________|
|
||||
|
||||
|
||||
@end verbatim
|
||||
@end example
|
||||
|
||||
@command{ffmpeg} calls the libavformat library (containing demuxers) to read
|
||||
input files and get packets containing encoded data from them. When there are
|
||||
@@ -124,7 +123,7 @@ Simple filtergraphs are those that have exactly one input and output, both of
|
||||
the same type. In the above diagram they can be represented by simply inserting
|
||||
an additional step between decoding and encoding:
|
||||
|
||||
@verbatim
|
||||
@example
|
||||
_________ ______________
|
||||
| | | |
|
||||
| decoded | | encoded data |
|
||||
@@ -136,19 +135,19 @@ an additional step between decoding and encoding:
|
||||
| frames |
|
||||
|__________|
|
||||
|
||||
@end verbatim
|
||||
@end example
|
||||
|
||||
Simple filtergraphs are configured with the per-stream @option{-filter} option
|
||||
(with @option{-vf} and @option{-af} aliases for video and audio respectively).
|
||||
A simple filtergraph for video can look for example like this:
|
||||
|
||||
@verbatim
|
||||
@example
|
||||
_______ _____________ _______ ________
|
||||
| | | | | | | |
|
||||
| input | ---> | deinterlace | ---> | scale | ---> | output |
|
||||
|_______| |_____________| |_______| |________|
|
||||
|
||||
@end verbatim
|
||||
@end example
|
||||
|
||||
Note that some filters change frame properties but not frame contents. E.g. the
|
||||
@code{fps} filter in the example above changes number of frames, but does not
|
||||
@@ -161,7 +160,7 @@ processing chain applied to one stream. This is the case, for example, when the
|
||||
more than one input and/or output, or when output stream type is different from
|
||||
input. They can be represented with the following diagram:
|
||||
|
||||
@verbatim
|
||||
@example
|
||||
_________
|
||||
| |
|
||||
| input 0 |\ __________
|
||||
@@ -179,7 +178,7 @@ input. They can be represented with the following diagram:
|
||||
| input 2 |/
|
||||
|_________|
|
||||
|
||||
@end verbatim
|
||||
@end example
|
||||
|
||||
Complex filtergraphs are configured with the @option{-filter_complex} option.
|
||||
Note that this option is global, since a complex filtergraph, by its nature,
|
||||
@@ -198,14 +197,14 @@ step for the specified stream, so it does only demuxing and muxing. It is useful
|
||||
for changing the container format or modifying container-level metadata. The
|
||||
diagram above will, in this case, simplify to this:
|
||||
|
||||
@verbatim
|
||||
@example
|
||||
_______ ______________ ________
|
||||
| | | | | |
|
||||
| input | demuxer | encoded data | muxer | output |
|
||||
| file | ---------> | packets | -------> | file |
|
||||
|_______| |______________| |________|
|
||||
|
||||
@end verbatim
|
||||
@end example
|
||||
|
||||
Since there is no decoding or encoding, it is very fast and there is no quality
|
||||
loss. However, it might not work in some cases because of many factors. Applying
|
||||
@@ -462,9 +461,6 @@ Technical note -- attachments are implemented as codec extradata, so this
|
||||
option can actually be used to extract extradata from any stream, not just
|
||||
attachments.
|
||||
|
||||
@item -noautorotate
|
||||
Disable automatically rotating video based on file metadata.
|
||||
|
||||
@end table
|
||||
|
||||
@section Video Options
|
||||
@@ -835,14 +831,6 @@ ffmpeg -i INPUT -map 0:m:language:eng OUTPUT
|
||||
|
||||
Note that using this option disables the default mappings for this output file.
|
||||
|
||||
@item -ignore_unknown
|
||||
Ignore input streams with unknown type instead of failing if copying
|
||||
such streams is attempted.
|
||||
|
||||
@item -copy_unknown
|
||||
Allow input streams with unknown type to be copied instead of failing if copying
|
||||
such streams is attempted.
|
||||
|
||||
@item -map_channel [@var{input_file_id}.@var{stream_specifier}.@var{channel_id}|-1][:@var{output_file_id}.@var{stream_specifier}]
|
||||
Map an audio channel from a given input to an output. If
|
||||
@var{output_file_id}.@var{stream_specifier} is not set, the audio channel will
|
||||
@@ -1006,13 +994,6 @@ With -map you can select from which stream the timestamps should be
|
||||
taken. You can leave either video or audio unchanged and sync the
|
||||
remaining stream(s) to the unchanged one.
|
||||
|
||||
@item -frame_drop_threshold @var{parameter}
|
||||
Frame drop threshold, which specifies how much behind video frames can
|
||||
be before they are dropped. In frame rate units, so 1.0 is one frame.
|
||||
The default is -1.1. One possible usecase is to avoid framedrops in case
|
||||
of noisy timestamps or to increase frame drop precision in case of exact
|
||||
timestamps.
|
||||
|
||||
@item -async @var{samples_per_second}
|
||||
Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps,
|
||||
the parameter is the maximum samples per second by which the audio is changed.
|
||||
@@ -1169,19 +1150,6 @@ This option enables or disables accurate seeking in input files with the
|
||||
transcoding. Use @option{-noaccurate_seek} to disable it, which may be useful
|
||||
e.g. when copying some streams and transcoding the others.
|
||||
|
||||
@item -seek_timestamp (@emph{input})
|
||||
This option enables or disables seeking by timestamp in input files with the
|
||||
@option{-ss} option. It is disabled by default. If enabled, the argument
|
||||
to the @option{-ss} option is considered an actual timestamp, and is not
|
||||
offset by the start time of the file. This matters only for files which do
|
||||
not start from timestamp 0, such as transport streams.
|
||||
|
||||
@item -thread_queue_size @var{size} (@emph{input})
|
||||
This option sets the maximum number of queued packets when reading from the
|
||||
file or device. With low latency / high rate live streams, packets may be
|
||||
discarded if they are not read in a timely manner; raising this value can
|
||||
avoid it.
|
||||
|
||||
@item -override_ffserver (@emph{global})
|
||||
Overrides the input specifications from @command{ffserver}. Using this
|
||||
option you can map any input stream to @command{ffserver} and control
|
||||
@@ -1192,11 +1160,6 @@ requested by @command{ffserver}.
|
||||
The option is intended for cases where features are needed that cannot be
|
||||
specified to @command{ffserver} but can be to @command{ffmpeg}.
|
||||
|
||||
@item -sdp_file @var{file} (@emph{global})
|
||||
Print sdp information for an output stream to @var{file}.
|
||||
This allows dumping sdp information when at least one output isn't an
|
||||
rtp stream. (Requires at least one of the output formats to be rtp).
|
||||
|
||||
@item -discard (@emph{input})
|
||||
Allows discarding specific streams or frames of streams at the demuxer.
|
||||
Not all demuxers support this.
|
||||
@@ -1221,9 +1184,6 @@ Discard all frames excepts keyframes.
|
||||
Discard all frames.
|
||||
@end table
|
||||
|
||||
@item -xerror (@emph{global})
|
||||
Stop and exit on error
|
||||
|
||||
@end table
|
||||
|
||||
As a special exception, you can use a bitmap subtitle stream as input: it
|
||||
@@ -1249,10 +1209,7 @@ awkward to specify on the command line. Lines starting with the hash
|
||||
('#') character are ignored and are used to provide comments. Check
|
||||
the @file{presets} directory in the FFmpeg source tree for examples.
|
||||
|
||||
There are two types of preset files: ffpreset and avpreset files.
|
||||
|
||||
@subsection ffpreset files
|
||||
ffpreset files are specified with the @code{vpre}, @code{apre},
|
||||
Preset files are specified with the @code{vpre}, @code{apre},
|
||||
@code{spre}, and @code{fpre} options. The @code{fpre} option takes the
|
||||
filename of the preset instead of a preset name as input and can be
|
||||
used for any kind of codec. For the @code{vpre}, @code{apre}, and
|
||||
@@ -1277,26 +1234,6 @@ directories, where @var{codec_name} is the name of the codec to which
|
||||
the preset file options will be applied. For example, if you select
|
||||
the video codec with @code{-vcodec libvpx} and use @code{-vpre 1080p},
|
||||
then it will search for the file @file{libvpx-1080p.ffpreset}.
|
||||
|
||||
@subsection avpreset files
|
||||
avpreset files are specified with the @code{pre} option. They work similar to
|
||||
ffpreset files, but they only allow encoder- specific options. Therefore, an
|
||||
@var{option}=@var{value} pair specifying an encoder cannot be used.
|
||||
|
||||
When the @code{pre} option is specified, ffmpeg will look for files with the
|
||||
suffix .avpreset in the directories @file{$AVCONV_DATADIR} (if set), and
|
||||
@file{$HOME/.avconv}, and in the datadir defined at configuration time (usually
|
||||
@file{PREFIX/share/ffmpeg}), in that order.
|
||||
|
||||
First ffmpeg searches for a file named @var{codec_name}-@var{arg}.avpreset in
|
||||
the above-mentioned directories, where @var{codec_name} is the name of the codec
|
||||
to which the preset file options will be applied. For example, if you select the
|
||||
video codec with @code{-vcodec libvpx} and use @code{-pre 1080p}, then it will
|
||||
search for the file @file{libvpx-1080p.avpreset}.
|
||||
|
||||
If no such file is found, then ffmpeg will search for a file named
|
||||
@var{arg}.avpreset in the same directories.
|
||||
|
||||
@c man end OPTIONS
|
||||
|
||||
@chapter Tips
|
||||
@@ -1343,6 +1280,21 @@ quality).
|
||||
@chapter Examples
|
||||
@c man begin EXAMPLES
|
||||
|
||||
@section Preset files
|
||||
|
||||
A preset file contains a sequence of @var{option=value} pairs, one for
|
||||
each line, specifying a sequence of options which can be specified also on
|
||||
the command line. Lines starting with the hash ('#') character are ignored and
|
||||
are used to provide comments. Empty lines are also ignored. Check the
|
||||
@file{presets} directory in the FFmpeg source tree for examples.
|
||||
|
||||
Preset files are specified with the @code{pre} option, which takes a
|
||||
preset name as input. FFmpeg searches for a file named @var{preset_name}.avpreset in
|
||||
the directories @file{$AVCONV_DATADIR} (if set), and @file{$HOME/.ffmpeg}, and in
|
||||
the data directory defined at configuration time (usually @file{$PREFIX/share/ffmpeg})
|
||||
in that order. For example, if the argument is @code{libx264-max}, it will
|
||||
search for the file @file{libx264-max.avpreset}.
|
||||
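To illustrate (the file and its contents are hypothetical, not taken from the FFmpeg sources), such a preset simply contains encoder options:
@example
# libx264-max.avpreset
preset=veryslow
crf=18
@end example
It would then be picked up with something like @code{ffmpeg -i input.mov -vcodec libx264 -pre max output.mkv}.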
|
||||
@section Video and Audio grabbing
|
||||
|
||||
If you specify the input format and device then ffmpeg can grab video
|
||||
@@ -1490,7 +1442,7 @@ combination with -ss to start extracting from a certain point in time.
|
||||
|
||||
For creating a video from many images:
|
||||
@example
|
||||
ffmpeg -f image2 -framerate 12 -i foo-%03d.jpeg -s WxH foo.avi
|
||||
ffmpeg -f image2 -i foo-%03d.jpeg -r 12 -s WxH foo.avi
|
||||
@end example
|
||||
|
||||
The syntax @code{foo-%03d.jpeg} specifies to use a decimal number
|
||||
@@ -1505,7 +1457,7 @@ image2-specific @code{-pattern_type glob} option.
|
||||
For example, for creating a video from filenames matching the glob pattern
|
||||
@code{foo-*.jpeg}:
|
||||
@example
|
||||
ffmpeg -f image2 -pattern_type glob -framerate 12 -i 'foo-*.jpeg' -s WxH foo.avi
|
||||
ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle ffplay Documentation
|
||||
@titlepage
|
||||
@@ -125,20 +124,24 @@ master clock is used to control audio-video synchronization. Most media
|
||||
players use audio as master clock, but in some cases (streaming or high
|
||||
quality broadcast) it is necessary to change that. This option is mainly
|
||||
used for debugging purposes.
|
||||
@item -ast @var{audio_stream_specifier}
|
||||
Select the desired audio stream using the given stream specifier. The stream
|
||||
specifiers are described in the @ref{Stream specifiers} chapter. If this option
|
||||
is not specified, the "best" audio stream is selected in the program of the
|
||||
already selected video stream.
|
||||
@item -vst @var{video_stream_specifier}
|
||||
Select the desired video stream using the given stream specifier. The stream
|
||||
specifiers are described in the @ref{Stream specifiers} chapter. If this option
|
||||
is not specified, the "best" video stream is selected.
|
||||
@item -sst @var{subtitle_stream_specifier}
|
||||
Select the desired subtitle stream using the given stream specifier. The stream
|
||||
specifiers are described in the @ref{Stream specifiers} chapter. If this option
|
||||
is not specified, the "best" subtitle stream is selected in the program of the
|
||||
already selected video or audio stream.
|
||||
@item -threads @var{count}
|
||||
Set the thread count. By default, @command{ffplay} automatically detects a
|
||||
suitable number of threads to use.
|
||||
@item -ast @var{audio_stream_number}
|
||||
Select the desired audio stream number, counting from 0. The number
|
||||
refers to the list of all the input audio streams. If it is greater
|
||||
than the number of audio streams minus one, then the last one is
|
||||
selected, if it is negative the audio playback is disabled.
|
||||
@item -vst @var{video_stream_number}
|
||||
Select the desired video stream number, counting from 0. The number
|
||||
refers to the list of all the input video streams. If it is greater
|
||||
than the number of video streams minus one, then the last one is
|
||||
selected, if it is negative the video playback is disabled.
|
||||
@item -sst @var{subtitle_stream_number}
|
||||
Select the desired subtitle stream number, counting from 0. The number
|
||||
refers to the list of all the input subtitle streams. If it is greater
|
||||
than the number of subtitle streams minus one, then the last one is
|
||||
selected, if it is negative the subtitle rendering is disabled.
|
||||
@item -autoexit
|
||||
Exit when video is done playing.
|
||||
@item -exitonkeydown
|
||||
@@ -161,7 +164,7 @@ Force a specific video decoder.
|
||||
Force a specific subtitle decoder.
|
||||
|
||||
@item -autorotate
|
||||
Automatically rotate the video according to file metadata. Enabled by
|
||||
Automatically rotate the video according to presentation metadata. Enabled by
|
||||
default, use @option{-noautorotate} to disable it.
|
||||
|
||||
@item -framedrop
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle ffprobe Documentation
|
||||
@titlepage
|
||||
@@ -447,17 +446,17 @@ writer).
|
||||
It can assume one of the following values:
|
||||
@table @option
|
||||
@item c
|
||||
Perform C-like escaping. Strings containing a newline (@samp{\n}), carriage
|
||||
return (@samp{\r}), a tab (@samp{\t}), a form feed (@samp{\f}), the escaping
|
||||
character (@samp{\}) or the item separator character @var{SEP} are escaped
|
||||
using C-like fashioned escaping, so that a newline is converted to the
|
||||
sequence @samp{\n}, a carriage return to @samp{\r}, @samp{\} to @samp{\\} and
|
||||
the separator @var{SEP} is converted to @samp{\@var{SEP}}.
|
||||
Perform C-like escaping. Strings containing a newline ('\n'), carriage
|
||||
return ('\r'), a tab ('\t'), a form feed ('\f'), the escaping
|
||||
character ('\') or the item separator character @var{SEP} are escaped using C-like fashioned
|
||||
escaping, so that a newline is converted to the sequence "\n", a
|
||||
carriage return to "\r", '\' to "\\" and the separator @var{SEP} is
|
||||
converted to "\@var{SEP}".
|
||||
|
||||
@item csv
|
||||
Perform CSV-like escaping, as described in RFC4180. Strings
|
||||
containing a newline (@samp{\n}), a carriage return (@samp{\r}), a double quote
|
||||
(@samp{"}), or @var{SEP} are enclosed in double-quotes.
|
||||
containing a newline ('\n'), a carriage return ('\r'), a double quote
|
||||
('"'), or @var{SEP} are enclosed in double-quotes.
|
||||
|
||||
@item none
|
||||
Perform no escaping.
|
||||
@@ -485,7 +484,7 @@ The description of the accepted options follows.
|
||||
Separator character used to separate the chapter, the section name, IDs and
|
||||
potential tags in the printed field key.
|
||||
|
||||
Default value is @samp{.}.
|
||||
Default value is '.'.
|
||||
|
||||
@item hierarchical, h
|
||||
Specify if the section name specification should be hierarchical. If
|
||||
@@ -507,22 +506,21 @@ The following conventions are adopted:
|
||||
@item
|
||||
all key and values are UTF-8
|
||||
@item
|
||||
@samp{.} is the subgroup separator
|
||||
'.' is the subgroup separator
|
||||
@item
|
||||
newline, @samp{\t}, @samp{\f}, @samp{\b} and the following characters are
|
||||
escaped
|
||||
newline, '\t', '\f', '\b' and the following characters are escaped
|
||||
@item
|
||||
@samp{\} is the escape character
|
||||
'\' is the escape character
|
||||
@item
|
||||
@samp{#} is the comment indicator
|
||||
'#' is the comment indicator
|
||||
@item
|
||||
@samp{=} is the key/value separator
|
||||
'=' is the key/value separator
|
||||
@item
|
||||
@samp{:} is not used but usually parsed as key/value separator
|
||||
':' is not used but usually parsed as key/value separator
|
||||
@end itemize
|
||||
|
||||
This writer accepts options as a list of @var{key}=@var{value} pairs,
|
||||
separated by @samp{:}.
|
||||
separated by ":".
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
|
@@ -48,11 +48,6 @@
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="codec_type" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="stream_index" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
@@ -70,16 +65,6 @@
|
||||
<xsd:attribute name="data_hash" type="xsd:string" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetSideDataListType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_data" type="ffprobe:packetSideDataType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="packetSideDataType">
|
||||
<xsd:attribute name="side_data_type" type="xsd:string"/>
|
||||
<xsd:attribute name="side_data_size" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
@@ -87,7 +72,6 @@
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="media_type" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="stream_index" type="xsd:int" />
|
||||
<xsd:attribute name="key_frame" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
@@ -171,7 +155,6 @@
|
||||
<xsd:sequence>
|
||||
<xsd:element name="disposition" type="ffprobe:streamDispositionType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
@@ -188,8 +171,6 @@
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:int"/>
|
||||
<xsd:attribute name="height" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_width" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_height" type="xsd:int"/>
|
||||
<xsd:attribute name="has_b_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
|
||||
@@ -201,7 +182,6 @@
|
||||
<xsd:attribute name="color_primaries" type="xsd:string"/>
|
||||
<xsd:attribute name="chroma_location" type="xsd:string"/>
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
<xsd:attribute name="refs" type="xsd:int"/>
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
@@ -273,8 +253,8 @@
|
||||
<xsd:complexType name="programVersionType">
|
||||
<xsd:attribute name="version" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="copyright" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_date" type="xsd:string"/>
|
||||
<xsd:attribute name="build_time" type="xsd:string"/>
|
||||
<xsd:attribute name="build_date" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_time" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="compiler_ident" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="configuration" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
@@ -82,7 +82,6 @@ Feed feed1.ffm
|
||||
# ra : RealNetworks-compatible stream. Audio only.
|
||||
# mpjpeg : Multipart JPEG (works with Netscape without any plugin)
|
||||
# jpeg : Generate a single JPEG image.
|
||||
# mjpeg : Generate a M-JPEG stream.
|
||||
# asf : ASF compatible streaming (Windows Media Player format).
|
||||
# swf : Macromedia Flash compatible stream
|
||||
# avi : AVI format (MPEG-4 video, MPEG audio sound)
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle ffserver Documentation
|
||||
@titlepage
|
||||
@@ -72,7 +71,7 @@ the HTTP server (configured through the @option{HTTPPort} option), and
|
||||
configuration file.
|
||||
|
||||
Each feed is associated to a file which is stored on disk. This stored
|
||||
file is used to send pre-recorded data to a player as fast as
|
||||
file is used to allow to send pre-recorded data to a player as fast as
|
||||
possible when new content is added in real-time to the stream.
|
||||
|
||||
A "live-stream" or "stream" is a resource published by
|
||||
@@ -118,8 +117,7 @@ Multiple streams can be connected to the same feed.
|
||||
|
||||
For example, you can have a situation described by the following
|
||||
graph:
|
||||
|
||||
@verbatim
|
||||
@example
|
||||
_________ __________
|
||||
| | | |
|
||||
ffmpeg 1 -----| feed 1 |-----| stream 1 |
|
||||
@@ -144,8 +142,7 @@ ffmpeg 2 -----| feed 3 |-----| stream 4 |
|
||||
| | | |
|
||||
| file 1 |-----| stream 5 |
|
||||
|_________| |__________|
|
||||
|
||||
@end verbatim
|
||||
@end example
|
||||
|
||||
@anchor{FFM}
|
||||
@section FFM, FFM2 formats
|
||||
|
@@ -50,9 +50,6 @@ Match the stream by stream id (e.g. PID in MPEG-TS container).
|
||||
Matches streams with the metadata tag @var{key} having the specified value. If
|
||||
@var{value} is not given, matches streams that contain the given tag with any
|
||||
value.
|
||||
@item u
|
||||
Matches streams with a usable configuration; the codec must be defined and the
|
||||
essential information such as video dimension or audio sample rate must be present.
|
||||
|
||||
Note that in @command{ffmpeg}, matching by metadata will only work properly for
|
||||
input files.
|
||||
@@ -168,29 +165,28 @@ omitted. "repeat" can also be used alone.
|
||||
If "repeat" is used alone, and with no prior loglevel set, the default
|
||||
loglevel will be used. If multiple loglevel parameters are given, using
|
||||
'repeat' will not change the loglevel.
|
||||
@var{loglevel} is a string or a number containing one of the following values:
|
||||
@var{loglevel} is a number or a string containing one of the following values:
|
||||
@table @samp
|
||||
@item quiet, -8
|
||||
@item quiet
|
||||
Show nothing at all; be silent.
|
||||
@item panic, 0
|
||||
@item panic
|
||||
Only show fatal errors which could lead the process to crash, such as
|
||||
an assert failure. This is not currently used for anything.
|
||||
@item fatal, 8
|
||||
@item fatal
|
||||
Only show fatal errors. These are errors after which the process absolutely
|
||||
cannot continue after.
|
||||
@item error, 16
|
||||
@item error
|
||||
Show all errors, including ones which can be recovered from.
|
||||
@item warning, 24
|
||||
@item warning
|
||||
Show all warnings and errors. Any message related to possibly
|
||||
incorrect or unexpected events will be shown.
|
||||
@item info, 32
|
||||
@item info
|
||||
Show informative messages during processing. This is in addition to
|
||||
warnings and errors. This is the default value.
|
||||
@item verbose, 40
|
||||
@item verbose
|
||||
Same as @code{info}, except more verbose.
|
||||
@item debug, 48
|
||||
@item debug
|
||||
Show everything, including debugging information.
|
||||
@item trace, 56
|
||||
@end table
|
||||
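Hedged examples of selecting a level, optionally combined with the @code{repeat} flag (filenames are hypothetical):
@example
ffmpeg -loglevel error -i input.mp4 output.mkv
ffmpeg -loglevel repeat+verbose -i input.mp4 output.mkv
@end example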
|
||||
By default the program logs to stderr, if coloring is supported by the
|
||||
@@ -208,29 +204,21 @@ directory.
|
||||
This file can be useful for bug reports.
|
||||
It also implies @code{-loglevel verbose}.
|
||||
|
||||
Setting the environment variable @env{FFREPORT} to any value has the
|
||||
Setting the environment variable @code{FFREPORT} to any value has the
|
||||
same effect. If the value is a ':'-separated key=value sequence, these
|
||||
options will affect the report; option values must be escaped if they
|
||||
options will affect the report; options values must be escaped if they
|
||||
contain special characters or the options delimiter ':' (see the
|
||||
``Quoting and escaping'' section in the ffmpeg-utils manual).
|
||||
|
||||
The following options are recognized:
|
||||
``Quoting and escaping'' section in the ffmpeg-utils manual). The
|
||||
following option is recognized:
|
||||
@table @option
|
||||
@item file
|
||||
set the file name to use for the report; @code{%p} is expanded to the name
|
||||
of the program, @code{%t} is expanded to a timestamp, @code{%%} is expanded
|
||||
to a plain @code{%}
|
||||
@item level
|
||||
set the log verbosity level using a numerical value (see @code{-loglevel}).
|
||||
set the log level
|
||||
@end table
|
||||
|
||||
For example, to output a report to a file named @file{ffreport.log}
|
||||
using a log level of @code{32} (alias for log level @code{info}):
|
||||
|
||||
@example
|
||||
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
|
||||
@end example
|
||||
|
||||
Errors in parsing the environment variable are not fatal, and will not
|
||||
appear in the report.
|
||||
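As another sketch using the documented @code{%p} and @code{%t} expansions (the input and output names are placeholders):
@example
FFREPORT=file=%p-%t.log:level=40 ffmpeg -i input.mov output.mkv
@end example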
|
||||
@@ -265,14 +253,10 @@ Possible flags for this option are:
|
||||
@item sse4.1
|
||||
@item sse4.2
|
||||
@item avx
|
||||
@item avx2
|
||||
@item xop
|
||||
@item fma3
|
||||
@item fma4
|
||||
@item 3dnow
|
||||
@item 3dnowext
|
||||
@item bmi1
|
||||
@item bmi2
|
||||
@item cmov
|
||||
@end table
|
||||
@item ARM
|
||||
@@ -283,13 +267,6 @@ Possible flags for this option are:
|
||||
@item vfp
|
||||
@item vfpv3
|
||||
@item neon
|
||||
@item setend
|
||||
@end table
|
||||
@item AArch64
|
||||
@table @samp
|
||||
@item armv8
|
||||
@item vfp
|
||||
@item neon
|
||||
@end table
|
||||
@item PowerPC
|
||||
@table @samp
|
||||
@@ -309,41 +286,8 @@ Possible flags for this option are:
|
||||
@end table
|
||||
|
||||
@item -opencl_bench
|
||||
This option is used to benchmark all available OpenCL devices and print the
|
||||
results. This option is only available when FFmpeg has been compiled with
|
||||
@code{--enable-opencl}.
|
||||
|
||||
When FFmpeg is configured with @code{--enable-opencl}, the options for the
|
||||
global OpenCL context are set via @option{-opencl_options}. See the
|
||||
"OpenCL Options" section in the ffmpeg-utils manual for the complete list of
|
||||
supported options. Amongst others, these options include the ability to select
|
||||
a specific platform and device to run the OpenCL code on. By default, FFmpeg
|
||||
will run on the first device of the first platform. While the options for the
|
||||
global OpenCL context provide flexibility to the user in selecting the OpenCL
|
||||
device of their choice, most users would probably want to select the fastest
|
||||
OpenCL device for their system.
|
||||
|
||||
This option assists the selection of the most efficient configuration by
|
||||
identifying the appropriate device for the user's system. The built-in
|
||||
benchmark is run on all the OpenCL devices and the performance is measured for
|
||||
each device. The devices in the results list are sorted based on their
|
||||
performance with the fastest device listed first. The user can subsequently
|
||||
invoke @command{ffmpeg} using the device deemed most appropriate via
|
||||
@option{-opencl_options} to obtain the best performance for the OpenCL
|
||||
accelerated code.
|
||||
|
||||
Typical usage of the fastest OpenCL device involves the following steps.
|
||||
|
||||
Run the command:
|
||||
@example
|
||||
ffmpeg -opencl_bench
|
||||
@end example
|
||||
Note down the platform ID (@var{pidx}) and device ID (@var{didx}) of the first
|
||||
i.e. fastest device in the list.
|
||||
Select the platform and device using the command:
|
||||
@example
|
||||
ffmpeg -opencl_options platform_idx=@var{pidx}:device_idx=@var{didx} ...
|
||||
@end example
|
||||
Benchmark all available OpenCL devices and show the results. This option
|
||||
is only available when FFmpeg has been compiled with @code{--enable-opencl}.
|
||||
|
||||
@item -opencl_options options (@emph{global})
|
||||
Set OpenCL environment options. This option is only available when
|
||||
|
1058  doc/filters.texi  (diff suppressed because it is too large)
@@ -23,7 +23,7 @@ Reduce buffering.
|
||||
|
||||
@item probesize @var{integer} (@emph{input})
|
||||
Set probing size in bytes, i.e. the size of the data to analyze to get
|
||||
stream information. A higher value will enable detecting more
|
||||
stream information. A higher value will allow to detect more
|
||||
information in case it is dispersed into the stream, but will increase
|
||||
latency. Must be an integer not less than 32. It is 5000000 by default.
|
||||
|
||||
@@ -37,8 +37,6 @@ Possible values:
|
||||
@table @samp
|
||||
@item ignidx
|
||||
Ignore index.
|
||||
@item fastseek
|
||||
Enable fast, but inaccurate seeks for some formats.
|
||||
@item genpts
|
||||
Generate PTS.
|
||||
@item nofillin
|
||||
@@ -69,7 +67,7 @@ Default is 0.
|
||||
|
||||
@item analyzeduration @var{integer} (@emph{input})
|
||||
Specify how many microseconds are analyzed to probe the input. A
|
||||
higher value will enable detecting more accurate information, but will
|
||||
higher value will allow to detect more accurate information, but will
|
||||
increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
|
||||
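A hedged example (the stream URL is hypothetical) raising both limits for an input whose stream parameters appear late:
@example
ffmpeg -probesize 10000000 -analyzeduration 10000000 -i udp://239.0.0.1:1234 out.mkv
@end example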
|
||||
@item cryptokey @var{hexadecimal string} (@emph{input})
|
||||
@@ -127,25 +125,6 @@ Consider all spec non compliancies as errors.
|
||||
Consider things that a sane encoder should not do as an error.
|
||||
@end table
|
||||
|
||||
@item max_interleave_delta @var{integer} (@emph{output})
|
||||
Set maximum buffering duration for interleaving. The duration is
|
||||
expressed in microseconds, and defaults to 1000000 (1 second).
|
||||
|
||||
To ensure all the streams are interleaved correctly, libavformat will
|
||||
wait until it has at least one packet for each stream before actually
|
||||
writing any packets to the output file. When some streams are
|
||||
"sparse" (i.e. there are large gaps between successive packets), this
|
||||
can result in excessive buffering.
|
||||
|
||||
This field specifies the maximum difference between the timestamps of the
|
||||
first and the last packet in the muxing queue, above which libavformat
|
||||
will output a packet regardless of whether it has queued a packet for all
|
||||
the streams.
|
||||
|
||||
If set to 0, libavformat will continue buffering packets until it has
|
||||
a packet for each stream, regardless of the maximum timestamp
|
||||
difference between the buffered packets.
|
||||
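For instance (a sketch with hypothetical filenames), disabling the limit while remuxing sparse streams:
@example
ffmpeg -i input.mkv -c copy -max_interleave_delta 0 output.mkv
@end example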
|
||||
@item use_wallclock_as_timestamps @var{integer} (@emph{input})
|
||||
Use wallclock as timestamps.
|
||||
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle General Documentation
|
||||
@titlepage
|
||||
@@ -109,14 +108,6 @@ Go to @url{http://www.wavpack.com/} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libwavpack} to configure to
|
||||
enable it.
|
||||
|
||||
@section OpenH264
|
||||
|
||||
FFmpeg can make use of the OpenH264 library for H.264 encoding.
|
||||
|
||||
Go to @url{http://www.openh264.org/} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libopenh264} to configure to
|
||||
enable it.
|
||||
|
||||
@section x264
|
||||
|
||||
FFmpeg can make use of the x264 library for H.264 encoding.
|
||||
@@ -152,7 +143,7 @@ by Google as part of the WebRTC project. libilbc is a packaging friendly
|
||||
copy of the iLBC codec. FFmpeg can make use of the libilbc library for
|
||||
iLBC encoding and decoding.
|
||||
|
||||
Go to @url{https://github.com/TimothyGu/libilbc} and follow the instructions for
|
||||
Go to @url{https://github.com/dekkers/libilbc} and follow the instructions for
|
||||
installing the library. Then pass @code{--enable-libilbc} to configure to
|
||||
enable it.
|
||||
|
||||
@@ -179,8 +170,8 @@ included in compat/avisynth/, which allows the user to enable support
|
||||
without needing to search for these headers themselves.
|
||||
|
||||
For Windows, supported AviSynth variants are
|
||||
@url{http://avisynth.nl, AviSynth 2.6 RC1 or higher} for 32-bit builds and
|
||||
@url{http://avs-plus.net, AviSynth+ r1718 or higher} for 32-bit and 64-bit builds.
|
||||
@url{http://avisynth.nl, AviSynth 2.5 or 2.6} for 32-bit builds and
|
||||
@url{http://avs-plus.net, AviSynth+ 0.1} for 32-bit and 64-bit builds.
|
||||
|
||||
For Linux and OS X, the supported AviSynth variant is
|
||||
@url{https://github.com/avxsynth/avxsynth, AvxSynth}.
|
||||
@@ -222,7 +213,6 @@ library:
|
||||
@tab Audio format used on the Nintendo Gamecube.
|
||||
@item AFC @tab @tab X
|
||||
@tab Audio format used on the Nintendo Gamecube.
|
||||
@item APNG @tab X @tab X
|
||||
@item ASF @tab X @tab X
|
||||
@item AST @tab X @tab X
|
||||
@tab Audio format used on the Nintendo Wii.
|
||||
@@ -253,10 +243,6 @@ library:
|
||||
@tab Used in the game Cyberia from Interplay.
|
||||
@item Delphine Software International CIN @tab @tab X
|
||||
@tab Multimedia format used by Delphine Software games.
|
||||
@item Digital Speech Standard (DSS) @tab @tab X
|
||||
@item Canopus HQ @tab @tab X
|
||||
@item Canopus HQA @tab @tab X
|
||||
@item Canopus HQX @tab @tab X
|
||||
@item CD+G @tab @tab X
|
||||
@tab Video format used by CD+G karaoke disks
|
||||
@item Phantom Cine @tab @tab X
|
||||
@@ -469,7 +455,6 @@ library:
|
||||
@item SoX native format @tab X @tab X
|
||||
@item SUN AU format @tab X @tab X
|
||||
@item SUP raw PGS subtitles @tab @tab X
|
||||
@item TDSC @tab @tab X
|
||||
@item Text files @tab @tab X
|
||||
@item THP @tab @tab X
|
||||
@tab Used on the Nintendo GameCube.
|
||||
@@ -511,7 +496,6 @@ following image formats are supported:
|
||||
@item Alias PIX @tab X @tab X
|
||||
@tab Alias/Wavefront PIX image format
|
||||
@item animated GIF @tab X @tab X
|
||||
@item APNG @tab X @tab X
|
||||
@item BMP @tab X @tab X
|
||||
@tab Microsoft BMP image
|
||||
@item BRender PIX @tab @tab X
|
||||
@@ -669,7 +653,7 @@ following image formats are supported:
|
||||
@item H.263 / H.263-1996 @tab X @tab X
|
||||
@item H.263+ / H.263-1998 / H.263 version 2 @tab X @tab X
|
||||
@item H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 @tab E @tab X
|
||||
@tab encoding supported through external library libx264 and OpenH264
|
||||
@tab encoding supported through external library libx264
|
||||
@item HEVC @tab X @tab X
|
||||
@tab encoding supported through the external library libx265
|
||||
@item HNM version 4 @tab @tab X
|
||||
@@ -905,12 +889,10 @@ following image formats are supported:
|
||||
@tab decoding supported through external library libcelt
|
||||
@item Delphine Software International CIN audio @tab @tab X
|
||||
@tab Codec used in Delphine Software International games.
|
||||
@item Digital Speech Standard - Standard Play mode (DSS SP) @tab @tab X
|
||||
@item Discworld II BMV Audio @tab @tab X
|
||||
@item COOK @tab @tab X
|
||||
@tab All versions except 5.1 are supported.
|
||||
@item DCA (DTS Coherent Acoustics) @tab X @tab X
|
||||
@tab supported extensions: XCh, XLL (partially)
|
||||
@item DPCM id RoQ @tab X @tab X
|
||||
@tab Used in Quake III, Jedi Knight 2 and other computer games.
|
||||
@item DPCM Interplay @tab @tab X
|
||||
@@ -954,8 +936,8 @@ following image formats are supported:
|
||||
@item Musepack SV8 @tab @tab X
|
||||
@item Nellymoser Asao @tab X @tab X
|
||||
@item On2 AVC (Audio for Video Codec) @tab @tab X
|
||||
@item Opus @tab E @tab X
|
||||
@tab encoding supported through external library libopus
|
||||
@item Opus @tab E @tab E
|
||||
@tab supported through external library libopus
|
||||
@item PCM A-law @tab X @tab X
|
||||
@item PCM mu-law @tab X @tab X
|
||||
@item PCM signed 8-bit planar @tab X @tab X
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Using git to develop FFmpeg
|
||||
|
||||
@@ -75,7 +74,6 @@ git config --global core.autocrlf false
|
||||
@end example
|
||||
|
||||
|
||||
@anchor{Updating the source tree to the latest revision}
|
||||
@section Updating the source tree to the latest revision
|
||||
|
||||
@example
|
||||
@@ -331,7 +329,7 @@ git push
|
||||
|
||||
Will push the changes to the default remote (@var{origin}).
|
||||
Git will prevent you from pushing changes if the local and remote trees are
|
||||
out of sync. Refer to @ref{Updating the source tree to the latest revision}.
|
||||
out of sync. Refer to and to sync the local tree.
|
||||
|
||||
@example
|
||||
git remote add <name> <url>
|
||||
|
302  doc/indevs.texi
@@ -1,7 +1,7 @@
|
||||
@chapter Input Devices
|
||||
@c man begin INPUT DEVICES
|
||||
|
||||
Input devices are configured elements in FFmpeg which enable accessing
|
||||
Input devices are configured elements in FFmpeg which allow to access
|
||||
the data coming from a multimedia device attached to your system.
|
||||
|
||||
When you configure your FFmpeg build, all the supported input devices
|
||||
@@ -150,81 +150,6 @@ $ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
|
||||
|
||||
BSD video input device.
|
||||
|
||||
@section decklink
|
||||
|
||||
The decklink input device provides capture capabilities for Blackmagic
|
||||
DeckLink devices.
|
||||
|
||||
To enable this input device, you need the Blackmagic DeckLink SDK and you
|
||||
need to configure with the appropriate @code{--extra-cflags}
|
||||
and @code{--extra-ldflags}.
|
||||
On Windows, you need to run the IDL files through @command{widl}.
|
||||
|
||||
DeckLink is very picky about the formats it supports. Pixel format is
|
||||
uyvy422 or v210, framerate and video size must be determined for your device with
|
||||
@command{-list_formats 1}. Audio sample rate is always 48 kHz and the number
|
||||
of channels can be 2, 8 or 16.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item list_devices
|
||||
If set to @option{true}, print a list of devices and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item list_formats
|
||||
If set to @option{true}, print a list of supported formats and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item bm_v210
|
||||
If set to @samp{1}, video is captured in 10 bit v210 instead
|
||||
of uyvy422. Not all Blackmagic devices support this option.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
List input devices:
|
||||
@example
|
||||
ffmpeg -f decklink -list_devices 1 -i dummy
|
||||
@end example
|
||||
|
||||
@item
|
||||
List supported formats:
|
||||
@example
|
||||
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 1080i50 (format 11):
|
||||
@example
|
||||
ffmpeg -f decklink -i 'Intensity Pro@@11' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 1080i50 10 bit:
|
||||
@example
|
||||
ffmpeg -bm_v210 1 -f decklink -i 'UltraStudio Mini Recorder@@11' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 720p50 with 32bit audio:
|
||||
@example
|
||||
ffmpeg -bm_audiodepth 32 -f decklink -i 'UltraStudio Mini Recorder@@14' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 576i50 with 8 audio channels:
|
||||
@example
|
||||
ffmpeg -bm_channels 8 -f decklink -i 'UltraStudio Mini Recorder@@3' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section dshow
|
||||
|
||||
Windows DirectShow input device.
|
||||
@@ -242,7 +167,7 @@ The input name should be in the format:
|
||||
@end example
|
||||
|
||||
where @var{TYPE} can be either @var{audio} or @var{video},
|
||||
and @var{NAME} is the device's name or alternative name..
|
||||
and @var{NAME} is the device's name.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@@ -295,85 +220,6 @@ Setting this value too low can degrade performance.
|
||||
See also
|
||||
@url{http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx}
|
||||
|
||||
@item video_pin_name
|
||||
Select video capture pin to use by name or alternative name.
|
||||
|
||||
@item audio_pin_name
|
||||
Select audio capture pin to use by name or alternative name.
|
||||
|
||||
@item crossbar_video_input_pin_number
|
||||
Select video input pin number for crossbar device. This will be
|
||||
routed to the crossbar device's Video Decoder output pin.
|
||||
Note that changing this value can affect future invocations
|
||||
(sets a new default) until system reboot occurs.
|
||||
|
||||
@item crossbar_audio_input_pin_number
|
||||
Select audio input pin number for crossbar device. This will be
|
||||
routed to the crossbar device's Audio Decoder output pin.
|
||||
Note that changing this value can affect future invocations
|
||||
(sets a new default) until system reboot occurs.
|
||||
|
||||
@item show_video_device_dialog
|
||||
If set to @option{true}, before capture starts, popup a display dialog
|
||||
to the end user, allowing them to change video filter properties
|
||||
and configurations manually.
|
||||
Note that for crossbar devices, adjusting values in this dialog
|
||||
may be needed at times to toggle between PAL (25 fps) and NTSC (29.97)
|
||||
input frame rates, sizes, interlacing, etc. Changing these values can
|
||||
enable different scan rates/frame rates and avoid green bars at
|
||||
the bottom, flickering scan lines, etc.
|
||||
Note that with some devices, changing these properties can also affect future
|
||||
invocations (sets new defaults) until system reboot occurs.
|
||||
|
||||
@item show_audio_device_dialog
|
||||
If set to @option{true}, before capture starts, popup a display dialog
|
||||
to the end user, allowing them to change audio filter properties
|
||||
and configurations manually.
|
||||
|
||||
@item show_video_crossbar_connection_dialog
|
||||
If set to @option{true}, before capture starts, popup a display
|
||||
dialog to the end user, allowing them to manually
|
||||
modify crossbar pin routings, when it opens a video device.
|
||||
|
||||
@item show_audio_crossbar_connection_dialog
|
||||
If set to @option{true}, before capture starts, popup a display
|
||||
dialog to the end user, allowing them to manually
|
||||
modify crossbar pin routings, when it opens an audio device.
|
||||
|
||||
@item show_analog_tv_tuner_dialog
|
||||
If set to @option{true}, before capture starts, popup a display
|
||||
dialog to the end user, allowing them to manually
|
||||
modify TV channels and frequencies.
|
||||
|
||||
@item show_analog_tv_tuner_audio_dialog
|
||||
If set to @option{true}, before capture starts, popup a display
|
||||
dialog to the end user, allowing them to manually
|
||||
modify TV audio (like mono vs. stereo, Language A,B or C).
|
||||
|
||||
@item audio_device_load
|
||||
Load an audio capture filter device from file instead of searching
|
||||
it by name. It may load additional parameters too, if the filter
|
||||
supports the serialization of its properties.
|
||||
To use this, an audio capture source has to be specified, but it can
|
||||
be anything, even a fake one.
|
||||
|
||||
@item audio_device_save
|
||||
Save the currently used audio capture filter device and its
|
||||
parameters (if the filter supports it) to a file.
|
||||
If a file with the same name exists it will be overwritten.
|
||||
|
||||
@item video_device_load
|
||||
Load a video capture filter device from file instead of searching
|
||||
it by name. It may load additional parameters too, if the filter
|
||||
supports the serialization of its properties.
|
||||
To use this, a video capture source has to be specified, but it can
|
||||
be anything, even a fake one.
|
||||
|
||||
@item video_device_save
|
||||
Save the currently used video capture filter device and its
|
||||
parameters (if the filter supports it) to a file.
|
||||
If a file with the same name exists it will be overwritten.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -410,19 +256,6 @@ Print the list of supported options in selected device and exit:
|
||||
$ ffmpeg -list_options true -f dshow -i video="Camera"
|
||||
@end example
|
||||
|
||||
@item
|
||||
Specify pin names to capture by name or alternative name, specify alternative device name:
|
||||
@example
|
||||
$ ffmpeg -f dshow -audio_pin_name "Audio Out" -video_pin_name 2 -i video=video="@@device_pnp_\\?\pci#ven_1a0a&dev_6200&subsys_62021461&rev_01#4&e2c7dd6&0&00e1#@{65e8773d-8f56-11d0-a3b9-00a0c9223196@}\@{ca465100-deb0-4d59-818f-8c477184adf6@}":audio="Microphone"
|
||||
@end example
|
||||
|
||||
@item
|
||||
Configure a crossbar device, specifying crossbar pins, allow user to adjust video capture properties at startup:
|
||||
@example
|
||||
$ ffmpeg -f dshow -show_video_device_dialog true -crossbar_video_input_pin_number 0
|
||||
-crossbar_audio_input_pin_number 3 -i video="AVerMedia BDA Analog Capture":audio="AVerMedia BDA Analog Capture"
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section dv1394
|
||||
@@ -557,7 +390,7 @@ not work and result in undefined behavior.
|
||||
The values @option{auto}, @option{dv} and @option{hdv} are supported.
|
||||
|
||||
@item dvbuffer
|
||||
Set maximum size of buffer for incoming data, in frames. For DV, this
|
||||
Set maxiumum size of buffer for incoming data, in frames. For DV, this
|
||||
is an exact value. For HDV, it is not frame exact, since HDV does
|
||||
not have a fixed frame size.
|
||||
|
||||
@@ -662,14 +495,6 @@ generated by the device.
|
||||
The first unlabelled output is automatically assigned to the "out0"
|
||||
label, but all the others need to be specified explicitly.
|
||||
|
||||
The suffix "+subcc" can be appended to the output label to create an extra
|
||||
stream with the closed captions packets attached to that output
|
||||
(experimental; only for EIA-608 / CEA-708 for now).
|
||||
The subcc streams are created after all the normal streams, in the order of
|
||||
the corresponding stream.
|
||||
For example, if there is "out19+subcc", "out7+subcc" and up to "out42", the
|
||||
stream #43 is subcc for stream #7 and stream #44 is subcc for stream #19.
|
||||
|
||||
If not specified defaults to the filename specified for the input
|
||||
device.
|
||||
|
||||
@@ -716,57 +541,24 @@ Read an audio stream and a video stream and play it back with
|
||||
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
|
||||
@end example
|
||||
|
||||
@item
|
||||
Dump decoded frames to images and closed captions to a file (experimental):
|
||||
@example
|
||||
ffmpeg -f lavfi -i "movie=test.ts[out0+subcc]" -map v frame%08d.png -map s -c copy -f rawvideo subcc.bin
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section libcdio
|
||||
|
||||
Audio-CD input device based on libcdio.
|
||||
Audio-CD input device based on cdio.
|
||||
|
||||
To enable this input device during configuration you need libcdio
|
||||
installed on your system. It requires the configure option
|
||||
installed on your system. Requires the configure option
|
||||
@code{--enable-libcdio}.
|
||||
|
||||
This device allows playing and grabbing from an Audio-CD.
|
||||
|
||||
For example to copy with @command{ffmpeg} the entire Audio-CD in @file{/dev/sr0},
|
||||
For example to copy with @command{ffmpeg} the entire Audio-CD in /dev/sr0,
|
||||
you may run the command:
|
||||
@example
|
||||
ffmpeg -f libcdio -i /dev/sr0 cd.wav
|
||||
@end example
|
||||
|
||||
@subsection Options
|
||||
@table @option
|
||||
@item speed
|
||||
Set drive reading speed. Default value is 0.
|
||||
|
||||
The speed is specified CD-ROM speed units. The speed is set through
|
||||
the libcdio @code{cdio_cddap_speed_set} function. On many CD-ROM
|
||||
drives, specifying a value too large will result in using the fastest
|
||||
speed.
|
||||
|
||||
@item paranoia_mode
|
||||
Set paranoia recovery mode flags. It accepts one of the following values:
|
||||
|
||||
@table @samp
|
||||
@item disable
|
||||
@item verify
|
||||
@item overlap
|
||||
@item neverskip
|
||||
@item full
|
||||
@end table
|
||||
|
||||
Default value is @samp{disable}.
|
||||
|
||||
For more information about the available recovery modes, consult the
|
||||
paranoia project documentation.
|
||||
@end table
|
||||
|
||||
@section libdc1394
|
||||
|
||||
IIDC1394 input device, based on libdc1394 and libraw1394.
|
||||
@@ -1003,12 +795,6 @@ conversion into the real time clock.
|
||||
Some usage examples of the video4linux2 device with @command{ffmpeg}
|
||||
and @command{ffplay}:
|
||||
@itemize
|
||||
@item
|
||||
List supported formats for a video4linux2 device:
|
||||
@example
|
||||
ffplay -f video4linux2 -list_formats all /dev/video0
|
||||
@end example
|
||||
|
||||
@item
|
||||
Grab and show the input of a video4linux2 device:
|
||||
@example
|
||||
@@ -1107,12 +893,8 @@ other filename will be interpreted as device number 0.
|
||||
|
||||
X11 video input device.
|
||||
|
||||
To enable this input device during configuration you need libxcb
|
||||
installed on your system. It will be automatically detected during
|
||||
configuration.
|
||||
|
||||
Alternatively, the configure option @option{--enable-x11grab} exists
|
||||
for legacy Xlib users.
|
||||
Depends on X11, Xext, and Xfixes. Requires the configure option
|
||||
@code{--enable-x11grab}.
|
||||
|
||||
This device allows one to capture a region of an X11 display.
|
||||
|
||||
@@ -1130,12 +912,10 @@ omitted, and defaults to "localhost". The environment variable
|
||||
area with respect to the top-left border of the X11 screen. They
|
||||
default to 0.
|
||||
|
||||
Check the X11 documentation (e.g. @command{man X}) for more detailed
|
||||
information.
|
||||
Check the X11 documentation (e.g. man X) for more detailed information.
|
||||
|
||||
Use the @command{xdpyinfo} program for getting basic information about
|
||||
the properties of your X11 display (e.g. grep for "name" or
|
||||
"dimensions").
|
||||
Use the @command{dpyinfo} program for getting basic information about the
|
||||
properties of your X11 display (e.g. grep for "name" or "dimensions").
|
||||
|
||||
For example to grab from @file{:0.0} using @command{ffmpeg}:
|
||||
@example
|
||||
@@ -1184,10 +964,6 @@ If @var{show_region} is specified with @code{1}, then the grabbing
|
||||
region will be indicated on screen. With this option, it is easy to
|
||||
know what is being grabbed if only a portion of the screen is grabbed.
|
||||
|
||||
@item region_border
|
||||
Set the region border thickness if @option{-show_region 1} is used.
|
||||
Range is 1 to 128 and default is 3 (XCB-based x11grab only).
|
||||
|
||||
For example:
|
||||
@example
|
||||
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
|
||||
@@ -1203,19 +979,61 @@ Set the video frame size. Default value is @code{vga}.
|
||||
|
||||
@item use_shm
|
||||
Use the MIT-SHM extension for shared memory. Default value is @code{1}.
|
||||
It may be necessary to disable it for remote displays (legacy x11grab
|
||||
only).
|
||||
It may be necessary to disable it for remote displays.
|
||||
@end table
|
||||
|
||||
@subsection @var{grab_x} @var{grab_y} AVOption
|
||||
@section decklink
|
||||
|
||||
The syntax is:
|
||||
The decklink input device provides capture capabilities for Blackmagic
|
||||
DeckLink devices.
|
||||
|
||||
To enable this input device, you need the Blackmagic DeckLink SDK and you
|
||||
need to configure with the appropriate @code{--extra-cflags}
|
||||
and @code{--extra-ldflags}.
|
||||
On Windows, you need to run the IDL files through @command{widl}.
|
||||
|
||||
DeckLink is very picky about the formats it supports. Pixel format is always
|
||||
uyvy422, framerate and video size must be determined for your device with
|
||||
@command{-list_formats 1}. Audio sample rate is always 48 kHz and the number
|
||||
of channels currently is limited to 2 (stereo).
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item list_devices
|
||||
If set to @option{true}, print a list of devices and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item list_formats
|
||||
If set to @option{true}, print a list of supported formats and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
List input devices:
|
||||
@example
|
||||
-grab_x @var{x_offset} -grab_y @var{y_offset}
|
||||
ffmpeg -f decklink -list_devices 1 -i dummy
|
||||
@end example
|
||||
|
||||
Set the grabbing region coordinates. They are expressed as offset from the top left
|
||||
corner of the X11 window. The default value is 0.
|
||||
@item
|
||||
List supported formats:
|
||||
@example
|
||||
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 1080i50 (format 11):
|
||||
@example
|
||||
ffmpeg -f decklink -i 'Intensity Pro@@11' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
|
||||
@c man end INPUT DEVICES
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libavcodec Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libavdevice Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libavfilter Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libavformat Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libavutil Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libswresample Documentation
|
||||
@titlepage
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Libswscale Documentation
|
||||
@titlepage
|
||||
|
@@ -12,10 +12,10 @@ A file consists of a header and a number of metadata tags divided into sections,
|
||||
each on its own line.
|
||||
|
||||
@item
|
||||
The header is a @samp{;FFMETADATA} string, followed by a version number (now 1).
|
||||
The header is a ';FFMETADATA' string, followed by a version number (now 1).
|
||||
|
||||
@item
|
||||
Metadata tags are of the form @samp{key=value}
|
||||
Metadata tags are of the form 'key=value'
|
||||
|
||||
@item
|
||||
Immediately after header follows global metadata
|
||||
@@ -26,30 +26,26 @@ metadata.
|
||||
|
||||
@item
|
||||
A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in
|
||||
brackets (@samp{[}, @samp{]}) and ends with next section or end of file.
|
||||
brackets ('[', ']') and ends with next section or end of file.
|
||||
|
||||
@item
|
||||
At the beginning of a chapter section there may be an optional timebase to be
|
||||
used for start/end values. It must be in form
|
||||
@samp{TIMEBASE=@var{num}/@var{den}}, where @var{num} and @var{den} are
|
||||
integers. If the timebase is missing then start/end times are assumed to
|
||||
used for start/end values. It must be in form 'TIMEBASE=num/den', where num and
|
||||
den are integers. If the timebase is missing then start/end times are assumed to
|
||||
be in milliseconds.
|
||||
|
||||
Next a chapter section must contain chapter start and end times in form
|
||||
@samp{START=@var{num}}, @samp{END=@var{num}}, where @var{num} is a positive
|
||||
integer.
|
||||
'START=num', 'END=num', where num is a positive integer.
|
||||
|
||||
@item
|
||||
Empty lines and lines starting with @samp{;} or @samp{#} are ignored.
|
||||
Empty lines and lines starting with ';' or '#' are ignored.
|
||||
|
||||
@item
|
||||
Metadata keys or values containing special characters (@samp{=}, @samp{;},
|
||||
@samp{#}, @samp{\} and a newline) must be escaped with a backslash @samp{\}.
|
||||
Metadata keys or values containing special characters ('=', ';', '#', '\' and a
|
||||
newline) must be escaped with a backslash '\'.
|
||||
|
||||
@item
|
||||
Note that whitespace in metadata (e.g. @samp{foo = bar}) is considered to be
|
||||
a part of the tag (in the example above key is @samp{foo }, value is
|
||||
@samp{ bar}).
|
||||
Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of
|
||||
the tag (in the example above key is 'foo ', value is ' bar').
|
||||
@end enumerate
|
||||
|
||||
An ffmetadata file might look like this:
|
||||
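The example itself is not shown in this diff; a minimal hypothetical file following the rules above could be:
@example
;FFMETADATA1
title=My Recording
artist=Unknown Artist

[CHAPTER]
TIMEBASE=1/1000
START=0
END=60000
title=Chapter \#1
@end example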
|
145  doc/muxers.texi
@@ -254,15 +254,6 @@ and it is not to be confused with the segment filename sequence number
|
||||
which can be cyclic, for example if the @option{wrap} option is
|
||||
specified.
|
||||
|
||||
@item hls_segment_filename @var{filename}
|
||||
Set the segment filename. Unless hls_flags single_file is set @var{filename}
|
||||
is used as a string format with the segment number:
|
||||
@example
|
||||
ffmpeg -i in.nut -hls_segment_filename 'file%03d.ts' out.m3u8
|
||||
@end example
|
||||
This example will produce the playlist, @file{out.m3u8}, and segment files:
|
||||
@file{file000.ts}, @file{file001.ts}, @file{file002.ts}, etc.
|
||||
|
||||
@item hls_flags single_file
|
||||
If this flag is set, the muxer will store all segments in a single MPEG-TS
|
||||
file, and will use byte ranges in the playlist. HLS playlists generated with
|
||||
@@ -273,10 +264,6 @@ ffmpeg -i in.nut -hls_flags single_file out.m3u8
|
||||
@end example
|
||||
Will produce the playlist, @file{out.m3u8}, and a single segment file,
|
||||
@file{out.ts}.
|
||||
|
||||
@item hls_flags delete_segments
|
||||
Segment files removed from the playlist are deleted after a period of time
|
||||
equal to the duration of the segment plus the duration of the playlist.
|
||||
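A sketched invocation (filenames hypothetical) keeping a short rolling playlist so that old segments get deleted:
@example
ffmpeg -i in.nut -hls_time 10 -hls_list_size 6 -hls_flags delete_segments out.m3u8
@end example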
@end table
|
||||
|
||||
@anchor{ico}
|
||||
@@ -381,7 +368,8 @@ ffmpeg -f v4l2 -r 1 -i /dev/video0 -f image2 -strftime 1 "%Y-%m-%d_%H-%M-%S.jpg"
|
||||
|
||||
@table @option
|
||||
@item start_number
|
||||
Start the sequence from the specified number. Default value is 0.
|
||||
Start the sequence from the specified number. Default value is 1. Must
|
||||
be a non-negative number.
|
||||
|
||||
@item update
|
||||
If set to 1, the filename will always be interpreted as just a
|
||||
@@ -689,9 +677,6 @@ Set the transport_stream_id (default 0x0001). This identifies a
|
||||
transponder in DVB.
|
||||
@item -mpegts_service_id @var{number}
|
||||
Set the service_id (default 0x0001) also known as program in DVB.
|
||||
@item -mpegts_service_type @var{number}
|
||||
Set the program service_type (default @var{digital_tv}), see below
|
||||
a list of pre defined values.
|
||||
@item -mpegts_pmt_start_pid @var{number}
|
||||
Set the first PID for PMT (default 0x1000, max 0x1f00).
|
||||
@item -mpegts_start_pid @var{number}
|
||||
@@ -726,27 +711,6 @@ ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
|
||||
@end example
|
||||
@end table
|
||||
|
||||
Option mpegts_service_type accepts the following values:
|
||||
|
||||
@table @option
|
||||
@item hex_value
|
||||
Any hexadecimal value from 0x01 to 0xff as defined in ETSI 300 468.
|
||||
@item digital_tv
|
||||
Digital TV service.
|
||||
@item digital_radio
|
||||
Digital Radio service.
|
||||
@item teletext
|
||||
Teletext service.
|
||||
@item advanced_codec_digital_radio
|
||||
Advanced Codec Digital Radio service.
|
||||
@item mpeg2_digital_hdtv
|
||||
MPEG2 Digital HDTV service.
|
||||
@item advanced_codec_digital_sdtv
|
||||
Advanced Codec Digital SDTV service.
|
||||
@item advanced_codec_digital_hdtv
|
||||
Advanced Codec Digital HDTV service.
|
||||
@end table
|
||||
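A speculative example (filenames hypothetical) tagging the program as a digital TV service:
@example
ffmpeg -i input.mkv -c:v mpeg2video -c:a mp2 -f mpegts -mpegts_service_type digital_tv out.ts
@end example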
|
||||
Option mpegts_flags may take a set of such flags:
|
||||
|
||||
@table @option
|
||||
@@ -804,7 +768,7 @@ Change the syncpoint usage in nut:
|
||||
sensitive and seeking is not possible. Also in general the overhead from
|
||||
syncpoints is negligible. Note, -@code{write_index} 0 can be used to disable
|
||||
all growing data tables, allowing to mux endless streams with limited memory
|
||||
and without these disadvantages.
|
||||
and wihout these disadvantages.
|
||||
@item @var{timestamped} extend the syncpoint with a wallclock field.
|
||||
@end table
|
||||
The @var{none} and @var{timestamped} flags are experimental.
|
||||
@@ -829,11 +793,6 @@ is 1 second. A value of 0 will fill all segments, making pages as large as
|
||||
possible. A value of 1 will effectively use 1 packet-per-page in most
|
||||
situations, giving a small seek granularity at the cost of additional container
|
||||
overhead.
|
||||
@item -serial_offset @var{value}
|
||||
Serial value from which to set the streams serial number.
|
||||
Setting it to different and sufficiently large values ensures that the produced
|
||||
ogg files can be safely chained.
|
||||
|
||||
@end table
|
||||
|
||||
@anchor{segment}
|
||||
@@ -842,9 +801,8 @@ ogg files can be safely chained.
|
||||
Basic stream segmenter.
|
||||
|
||||
This muxer outputs streams to a number of separate files of nearly
|
||||
fixed duration. Output filename pattern can be set in a fashion
|
||||
similar to @ref{image2}, or by using a @code{strftime} template if
|
||||
the @option{strftime} option is enabled.
|
||||
fixed duration. Output filename pattern can be set in a fashion similar to
|
||||
@ref{image2}.
|
||||
|
||||
@code{stream_segment} is a variant of the muxer used to write to
|
||||
streaming output formats, i.e. which do not require global headers,
|
||||
@@ -1024,18 +982,6 @@ Wrap around segment index once it reaches @var{limit}.
|
||||
@item segment_start_number @var{number}
|
||||
Set the sequence number of the first segment. Defaults to @code{0}.
|
||||
|
||||
@item strftime @var{1|0}
|
||||
Use the @code{strftime} function to define the name of the new
|
||||
segments to write. If this is selected, the output segment name must
|
||||
contain a @code{strftime} function template. Default value is
|
||||
@code{0}.
|
||||
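A hedged sketch (the input name is hypothetical) producing one timestamped segment per minute:
@example
ffmpeg -i in.mkv -map 0 -c copy -f segment -strftime 1 -segment_time 60 out-%Y%m%d-%H%M%S.mkv
@end example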
|
||||
@item break_non_keyframes @var{1|0}
|
||||
If enabled, allow segments to start on frames other than keyframes. This
|
||||
improves behavior on some players when the time between keyframes is
|
||||
inconsistent, but may make things worse on others, and can cause some oddities
|
||||
during seeking. Defaults to @code{0}.
|
||||
|
||||
@item reset_timestamps @var{1|0}
|
||||
Reset timestamps at the begin of each segment, so that each segment
|
||||
will start with near-zero timestamps. It is meant to ease the playback
|
||||
@@ -1216,17 +1162,7 @@ is the @option{global_header} flag.

WebM DASH Manifest muxer.

This muxer implements the WebM DASH Manifest specification to generate the DASH
manifest XML. It also supports manifest generation for DASH live streams.

For more information see:

@itemize @bullet
@item
WebM DASH Specification: @url{https://sites.google.com/a/webmproject.org/wiki/adaptive-streaming/webm-dash-specification}
@item
ISO DASH Specification: @url{http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip}
@end itemize
This muxer implements the WebM DASH Manifest specification to generate the DASH manifest XML.

@subsection Options

@@ -1237,32 +1173,6 @@ This muxer supports the following options:
This option has the following syntax: "id=x,streams=a,b,c id=y,streams=d,e" where x and y are the
unique identifiers of the adaptation sets and a,b,c,d and e are the indices of the corresponding
audio and video streams. Any number of adaptation sets can be added using this option.

@item live
Set this to 1 to create a live stream DASH Manifest. Default: 0.

@item chunk_start_index
Start index of the first chunk. This will go in the @samp{startNumber} attribute
of the @samp{SegmentTemplate} element in the manifest. Default: 0.

@item chunk_duration_ms
Duration of each chunk in milliseconds. This will go in the @samp{duration}
attribute of the @samp{SegmentTemplate} element in the manifest. Default: 1000.

@item utc_timing_url
URL of the page that will return the UTC timestamp in ISO format. This will go
in the @samp{value} attribute of the @samp{UTCTiming} element in the manifest.
Default: None.

@item time_shift_buffer_depth
Smallest time (in seconds) shifting buffer for which any Representation is
guaranteed to be available. This will go in the @samp{timeShiftBufferDepth}
attribute of the @samp{MPD} element. Default: 60.

@item minimum_update_period
Minimum update period (in seconds) of the manifest. This will go in the
@samp{minimumUpdatePeriod} attribute of the @samp{MPD} element. Default: 0.

@end table

@subsection Example
@@ -1278,47 +1188,4 @@ ffmpeg -f webm_dash_manifest -i video1.webm \
manifest.xml
@end example

@section webm_chunk

WebM Live Chunk Muxer.

This muxer writes out WebM headers and chunks as separate files which can be
consumed by clients that support WebM Live streams via DASH.

@subsection Options

This muxer supports the following options:

@table @option
@item chunk_start_index
Index of the first chunk (defaults to 0).

@item header
Filename of the header where the initialization data will be written.

@item audio_chunk_duration
Duration of each audio chunk in milliseconds (defaults to 5000).
@end table

@subsection Example
@example
ffmpeg -f v4l2 -i /dev/video0 \
-f alsa -i hw:0 \
-map 0:0 \
-c:v libvpx-vp9 \
-s 640x360 -keyint_min 30 -g 30 \
-f webm_chunk \
-header webm_live_video_360.hdr \
-chunk_start_index 1 \
webm_live_video_360_%d.chk \
-map 1:0 \
-c:a libvorbis \
-b:a 128k \
-f webm_chunk \
-header webm_live_audio_128.hdr \
-chunk_start_index 1 \
-audio_chunk_duration 1000 \
webm_live_audio_128_%d.chk
@end example

@c man end MUXERS
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle NUT
|
||||
|
||||
|
@@ -191,11 +191,6 @@ __asm__() block.
|
||||
Use external asm (nasm/yasm) or inline asm (__asm__()), do not use intrinsics.
|
||||
The latter requires a good optimizing compiler which gcc is not.
|
||||
|
||||
When debugging a x86 external asm compilation issue, if lost in the macro
|
||||
expansions, add DBG=1 to your make command-line: the input file will be
|
||||
preprocessed, stripped of the debug/empty lines, then compiled, showing the
|
||||
actual lines causing issues.
|
||||
|
||||
Inline asm vs. external asm
|
||||
---------------------------
|
||||
Both inline asm (__asm__("..") in a .c file, handled by a compiler such as gcc)
|
||||
|
@@ -1,5 +1,4 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Platform Specific Information
|
||||
@titlepage
|
||||
@@ -97,9 +96,9 @@ the FFmpeg Windows Help Forum at @url{http://ffmpeg.zeranoe.com/forum/}.
|
||||
|
||||
@section Native Windows compilation using MinGW or MinGW-w64
|
||||
|
||||
FFmpeg can be built to run natively on Windows using the MinGW-w64
|
||||
toolchain. Install the latest versions of MSYS2 and MinGW-w64 from
|
||||
@url{http://msys2.github.io/} and/or @url{http://mingw-w64.sourceforge.net/}.
|
||||
FFmpeg can be built to run natively on Windows using the MinGW or MinGW-w64
|
||||
toolchains. Install the latest versions of MSYS and MinGW or MinGW-w64 from
|
||||
@url{http://www.mingw.org/} or @url{http://mingw-w64.sourceforge.net/}.
|
||||
You can find detailed installation instructions in the download section and
|
||||
the FAQ.
|
||||
|
||||
@@ -107,7 +106,7 @@ Notes:
|
||||
|
||||
@itemize
|
||||
|
||||
@item Building natively using MSYS2 can be sped up by disabling implicit rules
|
||||
@item Building natively using MSYS can be sped up by disabling implicit rules
|
||||
in the Makefile by calling @code{make -r} instead of plain @code{make}. This
|
||||
speed up is close to non-existent for normal one-off builds and is only
|
||||
noticeable when running make for a second time (for example during
|
||||
@@ -134,12 +133,13 @@ You will need the following prerequisites:
|
||||
(if using MSVC 2012 or earlier)
|
||||
@item @uref{http://code.google.com/p/msinttypes/, msinttypes}
|
||||
(if using MSVC 2012 or earlier)
|
||||
@item @uref{http://msys2.github.io/, MSYS2}
|
||||
@item @uref{http://www.mingw.org/, MSYS}
|
||||
@item @uref{http://yasm.tortall.net/, YASM}
|
||||
(Also available via MSYS2's package manager.)
|
||||
@item @uref{http://gnuwin32.sourceforge.net/packages/bc.htm, bc for Windows} if
|
||||
you want to run @uref{fate.html, FATE}.
|
||||
@end itemize
|
||||
|
||||
To set up a proper environment in MSYS2, you need to run @code{msys_shell.bat} from
|
||||
To set up a proper environment in MSYS, you need to run @code{msys.bat} from
|
||||
the Visual Studio or Intel Compiler command prompt.
|
||||
|
||||
Place @code{yasm.exe} somewhere in your @code{PATH}. If using MSVC 2012 or
|
||||
@@ -283,7 +283,7 @@ binutils, gcc4-core, make, git, mingw-runtime, texinfo
|
||||
|
||||
In order to run FATE you will also need the following "Utils" packages:
|
||||
@example
|
||||
diffutils
|
||||
bc, diffutils
|
||||
@end example
|
||||
|
||||
If you want to build FFmpeg with additional libraries, download Cygwin
|
||||
|
@@ -63,7 +63,7 @@ cache:@var{URL}
|
||||
|
||||
Physical concatenation protocol.
|
||||
|
||||
Read and seek from many resources in sequence as if they were
|
||||
Allow to read and seek from many resource in sequence as if they were
|
||||
a unique resource.
|
||||
|
||||
A URL accepted by this protocol has the syntax:
|
||||
@@ -117,7 +117,7 @@ ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP/////////////
|
||||
|
||||
File access protocol.
|
||||
|
||||
Read from or write to a file.
|
||||
Allow to read from or write to a file.
|
||||
|
||||
A file URL can have the form:
|
||||
@example
|
||||
@@ -155,7 +155,7 @@ time, which is valuable for files on slow medium.
|
||||
|
||||
FTP (File Transfer Protocol).
|
||||
|
||||
Read from or write to remote resources using FTP protocol.
|
||||
Allow to read from or write to remote resources using FTP protocol.
|
||||
|
||||
Following syntax is required.
|
||||
@example
|
||||
@@ -277,41 +277,6 @@ Set initial byte offset.
|
||||
|
||||
@item end_offset
|
||||
Try to limit the request to bytes preceding this offset.
|
||||
|
||||
@item method
|
||||
When used as a client option it sets the HTTP method for the request.
|
||||
|
||||
When used as a server option it sets the HTTP method that is going to be
|
||||
expected from the client(s).
|
||||
If the expected and the received HTTP method do not match the client will
|
||||
be given a Bad Request response.
|
||||
When unset the HTTP method is not checked for now. This will be replaced by
|
||||
autodetection in the future.
|
||||
|
||||
@item listen
|
||||
If set to 1 enables experimental HTTP server. This can be used to send data when
|
||||
used as an output option, or read data from a client with HTTP POST when used as
|
||||
an input option.
|
||||
@example
|
||||
# Server side (sending):
|
||||
ffmpeg -i somefile.ogg -c copy -listen 1 -f ogg http://@var{server}:@var{port}
|
||||
|
||||
# Client side (receiving):
|
||||
ffmpeg -i http://@var{server}:@var{port} -c copy somefile.ogg
|
||||
|
||||
# Client can also be done with wget:
|
||||
wget http://@var{server}:@var{port} -O somefile.ogg
|
||||
|
||||
# Server side (receiving):
|
||||
ffmpeg -listen 1 -i http://@var{server}:@var{port} -c copy somefile.ogg
|
||||
|
||||
# Client side (sending):
|
||||
ffmpeg -i somefile.ogg -chunked_post 0 -c copy -f ogg http://@var{server}:@var{port}
|
||||
|
||||
# Client can also be done with wget:
|
||||
wget --post-file=somefile.ogg http://@var{server}:@var{port}
|
||||
@end example
|
||||
|
||||
@end table
|
||||
|
||||
@subsection HTTP Cookies
|
||||
@@ -409,7 +374,7 @@ be seekable, so they will fail with the MD5 output protocol.
|
||||
|
||||
UNIX pipe access protocol.
|
||||
|
||||
Read and write from UNIX pipes.
|
||||
Allow to read and write from UNIX pipes.
|
||||
|
||||
The accepted syntax is:
|
||||
@example
|
||||
@@ -649,7 +614,7 @@ For more information see: @url{http://www.samba.org/}.
|
||||
|
||||
Secure File Transfer Protocol via libssh
|
||||
|
||||
Read from or write to remote resources using SFTP protocol.
|
||||
Allow to read from or write to remote resources using SFTP protocol.
|
||||
|
||||
Following syntax is required.
|
||||
|
||||
@@ -1090,9 +1055,7 @@ subfile,,start,153391104,end,268142592,,:/media/dvd/VIDEO_TS/VTS_08_1.VOB
|
||||
@end example
|
||||
|
||||
Play an AVI file directly from a TAR archive:
|
||||
@example
|
||||
subfile,,start,183241728,end,366490624,,:archive.tar
|
||||
@end example
|
||||
|
||||
@section tcp
|
||||
|
||||
@@ -1118,8 +1081,8 @@ Set raise error timeout, expressed in microseconds.
|
||||
This option is only relevant in read mode: if no data arrived in more
|
||||
than this time interval, raise error.
|
||||
|
||||
@item listen_timeout=@var{milliseconds}
|
||||
Set listen timeout, expressed in milliseconds.
|
||||
@item listen_timeout=@var{microseconds}
|
||||
Set listen timeout, expressed in microseconds.
|
||||
@end table
|
||||
|
||||
The following example shows how to setup a listening TCP connection
|
||||
|
122 doc/t2h.pm
@@ -23,108 +23,8 @@
|
||||
# no navigation elements
|
||||
set_from_init_file('HEADERS', 0);
|
||||
|
||||
sub ffmpeg_heading_command($$$$$)
|
||||
{
|
||||
my $self = shift;
|
||||
my $cmdname = shift;
|
||||
my $command = shift;
|
||||
my $args = shift;
|
||||
my $content = shift;
|
||||
|
||||
my $result = '';
|
||||
|
||||
# not clear that it may really happen
|
||||
if ($self->in_string) {
|
||||
$result .= $self->command_string($command) ."\n" if ($cmdname ne 'node');
|
||||
$result .= $content if (defined($content));
|
||||
return $result;
|
||||
}
|
||||
|
||||
my $element_id = $self->command_id($command);
|
||||
$result .= "<a name=\"$element_id\"></a>\n"
|
||||
if (defined($element_id) and $element_id ne '');
|
||||
|
||||
print STDERR "Process $command "
|
||||
.Texinfo::Structuring::_print_root_command_texi($command)."\n"
|
||||
if ($self->get_conf('DEBUG'));
|
||||
my $element;
|
||||
if ($Texinfo::Common::root_commands{$command->{'cmdname'}}
|
||||
and $command->{'parent'}
|
||||
and $command->{'parent'}->{'type'}
|
||||
and $command->{'parent'}->{'type'} eq 'element') {
|
||||
$element = $command->{'parent'};
|
||||
}
|
||||
if ($element) {
|
||||
$result .= &{$self->{'format_element_header'}}($self, $cmdname,
|
||||
$command, $element);
|
||||
}
|
||||
|
||||
my $heading_level;
|
||||
# node is used as heading if there is nothing else.
|
||||
if ($cmdname eq 'node') {
|
||||
if (!$element or (!$element->{'extra'}->{'section'}
|
||||
and $element->{'extra'}->{'node'}
|
||||
and $element->{'extra'}->{'node'} eq $command
|
||||
# bogus node may not have been normalized
|
||||
and defined($command->{'extra'}->{'normalized'}))) {
|
||||
if ($command->{'extra'}->{'normalized'} eq 'Top') {
|
||||
$heading_level = 0;
|
||||
} else {
|
||||
$heading_level = 3;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
$heading_level = $command->{'level'};
|
||||
}
|
||||
|
||||
my $heading = $self->command_text($command);
|
||||
# $heading not defined may happen if the command is a @node, for example
|
||||
# if there is an error in the node.
|
||||
if (defined($heading) and $heading ne '' and defined($heading_level)) {
|
||||
|
||||
if ($Texinfo::Common::root_commands{$cmdname}
|
||||
and $Texinfo::Common::sectioning_commands{$cmdname}) {
|
||||
my $content_href = $self->command_contents_href($command, 'contents',
|
||||
$self->{'current_filename'});
|
||||
if ($content_href) {
|
||||
my $this_href = $content_href =~ s/^\#toc-/\#/r;
|
||||
$heading .= '<span class="pull-right">'.
|
||||
'<a class="anchor hidden-xs" '.
|
||||
"href=\"$this_href\" aria-hidden=\"true\">".
|
||||
($ENV{"FA_ICONS"} ? '<i class="fa fa-link"></i>'
|
||||
: '#').
|
||||
'</a> '.
|
||||
'<a class="anchor hidden-xs"'.
|
||||
"href=\"$content_href\" aria-hidden=\"true\">".
|
||||
($ENV{"FA_ICONS"} ? '<i class="fa fa-navicon"></i>'
|
||||
: 'TOC').
|
||||
'</a>'.
|
||||
'</span>';
|
||||
}
|
||||
}
|
||||
|
||||
if ($self->in_preformatted()) {
|
||||
$result .= $heading."\n";
|
||||
} else {
|
||||
# if the level was changed, set the command name right
|
||||
if ($cmdname ne 'node'
|
||||
and $heading_level ne $Texinfo::Common::command_structuring_level{$cmdname}) {
|
||||
$cmdname
|
||||
= $Texinfo::Common::level_to_structuring_command{$cmdname}->[$heading_level];
|
||||
}
|
||||
$result .= &{$self->{'format_heading_text'}}(
|
||||
$self, $cmdname, $heading,
|
||||
$heading_level +
|
||||
$self->get_conf('CHAPTER_HEADER_LEVEL') - 1, $command);
|
||||
}
|
||||
}
|
||||
$result .= $content if (defined($content));
|
||||
return $result;
|
||||
}
|
||||
|
||||
foreach my $command (keys(%Texinfo::Common::sectioning_commands), 'node') {
|
||||
texinfo_register_command_formatting($command, \&ffmpeg_heading_command);
|
||||
}
|
||||
# TOC and Chapter headings link
|
||||
set_from_init_file('TOC_LINKS', 1);
|
||||
|
||||
# print the TOC where @contents is used
|
||||
set_from_init_file('INLINE_CONTENTS', 1);
|
||||
@@ -169,7 +69,6 @@ EOT
|
||||
|
||||
my $head2 = $ENV{"FFMPEG_HEADER2"} || <<EOT;
|
||||
</title>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1.0">
|
||||
<link rel="stylesheet" type="text/css" href="bootstrap.min.css">
|
||||
<link rel="stylesheet" type="text/css" href="style.min.css">
|
||||
</head>
|
||||
@@ -186,23 +85,6 @@ EOT
|
||||
}
|
||||
texinfo_register_formatting_function('begin_file', \&ffmpeg_begin_file);
|
||||
|
||||
sub ffmpeg_program_string($)
|
||||
{
|
||||
my $self = shift;
|
||||
if (defined($self->get_conf('PROGRAM'))
|
||||
and $self->get_conf('PROGRAM') ne ''
|
||||
and defined($self->get_conf('PACKAGE_URL'))) {
|
||||
return $self->convert_tree(
|
||||
$self->gdt('This document was generated using @uref{{program_homepage}, @emph{{program}}}.',
|
||||
{ 'program_homepage' => $self->get_conf('PACKAGE_URL'),
|
||||
'program' => $self->get_conf('PROGRAM') }));
|
||||
} else {
|
||||
return $self->convert_tree(
|
||||
$self->gdt('This document was generated automatically.'));
|
||||
}
|
||||
}
|
||||
texinfo_register_formatting_function('program_string', \&ffmpeg_program_string);
|
||||
|
||||
# Customized file ending
|
||||
sub ffmpeg_end_file($)
|
||||
{
|
||||
|
@@ -166,7 +166,7 @@ INF: while(<$inf>) {
|
||||
if ($ended =~ /^(?:ifset|ifclear|ignore|menu|iftex|ifhtml|ifnothtml)$/) {
|
||||
$skipping = pop @skstack;
|
||||
next;
|
||||
} elsif ($ended =~ /^(?:example|smallexample|verbatim|display)$/) {
|
||||
} elsif ($ended =~ /^(?:example|smallexample|display)$/) {
|
||||
$shift = "";
|
||||
$_ = ""; # need a paragraph break
|
||||
} elsif ($ended =~ /^(?:itemize|enumerate|(?:multi|[fv])?table)$/) {
|
||||
@@ -290,7 +290,7 @@ INF: while(<$inf>) {
|
||||
$_ = "\n=over 4\n";
|
||||
};
|
||||
|
||||
/^\@((?:small)?example|verbatim|display)/ and do {
|
||||
/^\@((?:small)?example|display)/ and do {
|
||||
push @endwstack, $endw;
|
||||
$endw = $1;
|
||||
$shift = "\t";
|
||||
|
@@ -12,17 +12,17 @@ explicitly specified. The following rules are applied:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
@samp{'} and @samp{\} are special characters (respectively used for
|
||||
@code{'} and @code{\} are special characters (respectively used for
|
||||
quoting and escaping). In addition to them, there might be other
|
||||
special characters depending on the specific syntax where the escaping
|
||||
and quoting are employed.
|
||||
|
||||
@item
|
||||
A special character is escaped by prefixing it with a @samp{\}.
|
||||
A special character is escaped by prefixing it with a '\'.
|
||||
|
||||
@item
|
||||
All characters enclosed between @samp{''} are included literally in the
|
||||
parsed string. The quote character @samp{'} itself cannot be quoted,
|
||||
All characters enclosed between '' are included literally in the
|
||||
parsed string. The quote character @code{'} itself cannot be quoted,
|
||||
so you may need to close the quote and escape it.
|
||||
|
||||
@item
|
||||
@@ -71,7 +71,7 @@ Escaping and quoting can be mixed together:
|
||||
@end example
|
||||
|
||||
@item
|
||||
To include a literal @samp{\} you can use either escaping or quoting:
|
||||
To include a literal @code{\} you can use either escaping or quoting:
|
||||
@example
|
||||
'c:\foo' can be written as c:\\foo
|
||||
@end example
|
||||
@@ -844,7 +844,7 @@ Return 1.0 if @var{x} is +/-INFINITY, 0.0 otherwise.
|
||||
Return 1.0 if @var{x} is NAN, 0.0 otherwise.
|
||||
|
||||
@item ld(var)
|
||||
Load the value of the internal variable with number
|
||||
Allow to load the value of the internal variable with number
|
||||
@var{var}, which was previously stored with st(@var{var}, @var{expr}).
|
||||
The function returns the loaded value.
|
||||
|
||||
@@ -861,7 +861,7 @@ Return 1 if @var{x} is lesser than or equal to @var{y}, 0 otherwise.
|
||||
Return the maximum between @var{x} and @var{y}.
|
||||
|
||||
@item min(x, y)
|
||||
Return the minimum between @var{x} and @var{y}.
|
||||
Return the maximum between @var{x} and @var{y}.
|
||||
|
||||
@item mod(x, y)
|
||||
Compute the remainder of division of @var{x} by @var{y}.
|
||||
@@ -912,7 +912,7 @@ Compute the square root of @var{expr}. This is equivalent to
|
||||
Compute expression @code{1/(1 + exp(4*x))}.
|
||||
|
||||
@item st(var, expr)
|
||||
Store the value of the expression @var{expr} in an internal
|
||||
Allow to store the value of the expression @var{expr} in an internal
|
||||
variable. @var{var} specifies the number of the variable where to
|
||||
store the value, and it is a value ranging from 0 to 9. The function
|
||||
returns the value stored in the internal variable.
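As a hedged illustration of @code{st()} and @code{ld()} used together (any
expression-accepting option evaluates this the same way; the stored value here
is arbitrary), one could square a stored value:

@example
st(0, 3*4); ld(0)*ld(0)
@end example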
|
||||
|
472 ffmpeg.c
@@ -60,9 +60,10 @@
|
||||
#include "libavutil/bprint.h"
|
||||
#include "libavutil/time.h"
|
||||
#include "libavutil/threadmessage.h"
|
||||
#include "libavcodec/mathops.h"
|
||||
#include "libavformat/os_support.h"
|
||||
|
||||
#include "libavformat/ffm.h" // not public API
|
||||
|
||||
# include "libavfilter/avcodec.h"
|
||||
# include "libavfilter/avfilter.h"
|
||||
# include "libavfilter/buffersrc.h"
|
||||
@@ -154,9 +155,8 @@ static struct termios oldtty;
|
||||
static int restore_tty;
|
||||
#endif
|
||||
|
||||
#if HAVE_PTHREADS
|
||||
static void free_input_threads(void);
|
||||
#endif
|
||||
|
||||
|
||||
/* sub2video hack:
|
||||
Convert subtitles to video with alpha to insert them in filter graphs.
|
||||
@@ -352,6 +352,7 @@ void term_init(void)
|
||||
signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
|
||||
}
|
||||
#endif
|
||||
avformat_network_deinit();
|
||||
|
||||
signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
|
||||
signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
|
||||
@@ -456,12 +457,9 @@ static void ffmpeg_cleanup(int ret)
|
||||
/* close files */
|
||||
for (i = 0; i < nb_output_files; i++) {
|
||||
OutputFile *of = output_files[i];
|
||||
AVFormatContext *s;
|
||||
if (!of)
|
||||
continue;
|
||||
s = of->ctx;
|
||||
if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
|
||||
avio_closep(&s->pb);
|
||||
AVFormatContext *s = of->ctx;
|
||||
if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE) && s->pb)
|
||||
avio_close(s->pb);
|
||||
avformat_free_context(s);
|
||||
av_dict_free(&of->opts);
|
||||
|
||||
@@ -469,12 +467,7 @@ static void ffmpeg_cleanup(int ret)
|
||||
}
|
||||
for (i = 0; i < nb_output_streams; i++) {
|
||||
OutputStream *ost = output_streams[i];
|
||||
AVBitStreamFilterContext *bsfc;
|
||||
|
||||
if (!ost)
|
||||
continue;
|
||||
|
||||
bsfc = ost->bitstream_filters;
|
||||
AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
|
||||
while (bsfc) {
|
||||
AVBitStreamFilterContext *next = bsfc->next;
|
||||
av_bitstream_filter_close(bsfc);
|
||||
@@ -523,7 +516,7 @@ static void ffmpeg_cleanup(int ret)
|
||||
|
||||
if (vstats_file)
|
||||
fclose(vstats_file);
|
||||
av_freep(&vstats_filename);
|
||||
av_free(vstats_filename);
|
||||
|
||||
av_freep(&input_streams);
|
||||
av_freep(&input_files);
|
||||
@@ -595,7 +588,7 @@ static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream,
|
||||
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
|
||||
{
|
||||
AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
|
||||
AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
|
||||
AVCodecContext *avctx = ost->st->codec;
|
||||
int ret;
|
||||
|
||||
if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
|
||||
@@ -658,7 +651,6 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
|
||||
if (!new_pkt.buf)
|
||||
exit_program(1);
|
||||
} else if (a < 0) {
|
||||
new_pkt = *pkt;
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
|
||||
bsfc->filter->name, pkt->stream_index,
|
||||
avctx->codec ? avctx->codec->name : "copy");
|
||||
@@ -827,10 +819,6 @@ static void do_subtitle_out(AVFormatContext *s,
|
||||
|
||||
if (!subtitle_out) {
|
||||
subtitle_out = av_malloc(subtitle_out_max_size);
|
||||
if (!subtitle_out) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
|
||||
exit_program(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* Note: DVB subtitle need one packet to draw them and one other
|
||||
@@ -891,29 +879,23 @@ static void do_subtitle_out(AVFormatContext *s,
|
||||
|
||||
static void do_video_out(AVFormatContext *s,
|
||||
OutputStream *ost,
|
||||
AVFrame *next_picture,
|
||||
double sync_ipts)
|
||||
AVFrame *next_picture)
|
||||
{
|
||||
int ret, format_video_sync;
|
||||
AVPacket pkt;
|
||||
AVCodecContext *enc = ost->enc_ctx;
|
||||
AVCodecContext *mux_enc = ost->st->codec;
|
||||
int nb_frames, nb0_frames, i;
|
||||
double delta, delta0;
|
||||
double sync_ipts, delta, delta0;
|
||||
double duration = 0;
|
||||
int frame_size = 0;
|
||||
InputStream *ist = NULL;
|
||||
AVFilterContext *filter = ost->filter->filter;
|
||||
|
||||
if (ost->source_index >= 0)
|
||||
ist = input_streams[ost->source_index];
|
||||
|
||||
if (filter->inputs[0]->frame_rate.num > 0 &&
|
||||
filter->inputs[0]->frame_rate.den > 0)
|
||||
duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
|
||||
|
||||
if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
|
||||
duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
|
||||
duration = 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base));
|
||||
|
||||
if (!ost->filters_script &&
|
||||
!ost->filters &&
|
||||
@@ -923,93 +905,76 @@ static void do_video_out(AVFormatContext *s,
|
||||
duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
|
||||
}
|
||||
|
||||
if (!next_picture) {
|
||||
//end, flushing
|
||||
nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
|
||||
ost->last_nb0_frames[1],
|
||||
ost->last_nb0_frames[2]);
|
||||
} else {
|
||||
delta0 = sync_ipts - ost->sync_opts;
|
||||
delta = delta0 + duration;
|
||||
sync_ipts = next_picture->pts;
|
||||
delta0 = sync_ipts - ost->sync_opts;
|
||||
delta = delta0 + duration;
|
||||
|
||||
/* by default, we output a single frame */
|
||||
nb0_frames = 0;
|
||||
nb_frames = 1;
|
||||
/* by default, we output a single frame */
|
||||
nb0_frames = 0;
|
||||
nb_frames = 1;
|
||||
|
||||
format_video_sync = video_sync_method;
|
||||
if (format_video_sync == VSYNC_AUTO) {
|
||||
if(!strcmp(s->oformat->name, "avi")) {
|
||||
format_video_sync = VSYNC_VFR;
|
||||
} else
|
||||
format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
|
||||
if ( ist
|
||||
&& format_video_sync == VSYNC_CFR
|
||||
&& input_files[ist->file_index]->ctx->nb_streams == 1
|
||||
&& input_files[ist->file_index]->input_ts_offset == 0) {
|
||||
format_video_sync = VSYNC_VSCFR;
|
||||
}
|
||||
if (format_video_sync == VSYNC_CFR && copy_ts) {
|
||||
format_video_sync = VSYNC_VSCFR;
|
||||
}
|
||||
format_video_sync = video_sync_method;
|
||||
if (format_video_sync == VSYNC_AUTO) {
|
||||
if(!strcmp(s->oformat->name, "avi")) {
|
||||
format_video_sync = VSYNC_VFR;
|
||||
} else
|
||||
format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
|
||||
if ( ist
|
||||
&& format_video_sync == VSYNC_CFR
|
||||
&& input_files[ist->file_index]->ctx->nb_streams == 1
|
||||
&& input_files[ist->file_index]->input_ts_offset == 0) {
|
||||
format_video_sync = VSYNC_VSCFR;
|
||||
}
|
||||
|
||||
if (delta0 < 0 &&
|
||||
delta > 0 &&
|
||||
format_video_sync != VSYNC_PASSTHROUGH &&
|
||||
format_video_sync != VSYNC_DROP) {
|
||||
double cor = FFMIN(-delta0, duration);
|
||||
if (delta0 < -0.6) {
|
||||
av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
|
||||
} else
|
||||
av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
|
||||
sync_ipts += cor;
|
||||
duration -= cor;
|
||||
delta0 += cor;
|
||||
if (format_video_sync == VSYNC_CFR && copy_ts) {
|
||||
format_video_sync = VSYNC_VSCFR;
|
||||
}
|
||||
}
|
||||
|
||||
switch (format_video_sync) {
|
||||
case VSYNC_VSCFR:
|
||||
if (ost->frame_number == 0 && delta - duration >= 0.5) {
|
||||
av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
|
||||
delta = duration;
|
||||
delta0 = 0;
|
||||
ost->sync_opts = lrint(sync_ipts);
|
||||
}
|
||||
case VSYNC_CFR:
|
||||
// FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
|
||||
if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
|
||||
nb_frames = 0;
|
||||
} else if (delta < -1.1)
|
||||
nb_frames = 0;
|
||||
else if (delta > 1.1) {
|
||||
nb_frames = lrintf(delta);
|
||||
if (delta0 > 1.1)
|
||||
nb0_frames = lrintf(delta0 - 0.6);
|
||||
}
|
||||
break;
|
||||
case VSYNC_VFR:
|
||||
if (delta <= -0.6)
|
||||
nb_frames = 0;
|
||||
else if (delta > 0.6)
|
||||
ost->sync_opts = lrint(sync_ipts);
|
||||
break;
|
||||
case VSYNC_DROP:
|
||||
case VSYNC_PASSTHROUGH:
|
||||
if (delta0 < 0 &&
|
||||
delta > 0 &&
|
||||
format_video_sync != VSYNC_PASSTHROUGH &&
|
||||
format_video_sync != VSYNC_DROP) {
|
||||
double cor = FFMIN(-delta0, duration);
|
||||
av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
|
||||
sync_ipts += cor;
|
||||
duration -= cor;
|
||||
delta0 += cor;
|
||||
}
|
||||
|
||||
switch (format_video_sync) {
|
||||
case VSYNC_VSCFR:
|
||||
if (ost->frame_number == 0 && delta - duration >= 0.5) {
|
||||
av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
|
||||
delta = duration;
|
||||
delta0 = 0;
|
||||
ost->sync_opts = lrint(sync_ipts);
|
||||
break;
|
||||
default:
|
||||
av_assert0(0);
|
||||
}
|
||||
case VSYNC_CFR:
|
||||
// FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
|
||||
if (delta < -1.1)
|
||||
nb_frames = 0;
|
||||
else if (delta > 1.1) {
|
||||
nb_frames = lrintf(delta);
|
||||
if (delta0 > 1.1)
|
||||
nb0_frames = lrintf(delta0 - 0.6);
|
||||
}
|
||||
break;
|
||||
case VSYNC_VFR:
|
||||
if (delta <= -0.6)
|
||||
nb_frames = 0;
|
||||
else if (delta > 0.6)
|
||||
ost->sync_opts = lrint(sync_ipts);
|
||||
break;
|
||||
case VSYNC_DROP:
|
||||
case VSYNC_PASSTHROUGH:
|
||||
ost->sync_opts = lrint(sync_ipts);
|
||||
break;
|
||||
default:
|
||||
av_assert0(0);
|
||||
}
|
||||
|
||||
nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
|
||||
nb0_frames = FFMIN(nb0_frames, nb_frames);
|
||||
|
||||
memmove(ost->last_nb0_frames + 1,
|
||||
ost->last_nb0_frames,
|
||||
sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
|
||||
ost->last_nb0_frames[0] = nb0_frames;
|
||||
|
||||
if (nb0_frames == 0 && ost->last_droped) {
|
||||
nb_frames_drop++;
|
||||
av_log(NULL, AV_LOG_VERBOSE,
|
||||
@@ -1025,7 +990,7 @@ static void do_video_out(AVFormatContext *s,
|
||||
nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
|
||||
av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
|
||||
}
|
||||
ost->last_droped = nb_frames == nb0_frames && next_picture;
|
||||
ost->last_droped = nb_frames == nb0_frames;
|
||||
|
||||
/* duplicates frame if needed */
|
||||
for (i = 0; i < nb_frames; i++) {
|
||||
@@ -1039,9 +1004,6 @@ static void do_video_out(AVFormatContext *s,
|
||||
} else
|
||||
in_picture = next_picture;
|
||||
|
||||
if (!in_picture)
|
||||
return;
|
||||
|
||||
in_picture->pts = ost->sync_opts;
|
||||
|
||||
#if 1
|
||||
@@ -1113,10 +1075,6 @@ static void do_video_out(AVFormatContext *s,
|
||||
}
|
||||
|
||||
ost->forced_keyframes_expr_const_values[FKF_N] += 1;
|
||||
} else if ( ost->forced_keyframes
|
||||
&& !strncmp(ost->forced_keyframes, "source", 6)
|
||||
&& in_picture->key_frame==1) {
|
||||
forced_keyframe = 1;
|
||||
}
|
||||
|
||||
if (forced_keyframe) {
|
||||
@@ -1185,10 +1143,7 @@ static void do_video_out(AVFormatContext *s,
|
||||
if (!ost->last_frame)
|
||||
ost->last_frame = av_frame_alloc();
|
||||
av_frame_unref(ost->last_frame);
|
||||
if (next_picture && ost->last_frame)
|
||||
av_frame_ref(ost->last_frame, next_picture);
|
||||
else
|
||||
av_frame_free(&ost->last_frame);
|
||||
av_frame_ref(ost->last_frame, next_picture);
|
||||
}
|
||||
|
||||
static double psnr(double d)
|
||||
@@ -1214,8 +1169,8 @@ static void do_video_stats(OutputStream *ost, int frame_size)
|
||||
enc = ost->enc_ctx;
|
||||
if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
frame_number = ost->st->nb_frames;
|
||||
fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame ? enc->coded_frame->quality / (float)FF_QP2LAMBDA : 0);
|
||||
if (enc->coded_frame && (enc->flags&CODEC_FLAG_PSNR))
|
||||
fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
|
||||
if (enc->flags&CODEC_FLAG_PSNR)
|
||||
fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
|
||||
|
||||
fprintf(vstats_file,"f_size= %6d ", frame_size);
|
||||
@@ -1228,7 +1183,7 @@ static void do_video_stats(OutputStream *ost, int frame_size)
|
||||
avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
|
||||
fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
|
||||
(double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
|
||||
fprintf(vstats_file, "type= %c\n", enc->coded_frame ? av_get_picture_type_char(enc->coded_frame->pict_type) : 'I');
|
||||
fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1251,10 +1206,11 @@ static void finish_output_stream(OutputStream *ost)
|
||||
*
|
||||
* @return 0 for success, <0 for severe errors
|
||||
*/
|
||||
static int reap_filters(int flush)
|
||||
static int reap_filters(void)
|
||||
{
|
||||
AVFrame *filtered_frame = NULL;
|
||||
int i;
|
||||
int64_t frame_pts;
|
||||
|
||||
/* Reap all buffers present in the buffer sinks */
|
||||
for (i = 0; i < nb_output_streams; i++) {
|
||||
@@ -1274,16 +1230,12 @@ static int reap_filters(int flush)
|
||||
filtered_frame = ost->filtered_frame;
|
||||
|
||||
while (1) {
|
||||
double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
|
||||
ret = av_buffersink_get_frame_flags(filter, filtered_frame,
|
||||
AV_BUFFERSINK_FLAG_NO_REQUEST);
|
||||
if (ret < 0) {
|
||||
if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
|
||||
av_log(NULL, AV_LOG_WARNING,
|
||||
"Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
|
||||
} else if (flush && ret == AVERROR_EOF) {
|
||||
if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
|
||||
do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
|
||||
}
|
||||
break;
|
||||
}
|
||||
@@ -1291,20 +1243,10 @@ static int reap_filters(int flush)
|
||||
av_frame_unref(filtered_frame);
|
||||
continue;
|
||||
}
|
||||
frame_pts = AV_NOPTS_VALUE;
|
||||
if (filtered_frame->pts != AV_NOPTS_VALUE) {
|
||||
int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
|
||||
AVRational tb = enc->time_base;
|
||||
int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
|
||||
|
||||
tb.den <<= extra_bits;
|
||||
float_pts =
|
||||
av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
|
||||
av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
|
||||
float_pts /= 1 << extra_bits;
|
||||
// avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
|
||||
float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
|
||||
|
||||
filtered_frame->pts =
|
||||
filtered_frame->pts = frame_pts =
|
||||
av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
|
||||
av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
|
||||
}
|
||||
@@ -1313,19 +1255,20 @@ static int reap_filters(int flush)
|
||||
|
||||
switch (filter->inputs[0]->type) {
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
filtered_frame->pts = frame_pts;
|
||||
if (!ost->frame_aspect_ratio.num)
|
||||
enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
|
||||
|
||||
if (debug_ts) {
|
||||
av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
|
||||
av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s time_base:%d/%d\n",
|
||||
av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
|
||||
float_pts,
|
||||
enc->time_base.num, enc->time_base.den);
|
||||
}
|
||||
|
||||
do_video_out(of->ctx, ost, filtered_frame, float_pts);
|
||||
do_video_out(of->ctx, ost, filtered_frame);
|
||||
break;
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
filtered_frame->pts = frame_pts;
|
||||
if (!(enc->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
|
||||
enc->channels != av_frame_get_channels(filtered_frame)) {
|
||||
av_log(NULL, AV_LOG_ERROR,
|
||||
@@ -1353,7 +1296,6 @@ static void print_final_stats(int64_t total_size)
|
||||
uint64_t data_size = 0;
|
||||
float percent = -1.0;
|
||||
int i, j;
|
||||
int pass1_used = 1;
|
||||
|
||||
for (i = 0; i < nb_output_streams; i++) {
|
||||
OutputStream *ost = output_streams[i];
|
||||
@@ -1365,9 +1307,6 @@ static void print_final_stats(int64_t total_size)
|
||||
}
|
||||
extra_size += ost->enc_ctx->extradata_size;
|
||||
data_size += ost->data_size;
|
||||
if ( (ost->enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
|
||||
!= CODEC_FLAG_PASS1)
|
||||
pass1_used = 0;
|
||||
}
|
||||
|
||||
if (data_size && total_size>0 && total_size >= data_size)
|
||||
@@ -1454,12 +1393,7 @@ static void print_final_stats(int64_t total_size)
|
||||
total_packets, total_size);
|
||||
}
|
||||
if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
|
||||
av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
|
||||
if (pass1_used) {
|
||||
av_log(NULL, AV_LOG_WARNING, "\n");
|
||||
} else {
|
||||
av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
|
||||
}
|
||||
av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)\n");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1572,8 +1506,8 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
||||
nb_frames_drop += ost->last_droped;
|
||||
}
|
||||
|
||||
secs = FFABS(pts) / AV_TIME_BASE;
|
||||
us = FFABS(pts) % AV_TIME_BASE;
|
||||
secs = pts / AV_TIME_BASE;
|
||||
us = pts % AV_TIME_BASE;
|
||||
mins = secs / 60;
|
||||
secs %= 60;
|
||||
hours = mins / 60;
|
||||
@@ -1585,20 +1519,13 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
||||
"size=N/A time=");
|
||||
else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
|
||||
"size=%8.0fkB time=", total_size / 1024.0);
|
||||
if (pts < 0)
|
||||
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
|
||||
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
|
||||
"%02d:%02d:%02d.%02d ", hours, mins, secs,
|
||||
(100 * us) / AV_TIME_BASE);
|
||||
|
||||
if (bitrate < 0) {
|
||||
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
|
||||
av_bprintf(&buf_script, "bitrate=N/A\n");
|
||||
}else{
|
||||
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
|
||||
av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
|
||||
}
|
||||
|
||||
if (bitrate < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
|
||||
"bitrate=N/A");
|
||||
else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
|
||||
"bitrate=%6.1fkbits/s", bitrate);
|
||||
if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
|
||||
else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
|
||||
av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
|
||||
@@ -1629,7 +1556,8 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
||||
avio_flush(progress_avio);
|
||||
av_bprint_finalize(&buf_script, NULL);
|
||||
if (is_last_report) {
|
||||
avio_closep(&progress_avio);
|
||||
avio_close(progress_avio);
|
||||
progress_avio = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1800,21 +1728,17 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
|
||||
|
||||
opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
|
||||
opkt.flags = pkt->flags;
|
||||
|
||||
// FIXME remove the following 2 lines they shall be replaced by the bitstream filters
|
||||
if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
|
||||
&& ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
|
||||
&& ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
|
||||
&& ost->st->codec->codec_id != AV_CODEC_ID_VC1
|
||||
if ( ost->enc_ctx->codec_id != AV_CODEC_ID_H264
|
||||
&& ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
|
||||
&& ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
|
||||
&& ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
|
||||
) {
|
||||
int ret = av_parser_change(ost->parser, ost->st->codec,
|
||||
if (av_parser_change(ost->parser, ost->st->codec,
|
||||
&opkt.data, &opkt.size,
|
||||
pkt->data, pkt->size,
|
||||
pkt->flags & AV_PKT_FLAG_KEY);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL, "av_parser_change failed\n");
|
||||
exit_program(1);
|
||||
}
|
||||
if (ret) {
|
||||
pkt->flags & AV_PKT_FLAG_KEY)) {
|
||||
opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
|
||||
if (!opkt.buf)
|
||||
exit_program(1);
|
||||
@@ -1825,15 +1749,9 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
|
||||
}
|
||||
av_copy_packet_side_data(&opkt, pkt);
|
||||
|
||||
if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
|
||||
ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
|
||||
(of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
|
||||
if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
|
||||
/* store AVPicture in AVPacket, as expected by the output format */
|
||||
int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed\n");
|
||||
exit_program(1);
|
||||
}
|
||||
avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
|
||||
opkt.data = (uint8_t *)&pict;
|
||||
opkt.size = sizeof(AVPicture);
|
||||
opkt.flags |= AV_PKT_FLAG_KEY;
|
||||
@@ -1884,12 +1802,9 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (*got_output || ret<0)
|
||||
if (*got_output || ret<0 || pkt->size)
|
||||
decode_error_stat[ret<0] ++;
|
||||
|
||||
if (ret < 0 && exit_on_error)
|
||||
exit_program(1);
|
||||
|
||||
if (!*got_output || ret < 0) {
|
||||
if (!pkt->size) {
|
||||
for (i = 0; i < ist->nb_filters; i++)
|
||||
@@ -2032,12 +1947,9 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
|
||||
);
|
||||
}
|
||||
|
||||
if (*got_output || ret<0)
|
||||
if (*got_output || ret<0 || pkt->size)
|
||||
decode_error_stat[ret<0] ++;
|
||||
|
||||
if (ret < 0 && exit_on_error)
|
||||
exit_program(1);
|
||||
|
||||
if (*got_output && ret >= 0) {
|
||||
if (ist->dec_ctx->width != decoded_frame->width ||
|
||||
ist->dec_ctx->height != decoded_frame->height ||
|
||||
@@ -2153,12 +2065,9 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
|
||||
int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
|
||||
&subtitle, got_output, pkt);
|
||||
|
||||
if (*got_output || ret<0)
|
||||
if (*got_output || ret<0 || pkt->size)
|
||||
decode_error_stat[ret<0] ++;
|
||||
|
||||
if (ret < 0 && exit_on_error)
|
||||
exit_program(1);
|
||||
|
||||
if (ret < 0 || !*got_output) {
|
||||
if (!pkt->size)
|
||||
sub2video_flush(ist);
|
||||
@@ -2357,38 +2266,16 @@ static void print_sdp(void)
|
||||
{
|
||||
char sdp[16384];
|
||||
int i;
|
||||
int j;
|
||||
AVIOContext *sdp_pb;
|
||||
AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
|
||||
|
||||
if (!avc)
|
||||
exit_program(1);
|
||||
for (i = 0, j = 0; i < nb_output_files; i++) {
|
||||
if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
|
||||
avc[j] = output_files[i]->ctx;
|
||||
j++;
|
||||
}
|
||||
}
|
||||
for (i = 0; i < nb_output_files; i++)
|
||||
avc[i] = output_files[i]->ctx;
|
||||
|
||||
if (!j)
|
||||
goto fail;
|
||||
|
||||
av_sdp_create(avc, j, sdp, sizeof(sdp));
|
||||
|
||||
if (!sdp_filename) {
|
||||
printf("SDP:\n%s\n", sdp);
|
||||
fflush(stdout);
|
||||
} else {
|
||||
if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
|
||||
} else {
|
||||
avio_printf(sdp_pb, "SDP:\n%s", sdp);
|
||||
avio_closep(&sdp_pb);
|
||||
av_freep(&sdp_filename);
|
||||
}
|
||||
}
|
||||
|
||||
fail:
|
||||
av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
|
||||
printf("SDP:\n%s\n", sdp);
|
||||
fflush(stdout);
|
||||
av_freep(&avc);
|
||||
}
|
||||
|
||||
@@ -2427,7 +2314,7 @@ static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat
|
||||
"%s hwaccel requested for input stream #%d:%d, "
|
||||
"but cannot be initialized.\n", hwaccel->name,
|
||||
ist->file_index, ist->st->index);
|
||||
return AV_PIX_FMT_NONE;
|
||||
exit_program(1);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
@@ -2634,7 +2521,7 @@ static int transcode_init(void)
|
||||
AVFormatContext *oc;
|
||||
OutputStream *ost;
|
||||
InputStream *ist;
|
||||
char error[1024] = {0};
|
||||
char error[1024];
|
||||
int want_sdp = 1;
|
||||
|
||||
for (i = 0; i < nb_filtergraphs; i++) {
|
||||
@@ -2686,7 +2573,7 @@ static int transcode_init(void)
|
||||
if (ost->attachment_filename)
|
||||
continue;
|
||||
|
||||
enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
|
||||
enc_ctx = ost->enc_ctx;
|
||||
|
||||
if (ist) {
|
||||
dec_ctx = ist->dec_ctx;
|
||||
@@ -2733,13 +2620,11 @@ static int transcode_init(void)
|
||||
enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
|
||||
enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
|
||||
enc_ctx->field_order = dec_ctx->field_order;
|
||||
if (dec_ctx->extradata_size) {
|
||||
enc_ctx->extradata = av_mallocz(extra_size);
|
||||
if (!enc_ctx->extradata) {
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
|
||||
enc_ctx->extradata = av_mallocz(extra_size);
|
||||
if (!enc_ctx->extradata) {
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
|
||||
enc_ctx->extradata_size= dec_ctx->extradata_size;
|
||||
enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
|
||||
|
||||
@@ -2800,13 +2685,9 @@ static int transcode_init(void)
|
||||
if (!ost->st->side_data)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
ost->st->nb_side_data = 0;
|
||||
for (j = 0; j < ist->st->nb_side_data; j++) {
|
||||
const AVPacketSideData *sd_src = &ist->st->side_data[j];
|
||||
AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
|
||||
|
||||
if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
|
||||
continue;
|
||||
AVPacketSideData *sd_dst = &ost->st->side_data[j];
|
||||
|
||||
sd_dst->data = av_malloc(sd_src->size);
|
||||
if (!sd_dst->data)
|
||||
@@ -2859,13 +2740,11 @@ static int transcode_init(void)
|
||||
sar = dec_ctx->sample_aspect_ratio;
|
||||
ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
|
||||
ost->st->avg_frame_rate = ist->st->avg_frame_rate;
|
||||
ost->st->r_frame_rate = ist->st->r_frame_rate;
|
||||
break;
|
||||
case AVMEDIA_TYPE_SUBTITLE:
|
||||
enc_ctx->width = dec_ctx->width;
|
||||
enc_ctx->height = dec_ctx->height;
|
||||
break;
|
||||
case AVMEDIA_TYPE_UNKNOWN:
|
||||
case AVMEDIA_TYPE_DATA:
|
||||
case AVMEDIA_TYPE_ATTACHMENT:
|
||||
break;
|
||||
@@ -2901,7 +2780,7 @@ static int transcode_init(void)
|
||||
}
|
||||
|
||||
if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
if (!ost->frame_rate.num)
|
||||
if (ost->filter && !ost->frame_rate.num)
|
||||
ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
|
||||
if (ist && !ost->frame_rate.num)
|
||||
ost->frame_rate = ist->framerate;
|
||||
@@ -2921,7 +2800,6 @@ static int transcode_init(void)
|
||||
int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
|
||||
ost->frame_rate = ost->enc->supported_framerates[idx];
|
||||
}
|
||||
// reduce frame rate for mpeg4 to be within the spec limits
|
||||
if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
|
||||
av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
|
||||
ost->frame_rate.num, ost->frame_rate.den, 65535);
|
||||
@@ -2938,7 +2816,7 @@ static int transcode_init(void)
|
||||
break;
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
enc_ctx->time_base = av_inv_q(ost->frame_rate);
|
||||
if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
|
||||
if (ost->filter && !(enc_ctx->time_base.num && enc_ctx->time_base.den))
|
||||
enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
|
||||
if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
|
||||
&& (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
|
||||
@@ -2994,10 +2872,7 @@ static int transcode_init(void)
|
||||
ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
|
||||
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
|
||||
ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
|
||||
|
||||
// Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
|
||||
// parse it only for static kf timings
|
||||
} else if(strncmp(ost->forced_keyframes, "source", 6)) {
|
||||
} else {
|
||||
parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
|
||||
}
|
||||
}
|
||||
@@ -3009,8 +2884,6 @@ static int transcode_init(void)
|
||||
enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
|
||||
}
|
||||
break;
|
||||
case AVMEDIA_TYPE_DATA:
|
||||
break;
|
||||
default:
|
||||
abort();
|
||||
break;
|
||||
@@ -3049,37 +2922,6 @@ static int transcode_init(void)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (ost->disposition) {
|
||||
static const AVOption opts[] = {
|
||||
{ "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
|
||||
{ "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
|
||||
{ "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
|
||||
{ "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
|
||||
{ "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
|
||||
{ "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
|
||||
{ "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
|
||||
{ "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
|
||||
{ "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
|
||||
{ "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
|
||||
{ "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
|
||||
{ "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
|
||||
{ "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
|
||||
{ "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
|
||||
{ NULL },
|
||||
};
|
||||
static const AVClass class = {
|
||||
.class_name = "",
|
||||
.item_name = av_default_item_name,
|
||||
.option = opts,
|
||||
.version = LIBAVUTIL_VERSION_INT,
|
||||
};
|
||||
const AVClass *pclass = &class;
|
||||
|
||||
ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
|
||||
if (ret < 0)
|
||||
goto dump_format;
|
||||
}
|
||||
}
|
||||
|
||||
/* open each encoder */
|
||||
@@ -3120,17 +2962,6 @@ static int transcode_init(void)
|
||||
if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
|
||||
av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
|
||||
" It takes bits/s as argument, not kbits/s\n");
|
||||
|
||||
ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL,
|
||||
"Error initializing the output stream codec context.\n");
|
||||
exit_program(1);
|
||||
}
|
||||
|
||||
// copy timebase while removing common factors
|
||||
ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
|
||||
ost->st->codec->codec= ost->enc_ctx->codec;
|
||||
} else {
|
||||
ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
|
||||
if (ret < 0) {
|
||||
@@ -3138,9 +2969,18 @@ static int transcode_init(void)
|
||||
"Error setting up codec context options.\n");
|
||||
return ret;
|
||||
}
|
||||
// copy timebase while removing common factors
|
||||
ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
|
||||
}
|
||||
|
||||
ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_FATAL,
|
||||
"Error initializing the output stream codec context.\n");
|
||||
exit_program(1);
|
||||
}
|
||||
ost->st->codec->codec= ost->enc_ctx->codec;
|
||||
|
||||
// copy timebase while removing common factors
|
||||
ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
|
||||
}
|
||||
|
||||
/* init input streams */
|
||||
@@ -3250,22 +3090,17 @@ static int transcode_init(void)
|
||||
const char *in_codec_name = "?";
|
||||
const char *encoder_name = "?";
|
||||
const char *out_codec_name = "?";
|
||||
const AVCodecDescriptor *desc;

if (in_codec) {
decoder_name = in_codec->name;
desc = avcodec_descriptor_get(in_codec->id);
if (desc)
in_codec_name = desc->name;
in_codec_name = avcodec_descriptor_get(in_codec->id)->name;
if (!strcmp(decoder_name, in_codec_name))
decoder_name = "native";
}

if (out_codec) {
encoder_name = out_codec->name;
desc = avcodec_descriptor_get(out_codec->id);
if (desc)
out_codec_name = desc->name;
out_codec_name = avcodec_descriptor_get(out_codec->id)->name;
if (!strcmp(encoder_name, out_codec_name))
encoder_name = "native";
}

@@ -3282,7 +3117,7 @@ static int transcode_init(void)
return ret;
}

if (sdp_filename || want_sdp) {
if (want_sdp) {
print_sdp();
}

@@ -3392,8 +3227,6 @@ static int check_keyboard_interaction(int64_t cur_time)
ret = AVERROR_PATCHWELCOME;
} else {
ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
if (ret < 0)
fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
}
}
}

@@ -3443,7 +3276,6 @@ static int check_keyboard_interaction(int64_t cur_time)
static void *input_thread(void *arg)
{
InputFile *f = arg;
unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
int ret = 0;

while (1) {

@@ -3459,15 +3291,7 @@ static void *input_thread(void *arg)
break;
}
av_dup_packet(&pkt);
ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
if (flags && ret == AVERROR(EAGAIN)) {
flags = 0;
ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
av_log(f->ctx, AV_LOG_WARNING,
"Thread message queue blocking; consider raising the "
"thread_queue_size option (current value: %d)\n",
f->thread_queue_size);
}
ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, 0);
if (ret < 0) {
if (ret != AVERROR_EOF)
av_log(f->ctx, AV_LOG_ERROR,

@@ -3516,7 +3340,7 @@ static int init_input_threads(void)
strcmp(f->ctx->iformat->name, "lavfi"))
f->non_blocking = 1;
ret = av_thread_message_queue_alloc(&f->in_thread_queue,
f->thread_queue_size, sizeof(AVPacket));
8, sizeof(AVPacket));
if (ret < 0)
return ret;

@@ -3704,8 +3528,6 @@ static int process_input(int file_index)

if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
continue;
if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
continue;

dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
if (!dst_data)

@@ -3824,10 +3646,10 @@ static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
*best_ist = NULL;
ret = avfilter_graph_request_oldest(graph->graph);
if (ret >= 0)
return reap_filters(0);
return reap_filters();

if (ret == AVERROR_EOF) {
ret = reap_filters(1);
ret = reap_filters();
for (i = 0; i < graph->nb_outputs; i++)
close_output_stream(graph->outputs[i]->ost);
return ret;

@@ -3893,11 +3715,10 @@ static int transcode_step(void)
ost->unavailable = 1;
return 0;
}

if (ret < 0)
return ret == AVERROR_EOF ? 0 : ret;

return reap_filters(0);
return reap_filters();
}

/*

@@ -3942,15 +3763,11 @@ static int transcode(void)

ret = transcode_step();
if (ret < 0) {
if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
continue;
} else {
char errbuf[128];
av_strerror(ret, errbuf, sizeof(errbuf));

av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
break;
}
av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
break;
}

/* dump report by using the output first video and audio streams */

@@ -4016,7 +3833,6 @@ static int transcode(void)
}
av_freep(&ost->forced_kf_pts);
av_freep(&ost->apad);
av_freep(&ost->disposition);
av_dict_free(&ost->encoder_opts);
av_dict_free(&ost->swr_opts);
av_dict_free(&ost->resample_opts);

14
ffmpeg.h
@@ -92,7 +92,6 @@ typedef struct OptionsContext {

/* input/output options */
int64_t start_time;
int seek_timestamp;
const char *format;

SpecifierOpt *codec_names;

@@ -112,7 +111,6 @@ typedef struct OptionsContext {
int64_t input_ts_offset;
int rate_emu;
int accurate_seek;
int thread_queue_size;

SpecifierOpt *ts_scale;
int nb_ts_scale;

@@ -122,8 +120,6 @@ typedef struct OptionsContext {
int nb_hwaccels;
SpecifierOpt *hwaccel_devices;
int nb_hwaccel_devices;
SpecifierOpt *autorotate;
int nb_autorotate;

/* output options */
StreamMap *stream_maps;

@@ -210,8 +206,6 @@ typedef struct OptionsContext {
int nb_apad;
SpecifierOpt *discard;
int nb_discard;
SpecifierOpt *disposition;
int nb_disposition;
} OptionsContext;

typedef struct InputFilter {

@@ -278,7 +272,6 @@ typedef struct InputStream {
int top_field_first;
int guess_layout_max;

int autorotate;
int resample_height;
int resample_width;
int resample_pix_fmt;

@@ -343,7 +336,6 @@ typedef struct InputFile {
int64_t ts_offset;
int64_t last_ts;
int64_t start_time; /* user-specified start time in AV_TIME_BASE or AV_NOPTS_VALUE */
int seek_timestamp;
int64_t recording_time;
int nb_streams; /* number of stream that ffmpeg is aware of; may be different
from ctx.nb_streams if new streams appear during av_read_frame() */

@@ -356,7 +348,6 @@ typedef struct InputFile {
pthread_t thread; /* thread reading from this file */
int non_blocking; /* reading packets from the thread should not block */
int joined; /* the thread has been joined */
int thread_queue_size; /* maximum number of queued packets */
#endif
} InputFile;

@@ -399,13 +390,11 @@ typedef struct OutputStream {
AVFrame *filtered_frame;
AVFrame *last_frame;
int last_droped;
int last_nb0_frames[3];

/* video only */
AVRational frame_rate;
int force_fps;
int top_field_first;
int rotate_overridden;

AVRational frame_aspect_ratio;

@@ -441,7 +430,6 @@ typedef struct OutputStream {
const char *attachment_filename;
int copy_initial_nonkeyframes;
int copy_prior_start;
char *disposition;

int keep_pix_fmt;

@@ -482,7 +470,6 @@ extern FilterGraph **filtergraphs;
extern int nb_filtergraphs;

extern char *vstats_filename;
extern char *sdp_filename;

extern float audio_drift_threshold;
extern float dts_delta_threshold;

@@ -491,7 +478,6 @@ extern float dts_error_threshold;
extern int audio_volume;
extern int audio_sync_method;
extern int video_sync_method;
extern float frame_drop_threshold;
extern int do_benchmark;
extern int do_benchmark_all;
extern int do_deinterlace;

@@ -52,7 +52,6 @@ DEFINE_GUID(DXVA2_ModeH264_F, 0x1b81be69, 0xa0c7,0x11d3,0xb9,0x84,0x00,0
DEFINE_GUID(DXVADDI_Intel_ModeH264_E, 0x604F8E68, 0x4951,0x4C54,0x88,0xFE,0xAB,0xD2,0x5C,0x15,0xB3,0xD6);
DEFINE_GUID(DXVA2_ModeVC1_D, 0x1b81beA3, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
DEFINE_GUID(DXVA2_ModeVC1_D2010, 0x1b81beA4, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
DEFINE_GUID(DXVA2_ModeHEVC_VLD_Main, 0x5b11d51b, 0x2f4c,0x4452,0xbc,0xc3,0x09,0xf2,0xa1,0x16,0x0c,0xc0);
DEFINE_GUID(DXVA2_NoEncrypt, 0x1b81beD0, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
DEFINE_GUID(GUID_NULL, 0x00000000, 0x0000,0x0000,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00);

@@ -81,9 +80,6 @@ static const dxva2_mode dxva2_modes[] = {
{ &DXVA2_ModeVC1_D, AV_CODEC_ID_VC1 },
{ &DXVA2_ModeVC1_D, AV_CODEC_ID_WMV3 },

/* HEVC/H.265 */
{ &DXVA2_ModeHEVC_VLD_Main, AV_CODEC_ID_HEVC },

{ NULL, 0 },
};

@@ -364,7 +360,7 @@ static int dxva2_alloc(AVCodecContext *s)
d3dpp.SwapEffect = D3DSWAPEFFECT_DISCARD;
d3dpp.Flags = D3DPRESENTFLAG_VIDEO;

hr = IDirect3D9_CreateDevice(ctx->d3d9, adapter, D3DDEVTYPE_HAL, GetDesktopWindow(),
hr = IDirect3D9_CreateDevice(ctx->d3d9, adapter, D3DDEVTYPE_HAL, GetShellWindow(),
D3DCREATE_SOFTWARE_VERTEXPROCESSING | D3DCREATE_MULTITHREADED | D3DCREATE_FPU_PRESERVE,
&d3dpp, &ctx->d3d9device);
if (FAILED(hr)) {

@@ -530,10 +526,6 @@ static int dxva2_create_decoder(AVCodecContext *s)
but it causes issues for H.264 on certain AMD GPUs..... */
if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO)
surface_alignment = 32;
/* the HEVC DXVA2 spec asks for 128 pixel aligned surfaces to ensure
all coding features have enough room to work with */
else if (s->codec_id == AV_CODEC_ID_HEVC)
surface_alignment = 128;
else
surface_alignment = 16;

@@ -541,7 +533,7 @@ static int dxva2_create_decoder(AVCodecContext *s)
ctx->num_surfaces = 4;

/* add surfaces based on number of possible refs */
if (s->codec_id == AV_CODEC_ID_H264 || s->codec_id == AV_CODEC_ID_HEVC)
if (s->codec_id == AV_CODEC_ID_H264)
ctx->num_surfaces += 16;
else
ctx->num_surfaces += 2;

@@ -31,7 +31,6 @@

#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/display.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixfmt.h"

@@ -340,28 +339,6 @@ static int insert_trim(int64_t start_time, int64_t duration,
return 0;
}

static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
const char *filter_name, const char *args)
{
AVFilterGraph *graph = (*last_filter)->graph;
AVFilterContext *ctx;
int ret;

ret = avfilter_graph_create_filter(&ctx,
avfilter_get_by_name(filter_name),
filter_name, args, NULL, graph);
if (ret < 0)
return ret;

ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
if (ret < 0)
return ret;

*last_filter = ctx;
*pad_idx = 0;
return 0;
}

static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
char *pix_fmts;

@@ -699,27 +676,6 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
return ret;
last_filter = ifilter->filter;

if (ist->autorotate) {
double theta = get_rotation(ist->st);

if (fabs(theta - 90) < 1.0) {
ret = insert_filter(&last_filter, &pad_idx, "transpose", "clock");
} else if (fabs(theta - 180) < 1.0) {
ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
if (ret < 0)
return ret;
ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
} else if (fabs(theta - 270) < 1.0) {
ret = insert_filter(&last_filter, &pad_idx, "transpose", "cclock");
} else if (fabs(theta) > 1.0) {
char rotate_buf[64];
snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
}
if (ret < 0)
return ret;
}

if (ist->framerate.num) {
AVFilterContext *setpts;

173
ffmpeg_opt.c
@@ -77,7 +77,6 @@ const HWAccel hwaccels[] = {
};

char *vstats_filename;
char *sdp_filename;

float audio_drift_threshold = 0.1;
float dts_delta_threshold = 10;

@@ -86,7 +85,6 @@ float dts_error_threshold = 3600*30;
int audio_volume = 256;
int audio_sync_method = 0;
int video_sync_method = VSYNC_AUTO;
float frame_drop_threshold = 0;
int do_deinterlace = 0;
int do_benchmark = 0;
int do_benchmark_all = 0;

@@ -110,9 +108,6 @@ static int no_file_overwrite = 0;
static int do_psnr = 0;
static int input_sync;
static int override_ffserver = 0;
static int input_stream_potentially_available = 0;
static int ignore_unknown_streams = 0;
static int copy_unknown_streams = 0;

static void uninit_options(OptionsContext *o)
{

@@ -236,8 +231,6 @@ static int opt_map(void *optctx, const char *opt, const char *arg)
arg++;
}
map = av_strdup(arg);
if (!map)
return AVERROR(ENOMEM);

/* parse sync stream first, just pick first matching stream */
if (sync = strchr(map, ',')) {

@@ -386,13 +379,6 @@ static int opt_map_channel(void *optctx, const char *opt, const char *arg)
return 0;
}

static int opt_sdp_file(void *optctx, const char *opt, const char *arg)
{
av_free(sdp_filename);
sdp_filename = av_strdup(arg);
return 0;
}

/**
* Parse a metadata specifier passed as 'arg' parameter.
* @param arg metadata string to parse

@@ -523,7 +509,7 @@ static int opt_recording_timestamp(void *optctx, const char *opt, const char *ar
char buf[128];
int64_t recording_timestamp = parse_time_or_die(opt, arg, 0) / 1E6;
struct tm time = *gmtime((time_t*)&recording_timestamp);
if (!strftime(buf, sizeof(buf), "creation_time=%Y-%m-%dT%H:%M:%S%z", &time))
if (!strftime(buf, sizeof(buf), "creation_time=%FT%T%z", &time))
return -1;
parse_option(o, "metadata", buf, options);

@@ -604,9 +590,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
ist->ts_scale = 1.0;
MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);

ist->autorotate = 1;
MATCH_PER_STREAM_OPT(autorotate, i, ist->autorotate, ic, st);

MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, ic, st);
if (codec_tag) {
uint32_t tag = strtol(codec_tag, &next, 0);

@@ -811,7 +794,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
char * video_codec_name = NULL;
char * audio_codec_name = NULL;
char *subtitle_codec_name = NULL;
char * data_codec_name = NULL;
int scan_all_pmts_set = 0;

if (o->format) {

@@ -865,7 +847,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
MATCH_PER_TYPE_OPT(codec_names, str, video_codec_name, ic, "v");
MATCH_PER_TYPE_OPT(codec_names, str, audio_codec_name, ic, "a");
MATCH_PER_TYPE_OPT(codec_names, str, subtitle_codec_name, ic, "s");
MATCH_PER_TYPE_OPT(codec_names, str, data_codec_name, ic, "d");

ic->video_codec_id = video_codec_name ?
find_codec_or_die(video_codec_name , AVMEDIA_TYPE_VIDEO , 0)->id : AV_CODEC_ID_NONE;

@@ -873,8 +854,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0)->id : AV_CODEC_ID_NONE;
ic->subtitle_codec_id= subtitle_codec_name ?
find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0)->id : AV_CODEC_ID_NONE;
ic->data_codec_id = data_codec_name ?
find_codec_or_die(data_codec_name, AVMEDIA_TYPE_DATA, 0)->id : AV_CODEC_ID_NONE;

if (video_codec_name)
av_format_set_video_codec (ic, find_codec_or_die(video_codec_name , AVMEDIA_TYPE_VIDEO , 0));

@@ -882,8 +861,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
av_format_set_audio_codec (ic, find_codec_or_die(audio_codec_name , AVMEDIA_TYPE_AUDIO , 0));
if (subtitle_codec_name)
av_format_set_subtitle_codec(ic, find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0));
if (data_codec_name)
av_format_set_data_codec(ic, find_codec_or_die(data_codec_name, AVMEDIA_TYPE_DATA, 0));

ic->flags |= AVFMT_FLAG_NONBLOCK;
ic->interrupt_callback = int_cb;

@@ -924,25 +901,12 @@ static int open_input_file(OptionsContext *o, const char *filename)

timestamp = (o->start_time == AV_NOPTS_VALUE) ? 0 : o->start_time;
/* add the stream start time */
if (!o->seek_timestamp && ic->start_time != AV_NOPTS_VALUE)
if (ic->start_time != AV_NOPTS_VALUE)
timestamp += ic->start_time;

/* if seeking requested, we execute it */
if (o->start_time != AV_NOPTS_VALUE) {
int64_t seek_timestamp = timestamp;

if (!(ic->iformat->flags & AVFMT_SEEK_TO_PTS)) {
int dts_heuristic = 0;
for (i=0; i<ic->nb_streams; i++) {
AVCodecContext *avctx = ic->streams[i]->codec;
if (avctx->has_b_frames)
dts_heuristic = 1;
}
if (dts_heuristic) {
seek_timestamp -= 3*AV_TIME_BASE / 23;
}
}
ret = avformat_seek_file(ic, -1, INT64_MIN, seek_timestamp, seek_timestamp, 0);
ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, timestamp, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
filename, (double)timestamp / AV_TIME_BASE);

@@ -970,9 +934,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
f->nb_streams = ic->nb_streams;
f->rate_emu = o->rate_emu;
f->accurate_seek = o->accurate_seek;
#if HAVE_PTHREADS
f->thread_queue_size = o->thread_queue_size > 0 ? o->thread_queue_size : 8;
#endif

/* check if all codec options have been used */
unused_opts = strip_specifiers(o->g->codec_opts);

@@ -1027,8 +988,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
av_dict_free(&opts[i]);
av_freep(&opts);

input_stream_potentially_available = 1;

return 0;
}

@@ -1152,7 +1111,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
av_dict_set(&ost->encoder_opts, buf, arg, AV_DICT_DONT_OVERWRITE);
av_free(buf);
} while (!s->eof_reached);
avio_closep(&s);
avio_close(s);
}
if (ret) {
av_log(NULL, AV_LOG_FATAL,

@@ -1203,7 +1162,6 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
uint32_t tag = strtol(codec_tag, &next, 0);
if (*next)
tag = AV_RL32(codec_tag);
ost->st->codec->codec_tag =
ost->enc_ctx->codec_tag = tag;
}

@@ -1213,9 +1171,6 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
ost->enc_ctx->global_quality = FF_QP2LAMBDA * qscale;
}

MATCH_PER_STREAM_OPT(disposition, str, ost->disposition, oc, st);
ost->disposition = av_strdup(ost->disposition);

if (oc->oformat->flags & AVFMT_GLOBALHEADER)
ost->enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

@@ -1416,13 +1371,10 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, in
av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
exit_program(1);
}
/* FIXME realloc failure */
video_enc->rc_override =
av_realloc_array(video_enc->rc_override,
i + 1, sizeof(RcOverride));
if (!video_enc->rc_override) {
av_log(NULL, AV_LOG_FATAL, "Could not (re)allocate memory for rc_override.\n");
exit_program(1);
}
av_realloc(video_enc->rc_override,
sizeof(RcOverride) * (i + 1));
video_enc->rc_override[i].start_frame = start;
video_enc->rc_override[i].end_frame = end;
if (q > 0) {

@@ -1568,19 +1520,6 @@ static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc, int
return ost;
}

static OutputStream *new_unknown_stream(OptionsContext *o, AVFormatContext *oc, int source_index)
{
OutputStream *ost;

ost = new_output_stream(o, oc, AVMEDIA_TYPE_UNKNOWN, source_index);
if (!ost->stream_copy) {
av_log(NULL, AV_LOG_FATAL, "Unknown stream encoding not supported yet (only streamcopy)\n");
exit_program(1);
}

return ost;
}

static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc, int source_index)
{
OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT, source_index);

@@ -1976,15 +1915,7 @@ static int open_output_file(OptionsContext *o, const char *filename)
}
}
}
/* Data only if codec id match */
if (!o->data_disable ) {
enum AVCodecID codec_id = av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_DATA);
for (i = 0; codec_id != AV_CODEC_ID_NONE && i < nb_input_streams; i++) {
if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_DATA
&& input_streams[i]->st->codec->codec_id == codec_id )
new_data_stream(o, oc, i);
}
}
/* do something with data? */
} else {
for (i = 0; i < o->nb_stream_maps; i++) {
StreamMap *map = &o->stream_maps[i];

@@ -2033,22 +1964,10 @@ loop_end:
case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream (o, oc, src_idx); break;
case AVMEDIA_TYPE_DATA: ost = new_data_stream (o, oc, src_idx); break;
case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc, src_idx); break;
case AVMEDIA_TYPE_UNKNOWN:
if (copy_unknown_streams) {
ost = new_unknown_stream (o, oc, src_idx);
break;
}
default:
av_log(NULL, ignore_unknown_streams ? AV_LOG_WARNING : AV_LOG_FATAL,
"Cannot map stream #%d:%d - unsupported type.\n",
av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d:%d - unsupported type.\n",
map->file_index, map->stream_index);
if (!ignore_unknown_streams) {
av_log(NULL, AV_LOG_FATAL,
"If you want unsupported types ignored instead "
"of failing, please use the -ignore_unknown option\n"
"If you want them copied, please use -copy_unknown\n");
exit_program(1);
}
exit_program(1);
}
}
}

@@ -2082,12 +2001,12 @@ loop_end:
ost->stream_copy = 0;
ost->attachment_filename = o->attachments[i];
ost->finished = 1;
ost->st->codec->extradata = attachment;
ost->st->codec->extradata_size = len;
ost->enc_ctx->extradata = attachment;
ost->enc_ctx->extradata_size = len;

p = strrchr(o->attachments[i], '/');
av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
avio_closep(&pb);
avio_close(pb);
}

for (i = nb_output_streams - oc->nb_streams; i < nb_output_streams; i++) { //for all streams of this output file

@@ -2151,12 +2070,6 @@ loop_end:
}
}

if (!(oc->oformat->flags & AVFMT_NOSTREAMS) && !input_stream_potentially_available) {
av_log(NULL, AV_LOG_ERROR,
"No input streams but output needs an input stream\n");
exit_program(1);
}

if (!(oc->oformat->flags & AVFMT_NOFILE)) {
/* test if it already exists to avoid losing precious files */
assert_file_overwrite(filename);

@@ -2225,11 +2138,8 @@ loop_end:
continue;
ist = input_streams[output_streams[i]->source_index];
av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
if (!output_streams[i]->stream_copy) {
if (!output_streams[i]->stream_copy)
av_dict_set(&output_streams[i]->st->metadata, "encoder", NULL, 0);
if (ist->autorotate)
av_dict_set(&output_streams[i]->st->metadata, "rotate", NULL, 0);
}
}

/* process manually set metadata */

@@ -2250,12 +2160,8 @@ loop_end:
parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec);
if (type == 's') {
for (j = 0; j < oc->nb_streams; j++) {
ost = output_streams[nb_output_streams - oc->nb_streams + j];
if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) {
av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0);
if (!strcmp(o->metadata[i].u.str, "rotate")) {
ost->rotate_overridden = 1;
}
} else if (ret < 0)
exit_program(1);
}

@@ -2342,9 +2248,9 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
opt_default(NULL, "g", norm == PAL ? "15" : "18");

opt_default(NULL, "b:v", "1150000");
opt_default(NULL, "maxrate:v", "1150000");
opt_default(NULL, "minrate:v", "1150000");
opt_default(NULL, "bufsize:v", "327680"); // 40*1024*8;
opt_default(NULL, "maxrate", "1150000");
opt_default(NULL, "minrate", "1150000");
opt_default(NULL, "bufsize", "327680"); // 40*1024*8;

opt_default(NULL, "b:a", "224000");
parse_option(o, "ar", "44100", options);

@@ -2371,9 +2277,9 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
opt_default(NULL, "g", norm == PAL ? "15" : "18");

opt_default(NULL, "b:v", "2040000");
opt_default(NULL, "maxrate:v", "2516000");
opt_default(NULL, "minrate:v", "0"); // 1145000;
opt_default(NULL, "bufsize:v", "1835008"); // 224*1024*8;
opt_default(NULL, "maxrate", "2516000");
opt_default(NULL, "minrate", "0"); // 1145000;
opt_default(NULL, "bufsize", "1835008"); // 224*1024*8;
opt_default(NULL, "scan_offset", "1");

opt_default(NULL, "b:a", "224000");

@@ -2393,9 +2299,9 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
opt_default(NULL, "g", norm == PAL ? "15" : "18");

opt_default(NULL, "b:v", "6000000");
opt_default(NULL, "maxrate:v", "9000000");
opt_default(NULL, "minrate:v", "0"); // 1500000;
opt_default(NULL, "bufsize:v", "1835008"); // 224*1024*8;
opt_default(NULL, "maxrate", "9000000");
opt_default(NULL, "minrate", "0"); // 1500000;
opt_default(NULL, "bufsize", "1835008"); // 224*1024*8;

opt_default(NULL, "packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
opt_default(NULL, "muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8

@@ -2439,9 +2345,6 @@ static int opt_vstats(void *optctx, const char *opt, const char *arg)
time_t today2 = time(NULL);
struct tm *today = localtime(&today2);

if (!today)
return AVERROR(errno);

snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
today->tm_sec);
return opt_vstats_file(NULL, opt, filename);

@@ -2671,9 +2574,6 @@ static int opt_filter_complex(void *optctx, const char *opt, const char *arg)
filtergraphs[nb_filtergraphs - 1]->graph_desc = av_strdup(arg);
if (!filtergraphs[nb_filtergraphs - 1]->graph_desc)
return AVERROR(ENOMEM);

input_stream_potentially_available = 1;

return 0;
}

@@ -2688,9 +2588,6 @@ static int opt_filter_complex_script(void *optctx, const char *opt, const char *
return AVERROR(ENOMEM);
filtergraphs[nb_filtergraphs - 1]->index = nb_filtergraphs - 1;
filtergraphs[nb_filtergraphs - 1]->graph_desc = graph_desc;

input_stream_potentially_available = 1;

return 0;
}

@@ -2886,10 +2783,6 @@ const OptionDef options[] = {
"overwrite output files" },
{ "n", OPT_BOOL, { &no_file_overwrite },
"never overwrite output files" },
{ "ignore_unknown", OPT_BOOL, { &ignore_unknown_streams },
"Ignore unknown stream types" },
{ "copy_unknown", OPT_BOOL | OPT_EXPERT, { &copy_unknown_streams },
"Copy unknown stream types" },
{ "c", HAS_ARG | OPT_STRING | OPT_SPEC |
OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(codec_names) },
"codec name", "codec" },

@@ -2923,9 +2816,6 @@ const OptionDef options[] = {
{ "ss", HAS_ARG | OPT_TIME | OPT_OFFSET |
OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(start_time) },
"set the start time offset", "time_off" },
{ "seek_timestamp", HAS_ARG | OPT_INT | OPT_OFFSET |
OPT_INPUT, { .off = OFFSET(seek_timestamp) },
"enable/disable seeking by timestamp with -ss" },
{ "accurate_seek", OPT_BOOL | OPT_OFFSET | OPT_EXPERT |
OPT_INPUT, { .off = OFFSET(accurate_seek) },
"enable/disable accurate seeking with -ss" },

@@ -2935,7 +2825,7 @@ const OptionDef options[] = {
{ "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC |
OPT_EXPERT | OPT_INPUT, { .off = OFFSET(ts_scale) },
"set the input ts scale", "scale" },
{ "timestamp", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_recording_timestamp },
{ "timestamp", HAS_ARG | OPT_PERFILE, { .func_arg = opt_recording_timestamp },
"set the recording timestamp ('now' to set the current time)", "time" },
{ "metadata", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(metadata) },
"add metadata", "string=string" },

@@ -2964,8 +2854,6 @@ const OptionDef options[] = {
" \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
{ "vsync", HAS_ARG | OPT_EXPERT, { opt_vsync },
"video sync method", "" },
{ "frame_drop_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &frame_drop_threshold },
"frame drop threshold", "" },
{ "async", HAS_ARG | OPT_INT | OPT_EXPERT, { &audio_sync_method },
"audio sync method", "" },
{ "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &audio_drift_threshold },

@@ -3033,12 +2921,6 @@ const OptionDef options[] = {
{ "discard", OPT_STRING | HAS_ARG | OPT_SPEC |
OPT_INPUT, { .off = OFFSET(discard) },
"discard", "" },
{ "disposition", OPT_STRING | HAS_ARG | OPT_SPEC |
OPT_OUTPUT, { .off = OFFSET(disposition) },
"disposition", "" },
{ "thread_queue_size", HAS_ARG | OPT_INT | OPT_OFFSET | OPT_EXPERT | OPT_INPUT,
{ .off = OFFSET(thread_queue_size) },
"set the maximum number of queued packets from the demuxer" },

/* video options */
{ "vframes", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_frames },

@@ -3101,7 +2983,7 @@ const OptionDef options[] = {
OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(top_field_first) },
"top=1/bottom=0/auto=-1 field first", "" },
{ "vtag", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_old2new },
OPT_OUTPUT, { .func_arg = opt_old2new },
"force video tag/fourcc", "fourcc/tag" },
{ "qphist", OPT_VIDEO | OPT_BOOL | OPT_EXPERT , { &qp_hist },
"show QP histogram" },

@@ -3127,9 +3009,6 @@ const OptionDef options[] = {
#if HAVE_VDPAU_X11
{ "vdpau_api_ver", HAS_ARG | OPT_INT | OPT_EXPERT, { &vdpau_api_ver }, "" },
#endif
{ "autorotate", HAS_ARG | OPT_BOOL | OPT_SPEC |
OPT_EXPERT | OPT_INPUT, { .off = OFFSET(autorotate) },
"automatically insert correct rotate filters" },

/* audio options */
{ "aframes", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_frames },

@@ -3189,8 +3068,6 @@ const OptionDef options[] = {
"set the initial demux-decode delay", "seconds" },
{ "override_ffserver", OPT_BOOL | OPT_EXPERT | OPT_OUTPUT, { &override_ffserver },
"override the options from ffserver", "" },
{ "sdp_file", HAS_ARG | OPT_EXPERT | OPT_OUTPUT, { opt_sdp_file },
"specify a file in which to print sdp information", "file" },

{ "bsf", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(bitstream_filters) },
"A comma-separated list of bitstream filters", "bitstream_filters" },

@@ -73,12 +73,10 @@ static int vda_retrieve_data(AVCodecContext *s, AVFrame *frame)
}

av_image_copy(vda->tmp_frame->data, vda->tmp_frame->linesize,
(const uint8_t **)data, linesize, vda->tmp_frame->format,
data, linesize, vda->tmp_frame->format,
frame->width, frame->height);

ret = av_frame_copy_props(vda->tmp_frame, frame);
CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);

if (ret < 0)
return ret;

@@ -100,14 +100,9 @@ static int vdpau_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
VDPAUContext *ctx = ist->hwaccel_ctx;
VdpVideoSurface *surface;
VdpStatus err;
VdpChromaType chroma;
uint32_t width, height;

av_assert0(frame->format == AV_PIX_FMT_VDPAU);

if (av_vdpau_get_surface_parameters(s, &chroma, &width, &height))
return AVERROR(ENOSYS);

surface = av_malloc(sizeof(*surface));
if (!surface)
return AVERROR(ENOMEM);

@@ -123,8 +118,8 @@ static int vdpau_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
// properly we should keep a pool of surfaces instead of creating
// them anew for each frame, but since we don't care about speed
// much in this code, we don't bother
err = ctx->video_surface_create(ctx->device, chroma, width, height,
surface);
err = ctx->video_surface_create(ctx->device, VDP_CHROMA_TYPE_420,
frame->width, frame->height, surface);
if (err != VDP_STATUS_OK) {
av_log(NULL, AV_LOG_ERROR, "Error allocating a VDPAU video surface: %s\n",
ctx->get_error_string(err));

204
ffplay.c
@@ -32,7 +32,6 @@

#include "libavutil/avstring.h"
#include "libavutil/colorspace.h"
#include "libavutil/eval.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"

@@ -186,18 +185,21 @@ typedef struct Decoder {
AVCodecContext *avctx;
int pkt_serial;
int finished;
int flushed;
int packet_pending;
SDL_cond *empty_queue_cond;
int64_t start_pts;
AVRational start_pts_tb;
int64_t next_pts;
AVRational next_pts_tb;
SDL_Thread *decoder_tid;
} Decoder;

typedef struct VideoState {
SDL_Thread *read_tid;
SDL_Thread *video_tid;
SDL_Thread *audio_tid;
AVInputFormat *iformat;
int no_background;
int abort_request;
int force_refresh;
int paused;

@@ -264,6 +266,7 @@ typedef struct VideoState {
int xpos;
double last_vis_time;

SDL_Thread *subtitle_tid;
int subtitle_stream;
AVStream *subtitle_st;
PacketQueue subtitleq;

@@ -279,7 +282,6 @@ typedef struct VideoState {
struct SwsContext *img_convert_ctx;
#endif
SDL_Rect last_display_rect;
int eof;

char filename[1024];
int width, height, xleft, ytop;

@@ -312,7 +314,11 @@ static int screen_height = 0;
static int audio_disable;
static int video_disable;
static int subtitle_disable;
static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
static int wanted_stream[AVMEDIA_TYPE_NB] = {
[AVMEDIA_TYPE_AUDIO] = -1,
[AVMEDIA_TYPE_VIDEO] = -1,
[AVMEDIA_TYPE_SUBTITLE] = -1,
};
static int seek_by_bytes = -1;
static int display_disable;
static int show_status = 1;

@@ -542,6 +548,8 @@ static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue,
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
int got_frame = 0;

d->flushed = 0;

do {
int ret = -1;

@@ -558,6 +566,7 @@ static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
if (pkt.data == flush_pkt.data) {
avcodec_flush_buffers(d->avctx);
d->finished = 0;
d->flushed = 1;
d->next_pts = d->start_pts;
d->next_pts_tb = d->start_pts_tb;
}

@@ -768,15 +777,6 @@ static int64_t frame_queue_last_pos(FrameQueue *f)
return -1;
}

static void decoder_abort(Decoder *d, FrameQueue *fq)
{
packet_queue_abort(d->queue);
frame_queue_signal(fq);
SDL_WaitThread(d->decoder_tid, NULL);
d->decoder_tid = NULL;
packet_queue_flush(d->queue);
}

static inline void fill_rectangle(SDL_Surface *screen,
int x, int y, int w, int h, int color, int update)
{

@@ -1248,10 +1248,7 @@ static void video_audio_display(VideoState *s)
s->rdft_bits = rdft_bits;
s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
}
if (!s->rdft || !s->rdft_data){
av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
s->show_mode = SHOW_MODE_WAVES;
} else {
{
FFTSample *data[2];
for (ch = 0; ch < nb_display_channels; ch++) {
data[ch] = s->rdft_data + 2 * nb_freq * ch;

@@ -1506,7 +1503,7 @@ static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_by
static void stream_toggle_pause(VideoState *is)
{
if (is->paused) {
is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
is->frame_timer += av_gettime_relative() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
if (is->read_pause_return != AVERROR(ENOSYS)) {
is->vidclk.paused = 0;
}

@@ -1532,7 +1529,7 @@ static void step_to_next_frame(VideoState *is)

static double compute_target_delay(double delay, VideoState *is)
{
double sync_threshold, diff = 0;
double sync_threshold, diff;

/* update delay to follow master synchronisation source */
if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {

@@ -1554,7 +1551,7 @@ static double compute_target_delay(double delay, VideoState *is)
}
}

av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
delay, -diff);

return delay;

@@ -1996,20 +1993,20 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c

/* Note: this macro adds a filter before the lastly added filter, so the
* processing order of the filters is in reverse */
#define INSERT_FILT(name, arg) do { \
AVFilterContext *filt_ctx; \
\
ret = avfilter_graph_create_filter(&filt_ctx, \
avfilter_get_by_name(name), \
"ffplay_" name, arg, NULL, graph); \
if (ret < 0) \
goto fail; \
\
ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
if (ret < 0) \
goto fail; \
\
last_filter = filt_ctx; \
#define INSERT_FILT(name, arg) do { \
AVFilterContext *filt_ctx; \
\
ret = avfilter_graph_create_filter(&filt_ctx, \
avfilter_get_by_name(name), \
"ffplay_" name, arg, NULL, graph); \
if (ret < 0) \
goto fail; \
\
ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
if (ret < 0) \
goto fail; \
\
last_filter = filt_ctx; \
} while (0)

/* SDL YUV code is not handling odd width/height for some driver

@@ -2017,19 +2014,20 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");

if (autorotate) {
double theta = get_rotation(is->video_st);

if (fabs(theta - 90) < 1.0) {
INSERT_FILT("transpose", "clock");
} else if (fabs(theta - 180) < 1.0) {
INSERT_FILT("hflip", NULL);
INSERT_FILT("vflip", NULL);
} else if (fabs(theta - 270) < 1.0) {
INSERT_FILT("transpose", "cclock");
} else if (fabs(theta) > 1.0) {
char rotate_buf[64];
snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
INSERT_FILT("rotate", rotate_buf);
AVDictionaryEntry *rotate_tag = av_dict_get(is->video_st->metadata, "rotate", NULL, 0);
if (rotate_tag && *rotate_tag->value && strcmp(rotate_tag->value, "0")) {
if (!strcmp(rotate_tag->value, "90")) {
INSERT_FILT("transpose", "clock");
} else if (!strcmp(rotate_tag->value, "180")) {
INSERT_FILT("hflip", NULL);
INSERT_FILT("vflip", NULL);
} else if (!strcmp(rotate_tag->value, "270")) {
INSERT_FILT("transpose", "cclock");
} else {
char rotate_buf[64];
snprintf(rotate_buf, sizeof(rotate_buf), "%s*PI/180", rotate_tag->value);
INSERT_FILT("rotate", rotate_buf);
}
}
}

@@ -2207,12 +2205,6 @@ static int audio_thread(void *arg)
return ret;
}

static void decoder_start(Decoder *d, int (*fn)(void *), void *arg)
{
packet_queue_start(d->queue);
d->decoder_tid = SDL_CreateThread(fn, arg);
}

static int video_thread(void *arg)
{
VideoState *is = arg;

@@ -2231,20 +2223,8 @@ static int video_thread(void *arg)
enum AVPixelFormat last_format = -2;
int last_serial = -1;
int last_vfilter_idx = 0;
if (!graph) {
av_frame_free(&frame);
return AVERROR(ENOMEM);
}

#endif

if (!frame) {
#if CONFIG_AVFILTER
avfilter_graph_free(&graph);
#endif
return AVERROR(ENOMEM);
}

for (;;) {
ret = get_video_frame(is, frame);
if (ret < 0)

@@ -2412,9 +2392,9 @@ static int synchronize_audio(VideoState *is, int nb_samples)
wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
}
av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
diff, avg_diff, wanted_nb_samples - nb_samples,
is->audio_clock, is->audio_diff_threshold);
}

@@ -2716,7 +2696,6 @@ static int stream_component_open(VideoState *is, int stream_index)
goto fail;
}

is->eof = 0;
ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
switch (avctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:

@@ -2759,28 +2738,31 @@ static int stream_component_open(VideoState *is, int stream_index)
is->audio_stream = stream_index;
is->audio_st = ic->streams[stream_index];

packet_queue_start(&is->audioq);
decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
is->auddec.start_pts = is->audio_st->start_time;
is->auddec.start_pts_tb = is->audio_st->time_base;
}
decoder_start(&is->auddec, audio_thread, is);
is->audio_tid = SDL_CreateThread(audio_thread, is);
SDL_PauseAudio(0);
break;
case AVMEDIA_TYPE_VIDEO:
is->video_stream = stream_index;
is->video_st = ic->streams[stream_index];

packet_queue_start(&is->videoq);
decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
decoder_start(&is->viddec, video_thread, is);
is->video_tid = SDL_CreateThread(video_thread, is);
is->queue_attachments_req = 1;
break;
case AVMEDIA_TYPE_SUBTITLE:
is->subtitle_stream = stream_index;
is->subtitle_st = ic->streams[stream_index];

packet_queue_start(&is->subtitleq);
decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
decoder_start(&is->subdec, subtitle_thread, is);
is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
break;
default:
break;

@@ -2803,9 +2785,13 @@ static void stream_component_close(VideoState *is, int stream_index)

switch (avctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
decoder_abort(&is->auddec, &is->sampq);
packet_queue_abort(&is->audioq);
frame_queue_signal(&is->sampq);
SDL_CloseAudio();
SDL_WaitThread(is->audio_tid, NULL);

decoder_destroy(&is->auddec);
packet_queue_flush(&is->audioq);
swr_free(&is->swr_ctx);
av_freep(&is->audio_buf1);
is->audio_buf1_size = 0;

@@ -2819,12 +2805,28 @@ static void stream_component_close(VideoState *is, int stream_index)
}
break;
case AVMEDIA_TYPE_VIDEO:
decoder_abort(&is->viddec, &is->pictq);
packet_queue_abort(&is->videoq);

/* note: we also signal this mutex to make sure we deblock the
video thread in all cases */
frame_queue_signal(&is->pictq);

SDL_WaitThread(is->video_tid, NULL);

decoder_destroy(&is->viddec);
packet_queue_flush(&is->videoq);
break;
case AVMEDIA_TYPE_SUBTITLE:
decoder_abort(&is->subdec, &is->subpq);
packet_queue_abort(&is->subtitleq);

/* note: we also signal this mutex to make sure we deblock the
video thread in all cases */
frame_queue_signal(&is->subpq);

SDL_WaitThread(is->subtitle_tid, NULL);

decoder_destroy(&is->subdec);
packet_queue_flush(&is->subtitleq);
break;
default:
break;

@@ -2880,6 +2882,7 @@ static int read_thread(void *arg)
int err, i, ret;
int st_index[AVMEDIA_TYPE_NB];
AVPacket pkt1, *pkt = &pkt1;
int eof = 0;
int64_t stream_start_time;
int pkt_in_play_range = 0;
AVDictionaryEntry *t;

@@ -2887,20 +2890,13 @@ static int read_thread(void *arg)
int orig_nb_streams;
SDL_mutex *wait_mutex = SDL_CreateMutex();
int scan_all_pmts_set = 0;
int64_t pkt_ts;

memset(st_index, -1, sizeof(st_index));
is->last_video_stream = is->video_stream = -1;
is->last_audio_stream = is->audio_stream = -1;
is->last_subtitle_stream = is->subtitle_stream = -1;
is->eof = 0;

ic = avformat_alloc_context();
if (!ic) {
av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
ret = AVERROR(ENOMEM);
goto fail;
}
ic->interrupt_callback.callback = decode_interrupt_cb;
ic->interrupt_callback.opaque = is;
if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {

@@ -2972,42 +2968,29 @@ static int read_thread(void *arg)

is->realtime = is_realtime(ic);

if (show_status)
av_dump_format(ic, 0, is->filename, 0);

for (i = 0; i < ic->nb_streams; i++) {
AVStream *st = ic->streams[i];
enum AVMediaType type = st->codec->codec_type;
st->discard = AVDISCARD_ALL;
if (wanted_stream_spec[type] && st_index[type] == -1)
if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
st_index[type] = i;
}
for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
if (wanted_stream_spec[i] && st_index[i] == -1) {
av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
st_index[i] = INT_MAX;
}
}

for (i = 0; i < ic->nb_streams; i++)
ic->streams[i]->discard = AVDISCARD_ALL;
if (!video_disable)
st_index[AVMEDIA_TYPE_VIDEO] =
av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
if (!audio_disable)
st_index[AVMEDIA_TYPE_AUDIO] =
av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
st_index[AVMEDIA_TYPE_AUDIO],
wanted_stream[AVMEDIA_TYPE_AUDIO],
st_index[AVMEDIA_TYPE_VIDEO],
NULL, 0);
if (!video_disable && !subtitle_disable)
st_index[AVMEDIA_TYPE_SUBTITLE] =
av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
st_index[AVMEDIA_TYPE_SUBTITLE],
wanted_stream[AVMEDIA_TYPE_SUBTITLE],
(st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
st_index[AVMEDIA_TYPE_AUDIO] :
st_index[AVMEDIA_TYPE_VIDEO]),
NULL, 0);
if (show_status) {
av_dump_format(ic, 0, is->filename, 0);
}

is->show_mode = show_mode;
if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {

@@ -3096,7 +3079,7 @@ static int read_thread(void *arg)
}
is->seek_req = 0;
is->queue_attachments_req = 1;
is->eof = 0;
eof = 0;
if (is->paused)
step_to_next_frame(is);
}

@@ -3136,14 +3119,14 @@ static int read_thread(void *arg)
}
ret = av_read_frame(ic, pkt);
if (ret < 0) {
if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !eof) {
if (is->video_stream >= 0)
packet_queue_put_nullpacket(&is->videoq, is->video_stream);
if (is->audio_stream >= 0)
packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
if (is->subtitle_stream >= 0)
packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
is->eof = 1;
eof = 1;
}
if (ic->pb && ic->pb->error)
break;

@@ -3152,13 +3135,12 @@ static int read_thread(void *arg)
SDL_UnlockMutex(wait_mutex);
continue;
} else {
is->eof = 0;
eof = 0;
}
/* check if packet is in play range specified by user, then queue, otherwise discard */
stream_start_time = ic->streams[pkt->stream_index]->start_time;
pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
pkt_in_play_range = duration == AV_NOPTS_VALUE ||
(pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
(pkt->pts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
av_q2d(ic->streams[pkt->stream_index]->time_base) -
(double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
<= ((double)duration / 1000000);

@@ -3692,9 +3674,9 @@ static const OptionDef options[] = {
{ "an", OPT_BOOL, { &audio_disable }, "disable audio" },
{ "vn", OPT_BOOL, { &video_disable }, "disable video" },
{ "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
{ "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
{ "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
{ "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
{ "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
{ "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
{ "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
{ "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
{ "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
{ "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },

118
ffprobe.c
@@ -33,12 +33,10 @@
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/bprint.h"
|
||||
#include "libavutil/display.h"
|
||||
#include "libavutil/hash.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "libavutil/dict.h"
|
||||
#include "libavutil/intreadwrite.h"
|
||||
#include "libavutil/libm.h"
|
||||
#include "libavutil/parseutils.h"
|
||||
#include "libavutil/timecode.h"
|
||||
@@ -88,7 +86,7 @@ static char *print_format;
|
||||
static char *stream_specifier;
|
||||
static char *show_data_hash;
|
||||
|
||||
typedef struct ReadInterval {
|
||||
typedef struct {
|
||||
int id; ///< identifier
|
||||
int64_t start, end; ///< start, end in second/AV_TIME_BASE units
|
||||
int has_start, has_end;
|
||||
@@ -137,8 +135,6 @@ typedef enum {
|
||||
SECTION_ID_PACKET,
|
||||
SECTION_ID_PACKETS,
|
||||
SECTION_ID_PACKETS_AND_FRAMES,
|
||||
SECTION_ID_PACKET_SIDE_DATA_LIST,
|
||||
SECTION_ID_PACKET_SIDE_DATA,
|
||||
SECTION_ID_PIXEL_FORMAT,
|
||||
SECTION_ID_PIXEL_FORMAT_FLAGS,
|
||||
SECTION_ID_PIXEL_FORMAT_COMPONENT,
|
||||
@@ -157,8 +153,6 @@ typedef enum {
|
||||
SECTION_ID_STREAM_DISPOSITION,
|
||||
SECTION_ID_STREAMS,
|
||||
SECTION_ID_STREAM_TAGS,
|
||||
SECTION_ID_STREAM_SIDE_DATA_LIST,
|
||||
SECTION_ID_STREAM_SIDE_DATA,
|
||||
SECTION_ID_SUBTITLE,
|
||||
} SectionID;
|
||||
|
||||
@@ -178,9 +172,7 @@ static struct section sections[] = {
|
||||
[SECTION_ID_LIBRARY_VERSION] = { SECTION_ID_LIBRARY_VERSION, "library_version", 0, { -1 } },
|
||||
[SECTION_ID_PACKETS] = { SECTION_ID_PACKETS, "packets", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET, -1} },
|
||||
[SECTION_ID_PACKETS_AND_FRAMES] = { SECTION_ID_PACKETS_AND_FRAMES, "packets_and_frames", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET, -1} },
|
||||
[SECTION_ID_PACKET] = { SECTION_ID_PACKET, "packet", 0, { SECTION_ID_PACKET_SIDE_DATA_LIST, -1 } },
|
||||
[SECTION_ID_PACKET_SIDE_DATA_LIST] ={ SECTION_ID_PACKET_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PACKET_SIDE_DATA, -1 } },
|
||||
[SECTION_ID_PACKET_SIDE_DATA] = { SECTION_ID_PACKET_SIDE_DATA, "side_data", 0, { -1 } },
|
||||
[SECTION_ID_PACKET] = { SECTION_ID_PACKET, "packet", 0, { -1 } },
|
||||
[SECTION_ID_PIXEL_FORMATS] = { SECTION_ID_PIXEL_FORMATS, "pixel_formats", SECTION_FLAG_IS_ARRAY, { SECTION_ID_PIXEL_FORMAT, -1 } },
|
||||
[SECTION_ID_PIXEL_FORMAT] = { SECTION_ID_PIXEL_FORMAT, "pixel_format", 0, { SECTION_ID_PIXEL_FORMAT_FLAGS, SECTION_ID_PIXEL_FORMAT_COMPONENTS, -1 } },
|
||||
[SECTION_ID_PIXEL_FORMAT_FLAGS] = { SECTION_ID_PIXEL_FORMAT_FLAGS, "flags", 0, { -1 }, .unique_name = "pixel_format_flags" },
|
||||
@@ -199,11 +191,9 @@ static struct section sections[] = {
|
||||
SECTION_ID_PACKETS, SECTION_ID_ERROR, SECTION_ID_PROGRAM_VERSION, SECTION_ID_LIBRARY_VERSIONS,
|
||||
SECTION_ID_PIXEL_FORMATS, -1} },
|
||||
[SECTION_ID_STREAMS] = { SECTION_ID_STREAMS, "streams", SECTION_FLAG_IS_ARRAY, { SECTION_ID_STREAM, -1 } },
|
||||
[SECTION_ID_STREAM] = { SECTION_ID_STREAM, "stream", 0, { SECTION_ID_STREAM_DISPOSITION, SECTION_ID_STREAM_TAGS, SECTION_ID_STREAM_SIDE_DATA_LIST, -1 } },
|
||||
[SECTION_ID_STREAM] = { SECTION_ID_STREAM, "stream", 0, { SECTION_ID_STREAM_DISPOSITION, SECTION_ID_STREAM_TAGS, -1 } },
|
||||
[SECTION_ID_STREAM_DISPOSITION] = { SECTION_ID_STREAM_DISPOSITION, "disposition", 0, { -1 }, .unique_name = "stream_disposition" },
|
||||
[SECTION_ID_STREAM_TAGS] = { SECTION_ID_STREAM_TAGS, "tags", SECTION_FLAG_HAS_VARIABLE_FIELDS, { -1 }, .element_name = "tag", .unique_name = "stream_tags" },
|
||||
[SECTION_ID_STREAM_SIDE_DATA_LIST] ={ SECTION_ID_STREAM_SIDE_DATA_LIST, "side_data_list", SECTION_FLAG_IS_ARRAY, { SECTION_ID_STREAM_SIDE_DATA, -1 } },
|
||||
[SECTION_ID_STREAM_SIDE_DATA] = { SECTION_ID_STREAM_SIDE_DATA, "side_data", 0, { -1 } },
|
||||
[SECTION_ID_SUBTITLE] = { SECTION_ID_SUBTITLE, "subtitle", 0, { -1 } },
|
||||
};
|
||||
|
||||
@@ -348,7 +338,7 @@ struct WriterContext {
|
||||
unsigned int nb_section_frame; ///< number of the frame section in case we are in "packets_and_frames" section
|
||||
unsigned int nb_section_packet_frame; ///< nb_section_packet or nb_section_frame according if is_packets_and_frames
|
||||
|
||||
int string_validation;
|
||||
StringValidation string_validation;
|
||||
char *string_validation_replacement;
|
||||
unsigned int string_validation_utf8_flags;
|
||||
};
|
||||
@@ -728,32 +718,6 @@ static void writer_print_data_hash(WriterContext *wctx, const char *name,
|
||||
writer_print_string(wctx, name, buf, 0);
|
||||
}
|
||||
|
||||
static void writer_print_integers(WriterContext *wctx, const char *name,
|
||||
uint8_t *data, int size, const char *format,
|
||||
int columns, int bytes, int offset_add)
|
||||
{
|
||||
AVBPrint bp;
|
||||
int offset = 0, l, i;
|
||||
|
||||
av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);
|
||||
av_bprintf(&bp, "\n");
|
||||
while (size) {
|
||||
av_bprintf(&bp, "%08x: ", offset);
|
||||
l = FFMIN(size, columns);
|
||||
for (i = 0; i < l; i++) {
|
||||
if (bytes == 1) av_bprintf(&bp, format, *data);
|
||||
else if (bytes == 2) av_bprintf(&bp, format, AV_RN16(data));
|
||||
else if (bytes == 4) av_bprintf(&bp, format, AV_RN32(data));
|
||||
data += bytes;
|
||||
size --;
|
||||
}
|
||||
av_bprintf(&bp, "\n");
|
||||
offset += offset_add;
|
||||
}
|
||||
writer_print_string(wctx, name, bp.str, 0);
|
||||
av_bprint_finalize(&bp, NULL);
|
||||
}
|
||||
|
||||
#define MAX_REGISTERED_WRITERS_NB 64
|
||||
|
||||
static const Writer *registered_writers[MAX_REGISTERED_WRITERS_NB + 1];
|
||||
@@ -1228,7 +1192,7 @@ static const Writer flat_writer = {
|
||||
|
||||
/* INI format output */
|
||||
|
||||
typedef struct INIContext {
|
||||
typedef struct {
|
||||
const AVClass *class;
|
||||
int hierarchical;
|
||||
} INIContext;
|
||||
@@ -1332,7 +1296,7 @@ static const Writer ini_writer = {
|
||||
|
||||
/* JSON output */
|
||||
|
||||
typedef struct JSONContext {
|
||||
typedef struct {
|
||||
const AVClass *class;
|
||||
int indent_level;
|
||||
int compact;
|
||||
@@ -1494,7 +1458,7 @@ static const Writer json_writer = {
|
||||
|
||||
/* XML output */
|
||||
|
||||
typedef struct XMLContext {
|
||||
typedef struct {
|
||||
const AVClass *class;
|
||||
int within_tag;
|
||||
int indent_level;
|
||||
@@ -1759,25 +1723,6 @@ static void show_packet(WriterContext *w, AVFormatContext *fmt_ctx, AVPacket *pk
|
||||
if (pkt->pos != -1) print_fmt ("pos", "%"PRId64, pkt->pos);
|
||||
else print_str_opt("pos", "N/A");
|
||||
print_fmt("flags", "%c", pkt->flags & AV_PKT_FLAG_KEY ? 'K' : '_');
|
||||
|
||||
if (pkt->side_data_elems) {
|
||||
int i;
|
||||
writer_print_section_header(w, SECTION_ID_PACKET_SIDE_DATA_LIST);
|
||||
for (i = 0; i < pkt->side_data_elems; i++) {
|
||||
AVPacketSideData *sd = &pkt->side_data[i];
|
||||
const char *name = av_packet_side_data_name(sd->type);
|
||||
writer_print_section_header(w, SECTION_ID_PACKET_SIDE_DATA);
|
||||
print_str("side_data_type", name ? name : "unknown");
|
||||
print_int("side_data_size", sd->size);
|
||||
if (sd->type == AV_PKT_DATA_DISPLAYMATRIX && sd->size >= 9*4) {
|
||||
writer_print_integers(w, "displaymatrix", sd->data, 9, " %11d", 3, 4, 1);
|
||||
print_int("rotation", av_display_rotation_get((int32_t *)sd->data));
|
||||
}
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
|
||||
if (do_show_data)
|
||||
writer_print_data(w, "data", pkt->data, pkt->size);
|
||||
writer_print_data_hash(w, "data_hash", pkt->data, pkt->size);
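Note: the side-data loop above derives the rotation from a 3x3 display matrix via av_display_rotation_get() (libavutil/display.h), which returns the angle in degrees as a double. A minimal sketch of that step on its own, reusing the sd variable from the loop:

    #include <libavutil/display.h>

    if (sd->type == AV_PKT_DATA_DISPLAYMATRIX && sd->size >= 9 * 4) {
        /* sd->data holds nine int32_t values forming the 3x3 matrix */
        double rotation = av_display_rotation_get((int32_t *)sd->data);
        av_log(NULL, AV_LOG_INFO, "rotation: %f degrees\n", rotation);
    }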
@@ -1824,7 +1769,6 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
|
||||
s = av_get_media_type_string(stream->codec->codec_type);
|
||||
if (s) print_str ("media_type", s);
|
||||
else print_str_opt("media_type", "unknown");
|
||||
print_int("stream_index", stream->index);
|
||||
print_int("key_frame", frame->key_frame);
|
||||
print_ts ("pkt_pts", frame->pkt_pts);
|
||||
print_time("pkt_pts_time", frame->pkt_pts, &stream->time_base);
|
||||
@@ -1889,11 +1833,6 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
|
||||
name = av_frame_side_data_name(sd->type);
|
||||
print_str("side_data_type", name ? name : "unknown");
|
||||
print_int("side_data_size", sd->size);
|
||||
if (sd->type == AV_FRAME_DATA_DISPLAYMATRIX && sd->size >= 9*4) {
|
||||
abort();
|
||||
writer_print_integers(w, "displaymatrix", sd->data, 9, " %11d", 3, 4, 1);
|
||||
print_int("rotation", av_display_rotation_get((int32_t *)sd->data));
|
||||
}
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
writer_print_section_footer(w);
|
||||
@@ -2155,8 +2094,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
print_int("width", dec_ctx->width);
|
||||
print_int("height", dec_ctx->height);
|
||||
print_int("coded_width", dec_ctx->coded_width);
|
||||
print_int("coded_height", dec_ctx->coded_height);
|
||||
print_int("has_b_frames", dec_ctx->has_b_frames);
|
||||
sar = av_guess_sample_aspect_ratio(fmt_ctx, stream, NULL);
|
||||
if (sar.den) {
|
||||
@@ -2204,7 +2141,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
|
||||
} else {
|
||||
print_str_opt("timecode", "N/A");
|
||||
}
|
||||
print_int("refs", dec_ctx->refs);
|
||||
break;
|
||||
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
@@ -2302,25 +2238,6 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
|
||||
if (do_show_stream_tags)
|
||||
ret = show_tags(w, stream->metadata, in_program ? SECTION_ID_PROGRAM_STREAM_TAGS : SECTION_ID_STREAM_TAGS);
|
||||
|
||||
if (stream->nb_side_data) {
|
||||
int i;
|
||||
writer_print_section_header(w, SECTION_ID_STREAM_SIDE_DATA_LIST);
|
||||
for (i = 0; i < stream->nb_side_data; i++) {
|
||||
AVPacketSideData *sd = &stream->side_data[i];
|
||||
const char *name = av_packet_side_data_name(sd->type);
|
||||
|
||||
writer_print_section_header(w, SECTION_ID_STREAM_SIDE_DATA);
|
||||
print_str("side_data_type", name ? name : "unknown");
|
||||
print_int("side_data_size", sd->size);
|
||||
if (sd->type == AV_PKT_DATA_DISPLAYMATRIX && sd->size >= 9*4) {
|
||||
writer_print_integers(w, "displaymatrix", sd->data, 9, " %11d", 3, 4, 1);
|
||||
print_int("rotation", av_display_rotation_get((int32_t *)sd->data));
|
||||
}
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
writer_print_section_footer(w);
|
||||
}
|
||||
|
||||
writer_print_section_footer(w);
|
||||
av_bprint_finalize(&pbuf, NULL);
|
||||
fflush(stdout);
|
||||
@@ -2480,7 +2397,6 @@ static int open_input_file(AVFormatContext **fmt_ctx_ptr, const char *filename)
|
||||
print_error(filename, err);
|
||||
return err;
|
||||
}
|
||||
*fmt_ctx_ptr = fmt_ctx;
|
||||
if (scan_all_pmts_set)
|
||||
av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
|
||||
if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
|
||||
@@ -2492,16 +2408,13 @@ static int open_input_file(AVFormatContext **fmt_ctx_ptr, const char *filename)
|
||||
opts = setup_find_stream_info_opts(fmt_ctx, codec_opts);
|
||||
orig_nb_streams = fmt_ctx->nb_streams;
|
||||
|
||||
err = avformat_find_stream_info(fmt_ctx, opts);
|
||||
|
||||
for (i = 0; i < orig_nb_streams; i++)
|
||||
av_dict_free(&opts[i]);
|
||||
av_freep(&opts);
|
||||
|
||||
if (err < 0) {
|
||||
if ((err = avformat_find_stream_info(fmt_ctx, opts)) < 0) {
|
||||
print_error(filename, err);
|
||||
return err;
|
||||
}
|
||||
for (i = 0; i < orig_nb_streams; i++)
|
||||
av_dict_free(&opts[i]);
|
||||
av_freep(&opts);
|
||||
|
||||
av_dump_format(fmt_ctx, 0, filename, 0);
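Note: the two variants above differ in where the per-stream option dictionaries are released relative to the error check. The ordering that avoids leaking them on failure (a minimal sketch, assuming fmt_ctx is already open and opts came from setup_find_stream_info_opts()) frees the dictionaries on both paths before returning:

    err = avformat_find_stream_info(fmt_ctx, opts);

    /* release the per-stream AVDictionary array regardless of the result */
    for (i = 0; i < orig_nb_streams; i++)
        av_dict_free(&opts[i]);
    av_freep(&opts);

    if (err < 0) {
        print_error(filename, err);
        return err;
    }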
@@ -2552,7 +2465,7 @@ static void close_input_file(AVFormatContext **ctx_ptr)
|
||||
|
||||
static int probe_file(WriterContext *wctx, const char *filename)
|
||||
{
|
||||
AVFormatContext *fmt_ctx = NULL;
|
||||
AVFormatContext *fmt_ctx;
|
||||
int ret, i;
|
||||
int section_id;
|
||||
|
||||
@@ -2561,7 +2474,7 @@ static int probe_file(WriterContext *wctx, const char *filename)
|
||||
|
||||
ret = open_input_file(&fmt_ctx, filename);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
return ret;
|
||||
|
||||
#define CHECK_END if (ret < 0) goto end
|
||||
|
||||
@@ -2619,8 +2532,7 @@ static int probe_file(WriterContext *wctx, const char *filename)
|
||||
}
|
||||
|
||||
end:
|
||||
if (fmt_ctx)
|
||||
close_input_file(&fmt_ctx);
|
||||
close_input_file(&fmt_ctx);
|
||||
av_freep(&nb_streams_frames);
|
||||
av_freep(&nb_streams_packets);
|
||||
av_freep(&selected_streams);
|
||||
@@ -2644,6 +2556,8 @@ static void ffprobe_show_program_version(WriterContext *w)
|
||||
print_str("version", FFMPEG_VERSION);
|
||||
print_fmt("copyright", "Copyright (c) %d-%d the FFmpeg developers",
|
||||
program_birth_year, CONFIG_THIS_YEAR);
|
||||
print_str("build_date", __DATE__);
|
||||
print_str("build_time", __TIME__);
|
||||
print_str("compiler_ident", CC_IDENT);
|
||||
print_str("configuration", FFMPEG_CONFIGURATION);
|
||||
writer_print_section_footer(w);
ffserver.c: 850 changed lines (diff suppressed because it is too large, not loaded)

ffserver_config.c
@@ -35,12 +35,10 @@
|
||||
|
||||
static int ffserver_save_avoption(const char *opt, const char *arg, int type,
|
||||
FFServerConfig *config);
|
||||
static void vreport_config_error(const char *filename, int line_num,
|
||||
int log_level, int *errors, const char *fmt,
|
||||
va_list vl);
|
||||
static void report_config_error(const char *filename, int line_num,
|
||||
int log_level, int *errors, const char *fmt,
|
||||
...);
|
||||
static void vreport_config_error(const char *filename, int line_num, int log_level,
|
||||
int *errors, const char *fmt, va_list vl);
|
||||
static void report_config_error(const char *filename, int line_num, int log_level,
|
||||
int *errors, const char *fmt, ...);
|
||||
|
||||
#define ERROR(...) report_config_error(config->filename, config->line_num,\
|
||||
AV_LOG_ERROR, &config->errors, __VA_ARGS__)
|
||||
@@ -86,24 +84,28 @@ void ffserver_get_arg(char *buf, int buf_size, const char **pp)
|
||||
{
|
||||
const char *p;
|
||||
char *q;
|
||||
int quote = 0;
|
||||
int quote;
|
||||
|
||||
p = *pp;
|
||||
q = buf;
|
||||
|
||||
while (av_isspace(*p)) p++;
|
||||
|
||||
q = buf;
|
||||
quote = 0;
|
||||
if (*p == '\"' || *p == '\'')
|
||||
quote = *p++;
|
||||
|
||||
while (*p != '\0') {
|
||||
if (quote && *p == quote || !quote && av_isspace(*p))
|
||||
for(;;) {
|
||||
if (quote) {
|
||||
if (*p == quote)
|
||||
break;
|
||||
} else {
|
||||
if (av_isspace(*p))
|
||||
break;
|
||||
}
|
||||
if (*p == '\0')
|
||||
break;
|
||||
if ((q - buf) < buf_size - 1)
|
||||
*q++ = *p;
|
||||
p++;
|
||||
}
|
||||
|
||||
*q = '\0';
|
||||
if (quote && *p == quote)
|
||||
p++;
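Note: both versions of ffserver_get_arg() implement the same contract: skip leading whitespace, copy one token (quoted with ' or ", or delimited by whitespace) into buf, NUL-terminate it, and advance *pp past the token. A small usage sketch under that assumption, with a made-up input line:

    const char *p = "  \"two words\"  next";
    char arg[64];

    ffserver_get_arg(arg, sizeof(arg), &p);   /* arg = "two words" (quotes stripped) */
    ffserver_get_arg(arg, sizeof(arg), &p);   /* arg = "next" */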
@@ -124,7 +126,7 @@ void ffserver_parse_acl_row(FFServerStream *stream, FFServerStream* feed,
|
||||
else if (av_strcasecmp(arg, "deny") == 0)
|
||||
acl.action = IP_DENY;
|
||||
else {
|
||||
fprintf(stderr, "%s:%d: ACL action '%s' should be ALLOW or DENY.\n",
|
||||
fprintf(stderr, "%s:%d: ACL action '%s' is not ALLOW or DENY\n",
|
||||
filename, line_num, arg);
|
||||
errors++;
|
||||
}
|
||||
@@ -132,8 +134,7 @@ void ffserver_parse_acl_row(FFServerStream *stream, FFServerStream* feed,
|
||||
ffserver_get_arg(arg, sizeof(arg), &p);
|
||||
|
||||
if (resolve_host(&acl.first, arg)) {
|
||||
fprintf(stderr,
|
||||
"%s:%d: ACL refers to invalid host or IP address '%s'\n",
|
||||
fprintf(stderr, "%s:%d: ACL refers to invalid host or IP address '%s'\n",
|
||||
filename, line_num, arg);
|
||||
errors++;
|
||||
} else
|
||||
@@ -164,7 +165,7 @@ void ffserver_parse_acl_row(FFServerStream *stream, FFServerStream* feed,
|
||||
else if (ext_acl)
|
||||
naclp = &ext_acl;
|
||||
else {
|
||||
fprintf(stderr, "%s:%d: ACL found not in <Stream> or <Feed>\n",
|
||||
fprintf(stderr, "%s:%d: ACL found not in <stream> or <feed>\n",
|
||||
filename, line_num);
|
||||
errors++;
|
||||
}
|
||||
@@ -195,38 +196,12 @@ static void add_codec(FFServerStream *stream, AVCodecContext *av,
|
||||
av_dict_copy(&recommended, *opts, 0);
|
||||
av_opt_set_dict2(av->priv_data, opts, AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set_dict2(av, opts, AV_OPT_SEARCH_CHILDREN);
|
||||
|
||||
if (av_dict_count(*opts))
|
||||
av_log(NULL, AV_LOG_WARNING,
|
||||
"Something is wrong, %d options are not set!\n",
|
||||
av_dict_count(*opts));
|
||||
|
||||
if (!config->stream_use_defaults) {
|
||||
switch(av->codec_type) {
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
if (av->bit_rate == 0)
|
||||
report_config_error(config->filename, config->line_num,
|
||||
AV_LOG_ERROR, &config->errors,
|
||||
"audio bit rate is not set\n");
|
||||
if (av->sample_rate == 0)
|
||||
report_config_error(config->filename, config->line_num,
|
||||
AV_LOG_ERROR, &config->errors,
|
||||
"audio sample rate is not set\n");
|
||||
break;
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
if (av->width == 0 || av->height == 0)
|
||||
report_config_error(config->filename, config->line_num,
|
||||
AV_LOG_ERROR, &config->errors,
|
||||
"video size is not set\n");
|
||||
break;
|
||||
default:
|
||||
av_assert0(0);
|
||||
}
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* stream_use_defaults = true */
|
||||
"Something is wrong, %d options are not set!\n", av_dict_count(*opts));
|
||||
|
||||
if (config->stream_use_defaults) {
|
||||
//TODO: reident
|
||||
/* compute default parameters */
|
||||
switch(av->codec_type) {
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
@@ -289,8 +264,8 @@ static void add_codec(FFServerStream *stream, AVCodecContext *av,
|
||||
if (!av_dict_get(recommended, "rc_eq", NULL, 0)) {
|
||||
av->rc_eq = av_strdup("tex^qComp");
|
||||
av_dict_set(&recommended, "rc_eq", "tex^qComp", 0);
|
||||
WARNING("Setting default value for video rate control equation = "
|
||||
"%s. Use NoDefaults to disable it.\n",
|
||||
WARNING("Setting default value for video rate control equation = %s. "
|
||||
"Use NoDefaults to disable it.\n",
|
||||
av->rc_eq);
|
||||
}
|
||||
if (!av_dict_get(recommended, "maxrate", NULL, 0)) {
|
||||
@@ -312,8 +287,26 @@ static void add_codec(FFServerStream *stream, AVCodecContext *av,
|
||||
default:
|
||||
abort();
|
||||
}
|
||||
} else {
|
||||
switch(av->codec_type) {
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
if (av->bit_rate == 0)
|
||||
report_config_error(config->filename, config->line_num, AV_LOG_ERROR,
|
||||
&config->errors, "audio bit rate is not set\n");
|
||||
if (av->sample_rate == 0)
|
||||
report_config_error(config->filename, config->line_num, AV_LOG_ERROR,
|
||||
&config->errors, "audio sample rate is not set\n");
|
||||
break;
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
if (av->width == 0 || av->height == 0)
|
||||
report_config_error(config->filename, config->line_num, AV_LOG_ERROR,
|
||||
&config->errors, "video size is not set\n");
|
||||
break;
|
||||
default:
|
||||
av_assert0(0);
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
st = av_mallocz(sizeof(AVStream));
|
||||
if (!st)
|
||||
return;
|
||||
@@ -324,15 +317,13 @@ done:
|
||||
stream->streams[stream->nb_streams++] = st;
|
||||
}
|
||||
|
||||
static int ffserver_set_codec(AVCodecContext *ctx, const char *codec_name,
|
||||
FFServerConfig *config)
|
||||
static int ffserver_set_codec(AVCodecContext *ctx, const char *codec_name, FFServerConfig *config)
|
||||
{
|
||||
int ret;
|
||||
AVCodec *codec = avcodec_find_encoder_by_name(codec_name);
|
||||
if (!codec || codec->type != ctx->codec_type) {
|
||||
report_config_error(config->filename, config->line_num, AV_LOG_ERROR,
|
||||
&config->errors,
|
||||
"Invalid codec name: '%s'\n", codec_name);
|
||||
&config->errors, "Invalid codec name: %s\n", codec_name);
|
||||
return 0;
|
||||
}
|
||||
if (ctx->codec_id == AV_CODEC_ID_NONE && !ctx->priv_data) {
|
||||
@@ -341,10 +332,8 @@ static int ffserver_set_codec(AVCodecContext *ctx, const char *codec_name,
|
||||
ctx->codec = codec;
|
||||
}
|
||||
if (ctx->codec_id != codec->id)
|
||||
report_config_error(config->filename, config->line_num, AV_LOG_ERROR,
|
||||
&config->errors,
|
||||
"Inconsistent configuration: trying to set '%s' "
|
||||
"codec option, but '%s' codec is used previously\n",
|
||||
report_config_error(config->filename, config->line_num, AV_LOG_ERROR, &config->errors,
|
||||
"Inconsistent configuration: trying to set %s codec option, but %s codec is used previously\n",
|
||||
codec_name, avcodec_get_name(ctx->codec_id));
|
||||
return 0;
|
||||
}
|
||||
@@ -381,13 +370,12 @@ static int ffserver_opt_preset(const char *arg, int type, FFServerConfig *config
|
||||
continue;
|
||||
e|= sscanf(line, "%999[^=]=%999[^\n]\n", tmp, tmp2) - 2;
|
||||
if(e){
|
||||
av_log(NULL, AV_LOG_ERROR, "%s: Invalid syntax: '%s'\n", filename,
|
||||
line);
|
||||
av_log(NULL, AV_LOG_ERROR, "%s: Invalid syntax: '%s'\n", filename, line);
|
||||
ret = AVERROR(EINVAL);
|
||||
break;
|
||||
}
|
||||
if (!strcmp(tmp, "acodec") && avctx->codec_type == AVMEDIA_TYPE_AUDIO ||
|
||||
!strcmp(tmp, "vcodec") && avctx->codec_type == AVMEDIA_TYPE_VIDEO)
|
||||
if ((!strcmp(tmp, "acodec") && avctx->codec_type == AVMEDIA_TYPE_AUDIO) ||
|
||||
!strcmp(tmp, "vcodec") && avctx->codec_type == AVMEDIA_TYPE_VIDEO)
|
||||
{
|
||||
if (ffserver_set_codec(avctx, tmp2, config) < 0)
|
||||
break;
|
||||
@@ -404,9 +392,7 @@ static int ffserver_opt_preset(const char *arg, int type, FFServerConfig *config
|
||||
return ret;
|
||||
}
|
||||
|
||||
static AVOutputFormat *ffserver_guess_format(const char *short_name,
|
||||
const char *filename,
|
||||
const char *mime_type)
|
||||
static AVOutputFormat *ffserver_guess_format(const char *short_name, const char *filename, const char *mime_type)
|
||||
{
|
||||
AVOutputFormat *fmt = av_guess_format(short_name, filename, mime_type);
|
||||
|
||||
@@ -425,9 +411,7 @@ static AVOutputFormat *ffserver_guess_format(const char *short_name,
|
||||
return fmt;
|
||||
}
|
||||
|
||||
static void vreport_config_error(const char *filename, int line_num,
|
||||
int log_level, int *errors, const char *fmt,
|
||||
va_list vl)
|
||||
static void vreport_config_error(const char *filename, int line_num, int log_level, int *errors, const char *fmt, va_list vl)
|
||||
{
|
||||
av_log(NULL, log_level, "%s:%d: ", filename, line_num);
|
||||
av_vlog(NULL, log_level, fmt, vl);
|
||||
@@ -435,9 +419,7 @@ static void vreport_config_error(const char *filename, int line_num,
|
||||
(*errors)++;
|
||||
}
|
||||
|
||||
static void report_config_error(const char *filename, int line_num,
|
||||
int log_level, int *errors,
|
||||
const char *fmt, ...)
|
||||
static void report_config_error(const char *filename, int line_num, int log_level, int *errors, const char *fmt, ...)
|
||||
{
|
||||
va_list vl;
|
||||
va_start(vl, fmt);
|
||||
@@ -445,9 +427,8 @@ static void report_config_error(const char *filename, int line_num,
|
||||
va_end(vl);
|
||||
}
|
||||
|
||||
static int ffserver_set_int_param(int *dest, const char *value, int factor,
|
||||
int min, int max, FFServerConfig *config,
|
||||
const char *error_msg, ...)
|
||||
static int ffserver_set_int_param(int *dest, const char *value, int factor, int min, int max,
|
||||
FFServerConfig *config, const char *error_msg, ...)
|
||||
{
|
||||
int tmp;
|
||||
char *tailp;
|
||||
@@ -478,10 +459,8 @@ static int ffserver_set_int_param(int *dest, const char *value, int factor,
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
static int ffserver_set_float_param(float *dest, const char *value,
|
||||
float factor, float min, float max,
|
||||
FFServerConfig *config,
|
||||
const char *error_msg, ...)
|
||||
static int ffserver_set_float_param(float *dest, const char *value, float factor, float min, float max,
|
||||
FFServerConfig *config, const char *error_msg, ...)
|
||||
{
|
||||
double tmp;
|
||||
char *tailp;
|
||||
@@ -509,8 +488,7 @@ static int ffserver_set_float_param(float *dest, const char *value,
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
static int ffserver_save_avoption(const char *opt, const char *arg, int type,
|
||||
FFServerConfig *config)
|
||||
static int ffserver_save_avoption(const char *opt, const char *arg, int type, FFServerConfig *config)
|
||||
{
|
||||
static int hinted = 0;
|
||||
int ret = 0;
|
||||
@@ -544,13 +522,7 @@ static int ffserver_save_avoption(const char *opt, const char *arg, int type,
|
||||
//explicit private option
|
||||
snprintf(buff, sizeof(buff), "%s", opt);
|
||||
codec_name = buff;
|
||||
if(!(option = strchr(buff, ':'))){
|
||||
report_config_error(config->filename, config->line_num,
|
||||
AV_LOG_ERROR, &config->errors,
|
||||
"Syntax error. Unmatched ':'\n");
|
||||
return -1;
|
||||
|
||||
}
|
||||
option = strchr(buff, ':');
|
||||
buff[option - buff] = '\0';
|
||||
option++;
|
||||
if ((ret = ffserver_set_codec(ctx, codec_name, config)) < 0)
|
||||
@@ -561,35 +533,31 @@ static int ffserver_save_avoption(const char *opt, const char *arg, int type,
|
||||
option = opt;
|
||||
}
|
||||
|
||||
o = av_opt_find(ctx, option, NULL, type | AV_OPT_FLAG_ENCODING_PARAM,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (!o &&
|
||||
(!strcmp(option, "time_base") || !strcmp(option, "pixel_format") ||
|
||||
!strcmp(option, "video_size") || !strcmp(option, "codec_tag")))
|
||||
o = av_opt_find(ctx, option, NULL, type | AV_OPT_FLAG_ENCODING_PARAM, AV_OPT_SEARCH_CHILDREN);
|
||||
if (!o && (!strcmp(option, "time_base") || !strcmp(option, "pixel_format") ||
|
||||
!strcmp(option, "video_size") || !strcmp(option, "codec_tag")))
|
||||
o = av_opt_find(ctx, option, NULL, 0, 0);
|
||||
if (!o) {
|
||||
report_config_error(config->filename, config->line_num, AV_LOG_ERROR,
|
||||
&config->errors, "Option not found: '%s'\n", opt);
|
||||
&config->errors, "Option not found: %s\n", opt);
|
||||
if (!hinted && ctx->codec_id == AV_CODEC_ID_NONE) {
|
||||
hinted = 1;
|
||||
report_config_error(config->filename, config->line_num,
|
||||
AV_LOG_ERROR, NULL, "If '%s' is a codec private"
|
||||
"option, then prefix it with codec name, for "
|
||||
"example '%s:%s %s' or define codec earlier.\n",
|
||||
opt, avcodec_get_name(guessed_codec_id) ,opt,
|
||||
arg);
|
||||
report_config_error(config->filename, config->line_num, AV_LOG_ERROR, NULL,
|
||||
"If '%s' is a codec private option, then prefix it with codec name, "
|
||||
"for example '%s:%s %s' or define codec earlier.\n",
|
||||
opt, avcodec_get_name(guessed_codec_id) ,opt, arg);
|
||||
}
|
||||
} else if ((ret = av_opt_set(ctx, option, arg, AV_OPT_SEARCH_CHILDREN)) < 0) {
|
||||
report_config_error(config->filename, config->line_num, AV_LOG_ERROR,
|
||||
&config->errors, "Invalid value for option %s (%s): %s\n", opt,
|
||||
arg, av_err2str(ret));
|
||||
} else if ((e = av_dict_get(*dict, option, NULL, 0))) {
|
||||
if ((o->type == AV_OPT_TYPE_FLAGS) && arg &&
|
||||
(arg[0] == '+' || arg[0] == '-'))
|
||||
if ((o->type == AV_OPT_TYPE_FLAGS) && arg && (arg[0] == '+' || arg[0] == '-'))
|
||||
return av_dict_set(dict, option, arg, AV_DICT_APPEND);
|
||||
report_config_error(config->filename, config->line_num, AV_LOG_ERROR,
|
||||
&config->errors, "Redeclaring value of option '%s'."
|
||||
"Previous value was: '%s'.\n", opt, e->value);
|
||||
&config->errors,
|
||||
"Redeclaring value of the option %s, previous value: %s\n",
|
||||
opt, e->value);
|
||||
} else if (av_dict_set(dict, option, arg, 0) < 0) {
|
||||
return AVERROR(ENOMEM);
|
||||
}
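Note: both formattings above keep the same lookup-then-store flow: find the AVOption on the codec context (searching child objects), fall back to a plain lookup for a few format-level names, and record the value in a dictionary, appending rather than overwriting when a flag option is given as a '+'/'-' modifier string. A compressed sketch of that flow, assuming ctx, option, arg, type and dict as in the surrounding function; it skips the validating av_opt_set() call and the duplicate-entry check the real code performs:

    const AVOption *o = av_opt_find(ctx, option, NULL,
                                    type | AV_OPT_FLAG_ENCODING_PARAM,
                                    AV_OPT_SEARCH_CHILDREN);
    if (!o)
        return -1;                                  /* unknown option: report and bail out */
    if (o->type == AV_OPT_TYPE_FLAGS && arg && (arg[0] == '+' || arg[0] == '-'))
        return av_dict_set(dict, option, arg, AV_DICT_APPEND);  /* merge flag modifiers */
    return av_dict_set(dict, option, arg, 0);       /* plain set */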
@@ -611,23 +579,21 @@ static int ffserver_parse_config_global(FFServerConfig *config, const char *cmd,
|
||||
char arg[1024];
|
||||
if (!av_strcasecmp(cmd, "Port") || !av_strcasecmp(cmd, "HTTPPort")) {
|
||||
if (!av_strcasecmp(cmd, "Port"))
|
||||
WARNING("Port option is deprecated. Use HTTPPort instead.\n");
|
||||
WARNING("Port option is deprecated, use HTTPPort instead\n");
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
ffserver_set_int_param(&val, arg, 0, 1, 65535, config,
|
||||
"Invalid port: %s\n", arg);
|
||||
if (val < 1024)
|
||||
WARNING("Trying to use IETF assigned system port: '%d'\n", val);
|
||||
WARNING("Trying to use IETF assigned system port: %d\n", val);
|
||||
config->http_addr.sin_port = htons(val);
|
||||
} else if (!av_strcasecmp(cmd, "HTTPBindAddress") ||
|
||||
!av_strcasecmp(cmd, "BindAddress")) {
|
||||
} else if (!av_strcasecmp(cmd, "HTTPBindAddress") || !av_strcasecmp(cmd, "BindAddress")) {
|
||||
if (!av_strcasecmp(cmd, "BindAddress"))
|
||||
WARNING("BindAddress option is deprecated. Use HTTPBindAddress "
|
||||
"instead.\n");
|
||||
WARNING("BindAddress option is deprecated, use HTTPBindAddress instead\n");
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
if (resolve_host(&config->http_addr.sin_addr, arg))
|
||||
ERROR("Invalid host/IP address: '%s'\n", arg);
|
||||
ERROR("Invalid host/IP address: %s\n", arg);
|
||||
} else if (!av_strcasecmp(cmd, "NoDaemon")) {
|
||||
WARNING("NoDaemon option has no effect. You should remove it.\n");
|
||||
WARNING("NoDaemon option has no effect, you should remove it\n");
|
||||
} else if (!av_strcasecmp(cmd, "RTSPPort")) {
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
ffserver_set_int_param(&val, arg, 0, 1, 65535, config,
|
||||
@@ -642,21 +608,17 @@ static int ffserver_parse_config_global(FFServerConfig *config, const char *cmd,
|
||||
ffserver_set_int_param(&val, arg, 0, 1, 65535, config,
|
||||
"Invalid MaxHTTPConnections: %s\n", arg);
|
||||
config->nb_max_http_connections = val;
|
||||
if (config->nb_max_connections > config->nb_max_http_connections) {
|
||||
ERROR("Inconsistent configuration: MaxClients(%d) > "
|
||||
"MaxHTTPConnections(%d)\n", config->nb_max_connections,
|
||||
config->nb_max_http_connections);
|
||||
}
|
||||
if (config->nb_max_connections > config->nb_max_http_connections)
|
||||
ERROR("Inconsistent configuration: MaxClients(%d) > MaxHTTPConnections(%d)\n",
|
||||
config->nb_max_connections, config->nb_max_http_connections);
|
||||
} else if (!av_strcasecmp(cmd, "MaxClients")) {
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
ffserver_set_int_param(&val, arg, 0, 1, 65535, config,
|
||||
"Invalid MaxClients: '%s'\n", arg);
|
||||
"Invalid MaxClients: %s\n", arg);
|
||||
config->nb_max_connections = val;
|
||||
if (config->nb_max_connections > config->nb_max_http_connections) {
|
||||
ERROR("Inconsistent configuration: MaxClients(%d) > "
|
||||
"MaxHTTPConnections(%d)\n", config->nb_max_connections,
|
||||
config->nb_max_http_connections);
|
||||
}
|
||||
if (config->nb_max_connections > config->nb_max_http_connections)
|
||||
ERROR("Inconsistent configuration: MaxClients(%d) > MaxHTTPConnections(%d)\n",
|
||||
config->nb_max_connections, config->nb_max_http_connections);
|
||||
} else if (!av_strcasecmp(cmd, "MaxBandwidth")) {
|
||||
int64_t llval;
|
||||
char *tailp;
|
||||
@@ -664,14 +626,12 @@ static int ffserver_parse_config_global(FFServerConfig *config, const char *cmd,
|
||||
errno = 0;
|
||||
llval = strtoll(arg, &tailp, 10);
|
||||
if (llval < 10 || llval > 10000000 || tailp[0] || errno)
|
||||
ERROR("Invalid MaxBandwidth: '%s'\n", arg);
|
||||
ERROR("Invalid MaxBandwidth: %s\n", arg);
|
||||
else
|
||||
config->max_bandwidth = llval;
|
||||
} else if (!av_strcasecmp(cmd, "CustomLog")) {
|
||||
if (!config->debug) {
|
||||
ffserver_get_arg(config->logfilename, sizeof(config->logfilename),
|
||||
p);
|
||||
}
|
||||
if (!config->debug)
|
||||
ffserver_get_arg(config->logfilename, sizeof(config->logfilename), p);
|
||||
} else if (!av_strcasecmp(cmd, "LoadModule")) {
|
||||
ERROR("Loadable modules are no longer supported\n");
|
||||
} else if (!av_strcasecmp(cmd, "NoDefaults")) {
|
||||
@@ -735,16 +695,15 @@ static int ffserver_parse_config_feed(FFServerConfig *config, const char *cmd, c
|
||||
|
||||
feed->child_argv[i] =
|
||||
av_asprintf("http://%s:%d/%s",
|
||||
(config->http_addr.sin_addr.s_addr == INADDR_ANY) ?
|
||||
"127.0.0.1" : inet_ntoa(config->http_addr.sin_addr),
|
||||
ntohs(config->http_addr.sin_port), feed->filename);
|
||||
(config->http_addr.sin_addr.s_addr == INADDR_ANY) ? "127.0.0.1" :
|
||||
inet_ntoa(config->http_addr.sin_addr), ntohs(config->http_addr.sin_port),
|
||||
feed->filename);
|
||||
if (!feed->child_argv[i])
|
||||
return AVERROR(ENOMEM);
|
||||
} else if (!av_strcasecmp(cmd, "ACL")) {
|
||||
ffserver_parse_acl_row(NULL, feed, NULL, *p, config->filename,
|
||||
config->line_num);
|
||||
} else if (!av_strcasecmp(cmd, "File") ||
|
||||
!av_strcasecmp(cmd, "ReadOnlyFile")) {
|
||||
} else if (!av_strcasecmp(cmd, "File") || !av_strcasecmp(cmd, "ReadOnlyFile")) {
|
||||
ffserver_get_arg(feed->feed_filename, sizeof(feed->feed_filename), p);
|
||||
feed->readonly = !av_strcasecmp(cmd, "ReadOnlyFile");
|
||||
} else if (!av_strcasecmp(cmd, "Truncate")) {
|
||||
@@ -753,8 +712,8 @@ static int ffserver_parse_config_feed(FFServerConfig *config, const char *cmd, c
|
||||
if (!arg[0]) {
|
||||
feed->truncate = 1;
|
||||
} else {
|
||||
WARNING("Truncate N syntax in configuration file is deprecated. "
|
||||
"Use Truncate alone with no arguments.\n");
|
||||
WARNING("Truncate N syntax in configuration file is deprecated, "
|
||||
"use Truncate alone with no arguments\n");
|
||||
feed->truncate = strtod(arg, NULL);
|
||||
}
|
||||
} else if (!av_strcasecmp(cmd, "FileMaxSize")) {
|
||||
@@ -775,14 +734,13 @@ static int ffserver_parse_config_feed(FFServerConfig *config, const char *cmd, c
|
||||
fsize *= 1024 * 1024 * 1024;
|
||||
break;
|
||||
default:
|
||||
ERROR("Invalid file size: '%s'\n", arg);
|
||||
ERROR("Invalid file size: %s\n", arg);
|
||||
break;
|
||||
}
|
||||
feed->feed_max_size = (int64_t)fsize;
|
||||
if (feed->feed_max_size < FFM_PACKET_SIZE*4) {
|
||||
ERROR("Feed max file size is too small. Must be at least %d.\n",
|
||||
FFM_PACKET_SIZE*4);
|
||||
}
|
||||
if (feed->feed_max_size < FFM_PACKET_SIZE*4)
|
||||
ERROR("Feed max file size is too small, must be at least %d\n",
|
||||
FFM_PACKET_SIZE*4);
|
||||
} else if (!av_strcasecmp(cmd, "</Feed>")) {
|
||||
*pfeed = NULL;
|
||||
} else {
|
||||
@@ -862,13 +820,11 @@ static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
|
||||
} else {
|
||||
stream->stream_type = STREAM_TYPE_LIVE;
|
||||
/* JPEG cannot be used here, so use single frame MJPEG */
|
||||
if (!strcmp(arg, "jpeg")) {
|
||||
strcpy(arg, "singlejpeg");
|
||||
stream->single_frame=1;
|
||||
}
|
||||
if (!strcmp(arg, "jpeg"))
|
||||
strcpy(arg, "mjpeg");
|
||||
stream->fmt = ffserver_guess_format(arg, NULL, NULL);
|
||||
if (!stream->fmt)
|
||||
ERROR("Unknown Format: '%s'\n", arg);
|
||||
ERROR("Unknown Format: %s\n", arg);
|
||||
}
|
||||
if (stream->fmt) {
|
||||
config->guessed_audio_codec_id = stream->fmt->audio_codec;
|
||||
@@ -878,7 +834,7 @@ static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
stream->ifmt = av_find_input_format(arg);
|
||||
if (!stream->ifmt)
|
||||
ERROR("Unknown input format: '%s'\n", arg);
|
||||
ERROR("Unknown input format: %s\n", arg);
|
||||
} else if (!av_strcasecmp(cmd, "FaviconURL")) {
|
||||
if (stream->stream_type == STREAM_TYPE_STATUS)
|
||||
ffserver_get_arg(stream->feed_filename,
|
||||
@@ -895,8 +851,8 @@ static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
|
||||
for (i = 0; i < strlen(cmd); i++)
|
||||
key[i] = av_tolower(cmd[i]);
|
||||
key[i] = 0;
|
||||
WARNING("Deprecated '%s' option in configuration file. Use "
|
||||
"'Metadata %s VALUE' instead.\n", cmd, key);
|
||||
WARNING("'%s' option in configuration file is deprecated, "
|
||||
"use 'Metadata %s VALUE' instead\n", cmd, key);
|
||||
if (av_dict_set(&stream->metadata, key, arg, 0) < 0)
|
||||
goto nomem;
|
||||
} else if (!av_strcasecmp(cmd, "Metadata")) {
|
||||
@@ -922,9 +878,8 @@ static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
|
||||
float f;
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
ffserver_set_float_param(&f, arg, 1000, -FLT_MAX, FLT_MAX, config,
|
||||
"Invalid %s: '%s'\n", cmd, arg);
|
||||
if (ffserver_save_avoption_int("ab", (int64_t)lrintf(f),
|
||||
AV_OPT_FLAG_AUDIO_PARAM, config) < 0)
|
||||
"Invalid %s: %s\n", cmd, arg);
|
||||
if (ffserver_save_avoption_int("ab", (int64_t)lrintf(f), AV_OPT_FLAG_AUDIO_PARAM, config) < 0)
|
||||
goto nomem;
|
||||
} else if (!av_strcasecmp(cmd, "AudioChannels")) {
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
@@ -942,15 +897,15 @@ static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
|
||||
if (dash) {
|
||||
*dash = '\0';
|
||||
dash++;
|
||||
if (ffserver_set_int_param(&minrate, arg, 1000, 0, INT_MAX, config, "Invalid %s: '%s'", cmd, arg) >= 0 &&
|
||||
ffserver_set_int_param(&maxrate, dash, 1000, 0, INT_MAX, config, "Invalid %s: '%s'", cmd, arg) >= 0) {
|
||||
if (ffserver_set_int_param(&minrate, arg, 1000, 0, INT_MAX, config, "Invalid %s: %s", cmd, arg) >= 0 &&
|
||||
ffserver_set_int_param(&maxrate, dash, 1000, 0, INT_MAX, config, "Invalid %s: %s", cmd, arg) >= 0) {
|
||||
if (ffserver_save_avoption_int("minrate", minrate, AV_OPT_FLAG_VIDEO_PARAM, config) < 0 ||
|
||||
ffserver_save_avoption_int("maxrate", maxrate, AV_OPT_FLAG_VIDEO_PARAM, config) < 0)
|
||||
goto nomem;
|
||||
}
|
||||
} else
|
||||
ERROR("Incorrect format for VideoBitRateRange. It should be "
|
||||
"<min>-<max>: '%s'.\n", arg);
|
||||
ERROR("Incorrect format for VideoBitRateRange -- should be "
|
||||
"<min>-<max>: %s\n", arg);
|
||||
} else if (!av_strcasecmp(cmd, "Debug")) {
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
if (ffserver_save_avoption("debug", arg, AV_OPT_FLAG_AUDIO_PARAM, config) < 0 ||
|
||||
@@ -964,19 +919,19 @@ static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
|
||||
} else if (!av_strcasecmp(cmd, "VideoBufferSize")) {
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
ffserver_set_int_param(&val, arg, 8*1024, 0, INT_MAX, config,
|
||||
"Invalid %s: '%s'", cmd, arg);
|
||||
"Invalid %s: %s", cmd, arg);
|
||||
if (ffserver_save_avoption_int("bufsize", val, AV_OPT_FLAG_VIDEO_PARAM, config) < 0)
|
||||
goto nomem;
|
||||
} else if (!av_strcasecmp(cmd, "VideoBitRateTolerance")) {
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
ffserver_set_int_param(&val, arg, 1000, INT_MIN, INT_MAX, config,
|
||||
"Invalid %s: '%s'", cmd, arg);
|
||||
"Invalid %s: %s", cmd, arg);
|
||||
if (ffserver_save_avoption_int("bt", val, AV_OPT_FLAG_VIDEO_PARAM, config) < 0)
|
||||
goto nomem;
|
||||
} else if (!av_strcasecmp(cmd, "VideoBitRate")) {
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
ffserver_set_int_param(&val, arg, 1000, INT_MIN, INT_MAX, config,
|
||||
"Invalid %s: '%s'", cmd, arg);
|
||||
"Invalid %s: %s", cmd, arg);
|
||||
if (ffserver_save_avoption_int("b", val, AV_OPT_FLAG_VIDEO_PARAM, config) < 0)
|
||||
goto nomem;
|
||||
} else if (!av_strcasecmp(cmd, "VideoSize")) {
|
||||
@@ -1001,7 +956,7 @@ static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
pix_fmt = av_get_pix_fmt(arg);
|
||||
if (pix_fmt == AV_PIX_FMT_NONE)
|
||||
ERROR("Unknown pixel format: '%s'\n", arg);
|
||||
ERROR("Unknown pixel format: %s\n", arg);
|
||||
else if (ffserver_save_avoption("pixel_format", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0)
|
||||
goto nomem;
|
||||
} else if (!av_strcasecmp(cmd, "VideoGopSize")) {
|
||||
@@ -1024,11 +979,9 @@ static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
ffserver_get_arg(arg2, sizeof(arg2), p);
|
||||
if (!av_strcasecmp(cmd, "AVOptionVideo"))
|
||||
ret = ffserver_save_avoption(arg, arg2, AV_OPT_FLAG_VIDEO_PARAM,
|
||||
config);
|
||||
ret = ffserver_save_avoption(arg, arg2, AV_OPT_FLAG_VIDEO_PARAM, config);
|
||||
else
|
||||
ret = ffserver_save_avoption(arg, arg2, AV_OPT_FLAG_AUDIO_PARAM,
|
||||
config);
|
||||
ret = ffserver_save_avoption(arg, arg2, AV_OPT_FLAG_AUDIO_PARAM, config);
|
||||
if (ret < 0)
|
||||
goto nomem;
|
||||
} else if (!av_strcasecmp(cmd, "AVPresetVideo") ||
|
||||
@@ -1041,8 +994,7 @@ static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
|
||||
} else if (!av_strcasecmp(cmd, "VideoTag")) {
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
if (strlen(arg) == 4 &&
|
||||
ffserver_save_avoption_int("codec_tag",
|
||||
MKTAG(arg[0], arg[1], arg[2], arg[3]),
|
||||
ffserver_save_avoption_int("codec_tag", MKTAG(arg[0], arg[1], arg[2], arg[3]),
|
||||
AV_OPT_FLAG_VIDEO_PARAM, config) < 0)
|
||||
goto nomem;
|
||||
} else if (!av_strcasecmp(cmd, "BitExact")) {
|
||||
@@ -1057,7 +1009,7 @@ static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
|
||||
} else if (!av_strcasecmp(cmd, "Qscale")) {
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
ffserver_set_int_param(&val, arg, 0, INT_MIN, INT_MAX, config,
|
||||
"Invalid Qscale: '%s'\n", arg);
|
||||
"Invalid Qscale: %s\n", arg);
|
||||
if (ffserver_save_avoption("flags", "+qscale", AV_OPT_FLAG_VIDEO_PARAM, config) < 0 ||
|
||||
ffserver_save_avoption_int("global_quality", FF_QP2LAMBDA * val,
|
||||
AV_OPT_FLAG_VIDEO_PARAM, config) < 0)
|
||||
@@ -1098,18 +1050,18 @@ static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
|
||||
} else if (!av_strcasecmp(cmd, "MulticastAddress")) {
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
if (resolve_host(&stream->multicast_ip, arg))
|
||||
ERROR("Invalid host/IP address: '%s'\n", arg);
|
||||
ERROR("Invalid host/IP address: %s\n", arg);
|
||||
stream->is_multicast = 1;
|
||||
stream->loop = 1; /* default is looping */
|
||||
} else if (!av_strcasecmp(cmd, "MulticastPort")) {
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
ffserver_set_int_param(&val, arg, 0, 1, 65535, config,
|
||||
"Invalid MulticastPort: '%s'\n", arg);
|
||||
"Invalid MulticastPort: %s\n", arg);
|
||||
stream->multicast_port = val;
|
||||
} else if (!av_strcasecmp(cmd, "MulticastTTL")) {
|
||||
ffserver_get_arg(arg, sizeof(arg), p);
|
||||
ffserver_set_int_param(&val, arg, 0, INT_MIN, INT_MAX, config,
|
||||
"Invalid MulticastTTL: '%s'\n", arg);
|
||||
"Invalid MulticastTTL: %s\n", arg);
|
||||
stream->multicast_ttl = val;
|
||||
} else if (!av_strcasecmp(cmd, "NoLoop")) {
|
||||
stream->loop = 0;
|
||||
@@ -1118,15 +1070,13 @@ static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
|
||||
if (stream->feed && stream->fmt && strcmp(stream->fmt->name, "ffm")) {
|
||||
if (config->dummy_actx->codec_id == AV_CODEC_ID_NONE)
|
||||
config->dummy_actx->codec_id = config->guessed_audio_codec_id;
|
||||
if (!config->no_audio &&
|
||||
config->dummy_actx->codec_id != AV_CODEC_ID_NONE) {
|
||||
if (!config->no_audio && config->dummy_actx->codec_id != AV_CODEC_ID_NONE) {
|
||||
AVCodecContext *audio_enc = avcodec_alloc_context3(avcodec_find_encoder(config->dummy_actx->codec_id));
|
||||
add_codec(stream, audio_enc, config);
|
||||
}
|
||||
if (config->dummy_vctx->codec_id == AV_CODEC_ID_NONE)
|
||||
config->dummy_vctx->codec_id = config->guessed_video_codec_id;
|
||||
if (!config->no_video &&
|
||||
config->dummy_vctx->codec_id != AV_CODEC_ID_NONE) {
|
||||
if (!config->no_video && config->dummy_vctx->codec_id != AV_CODEC_ID_NONE) {
|
||||
AVCodecContext *video_enc = avcodec_alloc_context3(avcodec_find_encoder(config->dummy_vctx->codec_id));
|
||||
add_codec(stream, video_enc, config);
|
||||
}
|
||||
@@ -1136,8 +1086,7 @@ static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
|
||||
avcodec_free_context(&config->dummy_vctx);
|
||||
avcodec_free_context(&config->dummy_actx);
|
||||
*pstream = NULL;
|
||||
} else if (!av_strcasecmp(cmd, "File") ||
|
||||
!av_strcasecmp(cmd, "ReadOnlyFile")) {
|
||||
} else if (!av_strcasecmp(cmd, "File") || !av_strcasecmp(cmd, "ReadOnlyFile")) {
|
||||
ffserver_get_arg(stream->feed_filename, sizeof(stream->feed_filename),
|
||||
p);
|
||||
} else if (!av_strcasecmp(cmd, "UseDefaults")) {
|
||||
@@ -1161,8 +1110,7 @@ static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
static int ffserver_parse_config_redirect(FFServerConfig *config,
|
||||
const char *cmd, const char **p,
|
||||
static int ffserver_parse_config_redirect(FFServerConfig *config, const char *cmd, const char **p,
|
||||
FFServerStream **predirect)
|
||||
{
|
||||
FFServerStream *redirect;
|
||||
@@ -1209,6 +1157,7 @@ int ffserver_parse_ffconfig(const char *filename, FFServerConfig *config)
|
||||
|
||||
av_assert0(config);
|
||||
|
||||
config->line_num = 0;
|
||||
f = fopen(filename, "r");
|
||||
if (!f) {
|
||||
ret = AVERROR(errno);
|
||||
@@ -1218,14 +1167,14 @@ int ffserver_parse_ffconfig(const char *filename, FFServerConfig *config)
|
||||
}
|
||||
|
||||
config->first_stream = NULL;
|
||||
last_stream = &config->first_stream;
|
||||
config->first_feed = NULL;
|
||||
last_feed = &config->first_feed;
|
||||
config->errors = config->warnings = 0;
|
||||
|
||||
last_stream = &config->first_stream;
|
||||
last_feed = &config->first_feed;
|
||||
|
||||
config->line_num = 0;
|
||||
while (fgets(line, sizeof(line), f) != NULL) {
|
||||
for(;;) {
|
||||
if (fgets(line, sizeof(line), f) == NULL)
|
||||
break;
|
||||
config->line_num++;
|
||||
p = line;
|
||||
while (av_isspace(*p))
|
||||
@@ -1240,14 +1189,14 @@ int ffserver_parse_ffconfig(const char *filename, FFServerConfig *config)
|
||||
if (opening && (stream || feed || redirect)) {
|
||||
ERROR("Already in a tag\n");
|
||||
} else {
|
||||
ret = ffserver_parse_config_feed(config, cmd, &p, &feed);
|
||||
if (ret < 0)
|
||||
if ((ret = ffserver_parse_config_feed(config, cmd, &p, &feed)) < 0)
|
||||
break;
|
||||
if (opening) {
|
||||
/* add in stream & feed list */
|
||||
/* add in stream list */
|
||||
*last_stream = feed;
|
||||
*last_feed = feed;
|
||||
last_stream = &feed->next;
|
||||
/* add in feed list */
|
||||
*last_feed = feed;
|
||||
last_feed = &feed->next_feed;
|
||||
}
|
||||
}
|
||||
@@ -1256,8 +1205,7 @@ int ffserver_parse_ffconfig(const char *filename, FFServerConfig *config)
|
||||
if (opening && (stream || feed || redirect)) {
|
||||
ERROR("Already in a tag\n");
|
||||
} else {
|
||||
ret = ffserver_parse_config_stream(config, cmd, &p, &stream);
|
||||
if (ret < 0)
|
||||
if ((ret = ffserver_parse_config_stream(config, cmd, &p, &stream)) < 0)
|
||||
break;
|
||||
if (opening) {
|
||||
/* add in stream list */
|
||||
@@ -1270,9 +1218,7 @@ int ffserver_parse_ffconfig(const char *filename, FFServerConfig *config)
|
||||
if (opening && (stream || feed || redirect))
|
||||
ERROR("Already in a tag\n");
|
||||
else {
|
||||
ret = ffserver_parse_config_redirect(config, cmd, &p,
|
||||
&redirect);
|
||||
if (ret < 0)
|
||||
if ((ret = ffserver_parse_config_redirect(config, cmd, &p, &redirect)) < 0)
|
||||
break;
|
||||
if (opening) {
|
||||
/* add in stream list */
|
||||
@@ -1285,8 +1231,7 @@ int ffserver_parse_ffconfig(const char *filename, FFServerConfig *config)
|
||||
}
|
||||
}
|
||||
if (stream || feed || redirect)
|
||||
ERROR("Missing closing </%s> tag\n",
|
||||
stream ? "Stream" : (feed ? "Feed" : "Redirect"));
|
||||
ERROR("Not closed tag %s\n", stream ? "<Stream>" : (feed ? "<Feed>" : "<Redirect>"));
|
||||
|
||||
fclose(f);
|
||||
if (ret < 0)
ffserver_config.h
@@ -79,7 +79,6 @@ typedef struct FFServerStream {
|
||||
int multicast_port; /* first port used for multicast */
|
||||
int multicast_ttl;
|
||||
int loop; /* if true, send the stream in loops (only meaningful if file) */
|
||||
char single_frame; /* only single frame */
|
||||
|
||||
/* feed specific */
|
||||
int feed_opened; /* true if someone is writing to the feed */
libavcodec/012v.c
@@ -38,15 +38,15 @@ static av_cold int zero12v_decode_init(AVCodecContext *avctx)
|
||||
static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
||||
int *got_frame, AVPacket *avpkt)
|
||||
{
|
||||
int line, ret;
|
||||
int line = 0, ret;
|
||||
const int width = avctx->width;
|
||||
AVFrame *pic = data;
|
||||
uint16_t *y, *u, *v;
|
||||
const uint8_t *line_end, *src = avpkt->data;
|
||||
int stride = avctx->width * 8 / 3;
|
||||
|
||||
if (width <= 1 || avctx->height <= 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Dimensions %dx%d not supported.\n", width, avctx->height);
|
||||
if (width == 1) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Width 1 not supported.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
@@ -67,45 +67,45 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
||||
pic->pict_type = AV_PICTURE_TYPE_I;
|
||||
pic->key_frame = 1;
|
||||
|
||||
y = (uint16_t *)pic->data[0];
|
||||
u = (uint16_t *)pic->data[1];
|
||||
v = (uint16_t *)pic->data[2];
|
||||
line_end = avpkt->data + stride;
|
||||
for (line = 0; line < avctx->height; line++) {
|
||||
uint16_t y_temp[6] = {0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000};
|
||||
uint16_t u_temp[3] = {0x8000, 0x8000, 0x8000};
|
||||
uint16_t v_temp[3] = {0x8000, 0x8000, 0x8000};
|
||||
int x;
|
||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
|
||||
for (x = 0; x < width; x += 6) {
|
||||
uint32_t t;
|
||||
|
||||
if (width - x < 6 || line_end - src < 16) {
|
||||
y = y_temp;
|
||||
u = u_temp;
|
||||
v = v_temp;
|
||||
}
|
||||
|
||||
if (line_end - src < 4)
|
||||
break;
|
||||
|
||||
t = AV_RL32(src);
|
||||
while (line++ < avctx->height) {
|
||||
while (1) {
|
||||
uint32_t t = AV_RL32(src);
|
||||
src += 4;
|
||||
*u++ = t << 6 & 0xFFC0;
|
||||
*y++ = t >> 4 & 0xFFC0;
|
||||
*v++ = t >> 14 & 0xFFC0;
|
||||
|
||||
if (line_end - src < 4)
|
||||
if (src >= line_end - 1) {
|
||||
*y = 0x80;
|
||||
src++;
|
||||
line_end += stride;
|
||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
break;
|
||||
}
|
||||
|
||||
t = AV_RL32(src);
|
||||
src += 4;
|
||||
*y++ = t << 6 & 0xFFC0;
|
||||
*u++ = t >> 4 & 0xFFC0;
|
||||
*y++ = t >> 14 & 0xFFC0;
|
||||
|
||||
if (line_end - src < 4)
|
||||
if (src >= line_end - 2) {
|
||||
if (!(width & 1)) {
|
||||
*y = 0x80;
|
||||
src += 2;
|
||||
}
|
||||
line_end += stride;
|
||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
break;
|
||||
}
|
||||
|
||||
t = AV_RL32(src);
|
||||
src += 4;
|
||||
@@ -113,8 +113,15 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
||||
*y++ = t >> 4 & 0xFFC0;
|
||||
*u++ = t >> 14 & 0xFFC0;
|
||||
|
||||
if (line_end - src < 4)
|
||||
if (src >= line_end - 1) {
|
||||
*y = 0x80;
|
||||
src++;
|
||||
line_end += stride;
|
||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
break;
|
||||
}
|
||||
|
||||
t = AV_RL32(src);
|
||||
src += 4;
|
||||
@@ -122,21 +129,18 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
||||
*v++ = t >> 4 & 0xFFC0;
|
||||
*y++ = t >> 14 & 0xFFC0;
|
||||
|
||||
if (width - x < 6)
|
||||
if (src >= line_end - 2) {
|
||||
if (width & 1) {
|
||||
*y = 0x80;
|
||||
src += 2;
|
||||
}
|
||||
line_end += stride;
|
||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (x < width) {
|
||||
y = x + (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = x/2 + (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = x/2 + (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
memcpy(y, y_temp, sizeof(*y) * (width - x));
|
||||
memcpy(u, u_temp, sizeof(*u) * (width - x + 1) / 2);
|
||||
memcpy(v, v_temp, sizeof(*v) * (width - x + 1) / 2);
|
||||
}
|
||||
|
||||
line_end += stride;
|
||||
src = line_end - stride;
|
||||
}
|
||||
|
||||
*got_frame = 1;
|
libavcodec/Makefile
|
||||
HEADERS = avcodec.h \
|
||||
avfft.h \
|
||||
dv_profile.h \
|
||||
d3d11va.h \
|
||||
dxva2.h \
|
||||
old_codec_ids.h \
|
||||
qsv.h \
|
||||
vaapi.h \
|
||||
vda.h \
|
||||
vdpau.h \
|
||||
@@ -25,11 +23,11 @@ OBJS = allcodecs.o \
|
||||
bitstream_filter.o \
|
||||
codec_desc.o \
|
||||
dv_profile.o \
|
||||
fmtconvert.o \
|
||||
imgconvert.o \
|
||||
mathtables.o \
|
||||
options.o \
|
||||
parser.o \
|
||||
qsv_api.o \
|
||||
raw.o \
|
||||
resample.o \
|
||||
resample2.o \
|
||||
@@ -56,7 +54,6 @@ FFT-OBJS-$(CONFIG_HARDCODED_TABLES) += cos_tables.o cos_fixed_tables.o
|
||||
OBJS-$(CONFIG_FFT) += avfft.o fft_fixed.o fft_float.o \
|
||||
fft_fixed_32.o fft_init_table.o \
|
||||
$(FFT-OBJS-yes)
|
||||
OBJS-$(CONFIG_FMTCONVERT) += fmtconvert.o
|
||||
OBJS-$(CONFIG_GOLOMB) += golomb.o
|
||||
OBJS-$(CONFIG_H263DSP) += h263dsp.o
|
||||
OBJS-$(CONFIG_H264CHROMA) += h264chroma.o
|
||||
@@ -69,9 +66,7 @@ OBJS-$(CONFIG_HUFFYUVDSP) += huffyuvdsp.o
|
||||
OBJS-$(CONFIG_HUFFYUVENCDSP) += huffyuvencdsp.o
|
||||
OBJS-$(CONFIG_IDCTDSP) += idctdsp.o simple_idct.o jrevdct.o
|
||||
OBJS-$(CONFIG_IIRFILTER) += iirfilter.o
|
||||
OBJS-$(CONFIG_IMDCT15) += imdct15.o
|
||||
OBJS-$(CONFIG_INTRAX8) += intrax8.o intrax8dsp.o
|
||||
OBJS-$(CONFIG_JPEGTABLES) += jpegtables.o
|
||||
OBJS-$(CONFIG_LIBXVID) += libxvid_rc.o
|
||||
OBJS-$(CONFIG_LLAUDDSP) += lossless_audiodsp.o
|
||||
OBJS-$(CONFIG_LLVIDDSP) += lossless_videodsp.o
|
||||
@@ -86,18 +81,13 @@ OBJS-$(CONFIG_MPEGAUDIODSP) += mpegaudiodsp.o \
|
||||
mpegaudiodsp_data.o \
|
||||
mpegaudiodsp_fixed.o \
|
||||
mpegaudiodsp_float.o
|
||||
OBJS-$(CONFIG_MPEGVIDEO) += mpegvideo.o mpegvideodsp.o rl.o \
|
||||
mpegvideo_motion.o mpegutils.o \
|
||||
mpegvideodata.o
|
||||
OBJS-$(CONFIG_MPEGVIDEO) += mpegvideo.o mpegvideodsp.o \
|
||||
mpegvideo_motion.o mpegutils.o
|
||||
OBJS-$(CONFIG_MPEGVIDEOENC) += mpegvideo_enc.o mpeg12data.o \
|
||||
motion_est.o ratecontrol.o \
|
||||
mpegvideoencdsp.o
|
||||
OBJS-$(CONFIG_NVENC) += nvenc.o
|
||||
OBJS-$(CONFIG_PIXBLOCKDSP) += pixblockdsp.o
|
||||
OBJS-$(CONFIG_QPELDSP) += qpeldsp.o
|
||||
OBJS-$(CONFIG_QSV) += qsv.o
|
||||
OBJS-$(CONFIG_QSVDEC) += qsvdec.o
|
||||
OBJS-$(CONFIG_QSVENC) += qsvenc.o
|
||||
OBJS-$(CONFIG_RANGECODER) += rangecoder.o
|
||||
RDFT-OBJS-$(CONFIG_HARDCODED_TABLES) += sin_tables.o
|
||||
OBJS-$(CONFIG_RDFT) += rdft.o $(RDFT-OBJS-yes)
|
||||
@@ -139,7 +129,7 @@ OBJS-$(CONFIG_AMRWB_DECODER) += amrwbdec.o celp_filters.o \
|
||||
celp_math.o acelp_filters.o \
|
||||
acelp_vectors.o \
|
||||
acelp_pitch_delay.o
|
||||
OBJS-$(CONFIG_AMV_ENCODER) += mjpegenc.o mjpegenc_common.o \
|
||||
OBJS-$(CONFIG_AMV_ENCODER) += mjpegenc.o mjpeg.o mjpegenc_common.o \
|
||||
mpegvideo_enc.o motion_est.o \
|
||||
ratecontrol.o mpeg12data.o \
|
||||
mpegvideo.o
|
||||
@@ -147,7 +137,6 @@ OBJS-$(CONFIG_ANM_DECODER) += anm.o
|
||||
OBJS-$(CONFIG_ANSI_DECODER) += ansi.o cga_data.o
|
||||
OBJS-$(CONFIG_APE_DECODER) += apedec.o
|
||||
OBJS-$(CONFIG_APNG_DECODER) += png.o pngdec.o pngdsp.o
|
||||
OBJS-$(CONFIG_APNG_ENCODER) += png.o pngenc.o
|
||||
OBJS-$(CONFIG_SSA_DECODER) += assdec.o ass.o ass_split.o
|
||||
OBJS-$(CONFIG_SSA_ENCODER) += assenc.o ass.o
|
||||
OBJS-$(CONFIG_ASS_DECODER) += assdec.o ass.o ass_split.o
|
||||
@@ -162,7 +151,7 @@ OBJS-$(CONFIG_ATRAC3P_DECODER) += atrac3plusdec.o atrac3plus.o \
|
||||
atrac3plusdsp.o atrac.o
|
||||
OBJS-$(CONFIG_AURA_DECODER) += cyuv.o
|
||||
OBJS-$(CONFIG_AURA2_DECODER) += aura.o
|
||||
OBJS-$(CONFIG_AVRN_DECODER) += avrndec.o mjpegdec.o
|
||||
OBJS-$(CONFIG_AVRN_DECODER) += avrndec.o mjpegdec.o mjpeg.o
|
||||
OBJS-$(CONFIG_AVRP_DECODER) += r210dec.o
|
||||
OBJS-$(CONFIG_AVRP_ENCODER) += r210enc.o
|
||||
OBJS-$(CONFIG_AVS_DECODER) += avs.o
|
||||
@@ -184,14 +173,13 @@ OBJS-$(CONFIG_BRENDER_PIX_DECODER) += brenderpix.o
|
||||
OBJS-$(CONFIG_C93_DECODER) += c93.o
|
||||
OBJS-$(CONFIG_CAVS_DECODER) += cavs.o cavsdec.o cavsdsp.o \
|
||||
cavsdata.o mpeg12data.o
|
||||
OBJS-$(CONFIG_CCAPTION_DECODER) += ccaption_dec.o
|
||||
OBJS-$(CONFIG_CDGRAPHICS_DECODER) += cdgraphics.o
|
||||
OBJS-$(CONFIG_CDXL_DECODER) += cdxl.o
|
||||
OBJS-$(CONFIG_CINEPAK_DECODER) += cinepak.o
|
||||
OBJS-$(CONFIG_CINEPAK_ENCODER) += cinepakenc.o elbg.o
|
||||
OBJS-$(CONFIG_CLJR_DECODER) += cljrdec.o
|
||||
OBJS-$(CONFIG_CLJR_ENCODER) += cljrenc.o
|
||||
OBJS-$(CONFIG_CLLC_DECODER) += cllc.o canopus.o
|
||||
OBJS-$(CONFIG_CLLC_DECODER) += cllc.o
|
||||
OBJS-$(CONFIG_COOK_DECODER) += cook.o
|
||||
OBJS-$(CONFIG_COMFORTNOISE_DECODER) += cngdec.o celp_filters.o
|
||||
OBJS-$(CONFIG_COMFORTNOISE_ENCODER) += cngenc.o
|
||||
@@ -199,9 +187,8 @@ OBJS-$(CONFIG_CPIA_DECODER) += cpia.o
|
||||
OBJS-$(CONFIG_CSCD_DECODER) += cscd.o
|
||||
OBJS-$(CONFIG_CYUV_DECODER) += cyuv.o
|
||||
OBJS-$(CONFIG_DCA_DECODER) += dcadec.o dca.o dcadsp.o \
|
||||
dcadata.o dca_exss.o \
|
||||
dca_xll.o synth_filter.o
|
||||
OBJS-$(CONFIG_DCA_ENCODER) += dcaenc.o dca.o dcadata.o
|
||||
synth_filter.o
|
||||
OBJS-$(CONFIG_DCA_ENCODER) += dcaenc.o dca.o
|
||||
OBJS-$(CONFIG_DIRAC_DECODER) += diracdec.o dirac.o diracdsp.o \
|
||||
dirac_arith.o mpeg12data.o dirac_dwt.o
|
||||
OBJS-$(CONFIG_DFA_DECODER) += dfa.o
|
||||
@@ -215,7 +202,6 @@ OBJS-$(CONFIG_DSD_LSBF_PLANAR_DECODER) += dsddec.o
|
||||
OBJS-$(CONFIG_DSD_MSBF_PLANAR_DECODER) += dsddec.o
|
||||
OBJS-$(CONFIG_DSICINAUDIO_DECODER) += dsicinaudio.o
|
||||
OBJS-$(CONFIG_DSICINVIDEO_DECODER) += dsicinvideo.o
|
||||
OBJS-$(CONFIG_DSS_SP_DECODER) += dss_sp.o
|
||||
OBJS-$(CONFIG_DVBSUB_DECODER) += dvbsubdec.o
|
||||
OBJS-$(CONFIG_DVBSUB_ENCODER) += dvbsub.o
|
||||
OBJS-$(CONFIG_DVDSUB_DECODER) += dvdsubdec.o
|
||||
@@ -224,7 +210,7 @@ OBJS-$(CONFIG_DVVIDEO_DECODER) += dvdec.o dv.o dvdata.o
|
||||
OBJS-$(CONFIG_DVVIDEO_ENCODER) += dvenc.o dv.o dvdata.o
|
||||
OBJS-$(CONFIG_DXA_DECODER) += dxa.o
|
||||
OBJS-$(CONFIG_DXTORY_DECODER) += dxtory.o
|
||||
OBJS-$(CONFIG_EAC3_DECODER) += eac3_data.o
|
||||
OBJS-$(CONFIG_EAC3_DECODER) += eac3dec.o eac3_data.o
|
||||
OBJS-$(CONFIG_EAC3_ENCODER) += eac3enc.o eac3_data.o
|
||||
OBJS-$(CONFIG_EACMV_DECODER) += eacmv.o
|
||||
OBJS-$(CONFIG_EAMAD_DECODER) += eamad.o eaidct.o mpeg12.o \
|
||||
@@ -253,7 +239,7 @@ OBJS-$(CONFIG_FLIC_DECODER) += flicvideo.o
|
||||
OBJS-$(CONFIG_FOURXM_DECODER) += 4xm.o
|
||||
OBJS-$(CONFIG_FRAPS_DECODER) += fraps.o
|
||||
OBJS-$(CONFIG_FRWU_DECODER) += frwu.o
|
||||
OBJS-$(CONFIG_G2M_DECODER) += g2meet.o
|
||||
OBJS-$(CONFIG_G2M_DECODER) += g2meet.o mjpeg.o
|
||||
OBJS-$(CONFIG_G723_1_DECODER) += g723_1.o acelp_vectors.o \
|
||||
celp_filters.o celp_math.o
|
||||
OBJS-$(CONFIG_G723_1_ENCODER) += g723_1.o acelp_vectors.o celp_math.o
|
||||
@@ -273,17 +259,11 @@ OBJS-$(CONFIG_H264_DECODER) += h264.o h264_cabac.o h264_cavlc.o \
|
||||
h264_direct.o h264_loopfilter.o \
|
||||
h264_mb.o h264_picture.o h264_ps.o \
|
||||
h264_refs.o h264_sei.o h264_slice.o
|
||||
OBJS-$(CONFIG_H264_MMAL_DECODER) += mmaldec.o
|
||||
OBJS-$(CONFIG_H264_VDA_DECODER) += vda_h264_dec.o
|
||||
OBJS-$(CONFIG_H264_QSV_DECODER) += qsvdec_h264.o
|
||||
OBJS-$(CONFIG_H264_QSV_ENCODER) += qsvenc_h264.o
|
||||
OBJS-$(CONFIG_HEVC_DECODER) += hevc.o hevc_mvs.o hevc_ps.o hevc_sei.o \
|
||||
hevc_cabac.o hevc_refs.o hevcpred.o \
|
||||
hevcdsp.o hevc_filter.o
|
||||
OBJS-$(CONFIG_HNM4_VIDEO_DECODER) += hnm4video.o
|
||||
OBJS-$(CONFIG_HQ_HQA_DECODER) += hq_hqa.o hq_hqadata.o hq_hqadsp.o \
|
||||
canopus.o
|
||||
OBJS-$(CONFIG_HQX_DECODER) += hqx.o hqxvlc.o hqxdsp.o canopus.o
|
||||
OBJS-$(CONFIG_HUFFYUV_DECODER) += huffyuv.o huffyuvdec.o
|
||||
OBJS-$(CONFIG_HUFFYUV_ENCODER) += huffyuv.o huffyuvenc.o
|
||||
OBJS-$(CONFIG_IDCIN_DECODER) += idcinvideo.o
|
||||
@@ -293,8 +273,8 @@ OBJS-$(CONFIG_IFF_ILBM_DECODER) += iff.o
|
||||
OBJS-$(CONFIG_IMC_DECODER) += imc.o
|
||||
OBJS-$(CONFIG_INDEO2_DECODER) += indeo2.o
|
||||
OBJS-$(CONFIG_INDEO3_DECODER) += indeo3.o
|
||||
OBJS-$(CONFIG_INDEO4_DECODER) += indeo4.o ivi.o ivi_dsp.o
|
||||
OBJS-$(CONFIG_INDEO5_DECODER) += indeo5.o ivi.o ivi_dsp.o
|
||||
OBJS-$(CONFIG_INDEO4_DECODER) += indeo4.o ivi_common.o ivi_dsp.o
|
||||
OBJS-$(CONFIG_INDEO5_DECODER) += indeo5.o ivi_common.o ivi_dsp.o
|
||||
OBJS-$(CONFIG_INTERPLAY_DPCM_DECODER) += dpcm.o
|
||||
OBJS-$(CONFIG_INTERPLAY_VIDEO_DECODER) += interplayvideo.o
|
||||
OBJS-$(CONFIG_JACOSUB_DECODER) += jacosubdec.o ass.o
|
||||
@@ -308,7 +288,7 @@ OBJS-$(CONFIG_JV_DECODER) += jvdec.o
|
||||
OBJS-$(CONFIG_KGV1_DECODER) += kgv1dec.o
|
||||
OBJS-$(CONFIG_KMVC_DECODER) += kmvc.o
|
||||
OBJS-$(CONFIG_LAGARITH_DECODER) += lagarith.o lagarithrac.o
|
||||
OBJS-$(CONFIG_LJPEG_ENCODER) += ljpegenc.o mjpegenc_common.o
|
||||
OBJS-$(CONFIG_LJPEG_ENCODER) += ljpegenc.o mjpeg.o mjpegenc_common.o
|
||||
OBJS-$(CONFIG_LOCO_DECODER) += loco.o
|
||||
OBJS-$(CONFIG_MACE3_DECODER) += mace.o
|
||||
OBJS-$(CONFIG_MACE6_DECODER) += mace.o
|
||||
@@ -317,8 +297,8 @@ OBJS-$(CONFIG_METASOUND_DECODER) += metasound.o metasound_data.o \
|
||||
twinvq.o
|
||||
OBJS-$(CONFIG_MICRODVD_DECODER) += microdvddec.o ass.o
|
||||
OBJS-$(CONFIG_MIMIC_DECODER) += mimic.o
|
||||
OBJS-$(CONFIG_MJPEG_DECODER) += mjpegdec.o
|
||||
OBJS-$(CONFIG_MJPEG_ENCODER) += mjpegenc.o mjpegenc_common.o
|
||||
OBJS-$(CONFIG_MJPEG_DECODER) += mjpegdec.o mjpeg.o
|
||||
OBJS-$(CONFIG_MJPEG_ENCODER) += mjpegenc.o mjpeg.o mjpegenc_common.o
|
||||
OBJS-$(CONFIG_MJPEGB_DECODER) += mjpegbdec.o
|
||||
OBJS-$(CONFIG_MLP_DECODER) += mlpdec.o mlpdsp.o
|
||||
OBJS-$(CONFIG_MMVIDEO_DECODER) += mmvideo.o
|
||||
@@ -369,7 +349,8 @@ OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o
OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
OBJS-$(CONFIG_ON2AVC_DECODER) += on2avc.o on2avcdata.o
OBJS-$(CONFIG_OPUS_DECODER) += opusdec.o opus.o opus_celt.o \
                               opus_silk.o vorbis_data.o
                               opus_imdct.o opus_silk.o \
                               vorbis_data.o
OBJS-$(CONFIG_PAF_AUDIO_DECODER) += pafaudio.o
OBJS-$(CONFIG_PAF_VIDEO_DECODER) += pafvideo.o
OBJS-$(CONFIG_PAM_DECODER) += pnmdec.o pnm.o
@@ -468,7 +449,6 @@ OBJS-$(CONFIG_TAK_DECODER) += takdec.o tak.o
OBJS-$(CONFIG_TARGA_DECODER) += targa.o
OBJS-$(CONFIG_TARGA_ENCODER) += targaenc.o rle.o
OBJS-$(CONFIG_TARGA_Y216_DECODER) += targa_y216dec.o
OBJS-$(CONFIG_TDSC_DECODER) += tdsc.o
OBJS-$(CONFIG_TIERTEXSEQVIDEO_DECODER) += tiertexseqv.o
OBJS-$(CONFIG_TIFF_DECODER) += tiff.o lzw.o faxcompr.o tiff_data.o tiff_common.o
OBJS-$(CONFIG_TIFF_ENCODER) += tiffenc.o rle.o lzwenc.o tiff_data.o
@@ -517,8 +497,7 @@ OBJS-$(CONFIG_VP6_DECODER) += vp6.o vp56.o vp56data.o vp56dsp.o \
                              vp6dsp.o vp56rac.o
OBJS-$(CONFIG_VP7_DECODER) += vp8.o vp8dsp.o vp56rac.o
OBJS-$(CONFIG_VP8_DECODER) += vp8.o vp8dsp.o vp56rac.o
OBJS-$(CONFIG_VP9_DECODER) += vp9.o vp9dsp.o vp56rac.o vp9dsp_8bpp.o \
                              vp9dsp_10bpp.o vp9dsp_12bpp.o
OBJS-$(CONFIG_VP9_DECODER) += vp9.o vp9dsp.o vp56rac.o
OBJS-$(CONFIG_VPLAYER_DECODER) += textdec.o ass.o
OBJS-$(CONFIG_VQA_DECODER) += vqavideo.o
OBJS-$(CONFIG_WAVPACK_DECODER) += wavpack.o
@@ -638,8 +617,8 @@ OBJS-$(CONFIG_ADPCM_EA_R1_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_EA_R2_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_EA_R3_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_EA_XAS_DECODER) += adpcm.o adpcm_data.o
OBJS-$(CONFIG_ADPCM_G722_DECODER) += g722.o g722dsp.o g722dec.o
OBJS-$(CONFIG_ADPCM_G722_ENCODER) += g722.o g722dsp.o g722enc.o
OBJS-$(CONFIG_ADPCM_G722_DECODER) += g722.o g722dec.o
OBJS-$(CONFIG_ADPCM_G722_ENCODER) += g722.o g722enc.o
OBJS-$(CONFIG_ADPCM_G726_DECODER) += g726.o
OBJS-$(CONFIG_ADPCM_G726_ENCODER) += g726.o
OBJS-$(CONFIG_ADPCM_G726LE_DECODER) += g726.o
@@ -673,33 +652,27 @@ OBJS-$(CONFIG_ADPCM_YAMAHA_ENCODER) += adpcmenc.o adpcm_data.o
OBJS-$(CONFIG_VIMA_DECODER) += vima.o adpcm_data.o

# hardware accelerators
OBJS-$(CONFIG_D3D11VA) += dxva2.o
OBJS-$(CONFIG_DXVA2) += dxva2.o
OBJS-$(CONFIG_VAAPI) += vaapi.o
OBJS-$(CONFIG_VDA) += vda.o
OBJS-$(CONFIG_VDPAU) += vdpau.o

OBJS-$(CONFIG_H263_VAAPI_HWACCEL) += vaapi_mpeg4.o
OBJS-$(CONFIG_H263_VAAPI_HWACCEL) += vaapi_mpeg4.o vaapi_mpeg.o
OBJS-$(CONFIG_H263_VDPAU_HWACCEL) += vdpau_mpeg4.o
OBJS-$(CONFIG_H264_D3D11VA_HWACCEL) += dxva2_h264.o
OBJS-$(CONFIG_H264_DXVA2_HWACCEL) += dxva2_h264.o
OBJS-$(CONFIG_H264_VAAPI_HWACCEL) += vaapi_h264.o
OBJS-$(CONFIG_H264_VDA_HWACCEL) += vda_h264.o
OBJS-$(CONFIG_H264_VDPAU_HWACCEL) += vdpau_h264.o
OBJS-$(CONFIG_HEVC_D3D11VA_HWACCEL) += dxva2_hevc.o
OBJS-$(CONFIG_HEVC_DXVA2_HWACCEL) += dxva2_hevc.o
OBJS-$(CONFIG_MPEG1_VDPAU_HWACCEL) += vdpau_mpeg12.o
OBJS-$(CONFIG_MPEG1_XVMC_HWACCEL) += mpegvideo_xvmc.o
OBJS-$(CONFIG_MPEG2_D3D11VA_HWACCEL) += dxva2_mpeg2.o
OBJS-$(CONFIG_MPEG2_DXVA2_HWACCEL) += dxva2_mpeg2.o
OBJS-$(CONFIG_MPEG2_VAAPI_HWACCEL) += vaapi_mpeg2.o
OBJS-$(CONFIG_MPEG2_VAAPI_HWACCEL) += vaapi_mpeg2.o vaapi_mpeg.o
OBJS-$(CONFIG_MPEG2_VDPAU_HWACCEL) += vdpau_mpeg12.o
OBJS-$(CONFIG_MPEG2_XVMC_HWACCEL) += mpegvideo_xvmc.o
OBJS-$(CONFIG_MPEG4_VAAPI_HWACCEL) += vaapi_mpeg4.o
OBJS-$(CONFIG_MPEG4_VAAPI_HWACCEL) += vaapi_mpeg4.o vaapi_mpeg.o
OBJS-$(CONFIG_MPEG4_VDPAU_HWACCEL) += vdpau_mpeg4.o
OBJS-$(CONFIG_VC1_D3D11VA_HWACCEL) += dxva2_vc1.o
OBJS-$(CONFIG_VC1_DXVA2_HWACCEL) += dxva2_vc1.o
OBJS-$(CONFIG_VC1_VAAPI_HWACCEL) += vaapi_vc1.o
OBJS-$(CONFIG_VC1_VAAPI_HWACCEL) += vaapi_vc1.o vaapi_mpeg.o
OBJS-$(CONFIG_VC1_VDPAU_HWACCEL) += vdpau_vc1.o

# libavformat dependencies
@@ -726,13 +699,13 @@ OBJS-$(CONFIG_MPEGTS_MUXER) += mpeg4audio.o
OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpeg4audio.o mpegaudiodata.o
OBJS-$(CONFIG_MXF_MUXER) += dnxhddata.o
OBJS-$(CONFIG_NUT_MUXER) += mpegaudiodata.o
OBJS-$(CONFIG_NUT_DEMUXER) += mpegaudiodata.o mpeg4audio.o
OBJS-$(CONFIG_OGA_MUXER) += flac.o flacdata.o
OBJS-$(CONFIG_OGG_DEMUXER) += mpeg12data.o \
                              dirac.o vorbis_data.o
OBJS-$(CONFIG_OGG_MUXER) += flac.o flacdata.o \
                            vorbis_data.o
OBJS-$(CONFIG_RTP_MUXER) += mpeg4audio.o
OBJS-$(CONFIG_RTPDEC) += mjpeg.o
OBJS-$(CONFIG_SPDIF_DEMUXER) += aacadtsdec.o mpeg4audio.o
OBJS-$(CONFIG_SPDIF_MUXER) += dca.o
OBJS-$(CONFIG_TAK_DEMUXER) += tak.o
@@ -747,7 +720,6 @@ OBJS-$(CONFIG_ELBG_FILTER) += elbg.o
# external codec libraries
OBJS-$(CONFIG_LIBAACPLUS_ENCODER) += libaacplus.o
OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
OBJS-$(CONFIG_LIBDCADEC_DECODER) += libdcadec.o dca.o
OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o
OBJS-$(CONFIG_LIBFDK_AAC_DECODER) += libfdk-aacdec.o
OBJS-$(CONFIG_LIBFDK_AAC_ENCODER) += libfdk-aacenc.o
@@ -761,7 +733,6 @@ OBJS-$(CONFIG_LIBMP3LAME_ENCODER) += libmp3lame.o mpegaudiodecheader.o
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_DECODER) += libopencore-amr.o
OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER) += libopencore-amr.o
OBJS-$(CONFIG_LIBOPENCORE_AMRWB_DECODER) += libopencore-amr.o
OBJS-$(CONFIG_LIBOPENH264_ENCODER) += libopenh264enc.o
OBJS-$(CONFIG_LIBOPENJPEG_DECODER) += libopenjpegdec.o
OBJS-$(CONFIG_LIBOPENJPEG_ENCODER) += libopenjpegenc.o
OBJS-$(CONFIG_LIBOPUS_DECODER) += libopusdec.o libopus.o \
@@ -790,8 +761,7 @@ OBJS-$(CONFIG_LIBVPX_VP8_ENCODER) += libvpxenc.o
OBJS-$(CONFIG_LIBVPX_VP9_DECODER) += libvpxdec.o libvpx.o
OBJS-$(CONFIG_LIBVPX_VP9_ENCODER) += libvpxenc.o libvpx.o
OBJS-$(CONFIG_LIBWAVPACK_ENCODER) += libwavpackenc.o
OBJS-$(CONFIG_LIBWEBP_ENCODER) += libwebpenc_common.o libwebpenc.o
OBJS-$(CONFIG_LIBWEBP_ANIM_ENCODER) += libwebpenc_common.o libwebpenc_animencoder.o
OBJS-$(CONFIG_LIBWEBP_ENCODER) += libwebpenc.o
OBJS-$(CONFIG_LIBX264_ENCODER) += libx264.o
OBJS-$(CONFIG_LIBX265_ENCODER) += libx265.o
OBJS-$(CONFIG_LIBXAVS_ENCODER) += libxavs.o
@@ -838,7 +808,9 @@ OBJS-$(CONFIG_PNM_PARSER) += pnm_parser.o pnm.o
OBJS-$(CONFIG_RV30_PARSER) += rv34_parser.o
OBJS-$(CONFIG_RV40_PARSER) += rv34_parser.o
OBJS-$(CONFIG_TAK_PARSER) += tak_parser.o tak.o
OBJS-$(CONFIG_VC1_PARSER) += vc1_parser.o
OBJS-$(CONFIG_VC1_PARSER) += vc1_parser.o vc1.o vc1data.o vc1dsp.o \
                             msmpeg4.o msmpeg4data.o mpeg4video.o \
                             h263.o
OBJS-$(CONFIG_VP3_PARSER) += vp3_parser.o
OBJS-$(CONFIG_VP8_PARSER) += vp8_parser.o
OBJS-$(CONFIG_VP9_PARSER) += vp9_parser.o
@@ -850,9 +822,8 @@ OBJS-$(CONFIG_CHOMP_BSF) += chomp_bsf.o
OBJS-$(CONFIG_DUMP_EXTRADATA_BSF) += dump_extradata_bsf.o
OBJS-$(CONFIG_H264_MP4TOANNEXB_BSF) += h264_mp4toannexb_bsf.o
OBJS-$(CONFIG_IMX_DUMP_HEADER_BSF) += imx_dump_header_bsf.o
OBJS-$(CONFIG_MJPEG2JPEG_BSF) += mjpeg2jpeg_bsf.o
OBJS-$(CONFIG_MJPEG2JPEG_BSF) += mjpeg2jpeg_bsf.o mjpeg.o
OBJS-$(CONFIG_MJPEGA_DUMP_HEADER_BSF) += mjpega_dump_header_bsf.o
OBJS-$(CONFIG_MPEG4_UNPACK_BFRAMES_BSF) += mpeg4_unpack_bframes_bsf.o
OBJS-$(CONFIG_MOV2TEXTSUB_BSF) += movsub_bsf.o
OBJS-$(CONFIG_MP3_HEADER_DECOMPRESS_BSF) += mp3_header_decompress_bsf.o \
                                            mpegaudiodata.o
@@ -876,27 +847,17 @@ SKIPHEADERS += %_tablegen.h \
               libutvideo.h \
               old_codec_ids.h \
               tableprint.h \
               tableprint_vlc.h \
               $(ARCH)/vp56_arith.h \

SKIPHEADERS-$(CONFIG_D3D11VA) += d3d11va.h dxva2_internal.h
SKIPHEADERS-$(CONFIG_DXVA2) += dxva2.h dxva2_internal.h
SKIPHEADERS-$(CONFIG_LIBSCHROEDINGER) += libschroedinger.h
SKIPHEADERS-$(CONFIG_LIBUTVIDEO) += libutvideo.h
SKIPHEADERS-$(CONFIG_QSV) += qsv.h qsv_internal.h
SKIPHEADERS-$(CONFIG_QSVDEC) += qsvdec.h
SKIPHEADERS-$(CONFIG_QSVENC) += qsvenc.h
SKIPHEADERS-$(CONFIG_XVMC) += xvmc.h
SKIPHEADERS-$(CONFIG_VAAPI) += vaapi_internal.h
SKIPHEADERS-$(CONFIG_VDA) += vda.h vda_internal.h
SKIPHEADERS-$(CONFIG_VDPAU) += vdpau.h vdpau_internal.h

TESTPROGS = imgconvert \
            mathops \
            options \
            avfft \

TESTPROGS += api-flac

TESTPROGS-$(CONFIG_CABAC) += cabac
TESTPROGS-$(CONFIG_FFT) += fft fft-fixed fft-fixed32

@@ -28,7 +28,6 @@
#include "a64tables.h"
#include "elbg.h"
#include "internal.h"
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

@@ -66,7 +65,7 @@ static const int mc_colors[5]={0x0,0xb,0xc,0xf,0x1};
//static const int mc_colors[5]={0x0,0x8,0xa,0xf,0x7};
//static const int mc_colors[5]={0x0,0x9,0x8,0xa,0x3};

static void to_meta_with_crop(AVCodecContext *avctx, const AVFrame *p, int *dest)
static void to_meta_with_crop(AVCodecContext *avctx, AVFrame *p, int *dest)
{
    int blockx, blocky, x, y;
    int luma = 0;
@@ -79,13 +78,9 @@ static void to_meta_with_crop(AVCodecContext *avctx, const AVFrame *p, int *dest
    for (y = blocky; y < blocky + 8 && y < C64YRES; y++) {
        for (x = blockx; x < blockx + 8 && x < C64XRES; x += 2) {
            if(x < width && y < height) {
                if (x + 1 < width) {
                    /* build average over 2 pixels */
                    luma = (src[(x + 0 + y * p->linesize[0])] +
                            src[(x + 1 + y * p->linesize[0])]) / 2;
                } else {
                    luma = src[(x + y * p->linesize[0])];
                }
                /* build average over 2 pixels */
                luma = (src[(x + 0 + y * p->linesize[0])] +
                        src[(x + 1 + y * p->linesize[0])]) / 2;
                /* write blocks as linear data now so they are suitable for elbg */
                dest[0] = luma;
            }
@@ -191,6 +186,7 @@ static void render_charset(AVCodecContext *avctx, uint8_t *charset,
static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
{
    A64Context *c = avctx->priv_data;
    av_frame_free(&avctx->coded_frame);
    av_freep(&c->mc_meta_charset);
    av_freep(&c->mc_best_cb);
    av_freep(&c->mc_charset);
@@ -224,7 +220,7 @@ static av_cold int a64multi_encode_init(AVCodecContext *avctx)
                               a64_palette[mc_colors[a]][2] * 0.11;
    }

    if (!(c->mc_meta_charset = av_mallocz_array(c->mc_lifetime, 32000 * sizeof(int))) ||
    if (!(c->mc_meta_charset = av_malloc_array(c->mc_lifetime, 32000 * sizeof(int))) ||
        !(c->mc_best_cb = av_malloc(CHARSET_CHARS * 32 * sizeof(int))) ||
        !(c->mc_charmap = av_mallocz_array(c->mc_lifetime, 1000 * sizeof(int))) ||
        !(c->mc_colram = av_mallocz(CHARSET_CHARS * sizeof(uint8_t))) ||
@@ -242,6 +238,14 @@ static av_cold int a64multi_encode_init(AVCodecContext *avctx)
    AV_WB32(avctx->extradata, c->mc_lifetime);
    AV_WB32(avctx->extradata + 16, INTERLACED);

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame) {
        a64multi_close_encoder(avctx);
        return AVERROR(ENOMEM);
    }

    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;
    if (!avctx->codec_tag)
        avctx->codec_tag = AV_RL32("a64m");

@@ -266,9 +270,10 @@ static void a64_compress_colram(unsigned char *buf, int *charmap, uint8_t *colra
}

static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                 const AVFrame *p, int *got_packet)
                                 const AVFrame *pict, int *got_packet)
{
    A64Context *c = avctx->priv_data;
    AVFrame *const p = avctx->coded_frame;

    int frame;
    int x, y;
@@ -299,7 +304,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
    }

    /* no data, means end encoding asap */
    if (!p) {
    if (!pict) {
        /* all done, end encoding */
        if (!c->mc_lifetime) return 0;
        /* no more frames in queue, prepare to flush remaining frames */
@@ -312,10 +317,13 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
    } else {
        /* fill up mc_meta_charset with data until lifetime exceeds */
        if (c->mc_frame_counter < c->mc_lifetime) {
            *p = *pict;
            p->pict_type = AV_PICTURE_TYPE_I;
            p->key_frame = 1;
            to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
            c->mc_frame_counter++;
            if (c->next_pts == AV_NOPTS_VALUE)
                c->next_pts = p->pts;
                c->next_pts = pict->pts;
            /* lifetime is not reached so wait for next frame first */
            return 0;
        }
@@ -326,20 +334,14 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
    req_size = 0;
    /* any frames to encode? */
    if (c->mc_lifetime) {
        int alloc_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
        if ((ret = ff_alloc_packet2(avctx, pkt, alloc_size)) < 0)
        req_size = charset_size + c->mc_lifetime*(screen_size + colram_size);
        if ((ret = ff_alloc_packet2(avctx, pkt, req_size)) < 0)
            return ret;
        buf = pkt->data;

        /* calc optimal new charset + charmaps */
        ret = avpriv_init_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb,
                               CHARSET_CHARS, 50, charmap, &c->randctx);
        if (ret < 0)
            return ret;
        ret = avpriv_do_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb,
                             CHARSET_CHARS, 50, charmap, &c->randctx);
        if (ret < 0)
            return ret;
        avpriv_init_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);
        avpriv_do_elbg (meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);

        /* create colorram map and a c64 readable charset */
        render_charset(avctx, charset, colram);
@@ -349,7 +351,6 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,

        /* advance pointers */
        buf += charset_size;
        req_size += charset_size;
    }

    /* write x frames to buf */
@@ -386,7 +387,6 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
    pkt->pts = pkt->dts = c->next_pts;
    c->next_pts = AV_NOPTS_VALUE;

    av_assert0(pkt->size >= req_size);
    pkt->size = req_size;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = !!req_size;
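For context on the packet sizing juggled in the hunk above: the encoder reserves a worst-case buffer, writes into it, and only afterwards trims the packet to the bytes actually produced. Below is a minimal sketch of that pattern, assuming the public AVPacket helpers rather than the internal ff_alloc_packet2(); the function name write_payload and the placeholder byte count are illustrative only, not taken from the source.

/* Sketch: reserve an upper bound, then shrink to what was actually written. */
#include <libavcodec/avcodec.h>
#include <libavutil/avassert.h>

static int write_payload(AVPacket *pkt, int upper_bound)
{
    int written, ret;

    ret = av_new_packet(pkt, upper_bound);   /* worst-case allocation */
    if (ret < 0)
        return ret;

    /* ... fill pkt->data here, counting the bytes actually used ... */
    written = upper_bound / 2;               /* placeholder for the real count */

    av_assert0(written <= pkt->size);        /* mirrors the av_assert0() in the hunk */
    av_shrink_packet(pkt, written);          /* trim the packet to the real payload */
    pkt->flags |= AV_PKT_FLAG_KEY;
    return 0;
}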
@@ -32,10 +32,10 @@

#include "libavutil/float_dsp.h"
#include "avcodec.h"
#include "imdct15.h"
#include "fft.h"
#include "mpeg4audio.h"
#include "sbr.h"
#include "fmtconvert.h"

#include <stdint.h>

@@ -141,10 +141,6 @@ typedef struct PredictorState {
#define SCALE_MAX_DIFF 60 ///< maximum scalefactor difference allowed by standard
#define SCALE_DIFF_ZERO 60 ///< codebook index corresponding to zero scalefactor indices difference

#define NOISE_PRE 256 ///< preamble for NOISE_BT, put in bitstream with the first noise band
#define NOISE_PRE_BITS 9 ///< length of preamble
#define NOISE_OFFSET 90 ///< subtracted from global gain, used as offset for the preamble

/**
 * Long Term Prediction
 */
@@ -237,8 +233,7 @@ typedef struct SingleChannelElement {
    float sf[120]; ///< scalefactors
    int sf_idx[128]; ///< scalefactor indices (used by encoder)
    uint8_t zeroes[128]; ///< band is not coded (used by encoder)
    DECLARE_ALIGNED(32, float, pcoeffs)[1024]; ///< coefficients for IMDCT, pristine
    DECLARE_ALIGNED(32, float, coeffs)[1024]; ///< coefficients for IMDCT, maybe processed
    DECLARE_ALIGNED(32, float, coeffs)[1024]; ///< coefficients for IMDCT
    DECLARE_ALIGNED(32, float, saved)[1536]; ///< overlap
    DECLARE_ALIGNED(32, float, ret_buf)[2048]; ///< PCM output buffer
    DECLARE_ALIGNED(16, float, ltp_state)[3072]; ///< time signal for LTP
@@ -299,7 +294,7 @@ struct AACContext {
    FFTContext mdct_small;
    FFTContext mdct_ld;
    FFTContext mdct_ltp;
    IMDCT15Context *mdct480;
    FmtConvertContext fmt_conv;
    AVFloatDSPContext *fdsp;
    int random_state;
    /** @} */
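The SingleChannelElement hunk above mostly shuffles DECLARE_ALIGNED coefficient buffers. As a rough illustration of how such a field is declared and touched (assuming DECLARE_ALIGNED from libavutil/mem.h, where it lived at this time; the ToyChannel type and clear_coeffs() are made up for the example):

#include <libavutil/mem.h>

typedef struct ToyChannel {
    DECLARE_ALIGNED(32, float, coeffs)[1024];   /* 32-byte aligned, SIMD friendly */
} ToyChannel;

static void clear_coeffs(ToyChannel *ch)
{
    for (int i = 0; i < 1024; i++)
        ch->coeffs[i] = 0.0f;
}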
@@ -90,10 +90,6 @@ static int aac_adtstoasc_filter(AVBitStreamFilterContext *bsfc,
    av_free(avctx->extradata);
    avctx->extradata_size = 2 + pce_size;
    avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata) {
        avctx->extradata_size = 0;
        return AVERROR(ENOMEM);
    }

    init_put_bits(&pb, avctx->extradata, avctx->extradata_size);
    put_bits(&pb, 5, hdr.object_type);
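The bitstream-filter hunk above reallocates the codec extradata with zeroed padding before writing the new configuration. A minimal sketch of that allocation pattern with the public API (the helper name replace_extradata and its arguments are illustrative; FF_INPUT_BUFFER_PADDING_SIZE is the padding constant of this era, as seen in the hunk):

#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

static int replace_extradata(AVCodecContext *avctx, const uint8_t *buf, int size)
{
    av_free(avctx->extradata);
    avctx->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata) {
        avctx->extradata_size = 0;           /* keep size consistent on failure */
        return AVERROR(ENOMEM);
    }
    memcpy(avctx->extradata, buf, size);     /* padding bytes stay zeroed */
    avctx->extradata_size = size;
    return 0;
}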