Compare commits
272 Commits
Changelog (267 changed lines)

@@ -1,7 +1,270 @@
 Entries are sorted chronologically from oldest to youngest within each release,
 releases are sorted from youngest to oldest.

-version <next>:
+version 2.7.4
+- nuv: sanitize negative fps rate
+- rawdec: only exempt BIT0 with need_copy from buffer sanity check
+- mlvdec: check that index_entries exist
+- nutdec: reject negative value_len in read_sm_data
+- xwddec: prevent overflow of lsize * avctx->height
+- nutdec: only copy the header if it exists
+- exr: fix out of bounds read in get_code
+- on2avc: limit number of bits to 30 in get_egolomb
+- avcodec/mpeg4videodec: also for empty partitioned slices
+- avcodec/h264_refs: Fix long_idx check
+- avcodec/h264_mc_template: prefetch list1 only if it is used in the MB
+- avcodec/h264_slice: Simplify ref2frm indexing
+- Revert "avcodec/aarch64/neon.S: Update neon.s for transpose_4x4H"
+- avfilter/vf_mpdecimate: Add missing emms_c()
+- sonic: make sure num_taps * channels is not larger than frame_size
+- opus_silk: fix typo causing overflow in silk_stabilize_lsf
+- ffm: reject invalid codec_id and codec_type
+- golomb: always check for invalid UE golomb codes in get_ue_golomb
+- aaccoder: prevent crash of anmr coder
+- ffmdec: reject zero-sized chunks
+- swscale/x86/rgb2rgb_template: Fallback to mmx in interleaveBytes() if the alignment is insufficient for SSE*
+- swscale/x86/rgb2rgb_template: Do not crash on misaligend stride
+- avformat/mxfenc: Do not crash if there is no packet in the first stream
+- avcodec/aarch64/neon.S: Update neon.s for transpose_4x4H
+- avformat/utils: estimate_timings_from_pts - increase retry counter, fixes invalid duration for ts files with hevc codec
+- avformat/matroskaenc: Check codecdelay before use
+- avutil/mathematics: Fix division by 0
+- mjpegdec: consider chroma subsampling in size check
+- avcodec/hevc: Check max ctb addresses for WPP
+- avcodec/vp3: ensure header is parsed successfully before tables
+- avcodec/jpeg2000dec: Check bpno in decode_cblk()
+- avcodec/pgssubdec: Fix left shift of 255 by 24 places cannot be represented in type int
+- swscale/utils: Fix for runtime error: left shift of negative value -1
+- avcodec/hevc: Fix integer overflow of entry_point_offset
+- avcodec/dirac_parser: Check that there is a previous PU before accessing it
+- avcodec/dirac_parser: Add basic validity checks for next_pu_offset and prev_pu_offset
+- avcodec/dirac_parser: Fix potential overflows in pointer checks
+- avcodec/wmaprodec: Check bits per sample to be within the range not causing integer overflows
+- avcodec/wmaprodec: Fix overflow of cutoff
+- avformat/smacker: fix integer overflow with pts_inc
+- avcodec/vp3: Fix "runtime error: left shift of negative value"
+- mpegencts: Fix overflow in cbr mode period calculations
+- avutil/timecode: Fix fps check
+- avutil/mathematics: return INT64_MIN (=AV_NOPTS_VALUE) from av_rescale_rnd() for overflows
+- avcodec/apedec: Check length in long_filter_high_3800()
+- avcodec/vp3: always set pix_fmt in theora_decode_header()
+- avcodec/mpeg4videodec: Check available data before reading custom matrix
+- avutil/mathematics: Do not treat INT64_MIN as positive in av_rescale_rnd
+- avutil/integer: Fix av_mod_i() with negative dividend
+- avformat/dump: Fix integer overflow in av_dump_format()
+- avcodec/h264_refs: Check that long references match before use
+- avcodec/utils: Clear dimensions in ff_get_buffer() on failure
+- avcodec/utils: Use 64bit for aspect ratio calculation in avcodec_string()
+- avcodec/vp3: Clear context on reinitialization failure
+- avcodec/hevc: allocate entries unconditionally
+- avcodec/hevc_cabac: Fix multiple integer overflows
+- avcodec/jpeg2000dwt: Check ndeclevels before calling dwt_encode*()
+- avcodec/jpeg2000dwt: Check ndeclevels before calling dwt_decode*()
+- avcodec/hevc: Check entry_point_offsets
+- avcodec/cabac: Check initial cabac decoder state
+- avcodec/cabac_functions: Fix "left shift of negative value -31767"
+- avcodec/h264_slice: Limit max_contexts when slice_context_count is initialized
+- avcodec/vp8: Do not use num_coeff_partitions in thread/buffer setup
+- avcodec/ffv1dec: Clear quant_table_count if its invalid
+- avcodec/ffv1dec: Print an error if the quant table count is invalid
+- doc/filters/drawtext: fix centering example
+- hqx: correct type and size check of info_offset
+- mxfdec: check edit_rate also for physical_track
+- mpegvideo: clear overread in clear_context
+- dvdsubdec: validate offset2 similar to offset1
+- aacdec: don't return frames without data from aac_decode_er_frame
+- avcodec/takdec: Use memove, avoid undefined memcpy() use
+- riffdec: prevent negative bit rate
+
+
+version 2.7.3:
+- rtmpcrypt: Do the xtea decryption in little endian mode
+- Update versions for 2.7.3
+- avformat/matroskadec: Check subtitle stream before dereferencing
+- avformat/utils: Do not init parser if probing is unfinished
+- avcodec/jpeg2000dec: Fix potential integer overflow with tile dimensions
+- avcodec/jpeg2000dec: Check SIZ dimensions to be within the supported range
+- avcodec/jpeg2000: Check comp coords to be within the supported size
+- avcodec/jpeg2000: Use av_image_check_size() in ff_jpeg2000_init_component()
+- avcodec/wmaprodec: Check for overread in decode_packet()
+- avcodec/smacker: Check that the data size is a multiple of a sample vector
+- avcodec/takdec: Skip last p2 sample (which is unused)
+- avcodec/dxtory: Fix input size check in dxtory_decode_v1_410()
+- avcodec/dxtory: Fix input size check in dxtory_decode_v1_420()
+- avcodec/error_resilience: avoid accessing previous or next frames tables beyond height
+- avcodec/dpx: Move need_align to act per line
+- avcodec/flashsv: Check size before updating it
+- avcodec/ivi: Check image dimensions
+- avcodec/utils: Better check for channels in av_get_audio_frame_duration()
+- avcodec/jpeg2000dec: Check for duplicate SIZ marker
+- tests/fate/avformat: Fix fate-lavf
+- doc/ffmpeg: Clarify that the sdp_file option requires an rtp output.
+- ffmpeg: Don't try and write sdp info if none of the outputs had an rtp format.
+- apng: use correct size for output buffer
+- jvdec: avoid unsigned overflow in comparison
+- avcodec/hevc_ps: Check chroma_format_idc
+- avcodec/jpeg2000dec: Clip all tile coordinates
+- avcodec/microdvddec: Check for string end in 'P' case
+- avcodec/dirac_parser: Fix undefined memcpy() use
+- avformat/xmv: Discard remainder of packet on error
+- avformat/xmv: factor return check out of if/else
+- avcodec/mpeg12dec: Do not call show_bits() with invalid bits
+- libavutil/channel_layout: Check strtol*() for failure
+- avcodec/ffv1dec: Check for 0 quant tables
+- avcodec/mjpegdec: Reinitialize IDCT on BPP changes
+- avcodec/mjpegdec: Check index in ljpeg_decode_yuv_scan() before using it
+- avutil/file_open: avoid file handle inheritance on Windows
+- avcodec/h264_slice: Disable slice threads if there are multiple access units in a packet
+- opusdec: Don't run vector_fmul_scalar on zero length arrays
+- avcodec/ffv1: Initialize vlc_state on allocation
+- avcodec/ffv1dec: update progress in case of broken pointer chains
+- avcodec/ffv1dec: Clear slice coordinates if they are invalid or slice header decoding fails for other reasons
+- avformat/httpauth: Add space after commas in HTTP/RTSP auth header
+- avcodec/x86/sbrdsp: Fix using uninitialized upper 32bit of noise
+- avcodec/ffv1dec: Fix off by 1 error in quant_table_count check
+- avcodec/ffv1dec: Explicitly check read_quant_table() return value
+- avcodec/rangecoder: Check e
+- avutil/log: fix zero length gnu_printf format string warning
+- lavf/webvttenc: Require webvtt file to contain exactly one WebVTT stream.
+- avcodec/mjpegdec: Fix decoding RGBA RCT LJPEG
+- avfilter/af_asyncts: use llabs for int64_t
+- avcodec/g2meet: Also clear tile dimensions on header_fail
+- avcodec/g2meet: Fix potential overflow in tile dimensions check
+- avcodec/svq1dec: Check init_get_bits8() for failure
+- avcodec/tta: Check init_get_bits8() for failure
+- avcodec/vp3: Check init_get_bits8() for failure
+- swresample/swresample: Fix integer overflow in seed calculation
+- avformat/mov: Fix integer overflow in FFABS
+- avutil/common: Add FFNABS()
+- avutil/common: Document FFABS() corner case
+- avformat/dump: Fix integer overflow in aspect ratio calculation
+- avformat/mxg: Use memmove()
+- avcodec/truemotion1: Check for even width
+- avcodec/mpeg12dec: Set dimensions in mpeg1_decode_sequence() only in absence of errors
+- avcodec/libopusenc: Fix infinite loop on flushing after 0 input
+- avformat/hevc: Check num_long_term_ref_pics_sps to avoid potentially long loops
+- avformat/hevc: Fix parsing errors
+- ffmpeg: Use correct codec_id for av_parser_change() check
+- ffmpeg: Check av_parser_change() for failure
+- ffmpeg: Check for RAWVIDEO and do not relay only on AVFMT_RAWPICTURE
+- ffmpeg: check avpicture_fill() return value
+- avformat/mux: Update sidedata in ff_write_chained()
+- avcodec/flashsvenc: Correct max dimension in error message
+- avcodec/svq1enc: Check dimensions
+- avcodec/dcaenc: clear bitstream end
+- libavcodec/aacdec_template: Use init_get_bits8() in aac_decode_frame()
+- rawdec: fix mjpeg probing buffer size check
+- rawdec: fix mjpeg probing
+- configure: loongson disable expensive optimizations in gcc O3 optimization
+- videodsp: don't overread edges in vfix3 emu_edge.
+- avformat/mp3dec: improve junk skipping heuristic
+- avformat/hls: add support for EXT-X-MAP
+- avformat/hls: fix segment selection regression on track changes of live streams
+- lavf/matroskadec: Fully parse and repack MP3 packets
+- avcodec/h264_mp4toannexb_bsf: Reorder operations in nal_size check
+- avformat/oggenc: Check segments_count for headers too
+- avformat/segment: atomically update list if possible
+- avformat/avidec: Workaround broken initial frame
+- hevc: properly handle no_rasl_output_flag when removing pictures from the DPB
+- hevc: fix wpp threading deadlock.
+- avcodec/ffv1: separate slice_count from max_slice_count
+- lavf/img2dec: Fix memory leak
+- avcodec/mp3: fix skipping zeros
+- avformat/srtdec: make sure we probe a number
+- avformat/srtdec: more lenient first line probing
+- doc: mention libavcodec can decode Opus natively
+- avcodec/ffv1enc: fix assertion failure with unset bits per raw sample
+- MAINTAINERS: Remove myself as leader
+- mips/hevcdsp: fix string concatenation on macros
+
+
+version 2.7.2:
+- imc: use correct position for flcoeffs2 calculation
+- hevc: check slice address length
+- snow: remove an obsolete av_assert2
+- webp: fix infinite loop in webp_decode_frame
+- wavpack: limit extra_bits to 32 and use get_bits_long
+- ffmpeg: only count got_output/errors in decode_error_stat
+- ffmpeg: exit_on_error if decoding a packet failed
+- pthread_frame: forward error codes when flushing
+- huffyuvdec: validate image size
+- wavpack: use get_bits_long to read up to 32 bits
+- nutdec: check maxpos in read_sm_data before returning success
+- s302m: fix arithmetic exception
+- vc1dec: use get_bits_long and limit the read bits to 32
+- mpegaudiodec: copy AVFloatDSPContext from first context to all contexts
+- avcodec/vp8: Check buffer size in vp8_decode_frame_header()
+- avcodec/vp8: Fix null pointer dereference in ff_vp8_decode_free()
+- avcodec/diracdec: Check for hpel_base allocation failure
+- avcodec/rv34: Clear pointers in ff_rv34_decode_init_thread_copy()
+- avfilter/af_aresample: Check ff_all_* for allocation failures
+- avcodec/pthread_frame: clear priv_data, avoid stale pointer in error case
+- swscale/utils: Clear pix buffers
+- avutil/fifo: Fix the case where func() returns less bytes than requested in av_fifo_generic_write()
+- ffmpeg: Fix cleanup after failed allocation of output_files
+- avformat/mov: Fix deallocation when MOVStreamContext failed to allocate
+- ffmpeg: Fix crash with ost->last_frame allocation failure
+- ffmpeg: Fix cleanup with ost = NULL
+- avcodec/pthread_frame: check avctx on deallocation
+- avcodec/sanm: Reset sizes in destroy_buffers()
+- avcodec/alac: Clear pointers in allocate_buffers()
+- bytestream2: set the reader to the end when reading more than available
+- avcodec/utils: use a minimum 32pixel width in avcodec_align_dimensions2() for H.264
+- avcodec/mpegvideo: Clear pointers in ff_mpv_common_init()
+- oggparsedirac: check return value of init_get_bits
+- wmalosslessdec: reset frame->nb_samples on packet loss
+- wmalosslessdec: avoid reading 0 bits with get_bits
+- Put a space between string literals and macros.
+- avcodec/rawenc: Use ff_alloc_packet() instead of ff_alloc_packet2()
+- avcodec/aacsbr: check that the element type matches before applying SBR
+- avcodec/h264_slice: Use w/h from the AVFrame instead of mb_w/h
+- vp9/update_prob: prevent out of bounds table read
+- avfilter/vf_transpose: Fix rounding error
+- avcodec/h264_refs: discard mismatching references
+- avcodec/mjpegdec: Fix small picture upscale
+- avcodec/pngdec: Check values before updating context in decode_fctl_chunk()
+- avcodec/pngdec: Copy IHDR & plte state from last thread
+- avcodec/pngdec: Require a IHDR chunk before fctl
+- avcodec/pngdec: Only allow one IHDR chunk
+- wmavoice: limit wmavoice_decode_packet return value to packet size
+- swscale/swscale_unscaled: Fix rounding difference with RGBA output between little and big endian
+- ffmpeg: Do not use the data/size of a bitstream filter after failure
+- swscale/x86/rgb2rgb_template: fix signedness of v in shuffle_bytes_2103_{mmx,mmxext}
+- vda: unlock the pixel buffer base address.
+- swscale/rgb2rgb_template: Fix signedness of v in shuffle_bytes_2103_c()
+- swscale/rgb2rgb_template: Implement shuffle_bytes_0321_c and fix shuffle_bytes_2103_c on BE
+- swscale/rgb2rgb_template: Disable shuffle_bytes_2103_c on big endian
+- swr: Remember previously set int_sample_format from user
+- swresample: soxr implementation for swr_get_out_samples()
+- avformat/swfdec: Do not error out on pixel format changes
+- ffmpeg_opt: Fix forcing fourccs
+- configure: Check for x265_api_get
+- swscale/x86/rgb2rgb_template: don't call emms on sse2/avx functions
+- swscale/x86/rgb2rgb_template: add missing xmm clobbers
+- library.mak: Workaround SDL redefining main and breaking fate tests on mingw
+- vaapi_h264: fix RefPicList[] field flags.
+
+version 2.7.1:
+- postproc: fix unaligned access
+- avformat: clarify what package needs to be compiled with SSL support
+- avcodec/libx264: Avoid reconfig on equivalent aspect ratios
+- avcodec/flacenc: Fix Invalid Rice order
+- tls_gnutls: fix hang on disconnection
+- avcodec/hevc_ps: Only discard overread VPS if a previous is available
+- ffmpeg: Free last_frame instead of just unref
+- avcodec/ffv1enc: fix bps for >8bit yuv when not explicitly set
+- avio: fix potential crashes when combining ffio_ensure_seekback + crc
+- examples/demuxing_decoding: use properties from frame instead of video_dec_ctx
+- h264: er: Copy from the previous reference only if compatible
+- doc: fix spelling errors
+- configure: only disable VSX for !ppc64el
+- ffmpeg_opt: Check for localtime() failure
+- avformat/singlejpeg: fix standalone compilation
+- configure: Disable VSX on unspecified / generic CPUs
+- avformat: Fix bug in parse_rps for HEVC.
+- takdec: ensure chan2 is a valid channel index
+- avcodec/h264_slice: Use AVFrame dimensions for grayscale handling


 version 2.7:

@@ -573,7 +836,7 @@ easier to use. The changes are:
 all the stream in the first input file, except for the second audio
 stream'.
 * There is a new option -c (or -codec) for choosing the decoder/encoder to
-use, which allows to precisely specify target stream(s) consistently with
+use, which makes it possible to precisely specify target stream(s) consistently with
 other options. E.g. -c:v lib264 sets the codec for all video streams, -c:a:0
 libvorbis sets the codec for the first audio stream and -c copy copies all
 the streams without reencoding. Old -vcodec/-acodec/-scodec options are now
MAINTAINERS

@@ -14,7 +14,6 @@ patches and related discussions.
 Project Leader
 ==============

-Michael Niedermayer
 final design decisions


@@ -545,6 +544,7 @@ x86 Michael Niedermayer
 Releases
 ========

+2.7 Michael Niedermayer
 2.6 Michael Niedermayer
 2.5 Michael Niedermayer
 2.4 Michael Niedermayer
RELEASE_NOTES (new file, 15 lines)

@@ -0,0 +1,15 @@
+
+┌─────────────────────────────────────┐
+│ RELEASE NOTES for FFmpeg 2.7 "Nash" │
+└─────────────────────────────────────┘
+
+The FFmpeg Project proudly presents FFmpeg 2.7 "Nash", about 3
+months after the release of FFmpeg 2.6.
+
+A complete Changelog is available at the root of the project, and the
+complete Git history on http://source.ffmpeg.org.
+
+We hope you will like this release as much as we enjoyed working on it, and
+as usual, if you have any questions about it, or any FFmpeg related topic,
+feel free to join us on the #ffmpeg IRC channel (on irc.freenode.net) or ask
+on the mailing-lists.
configure (11 changed lines)

@@ -3965,7 +3965,7 @@ elif enabled mips; then
 enable fast_cmov
 enable fast_unaligned
 disable aligned_stack
-cpuflags="-march=loongson3a -mhard-float"
+cpuflags="-march=loongson3a -mhard-float -fno-expensive-optimizations"
 ;;
 generic)
 disable mips32r5

@@ -4607,6 +4607,9 @@ unsigned int endian = 'B' << 24 | 'I' << 16 | 'G' << 8 | 'E';
 EOF
 od -t x1 $TMPO | grep -q '42 *49 *47 *45' && enable bigendian

+if ! enabled ppc64 || enabled bigendian; then
+disable vsx
+fi

 check_gas() {
 log "check_gas using '$as' as AS"

@@ -5154,7 +5157,7 @@ enabled libx264 && { use_pkg_config x264 "stdint.h x264.h" x264_encode
 warn "using libx264 without pkg-config"; } } &&
 { check_cpp_condition x264.h "X264_BUILD >= 118" ||
 die "ERROR: libx264 must be installed and version must be >= 0.118."; }
-enabled libx265 && require_pkg_config x265 x265.h x265_encoder_encode &&
+enabled libx265 && require_pkg_config x265 x265.h x265_api_get &&
 { check_cpp_condition x265.h "X265_BUILD >= 57" ||
 die "ERROR: libx265 version must be >= 57."; }
 enabled libxavs && require libxavs xavs.h xavs_encoder_encode -lxavs

@@ -5236,8 +5239,8 @@ disabled securetransport || { check_func SecIdentityCreate "-Wl,-framework,CoreF
 enable securetransport; }

 makeinfo --version > /dev/null 2>&1 && enable makeinfo || disable makeinfo
-enabled makeinfo && (makeinfo --version | \
-grep -q 'makeinfo (GNU texinfo) 5' > /dev/null 2>&1) \
+enabled makeinfo \
+&& [ 0$(makeinfo --version | grep "texinfo" | sed 's/.*texinfo[^0-9]*\([0-9]*\)\..*/\1/') -ge 5 ] \
 && enable makeinfo_html || disable makeinfo_html
 disabled makeinfo_html && texi2html --help 2> /dev/null | grep -q 'init-file' && enable texi2html || disable texi2html
 perl -v > /dev/null 2>&1 && enable perl || disable perl
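As a hedged build-time sketch of what the updated libx265 check implies (the switches below are real configure options, but whether they succeed depends on what pkg-config finds on the system; per the check above, x265 must export x265_api_get, i.e. a build new enough for the "version must be >= 57" requirement):

    # Configure FFmpeg against system libx264/libx265 located via pkg-config.
    ./configure --enable-gpl --enable-libx264 --enable-libx265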
doc/Doxyfile

@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
 # This could be handy for archiving the generated documentation or
 # if some version control system is used.

-PROJECT_NUMBER =
+PROJECT_NUMBER = 2.7.4

 # With the PROJECT_LOGO tag one can specify a logo or icon that is included
 # in the documentation. The maximum height of the logo should not exceed 55
doc/decoders.texi

@@ -95,7 +95,7 @@ This decoder aims to implement the complete FLAC specification from Xiph.

 @item -use_buggy_lpc
 The lavc FLAC encoder used to produce buggy streams with high lpc values
-(like the default value). This option allows to decode such streams
+(like the default value). This option makes it possible to decode such streams
 correctly by using lavc's old buggy lpc logic for decoding.

 @end table
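A minimal usage sketch of this decoder option; the file names are placeholders, and passing the private option before -i assumes the usual rule that input options are applied to the input file's decoder:

    # Decode a FLAC stream written by the old, buggy lavc encoder using the
    # decoder's compatibility LPC path.
    ffmpeg -use_buggy_lpc 1 -i old_lavc_stream.flac decoded.wav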
doc/examples/demuxing_decoding.c

@@ -81,8 +81,11 @@ static int decode_packet(int *got_frame, int cached)
 fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
 return ret;
 }
-if (video_dec_ctx->width != width || video_dec_ctx->height != height ||
-video_dec_ctx->pix_fmt != pix_fmt) {
+if (*got_frame) {
+
+if (frame->width != width || frame->height != height ||
+frame->format != pix_fmt) {
 /* To handle this change, one could call av_image_alloc again and
 * decode the following frames into another rawvideo file. */
 fprintf(stderr, "Error: Width, height and pixel format have to be "

@@ -91,12 +94,11 @@ static int decode_packet(int *got_frame, int cached)
 "old: width = %d, height = %d, format = %s\n"
 "new: width = %d, height = %d, format = %s\n",
 width, height, av_get_pix_fmt_name(pix_fmt),
-video_dec_ctx->width, video_dec_ctx->height,
-av_get_pix_fmt_name(video_dec_ctx->pix_fmt));
+frame->width, frame->height,
+av_get_pix_fmt_name(frame->format));
 return -1;
 }

-if (*got_frame) {
 printf("video_frame%s n:%d coded_n:%d pts:%s\n",
 cached ? "(cached)" : "",
 video_frame_count++, frame->coded_picture_number,
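For context only, a usage sketch of the example program touched above; the build step assumes the standalone examples Makefile and an installed FFmpeg visible to pkg-config, and all file names are placeholders:

    make -C doc/examples demuxing_decoding
    ./doc/examples/demuxing_decoding input.mp4 out.rawvideo out.rawaudio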
doc/examples/transcoding.c

@@ -117,7 +117,7 @@ static int open_output_file(const char *filename)
 /* in this example, we choose transcoding to same codec */
 encoder = avcodec_find_encoder(dec_ctx->codec_id);
 if (!encoder) {
-av_log(NULL, AV_LOG_FATAL, "Neccessary encoder not found\n");
+av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
 return AVERROR_INVALIDDATA;
 }
doc/ffmpeg.texi

@@ -1193,9 +1193,9 @@ The option is intended for cases where features are needed that cannot be
 specified to @command{ffserver} but can be to @command{ffmpeg}.

 @item -sdp_file @var{file} (@emph{global})
-Print sdp information to @var{file}.
+Print sdp information for an output stream to @var{file}.
 This allows dumping sdp information when at least one output isn't an
-rtp stream.
+rtp stream. (Requires at least one of the output formats to be rtp).

 @item -discard (@emph{input})
 Allows discarding specific streams or frames of streams at the demuxer.
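A rough sketch of the documented requirement in practice; the address, port and file names are placeholders:

    # Stream the video over RTP and write the session description to video.sdp.
    ffmpeg -re -i input.mp4 -an -c:v libx264 -f rtp rtp://127.0.0.1:5004 -sdp_file video.sdp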
doc/filters.texi

@@ -4343,7 +4343,7 @@ within the parameter list.
 @item
 Show the text at the center of the video frame:
 @example
-drawtext="fontsize=30:fontfile=FreeSerif.ttf:text='hello world':x=(w-text_w)/2:y=(h-text_h-line_h)/2"
+drawtext="fontsize=30:fontfile=FreeSerif.ttf:text='hello world':x=(w-text_w)/2:y=(h-text_h)/2"
 @end example

 @item
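The corrected expression can be tried directly as a filter; the input, output and font path below are placeholders, and drawtext assumes FFmpeg was built with libfreetype:

    ffmpeg -i input.mp4 -vf "drawtext=fontfile=FreeSerif.ttf:fontsize=30:text='hello world':x=(w-text_w)/2:y=(h-text_h)/2" output.mp4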
doc/general.texi

@@ -954,8 +954,8 @@ following image formats are supported:
 @item Musepack SV8 @tab @tab X
 @item Nellymoser Asao @tab X @tab X
 @item On2 AVC (Audio for Video Codec) @tab @tab X
-@item Opus @tab E @tab E
-@tab supported through external library libopus
+@item Opus @tab E @tab X
+@tab encoding supported through external library libopus
 @item PCM A-law @tab X @tab X
 @item PCM mu-law @tab X @tab X
 @item PCM signed 8-bit planar @tab X @tab X
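A small sketch of what the updated table entry means in practice; file names are placeholders, and the encoding command assumes FFmpeg was configured with --enable-libopus:

    # Decoding Opus uses the native libavcodec decoder.
    ffmpeg -i input.opus decoded.wav
    # Encoding Opus still goes through the external libopus wrapper.
    ffmpeg -i input.wav -c:a libopus output.opus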
ffmpeg.c (64 changed lines)

@@ -456,7 +456,10 @@ static void ffmpeg_cleanup(int ret)
 /* close files */
 for (i = 0; i < nb_output_files; i++) {
 OutputFile *of = output_files[i];
-AVFormatContext *s = of->ctx;
+AVFormatContext *s;
+if (!of)
+continue;
+s = of->ctx;
 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
 avio_closep(&s->pb);
 avformat_free_context(s);

@@ -466,7 +469,12 @@ static void ffmpeg_cleanup(int ret)
 }
 for (i = 0; i < nb_output_streams; i++) {
 OutputStream *ost = output_streams[i];
-AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
+AVBitStreamFilterContext *bsfc;
+
+if (!ost)
+continue;
+
+bsfc = ost->bitstream_filters;
 while (bsfc) {
 AVBitStreamFilterContext *next = bsfc->next;
 av_bitstream_filter_close(bsfc);

@@ -650,6 +658,7 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
 if (!new_pkt.buf)
 exit_program(1);
 } else if (a < 0) {
+new_pkt = *pkt;
 av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
 bsfc->filter->name, pkt->stream_index,
 avctx->codec ? avctx->codec->name : "copy");

@@ -1176,8 +1185,10 @@ static void do_video_out(AVFormatContext *s,
 if (!ost->last_frame)
 ost->last_frame = av_frame_alloc();
 av_frame_unref(ost->last_frame);
-if (next_picture)
+if (next_picture && ost->last_frame)
 av_frame_ref(ost->last_frame, next_picture);
+else
+av_frame_free(&ost->last_frame);
 }

 static double psnr(double d)

@@ -1789,17 +1800,21 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p

 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
 opkt.flags = pkt->flags;

 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
-if ( ost->enc_ctx->codec_id != AV_CODEC_ID_H264
-&& ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
-&& ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
-&& ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
+if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
+&& ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
+&& ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
+&& ost->st->codec->codec_id != AV_CODEC_ID_VC1
 ) {
-if (av_parser_change(ost->parser, ost->st->codec,
+int ret = av_parser_change(ost->parser, ost->st->codec,
 &opkt.data, &opkt.size,
 pkt->data, pkt->size,
-pkt->flags & AV_PKT_FLAG_KEY)) {
+pkt->flags & AV_PKT_FLAG_KEY);
+if (ret < 0) {
+av_log(NULL, AV_LOG_FATAL, "av_parser_change failed\n");
+exit_program(1);
+}
+if (ret) {
 opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
 if (!opkt.buf)
 exit_program(1);

@@ -1810,9 +1825,15 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
 }
 av_copy_packet_side_data(&opkt, pkt);

-if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
+if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
+ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
+(of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
 /* store AVPicture in AVPacket, as expected by the output format */
-avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
+int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
+if (ret < 0) {
+av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed\n");
+exit_program(1);
+}
 opkt.data = (uint8_t *)&pict;
 opkt.size = sizeof(AVPicture);
 opkt.flags |= AV_PKT_FLAG_KEY;

@@ -1863,9 +1884,12 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
 ret = AVERROR_INVALIDDATA;
 }

-if (*got_output || ret<0 || pkt->size)
+if (*got_output || ret<0)
 decode_error_stat[ret<0] ++;

+if (ret < 0 && exit_on_error)
+exit_program(1);
+
 if (!*got_output || ret < 0) {
 if (!pkt->size) {
 for (i = 0; i < ist->nb_filters; i++)

@@ -2008,9 +2032,12 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
 );
 }

-if (*got_output || ret<0 || pkt->size)
+if (*got_output || ret<0)
 decode_error_stat[ret<0] ++;

+if (ret < 0 && exit_on_error)
+exit_program(1);
+
 if (*got_output && ret >= 0) {
 if (ist->dec_ctx->width != decoded_frame->width ||
 ist->dec_ctx->height != decoded_frame->height ||

@@ -2126,9 +2153,12 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
 int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
 &subtitle, got_output, pkt);

-if (*got_output || ret<0 || pkt->size)
+if (*got_output || ret<0)
 decode_error_stat[ret<0] ++;

+if (ret < 0 && exit_on_error)
+exit_program(1);
+
 if (ret < 0 || !*got_output) {
 if (!pkt->size)
 sub2video_flush(ist);

@@ -2340,6 +2370,9 @@ static void print_sdp(void)
 }
 }

+if (!j)
+goto fail;
+
 av_sdp_create(avc, j, sdp, sizeof(sdp));

 if (!sdp_filename) {

@@ -2355,6 +2388,7 @@ static void print_sdp(void)
 }
 }

+fail:
 av_freep(&avc);
 }
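One of the changes above routes decode failures into the existing exit_on_error flag; a hedged sketch of how that surfaces on the command line (the input name is a placeholder):

    # With -xerror, ffmpeg now aborts as soon as a packet fails to decode
    # instead of only counting the error.
    ffmpeg -xerror -i possibly_corrupt.mkv -f null -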
ffmpeg_opt.c

@@ -1203,6 +1203,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
 uint32_t tag = strtol(codec_tag, &next, 0);
 if (*next)
 tag = AV_RL32(codec_tag);
+ost->st->codec->codec_tag =
 ost->enc_ctx->codec_tag = tag;
 }

@@ -2438,6 +2439,9 @@ static int opt_vstats(void *optctx, const char *opt, const char *arg)
 time_t today2 = time(NULL);
 struct tm *today = localtime(&today2);

+if (!today)
+return AVERROR(errno);
+
 snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
 today->tm_sec);
 return opt_vstats_file(NULL, opt, filename);
ffmpeg_vda.c

@@ -77,6 +77,8 @@ static int vda_retrieve_data(AVCodecContext *s, AVFrame *frame)
 frame->width, frame->height);

 ret = av_frame_copy_props(vda->tmp_frame, frame);
+CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
+
 if (ret < 0)
 return ret;
libavcodec/aaccoder.c

@@ -728,7 +728,7 @@ static void search_for_quantizers_anmr(AVCodecContext *avctx, AACEncContext *s,
 }
 while (idx) {
 sce->sf_idx[bandaddr[idx]] = minq + q0;
-minq = paths[idx][minq].prev;
+minq = FFMAX(paths[idx][minq].prev, 0);
 idx--;
 }
 //set the same quantizers inside window groups
libavcodec/aacdec.c

@@ -2940,6 +2940,11 @@ static int aac_decode_er_frame(AVCodecContext *avctx, void *data,

 spectral_to_sample(ac);

+if (!ac->frame->data[0] && samples) {
+av_log(avctx, AV_LOG_ERROR, "no frame data found\n");
+return AVERROR_INVALIDDATA;
+}
+
 ac->frame->nb_samples = samples;
 ac->frame->sample_rate = avctx->sample_rate;
 *got_frame_ptr = 1;

@@ -3178,7 +3183,7 @@ static int aac_decode_frame(AVCodecContext *avctx, void *data,
 if (INT_MAX / 8 <= buf_size)
 return AVERROR_INVALIDDATA;

-if ((err = init_get_bits(&gb, buf, buf_size * 8)) < 0)
+if ((err = init_get_bits8(&gb, buf, buf_size)) < 0)
 return err;

 switch (ac->oc[1].m4ac.object_type) {
libavcodec/aacsbr.c

@@ -1019,6 +1019,8 @@ static unsigned int read_sbr_data(AACContext *ac, SpectralBandReplication *sbr,
 {
 unsigned int cnt = get_bits_count(gb);

+sbr->id_aac = id_aac;
+
 if (id_aac == TYPE_SCE || id_aac == TYPE_CCE) {
 if (read_sbr_single_channel_element(ac, sbr, gb)) {
 sbr_turnoff(sbr);

@@ -1695,6 +1697,12 @@ void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
 int nch = (id_aac == TYPE_CPE) ? 2 : 1;
 int err;

+if (id_aac != sbr->id_aac) {
+av_log(ac->avctx, AV_LOG_ERROR,
+"element type mismatch %d != %d\n", id_aac, sbr->id_aac);
+sbr_turnoff(sbr);
+}
+
 if (!sbr->kx_and_m_pushed) {
 sbr->kx[0] = sbr->kx[1];
 sbr->m[0] = sbr->m[1];

@@ -1718,6 +1726,7 @@ void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
 sbr->c.sbr_hf_inverse_filter(&sbr->dsp, sbr->alpha0, sbr->alpha1,
 (const float (*)[40][2]) sbr->X_low, sbr->k[0]);
 sbr_chirp(sbr, &sbr->data[ch]);
+av_assert0(sbr->data[ch].bs_num_env > 0);
 sbr_hf_gen(ac, sbr, sbr->X_high,
 (const float (*)[40][2]) sbr->X_low,
 (const float (*)[2]) sbr->alpha0,
libavcodec/alac.c

@@ -534,6 +534,12 @@ static int allocate_buffers(ALACContext *alac)
 int ch;
 int buf_size = alac->max_samples_per_frame * sizeof(int32_t);

+for (ch = 0; ch < 2; ch++) {
+alac->predict_error_buffer[ch] = NULL;
+alac->output_samples_buffer[ch] = NULL;
+alac->extra_bits_buffer[ch] = NULL;
+}
+
 for (ch = 0; ch < FFMIN(alac->channels, 2); ch++) {
 FF_ALLOC_OR_GOTO(alac->avctx, alac->predict_error_buffer[ch],
 buf_size, buf_alloc_fail);
libavcodec/apedec.c

@@ -892,6 +892,9 @@ static void long_filter_high_3800(int32_t *buffer, int order, int shift, int len
 int32_t dotprod, sign;
 int32_t coeffs[256], delay[256];

+if (order >= length)
+return;
+
 memset(coeffs, 0, order * sizeof(*coeffs));
 for (i = 0; i < order; i++)
 delay[i] = buffer[i];
libavcodec/ass_split.h

@@ -103,7 +103,7 @@ typedef struct ASSSplitContext ASSSplitContext;
 * Split a full ASS file or a ASS header from a string buffer and store
 * the split structure in a newly allocated context.
 *
-* @param buf String containing the ASS formated data.
+* @param buf String containing the ASS formatted data.
 * @return Newly allocated struct containing split data.
 */
 ASSSplitContext *ff_ass_split(const char *buf);
libavcodec/bitstream.c

@@ -247,7 +247,7 @@ static int build_table(VLC *vlc, int table_nb_bits, int nb_codes,

 /* Build VLC decoding tables suitable for use with get_vlc().

-'nb_bits' set thee decoding table size (2^nb_bits) entries. The
+'nb_bits' set the decoding table size (2^nb_bits) entries. The
 bigger it is, the faster is the decoding. But it should not be too
 big to save memory and L1 cache. '9' is a good compromise.

@@ -265,7 +265,7 @@ static int build_table(VLC *vlc, int table_nb_bits, int nb_codes,
 'xxx_size' : gives the number of bytes of each entry of the 'bits'
 or 'codes' tables.

-'wrap' and 'size' allows to use any memory configuration and types
+'wrap' and 'size' make it possible to use any memory configuration and types
 (byte/word/long) to store the 'bits', 'codes', and 'symbols' tables.

 'use_static' should be set to 1 for tables, which should be freed
libavcodec/bytestream.h

@@ -71,8 +71,10 @@ static av_always_inline type bytestream2_get_ ## name ## u(GetByteContext *g) \
 } \
 static av_always_inline type bytestream2_get_ ## name(GetByteContext *g) \
 { \
-if (g->buffer_end - g->buffer < bytes) \
+if (g->buffer_end - g->buffer < bytes) { \
+g->buffer = g->buffer_end; \
 return 0; \
+} \
 return bytestream2_get_ ## name ## u(g); \
 } \
 static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g) \
libavcodec/cabac.c

@@ -51,7 +51,7 @@ void ff_init_cabac_encoder(CABACContext *c, uint8_t *buf, int buf_size){
 *
 * @param buf_size size of buf in bits
 */
-void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size){
+int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size){
 c->bytestream_start=
 c->bytestream= buf;
 c->bytestream_end= buf + buf_size;

@@ -64,6 +64,9 @@ void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size){
 #endif
 c->low+= ((*c->bytestream++)<<2) + 2;
 c->range= 0x1FE;
+if ((c->range<<(CABAC_BITS+1)) < c->low)
+return AVERROR_INVALIDDATA;
+return 0;
 }

 void ff_init_cabac_states(void)
libavcodec/cabac.h

@@ -56,7 +56,7 @@ typedef struct CABACContext{
 }CABACContext;

 void ff_init_cabac_encoder(CABACContext *c, uint8_t *buf, int buf_size);
-void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size);
+int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size);
 void ff_init_cabac_states(void);

 #endif /* AVCODEC_CABAC_H */
libavcodec/cabac_functions.h

@@ -74,7 +74,8 @@ static inline void renorm_cabac_decoder_once(CABACContext *c){

 #ifndef get_cabac_inline
 static void refill2(CABACContext *c){
-int i, x;
+int i;
+unsigned x;

 x= c->low ^ (c->low-1);
 i= 7 - ff_h264_norm_shift[x>>(CABAC_BITS-1)];

@@ -190,7 +191,8 @@ static av_unused const uint8_t* skip_bytes(CABACContext *c, int n) {
 #endif
 if ((int) (c->bytestream_end - ptr) < n)
 return NULL;
-ff_init_cabac_decoder(c, ptr + n, c->bytestream_end - ptr - n);
+if (ff_init_cabac_decoder(c, ptr + n, c->bytestream_end - ptr - n) < 0)
+return NULL;

 return ptr;
 }
@@ -938,6 +938,10 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
     for (i = 0; i < SUBFRAMES; i++)
         put_subframe(c, i);
 
+
+    for (i = put_bits_count(&c->pb); i < 8*c->frame_size; i++)
+        put_bits(&c->pb, 1, 0);
+
     flush_put_bits(&c->pb);
 
     avpkt->pts = frame->pts;
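Note: the added loop above zero-pads the bit writer up to the fixed frame size before flushing. As a standalone sketch (hypothetical helper name; PutBitContext, put_bits_count, put_bits and flush_put_bits are the regular libavcodec bit-writer API):

    static void pad_frame_to_size(PutBitContext *pb, int frame_size_bytes)
    {
        int i;
        /* write single 0 bits until exactly frame_size_bytes have been produced */
        for (i = put_bits_count(pb); i < 8 * frame_size_bytes; i++)
            put_bits(pb, 1, 0);
        flush_put_bits(pb);
    }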
@@ -100,10 +100,12 @@ typedef struct DiracParseUnit {
 static int unpack_parse_unit(DiracParseUnit *pu, DiracParseContext *pc,
                              int offset)
 {
-    uint8_t *start = pc->buffer + offset;
-    uint8_t *end = pc->buffer + pc->index;
-    if (start < pc->buffer || (start + 13 > end))
+    int8_t *start;
+
+    if (offset < 0 || pc->index - 13 < offset)
         return 0;
+
+    start = pc->buffer + offset;
     pu->pu_type = start[4];
 
     pu->next_pu_offset = AV_RB32(start + 5);
@@ -112,6 +114,15 @@ static int unpack_parse_unit(DiracParseUnit *pu, DiracParseContext *pc,
     if (pu->pu_type == 0x10 && pu->next_pu_offset == 0)
         pu->next_pu_offset = 13;
 
+    if (pu->next_pu_offset && pu->next_pu_offset < 13) {
+        av_log(NULL, AV_LOG_ERROR, "next_pu_offset %d is invalid\n", pu->next_pu_offset);
+        return 0;
+    }
+    if (pu->prev_pu_offset && pu->prev_pu_offset < 13) {
+        av_log(NULL, AV_LOG_ERROR, "prev_pu_offset %d is invalid\n", pu->prev_pu_offset);
+        return 0;
+    }
+
     return 1;
 }
 
@@ -123,7 +134,7 @@ static int dirac_combine_frame(AVCodecParserContext *s, AVCodecContext *avctx,
     DiracParseContext *pc = s->priv_data;
 
     if (pc->overread_index) {
-        memcpy(pc->buffer, pc->buffer + pc->overread_index,
+        memmove(pc->buffer, pc->buffer + pc->overread_index,
                pc->index - pc->overread_index);
         pc->index -= pc->overread_index;
         pc->overread_index = 0;
@@ -190,7 +201,7 @@ static int dirac_combine_frame(AVCodecParserContext *s, AVCodecContext *avctx,
         }
 
         /* Get the picture number to set the pts and dts*/
-        if (parse_timing_info) {
+        if (parse_timing_info && pu1.prev_pu_offset >= 13) {
             uint8_t *cur_pu = pc->buffer +
                               pc->index - 13 - pu1.prev_pu_offset;
             int pts = AV_RB32(cur_pu + 13);
@@ -1563,7 +1563,7 @@ static void select_dsp_funcs(DiracContext *s, int width, int height, int xblen,
     }
 }
 
-static void interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, int width, int height)
+static int interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, int width, int height)
 {
     /* chroma allocates an edge of 8 when subsampled
        which for 4:2:2 means an h edge of 16 and v edge of 8
@@ -1575,11 +1575,14 @@ static void interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, in
 
     /* no need for hpel if we only have fpel vectors */
     if (!s->mv_precision)
-        return;
+        return 0;
 
     for (i = 1; i < 4; i++) {
         if (!ref->hpel_base[plane][i])
             ref->hpel_base[plane][i] = av_malloc((height+2*edge) * ref->avframe->linesize[plane] + 32);
+        if (!ref->hpel_base[plane][i]) {
+            return AVERROR(ENOMEM);
+        }
         /* we need to be 16-byte aligned even for chroma */
         ref->hpel[plane][i] = ref->hpel_base[plane][i] + edge*ref->avframe->linesize[plane] + 16;
     }
@@ -1593,6 +1596,8 @@ static void interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, in
         s->mpvencdsp.draw_edges(ref->hpel[plane][3], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
     }
     ref->interpolated[plane] = 1;
+
+    return 0;
 }
 
 /**
@@ -1646,8 +1651,11 @@ static int dirac_decode_frame_internal(DiracContext *s)
 
         select_dsp_funcs(s, p->width, p->height, p->xblen, p->yblen);
 
-        for (i = 0; i < s->num_refs; i++)
-            interpolate_refplane(s, s->ref_pics[i], comp, p->width, p->height);
+        for (i = 0; i < s->num_refs; i++) {
+            int ret = interpolate_refplane(s, s->ref_pics[i], comp, p->width, p->height);
+            if (ret < 0)
+                return ret;
+        }
 
         memset(s->mctmp, 0, 4*p->yoffset*p->stride);
 
@@ -348,11 +348,11 @@ static int decode_frame(AVCodecContext *avctx,
                     // For 12 bit, ignore alpha
                     if (elements == 4)
                         buf += 2;
-                    // Jump to next aligned position
-                    buf += need_align;
                 }
                 for (i = 0; i < 3; i++)
                     ptr[i] += p->linesize[i];
+                // Jump to next aligned position
+                buf += need_align;
             }
             break;
         case 16:
@@ -75,17 +75,20 @@ static av_cold int encode_init(AVCodecContext *avctx)
     return 0;
 }
 
-#define write16(p, value) \
-do { \
-    if (s->big_endian) AV_WB16(p, value); \
-    else AV_WL16(p, value); \
-} while(0)
+static av_always_inline void write16_internal(int big_endian, void *p, int value)
+{
+    if (big_endian) AV_WB16(p, value);
+    else AV_WL16(p, value);
+}
 
-#define write32(p, value) \
-do { \
-    if (s->big_endian) AV_WB32(p, value); \
-    else AV_WL32(p, value); \
-} while(0)
+static av_always_inline void write32_internal(int big_endian, void *p, int value)
+{
+    if (big_endian) AV_WB32(p, value);
+    else AV_WL32(p, value);
+}
 
+#define write16(p, value) write16_internal(s->big_endian, p, value)
+#define write32(p, value) write32_internal(s->big_endian, p, value)
+
 static void encode_rgb48_10bit(AVCodecContext *avctx, const AVPicture *pic, uint8_t *dst)
 {
@@ -346,7 +346,7 @@ static int decode_dvd_subtitles(DVDSubContext *ctx, AVSubtitle *sub_header,
         }
     }
 the_end:
-    if (offset1 >= 0) {
+    if (offset1 >= 0 && offset2 >= 0) {
         int w, h;
         uint8_t *bitmap;
 
@@ -65,7 +65,7 @@ static int dxtory_decode_v1_410(AVCodecContext *avctx, AVFrame *pic,
     uint8_t *Y1, *Y2, *Y3, *Y4, *U, *V;
     int ret;
 
-    if (src_size < avctx->width * avctx->height * 9LL / 8) {
+    if (src_size < FFALIGN(avctx->width, 4) * FFALIGN(avctx->height, 4) * 9LL / 8) {
         av_log(avctx, AV_LOG_ERROR, "packet too small\n");
         return AVERROR_INVALIDDATA;
     }
@@ -108,7 +108,7 @@ static int dxtory_decode_v1_420(AVCodecContext *avctx, AVFrame *pic,
     uint8_t *Y1, *Y2, *U, *V;
     int ret;
 
-    if (src_size < avctx->width * avctx->height * 3LL / 2) {
+    if (src_size < FFALIGN(avctx->width, 2) * FFALIGN(avctx->height, 2) * 3LL / 2) {
         av_log(avctx, AV_LOG_ERROR, "packet too small\n");
         return AVERROR_INVALIDDATA;
     }
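Note: both hunks above compute the minimum payload from block-aligned dimensions rather than the visible ones, since the decoder consumes whole 4x4 (or 2x2) blocks. A sketch of the idea with illustrative names:

    /* 9 bits per pixel, rounded up to whole 4x4 blocks */
    int64_t min_size = (int64_t)FFALIGN(width, 4) * FFALIGN(height, 4) * 9 / 8;
    if (src_size < min_size)
        return AVERROR_INVALIDDATA;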
@@ -381,14 +381,19 @@ static void guess_mv(ERContext *s)
 #define MV_UNCHANGED 1
     const int mb_stride = s->mb_stride;
     const int mb_width = s->mb_width;
-    const int mb_height = s->mb_height;
+    int mb_height = s->mb_height;
     int i, depth, num_avail;
     int mb_x, mb_y, mot_step, mot_stride;
 
+    if (s->last_pic.f && s->last_pic.f->data[0])
+        mb_height = FFMIN(mb_height, (s->last_pic.f->height+15)>>4);
+    if (s->next_pic.f && s->next_pic.f->data[0])
+        mb_height = FFMIN(mb_height, (s->next_pic.f->height+15)>>4);
+
     set_mv_strides(s, &mot_step, &mot_stride);
 
     num_avail = 0;
-    for (i = 0; i < s->mb_num; i++) {
+    for (i = 0; i < mb_width * mb_height; i++) {
         const int mb_xy = s->mb_index2xy[i];
         int f = 0;
         int error = s->error_status_table[mb_xy];
@@ -413,7 +418,7 @@ static void guess_mv(ERContext *s)
 
     if ((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) ||
         num_avail <= mb_width / 2) {
-        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
+        for (mb_y = 0; mb_y < mb_height; mb_y++) {
             for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                 const int mb_xy = mb_x + mb_y * s->mb_stride;
                 int mv_dir = (s->last_pic.f && s->last_pic.f->data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
@@ -442,7 +447,7 @@ static void guess_mv(ERContext *s)
         int score_sum = 0;
 
         changed = 0;
-        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
+        for (mb_y = 0; mb_y < mb_height; mb_y++) {
             for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                 const int mb_xy = mb_x + mb_y * s->mb_stride;
                 int mv_predictor[8][2] = { { 0 } };
@@ -675,7 +680,7 @@ skip_last_mv:
     if (none_left)
         return;
 
-    for (i = 0; i < s->mb_num; i++) {
+    for (i = 0; i < mb_width * mb_height; i++) {
         int mb_xy = s->mb_index2xy[i];
         if (fixed[mb_xy])
             fixed[mb_xy] = MV_FROZEN;
@@ -459,7 +459,7 @@ static int huf_build_dec_table(const uint64_t *hcode, int im,
         lc += 8; \
     }
 
-#define get_code(po, rlc, c, lc, gb, out, oe) \
+#define get_code(po, rlc, c, lc, gb, out, oe, outb) \
     { \
         if (po == rlc) { \
             if (lc < 8) \
@@ -468,7 +468,7 @@ static int huf_build_dec_table(const uint64_t *hcode, int im,
 \
             cs = c >> lc; \
 \
-            if (out + cs > oe) \
+            if (out + cs > oe || out == outb) \
                 return AVERROR_INVALIDDATA; \
 \
             s = out[-1]; \
@@ -501,7 +501,7 @@ static int huf_decode(const uint64_t *hcode, const HufDec *hdecod,
 
             if (pl.len) {
                 lc -= pl.len;
-                get_code(pl.lit, rlc, c, lc, gb, out, oe);
+                get_code(pl.lit, rlc, c, lc, gb, out, oe, outb);
             } else {
                 int j;
 
@@ -518,7 +518,7 @@ static int huf_decode(const uint64_t *hcode, const HufDec *hdecod,
                     if ((hcode[pl.p[j]] >> 6) ==
                         ((c >> (lc - l)) & ((1LL << l) - 1))) {
                         lc -= l;
-                        get_code(pl.p[j], rlc, c, lc, gb, out, oe);
+                        get_code(pl.p[j], rlc, c, lc, gb, out, oe, outb);
                         break;
                     }
                 }
@@ -539,7 +539,7 @@ static int huf_decode(const uint64_t *hcode, const HufDec *hdecod,
 
         if (pl.len) {
             lc -= pl.len;
-            get_code(pl.lit, rlc, c, lc, gb, out, oe);
+            get_code(pl.lit, rlc, c, lc, gb, out, oe, outb);
         } else {
             return AVERROR_INVALIDDATA;
         }
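Note: the extra outb argument threaded through get_code() exists so the run-length branch can refuse to expand a run before any sample has been written, since the run repeats out[-1]. A condensed sketch of the guard (names mirror the macro; illustrative only):

    if (out + cs > oe || out == outb)   /* would overflow, or no previous sample */
        return AVERROR_INVALIDDATA;
    s = out[-1];                        /* safe: out > outb here */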
@@ -66,7 +66,7 @@ av_cold int ffv1_common_init(AVCodecContext *avctx)
 
 av_cold int ffv1_init_slice_state(FFV1Context *f, FFV1Context *fs)
 {
-    int j;
+    int j, i;
 
     fs->plane_count = f->plane_count;
     fs->transparency = f->transparency;
@@ -80,10 +80,15 @@ av_cold int ffv1_init_slice_state(FFV1Context *f, FFV1Context *fs)
             if (!p->state)
                 return AVERROR(ENOMEM);
         } else {
-            if (!p->vlc_state)
-                p->vlc_state = av_malloc_array(p->context_count, sizeof(VlcState));
+            if (!p->vlc_state) {
+                p->vlc_state = av_mallocz_array(p->context_count, sizeof(VlcState));
                 if (!p->vlc_state)
                     return AVERROR(ENOMEM);
+                for (i = 0; i < p->context_count; i++) {
+                    p->vlc_state[i].error_sum = 4;
+                    p->vlc_state[i].count = 1;
+                }
+            }
         }
     }
 
@@ -101,7 +106,7 @@ av_cold int ffv1_init_slice_state(FFV1Context *f, FFV1Context *fs)
 av_cold int ffv1_init_slices_state(FFV1Context *f)
 {
     int i, ret;
-    for (i = 0; i < f->slice_count; i++) {
+    for (i = 0; i < f->max_slice_count; i++) {
         FFV1Context *fs = f->slice_context[i];
         if ((ret = ffv1_init_slice_state(f, fs)) < 0)
             return AVERROR(ENOMEM);
@@ -113,10 +118,10 @@ av_cold int ffv1_init_slice_contexts(FFV1Context *f)
 {
     int i;
 
-    f->slice_count = f->num_h_slices * f->num_v_slices;
-    av_assert0(f->slice_count > 0);
+    f->max_slice_count = f->num_h_slices * f->num_v_slices;
+    av_assert0(f->max_slice_count > 0);
 
-    for (i = 0; i < f->slice_count; i++) {
+    for (i = 0; i < f->max_slice_count; i++) {
         int sx = i % f->num_h_slices;
         int sy = i / f->num_h_slices;
         int sxs = f->avctx->width * sx / f->num_h_slices;
@@ -210,7 +215,7 @@ av_cold int ffv1_close(AVCodecContext *avctx)
         ff_thread_release_buffer(avctx, &s->last_picture);
     av_frame_free(&s->last_picture.f);
 
-    for (j = 0; j < s->slice_count; j++) {
+    for (j = 0; j < s->max_slice_count; j++) {
         FFV1Context *fs = s->slice_context[j];
         for (i = 0; i < s->plane_count; i++) {
             PlaneContext *p = &fs->plane[i];
@@ -224,14 +229,14 @@ av_cold int ffv1_close(AVCodecContext *avctx)
     av_freep(&avctx->stats_out);
     for (j = 0; j < s->quant_table_count; j++) {
         av_freep(&s->initial_states[j]);
-        for (i = 0; i < s->slice_count; i++) {
+        for (i = 0; i < s->max_slice_count; i++) {
             FFV1Context *sf = s->slice_context[i];
             av_freep(&sf->rc_stat2[j]);
         }
         av_freep(&s->rc_stat2[j]);
     }
 
-    for (i = 0; i < s->slice_count; i++)
+    for (i = 0; i < s->max_slice_count; i++)
         av_freep(&s->slice_context[i]);
 
     return 0;
@@ -117,6 +117,7 @@ typedef struct FFV1Context {
 
     struct FFV1Context *slice_context[MAX_SLICES];
     int slice_count;
+    int max_slice_count;
    int num_v_slices;
    int num_h_slices;
    int slice_width;
@@ -47,8 +47,11 @@ static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state,
     else {
         int i, e, a;
         e = 0;
-        while (get_rac(c, state + 1 + FFMIN(e, 9))) // 1..10
+        while (get_rac(c, state + 1 + FFMIN(e, 9))) { // 1..10
             e++;
+            if (e > 31)
+                return AVERROR_INVALIDDATA;
+        }
 
         a = 1;
         for (i = e - 1; i >= 0; i--)
@@ -302,7 +305,7 @@ static int decode_slice_header(FFV1Context *f, FFV1Context *fs)
     for (i = 0; i < f->plane_count; i++) {
         PlaneContext * const p = &fs->plane[i];
         int idx = get_symbol(c, state, 0);
-        if (idx > (unsigned)f->quant_table_count) {
+        if (idx >= (unsigned)f->quant_table_count) {
             av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
             return -1;
         }
@@ -405,6 +408,7 @@ static int decode_slice(AVCodecContext *c, void *arg)
         if (ffv1_init_slice_state(f, fs) < 0)
             return AVERROR(ENOMEM);
         if (decode_slice_header(f, fs) < 0) {
+            fs->slice_x = fs->slice_y = fs->slice_height = fs->slice_width = 0;
             fs->slice_damaged = 1;
             return AVERROR_INVALIDDATA;
         }
@@ -499,7 +503,10 @@ static int read_quant_tables(RangeCoder *c,
     int context_count = 1;
 
     for (i = 0; i < 5; i++) {
-        context_count *= read_quant_table(c, quant_table[i], context_count);
+        int ret = read_quant_table(c, quant_table[i], context_count);
+        if (ret < 0)
+            return ret;
+        context_count *= ret;
         if (context_count > 32768U) {
             return AVERROR_INVALIDDATA;
         }
@@ -561,8 +568,11 @@ static int read_extra_header(FFV1Context *f)
     }
 
     f->quant_table_count = get_symbol(c, state, 0);
-    if (f->quant_table_count > (unsigned)MAX_QUANT_TABLES)
+    if (f->quant_table_count > (unsigned)MAX_QUANT_TABLES || !f->quant_table_count) {
+        av_log(f->avctx, AV_LOG_ERROR, "quant table count %d is invalid\n", f->quant_table_count);
+        f->quant_table_count = 0;
         return AVERROR_INVALIDDATA;
+    }
 
     for (i = 0; i < f->quant_table_count; i++) {
         f->context_count[i] = read_quant_tables(c, f->quant_tables[i]);
@@ -772,6 +782,7 @@ static int read_header(FFV1Context *f)
             av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
             return AVERROR_INVALIDDATA;
         }
+        f->slice_count = f->max_slice_count;
     } else if (f->version < 3) {
         f->slice_count = get_symbol(c, state, 0);
     } else {
@@ -786,8 +797,8 @@ static int read_header(FFV1Context *f)
             p -= size + trailer;
         }
     }
-    if (f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0) {
-        av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid\n", f->slice_count);
+    if (f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0 || f->slice_count > f->max_slice_count) {
+        av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid (max=%d)\n", f->slice_count, f->max_slice_count);
         return AVERROR_INVALIDDATA;
     }
 
@@ -929,6 +940,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
         else v = buf_p - c->bytestream_start;
         if (buf_p - c->bytestream_start < v) {
             av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
+            ff_thread_report_progress(&f->picture, INT_MAX, 0);
             return AVERROR_INVALIDDATA;
         }
         buf_p -= v;
@@ -1010,6 +1022,7 @@ static int init_thread_copy(AVCodecContext *avctx)
     f->picture.f = NULL;
     f->last_picture.f = NULL;
     f->sample_buffer = NULL;
+    f->max_slice_count = 0;
     f->slice_count = 0;
 
     for (i = 0; i < f->quant_table_count; i++) {
@@ -1085,7 +1098,7 @@ static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
         av_assert0(!fdst->sample_buffer);
     }
 
-    av_assert1(fdst->slice_count == fsrc->slice_count);
+    av_assert1(fdst->max_slice_count == fsrc->max_slice_count);
 
 
     ff_thread_release_buffer(dst, &fdst->picture);
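Note: the slice_count/max_slice_count split in the FFV1 hunks establishes one invariant: contexts are allocated for max_slice_count slices, and any slice count read from the bitstream must stay inside that allocation before slice_context[] is indexed. A sketch of the check (illustrative variable names):

    if (slice_count <= 0 || slice_count > max_slice_count)
        return AVERROR_INVALIDDATA;   /* never index past what was allocated */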
@@ -753,7 +753,9 @@ static av_cold int encode_init(AVCodecContext *avctx)
         s->chroma_planes = desc->nb_components < 3 ? 0 : 1;
         s->colorspace = 0;
         s->transparency = desc->nb_components == 4;
-        if (!avctx->bits_per_raw_sample)
+        if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
+            s->bits_per_raw_sample = 8;
+        else if (!s->bits_per_raw_sample)
             s->bits_per_raw_sample = 8;
         break;
     case AV_PIX_FMT_RGB32:
@@ -974,6 +976,7 @@ slices_ok:
 
     if ((ret = ffv1_init_slice_contexts(s)) < 0)
         return ret;
+    s->slice_count = s->max_slice_count;
     if ((ret = ffv1_init_slices_state(s)) < 0)
         return ret;
 
@@ -983,7 +986,7 @@ slices_ok:
         if (!avctx->stats_out)
             return AVERROR(ENOMEM);
         for (i = 0; i < s->quant_table_count; i++)
-            for (j = 0; j < s->slice_count; j++) {
+            for (j = 0; j < s->max_slice_count; j++) {
                 FFV1Context *sf = s->slice_context[j];
                 av_assert0(!sf->rc_stat2[i]);
                 sf->rc_stat2[i] = av_mallocz(s->context_count[i] *
@@ -1207,6 +1210,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
         for (i = 0; i < f->quant_table_count; i++)
             memset(f->rc_stat2[i], 0, f->context_count[i] * sizeof(*f->rc_stat2[i]));
 
+    av_assert0(f->slice_count == f->max_slice_count);
     for (j = 0; j < f->slice_count; j++) {
         FFV1Context *fs = f->slice_context[j];
         for (i = 0; i < 256; i++) {
@@ -705,7 +705,7 @@ static uint64_t calc_rice_params(RiceContext *rc,
     bits[pmin] = UINT32_MAX;
     for (i = pmax; ; ) {
         bits[i] = calc_optimal_rice_params(&tmp_rc, i, sums, n, pred_order, kmax, exact);
-        if (bits[i] < bits[opt_porder]) {
+        if (bits[i] < bits[opt_porder] || pmax == pmin) {
             opt_porder = i;
             *rc = tmp_rc;
         }
@@ -413,6 +413,10 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
                 }
 
                 if (has_diff) {
+                    if (size < 3) {
+                        av_log(avctx, AV_LOG_ERROR, "size too small for diff\n");
+                        return AVERROR_INVALIDDATA;
+                    }
                     if (!s->keyframe) {
                         av_log(avctx, AV_LOG_ERROR,
                                "Inter frame without keyframe\n");
@@ -440,6 +444,10 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
                     int row = get_bits(&gb, 8);
                     av_log(avctx, AV_LOG_DEBUG, "%dx%d zlibprime_curr %dx%d\n",
                            i, j, col, row);
+                    if (size < 3) {
+                        av_log(avctx, AV_LOG_ERROR, "size too small for zlibprime_curr\n");
+                        return AVERROR_INVALIDDATA;
+                    }
                     size -= 2;
                     avpriv_request_sample(avctx, "zlibprime_curr");
                     return AVERROR_PATCHWELCOME;
@@ -111,7 +111,7 @@ static av_cold int flashsv_encode_init(AVCodecContext *avctx)
 
     if (avctx->width > 4095 || avctx->height > 4095) {
         av_log(avctx, AV_LOG_ERROR,
-               "Input dimensions too large, input must be max 4096x4096 !\n");
+               "Input dimensions too large, input must be max 4095x4095 !\n");
         return AVERROR_INVALIDDATA;
     }
 
@@ -746,7 +746,7 @@ static int g2m_decode_frame(AVCodecContext *avctx, void *data,
             c->tile_height = bytestream2_get_be32(&bc);
             if (c->tile_width <= 0 || c->tile_height <= 0 ||
                 ((c->tile_width | c->tile_height) & 0xF) ||
-                c->tile_width * 4LL * c->tile_height >= INT_MAX
+                c->tile_width * (uint64_t)c->tile_height >= INT_MAX / 4
             ) {
                 av_log(avctx, AV_LOG_ERROR,
                        "Invalid tile dimensions %dx%d\n",
@@ -877,6 +877,8 @@ header_fail:
     c->height = 0;
     c->tiles_x =
     c->tiles_y = 0;
+    c->tile_width =
+    c->tile_height = 0;
     return ret;
 }
 
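Note: the tile-size hunk keeps the width*height product in 64-bit arithmetic and compares it against INT_MAX/4, so that a later 4-bytes-per-pixel allocation cannot exceed int range. A sketch of the pattern (illustrative names):

    if ((uint64_t)tile_width * tile_height >= INT_MAX / 4)
        return AVERROR_INVALIDDATA;   /* 4 bytes per pixel would not fit in an int */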
@@ -68,7 +68,7 @@ static inline int get_ue_golomb(GetBitContext *gb)
         int log = 2 * av_log2(buf) - 31;
         LAST_SKIP_BITS(re, gb, 32 - log);
         CLOSE_READER(re, gb);
-        if (CONFIG_FTRAPV && log < 0) {
+        if (log < 7) {
             av_log(NULL, AV_LOG_ERROR, "Invalid UE golomb code\n");
             return AVERROR_INVALIDDATA;
         }
@@ -2026,6 +2026,7 @@ decode_intra_mb:
         const int mb_size = ff_h264_mb_sizes[h->sps.chroma_format_idc] *
                             h->sps.bit_depth_luma >> 3;
         const uint8_t *ptr;
+        int ret;
 
         // We assume these blocks are very rare so we do not optimize it.
         // FIXME The two following lines get the bitstream position in the cabac
@@ -2042,7 +2043,9 @@ decode_intra_mb:
         sl->intra_pcm_ptr = ptr;
         ptr += mb_size;
 
-        ff_init_cabac_decoder(&sl->cabac, ptr, sl->cabac.bytestream_end - ptr);
+        ret = ff_init_cabac_decoder(&sl->cabac, ptr, sl->cabac.bytestream_end - ptr);
+        if (ret < 0)
+            return ret;
 
         // All blocks are present
         h->cbp_table[mb_xy] = 0xf7ef;
@@ -158,6 +158,7 @@ static void MCFUNC(hl_motion)(const H264Context *h, H264SliceContext *sl,
         }
     }
 
-    prefetch_motion(h, sl, 1, PIXEL_SHIFT, CHROMA_IDC);
+    if (USES_LIST(mb_type, 1))
+        prefetch_motion(h, sl, 1, PIXEL_SHIFT, CHROMA_IDC);
 }
 
@@ -182,7 +182,7 @@ static int h264_mp4toannexb_filter(AVBitStreamFilterContext *bsfc,
         buf += ctx->length_size;
         unit_type = *buf & 0x1f;
 
-        if (buf + nal_size > buf_end || nal_size < 0)
+        if (nal_size > buf_end - buf || nal_size < 0)
             goto fail;
 
         if (unit_type == 7)
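Note: the bounds check above is rewritten so the arithmetic cannot overflow the pointer: instead of forming buf + nal_size (undefined once nal_size is huge), the size is compared against the bytes remaining. The general pattern, as a sketch:

    if (nal_size < 0 || nal_size > buf_end - buf)   /* never compute buf + nal_size first */
        goto fail;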
@@ -122,9 +122,18 @@ static int add_sorted(H264Picture **sorted, H264Picture **src, int len, int limi
     return out_i;
 }
 
+static int mismatches_ref(H264Context *h, H264Picture *pic)
+{
+    AVFrame *f = pic->f;
+    return (h->cur_pic_ptr->f->width != f->width ||
+            h->cur_pic_ptr->f->height != f->height ||
+            h->cur_pic_ptr->f->format != f->format);
+}
+
 int ff_h264_fill_default_ref_list(H264Context *h, H264SliceContext *sl)
 {
     int i, len;
+    int j;
 
     if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
         H264Picture *sorted[32];
@@ -188,6 +197,18 @@ int ff_h264_fill_default_ref_list(H264Context *h, H264SliceContext *sl)
         }
     }
 #endif
 
+    for (j = 0; j<1+(sl->slice_type_nos == AV_PICTURE_TYPE_B); j++) {
+        for (i = 0; i < sl->ref_count[j]; i++) {
+            if (h->default_ref_list[j][i].parent) {
+                if (mismatches_ref(h, h->default_ref_list[j][i].parent)) {
+                    av_log(h->avctx, AV_LOG_ERROR, "Discarding mismatching reference\n");
+                    memset(&h->default_ref_list[j][i], 0, sizeof(h->default_ref_list[j][i]));
+                }
+            }
+        }
+    }
+
     return 0;
 }
 
@@ -282,14 +303,14 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h, H264SliceContext *sl)
 
                 long_idx = pic_num_extract(h, pic_id, &pic_structure);
 
-                if (long_idx > 31) {
+                if (long_idx > 31U) {
                     av_log(h->avctx, AV_LOG_ERROR,
                            "long_term_pic_idx overflow\n");
                     return AVERROR_INVALIDDATA;
                 }
                 ref = h->long_ref[long_idx];
                 assert(!(ref && !ref->reference));
-                if (ref && (ref->reference & pic_structure)) {
+                if (ref && (ref->reference & pic_structure) && !mismatches_ref(h, ref)) {
                     ref->pic_id = pic_id;
                     assert(ref->long_ref);
                     i = 0;
@@ -251,11 +251,11 @@ static int alloc_picture(H264Context *h, H264Picture *pic)
         av_pix_fmt_get_chroma_sub_sample(pic->f->format,
                                          &h_chroma_shift, &v_chroma_shift);
 
-        for(i=0; i<FF_CEIL_RSHIFT(h->avctx->height, v_chroma_shift); i++) {
+        for(i=0; i<FF_CEIL_RSHIFT(pic->f->height, v_chroma_shift); i++) {
             memset(pic->f->data[1] + pic->f->linesize[1]*i,
-                   0x80, FF_CEIL_RSHIFT(h->avctx->width, h_chroma_shift));
+                   0x80, FF_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
             memset(pic->f->data[2] + pic->f->linesize[2]*i,
-                   0x80, FF_CEIL_RSHIFT(h->avctx->width, h_chroma_shift));
+                   0x80, FF_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
         }
     }
 
@@ -1086,6 +1086,7 @@ static int h264_slice_header_init(H264Context *h)
         nb_slices = max_slices;
     }
     h->slice_context_count = nb_slices;
+    h->max_contexts = FFMIN(h->max_contexts, nb_slices);
 
     if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
         ret = ff_h264_slice_context_init(h, &h->slice_ctx[0]);
@@ -1159,6 +1160,15 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
 
     if (first_mb_in_slice == 0) { // FIXME better field boundary detection
         if (h->current_slice) {
+            if (h->max_contexts > 1) {
+                if (!h->single_decode_warning) {
+                    av_log(h->avctx, AV_LOG_WARNING, "Cannot decode multiple access units as slice threads\n");
+                    h->single_decode_warning = 1;
+                }
+                h->max_contexts = 1;
+                return SLICE_SINGLETHREAD;
+            }
+
             if (h->cur_pic_ptr && FIELD_PICTURE(h) && h->first_field) {
                 ff_h264_field_end(h, h->slice_ctx, 1);
                 h->current_slice = 0;
@@ -1549,14 +1559,17 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
              * vectors. Given we are concealing a lost frame, this probably
              * is not noticeable by comparison, but it should be fixed. */
             if (h->short_ref_count) {
-                if (prev) {
+                if (prev &&
+                    h->short_ref[0]->f->width == prev->f->width &&
+                    h->short_ref[0]->f->height == prev->f->height &&
+                    h->short_ref[0]->f->format == prev->f->format) {
                     av_image_copy(h->short_ref[0]->f->data,
                                   h->short_ref[0]->f->linesize,
                                   (const uint8_t **)prev->f->data,
                                   prev->f->linesize,
-                                  h->avctx->pix_fmt,
-                                  h->mb_width * 16,
-                                  h->mb_height * 16);
+                                  prev->f->format,
+                                  prev->f->width,
+                                  prev->f->height);
                     h->short_ref[0]->poc = prev->poc + 2;
                 }
                 h->short_ref[0]->frame_num = h->prev_frame_num;
@@ -1927,12 +1940,12 @@ static av_always_inline void fill_filter_caches_inter(const H264Context *h,
         if (USES_LIST(top_type, list)) {
             const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
             const int b8_xy = 4 * top_xy + 2;
-            int (*ref2frm)[64] = (void*)(sl->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][0] + (MB_MBAFF(sl) ? 20 : 2));
+            int *ref2frm = sl->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][list] + (MB_MBAFF(sl) ? 20 : 2);
             AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
             ref_cache[0 - 1 * 8] =
-            ref_cache[1 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 0]];
+            ref_cache[1 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 0]];
             ref_cache[2 - 1 * 8] =
-            ref_cache[3 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 1]];
+            ref_cache[3 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 1]];
         } else {
             AV_ZERO128(mv_dst - 1 * 8);
             AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
@@ -1942,15 +1955,15 @@ static av_always_inline void fill_filter_caches_inter(const H264Context *h,
         if (USES_LIST(left_type[LTOP], list)) {
             const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
             const int b8_xy = 4 * left_xy[LTOP] + 1;
-            int (*ref2frm)[64] =(void*)( sl->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][0] + (MB_MBAFF(sl) ? 20 : 2));
+            int *ref2frm = sl->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][list] + (MB_MBAFF(sl) ? 20 : 2);
             AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
             AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
             AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
             AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
             ref_cache[-1 + 0] =
-            ref_cache[-1 + 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
+            ref_cache[-1 + 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
             ref_cache[-1 + 16] =
-            ref_cache[-1 + 24] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
+            ref_cache[-1 + 24] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
         } else {
             AV_ZERO32(mv_dst - 1 + 0);
             AV_ZERO32(mv_dst - 1 + 8);
@@ -1975,9 +1988,9 @@ static av_always_inline void fill_filter_caches_inter(const H264Context *h,
 
     {
         int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
-        int (*ref2frm)[64] = (void*)(sl->ref2frm[sl->slice_num & (MAX_SLICES - 1)][0] + (MB_MBAFF(sl) ? 20 : 2));
-        uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]], ref2frm[list][ref[1]]) & 0x00FF00FF) * 0x0101;
-        uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]], ref2frm[list][ref[3]]) & 0x00FF00FF) * 0x0101;
+        int *ref2frm = sl->ref2frm[sl->slice_num & (MAX_SLICES - 1)][list] + (MB_MBAFF(sl) ? 20 : 2);
+        uint32_t ref01 = (pack16to32(ref2frm[ref[0]], ref2frm[ref[1]]) & 0x00FF00FF) * 0x0101;
+        uint32_t ref23 = (pack16to32(ref2frm[ref[2]], ref2frm[ref[3]]) & 0x00FF00FF) * 0x0101;
         AV_WN32A(&ref_cache[0 * 8], ref01);
         AV_WN32A(&ref_cache[1 * 8], ref01);
         AV_WN32A(&ref_cache[2 * 8], ref23);
@@ -2306,9 +2319,11 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
         align_get_bits(&sl->gb);
 
         /* init cabac */
-        ff_init_cabac_decoder(&sl->cabac,
+        ret = ff_init_cabac_decoder(&sl->cabac,
                               sl->gb.buffer + get_bits_count(&sl->gb) / 8,
                               (get_bits_left(&sl->gb) + 7) / 8);
+        if (ret < 0)
+            return ret;
 
         ff_h264_init_cabac_states(h, sl);
 
@@ -456,7 +456,7 @@ static int hls_slice_header(HEVCContext *s)
 
             slice_address_length = av_ceil_log2(s->sps->ctb_width *
                                                 s->sps->ctb_height);
-            sh->slice_segment_addr = get_bits(gb, slice_address_length);
+            sh->slice_segment_addr = slice_address_length ? get_bits(gb, slice_address_length) : 0;
             if (sh->slice_segment_addr >= s->sps->ctb_width * s->sps->ctb_height) {
                 av_log(s->avctx, AV_LOG_ERROR,
                        "Invalid slice segment address: %u.\n",
@@ -730,7 +730,7 @@ static int hls_slice_header(HEVCContext *s)
             av_freep(&sh->entry_point_offset);
             av_freep(&sh->offset);
             av_freep(&sh->size);
-            sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
+            sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned));
             sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
             sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
             if (!sh->entry_point_offset || !sh->offset || !sh->size) {
@@ -795,6 +795,8 @@ static int hls_slice_header(HEVCContext *s)
     s->HEVClc->tu.cu_qp_offset_cb = 0;
     s->HEVClc->tu.cu_qp_offset_cr = 0;
 
+    s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == NAL_CRA_NUT && s->last_eos);
+
     return 0;
 }
 
@@ -2385,6 +2387,8 @@ static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int
 
         if (more_data < 0) {
             s->tab_slice_address[ctb_addr_rs] = -1;
+            avpriv_atomic_int_set(&s1->wpp_err, 1);
+            ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
             return more_data;
         }
 
@@ -2422,8 +2426,8 @@ static int hls_slice_data_wpp(HEVCContext *s, const uint8_t *nal, int length)
     HEVCLocalContext *lc = s->HEVClc;
     int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
     int *arg = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
-    int offset;
-    int startheader, cmpt = 0;
+    int64_t offset;
+    int64_t startheader, cmpt = 0;
     int i, j, res = 0;
 
     if (!ret || !arg) {
@@ -2432,11 +2436,18 @@ static int hls_slice_data_wpp(HEVCContext *s, const uint8_t *nal, int length)
         return AVERROR(ENOMEM);
     }
 
+    if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->sps->ctb_width >= s->sps->ctb_width * s->sps->ctb_height) {
+        av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n",
+               s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
+               s->sps->ctb_width, s->sps->ctb_height
+        );
+        res = AVERROR_INVALIDDATA;
+        goto error;
+    }
+
-    if (!s->sList[1]) {
     ff_alloc_entries(s->avctx, s->sh.num_entry_point_offsets + 1);
 
+    if (!s->sList[1]) {
         for (i = 1; i < s->threads_number; i++) {
             s->sList[i] = av_malloc(sizeof(HEVCContext));
             memcpy(s->sList[i], s, sizeof(HEVCContext));
@@ -2469,6 +2480,11 @@ static int hls_slice_data_wpp(HEVCContext *s, const uint8_t *nal, int length)
     }
     if (s->sh.num_entry_point_offsets != 0) {
         offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
+        if (length < offset) {
+            av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n");
+            res = AVERROR_INVALIDDATA;
+            goto error;
+        }
         s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset;
         s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
 
@@ -2495,6 +2511,7 @@ static int hls_slice_data_wpp(HEVCContext *s, const uint8_t *nal, int length)
 
     for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
         res += ret[i];
+error:
     av_free(ret);
     av_free(arg);
     return res;
@@ -3353,6 +3370,7 @@ static int hevc_update_thread_context(AVCodecContext *dst,
         s->pocTid0 = s0->pocTid0;
         s->max_ra = s0->max_ra;
         s->eos = s0->eos;
+        s->no_rasl_output_flag = s0->no_rasl_output_flag;
 
         s->is_nalff = s0->is_nalff;
         s->nal_length_size = s0->nal_length_size;
@@ -3457,6 +3475,7 @@ static av_cold int hevc_decode_init(AVCodecContext *avctx)
 
     s->enable_parallel_tiles = 0;
     s->picture_struct = 0;
+    s->eos = 1;
 
     if(avctx->active_thread_type & FF_THREAD_SLICE)
         s->threads_number = avctx->thread_count;
@@ -3498,6 +3517,7 @@ static void hevc_decode_flush(AVCodecContext *avctx)
     HEVCContext *s = avctx->priv_data;
     ff_hevc_flush_dpb(s);
     s->max_ra = INT_MAX;
+    s->eos = 1;
 }
 
 #define OFFSET(x) offsetof(HEVCContext, x)
@@ -609,7 +609,7 @@ typedef struct SliceHeader {
 
     unsigned int max_num_merge_cand; ///< 5 - 5_minus_max_num_merge_cand
 
-    int *entry_point_offset;
+    unsigned *entry_point_offset;
     int * offset;
     int * size;
     int num_entry_point_offsets;
@@ -842,6 +842,7 @@ typedef struct HEVCContext {
     int bs_height;
 
     int is_decoded;
+    int no_rasl_output_flag;
 
     HEVCPredContext hpc;
     HEVCDSPContext hevcdsp;
@@ -883,11 +883,13 @@ static av_always_inline int mvd_decode(HEVCContext *s)
     int k = 1;
 
     while (k < CABAC_MAX_BIN && get_cabac_bypass(&s->HEVClc->cc)) {
-        ret += 1 << k;
+        ret += 1U << k;
         k++;
     }
-    if (k == CABAC_MAX_BIN)
+    if (k == CABAC_MAX_BIN) {
         av_log(s->avctx, AV_LOG_ERROR, "CABAC_MAX_BIN : %d\n", k);
+        return 0;
+    }
     while (k--)
         ret += get_cabac_bypass(&s->HEVClc->cc) << k;
     return get_cabac_bypass_sign(&s->HEVClc->cc, -ret);
@@ -1025,8 +1027,10 @@ static av_always_inline int coeff_abs_level_remaining_decode(HEVCContext *s, int
 
     while (prefix < CABAC_MAX_BIN && get_cabac_bypass(&s->HEVClc->cc))
         prefix++;
-    if (prefix == CABAC_MAX_BIN)
+    if (prefix == CABAC_MAX_BIN) {
         av_log(s->avctx, AV_LOG_ERROR, "CABAC_MAX_BIN : %d\n", prefix);
+        return 0;
+    }
     if (prefix < 3) {
         for (i = 0; i < rc_rice_param; i++)
             suffix = (suffix << 1) | get_cabac_bypass(&s->HEVClc->cc);
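Note: the 1U constant in the first hunk keeps the shift in unsigned arithmetic; with k close to CABAC_MAX_BIN, 1 << k can shift into the sign bit of an int, which is undefined behaviour. A sketch of the pattern (read_bypass_bit() is a hypothetical stand-in for the bypass-bit read):

    unsigned ret = 0;
    int k = 1;
    while (k < CABAC_MAX_BIN && read_bypass_bit())
        ret += 1U << k++;   /* 1U: well defined even for large k */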
@@ -200,7 +200,7 @@ static inline int parse_nal_units(AVCodecParserContext *s, AVCodecContext *avctx
 
                 slice_address_length = av_ceil_log2_c(h->sps->ctb_width *
                                                       h->sps->ctb_height);
-                sh->slice_segment_addr = get_bits(gb, slice_address_length);
+                sh->slice_segment_addr = slice_address_length ? get_bits(gb, slice_address_length) : 0;
                 if (sh->slice_segment_addr >= h->sps->ctb_width * h->sps->ctb_height) {
                     av_log(h->avctx, AV_LOG_ERROR, "Invalid slice segment address: %u.\n",
                            sh->slice_segment_addr);
@@ -499,6 +499,7 @@ int ff_hevc_decode_nal_vps(HEVCContext *s)
     if (get_bits_left(gb) < 0) {
         av_log(s->avctx, AV_LOG_ERROR,
                "Overread VPS by %d bits\n", -get_bits_left(gb));
+        if (s->vps_list[vps_id])
             goto err;
     }
 
@@ -805,6 +806,9 @@ int ff_hevc_decode_nal_sps(HEVCContext *s)
     }
 
     sps->chroma_format_idc = get_ue_golomb_long(gb);
+    if (sps->chroma_format_idc > 3U) {
+        return AVERROR_INVALIDDATA;
+    }
 
     if (sps->chroma_format_idc == 3)
         sps->separate_colour_plane_flag = get_bits1(gb);
@@ -174,7 +174,7 @@ int ff_hevc_output_frame(HEVCContext *s, AVFrame *out, int flush)
         int min_poc = INT_MAX;
         int i, min_idx, ret;
 
-        if (s->sh.no_output_of_prior_pics_flag == 1) {
+        if (s->sh.no_output_of_prior_pics_flag == 1 && s->no_rasl_output_flag == 1) {
             for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
                 HEVCFrame *frame = &s->DPB[i];
                 if (!(frame->flags & HEVC_FRAME_FLAG_BUMPING) && frame->poc != s->poc &&
@@ -417,8 +417,8 @@ static int hqx_decode_frame(AVCodecContext *avctx, void *data,
 
     info_tag = AV_RL32(src);
     if (info_tag == MKTAG('I', 'N', 'F', 'O')) {
-        int info_offset = AV_RL32(src + 4);
-        if (info_offset > UINT32_MAX - 8 || info_offset + 8 > avpkt->size) {
+        unsigned info_offset = AV_RL32(src + 4);
+        if (info_offset > INT_MAX || info_offset + 8 > avpkt->size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid INFO header offset: 0x%08"PRIX32" is too large.\n",
                   info_offset);
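Note: reading the 32-bit offset into an unsigned and capping it at INT_MAX before forming info_offset + 8 keeps both the addition and the later pointer arithmetic inside int range. A sketch (pkt_size stands in for the packet size field):

    unsigned info_offset = AV_RL32(src + 4);
    if (info_offset > INT_MAX || info_offset + 8 > pkt_size)
        return AVERROR_INVALIDDATA;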
@@ -37,6 +37,7 @@
 #include "huffyuv.h"
 #include "huffyuvdsp.h"
 #include "thread.h"
+#include "libavutil/imgutils.h"
 #include "libavutil/pixdesc.h"
 
 #define classic_shift_luma_table_size 42
@@ -291,6 +292,10 @@ static av_cold int decode_init(AVCodecContext *avctx)
     HYuvContext *s = avctx->priv_data;
     int ret;
 
+    ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
+    if (ret < 0)
+        return ret;
+
     ff_huffyuvdsp_init(&s->hdsp);
     memset(s->vlc, 0, 4 * sizeof(VLC));
 
@@ -426,7 +426,7 @@ static void imc_decode_level_coefficients_raw(IMCContext *q, int *levlCoeffBuf,
 
     pos = q->coef0_pos;
     flcoeffs1[pos] = 20000.0 / pow (2, levlCoeffBuf[0] * 0.18945); // 0.18945 = log2(10) * 0.05703125
-    flcoeffs2[pos] = log2f(flcoeffs1[0]);
+    flcoeffs2[pos] = log2f(flcoeffs1[pos]);
     tmp = flcoeffs1[pos];
     tmp2 = flcoeffs2[pos];
 
@ -30,6 +30,7 @@
|
|||||||
|
|
||||||
#define BITSTREAM_READER_LE
|
#define BITSTREAM_READER_LE
|
||||||
#include "libavutil/attributes.h"
|
#include "libavutil/attributes.h"
|
||||||
|
#include "libavutil/imgutils.h"
|
||||||
#include "libavutil/timer.h"
|
#include "libavutil/timer.h"
|
||||||
#include "avcodec.h"
|
#include "avcodec.h"
|
||||||
#include "get_bits.h"
|
#include "get_bits.h"
|
||||||
@ -310,7 +311,7 @@ av_cold int ff_ivi_init_planes(IVIPlaneDesc *planes, const IVIPicConfig *cfg,
|
|||||||
|
|
||||||
ivi_free_buffers(planes);
|
ivi_free_buffers(planes);
|
||||||
|
|
||||||
if (cfg->pic_width < 1 || cfg->pic_height < 1 ||
|
if (av_image_check_size(cfg->pic_width, cfg->pic_height, 0, NULL) < 0 ||
|
||||||
cfg->luma_bands < 1 || cfg->chroma_bands < 1)
|
cfg->luma_bands < 1 || cfg->chroma_bands < 1)
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
||||||
|
@ -28,6 +28,7 @@
|
|||||||
#include "libavutil/attributes.h"
|
#include "libavutil/attributes.h"
|
||||||
#include "libavutil/avassert.h"
|
#include "libavutil/avassert.h"
|
||||||
#include "libavutil/common.h"
|
#include "libavutil/common.h"
|
||||||
|
#include "libavutil/imgutils.h"
|
||||||
#include "libavutil/mem.h"
|
#include "libavutil/mem.h"
|
||||||
#include "avcodec.h"
|
#include "avcodec.h"
|
||||||
#include "jpeg2000.h"
|
#include "jpeg2000.h"
|
||||||
@ -210,9 +211,17 @@ int ff_jpeg2000_init_component(Jpeg2000Component *comp,
|
|||||||
codsty->nreslevels2decode - 1,
|
codsty->nreslevels2decode - 1,
|
||||||
codsty->transform))
|
codsty->transform))
|
||||||
return ret;
|
return ret;
|
||||||
// component size comp->coord is uint16_t so ir cannot overflow
|
|
||||||
|
if (av_image_check_size(comp->coord[0][1] - comp->coord[0][0],
|
||||||
|
comp->coord[1][1] - comp->coord[1][0], 0, avctx))
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
csize = (comp->coord[0][1] - comp->coord[0][0]) *
|
csize = (comp->coord[0][1] - comp->coord[0][0]) *
|
||||||
(comp->coord[1][1] - comp->coord[1][0]);
|
(comp->coord[1][1] - comp->coord[1][0]);
|
||||||
|
if (comp->coord[0][1] > 32768 ||
|
||||||
|
comp->coord[1][1] > 32768) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR, "component size too large\n");
|
||||||
|
return AVERROR_PATCHWELCOME;
|
||||||
|
}
|
||||||
|
|
||||||
if (codsty->transform == FF_DWT97) {
|
if (codsty->transform == FF_DWT97) {
|
||||||
comp->i_data = NULL;
|
comp->i_data = NULL;
|
||||||
|
@ -252,6 +252,10 @@ static int get_siz(Jpeg2000DecoderContext *s)
|
|||||||
avpriv_request_sample(s->avctx, "Support for image offsets");
|
avpriv_request_sample(s->avctx, "Support for image offsets");
|
||||||
return AVERROR_PATCHWELCOME;
|
return AVERROR_PATCHWELCOME;
|
||||||
}
|
}
|
||||||
|
if (s->width > 32768U || s->height > 32768U) {
|
||||||
|
avpriv_request_sample(s->avctx, "Large Dimensions");
|
||||||
|
return AVERROR_PATCHWELCOME;
|
||||||
|
}
|
||||||
|
|
||||||
if (ncomponents <= 0) {
|
if (ncomponents <= 0) {
|
||||||
av_log(s->avctx, AV_LOG_ERROR, "Invalid number of components: %d\n",
|
av_log(s->avctx, AV_LOG_ERROR, "Invalid number of components: %d\n",
|
||||||
@@ -705,10 +709,10 @@ static int init_tile(Jpeg2000DecoderContext *s, int tileno)
 Jpeg2000QuantStyle *qntsty = tile->qntsty + compno;
 int ret; // global bandno

-comp->coord_o[0][0] = FFMAX(tilex * s->tile_width + s->tile_offset_x, s->image_offset_x);
-comp->coord_o[0][1] = FFMIN((tilex + 1) * s->tile_width + s->tile_offset_x, s->width);
-comp->coord_o[1][0] = FFMAX(tiley * s->tile_height + s->tile_offset_y, s->image_offset_y);
-comp->coord_o[1][1] = FFMIN((tiley + 1) * s->tile_height + s->tile_offset_y, s->height);
+comp->coord_o[0][0] = av_clip(tilex * (int64_t)s->tile_width + s->tile_offset_x, s->image_offset_x, s->width);
+comp->coord_o[0][1] = av_clip((tilex + 1) * (int64_t)s->tile_width + s->tile_offset_x, s->image_offset_x, s->width);
+comp->coord_o[1][0] = av_clip(tiley * (int64_t)s->tile_height + s->tile_offset_y, s->image_offset_y, s->height);
+comp->coord_o[1][1] = av_clip((tiley + 1) * (int64_t)s->tile_height + s->tile_offset_y, s->image_offset_y, s->height);
 if (compno) {
 comp->coord_o[0][0] /= s->cdx[compno];
 comp->coord_o[0][1] /= s->cdx[compno];
@@ -1102,6 +1106,10 @@ static int decode_cblk(Jpeg2000DecoderContext *s, Jpeg2000CodingStyle *codsty,
 ff_mqc_initdec(&t1->mqc, cblk->data);

 while (passno--) {
+if (bpno < 0) {
+av_log(s->avctx, AV_LOG_ERROR, "bpno became negative\n");
+return AVERROR_INVALIDDATA;
+}
 switch(pass_t) {
 case 0:
 decode_sigpass(t1, width, height, bpno + 1, bandpos,
@@ -1400,6 +1408,7 @@ static void jpeg2000_dec_cleanup(Jpeg2000DecoderContext *s)
 memset(s->codsty, 0, sizeof(s->codsty));
 memset(s->qntsty, 0, sizeof(s->qntsty));
 s->numXtiles = s->numYtiles = 0;
+s->ncomponents = 0;
 }

 static int jpeg2000_read_main_headers(Jpeg2000DecoderContext *s)
@@ -1454,6 +1463,10 @@ static int jpeg2000_read_main_headers(Jpeg2000DecoderContext *s)

 switch (marker) {
 case JPEG2000_SIZ:
+if (s->ncomponents) {
+av_log(s->avctx, AV_LOG_ERROR, "Duplicate SIZ\n");
+return AVERROR_INVALIDDATA;
+}
 ret = get_siz(s);
 if (!s->tile)
 s->numXtiles = s->numYtiles = 0;

@@ -540,6 +540,9 @@ int ff_jpeg2000_dwt_init(DWTContext *s, uint16_t border[2][2],

 int ff_dwt_encode(DWTContext *s, void *t)
 {
+if (s->ndeclevels == 0)
+return 0;
+
 switch(s->type){
 case FF_DWT97:
 dwt_encode97_float(s, t); break;
@@ -555,6 +558,9 @@ int ff_dwt_encode(DWTContext *s, void *t)

 int ff_dwt_decode(DWTContext *s, void *t)
 {
+if (s->ndeclevels == 0)
+return 0;
+
 switch (s->type) {
 case FF_DWT97:
 dwt_decode97_float(s, t);

@@ -326,7 +326,7 @@ static int libopus_encode(AVCodecContext *avctx, AVPacket *avpkt,
 } else
 audio = frame->data[0];
 } else {
-if (!opus->afq.remaining_samples)
+if (!opus->afq.remaining_samples || (!opus->afq.frame_alloc && !opus->afq.frame_count))
 return 0;
 audio = opus->samples;
 memset(audio, 0, opus->opts.packet_size * sample_size);

@@ -76,7 +76,7 @@
 * encodes them with just enough bits to reproduce the background noise.
 *
 * Discontinuous Transmission (DTX)
-* DTX is an addition to VAD/VBR operation, that allows to stop transmitting
+* DTX is an addition to VAD/VBR operation, that makes it possible to stop transmitting
 * completely when the background noise is stationary.
 * In file-based operation only 5 bits are used for such frames.
 */

@@ -191,8 +191,7 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
 x4->params.b_tff = frame->top_field_first;
 x264_encoder_reconfig(x4->enc, &x4->params);
 }
-if (x4->params.vui.i_sar_height != ctx->sample_aspect_ratio.den ||
-x4->params.vui.i_sar_width != ctx->sample_aspect_ratio.num) {
+if (x4->params.vui.i_sar_height*ctx->sample_aspect_ratio.num != ctx->sample_aspect_ratio.den * x4->params.vui.i_sar_width) {
 x4->params.vui.i_sar_height = ctx->sample_aspect_ratio.den;
 x4->params.vui.i_sar_width = ctx->sample_aspect_ratio.num;
 x264_encoder_reconfig(x4->enc, &x4->params);

@@ -164,6 +164,8 @@ static char *microdvd_load_tags(struct microdvd_tag *tags, char *s)

 /* Position */
 case 'P':
+if (!*s)
+break;
 tag.persistent = MICRODVD_PERSISTENT_ON;
 tag.data1 = (*s++ == '1');
 if (*s != '}')
@@ -4369,7 +4369,7 @@ BI_MC_COPY(64);
 #undef BI_MC_COPY

 #define BI_MC(PEL, DIR, WIDTH, TAP, DIR1, FILT_DIR) \
-void ff_hevc_put_hevc_bi_##PEL##_##DIR####WIDTH##_8_msa(uint8_t *dst, \
+void ff_hevc_put_hevc_bi_##PEL##_##DIR##WIDTH##_8_msa(uint8_t *dst, \
 ptrdiff_t dst_stride, \
 uint8_t *src, \
 ptrdiff_t src_stride, \
@@ -4423,7 +4423,7 @@ BI_MC(epel, v, 32, 4, vt, my);
 #undef BI_MC

 #define BI_MC_HV(PEL, DIR, WIDTH, TAP, DIR1) \
-void ff_hevc_put_hevc_bi_##PEL##_##DIR####WIDTH##_8_msa(uint8_t *dst, \
+void ff_hevc_put_hevc_bi_##PEL##_##DIR##WIDTH##_8_msa(uint8_t *dst, \
 ptrdiff_t dst_stride, \
 uint8_t *src, \
 ptrdiff_t src_stride, \

@@ -5454,7 +5454,7 @@ BI_W_MC_COPY(64);
 #undef BI_W_MC_COPY

 #define BI_W_MC(PEL, DIR, WIDTH, TAP, DIR1, FILT_DIR) \
-void ff_hevc_put_hevc_bi_w_##PEL##_##DIR####WIDTH##_8_msa(uint8_t *dst, \
+void ff_hevc_put_hevc_bi_w_##PEL##_##DIR##WIDTH##_8_msa(uint8_t *dst, \
 ptrdiff_t \
 dst_stride, \
 uint8_t *src, \
@@ -5521,7 +5521,7 @@ BI_W_MC(epel, v, 32, 4, vt, my);
 #undef BI_W_MC

 #define BI_W_MC_HV(PEL, DIR, WIDTH, TAP, DIR1) \
-void ff_hevc_put_hevc_bi_w_##PEL##_##DIR####WIDTH##_8_msa(uint8_t *dst, \
+void ff_hevc_put_hevc_bi_w_##PEL##_##DIR##WIDTH##_8_msa(uint8_t *dst, \
 ptrdiff_t \
 dst_stride, \
 uint8_t *src, \

@@ -3871,7 +3871,7 @@ UNI_MC_COPY(64);
 #undef UNI_MC_COPY

 #define UNI_MC(PEL, DIR, WIDTH, TAP, DIR1, FILT_DIR) \
-void ff_hevc_put_hevc_uni_##PEL##_##DIR####WIDTH##_8_msa(uint8_t *dst, \
+void ff_hevc_put_hevc_uni_##PEL##_##DIR##WIDTH##_8_msa(uint8_t *dst, \
 ptrdiff_t \
 dst_stride, \
 uint8_t *src, \
@@ -3925,7 +3925,7 @@ UNI_MC(epel, v, 32, 4, vt, my);
 #undef UNI_MC

 #define UNI_MC_HV(PEL, DIR, WIDTH, TAP, DIR1) \
-void ff_hevc_put_hevc_uni_##PEL##_##DIR####WIDTH##_8_msa(uint8_t *dst, \
+void ff_hevc_put_hevc_uni_##PEL##_##DIR##WIDTH##_8_msa(uint8_t *dst, \
 ptrdiff_t \
 dst_stride, \
 uint8_t *src, \

@@ -4687,7 +4687,7 @@ UNIWGT_MC_COPY(64);
 #undef UNIWGT_MC_COPY

 #define UNI_W_MC(PEL, DIR, WIDTH, TAP, DIR1, FILT_DIR) \
-void ff_hevc_put_hevc_uni_w_##PEL##_##DIR####WIDTH##_8_msa(uint8_t *dst, \
+void ff_hevc_put_hevc_uni_w_##PEL##_##DIR##WIDTH##_8_msa(uint8_t *dst, \
 ptrdiff_t \
 dst_stride, \
 uint8_t *src, \
@@ -4746,7 +4746,7 @@ UNI_W_MC(epel, v, 32, 4, vt, my);
 #undef UNI_W_MC

 #define UNI_W_MC_HV(PEL, DIR, WIDTH, TAP, DIR1) \
-void ff_hevc_put_hevc_uni_w_##PEL##_##DIR####WIDTH##_8_msa(uint8_t *dst, \
+void ff_hevc_put_hevc_uni_w_##PEL##_##DIR##WIDTH##_8_msa(uint8_t *dst, \
 ptrdiff_t \
 dst_stride, \
 uint8_t *src, \

@@ -24,7 +24,7 @@
 #include "libavcodec/hevcdsp.h"

 #define MC(PEL, DIR, WIDTH) \
-void ff_hevc_put_hevc_##PEL##_##DIR####WIDTH##_8_msa(int16_t *dst, \
+void ff_hevc_put_hevc_##PEL##_##DIR##WIDTH##_8_msa(int16_t *dst, \
 uint8_t *src, \
 ptrdiff_t src_stride, \
 int height, \
@@ -102,7 +102,7 @@ MC(epel, hv, 64);
 #undef MC

 #define UNI_MC(PEL, DIR, WIDTH) \
-void ff_hevc_put_hevc_uni_##PEL##_##DIR####WIDTH##_8_msa(uint8_t *dst, \
+void ff_hevc_put_hevc_uni_##PEL##_##DIR##WIDTH##_8_msa(uint8_t *dst, \
 ptrdiff_t dst_stride, \
 uint8_t *src, \
 ptrdiff_t src_stride, \
@@ -181,7 +181,7 @@ UNI_MC(epel, hv, 64);
 #undef UNI_MC

 #define UNI_W_MC(PEL, DIR, WIDTH) \
-void ff_hevc_put_hevc_uni_w_##PEL##_##DIR####WIDTH##_8_msa(uint8_t *dst, \
+void ff_hevc_put_hevc_uni_w_##PEL##_##DIR##WIDTH##_8_msa(uint8_t *dst, \
 ptrdiff_t \
 dst_stride, \
 uint8_t *src, \
@@ -265,7 +265,7 @@ UNI_W_MC(epel, hv, 64);
 #undef UNI_W_MC

 #define BI_MC(PEL, DIR, WIDTH) \
-void ff_hevc_put_hevc_bi_##PEL##_##DIR####WIDTH##_8_msa(uint8_t *dst, \
+void ff_hevc_put_hevc_bi_##PEL##_##DIR##WIDTH##_8_msa(uint8_t *dst, \
 ptrdiff_t dst_stride, \
 uint8_t *src, \
 ptrdiff_t src_stride, \
@@ -345,7 +345,7 @@ BI_MC(epel, hv, 64);
 #undef BI_MC

 #define BI_W_MC(PEL, DIR, WIDTH) \
-void ff_hevc_put_hevc_bi_w_##PEL##_##DIR####WIDTH##_8_msa(uint8_t *dst, \
+void ff_hevc_put_hevc_bi_w_##PEL##_##DIR##WIDTH##_8_msa(uint8_t *dst, \
 ptrdiff_t \
 dst_stride, \
 uint8_t *src, \

@@ -3792,7 +3792,7 @@ MC_COPY(64);
 #undef MC_COPY

 #define MC(PEL, DIR, WIDTH, TAP, DIR1, FILT_DIR) \
-void ff_hevc_put_hevc_##PEL##_##DIR####WIDTH##_8_msa(int16_t *dst, \
+void ff_hevc_put_hevc_##PEL##_##DIR##WIDTH##_8_msa(int16_t *dst, \
 uint8_t *src, \
 ptrdiff_t src_stride, \
 int height, \
@@ -3843,7 +3843,7 @@ MC(epel, v, 32, 4, vt, my);
 #undef MC

 #define MC_HV(PEL, DIR, WIDTH, TAP, DIR1) \
-void ff_hevc_put_hevc_##PEL##_##DIR####WIDTH##_8_msa(int16_t *dst, \
+void ff_hevc_put_hevc_##PEL##_##DIR##WIDTH##_8_msa(int16_t *dst, \
 uint8_t *src, \
 ptrdiff_t src_stride, \
 int height, \
@@ -98,6 +98,15 @@ static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
 av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
 }

+static void init_idct(AVCodecContext *avctx)
+{
+MJpegDecodeContext *s = avctx->priv_data;
+
+ff_idctdsp_init(&s->idsp, avctx);
+ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
+ff_zigzag_direct);
+}
+
 av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
 {
 MJpegDecodeContext *s = avctx->priv_data;
@@ -112,9 +121,7 @@ av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
 s->avctx = avctx;
 ff_blockdsp_init(&s->bdsp, avctx);
 ff_hpeldsp_init(&s->hdsp, avctx->flags);
-ff_idctdsp_init(&s->idsp, avctx);
-ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
-ff_zigzag_direct);
+init_idct(avctx);
 s->buffer_size = 0;
 s->buffer = NULL;
 s->start_code = -1;
@@ -267,7 +274,6 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)

 /* XXX: verify len field validity */
 len = get_bits(&s->gb, 16);
-s->avctx->bits_per_raw_sample =
 bits = get_bits(&s->gb, 8);

 if (bits > 16 || bits < 1) {
@@ -275,6 +281,11 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
 return AVERROR_INVALIDDATA;
 }

+if (s->avctx->bits_per_raw_sample != bits) {
+av_log(s->avctx, AV_LOG_INFO, "Changeing bps to %d\n", bits);
+s->avctx->bits_per_raw_sample = bits;
+init_idct(s->avctx);
+}
 if (s->pegasus_rct)
 bits = 9;
 if (bits == 9 && !s->pegasus_rct)
@@ -990,7 +1001,14 @@ static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int p
 skip_bits(&s->gb, 16); /* skip RSTn */
 }
 }
-if (s->nb_components == 4) {
+if (s->rct && s->nb_components == 4) {
+for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
+ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
+ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
+ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
+ptr[4*mb_x + 0] = buffer[mb_x][3];
+}
+} else if (s->nb_components == 4) {
 for(i=0; i<nb_components; i++) {
 int c= s->comp_index[i];
 if (s->bits <= 8) {
@@ -1081,7 +1099,10 @@ static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
 dc = mjpeg_decode_dc(s, s->dc_index[i]);
 if(dc == 0xFFFFF)
 return -1;
-if(bits<=8){
+if ( h * mb_x + x >= s->width
+|| v * mb_y + y >= s->height) {
+// Nothing to do
+} else if (bits<=8) {
 ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
 if(y==0 && toprow){
 if(x==0 && leftcol){
@@ -1149,7 +1170,10 @@ static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
 dc = mjpeg_decode_dc(s, s->dc_index[i]);
 if(dc == 0xFFFFF)
 return -1;
-if(bits<=8){
+if ( h * mb_x + x >= s->width
+|| v * mb_y + y >= s->height) {
+// Nothing to do
+} else if (bits<=8) {
 ptr = s->picture_ptr->data[c] +
 (linesize * (v * mb_y + y)) +
 (h * mb_x + x); //FIXME optimize this crap
@@ -1217,7 +1241,7 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
 int mb_bitmask_size,
 const AVFrame *reference)
 {
-int i, mb_x, mb_y;
+int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
 uint8_t *data[MAX_COMPONENTS];
 const uint8_t *reference_data[MAX_COMPONENTS];
 int linesize[MAX_COMPONENTS];
@@ -1234,6 +1258,11 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,

 s->restart_count = 0;

+av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
+&chroma_v_shift);
+chroma_width = FF_CEIL_RSHIFT(s->width, chroma_h_shift);
+chroma_height = FF_CEIL_RSHIFT(s->height, chroma_v_shift);
+
 for (i = 0; i < nb_components; i++) {
 int c = s->comp_index[i];
 data[c] = s->picture_ptr->data[c];
@@ -1270,8 +1299,8 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,

 if (s->interlaced && s->bottom_field)
 block_offset += linesize[c] >> 1;
-if ( 8*(h * mb_x + x) < s->width
-&& 8*(v * mb_y + y) < s->height) {
+if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
+&& 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
 ptr = data[c] + block_offset;
 } else
 ptr = NULL;
@@ -2192,11 +2221,13 @@ the_end:
 }
 } else if (s->upscale_h[p] == 2) {
 if (is16bit) {
-((uint16_t*)line)[w - 1] =
-((uint16_t*)line)[w - 2] = ((uint16_t*)line)[(w - 1) / 3];
+((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
+if (w > 1)
+((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
 } else {
-line[w - 1] =
-line[w - 2] = line[(w - 1) / 3];
+line[w - 1] = line[(w - 1) / 3];
+if (w > 1)
+line[w - 2] = line[w - 1];
 }
 for (index = w - 3; index > 0; index--) {
 line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
@@ -1928,7 +1928,7 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
 (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10) ||
 ((avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) && left > 8)) {
 av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X\n",
-left, show_bits(&s->gb, FFMIN(left, 23)));
+left, left>0 ? show_bits(&s->gb, FFMIN(left, 23)) : 0);
 return AVERROR_INVALIDDATA;
 } else
 goto eos;
@@ -2152,8 +2152,6 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx,
 if (check_marker(&s->gb, "in sequence header") == 0) {
 return AVERROR_INVALIDDATA;
 }
-s->width = width;
-s->height = height;

 s->avctx->rc_buffer_size = get_bits(&s->gb, 10) * 1024 * 16;
 skip_bits(&s->gb, 1);
@@ -2185,6 +2183,9 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx,
 return AVERROR_INVALIDDATA;
 }

+s->width = width;
+s->height = height;
+
 /* We set MPEG-2 parameters so that it emulates MPEG-1. */
 s->progressive_sequence = 1;
 s->progressive_frame = 1;

@@ -882,7 +882,7 @@ int ff_mpeg4_decode_partitions(Mpeg4DecContext *ctx)
 const int part_a_end = s->pict_type == AV_PICTURE_TYPE_I ? (ER_DC_END | ER_MV_END) : ER_MV_END;

 mb_num = mpeg4_decode_partition_a(ctx);
-if (mb_num < 0) {
+if (mb_num <= 0) {
 ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
 s->mb_x, s->mb_y, part_a_error);
 return -1;
@@ -1875,6 +1875,10 @@ static int decode_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
 int last = 0;
 for (i = 0; i < 64; i++) {
 int j;
+if (get_bits_left(gb) < 8) {
+av_log(s->avctx, AV_LOG_ERROR, "insufficient data for custom matrix\n");
+return AVERROR_INVALIDDATA;
+}
 v = get_bits(gb, 8);
 if (v == 0)
 break;
@@ -1898,6 +1902,10 @@ static int decode_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
 int last = 0;
 for (i = 0; i < 64; i++) {
 int j;
+if (get_bits_left(gb) < 8) {
+av_log(s->avctx, AV_LOG_ERROR, "insufficient data for custom matrix\n");
+return AVERROR_INVALIDDATA;
+}
 v = get_bits(gb, 8);
 if (v == 0)
 break;

@@ -1657,9 +1657,11 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr,
 uint32_t header;
 int ret;

+int skipped = 0;
 while(buf_size && !*buf){
 buf++;
 buf_size--;
+skipped++;
 }

 if (buf_size < HEADER_SIZE)
@@ -1714,7 +1716,7 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *got_frame_ptr,
 return ret;
 }
 s->frame_size = 0;
-return buf_size;
+return buf_size + skipped;
 }

 static void mp_flush(MPADecodeContext *ctx)
@@ -1893,6 +1895,7 @@ static av_cold int decode_init_mp3on4(AVCodecContext * avctx)
 s->mp3decctx[i]->adu_mode = 1;
 s->mp3decctx[i]->avctx = avctx;
 s->mp3decctx[i]->mpadsp = s->mp3decctx[0]->mpadsp;
+s->mp3decctx[i]->fdsp = s->mp3decctx[0]->fdsp;
 }

 return 0;
@@ -1227,6 +1227,83 @@ fail:
 return AVERROR(ENOMEM);
 }

+static void clear_context(MpegEncContext *s)
+{
+int i, j, k;
+
+memset(&s->next_picture, 0, sizeof(s->next_picture));
+memset(&s->last_picture, 0, sizeof(s->last_picture));
+memset(&s->current_picture, 0, sizeof(s->current_picture));
+memset(&s->new_picture, 0, sizeof(s->new_picture));
+
+memset(s->thread_context, 0, sizeof(s->thread_context));
+
+s->me.map = NULL;
+s->me.score_map = NULL;
+s->dct_error_sum = NULL;
+s->block = NULL;
+s->blocks = NULL;
+memset(s->pblocks, 0, sizeof(s->pblocks));
+s->ac_val_base = NULL;
+s->ac_val[0] =
+s->ac_val[1] =
+s->ac_val[2] =NULL;
+s->sc.edge_emu_buffer = NULL;
+s->me.scratchpad = NULL;
+s->me.temp =
+s->sc.rd_scratchpad =
+s->sc.b_scratchpad =
+s->sc.obmc_scratchpad = NULL;
+
+s->parse_context.buffer = NULL;
+s->parse_context.buffer_size = 0;
+s->parse_context.overread = 0;
+s->bitstream_buffer = NULL;
+s->allocated_bitstream_buffer_size = 0;
+s->picture = NULL;
+s->mb_type = NULL;
+s->p_mv_table_base = NULL;
+s->b_forw_mv_table_base = NULL;
+s->b_back_mv_table_base = NULL;
+s->b_bidir_forw_mv_table_base = NULL;
+s->b_bidir_back_mv_table_base = NULL;
+s->b_direct_mv_table_base = NULL;
+s->p_mv_table = NULL;
+s->b_forw_mv_table = NULL;
+s->b_back_mv_table = NULL;
+s->b_bidir_forw_mv_table = NULL;
+s->b_bidir_back_mv_table = NULL;
+s->b_direct_mv_table = NULL;
+for (i = 0; i < 2; i++) {
+for (j = 0; j < 2; j++) {
+for (k = 0; k < 2; k++) {
+s->b_field_mv_table_base[i][j][k] = NULL;
+s->b_field_mv_table[i][j][k] = NULL;
+}
+s->b_field_select_table[i][j] = NULL;
+s->p_field_mv_table_base[i][j] = NULL;
+s->p_field_mv_table[i][j] = NULL;
+}
+s->p_field_select_table[i] = NULL;
+}
+
+s->dc_val_base = NULL;
+s->coded_block_base = NULL;
+s->mbintra_table = NULL;
+s->cbp_table = NULL;
+s->pred_dir_table = NULL;
+
+s->mbskip_table = NULL;
+
+s->er.error_status_table = NULL;
+s->er.er_temp_buffer = NULL;
+s->mb_index2xy = NULL;
+s->lambda_table = NULL;
+
+s->cplx_tab = NULL;
+s->bits_tab = NULL;
+}
+
 /**
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set
@@ -1238,6 +1315,8 @@ av_cold int ff_mpv_common_init(MpegEncContext *s)
 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
 s->avctx->thread_count : 1;

+clear_context(s);
+
 if (s->encoding && s->avctx->slices)
 nb_slices = s->avctx->slices;

@@ -1282,10 +1361,6 @@ av_cold int ff_mpv_common_init(MpegEncContext *s)
 if (!s->picture[i].f)
 goto fail;
 }
-memset(&s->next_picture, 0, sizeof(s->next_picture));
-memset(&s->last_picture, 0, sizeof(s->last_picture));
-memset(&s->current_picture, 0, sizeof(s->current_picture));
-memset(&s->new_picture, 0, sizeof(s->new_picture));
 s->next_picture.f = av_frame_alloc();
 if (!s->next_picture.f)
 goto fail;
@@ -211,9 +211,16 @@ static inline int get_egolomb(GetBitContext *gb)
 {
 int v = 4;

-while (get_bits1(gb)) v++;
+while (get_bits1(gb)) {
+v++;
+if (v > 30) {
+av_log(NULL, AV_LOG_WARNING, "Too large golomb code in get_egolomb.\n");
+v = 30;
+break;
+}
+}

-return (1 << v) + get_bits(gb, v);
+return (1 << v) + get_bits_long(gb, v);
 }

 static int on2avc_decode_pairs(On2AVCContext *c, GetBitContext *gb, float *dst,

@@ -824,7 +824,7 @@ static inline void silk_stabilize_lsf(int16_t nlsf[16], int order, const uint16_

 /* upper extent */
 for (i = order; i > k; i--)
-max_center -= min_delta[k];
+max_center -= min_delta[i];
 max_center -= min_delta[k] >> 1;

 /* move apart */
@@ -538,7 +538,7 @@ static int opus_decode_packet(AVCodecContext *avctx, void *data,
 memset(frame->extended_data[i], 0, frame->linesize[0]);
 }

-if (c->gain_i) {
+if (c->gain_i && decoded_samples > 0) {
 c->fdsp->vector_fmul_scalar((float*)frame->extended_data[i],
 (float*)frame->extended_data[i],
 c->gain, FFALIGN(decoded_samples, 8));

@@ -33,7 +33,7 @@
 #include "libavutil/imgutils.h"
 #include "libavutil/opt.h"

-#define RGBA(r,g,b,a) (((a) << 24) | ((r) << 16) | ((g) << 8) | (b))
+#define RGBA(r,g,b,a) (((unsigned)(a) << 24) | ((r) << 16) | ((g) << 8) | (b))
 #define MAX_EPOCH_PALETTES 8 // Max 8 allowed per PGS epoch
 #define MAX_EPOCH_OBJECTS 64 // Max 64 allowed per PGS epoch
 #define MAX_OBJECT_REFS 2 // Max objects per display set

@@ -542,17 +542,17 @@ static int decode_ihdr_chunk(AVCodecContext *avctx, PNGDecContext *s,
 return AVERROR_INVALIDDATA;
 }

-s->width = bytestream2_get_be32(&s->gb);
-s->height = bytestream2_get_be32(&s->gb);
-if (av_image_check_size(s->width, s->height, 0, avctx)) {
-s->width = s->height = 0;
-av_log(avctx, AV_LOG_ERROR, "Invalid image size\n");
+if (s->state & PNG_IHDR) {
+av_log(avctx, AV_LOG_ERROR, "Multiple IHDR\n");
 return AVERROR_INVALIDDATA;
 }
-if (s->cur_w == 0 && s->cur_h == 0) {
-// Only set cur_w/h if update_thread_context() has not set it
-s->cur_w = s->width;
-s->cur_h = s->height;
+s->width = s->cur_w = bytestream2_get_be32(&s->gb);
+s->height = s->cur_h = bytestream2_get_be32(&s->gb);
+if (av_image_check_size(s->width, s->height, 0, avctx)) {
+s->cur_w = s->cur_h = s->width = s->height = 0;
+av_log(avctx, AV_LOG_ERROR, "Invalid image size\n");
+return AVERROR_INVALIDDATA;
 }
 s->bit_depth = bytestream2_get_byte(&s->gb);
 s->color_type = bytestream2_get_byte(&s->gb);
@@ -815,10 +815,16 @@ static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s,
 uint32_t length)
 {
 uint32_t sequence_number;
+int cur_w, cur_h, x_offset, y_offset, dispose_op, blend_op;

 if (length != 26)
 return AVERROR_INVALIDDATA;

+if (!(s->state & PNG_IHDR)) {
+av_log(avctx, AV_LOG_ERROR, "fctl before IHDR\n");
+return AVERROR_INVALIDDATA;
+}
+
 s->last_w = s->cur_w;
 s->last_h = s->cur_h;
 s->last_x_offset = s->x_offset;
@@ -826,23 +832,23 @@ static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s,
 s->last_dispose_op = s->dispose_op;

 sequence_number = bytestream2_get_be32(&s->gb);
-s->cur_w = bytestream2_get_be32(&s->gb);
-s->cur_h = bytestream2_get_be32(&s->gb);
-s->x_offset = bytestream2_get_be32(&s->gb);
-s->y_offset = bytestream2_get_be32(&s->gb);
+cur_w = bytestream2_get_be32(&s->gb);
+cur_h = bytestream2_get_be32(&s->gb);
+x_offset = bytestream2_get_be32(&s->gb);
+y_offset = bytestream2_get_be32(&s->gb);
 bytestream2_skip(&s->gb, 4); /* delay_num (2), delay_den (2) */
-s->dispose_op = bytestream2_get_byte(&s->gb);
-s->blend_op = bytestream2_get_byte(&s->gb);
+dispose_op = bytestream2_get_byte(&s->gb);
+blend_op = bytestream2_get_byte(&s->gb);
 bytestream2_skip(&s->gb, 4); /* crc */

 if (sequence_number == 0 &&
-(s->cur_w != s->width ||
-s->cur_h != s->height ||
-s->x_offset != 0 ||
-s->y_offset != 0) ||
-s->cur_w <= 0 || s->cur_h <= 0 ||
-s->x_offset < 0 || s->y_offset < 0 ||
-s->cur_w > s->width - s->x_offset|| s->cur_h > s->height - s->y_offset)
+(cur_w != s->width ||
+cur_h != s->height ||
+x_offset != 0 ||
+y_offset != 0) ||
+cur_w <= 0 || cur_h <= 0 ||
+x_offset < 0 || y_offset < 0 ||
+cur_w > s->width - x_offset|| cur_h > s->height - y_offset)
 return AVERROR_INVALIDDATA;

 if (sequence_number == 0 && s->dispose_op == APNG_DISPOSE_OP_PREVIOUS) {
@@ -863,6 +869,13 @@ static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s,
 s->dispose_op = APNG_BLEND_OP_SOURCE;
 }

+s->cur_w = cur_w;
+s->cur_h = cur_h;
+s->x_offset = x_offset;
+s->y_offset = y_offset;
+s->dispose_op = dispose_op;
+s->blend_op = blend_op;
+
 return 0;
 }

@@ -933,7 +946,7 @@ static int handle_p_frame_apng(AVCodecContext *avctx, PNGDecContext *s,
 for (x = s->x_offset; x < s->x_offset + s->cur_w; ++x, foreground += s->bpp, background += s->bpp) {
 size_t b;
 uint8_t foreground_alpha, background_alpha, output_alpha;
-uint8_t output[4];
+uint8_t output[10];

 // Since we might be blending alpha onto alpha, we use the following equations:
 // output_alpha = foreground_alpha + (1 - foreground_alpha) * background_alpha
@@ -973,6 +986,8 @@ static int handle_p_frame_apng(AVCodecContext *avctx, PNGDecContext *s,

 output_alpha = foreground_alpha + FAST_DIV255((255 - foreground_alpha) * background_alpha);

+av_assert0(s->bpp <= 10);
+
 for (b = 0; b < s->bpp - 1; ++b) {
 if (output_alpha == 0) {
 output[b] = 0;
|
|||||||
(ret = ff_thread_ref_frame(&pdst->picture, &psrc->picture)) < 0)
|
(ret = ff_thread_ref_frame(&pdst->picture, &psrc->picture)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
if (CONFIG_APNG_DECODER && dst->codec_id == AV_CODEC_ID_APNG) {
|
if (CONFIG_APNG_DECODER && dst->codec_id == AV_CODEC_ID_APNG) {
|
||||||
|
pdst->width = psrc->width;
|
||||||
|
pdst->height = psrc->height;
|
||||||
|
pdst->bit_depth = psrc->bit_depth;
|
||||||
|
pdst->color_type = psrc->color_type;
|
||||||
|
pdst->compression_type = psrc->compression_type;
|
||||||
|
pdst->interlace_type = psrc->interlace_type;
|
||||||
|
pdst->filter_type = psrc->filter_type;
|
||||||
pdst->cur_w = psrc->cur_w;
|
pdst->cur_w = psrc->cur_w;
|
||||||
pdst->cur_h = psrc->cur_h;
|
pdst->cur_h = psrc->cur_h;
|
||||||
pdst->x_offset = psrc->x_offset;
|
pdst->x_offset = psrc->x_offset;
|
||||||
pdst->y_offset = psrc->y_offset;
|
pdst->y_offset = psrc->y_offset;
|
||||||
|
|
||||||
pdst->dispose_op = psrc->dispose_op;
|
pdst->dispose_op = psrc->dispose_op;
|
||||||
|
|
||||||
|
memcpy(pdst->palette, psrc->palette, sizeof(pdst->palette));
|
||||||
|
|
||||||
|
pdst->state |= psrc->state & (PNG_IHDR | PNG_PLTE);
|
||||||
|
|
||||||
ff_thread_release_buffer(dst, &pdst->last_picture);
|
ff_thread_release_buffer(dst, &pdst->last_picture);
|
||||||
if (psrc->last_picture.f->data[0])
|
if (psrc->last_picture.f->data[0])
|
||||||
return ff_thread_ref_frame(&pdst->last_picture, &psrc->last_picture);
|
return ff_thread_ref_frame(&pdst->last_picture, &psrc->last_picture);
|
||||||
|
@ -504,7 +504,7 @@ static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* 16x8 works with 16 elements; it allows to avoid replicating loads, and
|
* 16x8 works with 16 elements; it can avoid replicating loads, and
|
||||||
* gives the compiler more room for scheduling. It's only used from
|
* gives the compiler more room for scheduling. It's only used from
|
||||||
* inside hadamard8_diff16_altivec.
|
* inside hadamard8_diff16_altivec.
|
||||||
*
|
*
|
||||||
|
@ -454,6 +454,9 @@ int ff_thread_decode_frame(AVCodecContext *avctx,
|
|||||||
*got_picture_ptr = p->got_frame;
|
*got_picture_ptr = p->got_frame;
|
||||||
picture->pkt_dts = p->avpkt.dts;
|
picture->pkt_dts = p->avpkt.dts;
|
||||||
|
|
||||||
|
if (p->result < 0)
|
||||||
|
err = p->result;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* A later call with avkpt->size == 0 may loop over all threads,
|
* A later call with avkpt->size == 0 may loop over all threads,
|
||||||
* including this one, searching for a frame to return before being
|
* including this one, searching for a frame to return before being
|
||||||
@ -471,6 +474,14 @@ int ff_thread_decode_frame(AVCodecContext *avctx,
|
|||||||
|
|
||||||
fctx->next_finished = finished;
|
fctx->next_finished = finished;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* When no frame was found while flushing, but an error occured in
|
||||||
|
* any thread, return it instead of 0.
|
||||||
|
* Otherwise the error can get lost.
|
||||||
|
*/
|
||||||
|
if (!avpkt->size && !*got_picture_ptr)
|
||||||
|
return err;
|
||||||
|
|
||||||
/* return the size of the consumed packet if no error occurred */
|
/* return the size of the consumed packet if no error occurred */
|
||||||
return (p->result >= 0) ? avpkt->size : p->result;
|
return (p->result >= 0) ? avpkt->size : p->result;
|
||||||
}
|
}
|
||||||
@ -572,7 +583,7 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
|
|||||||
pthread_join(p->thread, NULL);
|
pthread_join(p->thread, NULL);
|
||||||
p->thread_init=0;
|
p->thread_init=0;
|
||||||
|
|
||||||
if (codec->close)
|
if (codec->close && p->avctx)
|
||||||
codec->close(p->avctx);
|
codec->close(p->avctx);
|
||||||
|
|
||||||
release_delayed_buffers(p);
|
release_delayed_buffers(p);
|
||||||
@ -590,11 +601,12 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
|
|||||||
av_packet_unref(&p->avpkt);
|
av_packet_unref(&p->avpkt);
|
||||||
av_freep(&p->released_buffers);
|
av_freep(&p->released_buffers);
|
||||||
|
|
||||||
if (i) {
|
if (i && p->avctx) {
|
||||||
av_freep(&p->avctx->priv_data);
|
av_freep(&p->avctx->priv_data);
|
||||||
av_freep(&p->avctx->slice_offset);
|
av_freep(&p->avctx->slice_offset);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (p->avctx)
|
||||||
av_freep(&p->avctx->internal);
|
av_freep(&p->avctx->internal);
|
||||||
av_freep(&p->avctx);
|
av_freep(&p->avctx);
|
||||||
}
|
}
|
||||||
@ -678,6 +690,7 @@ int ff_frame_thread_init(AVCodecContext *avctx)
|
|||||||
|
|
||||||
copy->internal = av_malloc(sizeof(AVCodecInternal));
|
copy->internal = av_malloc(sizeof(AVCodecInternal));
|
||||||
if (!copy->internal) {
|
if (!copy->internal) {
|
||||||
|
copy->priv_data = NULL;
|
||||||
err = AVERROR(ENOMEM);
|
err = AVERROR(ENOMEM);
|
||||||
goto error;
|
goto error;
|
||||||
}
|
}
|
||||||
|
@ -258,7 +258,7 @@ static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
|
|||||||
buf += buf_size - context->frame_size;
|
buf += buf_size - context->frame_size;
|
||||||
|
|
||||||
len = context->frame_size - (avctx->pix_fmt==AV_PIX_FMT_PAL8 ? AVPALETTE_SIZE : 0);
|
len = context->frame_size - (avctx->pix_fmt==AV_PIX_FMT_PAL8 ? AVPALETTE_SIZE : 0);
|
||||||
if (buf_size < len && (avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0)) {
|
if (buf_size < len && ((avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0) || !need_copy)) {
|
||||||
av_log(avctx, AV_LOG_ERROR, "Invalid buffer size, packet size %d < expected frame_size %d\n", buf_size, len);
|
av_log(avctx, AV_LOG_ERROR, "Invalid buffer size, packet size %d < expected frame_size %d\n", buf_size, len);
|
||||||
av_buffer_unref(&frame->buf[0]);
|
av_buffer_unref(&frame->buf[0]);
|
||||||
return AVERROR(EINVAL);
|
return AVERROR(EINVAL);
|
||||||
|
@ -54,7 +54,7 @@ static int raw_encode(AVCodecContext *avctx, AVPacket *pkt,
|
|||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
if ((ret = ff_alloc_packet2(avctx, pkt, ret)) < 0)
|
if ((ret = ff_alloc_packet(pkt, ret)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
if ((ret = avpicture_layout((const AVPicture *)frame, avctx->pix_fmt, avctx->width,
|
if ((ret = avpicture_layout((const AVPicture *)frame, avctx->pix_fmt, avctx->width,
|
||||||
avctx->height, pkt->data, pkt->size)) < 0)
|
avctx->height, pkt->data, pkt->size)) < 0)
|
||||||
|
@ -1534,7 +1534,14 @@ int ff_rv34_decode_init_thread_copy(AVCodecContext *avctx)
|
|||||||
|
|
||||||
if (avctx->internal->is_copy) {
|
if (avctx->internal->is_copy) {
|
||||||
r->tmp_b_block_base = NULL;
|
r->tmp_b_block_base = NULL;
|
||||||
|
r->cbp_chroma = NULL;
|
||||||
|
r->cbp_luma = NULL;
|
||||||
|
r->deblock_coefs = NULL;
|
||||||
|
r->intra_types_hist = NULL;
|
||||||
|
r->mb_type = NULL;
|
||||||
|
|
||||||
ff_mpv_idct_init(&r->s);
|
ff_mpv_idct_init(&r->s);
|
||||||
|
|
||||||
if ((err = ff_mpv_common_init(&r->s)) < 0)
|
if ((err = ff_mpv_common_init(&r->s)) < 0)
|
||||||
return err;
|
return err;
|
||||||
if ((err = rv34_decoder_alloc(r)) < 0) {
|
if ((err = rv34_decoder_alloc(r)) < 0) {
|
||||||
|
@ -85,10 +85,6 @@ static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
|
|||||||
case 8:
|
case 8:
|
||||||
avctx->channel_layout = AV_CH_LAYOUT_5POINT1_BACK | AV_CH_LAYOUT_STEREO_DOWNMIX;
|
avctx->channel_layout = AV_CH_LAYOUT_5POINT1_BACK | AV_CH_LAYOUT_STEREO_DOWNMIX;
|
||||||
}
|
}
|
||||||
avctx->bit_rate = 48000 * avctx->channels * (avctx->bits_per_raw_sample + 4) +
|
|
||||||
32 * (48000 / (buf_size * 8 /
|
|
||||||
(avctx->channels *
|
|
||||||
(avctx->bits_per_raw_sample + 4))));
|
|
||||||
|
|
||||||
return frame_size;
|
return frame_size;
|
||||||
}
|
}
|
||||||
@ -117,6 +113,8 @@ static int s302m_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
|
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
|
avctx->bit_rate = 48000 * avctx->channels * (avctx->bits_per_raw_sample + 4) +
|
||||||
|
32 * 48000 / frame->nb_samples;
|
||||||
buf_size = (frame->nb_samples * avctx->channels / 2) * block_size;
|
buf_size = (frame->nb_samples * avctx->channels / 2) * block_size;
|
||||||
|
|
||||||
if (avctx->bits_per_raw_sample == 24) {
|
if (avctx->bits_per_raw_sample == 24) {
|
||||||
|
@ -457,6 +457,7 @@ static void destroy_buffers(SANMVideoContext *ctx)
|
|||||||
ctx->frm0_size =
|
ctx->frm0_size =
|
||||||
ctx->frm1_size =
|
ctx->frm1_size =
|
||||||
ctx->frm2_size = 0;
|
ctx->frm2_size = 0;
|
||||||
|
init_sizes(ctx, 0, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
static av_cold int init_buffers(SANMVideoContext *ctx)
|
static av_cold int init_buffers(SANMVideoContext *ctx)
|
||||||
|
@ -137,6 +137,7 @@ typedef struct AACSBRContext {
|
|||||||
struct SpectralBandReplication {
|
struct SpectralBandReplication {
|
||||||
int sample_rate;
|
int sample_rate;
|
||||||
int start;
|
int start;
|
||||||
|
int id_aac;
|
||||||
int reset;
|
int reset;
|
||||||
SpectrumParameters spectrum_params;
|
SpectrumParameters spectrum_params;
|
||||||
int bs_amp_res_header;
|
int bs_amp_res_header;
|
||||||
|
@ -668,6 +668,10 @@ static int smka_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
|
|
||||||
/* get output buffer */
|
/* get output buffer */
|
||||||
frame->nb_samples = unp_size / (avctx->channels * (bits + 1));
|
frame->nb_samples = unp_size / (avctx->channels * (bits + 1));
|
||||||
|
if (unp_size % (avctx->channels * (bits + 1))) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR, "unp_size %d is odd\n", unp_size);
|
||||||
|
return AVERROR(EINVAL);
|
||||||
|
}
|
||||||
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
|
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
samples = (int16_t *)frame->data[0];
|
samples = (int16_t *)frame->data[0];
|
||||||
|
@ -304,6 +304,8 @@ static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer
|
|||||||
BlockNode *lb= lt+b_stride;
|
BlockNode *lb= lt+b_stride;
|
||||||
BlockNode *rb= lb+1;
|
BlockNode *rb= lb+1;
|
||||||
uint8_t *block[4];
|
uint8_t *block[4];
|
||||||
|
// When src_stride is large enough, it is possible to interleave the blocks.
|
||||||
|
// Otherwise the blocks are written sequentially in the tmp buffer.
|
||||||
int tmp_step= src_stride >= 7*MB_SIZE ? MB_SIZE : MB_SIZE*src_stride;
|
int tmp_step= src_stride >= 7*MB_SIZE ? MB_SIZE : MB_SIZE*src_stride;
|
||||||
uint8_t *tmp = s->scratchbuf;
|
uint8_t *tmp = s->scratchbuf;
|
||||||
uint8_t *ptmp;
|
uint8_t *ptmp;
|
||||||
@ -347,8 +349,6 @@ static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer
|
|||||||
|
|
||||||
if(b_w<=0 || b_h<=0) return;
|
if(b_w<=0 || b_h<=0) return;
|
||||||
|
|
||||||
av_assert2(src_stride > 2*MB_SIZE + 5);
|
|
||||||
|
|
||||||
if(!sliced && offset_dst)
|
if(!sliced && offset_dst)
|
||||||
dst += src_x + src_y*dst_stride;
|
dst += src_x + src_y*dst_stride;
|
||||||
dst8+= src_x + src_y*src_stride;
|
dst8+= src_x + src_y*src_stride;
|
||||||
@ -563,6 +563,8 @@ static inline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
|
|||||||
e= 0;
|
e= 0;
|
||||||
while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
|
while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
|
||||||
e++;
|
e++;
|
||||||
|
if (e > 31)
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
|
|
||||||
a= 1;
|
a= 1;
|
||||||
|
@ -925,6 +925,13 @@ static av_cold int sonic_decode_init(AVCodecContext *avctx)
|
|||||||
s->frame_size = s->channels*s->block_align*s->downsampling;
|
s->frame_size = s->channels*s->block_align*s->downsampling;
|
||||||
// avctx->frame_size = s->block_align;
|
// avctx->frame_size = s->block_align;
|
||||||
|
|
||||||
|
if (s->num_taps * s->channels > s->frame_size) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR,
|
||||||
|
"number of taps times channels (%d * %d) larger than frame size %d\n",
|
||||||
|
s->num_taps, s->channels, s->frame_size);
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
|
|
||||||
av_log(avctx, AV_LOG_INFO, "Sonic: ver: %d.%d ls: %d dr: %d taps: %d block: %d frame: %d downsamp: %d\n",
|
av_log(avctx, AV_LOG_INFO, "Sonic: ver: %d.%d ls: %d dr: %d taps: %d block: %d frame: %d downsamp: %d\n",
|
||||||
s->version, s->minor_version, s->lossless, s->decorrelation, s->num_taps, s->block_align, s->frame_size, s->downsampling);
|
s->version, s->minor_version, s->lossless, s->decorrelation, s->num_taps, s->block_align, s->frame_size, s->downsampling);
|
||||||
|
|
||||||
|
@ -618,9 +618,12 @@ static int svq1_decode_frame(AVCodecContext *avctx, void *data,
|
|||||||
uint8_t *current;
|
uint8_t *current;
|
||||||
int result, i, x, y, width, height;
|
int result, i, x, y, width, height;
|
||||||
svq1_pmv *pmv;
|
svq1_pmv *pmv;
|
||||||
|
int ret;
|
||||||
|
|
||||||
/* initialize bit buffer */
|
/* initialize bit buffer */
|
||||||
init_get_bits8(&s->gb, buf, buf_size);
|
ret = init_get_bits8(&s->gb, buf, buf_size);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
|
||||||
/* decode frame header */
|
/* decode frame header */
|
||||||
s->frame_code = get_bits(&s->gb, 22);
|
s->frame_code = get_bits(&s->gb, 22);
|
||||||
|
@ -515,6 +515,11 @@ static av_cold int svq1_encode_init(AVCodecContext *avctx)
|
|||||||
SVQ1EncContext *const s = avctx->priv_data;
|
SVQ1EncContext *const s = avctx->priv_data;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
if (avctx->width >= 4096 || avctx->height >= 4096) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR, "Dimensions too large, maximum is 4095x4095\n");
|
||||||
|
return AVERROR(EINVAL);
|
||||||
|
}
|
||||||
|
|
||||||
ff_hpeldsp_init(&s->hdsp, avctx->flags);
|
ff_hpeldsp_init(&s->hdsp, avctx->flags);
|
||||||
ff_me_cmp_init(&s->mecc, avctx);
|
ff_me_cmp_init(&s->mecc, avctx);
|
||||||
ff_mpegvideoencdsp_init(&s->m.mpvencdsp, avctx);
|
ff_mpegvideoencdsp_init(&s->m.mpvencdsp, avctx);
|
||||||
|
@@ -632,7 +632,7 @@ static int decorrelate(TAKDecContext *s, int c1, int c2, int length)
         for (; length2 > 0; length2 -= tmp) {
             tmp = FFMIN(length2, x);
 
-            for (i = 0; i < tmp; i++)
+            for (i = 0; i < tmp - (tmp == length2); i++)
                 s->residues[filter_order + i] = *p2++ >> dshift;
 
             for (i = 0; i < tmp; i++) {
@@ -656,7 +656,7 @@ static int decorrelate(TAKDecContext *s, int c1, int c2, int length)
                 *p1++ = v;
             }
 
-            memcpy(s->residues, &s->residues[tmp], 2 * filter_order);
+            memmove(s->residues, &s->residues[tmp], 2 * filter_order);
         }
 
     emms_c();
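
The memcpy()-to-memmove() switch is the interesting part: both source and destination point into s->residues, and copying the tail of the residue history back to the start of the same buffer can overlap, which is undefined behaviour for memcpy() but well defined for memmove(). A self-contained illustration of that carry-over pattern (buffer sizes are made up for the example):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HISTORY 4   /* elements carried over between blocks */
#define BLOCK   3   /* elements consumed per block in this toy example */

int main(void)
{
    int16_t residues[HISTORY + BLOCK] = { 1, 2, 3, 4, 5, 6, 7 };

    /* Carry the last HISTORY elements to the front of the buffer so the
     * next block can use them as filter history.  Source [BLOCK..] and
     * destination [0..] overlap whenever BLOCK < HISTORY, so memcpy()
     * would be undefined behaviour here; memmove() is required. */
    memmove(residues, &residues[BLOCK], HISTORY * sizeof(*residues));

    for (int i = 0; i < HISTORY; i++)
        printf("%d ", residues[i]);
    printf("\n");   /* prints: 4 5 6 7 */
    return 0;
}
```
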
@@ -801,6 +801,12 @@ static int tak_decode_frame(AVCodecContext *avctx, void *data,
             if (s->mcdparams[i].present) {
                 s->mcdparams[i].index = get_bits(gb, 2);
                 s->mcdparams[i].chan2 = get_bits(gb, 4);
+                if (s->mcdparams[i].chan2 >= avctx->channels) {
+                    av_log(avctx, AV_LOG_ERROR,
+                           "invalid channel 2 (%d) for %d channel(s)\n",
+                           s->mcdparams[i].chan2, avctx->channels);
+                    return AVERROR_INVALIDDATA;
+                }
                 if (s->mcdparams[i].index == 1) {
                     if ((nbit == s->mcdparams[i].chan2) ||
                         (ch_mask & 1 << s->mcdparams[i].chan2))
@@ -402,6 +402,10 @@ static int truemotion1_decode_header(TrueMotion1Context *s)
         new_pix_fmt = AV_PIX_FMT_RGB555; // RGB565 is supported as well
 
     s->w >>= width_shift;
+    if (s->w & 1) {
+        avpriv_request_sample(s->avctx, "Frame with odd width");
+        return AVERROR_PATCHWELCOME;
+    }
 
     if (s->w != s->avctx->width || s->h != s->avctx->height ||
         new_pix_fmt != s->avctx->pix_fmt) {
@@ -123,6 +123,7 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
     TTAContext *s = avctx->priv_data;
     GetBitContext gb;
     int total_frames;
+    int ret;
 
     s->avctx = avctx;
 
@@ -131,7 +132,10 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
         return AVERROR_INVALIDDATA;
 
     s->crc_table = av_crc_get_table(AV_CRC_32_IEEE_LE);
-    init_get_bits8(&gb, avctx->extradata, avctx->extradata_size);
+    ret = init_get_bits8(&gb, avctx->extradata, avctx->extradata_size);
+    if (ret < 0)
+        return ret;
+
     if (show_bits_long(&gb, 32) == AV_RL32("TTA1")) {
         /* signature */
         skip_bits_long(&gb, 32);
@@ -428,10 +428,12 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
 
     *width = FFALIGN(*width, w_align);
     *height = FFALIGN(*height, h_align);
-    if (s->codec_id == AV_CODEC_ID_H264 || s->lowres)
+    if (s->codec_id == AV_CODEC_ID_H264 || s->lowres) {
         // some of the optimized chroma MC reads one line too much
         // which is also done in mpeg decoders with lowres > 0
         *height += 2;
+        *width = FFMAX(*width, 32);
+    }
 
     for (i = 0; i < 4; i++)
         linesize_align[i] = STRIDE_ALIGN;
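
Besides the extra two lines of height padding, the new brace block clamps the width up to 32, presumably so that optimized motion-compensation routines that read whole aligned blocks cannot run past a very small allocated plane. A toy sketch of the align-up-then-clamp arithmetic, with made-up alignment values (FFmpeg's FFALIGN()/FFMAX() have the same shape):

```c
#include <stdio.h>

/* Round x up to a multiple of a (a must be a power of two), then enforce a
 * lower bound, mirroring FFALIGN() and FFMAX(). */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define MAX(a, b)      ((a) > (b) ? (a) : (b))

int main(void)
{
    int width = 18, height = 13;      /* toy dimensions */
    int w_align = 16, h_align = 16;   /* assumed macroblock alignment */

    width  = ALIGN_UP(width,  w_align);
    height = ALIGN_UP(height, h_align);
    height += 2;                      /* extra line pair for MC overread */
    width   = MAX(width, 32);         /* floor mirroring the patch above */

    printf("padded: %dx%d\n", width, height);   /* prints: padded: 32x18 */
    return 0;
}
```
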
@@ -1035,8 +1037,10 @@ end:
 int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
 {
     int ret = get_buffer_internal(avctx, frame, flags);
-    if (ret < 0)
+    if (ret < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        frame->width = frame->height = 0;
+    }
     return ret;
 }
 
@@ -3113,8 +3117,8 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode)
 
         if (enc->sample_aspect_ratio.num) {
             av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
-                      enc->width * enc->sample_aspect_ratio.num,
-                      enc->height * enc->sample_aspect_ratio.den,
+                      enc->width * (int64_t)enc->sample_aspect_ratio.num,
+                      enc->height * (int64_t)enc->sample_aspect_ratio.den,
                       1024 * 1024);
             snprintf(buf + strlen(buf), buf_size - strlen(buf),
                      " [SAR %d:%d DAR %d:%d]",
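
Casting one factor to int64_t widens the multiplication: enc->width and the aspect-ratio numerator are plain ints, and their product can exceed INT_MAX for large frames or extreme sample aspect ratios, which is signed overflow (undefined behaviour) in 32-bit arithmetic. av_reduce() already takes 64-bit numerator and denominator, so nothing is lost. A standalone sketch of the same computation with a plain gcd in place of av_reduce() (which additionally bounds the result):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int64_t gcd64(int64_t a, int64_t b)
{
    while (b) { int64_t t = a % b; a = b; b = t; }
    return a;
}

int main(void)
{
    /* Hypothetical stream: a large coded width and an extreme sample aspect
     * ratio.  width * sar_num is about 2.46e9, which does not fit in a
     * 32-bit int, so widen to 64 bits before multiplying, as the patch does
     * by casting one operand to int64_t. */
    int width = 8192, height = 4320;
    int sar_num = 300000, sar_den = 299999;

    int64_t dar_num = width  * (int64_t)sar_num;
    int64_t dar_den = height * (int64_t)sar_den;
    int64_t g = gcd64(dar_num, dar_den);

    printf("DAR %" PRId64 ":%" PRId64 "\n", dar_num / g, dar_den / g);
    return 0;
}
```
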
@@ -3416,7 +3420,7 @@ int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
             return frame_bytes * 8 / bps;
     }
 
-    if (ch > 0) {
+    if (ch > 0 && ch < INT_MAX/16) {
         /* calc from frame_bytes and channels */
         switch (id) {
         case AV_CODEC_ID_ADPCM_AFC:
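
The added ch < INT_MAX/16 condition is a pre-multiplication range check: the per-codec cases below multiply the channel count by small constant factors, and rejecting absurd channel counts up front keeps those products from overflowing int (the 16 presumably covers the largest such factor). The general form of the guard looks like this:

```c
#include <limits.h>
#include <stdio.h>

/* Multiply two non-negative ints, refusing combinations that would
 * overflow: the guard a <= INT_MAX / b is the same idea as the
 * ch < INT_MAX/16 check above, written for a general factor. */
static int mul_checked(int a, int b, int *out)
{
    if (a < 0 || b < 0)
        return -1;
    if (b != 0 && a > INT_MAX / b)
        return -1;                 /* a * b would not fit in an int */
    *out = a * b;
    return 0;
}

int main(void)
{
    int v;
    printf("small: %d\n", mul_checked(12345, 16, &v) == 0 ? v : -1);
    printf("huge:  %s\n", mul_checked(INT_MAX / 4, 16, &v) ? "rejected" : "ok");
    return 0;
}
```
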
@@ -162,7 +162,8 @@ static void fill_vaapi_RefPicList(VAPictureH264 RefPicList[32],
     unsigned int i, n = 0;
     for (i = 0; i < ref_count; i++)
         if (ref_list[i].reference)
-            fill_vaapi_pic(&RefPicList[n++], ref_list[i].parent, 0);
+            fill_vaapi_pic(&RefPicList[n++], ref_list[i].parent,
+                           ref_list[i].reference);
 
     for (; n < 32; n++)
         init_vaapi_pic(&RefPicList[n]);
Some files were not shown because too many files have changed in this diff.