Compare commits

...

145 Commits

Author SHA1 Message Date
Michael Niedermayer
29353dd3f8 Update for 2.1.2
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-13 17:21:42 +01:00
Carl Eugen Hoyos
b336daa952 Fix a crash on oom when decoding hevc.
(cherry picked from commit 5ab1efb9d0dc65e748a0291b67915e35578b302e)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-13 17:20:19 +01:00
Michael Niedermayer
a0aa5c34a9 avcodec/hevc: Check entry point arrays for malloc failure
Fixes null pointer dereference
Fixes: signal_sigsegv_e1d3b6_2192_DBLK_F_VIXS_2.bit
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 22bfb4be284c12f33b9dac010713fe3ca6d974bf)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-13 16:17:08 +01:00
Luca Barbato
d63476347a hevc: Bound check slice_qp
The T-REC-H.265-2013044 page 79 states they have to be in the range
[-s->sps->qp_bd_offset, 51].

Fixes: asan_stack-oob_eae8e3_9522_WP_MAIN10_B_Toshiba_3.bit
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit aead772b5814142b0e530804486ff7970ecd9eef)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-13 16:17:04 +01:00
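
As an illustration of the bound check described above, a minimal C sketch of validating a slice QP against [-qp_bd_offset, 51]; the helper name and parameters are invented for the example, and the real check sits in the hevc slice-header parsing:

#include <libavutil/error.h>
#include <libavutil/log.h>

/* Illustrative only: reject a slice QP outside [-qp_bd_offset, 51]. */
static int validate_slice_qp(void *logctx, int slice_qp, int qp_bd_offset)
{
    if (slice_qp < -qp_bd_offset || slice_qp > 51) {
        av_log(logctx, AV_LOG_ERROR, "slice QP %d outside [%d, 51]\n",
               slice_qp, -qp_bd_offset);
        return AVERROR_INVALIDDATA;
    }
    return 0;
}
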
Luca Barbato
39545c5482 hevc: Reject impossible dependent tile
Tile 0 cannot depend on a previous one.
Prevents an out-of-bounds array load in ff_hevc_cabac_init().

Fixes: asan_heap-oob_e3a924_1630_DBLK_A_MAIN10_VIXS_2.bit
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind

Reviewed-by: Guillaume Martres <smarter@ubuntu.com>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 48a5b155433ed7af20fb0a5c20ca131958727727)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-13 16:16:58 +01:00
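
A hedged sketch of the rejection described above: a slice segment flagged as dependent but located at address 0 has nothing earlier to depend on, so it is refused before ff_hevc_cabac_init() would read out of bounds. The parameter names are illustrative, not the exact hevc.c fields:

#include <libavutil/error.h>

/* Illustrative only: slice segment 0 can never be a dependent segment. */
static int check_dependent_slice(int dependent_slice_segment_flag,
                                 int slice_segment_addr)
{
    if (dependent_slice_segment_flag && slice_segment_addr == 0)
        return AVERROR_INVALIDDATA;
    return 0;
}
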
Guillaume Martres
e43805d401 hevc: remove useless clip in FUNC(sao_band_filter)()
The src buffer should only contain values in the interval
[0, (1 << BIT_DEPTH) - 1]. Since shift = (BIT_DEPTH - 5), src[x] >> shift
must be in the interval [0, 31], so no clip is needed.

This removes the code that was changed in 5856bca360c5bc3e340a357d91b1f993c80a7bea,
as the clip that was repositioned in that commit is removed.

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit b00a8b4d194f1bf23343f3f42138affa1fe26641)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-13 16:15:07 +01:00
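
A small self-contained check of the arithmetic above, using 10-bit content as an example: the largest possible sample already maps to band index 31, so the removed clip could never change the result.

#include <assert.h>

int main(void)
{
    const int bit_depth = 10;                 /* example bit depth */
    const int shift     = bit_depth - 5;      /* shift used by sao_band_filter */
    const int max_src   = (1 << bit_depth) - 1;

    assert((max_src >> shift) == 31);         /* highest band index, still < 32 */
    assert((0 >> shift) == 0);                /* lowest band index */
    return 0;
}
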
Guillaume Martres
ea21b7b68c hevc: clip pixels when transquant bypass is used
Fixes: asan_stack-oob_eae8e3_7333_WPP_B_ericsson_MAIN10_2.bit
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind

This is a more proper fix than 5856bca360c5bc3e340a357d91b1f993c80a7bea

The reconstructed picture should always be clipped (see section 8.6.5);
previously we did not clip coding units where
cu_transquant_bypass_flag == 1.

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit c9fe0caf7a1abde7ca0b1a359f551103064867b1)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-13 16:12:17 +01:00
Luca Barbato
738a2a04b6 hevc: Clip the pixel before shifting
Prevents an out-of-bounds array read.

Fixes: asan_stack-oob_eae8e3_7333_WPP_B_ericsson_MAIN10_2.bit
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 5856bca360c5bc3e340a357d91b1f993c80a7bea)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-13 16:12:01 +01:00
Michael Niedermayer
706dca18d0 avcodec/hevc: use av_mallocz() for allocating tab_ipm
Fixes use of uninitialized memory and an out-of-bounds stack array read
Fixes: signal_sigsegv_ecc526_7846_WPP_C_ericsson_MAIN_2.bit
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 0999f1613bc48ed9d6578a3ad7bcd17610e07fbf)

Conflicts:

	libavcodec/hevc.c
2014-01-13 16:11:50 +01:00
Michael Niedermayer
b3c3dc54a5 avcodec/alac: only set *got_frame_ptr when all channels have been decoded
Fixes use of uninitialized memory
Fixes: msan_uninit-mem_7f8b64436530_7895_quicktime_newcodec_applelosslessaudiocodec.m4a
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit e11983bda073f8c63f60509ee753da9fba20ed10)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-13 16:10:34 +01:00
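
A rough sketch of the pattern the commit above adopts; decode_one_channel() is an invented stand-in for the real per-channel ALAC decoding, and the point is only that *got_frame_ptr is raised after every channel decoded successfully, so a partially decoded frame is never returned:

#include <libavcodec/avcodec.h>

/* Hypothetical stand-in for the per-channel ALAC decoding. */
static int decode_one_channel(AVCodecContext *avctx, AVFrame *frame,
                              int ch, const AVPacket *avpkt)
{
    (void)avctx; (void)frame; (void)ch; (void)avpkt;
    return 0;   /* pretend the channel decoded fine */
}

static int decode_frame_sketch(AVCodecContext *avctx, AVFrame *frame,
                               int *got_frame_ptr, const AVPacket *avpkt)
{
    int ch, ret;

    for (ch = 0; ch < avctx->channels; ch++) {
        ret = decode_one_channel(avctx, frame, ch, avpkt);
        if (ret < 0)
            return ret;    /* *got_frame_ptr stays 0: no uninitialized samples escape */
    }

    *got_frame_ptr = 1;    /* only set once all channels have been decoded */
    return avpkt->size;
}
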
Clément Bœsch
b6af89be6a avformat/pjsdec: don't increase pointer when it's already at the end in read_ts()
Fixes use of uninitialized memory
Fixes: msan_uninit-mem_7f91f2de7764_2649_PJS_capability_tester.pjs
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit b84a7330af41cec93384bf59ed68c67b09d105cd)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-13 16:10:30 +01:00
Michael Niedermayer
57824d1ba6 avcodec/wmalosslessdec: shrink output on error so no uninitialized data is returned
Fixes use of uninitialized memory
partly fixes: msan_uninit-mem_7f7834b6a530_6473_luckynight-partial.wma
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 6b18a6839b43ea78e70cd3e35f781d1c955bda73)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-13 16:10:25 +01:00
Michael Niedermayer
cd753ee36b avcodec/wmalosslessdec: deallocate uninitialized frame on decode_tilehdr() failure
Fixes use of uninitialized memory
partly fixes: msan_uninit-mem_7f7834b6a530_6473_luckynight-partial.wma
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit ae3856dcaf9c5ef339969c95a72bcaf7c4bba9ec)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-13 16:10:12 +01:00
Michael Niedermayer
aaaafc300d avcodec/wmalosslessdec: Pass on error code from decode_tilehdr()
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 03fff09b32171e0c76d104c02ebf578c7f4fe21d)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-13 16:10:09 +01:00
Michael Niedermayer
6da213ce20 avformat/matroskadec: check generic audio deinterleaver sub_packet_size against frame_size
Fixes use of uninitialized memory
Fixes: msan_uninit-mem_7f67d052a530_7517_nosound.mkv
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit a1ed1c2193483849df689b105bec0d26c2497999)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-13 16:09:26 +01:00
Michael Niedermayer
763a808c96 avformat/flvdec: initialize context before reading from it
Fixes use of uninitialized memory
Fixes: msan_uninit-mem_7f9b8387069e_5377_flv_with_pcm_s16be_audio_track.flv
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 396ddcf22d55fa7e735d69eed22a4a4b1649b73c)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-13 16:09:22 +01:00
Michael Niedermayer
4f3b0afaa2 avformat/mxfdec: check avio_read(UID) result
Fixes use of uninitialized memory
Fixes: msan_uninit-mem_7fc9ba2fd98e_82_02785736.mxf
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 4162ceea93684f3cd656dc21d30903e102a44e73)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-13 16:09:15 +01:00
Michael Niedermayer
050d8d727a avformat/rmdec: when reading audio blocks, don't leave holes when reading fails
The fate test is changed because the reference file depends on the use of
non-cleared data at the very end. Alternatively we could upload a new
reference file, though that would then have to be changed every time the
handling of a truncated frame changes or there's a change to error
concealment, each time adding a new file ...

Fixes use of uninitialized memory
Fixed: msan_uninit-mem_7f3c02b81363_2787_RLG2_19.rm
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 77d2a1ca595ebe082d35c4b624ac9a9145991494)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-13 16:08:26 +01:00
Michael Niedermayer
6fb0f37def avcodec/h264: fix code that blindly dereferences NULL DPB
Fixes mixed flushing and decoding NULL packets
Found-by: wm4

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit d9339ab55373b12f078a3e3f1e294d8ff78652dd)

Conflicts:

	libavcodec/h264.c
2014-01-13 16:06:13 +01:00
Hendrik Leppkes
70028e917c vdpau: restore compatibility with deprecated fields in AVVDPAUContext
Fixes ticket #3133.

Signed-off-by: Hendrik Leppkes <h.leppkes@gmail.com>
Tested-by: EricV
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 23bc1351ad7db698da9348e23ee63ec7300a881d)
2014-01-11 00:21:06 +01:00
Carl Eugen Hoyos
6ce835d77f Fix libopenjpeg colour range adjustment for 8 < bpp < 16.
Fixes ticket #3284.

Reviewed-by: Michael Bradshaw
(cherry picked from commit 8298b54179c92fc3293ea312c4fcf153917bca0a)
2014-01-10 14:07:36 +01:00
Michael Niedermayer
d45a724192 avcodec/msvideo1enc: fix SKIPS_MAX
Fixes Ticket3270

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit fb8f5d0510619cea2204246631f1c0dcd994ee25)
2014-01-09 11:43:21 +01:00
Carl Eugen Hoyos
a48440d857 Use the h264 parser when decoding VSSH in avi.
Fixes ticket #3261 visually.

Analyzed-by: Michael Doilnitsyn
(cherry picked from commit 94cf4f8bac12c58e30ce3b5d72cf5898baafe9a8)
2014-01-08 10:19:52 +01:00
Michael Niedermayer
de14fe7e29 avformat/mxfdec: detect loops during header parsing
The header parser uses forward and backward parsing, making bulletproof
prevention of loops difficult; hence this simple detection code.
If someone improves the forward/backward parsing so that it cannot loop,
then this commit should be reverted.

Fixes Ticket3278

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 1c010fd035c1a14dc73827b84f21f593e969a5d6)
2014-01-08 10:19:36 +01:00
Michael Niedermayer
580d3ef6fe avformat/mov: Check that we have a stream before accessing it in mov_read_ares()
Fixes out of array read
Fixes: signal_sigsegv_6f1855_3910_avid_test_alpha.mov

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit a7f27453f64d9020b92b01687baeb5909c6cdad0)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-08 00:25:25 +01:00
Michael Niedermayer
2864c987d8 avformat/ipmovie: check OPCODE_INIT_VIDEO_BUFFERS size more completely
Fixes use of uninitialized data

Fixes: signal_sigsegv_1571228_5930_ipmovie_interplayvideo_interplay_dpcm__bislogo.mve

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 2e97e244097c309571b383dd107252404ebb3326)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-08 00:25:20 +01:00
Michael Niedermayer
d1a9195863 avcodec/mjpegdec: check len in mjpeg_decode_app() more completely
Prevents len from becoming negative and causing an assertion failure

Fixes: signal_sigabrt_7ffff7126425_5140_fd44dc63fa7bdd12ee34fc602231ef02.jpg

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 6060234d43dcf0b5200cdd7dbd2f1542146827eb)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-08 00:25:07 +01:00
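
An illustrative version of the check, assuming the usual JPEG convention that the 16-bit APPn segment length includes its own two bytes; the function is invented and the real mjpeg_decode_app() does more:

#include <libavutil/error.h>

/* Illustrative only: a length field below 2 would leave a negative
 * payload length once the two length bytes themselves are subtracted. */
static int app_segment_payload_len(int len_field)
{
    if (len_field < 2)
        return AVERROR_INVALIDDATA;
    return len_field - 2;
}
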
Michael Niedermayer
898ab02557 avformat/avidec: Don't assert the existence of an index for video streams.
It's possible in various rare cases that an index cannot be created or allocated.
Fixes assertion failure
Fixes: signal_sigabrt_7ffff7126425_7712_pokem.avi

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 7865759409b27089b444bc029b2b76b06161b2cf)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-08 00:25:02 +01:00
Michael Niedermayer
69aa3d5b88 avcodec/hevc: clear HEVClc when it's deallocated in hevc_decode_free()
Fixes reading freed memory
Fixes: asan_heap-uaf_1abf8ef_3987_NUT_A_ericsson_4.bit
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 21a2fb7e0579703fdea96f659498ef8b1f243289)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-08 00:23:49 +01:00
Michael Niedermayer
dcecca0758 avutil/log: check that len is within the buffer before reading it
Fixes out of array read
Fixes: asan_heap-oob_19d6979_6857_mmw_deadzy.ogg
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 808c10e728db2d92ccbb0f8b3bcd4a2f4305a2cf)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-08 00:23:43 +01:00
Michael Niedermayer
413065aff4 avcodec/g2meet: check available space before copying palette
Fixes out of array read
Fixes: asan_heap-uaf_ae6067_5415_g2m4.wmv

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 6d9dad6a7cb5d544d540abf941fedbd34c14d2bd)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-08 00:23:35 +01:00
Michael Niedermayer
c094aec76e avcodec/ac3dec: check bap before use.
Fixes out of array read
Fixes assertion failure
Fixes asan_static-oob_16431c0_8036_rio_bravo_mono_64_spx.ac3

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 4782c4284fa3856a9b6910fe5ff6e4fb1c65b58c)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-08 00:23:23 +01:00
Michael Niedermayer
5e21989de4 Revert "Merge remote-tracking branch 'qatar/master'" (43dec5ef9a360c9ffac3278f464832bd99af0cb0)
Fixes out of array accesses
Fixes asan_static-oob_eb9812_5961_iv41.avi
This reverts the merge of c9ef6b09326a24010bf86d6b0d19cfa42df4d546

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
(cherry picked from commit c3d5cd1ebfba8fe36a0da7fad47df7fdf9c4ccd0)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-08 00:23:18 +01:00
Michael Niedermayer
908b951b4e avcodec/hevc: Fix modulo operations
Fixes qp fields becoming out of range
Fixes: asan_static-oob_e393a3_6998_WPP_A_ericsson_MAIN10_2.bit
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 4ced5d7780fea2ea49444d6686d26f26b3a2160f)

Conflicts:

	libavcodec/hevc_filter.c
2014-01-07 23:48:10 +01:00
Michael Niedermayer
4b0cecb457 avcodec/hevc_ps: check that VPS referenced from SPS exists
This matches how it's done for SPS/PPS.
An alternative to this is to check it when it's used.

Fixes null pointer dereference
Fixes: signal_sigsegv_e30a43_1437_CIP_A_Panasonic_3.bit
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit d66bab0a69ac1860e78dd951ad8db1a507e75642)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 23:47:39 +01:00
Nicolas George
3dae9d13e5 lavc/mjpegenc: use proper error codes.
(cherry picked from commit 2ebaadf35c9387610ca1eb7e94c171050562a77c)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Nicolas George
9189a0a71b lavc/mjpegenc: check av_frame_alloc() failure.
(cherry picked from commit 19a2d101acc0260bb310e79010a8491b10716189)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Nicolas George
4b25b5a8a3 lavc/libopenjpegenc: check av_frame_alloc() failure.
(cherry picked from commit 97af2faaba70c866ae4c11459a79a16d4a014530)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Nicolas George
4c3cd88144 lavc/diracdec: check av_frame_alloc() failure.
(cherry picked from commit a91394f4de63ae5c2e21c548045b79393ca7fea1)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Nicolas George
4d70639d53 lavc/utils: check av_frame_alloc() failure.
(cherry picked from commit 38004051b53ddecb518053e6dadafa9adc4fc1b2)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Nicolas George
a6ba0f7be9 ffprobe: check av_frame_alloc() failure.
(cherry picked from commit a55692a96099c40aabb25e1443890be99f9c845c)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Nicolas George
89205b637e lavc/ffwavesynth: fix dependency on sizeof(AVFrame).
(cherry picked from commit bcfcb8b8524dfcc1c37d520ccf3fba3b3a4c104d)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Michael Niedermayer
88058b4650 ffprobe: Don't clear AVFrame between uses.
The old API required this clearing in the past; the new API does not
require it.

Fixes memleak
Regression introduced by 37a749012aaacc801fe860428417a6d7b81c103f

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 565f786d1da1fea80fcea231550d5d0f174c009a)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Michael Niedermayer
ca22a2dec5 avcodec/utils: drop 2 dependencies on sizeof(AVFrame)
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit c90f31146e8b1407a4a5808d0d904d85baeed5d4)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Michael Niedermayer
d058583510 avcodec/libvorbisenc: drop dependency on sizeof(AVFrame)
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 3c8b085764ed4b036df4a8908a0781dc6d73ee11)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Michael Niedermayer
e0c3c612eb ffprobe: drop dependency on sizeof(AVFrame)
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit bf1c87ee7ab1b98c1b92172eb1ebd6ad55564ff7)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Michael Niedermayer
4d4a10cfa1 avcodec/flashsv2enc: drop dependency on sizeof(AVFrame)
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit b8f4410ff60b3a973cd13351d00a1d88eaddfb71)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Michael Niedermayer
ce675bd54a avcodec/j2kenc: drop dependency on sizeof(AVFrame)
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 8443b27072a076abb28d7f2f60bc90e1d5c285df)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Michael Niedermayer
965eb42be0 avcodec/libopenjpegenc: drop dependency on sizeof(AVFrame)
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 1458f0647ca0c882cc1c29892ac130a1056a1f47)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Michael Niedermayer
d2578f8152 avcodec/mjpegenc: drop dependency on sizeof(AVFrame)
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 5b3f4b3ef590b1221d44d24345a846c1aa636b69)

Conflicts:

	libavcodec/mjpegenc.c
2014-01-07 21:28:41 +01:00
Michael Niedermayer
31c52cd442 avcodec/msvideo1enc: drop dependency on sizeof(AVFrame)
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit c81234651f761a44a3e72829fd494211e237069c)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Michael Niedermayer
fa220e7307 avcodec/diracdec: avoid depending on sizeof(AVFrame)
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit fca7943850ecdc1e67a0275b488768be01867f75)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Michael Niedermayer
304260a572 avcodec/utils: implement avcodec_alloc_frame() through av_frame_alloc()
This ensures that there's just one AVFrame allocation function and libs don't
produce multiple AVFrame variants after a minor lib update

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 5abdda214df53f009434f19b9eb8e1375f2924d9)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
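
The idea as a minimal sketch, under an invented name (the real libavcodec function also handles some legacy defaults): the deprecated allocator simply forwards to av_frame_alloc(), so there is a single allocation path and a minor library update cannot leave two diverging AVFrame layouts in circulation.

#include <libavutil/frame.h>

/* Sketch only; not the verbatim libavcodec implementation. */
static AVFrame *alloc_frame_compat(void)
{
    return av_frame_alloc();
}
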
Michael Niedermayer
ed621efb36 avcodec/libutvideodec: use av_frame_move_ref()
AVFrames cannot be copied literally; their definition is in
avutil and their extended_data can point to their data[].

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 4c1b4ae1baf77df7150fa8cbcece8057a261e47d)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Michael Niedermayer
9f864bd324 Merge commit 'd4f1188d1a662fed5347e70016da49e01563e8a8'
* commit 'd4f1188d1a662fed5347e70016da49e01563e8a8':
  dv: use AVFrame API properly

Conflicts:
	libavcodec/dvdec.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 94a849b8b6c3e4a90361485b2e12a9a5c35833a3)

Conflicts:

	libavcodec/dv.h
	libavcodec/dvdec.c
	libavcodec/dvenc.c

Author of the merged code: Anton Khirnov
2014-01-07 21:28:41 +01:00
Michael Niedermayer
117728cf8f Merge commit 'd351ef47d0e0ccb7de96b37f137c16b2885580ac'
* commit 'd351ef47d0e0ccb7de96b37f137c16b2885580ac':
  pthread_frame: use the AVFrame API properly.

Conflicts:
	libavcodec/pthread_frame.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 45fd4ec9ef2b3a7074c49cdddac6e7dcc127a874)

Conflicts:

	libavcodec/pthread_frame.c
Author of the merged code: Anton Khirnov
2014-01-07 21:28:41 +01:00
Michael Niedermayer
caf7db0c35 Merge commit 'b605b123ef1d3bac0e7c221d8d7fa74cd8c7253c'
* commit 'b605b123ef1d3bac0e7c221d8d7fa74cd8c7253c':
  mxpegdec: use the AVFrame API properly.

Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 8947f47fdfaf7f3a907a334fc65dc724f2fdd23f)

Author of the merged code: Anton Khirnov
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:41 +01:00
Michael Niedermayer
09965ae7d8 Merge commit 'afa21a12bf084f905187615706b0a8d92bc98661'
* commit 'afa21a12bf084f905187615706b0a8d92bc98661':
  p*menc: use the AVFrame API properly.

Conflicts:
	libavcodec/Makefile
	libavcodec/pamenc.c
	libavcodec/pnmenc.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 37945584bfb29f187e38531c90bb02a32014e48d)

Author of the merged code: Anton Khirnov
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:40 +01:00
anatoly
f448478a31 Add support for picture_ptr field in MJpegDecodeContext
Signed-off-by: Anton Khirnov <anton@khirnov.net>
(cherry picked from commit e0e3b8b297bae5144f23fd4b46a1309857040b63)

Conflicts:

	libavcodec/jpeglsdec.c
	libavcodec/mjpegbdec.c
	libavcodec/mjpegdec.c

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 2fade10cb0c53e6b2a663d8ce0566ba7c61013cf)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:28:40 +01:00
Michael Niedermayer
82ec6183bc Merge commit 'e2274aa555f023e4f4e4819bf29b2d7e0adec7d5'
* commit 'e2274aa555f023e4f4e4819bf29b2d7e0adec7d5':
  mjpegdec: use the AVFrame API properly.

Conflicts:
	libavcodec/mjpegdec.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 639303867640d1880fad675472bc47e9c95f96c7)

Author of the merged code: Anton Khirnov
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 21:23:32 +01:00
Michael Niedermayer
345e2a2b43 Merge remote-tracking branch 'qatar/master'
* qatar/master:
  mpegvideo_enc: use the AVFrame API properly.
  ffv1: use the AVFrame API properly.
  jpegls: use the AVFrame API properly.
  huffyuv: use the AVFrame API properly.

Conflicts:
	libavcodec/ffv1.c
	libavcodec/ffv1.h
	libavcodec/ffv1dec.c
	libavcodec/ffv1enc.c

Changes to ffv1 are more redone than merged due to them being based on
an ancient codebase and a good part of that having been done already
as well.

Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit be1e6e7503b2f10b0176201418eb97912cee093f)

Conflicts:

	libavcodec/ffv1enc.c
	libavcodec/mpegvideo.h
	libavcodec/mpegvideo_enc.c
Author of the merged code: Anton Khirnov
2014-01-07 21:23:25 +01:00
Michael Niedermayer
7442aa20fa Merge commit 'd48c20630214a4effcc920e93a5044bee4e2002e'
* commit 'd48c20630214a4effcc920e93a5044bee4e2002e':
  qtrleenc: use the AVFrame API properly.
  ulti: use the AVFrame API properly.
  vc1: use the AVFrame API properly.
  flashsv: use the AVFrame API properly.

Conflicts:
	libavcodec/flashsv.c
	libavcodec/qtrleenc.c
	libavcodec/ulti.c
	libavcodec/vc1dec.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 92cbd775687204f9750a09c69f97702719036aab)

Conflicts:

	libavcodec/flashsv.c
Author of the merged code: Anton Khirnov
2014-01-07 21:23:19 +01:00
Michael Niedermayer
9918296a2d Merge commit 'ffe04c330335add4c6d70ab0bb98e6b3f4f7abfa'
* commit 'ffe04c330335add4c6d70ab0bb98e6b3f4f7abfa':
  libxvid: use the AVFrame API properly.
  pcxenc: use the AVFrame API properly.
  roqvideo: remove unused variables
  libschroedingerenc: use the AVFrame API properly.

Conflicts:
	libavcodec/pcxenc.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit f4f7888bab7061f08c54356c285adaba24383dc0)

Author of the merged code: Anton Khirnov
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:32:48 +01:00
Michael Niedermayer
4d373ee8e4 Merge commit '97168b204a0b6b79bb6c5f0d40efdf7fc2262476'
* commit '97168b204a0b6b79bb6c5f0d40efdf7fc2262476':
  eatgv: use the AVFrame API properly.
  libxavs: use the AVFrame API properly.
  nuv: use the AVFrame API properly.
  flashsvenc: use the AVFrame API properly.

Conflicts:
	libavcodec/eatgv.c
	libavcodec/nuv.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit a0c0629dd963b00f989172f0c599353b6b288c37)

Conflicts:

	libavcodec/eatgv.c
Author of the merged code: Anton Khirnov
2014-01-07 03:32:43 +01:00
Michael Niedermayer
6133f450bb Merge commit '57e7b3a89f5a0879ad039e8f04273b48649799a8'
* commit '57e7b3a89f5a0879ad039e8f04273b48649799a8':
  dnxhdenc: use the AVFrame API properly.
  libx264: use the AVFrame API properly.
  svq1enc: use the AVFrame API properly.
  gif: use the AVFrame API properly.

Conflicts:
	libavcodec/gif.c
	libavcodec/svq1enc.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 5b0c70c2499e20529d517b712910d6f4f72e9485)

Author of the merged code: Anton Khirnov
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:32:38 +01:00
Michael Niedermayer
8a9f4f8800 Merge commit '45bde93eefa78c1bdb0936109fbd2e2fb27fbfe7'
* commit '45bde93eefa78c1bdb0936109fbd2e2fb27fbfe7':
  sunrastenc: use the AVFrame API properly.
  targaenc: use the AVFrame API properly.
  tiffenc: use the AVFrame API properly.
  pngenc: use the AVFrame API properly.

Conflicts:
	libavcodec/pngenc.c
	libavcodec/sunrastenc.c
	libavcodec/targaenc.c
	libavcodec/tiffenc.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 3ea168edeb7a20eae1fccf7da66ac7b8c8c791ba)

Author of the merged code: Anton Khirnov
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:32:32 +01:00
Michael Niedermayer
9ebe344166 Merge commit '0ea430c75b8d90449d2878ad84669a2da2ad3cbc'
* commit '0ea430c75b8d90449d2878ad84669a2da2ad3cbc':
  lclenc: use the AVFrame API properly.

Conflicts:
	libavcodec/lclenc.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 85b7b0c519f8d9491b4c0340329a605cc97c8984)

Author of the merged code: Anton Khirnov
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:32:26 +01:00
Michael Niedermayer
badb8e15ac Merge commit 'e4155f15b35c4272a235f5521d2dc6c2aabdd462'
* commit 'e4155f15b35c4272a235f5521d2dc6c2aabdd462':
  eamad: use the AVFrame API properly.
  dpxenc: use the AVFrame API properly.
  bmpenc: use the AVFrame API properly.
  sgienc: use the AVFrame API properly.

Conflicts:
	libavcodec/bmpenc.c
	libavcodec/dpxenc.c
	libavcodec/eamad.c
	libavcodec/sgienc.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 9ad477d9098b5281cede0bd8525ca90b0e52436d)

Conflicts:

	libavcodec/eamad.c

Author of the merged code: Anton Khirnov
2014-01-07 03:32:19 +01:00
Michael Niedermayer
48c192c48c Merge commit '730bac7bab3c7dcd9fcb7c70f154e5f4cfaef9a7'
* commit '730bac7bab3c7dcd9fcb7c70f154e5f4cfaef9a7':
  mss4: use the AVFrame API properly.
  mss3: use the AVFrame API properly.
  mss2: use the AVFrame API properly.
  mss1: use the AVFrame API properly.

Conflicts:
	libavcodec/mss1.c
	libavcodec/mss2.c
	libavcodec/mss3.c
	libavcodec/mss4.c

See: 02fe531afefa7ac3fcc552f8e83461a4bfa7f868
See: ff1c13b133d548b3ce103f91999b6cc1bb7e65cc
See: 310bf283542ff81a9ec8fa7492fe7d625e80562f
Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 8d193a24f2da825aaf5382e4aa42ab533806b033)

Author of the merged code: Anton Khirnov
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:32:13 +01:00
Michael Niedermayer
dbb4ff6851 Merge commit '508b37557bf36eae83c18e64d42f27b44a321d81'
* commit '508b37557bf36eae83c18e64d42f27b44a321d81':
  tiertexseqv: use the AVFrame API properly.
  smc: use the AVFrame API properly.
  truemotion2: use the AVFrame API properly.
  truemotion1: use the AVFrame API properly.

Conflicts:
	libavcodec/smc.c
	libavcodec/tiertexseqv.c
	libavcodec/truemotion1.c
	libavcodec/truemotion2.c

See: e999f2339ab0200039ee7123b75d79a52aaac5d1
Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 72df87088c8a6593d66b207140edd32b4d2fb6ee)

Author of the merged code: Anton Khirnov
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:32:08 +01:00
Michael Niedermayer
8c53cacbfd Merge commit '4a4841d4e0f0dc50998511bf6c48b518012024db'
* commit '4a4841d4e0f0dc50998511bf6c48b518012024db':
  fraps: use the AVFrame API properly.
  rpza: use the AVFrame API properly.
  motionpixels: use the AVFrame API properly.
  vmdvideo: use the AVFrame API properly.

Conflicts:
	libavcodec/fraps.c
	libavcodec/motionpixels.c
	libavcodec/rpza.c
	libavcodec/vmdav.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 4362f272c0ae280cde833589e5c9c6696bd878d5)

Conflicts:

	libavcodec/vmdav.c

Author of the merged code: Anton Khirnov
2014-01-07 03:32:03 +01:00
Michael Niedermayer
dcb91e3dfe Merge commit '3c8ea9d4a74fd4d7493d40c818ca64ee492709f3'
* commit '3c8ea9d4a74fd4d7493d40c818ca64ee492709f3':
  vmnc: use the AVFrame API properly.
  xan: use the AVFrame API properly.
  xxan: use the AVFrame API properly.
  zerocodec: use the AVFrame API properly.

Conflicts:
	libavcodec/vmnc.c
	libavcodec/xan.c
	libavcodec/xxan.c

See: cf5ab8b6f71699a48a6384d5e5779630b4be7b56
See: ad438f450b83882a1277a79c1c3d6dfe55573b1c
See: 67607e20e882eb5639a4e9099caecb52a863ab68
Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 8af7774c7aca6f3b595d0417b92f543ce0c7b537)

Author of the merged code: Anton Khirnov
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:31:57 +01:00
Michael Niedermayer
bfe4aa892a Merge commit 'a837c4f2df96a30bf9aa4115b426d608487c7101'
* commit 'a837c4f2df96a30bf9aa4115b426d608487c7101':
  zmbvenc: use the AVFrame API properly.
  flicvideo: use the AVFrame API properly.
  smacker: use the AVFrame API properly.
  mmvideo: use the AVFrame API properly.

Conflicts:
	libavcodec/flicvideo.c
	libavcodec/mmvideo.c
	libavcodec/smacker.c
	libavcodec/zmbvenc.c

See: 76e27b1d0594199b4b1ff8520312069f42373944
See: 099e57bc38d7e53cf6823dfec349ff9fdaee99ba
Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit fe3808eddee81ce4712d1e729fa6fe619f1685c8)

Author of the merged code: Anton Khirnov
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:31:47 +01:00
Michael Niedermayer
1dcf9de6e9 Merge commit '2e09096da912f563c4dd889a8f25c314529bbaa6'
* commit '2e09096da912f563c4dd889a8f25c314529bbaa6':
  kgv1: use the AVFrame API properly.
  indeo2: use the AVFrame API properly.
  iff: use the AVFrame API properly.
  msrle: use the AVFrame API properly.

Conflicts:
	libavcodec/iff.c
	libavcodec/indeo2.c
	libavcodec/kgv1dec.c
	libavcodec/msrle.c

See: 451b2ca1b4349f9b60416cc057eaf5518d81025c
See: 80e9e63c946660304fc65fa8141ccfdbe4d196d1
See: 057dce5f21cd70db1ef6e3b67644a39f0d51aba5
Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 9f890a165666a73376c73b3c2bd920345b5c3b79)

Author of the merged code: Anton Khirnov
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:31:34 +01:00
Michael Niedermayer
2fe67ddb24 Merge commit 'b7462a3904d71ff799584faf5b875cad59ca2f31'
* commit 'b7462a3904d71ff799584faf5b875cad59ca2f31':
  jvdec: use the AVFrame API properly.

Conflicts:
	libavcodec/jvdec.c

See: 678431d3f2c5f35fe48b02d5035604ace742be2e
Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit ddfdcd2b5ecce0914c1eefa6269060bdbc879b17)

Author of the merged code: Anton Khirnov
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:31:29 +01:00
Michael Niedermayer
b7cb77a322 Merge commit '2d2a92f72199823a92e4e226c32e42a27ec801c0'
* commit '2d2a92f72199823a92e4e226c32e42a27ec801c0':
  dxa: use the AVFrame API properly.
  qpeg: use the AVFrame API properly.
  cin video: use the AVFrame API properly.
  msvideo1: use the AVFrame API properly.

Conflicts:
	libavcodec/dsicinav.c
	libavcodec/dxa.c
	libavcodec/msvideo1.c
	libavcodec/qpeg.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 5219afc09d8e97e18917738cbc052f903df9a619)

Author of the merged code: Anton Khirnov
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:31:20 +01:00
Michael Niedermayer
2d7f20d2bb Merge commit 'a639ea7f4bc44bf6bfa452675558a342924a66a9'
* commit 'a639ea7f4bc44bf6bfa452675558a342924a66a9':
  escape124: use the AVFrame API properly.
  qtrle: use the AVFrame API properly.
  cljr: use the AVFrame API properly.
  cinepak: use the AVFrame API properly.

Conflicts:
	libavcodec/cinepak.c
	libavcodec/cljr.c
	libavcodec/qtrle.c

See: 80e9e63c libavcodec/cinepak.c
See: 71c378984b0bd5470f67c424a79a4750f84d2d3e
Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit bfb1f44d246f4ed97d5cad9c1eace8a20951ff76)

Author of the merged code: Anton Khirnov
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:31:14 +01:00
Michael Niedermayer
3976c50fc1 Merge commit 'cec5ce49229d61e4eb1f331a6d0dff3aa24f6655'
* commit 'cec5ce49229d61e4eb1f331a6d0dff3aa24f6655':
  cdxl: remove an unused variable
  c93: use the AVFrame API properly.
  bethsoftvid: use the AVFrame API properly.
  avs: use the AVFrame API properly.

Conflicts:
	libavcodec/bethsoftvideo.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 21c41e76d7c1ddaadafc9da50e99db51358f3754)

Conflicts:

	libavcodec/avs.c
Author of the merged code: Anton Khirnov
2014-01-07 03:31:02 +01:00
Michael Niedermayer
572ccbd299 Merge commit '6139f481ac9feb1bee4e7d04789fb15d7f24ebbf'
* commit '6139f481ac9feb1bee4e7d04789fb15d7f24ebbf':
  asvenc: use the AVFrame API properly.
  a64multienc: use the AVFrame API properly.

Conflicts:
	libavcodec/vaapi_mpeg.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit cc4a6435638fa2a471fef048a3e68eaf7e6e306c)

Author of the merged code: Anton Khirnov
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:30:57 +01:00
Dale Curtis
dcf0f82d08 h264: Clear ERContext.cur_pic when unref'ing current picture.
Signed-off-by: Dale Curtis <dalecurtis@chromium.org>
(cherry picked from commit 4feca2214a0b69dcbe4d1c7cd145c3881459e867)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:05:30 +01:00
Michael Niedermayer
061e948153 configure: support raising major version in soname
This allows separate installation of shared libs that should not conflict with
whatever is already installed.

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 102b794e09482fec881e7ec903e57914895f9b74)

Conflicts:

	libavcodec/utils.c

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:05:30 +01:00
Michael Niedermayer
08808084f6 swscale/utils: fill xyz tables only when they will be used
makes the first call to sws_getContext() 1ms faster

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 4d18060e56aac9d7248854ba75d5fc19f5cd3db8)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:05:30 +01:00
Michael Niedermayer
e779595dca avutil/log: skip IO calls on empty strings
These occur when no context is set, for example; thus they are common

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit a044a183a3fb90b20a8deaa3ea1158510bcdd420)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:05:30 +01:00
Michael Niedermayer
cf6cf50ac6 do O(1) instead of O(n) atomic operations in register functions
about 1ms faster startup time

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 133fbfc7811ffae7b97dd129fcd0b5e646742362)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:05:30 +01:00
Michael Niedermayer
1ecd1b4aee avcodec/g2meet: fix stride calculation, use correct format field
Fixes out of array accesses
Fixes: asan_heap-oob_ae5f63_5415_g2m4.wmv
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 83f7bd6dcf00875725c5f3b7e1bedac5a6b3c77d)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:05:30 +01:00
Paul B Mahol
04a4c4144a avcodec/libopusenc: change default frame duration to 20 ms
20 ms is used by the libopus encoder.

Signed-off-by: Paul B Mahol <onemda@gmail.com>
(cherry picked from commit 74906d3727ec3bd9b7b28dfa7a98ff6e8cf8b6d7)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:05:30 +01:00
Jan Gerber
73aa4518ee lavf/matroskadec: ReferenceBlock is a signed integer
according to the Matroska Specification
ReferenceBlock is a signed integer too.

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 8cc59ec881b8706fb3036a2a83f7ededa468dedb)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:05:30 +01:00
Jan Gerber
aee36a7d16 lavf/matroska*: DiscardPadding is a signed integer
according to the Matroska Specification
 http://matroska.org/technical/specs/index.html
DiscardPadding is a signed integer.

Tested-by: Jan Gerber <j@v2v.cc>
Tested-by: James Almer <jamrial@gmail.com>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit f4b1ca99ff86c6ba78e1b4730c85eac0d5a5817a)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:05:30 +01:00
Michael Niedermayer
d86930b8ff avformat/matroska: simplify signed int access code
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit cddd15ba5c9cd2e92d2f2942e0fc40bf3bf56115)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:05:30 +01:00
Jan Gerber
95b5496dce lavf/matroska*: add support for signed integers
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit d03eea36b2c329241f63c8aca2d6adbb6ea81d9c)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:05:29 +01:00
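
For context on the Matroska signed-integer commits above, a self-contained sketch of reading an EBML signed integer: the element payload is a big-endian two's-complement value of 1 to 8 bytes, which is what fields such as ReferenceBlock and DiscardPadding need. This is an illustration, not the matroskadec.c code:

#include <stdint.h>

/* Illustrative only: decode an n-byte (1..8) big-endian two's-complement
 * EBML signed integer. */
static int64_t ebml_read_sint_sketch(const uint8_t *buf, int n)
{
    uint64_t v = 0;
    int i;

    for (i = 0; i < n; i++)
        v = (v << 8) | buf[i];
    if (n < 8 && (v & (1ULL << (8 * n - 1))))
        v |= ~((1ULL << (8 * n)) - 1);        /* sign-extend */
    return (int64_t)v;
}
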
Michael Niedermayer
3ffd1c2e40 avcodec/jpeg2000dec: Check precno before using it in JPEG2000_PGOD_CPRL
Fixes out of array reads
Fixes: asan_heap-oob_f0de57_6823_mjp2.mov

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 3d5a5e86be2a65e33c34ab3ad7923f54e8e49c1d)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:05:29 +01:00
Michael Niedermayer
2d16a88a9c avcodec: move end zeroing code from av_packet_split_side_data() to avcodec_decode_subtitle2()
This code changes the input packet, which is read-only, and can in
rare circumstances lead to decoder errors. (I ran into one of these in
the audio decoder, which corrupted the packet during av_find_stream_info()
so that actually decoding that single packet failed later.)
Until a better fix is implemented, this commit limits the problem.
A better fix might be to make the subtitle decoders not depend on
data[size] = 0 or to copy their input when this is not the case.
(cherry picked from commit 01923bab98506b1e98b4cbf08419364ce6ffea6d)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2014-01-07 03:05:29 +01:00
Michael Niedermayer
edc6f3da0e avfilter/vf_format: check that the format list is not empty
Fixes Ticket3210

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit ee16e0cacc16ea60c35a66796410012755263c3c)
2014-01-07 02:02:52 +01:00
Michael Niedermayer
8763aca389 avformat/oggdec: don't read timestamps from EOS pages of ogm videos
Some muxers store invalid timestamps there, which breaks seeking
Fixes Ticket2739

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 5e0c7eab2a9d43e6e3be967ec1a6b04a3e0328da)
2014-01-05 17:25:29 +01:00
James Almer
b962157ce3 matroskadec: Fix bug when parsing realaudio codec parameters
flavor can be 0.

This fixes trac ticket #3214

Signed-off-by: James Almer <jamrial@gmail.com>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 0d944ee34349805b29f9c91b15c8009d16df01ab)
2013-12-30 00:49:35 +01:00
Alexander Strasser
8c79730a8e configure: Special case libfreetype test
Include the freetype header, indirectly through a macro, like it
is done in the drawtext filter. Do not break if the header is moved.

Unfortunately the drawtext filter included the file where the include
macros are defined in a wrong way. This is not needed and breaks the
build. Remove that #include line too.

(cherry picked from commit cea5812fa723c08b89d929eeba73462e05de2973)

Signed-off-by: Alexander Strasser <eclipse7@gmx.net>
2013-12-29 11:36:01 +01:00
Michael Niedermayer
b432043d55 nutenc/write_index: warn if 2 consecutive keyframes have the same PTS and discard the 2nd
This fixes an assertion failure and regression and restores previous behaviour
Fixes Ticket3197

An alternative would be to fail hard in this case and refuse to mux such data.

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit de2a2caf4dedb28a959d0ff6f02751bb6c3ff033)
2013-12-24 07:45:32 +01:00
Peter Ross
94c3f8165c wtvenc: populate VIDEOINFOHEADER2
Fixes ticket #2835.

Signed-off-by: Peter Ross <pross@xvid.org>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 6da21c1f8190d674fd1e5619bb148c1bbab8ca3c)
2013-12-24 07:39:31 +01:00
Peter Ross
f27895db0f avformat/riffenc: indent
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit f93b0abe4041b75f0dbb590ee932b37a07662856)
2013-12-24 07:39:23 +01:00
Peter Ross
c3f9628407 riffenc: add option to ff_put_bmp_header to ignore extradata
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit fcbb94712d9873a37cdc8b526e368154b5982186)
2013-12-24 07:39:15 +01:00
Mason Carter
7eec11463f VC1: Fix intensity compensation performance regression
Fix https://trac.ffmpeg.org/ticket/3204

The problem was that intensity compensation was always used once it was
encountered. This is because v->next_use_ic was never set back to zero.
To fix this, when resetting v->next_luty/uv, also reset v->next_use_ic.

This improved (restored) performance by 85% when decoding
http://bit.ly/bbbwmv

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit ed5bed4152203aed8cce01a679bed67bbda8903f)
2013-12-22 16:07:03 +01:00
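
A hypothetical illustration of the fix described above; the struct is a stand-in rather than the real VC1Context, but it shows the point that the use-intensity-compensation flag has to be cleared together with the next-frame lookup tables, otherwise every later frame keeps taking the compensation path:

#include <stdint.h>
#include <string.h>

typedef struct IcState {                  /* stand-in for the relevant VC1 fields */
    int     next_use_ic;
    uint8_t next_luty[2][256];
    uint8_t next_lutuv[2][256];
} IcState;

static void reset_next_ic(IcState *v)
{
    v->next_use_ic = 0;                   /* the reset that was missing */
    memset(v->next_luty,  0, sizeof(v->next_luty));
    memset(v->next_lutuv, 0, sizeof(v->next_lutuv));
}
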
Martin Storsjö
0d32483a11 arm: Don't clobber callee saved registers in scalarproduct
q4-q7/d8-d15 are supposed to not be clobbered by the callee.

CC: libav-stable@libav.org
Signed-off-by: Martin Storsjö <martin@martin.st>
(cherry picked from commit d307e408d4a9ada22df443cc38be77cc5e492694)
2013-12-21 09:58:08 +01:00
Michael Niedermayer
85ea846580 swscale/utils: check chroma width for fast bilinear scaler
Fixes artifacts where fast bilinear was used for downscaling chroma

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 037fc3b054b10aee0f11fdbe835e5dffa8e95b37)
2013-12-16 02:21:37 +01:00
Michael Niedermayer
5b52b4962e swscale/utils: remove useless ()
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 554e913fd7acc9da02ddac2c5ce9487f7f633c92)
2013-12-16 02:21:35 +01:00
Michael Niedermayer
69a283e0d2 avcodec/cabac: force get_cabac to be not inlined
Works around a bug in gcc's inline asm register assignment
Fixes Ticket3177

gcc 4.4 to 4.6 are affected at least; no unaffected gcc versions are known.
clang seems unaffected.

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 0538b29ae8002c44f27bae8a1a6fc6e646998be5)
2013-12-09 10:34:52 +01:00
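
For readers unfamiliar with the mechanism: FFmpeg's av_noinline macro (libavutil/attributes.h) expands to the compiler's noinline attribute, which is how a function is kept out of inlining when, as above, gcc's inline-asm register assignment miscompiles the inlined form. The body below is a trivial placeholder, not the real get_cabac():

#include <libavutil/attributes.h>

static av_noinline int noinline_example(int a, int b)
{
    return a + b;   /* placeholder body; only the attribute matters here */
}
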
Michael Niedermayer
624b83b3ef avcodec/error_resilience: check that er is supported before attempting to read the status of the previous slice
Fixes incorrectly set error_occured and improves speed

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 90539cea336fd513c47295a03c164cb4a851166f)
2013-12-07 11:44:18 +01:00
Michael Niedermayer
e8304f4ee0 avcodec/error_resilience: factor er_supported() check out
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit afb18c55783362546b5e512ce01b7fe7bf5744d9)
2013-12-07 11:44:15 +01:00
Michael Niedermayer
cbcc18bd9f avfilter/vf_pad: fix req_end
Fixes out of array accesses
Fixes Ticket3190

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 0cc5011f9a1b05132f9a20a71feb031f30a8a53b)
2013-12-03 09:52:23 +01:00
Michael Niedermayer
c765b64641 avcodec/h264_refs: improve key frame detection heuristic
Fixes Ticket3186

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit e3d7a3978b857e32b32575ff78ecc7d67a18687e)
2013-12-02 03:04:04 +01:00
Michael Niedermayer
1141a18e89 avcodec/h264_refs: split conditions of if() up for better readability
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit ab6ea7a81921a168575be63b3d9049ca716e707a)

Conflicts:
	libavcodec/h264_refs.c
2013-12-02 03:03:41 +01:00
Nicolas George
12c2d2ed46 lavc/srtenc: use bprint for text buffers.
Fix trac ticket #3120.
(cherry picked from commit 4b1c9b720e11d200ca7090210b34c409f43fafeb)
2013-11-30 19:22:07 +01:00
Michael Niedermayer
88e368d5a7 ffmpeg: set VCFR when copying timestamps
This fixes unreasonable initial frame repeats
Fixes Ticket3176

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 738ebb4a0e0c8fbdc83b44cf30b8c9b7ac866270)
2013-11-29 11:31:05 +01:00
Nicolas George
ad19cb3ca7 lavfi/af_pan: support unknown layouts on input.
Fix trac ticket #2899.
(cherry picked from commit 7b0a587393e03dab552d66450d43ab82bda0a5a1)
2013-11-28 01:05:33 +01:00
Nicolas George
bc04a3a489 lavfi/af_pan: support unknown layouts on output.
(cherry picked from commit 4e9adc9b7363cc336e3d47c98455e1508902fd29)
2013-11-28 01:05:26 +01:00
Nicolas George
cfcb22a77b lswr: fix assert failure on unknown layouts.
(cherry picked from commit 4a640a6ac89099bfb02d6d3d3ada04e321a37476)
2013-11-28 01:05:20 +01:00
Nicolas George
838a453e39 lavfi: parsing helper for unknown channel layouts.
Make ff_parse_channel_layout() accept unknown layouts too.
(cherry picked from commit 6e2473edfda26a556c615ebc04d8aeba800bef7e)
2013-11-28 01:05:14 +01:00
Nicolas George
1a676881ca lavfi/avfiltergraph: do not reduce incompatible lists.
A list of "all channel layouts" but not "all channel counts"
can not be reduced to a single unknown channel count.
(cherry picked from commit d300f5f6f570659e4b58567b35c9e8600c9f2956)
2013-11-28 01:05:07 +01:00
Nicolas George
c2ae9f75d7 lavfi/avfiltergraph: suggest a solution when format selection fails.
Format selection can fail if unknown channel layouts are used
with filters that do not support it.
(cherry picked from commit f775eb3fb4c7b716107355e428e40cb63f71ee7a)
2013-11-28 01:05:01 +01:00
Nicolas George
31647c5a46 lavd/lavfi: support unknown channel layouts.
(cherry picked from commit 863fb11f63f7f60feec390f3c54dd13606e07d05)
2013-11-28 01:04:54 +01:00
Michael Niedermayer
9422cd85a0 update for 2.1.1
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2013-11-20 03:12:11 +01:00
Michael Niedermayer
87c416d93a avcodec/pcm-dvd: fix 20/24bit 1 channel
Fixes part of ticket3122

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit ab184b298d4a54199986de10927258aed18c7b6b)
2013-11-19 11:46:43 +01:00
Michael Niedermayer
607e5038a9 avcodec/pcm-dvd: fix 20bit 2 channels
Fixes part of ticket3122

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 5db49fc38d9132e134de92584f296559bec3b789)
2013-11-19 11:46:32 +01:00
Michael Niedermayer
a289b0b91a avformat/mpegts: fix resync seek
The seek ended up seeking before the beginning, which caused problems
Fixes initial sync issues with libbluray
Fixes Ticket3117

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 7d0e927a31edb5fb584c2ab17f7fd676838d6639)
2013-11-18 17:54:15 +01:00
Michael Niedermayer
842def7d78 avformat/utils: don't count attached pics toward the probesize
Such pics behave more like headers, which we also don't count.
Fixes Ticket3146

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit a8dec360c5db15e8da4b44ff3c0f02a6c57e8ac0)
2013-11-18 14:26:49 +01:00
Clément Bœsch
c38af02626 build: avoid stdin stall with GNU AS probing.
a758c5e added probing for various tools, such as AS. Unfortunately, GNU
AS reads stdin with -v, and thus configure stalls with
configure arguments such as --as=as.

Fixes Ticket #1898.
(cherry picked from commit dbb41f93c16cbc65a899a75723c95da51c851cd5)
2013-11-18 14:26:18 +01:00
Clément Bœsch
7ce0f4ea3b avformat/image2: allow muxing gif files.
Fixes Ticket #2936.
(cherry picked from commit f70db22999d713da3306bf29ec763d670b9bf1ea)
2013-11-18 14:23:52 +01:00
Michael Niedermayer
3193b85be3 avcodec/tiff_common: allow count = 0 in ff_tadd_bytes_metadata()
Fixes Ticket3103

Reviewed-by: Thilo Borgmann <thilo.borgmann@mail.de>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 73d887733dc2ccb5d570615a753d5c44c3f1d9a4)
2013-11-18 14:21:37 +01:00
Lou Logan
5c8845a554 Fix example in pullup documentation.
The pullup filter does not work well with the fps filter; it
currently needs -r.

Signed-off-by: Carl Eugen Hoyos <cehoyos@ag.or.at>
(cherry picked from commit 6d90a5c149fbdf9678c3f03fac820f835665b985)
2013-11-18 14:20:01 +01:00
Michael Niedermayer
e5e048bbf7 avutil: reintroduce lls1 as the 52 ABI needs it
lls1 taken from ff130d7

This is incompatible with libavcodec versions
55.18.100 to 55.43.100, except 55.39.101.
This incompatibility is caused by these libavcodec versions depending on
a libavutil 52 which is ABI-incompatible with the previous ABI 52.

You can avoid this incompatibility by upgrading your libavcodec so it
no longer depends on the invalid ABI.

See: 502ab21af0ca68f76d6112722c46d2f35c004053
See: cc6714bb16b1f0716ba43701d47273dbe9657b8b
See: 41578f70cf8aec8e7565fba1ca7e07f3dc46c3d2
See: Ticket3136
Tested-by: marillat
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit b382d09d29be90e0947295a70cdcbaa60b9030b8)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2013-11-17 19:26:43 +01:00
Michael Niedermayer
b276b913a1 rename new lls code to lls2 to avoid conflict with the old which has a different ABI
Also remove the failed attempt at a compatibility layer; the code simply cannot work.

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit c3814ab654a993723b0e5f14cc252d68f233ad79)

Conflicts:

	libavcodec/version.h
2013-11-17 19:12:29 +01:00
Michael Niedermayer
d89e14bf54 avutil: rename lls to lls2
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit bbe66ef912470007f7cc424badde2ccec500b36b)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2013-11-17 19:11:28 +01:00
Michael Niedermayer
4b846f0ccf ffmpeg: Do not fill gap before the first decodable frame on single stream input files unless the user explicitly requests it.
Fixes behavior that differed from JM and probably several if not all
reference decoders.

We cannot just do this unconditionally as it would ruin AV sync in
some use cases.

Bug-Found-by: BugMaster
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit d7ebeba80c609e160a171168b3434c342a652237)

Conflicts:

	ffmpeg.c
2013-11-17 19:11:07 +01:00
Diego Biurrun
425517eecb mpeg12dec: Remove incomplete and wrong UV swapping code for VCR2
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 321514042534a2501a9f6223b88f0d2b8060f858)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2013-11-17 19:11:07 +01:00
Kostya Shishkov
bc89c2902b mpegvideo: Fix swapping of UV planes for VCR2
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit bae14f38d992f326c94d93f01197ccd84ea62053)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2013-11-17 19:11:07 +01:00
Michael Niedermayer
1497633924 h264: Do not treat the initial frame special in handling of frame gaps
Not handling frame gaps led to the lack of a dummy reference
frame, which led to the failure of decode_slice_header(), which
led to one SEI recovery message being skipped, which introduced a
slightly suboptimal recovery point for at least 1 h264 file compared to
JM.

Found-by: Carl & BugMaster
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 9e5ef1c5c37208326c59d642e2dc7afd3f10b09b)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2013-11-17 19:11:07 +01:00
Michael Niedermayer
f167511753 avcodec/ffv1enc: Check high bpp RGB against coder type too
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2013-11-17 19:11:07 +01:00
Michael Niedermayer
8c00647982 avformat/utils: never decrease has_b_frames in compute_pkt_fields()
The intent of the original check was to increase has_b_frames when
it was incorrectly set to 0. Later codecs allowed larger values

Found-by: divVerent
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 8b73a3f6f6598cb9249034fa020ddead3c943e2f)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2013-11-17 19:11:07 +01:00
Michael Niedermayer
e40d01f45d avcodec/cabac: support UNCHECKED_BITSTREAM_READER = 0
Fixes overreads in HEVC
Fixes Ticket3070
Also fixed remaining issues from Ticket3075 and Ticket3076

Some lines of code taken from  0c5f839693da2276c2da23400f67a67be4ea0af1:libavcodec/x86/cabac.h
and                            0c5f839693da2276c2da23400f67a67be4ea0af1:libavcodec/cabac_functions.h

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit fa6fa2162b730336fc1d6ee0d547dcc81f4afbad)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2013-11-17 19:11:07 +01:00
Michael Niedermayer
51d1e79cc1 avformat/thp: force moving forward
Fixes infinite loop
Fixes Ticket3098

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 6c4b87d3d6ae08a6da16b4616626b4d2a726afbf)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2013-11-17 19:11:07 +01:00
Michael Niedermayer
807d85400c avformat/thp: fix variable types to avoid overflows
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit 2b1056e4e27b046af3777e8bd65a5145abff878f)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2013-11-17 19:11:07 +01:00
Michael Niedermayer
5c1e9d3722 avcodec/jpeglsdec: check err value for ls_get_code_runterm()
Fixes infinite loop
Fixes Ticket3086

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit cc0e47b55096361723b364afa43b79a3f5619cdc)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2013-11-17 19:11:07 +01:00
Anssi Hannula
b56e9beeb8 lavf/spdifdec: fix demuxing of AAC in IEC 61937
The return value of avpriv_aac_parse_header() is not checked correctly. Fix
it.

Signed-off-by: Anssi Hannula <anssi.hannula@iki.fi>
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit f86387b6c2b11650cb9d5a8fd886be76e48c665b)

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2013-11-17 19:11:07 +01:00
Michael Niedermayer
d8be5bda1b avformat/http: fix cookies
Fixes Ticket3096

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit b73900b8a6c0a23e63e84a5eed0a5b9b3ffe1198)
2013-10-31 01:55:00 +01:00
Michael Niedermayer
1cd5797f8e avcodec/bink: fix seeking to frame 0
Fixes Ticket3088

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
(cherry picked from commit cb52d6da0a9c88c584a38a9a7a94825565854b7e)
2013-10-31 00:50:24 +01:00
Michael Niedermayer
35a7b73590 update for 2.1
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
2013-10-28 01:35:03 +01:00
184 changed files with 2195 additions and 1265 deletions

View File

@ -1 +1 @@
2.0
2.1.2

1
VERSION Normal file
View File

@ -0,0 +1 @@
2.1.2

29
configure vendored
View File

@ -105,6 +105,7 @@ Configuration options:
--disable-all disable building components, libraries and programs
--enable-incompatible-libav-abi enable incompatible Libav fork ABI [no]
--enable-incompatible-fork-abi enable incompatible Libav fork ABI (deprecated) [no]
--enable-raise-major increase major version numbers in sonames [no]
Program options:
--disable-programs do not build command line programs
@ -1102,6 +1103,26 @@ require_pkg_config(){
add_extralibs $(get_safe ${pkg}_libs)
}
require_libfreetype(){
log require_libfreetype "$@"
pkg="freetype2"
check_cmd $pkg_config --exists --print-errors $pkg \
|| die "ERROR: $pkg not found"
pkg_cflags=$($pkg_config --cflags $pkg)
pkg_libs=$($pkg_config --libs $pkg)
{
echo "#include <ft2build.h>"
echo "#include FT_FREETYPE_H"
echo "long check_func(void) { return (long) FT_Init_FreeType; }"
echo "int main(void) { return 0; }"
} | check_ld "cc" $pkg_cflags $pkg_libs \
&& set_safe ${pkg}_cflags $pkg_cflags \
&& set_safe ${pkg}_libs $pkg_libs \
|| die "ERROR: $pkg not found"
add_cflags $(get_safe ${pkg}_cflags)
add_extralibs $(get_safe ${pkg}_libs)
}
hostcc_o(){
eval printf '%s\\n' $HOSTCC_O
}
@ -1280,6 +1301,7 @@ CONFIG_LIST="
network
nonfree
pic
raise_major
rdft
runtime_cpudetect
safe_bitstream_reader
@ -2878,7 +2900,9 @@ probe_cc(){
unset _depflags _DEPCMD _DEPFLAGS
_flags_filter=echo
if $_cc -v 2>&1 | grep -q '^gcc.*LLVM'; then
if $_cc --version 2>&1 | grep -q '^GNU assembler'; then
true # no-op to avoid reading stdin in following checks
elif $_cc -v 2>&1 | grep -q '^gcc.*LLVM'; then
_type=llvm_gcc
gcc_extra_ver=$(expr "$($_cc --version | head -n1)" : '.*\((.*)\)')
_ident="llvm-gcc $($_cc -dumpversion) $gcc_extra_ver"
@ -4240,7 +4264,7 @@ enabled libfaac && require2 libfaac "stdint.h faac.h" faacEncGetVersio
enabled libfdk_aac && require libfdk_aac fdk-aac/aacenc_lib.h aacEncOpen -lfdk-aac
flite_libs="-lflite_cmu_time_awb -lflite_cmu_us_awb -lflite_cmu_us_kal -lflite_cmu_us_kal16 -lflite_cmu_us_rms -lflite_cmu_us_slt -lflite_usenglish -lflite_cmulex -lflite"
enabled libflite && require2 libflite "flite/flite.h" flite_init $flite_libs
enabled libfreetype && require_pkg_config freetype2 "ft2build.h freetype/freetype.h" FT_Init_FreeType
enabled libfreetype && require_libfreetype
enabled libgme && require libgme gme/gme.h gme_new_emu -lgme -lstdc++
enabled libgsm && { for gsm_hdr in "gsm.h" "gsm/gsm.h"; do
check_lib "${gsm_hdr}" gsm_create -lgsm && break;
@ -4849,6 +4873,7 @@ get_version(){
name=$(toupper $lcname)
file=$source_path/$lcname/version.h
eval $(awk "/#define ${name}_VERSION_M/ { print \$2 \"=\" \$3 }" "$file")
enabled raise_major && eval ${name}_VERSION_MAJOR=$((${name}_VERSION_MAJOR+100))
eval ${name}_VERSION=\$${name}_VERSION_MAJOR.\$${name}_VERSION_MINOR.\$${name}_VERSION_MICRO
eval echo "${lcname}_VERSION=\$${name}_VERSION" >> config.mak
eval echo "${lcname}_VERSION_MAJOR=\$${name}_VERSION_MAJOR" >> config.mak

View File

@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.
PROJECT_NUMBER =
PROJECT_NUMBER = 2.1.2
# With the PROJECT_LOGO tag one can specify a logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55

View File

@ -786,7 +786,7 @@ Set maximum frame size, or duration of a frame in milliseconds. The
argument must be exactly the following: 2.5, 5, 10, 20, 40, 60. Smaller
frame sizes achieve lower latency but less quality at a given bitrate.
Sizes greater than 20ms are only interesting at fairly low bitrates.
The default of FFmpeg is 10ms, but is 20ms in @command{opusenc}.
The default is 20ms.
@item packet_loss (@emph{expect-loss})
Set expected packet loss percentage. The default is 0.

View File

@ -6422,9 +6422,11 @@ The main purpose of setting @option{mp} to a chroma plane is to reduce CPU
load and make pullup usable in realtime on slow machines.
@end table
For example to inverse telecined NTSC input:
For best results (without duplicated frames in the output file) it is
necessary to change the output frame rate. For example, to inverse
telecine NTSC input:
@example
pullup,fps=24000/1001
ffmpeg -i input -vf pullup -r 24000/1001 ...
@end example
@section removelogo

View File

@ -817,10 +817,26 @@ static void do_video_out(AVFormatContext *s,
nb_frames = 1;
format_video_sync = video_sync_method;
if (format_video_sync == VSYNC_AUTO)
if (format_video_sync == VSYNC_AUTO) {
format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
if ( ist
&& format_video_sync == VSYNC_CFR
&& input_files[ist->file_index]->ctx->nb_streams == 1
&& input_files[ist->file_index]->input_ts_offset == 0) {
format_video_sync = VSYNC_VSCFR;
}
if (format_video_sync == VSYNC_CFR && copy_ts) {
format_video_sync = VSYNC_VSCFR;
}
}
switch (format_video_sync) {
case VSYNC_VSCFR:
if (ost->frame_number == 0 && delta - duration >= 0.5) {
av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
delta = duration;
ost->sync_opts = lrint(sync_ipts);
}
case VSYNC_CFR:
// FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
if (delta < -1.1)
@ -2357,7 +2373,7 @@ static int transcode_init(void)
if (ost->filter && !(codec->time_base.num && codec->time_base.den))
codec->time_base = ost->filter->filter->inputs[0]->time_base;
if ( av_q2d(codec->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
&& (video_sync_method == VSYNC_CFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
&& (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
"Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
}

View File

@ -51,6 +51,7 @@
#define VSYNC_PASSTHROUGH 0
#define VSYNC_CFR 1
#define VSYNC_VFR 2
#define VSYNC_VSCFR 0xfe
#define VSYNC_DROP 0xff
#define MAX_STREAMS 1024 /* arbitrary sanity check value */
@ -281,6 +282,7 @@ typedef struct InputFile {
int eof_reached; /* true if eof reached */
int eagain; /* true if last read attempt returned EAGAIN */
int ist_index; /* index of first stream in input_streams */
int64_t input_ts_offset;
int64_t ts_offset;
int64_t last_ts;
int64_t start_time; /* user-specified start time in AV_TIME_BASE or AV_NOPTS_VALUE */

View File

@ -852,6 +852,7 @@ static int open_input_file(OptionsContext *o, const char *filename)
f->ist_index = nb_input_streams - ic->nb_streams;
f->start_time = o->start_time;
f->recording_time = o->recording_time;
f->input_ts_offset = o->input_ts_offset;
f->ts_offset = o->input_ts_offset - (copy_ts ? 0 : timestamp);
f->nb_streams = ic->nb_streams;
f->rate_emu = o->rate_emu;

View File

@ -1580,7 +1580,6 @@ static av_always_inline int process_frame(WriterContext *w,
AVCodecContext *dec_ctx = fmt_ctx->streams[pkt->stream_index]->codec;
int ret = 0, got_frame = 0;
avcodec_get_frame_defaults(frame);
if (dec_ctx->codec) {
switch (dec_ctx->codec_type) {
case AVMEDIA_TYPE_VIDEO:
@ -1634,7 +1633,7 @@ static int read_interval_packets(WriterContext *w, AVFormatContext *fmt_ctx,
const ReadInterval *interval, int64_t *cur_ts)
{
AVPacket pkt, pkt1;
AVFrame frame;
AVFrame *frame = NULL;
int ret = 0, i = 0, frame_count = 0;
int64_t start = -INT64_MAX, end = interval->end;
int has_start = 0, has_end = interval->has_end && !interval->end_is_offset;
@ -1668,6 +1667,11 @@ static int read_interval_packets(WriterContext *w, AVFormatContext *fmt_ctx,
}
}
frame = av_frame_alloc();
if (!frame) {
ret = AVERROR(ENOMEM);
goto end;
}
while (!av_read_frame(fmt_ctx, &pkt)) {
if (selected_streams[pkt.stream_index]) {
AVRational tb = fmt_ctx->streams[pkt.stream_index]->time_base;
@ -1700,7 +1704,7 @@ static int read_interval_packets(WriterContext *w, AVFormatContext *fmt_ctx,
}
if (do_read_frames) {
pkt1 = pkt;
while (pkt1.size && process_frame(w, fmt_ctx, &frame, &pkt1) > 0);
while (pkt1.size && process_frame(w, fmt_ctx, frame, &pkt1) > 0);
}
}
av_free_packet(&pkt);
@ -1712,10 +1716,11 @@ static int read_interval_packets(WriterContext *w, AVFormatContext *fmt_ctx,
for (i = 0; i < fmt_ctx->nb_streams; i++) {
pkt.stream_index = i;
if (do_read_frames)
while (process_frame(w, fmt_ctx, &frame, &pkt) > 0);
while (process_frame(w, fmt_ctx, frame, &pkt) > 0);
}
end:
av_frame_free(&frame);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not read packets in interval ");
log_read_interval(interval, NULL, AV_LOG_ERROR);

View File

@ -331,22 +331,22 @@ OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
OBJS-$(CONFIG_PAF_VIDEO_DECODER) += paf.o
OBJS-$(CONFIG_PAF_AUDIO_DECODER) += paf.o
OBJS-$(CONFIG_PAM_DECODER) += pnmdec.o pnm.o
OBJS-$(CONFIG_PAM_ENCODER) += pamenc.o pnm.o
OBJS-$(CONFIG_PAM_ENCODER) += pamenc.o
OBJS-$(CONFIG_PBM_DECODER) += pnmdec.o pnm.o
OBJS-$(CONFIG_PBM_ENCODER) += pnmenc.o pnm.o
OBJS-$(CONFIG_PBM_ENCODER) += pnmenc.o
OBJS-$(CONFIG_PCX_DECODER) += pcx.o
OBJS-$(CONFIG_PCX_ENCODER) += pcxenc.o
OBJS-$(CONFIG_PGM_DECODER) += pnmdec.o pnm.o
OBJS-$(CONFIG_PGM_ENCODER) += pnmenc.o pnm.o
OBJS-$(CONFIG_PGM_ENCODER) += pnmenc.o
OBJS-$(CONFIG_PGMYUV_DECODER) += pnmdec.o pnm.o
OBJS-$(CONFIG_PGMYUV_ENCODER) += pnmenc.o pnm.o
OBJS-$(CONFIG_PGMYUV_ENCODER) += pnmenc.o
OBJS-$(CONFIG_PGSSUB_DECODER) += pgssubdec.o
OBJS-$(CONFIG_PICTOR_DECODER) += pictordec.o cga_data.o
OBJS-$(CONFIG_PJS_DECODER) += textdec.o ass.o
OBJS-$(CONFIG_PNG_DECODER) += png.o pngdec.o pngdsp.o
OBJS-$(CONFIG_PNG_ENCODER) += png.o pngenc.o
OBJS-$(CONFIG_PPM_DECODER) += pnmdec.o pnm.o
OBJS-$(CONFIG_PPM_ENCODER) += pnmenc.o pnm.o
OBJS-$(CONFIG_PPM_ENCODER) += pnmenc.o
OBJS-$(CONFIG_PRORES_DECODER) += proresdec2.o proresdsp.o proresdata.o
OBJS-$(CONFIG_PRORES_LGPL_DECODER) += proresdec_lgpl.o proresdsp.o proresdata.o
OBJS-$(CONFIG_PRORES_ENCODER) += proresenc_anatoliy.o

View File

@ -40,9 +40,6 @@
#define C64YRES 200
typedef struct A64Context {
/* general variables */
AVFrame picture;
/* variables for multicolor modes */
AVLFG randctx;
int mc_lifetime;
@ -189,6 +186,7 @@ static void render_charset(AVCodecContext *avctx, uint8_t *charset,
static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
{
A64Context *c = avctx->priv_data;
av_frame_free(&avctx->coded_frame);
av_free(c->mc_meta_charset);
av_free(c->mc_best_cb);
av_free(c->mc_charset);
@ -240,8 +238,12 @@ static av_cold int a64multi_init_encoder(AVCodecContext *avctx)
AV_WB32(avctx->extradata, c->mc_lifetime);
AV_WB32(avctx->extradata + 16, INTERLACED);
avcodec_get_frame_defaults(&c->picture);
avctx->coded_frame = &c->picture;
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame) {
a64multi_close_encoder(avctx);
return AVERROR(ENOMEM);
}
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
if (!avctx->codec_tag)
@ -271,7 +273,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
A64Context *c = avctx->priv_data;
AVFrame *const p = &c->picture;
AVFrame *const p = avctx->coded_frame;
int frame;
int x, y;

View File

@ -488,6 +488,10 @@ static void ac3_decode_transform_coeffs_ch(AC3DecodeContext *s, int ch_index, ma
break;
default: /* 6 to 15 */
/* Shift mantissa and sign-extend it. */
if (bap > 15) {
av_log(s->avctx, AV_LOG_ERROR, "bap %d is invalid in plain AC-3\n", bap);
bap = 15;
}
mantissa = get_sbits(gbc, quantization_tab[bap]);
mantissa <<= 24 - quantization_tab[bap];
break;

View File

@ -490,7 +490,8 @@ static int alac_decode_frame(AVCodecContext *avctx, void *data,
avpkt->size * 8 - get_bits_count(&alac->gb));
}
*got_frame_ptr = 1;
if (alac->channels == ch)
*got_frame_ptr = 1;
return avpkt->size;
}

View File

@ -41,10 +41,10 @@ function ff_scalarproduct_int16_neon, export=1
vpadd.s32 d16, d0, d1
vpadd.s32 d17, d2, d3
vpadd.s32 d10, d4, d5
vpadd.s32 d11, d6, d7
vpadd.s32 d18, d4, d5
vpadd.s32 d19, d6, d7
vpadd.s32 d0, d16, d17
vpadd.s32 d1, d10, d11
vpadd.s32 d1, d18, d19
vpadd.s32 d2, d0, d1
vpaddl.s32 d3, d2
vmov.32 r0, d3[0]
@ -81,10 +81,10 @@ function ff_scalarproduct_and_madd_int16_neon, export=1
vpadd.s32 d16, d0, d1
vpadd.s32 d17, d2, d3
vpadd.s32 d10, d4, d5
vpadd.s32 d11, d6, d7
vpadd.s32 d18, d4, d5
vpadd.s32 d19, d6, d7
vpadd.s32 d0, d16, d17
vpadd.s32 d1, d10, d11
vpadd.s32 d1, d18, d19
vpadd.s32 d2, d0, d1
vpaddl.s32 d3, d2
vmov.32 r0, d3[0]

View File

@ -89,6 +89,5 @@ av_cold void ff_asv_common_init(AVCodecContext *avctx) {
a->mb_width2 = (avctx->width + 0) / 16;
a->mb_height2 = (avctx->height + 0) / 16;
avctx->coded_frame= &a->picture;
a->avctx= avctx;
}

View File

@ -38,7 +38,6 @@
typedef struct ASV1Context{
AVCodecContext *avctx;
DSPContext dsp;
AVFrame picture;
PutBitContext pb;
GetBitContext gb;
ScanTable scantable;

View File

@ -148,14 +148,16 @@ static inline int encode_mb(ASV1Context *a, int16_t block[6][64]){
return 0;
}
static inline void dct_get(ASV1Context *a, int mb_x, int mb_y){
static inline void dct_get(ASV1Context *a, const AVFrame *frame,
int mb_x, int mb_y)
{
int16_t (*block)[64]= a->block;
int linesize= a->picture.linesize[0];
int linesize = frame->linesize[0];
int i;
uint8_t *ptr_y = a->picture.data[0] + (mb_y * 16* linesize ) + mb_x * 16;
uint8_t *ptr_cb = a->picture.data[1] + (mb_y * 8 * a->picture.linesize[1]) + mb_x * 8;
uint8_t *ptr_cr = a->picture.data[2] + (mb_y * 8 * a->picture.linesize[2]) + mb_x * 8;
uint8_t *ptr_y = frame->data[0] + (mb_y * 16* linesize ) + mb_x * 16;
uint8_t *ptr_cb = frame->data[1] + (mb_y * 8 * frame->linesize[1]) + mb_x * 8;
uint8_t *ptr_cr = frame->data[2] + (mb_y * 8 * frame->linesize[2]) + mb_x * 8;
a->dsp.get_pixels(block[0], ptr_y , linesize);
a->dsp.get_pixels(block[1], ptr_y + 8, linesize);
@ -165,8 +167,8 @@ static inline void dct_get(ASV1Context *a, int mb_x, int mb_y){
a->dsp.fdct(block[i]);
if(!(a->avctx->flags&CODEC_FLAG_GRAY)){
a->dsp.get_pixels(block[4], ptr_cb, a->picture.linesize[1]);
a->dsp.get_pixels(block[5], ptr_cr, a->picture.linesize[2]);
a->dsp.get_pixels(block[4], ptr_cb, frame->linesize[1]);
a->dsp.get_pixels(block[5], ptr_cr, frame->linesize[2]);
for(i=4; i<6; i++)
a->dsp.fdct(block[i]);
}
@ -176,7 +178,6 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
ASV1Context * const a = avctx->priv_data;
AVFrame * const p= &a->picture;
int size, ret;
int mb_x, mb_y;
@ -186,13 +187,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
init_put_bits(&a->pb, pkt->data, pkt->size);
*p = *pict;
p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
for(mb_y=0; mb_y<a->mb_height2; mb_y++){
for(mb_x=0; mb_x<a->mb_width2; mb_x++){
dct_get(a, mb_x, mb_y);
dct_get(a, pict, mb_x, mb_y);
encode_mb(a, a->block);
}
}
@ -200,7 +197,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
if(a->mb_width2 != a->mb_width){
mb_x= a->mb_width2;
for(mb_y=0; mb_y<a->mb_height2; mb_y++){
dct_get(a, mb_x, mb_y);
dct_get(a, pict, mb_x, mb_y);
encode_mb(a, a->block);
}
}
@ -208,7 +205,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
if(a->mb_height2 != a->mb_height){
mb_y= a->mb_height2;
for(mb_x=0; mb_x<a->mb_width; mb_x++){
dct_get(a, mb_x, mb_y);
dct_get(a, pict, mb_x, mb_y);
encode_mb(a, a->block);
}
}
@ -240,6 +237,12 @@ static av_cold int encode_init(AVCodecContext *avctx){
int i;
const int scale= avctx->codec_id == AV_CODEC_ID_ASV1 ? 1 : 2;
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
ff_asv_common_init(avctx);
if(avctx->global_quality == 0) avctx->global_quality= 4*FF_QUALITY_SCALE;

View File

@ -380,7 +380,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
int av_packet_split_side_data(AVPacket *pkt){
if (!pkt->side_data_elems && pkt->size >12 && AV_RB64(pkt->data + pkt->size - 8) == FF_MERGE_MARKER){
int i;
unsigned int size, orig_pktsize = pkt->size;
unsigned int size;
uint8_t *p;
p = pkt->data + pkt->size - 8 - 5;
@ -413,13 +413,6 @@ int av_packet_split_side_data(AVPacket *pkt){
p-= size+5;
}
pkt->size -= 8;
/* FFMIN() prevents overflow in case the packet wasn't allocated with
* proper padding.
* If the side data is smaller than the buffer padding size, the
* remaining bytes should have already been filled with zeros by the
* original packet allocation anyway. */
memset(pkt->data + pkt->size, 0,
FFMIN(orig_pktsize - pkt->size, FF_INPUT_BUFFER_PADDING_SIZE));
pkt->side_data_elems = i+1;
return 1;
}

View File

@ -25,7 +25,7 @@
typedef struct {
AVFrame picture;
AVFrame *frame;
} AvsContext;
typedef enum {
@ -52,7 +52,7 @@ avs_decode_frame(AVCodecContext * avctx,
int buf_size = avpkt->size;
AvsContext *const avs = avctx->priv_data;
AVFrame *picture = data;
AVFrame *const p = &avs->picture;
AVFrame *const p = avs->frame;
const uint8_t *table, *vect;
uint8_t *out;
int i, j, x, y, stride, ret, vect_w = 3, vect_h = 3;
@ -65,8 +65,8 @@ avs_decode_frame(AVCodecContext * avctx,
p->pict_type = AV_PICTURE_TYPE_P;
p->key_frame = 0;
out = avs->picture.data[0];
stride = avs->picture.linesize[0];
out = p->data[0];
stride = p->linesize[0];
if (buf_end - buf < 4)
return AVERROR_INVALIDDATA;
@ -76,7 +76,7 @@ avs_decode_frame(AVCodecContext * avctx,
if (type == AVS_PALETTE) {
int first, last;
uint32_t *pal = (uint32_t *) avs->picture.data[1];
uint32_t *pal = (uint32_t *) p->data[1];
first = AV_RL16(buf);
last = first + AV_RL16(buf + 2);
@ -149,7 +149,7 @@ avs_decode_frame(AVCodecContext * avctx,
align_get_bits(&change_map);
}
if ((ret = av_frame_ref(picture, &avs->picture)) < 0)
if ((ret = av_frame_ref(picture, p)) < 0)
return ret;
*got_frame = 1;
@ -159,16 +159,21 @@ avs_decode_frame(AVCodecContext * avctx,
static av_cold int avs_decode_init(AVCodecContext * avctx)
{
AvsContext *s = avctx->priv_data;
s->frame = av_frame_alloc();
if (!s->frame)
return AVERROR(ENOMEM);
avctx->pix_fmt = AV_PIX_FMT_PAL8;
avcodec_set_dimensions(avctx, 318, 198);
avcodec_get_frame_defaults(&s->picture);
return 0;
}
static av_cold int avs_decode_end(AVCodecContext *avctx)
{
AvsContext *s = avctx->priv_data;
av_frame_unref(&s->picture);
av_frame_free(&s->frame);
return 0;
}

View File

@ -34,21 +34,25 @@
#include "internal.h"
typedef struct BethsoftvidContext {
AVFrame frame;
AVFrame *frame;
GetByteContext g;
} BethsoftvidContext;
static av_cold int bethsoftvid_decode_init(AVCodecContext *avctx)
{
BethsoftvidContext *vid = avctx->priv_data;
avcodec_get_frame_defaults(&vid->frame);
avctx->pix_fmt = AV_PIX_FMT_PAL8;
vid->frame = av_frame_alloc();
if (!vid->frame)
return AVERROR(ENOMEM);
return 0;
}
static int set_palette(BethsoftvidContext *ctx)
{
uint32_t *palette = (uint32_t *)ctx->frame.data[1];
uint32_t *palette = (uint32_t *)ctx->frame->data[1];
int a;
if (bytestream2_get_bytes_left(&ctx->g) < 256*3)
@ -58,7 +62,7 @@ static int set_palette(BethsoftvidContext *ctx)
palette[a] = 0xFFU << 24 | bytestream2_get_be24u(&ctx->g) * 4;
palette[a] |= palette[a] >> 6 & 0x30303;
}
ctx->frame.palette_has_changed = 1;
ctx->frame->palette_has_changed = 1;
return 0;
}
@ -75,9 +79,9 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx,
int code, ret;
int yoffset;
if ((ret = ff_reget_buffer(avctx, &vid->frame)) < 0)
if ((ret = ff_reget_buffer(avctx, vid->frame)) < 0)
return ret;
wrap_to_next_line = vid->frame.linesize[0] - avctx->width;
wrap_to_next_line = vid->frame->linesize[0] - avctx->width;
if (avpkt->side_data_elems > 0 &&
avpkt->side_data[0].type == AV_PKT_DATA_PALETTE) {
@ -88,8 +92,8 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx,
}
bytestream2_init(&vid->g, avpkt->data, avpkt->size);
dst = vid->frame.data[0];
frame_end = vid->frame.data[0] + vid->frame.linesize[0] * avctx->height;
dst = vid->frame->data[0];
frame_end = vid->frame->data[0] + vid->frame->linesize[0] * avctx->height;
switch(block_type = bytestream2_get_byte(&vid->g)){
case PALETTE_BLOCK: {
@ -104,7 +108,7 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx,
yoffset = bytestream2_get_le16(&vid->g);
if(yoffset >= avctx->height)
return AVERROR_INVALIDDATA;
dst += vid->frame.linesize[0] * yoffset;
dst += vid->frame->linesize[0] * yoffset;
}
// main code
@ -134,7 +138,7 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx,
}
end:
if ((ret = av_frame_ref(data, &vid->frame)) < 0)
if ((ret = av_frame_ref(data, vid->frame)) < 0)
return ret;
*got_frame = 1;
@ -145,7 +149,7 @@ static int bethsoftvid_decode_frame(AVCodecContext *avctx,
static av_cold int bethsoftvid_decode_end(AVCodecContext *avctx)
{
BethsoftvidContext * vid = avctx->priv_data;
av_frame_unref(&vid->frame);
av_frame_free(&vid->frame);
return 0;
}

View File

@ -120,6 +120,7 @@ typedef struct BinkContext {
int version; ///< internal Bink file version
int has_alpha;
int swap_planes;
unsigned frame_num;
Bundle bundle[BINKB_NB_SRC]; ///< bundles for decoding all data types
Tree col_high[16]; ///< trees for decoding high nibble in "colours" data type
@ -1206,6 +1207,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
if (c->version >= 'i')
skip_bits_long(&gb, 32);
c->frame_num++;
for (plane = 0; plane < 3; plane++) {
plane_idx = (!plane || !c->swap_planes) ? plane : (plane ^ 3);
@ -1214,7 +1217,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
return ret;
} else {
if ((ret = binkb_decode_plane(c, frame, &gb, plane_idx,
!avctx->frame_number, !!plane)) < 0)
c->frame_num == 1, !!plane)) < 0)
return ret;
}
if (get_bits_count(&gb) >= bits_count)
@ -1332,6 +1335,13 @@ static av_cold int decode_end(AVCodecContext *avctx)
return 0;
}
static void flush(AVCodecContext *avctx)
{
BinkContext * const c = avctx->priv_data;
c->frame_num = 0;
}
AVCodec ff_bink_decoder = {
.name = "binkvideo",
.long_name = NULL_IF_CONFIG_SMALL("Bink video"),
@ -1341,5 +1351,6 @@ AVCodec ff_bink_decoder = {
.init = decode_init,
.close = decode_end,
.decode = decode_frame,
.flush = flush,
.capabilities = CODEC_CAP_DR1,
};

View File

@ -60,22 +60,26 @@ static av_cold int bmp_encode_init(AVCodecContext *avctx){
return AVERROR(EINVAL);
}
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
return 0;
}
static int bmp_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
const AVFrame * const p = pict;
int n_bytes_image, n_bytes_per_row, n_bytes, i, n, hsize, ret;
const uint32_t *pal = NULL;
uint32_t palette256[256];
int pad_bytes_per_row, pal_entries = 0, compression = BMP_RGB;
int bit_count = avctx->bits_per_coded_sample;
uint8_t *ptr, *buf;
AVFrame * const p = (AVFrame *)pict;
p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
switch (avctx->pix_fmt) {
case AV_PIX_FMT_RGB444:
compression = BMP_BITFIELDS;
@ -159,6 +163,12 @@ static int bmp_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
return 0;
}
static av_cold int bmp_encode_close(AVCodecContext *avctx)
{
av_frame_free(&avctx->coded_frame);
return 0;
}
AVCodec ff_bmp_encoder = {
.name = "bmp",
.long_name = NULL_IF_CONFIG_SMALL("BMP (Windows and OS/2 bitmap)"),
@ -166,6 +176,7 @@ AVCodec ff_bmp_encoder = {
.id = AV_CODEC_ID_BMP,
.init = bmp_encode_init,
.encode2 = bmp_encode_frame,
.close = bmp_encode_close,
.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR24,
AV_PIX_FMT_RGB565, AV_PIX_FMT_RGB555, AV_PIX_FMT_RGB444,

View File

@ -24,7 +24,7 @@
#include "internal.h"
typedef struct {
AVFrame pictures[2];
AVFrame *pictures[2];
int currentpic;
} C93DecoderContext;
@ -46,21 +46,27 @@ typedef enum {
#define C93_HAS_PALETTE 0x01
#define C93_FIRST_FRAME 0x02
static av_cold int decode_init(AVCodecContext *avctx)
{
C93DecoderContext *s = avctx->priv_data;
avctx->pix_fmt = AV_PIX_FMT_PAL8;
avcodec_get_frame_defaults(&s->pictures[0]);
avcodec_get_frame_defaults(&s->pictures[1]);
return 0;
}
static av_cold int decode_end(AVCodecContext *avctx)
{
C93DecoderContext * const c93 = avctx->priv_data;
av_frame_unref(&c93->pictures[0]);
av_frame_unref(&c93->pictures[1]);
av_frame_free(&c93->pictures[0]);
av_frame_free(&c93->pictures[1]);
return 0;
}
static av_cold int decode_init(AVCodecContext *avctx)
{
C93DecoderContext *s = avctx->priv_data;
avctx->pix_fmt = AV_PIX_FMT_PAL8;
s->pictures[0] = av_frame_alloc();
s->pictures[1] = av_frame_alloc();
if (!s->pictures[0] || !s->pictures[1]) {
decode_end(avctx);
return AVERROR(ENOMEM);
}
return 0;
}
@ -121,8 +127,8 @@ static int decode_frame(AVCodecContext *avctx, void *data,
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
C93DecoderContext * const c93 = avctx->priv_data;
AVFrame * const newpic = &c93->pictures[c93->currentpic];
AVFrame * const oldpic = &c93->pictures[c93->currentpic^1];
AVFrame * const newpic = c93->pictures[c93->currentpic];
AVFrame * const oldpic = c93->pictures[c93->currentpic^1];
GetByteContext gb;
uint8_t *out;
int stride, ret, i, x, y, b, bt = 0;

View File

@ -301,7 +301,7 @@ STOP_TIMER("get_cabac_bypass")
for(i=0; i<SIZE; i++){
START_TIMER
if( (r[i]&1) != get_cabac(&c, state) )
if( (r[i]&1) != get_cabac_noinline(&c, state) )
av_log(NULL, AV_LOG_ERROR, "CABAC failure at %d\n", i);
STOP_TIMER("get_cabac")
}

View File

@ -49,7 +49,10 @@ static void refill(CABACContext *c){
c->low+= c->bytestream[0]<<1;
#endif
c->low -= CABAC_MASK;
c->bytestream += CABAC_BITS / 8;
#if !UNCHECKED_BITSTREAM_READER
if (c->bytestream < c->bytestream_end)
#endif
c->bytestream += CABAC_BITS / 8;
}
static inline void renorm_cabac_decoder_once(CABACContext *c){
@ -76,7 +79,10 @@ static void refill2(CABACContext *c){
#endif
c->low += x<<i;
c->bytestream += CABAC_BITS/8;
#if !UNCHECKED_BITSTREAM_READER
if (c->bytestream < c->bytestream_end)
#endif
c->bytestream += CABAC_BITS/8;
}
static av_always_inline int get_cabac_inline(CABACContext *c, uint8_t * const state){

View File

@ -99,16 +99,21 @@ AVCodec ff_cljr_decoder = {
#if CONFIG_CLJR_ENCODER
typedef struct CLJRContext {
AVClass *avclass;
AVFrame picture;
int dither_type;
} CLJRContext;
static av_cold int encode_init(AVCodecContext *avctx)
{
CLJRContext * const a = avctx->priv_data;
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
avctx->coded_frame = &a->picture;
return 0;
}
static av_cold int encode_close(AVCodecContext *avctx)
{
av_frame_free(&avctx->coded_frame);
return 0;
}
@ -183,6 +188,7 @@ AVCodec ff_cljr_encoder = {
.priv_data_size = sizeof(CLJRContext),
.init = encode_init,
.encode2 = encode_frame,
.close = encode_close,
.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV411P,
AV_PIX_FMT_NONE },
.priv_class = &cljr_class,

View File

@ -80,7 +80,7 @@
#define DIVRNDUP(a, b) (((a) + (b) - 1) / (b))
typedef struct {
AVFrame avframe;
AVFrame *avframe;
int interpolated[3]; /* 1 if hpel[] is valid */
uint8_t *hpel[3][4];
uint8_t *hpel_base[3][4];
@ -291,7 +291,7 @@ static DiracFrame *remove_frame(DiracFrame *framelist[], int picnum)
int i, remove_idx = -1;
for (i = 0; framelist[i]; i++)
if (framelist[i]->avframe.display_picture_number == picnum) {
if (framelist[i]->avframe->display_picture_number == picnum) {
remove_pic = framelist[i];
remove_idx = i;
}
@ -364,8 +364,8 @@ static void free_sequence_buffers(DiracContext *s)
int i, j, k;
for (i = 0; i < MAX_FRAMES; i++) {
if (s->all_frames[i].avframe.data[0]) {
av_frame_unref(&s->all_frames[i].avframe);
if (s->all_frames[i].avframe->data[0]) {
av_frame_unref(s->all_frames[i].avframe);
memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
}
@ -393,6 +393,8 @@ static void free_sequence_buffers(DiracContext *s)
static av_cold int dirac_decode_init(AVCodecContext *avctx)
{
DiracContext *s = avctx->priv_data;
int i;
s->avctx = avctx;
s->frame_number = -1;
@ -404,6 +406,15 @@ static av_cold int dirac_decode_init(AVCodecContext *avctx)
ff_dsputil_init(&s->dsp, avctx);
ff_diracdsp_init(&s->diracdsp);
for (i = 0; i < MAX_FRAMES; i++) {
s->all_frames[i].avframe = av_frame_alloc();
if (!s->all_frames[i].avframe) {
while (i > 0)
av_frame_free(&s->all_frames[--i].avframe);
return AVERROR(ENOMEM);
}
}
return 0;
}
@ -417,7 +428,13 @@ static void dirac_decode_flush(AVCodecContext *avctx)
static av_cold int dirac_decode_end(AVCodecContext *avctx)
{
DiracContext *s = avctx->priv_data;
int i;
dirac_decode_flush(avctx);
for (i = 0; i < MAX_FRAMES; i++)
av_frame_free(&s->all_frames[i].avframe);
return 0;
}
@ -1519,8 +1536,8 @@ static void interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, in
just use 8 for everything for the moment */
int i, edge = EDGE_WIDTH/2;
ref->hpel[plane][0] = ref->avframe.data[plane];
s->dsp.draw_edges(ref->hpel[plane][0], ref->avframe.linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM); /* EDGE_TOP | EDGE_BOTTOM values just copied to make it build, this needs to be ensured */
ref->hpel[plane][0] = ref->avframe->data[plane];
s->dsp.draw_edges(ref->hpel[plane][0], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM); /* EDGE_TOP | EDGE_BOTTOM values just copied to make it build, this needs to be ensured */
/* no need for hpel if we only have fpel vectors */
if (!s->mv_precision)
@ -1528,18 +1545,18 @@ static void interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, in
for (i = 1; i < 4; i++) {
if (!ref->hpel_base[plane][i])
ref->hpel_base[plane][i] = av_malloc((height+2*edge) * ref->avframe.linesize[plane] + 32);
ref->hpel_base[plane][i] = av_malloc((height+2*edge) * ref->avframe->linesize[plane] + 32);
/* we need to be 16-byte aligned even for chroma */
ref->hpel[plane][i] = ref->hpel_base[plane][i] + edge*ref->avframe.linesize[plane] + 16;
ref->hpel[plane][i] = ref->hpel_base[plane][i] + edge*ref->avframe->linesize[plane] + 16;
}
if (!ref->interpolated[plane]) {
s->diracdsp.dirac_hpel_filter(ref->hpel[plane][1], ref->hpel[plane][2],
ref->hpel[plane][3], ref->hpel[plane][0],
ref->avframe.linesize[plane], width, height);
s->dsp.draw_edges(ref->hpel[plane][1], ref->avframe.linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
s->dsp.draw_edges(ref->hpel[plane][2], ref->avframe.linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
s->dsp.draw_edges(ref->hpel[plane][3], ref->avframe.linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
ref->avframe->linesize[plane], width, height);
s->dsp.draw_edges(ref->hpel[plane][1], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
s->dsp.draw_edges(ref->hpel[plane][2], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
s->dsp.draw_edges(ref->hpel[plane][3], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
}
ref->interpolated[plane] = 1;
}
@ -1565,7 +1582,7 @@ static int dirac_decode_frame_internal(DiracContext *s)
for (comp = 0; comp < 3; comp++) {
Plane *p = &s->plane[comp];
uint8_t *frame = s->current_picture->avframe.data[comp];
uint8_t *frame = s->current_picture->avframe->data[comp];
/* FIXME: small resolutions */
for (i = 0; i < 4; i++)
@ -1640,7 +1657,7 @@ static int dirac_decode_picture_header(DiracContext *s)
GetBitContext *gb = &s->gb;
/* [DIRAC_STD] 11.1.1 Picture Header. picture_header() PICTURE_NUM */
picnum = s->current_picture->avframe.display_picture_number = get_bits_long(gb, 32);
picnum = s->current_picture->avframe->display_picture_number = get_bits_long(gb, 32);
av_log(s->avctx,AV_LOG_DEBUG,"PICTURE_NUM: %d\n",picnum);
@ -1659,9 +1676,9 @@ static int dirac_decode_picture_header(DiracContext *s)
/* Jordi: this is needed if the referenced picture hasn't yet arrived */
for (j = 0; j < MAX_REFERENCE_FRAMES && refdist; j++)
if (s->ref_frames[j]
&& FFABS(s->ref_frames[j]->avframe.display_picture_number - refnum) < refdist) {
&& FFABS(s->ref_frames[j]->avframe->display_picture_number - refnum) < refdist) {
s->ref_pics[i] = s->ref_frames[j];
refdist = FFABS(s->ref_frames[j]->avframe.display_picture_number - refnum);
refdist = FFABS(s->ref_frames[j]->avframe->display_picture_number - refnum);
}
if (!s->ref_pics[i] || refdist)
@ -1670,21 +1687,21 @@ static int dirac_decode_picture_header(DiracContext *s)
/* if there were no references at all, allocate one */
if (!s->ref_pics[i])
for (j = 0; j < MAX_FRAMES; j++)
if (!s->all_frames[j].avframe.data[0]) {
if (!s->all_frames[j].avframe->data[0]) {
s->ref_pics[i] = &s->all_frames[j];
ff_get_buffer(s->avctx, &s->ref_pics[i]->avframe, AV_GET_BUFFER_FLAG_REF);
ff_get_buffer(s->avctx, s->ref_pics[i]->avframe, AV_GET_BUFFER_FLAG_REF);
break;
}
}
/* retire the reference frames that are not used anymore */
if (s->current_picture->avframe.reference) {
if (s->current_picture->avframe->reference) {
retire = picnum + dirac_get_se_golomb(gb);
if (retire != picnum) {
DiracFrame *retire_pic = remove_frame(s->ref_frames, retire);
if (retire_pic)
retire_pic->avframe.reference &= DELAYED_PIC_REF;
retire_pic->avframe->reference &= DELAYED_PIC_REF;
else
av_log(s->avctx, AV_LOG_DEBUG, "Frame to retire not found\n");
}
@ -1692,7 +1709,7 @@ static int dirac_decode_picture_header(DiracContext *s)
/* if reference array is full, remove the oldest as per the spec */
while (add_frame(s->ref_frames, MAX_REFERENCE_FRAMES, s->current_picture)) {
av_log(s->avctx, AV_LOG_ERROR, "Reference frame overflow\n");
remove_frame(s->ref_frames, s->ref_frames[0]->avframe.display_picture_number)->avframe.reference &= DELAYED_PIC_REF;
remove_frame(s->ref_frames, s->ref_frames[0]->avframe->display_picture_number)->avframe->reference &= DELAYED_PIC_REF;
}
}
@ -1717,7 +1734,7 @@ static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame)
/* find frame with lowest picture number */
for (i = 1; s->delay_frames[i]; i++)
if (s->delay_frames[i]->avframe.display_picture_number < out->avframe.display_picture_number) {
if (s->delay_frames[i]->avframe->display_picture_number < out->avframe->display_picture_number) {
out = s->delay_frames[i];
out_idx = i;
}
@ -1726,9 +1743,9 @@ static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame)
s->delay_frames[i] = s->delay_frames[i+1];
if (out) {
out->avframe.reference ^= DELAYED_PIC_REF;
out->avframe->reference ^= DELAYED_PIC_REF;
*got_frame = 1;
if((ret = av_frame_ref(picture, &out->avframe)) < 0)
if((ret = av_frame_ref(picture, out->avframe)) < 0)
return ret;
}
@ -1790,14 +1807,14 @@ static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int
/* find an unused frame */
for (i = 0; i < MAX_FRAMES; i++)
if (s->all_frames[i].avframe.data[0] == NULL)
if (s->all_frames[i].avframe->data[0] == NULL)
pic = &s->all_frames[i];
if (!pic) {
av_log(avctx, AV_LOG_ERROR, "framelist full\n");
return -1;
}
avcodec_get_frame_defaults(&pic->avframe);
av_frame_unref(pic->avframe);
/* [DIRAC_STD] Defined in 9.6.1 ... */
tmp = parse_code & 0x03; /* [DIRAC_STD] num_refs() */
@ -1808,16 +1825,16 @@ static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int
s->num_refs = tmp;
s->is_arith = (parse_code & 0x48) == 0x08; /* [DIRAC_STD] using_ac() */
s->low_delay = (parse_code & 0x88) == 0x88; /* [DIRAC_STD] is_low_delay() */
pic->avframe.reference = (parse_code & 0x0C) == 0x0C; /* [DIRAC_STD] is_reference() */
pic->avframe.key_frame = s->num_refs == 0; /* [DIRAC_STD] is_intra() */
pic->avframe.pict_type = s->num_refs + 1; /* Definition of AVPictureType in avutil.h */
pic->avframe->reference = (parse_code & 0x0C) == 0x0C; /* [DIRAC_STD] is_reference() */
pic->avframe->key_frame = s->num_refs == 0; /* [DIRAC_STD] is_intra() */
pic->avframe->pict_type = s->num_refs + 1; /* Definition of AVPictureType in avutil.h */
if ((ret = ff_get_buffer(avctx, &pic->avframe, (parse_code & 0x0C) == 0x0C ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
if ((ret = ff_get_buffer(avctx, pic->avframe, (parse_code & 0x0C) == 0x0C ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
return ret;
s->current_picture = pic;
s->plane[0].stride = pic->avframe.linesize[0];
s->plane[1].stride = pic->avframe.linesize[1];
s->plane[2].stride = pic->avframe.linesize[2];
s->plane[0].stride = pic->avframe->linesize[0];
s->plane[1].stride = pic->avframe->linesize[1];
s->plane[2].stride = pic->avframe->linesize[2];
/* [DIRAC_STD] 11.1 Picture parse. picture_parse() */
if (dirac_decode_picture_header(s))
@ -1833,7 +1850,7 @@ static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int
static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt)
{
DiracContext *s = avctx->priv_data;
DiracFrame *picture = data;
AVFrame *picture = data;
uint8_t *buf = pkt->data;
int buf_size = pkt->size;
int i, data_unit_size, buf_idx = 0;
@ -1841,8 +1858,8 @@ static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
/* release unused frames */
for (i = 0; i < MAX_FRAMES; i++)
if (s->all_frames[i].avframe.data[0] && !s->all_frames[i].avframe.reference) {
av_frame_unref(&s->all_frames[i].avframe);
if (s->all_frames[i].avframe->data[0] && !s->all_frames[i].avframe->reference) {
av_frame_unref(s->all_frames[i].avframe);
memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
}
@ -1887,40 +1904,40 @@ static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
if (!s->current_picture)
return buf_size;
if (s->current_picture->avframe.display_picture_number > s->frame_number) {
if (s->current_picture->avframe->display_picture_number > s->frame_number) {
DiracFrame *delayed_frame = remove_frame(s->delay_frames, s->frame_number);
s->current_picture->avframe.reference |= DELAYED_PIC_REF;
s->current_picture->avframe->reference |= DELAYED_PIC_REF;
if (add_frame(s->delay_frames, MAX_DELAY, s->current_picture)) {
int min_num = s->delay_frames[0]->avframe.display_picture_number;
int min_num = s->delay_frames[0]->avframe->display_picture_number;
/* Too many delayed frames, so we display the frame with the lowest pts */
av_log(avctx, AV_LOG_ERROR, "Delay frame overflow\n");
delayed_frame = s->delay_frames[0];
for (i = 1; s->delay_frames[i]; i++)
if (s->delay_frames[i]->avframe.display_picture_number < min_num)
min_num = s->delay_frames[i]->avframe.display_picture_number;
if (s->delay_frames[i]->avframe->display_picture_number < min_num)
min_num = s->delay_frames[i]->avframe->display_picture_number;
delayed_frame = remove_frame(s->delay_frames, min_num);
add_frame(s->delay_frames, MAX_DELAY, s->current_picture);
}
if (delayed_frame) {
delayed_frame->avframe.reference ^= DELAYED_PIC_REF;
if((ret=av_frame_ref(data, &delayed_frame->avframe)) < 0)
delayed_frame->avframe->reference ^= DELAYED_PIC_REF;
if((ret=av_frame_ref(data, delayed_frame->avframe)) < 0)
return ret;
*got_frame = 1;
}
} else if (s->current_picture->avframe.display_picture_number == s->frame_number) {
} else if (s->current_picture->avframe->display_picture_number == s->frame_number) {
/* The right frame at the right time :-) */
if((ret=av_frame_ref(data, &s->current_picture->avframe)) < 0)
if((ret=av_frame_ref(data, s->current_picture->avframe)) < 0)
return ret;
*got_frame = 1;
}
if (*got_frame)
s->frame_number = picture->avframe.display_picture_number + 1;
s->frame_number = picture->display_picture_number + 1;
return buf_idx;
}

View File

@ -329,9 +329,12 @@ static av_cold int dnxhd_encode_init(AVCodecContext *avctx)
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_bits, ctx->m.mb_num *sizeof(uint16_t), fail);
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_qscale, ctx->m.mb_num *sizeof(uint8_t), fail);
ctx->frame.key_frame = 1;
ctx->frame.pict_type = AV_PICTURE_TYPE_I;
ctx->m.avctx->coded_frame = &ctx->frame;
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
avctx->coded_frame->key_frame = 1;
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
if (avctx->thread_count > MAX_THREADS) {
av_log(avctx, AV_LOG_ERROR, "too many threads\n");
@ -922,19 +925,14 @@ static void dnxhd_load_picture(DNXHDEncContext *ctx, const AVFrame *frame)
{
int i;
for (i = 0; i < 3; i++) {
ctx->frame.data[i] = frame->data[i];
ctx->frame.linesize[i] = frame->linesize[i];
}
for (i = 0; i < ctx->m.avctx->thread_count; i++) {
ctx->thread[i]->m.linesize = ctx->frame.linesize[0]<<ctx->interlaced;
ctx->thread[i]->m.uvlinesize = ctx->frame.linesize[1]<<ctx->interlaced;
ctx->thread[i]->m.linesize = frame->linesize[0] << ctx->interlaced;
ctx->thread[i]->m.uvlinesize = frame->linesize[1] << ctx->interlaced;
ctx->thread[i]->dct_y_offset = ctx->m.linesize *8;
ctx->thread[i]->dct_uv_offset = ctx->m.uvlinesize*8;
}
ctx->frame.interlaced_frame = frame->interlaced_frame;
ctx->m.avctx->coded_frame->interlaced_frame = frame->interlaced_frame;
ctx->cur_field = frame->interlaced_frame && !frame->top_field_first;
}
@ -954,9 +952,9 @@ static int dnxhd_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
encode_coding_unit:
for (i = 0; i < 3; i++) {
ctx->src[i] = ctx->frame.data[i];
ctx->src[i] = frame->data[i];
if (ctx->interlaced && ctx->cur_field)
ctx->src[i] += ctx->frame.linesize[i];
ctx->src[i] += frame->linesize[i];
}
dnxhd_write_header(avctx, buf);
@ -994,7 +992,7 @@ static int dnxhd_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
goto encode_coding_unit;
}
ctx->frame.quality = ctx->qscale*FF_QP2LAMBDA;
avctx->coded_frame->quality = ctx->qscale * FF_QP2LAMBDA;
pkt->flags |= AV_PKT_FLAG_KEY;
*got_packet = 1;
@ -1027,6 +1025,8 @@ static av_cold int dnxhd_encode_end(AVCodecContext *avctx)
for (i = 1; i < avctx->thread_count; i++)
av_freep(&ctx->thread[i]);
av_frame_free(&avctx->coded_frame);
return 0;
}

View File

@ -43,7 +43,6 @@ typedef struct DNXHDEncContext {
AVClass *class;
MpegEncContext m; ///< Used for quantization dsp functions
AVFrame frame;
int cid;
const CIDEntry *cid_table;
uint8_t *msip; ///< Macroblock Scan Indexes Payload

View File

@ -39,7 +39,7 @@ typedef enum CinVideoBitmapIndex {
typedef struct CinVideoContext {
AVCodecContext *avctx;
AVFrame frame;
AVFrame *frame;
unsigned int bitmap_size;
uint32_t palette[256];
uint8_t *bitmap_table[3];
@ -118,7 +118,9 @@ static av_cold int cinvideo_decode_init(AVCodecContext *avctx)
cin->avctx = avctx;
avctx->pix_fmt = AV_PIX_FMT_PAL8;
avcodec_get_frame_defaults(&cin->frame);
cin->frame = av_frame_alloc();
if (!cin->frame)
return AVERROR(ENOMEM);
cin->bitmap_size = avctx->width * avctx->height;
if (allocate_buffers(cin))
@ -315,20 +317,20 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
break;
}
if ((res = ff_reget_buffer(avctx, &cin->frame)) < 0)
if ((res = ff_reget_buffer(avctx, cin->frame)) < 0)
return res;
memcpy(cin->frame.data[1], cin->palette, sizeof(cin->palette));
cin->frame.palette_has_changed = 1;
memcpy(cin->frame->data[1], cin->palette, sizeof(cin->palette));
cin->frame->palette_has_changed = 1;
for (y = 0; y < cin->avctx->height; ++y)
memcpy(cin->frame.data[0] + (cin->avctx->height - 1 - y) * cin->frame.linesize[0],
memcpy(cin->frame->data[0] + (cin->avctx->height - 1 - y) * cin->frame->linesize[0],
cin->bitmap_table[CIN_CUR_BMP] + y * cin->avctx->width,
cin->avctx->width);
FFSWAP(uint8_t *, cin->bitmap_table[CIN_CUR_BMP],
cin->bitmap_table[CIN_PRE_BMP]);
if ((res = av_frame_ref(data, &cin->frame)) < 0)
if ((res = av_frame_ref(data, cin->frame)) < 0)
return res;
*got_frame = 1;
@ -340,7 +342,7 @@ static av_cold int cinvideo_decode_end(AVCodecContext *avctx)
{
CinVideoContext *cin = avctx->priv_data;
av_frame_unref(&cin->frame);
av_frame_free(&cin->frame);
destroy_buffers(cin);

View File

@ -320,8 +320,6 @@ av_cold int ff_dvvideo_init(AVCodecContext *avctx)
}else
memcpy(s->dv_zigzag[1], ff_zigzag248_direct, 64);
avcodec_get_frame_defaults(&s->picture);
avctx->coded_frame = &s->picture;
s->avctx = avctx;
avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
@ -342,6 +340,10 @@ static av_cold int dvvideo_init_encoder(AVCodecContext *avctx)
return AVERROR_PATCHWELCOME;
}
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
dv_vlc_map_tableinit();
return ff_dvvideo_init(avctx);
@ -687,12 +689,12 @@ static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
if ((s->sys->pix_fmt == AV_PIX_FMT_YUV420P) ||
(s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) ||
(s->sys->height >= 720 && mb_y != 134)) {
y_stride = s->picture.linesize[0] << 3;
y_stride = s->frame->linesize[0] << 3;
} else {
y_stride = 16;
}
y_ptr = s->picture.data[0] + ((mb_y * s->picture.linesize[0] + mb_x) << 3);
linesize = s->picture.linesize[0];
y_ptr = s->frame->data[0] + ((mb_y * s->frame->linesize[0] + mb_x) << 3);
linesize = s->frame->linesize[0];
if (s->sys->video_stype == 4) { /* SD 422 */
vs_bit_size +=
@ -710,12 +712,12 @@ static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
enc_blk += 4;
/* initializing chrominance blocks */
c_offset = (((mb_y >> (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->picture.linesize[1] +
c_offset = (((mb_y >> (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->frame->linesize[1] +
(mb_x >> ((s->sys->pix_fmt == AV_PIX_FMT_YUV411P) ? 2 : 1))) << 3);
for (j = 2; j; j--) {
uint8_t *c_ptr = s->picture.data[j] + c_offset;
linesize = s->picture.linesize[j];
y_stride = (mb_y == 134) ? 8 : (s->picture.linesize[j] << 3);
uint8_t *c_ptr = s->frame->data[j] + c_offset;
linesize = s->frame->linesize[j];
y_stride = (mb_y == 134) ? 8 : (s->frame->linesize[j] << 3);
if (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) {
uint8_t* d;
uint8_t* b = scratch;
@ -814,7 +816,7 @@ static inline int dv_write_pack(enum dv_pack_type pack_id, DVVideoContext *c,
* compression scheme (if any).
*/
int apt = (c->sys->pix_fmt == AV_PIX_FMT_YUV420P ? 0 : 1);
int fs = c->picture.top_field_first ? 0x00 : 0x40;
int fs = c->frame->top_field_first ? 0x00 : 0x40;
uint8_t aspect = 0;
if ((int)(av_q2d(c->avctx->sample_aspect_ratio) * c->avctx->width / c->avctx->height * 10) >= 17) /* 16:9 */
@ -963,10 +965,10 @@ static int dvvideo_encode_frame(AVCodecContext *c, AVPacket *pkt,
if ((ret = ff_alloc_packet2(c, pkt, s->sys->frame_size)) < 0)
return ret;
c->pix_fmt = s->sys->pix_fmt;
s->picture = *frame;
s->picture.key_frame = 1;
s->picture.pict_type = AV_PICTURE_TYPE_I;
c->pix_fmt = s->sys->pix_fmt;
s->frame = frame;
c->coded_frame->key_frame = 1;
c->coded_frame->pict_type = AV_PICTURE_TYPE_I;
s->buf = pkt->data;
c->execute(c, dv_encode_video_segment, s->sys->work_chunks, NULL,
@ -982,6 +984,12 @@ static int dvvideo_encode_frame(AVCodecContext *c, AVPacket *pkt,
return 0;
}
static int dvvideo_encode_close(AVCodecContext *avctx)
{
av_frame_free(&avctx->coded_frame);
return 0;
}
AVCodec ff_dvvideo_encoder = {
.name = "dvvideo",
.long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
@ -990,6 +998,7 @@ AVCodec ff_dvvideo_encoder = {
.priv_data_size = sizeof(DVVideoContext),
.init = dvvideo_init_encoder,
.encode2 = dvvideo_encode_frame,
.close = dvvideo_encode_close,
.capabilities = CODEC_CAP_SLICE_THREADS,
.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE

View File

@ -34,7 +34,7 @@
typedef struct DVVideoContext {
const DVprofile *sys;
AVFrame picture;
AVFrame *frame;
AVCodecContext *avctx;
uint8_t *buf;

View File

@ -258,12 +258,12 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg)
if ((s->sys->pix_fmt == AV_PIX_FMT_YUV420P) ||
(s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) ||
(s->sys->height >= 720 && mb_y != 134)) {
y_stride = (s->picture.linesize[0] << ((!is_field_mode[mb_index]) * log2_blocksize));
y_stride = (s->frame->linesize[0] << ((!is_field_mode[mb_index]) * log2_blocksize));
} else {
y_stride = (2 << log2_blocksize);
}
y_ptr = s->picture.data[0] + ((mb_y * s->picture.linesize[0] + mb_x) << log2_blocksize);
linesize = s->picture.linesize[0] << is_field_mode[mb_index];
y_ptr = s->frame->data[0] + ((mb_y * s->frame->linesize[0] + mb_x) << log2_blocksize);
linesize = s->frame->linesize[0] << is_field_mode[mb_index];
mb[0] .idct_put(y_ptr , linesize, block + 0*64);
if (s->sys->video_stype == 4) { /* SD 422 */
mb[2].idct_put(y_ptr + (1 << log2_blocksize) , linesize, block + 2*64);
@ -276,19 +276,19 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg)
block += 4*64;
/* idct_put'ting chrominance */
c_offset = (((mb_y >> (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->picture.linesize[1] +
c_offset = (((mb_y >> (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->frame->linesize[1] +
(mb_x >> ((s->sys->pix_fmt == AV_PIX_FMT_YUV411P) ? 2 : 1))) << log2_blocksize);
for (j = 2; j; j--) {
uint8_t *c_ptr = s->picture.data[j] + c_offset;
uint8_t *c_ptr = s->frame->data[j] + c_offset;
if (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) {
uint64_t aligned_pixels[64/8];
uint8_t *pixels = (uint8_t*)aligned_pixels;
uint8_t *c_ptr1, *ptr1;
int x, y;
mb->idct_put(pixels, 8, block);
for (y = 0; y < (1 << log2_blocksize); y++, c_ptr += s->picture.linesize[j], pixels += 8) {
for (y = 0; y < (1 << log2_blocksize); y++, c_ptr += s->frame->linesize[j], pixels += 8) {
ptr1 = pixels + ((1 << (log2_blocksize))>>1);
c_ptr1 = c_ptr + (s->picture.linesize[j] << log2_blocksize);
c_ptr1 = c_ptr + (s->frame->linesize[j] << log2_blocksize);
for (x = 0; x < (1 << FFMAX(log2_blocksize - 1, 0)); x++) {
c_ptr[x] = pixels[x];
c_ptr1[x] = ptr1[x];
@ -297,8 +297,8 @@ static int dv_decode_video_segment(AVCodecContext *avctx, void *arg)
block += 64; mb++;
} else {
y_stride = (mb_y == 134) ? (1 << log2_blocksize) :
s->picture.linesize[j] << ((!is_field_mode[mb_index]) * log2_blocksize);
linesize = s->picture.linesize[j] << is_field_mode[mb_index];
s->frame->linesize[j] << ((!is_field_mode[mb_index]) * log2_blocksize);
linesize = s->frame->linesize[j] << is_field_mode[mb_index];
(mb++)-> idct_put(c_ptr , linesize, block); block += 64;
if (s->sys->bpm == 8) {
(mb++)->idct_put(c_ptr + y_stride, linesize, block); block += 64;
@ -327,15 +327,16 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
return -1; /* NOTE: we only accept several full frames */
}
s->picture.key_frame = 1;
s->picture.pict_type = AV_PICTURE_TYPE_I;
s->frame = data;
s->frame->key_frame = 1;
s->frame->pict_type = AV_PICTURE_TYPE_I;
avctx->pix_fmt = s->sys->pix_fmt;
avctx->time_base = s->sys->time_base;
avcodec_set_dimensions(avctx, s->sys->width, s->sys->height);
if ((ret = ff_get_buffer(avctx, &s->picture, 0)) < 0)
if ((ret = ff_get_buffer(avctx, s->frame, 0)) < 0)
return ret;
s->picture.interlaced_frame = 1;
s->picture.top_field_first = 0;
s->frame->interlaced_frame = 1;
s->frame->top_field_first = 0;
/* Determine the codec's sample_aspect ratio and field order from the packet */
vsc_pack = buf + 80*5 + 48 + 5;
@ -343,7 +344,7 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
apt = buf[4] & 0x07;
is16_9 = (vsc_pack[2] & 0x07) == 0x02 || (!apt && (vsc_pack[2] & 0x07) == 0x07);
avctx->sample_aspect_ratio = s->sys->sar[is16_9];
s->picture.top_field_first = !(vsc_pack[3] & 0x40);
s->frame->top_field_first = !(vsc_pack[3] & 0x40);
}
s->buf = buf;
@ -354,20 +355,10 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
/* return image */
*got_frame = 1;
av_frame_move_ref(data, &s->picture);
return s->sys->frame_size;
}
static int dvvideo_close(AVCodecContext *c)
{
DVVideoContext *s = c->priv_data;
av_frame_unref(&s->picture);
return 0;
}
AVCodec ff_dvvideo_decoder = {
.name = "dvvideo",
.long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
@ -375,7 +366,6 @@ AVCodec ff_dvvideo_decoder = {
.id = AV_CODEC_ID_DVVIDEO,
.priv_data_size = sizeof(DVVideoContext),
.init = ff_dvvideo_init,
.close = dvvideo_close,
.decode = dvvideo_decode_frame,
.capabilities = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
.max_lowres = 3,

View File

@ -321,12 +321,12 @@ static av_cold int decode_init(AVCodecContext *avctx)
{
DxaDecContext * const c = avctx->priv_data;
avctx->pix_fmt = AV_PIX_FMT_PAL8;
c->prev = av_frame_alloc();
if (!c->prev)
return AVERROR(ENOMEM);
avctx->pix_fmt = AV_PIX_FMT_PAL8;
c->dsize = avctx->width * avctx->height * 2;
c->decomp_buf = av_malloc(c->dsize);
if (!c->decomp_buf) {

View File

@ -45,7 +45,7 @@
typedef struct MadContext {
AVCodecContext *avctx;
DSPContext dsp;
AVFrame last_frame;
AVFrame *last_frame;
GetBitContext gb;
void *bitstream_buf;
unsigned int bitstream_buf_size;
@ -65,6 +65,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
ff_init_scantable_permutation(s->dsp.idct_permutation, FF_NO_IDCT_PERM);
ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
ff_mpeg12_init_vlcs();
s->last_frame = av_frame_alloc();
if (!s->last_frame)
return AVERROR(ENOMEM);
return 0;
}
@ -82,22 +87,22 @@ static inline void comp_block(MadContext *t, AVFrame *frame,
int j, int mv_x, int mv_y, int add)
{
if (j < 4) {
unsigned offset = (mb_y*16 + ((j&2)<<2) + mv_y)*t->last_frame.linesize[0] + mb_x*16 + ((j&1)<<3) + mv_x;
if (offset >= (t->avctx->height - 7) * t->last_frame.linesize[0] - 7)
unsigned offset = (mb_y*16 + ((j&2)<<2) + mv_y)*t->last_frame->linesize[0] + mb_x*16 + ((j&1)<<3) + mv_x;
if (offset >= (t->avctx->height - 7) * t->last_frame->linesize[0] - 7)
return;
comp(frame->data[0] + (mb_y*16 + ((j&2)<<2))*frame->linesize[0] + mb_x*16 + ((j&1)<<3),
frame->linesize[0],
t->last_frame.data[0] + offset,
t->last_frame.linesize[0], add);
t->last_frame->data[0] + offset,
t->last_frame->linesize[0], add);
} else if (!(t->avctx->flags & CODEC_FLAG_GRAY)) {
int index = j - 3;
unsigned offset = (mb_y * 8 + (mv_y/2))*t->last_frame.linesize[index] + mb_x * 8 + (mv_x/2);
if (offset >= (t->avctx->height/2 - 7) * t->last_frame.linesize[index] - 7)
unsigned offset = (mb_y * 8 + (mv_y/2))*t->last_frame->linesize[index] + mb_x * 8 + (mv_x/2);
if (offset >= (t->avctx->height/2 - 7) * t->last_frame->linesize[index] - 7)
return;
comp(frame->data[index] + (mb_y*8)*frame->linesize[index] + mb_x * 8,
frame->linesize[index],
t->last_frame.data[index] + offset,
t->last_frame.linesize[index], add);
t->last_frame->data[index] + offset,
t->last_frame->linesize[index], add);
}
}
@ -205,7 +210,7 @@ static int decode_mb(MadContext *s, AVFrame *frame, int inter)
for (j=0; j<6; j++) {
if (mv_map & (1<<j)) { // mv_x and mv_y are guarded by mv_map
int add = 2*decode_motion(&s->gb);
if (s->last_frame.data[0])
if (s->last_frame->data[0])
comp_block(s, frame, s->mb_x, s->mb_y, j, mv_x, mv_y, add);
} else {
s->dsp.clear_block(s->block);
@ -263,28 +268,28 @@ static int decode_frame(AVCodecContext *avctx,
}
if (avctx->width != width || avctx->height != height) {
av_frame_unref(s->last_frame);
if((width * height)/2048*7 > buf_end-buf)
return AVERROR_INVALIDDATA;
if ((ret = av_image_check_size(width, height, 0, avctx)) < 0)
return ret;
avcodec_set_dimensions(avctx, width, height);
av_frame_unref(&s->last_frame);
}
if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
return ret;
if (inter && !s->last_frame.data[0]) {
if (inter && !s->last_frame->data[0]) {
av_log(avctx, AV_LOG_WARNING, "Missing reference frame.\n");
ret = ff_get_buffer(avctx, &s->last_frame, AV_GET_BUFFER_FLAG_REF);
ret = ff_get_buffer(avctx, s->last_frame, AV_GET_BUFFER_FLAG_REF);
if (ret < 0)
return ret;
memset(s->last_frame.data[0], 0, s->last_frame.height *
s->last_frame.linesize[0]);
memset(s->last_frame.data[1], 0x80, s->last_frame.height / 2 *
s->last_frame.linesize[1]);
memset(s->last_frame.data[2], 0x80, s->last_frame.height / 2 *
s->last_frame.linesize[2]);
memset(s->last_frame->data[0], 0, s->last_frame->height *
s->last_frame->linesize[0]);
memset(s->last_frame->data[1], 0x80, s->last_frame->height / 2 *
s->last_frame->linesize[1]);
memset(s->last_frame->data[2], 0x80, s->last_frame->height / 2 *
s->last_frame->linesize[2]);
}
av_fast_padded_malloc(&s->bitstream_buf, &s->bitstream_buf_size,
@ -303,8 +308,8 @@ static int decode_frame(AVCodecContext *avctx,
*got_frame = 1;
if (chunk_type != MADe_TAG) {
av_frame_unref(&s->last_frame);
if ((ret = av_frame_ref(&s->last_frame, frame)) < 0)
av_frame_unref(s->last_frame);
if ((ret = av_frame_ref(s->last_frame, frame)) < 0)
return ret;
}
@ -314,7 +319,7 @@ static int decode_frame(AVCodecContext *avctx,
static av_cold int decode_end(AVCodecContext *avctx)
{
MadContext *t = avctx->priv_data;
av_frame_unref(&t->last_frame);
av_frame_free(&t->last_frame);
av_free(t->bitstream_buf);
return 0;
}

View File

@ -40,7 +40,7 @@
typedef struct TgvContext {
AVCodecContext *avctx;
AVFrame last_frame;
AVFrame *last_frame;
uint8_t *frame_buffer;
int width,height;
uint32_t palette[AVPALETTE_COUNT];
@ -57,7 +57,11 @@ static av_cold int tgv_decode_init(AVCodecContext *avctx)
s->avctx = avctx;
avctx->time_base = (AVRational){1, 15};
avctx->pix_fmt = AV_PIX_FMT_PAL8;
avcodec_get_frame_defaults(&s->last_frame);
s->last_frame = av_frame_alloc();
if (!s->last_frame)
return AVERROR(ENOMEM);
return 0;
}
@ -232,8 +236,8 @@ static int tgv_decode_inter(TgvContext *s, AVFrame *frame,
continue;
}
src = s->last_frame.data[0] + mx + my * s->last_frame.linesize[0];
src_stride = s->last_frame.linesize[0];
src = s->last_frame->data[0] + mx + my * s->last_frame->linesize[0];
src_stride = s->last_frame->linesize[0];
} else {
int offset = vector - num_mvs;
if (offset < num_blocks_raw)
@ -283,7 +287,7 @@ static int tgv_decode_frame(AVCodecContext *avctx,
if (s->avctx->width != s->width || s->avctx->height != s->height) {
avcodec_set_dimensions(s->avctx, s->width, s->height);
av_freep(&s->frame_buffer);
av_frame_unref(&s->last_frame);
av_frame_unref(s->last_frame);
}
pal_count = AV_RL16(&buf[6]);
@ -320,7 +324,7 @@ static int tgv_decode_frame(AVCodecContext *avctx,
s->frame_buffer + y * s->width,
s->width);
} else {
if (!s->last_frame.data[0]) {
if (!s->last_frame->data[0]) {
av_log(avctx, AV_LOG_WARNING, "inter frame without corresponding intra frame\n");
return buf_size;
}
@ -332,8 +336,8 @@ static int tgv_decode_frame(AVCodecContext *avctx,
}
}
av_frame_unref(&s->last_frame);
if ((ret = av_frame_ref(&s->last_frame, frame)) < 0)
av_frame_unref(s->last_frame);
if ((ret = av_frame_ref(s->last_frame, frame)) < 0)
return ret;
*got_frame = 1;
@ -344,7 +348,7 @@ static int tgv_decode_frame(AVCodecContext *avctx,
static av_cold int tgv_decode_end(AVCodecContext *avctx)
{
TgvContext *s = avctx->priv_data;
av_frame_unref(&s->last_frame);
av_frame_free(&s->last_frame);
av_freep(&s->frame_buffer);
av_free(s->mv_codebook);
av_free(s->block_codebook);

View File

@ -762,6 +762,17 @@ void ff_er_frame_start(ERContext *s)
s->error_occurred = 0;
}
static int er_supported(ERContext *s)
{
if(s->avctx->hwaccel ||
s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
!s->cur_pic ||
s->cur_pic->field_picture
)
return 0;
return 1;
}
/**
* Add a slice.
* @param endx x component of the last macroblock, can be -1
@ -828,7 +839,7 @@ void ff_er_add_slice(ERContext *s, int startx, int starty,
s->error_status_table[start_xy] |= VP_START;
if (start_xy > 0 && !(s->avctx->active_thread_type & FF_THREAD_SLICE) &&
s->avctx->skip_top * s->mb_width < start_i) {
er_supported(s) && s->avctx->skip_top * s->mb_width < start_i) {
int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]];
prev_status &= ~ VP_START;
@ -853,9 +864,7 @@ void ff_er_frame_end(ERContext *s)
* though it should not crash if enabled. */
if (!s->avctx->error_concealment || s->error_count == 0 ||
s->avctx->lowres ||
s->avctx->hwaccel ||
s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
!s->cur_pic || s->cur_pic->field_picture ||
!er_supported(s) ||
s->error_count == 3 * s->mb_width *
(s->avctx->skip_top + s->avctx->skip_bottom)) {
return;

View File

@ -42,7 +42,7 @@ typedef struct CodeBook {
} CodeBook;
typedef struct Escape124Context {
AVFrame frame;
AVFrame *frame;
unsigned num_superblocks;
@ -58,12 +58,15 @@ static av_cold int escape124_decode_init(AVCodecContext *avctx)
{
Escape124Context *s = avctx->priv_data;
avcodec_get_frame_defaults(&s->frame);
avctx->pix_fmt = AV_PIX_FMT_RGB555;
s->num_superblocks = ((unsigned)avctx->width / 8) *
((unsigned)avctx->height / 8);
s->frame = av_frame_alloc();
if (!s->frame)
return AVERROR(ENOMEM);
return 0;
}
@ -75,7 +78,7 @@ static av_cold int escape124_decode_close(AVCodecContext *avctx)
for (i = 0; i < 3; i++)
av_free(s->codebooks[i].blocks);
av_frame_unref(&s->frame);
av_frame_free(&s->frame);
return 0;
}
@ -227,13 +230,13 @@ static int escape124_decode_frame(AVCodecContext *avctx,
// Leave last frame unchanged
// FIXME: Is this necessary? I haven't seen it in any real samples
if (!(frame_flags & 0x114) || !(frame_flags & 0x7800000)) {
if (!s->frame.data[0])
if (!s->frame->data[0])
return AVERROR_INVALIDDATA;
av_log(avctx, AV_LOG_DEBUG, "Skipping frame\n");
*got_frame = 1;
if ((ret = av_frame_ref(frame, &s->frame)) < 0)
if ((ret = av_frame_ref(frame, s->frame)) < 0)
return ret;
return frame_size;
@ -272,8 +275,8 @@ static int escape124_decode_frame(AVCodecContext *avctx,
new_frame_data = (uint16_t*)frame->data[0];
new_stride = frame->linesize[0] / 2;
old_frame_data = (uint16_t*)s->frame.data[0];
old_stride = s->frame.linesize[0] / 2;
old_frame_data = (uint16_t*)s->frame->data[0];
old_stride = s->frame->linesize[0] / 2;
for (superblock_index = 0; superblock_index < s->num_superblocks;
superblock_index++) {
@ -350,8 +353,8 @@ static int escape124_decode_frame(AVCodecContext *avctx,
"Escape sizes: %i, %i, %i\n",
frame_size, buf_size, get_bits_count(&gb) / 8);
av_frame_unref(&s->frame);
if ((ret = av_frame_ref(&s->frame, frame)) < 0)
av_frame_unref(s->frame);
if ((ret = av_frame_ref(s->frame, frame)) < 0)
return ret;
*got_frame = 1;

View File

@ -53,6 +53,7 @@ av_cold int ffv1_common_init(AVCodecContext *avctx)
s->last_picture.f = av_frame_alloc();
if (!s->picture.f || !s->last_picture.f)
return AVERROR(ENOMEM);
ff_dsputil_init(&s->dsp, avctx);
s->width = avctx->width;

View File

@ -938,12 +938,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
uint8_t *dst[4];
ff_thread_await_progress(&f->last_picture, INT_MAX, 0);
for (j = 0; j < 4; j++) {
int sh = (j==1 || j==2) ? f->chroma_h_shift : 0;
int sv = (j==1 || j==2) ? f->chroma_v_shift : 0;
dst[j] = p->data[j] + p->linesize[j]*
(fs->slice_y>>sv) + (fs->slice_x>>sh);
src[j] = f->last_picture.f->data[j] + f->last_picture.f->linesize[j]*
(fs->slice_y>>sv) + (fs->slice_x>>sh);
int sh = (j == 1 || j == 2) ? f->chroma_h_shift : 0;
int sv = (j == 1 || j == 2) ? f->chroma_v_shift : 0;
dst[j] = p->data[j] + p->linesize[j] *
(fs->slice_y >> sv) + (fs->slice_x >> sh);
src[j] = f->last_picture.f->data[j] + f->last_picture.f->linesize[j] *
(fs->slice_y >> sv) + (fs->slice_x >> sh);
}
av_image_copy(dst, p->linesize, (const uint8_t **)src,
f->last_picture.f->linesize,

View File

@ -771,6 +771,10 @@ static av_cold int encode_init(AVCodecContext *avctx)
s->colorspace = 1;
s->chroma_planes = 1;
s->version = FFMAX(s->version, 1);
if (!s->ac) {
av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample of more than 8 needs -coder 1 currently\n");
return AVERROR(ENOSYS);
}
break;
default:
av_log(avctx, AV_LOG_ERROR, "format not supported\n");
@ -826,6 +830,12 @@ static av_cold int encode_init(AVCodecContext *avctx)
if ((ret = ffv1_allocate_initial_states(s)) < 0)
return ret;
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
if (!s->transparency)
s->plane_count = 2;
if (!s->chroma_planes && s->version > 3)
@ -996,7 +1006,7 @@ static int encode_slice(AVCodecContext *c, void *arg)
int height = fs->slice_height;
int x = fs->slice_x;
int y = fs->slice_y;
AVFrame *const p = f->picture.f;
const AVFrame *const p = f->picture.f;
const int ps = av_pix_fmt_desc_get(c->pix_fmt)->comp[0].step_minus1 + 1;
int ret;
RangeCoder c_bak = fs->c;
@ -1004,7 +1014,7 @@ static int encode_slice(AVCodecContext *c, void *arg)
fs->slice_coding_mode = 0;
retry:
if (p->key_frame)
if (c->coded_frame->key_frame)
ffv1_clear_slice_state(f, fs);
if (f->version > 2) {
encode_slice_header(f, fs);
@ -1080,16 +1090,16 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
av_frame_unref(p);
if ((ret = av_frame_ref(p, pict)) < 0)
return ret;
p->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
if (avctx->gop_size == 0 || f->picture_number % avctx->gop_size == 0) {
put_rac(c, &keystate, 1);
p->key_frame = 1;
avctx->coded_frame->key_frame = 1;
f->gob_count++;
write_header(f);
} else {
put_rac(c, &keystate, 0);
p->key_frame = 0;
avctx->coded_frame->key_frame = 0;
}
if (f->ac > 1) {
@ -1184,12 +1194,19 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
f->picture_number++;
pkt->size = buf_p - pkt->data;
pkt->flags |= AV_PKT_FLAG_KEY * p->key_frame;
pkt->flags |= AV_PKT_FLAG_KEY * avctx->coded_frame->key_frame;
*got_packet = 1;
return 0;
}
static av_cold int encode_close(AVCodecContext *avctx)
{
av_frame_free(&avctx->coded_frame);
ffv1_close(avctx);
return 0;
}
#define OFFSET(x) offsetof(FFV1Context, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
@ -1217,7 +1234,7 @@ AVCodec ff_ffv1_encoder = {
.priv_data_size = sizeof(FFV1Context),
.init = encode_init,
.encode2 = encode_frame,
.close = ffv1_close,
.close = encode_close,
.capabilities = CODEC_CAP_SLICE_THREADS,
.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV444P,

View File

@ -93,7 +93,6 @@ struct wavesynth_context {
int64_t cur_ts;
int64_t next_ts;
int32_t *sin;
AVFrame frame;
struct ws_interval *inter;
uint32_t dither_state;
uint32_t pink_state;
@ -341,8 +340,6 @@ static av_cold int wavesynth_init(AVCodecContext *avc)
ws->pink_need += ws->inter[i].type == WS_NOISE;
ws->pink_state = MKTAG('P','I','N','K');
ws->pink_pos = PINK_UNIT;
avcodec_get_frame_defaults(&ws->frame);
avc->coded_frame = &ws->frame;
wavesynth_seek(ws, 0);
avc->sample_fmt = AV_SAMPLE_FMT_S16;
return 0;
@ -428,6 +425,7 @@ static int wavesynth_decode(AVCodecContext *avc, void *rframe, int *rgot_frame,
AVPacket *packet)
{
struct wavesynth_context *ws = avc->priv_data;
AVFrame *frame = rframe;
int64_t ts;
int duration;
int s, c, r;
@ -443,11 +441,11 @@ static int wavesynth_decode(AVCodecContext *avc, void *rframe, int *rgot_frame,
duration = AV_RL32(packet->data + 8);
if (duration <= 0)
return AVERROR(EINVAL);
ws->frame.nb_samples = duration;
r = ff_get_buffer(avc, &ws->frame, 0);
frame->nb_samples = duration;
r = ff_get_buffer(avc, frame, 0);
if (r < 0)
return r;
pcm = (int16_t *)ws->frame.data[0];
pcm = (int16_t *)frame->data[0];
for (s = 0; s < duration; s++, ts++) {
memset(channels, 0, avc->channels * sizeof(*channels));
if (ts >= ws->next_ts)
@ -458,7 +456,6 @@ static int wavesynth_decode(AVCodecContext *avc, void *rframe, int *rgot_frame,
}
ws->cur_ts += duration;
*rgot_frame = 1;
*(AVFrame *)rframe = ws->frame;
return packet->size;
}

View File

@ -50,7 +50,7 @@ typedef struct BlockInfo {
typedef struct FlashSVContext {
AVCodecContext *avctx;
AVFrame frame;
AVFrame *frame;
int image_width, image_height;
int block_width, block_height;
uint8_t *tmpblock;
@ -100,6 +100,19 @@ static int decode_hybrid(const uint8_t *sptr, uint8_t *dptr, int dx, int dy,
return sptr - orig_src;
}
static av_cold int flashsv_decode_end(AVCodecContext *avctx)
{
FlashSVContext *s = avctx->priv_data;
inflateEnd(&s->zstream);
/* release the frame if needed */
av_frame_free(&s->frame);
/* free the tmpblock */
av_freep(&s->tmpblock);
return 0;
}
static av_cold int flashsv_decode_init(AVCodecContext *avctx)
{
FlashSVContext *s = avctx->priv_data;
@ -115,7 +128,12 @@ static av_cold int flashsv_decode_init(AVCodecContext *avctx)
return 1;
}
avctx->pix_fmt = AV_PIX_FMT_BGR24;
avcodec_get_frame_defaults(&s->frame);
s->frame = av_frame_alloc();
if (!s->frame) {
flashsv_decode_end(avctx);
return AVERROR(ENOMEM);
}
return 0;
}
@ -205,18 +223,18 @@ static int flashsv_decode_block(AVCodecContext *avctx, AVPacket *avpkt,
/* Flash Screen Video stores the image upside down, so copy
* lines to destination in reverse order. */
for (k = 1; k <= s->diff_height; k++) {
memcpy(s->frame.data[0] + x_pos * 3 +
(s->image_height - y_pos - s->diff_start - k) * s->frame.linesize[0],
memcpy(s->frame->data[0] + x_pos * 3 +
(s->image_height - y_pos - s->diff_start - k) * s->frame->linesize[0],
line, width * 3);
/* advance source pointer to next line */
line += width * 3;
}
} else {
/* hybrid 15-bit/palette mode */
decode_hybrid(s->tmpblock, s->frame.data[0],
decode_hybrid(s->tmpblock, s->frame->data[0],
s->image_height - (y_pos + 1 + s->diff_start + s->diff_height),
x_pos, s->diff_height, width,
s->frame.linesize[0], s->pal);
s->frame->linesize[0], s->pal);
}
skip_bits_long(gb, 8 * block_size); /* skip the consumed bits */
return 0;
@ -337,7 +355,7 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
s->image_width, s->image_height, s->block_width, s->block_height,
h_blocks, v_blocks, h_part, v_part);
if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0)
if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
return ret;
/* loop over all block columns */
@ -362,7 +380,7 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
s->diff_height = cur_blk_height;
if (8 * size > get_bits_left(&gb)) {
av_frame_unref(&s->frame);
av_frame_unref(s->frame);
return AVERROR_INVALIDDATA;
}
@ -418,11 +436,11 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
if (has_diff) {
int k;
int off = (s->image_height - y_pos - 1) * s->frame.linesize[0];
int off = (s->image_height - y_pos - 1) * s->frame->linesize[0];
for (k = 0; k < cur_blk_height; k++)
memcpy(s->frame.data[0] + off - k*s->frame.linesize[0] + x_pos*3,
s->keyframe + off - k*s->frame.linesize[0] + x_pos*3,
memcpy(s->frame->data[0] + off - k*s->frame->linesize[0] + x_pos*3,
s->keyframe + off - k*s->frame->linesize[0] + x_pos*3,
cur_blk_width * 3);
}
@ -439,16 +457,16 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
}
if (s->is_keyframe && s->ver == 2) {
if (!s->keyframe) {
s->keyframe = av_malloc(s->frame.linesize[0] * avctx->height);
s->keyframe = av_malloc(s->frame->linesize[0] * avctx->height);
if (!s->keyframe) {
av_log(avctx, AV_LOG_ERROR, "Cannot allocate image data\n");
return AVERROR(ENOMEM);
}
}
memcpy(s->keyframe, s->frame.data[0], s->frame.linesize[0] * avctx->height);
memcpy(s->keyframe, s->frame->data[0], s->frame->linesize[0] * avctx->height);
}
if ((ret = av_frame_ref(data, &s->frame)) < 0)
if ((ret = av_frame_ref(data, s->frame)) < 0)
return ret;
*got_frame = 1;
@ -461,21 +479,6 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
return buf_size;
}
static av_cold int flashsv_decode_end(AVCodecContext *avctx)
{
FlashSVContext *s = avctx->priv_data;
inflateEnd(&s->zstream);
/* release the frame if needed */
av_frame_unref(&s->frame);
/* free the tmpblock */
av_free(s->tmpblock);
return 0;
}
#if CONFIG_FLASHSV_DECODER
AVCodec ff_flashsv_decoder = {
.name = "flashsv",

View File

@ -87,7 +87,6 @@ typedef struct FlashSV2Context {
AVCodecContext *avctx;
uint8_t *current_frame;
uint8_t *key_frame;
AVFrame frame;
uint8_t *encbuffer;
uint8_t *keybuffer;
uint8_t *databuffer;
@ -849,15 +848,12 @@ static int reconfigure_at_keyframe(FlashSV2Context * s, const uint8_t * image,
}
static int flashsv2_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
const AVFrame *p, int *got_packet)
{
FlashSV2Context *const s = avctx->priv_data;
AVFrame *const p = &s->frame;
int res;
int keyframe = 0;
*p = *pict;
if ((res = ff_alloc_packet2(avctx, pkt, s->frame_size + FF_MIN_BUFFER_SIZE)) < 0)
return res;
@ -891,18 +887,11 @@ static int flashsv2_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
if (keyframe) {
new_key_frame(s);
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
s->last_key_frame = avctx->frame_number;
pkt->flags |= AV_PKT_FLAG_KEY;
av_log(avctx, AV_LOG_DEBUG, "Inserting key frame at frame %d\n", avctx->frame_number);
} else {
p->pict_type = AV_PICTURE_TYPE_P;
p->key_frame = 0;
}
avctx->coded_frame = p;
pkt->size = res;
*got_packet = 1;

View File

@ -57,7 +57,6 @@
typedef struct FlashSVContext {
AVCodecContext *avctx;
uint8_t *previous_frame;
AVFrame frame;
int image_width, image_height;
int block_width, block_height;
uint8_t *tmpblock;
@ -89,6 +88,21 @@ static int copy_region_enc(uint8_t *sptr, uint8_t *dptr, int dx, int dy,
return 0;
}
static av_cold int flashsv_encode_end(AVCodecContext *avctx)
{
FlashSVContext *s = avctx->priv_data;
deflateEnd(&s->zstream);
av_free(s->encbuffer);
av_free(s->previous_frame);
av_free(s->tmpblock);
av_frame_free(&avctx->coded_frame);
return 0;
}
static av_cold int flashsv_encode_init(AVCodecContext *avctx)
{
FlashSVContext *s = avctx->priv_data;
@ -117,11 +131,17 @@ static av_cold int flashsv_encode_init(AVCodecContext *avctx)
return AVERROR(ENOMEM);
}
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame) {
flashsv_encode_end(avctx);
return AVERROR(ENOMEM);
}
return 0;
}
static int encode_bitstream(FlashSVContext *s, AVFrame *p, uint8_t *buf,
static int encode_bitstream(FlashSVContext *s, const AVFrame *p, uint8_t *buf,
int buf_size, int block_width, int block_height,
uint8_t *previous_frame, int *I_frame)
{
@ -199,14 +219,12 @@ static int flashsv_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
FlashSVContext * const s = avctx->priv_data;
AVFrame * const p = &s->frame;
const AVFrame * const p = pict;
uint8_t *pfptr;
int res;
int I_frame = 0;
int opt_w = 4, opt_h = 4;
*p = *pict;
/* First frame needs to be a keyframe */
if (avctx->frame_number == 0) {
s->previous_frame = av_mallocz(FFABS(p->linesize[0]) * s->image_height);
@ -244,37 +262,22 @@ static int flashsv_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
//mark the frame type so the muxer can mux it correctly
if (I_frame) {
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
s->last_key_frame = avctx->frame_number;
av_dlog(avctx, "Inserting keyframe at frame %d\n", avctx->frame_number);
} else {
p->pict_type = AV_PICTURE_TYPE_P;
p->key_frame = 0;
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
avctx->coded_frame->key_frame = 0;
}
avctx->coded_frame = p;
if (p->key_frame)
if (avctx->coded_frame->key_frame)
pkt->flags |= AV_PKT_FLAG_KEY;
*got_packet = 1;
return 0;
}
static av_cold int flashsv_encode_end(AVCodecContext *avctx)
{
FlashSVContext *s = avctx->priv_data;
deflateEnd(&s->zstream);
av_free(s->encbuffer);
av_free(s->previous_frame);
av_free(s->tmpblock);
return 0;
}
AVCodec ff_flashsv_encoder = {
.name = "flashsv",
.long_name = NULL_IF_CONFIG_SMALL("Flash Screen Video"),
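
The encoder-side counterpart of the same cleanup appears here and in several later files (ffv1enc, gif, huffyuv, lcl, libschroedinger, libx264, libxavs, libxvid): instead of writing pict_type/key_frame into the caller's const input frame, the encoder allocates avctx->coded_frame itself and keeps the bookkeeping there. A hedged sketch of that shape — enc_init/enc_frame/enc_close and decide_keyframe() are illustrative placeholders, not code from any of these encoders:

    #include <libavcodec/avcodec.h>

    /* placeholder for the per-codec keyframe decision */
    static int decide_keyframe(AVCodecContext *avctx)
    {
        return avctx->frame_number == 0;            /* e.g. first frame only */
    }

    static av_cold int enc_init(AVCodecContext *avctx)
    {
        avctx->coded_frame = av_frame_alloc();      /* owned by the encoder now */
        if (!avctx->coded_frame)
            return AVERROR(ENOMEM);
        return 0;
    }

    static int enc_frame(AVCodecContext *avctx, AVPacket *pkt,
                         const AVFrame *pict, int *got_packet)
    {
        int keyframe = decide_keyframe(avctx);
        /* frame-type bookkeeping lives on coded_frame; the input stays const */
        avctx->coded_frame->pict_type = keyframe ? AV_PICTURE_TYPE_I
                                                 : AV_PICTURE_TYPE_P;
        avctx->coded_frame->key_frame = keyframe;
        if (keyframe)
            pkt->flags |= AV_PKT_FLAG_KEY;
        /* actual bitstream writing elided */
        *got_packet = 1;
        return 0;
    }

    static av_cold int enc_close(AVCodecContext *avctx)
    {
        av_frame_free(&avctx->coded_frame);         /* paired with the alloc in enc_init */
        return 0;
    }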

View File

@ -71,7 +71,7 @@
typedef struct FlicDecodeContext {
AVCodecContext *avctx;
AVFrame frame;
AVFrame *frame;
unsigned int palette[256];
int new_palette;
@ -141,7 +141,10 @@ static av_cold int flic_decode_init(AVCodecContext *avctx)
return AVERROR_INVALIDDATA;
}
avcodec_get_frame_defaults(&s->frame);
s->frame = av_frame_alloc();
if (!s->frame)
return AVERROR(ENOMEM);
s->new_palette = 0;
return 0;
@ -185,11 +188,11 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
bytestream2_init(&g2, buf, buf_size);
if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0)
if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
return ret;
pixels = s->frame.data[0];
pixel_limit = s->avctx->height * s->frame.linesize[0];
pixels = s->frame->data[0];
pixel_limit = s->avctx->height * s->frame->linesize[0];
if (buf_size < 16 || buf_size > INT_MAX - (3 * 256 + FF_INPUT_BUFFER_PADDING_SIZE))
return AVERROR_INVALIDDATA;
frame_size = bytestream2_get_le32(&g2);
@ -273,12 +276,12 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
if ((line_packets & 0xC000) == 0xC000) {
// line skip opcode
line_packets = -line_packets;
y_ptr += line_packets * s->frame.linesize[0];
y_ptr += line_packets * s->frame->linesize[0];
} else if ((line_packets & 0xC000) == 0x4000) {
av_log(avctx, AV_LOG_ERROR, "Undefined opcode (%x) in DELTA_FLI\n", line_packets);
} else if ((line_packets & 0xC000) == 0x8000) {
// "last byte" opcode
pixel_ptr= y_ptr + s->frame.linesize[0] - 1;
pixel_ptr= y_ptr + s->frame->linesize[0] - 1;
CHECK_PIXEL_PTR(0);
pixels[pixel_ptr] = line_packets & 0xff;
} else {
@ -313,7 +316,7 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
}
}
y_ptr += s->frame.linesize[0];
y_ptr += s->frame->linesize[0];
}
}
break;
@ -322,7 +325,7 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
/* line compressed */
starting_line = bytestream2_get_le16(&g2);
y_ptr = 0;
y_ptr += starting_line * s->frame.linesize[0];
y_ptr += starting_line * s->frame->linesize[0];
compressed_lines = bytestream2_get_le16(&g2);
while (compressed_lines > 0) {
@ -359,7 +362,7 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
}
}
y_ptr += s->frame.linesize[0];
y_ptr += s->frame->linesize[0];
compressed_lines--;
}
break;
@ -367,7 +370,7 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
case FLI_BLACK:
/* set the whole frame to color 0 (which is usually black) */
memset(pixels, 0,
s->frame.linesize[0] * s->avctx->height);
s->frame->linesize[0] * s->avctx->height);
break;
case FLI_BRUN:
@ -414,7 +417,7 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
}
}
y_ptr += s->frame.linesize[0];
y_ptr += s->frame->linesize[0];
}
break;
@ -425,8 +428,8 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
"has incorrect size, skipping chunk\n", chunk_size - 6);
bytestream2_skip(&g2, chunk_size - 6);
} else {
for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height;
y_ptr += s->frame.linesize[0]) {
for (y_ptr = 0; y_ptr < s->frame->linesize[0] * s->avctx->height;
y_ptr += s->frame->linesize[0]) {
bytestream2_get_buffer(&g2, &pixels[y_ptr],
s->avctx->width);
}
@ -457,13 +460,13 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
buf_size - bytestream2_get_bytes_left(&g2));
/* make the palette available on the way out */
memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
memcpy(s->frame->data[1], s->palette, AVPALETTE_SIZE);
if (s->new_palette) {
s->frame.palette_has_changed = 1;
s->frame->palette_has_changed = 1;
s->new_palette = 0;
}
if ((ret = av_frame_ref(data, &s->frame)) < 0)
if ((ret = av_frame_ref(data, s->frame)) < 0)
return ret;
*got_frame = 1;
@ -504,11 +507,11 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
bytestream2_init(&g2, buf, buf_size);
if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0)
if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
return ret;
pixels = s->frame.data[0];
pixel_limit = s->avctx->height * s->frame.linesize[0];
pixels = s->frame->data[0];
pixel_limit = s->avctx->height * s->frame->linesize[0];
frame_size = bytestream2_get_le32(&g2);
bytestream2_skip(&g2, 2); /* skip the magic number */
@ -556,7 +559,7 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
line_packets = bytestream2_get_le16(&g2);
if (line_packets < 0) {
line_packets = -line_packets;
y_ptr += line_packets * s->frame.linesize[0];
y_ptr += line_packets * s->frame->linesize[0];
} else {
compressed_lines--;
pixel_ptr = y_ptr;
@ -589,7 +592,7 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
}
}
y_ptr += s->frame.linesize[0];
y_ptr += s->frame->linesize[0];
}
}
break;
@ -602,7 +605,7 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
case FLI_BLACK:
/* set the whole frame to 0x0000 which is black in both 15Bpp and 16Bpp modes. */
memset(pixels, 0x0000,
s->frame.linesize[0] * s->avctx->height);
s->frame->linesize[0] * s->avctx->height);
break;
case FLI_BRUN:
@ -657,7 +660,7 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
pixel_ptr += 2;
}
#endif
y_ptr += s->frame.linesize[0];
y_ptr += s->frame->linesize[0];
}
break;
@ -701,7 +704,7 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
}
}
y_ptr += s->frame.linesize[0];
y_ptr += s->frame->linesize[0];
}
break;
@ -714,8 +717,8 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
bytestream2_skip(&g2, chunk_size - 6);
} else {
for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height;
y_ptr += s->frame.linesize[0]) {
for (y_ptr = 0; y_ptr < s->frame->linesize[0] * s->avctx->height;
y_ptr += s->frame->linesize[0]) {
pixel_countdown = s->avctx->width;
pixel_ptr = 0;
@ -748,7 +751,7 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
"and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2));
if ((ret = av_frame_ref(data, &s->frame)) < 0)
if ((ret = av_frame_ref(data, s->frame)) < 0)
return ret;
*got_frame = 1;
@ -797,7 +800,7 @@ static av_cold int flic_decode_end(AVCodecContext *avctx)
{
FlicDecodeContext *s = avctx->priv_data;
av_frame_unref(&s->frame);
av_frame_free(&s->frame);
return 0;
}

View File

@ -375,6 +375,8 @@ static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
src += 3;
}
npal = *src++ + 1;
if (src_end - src < npal * 3)
return AVERROR_INVALIDDATA;
memcpy(pal, src, npal * 3); src += npal * 3;
if (sub_type != 2) {
for (i = 0; i < npal; i++) {
@ -490,7 +492,7 @@ static int g2m_load_cursor(AVCodecContext *avctx, G2MContext *c,
cursor_hot_y = bytestream2_get_byte(gb);
cursor_fmt = bytestream2_get_byte(gb);
cursor_stride = FFALIGN(cursor_w, c->cursor_fmt==1 ? 32 : 1) * 4;
cursor_stride = FFALIGN(cursor_w, cursor_fmt==1 ? 32 : 1) * 4;
if (cursor_w < 1 || cursor_w > 256 ||
cursor_h < 1 || cursor_h > 256) {

View File

@ -216,6 +216,13 @@ static av_cold int gif_encode_init(AVCodecContext *avctx)
return AVERROR(EINVAL);
}
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
s->lzw = av_mallocz(ff_lzw_encode_state_size);
s->buf = av_malloc(avctx->width*avctx->height*2);
s->tmpl = av_malloc(avctx->width);
@ -232,7 +239,6 @@ static int gif_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
GIFContext *s = avctx->priv_data;
AVFrame *const p = (AVFrame *)pict;
uint8_t *outbuf_ptr, *end;
const uint32_t *palette = NULL;
int ret;
@ -242,15 +248,12 @@ static int gif_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
outbuf_ptr = pkt->data;
end = pkt->data + pkt->size;
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
uint8_t *pal_exdata = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
if (!pal_exdata)
return AVERROR(ENOMEM);
memcpy(pal_exdata, p->data[1], AVPALETTE_SIZE);
palette = (uint32_t*)p->data[1];
memcpy(pal_exdata, pict->data[1], AVPALETTE_SIZE);
palette = (uint32_t*)pict->data[1];
}
gif_image_write_image(avctx, &outbuf_ptr, end, palette,
@ -276,6 +279,8 @@ static int gif_encode_close(AVCodecContext *avctx)
{
GIFContext *s = avctx->priv_data;
av_frame_free(&avctx->coded_frame);
av_freep(&s->lzw);
av_freep(&s->buf);
av_frame_free(&s->last_frame);

View File

@ -1839,7 +1839,7 @@ static int decode_update_thread_context(AVCodecContext *dst,
for (i = 0; h->DPB && i < MAX_PICTURE_COUNT; i++) {
unref_picture(h, &h->DPB[i]);
if (h1->DPB[i].f.data[0] &&
if (h1->DPB && h1->DPB[i].f.data[0] &&
(ret = ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
return ret;
}
@ -1957,6 +1957,10 @@ static int h264_frame_start(H264Context *h)
h->cur_pic_ptr = pic;
unref_picture(h, &h->cur_pic);
if (CONFIG_ERROR_RESILIENCE) {
h->er.cur_pic = NULL;
}
if ((ret = ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
return ret;
@ -3595,7 +3599,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
} else {
/* Shorten frame num gaps so we don't have to allocate reference
* frames just to throw them away */
if (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) {
if (h->frame_num != h->prev_frame_num) {
int unwrap_prev_frame_num = h->prev_frame_num;
int max_frame_num = 1 << h->sps.log2_max_frame_num;
@ -3668,7 +3672,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
}
}
while (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 && !h0->first_field &&
while (h->frame_num != h->prev_frame_num && !h0->first_field &&
h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",

View File

@ -562,6 +562,7 @@ int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice)
int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
{
int i, av_uninit(j);
int pps_count;
int current_ref_assigned = 0, err = 0;
Picture *av_uninit(pic);
@ -732,7 +733,15 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
print_short_term(h);
print_long_term(h);
if(err >= 0 && h->long_ref_count==0 && h->short_ref_count<=2 && h->pps.ref_count[0]<=2 + (h->picture_structure != PICT_FRAME) && h->cur_pic_ptr->f.pict_type == AV_PICTURE_TYPE_I){
pps_count = 0;
for (i = 0; i < FF_ARRAY_ELEMS(h->pps_buffers); i++)
pps_count += !!h->pps_buffers[i];
if ( err >= 0
&& h->long_ref_count==0
&& (h->short_ref_count<=2 || h->pps.ref_count[0] <= 1 && h->pps.ref_count[1] <= 1 && pps_count == 1)
&& h->pps.ref_count[0]<=2 + (h->picture_structure != PICT_FRAME)
&& h->cur_pic_ptr->f.pict_type == AV_PICTURE_TYPE_I){
h->cur_pic_ptr->sync |= 1;
if(!h->avctx->has_b_frames)
h->sync = 2;

View File

@ -109,7 +109,7 @@ static int pic_arrays_init(HEVCContext *s)
if (!s->skip_flag || !s->tab_ct_depth)
goto fail;
s->tab_ipm = av_malloc(pic_size_in_min_pu);
s->tab_ipm = av_mallocz(pic_size_in_min_pu);
s->cbf_luma = av_malloc(pic_width_in_min_tu * pic_height_in_min_tu);
s->is_pcm = av_malloc(pic_size_in_min_pu);
if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
@ -602,6 +602,11 @@ static int hls_slice_header(HEVCContext *s)
sh->entry_point_offset = av_malloc(sh->num_entry_point_offsets * sizeof(int));
sh->offset = av_malloc(sh->num_entry_point_offsets * sizeof(int));
sh->size = av_malloc(sh->num_entry_point_offsets * sizeof(int));
if (!sh->entry_point_offset || !sh->offset || !sh->size) {
sh->num_entry_point_offsets = 0;
av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
return AVERROR(ENOMEM);
}
for (i = 0; i < sh->num_entry_point_offsets; i++) {
int val = 0;
for (j = 0; j < segments; j++) {
@ -630,14 +635,24 @@ static int hls_slice_header(HEVCContext *s)
}
// Inferred parameters
sh->slice_qp = 26 + s->pps->pic_init_qp_minus26 + sh->slice_qp_delta;
sh->slice_qp = 26U + s->pps->pic_init_qp_minus26 + sh->slice_qp_delta;
if (sh->slice_qp > 51 ||
sh->slice_qp < -s->sps->qp_bd_offset) {
av_log(s->avctx, AV_LOG_ERROR,
"The slice_qp %d is outside the valid range "
"[%d, 51].\n",
sh->slice_qp,
-s->sps->qp_bd_offset);
return AVERROR_INVALIDDATA;
}
sh->slice_ctb_addr_rs = sh->slice_segment_addr;
s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
if (!s->pps->cu_qp_delta_enabled_flag)
s->HEVClc->qp_y = ((s->sh.slice_qp + 52 + 2 * s->sps->qp_bd_offset) %
(52 + s->sps->qp_bd_offset)) - s->sps->qp_bd_offset;
s->HEVClc->qp_y = FFUMOD(s->sh.slice_qp + 52 + 2 * s->sps->qp_bd_offset,
52 + s->sps->qp_bd_offset) - s->sps->qp_bd_offset;
s->slice_initialized = 1;
@ -1775,6 +1790,11 @@ static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
int y_ctb = 0;
int ctb_addr_ts = s->pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
return AVERROR_INVALIDDATA;
}
while (more_data && ctb_addr_ts < s->sps->ctb_size) {
int ctb_addr_rs = s->pps->ctb_addr_ts_to_rs[ctb_addr_ts];
@ -2584,7 +2604,8 @@ static av_cold int hevc_decode_free(AVCodecContext *avctx)
pic_arrays_free(s);
av_freep(&lc->edge_emu_buffer);
if (lc)
av_freep(&lc->edge_emu_buffer);
av_freep(&s->md5_ctx);
for(i=0; i < s->nals_allocated; i++) {
@ -2624,6 +2645,8 @@ static av_cold int hevc_decode_free(AVCodecContext *avctx)
av_freep(&s->sList[i]);
}
}
if (s->HEVClc == s->HEVClcList[0])
s->HEVClc = NULL;
av_freep(&s->HEVClcList[0]);
for (i = 0; i < s->nals_allocated; i++)

View File

@ -155,7 +155,8 @@ void ff_hevc_set_qPy(HEVCContext *s, int xC, int yC, int xBase, int yBase, int l
if (s->HEVClc->tu.cu_qp_delta != 0) {
int off = s->sps->qp_bd_offset;
s->HEVClc->qp_y = ((qp_y + s->HEVClc->tu.cu_qp_delta + 52 + 2 * off) % (52 + off)) - off;
s->HEVClc->qp_y = FFUMOD(qp_y + s->HEVClc->tu.cu_qp_delta + 52 + 2 * off,
52 + off) - off;
} else
s->HEVClc->qp_y = qp_y;
}
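
Both qp_y computations above move from the plain C % operator to FFUMOD because % is a truncating remainder: a negative left operand yields a negative result, which would put qp_y back outside the [-qp_bd_offset, 51] range the slice header check enforces. FFUMOD from libavutil/common.h is a floor-style modulo that always lands in [0, m). A standalone demonstration, with local copies of the macros so it compiles on its own (they mirror the libavutil/common.h definitions):

    #include <stdio.h>

    #define FFUDIV(a, b) (((a) > 0 ? (a) : (a) - (b) + 1) / (b))
    #define FFUMOD(a, b) ((a) - (b) * FFUDIV(a, b))

    int main(void)
    {
        int off = 12;    /* qp_bd_offset for 10-bit content */
        int sum = -10;   /* e.g. qp_y + cu_qp_delta + 52 + 2*off gone negative */
        printf("%%     : %d\n", sum % (52 + off));      /* -10, still negative   */
        printf("FFUMOD: %d\n", FFUMOD(sum, 52 + off));  /*  54, wrapped in range */
        return 0;
    }

After subtracting off again, the %-based expression would leave qp_y at -22, below -qp_bd_offset, while the FFUMOD form yields 42.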

View File

@ -621,6 +621,12 @@ int ff_hevc_decode_nal_sps(HEVCContext *s)
goto err;
}
if (!s->vps_list[sps->vps_id]) {
av_log(s->avctx, AV_LOG_ERROR, "VPS does not exist \n");
ret = AVERROR_INVALIDDATA;
goto err;
}
sps->max_sub_layers = get_bits(gb, 3) + 1;
if (sps->max_sub_layers > MAX_SUB_LAYERS) {
av_log(s->avctx, AV_LOG_ERROR, "sps_max_sub_layers out of range: %d\n",

View File

@ -51,7 +51,7 @@ static void FUNC(transquant_bypass4x4)(uint8_t *_dst, int16_t *coeffs, ptrdiff_t
for (y = 0; y < 4; y++) {
for (x = 0; x < 4; x++) {
dst[x] += *coeffs;
dst[x] = av_clip_pixel(dst[x] + *coeffs);
coeffs++;
}
dst += stride;
@ -67,7 +67,7 @@ static void FUNC(transquant_bypass8x8)(uint8_t *_dst, int16_t *coeffs, ptrdiff_t
for (y = 0; y < 8; y++) {
for (x = 0; x < 8; x++) {
dst[x] += *coeffs;
dst[x] = av_clip_pixel(dst[x] + *coeffs);
coeffs++;
}
dst += stride;
@ -82,7 +82,7 @@ static void FUNC(transquant_bypass16x16)(uint8_t *_dst, int16_t *coeffs, ptrdiff
for (y = 0; y < 16; y++) {
for (x = 0; x < 16; x++) {
dst[x] += *coeffs;
dst[x] = av_clip_pixel(dst[x] + *coeffs);
coeffs++;
}
dst += stride;
@ -98,7 +98,7 @@ static void FUNC(transquant_bypass32x32)(uint8_t *_dst, int16_t *coeffs, ptrdiff
for (y = 0; y < 32; y++) {
for (x = 0; x < 32; x++) {
dst[x] += *coeffs;
dst[x] = av_clip_pixel(dst[x] + *coeffs);
coeffs++;
}
dst += stride;
@ -391,7 +391,7 @@ static void FUNC(sao_band_filter)(uint8_t *_dst, uint8_t *_src,
offset_table[(k + sao_left_class) & 31] = sao_offset_val[k + 1];
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++)
dst[x] = av_clip_pixel(src[x] + offset_table[av_clip_pixel(src[x] >> shift)]);
dst[x] = av_clip_pixel(src[x] + offset_table[src[x] >> shift]);
dst += stride;
src += stride;
}
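
In the transquant-bypass paths above, the bare dst[x] += *coeffs could push a sample outside the legal range for the current bit depth, which the added av_clip_pixel() prevents. A stand-in for what that clip amounts to in a 10-bit build of this template — BIT_DEPTH and the pixel typedef normally come from the template headers, and the helper below is only illustrative, not the in-tree macro:

    #include <stdint.h>

    #define BIT_DEPTH 10
    typedef uint16_t pixel;                       /* template pixel type for >8-bit builds */

    /* clamp a reconstructed value into [0, 2^BIT_DEPTH - 1] */
    static inline pixel clip_pixel_demo(int v)
    {
        const int pmax = (1 << BIT_DEPTH) - 1;    /* 1023 for 10 bit */
        return v < 0 ? 0 : v > pmax ? pmax : (pixel)v;
    }

    /* e.g. dst[x] = 1020 and *coeffs = 37: the unclipped sum 1057 is not a
     * valid 10-bit sample, while clip_pixel_demo(1057) returns 1023. */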

View File

@ -78,7 +78,6 @@ typedef struct HYuvContext {
uint32_t bits[3][256];
uint32_t pix_bgr_map[1<<VLC_BITS];
VLC vlc[6]; //Y,U,V,YY,YU,YV
AVFrame picture;
uint8_t *bitstream_buffer;
unsigned int bitstream_buffer_size;
DSPContext dsp;

View File

@ -256,7 +256,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
ff_huffyuv_common_init(avctx);
memset(s->vlc, 0, 3 * sizeof(VLC));
avcodec_get_frame_defaults(&s->picture);
s->interlaced = s->height > 288;
s->bgr32 = 1;

View File

@ -156,7 +156,12 @@ static av_cold int encode_init(AVCodecContext *avctx)
}
s->version = 2;
avctx->coded_frame = &s->picture;
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
switch (avctx->pix_fmt) {
case AV_PIX_FMT_YUV420P:
@ -446,16 +451,12 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
AVFrame * const p = &s->picture;
const AVFrame * const p = pict;
int i, j, size = 0, ret;
if ((ret = ff_alloc_packet2(avctx, pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0)
return ret;
*p = *pict;
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
if (s->context) {
for (i = 0; i < 3; i++) {
ff_huff_gen_len_table(s->len[i], s->stats[i]);
@ -681,6 +682,8 @@ static av_cold int encode_end(AVCodecContext *avctx)
av_freep(&avctx->extradata);
av_freep(&avctx->stats_out);
av_frame_free(&avctx->coded_frame);
return 0;
}

View File

@ -318,6 +318,16 @@ static int extract_header(AVCodecContext *const avctx,
return 0;
}
static av_cold int decode_end(AVCodecContext *avctx)
{
IffContext *s = avctx->priv_data;
av_frame_free(&s->frame);
av_freep(&s->planebuf);
av_freep(&s->ham_buf);
av_freep(&s->ham_palbuf);
return 0;
}
static av_cold int decode_init(AVCodecContext *avctx)
{
IffContext *s = avctx->priv_data;
@ -360,8 +370,10 @@ static av_cold int decode_init(AVCodecContext *avctx)
s->bpp = avctx->bits_per_coded_sample;
s->frame = av_frame_alloc();
if (!s->frame)
if (!s->frame) {
decode_end(avctx);
return AVERROR(ENOMEM);
}
if ((err = extract_header(avctx, NULL)) < 0)
return err;
@ -858,16 +870,6 @@ static int decode_frame(AVCodecContext *avctx,
return buf_size;
}
static av_cold int decode_end(AVCodecContext *avctx)
{
IffContext *s = avctx->priv_data;
av_frame_free(&s->frame);
av_freep(&s->planebuf);
av_freep(&s->ham_buf);
av_freep(&s->ham_palbuf);
return 0;
}
#if CONFIG_IFF_ILBM_DECODER
AVCodec ff_iff_ilbm_decoder = {
.name = "iff",

View File

@ -171,36 +171,36 @@ static int ir2_decode_frame(AVCodecContext *avctx,
if (s->decode_delta) { /* intraframe */
if ((ret = ir2_decode_plane(s, avctx->width, avctx->height,
s->picture->data[0], s->picture->linesize[0],
p->data[0], p->linesize[0],
ir2_luma_table)) < 0)
return ret;
/* swapped U and V */
if ((ret = ir2_decode_plane(s, avctx->width >> 2, avctx->height >> 2,
s->picture->data[2], s->picture->linesize[2],
p->data[2], p->linesize[2],
ir2_luma_table)) < 0)
return ret;
if ((ret = ir2_decode_plane(s, avctx->width >> 2, avctx->height >> 2,
s->picture->data[1], s->picture->linesize[1],
p->data[1], p->linesize[1],
ir2_luma_table)) < 0)
return ret;
} else { /* interframe */
if ((ret = ir2_decode_plane_inter(s, avctx->width, avctx->height,
s->picture->data[0], s->picture->linesize[0],
p->data[0], p->linesize[0],
ir2_luma_table)) < 0)
return ret;
/* swapped U and V */
if ((ret = ir2_decode_plane_inter(s, avctx->width >> 2, avctx->height >> 2,
s->picture->data[2], s->picture->linesize[2],
p->data[2], p->linesize[2],
ir2_luma_table)) < 0)
return ret;
if ((ret = ir2_decode_plane_inter(s, avctx->width >> 2, avctx->height >> 2,
s->picture->data[1], s->picture->linesize[1],
p->data[1], p->linesize[1],
ir2_luma_table)) < 0)
return ret;
}
if ((ret = av_frame_ref(picture, s->picture)) < 0)
if ((ret = av_frame_ref(picture, p)) < 0)
return ret;
*got_frame = 1;

View File

@ -284,6 +284,7 @@ static int decode_band_hdr(IVI45DecContext *ctx, IVIBandDesc *band,
{
int plane, band_num, indx, transform_id, scan_indx;
int i;
int quant_mat;
plane = get_bits(&ctx->gb, 2);
band_num = get_bits(&ctx->gb, 4);
@ -382,18 +383,17 @@ static int decode_band_hdr(IVI45DecContext *ctx, IVIBandDesc *band,
band->scan = scan_index_to_tab[scan_indx];
band->scan_size = band->blk_size;
band->quant_mat = get_bits(&ctx->gb, 5);
if (band->quant_mat >= FF_ARRAY_ELEMS(quant_index_to_tab)) {
if (band->quant_mat == 31)
av_log(avctx, AV_LOG_ERROR,
"Custom quant matrix encountered!\n");
else
avpriv_request_sample(avctx, "Quantization matrix %d",
band->quant_mat);
band->quant_mat = -1;
quant_mat = get_bits(&ctx->gb, 5);
if (quant_mat == 31) {
av_log(avctx, AV_LOG_ERROR, "Custom quant matrix encountered!\n");
return AVERROR_INVALIDDATA;
}
if (quant_mat >= FF_ARRAY_ELEMS(quant_index_to_tab)) {
avpriv_request_sample(avctx, "Quantization matrix %d",
quant_mat);
return AVERROR_INVALIDDATA;
}
band->quant_mat = quant_mat;
} else {
if (old_blk_size != band->blk_size) {
av_log(avctx, AV_LOG_ERROR,
@ -401,10 +401,6 @@ static int decode_band_hdr(IVI45DecContext *ctx, IVIBandDesc *band,
"inherited\n");
return AVERROR_INVALIDDATA;
}
if (band->quant_mat < 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid quant_mat inherited\n");
return AVERROR_INVALIDDATA;
}
}
if (quant_index_to_tab[band->quant_mat] > 4 && band->blk_size == 4) {
av_log(avctx, AV_LOG_ERROR, "Invalid quant matrix for 4x4 block encountered!\n");

View File

@ -60,7 +60,7 @@ typedef struct {
typedef struct {
AVCodecContext *avctx;
AVFrame picture;
const AVFrame *picture;
int width, height; ///< image width and height
uint8_t cbps[4]; ///< bits per sample in particular components
@ -390,18 +390,18 @@ static void copy_frame(Jpeg2000EncoderContext *s)
for (compno = 0; compno < s->ncomponents; compno++){
Jpeg2000Component *comp = tile->comp + compno;
int *dst = comp->i_data;
line = s->picture.data[compno]
+ comp->coord[1][0] * s->picture.linesize[compno]
line = s->picture->data[compno]
+ comp->coord[1][0] * s->picture->linesize[compno]
+ comp->coord[0][0];
for (y = comp->coord[1][0]; y < comp->coord[1][1]; y++){
uint8_t *ptr = line;
for (x = comp->coord[0][0]; x < comp->coord[0][1]; x++)
*dst++ = *ptr++ - (1 << 7);
line += s->picture.linesize[compno];
line += s->picture->linesize[compno];
}
}
} else{
line = s->picture.data[0] + tile->comp[0].coord[1][0] * s->picture.linesize[0]
line = s->picture->data[0] + tile->comp[0].coord[1][0] * s->picture->linesize[0]
+ tile->comp[0].coord[0][0] * s->ncomponents;
i = 0;
@ -412,7 +412,7 @@ static void copy_frame(Jpeg2000EncoderContext *s)
tile->comp[compno].i_data[i] = *ptr++ - (1 << 7);
}
}
line += s->picture.linesize[0];
line += s->picture->linesize[0];
}
}
}
@ -927,10 +927,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
s->buf = s->buf_start = pkt->data;
s->buf_end = pkt->data + pkt->size;
s->picture = *pict;
avctx->coded_frame= &s->picture;
s->picture = pict;
s->lambda = s->picture.quality * LAMBDA_SCALE;
s->lambda = s->picture->quality * LAMBDA_SCALE;
copy_frame(s);
reinit(s);

View File

@ -889,6 +889,10 @@ static int jpeg2000_decode_packets(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile
prcx = ff_jpeg2000_ceildivpow2(x, reducedresno) >> rlevel->log2_prec_width;
prcy = ff_jpeg2000_ceildivpow2(y, reducedresno) >> rlevel->log2_prec_height;
precno = prcx + rlevel->num_precincts_x * prcy;
if (prcx >= rlevel->num_precincts_x || prcy >= rlevel->num_precincts_y)
return AVERROR_PATCHWELCOME;
for (layno = 0; layno < tile->codsty[0].nlayers; layno++) {
if ((ret = jpeg2000_decode_packet(s, codsty, rlevel,
precno, layno,

View File

@ -33,7 +33,6 @@
typedef struct JpeglsContext {
AVCodecContext *avctx;
AVFrame picture;
} JpeglsContext;
typedef struct JLSState {

View File

@ -148,6 +148,8 @@ static inline int ls_get_code_runterm(GetBitContext *gb, JLSState *state,
ret = ret >> 1;
}
if(FFABS(ret) > 0xFFFF)
return -0x10000;
/* update state */
state->A[Q] += FFABS(ret) - RItype;
ret *= state->twonear;
@ -279,9 +281,9 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near,
JLSState *state;
int off = 0, stride = 1, width, shift, ret = 0;
zero = av_mallocz(s->picture.linesize[0]);
zero = av_mallocz(s->picture_ptr->linesize[0]);
last = zero;
cur = s->picture.data[0];
cur = s->picture_ptr->data[0];
state = av_mallocz(sizeof(JLSState));
/* initialize JPEG-LS state from JPEG parameters */
@ -328,7 +330,7 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near,
t = *((uint16_t *)last);
}
last = cur;
cur += s->picture.linesize[0];
cur += s->picture_ptr->linesize[0];
if (s->restart_interval && !--s->restart_count) {
align_get_bits(&s->gb);
@ -339,7 +341,7 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near,
int j;
int Rc[3] = { 0, 0, 0 };
stride = (s->nb_components > 1) ? 3 : 1;
memset(cur, 0, s->picture.linesize[0]);
memset(cur, 0, s->picture_ptr->linesize[0]);
width = s->width * stride;
for (i = 0; i < s->height; i++) {
for (j = 0; j < stride; j++) {
@ -353,7 +355,7 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near,
}
}
last = cur;
cur += s->picture.linesize[0];
cur += s->picture_ptr->linesize[0];
}
} else if (ilv == 2) { /* sample interleaving */
avpriv_report_missing_feature(s->avctx, "Sample interleaved images");
@ -367,7 +369,7 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near,
w = s->width * s->nb_components;
if (s->bits <= 8) {
uint8_t *src = s->picture.data[0];
uint8_t *src = s->picture_ptr->data[0];
for (i = 0; i < s->height; i++) {
switch(s->xfrm) {
@ -402,7 +404,7 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near,
}
break;
}
src += s->picture.linesize[0];
src += s->picture_ptr->linesize[0];
}
}else
avpriv_report_missing_feature(s->avctx, "16bit xfrm");
@ -414,20 +416,20 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near,
w = s->width * s->nb_components;
if (s->bits <= 8) {
uint8_t *src = s->picture.data[0];
uint8_t *src = s->picture_ptr->data[0];
for (i = 0; i < s->height; i++) {
for (x = off; x < w; x += stride)
src[x] <<= shift;
src += s->picture.linesize[0];
src += s->picture_ptr->linesize[0];
}
} else {
uint16_t *src = (uint16_t *)s->picture.data[0];
uint16_t *src = (uint16_t *)s->picture_ptr->data[0];
for (i = 0; i < s->height; i++) {
for (x = 0; x < w; x++)
src[x] <<= shift;
src += s->picture.linesize[0] / 2;
src += s->picture_ptr->linesize[0] / 2;
}
}
}

View File

@ -249,8 +249,7 @@ static void ls_store_lse(JLSState *state, PutBitContext *pb)
static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
JpeglsContext *const s = avctx->priv_data;
AVFrame *const p = &s->picture;
const AVFrame *const p = pict;
const int near = avctx->prediction_method;
PutBitContext pb, pb2;
GetBitContext gb;
@ -259,10 +258,6 @@ static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt,
int i, size, ret;
int comps;
*p = *pict;
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
if (avctx->pix_fmt == AV_PIX_FMT_GRAY8 ||
avctx->pix_fmt == AV_PIX_FMT_GRAY16)
comps = 1;
@ -349,7 +344,7 @@ static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt,
Rc[j] = last[j];
}
last = cur;
cur += s->picture.linesize[0];
cur += p->linesize[0];
}
} else if (avctx->pix_fmt == AV_PIX_FMT_BGR24) {
int j, width;
@ -363,7 +358,7 @@ static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt,
Rc[j] = last[j];
}
last = cur;
cur += s->picture.linesize[0];
cur += p->linesize[0];
}
}
@ -403,12 +398,20 @@ static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt,
return 0;
}
static av_cold int encode_close(AVCodecContext *avctx)
{
av_frame_free(&avctx->coded_frame);
return 0;
}
static av_cold int encode_init_ls(AVCodecContext *ctx)
{
JpeglsContext *c = (JpeglsContext *)ctx->priv_data;
ctx->coded_frame = av_frame_alloc();
if (!ctx->coded_frame)
return AVERROR(ENOMEM);
c->avctx = ctx;
ctx->coded_frame = &c->picture;
ctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
ctx->coded_frame->key_frame = 1;
if (ctx->pix_fmt != AV_PIX_FMT_GRAY8 &&
ctx->pix_fmt != AV_PIX_FMT_GRAY16 &&
@ -426,8 +429,8 @@ AVCodec ff_jpegls_encoder = {
.long_name = NULL_IF_CONFIG_SMALL("JPEG-LS"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_JPEGLS,
.priv_data_size = sizeof(JpeglsContext),
.init = encode_init_ls,
.close = encode_close,
.encode2 = encode_picture_ls,
.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB24,

View File

@ -41,11 +41,13 @@ typedef struct JvContext {
static av_cold int decode_init(AVCodecContext *avctx)
{
JvContext *s = avctx->priv_data;
avctx->pix_fmt = AV_PIX_FMT_PAL8;
ff_dsputil_init(&s->dsp, avctx);
s->frame = av_frame_alloc();
if (!s->frame)
return AVERROR(ENOMEM);
avctx->pix_fmt = AV_PIX_FMT_PAL8;
ff_dsputil_init(&s->dsp, avctx);
return 0;
}

View File

@ -38,7 +38,7 @@ static void decode_flush(AVCodecContext *avctx)
{
KgvContext * const c = avctx->priv_data;
av_frame_unref(c->prev);
av_frame_free(&c->prev);
}
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
@ -157,13 +157,13 @@ static av_cold int decode_init(AVCodecContext *avctx)
{
KgvContext * const c = avctx->priv_data;
avctx->pix_fmt = AV_PIX_FMT_RGB555;
avctx->flags |= CODEC_FLAG_EMU_EDGE;
c->prev = av_frame_alloc();
if (!c->prev)
return AVERROR(ENOMEM);
avctx->pix_fmt = AV_PIX_FMT_RGB555;
avctx->flags |= CODEC_FLAG_EMU_EDGE;
return 0;
}

View File

@ -135,6 +135,13 @@ static av_cold int encode_init(AVCodecContext *avctx)
if (!avctx->extradata)
return AVERROR(ENOMEM);
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
c->compression = avctx->compression_level == FF_COMPRESSION_DEFAULT ?
COMP_ZLIB_NORMAL :
av_clip(avctx->compression_level, 0, 9);
@ -176,6 +183,8 @@ static av_cold int encode_end(AVCodecContext *avctx)
av_freep(&avctx->extradata);
deflateEnd(&c->zstream);
av_frame_free(&avctx->coded_frame);
return 0;
}

View File

@ -172,7 +172,7 @@ static inline void libopenjpeg_copy_to_packed16(AVFrame *picture, opj_image_t *i
int index, x, y, c;
int adjust[4];
for (x = 0; x < image->numcomps; x++)
adjust[x] = FFMAX(FFMIN(16 - image->comps[x].prec, 8), 0);
adjust[x] = FFMAX(FFMIN(av_pix_fmt_desc_get(picture->format)->comp[x].depth_minus1 + 1 - image->comps[x].prec, 8), 0);
for (y = 0; y < picture->height; y++) {
index = y*picture->width;
@ -209,7 +209,7 @@ static inline void libopenjpeg_copyto16(AVFrame *picture, opj_image_t *image) {
int index, x, y;
int adjust[4];
for (x = 0; x < image->numcomps; x++)
adjust[x] = FFMAX(FFMIN(16 - image->comps[x].prec, 8), 0);
adjust[x] = FFMAX(FFMIN(av_pix_fmt_desc_get(picture->format)->comp[x].depth_minus1 + 1 - image->comps[x].prec, 8), 0);
for (index = 0; index < image->numcomps; index++) {
comp_data = image->comps[index].data;
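
The adjust[] change in both copy helpers replaces a hard-coded 16 with the output pixel format's actual bit depth (comp[x].depth_minus1 + 1 from av_pix_fmt_desc_get()). Assuming adjust[x] is applied as a left shift when decoded component samples are written into the frame, as the surrounding copy loops suggest: for a 10-bit format such as AV_PIX_FMT_YUV420P10 with a 10-bit codestream component (prec == 10), the old formula gives FFMAX(FFMIN(16 - 10, 8), 0) = 6, scaling samples up past the format's nominal 1023 maximum, while the new one gives FFMAX(FFMIN(10 - 10, 8), 0) = 0 and leaves them as decoded.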

View File

@ -480,7 +480,7 @@ static int libopenjpeg_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
opj_cio_t *stream = ctx->stream;
int cpyresult = 0;
int ret, len;
AVFrame gbrframe;
AVFrame *gbrframe;
switch (avctx->pix_fmt) {
case AV_PIX_FMT_RGB24:
@ -501,18 +501,22 @@ static int libopenjpeg_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
case AV_PIX_FMT_GBRP12:
case AV_PIX_FMT_GBRP14:
case AV_PIX_FMT_GBRP16:
gbrframe = *frame;
gbrframe.data[0] = frame->data[2]; // swap to be rgb
gbrframe.data[1] = frame->data[0];
gbrframe.data[2] = frame->data[1];
gbrframe.linesize[0] = frame->linesize[2];
gbrframe.linesize[1] = frame->linesize[0];
gbrframe.linesize[2] = frame->linesize[1];
gbrframe = av_frame_alloc();
if (!gbrframe)
return AVERROR(ENOMEM);
av_frame_ref(gbrframe, frame);
gbrframe->data[0] = frame->data[2]; // swap to be rgb
gbrframe->data[1] = frame->data[0];
gbrframe->data[2] = frame->data[1];
gbrframe->linesize[0] = frame->linesize[2];
gbrframe->linesize[1] = frame->linesize[0];
gbrframe->linesize[2] = frame->linesize[1];
if (avctx->pix_fmt == AV_PIX_FMT_GBR24P) {
cpyresult = libopenjpeg_copy_unpacked8(avctx, &gbrframe, image);
cpyresult = libopenjpeg_copy_unpacked8(avctx, gbrframe, image);
} else {
cpyresult = libopenjpeg_copy_unpacked16(avctx, &gbrframe, image);
cpyresult = libopenjpeg_copy_unpacked16(avctx, gbrframe, image);
}
av_frame_free(&gbrframe);
break;
case AV_PIX_FMT_GRAY8:
case AV_PIX_FMT_YUV410P:

View File

@ -400,7 +400,7 @@ static const AVOption libopus_options[] = {
{ "voip", "Favor improved speech intelligibility", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_VOIP }, 0, 0, FLAGS, "application" },
{ "audio", "Favor faithfulness to the input", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_AUDIO }, 0, 0, FLAGS, "application" },
{ "lowdelay", "Restrict to only the lowest delay modes", 0, AV_OPT_TYPE_CONST, { .i64 = OPUS_APPLICATION_RESTRICTED_LOWDELAY }, 0, 0, FLAGS, "application" },
{ "frame_duration", "Duration of a frame in milliseconds", OFFSET(frame_duration), AV_OPT_TYPE_FLOAT, { .dbl = 10.0 }, 2.5, 60.0, FLAGS },
{ "frame_duration", "Duration of a frame in milliseconds", OFFSET(frame_duration), AV_OPT_TYPE_FLOAT, { .dbl = 20.0 }, 2.5, 60.0, FLAGS },
{ "packet_loss", "Expected packet loss percentage", OFFSET(packet_loss), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 100, FLAGS },
{ "vbr", "Variable bit rate mode", OFFSET(vbr), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 2, FLAGS, "vbr" },
{ "off", "Use constant bit rate", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "vbr" },

View File

@ -47,9 +47,6 @@ typedef struct SchroEncoderParams {
/** Schroedinger frame format */
SchroFrameFormat frame_format;
/** frame being encoded */
AVFrame picture;
/** frame size */
int frame_size;
@ -162,7 +159,9 @@ static av_cold int libschroedinger_encode_init(AVCodecContext *avctx)
avctx->width,
avctx->height);
avctx->coded_frame = &p_schro_params->picture;
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
if (!avctx->gop_size) {
schro_encoder_setting_set_double(p_schro_params->encoder,
@ -427,6 +426,8 @@ static int libschroedinger_encode_close(AVCodecContext *avctx)
/* Free the video format structure. */
av_freep(&p_schro_params->format);
av_frame_free(&avctx->coded_frame);
return 0;
}

View File

@ -164,7 +164,7 @@ static int utvideo_decode_frame(AVCodecContext *avctx, void *data,
}
*got_frame = 1;
*(AVFrame *)data = *pic;
av_frame_move_ref((AVFrame*)data, pic);
return avpkt->size;
}
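
The hunk above replaces a raw struct assignment into the caller's output frame with av_frame_move_ref(): a shallow AVFrame copy duplicates the buffer pointers without taking additional references, leaving two apparent owners of the same data, whereas the move hands the references to the destination and resets the source. A minimal standalone sketch of those semantics (demo() and its flow are illustrative, not code from this wrapper):

    #include <libavutil/frame.h>

    int demo(void)
    {
        AVFrame *src = av_frame_alloc();
        AVFrame *dst = av_frame_alloc();
        if (!src || !dst) {
            av_frame_free(&src);
            av_frame_free(&dst);
            return -1;
        }
        /* ...imagine src was filled by the decoder's get_buffer path... */
        av_frame_move_ref(dst, src);   /* dst now owns whatever src referenced; src is reset */
        av_frame_free(&src);           /* safe: src no longer references any buffers */
        av_frame_free(&dst);           /* releases the moved buffers exactly once */
        return 0;
    }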

View File

@ -41,7 +41,6 @@
typedef struct OggVorbisEncContext {
AVClass *av_class; /**< class for AVOptions */
AVFrame frame;
vorbis_info vi; /**< vorbis_info used during init */
vorbis_dsp_state vd; /**< DSP state used for analysis */
vorbis_block vb; /**< vorbis_block used for analysis */

View File

@ -44,7 +44,6 @@ typedef struct X264Context {
x264_picture_t pic;
uint8_t *sei;
int sei_size;
AVFrame out_pic;
char *preset;
char *tune;
char *profile;
@ -208,20 +207,20 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
switch (pic_out.i_type) {
case X264_TYPE_IDR:
case X264_TYPE_I:
x4->out_pic.pict_type = AV_PICTURE_TYPE_I;
ctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
break;
case X264_TYPE_P:
x4->out_pic.pict_type = AV_PICTURE_TYPE_P;
ctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
break;
case X264_TYPE_B:
case X264_TYPE_BREF:
x4->out_pic.pict_type = AV_PICTURE_TYPE_B;
ctx->coded_frame->pict_type = AV_PICTURE_TYPE_B;
break;
}
pkt->flags |= AV_PKT_FLAG_KEY*pic_out.b_keyframe;
if (ret)
x4->out_pic.quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;
ctx->coded_frame->quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;
*got_packet = ret;
return 0;
@ -237,6 +236,8 @@ static av_cold int X264_close(AVCodecContext *avctx)
if (x4->enc)
x264_encoder_close(x4->enc);
av_frame_free(&avctx->coded_frame);
return 0;
}
@ -570,7 +571,9 @@ static av_cold int X264_init(AVCodecContext *avctx)
if (!x4->enc)
return -1;
avctx->coded_frame = &x4->out_pic;
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
x264_nal_t *nal;

View File

@ -45,7 +45,6 @@ typedef struct XavsContext {
xavs_picture_t pic;
uint8_t *sei;
int sei_size;
AVFrame out_pic;
int end_of_stream;
float crf;
int cqp;
@ -159,7 +158,7 @@ static int XAVS_frame(AVCodecContext *ctx, AVPacket *pkt,
return 0;
}
x4->out_pic.pts = pic_out.i_pts;
avctx->coded_frame->pts = pic_out.i_pts;
pkt->pts = pic_out.i_pts;
if (ctx->has_b_frames) {
if (!x4->out_frame_count)
@ -172,25 +171,25 @@ static int XAVS_frame(AVCodecContext *ctx, AVPacket *pkt,
switch (pic_out.i_type) {
case XAVS_TYPE_IDR:
case XAVS_TYPE_I:
x4->out_pic.pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
break;
case XAVS_TYPE_P:
x4->out_pic.pict_type = AV_PICTURE_TYPE_P;
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
break;
case XAVS_TYPE_B:
case XAVS_TYPE_BREF:
x4->out_pic.pict_type = AV_PICTURE_TYPE_B;
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_B;
break;
}
/* There is no IDR frame in AVS JiZhun */
/* Sequence header is used as a flag */
if (pic_out.i_type == XAVS_TYPE_I) {
x4->out_pic.key_frame = 1;
avctx->coded_frame->key_frame = 1;
pkt->flags |= AV_PKT_FLAG_KEY;
}
x4->out_pic.quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;
avctx->coded_frame->quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;
x4->out_frame_count++;
*got_packet = ret;
@ -208,6 +207,8 @@ static av_cold int XAVS_close(AVCodecContext *avctx)
if (x4->enc)
xavs_encoder_close(x4->enc);
av_frame_free(&avctx->coded_frame);
return 0;
}
@ -355,7 +356,10 @@ static av_cold int XAVS_init(AVCodecContext *avctx)
if (!(x4->pts_buffer = av_mallocz((avctx->max_b_frames+1) * sizeof(*x4->pts_buffer))))
return AVERROR(ENOMEM);
avctx->coded_frame = &x4->out_pic;
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
/* TAG: Do we have GLOBAL HEADER in AVS */
/* We Have PPS and SPS in AVS */
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {

View File

@ -56,7 +56,6 @@ struct xvid_context {
int me_flags; /**< Motion Estimation flags */
int qscale; /**< Do we use constant scale? */
int quicktime_format; /**< Are we in a QT-based format? */
AVFrame encoded_picture; /**< Encoded frame information */
char *twopassbuffer; /**< Character buffer for two-pass */
char *old_twopassbuffer; /**< Old character buffer (two-pass) */
char *twopassfile; /**< second pass temp file name */
@ -651,7 +650,9 @@ static av_cold int xvid_encode_init(AVCodecContext *avctx) {
}
x->encoder_handle = xvid_enc_create.handle;
avctx->coded_frame = &x->encoded_picture;
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
return 0;
fail:
@ -665,7 +666,7 @@ static int xvid_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
int xerr, i, ret, user_packet = !!pkt->data;
char *tmp;
struct xvid_context *x = avctx->priv_data;
AVFrame *p = &x->encoded_picture;
AVFrame *p = avctx->coded_frame;
int mb_width = (avctx->width + 15) / 16;
int mb_height = (avctx->height + 15) / 16;
@ -678,7 +679,6 @@ static int xvid_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
/* Start setting up the frame */
xvid_enc_frame.version = XVID_VERSION;
xvid_enc_stats.version = XVID_VERSION;
*p = *picture;
/* Let Xvid know where to put the frame. */
xvid_enc_frame.bitstream = pkt->data;

View File

@ -20,7 +20,7 @@
*/
#include "libavutil/common.h"
#include "libavutil/lls.h"
#include "libavutil/lls2.h"
#define LPC_USE_DOUBLE
#include "lpc.h"
@ -208,7 +208,7 @@ int ff_lpc_calc_coefs(LPCContext *s,
}
if (lpc_type == FF_LPC_TYPE_CHOLESKY) {
LLSModel m[2];
LLSModel2 m[2];
LOCAL_ALIGNED(32, double, var, [FFALIGN(MAX_LPC_ORDER+1,4)]);
double av_uninit(weight);
memset(var, 0, FFALIGN(MAX_LPC_ORDER+1,4)*sizeof(*var));
@ -217,7 +217,7 @@ int ff_lpc_calc_coefs(LPCContext *s,
m[0].coeff[max_order-1][j] = -lpc[max_order-1][j];
for(; pass<lpc_passes; pass++){
avpriv_init_lls(&m[pass&1], max_order);
avpriv_init_lls2(&m[pass&1], max_order);
weight=0;
for(i=max_order; i<blocksize; i++){
@ -238,7 +238,7 @@ int ff_lpc_calc_coefs(LPCContext *s,
m[pass&1].update_lls(&m[pass&1], var);
}
avpriv_solve_lls(&m[pass&1], 0.001, 0);
avpriv_solve_lls2(&m[pass&1], 0.001, 0);
}
for(i=0; i<max_order; i++){

View File

@ -87,9 +87,12 @@ av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
{
MJpegDecodeContext *s = avctx->priv_data;
if (!s->picture_ptr)
s->picture_ptr = &s->picture;
avcodec_get_frame_defaults(&s->picture);
if (!s->picture_ptr) {
s->picture = av_frame_alloc();
if (!s->picture)
return AVERROR(ENOMEM);
s->picture_ptr = s->picture;
}
s->avctx = avctx;
ff_hpeldsp_init(&s->hdsp, avctx->flags);
@ -823,7 +826,7 @@ static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int p
buffer[0][i] = 1 << (s->bits - 1);
for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
uint8_t *ptr = s->picture.data[0] + (linesize * mb_y);
uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);
if (s->interlaced && s->bottom_field)
ptr += linesize >> 1;
@ -957,7 +960,7 @@ static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
if(dc == 0xFFFFF)
return -1;
if(bits<=8){
ptr = s->picture.data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
if(y==0 && toprow){
if(x==0 && leftcol){
pred= 1 << (bits - 1);
@ -977,7 +980,7 @@ static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
pred &= mask;
*ptr= pred + (dc << point_transform);
}else{
ptr16 = (uint16_t*)(s->picture.data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
if(y==0 && toprow){
if(x==0 && leftcol){
pred= 1 << (bits - 1);
@ -1025,7 +1028,7 @@ static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
if(dc == 0xFFFFF)
return -1;
if(bits<=8){
ptr = s->picture.data[c] +
ptr = s->picture_ptr->data[c] +
(linesize * (v * mb_y + y)) +
(h * mb_x + x); //FIXME optimize this crap
PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
@ -1033,7 +1036,7 @@ static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
pred &= mask;
*ptr = pred + (dc << point_transform);
}else{
ptr16 = (uint16_t*)(s->picture.data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
pred &= mask;
@ -1207,7 +1210,7 @@ static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
int mb_x, mb_y;
int EOBRUN = 0;
int c = s->comp_index[0];
uint8_t *data = s->picture.data[c];
uint8_t *data = s->picture_ptr->data[c];
int linesize = s->linesize[c];
int last_scan = 0;
int16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
@ -1368,7 +1371,7 @@ next_field:
s->last_dc[i] = (4 << s->bits);
if (s->lossless) {
av_assert0(s->picture_ptr == &s->picture);
av_assert0(s->picture_ptr == s->picture);
if (CONFIG_JPEGLS_DECODER && s->ls) {
// for () {
// reset_ls_coding_parameters(s, 0);
@ -1389,7 +1392,7 @@ next_field:
}
} else {
if (s->progressive && predictor) {
av_assert0(s->picture_ptr == &s->picture);
av_assert0(s->picture_ptr == s->picture);
if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
ilv, prev_shift,
point_transform)) < 0)
@ -1441,7 +1444,7 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
int len, id, i;
len = get_bits(&s->gb, 16);
if (len < 5)
if (len < 6)
return AVERROR_INVALIDDATA;
if (8 * len > get_bits_left(&s->gb))
return AVERROR_INVALIDDATA;
@ -1555,7 +1558,7 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
}
/* EXIF metadata */
if (s->start_code == APP1 && id == AV_RB32("Exif")) {
if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
GetByteContext gbytes;
int ret, le, ifd_offset, bytes_read;
const uint8_t *aligned;
@ -2035,7 +2038,10 @@ av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
av_log(avctx, AV_LOG_INFO, "Single field\n");
}
if (s->picture_ptr)
if (s->picture) {
av_frame_free(&s->picture);
s->picture_ptr = NULL;
} else if (s->picture_ptr)
av_frame_unref(s->picture_ptr);
av_free(s->buffer);
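
The two validation hunks in mjpeg_decode_app() above come down to segment-layout arithmetic. An APPn marker segment starts with a 16-bit length that counts itself, and the handler immediately reads a 32-bit identifier, so a segment needs at least 2 + 4 = 6 bytes to be well formed; the previous len < 5 test still admitted a 5-byte segment, which cannot even hold the 4-byte identifier in full. For APP1, an Exif payload begins with the 6-byte signature "Exif\0\0"; the identifier read already consumed "Exif", so at least 2 further bytes (the two NULs) must remain before the TIFF header, hence the added len >= 2 guard. A fragment-style sketch of the same checks (not compilable on its own; the elided reads are paraphrased in comments):

    len = get_bits(&s->gb, 16);              /* the 16-bit length counts itself     */
    if (len < 6)                             /* 2 (length) + 4 (identifier) minimum */
        return AVERROR_INVALIDDATA;
    if (8 * len > get_bits_left(&s->gb))
        return AVERROR_INVALIDDATA;
    /* ... the 32-bit identifier is read and len reduced to the bytes left ... */

    /* "Exif\0\0": the identifier matched "Exif", so the two NUL padding
     * bytes must still be available before the TIFF data: */
    if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
        /* parse the embedded TIFF/Exif directory */
    }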

View File

@ -90,7 +90,7 @@ typedef struct MJpegDecodeContext {
int h_max, v_max; /* maximum h and v counts */
int quant_index[4]; /* quant table index for each component */
int last_dc[MAX_COMPONENTS]; /* last DEQUANTIZED dc (XXX: am I right to do that ?) */
AVFrame picture; /* picture structure */
AVFrame *picture; /* picture structure */
AVFrame *picture_ptr; /* pointer to picture structure */
int got_picture; ///< we found a SOF and picture is valid, too.
int linesize[MAX_COMPONENTS]; ///< linesize << interlaced

View File

@ -46,12 +46,12 @@ av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
if (s->width > 65500 || s->height > 65500) {
av_log(s, AV_LOG_ERROR, "JPEG does not support resolutions above 65500x65500\n");
return -1;
return AVERROR(EINVAL);
}
m = av_malloc(sizeof(MJpegContext));
if (!m)
return -1;
return AVERROR(ENOMEM);
s->min_qcoeff=-1023;
s->max_qcoeff= 1023;
@ -498,19 +498,25 @@ static int amv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
{
MpegEncContext *s = avctx->priv_data;
AVFrame pic = *pic_arg;
int i;
AVFrame *pic;
int i, ret;
//CODEC_FLAG_EMU_EDGE have to be cleared
if(s->avctx->flags & CODEC_FLAG_EMU_EDGE)
return -1;
return AVERROR(EINVAL);
pic = av_frame_alloc();
if (!pic)
return AVERROR(ENOMEM);
av_frame_ref(pic, pic_arg);
//picture should be flipped upside-down
for(i=0; i < 3; i++) {
pic.data[i] += (pic.linesize[i] * (s->mjpeg_vsample[i] * (8 * s->mb_height -((s->height/V_MAX)&7)) - 1 ));
pic.linesize[i] *= -1;
pic->data[i] += (pic->linesize[i] * (s->mjpeg_vsample[i] * (8 * s->mb_height -((s->height/V_MAX)&7)) - 1 ));
pic->linesize[i] *= -1;
}
return ff_MPV_encode_picture(avctx, pkt, &pic, got_packet);
ret = ff_MPV_encode_picture(avctx, pkt, pic, got_packet);
av_frame_free(&pic);
return ret;
}
#if CONFIG_MJPEG_ENCODER
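
The amv_encode_picture() hunk above keeps the long-standing upside-down trick (AMV stores its rows bottom-up) but now applies it to a freshly allocated and referenced frame instead of a shallow struct copy of the caller's picture, so the input frame is never modified and the extra reference is dropped again after ff_MPV_encode_picture(). A minimal sketch of the flip itself; flip_ref is a hypothetical helper, and the per-plane row counts are passed in because the real hunk derives them from mjpeg_vsample[] and the macroblock height:

#include "libavutil/frame.h"

/* 'flip' must be a freshly allocated, unreferenced AVFrame.  Each plane is
 * pointed at its last row and walked backwards via a negative stride. */
static int flip_ref(AVFrame *flip, const AVFrame *src, const int rows[3])
{
    int i, ret = av_frame_ref(flip, src);
    if (ret < 0)
        return ret;
    for (i = 0; i < 3 && flip->data[i]; i++) {
        flip->data[i]    += flip->linesize[i] * (rows[i] - 1);
        flip->linesize[i] = -flip->linesize[i];
    }
    return 0;
}

Freeing the flipped frame afterwards with av_frame_free() only drops this extra reference; the pixel buffers still belong to the caller's frame.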

View File

@ -48,7 +48,7 @@
typedef struct MmContext {
AVCodecContext *avctx;
AVFrame frame;
AVFrame *frame;
int palette[AVPALETTE_COUNT];
GetByteContext gb;
} MmContext;
@ -61,7 +61,9 @@ static av_cold int mm_decode_init(AVCodecContext *avctx)
avctx->pix_fmt = AV_PIX_FMT_PAL8;
avcodec_get_frame_defaults(&s->frame);
s->frame = av_frame_alloc();
if (!s->frame)
return AVERROR(ENOMEM);
return 0;
}
@ -108,9 +110,9 @@ static int mm_decode_intra(MmContext * s, int half_horiz, int half_vert)
return AVERROR_INVALIDDATA;
if (color) {
memset(s->frame.data[0] + y*s->frame.linesize[0] + x, color, run_length);
memset(s->frame->data[0] + y*s->frame->linesize[0] + x, color, run_length);
if (half_vert)
memset(s->frame.data[0] + (y+1)*s->frame.linesize[0] + x, color, run_length);
memset(s->frame->data[0] + (y+1)*s->frame->linesize[0] + x, color, run_length);
}
x+= run_length;
@ -158,13 +160,13 @@ static int mm_decode_inter(MmContext * s, int half_horiz, int half_vert)
return AVERROR_INVALIDDATA;
if (replace) {
int color = bytestream2_get_byte(&data_ptr);
s->frame.data[0][y*s->frame.linesize[0] + x] = color;
s->frame->data[0][y*s->frame->linesize[0] + x] = color;
if (half_horiz)
s->frame.data[0][y*s->frame.linesize[0] + x + 1] = color;
s->frame->data[0][y*s->frame->linesize[0] + x + 1] = color;
if (half_vert) {
s->frame.data[0][(y+1)*s->frame.linesize[0] + x] = color;
s->frame->data[0][(y+1)*s->frame->linesize[0] + x] = color;
if (half_horiz)
s->frame.data[0][(y+1)*s->frame.linesize[0] + x + 1] = color;
s->frame->data[0][(y+1)*s->frame->linesize[0] + x + 1] = color;
}
}
x += 1 + half_horiz;
@ -193,7 +195,7 @@ static int mm_decode_frame(AVCodecContext *avctx,
buf_size -= MM_PREAMBLE_SIZE;
bytestream2_init(&s->gb, buf, buf_size);
if ((res = ff_reget_buffer(avctx, &s->frame)) < 0)
if ((res = ff_reget_buffer(avctx, s->frame)) < 0)
return res;
switch(type) {
@ -211,9 +213,9 @@ static int mm_decode_frame(AVCodecContext *avctx,
if (res < 0)
return res;
memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
memcpy(s->frame->data[1], s->palette, AVPALETTE_SIZE);
if ((res = av_frame_ref(data, &s->frame)) < 0)
if ((res = av_frame_ref(data, s->frame)) < 0)
return res;
*got_frame = 1;
@ -225,7 +227,7 @@ static av_cold int mm_decode_end(AVCodecContext *avctx)
{
MmContext *s = avctx->priv_data;
av_frame_unref(&s->frame);
av_frame_free(&s->frame);
return 0;
}
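
mmvideo above is the first of several decoders in this diff (msrle, mss1, msvideo1, nuv, ...) converted from an AVFrame embedded in the private context to a heap-allocated one. The lifecycle is the same in each of them; a condensed sketch with MyContext/my_* as placeholder names and the actual pixel decoding elided:

#include "libavutil/attributes.h"
#include "libavutil/frame.h"
#include "avcodec.h"
#include "internal.h"

typedef struct MyContext {
    AVFrame *frame;                         /* was: AVFrame frame; */
} MyContext;

static av_cold int my_decode_init(AVCodecContext *avctx)
{
    MyContext *s = avctx->priv_data;
    s->frame = av_frame_alloc();            /* replaces avcodec_get_frame_defaults() */
    return s->frame ? 0 : AVERROR(ENOMEM);
}

static int my_decode_frame(AVCodecContext *avctx, void *data,
                           int *got_frame, AVPacket *avpkt)
{
    MyContext *s = avctx->priv_data;
    int ret;

    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)  /* keep last frame's contents */
        return ret;
    /* ... decode avpkt into s->frame->data[] ... */
    if ((ret = av_frame_ref(data, s->frame)) < 0)      /* hand the caller a new reference */
        return ret;
    *got_frame = 1;
    return avpkt->size;
}

static av_cold int my_decode_end(AVCodecContext *avctx)
{
    MyContext *s = avctx->priv_data;
    av_frame_free(&s->frame);               /* was: av_frame_unref(&s->frame); */
    return 0;
}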

View File

@ -36,7 +36,7 @@ typedef struct HuffCode {
typedef struct MotionPixelsContext {
AVCodecContext *avctx;
AVFrame frame;
AVFrame *frame;
DSPContext dsp;
uint8_t *changes_map;
int offset_bits_len;
@ -50,6 +50,19 @@ typedef struct MotionPixelsContext {
int bswapbuf_size;
} MotionPixelsContext;
static av_cold int mp_decode_end(AVCodecContext *avctx)
{
MotionPixelsContext *mp = avctx->priv_data;
av_freep(&mp->changes_map);
av_freep(&mp->vpt);
av_freep(&mp->hpt);
av_freep(&mp->bswapbuf);
av_frame_free(&mp->frame);
return 0;
}
static av_cold int mp_decode_init(AVCodecContext *avctx)
{
MotionPixelsContext *mp = avctx->priv_data;
@ -75,7 +88,13 @@ static av_cold int mp_decode_init(AVCodecContext *avctx)
return AVERROR(ENOMEM);
}
avctx->pix_fmt = AV_PIX_FMT_RGB555;
avcodec_get_frame_defaults(&mp->frame);
mp->frame = av_frame_alloc();
if (!mp->frame) {
mp_decode_end(avctx);
return AVERROR(ENOMEM);
}
return 0;
}
@ -96,14 +115,14 @@ static void mp_read_changes_map(MotionPixelsContext *mp, GetBitContext *gb, int
continue;
w = FFMIN(w, mp->avctx->width - x);
h = FFMIN(h, mp->avctx->height - y);
pixels = (uint16_t *)&mp->frame.data[0][y * mp->frame.linesize[0] + x * 2];
pixels = (uint16_t *)&mp->frame->data[0][y * mp->frame->linesize[0] + x * 2];
while (h--) {
mp->changes_map[offset] = w;
if (read_color)
for (i = 0; i < w; ++i)
pixels[i] = color;
offset += mp->avctx->width;
pixels += mp->frame.linesize[0] / 2;
pixels += mp->frame->linesize[0] / 2;
}
}
}
@ -165,7 +184,7 @@ static YuvPixel mp_get_yuv_from_rgb(MotionPixelsContext *mp, int x, int y)
{
int color;
color = *(uint16_t *)&mp->frame.data[0][y * mp->frame.linesize[0] + x * 2];
color = *(uint16_t *)&mp->frame->data[0][y * mp->frame->linesize[0] + x * 2];
return mp_rgb_yuv_table[color];
}
@ -174,7 +193,7 @@ static void mp_set_rgb_from_yuv(MotionPixelsContext *mp, int x, int y, const Yuv
int color;
color = mp_yuv_to_rgb(p->y, p->v, p->u, 1);
*(uint16_t *)&mp->frame.data[0][y * mp->frame.linesize[0] + x * 2] = color;
*(uint16_t *)&mp->frame->data[0][y * mp->frame->linesize[0] + x * 2] = color;
}
static int mp_get_vlc(MotionPixelsContext *mp, GetBitContext *gb)
@ -271,7 +290,7 @@ static int mp_decode_frame(AVCodecContext *avctx,
GetBitContext gb;
int i, count1, count2, sz, ret;
if ((ret = ff_reget_buffer(avctx, &mp->frame)) < 0)
if ((ret = ff_reget_buffer(avctx, mp->frame)) < 0)
return ret;
/* le32 bitstream msb first */
@ -297,7 +316,7 @@ static int mp_decode_frame(AVCodecContext *avctx,
goto end;
if (mp->changes_map[0] == 0) {
*(uint16_t *)mp->frame.data[0] = get_bits(&gb, 15);
*(uint16_t *)mp->frame->data[0] = get_bits(&gb, 15);
mp->changes_map[0] = 1;
}
if (mp_read_codes_table(mp, &gb) < 0)
@ -317,25 +336,12 @@ static int mp_decode_frame(AVCodecContext *avctx,
ff_free_vlc(&mp->vlc);
end:
if ((ret = av_frame_ref(data, &mp->frame)) < 0)
if ((ret = av_frame_ref(data, mp->frame)) < 0)
return ret;
*got_frame = 1;
return buf_size;
}
static av_cold int mp_decode_end(AVCodecContext *avctx)
{
MotionPixelsContext *mp = avctx->priv_data;
av_freep(&mp->changes_map);
av_freep(&mp->vpt);
av_freep(&mp->hpt);
av_freep(&mp->bswapbuf);
av_frame_unref(&mp->frame);
return 0;
}
AVCodec ff_motionpixels_decoder = {
.name = "motionpixels",
.long_name = NULL_IF_CONFIG_SMALL("Motion Pixels video"),

View File

@ -2070,7 +2070,6 @@ static int vcr2_init_sequence(AVCodecContext *avctx)
if (s->codec_tag == AV_RL32("BW10")) {
s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
} else {
exchange_uv(s); // common init reset pblocks, so we swap them here
s->swap_uv = 1; // in case of xvmc we need to swap uv for each MB
s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
}

View File

@ -537,6 +537,15 @@ fail:
return ret;
}
static void exchange_uv(MpegEncContext *s)
{
int16_t (*tmp)[64];
tmp = s->pblocks[4];
s->pblocks[4] = s->pblocks[5];
s->pblocks[5] = tmp;
}
static int init_duplicate_context(MpegEncContext *s)
{
int y_size = s->b8_stride * (2 * s->mb_height + 1);
@ -567,6 +576,8 @@ static int init_duplicate_context(MpegEncContext *s)
for (i = 0; i < 12; i++) {
s->pblocks[i] = &s->block[i];
}
if (s->avctx->codec_tag == AV_RL32("VCR2"))
exchange_uv(s);
if (s->out_format == FMT_H263) {
/* ac values */
@ -641,6 +652,8 @@ int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
for (i = 0; i < 12; i++) {
dst->pblocks[i] = &dst->block[i];
}
if (dst->avctx->codec_tag == AV_RL32("VCR2"))
exchange_uv(dst);
if (!dst->edge_emu_buffer &&
(ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "

View File

@ -745,6 +745,9 @@ typedef struct MpegEncContext {
int context_reinit;
ERContext er;
/* temporary frames used by b_frame_strategy = 2 */
AVFrame *tmp_frames[FF_MAX_B_FRAMES + 2];
} MpegEncContext;
#define REBASE_PICTURE(pic, new_ctx, old_ctx) \

View File

@ -236,7 +236,7 @@ av_cold int ff_dct_encode_init(MpegEncContext *s) {
av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
int i;
int i, ret;
int chroma_h_shift, chroma_v_shift;
MPV_encode_defaults(s);
@ -876,12 +876,29 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
if (ff_rate_control_init(s) < 0)
return -1;
if (avctx->b_frame_strategy == 2) {
for (i = 0; i < s->max_b_frames + 2; i++) {
s->tmp_frames[i] = av_frame_alloc();
if (!s->tmp_frames[i])
return AVERROR(ENOMEM);
s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
s->tmp_frames[i]->width = s->width >> avctx->brd_scale;
s->tmp_frames[i]->height = s->height >> avctx->brd_scale;
ret = av_frame_get_buffer(s->tmp_frames[i], 32);
if (ret < 0)
return ret;
}
}
return 0;
}
av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
int i;
ff_rate_control_uninit(s);
@ -892,6 +909,9 @@ av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
av_freep(&avctx->extradata);
for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
av_frame_free(&s->tmp_frames[i]);
return 0;
}
@ -1133,7 +1153,6 @@ static int estimate_best_b_count(MpegEncContext *s)
{
AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
AVCodecContext *c = avcodec_alloc_context3(NULL);
AVFrame input[FF_MAX_B_FRAMES + 2];
const int scale = s->avctx->brd_scale;
int i, j, out_size, p_lambda, b_lambda, lambda2;
int64_t best_rd = INT64_MAX;
@ -1168,19 +1187,9 @@ static int estimate_best_b_count(MpegEncContext *s)
return -1;
for (i = 0; i < s->max_b_frames + 2; i++) {
int ysize = c->width * c->height;
int csize = (c->width / 2) * (c->height / 2);
Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
s->next_picture_ptr;
avcodec_get_frame_defaults(&input[i]);
input[i].data[0] = av_malloc(ysize + 2 * csize);
input[i].data[1] = input[i].data[0] + ysize;
input[i].data[2] = input[i].data[1] + csize;
input[i].linesize[0] = c->width;
input[i].linesize[1] =
input[i].linesize[2] = c->width / 2;
if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
pre_input = *pre_input_ptr;
@ -1190,13 +1199,13 @@ static int estimate_best_b_count(MpegEncContext *s)
pre_input.f.data[2] += INPLACE_OFFSET;
}
s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
s->dsp.shrink[scale](s->tmp_frames[i]->data[0], s->tmp_frames[i]->linesize[0],
pre_input.f.data[0], pre_input.f.linesize[0],
c->width, c->height);
s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
s->dsp.shrink[scale](s->tmp_frames[i]->data[1], s->tmp_frames[i]->linesize[1],
pre_input.f.data[1], pre_input.f.linesize[1],
c->width >> 1, c->height >> 1);
s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
s->dsp.shrink[scale](s->tmp_frames[i]->data[2], s->tmp_frames[i]->linesize[2],
pre_input.f.data[2], pre_input.f.linesize[2],
c->width >> 1, c->height >> 1);
}
@ -1210,21 +1219,21 @@ static int estimate_best_b_count(MpegEncContext *s)
c->error[0] = c->error[1] = c->error[2] = 0;
input[0].pict_type = AV_PICTURE_TYPE_I;
input[0].quality = 1 * FF_QP2LAMBDA;
s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
out_size = encode_frame(c, &input[0]);
out_size = encode_frame(c, s->tmp_frames[0]);
//rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
for (i = 0; i < s->max_b_frames + 1; i++) {
int is_p = i % (j + 1) == j || i == s->max_b_frames;
input[i + 1].pict_type = is_p ?
s->tmp_frames[i + 1]->pict_type = is_p ?
AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
input[i + 1].quality = is_p ? p_lambda : b_lambda;
s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
out_size = encode_frame(c, &input[i + 1]);
out_size = encode_frame(c, s->tmp_frames[i + 1]);
rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
}
@ -1246,10 +1255,6 @@ static int estimate_best_b_count(MpegEncContext *s)
avcodec_close(c);
av_freep(&c);
for (i = 0; i < s->max_b_frames + 2; i++) {
av_freep(&input[i].data[0]);
}
return best_b_count;
}
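
estimate_best_b_count() above used to build its downscaled test pictures by hand: a stack array of AVFrames whose data[] pointers were carved out of one av_malloc() block on every call. The hunks move that storage to s->tmp_frames[], allocated once in ff_MPV_encode_init() (only when b_frame_strategy == 2 will actually use them) and freed in ff_MPV_encode_end(). A sketch of how one such frame is set up, mirroring the init hunk; alloc_brd_frame is a made-up helper name:

#include "libavutil/frame.h"
#include "libavutil/pixfmt.h"

/* One downscaled scratch frame for the B-frame strategy search. */
static AVFrame *alloc_brd_frame(int width, int height, int brd_scale)
{
    AVFrame *f = av_frame_alloc();
    if (!f)
        return NULL;
    f->format = AV_PIX_FMT_YUV420P;          /* the search always works on 4:2:0 */
    f->width  = width  >> brd_scale;
    f->height = height >> brd_scale;
    if (av_frame_get_buffer(f, 32) < 0) {    /* allocates data[]/linesize[] to match */
        av_frame_free(&f);
        return NULL;
    }
    return f;
}

Compared with the old layout (linesize equal to the scaled width, no padding), av_frame_get_buffer() returns aligned, padded planes, which is presumably friendlier to the SIMD shrink and encode paths that write into them.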

View File

@ -38,7 +38,7 @@
typedef struct MsrleContext {
AVCodecContext *avctx;
AVFrame frame;
AVFrame *frame;
GetByteContext gb;
const unsigned char *buf;
@ -70,7 +70,9 @@ static av_cold int msrle_decode_init(AVCodecContext *avctx)
return AVERROR_INVALIDDATA;
}
avcodec_get_frame_defaults(&s->frame);
s->frame = av_frame_alloc();
if (!s->frame)
return AVERROR(ENOMEM);
if (avctx->extradata_size >= 4)
for (i = 0; i < FFMIN(avctx->extradata_size, AVPALETTE_SIZE)/4; i++)
@ -92,24 +94,24 @@ static int msrle_decode_frame(AVCodecContext *avctx,
s->buf = buf;
s->size = buf_size;
if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0)
if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
return ret;
if (avctx->bits_per_coded_sample > 1 && avctx->bits_per_coded_sample <= 8) {
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
if (pal) {
s->frame.palette_has_changed = 1;
s->frame->palette_has_changed = 1;
memcpy(s->pal, pal, AVPALETTE_SIZE);
}
/* make the palette available */
memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
memcpy(s->frame->data[1], s->pal, AVPALETTE_SIZE);
}
/* FIXME how to correctly detect RLE ??? */
if (avctx->height * istride == avpkt->size) { /* assume uncompressed */
int linesize = (avctx->width * avctx->bits_per_coded_sample + 7) / 8;
uint8_t *ptr = s->frame.data[0];
uint8_t *ptr = s->frame->data[0];
uint8_t *buf = avpkt->data + (avctx->height-1)*istride;
int i, j;
@ -125,14 +127,14 @@ static int msrle_decode_frame(AVCodecContext *avctx,
memcpy(ptr, buf, linesize);
}
buf -= istride;
ptr += s->frame.linesize[0];
ptr += s->frame->linesize[0];
}
} else {
bytestream2_init(&s->gb, buf, buf_size);
ff_msrle_decode(avctx, (AVPicture*)&s->frame, avctx->bits_per_coded_sample, &s->gb);
ff_msrle_decode(avctx, (AVPicture*)s->frame, avctx->bits_per_coded_sample, &s->gb);
}
if ((ret = av_frame_ref(data, &s->frame)) < 0)
if ((ret = av_frame_ref(data, s->frame)) < 0)
return ret;
*got_frame = 1;
@ -146,7 +148,7 @@ static av_cold int msrle_decode_end(AVCodecContext *avctx)
MsrleContext *s = avctx->priv_data;
/* release the last frame */
av_frame_unref(&s->frame);
av_frame_free(&s->frame);
return 0;
}

View File

@ -30,7 +30,7 @@
typedef struct MSS1Context {
MSS12Context ctx;
AVFrame pic;
AVFrame *pic;
SliceContext sc;
} MSS1Context;
@ -151,32 +151,32 @@ static int mss1_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
init_get_bits(&gb, buf, buf_size * 8);
arith_init(&acoder, &gb);
if ((ret = ff_reget_buffer(avctx, &ctx->pic)) < 0)
if ((ret = ff_reget_buffer(avctx, ctx->pic)) < 0)
return ret;
c->pal_pic = ctx->pic.data[0] + ctx->pic.linesize[0] * (avctx->height - 1);
c->pal_stride = -ctx->pic.linesize[0];
c->pal_pic = ctx->pic->data[0] + ctx->pic->linesize[0] * (avctx->height - 1);
c->pal_stride = -ctx->pic->linesize[0];
c->keyframe = !arith_get_bit(&acoder);
if (c->keyframe) {
c->corrupted = 0;
ff_mss12_slicecontext_reset(&ctx->sc);
pal_changed = decode_pal(c, &acoder);
ctx->pic.key_frame = 1;
ctx->pic.pict_type = AV_PICTURE_TYPE_I;
ctx->pic->key_frame = 1;
ctx->pic->pict_type = AV_PICTURE_TYPE_I;
} else {
if (c->corrupted)
return AVERROR_INVALIDDATA;
ctx->pic.key_frame = 0;
ctx->pic.pict_type = AV_PICTURE_TYPE_P;
ctx->pic->key_frame = 0;
ctx->pic->pict_type = AV_PICTURE_TYPE_P;
}
c->corrupted = ff_mss12_decode_rect(&ctx->sc, &acoder, 0, 0,
avctx->width, avctx->height);
if (c->corrupted)
return AVERROR_INVALIDDATA;
memcpy(ctx->pic.data[1], c->pal, AVPALETTE_SIZE);
ctx->pic.palette_has_changed = pal_changed;
memcpy(ctx->pic->data[1], c->pal, AVPALETTE_SIZE);
ctx->pic->palette_has_changed = pal_changed;
if ((ret = av_frame_ref(data, &ctx->pic)) < 0)
if ((ret = av_frame_ref(data, ctx->pic)) < 0)
return ret;
*got_frame = 1;
@ -192,7 +192,9 @@ static av_cold int mss1_decode_init(AVCodecContext *avctx)
c->ctx.avctx = avctx;
avcodec_get_frame_defaults(&c->pic);
c->pic = av_frame_alloc();
if (!c->pic)
return AVERROR(ENOMEM);
ret = ff_mss12_decode_init(&c->ctx, 0, &c->sc, NULL);
@ -205,7 +207,7 @@ static av_cold int mss1_decode_end(AVCodecContext *avctx)
{
MSS1Context * const ctx = avctx->priv_data;
av_frame_unref(&ctx->pic);
av_frame_free(&ctx->pic);
ff_mss12_decode_end(&ctx->ctx);
return 0;

View File

@ -837,6 +837,7 @@ static av_cold int mss2_decode_init(AVCodecContext *avctx)
avctx->pix_fmt = c->free_colours == 127 ? AV_PIX_FMT_RGB555
: AV_PIX_FMT_RGB24;
return 0;
}

View File

@ -803,15 +803,24 @@ static int mss3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
return buf_size;
}
static av_cold int mss3_decode_end(AVCodecContext *avctx)
{
MSS3Context * const c = avctx->priv_data;
int i;
av_frame_free(&c->pic);
for (i = 0; i < 3; i++)
av_freep(&c->dct_coder[i].prev_dc);
return 0;
}
static av_cold int mss3_decode_init(AVCodecContext *avctx)
{
MSS3Context * const c = avctx->priv_data;
int i;
c->avctx = avctx;
c->pic = av_frame_alloc();
if (!c->pic)
return AVERROR(ENOMEM);
if ((avctx->width & 0xF) || (avctx->height & 0xF)) {
av_log(avctx, AV_LOG_ERROR,
@ -838,6 +847,12 @@ static av_cold int mss3_decode_init(AVCodecContext *avctx)
}
}
c->pic = av_frame_alloc();
if (!c->pic) {
mss3_decode_end(avctx);
return AVERROR(ENOMEM);
}
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
init_coders(c);
@ -845,18 +860,6 @@ static av_cold int mss3_decode_init(AVCodecContext *avctx)
return 0;
}
static av_cold int mss3_decode_end(AVCodecContext *avctx)
{
MSS3Context * const c = avctx->priv_data;
int i;
av_frame_free(&c->pic);
for (i = 0; i < 3; i++)
av_freep(&c->dct_coder[i].prev_dc);
return 0;
}
AVCodec ff_msa1_decoder = {
.name = "msa1",
.long_name = NULL_IF_CONFIG_SMALL("MS ATC Screen"),

View File

@ -626,14 +626,23 @@ static int mss4_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
return buf_size;
}
static av_cold int mss4_decode_init(AVCodecContext *avctx)
static av_cold int mss4_decode_end(AVCodecContext *avctx)
{
MSS4Context * const c = avctx->priv_data;
int i;
c->pic = av_frame_alloc();
if (!c->pic)
return AVERROR(ENOMEM);
av_frame_free(&c->pic);
for (i = 0; i < 3; i++)
av_freep(&c->prev_dc[i]);
mss4_free_vlcs(c);
return 0;
}
static av_cold int mss4_decode_init(AVCodecContext *avctx)
{
MSS4Context * const c = avctx->priv_data;
int i;
if (mss4_init_vlcs(c)) {
av_log(avctx, AV_LOG_ERROR, "Cannot initialise VLCs\n");
@ -650,25 +659,17 @@ static av_cold int mss4_decode_init(AVCodecContext *avctx)
}
}
c->pic = av_frame_alloc();
if (!c->pic) {
mss4_decode_end(avctx);
return AVERROR(ENOMEM);
}
avctx->pix_fmt = AV_PIX_FMT_YUV444P;
return 0;
}
static av_cold int mss4_decode_end(AVCodecContext *avctx)
{
MSS4Context * const c = avctx->priv_data;
int i;
av_frame_free(&c->pic);
for (i = 0; i < 3; i++)
av_freep(&c->prev_dc[i]);
mss4_free_vlcs(c);
return 0;
}
AVCodec ff_mts2_decoder = {
.name = "mts2",
.long_name = NULL_IF_CONFIG_SMALL("MS Expression Encoder Screen"),

View File

@ -47,7 +47,7 @@
typedef struct Msvideo1Context {
AVCodecContext *avctx;
AVFrame frame;
AVFrame *frame;
const unsigned char *buf;
int size;
@ -72,7 +72,9 @@ static av_cold int msvideo1_decode_init(AVCodecContext *avctx)
avctx->pix_fmt = AV_PIX_FMT_RGB555;
}
avcodec_get_frame_defaults(&s->frame);
s->frame = av_frame_alloc();
if (!s->frame)
return AVERROR(ENOMEM);
return 0;
}
@ -93,8 +95,8 @@ static void msvideo1_decode_8bit(Msvideo1Context *s)
unsigned short flags;
int skip_blocks;
unsigned char colors[8];
unsigned char *pixels = s->frame.data[0];
int stride = s->frame.linesize[0];
unsigned char *pixels = s->frame->data[0];
int stride = s->frame->linesize[0];
stream_ptr = 0;
skip_blocks = 0;
@ -174,7 +176,7 @@ static void msvideo1_decode_8bit(Msvideo1Context *s)
/* make the palette available on the way out */
if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
memcpy(s->frame->data[1], s->pal, AVPALETTE_SIZE);
}
static void msvideo1_decode_16bit(Msvideo1Context *s)
@ -193,8 +195,8 @@ static void msvideo1_decode_16bit(Msvideo1Context *s)
unsigned short flags;
int skip_blocks;
unsigned short colors[8];
unsigned short *pixels = (unsigned short *)s->frame.data[0];
int stride = s->frame.linesize[0] / 2;
unsigned short *pixels = (unsigned short *)s->frame->data[0];
int stride = s->frame->linesize[0] / 2;
stream_ptr = 0;
skip_blocks = 0;
@ -298,7 +300,7 @@ static int msvideo1_decode_frame(AVCodecContext *avctx,
s->buf = buf;
s->size = buf_size;
if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0)
if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
return ret;
if (s->mode_8bit) {
@ -306,7 +308,7 @@ static int msvideo1_decode_frame(AVCodecContext *avctx,
if (pal) {
memcpy(s->pal, pal, AVPALETTE_SIZE);
s->frame.palette_has_changed = 1;
s->frame->palette_has_changed = 1;
}
}
@ -315,7 +317,7 @@ static int msvideo1_decode_frame(AVCodecContext *avctx,
else
msvideo1_decode_16bit(s);
if ((ret = av_frame_ref(data, &s->frame)) < 0)
if ((ret = av_frame_ref(data, s->frame)) < 0)
return ret;
*got_frame = 1;
@ -328,7 +330,7 @@ static av_cold int msvideo1_decode_end(AVCodecContext *avctx)
{
Msvideo1Context *s = avctx->priv_data;
av_frame_unref(&s->frame);
av_frame_free(&s->frame);
return 0;
}

View File

@ -35,7 +35,6 @@
*/
typedef struct Msvideo1EncContext {
AVCodecContext *avctx;
AVFrame pic;
AVLFG rnd;
uint8_t *prev;
@ -58,7 +57,7 @@ enum MSV1Mode{
};
#define SKIP_PREFIX 0x8400
#define SKIPS_MAX 0x0FFF
#define SKIPS_MAX 0x03FF
#define MKRGB555(in, off) ((in[off] << 10) | (in[off + 1] << 5) | (in[off + 2]))
static const int remap[16] = { 0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15 };
@ -67,7 +66,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
Msvideo1EncContext * const c = avctx->priv_data;
AVFrame * const p = &c->pic;
const AVFrame *p = pict;
uint16_t *src;
uint8_t *prevptr;
uint8_t *dst, *buf;
@ -75,12 +74,12 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
int no_skips = 1;
int i, j, k, x, y, ret;
int skips = 0;
int quality = 24;
if ((ret = ff_alloc_packet2(avctx, pkt, avctx->width*avctx->height*9 + FF_MIN_BUFFER_SIZE)) < 0)
return ret;
dst= buf= pkt->data;
*p = *pict;
if(!c->prev)
c->prev = av_malloc(avctx->width * 3 * (avctx->height + 3));
prevptr = c->prev + avctx->width * 3 * (FFALIGN(avctx->height, 4) - 1);
@ -88,7 +87,6 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
if(c->keyint >= avctx->keyint_min)
keyframe = 1;
p->quality = 24;
for(y = 0; y < avctx->height; y += 4){
for(x = 0; x < avctx->width; x += 4){
@ -114,7 +112,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
bestscore += t*t;
}
}
bestscore /= p->quality;
bestscore /= quality;
}
// try to find optimal value to fill whole 4x4 block
score = 0;
@ -130,7 +128,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
}
}
}
score /= p->quality;
score /= quality;
score += 2;
if(score < bestscore){
bestscore = score;
@ -155,7 +153,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
}
}
}
score /= p->quality;
score /= quality;
score += 6;
if(score < bestscore){
bestscore = score;
@ -182,7 +180,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
}
}
}
score /= p->quality;
score /= quality;
score += 18;
if(score < bestscore){
bestscore = score;
@ -248,8 +246,6 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
c->keyint = 0;
else
c->keyint++;
p->pict_type= keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
p->key_frame= keyframe;
if (keyframe) pkt->flags |= AV_PKT_FLAG_KEY;
pkt->size = dst - buf;
*got_packet = 1;
@ -274,8 +270,6 @@ static av_cold int encode_init(AVCodecContext *avctx)
return -1;
}
avcodec_get_frame_defaults(&c->pic);
avctx->coded_frame = (AVFrame*)&c->pic;
avctx->bits_per_coded_sample = 16;
c->keyint = avctx->keyint_min;
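
The SKIPS_MAX change above is a bitstream-limit fix rather than a cleanup. An MS Video-1 skip code is a 16-bit word combining SKIP_PREFIX with the number of skipped blocks; 0x8400 occupies bits 15 and 10, leaving only bits 9..0 for the count, so the old limit of 0x0FFF could spill into (and carry past) the prefix bits and produce a word the decoder no longer recognises as a skip. A small worked check; the recognition test is the one the msvideo1 decoder appears to use ((high byte & 0xFC) == 0x84), so treat it as an illustration rather than a spec quote:

#include <assert.h>
#include <stdint.h>

#define SKIP_PREFIX 0x8400   /* bits 15 and 10 */
#define SKIPS_MAX   0x03FF   /* bits 9..0 remain for the block count */

int main(void)
{
    uint16_t code = SKIP_PREFIX + SKIPS_MAX;   /* 0x87FF */
    assert(((code >> 8) & 0xFC) == 0x84);      /* still decoded as a skip code */
    /* With the old SKIPS_MAX of 0x0FFF the sum could reach 0x93FF, whose
     * high byte (0x93) fails the same test, so long runs of skipped
     * blocks were written as codes the decoder would misread. */
    return 0;
}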

View File

@ -31,7 +31,7 @@
typedef struct MXpegDecodeContext {
MJpegDecodeContext jpg;
AVFrame picture[2]; /* pictures array */
AVFrame *picture[2]; /* pictures array */
int picture_index; /* index of current picture */
int got_sof_data; /* true if SOF data successfully parsed */
int got_mxm_bitmask; /* true if MXM bitmask available */
@ -42,11 +42,36 @@ typedef struct MXpegDecodeContext {
unsigned mb_width, mb_height; /* size of picture in MB's from MXM header */
} MXpegDecodeContext;
static av_cold int mxpeg_decode_end(AVCodecContext *avctx)
{
MXpegDecodeContext *s = avctx->priv_data;
MJpegDecodeContext *jpg = &s->jpg;
int i;
jpg->picture_ptr = NULL;
ff_mjpeg_decode_end(avctx);
for (i = 0; i < 2; ++i)
av_frame_free(&s->picture[i]);
av_freep(&s->mxm_bitmask);
av_freep(&s->completion_bitmask);
return 0;
}
static av_cold int mxpeg_decode_init(AVCodecContext *avctx)
{
MXpegDecodeContext *s = avctx->priv_data;
s->jpg.picture_ptr = &s->picture[0];
s->picture[0] = av_frame_alloc();
s->picture[1] = av_frame_alloc();
if (!s->picture[0] || !s->picture[1]) {
mxpeg_decode_end(avctx);
return AVERROR(ENOMEM);
}
s->jpg.picture_ptr = s->picture[0];
return ff_mjpeg_decode_init(avctx);
}
@ -260,7 +285,7 @@ static int mxpeg_decode_frame(AVCodecContext *avctx,
}
if (s->got_mxm_bitmask) {
AVFrame *reference_ptr = &s->picture[s->picture_index ^ 1];
AVFrame *reference_ptr = s->picture[s->picture_index ^ 1];
if (mxpeg_check_dimensions(s, jpg, reference_ptr) < 0)
break;
@ -295,7 +320,7 @@ the_end:
*got_frame = 1;
s->picture_index ^= 1;
jpg->picture_ptr = &s->picture[s->picture_index];
jpg->picture_ptr = s->picture[s->picture_index];
if (!s->has_complete_frame) {
if (!s->got_mxm_bitmask)
@ -308,24 +333,6 @@ the_end:
return buf_ptr - buf;
}
static av_cold int mxpeg_decode_end(AVCodecContext *avctx)
{
MXpegDecodeContext *s = avctx->priv_data;
MJpegDecodeContext *jpg = &s->jpg;
int i;
jpg->picture_ptr = NULL;
ff_mjpeg_decode_end(avctx);
for (i = 0; i < 2; ++i)
av_frame_unref(&s->picture[i]);
av_freep(&s->mxm_bitmask);
av_freep(&s->completion_bitmask);
return 0;
}
AVCodec ff_mxpeg_decoder = {
.name = "mxpeg",
.long_name = NULL_IF_CONFIG_SMALL("Mobotix MxPEG video"),

View File

@ -32,7 +32,7 @@
#include "rtjpeg.h"
typedef struct {
AVFrame pic;
AVFrame *pic;
int codec_frameheader;
int quality;
int width, height;
@ -140,7 +140,7 @@ static int codec_reinit(AVCodecContext *avctx, int width, int height,
}
ff_rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height,
c->lq, c->cq);
av_frame_unref(&c->pic);
av_frame_unref(c->pic);
return 1;
} else if (quality != c->quality)
ff_rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height,
@ -248,20 +248,20 @@ retry:
}
if (size_change || keyframe) {
av_frame_unref(&c->pic);
av_frame_unref(c->pic);
init_frame = 1;
}
if ((result = ff_reget_buffer(avctx, &c->pic)) < 0)
if ((result = ff_reget_buffer(avctx, c->pic)) < 0)
return result;
if (init_frame) {
memset(c->pic.data[0], 0, avctx->height * c->pic.linesize[0]);
memset(c->pic.data[1], 0x80, avctx->height * c->pic.linesize[1] / 2);
memset(c->pic.data[2], 0x80, avctx->height * c->pic.linesize[2] / 2);
memset(c->pic->data[0], 0, avctx->height * c->pic->linesize[0]);
memset(c->pic->data[1], 0x80, avctx->height * c->pic->linesize[1] / 2);
memset(c->pic->data[2], 0x80, avctx->height * c->pic->linesize[2] / 2);
}
c->pic.pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
c->pic.key_frame = keyframe;
c->pic->pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
c->pic->key_frame = keyframe;
// decompress/copy/whatever data
switch (comptype) {
case NUV_LZO:
@ -272,19 +272,19 @@ retry:
height = buf_size / c->width / 3 * 2;
}
if(height > 0)
copy_frame(&c->pic, buf, c->width, height);
copy_frame(c->pic, buf, c->width, height);
break;
}
case NUV_RTJPEG_IN_LZO:
case NUV_RTJPEG:
ret = ff_rtjpeg_decode_frame_yuv420(&c->rtj, &c->pic, buf, buf_size);
ret = ff_rtjpeg_decode_frame_yuv420(&c->rtj, c->pic, buf, buf_size);
if (ret < 0)
return ret;
break;
case NUV_BLACK:
memset(c->pic.data[0], 0, c->width * c->height);
memset(c->pic.data[1], 128, c->width * c->height / 4);
memset(c->pic.data[2], 128, c->width * c->height / 4);
memset(c->pic->data[0], 0, c->width * c->height);
memset(c->pic->data[1], 128, c->width * c->height / 4);
memset(c->pic->data[2], 128, c->width * c->height / 4);
break;
case NUV_COPY_LAST:
/* nothing more to do here */
@ -294,7 +294,7 @@ retry:
return AVERROR_INVALIDDATA;
}
if ((result = av_frame_ref(picture, &c->pic)) < 0)
if ((result = av_frame_ref(picture, c->pic)) < 0)
return result;
*got_frame = 1;
@ -306,8 +306,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
NuvContext *c = avctx->priv_data;
int ret;
c->pic = av_frame_alloc();
if (!c->pic)
return AVERROR(ENOMEM);
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
c->pic.data[0] = NULL;
c->decomp_buf = NULL;
c->quality = -1;
c->width = 0;
@ -331,7 +334,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
NuvContext *c = avctx->priv_data;
av_freep(&c->decomp_buf);
av_frame_unref(&c->pic);
av_frame_free(&c->pic);
return 0;
}

View File

@ -21,13 +21,11 @@
#include "avcodec.h"
#include "internal.h"
#include "pnm.h"
static int pam_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *p, int *got_packet)
{
PNMContext *s = avctx->priv_data;
uint8_t *bytestream_start, *bytestream, *bytestream_end;
int i, h, w, n, linesize, depth, maxval, ret;
const char *tuple_type;
uint8_t *ptr;
@ -90,14 +88,14 @@ static int pam_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
if ((ret = ff_alloc_packet2(avctx, pkt, n*h + 200)) < 0)
return ret;
s->bytestream_start =
s->bytestream = pkt->data;
s->bytestream_end = pkt->data + pkt->size;
bytestream_start =
bytestream = pkt->data;
bytestream_end = pkt->data + pkt->size;
snprintf(s->bytestream, s->bytestream_end - s->bytestream,
snprintf(bytestream, bytestream_end - bytestream,
"P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLTYPE %s\nENDHDR\n",
w, h, depth, maxval, tuple_type);
s->bytestream += strlen(s->bytestream);
bytestream += strlen(bytestream);
ptr = p->data[0];
linesize = p->linesize[0];
@ -106,30 +104,48 @@ static int pam_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
int j;
for (i = 0; i < h; i++) {
for (j = 0; j < w; j++)
*s->bytestream++ = ptr[j >> 3] >> (7 - j & 7) & 1;
*bytestream++ = ptr[j >> 3] >> (7 - j & 7) & 1;
ptr += linesize;
}
} else {
for (i = 0; i < h; i++) {
memcpy(s->bytestream, ptr, n);
s->bytestream += n;
ptr += linesize;
memcpy(bytestream, ptr, n);
bytestream += n;
ptr += linesize;
}
}
pkt->size = s->bytestream - s->bytestream_start;
pkt->size = bytestream - bytestream_start;
pkt->flags |= AV_PKT_FLAG_KEY;
*got_packet = 1;
return 0;
}
static av_cold int pam_encode_init(AVCodecContext *avctx)
{
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
return 0;
}
static av_cold int pam_encode_close(AVCodecContext *avctx)
{
av_frame_free(&avctx->coded_frame);
return 0;
}
AVCodec ff_pam_encoder = {
.name = "pam",
.long_name = NULL_IF_CONFIG_SMALL("PAM (Portable AnyMap) image"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_PAM,
.priv_data_size = sizeof(PNMContext),
.init = pam_encode_init,
.close = pam_encode_close,
.encode2 = pam_encode_frame,
.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA, AV_PIX_FMT_RGB48BE, AV_PIX_FMT_RGBA64BE, AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY8A, AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_MONOBLACK, AV_PIX_FMT_NONE

View File

@ -173,6 +173,17 @@ static void *pcm_dvd_decode_samples(AVCodecContext *avctx, const uint8_t *src,
#endif
return dst16;
case 20:
if (avctx->channels == 1) {
do {
for (i = 2; i; i--) {
dst32[0] = bytestream2_get_be16u(&gb) << 16;
dst32[1] = bytestream2_get_be16u(&gb) << 16;
t = bytestream2_get_byteu(&gb);
*dst32++ += (t & 0xf0) << 8;
*dst32++ += (t & 0x0f) << 12;
}
} while (--blocks);
} else {
do {
for (i = s->groups_per_block; i; i--) {
dst32[0] = bytestream2_get_be16u(&gb) << 16;
@ -180,15 +191,26 @@ static void *pcm_dvd_decode_samples(AVCodecContext *avctx, const uint8_t *src,
dst32[2] = bytestream2_get_be16u(&gb) << 16;
dst32[3] = bytestream2_get_be16u(&gb) << 16;
t = bytestream2_get_byteu(&gb);
*dst32 += (t & 0xf0) << 8;
*dst32 += (t & 0x0f) << 12;
*dst32++ += (t & 0xf0) << 8;
*dst32++ += (t & 0x0f) << 12;
t = bytestream2_get_byteu(&gb);
*dst32 += (t & 0xf0) << 8;
*dst32 += (t & 0x0f) << 12;
*dst32++ += (t & 0xf0) << 8;
*dst32++ += (t & 0x0f) << 12;
}
} while (--blocks);
}
return dst32;
case 24:
if (avctx->channels == 1) {
do {
for (i = 2; i; i--) {
dst32[0] = bytestream2_get_be16u(&gb) << 16;
dst32[1] = bytestream2_get_be16u(&gb) << 16;
*dst32++ += bytestream2_get_byteu(&gb) << 8;
*dst32++ += bytestream2_get_byteu(&gb) << 8;
}
} while (--blocks);
} else {
do {
for (i = s->groups_per_block; i; i--) {
dst32[0] = bytestream2_get_be16u(&gb) << 16;
@ -201,6 +223,7 @@ static void *pcm_dvd_decode_samples(AVCodecContext *avctx, const uint8_t *src,
*dst32++ += bytestream2_get_byteu(&gb) << 8;
}
} while (--blocks);
}
return dst32;
default:
return NULL;
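
The new branches above add 20-bit and 24-bit handling for mono streams, whose groups are half the size of the stereo ones. In the 20-bit case a group covers two samples: two big-endian 16-bit words carry the 16 most significant bits of each sample, followed by one byte whose high nibble holds the last 4 bits of the first sample and whose low nibble holds those of the second; each sample ends up left-justified in a 32-bit output word (significant bits in 31..12). The 24-bit mono branch is analogous, with one full low byte per sample. A worked illustration of one 20-bit group, with the layout taken from the loop above and a made-up helper name:

#include <stdint.h>

/* bytes = { A_hi, A_lo, B_hi, B_lo, ab }: 'ab' extends sample A with its
 * high nibble and sample B with its low nibble. */
static void unpack_20bit_pair(const uint8_t bytes[5], int32_t out[2])
{
    uint32_t a = (uint32_t)(bytes[0] << 8 | bytes[1]) << 16;  /* 16 MSBs of A */
    uint32_t b = (uint32_t)(bytes[2] << 8 | bytes[3]) << 16;  /* 16 MSBs of B */
    a += (uint32_t)(bytes[4] & 0xf0) << 8;    /* A's nibble lands in bits 15..12 */
    b += (uint32_t)(bytes[4] & 0x0f) << 12;   /* B's nibble lands in bits 15..12 */
    out[0] = (int32_t)a;
    out[1] = (int32_t)b;
}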

View File

@ -33,6 +33,24 @@
static const uint32_t monoblack_pal[16] = { 0x000000, 0xFFFFFF };
static av_cold int pcx_encode_init(AVCodecContext *avctx)
{
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
return 0;
}
static av_cold int pcx_encode_close(AVCodecContext *avctx)
{
av_frame_free(&avctx->coded_frame);
return 0;
}
/**
* PCX run-length encoder
* @param dst output buffer
@ -86,7 +104,6 @@ static int pcx_rle_encode( uint8_t *dst, int dst_size,
static int pcx_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *frame, int *got_packet)
{
AVFrame *const pict = (AVFrame *) frame;
const uint8_t *buf_end;
uint8_t *buf;
@ -95,9 +112,6 @@ static int pcx_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
uint32_t palette256[256];
const uint8_t *src;
pict->pict_type = AV_PICTURE_TYPE_I;
pict->key_frame = 1;
if (avctx->width > 65535 || avctx->height > 65535) {
av_log(avctx, AV_LOG_ERROR, "image dimensions do not fit in 16 bits\n");
return -1;
@ -121,7 +135,7 @@ static int pcx_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
case AV_PIX_FMT_PAL8:
bpp = 8;
nplanes = 1;
pal = (uint32_t *)pict->data[1];
pal = (uint32_t *)frame->data[1];
break;
case AV_PIX_FMT_MONOBLACK:
bpp = 1;
@ -166,7 +180,7 @@ static int pcx_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
while (buf - pkt->data < 128)
*buf++= 0;
src = pict->data[0];
src = frame->data[0];
for (y = 0; y < avctx->height; y++) {
if ((written = pcx_rle_encode(buf, buf_end - buf,
@ -175,7 +189,7 @@ static int pcx_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
return -1;
}
buf += written;
src += pict->linesize[0];
src += frame->linesize[0];
}
if (nplanes == 1 && bpp == 8) {
@ -201,6 +215,8 @@ AVCodec ff_pcx_encoder = {
.long_name = NULL_IF_CONFIG_SMALL("PC Paintbrush PCX image"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_PCX,
.init = pcx_encode_init,
.close = pcx_encode_close,
.encode2 = pcx_encode_frame,
.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_RGB24,

Some files were not shown because too many files have changed in this diff.