Compare commits
203 Commits
Author | SHA1 | Date | |
---|---|---|---|
![]() |
af5917698b | ||
![]() |
b58cbb07bc | ||
![]() |
dd9789ab6d | ||
![]() |
aded27a30b | ||
![]() |
a1f68d9518 | ||
![]() |
2612b69d3f | ||
![]() |
88217c40b6 | ||
![]() |
001cc6d27a | ||
![]() |
ea6bd45885 | ||
![]() |
51850d9a2c | ||
![]() |
211ec9196d | ||
![]() |
e4318f6875 | ||
![]() |
008c1debb9 | ||
![]() |
f33bd39d27 | ||
![]() |
0337770352 | ||
![]() |
ab8f686bbc | ||
![]() |
b5a942685f | ||
![]() |
7e2ea1cb70 | ||
![]() |
10e1108732 | ||
![]() |
3da0395b34 | ||
![]() |
25d50ad814 | ||
![]() |
fb564cd21d | ||
![]() |
10609d8864 | ||
![]() |
dbc48ba1cd | ||
![]() |
19ffa15d2d | ||
![]() |
531ea97b89 | ||
![]() |
0a5b242422 | ||
![]() |
f43fb74ad4 | ||
![]() |
0463e9d6da | ||
![]() |
e8784ec5f5 | ||
![]() |
0a93dc83d2 | ||
![]() |
bced2ad1bd | ||
![]() |
fe22d0d7c6 | ||
![]() |
a5167b4d66 | ||
![]() |
3123cb5d0b | ||
![]() |
375a97bb70 | ||
![]() |
0fc6a95116 | ||
![]() |
f93412406f | ||
![]() |
05eddbbc05 | ||
![]() |
75b21eb8f8 | ||
![]() |
1933fa5daa | ||
![]() |
bd05ac1df8 | ||
![]() |
8d0f079c45 | ||
![]() |
889a4779eb | ||
![]() |
a9d3baff03 | ||
![]() |
f4ce752f22 | ||
![]() |
02532e1162 | ||
![]() |
b1652fbb9e | ||
![]() |
39236d5618 | ||
![]() |
38e628fd6d | ||
![]() |
cf6cbcca7e | ||
![]() |
df9eca44f8 | ||
![]() |
a563a1468f | ||
![]() |
fd17e34217 | ||
![]() |
ec17706e5c | ||
![]() |
e0f85f10c8 | ||
![]() |
9ccaeff67a | ||
![]() |
4a46a29b07 | ||
![]() |
22ef88ee30 | ||
![]() |
48e53620e0 | ||
![]() |
8a7748e1de | ||
![]() |
86a360e349 | ||
![]() |
07ad029d1b | ||
![]() |
b53b6a25e2 | ||
![]() |
83ee63e7f8 | ||
![]() |
536093824d | ||
![]() |
485f53548b | ||
![]() |
2ac7a0d999 | ||
![]() |
b35d67fa51 | ||
![]() |
b942813a41 | ||
![]() |
a75f293264 | ||
![]() |
33862c1f6d | ||
![]() |
89fbae7686 | ||
![]() |
3f9ecaf886 | ||
![]() |
50dc37f281 | ||
![]() |
6c39528a3a | ||
![]() |
9b0c9261cc | ||
![]() |
9fc45b313c | ||
![]() |
d9390b9d64 | ||
![]() |
5a8b43285b | ||
![]() |
8e94e5d339 | ||
![]() |
73cf5d9a28 | ||
![]() |
ac83399281 | ||
![]() |
9bcaf90378 | ||
![]() |
d9249b5582 | ||
![]() |
66b95ee4df | ||
![]() |
0f7e67be3a | ||
![]() |
0856eea770 | ||
![]() |
04f80ed3f8 | ||
![]() |
c6f6be93ac | ||
![]() |
c6418be043 | ||
![]() |
f78d7e6a03 | ||
![]() |
0cb8d786f2 | ||
![]() |
c74846388b | ||
![]() |
35013fa23a | ||
![]() |
33d77bc384 | ||
![]() |
e6a5023d1f | ||
![]() |
99e737a7c7 | ||
![]() |
835037506b | ||
![]() |
2f290cf881 | ||
![]() |
2523bdcd67 | ||
![]() |
9b87d15ca8 | ||
![]() |
a13a288ec8 | ||
![]() |
9f2c8734b9 | ||
![]() |
d35086d715 | ||
![]() |
be1b665dec | ||
![]() |
47f5f6b230 | ||
![]() |
01291b4e2f | ||
![]() |
787e094ed0 | ||
![]() |
2f8f4351b8 | ||
![]() |
f77cb3d4a6 | ||
![]() |
3258e12d8c | ||
![]() |
84cd276d0e | ||
![]() |
a298e13c2c | ||
![]() |
50fb69c737 | ||
![]() |
9c826d8d51 | ||
![]() |
4aa4c78dae | ||
![]() |
7b13aef5d2 | ||
![]() |
aebafed24f | ||
![]() |
4ea7ff4354 | ||
![]() |
262c678357 | ||
![]() |
763ab41f77 | ||
![]() |
76ee9fdb61 | ||
![]() |
692fd5635f | ||
![]() |
c3b1261afa | ||
![]() |
04fd0250e1 | ||
![]() |
6f236d3774 | ||
![]() |
c1c245e1a3 | ||
![]() |
369f46aae3 | ||
![]() |
05b448082a | ||
![]() |
b4bfbbfb95 | ||
![]() |
e6d9094fd3 | ||
![]() |
bcc4c360aa | ||
![]() |
8be177e048 | ||
![]() |
7d5908d5c8 | ||
![]() |
3550d239a6 | ||
![]() |
d3f96c1e3c | ||
![]() |
ac07ab7db7 | ||
![]() |
9bff35abde | ||
![]() |
9ee7fcdcd0 | ||
![]() |
f1b4a71ddf | ||
![]() |
fa538f1a8c | ||
![]() |
7689fe5cfd | ||
![]() |
f597b9f04e | ||
![]() |
037d6cf580 | ||
![]() |
8bd7bf1a3c | ||
![]() |
f90c9bbbca | ||
![]() |
f2abcdedfe | ||
![]() |
eebd161e76 | ||
![]() |
6a4d1325e2 | ||
![]() |
dfce316c12 | ||
![]() |
87e2a689a8 | ||
![]() |
e9eb9839bd | ||
![]() |
c3be71001c | ||
![]() |
f3deed98ec | ||
![]() |
7439ed2f39 | ||
![]() |
40607290c9 | ||
![]() |
a9e683bb7a | ||
![]() |
0edf9c6907 | ||
![]() |
b2c9cd36d3 | ||
![]() |
b3e6d3ee78 | ||
![]() |
f6327e5fa6 | ||
![]() |
b80c486fb3 | ||
![]() |
37469af294 | ||
![]() |
350054e8e2 | ||
![]() |
7ff46a20d5 | ||
![]() |
7edd380668 | ||
![]() |
26bed98d64 | ||
![]() |
480e18e6ff | ||
![]() |
742bc7eea8 | ||
![]() |
c58edf52f9 | ||
![]() |
ea52c0614c | ||
![]() |
5e84ab838c | ||
![]() |
3f7b89937d | ||
![]() |
39c9b47bb6 | ||
![]() |
016bf7cb81 | ||
![]() |
bb00821f62 | ||
![]() |
d9dbd2362d | ||
![]() |
ec684aa58a | ||
![]() |
a613dd627c | ||
![]() |
1903d6d2b0 | ||
![]() |
2319fddfd3 | ||
![]() |
5a6ef7d7cb | ||
![]() |
47e8d17132 | ||
![]() |
a156f86e91 | ||
![]() |
3586314147 | ||
![]() |
28e17ba220 | ||
![]() |
998173ed94 | ||
![]() |
bffed1d9d0 | ||
![]() |
52bf144ad9 | ||
![]() |
6fc8c72c7d | ||
![]() |
77621ca311 | ||
![]() |
1a89aab3c0 | ||
![]() |
c43e5faf03 | ||
![]() |
876d2d8db8 | ||
![]() |
1caad74533 | ||
![]() |
28950d1f8c | ||
![]() |
791e085634 | ||
![]() |
3dc88e1fd6 | ||
![]() |
4e2cab5a79 | ||
![]() |
1dab67b647 | ||
![]() |
0d4549c2d6 | ||
![]() |
c741eb7d88 |
128
Changelog
128
Changelog
@ -1,6 +1,131 @@
|
||||
Entries are sorted chronologically from oldest to youngest within each release,
|
||||
releases are sorted from youngest to oldest.
|
||||
|
||||
version 2.6.3:
|
||||
- avcodec/libtheoraenc: Check for av_malloc failure
|
||||
- ffmpeg_opt: Fix -timestamp parsing
|
||||
- hevc: make avcodec_decode_video2() fail if get_format() fails
|
||||
- avcodec/cavsdec: Use ff_set_dimensions()
|
||||
- swr: fix alignment issue caused by 8ch sse functions
|
||||
- avcodec/mjpegdec: fix len computation in ff_mjpeg_decode_dqt()
|
||||
- avcodec/jpeg2000dec: fix boolean operator
|
||||
- avcodec/hevc_ps: Explicitly check num_tile_* for negative values
|
||||
- avformat/matroskadec: Cleanup error handling for bz2 & zlib
|
||||
- avformat/nutdec: Fix use of uninitialized value
|
||||
- tools/graph2dot: use larger data types than int for array/string sizes
|
||||
- avformat/matroskaenc: Check ff_vorbiscomment_length in put_flac_codecpriv()
|
||||
- avcodec/mpeg12dec: use the correct dimensions for checking SAR
|
||||
- xcbgrab: Validate the capture area
|
||||
- xcbgrab: Do not assume the non shm image data is always available
|
||||
- avfilter/lavfutils: disable frame threads when decoding a single image
|
||||
- avformat/mov: Do not read ACLR into extradata for H.264
|
||||
- ffmpeg: remove incorrect network deinit
|
||||
- OpenCL: Avoid potential buffer overflow in cmdutils_opencl.c
|
||||
- libvpxenc: only set noise reduction w/vp8
|
||||
- vp9: remove another optimization branch in iadst16 which causes overflows.
|
||||
- lavf: Reset global flag on deinit
|
||||
- network: Do not leave context locked on error
|
||||
- vp9: remove one optimization branch in iadst16 which causes overflows.
|
||||
- fate: Include branch information in the payload header
|
||||
- avformat/utils: Ensure that AVFMT_FLAG_CUSTOM_IO is set before use
|
||||
- avformat/img2dec: do not rewind custom io buffers
|
||||
- avcodec/alsdec: Use av_mallocz_array() for chan_data to ensure the arrays never contain random data
|
||||
- avcodec/atrac3plusdsp: fix on stack alignment
|
||||
- swresample/swresample-test: Randomly wipe out channel counts
|
||||
- swresample: Check channel layouts and channels against each other and print human readable error messages
|
||||
- swresample: Allow reinitialization without ever setting channel layouts (cherry picked from commit 80a28c7509a11114e1aea5b208d56c6646d69c07)
|
||||
- swresample: Allow reinitialization without ever setting channel counts
|
||||
- dashenc: replace attribute id with contentType for the AdaptationSet element
|
||||
- avformat/matroskaenc: Use avoid_negative_ts_use_pts if no stream writes dts
|
||||
- avformat/mux: Add avoid_negative_ts_use_pts
|
||||
- tests/fate-run: do not attempt to parse tiny_psnrs output if it failed
|
||||
- cafdec: free extradata before allocating it
|
||||
- imgutils: initialize palette padding bytes in av_image_alloc
|
||||
- aacdec: don't return frames without data
|
||||
- id3v2: catch avio_read errors in check_tag
|
||||
- avi: Validate sample_size
|
||||
- aacsbr: break infinite loop in sbr_hf_calc_npatches
|
||||
- diracdec: avoid overflow of bytes*8 in decode_lowdelay
|
||||
- diracdec: prevent overflow in data_unit_size check
|
||||
- avformat/matroskadec: Use tracks[k]->stream instead of s->streams[k]
|
||||
- matroskadec: use uint64_t instead of int for index_scale
|
||||
- pngdec: don't use AV_PIX_FMT_MONOBLACK for apng
|
||||
- pngdec: return correct error code from decode_frame_common
|
||||
- nutdec: fix illegal count check in decode_main_header
|
||||
- nutdec: fix memleaks on error in nut_read_header
|
||||
- apedec: prevent out of array writes in decode_array_0000
|
||||
- apedec: set s->samples only when init_frame_decoder succeeded
|
||||
- swscale/ppc/swscale_altivec.c: POWER LE support in yuv2planeX_8() delete macro GET_VF() it was wrong
|
||||
- alac: reject rice_limit 0 if compression is used
|
||||
- alsdec: only adapt order for positive max_order
|
||||
- bink: check vst->index_entries before using it
|
||||
- mpeg4videodec: only allow a positive length
|
||||
- aacpsy: correct calculation of minath in psy_3gpp_init
|
||||
- alsdec: validate time diff index
|
||||
- alsdec: ensure channel reordering is reversible
|
||||
- ac3: validate end in ff_ac3_bit_alloc_calc_mask
|
||||
- aacpsy: avoid psy_band->threshold becoming NaN
|
||||
- aasc: return correct buffer size from aasc_decode_frame
|
||||
- matroskadec: export cover art correctly
|
||||
- mxfenc: don't try to write footer without header
|
||||
- mxfenc: fix memleaks in mxf_write_footer
|
||||
- rtpenc_mpegts: Set chain->rtp_ctx only after avformat_write_header succeeded
|
||||
- rtpenc_mpegts: Free the right ->pb in the error path in the init function
|
||||
|
||||
version 2.6.2:
|
||||
- avcodec/h264: Do not fail with randomly truncated VUIs
|
||||
- avcodec/h264_ps: Move truncation check from VUI to SPS
|
||||
- avcodec/h264: Be more tolerant to changing pps id between slices
|
||||
- avcodec/aacdec: Fix storing state before PCE decode
|
||||
- avcodec/h264: reset the counts in the correct context
|
||||
- avcodec/h264_slice: Do not reset mb_aff_frame per slice
|
||||
- avcodec/h264: finish previous slices before switching to single thread mode
|
||||
- avcodec/h264: Fix race between slices where one overwrites data from the next
|
||||
- avformat/utils: avoid discarded streams in av_find_default_stream_index()
|
||||
- ffmpeg: Fix extradata allocation
|
||||
- avcodec/h264_refs: Do not set reference to things which do not exist
|
||||
- avcodec/h264: Fail for invalid mixed IDR / non IDR frames in slice threading mode
|
||||
- Revert "avcodec/exr: fix memset first arg in reverse_lut()"
|
||||
- h264: avoid unnecessary calls to get_format
|
||||
- avutil/pca: Check for av_malloc* failures
|
||||
- avutil/cpu: add missing check for mmxext to av_force_cpu_flags
|
||||
- lavc/dnxhd: Fix pix_fmt change.
|
||||
- avformat/http: replace cookies with updated values instead of appending forever
|
||||
- avformat/hls: store cookies returned in HLS key response
|
||||
- avformat/rmdec: fix support for 0 sized mdpr
|
||||
- avcodec/msrledec: restructure msrle_decode_pal4() based on the line number instead of the pixel pointer
|
||||
- avcodec/hevc_ps: Check cropping parameters more correctly
|
||||
- hevc: make the crop sizes unsigned
|
||||
- avcodec/dnxhddec: Reset is_444 if format is not 444
|
||||
- avcodec/dnxhddec: Check that the frame is interlaced before using cur_field
|
||||
- mips/float_dsp: fix vector_fmul_window_mips on mips64
|
||||
- doc: Remove non-existing decklink options.
|
||||
|
||||
version 2.6.1:
|
||||
- avformat/mov: Disallow ".." in dref unless use_absolute_path is set
|
||||
- avfilter/palettegen: make sure at least one frame was sent to the filter
|
||||
- avformat/mov: Check for string truncation in mov_open_dref()
|
||||
- ac3_fixed: fix out-of-bound read
|
||||
- mips/asmdefs: use _ABI64 as defined by gcc
|
||||
- hevc: delay ff_thread_finish_setup for hwaccel
|
||||
- avcodec/012v: Check dimensions more completely
|
||||
- asfenc: fix leaking asf->index_ptr on error
|
||||
- roqvideoenc: set enc->avctx in roq_encode_init
|
||||
- avcodec/options_table: remove extradata_size from the AVOptions table
|
||||
- ffmdec: limit the backward seek to the last resync position
|
||||
- Add dependencies to configure file for vf_fftfilt
|
||||
- ffmdec: make sure the time base is valid
|
||||
- ffmdec: fix infinite loop at EOF
|
||||
- ffmdec: initialize f_cprv, f_stvi and f_stau
|
||||
- arm: Suppress tags about used cpu arch and extensions
|
||||
- mxfdec: Fix the error handling for when strftime fails
|
||||
- avcodec/opusdec: Fix delayed sample value
|
||||
- avcodec/opusdec: Clear out pointers per packet
|
||||
- avcodec/utils: Align YUV411 by as much as the other YUV variants
|
||||
- lavc/hevcdsp: Fix compilation for arm with --disable-neon.
|
||||
- vp9: fix segmentation map retention with threading enabled.
|
||||
- Revert "avutil/opencl: is_compiled flag not being cleared in av_opencl_uninit"
|
||||
|
||||
version 2.6:
|
||||
- nvenc encoder
|
||||
- 10bit spp filter
|
||||
@ -35,8 +160,7 @@ version 2.6:
|
||||
- Fix stsd atom corruption in DNxHD QuickTimes
|
||||
- Canopus HQX decoder
|
||||
- RTP depacketization of T.140 text (RFC 4103)
|
||||
- VP9 RTP payload format (draft 0) experimental depacketizer
|
||||
- Port MIPS opttimizations to 64-bit
|
||||
- Port MIPS optimizations to 64-bit
|
||||
|
||||
|
||||
version 2.5:
|
||||
|
@ -545,6 +545,7 @@ x86 Michael Niedermayer
|
||||
Releases
|
||||
========
|
||||
|
||||
2.6 Michael Niedermayer
|
||||
2.5 Michael Niedermayer
|
||||
2.4 Michael Niedermayer
|
||||
2.2 Michael Niedermayer
|
||||
|
65
RELEASE_NOTES
Normal file
65
RELEASE_NOTES
Normal file
@ -0,0 +1,65 @@
|
||||
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ RELEASE NOTES for FFmpeg 2.6 "Grothendieck" │
|
||||
└─────────────────────────────────────────────┘
|
||||
|
||||
The FFmpeg Project proudly presents FFmpeg 2.6 "Grothendieck", about 3
|
||||
months after the release of FFmpeg 2.5.
|
||||
|
||||
A lot of important work got in this time, so let's start talking about what
|
||||
we like to brag the most about: features.
|
||||
|
||||
A lot of people will probably be happy to hear that we now have support for
|
||||
NVENC — the Nvidia Video Encoder interface for H.264 encoding — thanks to
|
||||
Timo Rothenpieler, with some little help from NVIDIA and Philip Langdale.
|
||||
|
||||
People in the broadcasting industry might also be interested in the first
|
||||
steps of closed captions support with the introduction of a decoder by
|
||||
Anshul Maheshwari.
|
||||
|
||||
Regarding filters love, we improved and added many. We could talk about the
|
||||
10-bit support in spp, but maybe it's more important to mention the addition
|
||||
of colorlevels (yet another color handling filter), tblend (allowing you
|
||||
to for example run a diff between successive frames of a video stream), or
|
||||
the dcshift audio filter.
|
||||
|
||||
There are also two other important filters landing in libavfilter: palettegen
|
||||
and paletteuse. Both submitted by the Stupeflix company. These filters will
|
||||
be very useful in case you are looking for creating high quality GIFs, a
|
||||
format that still bravely fights annihilation in 2015.
|
||||
|
||||
There are many other new features, but let's follow-up on one big cleanup
|
||||
achievement: the libmpcodecs (MPlayer filters) wrapper is finally dead. The
|
||||
last remaining filters (softpulldown/repeatfields, eq*, and various
|
||||
postprocessing filters) were ported by Arwa Arif (OPW student) and Paul B
|
||||
Mahol.
|
||||
|
||||
Concerning API changes, there are not many things to mention. Though, the
|
||||
introduction of device inputs and outputs listing by Lukasz Marek is a
|
||||
notable addition (try ffmpeg -sources or ffmpeg -sinks for an example of
|
||||
the usage). As usual, see doc/APIchanges for more information.
|
||||
|
||||
Now let's talk about optimizations. Ronald S. Bultje made the VP9 decoder
|
||||
usable on x86 32-bit systems and pre-ssse3 CPUs like Phenom (even dual core
|
||||
Athlons can play 1080p 30fps VP9 content now), so we now secretly hope for
|
||||
Google and Mozilla to use ffvp9 instead of libvpx. But VP9 is not the
|
||||
center of attention anymore, and HEVC/H.265 is also getting many
|
||||
improvements, which include C and x86 ASM optimizations, mainly from James
|
||||
Almer, Christophe Gisquet and Pierre-Edouard Lepere.
|
||||
|
||||
Even though we had many x86 contributions, it is not the only architecture
|
||||
getting some love, with Seppo Tomperi adding ARM NEON optimizations to the
|
||||
HEVC stack, and James Cowgill adding MIPS64 assembly for all kind of audio
|
||||
processing code in libavcodec.
|
||||
|
||||
And finally, Michael Niedermayer is still fixing many bugs, dealing with
|
||||
most of the boring work such as making releases, applying tons of
|
||||
contributors patches, and daily merging the changes from the Libav project.
|
||||
|
||||
A more complete Changelog is available at the root of the project, and the
|
||||
complete Git history on http://source.ffmpeg.org.
|
||||
|
||||
We hope you will like this release as much as we enjoyed working on it, and
|
||||
as usual, if you have any questions about it, or any FFmpeg related topic,
|
||||
feel free to join us on the #ffmpeg IRC channel (on irc.freenode.net) or ask
|
||||
on the mailing-lists.
|
@ -22,6 +22,7 @@
|
||||
#include "libavutil/time.h"
|
||||
#include "libavutil/log.h"
|
||||
#include "libavutil/opencl.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "cmdutils.h"
|
||||
|
||||
typedef struct {
|
||||
@ -238,7 +239,8 @@ int opt_opencl_bench(void *optctx, const char *opt, const char *arg)
|
||||
devices[count].platform_idx = i;
|
||||
devices[count].device_idx = j;
|
||||
devices[count].runtime = score;
|
||||
strcpy(devices[count].device_name, device_node->device_name);
|
||||
av_strlcpy(devices[count].device_name, device_node->device_name,
|
||||
sizeof(devices[count].device_name));
|
||||
count++;
|
||||
}
|
||||
}
|
||||
|
9
configure
vendored
9
configure
vendored
@ -1769,6 +1769,7 @@ SYSTEM_FUNCS="
|
||||
TOOLCHAIN_FEATURES="
|
||||
as_dn_directive
|
||||
as_func
|
||||
as_object_arch
|
||||
asm_mod_q
|
||||
attribute_may_alias
|
||||
attribute_packed
|
||||
@ -2595,6 +2596,8 @@ deshake_filter_select="pixelutils"
|
||||
drawtext_filter_deps="libfreetype"
|
||||
ebur128_filter_deps="gpl"
|
||||
eq_filter_deps="gpl"
|
||||
fftfilt_filter_deps="avcodec"
|
||||
fftfilt_filter_select="rdft"
|
||||
flite_filter_deps="libflite"
|
||||
frei0r_filter_deps="frei0r dlopen"
|
||||
frei0r_src_filter_deps="frei0r dlopen"
|
||||
@ -4560,6 +4563,11 @@ EOF
|
||||
check_as <<EOF && enable as_dn_directive
|
||||
ra .dn d0.i16
|
||||
.unreq ra
|
||||
EOF
|
||||
|
||||
# llvm's integrated assembler supports .object_arch from llvm 3.5
|
||||
[ "$objformat" = elf ] && check_as <<EOF && enable as_object_arch
|
||||
.object_arch armv4
|
||||
EOF
|
||||
|
||||
[ $target_os != win32 ] && enabled_all armv6t2 shared !pic && enable_weak_pic
|
||||
@ -5451,6 +5459,7 @@ enabled asyncts_filter && prepend avfilter_deps "avresample"
|
||||
enabled atempo_filter && prepend avfilter_deps "avcodec"
|
||||
enabled ebur128_filter && enabled swresample && prepend avfilter_deps "swresample"
|
||||
enabled elbg_filter && prepend avfilter_deps "avcodec"
|
||||
enabled fftfilt_filter && prepend avfilter_deps "avcodec"
|
||||
enabled mcdeint_filter && prepend avfilter_deps "avcodec"
|
||||
enabled movie_filter && prepend avfilter_deps "avformat avcodec"
|
||||
enabled pan_filter && prepend avfilter_deps "swresample"
|
||||
|
@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
|
||||
# This could be handy for archiving the generated documentation or
|
||||
# if some version control system is used.
|
||||
|
||||
PROJECT_NUMBER =
|
||||
PROJECT_NUMBER = 2.6.3
|
||||
|
||||
# With the PROJECT_LOGO tag one can specify a logo or icon that is included
|
||||
# in the documentation. The maximum height of the logo should not exceed 55
|
||||
|
@ -349,7 +349,7 @@ FFmpeg has a @url{http://ffmpeg.org/ffmpeg-protocols.html#concat,
|
||||
@code{concat}} protocol designed specifically for that, with examples in the
|
||||
documentation.
|
||||
|
||||
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to concatenate
|
||||
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow one to concatenate
|
||||
video by merely concatenating the files containing them.
|
||||
|
||||
Hence you may concatenate your multimedia files by first transcoding them to
|
||||
|
@ -72,7 +72,7 @@ the HTTP server (configured through the @option{HTTPPort} option), and
|
||||
configuration file.
|
||||
|
||||
Each feed is associated to a file which is stored on disk. This stored
|
||||
file is used to allow to send pre-recorded data to a player as fast as
|
||||
file is used to send pre-recorded data to a player as fast as
|
||||
possible when new content is added in real-time to the stream.
|
||||
|
||||
A "live-stream" or "stream" is a resource published by
|
||||
|
@ -261,10 +261,14 @@ Possible flags for this option are:
|
||||
@item sse4.1
|
||||
@item sse4.2
|
||||
@item avx
|
||||
@item avx2
|
||||
@item xop
|
||||
@item fma3
|
||||
@item fma4
|
||||
@item 3dnow
|
||||
@item 3dnowext
|
||||
@item bmi1
|
||||
@item bmi2
|
||||
@item cmov
|
||||
@end table
|
||||
@item ARM
|
||||
@ -275,6 +279,13 @@ Possible flags for this option are:
|
||||
@item vfp
|
||||
@item vfpv3
|
||||
@item neon
|
||||
@item setend
|
||||
@end table
|
||||
@item AArch64
|
||||
@table @samp
|
||||
@item armv8
|
||||
@item vfp
|
||||
@item neon
|
||||
@end table
|
||||
@item PowerPC
|
||||
@table @samp
|
||||
|
@ -3486,7 +3486,7 @@ Set number overlapping pixels for each block. Since the filter can be slow, you
|
||||
may want to reduce this value, at the cost of a less effective filter and the
|
||||
risk of various artefacts.
|
||||
|
||||
If the overlapping value doesn't allow to process the whole input width or
|
||||
If the overlapping value doesn't permit processing the whole input width or
|
||||
height, a warning will be displayed and according borders won't be denoised.
|
||||
|
||||
Default value is @var{blocksize}-1, which is the best possible setting.
|
||||
|
@ -23,7 +23,7 @@ Reduce buffering.
|
||||
|
||||
@item probesize @var{integer} (@emph{input})
|
||||
Set probing size in bytes, i.e. the size of the data to analyze to get
|
||||
stream information. A higher value will allow to detect more
|
||||
stream information. A higher value will enable detecting more
|
||||
information in case it is dispersed into the stream, but will increase
|
||||
latency. Must be an integer not lesser than 32. It is 5000000 by default.
|
||||
|
||||
@ -67,7 +67,7 @@ Default is 0.
|
||||
|
||||
@item analyzeduration @var{integer} (@emph{input})
|
||||
Specify how many microseconds are analyzed to probe the input. A
|
||||
higher value will allow to detect more accurate information, but will
|
||||
higher value will enable detecting more accurate information, but will
|
||||
increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
|
||||
|
||||
@item cryptokey @var{hexadecimal string} (@emph{input})
|
||||
|
160
doc/indevs.texi
160
doc/indevs.texi
@ -1,7 +1,7 @@
|
||||
@chapter Input Devices
|
||||
@c man begin INPUT DEVICES
|
||||
|
||||
Input devices are configured elements in FFmpeg which allow to access
|
||||
Input devices are configured elements in FFmpeg which enable accessing
|
||||
the data coming from a multimedia device attached to your system.
|
||||
|
||||
When you configure your FFmpeg build, all the supported input devices
|
||||
@ -150,6 +150,81 @@ $ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
|
||||
|
||||
BSD video input device.
|
||||
|
||||
@section decklink
|
||||
|
||||
The decklink input device provides capture capabilities for Blackmagic
|
||||
DeckLink devices.
|
||||
|
||||
To enable this input device, you need the Blackmagic DeckLink SDK and you
|
||||
need to configure with the appropriate @code{--extra-cflags}
|
||||
and @code{--extra-ldflags}.
|
||||
On Windows, you need to run the IDL files through @command{widl}.
|
||||
|
||||
DeckLink is very picky about the formats it supports. Pixel format is
|
||||
uyvy422 or v210, framerate and video size must be determined for your device with
|
||||
@command{-list_formats 1}. Audio sample rate is always 48 kHz and the number
|
||||
of channels can be 2, 8 or 16.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item list_devices
|
||||
If set to @option{true}, print a list of devices and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item list_formats
|
||||
If set to @option{true}, print a list of supported formats and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item bm_v210
|
||||
If set to @samp{1}, video is captured in 10 bit v210 instead
|
||||
of uyvy422. Not all Blackmagic devices support this option.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
List input devices:
|
||||
@example
|
||||
ffmpeg -f decklink -list_devices 1 -i dummy
|
||||
@end example
|
||||
|
||||
@item
|
||||
List supported formats:
|
||||
@example
|
||||
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 1080i50 (format 11):
|
||||
@example
|
||||
ffmpeg -f decklink -i 'Intensity Pro@@11' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 1080i50 10 bit:
|
||||
@example
|
||||
ffmpeg -bm_v210 1 -f decklink -i 'UltraStudio Mini Recorder@@11' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 720p50 with 32bit audio:
|
||||
@example
|
||||
ffmpeg -bm_audiodepth 32 -f decklink -i 'UltraStudio Mini Recorder@@14' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 576i50 with 8 audio channels:
|
||||
@example
|
||||
ffmpeg -bm_channels 8 -f decklink -i 'UltraStudio Mini Recorder@@3' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section dshow
|
||||
|
||||
Windows DirectShow input device.
|
||||
@ -1109,89 +1184,8 @@ The syntax is:
|
||||
-grab_x @var{x_offset} -grab_y @var{y_offset}
|
||||
@end example
|
||||
|
||||
Set the grabing region coordinates. The are expressed as offset from the top left
|
||||
Set the grabbing region coordinates. They are expressed as offset from the top left
|
||||
corner of the X11 window. The default value is 0.
|
||||
|
||||
@section decklink
|
||||
|
||||
The decklink input device provides capture capabilities for Blackmagic
|
||||
DeckLink devices.
|
||||
|
||||
To enable this input device, you need the Blackmagic DeckLink SDK and you
|
||||
need to configure with the appropriate @code{--extra-cflags}
|
||||
and @code{--extra-ldflags}.
|
||||
On Windows, you need to run the IDL files through @command{widl}.
|
||||
|
||||
DeckLink is very picky about the formats it supports. Pixel format is
|
||||
uyvy422 or v210, framerate and video size must be determined for your device with
|
||||
@command{-list_formats 1}. Audio sample rate is always 48 kHz and the number
|
||||
of channels can be 2, 8 or 16.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item list_devices
|
||||
If set to @option{true}, print a list of devices and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item list_formats
|
||||
If set to @option{true}, print a list of supported formats and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item bm_v210
|
||||
If set to @samp{1}, video is captured in 10 bit v210 instead
|
||||
of uyvy422. Not all Blackmagic devices support this option.
|
||||
|
||||
@item bm_channels <CHANNELS>
|
||||
Number of audio channels, can be 2, 8 or 16
|
||||
|
||||
@item bm_audiodepth <BITDEPTH>
|
||||
Audio bit depth, can be 16 or 32.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
List input devices:
|
||||
@example
|
||||
ffmpeg -f decklink -list_devices 1 -i dummy
|
||||
@end example
|
||||
|
||||
@item
|
||||
List supported formats:
|
||||
@example
|
||||
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 1080i50 (format 11):
|
||||
@example
|
||||
ffmpeg -f decklink -i 'Intensity Pro@@11' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 1080i50 10 bit:
|
||||
@example
|
||||
ffmpeg -bm_v210 1 -f decklink -i 'UltraStudio Mini Recorder@@11' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 720p50 with 32bit audio:
|
||||
@example
|
||||
ffmpeg -bm_audiodepth 32 -f decklink -i 'UltraStudio Mini Recorder@@14' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture video clip at 576i50 with 8 audio channels:
|
||||
@example
|
||||
ffmpeg -bm_channels 8 -f decklink -i 'UltraStudio Mini Recorder@@3' -acodec copy -vcodec copy output.avi
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
|
||||
@c man end INPUT DEVICES
|
||||
|
@ -63,7 +63,7 @@ cache:@var{URL}
|
||||
|
||||
Physical concatenation protocol.
|
||||
|
||||
Allow to read and seek from many resource in sequence as if they were
|
||||
Read and seek from many resources in sequence as if they were
|
||||
a unique resource.
|
||||
|
||||
A URL accepted by this protocol has the syntax:
|
||||
@ -117,7 +117,7 @@ ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP/////////////
|
||||
|
||||
File access protocol.
|
||||
|
||||
Allow to read from or write to a file.
|
||||
Read from or write to a file.
|
||||
|
||||
A file URL can have the form:
|
||||
@example
|
||||
@ -155,7 +155,7 @@ time, which is valuable for files on slow medium.
|
||||
|
||||
FTP (File Transfer Protocol).
|
||||
|
||||
Allow to read from or write to remote resources using FTP protocol.
|
||||
Read from or write to remote resources using FTP protocol.
|
||||
|
||||
Following syntax is required.
|
||||
@example
|
||||
@ -374,7 +374,7 @@ be seekable, so they will fail with the MD5 output protocol.
|
||||
|
||||
UNIX pipe access protocol.
|
||||
|
||||
Allow to read and write from UNIX pipes.
|
||||
Read and write from UNIX pipes.
|
||||
|
||||
The accepted syntax is:
|
||||
@example
|
||||
@ -614,7 +614,7 @@ For more information see: @url{http://www.samba.org/}.
|
||||
|
||||
Secure File Transfer Protocol via libssh
|
||||
|
||||
Allow to read from or write to remote resources using SFTP protocol.
|
||||
Read from or write to remote resources using SFTP protocol.
|
||||
|
||||
Following syntax is required.
|
||||
|
||||
|
@ -844,7 +844,7 @@ Return 1.0 if @var{x} is +/-INFINITY, 0.0 otherwise.
|
||||
Return 1.0 if @var{x} is NAN, 0.0 otherwise.
|
||||
|
||||
@item ld(var)
|
||||
Allow to load the value of the internal variable with number
|
||||
Load the value of the internal variable with number
|
||||
@var{var}, which was previously stored with st(@var{var}, @var{expr}).
|
||||
The function returns the loaded value.
|
||||
|
||||
@ -912,7 +912,7 @@ Compute the square root of @var{expr}. This is equivalent to
|
||||
Compute expression @code{1/(1 + exp(4*x))}.
|
||||
|
||||
@item st(var, expr)
|
||||
Allow to store the value of the expression @var{expr} in an internal
|
||||
Store the value of the expression @var{expr} in an internal
|
||||
variable. @var{var} specifies the number of the variable where to
|
||||
store the value, and it is a value ranging from 0 to 9. The function
|
||||
returns the value stored in the internal variable.
|
||||
|
11
ffmpeg.c
11
ffmpeg.c
@ -351,7 +351,6 @@ void term_init(void)
|
||||
signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
|
||||
}
|
||||
#endif
|
||||
avformat_network_deinit();
|
||||
|
||||
signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
|
||||
signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
|
||||
@ -2666,11 +2665,13 @@ static int transcode_init(void)
|
||||
enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
|
||||
enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
|
||||
enc_ctx->field_order = dec_ctx->field_order;
|
||||
enc_ctx->extradata = av_mallocz(extra_size);
|
||||
if (!enc_ctx->extradata) {
|
||||
return AVERROR(ENOMEM);
|
||||
if (dec_ctx->extradata_size) {
|
||||
enc_ctx->extradata = av_mallocz(extra_size);
|
||||
if (!enc_ctx->extradata) {
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
|
||||
}
|
||||
memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
|
||||
enc_ctx->extradata_size= dec_ctx->extradata_size;
|
||||
enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
|
||||
|
||||
|
@ -2859,7 +2859,7 @@ const OptionDef options[] = {
|
||||
{ "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC |
|
||||
OPT_EXPERT | OPT_INPUT, { .off = OFFSET(ts_scale) },
|
||||
"set the input ts scale", "scale" },
|
||||
{ "timestamp", HAS_ARG | OPT_PERFILE, { .func_arg = opt_recording_timestamp },
|
||||
{ "timestamp", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_recording_timestamp },
|
||||
"set the recording timestamp ('now' to set the current time)", "time" },
|
||||
{ "metadata", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(metadata) },
|
||||
"add metadata", "string=string" },
|
||||
|
@ -38,15 +38,15 @@ static av_cold int zero12v_decode_init(AVCodecContext *avctx)
|
||||
static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
||||
int *got_frame, AVPacket *avpkt)
|
||||
{
|
||||
int line = 0, ret;
|
||||
int line, ret;
|
||||
const int width = avctx->width;
|
||||
AVFrame *pic = data;
|
||||
uint16_t *y, *u, *v;
|
||||
const uint8_t *line_end, *src = avpkt->data;
|
||||
int stride = avctx->width * 8 / 3;
|
||||
|
||||
if (width == 1) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Width 1 not supported.\n");
|
||||
if (width <= 1 || avctx->height <= 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Dimensions %dx%d not supported.\n", width, avctx->height);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
@ -67,45 +67,45 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
||||
pic->pict_type = AV_PICTURE_TYPE_I;
|
||||
pic->key_frame = 1;
|
||||
|
||||
y = (uint16_t *)pic->data[0];
|
||||
u = (uint16_t *)pic->data[1];
|
||||
v = (uint16_t *)pic->data[2];
|
||||
line_end = avpkt->data + stride;
|
||||
for (line = 0; line < avctx->height; line++) {
|
||||
uint16_t y_temp[6] = {0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000};
|
||||
uint16_t u_temp[3] = {0x8000, 0x8000, 0x8000};
|
||||
uint16_t v_temp[3] = {0x8000, 0x8000, 0x8000};
|
||||
int x;
|
||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
|
||||
while (line++ < avctx->height) {
|
||||
while (1) {
|
||||
uint32_t t = AV_RL32(src);
|
||||
for (x = 0; x < width; x += 6) {
|
||||
uint32_t t;
|
||||
|
||||
if (width - x < 6 || line_end - src < 16) {
|
||||
y = y_temp;
|
||||
u = u_temp;
|
||||
v = v_temp;
|
||||
}
|
||||
|
||||
if (line_end - src < 4)
|
||||
break;
|
||||
|
||||
t = AV_RL32(src);
|
||||
src += 4;
|
||||
*u++ = t << 6 & 0xFFC0;
|
||||
*y++ = t >> 4 & 0xFFC0;
|
||||
*v++ = t >> 14 & 0xFFC0;
|
||||
|
||||
if (src >= line_end - 1) {
|
||||
*y = 0x80;
|
||||
src++;
|
||||
line_end += stride;
|
||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
if (line_end - src < 4)
|
||||
break;
|
||||
}
|
||||
|
||||
t = AV_RL32(src);
|
||||
src += 4;
|
||||
*y++ = t << 6 & 0xFFC0;
|
||||
*u++ = t >> 4 & 0xFFC0;
|
||||
*y++ = t >> 14 & 0xFFC0;
|
||||
if (src >= line_end - 2) {
|
||||
if (!(width & 1)) {
|
||||
*y = 0x80;
|
||||
src += 2;
|
||||
}
|
||||
line_end += stride;
|
||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
|
||||
if (line_end - src < 4)
|
||||
break;
|
||||
}
|
||||
|
||||
t = AV_RL32(src);
|
||||
src += 4;
|
||||
@ -113,15 +113,8 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
||||
*y++ = t >> 4 & 0xFFC0;
|
||||
*u++ = t >> 14 & 0xFFC0;
|
||||
|
||||
if (src >= line_end - 1) {
|
||||
*y = 0x80;
|
||||
src++;
|
||||
line_end += stride;
|
||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
if (line_end - src < 4)
|
||||
break;
|
||||
}
|
||||
|
||||
t = AV_RL32(src);
|
||||
src += 4;
|
||||
@ -129,18 +122,21 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
|
||||
*v++ = t >> 4 & 0xFFC0;
|
||||
*y++ = t >> 14 & 0xFFC0;
|
||||
|
||||
if (src >= line_end - 2) {
|
||||
if (width & 1) {
|
||||
*y = 0x80;
|
||||
src += 2;
|
||||
}
|
||||
line_end += stride;
|
||||
y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
if (width - x < 6)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (x < width) {
|
||||
y = x + (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
|
||||
u = x/2 + (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
|
||||
v = x/2 + (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
|
||||
memcpy(y, y_temp, sizeof(*y) * (width - x));
|
||||
memcpy(u, u_temp, sizeof(*u) * (width - x + 1) / 2);
|
||||
memcpy(v, v_temp, sizeof(*v) * (width - x + 1) / 2);
|
||||
}
|
||||
|
||||
line_end += stride;
|
||||
src = line_end - stride;
|
||||
}
|
||||
|
||||
*got_frame = 1;
|
||||
|
@ -217,7 +217,7 @@ OBJS-$(CONFIG_DVVIDEO_DECODER) += dvdec.o dv.o dvdata.o
|
||||
OBJS-$(CONFIG_DVVIDEO_ENCODER) += dvenc.o dv.o dvdata.o
|
||||
OBJS-$(CONFIG_DXA_DECODER) += dxa.o
|
||||
OBJS-$(CONFIG_DXTORY_DECODER) += dxtory.o
|
||||
OBJS-$(CONFIG_EAC3_DECODER) += eac3dec.o eac3_data.o
|
||||
OBJS-$(CONFIG_EAC3_DECODER) += eac3_data.o
|
||||
OBJS-$(CONFIG_EAC3_ENCODER) += eac3enc.o eac3_data.o
|
||||
OBJS-$(CONFIG_EACMV_DECODER) += eacmv.o
|
||||
OBJS-$(CONFIG_EAMAD_DECODER) += eamad.o eaidct.o mpeg12.o \
|
||||
|
@ -424,7 +424,7 @@ static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
|
||||
* Save current output configuration if and only if it has been locked.
|
||||
*/
|
||||
static void push_output_configuration(AACContext *ac) {
|
||||
if (ac->oc[1].status == OC_LOCKED) {
|
||||
if (ac->oc[1].status == OC_LOCKED || ac->oc[0].status == OC_NONE) {
|
||||
ac->oc[0] = ac->oc[1];
|
||||
}
|
||||
ac->oc[1].status = OC_NONE;
|
||||
@ -900,7 +900,7 @@ static int decode_eld_specific_config(AACContext *ac, AVCodecContext *avctx,
|
||||
if (len == 15 + 255)
|
||||
len += get_bits(gb, 16);
|
||||
if (get_bits_left(gb) < len * 8 + 4) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR, overread_err);
|
||||
av_log(avctx, AV_LOG_ERROR, overread_err);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
skip_bits_long(gb, 8 * len);
|
||||
@ -3073,6 +3073,12 @@ static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
|
||||
AV_WL32(side, 2*AV_RL32(side));
|
||||
}
|
||||
|
||||
if (!ac->frame->data[0] && samples) {
|
||||
av_log(avctx, AV_LOG_ERROR, "no frame data found\n");
|
||||
err = AVERROR_INVALIDDATA;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
*got_frame_ptr = !!samples;
|
||||
if (samples) {
|
||||
ac->frame->nb_samples = samples;
|
||||
|
@ -313,7 +313,7 @@ static av_cold int psy_3gpp_init(FFPsyContext *ctx) {
|
||||
ctx->bitres.size = 6144 - pctx->frame_bits;
|
||||
ctx->bitres.size -= ctx->bitres.size % 8;
|
||||
pctx->fill_level = ctx->bitres.size;
|
||||
minath = ath(3410, ATH_ADD);
|
||||
minath = ath(3410 - 0.733 * ATH_ADD, ATH_ADD);
|
||||
for (j = 0; j < 2; j++) {
|
||||
AacPsyCoeffs *coeffs = pctx->psy_coef[j];
|
||||
const uint8_t *band_sizes = ctx->bands[j];
|
||||
@ -727,7 +727,10 @@ static void psy_3gpp_analyze_channel(FFPsyContext *ctx, int channel,
|
||||
if (active_lines > 0.0f)
|
||||
band->thr = calc_reduced_thr_3gpp(band, coeffs[g].min_snr, reduction);
|
||||
pe += calc_pe_3gpp(band);
|
||||
band->norm_fac = band->active_lines / band->thr;
|
||||
if (band->thr > 0.0f)
|
||||
band->norm_fac = band->active_lines / band->thr;
|
||||
else
|
||||
band->norm_fac = 0.0f;
|
||||
norm_fac += band->norm_fac;
|
||||
}
|
||||
}
|
||||
|
@ -514,7 +514,7 @@ static int sbr_make_f_master(AACContext *ac, SpectralBandReplication *sbr,
|
||||
/// High Frequency Generation - Patch Construction (14496-3 sp04 p216 fig. 4.46)
|
||||
static int sbr_hf_calc_npatches(AACContext *ac, SpectralBandReplication *sbr)
|
||||
{
|
||||
int i, k, sb = 0;
|
||||
int i, k, last_k = -1, last_msb = -1, sb = 0;
|
||||
int msb = sbr->k[0];
|
||||
int usb = sbr->kx[1];
|
||||
int goal_sb = ((1000 << 11) + (sbr->sample_rate >> 1)) / sbr->sample_rate;
|
||||
@ -528,6 +528,12 @@ static int sbr_hf_calc_npatches(AACContext *ac, SpectralBandReplication *sbr)
|
||||
|
||||
do {
|
||||
int odd = 0;
|
||||
if (k == last_k && msb == last_msb) {
|
||||
av_log(ac->avctx, AV_LOG_ERROR, "patch construction failed\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
last_k = k;
|
||||
last_msb = msb;
|
||||
for (i = k; i == k || sb > (sbr->k[0] - 1 + msb - odd); i--) {
|
||||
sb = sbr->f_master[i];
|
||||
odd = (sb + sbr->k[0]) & 1;
|
||||
|
@ -137,7 +137,7 @@ static int aasc_decode_frame(AVCodecContext *avctx,
|
||||
return ret;
|
||||
|
||||
/* report that the buffer was completely consumed */
|
||||
return buf_size;
|
||||
return avpkt->size;
|
||||
}
|
||||
|
||||
static av_cold int aasc_decode_end(AVCodecContext *avctx)
|
||||
|
@ -131,6 +131,9 @@ int ff_ac3_bit_alloc_calc_mask(AC3BitAllocParameters *s, int16_t *band_psd,
|
||||
int band_start, band_end, begin, end1;
|
||||
int lowcomp, fastleak, slowleak;
|
||||
|
||||
if (end <= 0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
/* excitation function */
|
||||
band_start = ff_ac3_bin_to_band_tab[start];
|
||||
band_end = ff_ac3_bin_to_band_tab[end-1] + 1;
|
||||
|
@ -872,7 +872,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
|
||||
start_subband += start_subband - 7;
|
||||
end_subband = get_bits(gbc, 3) + 5;
|
||||
#if USE_FIXED
|
||||
s->spx_dst_end_freq = end_freq_inv_tab[end_subband];
|
||||
s->spx_dst_end_freq = end_freq_inv_tab[end_subband-5];
|
||||
#endif
|
||||
if (end_subband > 7)
|
||||
end_subband += end_subband - 7;
|
||||
@ -939,7 +939,7 @@ static int decode_audio_block(AC3DecodeContext *s, int blk)
|
||||
nblend = 0;
|
||||
sblend = 0x800000;
|
||||
} else if (nratio > 0x7fffff) {
|
||||
nblend = 0x800000;
|
||||
nblend = 14529495; // sqrt(3) in FP.23
|
||||
sblend = 0;
|
||||
} else {
|
||||
nblend = fixed_sqrt(nratio, 23);
|
||||
|
@ -243,19 +243,19 @@ typedef struct AC3DecodeContext {
|
||||
* Parse the E-AC-3 frame header.
|
||||
* This parses both the bit stream info and audio frame header.
|
||||
*/
|
||||
int ff_eac3_parse_header(AC3DecodeContext *s);
|
||||
static int ff_eac3_parse_header(AC3DecodeContext *s);
|
||||
|
||||
/**
|
||||
* Decode mantissas in a single channel for the entire frame.
|
||||
* This is used when AHT mode is enabled.
|
||||
*/
|
||||
void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch);
|
||||
static void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch);
|
||||
|
||||
/**
|
||||
* Apply spectral extension to each channel by copying lower frequency
|
||||
* coefficients to higher frequency bins and applying side information to
|
||||
* approximate the original high frequency signal.
|
||||
*/
|
||||
void ff_eac3_apply_spectral_extension(AC3DecodeContext *s);
|
||||
static void ff_eac3_apply_spectral_extension(AC3DecodeContext *s);
|
||||
|
||||
#endif /* AVCODEC_AC3DEC_H */
|
||||
|
@ -164,6 +164,7 @@ static void ac3_downmix_c_fixed16(int16_t **samples, int16_t (*matrix)[2],
|
||||
}
|
||||
}
|
||||
|
||||
#include "eac3dec.c"
|
||||
#include "ac3dec.c"
|
||||
|
||||
static const AVOption options[] = {
|
||||
|
@ -28,6 +28,7 @@
|
||||
* Upmix delay samples from stereo to original channel layout.
|
||||
*/
|
||||
#include "ac3dec.h"
|
||||
#include "eac3dec.c"
|
||||
#include "ac3dec.c"
|
||||
|
||||
static const AVOption options[] = {
|
||||
|
@ -316,6 +316,11 @@ static int decode_element(AVCodecContext *avctx, AVFrame *frame, int ch_index,
|
||||
int lpc_quant[2];
|
||||
int rice_history_mult[2];
|
||||
|
||||
if (!alac->rice_limit) {
|
||||
avpriv_request_sample(alac->avctx, "Compression with rice limit 0");
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
|
||||
decorr_shift = get_bits(&alac->gb, 8);
|
||||
decorr_left_weight = get_bits(&alac->gb, 8);
|
||||
|
||||
|
@ -357,11 +357,15 @@ static av_cold int read_specific_config(ALSDecContext *ctx)
|
||||
|
||||
ctx->cs_switch = 1;
|
||||
|
||||
for (i = 0; i < avctx->channels; i++) {
|
||||
sconf->chan_pos[i] = -1;
|
||||
}
|
||||
|
||||
for (i = 0; i < avctx->channels; i++) {
|
||||
int idx;
|
||||
|
||||
idx = get_bits(&gb, chan_pos_bits);
|
||||
if (idx >= avctx->channels) {
|
||||
if (idx >= avctx->channels || sconf->chan_pos[idx] != -1) {
|
||||
av_log(avctx, AV_LOG_WARNING, "Invalid channel reordering.\n");
|
||||
ctx->cs_switch = 0;
|
||||
break;
|
||||
@ -678,7 +682,7 @@ static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
|
||||
|
||||
|
||||
if (!sconf->rlslms) {
|
||||
if (sconf->adapt_order) {
|
||||
if (sconf->adapt_order && sconf->max_order) {
|
||||
int opt_order_length = av_ceil_log2(av_clip((bd->block_length >> 3) - 1,
|
||||
2, sconf->max_order + 1));
|
||||
*bd->opt_order = get_bits(gb, opt_order_length);
|
||||
@ -1242,6 +1246,7 @@ static int revert_channel_correlation(ALSDecContext *ctx, ALSBlockData *bd,
|
||||
ALSChannelData *ch = cd[c];
|
||||
unsigned int dep = 0;
|
||||
unsigned int channels = ctx->avctx->channels;
|
||||
unsigned int channel_size = ctx->sconf.frame_length + ctx->sconf.max_order;
|
||||
|
||||
if (reverted[c])
|
||||
return 0;
|
||||
@ -1272,9 +1277,9 @@ static int revert_channel_correlation(ALSDecContext *ctx, ALSBlockData *bd,
|
||||
bd->raw_samples = ctx->raw_samples[c] + offset;
|
||||
|
||||
for (dep = 0; !ch[dep].stop_flag; dep++) {
|
||||
unsigned int smp;
|
||||
unsigned int begin = 1;
|
||||
unsigned int end = bd->block_length - 1;
|
||||
ptrdiff_t smp;
|
||||
ptrdiff_t begin = 1;
|
||||
ptrdiff_t end = bd->block_length - 1;
|
||||
int64_t y;
|
||||
int32_t *master = ctx->raw_samples[ch[dep].master_channel] + offset;
|
||||
|
||||
@ -1286,11 +1291,28 @@ static int revert_channel_correlation(ALSDecContext *ctx, ALSBlockData *bd,
|
||||
|
||||
if (ch[dep].time_diff_sign) {
|
||||
t = -t;
|
||||
if (begin < t) {
|
||||
av_log(ctx->avctx, AV_LOG_ERROR, "begin %td smaller than time diff index %d.\n", begin, t);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
begin -= t;
|
||||
} else {
|
||||
if (end < t) {
|
||||
av_log(ctx->avctx, AV_LOG_ERROR, "end %td smaller than time diff index %d.\n", end, t);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
end -= t;
|
||||
}
|
||||
|
||||
if (FFMIN(begin - 1, begin - 1 + t) < ctx->raw_buffer - master ||
|
||||
FFMAX(end + 1, end + 1 + t) > ctx->raw_buffer + channels * channel_size - master) {
|
||||
av_log(ctx->avctx, AV_LOG_ERROR,
|
||||
"sample pointer range [%p, %p] not contained in raw_buffer [%p, %p].\n",
|
||||
master + FFMIN(begin - 1, begin - 1 + t), master + FFMAX(end + 1, end + 1 + t),
|
||||
ctx->raw_buffer, ctx->raw_buffer + channels * channel_size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
for (smp = begin; smp < end; smp++) {
|
||||
y = (1 << 6) +
|
||||
MUL64(ch[dep].weighting[0], master[smp - 1 ]) +
|
||||
@ -1303,6 +1325,16 @@ static int revert_channel_correlation(ALSDecContext *ctx, ALSBlockData *bd,
|
||||
bd->raw_samples[smp] += y >> 7;
|
||||
}
|
||||
} else {
|
||||
|
||||
if (begin - 1 < ctx->raw_buffer - master ||
|
||||
end + 1 > ctx->raw_buffer + channels * channel_size - master) {
|
||||
av_log(ctx->avctx, AV_LOG_ERROR,
|
||||
"sample pointer range [%p, %p] not contained in raw_buffer [%p, %p].\n",
|
||||
master + begin - 1, master + end + 1,
|
||||
ctx->raw_buffer, ctx->raw_buffer + channels * channel_size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
for (smp = begin; smp < end; smp++) {
|
||||
y = (1 << 6) +
|
||||
MUL64(ch[dep].weighting[0], master[smp - 1]) +
|
||||
@ -1666,6 +1698,12 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
avctx->sample_fmt = sconf->resolution > 1
|
||||
? AV_SAMPLE_FMT_S32 : AV_SAMPLE_FMT_S16;
|
||||
avctx->bits_per_raw_sample = (sconf->resolution + 1) * 8;
|
||||
if (avctx->bits_per_raw_sample > 32) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Bits per raw sample %d larger than 32.\n",
|
||||
avctx->bits_per_raw_sample);
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
// set maximum Rice parameter for progressive decoding based on resolution
|
||||
@ -1728,9 +1766,9 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||
|
||||
// allocate and assign channel data buffer for mcc mode
|
||||
if (sconf->mc_coding) {
|
||||
ctx->chan_data_buffer = av_malloc(sizeof(*ctx->chan_data_buffer) *
|
||||
ctx->chan_data_buffer = av_mallocz(sizeof(*ctx->chan_data_buffer) *
|
||||
num_buffers * num_buffers);
|
||||
ctx->chan_data = av_malloc(sizeof(*ctx->chan_data) *
|
||||
ctx->chan_data = av_mallocz(sizeof(*ctx->chan_data) *
|
||||
num_buffers);
|
||||
ctx->reverted_channels = av_malloc(sizeof(*ctx->reverted_channels) *
|
||||
num_buffers);
|
||||
|
@ -592,14 +592,14 @@ static void decode_array_0000(APEContext *ctx, GetBitContext *gb,
|
||||
int ksummax, ksummin;
|
||||
|
||||
rice->ksum = 0;
|
||||
for (i = 0; i < 5; i++) {
|
||||
for (i = 0; i < FFMIN(blockstodecode, 5); i++) {
|
||||
out[i] = get_rice_ook(&ctx->gb, 10);
|
||||
rice->ksum += out[i];
|
||||
}
|
||||
rice->k = av_log2(rice->ksum / 10) + 1;
|
||||
if (rice->k >= 24)
|
||||
return;
|
||||
for (; i < 64; i++) {
|
||||
for (; i < FFMIN(blockstodecode, 64); i++) {
|
||||
out[i] = get_rice_ook(&ctx->gb, rice->k);
|
||||
rice->ksum += out[i];
|
||||
rice->k = av_log2(rice->ksum / ((i + 1) * 2)) + 1;
|
||||
@ -1461,13 +1461,13 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
|
||||
nblocks);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
s->samples = nblocks;
|
||||
|
||||
/* Initialize the frame decoder */
|
||||
if (init_frame_decoder(s) < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
s->samples = nblocks;
|
||||
}
|
||||
|
||||
if (!s->data) {
|
||||
|
@ -37,6 +37,7 @@ OBJS-$(CONFIG_DCA_DECODER) += arm/dcadsp_init_arm.o
|
||||
OBJS-$(CONFIG_FLAC_DECODER) += arm/flacdsp_init_arm.o \
|
||||
arm/flacdsp_arm.o
|
||||
OBJS-$(CONFIG_FLAC_ENCODER) += arm/flacdsp_init_arm.o
|
||||
OBJS-$(CONFIG_HEVC_DECODER) += arm/hevcdsp_init_arm.o
|
||||
OBJS-$(CONFIG_MLP_DECODER) += arm/mlpdsp_init_arm.o
|
||||
OBJS-$(CONFIG_VC1_DECODER) += arm/vc1dsp_init_arm.o
|
||||
OBJS-$(CONFIG_VORBIS_DECODER) += arm/vorbisdsp_init_arm.o
|
||||
|
26
libavcodec/arm/hevcdsp_arm.h
Normal file
26
libavcodec/arm/hevcdsp_arm.h
Normal file
@ -0,0 +1,26 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVCODEC_ARM_HEVCDSP_ARM_H
|
||||
#define AVCODEC_ARM_HEVCDSP_ARM_H
|
||||
|
||||
#include "libavcodec/hevcdsp.h"
|
||||
|
||||
void ff_hevcdsp_init_neon(HEVCDSPContext *c, const int bit_depth);
|
||||
|
||||
#endif /* AVCODEC_ARM_HEVCDSP_ARM_H */
|
32
libavcodec/arm/hevcdsp_init_arm.c
Normal file
32
libavcodec/arm/hevcdsp_init_arm.c
Normal file
@ -0,0 +1,32 @@
|
||||
/*
|
||||
* Copyright (c) 2014 Seppo Tomperi <seppo.tomperi@vtt.fi>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/arm/cpu.h"
|
||||
#include "libavcodec/hevcdsp.h"
|
||||
#include "hevcdsp_arm.h"
|
||||
|
||||
av_cold void ff_hevcdsp_init_arm(HEVCDSPContext *c, const int bit_depth)
|
||||
{
|
||||
int cpu_flags = av_get_cpu_flags();
|
||||
|
||||
if (have_neon(cpu_flags))
|
||||
ff_hevcdsp_init_neon(c, bit_depth);
|
||||
}
|
@ -21,6 +21,7 @@
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/arm/cpu.h"
|
||||
#include "libavcodec/hevcdsp.h"
|
||||
#include "hevcdsp_arm.h"
|
||||
|
||||
void ff_hevc_v_loop_filter_luma_neon(uint8_t *_pix, ptrdiff_t _stride, int _beta, int *_tc, uint8_t *_no_p, uint8_t *_no_q);
|
||||
void ff_hevc_h_loop_filter_luma_neon(uint8_t *_pix, ptrdiff_t _stride, int _beta, int *_tc, uint8_t *_no_p, uint8_t *_no_q);
|
||||
@ -141,9 +142,8 @@ void ff_hevc_put_qpel_bi_neon_wrapper(uint8_t *dst, ptrdiff_t dststride, uint8_t
|
||||
put_hevc_qpel_uw_neon[my][mx](dst, dststride, src, srcstride, width, height, src2, MAX_PB_SIZE);
|
||||
}
|
||||
|
||||
static av_cold void hevcdsp_init_neon(HEVCDSPContext *c, const int bit_depth)
|
||||
av_cold void ff_hevcdsp_init_neon(HEVCDSPContext *c, const int bit_depth)
|
||||
{
|
||||
#if HAVE_NEON
|
||||
if (bit_depth == 8) {
|
||||
int x;
|
||||
c->hevc_v_loop_filter_luma = ff_hevc_v_loop_filter_luma_neon;
|
||||
@ -221,13 +221,4 @@ static av_cold void hevcdsp_init_neon(HEVCDSPContext *c, const int bit_depth)
|
||||
c->put_hevc_qpel_uni[8][0][0] = ff_hevc_put_qpel_uw_pixels_w48_neon_8;
|
||||
c->put_hevc_qpel_uni[9][0][0] = ff_hevc_put_qpel_uw_pixels_w64_neon_8;
|
||||
}
|
||||
#endif // HAVE_NEON
|
||||
}
|
||||
|
||||
void ff_hevcdsp_init_arm(HEVCDSPContext *c, const int bit_depth)
|
||||
{
|
||||
int cpu_flags = av_get_cpu_flags();
|
||||
|
||||
if (have_neon(cpu_flags))
|
||||
hevcdsp_init_neon(c, bit_depth);
|
||||
}
|
||||
|
@ -599,8 +599,8 @@ void ff_atrac3p_ipqf(FFTContext *dct_ctx, Atrac3pIPQFChannelCtx *hist,
|
||||
const float *in, float *out)
|
||||
{
|
||||
int i, s, sb, t, pos_now, pos_next;
|
||||
DECLARE_ALIGNED(32, float, idct_in)[ATRAC3P_SUBBANDS];
|
||||
DECLARE_ALIGNED(32, float, idct_out)[ATRAC3P_SUBBANDS];
|
||||
LOCAL_ALIGNED(32, float, idct_in, [ATRAC3P_SUBBANDS]);
|
||||
LOCAL_ALIGNED(32, float, idct_out, [ATRAC3P_SUBBANDS]);
|
||||
|
||||
memset(out, 0, ATRAC3P_FRAME_SAMPLES * sizeof(*out));
|
||||
|
||||
|
@ -563,6 +563,11 @@ static int decode_residual_block(AVSContext *h, GetBitContext *gb,
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
esc_code = get_ue_code(gb, esc_golomb_order);
|
||||
if (esc_code < 0 || esc_code > 32767) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "esc_code invalid\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
level = esc_code + (run > r->max_run ? 1 : r->level_add[run]);
|
||||
while (level > r->inc_limit)
|
||||
r++;
|
||||
@ -1118,6 +1123,7 @@ static int decode_seq_header(AVSContext *h)
|
||||
{
|
||||
int frame_rate_code;
|
||||
int width, height;
|
||||
int ret;
|
||||
|
||||
h->profile = get_bits(&h->gb, 8);
|
||||
h->level = get_bits(&h->gb, 8);
|
||||
@ -1134,9 +1140,6 @@ static int decode_seq_header(AVSContext *h)
|
||||
av_log(h->avctx, AV_LOG_ERROR, "Dimensions invalid\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
h->width = width;
|
||||
h->height = height;
|
||||
|
||||
skip_bits(&h->gb, 2); //chroma format
|
||||
skip_bits(&h->gb, 3); //sample_precision
|
||||
h->aspect_ratio = get_bits(&h->gb, 4);
|
||||
@ -1145,11 +1148,16 @@ static int decode_seq_header(AVSContext *h)
|
||||
skip_bits1(&h->gb); //marker_bit
|
||||
skip_bits(&h->gb, 12); //bit_rate_upper
|
||||
h->low_delay = get_bits1(&h->gb);
|
||||
|
||||
ret = ff_set_dimensions(h->avctx, width, height);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
h->width = width;
|
||||
h->height = height;
|
||||
h->mb_width = (h->width + 15) >> 4;
|
||||
h->mb_height = (h->height + 15) >> 4;
|
||||
h->avctx->framerate = ff_mpeg12_frame_rate_tab[frame_rate_code];
|
||||
h->avctx->width = h->width;
|
||||
h->avctx->height = h->height;
|
||||
if (!h->top_qp)
|
||||
return ff_cavs_init_top_lines(h);
|
||||
return 0;
|
||||
|
@ -226,6 +226,14 @@ static int dca_parse_audio_coding_header(DCAContext *s, int base_channel,
|
||||
}
|
||||
|
||||
nchans = get_bits(&s->gb, 3) + 1;
|
||||
if (xxch && nchans >= 3) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "nchans %d is too large\n", nchans);
|
||||
return AVERROR_INVALIDDATA;
|
||||
} else if (nchans + base_channel > DCA_PRIM_CHANNELS_MAX) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "channel sum %d + %d is too large\n", nchans, base_channel);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
s->total_channels = nchans + base_channel;
|
||||
s->prim_channels = s->total_channels;
|
||||
|
||||
@ -426,6 +434,10 @@ static int dca_subframe_header(DCAContext *s, int base_channel, int block_index)
|
||||
|
||||
if (!base_channel) {
|
||||
s->subsubframes[s->current_subframe] = get_bits(&s->gb, 2) + 1;
|
||||
if (block_index + s->subsubframes[s->current_subframe] > s->sample_blocks/8) {
|
||||
s->subsubframes[s->current_subframe] = 1;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
s->partial_samples[s->current_subframe] = get_bits(&s->gb, 3);
|
||||
}
|
||||
|
||||
@ -1120,8 +1132,13 @@ int ff_dca_xbr_parse_frame(DCAContext *s)
|
||||
for(i = 0; i < num_chsets; i++) {
|
||||
n_xbr_ch[i] = get_bits(&s->gb, 3) + 1;
|
||||
k = get_bits(&s->gb, 2) + 5;
|
||||
for(j = 0; j < n_xbr_ch[i]; j++)
|
||||
for(j = 0; j < n_xbr_ch[i]; j++) {
|
||||
active_bands[i][j] = get_bits(&s->gb, k) + 1;
|
||||
if (active_bands[i][j] > DCA_SUBBANDS) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "too many active subbands (%d)\n", active_bands[i][j]);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* skip to the end of the header */
|
||||
@ -1163,23 +1180,34 @@ int ff_dca_xbr_parse_frame(DCAContext *s)
|
||||
for(i = 0; i < n_xbr_ch[chset]; i++) {
|
||||
const uint32_t *scale_table;
|
||||
int nbits;
|
||||
int scale_table_size;
|
||||
|
||||
if (s->scalefactor_huffman[chan_base+i] == 6) {
|
||||
scale_table = ff_dca_scale_factor_quant7;
|
||||
scale_table_size = FF_ARRAY_ELEMS(ff_dca_scale_factor_quant7);
|
||||
} else {
|
||||
scale_table = ff_dca_scale_factor_quant6;
|
||||
scale_table_size = FF_ARRAY_ELEMS(ff_dca_scale_factor_quant6);
|
||||
}
|
||||
|
||||
nbits = anctemp[i];
|
||||
|
||||
for(j = 0; j < active_bands[chset][i]; j++) {
|
||||
if(abits_high[i][j] > 0) {
|
||||
scale_table_high[i][j][0] =
|
||||
scale_table[get_bits(&s->gb, nbits)];
|
||||
int index = get_bits(&s->gb, nbits);
|
||||
if (index >= scale_table_size) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "scale table index %d invalid\n", index);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
scale_table_high[i][j][0] = scale_table[index];
|
||||
|
||||
if(xbr_tmode && s->transition_mode[i][j]) {
|
||||
scale_table_high[i][j][1] =
|
||||
scale_table[get_bits(&s->gb, nbits)];
|
||||
int index = get_bits(&s->gb, nbits);
|
||||
if (index >= scale_table_size) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "scale table index %d invalid\n", index);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
scale_table_high[i][j][1] = scale_table[index];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -801,7 +801,10 @@ static int decode_lowdelay(DiracContext *s)
|
||||
slice_num++;
|
||||
|
||||
buf += bytes;
|
||||
bufsize -= bytes*8;
|
||||
if (bufsize/8 >= bytes)
|
||||
bufsize -= bytes*8;
|
||||
else
|
||||
bufsize = 0;
|
||||
}
|
||||
|
||||
avctx->execute(avctx, decode_lowdelay_slice, slices, NULL, slice_num,
|
||||
@ -899,6 +902,14 @@ static int dirac_unpack_prediction_parameters(DiracContext *s)
|
||||
/*[DIRAC_STD] 11.2.4 motion_data_dimensions()
|
||||
Calculated in function dirac_unpack_block_motion_data */
|
||||
|
||||
if (s->plane[0].xblen % (1 << s->chroma_x_shift) != 0 ||
|
||||
s->plane[0].yblen % (1 << s->chroma_y_shift) != 0 ||
|
||||
!s->plane[0].xblen || !s->plane[0].yblen) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"invalid x/y block length (%d/%d) for x/y chroma shift (%d/%d)\n",
|
||||
s->plane[0].xblen, s->plane[0].yblen, s->chroma_x_shift, s->chroma_y_shift);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (!s->plane[0].xbsep || !s->plane[0].ybsep || s->plane[0].xbsep < s->plane[0].xblen/2 || s->plane[0].ybsep < s->plane[0].yblen/2) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Block separation too small\n");
|
||||
return -1;
|
||||
@ -1742,6 +1753,12 @@ static int dirac_decode_picture_header(DiracContext *s)
|
||||
get_buffer_with_edge(s->avctx, s->ref_pics[i]->avframe, AV_GET_BUFFER_FLAG_REF);
|
||||
break;
|
||||
}
|
||||
|
||||
if (!s->ref_pics[i]) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Reference could not be allocated\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* retire the reference frames that are not used anymore */
|
||||
@ -1937,8 +1954,8 @@ static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
||||
break;
|
||||
|
||||
data_unit_size = AV_RB32(buf+buf_idx+5);
|
||||
if (buf_idx + data_unit_size > buf_size || !data_unit_size) {
|
||||
if(buf_idx + data_unit_size > buf_size)
|
||||
if (data_unit_size > buf_size - buf_idx || !data_unit_size) {
|
||||
if(data_unit_size > buf_size - buf_idx)
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Data unit with size %d is larger than input buffer, discarding\n",
|
||||
data_unit_size);
|
||||
|
@ -119,6 +119,7 @@ static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,
|
||||
static const uint8_t header_prefix[] = { 0x00, 0x00, 0x02, 0x80, 0x01 };
|
||||
static const uint8_t header_prefix444[] = { 0x00, 0x00, 0x02, 0x80, 0x02 };
|
||||
int i, cid, ret;
|
||||
int old_bit_depth = ctx->bit_depth;
|
||||
|
||||
if (buf_size < 0x280) {
|
||||
av_log(ctx->avctx, AV_LOG_ERROR, "buffer too small (%d < 640).\n",
|
||||
@ -143,10 +144,6 @@ static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,
|
||||
|
||||
av_dlog(ctx->avctx, "width %d, height %d\n", ctx->width, ctx->height);
|
||||
|
||||
if (!ctx->bit_depth) {
|
||||
ff_blockdsp_init(&ctx->bdsp, ctx->avctx);
|
||||
ff_idctdsp_init(&ctx->idsp, ctx->avctx);
|
||||
}
|
||||
if (buf[0x21] == 0x58) { /* 10 bit */
|
||||
ctx->bit_depth = ctx->avctx->bits_per_raw_sample = 10;
|
||||
|
||||
@ -157,17 +154,23 @@ static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame,
|
||||
} else {
|
||||
ctx->decode_dct_block = dnxhd_decode_dct_block_10;
|
||||
ctx->pix_fmt = AV_PIX_FMT_YUV422P10;
|
||||
ctx->is_444 = 0;
|
||||
}
|
||||
} else if (buf[0x21] == 0x38) { /* 8 bit */
|
||||
ctx->bit_depth = ctx->avctx->bits_per_raw_sample = 8;
|
||||
|
||||
ctx->pix_fmt = AV_PIX_FMT_YUV422P;
|
||||
ctx->is_444 = 0;
|
||||
ctx->decode_dct_block = dnxhd_decode_dct_block_8;
|
||||
} else {
|
||||
av_log(ctx->avctx, AV_LOG_ERROR, "invalid bit depth value (%d).\n",
|
||||
buf[0x21]);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (ctx->bit_depth != old_bit_depth) {
|
||||
ff_blockdsp_init(&ctx->bdsp, ctx->avctx);
|
||||
ff_idctdsp_init(&ctx->idsp, ctx->avctx);
|
||||
}
|
||||
|
||||
cid = AV_RB32(buf + 0x28);
|
||||
av_dlog(ctx->avctx, "compression id %d\n", cid);
|
||||
@ -373,7 +376,7 @@ static int dnxhd_decode_macroblock(DNXHDContext *ctx, AVFrame *frame,
|
||||
dest_u = frame->data[1] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1 + ctx->is_444));
|
||||
dest_v = frame->data[2] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1 + ctx->is_444));
|
||||
|
||||
if (ctx->cur_field) {
|
||||
if (frame->interlaced_frame && ctx->cur_field) {
|
||||
dest_y += frame->linesize[0];
|
||||
dest_u += frame->linesize[1];
|
||||
dest_v += frame->linesize[2];
|
||||
|
@ -1197,8 +1197,12 @@ static int dvbsub_parse_region_segment(AVCodecContext *avctx,
|
||||
region->buf_size = region->width * region->height;
|
||||
|
||||
region->pbuf = av_malloc(region->buf_size);
|
||||
if (!region->pbuf)
|
||||
if (!region->pbuf) {
|
||||
region->buf_size =
|
||||
region->width =
|
||||
region->height = 0;
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
fill = 1;
|
||||
region->dirty = 0;
|
||||
@ -1417,7 +1421,7 @@ static void save_display_set(DVBSubContext *ctx)
|
||||
|
||||
pbuf = av_malloc(width * height * 4);
|
||||
if (!pbuf)
|
||||
return AVERROR(ENOMEM);
|
||||
return;
|
||||
|
||||
for (display = ctx->display_list; display; display = display->next) {
|
||||
region = get_region(ctx, display->region_id);
|
||||
|
@ -63,7 +63,7 @@ typedef enum {
|
||||
|
||||
#define EAC3_SR_CODE_REDUCED 3
|
||||
|
||||
void ff_eac3_apply_spectral_extension(AC3DecodeContext *s)
|
||||
static void ff_eac3_apply_spectral_extension(AC3DecodeContext *s)
|
||||
{
|
||||
int bin, bnd, ch, i;
|
||||
uint8_t wrapflag[SPX_MAX_BANDS]={1,0,}, num_copy_sections, copy_sizes[SPX_MAX_BANDS];
|
||||
@ -101,7 +101,7 @@ void ff_eac3_apply_spectral_extension(AC3DecodeContext *s)
|
||||
for (i = 0; i < num_copy_sections; i++) {
|
||||
memcpy(&s->transform_coeffs[ch][bin],
|
||||
&s->transform_coeffs[ch][s->spx_dst_start_freq],
|
||||
copy_sizes[i]*sizeof(float));
|
||||
copy_sizes[i]*sizeof(INTFLOAT));
|
||||
bin += copy_sizes[i];
|
||||
}
|
||||
|
||||
@ -124,7 +124,7 @@ void ff_eac3_apply_spectral_extension(AC3DecodeContext *s)
|
||||
bin = s->spx_src_start_freq - 2;
|
||||
for (bnd = 0; bnd < s->num_spx_bands; bnd++) {
|
||||
if (wrapflag[bnd]) {
|
||||
float *coeffs = &s->transform_coeffs[ch][bin];
|
||||
INTFLOAT *coeffs = &s->transform_coeffs[ch][bin];
|
||||
coeffs[0] *= atten_tab[0];
|
||||
coeffs[1] *= atten_tab[1];
|
||||
coeffs[2] *= atten_tab[2];
|
||||
@ -142,6 +142,11 @@ void ff_eac3_apply_spectral_extension(AC3DecodeContext *s)
|
||||
for (bnd = 0; bnd < s->num_spx_bands; bnd++) {
|
||||
float nscale = s->spx_noise_blend[ch][bnd] * rms_energy[bnd] * (1.0f / INT32_MIN);
|
||||
float sscale = s->spx_signal_blend[ch][bnd];
|
||||
#if USE_FIXED
|
||||
// spx_noise_blend and spx_signal_blend are both FP.23
|
||||
nscale *= 1.0 / (1<<23);
|
||||
sscale *= 1.0 / (1<<23);
|
||||
#endif
|
||||
for (i = 0; i < s->spx_band_sizes[bnd]; i++) {
|
||||
float noise = nscale * (int32_t)av_lfg_get(&s->dith_state);
|
||||
s->transform_coeffs[ch][bin] *= sscale;
|
||||
@ -195,7 +200,7 @@ static void idct6(int pre_mant[6])
|
||||
pre_mant[5] = even0 - odd0;
|
||||
}
|
||||
|
||||
void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch)
|
||||
static void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch)
|
||||
{
|
||||
int bin, blk, gs;
|
||||
int end_bap, gaq_mode;
|
||||
@ -288,7 +293,7 @@ void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch)
|
||||
}
|
||||
}
|
||||
|
||||
int ff_eac3_parse_header(AC3DecodeContext *s)
|
||||
static int ff_eac3_parse_header(AC3DecodeContext *s)
|
||||
{
|
||||
int i, blk, ch;
|
||||
int ac3_exponent_strategy, parse_aht_info, parse_spx_atten_data;
|
||||
|
@ -322,7 +322,7 @@ static uint16_t reverse_lut(const uint8_t *bitmap, uint16_t *lut)
|
||||
|
||||
i = k - 1;
|
||||
|
||||
memset(lut + k * 2, 0, (USHORT_RANGE - k) * 2);
|
||||
memset(lut + k, 0, (USHORT_RANGE - k) * 2);
|
||||
|
||||
return i;
|
||||
}
|
||||
|
@ -546,6 +546,12 @@ static int read_extra_header(FFV1Context *f)
|
||||
f->num_h_slices = 1 + get_symbol(c, state, 0);
|
||||
f->num_v_slices = 1 + get_symbol(c, state, 0);
|
||||
|
||||
if (f->chroma_h_shift > 4U || f->chroma_v_shift > 4U) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "chroma shift parameters %d %d are invalid\n",
|
||||
f->chroma_h_shift, f->chroma_v_shift);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (f->num_h_slices > (unsigned)f->width || !f->num_h_slices ||
|
||||
f->num_v_slices > (unsigned)f->height || !f->num_v_slices
|
||||
) {
|
||||
@ -651,6 +657,12 @@ static int read_header(FFV1Context *f)
|
||||
}
|
||||
}
|
||||
|
||||
if (chroma_h_shift > 4U || chroma_v_shift > 4U) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "chroma shift parameters %d %d are invalid\n",
|
||||
chroma_h_shift, chroma_v_shift);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
f->colorspace = colorspace;
|
||||
f->avctx->bits_per_raw_sample = bits_per_raw_sample;
|
||||
f->chroma_planes = chroma_planes;
|
||||
|
@ -1499,9 +1499,6 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
|
||||
continue;
|
||||
|
||||
again:
|
||||
if ( (!(avctx->active_thread_type & FF_THREAD_FRAME) || nals_needed >= nal_index)
|
||||
&& !h->current_slice)
|
||||
h->au_pps_id = -1;
|
||||
/* Ignore per frame NAL unit type during extradata
|
||||
* parsing. Decoding slices is not possible in codec init
|
||||
* with frame-mt */
|
||||
@ -1537,8 +1534,14 @@ again:
|
||||
ret = -1;
|
||||
goto end;
|
||||
}
|
||||
if(!idr_cleared)
|
||||
if(!idr_cleared) {
|
||||
if (h->current_slice && (avctx->active_thread_type & FF_THREAD_SLICE)) {
|
||||
av_log(h, AV_LOG_ERROR, "invalid mixed IDR / non IDR frames cannot be decoded in slice multithreading mode\n");
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
goto end;
|
||||
}
|
||||
idr(h); // FIXME ensure we don't lose some frames if there is reordering
|
||||
}
|
||||
idr_cleared = 1;
|
||||
h->has_recovery_point = 1;
|
||||
case NAL_SLICE:
|
||||
@ -1546,6 +1549,10 @@ again:
|
||||
hx->intra_gb_ptr =
|
||||
hx->inter_gb_ptr = &hx->gb;
|
||||
|
||||
if ( nals_needed >= nal_index
|
||||
|| (!(avctx->active_thread_type & FF_THREAD_FRAME) && !context_count))
|
||||
h->au_pps_id = -1;
|
||||
|
||||
if ((err = ff_h264_decode_slice_header(hx, h)))
|
||||
break;
|
||||
|
||||
@ -1629,7 +1636,9 @@ again:
|
||||
break;
|
||||
case NAL_SPS:
|
||||
init_get_bits(&h->gb, ptr, bit_length);
|
||||
if (ff_h264_decode_seq_parameter_set(h) < 0 && (h->is_avc ? nalsize : 1)) {
|
||||
if (ff_h264_decode_seq_parameter_set(h, 0) >= 0)
|
||||
break;
|
||||
if (h->is_avc ? nalsize : 1) {
|
||||
av_log(h->avctx, AV_LOG_DEBUG,
|
||||
"SPS decoding failure, trying again with the complete NAL\n");
|
||||
if (h->is_avc)
|
||||
@ -1638,8 +1647,11 @@ again:
|
||||
break;
|
||||
init_get_bits(&h->gb, &buf[buf_index + 1 - consumed],
|
||||
8*(next_avc - buf_index + consumed - 1));
|
||||
ff_h264_decode_seq_parameter_set(h);
|
||||
if (ff_h264_decode_seq_parameter_set(h, 0) >= 0)
|
||||
break;
|
||||
}
|
||||
init_get_bits(&h->gb, ptr, bit_length);
|
||||
ff_h264_decode_seq_parameter_set(h, 1);
|
||||
|
||||
break;
|
||||
case NAL_PPS:
|
||||
@ -1672,8 +1684,14 @@ again:
|
||||
if (err < 0 || err == SLICE_SKIPED) {
|
||||
if (err < 0)
|
||||
av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
|
||||
h->ref_count[0] = h->ref_count[1] = h->list_count = 0;
|
||||
hx->ref_count[0] = hx->ref_count[1] = hx->list_count = 0;
|
||||
} else if (err == SLICE_SINGLETHREAD) {
|
||||
if (context_count > 1) {
|
||||
ret = ff_h264_execute_decode_slices(h, context_count - 1);
|
||||
if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
|
||||
goto end;
|
||||
context_count = 0;
|
||||
}
|
||||
/* Slice could not be decoded in parallel mode, copy down
|
||||
* NAL unit stuff to context 0 and restart. Note that
|
||||
* rbsp_buffer is not transferred, but since we no longer
|
||||
|
@ -536,6 +536,7 @@ typedef struct H264Context {
|
||||
int mb_x, mb_y;
|
||||
int resync_mb_x;
|
||||
int resync_mb_y;
|
||||
int mb_index_end;
|
||||
int mb_skip_run;
|
||||
int mb_height, mb_width;
|
||||
int mb_stride;
|
||||
@ -776,7 +777,7 @@ int ff_h264_decode_sei(H264Context *h);
|
||||
/**
|
||||
* Decode SPS
|
||||
*/
|
||||
int ff_h264_decode_seq_parameter_set(H264Context *h);
|
||||
int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation);
|
||||
|
||||
/**
|
||||
* compute profile from sps
|
||||
|
@ -280,7 +280,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
|
||||
init_get_bits(&h->gb, ptr, 8 * dst_length);
|
||||
switch (h->nal_unit_type) {
|
||||
case NAL_SPS:
|
||||
ff_h264_decode_seq_parameter_set(h);
|
||||
ff_h264_decode_seq_parameter_set(h, 0);
|
||||
break;
|
||||
case NAL_PPS:
|
||||
ff_h264_decode_picture_parameter_set(h, h->gb.size_in_bits);
|
||||
|
@ -241,12 +241,6 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps)
|
||||
}
|
||||
}
|
||||
|
||||
if (get_bits_left(&h->gb) < 0) {
|
||||
av_log(h->avctx, AV_LOG_ERROR,
|
||||
"Overread VUI by %d bits\n", -get_bits_left(&h->gb));
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -303,7 +297,7 @@ static void decode_scaling_matrices(H264Context *h, SPS *sps,
|
||||
}
|
||||
}
|
||||
|
||||
int ff_h264_decode_seq_parameter_set(H264Context *h)
|
||||
int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation)
|
||||
{
|
||||
int profile_idc, level_idc, constraint_set_flags = 0;
|
||||
unsigned int sps_id;
|
||||
@ -523,6 +517,13 @@ int ff_h264_decode_seq_parameter_set(H264Context *h)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (get_bits_left(&h->gb) < 0) {
|
||||
av_log(h->avctx, ignore_truncation ? AV_LOG_WARNING : AV_LOG_ERROR,
|
||||
"Overread %s by %d bits\n", sps->vui_parameters_present_flag ? "VUI" : "SPS", -get_bits_left(&h->gb));
|
||||
if (!ignore_truncation)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (!sps->sar.den)
|
||||
sps->sar.den = 1;
|
||||
|
||||
|
@ -705,7 +705,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
|
||||
*/
|
||||
if (h->short_ref_count && h->short_ref[0] == h->cur_pic_ptr) {
|
||||
/* Just mark the second field valid */
|
||||
h->cur_pic_ptr->reference = PICT_FRAME;
|
||||
h->cur_pic_ptr->reference |= h->picture_structure;
|
||||
} else if (h->cur_pic_ptr->long_ref) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "illegal short term reference "
|
||||
"assignment for second field "
|
||||
|
@ -1289,6 +1289,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
|
||||
int field_pic_flag, bottom_field_flag;
|
||||
int first_slice = h == h0 && !h0->current_slice;
|
||||
int frame_num, picture_structure, droppable;
|
||||
int mb_aff_frame, last_mb_aff_frame;
|
||||
PPS *pps;
|
||||
|
||||
h->qpel_put = h->h264qpel.put_h264_qpel_pixels_tab;
|
||||
@ -1416,7 +1417,8 @@ int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
|
||||
|| h->mb_width != h->sps.mb_width
|
||||
|| h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag)
|
||||
));
|
||||
if (non_j_pixfmt(h0->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h0, 0)))
|
||||
if (h0->avctx->pix_fmt == AV_PIX_FMT_NONE
|
||||
|| (non_j_pixfmt(h0->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h0, 0))))
|
||||
must_reinit = 1;
|
||||
|
||||
if (first_slice && av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio))
|
||||
@ -1512,7 +1514,8 @@ int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
|
||||
}
|
||||
|
||||
h->mb_mbaff = 0;
|
||||
h->mb_aff_frame = 0;
|
||||
mb_aff_frame = 0;
|
||||
last_mb_aff_frame = h0->mb_aff_frame;
|
||||
last_pic_structure = h0->picture_structure;
|
||||
last_pic_droppable = h0->droppable;
|
||||
droppable = h->nal_ref_idc == 0;
|
||||
@ -1530,12 +1533,13 @@ int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
|
||||
picture_structure = PICT_TOP_FIELD + bottom_field_flag;
|
||||
} else {
|
||||
picture_structure = PICT_FRAME;
|
||||
h->mb_aff_frame = h->sps.mb_aff;
|
||||
mb_aff_frame = h->sps.mb_aff;
|
||||
}
|
||||
}
|
||||
if (h0->current_slice) {
|
||||
if (last_pic_structure != picture_structure ||
|
||||
last_pic_droppable != droppable) {
|
||||
last_pic_droppable != droppable ||
|
||||
last_mb_aff_frame != mb_aff_frame) {
|
||||
av_log(h->avctx, AV_LOG_ERROR,
|
||||
"Changing field mode (%d -> %d) between slices is not allowed\n",
|
||||
last_pic_structure, h->picture_structure);
|
||||
@ -1551,6 +1555,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264Context *h0)
|
||||
h->picture_structure = picture_structure;
|
||||
h->droppable = droppable;
|
||||
h->frame_num = frame_num;
|
||||
h->mb_aff_frame = mb_aff_frame;
|
||||
h->mb_field_decoding_flag = picture_structure != PICT_FRAME;
|
||||
|
||||
if (h0->current_slice == 0) {
|
||||
@ -2424,8 +2429,17 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
|
||||
|
||||
for (;;) {
|
||||
// START_TIMER
|
||||
int ret = ff_h264_decode_mb_cabac(h);
|
||||
int eos;
|
||||
int ret, eos;
|
||||
|
||||
if (h->mb_x + h->mb_y * h->mb_width >= h->mb_index_end) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps next at %d\n",
|
||||
h->mb_index_end);
|
||||
er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x,
|
||||
h->mb_y, ER_MB_ERROR);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
ret = ff_h264_decode_mb_cabac(h);
|
||||
// STOP_TIMER("decode_mb_cabac")
|
||||
|
||||
if (ret >= 0)
|
||||
@ -2487,7 +2501,17 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
|
||||
}
|
||||
} else {
|
||||
for (;;) {
|
||||
int ret = ff_h264_decode_mb_cavlc(h);
|
||||
int ret;
|
||||
|
||||
if (h->mb_x + h->mb_y * h->mb_width >= h->mb_index_end) {
|
||||
av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps next at %d\n",
|
||||
h->mb_index_end);
|
||||
er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x,
|
||||
h->mb_y, ER_MB_ERROR);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
ret = ff_h264_decode_mb_cavlc(h);
|
||||
|
||||
if (ret >= 0)
|
||||
ff_h264_hl_decode_mb(h);
|
||||
@ -2575,19 +2599,33 @@ int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count)
|
||||
|
||||
av_assert0(h->mb_y < h->mb_height);
|
||||
|
||||
h->mb_index_end = INT_MAX;
|
||||
|
||||
if (h->avctx->hwaccel ||
|
||||
h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
|
||||
return 0;
|
||||
if (context_count == 1) {
|
||||
return decode_slice(avctx, &h);
|
||||
} else {
|
||||
int j, mb_index;
|
||||
av_assert0(context_count > 0);
|
||||
for (i = 1; i < context_count; i++) {
|
||||
for (i = 0; i < context_count; i++) {
|
||||
int mb_index_end = h->mb_width * h->mb_height;
|
||||
hx = h->thread_context[i];
|
||||
if (CONFIG_ERROR_RESILIENCE) {
|
||||
mb_index = hx->resync_mb_x + hx->resync_mb_y * h->mb_width;
|
||||
if (CONFIG_ERROR_RESILIENCE && i) {
|
||||
hx->er.error_count = 0;
|
||||
}
|
||||
hx->x264_build = h->x264_build;
|
||||
for (j = 0; j < context_count; j++) {
|
||||
H264Context *sl2 = h->thread_context[j];
|
||||
int mb_index2 = sl2->resync_mb_x + sl2->resync_mb_y * h->mb_width;
|
||||
|
||||
if (i==j || mb_index > mb_index2)
|
||||
continue;
|
||||
mb_index_end = FFMIN(mb_index_end, mb_index2);
|
||||
}
|
||||
hx->mb_index_end = mb_index_end;
|
||||
}
|
||||
|
||||
avctx->execute(avctx, decode_slice, h->thread_context,
|
||||
|
@ -694,11 +694,25 @@ static int hls_slice_header(HEVCContext *s)
|
||||
|
||||
sh->num_entry_point_offsets = 0;
|
||||
if (s->pps->tiles_enabled_flag || s->pps->entropy_coding_sync_enabled_flag) {
|
||||
sh->num_entry_point_offsets = get_ue_golomb_long(gb);
|
||||
unsigned num_entry_point_offsets = get_ue_golomb_long(gb);
|
||||
// It would be possible to bound this tighter but this here is simpler
|
||||
if (num_entry_point_offsets > get_bits_left(gb)) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
sh->num_entry_point_offsets = num_entry_point_offsets;
|
||||
if (sh->num_entry_point_offsets > 0) {
|
||||
int offset_len = get_ue_golomb_long(gb) + 1;
|
||||
int segments = offset_len >> 4;
|
||||
int rest = (offset_len & 15);
|
||||
|
||||
if (offset_len < 1 || offset_len > 32) {
|
||||
sh->num_entry_point_offsets = 0;
|
||||
av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
av_freep(&sh->entry_point_offset);
|
||||
av_freep(&sh->offset);
|
||||
av_freep(&sh->size);
|
||||
@ -2600,7 +2614,8 @@ static int hevc_frame_start(HEVCContext *s)
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
|
||||
ff_thread_finish_setup(s->avctx);
|
||||
if (!s->avctx->hwaccel)
|
||||
ff_thread_finish_setup(s->avctx);
|
||||
|
||||
return 0;
|
||||
|
||||
@ -2982,7 +2997,6 @@ static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
|
||||
|
||||
/* parse the NAL units */
|
||||
for (i = 0; i < s->nb_nals; i++) {
|
||||
int ret;
|
||||
s->skipped_bytes = s->skipped_bytes_nal[i];
|
||||
s->skipped_bytes_pos = s->skipped_bytes_pos_nal[i];
|
||||
|
||||
|
@ -298,10 +298,10 @@ typedef struct RefPicListTab {
|
||||
} RefPicListTab;
|
||||
|
||||
typedef struct HEVCWindow {
|
||||
int left_offset;
|
||||
int right_offset;
|
||||
int top_offset;
|
||||
int bottom_offset;
|
||||
unsigned int left_offset;
|
||||
unsigned int right_offset;
|
||||
unsigned int top_offset;
|
||||
unsigned int bottom_offset;
|
||||
} HEVCWindow;
|
||||
|
||||
typedef struct VUI {
|
||||
|
@ -424,7 +424,8 @@ int ff_hevc_decode_nal_vps(HEVCContext *s)
|
||||
|
||||
vps->vps_max_layer_id = get_bits(gb, 6);
|
||||
vps->vps_num_layer_sets = get_ue_golomb_long(gb) + 1;
|
||||
if ((vps->vps_num_layer_sets - 1LL) * (vps->vps_max_layer_id + 1LL) > get_bits_left(gb)) {
|
||||
if (vps->vps_num_layer_sets < 1 || vps->vps_num_layer_sets > 1024 ||
|
||||
(vps->vps_num_layer_sets - 1LL) * (vps->vps_max_layer_id + 1LL) > get_bits_left(gb)) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "too many layer_id_included_flags\n");
|
||||
goto err;
|
||||
}
|
||||
@ -441,6 +442,11 @@ int ff_hevc_decode_nal_vps(HEVCContext *s)
|
||||
if (vps->vps_poc_proportional_to_timing_flag)
|
||||
vps->vps_num_ticks_poc_diff_one = get_ue_golomb_long(gb) + 1;
|
||||
vps->vps_num_hrd_parameters = get_ue_golomb_long(gb);
|
||||
if (vps->vps_num_hrd_parameters > (unsigned)vps->vps_num_layer_sets) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"vps_num_hrd_parameters %d is invalid\n", vps->vps_num_hrd_parameters);
|
||||
goto err;
|
||||
}
|
||||
for (i = 0; i < vps->vps_num_hrd_parameters; i++) {
|
||||
int common_inf_present = 1;
|
||||
|
||||
@ -1039,7 +1045,8 @@ int ff_hevc_decode_nal_sps(HEVCContext *s)
|
||||
(sps->output_window.left_offset + sps->output_window.right_offset);
|
||||
sps->output_height = sps->height -
|
||||
(sps->output_window.top_offset + sps->output_window.bottom_offset);
|
||||
if (sps->output_width <= 0 || sps->output_height <= 0) {
|
||||
if (sps->width <= sps->output_window.left_offset + (int64_t)sps->output_window.right_offset ||
|
||||
sps->height <= sps->output_window.top_offset + (int64_t)sps->output_window.bottom_offset) {
|
||||
av_log(s->avctx, AV_LOG_WARNING, "Invalid visible frame dimensions: %dx%d.\n",
|
||||
sps->output_width, sps->output_height);
|
||||
if (s->avctx->err_recognition & AV_EF_EXPLODE) {
|
||||
@ -1315,14 +1322,14 @@ int ff_hevc_decode_nal_pps(HEVCContext *s)
|
||||
if (pps->tiles_enabled_flag) {
|
||||
pps->num_tile_columns = get_ue_golomb_long(gb) + 1;
|
||||
pps->num_tile_rows = get_ue_golomb_long(gb) + 1;
|
||||
if (pps->num_tile_columns == 0 ||
|
||||
if (pps->num_tile_columns <= 0 ||
|
||||
pps->num_tile_columns >= sps->width) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "num_tile_columns_minus1 out of range: %d\n",
|
||||
pps->num_tile_columns - 1);
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
goto err;
|
||||
}
|
||||
if (pps->num_tile_rows == 0 ||
|
||||
if (pps->num_tile_rows <= 0 ||
|
||||
pps->num_tile_rows >= sps->height) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "num_tile_rows_minus1 out of range: %d\n",
|
||||
pps->num_tile_rows - 1);
|
||||
|
@ -126,6 +126,11 @@ static int active_parameter_sets(HEVCContext *s)
|
||||
get_bits(gb, 1); // num_sps_ids_minus1
|
||||
num_sps_ids_minus1 = get_ue_golomb_long(gb); // num_sps_ids_minus1
|
||||
|
||||
if (num_sps_ids_minus1 < 0 || num_sps_ids_minus1 > 15) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "num_sps_ids_minus1 %d invalid\n", num_sps_ids_minus1);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
active_seq_parameter_set_id = get_ue_golomb_long(gb);
|
||||
if (active_seq_parameter_set_id >= MAX_SPS_COUNT) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "active_parameter_set_id %d invalid\n", active_seq_parameter_set_id);
|
||||
|
@ -1559,7 +1559,7 @@ static int jp2_find_codestream(Jpeg2000DecoderContext *s)
|
||||
int cn = bytestream2_get_be16(&s->g);
|
||||
int av_unused typ = bytestream2_get_be16(&s->g);
|
||||
int asoc = bytestream2_get_be16(&s->g);
|
||||
if (cn < 4 || asoc < 4)
|
||||
if (cn < 4 && asoc < 4)
|
||||
s->cdef[cn] = asoc;
|
||||
}
|
||||
}
|
||||
|
@ -111,6 +111,8 @@ static int get_stats(AVCodecContext *avctx, int eos)
|
||||
// libtheora generates a summary header at the end
|
||||
memcpy(h->stats, buf, bytes);
|
||||
avctx->stats_out = av_malloc(b64_size);
|
||||
if (!avctx->stats_out)
|
||||
return AVERROR(ENOMEM);
|
||||
av_base64_encode(avctx->stats_out, b64_size, h->stats, h->stats_offset);
|
||||
}
|
||||
return 0;
|
||||
|
@ -441,9 +441,10 @@ static av_cold int vpx_init(AVCodecContext *avctx,
|
||||
codecctl_int(avctx, VP8E_SET_ARNR_STRENGTH, ctx->arnr_strength);
|
||||
if (ctx->arnr_type >= 0)
|
||||
codecctl_int(avctx, VP8E_SET_ARNR_TYPE, ctx->arnr_type);
|
||||
codecctl_int(avctx, VP8E_SET_NOISE_SENSITIVITY, avctx->noise_reduction);
|
||||
if (avctx->codec_id == AV_CODEC_ID_VP8)
|
||||
if (avctx->codec_id == AV_CODEC_ID_VP8) {
|
||||
codecctl_int(avctx, VP8E_SET_NOISE_SENSITIVITY, avctx->noise_reduction);
|
||||
codecctl_int(avctx, VP8E_SET_TOKEN_PARTITIONS, av_log2(avctx->slices));
|
||||
}
|
||||
#if FF_API_MPV_OPT
|
||||
FF_DISABLE_DEPRECATION_WARNINGS
|
||||
if (avctx->mb_threshold) {
|
||||
|
@ -182,7 +182,7 @@ int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
|
||||
s->quant_matrixes[index][s->scantable.permutated[8]]) >> 1;
|
||||
av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
|
||||
index, s->qscale[index]);
|
||||
len -= 65;
|
||||
len -= 1 + 64 * (1+pr);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -29,6 +29,7 @@
|
||||
#include <inttypes.h>
|
||||
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "libavutil/internal.h"
|
||||
#include "libavutil/stereo3d.h"
|
||||
|
||||
@ -1315,7 +1316,13 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
|
||||
}
|
||||
} // MPEG-2
|
||||
|
||||
ff_set_sar(s->avctx, s->avctx->sample_aspect_ratio);
|
||||
if (av_image_check_sar(s->width, s->height,
|
||||
avctx->sample_aspect_ratio) < 0) {
|
||||
av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
|
||||
avctx->sample_aspect_ratio.num,
|
||||
avctx->sample_aspect_ratio.den);
|
||||
avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
|
||||
}
|
||||
|
||||
if ((s1->mpeg_enc_ctx_allocated == 0) ||
|
||||
avctx->coded_width != s->width ||
|
||||
|
@ -102,7 +102,7 @@ enum AudioObjectType {
|
||||
AOT_USAC, ///< N Unified Speech and Audio Coding
|
||||
};
|
||||
|
||||
#define MAX_PCE_SIZE 304 ///<Maximum size of a PCE including the 3-bit ID_PCE
|
||||
#define MAX_PCE_SIZE 320 ///<Maximum size of a PCE including the 3-bit ID_PCE
|
||||
///<marker and the comment
|
||||
|
||||
int avpriv_copy_pce_data(PutBitContext *pb, GetBitContext *gb);
|
||||
|
@ -189,14 +189,14 @@ static int mpeg4_decode_sprite_trajectory(Mpeg4DecContext *ctx, GetBitContext *g
|
||||
int x = 0, y = 0;
|
||||
|
||||
length = get_vlc2(gb, sprite_trajectory.table, SPRITE_TRAJ_VLC_BITS, 3);
|
||||
if (length)
|
||||
if (length > 0)
|
||||
x = get_xbits(gb, length);
|
||||
|
||||
if (!(ctx->divx_version == 500 && ctx->divx_build == 413))
|
||||
skip_bits1(gb); /* marker bit */
|
||||
|
||||
length = get_vlc2(gb, sprite_trajectory.table, SPRITE_TRAJ_VLC_BITS, 3);
|
||||
if (length)
|
||||
if (length > 0)
|
||||
y = get_xbits(gb, length);
|
||||
|
||||
skip_bits1(gb); /* marker bit */
|
||||
|
@ -36,17 +36,15 @@ static int msrle_decode_pal4(AVCodecContext *avctx, AVPicture *pic,
|
||||
unsigned char rle_code;
|
||||
unsigned char extra_byte, odd_pixel;
|
||||
unsigned char stream_byte;
|
||||
unsigned int pixel_ptr = 0;
|
||||
int row_dec = pic->linesize[0];
|
||||
int row_ptr = (avctx->height - 1) * row_dec;
|
||||
int frame_size = row_dec * avctx->height;
|
||||
int pixel_ptr = 0;
|
||||
int line = avctx->height - 1;
|
||||
int i;
|
||||
|
||||
while (row_ptr >= 0) {
|
||||
while (line >= 0 && pixel_ptr <= avctx->width) {
|
||||
if (bytestream2_get_bytes_left(gb) <= 0) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"MS RLE: bytestream overrun, %d rows left\n",
|
||||
row_ptr);
|
||||
"MS RLE: bytestream overrun, %dx%d left\n",
|
||||
avctx->width - pixel_ptr, line);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
rle_code = stream_byte = bytestream2_get_byteu(gb);
|
||||
@ -55,7 +53,7 @@ static int msrle_decode_pal4(AVCodecContext *avctx, AVPicture *pic,
|
||||
stream_byte = bytestream2_get_byte(gb);
|
||||
if (stream_byte == 0) {
|
||||
/* line is done, goto the next one */
|
||||
row_ptr -= row_dec;
|
||||
line--;
|
||||
pixel_ptr = 0;
|
||||
} else if (stream_byte == 1) {
|
||||
/* decode is done */
|
||||
@ -65,13 +63,12 @@ static int msrle_decode_pal4(AVCodecContext *avctx, AVPicture *pic,
|
||||
stream_byte = bytestream2_get_byte(gb);
|
||||
pixel_ptr += stream_byte;
|
||||
stream_byte = bytestream2_get_byte(gb);
|
||||
row_ptr -= stream_byte * row_dec;
|
||||
} else {
|
||||
// copy pixels from encoded stream
|
||||
odd_pixel = stream_byte & 1;
|
||||
rle_code = (stream_byte + 1) / 2;
|
||||
extra_byte = rle_code & 0x01;
|
||||
if (row_ptr + pixel_ptr + stream_byte > frame_size ||
|
||||
if (pixel_ptr + 2*rle_code - odd_pixel > avctx->width ||
|
||||
bytestream2_get_bytes_left(gb) < rle_code) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"MS RLE: frame/stream ptr just went out of bounds (copy)\n");
|
||||
@ -82,13 +79,13 @@ static int msrle_decode_pal4(AVCodecContext *avctx, AVPicture *pic,
|
||||
if (pixel_ptr >= avctx->width)
|
||||
break;
|
||||
stream_byte = bytestream2_get_byteu(gb);
|
||||
pic->data[0][row_ptr + pixel_ptr] = stream_byte >> 4;
|
||||
pic->data[0][line * pic->linesize[0] + pixel_ptr] = stream_byte >> 4;
|
||||
pixel_ptr++;
|
||||
if (i + 1 == rle_code && odd_pixel)
|
||||
break;
|
||||
if (pixel_ptr >= avctx->width)
|
||||
break;
|
||||
pic->data[0][row_ptr + pixel_ptr] = stream_byte & 0x0F;
|
||||
pic->data[0][line * pic->linesize[0] + pixel_ptr] = stream_byte & 0x0F;
|
||||
pixel_ptr++;
|
||||
}
|
||||
|
||||
@ -98,7 +95,7 @@ static int msrle_decode_pal4(AVCodecContext *avctx, AVPicture *pic,
|
||||
}
|
||||
} else {
|
||||
// decode a run of data
|
||||
if (row_ptr + pixel_ptr + stream_byte > frame_size) {
|
||||
if (pixel_ptr + rle_code > avctx->width + 1) {
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"MS RLE: frame ptr just went out of bounds (run)\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
@ -108,9 +105,9 @@ static int msrle_decode_pal4(AVCodecContext *avctx, AVPicture *pic,
|
||||
if (pixel_ptr >= avctx->width)
|
||||
break;
|
||||
if ((i & 1) == 0)
|
||||
pic->data[0][row_ptr + pixel_ptr] = stream_byte >> 4;
|
||||
pic->data[0][line * pic->linesize[0] + pixel_ptr] = stream_byte >> 4;
|
||||
else
|
||||
pic->data[0][row_ptr + pixel_ptr] = stream_byte & 0x0F;
|
||||
pic->data[0][line * pic->linesize[0] + pixel_ptr] = stream_byte & 0x0F;
|
||||
pixel_ptr++;
|
||||
}
|
||||
}
|
||||
|
@ -119,12 +119,12 @@ static int on2avc_decode_band_types(On2AVCContext *c, GetBitContext *gb)
|
||||
run_len = 1;
|
||||
do {
|
||||
run = get_bits(gb, bits_per_sect);
|
||||
if (run > num_bands - band - run_len) {
|
||||
av_log(c->avctx, AV_LOG_ERROR, "Invalid band type run\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
run_len += run;
|
||||
} while (run == esc_val);
|
||||
if (band + run_len > num_bands) {
|
||||
av_log(c->avctx, AV_LOG_ERROR, "Invalid band type run\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
for (i = band; i < band + run_len; i++) {
|
||||
c->band_type[i] = band_type;
|
||||
c->band_run_end[i] = band + run_len;
|
||||
|
@ -103,7 +103,6 @@ static const AVOption avcodec_options[] = {
|
||||
{"hex", "hex motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = ME_HEX }, INT_MIN, INT_MAX, V|E, "me_method" },
|
||||
{"umh", "umh motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = ME_UMH }, INT_MIN, INT_MAX, V|E, "me_method" },
|
||||
{"iter", "iter motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = ME_ITER }, INT_MIN, INT_MAX, V|E, "me_method" },
|
||||
{"extradata_size", NULL, OFFSET(extradata_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
|
||||
{"time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, {.dbl = 0}, INT_MIN, INT_MAX},
|
||||
{"g", "set the group of picture (GOP) size", OFFSET(gop_size), AV_OPT_TYPE_INT, {.i64 = 12 }, INT_MIN, INT_MAX, V|E},
|
||||
{"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},
|
||||
|
@ -449,6 +449,14 @@ static int opus_decode_packet(AVCodecContext *avctx, void *data,
|
||||
int coded_samples = 0;
|
||||
int decoded_samples = 0;
|
||||
int i, ret;
|
||||
int delayed_samples = 0;
|
||||
|
||||
for (i = 0; i < c->nb_streams; i++) {
|
||||
OpusStreamContext *s = &c->streams[i];
|
||||
s->out[0] =
|
||||
s->out[1] = NULL;
|
||||
delayed_samples = FFMAX(delayed_samples, s->delayed_samples);
|
||||
}
|
||||
|
||||
/* decode the header of the first sub-packet to find out the sample count */
|
||||
if (buf) {
|
||||
@ -462,7 +470,7 @@ static int opus_decode_packet(AVCodecContext *avctx, void *data,
|
||||
c->streams[0].silk_samplerate = get_silk_samplerate(pkt->config);
|
||||
}
|
||||
|
||||
frame->nb_samples = coded_samples + c->streams[0].delayed_samples;
|
||||
frame->nb_samples = coded_samples + delayed_samples;
|
||||
|
||||
/* no input or buffered data => nothing to do */
|
||||
if (!frame->nb_samples) {
|
||||
|
@ -618,7 +618,7 @@ static int decode_idat_chunk(AVCodecContext *avctx, PNGDecContext *s,
|
||||
} else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 || s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
|
||||
s->color_type == PNG_COLOR_TYPE_PALETTE) {
|
||||
avctx->pix_fmt = AV_PIX_FMT_PAL8;
|
||||
} else if (s->bit_depth == 1 && s->bits_per_pixel == 1) {
|
||||
} else if (s->bit_depth == 1 && s->bits_per_pixel == 1 && avctx->codec_id != AV_CODEC_ID_APNG) {
|
||||
avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
|
||||
} else if (s->bit_depth == 8 &&
|
||||
s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
|
||||
@ -968,7 +968,7 @@ static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s,
|
||||
AVDictionary *metadata = NULL;
|
||||
uint32_t tag, length;
|
||||
int decode_next_dat = 0;
|
||||
int ret = AVERROR_INVALIDDATA;
|
||||
int ret;
|
||||
AVFrame *ref;
|
||||
|
||||
for (;;) {
|
||||
@ -984,12 +984,14 @@ static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s,
|
||||
if ( s->state & PNG_ALLIMAGE
|
||||
&& avctx->strict_std_compliance <= FF_COMPLIANCE_NORMAL)
|
||||
goto exit_loop;
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
length = bytestream2_get_be32(&s->gb);
|
||||
if (length > 0x7fffffff || length > bytestream2_get_bytes_left(&s->gb)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "chunk too big\n");
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
goto fail;
|
||||
}
|
||||
tag = bytestream2_get_le32(&s->gb);
|
||||
@ -1001,11 +1003,11 @@ static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s,
|
||||
((tag >> 24) & 0xff), length);
|
||||
switch (tag) {
|
||||
case MKTAG('I', 'H', 'D', 'R'):
|
||||
if (decode_ihdr_chunk(avctx, s, length) < 0)
|
||||
if ((ret = decode_ihdr_chunk(avctx, s, length)) < 0)
|
||||
goto fail;
|
||||
break;
|
||||
case MKTAG('p', 'H', 'Y', 's'):
|
||||
if (decode_phys_chunk(avctx, s) < 0)
|
||||
if ((ret = decode_phys_chunk(avctx, s)) < 0)
|
||||
goto fail;
|
||||
break;
|
||||
case MKTAG('f', 'c', 'T', 'L'):
|
||||
@ -1018,15 +1020,17 @@ static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s,
|
||||
case MKTAG('f', 'd', 'A', 'T'):
|
||||
if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
|
||||
goto skip_tag;
|
||||
if (!decode_next_dat)
|
||||
if (!decode_next_dat) {
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
goto fail;
|
||||
}
|
||||
bytestream2_get_be32(&s->gb);
|
||||
length -= 4;
|
||||
/* fallthrough */
|
||||
case MKTAG('I', 'D', 'A', 'T'):
|
||||
if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && !decode_next_dat)
|
||||
goto skip_tag;
|
||||
if (decode_idat_chunk(avctx, s, length, p) < 0)
|
||||
if ((ret = decode_idat_chunk(avctx, s, length, p)) < 0)
|
||||
goto fail;
|
||||
break;
|
||||
case MKTAG('P', 'L', 'T', 'E'):
|
||||
@ -1051,6 +1055,7 @@ static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s,
|
||||
if (!(s->state & PNG_ALLIMAGE))
|
||||
av_log(avctx, AV_LOG_ERROR, "IEND without all image\n");
|
||||
if (!(s->state & (PNG_ALLIMAGE|PNG_IDAT))) {
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
goto fail;
|
||||
}
|
||||
bytestream2_skip(&s->gb, 4); /* crc */
|
||||
@ -1070,7 +1075,7 @@ exit_loop:
|
||||
/* handle p-frames only if a predecessor frame is available */
|
||||
ref = s->dispose_op == APNG_DISPOSE_OP_PREVIOUS ?
|
||||
s->previous_picture.f : s->last_picture.f;
|
||||
if (ref->data[0]) {
|
||||
if (ref->data[0] && s->last_picture.f->data[0]) {
|
||||
if ( !(avpkt->flags & AV_PKT_FLAG_KEY) && avctx->codec_tag != AV_RL32("MPNG")
|
||||
&& ref->width == p->width
|
||||
&& ref->height== p->height
|
||||
|
@ -183,6 +183,7 @@ static int decode_picture_header(AVCodecContext *avctx, const uint8_t *buf, cons
|
||||
|
||||
if (ctx->slice_count != slice_count || !ctx->slices) {
|
||||
av_freep(&ctx->slices);
|
||||
ctx->slice_count = 0;
|
||||
ctx->slices = av_mallocz_array(slice_count, sizeof(*ctx->slices));
|
||||
if (!ctx->slices)
|
||||
return AVERROR(ENOMEM);
|
||||
|
@ -999,6 +999,8 @@ static av_cold int roq_encode_init(AVCodecContext *avctx)
|
||||
|
||||
av_lfg_init(&enc->randctx, 1);
|
||||
|
||||
enc->avctx = avctx;
|
||||
|
||||
enc->framesSinceKeyframe = 0;
|
||||
if ((avctx->width & 0xf) || (avctx->height & 0xf)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Dimensions must be divisible by 16\n");
|
||||
|
@ -91,6 +91,7 @@ static int sami_paragraph_to_ass(AVCodecContext *avctx, const char *src)
|
||||
break;
|
||||
if (*p == '>')
|
||||
p++;
|
||||
continue;
|
||||
}
|
||||
if (!av_isspace(*p))
|
||||
av_bprint_chars(dst, *p, 1);
|
||||
|
@ -129,8 +129,7 @@ static int allocate_buffers(ShortenContext *s)
|
||||
av_log(s->avctx, AV_LOG_ERROR, "nmean too large\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (s->blocksize + s->nwrap >= UINT_MAX / sizeof(int32_t) ||
|
||||
s->blocksize + s->nwrap <= (unsigned)s->nwrap) {
|
||||
if (s->blocksize + (uint64_t)s->nwrap >= UINT_MAX / sizeof(int32_t)) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"s->blocksize + s->nwrap too large\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
@ -278,7 +277,7 @@ static int decode_subframe_lpc(ShortenContext *s, int command, int channel,
|
||||
if (command == FN_QLPC) {
|
||||
/* read/validate prediction order */
|
||||
pred_order = get_ur_golomb_shorten(&s->gb, LPCQSIZE);
|
||||
if (pred_order > s->nwrap) {
|
||||
if ((unsigned)pred_order > s->nwrap) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "invalid pred_order %d\n",
|
||||
pred_order);
|
||||
return AVERROR(EINVAL);
|
||||
@ -370,6 +369,11 @@ static int read_header(ShortenContext *s)
|
||||
s->nmean = get_uint(s, 0);
|
||||
|
||||
skip_bytes = get_uint(s, NSKIPSIZE);
|
||||
if ((unsigned)skip_bytes > get_bits_left(&s->gb)/8) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "invalid skip_bytes: %d\n", skip_bytes);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
for (i = 0; i < skip_bytes; i++)
|
||||
skip_bits(&s->gb, 8);
|
||||
}
|
||||
|
@ -155,6 +155,10 @@ static int smvjpeg_decode_frame(AVCodecContext *avctx, void *data, int *data_siz
|
||||
if (!cur_frame) {
|
||||
av_frame_unref(mjpeg_data);
|
||||
ret = avcodec_decode_video2(s->avctx, mjpeg_data, &s->mjpeg_data_size, avpkt);
|
||||
if (ret < 0) {
|
||||
s->mjpeg_data_size = 0;
|
||||
return ret;
|
||||
}
|
||||
} else if (!s->mjpeg_data_size)
|
||||
return AVERROR(EINVAL);
|
||||
|
||||
|
@ -497,12 +497,15 @@ static int predictor_calc_error(int *k, int *state, int order, int error)
|
||||
// copes better with quantization, and calculates the
|
||||
// actual whitened result as it goes.
|
||||
|
||||
static void modified_levinson_durbin(int *window, int window_entries,
|
||||
static int modified_levinson_durbin(int *window, int window_entries,
|
||||
int *out, int out_entries, int channels, int *tap_quant)
|
||||
{
|
||||
int i;
|
||||
int *state = av_calloc(window_entries, sizeof(*state));
|
||||
|
||||
if (!state)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
memcpy(state, window, 4* window_entries);
|
||||
|
||||
for (i = 0; i < out_entries; i++)
|
||||
@ -567,6 +570,7 @@ static void modified_levinson_durbin(int *window, int window_entries,
|
||||
}
|
||||
|
||||
av_free(state);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int code_samplerate(int samplerate)
|
||||
@ -627,6 +631,9 @@ static av_cold int sonic_encode_init(AVCodecContext *avctx)
|
||||
|
||||
// generate taps
|
||||
s->tap_quant = av_calloc(s->num_taps, sizeof(*s->tap_quant));
|
||||
if (!s->tap_quant)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
for (i = 0; i < s->num_taps; i++)
|
||||
s->tap_quant[i] = ff_sqrt(i+1);
|
||||
|
||||
@ -656,7 +663,7 @@ static av_cold int sonic_encode_init(AVCodecContext *avctx)
|
||||
|
||||
s->window_size = ((2*s->tail_size)+s->frame_size);
|
||||
s->window = av_calloc(s->window_size, sizeof(*s->window));
|
||||
if (!s->window)
|
||||
if (!s->window || !s->int_samples)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
avctx->extradata = av_mallocz(16);
|
||||
@ -769,8 +776,11 @@ static int sonic_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
|
||||
s->tail[i] = s->int_samples[s->frame_size - s->tail_size + i];
|
||||
|
||||
// generate taps
|
||||
modified_levinson_durbin(s->window, s->window_size,
|
||||
ret = modified_levinson_durbin(s->window, s->window_size,
|
||||
s->predictor_k, s->num_taps, s->channels, s->tap_quant);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if ((ret = intlist_write(&c, state, s->predictor_k, s->num_taps, 0)) < 0)
|
||||
return ret;
|
||||
|
||||
@ -873,13 +883,19 @@ static av_cold int sonic_decode_init(AVCodecContext *avctx)
|
||||
|
||||
if (s->version >= 1)
|
||||
{
|
||||
int sample_rate_index;
|
||||
s->channels = get_bits(&gb, 2);
|
||||
s->samplerate = samplerate_table[get_bits(&gb, 4)];
|
||||
sample_rate_index = get_bits(&gb, 4);
|
||||
if (sample_rate_index >= FF_ARRAY_ELEMS(samplerate_table)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid sample_rate_index %d\n", sample_rate_index);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
s->samplerate = samplerate_table[sample_rate_index];
|
||||
av_log(avctx, AV_LOG_INFO, "Sonicv2 chans: %d samprate: %d\n",
|
||||
s->channels, s->samplerate);
|
||||
}
|
||||
|
||||
if (s->channels > MAX_CHANNELS)
|
||||
if (s->channels > MAX_CHANNELS || s->channels < 1)
|
||||
{
|
||||
av_log(avctx, AV_LOG_ERROR, "Only mono and stereo streams are supported by now\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
@ -913,6 +929,9 @@ static av_cold int sonic_decode_init(AVCodecContext *avctx)
|
||||
|
||||
// generate taps
|
||||
s->tap_quant = av_calloc(s->num_taps, sizeof(*s->tap_quant));
|
||||
if (!s->tap_quant)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
for (i = 0; i < s->num_taps; i++)
|
||||
s->tap_quant[i] = ff_sqrt(i+1);
|
||||
|
||||
@ -932,6 +951,8 @@ static av_cold int sonic_decode_init(AVCodecContext *avctx)
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
s->int_samples = av_calloc(s->frame_size, sizeof(*s->int_samples));
|
||||
if (!s->int_samples)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
return 0;
|
||||
|
@ -839,13 +839,6 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
|
||||
s->bpp = -1;
|
||||
}
|
||||
}
|
||||
if (s->bpp > 64U) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"This format is not supported (bpp=%d, %d components)\n",
|
||||
s->bpp, count);
|
||||
s->bpp = 0;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
break;
|
||||
case TIFF_SAMPLES_PER_PIXEL:
|
||||
if (count != 1) {
|
||||
@ -1158,6 +1151,13 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
|
||||
}
|
||||
}
|
||||
end:
|
||||
if (s->bpp > 64U) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"This format is not supported (bpp=%d, %d components)\n",
|
||||
s->bpp, count);
|
||||
s->bpp = 0;
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
bytestream2_seek(&s->gb, start, SEEK_SET);
|
||||
return 0;
|
||||
}
|
||||
|
@ -374,7 +374,7 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
|
||||
case AV_PIX_FMT_YUVJ411P:
|
||||
case AV_PIX_FMT_UYYVYY411:
|
||||
w_align = 32;
|
||||
h_align = 8;
|
||||
h_align = 16 * 2;
|
||||
break;
|
||||
case AV_PIX_FMT_YUV410P:
|
||||
if (s->codec_id == AV_CODEC_ID_SVQ1) {
|
||||
|
@ -279,7 +279,8 @@ static int vp9_alloc_frame(AVCodecContext *ctx, VP9Frame *f)
|
||||
|
||||
// retain segmentation map if it doesn't update
|
||||
if (s->segmentation.enabled && !s->segmentation.update_map &&
|
||||
!s->intraonly && !s->keyframe && !s->errorres) {
|
||||
!s->intraonly && !s->keyframe && !s->errorres &&
|
||||
ctx->active_thread_type != FF_THREAD_FRAME) {
|
||||
memcpy(f->segmentation_map, s->frames[LAST_FRAME].segmentation_map, sz);
|
||||
}
|
||||
|
||||
@ -1351,9 +1352,18 @@ static void decode_mode(AVCodecContext *ctx)
|
||||
|
||||
if (!s->last_uses_2pass)
|
||||
ff_thread_await_progress(&s->frames[LAST_FRAME].tf, row >> 3, 0);
|
||||
for (y = 0; y < h4; y++)
|
||||
for (y = 0; y < h4; y++) {
|
||||
int idx_base = (y + row) * 8 * s->sb_cols + col;
|
||||
for (x = 0; x < w4; x++)
|
||||
pred = FFMIN(pred, refsegmap[(y + row) * 8 * s->sb_cols + x + col]);
|
||||
pred = FFMIN(pred, refsegmap[idx_base + x]);
|
||||
if (!s->segmentation.update_map && ctx->active_thread_type == FF_THREAD_FRAME) {
|
||||
// FIXME maybe retain reference to previous frame as
|
||||
// segmap reference instead of copying the whole map
|
||||
// into a new buffer
|
||||
memcpy(&s->frames[CUR_FRAME].segmentation_map[idx_base],
|
||||
&refsegmap[idx_base], w4);
|
||||
}
|
||||
}
|
||||
av_assert1(pred < 8);
|
||||
b->seg_id = pred;
|
||||
} else {
|
||||
@ -2508,7 +2518,7 @@ static void intra_recon(AVCodecContext *ctx, ptrdiff_t y_off, ptrdiff_t uv_off)
|
||||
for (x = 0; x < end_x; x += uvstep1d, ptr += 4 * uvstep1d,
|
||||
ptr_r += 4 * uvstep1d, n += step) {
|
||||
int mode = b->uvmode;
|
||||
uint8_t *a = &a_buf[16];
|
||||
uint8_t *a = &a_buf[32];
|
||||
int eob = b->skip ? 0 : b->uvtx > TX_8X8 ? AV_RN16A(&s->uveob[p][n]) : s->uveob[p][n];
|
||||
|
||||
mode = check_intra_mode(s, mode, &a, ptr_r,
|
||||
|
@ -231,6 +231,12 @@ static int decode_format80(VqaContext *s, int src_size,
|
||||
unsigned char color;
|
||||
int i;
|
||||
|
||||
if (src_size < 0 || src_size > bytestream2_get_bytes_left(&s->gb)) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Chunk size %d is out of range\n",
|
||||
src_size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
start = bytestream2_tell(&s->gb);
|
||||
while (bytestream2_tell(&s->gb) - start < src_size) {
|
||||
opcode = bytestream2_get_byte(&s->gb);
|
||||
|
@ -472,6 +472,14 @@ static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb,
|
||||
s->decorr[i].samplesB[0] = L;
|
||||
}
|
||||
}
|
||||
|
||||
if (type == AV_SAMPLE_FMT_S16P) {
|
||||
if (FFABS(L) + FFABS(R) > (1<<19)) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "sample %d %d too large\n", L, R);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
|
||||
pos = (pos + 1) & 7;
|
||||
if (s->joint)
|
||||
L += (R -= (L >> 1));
|
||||
|
@ -1622,7 +1622,9 @@ VP9_IDCT_IDCT_16x16_ADD_XMM avx
|
||||
PSIGNW m3, [pw_m1] ; m3=out1[w], m7=t10[w]
|
||||
SUMSUB_BA w, 2, 6, 1 ; m2=out14[w], m6=t11[w]
|
||||
|
||||
%if cpuflag(ssse3)
|
||||
; unfortunately, the code below overflows in some cases, e.g.
|
||||
; http://downloads.webmproject.org/test_data/libvpx/vp90-2-14-resize-fp-tiles-16-8.webm
|
||||
%if 0; cpuflag(ssse3)
|
||||
SUMSUB_BA w, 7, 6, 1
|
||||
pmulhrsw m7, [pw_11585x2] ; m7=out6[w]
|
||||
pmulhrsw m6, [pw_11585x2] ; m6=out9[w]
|
||||
@ -1699,7 +1701,9 @@ VP9_IDCT_IDCT_16x16_ADD_XMM avx
|
||||
SUMSUB_BA w, 5, 7, 4
|
||||
PSIGNW m5, [pw_m1] ; m12=out15[w], m8=t3[w]
|
||||
|
||||
%if cpuflag(ssse3)
|
||||
; unfortunately, the code below overflows in some cases, e.g.
|
||||
; http://downloads.webmproject.org/test_data/libvpx/vp90-2-14-resize-fp-tiles-16-8-4-2-1.webm
|
||||
%if 0 ; cpuflag(ssse3)
|
||||
SUMSUB_BA w, 7, 6, 4
|
||||
pmulhrsw m7, [pw_m11585x2] ; m8=out7[w]
|
||||
pmulhrsw m6, [pw_11585x2] ; m1=out8[w]
|
||||
|
@ -286,7 +286,7 @@ static void list_formats(AVFormatContext *ctx, int type)
|
||||
} else if (vfd.flags & V4L2_FMT_FLAG_COMPRESSED &&
|
||||
type & V4L_COMPFORMATS) {
|
||||
const AVCodecDescriptor *desc = avcodec_descriptor_get(codec_id);
|
||||
av_log(ctx, AV_LOG_INFO, "Compressedll : %9s : %20s :",
|
||||
av_log(ctx, AV_LOG_INFO, "Compressed: %9s : %20s :",
|
||||
desc ? desc->name : "Unsupported",
|
||||
vfd.description);
|
||||
} else {
|
||||
@ -747,7 +747,7 @@ static int v4l2_set_parameters(AVFormatContext *ctx)
|
||||
}
|
||||
} else {
|
||||
av_log(ctx, AV_LOG_WARNING,
|
||||
"The driver does not allow to change time per frame\n");
|
||||
"The driver does not permit changing the time per frame\n");
|
||||
}
|
||||
}
|
||||
if (tpf->denominator > 0 && tpf->numerator > 0) {
|
||||
|
@ -149,13 +149,25 @@ static int xcbgrab_frame(AVFormatContext *s, AVPacket *pkt)
|
||||
xcb_get_image_cookie_t iq;
|
||||
xcb_get_image_reply_t *img;
|
||||
xcb_drawable_t drawable = c->screen->root;
|
||||
xcb_generic_error_t *e = NULL;
|
||||
uint8_t *data;
|
||||
int length, ret;
|
||||
|
||||
iq = xcb_get_image(c->conn, XCB_IMAGE_FORMAT_Z_PIXMAP, drawable,
|
||||
c->x, c->y, c->width, c->height, ~0);
|
||||
|
||||
img = xcb_get_image_reply(c->conn, iq, NULL);
|
||||
img = xcb_get_image_reply(c->conn, iq, &e);
|
||||
|
||||
if (e) {
|
||||
av_log(s, AV_LOG_ERROR,
|
||||
"Cannot get the image data "
|
||||
"event_error: response_type:%u error_code:%u "
|
||||
"sequence:%u resource_id:%u minor_code:%u major_code:%u.\n",
|
||||
e->response_type, e->error_code,
|
||||
e->sequence, e->resource_id, e->minor_code, e->major_code);
|
||||
return AVERROR(EACCES);
|
||||
}
|
||||
|
||||
if (!img)
|
||||
return AVERROR(EAGAIN);
|
||||
|
||||
@ -409,7 +421,7 @@ static int xcbgrab_read_packet(AVFormatContext *s, AVPacket *pkt)
|
||||
ret = xcbgrab_frame(s, pkt);
|
||||
|
||||
#if CONFIG_LIBXCB_XFIXES
|
||||
if (c->draw_mouse && p->same_screen)
|
||||
if (ret >= 0 && c->draw_mouse && p->same_screen)
|
||||
xcbgrab_draw_mouse(s, pkt, p, geo);
|
||||
#endif
|
||||
|
||||
@ -520,8 +532,17 @@ static int create_stream(AVFormatContext *s)
|
||||
gc = xcb_get_geometry(c->conn, c->screen->root);
|
||||
geo = xcb_get_geometry_reply(c->conn, gc, NULL);
|
||||
|
||||
c->width = FFMIN(geo->width, c->width);
|
||||
c->height = FFMIN(geo->height, c->height);
|
||||
if (c->x + c->width >= geo->width ||
|
||||
c->y + c->height >= geo->height) {
|
||||
av_log(s, AV_LOG_ERROR,
|
||||
"Capture area %dx%d at position %d.%d "
|
||||
"outside the screen size %dx%d\n",
|
||||
c->width, c->height,
|
||||
c->x, c->y,
|
||||
geo->width, geo->height);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
c->time_base = (AVRational){ st->avg_frame_rate.den,
|
||||
st->avg_frame_rate.num };
|
||||
c->time_frame = av_gettime();
|
||||
|
@ -32,6 +32,7 @@ int ff_load_image(uint8_t *data[4], int linesize[4],
|
||||
AVFrame *frame;
|
||||
int frame_decoded, ret = 0;
|
||||
AVPacket pkt;
|
||||
AVDictionary *opt=NULL;
|
||||
|
||||
av_init_packet(&pkt);
|
||||
|
||||
@ -57,7 +58,8 @@ int ff_load_image(uint8_t *data[4], int linesize[4],
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = avcodec_open2(codec_ctx, codec, NULL)) < 0) {
|
||||
av_dict_set(&opt, "thread_type", "slice", 0);
|
||||
if ((ret = avcodec_open2(codec_ctx, codec, &opt)) < 0) {
|
||||
av_log(log_ctx, AV_LOG_ERROR, "Failed to open codec\n");
|
||||
goto end;
|
||||
}
|
||||
@ -97,6 +99,7 @@ end:
|
||||
avcodec_close(codec_ctx);
|
||||
avformat_close_input(&format_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_dict_free(&opt);
|
||||
|
||||
if (ret < 0)
|
||||
av_log(log_ctx, AV_LOG_ERROR, "Error loading image file '%s'\n", filename);
|
||||
|
@ -124,7 +124,7 @@ static int query_formats(AVFilterContext *ctx)
|
||||
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
|
||||
if (!(desc->flags & (AV_PIX_FMT_FLAG_HWACCEL | AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_PAL)) &&
|
||||
(desc->flags & AV_PIX_FMT_FLAG_PLANAR || desc->nb_components == 1) &&
|
||||
(!(desc->flags & AV_PIX_FMT_FLAG_BE) == !HAVE_BIGENDIAN) || desc->comp[0].depth_minus1 == 7)
|
||||
(!(desc->flags & AV_PIX_FMT_FLAG_BE) == !HAVE_BIGENDIAN || desc->comp[0].depth_minus1 == 7))
|
||||
ff_add_format(&formats, fmt);
|
||||
}
|
||||
|
||||
|
@ -504,7 +504,7 @@ static int request_frame(AVFilterLink *outlink)
|
||||
int r;
|
||||
|
||||
r = ff_request_frame(inlink);
|
||||
if (r == AVERROR_EOF && !s->palette_pushed) {
|
||||
if (r == AVERROR_EOF && !s->palette_pushed && s->nb_refs) {
|
||||
r = ff_filter_frame(outlink, get_palette_frame(ctx));
|
||||
s->palette_pushed = 1;
|
||||
return r;
|
||||
|
@ -660,6 +660,7 @@ static int asf_write_header(AVFormatContext *s)
|
||||
* It is needed to use asf as a streamable format. */
|
||||
if (asf_write_header1(s, 0, DATA_HEADER_SIZE) < 0) {
|
||||
//av_free(asf);
|
||||
av_freep(&asf->index_ptr);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -36,6 +36,7 @@
|
||||
#include "riff.h"
|
||||
#include "libavcodec/bytestream.h"
|
||||
#include "libavcodec/exif.h"
|
||||
#include "libavformat/isom.h"
|
||||
|
||||
typedef struct AVIStream {
|
||||
int64_t frame_offset; /* current frame (video) or byte (audio) counter
|
||||
@ -127,7 +128,7 @@ static inline int get_duration(AVIStream *ast, int len)
|
||||
{
|
||||
if (ast->sample_size)
|
||||
return len;
|
||||
else if (ast->dshow_block_align > 1)
|
||||
else if (ast->dshow_block_align)
|
||||
return (len + ast->dshow_block_align - 1) / ast->dshow_block_align;
|
||||
else
|
||||
return 1;
|
||||
@ -449,6 +450,7 @@ static int calculate_bitrate(AVFormatContext *s)
|
||||
int64_t len = 0;
|
||||
AVStream *st = s->streams[i];
|
||||
int64_t duration;
|
||||
int64_t bitrate;
|
||||
|
||||
for (j = 0; j < st->nb_index_entries; j++)
|
||||
len += st->index_entries[j].size;
|
||||
@ -456,7 +458,10 @@ static int calculate_bitrate(AVFormatContext *s)
|
||||
if (st->nb_index_entries < 2 || st->codec->bit_rate > 0)
|
||||
continue;
|
||||
duration = st->index_entries[j-1].timestamp - st->index_entries[0].timestamp;
|
||||
st->codec->bit_rate = av_rescale(8*len, st->time_base.den, duration * st->time_base.num);
|
||||
bitrate = av_rescale(8*len, st->time_base.den, duration * st->time_base.num);
|
||||
if (bitrate <= INT_MAX && bitrate > 0) {
|
||||
st->codec->bit_rate = bitrate;
|
||||
}
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
@ -687,6 +692,23 @@ static int avi_read_header(AVFormatContext *s)
|
||||
default:
|
||||
av_log(s, AV_LOG_INFO, "unknown stream type %X\n", tag1);
|
||||
}
|
||||
|
||||
if (ast->sample_size < 0) {
|
||||
if (s->error_recognition & AV_EF_EXPLODE) {
|
||||
av_log(s, AV_LOG_ERROR,
|
||||
"Invalid sample_size %d at stream %d\n",
|
||||
ast->sample_size,
|
||||
stream_index);
|
||||
goto fail;
|
||||
}
|
||||
av_log(s, AV_LOG_WARNING,
|
||||
"Invalid sample_size %d at stream %d "
|
||||
"setting it to 0\n",
|
||||
ast->sample_size,
|
||||
stream_index);
|
||||
ast->sample_size = 0;
|
||||
}
|
||||
|
||||
if (ast->sample_size == 0) {
|
||||
st->duration = st->nb_frames;
|
||||
if (st->duration > 0 && avi->io_fsize > 0 && avi->riff_end > avi->io_fsize) {
|
||||
@ -773,6 +795,12 @@ static int avi_read_header(AVFormatContext *s)
|
||||
st->codec->codec_tag = tag1;
|
||||
st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags,
|
||||
tag1);
|
||||
if (!st->codec->codec_id) {
|
||||
st->codec->codec_id = ff_codec_get_id(ff_codec_movvideo_tags,
|
||||
tag1);
|
||||
if (st->codec->codec_id)
|
||||
av_log(s, AV_LOG_WARNING, "mov tag found in avi\n");
|
||||
}
|
||||
/* This is needed to get the pict type which is necessary
|
||||
* for generating correct pts. */
|
||||
st->need_parsing = AVSTREAM_PARSE_HEADERS;
|
||||
@ -838,7 +866,8 @@ static int avi_read_header(AVFormatContext *s)
|
||||
st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_AMV;
|
||||
ast->dshow_block_align = 0;
|
||||
}
|
||||
if (st->codec->codec_id == AV_CODEC_ID_AAC && ast->dshow_block_align <= 4 && ast->dshow_block_align) {
|
||||
if (st->codec->codec_id == AV_CODEC_ID_AAC && ast->dshow_block_align <= 4 && ast->dshow_block_align ||
|
||||
st->codec->codec_id == AV_CODEC_ID_MP2 && ast->dshow_block_align <= 4 && ast->dshow_block_align) {
|
||||
av_log(s, AV_LOG_DEBUG, "overriding invalid dshow_block_align of %d\n", ast->dshow_block_align);
|
||||
ast->dshow_block_align = 0;
|
||||
}
|
||||
|
@ -194,7 +194,10 @@ static int read_header(AVFormatContext *s)
|
||||
return ret;
|
||||
}
|
||||
|
||||
avio_seek(pb, vst->index_entries[0].pos, SEEK_SET);
|
||||
if (vst->index_entries)
|
||||
avio_seek(pb, vst->index_entries[0].pos, SEEK_SET);
|
||||
else
|
||||
avio_skip(pb, 4);
|
||||
|
||||
bink->current_track = -1;
|
||||
return 0;
|
||||
|
@ -145,7 +145,7 @@ static int add_entry(URLContext *h, const unsigned char *buf, int size)
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
//we could truncate the file to pos here if pos >=0 but ftruncate isnt available in VS so
|
||||
//we could truncate the file to pos here if pos >=0 but ftruncate isn't available in VS so
|
||||
//for simplicty we just leave the file a bit larger
|
||||
av_free(entry);
|
||||
av_free(node);
|
||||
@ -300,7 +300,7 @@ static int cache_close(URLContext *h)
|
||||
#define D AV_OPT_FLAG_DECODING_PARAM
|
||||
|
||||
static const AVOption options[] = {
|
||||
{ "read_ahead_limit", "Amount in bytes that may be read ahead when seeking isnt supported, -1 for unlimited", OFFSET(read_ahead_limit), AV_OPT_TYPE_INT, { .i64 = 65536 }, -1, INT_MAX, D },
|
||||
{ "read_ahead_limit", "Amount in bytes that may be read ahead when seeking isn't supported, -1 for unlimited", OFFSET(read_ahead_limit), AV_OPT_TYPE_INT, { .i64 = 65536 }, -1, INT_MAX, D },
|
||||
{NULL},
|
||||
};
|
||||
|
||||
|
@ -129,8 +129,12 @@ static int read_kuki_chunk(AVFormatContext *s, int64_t size)
|
||||
avio_skip(pb, size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
avio_read(pb, preamble, ALAC_PREAMBLE);
|
||||
if (avio_read(pb, preamble, ALAC_PREAMBLE) != ALAC_PREAMBLE) {
|
||||
av_log(s, AV_LOG_ERROR, "failed to read preamble\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
av_freep(&st->codec->extradata);
|
||||
if (ff_alloc_extradata(st->codec, ALAC_HEADER))
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
@ -144,17 +148,26 @@ static int read_kuki_chunk(AVFormatContext *s, int64_t size)
|
||||
av_freep(&st->codec->extradata);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
avio_read(pb, st->codec->extradata, ALAC_HEADER);
|
||||
if (avio_read(pb, st->codec->extradata, ALAC_HEADER) != ALAC_HEADER) {
|
||||
av_log(s, AV_LOG_ERROR, "failed to read kuki header\n");
|
||||
av_freep(&st->codec->extradata);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
avio_skip(pb, size - ALAC_PREAMBLE - ALAC_HEADER);
|
||||
} else {
|
||||
AV_WB32(st->codec->extradata, 36);
|
||||
memcpy(&st->codec->extradata[4], "alac", 4);
|
||||
AV_WB32(&st->codec->extradata[8], 0);
|
||||
memcpy(&st->codec->extradata[12], preamble, 12);
|
||||
avio_read(pb, &st->codec->extradata[24], ALAC_NEW_KUKI - 12);
|
||||
if (avio_read(pb, &st->codec->extradata[24], ALAC_NEW_KUKI - 12) != ALAC_NEW_KUKI - 12) {
|
||||
av_log(s, AV_LOG_ERROR, "failed to read new kuki header\n");
|
||||
av_freep(&st->codec->extradata);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
avio_skip(pb, size - ALAC_NEW_KUKI);
|
||||
}
|
||||
} else {
|
||||
av_freep(&st->codec->extradata);
|
||||
if (ff_get_extradata(st->codec, pb, size) < 0)
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
@ -494,7 +494,7 @@ static int write_manifest(AVFormatContext *s, int final)
|
||||
}
|
||||
|
||||
if (c->has_video) {
|
||||
avio_printf(out, "\t\t<AdaptationSet id=\"video\" segmentAlignment=\"true\" bitstreamSwitching=\"true\">\n");
|
||||
avio_printf(out, "\t\t<AdaptationSet contentType=\"video\" segmentAlignment=\"true\" bitstreamSwitching=\"true\">\n");
|
||||
for (i = 0; i < s->nb_streams; i++) {
|
||||
AVStream *st = s->streams[i];
|
||||
OutputStream *os = &c->streams[i];
|
||||
@ -509,7 +509,7 @@ static int write_manifest(AVFormatContext *s, int final)
|
||||
avio_printf(out, "\t\t</AdaptationSet>\n");
|
||||
}
|
||||
if (c->has_audio) {
|
||||
avio_printf(out, "\t\t<AdaptationSet id=\"audio\" segmentAlignment=\"true\" bitstreamSwitching=\"true\">\n");
|
||||
avio_printf(out, "\t\t<AdaptationSet contentType=\"audio\" segmentAlignment=\"true\" bitstreamSwitching=\"true\">\n");
|
||||
for (i = 0; i < s->nb_streams; i++) {
|
||||
AVStream *st = s->streams[i];
|
||||
OutputStream *os = &c->streams[i];
|
||||
|
@ -82,6 +82,7 @@ static int ffm_read_data(AVFormatContext *s,
|
||||
FFMContext *ffm = s->priv_data;
|
||||
AVIOContext *pb = s->pb;
|
||||
int len, fill_size, size1, frame_offset, id;
|
||||
int64_t last_pos = -1;
|
||||
|
||||
size1 = size;
|
||||
while (size > 0) {
|
||||
@ -101,9 +102,11 @@ static int ffm_read_data(AVFormatContext *s,
|
||||
avio_seek(pb, tell, SEEK_SET);
|
||||
}
|
||||
id = avio_rb16(pb); /* PACKET_ID */
|
||||
if (id != PACKET_ID)
|
||||
if (id != PACKET_ID) {
|
||||
if (ffm_resync(s, id) < 0)
|
||||
return -1;
|
||||
last_pos = avio_tell(pb);
|
||||
}
|
||||
fill_size = avio_rb16(pb);
|
||||
ffm->dts = avio_rb64(pb);
|
||||
frame_offset = avio_rb16(pb);
|
||||
@ -117,7 +120,9 @@ static int ffm_read_data(AVFormatContext *s,
|
||||
if (!frame_offset) {
|
||||
/* This packet has no frame headers in it */
|
||||
if (avio_tell(pb) >= ffm->packet_size * 3LL) {
|
||||
avio_seek(pb, -ffm->packet_size * 2LL, SEEK_CUR);
|
||||
int64_t seekback = FFMIN(ffm->packet_size * 2LL, avio_tell(pb) - last_pos);
|
||||
seekback = FFMAX(seekback, 0);
|
||||
avio_seek(pb, -seekback, SEEK_CUR);
|
||||
goto retry_read;
|
||||
}
|
||||
/* This is bad, we cannot find a valid frame header */
|
||||
@ -261,7 +266,7 @@ static int ffm2_read_header(AVFormatContext *s)
|
||||
AVIOContext *pb = s->pb;
|
||||
AVCodecContext *codec;
|
||||
int ret;
|
||||
int f_main = 0, f_cprv, f_stvi, f_stau;
|
||||
int f_main = 0, f_cprv = -1, f_stvi = -1, f_stau = -1;
|
||||
AVCodec *enc;
|
||||
char *buffer;
|
||||
|
||||
@ -331,6 +336,12 @@ static int ffm2_read_header(AVFormatContext *s)
|
||||
}
|
||||
codec->time_base.num = avio_rb32(pb);
|
||||
codec->time_base.den = avio_rb32(pb);
|
||||
if (codec->time_base.num <= 0 || codec->time_base.den <= 0) {
|
||||
av_log(s, AV_LOG_ERROR, "Invalid time base %d/%d\n",
|
||||
codec->time_base.num, codec->time_base.den);
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
goto fail;
|
||||
}
|
||||
codec->width = avio_rb16(pb);
|
||||
codec->height = avio_rb16(pb);
|
||||
codec->gop_size = avio_rb16(pb);
|
||||
@ -434,7 +445,7 @@ static int ffm2_read_header(AVFormatContext *s)
|
||||
}
|
||||
|
||||
/* get until end of block reached */
|
||||
while ((avio_tell(pb) % ffm->packet_size) != 0)
|
||||
while ((avio_tell(pb) % ffm->packet_size) != 0 && !pb->eof_reached)
|
||||
avio_r8(pb);
|
||||
|
||||
/* init packet demux */
|
||||
@ -503,6 +514,11 @@ static int ffm_read_header(AVFormatContext *s)
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
codec->time_base.num = avio_rb32(pb);
|
||||
codec->time_base.den = avio_rb32(pb);
|
||||
if (codec->time_base.num <= 0 || codec->time_base.den <= 0) {
|
||||
av_log(s, AV_LOG_ERROR, "Invalid time base %d/%d\n",
|
||||
codec->time_base.num, codec->time_base.den);
|
||||
goto fail;
|
||||
}
|
||||
codec->width = avio_rb16(pb);
|
||||
codec->height = avio_rb16(pb);
|
||||
codec->gop_size = avio_rb16(pb);
|
||||
@ -561,7 +577,7 @@ static int ffm_read_header(AVFormatContext *s)
|
||||
}
|
||||
|
||||
/* get until end of block reached */
|
||||
while ((avio_tell(pb) % ffm->packet_size) != 0)
|
||||
while ((avio_tell(pb) % ffm->packet_size) != 0 && !pb->eof_reached)
|
||||
avio_r8(pb);
|
||||
|
||||
/* init packet demux */
|
||||
|
@ -50,12 +50,14 @@ static int flac_write_block_comment(AVIOContext *pb, AVDictionary **m,
|
||||
int last_block, int bitexact)
|
||||
{
|
||||
const char *vendor = bitexact ? "ffmpeg" : LIBAVFORMAT_IDENT;
|
||||
unsigned int len;
|
||||
int64_t len;
|
||||
uint8_t *p, *p0;
|
||||
|
||||
ff_metadata_conv(m, ff_vorbiscomment_metadata_conv, NULL);
|
||||
|
||||
len = ff_vorbiscomment_length(*m, vendor);
|
||||
if (len >= ((1<<24) - 4))
|
||||
return AVERROR(EINVAL);
|
||||
p0 = av_malloc(len+4);
|
||||
if (!p0)
|
||||
return AVERROR(ENOMEM);
|
||||
|
@ -189,7 +189,7 @@ static void skip_sub_layer_hrd_parameters(GetBitContext *gb,
|
||||
}
|
||||
}
|
||||
|
||||
static void skip_hrd_parameters(GetBitContext *gb, uint8_t cprms_present_flag,
|
||||
static int skip_hrd_parameters(GetBitContext *gb, uint8_t cprms_present_flag,
|
||||
unsigned int max_sub_layers_minus1)
|
||||
{
|
||||
unsigned int i;
|
||||
@ -246,8 +246,11 @@ static void skip_hrd_parameters(GetBitContext *gb, uint8_t cprms_present_flag,
|
||||
else
|
||||
low_delay_hrd_flag = get_bits1(gb);
|
||||
|
||||
if (!low_delay_hrd_flag)
|
||||
if (!low_delay_hrd_flag) {
|
||||
cpb_cnt_minus1 = get_ue_golomb_long(gb);
|
||||
if (cpb_cnt_minus1 > 31)
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (nal_hrd_parameters_present_flag)
|
||||
skip_sub_layer_hrd_parameters(gb, cpb_cnt_minus1,
|
||||
@ -257,6 +260,8 @@ static void skip_hrd_parameters(GetBitContext *gb, uint8_t cprms_present_flag,
|
||||
skip_sub_layer_hrd_parameters(gb, cpb_cnt_minus1,
|
||||
sub_pic_hrd_params_present_flag);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void skip_timing_info(GetBitContext *gb)
|
||||
@ -457,6 +462,9 @@ static int parse_rps(GetBitContext *gb, unsigned int rps_idx,
|
||||
unsigned int num_negative_pics = get_ue_golomb_long(gb);
|
||||
unsigned int num_positive_pics = get_ue_golomb_long(gb);
|
||||
|
||||
if ((num_positive_pics + (uint64_t)num_negative_pics) * 2 > get_bits_left(gb))
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
num_delta_pocs[rps_idx] = num_negative_pics + num_positive_pics;
|
||||
|
||||
for (i = 0; i < num_negative_pics; i++) {
|
||||
|
@ -903,6 +903,14 @@ static void intercept_id3(struct playlist *pls, uint8_t *buf,
|
||||
pls->is_id3_timestamped = (pls->id3_mpegts_timestamp != AV_NOPTS_VALUE);
|
||||
}
|
||||
|
||||
static void update_options(char **dest, const char *name, void *src)
|
||||
{
|
||||
av_freep(dest);
|
||||
av_opt_get(src, name, 0, (uint8_t**)dest);
|
||||
if (*dest && !strlen(*dest))
|
||||
av_freep(dest);
|
||||
}
|
||||
|
||||
static int open_input(HLSContext *c, struct playlist *pls)
|
||||
{
|
||||
AVDictionary *opts = NULL;
|
||||
@ -944,6 +952,8 @@ static int open_input(HLSContext *c, struct playlist *pls)
|
||||
av_log(NULL, AV_LOG_ERROR, "Unable to read key file %s\n",
|
||||
seg->key);
|
||||
}
|
||||
update_options(&c->cookies, "cookies", uc->priv_data);
|
||||
av_dict_set(&opts, "cookies", c->cookies, 0);
|
||||
ffurl_close(uc);
|
||||
} else {
|
||||
av_log(NULL, AV_LOG_ERROR, "Unable to open key file %s\n",
|
||||
@ -1252,22 +1262,13 @@ static int hls_read_header(AVFormatContext *s)
|
||||
// if the URL context is good, read important options we must broker later
|
||||
if (u && u->prot->priv_data_class) {
|
||||
// get the previous user agent & set back to null if string size is zero
|
||||
av_freep(&c->user_agent);
|
||||
av_opt_get(u->priv_data, "user-agent", 0, (uint8_t**)&(c->user_agent));
|
||||
if (c->user_agent && !strlen(c->user_agent))
|
||||
av_freep(&c->user_agent);
|
||||
update_options(&c->user_agent, "user-agent", u->priv_data);
|
||||
|
||||
// get the previous cookies & set back to null if string size is zero
|
||||
av_freep(&c->cookies);
|
||||
av_opt_get(u->priv_data, "cookies", 0, (uint8_t**)&(c->cookies));
|
||||
if (c->cookies && !strlen(c->cookies))
|
||||
av_freep(&c->cookies);
|
||||
update_options(&c->cookies, "cookies", u->priv_data);
|
||||
|
||||
// get the previous headers & set back to null if string size is zero
|
||||
av_freep(&c->headers);
|
||||
av_opt_get(u->priv_data, "headers", 0, (uint8_t**)&(c->headers));
|
||||
if (c->headers && !strlen(c->headers))
|
||||
av_freep(&c->headers);
|
||||
update_options(&c->headers, "headers", u->priv_data);
|
||||
}
|
||||
|
||||
if ((ret = parse_playlist(c, s->filename, NULL, s->pb)) < 0)
|
||||
|
@ -77,6 +77,8 @@ typedef struct HTTPContext {
|
||||
int is_akamai;
|
||||
int is_mediagateway;
|
||||
char *cookies; ///< holds newline (\n) delimited Set-Cookie header field values (without the "Set-Cookie: " field name)
|
||||
/* A dictionary containing cookies keyed by cookie name */
|
||||
AVDictionary *cookie_dict;
|
||||
int icy;
|
||||
/* how much data was read since the last ICY metadata packet */
|
||||
int icy_data_read;
|
||||
@ -464,6 +466,43 @@ static int parse_icy(HTTPContext *s, const char *tag, const char *p)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int parse_cookie(HTTPContext *s, const char *p, AVDictionary **cookies)
|
||||
{
|
||||
char *eql, *name;
|
||||
|
||||
// duplicate the cookie name (dict will dupe the value)
|
||||
if (!(eql = strchr(p, '='))) return AVERROR(EINVAL);
|
||||
if (!(name = av_strndup(p, eql - p))) return AVERROR(ENOMEM);
|
||||
|
||||
// add the cookie to the dictionary
|
||||
av_dict_set(cookies, name, eql, AV_DICT_DONT_STRDUP_KEY);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cookie_string(AVDictionary *dict, char **cookies)
|
||||
{
|
||||
AVDictionaryEntry *e = NULL;
|
||||
int len = 1;
|
||||
|
||||
// determine how much memory is needed for the cookies string
|
||||
while (e = av_dict_get(dict, "", e, AV_DICT_IGNORE_SUFFIX))
|
||||
len += strlen(e->key) + strlen(e->value) + 1;
|
||||
|
||||
// reallocate the cookies
|
||||
e = NULL;
|
||||
if (*cookies) av_free(*cookies);
|
||||
*cookies = av_malloc(len);
|
||||
if (!cookies) return AVERROR(ENOMEM);
|
||||
*cookies[0] = '\0';
|
||||
|
||||
// write out the cookies
|
||||
while (e = av_dict_get(dict, "", e, AV_DICT_IGNORE_SUFFIX))
|
||||
av_strlcatf(*cookies, len, "%s%s\n", e->key, e->value);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int process_line(URLContext *h, char *line, int line_count,
|
||||
int *new_location)
|
||||
{
|
||||
@ -535,19 +574,8 @@ static int process_line(URLContext *h, char *line, int line_count,
|
||||
av_free(s->mime_type);
|
||||
s->mime_type = av_strdup(p);
|
||||
} else if (!av_strcasecmp(tag, "Set-Cookie")) {
|
||||
if (!s->cookies) {
|
||||
if (!(s->cookies = av_strdup(p)))
|
||||
return AVERROR(ENOMEM);
|
||||
} else {
|
||||
char *tmp = s->cookies;
|
||||
size_t str_size = strlen(tmp) + strlen(p) + 2;
|
||||
if (!(s->cookies = av_malloc(str_size))) {
|
||||
s->cookies = tmp;
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
snprintf(s->cookies, str_size, "%s\n%s", tmp, p);
|
||||
av_free(tmp);
|
||||
}
|
||||
if (parse_cookie(s, p, &s->cookie_dict))
|
||||
av_log(h, AV_LOG_WARNING, "Unable to parse '%s'\n", p);
|
||||
} else if (!av_strcasecmp(tag, "Icy-MetaInt")) {
|
||||
s->icy_metaint = strtoll(p, NULL, 10);
|
||||
} else if (!av_strncasecmp(tag, "Icy-", 4)) {
|
||||
@ -578,12 +606,19 @@ static int get_cookies(HTTPContext *s, char **cookies, const char *path,
|
||||
|
||||
if (!set_cookies) return AVERROR(EINVAL);
|
||||
|
||||
// destroy any cookies in the dictionary.
|
||||
av_dict_free(&s->cookie_dict);
|
||||
|
||||
*cookies = NULL;
|
||||
while ((cookie = av_strtok(set_cookies, "\n", &next))) {
|
||||
int domain_offset = 0;
|
||||
char *param, *next_param, *cdomain = NULL, *cpath = NULL, *cvalue = NULL;
|
||||
set_cookies = NULL;
|
||||
|
||||
// store the cookie in a dict in case it is updated in the response
|
||||
if (parse_cookie(s, cookie, &s->cookie_dict))
|
||||
av_log(s, AV_LOG_WARNING, "Unable to parse '%s'\n", cookie);
|
||||
|
||||
while ((param = av_strtok(cookie, "; ", &next_param))) {
|
||||
if (cookie) {
|
||||
// first key-value pair is the actual cookie value
|
||||
@ -691,6 +726,10 @@ static int http_read_header(URLContext *h, int *new_location)
|
||||
if (s->seekable == -1 && s->is_mediagateway && s->filesize == 2000000000)
|
||||
h->is_streamed = 1; /* we can in fact _not_ seek */
|
||||
|
||||
// add any new cookies into the existing cookie string
|
||||
cookie_string(s->cookie_dict, &s->cookies);
|
||||
av_dict_free(&s->cookie_dict);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -205,7 +205,7 @@ static int check_tag(AVIOContext *s, int offset, unsigned int len)
|
||||
|
||||
if (len > 4 ||
|
||||
avio_seek(s, offset, SEEK_SET) < 0 ||
|
||||
avio_read(s, tag, len) < len)
|
||||
avio_read(s, tag, len) < (int)len)
|
||||
return -1;
|
||||
else if (!AV_RB32(tag) || is_tag(tag, len))
|
||||
return 1;
|
||||
|
@ -341,7 +341,10 @@ int ff_img_read_header(AVFormatContext *s1)
|
||||
break;
|
||||
}
|
||||
}
|
||||
ffio_rewind_with_probe_data(s1->pb, &probe_buffer, probe_buffer_size);
|
||||
if (s1->flags & AVFMT_FLAG_CUSTOM_IO) {
|
||||
avio_seek(s1->pb, 0, SEEK_SET);
|
||||
} else
|
||||
ffio_rewind_with_probe_data(s1->pb, &probe_buffer, probe_buffer_size);
|
||||
}
|
||||
if (st->codec->codec_id == AV_CODEC_ID_NONE)
|
||||
st->codec->codec_id = ff_guess_image2_codec(s->path);
|
||||
|
@ -97,6 +97,8 @@ struct AVFormatInternal {
|
||||
AVRational offset_timebase;
|
||||
|
||||
int inject_global_side_data;
|
||||
|
||||
int avoid_negative_ts_use_pts;
|
||||
};
|
||||
|
||||
#ifdef __GNUC__
|
||||
|
@ -99,12 +99,17 @@ const CodecTags ff_mkv_codec_tags[]={
|
||||
{"" , AV_CODEC_ID_NONE}
|
||||
};
|
||||
|
||||
const CodecMime ff_mkv_mime_tags[] = {
|
||||
{"text/plain" , AV_CODEC_ID_TEXT},
|
||||
const CodecMime ff_mkv_image_mime_tags[] = {
|
||||
{"image/gif" , AV_CODEC_ID_GIF},
|
||||
{"image/jpeg" , AV_CODEC_ID_MJPEG},
|
||||
{"image/png" , AV_CODEC_ID_PNG},
|
||||
{"image/tiff" , AV_CODEC_ID_TIFF},
|
||||
|
||||
{"" , AV_CODEC_ID_NONE}
|
||||
};
|
||||
|
||||
const CodecMime ff_mkv_mime_tags[] = {
|
||||
{"text/plain" , AV_CODEC_ID_TEXT},
|
||||
{"application/x-truetype-font", AV_CODEC_ID_TTF},
|
||||
{"application/x-font" , AV_CODEC_ID_TTF},
|
||||
{"application/vnd.ms-opentype", AV_CODEC_ID_OTF},
|
||||
|
@ -280,6 +280,7 @@ typedef struct CodecTags{
|
||||
|
||||
extern const CodecTags ff_mkv_codec_tags[];
|
||||
extern const CodecMime ff_mkv_mime_tags[];
|
||||
extern const CodecMime ff_mkv_image_mime_tags[];
|
||||
extern const AVMetadataConv ff_mkv_metadata_conv[];
|
||||
extern const char * const ff_matroska_video_stereo_mode[MATROSKA_VIDEO_STEREOMODE_TYPE_NB];
|
||||
extern const char * const ff_matroska_video_stereo_plane[MATROSKA_VIDEO_STEREO_PLANE_COUNT];
|
||||
|
@ -1274,15 +1274,13 @@ static int matroska_decode_buffer(uint8_t **buf, int *buf_size,
|
||||
newpktdata = av_realloc(pkt_data, pkt_size);
|
||||
if (!newpktdata) {
|
||||
inflateEnd(&zstream);
|
||||
result = AVERROR(ENOMEM);
|
||||
goto failed;
|
||||
}
|
||||
pkt_data = newpktdata;
|
||||
zstream.avail_out = pkt_size - zstream.total_out;
|
||||
zstream.next_out = pkt_data + zstream.total_out;
|
||||
if (pkt_data) {
|
||||
result = inflate(&zstream, Z_NO_FLUSH);
|
||||
} else
|
||||
result = Z_MEM_ERROR;
|
||||
result = inflate(&zstream, Z_NO_FLUSH);
|
||||
} while (result == Z_OK && pkt_size < 10000000);
|
||||
pkt_size = zstream.total_out;
|
||||
inflateEnd(&zstream);
|
||||
@ -1309,15 +1307,13 @@ static int matroska_decode_buffer(uint8_t **buf, int *buf_size,
|
||||
newpktdata = av_realloc(pkt_data, pkt_size);
|
||||
if (!newpktdata) {
|
||||
BZ2_bzDecompressEnd(&bzstream);
|
||||
result = AVERROR(ENOMEM);
|
||||
goto failed;
|
||||
}
|
||||
pkt_data = newpktdata;
|
||||
bzstream.avail_out = pkt_size - bzstream.total_out_lo32;
|
||||
bzstream.next_out = pkt_data + bzstream.total_out_lo32;
|
||||
if (pkt_data) {
|
||||
result = BZ2_bzDecompress(&bzstream);
|
||||
} else
|
||||
result = BZ_MEM_ERROR;
|
||||
result = BZ2_bzDecompress(&bzstream);
|
||||
} while (result == BZ_OK && pkt_size < 10000000);
|
||||
pkt_size = bzstream.total_out_lo32;
|
||||
BZ2_bzDecompressEnd(&bzstream);
|
||||
@ -1496,7 +1492,7 @@ static void matroska_add_index_entries(MatroskaDemuxContext *matroska)
|
||||
{
|
||||
EbmlList *index_list;
|
||||
MatroskaIndex *index;
|
||||
int index_scale = 1;
|
||||
uint64_t index_scale = 1;
|
||||
int i, j;
|
||||
|
||||
if (matroska->ctx->flags & AVFMT_FLAG_IGNIDX)
|
||||
@ -2004,8 +2000,8 @@ static int matroska_parse_tracks(AVFormatContext *s)
|
||||
snprintf(buf, sizeof(buf), "%s_%d",
|
||||
ff_matroska_video_stereo_plane[planes[j].type], i);
|
||||
for (k=0; k < matroska->tracks.nb_elem; k++)
|
||||
if (planes[j].uid == tracks[k].uid) {
|
||||
av_dict_set(&s->streams[k]->metadata,
|
||||
if (planes[j].uid == tracks[k].uid && tracks[k].stream) {
|
||||
av_dict_set(&tracks[k].stream->metadata,
|
||||
"stereo_mode", buf, 0);
|
||||
break;
|
||||
}
|
||||
@ -2142,20 +2138,41 @@ static int matroska_read_header(AVFormatContext *s)
|
||||
av_dict_set(&st->metadata, "filename", attachments[j].filename, 0);
|
||||
av_dict_set(&st->metadata, "mimetype", attachments[j].mime, 0);
|
||||
st->codec->codec_id = AV_CODEC_ID_NONE;
|
||||
st->codec->codec_type = AVMEDIA_TYPE_ATTACHMENT;
|
||||
if (ff_alloc_extradata(st->codec, attachments[j].bin.size))
|
||||
break;
|
||||
memcpy(st->codec->extradata, attachments[j].bin.data,
|
||||
attachments[j].bin.size);
|
||||
|
||||
for (i = 0; ff_mkv_mime_tags[i].id != AV_CODEC_ID_NONE; i++) {
|
||||
if (!strncmp(ff_mkv_mime_tags[i].str, attachments[j].mime,
|
||||
strlen(ff_mkv_mime_tags[i].str))) {
|
||||
st->codec->codec_id = ff_mkv_mime_tags[i].id;
|
||||
for (i = 0; ff_mkv_image_mime_tags[i].id != AV_CODEC_ID_NONE; i++) {
|
||||
if (!strncmp(ff_mkv_image_mime_tags[i].str, attachments[j].mime,
|
||||
strlen(ff_mkv_image_mime_tags[i].str))) {
|
||||
st->codec->codec_id = ff_mkv_image_mime_tags[i].id;
|
||||
break;
|
||||
}
|
||||
}
|
||||
attachments[j].stream = st;
|
||||
|
||||
if (st->codec->codec_id != AV_CODEC_ID_NONE) {
|
||||
st->disposition |= AV_DISPOSITION_ATTACHED_PIC;
|
||||
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
|
||||
|
||||
av_init_packet(&st->attached_pic);
|
||||
if ((res = av_new_packet(&st->attached_pic, attachments[j].bin.size)) < 0)
|
||||
return res;
|
||||
memcpy(st->attached_pic.data, attachments[j].bin.data, attachments[j].bin.size);
|
||||
st->attached_pic.stream_index = st->index;
|
||||
st->attached_pic.flags |= AV_PKT_FLAG_KEY;
|
||||
} else {
|
||||
st->codec->codec_type = AVMEDIA_TYPE_ATTACHMENT;
|
||||
if (ff_alloc_extradata(st->codec, attachments[j].bin.size))
|
||||
break;
|
||||
memcpy(st->codec->extradata, attachments[j].bin.data,
|
||||
attachments[j].bin.size);
|
||||
|
||||
for (i = 0; ff_mkv_mime_tags[i].id != AV_CODEC_ID_NONE; i++) {
|
||||
if (!strncmp(ff_mkv_mime_tags[i].str, attachments[j].mime,
|
||||
strlen(ff_mkv_mime_tags[i].str))) {
|
||||
st->codec->codec_id = ff_mkv_mime_tags[i].id;
|
||||
break;
|
||||
}
|
||||
}
|
||||
attachments[j].stream = st;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user