Compare commits
212 Commits
Changelog (73 lines changed)
@@ -4,6 +4,79 @@ releases are sorted from youngest to oldest.

version next:


version 0.10.11

- pthread: Avoid spurious wakeups
- pthread: Fix deadlock during thread initialization
- mpegvideo: Initialize chroma_*_shift and codec_tag even if the size is 0
- vc1dec: Don't decode slices when the latest slice header failed to decode
- vc1dec: Make sure last_picture is initialized in vc1_decode_skip_blocks
- r3d: Add more input value validation
- fraps: Make the input buffer size checks more strict
- svq3: Avoid a division by zero
- rmdec: Validate the fps value
- twinvqdec: Check the ibps parameter separately
- asfdec: Check the return value of asf_read_stream_properties
- mxfdec: set audio timebase to 1/samplerate
- pcx: Check the packet size before assuming it fits a palette
- rpza: Fix a buffer size check
- xxan: Disallow odd width
- xan: Only read within the data that actually was initialized
- xan: Use bytestream2 to limit reading to within the buffer
- pcx: Consume the whole packet if giving up due to missing palette
- pngdec: Stop trying to decode once inflate returns Z_STREAM_END
- mov: Make sure the read sample count is nonnegative
- bfi: Add some very basic sanity checks for input packet sizes
- bfi: Avoid divisions by zero
- electronicarts: Add more sanity checking for the number of channels
- riffdec: Add sanity checks for the sample rate
- mvi: Add sanity checking for the audio frame size
- xwma: Avoid division by zero
- avidec: Make sure a packet is large enough before reading its data
- vqf: Make sure the bitrate is in the valid range
- vqf: Make sure sample_rate is set to a valid value
- vc1dec: Undo mpegvideo initialization if unable to allocate tables
- vc1dec: Fix leaks in ff_vc1_decode_init_alloc_tables on errors
- wnv1: Make sure the input packet is large enough
- dca: Validate the lfe parameter
- rl2: Avoid a division by zero
- wtv: Add more sanity checks for a length read from the file
- segafilm: Validate the number of audio channels
- qpeg: Add checks for running out of rows in qpeg_decode_inter
- mpegaudiodec: Validate that the number of channels fits at the given offset
- asv1: Verify the amount of extradata
- idroqdec: Make sure a video stream has been allocated before returning packets
- rv10: Validate the dimensions set from the container
- xmv: Add more sanity checks for parameters read from the bitstream
- ffv1: Make sure at least one slice context is initialized
- truemotion2: Use av_freep properly in an error path
- eacmv: Make sure a reference frame exists before referencing it
- mpeg4videodec: Check the width/height in mpeg4_decode_sprite_trajectory
- ivi_common: Make sure color planes have been initialized
- oggparseogm: Convert to use bytestream2
- rv34: Check the return value from ff_rv34_decode_init
- matroskadec: Verify realaudio codec parameters
- mace: Make sure that the channel count is set to a valid value
- svq3: Check for any negative return value from ff_h264_check_intra_pred_mode
- vp3: Check the framerate for validity
- cavsdec: Make sure a sequence header has been decoded before decoding pictures
- sierravmd: Do sanity checking of frame sizes
- omadec: Properly check lengths before incrementing the position
- mpc8: Make sure the first stream exists before parsing the seek table
- mpc8: Check the seek table size parsed from the bitstream
- zmbvdec: Check the buffer size for uncompressed data
- ape: Don't allow the seektable to be omitted
- shorten: Break out of loop looking for fmt chunk if none is found
- shorten: Use a checked bytestream reader for the wave header
- smacker: Make sure we don't fill in huffman codes out of range
- smacker: Avoid integer overflow when allocating packets
- smacker: Don't return packets in unallocated streams
- dsicin: Add some basic sanity checks for fields read from the file
- roqvideodec: check dimensions validity
- qdm2: check array index before use, fix out of array accesses
- alsdec: check block length


version 0.10.10

- x86: fft: Remove 3DNow! optimizations, they break FATE
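Most of the 0.10.11 entries above are defensive fixes of the same shape: validate sizes and parameters taken from the input before using them. A minimal sketch of that pattern, using a made-up decoder (dummy_decode_frame and HDR_SIZE are illustrative, not FFmpeg code):

```c
#include <limits.h>
#include <stdint.h>

#define HDR_SIZE 8  /* hypothetical fixed header size */

/* Reject bad input up front instead of letting it reach the decode loop. */
static int dummy_decode_frame(const uint8_t *buf, int buf_size,
                              int width, int height)
{
    /* the packet must at least hold the header
     * (compare the bfi/wnv1/avidec entries above) */
    if (buf_size < HDR_SIZE)
        return -1;

    /* dimensions must be positive and width * height must not overflow
     * (compare the roqvideodec/rv10 dimension checks) */
    if (width <= 0 || height <= 0 || width > INT_MAX / height)
        return -1;

    /* actual decoding would start here */
    return 0;
}
```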
Doxyfile (2 lines changed)
@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 0.10.10
PROJECT_NUMBER = 0.10.14

# With the PROJECT_LOGO tag one can specify an logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55
@@ -56,7 +56,7 @@
struct SwsContext *sws_opts;
AVDictionary *format_opts, *codec_opts;

const int this_year = 2013;
const int this_year = 2014;

static FILE *report_file;
configure (vendored, 13 lines changed)
@@ -54,6 +54,8 @@ if test "$E1" != 0 || test "$E2" = 0; then
exit 1
fi

test -d /usr/xpg4/bin && PATH=/usr/xpg4/bin:$PATH

show_help(){
cat <<EOF
Usage: configure [options]
@@ -688,6 +690,13 @@ check_ld(){
check_cmd $ld $LDFLAGS $flags -o $TMPE $TMPO $libs $extralibs
}

print_include(){
hdr=$1
test "${hdr%.h}" = "${hdr}" &&
echo "#include $hdr" ||
echo "#include <$hdr>"
}

check_cppflags(){
log check_cppflags "$@"
set -- $($filter_cppflags "$@")
@@ -765,7 +774,7 @@ check_func_headers(){
shift 2
{
for hdr in $headers; do
echo "#include <$hdr>"
print_include $hdr
done
for func in $funcs; do
echo "long check_$func(void) { return (long) $func; }"
@@ -3134,7 +3143,7 @@ enabled libdirac && require_pkg_config dirac \
"libdirac_decoder/dirac_parser.h libdirac_encoder/dirac_encoder.h" \
"dirac_decoder_init dirac_encoder_init"
enabled libfaac && require2 libfaac "stdint.h faac.h" faacEncGetVersion -lfaac
enabled libfreetype && require_pkg_config freetype2 "ft2build.h freetype/freetype.h" FT_Init_FreeType
enabled libfreetype && require_pkg_config freetype2 "ft2build.h FT_FREETYPE_H" FT_Init_FreeType
enabled libgsm && require libgsm gsm/gsm.h gsm_create -lgsm
enabled libmodplug && require libmodplug libmodplug/modplug.h ModPlug_Load -lmodplug
enabled libmp3lame && require "libmp3lame >= 3.98.3" lame/lame.h lame_set_VBR_quality -lmp3lame
@@ -51,14 +51,15 @@ The toolchain provided with Xcode is sufficient to build the basic
unacelerated code.

Mac OS X on PowerPC or ARM (iPhone) requires a preprocessor from
@url{http://github.com/yuvi/gas-preprocessor} to build the optimized
assembler functions. Just download the Perl script and put it somewhere
@url{https://github.com/FFmpeg/gas-preprocessor} or
@url{https://github.com/yuvi/gas-preprocessor} to build the optimized
assembler functions. Put the Perl script somewhere
in your PATH, FFmpeg's configure will pick it up automatically.

Mac OS X on amd64 and x86 requires @command{yasm} to build most of the
optimized assembler functions. @uref{http://www.finkproject.org/, Fink},
@uref{http://www.gentoo.org/proj/en/gentoo-alt/prefix/bootstrap-macos.xml, Gentoo Prefix},
@uref{http://mxcl.github.com/homebrew/, Homebrew}
@uref{https://mxcl.github.com/homebrew/, Homebrew}
or @uref{http://www.macports.org, MacPorts} can easily provide it.
@@ -242,7 +242,7 @@ data transferred over RDT).

The muxer can be used to send a stream using RTSP ANNOUNCE to a server
supporting it (currently Darwin Streaming Server and Mischa Spiegelmock's
@uref{http://github.com/revmischa/rtsp-server, RTSP server}).
@uref{https://github.com/revmischa/rtsp-server, RTSP server}).

The required syntax for a RTSP url is:
@example
@@ -47,7 +47,7 @@ int avpriv_adx_decode_header(AVCodecContext *avctx, const uint8_t *buf,
offset = AV_RB16(buf + 2) + 4;

/* if copyright string is within the provided data, validate it */
if (bufsize >= offset && memcmp(buf + offset - 6, "(c)CRI", 6))
if (bufsize >= offset && offset >= 6 && memcmp(buf + offset - 6, "(c)CRI", 6))
return AVERROR_INVALIDDATA;

/* check for encoding=3 block_size=18, sample_size=4 */
@@ -615,6 +615,12 @@ static int alac_set_info(ALACContext *alac)

/* buffer size / 2 ? */
alac->setinfo_max_samples_per_frame = bytestream_get_be32(&ptr);
if (!alac->setinfo_max_samples_per_frame ||
alac->setinfo_max_samples_per_frame > INT_MAX / sizeof(int32_t)) {
av_log(alac->avctx, AV_LOG_ERROR, "max samples per frame invalid: %u\n",
alac->setinfo_max_samples_per_frame);
return AVERROR_INVALIDDATA;
}
ptr++; /* compatible version */
alac->setinfo_sample_size = *ptr++;
alac->setinfo_rice_historymult = *ptr++;
@@ -283,7 +283,7 @@ static av_cold int read_specific_config(ALSDecContext *ctx)
|
||||
GetBitContext gb;
|
||||
uint64_t ht_size;
|
||||
int i, config_offset;
|
||||
MPEG4AudioConfig m4ac;
|
||||
MPEG4AudioConfig m4ac = {0};
|
||||
ALSSpecificConfig *sconf = &ctx->sconf;
|
||||
AVCodecContext *avctx = ctx->avctx;
|
||||
uint32_t als_id, header_size, trailer_size;
|
||||
@@ -1386,6 +1386,11 @@ static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
|
||||
|
||||
for (b = 0; b < ctx->num_blocks; b++) {
|
||||
bd.block_length = div_blocks[b];
|
||||
if (bd.block_length <= 0) {
|
||||
av_log(ctx->avctx, AV_LOG_WARNING,
|
||||
"Invalid block length %d in channel data!\n", bd.block_length);
|
||||
continue;
|
||||
}
|
||||
|
||||
for (c = 0; c < avctx->channels; c++) {
|
||||
bd.const_block = ctx->const_block + c;
|
||||
|
@@ -822,7 +822,6 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
|
||||
int16_t *samples;
|
||||
int i, ret;
|
||||
int blockstodecode;
|
||||
int bytes_used = 0;
|
||||
|
||||
/* this should never be negative, but bad things will happen if it is, so
|
||||
check it just to make sure. */
|
||||
@@ -877,7 +876,6 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
bytes_used = buf_size;
|
||||
}
|
||||
|
||||
if (!s->data) {
|
||||
@@ -920,7 +918,7 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
|
||||
*got_frame_ptr = 1;
|
||||
*(AVFrame *)data = s->frame;
|
||||
|
||||
return bytes_used;
|
||||
return (s->samples == 0) ? buf_size : 0;
|
||||
}
|
||||
|
||||
static void ape_flush(AVCodecContext *avctx)
|
||||
|
@@ -132,6 +132,13 @@ T ldr \rt, [\rn]
|
||||
T add \rn, \rn, \rm
|
||||
.endm
|
||||
|
||||
.macro ldrc_pre cc, rt, rn, rm:vararg
|
||||
A ldr\cc \rt, [\rn, \rm]!
|
||||
T itt \cc
|
||||
T add\cc \rn, \rn, \rm
|
||||
T ldr\cc \rt, [\rn]
|
||||
.endm
|
||||
|
||||
.macro ldrd_reg rt, rt2, rn, rm
|
||||
A ldrd \rt, \rt2, [\rn, \rm]
|
||||
T add \rt, \rn, \rm
|
||||
|
@@ -146,10 +146,11 @@ function ff_put_pixels8_y2_armv6, export=1
|
||||
eor r7, r5, r7
|
||||
uadd8 r10, r10, r6
|
||||
and r7, r7, r12
|
||||
ldr_pre r6, r1, r2
|
||||
ldrc_pre ne, r6, r1, r2
|
||||
uadd8 r11, r11, r7
|
||||
strd_post r8, r9, r0, r2
|
||||
ldr r7, [r1, #4]
|
||||
it ne
|
||||
ldrne r7, [r1, #4]
|
||||
strd_post r10, r11, r0, r2
|
||||
bne 1b
|
||||
|
||||
@@ -198,9 +199,10 @@ function ff_put_pixels8_y2_no_rnd_armv6, export=1
|
||||
uhadd8 r9, r5, r7
|
||||
ldr r5, [r1, #4]
|
||||
uhadd8 r12, r4, r6
|
||||
ldr_pre r6, r1, r2
|
||||
ldrc_pre ne, r6, r1, r2
|
||||
uhadd8 r14, r5, r7
|
||||
ldr r7, [r1, #4]
|
||||
it ne
|
||||
ldrne r7, [r1, #4]
|
||||
stm r0, {r8,r9}
|
||||
add r0, r0, r2
|
||||
stm r0, {r12,r14}
|
||||
|
@@ -89,7 +89,7 @@ static void ff_h264dsp_init_neon(H264DSPContext *c, const int bit_depth, const i
|
||||
c->h264_idct_dc_add = ff_h264_idct_dc_add_neon;
|
||||
c->h264_idct_add16 = ff_h264_idct_add16_neon;
|
||||
c->h264_idct_add16intra = ff_h264_idct_add16intra_neon;
|
||||
if (chroma_format_idc == 1)
|
||||
if (chroma_format_idc <= 1)
|
||||
c->h264_idct_add8 = ff_h264_idct_add8_neon;
|
||||
c->h264_idct8_add = ff_h264_idct8_add_neon;
|
||||
c->h264_idct8_dc_add = ff_h264_idct8_dc_add_neon;
|
||||
|
@@ -66,10 +66,10 @@ function ff_scalarproduct_int16_neon, export=1
|
||||
|
||||
3: vpadd.s32 d16, d0, d1
|
||||
vpadd.s32 d17, d2, d3
|
||||
vpadd.s32 d10, d4, d5
|
||||
vpadd.s32 d11, d6, d7
|
||||
vpadd.s32 d18, d4, d5
|
||||
vpadd.s32 d19, d6, d7
|
||||
vpadd.s32 d0, d16, d17
|
||||
vpadd.s32 d1, d10, d11
|
||||
vpadd.s32 d1, d18, d19
|
||||
vpadd.s32 d2, d0, d1
|
||||
vpaddl.s32 d3, d2
|
||||
vmov.32 r0, d3[0]
|
||||
@@ -106,10 +106,10 @@ function ff_scalarproduct_and_madd_int16_neon, export=1
|
||||
|
||||
vpadd.s32 d16, d0, d1
|
||||
vpadd.s32 d17, d2, d3
|
||||
vpadd.s32 d10, d4, d5
|
||||
vpadd.s32 d11, d6, d7
|
||||
vpadd.s32 d18, d4, d5
|
||||
vpadd.s32 d19, d6, d7
|
||||
vpadd.s32 d0, d16, d17
|
||||
vpadd.s32 d1, d10, d11
|
||||
vpadd.s32 d1, d18, d19
|
||||
vpadd.s32 d2, d0, d1
|
||||
vpaddl.s32 d3, d2
|
||||
vmov.32 r0, d3[0]
|
||||
|
@@ -535,6 +535,11 @@ static av_cold int decode_init(AVCodecContext *avctx){
|
||||
int i;
|
||||
const int scale= avctx->codec_id == CODEC_ID_ASV1 ? 1 : 2;
|
||||
|
||||
if (avctx->extradata_size < 1) {
|
||||
av_log(avctx, AV_LOG_ERROR, "No extradata provided\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
common_init(avctx);
|
||||
init_vlcs(a);
|
||||
ff_init_scantable(a->dsp.idct_permutation, &a->scantable, scantab);
|
||||
|
@@ -198,6 +198,16 @@ static av_always_inline int bytestream2_tell_p(PutByteContext *p)
return (int)(p->buffer - p->buffer_start);
}

static av_always_inline int bytestream2_size(GetByteContext *g)
{
return (int)(g->buffer_end - g->buffer_start);
}

static av_always_inline int bytestream2_size_p(PutByteContext *p)
{
return (int)(p->buffer_end - p->buffer_start);
}

static av_always_inline int bytestream2_seek(GetByteContext *g,
int offset,
int whence)
@@ -323,6 +333,32 @@ static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p)
return p->eof;
}

static av_always_inline unsigned int bytestream2_copy_bufferu(PutByteContext *p,
GetByteContext *g,
unsigned int size)
{
memcpy(p->buffer, g->buffer, size);
p->buffer += size;
g->buffer += size;
return size;
}

static av_always_inline unsigned int bytestream2_copy_buffer(PutByteContext *p,
GetByteContext *g,
unsigned int size)
{
int size2;

if (p->eof)
return 0;
size = FFMIN(g->buffer_end - g->buffer, size);
size2 = FFMIN(p->buffer_end - p->buffer, size);
if (size2 != size)
p->eof = 1;

return bytestream2_copy_bufferu(p, g, size2);
}

static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b,
uint8_t *dst,
unsigned int size)
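The new bytestream2_size() and bytestream2_copy_buffer() helpers shown above pair a bounded reader with a bounded writer. A minimal usage sketch, assuming it is compiled inside libavcodec so that "bytestream.h" (which declares these readers and writers) is available; copy_packet_payload and its 4-byte header are hypothetical, not part of FFmpeg:

```c
#include "bytestream.h"

/* Copy a packet's payload into dst without reading or writing past either
 * buffer; bytestream2_copy_buffer() clamps the copy to what both sides hold. */
static int copy_packet_payload(uint8_t *dst, int dst_size,
                               const uint8_t *src, int src_size)
{
    GetByteContext gb;
    PutByteContext pb;

    bytestream2_init(&gb, src, src_size);
    bytestream2_init_writer(&pb, dst, dst_size);

    /* total size of the underlying buffer, independent of the read position */
    if (bytestream2_size(&gb) < 4)
        return -1;                /* too short for the assumed header */

    bytestream2_skip(&gb, 4);     /* skip the hypothetical 4-byte header */
    return bytestream2_copy_buffer(&pb, &gb, bytestream2_get_bytes_left(&gb));
}
```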
@@ -166,8 +166,8 @@ static inline int decode_residual_inter(AVSContext *h) {
|
||||
|
||||
/* get coded block pattern */
|
||||
int cbp= get_ue_golomb(&h->s.gb);
|
||||
if(cbp > 63U){
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "illegal inter cbp\n");
|
||||
if(cbp > 63 || cbp < 0){
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "illegal inter cbp %d\n", cbp);
|
||||
return -1;
|
||||
}
|
||||
h->cbp = cbp_tab[cbp][1];
|
||||
@@ -226,7 +226,7 @@ static int decode_mb_i(AVSContext *h, int cbp_code) {
|
||||
/* get coded block pattern */
|
||||
if(h->pic_type == AV_PICTURE_TYPE_I)
|
||||
cbp_code = get_ue_golomb(gb);
|
||||
if(cbp_code > 63U){
|
||||
if(cbp_code > 63 || cbp_code < 0 ){
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "illegal intra cbp\n");
|
||||
return -1;
|
||||
}
|
||||
@@ -468,6 +468,11 @@ static int decode_pic(AVSContext *h) {
|
||||
int skip_count = -1;
|
||||
enum cavs_mb mb_type;
|
||||
|
||||
if (!h->top_qp) {
|
||||
av_log(h, AV_LOG_ERROR, "No sequence header decoded yet\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (!s->context_initialized) {
|
||||
s->avctx->idct_algo = FF_IDCT_CAVS;
|
||||
if (MPV_common_init(s) < 0)
|
||||
|
@@ -577,6 +577,11 @@ static int dca_parse_frame_header(DCAContext *s)
|
||||
s->lfe = get_bits(&s->gb, 2);
|
||||
s->predictor_history = get_bits(&s->gb, 1);
|
||||
|
||||
if (s->lfe > 2) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Invalid LFE value: %d\n", s->lfe);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* TODO: check CRC */
|
||||
if (s->crc_present)
|
||||
s->header_crc = get_bits(&s->gb, 16);
|
||||
|
@@ -1332,8 +1332,8 @@ static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t *src[5],
|
||||
motion_y >>= s->chroma_y_shift;
|
||||
}
|
||||
|
||||
mx = motion_x & ~(-1 << s->mv_precision);
|
||||
my = motion_y & ~(-1 << s->mv_precision);
|
||||
mx = motion_x & ~(-1U << s->mv_precision);
|
||||
my = motion_y & ~(-1U << s->mv_precision);
|
||||
motion_x >>= s->mv_precision;
|
||||
motion_y >>= s->mv_precision;
|
||||
/* normalize subpel coordinates to epel */
|
||||
|
@@ -220,7 +220,7 @@ static int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
|
||||
|
||||
static int dnxhd_init_rc(DNXHDEncContext *ctx)
|
||||
{
|
||||
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_rc, 8160*ctx->m.avctx->qmax*sizeof(RCEntry), fail);
|
||||
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_rc, 8160*(ctx->m.avctx->qmax + 1)*sizeof(RCEntry), fail);
|
||||
if (ctx->m.avctx->mb_decision != FF_MB_DECISION_RD)
|
||||
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_cmp, ctx->m.mb_num*sizeof(RCCMPEntry), fail);
|
||||
|
||||
|
@@ -1912,7 +1912,7 @@ void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type){
|
||||
|
||||
static void add_bytes_c(uint8_t *dst, uint8_t *src, int w){
|
||||
long i;
|
||||
for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
|
||||
for (i = 0; i <= w - (int) sizeof(long); i += sizeof(long)) {
|
||||
long a = *(long*)(src+i);
|
||||
long b = *(long*)(dst+i);
|
||||
*(long*)(dst+i) = ((a&pb_7f) + (b&pb_7f)) ^ ((a^b)&pb_80);
|
||||
@@ -1937,7 +1937,7 @@ static void diff_bytes_c(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
|
||||
}
|
||||
}else
|
||||
#endif
|
||||
for(i=0; i<=w-(int)sizeof(long); i+=sizeof(long)){
|
||||
for (i = 0; i <= w - (int) sizeof(long); i += sizeof(long)) {
|
||||
long a = *(long*)(src1+i);
|
||||
long b = *(long*)(src2+i);
|
||||
*(long*)(dst+i) = ((a|pb_80) - (b&pb_7f)) ^ ((a^b^pb_80)&pb_80);
|
||||
|
@@ -112,8 +112,8 @@ static void cmv_decode_inter(CmvContext * s, const uint8_t *buf, const uint8_t *
|
||||
int yoffset = ((buf[i] >> 4)) - 7;
|
||||
if (s->last_frame.data[0])
|
||||
cmv_motcomp(s->frame.data[0], s->frame.linesize[0],
|
||||
s->last_frame.data[0], s->last_frame.linesize[0],
|
||||
x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
|
||||
s->last_frame.data[0], s->last_frame.linesize[0],
|
||||
x*4, y*4, xoffset, yoffset, s->avctx->width, s->avctx->height);
|
||||
}
|
||||
i++;
|
||||
}
|
||||
|
@@ -722,6 +722,10 @@ static av_cold int init_slice_contexts(FFV1Context *f){
|
||||
int i;
|
||||
|
||||
f->slice_count= f->num_h_slices * f->num_v_slices;
|
||||
if (f->slice_count <= 0) {
|
||||
av_log(f->avctx, AV_LOG_ERROR, "Invalid number of slices\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
for(i=0; i<f->slice_count; i++){
|
||||
FFV1Context *fs= av_mallocz(sizeof(*fs));
|
||||
|
@@ -389,7 +389,9 @@ static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
|
||||
s->diff_start = get_bits(&gb, 8);
|
||||
s->diff_height = get_bits(&gb, 8);
|
||||
if (s->diff_start + s->diff_height > cur_blk_height) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Block parameters invalid\n");
|
||||
av_log(avctx, AV_LOG_ERROR,
|
||||
"Block parameters invalid: %d + %d > %d\n",
|
||||
s->diff_start, s->diff_height, cur_blk_height);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
av_log(avctx, AV_LOG_DEBUG,
|
||||
|
@@ -142,6 +142,11 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
const int planes = 3;
|
||||
enum PixelFormat pix_fmt;
|
||||
|
||||
if (buf_size < 4) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Packet is too short\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
header = AV_RL32(buf);
|
||||
version = header & 0xff;
|
||||
header_size = (header & (1<<30))? 8 : 4; /* bit 30 means pad to 8 bytes */
|
||||
@@ -180,7 +185,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
}
|
||||
avctx->pix_fmt = pix_fmt;
|
||||
|
||||
switch(version) {
|
||||
switch (version) {
|
||||
case 0:
|
||||
default:
|
||||
/* Fraps v0 is a reordered YUV420 */
|
||||
@@ -219,6 +224,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
|
||||
case 1:
|
||||
/* Fraps v1 is an upside-down BGR24 */
|
||||
|
||||
if (avctx->reget_buffer(avctx, f)) {
|
||||
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
|
||||
return -1;
|
||||
|
@@ -355,7 +355,7 @@ static inline int init_get_bits(GetBitContext *s, const uint8_t *buffer,
|
||||
int buffer_size;
|
||||
int ret = 0;
|
||||
|
||||
if (bit_size > INT_MAX - 7 || bit_size < 0) {
|
||||
if (bit_size > INT_MAX - 7 || bit_size < 0 || !buffer) {
|
||||
buffer_size = bit_size = 0;
|
||||
buffer = NULL;
|
||||
ret = AVERROR_INVALIDDATA;
|
||||
|
@@ -106,10 +106,10 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h){
|
||||
|
||||
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma){
|
||||
MpegEncContext * const s = &h->s;
|
||||
static const int8_t top [7]= {LEFT_DC_PRED8x8, 1,-1,-1};
|
||||
static const int8_t left[7]= { TOP_DC_PRED8x8,-1, 2,-1,DC_128_PRED8x8};
|
||||
static const int8_t top[4] = { LEFT_DC_PRED8x8, 1, -1, -1 };
|
||||
static const int8_t left[5] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };
|
||||
|
||||
if(mode > 6U) {
|
||||
if(mode > 3U) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "out of range intra chroma pred mode at %d %d\n", s->mb_x, s->mb_y);
|
||||
return -1;
|
||||
}
|
||||
@@ -1300,6 +1300,8 @@ int ff_h264_frame_start(H264Context *h){
|
||||
int i;
|
||||
const int pixel_shift = h->pixel_shift;
|
||||
|
||||
h->next_output_pic = NULL;
|
||||
|
||||
if(MPV_frame_start(s, s->avctx) < 0)
|
||||
return -1;
|
||||
ff_er_frame_start(s);
|
||||
@@ -1349,8 +1351,6 @@ int ff_h264_frame_start(H264Context *h){
|
||||
s->current_picture_ptr->field_poc[0]=
|
||||
s->current_picture_ptr->field_poc[1]= INT_MAX;
|
||||
|
||||
h->next_output_pic = NULL;
|
||||
|
||||
assert(s->current_picture_ptr->long_ref==0);
|
||||
|
||||
return 0;
|
||||
@@ -2607,6 +2607,52 @@ int ff_h264_get_profile(SPS *sps)
|
||||
return profile;
|
||||
}
|
||||
|
||||
static int h264_set_parameter_from_sps(H264Context *h)
|
||||
{
|
||||
MpegEncContext *s = &h->s;
|
||||
|
||||
if (s->flags & CODEC_FLAG_LOW_DELAY ||
|
||||
(h->sps.bitstream_restriction_flag &&
|
||||
!h->sps.num_reorder_frames)) {
|
||||
if (s->avctx->has_b_frames > 1 || h->delayed_pic[0])
|
||||
av_log(h->s.avctx, AV_LOG_WARNING, "Delayed frames seen. "
|
||||
"Reenabling low delay requires a codec flush.\n");
|
||||
else
|
||||
s->low_delay = 1;
|
||||
}
|
||||
|
||||
if (s->avctx->has_b_frames < 2)
|
||||
s->avctx->has_b_frames = !s->low_delay;
|
||||
|
||||
if (s->avctx->bits_per_raw_sample != h->sps.bit_depth_luma ||
|
||||
h->cur_chroma_format_idc != h->sps.chroma_format_idc) {
|
||||
if (s->avctx->codec &&
|
||||
s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU &&
|
||||
(h->sps.bit_depth_luma != 8 || h->sps.chroma_format_idc > 1)) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"VDPAU decoding does not support video colorspace.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) {
|
||||
s->avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
|
||||
h->cur_chroma_format_idc = h->sps.chroma_format_idc;
|
||||
h->pixel_shift = h->sps.bit_depth_luma > 8;
|
||||
|
||||
ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma,
|
||||
h->sps.chroma_format_idc);
|
||||
ff_h264_pred_init(&h->hpc, s->codec_id, h->sps.bit_depth_luma,
|
||||
h->sps.chroma_format_idc);
|
||||
s->dsp.dct_bits = h->sps.bit_depth_luma > 8 ? 32 : 16;
|
||||
dsputil_init(&s->dsp, s->avctx);
|
||||
} else {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Unsupported bit depth: %d\n",
|
||||
h->sps.bit_depth_luma);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode a slice header.
|
||||
* This will also call MPV_common_init() and frame_start() as needed.
|
||||
@@ -2624,7 +2670,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
int num_ref_idx_active_override_flag;
|
||||
unsigned int slice_type, tmp, i, j;
|
||||
int default_ref_list_done = 0;
|
||||
int last_pic_structure, last_pic_dropable;
|
||||
int last_pic_structure, last_pic_dropable, ret;
|
||||
|
||||
/* FIXME: 2tap qpel isn't implemented for high bit depth. */
|
||||
if((s->avctx->flags2 & CODEC_FLAG2_FAST) && !h->nal_ref_idc && !h->pixel_shift){
|
||||
@@ -2672,7 +2718,14 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
h->slice_type= slice_type;
|
||||
h->slice_type_nos= slice_type & 3;
|
||||
|
||||
s->pict_type= h->slice_type; // to make a few old functions happy, it's wrong though
|
||||
if (h->nal_unit_type == NAL_IDR_SLICE &&
|
||||
h->slice_type_nos != AV_PICTURE_TYPE_I) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
// to make a few old functions happy, it's wrong though
|
||||
s->pict_type = h->slice_type;
|
||||
|
||||
pps_id= get_ue_golomb(&s->gb);
|
||||
if(pps_id>=MAX_PPS_COUNT){
|
||||
@@ -2689,7 +2742,17 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "non-existing SPS %u referenced\n", h->pps.sps_id);
|
||||
return -1;
|
||||
}
|
||||
h->sps = *h0->sps_buffers[h->pps.sps_id];
|
||||
|
||||
if (h->pps.sps_id != h->current_sps_id ||
|
||||
h0->sps_buffers[h->pps.sps_id]->new) {
|
||||
h0->sps_buffers[h->pps.sps_id]->new = 0;
|
||||
|
||||
h->current_sps_id = h->pps.sps_id;
|
||||
h->sps = *h0->sps_buffers[h->pps.sps_id];
|
||||
|
||||
if ((ret = h264_set_parameter_from_sps(h)) < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
s->avctx->profile = ff_h264_get_profile(&h->sps);
|
||||
s->avctx->level = h->sps.level_idc;
|
||||
@@ -2989,8 +3052,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
|
||||
Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
|
||||
av_log(h->s.avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n", h->frame_num, h->prev_frame_num);
|
||||
if (ff_h264_frame_start(h) < 0)
|
||||
if (ff_h264_frame_start(h) < 0) {
|
||||
h0->s.first_field = 0;
|
||||
return -1;
|
||||
}
|
||||
h->prev_frame_num++;
|
||||
h->prev_frame_num %= 1<<h->sps.log2_max_frame_num;
|
||||
s->current_picture_ptr->frame_num= h->prev_frame_num;
|
||||
@@ -3224,8 +3289,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
}
|
||||
|
||||
h->deblocking_filter = 1;
|
||||
h->slice_alpha_c0_offset = 52;
|
||||
h->slice_beta_offset = 52;
|
||||
h->slice_alpha_c0_offset = 0;
|
||||
h->slice_beta_offset = 0;
|
||||
if( h->pps.deblocking_filter_parameters_present ) {
|
||||
tmp= get_ue_golomb_31(&s->gb);
|
||||
if(tmp > 2){
|
||||
@@ -3236,12 +3301,16 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
if(h->deblocking_filter < 2)
|
||||
h->deblocking_filter^= 1; // 1<->0
|
||||
|
||||
if( h->deblocking_filter ) {
|
||||
h->slice_alpha_c0_offset += get_se_golomb(&s->gb) << 1;
|
||||
h->slice_beta_offset += get_se_golomb(&s->gb) << 1;
|
||||
if( h->slice_alpha_c0_offset > 104U
|
||||
|| h->slice_beta_offset > 104U){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "deblocking filter parameters %d %d out of range\n", h->slice_alpha_c0_offset, h->slice_beta_offset);
|
||||
if (h->deblocking_filter) {
|
||||
h->slice_alpha_c0_offset = get_se_golomb(&s->gb) * 2;
|
||||
h->slice_beta_offset = get_se_golomb(&s->gb) * 2;
|
||||
if (h->slice_alpha_c0_offset > 12 ||
|
||||
h->slice_alpha_c0_offset < -12 ||
|
||||
h->slice_beta_offset > 12 ||
|
||||
h->slice_beta_offset < -12) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"deblocking filter parameters %d %d out of range\n",
|
||||
h->slice_alpha_c0_offset, h->slice_beta_offset);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@@ -3270,14 +3339,12 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
}
|
||||
}
|
||||
}
|
||||
h->qp_thresh = 15 + 52 - FFMIN(h->slice_alpha_c0_offset, h->slice_beta_offset)
|
||||
- FFMAX3(0, h->pps.chroma_qp_index_offset[0], h->pps.chroma_qp_index_offset[1])
|
||||
+ 6 * (h->sps.bit_depth_luma - 8);
|
||||
|
||||
#if 0 //FMO
|
||||
if( h->pps.num_slice_groups > 1 && h->pps.mb_slice_group_map_type >= 3 && h->pps.mb_slice_group_map_type <= 5)
|
||||
slice_group_change_cycle= get_bits(&s->gb, ?);
|
||||
#endif
|
||||
h->qp_thresh = 15 -
|
||||
FFMIN(h->slice_alpha_c0_offset, h->slice_beta_offset) -
|
||||
FFMAX3(0,
|
||||
h->pps.chroma_qp_index_offset[0],
|
||||
h->pps.chroma_qp_index_offset[1]) +
|
||||
6 * (h->sps.bit_depth_luma - 8);
|
||||
|
||||
h0->last_slice_type = slice_type;
|
||||
h->slice_num = ++h0->current_slice;
|
||||
@@ -3338,7 +3405,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
|
||||
s->current_picture_ptr->field_poc[0], s->current_picture_ptr->field_poc[1],
|
||||
h->ref_count[0], h->ref_count[1],
|
||||
s->qscale,
|
||||
h->deblocking_filter, h->slice_alpha_c0_offset/2-26, h->slice_beta_offset/2-26,
|
||||
h->deblocking_filter,
|
||||
h->slice_alpha_c0_offset, h->slice_beta_offset,
|
||||
h->use_weight,
|
||||
h->use_weight==1 && h->use_weight_chroma ? "c" : "",
|
||||
h->slice_type == AV_PICTURE_TYPE_B ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : ""
|
||||
@@ -3821,6 +3889,12 @@ static int execute_decode_slices(H264Context *h, int context_count){
|
||||
H264Context *hx;
|
||||
int i;
|
||||
|
||||
if (s->mb_y >= s->mb_height) {
|
||||
av_log(s->avctx, AV_LOG_ERROR,
|
||||
"Input contains more MB rows than the frame height.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
if (s->avctx->hwaccel || s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
|
||||
return 0;
|
||||
if(context_count == 1) {
|
||||
@@ -4033,12 +4107,24 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
}
|
||||
break;
|
||||
case NAL_DPA:
|
||||
if (s->flags2 & CODEC_FLAG2_CHUNKS) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR,
|
||||
"Decoding in chunks is not supported for "
|
||||
"partitioned slices.\n");
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
|
||||
init_get_bits(&hx->s.gb, ptr, bit_length);
|
||||
hx->intra_gb_ptr=
|
||||
hx->inter_gb_ptr= NULL;
|
||||
|
||||
if ((err = decode_slice_header(hx, h)) < 0)
|
||||
if ((err = decode_slice_header(hx, h)) < 0) {
|
||||
/* make sure data_partitioning is cleared if it was set
|
||||
* before, so we don't try decoding a slice without a valid
|
||||
* slice header later */
|
||||
s->data_partitioning = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
hx->s.data_partitioning = 1;
|
||||
|
||||
@@ -4073,24 +4159,9 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
ff_h264_decode_seq_parameter_set(h);
|
||||
}
|
||||
|
||||
if (s->flags & CODEC_FLAG_LOW_DELAY ||
|
||||
(h->sps.bitstream_restriction_flag &&
|
||||
!h->sps.num_reorder_frames)) {
|
||||
if (s->avctx->has_b_frames > 1 || h->delayed_pic[0])
|
||||
av_log(avctx, AV_LOG_WARNING, "Delayed frames seen "
|
||||
"reenabling low delay requires a codec "
|
||||
"flush.\n");
|
||||
else
|
||||
s->low_delay = 1;
|
||||
}
|
||||
|
||||
if(avctx->has_b_frames < 2)
|
||||
avctx->has_b_frames= !s->low_delay;
|
||||
|
||||
if (h->sps.bit_depth_luma != h->sps.bit_depth_chroma) {
|
||||
av_log_missing_feature(s->avctx,
|
||||
"Different bit depth between chroma and luma", 1);
|
||||
return AVERROR_PATCHWELCOME;
|
||||
if (h264_set_parameter_from_sps(h) < 0) {
|
||||
buf_index = -1;
|
||||
goto end;
|
||||
}
|
||||
break;
|
||||
case NAL_PPS:
|
||||
@@ -4115,9 +4186,10 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
|
||||
context_count = 0;
|
||||
}
|
||||
|
||||
if (err < 0)
|
||||
if (err < 0) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "decode_slice_header error\n");
|
||||
else if(err == 1) {
|
||||
h->ref_count[0] = h->ref_count[1] = h->list_count = 0;
|
||||
} else if (err == 1) {
|
||||
/* Slice could not be decoded in parallel mode, copy down
|
||||
* NAL unit stuff to context 0 and restart. Note that
|
||||
* rbsp_buffer is not transferred, but since we no longer
|
||||
@@ -4168,6 +4240,9 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
|
||||
s->flags= avctx->flags;
|
||||
s->flags2= avctx->flags2;
|
||||
/* reset data partitioning here, to ensure GetBitContexts from previous
|
||||
* packets do not get used. */
|
||||
s->data_partitioning = 0;
|
||||
|
||||
/* end of stream, output what is still in the buffers */
|
||||
if (buf_size == 0) {
|
||||
|
@@ -206,6 +206,7 @@ typedef struct SPS{
|
||||
int bit_depth_chroma; ///< bit_depth_chroma_minus8 + 8
|
||||
int residual_color_transform_flag; ///< residual_colour_transform_flag
|
||||
int constraint_set_flags; ///< constraint_set[0-3]_flag
|
||||
int new; ///< flag to keep track if the decoder context needs re-init due to changed SPS
|
||||
}SPS;
|
||||
|
||||
/**
|
||||
@@ -332,6 +333,7 @@ typedef struct H264Context{
|
||||
int emu_edge_width;
|
||||
int emu_edge_height;
|
||||
|
||||
unsigned current_sps_id; ///< id of the current SPS
|
||||
SPS sps; ///< current sps
|
||||
|
||||
/**
|
||||
|
@@ -770,6 +770,10 @@ decode_intra_mb:
|
||||
|
||||
// We assume these blocks are very rare so we do not optimize it.
|
||||
align_get_bits(&s->gb);
|
||||
if (get_bits_left(&s->gb) < mb_size) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Not enough data for an intra PCM block.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
// The pixels are stored in the same order as levels in h->mb array.
|
||||
for(x=0; x < mb_size; x++){
|
||||
|
@@ -254,8 +254,8 @@ static av_always_inline void h264_filter_mb_fast_internal(H264Context *h,
|
||||
int top_type= h->top_type;
|
||||
|
||||
int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
|
||||
int a = h->slice_alpha_c0_offset - qp_bd_offset;
|
||||
int b = h->slice_beta_offset - qp_bd_offset;
|
||||
int a = 52 + h->slice_alpha_c0_offset - qp_bd_offset;
|
||||
int b = 52 + h->slice_beta_offset - qp_bd_offset;
|
||||
|
||||
int mb_type = s->current_picture.f.mb_type[mb_xy];
|
||||
int qp = s->current_picture.f.qscale_table[mb_xy];
|
||||
@@ -715,8 +715,8 @@ void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint
|
||||
av_unused int dir;
|
||||
int chroma = !(CONFIG_GRAY && (s->flags&CODEC_FLAG_GRAY));
|
||||
int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
|
||||
int a = h->slice_alpha_c0_offset - qp_bd_offset;
|
||||
int b = h->slice_beta_offset - qp_bd_offset;
|
||||
int a = 52 + h->slice_alpha_c0_offset - qp_bd_offset;
|
||||
int b = 52 + h->slice_beta_offset - qp_bd_offset;
|
||||
|
||||
if (FRAME_MBAFF
|
||||
// and current and left pair do not have the same interlaced type
|
||||
|
@@ -156,7 +156,7 @@ pps:
|
||||
goto fail;
|
||||
|
||||
/* prepend only to the first type 5 NAL unit of an IDR picture */
|
||||
if (ctx->first_idr && unit_type == 5) {
|
||||
if (ctx->first_idr && (unit_type == 5 || unit_type == 7 || unit_type == 8)) {
|
||||
if ((ret=alloc_and_copy(poutbuf, poutbuf_size,
|
||||
avctx->extradata, avctx->extradata_size,
|
||||
buf, nal_size)) < 0)
|
||||
|
@@ -250,7 +250,9 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps){
|
||||
}
|
||||
|
||||
if(sps->num_reorder_frames > 16U /*max_dec_frame_buffering || max_dec_frame_buffering > 16*/){
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "illegal num_reorder_frames %d\n", sps->num_reorder_frames);
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "Clipping illegal num_reorder_frames %d\n",
|
||||
sps->num_reorder_frames);
|
||||
sps->num_reorder_frames = 16;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@@ -368,6 +370,11 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
|
||||
sps->bit_depth_luma, sps->bit_depth_chroma);
|
||||
goto fail;
|
||||
}
|
||||
if (sps->bit_depth_chroma != sps->bit_depth_luma) {
|
||||
av_log_missing_feature(s->avctx,
|
||||
"Different bit depth between chroma and luma", 1);
|
||||
goto fail;
|
||||
}
|
||||
sps->transform_bypass = get_bits1(&s->gb);
|
||||
decode_scaling_matrices(h, sps, NULL, 1, sps->scaling_matrix4, sps->scaling_matrix8);
|
||||
}else{
|
||||
@@ -487,10 +494,13 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
|
||||
sps->bit_depth_luma
|
||||
);
|
||||
}
|
||||
sps->new = 1;
|
||||
|
||||
av_free(h->sps_buffers[sps_id]);
|
||||
h->sps_buffers[sps_id]= sps;
|
||||
h->sps = *sps;
|
||||
h->sps_buffers[sps_id] = sps;
|
||||
h->sps = *sps;
|
||||
h->current_sps_id = sps_id;
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
av_free(sps);
|
||||
|
@@ -63,20 +63,22 @@ static int split_field_copy(Picture *dest, Picture *src,
|
||||
return match;
|
||||
}
|
||||
|
||||
static int build_def_list(Picture *def, Picture **in, int len, int is_long, int sel){
|
||||
static int build_def_list(Picture *def, int def_len,
|
||||
Picture **in, int len, int is_long, int sel)
|
||||
{
|
||||
int i[2]={0};
|
||||
int index=0;
|
||||
|
||||
while(i[0]<len || i[1]<len){
|
||||
while ((i[0] < len || i[1] < len) && index < def_len) {
|
||||
while (i[0] < len && !(in[ i[0] ] && (in[ i[0] ]->f.reference & sel)))
|
||||
i[0]++;
|
||||
while (i[1] < len && !(in[ i[1] ] && (in[ i[1] ]->f.reference & (sel^3))))
|
||||
i[1]++;
|
||||
if(i[0] < len){
|
||||
if (i[0] < len && index < def_len) {
|
||||
in[ i[0] ]->pic_id= is_long ? i[0] : in[ i[0] ]->frame_num;
|
||||
split_field_copy(&def[index++], in[ i[0]++ ], sel , 1);
|
||||
}
|
||||
if(i[1] < len){
|
||||
if (i[1] < len && index < def_len) {
|
||||
in[ i[1] ]->pic_id= is_long ? i[1] : in[ i[1] ]->frame_num;
|
||||
split_field_copy(&def[index++], in[ i[1]++ ], sel^3, 0);
|
||||
}
|
||||
@@ -124,9 +126,12 @@ int ff_h264_fill_default_ref_list(H264Context *h){
|
||||
len= add_sorted(sorted , h->short_ref, h->short_ref_count, cur_poc, 1^list);
|
||||
len+=add_sorted(sorted+len, h->short_ref, h->short_ref_count, cur_poc, 0^list);
|
||||
assert(len<=32);
|
||||
len= build_def_list(h->default_ref_list[list] , sorted , len, 0, s->picture_structure);
|
||||
len+=build_def_list(h->default_ref_list[list]+len, h->long_ref, 16 , 1, s->picture_structure);
|
||||
assert(len<=32);
|
||||
|
||||
len = build_def_list(h->default_ref_list[list], FF_ARRAY_ELEMS(h->default_ref_list[0]),
|
||||
sorted, len, 0, s->picture_structure);
|
||||
len += build_def_list(h->default_ref_list[list] + len,
|
||||
FF_ARRAY_ELEMS(h->default_ref_list[0]) - len,
|
||||
h->long_ref, 16, 1, s->picture_structure);
|
||||
|
||||
if(len < h->ref_count[list])
|
||||
memset(&h->default_ref_list[list][len], 0, sizeof(Picture)*(h->ref_count[list] - len));
|
||||
@@ -139,9 +144,12 @@ int ff_h264_fill_default_ref_list(H264Context *h){
|
||||
FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]);
|
||||
}
|
||||
}else{
|
||||
len = build_def_list(h->default_ref_list[0] , h->short_ref, h->short_ref_count, 0, s->picture_structure);
|
||||
len+= build_def_list(h->default_ref_list[0]+len, h-> long_ref, 16 , 1, s->picture_structure);
|
||||
assert(len <= 32);
|
||||
len = build_def_list(h->default_ref_list[0], FF_ARRAY_ELEMS(h->default_ref_list[0]),
|
||||
h->short_ref, h->short_ref_count, 0, s->picture_structure);
|
||||
len += build_def_list(h->default_ref_list[0] + len,
|
||||
FF_ARRAY_ELEMS(h->default_ref_list[0]) - len,
|
||||
h-> long_ref, 16, 1, s->picture_structure);
|
||||
|
||||
if(len < h->ref_count[0])
|
||||
memset(&h->default_ref_list[0][len], 0, sizeof(Picture)*(h->ref_count[0] - len));
|
||||
}
|
||||
|
@@ -53,13 +53,13 @@ void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_fo
|
||||
c->h264_idct8_dc_add= FUNC(ff_h264_idct8_dc_add, depth);\
|
||||
c->h264_idct_add16 = FUNC(ff_h264_idct_add16, depth);\
|
||||
c->h264_idct8_add4 = FUNC(ff_h264_idct8_add4, depth);\
|
||||
if (chroma_format_idc == 1)\
|
||||
if (chroma_format_idc <= 1)\
|
||||
c->h264_idct_add8 = FUNC(ff_h264_idct_add8, depth);\
|
||||
else\
|
||||
c->h264_idct_add8 = FUNC(ff_h264_idct_add8_422, depth);\
|
||||
c->h264_idct_add16intra= FUNC(ff_h264_idct_add16intra, depth);\
|
||||
c->h264_luma_dc_dequant_idct= FUNC(ff_h264_luma_dc_dequant_idct, depth);\
|
||||
if (chroma_format_idc == 1)\
|
||||
if (chroma_format_idc <= 1)\
|
||||
c->h264_chroma_dc_dequant_idct= FUNC(ff_h264_chroma_dc_dequant_idct, depth);\
|
||||
else\
|
||||
c->h264_chroma_dc_dequant_idct= FUNC(ff_h264_chroma422_dc_dequant_idct, depth);\
|
||||
@@ -80,20 +80,20 @@ void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_fo
|
||||
c->h264_h_loop_filter_luma_intra= FUNC(h264_h_loop_filter_luma_intra, depth);\
|
||||
c->h264_h_loop_filter_luma_mbaff_intra= FUNC(h264_h_loop_filter_luma_mbaff_intra, depth);\
|
||||
c->h264_v_loop_filter_chroma= FUNC(h264_v_loop_filter_chroma, depth);\
|
||||
if (chroma_format_idc == 1)\
|
||||
if (chroma_format_idc <= 1)\
|
||||
c->h264_h_loop_filter_chroma= FUNC(h264_h_loop_filter_chroma, depth);\
|
||||
else\
|
||||
c->h264_h_loop_filter_chroma= FUNC(h264_h_loop_filter_chroma422, depth);\
|
||||
if (chroma_format_idc == 1)\
|
||||
if (chroma_format_idc <= 1)\
|
||||
c->h264_h_loop_filter_chroma_mbaff= FUNC(h264_h_loop_filter_chroma_mbaff, depth);\
|
||||
else\
|
||||
c->h264_h_loop_filter_chroma_mbaff= FUNC(h264_h_loop_filter_chroma422_mbaff, depth);\
|
||||
c->h264_v_loop_filter_chroma_intra= FUNC(h264_v_loop_filter_chroma_intra, depth);\
|
||||
if (chroma_format_idc == 1)\
|
||||
if (chroma_format_idc <= 1)\
|
||||
c->h264_h_loop_filter_chroma_intra= FUNC(h264_h_loop_filter_chroma_intra, depth);\
|
||||
else\
|
||||
c->h264_h_loop_filter_chroma_intra= FUNC(h264_h_loop_filter_chroma422_intra, depth);\
|
||||
if (chroma_format_idc == 1)\
|
||||
if (chroma_format_idc <= 1)\
|
||||
c->h264_h_loop_filter_chroma_mbaff_intra= FUNC(h264_h_loop_filter_chroma_mbaff_intra, depth);\
|
||||
else\
|
||||
c->h264_h_loop_filter_chroma_mbaff_intra= FUNC(h264_h_loop_filter_chroma422_mbaff_intra, depth);\
|
||||
|
@@ -434,7 +434,7 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, co
|
||||
h->pred8x8l[TOP_DC_PRED ]= FUNCC(pred8x8l_top_dc , depth);\
|
||||
h->pred8x8l[DC_128_PRED ]= FUNCC(pred8x8l_128_dc , depth);\
|
||||
\
|
||||
if (chroma_format_idc == 1) {\
|
||||
if (chroma_format_idc <= 1) {\
|
||||
h->pred8x8[VERT_PRED8x8 ]= FUNCC(pred8x8_vertical , depth);\
|
||||
h->pred8x8[HOR_PRED8x8 ]= FUNCC(pred8x8_horizontal , depth);\
|
||||
} else {\
|
||||
@@ -442,7 +442,7 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, co
|
||||
h->pred8x8[HOR_PRED8x8 ]= FUNCC(pred8x16_horizontal , depth);\
|
||||
}\
|
||||
if (codec_id != CODEC_ID_VP8) {\
|
||||
if (chroma_format_idc == 1) {\
|
||||
if (chroma_format_idc <= 1) {\
|
||||
h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x8_plane , depth);\
|
||||
} else {\
|
||||
h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x16_plane , depth);\
|
||||
@@ -450,7 +450,7 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, co
|
||||
} else\
|
||||
h->pred8x8[PLANE_PRED8x8]= FUNCD(pred8x8_tm_vp8);\
|
||||
if(codec_id != CODEC_ID_RV40 && codec_id != CODEC_ID_VP8){\
|
||||
if (chroma_format_idc == 1) {\
|
||||
if (chroma_format_idc <= 1) {\
|
||||
h->pred8x8[DC_PRED8x8 ]= FUNCC(pred8x8_dc , depth);\
|
||||
h->pred8x8[LEFT_DC_PRED8x8]= FUNCC(pred8x8_left_dc , depth);\
|
||||
h->pred8x8[TOP_DC_PRED8x8 ]= FUNCC(pred8x8_top_dc , depth);\
|
||||
@@ -476,7 +476,7 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, co
|
||||
h->pred8x8[DC_129_PRED8x8]= FUNCC(pred8x8_129_dc , depth);\
|
||||
}\
|
||||
}\
|
||||
if (chroma_format_idc == 1) {\
|
||||
if (chroma_format_idc <= 1) {\
|
||||
h->pred8x8[DC_128_PRED8x8 ]= FUNCC(pred8x8_128_dc , depth);\
|
||||
} else {\
|
||||
h->pred8x8[DC_128_PRED8x8 ]= FUNCC(pred8x16_128_dc , depth);\
|
||||
@@ -510,7 +510,7 @@ void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, co
|
||||
h->pred4x4_add [ HOR_PRED ]= FUNCC(pred4x4_horizontal_add , depth);\
|
||||
h->pred8x8l_add [VERT_PRED ]= FUNCC(pred8x8l_vertical_add , depth);\
|
||||
h->pred8x8l_add [ HOR_PRED ]= FUNCC(pred8x8l_horizontal_add , depth);\
|
||||
if (chroma_format_idc == 1) {\
|
||||
if (chroma_format_idc <= 1) {\
|
||||
h->pred8x8_add [VERT_PRED8x8]= FUNCC(pred8x8_vertical_add , depth);\
|
||||
h->pred8x8_add [ HOR_PRED8x8]= FUNCC(pred8x8_horizontal_add , depth);\
|
||||
} else {\
|
||||
|
@@ -755,6 +755,8 @@ int ff_h263_decode_mb(MpegEncContext *s,
|
||||
}
|
||||
|
||||
if(IS_DIRECT(mb_type)){
|
||||
if (!s->pp_time)
|
||||
return AVERROR_INVALIDDATA;
|
||||
s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
|
||||
mb_type |= ff_mpeg4_set_direct_mv(s, 0, 0);
|
||||
}else{
|
||||
|
@@ -894,6 +894,11 @@ int ff_ivi_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
|
||||
return AVERROR_PATCHWELCOME;
|
||||
}
|
||||
|
||||
if (!ctx->planes[0].bands) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Color planes not initialized yet\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
ctx->switch_buffers(ctx);
|
||||
|
||||
//{ START_TIMER;
|
||||
|
@@ -52,6 +52,7 @@ typedef struct LagarithContext {
|
||||
int zeros; /**< number of consecutive zero bytes encountered */
|
||||
int zeros_rem; /**< number of zero bytes remaining to output */
|
||||
uint8_t *rgb_planes;
|
||||
int rgb_planes_allocated;
|
||||
int rgb_stride;
|
||||
} LagarithContext;
|
||||
|
||||
@@ -507,13 +508,12 @@ static int lag_decode_frame(AVCodecContext *avctx,
|
||||
offs[2] = 13;
|
||||
offs[3] = AV_RL32(buf + 9);
|
||||
|
||||
l->rgb_stride = FFALIGN(avctx->width, 16);
|
||||
av_fast_malloc(&l->rgb_planes, &l->rgb_planes_allocated,
|
||||
l->rgb_stride * avctx->height * 4 + 1);
|
||||
if (!l->rgb_planes) {
|
||||
l->rgb_stride = FFALIGN(avctx->width, 16);
|
||||
l->rgb_planes = av_malloc(l->rgb_stride * avctx->height * 4);
|
||||
if (!l->rgb_planes) {
|
||||
av_log(avctx, AV_LOG_ERROR, "cannot allocate temporary buffer\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
av_log(avctx, AV_LOG_ERROR, "cannot allocate temporary buffer\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
for (i = 0; i < 4; i++)
|
||||
srcs[i] = l->rgb_planes + (i + 1) * l->rgb_stride * avctx->height - l->rgb_stride;
|
||||
|
@@ -107,6 +107,9 @@ static inline uint8_t lag_get_rac(lag_rac *l)
|
||||
l->range -= range_scaled * l->prob[255];
|
||||
}
|
||||
|
||||
if (!l->range)
|
||||
l->range = 0x80;
|
||||
|
||||
l->low -= range_scaled * l->prob[val];
|
||||
|
||||
return val;
|
||||
|
@@ -231,8 +231,8 @@ static av_cold int mace_decode_init(AVCodecContext * avctx)
|
||||
{
|
||||
MACEContext *ctx = avctx->priv_data;
|
||||
|
||||
if (avctx->channels > 2)
|
||||
return -1;
|
||||
if (avctx->channels > 2 || avctx->channels < 1)
|
||||
return AVERROR(EINVAL);
|
||||
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
|
||||
avcodec_get_frame_defaults(&ctx->frame);
|
||||
|
@@ -1111,7 +1111,7 @@ static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
if (!Al) {
|
||||
s->coefs_finished[c] |= (1LL << (se + 1)) - (1LL << ss);
|
||||
s->coefs_finished[c] |= (2LL << se) - (1LL << ss);
|
||||
last_scan = !~s->coefs_finished[c];
|
||||
}
|
||||
|
||||
|
@@ -83,6 +83,15 @@ static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
|
||||
return sign_extend(val, 5 + shift);
|
||||
}
|
||||
|
||||
#define check_scantable_index(ctx, x) \
|
||||
do { \
|
||||
if ((x) > 63) { \
|
||||
av_log(ctx->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", \
|
||||
ctx->mb_x, ctx->mb_y); \
|
||||
return AVERROR_INVALIDDATA; \
|
||||
} \
|
||||
} while (0) \
|
||||
|
||||
static inline int mpeg1_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n)
|
||||
{
|
||||
int level, dc, diff, i, j, run;
|
||||
@@ -114,6 +123,7 @@ static inline int mpeg1_decode_block_intra(MpegEncContext *s, DCTELEM *block, in
|
||||
break;
|
||||
} else if (level != 0) {
|
||||
i += run;
|
||||
check_scantable_index(s, i);
|
||||
j = scantable[i];
|
||||
level = (level * qscale * quant_matrix[j]) >> 4;
|
||||
level = (level - 1) | 1;
|
||||
@@ -130,6 +140,7 @@ static inline int mpeg1_decode_block_intra(MpegEncContext *s, DCTELEM *block, in
|
||||
level = SHOW_UBITS(re, &s->gb, 8) ; LAST_SKIP_BITS(re, &s->gb, 8);
|
||||
}
|
||||
i += run;
|
||||
check_scantable_index(s, i);
|
||||
j = scantable[i];
|
||||
if (level < 0) {
|
||||
level = -level;
|
||||
@@ -141,10 +152,6 @@ static inline int mpeg1_decode_block_intra(MpegEncContext *s, DCTELEM *block, in
|
||||
level = (level - 1) | 1;
|
||||
}
|
||||
}
|
||||
if (i > 63) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
|
||||
return -1;
|
||||
}
|
||||
|
||||
block[j] = level;
|
||||
}
|
||||
@@ -264,6 +271,7 @@ static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, DCTELEM *bloc

if (level != 0) {
i += run;
check_scantable_index(s, i);
j = scantable[i];
level = ((level * 2 + 1) * qscale) >> 1;
level = (level - 1) | 1;

@@ -280,6 +288,7 @@ static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s, DCTELEM *bloc
level = SHOW_UBITS(re, &s->gb, 8) ; SKIP_BITS(re, &s->gb, 8);
}
i += run;
check_scantable_index(s, i);
j = scantable[i];
if (level < 0) {
level = -level;

@@ -345,6 +354,7 @@ static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, DCTELEM *block

if (level != 0) {
i += run;
check_scantable_index(s, i);
j = scantable[i];
level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);

@@ -356,6 +366,7 @@ static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, DCTELEM *block
level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);

i += run;
check_scantable_index(s, i);
j = scantable[i];
if (level < 0) {
level = ((-level * 2 + 1) * qscale * quant_matrix[j]) >> 5;

@@ -364,10 +375,6 @@ static inline int mpeg2_decode_block_non_intra(MpegEncContext *s, DCTELEM *block
level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
}
}
if (i > 63) {
av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
return -1;
}

mismatch ^= level;
block[j] = level;
@@ -414,6 +421,7 @@ static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s,

if (level != 0) {
i += run;
check_scantable_index(s, i);
j = scantable[i];
level = ((level * 2 + 1) * qscale) >> 1;
level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);

@@ -425,6 +433,7 @@ static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s,
level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);

i += run;
check_scantable_index(s, i);
j = scantable[i];
if (level < 0) {
level = ((-level * 2 + 1) * qscale) >> 1;

@@ -491,6 +500,7 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, DCTELEM *block, in
break;
} else if (level != 0) {
i += run;
check_scantable_index(s, i);
j = scantable[i];
level = (level * qscale * quant_matrix[j]) >> 4;
level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);

@@ -501,6 +511,7 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, DCTELEM *block, in
UPDATE_CACHE(re, &s->gb);
level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);
i += run;
check_scantable_index(s, i);
j = scantable[i];
if (level < 0) {
level = (-level * qscale * quant_matrix[j]) >> 4;

@@ -509,10 +520,6 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, DCTELEM *block, in
level = (level * qscale * quant_matrix[j]) >> 4;
}
}
if (i > 63) {
av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y);
return -1;
}

mismatch ^= level;
block[j] = level;
@@ -527,10 +534,10 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, DCTELEM *block, in

static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *block, int n)
{
int level, dc, diff, j, run;
int level, dc, diff, i, j, run;
int component;
RLTable *rl;
uint8_t * scantable = s->intra_scantable.permutated;
uint8_t * const scantable = s->intra_scantable.permutated;
const uint16_t *quant_matrix;
const int qscale = s->qscale;

@@ -549,6 +556,7 @@ static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *bloc
dc += diff;
s->last_dc[component] = dc;
block[0] = dc << (3 - s->intra_dc_precision);
i = 0;
if (s->intra_vlc_format)
rl = &ff_rl_mpeg2;
else

@@ -564,8 +572,9 @@ static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *bloc
if (level == 127) {
break;
} else if (level != 0) {
scantable += run;
j = *scantable;
i += run;
check_scantable_index(s, i);
j = scantable[i];
level = (level * qscale * quant_matrix[j]) >> 4;
level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1);
LAST_SKIP_BITS(re, &s->gb, 1);

@@ -574,8 +583,9 @@ static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *bloc
run = SHOW_UBITS(re, &s->gb, 6) + 1; LAST_SKIP_BITS(re, &s->gb, 6);
UPDATE_CACHE(re, &s->gb);
level = SHOW_SBITS(re, &s->gb, 12); SKIP_BITS(re, &s->gb, 12);
scantable += run;
j = *scantable;
i += run;
check_scantable_index(s, i);
j = scantable[i];
if (level < 0) {
level = (-level * qscale * quant_matrix[j]) >> 4;
level = -level;

@@ -589,7 +599,7 @@ static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s, DCTELEM *bloc
CLOSE_READER(re, &s->gb);
}

s->block_last_index[n] = scantable - s->intra_scantable.permutated;
s->block_last_index[n] = i;
return 0;
}
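Note: the mpeg12.c hunks above replace the per-function `if (i > 63)` check with a `check_scantable_index()` macro invoked immediately after each `i += run`, so a corrupt run is caught before `scantable[i]` is read. Below is a minimal standalone sketch of that pattern; the context struct, error code and the run/level arrays are hypothetical stand-ins for the real decoder state, and `fprintf` stands in for `av_log`.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the decoder context and error code used in the diff. */
#define AVERROR_INVALIDDATA (-1)
typedef struct Ctx { int mb_x, mb_y; } Ctx;

#define check_scantable_index(ctx, x)                        \
    do {                                                     \
        if ((x) > 63) {                                      \
            fprintf(stderr, "ac-tex damaged at %d %d\n",     \
                    (ctx)->mb_x, (ctx)->mb_y);               \
            return AVERROR_INVALIDDATA;                      \
        }                                                    \
    } while (0)

/* Toy run/level loop: the index is validated right after "i += run",
 * before it is used to address scantable[], instead of only once after
 * the loop has finished (the old "if (i > 63)" check). */
static int decode_block(Ctx *s, const uint8_t *scantable,
                        const int *runs, const int *levels, int n,
                        int16_t block[64])
{
    int i = 0;
    for (int k = 0; k < n; k++) {
        i += runs[k];
        check_scantable_index(s, i);
        block[scantable[i]] = levels[k];
    }
    return 0;
}
```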
@@ -160,7 +160,7 @@ static inline int mpeg4_is_resync(MpegEncContext *s){
return 0;
}

static int mpeg4_decode_sprite_trajectory(MpegEncContext * s, GetBitContext *gb)
static int mpeg4_decode_sprite_trajectory(MpegEncContext *s, GetBitContext *gb)
{
int i;
int a= 2<<s->sprite_warping_accuracy;

@@ -176,8 +176,8 @@ static int mpeg4_decode_sprite_trajectory(MpegEncContext * s, GetBitContext *gb)
int h= s->height;
int min_ab;

if(w<=0 || h<=0)
return -1;
if (w <= 0 || h <= 0)
return AVERROR_INVALIDDATA;

for(i=0; i<s->num_sprite_warping_points; i++){
int length;

@@ -415,8 +415,8 @@ int mpeg4_decode_video_packet_header(MpegEncContext *s)
skip_bits(&s->gb, 3); /* intra dc vlc threshold */
//FIXME don't just ignore everything
if(s->pict_type == AV_PICTURE_TYPE_S && s->vol_sprite_usage==GMC_SPRITE){
if(mpeg4_decode_sprite_trajectory(s, &s->gb) < 0)
return -1;
if (mpeg4_decode_sprite_trajectory(s, &s->gb) < 0)
return AVERROR_INVALIDDATA;
av_log(s->avctx, AV_LOG_ERROR, "untested\n");
}

@@ -2056,8 +2056,8 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
}

if(s->pict_type == AV_PICTURE_TYPE_S && (s->vol_sprite_usage==STATIC_SPRITE || s->vol_sprite_usage==GMC_SPRITE)){
if(mpeg4_decode_sprite_trajectory(s, gb) < 0)
return -1;
if (mpeg4_decode_sprite_trajectory(s, gb) < 0)
return AVERROR_INVALIDDATA;
if(s->sprite_brightness_change) av_log(s->avctx, AV_LOG_ERROR, "sprite_brightness_change not supported\n");
if(s->vol_sprite_usage==STATIC_SPRITE) av_log(s->avctx, AV_LOG_ERROR, "static sprite not supported\n");
}
@@ -1941,7 +1941,8 @@ static int decode_frame_mp3on4(AVCodecContext *avctx, void *data,

avpriv_mpegaudio_decode_header((MPADecodeHeader *)m, header);

if (ch + m->nb_channels > avctx->channels) {
if (ch + m->nb_channels > avctx->channels ||
s->coff[fr] + m->nb_channels > avctx->channels) {
av_log(avctx, AV_LOG_ERROR, "frame channel count exceeds codec "
"channel count\n");
return AVERROR_INVALIDDATA;
@@ -1237,8 +1237,13 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
i = ff_find_unused_picture(s, 0);
if (i < 0)
return i;
s->last_picture_ptr= &s->picture[i];

s->last_picture_ptr = &s->picture[i];

s->last_picture_ptr->f.reference = 3;
s->last_picture_ptr->f.key_frame = 0;
s->last_picture_ptr->f.pict_type = AV_PICTURE_TYPE_P;

if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
return -1;

@@ -1259,8 +1264,13 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
i = ff_find_unused_picture(s, 0);
if (i < 0)
return i;
s->next_picture_ptr= &s->picture[i];

s->next_picture_ptr = &s->picture[i];

s->next_picture_ptr->f.reference = 3;
s->next_picture_ptr->f.key_frame = 0;
s->next_picture_ptr->f.pict_type = AV_PICTURE_TYPE_P;

if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
return -1;
ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
@@ -35,6 +35,7 @@
#include "avcodec.h"
#include "dsputil.h"
#include "msrledec.h"
#include "libavutil/imgutils.h"

typedef struct MsrleContext {
AVCodecContext *avctx;

@@ -108,7 +109,7 @@ static int msrle_decode_frame(AVCodecContext *avctx,

/* FIXME how to correctly detect RLE ??? */
if (avctx->height * istride == avpkt->size) { /* assume uncompressed */
int linesize = (avctx->width * avctx->bits_per_coded_sample + 7) / 8;
int linesize = av_image_get_linesize(avctx->pix_fmt, avctx->width, 0);
uint8_t *ptr = s->frame.data[0];
uint8_t *buf = avpkt->data + (avctx->height-1)*istride;
int i, j;
@@ -183,7 +183,13 @@ static int pcx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
} else if (nplanes == 1 && bits_per_pixel == 8) {
const uint8_t *palstart = bufstart + buf_size - 769;

for (y=0; y<h; y++, ptr+=stride) {
if (buf_size < 769) {
av_log(avctx, AV_LOG_ERROR, "File is too short\n");
ret = buf_size;
goto end;
}

for (y = 0; y < h; y++, ptr += stride) {
buf = pcx_rle_decode(buf, buf_end,
scanline, bytes_per_scanline, compressed);
memcpy(ptr, scanline, w);

@@ -195,6 +201,7 @@ static int pcx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
}
if (*buf++ != 12) {
av_log(avctx, AV_LOG_ERROR, "expected palette after image data\n");
ret = buf_size;
goto end;
}
@@ -378,6 +378,10 @@ static int png_decode_idat(PNGDecContext *s, int length)
s->zstream.avail_out = s->crow_size;
s->zstream.next_out = s->crow_buf;
}
if (ret == Z_STREAM_END && s->zstream.avail_in > 0) {
av_log(NULL, AV_LOG_WARNING, "%d undecompressed bytes left in buffer\n", s->zstream.avail_in);
return 0;
}
}
return 0;
}
@@ -1004,7 +1004,7 @@ void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth, const int chrom
if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
if (bit_depth == 8) {
c->h264_idct_add = ff_h264_idct_add_altivec;
if (chroma_format_idc == 1)
if (chroma_format_idc <= 1)
c->h264_idct_add8 = ff_h264_idct_add8_altivec;
c->h264_idct_add16 = ff_h264_idct_add16_altivec;
c->h264_idct_add16intra = ff_h264_idct_add16intra_altivec;
@@ -78,8 +78,8 @@ typedef struct ThreadContext {
pthread_cond_t last_job_cond;
pthread_cond_t current_job_cond;
pthread_mutex_t current_job_lock;
unsigned current_execute;
int current_job;
unsigned int current_execute;
int done;
} ThreadContext;

@@ -203,8 +203,8 @@ static void* attribute_align_arg worker(void *v)
{
AVCodecContext *avctx = v;
ThreadContext *c = avctx->thread_opaque;
unsigned last_execute = 0;
int our_job = c->job_count;
int last_execute = 0;
int thread_count = avctx->thread_count;
int self_id;
@@ -1250,6 +1250,11 @@ static void qdm2_decode_super_block (QDM2Context *q)
for (i = 0; packet_bytes > 0; i++) {
int j;

if (i >= FF_ARRAY_ELEMS(q->sub_packet_list_A)) {
SAMPLES_NEEDED_2("too many packet bytes");
return;
}

q->sub_packet_list_A[i].next = NULL;

if (i > 0) {
@@ -203,7 +203,7 @@ static void qpeg_decode_inter(const uint8_t *src, uint8_t *dst, int size,
filled = 0;
dst -= stride;
height--;
if(height < 0)
if (height < 0)
break;
}
}

@@ -216,7 +216,7 @@ static void qpeg_decode_inter(const uint8_t *src, uint8_t *dst, int size,
filled = 0;
dst -= stride;
height--;
if(height < 0)
if (height < 0)
break;
}
}
@@ -173,6 +173,13 @@ static av_cold int roq_decode_init(AVCodecContext *avctx)
RoqContext *s = avctx->priv_data;

s->avctx = avctx;

if (avctx->width % 16 || avctx->height % 16) {
av_log(avctx, AV_LOG_ERROR,
"Dimensions must be a multiple of 16\n");
return AVERROR_PATCHWELCOME;
}

s->width = avctx->width;
s->height = avctx->height;
avcodec_get_frame_defaults(&s->frames[0]);
@@ -38,6 +38,7 @@
#include <stdlib.h>
#include <string.h>

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"

@@ -125,6 +126,8 @@ static void rpza_decode_stream(RpzaContext *s)
}
}

n_blocks = FFMIN(n_blocks, total_blocks);

switch (opcode & 0xe0) {

/* Skip blocks */

@@ -202,7 +205,7 @@ static void rpza_decode_stream(RpzaContext *s)

/* Fill block with 16 colors */
case 0x00:
if (s->size - stream_ptr < 16)
if (s->size - stream_ptr < 30)
return;
ADVANCE_BLOCK();
block_ptr = row_ptr + pixel_ptr;
@@ -442,12 +442,15 @@ static av_cold int rv10_decode_init(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
static int done=0;
int major_ver, minor_ver, micro_ver;
int major_ver, minor_ver, micro_ver, ret;

if (avctx->extradata_size < 8) {
av_log(avctx, AV_LOG_ERROR, "Extradata is too small.\n");
return -1;
}
if ((ret = av_image_check_size(avctx->coded_width,
avctx->coded_height, 0, avctx)) < 0)
return ret;

MPV_decode_defaults(s);
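The rv10.c hunk above validates the container-reported frame size with `av_image_check_size()` before any further setup. A rough sketch of the same init-time pattern follows; only `av_image_check_size()` and `AVCodecContext` come from the diff and libavutil, the surrounding function is a hypothetical placeholder.

```c
#include "libavutil/imgutils.h"
#include "avcodec.h"

static av_cold int my_decode_init(AVCodecContext *avctx)
{
    int ret;

    /* Reject obviously bogus dimensions up front; a negative return
     * value is already an AVERROR code and can be passed through. */
    if ((ret = av_image_check_size(avctx->coded_width,
                                   avctx->coded_height, 0, avctx)) < 0)
        return ret;

    /* ... continue with normal per-codec initialization ... */
    return 0;
}
```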
@@ -249,9 +249,11 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
static av_cold int rv30_decode_init(AVCodecContext *avctx)
{
RV34DecContext *r = avctx->priv_data;
int ret;

r->rv30 = 1;
ff_rv34_decode_init(avctx);
if ((ret = ff_rv34_decode_init(avctx)) < 0)
return ret;
if(avctx->extradata_size < 2){
av_log(avctx, AV_LOG_ERROR, "Extradata is too small.\n");
return -1;

@@ -544,9 +544,11 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
static av_cold int rv40_decode_init(AVCodecContext *avctx)
{
RV34DecContext *r = avctx->priv_data;
int ret;

r->rv30 = 0;
ff_rv34_decode_init(avctx);
if ((ret = ff_rv34_decode_init(avctx)) < 0)
return ret;
if(!aic_top_vlc.bits)
rv40_init_tables();
r->parse_slice_header = rv40_parse_slice_header;
@@ -26,6 +26,7 @@
#include "sgi.h"

typedef struct SgiState {
AVCodecContext *avctx;
AVFrame picture;
unsigned int width;
unsigned int height;

@@ -39,12 +40,12 @@ typedef struct SgiState {
* Expand an RLE row into a channel.
* @param s the current image state
* @param out_buf Points to one line after the output buffer.
* @param out_end end of line in output buffer
* @param len length of out_buf in bytes
* @param pixelstride pixel stride of input buffer
* @return size of output in bytes, -1 if buffer overflows
*/
static int expand_rle_row(SgiState *s, uint8_t *out_buf,
uint8_t *out_end, int pixelstride)
int len, int pixelstride)
{
unsigned char pixel, count;
unsigned char *orig = out_buf;

@@ -58,7 +59,10 @@ static int expand_rle_row(SgiState *s, uint8_t *out_buf,
}

/* Check for buffer overflow. */
if(out_buf + pixelstride * count >= out_end) return -1;
if (pixelstride * (count - 1) >= len) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid pixel count.\n");
return AVERROR_INVALIDDATA;
}

if (pixel & 0x80) {
while (count--) {

@@ -101,7 +105,7 @@ static int read_rle_sgi(uint8_t *out_buf, SgiState *s)
dest_row -= s->linesize;
start_offset = bytestream2_get_be32(&g_table);
bytestream2_seek(&s->g, start_offset, SEEK_SET);
if (expand_rle_row(s, dest_row + z, dest_row + FFABS(s->linesize),
if (expand_rle_row(s, dest_row + z, FFABS(s->linesize) - z,
s->depth) != s->width) {
return AVERROR_INVALIDDATA;
}

@@ -243,6 +247,8 @@ static int decode_frame(AVCodecContext *avctx,
static av_cold int sgi_init(AVCodecContext *avctx){
SgiState *s = avctx->priv_data;

s->avctx = avctx;

avcodec_get_frame_defaults(&s->picture);
avctx->coded_frame = &s->picture;
@@ -205,34 +205,38 @@ static int decode_wave_header(AVCodecContext *avctx, const uint8_t *header,
{
int len;
short wave_format;
const uint8_t *end= header + header_size;
GetByteContext gb;

if (bytestream_get_le32(&header) != MKTAG('R', 'I', 'F', 'F')) {
bytestream2_init(&gb, header, header_size);

if (bytestream2_get_le32(&gb) != MKTAG('R', 'I', 'F', 'F')) {
av_log(avctx, AV_LOG_ERROR, "missing RIFF tag\n");
return AVERROR_INVALIDDATA;
}

header += 4; /* chunk size */
bytestream2_skip(&gb, 4); /* chunk size */

if (bytestream_get_le32(&header) != MKTAG('W', 'A', 'V', 'E')) {
if (bytestream2_get_le32(&gb) != MKTAG('W', 'A', 'V', 'E')) {
av_log(avctx, AV_LOG_ERROR, "missing WAVE tag\n");
return AVERROR_INVALIDDATA;
}

while (bytestream_get_le32(&header) != MKTAG('f', 'm', 't', ' ')) {
len = bytestream_get_le32(&header);
if (len < 0 || end - header - 8 < len)
while (bytestream2_get_le32(&gb) != MKTAG('f', 'm', 't', ' ')) {
len = bytestream2_get_le32(&gb);
bytestream2_skip(&gb, len);
if (bytestream2_get_bytes_left(&gb) < 16) {
av_log(avctx, AV_LOG_ERROR, "no fmt chunk found\n");
return AVERROR_INVALIDDATA;
header += len;
}
}
len = bytestream_get_le32(&header);
len = bytestream2_get_le32(&gb);

if (len < 16) {
av_log(avctx, AV_LOG_ERROR, "fmt chunk was too short\n");
return AVERROR_INVALIDDATA;
}

wave_format = bytestream_get_le16(&header);
wave_format = bytestream2_get_le16(&gb);

switch (wave_format) {
case WAVE_FORMAT_PCM:

@@ -242,11 +246,11 @@ static int decode_wave_header(AVCodecContext *avctx, const uint8_t *header,
return AVERROR(ENOSYS);
}

header += 2; // skip channels (already got from shorten header)
avctx->sample_rate = bytestream_get_le32(&header);
header += 4; // skip bit rate (represents original uncompressed bit rate)
header += 2; // skip block align (not needed)
avctx->bits_per_coded_sample = bytestream_get_le16(&header);
bytestream2_skip(&gb, 2); // skip channels (already got from shorten header)
avctx->sample_rate = bytestream2_get_le32(&gb);
bytestream2_skip(&gb, 4); // skip bit rate (represents original uncompressed bit rate)
bytestream2_skip(&gb, 2); // skip block align (not needed)
avctx->bits_per_coded_sample = bytestream2_get_le16(&gb);

if (avctx->bits_per_coded_sample != 16) {
av_log(avctx, AV_LOG_ERROR, "unsupported number of bits per sample\n");

@@ -427,7 +431,7 @@ static int shorten_decode_frame(AVCodecContext *avctx, void *data,
void *tmp_ptr;
s->max_framesize = 1024; // should hopefully be enough for the first header
tmp_ptr = av_fast_realloc(s->bitstream, &s->allocated_bitstream_size,
s->max_framesize);
s->max_framesize + FF_INPUT_BUFFER_PADDING_SIZE);
if (!tmp_ptr) {
av_log(avctx, AV_LOG_ERROR, "error allocating bitstream buffer\n");
return AVERROR(ENOMEM);
@@ -263,10 +263,11 @@ static int smacker_decode_header_tree(SmackVContext *smk, GetBitContext *gb, int
if(ctx.last[0] == -1) ctx.last[0] = huff.current++;
if(ctx.last[1] == -1) ctx.last[1] = huff.current++;
if(ctx.last[2] == -1) ctx.last[2] = huff.current++;
if(huff.current > huff.length){
ctx.last[0] = ctx.last[1] = ctx.last[2] = 1;
av_log(smk->avctx, AV_LOG_ERROR, "bigtree damaged\n");
return -1;
if (ctx.last[0] >= huff.length ||
ctx.last[1] >= huff.length ||
ctx.last[2] >= huff.length) {
av_log(smk->avctx, AV_LOG_ERROR, "Huffman codes out of range\n");
err = AVERROR_INVALIDDATA;
}

*recodes = huff.values;
@@ -311,7 +311,8 @@ static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer
if(!sliced && !offset_dst)
dst -= src_x;
src_x=0;
}else if(src_x + b_w > w){
}
if(src_x + b_w > w){
b_w = w - src_x;
}
if(src_y<0){

@@ -320,7 +321,8 @@ static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer
if(!sliced && !offset_dst)
dst -= src_y*dst_stride;
src_y=0;
}else if(src_y + b_h> h){
}
if(src_y + b_h> h){
b_h = h - src_y;
}
@@ -613,9 +613,9 @@ static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
dir = i_mb_type_info[mb_type - 8].pred_mode;
dir = (dir >> 1) ^ 3*(dir & 1) ^ 1;

if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) == -1){
av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
return -1;
if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) < 0) {
av_log(h->s.avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
return h->intra16x16_pred_mode;
}

cbp = i_mb_type_info[mb_type - 8].cbp;

@@ -905,7 +905,8 @@ static av_cold int svq3_decode_init(AVCodecContext *avctx)
int offset = (get_bits_count(&gb)+7)>>3;
uint8_t *buf;

if ((uint64_t)watermark_width*4 > UINT_MAX/watermark_height)
if (watermark_height > 0 &&
(uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
return -1;

buf = av_malloc(buf_len);
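The svq3.c hunk above only evaluates the `UINT_MAX / watermark_height` overflow test when the height is non-zero, relying on `&&` short-circuiting to avoid a division by zero on garbage header values. A small sketch of that idiom, with hypothetical variable names:

```c
#include <stdint.h>
#include <limits.h>
#include <stdbool.h>

/* Returns true if w * 4 * h would overflow an unsigned int.
 * The division-based test is only evaluated when h > 0, so the
 * check itself can never divide by zero. */
static bool watermark_size_overflows(uint32_t w, uint32_t h)
{
    return h > 0 && (uint64_t)w * 4 > UINT_MAX / h;
}
```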
@@ -25,6 +25,7 @@
|
||||
*/
|
||||
|
||||
#include "avcodec.h"
|
||||
#include "bytestream.h"
|
||||
#if CONFIG_ZLIB
|
||||
#include <zlib.h>
|
||||
#endif
|
||||
@@ -38,6 +39,7 @@
|
||||
typedef struct TiffContext {
|
||||
AVCodecContext *avctx;
|
||||
AVFrame picture;
|
||||
GetByteContext gb;
|
||||
|
||||
int width, height;
|
||||
unsigned int bpp, bppcount;
|
||||
@@ -52,30 +54,27 @@ typedef struct TiffContext {
|
||||
|
||||
int strips, rps, sstype;
|
||||
int sot;
|
||||
const uint8_t* stripdata;
|
||||
const uint8_t* stripsizes;
|
||||
int stripsize, stripoff;
|
||||
int stripsizesoff, stripsize, stripoff, strippos;
|
||||
LZWState *lzw;
|
||||
} TiffContext;
|
||||
|
||||
static unsigned tget_short(const uint8_t **p, int le) {
|
||||
unsigned v = le ? AV_RL16(*p) : AV_RB16(*p);
|
||||
*p += 2;
|
||||
return v;
|
||||
static unsigned tget_short(GetByteContext *gb, int le)
|
||||
{
|
||||
return le ? bytestream2_get_le16(gb) : bytestream2_get_be16(gb);
|
||||
}
|
||||
|
||||
static unsigned tget_long(const uint8_t **p, int le) {
|
||||
unsigned v = le ? AV_RL32(*p) : AV_RB32(*p);
|
||||
*p += 4;
|
||||
return v;
|
||||
static unsigned tget_long(GetByteContext *gb, int le)
|
||||
{
|
||||
return le ? bytestream2_get_le32(gb) : bytestream2_get_be32(gb);
|
||||
}
|
||||
|
||||
static unsigned tget(const uint8_t **p, int type, int le) {
|
||||
static unsigned tget(GetByteContext *gb, int type, int le)
|
||||
{
|
||||
switch(type){
|
||||
case TIFF_BYTE : return *(*p)++;
|
||||
case TIFF_SHORT: return tget_short(p, le);
|
||||
case TIFF_LONG : return tget_long (p, le);
|
||||
default : return UINT_MAX;
|
||||
case TIFF_BYTE: return bytestream2_get_byte(gb);
|
||||
case TIFF_SHORT: return tget_short(gb, le);
|
||||
case TIFF_LONG: return tget_long(gb, le);
|
||||
default: return UINT_MAX;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -143,8 +142,8 @@ static void av_always_inline horizontal_fill(unsigned int bpp, uint8_t* dst,
|
||||
}
|
||||
|
||||
static int tiff_unpack_strip(TiffContext *s, uint8_t* dst, int stride, const uint8_t *src, int size, int lines){
|
||||
PutByteContext pb;
|
||||
int c, line, pixels, code;
|
||||
const uint8_t *ssrc = src;
|
||||
int width = ((s->width * s->bpp) + 7) >> 3;
|
||||
#if CONFIG_ZLIB
|
||||
uint8_t *zbuf; unsigned long outlen;
|
||||
@@ -178,6 +177,16 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t* dst, int stride, const uin
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
|
||||
return -1;
|
||||
}
|
||||
for (line = 0; line < lines; line++) {
|
||||
pixels = ff_lzw_decode(s->lzw, dst, width);
|
||||
if (pixels < width) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
|
||||
pixels, width);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
dst += stride;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
if(s->compr == TIFF_CCITT_RLE || s->compr == TIFF_G3 || s->compr == TIFF_G4){
|
||||
int i, ret = 0;
|
||||
@@ -214,65 +223,40 @@ static int tiff_unpack_strip(TiffContext *s, uint8_t* dst, int stride, const uin
|
||||
av_free(src2);
|
||||
return ret;
|
||||
}
|
||||
|
||||
bytestream2_init(&s->gb, src, size);
|
||||
bytestream2_init_writer(&pb, dst, stride * lines);
|
||||
|
||||
for(line = 0; line < lines; line++){
|
||||
if(src - ssrc > size){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
|
||||
return -1;
|
||||
}
|
||||
if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
|
||||
break;
|
||||
bytestream2_seek_p(&pb, stride * line, SEEK_SET);
|
||||
switch(s->compr){
|
||||
case TIFF_RAW:
|
||||
if (ssrc + size - src < width)
|
||||
return AVERROR_INVALIDDATA;
|
||||
if (!s->fill_order) {
|
||||
horizontal_fill(s->bpp * (s->avctx->pix_fmt == PIX_FMT_PAL8),
|
||||
dst, 1, src, 0, width, 0);
|
||||
bytestream2_copy_buffer(&pb, &s->gb, width);
|
||||
} else {
|
||||
int i;
|
||||
for (i = 0; i < width; i++)
|
||||
dst[i] = av_reverse[src[i]];
|
||||
bytestream2_put_byte(&pb, av_reverse[bytestream2_get_byte(&s->gb)]);
|
||||
}
|
||||
src += width;
|
||||
break;
|
||||
case TIFF_PACKBITS:
|
||||
for(pixels = 0; pixels < width;){
|
||||
if (ssrc + size - src < 2)
|
||||
return AVERROR_INVALIDDATA;
|
||||
code = (int8_t)*src++;
|
||||
code = (int8_t)bytestream2_get_byte(&s->gb);
|
||||
if(code >= 0){
|
||||
code++;
|
||||
if (pixels + code > width ||
|
||||
ssrc + size - src < code) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Copy went out of bounds\n");
|
||||
return -1;
|
||||
}
|
||||
horizontal_fill(s->bpp * (s->avctx->pix_fmt == PIX_FMT_PAL8),
|
||||
dst, 1, src, 0, code, pixels);
|
||||
src += code;
|
||||
bytestream2_copy_buffer(&pb, &s->gb, code);
|
||||
pixels += code;
|
||||
}else if(code != -128){ // -127..-1
|
||||
code = (-code) + 1;
|
||||
if(pixels + code > width){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Run went out of bounds\n");
|
||||
return -1;
|
||||
}
|
||||
c = *src++;
|
||||
horizontal_fill(s->bpp * (s->avctx->pix_fmt == PIX_FMT_PAL8),
|
||||
dst, 0, NULL, c, code, pixels);
|
||||
c = bytestream2_get_byte(&s->gb);
|
||||
bytestream2_set_buffer(&pb, c, code);
|
||||
pixels += code;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case TIFF_LZW:
|
||||
pixels = ff_lzw_decode(s->lzw, dst, width);
|
||||
if(pixels < width){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n", pixels, width);
|
||||
return -1;
|
||||
}
|
||||
if (s->bpp < 8 && s->avctx->pix_fmt == PIX_FMT_PAL8)
|
||||
horizontal_fill(s->bpp, dst, 1, dst, 0, width, 0);
|
||||
break;
|
||||
}
|
||||
dst += stride;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@@ -341,19 +325,19 @@ static int init_image(TiffContext *s)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *buf, const uint8_t *end_buf)
|
||||
static int tiff_decode_tag(TiffContext *s)
|
||||
{
|
||||
unsigned tag, type, count, off, value = 0;
|
||||
int i, j;
|
||||
int i, start;
|
||||
uint32_t *pal;
|
||||
const uint8_t *rp, *gp, *bp;
|
||||
|
||||
if (end_buf - buf < 12)
|
||||
if (bytestream2_get_bytes_left(&s->gb) < 12)
|
||||
return -1;
|
||||
tag = tget_short(&buf, s->le);
|
||||
type = tget_short(&buf, s->le);
|
||||
count = tget_long(&buf, s->le);
|
||||
off = tget_long(&buf, s->le);
|
||||
tag = tget_short(&s->gb, s->le);
|
||||
type = tget_short(&s->gb, s->le);
|
||||
count = tget_long(&s->gb, s->le);
|
||||
off = tget_long(&s->gb, s->le);
|
||||
start = bytestream2_tell(&s->gb);
|
||||
|
||||
if (type == 0 || type >= FF_ARRAY_ELEMS(type_sizes)) {
|
||||
av_log(s->avctx, AV_LOG_DEBUG, "Unknown tiff type (%u) encountered\n", type);
|
||||
@@ -364,34 +348,26 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
|
||||
switch(type){
|
||||
case TIFF_BYTE:
|
||||
case TIFF_SHORT:
|
||||
buf -= 4;
|
||||
value = tget(&buf, type, s->le);
|
||||
buf = NULL;
|
||||
bytestream2_seek(&s->gb, -4, SEEK_CUR);
|
||||
value = tget(&s->gb, type, s->le);
|
||||
break;
|
||||
case TIFF_LONG:
|
||||
value = off;
|
||||
buf = NULL;
|
||||
break;
|
||||
case TIFF_STRING:
|
||||
if(count <= 4){
|
||||
buf -= 4;
|
||||
bytestream2_seek(&s->gb, -4, SEEK_CUR);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
value = UINT_MAX;
|
||||
buf = start + off;
|
||||
bytestream2_seek(&s->gb, off, SEEK_SET);
|
||||
}
|
||||
} else {
|
||||
if (count <= 4 && type_sizes[type] * count <= 4) {
|
||||
buf -= 4;
|
||||
} else {
|
||||
buf = start + off;
|
||||
}
|
||||
}
|
||||
|
||||
if(buf && (buf < start || buf > end_buf)){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
|
||||
return -1;
|
||||
if (count <= 4 && type_sizes[type] * count <= 4)
|
||||
bytestream2_seek(&s->gb, -4, SEEK_CUR);
|
||||
else
|
||||
bytestream2_seek(&s->gb, off, SEEK_SET);
|
||||
}
|
||||
|
||||
switch(tag){
|
||||
@@ -416,7 +392,8 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
|
||||
case TIFF_SHORT:
|
||||
case TIFF_LONG:
|
||||
s->bpp = 0;
|
||||
for(i = 0; i < count && buf < end_buf; i++) s->bpp += tget(&buf, type, s->le);
|
||||
for (i = 0; i < count; i++)
|
||||
s->bpp += tget(&s->gb, type, s->le);
|
||||
break;
|
||||
default:
|
||||
s->bpp = -1;
|
||||
@@ -474,32 +451,24 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
|
||||
break;
|
||||
case TIFF_STRIP_OFFS:
|
||||
if(count == 1){
|
||||
s->stripdata = NULL;
|
||||
s->strippos = 0;
|
||||
s->stripoff = value;
|
||||
}else
|
||||
s->stripdata = start + off;
|
||||
s->strippos = off;
|
||||
s->strips = count;
|
||||
if(s->strips == 1) s->rps = s->height;
|
||||
s->sot = type;
|
||||
if(s->stripdata > end_buf){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
case TIFF_STRIP_SIZE:
|
||||
if(count == 1){
|
||||
s->stripsizes = NULL;
|
||||
s->stripsize = value;
|
||||
s->strips = 1;
|
||||
s->stripsizesoff = 0;
|
||||
s->stripsize = value;
|
||||
s->strips = 1;
|
||||
}else{
|
||||
s->stripsizes = start + off;
|
||||
s->stripsizesoff = off;
|
||||
}
|
||||
s->strips = count;
|
||||
s->sstype = type;
|
||||
if(s->stripsizes > end_buf){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
case TIFF_TILE_BYTE_COUNTS:
|
||||
case TIFF_TILE_LENGTH:
|
||||
@@ -534,24 +503,27 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
|
||||
}
|
||||
s->fill_order = value - 1;
|
||||
break;
|
||||
case TIFF_PAL:
|
||||
case TIFF_PAL: {
|
||||
GetByteContext pal_gb[3];
|
||||
pal = (uint32_t *) s->palette;
|
||||
off = type_sizes[type];
|
||||
if (count / 3 > 256 || end_buf - buf < count / 3 * off * 3)
|
||||
if (count / 3 > 256 ||
|
||||
bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
|
||||
return -1;
|
||||
rp = buf;
|
||||
gp = buf + count / 3 * off;
|
||||
bp = buf + count / 3 * off * 2;
|
||||
pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
|
||||
bytestream2_skip(&pal_gb[1], count / 3 * off);
|
||||
bytestream2_skip(&pal_gb[2], count / 3 * off * 2);
|
||||
off = (type_sizes[type] - 1) << 3;
|
||||
for(i = 0; i < count / 3; i++){
|
||||
j = 0xff << 24;
|
||||
j |= (tget(&rp, type, s->le) >> off) << 16;
|
||||
j |= (tget(&gp, type, s->le) >> off) << 8;
|
||||
j |= tget(&bp, type, s->le) >> off;
|
||||
pal[i] = j;
|
||||
uint32_t p = 0xFF000000;
|
||||
p |= (tget(&pal_gb[0], type, s->le) >> off) << 16;
|
||||
p |= (tget(&pal_gb[1], type, s->le) >> off) << 8;
|
||||
p |= tget(&pal_gb[2], type, s->le) >> off;
|
||||
pal[i] = p;
|
||||
}
|
||||
s->palette_is_set = 1;
|
||||
break;
|
||||
}
|
||||
case TIFF_PLANAR:
|
||||
if(value == 2){
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Planar format is not supported\n");
|
||||
@@ -569,6 +541,7 @@ static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *
|
||||
default:
|
||||
av_log(s->avctx, AV_LOG_DEBUG, "Unknown or unsupported tag %d/0X%0X\n", tag, tag);
|
||||
}
|
||||
bytestream2_seek(&s->gb, start, SEEK_SET);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -576,23 +549,24 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
void *data, int *data_size,
|
||||
AVPacket *avpkt)
|
||||
{
|
||||
const uint8_t *buf = avpkt->data;
|
||||
int buf_size = avpkt->size;
|
||||
TiffContext * const s = avctx->priv_data;
|
||||
AVFrame *picture = data;
|
||||
AVFrame * const p= (AVFrame*)&s->picture;
|
||||
const uint8_t *orig_buf = buf, *end_buf = buf + buf_size;
|
||||
unsigned off;
|
||||
int id, le, ret;
|
||||
int i, j, entries;
|
||||
int stride;
|
||||
unsigned soff, ssize;
|
||||
uint8_t *dst;
|
||||
GetByteContext stripsizes;
|
||||
GetByteContext stripdata;
|
||||
|
||||
bytestream2_init(&s->gb, avpkt->data, avpkt->size);
|
||||
|
||||
//parse image header
|
||||
if (end_buf - buf < 8)
|
||||
if (avpkt->size < 8)
|
||||
return AVERROR_INVALIDDATA;
|
||||
id = AV_RL16(buf); buf += 2;
|
||||
id = bytestream2_get_le16(&s->gb);
|
||||
if(id == 0x4949) le = 1;
|
||||
else if(id == 0x4D4D) le = 0;
|
||||
else{
|
||||
@@ -605,26 +579,25 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
s->fill_order = 0;
|
||||
// As TIFF 6.0 specification puts it "An arbitrary but carefully chosen number
|
||||
// that further identifies the file as a TIFF file"
|
||||
if(tget_short(&buf, le) != 42){
|
||||
if (tget_short(&s->gb, le) != 42) {
|
||||
av_log(avctx, AV_LOG_ERROR, "The answer to life, universe and everything is not correct!\n");
|
||||
return -1;
|
||||
}
|
||||
// Reset these pointers so we can tell if they were set this frame
|
||||
s->stripsizes = s->stripdata = NULL;
|
||||
// Reset these offsets so we can tell if they were set this frame
|
||||
s->stripsizesoff = s->strippos = 0;
|
||||
/* parse image file directory */
|
||||
off = tget_long(&buf, le);
|
||||
if (off >= UINT_MAX - 14 || end_buf - orig_buf < off + 14) {
|
||||
off = tget_long(&s->gb, le);
|
||||
if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
|
||||
av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
buf = orig_buf + off;
|
||||
entries = tget_short(&buf, le);
|
||||
bytestream2_seek(&s->gb, off, SEEK_SET);
|
||||
entries = tget_short(&s->gb, le);
|
||||
for(i = 0; i < entries; i++){
|
||||
if(tiff_decode_tag(s, orig_buf, buf, end_buf) < 0)
|
||||
if (tiff_decode_tag(s) < 0)
|
||||
return -1;
|
||||
buf += 12;
|
||||
}
|
||||
if(!s->stripdata && !s->stripoff){
|
||||
if (!s->strippos && !s->stripoff) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
|
||||
return -1;
|
||||
}
|
||||
@@ -634,30 +607,41 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
|
||||
if(s->strips == 1 && !s->stripsize){
|
||||
av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
|
||||
s->stripsize = buf_size - s->stripoff;
|
||||
s->stripsize = avpkt->size - s->stripoff;
|
||||
}
|
||||
stride = p->linesize[0];
|
||||
dst = p->data[0];
|
||||
|
||||
if (s->stripsizesoff) {
|
||||
if (s->stripsizesoff >= avpkt->size)
|
||||
return AVERROR_INVALIDDATA;
|
||||
bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
|
||||
avpkt->size - s->stripsizesoff);
|
||||
}
|
||||
if (s->strippos) {
|
||||
if (s->strippos >= avpkt->size)
|
||||
return AVERROR_INVALIDDATA;
|
||||
bytestream2_init(&stripdata, avpkt->data + s->strippos,
|
||||
avpkt->size - s->strippos);
|
||||
}
|
||||
|
||||
for(i = 0; i < s->height; i += s->rps){
|
||||
if(s->stripsizes) {
|
||||
if (s->stripsizes >= end_buf)
|
||||
return AVERROR_INVALIDDATA;
|
||||
ssize = tget(&s->stripsizes, s->sstype, s->le);
|
||||
} else
|
||||
if (s->stripsizesoff)
|
||||
ssize = tget(&stripsizes, s->sstype, le);
|
||||
else
|
||||
ssize = s->stripsize;
|
||||
|
||||
if(s->stripdata){
|
||||
if (s->stripdata >= end_buf)
|
||||
return AVERROR_INVALIDDATA;
|
||||
soff = tget(&s->stripdata, s->sot, s->le);
|
||||
}else
|
||||
if (s->strippos)
|
||||
soff = tget(&stripdata, s->sot, le);
|
||||
else
|
||||
soff = s->stripoff;
|
||||
|
||||
if (soff > buf_size || ssize > buf_size - soff) {
|
||||
if (soff > avpkt->size || ssize > avpkt->size - soff) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
|
||||
return -1;
|
||||
}
|
||||
if(tiff_unpack_strip(s, dst, stride, orig_buf + soff, ssize, FFMIN(s->rps, s->height - i)) < 0)
|
||||
if (tiff_unpack_strip(s, dst, stride, avpkt->data + soff, ssize,
|
||||
FFMIN(s->rps, s->height - i)) < 0)
|
||||
break;
|
||||
dst += s->rps * stride;
|
||||
}
|
||||
@@ -699,7 +683,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
*picture= *(AVFrame*)&s->picture;
|
||||
*data_size = sizeof(AVPicture);
|
||||
|
||||
return buf_size;
|
||||
return avpkt->size;
|
||||
}
|
||||
|
||||
static av_cold int tiff_init(AVCodecContext *avctx){
|
||||
|
@@ -320,6 +320,11 @@ static int truemotion1_decode_header(TrueMotion1Context *s)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (header.header_size + 1 > s->size) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "Input packet too small.\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* unscramble the header bytes with a XOR operation */
|
||||
memset(header_buffer, 0, 128);
|
||||
for (i = 1; i < header.header_size; i++)
|
||||
|
@@ -996,7 +996,7 @@ static void linear_perm(int16_t *out, int16_t *in, int n_blocks, int size)
|
||||
out[i] = block_size * (in[i] % n_blocks) + in[i] / n_blocks;
|
||||
}
|
||||
|
||||
static av_cold void construct_perm_table(TwinContext *tctx,enum FrameType ftype)
|
||||
static av_cold void construct_perm_table(TwinContext *tctx, int ftype)
|
||||
{
|
||||
int block_size;
|
||||
const ModeTab *mtab = tctx->mtab;
|
||||
@@ -1137,6 +1137,10 @@ static av_cold int twin_decode_init(AVCodecContext *avctx)
|
||||
return -1;
|
||||
}
|
||||
ibps = avctx->bit_rate / (1000 * avctx->channels);
|
||||
if (ibps < 8 || ibps > 48) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Bad bitrate per channel value %d\n", ibps);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
switch ((isampf << 8) + ibps) {
|
||||
case (8 <<8) + 8: tctx->mtab = &mode_08_08; break;
|
||||
|
@@ -578,6 +578,8 @@ int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
|
||||
{
|
||||
int pqindex, lowquant, status;
|
||||
|
||||
v->field_mode = 0;
|
||||
v->fcm = 0;
|
||||
if (v->finterpflag)
|
||||
v->interpfrm = get_bits1(gb);
|
||||
skip_bits(gb, 2); //framecnt unused
|
||||
@@ -824,7 +826,7 @@ int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
|
||||
int mbmodetab, imvtab, icbptab, twomvbptab, fourmvbptab; /* useful only for debugging */
|
||||
int scale, shift, i; /* for initializing LUT for intensity compensation */
|
||||
|
||||
v->numref=0;
|
||||
v->numref = 0;
|
||||
v->p_frame_skipped = 0;
|
||||
if (v->second_field) {
|
||||
if(v->fcm!=2 || v->field_mode!=1)
|
||||
|
@@ -4741,6 +4741,9 @@ static void vc1_decode_skip_blocks(VC1Context *v)
|
||||
{
|
||||
MpegEncContext *s = &v->s;
|
||||
|
||||
if (!v->s.last_picture.f.data[0])
|
||||
return;
|
||||
|
||||
ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
|
||||
s->first_slice_line = 1;
|
||||
for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
|
||||
@@ -5126,8 +5129,19 @@ static av_cold int vc1_decode_init_alloc_tables(VC1Context *v)
|
||||
|
||||
if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
|
||||
!v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
|
||||
!v->mb_type_base)
|
||||
return -1;
|
||||
!v->mb_type_base) {
|
||||
av_freep(&v->mv_type_mb_plane);
|
||||
av_freep(&v->direct_mb_plane);
|
||||
av_freep(&v->acpred_plane);
|
||||
av_freep(&v->over_flags_plane);
|
||||
av_freep(&v->block);
|
||||
av_freep(&v->cbp_base);
|
||||
av_freep(&v->ttblk_base);
|
||||
av_freep(&v->is_intra_base);
|
||||
av_freep(&v->luma_mv_base);
|
||||
av_freep(&v->mb_type_base);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -5475,8 +5489,12 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
|
||||
}
|
||||
|
||||
if (!s->context_initialized) {
|
||||
if (ff_msmpeg4_decode_init(avctx) < 0 || vc1_decode_init_alloc_tables(v) < 0)
|
||||
if (ff_msmpeg4_decode_init(avctx) < 0)
|
||||
return -1;
|
||||
if (vc1_decode_init_alloc_tables(v) < 0) {
|
||||
MPV_common_end(s);
|
||||
return -1;
|
||||
}
|
||||
|
||||
s->low_delay = !avctx->has_b_frames || v->res_sprite;
|
||||
|
||||
@@ -5564,6 +5582,8 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
|
||||
if (avctx->hwaccel->end_frame(avctx) < 0)
|
||||
goto err;
|
||||
} else {
|
||||
int header_ret = 0;
|
||||
|
||||
ff_er_frame_start(s);
|
||||
|
||||
v->bits = buf_size * 8;
|
||||
@@ -5608,13 +5628,21 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
|
||||
}
|
||||
if (i) {
|
||||
v->pic_header_flag = 0;
|
||||
if (v->field_mode && i == n_slices1 + 2)
|
||||
vc1_parse_frame_header_adv(v, &s->gb);
|
||||
else if (get_bits1(&s->gb)) {
|
||||
if (v->field_mode && i == n_slices1 + 2) {
|
||||
if ((header_ret = vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
|
||||
av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
|
||||
continue;
|
||||
}
|
||||
} else if (get_bits1(&s->gb)) {
|
||||
v->pic_header_flag = 1;
|
||||
vc1_parse_frame_header_adv(v, &s->gb);
|
||||
if ((header_ret = vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
|
||||
av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (header_ret < 0)
|
||||
continue;
|
||||
s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
|
||||
if (!v->field_mode || v->second_field)
|
||||
s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
|
||||
|
@@ -275,6 +275,11 @@ static int decode_hextile(VmncContext *c, uint8_t* dst, const uint8_t* src, int
|
||||
}
|
||||
xy = *src++;
|
||||
wh = *src++;
|
||||
if ( (xy >> 4) + (wh >> 4) + 1 > w - i
|
||||
|| (xy & 0xF) + (wh & 0xF)+1 > h - j) {
|
||||
av_log(c->avctx, AV_LOG_ERROR, "Rectangle outside picture\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
paint_rect(dst2, xy >> 4, xy & 0xF, (wh>>4)+1, (wh & 0xF)+1, fg, bpp, stride);
|
||||
}
|
||||
}
|
||||
|
@@ -2150,6 +2150,10 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
|
||||
fps.num = get_bits_long(gb, 32);
|
||||
fps.den = get_bits_long(gb, 32);
|
||||
if (fps.num && fps.den) {
|
||||
if (fps.num < 0 || fps.den < 0) {
|
||||
av_log(avctx, AV_LOG_ERROR, "Invalid framerate\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
av_reduce(&avctx->time_base.num, &avctx->time_base.den,
|
||||
fps.den, fps.num, 1<<30);
|
||||
}
|
||||
|
@@ -534,6 +534,12 @@ static int vqa_decode_chunk(VqaContext *s)
|
||||
bytestream2_seek(&s->gb, cbp0_chunk, SEEK_SET);
|
||||
chunk_size = bytestream2_get_be32(&s->gb);
|
||||
|
||||
if (chunk_size > MAX_CODEBOOK_SIZE - s->next_codebook_buffer_index) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "cbp0 chunk too large (%u bytes)\n",
|
||||
chunk_size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* accumulate partial codebook */
|
||||
bytestream2_get_buffer(&s->gb, &s->next_codebook_buffer[s->next_codebook_buffer_index],
|
||||
chunk_size);
|
||||
@@ -557,6 +563,12 @@ static int vqa_decode_chunk(VqaContext *s)
|
||||
bytestream2_seek(&s->gb, cbpz_chunk, SEEK_SET);
|
||||
chunk_size = bytestream2_get_be32(&s->gb);
|
||||
|
||||
if (chunk_size > MAX_CODEBOOK_SIZE - s->next_codebook_buffer_index) {
|
||||
av_log(s->avctx, AV_LOG_ERROR, "cbpz chunk too large (%u bytes)\n",
|
||||
chunk_size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
||||
/* accumulate partial codebook */
|
||||
bytestream2_get_buffer(&s->gb, &s->next_codebook_buffer[s->next_codebook_buffer_index],
|
||||
chunk_size);
|
||||
|
@@ -424,9 +424,9 @@ int ff_wma_end(AVCodecContext *avctx)
|
||||
}
|
||||
for (i = 0; i < 2; i++) {
|
||||
ff_free_vlc(&s->coef_vlc[i]);
|
||||
av_free(s->run_table[i]);
|
||||
av_free(s->level_table[i]);
|
||||
av_free(s->int_table[i]);
|
||||
av_freep(&s->run_table[i]);
|
||||
av_freep(&s->level_table[i]);
|
||||
av_freep(&s->int_table[i]);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@@ -238,11 +238,11 @@ typedef struct WmallDecodeCtx {
|
||||
|
||||
int8_t mclms_order;
|
||||
int8_t mclms_scaling;
|
||||
int16_t mclms_coeffs[128];
|
||||
int16_t mclms_coeffs_cur[4];
|
||||
int16_t mclms_prevvalues[64]; // FIXME: should be 32-bit / 16-bit depending on bit-depth
|
||||
int16_t mclms_updates[64];
|
||||
int mclms_recent;
|
||||
int16_t mclms_coeffs[WMALL_MAX_CHANNELS * WMALL_MAX_CHANNELS * 32];
|
||||
int16_t mclms_coeffs_cur[WMALL_MAX_CHANNELS * WMALL_MAX_CHANNELS];
|
||||
int16_t mclms_prevvalues[WMALL_MAX_CHANNELS * 2 * 32];
|
||||
int16_t mclms_updates[WMALL_MAX_CHANNELS * 2 * 32];
|
||||
int mclms_recent;
|
||||
|
||||
int movave_scaling;
|
||||
int quant_stepsize;
|
||||
|
@@ -70,7 +70,7 @@ static int decode_frame(AVCodecContext *avctx,
|
||||
int prev_y = 0, prev_u = 0, prev_v = 0;
|
||||
uint8_t *rbuf;
|
||||
|
||||
if(buf_size<=8) {
|
||||
if (buf_size<=8) {
|
||||
av_log(avctx, AV_LOG_ERROR, "buf_size %d is too small\n", buf_size);
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
|
@@ -176,7 +176,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
|
||||
if (mm_flags & AV_CPU_FLAG_MMX) {
|
||||
h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vertical_mmx;
|
||||
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmx;
|
||||
if (chroma_format_idc == 1) {
|
||||
if (chroma_format_idc <= 1) {
|
||||
h->pred8x8 [VERT_PRED8x8 ] = ff_pred8x8_vertical_mmx;
|
||||
h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_mmx;
|
||||
}
|
||||
@@ -185,7 +185,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
|
||||
h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_mmx;
|
||||
h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_mmx;
|
||||
} else {
|
||||
if (chroma_format_idc == 1)
|
||||
if (chroma_format_idc <= 1)
|
||||
h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_plane_mmx;
|
||||
if (codec_id == CODEC_ID_SVQ3) {
|
||||
h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_svq3_mmx;
|
||||
@@ -200,7 +200,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
|
||||
if (mm_flags & AV_CPU_FLAG_MMX2) {
|
||||
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmxext;
|
||||
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_mmxext;
|
||||
if (chroma_format_idc == 1)
|
||||
if (chroma_format_idc <= 1)
|
||||
h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_horizontal_mmxext;
|
||||
h->pred8x8l [TOP_DC_PRED ] = ff_pred8x8l_top_dc_mmxext;
|
||||
h->pred8x8l [DC_PRED ] = ff_pred8x8l_dc_mmxext;
|
||||
@@ -225,7 +225,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
|
||||
h->pred4x4 [HOR_UP_PRED ] = ff_pred4x4_horizontal_up_mmxext;
|
||||
}
|
||||
if (codec_id == CODEC_ID_SVQ3 || codec_id == CODEC_ID_H264) {
|
||||
if (chroma_format_idc == 1) {
|
||||
if (chroma_format_idc <= 1) {
|
||||
h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_mmxext;
|
||||
h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_mmxext;
|
||||
}
|
||||
@@ -237,7 +237,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
|
||||
h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_mmxext;
|
||||
h->pred4x4 [VERT_PRED ] = ff_pred4x4_vertical_vp8_mmxext;
|
||||
} else {
|
||||
if (chroma_format_idc == 1)
|
||||
if (chroma_format_idc <= 1)
|
||||
h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_plane_mmx2;
|
||||
if (codec_id == CODEC_ID_SVQ3) {
|
||||
h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_svq3_mmx2;
|
||||
@@ -264,7 +264,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
|
||||
h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_tm_vp8_sse2;
|
||||
h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_sse2;
|
||||
} else {
|
||||
if (chroma_format_idc == 1)
|
||||
if (chroma_format_idc <= 1)
|
||||
h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_plane_sse2;
|
||||
if (codec_id == CODEC_ID_SVQ3) {
|
||||
h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_svq3_sse2;
|
||||
@@ -279,7 +279,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
|
||||
if (mm_flags & AV_CPU_FLAG_SSSE3) {
|
||||
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_ssse3;
|
||||
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_ssse3;
|
||||
if (chroma_format_idc == 1)
|
||||
if (chroma_format_idc <= 1)
|
||||
h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_ssse3;
|
||||
h->pred8x8l [TOP_DC_PRED ] = ff_pred8x8l_top_dc_ssse3;
|
||||
h->pred8x8l [DC_PRED ] = ff_pred8x8l_dc_ssse3;
|
||||
@@ -295,7 +295,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
|
||||
h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_ssse3;
|
||||
h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_ssse3;
|
||||
} else {
|
||||
if (chroma_format_idc == 1)
|
||||
if (chroma_format_idc <= 1)
|
||||
h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_plane_ssse3;
|
||||
if (codec_id == CODEC_ID_SVQ3) {
|
||||
h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_svq3_ssse3;
|
||||
@@ -311,7 +311,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
|
||||
h->pred4x4[DC_PRED ] = ff_pred4x4_dc_10_mmxext;
|
||||
h->pred4x4[HOR_UP_PRED ] = ff_pred4x4_horizontal_up_10_mmxext;
|
||||
|
||||
if (chroma_format_idc == 1)
|
||||
if (chroma_format_idc <= 1)
|
||||
h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_10_mmxext;
|
||||
|
||||
h->pred8x8l[DC_128_PRED ] = ff_pred8x8l_128_dc_10_mmxext;
|
||||
@@ -330,7 +330,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
|
||||
h->pred4x4[VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_10_sse2;
|
||||
h->pred4x4[HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_10_sse2;
|
||||
|
||||
if (chroma_format_idc == 1) {
|
||||
if (chroma_format_idc <= 1) {
|
||||
h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_10_sse2;
|
||||
h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_10_sse2;
|
||||
h->pred8x8[PLANE_PRED8x8 ] = ff_pred8x8_plane_10_sse2;
|
||||
|
@@ -344,7 +344,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth, const int chrom
|
||||
{
|
||||
int mm_flags = av_get_cpu_flags();
|
||||
|
||||
if (chroma_format_idc == 1 && mm_flags & AV_CPU_FLAG_MMX2) {
|
||||
if (chroma_format_idc <= 1 && mm_flags & AV_CPU_FLAG_MMX2) {
|
||||
c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
|
||||
}
|
||||
|
||||
@@ -358,7 +358,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth, const int chrom
|
||||
|
||||
c->h264_idct_add16 = ff_h264_idct_add16_8_mmx;
|
||||
c->h264_idct8_add4 = ff_h264_idct8_add4_8_mmx;
|
||||
if (chroma_format_idc == 1)
|
||||
if (chroma_format_idc <= 1)
|
||||
c->h264_idct_add8 = ff_h264_idct_add8_8_mmx;
|
||||
c->h264_idct_add16intra = ff_h264_idct_add16intra_8_mmx;
|
||||
c->h264_luma_dc_dequant_idct= ff_h264_luma_dc_dequant_idct_mmx;
|
||||
@@ -368,13 +368,13 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth, const int chrom
|
||||
c->h264_idct8_dc_add = ff_h264_idct8_dc_add_8_mmx2;
|
||||
c->h264_idct_add16 = ff_h264_idct_add16_8_mmx2;
|
||||
c->h264_idct8_add4 = ff_h264_idct8_add4_8_mmx2;
|
||||
if (chroma_format_idc == 1)
|
||||
if (chroma_format_idc <= 1)
|
||||
c->h264_idct_add8 = ff_h264_idct_add8_8_mmx2;
|
||||
c->h264_idct_add16intra= ff_h264_idct_add16intra_8_mmx2;
|
||||
|
||||
c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_8_mmxext;
|
||||
c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_8_mmxext;
|
||||
if (chroma_format_idc == 1) {
|
||||
if (chroma_format_idc <= 1) {
|
||||
c->h264_h_loop_filter_chroma= ff_deblock_h_chroma_8_mmxext;
|
||||
c->h264_h_loop_filter_chroma_intra= ff_deblock_h_chroma_intra_8_mmxext;
|
||||
}
|
||||
@@ -397,7 +397,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth, const int chrom
|
||||
|
||||
c->h264_idct_add16 = ff_h264_idct_add16_8_sse2;
|
||||
c->h264_idct8_add4 = ff_h264_idct8_add4_8_sse2;
|
||||
if (chroma_format_idc == 1)
|
||||
if (chroma_format_idc <= 1)
|
||||
c->h264_idct_add8 = ff_h264_idct_add8_8_sse2;
|
||||
c->h264_idct_add16intra = ff_h264_idct_add16intra_8_sse2;
|
||||
c->h264_luma_dc_dequant_idct= ff_h264_luma_dc_dequant_idct_sse2;
|
||||
@@ -448,7 +448,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth, const int chrom
|
||||
c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_sse2;
|
||||
|
||||
c->h264_idct_add16 = ff_h264_idct_add16_10_sse2;
|
||||
if (chroma_format_idc == 1)
|
||||
if (chroma_format_idc <= 1)
|
||||
c->h264_idct_add8 = ff_h264_idct_add8_10_sse2;
|
||||
c->h264_idct_add16intra= ff_h264_idct_add16intra_10_sse2;
|
||||
#if HAVE_ALIGNED_STACK
|
||||
@@ -489,7 +489,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth, const int chrom
|
||||
c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_avx;
|
||||
|
||||
c->h264_idct_add16 = ff_h264_idct_add16_10_avx;
|
||||
if (chroma_format_idc == 1)
|
||||
if (chroma_format_idc <= 1)
|
||||
c->h264_idct_add8 = ff_h264_idct_add8_10_avx;
|
||||
c->h264_idct_add16intra= ff_h264_idct_add16intra_10_avx;
|
||||
#if HAVE_ALIGNED_STACK
|
||||
|
@@ -106,6 +106,7 @@ static int xan_huffman_decode(unsigned char *dest, int dest_len,
|
||||
int ptr_len = src_len - 1 - byte*2;
|
||||
unsigned char val = ival;
|
||||
unsigned char *dest_end = dest + dest_len;
|
||||
unsigned char *dest_start = dest;
|
||||
GetBitContext gb;
|
||||
|
||||
if (ptr_len < 0)
|
||||
@@ -121,13 +122,13 @@ static int xan_huffman_decode(unsigned char *dest, int dest_len,
|
||||
|
||||
if (val < 0x16) {
|
||||
if (dest >= dest_end)
|
||||
return 0;
|
||||
return dest_len;
|
||||
*dest++ = val;
|
||||
val = ival;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
return dest - dest_start;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -276,7 +277,7 @@ static int xan_wc3_decode_frame(XanContext *s) {
|
||||
unsigned char flag = 0;
|
||||
int size = 0;
|
||||
int motion_x, motion_y;
|
||||
int x, y;
|
||||
int x, y, ret;
|
||||
|
||||
unsigned char *opcode_buffer = s->buffer1;
|
||||
unsigned char *opcode_buffer_end = s->buffer1 + s->buffer1_size;
|
||||
@@ -285,8 +286,8 @@ static int xan_wc3_decode_frame(XanContext *s) {
|
||||
|
||||
/* pointers to segments inside the compressed chunk */
|
||||
const unsigned char *huffman_segment;
|
||||
const unsigned char *size_segment;
|
||||
const unsigned char *vector_segment;
|
||||
GetByteContext size_segment;
|
||||
GetByteContext vector_segment;
|
||||
const unsigned char *imagedata_segment;
|
||||
int huffman_offset, size_offset, vector_offset, imagedata_offset,
|
||||
imagedata_size;
|
||||
@@ -306,13 +307,14 @@ static int xan_wc3_decode_frame(XanContext *s) {
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
huffman_segment = s->buf + huffman_offset;
|
||||
size_segment = s->buf + size_offset;
|
||||
vector_segment = s->buf + vector_offset;
|
||||
bytestream2_init(&size_segment, s->buf + size_offset, s->size - size_offset);
|
||||
bytestream2_init(&vector_segment, s->buf + vector_offset, s->size - vector_offset);
|
||||
imagedata_segment = s->buf + imagedata_offset;
|
||||
|
||||
if (xan_huffman_decode(opcode_buffer, opcode_buffer_size,
|
||||
huffman_segment, s->size - huffman_offset) < 0)
|
||||
if ((ret = xan_huffman_decode(opcode_buffer, opcode_buffer_size,
|
||||
huffman_segment, s->size - huffman_offset)) < 0)
|
||||
return AVERROR_INVALIDDATA;
|
||||
opcode_buffer_end = opcode_buffer + ret;
|
||||
|
||||
if (imagedata_segment[0] == 2) {
|
||||
xan_unpack(s->buffer2, s->buffer2_size,
|
||||
@@ -359,19 +361,17 @@ static int xan_wc3_decode_frame(XanContext *s) {

         case 9:
         case 19:
-            size = *size_segment++;
+            size = bytestream2_get_byte(&size_segment);
             break;

         case 10:
         case 20:
-            size = AV_RB16(&size_segment[0]);
-            size_segment += 2;
+            size = bytestream2_get_be16(&size_segment);
             break;

         case 11:
         case 21:
-            size = AV_RB24(size_segment);
-            size_segment += 3;
+            size = bytestream2_get_be24(&size_segment);
             break;
         }

@@ -393,9 +393,9 @@ static int xan_wc3_decode_frame(XanContext *s) {
         }
     } else {
         /* run-based motion compensation from last frame */
-        motion_x = sign_extend(*vector_segment >> 4, 4);
-        motion_y = sign_extend(*vector_segment & 0xF, 4);
-        vector_segment++;
+        uint8_t vector = bytestream2_get_byte(&vector_segment);
+        motion_x = sign_extend(vector >> 4, 4);
+        motion_y = sign_extend(vector & 0xF, 4);

         /* copy a run of pixels from the previous frame */
         xan_wc3_copy_pixel_run(s, x, y, size, motion_x, motion_y);
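For readers unfamiliar with the helper used in the xan hunks above: the raw pointer reads on size_segment/vector_segment are replaced by the bounds-checked GetByteContext reader from libavcodec/bytestream.h. The following standalone sketch is not part of the patch; the function name, arguments, and printed fields are invented for illustration, and it only assumes the FFmpeg/Libav development headers are installed.

    /* sketch: reads are clamped by GetByteContext, so a truncated chunk
     * yields zeroes instead of reading past the end of the buffer */
    #include <stdio.h>
    #include <stdint.h>
    #include <libavcodec/bytestream.h>

    static void demo_read(const uint8_t *chunk, int chunk_size)
    {
        GetByteContext gb;
        bytestream2_init(&gb, chunk, chunk_size);

        uint8_t  vec  = bytestream2_get_byte(&gb);  /* packed 4+4 bit motion vector */
        uint16_t size = bytestream2_get_be16(&gb);  /* big-endian 16-bit run size */

        printf("vector=0x%02x size=%u bytes left=%d\n",
               (unsigned)vec, (unsigned)size, bytestream2_get_bytes_left(&gb));
    }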
@@ -46,6 +46,11 @@ static av_cold int xan_decode_init(AVCodecContext *avctx)

     avctx->pix_fmt = PIX_FMT_YUV420P;

+    if (avctx->width & 1) {
+        av_log(avctx, AV_LOG_ERROR, "Invalid frame width: %d.\n", avctx->width);
+        return AVERROR(EINVAL);
+    }
+
     s->buffer_size = avctx->width * avctx->height;
     s->y_buffer = av_malloc(s->buffer_size);
     if (!s->y_buffer)
@@ -500,10 +500,13 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
         return -1;
     }

-    if (c->comp == 0) { //Uncompressed data
-        memcpy(c->decomp_buf, buf, len);
-        c->decomp_size = 1;
-    } else { // ZLIB-compressed data
+    if (c->comp == 0) { //Uncompressed data
+        if (c->decomp_size < len) {
+            av_log(avctx, AV_LOG_ERROR, "Buffer too small\n");
+            return AVERROR_INVALIDDATA;
+        }
+        memcpy(c->decomp_buf, buf, len);
+    } else { // ZLIB-compressed data
         c->zstream.total_in = c->zstream.total_out = 0;
         c->zstream.next_in = buf;
         c->zstream.avail_in = len;
@@ -315,8 +315,8 @@ static void find_motion(DeshakeContext *deshake, uint8_t *src1, uint8_t *src2,
         //av_log(NULL, AV_LOG_ERROR, "\n");
     }

-    p_x = (center_x - width / 2);
-    p_y = (center_y - height / 2);
+    p_x = (center_x - width / 2.0);
+    p_y = (center_y - height / 2.0);
     t->vector.x += (cos(t->angle)-1)*p_x - sin(t->angle)*p_y;
     t->vector.y += sin(t->angle)*p_x + (cos(t->angle)-1)*p_y;

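(Worked example, not from the diff: with an odd dimension such as width = 639, the old integer division gives 639 / 2 = 319 while 639 / 2.0 = 319.5, so the rotation compensation is now computed about the true centre instead of being biased by half a pixel.)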
@@ -47,7 +47,6 @@
 #undef time

 #include <ft2build.h>
-#include <freetype/config/ftheader.h>
 #include FT_FREETYPE_H
 #include FT_GLYPH_H

@@ -113,6 +113,7 @@ static void filter(GradFunContext *ctx, uint8_t *dst, const uint8_t *src, int wi
         ctx->filter_line(dst + y * dst_linesize, src + y * src_linesize, dc - r / 2, width, thresh, dither[y & 7]);
         if (++y >= height) break;
     }
+    emms_c();
 }

 static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
@@ -255,7 +255,7 @@ static int ape_read_header(AVFormatContext * s, AVFormatParameters * ap)
                ape->totalframes);
         return -1;
     }
-    if (ape->seektablelength && (ape->seektablelength / sizeof(*ape->seektable)) < ape->totalframes) {
+    if (ape->seektablelength / sizeof(*ape->seektable) < ape->totalframes) {
         av_log(s, AV_LOG_ERROR,
                "Number of seek entries is less than number of frames: %zu vs. %"PRIu32"\n",
                ape->seektablelength / sizeof(*ape->seektable), ape->totalframes);
@@ -618,7 +618,9 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
             if (ret < 0)
                 return ret;
         } else if (!ff_guidcmp(&g, &ff_asf_stream_header)) {
-            asf_read_stream_properties(s, gsize);
+            int ret = asf_read_stream_properties(s, gsize);
+            if (ret < 0)
+                return ret;
         } else if (!ff_guidcmp(&g, &ff_asf_comment_header)) {
             asf_read_content_desc(s, gsize);
         } else if (!ff_guidcmp(&g, &ff_asf_language_guid)) {
@@ -781,7 +781,11 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
     if(!avi->index_loaded && pb->seekable)
         avi_load_index(s);
     avi->index_loaded |= 1;
-    avi->non_interleaved |= guess_ni_flag(s) | (s->flags & AVFMT_FLAG_SORT_DTS);
+
+    if ((ret = guess_ni_flag(s)) < 0)
+        return ret;
+
+    avi->non_interleaved |= ret | (s->flags & AVFMT_FLAG_SORT_DTS);
     for(i=0; i<s->nb_streams; i++){
         AVStream *st = s->streams[i];
         if(st->nb_index_entries)
@@ -807,8 +811,10 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
     return 0;
 }

-static int read_gab2_sub(AVStream *st, AVPacket *pkt) {
-    if (!strcmp(pkt->data, "GAB2") && AV_RL16(pkt->data+5) == 2) {
+static int read_gab2_sub(AVStream *st, AVPacket *pkt)
+{
+    if (pkt->size >= 7 &&
+        !strcmp(pkt->data, "GAB2") && AV_RL16(pkt->data + 5) == 2) {
         uint8_t desc[256];
         int score = AVPROBE_SCORE_MAX / 2, ret;
         AVIStream *ast = st->priv_data;
@@ -938,7 +944,7 @@ start_sync:
             goto start_sync;
         }

-        n= get_stream_idx(d);
+        n = avi->dv_demux ? 0 : get_stream_idx(d);

         if(!((i-avi->last_pkt_pos)&1) && get_stream_idx(d+1) < s->nb_streams)
             continue;
@@ -1049,6 +1055,8 @@ static int avi_read_packet(AVFormatContext *s, AVPacket *pkt)
         int size = avpriv_dv_get_packet(avi->dv_demux, pkt);
         if (size >= 0)
             return size;
+        else
+            goto resync;
     }

     if(avi->non_interleaved){
@@ -1302,6 +1310,66 @@ static int avi_read_idx1(AVFormatContext *s, int size)
     return 0;
 }

+/* Scan the index and consider any file with streams more than
+ * 2 seconds or 64MB apart non-interleaved. */
+static int check_stream_max_drift(AVFormatContext *s)
+{
+    int64_t min_pos, pos;
+    int i;
+    int *idx = av_malloc(s->nb_streams * sizeof(*idx));
+    if (!idx)
+        return AVERROR(ENOMEM);
+    else
+        memset(idx, 0, s->nb_streams * sizeof(*idx));
+
+    for (min_pos = pos = 0; min_pos != INT64_MAX; pos = min_pos + 1LU) {
+        int64_t max_dts = INT64_MIN / 2;
+        int64_t min_dts = INT64_MAX / 2;
+        int64_t max_buffer = 0;
+
+        min_pos = INT64_MAX;
+
+        for (i = 0; i < s->nb_streams; i++) {
+            AVStream *st = s->streams[i];
+            AVIStream *ast = st->priv_data;
+            int n = st->nb_index_entries;
+            while (idx[i] < n && st->index_entries[idx[i]].pos < pos)
+                idx[i]++;
+            if (idx[i] < n) {
+                int64_t dts;
+                dts = av_rescale_q(st->index_entries[idx[i]].timestamp /
+                                   FFMAX(ast->sample_size, 1),
+                                   st->time_base, AV_TIME_BASE_Q);
+                min_dts = FFMIN(min_dts, dts);
+                min_pos = FFMIN(min_pos, st->index_entries[idx[i]].pos);
+            }
+        }
+        for (i = 0; i < s->nb_streams; i++) {
+            AVStream *st = s->streams[i];
+            AVIStream *ast = st->priv_data;
+
+            if (idx[i] && min_dts != INT64_MAX / 2) {
+                int64_t dts;
+                dts = av_rescale_q(st->index_entries[idx[i] - 1].timestamp /
+                                   FFMAX(ast->sample_size, 1),
+                                   st->time_base, AV_TIME_BASE_Q);
+                max_dts = FFMAX(max_dts, dts);
+                max_buffer = FFMAX(max_buffer,
+                                   av_rescale(dts - min_dts,
+                                              st->codec->bit_rate,
+                                              AV_TIME_BASE));
+            }
+        }
+        if (max_dts - min_dts > 2 * AV_TIME_BASE ||
+            max_buffer > 1024 * 1024 * 8 * 8) {
+            av_free(idx);
+            return 1;
+        }
+    }
+    av_free(idx);
+    return 0;
+}
+
 static int guess_ni_flag(AVFormatContext *s){
     int i;
     int64_t last_start=0;
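The drift check added above compares per-stream index timestamps on a common microsecond scale via av_rescale_q(). As a standalone illustration (not part of the patch; the time base and timestamp values are arbitrary), converting two seconds of a 90 kHz time base to AV_TIME_BASE_Q looks like this:

    #include <inttypes.h>
    #include <stdio.h>
    #include <libavutil/avutil.h>
    #include <libavutil/mathematics.h>

    int main(void)
    {
        AVRational tb = { 1, 90000 };   /* a typical 90 kHz stream time base */
        int64_t ts = 180000;            /* 2 seconds expressed in that base  */

        /* rescale to AV_TIME_BASE_Q (microseconds) without intermediate overflow */
        int64_t us = av_rescale_q(ts, tb, AV_TIME_BASE_Q);
        printf("%"PRId64" us\n", us);   /* prints 2000000 */
        return 0;
    }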
@@ -1330,7 +1398,11 @@ static int guess_ni_flag(AVFormatContext *s){
             first_end= st->index_entries[n-1].pos;
     }
     avio_seek(s->pb, oldpos, SEEK_SET);
-    return last_start > first_end;
+
+    if (last_start > first_end)
+        return 1;
+
+    return check_stream_max_drift(s);
 }

 static int avi_load_index(AVFormatContext *s)
@@ -1390,12 +1462,17 @@ static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
     int64_t pos, pos_min;
     AVIStream *ast;

+    /* Does not matter which stream is requested dv in avi has the
+     * stream information in the first video stream.
+     */
+    if (avi->dv_demux)
+        stream_index = 0;
+
     if (!avi->index_loaded) {
         /* we only load the index on demand */
         avi_load_index(s);
         avi->index_loaded |= 1;
     }
     assert(stream_index>= 0);

     st = s->streams[stream_index];
     ast= st->priv_data;
@@ -1413,7 +1490,6 @@ static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp
         /* One and only one real stream for DV in AVI, and it has video */
         /* offsets. Calling with other stream indexes should have failed */
         /* the av_index_search_timestamp call above. */
-        assert(stream_index == 0);

         if(avio_seek(s->pb, pos, SEEK_SET) < 0)
             return -1;
@@ -130,6 +130,10 @@ static int bfi_read_packet(AVFormatContext * s, AVPacket * pkt)
     video_offset = avio_rl32(pb);
     audio_size = video_offset - audio_offset;
     bfi->video_size = chunk_size - video_offset;
+    if (audio_size < 0 || bfi->video_size < 0) {
+        av_log(s, AV_LOG_ERROR, "Invalid audio/video offsets or chunk size\n");
+        return AVERROR_INVALIDDATA;
+    }

     //Tossing an audio packet at the audio decoder.
     ret = av_get_packet(pb, pkt, audio_size);
@@ -138,9 +142,7 @@ static int bfi_read_packet(AVFormatContext * s, AVPacket * pkt)

         pkt->pts = bfi->audio_frame;
         bfi->audio_frame += ret;
-    }
-
-    else {
+    } else if (bfi->video_size > 0) {

         //Tossing a video packet at the video decoder.
         ret = av_get_packet(pb, pkt, bfi->video_size);
@@ -152,6 +154,9 @@ static int bfi_read_packet(AVFormatContext * s, AVPacket * pkt)

         /* One less frame to read. A cursory decrement. */
         bfi->nframes--;
+    } else {
+        /* Empty video packet */
+        ret = AVERROR(EAGAIN);
     }

     bfi->avflag = !bfi->avflag;
@@ -153,6 +153,8 @@ static int cin_read_frame_header(CinDemuxContext *cin, AVIOContext *pb) {

     if (avio_rl32(pb) != 0xAA55AA55)
         return AVERROR_INVALIDDATA;
+    if (hdr->video_frame_size < 0 || hdr->audio_frame_size < 0)
+        return AVERROR_INVALIDDATA;

     return 0;
 }
@@ -426,8 +426,9 @@ static int ea_read_header(AVFormatContext *s,
     }

     if (ea->audio_codec) {
-        if (ea->num_channels <= 0) {
-            av_log(s, AV_LOG_WARNING, "Unsupported number of channels: %d\n", ea->num_channels);
+        if (ea->num_channels <= 0 || ea->num_channels > 2) {
+            av_log(s, AV_LOG_WARNING,
+                   "Unsupported number of channels: %d\n", ea->num_channels);
             ea->audio_codec = 0;
             return 1;
         }
@@ -35,7 +35,7 @@ static int h263_probe(AVProbeData *p)
     for(i=0; i<p->buf_size; i++){
         code = (code<<8) + p->buf[i];
         if ((code & 0xfffffc0000) == 0x800000) {
-            src_fmt= (code>>2)&3;
+            src_fmt= (code>>2)&7;
             if( src_fmt != last_src_fmt
                && last_src_fmt>0 && last_src_fmt<6
                && src_fmt<6)
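(Aside, not from the diff: the source-format field in the H.263 picture header is three bits wide, so the old two-bit mask could alias extended formats onto the small standard sizes; masking with 7 reads the full field, and the existing "< 6" comparisons still reject the reserved value 6 and the extended-PTYPE value 7.)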
@@ -145,6 +145,8 @@ static int roq_read_packet(AVFormatContext *s,
             break;

         case RoQ_QUAD_CODEBOOK:
+            if (roq->video_stream_index < 0)
+                return AVERROR_INVALIDDATA;
             /* packet needs to contain both this codebook and next VQ chunk */
             codebook_offset = avio_tell(pb) - RoQ_CHUNK_PREAMBLE_SIZE;
             codebook_size = chunk_size;
@@ -187,6 +189,11 @@ static int roq_read_packet(AVFormatContext *s,
                 st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
             }
+        case RoQ_QUAD_VQ:
+            if (chunk_type == RoQ_QUAD_VQ) {
+                if (roq->video_stream_index < 0)
+                    return AVERROR_INVALIDDATA;
+            }

             /* load up the packet */
             if (av_new_packet(pkt, chunk_size + RoQ_CHUNK_PREAMBLE_SIZE))
                 return AVERROR(EIO);
@@ -263,6 +263,7 @@ const AVCodecTag codec_movaudio_tags[] = {
     { CODEC_ID_PCM_MULAW, MKTAG('u', 'l', 'a', 'w') },
     { CODEC_ID_PCM_S16BE, MKTAG('t', 'w', 'o', 's') },
     { CODEC_ID_PCM_S16LE, MKTAG('s', 'o', 'w', 't') },
+    { CODEC_ID_PCM_S16BE, MKTAG('l', 'p', 'c', 'm') },
     { CODEC_ID_PCM_S16LE, MKTAG('l', 'p', 'c', 'm') },
     { CODEC_ID_PCM_S24BE, MKTAG('i', 'n', '2', '4') },
     { CODEC_ID_PCM_S24LE, MKTAG('i', 'n', '2', '4') },
@@ -346,7 +347,7 @@ int ff_mov_lang_to_iso639(unsigned code, char to[4])
     memset(to, 0, 4);
     /* is it the mangled iso code? */
     /* see http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt */
-    if (code > 138) {
+    if (code >= 0x400 && code != 0x7fff) {
         for (i = 2; i >= 0; i--) {
             to[i] = 0x60 + (code & 0x1f);
             code >>= 5;
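(Worked example, not from the diff: the mangled form packs three 5-bit letters, 'a' = 1 through 'z' = 26, so "eng" becomes (5 << 10) | (14 << 5) | 7 = 0x15C7. Because the first letter is at least 1, any packed code is at least 1 << 10 = 0x400, which is what the new test checks, with 0x7fff apparently reserved for "unspecified".)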
@@ -1536,6 +1536,10 @@ static int matroska_read_header(AVFormatContext *s, AVFormatParameters *ap)
                 track->audio.sub_packet_h = avio_rb16(&b);
                 track->audio.frame_size = avio_rb16(&b);
                 track->audio.sub_packet_size = avio_rb16(&b);
+                if (flavor <= 0 || track->audio.coded_framesize <= 0 ||
+                    track->audio.sub_packet_h <= 0 || track->audio.frame_size <= 0 ||
+                    track->audio.sub_packet_size <= 0)
+                    return AVERROR_INVALIDDATA;
                 track->audio.buf = av_malloc(track->audio.frame_size * track->audio.sub_packet_h);
                 if (codec_id == CODEC_ID_RA_288) {
                     st->codec->block_align = track->audio.coded_framesize;
@@ -1716,6 +1720,7 @@ static int matroska_deliver_packet(MatroskaDemuxContext *matroska,
  */
 static void matroska_clear_queue(MatroskaDemuxContext *matroska)
 {
+    matroska->prev_pkt = NULL;
     if (matroska->packets) {
         int n;
         for (n = 0; n < matroska->num_packets; n++) {
@@ -776,7 +776,9 @@ static int mkv_write_tag(AVFormatContext *s, AVDictionary *m, unsigned int eleme
     end_ebml_master(s->pb, targets);

     while ((t = av_dict_get(m, "", t, AV_DICT_IGNORE_SUFFIX)))
-        if (av_strcasecmp(t->key, "title") && av_strcasecmp(t->key, "stereo_mode"))
+        if (av_strcasecmp(t->key, "title") &&
+            av_strcasecmp(t->key, "stereo_mode") &&
+            av_strcasecmp(t->key, "encoding_tool"))
             mkv_write_simpletag(s->pb, t);

     end_ebml_master(s->pb, tag);
@@ -936,7 +938,10 @@ static int mkv_write_header(AVFormatContext *s)
             segment_uid[i] = av_lfg_get(&lfg);

         put_ebml_string(pb, MATROSKA_ID_MUXINGAPP , LIBAVFORMAT_IDENT);
-        put_ebml_string(pb, MATROSKA_ID_WRITINGAPP, LIBAVFORMAT_IDENT);
+        if ((tag = av_dict_get(s->metadata, "encoding_tool", NULL, 0)))
+            put_ebml_string(pb, MATROSKA_ID_WRITINGAPP, tag->value);
+        else
+            put_ebml_string(pb, MATROSKA_ID_WRITINGAPP, LIBAVFORMAT_IDENT);
         put_ebml_binary(pb, MATROSKA_ID_SEGMENTUID, segment_uid, 16);
     }

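Both this Matroska change and the matching mov_write_ilst_tag change further down read an "encoding_tool" entry from the output context's metadata. The sketch below is illustration only: the key name comes from the patch, while the function name and application string are invented; it only assumes the public libavformat/libavutil API.

    #include <libavformat/avformat.h>

    /* sketch: attach a writing-application name that the Matroska and MOV
     * muxers in this diff will pick up when writing their headers */
    static int tag_output(AVFormatContext *oc)
    {
        return av_dict_set(&oc->metadata, "encoding_tool", "example-app 1.0", 0);
    }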
@@ -253,7 +253,7 @@ static int mov_read_udta_string(MOVContext *c, AVIOContext *pb, MOVAtom atom)
         if (parse)
             parse(c, pb, str_size, key);
         else {
-            if (data_type == 3 || (data_type == 0 && langcode < 0x800)) { // MAC Encoded
+            if (data_type == 3 || (data_type == 0 && (langcode < 0x400 || langcode == 0x7fff))) { // MAC Encoded
                 mov_read_mac_string(c, pb, str_size, str, sizeof(str));
             } else {
                 avio_read(pb, str, str_size);
@@ -1694,6 +1694,7 @@ static int mov_read_stts(MOVContext *c, AVIOContext *pb, MOVAtom atom)
     if (entries >= UINT_MAX / sizeof(*sc->stts_data))
         return -1;

+    av_free(sc->stts_data);
     sc->stts_data = av_malloc(entries * sizeof(*sc->stts_data));
     if (!sc->stts_data)
         return AVERROR(ENOMEM);
@@ -1711,6 +1712,10 @@ static int mov_read_stts(MOVContext *c, AVIOContext *pb, MOVAtom atom)
             av_log(c->fc, AV_LOG_ERROR, "Invalid SampleDelta in STTS %d\n", sample_duration);
             sample_duration = 1;
         }
+        if (sample_count < 0) {
+            av_log(c->fc, AV_LOG_ERROR, "Invalid sample_count=%d\n", sample_count);
+            return AVERROR_INVALIDDATA;
+        }
         sc->stts_data[i].count= sample_count;
         sc->stts_data[i].duration= sample_duration;

@@ -1696,7 +1696,8 @@ static int mov_write_ilst_tag(AVIOContext *pb, MOVMuxContext *mov,
     mov_write_string_metadata(s, pb, "\251wrt", "composer" , 1);
     mov_write_string_metadata(s, pb, "\251alb", "album" , 1);
     mov_write_string_metadata(s, pb, "\251day", "date" , 1);
-    mov_write_string_tag(pb, "\251too", LIBAVFORMAT_IDENT, 0, 1);
+    if (!mov_write_string_metadata(s, pb, "\251too", "encoding_tool", 1))
+        mov_write_string_tag(pb, "\251too", LIBAVFORMAT_IDENT, 0, 1);
     mov_write_string_metadata(s, pb, "\251cmt", "comment" , 1);
     mov_write_string_metadata(s, pb, "\251gen", "genre" , 1);
     mov_write_string_metadata(s, pb, "\251cpy", "copyright", 1);
@@ -137,12 +137,21 @@ static void mpc8_parse_seektable(AVFormatContext *s, int64_t off)
     int i, t, seekd;
     GetBitContext gb;

+    if (s->nb_streams == 0) {
+        av_log(s, AV_LOG_ERROR, "No stream added before parsing seek table\n");
+        return;
+    }
+
     avio_seek(s->pb, off, SEEK_SET);
     mpc8_get_chunk_header(s->pb, &tag, &size);
     if(tag != TAG_SEEKTABLE){
         av_log(s, AV_LOG_ERROR, "No seek table at given position\n");
         return;
     }
+    if (size < 0 || size >= INT_MAX / 2) {
+        av_log(s, AV_LOG_ERROR, "Bad seek table size\n");
+        return;
+    }
     if(!(buf = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE)))
         return;
     avio_read(s->pb, buf, size);
@@ -1229,7 +1229,7 @@ static void m4sl_cb(MpegTSFilter *filter, const uint8_t *section, int section_le
             AVStream *st;
             if (ts->pids[pid]->es_id != mp4_descr[i].es_id)
                 continue;
-            if (!(ts->pids[pid] && ts->pids[pid]->type == MPEGTS_PES)) {
+            if (ts->pids[pid]->type != MPEGTS_PES) {
                 av_log(s, AV_LOG_ERROR, "pid %x is not PES\n", pid);
                 continue;
             }
@@ -235,7 +235,7 @@ static void mpegts_write_pat(AVFormatContext *s)
                           data, q - data);
 }

-static void mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
+static int mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
 {
     // MpegTSWrite *ts = s->priv_data;
     uint8_t data[1012], *q, *desc_length_ptr, *program_info_length_ptr;
@@ -288,6 +288,10 @@ static void mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
             stream_type = STREAM_TYPE_PRIVATE_DATA;
             break;
         }
+
+        if (q - data > sizeof(data) - 32)
+            return AVERROR(EINVAL);
+
         *q++ = stream_type;
         put16(&q, 0xe000 | ts_st->pid);
         desc_length_ptr = q;
@@ -311,7 +315,7 @@ static void mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
             len_ptr = q++;
             *len_ptr = 0;

-            for (p = lang->value; next && *len_ptr < 255 / 4 * 4; p = next + 1) {
+            for (p = lang->value; next && *len_ptr < 255 / 4 * 4 && q - data < sizeof(data) - 4; p = next + 1) {
                 next = strchr(p, ',');
                 if (strlen(p) != 3 && (!next || next != p + 3))
                     continue; /* not a 3-letter code */
@@ -373,6 +377,7 @@ static void mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
     }
     mpegts_write_section1(&service->pmt, PMT_TID, service->sid, 0, 0, 0,
                           data, q - data);
+    return 0;
 }

 /* NOTE: str == NULL is accepted for an empty string */
@@ -91,6 +91,12 @@ static int read_header(AVFormatContext *s, AVFormatParameters *ap)
     mvi->get_int = (vst->codec->width * vst->codec->height < (1 << 16)) ? avio_rl16 : avio_rl24;

     mvi->audio_frame_size = ((uint64_t)mvi->audio_data_size << MVI_FRAC_BITS) / frames_count;
+    if (mvi->audio_frame_size <= 1 << MVI_FRAC_BITS - 1) {
+        av_log(s, AV_LOG_ERROR, "Invalid audio_data_size (%d) or frames_count (%d)\n",
+               mvi->audio_data_size, frames_count);
+        return AVERROR_INVALIDDATA;
+    }
+
     mvi->audio_size_counter = (ast->codec->sample_rate * 830 / mvi->audio_frame_size - 1) * mvi->audio_frame_size;
     mvi->audio_size_left = mvi->audio_data_size;
